diff options
170 files changed, 2323 insertions, 1599 deletions
diff --git a/Documentation/crypto/async-tx-api.txt b/Documentation/crypto/async-tx-api.txt new file mode 100644 index 000000000000..c1e9545c59bd --- /dev/null +++ b/Documentation/crypto/async-tx-api.txt | |||
@@ -0,0 +1,219 @@ | |||
1 | Asynchronous Transfers/Transforms API | ||
2 | |||
3 | 1 INTRODUCTION | ||
4 | |||
5 | 2 GENEALOGY | ||
6 | |||
7 | 3 USAGE | ||
8 | 3.1 General format of the API | ||
9 | 3.2 Supported operations | ||
10 | 3.3 Descriptor management | ||
11 | 3.4 When does the operation execute? | ||
12 | 3.5 When does the operation complete? | ||
13 | 3.6 Constraints | ||
14 | 3.7 Example | ||
15 | |||
16 | 4 DRIVER DEVELOPER NOTES | ||
17 | 4.1 Conformance points | ||
18 | 4.2 "My application needs finer control of hardware channels" | ||
19 | |||
20 | 5 SOURCE | ||
21 | |||
22 | --- | ||
23 | |||
24 | 1 INTRODUCTION | ||
25 | |||
26 | The async_tx API provides methods for describing a chain of asynchronous | ||
27 | bulk memory transfers/transforms with support for inter-transactional | ||
28 | dependencies. It is implemented as a dmaengine client that smooths over | ||
29 | the details of different hardware offload engine implementations. Code | ||
30 | that is written to the API can optimize for asynchronous operation and | ||
31 | the API will fit the chain of operations to the available offload | ||
32 | resources. | ||
33 | |||
34 | 2 GENEALOGY | ||
35 | |||
36 | The API was initially designed to offload the memory copy and | ||
37 | xor-parity-calculations of the md-raid5 driver using the offload engines | ||
38 | present in the Intel(R) Xscale series of I/O processors. It also built | ||
39 | on the 'dmaengine' layer developed for offloading memory copies in the | ||
40 | network stack using Intel(R) I/OAT engines. The following design | ||
41 | features surfaced as a result: | ||
42 | 1/ implicit synchronous path: users of the API do not need to know if | ||
43 | the platform they are running on has offload capabilities. The | ||
44 | operation will be offloaded when an engine is available and carried out | ||
45 | in software otherwise. | ||
46 | 2/ cross channel dependency chains: the API allows a chain of dependent | ||
47 | operations to be submitted, like xor->copy->xor in the raid5 case. The | ||
48 | API automatically handles cases where the transition from one operation | ||
49 | to another implies a hardware channel switch. | ||
50 | 3/ dmaengine extensions to support multiple clients and operation types | ||
51 | beyond 'memcpy' | ||
52 | |||
53 | 3 USAGE | ||
54 | |||
55 | 3.1 General format of the API: | ||
56 | struct dma_async_tx_descriptor * | ||
57 | async_<operation>(<op specific parameters>, | ||
58 | enum async_tx_flags flags, | ||
59 | struct dma_async_tx_descriptor *dependency, | ||
60 | dma_async_tx_callback callback_routine, | ||
61 | void *callback_parameter); | ||
62 | |||
63 | 3.2 Supported operations: | ||
64 | memcpy - memory copy between a source and a destination buffer | ||
65 | memset - fill a destination buffer with a byte value | ||
66 | xor - xor a series of source buffers and write the result to a | ||
67 | destination buffer | ||
68 | xor_zero_sum - xor a series of source buffers and set a flag if the | ||
69 | result is zero. The implementation attempts to prevent | ||
70 | writes to memory | ||
71 | |||
72 | 3.3 Descriptor management: | ||
73 | The return value is non-NULL and points to a 'descriptor' when the operation | ||
74 | has been queued to execute asynchronously. Descriptors are recycled | ||
75 | resources, under control of the offload engine driver, to be reused as | ||
76 | operations complete. When an application needs to submit a chain of | ||
77 | operations it must guarantee that the descriptor is not automatically recycled | ||
78 | before the dependency is submitted. This requires that all descriptors be | ||
79 | acknowledged by the application before the offload engine driver is allowed to | ||
80 | recycle (or free) the descriptor. A descriptor can be acked by one of the | ||
81 | following methods: | ||
82 | 1/ setting the ASYNC_TX_ACK flag if no child operations are to be submitted | ||
83 | 2/ setting the ASYNC_TX_DEP_ACK flag to acknowledge the parent | ||
84 | descriptor of a new operation. | ||
85 | 3/ calling async_tx_ack() on the descriptor. | ||
86 | |||
87 | 3.4 When does the operation execute? | ||
88 | Operations do not immediately issue after return from the | ||
89 | async_<operation> call. Offload engine drivers batch operations to | ||
90 | improve performance by reducing the number of mmio cycles needed to | ||
91 | manage the channel. Once a driver-specific threshold is met the driver | ||
92 | automatically issues pending operations. An application can force this | ||
93 | event by calling async_tx_issue_pending_all(). This operates on all | ||
94 | channels since the application has no knowledge of channel to operation | ||
95 | mapping. | ||
96 | |||
97 | 3.5 When does the operation complete? | ||
98 | There are two methods for an application to learn about the completion | ||
99 | of an operation. | ||
100 | 1/ Call dma_wait_for_async_tx(). This call causes the CPU to spin while | ||
101 | it polls for the completion of the operation. It handles dependency | ||
102 | chains and issuing pending operations. | ||
103 | 2/ Specify a completion callback. The callback routine runs in tasklet | ||
104 | context if the offload engine driver supports interrupts, or it is | ||
105 | called in application context if the operation is carried out | ||
106 | synchronously in software. The callback can be set in the call to | ||
107 | async_<operation>, or when the application needs to submit a chain of | ||
108 | unknown length it can use the async_trigger_callback() routine to set a | ||
109 | completion interrupt/callback at the end of the chain. | ||
110 | |||
111 | 3.6 Constraints: | ||
112 | 1/ Calls to async_<operation> are not permitted in IRQ context. Other | ||
113 | contexts are permitted provided constraint #2 is not violated. | ||
114 | 2/ Completion callback routines cannot submit new operations. This | ||
115 | results in recursion in the synchronous case and spin_locks being | ||
116 | acquired twice in the asynchronous case. | ||
117 | |||
118 | 3.7 Example: | ||
119 | Perform a xor->copy->xor operation where each operation depends on the | ||
120 | result from the previous operation: | ||
121 | |||
122 | void complete_xor_copy_xor(void *param) | ||
123 | { | ||
124 | printk("complete\n"); | ||
125 | } | ||
126 | |||
127 | int run_xor_copy_xor(struct page **xor_srcs, | ||
128 | int xor_src_cnt, | ||
129 | struct page *xor_dest, | ||
130 | size_t xor_len, | ||
131 | struct page *copy_src, | ||
132 | struct page *copy_dest, | ||
133 | size_t copy_len) | ||
134 | { | ||
135 | struct dma_async_tx_descriptor *tx; | ||
136 | |||
137 | tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len, | ||
138 | ASYNC_TX_XOR_DROP_DST, NULL, NULL, NULL); | ||
139 | tx = async_memcpy(copy_dest, copy_src, 0, 0, copy_len, | ||
140 | ASYNC_TX_DEP_ACK, tx, NULL, NULL); | ||
141 | tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len, | ||
142 | ASYNC_TX_XOR_DROP_DST | ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, | ||
143 | tx, complete_xor_copy_xor, NULL); | ||
144 | |||
145 | async_tx_issue_pending_all(); | ||
146 | } | ||
147 | |||
148 | See include/linux/async_tx.h for more information on the flags. See the | ||
149 | ops_run_* and ops_complete_* routines in drivers/md/raid5.c for more | ||
150 | implementation examples. | ||
151 | |||
152 | 4 DRIVER DEVELOPER NOTES | ||
153 | 4.1 Conformance points: | ||
154 | There are a few conformance points required in dmaengine drivers to | ||
155 | accommodate assumptions made by applications using the async_tx API: | ||
156 | 1/ Completion callbacks are expected to happen in tasklet context | ||
157 | 2/ dma_async_tx_descriptor fields are never manipulated in IRQ context | ||
158 | 3/ Use async_tx_run_dependencies() in the descriptor clean up path to | ||
159 | handle submission of dependent operations | ||
160 | |||
161 | 4.2 "My application needs finer control of hardware channels" | ||
162 | This requirement seems to arise from cases where a DMA engine driver is | ||
163 | trying to support device-to-memory DMA. The dmaengine and async_tx | ||
164 | implementations were designed for offloading memory-to-memory | ||
165 | operations; however, there are some capabilities of the dmaengine layer | ||
166 | that can be used for platform-specific channel management. | ||
167 | Platform-specific constraints can be handled by registering the | ||
168 | application as a 'dma_client' and implementing a 'dma_event_callback' to | ||
169 | apply a filter to the available channels in the system. Before showing | ||
170 | how to implement a custom dma_event callback some background of | ||
171 | dmaengine's client support is required. | ||
172 | |||
173 | The following routines in dmaengine support multiple clients requesting | ||
174 | use of a channel: | ||
175 | - dma_async_client_register(struct dma_client *client) | ||
176 | - dma_async_client_chan_request(struct dma_client *client) | ||
177 | |||
178 | dma_async_client_register takes a pointer to an initialized dma_client | ||
179 | structure. It expects that the 'event_callback' and 'cap_mask' fields | ||
180 | are already initialized. | ||
181 | |||
182 | dma_async_client_chan_request triggers dmaengine to notify the client of | ||
183 | all channels that satisfy the capability mask. It is up to the client's | ||
184 | event_callback routine to track how many channels the client needs and | ||
185 | how many it is currently using. The dma_event_callback routine returns a | ||
186 | dma_state_client code to let dmaengine know the status of the | ||
187 | allocation. | ||
188 | |||
189 | Below is the example of how to extend this functionality for | ||
190 | platform-specific filtering of the available channels beyond the | ||
191 | standard capability mask: | ||
192 | |||
193 | static enum dma_state_client | ||
194 | my_dma_client_callback(struct dma_client *client, | ||
195 | struct dma_chan *chan, enum dma_state state) | ||
196 | { | ||
197 | struct dma_device *dma_dev; | ||
198 | struct my_platform_specific_dma *plat_dma_dev; | ||
199 | |||
200 | dma_dev = chan->device; | ||
201 | plat_dma_dev = container_of(dma_dev, | ||
202 | struct my_platform_specific_dma, | ||
203 | dma_dev); | ||
204 | |||
205 | if (!plat_dma_dev->platform_specific_capability) | ||
206 | return DMA_DUP; | ||
207 | |||
208 | . . . | ||
209 | } | ||
210 | |||
211 | 5 SOURCE | ||
212 | include/linux/dmaengine.h: core header file for DMA drivers and clients | ||
213 | drivers/dma/dmaengine.c: offload engine channel management routines | ||
214 | drivers/dma/: location for offload engine drivers | ||
215 | include/linux/async_tx.h: core header file for the async_tx api | ||
216 | crypto/async_tx/async_tx.c: async_tx interface to dmaengine and common code | ||
217 | crypto/async_tx/async_memcpy.c: copy offload | ||
218 | crypto/async_tx/async_memset.c: memory fill offload | ||
219 | crypto/async_tx/async_xor.c: xor and xor zero sum offload | ||
diff --git a/Documentation/devices.txt b/Documentation/devices.txt index 8de132a02ba9..6c46730c631a 100644 --- a/Documentation/devices.txt +++ b/Documentation/devices.txt | |||
@@ -94,6 +94,8 @@ Your cooperation is appreciated. | |||
94 | 9 = /dev/urandom Faster, less secure random number gen. | 94 | 9 = /dev/urandom Faster, less secure random number gen. |
95 | 10 = /dev/aio Asynchronous I/O notification interface | 95 | 10 = /dev/aio Asynchronous I/O notification interface |
96 | 11 = /dev/kmsg Writes to this come out as printk's | 96 | 11 = /dev/kmsg Writes to this come out as printk's |
97 | 12 = /dev/oldmem Used by crashdump kernels to access | ||
98 | the memory of the kernel that crashed. | ||
97 | 99 | ||
98 | 1 block RAM disk | 100 | 1 block RAM disk |
99 | 0 = /dev/ram0 First RAM disk | 101 | 0 = /dev/ram0 First RAM disk |
diff --git a/Documentation/input/iforce-protocol.txt b/Documentation/input/iforce-protocol.txt index 95df4ca70e71..8777d2d321e3 100644 --- a/Documentation/input/iforce-protocol.txt +++ b/Documentation/input/iforce-protocol.txt | |||
@@ -1,254 +1,254 @@ | |||
1 | ** Introduction | 1 | ** Introduction |
2 | This document describes what I managed to discover about the protocol used to | 2 | This document describes what I managed to discover about the protocol used to |
3 | specify force effects to I-Force 2.0 devices. None of this information comes | 3 | specify force effects to I-Force 2.0 devices. None of this information comes |
4 | from Immerse. That's why you should not trust what is written in this | 4 | from Immerse. That's why you should not trust what is written in this |
5 | document. This document is intended to help understanding the protocol. | 5 | document. This document is intended to help understanding the protocol. |
6 | This is not a reference. Comments and corrections are welcome. To contact me, | 6 | This is not a reference. Comments and corrections are welcome. To contact me, |
7 | send an email to: deneux@ifrance.com | 7 | send an email to: deneux@ifrance.com |
8 | 8 | ||
9 | ** WARNING ** | 9 | ** WARNING ** |
10 | I may not be held responsible for any damage or harm caused if you try to | 10 | I may not be held responsible for any damage or harm caused if you try to
11 | send data to your I-Force device based on what you read in this document. | 11 | send data to your I-Force device based on what you read in this document. |
12 | 12 | ||
13 | ** Preliminary Notes: | 13 | ** Preliminary Notes: |
14 | All values are hexadecimal with big-endian encoding (msb on the left). Beware, | 14 | All values are hexadecimal with big-endian encoding (msb on the left). Beware, |
15 | values inside packets are encoded using little-endian. Bytes whose roles are | 15 | values inside packets are encoded using little-endian. Bytes whose roles are |
16 | unknown are marked ??? Information that needs deeper inspection is marked (?) | 16 | unknown are marked ??? Information that needs deeper inspection is marked (?) |
17 | 17 | ||
18 | ** General form of a packet ** | 18 | ** General form of a packet ** |
19 | This is how packets look when the device uses the rs232 to communicate. | 19 | This is how packets look when the device uses the rs232 to communicate. |
20 | 2B OP LEN DATA CS | 20 | 2B OP LEN DATA CS |
21 | CS is the checksum. It is equal to the exclusive or of all bytes. | 21 | CS is the checksum. It is equal to the exclusive or of all bytes. |
22 | 22 | ||
23 | When using USB: | 23 | When using USB: |
24 | OP DATA | 24 | OP DATA |
25 | The 2B, LEN and CS fields have disappeared, probably because USB handles frames and | 25 | The 2B, LEN and CS fields have disappeared, probably because USB handles frames and |
26 | data corruption is handled or insignificant. | 26 | data corruption is handled or insignificant.
27 | 27 | ||
28 | First, I describe effects that are sent by the device to the computer | 28 | First, I describe effects that are sent by the device to the computer |
29 | 29 | ||
30 | ** Device input state | 30 | ** Device input state |
31 | This packet is used to indicate the state of each button and the value of each | 31 | This packet is used to indicate the state of each button and the value of each |
32 | axis | 32 | axis |
33 | OP= 01 for a joystick, 03 for a wheel | 33 | OP= 01 for a joystick, 03 for a wheel |
34 | LEN= Varies from device to device | 34 | LEN= Varies from device to device |
35 | 00 X-Axis lsb | 35 | 00 X-Axis lsb |
36 | 01 X-Axis msb | 36 | 01 X-Axis msb |
37 | 02 Y-Axis lsb, or gas pedal for a wheel | 37 | 02 Y-Axis lsb, or gas pedal for a wheel |
38 | 03 Y-Axis msb, or brake pedal for a wheel | 38 | 03 Y-Axis msb, or brake pedal for a wheel |
39 | 04 Throttle | 39 | 04 Throttle |
40 | 05 Buttons | 40 | 05 Buttons |
41 | 06 Lower 4 bits: Buttons | 41 | 06 Lower 4 bits: Buttons |
42 | Upper 4 bits: Hat | 42 | Upper 4 bits: Hat |
43 | 07 Rudder | 43 | 07 Rudder |
44 | 44 | ||
45 | ** Device effects states | 45 | ** Device effects states |
46 | OP= 02 | 46 | OP= 02 |
47 | LEN= Varies | 47 | LEN= Varies |
48 | 00 ? Bit 1 (Value 2) is the value of the deadman switch | 48 | 00 ? Bit 1 (Value 2) is the value of the deadman switch |
49 | 01 Bit 8 is set if the effect is playing. Bits 0 to 7 are the effect id. | 49 | 01 Bit 8 is set if the effect is playing. Bits 0 to 7 are the effect id. |
50 | 02 ?? | 50 | 02 ?? |
51 | 03 Address of parameter block changed (lsb) | 51 | 03 Address of parameter block changed (lsb) |
52 | 04 Address of parameter block changed (msb) | 52 | 04 Address of parameter block changed (msb) |
53 | 05 Address of second parameter block changed (lsb) | 53 | 05 Address of second parameter block changed (lsb) |
54 | ... depending on the number of parameter blocks updated | 54 | ... depending on the number of parameter blocks updated |
55 | 55 | ||
56 | ** Force effect ** | 56 | ** Force effect ** |
57 | OP= 01 | 57 | OP= 01 |
58 | LEN= 0e | 58 | LEN= 0e |
59 | 00 Channel (when playing several effects at the same time, each must be assigned a channel) | 59 | 00 Channel (when playing several effects at the same time, each must be assigned a channel) |
60 | 01 Wave form | 60 | 01 Wave form |
61 | Val 00 Constant | 61 | Val 00 Constant |
62 | Val 20 Square | 62 | Val 20 Square |
63 | Val 21 Triangle | 63 | Val 21 Triangle |
64 | Val 22 Sine | 64 | Val 22 Sine |
65 | Val 23 Sawtooth up | 65 | Val 23 Sawtooth up |
66 | Val 24 Sawtooth down | 66 | Val 24 Sawtooth down |
67 | Val 40 Spring (Force = f(pos)) | 67 | Val 40 Spring (Force = f(pos)) |
68 | Val 41 Friction (Force = f(velocity)) and Inertia (Force = f(acceleration)) | 68 | Val 41 Friction (Force = f(velocity)) and Inertia (Force = f(acceleration)) |
69 | 69 | ||
70 | 70 | ||
71 | 02 Axes affected and trigger | 71 | 02 Axes affected and trigger |
72 | Bits 4-7: Val 2 = effect along one axis. Byte 05 indicates direction | 72 | Bits 4-7: Val 2 = effect along one axis. Byte 05 indicates direction |
73 | Val 4 = X axis only. Byte 05 must contain 5a | 73 | Val 4 = X axis only. Byte 05 must contain 5a |
74 | Val 8 = Y axis only. Byte 05 must contain b4 | 74 | Val 8 = Y axis only. Byte 05 must contain b4 |
75 | Val c = X and Y axes. Bytes 05 must contain 60 | 75 | Val c = X and Y axes. Bytes 05 must contain 60 |
76 | Bits 0-3: Val 0 = No trigger | 76 | Bits 0-3: Val 0 = No trigger |
77 | Val x+1 = Button x triggers the effect | 77 | Val x+1 = Button x triggers the effect |
78 | When the whole byte is 0, cancel the previously set trigger | 78 | When the whole byte is 0, cancel the previously set trigger |
79 | 79 | ||
80 | 03-04 Duration of effect (little endian encoding, in ms) | 80 | 03-04 Duration of effect (little endian encoding, in ms) |
81 | 81 | ||
82 | 05 Direction of effect, if applicable. Else, see 02 for value to assign. | 82 | 05 Direction of effect, if applicable. Else, see 02 for value to assign. |
83 | 83 | ||
84 | 06-07 Minimum time between triggering. | 84 | 06-07 Minimum time between triggering. |
85 | 85 | ||
86 | 08-09 Address of periodicity or magnitude parameters | 86 | 08-09 Address of periodicity or magnitude parameters |
87 | 0a-0b Address of attack and fade parameters, or ffff if none. | 87 | 0a-0b Address of attack and fade parameters, or ffff if none. |
88 | *or* | 88 | *or* |
89 | 08-09 Address of interactive parameters for X-axis, or ffff if not applicable | 89 | 08-09 Address of interactive parameters for X-axis, or ffff if not applicable |
90 | 0a-0b Address of interactive parameters for Y-axis, or ffff if not applicable | 90 | 0a-0b Address of interactive parameters for Y-axis, or ffff if not applicable |
91 | 91 | ||
92 | 0c-0d Delay before execution of effect (little endian encoding, in ms) | 92 | 0c-0d Delay before execution of effect (little endian encoding, in ms) |
93 | 93 | ||
94 | 94 | ||
95 | ** Time based parameters ** | 95 | ** Time based parameters ** |
96 | 96 | ||
97 | *** Attack and fade *** | 97 | *** Attack and fade *** |
98 | OP= 02 | 98 | OP= 02 |
99 | LEN= 08 | 99 | LEN= 08 |
100 | 00-01 Address where to store the parameters | 100 | 00-01 Address where to store the parameters
101 | 02-03 Duration of attack (little endian encoding, in ms) | 101 | 02-03 Duration of attack (little endian encoding, in ms) |
102 | 04 Level at end of attack. Signed byte. | 102 | 04 Level at end of attack. Signed byte. |
103 | 05-06 Duration of fade. | 103 | 05-06 Duration of fade. |
104 | 07 Level at end of fade. | 104 | 07 Level at end of fade. |
105 | 105 | ||
106 | *** Magnitude *** | 106 | *** Magnitude *** |
107 | OP= 03 | 107 | OP= 03 |
108 | LEN= 03 | 108 | LEN= 03 |
109 | 00-01 Address | 109 | 00-01 Address |
110 | 02 Level. Signed byte. | 110 | 02 Level. Signed byte. |
111 | 111 | ||
112 | *** Periodicity *** | 112 | *** Periodicity *** |
113 | OP= 04 | 113 | OP= 04 |
114 | LEN= 07 | 114 | LEN= 07 |
115 | 00-01 Address | 115 | 00-01 Address |
116 | 02 Magnitude. Signed byte. | 116 | 02 Magnitude. Signed byte. |
117 | 03 Offset. Signed byte. | 117 | 03 Offset. Signed byte. |
118 | 04 Phase. Val 00 = 0 deg, Val 40 = 90 degs. | 118 | 04 Phase. Val 00 = 0 deg, Val 40 = 90 degs. |
119 | 05-06 Period (little endian encoding, in ms) | 119 | 05-06 Period (little endian encoding, in ms) |
120 | 120 | ||
121 | ** Interactive parameters ** | 121 | ** Interactive parameters ** |
122 | OP= 05 | 122 | OP= 05 |
123 | LEN= 0a | 123 | LEN= 0a |
124 | 00-01 Address | 124 | 00-01 Address |
125 | 02 Positive Coeff | 125 | 02 Positive Coeff |
126 | 03 Negative Coeff | 126 | 03 Negative Coeff |
127 | 04+05 Offset (center) | 127 | 04+05 Offset (center) |
128 | 06+07 Dead band (Val 01F4 = 5000 (decimal)) | 128 | 06+07 Dead band (Val 01F4 = 5000 (decimal)) |
129 | 08 Positive saturation (Val 0a = 1000 (decimal) Val 64 = 10000 (decimal)) | 129 | 08 Positive saturation (Val 0a = 1000 (decimal) Val 64 = 10000 (decimal)) |
130 | 09 Negative saturation | 130 | 09 Negative saturation |
131 | 131 | ||
132 | The encoding is a bit funny here: For coeffs, these are signed values. The | 132 | The encoding is a bit funny here: For coeffs, these are signed values. The |
133 | maximum value is 64 (100 decimal), the min is 9c. | 133 | maximum value is 64 (100 decimal), the min is 9c. |
134 | For the offset, the minimum value is FE0C, the maximum value is 01F4. | 134 | For the offset, the minimum value is FE0C, the maximum value is 01F4. |
135 | For the deadband, the minimum value is 0, the max is 03E8. | 135 | For the deadband, the minimum value is 0, the max is 03E8. |
136 | 136 | ||
137 | ** Controls ** | 137 | ** Controls ** |
138 | OP= 41 | 138 | OP= 41 |
139 | LEN= 03 | 139 | LEN= 03 |
140 | 00 Channel | 140 | 00 Channel |
141 | 01 Start/Stop | 141 | 01 Start/Stop |
142 | Val 00: Stop | 142 | Val 00: Stop |
143 | Val 01: Start and play once. | 143 | Val 01: Start and play once. |
144 | Val 41: Start and play n times (See byte 02 below) | 144 | Val 41: Start and play n times (See byte 02 below) |
145 | 02 Number of iterations n. | 145 | 02 Number of iterations n. |
146 | 146 | ||
147 | ** Init ** | 147 | ** Init ** |
148 | 148 | ||
149 | *** Querying features *** | 149 | *** Querying features *** |
150 | OP= ff | 150 | OP= ff |
151 | Query command. Length varies according to the query type. | 151 | Query command. Length varies according to the query type. |
152 | The general format of this packet is: | 152 | The general format of this packet is: |
153 | ff 01 QUERY [INDEX] CHECKSUM | 153 | ff 01 QUERY [INDEX] CHECKSUM |
154 | responses are of the same form: | 154 | responses are of the same form:
155 | FF LEN QUERY VALUE_QUERIED CHECKSUM2 | 155 | FF LEN QUERY VALUE_QUERIED CHECKSUM2 |
156 | where LEN = 1 + length(VALUE_QUERIED) | 156 | where LEN = 1 + length(VALUE_QUERIED) |
157 | 157 | ||
158 | **** Query ram size **** | 158 | **** Query ram size **** |
159 | QUERY = 42 ('B'uffer size) | 159 | QUERY = 42 ('B'uffer size) |
160 | The device should reply with the same packet plus two additional bytes | 160 | The device should reply with the same packet plus two additional bytes
161 | containing the size of the memory: | 161 | containing the size of the memory: |
162 | ff 03 42 03 e8 CS would mean that the device has 1000 bytes of ram available. | 162 | ff 03 42 03 e8 CS would mean that the device has 1000 bytes of ram available. |
163 | 163 | ||
164 | **** Query number of effects **** | 164 | **** Query number of effects **** |
165 | QUERY = 4e ('N'umber of effects) | 165 | QUERY = 4e ('N'umber of effects) |
166 | The device should respond by sending the number of effects that can be played | 166 | The device should respond by sending the number of effects that can be played |
167 | at the same time (one byte) | 167 | at the same time (one byte) |
168 | ff 02 4e 14 CS would stand for 20 effects. | 168 | ff 02 4e 14 CS would stand for 20 effects. |
169 | 169 | ||
170 | **** Vendor's id **** | 170 | **** Vendor's id **** |
171 | QUERY = 4d ('M'anufacturer) | 171 | QUERY = 4d ('M'anufacturer) |
172 | Query the vendor's id (2 bytes) | 172 | Query the vendor's id (2 bytes)
173 | 173 | ||
174 | **** Product id ***** | 174 | **** Product id ***** |
175 | QUERY = 50 ('P'roduct) | 175 | QUERY = 50 ('P'roduct) |
176 | Query the product id (2 bytes) | 176 | Query the product id (2 bytes) |
177 | 177 | ||
178 | **** Open device **** | 178 | **** Open device **** |
179 | QUERY = 4f ('O'pen) | 179 | QUERY = 4f ('O'pen) |
180 | No data returned. | 180 | No data returned. |
181 | 181 | ||
182 | **** Close device ***** | 182 | **** Close device ***** |
183 | QUERY = 43 ('C')lose | 183 | QUERY = 43 ('C')lose |
184 | No data returned. | 184 | No data returned. |
185 | 185 | ||
186 | **** Query effect **** | 186 | **** Query effect **** |
187 | QUERY = 45 ('E') | 187 | QUERY = 45 ('E') |
188 | Send effect type. | 188 | Send effect type. |
189 | Returns nonzero if supported (2 bytes) | 189 | Returns nonzero if supported (2 bytes) |
190 | 190 | ||
191 | **** Firmware Version **** | 191 | **** Firmware Version **** |
192 | QUERY = 56 ('V'ersion) | 192 | QUERY = 56 ('V'ersion) |
193 | Sends back 3 bytes - major, minor, subminor | 193 | Sends back 3 bytes - major, minor, subminor |
194 | 194 | ||
195 | *** Initialisation of the device *** | 195 | *** Initialisation of the device *** |
196 | 196 | ||
197 | **** Set Control **** | 197 | **** Set Control **** |
198 | !!! Device dependent, can be different on different models !!! | 198 | !!! Device dependent, can be different on different models !!! |
199 | OP= 40 <idx> <val> [<val>] | 199 | OP= 40 <idx> <val> [<val>] |
200 | LEN= 2 or 3 | 200 | LEN= 2 or 3 |
201 | 00 Idx | 201 | 00 Idx |
202 | Idx 00 Set dead zone (0..2048) | 202 | Idx 00 Set dead zone (0..2048) |
203 | Idx 01 Ignore Deadman sensor (0..1) | 203 | Idx 01 Ignore Deadman sensor (0..1) |
204 | Idx 02 Enable comm watchdog (0..1) | 204 | Idx 02 Enable comm watchdog (0..1) |
205 | Idx 03 Set the strength of the spring (0..100) | 205 | Idx 03 Set the strength of the spring (0..100) |
206 | Idx 04 Enable or disable the spring (0/1) | 206 | Idx 04 Enable or disable the spring (0/1) |
207 | Idx 05 Set axis saturation threshold (0..2048) | 207 | Idx 05 Set axis saturation threshold (0..2048) |
208 | 208 | ||
209 | **** Set Effect State **** | 209 | **** Set Effect State **** |
210 | OP= 42 <val> | 210 | OP= 42 <val> |
211 | LEN= 1 | 211 | LEN= 1 |
212 | 00 State | 212 | 00 State |
213 | Bit 3 Pause force feedback | 213 | Bit 3 Pause force feedback |
214 | Bit 2 Enable force feedback | 214 | Bit 2 Enable force feedback |
215 | Bit 0 Stop all effects | 215 | Bit 0 Stop all effects |
216 | 216 | ||
217 | **** Set overall gain **** | 217 | **** Set overall gain **** |
218 | OP= 43 <val> | 218 | OP= 43 <val> |
219 | LEN= 1 | 219 | LEN= 1 |
220 | 00 Gain | 220 | 00 Gain |
221 | Val 00 = 0% | 221 | Val 00 = 0% |
222 | Val 40 = 50% | 222 | Val 40 = 50% |
223 | Val 80 = 100% | 223 | Val 80 = 100% |
224 | 224 | ||
225 | ** Parameter memory ** | 225 | ** Parameter memory ** |
226 | 226 | ||
227 | Each device has a certain amount of memory to store parameters of effects. | 227 | Each device has a certain amount of memory to store parameters of effects. |
228 | The amount of RAM may vary, I encountered values from 200 to 1000 bytes. Below | 228 | The amount of RAM may vary, I encountered values from 200 to 1000 bytes. Below |
229 | is the amount of memory apparently needed for every set of parameters: | 229 | is the amount of memory apparently needed for every set of parameters: |
230 | - period : 0c | 230 | - period : 0c |
231 | - magnitude : 02 | 231 | - magnitude : 02 |
232 | - attack and fade : 0e | 232 | - attack and fade : 0e |
233 | - interactive : 08 | 233 | - interactive : 08 |
234 | 234 | ||
235 | ** Appendix: How to study the protocol ? ** | 235 | ** Appendix: How to study the protocol ? ** |
236 | 236 | ||
237 | 1. Generate effects using the force editor provided with the DirectX SDK, or use Immersion Studio (freely available at their web site in the developer section: www.immersion.com) | 237 | 1. Generate effects using the force editor provided with the DirectX SDK, or use Immersion Studio (freely available at their web site in the developer section: www.immersion.com) |
238 | 2. Start a soft spying RS232 or USB (depending on where you connected your joystick/wheel). I used ComPortSpy from fCoder (alpha version!) | 238 | 2. Start a soft spying RS232 or USB (depending on where you connected your joystick/wheel). I used ComPortSpy from fCoder (alpha version!) |
239 | 3. Play the effect, and watch what happens on the spy screen. | 239 | 3. Play the effect, and watch what happens on the spy screen. |
240 | 240 | ||
241 | A few words about ComPortSpy: | 241 | A few words about ComPortSpy: |
242 | At first glance, this soft seems, hum, well... buggy. In fact, data appear with a few seconds latency. Personally, I restart it every time I play an effect. | 242 | At first glance, this soft seems, hum, well... buggy. In fact, data appear with a few seconds latency. Personally, I restart it every time I play an effect.
243 | Remember it's free (as in free beer) and alpha! | 243 | Remember it's free (as in free beer) and alpha! |
244 | 244 | ||
245 | ** URLS ** | 245 | ** URLS ** |
246 | Check www.immerse.com for Immersion Studio, and www.fcoder.com for ComPortSpy. | 246 | Check www.immerse.com for Immersion Studio, and www.fcoder.com for ComPortSpy. |
247 | 247 | ||
248 | ** Author of this document ** | 248 | ** Author of this document ** |
249 | Johann Deneux <deneux@ifrance.com> | 249 | Johann Deneux <deneux@ifrance.com> |
250 | Home page at http://www.esil.univ-mrs.fr/~jdeneux/projects/ff/ | 250 | Home page at http://www.esil.univ-mrs.fr/~jdeneux/projects/ff/ |
251 | 251 | ||
252 | Additions by Vojtech Pavlik. | 252 | Additions by Vojtech Pavlik. |
253 | 253 | ||
254 | I-Force is trademark of Immersion Corp. | 254 | I-Force is trademark of Immersion Corp. |
diff --git a/Documentation/lguest/lguest.c b/Documentation/lguest/lguest.c index f7918401a007..73c5f1f3d5d2 100644 --- a/Documentation/lguest/lguest.c +++ b/Documentation/lguest/lguest.c | |||
@@ -882,7 +882,7 @@ static u32 handle_block_output(int fd, const struct iovec *iov, | |||
882 | * of the block file (possibly extending it). */ | 882 | * of the block file (possibly extending it). */ |
883 | if (off + len > device_len) { | 883 | if (off + len > device_len) { |
884 | /* Trim it back to the correct length */ | 884 | /* Trim it back to the correct length */ |
885 | ftruncate(dev->fd, device_len); | 885 | ftruncate64(dev->fd, device_len); |
886 | /* Die, bad Guest, die. */ | 886 | /* Die, bad Guest, die. */ |
887 | errx(1, "Write past end %llu+%u", off, len); | 887 | errx(1, "Write past end %llu+%u", off, len); |
888 | } | 888 | } |
diff --git a/MAINTAINERS b/MAINTAINERS index 06259b494e6f..2ef086214135 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -2624,8 +2624,8 @@ P: Harald Welte | |||
2624 | P: Jozsef Kadlecsik | 2624 | P: Jozsef Kadlecsik |
2625 | P: Patrick McHardy | 2625 | P: Patrick McHardy |
2626 | M: kaber@trash.net | 2626 | M: kaber@trash.net |
2627 | L: netfilter-devel@lists.netfilter.org | 2627 | L: netfilter-devel@vger.kernel.org |
2628 | L: netfilter@lists.netfilter.org (subscribers-only) | 2628 | L: netfilter@vger.kernel.org |
2629 | L: coreteam@netfilter.org | 2629 | L: coreteam@netfilter.org |
2630 | W: http://www.netfilter.org/ | 2630 | W: http://www.netfilter.org/ |
2631 | W: http://www.iptables.org/ | 2631 | W: http://www.iptables.org/ |
@@ -2678,7 +2678,7 @@ M: jmorris@namei.org | |||
2678 | P: Hideaki YOSHIFUJI | 2678 | P: Hideaki YOSHIFUJI |
2679 | M: yoshfuji@linux-ipv6.org | 2679 | M: yoshfuji@linux-ipv6.org |
2680 | P: Patrick McHardy | 2680 | P: Patrick McHardy |
2681 | M: kaber@coreworks.de | 2681 | M: kaber@trash.net |
2682 | L: netdev@vger.kernel.org | 2682 | L: netdev@vger.kernel.org |
2683 | T: git kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6.git | 2683 | T: git kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6.git |
2684 | S: Maintained | 2684 | S: Maintained |
@@ -1,8 +1,8 @@ | |||
1 | VERSION = 2 | 1 | VERSION = 2 |
2 | PATCHLEVEL = 6 | 2 | PATCHLEVEL = 6 |
3 | SUBLEVEL = 23 | 3 | SUBLEVEL = 23 |
4 | EXTRAVERSION =-rc6 | 4 | EXTRAVERSION =-rc9 |
5 | NAME = Pink Farting Weasel | 5 | NAME = Arr Matey! A Hairy Bilge Rat! |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
8 | # To see a list of typical targets execute "make help" | 8 | # To see a list of typical targets execute "make help" |
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c index 240c448ec31c..a2dd930d11ef 100644 --- a/arch/arm/kernel/bios32.c +++ b/arch/arm/kernel/bios32.c | |||
@@ -338,7 +338,7 @@ pbus_assign_bus_resources(struct pci_bus *bus, struct pci_sys_data *root) | |||
338 | * pcibios_fixup_bus - Called after each bus is probed, | 338 | * pcibios_fixup_bus - Called after each bus is probed, |
339 | * but before its children are examined. | 339 | * but before its children are examined. |
340 | */ | 340 | */ |
341 | void __devinit pcibios_fixup_bus(struct pci_bus *bus) | 341 | void pcibios_fixup_bus(struct pci_bus *bus) |
342 | { | 342 | { |
343 | struct pci_sys_data *root = bus->sysdata; | 343 | struct pci_sys_data *root = bus->sysdata; |
344 | struct pci_dev *dev; | 344 | struct pci_dev *dev; |
@@ -419,7 +419,7 @@ void __devinit pcibios_fixup_bus(struct pci_bus *bus) | |||
419 | /* | 419 | /* |
420 | * Convert from Linux-centric to bus-centric addresses for bridge devices. | 420 | * Convert from Linux-centric to bus-centric addresses for bridge devices. |
421 | */ | 421 | */ |
422 | void __devinit | 422 | void |
423 | pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, | 423 | pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, |
424 | struct resource *res) | 424 | struct resource *res) |
425 | { | 425 | { |
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c index 851cc7158ca3..70b2c7801110 100644 --- a/arch/arm/mach-ep93xx/core.c +++ b/arch/arm/mach-ep93xx/core.c | |||
@@ -336,7 +336,7 @@ static int ep93xx_gpio_irq_type(unsigned int irq, unsigned int type) | |||
336 | if (line >= 0 && line < 16) { | 336 | if (line >= 0 && line < 16) { |
337 | gpio_line_config(line, GPIO_IN); | 337 | gpio_line_config(line, GPIO_IN); |
338 | } else { | 338 | } else { |
339 | gpio_line_config(EP93XX_GPIO_LINE_F(line), GPIO_IN); | 339 | gpio_line_config(EP93XX_GPIO_LINE_F(line-16), GPIO_IN); |
340 | } | 340 | } |
341 | 341 | ||
342 | port = line >> 3; | 342 | port = line >> 3; |
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index b4e9b734e0bd..76b800a95191 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c | |||
@@ -57,7 +57,17 @@ static void l2x0_inv_range(unsigned long start, unsigned long end) | |||
57 | { | 57 | { |
58 | unsigned long addr; | 58 | unsigned long addr; |
59 | 59 | ||
60 | start &= ~(CACHE_LINE_SIZE - 1); | 60 | if (start & (CACHE_LINE_SIZE - 1)) { |
61 | start &= ~(CACHE_LINE_SIZE - 1); | ||
62 | sync_writel(start, L2X0_CLEAN_INV_LINE_PA, 1); | ||
63 | start += CACHE_LINE_SIZE; | ||
64 | } | ||
65 | |||
66 | if (end & (CACHE_LINE_SIZE - 1)) { | ||
67 | end &= ~(CACHE_LINE_SIZE - 1); | ||
68 | sync_writel(end, L2X0_CLEAN_INV_LINE_PA, 1); | ||
69 | } | ||
70 | |||
61 | for (addr = start; addr < end; addr += CACHE_LINE_SIZE) | 71 | for (addr = start; addr < end; addr += CACHE_LINE_SIZE) |
62 | sync_writel(addr, L2X0_INV_LINE_PA, 1); | 72 | sync_writel(addr, L2X0_INV_LINE_PA, 1); |
63 | cache_sync(); | 73 | cache_sync(); |
diff --git a/arch/i386/boot/header.S b/arch/i386/boot/header.S index 7f4a2c53bd76..f3140e596d40 100644 --- a/arch/i386/boot/header.S +++ b/arch/i386/boot/header.S | |||
@@ -275,7 +275,7 @@ die: | |||
275 | hlt | 275 | hlt |
276 | jmp die | 276 | jmp die |
277 | 277 | ||
278 | .size die, .-due | 278 | .size die, .-die |
279 | 279 | ||
280 | .section ".initdata", "a" | 280 | .section ".initdata", "a" |
281 | setup_corrupt: | 281 | setup_corrupt: |
diff --git a/arch/i386/boot/memory.c b/arch/i386/boot/memory.c index 1a2e62db8bed..378353956b5d 100644 --- a/arch/i386/boot/memory.c +++ b/arch/i386/boot/memory.c | |||
@@ -20,6 +20,7 @@ | |||
20 | 20 | ||
21 | static int detect_memory_e820(void) | 21 | static int detect_memory_e820(void) |
22 | { | 22 | { |
23 | int count = 0; | ||
23 | u32 next = 0; | 24 | u32 next = 0; |
24 | u32 size, id; | 25 | u32 size, id; |
25 | u8 err; | 26 | u8 err; |
@@ -27,20 +28,33 @@ static int detect_memory_e820(void) | |||
27 | 28 | ||
28 | do { | 29 | do { |
29 | size = sizeof(struct e820entry); | 30 | size = sizeof(struct e820entry); |
30 | id = SMAP; | 31 | |
32 | /* Important: %edx is clobbered by some BIOSes, | ||
33 | so it must be either used for the error output | ||
34 | or explicitly marked clobbered. */ | ||
31 | asm("int $0x15; setc %0" | 35 | asm("int $0x15; setc %0" |
32 | : "=am" (err), "+b" (next), "+d" (id), "+c" (size), | 36 | : "=d" (err), "+b" (next), "=a" (id), "+c" (size), |
33 | "=m" (*desc) | 37 | "=m" (*desc) |
34 | : "D" (desc), "a" (0xe820)); | 38 | : "D" (desc), "d" (SMAP), "a" (0xe820)); |
39 | |||
40 | /* Some BIOSes stop returning SMAP in the middle of | ||
41 | the search loop. We don't know exactly how the BIOS | ||
42 | screwed up the map at that point, we might have a | ||
43 | partial map, the full map, or complete garbage, so | ||
44 | just return failure. */ | ||
45 | if (id != SMAP) { | ||
46 | count = 0; | ||
47 | break; | ||
48 | } | ||
35 | 49 | ||
36 | if (err || id != SMAP) | 50 | if (err) |
37 | break; | 51 | break; |
38 | 52 | ||
39 | boot_params.e820_entries++; | 53 | count++; |
40 | desc++; | 54 | desc++; |
41 | } while (next && boot_params.e820_entries < E820MAX); | 55 | } while (next && count < E820MAX); |
42 | 56 | ||
43 | return boot_params.e820_entries; | 57 | return boot_params.e820_entries = count; |
44 | } | 58 | } |
45 | 59 | ||
46 | static int detect_memory_e801(void) | 60 | static int detect_memory_e801(void) |
@@ -89,11 +103,16 @@ static int detect_memory_88(void) | |||
89 | 103 | ||
90 | int detect_memory(void) | 104 | int detect_memory(void) |
91 | { | 105 | { |
106 | int err = -1; | ||
107 | |||
92 | if (detect_memory_e820() > 0) | 108 | if (detect_memory_e820() > 0) |
93 | return 0; | 109 | err = 0; |
94 | 110 | ||
95 | if (!detect_memory_e801()) | 111 | if (!detect_memory_e801()) |
96 | return 0; | 112 | err = 0; |
113 | |||
114 | if (!detect_memory_88()) | ||
115 | err = 0; | ||
97 | 116 | ||
98 | return detect_memory_88(); | 117 | return err; |
99 | } | 118 | } |
diff --git a/arch/i386/boot/video.c b/arch/i386/boot/video.c index 693f20d3102e..e4ba897bf9a3 100644 --- a/arch/i386/boot/video.c +++ b/arch/i386/boot/video.c | |||
@@ -147,7 +147,7 @@ int mode_defined(u16 mode) | |||
147 | } | 147 | } |
148 | 148 | ||
149 | /* Set mode (without recalc) */ | 149 | /* Set mode (without recalc) */ |
150 | static int raw_set_mode(u16 mode) | 150 | static int raw_set_mode(u16 mode, u16 *real_mode) |
151 | { | 151 | { |
152 | int nmode, i; | 152 | int nmode, i; |
153 | struct card_info *card; | 153 | struct card_info *card; |
@@ -165,8 +165,10 @@ static int raw_set_mode(u16 mode) | |||
165 | 165 | ||
166 | if ((mode == nmode && visible) || | 166 | if ((mode == nmode && visible) || |
167 | mode == mi->mode || | 167 | mode == mi->mode || |
168 | mode == (mi->y << 8)+mi->x) | 168 | mode == (mi->y << 8)+mi->x) { |
169 | *real_mode = mi->mode; | ||
169 | return card->set_mode(mi); | 170 | return card->set_mode(mi); |
171 | } | ||
170 | 172 | ||
171 | if (visible) | 173 | if (visible) |
172 | nmode++; | 174 | nmode++; |
@@ -178,7 +180,7 @@ static int raw_set_mode(u16 mode) | |||
178 | if (mode >= card->xmode_first && | 180 | if (mode >= card->xmode_first && |
179 | mode < card->xmode_first+card->xmode_n) { | 181 | mode < card->xmode_first+card->xmode_n) { |
180 | struct mode_info mix; | 182 | struct mode_info mix; |
181 | mix.mode = mode; | 183 | *real_mode = mix.mode = mode; |
182 | mix.x = mix.y = 0; | 184 | mix.x = mix.y = 0; |
183 | return card->set_mode(&mix); | 185 | return card->set_mode(&mix); |
184 | } | 186 | } |
@@ -223,6 +225,7 @@ static void vga_recalc_vertical(void) | |||
223 | static int set_mode(u16 mode) | 225 | static int set_mode(u16 mode) |
224 | { | 226 | { |
225 | int rv; | 227 | int rv; |
228 | u16 real_mode; | ||
226 | 229 | ||
227 | /* Very special mode numbers... */ | 230 | /* Very special mode numbers... */ |
228 | if (mode == VIDEO_CURRENT_MODE) | 231 | if (mode == VIDEO_CURRENT_MODE) |
@@ -232,13 +235,16 @@ static int set_mode(u16 mode) | |||
232 | else if (mode == EXTENDED_VGA) | 235 | else if (mode == EXTENDED_VGA) |
233 | mode = VIDEO_8POINT; | 236 | mode = VIDEO_8POINT; |
234 | 237 | ||
235 | rv = raw_set_mode(mode); | 238 | rv = raw_set_mode(mode, &real_mode); |
236 | if (rv) | 239 | if (rv) |
237 | return rv; | 240 | return rv; |
238 | 241 | ||
239 | if (mode & VIDEO_RECALC) | 242 | if (mode & VIDEO_RECALC) |
240 | vga_recalc_vertical(); | 243 | vga_recalc_vertical(); |
241 | 244 | ||
245 | /* Save the canonical mode number for the kernel, not | ||
246 | an alias, size specification or menu position */ | ||
247 | boot_params.hdr.vid_mode = real_mode; | ||
242 | return 0; | 248 | return 0; |
243 | } | 249 | } |
244 | 250 | ||
diff --git a/arch/i386/kernel/acpi/wakeup.S b/arch/i386/kernel/acpi/wakeup.S index ed0a0f2c1597..f22ba8534d26 100644 --- a/arch/i386/kernel/acpi/wakeup.S +++ b/arch/i386/kernel/acpi/wakeup.S | |||
@@ -151,51 +151,30 @@ bogus_real_magic: | |||
151 | #define VIDEO_FIRST_V7 0x0900 | 151 | #define VIDEO_FIRST_V7 0x0900 |
152 | 152 | ||
153 | # Setting of user mode (AX=mode ID) => CF=success | 153 | # Setting of user mode (AX=mode ID) => CF=success |
154 | |||
155 | # For now, we only handle VESA modes (0x0200..0x03ff). To handle other | ||
156 | # modes, we should probably compile in the video code from the boot | ||
157 | # directory. | ||
154 | mode_set: | 158 | mode_set: |
155 | movw %ax, %bx | 159 | movw %ax, %bx |
156 | #if 0 | 160 | subb $VIDEO_FIRST_VESA>>8, %bh |
157 | cmpb $0xff, %ah | 161 | cmpb $2, %bh |
158 | jz setalias | 162 | jb check_vesa |
159 | |||
160 | testb $VIDEO_RECALC>>8, %ah | ||
161 | jnz _setrec | ||
162 | |||
163 | cmpb $VIDEO_FIRST_RESOLUTION>>8, %ah | ||
164 | jnc setres | ||
165 | |||
166 | cmpb $VIDEO_FIRST_SPECIAL>>8, %ah | ||
167 | jz setspc | ||
168 | |||
169 | cmpb $VIDEO_FIRST_V7>>8, %ah | ||
170 | jz setv7 | ||
171 | #endif | ||
172 | |||
173 | cmpb $VIDEO_FIRST_VESA>>8, %ah | ||
174 | jnc check_vesa | ||
175 | #if 0 | ||
176 | orb %ah, %ah | ||
177 | jz setmenu | ||
178 | #endif | ||
179 | |||
180 | decb %ah | ||
181 | # jz setbios Add bios modes later | ||
182 | 163 | ||
183 | setbad: clc | 164 | setbad: |
165 | clc | ||
184 | ret | 166 | ret |
185 | 167 | ||
186 | check_vesa: | 168 | check_vesa: |
187 | subb $VIDEO_FIRST_VESA>>8, %bh | ||
188 | orw $0x4000, %bx # Use linear frame buffer | 169 | orw $0x4000, %bx # Use linear frame buffer |
189 | movw $0x4f02, %ax # VESA BIOS mode set call | 170 | movw $0x4f02, %ax # VESA BIOS mode set call |
190 | int $0x10 | 171 | int $0x10 |
191 | cmpw $0x004f, %ax # AL=4f if implemented | 172 | cmpw $0x004f, %ax # AL=4f if implemented |
192 | jnz _setbad # AH=0 if OK | 173 | jnz setbad # AH=0 if OK |
193 | 174 | ||
194 | stc | 175 | stc |
195 | ret | 176 | ret |
196 | 177 | ||
197 | _setbad: jmp setbad | ||
198 | |||
199 | .code32 | 178 | .code32 |
200 | ALIGN | 179 | ALIGN |
201 | 180 | ||
diff --git a/arch/i386/xen/mmu.c b/arch/i386/xen/mmu.c index 4ae038aa6c24..874db0cd1d2a 100644 --- a/arch/i386/xen/mmu.c +++ b/arch/i386/xen/mmu.c | |||
@@ -559,6 +559,9 @@ void xen_exit_mmap(struct mm_struct *mm) | |||
559 | put_cpu(); | 559 | put_cpu(); |
560 | 560 | ||
561 | spin_lock(&mm->page_table_lock); | 561 | spin_lock(&mm->page_table_lock); |
562 | xen_pgd_unpin(mm->pgd); | 562 | |
563 | /* pgd may not be pinned in the error exit path of execve */ | ||
564 | if (PagePinned(virt_to_page(mm->pgd))) | ||
565 | xen_pgd_unpin(mm->pgd); | ||
563 | spin_unlock(&mm->page_table_lock); | 566 | spin_unlock(&mm->page_table_lock); |
564 | } | 567 | } |
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c index b6c30800c667..3a2d255361bc 100644 --- a/arch/mips/kernel/i8259.c +++ b/arch/mips/kernel/i8259.c | |||
@@ -177,10 +177,7 @@ handle_real_irq: | |||
177 | outb(cached_master_mask, PIC_MASTER_IMR); | 177 | outb(cached_master_mask, PIC_MASTER_IMR); |
178 | outb(0x60+irq,PIC_MASTER_CMD); /* 'Specific EOI to master */ | 178 | outb(0x60+irq,PIC_MASTER_CMD); /* 'Specific EOI to master */ |
179 | } | 179 | } |
180 | #ifdef CONFIG_MIPS_MT_SMTC | 180 | smtc_im_ack_irq(irq); |
181 | if (irq_hwmask[irq] & ST0_IM) | ||
182 | set_c0_status(irq_hwmask[irq] & ST0_IM); | ||
183 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
184 | spin_unlock_irqrestore(&i8259A_lock, flags); | 181 | spin_unlock_irqrestore(&i8259A_lock, flags); |
185 | return; | 182 | return; |
186 | 183 | ||
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c index 410868b5ea5f..1ecdd50bfc60 100644 --- a/arch/mips/kernel/irq-msc01.c +++ b/arch/mips/kernel/irq-msc01.c | |||
@@ -52,11 +52,8 @@ static void level_mask_and_ack_msc_irq(unsigned int irq) | |||
52 | mask_msc_irq(irq); | 52 | mask_msc_irq(irq); |
53 | if (!cpu_has_veic) | 53 | if (!cpu_has_veic) |
54 | MSCIC_WRITE(MSC01_IC_EOI, 0); | 54 | MSCIC_WRITE(MSC01_IC_EOI, 0); |
55 | #ifdef CONFIG_MIPS_MT_SMTC | ||
56 | /* This actually needs to be a call into platform code */ | 55 | /* This actually needs to be a call into platform code */ |
57 | if (irq_hwmask[irq] & ST0_IM) | 56 | smtc_im_ack_irq(irq); |
58 | set_c0_status(irq_hwmask[irq] & ST0_IM); | ||
59 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
60 | } | 57 | } |
61 | 58 | ||
62 | /* | 59 | /* |
@@ -73,10 +70,7 @@ static void edge_mask_and_ack_msc_irq(unsigned int irq) | |||
73 | MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT); | 70 | MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT); |
74 | MSCIC_WRITE(MSC01_IC_SUP+irq*8, r); | 71 | MSCIC_WRITE(MSC01_IC_SUP+irq*8, r); |
75 | } | 72 | } |
76 | #ifdef CONFIG_MIPS_MT_SMTC | 73 | smtc_im_ack_irq(irq); |
77 | if (irq_hwmask[irq] & ST0_IM) | ||
78 | set_c0_status(irq_hwmask[irq] & ST0_IM); | ||
79 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
80 | } | 74 | } |
81 | 75 | ||
82 | /* | 76 | /* |
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c index aeded6c17de5..a990aad2f049 100644 --- a/arch/mips/kernel/irq.c +++ b/arch/mips/kernel/irq.c | |||
@@ -74,20 +74,12 @@ EXPORT_SYMBOL_GPL(free_irqno); | |||
74 | */ | 74 | */ |
75 | void ack_bad_irq(unsigned int irq) | 75 | void ack_bad_irq(unsigned int irq) |
76 | { | 76 | { |
77 | smtc_im_ack_irq(irq); | ||
77 | printk("unexpected IRQ # %d\n", irq); | 78 | printk("unexpected IRQ # %d\n", irq); |
78 | } | 79 | } |
79 | 80 | ||
80 | atomic_t irq_err_count; | 81 | atomic_t irq_err_count; |
81 | 82 | ||
82 | #ifdef CONFIG_MIPS_MT_SMTC | ||
83 | /* | ||
84 | * SMTC Kernel needs to manipulate low-level CPU interrupt mask | ||
85 | * in do_IRQ. These are passed in setup_irq_smtc() and stored | ||
86 | * in this table. | ||
87 | */ | ||
88 | unsigned long irq_hwmask[NR_IRQS]; | ||
89 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
90 | |||
91 | /* | 83 | /* |
92 | * Generic, controller-independent functions: | 84 | * Generic, controller-independent functions: |
93 | */ | 85 | */ |
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index b3ed731a24c6..dd68afce7da5 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S | |||
@@ -525,5 +525,5 @@ sys_call_table: | |||
525 | PTR compat_sys_signalfd | 525 | PTR compat_sys_signalfd |
526 | PTR compat_sys_timerfd | 526 | PTR compat_sys_timerfd |
527 | PTR sys_eventfd | 527 | PTR sys_eventfd |
528 | PTR sys_fallocate /* 4320 */ | 528 | PTR sys32_fallocate /* 4320 */ |
529 | .size sys_call_table,.-sys_call_table | 529 | .size sys_call_table,.-sys_call_table |
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index 43826c16101d..f09404377ef1 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c | |||
@@ -25,8 +25,11 @@ | |||
25 | #include <asm/smtc_proc.h> | 25 | #include <asm/smtc_proc.h> |
26 | 26 | ||
27 | /* | 27 | /* |
28 | * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set. | 28 | * SMTC Kernel needs to manipulate low-level CPU interrupt mask |
29 | * in do_IRQ. These are passed in setup_irq_smtc() and stored | ||
30 | * in this table. | ||
29 | */ | 31 | */ |
32 | unsigned long irq_hwmask[NR_IRQS]; | ||
30 | 33 | ||
31 | #define LOCK_MT_PRA() \ | 34 | #define LOCK_MT_PRA() \ |
32 | local_irq_save(flags); \ | 35 | local_irq_save(flags); \ |
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index 60bbaecde187..087ab997487d 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S | |||
@@ -45,6 +45,8 @@ SECTIONS | |||
45 | __dbe_table : { *(__dbe_table) } | 45 | __dbe_table : { *(__dbe_table) } |
46 | __stop___dbe_table = .; | 46 | __stop___dbe_table = .; |
47 | 47 | ||
48 | NOTES | ||
49 | |||
48 | RODATA | 50 | RODATA |
49 | 51 | ||
50 | /* writeable */ | 52 | /* writeable */ |
diff --git a/arch/mips/sgi-ip32/ip32-platform.c b/arch/mips/sgi-ip32/ip32-platform.c index ba3697ee7ff6..7309e48d163d 100644 --- a/arch/mips/sgi-ip32/ip32-platform.c +++ b/arch/mips/sgi-ip32/ip32-platform.c | |||
@@ -41,8 +41,8 @@ static struct platform_device uart8250_device = { | |||
41 | 41 | ||
42 | static int __init uart8250_init(void) | 42 | static int __init uart8250_init(void) |
43 | { | 43 | { |
44 | uart8250_data[0].iobase = (unsigned long) &mace->isa.serial1; | 44 | uart8250_data[0].membase = (void __iomem *) &mace->isa.serial1; |
45 | uart8250_data[1].iobase = (unsigned long) &mace->isa.serial1; | 45 | uart8250_data[1].membase = (void __iomem *) &mace->isa.serial1; |
46 | 46 | ||
47 | return platform_device_register(&uart8250_device); | 47 | return platform_device_register(&uart8250_device); |
48 | } | 48 | } |
diff --git a/arch/mips/sibyte/bcm1480/setup.c b/arch/mips/sibyte/bcm1480/setup.c index bb28f28e8042..7e1aa348b8e0 100644 --- a/arch/mips/sibyte/bcm1480/setup.c +++ b/arch/mips/sibyte/bcm1480/setup.c | |||
@@ -15,6 +15,7 @@ | |||
15 | * along with this program; if not, write to the Free Software | 15 | * along with this program; if not, write to the Free Software |
16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | 16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
17 | */ | 17 | */ |
18 | #include <linux/init.h> | ||
18 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
19 | #include <linux/module.h> | 20 | #include <linux/module.h> |
20 | #include <linux/reboot.h> | 21 | #include <linux/reboot.h> |
@@ -35,6 +36,7 @@ unsigned int soc_type; | |||
35 | EXPORT_SYMBOL(soc_type); | 36 | EXPORT_SYMBOL(soc_type); |
36 | unsigned int periph_rev; | 37 | unsigned int periph_rev; |
37 | unsigned int zbbus_mhz; | 38 | unsigned int zbbus_mhz; |
39 | EXPORT_SYMBOL(zbbus_mhz); | ||
38 | 40 | ||
39 | static unsigned int part_type; | 41 | static unsigned int part_type; |
40 | 42 | ||
diff --git a/arch/powerpc/boot/dts/mpc8349emitx.dts b/arch/powerpc/boot/dts/mpc8349emitx.dts index f5c3086bcb5d..3bc32029ca5b 100644 --- a/arch/powerpc/boot/dts/mpc8349emitx.dts +++ b/arch/powerpc/boot/dts/mpc8349emitx.dts | |||
@@ -97,6 +97,7 @@ | |||
97 | #size-cells = <0>; | 97 | #size-cells = <0>; |
98 | interrupt-parent = < &ipic >; | 98 | interrupt-parent = < &ipic >; |
99 | interrupts = <26 8>; | 99 | interrupts = <26 8>; |
100 | dr_mode = "peripheral"; | ||
100 | phy_type = "ulpi"; | 101 | phy_type = "ulpi"; |
101 | }; | 102 | }; |
102 | 103 | ||
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 588c0cb8115e..15998b57767c 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
@@ -613,6 +613,13 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp) | |||
613 | regs->ccr = 0; | 613 | regs->ccr = 0; |
614 | regs->gpr[1] = sp; | 614 | regs->gpr[1] = sp; |
615 | 615 | ||
616 | /* | ||
617 | * We have just cleared all the nonvolatile GPRs, so make | ||
618 | * FULL_REGS(regs) return true. This is necessary to allow | ||
619 | * ptrace to examine the thread immediately after exec. | ||
620 | */ | ||
621 | regs->trap &= ~1UL; | ||
622 | |||
616 | #ifdef CONFIG_PPC32 | 623 | #ifdef CONFIG_PPC32 |
617 | regs->mq = 0; | 624 | regs->mq = 0; |
618 | regs->nip = start; | 625 | regs->nip = start; |
diff --git a/arch/powerpc/platforms/83xx/usb.c b/arch/powerpc/platforms/83xx/usb.c index e7fdf013cd39..eafe7605cdac 100644 --- a/arch/powerpc/platforms/83xx/usb.c +++ b/arch/powerpc/platforms/83xx/usb.c | |||
@@ -76,14 +76,14 @@ int mpc834x_usb_cfg(void) | |||
76 | if (port0_is_dr) | 76 | if (port0_is_dr) |
77 | printk(KERN_WARNING | 77 | printk(KERN_WARNING |
78 | "834x USB port0 can't be used by both DR and MPH!\n"); | 78 | "834x USB port0 can't be used by both DR and MPH!\n"); |
79 | sicrl |= MPC834X_SICRL_USB0; | 79 | sicrl &= ~MPC834X_SICRL_USB0; |
80 | } | 80 | } |
81 | prop = of_get_property(np, "port1", NULL); | 81 | prop = of_get_property(np, "port1", NULL); |
82 | if (prop) { | 82 | if (prop) { |
83 | if (port1_is_dr) | 83 | if (port1_is_dr) |
84 | printk(KERN_WARNING | 84 | printk(KERN_WARNING |
85 | "834x USB port1 can't be used by both DR and MPH!\n"); | 85 | "834x USB port1 can't be used by both DR and MPH!\n"); |
86 | sicrl |= MPC834X_SICRL_USB1; | 86 | sicrl &= ~MPC834X_SICRL_USB1; |
87 | } | 87 | } |
88 | of_node_put(np); | 88 | of_node_put(np); |
89 | } | 89 | } |
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c index b93a0275a217..d72b16d6816e 100644 --- a/arch/powerpc/platforms/cell/spufs/file.c +++ b/arch/powerpc/platforms/cell/spufs/file.c | |||
@@ -2110,8 +2110,8 @@ struct tree_descr spufs_dir_contents[] = { | |||
2110 | { "mbox_stat", &spufs_mbox_stat_fops, 0444, }, | 2110 | { "mbox_stat", &spufs_mbox_stat_fops, 0444, }, |
2111 | { "ibox_stat", &spufs_ibox_stat_fops, 0444, }, | 2111 | { "ibox_stat", &spufs_ibox_stat_fops, 0444, }, |
2112 | { "wbox_stat", &spufs_wbox_stat_fops, 0444, }, | 2112 | { "wbox_stat", &spufs_wbox_stat_fops, 0444, }, |
2113 | { "signal1", &spufs_signal1_nosched_fops, 0222, }, | 2113 | { "signal1", &spufs_signal1_fops, 0666, }, |
2114 | { "signal2", &spufs_signal2_nosched_fops, 0222, }, | 2114 | { "signal2", &spufs_signal2_fops, 0666, }, |
2115 | { "signal1_type", &spufs_signal1_type, 0666, }, | 2115 | { "signal1_type", &spufs_signal1_type, 0666, }, |
2116 | { "signal2_type", &spufs_signal2_type, 0666, }, | 2116 | { "signal2_type", &spufs_signal2_type, 0666, }, |
2117 | { "cntl", &spufs_cntl_fops, 0666, }, | 2117 | { "cntl", &spufs_cntl_fops, 0666, }, |
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c index 5ddb0259b1fb..66e7d68ffeb1 100644 --- a/arch/powerpc/platforms/pseries/xics.c +++ b/arch/powerpc/platforms/pseries/xics.c | |||
@@ -419,7 +419,7 @@ static void xics_set_affinity(unsigned int virq, cpumask_t cpumask) | |||
419 | * For the moment only implement delivery to all cpus or one cpu. | 419 | * For the moment only implement delivery to all cpus or one cpu. |
420 | * Get current irq_server for the given irq | 420 | * Get current irq_server for the given irq |
421 | */ | 421 | */ |
422 | irq_server = get_irq_server(irq, 1); | 422 | irq_server = get_irq_server(virq, 1); |
423 | if (irq_server == -1) { | 423 | if (irq_server == -1) { |
424 | char cpulist[128]; | 424 | char cpulist[128]; |
425 | cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask); | 425 | cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask); |
diff --git a/arch/powerpc/sysdev/commproc.c b/arch/powerpc/sysdev/commproc.c index b562afc4e50c..160a8b49bdea 100644 --- a/arch/powerpc/sysdev/commproc.c +++ b/arch/powerpc/sysdev/commproc.c | |||
@@ -387,4 +387,4 @@ uint cpm_dpram_phys(u8* addr) | |||
387 | { | 387 | { |
388 | return (dpram_pbase + (uint)(addr - dpram_vbase)); | 388 | return (dpram_pbase + (uint)(addr - dpram_vbase)); |
389 | } | 389 | } |
390 | EXPORT_SYMBOL(cpm_dpram_addr); | 390 | EXPORT_SYMBOL(cpm_dpram_phys); |
diff --git a/arch/ppc/8xx_io/commproc.c b/arch/ppc/8xx_io/commproc.c index 7088428e1fe2..9da880be4dc0 100644 --- a/arch/ppc/8xx_io/commproc.c +++ b/arch/ppc/8xx_io/commproc.c | |||
@@ -459,7 +459,7 @@ EXPORT_SYMBOL(cpm_dpdump); | |||
459 | 459 | ||
460 | void *cpm_dpram_addr(unsigned long offset) | 460 | void *cpm_dpram_addr(unsigned long offset) |
461 | { | 461 | { |
462 | return ((immap_t *)IMAP_ADDR)->im_cpm.cp_dpmem + offset; | 462 | return (void *)(dpram_vbase + offset); |
463 | } | 463 | } |
464 | EXPORT_SYMBOL(cpm_dpram_addr); | 464 | EXPORT_SYMBOL(cpm_dpram_addr); |
465 | 465 | ||
diff --git a/arch/sparc/kernel/ebus.c b/arch/sparc/kernel/ebus.c index e2d02fd13f35..d850785b2080 100644 --- a/arch/sparc/kernel/ebus.c +++ b/arch/sparc/kernel/ebus.c | |||
@@ -156,6 +156,8 @@ void __init fill_ebus_device(struct device_node *dp, struct linux_ebus_device *d | |||
156 | dev->prom_node = dp; | 156 | dev->prom_node = dp; |
157 | 157 | ||
158 | regs = of_get_property(dp, "reg", &len); | 158 | regs = of_get_property(dp, "reg", &len); |
159 | if (!regs) | ||
160 | len = 0; | ||
159 | if (len % sizeof(struct linux_prom_registers)) { | 161 | if (len % sizeof(struct linux_prom_registers)) { |
160 | prom_printf("UGH: proplen for %s was %d, need multiple of %d\n", | 162 | prom_printf("UGH: proplen for %s was %d, need multiple of %d\n", |
161 | dev->prom_node->name, len, | 163 | dev->prom_node->name, len, |
diff --git a/arch/sparc64/kernel/binfmt_aout32.c b/arch/sparc64/kernel/binfmt_aout32.c index f205fc7cbcd0..d208cc7804f2 100644 --- a/arch/sparc64/kernel/binfmt_aout32.c +++ b/arch/sparc64/kernel/binfmt_aout32.c | |||
@@ -177,7 +177,7 @@ static u32 __user *create_aout32_tables(char __user *p, struct linux_binprm *bpr | |||
177 | get_user(c,p++); | 177 | get_user(c,p++); |
178 | } while (c); | 178 | } while (c); |
179 | } | 179 | } |
180 | put_user(NULL,argv); | 180 | put_user(0,argv); |
181 | current->mm->arg_end = current->mm->env_start = (unsigned long) p; | 181 | current->mm->arg_end = current->mm->env_start = (unsigned long) p; |
182 | while (envc-->0) { | 182 | while (envc-->0) { |
183 | char c; | 183 | char c; |
@@ -186,7 +186,7 @@ static u32 __user *create_aout32_tables(char __user *p, struct linux_binprm *bpr | |||
186 | get_user(c,p++); | 186 | get_user(c,p++); |
187 | } while (c); | 187 | } while (c); |
188 | } | 188 | } |
189 | put_user(NULL,envp); | 189 | put_user(0,envp); |
190 | current->mm->env_end = (unsigned long) p; | 190 | current->mm->env_end = (unsigned long) p; |
191 | return sp; | 191 | return sp; |
192 | } | 192 | } |
diff --git a/arch/sparc64/kernel/ebus.c b/arch/sparc64/kernel/ebus.c index bc9ae36f7a43..04ab81cb4f48 100644 --- a/arch/sparc64/kernel/ebus.c +++ b/arch/sparc64/kernel/ebus.c | |||
@@ -375,7 +375,10 @@ static void __init fill_ebus_device(struct device_node *dp, struct linux_ebus_de | |||
375 | dev->num_addrs = 0; | 375 | dev->num_addrs = 0; |
376 | dev->num_irqs = 0; | 376 | dev->num_irqs = 0; |
377 | } else { | 377 | } else { |
378 | (void) of_get_property(dp, "reg", &len); | 378 | const int *regs = of_get_property(dp, "reg", &len); |
379 | |||
380 | if (!regs) | ||
381 | len = 0; | ||
379 | dev->num_addrs = len / sizeof(struct linux_prom_registers); | 382 | dev->num_addrs = len / sizeof(struct linux_prom_registers); |
380 | 383 | ||
381 | for (i = 0; i < dev->num_addrs; i++) | 384 | for (i = 0; i < dev->num_addrs; i++) |
diff --git a/arch/sparc64/lib/NGcopy_from_user.S b/arch/sparc64/lib/NGcopy_from_user.S index 2d93456f76dd..e7f433f71b42 100644 --- a/arch/sparc64/lib/NGcopy_from_user.S +++ b/arch/sparc64/lib/NGcopy_from_user.S | |||
@@ -1,6 +1,6 @@ | |||
1 | /* NGcopy_from_user.S: Niagara optimized copy from userspace. | 1 | /* NGcopy_from_user.S: Niagara optimized copy from userspace. |
2 | * | 2 | * |
3 | * Copyright (C) 2006 David S. Miller (davem@davemloft.net) | 3 | * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net) |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #define EX_LD(x) \ | 6 | #define EX_LD(x) \ |
@@ -8,8 +8,8 @@ | |||
8 | .section .fixup; \ | 8 | .section .fixup; \ |
9 | .align 4; \ | 9 | .align 4; \ |
10 | 99: wr %g0, ASI_AIUS, %asi;\ | 10 | 99: wr %g0, ASI_AIUS, %asi;\ |
11 | retl; \ | 11 | ret; \ |
12 | mov 1, %o0; \ | 12 | restore %g0, 1, %o0; \ |
13 | .section __ex_table,"a";\ | 13 | .section __ex_table,"a";\ |
14 | .align 4; \ | 14 | .align 4; \ |
15 | .word 98b, 99b; \ | 15 | .word 98b, 99b; \ |
@@ -24,7 +24,7 @@ | |||
24 | #define LOAD(type,addr,dest) type##a [addr] ASI_AIUS, dest | 24 | #define LOAD(type,addr,dest) type##a [addr] ASI_AIUS, dest |
25 | #define LOAD_TWIN(addr_reg,dest0,dest1) \ | 25 | #define LOAD_TWIN(addr_reg,dest0,dest1) \ |
26 | ldda [addr_reg] ASI_BLK_INIT_QUAD_LDD_AIUS, dest0 | 26 | ldda [addr_reg] ASI_BLK_INIT_QUAD_LDD_AIUS, dest0 |
27 | #define EX_RETVAL(x) 0 | 27 | #define EX_RETVAL(x) %g0 |
28 | 28 | ||
29 | #ifdef __KERNEL__ | 29 | #ifdef __KERNEL__ |
30 | #define PREAMBLE \ | 30 | #define PREAMBLE \ |
diff --git a/arch/sparc64/lib/NGcopy_to_user.S b/arch/sparc64/lib/NGcopy_to_user.S index 34112d5054ef..6ea01c5532a0 100644 --- a/arch/sparc64/lib/NGcopy_to_user.S +++ b/arch/sparc64/lib/NGcopy_to_user.S | |||
@@ -1,6 +1,6 @@ | |||
1 | /* NGcopy_to_user.S: Niagara optimized copy to userspace. | 1 | /* NGcopy_to_user.S: Niagara optimized copy to userspace. |
2 | * | 2 | * |
3 | * Copyright (C) 2006 David S. Miller (davem@davemloft.net) | 3 | * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net) |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #define EX_ST(x) \ | 6 | #define EX_ST(x) \ |
@@ -8,8 +8,8 @@ | |||
8 | .section .fixup; \ | 8 | .section .fixup; \ |
9 | .align 4; \ | 9 | .align 4; \ |
10 | 99: wr %g0, ASI_AIUS, %asi;\ | 10 | 99: wr %g0, ASI_AIUS, %asi;\ |
11 | retl; \ | 11 | ret; \ |
12 | mov 1, %o0; \ | 12 | restore %g0, 1, %o0; \ |
13 | .section __ex_table,"a";\ | 13 | .section __ex_table,"a";\ |
14 | .align 4; \ | 14 | .align 4; \ |
15 | .word 98b, 99b; \ | 15 | .word 98b, 99b; \ |
@@ -23,7 +23,7 @@ | |||
23 | #define FUNC_NAME NGcopy_to_user | 23 | #define FUNC_NAME NGcopy_to_user |
24 | #define STORE(type,src,addr) type##a src, [addr] ASI_AIUS | 24 | #define STORE(type,src,addr) type##a src, [addr] ASI_AIUS |
25 | #define STORE_ASI ASI_BLK_INIT_QUAD_LDD_AIUS | 25 | #define STORE_ASI ASI_BLK_INIT_QUAD_LDD_AIUS |
26 | #define EX_RETVAL(x) 0 | 26 | #define EX_RETVAL(x) %g0 |
27 | 27 | ||
28 | #ifdef __KERNEL__ | 28 | #ifdef __KERNEL__ |
29 | /* Writing to %asi is _expensive_ so we hardcode it. | 29 | /* Writing to %asi is _expensive_ so we hardcode it. |
diff --git a/arch/sparc64/lib/NGmemcpy.S b/arch/sparc64/lib/NGmemcpy.S index 66063a9a66b8..605cb3f09900 100644 --- a/arch/sparc64/lib/NGmemcpy.S +++ b/arch/sparc64/lib/NGmemcpy.S | |||
@@ -1,6 +1,6 @@ | |||
1 | /* NGmemcpy.S: Niagara optimized memcpy. | 1 | /* NGmemcpy.S: Niagara optimized memcpy. |
2 | * | 2 | * |
3 | * Copyright (C) 2006 David S. Miller (davem@davemloft.net) | 3 | * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net) |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #ifdef __KERNEL__ | 6 | #ifdef __KERNEL__ |
@@ -16,6 +16,12 @@ | |||
16 | wr %g0, ASI_PNF, %asi | 16 | wr %g0, ASI_PNF, %asi |
17 | #endif | 17 | #endif |
18 | 18 | ||
19 | #ifdef __sparc_v9__ | ||
20 | #define SAVE_AMOUNT 128 | ||
21 | #else | ||
22 | #define SAVE_AMOUNT 64 | ||
23 | #endif | ||
24 | |||
19 | #ifndef STORE_ASI | 25 | #ifndef STORE_ASI |
20 | #define STORE_ASI ASI_BLK_INIT_QUAD_LDD_P | 26 | #define STORE_ASI ASI_BLK_INIT_QUAD_LDD_P |
21 | #endif | 27 | #endif |
@@ -50,7 +56,11 @@ | |||
50 | #endif | 56 | #endif |
51 | 57 | ||
52 | #ifndef STORE_INIT | 58 | #ifndef STORE_INIT |
59 | #ifndef SIMULATE_NIAGARA_ON_NON_NIAGARA | ||
53 | #define STORE_INIT(src,addr) stxa src, [addr] %asi | 60 | #define STORE_INIT(src,addr) stxa src, [addr] %asi |
61 | #else | ||
62 | #define STORE_INIT(src,addr) stx src, [addr + 0x00] | ||
63 | #endif | ||
54 | #endif | 64 | #endif |
55 | 65 | ||
56 | #ifndef FUNC_NAME | 66 | #ifndef FUNC_NAME |
@@ -73,18 +83,19 @@ | |||
73 | 83 | ||
74 | .globl FUNC_NAME | 84 | .globl FUNC_NAME |
75 | .type FUNC_NAME,#function | 85 | .type FUNC_NAME,#function |
76 | FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | 86 | FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ |
77 | srlx %o2, 31, %g2 | 87 | PREAMBLE |
88 | save %sp, -SAVE_AMOUNT, %sp | ||
89 | srlx %i2, 31, %g2 | ||
78 | cmp %g2, 0 | 90 | cmp %g2, 0 |
79 | tne %xcc, 5 | 91 | tne %xcc, 5 |
80 | PREAMBLE | 92 | mov %i0, %o0 |
81 | mov %o0, GLOBAL_SPARE | 93 | cmp %i2, 0 |
82 | cmp %o2, 0 | ||
83 | be,pn %XCC, 85f | 94 | be,pn %XCC, 85f |
84 | or %o0, %o1, %o3 | 95 | or %o0, %i1, %i3 |
85 | cmp %o2, 16 | 96 | cmp %i2, 16 |
86 | blu,a,pn %XCC, 80f | 97 | blu,a,pn %XCC, 80f |
87 | or %o3, %o2, %o3 | 98 | or %i3, %i2, %i3 |
88 | 99 | ||
89 | /* 2 blocks (128 bytes) is the minimum we can do the block | 100 | /* 2 blocks (128 bytes) is the minimum we can do the block |
90 | * copy with. We need to ensure that we'll iterate at least | 101 | * copy with. We need to ensure that we'll iterate at least |
@@ -93,31 +104,31 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
93 | * to (64 - 1) bytes from the length before we perform the | 104 | * to (64 - 1) bytes from the length before we perform the |
94 | * block copy loop. | 105 | * block copy loop. |
95 | */ | 106 | */ |
96 | cmp %o2, (2 * 64) | 107 | cmp %i2, (2 * 64) |
97 | blu,pt %XCC, 70f | 108 | blu,pt %XCC, 70f |
98 | andcc %o3, 0x7, %g0 | 109 | andcc %i3, 0x7, %g0 |
99 | 110 | ||
100 | /* %o0: dst | 111 | /* %o0: dst |
101 | * %o1: src | 112 | * %i1: src |
102 | * %o2: len (known to be >= 128) | 113 | * %i2: len (known to be >= 128) |
103 | * | 114 | * |
104 | * The block copy loops will use %o4/%o5,%g2/%g3 as | 115 | * The block copy loops will use %i4/%i5,%g2/%g3 as |
105 | * temporaries while copying the data. | 116 | * temporaries while copying the data. |
106 | */ | 117 | */ |
107 | 118 | ||
108 | LOAD(prefetch, %o1, #one_read) | 119 | LOAD(prefetch, %i1, #one_read) |
109 | wr %g0, STORE_ASI, %asi | 120 | wr %g0, STORE_ASI, %asi |
110 | 121 | ||
111 | /* Align destination on 64-byte boundary. */ | 122 | /* Align destination on 64-byte boundary. */ |
112 | andcc %o0, (64 - 1), %o4 | 123 | andcc %o0, (64 - 1), %i4 |
113 | be,pt %XCC, 2f | 124 | be,pt %XCC, 2f |
114 | sub %o4, 64, %o4 | 125 | sub %i4, 64, %i4 |
115 | sub %g0, %o4, %o4 ! bytes to align dst | 126 | sub %g0, %i4, %i4 ! bytes to align dst |
116 | sub %o2, %o4, %o2 | 127 | sub %i2, %i4, %i2 |
117 | 1: subcc %o4, 1, %o4 | 128 | 1: subcc %i4, 1, %i4 |
118 | EX_LD(LOAD(ldub, %o1, %g1)) | 129 | EX_LD(LOAD(ldub, %i1, %g1)) |
119 | EX_ST(STORE(stb, %g1, %o0)) | 130 | EX_ST(STORE(stb, %g1, %o0)) |
120 | add %o1, 1, %o1 | 131 | add %i1, 1, %i1 |
121 | bne,pt %XCC, 1b | 132 | bne,pt %XCC, 1b |
122 | add %o0, 1, %o0 | 133 | add %o0, 1, %o0 |
123 | 134 | ||
@@ -136,111 +147,155 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
136 | * aligned store data at a time, this is easy to ensure. | 147 | * aligned store data at a time, this is easy to ensure. |
137 | */ | 148 | */ |
138 | 2: | 149 | 2: |
139 | andcc %o1, (16 - 1), %o4 | 150 | andcc %i1, (16 - 1), %i4 |
140 | andn %o2, (64 - 1), %g1 ! block copy loop iterator | 151 | andn %i2, (64 - 1), %g1 ! block copy loop iterator |
141 | sub %o2, %g1, %o2 ! final sub-block copy bytes | ||
142 | be,pt %XCC, 50f | 152 | be,pt %XCC, 50f |
143 | cmp %o4, 8 | 153 | sub %i2, %g1, %i2 ! final sub-block copy bytes |
144 | be,a,pt %XCC, 10f | 154 | |
145 | sub %o1, 0x8, %o1 | 155 | cmp %i4, 8 |
156 | be,pt %XCC, 10f | ||
157 | sub %i1, %i4, %i1 | ||
146 | 158 | ||
147 | /* Neither 8-byte nor 16-byte aligned, shift and mask. */ | 159 | /* Neither 8-byte nor 16-byte aligned, shift and mask. */ |
148 | mov %g1, %o4 | 160 | and %i4, 0x7, GLOBAL_SPARE |
149 | and %o1, 0x7, %g1 | 161 | sll GLOBAL_SPARE, 3, GLOBAL_SPARE |
150 | sll %g1, 3, %g1 | 162 | mov 64, %i5 |
151 | mov 64, %o3 | 163 | EX_LD(LOAD_TWIN(%i1, %g2, %g3)) |
152 | andn %o1, 0x7, %o1 | 164 | sub %i5, GLOBAL_SPARE, %i5 |
153 | EX_LD(LOAD(ldx, %o1, %g2)) | 165 | mov 16, %o4 |
154 | sub %o3, %g1, %o3 | 166 | mov 32, %o5 |
155 | sllx %g2, %g1, %g2 | 167 | mov 48, %o7 |
168 | mov 64, %i3 | ||
169 | |||
170 | bg,pn %XCC, 9f | ||
171 | nop | ||
156 | 172 | ||
157 | #define SWIVEL_ONE_DWORD(SRC, TMP1, TMP2, PRE_VAL, PRE_SHIFT, POST_SHIFT, DST)\ | 173 | #define MIX_THREE_WORDS(WORD1, WORD2, WORD3, PRE_SHIFT, POST_SHIFT, TMP) \ |
158 | EX_LD(LOAD(ldx, SRC, TMP1)); \ | 174 | sllx WORD1, POST_SHIFT, WORD1; \ |
159 | srlx TMP1, PRE_SHIFT, TMP2; \ | 175 | srlx WORD2, PRE_SHIFT, TMP; \ |
160 | or TMP2, PRE_VAL, TMP2; \ | 176 | sllx WORD2, POST_SHIFT, WORD2; \ |
161 | EX_ST(STORE_INIT(TMP2, DST)); \ | 177 | or WORD1, TMP, WORD1; \ |
162 | sllx TMP1, POST_SHIFT, PRE_VAL; | 178 | srlx WORD3, PRE_SHIFT, TMP; \ |
163 | 179 | or WORD2, TMP, WORD2; | |
164 | 1: add %o1, 0x8, %o1 | 180 | |
165 | SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x00) | 181 | 8: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3)) |
166 | add %o1, 0x8, %o1 | 182 | MIX_THREE_WORDS(%g2, %g3, %o2, %i5, GLOBAL_SPARE, %o1) |
167 | SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x08) | 183 | LOAD(prefetch, %i1 + %i3, #one_read) |
168 | add %o1, 0x8, %o1 | 184 | |
169 | SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x10) | 185 | EX_ST(STORE_INIT(%g2, %o0 + 0x00)) |
170 | add %o1, 0x8, %o1 | 186 | EX_ST(STORE_INIT(%g3, %o0 + 0x08)) |
171 | SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x18) | 187 | |
172 | add %o1, 32, %o1 | 188 | EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3)) |
173 | LOAD(prefetch, %o1, #one_read) | 189 | MIX_THREE_WORDS(%o2, %o3, %g2, %i5, GLOBAL_SPARE, %o1) |
174 | sub %o1, 32 - 8, %o1 | 190 | |
175 | SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x20) | 191 | EX_ST(STORE_INIT(%o2, %o0 + 0x10)) |
176 | add %o1, 8, %o1 | 192 | EX_ST(STORE_INIT(%o3, %o0 + 0x18)) |
177 | SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x28) | 193 | |
178 | add %o1, 8, %o1 | 194 | EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3)) |
179 | SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x30) | 195 | MIX_THREE_WORDS(%g2, %g3, %o2, %i5, GLOBAL_SPARE, %o1) |
180 | add %o1, 8, %o1 | 196 | |
181 | SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x38) | 197 | EX_ST(STORE_INIT(%g2, %o0 + 0x20)) |
182 | subcc %o4, 64, %o4 | 198 | EX_ST(STORE_INIT(%g3, %o0 + 0x28)) |
183 | bne,pt %XCC, 1b | 199 | |
200 | EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3)) | ||
201 | add %i1, 64, %i1 | ||
202 | MIX_THREE_WORDS(%o2, %o3, %g2, %i5, GLOBAL_SPARE, %o1) | ||
203 | |||
204 | EX_ST(STORE_INIT(%o2, %o0 + 0x30)) | ||
205 | EX_ST(STORE_INIT(%o3, %o0 + 0x38)) | ||
206 | |||
207 | subcc %g1, 64, %g1 | ||
208 | bne,pt %XCC, 8b | ||
184 | add %o0, 64, %o0 | 209 | add %o0, 64, %o0 |
185 | 210 | ||
186 | #undef SWIVEL_ONE_DWORD | 211 | ba,pt %XCC, 60f |
212 | add %i1, %i4, %i1 | ||
213 | |||
214 | 9: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3)) | ||
215 | MIX_THREE_WORDS(%g3, %o2, %o3, %i5, GLOBAL_SPARE, %o1) | ||
216 | LOAD(prefetch, %i1 + %i3, #one_read) | ||
217 | |||
218 | EX_ST(STORE_INIT(%g3, %o0 + 0x00)) | ||
219 | EX_ST(STORE_INIT(%o2, %o0 + 0x08)) | ||
220 | |||
221 | EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3)) | ||
222 | MIX_THREE_WORDS(%o3, %g2, %g3, %i5, GLOBAL_SPARE, %o1) | ||
223 | |||
224 | EX_ST(STORE_INIT(%o3, %o0 + 0x10)) | ||
225 | EX_ST(STORE_INIT(%g2, %o0 + 0x18)) | ||
226 | |||
227 | EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3)) | ||
228 | MIX_THREE_WORDS(%g3, %o2, %o3, %i5, GLOBAL_SPARE, %o1) | ||
229 | |||
230 | EX_ST(STORE_INIT(%g3, %o0 + 0x20)) | ||
231 | EX_ST(STORE_INIT(%o2, %o0 + 0x28)) | ||
232 | |||
233 | EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3)) | ||
234 | add %i1, 64, %i1 | ||
235 | MIX_THREE_WORDS(%o3, %g2, %g3, %i5, GLOBAL_SPARE, %o1) | ||
236 | |||
237 | EX_ST(STORE_INIT(%o3, %o0 + 0x30)) | ||
238 | EX_ST(STORE_INIT(%g2, %o0 + 0x38)) | ||
239 | |||
240 | subcc %g1, 64, %g1 | ||
241 | bne,pt %XCC, 9b | ||
242 | add %o0, 64, %o0 | ||
187 | 243 | ||
188 | srl %g1, 3, %g1 | ||
189 | ba,pt %XCC, 60f | 244 | ba,pt %XCC, 60f |
190 | add %o1, %g1, %o1 | 245 | add %i1, %i4, %i1 |
191 | 246 | ||
192 | 10: /* Destination is 64-byte aligned, source was only 8-byte | 247 | 10: /* Destination is 64-byte aligned, source was only 8-byte |
193 | * aligned but it has been subtracted by 8 and we perform | 248 | * aligned but it has been subtracted by 8 and we perform |
194 | * one twin load ahead, then add 8 back into source when | 249 | * one twin load ahead, then add 8 back into source when |
195 | * we finish the loop. | 250 | * we finish the loop. |
196 | */ | 251 | */ |
197 | EX_LD(LOAD_TWIN(%o1, %o4, %o5)) | 252 | EX_LD(LOAD_TWIN(%i1, %o4, %o5)) |
198 | 1: add %o1, 16, %o1 | 253 | mov 16, %o7 |
199 | EX_LD(LOAD_TWIN(%o1, %g2, %g3)) | 254 | mov 32, %g2 |
200 | add %o1, 16 + 32, %o1 | 255 | mov 48, %g3 |
201 | LOAD(prefetch, %o1, #one_read) | 256 | mov 64, %o1 |
202 | sub %o1, 32, %o1 | 257 | 1: EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3)) |
258 | LOAD(prefetch, %i1 + %o1, #one_read) | ||
203 | EX_ST(STORE_INIT(%o5, %o0 + 0x00)) ! initializes cache line | 259 | EX_ST(STORE_INIT(%o5, %o0 + 0x00)) ! initializes cache line |
204 | EX_ST(STORE_INIT(%g2, %o0 + 0x08)) | 260 | EX_ST(STORE_INIT(%o2, %o0 + 0x08)) |
205 | EX_LD(LOAD_TWIN(%o1, %o4, %o5)) | 261 | EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5)) |
206 | add %o1, 16, %o1 | 262 | EX_ST(STORE_INIT(%o3, %o0 + 0x10)) |
207 | EX_ST(STORE_INIT(%g3, %o0 + 0x10)) | ||
208 | EX_ST(STORE_INIT(%o4, %o0 + 0x18)) | 263 | EX_ST(STORE_INIT(%o4, %o0 + 0x18)) |
209 | EX_LD(LOAD_TWIN(%o1, %g2, %g3)) | 264 | EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3)) |
210 | add %o1, 16, %o1 | ||
211 | EX_ST(STORE_INIT(%o5, %o0 + 0x20)) | 265 | EX_ST(STORE_INIT(%o5, %o0 + 0x20)) |
212 | EX_ST(STORE_INIT(%g2, %o0 + 0x28)) | 266 | EX_ST(STORE_INIT(%o2, %o0 + 0x28)) |
213 | EX_LD(LOAD_TWIN(%o1, %o4, %o5)) | 267 | EX_LD(LOAD_TWIN(%i1 + %o1, %o4, %o5)) |
214 | EX_ST(STORE_INIT(%g3, %o0 + 0x30)) | 268 | add %i1, 64, %i1 |
269 | EX_ST(STORE_INIT(%o3, %o0 + 0x30)) | ||
215 | EX_ST(STORE_INIT(%o4, %o0 + 0x38)) | 270 | EX_ST(STORE_INIT(%o4, %o0 + 0x38)) |
216 | subcc %g1, 64, %g1 | 271 | subcc %g1, 64, %g1 |
217 | bne,pt %XCC, 1b | 272 | bne,pt %XCC, 1b |
218 | add %o0, 64, %o0 | 273 | add %o0, 64, %o0 |
219 | 274 | ||
220 | ba,pt %XCC, 60f | 275 | ba,pt %XCC, 60f |
221 | add %o1, 0x8, %o1 | 276 | add %i1, 0x8, %i1 |
222 | 277 | ||
223 | 50: /* Destination is 64-byte aligned, and source is 16-byte | 278 | 50: /* Destination is 64-byte aligned, and source is 16-byte |
224 | * aligned. | 279 | * aligned. |
225 | */ | 280 | */ |
226 | 1: EX_LD(LOAD_TWIN(%o1, %o4, %o5)) | 281 | mov 16, %o7 |
227 | add %o1, 16, %o1 | 282 | mov 32, %g2 |
228 | EX_LD(LOAD_TWIN(%o1, %g2, %g3)) | 283 | mov 48, %g3 |
229 | add %o1, 16 + 32, %o1 | 284 | mov 64, %o1 |
230 | LOAD(prefetch, %o1, #one_read) | 285 | 1: EX_LD(LOAD_TWIN(%i1 + %g0, %o4, %o5)) |
231 | sub %o1, 32, %o1 | 286 | EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3)) |
287 | LOAD(prefetch, %i1 + %o1, #one_read) | ||
232 | EX_ST(STORE_INIT(%o4, %o0 + 0x00)) ! initializes cache line | 288 | EX_ST(STORE_INIT(%o4, %o0 + 0x00)) ! initializes cache line |
233 | EX_ST(STORE_INIT(%o5, %o0 + 0x08)) | 289 | EX_ST(STORE_INIT(%o5, %o0 + 0x08)) |
234 | EX_LD(LOAD_TWIN(%o1, %o4, %o5)) | 290 | EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5)) |
235 | add %o1, 16, %o1 | 291 | EX_ST(STORE_INIT(%o2, %o0 + 0x10)) |
236 | EX_ST(STORE_INIT(%g2, %o0 + 0x10)) | 292 | EX_ST(STORE_INIT(%o3, %o0 + 0x18)) |
237 | EX_ST(STORE_INIT(%g3, %o0 + 0x18)) | 293 | EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3)) |
238 | EX_LD(LOAD_TWIN(%o1, %g2, %g3)) | 294 | add %i1, 64, %i1 |
239 | add %o1, 16, %o1 | ||
240 | EX_ST(STORE_INIT(%o4, %o0 + 0x20)) | 295 | EX_ST(STORE_INIT(%o4, %o0 + 0x20)) |
241 | EX_ST(STORE_INIT(%o5, %o0 + 0x28)) | 296 | EX_ST(STORE_INIT(%o5, %o0 + 0x28)) |
242 | EX_ST(STORE_INIT(%g2, %o0 + 0x30)) | 297 | EX_ST(STORE_INIT(%o2, %o0 + 0x30)) |
243 | EX_ST(STORE_INIT(%g3, %o0 + 0x38)) | 298 | EX_ST(STORE_INIT(%o3, %o0 + 0x38)) |
244 | subcc %g1, 64, %g1 | 299 | subcc %g1, 64, %g1 |
245 | bne,pt %XCC, 1b | 300 | bne,pt %XCC, 1b |
246 | add %o0, 64, %o0 | 301 | add %o0, 64, %o0 |
@@ -249,47 +304,47 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
249 | 60: | 304 | 60: |
250 | membar #Sync | 305 | membar #Sync |
251 | 306 | ||
252 | /* %o2 contains any final bytes still needed to be copied | 307 | /* %i2 contains any final bytes still needed to be copied |
253 | * over. If anything is left, we copy it one byte at a time. | 308 | * over. If anything is left, we copy it one byte at a time. |
254 | */ | 309 | */ |
255 | RESTORE_ASI(%o3) | 310 | RESTORE_ASI(%i3) |
256 | brz,pt %o2, 85f | 311 | brz,pt %i2, 85f |
257 | sub %o0, %o1, %o3 | 312 | sub %o0, %i1, %i3 |
258 | ba,a,pt %XCC, 90f | 313 | ba,a,pt %XCC, 90f |
259 | 314 | ||
260 | .align 64 | 315 | .align 64 |
261 | 70: /* 16 < len <= 64 */ | 316 | 70: /* 16 < len <= 64 */ |
262 | bne,pn %XCC, 75f | 317 | bne,pn %XCC, 75f |
263 | sub %o0, %o1, %o3 | 318 | sub %o0, %i1, %i3 |
264 | 319 | ||
265 | 72: | 320 | 72: |
266 | andn %o2, 0xf, %o4 | 321 | andn %i2, 0xf, %i4 |
267 | and %o2, 0xf, %o2 | 322 | and %i2, 0xf, %i2 |
268 | 1: subcc %o4, 0x10, %o4 | 323 | 1: subcc %i4, 0x10, %i4 |
269 | EX_LD(LOAD(ldx, %o1, %o5)) | 324 | EX_LD(LOAD(ldx, %i1, %i5)) |
270 | add %o1, 0x08, %o1 | 325 | add %i1, 0x08, %i1 |
271 | EX_LD(LOAD(ldx, %o1, %g1)) | 326 | EX_LD(LOAD(ldx, %i1, %g1)) |
272 | sub %o1, 0x08, %o1 | 327 | sub %i1, 0x08, %i1 |
273 | EX_ST(STORE(stx, %o5, %o1 + %o3)) | 328 | EX_ST(STORE(stx, %i5, %i1 + %i3)) |
274 | add %o1, 0x8, %o1 | 329 | add %i1, 0x8, %i1 |
275 | EX_ST(STORE(stx, %g1, %o1 + %o3)) | 330 | EX_ST(STORE(stx, %g1, %i1 + %i3)) |
276 | bgu,pt %XCC, 1b | 331 | bgu,pt %XCC, 1b |
277 | add %o1, 0x8, %o1 | 332 | add %i1, 0x8, %i1 |
278 | 73: andcc %o2, 0x8, %g0 | 333 | 73: andcc %i2, 0x8, %g0 |
279 | be,pt %XCC, 1f | 334 | be,pt %XCC, 1f |
280 | nop | 335 | nop |
281 | sub %o2, 0x8, %o2 | 336 | sub %i2, 0x8, %i2 |
282 | EX_LD(LOAD(ldx, %o1, %o5)) | 337 | EX_LD(LOAD(ldx, %i1, %i5)) |
283 | EX_ST(STORE(stx, %o5, %o1 + %o3)) | 338 | EX_ST(STORE(stx, %i5, %i1 + %i3)) |
284 | add %o1, 0x8, %o1 | 339 | add %i1, 0x8, %i1 |
285 | 1: andcc %o2, 0x4, %g0 | 340 | 1: andcc %i2, 0x4, %g0 |
286 | be,pt %XCC, 1f | 341 | be,pt %XCC, 1f |
287 | nop | 342 | nop |
288 | sub %o2, 0x4, %o2 | 343 | sub %i2, 0x4, %i2 |
289 | EX_LD(LOAD(lduw, %o1, %o5)) | 344 | EX_LD(LOAD(lduw, %i1, %i5)) |
290 | EX_ST(STORE(stw, %o5, %o1 + %o3)) | 345 | EX_ST(STORE(stw, %i5, %i1 + %i3)) |
291 | add %o1, 0x4, %o1 | 346 | add %i1, 0x4, %i1 |
292 | 1: cmp %o2, 0 | 347 | 1: cmp %i2, 0 |
293 | be,pt %XCC, 85f | 348 | be,pt %XCC, 85f |
294 | nop | 349 | nop |
295 | ba,pt %xcc, 90f | 350 | ba,pt %xcc, 90f |
@@ -300,71 +355,71 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
300 | sub %g1, 0x8, %g1 | 355 | sub %g1, 0x8, %g1 |
301 | be,pn %icc, 2f | 356 | be,pn %icc, 2f |
302 | sub %g0, %g1, %g1 | 357 | sub %g0, %g1, %g1 |
303 | sub %o2, %g1, %o2 | 358 | sub %i2, %g1, %i2 |
304 | 359 | ||
305 | 1: subcc %g1, 1, %g1 | 360 | 1: subcc %g1, 1, %g1 |
306 | EX_LD(LOAD(ldub, %o1, %o5)) | 361 | EX_LD(LOAD(ldub, %i1, %i5)) |
307 | EX_ST(STORE(stb, %o5, %o1 + %o3)) | 362 | EX_ST(STORE(stb, %i5, %i1 + %i3)) |
308 | bgu,pt %icc, 1b | 363 | bgu,pt %icc, 1b |
309 | add %o1, 1, %o1 | 364 | add %i1, 1, %i1 |
310 | 365 | ||
311 | 2: add %o1, %o3, %o0 | 366 | 2: add %i1, %i3, %o0 |
312 | andcc %o1, 0x7, %g1 | 367 | andcc %i1, 0x7, %g1 |
313 | bne,pt %icc, 8f | 368 | bne,pt %icc, 8f |
314 | sll %g1, 3, %g1 | 369 | sll %g1, 3, %g1 |
315 | 370 | ||
316 | cmp %o2, 16 | 371 | cmp %i2, 16 |
317 | bgeu,pt %icc, 72b | 372 | bgeu,pt %icc, 72b |
318 | nop | 373 | nop |
319 | ba,a,pt %xcc, 73b | 374 | ba,a,pt %xcc, 73b |
320 | 375 | ||
321 | 8: mov 64, %o3 | 376 | 8: mov 64, %i3 |
322 | andn %o1, 0x7, %o1 | 377 | andn %i1, 0x7, %i1 |
323 | EX_LD(LOAD(ldx, %o1, %g2)) | 378 | EX_LD(LOAD(ldx, %i1, %g2)) |
324 | sub %o3, %g1, %o3 | 379 | sub %i3, %g1, %i3 |
325 | andn %o2, 0x7, %o4 | 380 | andn %i2, 0x7, %i4 |
326 | sllx %g2, %g1, %g2 | 381 | sllx %g2, %g1, %g2 |
327 | 1: add %o1, 0x8, %o1 | 382 | 1: add %i1, 0x8, %i1 |
328 | EX_LD(LOAD(ldx, %o1, %g3)) | 383 | EX_LD(LOAD(ldx, %i1, %g3)) |
329 | subcc %o4, 0x8, %o4 | 384 | subcc %i4, 0x8, %i4 |
330 | srlx %g3, %o3, %o5 | 385 | srlx %g3, %i3, %i5 |
331 | or %o5, %g2, %o5 | 386 | or %i5, %g2, %i5 |
332 | EX_ST(STORE(stx, %o5, %o0)) | 387 | EX_ST(STORE(stx, %i5, %o0)) |
333 | add %o0, 0x8, %o0 | 388 | add %o0, 0x8, %o0 |
334 | bgu,pt %icc, 1b | 389 | bgu,pt %icc, 1b |
335 | sllx %g3, %g1, %g2 | 390 | sllx %g3, %g1, %g2 |
336 | 391 | ||
337 | srl %g1, 3, %g1 | 392 | srl %g1, 3, %g1 |
338 | andcc %o2, 0x7, %o2 | 393 | andcc %i2, 0x7, %i2 |
339 | be,pn %icc, 85f | 394 | be,pn %icc, 85f |
340 | add %o1, %g1, %o1 | 395 | add %i1, %g1, %i1 |
341 | ba,pt %xcc, 90f | 396 | ba,pt %xcc, 90f |
342 | sub %o0, %o1, %o3 | 397 | sub %o0, %i1, %i3 |
343 | 398 | ||
344 | .align 64 | 399 | .align 64 |
345 | 80: /* 0 < len <= 16 */ | 400 | 80: /* 0 < len <= 16 */ |
346 | andcc %o3, 0x3, %g0 | 401 | andcc %i3, 0x3, %g0 |
347 | bne,pn %XCC, 90f | 402 | bne,pn %XCC, 90f |
348 | sub %o0, %o1, %o3 | 403 | sub %o0, %i1, %i3 |
349 | 404 | ||
350 | 1: | 405 | 1: |
351 | subcc %o2, 4, %o2 | 406 | subcc %i2, 4, %i2 |
352 | EX_LD(LOAD(lduw, %o1, %g1)) | 407 | EX_LD(LOAD(lduw, %i1, %g1)) |
353 | EX_ST(STORE(stw, %g1, %o1 + %o3)) | 408 | EX_ST(STORE(stw, %g1, %i1 + %i3)) |
354 | bgu,pt %XCC, 1b | 409 | bgu,pt %XCC, 1b |
355 | add %o1, 4, %o1 | 410 | add %i1, 4, %i1 |
356 | 411 | ||
357 | 85: retl | 412 | 85: ret |
358 | mov EX_RETVAL(GLOBAL_SPARE), %o0 | 413 | restore EX_RETVAL(%i0), %g0, %o0 |
359 | 414 | ||
360 | .align 32 | 415 | .align 32 |
361 | 90: | 416 | 90: |
362 | subcc %o2, 1, %o2 | 417 | subcc %i2, 1, %i2 |
363 | EX_LD(LOAD(ldub, %o1, %g1)) | 418 | EX_LD(LOAD(ldub, %i1, %g1)) |
364 | EX_ST(STORE(stb, %g1, %o1 + %o3)) | 419 | EX_ST(STORE(stb, %g1, %i1 + %i3)) |
365 | bgu,pt %XCC, 90b | 420 | bgu,pt %XCC, 90b |
366 | add %o1, 1, %o1 | 421 | add %i1, 1, %i1 |
367 | retl | 422 | ret |
368 | mov EX_RETVAL(GLOBAL_SPARE), %o0 | 423 | restore EX_RETVAL(%i0), %g0, %o0 |
369 | 424 | ||
370 | .size FUNC_NAME, .-FUNC_NAME | 425 | .size FUNC_NAME, .-FUNC_NAME |
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig index ffa036406289..b4d9089a6a06 100644 --- a/arch/x86_64/Kconfig +++ b/arch/x86_64/Kconfig | |||
@@ -60,14 +60,6 @@ config ZONE_DMA | |||
60 | bool | 60 | bool |
61 | default y | 61 | default y |
62 | 62 | ||
63 | config QUICKLIST | ||
64 | bool | ||
65 | default y | ||
66 | |||
67 | config NR_QUICK | ||
68 | int | ||
69 | default 2 | ||
70 | |||
71 | config ISA | 63 | config ISA |
72 | bool | 64 | bool |
73 | 65 | ||
diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S index 938278697e20..18b231810908 100644 --- a/arch/x86_64/ia32/ia32entry.S +++ b/arch/x86_64/ia32/ia32entry.S | |||
@@ -38,6 +38,18 @@ | |||
38 | movq %rax,R8(%rsp) | 38 | movq %rax,R8(%rsp) |
39 | .endm | 39 | .endm |
40 | 40 | ||
41 | .macro LOAD_ARGS32 offset | ||
42 | movl \offset(%rsp),%r11d | ||
43 | movl \offset+8(%rsp),%r10d | ||
44 | movl \offset+16(%rsp),%r9d | ||
45 | movl \offset+24(%rsp),%r8d | ||
46 | movl \offset+40(%rsp),%ecx | ||
47 | movl \offset+48(%rsp),%edx | ||
48 | movl \offset+56(%rsp),%esi | ||
49 | movl \offset+64(%rsp),%edi | ||
50 | movl \offset+72(%rsp),%eax | ||
51 | .endm | ||
52 | |||
41 | .macro CFI_STARTPROC32 simple | 53 | .macro CFI_STARTPROC32 simple |
42 | CFI_STARTPROC \simple | 54 | CFI_STARTPROC \simple |
43 | CFI_UNDEFINED r8 | 55 | CFI_UNDEFINED r8 |
@@ -152,7 +164,7 @@ sysenter_tracesys: | |||
152 | movq $-ENOSYS,RAX(%rsp) /* really needed? */ | 164 | movq $-ENOSYS,RAX(%rsp) /* really needed? */ |
153 | movq %rsp,%rdi /* &pt_regs -> arg1 */ | 165 | movq %rsp,%rdi /* &pt_regs -> arg1 */ |
154 | call syscall_trace_enter | 166 | call syscall_trace_enter |
155 | LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */ | 167 | LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ |
156 | RESTORE_REST | 168 | RESTORE_REST |
157 | movl %ebp, %ebp | 169 | movl %ebp, %ebp |
158 | /* no need to do an access_ok check here because rbp has been | 170 | /* no need to do an access_ok check here because rbp has been |
@@ -255,7 +267,7 @@ cstar_tracesys: | |||
255 | movq $-ENOSYS,RAX(%rsp) /* really needed? */ | 267 | movq $-ENOSYS,RAX(%rsp) /* really needed? */ |
256 | movq %rsp,%rdi /* &pt_regs -> arg1 */ | 268 | movq %rsp,%rdi /* &pt_regs -> arg1 */ |
257 | call syscall_trace_enter | 269 | call syscall_trace_enter |
258 | LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */ | 270 | LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ |
259 | RESTORE_REST | 271 | RESTORE_REST |
260 | movl RSP-ARGOFFSET(%rsp), %r8d | 272 | movl RSP-ARGOFFSET(%rsp), %r8d |
261 | /* no need to do an access_ok check here because r8 has been | 273 | /* no need to do an access_ok check here because r8 has been |
@@ -334,7 +346,7 @@ ia32_tracesys: | |||
334 | movq $-ENOSYS,RAX(%rsp) /* really needed? */ | 346 | movq $-ENOSYS,RAX(%rsp) /* really needed? */ |
335 | movq %rsp,%rdi /* &pt_regs -> arg1 */ | 347 | movq %rsp,%rdi /* &pt_regs -> arg1 */ |
336 | call syscall_trace_enter | 348 | call syscall_trace_enter |
337 | LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */ | 349 | LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ |
338 | RESTORE_REST | 350 | RESTORE_REST |
339 | jmp ia32_do_syscall | 351 | jmp ia32_do_syscall |
340 | END(ia32_syscall) | 352 | END(ia32_syscall) |
diff --git a/arch/x86_64/kernel/acpi/wakeup.S b/arch/x86_64/kernel/acpi/wakeup.S index 13f1480cbec9..a06f2bcabef9 100644 --- a/arch/x86_64/kernel/acpi/wakeup.S +++ b/arch/x86_64/kernel/acpi/wakeup.S | |||
@@ -81,7 +81,7 @@ wakeup_code: | |||
81 | testl $2, realmode_flags - wakeup_code | 81 | testl $2, realmode_flags - wakeup_code |
82 | jz 1f | 82 | jz 1f |
83 | mov video_mode - wakeup_code, %ax | 83 | mov video_mode - wakeup_code, %ax |
84 | call mode_seta | 84 | call mode_set |
85 | 1: | 85 | 1: |
86 | 86 | ||
87 | movw $0xb800, %ax | 87 | movw $0xb800, %ax |
@@ -291,52 +291,31 @@ no_longmode: | |||
291 | #define VIDEO_FIRST_V7 0x0900 | 291 | #define VIDEO_FIRST_V7 0x0900 |
292 | 292 | ||
293 | # Setting of user mode (AX=mode ID) => CF=success | 293 | # Setting of user mode (AX=mode ID) => CF=success |
294 | |||
295 | # For now, we only handle VESA modes (0x0200..0x03ff). To handle other | ||
296 | # modes, we should probably compile in the video code from the boot | ||
297 | # directory. | ||
294 | .code16 | 298 | .code16 |
295 | mode_seta: | 299 | mode_set: |
296 | movw %ax, %bx | 300 | movw %ax, %bx |
297 | #if 0 | 301 | subb $VIDEO_FIRST_VESA>>8, %bh |
298 | cmpb $0xff, %ah | 302 | cmpb $2, %bh |
299 | jz setalias | 303 | jb check_vesa |
300 | |||
301 | testb $VIDEO_RECALC>>8, %ah | ||
302 | jnz _setrec | ||
303 | |||
304 | cmpb $VIDEO_FIRST_RESOLUTION>>8, %ah | ||
305 | jnc setres | ||
306 | |||
307 | cmpb $VIDEO_FIRST_SPECIAL>>8, %ah | ||
308 | jz setspc | ||
309 | |||
310 | cmpb $VIDEO_FIRST_V7>>8, %ah | ||
311 | jz setv7 | ||
312 | #endif | ||
313 | |||
314 | cmpb $VIDEO_FIRST_VESA>>8, %ah | ||
315 | jnc check_vesaa | ||
316 | #if 0 | ||
317 | orb %ah, %ah | ||
318 | jz setmenu | ||
319 | #endif | ||
320 | |||
321 | decb %ah | ||
322 | # jz setbios Add bios modes later | ||
323 | 304 | ||
324 | setbada: clc | 305 | setbad: |
306 | clc | ||
325 | ret | 307 | ret |
326 | 308 | ||
327 | check_vesaa: | 309 | check_vesa: |
328 | subb $VIDEO_FIRST_VESA>>8, %bh | ||
329 | orw $0x4000, %bx # Use linear frame buffer | 310 | orw $0x4000, %bx # Use linear frame buffer |
330 | movw $0x4f02, %ax # VESA BIOS mode set call | 311 | movw $0x4f02, %ax # VESA BIOS mode set call |
331 | int $0x10 | 312 | int $0x10 |
332 | cmpw $0x004f, %ax # AL=4f if implemented | 313 | cmpw $0x004f, %ax # AL=4f if implemented |
333 | jnz _setbada # AH=0 if OK | 314 | jnz setbad # AH=0 if OK |
334 | 315 | ||
335 | stc | 316 | stc |
336 | ret | 317 | ret |
337 | 318 | ||
338 | _setbada: jmp setbada | ||
339 | |||
340 | wakeup_stack_begin: # Stack grows down | 319 | wakeup_stack_begin: # Stack grows down |
341 | 320 | ||
342 | .org 0xff0 | 321 | .org 0xff0 |
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c index 2842f50cbe3f..98956555450b 100644 --- a/arch/x86_64/kernel/process.c +++ b/arch/x86_64/kernel/process.c | |||
@@ -208,7 +208,6 @@ void cpu_idle (void) | |||
208 | if (__get_cpu_var(cpu_idle_state)) | 208 | if (__get_cpu_var(cpu_idle_state)) |
209 | __get_cpu_var(cpu_idle_state) = 0; | 209 | __get_cpu_var(cpu_idle_state) = 0; |
210 | 210 | ||
211 | check_pgt_cache(); | ||
212 | rmb(); | 211 | rmb(); |
213 | idle = pm_idle; | 212 | idle = pm_idle; |
214 | if (!idle) | 213 | if (!idle) |
diff --git a/arch/x86_64/kernel/ptrace.c b/arch/x86_64/kernel/ptrace.c index e83cc67155ac..eea3702427b4 100644 --- a/arch/x86_64/kernel/ptrace.c +++ b/arch/x86_64/kernel/ptrace.c | |||
@@ -232,10 +232,6 @@ static int putreg(struct task_struct *child, | |||
232 | { | 232 | { |
233 | unsigned long tmp; | 233 | unsigned long tmp; |
234 | 234 | ||
235 | /* Some code in the 64bit emulation may not be 64bit clean. | ||
236 | Don't take any chances. */ | ||
237 | if (test_tsk_thread_flag(child, TIF_IA32)) | ||
238 | value &= 0xffffffff; | ||
239 | switch (regno) { | 235 | switch (regno) { |
240 | case offsetof(struct user_regs_struct,fs): | 236 | case offsetof(struct user_regs_struct,fs): |
241 | if (value && (value & 3) != 3) | 237 | if (value && (value & 3) != 3) |
diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c index 673a300b5944..df4a82812adb 100644 --- a/arch/x86_64/kernel/smp.c +++ b/arch/x86_64/kernel/smp.c | |||
@@ -241,7 +241,7 @@ void flush_tlb_mm (struct mm_struct * mm) | |||
241 | } | 241 | } |
242 | if (!cpus_empty(cpu_mask)) | 242 | if (!cpus_empty(cpu_mask)) |
243 | flush_tlb_others(cpu_mask, mm, FLUSH_ALL); | 243 | flush_tlb_others(cpu_mask, mm, FLUSH_ALL); |
244 | check_pgt_cache(); | 244 | |
245 | preempt_enable(); | 245 | preempt_enable(); |
246 | } | 246 | } |
247 | EXPORT_SYMBOL(flush_tlb_mm); | 247 | EXPORT_SYMBOL(flush_tlb_mm); |
diff --git a/arch/x86_64/vdso/voffset.h b/arch/x86_64/vdso/voffset.h index 5304204911f2..4af67c79085f 100644 --- a/arch/x86_64/vdso/voffset.h +++ b/arch/x86_64/vdso/voffset.h | |||
@@ -1 +1 @@ | |||
#define VDSO_TEXT_OFFSET 0x500 | #define VDSO_TEXT_OFFSET 0x600 | ||
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c index 035007145e78..bc18cbb8ea79 100644 --- a/crypto/async_tx/async_tx.c +++ b/crypto/async_tx/async_tx.c | |||
@@ -80,6 +80,7 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) | |||
80 | { | 80 | { |
81 | enum dma_status status; | 81 | enum dma_status status; |
82 | struct dma_async_tx_descriptor *iter; | 82 | struct dma_async_tx_descriptor *iter; |
83 | struct dma_async_tx_descriptor *parent; | ||
83 | 84 | ||
84 | if (!tx) | 85 | if (!tx) |
85 | return DMA_SUCCESS; | 86 | return DMA_SUCCESS; |
@@ -87,8 +88,15 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) | |||
87 | /* poll through the dependency chain, return when tx is complete */ | 88 | /* poll through the dependency chain, return when tx is complete */ |
88 | do { | 89 | do { |
89 | iter = tx; | 90 | iter = tx; |
90 | while (iter->cookie == -EBUSY) | 91 | |
91 | iter = iter->parent; | 92 | /* find the root of the unsubmitted dependency chain */ |
93 | while (iter->cookie == -EBUSY) { | ||
94 | parent = iter->parent; | ||
95 | if (parent && parent->cookie == -EBUSY) | ||
96 | iter = iter->parent; | ||
97 | else | ||
98 | break; | ||
99 | } | ||
92 | 100 | ||
93 | status = dma_sync_wait(iter->chan, iter->cookie); | 101 | status = dma_sync_wait(iter->chan, iter->cookie); |
94 | } while (status == DMA_IN_PROGRESS || (iter != tx)); | 102 | } while (status == DMA_IN_PROGRESS || (iter != tx)); |
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index 2afb3d2086b3..9f11dc296cdd 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c | |||
@@ -102,6 +102,8 @@ static struct acpi_driver acpi_processor_driver = { | |||
102 | .add = acpi_processor_add, | 102 | .add = acpi_processor_add, |
103 | .remove = acpi_processor_remove, | 103 | .remove = acpi_processor_remove, |
104 | .start = acpi_processor_start, | 104 | .start = acpi_processor_start, |
105 | .suspend = acpi_processor_suspend, | ||
106 | .resume = acpi_processor_resume, | ||
105 | }, | 107 | }, |
106 | }; | 108 | }; |
107 | 109 | ||
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index d9b8af763e1e..f18261368e76 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c | |||
@@ -325,6 +325,23 @@ static void acpi_state_timer_broadcast(struct acpi_processor *pr, | |||
325 | 325 | ||
326 | #endif | 326 | #endif |
327 | 327 | ||
328 | /* | ||
329 | * Suspend / resume control | ||
330 | */ | ||
331 | static int acpi_idle_suspend; | ||
332 | |||
333 | int acpi_processor_suspend(struct acpi_device * device, pm_message_t state) | ||
334 | { | ||
335 | acpi_idle_suspend = 1; | ||
336 | return 0; | ||
337 | } | ||
338 | |||
339 | int acpi_processor_resume(struct acpi_device * device) | ||
340 | { | ||
341 | acpi_idle_suspend = 0; | ||
342 | return 0; | ||
343 | } | ||
344 | |||
328 | static void acpi_processor_idle(void) | 345 | static void acpi_processor_idle(void) |
329 | { | 346 | { |
330 | struct acpi_processor *pr = NULL; | 347 | struct acpi_processor *pr = NULL; |
@@ -355,7 +372,7 @@ static void acpi_processor_idle(void) | |||
355 | } | 372 | } |
356 | 373 | ||
357 | cx = pr->power.state; | 374 | cx = pr->power.state; |
358 | if (!cx) { | 375 | if (!cx || acpi_idle_suspend) { |
359 | if (pm_idle_save) | 376 | if (pm_idle_save) |
360 | pm_idle_save(); | 377 | pm_idle_save(); |
361 | else | 378 | else |
diff --git a/drivers/acpi/sleep/Makefile b/drivers/acpi/sleep/Makefile index 195a4f69c0f7..f1fb888c2d29 100644 --- a/drivers/acpi/sleep/Makefile +++ b/drivers/acpi/sleep/Makefile | |||
@@ -1,5 +1,5 @@ | |||
1 | obj-y := poweroff.o wakeup.o | 1 | obj-y := wakeup.o |
2 | obj-$(CONFIG_ACPI_SLEEP) += main.o | 2 | obj-y += main.o |
3 | obj-$(CONFIG_ACPI_SLEEP) += proc.o | 3 | obj-$(CONFIG_ACPI_SLEEP) += proc.o |
4 | 4 | ||
5 | EXTRA_CFLAGS += $(ACPI_CFLAGS) | 5 | EXTRA_CFLAGS += $(ACPI_CFLAGS) |
diff --git a/drivers/acpi/sleep/main.c b/drivers/acpi/sleep/main.c index c52ade816fb4..2cbb9aabd00e 100644 --- a/drivers/acpi/sleep/main.c +++ b/drivers/acpi/sleep/main.c | |||
@@ -15,13 +15,39 @@ | |||
15 | #include <linux/dmi.h> | 15 | #include <linux/dmi.h> |
16 | #include <linux/device.h> | 16 | #include <linux/device.h> |
17 | #include <linux/suspend.h> | 17 | #include <linux/suspend.h> |
18 | |||
19 | #include <asm/io.h> | ||
20 | |||
18 | #include <acpi/acpi_bus.h> | 21 | #include <acpi/acpi_bus.h> |
19 | #include <acpi/acpi_drivers.h> | 22 | #include <acpi/acpi_drivers.h> |
20 | #include "sleep.h" | 23 | #include "sleep.h" |
21 | 24 | ||
22 | u8 sleep_states[ACPI_S_STATE_COUNT]; | 25 | u8 sleep_states[ACPI_S_STATE_COUNT]; |
23 | 26 | ||
27 | #ifdef CONFIG_PM_SLEEP | ||
24 | static u32 acpi_target_sleep_state = ACPI_STATE_S0; | 28 | static u32 acpi_target_sleep_state = ACPI_STATE_S0; |
29 | #endif | ||
30 | |||
31 | int acpi_sleep_prepare(u32 acpi_state) | ||
32 | { | ||
33 | #ifdef CONFIG_ACPI_SLEEP | ||
34 | /* do we have a wakeup address for S2 and S3? */ | ||
35 | if (acpi_state == ACPI_STATE_S3) { | ||
36 | if (!acpi_wakeup_address) { | ||
37 | return -EFAULT; | ||
38 | } | ||
39 | acpi_set_firmware_waking_vector((acpi_physical_address) | ||
40 | virt_to_phys((void *) | ||
41 | acpi_wakeup_address)); | ||
42 | |||
43 | } | ||
44 | ACPI_FLUSH_CPU_CACHE(); | ||
45 | acpi_enable_wakeup_device_prep(acpi_state); | ||
46 | #endif | ||
47 | acpi_gpe_sleep_prepare(acpi_state); | ||
48 | acpi_enter_sleep_state_prep(acpi_state); | ||
49 | return 0; | ||
50 | } | ||
25 | 51 | ||
26 | #ifdef CONFIG_SUSPEND | 52 | #ifdef CONFIG_SUSPEND |
27 | static struct pm_ops acpi_pm_ops; | 53 | static struct pm_ops acpi_pm_ops; |
@@ -275,6 +301,7 @@ int acpi_suspend(u32 acpi_state) | |||
275 | return -EINVAL; | 301 | return -EINVAL; |
276 | } | 302 | } |
277 | 303 | ||
304 | #ifdef CONFIG_PM_SLEEP | ||
278 | /** | 305 | /** |
279 | * acpi_pm_device_sleep_state - return preferred power state of ACPI device | 306 | * acpi_pm_device_sleep_state - return preferred power state of ACPI device |
280 | * in the system sleep state given by %acpi_target_sleep_state | 307 | * in the system sleep state given by %acpi_target_sleep_state |
@@ -349,6 +376,21 @@ int acpi_pm_device_sleep_state(struct device *dev, int wake, int *d_min_p) | |||
349 | *d_min_p = d_min; | 376 | *d_min_p = d_min; |
350 | return d_max; | 377 | return d_max; |
351 | } | 378 | } |
379 | #endif | ||
380 | |||
381 | static void acpi_power_off_prepare(void) | ||
382 | { | ||
383 | /* Prepare to power off the system */ | ||
384 | acpi_sleep_prepare(ACPI_STATE_S5); | ||
385 | } | ||
386 | |||
387 | static void acpi_power_off(void) | ||
388 | { | ||
389 | /* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */ | ||
390 | printk("%s called\n", __FUNCTION__); | ||
391 | local_irq_disable(); | ||
392 | acpi_enter_sleep_state(ACPI_STATE_S5); | ||
393 | } | ||
352 | 394 | ||
353 | int __init acpi_sleep_init(void) | 395 | int __init acpi_sleep_init(void) |
354 | { | 396 | { |
@@ -363,16 +405,17 @@ int __init acpi_sleep_init(void) | |||
363 | if (acpi_disabled) | 405 | if (acpi_disabled) |
364 | return 0; | 406 | return 0; |
365 | 407 | ||
408 | sleep_states[ACPI_STATE_S0] = 1; | ||
409 | printk(KERN_INFO PREFIX "(supports S0"); | ||
410 | |||
366 | #ifdef CONFIG_SUSPEND | 411 | #ifdef CONFIG_SUSPEND |
367 | printk(KERN_INFO PREFIX "(supports"); | 412 | for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++) { |
368 | for (i = ACPI_STATE_S0; i < ACPI_STATE_S4; i++) { | ||
369 | status = acpi_get_sleep_type_data(i, &type_a, &type_b); | 413 | status = acpi_get_sleep_type_data(i, &type_a, &type_b); |
370 | if (ACPI_SUCCESS(status)) { | 414 | if (ACPI_SUCCESS(status)) { |
371 | sleep_states[i] = 1; | 415 | sleep_states[i] = 1; |
372 | printk(" S%d", i); | 416 | printk(" S%d", i); |
373 | } | 417 | } |
374 | } | 418 | } |
375 | printk(")\n"); | ||
376 | 419 | ||
377 | pm_set_ops(&acpi_pm_ops); | 420 | pm_set_ops(&acpi_pm_ops); |
378 | #endif | 421 | #endif |
@@ -382,10 +425,16 @@ int __init acpi_sleep_init(void) | |||
382 | if (ACPI_SUCCESS(status)) { | 425 | if (ACPI_SUCCESS(status)) { |
383 | hibernation_set_ops(&acpi_hibernation_ops); | 426 | hibernation_set_ops(&acpi_hibernation_ops); |
384 | sleep_states[ACPI_STATE_S4] = 1; | 427 | sleep_states[ACPI_STATE_S4] = 1; |
428 | printk(" S4"); | ||
385 | } | 429 | } |
386 | #else | ||
387 | sleep_states[ACPI_STATE_S4] = 0; | ||
388 | #endif | 430 | #endif |
389 | 431 | status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b); | |
432 | if (ACPI_SUCCESS(status)) { | ||
433 | sleep_states[ACPI_STATE_S5] = 1; | ||
434 | printk(" S5"); | ||
435 | pm_power_off_prepare = acpi_power_off_prepare; | ||
436 | pm_power_off = acpi_power_off; | ||
437 | } | ||
438 | printk(")\n"); | ||
390 | return 0; | 439 | return 0; |
391 | } | 440 | } |
diff --git a/drivers/acpi/sleep/poweroff.c b/drivers/acpi/sleep/poweroff.c deleted file mode 100644 index 39e40d56b034..000000000000 --- a/drivers/acpi/sleep/poweroff.c +++ /dev/null | |||
@@ -1,75 +0,0 @@ | |||
1 | /* | ||
2 | * poweroff.c - ACPI handler for powering off the system. | ||
3 | * | ||
4 | * AKA S5, but it is independent of whether or not the kernel supports | ||
5 | * any other sleep support in the system. | ||
6 | * | ||
7 | * Copyright (c) 2005 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com> | ||
8 | * | ||
9 | * This file is released under the GPLv2. | ||
10 | */ | ||
11 | |||
12 | #include <linux/pm.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <acpi/acpi_bus.h> | ||
15 | #include <linux/sysdev.h> | ||
16 | #include <asm/io.h> | ||
17 | #include "sleep.h" | ||
18 | |||
19 | int acpi_sleep_prepare(u32 acpi_state) | ||
20 | { | ||
21 | #ifdef CONFIG_ACPI_SLEEP | ||
22 | /* do we have a wakeup address for S2 and S3? */ | ||
23 | if (acpi_state == ACPI_STATE_S3) { | ||
24 | if (!acpi_wakeup_address) { | ||
25 | return -EFAULT; | ||
26 | } | ||
27 | acpi_set_firmware_waking_vector((acpi_physical_address) | ||
28 | virt_to_phys((void *) | ||
29 | acpi_wakeup_address)); | ||
30 | |||
31 | } | ||
32 | ACPI_FLUSH_CPU_CACHE(); | ||
33 | acpi_enable_wakeup_device_prep(acpi_state); | ||
34 | #endif | ||
35 | acpi_gpe_sleep_prepare(acpi_state); | ||
36 | acpi_enter_sleep_state_prep(acpi_state); | ||
37 | return 0; | ||
38 | } | ||
39 | |||
40 | #ifdef CONFIG_PM | ||
41 | |||
42 | static void acpi_power_off_prepare(void) | ||
43 | { | ||
44 | /* Prepare to power off the system */ | ||
45 | acpi_sleep_prepare(ACPI_STATE_S5); | ||
46 | } | ||
47 | |||
48 | static void acpi_power_off(void) | ||
49 | { | ||
50 | /* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */ | ||
51 | printk("%s called\n", __FUNCTION__); | ||
52 | local_irq_disable(); | ||
53 | /* Some SMP machines only can poweroff in boot CPU */ | ||
54 | acpi_enter_sleep_state(ACPI_STATE_S5); | ||
55 | } | ||
56 | |||
57 | static int acpi_poweroff_init(void) | ||
58 | { | ||
59 | if (!acpi_disabled) { | ||
60 | u8 type_a, type_b; | ||
61 | acpi_status status; | ||
62 | |||
63 | status = | ||
64 | acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b); | ||
65 | if (ACPI_SUCCESS(status)) { | ||
66 | pm_power_off_prepare = acpi_power_off_prepare; | ||
67 | pm_power_off = acpi_power_off; | ||
68 | } | ||
69 | } | ||
70 | return 0; | ||
71 | } | ||
72 | |||
73 | late_initcall(acpi_poweroff_init); | ||
74 | |||
75 | #endif /* CONFIG_PM */ | ||
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index 3c9bb85a6a93..d05891f16282 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c | |||
@@ -417,7 +417,6 @@ acpi_video_device_lcd_set_level(struct acpi_video_device *device, int level) | |||
417 | arg0.integer.value = level; | 417 | arg0.integer.value = level; |
418 | status = acpi_evaluate_object(device->dev->handle, "_BCM", &args, NULL); | 418 | status = acpi_evaluate_object(device->dev->handle, "_BCM", &args, NULL); |
419 | 419 | ||
420 | printk(KERN_DEBUG "set_level status: %x\n", status); | ||
421 | return status; | 420 | return status; |
422 | } | 421 | } |
423 | 422 | ||
@@ -1754,7 +1753,7 @@ static int acpi_video_bus_put_devices(struct acpi_video_bus *video) | |||
1754 | 1753 | ||
1755 | static int acpi_video_bus_start_devices(struct acpi_video_bus *video) | 1754 | static int acpi_video_bus_start_devices(struct acpi_video_bus *video) |
1756 | { | 1755 | { |
1757 | return acpi_video_bus_DOS(video, 1, 0); | 1756 | return acpi_video_bus_DOS(video, 0, 0); |
1758 | } | 1757 | } |
1759 | 1758 | ||
1760 | static int acpi_video_bus_stop_devices(struct acpi_video_bus *video) | 1759 | static int acpi_video_bus_stop_devices(struct acpi_video_bus *video) |
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 06f212ff2b4f..c16820325d7b 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c | |||
@@ -418,10 +418,12 @@ static const struct pci_device_id ahci_pci_tbl[] = { | |||
418 | 418 | ||
419 | /* ATI */ | 419 | /* ATI */ |
420 | { PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */ | 420 | { PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */ |
421 | { PCI_VDEVICE(ATI, 0x4390), board_ahci_sb600 }, /* ATI SB700 IDE */ | 421 | { PCI_VDEVICE(ATI, 0x4390), board_ahci_sb600 }, /* ATI SB700/800 */ |
422 | { PCI_VDEVICE(ATI, 0x4391), board_ahci_sb600 }, /* ATI SB700 AHCI */ | 422 | { PCI_VDEVICE(ATI, 0x4391), board_ahci_sb600 }, /* ATI SB700/800 */ |
423 | { PCI_VDEVICE(ATI, 0x4392), board_ahci_sb600 }, /* ATI SB700 nraid5 */ | 423 | { PCI_VDEVICE(ATI, 0x4392), board_ahci_sb600 }, /* ATI SB700/800 */ |
424 | { PCI_VDEVICE(ATI, 0x4393), board_ahci_sb600 }, /* ATI SB700 raid5 */ | 424 | { PCI_VDEVICE(ATI, 0x4393), board_ahci_sb600 }, /* ATI SB700/800 */ |
425 | { PCI_VDEVICE(ATI, 0x4394), board_ahci_sb600 }, /* ATI SB700/800 */ | ||
426 | { PCI_VDEVICE(ATI, 0x4395), board_ahci_sb600 }, /* ATI SB700/800 */ | ||
425 | 427 | ||
426 | /* VIA */ | 428 | /* VIA */ |
427 | { PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */ | 429 | { PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */ |
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index 3b8bf1812dc8..6996eb5b7506 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c | |||
@@ -921,6 +921,13 @@ static int piix_broken_suspend(void) | |||
921 | { | 921 | { |
922 | static struct dmi_system_id sysids[] = { | 922 | static struct dmi_system_id sysids[] = { |
923 | { | 923 | { |
924 | .ident = "TECRA M3", | ||
925 | .matches = { | ||
926 | DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), | ||
927 | DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M3"), | ||
928 | }, | ||
929 | }, | ||
930 | { | ||
924 | .ident = "TECRA M5", | 931 | .ident = "TECRA M5", |
925 | .matches = { | 932 | .matches = { |
926 | DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), | 933 | DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), |
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index c43de9a710db..772be09b4689 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
@@ -3778,6 +3778,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
3778 | { "Maxtor 6L250S0", "BANC1G10", ATA_HORKAGE_NONCQ }, | 3778 | { "Maxtor 6L250S0", "BANC1G10", ATA_HORKAGE_NONCQ }, |
3779 | { "Maxtor 6B200M0", "BANC1BM0", ATA_HORKAGE_NONCQ }, | 3779 | { "Maxtor 6B200M0", "BANC1BM0", ATA_HORKAGE_NONCQ }, |
3780 | { "Maxtor 6B200M0", "BANC1B10", ATA_HORKAGE_NONCQ }, | 3780 | { "Maxtor 6B200M0", "BANC1B10", ATA_HORKAGE_NONCQ }, |
3781 | { "Maxtor 7B250S0", "BANC1B70", ATA_HORKAGE_NONCQ, }, | ||
3782 | { "Maxtor 7B300S0", "BANC1B70", ATA_HORKAGE_NONCQ }, | ||
3783 | { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ }, | ||
3781 | { "HITACHI HDS7250SASUN500G 0621KTAWSD", "K2AOAJ0AHITACHI", | 3784 | { "HITACHI HDS7250SASUN500G 0621KTAWSD", "K2AOAJ0AHITACHI", |
3782 | ATA_HORKAGE_NONCQ }, | 3785 | ATA_HORKAGE_NONCQ }, |
3783 | /* NCQ hard hangs device under heavier load, needs hard power cycle */ | 3786 | /* NCQ hard hangs device under heavier load, needs hard power cycle */ |
@@ -3794,6 +3797,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
3794 | { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, }, | 3797 | { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, }, |
3795 | { "FUJITSU MHV2080BH", "00840028", ATA_HORKAGE_NONCQ, }, | 3798 | { "FUJITSU MHV2080BH", "00840028", ATA_HORKAGE_NONCQ, }, |
3796 | { "ST9160821AS", "3.CLF", ATA_HORKAGE_NONCQ, }, | 3799 | { "ST9160821AS", "3.CLF", ATA_HORKAGE_NONCQ, }, |
3800 | { "ST3160812AS", "3.AD", ATA_HORKAGE_NONCQ, }, | ||
3797 | { "SAMSUNG HD401LJ", "ZZ100-15", ATA_HORKAGE_NONCQ, }, | 3801 | { "SAMSUNG HD401LJ", "ZZ100-15", ATA_HORKAGE_NONCQ, }, |
3798 | 3802 | ||
3799 | /* devices which puke on READ_NATIVE_MAX */ | 3803 | /* devices which puke on READ_NATIVE_MAX */ |
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index 1cce2198baaf..8023167bbbeb 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c | |||
@@ -297,7 +297,7 @@ void ata_bmdma_start (struct ata_queued_cmd *qc) | |||
297 | dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); | 297 | dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); |
298 | iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); | 298 | iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); |
299 | 299 | ||
300 | /* Strictly, one may wish to issue a readb() here, to | 300 | /* Strictly, one may wish to issue an ioread8() here, to |
301 | * flush the mmio write. However, control also passes | 301 | * flush the mmio write. However, control also passes |
302 | * to the hardware at this point, and it will interrupt | 302 | * to the hardware at this point, and it will interrupt |
303 | * us when we are to resume control. So, in effect, | 303 | * us when we are to resume control. So, in effect, |
@@ -307,6 +307,9 @@ void ata_bmdma_start (struct ata_queued_cmd *qc) | |||
307 | * is expected, so I think it is best to not add a readb() | 307 | * is expected, so I think it is best to not add a readb() |
308 | * without first all the MMIO ATA cards/mobos. | 308 | * without first all the MMIO ATA cards/mobos. |
309 | * Or maybe I'm just being paranoid. | 309 | * Or maybe I'm just being paranoid. |
310 | * | ||
311 | * FIXME: The posting of this write means I/O starts are | ||
312 | * unneccessarily delayed for MMIO | ||
310 | */ | 313 | */ |
311 | } | 314 | } |
312 | 315 | ||
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c index 2bd7645f1a88..cce2834b2b60 100644 --- a/drivers/ata/pata_sis.c +++ b/drivers/ata/pata_sis.c | |||
@@ -375,8 +375,9 @@ static void sis_66_set_dmamode (struct ata_port *ap, struct ata_device *adev) | |||
375 | int drive_pci = sis_old_port_base(adev); | 375 | int drive_pci = sis_old_port_base(adev); |
376 | u16 timing; | 376 | u16 timing; |
377 | 377 | ||
378 | /* MWDMA 0-2 and UDMA 0-5 */ | ||
378 | const u16 mwdma_bits[] = { 0x008, 0x302, 0x301 }; | 379 | const u16 mwdma_bits[] = { 0x008, 0x302, 0x301 }; |
379 | const u16 udma_bits[] = { 0xF000, 0xD000, 0xB000, 0xA000, 0x9000}; | 380 | const u16 udma_bits[] = { 0xF000, 0xD000, 0xB000, 0xA000, 0x9000, 0x8000 }; |
380 | 381 | ||
381 | pci_read_config_word(pdev, drive_pci, &timing); | 382 | pci_read_config_word(pdev, drive_pci, &timing); |
382 | 383 | ||
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c index ef83e6b1e314..233e88693395 100644 --- a/drivers/ata/sata_sil24.c +++ b/drivers/ata/sata_sil24.c | |||
@@ -888,6 +888,16 @@ static inline void sil24_host_intr(struct ata_port *ap) | |||
888 | u32 slot_stat, qc_active; | 888 | u32 slot_stat, qc_active; |
889 | int rc; | 889 | int rc; |
890 | 890 | ||
891 | /* If PCIX_IRQ_WOC, there's an inherent race window between | ||
892 | * clearing IRQ pending status and reading PORT_SLOT_STAT | ||
893 | * which may cause spurious interrupts afterwards. This is | ||
894 | * unavoidable and much better than losing interrupts which | ||
895 | * happens if IRQ pending is cleared after reading | ||
896 | * PORT_SLOT_STAT. | ||
897 | */ | ||
898 | if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC) | ||
899 | writel(PORT_IRQ_COMPLETE, port + PORT_IRQ_STAT); | ||
900 | |||
891 | slot_stat = readl(port + PORT_SLOT_STAT); | 901 | slot_stat = readl(port + PORT_SLOT_STAT); |
892 | 902 | ||
893 | if (unlikely(slot_stat & HOST_SSTAT_ATTN)) { | 903 | if (unlikely(slot_stat & HOST_SSTAT_ATTN)) { |
@@ -895,9 +905,6 @@ static inline void sil24_host_intr(struct ata_port *ap) | |||
895 | return; | 905 | return; |
896 | } | 906 | } |
897 | 907 | ||
898 | if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC) | ||
899 | writel(PORT_IRQ_COMPLETE, port + PORT_IRQ_STAT); | ||
900 | |||
901 | qc_active = slot_stat & ~HOST_SSTAT_ATTN; | 908 | qc_active = slot_stat & ~HOST_SSTAT_ATTN; |
902 | rc = ata_qc_complete_multiple(ap, qc_active, sil24_finish_qc); | 909 | rc = ata_qc_complete_multiple(ap, qc_active, sil24_finish_qc); |
903 | if (rc > 0) | 910 | if (rc > 0) |
@@ -910,7 +917,8 @@ static inline void sil24_host_intr(struct ata_port *ap) | |||
910 | return; | 917 | return; |
911 | } | 918 | } |
912 | 919 | ||
913 | if (ata_ratelimit()) | 920 | /* spurious interrupts are expected if PCIX_IRQ_WOC */ |
921 | if (!(ap->flags & SIL24_FLAG_PCIX_IRQ_WOC) && ata_ratelimit()) | ||
914 | ata_port_printk(ap, KERN_INFO, "spurious interrupt " | 922 | ata_port_printk(ap, KERN_INFO, "spurious interrupt " |
915 | "(slot_stat 0x%x active_tag %d sactive 0x%x)\n", | 923 | "(slot_stat 0x%x active_tag %d sactive 0x%x)\n", |
916 | slot_stat, ap->active_tag, ap->sactive); | 924 | slot_stat, ap->active_tag, ap->sactive); |
diff --git a/drivers/base/core.c b/drivers/base/core.c index 6de33d7a29ba..67c92582d6ef 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c | |||
@@ -284,6 +284,7 @@ static ssize_t show_uevent(struct device *dev, struct device_attribute *attr, | |||
284 | 284 | ||
285 | /* let the kset specific function add its keys */ | 285 | /* let the kset specific function add its keys */ |
286 | pos = data; | 286 | pos = data; |
287 | memset(envp, 0, sizeof(envp)); | ||
287 | retval = kset->uevent_ops->uevent(kset, &dev->kobj, | 288 | retval = kset->uevent_ops->uevent(kset, &dev->kobj, |
288 | envp, ARRAY_SIZE(envp), | 289 | envp, ARRAY_SIZE(envp), |
289 | pos, PAGE_SIZE); | 290 | pos, PAGE_SIZE); |
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index 67ee3d4b2878..79245714f0a7 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c | |||
@@ -1032,6 +1032,10 @@ int cdrom_open(struct cdrom_device_info *cdi, struct inode *ip, struct file *fp) | |||
1032 | check_disk_change(ip->i_bdev); | 1032 | check_disk_change(ip->i_bdev); |
1033 | return 0; | 1033 | return 0; |
1034 | err_release: | 1034 | err_release: |
1035 | if (CDROM_CAN(CDC_LOCK) && cdi->options & CDO_LOCK) { | ||
1036 | cdi->ops->lock_door(cdi, 0); | ||
1037 | cdinfo(CD_OPEN, "door unlocked.\n"); | ||
1038 | } | ||
1035 | cdi->ops->release(cdi); | 1039 | cdi->ops->release(cdi); |
1036 | err: | 1040 | err: |
1037 | cdi->use_count--; | 1041 | cdi->use_count--; |
diff --git a/drivers/char/drm/i915_drv.h b/drivers/char/drm/i915_drv.h index 737088bd0780..28b98733beb8 100644 --- a/drivers/char/drm/i915_drv.h +++ b/drivers/char/drm/i915_drv.h | |||
@@ -210,6 +210,12 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); | |||
210 | #define I915REG_INT_MASK_R 0x020a8 | 210 | #define I915REG_INT_MASK_R 0x020a8 |
211 | #define I915REG_INT_ENABLE_R 0x020a0 | 211 | #define I915REG_INT_ENABLE_R 0x020a0 |
212 | 212 | ||
213 | #define I915REG_PIPEASTAT 0x70024 | ||
214 | #define I915REG_PIPEBSTAT 0x71024 | ||
215 | |||
216 | #define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17) | ||
217 | #define I915_VBLANK_CLEAR (1UL<<1) | ||
218 | |||
213 | #define SRX_INDEX 0x3c4 | 219 | #define SRX_INDEX 0x3c4 |
214 | #define SRX_DATA 0x3c5 | 220 | #define SRX_DATA 0x3c5 |
215 | #define SR01 1 | 221 | #define SR01 1 |
diff --git a/drivers/char/drm/i915_irq.c b/drivers/char/drm/i915_irq.c index 4b4b2ce89863..bb8e9e9c8201 100644 --- a/drivers/char/drm/i915_irq.c +++ b/drivers/char/drm/i915_irq.c | |||
@@ -214,6 +214,10 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
214 | struct drm_device *dev = (struct drm_device *) arg; | 214 | struct drm_device *dev = (struct drm_device *) arg; |
215 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 215 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
216 | u16 temp; | 216 | u16 temp; |
217 | u32 pipea_stats, pipeb_stats; | ||
218 | |||
219 | pipea_stats = I915_READ(I915REG_PIPEASTAT); | ||
220 | pipeb_stats = I915_READ(I915REG_PIPEBSTAT); | ||
217 | 221 | ||
218 | temp = I915_READ16(I915REG_INT_IDENTITY_R); | 222 | temp = I915_READ16(I915REG_INT_IDENTITY_R); |
219 | 223 | ||
@@ -225,6 +229,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
225 | return IRQ_NONE; | 229 | return IRQ_NONE; |
226 | 230 | ||
227 | I915_WRITE16(I915REG_INT_IDENTITY_R, temp); | 231 | I915_WRITE16(I915REG_INT_IDENTITY_R, temp); |
232 | (void) I915_READ16(I915REG_INT_IDENTITY_R); | ||
233 | DRM_READMEMORYBARRIER(); | ||
228 | 234 | ||
229 | dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); | 235 | dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); |
230 | 236 | ||
@@ -252,6 +258,12 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
252 | 258 | ||
253 | if (dev_priv->swaps_pending > 0) | 259 | if (dev_priv->swaps_pending > 0) |
254 | drm_locked_tasklet(dev, i915_vblank_tasklet); | 260 | drm_locked_tasklet(dev, i915_vblank_tasklet); |
261 | I915_WRITE(I915REG_PIPEASTAT, | ||
262 | pipea_stats|I915_VBLANK_INTERRUPT_ENABLE| | ||
263 | I915_VBLANK_CLEAR); | ||
264 | I915_WRITE(I915REG_PIPEBSTAT, | ||
265 | pipeb_stats|I915_VBLANK_INTERRUPT_ENABLE| | ||
266 | I915_VBLANK_CLEAR); | ||
255 | } | 267 | } |
256 | 268 | ||
257 | return IRQ_HANDLED; | 269 | return IRQ_HANDLED; |
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index 7ecffc9c738f..4c16778e3f84 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c | |||
@@ -62,6 +62,8 @@ | |||
62 | 62 | ||
63 | static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ; | 63 | static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ; |
64 | 64 | ||
65 | /* This clocksource driver currently only works on ia64 */ | ||
66 | #ifdef CONFIG_IA64 | ||
65 | static void __iomem *hpet_mctr; | 67 | static void __iomem *hpet_mctr; |
66 | 68 | ||
67 | static cycle_t read_hpet(void) | 69 | static cycle_t read_hpet(void) |
@@ -79,6 +81,7 @@ static struct clocksource clocksource_hpet = { | |||
79 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | 81 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, |
80 | }; | 82 | }; |
81 | static struct clocksource *hpet_clocksource; | 83 | static struct clocksource *hpet_clocksource; |
84 | #endif | ||
82 | 85 | ||
83 | /* A lock for concurrent access by app and isr hpet activity. */ | 86 | /* A lock for concurrent access by app and isr hpet activity. */ |
84 | static DEFINE_SPINLOCK(hpet_lock); | 87 | static DEFINE_SPINLOCK(hpet_lock); |
@@ -943,14 +946,14 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data) | |||
943 | printk(KERN_DEBUG "%s: 0x%lx is busy\n", | 946 | printk(KERN_DEBUG "%s: 0x%lx is busy\n", |
944 | __FUNCTION__, hdp->hd_phys_address); | 947 | __FUNCTION__, hdp->hd_phys_address); |
945 | iounmap(hdp->hd_address); | 948 | iounmap(hdp->hd_address); |
946 | return -EBUSY; | 949 | return AE_ALREADY_EXISTS; |
947 | } | 950 | } |
948 | } else if (res->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32) { | 951 | } else if (res->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32) { |
949 | struct acpi_resource_fixed_memory32 *fixmem32; | 952 | struct acpi_resource_fixed_memory32 *fixmem32; |
950 | 953 | ||
951 | fixmem32 = &res->data.fixed_memory32; | 954 | fixmem32 = &res->data.fixed_memory32; |
952 | if (!fixmem32) | 955 | if (!fixmem32) |
953 | return -EINVAL; | 956 | return AE_NO_MEMORY; |
954 | 957 | ||
955 | hdp->hd_phys_address = fixmem32->address; | 958 | hdp->hd_phys_address = fixmem32->address; |
956 | hdp->hd_address = ioremap(fixmem32->address, | 959 | hdp->hd_address = ioremap(fixmem32->address, |
@@ -960,7 +963,7 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data) | |||
960 | printk(KERN_DEBUG "%s: 0x%lx is busy\n", | 963 | printk(KERN_DEBUG "%s: 0x%lx is busy\n", |
961 | __FUNCTION__, hdp->hd_phys_address); | 964 | __FUNCTION__, hdp->hd_phys_address); |
962 | iounmap(hdp->hd_address); | 965 | iounmap(hdp->hd_address); |
963 | return -EBUSY; | 966 | return AE_ALREADY_EXISTS; |
964 | } | 967 | } |
965 | } else if (res->type == ACPI_RESOURCE_TYPE_EXTENDED_IRQ) { | 968 | } else if (res->type == ACPI_RESOURCE_TYPE_EXTENDED_IRQ) { |
966 | struct acpi_resource_extended_irq *irqp; | 969 | struct acpi_resource_extended_irq *irqp; |
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c index 049a46cc9f87..04ac155d3a07 100644 --- a/drivers/char/mspec.c +++ b/drivers/char/mspec.c | |||
@@ -155,23 +155,22 @@ mspec_open(struct vm_area_struct *vma) | |||
155 | * mspec_close | 155 | * mspec_close |
156 | * | 156 | * |
157 | * Called when unmapping a device mapping. Frees all mspec pages | 157 | * Called when unmapping a device mapping. Frees all mspec pages |
158 | * belonging to the vma. | 158 | * belonging to all the vma's sharing this vma_data structure. |
159 | */ | 159 | */ |
160 | static void | 160 | static void |
161 | mspec_close(struct vm_area_struct *vma) | 161 | mspec_close(struct vm_area_struct *vma) |
162 | { | 162 | { |
163 | struct vma_data *vdata; | 163 | struct vma_data *vdata; |
164 | int index, last_index, result; | 164 | int index, last_index; |
165 | unsigned long my_page; | 165 | unsigned long my_page; |
166 | 166 | ||
167 | vdata = vma->vm_private_data; | 167 | vdata = vma->vm_private_data; |
168 | 168 | ||
169 | BUG_ON(vma->vm_start < vdata->vm_start || vma->vm_end > vdata->vm_end); | 169 | if (!atomic_dec_and_test(&vdata->refcnt)) |
170 | return; | ||
170 | 171 | ||
171 | spin_lock(&vdata->lock); | 172 | last_index = (vdata->vm_end - vdata->vm_start) >> PAGE_SHIFT; |
172 | index = (vma->vm_start - vdata->vm_start) >> PAGE_SHIFT; | 173 | for (index = 0; index < last_index; index++) { |
173 | last_index = (vma->vm_end - vdata->vm_start) >> PAGE_SHIFT; | ||
174 | for (; index < last_index; index++) { | ||
175 | if (vdata->maddr[index] == 0) | 174 | if (vdata->maddr[index] == 0) |
176 | continue; | 175 | continue; |
177 | /* | 176 | /* |
@@ -180,20 +179,12 @@ mspec_close(struct vm_area_struct *vma) | |||
180 | */ | 179 | */ |
181 | my_page = vdata->maddr[index]; | 180 | my_page = vdata->maddr[index]; |
182 | vdata->maddr[index] = 0; | 181 | vdata->maddr[index] = 0; |
183 | spin_unlock(&vdata->lock); | 182 | if (!mspec_zero_block(my_page, PAGE_SIZE)) |
184 | result = mspec_zero_block(my_page, PAGE_SIZE); | ||
185 | if (!result) | ||
186 | uncached_free_page(my_page); | 183 | uncached_free_page(my_page); |
187 | else | 184 | else |
188 | printk(KERN_WARNING "mspec_close(): " | 185 | printk(KERN_WARNING "mspec_close(): " |
189 | "failed to zero page %i\n", | 186 | "failed to zero page %ld\n", my_page); |
190 | result); | ||
191 | spin_lock(&vdata->lock); | ||
192 | } | 187 | } |
193 | spin_unlock(&vdata->lock); | ||
194 | |||
195 | if (!atomic_dec_and_test(&vdata->refcnt)) | ||
196 | return; | ||
197 | 188 | ||
198 | if (vdata->flags & VMD_VMALLOCED) | 189 | if (vdata->flags & VMD_VMALLOCED) |
199 | vfree(vdata); | 190 | vfree(vdata); |
@@ -201,7 +192,6 @@ mspec_close(struct vm_area_struct *vma) | |||
201 | kfree(vdata); | 192 | kfree(vdata); |
202 | } | 193 | } |
203 | 194 | ||
204 | |||
205 | /* | 195 | /* |
206 | * mspec_nopfn | 196 | * mspec_nopfn |
207 | * | 197 | * |
diff --git a/drivers/char/random.c b/drivers/char/random.c index 397c714cf2ba..af274e5a25ee 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c | |||
@@ -1550,11 +1550,13 @@ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr, | |||
1550 | * As close as possible to RFC 793, which | 1550 | * As close as possible to RFC 793, which |
1551 | * suggests using a 250 kHz clock. | 1551 | * suggests using a 250 kHz clock. |
1552 | * Further reading shows this assumes 2 Mb/s networks. | 1552 | * Further reading shows this assumes 2 Mb/s networks. |
1553 | * For 10 Gb/s Ethernet, a 1 GHz clock is appropriate. | 1553 | * For 10 Mb/s Ethernet, a 1 MHz clock is appropriate. |
1554 | * That's funny, Linux has one built in! Use it! | 1554 | * For 10 Gb/s Ethernet, a 1 GHz clock should be ok, but |
1555 | * (Networks are faster now - should this be increased?) | 1555 | * we also need to limit the resolution so that the u32 seq |
1556 | * overlaps less than one time per MSL (2 minutes). | ||
1557 | * Choosing a clock of 64 ns period is OK. (period of 274 s) | ||
1556 | */ | 1558 | */ |
1557 | seq += ktime_get_real().tv64; | 1559 | seq += ktime_get_real().tv64 >> 6; |
1558 | #if 0 | 1560 | #if 0 |
1559 | printk("init_seq(%lx, %lx, %d, %d) = %d\n", | 1561 | printk("init_seq(%lx, %lx, %d, %d) = %d\n", |
1560 | saddr, daddr, sport, dport, seq); | 1562 | saddr, daddr, sport, dport, seq); |
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c index c6f6f4209739..c799b7f7bbb3 100644 --- a/drivers/char/vt_ioctl.c +++ b/drivers/char/vt_ioctl.c | |||
@@ -770,6 +770,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, | |||
770 | /* | 770 | /* |
771 | * Switching-from response | 771 | * Switching-from response |
772 | */ | 772 | */ |
773 | acquire_console_sem(); | ||
773 | if (vc->vt_newvt >= 0) { | 774 | if (vc->vt_newvt >= 0) { |
774 | if (arg == 0) | 775 | if (arg == 0) |
775 | /* | 776 | /* |
@@ -784,7 +785,6 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, | |||
784 | * complete the switch. | 785 | * complete the switch. |
785 | */ | 786 | */ |
786 | int newvt; | 787 | int newvt; |
787 | acquire_console_sem(); | ||
788 | newvt = vc->vt_newvt; | 788 | newvt = vc->vt_newvt; |
789 | vc->vt_newvt = -1; | 789 | vc->vt_newvt = -1; |
790 | i = vc_allocate(newvt); | 790 | i = vc_allocate(newvt); |
@@ -798,7 +798,6 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, | |||
798 | * other console switches.. | 798 | * other console switches.. |
799 | */ | 799 | */ |
800 | complete_change_console(vc_cons[newvt].d); | 800 | complete_change_console(vc_cons[newvt].d); |
801 | release_console_sem(); | ||
802 | } | 801 | } |
803 | } | 802 | } |
804 | 803 | ||
@@ -810,9 +809,12 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, | |||
810 | /* | 809 | /* |
811 | * If it's just an ACK, ignore it | 810 | * If it's just an ACK, ignore it |
812 | */ | 811 | */ |
813 | if (arg != VT_ACKACQ) | 812 | if (arg != VT_ACKACQ) { |
813 | release_console_sem(); | ||
814 | return -EINVAL; | 814 | return -EINVAL; |
815 | } | ||
815 | } | 816 | } |
817 | release_console_sem(); | ||
816 | 818 | ||
817 | return 0; | 819 | return 0; |
818 | 820 | ||
@@ -1208,15 +1210,18 @@ void change_console(struct vc_data *new_vc) | |||
1208 | /* | 1210 | /* |
1209 | * Send the signal as privileged - kill_pid() will | 1211 | * Send the signal as privileged - kill_pid() will |
1210 | * tell us if the process has gone or something else | 1212 | * tell us if the process has gone or something else |
1211 | * is awry | 1213 | * is awry. |
1214 | * | ||
1215 | * We need to set vt_newvt *before* sending the signal or we | ||
1216 | * have a race. | ||
1212 | */ | 1217 | */ |
1218 | vc->vt_newvt = new_vc->vc_num; | ||
1213 | if (kill_pid(vc->vt_pid, vc->vt_mode.relsig, 1) == 0) { | 1219 | if (kill_pid(vc->vt_pid, vc->vt_mode.relsig, 1) == 0) { |
1214 | /* | 1220 | /* |
1215 | * It worked. Mark the vt to switch to and | 1221 | * It worked. Mark the vt to switch to and |
1216 | * return. The process needs to send us a | 1222 | * return. The process needs to send us a |
1217 | * VT_RELDISP ioctl to complete the switch. | 1223 | * VT_RELDISP ioctl to complete the switch. |
1218 | */ | 1224 | */ |
1219 | vc->vt_newvt = new_vc->vc_num; | ||
1220 | return; | 1225 | return; |
1221 | } | 1226 | } |
1222 | 1227 | ||
diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c index ee45259573c8..98fd985a32ff 100644 --- a/drivers/ieee1394/ieee1394_core.c +++ b/drivers/ieee1394/ieee1394_core.c | |||
@@ -1273,7 +1273,7 @@ static void __exit ieee1394_cleanup(void) | |||
1273 | unregister_chrdev_region(IEEE1394_CORE_DEV, 256); | 1273 | unregister_chrdev_region(IEEE1394_CORE_DEV, 256); |
1274 | } | 1274 | } |
1275 | 1275 | ||
1276 | fs_initcall(ieee1394_init); /* same as ohci1394 */ | 1276 | module_init(ieee1394_init); |
1277 | module_exit(ieee1394_cleanup); | 1277 | module_exit(ieee1394_cleanup); |
1278 | 1278 | ||
1279 | /* Exported symbols */ | 1279 | /* Exported symbols */ |
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c index 5667c8102efc..372c5c16eb31 100644 --- a/drivers/ieee1394/ohci1394.c +++ b/drivers/ieee1394/ohci1394.c | |||
@@ -3537,7 +3537,5 @@ static int __init ohci1394_init(void) | |||
3537 | return pci_register_driver(&ohci1394_pci_driver); | 3537 | return pci_register_driver(&ohci1394_pci_driver); |
3538 | } | 3538 | } |
3539 | 3539 | ||
3540 | /* Register before most other device drivers. | 3540 | module_init(ohci1394_init); |
3541 | * Useful for remote debugging via physical DMA, e.g. using firescope. */ | ||
3542 | fs_initcall(ohci1394_init); | ||
3543 | module_exit(ohci1394_cleanup); | 3541 | module_exit(ohci1394_cleanup); |
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index ba0428d872aa..85c51bdc36f1 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c | |||
@@ -1211,12 +1211,42 @@ static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg, | |||
1211 | dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey); | 1211 | dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey); |
1212 | } | 1212 | } |
1213 | 1213 | ||
1214 | static void set_data_seg(struct mlx4_wqe_data_seg *dseg, | 1214 | static void set_mlx_icrc_seg(void *dseg) |
1215 | struct ib_sge *sg) | 1215 | { |
1216 | u32 *t = dseg; | ||
1217 | struct mlx4_wqe_inline_seg *iseg = dseg; | ||
1218 | |||
1219 | t[1] = 0; | ||
1220 | |||
1221 | /* | ||
1222 | * Need a barrier here before writing the byte_count field to | ||
1223 | * make sure that all the data is visible before the | ||
1224 | * byte_count field is set. Otherwise, if the segment begins | ||
1225 | * a new cacheline, the HCA prefetcher could grab the 64-byte | ||
1226 | * chunk and get a valid (!= * 0xffffffff) byte count but | ||
1227 | * stale data, and end up sending the wrong data. | ||
1228 | */ | ||
1229 | wmb(); | ||
1230 | |||
1231 | iseg->byte_count = cpu_to_be32((1 << 31) | 4); | ||
1232 | } | ||
1233 | |||
1234 | static void set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg) | ||
1216 | { | 1235 | { |
1217 | dseg->byte_count = cpu_to_be32(sg->length); | ||
1218 | dseg->lkey = cpu_to_be32(sg->lkey); | 1236 | dseg->lkey = cpu_to_be32(sg->lkey); |
1219 | dseg->addr = cpu_to_be64(sg->addr); | 1237 | dseg->addr = cpu_to_be64(sg->addr); |
1238 | |||
1239 | /* | ||
1240 | * Need a barrier here before writing the byte_count field to | ||
1241 | * make sure that all the data is visible before the | ||
1242 | * byte_count field is set. Otherwise, if the segment begins | ||
1243 | * a new cacheline, the HCA prefetcher could grab the 64-byte | ||
1244 | * chunk and get a valid (!= * 0xffffffff) byte count but | ||
1245 | * stale data, and end up sending the wrong data. | ||
1246 | */ | ||
1247 | wmb(); | ||
1248 | |||
1249 | dseg->byte_count = cpu_to_be32(sg->length); | ||
1220 | } | 1250 | } |
1221 | 1251 | ||
1222 | int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | 1252 | int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, |
@@ -1225,6 +1255,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
1225 | struct mlx4_ib_qp *qp = to_mqp(ibqp); | 1255 | struct mlx4_ib_qp *qp = to_mqp(ibqp); |
1226 | void *wqe; | 1256 | void *wqe; |
1227 | struct mlx4_wqe_ctrl_seg *ctrl; | 1257 | struct mlx4_wqe_ctrl_seg *ctrl; |
1258 | struct mlx4_wqe_data_seg *dseg; | ||
1228 | unsigned long flags; | 1259 | unsigned long flags; |
1229 | int nreq; | 1260 | int nreq; |
1230 | int err = 0; | 1261 | int err = 0; |
@@ -1324,22 +1355,27 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
1324 | break; | 1355 | break; |
1325 | } | 1356 | } |
1326 | 1357 | ||
1327 | for (i = 0; i < wr->num_sge; ++i) { | 1358 | /* |
1328 | set_data_seg(wqe, wr->sg_list + i); | 1359 | * Write data segments in reverse order, so as to |
1360 | * overwrite cacheline stamp last within each | ||
1361 | * cacheline. This avoids issues with WQE | ||
1362 | * prefetching. | ||
1363 | */ | ||
1329 | 1364 | ||
1330 | wqe += sizeof (struct mlx4_wqe_data_seg); | 1365 | dseg = wqe; |
1331 | size += sizeof (struct mlx4_wqe_data_seg) / 16; | 1366 | dseg += wr->num_sge - 1; |
1332 | } | 1367 | size += wr->num_sge * (sizeof (struct mlx4_wqe_data_seg) / 16); |
1333 | 1368 | ||
1334 | /* Add one more inline data segment for ICRC for MLX sends */ | 1369 | /* Add one more inline data segment for ICRC for MLX sends */ |
1335 | if (qp->ibqp.qp_type == IB_QPT_SMI || qp->ibqp.qp_type == IB_QPT_GSI) { | 1370 | if (unlikely(qp->ibqp.qp_type == IB_QPT_SMI || |
1336 | ((struct mlx4_wqe_inline_seg *) wqe)->byte_count = | 1371 | qp->ibqp.qp_type == IB_QPT_GSI)) { |
1337 | cpu_to_be32((1 << 31) | 4); | 1372 | set_mlx_icrc_seg(dseg + 1); |
1338 | ((u32 *) wqe)[1] = 0; | ||
1339 | wqe += sizeof (struct mlx4_wqe_data_seg); | ||
1340 | size += sizeof (struct mlx4_wqe_data_seg) / 16; | 1373 | size += sizeof (struct mlx4_wqe_data_seg) / 16; |
1341 | } | 1374 | } |
1342 | 1375 | ||
1376 | for (i = wr->num_sge - 1; i >= 0; --i, --dseg) | ||
1377 | set_data_seg(dseg, wr->sg_list + i); | ||
1378 | |||
1343 | ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ? | 1379 | ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ? |
1344 | MLX4_WQE_CTRL_FENCE : 0) | size; | 1380 | MLX4_WQE_CTRL_FENCE : 0) | size; |
1345 | 1381 | ||
diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig index e2abe18e575d..7c662ee594a3 100644 --- a/drivers/input/joystick/Kconfig +++ b/drivers/input/joystick/Kconfig | |||
@@ -277,7 +277,7 @@ config JOYSTICK_XPAD_FF | |||
277 | 277 | ||
278 | config JOYSTICK_XPAD_LEDS | 278 | config JOYSTICK_XPAD_LEDS |
279 | bool "LED Support for Xbox360 controller 'BigX' LED" | 279 | bool "LED Support for Xbox360 controller 'BigX' LED" |
280 | depends on LEDS_CLASS && JOYSTICK_XPAD | 280 | depends on JOYSTICK_XPAD && (LEDS_CLASS=y || LEDS_CLASS=JOYSTICK_XPAD) |
281 | ---help--- | 281 | ---help--- |
282 | This option enables support for the LED which surrounds the Big X on | 282 | This option enables support for the LED which surrounds the Big X on |
283 | XBox 360 controller. | 283 | XBox 360 controller. |
diff --git a/drivers/input/mouse/appletouch.c b/drivers/input/mouse/appletouch.c index 2bea1b2c631c..a1804bfdbb8c 100644 --- a/drivers/input/mouse/appletouch.c +++ b/drivers/input/mouse/appletouch.c | |||
@@ -328,6 +328,7 @@ static void atp_complete(struct urb* urb) | |||
328 | { | 328 | { |
329 | int x, y, x_z, y_z, x_f, y_f; | 329 | int x, y, x_z, y_z, x_f, y_f; |
330 | int retval, i, j; | 330 | int retval, i, j; |
331 | int key; | ||
331 | struct atp *dev = urb->context; | 332 | struct atp *dev = urb->context; |
332 | 333 | ||
333 | switch (urb->status) { | 334 | switch (urb->status) { |
@@ -468,6 +469,7 @@ static void atp_complete(struct urb* urb) | |||
468 | ATP_XFACT, &x_z, &x_f); | 469 | ATP_XFACT, &x_z, &x_f); |
469 | y = atp_calculate_abs(dev->xy_acc + ATP_XSENSORS, ATP_YSENSORS, | 470 | y = atp_calculate_abs(dev->xy_acc + ATP_XSENSORS, ATP_YSENSORS, |
470 | ATP_YFACT, &y_z, &y_f); | 471 | ATP_YFACT, &y_z, &y_f); |
472 | key = dev->data[dev->datalen - 1] & 1; | ||
471 | 473 | ||
472 | if (x && y) { | 474 | if (x && y) { |
473 | if (dev->x_old != -1) { | 475 | if (dev->x_old != -1) { |
@@ -505,7 +507,7 @@ static void atp_complete(struct urb* urb) | |||
505 | the first touch unless reinitialised. Do so if it's been | 507 | the first touch unless reinitialised. Do so if it's been |
506 | idle for a while in order to avoid waking the kernel up | 508 | idle for a while in order to avoid waking the kernel up |
507 | several hundred times a second */ | 509 | several hundred times a second */ |
508 | if (atp_is_geyser_3(dev)) { | 510 | if (!key && atp_is_geyser_3(dev)) { |
509 | dev->idlecount++; | 511 | dev->idlecount++; |
510 | if (dev->idlecount == 10) { | 512 | if (dev->idlecount == 10) { |
511 | dev->valid = 0; | 513 | dev->valid = 0; |
@@ -514,7 +516,7 @@ static void atp_complete(struct urb* urb) | |||
514 | } | 516 | } |
515 | } | 517 | } |
516 | 518 | ||
517 | input_report_key(dev->input, BTN_LEFT, dev->data[dev->datalen - 1] & 1); | 519 | input_report_key(dev->input, BTN_LEFT, key); |
518 | input_sync(dev->input); | 520 | input_sync(dev->input); |
519 | 521 | ||
520 | exit: | 522 | exit: |
diff --git a/drivers/kvm/Kconfig b/drivers/kvm/Kconfig index 7b64fd4aa2f3..0a419a0de603 100644 --- a/drivers/kvm/Kconfig +++ b/drivers/kvm/Kconfig | |||
@@ -6,7 +6,8 @@ menuconfig VIRTUALIZATION | |||
6 | depends on X86 | 6 | depends on X86 |
7 | default y | 7 | default y |
8 | ---help--- | 8 | ---help--- |
9 | Say Y here to get to see options for virtualization guest drivers. | 9 | Say Y here to get to see options for using your Linux host to run other |
10 | operating systems inside virtual machines (guests). | ||
10 | This option alone does not add any kernel code. | 11 | This option alone does not add any kernel code. |
11 | 12 | ||
12 | If you say N, all options in this submenu will be skipped and disabled. | 13 | If you say N, all options in this submenu will be skipped and disabled. |
diff --git a/drivers/lguest/lguest_asm.S b/drivers/lguest/lguest_asm.S index f182c6a36209..1ddcd5cd20f6 100644 --- a/drivers/lguest/lguest_asm.S +++ b/drivers/lguest/lguest_asm.S | |||
@@ -22,8 +22,9 @@ | |||
22 | jmp lguest_init | 22 | jmp lguest_init |
23 | 23 | ||
24 | /*G:055 We create a macro which puts the assembler code between lgstart_ and | 24 | /*G:055 We create a macro which puts the assembler code between lgstart_ and |
25 | * lgend_ markers. These templates end up in the .init.text section, so they | 25 | * lgend_ markers. These templates are put in the .text section: they can't be |
26 | * are discarded after boot. */ | 26 | * discarded after boot as we may need to patch modules, too. */ |
27 | .text | ||
27 | #define LGUEST_PATCH(name, insns...) \ | 28 | #define LGUEST_PATCH(name, insns...) \ |
28 | lgstart_##name: insns; lgend_##name:; \ | 29 | lgstart_##name: insns; lgend_##name:; \ |
29 | .globl lgstart_##name; .globl lgend_##name | 30 | .globl lgstart_##name; .globl lgend_##name |
@@ -34,7 +35,6 @@ LGUEST_PATCH(popf, movl %eax, lguest_data+LGUEST_DATA_irq_enabled) | |||
34 | LGUEST_PATCH(pushf, movl lguest_data+LGUEST_DATA_irq_enabled, %eax) | 35 | LGUEST_PATCH(pushf, movl lguest_data+LGUEST_DATA_irq_enabled, %eax) |
35 | /*:*/ | 36 | /*:*/ |
36 | 37 | ||
37 | .text | ||
38 | /* These demark the EIP range where host should never deliver interrupts. */ | 38 | /* These demark the EIP range where host should never deliver interrupts. */ |
39 | .global lguest_noirq_start | 39 | .global lguest_noirq_start |
40 | .global lguest_noirq_end | 40 | .global lguest_noirq_end |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 4d63773ee73a..f96dea975fa5 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -514,7 +514,7 @@ static void ops_complete_biofill(void *stripe_head_ref) | |||
514 | struct stripe_head *sh = stripe_head_ref; | 514 | struct stripe_head *sh = stripe_head_ref; |
515 | struct bio *return_bi = NULL; | 515 | struct bio *return_bi = NULL; |
516 | raid5_conf_t *conf = sh->raid_conf; | 516 | raid5_conf_t *conf = sh->raid_conf; |
517 | int i, more_to_read = 0; | 517 | int i; |
518 | 518 | ||
519 | pr_debug("%s: stripe %llu\n", __FUNCTION__, | 519 | pr_debug("%s: stripe %llu\n", __FUNCTION__, |
520 | (unsigned long long)sh->sector); | 520 | (unsigned long long)sh->sector); |
@@ -522,16 +522,14 @@ static void ops_complete_biofill(void *stripe_head_ref) | |||
522 | /* clear completed biofills */ | 522 | /* clear completed biofills */ |
523 | for (i = sh->disks; i--; ) { | 523 | for (i = sh->disks; i--; ) { |
524 | struct r5dev *dev = &sh->dev[i]; | 524 | struct r5dev *dev = &sh->dev[i]; |
525 | /* check if this stripe has new incoming reads */ | ||
526 | if (dev->toread) | ||
527 | more_to_read++; | ||
528 | 525 | ||
529 | /* acknowledge completion of a biofill operation */ | 526 | /* acknowledge completion of a biofill operation */ |
530 | /* and check if we need to reply to a read request | 527 | /* and check if we need to reply to a read request, |
531 | */ | 528 | * new R5_Wantfill requests are held off until |
532 | if (test_bit(R5_Wantfill, &dev->flags) && !dev->toread) { | 529 | * !test_bit(STRIPE_OP_BIOFILL, &sh->ops.pending) |
530 | */ | ||
531 | if (test_and_clear_bit(R5_Wantfill, &dev->flags)) { | ||
533 | struct bio *rbi, *rbi2; | 532 | struct bio *rbi, *rbi2; |
534 | clear_bit(R5_Wantfill, &dev->flags); | ||
535 | 533 | ||
536 | /* The access to dev->read is outside of the | 534 | /* The access to dev->read is outside of the |
537 | * spin_lock_irq(&conf->device_lock), but is protected | 535 | * spin_lock_irq(&conf->device_lock), but is protected |
@@ -558,8 +556,7 @@ static void ops_complete_biofill(void *stripe_head_ref) | |||
558 | 556 | ||
559 | return_io(return_bi); | 557 | return_io(return_bi); |
560 | 558 | ||
561 | if (more_to_read) | 559 | set_bit(STRIPE_HANDLE, &sh->state); |
562 | set_bit(STRIPE_HANDLE, &sh->state); | ||
563 | release_stripe(sh); | 560 | release_stripe(sh); |
564 | } | 561 | } |
565 | 562 | ||
diff --git a/drivers/media/video/ivtv/ivtv-fileops.c b/drivers/media/video/ivtv/ivtv-fileops.c index 0285c4a830eb..66ea3cbc369c 100644 --- a/drivers/media/video/ivtv/ivtv-fileops.c +++ b/drivers/media/video/ivtv/ivtv-fileops.c | |||
@@ -754,9 +754,11 @@ static void ivtv_stop_decoding(struct ivtv_open_id *id, int flags, u64 pts) | |||
754 | ivtv_yuv_close(itv); | 754 | ivtv_yuv_close(itv); |
755 | } | 755 | } |
756 | if (s->type == IVTV_DEC_STREAM_TYPE_YUV && itv->output_mode == OUT_YUV) | 756 | if (s->type == IVTV_DEC_STREAM_TYPE_YUV && itv->output_mode == OUT_YUV) |
757 | itv->output_mode = OUT_NONE; | 757 | itv->output_mode = OUT_NONE; |
758 | else if (s->type == IVTV_DEC_STREAM_TYPE_YUV && itv->output_mode == OUT_UDMA_YUV) | ||
759 | itv->output_mode = OUT_NONE; | ||
758 | else if (s->type == IVTV_DEC_STREAM_TYPE_MPG && itv->output_mode == OUT_MPG) | 760 | else if (s->type == IVTV_DEC_STREAM_TYPE_MPG && itv->output_mode == OUT_MPG) |
759 | itv->output_mode = OUT_NONE; | 761 | itv->output_mode = OUT_NONE; |
760 | 762 | ||
761 | itv->speed = 0; | 763 | itv->speed = 0; |
762 | clear_bit(IVTV_F_I_DEC_PAUSED, &itv->i_flags); | 764 | clear_bit(IVTV_F_I_DEC_PAUSED, &itv->i_flags); |
diff --git a/drivers/media/video/usbvision/usbvision-video.c b/drivers/media/video/usbvision/usbvision-video.c index e3371f972240..0cb006f2943d 100644 --- a/drivers/media/video/usbvision/usbvision-video.c +++ b/drivers/media/video/usbvision/usbvision-video.c | |||
@@ -1387,7 +1387,6 @@ static const struct file_operations usbvision_fops = { | |||
1387 | .ioctl = video_ioctl2, | 1387 | .ioctl = video_ioctl2, |
1388 | .llseek = no_llseek, | 1388 | .llseek = no_llseek, |
1389 | /* .poll = video_poll, */ | 1389 | /* .poll = video_poll, */ |
1390 | .mmap = usbvision_v4l2_mmap, | ||
1391 | .compat_ioctl = v4l_compat_ioctl32, | 1390 | .compat_ioctl = v4l_compat_ioctl32, |
1392 | }; | 1391 | }; |
1393 | static struct video_device usbvision_video_template = { | 1392 | static struct video_device usbvision_video_template = { |
@@ -1413,7 +1412,7 @@ static struct video_device usbvision_video_template = { | |||
1413 | .vidioc_s_input = vidioc_s_input, | 1412 | .vidioc_s_input = vidioc_s_input, |
1414 | .vidioc_queryctrl = vidioc_queryctrl, | 1413 | .vidioc_queryctrl = vidioc_queryctrl, |
1415 | .vidioc_g_audio = vidioc_g_audio, | 1414 | .vidioc_g_audio = vidioc_g_audio, |
1416 | .vidioc_g_audio = vidioc_s_audio, | 1415 | .vidioc_s_audio = vidioc_s_audio, |
1417 | .vidioc_g_ctrl = vidioc_g_ctrl, | 1416 | .vidioc_g_ctrl = vidioc_g_ctrl, |
1418 | .vidioc_s_ctrl = vidioc_s_ctrl, | 1417 | .vidioc_s_ctrl = vidioc_s_ctrl, |
1419 | .vidioc_streamon = vidioc_streamon, | 1418 | .vidioc_streamon = vidioc_streamon, |
@@ -1459,7 +1458,7 @@ static struct video_device usbvision_radio_template= | |||
1459 | .vidioc_s_input = vidioc_s_input, | 1458 | .vidioc_s_input = vidioc_s_input, |
1460 | .vidioc_queryctrl = vidioc_queryctrl, | 1459 | .vidioc_queryctrl = vidioc_queryctrl, |
1461 | .vidioc_g_audio = vidioc_g_audio, | 1460 | .vidioc_g_audio = vidioc_g_audio, |
1462 | .vidioc_g_audio = vidioc_s_audio, | 1461 | .vidioc_s_audio = vidioc_s_audio, |
1463 | .vidioc_g_ctrl = vidioc_g_ctrl, | 1462 | .vidioc_g_ctrl = vidioc_g_ctrl, |
1464 | .vidioc_s_ctrl = vidioc_s_ctrl, | 1463 | .vidioc_s_ctrl = vidioc_s_ctrl, |
1465 | .vidioc_g_tuner = vidioc_g_tuner, | 1464 | .vidioc_g_tuner = vidioc_g_tuner, |
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index 854d80c330ec..66eed22cbd21 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c | |||
@@ -54,8 +54,8 @@ | |||
54 | 54 | ||
55 | #define DRV_MODULE_NAME "bnx2" | 55 | #define DRV_MODULE_NAME "bnx2" |
56 | #define PFX DRV_MODULE_NAME ": " | 56 | #define PFX DRV_MODULE_NAME ": " |
57 | #define DRV_MODULE_VERSION "1.6.4" | 57 | #define DRV_MODULE_VERSION "1.6.5" |
58 | #define DRV_MODULE_RELDATE "August 3, 2007" | 58 | #define DRV_MODULE_RELDATE "September 20, 2007" |
59 | 59 | ||
60 | #define RUN_AT(x) (jiffies + (x)) | 60 | #define RUN_AT(x) (jiffies + (x)) |
61 | 61 | ||
@@ -6727,7 +6727,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) | |||
6727 | } else if (CHIP_NUM(bp) == CHIP_NUM_5706 || | 6727 | } else if (CHIP_NUM(bp) == CHIP_NUM_5706 || |
6728 | CHIP_NUM(bp) == CHIP_NUM_5708) | 6728 | CHIP_NUM(bp) == CHIP_NUM_5708) |
6729 | bp->phy_flags |= PHY_CRC_FIX_FLAG; | 6729 | bp->phy_flags |= PHY_CRC_FIX_FLAG; |
6730 | else if (CHIP_ID(bp) == CHIP_ID_5709_A0) | 6730 | else if (CHIP_ID(bp) == CHIP_ID_5709_A0 || |
6731 | CHIP_ID(bp) == CHIP_ID_5709_A1) | ||
6731 | bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG; | 6732 | bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG; |
6732 | 6733 | ||
6733 | if ((CHIP_ID(bp) == CHIP_ID_5708_A0) || | 6734 | if ((CHIP_ID(bp) == CHIP_ID_5708_A0) || |
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c index 4c3785c9d4b8..9ecc3adcf6c1 100644 --- a/drivers/net/e1000/e1000_ethtool.c +++ b/drivers/net/e1000/e1000_ethtool.c | |||
@@ -1726,6 +1726,7 @@ static int e1000_wol_exclusion(struct e1000_adapter *adapter, struct ethtool_wol | |||
1726 | case E1000_DEV_ID_82571EB_QUAD_COPPER: | 1726 | case E1000_DEV_ID_82571EB_QUAD_COPPER: |
1727 | case E1000_DEV_ID_82571EB_QUAD_FIBER: | 1727 | case E1000_DEV_ID_82571EB_QUAD_FIBER: |
1728 | case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE: | 1728 | case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE: |
1729 | case E1000_DEV_ID_82571PT_QUAD_COPPER: | ||
1729 | case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: | 1730 | case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: |
1730 | /* quad port adapters only support WoL on port A */ | 1731 | /* quad port adapters only support WoL on port A */ |
1731 | if (!adapter->quad_port_a) { | 1732 | if (!adapter->quad_port_a) { |
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c index ba120f7fb0be..8604adbe351c 100644 --- a/drivers/net/e1000/e1000_hw.c +++ b/drivers/net/e1000/e1000_hw.c | |||
@@ -387,6 +387,7 @@ e1000_set_mac_type(struct e1000_hw *hw) | |||
387 | case E1000_DEV_ID_82571EB_SERDES_DUAL: | 387 | case E1000_DEV_ID_82571EB_SERDES_DUAL: |
388 | case E1000_DEV_ID_82571EB_SERDES_QUAD: | 388 | case E1000_DEV_ID_82571EB_SERDES_QUAD: |
389 | case E1000_DEV_ID_82571EB_QUAD_COPPER: | 389 | case E1000_DEV_ID_82571EB_QUAD_COPPER: |
390 | case E1000_DEV_ID_82571PT_QUAD_COPPER: | ||
390 | case E1000_DEV_ID_82571EB_QUAD_FIBER: | 391 | case E1000_DEV_ID_82571EB_QUAD_FIBER: |
391 | case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE: | 392 | case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE: |
392 | hw->mac_type = e1000_82571; | 393 | hw->mac_type = e1000_82571; |
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h index fe8714655c90..07f0ea73676e 100644 --- a/drivers/net/e1000/e1000_hw.h +++ b/drivers/net/e1000/e1000_hw.h | |||
@@ -475,6 +475,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw); | |||
475 | #define E1000_DEV_ID_82571EB_FIBER 0x105F | 475 | #define E1000_DEV_ID_82571EB_FIBER 0x105F |
476 | #define E1000_DEV_ID_82571EB_SERDES 0x1060 | 476 | #define E1000_DEV_ID_82571EB_SERDES 0x1060 |
477 | #define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4 | 477 | #define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4 |
478 | #define E1000_DEV_ID_82571PT_QUAD_COPPER 0x10D5 | ||
478 | #define E1000_DEV_ID_82571EB_QUAD_FIBER 0x10A5 | 479 | #define E1000_DEV_ID_82571EB_QUAD_FIBER 0x10A5 |
479 | #define E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE 0x10BC | 480 | #define E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE 0x10BC |
480 | #define E1000_DEV_ID_82571EB_SERDES_DUAL 0x10D9 | 481 | #define E1000_DEV_ID_82571EB_SERDES_DUAL 0x10D9 |
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 4a225950fb43..e7c8951f47fa 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c | |||
@@ -108,6 +108,7 @@ static struct pci_device_id e1000_pci_tbl[] = { | |||
108 | INTEL_E1000_ETHERNET_DEVICE(0x10BC), | 108 | INTEL_E1000_ETHERNET_DEVICE(0x10BC), |
109 | INTEL_E1000_ETHERNET_DEVICE(0x10C4), | 109 | INTEL_E1000_ETHERNET_DEVICE(0x10C4), |
110 | INTEL_E1000_ETHERNET_DEVICE(0x10C5), | 110 | INTEL_E1000_ETHERNET_DEVICE(0x10C5), |
111 | INTEL_E1000_ETHERNET_DEVICE(0x10D5), | ||
111 | INTEL_E1000_ETHERNET_DEVICE(0x10D9), | 112 | INTEL_E1000_ETHERNET_DEVICE(0x10D9), |
112 | INTEL_E1000_ETHERNET_DEVICE(0x10DA), | 113 | INTEL_E1000_ETHERNET_DEVICE(0x10DA), |
113 | /* required last entry */ | 114 | /* required last entry */ |
@@ -1101,6 +1102,7 @@ e1000_probe(struct pci_dev *pdev, | |||
1101 | case E1000_DEV_ID_82571EB_QUAD_COPPER: | 1102 | case E1000_DEV_ID_82571EB_QUAD_COPPER: |
1102 | case E1000_DEV_ID_82571EB_QUAD_FIBER: | 1103 | case E1000_DEV_ID_82571EB_QUAD_FIBER: |
1103 | case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE: | 1104 | case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE: |
1105 | case E1000_DEV_ID_82571PT_QUAD_COPPER: | ||
1104 | /* if quad port adapter, disable WoL on all but port A */ | 1106 | /* if quad port adapter, disable WoL on all but port A */ |
1105 | if (global_quad_port_a != 0) | 1107 | if (global_quad_port_a != 0) |
1106 | adapter->eeprom_wol = 0; | 1108 | adapter->eeprom_wol = 0; |
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index 6a117e9968cb..315335671f0f 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c | |||
@@ -534,7 +534,7 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id) | |||
534 | } | 534 | } |
535 | 535 | ||
536 | /* PHY status changed */ | 536 | /* PHY status changed */ |
537 | if (eth_int_cause_ext & ETH_INT_CAUSE_PHY) { | 537 | if (eth_int_cause_ext & (ETH_INT_CAUSE_PHY | ETH_INT_CAUSE_STATE)) { |
538 | struct ethtool_cmd cmd; | 538 | struct ethtool_cmd cmd; |
539 | 539 | ||
540 | if (mii_link_ok(&mp->mii)) { | 540 | if (mii_link_ok(&mp->mii)) { |
@@ -1357,7 +1357,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev) | |||
1357 | #endif | 1357 | #endif |
1358 | 1358 | ||
1359 | dev->watchdog_timeo = 2 * HZ; | 1359 | dev->watchdog_timeo = 2 * HZ; |
1360 | dev->tx_queue_len = mp->tx_ring_size; | ||
1361 | dev->base_addr = 0; | 1360 | dev->base_addr = 0; |
1362 | dev->change_mtu = mv643xx_eth_change_mtu; | 1361 | dev->change_mtu = mv643xx_eth_change_mtu; |
1363 | dev->do_ioctl = mv643xx_eth_do_ioctl; | 1362 | dev->do_ioctl = mv643xx_eth_do_ioctl; |
@@ -2768,8 +2767,6 @@ static const struct ethtool_ops mv643xx_ethtool_ops = { | |||
2768 | .get_stats_count = mv643xx_get_stats_count, | 2767 | .get_stats_count = mv643xx_get_stats_count, |
2769 | .get_ethtool_stats = mv643xx_get_ethtool_stats, | 2768 | .get_ethtool_stats = mv643xx_get_ethtool_stats, |
2770 | .get_strings = mv643xx_get_strings, | 2769 | .get_strings = mv643xx_get_strings, |
2771 | .get_stats_count = mv643xx_get_stats_count, | ||
2772 | .get_ethtool_stats = mv643xx_get_ethtool_stats, | ||
2773 | .nway_reset = mv643xx_eth_nway_restart, | 2770 | .nway_reset = mv643xx_eth_nway_restart, |
2774 | }; | 2771 | }; |
2775 | 2772 | ||
diff --git a/drivers/net/mv643xx_eth.h b/drivers/net/mv643xx_eth.h index 82f8c0cbfb64..565b96696aca 100644 --- a/drivers/net/mv643xx_eth.h +++ b/drivers/net/mv643xx_eth.h | |||
@@ -64,7 +64,9 @@ | |||
64 | #define ETH_INT_CAUSE_TX_ERROR (ETH_TX_QUEUES_ENABLED << 8) | 64 | #define ETH_INT_CAUSE_TX_ERROR (ETH_TX_QUEUES_ENABLED << 8) |
65 | #define ETH_INT_CAUSE_TX (ETH_INT_CAUSE_TX_DONE | ETH_INT_CAUSE_TX_ERROR) | 65 | #define ETH_INT_CAUSE_TX (ETH_INT_CAUSE_TX_DONE | ETH_INT_CAUSE_TX_ERROR) |
66 | #define ETH_INT_CAUSE_PHY 0x00010000 | 66 | #define ETH_INT_CAUSE_PHY 0x00010000 |
67 | #define ETH_INT_UNMASK_ALL_EXT (ETH_INT_CAUSE_TX | ETH_INT_CAUSE_PHY) | 67 | #define ETH_INT_CAUSE_STATE 0x00100000 |
68 | #define ETH_INT_UNMASK_ALL_EXT (ETH_INT_CAUSE_TX | ETH_INT_CAUSE_PHY | \ | ||
69 | ETH_INT_CAUSE_STATE) | ||
68 | 70 | ||
69 | #define ETH_INT_MASK_ALL 0x00000000 | 71 | #define ETH_INT_MASK_ALL 0x00000000 |
70 | #define ETH_INT_MASK_ALL_EXT 0x00000000 | 72 | #define ETH_INT_MASK_ALL_EXT 0x00000000 |
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index 1c42266bf889..556962f9612d 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c | |||
@@ -3094,9 +3094,12 @@ static void myri10ge_remove(struct pci_dev *pdev) | |||
3094 | } | 3094 | } |
3095 | 3095 | ||
3096 | #define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E 0x0008 | 3096 | #define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E 0x0008 |
3097 | #define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9 0x0009 | ||
3097 | 3098 | ||
3098 | static struct pci_device_id myri10ge_pci_tbl[] = { | 3099 | static struct pci_device_id myri10ge_pci_tbl[] = { |
3099 | {PCI_DEVICE(PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E)}, | 3100 | {PCI_DEVICE(PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E)}, |
3101 | {PCI_DEVICE | ||
3102 | (PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9)}, | ||
3100 | {0}, | 3103 | {0}, |
3101 | }; | 3104 | }; |
3102 | 3105 | ||
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c index c06cae3f0b56..503f2685fb73 100644 --- a/drivers/net/pcmcia/3c589_cs.c +++ b/drivers/net/pcmcia/3c589_cs.c | |||
@@ -116,7 +116,7 @@ struct el3_private { | |||
116 | spinlock_t lock; | 116 | spinlock_t lock; |
117 | }; | 117 | }; |
118 | 118 | ||
119 | static const char *if_names[] = { "auto", "10base2", "10baseT", "AUI" }; | 119 | static const char *if_names[] = { "auto", "10baseT", "10base2", "AUI" }; |
120 | 120 | ||
121 | /*====================================================================*/ | 121 | /*====================================================================*/ |
122 | 122 | ||
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 0cc4369cacba..cb230f44d6fc 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c | |||
@@ -409,6 +409,7 @@ int phy_mii_ioctl(struct phy_device *phydev, | |||
409 | 409 | ||
410 | return 0; | 410 | return 0; |
411 | } | 411 | } |
412 | EXPORT_SYMBOL(phy_mii_ioctl); | ||
412 | 413 | ||
413 | /** | 414 | /** |
414 | * phy_start_aneg - start auto-negotiation for this PHY device | 415 | * phy_start_aneg - start auto-negotiation for this PHY device |
diff --git a/drivers/net/ppp_mppe.c b/drivers/net/ppp_mppe.c index f79cf87a2bff..c0b6d19d1457 100644 --- a/drivers/net/ppp_mppe.c +++ b/drivers/net/ppp_mppe.c | |||
@@ -136,7 +136,7 @@ struct ppp_mppe_state { | |||
136 | * Key Derivation, from RFC 3078, RFC 3079. | 136 | * Key Derivation, from RFC 3078, RFC 3079. |
137 | * Equivalent to Get_Key() for MS-CHAP as described in RFC 3079. | 137 | * Equivalent to Get_Key() for MS-CHAP as described in RFC 3079. |
138 | */ | 138 | */ |
139 | static void get_new_key_from_sha(struct ppp_mppe_state * state, unsigned char *InterimKey) | 139 | static void get_new_key_from_sha(struct ppp_mppe_state * state) |
140 | { | 140 | { |
141 | struct hash_desc desc; | 141 | struct hash_desc desc; |
142 | struct scatterlist sg[4]; | 142 | struct scatterlist sg[4]; |
@@ -153,8 +153,6 @@ static void get_new_key_from_sha(struct ppp_mppe_state * state, unsigned char *I | |||
153 | desc.flags = 0; | 153 | desc.flags = 0; |
154 | 154 | ||
155 | crypto_hash_digest(&desc, sg, nbytes, state->sha1_digest); | 155 | crypto_hash_digest(&desc, sg, nbytes, state->sha1_digest); |
156 | |||
157 | memcpy(InterimKey, state->sha1_digest, state->keylen); | ||
158 | } | 156 | } |
159 | 157 | ||
160 | /* | 158 | /* |
@@ -163,21 +161,21 @@ static void get_new_key_from_sha(struct ppp_mppe_state * state, unsigned char *I | |||
163 | */ | 161 | */ |
164 | static void mppe_rekey(struct ppp_mppe_state * state, int initial_key) | 162 | static void mppe_rekey(struct ppp_mppe_state * state, int initial_key) |
165 | { | 163 | { |
166 | unsigned char InterimKey[MPPE_MAX_KEY_LEN]; | ||
167 | struct scatterlist sg_in[1], sg_out[1]; | 164 | struct scatterlist sg_in[1], sg_out[1]; |
168 | struct blkcipher_desc desc = { .tfm = state->arc4 }; | 165 | struct blkcipher_desc desc = { .tfm = state->arc4 }; |
169 | 166 | ||
170 | get_new_key_from_sha(state, InterimKey); | 167 | get_new_key_from_sha(state); |
171 | if (!initial_key) { | 168 | if (!initial_key) { |
172 | crypto_blkcipher_setkey(state->arc4, InterimKey, state->keylen); | 169 | crypto_blkcipher_setkey(state->arc4, state->sha1_digest, |
173 | setup_sg(sg_in, InterimKey, state->keylen); | 170 | state->keylen); |
171 | setup_sg(sg_in, state->sha1_digest, state->keylen); | ||
174 | setup_sg(sg_out, state->session_key, state->keylen); | 172 | setup_sg(sg_out, state->session_key, state->keylen); |
175 | if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in, | 173 | if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in, |
176 | state->keylen) != 0) { | 174 | state->keylen) != 0) { |
177 | printk(KERN_WARNING "mppe_rekey: cipher_encrypt failed\n"); | 175 | printk(KERN_WARNING "mppe_rekey: cipher_encrypt failed\n"); |
178 | } | 176 | } |
179 | } else { | 177 | } else { |
180 | memcpy(state->session_key, InterimKey, state->keylen); | 178 | memcpy(state->session_key, state->sha1_digest, state->keylen); |
181 | } | 179 | } |
182 | if (state->keylen == 8) { | 180 | if (state->keylen == 8) { |
183 | /* See RFC 3078 */ | 181 | /* See RFC 3078 */ |
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c index 0d7f570b9a54..9b30cd600a64 100644 --- a/drivers/net/pppoe.c +++ b/drivers/net/pppoe.c | |||
@@ -879,8 +879,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb) | |||
879 | dev->hard_header(skb, dev, ETH_P_PPP_SES, | 879 | dev->hard_header(skb, dev, ETH_P_PPP_SES, |
880 | po->pppoe_pa.remote, NULL, data_len); | 880 | po->pppoe_pa.remote, NULL, data_len); |
881 | 881 | ||
882 | if (dev_queue_xmit(skb) < 0) | 882 | dev_queue_xmit(skb); |
883 | goto abort; | ||
884 | 883 | ||
885 | return 1; | 884 | return 1; |
886 | 885 | ||
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c index 266e8b38fe10..abe91cb595f4 100644 --- a/drivers/net/pppol2tp.c +++ b/drivers/net/pppol2tp.c | |||
@@ -491,44 +491,46 @@ static int pppol2tp_recv_core(struct sock *sock, struct sk_buff *skb) | |||
491 | u16 hdrflags; | 491 | u16 hdrflags; |
492 | u16 tunnel_id, session_id; | 492 | u16 tunnel_id, session_id; |
493 | int length; | 493 | int length; |
494 | struct udphdr *uh; | 494 | int offset; |
495 | 495 | ||
496 | tunnel = pppol2tp_sock_to_tunnel(sock); | 496 | tunnel = pppol2tp_sock_to_tunnel(sock); |
497 | if (tunnel == NULL) | 497 | if (tunnel == NULL) |
498 | goto error; | 498 | goto error; |
499 | 499 | ||
500 | /* UDP always verifies the packet length. */ | ||
501 | __skb_pull(skb, sizeof(struct udphdr)); | ||
502 | |||
500 | /* Short packet? */ | 503 | /* Short packet? */ |
501 | if (skb->len < sizeof(struct udphdr)) { | 504 | if (!pskb_may_pull(skb, 12)) { |
502 | PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_INFO, | 505 | PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_INFO, |
503 | "%s: recv short packet (len=%d)\n", tunnel->name, skb->len); | 506 | "%s: recv short packet (len=%d)\n", tunnel->name, skb->len); |
504 | goto error; | 507 | goto error; |
505 | } | 508 | } |
506 | 509 | ||
507 | /* Point to L2TP header */ | 510 | /* Point to L2TP header */ |
508 | ptr = skb->data + sizeof(struct udphdr); | 511 | ptr = skb->data; |
509 | 512 | ||
510 | /* Get L2TP header flags */ | 513 | /* Get L2TP header flags */ |
511 | hdrflags = ntohs(*(__be16*)ptr); | 514 | hdrflags = ntohs(*(__be16*)ptr); |
512 | 515 | ||
513 | /* Trace packet contents, if enabled */ | 516 | /* Trace packet contents, if enabled */ |
514 | if (tunnel->debug & PPPOL2TP_MSG_DATA) { | 517 | if (tunnel->debug & PPPOL2TP_MSG_DATA) { |
518 | length = min(16u, skb->len); | ||
519 | if (!pskb_may_pull(skb, length)) | ||
520 | goto error; | ||
521 | |||
515 | printk(KERN_DEBUG "%s: recv: ", tunnel->name); | 522 | printk(KERN_DEBUG "%s: recv: ", tunnel->name); |
516 | 523 | ||
517 | for (length = 0; length < 16; length++) | 524 | offset = 0; |
518 | printk(" %02X", ptr[length]); | 525 | do { |
526 | printk(" %02X", ptr[offset]); | ||
527 | } while (++offset < length); | ||
528 | |||
519 | printk("\n"); | 529 | printk("\n"); |
520 | } | 530 | } |
521 | 531 | ||
522 | /* Get length of L2TP packet */ | 532 | /* Get length of L2TP packet */ |
523 | uh = (struct udphdr *) skb_transport_header(skb); | 533 | length = skb->len; |
524 | length = ntohs(uh->len) - sizeof(struct udphdr); | ||
525 | |||
526 | /* Too short? */ | ||
527 | if (length < 12) { | ||
528 | PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_INFO, | ||
529 | "%s: recv short L2TP packet (len=%d)\n", tunnel->name, length); | ||
530 | goto error; | ||
531 | } | ||
532 | 534 | ||
533 | /* If type is control packet, it is handled by userspace. */ | 535 | /* If type is control packet, it is handled by userspace. */ |
534 | if (hdrflags & L2TP_HDRFLAG_T) { | 536 | if (hdrflags & L2TP_HDRFLAG_T) { |
@@ -606,7 +608,6 @@ static int pppol2tp_recv_core(struct sock *sock, struct sk_buff *skb) | |||
606 | "%s: recv data has no seq numbers when required. " | 608 | "%s: recv data has no seq numbers when required. " |
607 | "Discarding\n", session->name); | 609 | "Discarding\n", session->name); |
608 | session->stats.rx_seq_discards++; | 610 | session->stats.rx_seq_discards++; |
609 | session->stats.rx_errors++; | ||
610 | goto discard; | 611 | goto discard; |
611 | } | 612 | } |
612 | 613 | ||
@@ -625,7 +626,6 @@ static int pppol2tp_recv_core(struct sock *sock, struct sk_buff *skb) | |||
625 | "%s: recv data has no seq numbers when required. " | 626 | "%s: recv data has no seq numbers when required. " |
626 | "Discarding\n", session->name); | 627 | "Discarding\n", session->name); |
627 | session->stats.rx_seq_discards++; | 628 | session->stats.rx_seq_discards++; |
628 | session->stats.rx_errors++; | ||
629 | goto discard; | 629 | goto discard; |
630 | } | 630 | } |
631 | 631 | ||
@@ -634,10 +634,14 @@ static int pppol2tp_recv_core(struct sock *sock, struct sk_buff *skb) | |||
634 | } | 634 | } |
635 | 635 | ||
636 | /* If offset bit set, skip it. */ | 636 | /* If offset bit set, skip it. */ |
637 | if (hdrflags & L2TP_HDRFLAG_O) | 637 | if (hdrflags & L2TP_HDRFLAG_O) { |
638 | ptr += 2 + ntohs(*(__be16 *) ptr); | 638 | offset = ntohs(*(__be16 *)ptr); |
639 | skb->transport_header += 2 + offset; | ||
640 | if (!pskb_may_pull(skb, skb_transport_offset(skb) + 2)) | ||
641 | goto discard; | ||
642 | } | ||
639 | 643 | ||
640 | skb_pull(skb, ptr - skb->data); | 644 | __skb_pull(skb, skb_transport_offset(skb)); |
641 | 645 | ||
642 | /* Skip PPP header, if present. In testing, Microsoft L2TP clients | 646 | /* Skip PPP header, if present. In testing, Microsoft L2TP clients |
643 | * don't send the PPP header (PPP header compression enabled), but | 647 | * don't send the PPP header (PPP header compression enabled), but |
@@ -673,7 +677,6 @@ static int pppol2tp_recv_core(struct sock *sock, struct sk_buff *skb) | |||
673 | */ | 677 | */ |
674 | if (PPPOL2TP_SKB_CB(skb)->ns != session->nr) { | 678 | if (PPPOL2TP_SKB_CB(skb)->ns != session->nr) { |
675 | session->stats.rx_seq_discards++; | 679 | session->stats.rx_seq_discards++; |
676 | session->stats.rx_errors++; | ||
677 | PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG, | 680 | PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG, |
678 | "%s: oos pkt %hu len %d discarded, " | 681 | "%s: oos pkt %hu len %d discarded, " |
679 | "waiting for %hu, reorder_q_len=%d\n", | 682 | "waiting for %hu, reorder_q_len=%d\n", |
@@ -698,6 +701,7 @@ static int pppol2tp_recv_core(struct sock *sock, struct sk_buff *skb) | |||
698 | return 0; | 701 | return 0; |
699 | 702 | ||
700 | discard: | 703 | discard: |
704 | session->stats.rx_errors++; | ||
701 | kfree_skb(skb); | 705 | kfree_skb(skb); |
702 | sock_put(session->sock); | 706 | sock_put(session->sock); |
703 | 707 | ||
@@ -958,7 +962,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb) | |||
958 | int data_len = skb->len; | 962 | int data_len = skb->len; |
959 | struct inet_sock *inet; | 963 | struct inet_sock *inet; |
960 | __wsum csum = 0; | 964 | __wsum csum = 0; |
961 | struct sk_buff *skb2 = NULL; | ||
962 | struct udphdr *uh; | 965 | struct udphdr *uh; |
963 | unsigned int len; | 966 | unsigned int len; |
964 | 967 | ||
@@ -989,41 +992,30 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb) | |||
989 | */ | 992 | */ |
990 | headroom = NET_SKB_PAD + sizeof(struct iphdr) + | 993 | headroom = NET_SKB_PAD + sizeof(struct iphdr) + |
991 | sizeof(struct udphdr) + hdr_len + sizeof(ppph); | 994 | sizeof(struct udphdr) + hdr_len + sizeof(ppph); |
992 | if (skb_headroom(skb) < headroom) { | 995 | if (skb_cow_head(skb, headroom)) |
993 | skb2 = skb_realloc_headroom(skb, headroom); | 996 | goto abort; |
994 | if (skb2 == NULL) | ||
995 | goto abort; | ||
996 | } else | ||
997 | skb2 = skb; | ||
998 | |||
999 | /* Check that the socket has room */ | ||
1000 | if (atomic_read(&sk_tun->sk_wmem_alloc) < sk_tun->sk_sndbuf) | ||
1001 | skb_set_owner_w(skb2, sk_tun); | ||
1002 | else | ||
1003 | goto discard; | ||
1004 | 997 | ||
1005 | /* Setup PPP header */ | 998 | /* Setup PPP header */ |
1006 | skb_push(skb2, sizeof(ppph)); | 999 | __skb_push(skb, sizeof(ppph)); |
1007 | skb2->data[0] = ppph[0]; | 1000 | skb->data[0] = ppph[0]; |
1008 | skb2->data[1] = ppph[1]; | 1001 | skb->data[1] = ppph[1]; |
1009 | 1002 | ||
1010 | /* Setup L2TP header */ | 1003 | /* Setup L2TP header */ |
1011 | skb_push(skb2, hdr_len); | 1004 | pppol2tp_build_l2tp_header(session, __skb_push(skb, hdr_len)); |
1012 | pppol2tp_build_l2tp_header(session, skb2->data); | ||
1013 | 1005 | ||
1014 | /* Setup UDP header */ | 1006 | /* Setup UDP header */ |
1015 | inet = inet_sk(sk_tun); | 1007 | inet = inet_sk(sk_tun); |
1016 | skb_push(skb2, sizeof(struct udphdr)); | 1008 | __skb_push(skb, sizeof(*uh)); |
1017 | skb_reset_transport_header(skb2); | 1009 | skb_reset_transport_header(skb); |
1018 | uh = (struct udphdr *) skb2->data; | 1010 | uh = udp_hdr(skb); |
1019 | uh->source = inet->sport; | 1011 | uh->source = inet->sport; |
1020 | uh->dest = inet->dport; | 1012 | uh->dest = inet->dport; |
1021 | uh->len = htons(sizeof(struct udphdr) + hdr_len + sizeof(ppph) + data_len); | 1013 | uh->len = htons(sizeof(struct udphdr) + hdr_len + sizeof(ppph) + data_len); |
1022 | uh->check = 0; | 1014 | uh->check = 0; |
1023 | 1015 | ||
1024 | /* Calculate UDP checksum if configured to do so */ | 1016 | /* *BROKEN* Calculate UDP checksum if configured to do so */ |
1025 | if (sk_tun->sk_no_check != UDP_CSUM_NOXMIT) | 1017 | if (sk_tun->sk_no_check != UDP_CSUM_NOXMIT) |
1026 | csum = udp_csum_outgoing(sk_tun, skb2); | 1018 | csum = udp_csum_outgoing(sk_tun, skb); |
1027 | 1019 | ||
1028 | /* Debug */ | 1020 | /* Debug */ |
1029 | if (session->send_seq) | 1021 | if (session->send_seq) |
@@ -1036,7 +1028,7 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb) | |||
1036 | 1028 | ||
1037 | if (session->debug & PPPOL2TP_MSG_DATA) { | 1029 | if (session->debug & PPPOL2TP_MSG_DATA) { |
1038 | int i; | 1030 | int i; |
1039 | unsigned char *datap = skb2->data; | 1031 | unsigned char *datap = skb->data; |
1040 | 1032 | ||
1041 | printk(KERN_DEBUG "%s: xmit:", session->name); | 1033 | printk(KERN_DEBUG "%s: xmit:", session->name); |
1042 | for (i = 0; i < data_len; i++) { | 1034 | for (i = 0; i < data_len; i++) { |
@@ -1049,18 +1041,18 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb) | |||
1049 | printk("\n"); | 1041 | printk("\n"); |
1050 | } | 1042 | } |
1051 | 1043 | ||
1052 | memset(&(IPCB(skb2)->opt), 0, sizeof(IPCB(skb2)->opt)); | 1044 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); |
1053 | IPCB(skb2)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | | 1045 | IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | |
1054 | IPSKB_REROUTED); | 1046 | IPSKB_REROUTED); |
1055 | nf_reset(skb2); | 1047 | nf_reset(skb); |
1056 | 1048 | ||
1057 | /* Get routing info from the tunnel socket */ | 1049 | /* Get routing info from the tunnel socket */ |
1058 | dst_release(skb2->dst); | 1050 | dst_release(skb->dst); |
1059 | skb2->dst = sk_dst_get(sk_tun); | 1051 | skb->dst = sk_dst_get(sk_tun); |
1060 | 1052 | ||
1061 | /* Queue the packet to IP for output */ | 1053 | /* Queue the packet to IP for output */ |
1062 | len = skb2->len; | 1054 | len = skb->len; |
1063 | rc = ip_queue_xmit(skb2, 1); | 1055 | rc = ip_queue_xmit(skb, 1); |
1064 | 1056 | ||
1065 | /* Update stats */ | 1057 | /* Update stats */ |
1066 | if (rc >= 0) { | 1058 | if (rc >= 0) { |
@@ -1073,17 +1065,12 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb) | |||
1073 | session->stats.tx_errors++; | 1065 | session->stats.tx_errors++; |
1074 | } | 1066 | } |
1075 | 1067 | ||
1076 | /* Free the original skb */ | ||
1077 | kfree_skb(skb); | ||
1078 | |||
1079 | return 1; | 1068 | return 1; |
1080 | 1069 | ||
1081 | discard: | ||
1082 | /* Free the new skb. Caller will free original skb. */ | ||
1083 | if (skb2 != skb) | ||
1084 | kfree_skb(skb2); | ||
1085 | abort: | 1070 | abort: |
1086 | return 0; | 1071 | /* Free the original skb */ |
1072 | kfree_skb(skb); | ||
1073 | return 1; | ||
1087 | } | 1074 | } |
1088 | 1075 | ||
1089 | /***************************************************************************** | 1076 | /***************************************************************************** |
@@ -1326,12 +1313,14 @@ static struct sock *pppol2tp_prepare_tunnel_socket(int fd, u16 tunnel_id, | |||
1326 | goto err; | 1313 | goto err; |
1327 | } | 1314 | } |
1328 | 1315 | ||
1316 | sk = sock->sk; | ||
1317 | |||
1329 | /* Quick sanity checks */ | 1318 | /* Quick sanity checks */ |
1330 | err = -ESOCKTNOSUPPORT; | 1319 | err = -EPROTONOSUPPORT; |
1331 | if (sock->type != SOCK_DGRAM) { | 1320 | if (sk->sk_protocol != IPPROTO_UDP) { |
1332 | PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR, | 1321 | PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR, |
1333 | "tunl %hu: fd %d wrong type, got %d, expected %d\n", | 1322 | "tunl %hu: fd %d wrong protocol, got %d, expected %d\n", |
1334 | tunnel_id, fd, sock->type, SOCK_DGRAM); | 1323 | tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP); |
1335 | goto err; | 1324 | goto err; |
1336 | } | 1325 | } |
1337 | err = -EAFNOSUPPORT; | 1326 | err = -EAFNOSUPPORT; |
@@ -1343,7 +1332,6 @@ static struct sock *pppol2tp_prepare_tunnel_socket(int fd, u16 tunnel_id, | |||
1343 | } | 1332 | } |
1344 | 1333 | ||
1345 | err = -ENOTCONN; | 1334 | err = -ENOTCONN; |
1346 | sk = sock->sk; | ||
1347 | 1335 | ||
1348 | /* Check if this socket has already been prepped */ | 1336 | /* Check if this socket has already been prepped */ |
1349 | tunnel = (struct pppol2tp_tunnel *)sk->sk_user_data; | 1337 | tunnel = (struct pppol2tp_tunnel *)sk->sk_user_data; |
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c index 69da95b5ad0c..ea151315050c 100755 --- a/drivers/net/qla3xxx.c +++ b/drivers/net/qla3xxx.c | |||
@@ -2248,6 +2248,13 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev, | |||
2248 | qdev->rsp_consumer_index) && (work_done < work_to_do)) { | 2248 | qdev->rsp_consumer_index) && (work_done < work_to_do)) { |
2249 | 2249 | ||
2250 | net_rsp = qdev->rsp_current; | 2250 | net_rsp = qdev->rsp_current; |
2251 | rmb(); | ||
2252 | /* | ||
2253 | * Fix 4032 chipe undocumented "feature" where bit-8 is set if the | ||
2254 | * inbound completion is for a VLAN. | ||
2255 | */ | ||
2256 | if (qdev->device_id == QL3032_DEVICE_ID) | ||
2257 | net_rsp->opcode &= 0x7f; | ||
2251 | switch (net_rsp->opcode) { | 2258 | switch (net_rsp->opcode) { |
2252 | 2259 | ||
2253 | case OPCODE_OB_MAC_IOCB_FN0: | 2260 | case OPCODE_OB_MAC_IOCB_FN0: |
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index b85ab4a8f2a3..c921ec32c232 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -1228,7 +1228,10 @@ static void rtl8169_hw_phy_config(struct net_device *dev) | |||
1228 | return; | 1228 | return; |
1229 | } | 1229 | } |
1230 | 1230 | ||
1231 | /* phy config for RTL8169s mac_version C chip */ | 1231 | if ((tp->mac_version != RTL_GIGA_MAC_VER_02) && |
1232 | (tp->mac_version != RTL_GIGA_MAC_VER_03)) | ||
1233 | return; | ||
1234 | |||
1232 | mdio_write(ioaddr, 31, 0x0001); //w 31 2 0 1 | 1235 | mdio_write(ioaddr, 31, 0x0001); //w 31 2 0 1 |
1233 | mdio_write(ioaddr, 21, 0x1000); //w 21 15 0 1000 | 1236 | mdio_write(ioaddr, 21, 0x1000); //w 21 15 0 1000 |
1234 | mdio_write(ioaddr, 24, 0x65c7); //w 24 15 0 65c7 | 1237 | mdio_write(ioaddr, 24, 0x65c7); //w 24 15 0 65c7 |
@@ -2567,6 +2570,15 @@ static void rtl8169_tx_interrupt(struct net_device *dev, | |||
2567 | (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) { | 2570 | (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) { |
2568 | netif_wake_queue(dev); | 2571 | netif_wake_queue(dev); |
2569 | } | 2572 | } |
2573 | /* | ||
2574 | * 8168 hack: TxPoll requests are lost when the Tx packets are | ||
2575 | * too close. Let's kick an extra TxPoll request when a burst | ||
2576 | * of start_xmit activity is detected (if it is not detected, | ||
2577 | * it is slow enough). -- FR | ||
2578 | */ | ||
2579 | smp_rmb(); | ||
2580 | if (tp->cur_tx != dirty_tx) | ||
2581 | RTL_W8(TxPoll, NPQ); | ||
2570 | } | 2582 | } |
2571 | } | 2583 | } |
2572 | 2584 | ||
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 5d812de65d90..162489b9f599 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c | |||
@@ -51,7 +51,7 @@ | |||
51 | #include "sky2.h" | 51 | #include "sky2.h" |
52 | 52 | ||
53 | #define DRV_NAME "sky2" | 53 | #define DRV_NAME "sky2" |
54 | #define DRV_VERSION "1.17" | 54 | #define DRV_VERSION "1.18" |
55 | #define PFX DRV_NAME " " | 55 | #define PFX DRV_NAME " " |
56 | 56 | ||
57 | /* | 57 | /* |
@@ -118,12 +118,15 @@ static const struct pci_device_id sky2_id_table[] = { | |||
118 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4351) }, /* 88E8036 */ | 118 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4351) }, /* 88E8036 */ |
119 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4352) }, /* 88E8038 */ | 119 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4352) }, /* 88E8038 */ |
120 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4353) }, /* 88E8039 */ | 120 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4353) }, /* 88E8039 */ |
121 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4354) }, /* 88E8040 */ | ||
121 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4356) }, /* 88EC033 */ | 122 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4356) }, /* 88EC033 */ |
123 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x435A) }, /* 88E8048 */ | ||
122 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4360) }, /* 88E8052 */ | 124 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4360) }, /* 88E8052 */ |
123 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) }, /* 88E8050 */ | 125 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) }, /* 88E8050 */ |
124 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) }, /* 88E8053 */ | 126 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) }, /* 88E8053 */ |
125 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) }, /* 88E8055 */ | 127 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) }, /* 88E8055 */ |
126 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) }, /* 88E8056 */ | 128 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) }, /* 88E8056 */ |
129 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4365) }, /* 88E8070 */ | ||
127 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) }, /* 88EC036 */ | 130 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) }, /* 88EC036 */ |
128 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) }, /* 88EC032 */ | 131 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) }, /* 88EC032 */ |
129 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */ | 132 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */ |
@@ -147,6 +150,7 @@ static const char *yukon2_name[] = { | |||
147 | "Extreme", /* 0xb5 */ | 150 | "Extreme", /* 0xb5 */ |
148 | "EC", /* 0xb6 */ | 151 | "EC", /* 0xb6 */ |
149 | "FE", /* 0xb7 */ | 152 | "FE", /* 0xb7 */ |
153 | "FE+", /* 0xb8 */ | ||
150 | }; | 154 | }; |
151 | 155 | ||
152 | static void sky2_set_multicast(struct net_device *dev); | 156 | static void sky2_set_multicast(struct net_device *dev); |
@@ -217,8 +221,7 @@ static void sky2_power_on(struct sky2_hw *hw) | |||
217 | else | 221 | else |
218 | sky2_write8(hw, B2_Y2_CLK_GATE, 0); | 222 | sky2_write8(hw, B2_Y2_CLK_GATE, 0); |
219 | 223 | ||
220 | if (hw->chip_id == CHIP_ID_YUKON_EC_U || | 224 | if (hw->flags & SKY2_HW_ADV_POWER_CTL) { |
221 | hw->chip_id == CHIP_ID_YUKON_EX) { | ||
222 | u32 reg; | 225 | u32 reg; |
223 | 226 | ||
224 | sky2_pci_write32(hw, PCI_DEV_REG3, 0); | 227 | sky2_pci_write32(hw, PCI_DEV_REG3, 0); |
@@ -311,10 +314,8 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port) | |||
311 | struct sky2_port *sky2 = netdev_priv(hw->dev[port]); | 314 | struct sky2_port *sky2 = netdev_priv(hw->dev[port]); |
312 | u16 ctrl, ct1000, adv, pg, ledctrl, ledover, reg; | 315 | u16 ctrl, ct1000, adv, pg, ledctrl, ledover, reg; |
313 | 316 | ||
314 | if (sky2->autoneg == AUTONEG_ENABLE | 317 | if (sky2->autoneg == AUTONEG_ENABLE && |
315 | && !(hw->chip_id == CHIP_ID_YUKON_XL | 318 | !(hw->flags & SKY2_HW_NEWER_PHY)) { |
316 | || hw->chip_id == CHIP_ID_YUKON_EC_U | ||
317 | || hw->chip_id == CHIP_ID_YUKON_EX)) { | ||
318 | u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL); | 319 | u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL); |
319 | 320 | ||
320 | ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK | | 321 | ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK | |
@@ -334,9 +335,19 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port) | |||
334 | 335 | ||
335 | ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); | 336 | ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); |
336 | if (sky2_is_copper(hw)) { | 337 | if (sky2_is_copper(hw)) { |
337 | if (hw->chip_id == CHIP_ID_YUKON_FE) { | 338 | if (!(hw->flags & SKY2_HW_GIGABIT)) { |
338 | /* enable automatic crossover */ | 339 | /* enable automatic crossover */ |
339 | ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) >> 1; | 340 | ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) >> 1; |
341 | |||
342 | if (hw->chip_id == CHIP_ID_YUKON_FE_P && | ||
343 | hw->chip_rev == CHIP_REV_YU_FE2_A0) { | ||
344 | u16 spec; | ||
345 | |||
346 | /* Enable Class A driver for FE+ A0 */ | ||
347 | spec = gm_phy_read(hw, port, PHY_MARV_FE_SPEC_2); | ||
348 | spec |= PHY_M_FESC_SEL_CL_A; | ||
349 | gm_phy_write(hw, port, PHY_MARV_FE_SPEC_2, spec); | ||
350 | } | ||
340 | } else { | 351 | } else { |
341 | /* disable energy detect */ | 352 | /* disable energy detect */ |
342 | ctrl &= ~PHY_M_PC_EN_DET_MSK; | 353 | ctrl &= ~PHY_M_PC_EN_DET_MSK; |
@@ -346,9 +357,7 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port) | |||
346 | 357 | ||
347 | /* downshift on PHY 88E1112 and 88E1149 is changed */ | 358 | /* downshift on PHY 88E1112 and 88E1149 is changed */ |
348 | if (sky2->autoneg == AUTONEG_ENABLE | 359 | if (sky2->autoneg == AUTONEG_ENABLE |
349 | && (hw->chip_id == CHIP_ID_YUKON_XL | 360 | && (hw->flags & SKY2_HW_NEWER_PHY)) { |
350 | || hw->chip_id == CHIP_ID_YUKON_EC_U | ||
351 | || hw->chip_id == CHIP_ID_YUKON_EX)) { | ||
352 | /* set downshift counter to 3x and enable downshift */ | 361 | /* set downshift counter to 3x and enable downshift */ |
353 | ctrl &= ~PHY_M_PC_DSC_MSK; | 362 | ctrl &= ~PHY_M_PC_DSC_MSK; |
354 | ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA; | 363 | ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA; |
@@ -364,7 +373,7 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port) | |||
364 | gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); | 373 | gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); |
365 | 374 | ||
366 | /* special setup for PHY 88E1112 Fiber */ | 375 | /* special setup for PHY 88E1112 Fiber */ |
367 | if (hw->chip_id == CHIP_ID_YUKON_XL && !sky2_is_copper(hw)) { | 376 | if (hw->chip_id == CHIP_ID_YUKON_XL && (hw->flags & SKY2_HW_FIBRE_PHY)) { |
368 | pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); | 377 | pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); |
369 | 378 | ||
370 | /* Fiber: select 1000BASE-X only mode MAC Specific Ctrl Reg. */ | 379 | /* Fiber: select 1000BASE-X only mode MAC Specific Ctrl Reg. */ |
@@ -455,7 +464,7 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port) | |||
455 | 464 | ||
456 | gma_write16(hw, port, GM_GP_CTRL, reg); | 465 | gma_write16(hw, port, GM_GP_CTRL, reg); |
457 | 466 | ||
458 | if (hw->chip_id != CHIP_ID_YUKON_FE) | 467 | if (hw->flags & SKY2_HW_GIGABIT) |
459 | gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000); | 468 | gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000); |
460 | 469 | ||
461 | gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv); | 470 | gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv); |
@@ -479,6 +488,23 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port) | |||
479 | gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl); | 488 | gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl); |
480 | break; | 489 | break; |
481 | 490 | ||
491 | case CHIP_ID_YUKON_FE_P: | ||
492 | /* Enable Link Partner Next Page */ | ||
493 | ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); | ||
494 | ctrl |= PHY_M_PC_ENA_LIP_NP; | ||
495 | |||
496 | /* disable Energy Detect and enable scrambler */ | ||
497 | ctrl &= ~(PHY_M_PC_ENA_ENE_DT | PHY_M_PC_DIS_SCRAMB); | ||
498 | gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); | ||
499 | |||
500 | /* set LED2 -> ACT, LED1 -> LINK, LED0 -> SPEED */ | ||
501 | ctrl = PHY_M_FELP_LED2_CTRL(LED_PAR_CTRL_ACT_BL) | | ||
502 | PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_LINK) | | ||
503 | PHY_M_FELP_LED0_CTRL(LED_PAR_CTRL_SPEED); | ||
504 | |||
505 | gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl); | ||
506 | break; | ||
507 | |||
482 | case CHIP_ID_YUKON_XL: | 508 | case CHIP_ID_YUKON_XL: |
483 | pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); | 509 | pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); |
484 | 510 | ||
@@ -548,7 +574,13 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port) | |||
548 | 574 | ||
549 | /* set page register to 0 */ | 575 | /* set page register to 0 */ |
550 | gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0); | 576 | gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0); |
577 | } else if (hw->chip_id == CHIP_ID_YUKON_FE_P && | ||
578 | hw->chip_rev == CHIP_REV_YU_FE2_A0) { | ||
579 | /* apply workaround for integrated resistors calibration */ | ||
580 | gm_phy_write(hw, port, PHY_MARV_PAGE_ADDR, 17); | ||
581 | gm_phy_write(hw, port, PHY_MARV_PAGE_DATA, 0x3f60); | ||
551 | } else if (hw->chip_id != CHIP_ID_YUKON_EX) { | 582 | } else if (hw->chip_id != CHIP_ID_YUKON_EX) { |
583 | /* no effect on Yukon-XL */ | ||
552 | gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl); | 584 | gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl); |
553 | 585 | ||
554 | if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) { | 586 | if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) { |
@@ -669,25 +701,25 @@ static void sky2_wol_init(struct sky2_port *sky2) | |||
669 | 701 | ||
670 | static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port) | 702 | static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port) |
671 | { | 703 | { |
672 | if (hw->chip_id == CHIP_ID_YUKON_EX && hw->chip_rev != CHIP_REV_YU_EX_A0) { | 704 | struct net_device *dev = hw->dev[port]; |
705 | |||
706 | if (dev->mtu <= ETH_DATA_LEN) | ||
673 | sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), | 707 | sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), |
674 | TX_STFW_ENA | | 708 | TX_JUMBO_DIS | TX_STFW_ENA); |
675 | (hw->dev[port]->mtu > ETH_DATA_LEN) ? TX_JUMBO_ENA : TX_JUMBO_DIS); | ||
676 | } else { | ||
677 | if (hw->dev[port]->mtu > ETH_DATA_LEN) { | ||
678 | /* set Tx GMAC FIFO Almost Empty Threshold */ | ||
679 | sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR), | ||
680 | (ECU_JUMBO_WM << 16) | ECU_AE_THR); | ||
681 | 709 | ||
682 | sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), | 710 | else if (hw->chip_id != CHIP_ID_YUKON_EC_U) |
683 | TX_JUMBO_ENA | TX_STFW_DIS); | 711 | sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), |
712 | TX_STFW_ENA | TX_JUMBO_ENA); | ||
713 | else { | ||
714 | /* set Tx GMAC FIFO Almost Empty Threshold */ | ||
715 | sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR), | ||
716 | (ECU_JUMBO_WM << 16) | ECU_AE_THR); | ||
684 | 717 | ||
685 | /* Can't do offload because of lack of store/forward */ | 718 | sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), |
686 | hw->dev[port]->features &= ~(NETIF_F_TSO | NETIF_F_SG | 719 | TX_JUMBO_ENA | TX_STFW_DIS); |
687 | | NETIF_F_ALL_CSUM); | 720 | |
688 | } else | 721 | /* Can't do offload because of lack of store/forward */ |
689 | sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), | 722 | dev->features &= ~(NETIF_F_TSO | NETIF_F_SG | NETIF_F_ALL_CSUM); |
690 | TX_JUMBO_DIS | TX_STFW_ENA); | ||
691 | } | 723 | } |
692 | } | 724 | } |
693 | 725 | ||
@@ -773,7 +805,8 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port) | |||
773 | /* Configure Rx MAC FIFO */ | 805 | /* Configure Rx MAC FIFO */ |
774 | sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR); | 806 | sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR); |
775 | rx_reg = GMF_OPER_ON | GMF_RX_F_FL_ON; | 807 | rx_reg = GMF_OPER_ON | GMF_RX_F_FL_ON; |
776 | if (hw->chip_id == CHIP_ID_YUKON_EX) | 808 | if (hw->chip_id == CHIP_ID_YUKON_EX || |
809 | hw->chip_id == CHIP_ID_YUKON_FE_P) | ||
777 | rx_reg |= GMF_RX_OVER_ON; | 810 | rx_reg |= GMF_RX_OVER_ON; |
778 | 811 | ||
779 | sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), rx_reg); | 812 | sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), rx_reg); |
@@ -782,13 +815,19 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port) | |||
782 | sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR); | 815 | sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR); |
783 | 816 | ||
784 | /* Set threshold to 0xa (64 bytes) + 1 to workaround pause bug */ | 817 | /* Set threshold to 0xa (64 bytes) + 1 to workaround pause bug */ |
785 | sky2_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF+1); | 818 | reg = RX_GMF_FL_THR_DEF + 1; |
819 | /* Another magic mystery workaround from sk98lin */ | ||
820 | if (hw->chip_id == CHIP_ID_YUKON_FE_P && | ||
821 | hw->chip_rev == CHIP_REV_YU_FE2_A0) | ||
822 | reg = 0x178; | ||
823 | sky2_write16(hw, SK_REG(port, RX_GMF_FL_THR), reg); | ||
786 | 824 | ||
787 | /* Configure Tx MAC FIFO */ | 825 | /* Configure Tx MAC FIFO */ |
788 | sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR); | 826 | sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR); |
789 | sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON); | 827 | sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON); |
790 | 828 | ||
791 | if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX) { | 829 | /* On chips without ram buffer, pause is controled by MAC level */ |
830 | if (sky2_read8(hw, B2_E_0) == 0) { | ||
792 | sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8); | 831 | sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8); |
793 | sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8); | 832 | sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8); |
794 | 833 | ||
@@ -871,6 +910,20 @@ static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2) | |||
871 | return le; | 910 | return le; |
872 | } | 911 | } |
873 | 912 | ||
913 | static void tx_init(struct sky2_port *sky2) | ||
914 | { | ||
915 | struct sky2_tx_le *le; | ||
916 | |||
917 | sky2->tx_prod = sky2->tx_cons = 0; | ||
918 | sky2->tx_tcpsum = 0; | ||
919 | sky2->tx_last_mss = 0; | ||
920 | |||
921 | le = get_tx_le(sky2); | ||
922 | le->addr = 0; | ||
923 | le->opcode = OP_ADDR64 | HW_OWNER; | ||
924 | sky2->tx_addr64 = 0; | ||
925 | } | ||
926 | |||
874 | static inline struct tx_ring_info *tx_le_re(struct sky2_port *sky2, | 927 | static inline struct tx_ring_info *tx_le_re(struct sky2_port *sky2, |
875 | struct sky2_tx_le *le) | 928 | struct sky2_tx_le *le) |
876 | { | 929 | { |
@@ -967,19 +1020,15 @@ static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re) | |||
967 | */ | 1020 | */ |
968 | static void rx_set_checksum(struct sky2_port *sky2) | 1021 | static void rx_set_checksum(struct sky2_port *sky2) |
969 | { | 1022 | { |
970 | struct sky2_rx_le *le; | 1023 | struct sky2_rx_le *le = sky2_next_rx(sky2); |
971 | |||
972 | if (sky2->hw->chip_id != CHIP_ID_YUKON_EX) { | ||
973 | le = sky2_next_rx(sky2); | ||
974 | le->addr = cpu_to_le32((ETH_HLEN << 16) | ETH_HLEN); | ||
975 | le->ctrl = 0; | ||
976 | le->opcode = OP_TCPSTART | HW_OWNER; | ||
977 | 1024 | ||
978 | sky2_write32(sky2->hw, | 1025 | le->addr = cpu_to_le32((ETH_HLEN << 16) | ETH_HLEN); |
979 | Q_ADDR(rxqaddr[sky2->port], Q_CSR), | 1026 | le->ctrl = 0; |
980 | sky2->rx_csum ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM); | 1027 | le->opcode = OP_TCPSTART | HW_OWNER; |
981 | } | ||
982 | 1028 | ||
1029 | sky2_write32(sky2->hw, | ||
1030 | Q_ADDR(rxqaddr[sky2->port], Q_CSR), | ||
1031 | sky2->rx_csum ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM); | ||
983 | } | 1032 | } |
984 | 1033 | ||
985 | /* | 1034 | /* |
@@ -1175,7 +1224,8 @@ static int sky2_rx_start(struct sky2_port *sky2) | |||
1175 | 1224 | ||
1176 | sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1); | 1225 | sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1); |
1177 | 1226 | ||
1178 | rx_set_checksum(sky2); | 1227 | if (!(hw->flags & SKY2_HW_NEW_LE)) |
1228 | rx_set_checksum(sky2); | ||
1179 | 1229 | ||
1180 | /* Space needed for frame data + headers rounded up */ | 1230 | /* Space needed for frame data + headers rounded up */ |
1181 | size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8); | 1231 | size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8); |
@@ -1246,7 +1296,7 @@ static int sky2_up(struct net_device *dev) | |||
1246 | struct sky2_port *sky2 = netdev_priv(dev); | 1296 | struct sky2_port *sky2 = netdev_priv(dev); |
1247 | struct sky2_hw *hw = sky2->hw; | 1297 | struct sky2_hw *hw = sky2->hw; |
1248 | unsigned port = sky2->port; | 1298 | unsigned port = sky2->port; |
1249 | u32 ramsize, imask; | 1299 | u32 imask, ramsize; |
1250 | int cap, err = -ENOMEM; | 1300 | int cap, err = -ENOMEM; |
1251 | struct net_device *otherdev = hw->dev[sky2->port^1]; | 1301 | struct net_device *otherdev = hw->dev[sky2->port^1]; |
1252 | 1302 | ||
@@ -1284,7 +1334,8 @@ static int sky2_up(struct net_device *dev) | |||
1284 | GFP_KERNEL); | 1334 | GFP_KERNEL); |
1285 | if (!sky2->tx_ring) | 1335 | if (!sky2->tx_ring) |
1286 | goto err_out; | 1336 | goto err_out; |
1287 | sky2->tx_prod = sky2->tx_cons = 0; | 1337 | |
1338 | tx_init(sky2); | ||
1288 | 1339 | ||
1289 | sky2->rx_le = pci_alloc_consistent(hw->pdev, RX_LE_BYTES, | 1340 | sky2->rx_le = pci_alloc_consistent(hw->pdev, RX_LE_BYTES, |
1290 | &sky2->rx_le_map); | 1341 | &sky2->rx_le_map); |
@@ -1303,11 +1354,10 @@ static int sky2_up(struct net_device *dev) | |||
1303 | 1354 | ||
1304 | /* Register is number of 4K blocks on internal RAM buffer. */ | 1355 | /* Register is number of 4K blocks on internal RAM buffer. */ |
1305 | ramsize = sky2_read8(hw, B2_E_0) * 4; | 1356 | ramsize = sky2_read8(hw, B2_E_0) * 4; |
1306 | printk(KERN_INFO PFX "%s: ram buffer %dK\n", dev->name, ramsize); | ||
1307 | |||
1308 | if (ramsize > 0) { | 1357 | if (ramsize > 0) { |
1309 | u32 rxspace; | 1358 | u32 rxspace; |
1310 | 1359 | ||
1360 | pr_debug(PFX "%s: ram buffer %dK\n", dev->name, ramsize); | ||
1311 | if (ramsize < 16) | 1361 | if (ramsize < 16) |
1312 | rxspace = ramsize / 2; | 1362 | rxspace = ramsize / 2; |
1313 | else | 1363 | else |
@@ -1436,13 +1486,15 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev) | |||
1436 | /* Check for TCP Segmentation Offload */ | 1486 | /* Check for TCP Segmentation Offload */ |
1437 | mss = skb_shinfo(skb)->gso_size; | 1487 | mss = skb_shinfo(skb)->gso_size; |
1438 | if (mss != 0) { | 1488 | if (mss != 0) { |
1439 | if (hw->chip_id != CHIP_ID_YUKON_EX) | 1489 | |
1490 | if (!(hw->flags & SKY2_HW_NEW_LE)) | ||
1440 | mss += ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb); | 1491 | mss += ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb); |
1441 | 1492 | ||
1442 | if (mss != sky2->tx_last_mss) { | 1493 | if (mss != sky2->tx_last_mss) { |
1443 | le = get_tx_le(sky2); | 1494 | le = get_tx_le(sky2); |
1444 | le->addr = cpu_to_le32(mss); | 1495 | le->addr = cpu_to_le32(mss); |
1445 | if (hw->chip_id == CHIP_ID_YUKON_EX) | 1496 | |
1497 | if (hw->flags & SKY2_HW_NEW_LE) | ||
1446 | le->opcode = OP_MSS | HW_OWNER; | 1498 | le->opcode = OP_MSS | HW_OWNER; |
1447 | else | 1499 | else |
1448 | le->opcode = OP_LRGLEN | HW_OWNER; | 1500 | le->opcode = OP_LRGLEN | HW_OWNER; |
@@ -1468,8 +1520,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev) | |||
1468 | /* Handle TCP checksum offload */ | 1520 | /* Handle TCP checksum offload */ |
1469 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 1521 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
1470 | /* On Yukon EX (some versions) encoding change. */ | 1522 | /* On Yukon EX (some versions) encoding change. */ |
1471 | if (hw->chip_id == CHIP_ID_YUKON_EX | 1523 | if (hw->flags & SKY2_HW_AUTO_TX_SUM) |
1472 | && hw->chip_rev != CHIP_REV_YU_EX_B0) | ||
1473 | ctrl |= CALSUM; /* auto checksum */ | 1524 | ctrl |= CALSUM; /* auto checksum */ |
1474 | else { | 1525 | else { |
1475 | const unsigned offset = skb_transport_offset(skb); | 1526 | const unsigned offset = skb_transport_offset(skb); |
@@ -1622,9 +1673,6 @@ static int sky2_down(struct net_device *dev) | |||
1622 | if (netif_msg_ifdown(sky2)) | 1673 | if (netif_msg_ifdown(sky2)) |
1623 | printk(KERN_INFO PFX "%s: disabling interface\n", dev->name); | 1674 | printk(KERN_INFO PFX "%s: disabling interface\n", dev->name); |
1624 | 1675 | ||
1625 | if (netif_carrier_ok(dev) && --hw->active == 0) | ||
1626 | del_timer(&hw->watchdog_timer); | ||
1627 | |||
1628 | /* Stop more packets from being queued */ | 1676 | /* Stop more packets from being queued */ |
1629 | netif_stop_queue(dev); | 1677 | netif_stop_queue(dev); |
1630 | 1678 | ||
@@ -1708,11 +1756,15 @@ static int sky2_down(struct net_device *dev) | |||
1708 | 1756 | ||
1709 | static u16 sky2_phy_speed(const struct sky2_hw *hw, u16 aux) | 1757 | static u16 sky2_phy_speed(const struct sky2_hw *hw, u16 aux) |
1710 | { | 1758 | { |
1711 | if (!sky2_is_copper(hw)) | 1759 | if (hw->flags & SKY2_HW_FIBRE_PHY) |
1712 | return SPEED_1000; | 1760 | return SPEED_1000; |
1713 | 1761 | ||
1714 | if (hw->chip_id == CHIP_ID_YUKON_FE) | 1762 | if (!(hw->flags & SKY2_HW_GIGABIT)) { |
1715 | return (aux & PHY_M_PS_SPEED_100) ? SPEED_100 : SPEED_10; | 1763 | if (aux & PHY_M_PS_SPEED_100) |
1764 | return SPEED_100; | ||
1765 | else | ||
1766 | return SPEED_10; | ||
1767 | } | ||
1716 | 1768 | ||
1717 | switch (aux & PHY_M_PS_SPEED_MSK) { | 1769 | switch (aux & PHY_M_PS_SPEED_MSK) { |
1718 | case PHY_M_PS_SPEED_1000: | 1770 | case PHY_M_PS_SPEED_1000: |
@@ -1745,17 +1797,13 @@ static void sky2_link_up(struct sky2_port *sky2) | |||
1745 | 1797 | ||
1746 | netif_carrier_on(sky2->netdev); | 1798 | netif_carrier_on(sky2->netdev); |
1747 | 1799 | ||
1748 | if (hw->active++ == 0) | 1800 | mod_timer(&hw->watchdog_timer, jiffies + 1); |
1749 | mod_timer(&hw->watchdog_timer, jiffies + 1); | ||
1750 | |||
1751 | 1801 | ||
1752 | /* Turn on link LED */ | 1802 | /* Turn on link LED */ |
1753 | sky2_write8(hw, SK_REG(port, LNK_LED_REG), | 1803 | sky2_write8(hw, SK_REG(port, LNK_LED_REG), |
1754 | LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF); | 1804 | LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF); |
1755 | 1805 | ||
1756 | if (hw->chip_id == CHIP_ID_YUKON_XL | 1806 | if (hw->flags & SKY2_HW_NEWER_PHY) { |
1757 | || hw->chip_id == CHIP_ID_YUKON_EC_U | ||
1758 | || hw->chip_id == CHIP_ID_YUKON_EX) { | ||
1759 | u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); | 1807 | u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); |
1760 | u16 led = PHY_M_LEDC_LOS_CTRL(1); /* link active */ | 1808 | u16 led = PHY_M_LEDC_LOS_CTRL(1); /* link active */ |
1761 | 1809 | ||
@@ -1800,11 +1848,6 @@ static void sky2_link_down(struct sky2_port *sky2) | |||
1800 | 1848 | ||
1801 | netif_carrier_off(sky2->netdev); | 1849 | netif_carrier_off(sky2->netdev); |
1802 | 1850 | ||
1803 | /* Stop watchdog if both ports are not active */ | ||
1804 | if (--hw->active == 0) | ||
1805 | del_timer(&hw->watchdog_timer); | ||
1806 | |||
1807 | |||
1808 | /* Turn on link LED */ | 1851 | /* Turn on link LED */ |
1809 | sky2_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF); | 1852 | sky2_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF); |
1810 | 1853 | ||
@@ -1847,7 +1890,7 @@ static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux) | |||
1847 | /* Since the pause result bits seem to in different positions on | 1890 | /* Since the pause result bits seem to in different positions on |
1848 | * different chips. look at registers. | 1891 | * different chips. look at registers. |
1849 | */ | 1892 | */ |
1850 | if (!sky2_is_copper(hw)) { | 1893 | if (hw->flags & SKY2_HW_FIBRE_PHY) { |
1851 | /* Shift for bits in fiber PHY */ | 1894 | /* Shift for bits in fiber PHY */ |
1852 | advert &= ~(ADVERTISE_PAUSE_CAP|ADVERTISE_PAUSE_ASYM); | 1895 | advert &= ~(ADVERTISE_PAUSE_CAP|ADVERTISE_PAUSE_ASYM); |
1853 | lpa &= ~(LPA_PAUSE_CAP|LPA_PAUSE_ASYM); | 1896 | lpa &= ~(LPA_PAUSE_CAP|LPA_PAUSE_ASYM); |
@@ -1958,7 +2001,9 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu) | |||
1958 | if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) | 2001 | if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) |
1959 | return -EINVAL; | 2002 | return -EINVAL; |
1960 | 2003 | ||
1961 | if (new_mtu > ETH_DATA_LEN && hw->chip_id == CHIP_ID_YUKON_FE) | 2004 | if (new_mtu > ETH_DATA_LEN && |
2005 | (hw->chip_id == CHIP_ID_YUKON_FE || | ||
2006 | hw->chip_id == CHIP_ID_YUKON_FE_P)) | ||
1962 | return -EINVAL; | 2007 | return -EINVAL; |
1963 | 2008 | ||
1964 | if (!netif_running(dev)) { | 2009 | if (!netif_running(dev)) { |
@@ -1975,7 +2020,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu) | |||
1975 | 2020 | ||
1976 | synchronize_irq(hw->pdev->irq); | 2021 | synchronize_irq(hw->pdev->irq); |
1977 | 2022 | ||
1978 | if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX) | 2023 | if (sky2_read8(hw, B2_E_0) == 0) |
1979 | sky2_set_tx_stfwd(hw, port); | 2024 | sky2_set_tx_stfwd(hw, port); |
1980 | 2025 | ||
1981 | ctl = gma_read16(hw, port, GM_GP_CTRL); | 2026 | ctl = gma_read16(hw, port, GM_GP_CTRL); |
@@ -2103,6 +2148,13 @@ static struct sk_buff *sky2_receive(struct net_device *dev, | |||
2103 | struct sky2_port *sky2 = netdev_priv(dev); | 2148 | struct sky2_port *sky2 = netdev_priv(dev); |
2104 | struct rx_ring_info *re = sky2->rx_ring + sky2->rx_next; | 2149 | struct rx_ring_info *re = sky2->rx_ring + sky2->rx_next; |
2105 | struct sk_buff *skb = NULL; | 2150 | struct sk_buff *skb = NULL; |
2151 | u16 count = (status & GMR_FS_LEN) >> 16; | ||
2152 | |||
2153 | #ifdef SKY2_VLAN_TAG_USED | ||
2154 | /* Account for vlan tag */ | ||
2155 | if (sky2->vlgrp && (status & GMR_FS_VLAN)) | ||
2156 | count -= VLAN_HLEN; | ||
2157 | #endif | ||
2106 | 2158 | ||
2107 | if (unlikely(netif_msg_rx_status(sky2))) | 2159 | if (unlikely(netif_msg_rx_status(sky2))) |
2108 | printk(KERN_DEBUG PFX "%s: rx slot %u status 0x%x len %d\n", | 2160 | printk(KERN_DEBUG PFX "%s: rx slot %u status 0x%x len %d\n", |
@@ -2111,15 +2163,29 @@ static struct sk_buff *sky2_receive(struct net_device *dev, | |||
2111 | sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending; | 2163 | sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending; |
2112 | prefetch(sky2->rx_ring + sky2->rx_next); | 2164 | prefetch(sky2->rx_ring + sky2->rx_next); |
2113 | 2165 | ||
2166 | if (length < ETH_ZLEN || length > sky2->rx_data_size) | ||
2167 | goto len_error; | ||
2168 | |||
2169 | /* This chip has hardware problems that generates bogus status. | ||
2170 | * So do only marginal checking and expect higher level protocols | ||
2171 | * to handle crap frames. | ||
2172 | */ | ||
2173 | if (sky2->hw->chip_id == CHIP_ID_YUKON_FE_P && | ||
2174 | sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0 && | ||
2175 | length != count) | ||
2176 | goto okay; | ||
2177 | |||
2114 | if (status & GMR_FS_ANY_ERR) | 2178 | if (status & GMR_FS_ANY_ERR) |
2115 | goto error; | 2179 | goto error; |
2116 | 2180 | ||
2117 | if (!(status & GMR_FS_RX_OK)) | 2181 | if (!(status & GMR_FS_RX_OK)) |
2118 | goto resubmit; | 2182 | goto resubmit; |
2119 | 2183 | ||
2120 | if (status >> 16 != length) | 2184 | /* if length reported by DMA does not match PHY, packet was truncated */ |
2121 | goto len_mismatch; | 2185 | if (length != count) |
2186 | goto len_error; | ||
2122 | 2187 | ||
2188 | okay: | ||
2123 | if (length < copybreak) | 2189 | if (length < copybreak) |
2124 | skb = receive_copy(sky2, re, length); | 2190 | skb = receive_copy(sky2, re, length); |
2125 | else | 2191 | else |
@@ -2129,10 +2195,14 @@ resubmit: | |||
2129 | 2195 | ||
2130 | return skb; | 2196 | return skb; |
2131 | 2197 | ||
2132 | len_mismatch: | 2198 | len_error: |
2133 | /* Truncation of overlength packets | 2199 | /* Truncation of overlength packets |
2134 | causes PHY length to not match MAC length */ | 2200 | causes PHY length to not match MAC length */ |
2135 | ++sky2->net_stats.rx_length_errors; | 2201 | ++sky2->net_stats.rx_length_errors; |
2202 | if (netif_msg_rx_err(sky2) && net_ratelimit()) | ||
2203 | pr_info(PFX "%s: rx length error: status %#x length %d\n", | ||
2204 | dev->name, status, length); | ||
2205 | goto resubmit; | ||
2136 | 2206 | ||
2137 | error: | 2207 | error: |
2138 | ++sky2->net_stats.rx_errors; | 2208 | ++sky2->net_stats.rx_errors; |
@@ -2202,7 +2272,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do) | |||
2202 | } | 2272 | } |
2203 | 2273 | ||
2204 | /* This chip reports checksum status differently */ | 2274 | /* This chip reports checksum status differently */ |
2205 | if (hw->chip_id == CHIP_ID_YUKON_EX) { | 2275 | if (hw->flags & SKY2_HW_NEW_LE) { |
2206 | if (sky2->rx_csum && | 2276 | if (sky2->rx_csum && |
2207 | (le->css & (CSS_ISIPV4 | CSS_ISIPV6)) && | 2277 | (le->css & (CSS_ISIPV4 | CSS_ISIPV6)) && |
2208 | (le->css & CSS_TCPUDPCSOK)) | 2278 | (le->css & CSS_TCPUDPCSOK)) |
@@ -2243,8 +2313,14 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do) | |||
2243 | if (!sky2->rx_csum) | 2313 | if (!sky2->rx_csum) |
2244 | break; | 2314 | break; |
2245 | 2315 | ||
2246 | if (hw->chip_id == CHIP_ID_YUKON_EX) | 2316 | /* If this happens then driver assuming wrong format */ |
2317 | if (unlikely(hw->flags & SKY2_HW_NEW_LE)) { | ||
2318 | if (net_ratelimit()) | ||
2319 | printk(KERN_NOTICE "%s: unexpected" | ||
2320 | " checksum status\n", | ||
2321 | dev->name); | ||
2247 | break; | 2322 | break; |
2323 | } | ||
2248 | 2324 | ||
2249 | /* Both checksum counters are programmed to start at | 2325 | /* Both checksum counters are programmed to start at |
2250 | * the same offset, so unless there is a problem they | 2326 | * the same offset, so unless there is a problem they |
@@ -2436,20 +2512,72 @@ static void sky2_le_error(struct sky2_hw *hw, unsigned port, | |||
2436 | sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_IRQ_CHK); | 2512 | sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_IRQ_CHK); |
2437 | } | 2513 | } |
2438 | 2514 | ||
2439 | /* Check for lost IRQ once a second */ | 2515 | static int sky2_rx_hung(struct net_device *dev) |
2516 | { | ||
2517 | struct sky2_port *sky2 = netdev_priv(dev); | ||
2518 | struct sky2_hw *hw = sky2->hw; | ||
2519 | unsigned port = sky2->port; | ||
2520 | unsigned rxq = rxqaddr[port]; | ||
2521 | u32 mac_rp = sky2_read32(hw, SK_REG(port, RX_GMF_RP)); | ||
2522 | u8 mac_lev = sky2_read8(hw, SK_REG(port, RX_GMF_RLEV)); | ||
2523 | u8 fifo_rp = sky2_read8(hw, Q_ADDR(rxq, Q_RP)); | ||
2524 | u8 fifo_lev = sky2_read8(hw, Q_ADDR(rxq, Q_RL)); | ||
2525 | |||
2526 | /* If idle and MAC or PCI is stuck */ | ||
2527 | if (sky2->check.last == dev->last_rx && | ||
2528 | ((mac_rp == sky2->check.mac_rp && | ||
2529 | mac_lev != 0 && mac_lev >= sky2->check.mac_lev) || | ||
2530 | /* Check if the PCI RX hang */ | ||
2531 | (fifo_rp == sky2->check.fifo_rp && | ||
2532 | fifo_lev != 0 && fifo_lev >= sky2->check.fifo_lev))) { | ||
2533 | printk(KERN_DEBUG PFX "%s: hung mac %d:%d fifo %d (%d:%d)\n", | ||
2534 | dev->name, mac_lev, mac_rp, fifo_lev, fifo_rp, | ||
2535 | sky2_read8(hw, Q_ADDR(rxq, Q_WP))); | ||
2536 | return 1; | ||
2537 | } else { | ||
2538 | sky2->check.last = dev->last_rx; | ||
2539 | sky2->check.mac_rp = mac_rp; | ||
2540 | sky2->check.mac_lev = mac_lev; | ||
2541 | sky2->check.fifo_rp = fifo_rp; | ||
2542 | sky2->check.fifo_lev = fifo_lev; | ||
2543 | return 0; | ||
2544 | } | ||
2545 | } | ||
2546 | |||
2440 | static void sky2_watchdog(unsigned long arg) | 2547 | static void sky2_watchdog(unsigned long arg) |
2441 | { | 2548 | { |
2442 | struct sky2_hw *hw = (struct sky2_hw *) arg; | 2549 | struct sky2_hw *hw = (struct sky2_hw *) arg; |
2550 | struct net_device *dev; | ||
2443 | 2551 | ||
2552 | /* Check for lost IRQ once a second */ | ||
2444 | if (sky2_read32(hw, B0_ISRC)) { | 2553 | if (sky2_read32(hw, B0_ISRC)) { |
2445 | struct net_device *dev = hw->dev[0]; | 2554 | dev = hw->dev[0]; |
2446 | |||
2447 | if (__netif_rx_schedule_prep(dev)) | 2555 | if (__netif_rx_schedule_prep(dev)) |
2448 | __netif_rx_schedule(dev); | 2556 | __netif_rx_schedule(dev); |
2557 | } else { | ||
2558 | int i, active = 0; | ||
2559 | |||
2560 | for (i = 0; i < hw->ports; i++) { | ||
2561 | dev = hw->dev[i]; | ||
2562 | if (!netif_running(dev)) | ||
2563 | continue; | ||
2564 | ++active; | ||
2565 | |||
2566 | /* For chips with Rx FIFO, check if stuck */ | ||
2567 | if ((hw->flags & SKY2_HW_FIFO_HANG_CHECK) && | ||
2568 | sky2_rx_hung(dev)) { | ||
2569 | pr_info(PFX "%s: receiver hang detected\n", | ||
2570 | dev->name); | ||
2571 | schedule_work(&hw->restart_work); | ||
2572 | return; | ||
2573 | } | ||
2574 | } | ||
2575 | |||
2576 | if (active == 0) | ||
2577 | return; | ||
2449 | } | 2578 | } |
2450 | 2579 | ||
2451 | if (hw->active > 0) | 2580 | mod_timer(&hw->watchdog_timer, round_jiffies(jiffies + HZ)); |
2452 | mod_timer(&hw->watchdog_timer, round_jiffies(jiffies + HZ)); | ||
2453 | } | 2581 | } |
2454 | 2582 | ||
2455 | /* Hardware/software error handling */ | 2583 | /* Hardware/software error handling */ |
@@ -2546,17 +2674,25 @@ static void sky2_netpoll(struct net_device *dev) | |||
2546 | #endif | 2674 | #endif |
2547 | 2675 | ||
2548 | /* Chip internal frequency for clock calculations */ | 2676 | /* Chip internal frequency for clock calculations */ |
2549 | static inline u32 sky2_mhz(const struct sky2_hw *hw) | 2677 | static u32 sky2_mhz(const struct sky2_hw *hw) |
2550 | { | 2678 | { |
2551 | switch (hw->chip_id) { | 2679 | switch (hw->chip_id) { |
2552 | case CHIP_ID_YUKON_EC: | 2680 | case CHIP_ID_YUKON_EC: |
2553 | case CHIP_ID_YUKON_EC_U: | 2681 | case CHIP_ID_YUKON_EC_U: |
2554 | case CHIP_ID_YUKON_EX: | 2682 | case CHIP_ID_YUKON_EX: |
2555 | return 125; /* 125 Mhz */ | 2683 | return 125; |
2684 | |||
2556 | case CHIP_ID_YUKON_FE: | 2685 | case CHIP_ID_YUKON_FE: |
2557 | return 100; /* 100 Mhz */ | 2686 | return 100; |
2558 | default: /* YUKON_XL */ | 2687 | |
2559 | return 156; /* 156 Mhz */ | 2688 | case CHIP_ID_YUKON_FE_P: |
2689 | return 50; | ||
2690 | |||
2691 | case CHIP_ID_YUKON_XL: | ||
2692 | return 156; | ||
2693 | |||
2694 | default: | ||
2695 | BUG(); | ||
2560 | } | 2696 | } |
2561 | } | 2697 | } |
2562 | 2698 | ||
@@ -2581,23 +2717,63 @@ static int __devinit sky2_init(struct sky2_hw *hw) | |||
2581 | sky2_write8(hw, B0_CTST, CS_RST_CLR); | 2717 | sky2_write8(hw, B0_CTST, CS_RST_CLR); |
2582 | 2718 | ||
2583 | hw->chip_id = sky2_read8(hw, B2_CHIP_ID); | 2719 | hw->chip_id = sky2_read8(hw, B2_CHIP_ID); |
2584 | if (hw->chip_id < CHIP_ID_YUKON_XL || hw->chip_id > CHIP_ID_YUKON_FE) { | 2720 | hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4; |
2721 | |||
2722 | switch(hw->chip_id) { | ||
2723 | case CHIP_ID_YUKON_XL: | ||
2724 | hw->flags = SKY2_HW_GIGABIT | ||
2725 | | SKY2_HW_NEWER_PHY; | ||
2726 | if (hw->chip_rev < 3) | ||
2727 | hw->flags |= SKY2_HW_FIFO_HANG_CHECK; | ||
2728 | |||
2729 | break; | ||
2730 | |||
2731 | case CHIP_ID_YUKON_EC_U: | ||
2732 | hw->flags = SKY2_HW_GIGABIT | ||
2733 | | SKY2_HW_NEWER_PHY | ||
2734 | | SKY2_HW_ADV_POWER_CTL; | ||
2735 | break; | ||
2736 | |||
2737 | case CHIP_ID_YUKON_EX: | ||
2738 | hw->flags = SKY2_HW_GIGABIT | ||
2739 | | SKY2_HW_NEWER_PHY | ||
2740 | | SKY2_HW_NEW_LE | ||
2741 | | SKY2_HW_ADV_POWER_CTL; | ||
2742 | |||
2743 | /* New transmit checksum */ | ||
2744 | if (hw->chip_rev != CHIP_REV_YU_EX_B0) | ||
2745 | hw->flags |= SKY2_HW_AUTO_TX_SUM; | ||
2746 | break; | ||
2747 | |||
2748 | case CHIP_ID_YUKON_EC: | ||
2749 | /* This rev is really old, and requires untested workarounds */ | ||
2750 | if (hw->chip_rev == CHIP_REV_YU_EC_A1) { | ||
2751 | dev_err(&hw->pdev->dev, "unsupported revision Yukon-EC rev A1\n"); | ||
2752 | return -EOPNOTSUPP; | ||
2753 | } | ||
2754 | hw->flags = SKY2_HW_GIGABIT | SKY2_HW_FIFO_HANG_CHECK; | ||
2755 | break; | ||
2756 | |||
2757 | case CHIP_ID_YUKON_FE: | ||
2758 | break; | ||
2759 | |||
2760 | case CHIP_ID_YUKON_FE_P: | ||
2761 | hw->flags = SKY2_HW_NEWER_PHY | ||
2762 | | SKY2_HW_NEW_LE | ||
2763 | | SKY2_HW_AUTO_TX_SUM | ||
2764 | | SKY2_HW_ADV_POWER_CTL; | ||
2765 | break; | ||
2766 | default: | ||
2585 | dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n", | 2767 | dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n", |
2586 | hw->chip_id); | 2768 | hw->chip_id); |
2587 | return -EOPNOTSUPP; | 2769 | return -EOPNOTSUPP; |
2588 | } | 2770 | } |
2589 | 2771 | ||
2590 | hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4; | 2772 | hw->pmd_type = sky2_read8(hw, B2_PMD_TYP); |
2773 | if (hw->pmd_type == 'L' || hw->pmd_type == 'S' || hw->pmd_type == 'P') | ||
2774 | hw->flags |= SKY2_HW_FIBRE_PHY; | ||
2591 | 2775 | ||
2592 | /* This rev is really old, and requires untested workarounds */ | ||
2593 | if (hw->chip_id == CHIP_ID_YUKON_EC && hw->chip_rev == CHIP_REV_YU_EC_A1) { | ||
2594 | dev_err(&hw->pdev->dev, "unsupported revision Yukon-%s (0x%x) rev %d\n", | ||
2595 | yukon2_name[hw->chip_id - CHIP_ID_YUKON_XL], | ||
2596 | hw->chip_id, hw->chip_rev); | ||
2597 | return -EOPNOTSUPP; | ||
2598 | } | ||
2599 | 2776 | ||
2600 | hw->pmd_type = sky2_read8(hw, B2_PMD_TYP); | ||
2601 | hw->ports = 1; | 2777 | hw->ports = 1; |
2602 | t8 = sky2_read8(hw, B2_Y2_HW_RES); | 2778 | t8 = sky2_read8(hw, B2_Y2_HW_RES); |
2603 | if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) { | 2779 | if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) { |
@@ -2791,7 +2967,9 @@ static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | |||
2791 | 2967 | ||
2792 | sky2->wol = wol->wolopts; | 2968 | sky2->wol = wol->wolopts; |
2793 | 2969 | ||
2794 | if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX) | 2970 | if (hw->chip_id == CHIP_ID_YUKON_EC_U || |
2971 | hw->chip_id == CHIP_ID_YUKON_EX || | ||
2972 | hw->chip_id == CHIP_ID_YUKON_FE_P) | ||
2795 | sky2_write32(hw, B0_CTST, sky2->wol | 2973 | sky2_write32(hw, B0_CTST, sky2->wol |
2796 | ? Y2_HW_WOL_ON : Y2_HW_WOL_OFF); | 2974 | ? Y2_HW_WOL_ON : Y2_HW_WOL_OFF); |
2797 | 2975 | ||
@@ -2809,7 +2987,7 @@ static u32 sky2_supported_modes(const struct sky2_hw *hw) | |||
2809 | | SUPPORTED_100baseT_Full | 2987 | | SUPPORTED_100baseT_Full |
2810 | | SUPPORTED_Autoneg | SUPPORTED_TP; | 2988 | | SUPPORTED_Autoneg | SUPPORTED_TP; |
2811 | 2989 | ||
2812 | if (hw->chip_id != CHIP_ID_YUKON_FE) | 2990 | if (hw->flags & SKY2_HW_GIGABIT) |
2813 | modes |= SUPPORTED_1000baseT_Half | 2991 | modes |= SUPPORTED_1000baseT_Half |
2814 | | SUPPORTED_1000baseT_Full; | 2992 | | SUPPORTED_1000baseT_Full; |
2815 | return modes; | 2993 | return modes; |
@@ -2829,13 +3007,6 @@ static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | |||
2829 | ecmd->supported = sky2_supported_modes(hw); | 3007 | ecmd->supported = sky2_supported_modes(hw); |
2830 | ecmd->phy_address = PHY_ADDR_MARV; | 3008 | ecmd->phy_address = PHY_ADDR_MARV; |
2831 | if (sky2_is_copper(hw)) { | 3009 | if (sky2_is_copper(hw)) { |
2832 | ecmd->supported = SUPPORTED_10baseT_Half | ||
2833 | | SUPPORTED_10baseT_Full | ||
2834 | | SUPPORTED_100baseT_Half | ||
2835 | | SUPPORTED_100baseT_Full | ||
2836 | | SUPPORTED_1000baseT_Half | ||
2837 | | SUPPORTED_1000baseT_Full | ||
2838 | | SUPPORTED_Autoneg | SUPPORTED_TP; | ||
2839 | ecmd->port = PORT_TP; | 3010 | ecmd->port = PORT_TP; |
2840 | ecmd->speed = sky2->speed; | 3011 | ecmd->speed = sky2->speed; |
2841 | } else { | 3012 | } else { |
@@ -3814,8 +3985,12 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw, | |||
3814 | dev->features |= NETIF_F_HIGHDMA; | 3985 | dev->features |= NETIF_F_HIGHDMA; |
3815 | 3986 | ||
3816 | #ifdef SKY2_VLAN_TAG_USED | 3987 | #ifdef SKY2_VLAN_TAG_USED |
3817 | dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; | 3988 | /* The workaround for FE+ status conflicts with VLAN tag detection. */ |
3818 | dev->vlan_rx_register = sky2_vlan_rx_register; | 3989 | if (!(sky2->hw->chip_id == CHIP_ID_YUKON_FE_P && |
3990 | sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0)) { | ||
3991 | dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; | ||
3992 | dev->vlan_rx_register = sky2_vlan_rx_register; | ||
3993 | } | ||
3819 | #endif | 3994 | #endif |
3820 | 3995 | ||
3821 | /* read the mac address */ | 3996 | /* read the mac address */ |
@@ -3846,7 +4021,7 @@ static irqreturn_t __devinit sky2_test_intr(int irq, void *dev_id) | |||
3846 | return IRQ_NONE; | 4021 | return IRQ_NONE; |
3847 | 4022 | ||
3848 | if (status & Y2_IS_IRQ_SW) { | 4023 | if (status & Y2_IS_IRQ_SW) { |
3849 | hw->msi = 1; | 4024 | hw->flags |= SKY2_HW_USE_MSI; |
3850 | wake_up(&hw->msi_wait); | 4025 | wake_up(&hw->msi_wait); |
3851 | sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ); | 4026 | sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ); |
3852 | } | 4027 | } |
@@ -3874,9 +4049,9 @@ static int __devinit sky2_test_msi(struct sky2_hw *hw) | |||
3874 | sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ); | 4049 | sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ); |
3875 | sky2_read8(hw, B0_CTST); | 4050 | sky2_read8(hw, B0_CTST); |
3876 | 4051 | ||
3877 | wait_event_timeout(hw->msi_wait, hw->msi, HZ/10); | 4052 | wait_event_timeout(hw->msi_wait, (hw->flags & SKY2_HW_USE_MSI), HZ/10); |
3878 | 4053 | ||
3879 | if (!hw->msi) { | 4054 | if (!(hw->flags & SKY2_HW_USE_MSI)) { |
3880 | /* MSI test failed, go back to INTx mode */ | 4055 | /* MSI test failed, go back to INTx mode */ |
3881 | dev_info(&pdev->dev, "No interrupt generated using MSI, " | 4056 | dev_info(&pdev->dev, "No interrupt generated using MSI, " |
3882 | "switching to INTx mode.\n"); | 4057 | "switching to INTx mode.\n"); |
@@ -4009,7 +4184,8 @@ static int __devinit sky2_probe(struct pci_dev *pdev, | |||
4009 | goto err_out_free_netdev; | 4184 | goto err_out_free_netdev; |
4010 | } | 4185 | } |
4011 | 4186 | ||
4012 | err = request_irq(pdev->irq, sky2_intr, hw->msi ? 0 : IRQF_SHARED, | 4187 | err = request_irq(pdev->irq, sky2_intr, |
4188 | (hw->flags & SKY2_HW_USE_MSI) ? 0 : IRQF_SHARED, | ||
4013 | dev->name, hw); | 4189 | dev->name, hw); |
4014 | if (err) { | 4190 | if (err) { |
4015 | dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq); | 4191 | dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq); |
@@ -4042,7 +4218,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev, | |||
4042 | return 0; | 4218 | return 0; |
4043 | 4219 | ||
4044 | err_out_unregister: | 4220 | err_out_unregister: |
4045 | if (hw->msi) | 4221 | if (hw->flags & SKY2_HW_USE_MSI) |
4046 | pci_disable_msi(pdev); | 4222 | pci_disable_msi(pdev); |
4047 | unregister_netdev(dev); | 4223 | unregister_netdev(dev); |
4048 | err_out_free_netdev: | 4224 | err_out_free_netdev: |
@@ -4091,7 +4267,7 @@ static void __devexit sky2_remove(struct pci_dev *pdev) | |||
4091 | sky2_read8(hw, B0_CTST); | 4267 | sky2_read8(hw, B0_CTST); |
4092 | 4268 | ||
4093 | free_irq(pdev->irq, hw); | 4269 | free_irq(pdev->irq, hw); |
4094 | if (hw->msi) | 4270 | if (hw->flags & SKY2_HW_USE_MSI) |
4095 | pci_disable_msi(pdev); | 4271 | pci_disable_msi(pdev); |
4096 | pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma); | 4272 | pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma); |
4097 | pci_release_regions(pdev); | 4273 | pci_release_regions(pdev); |
@@ -4159,7 +4335,9 @@ static int sky2_resume(struct pci_dev *pdev) | |||
4159 | pci_enable_wake(pdev, PCI_D0, 0); | 4335 | pci_enable_wake(pdev, PCI_D0, 0); |
4160 | 4336 | ||
4161 | /* Re-enable all clocks */ | 4337 | /* Re-enable all clocks */ |
4162 | if (hw->chip_id == CHIP_ID_YUKON_EX || hw->chip_id == CHIP_ID_YUKON_EC_U) | 4338 | if (hw->chip_id == CHIP_ID_YUKON_EX || |
4339 | hw->chip_id == CHIP_ID_YUKON_EC_U || | ||
4340 | hw->chip_id == CHIP_ID_YUKON_FE_P) | ||
4163 | sky2_pci_write32(hw, PCI_DEV_REG3, 0); | 4341 | sky2_pci_write32(hw, PCI_DEV_REG3, 0); |
4164 | 4342 | ||
4165 | sky2_reset(hw); | 4343 | sky2_reset(hw); |
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h index 72e12b7cfa40..8bc5c54e3efa 100644 --- a/drivers/net/sky2.h +++ b/drivers/net/sky2.h | |||
@@ -470,18 +470,24 @@ enum { | |||
470 | CHIP_ID_YUKON_EX = 0xb5, /* Chip ID for YUKON-2 Extreme */ | 470 | CHIP_ID_YUKON_EX = 0xb5, /* Chip ID for YUKON-2 Extreme */ |
471 | CHIP_ID_YUKON_EC = 0xb6, /* Chip ID for YUKON-2 EC */ | 471 | CHIP_ID_YUKON_EC = 0xb6, /* Chip ID for YUKON-2 EC */ |
472 | CHIP_ID_YUKON_FE = 0xb7, /* Chip ID for YUKON-2 FE */ | 472 | CHIP_ID_YUKON_FE = 0xb7, /* Chip ID for YUKON-2 FE */ |
473 | 473 | CHIP_ID_YUKON_FE_P = 0xb8, /* Chip ID for YUKON-2 FE+ */ | |
474 | }; | ||
475 | enum yukon_ec_rev { | ||
474 | CHIP_REV_YU_EC_A1 = 0, /* Chip Rev. for Yukon-EC A1/A0 */ | 476 | CHIP_REV_YU_EC_A1 = 0, /* Chip Rev. for Yukon-EC A1/A0 */ |
475 | CHIP_REV_YU_EC_A2 = 1, /* Chip Rev. for Yukon-EC A2 */ | 477 | CHIP_REV_YU_EC_A2 = 1, /* Chip Rev. for Yukon-EC A2 */ |
476 | CHIP_REV_YU_EC_A3 = 2, /* Chip Rev. for Yukon-EC A3 */ | 478 | CHIP_REV_YU_EC_A3 = 2, /* Chip Rev. for Yukon-EC A3 */ |
477 | 479 | }; | |
480 | enum yukon_ec_u_rev { | ||
478 | CHIP_REV_YU_EC_U_A0 = 1, | 481 | CHIP_REV_YU_EC_U_A0 = 1, |
479 | CHIP_REV_YU_EC_U_A1 = 2, | 482 | CHIP_REV_YU_EC_U_A1 = 2, |
480 | CHIP_REV_YU_EC_U_B0 = 3, | 483 | CHIP_REV_YU_EC_U_B0 = 3, |
481 | 484 | }; | |
485 | enum yukon_fe_rev { | ||
482 | CHIP_REV_YU_FE_A1 = 1, | 486 | CHIP_REV_YU_FE_A1 = 1, |
483 | CHIP_REV_YU_FE_A2 = 2, | 487 | CHIP_REV_YU_FE_A2 = 2, |
484 | 488 | }; | |
489 | enum yukon_fe_p_rev { | ||
490 | CHIP_REV_YU_FE2_A0 = 0, | ||
485 | }; | 491 | }; |
486 | enum yukon_ex_rev { | 492 | enum yukon_ex_rev { |
487 | CHIP_REV_YU_EX_A0 = 1, | 493 | CHIP_REV_YU_EX_A0 = 1, |
@@ -1668,7 +1674,7 @@ enum { | |||
1668 | 1674 | ||
1669 | /* Receive Frame Status Encoding */ | 1675 | /* Receive Frame Status Encoding */ |
1670 | enum { | 1676 | enum { |
1671 | GMR_FS_LEN = 0xffff<<16, /* Bit 31..16: Rx Frame Length */ | 1677 | GMR_FS_LEN = 0x7fff<<16, /* Bit 30..16: Rx Frame Length */ |
1672 | GMR_FS_VLAN = 1<<13, /* VLAN Packet */ | 1678 | GMR_FS_VLAN = 1<<13, /* VLAN Packet */ |
1673 | GMR_FS_JABBER = 1<<12, /* Jabber Packet */ | 1679 | GMR_FS_JABBER = 1<<12, /* Jabber Packet */ |
1674 | GMR_FS_UN_SIZE = 1<<11, /* Undersize Packet */ | 1680 | GMR_FS_UN_SIZE = 1<<11, /* Undersize Packet */ |
@@ -1729,6 +1735,10 @@ enum { | |||
1729 | GMF_RX_CTRL_DEF = GMF_OPER_ON | GMF_RX_F_FL_ON, | 1735 | GMF_RX_CTRL_DEF = GMF_OPER_ON | GMF_RX_F_FL_ON, |
1730 | }; | 1736 | }; |
1731 | 1737 | ||
1738 | /* TX_GMF_EA 32 bit Tx GMAC FIFO End Address */ | ||
1739 | enum { | ||
1740 | TX_DYN_WM_ENA = 3, /* Yukon-FE+ specific */ | ||
1741 | }; | ||
1732 | 1742 | ||
1733 | /* TX_GMF_CTRL_T 32 bit Tx GMAC FIFO Control/Test */ | 1743 | /* TX_GMF_CTRL_T 32 bit Tx GMAC FIFO Control/Test */ |
1734 | enum { | 1744 | enum { |
@@ -2017,6 +2027,14 @@ struct sky2_port { | |||
2017 | u16 rx_tag; | 2027 | u16 rx_tag; |
2018 | struct vlan_group *vlgrp; | 2028 | struct vlan_group *vlgrp; |
2019 | #endif | 2029 | #endif |
2030 | struct { | ||
2031 | unsigned long last; | ||
2032 | u32 mac_rp; | ||
2033 | u8 mac_lev; | ||
2034 | u8 fifo_rp; | ||
2035 | u8 fifo_lev; | ||
2036 | } check; | ||
2037 | |||
2020 | 2038 | ||
2021 | dma_addr_t rx_le_map; | 2039 | dma_addr_t rx_le_map; |
2022 | dma_addr_t tx_le_map; | 2040 | dma_addr_t tx_le_map; |
@@ -2040,12 +2058,20 @@ struct sky2_hw { | |||
2040 | void __iomem *regs; | 2058 | void __iomem *regs; |
2041 | struct pci_dev *pdev; | 2059 | struct pci_dev *pdev; |
2042 | struct net_device *dev[2]; | 2060 | struct net_device *dev[2]; |
2061 | unsigned long flags; | ||
2062 | #define SKY2_HW_USE_MSI 0x00000001 | ||
2063 | #define SKY2_HW_FIBRE_PHY 0x00000002 | ||
2064 | #define SKY2_HW_GIGABIT 0x00000004 | ||
2065 | #define SKY2_HW_NEWER_PHY 0x00000008 | ||
2066 | #define SKY2_HW_FIFO_HANG_CHECK 0x00000010 | ||
2067 | #define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */ | ||
2068 | #define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */ | ||
2069 | #define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */ | ||
2043 | 2070 | ||
2044 | u8 chip_id; | 2071 | u8 chip_id; |
2045 | u8 chip_rev; | 2072 | u8 chip_rev; |
2046 | u8 pmd_type; | 2073 | u8 pmd_type; |
2047 | u8 ports; | 2074 | u8 ports; |
2048 | u8 active; | ||
2049 | 2075 | ||
2050 | struct sky2_status_le *st_le; | 2076 | struct sky2_status_le *st_le; |
2051 | u32 st_idx; | 2077 | u32 st_idx; |
@@ -2053,13 +2079,12 @@ struct sky2_hw { | |||
2053 | 2079 | ||
2054 | struct timer_list watchdog_timer; | 2080 | struct timer_list watchdog_timer; |
2055 | struct work_struct restart_work; | 2081 | struct work_struct restart_work; |
2056 | int msi; | ||
2057 | wait_queue_head_t msi_wait; | 2082 | wait_queue_head_t msi_wait; |
2058 | }; | 2083 | }; |
2059 | 2084 | ||
2060 | static inline int sky2_is_copper(const struct sky2_hw *hw) | 2085 | static inline int sky2_is_copper(const struct sky2_hw *hw) |
2061 | { | 2086 | { |
2062 | return !(hw->pmd_type == 'L' || hw->pmd_type == 'S' || hw->pmd_type == 'P'); | 2087 | return !(hw->flags & SKY2_HW_FIBRE_PHY); |
2063 | } | 2088 | } |
2064 | 2089 | ||
2065 | /* Register accessor for memory mapped device */ | 2090 | /* Register accessor for memory mapped device */ |
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c index 16c7a0e87850..a2de32fabc17 100644 --- a/drivers/net/usb/dm9601.c +++ b/drivers/net/usb/dm9601.c | |||
@@ -405,7 +405,7 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf) | |||
405 | dev->net->ethtool_ops = &dm9601_ethtool_ops; | 405 | dev->net->ethtool_ops = &dm9601_ethtool_ops; |
406 | dev->net->hard_header_len += DM_TX_OVERHEAD; | 406 | dev->net->hard_header_len += DM_TX_OVERHEAD; |
407 | dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; | 407 | dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; |
408 | dev->rx_urb_size = dev->net->mtu + DM_RX_OVERHEAD; | 408 | dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD; |
409 | 409 | ||
410 | dev->mii.dev = dev->net; | 410 | dev->mii.dev = dev->net; |
411 | dev->mii.mdio_read = dm9601_mdio_read; | 411 | dev->mii.mdio_read = dm9601_mdio_read; |
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile index ef35bc6c4a22..4eb6d9752881 100644 --- a/drivers/net/wireless/Makefile +++ b/drivers/net/wireless/Makefile | |||
@@ -43,7 +43,7 @@ obj-$(CONFIG_PCMCIA_RAYCS) += ray_cs.o | |||
43 | obj-$(CONFIG_PCMCIA_WL3501) += wl3501_cs.o | 43 | obj-$(CONFIG_PCMCIA_WL3501) += wl3501_cs.o |
44 | 44 | ||
45 | obj-$(CONFIG_USB_ZD1201) += zd1201.o | 45 | obj-$(CONFIG_USB_ZD1201) += zd1201.o |
46 | obj-$(CONFIG_LIBERTAS_USB) += libertas/ | 46 | obj-$(CONFIG_LIBERTAS) += libertas/ |
47 | 47 | ||
48 | rtl8187-objs := rtl8187_dev.o rtl8187_rtl8225.o | 48 | rtl8187-objs := rtl8187_dev.o rtl8187_rtl8225.o |
49 | obj-$(CONFIG_RTL8187) += rtl8187.o | 49 | obj-$(CONFIG_RTL8187) += rtl8187.o |
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 7dcaa09b3c20..50f2dd9e1bb2 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -1444,7 +1444,6 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID, quirk_netmos); | |||
1444 | static void __devinit quirk_e100_interrupt(struct pci_dev *dev) | 1444 | static void __devinit quirk_e100_interrupt(struct pci_dev *dev) |
1445 | { | 1445 | { |
1446 | u16 command; | 1446 | u16 command; |
1447 | u32 bar; | ||
1448 | u8 __iomem *csr; | 1447 | u8 __iomem *csr; |
1449 | u8 cmd_hi; | 1448 | u8 cmd_hi; |
1450 | 1449 | ||
@@ -1476,12 +1475,12 @@ static void __devinit quirk_e100_interrupt(struct pci_dev *dev) | |||
1476 | * re-enable them when it's ready. | 1475 | * re-enable them when it's ready. |
1477 | */ | 1476 | */ |
1478 | pci_read_config_word(dev, PCI_COMMAND, &command); | 1477 | pci_read_config_word(dev, PCI_COMMAND, &command); |
1479 | pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &bar); | ||
1480 | 1478 | ||
1481 | if (!(command & PCI_COMMAND_MEMORY) || !bar) | 1479 | if (!(command & PCI_COMMAND_MEMORY) || !pci_resource_start(dev, 0)) |
1482 | return; | 1480 | return; |
1483 | 1481 | ||
1484 | csr = ioremap(bar, 8); | 1482 | /* Convert from PCI bus to resource space. */ |
1483 | csr = ioremap(pci_resource_start(dev, 0), 8); | ||
1485 | if (!csr) { | 1484 | if (!csr) { |
1486 | printk(KERN_WARNING "PCI: Can't map %s e100 registers\n", | 1485 | printk(KERN_WARNING "PCI: Can't map %s e100 registers\n", |
1487 | pci_name(dev)); | 1486 | pci_name(dev)); |
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c index c7c4574729b1..de3155b21285 100644 --- a/drivers/power/power_supply_sysfs.c +++ b/drivers/power/power_supply_sysfs.c | |||
@@ -289,6 +289,7 @@ int power_supply_uevent(struct device *dev, char **envp, int num_envp, | |||
289 | if (ret) | 289 | if (ret) |
290 | goto out; | 290 | goto out; |
291 | } | 291 | } |
292 | envp[i] = NULL; | ||
292 | 293 | ||
293 | out: | 294 | out: |
294 | free_page((unsigned long)prop_buf); | 295 | free_page((unsigned long)prop_buf); |
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c index d5d8caba3560..ab13824df856 100644 --- a/drivers/scsi/aic94xx/aic94xx_task.c +++ b/drivers/scsi/aic94xx/aic94xx_task.c | |||
@@ -451,7 +451,7 @@ static int asd_build_smp_ascb(struct asd_ascb *ascb, struct sas_task *task, | |||
451 | struct scb *scb; | 451 | struct scb *scb; |
452 | 452 | ||
453 | pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_req, 1, | 453 | pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_req, 1, |
454 | PCI_DMA_FROMDEVICE); | 454 | PCI_DMA_TODEVICE); |
455 | pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_resp, 1, | 455 | pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_resp, 1, |
456 | PCI_DMA_FROMDEVICE); | 456 | PCI_DMA_FROMDEVICE); |
457 | 457 | ||
@@ -486,7 +486,7 @@ static void asd_unbuild_smp_ascb(struct asd_ascb *a) | |||
486 | 486 | ||
487 | BUG_ON(!task); | 487 | BUG_ON(!task); |
488 | pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_req, 1, | 488 | pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_req, 1, |
489 | PCI_DMA_FROMDEVICE); | 489 | PCI_DMA_TODEVICE); |
490 | pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_resp, 1, | 490 | pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_resp, 1, |
491 | PCI_DMA_FROMDEVICE); | 491 | PCI_DMA_FROMDEVICE); |
492 | } | 492 | } |
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c index 77b06a983fa7..95cf7b6cd622 100644 --- a/drivers/scsi/esp_scsi.c +++ b/drivers/scsi/esp_scsi.c | |||
@@ -2314,6 +2314,7 @@ int __devinit scsi_esp_register(struct esp *esp, struct device *dev) | |||
2314 | esp->host->transportt = esp_transport_template; | 2314 | esp->host->transportt = esp_transport_template; |
2315 | esp->host->max_lun = ESP_MAX_LUN; | 2315 | esp->host->max_lun = ESP_MAX_LUN; |
2316 | esp->host->cmd_per_lun = 2; | 2316 | esp->host->cmd_per_lun = 2; |
2317 | esp->host->unique_id = instance; | ||
2317 | 2318 | ||
2318 | esp_set_clock_params(esp); | 2319 | esp_set_clock_params(esp); |
2319 | 2320 | ||
@@ -2337,7 +2338,7 @@ int __devinit scsi_esp_register(struct esp *esp, struct device *dev) | |||
2337 | if (err) | 2338 | if (err) |
2338 | return err; | 2339 | return err; |
2339 | 2340 | ||
2340 | esp->host->unique_id = instance++; | 2341 | instance++; |
2341 | 2342 | ||
2342 | scsi_scan_host(esp->host); | 2343 | scsi_scan_host(esp->host); |
2343 | 2344 | ||
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c index 6f56f8750635..4df21c92ff1e 100644 --- a/drivers/scsi/scsi_transport_spi.c +++ b/drivers/scsi/scsi_transport_spi.c | |||
@@ -787,10 +787,12 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer) | |||
787 | struct scsi_target *starget = sdev->sdev_target; | 787 | struct scsi_target *starget = sdev->sdev_target; |
788 | struct Scsi_Host *shost = sdev->host; | 788 | struct Scsi_Host *shost = sdev->host; |
789 | int len = sdev->inquiry_len; | 789 | int len = sdev->inquiry_len; |
790 | int min_period = spi_min_period(starget); | ||
791 | int max_width = spi_max_width(starget); | ||
790 | /* first set us up for narrow async */ | 792 | /* first set us up for narrow async */ |
791 | DV_SET(offset, 0); | 793 | DV_SET(offset, 0); |
792 | DV_SET(width, 0); | 794 | DV_SET(width, 0); |
793 | 795 | ||
794 | if (spi_dv_device_compare_inquiry(sdev, buffer, buffer, DV_LOOPS) | 796 | if (spi_dv_device_compare_inquiry(sdev, buffer, buffer, DV_LOOPS) |
795 | != SPI_COMPARE_SUCCESS) { | 797 | != SPI_COMPARE_SUCCESS) { |
796 | starget_printk(KERN_ERR, starget, "Domain Validation Initial Inquiry Failed\n"); | 798 | starget_printk(KERN_ERR, starget, "Domain Validation Initial Inquiry Failed\n"); |
@@ -798,9 +800,13 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer) | |||
798 | return; | 800 | return; |
799 | } | 801 | } |
800 | 802 | ||
803 | if (!scsi_device_wide(sdev)) { | ||
804 | spi_max_width(starget) = 0; | ||
805 | max_width = 0; | ||
806 | } | ||
807 | |||
801 | /* test width */ | 808 | /* test width */ |
802 | if (i->f->set_width && spi_max_width(starget) && | 809 | if (i->f->set_width && max_width) { |
803 | scsi_device_wide(sdev)) { | ||
804 | i->f->set_width(starget, 1); | 810 | i->f->set_width(starget, 1); |
805 | 811 | ||
806 | if (spi_dv_device_compare_inquiry(sdev, buffer, | 812 | if (spi_dv_device_compare_inquiry(sdev, buffer, |
@@ -809,6 +815,11 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer) | |||
809 | != SPI_COMPARE_SUCCESS) { | 815 | != SPI_COMPARE_SUCCESS) { |
810 | starget_printk(KERN_ERR, starget, "Wide Transfers Fail\n"); | 816 | starget_printk(KERN_ERR, starget, "Wide Transfers Fail\n"); |
811 | i->f->set_width(starget, 0); | 817 | i->f->set_width(starget, 0); |
818 | /* Make sure we don't force wide back on by asking | ||
819 | * for a transfer period that requires it */ | ||
820 | max_width = 0; | ||
821 | if (min_period < 10) | ||
822 | min_period = 10; | ||
812 | } | 823 | } |
813 | } | 824 | } |
814 | 825 | ||
@@ -828,7 +839,8 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer) | |||
828 | 839 | ||
829 | /* now set up to the maximum */ | 840 | /* now set up to the maximum */ |
830 | DV_SET(offset, spi_max_offset(starget)); | 841 | DV_SET(offset, spi_max_offset(starget)); |
831 | DV_SET(period, spi_min_period(starget)); | 842 | DV_SET(period, min_period); |
843 | |||
832 | /* try QAS requests; this should be harmless to set if the | 844 | /* try QAS requests; this should be harmless to set if the |
833 | * target supports it */ | 845 | * target supports it */ |
834 | if (scsi_device_qas(sdev)) { | 846 | if (scsi_device_qas(sdev)) { |
@@ -837,14 +849,14 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer) | |||
837 | DV_SET(qas, 0); | 849 | DV_SET(qas, 0); |
838 | } | 850 | } |
839 | 851 | ||
840 | if (scsi_device_ius(sdev) && spi_min_period(starget) < 9) { | 852 | if (scsi_device_ius(sdev) && min_period < 9) { |
841 | /* This u320 (or u640). Set IU transfers */ | 853 | /* This u320 (or u640). Set IU transfers */ |
842 | DV_SET(iu, 1); | 854 | DV_SET(iu, 1); |
843 | /* Then set the optional parameters */ | 855 | /* Then set the optional parameters */ |
844 | DV_SET(rd_strm, 1); | 856 | DV_SET(rd_strm, 1); |
845 | DV_SET(wr_flow, 1); | 857 | DV_SET(wr_flow, 1); |
846 | DV_SET(rti, 1); | 858 | DV_SET(rti, 1); |
847 | if (spi_min_period(starget) == 8) | 859 | if (min_period == 8) |
848 | DV_SET(pcomp_en, 1); | 860 | DV_SET(pcomp_en, 1); |
849 | } else { | 861 | } else { |
850 | DV_SET(iu, 0); | 862 | DV_SET(iu, 0); |
@@ -862,6 +874,10 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer) | |||
862 | } else { | 874 | } else { |
863 | DV_SET(dt, 1); | 875 | DV_SET(dt, 1); |
864 | } | 876 | } |
877 | /* set width last because it will pull all the other | ||
878 | * parameters down to required values */ | ||
879 | DV_SET(width, max_width); | ||
880 | |||
865 | /* Do the read only INQUIRY tests */ | 881 | /* Do the read only INQUIRY tests */ |
866 | spi_dv_retrain(sdev, buffer, buffer + sdev->inquiry_len, | 882 | spi_dv_retrain(sdev, buffer, buffer + sdev->inquiry_len, |
867 | spi_dv_device_compare_inquiry); | 883 | spi_dv_device_compare_inquiry); |
diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm1.h b/drivers/serial/cpm_uart/cpm_uart_cpm1.h index a99e45e2b6d8..2a6477834c3e 100644 --- a/drivers/serial/cpm_uart/cpm_uart_cpm1.h +++ b/drivers/serial/cpm_uart/cpm_uart_cpm1.h | |||
@@ -37,6 +37,6 @@ static inline void cpm_set_smc_fcr(volatile smc_uart_t * up) | |||
37 | up->smc_tfcr = SMC_EB; | 37 | up->smc_tfcr = SMC_EB; |
38 | } | 38 | } |
39 | 39 | ||
40 | #define DPRAM_BASE ((unsigned char *)&cpmp->cp_dpmem[0]) | 40 | #define DPRAM_BASE ((unsigned char *)cpm_dpram_addr(0)) |
41 | 41 | ||
42 | #endif | 42 | #endif |
diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c index e348ba684050..ff610c23314b 100644 --- a/drivers/serial/sunsab.c +++ b/drivers/serial/sunsab.c | |||
@@ -38,7 +38,7 @@ | |||
38 | #include <asm/prom.h> | 38 | #include <asm/prom.h> |
39 | #include <asm/of_device.h> | 39 | #include <asm/of_device.h> |
40 | 40 | ||
41 | #if defined(CONFIG_SERIAL_SUNZILOG_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) | 41 | #if defined(CONFIG_SERIAL_SUNSAB_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) |
42 | #define SUPPORT_SYSRQ | 42 | #define SUPPORT_SYSRQ |
43 | #endif | 43 | #endif |
44 | 44 | ||
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c index 8d7ab74170d5..a593f900eff4 100644 --- a/drivers/w1/w1.c +++ b/drivers/w1/w1.c | |||
@@ -431,6 +431,7 @@ static int w1_uevent(struct device *dev, char **envp, int num_envp, | |||
431 | err = add_uevent_var(envp, num_envp, &cur_index, buffer, buffer_size, | 431 | err = add_uevent_var(envp, num_envp, &cur_index, buffer, buffer_size, |
432 | &cur_len, "W1_SLAVE_ID=%024LX", | 432 | &cur_len, "W1_SLAVE_ID=%024LX", |
433 | (unsigned long long)sl->reg_num.id); | 433 | (unsigned long long)sl->reg_num.id); |
434 | envp[cur_index] = NULL; | ||
434 | if (err) | 435 | if (err) |
435 | return err; | 436 | return err; |
436 | 437 | ||
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c index 5a5b7116cefb..37310b0e8107 100644 --- a/fs/compat_ioctl.c +++ b/fs/compat_ioctl.c | |||
@@ -3190,6 +3190,8 @@ COMPATIBLE_IOCTL(SIOCSIWRETRY) | |||
3190 | COMPATIBLE_IOCTL(SIOCGIWRETRY) | 3190 | COMPATIBLE_IOCTL(SIOCGIWRETRY) |
3191 | COMPATIBLE_IOCTL(SIOCSIWPOWER) | 3191 | COMPATIBLE_IOCTL(SIOCSIWPOWER) |
3192 | COMPATIBLE_IOCTL(SIOCGIWPOWER) | 3192 | COMPATIBLE_IOCTL(SIOCGIWPOWER) |
3193 | COMPATIBLE_IOCTL(SIOCSIWAUTH) | ||
3194 | COMPATIBLE_IOCTL(SIOCGIWAUTH) | ||
3193 | /* hiddev */ | 3195 | /* hiddev */ |
3194 | COMPATIBLE_IOCTL(HIDIOCGVERSION) | 3196 | COMPATIBLE_IOCTL(HIDIOCGVERSION) |
3195 | COMPATIBLE_IOCTL(HIDIOCAPPLICATION) | 3197 | COMPATIBLE_IOCTL(HIDIOCAPPLICATION) |
@@ -50,7 +50,6 @@ | |||
50 | #include <linux/tsacct_kern.h> | 50 | #include <linux/tsacct_kern.h> |
51 | #include <linux/cn_proc.h> | 51 | #include <linux/cn_proc.h> |
52 | #include <linux/audit.h> | 52 | #include <linux/audit.h> |
53 | #include <linux/signalfd.h> | ||
54 | 53 | ||
55 | #include <asm/uaccess.h> | 54 | #include <asm/uaccess.h> |
56 | #include <asm/mmu_context.h> | 55 | #include <asm/mmu_context.h> |
@@ -784,7 +783,6 @@ static int de_thread(struct task_struct *tsk) | |||
784 | * and we can just re-use it all. | 783 | * and we can just re-use it all. |
785 | */ | 784 | */ |
786 | if (atomic_read(&oldsighand->count) <= 1) { | 785 | if (atomic_read(&oldsighand->count) <= 1) { |
787 | signalfd_detach(tsk); | ||
788 | exit_itimers(sig); | 786 | exit_itimers(sig); |
789 | return 0; | 787 | return 0; |
790 | } | 788 | } |
@@ -923,7 +921,6 @@ static int de_thread(struct task_struct *tsk) | |||
923 | sig->flags = 0; | 921 | sig->flags = 0; |
924 | 922 | ||
925 | no_thread_group: | 923 | no_thread_group: |
926 | signalfd_detach(tsk); | ||
927 | exit_itimers(sig); | 924 | exit_itimers(sig); |
928 | if (leader) | 925 | if (leader) |
929 | release_task(leader); | 926 | release_task(leader); |
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c index a21e4bc5444b..d098c7af0d22 100644 --- a/fs/lockd/svclock.c +++ b/fs/lockd/svclock.c | |||
@@ -171,19 +171,14 @@ found: | |||
171 | * GRANTED_RES message by cookie, without having to rely on the client's IP | 171 | * GRANTED_RES message by cookie, without having to rely on the client's IP |
172 | * address. --okir | 172 | * address. --okir |
173 | */ | 173 | */ |
174 | static inline struct nlm_block * | 174 | static struct nlm_block * |
175 | nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_file *file, | 175 | nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host, |
176 | struct nlm_lock *lock, struct nlm_cookie *cookie) | 176 | struct nlm_file *file, struct nlm_lock *lock, |
177 | struct nlm_cookie *cookie) | ||
177 | { | 178 | { |
178 | struct nlm_block *block; | 179 | struct nlm_block *block; |
179 | struct nlm_host *host; | ||
180 | struct nlm_rqst *call = NULL; | 180 | struct nlm_rqst *call = NULL; |
181 | 181 | ||
182 | /* Create host handle for callback */ | ||
183 | host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len); | ||
184 | if (host == NULL) | ||
185 | return NULL; | ||
186 | |||
187 | call = nlm_alloc_call(host); | 182 | call = nlm_alloc_call(host); |
188 | if (call == NULL) | 183 | if (call == NULL) |
189 | return NULL; | 184 | return NULL; |
@@ -366,6 +361,7 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file, | |||
366 | struct nlm_lock *lock, int wait, struct nlm_cookie *cookie) | 361 | struct nlm_lock *lock, int wait, struct nlm_cookie *cookie) |
367 | { | 362 | { |
368 | struct nlm_block *block = NULL; | 363 | struct nlm_block *block = NULL; |
364 | struct nlm_host *host; | ||
369 | int error; | 365 | int error; |
370 | __be32 ret; | 366 | __be32 ret; |
371 | 367 | ||
@@ -377,6 +373,10 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file, | |||
377 | (long long)lock->fl.fl_end, | 373 | (long long)lock->fl.fl_end, |
378 | wait); | 374 | wait); |
379 | 375 | ||
376 | /* Create host handle for callback */ | ||
377 | host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len); | ||
378 | if (host == NULL) | ||
379 | return nlm_lck_denied_nolocks; | ||
380 | 380 | ||
381 | /* Lock file against concurrent access */ | 381 | /* Lock file against concurrent access */ |
382 | mutex_lock(&file->f_mutex); | 382 | mutex_lock(&file->f_mutex); |
@@ -385,7 +385,8 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file, | |||
385 | */ | 385 | */ |
386 | block = nlmsvc_lookup_block(file, lock); | 386 | block = nlmsvc_lookup_block(file, lock); |
387 | if (block == NULL) { | 387 | if (block == NULL) { |
388 | block = nlmsvc_create_block(rqstp, file, lock, cookie); | 388 | block = nlmsvc_create_block(rqstp, nlm_get_host(host), file, |
389 | lock, cookie); | ||
389 | ret = nlm_lck_denied_nolocks; | 390 | ret = nlm_lck_denied_nolocks; |
390 | if (block == NULL) | 391 | if (block == NULL) |
391 | goto out; | 392 | goto out; |
@@ -449,6 +450,7 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file, | |||
449 | out: | 450 | out: |
450 | mutex_unlock(&file->f_mutex); | 451 | mutex_unlock(&file->f_mutex); |
451 | nlmsvc_release_block(block); | 452 | nlmsvc_release_block(block); |
453 | nlm_release_host(host); | ||
452 | dprintk("lockd: nlmsvc_lock returned %u\n", ret); | 454 | dprintk("lockd: nlmsvc_lock returned %u\n", ret); |
453 | return ret; | 455 | return ret; |
454 | } | 456 | } |
@@ -477,10 +479,15 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file, | |||
477 | 479 | ||
478 | if (block == NULL) { | 480 | if (block == NULL) { |
479 | struct file_lock *conf = kzalloc(sizeof(*conf), GFP_KERNEL); | 481 | struct file_lock *conf = kzalloc(sizeof(*conf), GFP_KERNEL); |
482 | struct nlm_host *host; | ||
480 | 483 | ||
481 | if (conf == NULL) | 484 | if (conf == NULL) |
482 | return nlm_granted; | 485 | return nlm_granted; |
483 | block = nlmsvc_create_block(rqstp, file, lock, cookie); | 486 | /* Create host handle for callback */ |
487 | host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len); | ||
488 | if (host == NULL) | ||
489 | return nlm_lck_denied_nolocks; | ||
490 | block = nlmsvc_create_block(rqstp, host, file, lock, cookie); | ||
484 | if (block == NULL) { | 491 | if (block == NULL) { |
485 | kfree(conf); | 492 | kfree(conf); |
486 | return nlm_granted; | 493 | return nlm_granted; |
diff --git a/fs/nfs/client.c b/fs/nfs/client.c index a49f9feff776..a204484072f3 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c | |||
@@ -588,16 +588,6 @@ static int nfs_init_server(struct nfs_server *server, const struct nfs_mount_dat | |||
588 | server->namelen = data->namlen; | 588 | server->namelen = data->namlen; |
589 | /* Create a client RPC handle for the NFSv3 ACL management interface */ | 589 | /* Create a client RPC handle for the NFSv3 ACL management interface */ |
590 | nfs_init_server_aclclient(server); | 590 | nfs_init_server_aclclient(server); |
591 | if (clp->cl_nfsversion == 3) { | ||
592 | if (server->namelen == 0 || server->namelen > NFS3_MAXNAMLEN) | ||
593 | server->namelen = NFS3_MAXNAMLEN; | ||
594 | if (!(data->flags & NFS_MOUNT_NORDIRPLUS)) | ||
595 | server->caps |= NFS_CAP_READDIRPLUS; | ||
596 | } else { | ||
597 | if (server->namelen == 0 || server->namelen > NFS2_MAXNAMLEN) | ||
598 | server->namelen = NFS2_MAXNAMLEN; | ||
599 | } | ||
600 | |||
601 | dprintk("<-- nfs_init_server() = 0 [new %p]\n", clp); | 591 | dprintk("<-- nfs_init_server() = 0 [new %p]\n", clp); |
602 | return 0; | 592 | return 0; |
603 | 593 | ||
@@ -794,6 +784,16 @@ struct nfs_server *nfs_create_server(const struct nfs_mount_data *data, | |||
794 | error = nfs_probe_fsinfo(server, mntfh, &fattr); | 784 | error = nfs_probe_fsinfo(server, mntfh, &fattr); |
795 | if (error < 0) | 785 | if (error < 0) |
796 | goto error; | 786 | goto error; |
787 | if (server->nfs_client->rpc_ops->version == 3) { | ||
788 | if (server->namelen == 0 || server->namelen > NFS3_MAXNAMLEN) | ||
789 | server->namelen = NFS3_MAXNAMLEN; | ||
790 | if (!(data->flags & NFS_MOUNT_NORDIRPLUS)) | ||
791 | server->caps |= NFS_CAP_READDIRPLUS; | ||
792 | } else { | ||
793 | if (server->namelen == 0 || server->namelen > NFS2_MAXNAMLEN) | ||
794 | server->namelen = NFS2_MAXNAMLEN; | ||
795 | } | ||
796 | |||
797 | if (!(fattr.valid & NFS_ATTR_FATTR)) { | 797 | if (!(fattr.valid & NFS_ATTR_FATTR)) { |
798 | error = server->nfs_client->rpc_ops->getattr(server, mntfh, &fattr); | 798 | error = server->nfs_client->rpc_ops->getattr(server, mntfh, &fattr); |
799 | if (error < 0) { | 799 | if (error < 0) { |
@@ -984,6 +984,9 @@ struct nfs_server *nfs4_create_server(const struct nfs4_mount_data *data, | |||
984 | if (error < 0) | 984 | if (error < 0) |
985 | goto error; | 985 | goto error; |
986 | 986 | ||
987 | if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN) | ||
988 | server->namelen = NFS4_MAXNAMLEN; | ||
989 | |||
987 | BUG_ON(!server->nfs_client); | 990 | BUG_ON(!server->nfs_client); |
988 | BUG_ON(!server->nfs_client->rpc_ops); | 991 | BUG_ON(!server->nfs_client->rpc_ops); |
989 | BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops); | 992 | BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops); |
@@ -1056,6 +1059,9 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data, | |||
1056 | if (error < 0) | 1059 | if (error < 0) |
1057 | goto error; | 1060 | goto error; |
1058 | 1061 | ||
1062 | if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN) | ||
1063 | server->namelen = NFS4_MAXNAMLEN; | ||
1064 | |||
1059 | dprintk("Referral FSID: %llx:%llx\n", | 1065 | dprintk("Referral FSID: %llx:%llx\n", |
1060 | (unsigned long long) server->fsid.major, | 1066 | (unsigned long long) server->fsid.major, |
1061 | (unsigned long long) server->fsid.minor); | 1067 | (unsigned long long) server->fsid.minor); |
@@ -1115,6 +1121,9 @@ struct nfs_server *nfs_clone_server(struct nfs_server *source, | |||
1115 | if (error < 0) | 1121 | if (error < 0) |
1116 | goto out_free_server; | 1122 | goto out_free_server; |
1117 | 1123 | ||
1124 | if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN) | ||
1125 | server->namelen = NFS4_MAXNAMLEN; | ||
1126 | |||
1118 | dprintk("Cloned FSID: %llx:%llx\n", | 1127 | dprintk("Cloned FSID: %llx:%llx\n", |
1119 | (unsigned long long) server->fsid.major, | 1128 | (unsigned long long) server->fsid.major, |
1120 | (unsigned long long) server->fsid.minor); | 1129 | (unsigned long long) server->fsid.minor); |
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index ea97408e423e..e4a04d16b8b0 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c | |||
@@ -1162,6 +1162,8 @@ static struct dentry *nfs_readdir_lookup(nfs_readdir_descriptor_t *desc) | |||
1162 | } | 1162 | } |
1163 | if (!desc->plus || !(entry->fattr->valid & NFS_ATTR_FATTR)) | 1163 | if (!desc->plus || !(entry->fattr->valid & NFS_ATTR_FATTR)) |
1164 | return NULL; | 1164 | return NULL; |
1165 | if (name.len > NFS_SERVER(dir)->namelen) | ||
1166 | return NULL; | ||
1165 | /* Note: caller is already holding the dir->i_mutex! */ | 1167 | /* Note: caller is already holding the dir->i_mutex! */ |
1166 | dentry = d_alloc(parent, &name); | 1168 | dentry = d_alloc(parent, &name); |
1167 | if (dentry == NULL) | 1169 | if (dentry == NULL) |
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c index d1cbf0a0fbb2..522e5ad4d8ad 100644 --- a/fs/nfs/getroot.c +++ b/fs/nfs/getroot.c | |||
@@ -175,6 +175,9 @@ next_component: | |||
175 | path++; | 175 | path++; |
176 | name.len = path - (const char *) name.name; | 176 | name.len = path - (const char *) name.name; |
177 | 177 | ||
178 | if (name.len > NFS4_MAXNAMLEN) | ||
179 | return -ENAMETOOLONG; | ||
180 | |||
178 | eat_dot_dir: | 181 | eat_dot_dir: |
179 | while (*path == '/') | 182 | while (*path == '/') |
180 | path++; | 183 | path++; |
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 50cd8a209012..f37f25c931f5 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c | |||
@@ -930,18 +930,11 @@ static void ocfs2_write_failure(struct inode *inode, | |||
930 | loff_t user_pos, unsigned user_len) | 930 | loff_t user_pos, unsigned user_len) |
931 | { | 931 | { |
932 | int i; | 932 | int i; |
933 | unsigned from, to; | 933 | unsigned from = user_pos & (PAGE_CACHE_SIZE - 1), |
934 | to = user_pos + user_len; | ||
934 | struct page *tmppage; | 935 | struct page *tmppage; |
935 | 936 | ||
936 | ocfs2_zero_new_buffers(wc->w_target_page, user_pos, user_len); | 937 | ocfs2_zero_new_buffers(wc->w_target_page, from, to); |
937 | |||
938 | if (wc->w_large_pages) { | ||
939 | from = wc->w_target_from; | ||
940 | to = wc->w_target_to; | ||
941 | } else { | ||
942 | from = 0; | ||
943 | to = PAGE_CACHE_SIZE; | ||
944 | } | ||
945 | 938 | ||
946 | for(i = 0; i < wc->w_num_pages; i++) { | 939 | for(i = 0; i < wc->w_num_pages; i++) { |
947 | tmppage = wc->w_pages[i]; | 940 | tmppage = wc->w_pages[i]; |
@@ -991,9 +984,6 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno, | |||
991 | map_from = cluster_start; | 984 | map_from = cluster_start; |
992 | map_to = cluster_end; | 985 | map_to = cluster_end; |
993 | } | 986 | } |
994 | |||
995 | wc->w_target_from = map_from; | ||
996 | wc->w_target_to = map_to; | ||
997 | } else { | 987 | } else { |
998 | /* | 988 | /* |
999 | * If we haven't allocated the new page yet, we | 989 | * If we haven't allocated the new page yet, we |
@@ -1211,18 +1201,33 @@ static int ocfs2_write_cluster_by_desc(struct address_space *mapping, | |||
1211 | loff_t pos, unsigned len) | 1201 | loff_t pos, unsigned len) |
1212 | { | 1202 | { |
1213 | int ret, i; | 1203 | int ret, i; |
1204 | loff_t cluster_off; | ||
1205 | unsigned int local_len = len; | ||
1214 | struct ocfs2_write_cluster_desc *desc; | 1206 | struct ocfs2_write_cluster_desc *desc; |
1207 | struct ocfs2_super *osb = OCFS2_SB(mapping->host->i_sb); | ||
1215 | 1208 | ||
1216 | for (i = 0; i < wc->w_clen; i++) { | 1209 | for (i = 0; i < wc->w_clen; i++) { |
1217 | desc = &wc->w_desc[i]; | 1210 | desc = &wc->w_desc[i]; |
1218 | 1211 | ||
1212 | /* | ||
1213 | * We have to make sure that the total write passed in | ||
1214 | * doesn't extend past a single cluster. | ||
1215 | */ | ||
1216 | local_len = len; | ||
1217 | cluster_off = pos & (osb->s_clustersize - 1); | ||
1218 | if ((cluster_off + local_len) > osb->s_clustersize) | ||
1219 | local_len = osb->s_clustersize - cluster_off; | ||
1220 | |||
1219 | ret = ocfs2_write_cluster(mapping, desc->c_phys, | 1221 | ret = ocfs2_write_cluster(mapping, desc->c_phys, |
1220 | desc->c_unwritten, data_ac, meta_ac, | 1222 | desc->c_unwritten, data_ac, meta_ac, |
1221 | wc, desc->c_cpos, pos, len); | 1223 | wc, desc->c_cpos, pos, local_len); |
1222 | if (ret) { | 1224 | if (ret) { |
1223 | mlog_errno(ret); | 1225 | mlog_errno(ret); |
1224 | goto out; | 1226 | goto out; |
1225 | } | 1227 | } |
1228 | |||
1229 | len -= local_len; | ||
1230 | pos += local_len; | ||
1226 | } | 1231 | } |
1227 | 1232 | ||
1228 | ret = 0; | 1233 | ret = 0; |
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 7e34e66159c6..f3bc3658e7a5 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c | |||
@@ -491,8 +491,8 @@ int ocfs2_do_extend_allocation(struct ocfs2_super *osb, | |||
491 | goto leave; | 491 | goto leave; |
492 | } | 492 | } |
493 | 493 | ||
494 | status = ocfs2_claim_clusters(osb, handle, data_ac, 1, | 494 | status = __ocfs2_claim_clusters(osb, handle, data_ac, 1, |
495 | &bit_off, &num_bits); | 495 | clusters_to_add, &bit_off, &num_bits); |
496 | if (status < 0) { | 496 | if (status < 0) { |
497 | if (status != -ENOSPC) | 497 | if (status != -ENOSPC) |
498 | mlog_errno(status); | 498 | mlog_errno(status); |
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c index 545f7892cdf3..de984d272576 100644 --- a/fs/ocfs2/localalloc.c +++ b/fs/ocfs2/localalloc.c | |||
@@ -524,13 +524,12 @@ bail: | |||
524 | int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb, | 524 | int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb, |
525 | handle_t *handle, | 525 | handle_t *handle, |
526 | struct ocfs2_alloc_context *ac, | 526 | struct ocfs2_alloc_context *ac, |
527 | u32 min_bits, | 527 | u32 bits_wanted, |
528 | u32 *bit_off, | 528 | u32 *bit_off, |
529 | u32 *num_bits) | 529 | u32 *num_bits) |
530 | { | 530 | { |
531 | int status, start; | 531 | int status, start; |
532 | struct inode *local_alloc_inode; | 532 | struct inode *local_alloc_inode; |
533 | u32 bits_wanted; | ||
534 | void *bitmap; | 533 | void *bitmap; |
535 | struct ocfs2_dinode *alloc; | 534 | struct ocfs2_dinode *alloc; |
536 | struct ocfs2_local_alloc *la; | 535 | struct ocfs2_local_alloc *la; |
@@ -538,7 +537,6 @@ int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb, | |||
538 | mlog_entry_void(); | 537 | mlog_entry_void(); |
539 | BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL); | 538 | BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL); |
540 | 539 | ||
541 | bits_wanted = ac->ac_bits_wanted - ac->ac_bits_given; | ||
542 | local_alloc_inode = ac->ac_inode; | 540 | local_alloc_inode = ac->ac_inode; |
543 | alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data; | 541 | alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data; |
544 | la = OCFS2_LOCAL_ALLOC(alloc); | 542 | la = OCFS2_LOCAL_ALLOC(alloc); |
diff --git a/fs/ocfs2/localalloc.h b/fs/ocfs2/localalloc.h index 385a10152f9c..3f76631e110c 100644 --- a/fs/ocfs2/localalloc.h +++ b/fs/ocfs2/localalloc.h | |||
@@ -48,7 +48,7 @@ int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb, | |||
48 | int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb, | 48 | int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb, |
49 | handle_t *handle, | 49 | handle_t *handle, |
50 | struct ocfs2_alloc_context *ac, | 50 | struct ocfs2_alloc_context *ac, |
51 | u32 min_bits, | 51 | u32 bits_wanted, |
52 | u32 *bit_off, | 52 | u32 *bit_off, |
53 | u32 *num_bits); | 53 | u32 *num_bits); |
54 | 54 | ||
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index d9c5c9fcb30f..8f09f5235e3a 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c | |||
@@ -1486,21 +1486,21 @@ static inline void ocfs2_block_to_cluster_group(struct inode *inode, | |||
1486 | * contig. allocation, set to '1' to indicate we can deal with extents | 1486 | * contig. allocation, set to '1' to indicate we can deal with extents |
1487 | * of any size. | 1487 | * of any size. |
1488 | */ | 1488 | */ |
1489 | int ocfs2_claim_clusters(struct ocfs2_super *osb, | 1489 | int __ocfs2_claim_clusters(struct ocfs2_super *osb, |
1490 | handle_t *handle, | 1490 | handle_t *handle, |
1491 | struct ocfs2_alloc_context *ac, | 1491 | struct ocfs2_alloc_context *ac, |
1492 | u32 min_clusters, | 1492 | u32 min_clusters, |
1493 | u32 *cluster_start, | 1493 | u32 max_clusters, |
1494 | u32 *num_clusters) | 1494 | u32 *cluster_start, |
1495 | u32 *num_clusters) | ||
1495 | { | 1496 | { |
1496 | int status; | 1497 | int status; |
1497 | unsigned int bits_wanted = ac->ac_bits_wanted - ac->ac_bits_given; | 1498 | unsigned int bits_wanted = max_clusters; |
1498 | u64 bg_blkno = 0; | 1499 | u64 bg_blkno = 0; |
1499 | u16 bg_bit_off; | 1500 | u16 bg_bit_off; |
1500 | 1501 | ||
1501 | mlog_entry_void(); | 1502 | mlog_entry_void(); |
1502 | 1503 | ||
1503 | BUG_ON(!ac); | ||
1504 | BUG_ON(ac->ac_bits_given >= ac->ac_bits_wanted); | 1504 | BUG_ON(ac->ac_bits_given >= ac->ac_bits_wanted); |
1505 | 1505 | ||
1506 | BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL | 1506 | BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL |
@@ -1557,6 +1557,19 @@ bail: | |||
1557 | return status; | 1557 | return status; |
1558 | } | 1558 | } |
1559 | 1559 | ||
1560 | int ocfs2_claim_clusters(struct ocfs2_super *osb, | ||
1561 | handle_t *handle, | ||
1562 | struct ocfs2_alloc_context *ac, | ||
1563 | u32 min_clusters, | ||
1564 | u32 *cluster_start, | ||
1565 | u32 *num_clusters) | ||
1566 | { | ||
1567 | unsigned int bits_wanted = ac->ac_bits_wanted - ac->ac_bits_given; | ||
1568 | |||
1569 | return __ocfs2_claim_clusters(osb, handle, ac, min_clusters, | ||
1570 | bits_wanted, cluster_start, num_clusters); | ||
1571 | } | ||
1572 | |||
1560 | static inline int ocfs2_block_group_clear_bits(handle_t *handle, | 1573 | static inline int ocfs2_block_group_clear_bits(handle_t *handle, |
1561 | struct inode *alloc_inode, | 1574 | struct inode *alloc_inode, |
1562 | struct ocfs2_group_desc *bg, | 1575 | struct ocfs2_group_desc *bg, |
diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h index f212dc01a84b..cafe93703095 100644 --- a/fs/ocfs2/suballoc.h +++ b/fs/ocfs2/suballoc.h | |||
@@ -85,6 +85,17 @@ int ocfs2_claim_clusters(struct ocfs2_super *osb, | |||
85 | u32 min_clusters, | 85 | u32 min_clusters, |
86 | u32 *cluster_start, | 86 | u32 *cluster_start, |
87 | u32 *num_clusters); | 87 | u32 *num_clusters); |
88 | /* | ||
89 | * Use this variant of ocfs2_claim_clusters to specify a maxiumum | ||
90 | * number of clusters smaller than the allocation reserved. | ||
91 | */ | ||
92 | int __ocfs2_claim_clusters(struct ocfs2_super *osb, | ||
93 | handle_t *handle, | ||
94 | struct ocfs2_alloc_context *ac, | ||
95 | u32 min_clusters, | ||
96 | u32 max_clusters, | ||
97 | u32 *cluster_start, | ||
98 | u32 *num_clusters); | ||
88 | 99 | ||
89 | int ocfs2_free_suballoc_bits(handle_t *handle, | 100 | int ocfs2_free_suballoc_bits(handle_t *handle, |
90 | struct inode *alloc_inode, | 101 | struct inode *alloc_inode, |
diff --git a/fs/ocfs2/vote.c b/fs/ocfs2/vote.c index 66a13ee63d4c..c05358538f2b 100644 --- a/fs/ocfs2/vote.c +++ b/fs/ocfs2/vote.c | |||
@@ -66,7 +66,7 @@ struct ocfs2_vote_msg | |||
66 | { | 66 | { |
67 | struct ocfs2_msg_hdr v_hdr; | 67 | struct ocfs2_msg_hdr v_hdr; |
68 | __be32 v_reserved1; | 68 | __be32 v_reserved1; |
69 | }; | 69 | } __attribute__ ((packed)); |
70 | 70 | ||
71 | /* Responses are given these values to maintain backwards | 71 | /* Responses are given these values to maintain backwards |
72 | * compatibility with older ocfs2 versions */ | 72 | * compatibility with older ocfs2 versions */ |
@@ -78,7 +78,7 @@ struct ocfs2_response_msg | |||
78 | { | 78 | { |
79 | struct ocfs2_msg_hdr r_hdr; | 79 | struct ocfs2_msg_hdr r_hdr; |
80 | __be32 r_response; | 80 | __be32 r_response; |
81 | }; | 81 | } __attribute__ ((packed)); |
82 | 82 | ||
83 | struct ocfs2_vote_work { | 83 | struct ocfs2_vote_work { |
84 | struct list_head w_list; | 84 | struct list_head w_list; |
diff --git a/fs/signalfd.c b/fs/signalfd.c index a8e293d30034..aefb0be07942 100644 --- a/fs/signalfd.c +++ b/fs/signalfd.c | |||
@@ -11,8 +11,10 @@ | |||
11 | * Now using anonymous inode source. | 11 | * Now using anonymous inode source. |
12 | * Thanks to Oleg Nesterov for useful code review and suggestions. | 12 | * Thanks to Oleg Nesterov for useful code review and suggestions. |
13 | * More comments and suggestions from Arnd Bergmann. | 13 | * More comments and suggestions from Arnd Bergmann. |
14 | * Sat May 19, 2007: Davi E. M. Arnaut <davi@haxent.com.br> | 14 | * Sat May 19, 2007: Davi E. M. Arnaut <davi@haxent.com.br> |
15 | * Retrieve multiple signals with one read() call | 15 | * Retrieve multiple signals with one read() call |
16 | * Sun Jul 15, 2007: Davide Libenzi <davidel@xmailserver.org> | ||
17 | * Attach to the sighand only during read() and poll(). | ||
16 | */ | 18 | */ |
17 | 19 | ||
18 | #include <linux/file.h> | 20 | #include <linux/file.h> |
@@ -27,102 +29,12 @@ | |||
27 | #include <linux/signalfd.h> | 29 | #include <linux/signalfd.h> |
28 | 30 | ||
29 | struct signalfd_ctx { | 31 | struct signalfd_ctx { |
30 | struct list_head lnk; | ||
31 | wait_queue_head_t wqh; | ||
32 | sigset_t sigmask; | 32 | sigset_t sigmask; |
33 | struct task_struct *tsk; | ||
34 | }; | 33 | }; |
35 | 34 | ||
36 | struct signalfd_lockctx { | ||
37 | struct task_struct *tsk; | ||
38 | unsigned long flags; | ||
39 | }; | ||
40 | |||
41 | /* | ||
42 | * Tries to acquire the sighand lock. We do not increment the sighand | ||
43 | * use count, and we do not even pin the task struct, so we need to | ||
44 | * do it inside an RCU read lock, and we must be prepared for the | ||
45 | * ctx->tsk going to NULL (in signalfd_deliver()), and for the sighand | ||
46 | * being detached. We return 0 if the sighand has been detached, or | ||
47 | * 1 if we were able to pin the sighand lock. | ||
48 | */ | ||
49 | static int signalfd_lock(struct signalfd_ctx *ctx, struct signalfd_lockctx *lk) | ||
50 | { | ||
51 | struct sighand_struct *sighand = NULL; | ||
52 | |||
53 | rcu_read_lock(); | ||
54 | lk->tsk = rcu_dereference(ctx->tsk); | ||
55 | if (likely(lk->tsk != NULL)) | ||
56 | sighand = lock_task_sighand(lk->tsk, &lk->flags); | ||
57 | rcu_read_unlock(); | ||
58 | |||
59 | if (!sighand) | ||
60 | return 0; | ||
61 | |||
62 | if (!ctx->tsk) { | ||
63 | unlock_task_sighand(lk->tsk, &lk->flags); | ||
64 | return 0; | ||
65 | } | ||
66 | |||
67 | if (lk->tsk->tgid == current->tgid) | ||
68 | lk->tsk = current; | ||
69 | |||
70 | return 1; | ||
71 | } | ||
72 | |||
73 | static void signalfd_unlock(struct signalfd_lockctx *lk) | ||
74 | { | ||
75 | unlock_task_sighand(lk->tsk, &lk->flags); | ||
76 | } | ||
77 | |||
78 | /* | ||
79 | * This must be called with the sighand lock held. | ||
80 | */ | ||
81 | void signalfd_deliver(struct task_struct *tsk, int sig) | ||
82 | { | ||
83 | struct sighand_struct *sighand = tsk->sighand; | ||
84 | struct signalfd_ctx *ctx, *tmp; | ||
85 | |||
86 | BUG_ON(!sig); | ||
87 | list_for_each_entry_safe(ctx, tmp, &sighand->signalfd_list, lnk) { | ||
88 | /* | ||
89 | * We use a negative signal value as a way to broadcast that the | ||
90 | * sighand has been orphaned, so that we can notify all the | ||
91 | * listeners about this. Remember the ctx->sigmask is inverted, | ||
92 | * so if the user is interested in a signal, that corresponding | ||
93 | * bit will be zero. | ||
94 | */ | ||
95 | if (sig < 0) { | ||
96 | if (ctx->tsk == tsk) { | ||
97 | ctx->tsk = NULL; | ||
98 | list_del_init(&ctx->lnk); | ||
99 | wake_up(&ctx->wqh); | ||
100 | } | ||
101 | } else { | ||
102 | if (!sigismember(&ctx->sigmask, sig)) | ||
103 | wake_up(&ctx->wqh); | ||
104 | } | ||
105 | } | ||
106 | } | ||
107 | |||
108 | static void signalfd_cleanup(struct signalfd_ctx *ctx) | ||
109 | { | ||
110 | struct signalfd_lockctx lk; | ||
111 | |||
112 | /* | ||
113 | * This is tricky. If the sighand is gone, we do not need to remove | ||
114 | * context from the list, the list itself won't be there anymore. | ||
115 | */ | ||
116 | if (signalfd_lock(ctx, &lk)) { | ||
117 | list_del(&ctx->lnk); | ||
118 | signalfd_unlock(&lk); | ||
119 | } | ||
120 | kfree(ctx); | ||
121 | } | ||
122 | |||
123 | static int signalfd_release(struct inode *inode, struct file *file) | 35 | static int signalfd_release(struct inode *inode, struct file *file) |
124 | { | 36 | { |
125 | signalfd_cleanup(file->private_data); | 37 | kfree(file->private_data); |
126 | return 0; | 38 | return 0; |
127 | } | 39 | } |
128 | 40 | ||
@@ -130,23 +42,15 @@ static unsigned int signalfd_poll(struct file *file, poll_table *wait) | |||
130 | { | 42 | { |
131 | struct signalfd_ctx *ctx = file->private_data; | 43 | struct signalfd_ctx *ctx = file->private_data; |
132 | unsigned int events = 0; | 44 | unsigned int events = 0; |
133 | struct signalfd_lockctx lk; | ||
134 | 45 | ||
135 | poll_wait(file, &ctx->wqh, wait); | 46 | poll_wait(file, ¤t->sighand->signalfd_wqh, wait); |
136 | 47 | ||
137 | /* | 48 | spin_lock_irq(¤t->sighand->siglock); |
138 | * Let the caller get a POLLIN in this case, ala socket recv() when | 49 | if (next_signal(¤t->pending, &ctx->sigmask) || |
139 | * the peer disconnects. | 50 | next_signal(¤t->signal->shared_pending, |
140 | */ | 51 | &ctx->sigmask)) |
141 | if (signalfd_lock(ctx, &lk)) { | ||
142 | if ((lk.tsk == current && | ||
143 | next_signal(&lk.tsk->pending, &ctx->sigmask) > 0) || | ||
144 | next_signal(&lk.tsk->signal->shared_pending, | ||
145 | &ctx->sigmask) > 0) | ||
146 | events |= POLLIN; | ||
147 | signalfd_unlock(&lk); | ||
148 | } else | ||
149 | events |= POLLIN; | 52 | events |= POLLIN; |
53 | spin_unlock_irq(¤t->sighand->siglock); | ||
150 | 54 | ||
151 | return events; | 55 | return events; |
152 | } | 56 | } |
@@ -219,59 +123,46 @@ static ssize_t signalfd_dequeue(struct signalfd_ctx *ctx, siginfo_t *info, | |||
219 | int nonblock) | 123 | int nonblock) |
220 | { | 124 | { |
221 | ssize_t ret; | 125 | ssize_t ret; |
222 | struct signalfd_lockctx lk; | ||
223 | DECLARE_WAITQUEUE(wait, current); | 126 | DECLARE_WAITQUEUE(wait, current); |
224 | 127 | ||
225 | if (!signalfd_lock(ctx, &lk)) | 128 | spin_lock_irq(¤t->sighand->siglock); |
226 | return 0; | 129 | ret = dequeue_signal(current, &ctx->sigmask, info); |
227 | |||
228 | ret = dequeue_signal(lk.tsk, &ctx->sigmask, info); | ||
229 | switch (ret) { | 130 | switch (ret) { |
230 | case 0: | 131 | case 0: |
231 | if (!nonblock) | 132 | if (!nonblock) |
232 | break; | 133 | break; |
233 | ret = -EAGAIN; | 134 | ret = -EAGAIN; |
234 | default: | 135 | default: |
235 | signalfd_unlock(&lk); | 136 | spin_unlock_irq(¤t->sighand->siglock); |
236 | return ret; | 137 | return ret; |
237 | } | 138 | } |
238 | 139 | ||
239 | add_wait_queue(&ctx->wqh, &wait); | 140 | add_wait_queue(¤t->sighand->signalfd_wqh, &wait); |
240 | for (;;) { | 141 | for (;;) { |
241 | set_current_state(TASK_INTERRUPTIBLE); | 142 | set_current_state(TASK_INTERRUPTIBLE); |
242 | ret = dequeue_signal(lk.tsk, &ctx->sigmask, info); | 143 | ret = dequeue_signal(current, &ctx->sigmask, info); |
243 | signalfd_unlock(&lk); | ||
244 | if (ret != 0) | 144 | if (ret != 0) |
245 | break; | 145 | break; |
246 | if (signal_pending(current)) { | 146 | if (signal_pending(current)) { |
247 | ret = -ERESTARTSYS; | 147 | ret = -ERESTARTSYS; |
248 | break; | 148 | break; |
249 | } | 149 | } |
150 | spin_unlock_irq(¤t->sighand->siglock); | ||
250 | schedule(); | 151 | schedule(); |
251 | ret = signalfd_lock(ctx, &lk); | 152 | spin_lock_irq(¤t->sighand->siglock); |
252 | if (unlikely(!ret)) { | ||
253 | /* | ||
254 | * Let the caller read zero byte, ala socket | ||
255 | * recv() when the peer disconnect. This test | ||
256 | * must be done before doing a dequeue_signal(), | ||
257 | * because if the sighand has been orphaned, | ||
258 | * the dequeue_signal() call is going to crash | ||
259 | * because ->sighand will be long gone. | ||
260 | */ | ||
261 | break; | ||
262 | } | ||
263 | } | 153 | } |
154 | spin_unlock_irq(¤t->sighand->siglock); | ||
264 | 155 | ||
265 | remove_wait_queue(&ctx->wqh, &wait); | 156 | remove_wait_queue(¤t->sighand->signalfd_wqh, &wait); |
266 | __set_current_state(TASK_RUNNING); | 157 | __set_current_state(TASK_RUNNING); |
267 | 158 | ||
268 | return ret; | 159 | return ret; |
269 | } | 160 | } |
270 | 161 | ||
271 | /* | 162 | /* |
272 | * Returns either the size of a "struct signalfd_siginfo", or zero if the | 163 | * Returns a multiple of the size of a "struct signalfd_siginfo", or a negative |
273 | * sighand we are attached to, has been orphaned. The "count" parameter | 164 | * error code. The "count" parameter must be at least the size of a |
274 | * must be at least the size of a "struct signalfd_siginfo". | 165 | * "struct signalfd_siginfo". |
275 | */ | 166 | */ |
276 | static ssize_t signalfd_read(struct file *file, char __user *buf, size_t count, | 167 | static ssize_t signalfd_read(struct file *file, char __user *buf, size_t count, |
277 | loff_t *ppos) | 168 | loff_t *ppos) |
@@ -287,7 +178,6 @@ static ssize_t signalfd_read(struct file *file, char __user *buf, size_t count, | |||
287 | return -EINVAL; | 178 | return -EINVAL; |
288 | 179 | ||
289 | siginfo = (struct signalfd_siginfo __user *) buf; | 180 | siginfo = (struct signalfd_siginfo __user *) buf; |
290 | |||
291 | do { | 181 | do { |
292 | ret = signalfd_dequeue(ctx, &info, nonblock); | 182 | ret = signalfd_dequeue(ctx, &info, nonblock); |
293 | if (unlikely(ret <= 0)) | 183 | if (unlikely(ret <= 0)) |
@@ -300,7 +190,7 @@ static ssize_t signalfd_read(struct file *file, char __user *buf, size_t count, | |||
300 | nonblock = 1; | 190 | nonblock = 1; |
301 | } while (--count); | 191 | } while (--count); |
302 | 192 | ||
303 | return total ? total : ret; | 193 | return total ? total: ret; |
304 | } | 194 | } |
305 | 195 | ||
306 | static const struct file_operations signalfd_fops = { | 196 | static const struct file_operations signalfd_fops = { |
@@ -309,20 +199,13 @@ static const struct file_operations signalfd_fops = { | |||
309 | .read = signalfd_read, | 199 | .read = signalfd_read, |
310 | }; | 200 | }; |
311 | 201 | ||
312 | /* | ||
313 | * Create a file descriptor that is associated with our signal | ||
314 | * state. We can pass it around to others if we want to, but | ||
315 | * it will always be _our_ signal state. | ||
316 | */ | ||
317 | asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemask) | 202 | asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemask) |
318 | { | 203 | { |
319 | int error; | 204 | int error; |
320 | sigset_t sigmask; | 205 | sigset_t sigmask; |
321 | struct signalfd_ctx *ctx; | 206 | struct signalfd_ctx *ctx; |
322 | struct sighand_struct *sighand; | ||
323 | struct file *file; | 207 | struct file *file; |
324 | struct inode *inode; | 208 | struct inode *inode; |
325 | struct signalfd_lockctx lk; | ||
326 | 209 | ||
327 | if (sizemask != sizeof(sigset_t) || | 210 | if (sizemask != sizeof(sigset_t) || |
328 | copy_from_user(&sigmask, user_mask, sizeof(sigmask))) | 211 | copy_from_user(&sigmask, user_mask, sizeof(sigmask))) |
@@ -335,17 +218,7 @@ asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemas | |||
335 | if (!ctx) | 218 | if (!ctx) |
336 | return -ENOMEM; | 219 | return -ENOMEM; |
337 | 220 | ||
338 | init_waitqueue_head(&ctx->wqh); | ||
339 | ctx->sigmask = sigmask; | 221 | ctx->sigmask = sigmask; |
340 | ctx->tsk = current->group_leader; | ||
341 | |||
342 | sighand = current->sighand; | ||
343 | /* | ||
344 | * Add this fd to the list of signal listeners. | ||
345 | */ | ||
346 | spin_lock_irq(&sighand->siglock); | ||
347 | list_add_tail(&ctx->lnk, &sighand->signalfd_list); | ||
348 | spin_unlock_irq(&sighand->siglock); | ||
349 | 222 | ||
350 | /* | 223 | /* |
351 | * When we call this, the initialization must be complete, since | 224 | * When we call this, the initialization must be complete, since |
@@ -364,23 +237,18 @@ asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemas | |||
364 | fput(file); | 237 | fput(file); |
365 | return -EINVAL; | 238 | return -EINVAL; |
366 | } | 239 | } |
367 | /* | 240 | spin_lock_irq(¤t->sighand->siglock); |
368 | * We need to be prepared of the fact that the sighand this fd | 241 | ctx->sigmask = sigmask; |
369 | * is attached to, has been detched. In that case signalfd_lock() | 242 | spin_unlock_irq(¤t->sighand->siglock); |
370 | * will return 0, and we'll just skip setting the new mask. | 243 | |
371 | */ | 244 | wake_up(¤t->sighand->signalfd_wqh); |
372 | if (signalfd_lock(ctx, &lk)) { | ||
373 | ctx->sigmask = sigmask; | ||
374 | signalfd_unlock(&lk); | ||
375 | } | ||
376 | wake_up(&ctx->wqh); | ||
377 | fput(file); | 245 | fput(file); |
378 | } | 246 | } |
379 | 247 | ||
380 | return ufd; | 248 | return ufd; |
381 | 249 | ||
382 | err_fdalloc: | 250 | err_fdalloc: |
383 | signalfd_cleanup(ctx); | 251 | kfree(ctx); |
384 | return error; | 252 | return error; |
385 | } | 253 | } |
386 | 254 | ||
diff --git a/fs/splice.c b/fs/splice.c index c010a72ca2d2..e95a36228863 100644 --- a/fs/splice.c +++ b/fs/splice.c | |||
@@ -1224,6 +1224,33 @@ static long do_splice(struct file *in, loff_t __user *off_in, | |||
1224 | } | 1224 | } |
1225 | 1225 | ||
1226 | /* | 1226 | /* |
1227 | * Do a copy-from-user while holding the mmap_semaphore for reading, in a | ||
1228 | * manner safe from deadlocking with simultaneous mmap() (grabbing mmap_sem | ||
1229 | * for writing) and page faulting on the user memory pointed to by src. | ||
1230 | * This assumes that we will very rarely hit the partial != 0 path, or this | ||
1231 | * will not be a win. | ||
1232 | */ | ||
1233 | static int copy_from_user_mmap_sem(void *dst, const void __user *src, size_t n) | ||
1234 | { | ||
1235 | int partial; | ||
1236 | |||
1237 | pagefault_disable(); | ||
1238 | partial = __copy_from_user_inatomic(dst, src, n); | ||
1239 | pagefault_enable(); | ||
1240 | |||
1241 | /* | ||
1242 | * Didn't copy everything, drop the mmap_sem and do a faulting copy | ||
1243 | */ | ||
1244 | if (unlikely(partial)) { | ||
1245 | up_read(¤t->mm->mmap_sem); | ||
1246 | partial = copy_from_user(dst, src, n); | ||
1247 | down_read(¤t->mm->mmap_sem); | ||
1248 | } | ||
1249 | |||
1250 | return partial; | ||
1251 | } | ||
1252 | |||
1253 | /* | ||
1227 | * Map an iov into an array of pages and offset/length tupples. With the | 1254 | * Map an iov into an array of pages and offset/length tupples. With the |
1228 | * partial_page structure, we can map several non-contiguous ranges into | 1255 | * partial_page structure, we can map several non-contiguous ranges into |
1229 | * our ones pages[] map instead of splitting that operation into pieces. | 1256 | * our ones pages[] map instead of splitting that operation into pieces. |
@@ -1236,31 +1263,26 @@ static int get_iovec_page_array(const struct iovec __user *iov, | |||
1236 | { | 1263 | { |
1237 | int buffers = 0, error = 0; | 1264 | int buffers = 0, error = 0; |
1238 | 1265 | ||
1239 | /* | ||
1240 | * It's ok to take the mmap_sem for reading, even | ||
1241 | * across a "get_user()". | ||
1242 | */ | ||
1243 | down_read(¤t->mm->mmap_sem); | 1266 | down_read(¤t->mm->mmap_sem); |
1244 | 1267 | ||
1245 | while (nr_vecs) { | 1268 | while (nr_vecs) { |
1246 | unsigned long off, npages; | 1269 | unsigned long off, npages; |
1270 | struct iovec entry; | ||
1247 | void __user *base; | 1271 | void __user *base; |
1248 | size_t len; | 1272 | size_t len; |
1249 | int i; | 1273 | int i; |
1250 | 1274 | ||
1251 | /* | 1275 | error = -EFAULT; |
1252 | * Get user address base and length for this iovec. | 1276 | if (copy_from_user_mmap_sem(&entry, iov, sizeof(entry))) |
1253 | */ | ||
1254 | error = get_user(base, &iov->iov_base); | ||
1255 | if (unlikely(error)) | ||
1256 | break; | ||
1257 | error = get_user(len, &iov->iov_len); | ||
1258 | if (unlikely(error)) | ||
1259 | break; | 1277 | break; |
1260 | 1278 | ||
1279 | base = entry.iov_base; | ||
1280 | len = entry.iov_len; | ||
1281 | |||
1261 | /* | 1282 | /* |
1262 | * Sanity check this iovec. 0 read succeeds. | 1283 | * Sanity check this iovec. 0 read succeeds. |
1263 | */ | 1284 | */ |
1285 | error = 0; | ||
1264 | if (unlikely(!len)) | 1286 | if (unlikely(!len)) |
1265 | break; | 1287 | break; |
1266 | error = -EFAULT; | 1288 | error = -EFAULT; |
diff --git a/fs/ufs/super.c b/fs/ufs/super.c index 73402c5eeb8a..38eb0b7a1f3d 100644 --- a/fs/ufs/super.c +++ b/fs/ufs/super.c | |||
@@ -894,7 +894,7 @@ magic_found: | |||
894 | goto again; | 894 | goto again; |
895 | } | 895 | } |
896 | 896 | ||
897 | 897 | sbi->s_flags = flags;/*after that line some functions use s_flags*/ | |
898 | ufs_print_super_stuff(sb, usb1, usb2, usb3); | 898 | ufs_print_super_stuff(sb, usb1, usb2, usb3); |
899 | 899 | ||
900 | /* | 900 | /* |
@@ -1025,8 +1025,6 @@ magic_found: | |||
1025 | UFS_MOUNT_UFSTYPE_44BSD) | 1025 | UFS_MOUNT_UFSTYPE_44BSD) |
1026 | uspi->s_maxsymlinklen = | 1026 | uspi->s_maxsymlinklen = |
1027 | fs32_to_cpu(sb, usb3->fs_un2.fs_44.fs_maxsymlinklen); | 1027 | fs32_to_cpu(sb, usb3->fs_un2.fs_44.fs_maxsymlinklen); |
1028 | |||
1029 | sbi->s_flags = flags; | ||
1030 | 1028 | ||
1031 | inode = iget(sb, UFS_ROOTINO); | 1029 | inode = iget(sb, UFS_ROOTINO); |
1032 | if (!inode || is_bad_inode(inode)) | 1030 | if (!inode || is_bad_inode(inode)) |
diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h index fa25b7dcc6c3..d7e136143066 100644 --- a/fs/xfs/xfs_buf_item.h +++ b/fs/xfs/xfs_buf_item.h | |||
@@ -52,11 +52,6 @@ typedef struct xfs_buf_log_format_t { | |||
52 | #define XFS_BLI_UDQUOT_BUF 0x4 | 52 | #define XFS_BLI_UDQUOT_BUF 0x4 |
53 | #define XFS_BLI_PDQUOT_BUF 0x8 | 53 | #define XFS_BLI_PDQUOT_BUF 0x8 |
54 | #define XFS_BLI_GDQUOT_BUF 0x10 | 54 | #define XFS_BLI_GDQUOT_BUF 0x10 |
55 | /* | ||
56 | * This flag indicates that the buffer contains newly allocated | ||
57 | * inodes. | ||
58 | */ | ||
59 | #define XFS_BLI_INODE_NEW_BUF 0x20 | ||
60 | 55 | ||
61 | #define XFS_BLI_CHUNK 128 | 56 | #define XFS_BLI_CHUNK 128 |
62 | #define XFS_BLI_SHIFT 7 | 57 | #define XFS_BLI_SHIFT 7 |
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c index 16f8e175167d..36d8f6aa11af 100644 --- a/fs/xfs/xfs_filestream.c +++ b/fs/xfs/xfs_filestream.c | |||
@@ -350,9 +350,10 @@ _xfs_filestream_update_ag( | |||
350 | /* xfs_fstrm_free_func(): callback for freeing cached stream items. */ | 350 | /* xfs_fstrm_free_func(): callback for freeing cached stream items. */ |
351 | void | 351 | void |
352 | xfs_fstrm_free_func( | 352 | xfs_fstrm_free_func( |
353 | xfs_ino_t ino, | 353 | unsigned long ino, |
354 | fstrm_item_t *item) | 354 | void *data) |
355 | { | 355 | { |
356 | fstrm_item_t *item = (fstrm_item_t *)data; | ||
356 | xfs_inode_t *ip = item->ip; | 357 | xfs_inode_t *ip = item->ip; |
357 | int ref; | 358 | int ref; |
358 | 359 | ||
@@ -438,7 +439,7 @@ xfs_filestream_mount( | |||
438 | grp_count = 10; | 439 | grp_count = 10; |
439 | 440 | ||
440 | err = xfs_mru_cache_create(&mp->m_filestream, lifetime, grp_count, | 441 | err = xfs_mru_cache_create(&mp->m_filestream, lifetime, grp_count, |
441 | (xfs_mru_cache_free_func_t)xfs_fstrm_free_func); | 442 | xfs_fstrm_free_func); |
442 | 443 | ||
443 | return err; | 444 | return err; |
444 | } | 445 | } |
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index dacb19739cc2..8ae6e8e5f3db 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c | |||
@@ -1874,7 +1874,6 @@ xlog_recover_do_inode_buffer( | |||
1874 | /*ARGSUSED*/ | 1874 | /*ARGSUSED*/ |
1875 | STATIC void | 1875 | STATIC void |
1876 | xlog_recover_do_reg_buffer( | 1876 | xlog_recover_do_reg_buffer( |
1877 | xfs_mount_t *mp, | ||
1878 | xlog_recover_item_t *item, | 1877 | xlog_recover_item_t *item, |
1879 | xfs_buf_t *bp, | 1878 | xfs_buf_t *bp, |
1880 | xfs_buf_log_format_t *buf_f) | 1879 | xfs_buf_log_format_t *buf_f) |
@@ -1885,50 +1884,6 @@ xlog_recover_do_reg_buffer( | |||
1885 | unsigned int *data_map = NULL; | 1884 | unsigned int *data_map = NULL; |
1886 | unsigned int map_size = 0; | 1885 | unsigned int map_size = 0; |
1887 | int error; | 1886 | int error; |
1888 | int stale_buf = 1; | ||
1889 | |||
1890 | /* | ||
1891 | * Scan through the on-disk inode buffer and attempt to | ||
1892 | * determine if it has been written to since it was logged. | ||
1893 | * | ||
1894 | * - If any of the magic numbers are incorrect then the buffer is stale | ||
1895 | * - If any of the modes are non-zero then the buffer is not stale | ||
1896 | * - If all of the modes are zero and at least one of the generation | ||
1897 | * counts is non-zero then the buffer is stale | ||
1898 | * | ||
1899 | * If the end result is a stale buffer then the log buffer is replayed | ||
1900 | * otherwise it is skipped. | ||
1901 | * | ||
1902 | * This heuristic is not perfect. It can be improved by scanning the | ||
1903 | * entire inode chunk for evidence that any of the inode clusters have | ||
1904 | * been updated. To fix this problem completely we will need a major | ||
1905 | * architectural change to the logging system. | ||
1906 | */ | ||
1907 | if (buf_f->blf_flags & XFS_BLI_INODE_NEW_BUF) { | ||
1908 | xfs_dinode_t *dip; | ||
1909 | int inodes_per_buf; | ||
1910 | int mode_count = 0; | ||
1911 | int gen_count = 0; | ||
1912 | |||
1913 | stale_buf = 0; | ||
1914 | inodes_per_buf = XFS_BUF_COUNT(bp) >> mp->m_sb.sb_inodelog; | ||
1915 | for (i = 0; i < inodes_per_buf; i++) { | ||
1916 | dip = (xfs_dinode_t *)xfs_buf_offset(bp, | ||
1917 | i * mp->m_sb.sb_inodesize); | ||
1918 | if (be16_to_cpu(dip->di_core.di_magic) != | ||
1919 | XFS_DINODE_MAGIC) { | ||
1920 | stale_buf = 1; | ||
1921 | break; | ||
1922 | } | ||
1923 | if (be16_to_cpu(dip->di_core.di_mode)) | ||
1924 | mode_count++; | ||
1925 | if (be16_to_cpu(dip->di_core.di_gen)) | ||
1926 | gen_count++; | ||
1927 | } | ||
1928 | |||
1929 | if (!mode_count && gen_count) | ||
1930 | stale_buf = 1; | ||
1931 | } | ||
1932 | 1887 | ||
1933 | switch (buf_f->blf_type) { | 1888 | switch (buf_f->blf_type) { |
1934 | case XFS_LI_BUF: | 1889 | case XFS_LI_BUF: |
@@ -1962,7 +1917,7 @@ xlog_recover_do_reg_buffer( | |||
1962 | -1, 0, XFS_QMOPT_DOWARN, | 1917 | -1, 0, XFS_QMOPT_DOWARN, |
1963 | "dquot_buf_recover"); | 1918 | "dquot_buf_recover"); |
1964 | } | 1919 | } |
1965 | if (!error && stale_buf) | 1920 | if (!error) |
1966 | memcpy(xfs_buf_offset(bp, | 1921 | memcpy(xfs_buf_offset(bp, |
1967 | (uint)bit << XFS_BLI_SHIFT), /* dest */ | 1922 | (uint)bit << XFS_BLI_SHIFT), /* dest */ |
1968 | item->ri_buf[i].i_addr, /* source */ | 1923 | item->ri_buf[i].i_addr, /* source */ |
@@ -2134,7 +2089,7 @@ xlog_recover_do_dquot_buffer( | |||
2134 | if (log->l_quotaoffs_flag & type) | 2089 | if (log->l_quotaoffs_flag & type) |
2135 | return; | 2090 | return; |
2136 | 2091 | ||
2137 | xlog_recover_do_reg_buffer(mp, item, bp, buf_f); | 2092 | xlog_recover_do_reg_buffer(item, bp, buf_f); |
2138 | } | 2093 | } |
2139 | 2094 | ||
2140 | /* | 2095 | /* |
@@ -2235,7 +2190,7 @@ xlog_recover_do_buffer_trans( | |||
2235 | (XFS_BLI_UDQUOT_BUF|XFS_BLI_PDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) { | 2190 | (XFS_BLI_UDQUOT_BUF|XFS_BLI_PDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) { |
2236 | xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f); | 2191 | xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f); |
2237 | } else { | 2192 | } else { |
2238 | xlog_recover_do_reg_buffer(mp, item, bp, buf_f); | 2193 | xlog_recover_do_reg_buffer(item, bp, buf_f); |
2239 | } | 2194 | } |
2240 | if (error) | 2195 | if (error) |
2241 | return XFS_ERROR(error); | 2196 | return XFS_ERROR(error); |
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c index 95fff6872a2f..60b6b898022b 100644 --- a/fs/xfs/xfs_trans_buf.c +++ b/fs/xfs/xfs_trans_buf.c | |||
@@ -966,7 +966,6 @@ xfs_trans_inode_alloc_buf( | |||
966 | ASSERT(atomic_read(&bip->bli_refcount) > 0); | 966 | ASSERT(atomic_read(&bip->bli_refcount) > 0); |
967 | 967 | ||
968 | bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF; | 968 | bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF; |
969 | bip->bli_format.blf_flags |= XFS_BLI_INODE_NEW_BUF; | ||
970 | } | 969 | } |
971 | 970 | ||
972 | 971 | ||
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h index 202acb9ff4d0..f85f77a538aa 100644 --- a/include/acpi/acpi_drivers.h +++ b/include/acpi/acpi_drivers.h | |||
@@ -147,10 +147,6 @@ static inline void unregister_hotplug_dock_device(acpi_handle handle) | |||
147 | /*-------------------------------------------------------------------------- | 147 | /*-------------------------------------------------------------------------- |
148 | Suspend/Resume | 148 | Suspend/Resume |
149 | -------------------------------------------------------------------------- */ | 149 | -------------------------------------------------------------------------- */ |
150 | #ifdef CONFIG_ACPI_SLEEP | ||
151 | extern int acpi_sleep_init(void); | 150 | extern int acpi_sleep_init(void); |
152 | #else | ||
153 | static inline int acpi_sleep_init(void) { return 0; } | ||
154 | #endif | ||
155 | 151 | ||
156 | #endif /*__ACPI_DRIVERS_H__*/ | 152 | #endif /*__ACPI_DRIVERS_H__*/ |
diff --git a/include/acpi/processor.h b/include/acpi/processor.h index ec3ffdadb4d2..99934a999e66 100644 --- a/include/acpi/processor.h +++ b/include/acpi/processor.h | |||
@@ -320,6 +320,8 @@ int acpi_processor_power_init(struct acpi_processor *pr, | |||
320 | int acpi_processor_cst_has_changed(struct acpi_processor *pr); | 320 | int acpi_processor_cst_has_changed(struct acpi_processor *pr); |
321 | int acpi_processor_power_exit(struct acpi_processor *pr, | 321 | int acpi_processor_power_exit(struct acpi_processor *pr, |
322 | struct acpi_device *device); | 322 | struct acpi_device *device); |
323 | int acpi_processor_suspend(struct acpi_device * device, pm_message_t state); | ||
324 | int acpi_processor_resume(struct acpi_device * device); | ||
323 | 325 | ||
324 | /* in processor_thermal.c */ | 326 | /* in processor_thermal.c */ |
325 | int acpi_processor_get_limit_info(struct acpi_processor *pr); | 327 | int acpi_processor_get_limit_info(struct acpi_processor *pr); |
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h index 609756c61676..d69ba937e092 100644 --- a/include/asm-i386/system.h +++ b/include/asm-i386/system.h | |||
@@ -214,11 +214,6 @@ static inline unsigned long get_limit(unsigned long segment) | |||
214 | */ | 214 | */ |
215 | 215 | ||
216 | 216 | ||
217 | /* | ||
218 | * Actually only lfence would be needed for mb() because all stores done | ||
219 | * by the kernel should be already ordered. But keep a full barrier for now. | ||
220 | */ | ||
221 | |||
222 | #define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2) | 217 | #define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2) |
223 | #define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2) | 218 | #define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2) |
224 | 219 | ||
diff --git a/include/asm-mips/fcntl.h b/include/asm-mips/fcntl.h index 00a50ec1c19f..2a52333a062d 100644 --- a/include/asm-mips/fcntl.h +++ b/include/asm-mips/fcntl.h | |||
@@ -13,6 +13,7 @@ | |||
13 | #define O_SYNC 0x0010 | 13 | #define O_SYNC 0x0010 |
14 | #define O_NONBLOCK 0x0080 | 14 | #define O_NONBLOCK 0x0080 |
15 | #define O_CREAT 0x0100 /* not fcntl */ | 15 | #define O_CREAT 0x0100 /* not fcntl */ |
16 | #define O_TRUNC 0x0200 /* not fcntl */ | ||
16 | #define O_EXCL 0x0400 /* not fcntl */ | 17 | #define O_EXCL 0x0400 /* not fcntl */ |
17 | #define O_NOCTTY 0x0800 /* not fcntl */ | 18 | #define O_NOCTTY 0x0800 /* not fcntl */ |
18 | #define FASYNC 0x1000 /* fcntl, for BSD compatibility */ | 19 | #define FASYNC 0x1000 /* fcntl, for BSD compatibility */ |
diff --git a/include/asm-mips/irq.h b/include/asm-mips/irq.h index 97102ebc54b1..2cb52cf8bd4e 100644 --- a/include/asm-mips/irq.h +++ b/include/asm-mips/irq.h | |||
@@ -24,7 +24,30 @@ static inline int irq_canonicalize(int irq) | |||
24 | #define irq_canonicalize(irq) (irq) /* Sane hardware, sane code ... */ | 24 | #define irq_canonicalize(irq) (irq) /* Sane hardware, sane code ... */ |
25 | #endif | 25 | #endif |
26 | 26 | ||
27 | #ifdef CONFIG_MIPS_MT_SMTC | ||
28 | |||
29 | struct irqaction; | ||
30 | |||
31 | extern unsigned long irq_hwmask[]; | ||
32 | extern int setup_irq_smtc(unsigned int irq, struct irqaction * new, | ||
33 | unsigned long hwmask); | ||
34 | |||
35 | static inline void smtc_im_ack_irq(unsigned int irq) | ||
36 | { | ||
37 | if (irq_hwmask[irq] & ST0_IM) | ||
38 | set_c0_status(irq_hwmask[irq] & ST0_IM); | ||
39 | } | ||
40 | |||
41 | #else | ||
42 | |||
43 | static inline void smtc_im_ack_irq(unsigned int irq) | ||
44 | { | ||
45 | } | ||
46 | |||
47 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
48 | |||
27 | #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP | 49 | #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP |
50 | |||
28 | /* | 51 | /* |
29 | * Clear interrupt mask handling "backstop" if irq_hwmask | 52 | * Clear interrupt mask handling "backstop" if irq_hwmask |
30 | * entry so indicates. This implies that the ack() or end() | 53 | * entry so indicates. This implies that the ack() or end() |
@@ -38,6 +61,7 @@ do { \ | |||
38 | ~(irq_hwmask[irq] & 0x0000ff00)); \ | 61 | ~(irq_hwmask[irq] & 0x0000ff00)); \ |
39 | } while (0) | 62 | } while (0) |
40 | #else | 63 | #else |
64 | |||
41 | #define __DO_IRQ_SMTC_HOOK(irq) do { } while (0) | 65 | #define __DO_IRQ_SMTC_HOOK(irq) do { } while (0) |
42 | #endif | 66 | #endif |
43 | 67 | ||
@@ -60,14 +84,6 @@ do { \ | |||
60 | extern void arch_init_irq(void); | 84 | extern void arch_init_irq(void); |
61 | extern void spurious_interrupt(void); | 85 | extern void spurious_interrupt(void); |
62 | 86 | ||
63 | #ifdef CONFIG_MIPS_MT_SMTC | ||
64 | struct irqaction; | ||
65 | |||
66 | extern unsigned long irq_hwmask[]; | ||
67 | extern int setup_irq_smtc(unsigned int irq, struct irqaction * new, | ||
68 | unsigned long hwmask); | ||
69 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
70 | |||
71 | extern int allocate_irqno(void); | 87 | extern int allocate_irqno(void); |
72 | extern void alloc_legacy_irqno(void); | 88 | extern void alloc_legacy_irqno(void); |
73 | extern void free_irqno(unsigned int irq); | 89 | extern void free_irqno(unsigned int irq); |
diff --git a/include/asm-mips/page.h b/include/asm-mips/page.h index b92dd8c760da..e3301e54d559 100644 --- a/include/asm-mips/page.h +++ b/include/asm-mips/page.h | |||
@@ -142,7 +142,7 @@ typedef struct { unsigned long pgprot; } pgprot_t; | |||
142 | /* | 142 | /* |
143 | * __pa()/__va() should be used only during mem init. | 143 | * __pa()/__va() should be used only during mem init. |
144 | */ | 144 | */ |
145 | #if defined(CONFIG_64BIT) && !defined(CONFIG_BUILD_ELF64) | 145 | #ifdef CONFIG_64BIT |
146 | #define __pa(x) \ | 146 | #define __pa(x) \ |
147 | ({ \ | 147 | ({ \ |
148 | unsigned long __x = (unsigned long)(x); \ | 148 | unsigned long __x = (unsigned long)(x); \ |
diff --git a/include/asm-x86_64/pgalloc.h b/include/asm-x86_64/pgalloc.h index b467be6d367f..8bb564687860 100644 --- a/include/asm-x86_64/pgalloc.h +++ b/include/asm-x86_64/pgalloc.h | |||
@@ -4,10 +4,6 @@ | |||
4 | #include <asm/pda.h> | 4 | #include <asm/pda.h> |
5 | #include <linux/threads.h> | 5 | #include <linux/threads.h> |
6 | #include <linux/mm.h> | 6 | #include <linux/mm.h> |
7 | #include <linux/quicklist.h> | ||
8 | |||
9 | #define QUICK_PGD 0 /* We preserve special mappings over free */ | ||
10 | #define QUICK_PT 1 /* Other page table pages that are zero on free */ | ||
11 | 7 | ||
12 | #define pmd_populate_kernel(mm, pmd, pte) \ | 8 | #define pmd_populate_kernel(mm, pmd, pte) \ |
13 | set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte))) | 9 | set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte))) |
@@ -24,23 +20,23 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *p | |||
24 | static inline void pmd_free(pmd_t *pmd) | 20 | static inline void pmd_free(pmd_t *pmd) |
25 | { | 21 | { |
26 | BUG_ON((unsigned long)pmd & (PAGE_SIZE-1)); | 22 | BUG_ON((unsigned long)pmd & (PAGE_SIZE-1)); |
27 | quicklist_free(QUICK_PT, NULL, pmd); | 23 | free_page((unsigned long)pmd); |
28 | } | 24 | } |
29 | 25 | ||
30 | static inline pmd_t *pmd_alloc_one (struct mm_struct *mm, unsigned long addr) | 26 | static inline pmd_t *pmd_alloc_one (struct mm_struct *mm, unsigned long addr) |
31 | { | 27 | { |
32 | return (pmd_t *)quicklist_alloc(QUICK_PT, GFP_KERNEL|__GFP_REPEAT, NULL); | 28 | return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); |
33 | } | 29 | } |
34 | 30 | ||
35 | static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) | 31 | static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) |
36 | { | 32 | { |
37 | return (pud_t *)quicklist_alloc(QUICK_PT, GFP_KERNEL|__GFP_REPEAT, NULL); | 33 | return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); |
38 | } | 34 | } |
39 | 35 | ||
40 | static inline void pud_free (pud_t *pud) | 36 | static inline void pud_free (pud_t *pud) |
41 | { | 37 | { |
42 | BUG_ON((unsigned long)pud & (PAGE_SIZE-1)); | 38 | BUG_ON((unsigned long)pud & (PAGE_SIZE-1)); |
43 | quicklist_free(QUICK_PT, NULL, pud); | 39 | free_page((unsigned long)pud); |
44 | } | 40 | } |
45 | 41 | ||
46 | static inline void pgd_list_add(pgd_t *pgd) | 42 | static inline void pgd_list_add(pgd_t *pgd) |
@@ -61,57 +57,41 @@ static inline void pgd_list_del(pgd_t *pgd) | |||
61 | spin_unlock(&pgd_lock); | 57 | spin_unlock(&pgd_lock); |
62 | } | 58 | } |
63 | 59 | ||
64 | static inline void pgd_ctor(void *x) | 60 | static inline pgd_t *pgd_alloc(struct mm_struct *mm) |
65 | { | 61 | { |
66 | unsigned boundary; | 62 | unsigned boundary; |
67 | pgd_t *pgd = x; | 63 | pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT); |
68 | struct page *page = virt_to_page(pgd); | 64 | if (!pgd) |
69 | 65 | return NULL; | |
66 | pgd_list_add(pgd); | ||
70 | /* | 67 | /* |
71 | * Copy kernel pointers in from init. | 68 | * Copy kernel pointers in from init. |
69 | * Could keep a freelist or slab cache of those because the kernel | ||
70 | * part never changes. | ||
72 | */ | 71 | */ |
73 | boundary = pgd_index(__PAGE_OFFSET); | 72 | boundary = pgd_index(__PAGE_OFFSET); |
73 | memset(pgd, 0, boundary * sizeof(pgd_t)); | ||
74 | memcpy(pgd + boundary, | 74 | memcpy(pgd + boundary, |
75 | init_level4_pgt + boundary, | 75 | init_level4_pgt + boundary, |
76 | (PTRS_PER_PGD - boundary) * sizeof(pgd_t)); | 76 | (PTRS_PER_PGD - boundary) * sizeof(pgd_t)); |
77 | |||
78 | spin_lock(&pgd_lock); | ||
79 | list_add(&page->lru, &pgd_list); | ||
80 | spin_unlock(&pgd_lock); | ||
81 | } | ||
82 | |||
83 | static inline void pgd_dtor(void *x) | ||
84 | { | ||
85 | pgd_t *pgd = x; | ||
86 | struct page *page = virt_to_page(pgd); | ||
87 | |||
88 | spin_lock(&pgd_lock); | ||
89 | list_del(&page->lru); | ||
90 | spin_unlock(&pgd_lock); | ||
91 | } | ||
92 | |||
93 | static inline pgd_t *pgd_alloc(struct mm_struct *mm) | ||
94 | { | ||
95 | pgd_t *pgd = (pgd_t *)quicklist_alloc(QUICK_PGD, | ||
96 | GFP_KERNEL|__GFP_REPEAT, pgd_ctor); | ||
97 | return pgd; | 77 | return pgd; |
98 | } | 78 | } |
99 | 79 | ||
100 | static inline void pgd_free(pgd_t *pgd) | 80 | static inline void pgd_free(pgd_t *pgd) |
101 | { | 81 | { |
102 | BUG_ON((unsigned long)pgd & (PAGE_SIZE-1)); | 82 | BUG_ON((unsigned long)pgd & (PAGE_SIZE-1)); |
103 | quicklist_free(QUICK_PGD, pgd_dtor, pgd); | 83 | pgd_list_del(pgd); |
84 | free_page((unsigned long)pgd); | ||
104 | } | 85 | } |
105 | 86 | ||
106 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) | 87 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) |
107 | { | 88 | { |
108 | return (pte_t *)quicklist_alloc(QUICK_PT, GFP_KERNEL|__GFP_REPEAT, NULL); | 89 | return (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); |
109 | } | 90 | } |
110 | 91 | ||
111 | static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address) | 92 | static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address) |
112 | { | 93 | { |
113 | void *p = (void *)quicklist_alloc(QUICK_PT, GFP_KERNEL|__GFP_REPEAT, NULL); | 94 | void *p = (void *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); |
114 | |||
115 | if (!p) | 95 | if (!p) |
116 | return NULL; | 96 | return NULL; |
117 | return virt_to_page(p); | 97 | return virt_to_page(p); |
@@ -123,22 +103,17 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long add | |||
123 | static inline void pte_free_kernel(pte_t *pte) | 103 | static inline void pte_free_kernel(pte_t *pte) |
124 | { | 104 | { |
125 | BUG_ON((unsigned long)pte & (PAGE_SIZE-1)); | 105 | BUG_ON((unsigned long)pte & (PAGE_SIZE-1)); |
126 | quicklist_free(QUICK_PT, NULL, pte); | 106 | free_page((unsigned long)pte); |
127 | } | 107 | } |
128 | 108 | ||
129 | static inline void pte_free(struct page *pte) | 109 | static inline void pte_free(struct page *pte) |
130 | { | 110 | { |
131 | quicklist_free_page(QUICK_PT, NULL, pte); | 111 | __free_page(pte); |
132 | } | 112 | } |
133 | 113 | ||
134 | #define __pte_free_tlb(tlb,pte) quicklist_free_page(QUICK_PT, NULL,(pte)) | 114 | #define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte)) |
135 | 115 | ||
136 | #define __pmd_free_tlb(tlb,x) quicklist_free(QUICK_PT, NULL, (x)) | 116 | #define __pmd_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x)) |
137 | #define __pud_free_tlb(tlb,x) quicklist_free(QUICK_PT, NULL, (x)) | 117 | #define __pud_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x)) |
138 | 118 | ||
139 | static inline void check_pgt_cache(void) | ||
140 | { | ||
141 | quicklist_trim(QUICK_PGD, pgd_dtor, 25, 16); | ||
142 | quicklist_trim(QUICK_PT, NULL, 25, 16); | ||
143 | } | ||
144 | #endif /* _X86_64_PGALLOC_H */ | 119 | #endif /* _X86_64_PGALLOC_H */ |
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h index c9d8764c89d1..57dd6b3107ea 100644 --- a/include/asm-x86_64/pgtable.h +++ b/include/asm-x86_64/pgtable.h | |||
@@ -411,6 +411,7 @@ pte_t *lookup_address(unsigned long addr); | |||
411 | #define HAVE_ARCH_UNMAPPED_AREA | 411 | #define HAVE_ARCH_UNMAPPED_AREA |
412 | 412 | ||
413 | #define pgtable_cache_init() do { } while (0) | 413 | #define pgtable_cache_init() do { } while (0) |
414 | #define check_pgt_cache() do { } while (0) | ||
414 | 415 | ||
415 | #define PAGE_AGP PAGE_KERNEL_NOCACHE | 416 | #define PAGE_AGP PAGE_KERNEL_NOCACHE |
416 | #define HAVE_PAGE_AGP 1 | 417 | #define HAVE_PAGE_AGP 1 |
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 963051a967d6..3ec6e7ff5fbd 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h | |||
@@ -32,15 +32,7 @@ | |||
32 | * CPUFREQ NOTIFIER INTERFACE * | 32 | * CPUFREQ NOTIFIER INTERFACE * |
33 | *********************************************************************/ | 33 | *********************************************************************/ |
34 | 34 | ||
35 | #ifdef CONFIG_CPU_FREQ | ||
36 | int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list); | 35 | int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list); |
37 | #else | ||
38 | static inline int cpufreq_register_notifier(struct notifier_block *nb, | ||
39 | unsigned int list) | ||
40 | { | ||
41 | return 0; | ||
42 | } | ||
43 | #endif | ||
44 | int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list); | 36 | int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list); |
45 | 37 | ||
46 | #define CPUFREQ_TRANSITION_NOTIFIER (0) | 38 | #define CPUFREQ_TRANSITION_NOTIFIER (0) |
@@ -268,22 +260,17 @@ struct freq_attr { | |||
268 | int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu); | 260 | int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu); |
269 | int cpufreq_update_policy(unsigned int cpu); | 261 | int cpufreq_update_policy(unsigned int cpu); |
270 | 262 | ||
263 | /* query the current CPU frequency (in kHz). If zero, cpufreq couldn't detect it */ | ||
264 | unsigned int cpufreq_get(unsigned int cpu); | ||
271 | 265 | ||
272 | /* | 266 | /* query the last known CPU freq (in kHz). If zero, cpufreq couldn't detect it */ |
273 | * query the last known CPU freq (in kHz). If zero, cpufreq couldn't detect it | ||
274 | */ | ||
275 | #ifdef CONFIG_CPU_FREQ | 267 | #ifdef CONFIG_CPU_FREQ |
276 | unsigned int cpufreq_quick_get(unsigned int cpu); | 268 | unsigned int cpufreq_quick_get(unsigned int cpu); |
277 | unsigned int cpufreq_get(unsigned int cpu); | ||
278 | #else | 269 | #else |
279 | static inline unsigned int cpufreq_quick_get(unsigned int cpu) | 270 | static inline unsigned int cpufreq_quick_get(unsigned int cpu) |
280 | { | 271 | { |
281 | return 0; | 272 | return 0; |
282 | } | 273 | } |
283 | static inline unsigned int cpufreq_get(unsigned int cpu) | ||
284 | { | ||
285 | return 0; | ||
286 | } | ||
287 | #endif | 274 | #endif |
288 | 275 | ||
289 | 276 | ||
diff --git a/include/linux/init_task.h b/include/linux/init_task.h index cab741c2d603..f8abfa349ef9 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h | |||
@@ -86,7 +86,7 @@ extern struct nsproxy init_nsproxy; | |||
86 | .count = ATOMIC_INIT(1), \ | 86 | .count = ATOMIC_INIT(1), \ |
87 | .action = { { { .sa_handler = NULL, } }, }, \ | 87 | .action = { { { .sa_handler = NULL, } }, }, \ |
88 | .siglock = __SPIN_LOCK_UNLOCKED(sighand.siglock), \ | 88 | .siglock = __SPIN_LOCK_UNLOCKED(sighand.siglock), \ |
89 | .signalfd_list = LIST_HEAD_INIT(sighand.signalfd_list), \ | 89 | .signalfd_wqh = __WAIT_QUEUE_HEAD_INITIALIZER(sighand.signalfd_wqh), \ |
90 | } | 90 | } |
91 | 91 | ||
92 | extern struct group_info init_groups; | 92 | extern struct group_info init_groups; |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 5445eaec6908..a01ac6dd5f5e 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -438,7 +438,7 @@ struct sighand_struct { | |||
438 | atomic_t count; | 438 | atomic_t count; |
439 | struct k_sigaction action[_NSIG]; | 439 | struct k_sigaction action[_NSIG]; |
440 | spinlock_t siglock; | 440 | spinlock_t siglock; |
441 | struct list_head signalfd_list; | 441 | wait_queue_head_t signalfd_wqh; |
442 | }; | 442 | }; |
443 | 443 | ||
444 | struct pacct_struct { | 444 | struct pacct_struct { |
@@ -1406,6 +1406,7 @@ extern unsigned int sysctl_sched_wakeup_granularity; | |||
1406 | extern unsigned int sysctl_sched_batch_wakeup_granularity; | 1406 | extern unsigned int sysctl_sched_batch_wakeup_granularity; |
1407 | extern unsigned int sysctl_sched_stat_granularity; | 1407 | extern unsigned int sysctl_sched_stat_granularity; |
1408 | extern unsigned int sysctl_sched_runtime_limit; | 1408 | extern unsigned int sysctl_sched_runtime_limit; |
1409 | extern unsigned int sysctl_sched_compat_yield; | ||
1409 | extern unsigned int sysctl_sched_child_runs_first; | 1410 | extern unsigned int sysctl_sched_child_runs_first; |
1410 | extern unsigned int sysctl_sched_features; | 1411 | extern unsigned int sysctl_sched_features; |
1411 | 1412 | ||
diff --git a/include/linux/signalfd.h b/include/linux/signalfd.h index 510429495690..4c9ff0910ae0 100644 --- a/include/linux/signalfd.h +++ b/include/linux/signalfd.h | |||
@@ -45,49 +45,17 @@ struct signalfd_siginfo { | |||
45 | #ifdef CONFIG_SIGNALFD | 45 | #ifdef CONFIG_SIGNALFD |
46 | 46 | ||
47 | /* | 47 | /* |
48 | * Deliver the signal to listening signalfd. This must be called | 48 | * Deliver the signal to listening signalfd. |
49 | * with the sighand lock held. Same are the following that end up | ||
50 | * calling signalfd_deliver(). | ||
51 | */ | ||
52 | void signalfd_deliver(struct task_struct *tsk, int sig); | ||
53 | |||
54 | /* | ||
55 | * No need to fall inside signalfd_deliver() if no signal listeners | ||
56 | * are available. | ||
57 | */ | 49 | */ |
58 | static inline void signalfd_notify(struct task_struct *tsk, int sig) | 50 | static inline void signalfd_notify(struct task_struct *tsk, int sig) |
59 | { | 51 | { |
60 | if (unlikely(!list_empty(&tsk->sighand->signalfd_list))) | 52 | if (unlikely(waitqueue_active(&tsk->sighand->signalfd_wqh))) |
61 | signalfd_deliver(tsk, sig); | 53 | wake_up(&tsk->sighand->signalfd_wqh); |
62 | } | ||
63 | |||
64 | /* | ||
65 | * The signal -1 is used to notify the signalfd that the sighand | ||
66 | * is on its way to be detached. | ||
67 | */ | ||
68 | static inline void signalfd_detach_locked(struct task_struct *tsk) | ||
69 | { | ||
70 | if (unlikely(!list_empty(&tsk->sighand->signalfd_list))) | ||
71 | signalfd_deliver(tsk, -1); | ||
72 | } | ||
73 | |||
74 | static inline void signalfd_detach(struct task_struct *tsk) | ||
75 | { | ||
76 | struct sighand_struct *sighand = tsk->sighand; | ||
77 | |||
78 | if (unlikely(!list_empty(&sighand->signalfd_list))) { | ||
79 | spin_lock_irq(&sighand->siglock); | ||
80 | signalfd_deliver(tsk, -1); | ||
81 | spin_unlock_irq(&sighand->siglock); | ||
82 | } | ||
83 | } | 54 | } |
84 | 55 | ||
85 | #else /* CONFIG_SIGNALFD */ | 56 | #else /* CONFIG_SIGNALFD */ |
86 | 57 | ||
87 | #define signalfd_deliver(t, s) do { } while (0) | 58 | static inline void signalfd_notify(struct task_struct *tsk, int sig) { } |
88 | #define signalfd_notify(t, s) do { } while (0) | ||
89 | #define signalfd_detach_locked(t) do { } while (0) | ||
90 | #define signalfd_detach(t) do { } while (0) | ||
91 | 59 | ||
92 | #endif /* CONFIG_SIGNALFD */ | 60 | #endif /* CONFIG_SIGNALFD */ |
93 | 61 | ||
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index 991c85bb9e36..e8e3a64eb322 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h | |||
@@ -114,7 +114,6 @@ sctp_state_fn_t sctp_sf_do_4_C; | |||
114 | sctp_state_fn_t sctp_sf_eat_data_6_2; | 114 | sctp_state_fn_t sctp_sf_eat_data_6_2; |
115 | sctp_state_fn_t sctp_sf_eat_data_fast_4_4; | 115 | sctp_state_fn_t sctp_sf_eat_data_fast_4_4; |
116 | sctp_state_fn_t sctp_sf_eat_sack_6_2; | 116 | sctp_state_fn_t sctp_sf_eat_sack_6_2; |
117 | sctp_state_fn_t sctp_sf_tabort_8_4_8; | ||
118 | sctp_state_fn_t sctp_sf_operr_notify; | 117 | sctp_state_fn_t sctp_sf_operr_notify; |
119 | sctp_state_fn_t sctp_sf_t1_init_timer_expire; | 118 | sctp_state_fn_t sctp_sf_t1_init_timer_expire; |
120 | sctp_state_fn_t sctp_sf_t1_cookie_timer_expire; | 119 | sctp_state_fn_t sctp_sf_t1_cookie_timer_expire; |
@@ -247,6 +246,9 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *, | |||
247 | int, __be16); | 246 | int, __be16); |
248 | struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc, | 247 | struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc, |
249 | union sctp_addr *addr); | 248 | union sctp_addr *addr); |
249 | int sctp_verify_asconf(const struct sctp_association *asoc, | ||
250 | struct sctp_paramhdr *param_hdr, void *chunk_end, | ||
251 | struct sctp_paramhdr **errp); | ||
250 | struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc, | 252 | struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc, |
251 | struct sctp_chunk *asconf); | 253 | struct sctp_chunk *asconf); |
252 | int sctp_process_asconf_ack(struct sctp_association *asoc, | 254 | int sctp_process_asconf_ack(struct sctp_association *asoc, |
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index c2fe2dcc9afc..baff49dfcdbd 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h | |||
@@ -421,6 +421,7 @@ struct sctp_signed_cookie { | |||
421 | * internally. | 421 | * internally. |
422 | */ | 422 | */ |
423 | union sctp_addr_param { | 423 | union sctp_addr_param { |
424 | struct sctp_paramhdr p; | ||
424 | struct sctp_ipv4addr_param v4; | 425 | struct sctp_ipv4addr_param v4; |
425 | struct sctp_ipv6addr_param v6; | 426 | struct sctp_ipv6addr_param v6; |
426 | }; | 427 | }; |
@@ -1156,7 +1157,7 @@ int sctp_bind_addr_copy(struct sctp_bind_addr *dest, | |||
1156 | int sctp_add_bind_addr(struct sctp_bind_addr *, union sctp_addr *, | 1157 | int sctp_add_bind_addr(struct sctp_bind_addr *, union sctp_addr *, |
1157 | __u8 use_as_src, gfp_t gfp); | 1158 | __u8 use_as_src, gfp_t gfp); |
1158 | int sctp_del_bind_addr(struct sctp_bind_addr *, union sctp_addr *, | 1159 | int sctp_del_bind_addr(struct sctp_bind_addr *, union sctp_addr *, |
1159 | void (*rcu_call)(struct rcu_head *, | 1160 | void fastcall (*rcu_call)(struct rcu_head *, |
1160 | void (*func)(struct rcu_head *))); | 1161 | void (*func)(struct rcu_head *))); |
1161 | int sctp_bind_addr_match(struct sctp_bind_addr *, const union sctp_addr *, | 1162 | int sctp_bind_addr_match(struct sctp_bind_addr *, const union sctp_addr *, |
1162 | struct sctp_sock *); | 1163 | struct sctp_sock *); |
diff --git a/include/net/tcp.h b/include/net/tcp.h index 185c7ecce4cc..54053de0bdd7 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h | |||
@@ -1059,14 +1059,12 @@ struct tcp_md5sig_key { | |||
1059 | }; | 1059 | }; |
1060 | 1060 | ||
1061 | struct tcp4_md5sig_key { | 1061 | struct tcp4_md5sig_key { |
1062 | u8 *key; | 1062 | struct tcp_md5sig_key base; |
1063 | u16 keylen; | ||
1064 | __be32 addr; | 1063 | __be32 addr; |
1065 | }; | 1064 | }; |
1066 | 1065 | ||
1067 | struct tcp6_md5sig_key { | 1066 | struct tcp6_md5sig_key { |
1068 | u8 *key; | 1067 | struct tcp_md5sig_key base; |
1069 | u16 keylen; | ||
1070 | #if 0 | 1068 | #if 0 |
1071 | u32 scope_id; /* XXX */ | 1069 | u32 scope_id; /* XXX */ |
1072 | #endif | 1070 | #endif |
diff --git a/kernel/exit.c b/kernel/exit.c index 06b24b3aa370..993369ee94d1 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
@@ -24,7 +24,6 @@ | |||
24 | #include <linux/pid_namespace.h> | 24 | #include <linux/pid_namespace.h> |
25 | #include <linux/ptrace.h> | 25 | #include <linux/ptrace.h> |
26 | #include <linux/profile.h> | 26 | #include <linux/profile.h> |
27 | #include <linux/signalfd.h> | ||
28 | #include <linux/mount.h> | 27 | #include <linux/mount.h> |
29 | #include <linux/proc_fs.h> | 28 | #include <linux/proc_fs.h> |
30 | #include <linux/kthread.h> | 29 | #include <linux/kthread.h> |
@@ -86,14 +85,6 @@ static void __exit_signal(struct task_struct *tsk) | |||
86 | sighand = rcu_dereference(tsk->sighand); | 85 | sighand = rcu_dereference(tsk->sighand); |
87 | spin_lock(&sighand->siglock); | 86 | spin_lock(&sighand->siglock); |
88 | 87 | ||
89 | /* | ||
90 | * Notify that this sighand has been detached. This must | ||
91 | * be called with the tsk->sighand lock held. Also, this | ||
92 | * access tsk->sighand internally, so it must be called | ||
93 | * before tsk->sighand is reset. | ||
94 | */ | ||
95 | signalfd_detach_locked(tsk); | ||
96 | |||
97 | posix_cpu_timers_exit(tsk); | 88 | posix_cpu_timers_exit(tsk); |
98 | if (atomic_dec_and_test(&sig->count)) | 89 | if (atomic_dec_and_test(&sig->count)) |
99 | posix_cpu_timers_exit_group(tsk); | 90 | posix_cpu_timers_exit_group(tsk); |
diff --git a/kernel/fork.c b/kernel/fork.c index 7332e236d367..33f12f48684a 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -1438,7 +1438,7 @@ static void sighand_ctor(void *data, struct kmem_cache *cachep, | |||
1438 | struct sighand_struct *sighand = data; | 1438 | struct sighand_struct *sighand = data; |
1439 | 1439 | ||
1440 | spin_lock_init(&sighand->siglock); | 1440 | spin_lock_init(&sighand->siglock); |
1441 | INIT_LIST_HEAD(&sighand->signalfd_list); | 1441 | init_waitqueue_head(&sighand->signalfd_wqh); |
1442 | } | 1442 | } |
1443 | 1443 | ||
1444 | void __init proc_caches_init(void) | 1444 | void __init proc_caches_init(void) |
diff --git a/kernel/futex.c b/kernel/futex.c index e8935b195e88..fcc94e7b4086 100644 --- a/kernel/futex.c +++ b/kernel/futex.c | |||
@@ -1943,9 +1943,10 @@ static inline int fetch_robust_entry(struct robust_list __user **entry, | |||
1943 | void exit_robust_list(struct task_struct *curr) | 1943 | void exit_robust_list(struct task_struct *curr) |
1944 | { | 1944 | { |
1945 | struct robust_list_head __user *head = curr->robust_list; | 1945 | struct robust_list_head __user *head = curr->robust_list; |
1946 | struct robust_list __user *entry, *pending; | 1946 | struct robust_list __user *entry, *next_entry, *pending; |
1947 | unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; | 1947 | unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip; |
1948 | unsigned long futex_offset; | 1948 | unsigned long futex_offset; |
1949 | int rc; | ||
1949 | 1950 | ||
1950 | /* | 1951 | /* |
1951 | * Fetch the list head (which was registered earlier, via | 1952 | * Fetch the list head (which was registered earlier, via |
@@ -1965,12 +1966,14 @@ void exit_robust_list(struct task_struct *curr) | |||
1965 | if (fetch_robust_entry(&pending, &head->list_op_pending, &pip)) | 1966 | if (fetch_robust_entry(&pending, &head->list_op_pending, &pip)) |
1966 | return; | 1967 | return; |
1967 | 1968 | ||
1968 | if (pending) | 1969 | next_entry = NULL; /* avoid warning with gcc */ |
1969 | handle_futex_death((void __user *)pending + futex_offset, | ||
1970 | curr, pip); | ||
1971 | |||
1972 | while (entry != &head->list) { | 1970 | while (entry != &head->list) { |
1973 | /* | 1971 | /* |
1972 | * Fetch the next entry in the list before calling | ||
1973 | * handle_futex_death: | ||
1974 | */ | ||
1975 | rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi); | ||
1976 | /* | ||
1974 | * A pending lock might already be on the list, so | 1977 | * A pending lock might already be on the list, so |
1975 | * don't process it twice: | 1978 | * don't process it twice: |
1976 | */ | 1979 | */ |
@@ -1978,11 +1981,10 @@ void exit_robust_list(struct task_struct *curr) | |||
1978 | if (handle_futex_death((void __user *)entry + futex_offset, | 1981 | if (handle_futex_death((void __user *)entry + futex_offset, |
1979 | curr, pi)) | 1982 | curr, pi)) |
1980 | return; | 1983 | return; |
1981 | /* | 1984 | if (rc) |
1982 | * Fetch the next entry in the list: | ||
1983 | */ | ||
1984 | if (fetch_robust_entry(&entry, &entry->next, &pi)) | ||
1985 | return; | 1985 | return; |
1986 | entry = next_entry; | ||
1987 | pi = next_pi; | ||
1986 | /* | 1988 | /* |
1987 | * Avoid excessively long or circular lists: | 1989 | * Avoid excessively long or circular lists: |
1988 | */ | 1990 | */ |
@@ -1991,6 +1993,10 @@ void exit_robust_list(struct task_struct *curr) | |||
1991 | 1993 | ||
1992 | cond_resched(); | 1994 | cond_resched(); |
1993 | } | 1995 | } |
1996 | |||
1997 | if (pending) | ||
1998 | handle_futex_death((void __user *)pending + futex_offset, | ||
1999 | curr, pip); | ||
1994 | } | 2000 | } |
1995 | 2001 | ||
1996 | long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, | 2002 | long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, |
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c index 7e52eb051f22..2c2e2954b713 100644 --- a/kernel/futex_compat.c +++ b/kernel/futex_compat.c | |||
@@ -38,10 +38,11 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry, | |||
38 | void compat_exit_robust_list(struct task_struct *curr) | 38 | void compat_exit_robust_list(struct task_struct *curr) |
39 | { | 39 | { |
40 | struct compat_robust_list_head __user *head = curr->compat_robust_list; | 40 | struct compat_robust_list_head __user *head = curr->compat_robust_list; |
41 | struct robust_list __user *entry, *pending; | 41 | struct robust_list __user *entry, *next_entry, *pending; |
42 | unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; | 42 | unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip; |
43 | compat_uptr_t uentry, upending; | 43 | compat_uptr_t uentry, next_uentry, upending; |
44 | compat_long_t futex_offset; | 44 | compat_long_t futex_offset; |
45 | int rc; | ||
45 | 46 | ||
46 | /* | 47 | /* |
47 | * Fetch the list head (which was registered earlier, via | 48 | * Fetch the list head (which was registered earlier, via |
@@ -61,11 +62,16 @@ void compat_exit_robust_list(struct task_struct *curr) | |||
61 | if (fetch_robust_entry(&upending, &pending, | 62 | if (fetch_robust_entry(&upending, &pending, |
62 | &head->list_op_pending, &pip)) | 63 | &head->list_op_pending, &pip)) |
63 | return; | 64 | return; |
64 | if (pending) | ||
65 | handle_futex_death((void __user *)pending + futex_offset, curr, pip); | ||
66 | 65 | ||
66 | next_entry = NULL; /* avoid warning with gcc */ | ||
67 | while (entry != (struct robust_list __user *) &head->list) { | 67 | while (entry != (struct robust_list __user *) &head->list) { |
68 | /* | 68 | /* |
69 | * Fetch the next entry in the list before calling | ||
70 | * handle_futex_death: | ||
71 | */ | ||
72 | rc = fetch_robust_entry(&next_uentry, &next_entry, | ||
73 | (compat_uptr_t __user *)&entry->next, &next_pi); | ||
74 | /* | ||
69 | * A pending lock might already be on the list, so | 75 | * A pending lock might already be on the list, so |
70 | * dont process it twice: | 76 | * dont process it twice: |
71 | */ | 77 | */ |
@@ -74,12 +80,11 @@ void compat_exit_robust_list(struct task_struct *curr) | |||
74 | curr, pi)) | 80 | curr, pi)) |
75 | return; | 81 | return; |
76 | 82 | ||
77 | /* | 83 | if (rc) |
78 | * Fetch the next entry in the list: | ||
79 | */ | ||
80 | if (fetch_robust_entry(&uentry, &entry, | ||
81 | (compat_uptr_t __user *)&entry->next, &pi)) | ||
82 | return; | 84 | return; |
85 | uentry = next_uentry; | ||
86 | entry = next_entry; | ||
87 | pi = next_pi; | ||
83 | /* | 88 | /* |
84 | * Avoid excessively long or circular lists: | 89 | * Avoid excessively long or circular lists: |
85 | */ | 90 | */ |
@@ -88,6 +93,9 @@ void compat_exit_robust_list(struct task_struct *curr) | |||
88 | 93 | ||
89 | cond_resched(); | 94 | cond_resched(); |
90 | } | 95 | } |
96 | if (pending) | ||
97 | handle_futex_death((void __user *)pending + futex_offset, | ||
98 | curr, pip); | ||
91 | } | 99 | } |
92 | 100 | ||
93 | asmlinkage long | 101 | asmlinkage long |
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index c8580a1e6873..14b0e10dc95c 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig | |||
@@ -110,7 +110,7 @@ config SUSPEND | |||
110 | 110 | ||
111 | config HIBERNATION_UP_POSSIBLE | 111 | config HIBERNATION_UP_POSSIBLE |
112 | bool | 112 | bool |
113 | depends on X86 || PPC64_SWSUSP || FRV || PPC32 | 113 | depends on X86 || PPC64_SWSUSP || PPC32 |
114 | depends on !SMP | 114 | depends on !SMP |
115 | default y | 115 | default y |
116 | 116 | ||
diff --git a/kernel/sched.c b/kernel/sched.c index deeb1f8e0c30..6107a0cd6325 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -1682,6 +1682,11 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags) | |||
1682 | 1682 | ||
1683 | p->prio = effective_prio(p); | 1683 | p->prio = effective_prio(p); |
1684 | 1684 | ||
1685 | if (rt_prio(p->prio)) | ||
1686 | p->sched_class = &rt_sched_class; | ||
1687 | else | ||
1688 | p->sched_class = &fair_sched_class; | ||
1689 | |||
1685 | if (!p->sched_class->task_new || !sysctl_sched_child_runs_first || | 1690 | if (!p->sched_class->task_new || !sysctl_sched_child_runs_first || |
1686 | (clone_flags & CLONE_VM) || task_cpu(p) != this_cpu || | 1691 | (clone_flags & CLONE_VM) || task_cpu(p) != this_cpu || |
1687 | !current->se.on_rq) { | 1692 | !current->se.on_rq) { |
@@ -4550,10 +4555,7 @@ asmlinkage long sys_sched_yield(void) | |||
4550 | struct rq *rq = this_rq_lock(); | 4555 | struct rq *rq = this_rq_lock(); |
4551 | 4556 | ||
4552 | schedstat_inc(rq, yld_cnt); | 4557 | schedstat_inc(rq, yld_cnt); |
4553 | if (unlikely(rq->nr_running == 1)) | 4558 | current->sched_class->yield_task(rq, current); |
4554 | schedstat_inc(rq, yld_act_empty); | ||
4555 | else | ||
4556 | current->sched_class->yield_task(rq, current); | ||
4557 | 4559 | ||
4558 | /* | 4560 | /* |
4559 | * Since we are going to call schedule() anyway, there's | 4561 | * Since we are going to call schedule() anyway, there's |
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 892616bf2c77..67c67a87146e 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c | |||
@@ -43,6 +43,14 @@ unsigned int sysctl_sched_latency __read_mostly = 20000000ULL; | |||
43 | unsigned int sysctl_sched_min_granularity __read_mostly = 2000000ULL; | 43 | unsigned int sysctl_sched_min_granularity __read_mostly = 2000000ULL; |
44 | 44 | ||
45 | /* | 45 | /* |
46 | * sys_sched_yield() compat mode | ||
47 | * | ||
48 | * This option switches the agressive yield implementation of the | ||
49 | * old scheduler back on. | ||
50 | */ | ||
51 | unsigned int __read_mostly sysctl_sched_compat_yield; | ||
52 | |||
53 | /* | ||
46 | * SCHED_BATCH wake-up granularity. | 54 | * SCHED_BATCH wake-up granularity. |
47 | * (default: 25 msec, units: nanoseconds) | 55 | * (default: 25 msec, units: nanoseconds) |
48 | * | 56 | * |
@@ -631,6 +639,16 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) | |||
631 | 639 | ||
632 | se->block_start = 0; | 640 | se->block_start = 0; |
633 | se->sum_sleep_runtime += delta; | 641 | se->sum_sleep_runtime += delta; |
642 | |||
643 | /* | ||
644 | * Blocking time is in units of nanosecs, so shift by 20 to | ||
645 | * get a milliseconds-range estimation of the amount of | ||
646 | * time that the task spent sleeping: | ||
647 | */ | ||
648 | if (unlikely(prof_on == SLEEP_PROFILING)) { | ||
649 | profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk), | ||
650 | delta >> 20); | ||
651 | } | ||
634 | } | 652 | } |
635 | #endif | 653 | #endif |
636 | } | 654 | } |
@@ -897,19 +915,62 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep) | |||
897 | } | 915 | } |
898 | 916 | ||
899 | /* | 917 | /* |
900 | * sched_yield() support is very simple - we dequeue and enqueue | 918 | * sched_yield() support is very simple - we dequeue and enqueue. |
919 | * | ||
920 | * If compat_yield is turned on then we requeue to the end of the tree. | ||
901 | */ | 921 | */ |
902 | static void yield_task_fair(struct rq *rq, struct task_struct *p) | 922 | static void yield_task_fair(struct rq *rq, struct task_struct *p) |
903 | { | 923 | { |
904 | struct cfs_rq *cfs_rq = task_cfs_rq(p); | 924 | struct cfs_rq *cfs_rq = task_cfs_rq(p); |
925 | struct rb_node **link = &cfs_rq->tasks_timeline.rb_node; | ||
926 | struct sched_entity *rightmost, *se = &p->se; | ||
927 | struct rb_node *parent; | ||
905 | 928 | ||
906 | __update_rq_clock(rq); | ||
907 | /* | 929 | /* |
908 | * Dequeue and enqueue the task to update its | 930 | * Are we the only task in the tree? |
909 | * position within the tree: | 931 | */ |
932 | if (unlikely(cfs_rq->nr_running == 1)) | ||
933 | return; | ||
934 | |||
935 | if (likely(!sysctl_sched_compat_yield)) { | ||
936 | __update_rq_clock(rq); | ||
937 | /* | ||
938 | * Dequeue and enqueue the task to update its | ||
939 | * position within the tree: | ||
940 | */ | ||
941 | dequeue_entity(cfs_rq, &p->se, 0); | ||
942 | enqueue_entity(cfs_rq, &p->se, 0); | ||
943 | |||
944 | return; | ||
945 | } | ||
946 | /* | ||
947 | * Find the rightmost entry in the rbtree: | ||
948 | */ | ||
949 | do { | ||
950 | parent = *link; | ||
951 | link = &parent->rb_right; | ||
952 | } while (*link); | ||
953 | |||
954 | rightmost = rb_entry(parent, struct sched_entity, run_node); | ||
955 | /* | ||
956 | * Already in the rightmost position? | ||
910 | */ | 957 | */ |
911 | dequeue_entity(cfs_rq, &p->se, 0); | 958 | if (unlikely(rightmost == se)) |
912 | enqueue_entity(cfs_rq, &p->se, 0); | 959 | return; |
960 | |||
961 | /* | ||
962 | * Minimally necessary key value to be last in the tree: | ||
963 | */ | ||
964 | se->fair_key = rightmost->fair_key + 1; | ||
965 | |||
966 | if (cfs_rq->rb_leftmost == &se->run_node) | ||
967 | cfs_rq->rb_leftmost = rb_next(&se->run_node); | ||
968 | /* | ||
969 | * Relink the task to the rightmost position: | ||
970 | */ | ||
971 | rb_erase(&se->run_node, &cfs_rq->tasks_timeline); | ||
972 | rb_link_node(&se->run_node, parent, link); | ||
973 | rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline); | ||
913 | } | 974 | } |
914 | 975 | ||
915 | /* | 976 | /* |
diff --git a/kernel/signal.c b/kernel/signal.c index 3169bed0b4d0..9fb91a32edda 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
@@ -378,8 +378,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) | |||
378 | /* We only dequeue private signals from ourselves, we don't let | 378 | /* We only dequeue private signals from ourselves, we don't let |
379 | * signalfd steal them | 379 | * signalfd steal them |
380 | */ | 380 | */ |
381 | if (likely(tsk == current)) | 381 | signr = __dequeue_signal(&tsk->pending, mask, info); |
382 | signr = __dequeue_signal(&tsk->pending, mask, info); | ||
383 | if (!signr) { | 382 | if (!signr) { |
384 | signr = __dequeue_signal(&tsk->signal->shared_pending, | 383 | signr = __dequeue_signal(&tsk->signal->shared_pending, |
385 | mask, info); | 384 | mask, info); |
@@ -407,8 +406,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) | |||
407 | } | 406 | } |
408 | } | 407 | } |
409 | } | 408 | } |
410 | if (likely(tsk == current)) | 409 | recalc_sigpending(); |
411 | recalc_sigpending(); | ||
412 | if (signr && unlikely(sig_kernel_stop(signr))) { | 410 | if (signr && unlikely(sig_kernel_stop(signr))) { |
413 | /* | 411 | /* |
414 | * Set a marker that we have dequeued a stop signal. Our | 412 | * Set a marker that we have dequeued a stop signal. Our |
@@ -425,7 +423,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) | |||
425 | if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT)) | 423 | if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT)) |
426 | tsk->signal->flags |= SIGNAL_STOP_DEQUEUED; | 424 | tsk->signal->flags |= SIGNAL_STOP_DEQUEUED; |
427 | } | 425 | } |
428 | if (signr && likely(tsk == current) && | 426 | if (signr && |
429 | ((info->si_code & __SI_MASK) == __SI_TIMER) && | 427 | ((info->si_code & __SI_MASK) == __SI_TIMER) && |
430 | info->si_sys_private){ | 428 | info->si_sys_private){ |
431 | /* | 429 | /* |
diff --git a/kernel/sys.c b/kernel/sys.c index 1b33b05d346b..8ae2e636eb1b 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/getcpu.h> | 32 | #include <linux/getcpu.h> |
33 | #include <linux/task_io_accounting_ops.h> | 33 | #include <linux/task_io_accounting_ops.h> |
34 | #include <linux/seccomp.h> | 34 | #include <linux/seccomp.h> |
35 | #include <linux/cpu.h> | ||
35 | 36 | ||
36 | #include <linux/compat.h> | 37 | #include <linux/compat.h> |
37 | #include <linux/syscalls.h> | 38 | #include <linux/syscalls.h> |
@@ -878,6 +879,7 @@ void kernel_power_off(void) | |||
878 | kernel_shutdown_prepare(SYSTEM_POWER_OFF); | 879 | kernel_shutdown_prepare(SYSTEM_POWER_OFF); |
879 | if (pm_power_off_prepare) | 880 | if (pm_power_off_prepare) |
880 | pm_power_off_prepare(); | 881 | pm_power_off_prepare(); |
882 | disable_nonboot_cpus(); | ||
881 | sysdev_shutdown(); | 883 | sysdev_shutdown(); |
882 | printk(KERN_EMERG "Power down.\n"); | 884 | printk(KERN_EMERG "Power down.\n"); |
883 | machine_power_off(); | 885 | machine_power_off(); |
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 6ace893c17c9..53a456ebf6d5 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
@@ -303,6 +303,14 @@ static ctl_table kern_table[] = { | |||
303 | .proc_handler = &proc_dointvec, | 303 | .proc_handler = &proc_dointvec, |
304 | }, | 304 | }, |
305 | #endif | 305 | #endif |
306 | { | ||
307 | .ctl_name = CTL_UNNUMBERED, | ||
308 | .procname = "sched_compat_yield", | ||
309 | .data = &sysctl_sched_compat_yield, | ||
310 | .maxlen = sizeof(unsigned int), | ||
311 | .mode = 0644, | ||
312 | .proc_handler = &proc_dointvec, | ||
313 | }, | ||
306 | #ifdef CONFIG_PROVE_LOCKING | 314 | #ifdef CONFIG_PROVE_LOCKING |
307 | { | 315 | { |
308 | .ctl_name = CTL_UNNUMBERED, | 316 | .ctl_name = CTL_UNNUMBERED, |
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index aab881c86a1a..0962e0577660 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c | |||
@@ -382,23 +382,8 @@ static int tick_broadcast_set_event(ktime_t expires, int force) | |||
382 | 382 | ||
383 | int tick_resume_broadcast_oneshot(struct clock_event_device *bc) | 383 | int tick_resume_broadcast_oneshot(struct clock_event_device *bc) |
384 | { | 384 | { |
385 | int cpu = smp_processor_id(); | ||
386 | |||
387 | /* | ||
388 | * If the CPU is marked for broadcast, enforce oneshot | ||
389 | * broadcast mode. The jinxed VAIO does not resume otherwise. | ||
390 | * No idea why it ends up in a lower C State during resume | ||
391 | * without notifying the clock events layer. | ||
392 | */ | ||
393 | if (cpu_isset(cpu, tick_broadcast_mask)) | ||
394 | cpu_set(cpu, tick_broadcast_oneshot_mask); | ||
395 | |||
396 | clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT); | 385 | clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT); |
397 | 386 | return 0; | |
398 | if(!cpus_empty(tick_broadcast_oneshot_mask)) | ||
399 | tick_broadcast_set_event(ktime_get(), 1); | ||
400 | |||
401 | return cpu_isset(cpu, tick_broadcast_oneshot_mask); | ||
402 | } | 387 | } |
403 | 388 | ||
404 | /* | 389 | /* |
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 51e2fd0d851c..39fc6ae173b9 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
@@ -284,7 +284,7 @@ config LOCKDEP | |||
284 | select KALLSYMS_ALL | 284 | select KALLSYMS_ALL |
285 | 285 | ||
286 | config LOCK_STAT | 286 | config LOCK_STAT |
287 | bool "Lock usage statisitics" | 287 | bool "Lock usage statistics" |
288 | depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT | 288 | depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT |
289 | select LOCKDEP | 289 | select LOCKDEP |
290 | select DEBUG_SPINLOCK | 290 | select DEBUG_SPINLOCK |
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 84c795ee2d65..eab8c428cc93 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
@@ -42,7 +42,7 @@ static void clear_huge_page(struct page *page, unsigned long addr) | |||
42 | might_sleep(); | 42 | might_sleep(); |
43 | for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) { | 43 | for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) { |
44 | cond_resched(); | 44 | cond_resched(); |
45 | clear_user_highpage(page + i, addr); | 45 | clear_user_highpage(page + i, addr + i * PAGE_SIZE); |
46 | } | 46 | } |
47 | } | 47 | } |
48 | 48 | ||
diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c index f2de2e48b021..6284c99b456e 100644 --- a/net/ieee80211/ieee80211_rx.c +++ b/net/ieee80211/ieee80211_rx.c | |||
@@ -366,6 +366,12 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, | |||
366 | frag = WLAN_GET_SEQ_FRAG(sc); | 366 | frag = WLAN_GET_SEQ_FRAG(sc); |
367 | hdrlen = ieee80211_get_hdrlen(fc); | 367 | hdrlen = ieee80211_get_hdrlen(fc); |
368 | 368 | ||
369 | if (skb->len < hdrlen) { | ||
370 | printk(KERN_INFO "%s: invalid SKB length %d\n", | ||
371 | dev->name, skb->len); | ||
372 | goto rx_dropped; | ||
373 | } | ||
374 | |||
369 | /* Put this code here so that we avoid duplicating it in all | 375 | /* Put this code here so that we avoid duplicating it in all |
370 | * Rx paths. - Jean II */ | 376 | * Rx paths. - Jean II */ |
371 | #ifdef CONFIG_WIRELESS_EXT | 377 | #ifdef CONFIG_WIRELESS_EXT |
diff --git a/net/ieee80211/softmac/ieee80211softmac_assoc.c b/net/ieee80211/softmac/ieee80211softmac_assoc.c index afb6c6698b27..e475f2e1be13 100644 --- a/net/ieee80211/softmac/ieee80211softmac_assoc.c +++ b/net/ieee80211/softmac/ieee80211softmac_assoc.c | |||
@@ -273,8 +273,6 @@ ieee80211softmac_assoc_work(struct work_struct *work) | |||
273 | ieee80211softmac_notify(mac->dev, IEEE80211SOFTMAC_EVENT_SCAN_FINISHED, ieee80211softmac_assoc_notify_scan, NULL); | 273 | ieee80211softmac_notify(mac->dev, IEEE80211SOFTMAC_EVENT_SCAN_FINISHED, ieee80211softmac_assoc_notify_scan, NULL); |
274 | if (ieee80211softmac_start_scan(mac)) { | 274 | if (ieee80211softmac_start_scan(mac)) { |
275 | dprintk(KERN_INFO PFX "Associate: failed to initiate scan. Is device up?\n"); | 275 | dprintk(KERN_INFO PFX "Associate: failed to initiate scan. Is device up?\n"); |
276 | mac->associnfo.associating = 0; | ||
277 | mac->associnfo.associated = 0; | ||
278 | } | 276 | } |
279 | goto out; | 277 | goto out; |
280 | } else { | 278 | } else { |
diff --git a/net/ieee80211/softmac/ieee80211softmac_wx.c b/net/ieee80211/softmac/ieee80211softmac_wx.c index d054e9224b3e..442b9875f3fb 100644 --- a/net/ieee80211/softmac/ieee80211softmac_wx.c +++ b/net/ieee80211/softmac/ieee80211softmac_wx.c | |||
@@ -70,44 +70,30 @@ ieee80211softmac_wx_set_essid(struct net_device *net_dev, | |||
70 | char *extra) | 70 | char *extra) |
71 | { | 71 | { |
72 | struct ieee80211softmac_device *sm = ieee80211_priv(net_dev); | 72 | struct ieee80211softmac_device *sm = ieee80211_priv(net_dev); |
73 | struct ieee80211softmac_network *n; | ||
74 | struct ieee80211softmac_auth_queue_item *authptr; | 73 | struct ieee80211softmac_auth_queue_item *authptr; |
75 | int length = 0; | 74 | int length = 0; |
76 | 75 | ||
77 | check_assoc_again: | 76 | check_assoc_again: |
78 | mutex_lock(&sm->associnfo.mutex); | 77 | mutex_lock(&sm->associnfo.mutex); |
79 | /* Check if we're already associating to this or another network | ||
80 | * If it's another network, cancel and start over with our new network | ||
81 | * If it's our network, ignore the change, we're already doing it! | ||
82 | */ | ||
83 | if((sm->associnfo.associating || sm->associnfo.associated) && | 78 | if((sm->associnfo.associating || sm->associnfo.associated) && |
84 | (data->essid.flags && data->essid.length)) { | 79 | (data->essid.flags && data->essid.length)) { |
85 | /* Get the associating network */ | 80 | dprintk(KERN_INFO PFX "Canceling existing associate request!\n"); |
86 | n = ieee80211softmac_get_network_by_bssid(sm, sm->associnfo.bssid); | 81 | /* Cancel assoc work */ |
87 | if(n && n->essid.len == data->essid.length && | 82 | cancel_delayed_work(&sm->associnfo.work); |
88 | !memcmp(n->essid.data, extra, n->essid.len)) { | 83 | /* We don't have to do this, but it's a little cleaner */ |
89 | dprintk(KERN_INFO PFX "Already associating or associated to "MAC_FMT"\n", | 84 | list_for_each_entry(authptr, &sm->auth_queue, list) |
90 | MAC_ARG(sm->associnfo.bssid)); | 85 | cancel_delayed_work(&authptr->work); |
91 | goto out; | 86 | sm->associnfo.bssvalid = 0; |
92 | } else { | 87 | sm->associnfo.bssfixed = 0; |
93 | dprintk(KERN_INFO PFX "Canceling existing associate request!\n"); | 88 | sm->associnfo.associating = 0; |
94 | /* Cancel assoc work */ | 89 | sm->associnfo.associated = 0; |
95 | cancel_delayed_work(&sm->associnfo.work); | 90 | /* We must unlock to avoid deadlocks with the assoc workqueue |
96 | /* We don't have to do this, but it's a little cleaner */ | 91 | * on the associnfo.mutex */ |
97 | list_for_each_entry(authptr, &sm->auth_queue, list) | 92 | mutex_unlock(&sm->associnfo.mutex); |
98 | cancel_delayed_work(&authptr->work); | 93 | flush_scheduled_work(); |
99 | sm->associnfo.bssvalid = 0; | 94 | /* Avoid race! Check assoc status again. Maybe someone started an |
100 | sm->associnfo.bssfixed = 0; | 95 | * association while we flushed. */ |
101 | sm->associnfo.associating = 0; | 96 | goto check_assoc_again; |
102 | sm->associnfo.associated = 0; | ||
103 | /* We must unlock to avoid deadlocks with the assoc workqueue | ||
104 | * on the associnfo.mutex */ | ||
105 | mutex_unlock(&sm->associnfo.mutex); | ||
106 | flush_scheduled_work(); | ||
107 | /* Avoid race! Check assoc status again. Maybe someone started an | ||
108 | * association while we flushed. */ | ||
109 | goto check_assoc_again; | ||
110 | } | ||
111 | } | 97 | } |
112 | 98 | ||
113 | sm->associnfo.static_essid = 0; | 99 | sm->associnfo.static_essid = 0; |
@@ -153,13 +139,13 @@ ieee80211softmac_wx_get_essid(struct net_device *net_dev, | |||
153 | data->essid.length = sm->associnfo.req_essid.len; | 139 | data->essid.length = sm->associnfo.req_essid.len; |
154 | data->essid.flags = 1; /* active */ | 140 | data->essid.flags = 1; /* active */ |
155 | memcpy(extra, sm->associnfo.req_essid.data, sm->associnfo.req_essid.len); | 141 | memcpy(extra, sm->associnfo.req_essid.data, sm->associnfo.req_essid.len); |
156 | } | 142 | dprintk(KERN_INFO PFX "Getting essid from req_essid\n"); |
157 | 143 | } else if (sm->associnfo.associated || sm->associnfo.associating) { | |
158 | /* If we're associating/associated, return that */ | 144 | /* If we're associating/associated, return that */ |
159 | if (sm->associnfo.associated || sm->associnfo.associating) { | ||
160 | data->essid.length = sm->associnfo.associate_essid.len; | 145 | data->essid.length = sm->associnfo.associate_essid.len; |
161 | data->essid.flags = 1; /* active */ | 146 | data->essid.flags = 1; /* active */ |
162 | memcpy(extra, sm->associnfo.associate_essid.data, sm->associnfo.associate_essid.len); | 147 | memcpy(extra, sm->associnfo.associate_essid.data, sm->associnfo.associate_essid.len); |
148 | dprintk(KERN_INFO PFX "Getting essid from associate_essid\n"); | ||
163 | } | 149 | } |
164 | mutex_unlock(&sm->associnfo.mutex); | 150 | mutex_unlock(&sm->associnfo.mutex); |
165 | 151 | ||
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 9c94627c8c7e..e089a978e128 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -833,8 +833,7 @@ static struct tcp_md5sig_key * | |||
833 | return NULL; | 833 | return NULL; |
834 | for (i = 0; i < tp->md5sig_info->entries4; i++) { | 834 | for (i = 0; i < tp->md5sig_info->entries4; i++) { |
835 | if (tp->md5sig_info->keys4[i].addr == addr) | 835 | if (tp->md5sig_info->keys4[i].addr == addr) |
836 | return (struct tcp_md5sig_key *) | 836 | return &tp->md5sig_info->keys4[i].base; |
837 | &tp->md5sig_info->keys4[i]; | ||
838 | } | 837 | } |
839 | return NULL; | 838 | return NULL; |
840 | } | 839 | } |
@@ -865,9 +864,9 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr, | |||
865 | key = (struct tcp4_md5sig_key *)tcp_v4_md5_do_lookup(sk, addr); | 864 | key = (struct tcp4_md5sig_key *)tcp_v4_md5_do_lookup(sk, addr); |
866 | if (key) { | 865 | if (key) { |
867 | /* Pre-existing entry - just update that one. */ | 866 | /* Pre-existing entry - just update that one. */ |
868 | kfree(key->key); | 867 | kfree(key->base.key); |
869 | key->key = newkey; | 868 | key->base.key = newkey; |
870 | key->keylen = newkeylen; | 869 | key->base.keylen = newkeylen; |
871 | } else { | 870 | } else { |
872 | struct tcp_md5sig_info *md5sig; | 871 | struct tcp_md5sig_info *md5sig; |
873 | 872 | ||
@@ -906,9 +905,9 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr, | |||
906 | md5sig->alloced4++; | 905 | md5sig->alloced4++; |
907 | } | 906 | } |
908 | md5sig->entries4++; | 907 | md5sig->entries4++; |
909 | md5sig->keys4[md5sig->entries4 - 1].addr = addr; | 908 | md5sig->keys4[md5sig->entries4 - 1].addr = addr; |
910 | md5sig->keys4[md5sig->entries4 - 1].key = newkey; | 909 | md5sig->keys4[md5sig->entries4 - 1].base.key = newkey; |
911 | md5sig->keys4[md5sig->entries4 - 1].keylen = newkeylen; | 910 | md5sig->keys4[md5sig->entries4 - 1].base.keylen = newkeylen; |
912 | } | 911 | } |
913 | return 0; | 912 | return 0; |
914 | } | 913 | } |
@@ -930,7 +929,7 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr) | |||
930 | for (i = 0; i < tp->md5sig_info->entries4; i++) { | 929 | for (i = 0; i < tp->md5sig_info->entries4; i++) { |
931 | if (tp->md5sig_info->keys4[i].addr == addr) { | 930 | if (tp->md5sig_info->keys4[i].addr == addr) { |
932 | /* Free the key */ | 931 | /* Free the key */ |
933 | kfree(tp->md5sig_info->keys4[i].key); | 932 | kfree(tp->md5sig_info->keys4[i].base.key); |
934 | tp->md5sig_info->entries4--; | 933 | tp->md5sig_info->entries4--; |
935 | 934 | ||
936 | if (tp->md5sig_info->entries4 == 0) { | 935 | if (tp->md5sig_info->entries4 == 0) { |
@@ -964,7 +963,7 @@ static void tcp_v4_clear_md5_list(struct sock *sk) | |||
964 | if (tp->md5sig_info->entries4) { | 963 | if (tp->md5sig_info->entries4) { |
965 | int i; | 964 | int i; |
966 | for (i = 0; i < tp->md5sig_info->entries4; i++) | 965 | for (i = 0; i < tp->md5sig_info->entries4; i++) |
967 | kfree(tp->md5sig_info->keys4[i].key); | 966 | kfree(tp->md5sig_info->keys4[i].base.key); |
968 | tp->md5sig_info->entries4 = 0; | 967 | tp->md5sig_info->entries4 = 0; |
969 | tcp_free_md5sig_pool(); | 968 | tcp_free_md5sig_pool(); |
970 | } | 969 | } |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 0f7defb482e9..3e06799b37a6 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -539,7 +539,7 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk, | |||
539 | 539 | ||
540 | for (i = 0; i < tp->md5sig_info->entries6; i++) { | 540 | for (i = 0; i < tp->md5sig_info->entries6; i++) { |
541 | if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, addr) == 0) | 541 | if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, addr) == 0) |
542 | return (struct tcp_md5sig_key *)&tp->md5sig_info->keys6[i]; | 542 | return &tp->md5sig_info->keys6[i].base; |
543 | } | 543 | } |
544 | return NULL; | 544 | return NULL; |
545 | } | 545 | } |
@@ -567,9 +567,9 @@ static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer, | |||
567 | key = (struct tcp6_md5sig_key*) tcp_v6_md5_do_lookup(sk, peer); | 567 | key = (struct tcp6_md5sig_key*) tcp_v6_md5_do_lookup(sk, peer); |
568 | if (key) { | 568 | if (key) { |
569 | /* modify existing entry - just update that one */ | 569 | /* modify existing entry - just update that one */ |
570 | kfree(key->key); | 570 | kfree(key->base.key); |
571 | key->key = newkey; | 571 | key->base.key = newkey; |
572 | key->keylen = newkeylen; | 572 | key->base.keylen = newkeylen; |
573 | } else { | 573 | } else { |
574 | /* reallocate new list if current one is full. */ | 574 | /* reallocate new list if current one is full. */ |
575 | if (!tp->md5sig_info) { | 575 | if (!tp->md5sig_info) { |
@@ -603,8 +603,8 @@ static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer, | |||
603 | 603 | ||
604 | ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr, | 604 | ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr, |
605 | peer); | 605 | peer); |
606 | tp->md5sig_info->keys6[tp->md5sig_info->entries6].key = newkey; | 606 | tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey; |
607 | tp->md5sig_info->keys6[tp->md5sig_info->entries6].keylen = newkeylen; | 607 | tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen; |
608 | 608 | ||
609 | tp->md5sig_info->entries6++; | 609 | tp->md5sig_info->entries6++; |
610 | } | 610 | } |
@@ -626,7 +626,7 @@ static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer) | |||
626 | for (i = 0; i < tp->md5sig_info->entries6; i++) { | 626 | for (i = 0; i < tp->md5sig_info->entries6; i++) { |
627 | if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, peer) == 0) { | 627 | if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, peer) == 0) { |
628 | /* Free the key */ | 628 | /* Free the key */ |
629 | kfree(tp->md5sig_info->keys6[i].key); | 629 | kfree(tp->md5sig_info->keys6[i].base.key); |
630 | tp->md5sig_info->entries6--; | 630 | tp->md5sig_info->entries6--; |
631 | 631 | ||
632 | if (tp->md5sig_info->entries6 == 0) { | 632 | if (tp->md5sig_info->entries6 == 0) { |
@@ -657,7 +657,7 @@ static void tcp_v6_clear_md5_list (struct sock *sk) | |||
657 | 657 | ||
658 | if (tp->md5sig_info->entries6) { | 658 | if (tp->md5sig_info->entries6) { |
659 | for (i = 0; i < tp->md5sig_info->entries6; i++) | 659 | for (i = 0; i < tp->md5sig_info->entries6; i++) |
660 | kfree(tp->md5sig_info->keys6[i].key); | 660 | kfree(tp->md5sig_info->keys6[i].base.key); |
661 | tp->md5sig_info->entries6 = 0; | 661 | tp->md5sig_info->entries6 = 0; |
662 | tcp_free_md5sig_pool(); | 662 | tcp_free_md5sig_pool(); |
663 | } | 663 | } |
@@ -668,7 +668,7 @@ static void tcp_v6_clear_md5_list (struct sock *sk) | |||
668 | 668 | ||
669 | if (tp->md5sig_info->entries4) { | 669 | if (tp->md5sig_info->entries4) { |
670 | for (i = 0; i < tp->md5sig_info->entries4; i++) | 670 | for (i = 0; i < tp->md5sig_info->entries4; i++) |
671 | kfree(tp->md5sig_info->keys4[i].key); | 671 | kfree(tp->md5sig_info->keys4[i].base.key); |
672 | tp->md5sig_info->entries4 = 0; | 672 | tp->md5sig_info->entries4 = 0; |
673 | tcp_free_md5sig_pool(); | 673 | tcp_free_md5sig_pool(); |
674 | } | 674 | } |
diff --git a/net/mac80211/ieee80211.c b/net/mac80211/ieee80211.c index 7286c389a4d0..ff2172ffd861 100644 --- a/net/mac80211/ieee80211.c +++ b/net/mac80211/ieee80211.c | |||
@@ -5259,7 +5259,7 @@ static void __exit ieee80211_exit(void) | |||
5259 | } | 5259 | } |
5260 | 5260 | ||
5261 | 5261 | ||
5262 | module_init(ieee80211_init); | 5262 | subsys_initcall(ieee80211_init); |
5263 | module_exit(ieee80211_exit); | 5263 | module_exit(ieee80211_exit); |
5264 | 5264 | ||
5265 | MODULE_DESCRIPTION("IEEE 802.11 subsystem"); | 5265 | MODULE_DESCRIPTION("IEEE 802.11 subsystem"); |
diff --git a/net/mac80211/rc80211_simple.c b/net/mac80211/rc80211_simple.c index f6780d63b342..17b9f46bbf2b 100644 --- a/net/mac80211/rc80211_simple.c +++ b/net/mac80211/rc80211_simple.c | |||
@@ -431,7 +431,7 @@ static void __exit rate_control_simple_exit(void) | |||
431 | } | 431 | } |
432 | 432 | ||
433 | 433 | ||
434 | module_init(rate_control_simple_init); | 434 | subsys_initcall(rate_control_simple_init); |
435 | module_exit(rate_control_simple_exit); | 435 | module_exit(rate_control_simple_exit); |
436 | 436 | ||
437 | MODULE_DESCRIPTION("Simple rate control algorithm for ieee80211"); | 437 | MODULE_DESCRIPTION("Simple rate control algorithm for ieee80211"); |
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c index 89ce81529694..7ab82b376e1b 100644 --- a/net/mac80211/wme.c +++ b/net/mac80211/wme.c | |||
@@ -424,7 +424,7 @@ static int wme_qdiscop_init(struct Qdisc *qd, struct rtattr *opt) | |||
424 | skb_queue_head_init(&q->requeued[i]); | 424 | skb_queue_head_init(&q->requeued[i]); |
425 | q->queues[i] = qdisc_create_dflt(qd->dev, &pfifo_qdisc_ops, | 425 | q->queues[i] = qdisc_create_dflt(qd->dev, &pfifo_qdisc_ops, |
426 | qd->handle); | 426 | qd->handle); |
427 | if (q->queues[i] == 0) { | 427 | if (!q->queues[i]) { |
428 | q->queues[i] = &noop_qdisc; | 428 | q->queues[i] = &noop_qdisc; |
429 | printk(KERN_ERR "%s child qdisc %i creation failed", dev->name, i); | 429 | printk(KERN_ERR "%s child qdisc %i creation failed", dev->name, i); |
430 | } | 430 | } |
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index e185a5b55913..2351533a8507 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c | |||
@@ -58,7 +58,6 @@ struct nfulnl_instance { | |||
58 | 58 | ||
59 | unsigned int qlen; /* number of nlmsgs in skb */ | 59 | unsigned int qlen; /* number of nlmsgs in skb */ |
60 | struct sk_buff *skb; /* pre-allocatd skb */ | 60 | struct sk_buff *skb; /* pre-allocatd skb */ |
61 | struct nlmsghdr *lastnlh; /* netlink header of last msg in skb */ | ||
62 | struct timer_list timer; | 61 | struct timer_list timer; |
63 | int peer_pid; /* PID of the peer process */ | 62 | int peer_pid; /* PID of the peer process */ |
64 | 63 | ||
@@ -345,10 +344,12 @@ static struct sk_buff *nfulnl_alloc_skb(unsigned int inst_size, | |||
345 | static int | 344 | static int |
346 | __nfulnl_send(struct nfulnl_instance *inst) | 345 | __nfulnl_send(struct nfulnl_instance *inst) |
347 | { | 346 | { |
348 | int status; | 347 | int status = -1; |
349 | 348 | ||
350 | if (inst->qlen > 1) | 349 | if (inst->qlen > 1) |
351 | inst->lastnlh->nlmsg_type = NLMSG_DONE; | 350 | NLMSG_PUT(inst->skb, 0, 0, |
351 | NLMSG_DONE, | ||
352 | sizeof(struct nfgenmsg)); | ||
352 | 353 | ||
353 | status = nfnetlink_unicast(inst->skb, inst->peer_pid, MSG_DONTWAIT); | 354 | status = nfnetlink_unicast(inst->skb, inst->peer_pid, MSG_DONTWAIT); |
354 | if (status < 0) { | 355 | if (status < 0) { |
@@ -358,8 +359,8 @@ __nfulnl_send(struct nfulnl_instance *inst) | |||
358 | 359 | ||
359 | inst->qlen = 0; | 360 | inst->qlen = 0; |
360 | inst->skb = NULL; | 361 | inst->skb = NULL; |
361 | inst->lastnlh = NULL; | ||
362 | 362 | ||
363 | nlmsg_failure: | ||
363 | return status; | 364 | return status; |
364 | } | 365 | } |
365 | 366 | ||
@@ -538,7 +539,6 @@ __build_packet_message(struct nfulnl_instance *inst, | |||
538 | } | 539 | } |
539 | 540 | ||
540 | nlh->nlmsg_len = inst->skb->tail - old_tail; | 541 | nlh->nlmsg_len = inst->skb->tail - old_tail; |
541 | inst->lastnlh = nlh; | ||
542 | return 0; | 542 | return 0; |
543 | 543 | ||
544 | nlmsg_failure: | 544 | nlmsg_failure: |
@@ -644,7 +644,8 @@ nfulnl_log_packet(unsigned int pf, | |||
644 | } | 644 | } |
645 | 645 | ||
646 | if (inst->qlen >= qthreshold || | 646 | if (inst->qlen >= qthreshold || |
647 | (inst->skb && size > skb_tailroom(inst->skb))) { | 647 | (inst->skb && size > |
648 | skb_tailroom(inst->skb) - sizeof(struct nfgenmsg))) { | ||
648 | /* either the queue len is too high or we don't have | 649 | /* either the queue len is too high or we don't have |
649 | * enough room in the skb left. flush to userspace. */ | 650 | * enough room in the skb left. flush to userspace. */ |
650 | UDEBUG("flushing old skb\n"); | 651 | UDEBUG("flushing old skb\n"); |
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 957957309859..b542c875e154 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/init.h> | 19 | #include <linux/init.h> |
20 | #include <linux/ipv6.h> | 20 | #include <linux/ipv6.h> |
21 | #include <linux/skbuff.h> | 21 | #include <linux/skbuff.h> |
22 | #include <linux/jhash.h> | ||
22 | #include <net/ip.h> | 23 | #include <net/ip.h> |
23 | #include <net/netlink.h> | 24 | #include <net/netlink.h> |
24 | #include <net/pkt_sched.h> | 25 | #include <net/pkt_sched.h> |
@@ -95,7 +96,7 @@ struct sfq_sched_data | |||
95 | 96 | ||
96 | /* Variables */ | 97 | /* Variables */ |
97 | struct timer_list perturb_timer; | 98 | struct timer_list perturb_timer; |
98 | int perturbation; | 99 | u32 perturbation; |
99 | sfq_index tail; /* Index of current slot in round */ | 100 | sfq_index tail; /* Index of current slot in round */ |
100 | sfq_index max_depth; /* Maximal depth */ | 101 | sfq_index max_depth; /* Maximal depth */ |
101 | 102 | ||
@@ -109,12 +110,7 @@ struct sfq_sched_data | |||
109 | 110 | ||
110 | static __inline__ unsigned sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1) | 111 | static __inline__ unsigned sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1) |
111 | { | 112 | { |
112 | int pert = q->perturbation; | 113 | return jhash_2words(h, h1, q->perturbation) & (SFQ_HASH_DIVISOR - 1); |
113 | |||
114 | /* Have we any rotation primitives? If not, WHY? */ | ||
115 | h ^= (h1<<pert) ^ (h1>>(0x1F - pert)); | ||
116 | h ^= h>>10; | ||
117 | return h & 0x3FF; | ||
118 | } | 114 | } |
119 | 115 | ||
120 | static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb) | 116 | static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb) |
@@ -256,6 +252,13 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc* sch) | |||
256 | q->ht[hash] = x = q->dep[SFQ_DEPTH].next; | 252 | q->ht[hash] = x = q->dep[SFQ_DEPTH].next; |
257 | q->hash[x] = hash; | 253 | q->hash[x] = hash; |
258 | } | 254 | } |
255 | /* If selected queue has length q->limit, this means that | ||
256 | * all another queues are empty and that we do simple tail drop, | ||
257 | * i.e. drop _this_ packet. | ||
258 | */ | ||
259 | if (q->qs[x].qlen >= q->limit) | ||
260 | return qdisc_drop(skb, sch); | ||
261 | |||
259 | sch->qstats.backlog += skb->len; | 262 | sch->qstats.backlog += skb->len; |
260 | __skb_queue_tail(&q->qs[x], skb); | 263 | __skb_queue_tail(&q->qs[x], skb); |
261 | sfq_inc(q, x); | 264 | sfq_inc(q, x); |
@@ -270,7 +273,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc* sch) | |||
270 | q->tail = x; | 273 | q->tail = x; |
271 | } | 274 | } |
272 | } | 275 | } |
273 | if (++sch->q.qlen < q->limit-1) { | 276 | if (++sch->q.qlen <= q->limit) { |
274 | sch->bstats.bytes += skb->len; | 277 | sch->bstats.bytes += skb->len; |
275 | sch->bstats.packets++; | 278 | sch->bstats.packets++; |
276 | return 0; | 279 | return 0; |
@@ -294,6 +297,19 @@ sfq_requeue(struct sk_buff *skb, struct Qdisc* sch) | |||
294 | } | 297 | } |
295 | sch->qstats.backlog += skb->len; | 298 | sch->qstats.backlog += skb->len; |
296 | __skb_queue_head(&q->qs[x], skb); | 299 | __skb_queue_head(&q->qs[x], skb); |
300 | /* If selected queue has length q->limit+1, this means that | ||
301 | * all another queues are empty and we do simple tail drop. | ||
302 | * This packet is still requeued at head of queue, tail packet | ||
303 | * is dropped. | ||
304 | */ | ||
305 | if (q->qs[x].qlen > q->limit) { | ||
306 | skb = q->qs[x].prev; | ||
307 | __skb_unlink(skb, &q->qs[x]); | ||
308 | sch->qstats.drops++; | ||
309 | sch->qstats.backlog -= skb->len; | ||
310 | kfree_skb(skb); | ||
311 | return NET_XMIT_CN; | ||
312 | } | ||
297 | sfq_inc(q, x); | 313 | sfq_inc(q, x); |
298 | if (q->qs[x].qlen == 1) { /* The flow is new */ | 314 | if (q->qs[x].qlen == 1) { /* The flow is new */ |
299 | if (q->tail == SFQ_DEPTH) { /* It is the first flow */ | 315 | if (q->tail == SFQ_DEPTH) { /* It is the first flow */ |
@@ -306,7 +322,7 @@ sfq_requeue(struct sk_buff *skb, struct Qdisc* sch) | |||
306 | q->tail = x; | 322 | q->tail = x; |
307 | } | 323 | } |
308 | } | 324 | } |
309 | if (++sch->q.qlen < q->limit - 1) { | 325 | if (++sch->q.qlen <= q->limit) { |
310 | sch->qstats.requeues++; | 326 | sch->qstats.requeues++; |
311 | return 0; | 327 | return 0; |
312 | } | 328 | } |
@@ -370,12 +386,10 @@ static void sfq_perturbation(unsigned long arg) | |||
370 | struct Qdisc *sch = (struct Qdisc*)arg; | 386 | struct Qdisc *sch = (struct Qdisc*)arg; |
371 | struct sfq_sched_data *q = qdisc_priv(sch); | 387 | struct sfq_sched_data *q = qdisc_priv(sch); |
372 | 388 | ||
373 | q->perturbation = net_random()&0x1F; | 389 | get_random_bytes(&q->perturbation, 4); |
374 | 390 | ||
375 | if (q->perturb_period) { | 391 | if (q->perturb_period) |
376 | q->perturb_timer.expires = jiffies + q->perturb_period; | 392 | mod_timer(&q->perturb_timer, jiffies + q->perturb_period); |
377 | add_timer(&q->perturb_timer); | ||
378 | } | ||
379 | } | 393 | } |
380 | 394 | ||
381 | static int sfq_change(struct Qdisc *sch, struct rtattr *opt) | 395 | static int sfq_change(struct Qdisc *sch, struct rtattr *opt) |
@@ -391,17 +405,17 @@ static int sfq_change(struct Qdisc *sch, struct rtattr *opt) | |||
391 | q->quantum = ctl->quantum ? : psched_mtu(sch->dev); | 405 | q->quantum = ctl->quantum ? : psched_mtu(sch->dev); |
392 | q->perturb_period = ctl->perturb_period*HZ; | 406 | q->perturb_period = ctl->perturb_period*HZ; |
393 | if (ctl->limit) | 407 | if (ctl->limit) |
394 | q->limit = min_t(u32, ctl->limit, SFQ_DEPTH); | 408 | q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1); |
395 | 409 | ||
396 | qlen = sch->q.qlen; | 410 | qlen = sch->q.qlen; |
397 | while (sch->q.qlen >= q->limit-1) | 411 | while (sch->q.qlen > q->limit) |
398 | sfq_drop(sch); | 412 | sfq_drop(sch); |
399 | qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen); | 413 | qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen); |
400 | 414 | ||
401 | del_timer(&q->perturb_timer); | 415 | del_timer(&q->perturb_timer); |
402 | if (q->perturb_period) { | 416 | if (q->perturb_period) { |
403 | q->perturb_timer.expires = jiffies + q->perturb_period; | 417 | mod_timer(&q->perturb_timer, jiffies + q->perturb_period); |
404 | add_timer(&q->perturb_timer); | 418 | get_random_bytes(&q->perturbation, 4); |
405 | } | 419 | } |
406 | sch_tree_unlock(sch); | 420 | sch_tree_unlock(sch); |
407 | return 0; | 421 | return 0; |
@@ -423,12 +437,13 @@ static int sfq_init(struct Qdisc *sch, struct rtattr *opt) | |||
423 | q->dep[i+SFQ_DEPTH].next = i+SFQ_DEPTH; | 437 | q->dep[i+SFQ_DEPTH].next = i+SFQ_DEPTH; |
424 | q->dep[i+SFQ_DEPTH].prev = i+SFQ_DEPTH; | 438 | q->dep[i+SFQ_DEPTH].prev = i+SFQ_DEPTH; |
425 | } | 439 | } |
426 | q->limit = SFQ_DEPTH; | 440 | q->limit = SFQ_DEPTH - 1; |
427 | q->max_depth = 0; | 441 | q->max_depth = 0; |
428 | q->tail = SFQ_DEPTH; | 442 | q->tail = SFQ_DEPTH; |
429 | if (opt == NULL) { | 443 | if (opt == NULL) { |
430 | q->quantum = psched_mtu(sch->dev); | 444 | q->quantum = psched_mtu(sch->dev); |
431 | q->perturb_period = 0; | 445 | q->perturb_period = 0; |
446 | get_random_bytes(&q->perturbation, 4); | ||
432 | } else { | 447 | } else { |
433 | int err = sfq_change(sch, opt); | 448 | int err = sfq_change(sch, opt); |
434 | if (err) | 449 | if (err) |
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c index d35cbf5aae33..dfffa94fb9f6 100644 --- a/net/sctp/bind_addr.c +++ b/net/sctp/bind_addr.c | |||
@@ -181,7 +181,7 @@ int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new, | |||
181 | * structure. | 181 | * structure. |
182 | */ | 182 | */ |
183 | int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr, | 183 | int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr, |
184 | void (*rcu_call)(struct rcu_head *head, | 184 | void fastcall (*rcu_call)(struct rcu_head *head, |
185 | void (*func)(struct rcu_head *head))) | 185 | void (*func)(struct rcu_head *head))) |
186 | { | 186 | { |
187 | struct sctp_sockaddr_entry *addr, *temp; | 187 | struct sctp_sockaddr_entry *addr, *temp; |
diff --git a/net/sctp/input.c b/net/sctp/input.c index 47e56017f4ce..f9a0c9276e3b 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c | |||
@@ -622,6 +622,14 @@ static int sctp_rcv_ootb(struct sk_buff *skb) | |||
622 | if (SCTP_CID_SHUTDOWN_COMPLETE == ch->type) | 622 | if (SCTP_CID_SHUTDOWN_COMPLETE == ch->type) |
623 | goto discard; | 623 | goto discard; |
624 | 624 | ||
625 | /* RFC 4460, 2.11.2 | ||
626 | * This will discard packets with INIT chunk bundled as | ||
627 | * subsequent chunks in the packet. When INIT is first, | ||
628 | * the normal INIT processing will discard the chunk. | ||
629 | */ | ||
630 | if (SCTP_CID_INIT == ch->type && (void *)ch != skb->data) | ||
631 | goto discard; | ||
632 | |||
625 | /* RFC 8.4, 7) If the packet contains a "Stale cookie" ERROR | 633 | /* RFC 8.4, 7) If the packet contains a "Stale cookie" ERROR |
626 | * or a COOKIE ACK the SCTP Packet should be silently | 634 | * or a COOKIE ACK the SCTP Packet should be silently |
627 | * discarded. | 635 | * discarded. |
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c index 88aa22407549..e4ea7fdf36ed 100644 --- a/net/sctp/inqueue.c +++ b/net/sctp/inqueue.c | |||
@@ -130,6 +130,14 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue) | |||
130 | /* Force chunk->skb->data to chunk->chunk_end. */ | 130 | /* Force chunk->skb->data to chunk->chunk_end. */ |
131 | skb_pull(chunk->skb, | 131 | skb_pull(chunk->skb, |
132 | chunk->chunk_end - chunk->skb->data); | 132 | chunk->chunk_end - chunk->skb->data); |
133 | |||
134 | /* Verify that we have at least chunk headers | ||
135 | * worth of buffer left. | ||
136 | */ | ||
137 | if (skb_headlen(chunk->skb) < sizeof(sctp_chunkhdr_t)) { | ||
138 | sctp_chunk_free(chunk); | ||
139 | chunk = queue->in_progress = NULL; | ||
140 | } | ||
133 | } | 141 | } |
134 | } | 142 | } |
135 | 143 | ||
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 2e34220d94cd..23ae37ec8711 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c | |||
@@ -2499,6 +2499,52 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc, | |||
2499 | return SCTP_ERROR_NO_ERROR; | 2499 | return SCTP_ERROR_NO_ERROR; |
2500 | } | 2500 | } |
2501 | 2501 | ||
2502 | /* Verify the ASCONF packet before we process it. */ | ||
2503 | int sctp_verify_asconf(const struct sctp_association *asoc, | ||
2504 | struct sctp_paramhdr *param_hdr, void *chunk_end, | ||
2505 | struct sctp_paramhdr **errp) { | ||
2506 | sctp_addip_param_t *asconf_param; | ||
2507 | union sctp_params param; | ||
2508 | int length, plen; | ||
2509 | |||
2510 | param.v = (sctp_paramhdr_t *) param_hdr; | ||
2511 | while (param.v <= chunk_end - sizeof(sctp_paramhdr_t)) { | ||
2512 | length = ntohs(param.p->length); | ||
2513 | *errp = param.p; | ||
2514 | |||
2515 | if (param.v > chunk_end - length || | ||
2516 | length < sizeof(sctp_paramhdr_t)) | ||
2517 | return 0; | ||
2518 | |||
2519 | switch (param.p->type) { | ||
2520 | case SCTP_PARAM_ADD_IP: | ||
2521 | case SCTP_PARAM_DEL_IP: | ||
2522 | case SCTP_PARAM_SET_PRIMARY: | ||
2523 | asconf_param = (sctp_addip_param_t *)param.v; | ||
2524 | plen = ntohs(asconf_param->param_hdr.length); | ||
2525 | if (plen < sizeof(sctp_addip_param_t) + | ||
2526 | sizeof(sctp_paramhdr_t)) | ||
2527 | return 0; | ||
2528 | break; | ||
2529 | case SCTP_PARAM_SUCCESS_REPORT: | ||
2530 | case SCTP_PARAM_ADAPTATION_LAYER_IND: | ||
2531 | if (length != sizeof(sctp_addip_param_t)) | ||
2532 | return 0; | ||
2533 | |||
2534 | break; | ||
2535 | default: | ||
2536 | break; | ||
2537 | } | ||
2538 | |||
2539 | param.v += WORD_ROUND(length); | ||
2540 | } | ||
2541 | |||
2542 | if (param.v != chunk_end) | ||
2543 | return 0; | ||
2544 | |||
2545 | return 1; | ||
2546 | } | ||
2547 | |||
2502 | /* Process an incoming ASCONF chunk with the next expected serial no. and | 2548 | /* Process an incoming ASCONF chunk with the next expected serial no. and |
2503 | * return an ASCONF_ACK chunk to be sent in response. | 2549 | * return an ASCONF_ACK chunk to be sent in response. |
2504 | */ | 2550 | */ |
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 177528ed3e1b..a583d67cab63 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
@@ -90,6 +90,11 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep, | |||
90 | const sctp_subtype_t type, | 90 | const sctp_subtype_t type, |
91 | void *arg, | 91 | void *arg, |
92 | sctp_cmd_seq_t *commands); | 92 | sctp_cmd_seq_t *commands); |
93 | static sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep, | ||
94 | const struct sctp_association *asoc, | ||
95 | const sctp_subtype_t type, | ||
96 | void *arg, | ||
97 | sctp_cmd_seq_t *commands); | ||
93 | static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk); | 98 | static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk); |
94 | 99 | ||
95 | static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands, | 100 | static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands, |
@@ -98,6 +103,7 @@ static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands, | |||
98 | struct sctp_transport *transport); | 103 | struct sctp_transport *transport); |
99 | 104 | ||
100 | static sctp_disposition_t sctp_sf_abort_violation( | 105 | static sctp_disposition_t sctp_sf_abort_violation( |
106 | const struct sctp_endpoint *ep, | ||
101 | const struct sctp_association *asoc, | 107 | const struct sctp_association *asoc, |
102 | void *arg, | 108 | void *arg, |
103 | sctp_cmd_seq_t *commands, | 109 | sctp_cmd_seq_t *commands, |
@@ -111,6 +117,13 @@ static sctp_disposition_t sctp_sf_violation_chunklen( | |||
111 | void *arg, | 117 | void *arg, |
112 | sctp_cmd_seq_t *commands); | 118 | sctp_cmd_seq_t *commands); |
113 | 119 | ||
120 | static sctp_disposition_t sctp_sf_violation_paramlen( | ||
121 | const struct sctp_endpoint *ep, | ||
122 | const struct sctp_association *asoc, | ||
123 | const sctp_subtype_t type, | ||
124 | void *arg, | ||
125 | sctp_cmd_seq_t *commands); | ||
126 | |||
114 | static sctp_disposition_t sctp_sf_violation_ctsn( | 127 | static sctp_disposition_t sctp_sf_violation_ctsn( |
115 | const struct sctp_endpoint *ep, | 128 | const struct sctp_endpoint *ep, |
116 | const struct sctp_association *asoc, | 129 | const struct sctp_association *asoc, |
@@ -118,6 +131,13 @@ static sctp_disposition_t sctp_sf_violation_ctsn( | |||
118 | void *arg, | 131 | void *arg, |
119 | sctp_cmd_seq_t *commands); | 132 | sctp_cmd_seq_t *commands); |
120 | 133 | ||
134 | static sctp_disposition_t sctp_sf_violation_chunk( | ||
135 | const struct sctp_endpoint *ep, | ||
136 | const struct sctp_association *asoc, | ||
137 | const sctp_subtype_t type, | ||
138 | void *arg, | ||
139 | sctp_cmd_seq_t *commands); | ||
140 | |||
121 | /* Small helper function that checks if the chunk length | 141 | /* Small helper function that checks if the chunk length |
122 | * is of the appropriate length. The 'required_length' argument | 142 | * is of the appropriate length. The 'required_length' argument |
123 | * is set to be the size of a specific chunk we are testing. | 143 | * is set to be the size of a specific chunk we are testing. |
@@ -181,16 +201,21 @@ sctp_disposition_t sctp_sf_do_4_C(const struct sctp_endpoint *ep, | |||
181 | struct sctp_chunk *chunk = arg; | 201 | struct sctp_chunk *chunk = arg; |
182 | struct sctp_ulpevent *ev; | 202 | struct sctp_ulpevent *ev; |
183 | 203 | ||
204 | if (!sctp_vtag_verify_either(chunk, asoc)) | ||
205 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
206 | |||
184 | /* RFC 2960 6.10 Bundling | 207 | /* RFC 2960 6.10 Bundling |
185 | * | 208 | * |
186 | * An endpoint MUST NOT bundle INIT, INIT ACK or | 209 | * An endpoint MUST NOT bundle INIT, INIT ACK or |
187 | * SHUTDOWN COMPLETE with any other chunks. | 210 | * SHUTDOWN COMPLETE with any other chunks. |
188 | */ | 211 | */ |
189 | if (!chunk->singleton) | 212 | if (!chunk->singleton) |
190 | return SCTP_DISPOSITION_VIOLATION; | 213 | return sctp_sf_violation_chunk(ep, asoc, type, arg, commands); |
191 | 214 | ||
192 | if (!sctp_vtag_verify_either(chunk, asoc)) | 215 | /* Make sure that the SHUTDOWN_COMPLETE chunk has a valid length. */ |
193 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | 216 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) |
217 | return sctp_sf_violation_chunklen(ep, asoc, type, arg, | ||
218 | commands); | ||
194 | 219 | ||
195 | /* RFC 2960 10.2 SCTP-to-ULP | 220 | /* RFC 2960 10.2 SCTP-to-ULP |
196 | * | 221 | * |
@@ -450,17 +475,17 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep, | |||
450 | if (!sctp_vtag_verify(chunk, asoc)) | 475 | if (!sctp_vtag_verify(chunk, asoc)) |
451 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | 476 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); |
452 | 477 | ||
453 | /* Make sure that the INIT-ACK chunk has a valid length */ | ||
454 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_initack_chunk_t))) | ||
455 | return sctp_sf_violation_chunklen(ep, asoc, type, arg, | ||
456 | commands); | ||
457 | /* 6.10 Bundling | 478 | /* 6.10 Bundling |
458 | * An endpoint MUST NOT bundle INIT, INIT ACK or | 479 | * An endpoint MUST NOT bundle INIT, INIT ACK or |
459 | * SHUTDOWN COMPLETE with any other chunks. | 480 | * SHUTDOWN COMPLETE with any other chunks. |
460 | */ | 481 | */ |
461 | if (!chunk->singleton) | 482 | if (!chunk->singleton) |
462 | return SCTP_DISPOSITION_VIOLATION; | 483 | return sctp_sf_violation_chunk(ep, asoc, type, arg, commands); |
463 | 484 | ||
485 | /* Make sure that the INIT-ACK chunk has a valid length */ | ||
486 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_initack_chunk_t))) | ||
487 | return sctp_sf_violation_chunklen(ep, asoc, type, arg, | ||
488 | commands); | ||
464 | /* Grab the INIT header. */ | 489 | /* Grab the INIT header. */ |
465 | chunk->subh.init_hdr = (sctp_inithdr_t *) chunk->skb->data; | 490 | chunk->subh.init_hdr = (sctp_inithdr_t *) chunk->skb->data; |
466 | 491 | ||
@@ -585,7 +610,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep, | |||
585 | * control endpoint, respond with an ABORT. | 610 | * control endpoint, respond with an ABORT. |
586 | */ | 611 | */ |
587 | if (ep == sctp_sk((sctp_get_ctl_sock()))->ep) | 612 | if (ep == sctp_sk((sctp_get_ctl_sock()))->ep) |
588 | return sctp_sf_ootb(ep, asoc, type, arg, commands); | 613 | return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands); |
589 | 614 | ||
590 | /* Make sure that the COOKIE_ECHO chunk has a valid length. | 615 | /* Make sure that the COOKIE_ECHO chunk has a valid length. |
591 | * In this case, we check that we have enough for at least a | 616 | * In this case, we check that we have enough for at least a |
@@ -2496,6 +2521,11 @@ sctp_disposition_t sctp_sf_do_9_2_reshutack(const struct sctp_endpoint *ep, | |||
2496 | struct sctp_chunk *chunk = (struct sctp_chunk *) arg; | 2521 | struct sctp_chunk *chunk = (struct sctp_chunk *) arg; |
2497 | struct sctp_chunk *reply; | 2522 | struct sctp_chunk *reply; |
2498 | 2523 | ||
2524 | /* Make sure that the chunk has a valid length */ | ||
2525 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) | ||
2526 | return sctp_sf_violation_chunklen(ep, asoc, type, arg, | ||
2527 | commands); | ||
2528 | |||
2499 | /* Since we are not going to really process this INIT, there | 2529 | /* Since we are not going to really process this INIT, there |
2500 | * is no point in verifying chunk boundries. Just generate | 2530 | * is no point in verifying chunk boundries. Just generate |
2501 | * the SHUTDOWN ACK. | 2531 | * the SHUTDOWN ACK. |
@@ -2929,7 +2959,7 @@ sctp_disposition_t sctp_sf_eat_sack_6_2(const struct sctp_endpoint *ep, | |||
2929 | * | 2959 | * |
2930 | * The return value is the disposition of the chunk. | 2960 | * The return value is the disposition of the chunk. |
2931 | */ | 2961 | */ |
2932 | sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep, | 2962 | static sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep, |
2933 | const struct sctp_association *asoc, | 2963 | const struct sctp_association *asoc, |
2934 | const sctp_subtype_t type, | 2964 | const sctp_subtype_t type, |
2935 | void *arg, | 2965 | void *arg, |
@@ -2965,6 +2995,7 @@ sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep, | |||
2965 | 2995 | ||
2966 | SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); | 2996 | SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); |
2967 | 2997 | ||
2998 | sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
2968 | return SCTP_DISPOSITION_CONSUME; | 2999 | return SCTP_DISPOSITION_CONSUME; |
2969 | } | 3000 | } |
2970 | 3001 | ||
@@ -3125,14 +3156,14 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep, | |||
3125 | 3156 | ||
3126 | ch = (sctp_chunkhdr_t *) chunk->chunk_hdr; | 3157 | ch = (sctp_chunkhdr_t *) chunk->chunk_hdr; |
3127 | do { | 3158 | do { |
3128 | /* Break out if chunk length is less then minimal. */ | 3159 | /* Report violation if the chunk is less then minimal */ |
3129 | if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t)) | 3160 | if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t)) |
3130 | break; | 3161 | return sctp_sf_violation_chunklen(ep, asoc, type, arg, |
3131 | 3162 | commands); | |
3132 | ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length)); | ||
3133 | if (ch_end > skb_tail_pointer(skb)) | ||
3134 | break; | ||
3135 | 3163 | ||
3164 | /* Now that we know we at least have a chunk header, | ||
3165 | * do things that are type appropriate. | ||
3166 | */ | ||
3136 | if (SCTP_CID_SHUTDOWN_ACK == ch->type) | 3167 | if (SCTP_CID_SHUTDOWN_ACK == ch->type) |
3137 | ootb_shut_ack = 1; | 3168 | ootb_shut_ack = 1; |
3138 | 3169 | ||
@@ -3144,15 +3175,19 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep, | |||
3144 | if (SCTP_CID_ABORT == ch->type) | 3175 | if (SCTP_CID_ABORT == ch->type) |
3145 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | 3176 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); |
3146 | 3177 | ||
3178 | /* Report violation if chunk len overflows */ | ||
3179 | ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length)); | ||
3180 | if (ch_end > skb_tail_pointer(skb)) | ||
3181 | return sctp_sf_violation_chunklen(ep, asoc, type, arg, | ||
3182 | commands); | ||
3183 | |||
3147 | ch = (sctp_chunkhdr_t *) ch_end; | 3184 | ch = (sctp_chunkhdr_t *) ch_end; |
3148 | } while (ch_end < skb_tail_pointer(skb)); | 3185 | } while (ch_end < skb_tail_pointer(skb)); |
3149 | 3186 | ||
3150 | if (ootb_shut_ack) | 3187 | if (ootb_shut_ack) |
3151 | sctp_sf_shut_8_4_5(ep, asoc, type, arg, commands); | 3188 | return sctp_sf_shut_8_4_5(ep, asoc, type, arg, commands); |
3152 | else | 3189 | else |
3153 | sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands); | 3190 | return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands); |
3154 | |||
3155 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
3156 | } | 3191 | } |
3157 | 3192 | ||
3158 | /* | 3193 | /* |
@@ -3218,7 +3253,11 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep, | |||
3218 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) | 3253 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) |
3219 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | 3254 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); |
3220 | 3255 | ||
3221 | return SCTP_DISPOSITION_CONSUME; | 3256 | /* We need to discard the rest of the packet to prevent |
3257 | * potential bomming attacks from additional bundled chunks. | ||
3258 | * This is documented in SCTP Threats ID. | ||
3259 | */ | ||
3260 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
3222 | } | 3261 | } |
3223 | 3262 | ||
3224 | return SCTP_DISPOSITION_NOMEM; | 3263 | return SCTP_DISPOSITION_NOMEM; |
@@ -3241,6 +3280,13 @@ sctp_disposition_t sctp_sf_do_8_5_1_E_sa(const struct sctp_endpoint *ep, | |||
3241 | void *arg, | 3280 | void *arg, |
3242 | sctp_cmd_seq_t *commands) | 3281 | sctp_cmd_seq_t *commands) |
3243 | { | 3282 | { |
3283 | struct sctp_chunk *chunk = arg; | ||
3284 | |||
3285 | /* Make sure that the SHUTDOWN_ACK chunk has a valid length. */ | ||
3286 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) | ||
3287 | return sctp_sf_violation_chunklen(ep, asoc, type, arg, | ||
3288 | commands); | ||
3289 | |||
3244 | /* Although we do have an association in this case, it corresponds | 3290 | /* Although we do have an association in this case, it corresponds |
3245 | * to a restarted association. So the packet is treated as an OOTB | 3291 | * to a restarted association. So the packet is treated as an OOTB |
3246 | * packet and the state function that handles OOTB SHUTDOWN_ACK is | 3292 | * packet and the state function that handles OOTB SHUTDOWN_ACK is |
@@ -3257,8 +3303,11 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep, | |||
3257 | { | 3303 | { |
3258 | struct sctp_chunk *chunk = arg; | 3304 | struct sctp_chunk *chunk = arg; |
3259 | struct sctp_chunk *asconf_ack = NULL; | 3305 | struct sctp_chunk *asconf_ack = NULL; |
3306 | struct sctp_paramhdr *err_param = NULL; | ||
3260 | sctp_addiphdr_t *hdr; | 3307 | sctp_addiphdr_t *hdr; |
3308 | union sctp_addr_param *addr_param; | ||
3261 | __u32 serial; | 3309 | __u32 serial; |
3310 | int length; | ||
3262 | 3311 | ||
3263 | if (!sctp_vtag_verify(chunk, asoc)) { | 3312 | if (!sctp_vtag_verify(chunk, asoc)) { |
3264 | sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, | 3313 | sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, |
@@ -3274,6 +3323,20 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep, | |||
3274 | hdr = (sctp_addiphdr_t *)chunk->skb->data; | 3323 | hdr = (sctp_addiphdr_t *)chunk->skb->data; |
3275 | serial = ntohl(hdr->serial); | 3324 | serial = ntohl(hdr->serial); |
3276 | 3325 | ||
3326 | addr_param = (union sctp_addr_param *)hdr->params; | ||
3327 | length = ntohs(addr_param->p.length); | ||
3328 | if (length < sizeof(sctp_paramhdr_t)) | ||
3329 | return sctp_sf_violation_paramlen(ep, asoc, type, | ||
3330 | (void *)addr_param, commands); | ||
3331 | |||
3332 | /* Verify the ASCONF chunk before processing it. */ | ||
3333 | if (!sctp_verify_asconf(asoc, | ||
3334 | (sctp_paramhdr_t *)((void *)addr_param + length), | ||
3335 | (void *)chunk->chunk_end, | ||
3336 | &err_param)) | ||
3337 | return sctp_sf_violation_paramlen(ep, asoc, type, | ||
3338 | (void *)&err_param, commands); | ||
3339 | |||
3277 | /* ADDIP 4.2 C1) Compare the value of the serial number to the value | 3340 | /* ADDIP 4.2 C1) Compare the value of the serial number to the value |
3278 | * the endpoint stored in a new association variable | 3341 | * the endpoint stored in a new association variable |
3279 | * 'Peer-Serial-Number'. | 3342 | * 'Peer-Serial-Number'. |
@@ -3328,6 +3391,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep, | |||
3328 | struct sctp_chunk *asconf_ack = arg; | 3391 | struct sctp_chunk *asconf_ack = arg; |
3329 | struct sctp_chunk *last_asconf = asoc->addip_last_asconf; | 3392 | struct sctp_chunk *last_asconf = asoc->addip_last_asconf; |
3330 | struct sctp_chunk *abort; | 3393 | struct sctp_chunk *abort; |
3394 | struct sctp_paramhdr *err_param = NULL; | ||
3331 | sctp_addiphdr_t *addip_hdr; | 3395 | sctp_addiphdr_t *addip_hdr; |
3332 | __u32 sent_serial, rcvd_serial; | 3396 | __u32 sent_serial, rcvd_serial; |
3333 | 3397 | ||
@@ -3345,6 +3409,14 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep, | |||
3345 | addip_hdr = (sctp_addiphdr_t *)asconf_ack->skb->data; | 3409 | addip_hdr = (sctp_addiphdr_t *)asconf_ack->skb->data; |
3346 | rcvd_serial = ntohl(addip_hdr->serial); | 3410 | rcvd_serial = ntohl(addip_hdr->serial); |
3347 | 3411 | ||
3412 | /* Verify the ASCONF-ACK chunk before processing it. */ | ||
3413 | if (!sctp_verify_asconf(asoc, | ||
3414 | (sctp_paramhdr_t *)addip_hdr->params, | ||
3415 | (void *)asconf_ack->chunk_end, | ||
3416 | &err_param)) | ||
3417 | return sctp_sf_violation_paramlen(ep, asoc, type, | ||
3418 | (void *)&err_param, commands); | ||
3419 | |||
3348 | if (last_asconf) { | 3420 | if (last_asconf) { |
3349 | addip_hdr = (sctp_addiphdr_t *)last_asconf->subh.addip_hdr; | 3421 | addip_hdr = (sctp_addiphdr_t *)last_asconf->subh.addip_hdr; |
3350 | sent_serial = ntohl(addip_hdr->serial); | 3422 | sent_serial = ntohl(addip_hdr->serial); |
@@ -3655,6 +3727,16 @@ sctp_disposition_t sctp_sf_discard_chunk(const struct sctp_endpoint *ep, | |||
3655 | void *arg, | 3727 | void *arg, |
3656 | sctp_cmd_seq_t *commands) | 3728 | sctp_cmd_seq_t *commands) |
3657 | { | 3729 | { |
3730 | struct sctp_chunk *chunk = arg; | ||
3731 | |||
3732 | /* Make sure that the chunk has a valid length. | ||
3733 | * Since we don't know the chunk type, we use a general | ||
3734 | * chunkhdr structure to make a comparison. | ||
3735 | */ | ||
3736 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) | ||
3737 | return sctp_sf_violation_chunklen(ep, asoc, type, arg, | ||
3738 | commands); | ||
3739 | |||
3658 | SCTP_DEBUG_PRINTK("Chunk %d is discarded\n", type.chunk); | 3740 | SCTP_DEBUG_PRINTK("Chunk %d is discarded\n", type.chunk); |
3659 | return SCTP_DISPOSITION_DISCARD; | 3741 | return SCTP_DISPOSITION_DISCARD; |
3660 | } | 3742 | } |
@@ -3710,6 +3792,13 @@ sctp_disposition_t sctp_sf_violation(const struct sctp_endpoint *ep, | |||
3710 | void *arg, | 3792 | void *arg, |
3711 | sctp_cmd_seq_t *commands) | 3793 | sctp_cmd_seq_t *commands) |
3712 | { | 3794 | { |
3795 | struct sctp_chunk *chunk = arg; | ||
3796 | |||
3797 | /* Make sure that the chunk has a valid length. */ | ||
3798 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) | ||
3799 | return sctp_sf_violation_chunklen(ep, asoc, type, arg, | ||
3800 | commands); | ||
3801 | |||
3713 | return SCTP_DISPOSITION_VIOLATION; | 3802 | return SCTP_DISPOSITION_VIOLATION; |
3714 | } | 3803 | } |
3715 | 3804 | ||
@@ -3717,12 +3806,14 @@ sctp_disposition_t sctp_sf_violation(const struct sctp_endpoint *ep, | |||
3717 | * Common function to handle a protocol violation. | 3806 | * Common function to handle a protocol violation. |
3718 | */ | 3807 | */ |
3719 | static sctp_disposition_t sctp_sf_abort_violation( | 3808 | static sctp_disposition_t sctp_sf_abort_violation( |
3809 | const struct sctp_endpoint *ep, | ||
3720 | const struct sctp_association *asoc, | 3810 | const struct sctp_association *asoc, |
3721 | void *arg, | 3811 | void *arg, |
3722 | sctp_cmd_seq_t *commands, | 3812 | sctp_cmd_seq_t *commands, |
3723 | const __u8 *payload, | 3813 | const __u8 *payload, |
3724 | const size_t paylen) | 3814 | const size_t paylen) |
3725 | { | 3815 | { |
3816 | struct sctp_packet *packet = NULL; | ||
3726 | struct sctp_chunk *chunk = arg; | 3817 | struct sctp_chunk *chunk = arg; |
3727 | struct sctp_chunk *abort = NULL; | 3818 | struct sctp_chunk *abort = NULL; |
3728 | 3819 | ||
@@ -3731,30 +3822,51 @@ static sctp_disposition_t sctp_sf_abort_violation( | |||
3731 | if (!abort) | 3822 | if (!abort) |
3732 | goto nomem; | 3823 | goto nomem; |
3733 | 3824 | ||
3734 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); | 3825 | if (asoc) { |
3735 | SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); | 3826 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); |
3827 | SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); | ||
3736 | 3828 | ||
3737 | if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) { | 3829 | if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) { |
3738 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, | 3830 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, |
3739 | SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); | 3831 | SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); |
3740 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, | 3832 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, |
3741 | SCTP_ERROR(ECONNREFUSED)); | 3833 | SCTP_ERROR(ECONNREFUSED)); |
3742 | sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, | 3834 | sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, |
3743 | SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION)); | 3835 | SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION)); |
3836 | } else { | ||
3837 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, | ||
3838 | SCTP_ERROR(ECONNABORTED)); | ||
3839 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, | ||
3840 | SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION)); | ||
3841 | SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); | ||
3842 | } | ||
3744 | } else { | 3843 | } else { |
3745 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, | 3844 | packet = sctp_ootb_pkt_new(asoc, chunk); |
3746 | SCTP_ERROR(ECONNABORTED)); | 3845 | |
3747 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, | 3846 | if (!packet) |
3748 | SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION)); | 3847 | goto nomem_pkt; |
3749 | SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); | 3848 | |
3849 | if (sctp_test_T_bit(abort)) | ||
3850 | packet->vtag = ntohl(chunk->sctp_hdr->vtag); | ||
3851 | |||
3852 | abort->skb->sk = ep->base.sk; | ||
3853 | |||
3854 | sctp_packet_append_chunk(packet, abort); | ||
3855 | |||
3856 | sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, | ||
3857 | SCTP_PACKET(packet)); | ||
3858 | |||
3859 | SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); | ||
3750 | } | 3860 | } |
3751 | 3861 | ||
3752 | sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL()); | 3862 | sctp_sf_pdiscard(ep, asoc, SCTP_ST_CHUNK(0), arg, commands); |
3753 | 3863 | ||
3754 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); | 3864 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); |
3755 | 3865 | ||
3756 | return SCTP_DISPOSITION_ABORT; | 3866 | return SCTP_DISPOSITION_ABORT; |
3757 | 3867 | ||
3868 | nomem_pkt: | ||
3869 | sctp_chunk_free(abort); | ||
3758 | nomem: | 3870 | nomem: |
3759 | return SCTP_DISPOSITION_NOMEM; | 3871 | return SCTP_DISPOSITION_NOMEM; |
3760 | } | 3872 | } |
@@ -3787,7 +3899,24 @@ static sctp_disposition_t sctp_sf_violation_chunklen( | |||
3787 | { | 3899 | { |
3788 | char err_str[]="The following chunk had invalid length:"; | 3900 | char err_str[]="The following chunk had invalid length:"; |
3789 | 3901 | ||
3790 | return sctp_sf_abort_violation(asoc, arg, commands, err_str, | 3902 | return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str, |
3903 | sizeof(err_str)); | ||
3904 | } | ||
3905 | |||
3906 | /* | ||
3907 | * Handle a protocol violation when the parameter length is invalid. | ||
3908 | * "Invalid" length is identified as smaller then the minimal length a | ||
3909 | * given parameter can be. | ||
3910 | */ | ||
3911 | static sctp_disposition_t sctp_sf_violation_paramlen( | ||
3912 | const struct sctp_endpoint *ep, | ||
3913 | const struct sctp_association *asoc, | ||
3914 | const sctp_subtype_t type, | ||
3915 | void *arg, | ||
3916 | sctp_cmd_seq_t *commands) { | ||
3917 | char err_str[] = "The following parameter had invalid length:"; | ||
3918 | |||
3919 | return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str, | ||
3791 | sizeof(err_str)); | 3920 | sizeof(err_str)); |
3792 | } | 3921 | } |
3793 | 3922 | ||
@@ -3806,10 +3935,31 @@ static sctp_disposition_t sctp_sf_violation_ctsn( | |||
3806 | { | 3935 | { |
3807 | char err_str[]="The cumulative tsn ack beyond the max tsn currently sent:"; | 3936 | char err_str[]="The cumulative tsn ack beyond the max tsn currently sent:"; |
3808 | 3937 | ||
3809 | return sctp_sf_abort_violation(asoc, arg, commands, err_str, | 3938 | return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str, |
3810 | sizeof(err_str)); | 3939 | sizeof(err_str)); |
3811 | } | 3940 | } |
3812 | 3941 | ||
3942 | /* Handle protocol violation of an invalid chunk bundling. For example, | ||
3943 | * when we have an association and we recieve bundled INIT-ACK, or | ||
3944 | * SHUDOWN-COMPLETE, our peer is clearly violationg the "MUST NOT bundle" | ||
3945 | * statement from the specs. Additinally, there might be an attacker | ||
3946 | * on the path and we may not want to continue this communication. | ||
3947 | */ | ||
3948 | static sctp_disposition_t sctp_sf_violation_chunk( | ||
3949 | const struct sctp_endpoint *ep, | ||
3950 | const struct sctp_association *asoc, | ||
3951 | const sctp_subtype_t type, | ||
3952 | void *arg, | ||
3953 | sctp_cmd_seq_t *commands) | ||
3954 | { | ||
3955 | char err_str[]="The following chunk violates protocol:"; | ||
3956 | |||
3957 | if (!asoc) | ||
3958 | return sctp_sf_violation(ep, asoc, type, arg, commands); | ||
3959 | |||
3960 | return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str, | ||
3961 | sizeof(err_str)); | ||
3962 | } | ||
3813 | /*************************************************************************** | 3963 | /*************************************************************************** |
3814 | * These are the state functions for handling primitive (Section 10) events. | 3964 | * These are the state functions for handling primitive (Section 10) events. |
3815 | ***************************************************************************/ | 3965 | ***************************************************************************/ |
@@ -5176,7 +5326,22 @@ static struct sctp_packet *sctp_ootb_pkt_new(const struct sctp_association *asoc | |||
5176 | * association exists, otherwise, use the peer's vtag. | 5326 | * association exists, otherwise, use the peer's vtag. |
5177 | */ | 5327 | */ |
5178 | if (asoc) { | 5328 | if (asoc) { |
5179 | vtag = asoc->peer.i.init_tag; | 5329 | /* Special case the INIT-ACK as there is no peer's vtag |
5330 | * yet. | ||
5331 | */ | ||
5332 | switch(chunk->chunk_hdr->type) { | ||
5333 | case SCTP_CID_INIT_ACK: | ||
5334 | { | ||
5335 | sctp_initack_chunk_t *initack; | ||
5336 | |||
5337 | initack = (sctp_initack_chunk_t *)chunk->chunk_hdr; | ||
5338 | vtag = ntohl(initack->init_hdr.init_tag); | ||
5339 | break; | ||
5340 | } | ||
5341 | default: | ||
5342 | vtag = asoc->peer.i.init_tag; | ||
5343 | break; | ||
5344 | } | ||
5180 | } else { | 5345 | } else { |
5181 | /* Special case the INIT and stale COOKIE_ECHO as there is no | 5346 | /* Special case the INIT and stale COOKIE_ECHO as there is no |
5182 | * vtag yet. | 5347 | * vtag yet. |
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c index 70a91ece3c49..ddb0ba3974b0 100644 --- a/net/sctp/sm_statetable.c +++ b/net/sctp/sm_statetable.c | |||
@@ -110,7 +110,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type, | |||
110 | /* SCTP_STATE_EMPTY */ \ | 110 | /* SCTP_STATE_EMPTY */ \ |
111 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ | 111 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
112 | /* SCTP_STATE_CLOSED */ \ | 112 | /* SCTP_STATE_CLOSED */ \ |
113 | TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \ | 113 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
114 | /* SCTP_STATE_COOKIE_WAIT */ \ | 114 | /* SCTP_STATE_COOKIE_WAIT */ \ |
115 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ | 115 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
116 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 116 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
@@ -173,7 +173,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type, | |||
173 | /* SCTP_STATE_EMPTY */ \ | 173 | /* SCTP_STATE_EMPTY */ \ |
174 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ | 174 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
175 | /* SCTP_STATE_CLOSED */ \ | 175 | /* SCTP_STATE_CLOSED */ \ |
176 | TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \ | 176 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
177 | /* SCTP_STATE_COOKIE_WAIT */ \ | 177 | /* SCTP_STATE_COOKIE_WAIT */ \ |
178 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ | 178 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
179 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 179 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
@@ -194,7 +194,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type, | |||
194 | /* SCTP_STATE_EMPTY */ \ | 194 | /* SCTP_STATE_EMPTY */ \ |
195 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ | 195 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
196 | /* SCTP_STATE_CLOSED */ \ | 196 | /* SCTP_STATE_CLOSED */ \ |
197 | TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \ | 197 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
198 | /* SCTP_STATE_COOKIE_WAIT */ \ | 198 | /* SCTP_STATE_COOKIE_WAIT */ \ |
199 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ | 199 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
200 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 200 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
@@ -216,7 +216,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type, | |||
216 | /* SCTP_STATE_EMPTY */ \ | 216 | /* SCTP_STATE_EMPTY */ \ |
217 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ | 217 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
218 | /* SCTP_STATE_CLOSED */ \ | 218 | /* SCTP_STATE_CLOSED */ \ |
219 | TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \ | 219 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
220 | /* SCTP_STATE_COOKIE_WAIT */ \ | 220 | /* SCTP_STATE_COOKIE_WAIT */ \ |
221 | TYPE_SCTP_FUNC(sctp_sf_violation), \ | 221 | TYPE_SCTP_FUNC(sctp_sf_violation), \ |
222 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 222 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
@@ -258,7 +258,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type, | |||
258 | /* SCTP_STATE_EMPTY */ \ | 258 | /* SCTP_STATE_EMPTY */ \ |
259 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ | 259 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
260 | /* SCTP_STATE_CLOSED */ \ | 260 | /* SCTP_STATE_CLOSED */ \ |
261 | TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \ | 261 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
262 | /* SCTP_STATE_COOKIE_WAIT */ \ | 262 | /* SCTP_STATE_COOKIE_WAIT */ \ |
263 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ | 263 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
264 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 264 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
@@ -300,7 +300,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type, | |||
300 | /* SCTP_STATE_EMPTY */ \ | 300 | /* SCTP_STATE_EMPTY */ \ |
301 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ | 301 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
302 | /* SCTP_STATE_CLOSED */ \ | 302 | /* SCTP_STATE_CLOSED */ \ |
303 | TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \ | 303 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
304 | /* SCTP_STATE_COOKIE_WAIT */ \ | 304 | /* SCTP_STATE_COOKIE_WAIT */ \ |
305 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ | 305 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
306 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 306 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
@@ -499,7 +499,7 @@ static const sctp_sm_table_entry_t addip_chunk_event_table[SCTP_NUM_ADDIP_CHUNK_ | |||
499 | /* SCTP_STATE_EMPTY */ \ | 499 | /* SCTP_STATE_EMPTY */ \ |
500 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ | 500 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
501 | /* SCTP_STATE_CLOSED */ \ | 501 | /* SCTP_STATE_CLOSED */ \ |
502 | TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \ | 502 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
503 | /* SCTP_STATE_COOKIE_WAIT */ \ | 503 | /* SCTP_STATE_COOKIE_WAIT */ \ |
504 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ | 504 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
505 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 505 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
@@ -528,7 +528,7 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = { | |||
528 | /* SCTP_STATE_EMPTY */ | 528 | /* SCTP_STATE_EMPTY */ |
529 | TYPE_SCTP_FUNC(sctp_sf_ootb), | 529 | TYPE_SCTP_FUNC(sctp_sf_ootb), |
530 | /* SCTP_STATE_CLOSED */ | 530 | /* SCTP_STATE_CLOSED */ |
531 | TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), | 531 | TYPE_SCTP_FUNC(sctp_sf_ootb), |
532 | /* SCTP_STATE_COOKIE_WAIT */ | 532 | /* SCTP_STATE_COOKIE_WAIT */ |
533 | TYPE_SCTP_FUNC(sctp_sf_unk_chunk), | 533 | TYPE_SCTP_FUNC(sctp_sf_unk_chunk), |
534 | /* SCTP_STATE_COOKIE_ECHOED */ | 534 | /* SCTP_STATE_COOKIE_ECHOED */ |
diff --git a/net/socket.c b/net/socket.c index 7d44453dfae1..b09eb9036a17 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -777,9 +777,6 @@ static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov, | |||
777 | if (pos != 0) | 777 | if (pos != 0) |
778 | return -ESPIPE; | 778 | return -ESPIPE; |
779 | 779 | ||
780 | if (iocb->ki_left == 0) /* Match SYS5 behaviour */ | ||
781 | return 0; | ||
782 | |||
783 | x = alloc_sock_iocb(iocb, &siocb); | 780 | x = alloc_sock_iocb(iocb, &siocb); |
784 | if (!x) | 781 | if (!x) |
785 | return -ENOMEM; | 782 | return -ENOMEM; |
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 1a899924023f..036ab520df21 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c | |||
@@ -1110,7 +1110,8 @@ svc_tcp_accept(struct svc_sock *svsk) | |||
1110 | serv->sv_name); | 1110 | serv->sv_name); |
1111 | printk(KERN_NOTICE | 1111 | printk(KERN_NOTICE |
1112 | "%s: last TCP connect from %s\n", | 1112 | "%s: last TCP connect from %s\n", |
1113 | serv->sv_name, buf); | 1113 | serv->sv_name, __svc_print_addr(sin, |
1114 | buf, sizeof(buf))); | ||
1114 | } | 1115 | } |
1115 | /* | 1116 | /* |
1116 | * Always select the oldest socket. It's not fair, | 1117 | * Always select the oldest socket. It's not fair, |
diff --git a/net/wireless/core.c b/net/wireless/core.c index 7eabd55417a5..9771451eae21 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c | |||
@@ -213,7 +213,7 @@ out_fail_notifier: | |||
213 | out_fail_sysfs: | 213 | out_fail_sysfs: |
214 | return err; | 214 | return err; |
215 | } | 215 | } |
216 | module_init(cfg80211_init); | 216 | subsys_initcall(cfg80211_init); |
217 | 217 | ||
218 | static void cfg80211_exit(void) | 218 | static void cfg80211_exit(void) |
219 | { | 219 | { |
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c index 88aaacd9f822..2d5d2255a27c 100644 --- a/net/wireless/sysfs.c +++ b/net/wireless/sysfs.c | |||
@@ -52,12 +52,14 @@ static void wiphy_dev_release(struct device *dev) | |||
52 | cfg80211_dev_free(rdev); | 52 | cfg80211_dev_free(rdev); |
53 | } | 53 | } |
54 | 54 | ||
55 | #ifdef CONFIG_HOTPLUG | ||
55 | static int wiphy_uevent(struct device *dev, char **envp, | 56 | static int wiphy_uevent(struct device *dev, char **envp, |
56 | int num_envp, char *buf, int size) | 57 | int num_envp, char *buf, int size) |
57 | { | 58 | { |
58 | /* TODO, we probably need stuff here */ | 59 | /* TODO, we probably need stuff here */ |
59 | return 0; | 60 | return 0; |
60 | } | 61 | } |
62 | #endif | ||
61 | 63 | ||
62 | struct class ieee80211_class = { | 64 | struct class ieee80211_class = { |
63 | .name = "ieee80211", | 65 | .name = "ieee80211", |
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 36946629b6ca..0753b20e23fe 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c | |||
@@ -316,6 +316,7 @@ static inline int inode_doinit(struct inode *inode) | |||
316 | } | 316 | } |
317 | 317 | ||
318 | enum { | 318 | enum { |
319 | Opt_error = -1, | ||
319 | Opt_context = 1, | 320 | Opt_context = 1, |
320 | Opt_fscontext = 2, | 321 | Opt_fscontext = 2, |
321 | Opt_defcontext = 4, | 322 | Opt_defcontext = 4, |
@@ -327,6 +328,7 @@ static match_table_t tokens = { | |||
327 | {Opt_fscontext, "fscontext=%s"}, | 328 | {Opt_fscontext, "fscontext=%s"}, |
328 | {Opt_defcontext, "defcontext=%s"}, | 329 | {Opt_defcontext, "defcontext=%s"}, |
329 | {Opt_rootcontext, "rootcontext=%s"}, | 330 | {Opt_rootcontext, "rootcontext=%s"}, |
331 | {Opt_error, NULL}, | ||
330 | }; | 332 | }; |
331 | 333 | ||
332 | #define SEL_MOUNT_FAIL_MSG "SELinux: duplicate or incompatible mount options\n" | 334 | #define SEL_MOUNT_FAIL_MSG "SELinux: duplicate or incompatible mount options\n" |
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c index f057430db0d0..9b5656d8bcca 100644 --- a/sound/core/memalloc.c +++ b/sound/core/memalloc.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/pci.h> | 27 | #include <linux/pci.h> |
28 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
29 | #include <linux/mm.h> | 29 | #include <linux/mm.h> |
30 | #include <linux/seq_file.h> | ||
30 | #include <asm/uaccess.h> | 31 | #include <asm/uaccess.h> |
31 | #include <linux/dma-mapping.h> | 32 | #include <linux/dma-mapping.h> |
32 | #include <linux/moduleparam.h> | 33 | #include <linux/moduleparam.h> |
@@ -481,53 +482,54 @@ static void free_all_reserved_pages(void) | |||
481 | #define SND_MEM_PROC_FILE "driver/snd-page-alloc" | 482 | #define SND_MEM_PROC_FILE "driver/snd-page-alloc" |
482 | static struct proc_dir_entry *snd_mem_proc; | 483 | static struct proc_dir_entry *snd_mem_proc; |
483 | 484 | ||
484 | static int snd_mem_proc_read(char *page, char **start, off_t off, | 485 | static int snd_mem_proc_read(struct seq_file *seq, void *offset) |
485 | int count, int *eof, void *data) | ||
486 | { | 486 | { |
487 | int len = 0; | ||
488 | long pages = snd_allocated_pages >> (PAGE_SHIFT-12); | 487 | long pages = snd_allocated_pages >> (PAGE_SHIFT-12); |
489 | struct snd_mem_list *mem; | 488 | struct snd_mem_list *mem; |
490 | int devno; | 489 | int devno; |
491 | static char *types[] = { "UNKNOWN", "CONT", "DEV", "DEV-SG", "SBUS" }; | 490 | static char *types[] = { "UNKNOWN", "CONT", "DEV", "DEV-SG", "SBUS" }; |
492 | 491 | ||
493 | mutex_lock(&list_mutex); | 492 | mutex_lock(&list_mutex); |
494 | len += snprintf(page + len, count - len, | 493 | seq_printf(seq, "pages : %li bytes (%li pages per %likB)\n", |
495 | "pages : %li bytes (%li pages per %likB)\n", | 494 | pages * PAGE_SIZE, pages, PAGE_SIZE / 1024); |
496 | pages * PAGE_SIZE, pages, PAGE_SIZE / 1024); | ||
497 | devno = 0; | 495 | devno = 0; |
498 | list_for_each_entry(mem, &mem_list_head, list) { | 496 | list_for_each_entry(mem, &mem_list_head, list) { |
499 | devno++; | 497 | devno++; |
500 | len += snprintf(page + len, count - len, | 498 | seq_printf(seq, "buffer %d : ID %08x : type %s\n", |
501 | "buffer %d : ID %08x : type %s\n", | 499 | devno, mem->id, types[mem->buffer.dev.type]); |
502 | devno, mem->id, types[mem->buffer.dev.type]); | 500 | seq_printf(seq, " addr = 0x%lx, size = %d bytes\n", |
503 | len += snprintf(page + len, count - len, | 501 | (unsigned long)mem->buffer.addr, |
504 | " addr = 0x%lx, size = %d bytes\n", | 502 | (int)mem->buffer.bytes); |
505 | (unsigned long)mem->buffer.addr, (int)mem->buffer.bytes); | ||
506 | } | 503 | } |
507 | mutex_unlock(&list_mutex); | 504 | mutex_unlock(&list_mutex); |
508 | return len; | 505 | return 0; |
506 | } | ||
507 | |||
508 | static int snd_mem_proc_open(struct inode *inode, struct file *file) | ||
509 | { | ||
510 | return single_open(file, snd_mem_proc_read, NULL); | ||
509 | } | 511 | } |
510 | 512 | ||
511 | /* FIXME: for pci only - other bus? */ | 513 | /* FIXME: for pci only - other bus? */ |
512 | #ifdef CONFIG_PCI | 514 | #ifdef CONFIG_PCI |
513 | #define gettoken(bufp) strsep(bufp, " \t\n") | 515 | #define gettoken(bufp) strsep(bufp, " \t\n") |
514 | 516 | ||
515 | static int snd_mem_proc_write(struct file *file, const char __user *buffer, | 517 | static ssize_t snd_mem_proc_write(struct file *file, const char __user * buffer, |
516 | unsigned long count, void *data) | 518 | size_t count, loff_t * ppos) |
517 | { | 519 | { |
518 | char buf[128]; | 520 | char buf[128]; |
519 | char *token, *p; | 521 | char *token, *p; |
520 | 522 | ||
521 | if (count > ARRAY_SIZE(buf) - 1) | 523 | if (count > sizeof(buf) - 1) |
522 | count = ARRAY_SIZE(buf) - 1; | 524 | return -EINVAL; |
523 | if (copy_from_user(buf, buffer, count)) | 525 | if (copy_from_user(buf, buffer, count)) |
524 | return -EFAULT; | 526 | return -EFAULT; |
525 | buf[ARRAY_SIZE(buf) - 1] = '\0'; | 527 | buf[count] = '\0'; |
526 | 528 | ||
527 | p = buf; | 529 | p = buf; |
528 | token = gettoken(&p); | 530 | token = gettoken(&p); |
529 | if (! token || *token == '#') | 531 | if (! token || *token == '#') |
530 | return (int)count; | 532 | return count; |
531 | if (strcmp(token, "add") == 0) { | 533 | if (strcmp(token, "add") == 0) { |
532 | char *endp; | 534 | char *endp; |
533 | int vendor, device, size, buffers; | 535 | int vendor, device, size, buffers; |
@@ -548,7 +550,7 @@ static int snd_mem_proc_write(struct file *file, const char __user *buffer, | |||
548 | (buffers = simple_strtol(token, NULL, 0)) <= 0 || | 550 | (buffers = simple_strtol(token, NULL, 0)) <= 0 || |
549 | buffers > 4) { | 551 | buffers > 4) { |
550 | printk(KERN_ERR "snd-page-alloc: invalid proc write format\n"); | 552 | printk(KERN_ERR "snd-page-alloc: invalid proc write format\n"); |
551 | return (int)count; | 553 | return count; |
552 | } | 554 | } |
553 | vendor &= 0xffff; | 555 | vendor &= 0xffff; |
554 | device &= 0xffff; | 556 | device &= 0xffff; |
@@ -560,7 +562,7 @@ static int snd_mem_proc_write(struct file *file, const char __user *buffer, | |||
560 | if (pci_set_dma_mask(pci, mask) < 0 || | 562 | if (pci_set_dma_mask(pci, mask) < 0 || |
561 | pci_set_consistent_dma_mask(pci, mask) < 0) { | 563 | pci_set_consistent_dma_mask(pci, mask) < 0) { |
562 | printk(KERN_ERR "snd-page-alloc: cannot set DMA mask %lx for pci %04x:%04x\n", mask, vendor, device); | 564 | printk(KERN_ERR "snd-page-alloc: cannot set DMA mask %lx for pci %04x:%04x\n", mask, vendor, device); |
563 | return (int)count; | 565 | return count; |
564 | } | 566 | } |
565 | } | 567 | } |
566 | for (i = 0; i < buffers; i++) { | 568 | for (i = 0; i < buffers; i++) { |
@@ -570,7 +572,7 @@ static int snd_mem_proc_write(struct file *file, const char __user *buffer, | |||
570 | size, &dmab) < 0) { | 572 | size, &dmab) < 0) { |
571 | printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size); | 573 | printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size); |
572 | pci_dev_put(pci); | 574 | pci_dev_put(pci); |
573 | return (int)count; | 575 | return count; |
574 | } | 576 | } |
575 | snd_dma_reserve_buf(&dmab, snd_dma_pci_buf_id(pci)); | 577 | snd_dma_reserve_buf(&dmab, snd_dma_pci_buf_id(pci)); |
576 | } | 578 | } |
@@ -596,9 +598,21 @@ static int snd_mem_proc_write(struct file *file, const char __user *buffer, | |||
596 | free_all_reserved_pages(); | 598 | free_all_reserved_pages(); |
597 | else | 599 | else |
598 | printk(KERN_ERR "snd-page-alloc: invalid proc cmd\n"); | 600 | printk(KERN_ERR "snd-page-alloc: invalid proc cmd\n"); |
599 | return (int)count; | 601 | return count; |
600 | } | 602 | } |
601 | #endif /* CONFIG_PCI */ | 603 | #endif /* CONFIG_PCI */ |
604 | |||
605 | static const struct file_operations snd_mem_proc_fops = { | ||
606 | .owner = THIS_MODULE, | ||
607 | .open = snd_mem_proc_open, | ||
608 | .read = seq_read, | ||
609 | #ifdef CONFIG_PCI | ||
610 | .write = snd_mem_proc_write, | ||
611 | #endif | ||
612 | .llseek = seq_lseek, | ||
613 | .release = single_release, | ||
614 | }; | ||
615 | |||
602 | #endif /* CONFIG_PROC_FS */ | 616 | #endif /* CONFIG_PROC_FS */ |
603 | 617 | ||
604 | /* | 618 | /* |
@@ -609,12 +623,8 @@ static int __init snd_mem_init(void) | |||
609 | { | 623 | { |
610 | #ifdef CONFIG_PROC_FS | 624 | #ifdef CONFIG_PROC_FS |
611 | snd_mem_proc = create_proc_entry(SND_MEM_PROC_FILE, 0644, NULL); | 625 | snd_mem_proc = create_proc_entry(SND_MEM_PROC_FILE, 0644, NULL); |
612 | if (snd_mem_proc) { | 626 | if (snd_mem_proc) |
613 | snd_mem_proc->read_proc = snd_mem_proc_read; | 627 | snd_mem_proc->proc_fops = &snd_mem_proc_fops; |
614 | #ifdef CONFIG_PCI | ||
615 | snd_mem_proc->write_proc = snd_mem_proc_write; | ||
616 | #endif | ||
617 | } | ||
618 | #endif | 628 | #endif |
619 | return 0; | 629 | return 0; |
620 | } | 630 | } |