964 files changed, 10052 insertions, 5610 deletions
@@ -3554,12 +3554,12 @@ E: cvance@nai.com | |||
3554 | D: portions of the Linux Security Module (LSM) framework and security modules | 3554 | D: portions of the Linux Security Module (LSM) framework and security modules |
3555 | 3555 | ||
3556 | N: Petr Vandrovec | 3556 | N: Petr Vandrovec |
3557 | E: vandrove@vc.cvut.cz | 3557 | E: petr@vandrovec.name |
3558 | D: Small contributions to ncpfs | 3558 | D: Small contributions to ncpfs |
3559 | D: Matrox framebuffer driver | 3559 | D: Matrox framebuffer driver |
3560 | S: Chudenicka 8 | 3560 | S: 21513 Conradia Ct |
3561 | S: 10200 Prague 10, Hostivar | 3561 | S: Cupertino, CA 95014 |
3562 | S: Czech Republic | 3562 | S: USA |
3563 | 3563 | ||
3564 | N: Thibaut Varene | 3564 | N: Thibaut Varene |
3565 | E: T-Bone@parisc-linux.org | 3565 | E: T-Bone@parisc-linux.org |
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl index ecd35e9d4410..feca0758391e 100644 --- a/Documentation/DocBook/device-drivers.tmpl +++ b/Documentation/DocBook/device-drivers.tmpl | |||
@@ -46,7 +46,6 @@ | |||
46 | 46 | ||
47 | <sect1><title>Atomic and pointer manipulation</title> | 47 | <sect1><title>Atomic and pointer manipulation</title> |
48 | !Iarch/x86/include/asm/atomic.h | 48 | !Iarch/x86/include/asm/atomic.h |
49 | !Iarch/x86/include/asm/unaligned.h | ||
50 | </sect1> | 49 | </sect1> |
51 | 50 | ||
52 | <sect1><title>Delaying, scheduling, and timer routines</title> | 51 | <sect1><title>Delaying, scheduling, and timer routines</title> |
diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl index a20c6f6fffc3..6899f471fb15 100644 --- a/Documentation/DocBook/kernel-api.tmpl +++ b/Documentation/DocBook/kernel-api.tmpl | |||
@@ -57,7 +57,6 @@ | |||
57 | </para> | 57 | </para> |
58 | 58 | ||
59 | <sect1><title>String Conversions</title> | 59 | <sect1><title>String Conversions</title> |
60 | !Ilib/vsprintf.c | ||
61 | !Elib/vsprintf.c | 60 | !Elib/vsprintf.c |
62 | </sect1> | 61 | </sect1> |
63 | <sect1><title>String Manipulation</title> | 62 | <sect1><title>String Manipulation</title> |
diff --git a/Documentation/DocBook/kernel-locking.tmpl b/Documentation/DocBook/kernel-locking.tmpl index 0b1a3f97f285..a0d479d1e1dd 100644 --- a/Documentation/DocBook/kernel-locking.tmpl +++ b/Documentation/DocBook/kernel-locking.tmpl | |||
@@ -1961,6 +1961,12 @@ machines due to caching. | |||
1961 | </sect1> | 1961 | </sect1> |
1962 | </chapter> | 1962 | </chapter> |
1963 | 1963 | ||
1964 | <chapter id="apiref"> | ||
1965 | <title>Mutex API reference</title> | ||
1966 | !Iinclude/linux/mutex.h | ||
1967 | !Ekernel/mutex.c | ||
1968 | </chapter> | ||
1969 | |||
1964 | <chapter id="references"> | 1970 | <chapter id="references"> |
1965 | <title>Further reading</title> | 1971 | <title>Further reading</title> |
1966 | 1972 | ||
diff --git a/Documentation/DocBook/tracepoint.tmpl b/Documentation/DocBook/tracepoint.tmpl index e8473eae2a20..b57a9ede3224 100644 --- a/Documentation/DocBook/tracepoint.tmpl +++ b/Documentation/DocBook/tracepoint.tmpl | |||
@@ -104,4 +104,9 @@ | |||
104 | <title>Block IO</title> | 104 | <title>Block IO</title> |
105 | !Iinclude/trace/events/block.h | 105 | !Iinclude/trace/events/block.h |
106 | </chapter> | 106 | </chapter> |
107 | |||
108 | <chapter id="workqueue"> | ||
109 | <title>Workqueue</title> | ||
110 | !Iinclude/trace/events/workqueue.h | ||
111 | </chapter> | ||
107 | </book> | 112 | </book> |
diff --git a/Documentation/block/cfq-iosched.txt b/Documentation/block/cfq-iosched.txt new file mode 100644 index 000000000000..e578feed6d81 --- /dev/null +++ b/Documentation/block/cfq-iosched.txt | |||
@@ -0,0 +1,45 @@ | |||
1 | CFQ ioscheduler tunables | ||
2 | ======================== | ||
3 | |||
4 | slice_idle | ||
5 | ---------- | ||
6 | This specifies how long CFQ should idle for the next request on certain cfq | ||
7 | queues (for sequential workloads) and service trees (for random workloads) before | ||
8 | the queue is expired and CFQ selects the next queue to dispatch from. | ||
9 | |||
10 | By default slice_idle is a non-zero value. That means by default we idle on | ||
11 | queues/service trees. This can be very helpful on highly seeky media like | ||
12 | single spindle SATA/SAS disks where we can cut down on overall number of | ||
13 | seeks and see improved throughput. | ||
14 | |||
15 | Setting slice_idle to 0 will remove all idling at the queue/service tree | ||
16 | level and one should see an overall throughput improvement on faster storage | ||
17 | devices like multiple SATA/SAS disks in a hardware RAID configuration. The | ||
18 | downside is that the isolation provided from WRITES also goes down and the | ||
19 | notion of IO priority becomes weaker. | ||
20 | |||
21 | So depending on storage and workload, it might be useful to set slice_idle=0. | ||
22 | In general, for SATA/SAS disks and software RAID of SATA/SAS disks, keeping | ||
23 | slice_idle enabled should be useful. For any configuration where there are | ||
24 | multiple spindles behind a single LUN (host-based hardware RAID controller | ||
25 | or storage arrays), setting slice_idle=0 might result in better throughput | ||
26 | and acceptable latencies. | ||
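For illustration, the tunable can be flipped from user space as in the sketch
below (the disk name "sda" is arbitrary, and a plain shell redirect to the
sysfs file would work just as well):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* Disable CFQ idling for one disk; adjust the device name as needed. */
		int fd = open("/sys/block/sda/queue/iosched/slice_idle", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, "0", 1) != 1)
			perror("write");
		close(fd);
		return 0;
	}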
27 | |||
28 | CFQ IOPS Mode for group scheduling | ||
29 | =================================== | ||
30 | The basic CFQ design is to provide priority-based time slices. A higher priority | ||
31 | process gets a bigger time slice and a lower priority process gets a smaller | ||
32 | one. Measuring time becomes harder if storage is fast and supports NCQ, and | ||
33 | it would be better to dispatch multiple requests from multiple cfq queues in | ||
34 | the request queue at a time. In such a scenario, it is not possible to measure | ||
35 | the time consumed by a single queue accurately. | ||
36 | |||
37 | What is possible, though, is to measure the number of requests dispatched from | ||
38 | a single queue and also allow dispatch from multiple cfq queues at the same | ||
39 | time. This effectively becomes fairness in terms of IOPS (IO operations per | ||
40 | second). | ||
41 | |||
42 | If one sets slice_idle=0 and if storage supports NCQ, CFQ internally switches | ||
43 | to IOPS mode and starts providing fairness in terms of number of requests | ||
44 | dispatched. Note that this mode switching takes effect only for group | ||
45 | scheduling. For non-cgroup users nothing should change. | ||
diff --git a/Documentation/cgroups/blkio-controller.txt b/Documentation/cgroups/blkio-controller.txt index 48e0b21b0059..6919d62591d9 100644 --- a/Documentation/cgroups/blkio-controller.txt +++ b/Documentation/cgroups/blkio-controller.txt | |||
@@ -217,6 +217,7 @@ Details of cgroup files | |||
217 | CFQ sysfs tunable | 217 | CFQ sysfs tunable |
218 | ================= | 218 | ================= |
219 | /sys/block/<disk>/queue/iosched/group_isolation | 219 | /sys/block/<disk>/queue/iosched/group_isolation |
220 | ----------------------------------------------- | ||
220 | 221 | ||
221 | If group_isolation=1, it provides stronger isolation between groups at the | 222 | If group_isolation=1, it provides stronger isolation between groups at the |
222 | expense of throughput. By default group_isolation is 0. In general that | 223 | expense of throughput. By default group_isolation is 0. In general that |
@@ -243,6 +244,33 @@ By default one should run with group_isolation=0. If that is not sufficient | |||
243 | and one wants stronger isolation between groups, then set group_isolation=1 | 244 | and one wants stronger isolation between groups, then set group_isolation=1 |
244 | but this will come at cost of reduced throughput. | 245 | but this will come at cost of reduced throughput. |
245 | 246 | ||
247 | /sys/block/<disk>/queue/iosched/slice_idle | ||
248 | ------------------------------------------ | ||
249 | On faster hardware CFQ can be slow, especially with sequential workloads. | ||
250 | This happens because CFQ idles on a single queue, and a single queue might not | ||
251 | drive deep enough request queue depths to keep the storage busy. In such | ||
252 | scenarios one can try setting slice_idle=0; that switches CFQ to IOPS | ||
253 | (IO operations per second) mode on NCQ-supporting hardware. | ||
254 | |||
255 | That means CFQ will not idle between cfq queues of a cfq group and hence be | ||
256 | able to drive a higher queue depth and achieve better throughput. That also | ||
257 | means that cfq provides fairness among groups in terms of IOPS and not in | ||
258 | terms of disk time. | ||
259 | |||
260 | /sys/block/<disk>/queue/iosched/group_idle | ||
261 | ------------------------------------------ | ||
262 | If one disables idling on individual cfq queues and cfq service trees by | ||
263 | setting slice_idle=0, group_idle kicks in. That means CFQ will still idle | ||
264 | on the group in an attempt to provide fairness among groups. | ||
265 | |||
266 | By default group_idle is the same as slice_idle and does not do anything if | ||
267 | slice_idle is enabled. | ||
268 | |||
269 | One can experience an overall throughput drop if one has created multiple | ||
270 | groups and put applications in those groups which are not driving enough | ||
271 | IO to keep the disk busy. In that case set group_idle=0, and CFQ will not idle | ||
272 | on individual groups and throughput should improve. | ||
273 | |||
246 | What works | 274 | What works |
247 | ========== | 275 | ========== |
248 | - Currently only sync IO queues are supported. All the buffered writes are | 276 | - Currently only sync IO queues are supported. All the buffered writes are |
diff --git a/Documentation/gpio.txt b/Documentation/gpio.txt index d96a6dba5748..9633da01ff46 100644 --- a/Documentation/gpio.txt +++ b/Documentation/gpio.txt | |||
@@ -109,17 +109,19 @@ use numbers 2000-2063 to identify GPIOs in a bank of I2C GPIO expanders. | |||
109 | 109 | ||
110 | If you want to initialize a structure with an invalid GPIO number, use | 110 | If you want to initialize a structure with an invalid GPIO number, use |
111 | some negative number (perhaps "-EINVAL"); that will never be valid. To | 111 | some negative number (perhaps "-EINVAL"); that will never be valid. To |
112 | test if a number could reference a GPIO, you may use this predicate: | 112 | test whether such a number from such a structure could reference a GPIO, you |
113 | may use this predicate: | ||
113 | 114 | ||
114 | int gpio_is_valid(int number); | 115 | int gpio_is_valid(int number); |
115 | 116 | ||
116 | A number that's not valid will be rejected by calls which may request | 117 | A number that's not valid will be rejected by calls which may request |
117 | or free GPIOs (see below). Other numbers may also be rejected; for | 118 | or free GPIOs (see below). Other numbers may also be rejected; for |
118 | example, a number might be valid but unused on a given board. | 119 | example, a number might be valid but temporarily unused on a given board. |
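For example, a driver probe routine might guard an optional line this way (a
sketch; the platform data field and the "reset" label are invented for the
example):

	if (gpio_is_valid(pdata->reset_gpio)) {
		status = gpio_request(pdata->reset_gpio, "reset");
		if (status)
			return status;
		gpio_direction_output(pdata->reset_gpio, 0);
	}
	/* else: this particular board has no reset line wired up */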
119 | |||
120 | Whether a platform supports multiple GPIO controllers is currently a | ||
121 | platform-specific implementation issue. | ||
122 | 120 | ||
121 | Whether a platform supports multiple GPIO controllers is a platform-specific | ||
122 | implementation issue, as are the questions of whether that support can leave "holes" | ||
123 | in the space of GPIO numbers and whether new controllers can be added at runtime. | ||
124 | Such issues can affect things including whether adjacent GPIO numbers are both valid. | ||
123 | 125 | ||
124 | Using GPIOs | 126 | Using GPIOs |
125 | ----------- | 127 | ----------- |
@@ -480,12 +482,16 @@ To support this framework, a platform's Kconfig will "select" either | |||
480 | ARCH_REQUIRE_GPIOLIB or ARCH_WANT_OPTIONAL_GPIOLIB | 482 | ARCH_REQUIRE_GPIOLIB or ARCH_WANT_OPTIONAL_GPIOLIB |
481 | and arrange that its <asm/gpio.h> includes <asm-generic/gpio.h> and defines | 483 | and arrange that its <asm/gpio.h> includes <asm-generic/gpio.h> and defines |
482 | three functions: gpio_get_value(), gpio_set_value(), and gpio_cansleep(). | 484 | three functions: gpio_get_value(), gpio_set_value(), and gpio_cansleep(). |
483 | They may also want to provide a custom value for ARCH_NR_GPIOS. | ||
484 | 485 | ||
485 | ARCH_REQUIRE_GPIOLIB means that the gpio-lib code will always get compiled | 486 | It may also provide a custom value for ARCH_NR_GPIOS, so that it better |
487 | reflects the number of GPIOs in actual use on that platform, without | ||
488 | wasting static table space. (It should count both built-in/SoC GPIOs and | ||
489 | also ones on GPIO expanders.) | ||
490 | |||
491 | ARCH_REQUIRE_GPIOLIB means that the gpiolib code will always get compiled | ||
486 | into the kernel on that architecture. | 492 | into the kernel on that architecture. |
487 | 493 | ||
488 | ARCH_WANT_OPTIONAL_GPIOLIB means the gpio-lib code defaults to off and the user | 494 | ARCH_WANT_OPTIONAL_GPIOLIB means the gpiolib code defaults to off and the user |
489 | can enable it and build it into the kernel optionally. | 495 | can enable it and build it into the kernel optionally. |
490 | 496 | ||
491 | If neither of these options are selected, the platform does not support | 497 | If neither of these options are selected, the platform does not support |
diff --git a/Documentation/hwmon/sysfs-interface b/Documentation/hwmon/sysfs-interface index ff45d1f837c8..48ceabedf55d 100644 --- a/Documentation/hwmon/sysfs-interface +++ b/Documentation/hwmon/sysfs-interface | |||
@@ -91,12 +91,11 @@ name The chip name. | |||
91 | I2C devices get this attribute created automatically. | 91 | I2C devices get this attribute created automatically. |
92 | RO | 92 | RO |
93 | 93 | ||
94 | update_rate The rate at which the chip will update readings. | 94 | update_interval The interval at which the chip will update readings. |
95 | Unit: millisecond | 95 | Unit: millisecond |
96 | RW | 96 | RW |
97 | Some devices have a variable update rate. This attribute | 97 | Some devices have a variable update rate or interval. |
98 | can be used to change the update rate to the desired | 98 | This attribute can be used to change it to the desired value. |
99 | frequency. | ||
100 | 99 | ||
101 | 100 | ||
102 | ************ | 101 | ************ |
diff --git a/Documentation/kernel-doc-nano-HOWTO.txt b/Documentation/kernel-doc-nano-HOWTO.txt index 27a52b35d55b..3d8a97747f77 100644 --- a/Documentation/kernel-doc-nano-HOWTO.txt +++ b/Documentation/kernel-doc-nano-HOWTO.txt | |||
@@ -345,5 +345,10 @@ documentation, in <filename>, for the functions listed. | |||
345 | section titled <section title> from <filename>. | 345 | section titled <section title> from <filename>. |
346 | Spaces are allowed in <section title>; do not quote the <section title>. | 346 | Spaces are allowed in <section title>; do not quote the <section title>. |
347 | 347 | ||
348 | !C<filename> is replaced by nothing, but makes the tools check that | ||
349 | all DOC: sections and documented functions, symbols, etc. are used. | ||
350 | This makes sense to use when you use !F/!P only and want to verify | ||
351 | that all documentation is included. | ||
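For instance, a template that pulls in individual functions with !F lines
could also contain a line such as "!Cinclude/linux/mutex.h" (the file name
here is only illustrative) to make the tools warn about any kernel-doc in
that file that is never referenced.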
352 | |||
348 | Tim. | 353 | Tim. |
349 | */ <twaugh@redhat.com> | 354 | */ <twaugh@redhat.com> |
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index f084af0cb8e0..8dd7248508a9 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt | |||
@@ -1974,15 +1974,18 @@ and is between 256 and 4096 characters. It is defined in the file | |||
1974 | force Enable ASPM even on devices that claim not to support it. | 1974 | force Enable ASPM even on devices that claim not to support it. |
1975 | WARNING: Forcing ASPM on may cause system lockups. | 1975 | WARNING: Forcing ASPM on may cause system lockups. |
1976 | 1976 | ||
1977 | pcie_ports= [PCIE] PCIe ports handling: | ||
1978 | auto Ask the BIOS whether or not to use native PCIe services | ||
1979 | associated with PCIe ports (PME, hot-plug, AER). Use | ||
1980 | them only if that is allowed by the BIOS. | ||
1981 | native Use native PCIe services associated with PCIe ports | ||
1982 | unconditionally. | ||
1983 | compat Treat PCIe ports as PCI-to-PCI bridges, disable the PCIe | ||
1984 | ports driver. | ||
1985 | |||
1977 | pcie_pme= [PCIE,PM] Native PCIe PME signaling options: | 1986 | pcie_pme= [PCIE,PM] Native PCIe PME signaling options: |
1978 | Format: {auto|force}[,nomsi] | ||
1979 | auto Use native PCIe PME signaling if the BIOS allows the | ||
1980 | kernel to control PCIe config registers of root ports. | ||
1981 | force Use native PCIe PME signaling even if the BIOS refuses | ||
1982 | to allow the kernel to control the relevant PCIe config | ||
1983 | registers. | ||
1984 | nomsi Do not use MSI for native PCIe PME signaling (this makes | 1987 | nomsi Do not use MSI for native PCIe PME signaling (this makes |
1985 | all PCIe root ports use INTx for everything). | 1988 | all PCIe root ports use INTx for all services). |
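For example, a hypothetical command line combining the two parameters might
read "pcie_ports=native pcie_pme=force,nomsi", i.e. use native port services
unconditionally and native PME signaling without MSI.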
1986 | 1989 | ||
1987 | pcmv= [HW,PCMCIA] BadgePAD 4 | 1990 | pcmv= [HW,PCMCIA] BadgePAD 4 |
1988 | 1991 | ||
diff --git a/Documentation/mutex-design.txt b/Documentation/mutex-design.txt index c91ccc0720fa..38c10fd7f411 100644 --- a/Documentation/mutex-design.txt +++ b/Documentation/mutex-design.txt | |||
@@ -9,7 +9,7 @@ firstly, there's nothing wrong with semaphores. But if the simpler | |||
9 | mutex semantics are sufficient for your code, then there are a couple | 9 | mutex semantics are sufficient for your code, then there are a couple |
10 | of advantages of mutexes: | 10 | of advantages of mutexes: |
11 | 11 | ||
12 | - 'struct mutex' is smaller on most architectures: .e.g on x86, | 12 | - 'struct mutex' is smaller on most architectures: E.g. on x86, |
13 | 'struct semaphore' is 20 bytes, 'struct mutex' is 16 bytes. | 13 | 'struct semaphore' is 20 bytes, 'struct mutex' is 16 bytes. |
14 | A smaller structure size means less RAM footprint, and better | 14 | A smaller structure size means less RAM footprint, and better |
15 | CPU-cache utilization. | 15 | CPU-cache utilization. |
@@ -136,3 +136,4 @@ the APIs of 'struct mutex' have been streamlined: | |||
136 | void mutex_lock_nested(struct mutex *lock, unsigned int subclass); | 136 | void mutex_lock_nested(struct mutex *lock, unsigned int subclass); |
137 | int mutex_lock_interruptible_nested(struct mutex *lock, | 137 | int mutex_lock_interruptible_nested(struct mutex *lock, |
138 | unsigned int subclass); | 138 | unsigned int subclass); |
139 | int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); | ||
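As a sketch of how the last helper is typically used (the object and lock
names below are made up for the example), a "drop a reference, tear down on
zero" path can avoid taking the mutex on every put:

	struct widget {
		atomic_t refcount;
		struct list_head node;
	};

	static DEFINE_MUTEX(widget_list_lock);

	static void put_widget(struct widget *w)
	{
		/* The mutex is acquired only when the count drops to zero. */
		if (atomic_dec_and_mutex_lock(&w->refcount, &widget_list_lock)) {
			list_del(&w->node);
			mutex_unlock(&widget_list_lock);
			kfree(w);
		}
	}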
diff --git a/Documentation/power/regulator/overview.txt b/Documentation/power/regulator/overview.txt index 9363e056188a..8ed17587a74b 100644 --- a/Documentation/power/regulator/overview.txt +++ b/Documentation/power/regulator/overview.txt | |||
@@ -13,7 +13,7 @@ regulators (where voltage output is controllable) and current sinks (where | |||
13 | current limit is controllable). | 13 | current limit is controllable). |
14 | 14 | ||
15 | (C) 2008 Wolfson Microelectronics PLC. | 15 | (C) 2008 Wolfson Microelectronics PLC. |
16 | Author: Liam Girdwood <lg@opensource.wolfsonmicro.com> | 16 | Author: Liam Girdwood <lrg@slimlogic.co.uk> |
17 | 17 | ||
18 | 18 | ||
19 | Nomenclature | 19 | Nomenclature |
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt index ce46fa1e643e..37c6aad5e590 100644 --- a/Documentation/sound/alsa/HD-Audio-Models.txt +++ b/Documentation/sound/alsa/HD-Audio-Models.txt | |||
@@ -296,6 +296,7 @@ Conexant 5051 | |||
296 | Conexant 5066 | 296 | Conexant 5066 |
297 | ============= | 297 | ============= |
298 | laptop Basic Laptop config (default) | 298 | laptop Basic Laptop config (default) |
299 | hp-laptop HP laptops, e.g. G60 | ||
299 | dell-laptop Dell laptops | 300 | dell-laptop Dell laptops |
300 | dell-vostro Dell Vostro | 301 | dell-vostro Dell Vostro |
301 | olpc-xo-1_5 OLPC XO 1.5 | 302 | olpc-xo-1_5 OLPC XO 1.5 |
diff --git a/Documentation/workqueue.txt b/Documentation/workqueue.txt new file mode 100644 index 000000000000..e4498a2872c3 --- /dev/null +++ b/Documentation/workqueue.txt | |||
@@ -0,0 +1,380 @@ | |||
1 | |||
2 | Concurrency Managed Workqueue (cmwq) | ||
3 | |||
4 | September, 2010 Tejun Heo <tj@kernel.org> | ||
5 | Florian Mickler <florian@mickler.org> | ||
6 | |||
7 | CONTENTS | ||
8 | |||
9 | 1. Introduction | ||
10 | 2. Why cmwq? | ||
11 | 3. The Design | ||
12 | 4. Application Programming Interface (API) | ||
13 | 5. Example Execution Scenarios | ||
14 | 6. Guidelines | ||
15 | |||
16 | |||
17 | 1. Introduction | ||
18 | |||
19 | There are many cases where an asynchronous process execution context | ||
20 | is needed and the workqueue (wq) API is the most commonly used | ||
21 | mechanism for such cases. | ||
22 | |||
23 | When such an asynchronous execution context is needed, a work item | ||
24 | describing which function to execute is put on a queue. An | ||
25 | independent thread serves as the asynchronous execution context. The | ||
26 | queue is called workqueue and the thread is called worker. | ||
27 | |||
28 | While there are work items on the workqueue the worker executes the | ||
29 | functions associated with the work items one after the other. When | ||
30 | there is no work item left on the workqueue the worker becomes idle. | ||
31 | When a new work item gets queued, the worker begins executing again. | ||
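A minimal sketch of that pattern (all identifiers below are made up for the
example) looks like this:

	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <linux/module.h>
	#include <linux/workqueue.h>

	static void hello_fn(struct work_struct *work)
	{
		pr_info("hello from a workqueue worker\n");
	}

	static DECLARE_WORK(hello_work, hello_fn);

	static int __init hello_init(void)
	{
		/* Queue the work item; a worker runs hello_fn() asynchronously. */
		schedule_work(&hello_work);
		return 0;
	}
	module_init(hello_init);
	MODULE_LICENSE("GPL");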
32 | |||
33 | |||
34 | 2. Why cmwq? | ||
35 | |||
36 | In the original wq implementation, a multi threaded (MT) wq had one | ||
37 | worker thread per CPU and a single threaded (ST) wq had one worker | ||
38 | thread system-wide. A single MT wq needed to keep around the same | ||
39 | number of workers as the number of CPUs. The kernel grew a lot of MT | ||
40 | wq users over the years and with the number of CPU cores continuously | ||
41 | rising, some systems saturated the default 32k PID space just booting | ||
42 | up. | ||
43 | |||
44 | Although MT wq wasted a lot of resources, the level of concurrency | ||
45 | provided was unsatisfactory. The limitation was common to both ST and | ||
46 | MT wq, albeit less severe on MT. Each wq maintained its own separate | ||
47 | worker pool. An MT wq could provide only one execution context per CPU | ||
48 | while an ST wq provided one for the whole system. Work items had to | ||
49 | compete for those very limited execution contexts, leading to various | ||
50 | problems including proneness to deadlocks around the single execution context. | ||
51 | |||
52 | The tension between the provided level of concurrency and resource | ||
53 | usage also forced its users to make unnecessary tradeoffs like libata | ||
54 | choosing to use ST wq for polling PIOs and accepting an unnecessary | ||
55 | limitation that no two polling PIOs can progress at the same time. As | ||
56 | MT wq don't provide much better concurrency, users that require a | ||
57 | higher level of concurrency, like async or fscache, had to implement | ||
58 | their own thread pool. | ||
59 | |||
60 | Concurrency Managed Workqueue (cmwq) is a reimplementation of wq with | ||
61 | focus on the following goals. | ||
62 | |||
63 | * Maintain compatibility with the original workqueue API. | ||
64 | |||
65 | * Use per-CPU unified worker pools shared by all wq to provide | ||
66 | flexible level of concurrency on demand without wasting a lot of | ||
67 | resource. | ||
68 | |||
69 | * Automatically regulate worker pool and level of concurrency so that | ||
70 | the API users don't need to worry about such details. | ||
71 | |||
72 | |||
73 | 3. The Design | ||
74 | |||
75 | In order to ease the asynchronous execution of functions a new | ||
76 | abstraction, the work item, is introduced. | ||
77 | |||
78 | A work item is a simple struct that holds a pointer to the function | ||
79 | that is to be executed asynchronously. Whenever a driver or subsystem | ||
80 | wants a function to be executed asynchronously it has to set up a work | ||
81 | item pointing to that function and queue that work item on a | ||
82 | workqueue. | ||
83 | |||
84 | Special purpose threads, called worker threads, execute the functions | ||
85 | off of the queue, one after the other. If no work is queued, the | ||
86 | worker threads become idle. These worker threads are managed in so | ||
87 | called thread-pools. | ||
88 | |||
89 | The cmwq design differentiates between the user-facing workqueues that | ||
90 | subsystems and drivers queue work items on and the backend mechanism | ||
91 | which manages thread-pool and processes the queued work items. | ||
92 | |||
93 | The backend is called gcwq. There is one gcwq for each possible CPU | ||
94 | and one gcwq to serve work items queued on unbound workqueues. | ||
95 | |||
96 | Subsystems and drivers can create and queue work items through special | ||
97 | workqueue API functions as they see fit. They can influence some | ||
98 | aspects of the way the work items are executed by setting flags on the | ||
99 | workqueue they are putting the work item on. These flags include | ||
100 | things like CPU locality, reentrancy, concurrency limits and more. To | ||
101 | get a detailed overview refer to the API description of | ||
102 | alloc_workqueue() below. | ||
103 | |||
104 | When a work item is queued to a workqueue, the target gcwq is | ||
105 | determined according to the queue parameters and workqueue attributes | ||
106 | and appended on the shared worklist of the gcwq. For example, unless | ||
107 | specifically overridden, a work item of a bound workqueue will be | ||
108 | queued on the worklist of exactly that gcwq that is associated to the | ||
109 | CPU the issuer is running on. | ||
110 | |||
111 | For any worker pool implementation, managing the concurrency level | ||
112 | (how many execution contexts are active) is an important issue. cmwq | ||
113 | tries to keep the concurrency at a minimal but sufficient level. | ||
114 | Minimal to save resources and sufficient in that the system is used at | ||
115 | its full capacity. | ||
116 | |||
117 | Each gcwq bound to an actual CPU implements concurrency management by | ||
118 | hooking into the scheduler. The gcwq is notified whenever an active | ||
119 | worker wakes up or sleeps and keeps track of the number of the | ||
120 | currently runnable workers. Generally, work items are not expected to | ||
121 | hog a CPU and consume many cycles. That means maintaining just enough | ||
122 | concurrency to prevent work processing from stalling should be | ||
123 | optimal. As long as there are one or more runnable workers on the | ||
124 | CPU, the gcwq doesn't start execution of a new work, but, when the | ||
125 | last running worker goes to sleep, it immediately schedules a new | ||
126 | worker so that the CPU doesn't sit idle while there are pending work | ||
127 | items. This allows using a minimal number of workers without losing | ||
128 | execution bandwidth. | ||
129 | |||
130 | Keeping idle workers around doesn't cost anything other than the memory | ||
131 | space for kthreads, so cmwq holds onto idle ones for a while before | ||
132 | killing them. | ||
133 | |||
134 | For an unbound wq, the above concurrency management doesn't apply and | ||
135 | the gcwq for the pseudo unbound CPU tries to start executing all work | ||
136 | items as soon as possible. The responsibility of regulating | ||
137 | concurrency level is on the users. There is also a flag to mark a | ||
138 | bound wq to ignore the concurrency management. Please refer to the | ||
139 | API section for details. | ||
140 | |||
141 | The forward progress guarantee relies on the fact that workers can be | ||
142 | created when more execution contexts are necessary, which in turn is | ||
143 | guaranteed through the use of rescue workers. All work items which might | ||
144 | be used on code paths that handle memory reclaim are required to be queued | ||
145 | on wq's that have a rescue-worker reserved for execution under memory | ||
146 | pressure. Otherwise it is possible that the thread-pool deadlocks waiting | ||
147 | for execution contexts to free up. | ||
148 | |||
149 | |||
150 | 4. Application Programming Interface (API) | ||
151 | |||
152 | alloc_workqueue() allocates a wq. The original create_*workqueue() | ||
153 | functions are deprecated and scheduled for removal. alloc_workqueue() | ||
154 | takes three arguments - @name, @flags and @max_active. @name is the | ||
155 | name of the wq and is also used as the name of the rescuer thread if | ||
156 | there is one. | ||
157 | |||
158 | A wq no longer manages execution resources but serves as a domain for | ||
159 | forward progress guarantee, flush and work item attributes. @flags | ||
160 | and @max_active control how work items are assigned execution | ||
161 | resources, scheduled and executed. | ||
162 | |||
163 | @flags: | ||
164 | |||
165 | WQ_NON_REENTRANT | ||
166 | |||
167 | By default, a wq guarantees non-reentrance only on the same | ||
168 | CPU. A work item may not be executed concurrently on the same | ||
169 | CPU by multiple workers but is allowed to be executed | ||
170 | concurrently on multiple CPUs. This flag makes sure | ||
171 | non-reentrance is enforced across all CPUs. Work items queued | ||
172 | to a non-reentrant wq are guaranteed to be executed by at most | ||
173 | one worker system-wide at any given time. | ||
174 | |||
175 | WQ_UNBOUND | ||
176 | |||
177 | Work items queued to an unbound wq are served by a special | ||
178 | gcwq which hosts workers which are not bound to any specific | ||
179 | CPU. This makes the wq behave as a simple execution context | ||
180 | provider without concurrency management. The unbound gcwq | ||
181 | tries to start execution of work items as soon as possible. | ||
182 | Unbound wq sacrifices locality but is useful for the following | ||
183 | cases. | ||
184 | |||
185 | * Wide fluctuation in the concurrency level requirement is | ||
186 | expected and using bound wq may end up creating large number | ||
187 | of mostly unused workers across different CPUs as the issuer | ||
188 | hops through different CPUs. | ||
189 | |||
190 | * Long running CPU intensive workloads which can be better | ||
191 | managed by the system scheduler. | ||
192 | |||
193 | WQ_FREEZEABLE | ||
194 | |||
195 | A freezeable wq participates in the freeze phase of the system | ||
196 | suspend operations. Work items on the wq are drained and no | ||
197 | new work item starts execution until thawed. | ||
198 | |||
199 | WQ_RESCUER | ||
200 | |||
201 | All wq which might be used in the memory reclaim paths _MUST_ | ||
202 | have this flag set. This reserves one worker exclusively for | ||
203 | the execution of this wq under memory pressure. | ||
204 | |||
205 | WQ_HIGHPRI | ||
206 | |||
207 | Work items of a highpri wq are queued at the head of the | ||
208 | worklist of the target gcwq and start execution regardless of | ||
209 | the current concurrency level. In other words, highpri work | ||
210 | items will always start execution as soon as execution | ||
211 | resource is available. | ||
212 | |||
213 | Ordering among highpri work items is preserved - a highpri | ||
214 | work item queued after another highpri work item will start | ||
215 | execution after the earlier highpri work item starts. | ||
216 | |||
217 | Although highpri work items are not held back by other | ||
218 | runnable work items, they still contribute to the concurrency | ||
219 | level. Highpri work items in runnable state will prevent | ||
220 | non-highpri work items from starting execution. | ||
221 | |||
222 | This flag is meaningless for unbound wq. | ||
223 | |||
224 | WQ_CPU_INTENSIVE | ||
225 | |||
226 | Work items of a CPU intensive wq do not contribute to the | ||
227 | concurrency level. In other words, runnable CPU intensive | ||
228 | work items will not prevent other work items from starting | ||
229 | execution. This is useful for bound work items which are | ||
230 | expected to hog CPU cycles so that their execution is | ||
231 | regulated by the system scheduler. | ||
232 | |||
233 | Although CPU intensive work items don't contribute to the | ||
234 | concurrency level, start of their executions is still | ||
235 | regulated by the concurrency management and runnable | ||
236 | non-CPU-intensive work items can delay execution of CPU | ||
237 | intensive work items. | ||
238 | |||
239 | This flag is meaningless for unbound wq. | ||
240 | |||
241 | WQ_HIGHPRI | WQ_CPU_INTENSIVE | ||
242 | |||
243 | This combination makes the wq avoid interaction with | ||
244 | concurrency management completely and behave as a simple | ||
245 | per-CPU execution context provider. Work items queued on a | ||
246 | highpri CPU-intensive wq start execution as soon as resources | ||
247 | are available and don't affect execution of other work items. | ||
248 | |||
249 | @max_active: | ||
250 | |||
251 | @max_active determines the maximum number of execution contexts per | ||
252 | CPU which can be assigned to the work items of a wq. For example, | ||
253 | with @max_active of 16, at most 16 work items of the wq can be | ||
254 | executing at the same time per CPU. | ||
255 | |||
256 | Currently, for a bound wq, the maximum limit for @max_active is 512 | ||
257 | and the default value used when 0 is specified is 256. For an unbound | ||
258 | wq, the limit is the higher of 512 and 4 * num_possible_cpus(). These | ||
259 | values are chosen sufficiently high such that they are not the | ||
260 | limiting factor while providing protection in runaway cases. | ||
261 | |||
262 | The number of active work items of a wq is usually regulated by the | ||
263 | users of the wq, more specifically, by how many work items the users | ||
264 | may queue at the same time. Unless there is a specific need for | ||
265 | throttling the number of active work items, specifying '0' is | ||
266 | recommended. | ||
267 | |||
268 | Some users depend on the strict execution ordering of ST wq. The | ||
269 | combination of @max_active of 1 and WQ_UNBOUND is used to achieve this | ||
270 | behavior. Work items on such wq are always queued to the unbound gcwq | ||
271 | and only one work item can be active at any given time thus achieving | ||
272 | the same ordering property as ST wq. | ||
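To make the flag and @max_active combinations above concrete, a sketch
follows (the workqueue names and the surrounding "myfs" code are invented
for the example):

	#include <linux/errno.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *io_wq;		/* used during memory reclaim */
	static struct workqueue_struct *ordered_wq;	/* strict ST-style ordering */

	static int myfs_create_workqueues(void)
	{
		/* Reserve a rescuer so reclaim-path work can always make progress. */
		io_wq = alloc_workqueue("myfs_io", WQ_RESCUER, 0);

		/* WQ_UNBOUND + @max_active == 1 gives the old single-thread ordering. */
		ordered_wq = alloc_workqueue("myfs_ordered", WQ_UNBOUND, 1);

		if (!io_wq || !ordered_wq) {
			if (io_wq)
				destroy_workqueue(io_wq);
			if (ordered_wq)
				destroy_workqueue(ordered_wq);
			return -ENOMEM;
		}
		return 0;
	}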
273 | |||
274 | |||
275 | 5. Example Execution Scenarios | ||
276 | |||
277 | The following example execution scenarios try to illustrate how cmwq | ||
278 | behaves under different configurations. | ||
279 | |||
280 | Work items w0, w1, w2 are queued to a bound wq q0 on the same CPU. | ||
281 | w0 burns CPU for 5ms then sleeps for 10ms then burns CPU for 5ms | ||
282 | again before finishing. w1 and w2 burn CPU for 5ms then sleep for | ||
283 | 10ms. | ||
284 | |||
285 | Ignoring all other tasks, works and processing overhead, and assuming | ||
286 | simple FIFO scheduling, the following is one highly simplified version | ||
287 | of possible sequences of events with the original wq. | ||
288 | |||
289 | TIME IN MSECS EVENT | ||
290 | 0 w0 starts and burns CPU | ||
291 | 5 w0 sleeps | ||
292 | 15 w0 wakes up and burns CPU | ||
293 | 20 w0 finishes | ||
294 | 20 w1 starts and burns CPU | ||
295 | 25 w1 sleeps | ||
296 | 35 w1 wakes up and finishes | ||
297 | 35 w2 starts and burns CPU | ||
298 | 40 w2 sleeps | ||
299 | 50 w2 wakes up and finishes | ||
300 | |||
301 | And with cmwq with @max_active >= 3, | ||
302 | |||
303 | TIME IN MSECS EVENT | ||
304 | 0 w0 starts and burns CPU | ||
305 | 5 w0 sleeps | ||
306 | 5 w1 starts and burns CPU | ||
307 | 10 w1 sleeps | ||
308 | 10 w2 starts and burns CPU | ||
309 | 15 w2 sleeps | ||
310 | 15 w0 wakes up and burns CPU | ||
311 | 20 w0 finishes | ||
312 | 20 w1 wakes up and finishes | ||
313 | 25 w2 wakes up and finishes | ||
314 | |||
315 | If @max_active == 2, | ||
316 | |||
317 | TIME IN MSECS EVENT | ||
318 | 0 w0 starts and burns CPU | ||
319 | 5 w0 sleeps | ||
320 | 5 w1 starts and burns CPU | ||
321 | 10 w1 sleeps | ||
322 | 15 w0 wakes up and burns CPU | ||
323 | 20 w0 finishes | ||
324 | 20 w1 wakes up and finishes | ||
325 | 20 w2 starts and burns CPU | ||
326 | 25 w2 sleeps | ||
327 | 35 w2 wakes up and finishes | ||
328 | |||
329 | Now, let's assume w1 and w2 are queued to a different wq q1 which has | ||
330 | WQ_HIGHPRI set, | ||
331 | |||
332 | TIME IN MSECS EVENT | ||
333 | 0 w1 and w2 start and burn CPU | ||
334 | 5 w1 sleeps | ||
335 | 10 w2 sleeps | ||
336 | 10 w0 starts and burns CPU | ||
337 | 15 w0 sleeps | ||
338 | 15 w1 wakes up and finishes | ||
339 | 20 w2 wakes up and finishes | ||
340 | 25 w0 wakes up and burns CPU | ||
341 | 30 w0 finishes | ||
342 | |||
343 | If q1 has WQ_CPU_INTENSIVE set, | ||
344 | |||
345 | TIME IN MSECS EVENT | ||
346 | 0 w0 starts and burns CPU | ||
347 | 5 w0 sleeps | ||
348 | 5 w1 and w2 start and burn CPU | ||
349 | 10 w1 sleeps | ||
350 | 15 w2 sleeps | ||
351 | 15 w0 wakes up and burns CPU | ||
352 | 20 w0 finishes | ||
353 | 20 w1 wakes up and finishes | ||
354 | 25 w2 wakes up and finishes | ||
355 | |||
356 | |||
357 | 6. Guidelines | ||
358 | |||
359 | * Do not forget to use WQ_RESCUER if a wq may process work items which | ||
360 | are used during memory reclaim. Each wq with WQ_RESCUER set has one | ||
361 | rescuer thread reserved for it. If there is a dependency among | ||
362 | multiple work items used during memory reclaim, they should be | ||
363 | queued to separate wq each with WQ_RESCUER. | ||
364 | |||
365 | * Unless strict ordering is required, there is no need to use ST wq. | ||
366 | |||
367 | * Unless there is a specific need, using 0 for @max_active is | ||
368 | recommended. In most use cases, concurrency level usually stays | ||
369 | well under the default limit. | ||
370 | |||
371 | * A wq serves as a domain for forward progress guarantee (WQ_RESCUER), | ||
372 | flush and work item attributes. Work items which are not involved | ||
373 | in memory reclaim and don't need to be flushed as a part of a group | ||
374 | of work items, and don't require any special attribute, can use one | ||
375 | of the system wq. There is no difference in execution | ||
376 | characteristics between using a dedicated wq and a system wq. | ||
377 | |||
378 | * Unless work items are expected to consume a huge amount of CPU | ||
379 | cycles, using a bound wq is usually beneficial due to the increased | ||
380 | level of locality in wq operations and work item execution. | ||
diff --git a/MAINTAINERS b/MAINTAINERS index c36f5d76e1a2..f46d8e66333f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -962,6 +962,13 @@ W: http://www.fluff.org/ben/linux/ | |||
962 | S: Maintained | 962 | S: Maintained |
963 | F: arch/arm/mach-s3c6410/ | 963 | F: arch/arm/mach-s3c6410/ |
964 | 964 | ||
965 | ARM/S5P ARM ARCHITECTURES | ||
966 | M: Kukjin Kim <kgene.kim@samsung.com> | ||
967 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | ||
968 | L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers) | ||
969 | S: Maintained | ||
970 | F: arch/arm/mach-s5p*/ | ||
971 | |||
965 | ARM/SHMOBILE ARM ARCHITECTURE | 972 | ARM/SHMOBILE ARM ARCHITECTURE |
966 | M: Paul Mundt <lethal@linux-sh.org> | 973 | M: Paul Mundt <lethal@linux-sh.org> |
967 | M: Magnus Damm <magnus.damm@gmail.com> | 974 | M: Magnus Damm <magnus.damm@gmail.com> |
@@ -1135,7 +1142,7 @@ ATLX ETHERNET DRIVERS | |||
1135 | M: Jay Cliburn <jcliburn@gmail.com> | 1142 | M: Jay Cliburn <jcliburn@gmail.com> |
1136 | M: Chris Snook <chris.snook@gmail.com> | 1143 | M: Chris Snook <chris.snook@gmail.com> |
1137 | M: Jie Yang <jie.yang@atheros.com> | 1144 | M: Jie Yang <jie.yang@atheros.com> |
1138 | L: atl1-devel@lists.sourceforge.net | 1145 | L: netdev@vger.kernel.org |
1139 | W: http://sourceforge.net/projects/atl1 | 1146 | W: http://sourceforge.net/projects/atl1 |
1140 | W: http://atl1.sourceforge.net | 1147 | W: http://atl1.sourceforge.net |
1141 | S: Maintained | 1148 | S: Maintained |
@@ -1220,7 +1227,7 @@ F: drivers/auxdisplay/ | |||
1220 | F: include/linux/cfag12864b.h | 1227 | F: include/linux/cfag12864b.h |
1221 | 1228 | ||
1222 | AVR32 ARCHITECTURE | 1229 | AVR32 ARCHITECTURE |
1223 | M: Haavard Skinnemoen <hskinnemoen@atmel.com> | 1230 | M: Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com> |
1224 | W: http://www.atmel.com/products/AVR32/ | 1231 | W: http://www.atmel.com/products/AVR32/ |
1225 | W: http://avr32linux.org/ | 1232 | W: http://avr32linux.org/ |
1226 | W: http://avrfreaks.net/ | 1233 | W: http://avrfreaks.net/ |
@@ -1228,7 +1235,7 @@ S: Supported | |||
1228 | F: arch/avr32/ | 1235 | F: arch/avr32/ |
1229 | 1236 | ||
1230 | AVR32/AT32AP MACHINE SUPPORT | 1237 | AVR32/AT32AP MACHINE SUPPORT |
1231 | M: Haavard Skinnemoen <hskinnemoen@atmel.com> | 1238 | M: Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com> |
1232 | S: Supported | 1239 | S: Supported |
1233 | F: arch/avr32/mach-at32ap/ | 1240 | F: arch/avr32/mach-at32ap/ |
1234 | 1241 | ||
@@ -1445,6 +1452,16 @@ S: Maintained | |||
1445 | F: Documentation/video4linux/cafe_ccic | 1452 | F: Documentation/video4linux/cafe_ccic |
1446 | F: drivers/media/video/cafe_ccic* | 1453 | F: drivers/media/video/cafe_ccic* |
1447 | 1454 | ||
1455 | CAIF NETWORK LAYER | ||
1456 | M: Sjur Braendeland <sjur.brandeland@stericsson.com> | ||
1457 | L: netdev@vger.kernel.org | ||
1458 | S: Supported | ||
1459 | F: Documentation/networking/caif/ | ||
1460 | F: drivers/net/caif/ | ||
1461 | F: include/linux/caif/ | ||
1462 | F: include/net/caif/ | ||
1463 | F: net/caif/ | ||
1464 | |||
1448 | CALGARY x86-64 IOMMU | 1465 | CALGARY x86-64 IOMMU |
1449 | M: Muli Ben-Yehuda <muli@il.ibm.com> | 1466 | M: Muli Ben-Yehuda <muli@il.ibm.com> |
1450 | M: "Jon D. Mason" <jdmason@kudzu.us> | 1467 | M: "Jon D. Mason" <jdmason@kudzu.us> |
@@ -2189,6 +2206,12 @@ W: http://acpi4asus.sf.net | |||
2189 | S: Maintained | 2206 | S: Maintained |
2190 | F: drivers/platform/x86/eeepc-laptop.c | 2207 | F: drivers/platform/x86/eeepc-laptop.c |
2191 | 2208 | ||
2209 | EFIFB FRAMEBUFFER DRIVER | ||
2210 | L: linux-fbdev@vger.kernel.org | ||
2211 | M: Peter Jones <pjones@redhat.com> | ||
2212 | S: Maintained | ||
2213 | F: drivers/video/efifb.c | ||
2214 | |||
2192 | EFS FILESYSTEM | 2215 | EFS FILESYSTEM |
2193 | W: http://aeschi.ch.eu.org/efs/ | 2216 | W: http://aeschi.ch.eu.org/efs/ |
2194 | S: Orphan | 2217 | S: Orphan |
@@ -2201,6 +2224,12 @@ L: linux-rdma@vger.kernel.org | |||
2201 | S: Supported | 2224 | S: Supported |
2202 | F: drivers/infiniband/hw/ehca/ | 2225 | F: drivers/infiniband/hw/ehca/ |
2203 | 2226 | ||
2227 | EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER | ||
2228 | M: Breno Leitao <leitao@linux.vnet.ibm.com> | ||
2229 | L: netdev@vger.kernel.org | ||
2230 | S: Maintained | ||
2231 | F: drivers/net/ehea/ | ||
2232 | |||
2204 | EMBEDDED LINUX | 2233 | EMBEDDED LINUX |
2205 | M: Paul Gortmaker <paul.gortmaker@windriver.com> | 2234 | M: Paul Gortmaker <paul.gortmaker@windriver.com> |
2206 | M: Matt Mackall <mpm@selenic.com> | 2235 | M: Matt Mackall <mpm@selenic.com> |
@@ -2641,9 +2670,14 @@ S: Maintained | |||
2641 | F: drivers/media/video/gspca/ | 2670 | F: drivers/media/video/gspca/ |
2642 | 2671 | ||
2643 | HARDWARE MONITORING | 2672 | HARDWARE MONITORING |
2673 | M: Jean Delvare <khali@linux-fr.org> | ||
2674 | M: Guenter Roeck <guenter.roeck@ericsson.com> | ||
2644 | L: lm-sensors@lm-sensors.org | 2675 | L: lm-sensors@lm-sensors.org |
2645 | W: http://www.lm-sensors.org/ | 2676 | W: http://www.lm-sensors.org/ |
2646 | S: Orphan | 2677 | T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/ |
2678 | T: quilt kernel.org/pub/linux/kernel/people/groeck/linux-staging/ | ||
2679 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git | ||
2680 | S: Maintained | ||
2647 | F: Documentation/hwmon/ | 2681 | F: Documentation/hwmon/ |
2648 | F: drivers/hwmon/ | 2682 | F: drivers/hwmon/ |
2649 | F: include/linux/hwmon*.h | 2683 | F: include/linux/hwmon*.h |
@@ -2781,11 +2815,6 @@ S: Maintained | |||
2781 | F: arch/x86/kernel/hpet.c | 2815 | F: arch/x86/kernel/hpet.c |
2782 | F: arch/x86/include/asm/hpet.h | 2816 | F: arch/x86/include/asm/hpet.h |
2783 | 2817 | ||
2784 | HPET: ACPI | ||
2785 | M: Bob Picco <bob.picco@hp.com> | ||
2786 | S: Maintained | ||
2787 | F: drivers/char/hpet.c | ||
2788 | |||
2789 | HPFS FILESYSTEM | 2818 | HPFS FILESYSTEM |
2790 | M: Mikulas Patocka <mikulas@artax.karlin.mff.cuni.cz> | 2819 | M: Mikulas Patocka <mikulas@artax.karlin.mff.cuni.cz> |
2791 | W: http://artax.karlin.mff.cuni.cz/~mikulas/vyplody/hpfs/index-e.cgi | 2820 | W: http://artax.karlin.mff.cuni.cz/~mikulas/vyplody/hpfs/index-e.cgi |
@@ -3398,7 +3427,7 @@ F: drivers/s390/kvm/ | |||
3398 | 3427 | ||
3399 | KEXEC | 3428 | KEXEC |
3400 | M: Eric Biederman <ebiederm@xmission.com> | 3429 | M: Eric Biederman <ebiederm@xmission.com> |
3401 | W: http://ftp.kernel.org/pub/linux/kernel/people/horms/kexec-tools/ | 3430 | W: http://kernel.org/pub/linux/utils/kernel/kexec/ |
3402 | L: kexec@lists.infradead.org | 3431 | L: kexec@lists.infradead.org |
3403 | S: Maintained | 3432 | S: Maintained |
3404 | F: include/linux/kexec.h | 3433 | F: include/linux/kexec.h |
@@ -3759,9 +3788,8 @@ W: http://www.syskonnect.com | |||
3759 | S: Supported | 3788 | S: Supported |
3760 | 3789 | ||
3761 | MATROX FRAMEBUFFER DRIVER | 3790 | MATROX FRAMEBUFFER DRIVER |
3762 | M: Petr Vandrovec <vandrove@vc.cvut.cz> | ||
3763 | L: linux-fbdev@vger.kernel.org | 3791 | L: linux-fbdev@vger.kernel.org |
3764 | S: Maintained | 3792 | S: Orphan |
3765 | F: drivers/video/matrox/matroxfb_* | 3793 | F: drivers/video/matrox/matroxfb_* |
3766 | F: include/linux/matroxfb.h | 3794 | F: include/linux/matroxfb.h |
3767 | 3795 | ||
@@ -3885,10 +3913,8 @@ F: Documentation/serial/moxa-smartio | |||
3885 | F: drivers/char/mxser.* | 3913 | F: drivers/char/mxser.* |
3886 | 3914 | ||
3887 | MSI LAPTOP SUPPORT | 3915 | MSI LAPTOP SUPPORT |
3888 | M: Lennart Poettering <mzxreary@0pointer.de> | 3916 | M: Lee, Chun-Yi <jlee@novell.com> |
3889 | L: platform-driver-x86@vger.kernel.org | 3917 | L: platform-driver-x86@vger.kernel.org |
3890 | W: https://tango.0pointer.de/mailman/listinfo/s270-linux | ||
3891 | W: http://0pointer.de/lennart/tchibo.html | ||
3892 | S: Maintained | 3918 | S: Maintained |
3893 | F: drivers/platform/x86/msi-laptop.c | 3919 | F: drivers/platform/x86/msi-laptop.c |
3894 | 3920 | ||
@@ -3905,8 +3931,10 @@ S: Supported | |||
3905 | F: drivers/mfd/ | 3931 | F: drivers/mfd/ |
3906 | 3932 | ||
3907 | MULTIMEDIA CARD (MMC), SECURE DIGITAL (SD) AND SDIO SUBSYSTEM | 3933 | MULTIMEDIA CARD (MMC), SECURE DIGITAL (SD) AND SDIO SUBSYSTEM |
3908 | S: Orphan | 3934 | M: Chris Ball <cjb@laptop.org> |
3909 | L: linux-mmc@vger.kernel.org | 3935 | L: linux-mmc@vger.kernel.org |
3936 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc.git | ||
3937 | S: Maintained | ||
3910 | F: drivers/mmc/ | 3938 | F: drivers/mmc/ |
3911 | F: include/linux/mmc/ | 3939 | F: include/linux/mmc/ |
3912 | 3940 | ||
@@ -3923,13 +3951,12 @@ F: Documentation/sound/oss/MultiSound | |||
3923 | F: sound/oss/msnd* | 3951 | F: sound/oss/msnd* |
3924 | 3952 | ||
3925 | MULTITECH MULTIPORT CARD (ISICOM) | 3953 | MULTITECH MULTIPORT CARD (ISICOM) |
3926 | M: Jiri Slaby <jirislaby@gmail.com> | 3954 | S: Orphan |
3927 | S: Maintained | ||
3928 | F: drivers/char/isicom.c | 3955 | F: drivers/char/isicom.c |
3929 | F: include/linux/isicom.h | 3956 | F: include/linux/isicom.h |
3930 | 3957 | ||
3931 | MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER | 3958 | MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER |
3932 | M: Felipe Balbi <felipe.balbi@nokia.com> | 3959 | M: Felipe Balbi <balbi@ti.com> |
3933 | L: linux-usb@vger.kernel.org | 3960 | L: linux-usb@vger.kernel.org |
3934 | T: git git://gitorious.org/usb/usb.git | 3961 | T: git git://gitorious.org/usb/usb.git |
3935 | S: Maintained | 3962 | S: Maintained |
@@ -3949,8 +3976,8 @@ S: Maintained | |||
3949 | F: drivers/net/natsemi.c | 3976 | F: drivers/net/natsemi.c |
3950 | 3977 | ||
3951 | NCP FILESYSTEM | 3978 | NCP FILESYSTEM |
3952 | M: Petr Vandrovec <vandrove@vc.cvut.cz> | 3979 | M: Petr Vandrovec <petr@vandrovec.name> |
3953 | S: Maintained | 3980 | S: Odd Fixes |
3954 | F: fs/ncpfs/ | 3981 | F: fs/ncpfs/ |
3955 | 3982 | ||
3956 | NCR DUAL 700 SCSI DRIVER (MICROCHANNEL) | 3983 | NCR DUAL 700 SCSI DRIVER (MICROCHANNEL) |
@@ -4227,7 +4254,7 @@ S: Maintained | |||
4227 | F: drivers/char/hw_random/omap-rng.c | 4254 | F: drivers/char/hw_random/omap-rng.c |
4228 | 4255 | ||
4229 | OMAP USB SUPPORT | 4256 | OMAP USB SUPPORT |
4230 | M: Felipe Balbi <felipe.balbi@nokia.com> | 4257 | M: Felipe Balbi <balbi@ti.com> |
4231 | M: David Brownell <dbrownell@users.sourceforge.net> | 4258 | M: David Brownell <dbrownell@users.sourceforge.net> |
4232 | L: linux-usb@vger.kernel.org | 4259 | L: linux-usb@vger.kernel.org |
4233 | L: linux-omap@vger.kernel.org | 4260 | L: linux-omap@vger.kernel.org |
@@ -4604,7 +4631,7 @@ F: include/linux/preempt.h | |||
4604 | PRISM54 WIRELESS DRIVER | 4631 | PRISM54 WIRELESS DRIVER |
4605 | M: "Luis R. Rodriguez" <mcgrof@gmail.com> | 4632 | M: "Luis R. Rodriguez" <mcgrof@gmail.com> |
4606 | L: linux-wireless@vger.kernel.org | 4633 | L: linux-wireless@vger.kernel.org |
4607 | W: http://prism54.org | 4634 | W: http://wireless.kernel.org/en/users/Drivers/p54 |
4608 | S: Obsolete | 4635 | S: Obsolete |
4609 | F: drivers/net/wireless/prism54/ | 4636 | F: drivers/net/wireless/prism54/ |
4610 | 4637 | ||
@@ -4805,6 +4832,7 @@ RCUTORTURE MODULE | |||
4805 | M: Josh Triplett <josh@freedesktop.org> | 4832 | M: Josh Triplett <josh@freedesktop.org> |
4806 | M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> | 4833 | M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> |
4807 | S: Supported | 4834 | S: Supported |
4835 | T: git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git | ||
4808 | F: Documentation/RCU/torture.txt | 4836 | F: Documentation/RCU/torture.txt |
4809 | F: kernel/rcutorture.c | 4837 | F: kernel/rcutorture.c |
4810 | 4838 | ||
@@ -4829,6 +4857,7 @@ M: Dipankar Sarma <dipankar@in.ibm.com> | |||
4829 | M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> | 4857 | M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> |
4830 | W: http://www.rdrop.com/users/paulmck/rclock/ | 4858 | W: http://www.rdrop.com/users/paulmck/rclock/ |
4831 | S: Supported | 4859 | S: Supported |
4860 | T: git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git | ||
4832 | F: Documentation/RCU/ | 4861 | F: Documentation/RCU/ |
4833 | F: include/linux/rcu* | 4862 | F: include/linux/rcu* |
4834 | F: include/linux/srcu* | 4863 | F: include/linux/srcu* |
@@ -4836,12 +4865,10 @@ F: kernel/rcu* | |||
4836 | F: kernel/srcu* | 4865 | F: kernel/srcu* |
4837 | X: kernel/rcutorture.c | 4866 | X: kernel/rcutorture.c |
4838 | 4867 | ||
4839 | REAL TIME CLOCK DRIVER | 4868 | REAL TIME CLOCK DRIVER (LEGACY) |
4840 | M: Paul Gortmaker <p_gortmaker@yahoo.com> | 4869 | M: Paul Gortmaker <p_gortmaker@yahoo.com> |
4841 | S: Maintained | 4870 | S: Maintained |
4842 | F: Documentation/rtc.txt | 4871 | F: drivers/char/rtc.c |
4843 | F: drivers/rtc/ | ||
4844 | F: include/linux/rtc.h | ||
4845 | 4872 | ||
4846 | REAL TIME CLOCK (RTC) SUBSYSTEM | 4873 | REAL TIME CLOCK (RTC) SUBSYSTEM |
4847 | M: Alessandro Zummo <a.zummo@towertech.it> | 4874 | M: Alessandro Zummo <a.zummo@towertech.it> |
@@ -5078,8 +5105,10 @@ S: Maintained | |||
5078 | F: drivers/mmc/host/sdricoh_cs.c | 5105 | F: drivers/mmc/host/sdricoh_cs.c |
5079 | 5106 | ||
5080 | SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) DRIVER | 5107 | SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) DRIVER |
5081 | S: Orphan | 5108 | M: Chris Ball <cjb@laptop.org> |
5082 | L: linux-mmc@vger.kernel.org | 5109 | L: linux-mmc@vger.kernel.org |
5110 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc.git | ||
5111 | S: Maintained | ||
5083 | F: drivers/mmc/host/sdhci.* | 5112 | F: drivers/mmc/host/sdhci.* |
5084 | 5113 | ||
5085 | SECURE DIGITAL HOST CONTROLLER INTERFACE, OPEN FIRMWARE BINDINGS (SDHCI-OF) | 5114 | SECURE DIGITAL HOST CONTROLLER INTERFACE, OPEN FIRMWARE BINDINGS (SDHCI-OF) |
diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile | |||
@@ -1,7 +1,7 @@ | |||
1 | VERSION = 2 | 1 | VERSION = 2 |
2 | PATCHLEVEL = 6 | 2 | PATCHLEVEL = 6 |
3 | SUBLEVEL = 36 | 3 | SUBLEVEL = 36 |
4 | EXTRAVERSION = -rc3 | 4 | EXTRAVERSION = -rc7 |
5 | NAME = Sheep on Meth | 5 | NAME = Sheep on Meth |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/Kconfig b/arch/Kconfig index 4877a8c8ee16..fe48fc7a3eba 100644 --- a/arch/Kconfig +++ b/arch/Kconfig | |||
@@ -32,8 +32,9 @@ config HAVE_OPROFILE | |||
32 | 32 | ||
33 | config KPROBES | 33 | config KPROBES |
34 | bool "Kprobes" | 34 | bool "Kprobes" |
35 | depends on KALLSYMS && MODULES | 35 | depends on MODULES |
36 | depends on HAVE_KPROBES | 36 | depends on HAVE_KPROBES |
37 | select KALLSYMS | ||
37 | help | 38 | help |
38 | Kprobes allows you to trap at almost any kernel address and | 39 | Kprobes allows you to trap at almost any kernel address and |
39 | execute a callback function. register_kprobe() establishes | 40 | execute a callback function. register_kprobe() establishes |
@@ -45,7 +46,6 @@ config OPTPROBES | |||
45 | def_bool y | 46 | def_bool y |
46 | depends on KPROBES && HAVE_OPTPROBES | 47 | depends on KPROBES && HAVE_OPTPROBES |
47 | depends on !PREEMPT | 48 | depends on !PREEMPT |
48 | select KALLSYMS_ALL | ||
49 | 49 | ||
50 | config HAVE_EFFICIENT_UNALIGNED_ACCESS | 50 | config HAVE_EFFICIENT_UNALIGNED_ACCESS |
51 | bool | 51 | bool |
diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h index f199e69a5d0b..ad368a93a46a 100644 --- a/arch/alpha/include/asm/cache.h +++ b/arch/alpha/include/asm/cache.h | |||
@@ -17,7 +17,6 @@ | |||
17 | # define L1_CACHE_SHIFT 5 | 17 | # define L1_CACHE_SHIFT 5 |
18 | #endif | 18 | #endif |
19 | 19 | ||
20 | #define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)) | ||
21 | #define SMP_CACHE_BYTES L1_CACHE_BYTES | 20 | #define SMP_CACHE_BYTES L1_CACHE_BYTES |
22 | 21 | ||
23 | #endif | 22 | #endif |
diff --git a/arch/alpha/include/asm/cacheflush.h b/arch/alpha/include/asm/cacheflush.h index 01d71e1c8a9e..012f1243b1c1 100644 --- a/arch/alpha/include/asm/cacheflush.h +++ b/arch/alpha/include/asm/cacheflush.h | |||
@@ -43,6 +43,8 @@ extern void smp_imb(void); | |||
43 | /* ??? Ought to use this in arch/alpha/kernel/signal.c too. */ | 43 | /* ??? Ought to use this in arch/alpha/kernel/signal.c too. */ |
44 | 44 | ||
45 | #ifndef CONFIG_SMP | 45 | #ifndef CONFIG_SMP |
46 | #include <linux/sched.h> | ||
47 | |||
46 | extern void __load_new_mm_context(struct mm_struct *); | 48 | extern void __load_new_mm_context(struct mm_struct *); |
47 | static inline void | 49 | static inline void |
48 | flush_icache_user_range(struct vm_area_struct *vma, struct page *page, | 50 | flush_icache_user_range(struct vm_area_struct *vma, struct page *page, |
diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h index 804e5311c841..058937bf5a77 100644 --- a/arch/alpha/include/asm/unistd.h +++ b/arch/alpha/include/asm/unistd.h | |||
@@ -449,10 +449,13 @@ | |||
449 | #define __NR_pwritev 491 | 449 | #define __NR_pwritev 491 |
450 | #define __NR_rt_tgsigqueueinfo 492 | 450 | #define __NR_rt_tgsigqueueinfo 492 |
451 | #define __NR_perf_event_open 493 | 451 | #define __NR_perf_event_open 493 |
452 | #define __NR_fanotify_init 494 | ||
453 | #define __NR_fanotify_mark 495 | ||
454 | #define __NR_prlimit64 496 | ||
452 | 455 | ||
453 | #ifdef __KERNEL__ | 456 | #ifdef __KERNEL__ |
454 | 457 | ||
455 | #define NR_SYSCALLS 494 | 458 | #define NR_SYSCALLS 497 |
456 | 459 | ||
457 | #define __ARCH_WANT_IPC_PARSE_VERSION | 460 | #define __ARCH_WANT_IPC_PARSE_VERSION |
458 | #define __ARCH_WANT_OLD_READDIR | 461 | #define __ARCH_WANT_OLD_READDIR |
@@ -463,6 +466,7 @@ | |||
463 | #define __ARCH_WANT_SYS_OLD_GETRLIMIT | 466 | #define __ARCH_WANT_SYS_OLD_GETRLIMIT |
464 | #define __ARCH_WANT_SYS_OLDUMOUNT | 467 | #define __ARCH_WANT_SYS_OLDUMOUNT |
465 | #define __ARCH_WANT_SYS_SIGPENDING | 468 | #define __ARCH_WANT_SYS_SIGPENDING |
469 | #define __ARCH_WANT_SYS_RT_SIGSUSPEND | ||
466 | 470 | ||
467 | /* "Conditional" syscalls. What we want is | 471 | /* "Conditional" syscalls. What we want is |
468 | 472 | ||
diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S index b45d913a51c3..6d159cee5f2f 100644 --- a/arch/alpha/kernel/entry.S +++ b/arch/alpha/kernel/entry.S | |||
@@ -73,8 +73,6 @@ | |||
73 | ldq $20, HAE_REG($19); \ | 73 | ldq $20, HAE_REG($19); \ |
74 | stq $21, HAE_CACHE($19); \ | 74 | stq $21, HAE_CACHE($19); \ |
75 | stq $21, 0($20); \ | 75 | stq $21, 0($20); \ |
76 | ldq $0, 0($sp); \ | ||
77 | ldq $1, 8($sp); \ | ||
78 | 99:; \ | 76 | 99:; \ |
79 | ldq $19, 72($sp); \ | 77 | ldq $19, 72($sp); \ |
80 | ldq $20, 80($sp); \ | 78 | ldq $20, 80($sp); \ |
@@ -316,19 +314,24 @@ ret_from_sys_call: | |||
316 | cmovne $26, 0, $19 /* $19 = 0 => non-restartable */ | 314 | cmovne $26, 0, $19 /* $19 = 0 => non-restartable */ |
317 | ldq $0, SP_OFF($sp) | 315 | ldq $0, SP_OFF($sp) |
318 | and $0, 8, $0 | 316 | and $0, 8, $0 |
319 | beq $0, restore_all | 317 | beq $0, ret_to_kernel |
320 | ret_from_reschedule: | 318 | ret_to_user: |
321 | /* Make sure need_resched and sigpending don't change between | 319 | /* Make sure need_resched and sigpending don't change between |
322 | sampling and the rti. */ | 320 | sampling and the rti. */ |
323 | lda $16, 7 | 321 | lda $16, 7 |
324 | call_pal PAL_swpipl | 322 | call_pal PAL_swpipl |
325 | ldl $5, TI_FLAGS($8) | 323 | ldl $5, TI_FLAGS($8) |
326 | and $5, _TIF_WORK_MASK, $2 | 324 | and $5, _TIF_WORK_MASK, $2 |
327 | bne $5, work_pending | 325 | bne $2, work_pending |
328 | restore_all: | 326 | restore_all: |
329 | RESTORE_ALL | 327 | RESTORE_ALL |
330 | call_pal PAL_rti | 328 | call_pal PAL_rti |
331 | 329 | ||
330 | ret_to_kernel: | ||
331 | lda $16, 7 | ||
332 | call_pal PAL_swpipl | ||
333 | br restore_all | ||
334 | |||
332 | .align 3 | 335 | .align 3 |
333 | $syscall_error: | 336 | $syscall_error: |
334 | /* | 337 | /* |
@@ -363,7 +366,7 @@ $ret_success: | |||
363 | * $8: current. | 366 | * $8: current. |
364 | * $19: The old syscall number, or zero if this is not a return | 367 | * $19: The old syscall number, or zero if this is not a return |
365 | * from a syscall that errored and is possibly restartable. | 368 | * from a syscall that errored and is possibly restartable. |
366 | * $20: Error indication. | 369 | * $20: The old a3 value |
367 | */ | 370 | */ |
368 | 371 | ||
369 | .align 4 | 372 | .align 4 |
@@ -392,12 +395,18 @@ $work_resched: | |||
392 | 395 | ||
393 | $work_notifysig: | 396 | $work_notifysig: |
394 | mov $sp, $16 | 397 | mov $sp, $16 |
395 | br $1, do_switch_stack | 398 | bsr $1, do_switch_stack |
396 | mov $sp, $17 | 399 | mov $sp, $17 |
397 | mov $5, $18 | 400 | mov $5, $18 |
401 | mov $19, $9 /* save old syscall number */ | ||
402 | mov $20, $10 /* save old a3 */ | ||
403 | and $5, _TIF_SIGPENDING, $2 | ||
404 | cmovne $2, 0, $9 /* we don't want double syscall restarts */ | ||
398 | jsr $26, do_notify_resume | 405 | jsr $26, do_notify_resume |
406 | mov $9, $19 | ||
407 | mov $10, $20 | ||
399 | bsr $1, undo_switch_stack | 408 | bsr $1, undo_switch_stack |
400 | br restore_all | 409 | br ret_to_user |
401 | .end work_pending | 410 | .end work_pending |
402 | 411 | ||
403 | /* | 412 | /* |
@@ -430,6 +439,7 @@ strace: | |||
430 | beq $1, 1f | 439 | beq $1, 1f |
431 | ldq $27, 0($2) | 440 | ldq $27, 0($2) |
432 | 1: jsr $26, ($27), sys_gettimeofday | 441 | 1: jsr $26, ($27), sys_gettimeofday |
442 | ret_from_straced: | ||
433 | ldgp $gp, 0($26) | 443 | ldgp $gp, 0($26) |
434 | 444 | ||
435 | /* check return.. */ | 445 | /* check return.. */ |
@@ -650,7 +660,7 @@ kernel_thread: | |||
650 | /* We don't actually care for a3 success widgetry in the kernel. | 660 | /* We don't actually care for a3 success widgetry in the kernel. |
651 | Not for positive errno values. */ | 661 | Not for positive errno values. */ |
652 | stq $0, 0($sp) /* $0 */ | 662 | stq $0, 0($sp) /* $0 */ |
653 | br restore_all | 663 | br ret_to_kernel |
654 | .end kernel_thread | 664 | .end kernel_thread |
655 | 665 | ||
656 | /* | 666 | /* |
@@ -757,11 +767,15 @@ sys_vfork: | |||
757 | .ent sys_sigreturn | 767 | .ent sys_sigreturn |
758 | sys_sigreturn: | 768 | sys_sigreturn: |
759 | .prologue 0 | 769 | .prologue 0 |
770 | lda $9, ret_from_straced | ||
771 | cmpult $26, $9, $9 | ||
760 | mov $sp, $17 | 772 | mov $sp, $17 |
761 | lda $18, -SWITCH_STACK_SIZE($sp) | 773 | lda $18, -SWITCH_STACK_SIZE($sp) |
762 | lda $sp, -SWITCH_STACK_SIZE($sp) | 774 | lda $sp, -SWITCH_STACK_SIZE($sp) |
763 | jsr $26, do_sigreturn | 775 | jsr $26, do_sigreturn |
764 | br $1, undo_switch_stack | 776 | bne $9, 1f |
777 | jsr $26, syscall_trace | ||
778 | 1: br $1, undo_switch_stack | ||
765 | br ret_from_sys_call | 779 | br ret_from_sys_call |
766 | .end sys_sigreturn | 780 | .end sys_sigreturn |
767 | 781 | ||
@@ -770,47 +784,19 @@ sys_sigreturn: | |||
770 | .ent sys_rt_sigreturn | 784 | .ent sys_rt_sigreturn |
771 | sys_rt_sigreturn: | 785 | sys_rt_sigreturn: |
772 | .prologue 0 | 786 | .prologue 0 |
787 | lda $9, ret_from_straced | ||
788 | cmpult $26, $9, $9 | ||
773 | mov $sp, $17 | 789 | mov $sp, $17 |
774 | lda $18, -SWITCH_STACK_SIZE($sp) | 790 | lda $18, -SWITCH_STACK_SIZE($sp) |
775 | lda $sp, -SWITCH_STACK_SIZE($sp) | 791 | lda $sp, -SWITCH_STACK_SIZE($sp) |
776 | jsr $26, do_rt_sigreturn | 792 | jsr $26, do_rt_sigreturn |
777 | br $1, undo_switch_stack | 793 | bne $9, 1f |
794 | jsr $26, syscall_trace | ||
795 | 1: br $1, undo_switch_stack | ||
778 | br ret_from_sys_call | 796 | br ret_from_sys_call |
779 | .end sys_rt_sigreturn | 797 | .end sys_rt_sigreturn |
780 | 798 | ||
781 | .align 4 | 799 | .align 4 |
782 | .globl sys_sigsuspend | ||
783 | .ent sys_sigsuspend | ||
784 | sys_sigsuspend: | ||
785 | .prologue 0 | ||
786 | mov $sp, $17 | ||
787 | br $1, do_switch_stack | ||
788 | mov $sp, $18 | ||
789 | subq $sp, 16, $sp | ||
790 | stq $26, 0($sp) | ||
791 | jsr $26, do_sigsuspend | ||
792 | ldq $26, 0($sp) | ||
793 | lda $sp, SWITCH_STACK_SIZE+16($sp) | ||
794 | ret | ||
795 | .end sys_sigsuspend | ||
796 | |||
797 | .align 4 | ||
798 | .globl sys_rt_sigsuspend | ||
799 | .ent sys_rt_sigsuspend | ||
800 | sys_rt_sigsuspend: | ||
801 | .prologue 0 | ||
802 | mov $sp, $18 | ||
803 | br $1, do_switch_stack | ||
804 | mov $sp, $19 | ||
805 | subq $sp, 16, $sp | ||
806 | stq $26, 0($sp) | ||
807 | jsr $26, do_rt_sigsuspend | ||
808 | ldq $26, 0($sp) | ||
809 | lda $sp, SWITCH_STACK_SIZE+16($sp) | ||
810 | ret | ||
811 | .end sys_rt_sigsuspend | ||
812 | |||
813 | .align 4 | ||
814 | .globl sys_sethae | 800 | .globl sys_sethae |
815 | .ent sys_sethae | 801 | .ent sys_sethae |
816 | sys_sethae: | 802 | sys_sethae: |
@@ -929,15 +915,6 @@ sys_execve: | |||
929 | .end sys_execve | 915 | .end sys_execve |
930 | 916 | ||
931 | .align 4 | 917 | .align 4 |
932 | .globl osf_sigprocmask | ||
933 | .ent osf_sigprocmask | ||
934 | osf_sigprocmask: | ||
935 | .prologue 0 | ||
936 | mov $sp, $18 | ||
937 | jmp $31, sys_osf_sigprocmask | ||
938 | .end osf_sigprocmask | ||
939 | |||
940 | .align 4 | ||
941 | .globl alpha_ni_syscall | 918 | .globl alpha_ni_syscall |
942 | .ent alpha_ni_syscall | 919 | .ent alpha_ni_syscall |
943 | alpha_ni_syscall: | 920 | alpha_ni_syscall: |
diff --git a/arch/alpha/kernel/err_ev6.c b/arch/alpha/kernel/err_ev6.c index 8ca6345bf131..253cf1a87481 100644 --- a/arch/alpha/kernel/err_ev6.c +++ b/arch/alpha/kernel/err_ev6.c | |||
@@ -90,11 +90,13 @@ static int | |||
90 | ev6_parse_cbox(u64 c_addr, u64 c1_syn, u64 c2_syn, | 90 | ev6_parse_cbox(u64 c_addr, u64 c1_syn, u64 c2_syn, |
91 | u64 c_stat, u64 c_sts, int print) | 91 | u64 c_stat, u64 c_sts, int print) |
92 | { | 92 | { |
93 | char *sourcename[] = { "UNKNOWN", "UNKNOWN", "UNKNOWN", | 93 | static const char * const sourcename[] = { |
94 | "MEMORY", "BCACHE", "DCACHE", | 94 | "UNKNOWN", "UNKNOWN", "UNKNOWN", |
95 | "BCACHE PROBE", "BCACHE PROBE" }; | 95 | "MEMORY", "BCACHE", "DCACHE", |
96 | char *streamname[] = { "D", "I" }; | 96 | "BCACHE PROBE", "BCACHE PROBE" |
97 | char *bitsname[] = { "SINGLE", "DOUBLE" }; | 97 | }; |
98 | static const char * const streamname[] = { "D", "I" }; | ||
99 | static const char * const bitsname[] = { "SINGLE", "DOUBLE" }; | ||
98 | int status = MCHK_DISPOSITION_REPORT; | 100 | int status = MCHK_DISPOSITION_REPORT; |
99 | int source = -1, stream = -1, bits = -1; | 101 | int source = -1, stream = -1, bits = -1; |
100 | 102 | ||
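The err_ev6.c hunk above is a common cleanup: per-call lookup tables of message strings become `static const char * const`, so the pointer array lives in read-only data once instead of being rebuilt on the stack every time the function runs, and neither the pointers nor the strings can be written through. A minimal userspace sketch of the same idiom (names are illustrative, not taken from the kernel):

#include <stdio.h>

/* Before: a writable array of pointers, re-initialised on every call. */
static void report_old(int src)
{
	char *sourcename[] = { "UNKNOWN", "MEMORY", "BCACHE" };
	printf("source: %s\n", sourcename[src]);
}

/* After: one read-only table shared by all calls. */
static void report_new(int src)
{
	static const char * const sourcename[] = { "UNKNOWN", "MEMORY", "BCACHE" };
	printf("source: %s\n", sourcename[src]);
}

int main(void)
{
	report_old(1);
	report_new(2);
	return 0;
}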
diff --git a/arch/alpha/kernel/err_marvel.c b/arch/alpha/kernel/err_marvel.c index 52a79dfc13c6..648ae88aeb8a 100644 --- a/arch/alpha/kernel/err_marvel.c +++ b/arch/alpha/kernel/err_marvel.c | |||
@@ -109,7 +109,7 @@ marvel_print_err_cyc(u64 err_cyc) | |||
109 | #define IO7__ERR_CYC__CYCLE__M (0x7) | 109 | #define IO7__ERR_CYC__CYCLE__M (0x7) |
110 | 110 | ||
111 | printk("%s Packet In Error: %s\n" | 111 | printk("%s Packet In Error: %s\n" |
112 | "%s Error in %s, cycle %ld%s%s\n", | 112 | "%s Error in %s, cycle %lld%s%s\n", |
113 | err_print_prefix, | 113 | err_print_prefix, |
114 | packet_desc[EXTRACT(err_cyc, IO7__ERR_CYC__PACKET)], | 114 | packet_desc[EXTRACT(err_cyc, IO7__ERR_CYC__PACKET)], |
115 | err_print_prefix, | 115 | err_print_prefix, |
@@ -313,7 +313,7 @@ marvel_print_po7_ugbge_sym(u64 ugbge_sym) | |||
313 | } | 313 | } |
314 | 314 | ||
315 | printk("%s Up Hose Garbage Symptom:\n" | 315 | printk("%s Up Hose Garbage Symptom:\n" |
316 | "%s Source Port: %ld - Dest PID: %ld - OpCode: %s\n", | 316 | "%s Source Port: %lld - Dest PID: %lld - OpCode: %s\n", |
317 | err_print_prefix, | 317 | err_print_prefix, |
318 | err_print_prefix, | 318 | err_print_prefix, |
319 | EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_SRC_PORT), | 319 | EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_SRC_PORT), |
@@ -552,7 +552,7 @@ marvel_print_pox_spl_cmplt(u64 spl_cmplt) | |||
552 | #define IO7__POX_SPLCMPLT__REM_BYTE_COUNT__M (0xfff) | 552 | #define IO7__POX_SPLCMPLT__REM_BYTE_COUNT__M (0xfff) |
553 | 553 | ||
554 | printk("%s Split Completion Error:\n" | 554 | printk("%s Split Completion Error:\n" |
555 | "%s Source (Bus:Dev:Func): %ld:%ld:%ld\n", | 555 | "%s Source (Bus:Dev:Func): %lld:%lld:%lld\n", |
556 | err_print_prefix, | 556 | err_print_prefix, |
557 | err_print_prefix, | 557 | err_print_prefix, |
558 | EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__SOURCE_BUS), | 558 | EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__SOURCE_BUS), |
@@ -589,22 +589,23 @@ marvel_print_pox_spl_cmplt(u64 spl_cmplt) | |||
589 | static void | 589 | static void |
590 | marvel_print_pox_trans_sum(u64 trans_sum) | 590 | marvel_print_pox_trans_sum(u64 trans_sum) |
591 | { | 591 | { |
592 | char *pcix_cmd[] = { "Interrupt Acknowledge", | 592 | static const char * const pcix_cmd[] = { |
593 | "Special Cycle", | 593 | "Interrupt Acknowledge", |
594 | "I/O Read", | 594 | "Special Cycle", |
595 | "I/O Write", | 595 | "I/O Read", |
596 | "Reserved", | 596 | "I/O Write", |
597 | "Reserved / Device ID Message", | 597 | "Reserved", |
598 | "Memory Read", | 598 | "Reserved / Device ID Message", |
599 | "Memory Write", | 599 | "Memory Read", |
600 | "Reserved / Alias to Memory Read Block", | 600 | "Memory Write", |
601 | "Reserved / Alias to Memory Write Block", | 601 | "Reserved / Alias to Memory Read Block", |
602 | "Configuration Read", | 602 | "Reserved / Alias to Memory Write Block", |
603 | "Configuration Write", | 603 | "Configuration Read", |
604 | "Memory Read Multiple / Split Completion", | 604 | "Configuration Write", |
605 | "Dual Address Cycle", | 605 | "Memory Read Multiple / Split Completion", |
606 | "Memory Read Line / Memory Read Block", | 606 | "Dual Address Cycle", |
607 | "Memory Write and Invalidate / Memory Write Block" | 607 | "Memory Read Line / Memory Read Block", |
608 | "Memory Write and Invalidate / Memory Write Block" | ||
608 | }; | 609 | }; |
609 | 610 | ||
610 | #define IO7__POX_TRANSUM__PCI_ADDR__S (0) | 611 | #define IO7__POX_TRANSUM__PCI_ADDR__S (0) |
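The format-string changes in err_marvel.c follow from alpha's switch to the generic int-ll64 types: u64 is now `unsigned long long`, so printing an EXTRACT() result with %ld raises a printk format warning and %lld is the matching specifier. A hedged userspace equivalent of the two usual fixes:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t cycle = 5;

	printf("cycle %lld\n", (long long)cycle);   /* cast to match %lld */
	printf("cycle %" PRIu64 "\n", cycle);       /* or let <inttypes.h> pick the specifier */
	return 0;
}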
diff --git a/arch/alpha/kernel/err_titan.c b/arch/alpha/kernel/err_titan.c index f7ed97ce0dfd..c3b3781a03de 100644 --- a/arch/alpha/kernel/err_titan.c +++ b/arch/alpha/kernel/err_titan.c | |||
@@ -75,8 +75,12 @@ titan_parse_p_serror(int which, u64 serror, int print) | |||
75 | int status = MCHK_DISPOSITION_REPORT; | 75 | int status = MCHK_DISPOSITION_REPORT; |
76 | 76 | ||
77 | #ifdef CONFIG_VERBOSE_MCHECK | 77 | #ifdef CONFIG_VERBOSE_MCHECK |
78 | char *serror_src[] = {"GPCI", "APCI", "AGP HP", "AGP LP"}; | 78 | static const char * const serror_src[] = { |
79 | char *serror_cmd[] = {"DMA Read", "DMA RMW", "SGTE Read", "Reserved"}; | 79 | "GPCI", "APCI", "AGP HP", "AGP LP" |
80 | }; | ||
81 | static const char * const serror_cmd[] = { | ||
82 | "DMA Read", "DMA RMW", "SGTE Read", "Reserved" | ||
83 | }; | ||
80 | #endif /* CONFIG_VERBOSE_MCHECK */ | 84 | #endif /* CONFIG_VERBOSE_MCHECK */ |
81 | 85 | ||
82 | #define TITAN__PCHIP_SERROR__LOST_UECC (1UL << 0) | 86 | #define TITAN__PCHIP_SERROR__LOST_UECC (1UL << 0) |
@@ -140,14 +144,15 @@ titan_parse_p_perror(int which, int port, u64 perror, int print) | |||
140 | int status = MCHK_DISPOSITION_REPORT; | 144 | int status = MCHK_DISPOSITION_REPORT; |
141 | 145 | ||
142 | #ifdef CONFIG_VERBOSE_MCHECK | 146 | #ifdef CONFIG_VERBOSE_MCHECK |
143 | char *perror_cmd[] = { "Interrupt Acknowledge", "Special Cycle", | 147 | static const char * const perror_cmd[] = { |
144 | "I/O Read", "I/O Write", | 148 | "Interrupt Acknowledge", "Special Cycle", |
145 | "Reserved", "Reserved", | 149 | "I/O Read", "I/O Write", |
146 | "Memory Read", "Memory Write", | 150 | "Reserved", "Reserved", |
147 | "Reserved", "Reserved", | 151 | "Memory Read", "Memory Write", |
148 | "Configuration Read", "Configuration Write", | 152 | "Reserved", "Reserved", |
149 | "Memory Read Multiple", "Dual Address Cycle", | 153 | "Configuration Read", "Configuration Write", |
150 | "Memory Read Line","Memory Write and Invalidate" | 154 | "Memory Read Multiple", "Dual Address Cycle", |
155 | "Memory Read Line", "Memory Write and Invalidate" | ||
151 | }; | 156 | }; |
152 | #endif /* CONFIG_VERBOSE_MCHECK */ | 157 | #endif /* CONFIG_VERBOSE_MCHECK */ |
153 | 158 | ||
@@ -273,11 +278,11 @@ titan_parse_p_agperror(int which, u64 agperror, int print) | |||
273 | int cmd, len; | 278 | int cmd, len; |
274 | unsigned long addr; | 279 | unsigned long addr; |
275 | 280 | ||
276 | char *agperror_cmd[] = { "Read (low-priority)", "Read (high-priority)", | 281 | static const char * const agperror_cmd[] = { |
277 | "Write (low-priority)", | 282 | "Read (low-priority)", "Read (high-priority)", |
278 | "Write (high-priority)", | 283 | "Write (low-priority)", "Write (high-priority)", |
279 | "Reserved", "Reserved", | 284 | "Reserved", "Reserved", |
280 | "Flush", "Fence" | 285 | "Flush", "Fence" |
281 | }; | 286 | }; |
282 | #endif /* CONFIG_VERBOSE_MCHECK */ | 287 | #endif /* CONFIG_VERBOSE_MCHECK */ |
283 | 288 | ||
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index 5d1e6d6ce684..547e8b84b2f7 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c | |||
@@ -15,7 +15,6 @@ | |||
15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
16 | #include <linux/mm.h> | 16 | #include <linux/mm.h> |
17 | #include <linux/smp.h> | 17 | #include <linux/smp.h> |
18 | #include <linux/smp_lock.h> | ||
19 | #include <linux/stddef.h> | 18 | #include <linux/stddef.h> |
20 | #include <linux/syscalls.h> | 19 | #include <linux/syscalls.h> |
21 | #include <linux/unistd.h> | 20 | #include <linux/unistd.h> |
@@ -69,7 +68,6 @@ SYSCALL_DEFINE4(osf_set_program_attributes, unsigned long, text_start, | |||
69 | { | 68 | { |
70 | struct mm_struct *mm; | 69 | struct mm_struct *mm; |
71 | 70 | ||
72 | lock_kernel(); | ||
73 | mm = current->mm; | 71 | mm = current->mm; |
74 | mm->end_code = bss_start + bss_len; | 72 | mm->end_code = bss_start + bss_len; |
75 | mm->start_brk = bss_start + bss_len; | 73 | mm->start_brk = bss_start + bss_len; |
@@ -78,7 +76,6 @@ SYSCALL_DEFINE4(osf_set_program_attributes, unsigned long, text_start, | |||
78 | printk("set_program_attributes(%lx %lx %lx %lx)\n", | 76 | printk("set_program_attributes(%lx %lx %lx %lx)\n", |
79 | text_start, text_len, bss_start, bss_len); | 77 | text_start, text_len, bss_start, bss_len); |
80 | #endif | 78 | #endif |
81 | unlock_kernel(); | ||
82 | return 0; | 79 | return 0; |
83 | } | 80 | } |
84 | 81 | ||
@@ -517,7 +514,6 @@ SYSCALL_DEFINE2(osf_proplist_syscall, enum pl_code, code, | |||
517 | long error; | 514 | long error; |
518 | int __user *min_buf_size_ptr; | 515 | int __user *min_buf_size_ptr; |
519 | 516 | ||
520 | lock_kernel(); | ||
521 | switch (code) { | 517 | switch (code) { |
522 | case PL_SET: | 518 | case PL_SET: |
523 | if (get_user(error, &args->set.nbytes)) | 519 | if (get_user(error, &args->set.nbytes)) |
@@ -547,7 +543,6 @@ SYSCALL_DEFINE2(osf_proplist_syscall, enum pl_code, code, | |||
547 | error = -EOPNOTSUPP; | 543 | error = -EOPNOTSUPP; |
548 | break; | 544 | break; |
549 | }; | 545 | }; |
550 | unlock_kernel(); | ||
551 | return error; | 546 | return error; |
552 | } | 547 | } |
553 | 548 | ||
@@ -594,7 +589,7 @@ SYSCALL_DEFINE2(osf_sigstack, struct sigstack __user *, uss, | |||
594 | 589 | ||
595 | SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count) | 590 | SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count) |
596 | { | 591 | { |
597 | char *sysinfo_table[] = { | 592 | const char *sysinfo_table[] = { |
598 | utsname()->sysname, | 593 | utsname()->sysname, |
599 | utsname()->nodename, | 594 | utsname()->nodename, |
600 | utsname()->release, | 595 | utsname()->release, |
@@ -606,7 +601,7 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count) | |||
606 | "dummy", /* secure RPC domain */ | 601 | "dummy", /* secure RPC domain */ |
607 | }; | 602 | }; |
608 | unsigned long offset; | 603 | unsigned long offset; |
609 | char *res; | 604 | const char *res; |
610 | long len, err = -EINVAL; | 605 | long len, err = -EINVAL; |
611 | 606 | ||
612 | offset = command-1; | 607 | offset = command-1; |
diff --git a/arch/alpha/kernel/pci-sysfs.c b/arch/alpha/kernel/pci-sysfs.c index 738fc824e2ea..b899e95f79fd 100644 --- a/arch/alpha/kernel/pci-sysfs.c +++ b/arch/alpha/kernel/pci-sysfs.c | |||
@@ -66,7 +66,7 @@ static int pci_mmap_resource(struct kobject *kobj, | |||
66 | { | 66 | { |
67 | struct pci_dev *pdev = to_pci_dev(container_of(kobj, | 67 | struct pci_dev *pdev = to_pci_dev(container_of(kobj, |
68 | struct device, kobj)); | 68 | struct device, kobj)); |
69 | struct resource *res = (struct resource *)attr->private; | 69 | struct resource *res = attr->private; |
70 | enum pci_mmap_state mmap_type; | 70 | enum pci_mmap_state mmap_type; |
71 | struct pci_bus_region bar; | 71 | struct pci_bus_region bar; |
72 | int i; | 72 | int i; |
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c index 51c39fa41693..85d8e4f58c83 100644 --- a/arch/alpha/kernel/perf_event.c +++ b/arch/alpha/kernel/perf_event.c | |||
@@ -241,20 +241,20 @@ static inline unsigned long alpha_read_pmc(int idx) | |||
241 | static int alpha_perf_event_set_period(struct perf_event *event, | 241 | static int alpha_perf_event_set_period(struct perf_event *event, |
242 | struct hw_perf_event *hwc, int idx) | 242 | struct hw_perf_event *hwc, int idx) |
243 | { | 243 | { |
244 | long left = atomic64_read(&hwc->period_left); | 244 | long left = local64_read(&hwc->period_left); |
245 | long period = hwc->sample_period; | 245 | long period = hwc->sample_period; |
246 | int ret = 0; | 246 | int ret = 0; |
247 | 247 | ||
248 | if (unlikely(left <= -period)) { | 248 | if (unlikely(left <= -period)) { |
249 | left = period; | 249 | left = period; |
250 | atomic64_set(&hwc->period_left, left); | 250 | local64_set(&hwc->period_left, left); |
251 | hwc->last_period = period; | 251 | hwc->last_period = period; |
252 | ret = 1; | 252 | ret = 1; |
253 | } | 253 | } |
254 | 254 | ||
255 | if (unlikely(left <= 0)) { | 255 | if (unlikely(left <= 0)) { |
256 | left += period; | 256 | left += period; |
257 | atomic64_set(&hwc->period_left, left); | 257 | local64_set(&hwc->period_left, left); |
258 | hwc->last_period = period; | 258 | hwc->last_period = period; |
259 | ret = 1; | 259 | ret = 1; |
260 | } | 260 | } |
@@ -269,7 +269,7 @@ static int alpha_perf_event_set_period(struct perf_event *event, | |||
269 | if (left > (long)alpha_pmu->pmc_max_period[idx]) | 269 | if (left > (long)alpha_pmu->pmc_max_period[idx]) |
270 | left = alpha_pmu->pmc_max_period[idx]; | 270 | left = alpha_pmu->pmc_max_period[idx]; |
271 | 271 | ||
272 | atomic64_set(&hwc->prev_count, (unsigned long)(-left)); | 272 | local64_set(&hwc->prev_count, (unsigned long)(-left)); |
273 | 273 | ||
274 | alpha_write_pmc(idx, (unsigned long)(-left)); | 274 | alpha_write_pmc(idx, (unsigned long)(-left)); |
275 | 275 | ||
@@ -300,10 +300,10 @@ static unsigned long alpha_perf_event_update(struct perf_event *event, | |||
300 | long delta; | 300 | long delta; |
301 | 301 | ||
302 | again: | 302 | again: |
303 | prev_raw_count = atomic64_read(&hwc->prev_count); | 303 | prev_raw_count = local64_read(&hwc->prev_count); |
304 | new_raw_count = alpha_read_pmc(idx); | 304 | new_raw_count = alpha_read_pmc(idx); |
305 | 305 | ||
306 | if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count, | 306 | if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, |
307 | new_raw_count) != prev_raw_count) | 307 | new_raw_count) != prev_raw_count) |
308 | goto again; | 308 | goto again; |
309 | 309 | ||
@@ -316,8 +316,8 @@ again: | |||
316 | delta += alpha_pmu->pmc_max_period[idx] + 1; | 316 | delta += alpha_pmu->pmc_max_period[idx] + 1; |
317 | } | 317 | } |
318 | 318 | ||
319 | atomic64_add(delta, &event->count); | 319 | local64_add(delta, &event->count); |
320 | atomic64_sub(delta, &hwc->period_left); | 320 | local64_sub(delta, &hwc->period_left); |
321 | 321 | ||
322 | return new_raw_count; | 322 | return new_raw_count; |
323 | } | 323 | } |
@@ -636,7 +636,7 @@ static int __hw_perf_event_init(struct perf_event *event) | |||
636 | if (!hwc->sample_period) { | 636 | if (!hwc->sample_period) { |
637 | hwc->sample_period = alpha_pmu->pmc_max_period[0]; | 637 | hwc->sample_period = alpha_pmu->pmc_max_period[0]; |
638 | hwc->last_period = hwc->sample_period; | 638 | hwc->last_period = hwc->sample_period; |
639 | atomic64_set(&hwc->period_left, hwc->sample_period); | 639 | local64_set(&hwc->period_left, hwc->sample_period); |
640 | } | 640 | } |
641 | 641 | ||
642 | return 0; | 642 | return 0; |
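The perf_event.c hunks above replace atomic64_* with local64_* on hwc->prev_count, hwc->period_left and event->count: those fields are only touched by the CPU that owns the event, so the cheaper local64_t is sufficient and the surrounding logic is unchanged. A standalone sketch of the update step that logic implements, with plain integers standing in for local64_t and an invented counter width (the kernel version additionally keeps a cmpxchg retry loop because the PMU interrupt can race with a read on the same CPU):

#include <stdint.h>
#include <stdio.h>

#define PMC_MAX_PERIOD ((1u << 20) - 1)   /* illustrative counter width */

struct hw_counter {
	uint64_t prev_count;   /* local64_t in the kernel */
	int64_t  count;        /* local64_t in the kernel */
	int64_t  period_left;  /* local64_t in the kernel */
};

/* Fold the ticks elapsed since the last sample into the running
 * totals, widening the delta if the narrow hardware counter wrapped. */
static void event_update(struct hw_counter *hwc, uint32_t raw_pmc)
{
	int64_t delta = (int64_t)raw_pmc - (int64_t)(hwc->prev_count & PMC_MAX_PERIOD);

	if (delta < 0)
		delta += PMC_MAX_PERIOD + 1;

	hwc->prev_count = raw_pmc;
	hwc->count += delta;
	hwc->period_left -= delta;
}

int main(void)
{
	struct hw_counter hwc = { .prev_count = PMC_MAX_PERIOD - 10 };

	event_update(&hwc, 5);   /* counter wrapped since the last sample */
	printf("delta folded in: %lld\n", (long long)hwc.count);   /* prints 16 */
	return 0;
}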
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c index 842dba308eab..3ec35066f1dc 100644 --- a/arch/alpha/kernel/process.c +++ b/arch/alpha/kernel/process.c | |||
@@ -356,7 +356,7 @@ dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt, struct thread_info *ti) | |||
356 | dest[27] = pt->r27; | 356 | dest[27] = pt->r27; |
357 | dest[28] = pt->r28; | 357 | dest[28] = pt->r28; |
358 | dest[29] = pt->gp; | 358 | dest[29] = pt->gp; |
359 | dest[30] = rdusp(); | 359 | dest[30] = ti == current_thread_info() ? rdusp() : ti->pcb.usp; |
360 | dest[31] = pt->pc; | 360 | dest[31] = pt->pc; |
361 | 361 | ||
362 | /* Once upon a time this was the PS value. Which is stupid | 362 | /* Once upon a time this was the PS value. Which is stupid |
diff --git a/arch/alpha/kernel/proto.h b/arch/alpha/kernel/proto.h index 3d2627ec9860..d3e52d3fd592 100644 --- a/arch/alpha/kernel/proto.h +++ b/arch/alpha/kernel/proto.h | |||
@@ -156,9 +156,6 @@ extern void SMC669_Init(int); | |||
156 | /* es1888.c */ | 156 | /* es1888.c */ |
157 | extern void es1888_init(void); | 157 | extern void es1888_init(void); |
158 | 158 | ||
159 | /* ns87312.c */ | ||
160 | extern void ns87312_enable_ide(long ide_base); | ||
161 | |||
162 | /* ../lib/fpreg.c */ | 159 | /* ../lib/fpreg.c */ |
163 | extern void alpha_write_fp_reg (unsigned long reg, unsigned long val); | 160 | extern void alpha_write_fp_reg (unsigned long reg, unsigned long val); |
164 | extern unsigned long alpha_read_fp_reg (unsigned long reg); | 161 | extern unsigned long alpha_read_fp_reg (unsigned long reg); |
diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c index 0932dbb1ef8e..6f7feb5db271 100644 --- a/arch/alpha/kernel/signal.c +++ b/arch/alpha/kernel/signal.c | |||
@@ -41,46 +41,20 @@ static void do_signal(struct pt_regs *, struct switch_stack *, | |||
41 | /* | 41 | /* |
42 | * The OSF/1 sigprocmask calling sequence is different from the | 42 | * The OSF/1 sigprocmask calling sequence is different from the |
43 | * C sigprocmask() sequence.. | 43 | * C sigprocmask() sequence.. |
44 | * | ||
45 | * how: | ||
46 | * 1 - SIG_BLOCK | ||
47 | * 2 - SIG_UNBLOCK | ||
48 | * 3 - SIG_SETMASK | ||
49 | * | ||
50 | * We change the range to -1 .. 1 in order to let gcc easily | ||
51 | * use the conditional move instructions. | ||
52 | * | ||
53 | * Note that we don't need to acquire the kernel lock for SMP | ||
54 | * operation, as all of this is local to this thread. | ||
55 | */ | 44 | */ |
56 | SYSCALL_DEFINE3(osf_sigprocmask, int, how, unsigned long, newmask, | 45 | SYSCALL_DEFINE2(osf_sigprocmask, int, how, unsigned long, newmask) |
57 | struct pt_regs *, regs) | ||
58 | { | 46 | { |
59 | unsigned long oldmask = -EINVAL; | 47 | sigset_t oldmask; |
60 | 48 | sigset_t mask; | |
61 | if ((unsigned long)how-1 <= 2) { | 49 | unsigned long res; |
62 | long sign = how-2; /* -1 .. 1 */ | 50 | |
63 | unsigned long block, unblock; | 51 | siginitset(&mask, newmask & _BLOCKABLE); |
64 | 52 | res = sigprocmask(how, &mask, &oldmask); | |
65 | newmask &= _BLOCKABLE; | 53 | if (!res) { |
66 | spin_lock_irq(¤t->sighand->siglock); | 54 | force_successful_syscall_return(); |
67 | oldmask = current->blocked.sig[0]; | 55 | res = oldmask.sig[0]; |
68 | |||
69 | unblock = oldmask & ~newmask; | ||
70 | block = oldmask | newmask; | ||
71 | if (!sign) | ||
72 | block = unblock; | ||
73 | if (sign <= 0) | ||
74 | newmask = block; | ||
75 | if (_NSIG_WORDS > 1 && sign > 0) | ||
76 | sigemptyset(¤t->blocked); | ||
77 | current->blocked.sig[0] = newmask; | ||
78 | recalc_sigpending(); | ||
79 | spin_unlock_irq(¤t->sighand->siglock); | ||
80 | |||
81 | regs->r0 = 0; /* special no error return */ | ||
82 | } | 56 | } |
83 | return oldmask; | 57 | return res; |
84 | } | 58 | } |
85 | 59 | ||
86 | SYSCALL_DEFINE3(osf_sigaction, int, sig, | 60 | SYSCALL_DEFINE3(osf_sigaction, int, sig, |
@@ -94,9 +68,9 @@ SYSCALL_DEFINE3(osf_sigaction, int, sig, | |||
94 | old_sigset_t mask; | 68 | old_sigset_t mask; |
95 | if (!access_ok(VERIFY_READ, act, sizeof(*act)) || | 69 | if (!access_ok(VERIFY_READ, act, sizeof(*act)) || |
96 | __get_user(new_ka.sa.sa_handler, &act->sa_handler) || | 70 | __get_user(new_ka.sa.sa_handler, &act->sa_handler) || |
97 | __get_user(new_ka.sa.sa_flags, &act->sa_flags)) | 71 | __get_user(new_ka.sa.sa_flags, &act->sa_flags) || |
72 | __get_user(mask, &act->sa_mask)) | ||
98 | return -EFAULT; | 73 | return -EFAULT; |
99 | __get_user(mask, &act->sa_mask); | ||
100 | siginitset(&new_ka.sa.sa_mask, mask); | 74 | siginitset(&new_ka.sa.sa_mask, mask); |
101 | new_ka.ka_restorer = NULL; | 75 | new_ka.ka_restorer = NULL; |
102 | } | 76 | } |
@@ -106,9 +80,9 @@ SYSCALL_DEFINE3(osf_sigaction, int, sig, | |||
106 | if (!ret && oact) { | 80 | if (!ret && oact) { |
107 | if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || | 81 | if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || |
108 | __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || | 82 | __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || |
109 | __put_user(old_ka.sa.sa_flags, &oact->sa_flags)) | 83 | __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || |
84 | __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) | ||
110 | return -EFAULT; | 85 | return -EFAULT; |
111 | __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); | ||
112 | } | 86 | } |
113 | 87 | ||
114 | return ret; | 88 | return ret; |
@@ -144,8 +118,7 @@ SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act, | |||
144 | /* | 118 | /* |
145 | * Atomically swap in the new signal mask, and wait for a signal. | 119 | * Atomically swap in the new signal mask, and wait for a signal. |
146 | */ | 120 | */ |
147 | asmlinkage int | 121 | SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask) |
148 | do_sigsuspend(old_sigset_t mask, struct pt_regs *regs, struct switch_stack *sw) | ||
149 | { | 122 | { |
150 | mask &= _BLOCKABLE; | 123 | mask &= _BLOCKABLE; |
151 | spin_lock_irq(¤t->sighand->siglock); | 124 | spin_lock_irq(¤t->sighand->siglock); |
@@ -154,41 +127,6 @@ do_sigsuspend(old_sigset_t mask, struct pt_regs *regs, struct switch_stack *sw) | |||
154 | recalc_sigpending(); | 127 | recalc_sigpending(); |
155 | spin_unlock_irq(¤t->sighand->siglock); | 128 | spin_unlock_irq(¤t->sighand->siglock); |
156 | 129 | ||
157 | /* Indicate EINTR on return from any possible signal handler, | ||
158 | which will not come back through here, but via sigreturn. */ | ||
159 | regs->r0 = EINTR; | ||
160 | regs->r19 = 1; | ||
161 | |||
162 | current->state = TASK_INTERRUPTIBLE; | ||
163 | schedule(); | ||
164 | set_thread_flag(TIF_RESTORE_SIGMASK); | ||
165 | return -ERESTARTNOHAND; | ||
166 | } | ||
167 | |||
168 | asmlinkage int | ||
169 | do_rt_sigsuspend(sigset_t __user *uset, size_t sigsetsize, | ||
170 | struct pt_regs *regs, struct switch_stack *sw) | ||
171 | { | ||
172 | sigset_t set; | ||
173 | |||
174 | /* XXX: Don't preclude handling different sized sigset_t's. */ | ||
175 | if (sigsetsize != sizeof(sigset_t)) | ||
176 | return -EINVAL; | ||
177 | if (copy_from_user(&set, uset, sizeof(set))) | ||
178 | return -EFAULT; | ||
179 | |||
180 | sigdelsetmask(&set, ~_BLOCKABLE); | ||
181 | spin_lock_irq(¤t->sighand->siglock); | ||
182 | current->saved_sigmask = current->blocked; | ||
183 | current->blocked = set; | ||
184 | recalc_sigpending(); | ||
185 | spin_unlock_irq(¤t->sighand->siglock); | ||
186 | |||
187 | /* Indicate EINTR on return from any possible signal handler, | ||
188 | which will not come back through here, but via sigreturn. */ | ||
189 | regs->r0 = EINTR; | ||
190 | regs->r19 = 1; | ||
191 | |||
192 | current->state = TASK_INTERRUPTIBLE; | 130 | current->state = TASK_INTERRUPTIBLE; |
193 | schedule(); | 131 | schedule(); |
194 | set_thread_flag(TIF_RESTORE_SIGMASK); | 132 | set_thread_flag(TIF_RESTORE_SIGMASK); |
@@ -239,6 +177,8 @@ restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, | |||
239 | unsigned long usp; | 177 | unsigned long usp; |
240 | long i, err = __get_user(regs->pc, &sc->sc_pc); | 178 | long i, err = __get_user(regs->pc, &sc->sc_pc); |
241 | 179 | ||
180 | current_thread_info()->restart_block.fn = do_no_restart_syscall; | ||
181 | |||
242 | sw->r26 = (unsigned long) ret_from_sys_call; | 182 | sw->r26 = (unsigned long) ret_from_sys_call; |
243 | 183 | ||
244 | err |= __get_user(regs->r0, sc->sc_regs+0); | 184 | err |= __get_user(regs->r0, sc->sc_regs+0); |
@@ -591,7 +531,6 @@ syscall_restart(unsigned long r0, unsigned long r19, | |||
591 | regs->pc -= 4; | 531 | regs->pc -= 4; |
592 | break; | 532 | break; |
593 | case ERESTART_RESTARTBLOCK: | 533 | case ERESTART_RESTARTBLOCK: |
594 | current_thread_info()->restart_block.fn = do_no_restart_syscall; | ||
595 | regs->r0 = EINTR; | 534 | regs->r0 = EINTR; |
596 | break; | 535 | break; |
597 | } | 536 | } |
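The rewritten sys_osf_sigprocmask() earlier in this signal.c diff drops the hand-rolled mask arithmetic and the pt_regs plumbing: it builds a sigset_t from the caller's word, calls the generic sigprocmask() helper, and returns the old mask as the syscall's value, with force_successful_syscall_return() keeping a non-zero old mask from being mistaken for an error. A rough userspace analogue of that shape using the POSIX call (illustrative only, one signal bit tracked):

#define _POSIX_C_SOURCE 200809L
#include <signal.h>
#include <stdio.h>

/* Build a set from a bare word, let sigprocmask() do the work, and
 * hand back the previous mask as the return value, OSF/1-style. */
static unsigned long osf_style_sigprocmask(int how, unsigned long newmask)
{
	sigset_t set, old;

	sigemptyset(&set);
	if (newmask & 1)                     /* bit 0 stands in for SIGUSR1 */
		sigaddset(&set, SIGUSR1);
	if (sigprocmask(how, &set, &old) != 0)
		return (unsigned long)-1;
	return sigismember(&old, SIGUSR1) ? 1UL : 0UL;
}

int main(void)
{
	printf("old mask: %lu\n", osf_style_sigprocmask(SIG_BLOCK, 1));    /* 0 */
	printf("old mask: %lu\n", osf_style_sigprocmask(SIG_UNBLOCK, 1));  /* 1 */
	return 0;
}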
diff --git a/arch/alpha/kernel/srm_env.c b/arch/alpha/kernel/srm_env.c index 4afc1a1e2e5a..f0df3fbd8402 100644 --- a/arch/alpha/kernel/srm_env.c +++ b/arch/alpha/kernel/srm_env.c | |||
@@ -87,7 +87,7 @@ static int srm_env_proc_show(struct seq_file *m, void *v) | |||
87 | srm_env_t *entry; | 87 | srm_env_t *entry; |
88 | char *page; | 88 | char *page; |
89 | 89 | ||
90 | entry = (srm_env_t *)m->private; | 90 | entry = m->private; |
91 | page = (char *)__get_free_page(GFP_USER); | 91 | page = (char *)__get_free_page(GFP_USER); |
92 | if (!page) | 92 | if (!page) |
93 | return -ENOMEM; | 93 | return -ENOMEM; |
diff --git a/arch/alpha/kernel/sys_cabriolet.c b/arch/alpha/kernel/sys_cabriolet.c index affd0f3f25df..14c8898d19ec 100644 --- a/arch/alpha/kernel/sys_cabriolet.c +++ b/arch/alpha/kernel/sys_cabriolet.c | |||
@@ -33,7 +33,7 @@ | |||
33 | #include "irq_impl.h" | 33 | #include "irq_impl.h" |
34 | #include "pci_impl.h" | 34 | #include "pci_impl.h" |
35 | #include "machvec_impl.h" | 35 | #include "machvec_impl.h" |
36 | 36 | #include "pc873xx.h" | |
37 | 37 | ||
38 | /* Note mask bit is true for DISABLED irqs. */ | 38 | /* Note mask bit is true for DISABLED irqs. */ |
39 | static unsigned long cached_irq_mask = ~0UL; | 39 | static unsigned long cached_irq_mask = ~0UL; |
@@ -236,17 +236,30 @@ cabriolet_map_irq(struct pci_dev *dev, u8 slot, u8 pin) | |||
236 | } | 236 | } |
237 | 237 | ||
238 | static inline void __init | 238 | static inline void __init |
239 | cabriolet_enable_ide(void) | ||
240 | { | ||
241 | if (pc873xx_probe() == -1) { | ||
242 | printk(KERN_ERR "Probing for PC873xx Super IO chip failed.\n"); | ||
243 | } else { | ||
244 | printk(KERN_INFO "Found %s Super IO chip at 0x%x\n", | ||
245 | pc873xx_get_model(), pc873xx_get_base()); | ||
246 | |||
247 | pc873xx_enable_ide(); | ||
248 | } | ||
249 | } | ||
250 | |||
251 | static inline void __init | ||
239 | cabriolet_init_pci(void) | 252 | cabriolet_init_pci(void) |
240 | { | 253 | { |
241 | common_init_pci(); | 254 | common_init_pci(); |
242 | ns87312_enable_ide(0x398); | 255 | cabriolet_enable_ide(); |
243 | } | 256 | } |
244 | 257 | ||
245 | static inline void __init | 258 | static inline void __init |
246 | cia_cab_init_pci(void) | 259 | cia_cab_init_pci(void) |
247 | { | 260 | { |
248 | cia_init_pci(); | 261 | cia_init_pci(); |
249 | ns87312_enable_ide(0x398); | 262 | cabriolet_enable_ide(); |
250 | } | 263 | } |
251 | 264 | ||
252 | /* | 265 | /* |
diff --git a/arch/alpha/kernel/sys_takara.c b/arch/alpha/kernel/sys_takara.c index 230464885b5c..4da596b6adbb 100644 --- a/arch/alpha/kernel/sys_takara.c +++ b/arch/alpha/kernel/sys_takara.c | |||
@@ -29,7 +29,7 @@ | |||
29 | #include "irq_impl.h" | 29 | #include "irq_impl.h" |
30 | #include "pci_impl.h" | 30 | #include "pci_impl.h" |
31 | #include "machvec_impl.h" | 31 | #include "machvec_impl.h" |
32 | 32 | #include "pc873xx.h" | |
33 | 33 | ||
34 | /* Note mask bit is true for DISABLED irqs. */ | 34 | /* Note mask bit is true for DISABLED irqs. */ |
35 | static unsigned long cached_irq_mask[2] = { -1, -1 }; | 35 | static unsigned long cached_irq_mask[2] = { -1, -1 }; |
@@ -264,7 +264,14 @@ takara_init_pci(void) | |||
264 | alpha_mv.pci_map_irq = takara_map_irq_srm; | 264 | alpha_mv.pci_map_irq = takara_map_irq_srm; |
265 | 265 | ||
266 | cia_init_pci(); | 266 | cia_init_pci(); |
267 | ns87312_enable_ide(0x26e); | 267 | |
268 | if (pc873xx_probe() == -1) { | ||
269 | printk(KERN_ERR "Probing for PC873xx Super IO chip failed.\n"); | ||
270 | } else { | ||
271 | printk(KERN_INFO "Found %s Super IO chip at 0x%x\n", | ||
272 | pc873xx_get_model(), pc873xx_get_base()); | ||
273 | pc873xx_enable_ide(); | ||
274 | } | ||
268 | } | 275 | } |
269 | 276 | ||
270 | 277 | ||
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S index 09acb786e72b..a6a1de9db16f 100644 --- a/arch/alpha/kernel/systbls.S +++ b/arch/alpha/kernel/systbls.S | |||
@@ -58,7 +58,7 @@ sys_call_table: | |||
58 | .quad sys_open /* 45 */ | 58 | .quad sys_open /* 45 */ |
59 | .quad alpha_ni_syscall | 59 | .quad alpha_ni_syscall |
60 | .quad sys_getxgid | 60 | .quad sys_getxgid |
61 | .quad osf_sigprocmask | 61 | .quad sys_osf_sigprocmask |
62 | .quad alpha_ni_syscall | 62 | .quad alpha_ni_syscall |
63 | .quad alpha_ni_syscall /* 50 */ | 63 | .quad alpha_ni_syscall /* 50 */ |
64 | .quad sys_acct | 64 | .quad sys_acct |
@@ -512,6 +512,9 @@ sys_call_table: | |||
512 | .quad sys_pwritev | 512 | .quad sys_pwritev |
513 | .quad sys_rt_tgsigqueueinfo | 513 | .quad sys_rt_tgsigqueueinfo |
514 | .quad sys_perf_event_open | 514 | .quad sys_perf_event_open |
515 | .quad sys_fanotify_init | ||
516 | .quad sys_fanotify_mark /* 495 */ | ||
517 | .quad sys_prlimit64 | ||
515 | 518 | ||
516 | .size sys_call_table, . - sys_call_table | 519 | .size sys_call_table, . - sys_call_table |
517 | .type sys_call_table, @object | 520 | .type sys_call_table, @object |
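The three entries appended to the alpha sys_call_table above (fanotify_init, fanotify_mark, prlimit64) pair with the earlier unistd.h hunk that defines __NR_* 494..496 and bumps NR_SYSCALLS from 494 to 497; the table length and the constant have to move together, or new numbers fall through to alpha_ni_syscall. A small, purely hypothetical compile-time check for that invariant (the alpha port builds its table in assembly and does not carry such a check):

/* Hypothetical consistency check between table size and count macro. */
#define NR_SYSCALLS 497

typedef long (*syscall_fn)(void);

extern long sys_ni_syscall(void);

static const syscall_fn sys_call_table[NR_SYSCALLS] = {
	[0 ... NR_SYSCALLS - 1] = sys_ni_syscall,   /* GCC range initialiser */
	/* real entries would then be filled in by index */
};

_Static_assert(sizeof(sys_call_table) / sizeof(sys_call_table[0]) == NR_SYSCALLS,
	       "sys_call_table size must match NR_SYSCALLS");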
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c index eacceb26d9c8..396af1799ea4 100644 --- a/arch/alpha/kernel/time.c +++ b/arch/alpha/kernel/time.c | |||
@@ -191,16 +191,16 @@ irqreturn_t timer_interrupt(int irq, void *dev) | |||
191 | 191 | ||
192 | write_sequnlock(&xtime_lock); | 192 | write_sequnlock(&xtime_lock); |
193 | 193 | ||
194 | #ifndef CONFIG_SMP | ||
195 | while (nticks--) | ||
196 | update_process_times(user_mode(get_irq_regs())); | ||
197 | #endif | ||
198 | |||
199 | if (test_perf_event_pending()) { | 194 | if (test_perf_event_pending()) { |
200 | clear_perf_event_pending(); | 195 | clear_perf_event_pending(); |
201 | perf_event_do_pending(); | 196 | perf_event_do_pending(); |
202 | } | 197 | } |
203 | 198 | ||
199 | #ifndef CONFIG_SMP | ||
200 | while (nticks--) | ||
201 | update_process_times(user_mode(get_irq_regs())); | ||
202 | #endif | ||
203 | |||
204 | return IRQ_HANDLED; | 204 | return IRQ_HANDLED; |
205 | } | 205 | } |
206 | 206 | ||
diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c index b14f015008ad..0414e021a91c 100644 --- a/arch/alpha/kernel/traps.c +++ b/arch/alpha/kernel/traps.c | |||
@@ -13,7 +13,6 @@ | |||
13 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
14 | #include <linux/tty.h> | 14 | #include <linux/tty.h> |
15 | #include <linux/delay.h> | 15 | #include <linux/delay.h> |
16 | #include <linux/smp_lock.h> | ||
17 | #include <linux/module.h> | 16 | #include <linux/module.h> |
18 | #include <linux/init.h> | 17 | #include <linux/init.h> |
19 | #include <linux/kallsyms.h> | 18 | #include <linux/kallsyms.h> |
@@ -623,7 +622,6 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg, | |||
623 | return; | 622 | return; |
624 | } | 623 | } |
625 | 624 | ||
626 | lock_kernel(); | ||
627 | printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n", | 625 | printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n", |
628 | pc, va, opcode, reg); | 626 | pc, va, opcode, reg); |
629 | do_exit(SIGSEGV); | 627 | do_exit(SIGSEGV); |
@@ -646,7 +644,6 @@ got_exception: | |||
646 | * Yikes! No one to forward the exception to. | 644 | * Yikes! No one to forward the exception to. |
647 | * Since the registers are in a weird format, dump them ourselves. | 645 | * Since the registers are in a weird format, dump them ourselves. |
648 | */ | 646 | */ |
649 | lock_kernel(); | ||
650 | 647 | ||
651 | printk("%s(%d): unhandled unaligned exception\n", | 648 | printk("%s(%d): unhandled unaligned exception\n", |
652 | current->comm, task_pid_nr(current)); | 649 | current->comm, task_pid_nr(current)); |
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index a7ed21f0136a..88c97bc7a6f5 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
@@ -271,7 +271,6 @@ config ARCH_AT91 | |||
271 | bool "Atmel AT91" | 271 | bool "Atmel AT91" |
272 | select ARCH_REQUIRE_GPIOLIB | 272 | select ARCH_REQUIRE_GPIOLIB |
273 | select HAVE_CLK | 273 | select HAVE_CLK |
274 | select ARCH_USES_GETTIMEOFFSET | ||
275 | help | 274 | help |
276 | This enables support for systems based on the Atmel AT91RM9200, | 275 | This enables support for systems based on the Atmel AT91RM9200, |
277 | AT91SAM9 and AT91CAP9 processors. | 276 | AT91SAM9 and AT91CAP9 processors. |
@@ -1051,6 +1050,32 @@ config ARM_ERRATA_460075 | |||
1051 | ACTLR register. Note that setting specific bits in the ACTLR register | 1050 | ACTLR register. Note that setting specific bits in the ACTLR register |
1052 | may not be available in non-secure mode. | 1051 | may not be available in non-secure mode. |
1053 | 1052 | ||
1053 | config ARM_ERRATA_742230 | ||
1054 | bool "ARM errata: DMB operation may be faulty" | ||
1055 | depends on CPU_V7 && SMP | ||
1056 | help | ||
1057 | This option enables the workaround for the 742230 Cortex-A9 | ||
1058 | (r1p0..r2p2) erratum. Under rare circumstances, a DMB instruction | ||
1059 | between two write operations may not ensure the correct visibility | ||
1060 | ordering of the two writes. This workaround sets a specific bit in | ||
1061 | the diagnostic register of the Cortex-A9 which causes the DMB | ||
1062 | instruction to behave as a DSB, ensuring the correct behaviour of | ||
1063 | the two writes. | ||
1064 | |||
1065 | config ARM_ERRATA_742231 | ||
1066 | bool "ARM errata: Incorrect hazard handling in the SCU may lead to data corruption" | ||
1067 | depends on CPU_V7 && SMP | ||
1068 | help | ||
1069 | This option enables the workaround for the 742231 Cortex-A9 | ||
1070 | (r2p0..r2p2) erratum. Under certain conditions, specific to the | ||
1071 | Cortex-A9 MPCore micro-architecture, two CPUs working in SMP mode, | ||
1072 | accessing some data located in the same cache line, may get corrupted | ||
1073 | data due to bad handling of the address hazard when the line gets | ||
1074 | replaced from one of the CPUs at the same time as another CPU is | ||
1075 | accessing it. This workaround sets specific bits in the diagnostic | ||
1076 | register of the Cortex-A9 which reduces the linefill issuing | ||
1077 | capabilities of the processor. | ||
1078 | |||
1054 | config PL310_ERRATA_588369 | 1079 | config PL310_ERRATA_588369 |
1055 | bool "Clean & Invalidate maintenance operations do not invalidate clean lines" | 1080 | bool "Clean & Invalidate maintenance operations do not invalidate clean lines" |
1056 | depends on CACHE_L2X0 && ARCH_OMAP4 | 1081 | depends on CACHE_L2X0 && ARCH_OMAP4 |
@@ -1576,96 +1601,6 @@ config AUTO_ZRELADDR | |||
1576 | 0xf8000000. This assumes the zImage being placed in the first 128MB | 1601 | 0xf8000000. This assumes the zImage being placed in the first 128MB |
1577 | from start of memory. | 1602 | from start of memory. |
1578 | 1603 | ||
1579 | config ZRELADDR | ||
1580 | hex "Physical address of the decompressed kernel image" | ||
1581 | depends on !AUTO_ZRELADDR | ||
1582 | default 0x00008000 if ARCH_BCMRING ||\ | ||
1583 | ARCH_CNS3XXX ||\ | ||
1584 | ARCH_DOVE ||\ | ||
1585 | ARCH_EBSA110 ||\ | ||
1586 | ARCH_FOOTBRIDGE ||\ | ||
1587 | ARCH_INTEGRATOR ||\ | ||
1588 | ARCH_IOP13XX ||\ | ||
1589 | ARCH_IOP33X ||\ | ||
1590 | ARCH_IXP2000 ||\ | ||
1591 | ARCH_IXP23XX ||\ | ||
1592 | ARCH_IXP4XX ||\ | ||
1593 | ARCH_KIRKWOOD ||\ | ||
1594 | ARCH_KS8695 ||\ | ||
1595 | ARCH_LOKI ||\ | ||
1596 | ARCH_MMP ||\ | ||
1597 | ARCH_MV78XX0 ||\ | ||
1598 | ARCH_NOMADIK ||\ | ||
1599 | ARCH_NUC93X ||\ | ||
1600 | ARCH_NS9XXX ||\ | ||
1601 | ARCH_ORION5X ||\ | ||
1602 | ARCH_SPEAR3XX ||\ | ||
1603 | ARCH_SPEAR6XX ||\ | ||
1604 | ARCH_U8500 ||\ | ||
1605 | ARCH_VERSATILE ||\ | ||
1606 | ARCH_W90X900 | ||
1607 | default 0x08008000 if ARCH_MX1 ||\ | ||
1608 | ARCH_SHARK | ||
1609 | default 0x10008000 if ARCH_MSM ||\ | ||
1610 | ARCH_OMAP1 ||\ | ||
1611 | ARCH_RPC | ||
1612 | default 0x20008000 if ARCH_S5P6440 ||\ | ||
1613 | ARCH_S5P6442 ||\ | ||
1614 | ARCH_S5PC100 ||\ | ||
1615 | ARCH_S5PV210 | ||
1616 | default 0x30008000 if ARCH_S3C2410 ||\ | ||
1617 | ARCH_S3C2400 ||\ | ||
1618 | ARCH_S3C2412 ||\ | ||
1619 | ARCH_S3C2416 ||\ | ||
1620 | ARCH_S3C2440 ||\ | ||
1621 | ARCH_S3C2443 | ||
1622 | default 0x40008000 if ARCH_STMP378X ||\ | ||
1623 | ARCH_STMP37XX ||\ | ||
1624 | ARCH_SH7372 ||\ | ||
1625 | ARCH_SH7377 ||\ | ||
1626 | ARCH_S5PV310 | ||
1627 | default 0x50008000 if ARCH_S3C64XX ||\ | ||
1628 | ARCH_SH7367 | ||
1629 | default 0x60008000 if ARCH_VEXPRESS | ||
1630 | default 0x80008000 if ARCH_MX25 ||\ | ||
1631 | ARCH_MX3 ||\ | ||
1632 | ARCH_NETX ||\ | ||
1633 | ARCH_OMAP2PLUS ||\ | ||
1634 | ARCH_PNX4008 | ||
1635 | default 0x90008000 if ARCH_MX5 ||\ | ||
1636 | ARCH_MX91231 | ||
1637 | default 0xa0008000 if ARCH_IOP32X ||\ | ||
1638 | ARCH_PXA ||\ | ||
1639 | MACH_MX27 | ||
1640 | default 0xc0008000 if ARCH_LH7A40X ||\ | ||
1641 | MACH_MX21 | ||
1642 | default 0xf0008000 if ARCH_AAEC2000 ||\ | ||
1643 | ARCH_L7200 | ||
1644 | default 0xc0028000 if ARCH_CLPS711X | ||
1645 | default 0x70008000 if ARCH_AT91 && (ARCH_AT91CAP9 || ARCH_AT91SAM9G45) | ||
1646 | default 0x20008000 if ARCH_AT91 && !(ARCH_AT91CAP9 || ARCH_AT91SAM9G45) | ||
1647 | default 0xc0008000 if ARCH_DAVINCI && ARCH_DAVINCI_DA8XX | ||
1648 | default 0x80008000 if ARCH_DAVINCI && !ARCH_DAVINCI_DA8XX | ||
1649 | default 0x00008000 if ARCH_EP93XX && EP93XX_SDCE3_SYNC_PHYS_OFFSET | ||
1650 | default 0xc0008000 if ARCH_EP93XX && EP93XX_SDCE0_PHYS_OFFSET | ||
1651 | default 0xd0008000 if ARCH_EP93XX && EP93XX_SDCE1_PHYS_OFFSET | ||
1652 | default 0xe0008000 if ARCH_EP93XX && EP93XX_SDCE2_PHYS_OFFSET | ||
1653 | default 0xf0008000 if ARCH_EP93XX && EP93XX_SDCE3_ASYNC_PHYS_OFFSET | ||
1654 | default 0x00008000 if ARCH_GEMINI && GEMINI_MEM_SWAP | ||
1655 | default 0x10008000 if ARCH_GEMINI && !GEMINI_MEM_SWAP | ||
1656 | default 0x70008000 if ARCH_REALVIEW && REALVIEW_HIGH_PHYS_OFFSET | ||
1657 | default 0x00008000 if ARCH_REALVIEW && !REALVIEW_HIGH_PHYS_OFFSET | ||
1658 | default 0xc0208000 if ARCH_SA1100 && SA1111 | ||
1659 | default 0xc0008000 if ARCH_SA1100 && !SA1111 | ||
1660 | default 0x30108000 if ARCH_S3C2410 && PM_H1940 | ||
1661 | default 0x28E08000 if ARCH_U300 && MACH_U300_SINGLE_RAM | ||
1662 | default 0x48008000 if ARCH_U300 && !MACH_U300_SINGLE_RAM | ||
1663 | help | ||
1664 | ZRELADDR is the physical address where the decompressed kernel | ||
1665 | image will be placed. ZRELADDR has to be specified when the | ||
1666 | assumption of AUTO_ZRELADDR is not valid, or when ZBOOT_ROM is | ||
1667 | selected. | ||
1668 | |||
1669 | endmenu | 1604 | endmenu |
1670 | 1605 | ||
1671 | menu "CPU Power Management" | 1606 | menu "CPU Power Management" |
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile index f705213caa88..4a590f4113e2 100644 --- a/arch/arm/boot/Makefile +++ b/arch/arm/boot/Makefile | |||
@@ -14,16 +14,18 @@ | |||
14 | MKIMAGE := $(srctree)/scripts/mkuboot.sh | 14 | MKIMAGE := $(srctree)/scripts/mkuboot.sh |
15 | 15 | ||
16 | ifneq ($(MACHINE),) | 16 | ifneq ($(MACHINE),) |
17 | -include $(srctree)/$(MACHINE)/Makefile.boot | 17 | include $(srctree)/$(MACHINE)/Makefile.boot |
18 | endif | 18 | endif |
19 | 19 | ||
20 | # Note: the following conditions must always be true: | 20 | # Note: the following conditions must always be true: |
21 | # ZRELADDR == virt_to_phys(PAGE_OFFSET + TEXT_OFFSET) | ||
21 | # PARAMS_PHYS must be within 4MB of ZRELADDR | 22 | # PARAMS_PHYS must be within 4MB of ZRELADDR |
22 | # INITRD_PHYS must be in RAM | 23 | # INITRD_PHYS must be in RAM |
24 | ZRELADDR := $(zreladdr-y) | ||
23 | PARAMS_PHYS := $(params_phys-y) | 25 | PARAMS_PHYS := $(params_phys-y) |
24 | INITRD_PHYS := $(initrd_phys-y) | 26 | INITRD_PHYS := $(initrd_phys-y) |
25 | 27 | ||
26 | export INITRD_PHYS PARAMS_PHYS | 28 | export ZRELADDR INITRD_PHYS PARAMS_PHYS |
27 | 29 | ||
28 | targets := Image zImage xipImage bootpImage uImage | 30 | targets := Image zImage xipImage bootpImage uImage |
29 | 31 | ||
@@ -65,7 +67,7 @@ quiet_cmd_uimage = UIMAGE $@ | |||
65 | ifeq ($(CONFIG_ZBOOT_ROM),y) | 67 | ifeq ($(CONFIG_ZBOOT_ROM),y) |
66 | $(obj)/uImage: LOADADDR=$(CONFIG_ZBOOT_ROM_TEXT) | 68 | $(obj)/uImage: LOADADDR=$(CONFIG_ZBOOT_ROM_TEXT) |
67 | else | 69 | else |
68 | $(obj)/uImage: LOADADDR=$(CONFIG_ZRELADDR) | 70 | $(obj)/uImage: LOADADDR=$(ZRELADDR) |
69 | endif | 71 | endif |
70 | 72 | ||
71 | ifeq ($(CONFIG_THUMB2_KERNEL),y) | 73 | ifeq ($(CONFIG_THUMB2_KERNEL),y) |
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index 68775e33476c..65a7c1c588a9 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile | |||
@@ -79,6 +79,10 @@ endif | |||
79 | EXTRA_CFLAGS := -fpic -fno-builtin | 79 | EXTRA_CFLAGS := -fpic -fno-builtin |
80 | EXTRA_AFLAGS := -Wa,-march=all | 80 | EXTRA_AFLAGS := -Wa,-march=all |
81 | 81 | ||
82 | # Supply ZRELADDR to the decompressor via a linker symbol. | ||
83 | ifneq ($(CONFIG_AUTO_ZRELADDR),y) | ||
84 | LDFLAGS_vmlinux := --defsym zreladdr=$(ZRELADDR) | ||
85 | endif | ||
82 | ifeq ($(CONFIG_CPU_ENDIAN_BE8),y) | 86 | ifeq ($(CONFIG_CPU_ENDIAN_BE8),y) |
83 | LDFLAGS_vmlinux += --be8 | 87 | LDFLAGS_vmlinux += --be8 |
84 | endif | 88 | endif |
@@ -112,5 +116,5 @@ CFLAGS_font.o := -Dstatic= | |||
112 | $(obj)/font.c: $(FONTC) | 116 | $(obj)/font.c: $(FONTC) |
113 | $(call cmd,shipped) | 117 | $(call cmd,shipped) |
114 | 118 | ||
115 | $(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile .config | 119 | $(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile $(KCONFIG_CONFIG) |
116 | @sed "$(SEDFLAGS)" < $< > $@ | 120 | @sed "$(SEDFLAGS)" < $< > $@ |
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index 6af9907c3b5c..6825c34646d4 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S | |||
@@ -177,7 +177,7 @@ not_angel: | |||
177 | and r4, pc, #0xf8000000 | 177 | and r4, pc, #0xf8000000 |
178 | add r4, r4, #TEXT_OFFSET | 178 | add r4, r4, #TEXT_OFFSET |
179 | #else | 179 | #else |
180 | ldr r4, =CONFIG_ZRELADDR | 180 | ldr r4, =zreladdr |
181 | #endif | 181 | #endif |
182 | subs r0, r0, r1 @ calculate the delta offset | 182 | subs r0, r0, r1 @ calculate the delta offset |
183 | 183 | ||
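With the ZRELADDR Kconfig table gone, the address reaches the decompressor in one of two ways: under CONFIG_AUTO_ZRELADDR the head.S code above derives it at run time by rounding the current PC down to a 128MB boundary and adding TEXT_OFFSET, and otherwise the boot Makefiles feed the per-machine zreladdr-y value into the link as a symbol via --defsym, which the new `ldr r4, =zreladdr` picks up. The rounding step, written out in C for illustration (the TEXT_OFFSET value is a typical one, not taken from the patch):

#include <stdio.h>

#define TEXT_OFFSET 0x8000u   /* typical ARM value; illustrative here */

/* AUTO_ZRELADDR assumes the zImage runs within the first 128MB of RAM,
 * so masking the PC to a 128MB boundary recovers the start of RAM. */
static unsigned long auto_zreladdr(unsigned long pc)
{
	return (pc & ~0x07ffffffUL) + TEXT_OFFSET;
}

int main(void)
{
	printf("0x%08lx\n", auto_zreladdr(0x80123456UL));   /* -> 0x80008000 */
	return 0;
}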
diff --git a/arch/arm/common/it8152.c b/arch/arm/common/it8152.c index 6c0913562455..1bec96e85196 100644 --- a/arch/arm/common/it8152.c +++ b/arch/arm/common/it8152.c | |||
@@ -263,6 +263,22 @@ static int it8152_pci_platform_notify_remove(struct device *dev) | |||
263 | return 0; | 263 | return 0; |
264 | } | 264 | } |
265 | 265 | ||
266 | int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size) | ||
267 | { | ||
268 | dev_dbg(dev, "%s: dma_addr %08x, size %08x\n", | ||
269 | __func__, dma_addr, size); | ||
270 | return (dev->bus == &pci_bus_type) && | ||
271 | ((dma_addr + size - PHYS_OFFSET) >= SZ_64M); | ||
272 | } | ||
273 | |||
274 | int dma_set_coherent_mask(struct device *dev, u64 mask) | ||
275 | { | ||
276 | if (mask >= PHYS_OFFSET + SZ_64M - 1) | ||
277 | return 0; | ||
278 | |||
279 | return -EIO; | ||
280 | } | ||
281 | |||
266 | int __init it8152_pci_setup(int nr, struct pci_sys_data *sys) | 282 | int __init it8152_pci_setup(int nr, struct pci_sys_data *sys) |
267 | { | 283 | { |
268 | it8152_io.start = IT8152_IO_BASE + 0x12000; | 284 | it8152_io.start = IT8152_IO_BASE + 0x12000; |
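The new dma_needs_bounce() and dma_set_coherent_mask() in it8152.c encode the bridge's limitation that PCI devices can only DMA into the first 64MB of RAM: any buffer reaching beyond that window must go through a bounce buffer, and coherent masks smaller than the window are rejected. The window test, restated as standalone C without the pci_bus_type check (the RAM base value is illustrative, not from the patch):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SZ_64M      (64u * 1024 * 1024)
#define PHYS_OFFSET 0xa0000000u   /* illustrative RAM base */

/* A buffer needs bouncing when any part of it falls beyond the
 * first 64MB of RAM that the IT8152 can reach. */
static bool needs_bounce(uint32_t dma_addr, uint32_t size)
{
	return (dma_addr + size - PHYS_OFFSET) >= SZ_64M;
}

int main(void)
{
	printf("%d\n", needs_bounce(PHYS_OFFSET + SZ_64M - 4096, 4096));  /* 1: ends at the limit */
	printf("%d\n", needs_bounce(PHYS_OFFSET, 4096));                  /* 0: well inside */
	return 0;
}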
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index c226fe10553e..c568da7dcae4 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h | |||
@@ -288,15 +288,7 @@ extern void dmabounce_unregister_dev(struct device *); | |||
288 | * DMA access and 1 if the buffer needs to be bounced. | 288 | * DMA access and 1 if the buffer needs to be bounced. |
289 | * | 289 | * |
290 | */ | 290 | */ |
291 | #ifdef CONFIG_SA1111 | ||
292 | extern int dma_needs_bounce(struct device*, dma_addr_t, size_t); | 291 | extern int dma_needs_bounce(struct device*, dma_addr_t, size_t); |
293 | #else | ||
294 | static inline int dma_needs_bounce(struct device *dev, dma_addr_t addr, | ||
295 | size_t size) | ||
296 | { | ||
297 | return 0; | ||
298 | } | ||
299 | #endif | ||
300 | 292 | ||
301 | /* | 293 | /* |
302 | * The DMA API, implemented by dmabounce.c. See below for descriptions. | 294 | * The DMA API, implemented by dmabounce.c. See below for descriptions. |
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h index 48837e6d8887..b5799a3b7117 100644 --- a/arch/arm/include/asm/perf_event.h +++ b/arch/arm/include/asm/perf_event.h | |||
@@ -17,7 +17,7 @@ | |||
17 | * counter interrupts are regular interrupts and not an NMI. This | 17 | * counter interrupts are regular interrupts and not an NMI. This |
18 | * means that when we receive the interrupt we can call | 18 | * means that when we receive the interrupt we can call |
19 | * perf_event_do_pending() that handles all of the work with | 19 | * perf_event_do_pending() that handles all of the work with |
20 | * interrupts enabled. | 20 | * interrupts disabled. |
21 | */ | 21 | */ |
22 | static inline void | 22 | static inline void |
23 | set_perf_event_pending(void) | 23 | set_perf_event_pending(void) |
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index ab68cf1ef80f..e90b167ea848 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h | |||
@@ -317,6 +317,10 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; } | |||
317 | #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE | 317 | #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE |
318 | #define pgprot_dmacoherent(prot) \ | 318 | #define pgprot_dmacoherent(prot) \ |
319 | __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE) | 319 | __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE) |
320 | #define __HAVE_PHYS_MEM_ACCESS_PROT | ||
321 | struct file; | ||
322 | extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, | ||
323 | unsigned long size, pgprot_t vma_prot); | ||
320 | #else | 324 | #else |
321 | #define pgprot_dmacoherent(prot) \ | 325 | #define pgprot_dmacoherent(prot) \ |
322 | __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED) | 326 | __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED) |
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h index d02cfb683487..c891eb76c0e3 100644 --- a/arch/arm/include/asm/unistd.h +++ b/arch/arm/include/asm/unistd.h | |||
@@ -393,6 +393,9 @@ | |||
393 | #define __NR_perf_event_open (__NR_SYSCALL_BASE+364) | 393 | #define __NR_perf_event_open (__NR_SYSCALL_BASE+364) |
394 | #define __NR_recvmmsg (__NR_SYSCALL_BASE+365) | 394 | #define __NR_recvmmsg (__NR_SYSCALL_BASE+365) |
395 | #define __NR_accept4 (__NR_SYSCALL_BASE+366) | 395 | #define __NR_accept4 (__NR_SYSCALL_BASE+366) |
396 | #define __NR_fanotify_init (__NR_SYSCALL_BASE+367) | ||
397 | #define __NR_fanotify_mark (__NR_SYSCALL_BASE+368) | ||
398 | #define __NR_prlimit64 (__NR_SYSCALL_BASE+369) | ||
396 | 399 | ||
397 | /* | 400 | /* |
398 | * The following SWIs are ARM private. | 401 | * The following SWIs are ARM private. |
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S index afeb71fa72cb..5c26eccef998 100644 --- a/arch/arm/kernel/calls.S +++ b/arch/arm/kernel/calls.S | |||
@@ -376,6 +376,9 @@ | |||
376 | CALL(sys_perf_event_open) | 376 | CALL(sys_perf_event_open) |
377 | /* 365 */ CALL(sys_recvmmsg) | 377 | /* 365 */ CALL(sys_recvmmsg) |
378 | CALL(sys_accept4) | 378 | CALL(sys_accept4) |
379 | CALL(sys_fanotify_init) | ||
380 | CALL(sys_fanotify_mark) | ||
381 | CALL(sys_prlimit64) | ||
379 | #ifndef syscalls_counted | 382 | #ifndef syscalls_counted |
380 | .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls | 383 | .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls |
381 | #define syscalls_counted | 384 | #define syscalls_counted |
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index f05a35a59694..7885722bdf4e 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S | |||
@@ -48,6 +48,8 @@ work_pending: | |||
48 | beq no_work_pending | 48 | beq no_work_pending |
49 | mov r0, sp @ 'regs' | 49 | mov r0, sp @ 'regs' |
50 | mov r2, why @ 'syscall' | 50 | mov r2, why @ 'syscall' |
51 | tst r1, #_TIF_SIGPENDING @ delivering a signal? | ||
52 | movne why, #0 @ prevent further restarts | ||
51 | bl do_notify_resume | 53 | bl do_notify_resume |
52 | b ret_slow_syscall @ Check work again | 54 | b ret_slow_syscall @ Check work again |
53 | 55 | ||
@@ -418,11 +420,13 @@ ENDPROC(sys_clone_wrapper) | |||
418 | 420 | ||
419 | sys_sigreturn_wrapper: | 421 | sys_sigreturn_wrapper: |
420 | add r0, sp, #S_OFF | 422 | add r0, sp, #S_OFF |
423 | mov why, #0 @ prevent syscall restart handling | ||
421 | b sys_sigreturn | 424 | b sys_sigreturn |
422 | ENDPROC(sys_sigreturn_wrapper) | 425 | ENDPROC(sys_sigreturn_wrapper) |
423 | 426 | ||
424 | sys_rt_sigreturn_wrapper: | 427 | sys_rt_sigreturn_wrapper: |
425 | add r0, sp, #S_OFF | 428 | add r0, sp, #S_OFF |
429 | mov why, #0 @ prevent syscall restart handling | ||
426 | b sys_rt_sigreturn | 430 | b sys_rt_sigreturn |
427 | ENDPROC(sys_rt_sigreturn_wrapper) | 431 | ENDPROC(sys_rt_sigreturn_wrapper) |
428 | 432 | ||
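Both this entry-common.S hunk and the earlier alpha entry.S work_notifysig change address the same bug: when a signal is delivered while an interrupted syscall wants restarting, the saved "returning from a syscall" marker must be cleared before do_notify_resume() runs, otherwise the restart machinery can rewind the PC a second time after the handler has already been set up. In rough C terms, purely illustrative and not kernel code:

#include <stdbool.h>
#include <stdio.h>

struct regs { unsigned long pc; };

/* Signal setup rewinds the PC once so the syscall re-executes after
 * the handler returns. */
static void setup_signal(struct regs *regs, bool restartable)
{
	if (restartable)
		regs->pc -= 4;           /* back up over the syscall insn */
}

/* The fixed return path: once a signal is pending, the marker is
 * cleared before the notify step, so the later restart check cannot
 * rewind the PC a second time. */
static void work_pending(struct regs *regs, bool sigpending, long *in_syscall)
{
	if (sigpending) {
		*in_syscall = 0;         /* prevent further restarts */
		setup_signal(regs, true);
	}
}

int main(void)
{
	struct regs regs = { .pc = 0x1000 };
	long in_syscall = 1;

	work_pending(&regs, true, &in_syscall);
	printf("pc=0x%lx in_syscall=%ld\n", regs.pc, in_syscall);   /* 0xffc, 0 */
	return 0;
}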
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index 417c392ddf1c..ecbb0288e5dd 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c | |||
@@ -319,8 +319,8 @@ validate_event(struct cpu_hw_events *cpuc, | |||
319 | { | 319 | { |
320 | struct hw_perf_event fake_event = event->hw; | 320 | struct hw_perf_event fake_event = event->hw; |
321 | 321 | ||
322 | if (event->pmu && event->pmu != &pmu) | 322 | if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF) |
323 | return 0; | 323 | return 1; |
324 | 324 | ||
325 | return armpmu->get_event_idx(cpuc, &fake_event) >= 0; | 325 | return armpmu->get_event_idx(cpuc, &fake_event) >= 0; |
326 | } | 326 | } |
@@ -1041,8 +1041,8 @@ armv6pmu_handle_irq(int irq_num, | |||
1041 | /* | 1041 | /* |
1042 | * Handle the pending perf events. | 1042 | * Handle the pending perf events. |
1043 | * | 1043 | * |
1044 | * Note: this call *must* be run with interrupts enabled. For | 1044 | * Note: this call *must* be run with interrupts disabled. For |
1045 | * platforms that can have the PMU interrupts raised as a PMI, this | 1045 | * platforms that can have the PMU interrupts raised as an NMI, this |
1046 | * will not work. | 1046 | * will not work. |
1047 | */ | 1047 | */ |
1048 | perf_event_do_pending(); | 1048 | perf_event_do_pending(); |
@@ -2017,8 +2017,8 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev) | |||
2017 | /* | 2017 | /* |
2018 | * Handle the pending perf events. | 2018 | * Handle the pending perf events. |
2019 | * | 2019 | * |
2020 | * Note: this call *must* be run with interrupts enabled. For | 2020 | * Note: this call *must* be run with interrupts disabled. For |
2021 | * platforms that can have the PMU interrupts raised as a PMI, this | 2021 | * platforms that can have the PMU interrupts raised as an NMI, this |
2022 | * will not work. | 2022 | * will not work. |
2023 | */ | 2023 | */ |
2024 | perf_event_do_pending(); | 2024 | perf_event_do_pending(); |
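
The validate_event() fix above inverts the filter: an event that belongs to a different PMU, or is already switched off, no longer vetoes group scheduling; only events actually aimed at this PMU get charged against a fake counter allocation. A minimal model of that group-validation idea, with simplified stand-in types and a made-up get_event_idx() (not the kernel's real structures):

	/* Sketch only: simplified types, hypothetical names. */
	#include <stdbool.h>
	#include <stddef.h>

	struct event {
		const void *pmu;	/* which PMU this event targets */
		int state;		/* <= STATE_OFF means not scheduled */
		struct event *sibling;	/* next event in the group, or NULL */
	};

	#define STATE_OFF 0

	/* Pretend counter allocator: >= 0 while a slot is still free. */
	static int get_event_idx(int *used, struct event *ev)
	{
		(void)ev;
		return (*used < 4) ? (*used)++ : -1;
	}

	static bool validate_event(const void *my_pmu, int *used, struct event *ev)
	{
		/* Events owned by another PMU, or already off, never block
		 * the group: report them schedulable without using a slot. */
		if (ev->pmu != my_pmu || ev->state <= STATE_OFF)
			return true;

		return get_event_idx(used, ev) >= 0;
	}

	static bool validate_group(const void *my_pmu, struct event *leader)
	{
		struct event *ev;
		int used = 0;	/* fake usage; never touches live counters */

		for (ev = leader; ev; ev = ev->sibling)
			if (!validate_event(my_pmu, &used, ev))
				return false;
		return true;
	}
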
diff --git a/arch/arm/mach-at91/at91sam9g45.c b/arch/arm/mach-at91/at91sam9g45.c index 753c0d31a3d3..c67b47f1c0fd 100644 --- a/arch/arm/mach-at91/at91sam9g45.c +++ b/arch/arm/mach-at91/at91sam9g45.c | |||
@@ -121,8 +121,8 @@ static struct clk ssc1_clk = { | |||
121 | .pmc_mask = 1 << AT91SAM9G45_ID_SSC1, | 121 | .pmc_mask = 1 << AT91SAM9G45_ID_SSC1, |
122 | .type = CLK_TYPE_PERIPHERAL, | 122 | .type = CLK_TYPE_PERIPHERAL, |
123 | }; | 123 | }; |
124 | static struct clk tcb_clk = { | 124 | static struct clk tcb0_clk = { |
125 | .name = "tcb_clk", | 125 | .name = "tcb0_clk", |
126 | .pmc_mask = 1 << AT91SAM9G45_ID_TCB, | 126 | .pmc_mask = 1 << AT91SAM9G45_ID_TCB, |
127 | .type = CLK_TYPE_PERIPHERAL, | 127 | .type = CLK_TYPE_PERIPHERAL, |
128 | }; | 128 | }; |
@@ -192,6 +192,14 @@ static struct clk ohci_clk = { | |||
192 | .parent = &uhphs_clk, | 192 | .parent = &uhphs_clk, |
193 | }; | 193 | }; |
194 | 194 | ||
195 | /* One additional fake clock for second TC block */ | ||
196 | static struct clk tcb1_clk = { | ||
197 | .name = "tcb1_clk", | ||
198 | .pmc_mask = 0, | ||
199 | .type = CLK_TYPE_PERIPHERAL, | ||
200 | .parent = &tcb0_clk, | ||
201 | }; | ||
202 | |||
195 | static struct clk *periph_clocks[] __initdata = { | 203 | static struct clk *periph_clocks[] __initdata = { |
196 | &pioA_clk, | 204 | &pioA_clk, |
197 | &pioB_clk, | 205 | &pioB_clk, |
@@ -208,7 +216,7 @@ static struct clk *periph_clocks[] __initdata = { | |||
208 | &spi1_clk, | 216 | &spi1_clk, |
209 | &ssc0_clk, | 217 | &ssc0_clk, |
210 | &ssc1_clk, | 218 | &ssc1_clk, |
211 | &tcb_clk, | 219 | &tcb0_clk, |
212 | &pwm_clk, | 220 | &pwm_clk, |
213 | &tsc_clk, | 221 | &tsc_clk, |
214 | &dma_clk, | 222 | &dma_clk, |
@@ -221,6 +229,7 @@ static struct clk *periph_clocks[] __initdata = { | |||
221 | &mmc1_clk, | 229 | &mmc1_clk, |
222 | // irq0 | 230 | // irq0 |
223 | &ohci_clk, | 231 | &ohci_clk, |
232 | &tcb1_clk, | ||
224 | }; | 233 | }; |
225 | 234 | ||
226 | /* | 235 | /* |
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c index 809114d5a5a6..1276babf84d5 100644 --- a/arch/arm/mach-at91/at91sam9g45_devices.c +++ b/arch/arm/mach-at91/at91sam9g45_devices.c | |||
@@ -46,7 +46,7 @@ static struct resource hdmac_resources[] = { | |||
46 | .end = AT91_BASE_SYS + AT91_DMA + SZ_512 - 1, | 46 | .end = AT91_BASE_SYS + AT91_DMA + SZ_512 - 1, |
47 | .flags = IORESOURCE_MEM, | 47 | .flags = IORESOURCE_MEM, |
48 | }, | 48 | }, |
49 | [2] = { | 49 | [1] = { |
50 | .start = AT91SAM9G45_ID_DMA, | 50 | .start = AT91SAM9G45_ID_DMA, |
51 | .end = AT91SAM9G45_ID_DMA, | 51 | .end = AT91SAM9G45_ID_DMA, |
52 | .flags = IORESOURCE_IRQ, | 52 | .flags = IORESOURCE_IRQ, |
@@ -426,7 +426,7 @@ static struct i2c_gpio_platform_data pdata_i2c0 = { | |||
426 | .sda_is_open_drain = 1, | 426 | .sda_is_open_drain = 1, |
427 | .scl_pin = AT91_PIN_PA21, | 427 | .scl_pin = AT91_PIN_PA21, |
428 | .scl_is_open_drain = 1, | 428 | .scl_is_open_drain = 1, |
429 | .udelay = 2, /* ~100 kHz */ | 429 | .udelay = 5, /* ~100 kHz */ |
430 | }; | 430 | }; |
431 | 431 | ||
432 | static struct platform_device at91sam9g45_twi0_device = { | 432 | static struct platform_device at91sam9g45_twi0_device = { |
@@ -440,7 +440,7 @@ static struct i2c_gpio_platform_data pdata_i2c1 = { | |||
440 | .sda_is_open_drain = 1, | 440 | .sda_is_open_drain = 1, |
441 | .scl_pin = AT91_PIN_PB11, | 441 | .scl_pin = AT91_PIN_PB11, |
442 | .scl_is_open_drain = 1, | 442 | .scl_is_open_drain = 1, |
443 | .udelay = 2, /* ~100 kHz */ | 443 | .udelay = 5, /* ~100 kHz */ |
444 | }; | 444 | }; |
445 | 445 | ||
446 | static struct platform_device at91sam9g45_twi1_device = { | 446 | static struct platform_device at91sam9g45_twi1_device = { |
@@ -835,9 +835,9 @@ static struct platform_device at91sam9g45_tcb1_device = { | |||
835 | static void __init at91_add_device_tc(void) | 835 | static void __init at91_add_device_tc(void) |
836 | { | 836 | { |
837 | /* this chip has one clock and irq for all six TC channels */ | 837 | /* this chip has one clock and irq for all six TC channels */ |
838 | at91_clock_associate("tcb_clk", &at91sam9g45_tcb0_device.dev, "t0_clk"); | 838 | at91_clock_associate("tcb0_clk", &at91sam9g45_tcb0_device.dev, "t0_clk"); |
839 | platform_device_register(&at91sam9g45_tcb0_device); | 839 | platform_device_register(&at91sam9g45_tcb0_device); |
840 | at91_clock_associate("tcb_clk", &at91sam9g45_tcb1_device.dev, "t0_clk"); | 840 | at91_clock_associate("tcb1_clk", &at91sam9g45_tcb1_device.dev, "t0_clk"); |
841 | platform_device_register(&at91sam9g45_tcb1_device); | 841 | platform_device_register(&at91sam9g45_tcb1_device); |
842 | } | 842 | } |
843 | #else | 843 | #else |
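
The two .udelay changes above are what make the "~100 kHz" comments accurate: for i2c-gpio bit-banging, udelay is half of one SCL period in microseconds, so the bus rate works out to roughly 500/udelay kHz (an assumption based on how that platform-data field is conventionally documented). Worked out:

	f_SCL ≈ 1 / (2 · udelay) = 1 / (2 · 5 µs) = 100 kHz
	udelay = 2 µs would instead give 1 / (2 · 2 µs) = 250 kHz, well past
	standard-mode I2C.
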
diff --git a/arch/arm/mach-at91/board-sam9261ek.c b/arch/arm/mach-at91/board-sam9261ek.c index c4c8865d52d7..65eb0943194f 100644 --- a/arch/arm/mach-at91/board-sam9261ek.c +++ b/arch/arm/mach-at91/board-sam9261ek.c | |||
@@ -93,11 +93,12 @@ static struct resource dm9000_resource[] = { | |||
93 | .start = AT91_PIN_PC11, | 93 | .start = AT91_PIN_PC11, |
94 | .end = AT91_PIN_PC11, | 94 | .end = AT91_PIN_PC11, |
95 | .flags = IORESOURCE_IRQ | 95 | .flags = IORESOURCE_IRQ |
96 | | IORESOURCE_IRQ_LOWEDGE | IORESOURCE_IRQ_HIGHEDGE, | ||
96 | } | 97 | } |
97 | }; | 98 | }; |
98 | 99 | ||
99 | static struct dm9000_plat_data dm9000_platdata = { | 100 | static struct dm9000_plat_data dm9000_platdata = { |
100 | .flags = DM9000_PLATF_16BITONLY, | 101 | .flags = DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM, |
101 | }; | 102 | }; |
102 | 103 | ||
103 | static struct platform_device dm9000_device = { | 104 | static struct platform_device dm9000_device = { |
@@ -168,17 +169,6 @@ static struct at91_udc_data __initdata ek_udc_data = { | |||
168 | 169 | ||
169 | 170 | ||
170 | /* | 171 | /* |
171 | * MCI (SD/MMC) | ||
172 | */ | ||
173 | static struct at91_mmc_data __initdata ek_mmc_data = { | ||
174 | .wire4 = 1, | ||
175 | // .det_pin = ... not connected | ||
176 | // .wp_pin = ... not connected | ||
177 | // .vcc_pin = ... not connected | ||
178 | }; | ||
179 | |||
180 | |||
181 | /* | ||
182 | * NAND flash | 172 | * NAND flash |
183 | */ | 173 | */ |
184 | static struct mtd_partition __initdata ek_nand_partition[] = { | 174 | static struct mtd_partition __initdata ek_nand_partition[] = { |
@@ -246,6 +236,10 @@ static void __init ek_add_device_nand(void) | |||
246 | at91_add_device_nand(&ek_nand_data); | 236 | at91_add_device_nand(&ek_nand_data); |
247 | } | 237 | } |
248 | 238 | ||
239 | /* | ||
240 | * SPI related devices | ||
241 | */ | ||
242 | #if defined(CONFIG_SPI_ATMEL) || defined(CONFIG_SPI_ATMEL_MODULE) | ||
249 | 243 | ||
250 | /* | 244 | /* |
251 | * ADS7846 Touchscreen | 245 | * ADS7846 Touchscreen |
@@ -356,6 +350,19 @@ static struct spi_board_info ek_spi_devices[] = { | |||
356 | #endif | 350 | #endif |
357 | }; | 351 | }; |
358 | 352 | ||
353 | #else /* CONFIG_SPI_ATMEL_* */ | ||
354 | /* spi0 and mmc/sd share the same PIO pins: cannot be used at the same time */ | ||
355 | |||
356 | /* | ||
357 | * MCI (SD/MMC) | ||
358 | * det_pin, wp_pin and vcc_pin are not connected | ||
359 | */ | ||
360 | static struct at91_mmc_data __initdata ek_mmc_data = { | ||
361 | .wire4 = 1, | ||
362 | }; | ||
363 | |||
364 | #endif /* CONFIG_SPI_ATMEL_* */ | ||
365 | |||
359 | 366 | ||
360 | /* | 367 | /* |
361 | * LCD Controller | 368 | * LCD Controller |
diff --git a/arch/arm/mach-at91/clock.c b/arch/arm/mach-at91/clock.c index 7f7da439341f..7525cee3983f 100644 --- a/arch/arm/mach-at91/clock.c +++ b/arch/arm/mach-at91/clock.c | |||
@@ -501,7 +501,8 @@ postcore_initcall(at91_clk_debugfs_init); | |||
501 | int __init clk_register(struct clk *clk) | 501 | int __init clk_register(struct clk *clk) |
502 | { | 502 | { |
503 | if (clk_is_peripheral(clk)) { | 503 | if (clk_is_peripheral(clk)) { |
504 | clk->parent = &mck; | 504 | if (!clk->parent) |
505 | clk->parent = &mck; | ||
505 | clk->mode = pmc_periph_mode; | 506 | clk->mode = pmc_periph_mode; |
506 | list_add_tail(&clk->node, &clocks); | 507 | list_add_tail(&clk->node, &clocks); |
507 | } | 508 | } |
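
The clk_register() hunk only falls back to mck as the parent when none was supplied, which is what lets the fake tcb1_clk above keep tcb0_clk as its parent and so share its PMC enable bit. A stand-alone sketch of that registration pattern (plain C, simplified types, illustrative mask value; not the at91 structures verbatim):

	#include <stdio.h>
	#include <stddef.h>

	struct clk {
		const char *name;
		struct clk *parent;	/* NULL means "not wired up yet" */
		unsigned long pmc_mask;	/* 0 for purely virtual clocks */
	};

	static struct clk mck = { .name = "mck" };

	static void clk_register(struct clk *clk)
	{
		if (!clk->parent)	/* keep an explicit parent if given */
			clk->parent = &mck;
	}

	int main(void)
	{
		struct clk tcb0 = { .name = "tcb0_clk", .pmc_mask = 1 << 18 };
		struct clk tcb1 = { .name = "tcb1_clk", .parent = &tcb0 };

		clk_register(&tcb0);	/* gets mck as parent */
		clk_register(&tcb1);	/* keeps tcb0 as parent */

		printf("%s -> %s\n", tcb1.name, tcb1.parent->name); /* tcb1_clk -> tcb0_clk */
		printf("%s -> %s\n", tcb0.name, tcb0.parent->name); /* tcb0_clk -> mck */
		return 0;
	}
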
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c index 3d996b659ff4..9be261beae7d 100644 --- a/arch/arm/mach-davinci/dm355.c +++ b/arch/arm/mach-davinci/dm355.c | |||
@@ -769,8 +769,7 @@ static struct map_desc dm355_io_desc[] = { | |||
769 | .virtual = SRAM_VIRT, | 769 | .virtual = SRAM_VIRT, |
770 | .pfn = __phys_to_pfn(0x00010000), | 770 | .pfn = __phys_to_pfn(0x00010000), |
771 | .length = SZ_32K, | 771 | .length = SZ_32K, |
772 | /* MT_MEMORY_NONCACHED requires supersection alignment */ | 772 | .type = MT_MEMORY_NONCACHED, |
773 | .type = MT_DEVICE, | ||
774 | }, | 773 | }, |
775 | }; | 774 | }; |
776 | 775 | ||
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c index 6b6f4c643709..7781e35daec3 100644 --- a/arch/arm/mach-davinci/dm365.c +++ b/arch/arm/mach-davinci/dm365.c | |||
@@ -969,8 +969,7 @@ static struct map_desc dm365_io_desc[] = { | |||
969 | .virtual = SRAM_VIRT, | 969 | .virtual = SRAM_VIRT, |
970 | .pfn = __phys_to_pfn(0x00010000), | 970 | .pfn = __phys_to_pfn(0x00010000), |
971 | .length = SZ_32K, | 971 | .length = SZ_32K, |
972 | /* MT_MEMORY_NONCACHED requires supersection alignment */ | 972 | .type = MT_MEMORY_NONCACHED, |
973 | .type = MT_DEVICE, | ||
974 | }, | 973 | }, |
975 | }; | 974 | }; |
976 | 975 | ||
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c index 40fec315c99a..5e5b0a7831fb 100644 --- a/arch/arm/mach-davinci/dm644x.c +++ b/arch/arm/mach-davinci/dm644x.c | |||
@@ -653,8 +653,7 @@ static struct map_desc dm644x_io_desc[] = { | |||
653 | .virtual = SRAM_VIRT, | 653 | .virtual = SRAM_VIRT, |
654 | .pfn = __phys_to_pfn(0x00008000), | 654 | .pfn = __phys_to_pfn(0x00008000), |
655 | .length = SZ_16K, | 655 | .length = SZ_16K, |
656 | /* MT_MEMORY_NONCACHED requires supersection alignment */ | 656 | .type = MT_MEMORY_NONCACHED, |
657 | .type = MT_DEVICE, | ||
658 | }, | 657 | }, |
659 | }; | 658 | }; |
660 | 659 | ||
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c index e4a3df1872ac..26e8a9c7f50b 100644 --- a/arch/arm/mach-davinci/dm646x.c +++ b/arch/arm/mach-davinci/dm646x.c | |||
@@ -737,8 +737,7 @@ static struct map_desc dm646x_io_desc[] = { | |||
737 | .virtual = SRAM_VIRT, | 737 | .virtual = SRAM_VIRT, |
738 | .pfn = __phys_to_pfn(0x00010000), | 738 | .pfn = __phys_to_pfn(0x00010000), |
739 | .length = SZ_32K, | 739 | .length = SZ_32K, |
740 | /* MT_MEMORY_NONCACHED requires supersection alignment */ | 740 | .type = MT_MEMORY_NONCACHED, |
741 | .type = MT_DEVICE, | ||
742 | }, | 741 | }, |
743 | }; | 742 | }; |
744 | 743 | ||
diff --git a/arch/arm/mach-dove/include/mach/io.h b/arch/arm/mach-dove/include/mach/io.h index 3b3e4721ce2e..eb4936ff90ad 100644 --- a/arch/arm/mach-dove/include/mach/io.h +++ b/arch/arm/mach-dove/include/mach/io.h | |||
@@ -13,8 +13,8 @@ | |||
13 | 13 | ||
14 | #define IO_SPACE_LIMIT 0xffffffff | 14 | #define IO_SPACE_LIMIT 0xffffffff |
15 | 15 | ||
16 | #define __io(a) ((void __iomem *)(((a) - DOVE_PCIE0_IO_PHYS_BASE) +\ | 16 | #define __io(a) ((void __iomem *)(((a) - DOVE_PCIE0_IO_BUS_BASE) + \ |
17 | DOVE_PCIE0_IO_VIRT_BASE)) | 17 | DOVE_PCIE0_IO_VIRT_BASE)) |
18 | #define __mem_pci(a) (a) | 18 | #define __mem_pci(a) (a) |
19 | 19 | ||
20 | #endif | 20 | #endif |
diff --git a/arch/arm/mach-ep93xx/clock.c b/arch/arm/mach-ep93xx/clock.c index 8bf3cec98cfa..4566bd1c8660 100644 --- a/arch/arm/mach-ep93xx/clock.c +++ b/arch/arm/mach-ep93xx/clock.c | |||
@@ -560,4 +560,4 @@ static int __init ep93xx_clock_init(void) | |||
560 | clkdev_add_table(clocks, ARRAY_SIZE(clocks)); | 560 | clkdev_add_table(clocks, ARRAY_SIZE(clocks)); |
561 | return 0; | 561 | return 0; |
562 | } | 562 | } |
563 | arch_initcall(ep93xx_clock_init); | 563 | postcore_initcall(ep93xx_clock_init); |
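
Switching ep93xx_clock_init() from arch_initcall to postcore_initcall moves it one level earlier in the boot sequence, so the clkdev table is populated before arch-level (and later) init code starts looking clocks up. A sketch of the ordering guarantee (placeholder function bodies; the initcall macros are the standard kernel ones):

	/* Initcall levels run in this order during boot:
	 *   pure -> core -> postcore -> arch -> subsys -> fs -> device -> late
	 * so a postcore_initcall provider always precedes an arch_initcall
	 * (or later) consumer. */
	#include <linux/init.h>

	static int __init example_clock_provider_init(void)
	{
		/* register clkdev lookups here */
		return 0;
	}
	postcore_initcall(example_clock_provider_init);

	static int __init example_consumer_init(void)
	{
		/* safe to clk_get() what the provider registered */
		return 0;
	}
	arch_initcall(example_consumer_init);
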
diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c index 61cd4d64b985..24498a932ba6 100644 --- a/arch/arm/mach-ixp4xx/common-pci.c +++ b/arch/arm/mach-ixp4xx/common-pci.c | |||
@@ -503,6 +503,14 @@ struct pci_bus * __devinit ixp4xx_scan_bus(int nr, struct pci_sys_data *sys) | |||
503 | return pci_scan_bus(sys->busnr, &ixp4xx_ops, sys); | 503 | return pci_scan_bus(sys->busnr, &ixp4xx_ops, sys); |
504 | } | 504 | } |
505 | 505 | ||
506 | int dma_set_coherent_mask(struct device *dev, u64 mask) | ||
507 | { | ||
508 | if (mask >= SZ_64M - 1) | ||
509 | return 0; | ||
510 | |||
511 | return -EIO; | ||
512 | } | ||
513 | |||
506 | EXPORT_SYMBOL(ixp4xx_pci_read); | 514 | EXPORT_SYMBOL(ixp4xx_pci_read); |
507 | EXPORT_SYMBOL(ixp4xx_pci_write); | 515 | EXPORT_SYMBOL(ixp4xx_pci_write); |
508 | 516 | ||
diff --git a/arch/arm/mach-ixp4xx/include/mach/hardware.h b/arch/arm/mach-ixp4xx/include/mach/hardware.h index f91ca6d4fbe8..8138371c406e 100644 --- a/arch/arm/mach-ixp4xx/include/mach/hardware.h +++ b/arch/arm/mach-ixp4xx/include/mach/hardware.h | |||
@@ -26,6 +26,8 @@ | |||
26 | #define PCIBIOS_MAX_MEM 0x4BFFFFFF | 26 | #define PCIBIOS_MAX_MEM 0x4BFFFFFF |
27 | #endif | 27 | #endif |
28 | 28 | ||
29 | #define ARCH_HAS_DMA_SET_COHERENT_MASK | ||
30 | |||
29 | #define pcibios_assign_all_busses() 1 | 31 | #define pcibios_assign_all_busses() 1 |
30 | 32 | ||
31 | /* Register locations and bits */ | 33 | /* Register locations and bits */ |
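
Together with the ARCH_HAS_DMA_SET_COHERENT_MASK define above, the new dma_set_coherent_mask() replaces the generic implementation on IXP4xx: any coherent mask that covers at least the low 64 MiB window is accepted, smaller masks get -EIO. A caller-side sketch (the probe function is hypothetical; dma_set_coherent_mask() and DMA_BIT_MASK() are the standard API):

	#include <linux/dma-mapping.h>
	#include <linux/device.h>

	static int example_probe(struct device *dev)
	{
		/* Fine on IXP4xx: a 32-bit mask covers the 64 MiB window. */
		if (dma_set_coherent_mask(dev, DMA_BIT_MASK(32)))
			return -EIO;

		/* Would be rejected there: 16 MiB cannot reach the window. */
		if (dma_set_coherent_mask(dev, DMA_BIT_MASK(24)))
			dev_warn(dev, "24-bit coherent mask rejected\n");

		return 0;
	}
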
diff --git a/arch/arm/mach-kirkwood/include/mach/kirkwood.h b/arch/arm/mach-kirkwood/include/mach/kirkwood.h index 93fc2ec95e76..6e924b398919 100644 --- a/arch/arm/mach-kirkwood/include/mach/kirkwood.h +++ b/arch/arm/mach-kirkwood/include/mach/kirkwood.h | |||
@@ -38,7 +38,7 @@ | |||
38 | 38 | ||
39 | #define KIRKWOOD_PCIE1_IO_PHYS_BASE 0xf3000000 | 39 | #define KIRKWOOD_PCIE1_IO_PHYS_BASE 0xf3000000 |
40 | #define KIRKWOOD_PCIE1_IO_VIRT_BASE 0xfef00000 | 40 | #define KIRKWOOD_PCIE1_IO_VIRT_BASE 0xfef00000 |
41 | #define KIRKWOOD_PCIE1_IO_BUS_BASE 0x00000000 | 41 | #define KIRKWOOD_PCIE1_IO_BUS_BASE 0x00100000 |
42 | #define KIRKWOOD_PCIE1_IO_SIZE SZ_1M | 42 | #define KIRKWOOD_PCIE1_IO_SIZE SZ_1M |
43 | 43 | ||
44 | #define KIRKWOOD_PCIE_IO_PHYS_BASE 0xf2000000 | 44 | #define KIRKWOOD_PCIE_IO_PHYS_BASE 0xf2000000 |
diff --git a/arch/arm/mach-kirkwood/pcie.c b/arch/arm/mach-kirkwood/pcie.c index 55e7f00836b7..513ad3102d7c 100644 --- a/arch/arm/mach-kirkwood/pcie.c +++ b/arch/arm/mach-kirkwood/pcie.c | |||
@@ -117,7 +117,7 @@ static void __init pcie0_ioresources_init(struct pcie_port *pp) | |||
117 | * IORESOURCE_IO | 117 | * IORESOURCE_IO |
118 | */ | 118 | */ |
119 | pp->res[0].name = "PCIe 0 I/O Space"; | 119 | pp->res[0].name = "PCIe 0 I/O Space"; |
120 | pp->res[0].start = KIRKWOOD_PCIE_IO_PHYS_BASE; | 120 | pp->res[0].start = KIRKWOOD_PCIE_IO_BUS_BASE; |
121 | pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE_IO_SIZE - 1; | 121 | pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE_IO_SIZE - 1; |
122 | pp->res[0].flags = IORESOURCE_IO; | 122 | pp->res[0].flags = IORESOURCE_IO; |
123 | 123 | ||
@@ -139,7 +139,7 @@ static void __init pcie1_ioresources_init(struct pcie_port *pp) | |||
139 | * IORESOURCE_IO | 139 | * IORESOURCE_IO |
140 | */ | 140 | */ |
141 | pp->res[0].name = "PCIe 1 I/O Space"; | 141 | pp->res[0].name = "PCIe 1 I/O Space"; |
142 | pp->res[0].start = KIRKWOOD_PCIE1_IO_PHYS_BASE; | 142 | pp->res[0].start = KIRKWOOD_PCIE1_IO_BUS_BASE; |
143 | pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE1_IO_SIZE - 1; | 143 | pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE1_IO_SIZE - 1; |
144 | pp->res[0].flags = IORESOURCE_IO; | 144 | pp->res[0].flags = IORESOURCE_IO; |
145 | 145 | ||
diff --git a/arch/arm/mach-mmp/include/mach/system.h b/arch/arm/mach-mmp/include/mach/system.h index 4f5b0e0ce6cf..1a8a25edb1b4 100644 --- a/arch/arm/mach-mmp/include/mach/system.h +++ b/arch/arm/mach-mmp/include/mach/system.h | |||
@@ -9,6 +9,8 @@ | |||
9 | #ifndef __ASM_MACH_SYSTEM_H | 9 | #ifndef __ASM_MACH_SYSTEM_H |
10 | #define __ASM_MACH_SYSTEM_H | 10 | #define __ASM_MACH_SYSTEM_H |
11 | 11 | ||
12 | #include <mach/cputype.h> | ||
13 | |||
12 | static inline void arch_idle(void) | 14 | static inline void arch_idle(void) |
13 | { | 15 | { |
14 | cpu_do_idle(); | 16 | cpu_do_idle(); |
@@ -16,6 +18,9 @@ static inline void arch_idle(void) | |||
16 | 18 | ||
17 | static inline void arch_reset(char mode, const char *cmd) | 19 | static inline void arch_reset(char mode, const char *cmd) |
18 | { | 20 | { |
19 | cpu_reset(0); | 21 | if (cpu_is_pxa168()) |
22 | cpu_reset(0xffff0000); | ||
23 | else | ||
24 | cpu_reset(0); | ||
20 | } | 25 | } |
21 | #endif /* __ASM_MACH_SYSTEM_H */ | 26 | #endif /* __ASM_MACH_SYSTEM_H */ |
diff --git a/arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c b/arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c index 91931dcb0689..4aaadc753d3e 100644 --- a/arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c +++ b/arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c | |||
@@ -215,7 +215,7 @@ struct imx_ssi_platform_data eukrea_mbimxsd_ssi_pdata = { | |||
215 | * Add platform devices present on this baseboard and init | 215 | * Add platform devices present on this baseboard and init |
216 | * them from CPU side as far as required to use them later on | 216 | * them from CPU side as far as required to use them later on |
217 | */ | 217 | */ |
218 | void __init eukrea_mbimxsd_baseboard_init(void) | 218 | void __init eukrea_mbimxsd25_baseboard_init(void) |
219 | { | 219 | { |
220 | if (mxc_iomux_v3_setup_multiple_pads(eukrea_mbimxsd_pads, | 220 | if (mxc_iomux_v3_setup_multiple_pads(eukrea_mbimxsd_pads, |
221 | ARRAY_SIZE(eukrea_mbimxsd_pads))) | 221 | ARRAY_SIZE(eukrea_mbimxsd_pads))) |
diff --git a/arch/arm/mach-mx25/mach-cpuimx25.c b/arch/arm/mach-mx25/mach-cpuimx25.c index a5f0174290b4..e064bb3d6919 100644 --- a/arch/arm/mach-mx25/mach-cpuimx25.c +++ b/arch/arm/mach-mx25/mach-cpuimx25.c | |||
@@ -147,8 +147,8 @@ static void __init eukrea_cpuimx25_init(void) | |||
147 | if (!otg_mode_host) | 147 | if (!otg_mode_host) |
148 | mxc_register_device(&otg_udc_device, &otg_device_pdata); | 148 | mxc_register_device(&otg_udc_device, &otg_device_pdata); |
149 | 149 | ||
150 | #ifdef CONFIG_MACH_EUKREA_MBIMXSD_BASEBOARD | 150 | #ifdef CONFIG_MACH_EUKREA_MBIMXSD25_BASEBOARD |
151 | eukrea_mbimxsd_baseboard_init(); | 151 | eukrea_mbimxsd25_baseboard_init(); |
152 | #endif | 152 | #endif |
153 | } | 153 | } |
154 | 154 | ||
diff --git a/arch/arm/mach-mx3/clock-imx35.c b/arch/arm/mach-mx3/clock-imx35.c index d3af0fdf8475..7a62e744a8b0 100644 --- a/arch/arm/mach-mx3/clock-imx35.c +++ b/arch/arm/mach-mx3/clock-imx35.c | |||
@@ -155,7 +155,7 @@ static unsigned long get_rate_arm(void) | |||
155 | 155 | ||
156 | aad = &clk_consumer[(pdr0 >> 16) & 0xf]; | 156 | aad = &clk_consumer[(pdr0 >> 16) & 0xf]; |
157 | if (aad->sel) | 157 | if (aad->sel) |
158 | fref = fref * 2 / 3; | 158 | fref = fref * 3 / 4; |
159 | 159 | ||
160 | return fref / aad->arm; | 160 | return fref / aad->arm; |
161 | } | 161 | } |
@@ -164,7 +164,7 @@ static unsigned long get_rate_ahb(struct clk *clk) | |||
164 | { | 164 | { |
165 | unsigned long pdr0 = __raw_readl(CCM_BASE + CCM_PDR0); | 165 | unsigned long pdr0 = __raw_readl(CCM_BASE + CCM_PDR0); |
166 | struct arm_ahb_div *aad; | 166 | struct arm_ahb_div *aad; |
167 | unsigned long fref = get_rate_mpll(); | 167 | unsigned long fref = get_rate_arm(); |
168 | 168 | ||
169 | aad = &clk_consumer[(pdr0 >> 16) & 0xf]; | 169 | aad = &clk_consumer[(pdr0 >> 16) & 0xf]; |
170 | 170 | ||
@@ -176,16 +176,11 @@ static unsigned long get_rate_ipg(struct clk *clk) | |||
176 | return get_rate_ahb(NULL) >> 1; | 176 | return get_rate_ahb(NULL) >> 1; |
177 | } | 177 | } |
178 | 178 | ||
179 | static unsigned long get_3_3_div(unsigned long in) | ||
180 | { | ||
181 | return (((in >> 3) & 0x7) + 1) * ((in & 0x7) + 1); | ||
182 | } | ||
183 | |||
184 | static unsigned long get_rate_uart(struct clk *clk) | 179 | static unsigned long get_rate_uart(struct clk *clk) |
185 | { | 180 | { |
186 | unsigned long pdr3 = __raw_readl(CCM_BASE + CCM_PDR3); | 181 | unsigned long pdr3 = __raw_readl(CCM_BASE + CCM_PDR3); |
187 | unsigned long pdr4 = __raw_readl(CCM_BASE + CCM_PDR4); | 182 | unsigned long pdr4 = __raw_readl(CCM_BASE + CCM_PDR4); |
188 | unsigned long div = get_3_3_div(pdr4 >> 10); | 183 | unsigned long div = ((pdr4 >> 10) & 0x3f) + 1; |
189 | 184 | ||
190 | if (pdr3 & (1 << 14)) | 185 | if (pdr3 & (1 << 14)) |
191 | return get_rate_arm() / div; | 186 | return get_rate_arm() / div; |
@@ -216,7 +211,7 @@ static unsigned long get_rate_sdhc(struct clk *clk) | |||
216 | break; | 211 | break; |
217 | } | 212 | } |
218 | 213 | ||
219 | return rate / get_3_3_div(div); | 214 | return rate / (div + 1); |
220 | } | 215 | } |
221 | 216 | ||
222 | static unsigned long get_rate_mshc(struct clk *clk) | 217 | static unsigned long get_rate_mshc(struct clk *clk) |
@@ -270,7 +265,7 @@ static unsigned long get_rate_csi(struct clk *clk) | |||
270 | else | 265 | else |
271 | rate = get_rate_ppll(); | 266 | rate = get_rate_ppll(); |
272 | 267 | ||
273 | return rate / get_3_3_div((pdr2 >> 16) & 0x3f); | 268 | return rate / (((pdr2 >> 16) & 0x3f) + 1); |
274 | } | 269 | } |
275 | 270 | ||
276 | static unsigned long get_rate_otg(struct clk *clk) | 271 | static unsigned long get_rate_otg(struct clk *clk) |
@@ -283,25 +278,51 @@ static unsigned long get_rate_otg(struct clk *clk) | |||
283 | else | 278 | else |
284 | rate = get_rate_ppll(); | 279 | rate = get_rate_ppll(); |
285 | 280 | ||
286 | return rate / get_3_3_div((pdr4 >> 22) & 0x3f); | 281 | return rate / (((pdr4 >> 22) & 0x3f) + 1); |
287 | } | 282 | } |
288 | 283 | ||
289 | static unsigned long get_rate_ipg_per(struct clk *clk) | 284 | static unsigned long get_rate_ipg_per(struct clk *clk) |
290 | { | 285 | { |
291 | unsigned long pdr0 = __raw_readl(CCM_BASE + CCM_PDR0); | 286 | unsigned long pdr0 = __raw_readl(CCM_BASE + CCM_PDR0); |
292 | unsigned long pdr4 = __raw_readl(CCM_BASE + CCM_PDR4); | 287 | unsigned long pdr4 = __raw_readl(CCM_BASE + CCM_PDR4); |
293 | unsigned long div1, div2; | 288 | unsigned long div; |
294 | 289 | ||
295 | if (pdr0 & (1 << 26)) { | 290 | if (pdr0 & (1 << 26)) { |
296 | div1 = (pdr4 >> 19) & 0x7; | 291 | div = (pdr4 >> 16) & 0x3f; |
297 | div2 = (pdr4 >> 16) & 0x7; | 292 | return get_rate_arm() / (div + 1); |
298 | return get_rate_arm() / ((div1 + 1) * (div2 + 1)); | ||
299 | } else { | 293 | } else { |
300 | div1 = (pdr0 >> 12) & 0x7; | 294 | div = (pdr0 >> 12) & 0x7; |
301 | return get_rate_ahb(NULL) / div1; | 295 | return get_rate_ahb(NULL) / (div + 1); |
302 | } | 296 | } |
303 | } | 297 | } |
304 | 298 | ||
299 | static unsigned long get_rate_hsp(struct clk *clk) | ||
300 | { | ||
301 | unsigned long hsp_podf = (__raw_readl(CCM_BASE + CCM_PDR0) >> 20) & 0x03; | ||
302 | unsigned long fref = get_rate_mpll(); | ||
303 | |||
304 | if (fref > 400 * 1000 * 1000) { | ||
305 | switch (hsp_podf) { | ||
306 | case 0: | ||
307 | return fref >> 2; | ||
308 | case 1: | ||
309 | return fref >> 3; | ||
310 | case 2: | ||
311 | return fref / 3; | ||
312 | } | ||
313 | } else { | ||
314 | switch (hsp_podf) { | ||
315 | case 0: | ||
316 | case 2: | ||
317 | return fref / 3; | ||
318 | case 1: | ||
319 | return fref / 6; | ||
320 | } | ||
321 | } | ||
322 | |||
323 | return 0; | ||
324 | } | ||
325 | |||
305 | static int clk_cgr_enable(struct clk *clk) | 326 | static int clk_cgr_enable(struct clk *clk) |
306 | { | 327 | { |
307 | u32 reg; | 328 | u32 reg; |
@@ -359,7 +380,7 @@ DEFINE_CLOCK(i2c1_clk, 0, CCM_CGR1, 10, get_rate_ipg_per, NULL); | |||
359 | DEFINE_CLOCK(i2c2_clk, 1, CCM_CGR1, 12, get_rate_ipg_per, NULL); | 380 | DEFINE_CLOCK(i2c2_clk, 1, CCM_CGR1, 12, get_rate_ipg_per, NULL); |
360 | DEFINE_CLOCK(i2c3_clk, 2, CCM_CGR1, 14, get_rate_ipg_per, NULL); | 381 | DEFINE_CLOCK(i2c3_clk, 2, CCM_CGR1, 14, get_rate_ipg_per, NULL); |
361 | DEFINE_CLOCK(iomuxc_clk, 0, CCM_CGR1, 16, NULL, NULL); | 382 | DEFINE_CLOCK(iomuxc_clk, 0, CCM_CGR1, 16, NULL, NULL); |
362 | DEFINE_CLOCK(ipu_clk, 0, CCM_CGR1, 18, get_rate_ahb, NULL); | 383 | DEFINE_CLOCK(ipu_clk, 0, CCM_CGR1, 18, get_rate_hsp, NULL); |
363 | DEFINE_CLOCK(kpp_clk, 0, CCM_CGR1, 20, get_rate_ipg, NULL); | 384 | DEFINE_CLOCK(kpp_clk, 0, CCM_CGR1, 20, get_rate_ipg, NULL); |
364 | DEFINE_CLOCK(mlb_clk, 0, CCM_CGR1, 22, get_rate_ahb, NULL); | 385 | DEFINE_CLOCK(mlb_clk, 0, CCM_CGR1, 22, get_rate_ahb, NULL); |
365 | DEFINE_CLOCK(mshc_clk, 0, CCM_CGR1, 24, get_rate_mshc, NULL); | 386 | DEFINE_CLOCK(mshc_clk, 0, CCM_CGR1, 24, get_rate_mshc, NULL); |
@@ -485,10 +506,10 @@ static struct clk_lookup lookups[] = { | |||
485 | 506 | ||
486 | int __init mx35_clocks_init() | 507 | int __init mx35_clocks_init() |
487 | { | 508 | { |
488 | unsigned int ll = 0; | 509 | unsigned int cgr2 = 3 << 26, cgr3 = 0; |
489 | 510 | ||
490 | #if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC) | 511 | #if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC) |
491 | ll = (3 << 16); | 512 | cgr2 |= 3 << 16; |
492 | #endif | 513 | #endif |
493 | 514 | ||
494 | clkdev_add_table(lookups, ARRAY_SIZE(lookups)); | 515 | clkdev_add_table(lookups, ARRAY_SIZE(lookups)); |
@@ -499,8 +520,20 @@ int __init mx35_clocks_init() | |||
499 | __raw_writel((3 << 18), CCM_BASE + CCM_CGR0); | 520 | __raw_writel((3 << 18), CCM_BASE + CCM_CGR0); |
500 | __raw_writel((3 << 2) | (3 << 4) | (3 << 6) | (3 << 8) | (3 << 16), | 521 | __raw_writel((3 << 2) | (3 << 4) | (3 << 6) | (3 << 8) | (3 << 16), |
501 | CCM_BASE + CCM_CGR1); | 522 | CCM_BASE + CCM_CGR1); |
502 | __raw_writel((3 << 26) | ll, CCM_BASE + CCM_CGR2); | 523 | |
503 | __raw_writel(0, CCM_BASE + CCM_CGR3); | 524 | /* |
525 | * Check if we came up in internal boot mode. If yes, we need some | ||
526 | * extra clocks turned on, otherwise the MX35 boot ROM code will | ||
527 | * hang after a watchdog reset. | ||
528 | */ | ||
529 | if (!(__raw_readl(CCM_BASE + CCM_RCSR) & (3 << 10))) { | ||
530 | /* Additionally turn on UART1, SCC, and IIM clocks */ | ||
531 | cgr2 |= 3 << 16 | 3 << 4; | ||
532 | cgr3 |= 3 << 2; | ||
533 | } | ||
534 | |||
535 | __raw_writel(cgr2, CCM_BASE + CCM_CGR2); | ||
536 | __raw_writel(cgr3, CCM_BASE + CCM_CGR3); | ||
504 | 537 | ||
505 | mxc_timer_init(&gpt_clk, | 538 | mxc_timer_init(&gpt_clk, |
506 | MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR), MX35_INT_GPT); | 539 | MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR), MX35_INT_GPT); |
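
The new RCSR check gates the extra clock gates on boot mode: when the two boot-mode bits (11:10) read zero the chip came up through the internal boot ROM, and UART1, SCC and IIM must stay clocked or the ROM hangs after a watchdog reset. A stripped-down model of that decision (the register value is passed in directly and the helper names are invented):

	#include <stdbool.h>
	#include <stdint.h>

	#define RCSR_BOOT_MODE_MASK	(3u << 10)	/* bits 11:10 */

	static bool booted_from_internal_rom(uint32_t rcsr)
	{
		/* 0b00 in bits 11:10 means the on-chip boot ROM was used. */
		return (rcsr & RCSR_BOOT_MODE_MASK) == 0;
	}

	static uint32_t pick_cgr2(uint32_t rcsr)
	{
		uint32_t cgr2 = 3u << 26;		/* always-on gates */

		if (booted_from_internal_rom(rcsr))
			cgr2 |= (3u << 16) | (3u << 4);	/* keep UART1 and SCC on */

		return cgr2;
	}
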
diff --git a/arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c b/arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c index 1dc5004df866..f8f15e3ac7a0 100644 --- a/arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c +++ b/arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c | |||
@@ -216,7 +216,7 @@ struct imx_ssi_platform_data eukrea_mbimxsd_ssi_pdata = { | |||
216 | * Add platform devices present on this baseboard and init | 216 | * Add platform devices present on this baseboard and init |
217 | * them from CPU side as far as required to use them later on | 217 | * them from CPU side as far as required to use them later on |
218 | */ | 218 | */ |
219 | void __init eukrea_mbimxsd_baseboard_init(void) | 219 | void __init eukrea_mbimxsd35_baseboard_init(void) |
220 | { | 220 | { |
221 | if (mxc_iomux_v3_setup_multiple_pads(eukrea_mbimxsd_pads, | 221 | if (mxc_iomux_v3_setup_multiple_pads(eukrea_mbimxsd_pads, |
222 | ARRAY_SIZE(eukrea_mbimxsd_pads))) | 222 | ARRAY_SIZE(eukrea_mbimxsd_pads))) |
diff --git a/arch/arm/mach-mx3/mach-cpuimx35.c b/arch/arm/mach-mx3/mach-cpuimx35.c index 9770a6a973be..2a4f8b781ba4 100644 --- a/arch/arm/mach-mx3/mach-cpuimx35.c +++ b/arch/arm/mach-mx3/mach-cpuimx35.c | |||
@@ -201,8 +201,8 @@ static void __init mxc_board_init(void) | |||
201 | if (!otg_mode_host) | 201 | if (!otg_mode_host) |
202 | mxc_register_device(&mxc_otg_udc_device, &otg_device_pdata); | 202 | mxc_register_device(&mxc_otg_udc_device, &otg_device_pdata); |
203 | 203 | ||
204 | #ifdef CONFIG_MACH_EUKREA_MBIMXSD_BASEBOARD | 204 | #ifdef CONFIG_MACH_EUKREA_MBIMXSD35_BASEBOARD |
205 | eukrea_mbimxsd_baseboard_init(); | 205 | eukrea_mbimxsd35_baseboard_init(); |
206 | #endif | 206 | #endif |
207 | } | 207 | } |
208 | 208 | ||
diff --git a/arch/arm/mach-mx5/clock-mx51.c b/arch/arm/mach-mx5/clock-mx51.c index 6af69def357f..57c10a9926cc 100644 --- a/arch/arm/mach-mx5/clock-mx51.c +++ b/arch/arm/mach-mx5/clock-mx51.c | |||
@@ -56,7 +56,7 @@ static void _clk_ccgr_disable(struct clk *clk) | |||
56 | { | 56 | { |
57 | u32 reg; | 57 | u32 reg; |
58 | reg = __raw_readl(clk->enable_reg); | 58 | reg = __raw_readl(clk->enable_reg); |
59 | reg &= ~(MXC_CCM_CCGRx_MOD_OFF << clk->enable_shift); | 59 | reg &= ~(MXC_CCM_CCGRx_CG_MASK << clk->enable_shift); |
60 | __raw_writel(reg, clk->enable_reg); | 60 | __raw_writel(reg, clk->enable_reg); |
61 | 61 | ||
62 | } | 62 | } |
diff --git a/arch/arm/mach-pxa/cpufreq-pxa2xx.c b/arch/arm/mach-pxa/cpufreq-pxa2xx.c index 268a9bc6be8a..58093d9e07be 100644 --- a/arch/arm/mach-pxa/cpufreq-pxa2xx.c +++ b/arch/arm/mach-pxa/cpufreq-pxa2xx.c | |||
@@ -312,8 +312,7 @@ static int pxa_set_target(struct cpufreq_policy *policy, | |||
312 | freqs.cpu = policy->cpu; | 312 | freqs.cpu = policy->cpu; |
313 | 313 | ||
314 | if (freq_debug) | 314 | if (freq_debug) |
315 | pr_debug(KERN_INFO "Changing CPU frequency to %d Mhz, " | 315 | pr_debug("Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n", |
316 | "(SDRAM %d Mhz)\n", | ||
317 | freqs.new / 1000, (pxa_freq_settings[idx].div2) ? | 316 | freqs.new / 1000, (pxa_freq_settings[idx].div2) ? |
318 | (new_freq_mem / 2000) : (new_freq_mem / 1000)); | 317 | (new_freq_mem / 2000) : (new_freq_mem / 1000)); |
319 | 318 | ||
@@ -398,7 +397,7 @@ static int pxa_set_target(struct cpufreq_policy *policy, | |||
398 | return 0; | 397 | return 0; |
399 | } | 398 | } |
400 | 399 | ||
401 | static __init int pxa_cpufreq_init(struct cpufreq_policy *policy) | 400 | static int pxa_cpufreq_init(struct cpufreq_policy *policy) |
402 | { | 401 | { |
403 | int i; | 402 | int i; |
404 | unsigned int freq; | 403 | unsigned int freq; |
diff --git a/arch/arm/mach-pxa/cpufreq-pxa3xx.c b/arch/arm/mach-pxa/cpufreq-pxa3xx.c index 27fa329d9a8b..0a0d0fe99220 100644 --- a/arch/arm/mach-pxa/cpufreq-pxa3xx.c +++ b/arch/arm/mach-pxa/cpufreq-pxa3xx.c | |||
@@ -204,7 +204,7 @@ static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy, | |||
204 | return 0; | 204 | return 0; |
205 | } | 205 | } |
206 | 206 | ||
207 | static __init int pxa3xx_cpufreq_init(struct cpufreq_policy *policy) | 207 | static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy) |
208 | { | 208 | { |
209 | int ret = -EINVAL; | 209 | int ret = -EINVAL; |
210 | 210 | ||
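
Dropping __init from the pxa cpufreq ->init() callbacks matters because that hook is not boot-only: it runs again whenever a policy is created, for instance after CPU hotplug or late driver registration, by which point __init text has been discarded. A sketch of the registration pattern that makes the lifetime visible (driver name is hypothetical; most cpufreq_driver fields are omitted for brevity):

	#include <linux/cpufreq.h>
	#include <linux/module.h>

	static int example_cpufreq_init(struct cpufreq_policy *policy)
	{
		/* runs each time the policy is (re)created, e.g. on CPU
		 * hotplug, so it must not be __init */
		return 0;
	}

	static struct cpufreq_driver example_cpufreq_driver = {
		.name	= "example",
		.init	= example_cpufreq_init,
		/* .verify/.target etc. omitted in this sketch */
	};

	static int __init example_cpufreq_register(void)
	{
		/* the one-shot registration call itself can stay __init */
		return cpufreq_register_driver(&example_cpufreq_driver);
	}
	module_init(example_cpufreq_register);
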
diff --git a/arch/arm/mach-pxa/include/mach/hardware.h b/arch/arm/mach-pxa/include/mach/hardware.h index 7f64d24cd564..814f1458a06a 100644 --- a/arch/arm/mach-pxa/include/mach/hardware.h +++ b/arch/arm/mach-pxa/include/mach/hardware.h | |||
@@ -264,23 +264,35 @@ | |||
264 | * <= 0x2 for pxa21x/pxa25x/pxa26x/pxa27x | 264 | * <= 0x2 for pxa21x/pxa25x/pxa26x/pxa27x |
265 | * == 0x3 for pxa300/pxa310/pxa320 | 265 | * == 0x3 for pxa300/pxa310/pxa320 |
266 | */ | 266 | */ |
267 | #if defined(CONFIG_PXA25x) || defined(CONFIG_PXA27x) | ||
267 | #define __cpu_is_pxa2xx(id) \ | 268 | #define __cpu_is_pxa2xx(id) \ |
268 | ({ \ | 269 | ({ \ |
269 | unsigned int _id = (id) >> 13 & 0x7; \ | 270 | unsigned int _id = (id) >> 13 & 0x7; \ |
270 | _id <= 0x2; \ | 271 | _id <= 0x2; \ |
271 | }) | 272 | }) |
273 | #else | ||
274 | #define __cpu_is_pxa2xx(id) (0) | ||
275 | #endif | ||
272 | 276 | ||
277 | #ifdef CONFIG_PXA3xx | ||
273 | #define __cpu_is_pxa3xx(id) \ | 278 | #define __cpu_is_pxa3xx(id) \ |
274 | ({ \ | 279 | ({ \ |
275 | unsigned int _id = (id) >> 13 & 0x7; \ | 280 | unsigned int _id = (id) >> 13 & 0x7; \ |
276 | _id == 0x3; \ | 281 | _id == 0x3; \ |
277 | }) | 282 | }) |
283 | #else | ||
284 | #define __cpu_is_pxa3xx(id) (0) | ||
285 | #endif | ||
278 | 286 | ||
287 | #if defined(CONFIG_CPU_PXA930) || defined(CONFIG_CPU_PXA935) | ||
279 | #define __cpu_is_pxa93x(id) \ | 288 | #define __cpu_is_pxa93x(id) \ |
280 | ({ \ | 289 | ({ \ |
281 | unsigned int _id = (id) >> 4 & 0xfff; \ | 290 | unsigned int _id = (id) >> 4 & 0xfff; \ |
282 | _id == 0x683 || _id == 0x693; \ | 291 | _id == 0x683 || _id == 0x693; \ |
283 | }) | 292 | }) |
293 | #else | ||
294 | #define __cpu_is_pxa93x(id) (0) | ||
295 | #endif | ||
284 | 296 | ||
285 | #define cpu_is_pxa2xx() \ | 297 | #define cpu_is_pxa2xx() \ |
286 | ({ \ | 298 | ({ \ |
@@ -309,7 +321,7 @@ extern unsigned long get_clock_tick_rate(void); | |||
309 | #define PCIBIOS_MIN_IO 0 | 321 | #define PCIBIOS_MIN_IO 0 |
310 | #define PCIBIOS_MIN_MEM 0 | 322 | #define PCIBIOS_MIN_MEM 0 |
311 | #define pcibios_assign_all_busses() 1 | 323 | #define pcibios_assign_all_busses() 1 |
324 | #define ARCH_HAS_DMA_SET_COHERENT_MASK | ||
312 | #endif | 325 | #endif |
313 | 326 | ||
314 | |||
315 | #endif /* _ASM_ARCH_HARDWARE_H */ | 327 | #endif /* _ASM_ARCH_HARDWARE_H */ |
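
Guarding each __cpu_is_pxa*() test with the matching CONFIG_ option means a kernel built without that CPU family sees a constant 0, so the compiler drops the whole branch and anything only it called. A reduced illustration of the pattern (the config symbol and quirk function are invented):

	/* When the family is not configured the predicate becomes the
	 * constant 0 and dead-code elimination removes the branch. */
	#ifdef CONFIG_CPU_FAMILY_X		/* invented symbol */
	#define cpu_is_family_x(id)	(((id) >> 13 & 0x7) == 0x3)
	#else
	#define cpu_is_family_x(id)	(0)
	#endif

	static void family_x_quirk(void)
	{
		/* workaround body */
	}

	void apply_quirks(unsigned int chip_id)
	{
		if (cpu_is_family_x(chip_id))
			family_x_quirk();	/* compiled out otherwise */
	}
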
diff --git a/arch/arm/mach-pxa/include/mach/io.h b/arch/arm/mach-pxa/include/mach/io.h index 262691fb97d8..fdca3be47d9b 100644 --- a/arch/arm/mach-pxa/include/mach/io.h +++ b/arch/arm/mach-pxa/include/mach/io.h | |||
@@ -6,6 +6,8 @@ | |||
6 | #ifndef __ASM_ARM_ARCH_IO_H | 6 | #ifndef __ASM_ARM_ARCH_IO_H |
7 | #define __ASM_ARM_ARCH_IO_H | 7 | #define __ASM_ARM_ARCH_IO_H |
8 | 8 | ||
9 | #include <mach/hardware.h> | ||
10 | |||
9 | #define IO_SPACE_LIMIT 0xffffffff | 11 | #define IO_SPACE_LIMIT 0xffffffff |
10 | 12 | ||
11 | /* | 13 | /* |
diff --git a/arch/arm/mach-pxa/include/mach/mfp-pxa300.h b/arch/arm/mach-pxa/include/mach/mfp-pxa300.h index 7139e0dc26d1..4e1287070d21 100644 --- a/arch/arm/mach-pxa/include/mach/mfp-pxa300.h +++ b/arch/arm/mach-pxa/include/mach/mfp-pxa300.h | |||
@@ -71,10 +71,10 @@ | |||
71 | #define GPIO46_CI_DD_7 MFP_CFG_DRV(GPIO46, AF0, DS04X) | 71 | #define GPIO46_CI_DD_7 MFP_CFG_DRV(GPIO46, AF0, DS04X) |
72 | #define GPIO47_CI_DD_8 MFP_CFG_DRV(GPIO47, AF1, DS04X) | 72 | #define GPIO47_CI_DD_8 MFP_CFG_DRV(GPIO47, AF1, DS04X) |
73 | #define GPIO48_CI_DD_9 MFP_CFG_DRV(GPIO48, AF1, DS04X) | 73 | #define GPIO48_CI_DD_9 MFP_CFG_DRV(GPIO48, AF1, DS04X) |
74 | #define GPIO52_CI_HSYNC MFP_CFG_DRV(GPIO52, AF0, DS04X) | ||
75 | #define GPIO51_CI_VSYNC MFP_CFG_DRV(GPIO51, AF0, DS04X) | ||
76 | #define GPIO49_CI_MCLK MFP_CFG_DRV(GPIO49, AF0, DS04X) | 74 | #define GPIO49_CI_MCLK MFP_CFG_DRV(GPIO49, AF0, DS04X) |
77 | #define GPIO50_CI_PCLK MFP_CFG_DRV(GPIO50, AF0, DS04X) | 75 | #define GPIO50_CI_PCLK MFP_CFG_DRV(GPIO50, AF0, DS04X) |
76 | #define GPIO51_CI_HSYNC MFP_CFG_DRV(GPIO51, AF0, DS04X) | ||
77 | #define GPIO52_CI_VSYNC MFP_CFG_DRV(GPIO52, AF0, DS04X) | ||
78 | 78 | ||
79 | /* KEYPAD */ | 79 | /* KEYPAD */ |
80 | #define GPIO3_KP_DKIN_6 MFP_CFG_LPM(GPIO3, AF2, FLOAT) | 80 | #define GPIO3_KP_DKIN_6 MFP_CFG_LPM(GPIO3, AF2, FLOAT) |
diff --git a/arch/arm/mach-pxa/palm27x.c b/arch/arm/mach-pxa/palm27x.c index 77ad6d34ab5b..405b92a29793 100644 --- a/arch/arm/mach-pxa/palm27x.c +++ b/arch/arm/mach-pxa/palm27x.c | |||
@@ -469,9 +469,13 @@ static struct i2c_board_info __initdata palm27x_pi2c_board_info[] = { | |||
469 | }, | 469 | }, |
470 | }; | 470 | }; |
471 | 471 | ||
472 | static struct i2c_pxa_platform_data palm27x_i2c_power_info = { | ||
473 | .use_pio = 1, | ||
474 | }; | ||
475 | |||
472 | void __init palm27x_pmic_init(void) | 476 | void __init palm27x_pmic_init(void) |
473 | { | 477 | { |
474 | i2c_register_board_info(1, ARRAY_AND_SIZE(palm27x_pi2c_board_info)); | 478 | i2c_register_board_info(1, ARRAY_AND_SIZE(palm27x_pi2c_board_info)); |
475 | pxa27x_set_i2c_power_info(NULL); | 479 | pxa27x_set_i2c_power_info(&palm27x_i2c_power_info); |
476 | } | 480 | } |
477 | #endif | 481 | #endif |
diff --git a/arch/arm/mach-pxa/vpac270.c b/arch/arm/mach-pxa/vpac270.c index c9b747cedea8..37d6173bbb66 100644 --- a/arch/arm/mach-pxa/vpac270.c +++ b/arch/arm/mach-pxa/vpac270.c | |||
@@ -240,6 +240,7 @@ static void __init vpac270_onenand_init(void) {} | |||
240 | #if defined(CONFIG_MMC_PXA) || defined(CONFIG_MMC_PXA_MODULE) | 240 | #if defined(CONFIG_MMC_PXA) || defined(CONFIG_MMC_PXA_MODULE) |
241 | static struct pxamci_platform_data vpac270_mci_platform_data = { | 241 | static struct pxamci_platform_data vpac270_mci_platform_data = { |
242 | .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, | 242 | .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, |
243 | .gpio_power = -1, | ||
243 | .gpio_card_detect = GPIO53_VPAC270_SD_DETECT_N, | 244 | .gpio_card_detect = GPIO53_VPAC270_SD_DETECT_N, |
244 | .gpio_card_ro = GPIO52_VPAC270_SD_READONLY, | 245 | .gpio_card_ro = GPIO52_VPAC270_SD_READONLY, |
245 | .detect_delay_ms = 200, | 246 | .detect_delay_ms = 200, |
diff --git a/arch/arm/mach-s3c64xx/dev-spi.c b/arch/arm/mach-s3c64xx/dev-spi.c index a492b982aa06..405e62128917 100644 --- a/arch/arm/mach-s3c64xx/dev-spi.c +++ b/arch/arm/mach-s3c64xx/dev-spi.c | |||
@@ -18,10 +18,11 @@ | |||
18 | #include <mach/map.h> | 18 | #include <mach/map.h> |
19 | #include <mach/gpio-bank-c.h> | 19 | #include <mach/gpio-bank-c.h> |
20 | #include <mach/spi-clocks.h> | 20 | #include <mach/spi-clocks.h> |
21 | #include <mach/irqs.h> | ||
21 | 22 | ||
22 | #include <plat/s3c64xx-spi.h> | 23 | #include <plat/s3c64xx-spi.h> |
23 | #include <plat/gpio-cfg.h> | 24 | #include <plat/gpio-cfg.h> |
24 | #include <plat/irqs.h> | 25 | #include <plat/devs.h> |
25 | 26 | ||
26 | static char *spi_src_clks[] = { | 27 | static char *spi_src_clks[] = { |
27 | [S3C64XX_SPI_SRCCLK_PCLK] = "pclk", | 28 | [S3C64XX_SPI_SRCCLK_PCLK] = "pclk", |
diff --git a/arch/arm/mach-s3c64xx/mach-real6410.c b/arch/arm/mach-s3c64xx/mach-real6410.c index 5c07d013b23d..e130379ba0e8 100644 --- a/arch/arm/mach-s3c64xx/mach-real6410.c +++ b/arch/arm/mach-s3c64xx/mach-real6410.c | |||
@@ -30,73 +30,73 @@ | |||
30 | #include <plat/devs.h> | 30 | #include <plat/devs.h> |
31 | #include <plat/regs-serial.h> | 31 | #include <plat/regs-serial.h> |
32 | 32 | ||
33 | #define UCON S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK | 33 | #define UCON (S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK) |
34 | #define ULCON S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB | 34 | #define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB) |
35 | #define UFCON S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE | 35 | #define UFCON (S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE) |
36 | 36 | ||
37 | static struct s3c2410_uartcfg real6410_uartcfgs[] __initdata = { | 37 | static struct s3c2410_uartcfg real6410_uartcfgs[] __initdata = { |
38 | [0] = { | 38 | [0] = { |
39 | .hwport = 0, | 39 | .hwport = 0, |
40 | .flags = 0, | 40 | .flags = 0, |
41 | .ucon = UCON, | 41 | .ucon = UCON, |
42 | .ulcon = ULCON, | 42 | .ulcon = ULCON, |
43 | .ufcon = UFCON, | 43 | .ufcon = UFCON, |
44 | }, | 44 | }, |
45 | [1] = { | 45 | [1] = { |
46 | .hwport = 1, | 46 | .hwport = 1, |
47 | .flags = 0, | 47 | .flags = 0, |
48 | .ucon = UCON, | 48 | .ucon = UCON, |
49 | .ulcon = ULCON, | 49 | .ulcon = ULCON, |
50 | .ufcon = UFCON, | 50 | .ufcon = UFCON, |
51 | }, | 51 | }, |
52 | [2] = { | 52 | [2] = { |
53 | .hwport = 2, | 53 | .hwport = 2, |
54 | .flags = 0, | 54 | .flags = 0, |
55 | .ucon = UCON, | 55 | .ucon = UCON, |
56 | .ulcon = ULCON, | 56 | .ulcon = ULCON, |
57 | .ufcon = UFCON, | 57 | .ufcon = UFCON, |
58 | }, | 58 | }, |
59 | [3] = { | 59 | [3] = { |
60 | .hwport = 3, | 60 | .hwport = 3, |
61 | .flags = 0, | 61 | .flags = 0, |
62 | .ucon = UCON, | 62 | .ucon = UCON, |
63 | .ulcon = ULCON, | 63 | .ulcon = ULCON, |
64 | .ufcon = UFCON, | 64 | .ufcon = UFCON, |
65 | }, | 65 | }, |
66 | }; | 66 | }; |
67 | 67 | ||
68 | /* DM9000AEP 10/100 ethernet controller */ | 68 | /* DM9000AEP 10/100 ethernet controller */ |
69 | 69 | ||
70 | static struct resource real6410_dm9k_resource[] = { | 70 | static struct resource real6410_dm9k_resource[] = { |
71 | [0] = { | 71 | [0] = { |
72 | .start = S3C64XX_PA_XM0CSN1, | 72 | .start = S3C64XX_PA_XM0CSN1, |
73 | .end = S3C64XX_PA_XM0CSN1 + 1, | 73 | .end = S3C64XX_PA_XM0CSN1 + 1, |
74 | .flags = IORESOURCE_MEM | 74 | .flags = IORESOURCE_MEM |
75 | }, | 75 | }, |
76 | [1] = { | 76 | [1] = { |
77 | .start = S3C64XX_PA_XM0CSN1 + 4, | 77 | .start = S3C64XX_PA_XM0CSN1 + 4, |
78 | .end = S3C64XX_PA_XM0CSN1 + 5, | 78 | .end = S3C64XX_PA_XM0CSN1 + 5, |
79 | .flags = IORESOURCE_MEM | 79 | .flags = IORESOURCE_MEM |
80 | }, | 80 | }, |
81 | [2] = { | 81 | [2] = { |
82 | .start = S3C_EINT(7), | 82 | .start = S3C_EINT(7), |
83 | .end = S3C_EINT(7), | 83 | .end = S3C_EINT(7), |
84 | .flags = IORESOURCE_IRQ, | 84 | .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL |
85 | } | 85 | } |
86 | }; | 86 | }; |
87 | 87 | ||
88 | static struct dm9000_plat_data real6410_dm9k_pdata = { | 88 | static struct dm9000_plat_data real6410_dm9k_pdata = { |
89 | .flags = (DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM), | 89 | .flags = (DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM), |
90 | }; | 90 | }; |
91 | 91 | ||
92 | static struct platform_device real6410_device_eth = { | 92 | static struct platform_device real6410_device_eth = { |
93 | .name = "dm9000", | 93 | .name = "dm9000", |
94 | .id = -1, | 94 | .id = -1, |
95 | .num_resources = ARRAY_SIZE(real6410_dm9k_resource), | 95 | .num_resources = ARRAY_SIZE(real6410_dm9k_resource), |
96 | .resource = real6410_dm9k_resource, | 96 | .resource = real6410_dm9k_resource, |
97 | .dev = { | 97 | .dev = { |
98 | .platform_data = &real6410_dm9k_pdata, | 98 | .platform_data = &real6410_dm9k_pdata, |
99 | }, | 99 | }, |
100 | }; | 100 | }; |
101 | 101 | ||
102 | static struct platform_device *real6410_devices[] __initdata = { | 102 | static struct platform_device *real6410_devices[] __initdata = { |
@@ -129,12 +129,12 @@ static void __init real6410_machine_init(void) | |||
129 | /* set timing for nCS1 suitable for ethernet chip */ | 129 | /* set timing for nCS1 suitable for ethernet chip */ |
130 | 130 | ||
131 | __raw_writel((0 << S3C64XX_SROM_BCX__PMC__SHIFT) | | 131 | __raw_writel((0 << S3C64XX_SROM_BCX__PMC__SHIFT) | |
132 | (6 << S3C64XX_SROM_BCX__TACP__SHIFT) | | 132 | (6 << S3C64XX_SROM_BCX__TACP__SHIFT) | |
133 | (4 << S3C64XX_SROM_BCX__TCAH__SHIFT) | | 133 | (4 << S3C64XX_SROM_BCX__TCAH__SHIFT) | |
134 | (1 << S3C64XX_SROM_BCX__TCOH__SHIFT) | | 134 | (1 << S3C64XX_SROM_BCX__TCOH__SHIFT) | |
135 | (13 << S3C64XX_SROM_BCX__TACC__SHIFT) | | 135 | (13 << S3C64XX_SROM_BCX__TACC__SHIFT) | |
136 | (4 << S3C64XX_SROM_BCX__TCOS__SHIFT) | | 136 | (4 << S3C64XX_SROM_BCX__TCOS__SHIFT) | |
137 | (0 << S3C64XX_SROM_BCX__TACS__SHIFT), S3C64XX_SROM_BC1); | 137 | (0 << S3C64XX_SROM_BCX__TACS__SHIFT), S3C64XX_SROM_BC1); |
138 | 138 | ||
139 | platform_add_devices(real6410_devices, ARRAY_SIZE(real6410_devices)); | 139 | platform_add_devices(real6410_devices, ARRAY_SIZE(real6410_devices)); |
140 | } | 140 | } |
diff --git a/arch/arm/mach-s5pv210/clock.c b/arch/arm/mach-s5pv210/clock.c index af91fefef2c6..cfecd70657cb 100644 --- a/arch/arm/mach-s5pv210/clock.c +++ b/arch/arm/mach-s5pv210/clock.c | |||
@@ -281,6 +281,24 @@ static struct clk init_clocks_disable[] = { | |||
281 | .enable = s5pv210_clk_ip0_ctrl, | 281 | .enable = s5pv210_clk_ip0_ctrl, |
282 | .ctrlbit = (1<<29), | 282 | .ctrlbit = (1<<29), |
283 | }, { | 283 | }, { |
284 | .name = "fimc", | ||
285 | .id = 0, | ||
286 | .parent = &clk_hclk_dsys.clk, | ||
287 | .enable = s5pv210_clk_ip0_ctrl, | ||
288 | .ctrlbit = (1 << 24), | ||
289 | }, { | ||
290 | .name = "fimc", | ||
291 | .id = 1, | ||
292 | .parent = &clk_hclk_dsys.clk, | ||
293 | .enable = s5pv210_clk_ip0_ctrl, | ||
294 | .ctrlbit = (1 << 25), | ||
295 | }, { | ||
296 | .name = "fimc", | ||
297 | .id = 2, | ||
298 | .parent = &clk_hclk_dsys.clk, | ||
299 | .enable = s5pv210_clk_ip0_ctrl, | ||
300 | .ctrlbit = (1 << 26), | ||
301 | }, { | ||
284 | .name = "otg", | 302 | .name = "otg", |
285 | .id = -1, | 303 | .id = -1, |
286 | .parent = &clk_hclk_psys.clk, | 304 | .parent = &clk_hclk_psys.clk, |
@@ -357,7 +375,7 @@ static struct clk init_clocks_disable[] = { | |||
357 | .id = 1, | 375 | .id = 1, |
358 | .parent = &clk_pclk_psys.clk, | 376 | .parent = &clk_pclk_psys.clk, |
359 | .enable = s5pv210_clk_ip3_ctrl, | 377 | .enable = s5pv210_clk_ip3_ctrl, |
360 | .ctrlbit = (1<<8), | 378 | .ctrlbit = (1 << 10), |
361 | }, { | 379 | }, { |
362 | .name = "i2c", | 380 | .name = "i2c", |
363 | .id = 2, | 381 | .id = 2, |
diff --git a/arch/arm/mach-s5pv210/cpu.c b/arch/arm/mach-s5pv210/cpu.c index b9f4d677cf55..77f456c91ad3 100644 --- a/arch/arm/mach-s5pv210/cpu.c +++ b/arch/arm/mach-s5pv210/cpu.c | |||
@@ -47,7 +47,7 @@ static struct map_desc s5pv210_iodesc[] __initdata = { | |||
47 | { | 47 | { |
48 | .virtual = (unsigned long)S5P_VA_SYSTIMER, | 48 | .virtual = (unsigned long)S5P_VA_SYSTIMER, |
49 | .pfn = __phys_to_pfn(S5PV210_PA_SYSTIMER), | 49 | .pfn = __phys_to_pfn(S5PV210_PA_SYSTIMER), |
50 | .length = SZ_1M, | 50 | .length = SZ_4K, |
51 | .type = MT_DEVICE, | 51 | .type = MT_DEVICE, |
52 | }, { | 52 | }, { |
53 | .virtual = (unsigned long)VA_VIC2, | 53 | .virtual = (unsigned long)VA_VIC2, |
diff --git a/arch/arm/mach-shmobile/Makefile b/arch/arm/mach-shmobile/Makefile index 5e16b4c69222..ae416fe7daf2 100644 --- a/arch/arm/mach-shmobile/Makefile +++ b/arch/arm/mach-shmobile/Makefile | |||
@@ -3,7 +3,7 @@ | |||
3 | # | 3 | # |
4 | 4 | ||
5 | # Common objects | 5 | # Common objects |
6 | obj-y := timer.o console.o clock.o | 6 | obj-y := timer.o console.o clock.o pm_runtime.o |
7 | 7 | ||
8 | # CPU objects | 8 | # CPU objects |
9 | obj-$(CONFIG_ARCH_SH7367) += setup-sh7367.o clock-sh7367.o intc-sh7367.o | 9 | obj-$(CONFIG_ARCH_SH7367) += setup-sh7367.o clock-sh7367.o intc-sh7367.o |
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c index 23d472f9525e..95935c83c306 100644 --- a/arch/arm/mach-shmobile/board-ap4evb.c +++ b/arch/arm/mach-shmobile/board-ap4evb.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/platform_device.h> | 25 | #include <linux/platform_device.h> |
26 | #include <linux/delay.h> | 26 | #include <linux/delay.h> |
27 | #include <linux/mfd/sh_mobile_sdhi.h> | 27 | #include <linux/mfd/sh_mobile_sdhi.h> |
28 | #include <linux/mfd/tmio.h> | ||
28 | #include <linux/mmc/host.h> | 29 | #include <linux/mmc/host.h> |
29 | #include <linux/mtd/mtd.h> | 30 | #include <linux/mtd/mtd.h> |
30 | #include <linux/mtd/partitions.h> | 31 | #include <linux/mtd/partitions.h> |
@@ -39,6 +40,7 @@ | |||
39 | #include <linux/sh_clk.h> | 40 | #include <linux/sh_clk.h> |
40 | #include <linux/gpio.h> | 41 | #include <linux/gpio.h> |
41 | #include <linux/input.h> | 42 | #include <linux/input.h> |
43 | #include <linux/leds.h> | ||
42 | #include <linux/input/sh_keysc.h> | 44 | #include <linux/input/sh_keysc.h> |
43 | #include <linux/usb/r8a66597.h> | 45 | #include <linux/usb/r8a66597.h> |
44 | 46 | ||
@@ -307,6 +309,7 @@ static struct sh_mobile_sdhi_info sdhi1_info = { | |||
307 | .dma_slave_tx = SHDMA_SLAVE_SDHI1_TX, | 309 | .dma_slave_tx = SHDMA_SLAVE_SDHI1_TX, |
308 | .dma_slave_rx = SHDMA_SLAVE_SDHI1_RX, | 310 | .dma_slave_rx = SHDMA_SLAVE_SDHI1_RX, |
309 | .tmio_ocr_mask = MMC_VDD_165_195, | 311 | .tmio_ocr_mask = MMC_VDD_165_195, |
312 | .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE, | ||
310 | }; | 313 | }; |
311 | 314 | ||
312 | static struct resource sdhi1_resources[] = { | 315 | static struct resource sdhi1_resources[] = { |
@@ -558,7 +561,7 @@ static struct resource fsi_resources[] = { | |||
558 | 561 | ||
559 | static struct platform_device fsi_device = { | 562 | static struct platform_device fsi_device = { |
560 | .name = "sh_fsi2", | 563 | .name = "sh_fsi2", |
561 | .id = 0, | 564 | .id = -1, |
562 | .num_resources = ARRAY_SIZE(fsi_resources), | 565 | .num_resources = ARRAY_SIZE(fsi_resources), |
563 | .resource = fsi_resources, | 566 | .resource = fsi_resources, |
564 | .dev = { | 567 | .dev = { |
@@ -650,7 +653,44 @@ static struct platform_device hdmi_device = { | |||
650 | }, | 653 | }, |
651 | }; | 654 | }; |
652 | 655 | ||
656 | static struct gpio_led ap4evb_leds[] = { | ||
657 | { | ||
658 | .name = "led4", | ||
659 | .gpio = GPIO_PORT185, | ||
660 | .default_state = LEDS_GPIO_DEFSTATE_ON, | ||
661 | }, | ||
662 | { | ||
663 | .name = "led2", | ||
664 | .gpio = GPIO_PORT186, | ||
665 | .default_state = LEDS_GPIO_DEFSTATE_ON, | ||
666 | }, | ||
667 | { | ||
668 | .name = "led3", | ||
669 | .gpio = GPIO_PORT187, | ||
670 | .default_state = LEDS_GPIO_DEFSTATE_ON, | ||
671 | }, | ||
672 | { | ||
673 | .name = "led1", | ||
674 | .gpio = GPIO_PORT188, | ||
675 | .default_state = LEDS_GPIO_DEFSTATE_ON, | ||
676 | } | ||
677 | }; | ||
678 | |||
679 | static struct gpio_led_platform_data ap4evb_leds_pdata = { | ||
680 | .num_leds = ARRAY_SIZE(ap4evb_leds), | ||
681 | .leds = ap4evb_leds, | ||
682 | }; | ||
683 | |||
684 | static struct platform_device leds_device = { | ||
685 | .name = "leds-gpio", | ||
686 | .id = 0, | ||
687 | .dev = { | ||
688 | .platform_data = &ap4evb_leds_pdata, | ||
689 | }, | ||
690 | }; | ||
691 | |||
653 | static struct platform_device *ap4evb_devices[] __initdata = { | 692 | static struct platform_device *ap4evb_devices[] __initdata = { |
693 | &leds_device, | ||
654 | &nor_flash_device, | 694 | &nor_flash_device, |
655 | &smc911x_device, | 695 | &smc911x_device, |
656 | &sdhi0_device, | 696 | &sdhi0_device, |
@@ -840,20 +880,6 @@ static void __init ap4evb_init(void) | |||
840 | gpio_request(GPIO_FN_CS5A, NULL); | 880 | gpio_request(GPIO_FN_CS5A, NULL); |
841 | gpio_request(GPIO_FN_IRQ6_39, NULL); | 881 | gpio_request(GPIO_FN_IRQ6_39, NULL); |
842 | 882 | ||
843 | /* enable LED 1 - 4 */ | ||
844 | gpio_request(GPIO_PORT185, NULL); | ||
845 | gpio_request(GPIO_PORT186, NULL); | ||
846 | gpio_request(GPIO_PORT187, NULL); | ||
847 | gpio_request(GPIO_PORT188, NULL); | ||
848 | gpio_direction_output(GPIO_PORT185, 1); | ||
849 | gpio_direction_output(GPIO_PORT186, 1); | ||
850 | gpio_direction_output(GPIO_PORT187, 1); | ||
851 | gpio_direction_output(GPIO_PORT188, 1); | ||
852 | gpio_export(GPIO_PORT185, 0); | ||
853 | gpio_export(GPIO_PORT186, 0); | ||
854 | gpio_export(GPIO_PORT187, 0); | ||
855 | gpio_export(GPIO_PORT188, 0); | ||
856 | |||
857 | /* enable Debug switch (S6) */ | 883 | /* enable Debug switch (S6) */ |
858 | gpio_request(GPIO_PORT32, NULL); | 884 | gpio_request(GPIO_PORT32, NULL); |
859 | gpio_request(GPIO_PORT33, NULL); | 885 | gpio_request(GPIO_PORT33, NULL); |
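
Instead of requesting and driving the four LED GPIOs by hand, the board now registers a leds-gpio platform device; LEDS_GPIO_DEFSTATE_ON reproduces the old "on at boot" behaviour, and each LED additionally shows up under /sys/class/leds/ where triggers can be attached. The same pattern trimmed to a single hypothetical LED (GPIO number, names and the optional trigger are placeholders):

	#include <linux/kernel.h>
	#include <linux/leds.h>
	#include <linux/platform_device.h>

	static struct gpio_led example_leds[] = {
		{
			.name		= "board:green:status",
			.gpio		= 42,			/* placeholder */
			.default_state	= LEDS_GPIO_DEFSTATE_ON,
			.default_trigger = "heartbeat",		/* optional */
		},
	};

	static struct gpio_led_platform_data example_leds_pdata = {
		.num_leds	= ARRAY_SIZE(example_leds),
		.leds		= example_leds,
	};

	static struct platform_device example_leds_device = {
		.name	= "leds-gpio",
		.id	= -1,
		.dev	= {
			.platform_data = &example_leds_pdata,
		},
	};
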
diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c index fb4e9b1d788e..759468992ad2 100644 --- a/arch/arm/mach-shmobile/clock-sh7372.c +++ b/arch/arm/mach-shmobile/clock-sh7372.c | |||
@@ -286,7 +286,6 @@ static struct clk_ops pllc2_clk_ops = { | |||
286 | 286 | ||
287 | struct clk pllc2_clk = { | 287 | struct clk pllc2_clk = { |
288 | .ops = &pllc2_clk_ops, | 288 | .ops = &pllc2_clk_ops, |
289 | .flags = CLK_ENABLE_ON_INIT, | ||
290 | .parent = &extal1_div2_clk, | 289 | .parent = &extal1_div2_clk, |
291 | .freq_table = pllc2_freq_table, | 290 | .freq_table = pllc2_freq_table, |
292 | .parent_table = pllc2_parent, | 291 | .parent_table = pllc2_parent, |
@@ -395,7 +394,7 @@ static struct clk div6_reparent_clks[DIV6_REPARENT_NR] = { | |||
395 | 394 | ||
396 | enum { MSTP001, | 395 | enum { MSTP001, |
397 | MSTP131, MSTP130, | 396 | MSTP131, MSTP130, |
398 | MSTP129, MSTP128, | 397 | MSTP129, MSTP128, MSTP127, MSTP126, |
399 | MSTP118, MSTP117, MSTP116, | 398 | MSTP118, MSTP117, MSTP116, |
400 | MSTP106, MSTP101, MSTP100, | 399 | MSTP106, MSTP101, MSTP100, |
401 | MSTP223, | 400 | MSTP223, |
@@ -413,6 +412,8 @@ static struct clk mstp_clks[MSTP_NR] = { | |||
413 | [MSTP130] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 30, 0), /* VEU2 */ | 412 | [MSTP130] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 30, 0), /* VEU2 */ |
414 | [MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* VEU1 */ | 413 | [MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* VEU1 */ |
415 | [MSTP128] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 28, 0), /* VEU0 */ | 414 | [MSTP128] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 28, 0), /* VEU0 */ |
415 | [MSTP127] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 27, 0), /* CEU */ | ||
416 | [MSTP126] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 26, 0), /* CSI2 */ | ||
416 | [MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX */ | 417 | [MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX */ |
417 | [MSTP117] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 17, 0), /* LCDC1 */ | 418 | [MSTP117] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 17, 0), /* LCDC1 */ |
418 | [MSTP116] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 16, 0), /* IIC0 */ | 419 | [MSTP116] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 16, 0), /* IIC0 */ |
@@ -428,7 +429,7 @@ static struct clk mstp_clks[MSTP_NR] = { | |||
428 | [MSTP201] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 1, 0), /* SCIFA3 */ | 429 | [MSTP201] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 1, 0), /* SCIFA3 */ |
429 | [MSTP200] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 0, 0), /* SCIFA4 */ | 430 | [MSTP200] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 0, 0), /* SCIFA4 */ |
430 | [MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */ | 431 | [MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */ |
431 | [MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, CLK_ENABLE_ON_INIT), /* FSIA */ | 432 | [MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, 0), /* FSIA */ |
432 | [MSTP323] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 23, 0), /* IIC1 */ | 433 | [MSTP323] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 23, 0), /* IIC1 */ |
433 | [MSTP322] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 22, 0), /* USB0 */ | 434 | [MSTP322] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 22, 0), /* USB0 */ |
434 | [MSTP314] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 14, 0), /* SDHI0 */ | 435 | [MSTP314] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 14, 0), /* SDHI0 */ |
@@ -498,6 +499,8 @@ static struct clk_lookup lookups[] = { | |||
498 | CLKDEV_DEV_ID("uio_pdrv_genirq.3", &mstp_clks[MSTP130]), /* VEU2 */ | 499 | CLKDEV_DEV_ID("uio_pdrv_genirq.3", &mstp_clks[MSTP130]), /* VEU2 */ |
499 | CLKDEV_DEV_ID("uio_pdrv_genirq.2", &mstp_clks[MSTP129]), /* VEU1 */ | 500 | CLKDEV_DEV_ID("uio_pdrv_genirq.2", &mstp_clks[MSTP129]), /* VEU1 */ |
500 | CLKDEV_DEV_ID("uio_pdrv_genirq.1", &mstp_clks[MSTP128]), /* VEU0 */ | 501 | CLKDEV_DEV_ID("uio_pdrv_genirq.1", &mstp_clks[MSTP128]), /* VEU0 */ |
502 | CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[MSTP127]), /* CEU */ | ||
503 | CLKDEV_DEV_ID("sh-mobile-csi2.0", &mstp_clks[MSTP126]), /* CSI2 */ | ||
501 | CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */ | 504 | CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */ |
502 | CLKDEV_DEV_ID("sh_mobile_lcdc_fb.1", &mstp_clks[MSTP117]), /* LCDC1 */ | 505 | CLKDEV_DEV_ID("sh_mobile_lcdc_fb.1", &mstp_clks[MSTP117]), /* LCDC1 */ |
503 | CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* IIC0 */ | 506 | CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* IIC0 */ |
diff --git a/arch/arm/mach-shmobile/clock.c b/arch/arm/mach-shmobile/clock.c index b7c705a213a2..6b7c7c42bc8f 100644 --- a/arch/arm/mach-shmobile/clock.c +++ b/arch/arm/mach-shmobile/clock.c | |||
@@ -1,8 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * SH-Mobile Timer | 2 | * SH-Mobile Clock Framework |
3 | * | 3 | * |
4 | * Copyright (C) 2010 Magnus Damm | 4 | * Copyright (C) 2010 Magnus Damm |
5 | * | 5 | * |
6 | * Used together with arch/arm/common/clkdev.c and drivers/sh/clk.c. | ||
7 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License as published by | 9 | * it under the terms of the GNU General Public License as published by |
8 | * the Free Software Foundation; version 2 of the License. | 10 | * the Free Software Foundation; version 2 of the License. |
diff --git a/arch/arm/mach-shmobile/pm_runtime.c b/arch/arm/mach-shmobile/pm_runtime.c new file mode 100644 index 000000000000..94912d3944d3 --- /dev/null +++ b/arch/arm/mach-shmobile/pm_runtime.c | |||
@@ -0,0 +1,169 @@ | |||
1 | /* | ||
2 | * arch/arm/mach-shmobile/pm_runtime.c | ||
3 | * | ||
4 | * Runtime PM support code for SuperH Mobile ARM | ||
5 | * | ||
6 | * Copyright (C) 2009-2010 Magnus Damm | ||
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | ||
12 | |||
13 | #include <linux/init.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/io.h> | ||
16 | #include <linux/pm_runtime.h> | ||
17 | #include <linux/platform_device.h> | ||
18 | #include <linux/clk.h> | ||
19 | #include <linux/sh_clk.h> | ||
20 | #include <linux/bitmap.h> | ||
21 | |||
22 | #ifdef CONFIG_PM_RUNTIME | ||
23 | #define BIT_ONCE 0 | ||
24 | #define BIT_ACTIVE 1 | ||
25 | #define BIT_CLK_ENABLED 2 | ||
26 | |||
27 | struct pm_runtime_data { | ||
28 | unsigned long flags; | ||
29 | struct clk *clk; | ||
30 | }; | ||
31 | |||
32 | static void __devres_release(struct device *dev, void *res) | ||
33 | { | ||
34 | struct pm_runtime_data *prd = res; | ||
35 | |||
36 | dev_dbg(dev, "__devres_release()\n"); | ||
37 | |||
38 | if (test_bit(BIT_CLK_ENABLED, &prd->flags)) | ||
39 | clk_disable(prd->clk); | ||
40 | |||
41 | if (test_bit(BIT_ACTIVE, &prd->flags)) | ||
42 | clk_put(prd->clk); | ||
43 | } | ||
44 | |||
45 | static struct pm_runtime_data *__to_prd(struct device *dev) | ||
46 | { | ||
47 | return devres_find(dev, __devres_release, NULL, NULL); | ||
48 | } | ||
49 | |||
50 | static void platform_pm_runtime_init(struct device *dev, | ||
51 | struct pm_runtime_data *prd) | ||
52 | { | ||
53 | if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags)) { | ||
54 | prd->clk = clk_get(dev, NULL); | ||
55 | if (!IS_ERR(prd->clk)) { | ||
56 | set_bit(BIT_ACTIVE, &prd->flags); | ||
57 | dev_info(dev, "clocks managed by runtime pm\n"); | ||
58 | } | ||
59 | } | ||
60 | } | ||
61 | |||
62 | static void platform_pm_runtime_bug(struct device *dev, | ||
63 | struct pm_runtime_data *prd) | ||
64 | { | ||
65 | if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags)) | ||
66 | dev_err(dev, "runtime pm suspend before resume\n"); | ||
67 | } | ||
68 | |||
69 | int platform_pm_runtime_suspend(struct device *dev) | ||
70 | { | ||
71 | struct pm_runtime_data *prd = __to_prd(dev); | ||
72 | |||
73 | dev_dbg(dev, "platform_pm_runtime_suspend()\n"); | ||
74 | |||
75 | platform_pm_runtime_bug(dev, prd); | ||
76 | |||
77 | if (prd && test_bit(BIT_ACTIVE, &prd->flags)) { | ||
78 | clk_disable(prd->clk); | ||
79 | clear_bit(BIT_CLK_ENABLED, &prd->flags); | ||
80 | } | ||
81 | |||
82 | return 0; | ||
83 | } | ||
84 | |||
85 | int platform_pm_runtime_resume(struct device *dev) | ||
86 | { | ||
87 | struct pm_runtime_data *prd = __to_prd(dev); | ||
88 | |||
89 | dev_dbg(dev, "platform_pm_runtime_resume()\n"); | ||
90 | |||
91 | platform_pm_runtime_init(dev, prd); | ||
92 | |||
93 | if (prd && test_bit(BIT_ACTIVE, &prd->flags)) { | ||
94 | clk_enable(prd->clk); | ||
95 | set_bit(BIT_CLK_ENABLED, &prd->flags); | ||
96 | } | ||
97 | |||
98 | return 0; | ||
99 | } | ||
100 | |||
101 | int platform_pm_runtime_idle(struct device *dev) | ||
102 | { | ||
103 | /* suspend synchronously to disable clocks immediately */ | ||
104 | return pm_runtime_suspend(dev); | ||
105 | } | ||
106 | |||
107 | static int platform_bus_notify(struct notifier_block *nb, | ||
108 | unsigned long action, void *data) | ||
109 | { | ||
110 | struct device *dev = data; | ||
111 | struct pm_runtime_data *prd; | ||
112 | |||
113 | dev_dbg(dev, "platform_bus_notify() %ld !\n", action); | ||
114 | |||
115 | if (action == BUS_NOTIFY_BIND_DRIVER) { | ||
116 | prd = devres_alloc(__devres_release, sizeof(*prd), GFP_KERNEL); | ||
117 | if (prd) | ||
118 | devres_add(dev, prd); | ||
119 | else | ||
120 | dev_err(dev, "unable to alloc memory for runtime pm\n"); | ||
121 | } | ||
122 | |||
123 | return 0; | ||
124 | } | ||
125 | |||
126 | #else /* CONFIG_PM_RUNTIME */ | ||
127 | |||
128 | static int platform_bus_notify(struct notifier_block *nb, | ||
129 | unsigned long action, void *data) | ||
130 | { | ||
131 | struct device *dev = data; | ||
132 | struct clk *clk; | ||
133 | |||
134 | dev_dbg(dev, "platform_bus_notify() %ld !\n", action); | ||
135 | |||
136 | switch (action) { | ||
137 | case BUS_NOTIFY_BIND_DRIVER: | ||
138 | clk = clk_get(dev, NULL); | ||
139 | if (!IS_ERR(clk)) { | ||
140 | clk_enable(clk); | ||
141 | clk_put(clk); | ||
142 | dev_info(dev, "runtime pm disabled, clock forced on\n"); | ||
143 | } | ||
144 | break; | ||
145 | case BUS_NOTIFY_UNBOUND_DRIVER: | ||
146 | clk = clk_get(dev, NULL); | ||
147 | if (!IS_ERR(clk)) { | ||
148 | clk_disable(clk); | ||
149 | clk_put(clk); | ||
150 | dev_info(dev, "runtime pm disabled, clock forced off\n"); | ||
151 | } | ||
152 | break; | ||
153 | } | ||
154 | |||
155 | return 0; | ||
156 | } | ||
157 | |||
158 | #endif /* CONFIG_PM_RUNTIME */ | ||
159 | |||
160 | static struct notifier_block platform_bus_notifier = { | ||
161 | .notifier_call = platform_bus_notify | ||
162 | }; | ||
163 | |||
164 | static int __init sh_pm_runtime_init(void) | ||
165 | { | ||
166 | bus_register_notifier(&platform_bus_type, &platform_bus_notifier); | ||
167 | return 0; | ||
168 | } | ||
169 | core_initcall(sh_pm_runtime_init); | ||
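With the bus notifier above in place, MSTP clock handling moves behind the runtime PM callbacks: a devres-managed pm_runtime_data record is attached when a driver binds, the clock is enabled on runtime resume and gated on runtime suspend, and when CONFIG_PM_RUNTIME is off the clock is simply forced on at bind time. A minimal, hypothetical consumer would look roughly like this; the driver and function names are illustrative, not part of this series, and registration boilerplate is omitted:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int example_probe(struct platform_device *pdev)
{
	pm_runtime_enable(&pdev->dev);      /* opt the device in to runtime PM */
	return 0;
}

static void example_do_io(struct platform_device *pdev)
{
	pm_runtime_get_sync(&pdev->dev);    /* runtime resume: the MSTP clock is switched on */
	/* ... register access while the clock is guaranteed to run ... */
	pm_runtime_put_sync(&pdev->dev);    /* idle -> synchronous suspend: the clock is gated again */
}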
diff --git a/arch/arm/mach-u300/include/mach/gpio.h b/arch/arm/mach-u300/include/mach/gpio.h index 7b1fc984abb6..d5a71abcbaea 100644 --- a/arch/arm/mach-u300/include/mach/gpio.h +++ b/arch/arm/mach-u300/include/mach/gpio.h | |||
@@ -273,6 +273,9 @@ extern void gpio_pullup(unsigned gpio, int value); | |||
273 | extern int gpio_get_value(unsigned gpio); | 273 | extern int gpio_get_value(unsigned gpio); |
274 | extern void gpio_set_value(unsigned gpio, int value); | 274 | extern void gpio_set_value(unsigned gpio, int value); |
275 | 275 | ||
276 | #define gpio_get_value_cansleep gpio_get_value | ||
277 | #define gpio_set_value_cansleep gpio_set_value | ||
278 | |||
276 | /* wrappers to sleep-enable the previous two functions */ | 279 | /* wrappers to sleep-enable the previous two functions */ |
277 | static inline unsigned gpio_to_irq(unsigned gpio) | 280 | static inline unsigned gpio_to_irq(unsigned gpio) |
278 | { | 281 | { |
diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c index 577df6cccb08..efb127022d42 100644 --- a/arch/arm/mach-vexpress/ct-ca9x4.c +++ b/arch/arm/mach-vexpress/ct-ca9x4.c | |||
@@ -227,7 +227,13 @@ static void ct_ca9x4_init(void) | |||
227 | int i; | 227 | int i; |
228 | 228 | ||
229 | #ifdef CONFIG_CACHE_L2X0 | 229 | #ifdef CONFIG_CACHE_L2X0 |
230 | l2x0_init(MMIO_P2V(CT_CA9X4_L2CC), 0x00000000, 0xfe0fffff); | 230 | void __iomem *l2x0_base = MMIO_P2V(CT_CA9X4_L2CC); |
231 | |||
232 | /* set RAM latencies to 1 cycle for this core tile. */ | ||
233 | writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL); | ||
234 | writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL); | ||
235 | |||
236 | l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff); | ||
231 | #endif | 237 | #endif |
232 | 238 | ||
233 | clkdev_add_table(lookups, ARRAY_SIZE(lookups)); | 239 | clkdev_add_table(lookups, ARRAY_SIZE(lookups)); |
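For reference, the second and third arguments of l2x0_init() are an aux_val/aux_mask pair applied to whatever the boot loader left in the L2 auxiliary control register; the hunk above additionally zeroes the tag and data latency registers before that. A rough sketch of the combine step, not the literal arch/arm/mm/cache-l2x0.c code:

#include <linux/io.h>
#include <asm/hardware/cache-l2x0.h>

static void l2x0_aux_sketch(void __iomem *l2x0_base, u32 aux_val, u32 aux_mask)
{
	u32 aux = readl(l2x0_base + L2X0_AUX_CTRL);	/* value programmed by the boot loader */

	aux &= aux_mask;	/* keep only the bits the caller does not override */
	aux |= aux_val;		/* force the caller's bits on */
	writel(aux, l2x0_base + L2X0_AUX_CTRL);		/* written while the L2 is still disabled */
}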
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 33c3f570aaa0..a0a2928ae4dd 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig | |||
@@ -398,7 +398,7 @@ config CPU_V6 | |||
398 | # ARMv6k | 398 | # ARMv6k |
399 | config CPU_32v6K | 399 | config CPU_32v6K |
400 | bool "Support ARM V6K processor extensions" if !SMP | 400 | bool "Support ARM V6K processor extensions" if !SMP |
401 | depends on CPU_V6 | 401 | depends on CPU_V6 || CPU_V7 |
402 | default y if SMP && !(ARCH_MX3 || ARCH_OMAP2) | 402 | default y if SMP && !(ARCH_MX3 || ARCH_OMAP2) |
403 | help | 403 | help |
404 | Say Y here if your ARMv6 processor supports the 'K' extension. | 404 | Say Y here if your ARMv6 processor supports the 'K' extension. |
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index d073b64ae87e..724ba3bce72c 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c | |||
@@ -885,8 +885,23 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | |||
885 | 885 | ||
886 | if (ai_usermode & UM_SIGNAL) | 886 | if (ai_usermode & UM_SIGNAL) |
887 | force_sig(SIGBUS, current); | 887 | force_sig(SIGBUS, current); |
888 | else | 888 | else { |
889 | set_cr(cr_no_alignment); | 889 | /* |
890 | * We're about to disable the alignment trap and return to | ||
891 | * user space. But if an interrupt occurs before actually | ||
892 | * reaching user space, then the IRQ vector entry code will | ||
893 | * notice that we were still in kernel space and therefore | ||
894 | * the alignment trap won't be re-enabled in that case as it | ||
895 | * is presumed to be always on from kernel space. | ||
896 | * Let's prevent that race by disabling interrupts here (they | ||
897 | * are disabled on the way back to user space anyway in | ||
898 | * entry-common.S) and disable the alignment trap only if | ||
899 | * there is no work pending for this thread. | ||
900 | */ | ||
901 | raw_local_irq_disable(); | ||
902 | if (!(current_thread_info()->flags & _TIF_WORK_MASK)) | ||
903 | set_cr(cr_no_alignment); | ||
904 | } | ||
890 | 905 | ||
891 | return 0; | 906 | return 0; |
892 | } | 907 | } |
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index c704eed63c5d..4bc43e535d3b 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
@@ -229,6 +229,8 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot) | |||
229 | } | 229 | } |
230 | } while (size -= PAGE_SIZE); | 230 | } while (size -= PAGE_SIZE); |
231 | 231 | ||
232 | dsb(); | ||
233 | |||
232 | return (void *)c->vm_start; | 234 | return (void *)c->vm_start; |
233 | } | 235 | } |
234 | return NULL; | 236 | return NULL; |
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 6e1c4f6a2b3f..6a3a2d0cd6db 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/nodemask.h> | 15 | #include <linux/nodemask.h> |
16 | #include <linux/memblock.h> | 16 | #include <linux/memblock.h> |
17 | #include <linux/sort.h> | 17 | #include <linux/sort.h> |
18 | #include <linux/fs.h> | ||
18 | 19 | ||
19 | #include <asm/cputype.h> | 20 | #include <asm/cputype.h> |
20 | #include <asm/sections.h> | 21 | #include <asm/sections.h> |
@@ -246,6 +247,9 @@ static struct mem_type mem_types[] = { | |||
246 | .domain = DOMAIN_USER, | 247 | .domain = DOMAIN_USER, |
247 | }, | 248 | }, |
248 | [MT_MEMORY] = { | 249 | [MT_MEMORY] = { |
250 | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | | ||
251 | L_PTE_USER | L_PTE_EXEC, | ||
252 | .prot_l1 = PMD_TYPE_TABLE, | ||
249 | .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, | 253 | .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, |
250 | .domain = DOMAIN_KERNEL, | 254 | .domain = DOMAIN_KERNEL, |
251 | }, | 255 | }, |
@@ -254,6 +258,9 @@ static struct mem_type mem_types[] = { | |||
254 | .domain = DOMAIN_KERNEL, | 258 | .domain = DOMAIN_KERNEL, |
255 | }, | 259 | }, |
256 | [MT_MEMORY_NONCACHED] = { | 260 | [MT_MEMORY_NONCACHED] = { |
261 | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | | ||
262 | L_PTE_USER | L_PTE_EXEC | L_PTE_MT_BUFFERABLE, | ||
263 | .prot_l1 = PMD_TYPE_TABLE, | ||
257 | .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, | 264 | .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, |
258 | .domain = DOMAIN_KERNEL, | 265 | .domain = DOMAIN_KERNEL, |
259 | }, | 266 | }, |
@@ -411,9 +418,12 @@ static void __init build_mem_type_table(void) | |||
411 | * Enable CPU-specific coherency if supported. | 418 | * Enable CPU-specific coherency if supported. |
412 | * (Only available on XSC3 at the moment.) | 419 | * (Only available on XSC3 at the moment.) |
413 | */ | 420 | */ |
414 | if (arch_is_coherent() && cpu_is_xsc3()) | 421 | if (arch_is_coherent() && cpu_is_xsc3()) { |
415 | mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; | 422 | mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; |
416 | 423 | mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED; | |
424 | mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; | ||
425 | mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED; | ||
426 | } | ||
417 | /* | 427 | /* |
418 | * ARMv6 and above have extended page tables. | 428 | * ARMv6 and above have extended page tables. |
419 | */ | 429 | */ |
@@ -438,7 +448,9 @@ static void __init build_mem_type_table(void) | |||
438 | mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S; | 448 | mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S; |
439 | mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED; | 449 | mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED; |
440 | mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; | 450 | mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; |
451 | mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED; | ||
441 | mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; | 452 | mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; |
453 | mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED; | ||
442 | #endif | 454 | #endif |
443 | } | 455 | } |
444 | 456 | ||
@@ -475,6 +487,8 @@ static void __init build_mem_type_table(void) | |||
475 | mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask; | 487 | mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask; |
476 | mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; | 488 | mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; |
477 | mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd; | 489 | mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd; |
490 | mem_types[MT_MEMORY].prot_pte |= kern_pgprot; | ||
491 | mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask; | ||
478 | mem_types[MT_ROM].prot_sect |= cp->pmd; | 492 | mem_types[MT_ROM].prot_sect |= cp->pmd; |
479 | 493 | ||
480 | switch (cp->pmd) { | 494 | switch (cp->pmd) { |
@@ -498,6 +512,19 @@ static void __init build_mem_type_table(void) | |||
498 | } | 512 | } |
499 | } | 513 | } |
500 | 514 | ||
515 | #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE | ||
516 | pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, | ||
517 | unsigned long size, pgprot_t vma_prot) | ||
518 | { | ||
519 | if (!pfn_valid(pfn)) | ||
520 | return pgprot_noncached(vma_prot); | ||
521 | else if (file->f_flags & O_SYNC) | ||
522 | return pgprot_writecombine(vma_prot); | ||
523 | return vma_prot; | ||
524 | } | ||
525 | EXPORT_SYMBOL(phys_mem_access_prot); | ||
526 | #endif | ||
527 | |||
501 | #define vectors_base() (vectors_high() ? 0xffff0000 : 0) | 528 | #define vectors_base() (vectors_high() ? 0xffff0000 : 0) |
502 | 529 | ||
503 | static void __init *early_alloc(unsigned long sz) | 530 | static void __init *early_alloc(unsigned long sz) |
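The new phys_mem_access_prot() hook (only built with CONFIG_ARM_DMA_MEM_BUFFERABLE) decides the memory type used when /dev/mem or a driver maps physical addresses into user space: invalid pfns such as device registers stay uncached, valid RAM opened with O_SYNC becomes write-combined, and anything else keeps the normal cacheable attributes. A hypothetical user-space illustration of the O_SYNC path; the physical address is made up and must be replaced with one valid for the board:

#include <fcntl.h>
#include <sys/mman.h>

int main(void)
{
	/* O_SYNC on a valid RAM page -> pgprot_writecombine() via the hook above */
	int fd = open("/dev/mem", O_RDWR | O_SYNC);
	void *p;

	if (fd < 0)
		return 1;
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0x80000000UL);
	return p == MAP_FAILED;
}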
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 6a8506d99ee9..7563ff0141bd 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S | |||
@@ -186,13 +186,14 @@ cpu_v7_name: | |||
186 | * It is assumed that: | 186 | * It is assumed that: |
187 | * - cache type register is implemented | 187 | * - cache type register is implemented |
188 | */ | 188 | */ |
189 | __v7_setup: | 189 | __v7_ca9mp_setup: |
190 | #ifdef CONFIG_SMP | 190 | #ifdef CONFIG_SMP |
191 | mrc p15, 0, r0, c1, c0, 1 | 191 | mrc p15, 0, r0, c1, c0, 1 |
192 | tst r0, #(1 << 6) @ SMP/nAMP mode enabled? | 192 | tst r0, #(1 << 6) @ SMP/nAMP mode enabled? |
193 | orreq r0, r0, #(1 << 6) | (1 << 0) @ Enable SMP/nAMP mode and | 193 | orreq r0, r0, #(1 << 6) | (1 << 0) @ Enable SMP/nAMP mode and |
194 | mcreq p15, 0, r0, c1, c0, 1 @ TLB ops broadcasting | 194 | mcreq p15, 0, r0, c1, c0, 1 @ TLB ops broadcasting |
195 | #endif | 195 | #endif |
196 | __v7_setup: | ||
196 | adr r12, __v7_setup_stack @ the local stack | 197 | adr r12, __v7_setup_stack @ the local stack |
197 | stmia r12, {r0-r5, r7, r9, r11, lr} | 198 | stmia r12, {r0-r5, r7, r9, r11, lr} |
198 | bl v7_flush_dcache_all | 199 | bl v7_flush_dcache_all |
@@ -201,11 +202,16 @@ __v7_setup: | |||
201 | mrc p15, 0, r0, c0, c0, 0 @ read main ID register | 202 | mrc p15, 0, r0, c0, c0, 0 @ read main ID register |
202 | and r10, r0, #0xff000000 @ ARM? | 203 | and r10, r0, #0xff000000 @ ARM? |
203 | teq r10, #0x41000000 | 204 | teq r10, #0x41000000 |
204 | bne 2f | 205 | bne 3f |
205 | and r5, r0, #0x00f00000 @ variant | 206 | and r5, r0, #0x00f00000 @ variant |
206 | and r6, r0, #0x0000000f @ revision | 207 | and r6, r0, #0x0000000f @ revision |
207 | orr r0, r6, r5, lsr #20-4 @ combine variant and revision | 208 | orr r6, r6, r5, lsr #20-4 @ combine variant and revision |
209 | ubfx r0, r0, #4, #12 @ primary part number | ||
208 | 210 | ||
211 | /* Cortex-A8 Errata */ | ||
212 | ldr r10, =0x00000c08 @ Cortex-A8 primary part number | ||
213 | teq r0, r10 | ||
214 | bne 2f | ||
209 | #ifdef CONFIG_ARM_ERRATA_430973 | 215 | #ifdef CONFIG_ARM_ERRATA_430973 |
210 | teq r5, #0x00100000 @ only present in r1p* | 216 | teq r5, #0x00100000 @ only present in r1p* |
211 | mrceq p15, 0, r10, c1, c0, 1 @ read aux control register | 217 | mrceq p15, 0, r10, c1, c0, 1 @ read aux control register |
@@ -213,21 +219,42 @@ __v7_setup: | |||
213 | mcreq p15, 0, r10, c1, c0, 1 @ write aux control register | 219 | mcreq p15, 0, r10, c1, c0, 1 @ write aux control register |
214 | #endif | 220 | #endif |
215 | #ifdef CONFIG_ARM_ERRATA_458693 | 221 | #ifdef CONFIG_ARM_ERRATA_458693 |
216 | teq r0, #0x20 @ only present in r2p0 | 222 | teq r6, #0x20 @ only present in r2p0 |
217 | mrceq p15, 0, r10, c1, c0, 1 @ read aux control register | 223 | mrceq p15, 0, r10, c1, c0, 1 @ read aux control register |
218 | orreq r10, r10, #(1 << 5) @ set L1NEON to 1 | 224 | orreq r10, r10, #(1 << 5) @ set L1NEON to 1 |
219 | orreq r10, r10, #(1 << 9) @ set PLDNOP to 1 | 225 | orreq r10, r10, #(1 << 9) @ set PLDNOP to 1 |
220 | mcreq p15, 0, r10, c1, c0, 1 @ write aux control register | 226 | mcreq p15, 0, r10, c1, c0, 1 @ write aux control register |
221 | #endif | 227 | #endif |
222 | #ifdef CONFIG_ARM_ERRATA_460075 | 228 | #ifdef CONFIG_ARM_ERRATA_460075 |
223 | teq r0, #0x20 @ only present in r2p0 | 229 | teq r6, #0x20 @ only present in r2p0 |
224 | mrceq p15, 1, r10, c9, c0, 2 @ read L2 cache aux ctrl register | 230 | mrceq p15, 1, r10, c9, c0, 2 @ read L2 cache aux ctrl register |
225 | tsteq r10, #1 << 22 | 231 | tsteq r10, #1 << 22 |
226 | orreq r10, r10, #(1 << 22) @ set the Write Allocate disable bit | 232 | orreq r10, r10, #(1 << 22) @ set the Write Allocate disable bit |
227 | mcreq p15, 1, r10, c9, c0, 2 @ write the L2 cache aux ctrl register | 233 | mcreq p15, 1, r10, c9, c0, 2 @ write the L2 cache aux ctrl register |
228 | #endif | 234 | #endif |
235 | b 3f | ||
236 | |||
237 | /* Cortex-A9 Errata */ | ||
238 | 2: ldr r10, =0x00000c09 @ Cortex-A9 primary part number | ||
239 | teq r0, r10 | ||
240 | bne 3f | ||
241 | #ifdef CONFIG_ARM_ERRATA_742230 | ||
242 | cmp r6, #0x22 @ only present up to r2p2 | ||
243 | mrcle p15, 0, r10, c15, c0, 1 @ read diagnostic register | ||
244 | orrle r10, r10, #1 << 4 @ set bit #4 | ||
245 | mcrle p15, 0, r10, c15, c0, 1 @ write diagnostic register | ||
246 | #endif | ||
247 | #ifdef CONFIG_ARM_ERRATA_742231 | ||
248 | teq r6, #0x20 @ present in r2p0 | ||
249 | teqne r6, #0x21 @ present in r2p1 | ||
250 | teqne r6, #0x22 @ present in r2p2 | ||
251 | mrceq p15, 0, r10, c15, c0, 1 @ read diagnostic register | ||
252 | orreq r10, r10, #1 << 12 @ set bit #12 | ||
253 | orreq r10, r10, #1 << 22 @ set bit #22 | ||
254 | mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register | ||
255 | #endif | ||
229 | 256 | ||
230 | 2: mov r10, #0 | 257 | 3: mov r10, #0 |
231 | #ifdef HARVARD_CACHE | 258 | #ifdef HARVARD_CACHE |
232 | mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate | 259 | mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate |
233 | #endif | 260 | #endif |
@@ -323,6 +350,29 @@ cpu_elf_name: | |||
323 | 350 | ||
324 | .section ".proc.info.init", #alloc, #execinstr | 351 | .section ".proc.info.init", #alloc, #execinstr |
325 | 352 | ||
353 | .type __v7_ca9mp_proc_info, #object | ||
354 | __v7_ca9mp_proc_info: | ||
355 | .long 0x410fc090 @ Required ID value | ||
356 | .long 0xff0ffff0 @ Mask for ID | ||
357 | .long PMD_TYPE_SECT | \ | ||
358 | PMD_SECT_AP_WRITE | \ | ||
359 | PMD_SECT_AP_READ | \ | ||
360 | PMD_FLAGS | ||
361 | .long PMD_TYPE_SECT | \ | ||
362 | PMD_SECT_XN | \ | ||
363 | PMD_SECT_AP_WRITE | \ | ||
364 | PMD_SECT_AP_READ | ||
365 | b __v7_ca9mp_setup | ||
366 | .long cpu_arch_name | ||
367 | .long cpu_elf_name | ||
368 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | ||
369 | .long cpu_v7_name | ||
370 | .long v7_processor_functions | ||
371 | .long v7wbi_tlb_fns | ||
372 | .long v6_user_fns | ||
373 | .long v7_cache_fns | ||
374 | .size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info | ||
375 | |||
326 | /* | 376 | /* |
327 | * Match any ARMv7 processor core. | 377 | * Match any ARMv7 processor core. |
328 | */ | 378 | */ |
diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c index 0691176899ff..72e09eb642dd 100644 --- a/arch/arm/oprofile/common.c +++ b/arch/arm/oprofile/common.c | |||
@@ -102,6 +102,7 @@ static int op_create_counter(int cpu, int event) | |||
102 | if (IS_ERR(pevent)) { | 102 | if (IS_ERR(pevent)) { |
103 | ret = PTR_ERR(pevent); | 103 | ret = PTR_ERR(pevent); |
104 | } else if (pevent->state != PERF_EVENT_STATE_ACTIVE) { | 104 | } else if (pevent->state != PERF_EVENT_STATE_ACTIVE) { |
105 | perf_event_release_kernel(pevent); | ||
105 | pr_warning("oprofile: failed to enable event %d " | 106 | pr_warning("oprofile: failed to enable event %d " |
106 | "on CPU %d\n", event, cpu); | 107 | "on CPU %d\n", event, cpu); |
107 | ret = -EBUSY; | 108 | ret = -EBUSY; |
@@ -365,6 +366,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops) | |||
365 | ret = init_driverfs(); | 366 | ret = init_driverfs(); |
366 | if (ret) { | 367 | if (ret) { |
367 | kfree(counter_config); | 368 | kfree(counter_config); |
369 | counter_config = NULL; | ||
368 | return ret; | 370 | return ret; |
369 | } | 371 | } |
370 | 372 | ||
@@ -402,7 +404,6 @@ void oprofile_arch_exit(void) | |||
402 | struct perf_event *event; | 404 | struct perf_event *event; |
403 | 405 | ||
404 | if (*perf_events) { | 406 | if (*perf_events) { |
405 | exit_driverfs(); | ||
406 | for_each_possible_cpu(cpu) { | 407 | for_each_possible_cpu(cpu) { |
407 | for (id = 0; id < perf_num_counters; ++id) { | 408 | for (id = 0; id < perf_num_counters; ++id) { |
408 | event = perf_events[cpu][id]; | 409 | event = perf_events[cpu][id]; |
@@ -413,8 +414,10 @@ void oprofile_arch_exit(void) | |||
413 | } | 414 | } |
414 | } | 415 | } |
415 | 416 | ||
416 | if (counter_config) | 417 | if (counter_config) { |
417 | kfree(counter_config); | 418 | kfree(counter_config); |
419 | exit_driverfs(); | ||
420 | } | ||
418 | } | 421 | } |
419 | #else | 422 | #else |
420 | int __init oprofile_arch_init(struct oprofile_operations *ops) | 423 | int __init oprofile_arch_init(struct oprofile_operations *ops) |
diff --git a/arch/arm/plat-mxc/Kconfig b/arch/arm/plat-mxc/Kconfig index 0527e65318f4..6785db4179b8 100644 --- a/arch/arm/plat-mxc/Kconfig +++ b/arch/arm/plat-mxc/Kconfig | |||
@@ -43,6 +43,7 @@ config ARCH_MXC91231 | |||
43 | config ARCH_MX5 | 43 | config ARCH_MX5 |
44 | bool "MX5-based" | 44 | bool "MX5-based" |
45 | select CPU_V7 | 45 | select CPU_V7 |
46 | select ARM_L1_CACHE_SHIFT_6 | ||
46 | help | 47 | help |
47 | This enables support for systems based on the Freescale i.MX51 family | 48 | This enables support for systems based on the Freescale i.MX51 family |
48 | 49 | ||
diff --git a/arch/arm/plat-mxc/include/mach/eukrea-baseboards.h b/arch/arm/plat-mxc/include/mach/eukrea-baseboards.h index 634e3f4c454d..656acb45d434 100644 --- a/arch/arm/plat-mxc/include/mach/eukrea-baseboards.h +++ b/arch/arm/plat-mxc/include/mach/eukrea-baseboards.h | |||
@@ -37,9 +37,9 @@ | |||
37 | * mach-mx5/eukrea_mbimx51-baseboard.c for cpuimx51 | 37 | * mach-mx5/eukrea_mbimx51-baseboard.c for cpuimx51 |
38 | */ | 38 | */ |
39 | 39 | ||
40 | extern void eukrea_mbimx25_baseboard_init(void); | 40 | extern void eukrea_mbimxsd25_baseboard_init(void); |
41 | extern void eukrea_mbimx27_baseboard_init(void); | 41 | extern void eukrea_mbimx27_baseboard_init(void); |
42 | extern void eukrea_mbimx35_baseboard_init(void); | 42 | extern void eukrea_mbimxsd35_baseboard_init(void); |
43 | extern void eukrea_mbimx51_baseboard_init(void); | 43 | extern void eukrea_mbimx51_baseboard_init(void); |
44 | 44 | ||
45 | #endif | 45 | #endif |
diff --git a/arch/arm/plat-mxc/tzic.c b/arch/arm/plat-mxc/tzic.c index b3da9aad4295..3703ab28257f 100644 --- a/arch/arm/plat-mxc/tzic.c +++ b/arch/arm/plat-mxc/tzic.c | |||
@@ -164,8 +164,9 @@ int tzic_enable_wake(int is_idle) | |||
164 | return -EAGAIN; | 164 | return -EAGAIN; |
165 | 165 | ||
166 | for (i = 0; i < 4; i++) { | 166 | for (i = 0; i < 4; i++) { |
167 | v = is_idle ? __raw_readl(TZIC_ENSET0(i)) : wakeup_intr[i]; | 167 | v = is_idle ? __raw_readl(tzic_base + TZIC_ENSET0(i)) : |
168 | __raw_writel(v, TZIC_WAKEUP0(i)); | 168 | wakeup_intr[i]; |
169 | __raw_writel(v, tzic_base + TZIC_WAKEUP0(i)); | ||
169 | } | 170 | } |
170 | 171 | ||
171 | return 0; | 172 | return 0; |
diff --git a/arch/arm/plat-nomadik/timer.c b/arch/arm/plat-nomadik/timer.c index ea3ca86c5283..aedf9c1d645e 100644 --- a/arch/arm/plat-nomadik/timer.c +++ b/arch/arm/plat-nomadik/timer.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * linux/arch/arm/mach-nomadik/timer.c | 2 | * linux/arch/arm/plat-nomadik/timer.c |
3 | * | 3 | * |
4 | * Copyright (C) 2008 STMicroelectronics | 4 | * Copyright (C) 2008 STMicroelectronics |
5 | * Copyright (C) 2010 Alessandro Rubini | 5 | * Copyright (C) 2010 Alessandro Rubini |
@@ -75,7 +75,7 @@ static void nmdk_clkevt_mode(enum clock_event_mode mode, | |||
75 | cr = readl(mtu_base + MTU_CR(1)); | 75 | cr = readl(mtu_base + MTU_CR(1)); |
76 | writel(0, mtu_base + MTU_LR(1)); | 76 | writel(0, mtu_base + MTU_LR(1)); |
77 | writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(1)); | 77 | writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(1)); |
78 | writel(0x2, mtu_base + MTU_IMSC); | 78 | writel(1 << 1, mtu_base + MTU_IMSC); |
79 | break; | 79 | break; |
80 | case CLOCK_EVT_MODE_SHUTDOWN: | 80 | case CLOCK_EVT_MODE_SHUTDOWN: |
81 | case CLOCK_EVT_MODE_UNUSED: | 81 | case CLOCK_EVT_MODE_UNUSED: |
@@ -131,25 +131,23 @@ void __init nmdk_timer_init(void) | |||
131 | { | 131 | { |
132 | unsigned long rate; | 132 | unsigned long rate; |
133 | struct clk *clk0; | 133 | struct clk *clk0; |
134 | struct clk *clk1; | 134 | u32 cr = MTU_CRn_32BITS; |
135 | u32 cr; | ||
136 | 135 | ||
137 | clk0 = clk_get_sys("mtu0", NULL); | 136 | clk0 = clk_get_sys("mtu0", NULL); |
138 | BUG_ON(IS_ERR(clk0)); | 137 | BUG_ON(IS_ERR(clk0)); |
139 | 138 | ||
140 | clk1 = clk_get_sys("mtu1", NULL); | ||
141 | BUG_ON(IS_ERR(clk1)); | ||
142 | |||
143 | clk_enable(clk0); | 139 | clk_enable(clk0); |
144 | clk_enable(clk1); | ||
145 | 140 | ||
146 | /* | 141 | /* |
147 | * Tick rate is 2.4MHz for Nomadik and 110MHz for ux500: | 142 | * Tick rate is 2.4MHz for Nomadik and 2.4Mhz, 100MHz or 133 MHz |
148 | * use a divide-by-16 counter if it's more than 16MHz | 143 | * for ux500. |
144 | * Use a divide-by-16 counter if the tick rate is more than 32MHz. | ||
145 | * At 32 MHz, the timer (with 32 bit counter) can be programmed | ||
146 | * to wake-up at a max 127s a head in time. Dividing a 2.4 MHz timer | ||
147 | * with 16 gives too low timer resolution. | ||
149 | */ | 148 | */ |
150 | cr = MTU_CRn_32BITS;; | ||
151 | rate = clk_get_rate(clk0); | 149 | rate = clk_get_rate(clk0); |
152 | if (rate > 16 << 20) { | 150 | if (rate > 32000000) { |
153 | rate /= 16; | 151 | rate /= 16; |
154 | cr |= MTU_CRn_PRESCALE_16; | 152 | cr |= MTU_CRn_PRESCALE_16; |
155 | } else { | 153 | } else { |
@@ -170,15 +168,8 @@ void __init nmdk_timer_init(void) | |||
170 | pr_err("timer: failed to initialize clock source %s\n", | 168 | pr_err("timer: failed to initialize clock source %s\n", |
171 | nmdk_clksrc.name); | 169 | nmdk_clksrc.name); |
172 | 170 | ||
173 | /* Timer 1 is used for events, fix according to rate */ | 171 | /* Timer 1 is used for events */ |
174 | cr = MTU_CRn_32BITS; | 172 | |
175 | rate = clk_get_rate(clk1); | ||
176 | if (rate > 16 << 20) { | ||
177 | rate /= 16; | ||
178 | cr |= MTU_CRn_PRESCALE_16; | ||
179 | } else { | ||
180 | cr |= MTU_CRn_PRESCALE_1; | ||
181 | } | ||
182 | clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE); | 173 | clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE); |
183 | 174 | ||
184 | writel(cr | MTU_CRn_ONESHOT, mtu_base + MTU_CR(1)); /* off, currently */ | 175 | writel(cr | MTU_CRn_ONESHOT, mtu_base + MTU_CR(1)); /* off, currently */ |
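A rough check of the new 32 MHz threshold: a 32-bit counter wraps after 2^32 cycles, i.e. roughly 1790 s at 2.4 MHz but only about two minutes at 32 MHz, so prescaling the slow Nomadik clock by 16 would merely cost resolution, while clocks faster than 32 MHz are divided by 16 to keep a usefully long maximum wake-up interval.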
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig index e39a417a368d..a92cb499313f 100644 --- a/arch/arm/plat-omap/Kconfig +++ b/arch/arm/plat-omap/Kconfig | |||
@@ -33,7 +33,7 @@ config OMAP_DEBUG_DEVICES | |||
33 | config OMAP_DEBUG_LEDS | 33 | config OMAP_DEBUG_LEDS |
34 | bool | 34 | bool |
35 | depends on OMAP_DEBUG_DEVICES | 35 | depends on OMAP_DEBUG_DEVICES |
36 | default y if LEDS | 36 | default y if LEDS_CLASS |
37 | 37 | ||
38 | config OMAP_RESET_CLOCKS | 38 | config OMAP_RESET_CLOCKS |
39 | bool "Reset unused clocks during boot" | 39 | bool "Reset unused clocks during boot" |
diff --git a/arch/arm/plat-omap/mcbsp.c b/arch/arm/plat-omap/mcbsp.c index e31496e35b0f..0c8612fd8312 100644 --- a/arch/arm/plat-omap/mcbsp.c +++ b/arch/arm/plat-omap/mcbsp.c | |||
@@ -156,7 +156,7 @@ static irqreturn_t omap_mcbsp_rx_irq_handler(int irq, void *dev_id) | |||
156 | /* Writing zero to RSYNC_ERR clears the IRQ */ | 156 | /* Writing zero to RSYNC_ERR clears the IRQ */ |
157 | MCBSP_WRITE(mcbsp_rx, SPCR1, MCBSP_READ_CACHE(mcbsp_rx, SPCR1)); | 157 | MCBSP_WRITE(mcbsp_rx, SPCR1, MCBSP_READ_CACHE(mcbsp_rx, SPCR1)); |
158 | } else { | 158 | } else { |
159 | complete(&mcbsp_rx->tx_irq_completion); | 159 | complete(&mcbsp_rx->rx_irq_completion); |
160 | } | 160 | } |
161 | 161 | ||
162 | return IRQ_HANDLED; | 162 | return IRQ_HANDLED; |
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c index 226b2e858d6c..10b3b4c63372 100644 --- a/arch/arm/plat-omap/sram.c +++ b/arch/arm/plat-omap/sram.c | |||
@@ -220,20 +220,7 @@ void __init omap_map_sram(void) | |||
220 | if (omap_sram_size == 0) | 220 | if (omap_sram_size == 0) |
221 | return; | 221 | return; |
222 | 222 | ||
223 | if (cpu_is_omap24xx()) { | ||
224 | omap_sram_io_desc[0].virtual = OMAP2_SRAM_VA; | ||
225 | |||
226 | base = OMAP2_SRAM_PA; | ||
227 | base = ROUND_DOWN(base, PAGE_SIZE); | ||
228 | omap_sram_io_desc[0].pfn = __phys_to_pfn(base); | ||
229 | } | ||
230 | |||
231 | if (cpu_is_omap34xx()) { | 223 | if (cpu_is_omap34xx()) { |
232 | omap_sram_io_desc[0].virtual = OMAP3_SRAM_VA; | ||
233 | base = OMAP3_SRAM_PA; | ||
234 | base = ROUND_DOWN(base, PAGE_SIZE); | ||
235 | omap_sram_io_desc[0].pfn = __phys_to_pfn(base); | ||
236 | |||
237 | /* | 224 | /* |
238 | * SRAM must be marked as non-cached on OMAP3 since the | 225 | * SRAM must be marked as non-cached on OMAP3 since the |
239 | * CORE DPLL M2 divider change code (in SRAM) runs with the | 226 | * CORE DPLL M2 divider change code (in SRAM) runs with the |
@@ -244,13 +231,11 @@ void __init omap_map_sram(void) | |||
244 | omap_sram_io_desc[0].type = MT_MEMORY_NONCACHED; | 231 | omap_sram_io_desc[0].type = MT_MEMORY_NONCACHED; |
245 | } | 232 | } |
246 | 233 | ||
247 | if (cpu_is_omap44xx()) { | 234 | omap_sram_io_desc[0].virtual = omap_sram_base; |
248 | omap_sram_io_desc[0].virtual = OMAP4_SRAM_VA; | 235 | base = omap_sram_start; |
249 | base = OMAP4_SRAM_PA; | 236 | base = ROUND_DOWN(base, PAGE_SIZE); |
250 | base = ROUND_DOWN(base, PAGE_SIZE); | 237 | omap_sram_io_desc[0].pfn = __phys_to_pfn(base); |
251 | omap_sram_io_desc[0].pfn = __phys_to_pfn(base); | 238 | omap_sram_io_desc[0].length = ROUND_DOWN(omap_sram_size, PAGE_SIZE); |
252 | } | ||
253 | omap_sram_io_desc[0].length = 1024 * 1024; /* Use section desc */ | ||
254 | iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc)); | 239 | iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc)); |
255 | 240 | ||
256 | printk(KERN_INFO "SRAM: Mapped pa 0x%08lx to va 0x%08lx size: 0x%lx\n", | 241 | printk(KERN_INFO "SRAM: Mapped pa 0x%08lx to va 0x%08lx size: 0x%lx\n", |
diff --git a/arch/arm/plat-pxa/pwm.c b/arch/arm/plat-pxa/pwm.c index 0732c6c8d511..ef32686feef9 100644 --- a/arch/arm/plat-pxa/pwm.c +++ b/arch/arm/plat-pxa/pwm.c | |||
@@ -176,7 +176,7 @@ static inline void __add_pwm(struct pwm_device *pwm) | |||
176 | 176 | ||
177 | static int __devinit pwm_probe(struct platform_device *pdev) | 177 | static int __devinit pwm_probe(struct platform_device *pdev) |
178 | { | 178 | { |
179 | struct platform_device_id *id = platform_get_device_id(pdev); | 179 | const struct platform_device_id *id = platform_get_device_id(pdev); |
180 | struct pwm_device *pwm, *secondary = NULL; | 180 | struct pwm_device *pwm, *secondary = NULL; |
181 | struct resource *r; | 181 | struct resource *r; |
182 | int ret = 0; | 182 | int ret = 0; |
diff --git a/arch/arm/plat-s5p/dev-fimc0.c b/arch/arm/plat-s5p/dev-fimc0.c index d3f1a9b5d2b5..608770fc1531 100644 --- a/arch/arm/plat-s5p/dev-fimc0.c +++ b/arch/arm/plat-s5p/dev-fimc0.c | |||
@@ -10,6 +10,7 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
13 | #include <linux/dma-mapping.h> | ||
13 | #include <linux/platform_device.h> | 14 | #include <linux/platform_device.h> |
14 | #include <linux/interrupt.h> | 15 | #include <linux/interrupt.h> |
15 | #include <linux/ioport.h> | 16 | #include <linux/ioport.h> |
@@ -18,7 +19,7 @@ | |||
18 | static struct resource s5p_fimc0_resource[] = { | 19 | static struct resource s5p_fimc0_resource[] = { |
19 | [0] = { | 20 | [0] = { |
20 | .start = S5P_PA_FIMC0, | 21 | .start = S5P_PA_FIMC0, |
21 | .end = S5P_PA_FIMC0 + SZ_1M - 1, | 22 | .end = S5P_PA_FIMC0 + SZ_4K - 1, |
22 | .flags = IORESOURCE_MEM, | 23 | .flags = IORESOURCE_MEM, |
23 | }, | 24 | }, |
24 | [1] = { | 25 | [1] = { |
@@ -28,9 +29,15 @@ static struct resource s5p_fimc0_resource[] = { | |||
28 | }, | 29 | }, |
29 | }; | 30 | }; |
30 | 31 | ||
32 | static u64 s5p_fimc0_dma_mask = DMA_BIT_MASK(32); | ||
33 | |||
31 | struct platform_device s5p_device_fimc0 = { | 34 | struct platform_device s5p_device_fimc0 = { |
32 | .name = "s5p-fimc", | 35 | .name = "s5p-fimc", |
33 | .id = 0, | 36 | .id = 0, |
34 | .num_resources = ARRAY_SIZE(s5p_fimc0_resource), | 37 | .num_resources = ARRAY_SIZE(s5p_fimc0_resource), |
35 | .resource = s5p_fimc0_resource, | 38 | .resource = s5p_fimc0_resource, |
39 | .dev = { | ||
40 | .dma_mask = &s5p_fimc0_dma_mask, | ||
41 | .coherent_dma_mask = DMA_BIT_MASK(32), | ||
42 | }, | ||
36 | }; | 43 | }; |
diff --git a/arch/arm/plat-s5p/dev-fimc1.c b/arch/arm/plat-s5p/dev-fimc1.c index 41bd6986d0ad..76e3a97a87d3 100644 --- a/arch/arm/plat-s5p/dev-fimc1.c +++ b/arch/arm/plat-s5p/dev-fimc1.c | |||
@@ -10,6 +10,7 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
13 | #include <linux/dma-mapping.h> | ||
13 | #include <linux/platform_device.h> | 14 | #include <linux/platform_device.h> |
14 | #include <linux/interrupt.h> | 15 | #include <linux/interrupt.h> |
15 | #include <linux/ioport.h> | 16 | #include <linux/ioport.h> |
@@ -18,7 +19,7 @@ | |||
18 | static struct resource s5p_fimc1_resource[] = { | 19 | static struct resource s5p_fimc1_resource[] = { |
19 | [0] = { | 20 | [0] = { |
20 | .start = S5P_PA_FIMC1, | 21 | .start = S5P_PA_FIMC1, |
21 | .end = S5P_PA_FIMC1 + SZ_1M - 1, | 22 | .end = S5P_PA_FIMC1 + SZ_4K - 1, |
22 | .flags = IORESOURCE_MEM, | 23 | .flags = IORESOURCE_MEM, |
23 | }, | 24 | }, |
24 | [1] = { | 25 | [1] = { |
@@ -28,9 +29,15 @@ static struct resource s5p_fimc1_resource[] = { | |||
28 | }, | 29 | }, |
29 | }; | 30 | }; |
30 | 31 | ||
32 | static u64 s5p_fimc1_dma_mask = DMA_BIT_MASK(32); | ||
33 | |||
31 | struct platform_device s5p_device_fimc1 = { | 34 | struct platform_device s5p_device_fimc1 = { |
32 | .name = "s5p-fimc", | 35 | .name = "s5p-fimc", |
33 | .id = 1, | 36 | .id = 1, |
34 | .num_resources = ARRAY_SIZE(s5p_fimc1_resource), | 37 | .num_resources = ARRAY_SIZE(s5p_fimc1_resource), |
35 | .resource = s5p_fimc1_resource, | 38 | .resource = s5p_fimc1_resource, |
39 | .dev = { | ||
40 | .dma_mask = &s5p_fimc1_dma_mask, | ||
41 | .coherent_dma_mask = DMA_BIT_MASK(32), | ||
42 | }, | ||
36 | }; | 43 | }; |
diff --git a/arch/arm/plat-s5p/dev-fimc2.c b/arch/arm/plat-s5p/dev-fimc2.c index dfddeda6d4a3..24d29816fa2c 100644 --- a/arch/arm/plat-s5p/dev-fimc2.c +++ b/arch/arm/plat-s5p/dev-fimc2.c | |||
@@ -10,6 +10,7 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
13 | #include <linux/dma-mapping.h> | ||
13 | #include <linux/platform_device.h> | 14 | #include <linux/platform_device.h> |
14 | #include <linux/interrupt.h> | 15 | #include <linux/interrupt.h> |
15 | #include <linux/ioport.h> | 16 | #include <linux/ioport.h> |
@@ -18,7 +19,7 @@ | |||
18 | static struct resource s5p_fimc2_resource[] = { | 19 | static struct resource s5p_fimc2_resource[] = { |
19 | [0] = { | 20 | [0] = { |
20 | .start = S5P_PA_FIMC2, | 21 | .start = S5P_PA_FIMC2, |
21 | .end = S5P_PA_FIMC2 + SZ_1M - 1, | 22 | .end = S5P_PA_FIMC2 + SZ_4K - 1, |
22 | .flags = IORESOURCE_MEM, | 23 | .flags = IORESOURCE_MEM, |
23 | }, | 24 | }, |
24 | [1] = { | 25 | [1] = { |
@@ -28,9 +29,15 @@ static struct resource s5p_fimc2_resource[] = { | |||
28 | }, | 29 | }, |
29 | }; | 30 | }; |
30 | 31 | ||
32 | static u64 s5p_fimc2_dma_mask = DMA_BIT_MASK(32); | ||
33 | |||
31 | struct platform_device s5p_device_fimc2 = { | 34 | struct platform_device s5p_device_fimc2 = { |
32 | .name = "s5p-fimc", | 35 | .name = "s5p-fimc", |
33 | .id = 2, | 36 | .id = 2, |
34 | .num_resources = ARRAY_SIZE(s5p_fimc2_resource), | 37 | .num_resources = ARRAY_SIZE(s5p_fimc2_resource), |
35 | .resource = s5p_fimc2_resource, | 38 | .resource = s5p_fimc2_resource, |
39 | .dev = { | ||
40 | .dma_mask = &s5p_fimc2_dma_mask, | ||
41 | .coherent_dma_mask = DMA_BIT_MASK(32), | ||
42 | }, | ||
36 | }; | 43 | }; |
diff --git a/arch/arm/plat-samsung/gpio-config.c b/arch/arm/plat-samsung/gpio-config.c index 57b68a50f45e..e3d41eaed1ff 100644 --- a/arch/arm/plat-samsung/gpio-config.c +++ b/arch/arm/plat-samsung/gpio-config.c | |||
@@ -273,13 +273,13 @@ s5p_gpio_drvstr_t s5p_gpio_get_drvstr(unsigned int pin) | |||
273 | if (!chip) | 273 | if (!chip) |
274 | return -EINVAL; | 274 | return -EINVAL; |
275 | 275 | ||
276 | off = chip->chip.base - pin; | 276 | off = pin - chip->chip.base; |
277 | shift = off * 2; | 277 | shift = off * 2; |
278 | reg = chip->base + 0x0C; | 278 | reg = chip->base + 0x0C; |
279 | 279 | ||
280 | drvstr = __raw_readl(reg); | 280 | drvstr = __raw_readl(reg); |
281 | drvstr = 0xffff & (0x3 << shift); | ||
282 | drvstr = drvstr >> shift; | 281 | drvstr = drvstr >> shift; |
282 | drvstr &= 0x3; | ||
283 | 283 | ||
284 | return (__force s5p_gpio_drvstr_t)drvstr; | 284 | return (__force s5p_gpio_drvstr_t)drvstr; |
285 | } | 285 | } |
@@ -296,11 +296,12 @@ int s5p_gpio_set_drvstr(unsigned int pin, s5p_gpio_drvstr_t drvstr) | |||
296 | if (!chip) | 296 | if (!chip) |
297 | return -EINVAL; | 297 | return -EINVAL; |
298 | 298 | ||
299 | off = chip->chip.base - pin; | 299 | off = pin - chip->chip.base; |
300 | shift = off * 2; | 300 | shift = off * 2; |
301 | reg = chip->base + 0x0C; | 301 | reg = chip->base + 0x0C; |
302 | 302 | ||
303 | tmp = __raw_readl(reg); | 303 | tmp = __raw_readl(reg); |
304 | tmp &= ~(0x3 << shift); | ||
304 | tmp |= drvstr << shift; | 305 | tmp |= drvstr << shift; |
305 | 306 | ||
306 | __raw_writel(tmp, reg); | 307 | __raw_writel(tmp, reg); |
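Both hunks above fix the same two mistakes: the per-pin offset was computed as base - pin instead of pin - base, and the 2-bit strength field was mishandled (the old get path overwrote the value it had just read with the mask itself, and the old set path never cleared the previous field). Stripped of the driver specifics, the corrected read-modify-write pattern is as follows; this is a stand-alone illustration, not the patched code itself:

#include <stdint.h>

static uint32_t get_2bit_field(uint32_t regval, unsigned int pin, unsigned int base)
{
	unsigned int shift = (pin - base) * 2;		/* pin - base, not base - pin */

	return (regval >> shift) & 0x3;			/* shift first, then mask to two bits */
}

static uint32_t set_2bit_field(uint32_t regval, unsigned int pin,
			       unsigned int base, uint32_t strength)
{
	unsigned int shift = (pin - base) * 2;

	regval &= ~(0x3u << shift);			/* clear the old value first */
	regval |= (strength & 0x3u) << shift;		/* then OR in the new one */
	return regval;
}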
diff --git a/arch/arm/plat-samsung/include/plat/gpio-cfg.h b/arch/arm/plat-samsung/include/plat/gpio-cfg.h index db4112c6f2be..1c6b92947c5d 100644 --- a/arch/arm/plat-samsung/include/plat/gpio-cfg.h +++ b/arch/arm/plat-samsung/include/plat/gpio-cfg.h | |||
@@ -143,12 +143,12 @@ extern s3c_gpio_pull_t s3c_gpio_getpull(unsigned int pin); | |||
143 | /* Define values for the drvstr available for each gpio pin. | 143 | /* Define values for the drvstr available for each gpio pin. |
144 | * | 144 | * |
145 | * These values control the value of the output signal driver strength, | 145 | * These values control the value of the output signal driver strength, |
146 | * configurable on most pins on the S5C series. | 146 | * configurable on most pins on the S5P series. |
147 | */ | 147 | */ |
148 | #define S5P_GPIO_DRVSTR_LV1 ((__force s5p_gpio_drvstr_t)0x00) | 148 | #define S5P_GPIO_DRVSTR_LV1 ((__force s5p_gpio_drvstr_t)0x0) |
149 | #define S5P_GPIO_DRVSTR_LV2 ((__force s5p_gpio_drvstr_t)0x01) | 149 | #define S5P_GPIO_DRVSTR_LV2 ((__force s5p_gpio_drvstr_t)0x2) |
150 | #define S5P_GPIO_DRVSTR_LV3 ((__force s5p_gpio_drvstr_t)0x10) | 150 | #define S5P_GPIO_DRVSTR_LV3 ((__force s5p_gpio_drvstr_t)0x1) |
151 | #define S5P_GPIO_DRVSTR_LV4 ((__force s5p_gpio_drvstr_t)0x11) | 151 | #define S5P_GPIO_DRVSTR_LV4 ((__force s5p_gpio_drvstr_t)0x3) |
152 | 152 | ||
153 | /** | 153 | /** |
154 | * s5c_gpio_get_drvstr() - get the driver strength value of a gpio pin | 154 | * s5c_gpio_get_drvstr() - get the driver strength value of a gpio pin |
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types index 48cbdcb6bbd4..55590a4d87c9 100644 --- a/arch/arm/tools/mach-types +++ b/arch/arm/tools/mach-types | |||
@@ -12,7 +12,7 @@ | |||
12 | # | 12 | # |
13 | # http://www.arm.linux.org.uk/developer/machines/?action=new | 13 | # http://www.arm.linux.org.uk/developer/machines/?action=new |
14 | # | 14 | # |
15 | # Last update: Mon Jul 12 21:10:14 2010 | 15 | # Last update: Thu Sep 9 22:43:01 2010 |
16 | # | 16 | # |
17 | # machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number | 17 | # machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number |
18 | # | 18 | # |
@@ -2622,7 +2622,7 @@ kraken MACH_KRAKEN KRAKEN 2634 | |||
2622 | gw2388 MACH_GW2388 GW2388 2635 | 2622 | gw2388 MACH_GW2388 GW2388 2635 |
2623 | jadecpu MACH_JADECPU JADECPU 2636 | 2623 | jadecpu MACH_JADECPU JADECPU 2636 |
2624 | carlisle MACH_CARLISLE CARLISLE 2637 | 2624 | carlisle MACH_CARLISLE CARLISLE 2637 |
2625 | lux_sf9 MACH_LUX_SFT9 LUX_SFT9 2638 | 2625 | lux_sf9 MACH_LUX_SF9 LUX_SF9 2638 |
2626 | nemid_tb MACH_NEMID_TB NEMID_TB 2639 | 2626 | nemid_tb MACH_NEMID_TB NEMID_TB 2639 |
2627 | terrier MACH_TERRIER TERRIER 2640 | 2627 | terrier MACH_TERRIER TERRIER 2640 |
2628 | turbot MACH_TURBOT TURBOT 2641 | 2628 | turbot MACH_TURBOT TURBOT 2641 |
@@ -2950,3 +2950,97 @@ davinci_dm365_dvr MACH_DAVINCI_DM365_DVR DAVINCI_DM365_DVR 2963 | |||
2950 | netviz MACH_NETVIZ NETVIZ 2964 | 2950 | netviz MACH_NETVIZ NETVIZ 2964 |
2951 | flexibity MACH_FLEXIBITY FLEXIBITY 2965 | 2951 | flexibity MACH_FLEXIBITY FLEXIBITY 2965 |
2952 | wlan_computer MACH_WLAN_COMPUTER WLAN_COMPUTER 2966 | 2952 | wlan_computer MACH_WLAN_COMPUTER WLAN_COMPUTER 2966 |
2953 | lpc24xx MACH_LPC24XX LPC24XX 2967 | ||
2954 | spica MACH_SPICA SPICA 2968 | ||
2955 | gpsdisplay MACH_GPSDISPLAY GPSDISPLAY 2969 | ||
2956 | bipnet MACH_BIPNET BIPNET 2970 | ||
2957 | overo_ctu_inertial MACH_OVERO_CTU_INERTIAL OVERO_CTU_INERTIAL 2971 | ||
2958 | davinci_dm355_mmm MACH_DAVINCI_DM355_MMM DAVINCI_DM355_MMM 2972 | ||
2959 | pc9260_v2 MACH_PC9260_V2 PC9260_V2 2973 | ||
2960 | ptx7545 MACH_PTX7545 PTX7545 2974 | ||
2961 | tm_efdc MACH_TM_EFDC TM_EFDC 2975 | ||
2962 | omap3_waldo1 MACH_OMAP3_WALDO1 OMAP3_WALDO1 2977 | ||
2963 | flyer MACH_FLYER FLYER 2978 | ||
2964 | tornado3240 MACH_TORNADO3240 TORNADO3240 2979 | ||
2965 | soli_01 MACH_SOLI_01 SOLI_01 2980 | ||
2966 | omapl138_europalc MACH_OMAPL138_EUROPALC OMAPL138_EUROPALC 2981 | ||
2967 | helios_v1 MACH_HELIOS_V1 HELIOS_V1 2982 | ||
2968 | netspace_lite_v2 MACH_NETSPACE_LITE_V2 NETSPACE_LITE_V2 2983 | ||
2969 | ssc MACH_SSC SSC 2984 | ||
2970 | premierwave_en MACH_PREMIERWAVE_EN PREMIERWAVE_EN 2985 | ||
2971 | wasabi MACH_WASABI WASABI 2986 | ||
2972 | vivow MACH_VIVOW VIVOW 2987 | ||
2973 | mx50_rdp MACH_MX50_RDP MX50_RDP 2988 | ||
2974 | universal MACH_UNIVERSAL UNIVERSAL 2989 | ||
2975 | real6410 MACH_REAL6410 REAL6410 2990 | ||
2976 | spx_sakura MACH_SPX_SAKURA SPX_SAKURA 2991 | ||
2977 | ij3k_2440 MACH_IJ3K_2440 IJ3K_2440 2992 | ||
2978 | omap3_bc10 MACH_OMAP3_BC10 OMAP3_BC10 2993 | ||
2979 | thebe MACH_THEBE THEBE 2994 | ||
2980 | rv082 MACH_RV082 RV082 2995 | ||
2981 | armlguest MACH_ARMLGUEST ARMLGUEST 2996 | ||
2982 | tjinc1000 MACH_TJINC1000 TJINC1000 2997 | ||
2983 | dockstar MACH_DOCKSTAR DOCKSTAR 2998 | ||
2984 | ax8008 MACH_AX8008 AX8008 2999 | ||
2985 | gnet_sgce MACH_GNET_SGCE GNET_SGCE 3000 | ||
2986 | pxwnas_500_1000 MACH_PXWNAS_500_1000 PXWNAS_500_1000 3001 | ||
2987 | ea20 MACH_EA20 EA20 3002 | ||
2988 | awm2 MACH_AWM2 AWM2 3003 | ||
2989 | ti8148evm MACH_TI8148EVM TI8148EVM 3004 | ||
2990 | tegra_seaboard MACH_TEGRA_SEABOARD TEGRA_SEABOARD 3005 | ||
2991 | linkstation_chlv2 MACH_LINKSTATION_CHLV2 LINKSTATION_CHLV2 3006 | ||
2992 | tera_pro2_rack MACH_TERA_PRO2_RACK TERA_PRO2_RACK 3007 | ||
2993 | rubys MACH_RUBYS RUBYS 3008 | ||
2994 | aquarius MACH_AQUARIUS AQUARIUS 3009 | ||
2995 | mx53_ard MACH_MX53_ARD MX53_ARD 3010 | ||
2996 | mx53_smd MACH_MX53_SMD MX53_SMD 3011 | ||
2997 | lswxl MACH_LSWXL LSWXL 3012 | ||
2998 | dove_avng_v3 MACH_DOVE_AVNG_V3 DOVE_AVNG_V3 3013 | ||
2999 | sdi_ess_9263 MACH_SDI_ESS_9263 SDI_ESS_9263 3014 | ||
3000 | jocpu550 MACH_JOCPU550 JOCPU550 3015 | ||
3001 | msm8x60_rumi3 MACH_MSM8X60_RUMI3 MSM8X60_RUMI3 3016 | ||
3002 | msm8x60_ffa MACH_MSM8X60_FFA MSM8X60_FFA 3017 | ||
3003 | yanomami MACH_YANOMAMI YANOMAMI 3018 | ||
3004 | gta04 MACH_GTA04 GTA04 3019 | ||
3005 | cm_a510 MACH_CM_A510 CM_A510 3020 | ||
3006 | omap3_rfs200 MACH_OMAP3_RFS200 OMAP3_RFS200 3021 | ||
3007 | kx33xx MACH_KX33XX KX33XX 3022 | ||
3008 | ptx7510 MACH_PTX7510 PTX7510 3023 | ||
3009 | top9000 MACH_TOP9000 TOP9000 3024 | ||
3010 | teenote MACH_TEENOTE TEENOTE 3025 | ||
3011 | ts3 MACH_TS3 TS3 3026 | ||
3012 | a0 MACH_A0 A0 3027 | ||
3013 | fsm9xxx_surf MACH_FSM9XXX_SURF FSM9XXX_SURF 3028 | ||
3014 | fsm9xxx_ffa MACH_FSM9XXX_FFA FSM9XXX_FFA 3029 | ||
3015 | frrhwcdma60w MACH_FRRHWCDMA60W FRRHWCDMA60W 3030 | ||
3016 | remus MACH_REMUS REMUS 3031 | ||
3017 | at91cap7xdk MACH_AT91CAP7XDK AT91CAP7XDK 3032 | ||
3018 | at91cap7stk MACH_AT91CAP7STK AT91CAP7STK 3033 | ||
3019 | kt_sbc_sam9_1 MACH_KT_SBC_SAM9_1 KT_SBC_SAM9_1 3034 | ||
3020 | oratisrouter MACH_ORATISROUTER ORATISROUTER 3035 | ||
3021 | armada_xp_db MACH_ARMADA_XP_DB ARMADA_XP_DB 3036 | ||
3022 | spdm MACH_SPDM SPDM 3037 | ||
3023 | gtib MACH_GTIB GTIB 3038 | ||
3024 | dgm3240 MACH_DGM3240 DGM3240 3039 | ||
3025 | atlas_i_lpe MACH_ATLAS_I_LPE ATLAS_I_LPE 3040 | ||
3026 | htcmega MACH_HTCMEGA HTCMEGA 3041 | ||
3027 | tricorder MACH_TRICORDER TRICORDER 3042 | ||
3028 | tx28 MACH_TX28 TX28 3043 | ||
3029 | bstbrd MACH_BSTBRD BSTBRD 3044 | ||
3030 | pwb3090 MACH_PWB3090 PWB3090 3045 | ||
3031 | idea6410 MACH_IDEA6410 IDEA6410 3046 | ||
3032 | qbc9263 MACH_QBC9263 QBC9263 3047 | ||
3033 | borabora MACH_BORABORA BORABORA 3048 | ||
3034 | valdez MACH_VALDEZ VALDEZ 3049 | ||
3035 | ls9g20 MACH_LS9G20 LS9G20 3050 | ||
3036 | mios_v1 MACH_MIOS_V1 MIOS_V1 3051 | ||
3037 | s5pc110_crespo MACH_S5PC110_CRESPO S5PC110_CRESPO 3052 | ||
3038 | controltek9g20 MACH_CONTROLTEK9G20 CONTROLTEK9G20 3053 | ||
3039 | tin307 MACH_TIN307 TIN307 3054 | ||
3040 | tin510 MACH_TIN510 TIN510 3055 | ||
3041 | bluecheese MACH_BLUECHEESE BLUECHEESE 3057 | ||
3042 | tem3x30 MACH_TEM3X30 TEM3X30 3058 | ||
3043 | harvest_desoto MACH_HARVEST_DESOTO HARVEST_DESOTO 3059 | ||
3044 | msm8x60_qrdc MACH_MSM8X60_QRDC MSM8X60_QRDC 3060 | ||
3045 | spear900 MACH_SPEAR900 SPEAR900 3061 | ||
3046 | pcontrol_g20 MACH_PCONTROL_G20 PCONTROL_G20 3062 | ||
diff --git a/arch/avr32/kernel/module.c b/arch/avr32/kernel/module.c index 98f94d041d9c..a727f54d64d6 100644 --- a/arch/avr32/kernel/module.c +++ b/arch/avr32/kernel/module.c | |||
@@ -314,10 +314,9 @@ int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, | |||
314 | vfree(module->arch.syminfo); | 314 | vfree(module->arch.syminfo); |
315 | module->arch.syminfo = NULL; | 315 | module->arch.syminfo = NULL; |
316 | 316 | ||
317 | return module_bug_finalize(hdr, sechdrs, module); | 317 | return 0; |
318 | } | 318 | } |
319 | 319 | ||
320 | void module_arch_cleanup(struct module *module) | 320 | void module_arch_cleanup(struct module *module) |
321 | { | 321 | { |
322 | module_bug_cleanup(module); | ||
323 | } | 322 | } |
diff --git a/arch/frv/kernel/signal.c b/arch/frv/kernel/signal.c index 0974c0ecc594..bab01298b58e 100644 --- a/arch/frv/kernel/signal.c +++ b/arch/frv/kernel/signal.c | |||
@@ -121,6 +121,9 @@ static int restore_sigcontext(struct sigcontext __user *sc, int *_gr8) | |||
121 | struct user_context *user = current->thread.user; | 121 | struct user_context *user = current->thread.user; |
122 | unsigned long tbr, psr; | 122 | unsigned long tbr, psr; |
123 | 123 | ||
124 | /* Always make any pending restarted system calls return -EINTR */ | ||
125 | current_thread_info()->restart_block.fn = do_no_restart_syscall; | ||
126 | |||
124 | tbr = user->i.tbr; | 127 | tbr = user->i.tbr; |
125 | psr = user->i.psr; | 128 | psr = user->i.psr; |
126 | if (copy_from_user(user, &sc->sc_context, sizeof(sc->sc_context))) | 129 | if (copy_from_user(user, &sc->sc_context, sizeof(sc->sc_context))) |
@@ -250,6 +253,8 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set) | |||
250 | struct sigframe __user *frame; | 253 | struct sigframe __user *frame; |
251 | int rsig; | 254 | int rsig; |
252 | 255 | ||
256 | set_fs(USER_DS); | ||
257 | |||
253 | frame = get_sigframe(ka, sizeof(*frame)); | 258 | frame = get_sigframe(ka, sizeof(*frame)); |
254 | 259 | ||
255 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 260 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
@@ -293,22 +298,23 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set) | |||
293 | (unsigned long) (frame->retcode + 2)); | 298 | (unsigned long) (frame->retcode + 2)); |
294 | } | 299 | } |
295 | 300 | ||
296 | /* set up registers for signal handler */ | 301 | /* Set up registers for the signal handler */ |
297 | __frame->sp = (unsigned long) frame; | ||
298 | __frame->lr = (unsigned long) &frame->retcode; | ||
299 | __frame->gr8 = sig; | ||
300 | |||
301 | if (current->personality & FDPIC_FUNCPTRS) { | 302 | if (current->personality & FDPIC_FUNCPTRS) { |
302 | struct fdpic_func_descriptor __user *funcptr = | 303 | struct fdpic_func_descriptor __user *funcptr = |
303 | (struct fdpic_func_descriptor __user *) ka->sa.sa_handler; | 304 | (struct fdpic_func_descriptor __user *) ka->sa.sa_handler; |
304 | __get_user(__frame->pc, &funcptr->text); | 305 | struct fdpic_func_descriptor desc; |
305 | __get_user(__frame->gr15, &funcptr->GOT); | 306 | if (copy_from_user(&desc, funcptr, sizeof(desc))) |
307 | goto give_sigsegv; | ||
308 | __frame->pc = desc.text; | ||
309 | __frame->gr15 = desc.GOT; | ||
306 | } else { | 310 | } else { |
307 | __frame->pc = (unsigned long) ka->sa.sa_handler; | 311 | __frame->pc = (unsigned long) ka->sa.sa_handler; |
308 | __frame->gr15 = 0; | 312 | __frame->gr15 = 0; |
309 | } | 313 | } |
310 | 314 | ||
311 | set_fs(USER_DS); | 315 | __frame->sp = (unsigned long) frame; |
316 | __frame->lr = (unsigned long) &frame->retcode; | ||
317 | __frame->gr8 = sig; | ||
312 | 318 | ||
313 | /* the tracer may want to single-step inside the handler */ | 319 | /* the tracer may want to single-step inside the handler */ |
314 | if (test_thread_flag(TIF_SINGLESTEP)) | 320 | if (test_thread_flag(TIF_SINGLESTEP)) |
@@ -323,7 +329,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set) | |||
323 | return 0; | 329 | return 0; |
324 | 330 | ||
325 | give_sigsegv: | 331 | give_sigsegv: |
326 | force_sig(SIGSEGV, current); | 332 | force_sigsegv(sig, current); |
327 | return -EFAULT; | 333 | return -EFAULT; |
328 | 334 | ||
329 | } /* end setup_frame() */ | 335 | } /* end setup_frame() */ |
@@ -338,6 +344,8 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
338 | struct rt_sigframe __user *frame; | 344 | struct rt_sigframe __user *frame; |
339 | int rsig; | 345 | int rsig; |
340 | 346 | ||
347 | set_fs(USER_DS); | ||
348 | |||
341 | frame = get_sigframe(ka, sizeof(*frame)); | 349 | frame = get_sigframe(ka, sizeof(*frame)); |
342 | 350 | ||
343 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 351 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
@@ -392,22 +400,23 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
392 | } | 400 | } |
393 | 401 | ||
394 | /* Set up registers for signal handler */ | 402 | /* Set up registers for signal handler */ |
395 | __frame->sp = (unsigned long) frame; | ||
396 | __frame->lr = (unsigned long) &frame->retcode; | ||
397 | __frame->gr8 = sig; | ||
398 | __frame->gr9 = (unsigned long) &frame->info; | ||
399 | |||
400 | if (current->personality & FDPIC_FUNCPTRS) { | 403 | if (current->personality & FDPIC_FUNCPTRS) { |
401 | struct fdpic_func_descriptor __user *funcptr = | 404 | struct fdpic_func_descriptor __user *funcptr = |
402 | (struct fdpic_func_descriptor __user *) ka->sa.sa_handler; | 405 | (struct fdpic_func_descriptor __user *) ka->sa.sa_handler; |
403 | __get_user(__frame->pc, &funcptr->text); | 406 | struct fdpic_func_descriptor desc; |
404 | __get_user(__frame->gr15, &funcptr->GOT); | 407 | if (copy_from_user(&desc, funcptr, sizeof(desc))) |
408 | goto give_sigsegv; | ||
409 | __frame->pc = desc.text; | ||
410 | __frame->gr15 = desc.GOT; | ||
405 | } else { | 411 | } else { |
406 | __frame->pc = (unsigned long) ka->sa.sa_handler; | 412 | __frame->pc = (unsigned long) ka->sa.sa_handler; |
407 | __frame->gr15 = 0; | 413 | __frame->gr15 = 0; |
408 | } | 414 | } |
409 | 415 | ||
410 | set_fs(USER_DS); | 416 | __frame->sp = (unsigned long) frame; |
417 | __frame->lr = (unsigned long) &frame->retcode; | ||
418 | __frame->gr8 = sig; | ||
419 | __frame->gr9 = (unsigned long) &frame->info; | ||
411 | 420 | ||
412 | /* the tracer may want to single-step inside the handler */ | 421 | /* the tracer may want to single-step inside the handler */ |
413 | if (test_thread_flag(TIF_SINGLESTEP)) | 422 | if (test_thread_flag(TIF_SINGLESTEP)) |
@@ -422,7 +431,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
422 | return 0; | 431 | return 0; |
423 | 432 | ||
424 | give_sigsegv: | 433 | give_sigsegv: |
425 | force_sig(SIGSEGV, current); | 434 | force_sigsegv(sig, current); |
426 | return -EFAULT; | 435 | return -EFAULT; |
427 | 436 | ||
428 | } /* end setup_rt_frame() */ | 437 | } /* end setup_rt_frame() */ |
@@ -437,7 +446,7 @@ static int handle_signal(unsigned long sig, siginfo_t *info, | |||
437 | int ret; | 446 | int ret; |
438 | 447 | ||
439 | /* Are we from a system call? */ | 448 | /* Are we from a system call? */ |
440 | if (in_syscall(__frame)) { | 449 | if (__frame->syscallno != -1) { |
441 | /* If so, check system call restarting.. */ | 450 | /* If so, check system call restarting.. */ |
442 | switch (__frame->gr8) { | 451 | switch (__frame->gr8) { |
443 | case -ERESTART_RESTARTBLOCK: | 452 | case -ERESTART_RESTARTBLOCK: |
@@ -456,6 +465,7 @@ static int handle_signal(unsigned long sig, siginfo_t *info, | |||
456 | __frame->gr8 = __frame->orig_gr8; | 465 | __frame->gr8 = __frame->orig_gr8; |
457 | __frame->pc -= 4; | 466 | __frame->pc -= 4; |
458 | } | 467 | } |
468 | __frame->syscallno = -1; | ||
459 | } | 469 | } |
460 | 470 | ||
461 | /* Set up the stack frame */ | 471 | /* Set up the stack frame */ |
@@ -538,10 +548,11 @@ no_signal: | |||
538 | break; | 548 | break; |
539 | 549 | ||
540 | case -ERESTART_RESTARTBLOCK: | 550 | case -ERESTART_RESTARTBLOCK: |
541 | __frame->gr8 = __NR_restart_syscall; | 551 | __frame->gr7 = __NR_restart_syscall; |
542 | __frame->pc -= 4; | 552 | __frame->pc -= 4; |
543 | break; | 553 | break; |
544 | } | 554 | } |
555 | __frame->syscallno = -1; | ||
545 | } | 556 | } |
546 | 557 | ||
547 | /* if there's no signal to deliver, we just put the saved sigmask | 558 | /* if there's no signal to deliver, we just put the saved sigmask |
diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h index e936804b7508..984221abb66d 100644 --- a/arch/h8300/include/asm/atomic.h +++ b/arch/h8300/include/asm/atomic.h | |||
@@ -18,7 +18,8 @@ | |||
18 | 18 | ||
19 | static __inline__ int atomic_add_return(int i, atomic_t *v) | 19 | static __inline__ int atomic_add_return(int i, atomic_t *v) |
20 | { | 20 | { |
21 | int ret,flags; | 21 | unsigned long flags; |
22 | int ret; | ||
22 | local_irq_save(flags); | 23 | local_irq_save(flags); |
23 | ret = v->counter += i; | 24 | ret = v->counter += i; |
24 | local_irq_restore(flags); | 25 | local_irq_restore(flags); |
@@ -30,7 +31,8 @@ static __inline__ int atomic_add_return(int i, atomic_t *v) | |||
30 | 31 | ||
31 | static __inline__ int atomic_sub_return(int i, atomic_t *v) | 32 | static __inline__ int atomic_sub_return(int i, atomic_t *v) |
32 | { | 33 | { |
33 | int ret,flags; | 34 | unsigned long flags; |
35 | int ret; | ||
34 | local_irq_save(flags); | 36 | local_irq_save(flags); |
35 | ret = v->counter -= i; | 37 | ret = v->counter -= i; |
36 | local_irq_restore(flags); | 38 | local_irq_restore(flags); |
@@ -42,7 +44,8 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v) | |||
42 | 44 | ||
43 | static __inline__ int atomic_inc_return(atomic_t *v) | 45 | static __inline__ int atomic_inc_return(atomic_t *v) |
44 | { | 46 | { |
45 | int ret,flags; | 47 | unsigned long flags; |
48 | int ret; | ||
46 | local_irq_save(flags); | 49 | local_irq_save(flags); |
47 | v->counter++; | 50 | v->counter++; |
48 | ret = v->counter; | 51 | ret = v->counter; |
@@ -64,7 +67,8 @@ static __inline__ int atomic_inc_return(atomic_t *v) | |||
64 | 67 | ||
65 | static __inline__ int atomic_dec_return(atomic_t *v) | 68 | static __inline__ int atomic_dec_return(atomic_t *v) |
66 | { | 69 | { |
67 | int ret,flags; | 70 | unsigned long flags; |
71 | int ret; | ||
68 | local_irq_save(flags); | 72 | local_irq_save(flags); |
69 | --v->counter; | 73 | --v->counter; |
70 | ret = v->counter; | 74 | ret = v->counter; |
@@ -76,7 +80,8 @@ static __inline__ int atomic_dec_return(atomic_t *v) | |||
76 | 80 | ||
77 | static __inline__ int atomic_dec_and_test(atomic_t *v) | 81 | static __inline__ int atomic_dec_and_test(atomic_t *v) |
78 | { | 82 | { |
79 | int ret,flags; | 83 | unsigned long flags; |
84 | int ret; | ||
80 | local_irq_save(flags); | 85 | local_irq_save(flags); |
81 | --v->counter; | 86 | --v->counter; |
82 | ret = v->counter; | 87 | ret = v->counter; |
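The h8300 atomic.h hunks change the interrupt-flags variable from int to unsigned long: local_irq_save() stashes the saved status word there, and the generic helpers expect an unsigned long. A minimal sketch of the idiom; fake_save()/fake_restore() only mimic the shape of local_irq_save()/local_irq_restore() so the snippet compiles on its own.

	typedef struct { int counter; } atomic_t;

	static unsigned long fake_save(void)       { return 0; }    /* stub */
	static void fake_restore(unsigned long f)  { (void)f; }     /* stub */

	#define local_irq_save(flags)    ((flags) = fake_save())
	#define local_irq_restore(flags) fake_restore(flags)

	static inline int atomic_add_return(int i, atomic_t *v)
	{
		unsigned long flags;	/* wide enough for the saved status word */
		int ret;

		local_irq_save(flags);
		ret = v->counter += i;
		local_irq_restore(flags);
		return ret;
	}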
diff --git a/arch/h8300/include/asm/system.h b/arch/h8300/include/asm/system.h index d98d97685f06..16bf1560ff68 100644 --- a/arch/h8300/include/asm/system.h +++ b/arch/h8300/include/asm/system.h | |||
@@ -3,6 +3,8 @@ | |||
3 | 3 | ||
4 | #include <linux/linkage.h> | 4 | #include <linux/linkage.h> |
5 | 5 | ||
6 | struct pt_regs; | ||
7 | |||
6 | /* | 8 | /* |
7 | * switch_to(n) should switch tasks to task ptr, first checking that | 9 | * switch_to(n) should switch tasks to task ptr, first checking that |
8 | * ptr isn't the current task, in which case it does nothing. This | 10 | * ptr isn't the current task, in which case it does nothing. This |
@@ -155,6 +157,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz | |||
155 | 157 | ||
156 | #define arch_align_stack(x) (x) | 158 | #define arch_align_stack(x) (x) |
157 | 159 | ||
158 | void die(char *str, struct pt_regs *fp, unsigned long err); | 160 | extern void die(const char *str, struct pt_regs *fp, unsigned long err); |
159 | 161 | ||
160 | #endif /* _H8300_SYSTEM_H */ | 162 | #endif /* _H8300_SYSTEM_H */ |
diff --git a/arch/h8300/kernel/module.c b/arch/h8300/kernel/module.c index 0865e291c20d..db4953dc4e1b 100644 --- a/arch/h8300/kernel/module.c +++ b/arch/h8300/kernel/module.c | |||
@@ -112,10 +112,9 @@ int module_finalize(const Elf_Ehdr *hdr, | |||
112 | const Elf_Shdr *sechdrs, | 112 | const Elf_Shdr *sechdrs, |
113 | struct module *me) | 113 | struct module *me) |
114 | { | 114 | { |
115 | return module_bug_finalize(hdr, sechdrs, me); | 115 | return 0; |
116 | } | 116 | } |
117 | 117 | ||
118 | void module_arch_cleanup(struct module *mod) | 118 | void module_arch_cleanup(struct module *mod) |
119 | { | 119 | { |
120 | module_bug_cleanup(mod); | ||
121 | } | 120 | } |
diff --git a/arch/h8300/kernel/sys_h8300.c b/arch/h8300/kernel/sys_h8300.c index dc1ac0243b78..aaf5e5a48f93 100644 --- a/arch/h8300/kernel/sys_h8300.c +++ b/arch/h8300/kernel/sys_h8300.c | |||
@@ -56,8 +56,8 @@ int kernel_execve(const char *filename, | |||
56 | const char *const envp[]) | 56 | const char *const envp[]) |
57 | { | 57 | { |
58 | register long res __asm__("er0"); | 58 | register long res __asm__("er0"); |
59 | register char *const *_c __asm__("er3") = envp; | 59 | register const char *const *_c __asm__("er3") = envp; |
60 | register char *const *_b __asm__("er2") = argv; | 60 | register const char *const *_b __asm__("er2") = argv; |
61 | register const char * _a __asm__("er1") = filename; | 61 | register const char * _a __asm__("er1") = filename; |
62 | __asm__ __volatile__ ("mov.l %1,er0\n\t" | 62 | __asm__ __volatile__ ("mov.l %1,er0\n\t" |
63 | "trapa #0\n\t" | 63 | "trapa #0\n\t" |
diff --git a/arch/h8300/kernel/traps.c b/arch/h8300/kernel/traps.c index 3c0b66bc669e..dfa05bd908b6 100644 --- a/arch/h8300/kernel/traps.c +++ b/arch/h8300/kernel/traps.c | |||
@@ -96,7 +96,7 @@ static void dump(struct pt_regs *fp) | |||
96 | printk("\n\n"); | 96 | printk("\n\n"); |
97 | } | 97 | } |
98 | 98 | ||
99 | void die(char *str, struct pt_regs *fp, unsigned long err) | 99 | void die(const char *str, struct pt_regs *fp, unsigned long err) |
100 | { | 100 | { |
101 | static int diecount; | 101 | static int diecount; |
102 | 102 | ||
diff --git a/arch/ia64/include/asm/compat.h b/arch/ia64/include/asm/compat.h index f90edc85b509..9301a2821615 100644 --- a/arch/ia64/include/asm/compat.h +++ b/arch/ia64/include/asm/compat.h | |||
@@ -199,7 +199,7 @@ ptr_to_compat(void __user *uptr) | |||
199 | } | 199 | } |
200 | 200 | ||
201 | static __inline__ void __user * | 201 | static __inline__ void __user * |
202 | compat_alloc_user_space (long len) | 202 | arch_compat_alloc_user_space (long len) |
203 | { | 203 | { |
204 | struct pt_regs *regs = task_pt_regs(current); | 204 | struct pt_regs *regs = task_pt_regs(current); |
205 | return (void __user *) (((regs->r12 & 0xffffffff) & -16) - len); | 205 | return (void __user *) (((regs->r12 & 0xffffffff) & -16) - len); |
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S index 3567d54f8cee..331d42bda77a 100644 --- a/arch/ia64/kernel/fsys.S +++ b/arch/ia64/kernel/fsys.S | |||
@@ -420,22 +420,31 @@ EX(.fail_efault, ld8 r14=[r33]) // r14 <- *set | |||
420 | ;; | 420 | ;; |
421 | 421 | ||
422 | RSM_PSR_I(p0, r18, r19) // mask interrupt delivery | 422 | RSM_PSR_I(p0, r18, r19) // mask interrupt delivery |
423 | mov ar.ccv=0 | ||
424 | andcm r14=r14,r17 // filter out SIGKILL & SIGSTOP | 423 | andcm r14=r14,r17 // filter out SIGKILL & SIGSTOP |
424 | mov r8=EINVAL // default to EINVAL | ||
425 | 425 | ||
426 | #ifdef CONFIG_SMP | 426 | #ifdef CONFIG_SMP |
427 | mov r17=1 | 427 | // __ticket_spin_trylock(r31) |
428 | ld4 r17=[r31] | ||
428 | ;; | 429 | ;; |
429 | cmpxchg4.acq r18=[r31],r17,ar.ccv // try to acquire the lock | 430 | mov.m ar.ccv=r17 |
430 | mov r8=EINVAL // default to EINVAL | 431 | extr.u r9=r17,17,15 |
432 | adds r19=1,r17 | ||
433 | extr.u r18=r17,0,15 | ||
434 | ;; | ||
435 | cmp.eq p6,p7=r9,r18 | ||
431 | ;; | 436 | ;; |
437 | (p6) cmpxchg4.acq r9=[r31],r19,ar.ccv | ||
438 | (p6) dep.z r20=r19,1,15 // next serving ticket for unlock | ||
439 | (p7) br.cond.spnt.many .lock_contention | ||
440 | ;; | ||
441 | cmp4.eq p0,p7=r9,r17 | ||
442 | adds r31=2,r31 | ||
443 | (p7) br.cond.spnt.many .lock_contention | ||
432 | ld8 r3=[r2] // re-read current->blocked now that we hold the lock | 444 | ld8 r3=[r2] // re-read current->blocked now that we hold the lock |
433 | cmp4.ne p6,p0=r18,r0 | ||
434 | (p6) br.cond.spnt.many .lock_contention | ||
435 | ;; | 445 | ;; |
436 | #else | 446 | #else |
437 | ld8 r3=[r2] // re-read current->blocked now that we hold the lock | 447 | ld8 r3=[r2] // re-read current->blocked now that we hold the lock |
438 | mov r8=EINVAL // default to EINVAL | ||
439 | #endif | 448 | #endif |
440 | add r18=IA64_TASK_PENDING_OFFSET+IA64_SIGPENDING_SIGNAL_OFFSET,r16 | 449 | add r18=IA64_TASK_PENDING_OFFSET+IA64_SIGPENDING_SIGNAL_OFFSET,r16 |
441 | add r19=IA64_TASK_SIGNAL_OFFSET,r16 | 450 | add r19=IA64_TASK_SIGNAL_OFFSET,r16 |
@@ -490,7 +499,9 @@ EX(.fail_efault, ld8 r14=[r33]) // r14 <- *set | |||
490 | (p6) br.cond.spnt.few 1b // yes -> retry | 499 | (p6) br.cond.spnt.few 1b // yes -> retry |
491 | 500 | ||
492 | #ifdef CONFIG_SMP | 501 | #ifdef CONFIG_SMP |
493 | st4.rel [r31]=r0 // release the lock | 502 | // __ticket_spin_unlock(r31) |
503 | st2.rel [r31]=r20 | ||
504 | mov r20=0 // i must not leak kernel bits... | ||
494 | #endif | 505 | #endif |
495 | SSM_PSR_I(p0, p9, r31) | 506 | SSM_PSR_I(p0, p9, r31) |
496 | ;; | 507 | ;; |
@@ -512,7 +523,8 @@ EX(.fail_efault, (p15) st8 [r34]=r3) | |||
512 | 523 | ||
513 | .sig_pending: | 524 | .sig_pending: |
514 | #ifdef CONFIG_SMP | 525 | #ifdef CONFIG_SMP |
515 | st4.rel [r31]=r0 // release the lock | 526 | // __ticket_spin_unlock(r31) |
527 | st2.rel [r31]=r20 // release the lock | ||
516 | #endif | 528 | #endif |
517 | SSM_PSR_I(p0, p9, r17) | 529 | SSM_PSR_I(p0, p9, r17) |
518 | ;; | 530 | ;; |
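The ia64 fsys.S hunk rewrites the hand-rolled lock in the sigprocmask fast path so it follows the ticket-spinlock protocol used by the rest of the kernel: compare the "next ticket" and "now serving" fields, take a ticket if they match, and bump the serving field to unlock. Below is a single-threaded C model of that protocol only; the real code relies on cmpxchg4.acq and st2.rel for atomicity and ordering, and the exact field layout is the one encoded in the assembly above.

	#include <stdint.h>

	struct ticket_lock {
		uint16_t next;		/* next ticket to hand out */
		uint16_t serving;	/* ticket currently being served */
	};

	/* non-blocking acquire: succeed only if nobody holds or waits */
	static int ticket_trylock(struct ticket_lock *l)
	{
		if (l->next != l->serving)
			return 0;	/* contended: caller takes the slow path */
		l->next++;		/* our ticket equals serving, lock taken */
		return 1;
	}

	static void ticket_unlock(struct ticket_lock *l)
	{
		l->serving++;		/* hand the lock to the next ticket */
	}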
diff --git a/arch/m32r/include/asm/signal.h b/arch/m32r/include/asm/signal.h index 9c1acb2b1a92..b2eeb0de1c8d 100644 --- a/arch/m32r/include/asm/signal.h +++ b/arch/m32r/include/asm/signal.h | |||
@@ -157,7 +157,6 @@ typedef struct sigaltstack { | |||
157 | #undef __HAVE_ARCH_SIG_BITOPS | 157 | #undef __HAVE_ARCH_SIG_BITOPS |
158 | 158 | ||
159 | struct pt_regs; | 159 | struct pt_regs; |
160 | extern int do_signal(struct pt_regs *regs, sigset_t *oldset); | ||
161 | 160 | ||
162 | #define ptrace_signal_deliver(regs, cookie) do { } while (0) | 161 | #define ptrace_signal_deliver(regs, cookie) do { } while (0) |
163 | 162 | ||
diff --git a/arch/m32r/include/asm/unistd.h b/arch/m32r/include/asm/unistd.h index 76125777483c..c70545689da8 100644 --- a/arch/m32r/include/asm/unistd.h +++ b/arch/m32r/include/asm/unistd.h | |||
@@ -351,6 +351,7 @@ | |||
351 | #define __ARCH_WANT_SYS_OLD_GETRLIMIT /*will be unused*/ | 351 | #define __ARCH_WANT_SYS_OLD_GETRLIMIT /*will be unused*/ |
352 | #define __ARCH_WANT_SYS_OLDUMOUNT | 352 | #define __ARCH_WANT_SYS_OLDUMOUNT |
353 | #define __ARCH_WANT_SYS_RT_SIGACTION | 353 | #define __ARCH_WANT_SYS_RT_SIGACTION |
354 | #define __ARCH_WANT_SYS_RT_SIGSUSPEND | ||
354 | 355 | ||
355 | #define __IGNORE_lchown | 356 | #define __IGNORE_lchown |
356 | #define __IGNORE_setuid | 357 | #define __IGNORE_setuid |
diff --git a/arch/m32r/kernel/entry.S b/arch/m32r/kernel/entry.S index 403869833b98..225412bc227e 100644 --- a/arch/m32r/kernel/entry.S +++ b/arch/m32r/kernel/entry.S | |||
@@ -235,10 +235,9 @@ work_resched: | |||
235 | work_notifysig: ; deal with pending signals and | 235 | work_notifysig: ; deal with pending signals and |
236 | ; notify-resume requests | 236 | ; notify-resume requests |
237 | mv r0, sp ; arg1 : struct pt_regs *regs | 237 | mv r0, sp ; arg1 : struct pt_regs *regs |
238 | ldi r1, #0 ; arg2 : sigset_t *oldset | 238 | mv r1, r9 ; arg2 : __u32 thread_info_flags |
239 | mv r2, r9 ; arg3 : __u32 thread_info_flags | ||
240 | bl do_notify_resume | 239 | bl do_notify_resume |
241 | bra restore_all | 240 | bra resume_userspace |
242 | 241 | ||
243 | ; perform syscall exit tracing | 242 | ; perform syscall exit tracing |
244 | ALIGN | 243 | ALIGN |
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c index e555091eb97c..0021ade4cba8 100644 --- a/arch/m32r/kernel/ptrace.c +++ b/arch/m32r/kernel/ptrace.c | |||
@@ -592,16 +592,17 @@ void user_enable_single_step(struct task_struct *child) | |||
592 | 592 | ||
593 | if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0) | 593 | if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0) |
594 | != sizeof(insn)) | 594 | != sizeof(insn)) |
595 | break; | 595 | return -EIO; |
596 | 596 | ||
597 | compute_next_pc(insn, pc, &next_pc, child); | 597 | compute_next_pc(insn, pc, &next_pc, child); |
598 | if (next_pc & 0x80000000) | 598 | if (next_pc & 0x80000000) |
599 | break; | 599 | return -EIO; |
600 | 600 | ||
601 | if (embed_debug_trap(child, next_pc)) | 601 | if (embed_debug_trap(child, next_pc)) |
602 | break; | 602 | return -EIO; |
603 | 603 | ||
604 | invalidate_cache(); | 604 | invalidate_cache(); |
605 | return 0; | ||
605 | } | 606 | } |
606 | 607 | ||
607 | void user_disable_single_step(struct task_struct *child) | 608 | void user_disable_single_step(struct task_struct *child) |
diff --git a/arch/m32r/kernel/signal.c b/arch/m32r/kernel/signal.c index 144b0f124fc7..7bbe38645ed5 100644 --- a/arch/m32r/kernel/signal.c +++ b/arch/m32r/kernel/signal.c | |||
@@ -28,37 +28,6 @@ | |||
28 | 28 | ||
29 | #define DEBUG_SIG 0 | 29 | #define DEBUG_SIG 0 |
30 | 30 | ||
31 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | ||
32 | |||
33 | int do_signal(struct pt_regs *, sigset_t *); | ||
34 | |||
35 | asmlinkage int | ||
36 | sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize, | ||
37 | unsigned long r2, unsigned long r3, unsigned long r4, | ||
38 | unsigned long r5, unsigned long r6, struct pt_regs *regs) | ||
39 | { | ||
40 | sigset_t newset; | ||
41 | |||
42 | /* XXX: Don't preclude handling different sized sigset_t's. */ | ||
43 | if (sigsetsize != sizeof(sigset_t)) | ||
44 | return -EINVAL; | ||
45 | |||
46 | if (copy_from_user(&newset, unewset, sizeof(newset))) | ||
47 | return -EFAULT; | ||
48 | sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP)); | ||
49 | |||
50 | spin_lock_irq(¤t->sighand->siglock); | ||
51 | current->saved_sigmask = current->blocked; | ||
52 | current->blocked = newset; | ||
53 | recalc_sigpending(); | ||
54 | spin_unlock_irq(¤t->sighand->siglock); | ||
55 | |||
56 | current->state = TASK_INTERRUPTIBLE; | ||
57 | schedule(); | ||
58 | set_thread_flag(TIF_RESTORE_SIGMASK); | ||
59 | return -ERESTARTNOHAND; | ||
60 | } | ||
61 | |||
62 | asmlinkage int | 31 | asmlinkage int |
63 | sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, | 32 | sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, |
64 | unsigned long r2, unsigned long r3, unsigned long r4, | 33 | unsigned long r2, unsigned long r3, unsigned long r4, |
@@ -218,7 +187,7 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size) | |||
218 | return (void __user *)((sp - frame_size) & -8ul); | 187 | return (void __user *)((sp - frame_size) & -8ul); |
219 | } | 188 | } |
220 | 189 | ||
221 | static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | 190 | static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, |
222 | sigset_t *set, struct pt_regs *regs) | 191 | sigset_t *set, struct pt_regs *regs) |
223 | { | 192 | { |
224 | struct rt_sigframe __user *frame; | 193 | struct rt_sigframe __user *frame; |
@@ -275,22 +244,34 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
275 | current->comm, current->pid, frame, regs->pc); | 244 | current->comm, current->pid, frame, regs->pc); |
276 | #endif | 245 | #endif |
277 | 246 | ||
278 | return; | 247 | return 0; |
279 | 248 | ||
280 | give_sigsegv: | 249 | give_sigsegv: |
281 | force_sigsegv(sig, current); | 250 | force_sigsegv(sig, current); |
251 | return -EFAULT; | ||
252 | } | ||
253 | |||
254 | static int prev_insn(struct pt_regs *regs) | ||
255 | { | ||
256 | u16 inst; | ||
257 | if (get_user(&inst, (u16 __user *)(regs->bpc - 2))) | ||
258 | return -EFAULT; | ||
259 | if ((inst & 0xfff0) == 0x10f0) /* trap ? */ | ||
260 | regs->bpc -= 2; | ||
261 | else | ||
262 | regs->bpc -= 4; | ||
263 | regs->syscall_nr = -1; | ||
264 | return 0; | ||
282 | } | 265 | } |
283 | 266 | ||
284 | /* | 267 | /* |
285 | * OK, we're invoking a handler | 268 | * OK, we're invoking a handler |
286 | */ | 269 | */ |
287 | 270 | ||
288 | static void | 271 | static int |
289 | handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, | 272 | handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, |
290 | sigset_t *oldset, struct pt_regs *regs) | 273 | sigset_t *oldset, struct pt_regs *regs) |
291 | { | 274 | { |
292 | unsigned short inst; | ||
293 | |||
294 | /* Are we from a system call? */ | 275 | /* Are we from a system call? */ |
295 | if (regs->syscall_nr >= 0) { | 276 | if (regs->syscall_nr >= 0) { |
296 | /* If so, check system call restarting.. */ | 277 | /* If so, check system call restarting.. */ |
@@ -308,16 +289,14 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, | |||
308 | /* fallthrough */ | 289 | /* fallthrough */ |
309 | case -ERESTARTNOINTR: | 290 | case -ERESTARTNOINTR: |
310 | regs->r0 = regs->orig_r0; | 291 | regs->r0 = regs->orig_r0; |
311 | inst = *(unsigned short *)(regs->bpc - 2); | 292 | if (prev_insn(regs) < 0) |
312 | if ((inst & 0xfff0) == 0x10f0) /* trap ? */ | 293 | return -EFAULT; |
313 | regs->bpc -= 2; | ||
314 | else | ||
315 | regs->bpc -= 4; | ||
316 | } | 294 | } |
317 | } | 295 | } |
318 | 296 | ||
319 | /* Set up the stack frame */ | 297 | /* Set up the stack frame */ |
320 | setup_rt_frame(sig, ka, info, oldset, regs); | 298 | if (setup_rt_frame(sig, ka, info, oldset, regs)) |
299 | return -EFAULT; | ||
321 | 300 | ||
322 | spin_lock_irq(¤t->sighand->siglock); | 301 | spin_lock_irq(¤t->sighand->siglock); |
323 | sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); | 302 | sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); |
@@ -325,6 +304,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, | |||
325 | sigaddset(¤t->blocked,sig); | 304 | sigaddset(¤t->blocked,sig); |
326 | recalc_sigpending(); | 305 | recalc_sigpending(); |
327 | spin_unlock_irq(¤t->sighand->siglock); | 306 | spin_unlock_irq(¤t->sighand->siglock); |
307 | return 0; | ||
328 | } | 308 | } |
329 | 309 | ||
330 | /* | 310 | /* |
@@ -332,12 +312,12 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, | |||
332 | * want to handle. Thus you cannot kill init even with a SIGKILL even by | 312 | * want to handle. Thus you cannot kill init even with a SIGKILL even by |
333 | * mistake. | 313 | * mistake. |
334 | */ | 314 | */ |
335 | int do_signal(struct pt_regs *regs, sigset_t *oldset) | 315 | static void do_signal(struct pt_regs *regs) |
336 | { | 316 | { |
337 | siginfo_t info; | 317 | siginfo_t info; |
338 | int signr; | 318 | int signr; |
339 | struct k_sigaction ka; | 319 | struct k_sigaction ka; |
340 | unsigned short inst; | 320 | sigset_t *oldset; |
341 | 321 | ||
342 | /* | 322 | /* |
343 | * We want the common case to go fast, which | 323 | * We want the common case to go fast, which |
@@ -346,12 +326,14 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset) | |||
346 | * if so. | 326 | * if so. |
347 | */ | 327 | */ |
348 | if (!user_mode(regs)) | 328 | if (!user_mode(regs)) |
349 | return 1; | 329 | return; |
350 | 330 | ||
351 | if (try_to_freeze()) | 331 | if (try_to_freeze()) |
352 | goto no_signal; | 332 | goto no_signal; |
353 | 333 | ||
354 | if (!oldset) | 334 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) |
335 | oldset = ¤t->saved_sigmask; | ||
336 | else | ||
355 | oldset = ¤t->blocked; | 337 | oldset = ¤t->blocked; |
356 | 338 | ||
357 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | 339 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); |
@@ -363,8 +345,10 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset) | |||
363 | */ | 345 | */ |
364 | 346 | ||
365 | /* Whee! Actually deliver the signal. */ | 347 | /* Whee! Actually deliver the signal. */ |
366 | handle_signal(signr, &ka, &info, oldset, regs); | 348 | if (handle_signal(signr, &ka, &info, oldset, regs) == 0) |
367 | return 1; | 349 | clear_thread_flag(TIF_RESTORE_SIGMASK); |
350 | |||
351 | return; | ||
368 | } | 352 | } |
369 | 353 | ||
370 | no_signal: | 354 | no_signal: |
@@ -375,31 +359,24 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset) | |||
375 | regs->r0 == -ERESTARTSYS || | 359 | regs->r0 == -ERESTARTSYS || |
376 | regs->r0 == -ERESTARTNOINTR) { | 360 | regs->r0 == -ERESTARTNOINTR) { |
377 | regs->r0 = regs->orig_r0; | 361 | regs->r0 = regs->orig_r0; |
378 | inst = *(unsigned short *)(regs->bpc - 2); | 362 | prev_insn(regs); |
379 | if ((inst & 0xfff0) == 0x10f0) /* trap ? */ | 363 | } else if (regs->r0 == -ERESTART_RESTARTBLOCK){ |
380 | regs->bpc -= 2; | ||
381 | else | ||
382 | regs->bpc -= 4; | ||
383 | } | ||
384 | if (regs->r0 == -ERESTART_RESTARTBLOCK){ | ||
385 | regs->r0 = regs->orig_r0; | 364 | regs->r0 = regs->orig_r0; |
386 | regs->r7 = __NR_restart_syscall; | 365 | regs->r7 = __NR_restart_syscall; |
387 | inst = *(unsigned short *)(regs->bpc - 2); | 366 | prev_insn(regs); |
388 | if ((inst & 0xfff0) == 0x10f0) /* trap ? */ | ||
389 | regs->bpc -= 2; | ||
390 | else | ||
391 | regs->bpc -= 4; | ||
392 | } | 367 | } |
393 | } | 368 | } |
394 | return 0; | 369 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) { |
370 | clear_thread_flag(TIF_RESTORE_SIGMASK); | ||
371 | sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); | ||
372 | } | ||
395 | } | 373 | } |
396 | 374 | ||
397 | /* | 375 | /* |
398 | * notification of userspace execution resumption | 376 | * notification of userspace execution resumption |
399 | * - triggered by current->work.notify_resume | 377 | * - triggered by current->work.notify_resume |
400 | */ | 378 | */ |
401 | void do_notify_resume(struct pt_regs *regs, sigset_t *oldset, | 379 | void do_notify_resume(struct pt_regs *regs, __u32 thread_info_flags) |
402 | __u32 thread_info_flags) | ||
403 | { | 380 | { |
404 | /* Pending single-step? */ | 381 | /* Pending single-step? */ |
405 | if (thread_info_flags & _TIF_SINGLESTEP) | 382 | if (thread_info_flags & _TIF_SINGLESTEP) |
@@ -407,7 +384,7 @@ void do_notify_resume(struct pt_regs *regs, sigset_t *oldset, | |||
407 | 384 | ||
408 | /* deal with pending signal delivery */ | 385 | /* deal with pending signal delivery */ |
409 | if (thread_info_flags & _TIF_SIGPENDING) | 386 | if (thread_info_flags & _TIF_SIGPENDING) |
410 | do_signal(regs,oldset); | 387 | do_signal(regs); |
411 | 388 | ||
412 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { | 389 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { |
413 | clear_thread_flag(TIF_NOTIFY_RESUME); | 390 | clear_thread_flag(TIF_NOTIFY_RESUME); |
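The m32r signal.c rework pulls the "back the PC up over the trap instruction" logic into prev_insn(), which now fetches the instruction with get_user() and fails cleanly instead of dereferencing the user address directly. A stand-alone model of the control flow follows; the 0x10f0 test and the 2/4-byte step come from the hunk, while fetch_u16() is a fake stand-in for get_user().

	#include <errno.h>
	#include <stdint.h>

	struct regs { unsigned long bpc; long syscall_nr; };

	/* fake fetch: always "reads" a trap opcode; real code may fault */
	static int fetch_u16(uint16_t *out, unsigned long addr)
	{
		(void)addr;
		*out = 0x10f0;
		return 0;	/* 0 on success, nonzero on fault */
	}

	static int prev_insn(struct regs *regs)
	{
		uint16_t inst;

		if (fetch_u16(&inst, regs->bpc - 2))
			return -EFAULT;
		if ((inst & 0xfff0) == 0x10f0)	/* 2-byte trap instruction? */
			regs->bpc -= 2;
		else
			regs->bpc -= 4;
		regs->syscall_nr = -1;		/* do not restart twice */
		return 0;
	}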
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h index 60b15d0aa072..b43b36beafe3 100644 --- a/arch/m68k/include/asm/unistd.h +++ b/arch/m68k/include/asm/unistd.h | |||
@@ -340,10 +340,13 @@ | |||
340 | #define __NR_set_thread_area 334 | 340 | #define __NR_set_thread_area 334 |
341 | #define __NR_atomic_cmpxchg_32 335 | 341 | #define __NR_atomic_cmpxchg_32 335 |
342 | #define __NR_atomic_barrier 336 | 342 | #define __NR_atomic_barrier 336 |
343 | #define __NR_fanotify_init 337 | ||
344 | #define __NR_fanotify_mark 338 | ||
345 | #define __NR_prlimit64 339 | ||
343 | 346 | ||
344 | #ifdef __KERNEL__ | 347 | #ifdef __KERNEL__ |
345 | 348 | ||
346 | #define NR_syscalls 337 | 349 | #define NR_syscalls 340 |
347 | 350 | ||
348 | #define __ARCH_WANT_IPC_PARSE_VERSION | 351 | #define __ARCH_WANT_IPC_PARSE_VERSION |
349 | #define __ARCH_WANT_OLD_READDIR | 352 | #define __ARCH_WANT_OLD_READDIR |
diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S index 2391bdff0996..6360c437dcf5 100644 --- a/arch/m68k/kernel/entry.S +++ b/arch/m68k/kernel/entry.S | |||
@@ -765,4 +765,7 @@ sys_call_table: | |||
765 | .long sys_set_thread_area | 765 | .long sys_set_thread_area |
766 | .long sys_atomic_cmpxchg_32 /* 335 */ | 766 | .long sys_atomic_cmpxchg_32 /* 335 */ |
767 | .long sys_atomic_barrier | 767 | .long sys_atomic_barrier |
768 | .long sys_fanotify_init | ||
769 | .long sys_fanotify_mark | ||
770 | .long sys_prlimit64 | ||
768 | 771 | ||
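The m68k hunks wire up fanotify_init, fanotify_mark and prlimit64: the __NR_* constants, the table entries and NR_syscalls all have to move in lock-step, otherwise the trailing .rept padding leaves the new slots pointing at sys_ni_syscall. Assuming the numbering from the hunk (fanotify_init = 337) and a made-up flags value, user space could reach the new call by number even before libc grows a wrapper, roughly:

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* 337 is __NR_fanotify_init from the hunk; arguments are illustrative */
		long fd = syscall(337, 0 /* flags */, 0 /* event_f_flags */);

		printf("fanotify_init -> %ld\n", fd);
		return 0;
	}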
diff --git a/arch/m68k/mac/macboing.c b/arch/m68k/mac/macboing.c index 8f0640847ad2..05285d08e547 100644 --- a/arch/m68k/mac/macboing.c +++ b/arch/m68k/mac/macboing.c | |||
@@ -162,7 +162,7 @@ static void mac_init_asc( void ) | |||
162 | void mac_mksound( unsigned int freq, unsigned int length ) | 162 | void mac_mksound( unsigned int freq, unsigned int length ) |
163 | { | 163 | { |
164 | __u32 cfreq = ( freq << 5 ) / 468; | 164 | __u32 cfreq = ( freq << 5 ) / 468; |
165 | __u32 flags; | 165 | unsigned long flags; |
166 | int i; | 166 | int i; |
167 | 167 | ||
168 | if ( mac_special_bell == NULL ) | 168 | if ( mac_special_bell == NULL ) |
@@ -224,7 +224,7 @@ static void mac_nosound( unsigned long ignored ) | |||
224 | */ | 224 | */ |
225 | static void mac_quadra_start_bell( unsigned int freq, unsigned int length, unsigned int volume ) | 225 | static void mac_quadra_start_bell( unsigned int freq, unsigned int length, unsigned int volume ) |
226 | { | 226 | { |
227 | __u32 flags; | 227 | unsigned long flags; |
228 | 228 | ||
229 | /* if the bell is already ringing, ring longer */ | 229 | /* if the bell is already ringing, ring longer */ |
230 | if ( mac_bell_duration > 0 ) | 230 | if ( mac_bell_duration > 0 ) |
@@ -271,7 +271,7 @@ static void mac_quadra_start_bell( unsigned int freq, unsigned int length, unsig | |||
271 | static void mac_quadra_ring_bell( unsigned long ignored ) | 271 | static void mac_quadra_ring_bell( unsigned long ignored ) |
272 | { | 272 | { |
273 | int i, count = mac_asc_samplespersec / HZ; | 273 | int i, count = mac_asc_samplespersec / HZ; |
274 | __u32 flags; | 274 | unsigned long flags; |
275 | 275 | ||
276 | /* | 276 | /* |
277 | * we neither want a sound buffer overflow nor underflow, so we need to match | 277 | * we neither want a sound buffer overflow nor underflow, so we need to match |
diff --git a/arch/m68knommu/kernel/syscalltable.S b/arch/m68knommu/kernel/syscalltable.S index b30b3eb197a5..79b1ed198c07 100644 --- a/arch/m68knommu/kernel/syscalltable.S +++ b/arch/m68knommu/kernel/syscalltable.S | |||
@@ -355,6 +355,9 @@ ENTRY(sys_call_table) | |||
355 | .long sys_set_thread_area | 355 | .long sys_set_thread_area |
356 | .long sys_atomic_cmpxchg_32 /* 335 */ | 356 | .long sys_atomic_cmpxchg_32 /* 335 */ |
357 | .long sys_atomic_barrier | 357 | .long sys_atomic_barrier |
358 | .long sys_fanotify_init | ||
359 | .long sys_fanotify_mark | ||
360 | .long sys_prlimit64 | ||
358 | 361 | ||
359 | .rept NR_syscalls-(.-sys_call_table)/4 | 362 | .rept NR_syscalls-(.-sys_call_table)/4 |
360 | .long sys_ni_syscall | 363 | .long sys_ni_syscall |
diff --git a/arch/m68knommu/kernel/vmlinux.lds.S b/arch/m68knommu/kernel/vmlinux.lds.S index a91b2713451d..ef332136f96d 100644 --- a/arch/m68knommu/kernel/vmlinux.lds.S +++ b/arch/m68knommu/kernel/vmlinux.lds.S | |||
@@ -150,6 +150,8 @@ SECTIONS { | |||
150 | _sdata = . ; | 150 | _sdata = . ; |
151 | DATA_DATA | 151 | DATA_DATA |
152 | CACHELINE_ALIGNED_DATA(32) | 152 | CACHELINE_ALIGNED_DATA(32) |
153 | PAGE_ALIGNED_DATA(PAGE_SIZE) | ||
154 | *(.data..shared_aligned) | ||
153 | INIT_TASK_DATA(THREAD_SIZE) | 155 | INIT_TASK_DATA(THREAD_SIZE) |
154 | _edata = . ; | 156 | _edata = . ; |
155 | } > DATA | 157 | } > DATA |
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 3ad59dde4852..5526faabfc21 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig | |||
@@ -13,6 +13,7 @@ config MIPS | |||
13 | select HAVE_KPROBES | 13 | select HAVE_KPROBES |
14 | select HAVE_KRETPROBES | 14 | select HAVE_KRETPROBES |
15 | select RTC_LIB if !MACH_LOONGSON | 15 | select RTC_LIB if !MACH_LOONGSON |
16 | select GENERIC_ATOMIC64 if !64BIT | ||
16 | 17 | ||
17 | mainmenu "Linux/MIPS Kernel Configuration" | 18 | mainmenu "Linux/MIPS Kernel Configuration" |
18 | 19 | ||
@@ -1646,8 +1647,16 @@ config MIPS_MT_SMP | |||
1646 | select SYS_SUPPORTS_SMP | 1647 | select SYS_SUPPORTS_SMP |
1647 | select SMP_UP | 1648 | select SMP_UP |
1648 | help | 1649 | help |
1649 | This is a kernel model which is also known a VSMP or lately | 1650 | This is a kernel model which is known a VSMP but lately has been |
1650 | has been marketesed into SMVP. | 1651 | marketesed into SMVP. |
1652 | Virtual SMP uses the processor's VPEs to implement virtual | ||
1653 | processors. In the currently available configuration of the 34K processor | ||
1654 | this allows for a dual processor. Both processors will share the same | ||
1655 | primary caches; each will obtain half of the TLB for its own | ||
1656 | exclusive use. For a layman this model can be described as similar to | ||
1657 | what Intel calls Hyperthreading. | ||
1658 | |||
1659 | For further information see http://www.linux-mips.org/wiki/34K#VSMP | ||
1651 | 1660 | ||
1652 | config MIPS_MT_SMTC | 1661 | config MIPS_MT_SMTC |
1653 | bool "SMTC: Use all TCs on all VPEs for SMP" | 1662 | bool "SMTC: Use all TCs on all VPEs for SMP" |
@@ -1664,6 +1673,14 @@ config MIPS_MT_SMTC | |||
1664 | help | 1673 | help |
1665 | This is a kernel model which is known a SMTC or lately has been | 1674 | This is a kernel model which is known a SMTC or lately has been |
1666 | marketesed into SMVP. | 1675 | marketesed into SMVP. |
1676 | is presenting the available TC's of the core as processors to Linux. | ||
1677 | On currently available 34K processors this means a Linux system will | ||
1678 | see up to 5 processors. The implementation of the SMTC kernel differs | ||
1679 | significantly from VSMP and cannot efficiently coexist in the same | ||
1680 | kernel binary so the choice between VSMP and SMTC is a compile time | ||
1681 | decision. | ||
1682 | |||
1683 | For further information see http://www.linux-mips.org/wiki/34K#SMTC | ||
1667 | 1684 | ||
1668 | endchoice | 1685 | endchoice |
1669 | 1686 | ||
diff --git a/arch/mips/alchemy/common/prom.c b/arch/mips/alchemy/common/prom.c index c29511b11d44..534021059629 100644 --- a/arch/mips/alchemy/common/prom.c +++ b/arch/mips/alchemy/common/prom.c | |||
@@ -43,7 +43,7 @@ int prom_argc; | |||
43 | char **prom_argv; | 43 | char **prom_argv; |
44 | char **prom_envp; | 44 | char **prom_envp; |
45 | 45 | ||
46 | void prom_init_cmdline(void) | 46 | void __init prom_init_cmdline(void) |
47 | { | 47 | { |
48 | int i; | 48 | int i; |
49 | 49 | ||
@@ -104,7 +104,7 @@ static inline void str2eaddr(unsigned char *ea, unsigned char *str) | |||
104 | } | 104 | } |
105 | } | 105 | } |
106 | 106 | ||
107 | int prom_get_ethernet_addr(char *ethernet_addr) | 107 | int __init prom_get_ethernet_addr(char *ethernet_addr) |
108 | { | 108 | { |
109 | char *ethaddr_str; | 109 | char *ethaddr_str; |
110 | 110 | ||
@@ -123,7 +123,6 @@ int prom_get_ethernet_addr(char *ethernet_addr) | |||
123 | 123 | ||
124 | return 0; | 124 | return 0; |
125 | } | 125 | } |
126 | EXPORT_SYMBOL(prom_get_ethernet_addr); | ||
127 | 126 | ||
128 | void __init prom_free_prom_memory(void) | 127 | void __init prom_free_prom_memory(void) |
129 | { | 128 | { |
diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile index ed9bb709c9a3..5fd7f7a58b7e 100644 --- a/arch/mips/boot/compressed/Makefile +++ b/arch/mips/boot/compressed/Makefile | |||
@@ -59,7 +59,7 @@ $(obj)/piggy.o: $(obj)/dummy.o $(obj)/vmlinux.bin.z FORCE | |||
59 | hostprogs-y := calc_vmlinuz_load_addr | 59 | hostprogs-y := calc_vmlinuz_load_addr |
60 | 60 | ||
61 | VMLINUZ_LOAD_ADDRESS = $(shell $(obj)/calc_vmlinuz_load_addr \ | 61 | VMLINUZ_LOAD_ADDRESS = $(shell $(obj)/calc_vmlinuz_load_addr \ |
62 | $(objtree)/$(KBUILD_IMAGE) $(VMLINUX_LOAD_ADDRESS)) | 62 | $(obj)/vmlinux.bin $(VMLINUX_LOAD_ADDRESS)) |
63 | 63 | ||
64 | vmlinuzobjs-y += $(obj)/piggy.o | 64 | vmlinuzobjs-y += $(obj)/piggy.o |
65 | 65 | ||
diff --git a/arch/mips/cavium-octeon/Kconfig b/arch/mips/cavium-octeon/Kconfig index 094c17e38e16..47323ca452dc 100644 --- a/arch/mips/cavium-octeon/Kconfig +++ b/arch/mips/cavium-octeon/Kconfig | |||
@@ -83,3 +83,7 @@ config ARCH_SPARSEMEM_ENABLE | |||
83 | def_bool y | 83 | def_bool y |
84 | select SPARSEMEM_STATIC | 84 | select SPARSEMEM_STATIC |
85 | depends on CPU_CAVIUM_OCTEON | 85 | depends on CPU_CAVIUM_OCTEON |
86 | |||
87 | config CAVIUM_OCTEON_HELPER | ||
88 | def_bool y | ||
89 | depends on OCTEON_ETHERNET || PCI | ||
diff --git a/arch/mips/cavium-octeon/cpu.c b/arch/mips/cavium-octeon/cpu.c index c664c8cc2b42..a5b427909b5c 100644 --- a/arch/mips/cavium-octeon/cpu.c +++ b/arch/mips/cavium-octeon/cpu.c | |||
@@ -41,7 +41,7 @@ static int cnmips_cu2_call(struct notifier_block *nfb, unsigned long action, | |||
41 | return NOTIFY_OK; /* Let default notifier send signals */ | 41 | return NOTIFY_OK; /* Let default notifier send signals */ |
42 | } | 42 | } |
43 | 43 | ||
44 | static int cnmips_cu2_setup(void) | 44 | static int __init cnmips_cu2_setup(void) |
45 | { | 45 | { |
46 | return cu2_notifier(cnmips_cu2_call, 0); | 46 | return cu2_notifier(cnmips_cu2_call, 0); |
47 | } | 47 | } |
diff --git a/arch/mips/cavium-octeon/executive/Makefile b/arch/mips/cavium-octeon/executive/Makefile index 2fd66db6939e..7f41c5be2190 100644 --- a/arch/mips/cavium-octeon/executive/Makefile +++ b/arch/mips/cavium-octeon/executive/Makefile | |||
@@ -11,4 +11,4 @@ | |||
11 | 11 | ||
12 | obj-y += cvmx-bootmem.o cvmx-l2c.o cvmx-sysinfo.o octeon-model.o | 12 | obj-y += cvmx-bootmem.o cvmx-l2c.o cvmx-sysinfo.o octeon-model.o |
13 | 13 | ||
14 | obj-$(CONFIG_PCI) += cvmx-helper-errata.o cvmx-helper-jtag.o | 14 | obj-$(CONFIG_CAVIUM_OCTEON_HELPER) += cvmx-helper-errata.o cvmx-helper-jtag.o |
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h index c63c56bfd184..47d87da379f9 100644 --- a/arch/mips/include/asm/atomic.h +++ b/arch/mips/include/asm/atomic.h | |||
@@ -782,6 +782,10 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) | |||
782 | */ | 782 | */ |
783 | #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0) | 783 | #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0) |
784 | 784 | ||
785 | #else /* !CONFIG_64BIT */ | ||
786 | |||
787 | #include <asm-generic/atomic64.h> | ||
788 | |||
785 | #endif /* CONFIG_64BIT */ | 789 | #endif /* CONFIG_64BIT */ |
786 | 790 | ||
787 | /* | 791 | /* |
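This pairs with the `select GENERIC_ATOMIC64 if !64BIT` Kconfig change earlier in the series: 32-bit MIPS now picks up the lock-based atomic64_t fallback from asm-generic/atomic64.h instead of having no 64-bit atomics at all. A rough user-space model of how such a fallback works (the generic version hashes the variable onto a small array of spinlocks; a single mutex stands in here):

	#include <pthread.h>

	typedef struct { long long counter; } atomic64_t;

	static pthread_mutex_t a64_lock = PTHREAD_MUTEX_INITIALIZER;

	/* lock-based fallback: correct, just slower than native 64-bit CAS */
	static long long atomic64_add_return(long long i, atomic64_t *v)
	{
		long long ret;

		pthread_mutex_lock(&a64_lock);
		ret = (v->counter += i);
		pthread_mutex_unlock(&a64_lock);
		return ret;
	}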
diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h index 613f6912dfc1..dbc51065df5b 100644 --- a/arch/mips/include/asm/compat.h +++ b/arch/mips/include/asm/compat.h | |||
@@ -145,7 +145,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr) | |||
145 | return (u32)(unsigned long)uptr; | 145 | return (u32)(unsigned long)uptr; |
146 | } | 146 | } |
147 | 147 | ||
148 | static inline void __user *compat_alloc_user_space(long len) | 148 | static inline void __user *arch_compat_alloc_user_space(long len) |
149 | { | 149 | { |
150 | struct pt_regs *regs = (struct pt_regs *) | 150 | struct pt_regs *regs = (struct pt_regs *) |
151 | ((unsigned long) current_thread_info() + THREAD_SIZE - 32) - 1; | 151 | ((unsigned long) current_thread_info() + THREAD_SIZE - 32) - 1; |
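The ia64 and MIPS hunks rename compat_alloc_user_space() to arch_compat_alloc_user_space(); presumably a generic compat_alloc_user_space() front end, added elsewhere in this series, now wraps the per-arch allocator so one common sanity check runs everywhere. A stand-alone model of that wrapper pattern; both functions and the scratch buffer are stand-ins, not the kernel's code.

	#include <stddef.h>

	static char compat_scratch[256];	/* stands in for the user stack area */

	static void *arch_compat_alloc_user_space(long len)
	{
		/* per-arch part: carve space out of a known region, no checks */
		return (len <= (long)sizeof(compat_scratch)) ? compat_scratch : NULL;
	}

	static void *compat_alloc_user_space(long len)
	{
		void *ptr;

		/* generic part: validation shared by every architecture */
		if (len < 0)
			return NULL;
		ptr = arch_compat_alloc_user_space(len);
		return ptr;
	}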
diff --git a/arch/mips/include/asm/cop2.h b/arch/mips/include/asm/cop2.h index 2cb2f0c2c4f8..3532e2c5f098 100644 --- a/arch/mips/include/asm/cop2.h +++ b/arch/mips/include/asm/cop2.h | |||
@@ -24,7 +24,7 @@ extern int cu2_notifier_call_chain(unsigned long val, void *v); | |||
24 | 24 | ||
25 | #define cu2_notifier(fn, pri) \ | 25 | #define cu2_notifier(fn, pri) \ |
26 | ({ \ | 26 | ({ \ |
27 | static struct notifier_block fn##_nb __cpuinitdata = { \ | 27 | static struct notifier_block fn##_nb = { \ |
28 | .notifier_call = fn, \ | 28 | .notifier_call = fn, \ |
29 | .priority = pri \ | 29 | .priority = pri \ |
30 | }; \ | 30 | }; \ |
diff --git a/arch/mips/include/asm/gic.h b/arch/mips/include/asm/gic.h index 9b9436a4d816..86548da650e7 100644 --- a/arch/mips/include/asm/gic.h +++ b/arch/mips/include/asm/gic.h | |||
@@ -321,6 +321,7 @@ struct gic_intrmask_regs { | |||
321 | */ | 321 | */ |
322 | struct gic_intr_map { | 322 | struct gic_intr_map { |
323 | unsigned int cpunum; /* Directed to this CPU */ | 323 | unsigned int cpunum; /* Directed to this CPU */ |
324 | #define GIC_UNUSED 0xdead /* Dummy data */ | ||
324 | unsigned int pin; /* Directed to this Pin */ | 325 | unsigned int pin; /* Directed to this Pin */ |
325 | unsigned int polarity; /* Polarity : +/- */ | 326 | unsigned int polarity; /* Polarity : +/- */ |
326 | unsigned int trigtype; /* Trigger : Edge/Levl */ | 327 | unsigned int trigtype; /* Trigger : Edge/Levl */ |
diff --git a/arch/mips/include/asm/mach-tx49xx/kmalloc.h b/arch/mips/include/asm/mach-tx49xx/kmalloc.h index b74caf65482b..ff9a8b86cb93 100644 --- a/arch/mips/include/asm/mach-tx49xx/kmalloc.h +++ b/arch/mips/include/asm/mach-tx49xx/kmalloc.h | |||
@@ -1,6 +1,6 @@ | |||
1 | #ifndef __ASM_MACH_TX49XX_KMALLOC_H | 1 | #ifndef __ASM_MACH_TX49XX_KMALLOC_H |
2 | #define __ASM_MACH_TX49XX_KMALLOC_H | 2 | #define __ASM_MACH_TX49XX_KMALLOC_H |
3 | 3 | ||
4 | #define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES | 4 | #define ARCH_DMA_MINALIGN L1_CACHE_BYTES |
5 | 5 | ||
6 | #endif /* __ASM_MACH_TX49XX_KMALLOC_H */ | 6 | #endif /* __ASM_MACH_TX49XX_KMALLOC_H */ |
diff --git a/arch/mips/include/asm/mips-boards/maltaint.h b/arch/mips/include/asm/mips-boards/maltaint.h index cea872fc6f5c..d11aa02a956a 100644 --- a/arch/mips/include/asm/mips-boards/maltaint.h +++ b/arch/mips/include/asm/mips-boards/maltaint.h | |||
@@ -88,9 +88,6 @@ | |||
88 | 88 | ||
89 | #define GIC_EXT_INTR(x) x | 89 | #define GIC_EXT_INTR(x) x |
90 | 90 | ||
91 | /* Dummy data */ | ||
92 | #define X 0xdead | ||
93 | |||
94 | /* External Interrupts used for IPI */ | 91 | /* External Interrupts used for IPI */ |
95 | #define GIC_IPI_EXT_INTR_RESCHED_VPE0 16 | 92 | #define GIC_IPI_EXT_INTR_RESCHED_VPE0 16 |
96 | #define GIC_IPI_EXT_INTR_CALLFNC_VPE0 17 | 93 | #define GIC_IPI_EXT_INTR_CALLFNC_VPE0 17 |
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h index a16beafcea91..e59cd1ac09c2 100644 --- a/arch/mips/include/asm/page.h +++ b/arch/mips/include/asm/page.h | |||
@@ -150,6 +150,20 @@ typedef struct { unsigned long pgprot; } pgprot_t; | |||
150 | ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET) | 150 | ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET) |
151 | #endif | 151 | #endif |
152 | #define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET)) | 152 | #define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET)) |
153 | |||
154 | /* | ||
155 | * RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad | ||
156 | * (lmo) rsp. 8431fd094d625b94d364fe393076ccef88e6ce18 (kernel.org). The | ||
157 | * discussion can be found in lkml posting | ||
158 | * <a2ebde260608230500o3407b108hc03debb9da6e62c@mail.gmail.com> which is | ||
159 | * archived at http://lists.linuxcoding.com/kernel/2006-q3/msg17360.html | ||
160 | * | ||
161 | * It is unclear if the misscompilations mentioned in | ||
162 | * http://lkml.org/lkml/2010/8/8/138 also affect MIPS so we keep this one | ||
163 | * until GCC 3.x has been retired before we can apply | ||
164 | * https://patchwork.linux-mips.org/patch/1541/ | ||
165 | */ | ||
166 | |||
153 | #define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) | 167 | #define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) |
154 | 168 | ||
155 | #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) | 169 | #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) |
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h index 2376f2e06e47..70df9c0d3c5b 100644 --- a/arch/mips/include/asm/thread_info.h +++ b/arch/mips/include/asm/thread_info.h | |||
@@ -146,7 +146,8 @@ register struct thread_info *__current_thread_info __asm__("$28"); | |||
146 | #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH) | 146 | #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH) |
147 | 147 | ||
148 | /* work to do on interrupt/exception return */ | 148 | /* work to do on interrupt/exception return */ |
149 | #define _TIF_WORK_MASK (0x0000ffef & ~_TIF_SECCOMP) | 149 | #define _TIF_WORK_MASK (0x0000ffef & \ |
150 | ~(_TIF_SECCOMP | _TIF_SYSCALL_AUDIT)) | ||
150 | /* work to do on any return to u-space */ | 151 | /* work to do on any return to u-space */ |
151 | #define _TIF_ALLWORK_MASK (0x8000ffff & ~_TIF_SECCOMP) | 152 | #define _TIF_ALLWORK_MASK (0x8000ffff & ~_TIF_SECCOMP) |
152 | 153 | ||
diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h index baa318a59c97..550725b881d5 100644 --- a/arch/mips/include/asm/unistd.h +++ b/arch/mips/include/asm/unistd.h | |||
@@ -356,16 +356,19 @@ | |||
356 | #define __NR_perf_event_open (__NR_Linux + 333) | 356 | #define __NR_perf_event_open (__NR_Linux + 333) |
357 | #define __NR_accept4 (__NR_Linux + 334) | 357 | #define __NR_accept4 (__NR_Linux + 334) |
358 | #define __NR_recvmmsg (__NR_Linux + 335) | 358 | #define __NR_recvmmsg (__NR_Linux + 335) |
359 | #define __NR_fanotify_init (__NR_Linux + 336) | ||
360 | #define __NR_fanotify_mark (__NR_Linux + 337) | ||
361 | #define __NR_prlimit64 (__NR_Linux + 338) | ||
359 | 362 | ||
360 | /* | 363 | /* |
361 | * Offset of the last Linux o32 flavoured syscall | 364 | * Offset of the last Linux o32 flavoured syscall |
362 | */ | 365 | */ |
363 | #define __NR_Linux_syscalls 335 | 366 | #define __NR_Linux_syscalls 338 |
364 | 367 | ||
365 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ | 368 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ |
366 | 369 | ||
367 | #define __NR_O32_Linux 4000 | 370 | #define __NR_O32_Linux 4000 |
368 | #define __NR_O32_Linux_syscalls 335 | 371 | #define __NR_O32_Linux_syscalls 338 |
369 | 372 | ||
370 | #if _MIPS_SIM == _MIPS_SIM_ABI64 | 373 | #if _MIPS_SIM == _MIPS_SIM_ABI64 |
371 | 374 | ||
@@ -668,16 +671,19 @@ | |||
668 | #define __NR_perf_event_open (__NR_Linux + 292) | 671 | #define __NR_perf_event_open (__NR_Linux + 292) |
669 | #define __NR_accept4 (__NR_Linux + 293) | 672 | #define __NR_accept4 (__NR_Linux + 293) |
670 | #define __NR_recvmmsg (__NR_Linux + 294) | 673 | #define __NR_recvmmsg (__NR_Linux + 294) |
674 | #define __NR_fanotify_init (__NR_Linux + 295) | ||
675 | #define __NR_fanotify_mark (__NR_Linux + 296) | ||
676 | #define __NR_prlimit64 (__NR_Linux + 297) | ||
671 | 677 | ||
672 | /* | 678 | /* |
673 | * Offset of the last Linux 64-bit flavoured syscall | 679 | * Offset of the last Linux 64-bit flavoured syscall |
674 | */ | 680 | */ |
675 | #define __NR_Linux_syscalls 294 | 681 | #define __NR_Linux_syscalls 297 |
676 | 682 | ||
677 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ | 683 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ |
678 | 684 | ||
679 | #define __NR_64_Linux 5000 | 685 | #define __NR_64_Linux 5000 |
680 | #define __NR_64_Linux_syscalls 294 | 686 | #define __NR_64_Linux_syscalls 297 |
681 | 687 | ||
682 | #if _MIPS_SIM == _MIPS_SIM_NABI32 | 688 | #if _MIPS_SIM == _MIPS_SIM_NABI32 |
683 | 689 | ||
@@ -985,16 +991,19 @@ | |||
985 | #define __NR_accept4 (__NR_Linux + 297) | 991 | #define __NR_accept4 (__NR_Linux + 297) |
986 | #define __NR_recvmmsg (__NR_Linux + 298) | 992 | #define __NR_recvmmsg (__NR_Linux + 298) |
987 | #define __NR_getdents64 (__NR_Linux + 299) | 993 | #define __NR_getdents64 (__NR_Linux + 299) |
994 | #define __NR_fanotify_init (__NR_Linux + 300) | ||
995 | #define __NR_fanotify_mark (__NR_Linux + 301) | ||
996 | #define __NR_prlimit64 (__NR_Linux + 302) | ||
988 | 997 | ||
989 | /* | 998 | /* |
990 | * Offset of the last N32 flavoured syscall | 999 | * Offset of the last N32 flavoured syscall |
991 | */ | 1000 | */ |
992 | #define __NR_Linux_syscalls 299 | 1001 | #define __NR_Linux_syscalls 302 |
993 | 1002 | ||
994 | #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ | 1003 | #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ |
995 | 1004 | ||
996 | #define __NR_N32_Linux 6000 | 1005 | #define __NR_N32_Linux 6000 |
997 | #define __NR_N32_Linux_syscalls 299 | 1006 | #define __NR_N32_Linux_syscalls 302 |
998 | 1007 | ||
999 | #ifdef __KERNEL__ | 1008 | #ifdef __KERNEL__ |
1000 | 1009 | ||
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c index b181f2f0ea8e..82ba9f62f49e 100644 --- a/arch/mips/kernel/irq-gic.c +++ b/arch/mips/kernel/irq-gic.c | |||
@@ -7,7 +7,6 @@ | |||
7 | #include <asm/io.h> | 7 | #include <asm/io.h> |
8 | #include <asm/gic.h> | 8 | #include <asm/gic.h> |
9 | #include <asm/gcmpregs.h> | 9 | #include <asm/gcmpregs.h> |
10 | #include <asm/mips-boards/maltaint.h> | ||
11 | #include <asm/irq.h> | 10 | #include <asm/irq.h> |
12 | #include <linux/hardirq.h> | 11 | #include <linux/hardirq.h> |
13 | #include <asm-generic/bitops/find.h> | 12 | #include <asm-generic/bitops/find.h> |
@@ -131,7 +130,7 @@ static int gic_set_affinity(unsigned int irq, const struct cpumask *cpumask) | |||
131 | int i; | 130 | int i; |
132 | 131 | ||
133 | irq -= _irqbase; | 132 | irq -= _irqbase; |
134 | pr_debug(KERN_DEBUG "%s(%d) called\n", __func__, irq); | 133 | pr_debug("%s(%d) called\n", __func__, irq); |
135 | cpumask_and(&tmp, cpumask, cpu_online_mask); | 134 | cpumask_and(&tmp, cpumask, cpu_online_mask); |
136 | if (cpus_empty(tmp)) | 135 | if (cpus_empty(tmp)) |
137 | return -1; | 136 | return -1; |
@@ -222,7 +221,7 @@ static void __init gic_basic_init(int numintrs, int numvpes, | |||
222 | /* Setup specifics */ | 221 | /* Setup specifics */ |
223 | for (i = 0; i < mapsize; i++) { | 222 | for (i = 0; i < mapsize; i++) { |
224 | cpu = intrmap[i].cpunum; | 223 | cpu = intrmap[i].cpunum; |
225 | if (cpu == X) | 224 | if (cpu == GIC_UNUSED) |
226 | continue; | 225 | continue; |
227 | if (cpu == 0 && i != 0 && intrmap[i].flags == 0) | 226 | if (cpu == 0 && i != 0 && intrmap[i].flags == 0) |
228 | continue; | 227 | continue; |
diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c index 1f4e2fa64140..f4546e97c60d 100644 --- a/arch/mips/kernel/kgdb.c +++ b/arch/mips/kernel/kgdb.c | |||
@@ -283,7 +283,7 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd, | |||
283 | struct pt_regs *regs = args->regs; | 283 | struct pt_regs *regs = args->regs; |
284 | int trap = (regs->cp0_cause & 0x7c) >> 2; | 284 | int trap = (regs->cp0_cause & 0x7c) >> 2; |
285 | 285 | ||
286 | /* Userpace events, ignore. */ | 286 | /* Userspace events, ignore. */ |
287 | if (user_mode(regs)) | 287 | if (user_mode(regs)) |
288 | return NOTIFY_DONE; | 288 | return NOTIFY_DONE; |
289 | 289 | ||
diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c index 80e2ba694bab..29811f043399 100644 --- a/arch/mips/kernel/kspd.c +++ b/arch/mips/kernel/kspd.c | |||
@@ -251,7 +251,7 @@ void sp_work_handle_request(void) | |||
251 | memset(&tz, 0, sizeof(tz)); | 251 | memset(&tz, 0, sizeof(tz)); |
252 | if ((ret.retval = sp_syscall(__NR_gettimeofday, (int)&tv, | 252 | if ((ret.retval = sp_syscall(__NR_gettimeofday, (int)&tv, |
253 | (int)&tz, 0, 0)) == 0) | 253 | (int)&tz, 0, 0)) == 0) |
254 | ret.retval = tv.tv_sec; | 254 | ret.retval = tv.tv_sec; |
255 | break; | 255 | break; |
256 | 256 | ||
257 | case MTSP_SYSCALL_EXIT: | 257 | case MTSP_SYSCALL_EXIT: |
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c index c2dab140dc98..6343b4a5b835 100644 --- a/arch/mips/kernel/linux32.c +++ b/arch/mips/kernel/linux32.c | |||
@@ -341,3 +341,10 @@ asmlinkage long sys32_lookup_dcookie(u32 a0, u32 a1, char __user *buf, | |||
341 | { | 341 | { |
342 | return sys_lookup_dcookie(merge_64(a0, a1), buf, len); | 342 | return sys_lookup_dcookie(merge_64(a0, a1), buf, len); |
343 | } | 343 | } |
344 | |||
345 | SYSCALL_DEFINE6(32_fanotify_mark, int, fanotify_fd, unsigned int, flags, | ||
346 | u64, a3, u64, a4, int, dfd, const char __user *, pathname) | ||
347 | { | ||
348 | return sys_fanotify_mark(fanotify_fd, flags, merge_64(a3, a4), | ||
349 | dfd, pathname); | ||
350 | } | ||
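The new sys_32_fanotify_mark() compat wrapper exists because a 64-bit mask from a 32-bit caller arrives split across two registers and must be reassembled with merge_64() before calling the native handler. A stand-alone sketch of the reassembly; which register carries the high half depends on the ABI and endianness, so the ordering below is illustrative only.

	#include <stdint.h>
	#include <stdio.h>

	/* plain helper, not the kernel's merge_64() macro */
	static uint64_t merge_64(uint32_t hi, uint32_t lo)
	{
		return ((uint64_t)hi << 32) | lo;
	}

	int main(void)
	{
		uint32_t a3 = 0x00000001, a4 = 0x00000040;	/* sample halves */

		printf("mask = %#llx\n",
		       (unsigned long long)merge_64(a3, a4));	/* 0x100000040 */
		return 0;
	}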
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index 17202bbe843f..584415eef8c9 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S | |||
@@ -583,7 +583,10 @@ einval: li v0, -ENOSYS | |||
583 | sys sys_rt_tgsigqueueinfo 4 | 583 | sys sys_rt_tgsigqueueinfo 4 |
584 | sys sys_perf_event_open 5 | 584 | sys sys_perf_event_open 5 |
585 | sys sys_accept4 4 | 585 | sys sys_accept4 4 |
586 | sys sys_recvmmsg 5 | 586 | sys sys_recvmmsg 5 /* 4335 */ |
587 | sys sys_fanotify_init 2 | ||
588 | sys sys_fanotify_mark 6 | ||
589 | sys sys_prlimit64 4 | ||
587 | .endm | 590 | .endm |
588 | 591 | ||
589 | /* We pre-compute the number of _instruction_ bytes needed to | 592 | /* We pre-compute the number of _instruction_ bytes needed to |
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index a8a6c596eb04..5573f8e4e326 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S | |||
@@ -416,9 +416,12 @@ sys_call_table: | |||
416 | PTR sys_pipe2 | 416 | PTR sys_pipe2 |
417 | PTR sys_inotify_init1 | 417 | PTR sys_inotify_init1 |
418 | PTR sys_preadv | 418 | PTR sys_preadv |
419 | PTR sys_pwritev /* 5390 */ | 419 | PTR sys_pwritev /* 5290 */ |
420 | PTR sys_rt_tgsigqueueinfo | 420 | PTR sys_rt_tgsigqueueinfo |
421 | PTR sys_perf_event_open | 421 | PTR sys_perf_event_open |
422 | PTR sys_accept4 | 422 | PTR sys_accept4 |
423 | PTR sys_recvmmsg | 423 | PTR sys_recvmmsg |
424 | PTR sys_fanotify_init /* 5295 */ | ||
425 | PTR sys_fanotify_mark | ||
426 | PTR sys_prlimit64 | ||
424 | .size sys_call_table,.-sys_call_table | 427 | .size sys_call_table,.-sys_call_table |
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index a3d66137731a..1e38ec97672e 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S | |||
@@ -419,5 +419,8 @@ EXPORT(sysn32_call_table) | |||
419 | PTR sys_perf_event_open | 419 | PTR sys_perf_event_open |
420 | PTR sys_accept4 | 420 | PTR sys_accept4 |
421 | PTR compat_sys_recvmmsg | 421 | PTR compat_sys_recvmmsg |
422 | PTR sys_getdents | 422 | PTR sys_getdents64 |
423 | PTR sys_fanotify_init /* 6300 */ | ||
424 | PTR sys_fanotify_mark | ||
425 | PTR sys_prlimit64 | ||
423 | .size sysn32_call_table,.-sysn32_call_table | 426 | .size sysn32_call_table,.-sysn32_call_table |
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 813689ef2384..171979fc98e5 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S | |||
@@ -538,5 +538,8 @@ sys_call_table: | |||
538 | PTR compat_sys_rt_tgsigqueueinfo | 538 | PTR compat_sys_rt_tgsigqueueinfo |
539 | PTR sys_perf_event_open | 539 | PTR sys_perf_event_open |
540 | PTR sys_accept4 | 540 | PTR sys_accept4 |
541 | PTR compat_sys_recvmmsg | 541 | PTR compat_sys_recvmmsg /* 4335 */ |
542 | PTR sys_fanotify_init | ||
543 | PTR sys_32_fanotify_mark | ||
544 | PTR sys_prlimit64 | ||
542 | .size sys_call_table,.-sys_call_table | 545 | .size sys_call_table,.-sys_call_table |
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c index 7ba890860d98..469d4019f795 100644 --- a/arch/mips/mm/dma-default.c +++ b/arch/mips/mm/dma-default.c | |||
@@ -44,27 +44,39 @@ static inline int cpu_is_noncoherent_r10000(struct device *dev) | |||
44 | 44 | ||
45 | static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp) | 45 | static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp) |
46 | { | 46 | { |
47 | gfp_t dma_flag; | ||
48 | |||
47 | /* ignore region specifiers */ | 49 | /* ignore region specifiers */ |
48 | gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM); | 50 | gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM); |
49 | 51 | ||
50 | #ifdef CONFIG_ZONE_DMA | 52 | #ifdef CONFIG_ISA |
51 | if (dev == NULL) | 53 | if (dev == NULL) |
52 | gfp |= __GFP_DMA; | 54 | dma_flag = __GFP_DMA; |
53 | else if (dev->coherent_dma_mask < DMA_BIT_MASK(24)) | ||
54 | gfp |= __GFP_DMA; | ||
55 | else | 55 | else |
56 | #endif | 56 | #endif |
57 | #ifdef CONFIG_ZONE_DMA32 | 57 | #if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA) |
58 | if (dev->coherent_dma_mask < DMA_BIT_MASK(32)) | 58 | if (dev->coherent_dma_mask < DMA_BIT_MASK(32)) |
59 | gfp |= __GFP_DMA32; | 59 | dma_flag = __GFP_DMA; |
60 | else if (dev->coherent_dma_mask < DMA_BIT_MASK(64)) | ||
61 | dma_flag = __GFP_DMA32; | ||
62 | else | ||
63 | #endif | ||
64 | #if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA) | ||
65 | if (dev->coherent_dma_mask < DMA_BIT_MASK(64)) | ||
66 | dma_flag = __GFP_DMA32; | ||
67 | else | ||
68 | #endif | ||
69 | #if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32) | ||
70 | if (dev->coherent_dma_mask < DMA_BIT_MASK(64)) | ||
71 | dma_flag = __GFP_DMA; | ||
60 | else | 72 | else |
61 | #endif | 73 | #endif |
62 | ; | 74 | dma_flag = 0; |
63 | 75 | ||
64 | /* Don't invoke OOM killer */ | 76 | /* Don't invoke OOM killer */ |
65 | gfp |= __GFP_NORETRY; | 77 | gfp |= __GFP_NORETRY; |
66 | 78 | ||
67 | return gfp; | 79 | return gfp | dma_flag; |
68 | } | 80 | } |
69 | 81 | ||
70 | void *dma_alloc_noncoherent(struct device *dev, size_t size, | 82 | void *dma_alloc_noncoherent(struct device *dev, size_t size, |
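The massage_gfp_flags() rewrite above strips any caller-supplied zone modifiers and then picks exactly one zone flag from the device's coherent DMA mask, with the choice of available zones decided by the config options. A compilable model of that decision, ignoring the per-config #ifdefs; DMA_BIT_MASK64() and the flag values are stand-ins for the kernel's DMA_BIT_MASK() and __GFP_* constants.

	#include <stdint.h>

	#define GFP_DMA    0x01u
	#define GFP_DMA32  0x02u
	#define DMA_BIT_MASK64(n)  (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

	static unsigned massage_gfp_flags(uint64_t coherent_mask, unsigned gfp)
	{
		unsigned dma_flag;

		gfp &= ~(GFP_DMA | GFP_DMA32);		/* ignore region specifiers */

		if (coherent_mask < DMA_BIT_MASK64(32))
			dma_flag = GFP_DMA;		/* must sit in ZONE_DMA */
		else if (coherent_mask < DMA_BIT_MASK64(64))
			dma_flag = GFP_DMA32;		/* ZONE_DMA32 is enough */
		else
			dma_flag = 0;			/* any memory will do */

		return gfp | dma_flag;
	}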
diff --git a/arch/mips/mm/sc-rm7k.c b/arch/mips/mm/sc-rm7k.c index 1ef75cd80a0d..274af3be1442 100644 --- a/arch/mips/mm/sc-rm7k.c +++ b/arch/mips/mm/sc-rm7k.c | |||
@@ -30,7 +30,7 @@ | |||
30 | #define tc_lsize 32 | 30 | #define tc_lsize 32 |
31 | 31 | ||
32 | extern unsigned long icache_way_size, dcache_way_size; | 32 | extern unsigned long icache_way_size, dcache_way_size; |
33 | unsigned long tcache_size; | 33 | static unsigned long tcache_size; |
34 | 34 | ||
35 | #include <asm/r4kcache.h> | 35 | #include <asm/r4kcache.h> |
36 | 36 | ||
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c index 15949b0be811..b79b24afe3a2 100644 --- a/arch/mips/mti-malta/malta-int.c +++ b/arch/mips/mti-malta/malta-int.c | |||
@@ -385,6 +385,8 @@ static int __initdata msc_nr_eicirqs = ARRAY_SIZE(msc_eicirqmap); | |||
385 | */ | 385 | */ |
386 | 386 | ||
387 | #define GIC_CPU_NMI GIC_MAP_TO_NMI_MSK | 387 | #define GIC_CPU_NMI GIC_MAP_TO_NMI_MSK |
388 | #define X GIC_UNUSED | ||
389 | |||
388 | static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = { | 390 | static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = { |
389 | { X, X, X, X, 0 }, | 391 | { X, X, X, X, 0 }, |
390 | { X, X, X, X, 0 }, | 392 | { X, X, X, X, 0 }, |
@@ -404,6 +406,7 @@ static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = { | |||
404 | { X, X, X, X, 0 }, | 406 | { X, X, X, X, 0 }, |
405 | /* The remainder of this table is initialised by fill_ipi_map */ | 407 | /* The remainder of this table is initialised by fill_ipi_map */ |
406 | }; | 408 | }; |
409 | #undef X | ||
407 | 410 | ||
408 | /* | 411 | /* |
409 | * GCMP needs to be detected before any SMP initialisation | 412 | * GCMP needs to be detected before any SMP initialisation |
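Taken together, the gic.h, maltaint.h and malta-int.c hunks replace the global one-letter `X` dummy with a named GIC_UNUSED sentinel in the header, while the board file keeps the compact table by defining a local `X` alias and #undef-ing it immediately afterwards. A small stand-alone illustration of that pattern (the struct and table contents are made up):

	#define GIC_UNUSED 0xdead	/* sentinel: entry not wired to any CPU */

	struct intr_map { unsigned int cpu, pin; };

	#define X GIC_UNUSED		/* keep the initialiser lines short */
	static struct intr_map map[] = {
		{ X, X },
		{ 0, 3 },
		{ X, X },
	};
	#undef X			/* do not leak the one-letter name */

	static int map_entries = sizeof(map) / sizeof(map[0]);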
diff --git a/arch/mips/pci/pci-rc32434.c b/arch/mips/pci/pci-rc32434.c index 71f7d27b0d4c..f31218e17d3c 100644 --- a/arch/mips/pci/pci-rc32434.c +++ b/arch/mips/pci/pci-rc32434.c | |||
@@ -118,7 +118,7 @@ static int __init rc32434_pcibridge_init(void) | |||
118 | if (!((pcicvalue == PCIM_H_EA) || | 118 | if (!((pcicvalue == PCIM_H_EA) || |
119 | (pcicvalue == PCIM_H_IA_FIX) || | 119 | (pcicvalue == PCIM_H_IA_FIX) || |
120 | (pcicvalue == PCIM_H_IA_RR))) { | 120 | (pcicvalue == PCIM_H_IA_RR))) { |
121 | pr_err(KERN_ERR "PCI init error!!!\n"); | 121 | pr_err("PCI init error!!!\n"); |
122 | /* Not in Host Mode, return ERROR */ | 122 | /* Not in Host Mode, return ERROR */ |
123 | return -1; | 123 | return -1; |
124 | } | 124 | } |
diff --git a/arch/mips/pnx8550/common/reset.c b/arch/mips/pnx8550/common/reset.c index fadd8744a6bc..e7a12ff304b9 100644 --- a/arch/mips/pnx8550/common/reset.c +++ b/arch/mips/pnx8550/common/reset.c | |||
@@ -22,29 +22,19 @@ | |||
22 | */ | 22 | */ |
23 | #include <linux/kernel.h> | 23 | #include <linux/kernel.h> |
24 | 24 | ||
25 | #include <asm/processor.h> | ||
25 | #include <asm/reboot.h> | 26 | #include <asm/reboot.h> |
26 | #include <glb.h> | 27 | #include <glb.h> |
27 | 28 | ||
28 | void pnx8550_machine_restart(char *command) | 29 | void pnx8550_machine_restart(char *command) |
29 | { | 30 | { |
30 | char head[] = "************* Machine restart *************"; | ||
31 | char foot[] = "*******************************************"; | ||
32 | |||
33 | printk("\n\n"); | ||
34 | printk("%s\n", head); | ||
35 | if (command != NULL) | ||
36 | printk("* %s\n", command); | ||
37 | printk("%s\n", foot); | ||
38 | |||
39 | PNX8550_RST_CTL = PNX8550_RST_DO_SW_RST; | 31 | PNX8550_RST_CTL = PNX8550_RST_DO_SW_RST; |
40 | } | 32 | } |
41 | 33 | ||
42 | void pnx8550_machine_halt(void) | 34 | void pnx8550_machine_halt(void) |
43 | { | 35 | { |
44 | printk("*** Machine halt. (Not implemented) ***\n"); | 36 | while (1) { |
45 | } | 37 | if (cpu_wait) |
46 | 38 | cpu_wait(); | |
47 | void pnx8550_machine_power_off(void) | 39 | } |
48 | { | ||
49 | printk("*** Machine power off. (Not implemented) ***\n"); | ||
50 | } | 40 | } |
diff --git a/arch/mips/pnx8550/common/setup.c b/arch/mips/pnx8550/common/setup.c index 64246c9c875c..43cb3945fdbf 100644 --- a/arch/mips/pnx8550/common/setup.c +++ b/arch/mips/pnx8550/common/setup.c | |||
@@ -44,7 +44,6 @@ | |||
44 | extern void __init board_setup(void); | 44 | extern void __init board_setup(void); |
45 | extern void pnx8550_machine_restart(char *); | 45 | extern void pnx8550_machine_restart(char *); |
46 | extern void pnx8550_machine_halt(void); | 46 | extern void pnx8550_machine_halt(void); |
47 | extern void pnx8550_machine_power_off(void); | ||
48 | extern struct resource ioport_resource; | 47 | extern struct resource ioport_resource; |
49 | extern struct resource iomem_resource; | 48 | extern struct resource iomem_resource; |
50 | extern char *prom_getcmdline(void); | 49 | extern char *prom_getcmdline(void); |
@@ -100,7 +99,7 @@ void __init plat_mem_setup(void) | |||
100 | 99 | ||
101 | _machine_restart = pnx8550_machine_restart; | 100 | _machine_restart = pnx8550_machine_restart; |
102 | _machine_halt = pnx8550_machine_halt; | 101 | _machine_halt = pnx8550_machine_halt; |
103 | pm_power_off = pnx8550_machine_power_off; | 102 | pm_power_off = pnx8550_machine_halt; |
104 | 103 | ||
105 | /* Clear the Global 2 Register, PCI Inta Output Enable Registers | 104 | /* Clear the Global 2 Register, PCI Inta Output Enable Registers |
106 | Bit 1:Enable DAC Powerdown | 105 | Bit 1:Enable DAC Powerdown |
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig index 444b9f918fdf..7c2a2f7f8dc1 100644 --- a/arch/mn10300/Kconfig +++ b/arch/mn10300/Kconfig | |||
@@ -8,7 +8,6 @@ mainmenu "Linux Kernel Configuration" | |||
8 | config MN10300 | 8 | config MN10300 |
9 | def_bool y | 9 | def_bool y |
10 | select HAVE_OPROFILE | 10 | select HAVE_OPROFILE |
11 | select HAVE_ARCH_TRACEHOOK | ||
12 | 11 | ||
13 | config AM33 | 12 | config AM33 |
14 | def_bool y | 13 | def_bool y |
diff --git a/arch/mn10300/Kconfig.debug b/arch/mn10300/Kconfig.debug index ff80e86b9bd2..ce83c74b3fd7 100644 --- a/arch/mn10300/Kconfig.debug +++ b/arch/mn10300/Kconfig.debug | |||
@@ -101,7 +101,7 @@ config GDBSTUB_DEBUG_BREAKPOINT | |||
101 | 101 | ||
102 | choice | 102 | choice |
103 | prompt "GDB stub port" | 103 | prompt "GDB stub port" |
104 | default GDBSTUB_TTYSM0 | 104 | default GDBSTUB_ON_TTYSM0 |
105 | depends on GDBSTUB | 105 | depends on GDBSTUB |
106 | help | 106 | help |
107 | Select the serial port used for GDB-stub. | 107 | Select the serial port used for GDB-stub. |
diff --git a/arch/mn10300/include/asm/bitops.h b/arch/mn10300/include/asm/bitops.h index f49ac49e09ad..3f50e9661076 100644 --- a/arch/mn10300/include/asm/bitops.h +++ b/arch/mn10300/include/asm/bitops.h | |||
@@ -229,9 +229,9 @@ int ffs(int x) | |||
229 | #include <asm-generic/bitops/hweight.h> | 229 | #include <asm-generic/bitops/hweight.h> |
230 | 230 | ||
231 | #define ext2_set_bit_atomic(lock, nr, addr) \ | 231 | #define ext2_set_bit_atomic(lock, nr, addr) \ |
232 | test_and_set_bit((nr) ^ 0x18, (addr)) | 232 | test_and_set_bit((nr), (addr)) |
233 | #define ext2_clear_bit_atomic(lock, nr, addr) \ | 233 | #define ext2_clear_bit_atomic(lock, nr, addr) \ |
234 | test_and_clear_bit((nr) ^ 0x18, (addr)) | 234 | test_and_clear_bit((nr), (addr)) |
235 | 235 | ||
236 | #include <asm-generic/bitops/ext2-non-atomic.h> | 236 | #include <asm-generic/bitops/ext2-non-atomic.h> |
237 | #include <asm-generic/bitops/minix-le.h> | 237 | #include <asm-generic/bitops/minix-le.h> |
diff --git a/arch/mn10300/include/asm/signal.h b/arch/mn10300/include/asm/signal.h index 7e891fce2370..1865d72a86ff 100644 --- a/arch/mn10300/include/asm/signal.h +++ b/arch/mn10300/include/asm/signal.h | |||
@@ -78,7 +78,7 @@ typedef unsigned long sigset_t; | |||
78 | 78 | ||
79 | /* These should not be considered constants from userland. */ | 79 | /* These should not be considered constants from userland. */ |
80 | #define SIGRTMIN 32 | 80 | #define SIGRTMIN 32 |
81 | #define SIGRTMAX (_NSIG-1) | 81 | #define SIGRTMAX _NSIG |
82 | 82 | ||
83 | /* | 83 | /* |
84 | * SA_FLAGS values: | 84 | * SA_FLAGS values: |
diff --git a/arch/mn10300/kernel/mn10300-serial.c b/arch/mn10300/kernel/mn10300-serial.c index 9d49073e827a..db509dd80565 100644 --- a/arch/mn10300/kernel/mn10300-serial.c +++ b/arch/mn10300/kernel/mn10300-serial.c | |||
@@ -156,17 +156,17 @@ struct mn10300_serial_port mn10300_serial_port_sif0 = { | |||
156 | ._intr = &SC0ICR, | 156 | ._intr = &SC0ICR, |
157 | ._rxb = &SC0RXB, | 157 | ._rxb = &SC0RXB, |
158 | ._txb = &SC0TXB, | 158 | ._txb = &SC0TXB, |
159 | .rx_name = "ttySM0/Rx", | 159 | .rx_name = "ttySM0:Rx", |
160 | .tx_name = "ttySM0/Tx", | 160 | .tx_name = "ttySM0:Tx", |
161 | #ifdef CONFIG_MN10300_TTYSM0_TIMER8 | 161 | #ifdef CONFIG_MN10300_TTYSM0_TIMER8 |
162 | .tm_name = "ttySM0/Timer8", | 162 | .tm_name = "ttySM0:Timer8", |
163 | ._tmxmd = &TM8MD, | 163 | ._tmxmd = &TM8MD, |
164 | ._tmxbr = &TM8BR, | 164 | ._tmxbr = &TM8BR, |
165 | ._tmicr = &TM8ICR, | 165 | ._tmicr = &TM8ICR, |
166 | .tm_irq = TM8IRQ, | 166 | .tm_irq = TM8IRQ, |
167 | .div_timer = MNSCx_DIV_TIMER_16BIT, | 167 | .div_timer = MNSCx_DIV_TIMER_16BIT, |
168 | #else /* CONFIG_MN10300_TTYSM0_TIMER2 */ | 168 | #else /* CONFIG_MN10300_TTYSM0_TIMER2 */ |
169 | .tm_name = "ttySM0/Timer2", | 169 | .tm_name = "ttySM0:Timer2", |
170 | ._tmxmd = &TM2MD, | 170 | ._tmxmd = &TM2MD, |
171 | ._tmxbr = (volatile u16 *) &TM2BR, | 171 | ._tmxbr = (volatile u16 *) &TM2BR, |
172 | ._tmicr = &TM2ICR, | 172 | ._tmicr = &TM2ICR, |
@@ -209,17 +209,17 @@ struct mn10300_serial_port mn10300_serial_port_sif1 = { | |||
209 | ._intr = &SC1ICR, | 209 | ._intr = &SC1ICR, |
210 | ._rxb = &SC1RXB, | 210 | ._rxb = &SC1RXB, |
211 | ._txb = &SC1TXB, | 211 | ._txb = &SC1TXB, |
212 | .rx_name = "ttySM1/Rx", | 212 | .rx_name = "ttySM1:Rx", |
213 | .tx_name = "ttySM1/Tx", | 213 | .tx_name = "ttySM1:Tx", |
214 | #ifdef CONFIG_MN10300_TTYSM1_TIMER9 | 214 | #ifdef CONFIG_MN10300_TTYSM1_TIMER9 |
215 | .tm_name = "ttySM1/Timer9", | 215 | .tm_name = "ttySM1:Timer9", |
216 | ._tmxmd = &TM9MD, | 216 | ._tmxmd = &TM9MD, |
217 | ._tmxbr = &TM9BR, | 217 | ._tmxbr = &TM9BR, |
218 | ._tmicr = &TM9ICR, | 218 | ._tmicr = &TM9ICR, |
219 | .tm_irq = TM9IRQ, | 219 | .tm_irq = TM9IRQ, |
220 | .div_timer = MNSCx_DIV_TIMER_16BIT, | 220 | .div_timer = MNSCx_DIV_TIMER_16BIT, |
221 | #else /* CONFIG_MN10300_TTYSM1_TIMER3 */ | 221 | #else /* CONFIG_MN10300_TTYSM1_TIMER3 */ |
222 | .tm_name = "ttySM1/Timer3", | 222 | .tm_name = "ttySM1:Timer3", |
223 | ._tmxmd = &TM3MD, | 223 | ._tmxmd = &TM3MD, |
224 | ._tmxbr = (volatile u16 *) &TM3BR, | 224 | ._tmxbr = (volatile u16 *) &TM3BR, |
225 | ._tmicr = &TM3ICR, | 225 | ._tmicr = &TM3ICR, |
@@ -260,9 +260,9 @@ struct mn10300_serial_port mn10300_serial_port_sif2 = { | |||
260 | .uart.lock = | 260 | .uart.lock = |
261 | __SPIN_LOCK_UNLOCKED(mn10300_serial_port_sif2.uart.lock), | 261 | __SPIN_LOCK_UNLOCKED(mn10300_serial_port_sif2.uart.lock), |
262 | .name = "ttySM2", | 262 | .name = "ttySM2", |
263 | .rx_name = "ttySM2/Rx", | 263 | .rx_name = "ttySM2:Rx", |
264 | .tx_name = "ttySM2/Tx", | 264 | .tx_name = "ttySM2:Tx", |
265 | .tm_name = "ttySM2/Timer10", | 265 | .tm_name = "ttySM2:Timer10", |
266 | ._iobase = &SC2CTR, | 266 | ._iobase = &SC2CTR, |
267 | ._control = &SC2CTR, | 267 | ._control = &SC2CTR, |
268 | ._status = &SC2STR, | 268 | ._status = &SC2STR, |
diff --git a/arch/mn10300/kernel/module.c b/arch/mn10300/kernel/module.c index 6aea7fd76993..196a111e2e29 100644 --- a/arch/mn10300/kernel/module.c +++ b/arch/mn10300/kernel/module.c | |||
@@ -206,7 +206,7 @@ int module_finalize(const Elf_Ehdr *hdr, | |||
206 | const Elf_Shdr *sechdrs, | 206 | const Elf_Shdr *sechdrs, |
207 | struct module *me) | 207 | struct module *me) |
208 | { | 208 | { |
209 | return module_bug_finalize(hdr, sechdrs, me); | 209 | return 0; |
210 | } | 210 | } |
211 | 211 | ||
212 | /* | 212 | /* |
@@ -214,5 +214,4 @@ int module_finalize(const Elf_Ehdr *hdr, | |||
214 | */ | 214 | */ |
215 | void module_arch_cleanup(struct module *mod) | 215 | void module_arch_cleanup(struct module *mod) |
216 | { | 216 | { |
217 | module_bug_cleanup(mod); | ||
218 | } | 217 | } |
diff --git a/arch/mn10300/kernel/signal.c b/arch/mn10300/kernel/signal.c index 717db14c2cc3..d4de05ab7864 100644 --- a/arch/mn10300/kernel/signal.c +++ b/arch/mn10300/kernel/signal.c | |||
@@ -65,10 +65,10 @@ asmlinkage long sys_sigaction(int sig, | |||
65 | old_sigset_t mask; | 65 | old_sigset_t mask; |
66 | if (verify_area(VERIFY_READ, act, sizeof(*act)) || | 66 | if (verify_area(VERIFY_READ, act, sizeof(*act)) || |
67 | __get_user(new_ka.sa.sa_handler, &act->sa_handler) || | 67 | __get_user(new_ka.sa.sa_handler, &act->sa_handler) || |
68 | __get_user(new_ka.sa.sa_restorer, &act->sa_restorer)) | 68 | __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) || |
69 | __get_user(new_ka.sa.sa_flags, &act->sa_flags) || | ||
70 | __get_user(mask, &act->sa_mask)) | ||
69 | return -EFAULT; | 71 | return -EFAULT; |
70 | __get_user(new_ka.sa.sa_flags, &act->sa_flags); | ||
71 | __get_user(mask, &act->sa_mask); | ||
72 | siginitset(&new_ka.sa.sa_mask, mask); | 72 | siginitset(&new_ka.sa.sa_mask, mask); |
73 | } | 73 | } |
74 | 74 | ||
@@ -77,10 +77,10 @@ asmlinkage long sys_sigaction(int sig, | |||
77 | if (!ret && oact) { | 77 | if (!ret && oact) { |
78 | if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) || | 78 | if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) || |
79 | __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || | 79 | __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || |
80 | __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer)) | 80 | __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) || |
81 | __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || | ||
82 | __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) | ||
81 | return -EFAULT; | 83 | return -EFAULT; |
82 | __put_user(old_ka.sa.sa_flags, &oact->sa_flags); | ||
83 | __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); | ||
84 | } | 84 | } |
85 | 85 | ||
86 | return ret; | 86 | return ret; |
@@ -102,6 +102,9 @@ static int restore_sigcontext(struct pt_regs *regs, | |||
102 | { | 102 | { |
103 | unsigned int err = 0; | 103 | unsigned int err = 0; |
104 | 104 | ||
105 | /* Always make any pending restarted system calls return -EINTR */ | ||
106 | current_thread_info()->restart_block.fn = do_no_restart_syscall; | ||
107 | |||
105 | if (is_using_fpu(current)) | 108 | if (is_using_fpu(current)) |
106 | fpu_kill_state(current); | 109 | fpu_kill_state(current); |
107 | 110 | ||
@@ -330,8 +333,6 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, | |||
330 | regs->d0 = sig; | 333 | regs->d0 = sig; |
331 | regs->d1 = (unsigned long) &frame->sc; | 334 | regs->d1 = (unsigned long) &frame->sc; |
332 | 335 | ||
333 | set_fs(USER_DS); | ||
334 | |||
335 | /* the tracer may want to single-step inside the handler */ | 336 | /* the tracer may want to single-step inside the handler */ |
336 | if (test_thread_flag(TIF_SINGLESTEP)) | 337 | if (test_thread_flag(TIF_SINGLESTEP)) |
337 | ptrace_notify(SIGTRAP); | 338 | ptrace_notify(SIGTRAP); |
@@ -345,7 +346,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, | |||
345 | return 0; | 346 | return 0; |
346 | 347 | ||
347 | give_sigsegv: | 348 | give_sigsegv: |
348 | force_sig(SIGSEGV, current); | 349 | force_sigsegv(sig, current); |
349 | return -EFAULT; | 350 | return -EFAULT; |
350 | } | 351 | } |
351 | 352 | ||
@@ -413,8 +414,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
413 | regs->d0 = sig; | 414 | regs->d0 = sig; |
414 | regs->d1 = (long) &frame->info; | 415 | regs->d1 = (long) &frame->info; |
415 | 416 | ||
416 | set_fs(USER_DS); | ||
417 | |||
418 | /* the tracer may want to single-step inside the handler */ | 417 | /* the tracer may want to single-step inside the handler */ |
419 | if (test_thread_flag(TIF_SINGLESTEP)) | 418 | if (test_thread_flag(TIF_SINGLESTEP)) |
420 | ptrace_notify(SIGTRAP); | 419 | ptrace_notify(SIGTRAP); |
@@ -428,10 +427,16 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
428 | return 0; | 427 | return 0; |
429 | 428 | ||
430 | give_sigsegv: | 429 | give_sigsegv: |
431 | force_sig(SIGSEGV, current); | 430 | force_sigsegv(sig, current); |
432 | return -EFAULT; | 431 | return -EFAULT; |
433 | } | 432 | } |
434 | 433 | ||
434 | static inline void stepback(struct pt_regs *regs) | ||
435 | { | ||
436 | regs->pc -= 2; | ||
437 | regs->orig_d0 = -1; | ||
438 | } | ||
439 | |||
435 | /* | 440 | /* |
436 | * handle the actual delivery of a signal to userspace | 441 | * handle the actual delivery of a signal to userspace |
437 | */ | 442 | */ |
@@ -459,7 +464,7 @@ static int handle_signal(int sig, | |||
459 | /* fallthrough */ | 464 | /* fallthrough */ |
460 | case -ERESTARTNOINTR: | 465 | case -ERESTARTNOINTR: |
461 | regs->d0 = regs->orig_d0; | 466 | regs->d0 = regs->orig_d0; |
462 | regs->pc -= 2; | 467 | stepback(regs); |
463 | } | 468 | } |
464 | } | 469 | } |
465 | 470 | ||
@@ -527,12 +532,12 @@ static void do_signal(struct pt_regs *regs) | |||
527 | case -ERESTARTSYS: | 532 | case -ERESTARTSYS: |
528 | case -ERESTARTNOINTR: | 533 | case -ERESTARTNOINTR: |
529 | regs->d0 = regs->orig_d0; | 534 | regs->d0 = regs->orig_d0; |
530 | regs->pc -= 2; | 535 | stepback(regs); |
531 | break; | 536 | break; |
532 | 537 | ||
533 | case -ERESTART_RESTARTBLOCK: | 538 | case -ERESTART_RESTARTBLOCK: |
534 | regs->d0 = __NR_restart_syscall; | 539 | regs->d0 = __NR_restart_syscall; |
535 | regs->pc -= 2; | 540 | stepback(regs); |
536 | break; | 541 | break; |
537 | } | 542 | } |
538 | } | 543 | } |
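In the mn10300 signal.c hunks above, the open-coded regs->pc -= 2 rewinds become a stepback() helper that also resets orig_d0, so a syscall can only be re-armed for restart once even if another signal is delivered first. A reduced, compilable sketch of that restart logic follows; the register layout and the restart error values are local stand-ins for the kernel definitions.

	#include <stdio.h>

	/* Stand-ins for the kernel's restart codes and pt_regs fields. */
	enum { ERESTARTSYS = 512, ERESTARTNOINTR = 513, ERESTARTNOHAND = 514 };

	struct regs {
		unsigned long pc;
		long d0;	/* syscall result */
		long orig_d0;	/* syscall number; -1 once the rewind is done */
	};

	/* Rewind over the 2-byte syscall instruction; clearing orig_d0 makes
	 * sure the rewind cannot be applied a second time. */
	static void stepback(struct regs *r)
	{
		r->pc -= 2;
		r->orig_d0 = -1;
	}

	int main(void)
	{
		struct regs r = { .pc = 0x1000, .d0 = -ERESTARTSYS, .orig_d0 = 42 };

		if (r.d0 == -ERESTARTSYS || r.d0 == -ERESTARTNOINTR ||
		    r.d0 == -ERESTARTNOHAND) {
			r.d0 = r.orig_d0;	/* re-issue the same syscall */
			stepback(&r);
		}
		printf("pc=%#lx d0=%ld orig_d0=%ld\n", r.pc, r.d0, r.orig_d0);
		return 0;
	}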
diff --git a/arch/mn10300/mm/Makefile b/arch/mn10300/mm/Makefile index 28b9d983db0c..1557277fbc5c 100644 --- a/arch/mn10300/mm/Makefile +++ b/arch/mn10300/mm/Makefile | |||
@@ -2,13 +2,11 @@ | |||
2 | # Makefile for the MN10300-specific memory management code | 2 | # Makefile for the MN10300-specific memory management code |
3 | # | 3 | # |
4 | 4 | ||
5 | cacheflush-y := cache.o cache-mn10300.o | ||
6 | cacheflush-$(CONFIG_MN10300_CACHE_WBACK) += cache-flush-mn10300.o | ||
7 | |||
8 | cacheflush-$(CONFIG_MN10300_CACHE_DISABLED) := cache-disabled.o | ||
9 | |||
5 | obj-y := \ | 10 | obj-y := \ |
6 | init.o fault.o pgtable.o extable.o tlb-mn10300.o mmu-context.o \ | 11 | init.o fault.o pgtable.o extable.o tlb-mn10300.o mmu-context.o \ |
7 | misalignment.o dma-alloc.o | 12 | misalignment.o dma-alloc.o $(cacheflush-y) |
8 | |||
9 | ifneq ($(CONFIG_MN10300_CACHE_DISABLED),y) | ||
10 | obj-y += cache.o cache-mn10300.o | ||
11 | ifeq ($(CONFIG_MN10300_CACHE_WBACK),y) | ||
12 | obj-y += cache-flush-mn10300.o | ||
13 | endif | ||
14 | endif | ||
diff --git a/arch/mn10300/mm/cache-disabled.c b/arch/mn10300/mm/cache-disabled.c new file mode 100644 index 000000000000..f669ea42aba6 --- /dev/null +++ b/arch/mn10300/mm/cache-disabled.c | |||
@@ -0,0 +1,21 @@ | |||
1 | /* Handle the cache being disabled | ||
2 | * | ||
3 | * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public Licence | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the Licence, or (at your option) any later version. | ||
10 | */ | ||
11 | #include <linux/mm.h> | ||
12 | |||
13 | /* | ||
14 | * allow userspace to flush the instruction cache | ||
15 | */ | ||
16 | asmlinkage long sys_cacheflush(unsigned long start, unsigned long end) | ||
17 | { | ||
18 | if (end < start) | ||
19 | return -EINVAL; | ||
20 | return 0; | ||
21 | } | ||
diff --git a/arch/mn10300/mm/cache.c b/arch/mn10300/mm/cache.c index 1b76719ec1c3..9261217e8d2c 100644 --- a/arch/mn10300/mm/cache.c +++ b/arch/mn10300/mm/cache.c | |||
@@ -54,13 +54,30 @@ EXPORT_SYMBOL(flush_icache_page); | |||
54 | void flush_icache_range(unsigned long start, unsigned long end) | 54 | void flush_icache_range(unsigned long start, unsigned long end) |
55 | { | 55 | { |
56 | #ifdef CONFIG_MN10300_CACHE_WBACK | 56 | #ifdef CONFIG_MN10300_CACHE_WBACK |
57 | unsigned long addr, size, off; | 57 | unsigned long addr, size, base, off; |
58 | struct page *page; | 58 | struct page *page; |
59 | pgd_t *pgd; | 59 | pgd_t *pgd; |
60 | pud_t *pud; | 60 | pud_t *pud; |
61 | pmd_t *pmd; | 61 | pmd_t *pmd; |
62 | pte_t *ppte, pte; | 62 | pte_t *ppte, pte; |
63 | 63 | ||
64 | if (end > 0x80000000UL) { | ||
65 | /* addresses above 0xa0000000 do not go through the cache */ | ||
66 | if (end > 0xa0000000UL) { | ||
67 | end = 0xa0000000UL; | ||
68 | if (start >= end) | ||
69 | return; | ||
70 | } | ||
71 | |||
72 | /* kernel addresses between 0x80000000 and 0x9fffffff do not | ||
73 | * require page tables, so we just map such addresses directly */ | ||
74 | base = (start >= 0x80000000UL) ? start : 0x80000000UL; | ||
75 | mn10300_dcache_flush_range(base, end); | ||
76 | if (base == start) | ||
77 | goto invalidate; | ||
78 | end = base; | ||
79 | } | ||
80 | |||
64 | for (; start < end; start += size) { | 81 | for (; start < end; start += size) { |
65 | /* work out how much of the page to flush */ | 82 | /* work out how much of the page to flush */ |
66 | off = start & (PAGE_SIZE - 1); | 83 | off = start & (PAGE_SIZE - 1); |
@@ -104,6 +121,7 @@ void flush_icache_range(unsigned long start, unsigned long end) | |||
104 | } | 121 | } |
105 | #endif | 122 | #endif |
106 | 123 | ||
124 | invalidate: | ||
107 | mn10300_icache_inv(); | 125 | mn10300_icache_inv(); |
108 | } | 126 | } |
109 | EXPORT_SYMBOL(flush_icache_range); | 127 | EXPORT_SYMBOL(flush_icache_range); |
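The flush_icache_range() change above partitions the range around the directly mapped kernel windows: addresses at or above 0xa0000000 bypass the cache entirely, the 0x80000000-0x9fffffff window is flushed without a page-table walk, and only what remains goes through the per-page loop. The sketch below mirrors that partitioning with stubbed-out flush primitives so the control flow can be run and inspected on its own.

	#include <stdio.h>

	#define CACHED_BASE   0x80000000UL	/* direct-mapped, cached */
	#define UNCACHED_BASE 0xa0000000UL	/* direct-mapped, uncached */

	static void dcache_flush_range(unsigned long s, unsigned long e)
	{
		printf("flush direct-mapped range %#lx-%#lx\n", s, e);
	}

	static void flush_per_page(unsigned long s, unsigned long e)
	{
		if (s < e)
			printf("walk page tables for %#lx-%#lx\n", s, e);
	}

	static void icache_invalidate(void)
	{
		printf("invalidate icache\n");
	}

	static void flush_range(unsigned long start, unsigned long end)
	{
		if (end > CACHED_BASE) {
			unsigned long base;

			/* addresses at/above the uncached window never hit the cache */
			if (end > UNCACHED_BASE) {
				end = UNCACHED_BASE;
				if (start >= end)
					return;
			}

			/* the cached direct-mapped window needs no page tables */
			base = (start >= CACHED_BASE) ? start : CACHED_BASE;
			dcache_flush_range(base, end);
			if (base == start)
				goto invalidate;
			end = base;
		}

		flush_per_page(start, end);	/* remaining user-space portion */

	invalidate:
		icache_invalidate();
	}

	int main(void)
	{
		flush_range(0x7fffe000UL, 0x80002000UL);
		return 0;
	}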
diff --git a/arch/parisc/include/asm/compat.h b/arch/parisc/include/asm/compat.h index 02b77baa5da6..efa0b60c63fe 100644 --- a/arch/parisc/include/asm/compat.h +++ b/arch/parisc/include/asm/compat.h | |||
@@ -147,7 +147,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr) | |||
147 | return (u32)(unsigned long)uptr; | 147 | return (u32)(unsigned long)uptr; |
148 | } | 148 | } |
149 | 149 | ||
150 | static __inline__ void __user *compat_alloc_user_space(long len) | 150 | static __inline__ void __user *arch_compat_alloc_user_space(long len) |
151 | { | 151 | { |
152 | struct pt_regs *regs = ¤t->thread.regs; | 152 | struct pt_regs *regs = ¤t->thread.regs; |
153 | return (void __user *)regs->gr[30]; | 153 | return (void __user *)regs->gr[30]; |
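Several architectures in this diff (parisc here, powerpc, s390 and sparc below) rename compat_alloc_user_space() to arch_compat_alloc_user_space(), which suggests a common wrapper now sits on top of the per-arch hook and validates the returned user pointer. The sketch below shows the assumed shape of such a wrapper only; the access_ok() check and its placement are assumptions, and the real definition should be taken from the generic compat code, not from this example.

	/* Hedged sketch: assumed generic wrapper over the renamed arch hook. */
	static inline void __user *compat_alloc_user_space(unsigned long len)
	{
		void __user *ptr = arch_compat_alloc_user_space(len);

		/* assumed addition: reject allocations that are not writable
		 * from the compat task's point of view */
		if (unlikely(!access_ok(VERIFY_WRITE, ptr, len)))
			return NULL;	/* callers must handle a NULL result */

		return ptr;
	}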
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c index 159a2b81e90c..6e81bb596e5b 100644 --- a/arch/parisc/kernel/module.c +++ b/arch/parisc/kernel/module.c | |||
@@ -941,11 +941,10 @@ int module_finalize(const Elf_Ehdr *hdr, | |||
941 | nsyms = newptr - (Elf_Sym *)symhdr->sh_addr; | 941 | nsyms = newptr - (Elf_Sym *)symhdr->sh_addr; |
942 | DEBUGP("NEW num_symtab %lu\n", nsyms); | 942 | DEBUGP("NEW num_symtab %lu\n", nsyms); |
943 | symhdr->sh_size = nsyms * sizeof(Elf_Sym); | 943 | symhdr->sh_size = nsyms * sizeof(Elf_Sym); |
944 | return module_bug_finalize(hdr, sechdrs, me); | 944 | return 0; |
945 | } | 945 | } |
946 | 946 | ||
947 | void module_arch_cleanup(struct module *mod) | 947 | void module_arch_cleanup(struct module *mod) |
948 | { | 948 | { |
949 | deregister_unwind_table(mod); | 949 | deregister_unwind_table(mod); |
950 | module_bug_cleanup(mod); | ||
951 | } | 950 | } |
diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h index 396d21a80058..a11d4eac4f97 100644 --- a/arch/powerpc/include/asm/compat.h +++ b/arch/powerpc/include/asm/compat.h | |||
@@ -134,7 +134,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr) | |||
134 | return (u32)(unsigned long)uptr; | 134 | return (u32)(unsigned long)uptr; |
135 | } | 135 | } |
136 | 136 | ||
137 | static inline void __user *compat_alloc_user_space(long len) | 137 | static inline void __user *arch_compat_alloc_user_space(long len) |
138 | { | 138 | { |
139 | struct pt_regs *regs = current->thread.regs; | 139 | struct pt_regs *regs = current->thread.regs; |
140 | unsigned long usp = regs->gpr[1]; | 140 | unsigned long usp = regs->gpr[1]; |
diff --git a/arch/powerpc/include/asm/fsldma.h b/arch/powerpc/include/asm/fsldma.h index a67aeed17d40..debc5ed96d6e 100644 --- a/arch/powerpc/include/asm/fsldma.h +++ b/arch/powerpc/include/asm/fsldma.h | |||
@@ -11,6 +11,7 @@ | |||
11 | #ifndef __ARCH_POWERPC_ASM_FSLDMA_H__ | 11 | #ifndef __ARCH_POWERPC_ASM_FSLDMA_H__ |
12 | #define __ARCH_POWERPC_ASM_FSLDMA_H__ | 12 | #define __ARCH_POWERPC_ASM_FSLDMA_H__ |
13 | 13 | ||
14 | #include <linux/slab.h> | ||
14 | #include <linux/dmaengine.h> | 15 | #include <linux/dmaengine.h> |
15 | 16 | ||
16 | /* | 17 | /* |
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 4d6681dce816..c571cd3c1453 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S | |||
@@ -575,13 +575,19 @@ __secondary_start: | |||
575 | /* Initialize the kernel stack. Just a repeat for iSeries. */ | 575 | /* Initialize the kernel stack. Just a repeat for iSeries. */ |
576 | LOAD_REG_ADDR(r3, current_set) | 576 | LOAD_REG_ADDR(r3, current_set) |
577 | sldi r28,r24,3 /* get current_set[cpu#] */ | 577 | sldi r28,r24,3 /* get current_set[cpu#] */ |
578 | ldx r1,r3,r28 | 578 | ldx r14,r3,r28 |
579 | addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD | 579 | addi r14,r14,THREAD_SIZE-STACK_FRAME_OVERHEAD |
580 | std r1,PACAKSAVE(r13) | 580 | std r14,PACAKSAVE(r13) |
581 | 581 | ||
582 | /* Do early setup for that CPU (stab, slb, hash table pointer) */ | 582 | /* Do early setup for that CPU (stab, slb, hash table pointer) */ |
583 | bl .early_setup_secondary | 583 | bl .early_setup_secondary |
584 | 584 | ||
585 | /* | ||
586 | * setup the new stack pointer, but *don't* use this until | ||
587 | * translation is on. | ||
588 | */ | ||
589 | mr r1, r14 | ||
590 | |||
585 | /* Clear backchain so we get nice backtraces */ | 591 | /* Clear backchain so we get nice backtraces */ |
586 | li r7,0 | 592 | li r7,0 |
587 | mtlr r7 | 593 | mtlr r7 |
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index 6bbd7a604d24..a7a570dcdd57 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S | |||
@@ -810,6 +810,9 @@ relocate_new_kernel: | |||
810 | isync | 810 | isync |
811 | sync | 811 | sync |
812 | 812 | ||
813 | mfspr r3, SPRN_PIR /* current core we are running on */ | ||
814 | mr r4, r5 /* load physical address of chunk called */ | ||
815 | |||
813 | /* jump to the entry point, usually the setup routine */ | 816 | /* jump to the entry point, usually the setup routine */ |
814 | mtlr r5 | 817 | mtlr r5 |
815 | blrl | 818 | blrl |
diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c index 477c663e0140..49cee9df225b 100644 --- a/arch/powerpc/kernel/module.c +++ b/arch/powerpc/kernel/module.c | |||
@@ -63,11 +63,6 @@ int module_finalize(const Elf_Ehdr *hdr, | |||
63 | const Elf_Shdr *sechdrs, struct module *me) | 63 | const Elf_Shdr *sechdrs, struct module *me) |
64 | { | 64 | { |
65 | const Elf_Shdr *sect; | 65 | const Elf_Shdr *sect; |
66 | int err; | ||
67 | |||
68 | err = module_bug_finalize(hdr, sechdrs, me); | ||
69 | if (err) | ||
70 | return err; | ||
71 | 66 | ||
72 | /* Apply feature fixups */ | 67 | /* Apply feature fixups */ |
73 | sect = find_section(hdr, sechdrs, "__ftr_fixup"); | 68 | sect = find_section(hdr, sechdrs, "__ftr_fixup"); |
@@ -101,5 +96,4 @@ int module_finalize(const Elf_Ehdr *hdr, | |||
101 | 96 | ||
102 | void module_arch_cleanup(struct module *mod) | 97 | void module_arch_cleanup(struct module *mod) |
103 | { | 98 | { |
104 | module_bug_cleanup(mod); | ||
105 | } | 99 | } |
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c index 7109f5b1baa8..2300426e531a 100644 --- a/arch/powerpc/kernel/signal.c +++ b/arch/powerpc/kernel/signal.c | |||
@@ -138,6 +138,7 @@ static int do_signal_pending(sigset_t *oldset, struct pt_regs *regs) | |||
138 | ti->local_flags &= ~_TLF_RESTORE_SIGMASK; | 138 | ti->local_flags &= ~_TLF_RESTORE_SIGMASK; |
139 | sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); | 139 | sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); |
140 | } | 140 | } |
141 | regs->trap = 0; | ||
141 | return 0; /* no signals delivered */ | 142 | return 0; /* no signals delivered */ |
142 | } | 143 | } |
143 | 144 | ||
@@ -164,6 +165,7 @@ static int do_signal_pending(sigset_t *oldset, struct pt_regs *regs) | |||
164 | ret = handle_rt_signal64(signr, &ka, &info, oldset, regs); | 165 | ret = handle_rt_signal64(signr, &ka, &info, oldset, regs); |
165 | } | 166 | } |
166 | 167 | ||
168 | regs->trap = 0; | ||
167 | if (ret) { | 169 | if (ret) { |
168 | spin_lock_irq(¤t->sighand->siglock); | 170 | spin_lock_irq(¤t->sighand->siglock); |
169 | sigorsets(¤t->blocked, ¤t->blocked, | 171 | sigorsets(¤t->blocked, ¤t->blocked, |
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 266610119f66..b96a3a010c26 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c | |||
@@ -511,6 +511,7 @@ static long restore_user_regs(struct pt_regs *regs, | |||
511 | if (!sig) | 511 | if (!sig) |
512 | save_r2 = (unsigned int)regs->gpr[2]; | 512 | save_r2 = (unsigned int)regs->gpr[2]; |
513 | err = restore_general_regs(regs, sr); | 513 | err = restore_general_regs(regs, sr); |
514 | regs->trap = 0; | ||
514 | err |= __get_user(msr, &sr->mc_gregs[PT_MSR]); | 515 | err |= __get_user(msr, &sr->mc_gregs[PT_MSR]); |
515 | if (!sig) | 516 | if (!sig) |
516 | regs->gpr[2] = (unsigned long) save_r2; | 517 | regs->gpr[2] = (unsigned long) save_r2; |
@@ -884,7 +885,6 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka, | |||
884 | regs->nip = (unsigned long) ka->sa.sa_handler; | 885 | regs->nip = (unsigned long) ka->sa.sa_handler; |
885 | /* enter the signal handler in big-endian mode */ | 886 | /* enter the signal handler in big-endian mode */ |
886 | regs->msr &= ~MSR_LE; | 887 | regs->msr &= ~MSR_LE; |
887 | regs->trap = 0; | ||
888 | return 1; | 888 | return 1; |
889 | 889 | ||
890 | badframe: | 890 | badframe: |
@@ -1228,7 +1228,6 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka, | |||
1228 | regs->nip = (unsigned long) ka->sa.sa_handler; | 1228 | regs->nip = (unsigned long) ka->sa.sa_handler; |
1229 | /* enter the signal handler in big-endian mode */ | 1229 | /* enter the signal handler in big-endian mode */ |
1230 | regs->msr &= ~MSR_LE; | 1230 | regs->msr &= ~MSR_LE; |
1231 | regs->trap = 0; | ||
1232 | 1231 | ||
1233 | return 1; | 1232 | return 1; |
1234 | 1233 | ||
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index 2fe6fc64b614..27c4a4584f80 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c | |||
@@ -178,7 +178,7 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig, | |||
178 | err |= __get_user(regs->xer, &sc->gp_regs[PT_XER]); | 178 | err |= __get_user(regs->xer, &sc->gp_regs[PT_XER]); |
179 | err |= __get_user(regs->ccr, &sc->gp_regs[PT_CCR]); | 179 | err |= __get_user(regs->ccr, &sc->gp_regs[PT_CCR]); |
180 | /* skip SOFTE */ | 180 | /* skip SOFTE */ |
181 | err |= __get_user(regs->trap, &sc->gp_regs[PT_TRAP]); | 181 | regs->trap = 0; |
182 | err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]); | 182 | err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]); |
183 | err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]); | 183 | err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]); |
184 | err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]); | 184 | err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]); |
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index ce53dfa7130d..8533b3b83f5d 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c | |||
@@ -577,20 +577,11 @@ void timer_interrupt(struct pt_regs * regs) | |||
577 | * some CPUs will continuue to take decrementer exceptions */ | 577 | * some CPUs will continuue to take decrementer exceptions */ |
578 | set_dec(DECREMENTER_MAX); | 578 | set_dec(DECREMENTER_MAX); |
579 | 579 | ||
580 | #ifdef CONFIG_PPC32 | 580 | #if defined(CONFIG_PPC32) && defined(CONFIG_PMAC) |
581 | if (atomic_read(&ppc_n_lost_interrupts) != 0) | 581 | if (atomic_read(&ppc_n_lost_interrupts) != 0) |
582 | do_IRQ(regs); | 582 | do_IRQ(regs); |
583 | #endif | 583 | #endif |
584 | 584 | ||
585 | now = get_tb_or_rtc(); | ||
586 | if (now < decrementer->next_tb) { | ||
587 | /* not time for this event yet */ | ||
588 | now = decrementer->next_tb - now; | ||
589 | if (now <= DECREMENTER_MAX) | ||
590 | set_dec((int)now); | ||
591 | trace_timer_interrupt_exit(regs); | ||
592 | return; | ||
593 | } | ||
594 | old_regs = set_irq_regs(regs); | 585 | old_regs = set_irq_regs(regs); |
595 | irq_enter(); | 586 | irq_enter(); |
596 | 587 | ||
@@ -606,8 +597,16 @@ void timer_interrupt(struct pt_regs * regs) | |||
606 | get_lppaca()->int_dword.fields.decr_int = 0; | 597 | get_lppaca()->int_dword.fields.decr_int = 0; |
607 | #endif | 598 | #endif |
608 | 599 | ||
609 | if (evt->event_handler) | 600 | now = get_tb_or_rtc(); |
610 | evt->event_handler(evt); | 601 | if (now >= decrementer->next_tb) { |
602 | decrementer->next_tb = ~(u64)0; | ||
603 | if (evt->event_handler) | ||
604 | evt->event_handler(evt); | ||
605 | } else { | ||
606 | now = decrementer->next_tb - now; | ||
607 | if (now <= DECREMENTER_MAX) | ||
608 | set_dec((int)now); | ||
609 | } | ||
611 | 610 | ||
612 | #ifdef CONFIG_PPC_ISERIES | 611 | #ifdef CONFIG_PPC_ISERIES |
613 | if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending()) | 612 | if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending()) |
diff --git a/arch/powerpc/platforms/512x/clock.c b/arch/powerpc/platforms/512x/clock.c index 5b243bd3eb3b..3dc2a8d262b8 100644 --- a/arch/powerpc/platforms/512x/clock.c +++ b/arch/powerpc/platforms/512x/clock.c | |||
@@ -57,7 +57,7 @@ static struct clk *mpc5121_clk_get(struct device *dev, const char *id) | |||
57 | int id_match = 0; | 57 | int id_match = 0; |
58 | 58 | ||
59 | if (dev == NULL || id == NULL) | 59 | if (dev == NULL || id == NULL) |
60 | return NULL; | 60 | return clk; |
61 | 61 | ||
62 | mutex_lock(&clocks_mutex); | 62 | mutex_lock(&clocks_mutex); |
63 | list_for_each_entry(p, &clocks, node) { | 63 | list_for_each_entry(p, &clocks, node) { |
diff --git a/arch/powerpc/platforms/52xx/efika.c b/arch/powerpc/platforms/52xx/efika.c index 45c0cb9b67e6..18c104820198 100644 --- a/arch/powerpc/platforms/52xx/efika.c +++ b/arch/powerpc/platforms/52xx/efika.c | |||
@@ -99,7 +99,7 @@ static void __init efika_pcisetup(void) | |||
99 | if (bus_range == NULL || len < 2 * sizeof(int)) { | 99 | if (bus_range == NULL || len < 2 * sizeof(int)) { |
100 | printk(KERN_WARNING EFIKA_PLATFORM_NAME | 100 | printk(KERN_WARNING EFIKA_PLATFORM_NAME |
101 | ": Can't get bus-range for %s\n", pcictrl->full_name); | 101 | ": Can't get bus-range for %s\n", pcictrl->full_name); |
102 | return; | 102 | goto out_put; |
103 | } | 103 | } |
104 | 104 | ||
105 | if (bus_range[1] == bus_range[0]) | 105 | if (bus_range[1] == bus_range[0]) |
@@ -111,12 +111,12 @@ static void __init efika_pcisetup(void) | |||
111 | printk(" controlled by %s\n", pcictrl->full_name); | 111 | printk(" controlled by %s\n", pcictrl->full_name); |
112 | printk("\n"); | 112 | printk("\n"); |
113 | 113 | ||
114 | hose = pcibios_alloc_controller(of_node_get(pcictrl)); | 114 | hose = pcibios_alloc_controller(pcictrl); |
115 | if (!hose) { | 115 | if (!hose) { |
116 | printk(KERN_WARNING EFIKA_PLATFORM_NAME | 116 | printk(KERN_WARNING EFIKA_PLATFORM_NAME |
117 | ": Can't allocate PCI controller structure for %s\n", | 117 | ": Can't allocate PCI controller structure for %s\n", |
118 | pcictrl->full_name); | 118 | pcictrl->full_name); |
119 | return; | 119 | goto out_put; |
120 | } | 120 | } |
121 | 121 | ||
122 | hose->first_busno = bus_range[0]; | 122 | hose->first_busno = bus_range[0]; |
@@ -124,6 +124,9 @@ static void __init efika_pcisetup(void) | |||
124 | hose->ops = &rtas_pci_ops; | 124 | hose->ops = &rtas_pci_ops; |
125 | 125 | ||
126 | pci_process_bridge_OF_ranges(hose, pcictrl, 0); | 126 | pci_process_bridge_OF_ranges(hose, pcictrl, 0); |
127 | return; | ||
128 | out_put: | ||
129 | of_node_put(pcictrl); | ||
127 | } | 130 | } |
128 | 131 | ||
129 | #else | 132 | #else |
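The efika.c hunks above route every failure through a single out_put label so the device_node reference obtained earlier is dropped exactly once, while the success path hands its reference to pcibios_alloc_controller() and no longer takes an extra of_node_get(). The sketch below reproduces that acquire/branch/put shape with a generic refcounted object; the names and failure flags are placeholders.

	#include <stdio.h>

	struct node { int refs; };

	static struct node *node_get(struct node *n) { n->refs++; return n; }
	static void node_put(struct node *n) { n->refs--; }

	/* Every failure branch jumps to one cleanup label so the reference
	 * taken at the top is released exactly once; on success the caller
	 * keeps the reference for the object it just registered. */
	static int setup(struct node *n, int fail_early, int fail_late)
	{
		int ret = -1;

		node_get(n);

		if (fail_early)
			goto out_put;
		if (fail_late)
			goto out_put;

		return 0;	/* success: ownership transferred, no put here */

	out_put:
		node_put(n);
		return ret;
	}

	int main(void)
	{
		struct node n = { 0 };
		setup(&n, 0, 1);
		printf("refs after failed setup: %d\n", n.refs);	/* 0 */
		return 0;
	}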
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_common.c b/arch/powerpc/platforms/52xx/mpc52xx_common.c index 6e905314ad5d..41f3a7eda1de 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_common.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_common.c | |||
@@ -325,12 +325,16 @@ int mpc5200_psc_ac97_gpio_reset(int psc_number) | |||
325 | clrbits32(&simple_gpio->simple_dvo, sync | out); | 325 | clrbits32(&simple_gpio->simple_dvo, sync | out); |
326 | clrbits8(&wkup_gpio->wkup_dvo, reset); | 326 | clrbits8(&wkup_gpio->wkup_dvo, reset); |
327 | 327 | ||
328 | /* wait at lease 1 us */ | 328 | /* wait for 1 us */ |
329 | udelay(2); | 329 | udelay(1); |
330 | 330 | ||
331 | /* Deassert reset */ | 331 | /* Deassert reset */ |
332 | setbits8(&wkup_gpio->wkup_dvo, reset); | 332 | setbits8(&wkup_gpio->wkup_dvo, reset); |
333 | 333 | ||
334 | /* wait at least 200ns */ | ||
335 | /* 7 ~= (200ns * timebase) / ns2sec */ | ||
336 | __delay(7); | ||
337 | |||
334 | /* Restore pin-muxing */ | 338 | /* Restore pin-muxing */ |
335 | out_be32(&simple_gpio->port_config, mux); | 339 | out_be32(&simple_gpio->port_config, mux); |
336 | 340 | ||
diff --git a/arch/powerpc/platforms/83xx/mpc837x_mds.c b/arch/powerpc/platforms/83xx/mpc837x_mds.c index f9751c8905be..83068322abd1 100644 --- a/arch/powerpc/platforms/83xx/mpc837x_mds.c +++ b/arch/powerpc/platforms/83xx/mpc837x_mds.c | |||
@@ -48,8 +48,10 @@ static int mpc837xmds_usb_cfg(void) | |||
48 | return -1; | 48 | return -1; |
49 | 49 | ||
50 | np = of_find_node_by_name(NULL, "usb"); | 50 | np = of_find_node_by_name(NULL, "usb"); |
51 | if (!np) | 51 | if (!np) { |
52 | return -ENODEV; | 52 | ret = -ENODEV; |
53 | goto out; | ||
54 | } | ||
53 | phy_type = of_get_property(np, "phy_type", NULL); | 55 | phy_type = of_get_property(np, "phy_type", NULL); |
54 | if (phy_type && !strcmp(phy_type, "ulpi")) { | 56 | if (phy_type && !strcmp(phy_type, "ulpi")) { |
55 | clrbits8(bcsr_regs + 12, BCSR12_USB_SER_PIN); | 57 | clrbits8(bcsr_regs + 12, BCSR12_USB_SER_PIN); |
@@ -65,8 +67,9 @@ static int mpc837xmds_usb_cfg(void) | |||
65 | } | 67 | } |
66 | 68 | ||
67 | of_node_put(np); | 69 | of_node_put(np); |
70 | out: | ||
68 | iounmap(bcsr_regs); | 71 | iounmap(bcsr_regs); |
69 | return 0; | 72 | return ret; |
70 | } | 73 | } |
71 | 74 | ||
72 | /* ************************************************************************ | 75 | /* ************************************************************************ |
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c index da64be19d099..aa34cac4eb5c 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c | |||
@@ -357,6 +357,7 @@ static void __init mpc85xx_mds_setup_arch(void) | |||
357 | { | 357 | { |
358 | #ifdef CONFIG_PCI | 358 | #ifdef CONFIG_PCI |
359 | struct pci_controller *hose; | 359 | struct pci_controller *hose; |
360 | struct device_node *np; | ||
360 | #endif | 361 | #endif |
361 | dma_addr_t max = 0xffffffff; | 362 | dma_addr_t max = 0xffffffff; |
362 | 363 | ||
diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c index e1467c937450..34e00902ce86 100644 --- a/arch/powerpc/platforms/85xx/p1022_ds.c +++ b/arch/powerpc/platforms/85xx/p1022_ds.c | |||
@@ -19,7 +19,7 @@ | |||
19 | 19 | ||
20 | #include <linux/pci.h> | 20 | #include <linux/pci.h> |
21 | #include <linux/of_platform.h> | 21 | #include <linux/of_platform.h> |
22 | #include <linux/lmb.h> | 22 | #include <linux/memblock.h> |
23 | 23 | ||
24 | #include <asm/mpic.h> | 24 | #include <asm/mpic.h> |
25 | #include <asm/swiotlb.h> | 25 | #include <asm/swiotlb.h> |
@@ -97,7 +97,7 @@ static void __init p1022_ds_setup_arch(void) | |||
97 | #endif | 97 | #endif |
98 | 98 | ||
99 | #ifdef CONFIG_SWIOTLB | 99 | #ifdef CONFIG_SWIOTLB |
100 | if (lmb_end_of_DRAM() > max) { | 100 | if (memblock_end_of_DRAM() > max) { |
101 | ppc_swiotlb_enable = 1; | 101 | ppc_swiotlb_enable = 1; |
102 | set_pci_dma_ops(&swiotlb_dma_ops); | 102 | set_pci_dma_ops(&swiotlb_dma_ops); |
103 | ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb; | 103 | ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb; |
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index 227c1c3d585e..72d8054fa739 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c | |||
@@ -129,20 +129,35 @@ struct device_node *dlpar_configure_connector(u32 drc_index) | |||
129 | struct property *property; | 129 | struct property *property; |
130 | struct property *last_property = NULL; | 130 | struct property *last_property = NULL; |
131 | struct cc_workarea *ccwa; | 131 | struct cc_workarea *ccwa; |
132 | char *data_buf; | ||
132 | int cc_token; | 133 | int cc_token; |
133 | int rc; | 134 | int rc = -1; |
134 | 135 | ||
135 | cc_token = rtas_token("ibm,configure-connector"); | 136 | cc_token = rtas_token("ibm,configure-connector"); |
136 | if (cc_token == RTAS_UNKNOWN_SERVICE) | 137 | if (cc_token == RTAS_UNKNOWN_SERVICE) |
137 | return NULL; | 138 | return NULL; |
138 | 139 | ||
139 | spin_lock(&rtas_data_buf_lock); | 140 | data_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL); |
140 | ccwa = (struct cc_workarea *)&rtas_data_buf[0]; | 141 | if (!data_buf) |
142 | return NULL; | ||
143 | |||
144 | ccwa = (struct cc_workarea *)&data_buf[0]; | ||
141 | ccwa->drc_index = drc_index; | 145 | ccwa->drc_index = drc_index; |
142 | ccwa->zero = 0; | 146 | ccwa->zero = 0; |
143 | 147 | ||
144 | rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL); | 148 | do { |
145 | while (rc) { | 149 | /* Since we release the rtas_data_buf lock between configure |
150 | * connector calls we want to re-populate the rtas_data_buffer | ||
151 | * with the contents of the previous call. | ||
152 | */ | ||
153 | spin_lock(&rtas_data_buf_lock); | ||
154 | |||
155 | memcpy(rtas_data_buf, data_buf, RTAS_DATA_BUF_SIZE); | ||
156 | rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL); | ||
157 | memcpy(data_buf, rtas_data_buf, RTAS_DATA_BUF_SIZE); | ||
158 | |||
159 | spin_unlock(&rtas_data_buf_lock); | ||
160 | |||
146 | switch (rc) { | 161 | switch (rc) { |
147 | case NEXT_SIBLING: | 162 | case NEXT_SIBLING: |
148 | dn = dlpar_parse_cc_node(ccwa); | 163 | dn = dlpar_parse_cc_node(ccwa); |
@@ -197,18 +212,19 @@ struct device_node *dlpar_configure_connector(u32 drc_index) | |||
197 | "returned from configure-connector\n", rc); | 212 | "returned from configure-connector\n", rc); |
198 | goto cc_error; | 213 | goto cc_error; |
199 | } | 214 | } |
215 | } while (rc); | ||
200 | 216 | ||
201 | rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL); | 217 | cc_error: |
218 | kfree(data_buf); | ||
219 | |||
220 | if (rc) { | ||
221 | if (first_dn) | ||
222 | dlpar_free_cc_nodes(first_dn); | ||
223 | |||
224 | return NULL; | ||
202 | } | 225 | } |
203 | 226 | ||
204 | spin_unlock(&rtas_data_buf_lock); | ||
205 | return first_dn; | 227 | return first_dn; |
206 | |||
207 | cc_error: | ||
208 | if (first_dn) | ||
209 | dlpar_free_cc_nodes(first_dn); | ||
210 | spin_unlock(&rtas_data_buf_lock); | ||
211 | return NULL; | ||
212 | } | 228 | } |
213 | 229 | ||
214 | static struct device_node *derive_parent(const char *path) | 230 | static struct device_node *derive_parent(const char *path) |
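After the dlpar.c rework above, dlpar_configure_connector() keeps its own kzalloc'd copy of the work area and takes rtas_data_buf_lock only for the duration of each individual rtas_call(), copying the shared buffer in before the call and back out afterwards. The sketch below reduces that per-iteration locking pattern to a runnable pthread example; the buffer size, the firmware call and the loop condition are stand-ins for the RTAS details.

	#include <pthread.h>
	#include <string.h>

	#define BUF_SIZE 4096

	static pthread_mutex_t shared_lock = PTHREAD_MUTEX_INITIALIZER;
	static char shared_buf[BUF_SIZE];	/* stands in for rtas_data_buf */

	/* Placeholder for the firmware call that consumes/updates shared_buf. */
	static int firmware_call(void) { return 0; }

	static int configure(char *priv)
	{
		int rc;

		do {
			/* Hold the global buffer lock only across one call;
			 * the private copy carries state between iterations. */
			pthread_mutex_lock(&shared_lock);
			memcpy(shared_buf, priv, BUF_SIZE);
			rc = firmware_call();
			memcpy(priv, shared_buf, BUF_SIZE);
			pthread_mutex_unlock(&shared_lock);

			/* ... interpret rc, build device nodes, etc. ... */
		} while (rc > 0);

		return rc;
	}

	int main(void)
	{
		char priv[BUF_SIZE] = { 0 };
		return configure(priv);	/* build with -pthread */
	}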
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c index 209384b6e039..4ae933225251 100644 --- a/arch/powerpc/sysdev/fsl_pci.c +++ b/arch/powerpc/sysdev/fsl_pci.c | |||
@@ -399,6 +399,8 @@ DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1013E, quirk_fsl_pcie_header); | |||
399 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1013, quirk_fsl_pcie_header); | 399 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1013, quirk_fsl_pcie_header); |
400 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1020E, quirk_fsl_pcie_header); | 400 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1020E, quirk_fsl_pcie_header); |
401 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1020, quirk_fsl_pcie_header); | 401 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1020, quirk_fsl_pcie_header); |
402 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1021E, quirk_fsl_pcie_header); | ||
403 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1021, quirk_fsl_pcie_header); | ||
402 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1022E, quirk_fsl_pcie_header); | 404 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1022E, quirk_fsl_pcie_header); |
403 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1022, quirk_fsl_pcie_header); | 405 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1022, quirk_fsl_pcie_header); |
404 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2010E, quirk_fsl_pcie_header); | 406 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2010E, quirk_fsl_pcie_header); |
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c index 6425abe5b7db..3017532319c8 100644 --- a/arch/powerpc/sysdev/fsl_rio.c +++ b/arch/powerpc/sysdev/fsl_rio.c | |||
@@ -240,12 +240,13 @@ struct rio_priv { | |||
240 | 240 | ||
241 | static void __iomem *rio_regs_win; | 241 | static void __iomem *rio_regs_win; |
242 | 242 | ||
243 | #ifdef CONFIG_E500 | ||
243 | static int (*saved_mcheck_exception)(struct pt_regs *regs); | 244 | static int (*saved_mcheck_exception)(struct pt_regs *regs); |
244 | 245 | ||
245 | static int fsl_rio_mcheck_exception(struct pt_regs *regs) | 246 | static int fsl_rio_mcheck_exception(struct pt_regs *regs) |
246 | { | 247 | { |
247 | const struct exception_table_entry *entry = NULL; | 248 | const struct exception_table_entry *entry = NULL; |
248 | unsigned long reason = (mfspr(SPRN_MCSR) & MCSR_MASK); | 249 | unsigned long reason = mfspr(SPRN_MCSR); |
249 | 250 | ||
250 | if (reason & MCSR_BUS_RBERR) { | 251 | if (reason & MCSR_BUS_RBERR) { |
251 | reason = in_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR)); | 252 | reason = in_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR)); |
@@ -269,6 +270,7 @@ static int fsl_rio_mcheck_exception(struct pt_regs *regs) | |||
269 | else | 270 | else |
270 | return cur_cpu_spec->machine_check(regs); | 271 | return cur_cpu_spec->machine_check(regs); |
271 | } | 272 | } |
273 | #endif | ||
272 | 274 | ||
273 | /** | 275 | /** |
274 | * fsl_rio_doorbell_send - Send a MPC85xx doorbell message | 276 | * fsl_rio_doorbell_send - Send a MPC85xx doorbell message |
@@ -1517,8 +1519,10 @@ int fsl_rio_setup(struct platform_device *dev) | |||
1517 | fsl_rio_doorbell_init(port); | 1519 | fsl_rio_doorbell_init(port); |
1518 | fsl_rio_port_write_init(port); | 1520 | fsl_rio_port_write_init(port); |
1519 | 1521 | ||
1522 | #ifdef CONFIG_E500 | ||
1520 | saved_mcheck_exception = ppc_md.machine_check_exception; | 1523 | saved_mcheck_exception = ppc_md.machine_check_exception; |
1521 | ppc_md.machine_check_exception = fsl_rio_mcheck_exception; | 1524 | ppc_md.machine_check_exception = fsl_rio_mcheck_exception; |
1525 | #endif | ||
1522 | /* Ensure that RFXE is set */ | 1526 | /* Ensure that RFXE is set */ |
1523 | mtspr(SPRN_HID1, (mfspr(SPRN_HID1) | 0x20000)); | 1527 | mtspr(SPRN_HID1, (mfspr(SPRN_HID1) | 0x20000)); |
1524 | 1528 | ||
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c index 3da8014931c9..90020de4dcf2 100644 --- a/arch/powerpc/sysdev/qe_lib/qe.c +++ b/arch/powerpc/sysdev/qe_lib/qe.c | |||
@@ -640,6 +640,7 @@ unsigned int qe_get_num_of_snums(void) | |||
640 | if ((num_of_snums < 28) || (num_of_snums > QE_NUM_OF_SNUM)) { | 640 | if ((num_of_snums < 28) || (num_of_snums > QE_NUM_OF_SNUM)) { |
641 | /* No QE ever has fewer than 28 SNUMs */ | 641 | /* No QE ever has fewer than 28 SNUMs */ |
642 | pr_err("QE: number of snum is invalid\n"); | 642 | pr_err("QE: number of snum is invalid\n"); |
643 | of_node_put(qe); | ||
643 | return -EINVAL; | 644 | return -EINVAL; |
644 | } | 645 | } |
645 | } | 646 | } |
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h index 104f2007f097..a875c2f542e1 100644 --- a/arch/s390/include/asm/compat.h +++ b/arch/s390/include/asm/compat.h | |||
@@ -181,7 +181,7 @@ static inline int is_compat_task(void) | |||
181 | 181 | ||
182 | #endif | 182 | #endif |
183 | 183 | ||
184 | static inline void __user *compat_alloc_user_space(long len) | 184 | static inline void __user *arch_compat_alloc_user_space(long len) |
185 | { | 185 | { |
186 | unsigned long stack; | 186 | unsigned long stack; |
187 | 187 | ||
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index 22cfd634c355..f7167ee4604c 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c | |||
@@ -407,10 +407,9 @@ int module_finalize(const Elf_Ehdr *hdr, | |||
407 | { | 407 | { |
408 | vfree(me->arch.syminfo); | 408 | vfree(me->arch.syminfo); |
409 | me->arch.syminfo = NULL; | 409 | me->arch.syminfo = NULL; |
410 | return module_bug_finalize(hdr, sechdrs, me); | 410 | return 0; |
411 | } | 411 | } |
412 | 412 | ||
413 | void module_arch_cleanup(struct module *mod) | 413 | void module_arch_cleanup(struct module *mod) |
414 | { | 414 | { |
415 | module_bug_cleanup(mod); | ||
416 | } | 415 | } |
diff --git a/arch/sh/kernel/module.c b/arch/sh/kernel/module.c index 43adddfe4c04..ae0be697a89e 100644 --- a/arch/sh/kernel/module.c +++ b/arch/sh/kernel/module.c | |||
@@ -149,13 +149,11 @@ int module_finalize(const Elf_Ehdr *hdr, | |||
149 | int ret = 0; | 149 | int ret = 0; |
150 | 150 | ||
151 | ret |= module_dwarf_finalize(hdr, sechdrs, me); | 151 | ret |= module_dwarf_finalize(hdr, sechdrs, me); |
152 | ret |= module_bug_finalize(hdr, sechdrs, me); | ||
153 | 152 | ||
154 | return ret; | 153 | return ret; |
155 | } | 154 | } |
156 | 155 | ||
157 | void module_arch_cleanup(struct module *mod) | 156 | void module_arch_cleanup(struct module *mod) |
158 | { | 157 | { |
159 | module_bug_cleanup(mod); | ||
160 | module_dwarf_cleanup(mod); | 158 | module_dwarf_cleanup(mod); |
161 | } | 159 | } |
diff --git a/arch/sparc/include/asm/compat.h b/arch/sparc/include/asm/compat.h index 5016f76ea98a..6f57325bb883 100644 --- a/arch/sparc/include/asm/compat.h +++ b/arch/sparc/include/asm/compat.h | |||
@@ -167,7 +167,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr) | |||
167 | return (u32)(unsigned long)uptr; | 167 | return (u32)(unsigned long)uptr; |
168 | } | 168 | } |
169 | 169 | ||
170 | static inline void __user *compat_alloc_user_space(long len) | 170 | static inline void __user *arch_compat_alloc_user_space(long len) |
171 | { | 171 | { |
172 | struct pt_regs *regs = current_thread_info()->kregs; | 172 | struct pt_regs *regs = current_thread_info()->kregs; |
173 | unsigned long usp = regs->u_regs[UREG_I6]; | 173 | unsigned long usp = regs->u_regs[UREG_I6]; |
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 357ced3c33ff..6318e622cfb0 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c | |||
@@ -1038,6 +1038,7 @@ static int __hw_perf_event_init(struct perf_event *event) | |||
1038 | if (atomic_read(&nmi_active) < 0) | 1038 | if (atomic_read(&nmi_active) < 0) |
1039 | return -ENODEV; | 1039 | return -ENODEV; |
1040 | 1040 | ||
1041 | pmap = NULL; | ||
1041 | if (attr->type == PERF_TYPE_HARDWARE) { | 1042 | if (attr->type == PERF_TYPE_HARDWARE) { |
1042 | if (attr->config >= sparc_pmu->max_events) | 1043 | if (attr->config >= sparc_pmu->max_events) |
1043 | return -EINVAL; | 1044 | return -EINVAL; |
@@ -1046,9 +1047,18 @@ static int __hw_perf_event_init(struct perf_event *event) | |||
1046 | pmap = sparc_map_cache_event(attr->config); | 1047 | pmap = sparc_map_cache_event(attr->config); |
1047 | if (IS_ERR(pmap)) | 1048 | if (IS_ERR(pmap)) |
1048 | return PTR_ERR(pmap); | 1049 | return PTR_ERR(pmap); |
1049 | } else | 1050 | } else if (attr->type != PERF_TYPE_RAW) |
1050 | return -EOPNOTSUPP; | 1051 | return -EOPNOTSUPP; |
1051 | 1052 | ||
1053 | if (pmap) { | ||
1054 | hwc->event_base = perf_event_encode(pmap); | ||
1055 | } else { | ||
1056 | /* User gives us "(encoding << 16) | pic_mask" for | ||
1057 | * PERF_TYPE_RAW events. | ||
1058 | */ | ||
1059 | hwc->event_base = attr->config; | ||
1060 | } | ||
1061 | |||
1052 | /* We save the enable bits in the config_base. */ | 1062 | /* We save the enable bits in the config_base. */ |
1053 | hwc->config_base = sparc_pmu->irq_bit; | 1063 | hwc->config_base = sparc_pmu->irq_bit; |
1054 | if (!attr->exclude_user) | 1064 | if (!attr->exclude_user) |
@@ -1058,8 +1068,6 @@ static int __hw_perf_event_init(struct perf_event *event) | |||
1058 | if (!attr->exclude_hv) | 1068 | if (!attr->exclude_hv) |
1059 | hwc->config_base |= sparc_pmu->hv_bit; | 1069 | hwc->config_base |= sparc_pmu->hv_bit; |
1060 | 1070 | ||
1061 | hwc->event_base = perf_event_encode(pmap); | ||
1062 | |||
1063 | n = 0; | 1071 | n = 0; |
1064 | if (event->group_leader != event) { | 1072 | if (event->group_leader != event) { |
1065 | n = collect_events(event->group_leader, | 1073 | n = collect_events(event->group_leader, |
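With PERF_TYPE_RAW accepted by the sparc perf code above, the event encoding now has two sources: mapped hardware and cache events still go through perf_event_encode(), while raw events take attr->config verbatim as "(encoding << 16) | pic_mask". The sketch below only packs and unpacks that raw form; the field widths follow the comment in the hunk and are illustrative rather than a definitive description of the hardware.

	#include <stdio.h>
	#include <stdint.h>

	/* Raw config as described above: event encoding in the upper half,
	 * PIC counter mask in the low 16 bits. */
	static uint32_t raw_config(uint16_t encoding, uint16_t pic_mask)
	{
		return ((uint32_t)encoding << 16) | pic_mask;
	}

	int main(void)
	{
		uint32_t cfg = raw_config(0x002b, 0x3);

		printf("encoding=%#x pic_mask=%#x\n", cfg >> 16, cfg & 0xffff);
		return 0;
	}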
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c index ea22cd373c64..75fad425e249 100644 --- a/arch/sparc/kernel/signal32.c +++ b/arch/sparc/kernel/signal32.c | |||
@@ -453,8 +453,66 @@ static int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) | |||
453 | return err; | 453 | return err; |
454 | } | 454 | } |
455 | 455 | ||
456 | static void setup_frame32(struct k_sigaction *ka, struct pt_regs *regs, | 456 | /* The I-cache flush instruction only works in the primary ASI, which |
457 | int signo, sigset_t *oldset) | 457 | * right now is the nucleus, aka. kernel space. |
458 | * | ||
459 | * Therefore we have to kick the instructions out using the kernel | ||
460 | * side linear mapping of the physical address backing the user | ||
461 | * instructions. | ||
462 | */ | ||
463 | static void flush_signal_insns(unsigned long address) | ||
464 | { | ||
465 | unsigned long pstate, paddr; | ||
466 | pte_t *ptep, pte; | ||
467 | pgd_t *pgdp; | ||
468 | pud_t *pudp; | ||
469 | pmd_t *pmdp; | ||
470 | |||
471 | /* Commit all stores of the instructions we are about to flush. */ | ||
472 | wmb(); | ||
473 | |||
474 | /* Disable cross-call reception. In this way even a very wide | ||
475 | * munmap() on another cpu can't tear down the page table | ||
476 | * hierarchy from underneath us, since that can't complete | ||
477 | * until the IPI tlb flush returns. | ||
478 | */ | ||
479 | |||
480 | __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate)); | ||
481 | __asm__ __volatile__("wrpr %0, %1, %%pstate" | ||
482 | : : "r" (pstate), "i" (PSTATE_IE)); | ||
483 | |||
484 | pgdp = pgd_offset(current->mm, address); | ||
485 | if (pgd_none(*pgdp)) | ||
486 | goto out_irqs_on; | ||
487 | pudp = pud_offset(pgdp, address); | ||
488 | if (pud_none(*pudp)) | ||
489 | goto out_irqs_on; | ||
490 | pmdp = pmd_offset(pudp, address); | ||
491 | if (pmd_none(*pmdp)) | ||
492 | goto out_irqs_on; | ||
493 | |||
494 | ptep = pte_offset_map(pmdp, address); | ||
495 | pte = *ptep; | ||
496 | if (!pte_present(pte)) | ||
497 | goto out_unmap; | ||
498 | |||
499 | paddr = (unsigned long) page_address(pte_page(pte)); | ||
500 | |||
501 | __asm__ __volatile__("flush %0 + %1" | ||
502 | : /* no outputs */ | ||
503 | : "r" (paddr), | ||
504 | "r" (address & (PAGE_SIZE - 1)) | ||
505 | : "memory"); | ||
506 | |||
507 | out_unmap: | ||
508 | pte_unmap(ptep); | ||
509 | out_irqs_on: | ||
510 | __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate)); | ||
511 | |||
512 | } | ||
513 | |||
514 | static int setup_frame32(struct k_sigaction *ka, struct pt_regs *regs, | ||
515 | int signo, sigset_t *oldset) | ||
458 | { | 516 | { |
459 | struct signal_frame32 __user *sf; | 517 | struct signal_frame32 __user *sf; |
460 | int sigframe_size; | 518 | int sigframe_size; |
@@ -547,13 +605,7 @@ static void setup_frame32(struct k_sigaction *ka, struct pt_regs *regs, | |||
547 | if (ka->ka_restorer) { | 605 | if (ka->ka_restorer) { |
548 | regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer; | 606 | regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer; |
549 | } else { | 607 | } else { |
550 | /* Flush instruction space. */ | ||
551 | unsigned long address = ((unsigned long)&(sf->insns[0])); | 608 | unsigned long address = ((unsigned long)&(sf->insns[0])); |
552 | pgd_t *pgdp = pgd_offset(current->mm, address); | ||
553 | pud_t *pudp = pud_offset(pgdp, address); | ||
554 | pmd_t *pmdp = pmd_offset(pudp, address); | ||
555 | pte_t *ptep; | ||
556 | pte_t pte; | ||
557 | 609 | ||
558 | regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2); | 610 | regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2); |
559 | 611 | ||
@@ -562,34 +614,22 @@ static void setup_frame32(struct k_sigaction *ka, struct pt_regs *regs, | |||
562 | if (err) | 614 | if (err) |
563 | goto sigsegv; | 615 | goto sigsegv; |
564 | 616 | ||
565 | preempt_disable(); | 617 | flush_signal_insns(address); |
566 | ptep = pte_offset_map(pmdp, address); | ||
567 | pte = *ptep; | ||
568 | if (pte_present(pte)) { | ||
569 | unsigned long page = (unsigned long) | ||
570 | page_address(pte_page(pte)); | ||
571 | |||
572 | wmb(); | ||
573 | __asm__ __volatile__("flush %0 + %1" | ||
574 | : /* no outputs */ | ||
575 | : "r" (page), | ||
576 | "r" (address & (PAGE_SIZE - 1)) | ||
577 | : "memory"); | ||
578 | } | ||
579 | pte_unmap(ptep); | ||
580 | preempt_enable(); | ||
581 | } | 618 | } |
582 | return; | 619 | return 0; |
583 | 620 | ||
584 | sigill: | 621 | sigill: |
585 | do_exit(SIGILL); | 622 | do_exit(SIGILL); |
623 | return -EINVAL; | ||
624 | |||
586 | sigsegv: | 625 | sigsegv: |
587 | force_sigsegv(signo, current); | 626 | force_sigsegv(signo, current); |
627 | return -EFAULT; | ||
588 | } | 628 | } |
589 | 629 | ||
590 | static void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs, | 630 | static int setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs, |
591 | unsigned long signr, sigset_t *oldset, | 631 | unsigned long signr, sigset_t *oldset, |
592 | siginfo_t *info) | 632 | siginfo_t *info) |
593 | { | 633 | { |
594 | struct rt_signal_frame32 __user *sf; | 634 | struct rt_signal_frame32 __user *sf; |
595 | int sigframe_size; | 635 | int sigframe_size; |
@@ -687,12 +727,7 @@ static void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs, | |||
687 | if (ka->ka_restorer) | 727 | if (ka->ka_restorer) |
688 | regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer; | 728 | regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer; |
689 | else { | 729 | else { |
690 | /* Flush instruction space. */ | ||
691 | unsigned long address = ((unsigned long)&(sf->insns[0])); | 730 | unsigned long address = ((unsigned long)&(sf->insns[0])); |
692 | pgd_t *pgdp = pgd_offset(current->mm, address); | ||
693 | pud_t *pudp = pud_offset(pgdp, address); | ||
694 | pmd_t *pmdp = pmd_offset(pudp, address); | ||
695 | pte_t *ptep; | ||
696 | 731 | ||
697 | regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2); | 732 | regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2); |
698 | 733 | ||
@@ -704,38 +739,32 @@ static void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs, | |||
704 | if (err) | 739 | if (err) |
705 | goto sigsegv; | 740 | goto sigsegv; |
706 | 741 | ||
707 | preempt_disable(); | 742 | flush_signal_insns(address); |
708 | ptep = pte_offset_map(pmdp, address); | ||
709 | if (pte_present(*ptep)) { | ||
710 | unsigned long page = (unsigned long) | ||
711 | page_address(pte_page(*ptep)); | ||
712 | |||
713 | wmb(); | ||
714 | __asm__ __volatile__("flush %0 + %1" | ||
715 | : /* no outputs */ | ||
716 | : "r" (page), | ||
717 | "r" (address & (PAGE_SIZE - 1)) | ||
718 | : "memory"); | ||
719 | } | ||
720 | pte_unmap(ptep); | ||
721 | preempt_enable(); | ||
722 | } | 743 | } |
723 | return; | 744 | return 0; |
724 | 745 | ||
725 | sigill: | 746 | sigill: |
726 | do_exit(SIGILL); | 747 | do_exit(SIGILL); |
748 | return -EINVAL; | ||
749 | |||
727 | sigsegv: | 750 | sigsegv: |
728 | force_sigsegv(signr, current); | 751 | force_sigsegv(signr, current); |
752 | return -EFAULT; | ||
729 | } | 753 | } |
730 | 754 | ||
731 | static inline void handle_signal32(unsigned long signr, struct k_sigaction *ka, | 755 | static inline int handle_signal32(unsigned long signr, struct k_sigaction *ka, |
732 | siginfo_t *info, | 756 | siginfo_t *info, |
733 | sigset_t *oldset, struct pt_regs *regs) | 757 | sigset_t *oldset, struct pt_regs *regs) |
734 | { | 758 | { |
759 | int err; | ||
760 | |||
735 | if (ka->sa.sa_flags & SA_SIGINFO) | 761 | if (ka->sa.sa_flags & SA_SIGINFO) |
736 | setup_rt_frame32(ka, regs, signr, oldset, info); | 762 | err = setup_rt_frame32(ka, regs, signr, oldset, info); |
737 | else | 763 | else |
738 | setup_frame32(ka, regs, signr, oldset); | 764 | err = setup_frame32(ka, regs, signr, oldset); |
765 | |||
766 | if (err) | ||
767 | return err; | ||
739 | 768 | ||
740 | spin_lock_irq(&current->sighand->siglock); | 769 | spin_lock_irq(&current->sighand->siglock); |
741 | sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); | 770 | sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); |
@@ -743,6 +772,10 @@ static inline void handle_signal32(unsigned long signr, struct k_sigaction *ka, | |||
743 | sigaddset(&current->blocked,signr); | 772 | sigaddset(&current->blocked,signr); |
744 | recalc_sigpending(); | 773 | recalc_sigpending(); |
745 | spin_unlock_irq(&current->sighand->siglock); | 774 | spin_unlock_irq(&current->sighand->siglock); |
775 | |||
776 | tracehook_signal_handler(signr, info, ka, regs, 0); | ||
777 | |||
778 | return 0; | ||
746 | } | 779 | } |
747 | 780 | ||
748 | static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs, | 781 | static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs, |
@@ -789,16 +822,14 @@ void do_signal32(sigset_t *oldset, struct pt_regs * regs, | |||
789 | if (signr > 0) { | 822 | if (signr > 0) { |
790 | if (restart_syscall) | 823 | if (restart_syscall) |
791 | syscall_restart32(orig_i0, regs, &ka.sa); | 824 | syscall_restart32(orig_i0, regs, &ka.sa); |
792 | handle_signal32(signr, &ka, &info, oldset, regs); | 825 | if (handle_signal32(signr, &ka, &info, oldset, regs) == 0) { |
793 | 826 | /* A signal was successfully delivered; the saved | |
794 | /* A signal was successfully delivered; the saved | 827 | * sigmask will have been stored in the signal frame, |
795 | * sigmask will have been stored in the signal frame, | 828 | * and will be restored by sigreturn, so we can simply |
796 | * and will be restored by sigreturn, so we can simply | 829 | * clear the TS_RESTORE_SIGMASK flag. |
797 | * clear the TS_RESTORE_SIGMASK flag. | 830 | */ |
798 | */ | 831 | current_thread_info()->status &= ~TS_RESTORE_SIGMASK; |
799 | current_thread_info()->status &= ~TS_RESTORE_SIGMASK; | 832 | } |
800 | |||
801 | tracehook_signal_handler(signr, &info, &ka, regs, 0); | ||
802 | return; | 833 | return; |
803 | } | 834 | } |
804 | if (restart_syscall && | 835 | if (restart_syscall && |
@@ -809,12 +840,14 @@ void do_signal32(sigset_t *oldset, struct pt_regs * regs, | |||
809 | regs->u_regs[UREG_I0] = orig_i0; | 840 | regs->u_regs[UREG_I0] = orig_i0; |
810 | regs->tpc -= 4; | 841 | regs->tpc -= 4; |
811 | regs->tnpc -= 4; | 842 | regs->tnpc -= 4; |
843 | pt_regs_clear_syscall(regs); | ||
812 | } | 844 | } |
813 | if (restart_syscall && | 845 | if (restart_syscall && |
814 | regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) { | 846 | regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) { |
815 | regs->u_regs[UREG_G1] = __NR_restart_syscall; | 847 | regs->u_regs[UREG_G1] = __NR_restart_syscall; |
816 | regs->tpc -= 4; | 848 | regs->tpc -= 4; |
817 | regs->tnpc -= 4; | 849 | regs->tnpc -= 4; |
850 | pt_regs_clear_syscall(regs); | ||
818 | } | 851 | } |
819 | 852 | ||
820 | /* If there's no signal to deliver, we just put the saved sigmask | 853 | /* If there's no signal to deliver, we just put the saved sigmask |
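The sparc signal hunks above replace the open-coded instruction-cache flush with a single flush_signal_insns() call, but the diff only shows the call sites, not the new helper. Reconstructed from the deleted lines, the helper presumably looks roughly like the sketch below; the page-table walk and the inline "flush" asm are lifted from the removed code and the name comes from the new call site, so treat this as an approximation rather than the exact committed body.

static void flush_signal_insns(unsigned long address)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	/* Walk current->mm to find the page backing the signal
	 * trampoline that was just written, then flush that line
	 * out of the instruction cache. */
	pgdp = pgd_offset(current->mm, address);
	pudp = pud_offset(pgdp, address);
	pmdp = pmd_offset(pudp, address);

	preempt_disable();
	ptep = pte_offset_map(pmdp, address);
	if (pte_present(*ptep)) {
		unsigned long page = (unsigned long)
			page_address(pte_page(*ptep));

		wmb();
		__asm__ __volatile__("flush %0 + %1"
				     : /* no outputs */
				     : "r" (page),
				       "r" (address & (PAGE_SIZE - 1))
				     : "memory");
	}
	pte_unmap(ptep);
	preempt_enable();
}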
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c index 9882df92ba0a..5e5c5fd03783 100644 --- a/arch/sparc/kernel/signal_32.c +++ b/arch/sparc/kernel/signal_32.c | |||
@@ -315,8 +315,8 @@ save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) | |||
315 | return err; | 315 | return err; |
316 | } | 316 | } |
317 | 317 | ||
318 | static void setup_frame(struct k_sigaction *ka, struct pt_regs *regs, | 318 | static int setup_frame(struct k_sigaction *ka, struct pt_regs *regs, |
319 | int signo, sigset_t *oldset) | 319 | int signo, sigset_t *oldset) |
320 | { | 320 | { |
321 | struct signal_frame __user *sf; | 321 | struct signal_frame __user *sf; |
322 | int sigframe_size, err; | 322 | int sigframe_size, err; |
@@ -384,16 +384,19 @@ static void setup_frame(struct k_sigaction *ka, struct pt_regs *regs, | |||
384 | /* Flush instruction space. */ | 384 | /* Flush instruction space. */ |
385 | flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0])); | 385 | flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0])); |
386 | } | 386 | } |
387 | return; | 387 | return 0; |
388 | 388 | ||
389 | sigill_and_return: | 389 | sigill_and_return: |
390 | do_exit(SIGILL); | 390 | do_exit(SIGILL); |
391 | return -EINVAL; | ||
392 | |||
391 | sigsegv: | 393 | sigsegv: |
392 | force_sigsegv(signo, current); | 394 | force_sigsegv(signo, current); |
395 | return -EFAULT; | ||
393 | } | 396 | } |
394 | 397 | ||
395 | static void setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, | 398 | static int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, |
396 | int signo, sigset_t *oldset, siginfo_t *info) | 399 | int signo, sigset_t *oldset, siginfo_t *info) |
397 | { | 400 | { |
398 | struct rt_signal_frame __user *sf; | 401 | struct rt_signal_frame __user *sf; |
399 | int sigframe_size; | 402 | int sigframe_size; |
@@ -466,22 +469,30 @@ static void setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, | |||
466 | /* Flush instruction space. */ | 469 | /* Flush instruction space. */ |
467 | flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0])); | 470 | flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0])); |
468 | } | 471 | } |
469 | return; | 472 | return 0; |
470 | 473 | ||
471 | sigill: | 474 | sigill: |
472 | do_exit(SIGILL); | 475 | do_exit(SIGILL); |
476 | return -EINVAL; | ||
477 | |||
473 | sigsegv: | 478 | sigsegv: |
474 | force_sigsegv(signo, current); | 479 | force_sigsegv(signo, current); |
480 | return -EFAULT; | ||
475 | } | 481 | } |
476 | 482 | ||
477 | static inline void | 483 | static inline int |
478 | handle_signal(unsigned long signr, struct k_sigaction *ka, | 484 | handle_signal(unsigned long signr, struct k_sigaction *ka, |
479 | siginfo_t *info, sigset_t *oldset, struct pt_regs *regs) | 485 | siginfo_t *info, sigset_t *oldset, struct pt_regs *regs) |
480 | { | 486 | { |
487 | int err; | ||
488 | |||
481 | if (ka->sa.sa_flags & SA_SIGINFO) | 489 | if (ka->sa.sa_flags & SA_SIGINFO) |
482 | setup_rt_frame(ka, regs, signr, oldset, info); | 490 | err = setup_rt_frame(ka, regs, signr, oldset, info); |
483 | else | 491 | else |
484 | setup_frame(ka, regs, signr, oldset); | 492 | err = setup_frame(ka, regs, signr, oldset); |
493 | |||
494 | if (err) | ||
495 | return err; | ||
485 | 496 | ||
486 | spin_lock_irq(&current->sighand->siglock); | 497 | spin_lock_irq(&current->sighand->siglock); |
487 | sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); | 498 | sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); |
@@ -489,6 +500,10 @@ handle_signal(unsigned long signr, struct k_sigaction *ka, | |||
489 | sigaddset(&current->blocked, signr); | 500 | sigaddset(&current->blocked, signr); |
490 | recalc_sigpending(); | 501 | recalc_sigpending(); |
491 | spin_unlock_irq(&current->sighand->siglock); | 502 | spin_unlock_irq(&current->sighand->siglock); |
503 | |||
504 | tracehook_signal_handler(signr, info, ka, regs, 0); | ||
505 | |||
506 | return 0; | ||
492 | } | 507 | } |
493 | 508 | ||
494 | static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs, | 509 | static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs, |
@@ -546,17 +561,15 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0) | |||
546 | if (signr > 0) { | 561 | if (signr > 0) { |
547 | if (restart_syscall) | 562 | if (restart_syscall) |
548 | syscall_restart(orig_i0, regs, &ka.sa); | 563 | syscall_restart(orig_i0, regs, &ka.sa); |
549 | handle_signal(signr, &ka, &info, oldset, regs); | 564 | if (handle_signal(signr, &ka, &info, oldset, regs) == 0) { |
550 | 565 | /* a signal was successfully delivered; the saved | |
551 | /* a signal was successfully delivered; the saved | 566 | * sigmask will have been stored in the signal frame, |
552 | * sigmask will have been stored in the signal frame, | 567 | * and will be restored by sigreturn, so we can simply |
553 | * and will be restored by sigreturn, so we can simply | 568 | * clear the TIF_RESTORE_SIGMASK flag. |
554 | * clear the TIF_RESTORE_SIGMASK flag. | 569 | */ |
555 | */ | 570 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) |
556 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) | 571 | clear_thread_flag(TIF_RESTORE_SIGMASK); |
557 | clear_thread_flag(TIF_RESTORE_SIGMASK); | 572 | } |
558 | |||
559 | tracehook_signal_handler(signr, &info, &ka, regs, 0); | ||
560 | return; | 573 | return; |
561 | } | 574 | } |
562 | if (restart_syscall && | 575 | if (restart_syscall && |
@@ -567,12 +580,14 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0) | |||
567 | regs->u_regs[UREG_I0] = orig_i0; | 580 | regs->u_regs[UREG_I0] = orig_i0; |
568 | regs->pc -= 4; | 581 | regs->pc -= 4; |
569 | regs->npc -= 4; | 582 | regs->npc -= 4; |
583 | pt_regs_clear_syscall(regs); | ||
570 | } | 584 | } |
571 | if (restart_syscall && | 585 | if (restart_syscall && |
572 | regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) { | 586 | regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) { |
573 | regs->u_regs[UREG_G1] = __NR_restart_syscall; | 587 | regs->u_regs[UREG_G1] = __NR_restart_syscall; |
574 | regs->pc -= 4; | 588 | regs->pc -= 4; |
575 | regs->npc -= 4; | 589 | regs->npc -= 4; |
590 | pt_regs_clear_syscall(regs); | ||
576 | } | 591 | } |
577 | 592 | ||
578 | /* if there's no signal to deliver, we just put the saved sigmask | 593 | /* if there's no signal to deliver, we just put the saved sigmask |
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c index 9fa48c30037e..006fe4515886 100644 --- a/arch/sparc/kernel/signal_64.c +++ b/arch/sparc/kernel/signal_64.c | |||
@@ -409,7 +409,7 @@ static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs * | |||
409 | return (void __user *) sp; | 409 | return (void __user *) sp; |
410 | } | 410 | } |
411 | 411 | ||
412 | static inline void | 412 | static inline int |
413 | setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, | 413 | setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, |
414 | int signo, sigset_t *oldset, siginfo_t *info) | 414 | int signo, sigset_t *oldset, siginfo_t *info) |
415 | { | 415 | { |
@@ -483,26 +483,37 @@ setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, | |||
483 | } | 483 | } |
484 | /* 4. return to kernel instructions */ | 484 | /* 4. return to kernel instructions */ |
485 | regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer; | 485 | regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer; |
486 | return; | 486 | return 0; |
487 | 487 | ||
488 | sigill: | 488 | sigill: |
489 | do_exit(SIGILL); | 489 | do_exit(SIGILL); |
490 | return -EINVAL; | ||
491 | |||
490 | sigsegv: | 492 | sigsegv: |
491 | force_sigsegv(signo, current); | 493 | force_sigsegv(signo, current); |
494 | return -EFAULT; | ||
492 | } | 495 | } |
493 | 496 | ||
494 | static inline void handle_signal(unsigned long signr, struct k_sigaction *ka, | 497 | static inline int handle_signal(unsigned long signr, struct k_sigaction *ka, |
495 | siginfo_t *info, | 498 | siginfo_t *info, |
496 | sigset_t *oldset, struct pt_regs *regs) | 499 | sigset_t *oldset, struct pt_regs *regs) |
497 | { | 500 | { |
498 | setup_rt_frame(ka, regs, signr, oldset, | 501 | int err; |
499 | (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL); | 502 | |
503 | err = setup_rt_frame(ka, regs, signr, oldset, | ||
504 | (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL); | ||
505 | if (err) | ||
506 | return err; | ||
500 | spin_lock_irq(&current->sighand->siglock); | 507 | spin_lock_irq(&current->sighand->siglock); |
501 | sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); | 508 | sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); |
502 | if (!(ka->sa.sa_flags & SA_NOMASK)) | 509 | if (!(ka->sa.sa_flags & SA_NOMASK)) |
503 | sigaddset(&current->blocked,signr); | 510 | sigaddset(&current->blocked,signr); |
504 | recalc_sigpending(); | 511 | recalc_sigpending(); |
505 | spin_unlock_irq(&current->sighand->siglock); | 512 | spin_unlock_irq(&current->sighand->siglock); |
513 | |||
514 | tracehook_signal_handler(signr, info, ka, regs, 0); | ||
515 | |||
516 | return 0; | ||
506 | } | 517 | } |
507 | 518 | ||
508 | static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs, | 519 | static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs, |
@@ -571,16 +582,14 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0) | |||
571 | if (signr > 0) { | 582 | if (signr > 0) { |
572 | if (restart_syscall) | 583 | if (restart_syscall) |
573 | syscall_restart(orig_i0, regs, &ka.sa); | 584 | syscall_restart(orig_i0, regs, &ka.sa); |
574 | handle_signal(signr, &ka, &info, oldset, regs); | 585 | if (handle_signal(signr, &ka, &info, oldset, regs) == 0) { |
575 | 586 | /* A signal was successfully delivered; the saved | |
576 | /* A signal was successfully delivered; the saved | 587 | * sigmask will have been stored in the signal frame, |
577 | * sigmask will have been stored in the signal frame, | 588 | * and will be restored by sigreturn, so we can simply |
578 | * and will be restored by sigreturn, so we can simply | 589 | * clear the TS_RESTORE_SIGMASK flag. |
579 | * clear the TS_RESTORE_SIGMASK flag. | 590 | */ |
580 | */ | 591 | current_thread_info()->status &= ~TS_RESTORE_SIGMASK; |
581 | current_thread_info()->status &= ~TS_RESTORE_SIGMASK; | 592 | } |
582 | |||
583 | tracehook_signal_handler(signr, &info, &ka, regs, 0); | ||
584 | return; | 593 | return; |
585 | } | 594 | } |
586 | if (restart_syscall && | 595 | if (restart_syscall && |
@@ -591,12 +600,14 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0) | |||
591 | regs->u_regs[UREG_I0] = orig_i0; | 600 | regs->u_regs[UREG_I0] = orig_i0; |
592 | regs->tpc -= 4; | 601 | regs->tpc -= 4; |
593 | regs->tnpc -= 4; | 602 | regs->tnpc -= 4; |
603 | pt_regs_clear_syscall(regs); | ||
594 | } | 604 | } |
595 | if (restart_syscall && | 605 | if (restart_syscall && |
596 | regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) { | 606 | regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) { |
597 | regs->u_regs[UREG_G1] = __NR_restart_syscall; | 607 | regs->u_regs[UREG_G1] = __NR_restart_syscall; |
598 | regs->tpc -= 4; | 608 | regs->tpc -= 4; |
599 | regs->tnpc -= 4; | 609 | regs->tnpc -= 4; |
610 | pt_regs_clear_syscall(regs); | ||
600 | } | 611 | } |
601 | 612 | ||
602 | /* If there's no signal to deliver, we just put the saved sigmask | 613 | /* If there's no signal to deliver, we just put the saved sigmask |
diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c index 50794137d710..675c9e11ada5 100644 --- a/arch/sparc/kernel/sys_sparc_32.c +++ b/arch/sparc/kernel/sys_sparc_32.c | |||
@@ -166,7 +166,6 @@ sparc_breakpoint (struct pt_regs *regs) | |||
166 | { | 166 | { |
167 | siginfo_t info; | 167 | siginfo_t info; |
168 | 168 | ||
169 | lock_kernel(); | ||
170 | #ifdef DEBUG_SPARC_BREAKPOINT | 169 | #ifdef DEBUG_SPARC_BREAKPOINT |
171 | printk ("TRAP: Entering kernel PC=%x, nPC=%x\n", regs->pc, regs->npc); | 170 | printk ("TRAP: Entering kernel PC=%x, nPC=%x\n", regs->pc, regs->npc); |
172 | #endif | 171 | #endif |
@@ -180,7 +179,6 @@ sparc_breakpoint (struct pt_regs *regs) | |||
180 | #ifdef DEBUG_SPARC_BREAKPOINT | 179 | #ifdef DEBUG_SPARC_BREAKPOINT |
181 | printk ("TRAP: Returning to space: PC=%x nPC=%x\n", regs->pc, regs->npc); | 180 | printk ("TRAP: Returning to space: PC=%x nPC=%x\n", regs->pc, regs->npc); |
182 | #endif | 181 | #endif |
183 | unlock_kernel(); | ||
184 | } | 182 | } |
185 | 183 | ||
186 | asmlinkage int | 184 | asmlinkage int |
diff --git a/arch/sparc/kernel/unaligned_32.c b/arch/sparc/kernel/unaligned_32.c index f8514e291e15..12b9f352595f 100644 --- a/arch/sparc/kernel/unaligned_32.c +++ b/arch/sparc/kernel/unaligned_32.c | |||
@@ -323,7 +323,6 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn) | |||
323 | { | 323 | { |
324 | enum direction dir; | 324 | enum direction dir; |
325 | 325 | ||
326 | lock_kernel(); | ||
327 | if(!(current->thread.flags & SPARC_FLAG_UNALIGNED) || | 326 | if(!(current->thread.flags & SPARC_FLAG_UNALIGNED) || |
328 | (((insn >> 30) & 3) != 3)) | 327 | (((insn >> 30) & 3) != 3)) |
329 | goto kill_user; | 328 | goto kill_user; |
@@ -377,5 +376,5 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn) | |||
377 | kill_user: | 376 | kill_user: |
378 | user_mna_trap_fault(regs, insn); | 377 | user_mna_trap_fault(regs, insn); |
379 | out: | 378 | out: |
380 | unlock_kernel(); | 379 | ; |
381 | } | 380 | } |
diff --git a/arch/sparc/kernel/windows.c b/arch/sparc/kernel/windows.c index f24d298bda29..b351770cbdd6 100644 --- a/arch/sparc/kernel/windows.c +++ b/arch/sparc/kernel/windows.c | |||
@@ -112,7 +112,6 @@ void try_to_clear_window_buffer(struct pt_regs *regs, int who) | |||
112 | struct thread_info *tp = current_thread_info(); | 112 | struct thread_info *tp = current_thread_info(); |
113 | int window; | 113 | int window; |
114 | 114 | ||
115 | lock_kernel(); | ||
116 | flush_user_windows(); | 115 | flush_user_windows(); |
117 | for(window = 0; window < tp->w_saved; window++) { | 116 | for(window = 0; window < tp->w_saved; window++) { |
118 | unsigned long sp = tp->rwbuf_stkptrs[window]; | 117 | unsigned long sp = tp->rwbuf_stkptrs[window]; |
@@ -123,5 +122,4 @@ void try_to_clear_window_buffer(struct pt_regs *regs, int who) | |||
123 | do_exit(SIGILL); | 122 | do_exit(SIGILL); |
124 | } | 123 | } |
125 | tp->w_saved = 0; | 124 | tp->w_saved = 0; |
126 | unlock_kernel(); | ||
127 | } | 125 | } |
diff --git a/arch/tile/include/arch/chip_tile64.h b/arch/tile/include/arch/chip_tile64.h index 1246573be59e..261aaba092d4 100644 --- a/arch/tile/include/arch/chip_tile64.h +++ b/arch/tile/include/arch/chip_tile64.h | |||
@@ -150,6 +150,9 @@ | |||
150 | /** Is the PROC_STATUS SPR supported? */ | 150 | /** Is the PROC_STATUS SPR supported? */ |
151 | #define CHIP_HAS_PROC_STATUS_SPR() 0 | 151 | #define CHIP_HAS_PROC_STATUS_SPR() 0 |
152 | 152 | ||
153 | /** Is the DSTREAM_PF SPR supported? */ | ||
154 | #define CHIP_HAS_DSTREAM_PF() 0 | ||
155 | |||
153 | /** Log of the number of mshims we have. */ | 156 | /** Log of the number of mshims we have. */ |
154 | #define CHIP_LOG_NUM_MSHIMS() 2 | 157 | #define CHIP_LOG_NUM_MSHIMS() 2 |
155 | 158 | ||
diff --git a/arch/tile/include/arch/chip_tilepro.h b/arch/tile/include/arch/chip_tilepro.h index e864c47fc89c..70017699a74c 100644 --- a/arch/tile/include/arch/chip_tilepro.h +++ b/arch/tile/include/arch/chip_tilepro.h | |||
@@ -150,6 +150,9 @@ | |||
150 | /** Is the PROC_STATUS SPR supported? */ | 150 | /** Is the PROC_STATUS SPR supported? */ |
151 | #define CHIP_HAS_PROC_STATUS_SPR() 1 | 151 | #define CHIP_HAS_PROC_STATUS_SPR() 1 |
152 | 152 | ||
153 | /** Is the DSTREAM_PF SPR supported? */ | ||
154 | #define CHIP_HAS_DSTREAM_PF() 0 | ||
155 | |||
153 | /** Log of the number of mshims we have. */ | 156 | /** Log of the number of mshims we have. */ |
154 | #define CHIP_LOG_NUM_MSHIMS() 2 | 157 | #define CHIP_LOG_NUM_MSHIMS() 2 |
155 | 158 | ||
diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h index 5a34da6cdd79..8b60ec8b2d19 100644 --- a/arch/tile/include/asm/compat.h +++ b/arch/tile/include/asm/compat.h | |||
@@ -195,7 +195,7 @@ static inline unsigned long ptr_to_compat_reg(void __user *uptr) | |||
195 | return (long)(int)(long __force)uptr; | 195 | return (long)(int)(long __force)uptr; |
196 | } | 196 | } |
197 | 197 | ||
198 | static inline void __user *compat_alloc_user_space(long len) | 198 | static inline void __user *arch_compat_alloc_user_space(long len) |
199 | { | 199 | { |
200 | struct pt_regs *regs = task_pt_regs(current); | 200 | struct pt_regs *regs = task_pt_regs(current); |
201 | return (void __user *)regs->sp - len; | 201 | return (void __user *)regs->sp - len; |
@@ -214,8 +214,9 @@ extern int compat_setup_rt_frame(int sig, struct k_sigaction *ka, | |||
214 | struct compat_sigaction; | 214 | struct compat_sigaction; |
215 | struct compat_siginfo; | 215 | struct compat_siginfo; |
216 | struct compat_sigaltstack; | 216 | struct compat_sigaltstack; |
217 | long compat_sys_execve(char __user *path, compat_uptr_t __user *argv, | 217 | long compat_sys_execve(const char __user *path, |
218 | compat_uptr_t __user *envp); | 218 | const compat_uptr_t __user *argv, |
219 | const compat_uptr_t __user *envp); | ||
219 | long compat_sys_rt_sigaction(int sig, struct compat_sigaction __user *act, | 220 | long compat_sys_rt_sigaction(int sig, struct compat_sigaction __user *act, |
220 | struct compat_sigaction __user *oact, | 221 | struct compat_sigaction __user *oact, |
221 | size_t sigsetsize); | 222 | size_t sigsetsize); |
diff --git a/arch/tile/include/asm/io.h b/arch/tile/include/asm/io.h index 8c95bef3fa45..ee43328713ab 100644 --- a/arch/tile/include/asm/io.h +++ b/arch/tile/include/asm/io.h | |||
@@ -164,22 +164,22 @@ static inline void _tile_writeq(u64 val, unsigned long addr) | |||
164 | #define iowrite32 writel | 164 | #define iowrite32 writel |
165 | #define iowrite64 writeq | 165 | #define iowrite64 writeq |
166 | 166 | ||
167 | static inline void *memcpy_fromio(void *dst, void *src, int len) | 167 | static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, |
168 | size_t len) | ||
168 | { | 169 | { |
169 | int x; | 170 | int x; |
170 | BUG_ON((unsigned long)src & 0x3); | 171 | BUG_ON((unsigned long)src & 0x3); |
171 | for (x = 0; x < len; x += 4) | 172 | for (x = 0; x < len; x += 4) |
172 | *(u32 *)(dst + x) = readl(src + x); | 173 | *(u32 *)(dst + x) = readl(src + x); |
173 | return dst; | ||
174 | } | 174 | } |
175 | 175 | ||
176 | static inline void *memcpy_toio(void *dst, void *src, int len) | 176 | static inline void memcpy_toio(volatile void __iomem *dst, const void *src, |
177 | size_t len) | ||
177 | { | 178 | { |
178 | int x; | 179 | int x; |
179 | BUG_ON((unsigned long)dst & 0x3); | 180 | BUG_ON((unsigned long)dst & 0x3); |
180 | for (x = 0; x < len; x += 4) | 181 | for (x = 0; x < len; x += 4) |
181 | writel(*(u32 *)(src + x), dst + x); | 182 | writel(*(u32 *)(src + x), dst + x); |
182 | return dst; | ||
183 | } | 183 | } |
184 | 184 | ||
185 | /* | 185 | /* |
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h index d942d09b252e..ccd5f8425688 100644 --- a/arch/tile/include/asm/processor.h +++ b/arch/tile/include/asm/processor.h | |||
@@ -103,6 +103,18 @@ struct thread_struct { | |||
103 | /* Any other miscellaneous processor state bits */ | 103 | /* Any other miscellaneous processor state bits */ |
104 | unsigned long proc_status; | 104 | unsigned long proc_status; |
105 | #endif | 105 | #endif |
106 | #if !CHIP_HAS_FIXED_INTVEC_BASE() | ||
107 | /* Interrupt base for PL0 interrupts */ | ||
108 | unsigned long interrupt_vector_base; | ||
109 | #endif | ||
110 | #if CHIP_HAS_TILE_RTF_HWM() | ||
111 | /* Tile cache retry fifo high-water mark */ | ||
112 | unsigned long tile_rtf_hwm; | ||
113 | #endif | ||
114 | #if CHIP_HAS_DSTREAM_PF() | ||
115 | /* Data stream prefetch control */ | ||
116 | unsigned long dstream_pf; | ||
117 | #endif | ||
106 | #ifdef CONFIG_HARDWALL | 118 | #ifdef CONFIG_HARDWALL |
107 | /* Is this task tied to an activated hardwall? */ | 119 | /* Is this task tied to an activated hardwall? */ |
108 | struct hardwall_info *hardwall; | 120 | struct hardwall_info *hardwall; |
diff --git a/arch/tile/include/asm/ptrace.h b/arch/tile/include/asm/ptrace.h index acdae814e016..4a02bb073979 100644 --- a/arch/tile/include/asm/ptrace.h +++ b/arch/tile/include/asm/ptrace.h | |||
@@ -51,10 +51,7 @@ typedef uint_reg_t pt_reg_t; | |||
51 | 51 | ||
52 | /* | 52 | /* |
53 | * This struct defines the way the registers are stored on the stack during a | 53 | * This struct defines the way the registers are stored on the stack during a |
54 | * system call/exception. It should be a multiple of 8 bytes to preserve | 54 | * system call or exception. "struct sigcontext" has the same shape. |
55 | * normal stack alignment rules. | ||
56 | * | ||
57 | * Must track <sys/ucontext.h> and <sys/procfs.h> | ||
58 | */ | 55 | */ |
59 | struct pt_regs { | 56 | struct pt_regs { |
60 | /* Saved main processor registers; 56..63 are special. */ | 57 | /* Saved main processor registers; 56..63 are special. */ |
@@ -80,11 +77,6 @@ struct pt_regs { | |||
80 | 77 | ||
81 | #endif /* __ASSEMBLY__ */ | 78 | #endif /* __ASSEMBLY__ */ |
82 | 79 | ||
83 | /* Flag bits in pt_regs.flags */ | ||
84 | #define PT_FLAGS_DISABLE_IRQ 1 /* on return to kernel, disable irqs */ | ||
85 | #define PT_FLAGS_CALLER_SAVES 2 /* caller-save registers are valid */ | ||
86 | #define PT_FLAGS_RESTORE_REGS 4 /* restore callee-save regs on return */ | ||
87 | |||
88 | #define PTRACE_GETREGS 12 | 80 | #define PTRACE_GETREGS 12 |
89 | #define PTRACE_SETREGS 13 | 81 | #define PTRACE_SETREGS 13 |
90 | #define PTRACE_GETFPREGS 14 | 82 | #define PTRACE_GETFPREGS 14 |
@@ -101,6 +93,11 @@ struct pt_regs { | |||
101 | 93 | ||
102 | #ifdef __KERNEL__ | 94 | #ifdef __KERNEL__ |
103 | 95 | ||
96 | /* Flag bits in pt_regs.flags */ | ||
97 | #define PT_FLAGS_DISABLE_IRQ 1 /* on return to kernel, disable irqs */ | ||
98 | #define PT_FLAGS_CALLER_SAVES 2 /* caller-save registers are valid */ | ||
99 | #define PT_FLAGS_RESTORE_REGS 4 /* restore callee-save regs on return */ | ||
100 | |||
104 | #ifndef __ASSEMBLY__ | 101 | #ifndef __ASSEMBLY__ |
105 | 102 | ||
106 | #define instruction_pointer(regs) ((regs)->pc) | 103 | #define instruction_pointer(regs) ((regs)->pc) |
diff --git a/arch/tile/include/asm/sigcontext.h b/arch/tile/include/asm/sigcontext.h index 7cd7672e3ad4..5e2d03336f53 100644 --- a/arch/tile/include/asm/sigcontext.h +++ b/arch/tile/include/asm/sigcontext.h | |||
@@ -15,13 +15,21 @@ | |||
15 | #ifndef _ASM_TILE_SIGCONTEXT_H | 15 | #ifndef _ASM_TILE_SIGCONTEXT_H |
16 | #define _ASM_TILE_SIGCONTEXT_H | 16 | #define _ASM_TILE_SIGCONTEXT_H |
17 | 17 | ||
18 | /* NOTE: we can't include <linux/ptrace.h> due to #include dependencies. */ | 18 | #include <arch/abi.h> |
19 | #include <asm/ptrace.h> | ||
20 | |||
21 | /* Must track <sys/ucontext.h> */ | ||
22 | 19 | ||
20 | /* | ||
21 | * struct sigcontext has the same shape as struct pt_regs, | ||
22 | * but is simplified since we know the fault is from userspace. | ||
23 | */ | ||
23 | struct sigcontext { | 24 | struct sigcontext { |
24 | struct pt_regs regs; | 25 | uint_reg_t gregs[53]; /* General-purpose registers. */ |
26 | uint_reg_t tp; /* Aliases gregs[TREG_TP]. */ | ||
27 | uint_reg_t sp; /* Aliases gregs[TREG_SP]. */ | ||
28 | uint_reg_t lr; /* Aliases gregs[TREG_LR]. */ | ||
29 | uint_reg_t pc; /* Program counter. */ | ||
30 | uint_reg_t ics; /* In Interrupt Critical Section? */ | ||
31 | uint_reg_t faultnum; /* Fault number. */ | ||
32 | uint_reg_t pad[5]; | ||
25 | }; | 33 | }; |
26 | 34 | ||
27 | #endif /* _ASM_TILE_SIGCONTEXT_H */ | 35 | #endif /* _ASM_TILE_SIGCONTEXT_H */ |
diff --git a/arch/tile/include/asm/signal.h b/arch/tile/include/asm/signal.h index eb0253f32202..c1ee1d61d44c 100644 --- a/arch/tile/include/asm/signal.h +++ b/arch/tile/include/asm/signal.h | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <asm-generic/signal.h> | 24 | #include <asm-generic/signal.h> |
25 | 25 | ||
26 | #if defined(__KERNEL__) && !defined(__ASSEMBLY__) | 26 | #if defined(__KERNEL__) && !defined(__ASSEMBLY__) |
27 | struct pt_regs; | ||
27 | int restore_sigcontext(struct pt_regs *, struct sigcontext __user *, long *); | 28 | int restore_sigcontext(struct pt_regs *, struct sigcontext __user *, long *); |
28 | int setup_sigcontext(struct sigcontext __user *, struct pt_regs *); | 29 | int setup_sigcontext(struct sigcontext __user *, struct pt_regs *); |
29 | void do_signal(struct pt_regs *regs); | 30 | void do_signal(struct pt_regs *regs); |
diff --git a/arch/tile/include/asm/syscalls.h b/arch/tile/include/asm/syscalls.h index af165a74537f..ce99ffefeacf 100644 --- a/arch/tile/include/asm/syscalls.h +++ b/arch/tile/include/asm/syscalls.h | |||
@@ -62,10 +62,12 @@ long sys_fork(void); | |||
62 | long _sys_fork(struct pt_regs *regs); | 62 | long _sys_fork(struct pt_regs *regs); |
63 | long sys_vfork(void); | 63 | long sys_vfork(void); |
64 | long _sys_vfork(struct pt_regs *regs); | 64 | long _sys_vfork(struct pt_regs *regs); |
65 | long sys_execve(char __user *filename, char __user * __user *argv, | 65 | long sys_execve(const char __user *filename, |
66 | char __user * __user *envp); | 66 | const char __user *const __user *argv, |
67 | long _sys_execve(char __user *filename, char __user * __user *argv, | 67 | const char __user *const __user *envp); |
68 | char __user * __user *envp, struct pt_regs *regs); | 68 | long _sys_execve(const char __user *filename, |
69 | const char __user *const __user *argv, | ||
70 | const char __user *const __user *envp, struct pt_regs *regs); | ||
69 | 71 | ||
70 | /* kernel/signal.c */ | 72 | /* kernel/signal.c */ |
71 | long sys_sigaltstack(const stack_t __user *, stack_t __user *); | 73 | long sys_sigaltstack(const stack_t __user *, stack_t __user *); |
@@ -86,10 +88,13 @@ int _sys_cmpxchg_badaddr(unsigned long address, struct pt_regs *); | |||
86 | #endif | 88 | #endif |
87 | 89 | ||
88 | #ifdef CONFIG_COMPAT | 90 | #ifdef CONFIG_COMPAT |
89 | long compat_sys_execve(char __user *path, compat_uptr_t __user *argv, | 91 | long compat_sys_execve(const char __user *path, |
90 | compat_uptr_t __user *envp); | 92 | const compat_uptr_t __user *argv, |
91 | long _compat_sys_execve(char __user *path, compat_uptr_t __user *argv, | 93 | const compat_uptr_t __user *envp); |
92 | compat_uptr_t __user *envp, struct pt_regs *regs); | 94 | long _compat_sys_execve(const char __user *path, |
95 | const compat_uptr_t __user *argv, | ||
96 | const compat_uptr_t __user *envp, | ||
97 | struct pt_regs *regs); | ||
93 | long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr, | 98 | long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr, |
94 | struct compat_sigaltstack __user *uoss_ptr); | 99 | struct compat_sigaltstack __user *uoss_ptr); |
95 | long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr, | 100 | long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr, |
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S index 84f296ca9e63..8f58bdff20d7 100644 --- a/arch/tile/kernel/intvec_32.S +++ b/arch/tile/kernel/intvec_32.S | |||
@@ -1506,13 +1506,6 @@ handle_ill: | |||
1506 | } | 1506 | } |
1507 | STD_ENDPROC(handle_ill) | 1507 | STD_ENDPROC(handle_ill) |
1508 | 1508 | ||
1509 | .pushsection .rodata, "a" | ||
1510 | .align 8 | ||
1511 | bpt_code: | ||
1512 | bpt | ||
1513 | ENDPROC(bpt_code) | ||
1514 | .popsection | ||
1515 | |||
1516 | /* Various stub interrupt handlers and syscall handlers */ | 1509 | /* Various stub interrupt handlers and syscall handlers */ |
1517 | 1510 | ||
1518 | STD_ENTRY_LOCAL(_kernel_double_fault) | 1511 | STD_ENTRY_LOCAL(_kernel_double_fault) |
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c index 985cc28c74c5..84c29111756c 100644 --- a/arch/tile/kernel/process.c +++ b/arch/tile/kernel/process.c | |||
@@ -408,6 +408,15 @@ static void save_arch_state(struct thread_struct *t) | |||
408 | #if CHIP_HAS_PROC_STATUS_SPR() | 408 | #if CHIP_HAS_PROC_STATUS_SPR() |
409 | t->proc_status = __insn_mfspr(SPR_PROC_STATUS); | 409 | t->proc_status = __insn_mfspr(SPR_PROC_STATUS); |
410 | #endif | 410 | #endif |
411 | #if !CHIP_HAS_FIXED_INTVEC_BASE() | ||
412 | t->interrupt_vector_base = __insn_mfspr(SPR_INTERRUPT_VECTOR_BASE_0); | ||
413 | #endif | ||
414 | #if CHIP_HAS_TILE_RTF_HWM() | ||
415 | t->tile_rtf_hwm = __insn_mfspr(SPR_TILE_RTF_HWM); | ||
416 | #endif | ||
417 | #if CHIP_HAS_DSTREAM_PF() | ||
418 | t->dstream_pf = __insn_mfspr(SPR_DSTREAM_PF); | ||
419 | #endif | ||
411 | } | 420 | } |
412 | 421 | ||
413 | static void restore_arch_state(const struct thread_struct *t) | 422 | static void restore_arch_state(const struct thread_struct *t) |
@@ -428,14 +437,14 @@ static void restore_arch_state(const struct thread_struct *t) | |||
428 | #if CHIP_HAS_PROC_STATUS_SPR() | 437 | #if CHIP_HAS_PROC_STATUS_SPR() |
429 | __insn_mtspr(SPR_PROC_STATUS, t->proc_status); | 438 | __insn_mtspr(SPR_PROC_STATUS, t->proc_status); |
430 | #endif | 439 | #endif |
440 | #if !CHIP_HAS_FIXED_INTVEC_BASE() | ||
441 | __insn_mtspr(SPR_INTERRUPT_VECTOR_BASE_0, t->interrupt_vector_base); | ||
442 | #endif | ||
431 | #if CHIP_HAS_TILE_RTF_HWM() | 443 | #if CHIP_HAS_TILE_RTF_HWM() |
432 | /* | 444 | __insn_mtspr(SPR_TILE_RTF_HWM, t->tile_rtf_hwm); |
433 | * Clear this whenever we switch back to a process in case | 445 | #endif |
434 | * the previous process was monkeying with it. Even if enabled | 446 | #if CHIP_HAS_DSTREAM_PF() |
435 | * in CBOX_MSR1 via TILE_RTF_HWM_MIN, it's still just a | 447 | __insn_mtspr(SPR_DSTREAM_PF, t->dstream_pf); |
436 | * performance hint, so isn't worth a full save/restore. | ||
437 | */ | ||
438 | __insn_mtspr(SPR_TILE_RTF_HWM, 0); | ||
439 | #endif | 448 | #endif |
440 | } | 449 | } |
441 | 450 | ||
@@ -561,8 +570,9 @@ out: | |||
561 | } | 570 | } |
562 | 571 | ||
563 | #ifdef CONFIG_COMPAT | 572 | #ifdef CONFIG_COMPAT |
564 | long _compat_sys_execve(char __user *path, compat_uptr_t __user *argv, | 573 | long _compat_sys_execve(const char __user *path, |
565 | compat_uptr_t __user *envp, struct pt_regs *regs) | 574 | const compat_uptr_t __user *argv, |
575 | const compat_uptr_t __user *envp, struct pt_regs *regs) | ||
566 | { | 576 | { |
567 | long error; | 577 | long error; |
568 | char *filename; | 578 | char *filename; |
@@ -657,7 +667,7 @@ void show_regs(struct pt_regs *regs) | |||
657 | regs->regs[51], regs->regs[52], regs->tp); | 667 | regs->regs[51], regs->regs[52], regs->tp); |
658 | pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr); | 668 | pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr); |
659 | #else | 669 | #else |
660 | for (i = 0; i < 52; i += 3) | 670 | for (i = 0; i < 52; i += 4) |
661 | pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT | 671 | pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT |
662 | " r%-2d: "REGFMT" r%-2d: "REGFMT"\n", | 672 | " r%-2d: "REGFMT" r%-2d: "REGFMT"\n", |
663 | i, regs->regs[i], i+1, regs->regs[i+1], | 673 | i, regs->regs[i], i+1, regs->regs[i+1], |
diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c index 45b66a3c991f..ce183aa1492c 100644 --- a/arch/tile/kernel/signal.c +++ b/arch/tile/kernel/signal.c | |||
@@ -61,13 +61,19 @@ int restore_sigcontext(struct pt_regs *regs, | |||
61 | /* Always make any pending restarted system calls return -EINTR */ | 61 | /* Always make any pending restarted system calls return -EINTR */ |
62 | current_thread_info()->restart_block.fn = do_no_restart_syscall; | 62 | current_thread_info()->restart_block.fn = do_no_restart_syscall; |
63 | 63 | ||
64 | /* | ||
65 | * Enforce that sigcontext is like pt_regs, and doesn't mess | ||
66 | * up our stack alignment rules. | ||
67 | */ | ||
68 | BUILD_BUG_ON(sizeof(struct sigcontext) != sizeof(struct pt_regs)); | ||
69 | BUILD_BUG_ON(sizeof(struct sigcontext) % 8 != 0); | ||
70 | |||
64 | for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) | 71 | for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) |
65 | err |= __get_user(((long *)regs)[i], | 72 | err |= __get_user(regs->regs[i], &sc->gregs[i]); |
66 | &((long __user *)(&sc->regs))[i]); | ||
67 | 73 | ||
68 | regs->faultnum = INT_SWINT_1_SIGRETURN; | 74 | regs->faultnum = INT_SWINT_1_SIGRETURN; |
69 | 75 | ||
70 | err |= __get_user(*pr0, &sc->regs.regs[0]); | 76 | err |= __get_user(*pr0, &sc->gregs[0]); |
71 | return err; | 77 | return err; |
72 | } | 78 | } |
73 | 79 | ||
@@ -112,8 +118,7 @@ int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs) | |||
112 | int i, err = 0; | 118 | int i, err = 0; |
113 | 119 | ||
114 | for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) | 120 | for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) |
115 | err |= __put_user(((long *)regs)[i], | 121 | err |= __put_user(regs->regs[i], &sc->gregs[i]); |
116 | &((long __user *)(&sc->regs))[i]); | ||
117 | 122 | ||
118 | return err; | 123 | return err; |
119 | } | 124 | } |
@@ -203,19 +208,17 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
203 | * Set up registers for signal handler. | 208 | * Set up registers for signal handler. |
204 | * Registers that we don't modify keep the value they had from | 209 | * Registers that we don't modify keep the value they had from |
205 | * user-space at the time we took the signal. | 210 | * user-space at the time we took the signal. |
211 | * We always pass siginfo and mcontext, regardless of SA_SIGINFO, | ||
212 | * since some things rely on this (e.g. glibc's debug/segfault.c). | ||
206 | */ | 213 | */ |
207 | regs->pc = (unsigned long) ka->sa.sa_handler; | 214 | regs->pc = (unsigned long) ka->sa.sa_handler; |
208 | regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */ | 215 | regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */ |
209 | regs->sp = (unsigned long) frame; | 216 | regs->sp = (unsigned long) frame; |
210 | regs->lr = restorer; | 217 | regs->lr = restorer; |
211 | regs->regs[0] = (unsigned long) usig; | 218 | regs->regs[0] = (unsigned long) usig; |
212 | 219 | regs->regs[1] = (unsigned long) &frame->info; | |
213 | if (ka->sa.sa_flags & SA_SIGINFO) { | 220 | regs->regs[2] = (unsigned long) &frame->uc; |
214 | /* Need extra arguments, so mark to restore caller-saves. */ | 221 | regs->flags |= PT_FLAGS_CALLER_SAVES; |
215 | regs->regs[1] = (unsigned long) &frame->info; | ||
216 | regs->regs[2] = (unsigned long) &frame->uc; | ||
217 | regs->flags |= PT_FLAGS_CALLER_SAVES; | ||
218 | } | ||
219 | 222 | ||
220 | /* | 223 | /* |
221 | * Notify any tracer that was single-stepping it. | 224 | * Notify any tracer that was single-stepping it. |
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c index 38a68b0b4581..ea2e0ce28380 100644 --- a/arch/tile/kernel/stack.c +++ b/arch/tile/kernel/stack.c | |||
@@ -175,7 +175,7 @@ static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt) | |||
175 | pr_err(" <received signal %d>\n", | 175 | pr_err(" <received signal %d>\n", |
176 | frame->info.si_signo); | 176 | frame->info.si_signo); |
177 | } | 177 | } |
178 | return &frame->uc.uc_mcontext.regs; | 178 | return (struct pt_regs *)&frame->uc.uc_mcontext; |
179 | } | 179 | } |
180 | return NULL; | 180 | return NULL; |
181 | } | 181 | } |
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c index 2ab233ba32c1..47d0c37897d5 100644 --- a/arch/um/drivers/net_kern.c +++ b/arch/um/drivers/net_kern.c | |||
@@ -255,18 +255,6 @@ static void uml_net_tx_timeout(struct net_device *dev) | |||
255 | netif_wake_queue(dev); | 255 | netif_wake_queue(dev); |
256 | } | 256 | } |
257 | 257 | ||
258 | static int uml_net_set_mac(struct net_device *dev, void *addr) | ||
259 | { | ||
260 | struct uml_net_private *lp = netdev_priv(dev); | ||
261 | struct sockaddr *hwaddr = addr; | ||
262 | |||
263 | spin_lock_irq(&lp->lock); | ||
264 | eth_mac_addr(dev, hwaddr->sa_data); | ||
265 | spin_unlock_irq(&lp->lock); | ||
266 | |||
267 | return 0; | ||
268 | } | ||
269 | |||
270 | static int uml_net_change_mtu(struct net_device *dev, int new_mtu) | 258 | static int uml_net_change_mtu(struct net_device *dev, int new_mtu) |
271 | { | 259 | { |
272 | dev->mtu = new_mtu; | 260 | dev->mtu = new_mtu; |
@@ -373,7 +361,7 @@ static const struct net_device_ops uml_netdev_ops = { | |||
373 | .ndo_start_xmit = uml_net_start_xmit, | 361 | .ndo_start_xmit = uml_net_start_xmit, |
374 | .ndo_set_multicast_list = uml_net_set_multicast_list, | 362 | .ndo_set_multicast_list = uml_net_set_multicast_list, |
375 | .ndo_tx_timeout = uml_net_tx_timeout, | 363 | .ndo_tx_timeout = uml_net_tx_timeout, |
376 | .ndo_set_mac_address = uml_net_set_mac, | 364 | .ndo_set_mac_address = eth_mac_addr, |
377 | .ndo_change_mtu = uml_net_change_mtu, | 365 | .ndo_change_mtu = uml_net_change_mtu, |
378 | .ndo_validate_addr = eth_validate_addr, | 366 | .ndo_validate_addr = eth_validate_addr, |
379 | }; | 367 | }; |
@@ -472,7 +460,8 @@ static void eth_configure(int n, void *init, char *mac, | |||
472 | ((*transport->user->init)(&lp->user, dev) != 0)) | 460 | ((*transport->user->init)(&lp->user, dev) != 0)) |
473 | goto out_unregister; | 461 | goto out_unregister; |
474 | 462 | ||
475 | eth_mac_addr(dev, device->mac); | 463 | /* don't use eth_mac_addr, it will not work here */ |
464 | memcpy(dev->dev_addr, device->mac, ETH_ALEN); | ||
476 | dev->mtu = transport->user->mtu; | 465 | dev->mtu = transport->user->mtu; |
477 | dev->netdev_ops = &uml_netdev_ops; | 466 | dev->netdev_ops = &uml_netdev_ops; |
478 | dev->ethtool_ops = &uml_net_ethtool_ops; | 467 | dev->ethtool_ops = &uml_net_ethtool_ops; |
diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c index cd145eda3579..49b5e1eb3262 100644 --- a/arch/um/kernel/exec.c +++ b/arch/um/kernel/exec.c | |||
@@ -62,7 +62,7 @@ static long execve1(const char *file, | |||
62 | return error; | 62 | return error; |
63 | } | 63 | } |
64 | 64 | ||
65 | long um_execve(const char *file, char __user *__user *argv, char __user *__user *env) | 65 | long um_execve(const char *file, const char __user *const __user *argv, const char __user *const __user *env) |
66 | { | 66 | { |
67 | long err; | 67 | long err; |
68 | 68 | ||
@@ -72,8 +72,8 @@ long um_execve(const char *file, char __user *__user *argv, char __user *__user | |||
72 | return err; | 72 | return err; |
73 | } | 73 | } |
74 | 74 | ||
75 | long sys_execve(const char __user *file, char __user *__user *argv, | 75 | long sys_execve(const char __user *file, const char __user *const __user *argv, |
76 | char __user *__user *env) | 76 | const char __user *const __user *env) |
77 | { | 77 | { |
78 | long error; | 78 | long error; |
79 | char *filename; | 79 | char *filename; |
diff --git a/arch/um/kernel/internal.h b/arch/um/kernel/internal.h index 1303a105fe91..5bf97db24a04 100644 --- a/arch/um/kernel/internal.h +++ b/arch/um/kernel/internal.h | |||
@@ -1 +1 @@ | |||
extern long um_execve(const char *file, char __user *__user *argv, char __user *__user *env); | extern long um_execve(const char *file, const char __user *const __user *argv, const char __user *const __user *env); | ||
diff --git a/arch/um/kernel/syscall.c b/arch/um/kernel/syscall.c index 5ddb246626db..f958cb876ee3 100644 --- a/arch/um/kernel/syscall.c +++ b/arch/um/kernel/syscall.c | |||
@@ -60,8 +60,8 @@ int kernel_execve(const char *filename, | |||
60 | 60 | ||
61 | fs = get_fs(); | 61 | fs = get_fs(); |
62 | set_fs(KERNEL_DS); | 62 | set_fs(KERNEL_DS); |
63 | ret = um_execve(filename, (char __user *__user *)argv, | 63 | ret = um_execve(filename, (const char __user *const __user *)argv, |
64 | (char __user *__user *) envp); | 64 | (const char __user *const __user *) envp); |
65 | set_fs(fs); | 65 | set_fs(fs); |
66 | 66 | ||
67 | return ret; | 67 | return ret; |
diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 8aa1b59b9074..e8c8881351b3 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile | |||
@@ -74,7 +74,7 @@ endif | |||
74 | 74 | ||
75 | ifdef CONFIG_CC_STACKPROTECTOR | 75 | ifdef CONFIG_CC_STACKPROTECTOR |
76 | cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh | 76 | cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh |
77 | ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(biarch)),y) | 77 | ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(KBUILD_CPPFLAGS) $(biarch)),y) |
78 | stackp-y := -fstack-protector | 78 | stackp-y := -fstack-protector |
79 | KBUILD_CFLAGS += $(stackp-y) | 79 | KBUILD_CFLAGS += $(stackp-y) |
80 | else | 80 | else |
diff --git a/arch/x86/boot/early_serial_console.c b/arch/x86/boot/early_serial_console.c index 030f4b93e255..5df2869c874b 100644 --- a/arch/x86/boot/early_serial_console.c +++ b/arch/x86/boot/early_serial_console.c | |||
@@ -58,7 +58,19 @@ static void parse_earlyprintk(void) | |||
58 | if (arg[pos] == ',') | 58 | if (arg[pos] == ',') |
59 | pos++; | 59 | pos++; |
60 | 60 | ||
61 | if (!strncmp(arg, "ttyS", 4)) { | 61 | /* |
62 | * make sure we have | ||
63 | * "serial,0x3f8,115200" | ||
64 | * "serial,ttyS0,115200" | ||
65 | * "ttyS0,115200" | ||
66 | */ | ||
67 | if (pos == 7 && !strncmp(arg + pos, "0x", 2)) { | ||
68 | port = simple_strtoull(arg + pos, &e, 16); | ||
69 | if (port == 0 || arg + pos == e) | ||
70 | port = DEFAULT_SERIAL_PORT; | ||
71 | else | ||
72 | pos = e - arg; | ||
73 | } else if (!strncmp(arg + pos, "ttyS", 4)) { | ||
62 | static const int bases[] = { 0x3f8, 0x2f8 }; | 74 | static const int bases[] = { 0x3f8, 0x2f8 }; |
63 | int idx = 0; | 75 | int idx = 0; |
64 | 76 | ||
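The early_serial_console.c hunk above lets the boot-time earlyprintk parser accept a raw hex I/O port ("serial,0x3f8,115200") as well as the ttyS names. The stand-alone sketch below mirrors that accept-either-form logic with plain libc so it can be compiled and run in user space; the function name and the DEFAULT_SERIAL_PORT fallback are illustrative stand-ins, not the kernel's own helpers.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DEFAULT_SERIAL_PORT 0x3f8UL	/* illustrative fallback value */

/* Accepts "serial,0x3f8,115200", "serial,ttyS0,115200" or "ttyS0,115200"
 * and returns the I/O port to drive. */
static unsigned long parse_earlyprintk_port(const char *arg)
{
	static const unsigned long bases[] = { 0x3f8, 0x2f8 };
	const char *p = arg;
	char *end;

	if (!strncmp(p, "serial,", 7))
		p += 7;				/* skip the "serial," prefix */

	if (!strncmp(p, "0x", 2)) {
		/* Explicit hex port number, e.g. 0x3f8. */
		unsigned long port = strtoul(p, &end, 16);
		return (port && end != p) ? port : DEFAULT_SERIAL_PORT;
	}

	if (!strncmp(p, "ttyS", 4)) {
		/* ttyS0 -> COM1 base, ttyS1 -> COM2 base. */
		int idx = (p[4] == '1') ? 1 : 0;
		return bases[idx];
	}

	return DEFAULT_SERIAL_PORT;
}

int main(void)
{
	printf("%#lx\n", parse_earlyprintk_port("serial,0x3f8,115200"));
	printf("%#lx\n", parse_earlyprintk_port("ttyS1,115200"));
	return 0;
}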
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index b86feabed69b..518bb99c3394 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S | |||
@@ -50,7 +50,12 @@ | |||
50 | /* | 50 | /* |
51 | * Reload arg registers from stack in case ptrace changed them. | 51 | * Reload arg registers from stack in case ptrace changed them. |
52 | * We don't reload %eax because syscall_trace_enter() returned | 52 | * We don't reload %eax because syscall_trace_enter() returned |
53 | * the value it wants us to use in the table lookup. | 53 | * the %rax value we should see. Instead, we just truncate that |
54 | * value to 32 bits again as we did on entry from user mode. | ||
55 | * If it's a new value set by user_regset during entry tracing, | ||
56 | * this matches the normal truncation of the user-mode value. | ||
57 | * If it's -1 to make us punt the syscall, then (u32)-1 is still | ||
58 | * an appropriately invalid value. | ||
54 | */ | 59 | */ |
55 | .macro LOAD_ARGS32 offset, _r9=0 | 60 | .macro LOAD_ARGS32 offset, _r9=0 |
56 | .if \_r9 | 61 | .if \_r9 |
@@ -60,6 +65,7 @@ | |||
60 | movl \offset+48(%rsp),%edx | 65 | movl \offset+48(%rsp),%edx |
61 | movl \offset+56(%rsp),%esi | 66 | movl \offset+56(%rsp),%esi |
62 | movl \offset+64(%rsp),%edi | 67 | movl \offset+64(%rsp),%edi |
68 | movl %eax,%eax /* zero extension */ | ||
63 | .endm | 69 | .endm |
64 | 70 | ||
65 | .macro CFI_STARTPROC32 simple | 71 | .macro CFI_STARTPROC32 simple |
@@ -153,7 +159,7 @@ ENTRY(ia32_sysenter_target) | |||
153 | testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) | 159 | testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) |
154 | CFI_REMEMBER_STATE | 160 | CFI_REMEMBER_STATE |
155 | jnz sysenter_tracesys | 161 | jnz sysenter_tracesys |
156 | cmpl $(IA32_NR_syscalls-1),%eax | 162 | cmpq $(IA32_NR_syscalls-1),%rax |
157 | ja ia32_badsys | 163 | ja ia32_badsys |
158 | sysenter_do_call: | 164 | sysenter_do_call: |
159 | IA32_ARG_FIXUP | 165 | IA32_ARG_FIXUP |
@@ -195,7 +201,7 @@ sysexit_from_sys_call: | |||
195 | movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */ | 201 | movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */ |
196 | call audit_syscall_entry | 202 | call audit_syscall_entry |
197 | movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */ | 203 | movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */ |
198 | cmpl $(IA32_NR_syscalls-1),%eax | 204 | cmpq $(IA32_NR_syscalls-1),%rax |
199 | ja ia32_badsys | 205 | ja ia32_badsys |
200 | movl %ebx,%edi /* reload 1st syscall arg */ | 206 | movl %ebx,%edi /* reload 1st syscall arg */ |
201 | movl RCX-ARGOFFSET(%rsp),%esi /* reload 2nd syscall arg */ | 207 | movl RCX-ARGOFFSET(%rsp),%esi /* reload 2nd syscall arg */ |
@@ -248,7 +254,7 @@ sysenter_tracesys: | |||
248 | call syscall_trace_enter | 254 | call syscall_trace_enter |
249 | LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ | 255 | LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ |
250 | RESTORE_REST | 256 | RESTORE_REST |
251 | cmpl $(IA32_NR_syscalls-1),%eax | 257 | cmpq $(IA32_NR_syscalls-1),%rax |
252 | ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */ | 258 | ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */ |
253 | jmp sysenter_do_call | 259 | jmp sysenter_do_call |
254 | CFI_ENDPROC | 260 | CFI_ENDPROC |
@@ -314,7 +320,7 @@ ENTRY(ia32_cstar_target) | |||
314 | testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) | 320 | testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) |
315 | CFI_REMEMBER_STATE | 321 | CFI_REMEMBER_STATE |
316 | jnz cstar_tracesys | 322 | jnz cstar_tracesys |
317 | cmpl $IA32_NR_syscalls-1,%eax | 323 | cmpq $IA32_NR_syscalls-1,%rax |
318 | ja ia32_badsys | 324 | ja ia32_badsys |
319 | cstar_do_call: | 325 | cstar_do_call: |
320 | IA32_ARG_FIXUP 1 | 326 | IA32_ARG_FIXUP 1 |
@@ -367,7 +373,7 @@ cstar_tracesys: | |||
367 | LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */ | 373 | LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */ |
368 | RESTORE_REST | 374 | RESTORE_REST |
369 | xchgl %ebp,%r9d | 375 | xchgl %ebp,%r9d |
370 | cmpl $(IA32_NR_syscalls-1),%eax | 376 | cmpq $(IA32_NR_syscalls-1),%rax |
371 | ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */ | 377 | ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */ |
372 | jmp cstar_do_call | 378 | jmp cstar_do_call |
373 | END(ia32_cstar_target) | 379 | END(ia32_cstar_target) |
@@ -425,7 +431,7 @@ ENTRY(ia32_syscall) | |||
425 | orl $TS_COMPAT,TI_status(%r10) | 431 | orl $TS_COMPAT,TI_status(%r10) |
426 | testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) | 432 | testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) |
427 | jnz ia32_tracesys | 433 | jnz ia32_tracesys |
428 | cmpl $(IA32_NR_syscalls-1),%eax | 434 | cmpq $(IA32_NR_syscalls-1),%rax |
429 | ja ia32_badsys | 435 | ja ia32_badsys |
430 | ia32_do_call: | 436 | ia32_do_call: |
431 | IA32_ARG_FIXUP | 437 | IA32_ARG_FIXUP |
@@ -444,7 +450,7 @@ ia32_tracesys: | |||
444 | call syscall_trace_enter | 450 | call syscall_trace_enter |
445 | LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ | 451 | LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ |
446 | RESTORE_REST | 452 | RESTORE_REST |
447 | cmpl $(IA32_NR_syscalls-1),%eax | 453 | cmpq $(IA32_NR_syscalls-1),%rax |
448 | ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */ | 454 | ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */ |
449 | jmp ia32_do_call | 455 | jmp ia32_do_call |
450 | END(ia32_syscall) | 456 | END(ia32_syscall) |
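The ia32entry.S changes above pair the new "movl %eax,%eax" zero-extension with cmpq bounds checks, so the value that is range-checked is the same full 64-bit register that is used afterwards. The user-space snippet below illustrates why a cmpl-only check was insufficient once a tracer could leave a full 64-bit value in %rax; the syscall count is a made-up number for the demo, and the "value used afterwards" detail is inferred from the patch comment rather than shown in this hunk.

#include <stdint.h>
#include <stdio.h>

#define IA32_NR_SYSCALLS 341u	/* made-up table size, for illustration only */

int main(void)
{
	/* A tracer stored a 64-bit value whose low half looks valid. */
	uint64_t rax = 0xbadc0de500000003ull;

	/* Old sequence: cmpl compares only the low 32 bits, so the
	 * bounds check passes even though the full register is wild. */
	int old_check_ok = (uint32_t)rax <= IA32_NR_SYSCALLS - 1;

	/* New sequence: zero-extend first (movl %eax,%eax), then compare
	 * the whole register (cmpq).  The checked value and the value
	 * used afterwards are now identical, and (u32)-1 stays invalid. */
	uint64_t rax_zext = (uint32_t)rax;
	int new_check_ok = rax_zext <= IA32_NR_SYSCALLS - 1;

	printf("old: check %s with full rax %#llx\n",
	       old_check_ok ? "passes" : "fails",
	       (unsigned long long)rax);
	printf("new: check %s with rax %#llx\n",
	       new_check_ok ? "passes" : "fails",
	       (unsigned long long)rax_zext);
	return 0;
}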
diff --git a/arch/x86/include/asm/amd_iommu_proto.h b/arch/x86/include/asm/amd_iommu_proto.h index d2544f1d705d..cb030374b90a 100644 --- a/arch/x86/include/asm/amd_iommu_proto.h +++ b/arch/x86/include/asm/amd_iommu_proto.h | |||
@@ -38,4 +38,10 @@ static inline void amd_iommu_stats_init(void) { } | |||
38 | 38 | ||
39 | #endif /* !CONFIG_AMD_IOMMU_STATS */ | 39 | #endif /* !CONFIG_AMD_IOMMU_STATS */ |
40 | 40 | ||
41 | static inline bool is_rd890_iommu(struct pci_dev *pdev) | ||
42 | { | ||
43 | return (pdev->vendor == PCI_VENDOR_ID_ATI) && | ||
44 | (pdev->device == PCI_DEVICE_ID_RD890_IOMMU); | ||
45 | } | ||
46 | |||
41 | #endif /* _ASM_X86_AMD_IOMMU_PROTO_H */ | 47 | #endif /* _ASM_X86_AMD_IOMMU_PROTO_H */ |
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h index 7014e88bc779..08616180deaf 100644 --- a/arch/x86/include/asm/amd_iommu_types.h +++ b/arch/x86/include/asm/amd_iommu_types.h | |||
@@ -368,6 +368,9 @@ struct amd_iommu { | |||
368 | /* capabilities of that IOMMU read from ACPI */ | 368 | /* capabilities of that IOMMU read from ACPI */ |
369 | u32 cap; | 369 | u32 cap; |
370 | 370 | ||
371 | /* flags read from acpi table */ | ||
372 | u8 acpi_flags; | ||
373 | |||
371 | /* | 374 | /* |
372 | * Capability pointer. There could be more than one IOMMU per PCI | 375 | * Capability pointer. There could be more than one IOMMU per PCI |
373 | * device function if there are more than one AMD IOMMU capability | 376 | * device function if there are more than one AMD IOMMU capability |
@@ -411,6 +414,15 @@ struct amd_iommu { | |||
411 | 414 | ||
412 | /* default dma_ops domain for that IOMMU */ | 415 | /* default dma_ops domain for that IOMMU */ |
413 | struct dma_ops_domain *default_dom; | 416 | struct dma_ops_domain *default_dom; |
417 | |||
418 | /* | ||
419 | * This array is required to work around a potential BIOS bug. | ||
420 | * The BIOS may miss to restore parts of the PCI configuration | ||
421 | * space when the system resumes from S3. The result is that the | ||
422 | * IOMMU does not execute commands anymore which leads to system | ||
423 | * failure. | ||
424 | */ | ||
425 | u32 cache_cfg[4]; | ||
414 | }; | 426 | }; |
415 | 427 | ||
416 | /* | 428 | /* |
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h index 545776efeb16..bafd80defa43 100644 --- a/arch/x86/include/asm/bitops.h +++ b/arch/x86/include/asm/bitops.h | |||
@@ -309,7 +309,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr) | |||
309 | static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr) | 309 | static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr) |
310 | { | 310 | { |
311 | return ((1UL << (nr % BITS_PER_LONG)) & | 311 | return ((1UL << (nr % BITS_PER_LONG)) & |
312 | (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0; | 312 | (addr[nr / BITS_PER_LONG])) != 0; |
313 | } | 313 | } |
314 | 314 | ||
315 | static inline int variable_test_bit(int nr, volatile const unsigned long *addr) | 315 | static inline int variable_test_bit(int nr, volatile const unsigned long *addr) |
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h index 306160e58b48..1d9cd27c2920 100644 --- a/arch/x86/include/asm/compat.h +++ b/arch/x86/include/asm/compat.h | |||
@@ -205,7 +205,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr) | |||
205 | return (u32)(unsigned long)uptr; | 205 | return (u32)(unsigned long)uptr; |
206 | } | 206 | } |
207 | 207 | ||
208 | static inline void __user *compat_alloc_user_space(long len) | 208 | static inline void __user *arch_compat_alloc_user_space(long len) |
209 | { | 209 | { |
210 | struct pt_regs *regs = task_pt_regs(current); | 210 | struct pt_regs *regs = task_pt_regs(current); |
211 | return (void __user *)regs->sp - len; | 211 | return (void __user *)regs->sp - len; |
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 781a50b29a49..3f76523589af 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h | |||
@@ -168,6 +168,7 @@ | |||
168 | #define X86_FEATURE_XSAVEOPT (7*32+ 4) /* Optimized Xsave */ | 168 | #define X86_FEATURE_XSAVEOPT (7*32+ 4) /* Optimized Xsave */ |
169 | #define X86_FEATURE_PLN (7*32+ 5) /* Intel Power Limit Notification */ | 169 | #define X86_FEATURE_PLN (7*32+ 5) /* Intel Power Limit Notification */ |
170 | #define X86_FEATURE_PTS (7*32+ 6) /* Intel Package Thermal Status */ | 170 | #define X86_FEATURE_PTS (7*32+ 6) /* Intel Package Thermal Status */ |
171 | #define X86_FEATURE_DTS (7*32+ 7) /* Digital Thermal Sensor */ | ||
171 | 172 | ||
172 | /* Virtualization flags: Linux defined, word 8 */ | 173 | /* Virtualization flags: Linux defined, word 8 */ |
173 | #define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */ | 174 | #define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */ |
@@ -296,6 +297,7 @@ extern const char * const x86_power_flags[32]; | |||
296 | 297 | ||
297 | #endif /* CONFIG_X86_64 */ | 298 | #endif /* CONFIG_X86_64 */ |
298 | 299 | ||
300 | #if __GNUC__ >= 4 | ||
299 | /* | 301 | /* |
300 | * Static testing of CPU features. Used the same as boot_cpu_has(). | 302 | * Static testing of CPU features. Used the same as boot_cpu_has(). |
301 | * These are only valid after alternatives have run, but will statically | 303 | * These are only valid after alternatives have run, but will statically |
@@ -304,7 +306,7 @@ extern const char * const x86_power_flags[32]; | |||
304 | */ | 306 | */ |
305 | static __always_inline __pure bool __static_cpu_has(u16 bit) | 307 | static __always_inline __pure bool __static_cpu_has(u16 bit) |
306 | { | 308 | { |
307 | #if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5) | 309 | #if __GNUC__ > 4 || __GNUC_MINOR__ >= 5 |
308 | asm goto("1: jmp %l[t_no]\n" | 310 | asm goto("1: jmp %l[t_no]\n" |
309 | "2:\n" | 311 | "2:\n" |
310 | ".section .altinstructions,\"a\"\n" | 312 | ".section .altinstructions,\"a\"\n" |
@@ -345,7 +347,6 @@ static __always_inline __pure bool __static_cpu_has(u16 bit) | |||
345 | #endif | 347 | #endif |
346 | } | 348 | } |
347 | 349 | ||
348 | #if __GNUC__ >= 4 | ||
349 | #define static_cpu_has(bit) \ | 350 | #define static_cpu_has(bit) \ |
350 | ( \ | 351 | ( \ |
351 | __builtin_constant_p(boot_cpu_has(bit)) ? \ | 352 | __builtin_constant_p(boot_cpu_has(bit)) ? \ |
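The cpufeature.h hunks move the __GNUC__ >= 4 guard so that __static_cpu_has() is only compiled at all on GCC 4 and newer, which lets the inner test for asm goto support check just the minor version. A hedged, stand-alone sketch of that preprocessor gating pattern follows; the HAVE_ASM_GOTO macro is made up for the example.

#include <stdio.h>

/* Inside an outer "#if __GNUC__ >= 4" block, testing
 * "__GNUC__ > 4 || __GNUC_MINOR__ >= 5" is equivalent to requiring
 * GCC 4.5 or newer. */
#if defined(__GNUC__) && __GNUC__ >= 4
# if __GNUC__ > 4 || __GNUC_MINOR__ >= 5
#  define HAVE_ASM_GOTO 1
# else
#  define HAVE_ASM_GOTO 0
# endif
#else
# define HAVE_ASM_GOTO 0
#endif

int main(void)
{
        printf("asm goto assumed available: %d\n", HAVE_ASM_GOTO);
        return 0;
}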
diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h index 004e6e25e913..1d5c08a1bdfd 100644 --- a/arch/x86/include/asm/hpet.h +++ b/arch/x86/include/asm/hpet.h | |||
@@ -68,7 +68,6 @@ extern unsigned long force_hpet_address; | |||
68 | extern u8 hpet_blockid; | 68 | extern u8 hpet_blockid; |
69 | extern int hpet_force_user; | 69 | extern int hpet_force_user; |
70 | extern u8 hpet_msi_disable; | 70 | extern u8 hpet_msi_disable; |
71 | extern u8 hpet_readback_cmp; | ||
72 | extern int is_hpet_enabled(void); | 71 | extern int is_hpet_enabled(void); |
73 | extern int hpet_enable(void); | 72 | extern int hpet_enable(void); |
74 | extern void hpet_disable(void); | 73 | extern void hpet_disable(void); |
diff --git a/arch/x86/include/asm/hw_breakpoint.h b/arch/x86/include/asm/hw_breakpoint.h index 528a11e8d3e3..824ca07860d0 100644 --- a/arch/x86/include/asm/hw_breakpoint.h +++ b/arch/x86/include/asm/hw_breakpoint.h | |||
@@ -20,7 +20,7 @@ struct arch_hw_breakpoint { | |||
20 | #include <linux/list.h> | 20 | #include <linux/list.h> |
21 | 21 | ||
22 | /* Available HW breakpoint length encodings */ | 22 | /* Available HW breakpoint length encodings */ |
23 | #define X86_BREAKPOINT_LEN_X 0x00 | 23 | #define X86_BREAKPOINT_LEN_X 0x40 |
24 | #define X86_BREAKPOINT_LEN_1 0x40 | 24 | #define X86_BREAKPOINT_LEN_1 0x40 |
25 | #define X86_BREAKPOINT_LEN_2 0x44 | 25 | #define X86_BREAKPOINT_LEN_2 0x44 |
26 | #define X86_BREAKPOINT_LEN_4 0x4c | 26 | #define X86_BREAKPOINT_LEN_4 0x4c |
diff --git a/arch/x86/include/asm/iomap.h b/arch/x86/include/asm/iomap.h index f35eb45d6576..c4191b3b7056 100644 --- a/arch/x86/include/asm/iomap.h +++ b/arch/x86/include/asm/iomap.h | |||
@@ -26,11 +26,11 @@ | |||
26 | #include <asm/pgtable.h> | 26 | #include <asm/pgtable.h> |
27 | #include <asm/tlbflush.h> | 27 | #include <asm/tlbflush.h> |
28 | 28 | ||
29 | void * | 29 | void __iomem * |
30 | iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot); | 30 | iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot); |
31 | 31 | ||
32 | void | 32 | void |
33 | iounmap_atomic(void *kvaddr, enum km_type type); | 33 | iounmap_atomic(void __iomem *kvaddr, enum km_type type); |
34 | 34 | ||
35 | int | 35 | int |
36 | iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot); | 36 | iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot); |
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h index 51cfd730ac5d..1f99ecfc48e1 100644 --- a/arch/x86/include/asm/kvm_emulate.h +++ b/arch/x86/include/asm/kvm_emulate.h | |||
@@ -152,9 +152,14 @@ struct x86_emulate_ops { | |||
152 | struct operand { | 152 | struct operand { |
153 | enum { OP_REG, OP_MEM, OP_IMM, OP_NONE } type; | 153 | enum { OP_REG, OP_MEM, OP_IMM, OP_NONE } type; |
154 | unsigned int bytes; | 154 | unsigned int bytes; |
155 | unsigned long orig_val, *ptr; | 155 | union { |
156 | unsigned long orig_val; | ||
157 | u64 orig_val64; | ||
158 | }; | ||
159 | unsigned long *ptr; | ||
156 | union { | 160 | union { |
157 | unsigned long val; | 161 | unsigned long val; |
162 | u64 val64; | ||
158 | char valptr[sizeof(unsigned long) + 2]; | 163 | char valptr[sizeof(unsigned long) + 2]; |
159 | }; | 164 | }; |
160 | }; | 165 | }; |
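The operand struct gains 64-bit views (orig_val64 and val64) alongside the unsigned long members so that cmpxchg8b emulation does not truncate the old value on 32-bit builds, where unsigned long is only 32 bits wide. A small sketch of the same union trick is below; it names the union u for portability, whereas the kernel uses an anonymous union.

#include <stdio.h>
#include <stdint.h>

struct operand_ex {
        union {
                unsigned long val;      /* native-width view */
                uint64_t val64;         /* full 64-bit view, needed on 32-bit builds */
        } u;
};

int main(void)
{
        struct operand_ex op;

        op.u.val64 = 0x1122334455667788ULL;     /* e.g. the cmpxchg8b old value */
        printf("64-bit view:        0x%llx\n", (unsigned long long)op.u.val64);
        printf("unsigned long view: 0x%lx\n", op.u.val);
        return 0;
}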
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h index 404a880ea325..d395540ff894 100644 --- a/arch/x86/include/asm/pci.h +++ b/arch/x86/include/asm/pci.h | |||
@@ -27,6 +27,9 @@ extern struct pci_bus *pci_scan_bus_on_node(int busno, struct pci_ops *ops, | |||
27 | int node); | 27 | int node); |
28 | extern struct pci_bus *pci_scan_bus_with_sysdata(int busno); | 28 | extern struct pci_bus *pci_scan_bus_with_sysdata(int busno); |
29 | 29 | ||
30 | #ifdef CONFIG_PCI | ||
31 | |||
32 | #ifdef CONFIG_PCI_DOMAINS | ||
30 | static inline int pci_domain_nr(struct pci_bus *bus) | 33 | static inline int pci_domain_nr(struct pci_bus *bus) |
31 | { | 34 | { |
32 | struct pci_sysdata *sd = bus->sysdata; | 35 | struct pci_sysdata *sd = bus->sysdata; |
@@ -37,13 +40,12 @@ static inline int pci_proc_domain(struct pci_bus *bus) | |||
37 | { | 40 | { |
38 | return pci_domain_nr(bus); | 41 | return pci_domain_nr(bus); |
39 | } | 42 | } |
40 | 43 | #endif | |
41 | 44 | ||
42 | /* Can be used to override the logic in pci_scan_bus for skipping | 45 | /* Can be used to override the logic in pci_scan_bus for skipping |
43 | already-configured bus numbers - to be used for buggy BIOSes | 46 | already-configured bus numbers - to be used for buggy BIOSes |
44 | or architectures with incomplete PCI setup by the loader */ | 47 | or architectures with incomplete PCI setup by the loader */ |
45 | 48 | ||
46 | #ifdef CONFIG_PCI | ||
47 | extern unsigned int pcibios_assign_all_busses(void); | 49 | extern unsigned int pcibios_assign_all_busses(void); |
48 | extern int pci_legacy_init(void); | 50 | extern int pci_legacy_init(void); |
49 | # ifdef CONFIG_ACPI | 51 | # ifdef CONFIG_ACPI |
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 0925676266bd..fedf32a8c3ec 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile | |||
@@ -11,6 +11,8 @@ ifdef CONFIG_FUNCTION_TRACER | |||
11 | CFLAGS_REMOVE_tsc.o = -pg | 11 | CFLAGS_REMOVE_tsc.o = -pg |
12 | CFLAGS_REMOVE_rtc.o = -pg | 12 | CFLAGS_REMOVE_rtc.o = -pg |
13 | CFLAGS_REMOVE_paravirt-spinlocks.o = -pg | 13 | CFLAGS_REMOVE_paravirt-spinlocks.o = -pg |
14 | CFLAGS_REMOVE_pvclock.o = -pg | ||
15 | CFLAGS_REMOVE_kvmclock.o = -pg | ||
14 | CFLAGS_REMOVE_ftrace.o = -pg | 16 | CFLAGS_REMOVE_ftrace.o = -pg |
15 | CFLAGS_REMOVE_early_printk.o = -pg | 17 | CFLAGS_REMOVE_early_printk.o = -pg |
16 | endif | 18 | endif |
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c index fb7a5f052e2b..fb16f17e59be 100644 --- a/arch/x86/kernel/acpi/cstate.c +++ b/arch/x86/kernel/acpi/cstate.c | |||
@@ -61,7 +61,7 @@ struct cstate_entry { | |||
61 | unsigned int ecx; | 61 | unsigned int ecx; |
62 | } states[ACPI_PROCESSOR_MAX_POWER]; | 62 | } states[ACPI_PROCESSOR_MAX_POWER]; |
63 | }; | 63 | }; |
64 | static struct cstate_entry *cpu_cstate_entry; /* per CPU ptr */ | 64 | static struct cstate_entry __percpu *cpu_cstate_entry; /* per CPU ptr */ |
65 | 65 | ||
66 | static short mwait_supported[ACPI_PROCESSOR_MAX_POWER]; | 66 | static short mwait_supported[ACPI_PROCESSOR_MAX_POWER]; |
67 | 67 | ||
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index fa044e1e30a2..679b6450382b 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c | |||
@@ -1953,6 +1953,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom, | |||
1953 | size_t size, | 1953 | size_t size, |
1954 | int dir) | 1954 | int dir) |
1955 | { | 1955 | { |
1956 | dma_addr_t flush_addr; | ||
1956 | dma_addr_t i, start; | 1957 | dma_addr_t i, start; |
1957 | unsigned int pages; | 1958 | unsigned int pages; |
1958 | 1959 | ||
@@ -1960,6 +1961,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom, | |||
1960 | (dma_addr + size > dma_dom->aperture_size)) | 1961 | (dma_addr + size > dma_dom->aperture_size)) |
1961 | return; | 1962 | return; |
1962 | 1963 | ||
1964 | flush_addr = dma_addr; | ||
1963 | pages = iommu_num_pages(dma_addr, size, PAGE_SIZE); | 1965 | pages = iommu_num_pages(dma_addr, size, PAGE_SIZE); |
1964 | dma_addr &= PAGE_MASK; | 1966 | dma_addr &= PAGE_MASK; |
1965 | start = dma_addr; | 1967 | start = dma_addr; |
@@ -1974,7 +1976,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom, | |||
1974 | dma_ops_free_addresses(dma_dom, dma_addr, pages); | 1976 | dma_ops_free_addresses(dma_dom, dma_addr, pages); |
1975 | 1977 | ||
1976 | if (amd_iommu_unmap_flush || dma_dom->need_flush) { | 1978 | if (amd_iommu_unmap_flush || dma_dom->need_flush) { |
1977 | iommu_flush_pages(&dma_dom->domain, dma_addr, size); | 1979 | iommu_flush_pages(&dma_dom->domain, flush_addr, size); |
1978 | dma_dom->need_flush = false; | 1980 | dma_dom->need_flush = false; |
1979 | } | 1981 | } |
1980 | } | 1982 | } |
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c index 3cc63e2b8dd4..5a170cbbbed8 100644 --- a/arch/x86/kernel/amd_iommu_init.c +++ b/arch/x86/kernel/amd_iommu_init.c | |||
@@ -632,6 +632,13 @@ static void __init init_iommu_from_pci(struct amd_iommu *iommu) | |||
632 | iommu->last_device = calc_devid(MMIO_GET_BUS(range), | 632 | iommu->last_device = calc_devid(MMIO_GET_BUS(range), |
633 | MMIO_GET_LD(range)); | 633 | MMIO_GET_LD(range)); |
634 | iommu->evt_msi_num = MMIO_MSI_NUM(misc); | 634 | iommu->evt_msi_num = MMIO_MSI_NUM(misc); |
635 | |||
636 | if (is_rd890_iommu(iommu->dev)) { | ||
637 | pci_read_config_dword(iommu->dev, 0xf0, &iommu->cache_cfg[0]); | ||
638 | pci_read_config_dword(iommu->dev, 0xf4, &iommu->cache_cfg[1]); | ||
639 | pci_read_config_dword(iommu->dev, 0xf8, &iommu->cache_cfg[2]); | ||
640 | pci_read_config_dword(iommu->dev, 0xfc, &iommu->cache_cfg[3]); | ||
641 | } | ||
635 | } | 642 | } |
636 | 643 | ||
637 | /* | 644 | /* |
@@ -649,29 +656,9 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu, | |||
649 | struct ivhd_entry *e; | 656 | struct ivhd_entry *e; |
650 | 657 | ||
651 | /* | 658 | /* |
652 | * First set the recommended feature enable bits from ACPI | 659 | * First save the recommended feature enable bits from ACPI |
653 | * into the IOMMU control registers | ||
654 | */ | 660 | */ |
655 | h->flags & IVHD_FLAG_HT_TUN_EN_MASK ? | 661 | iommu->acpi_flags = h->flags; |
656 | iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) : | ||
657 | iommu_feature_disable(iommu, CONTROL_HT_TUN_EN); | ||
658 | |||
659 | h->flags & IVHD_FLAG_PASSPW_EN_MASK ? | ||
660 | iommu_feature_enable(iommu, CONTROL_PASSPW_EN) : | ||
661 | iommu_feature_disable(iommu, CONTROL_PASSPW_EN); | ||
662 | |||
663 | h->flags & IVHD_FLAG_RESPASSPW_EN_MASK ? | ||
664 | iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) : | ||
665 | iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN); | ||
666 | |||
667 | h->flags & IVHD_FLAG_ISOC_EN_MASK ? | ||
668 | iommu_feature_enable(iommu, CONTROL_ISOC_EN) : | ||
669 | iommu_feature_disable(iommu, CONTROL_ISOC_EN); | ||
670 | |||
671 | /* | ||
672 | * make IOMMU memory accesses cache coherent | ||
673 | */ | ||
674 | iommu_feature_enable(iommu, CONTROL_COHERENT_EN); | ||
675 | 662 | ||
676 | /* | 663 | /* |
677 | * Done. Now parse the device entries | 664 | * Done. Now parse the device entries |
@@ -1116,6 +1103,40 @@ static void init_device_table(void) | |||
1116 | } | 1103 | } |
1117 | } | 1104 | } |
1118 | 1105 | ||
1106 | static void iommu_init_flags(struct amd_iommu *iommu) | ||
1107 | { | ||
1108 | iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ? | ||
1109 | iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) : | ||
1110 | iommu_feature_disable(iommu, CONTROL_HT_TUN_EN); | ||
1111 | |||
1112 | iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ? | ||
1113 | iommu_feature_enable(iommu, CONTROL_PASSPW_EN) : | ||
1114 | iommu_feature_disable(iommu, CONTROL_PASSPW_EN); | ||
1115 | |||
1116 | iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ? | ||
1117 | iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) : | ||
1118 | iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN); | ||
1119 | |||
1120 | iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ? | ||
1121 | iommu_feature_enable(iommu, CONTROL_ISOC_EN) : | ||
1122 | iommu_feature_disable(iommu, CONTROL_ISOC_EN); | ||
1123 | |||
1124 | /* | ||
1125 | * make IOMMU memory accesses cache coherent | ||
1126 | */ | ||
1127 | iommu_feature_enable(iommu, CONTROL_COHERENT_EN); | ||
1128 | } | ||
1129 | |||
1130 | static void iommu_apply_quirks(struct amd_iommu *iommu) | ||
1131 | { | ||
1132 | if (is_rd890_iommu(iommu->dev)) { | ||
1133 | pci_write_config_dword(iommu->dev, 0xf0, iommu->cache_cfg[0]); | ||
1134 | pci_write_config_dword(iommu->dev, 0xf4, iommu->cache_cfg[1]); | ||
1135 | pci_write_config_dword(iommu->dev, 0xf8, iommu->cache_cfg[2]); | ||
1136 | pci_write_config_dword(iommu->dev, 0xfc, iommu->cache_cfg[3]); | ||
1137 | } | ||
1138 | } | ||
1139 | |||
1119 | /* | 1140 | /* |
1120 | * This function finally enables all IOMMUs found in the system after | 1141 | * This function finally enables all IOMMUs found in the system after |
1121 | * they have been initialized | 1142 | * they have been initialized |
@@ -1126,6 +1147,8 @@ static void enable_iommus(void) | |||
1126 | 1147 | ||
1127 | for_each_iommu(iommu) { | 1148 | for_each_iommu(iommu) { |
1128 | iommu_disable(iommu); | 1149 | iommu_disable(iommu); |
1150 | iommu_apply_quirks(iommu); | ||
1151 | iommu_init_flags(iommu); | ||
1129 | iommu_set_device_table(iommu); | 1152 | iommu_set_device_table(iommu); |
1130 | iommu_enable_command_buffer(iommu); | 1153 | iommu_enable_command_buffer(iommu); |
1131 | iommu_enable_event_buffer(iommu); | 1154 | iommu_enable_event_buffer(iommu); |
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index f1efebaf5510..5c5b8f3dddb5 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
@@ -306,14 +306,19 @@ void arch_init_copy_chip_data(struct irq_desc *old_desc, | |||
306 | 306 | ||
307 | old_cfg = old_desc->chip_data; | 307 | old_cfg = old_desc->chip_data; |
308 | 308 | ||
309 | memcpy(cfg, old_cfg, sizeof(struct irq_cfg)); | 309 | cfg->vector = old_cfg->vector; |
310 | cfg->move_in_progress = old_cfg->move_in_progress; | ||
311 | cpumask_copy(cfg->domain, old_cfg->domain); | ||
312 | cpumask_copy(cfg->old_domain, old_cfg->old_domain); | ||
310 | 313 | ||
311 | init_copy_irq_2_pin(old_cfg, cfg, node); | 314 | init_copy_irq_2_pin(old_cfg, cfg, node); |
312 | } | 315 | } |
313 | 316 | ||
314 | static void free_irq_cfg(struct irq_cfg *old_cfg) | 317 | static void free_irq_cfg(struct irq_cfg *cfg) |
315 | { | 318 | { |
316 | kfree(old_cfg); | 319 | free_cpumask_var(cfg->domain); |
320 | free_cpumask_var(cfg->old_domain); | ||
321 | kfree(cfg); | ||
317 | } | 322 | } |
318 | 323 | ||
319 | void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc) | 324 | void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc) |
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 7b598b84c902..f744f54cb248 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c | |||
@@ -698,9 +698,11 @@ void __init uv_system_init(void) | |||
698 | for (j = 0; j < 64; j++) { | 698 | for (j = 0; j < 64; j++) { |
699 | if (!test_bit(j, &present)) | 699 | if (!test_bit(j, &present)) |
700 | continue; | 700 | continue; |
701 | uv_blade_info[blade].pnode = (i * 64 + j); | 701 | pnode = (i * 64 + j); |
702 | uv_blade_info[blade].pnode = pnode; | ||
702 | uv_blade_info[blade].nr_possible_cpus = 0; | 703 | uv_blade_info[blade].nr_possible_cpus = 0; |
703 | uv_blade_info[blade].nr_online_cpus = 0; | 704 | uv_blade_info[blade].nr_online_cpus = 0; |
705 | max_pnode = max(pnode, max_pnode); | ||
704 | blade++; | 706 | blade++; |
705 | } | 707 | } |
706 | } | 708 | } |
@@ -738,7 +740,6 @@ void __init uv_system_init(void) | |||
738 | uv_cpu_hub_info(cpu)->scir.offset = uv_scir_offset(apicid); | 740 | uv_cpu_hub_info(cpu)->scir.offset = uv_scir_offset(apicid); |
739 | uv_node_to_blade[nid] = blade; | 741 | uv_node_to_blade[nid] = blade; |
740 | uv_cpu_to_blade[cpu] = blade; | 742 | uv_cpu_to_blade[cpu] = blade; |
741 | max_pnode = max(pnode, max_pnode); | ||
742 | } | 743 | } |
743 | 744 | ||
744 | /* Add blade/pnode info for nodes without cpus */ | 745 | /* Add blade/pnode info for nodes without cpus */ |
@@ -750,7 +751,6 @@ void __init uv_system_init(void) | |||
750 | pnode = (paddr >> m_val) & pnode_mask; | 751 | pnode = (paddr >> m_val) & pnode_mask; |
751 | blade = boot_pnode_to_blade(pnode); | 752 | blade = boot_pnode_to_blade(pnode); |
752 | uv_node_to_blade[nid] = blade; | 753 | uv_node_to_blade[nid] = blade; |
753 | max_pnode = max(pnode, max_pnode); | ||
754 | } | 754 | } |
755 | 755 | ||
756 | map_gru_high(max_pnode); | 756 | map_gru_high(max_pnode); |
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 490dac63c2d2..f2f9ac7da25c 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -545,7 +545,7 @@ void __cpuinit cpu_detect(struct cpuinfo_x86 *c) | |||
545 | } | 545 | } |
546 | } | 546 | } |
547 | 547 | ||
548 | static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c) | 548 | void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c) |
549 | { | 549 | { |
550 | u32 tfms, xlvl; | 550 | u32 tfms, xlvl; |
551 | u32 ebx; | 551 | u32 ebx; |
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h index 3624e8a0f71b..f668bb1f7d43 100644 --- a/arch/x86/kernel/cpu/cpu.h +++ b/arch/x86/kernel/cpu/cpu.h | |||
@@ -33,5 +33,6 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[], | |||
33 | *const __x86_cpu_dev_end[]; | 33 | *const __x86_cpu_dev_end[]; |
34 | 34 | ||
35 | extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c); | 35 | extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c); |
36 | extern void get_cpu_cap(struct cpuinfo_x86 *c); | ||
36 | 37 | ||
37 | #endif | 38 | #endif |
diff --git a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c index 994230d4dc4e..4f6f679f2799 100644 --- a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c | |||
@@ -368,16 +368,22 @@ static int __init pcc_cpufreq_do_osc(acpi_handle *handle) | |||
368 | return -ENODEV; | 368 | return -ENODEV; |
369 | 369 | ||
370 | out_obj = output.pointer; | 370 | out_obj = output.pointer; |
371 | if (out_obj->type != ACPI_TYPE_BUFFER) | 371 | if (out_obj->type != ACPI_TYPE_BUFFER) { |
372 | return -ENODEV; | 372 | ret = -ENODEV; |
373 | goto out_free; | ||
374 | } | ||
373 | 375 | ||
374 | errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0); | 376 | errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0); |
375 | if (errors) | 377 | if (errors) { |
376 | return -ENODEV; | 378 | ret = -ENODEV; |
379 | goto out_free; | ||
380 | } | ||
377 | 381 | ||
378 | supported = *((u32 *)(out_obj->buffer.pointer + 4)); | 382 | supported = *((u32 *)(out_obj->buffer.pointer + 4)); |
379 | if (!(supported & 0x1)) | 383 | if (!(supported & 0x1)) { |
380 | return -ENODEV; | 384 | ret = -ENODEV; |
385 | goto out_free; | ||
386 | } | ||
381 | 387 | ||
382 | out_free: | 388 | out_free: |
383 | kfree(output.pointer); | 389 | kfree(output.pointer); |
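The pcc-cpufreq change routes every error out of pcc_cpufreq_do_osc() through the existing out_free label so the ACPI output buffer is always freed instead of leaking on the early returns. A minimal userspace sketch of the same goto-based single-exit cleanup; parse_blob() and its validation steps are invented for the example.

#include <stdlib.h>
#include <string.h>

static int parse_blob(const char *src, size_t len)
{
        int ret = 0;
        char *buf = malloc(len + 1);

        if (!buf)
                return -1;

        memcpy(buf, src, len);
        buf[len] = '\0';

        if (buf[0] != 'P') {            /* first validation step */
                ret = -1;
                goto out_free;
        }
        if (len < 4) {                  /* second validation step */
                ret = -1;
                goto out_free;
        }

out_free:
        free(buf);                      /* runs on success and on every error path */
        return ret;
}

int main(void)
{
        return parse_blob("PCCH", 4) ? 1 : 0;
}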
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 85f69cdeae10..b4389441efbb 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c | |||
@@ -39,6 +39,7 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) | |||
39 | misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID; | 39 | misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID; |
40 | wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable); | 40 | wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable); |
41 | c->cpuid_level = cpuid_eax(0); | 41 | c->cpuid_level = cpuid_eax(0); |
42 | get_cpu_cap(c); | ||
42 | } | 43 | } |
43 | } | 44 | } |
44 | 45 | ||
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index 224392d8fe8c..5e975298fa81 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c | |||
@@ -530,7 +530,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | |||
530 | err = -ENOMEM; | 530 | err = -ENOMEM; |
531 | goto out; | 531 | goto out; |
532 | } | 532 | } |
533 | if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL)) { | 533 | if (!zalloc_cpumask_var(&b->cpus, GFP_KERNEL)) { |
534 | kfree(b); | 534 | kfree(b); |
535 | err = -ENOMEM; | 535 | err = -ENOMEM; |
536 | goto out; | 536 | goto out; |
@@ -543,7 +543,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | |||
543 | #ifndef CONFIG_SMP | 543 | #ifndef CONFIG_SMP |
544 | cpumask_setall(b->cpus); | 544 | cpumask_setall(b->cpus); |
545 | #else | 545 | #else |
546 | cpumask_copy(b->cpus, c->llc_shared_map); | 546 | cpumask_set_cpu(cpu, b->cpus); |
547 | #endif | 547 | #endif |
548 | 548 | ||
549 | per_cpu(threshold_banks, cpu)[bank] = b; | 549 | per_cpu(threshold_banks, cpu)[bank] = b; |
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c index c2a8b26d4fea..d9368eeda309 100644 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c | |||
@@ -202,10 +202,11 @@ static int therm_throt_process(bool new_event, int event, int level) | |||
202 | 202 | ||
203 | #ifdef CONFIG_SYSFS | 203 | #ifdef CONFIG_SYSFS |
204 | /* Add/Remove thermal_throttle interface for CPU device: */ | 204 | /* Add/Remove thermal_throttle interface for CPU device: */ |
205 | static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev) | 205 | static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev, |
206 | unsigned int cpu) | ||
206 | { | 207 | { |
207 | int err; | 208 | int err; |
208 | struct cpuinfo_x86 *c = &cpu_data(smp_processor_id()); | 209 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
209 | 210 | ||
210 | err = sysfs_create_group(&sys_dev->kobj, &thermal_attr_group); | 211 | err = sysfs_create_group(&sys_dev->kobj, &thermal_attr_group); |
211 | if (err) | 212 | if (err) |
@@ -251,7 +252,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb, | |||
251 | case CPU_UP_PREPARE: | 252 | case CPU_UP_PREPARE: |
252 | case CPU_UP_PREPARE_FROZEN: | 253 | case CPU_UP_PREPARE_FROZEN: |
253 | mutex_lock(&therm_cpu_lock); | 254 | mutex_lock(&therm_cpu_lock); |
254 | err = thermal_throttle_add_dev(sys_dev); | 255 | err = thermal_throttle_add_dev(sys_dev, cpu); |
255 | mutex_unlock(&therm_cpu_lock); | 256 | mutex_unlock(&therm_cpu_lock); |
256 | WARN_ON(err); | 257 | WARN_ON(err); |
257 | break; | 258 | break; |
@@ -287,7 +288,7 @@ static __init int thermal_throttle_init_device(void) | |||
287 | #endif | 288 | #endif |
288 | /* connect live CPUs to sysfs */ | 289 | /* connect live CPUs to sysfs */ |
289 | for_each_online_cpu(cpu) { | 290 | for_each_online_cpu(cpu) { |
290 | err = thermal_throttle_add_dev(get_cpu_sysdev(cpu)); | 291 | err = thermal_throttle_add_dev(get_cpu_sysdev(cpu), cpu); |
291 | WARN_ON(err); | 292 | WARN_ON(err); |
292 | } | 293 | } |
293 | #ifdef CONFIG_HOTPLUG_CPU | 294 | #ifdef CONFIG_HOTPLUG_CPU |
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index f2da20fda02d..03a5b0385ad6 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c | |||
@@ -102,6 +102,7 @@ struct cpu_hw_events { | |||
102 | */ | 102 | */ |
103 | struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */ | 103 | struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */ |
104 | unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; | 104 | unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; |
105 | unsigned long running[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; | ||
105 | int enabled; | 106 | int enabled; |
106 | 107 | ||
107 | int n_events; | 108 | int n_events; |
@@ -1010,6 +1011,7 @@ static int x86_pmu_start(struct perf_event *event) | |||
1010 | x86_perf_event_set_period(event); | 1011 | x86_perf_event_set_period(event); |
1011 | cpuc->events[idx] = event; | 1012 | cpuc->events[idx] = event; |
1012 | __set_bit(idx, cpuc->active_mask); | 1013 | __set_bit(idx, cpuc->active_mask); |
1014 | __set_bit(idx, cpuc->running); | ||
1013 | x86_pmu.enable(event); | 1015 | x86_pmu.enable(event); |
1014 | perf_event_update_userpage(event); | 1016 | perf_event_update_userpage(event); |
1015 | 1017 | ||
@@ -1141,8 +1143,16 @@ static int x86_pmu_handle_irq(struct pt_regs *regs) | |||
1141 | cpuc = &__get_cpu_var(cpu_hw_events); | 1143 | cpuc = &__get_cpu_var(cpu_hw_events); |
1142 | 1144 | ||
1143 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { | 1145 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { |
1144 | if (!test_bit(idx, cpuc->active_mask)) | 1146 | if (!test_bit(idx, cpuc->active_mask)) { |
1147 | /* | ||
1148 | * Though we deactivated the counter some cpus | ||
1149 | * might still deliver spurious interrupts that | ||
1150 | * are still in flight. Catch them: | ||

1151 | */ | ||
1152 | if (__test_and_clear_bit(idx, cpuc->running)) | ||
1153 | handled++; | ||
1145 | continue; | 1154 | continue; |
1155 | } | ||
1146 | 1156 | ||
1147 | event = cpuc->events[idx]; | 1157 | event = cpuc->events[idx]; |
1148 | hwc = &event->hw; | 1158 | hwc = &event->hw; |
@@ -1154,7 +1164,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs) | |||
1154 | /* | 1164 | /* |
1155 | * event overflow | 1165 | * event overflow |
1156 | */ | 1166 | */ |
1157 | handled = 1; | 1167 | handled++; |
1158 | data.period = event->hw.last_period; | 1168 | data.period = event->hw.last_period; |
1159 | 1169 | ||
1160 | if (!x86_perf_event_set_period(event)) | 1170 | if (!x86_perf_event_set_period(event)) |
@@ -1200,12 +1210,20 @@ void perf_events_lapic_init(void) | |||
1200 | apic_write(APIC_LVTPC, APIC_DM_NMI); | 1210 | apic_write(APIC_LVTPC, APIC_DM_NMI); |
1201 | } | 1211 | } |
1202 | 1212 | ||
1213 | struct pmu_nmi_state { | ||
1214 | unsigned int marked; | ||
1215 | int handled; | ||
1216 | }; | ||
1217 | |||
1218 | static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi); | ||
1219 | |||
1203 | static int __kprobes | 1220 | static int __kprobes |
1204 | perf_event_nmi_handler(struct notifier_block *self, | 1221 | perf_event_nmi_handler(struct notifier_block *self, |
1205 | unsigned long cmd, void *__args) | 1222 | unsigned long cmd, void *__args) |
1206 | { | 1223 | { |
1207 | struct die_args *args = __args; | 1224 | struct die_args *args = __args; |
1208 | struct pt_regs *regs; | 1225 | unsigned int this_nmi; |
1226 | int handled; | ||
1209 | 1227 | ||
1210 | if (!atomic_read(&active_events)) | 1228 | if (!atomic_read(&active_events)) |
1211 | return NOTIFY_DONE; | 1229 | return NOTIFY_DONE; |
@@ -1214,22 +1232,47 @@ perf_event_nmi_handler(struct notifier_block *self, | |||
1214 | case DIE_NMI: | 1232 | case DIE_NMI: |
1215 | case DIE_NMI_IPI: | 1233 | case DIE_NMI_IPI: |
1216 | break; | 1234 | break; |
1217 | 1235 | case DIE_NMIUNKNOWN: | |
1236 | this_nmi = percpu_read(irq_stat.__nmi_count); | ||
1237 | if (this_nmi != __get_cpu_var(pmu_nmi).marked) | ||
1238 | /* let the kernel handle the unknown nmi */ | ||
1239 | return NOTIFY_DONE; | ||
1240 | /* | ||
1241 | * This one is a PMU back-to-back nmi. Two events | ||
1242 | * trigger 'simultaneously' raising two back-to-back | ||
1243 | * NMIs. If the first NMI handles both, the latter | ||
1244 | * will be empty and daze the CPU. So, we drop it to | ||
1245 | * avoid false-positive 'unknown nmi' messages. | ||
1246 | */ | ||
1247 | return NOTIFY_STOP; | ||
1218 | default: | 1248 | default: |
1219 | return NOTIFY_DONE; | 1249 | return NOTIFY_DONE; |
1220 | } | 1250 | } |
1221 | 1251 | ||
1222 | regs = args->regs; | ||
1223 | |||
1224 | apic_write(APIC_LVTPC, APIC_DM_NMI); | 1252 | apic_write(APIC_LVTPC, APIC_DM_NMI); |
1225 | /* | 1253 | |
1226 | * Can't rely on the handled return value to say it was our NMI, two | 1254 | handled = x86_pmu.handle_irq(args->regs); |
1227 | * events could trigger 'simultaneously' raising two back-to-back NMIs. | 1255 | if (!handled) |
1228 | * | 1256 | return NOTIFY_DONE; |
1229 | * If the first NMI handles both, the latter will be empty and daze | 1257 | |
1230 | * the CPU. | 1258 | this_nmi = percpu_read(irq_stat.__nmi_count); |
1231 | */ | 1259 | if ((handled > 1) || |
1232 | x86_pmu.handle_irq(regs); | 1260 | /* the next nmi could be a back-to-back nmi */ |
1261 | ((__get_cpu_var(pmu_nmi).marked == this_nmi) && | ||
1262 | (__get_cpu_var(pmu_nmi).handled > 1))) { | ||
1263 | /* | ||
1264 | * We could have two subsequent back-to-back nmis: The | ||
1265 | * first handles more than one counter, the 2nd | ||
1266 | * handles only one counter and the 3rd handles no | ||
1267 | * counter. | ||
1268 | * | ||
1269 | * This is the 2nd nmi because the previous was | ||
1270 | * handling more than one counter. We will mark the | ||
1271 | * next (3rd) and then drop it if unhandled. | ||
1272 | */ | ||
1273 | __get_cpu_var(pmu_nmi).marked = this_nmi + 1; | ||
1274 | __get_cpu_var(pmu_nmi).handled = handled; | ||
1275 | } | ||
1233 | 1276 | ||
1234 | return NOTIFY_STOP; | 1277 | return NOTIFY_STOP; |
1235 | } | 1278 | } |
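The NMI handler now counts how many counters it serviced; when it handles more than one (or follows a marked NMI that did), it records the next NMI number in the per-CPU pmu_nmi state so that a later empty, back-to-back NMI can be consumed instead of being reported as unknown. A hedged userspace sketch of just that bookkeeping follows; the global pmu_nmi and nmi_count variables stand in for the kernel's per-CPU state and irq_stat.__nmi_count.

#include <stdio.h>

struct pmu_nmi_state {
        unsigned int marked;    /* NMI number that may legitimately be empty */
        int handled;            /* counters handled by the previous NMI      */
};

static struct pmu_nmi_state pmu_nmi;
static unsigned int nmi_count;

/* Returns 1 if the NMI was consumed, 0 if it should be treated as unknown. */
static int pmu_nmi_event(int counters_handled)
{
        unsigned int this_nmi = ++nmi_count;

        if (counters_handled == 0) {
                /* Empty NMI: only swallow it if the previous one marked it. */
                return this_nmi == pmu_nmi.marked;
        }

        if (counters_handled > 1 ||
            (pmu_nmi.marked == this_nmi && pmu_nmi.handled > 1)) {
                /* The next NMI may be a back-to-back duplicate: mark it. */
                pmu_nmi.marked = this_nmi + 1;
                pmu_nmi.handled = counters_handled;
        }
        return 1;
}

int main(void)
{
        printf("%d\n", pmu_nmi_event(2));   /* handled two counters        -> 1 */
        printf("%d\n", pmu_nmi_event(0));   /* back-to-back NMI, consumed  -> 1 */
        printf("%d\n", pmu_nmi_event(0));   /* truly unknown, not consumed -> 0 */
        return 0;
}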
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index d8d86d014008..ee05c90012d2 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
@@ -712,7 +712,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) | |||
712 | struct perf_sample_data data; | 712 | struct perf_sample_data data; |
713 | struct cpu_hw_events *cpuc; | 713 | struct cpu_hw_events *cpuc; |
714 | int bit, loops; | 714 | int bit, loops; |
715 | u64 ack, status; | 715 | u64 status; |
716 | int handled = 0; | ||
716 | 717 | ||
717 | perf_sample_data_init(&data, 0); | 718 | perf_sample_data_init(&data, 0); |
718 | 719 | ||
@@ -728,6 +729,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) | |||
728 | 729 | ||
729 | loops = 0; | 730 | loops = 0; |
730 | again: | 731 | again: |
732 | intel_pmu_ack_status(status); | ||
731 | if (++loops > 100) { | 733 | if (++loops > 100) { |
732 | WARN_ONCE(1, "perfevents: irq loop stuck!\n"); | 734 | WARN_ONCE(1, "perfevents: irq loop stuck!\n"); |
733 | perf_event_print_debug(); | 735 | perf_event_print_debug(); |
@@ -736,19 +738,22 @@ again: | |||
736 | } | 738 | } |
737 | 739 | ||
738 | inc_irq_stat(apic_perf_irqs); | 740 | inc_irq_stat(apic_perf_irqs); |
739 | ack = status; | ||
740 | 741 | ||
741 | intel_pmu_lbr_read(); | 742 | intel_pmu_lbr_read(); |
742 | 743 | ||
743 | /* | 744 | /* |
744 | * PEBS overflow sets bit 62 in the global status register | 745 | * PEBS overflow sets bit 62 in the global status register |
745 | */ | 746 | */ |
746 | if (__test_and_clear_bit(62, (unsigned long *)&status)) | 747 | if (__test_and_clear_bit(62, (unsigned long *)&status)) { |
748 | handled++; | ||
747 | x86_pmu.drain_pebs(regs); | 749 | x86_pmu.drain_pebs(regs); |
750 | } | ||
748 | 751 | ||
749 | for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { | 752 | for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { |
750 | struct perf_event *event = cpuc->events[bit]; | 753 | struct perf_event *event = cpuc->events[bit]; |
751 | 754 | ||
755 | handled++; | ||
756 | |||
752 | if (!test_bit(bit, cpuc->active_mask)) | 757 | if (!test_bit(bit, cpuc->active_mask)) |
753 | continue; | 758 | continue; |
754 | 759 | ||
@@ -761,8 +766,6 @@ again: | |||
761 | x86_pmu_stop(event); | 766 | x86_pmu_stop(event); |
762 | } | 767 | } |
763 | 768 | ||
764 | intel_pmu_ack_status(ack); | ||
765 | |||
766 | /* | 769 | /* |
767 | * Repeat if there is more work to be done: | 770 | * Repeat if there is more work to be done: |
768 | */ | 771 | */ |
@@ -772,7 +775,7 @@ again: | |||
772 | 775 | ||
773 | done: | 776 | done: |
774 | intel_pmu_enable_all(0); | 777 | intel_pmu_enable_all(0); |
775 | return 1; | 778 | return handled; |
776 | } | 779 | } |
777 | 780 | ||
778 | static struct event_constraint * | 781 | static struct event_constraint * |
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c index 7e578e9cc58b..249015173992 100644 --- a/arch/x86/kernel/cpu/perf_event_p4.c +++ b/arch/x86/kernel/cpu/perf_event_p4.c | |||
@@ -660,8 +660,12 @@ static int p4_pmu_handle_irq(struct pt_regs *regs) | |||
660 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { | 660 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { |
661 | int overflow; | 661 | int overflow; |
662 | 662 | ||
663 | if (!test_bit(idx, cpuc->active_mask)) | 663 | if (!test_bit(idx, cpuc->active_mask)) { |
664 | /* catch in-flight IRQs */ | ||
665 | if (__test_and_clear_bit(idx, cpuc->running)) | ||
666 | handled++; | ||
664 | continue; | 667 | continue; |
668 | } | ||
665 | 669 | ||
666 | event = cpuc->events[idx]; | 670 | event = cpuc->events[idx]; |
667 | hwc = &event->hw; | 671 | hwc = &event->hw; |
@@ -692,7 +696,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs) | |||
692 | inc_irq_stat(apic_perf_irqs); | 696 | inc_irq_stat(apic_perf_irqs); |
693 | } | 697 | } |
694 | 698 | ||
695 | return handled > 0; | 699 | return handled; |
696 | } | 700 | } |
697 | 701 | ||
698 | /* | 702 | /* |
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c index 34b4dad6f0b8..d49079515122 100644 --- a/arch/x86/kernel/cpu/scattered.c +++ b/arch/x86/kernel/cpu/scattered.c | |||
@@ -31,6 +31,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c) | |||
31 | const struct cpuid_bit *cb; | 31 | const struct cpuid_bit *cb; |
32 | 32 | ||
33 | static const struct cpuid_bit __cpuinitconst cpuid_bits[] = { | 33 | static const struct cpuid_bit __cpuinitconst cpuid_bits[] = { |
34 | { X86_FEATURE_DTS, CR_EAX, 0, 0x00000006, 0 }, | ||
34 | { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006, 0 }, | 35 | { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006, 0 }, |
35 | { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 }, | 36 | { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 }, |
36 | { X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 }, | 37 | { X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 }, |
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index e5cc7e82e60d..ebdb85cf2686 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c | |||
@@ -18,7 +18,6 @@ | |||
18 | #include <asm/apic.h> | 18 | #include <asm/apic.h> |
19 | #include <asm/iommu.h> | 19 | #include <asm/iommu.h> |
20 | #include <asm/gart.h> | 20 | #include <asm/gart.h> |
21 | #include <asm/hpet.h> | ||
22 | 21 | ||
23 | static void __init fix_hypertransport_config(int num, int slot, int func) | 22 | static void __init fix_hypertransport_config(int num, int slot, int func) |
24 | { | 23 | { |
@@ -192,21 +191,6 @@ static void __init ati_bugs_contd(int num, int slot, int func) | |||
192 | } | 191 | } |
193 | #endif | 192 | #endif |
194 | 193 | ||
195 | /* | ||
196 | * Force the read back of the CMP register in hpet_next_event() | ||
197 | * to work around the problem that the CMP register write seems to be | ||
198 | * delayed. See hpet_next_event() for details. | ||
199 | * | ||
200 | * We do this on all SMBUS incarnations for now until we have more | ||
201 | * information about the affected chipsets. | ||
202 | */ | ||
203 | static void __init ati_hpet_bugs(int num, int slot, int func) | ||
204 | { | ||
205 | #ifdef CONFIG_HPET_TIMER | ||
206 | hpet_readback_cmp = 1; | ||
207 | #endif | ||
208 | } | ||
209 | |||
210 | #define QFLAG_APPLY_ONCE 0x1 | 194 | #define QFLAG_APPLY_ONCE 0x1 |
211 | #define QFLAG_APPLIED 0x2 | 195 | #define QFLAG_APPLIED 0x2 |
212 | #define QFLAG_DONE (QFLAG_APPLY_ONCE|QFLAG_APPLIED) | 196 | #define QFLAG_DONE (QFLAG_APPLY_ONCE|QFLAG_APPLIED) |
@@ -236,8 +220,6 @@ static struct chipset early_qrk[] __initdata = { | |||
236 | PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs }, | 220 | PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs }, |
237 | { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, | 221 | { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, |
238 | PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd }, | 222 | PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd }, |
239 | { PCI_VENDOR_ID_ATI, PCI_ANY_ID, | ||
240 | PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_hpet_bugs }, | ||
241 | {} | 223 | {} |
242 | }; | 224 | }; |
243 | 225 | ||
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 351f9c0fea1f..7494999141b3 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c | |||
@@ -35,7 +35,6 @@ | |||
35 | unsigned long hpet_address; | 35 | unsigned long hpet_address; |
36 | u8 hpet_blockid; /* OS timer block num */ | 36 | u8 hpet_blockid; /* OS timer block num */ |
37 | u8 hpet_msi_disable; | 37 | u8 hpet_msi_disable; |
38 | u8 hpet_readback_cmp; | ||
39 | 38 | ||
40 | #ifdef CONFIG_PCI_MSI | 39 | #ifdef CONFIG_PCI_MSI |
41 | static unsigned long hpet_num_timers; | 40 | static unsigned long hpet_num_timers; |
@@ -395,23 +394,27 @@ static int hpet_next_event(unsigned long delta, | |||
395 | * at that point and we would wait for the next hpet interrupt | 394 | * at that point and we would wait for the next hpet interrupt |
396 | * forever. We found out that reading the CMP register back | 395 | * forever. We found out that reading the CMP register back |
397 | * forces the transfer so we can rely on the comparison with | 396 | * forces the transfer so we can rely on the comparison with |
398 | * the counter register below. | 397 | * the counter register below. If the read back from the |
398 | * compare register does not match the value we programmed | ||
399 | * then we might have a real hardware problem. We can not do | ||
400 | * much about it here, but at least alert the user/admin with | ||
401 | * a prominent warning. | ||
399 | * | 402 | * |
400 | * That works fine on those ATI chipsets, but on newer Intel | 403 | * An erratum on some chipsets (ICH9, ...) causes a comparator |
401 | * chipsets (ICH9...) this triggers due to an erratum: Reading | 404 | * read immediately following a write to return the old value. |
402 | * the comparator immediately following a write is returning | 405 | * The workaround is to read the value a second time when the |
403 | * the old value. | 406 | * first read returns the old value. |
404 | * | 407 | * |
405 | * We restrict the read back to the affected ATI chipsets (set | 408 | * In fact the write to the comparator register is delayed up |
406 | * by quirks) and also run it with hpet=verbose for debugging | 409 | * to two HPET cycles so the workaround we tried to restrict |
407 | * purposes. | 410 | * the readback to those known to be borked ATI chipsets |
411 | * failed miserably. So we give up on optimizations forever | ||
412 | * and penalize all HPET incarnations unconditionally. | ||
408 | */ | 413 | */ |
409 | if (hpet_readback_cmp || hpet_verbose) { | 414 | if (unlikely((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt)) { |
410 | u32 cmp = hpet_readl(HPET_Tn_CMP(timer)); | 415 | if (hpet_readl(HPET_Tn_CMP(timer)) != cnt) |
411 | |||
412 | if (cmp != cnt) | ||
413 | printk_once(KERN_WARNING | 416 | printk_once(KERN_WARNING |
414 | "hpet: compare register read back failed.\n"); | 417 | "hpet: compare register read back failed.\n"); |
415 | } | 418 | } |
416 | 419 | ||
417 | return (s32)(hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0; | 420 | return (s32)(hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0; |
@@ -503,7 +506,7 @@ static int hpet_assign_irq(struct hpet_dev *dev) | |||
503 | { | 506 | { |
504 | unsigned int irq; | 507 | unsigned int irq; |
505 | 508 | ||
506 | irq = create_irq(); | 509 | irq = create_irq_nr(0, -1); |
507 | if (!irq) | 510 | if (!irq) |
508 | return -EINVAL; | 511 | return -EINVAL; |
509 | 512 | ||
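The hpet_next_event() change drops the quirk flag and reads the comparator back unconditionally: if the first readback does not match, it is read once more, and only a second mismatch triggers the warning, since affected chipsets return the stale value on the first read after a write. Below is a simplified sketch of that double-read check; read_cmp()/write_cmp() and the stale-read simulation are made up for the example.

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_cmp_reg;
static int stale_reads_left = 1;        /* simulate the erratum: first read is stale */

static uint32_t read_cmp(void)
{
        if (stale_reads_left) {
                stale_reads_left--;
                return fake_cmp_reg - 1;        /* stale value */
        }
        return fake_cmp_reg;
}

static void write_cmp(uint32_t val)
{
        fake_cmp_reg = val;
}

int main(void)
{
        uint32_t cnt = 0x1234;

        write_cmp(cnt);

        /* A first mismatch may just be the erratum; only a second
         * mismatch is treated as a real readback failure. */
        if (read_cmp() != cnt) {
                if (read_cmp() != cnt)
                        fprintf(stderr, "compare register read back failed\n");
                else
                        printf("stale first read, second read OK\n");
        }
        return 0;
}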
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index a474ec37c32f..ff15c9dcc25d 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c | |||
@@ -206,11 +206,27 @@ int arch_check_bp_in_kernelspace(struct perf_event *bp) | |||
206 | int arch_bp_generic_fields(int x86_len, int x86_type, | 206 | int arch_bp_generic_fields(int x86_len, int x86_type, |
207 | int *gen_len, int *gen_type) | 207 | int *gen_len, int *gen_type) |
208 | { | 208 | { |
209 | /* Len */ | 209 | /* Type */ |
210 | switch (x86_len) { | 210 | switch (x86_type) { |
211 | case X86_BREAKPOINT_LEN_X: | 211 | case X86_BREAKPOINT_EXECUTE: |
212 | if (x86_len != X86_BREAKPOINT_LEN_X) | ||
213 | return -EINVAL; | ||
214 | |||
215 | *gen_type = HW_BREAKPOINT_X; | ||
212 | *gen_len = sizeof(long); | 216 | *gen_len = sizeof(long); |
217 | return 0; | ||
218 | case X86_BREAKPOINT_WRITE: | ||
219 | *gen_type = HW_BREAKPOINT_W; | ||
213 | break; | 220 | break; |
221 | case X86_BREAKPOINT_RW: | ||
222 | *gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R; | ||
223 | break; | ||
224 | default: | ||
225 | return -EINVAL; | ||
226 | } | ||
227 | |||
228 | /* Len */ | ||
229 | switch (x86_len) { | ||
214 | case X86_BREAKPOINT_LEN_1: | 230 | case X86_BREAKPOINT_LEN_1: |
215 | *gen_len = HW_BREAKPOINT_LEN_1; | 231 | *gen_len = HW_BREAKPOINT_LEN_1; |
216 | break; | 232 | break; |
@@ -229,21 +245,6 @@ int arch_bp_generic_fields(int x86_len, int x86_type, | |||
229 | return -EINVAL; | 245 | return -EINVAL; |
230 | } | 246 | } |
231 | 247 | ||
232 | /* Type */ | ||
233 | switch (x86_type) { | ||
234 | case X86_BREAKPOINT_EXECUTE: | ||
235 | *gen_type = HW_BREAKPOINT_X; | ||
236 | break; | ||
237 | case X86_BREAKPOINT_WRITE: | ||
238 | *gen_type = HW_BREAKPOINT_W; | ||
239 | break; | ||
240 | case X86_BREAKPOINT_RW: | ||
241 | *gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R; | ||
242 | break; | ||
243 | default: | ||
244 | return -EINVAL; | ||
245 | } | ||
246 | |||
247 | return 0; | 248 | return 0; |
248 | } | 249 | } |
249 | 250 | ||
@@ -316,9 +317,6 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) | |||
316 | ret = -EINVAL; | 317 | ret = -EINVAL; |
317 | 318 | ||
318 | switch (info->len) { | 319 | switch (info->len) { |
319 | case X86_BREAKPOINT_LEN_X: | ||
320 | align = sizeof(long) -1; | ||
321 | break; | ||
322 | case X86_BREAKPOINT_LEN_1: | 320 | case X86_BREAKPOINT_LEN_1: |
323 | align = 0; | 321 | align = 0; |
324 | break; | 322 | break; |
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c index e0bc186d7501..1c355c550960 100644 --- a/arch/x86/kernel/module.c +++ b/arch/x86/kernel/module.c | |||
@@ -239,11 +239,10 @@ int module_finalize(const Elf_Ehdr *hdr, | |||
239 | apply_paravirt(pseg, pseg + para->sh_size); | 239 | apply_paravirt(pseg, pseg + para->sh_size); |
240 | } | 240 | } |
241 | 241 | ||
242 | return module_bug_finalize(hdr, sechdrs, me); | 242 | return 0; |
243 | } | 243 | } |
244 | 244 | ||
245 | void module_arch_cleanup(struct module *mod) | 245 | void module_arch_cleanup(struct module *mod) |
246 | { | 246 | { |
247 | alternatives_smp_module_del(mod); | 247 | alternatives_smp_module_del(mod); |
248 | module_bug_cleanup(mod); | ||
249 | } | 248 | } |
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c index 9a3d44c0df9a..4c3da5674e67 100644 --- a/arch/x86/kernel/trampoline.c +++ b/arch/x86/kernel/trampoline.c | |||
@@ -45,8 +45,7 @@ void __init setup_trampoline_page_table(void) | |||
45 | /* Copy kernel address range */ | 45 | /* Copy kernel address range */ |
46 | clone_pgd_range(trampoline_pg_dir + KERNEL_PGD_BOUNDARY, | 46 | clone_pgd_range(trampoline_pg_dir + KERNEL_PGD_BOUNDARY, |
47 | swapper_pg_dir + KERNEL_PGD_BOUNDARY, | 47 | swapper_pg_dir + KERNEL_PGD_BOUNDARY, |
48 | min_t(unsigned long, KERNEL_PGD_PTRS, | 48 | KERNEL_PGD_PTRS); |
49 | KERNEL_PGD_BOUNDARY)); | ||
50 | 49 | ||
51 | /* Initialize low mappings */ | 50 | /* Initialize low mappings */ |
52 | clone_pgd_range(trampoline_pg_dir, | 51 | clone_pgd_range(trampoline_pg_dir, |
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index d632934cb638..26a863a9c2a8 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c | |||
@@ -655,7 +655,7 @@ void restore_sched_clock_state(void) | |||
655 | 655 | ||
656 | local_irq_save(flags); | 656 | local_irq_save(flags); |
657 | 657 | ||
658 | get_cpu_var(cyc2ns_offset) = 0; | 658 | __get_cpu_var(cyc2ns_offset) = 0; |
659 | offset = cyc2ns_suspend - sched_clock(); | 659 | offset = cyc2ns_suspend - sched_clock(); |
660 | 660 | ||
661 | for_each_possible_cpu(cpu) | 661 | for_each_possible_cpu(cpu) |
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index b38bd8b92aa6..66ca98aafdd6 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c | |||
@@ -1870,17 +1870,16 @@ static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt, | |||
1870 | struct x86_emulate_ops *ops) | 1870 | struct x86_emulate_ops *ops) |
1871 | { | 1871 | { |
1872 | struct decode_cache *c = &ctxt->decode; | 1872 | struct decode_cache *c = &ctxt->decode; |
1873 | u64 old = c->dst.orig_val; | 1873 | u64 old = c->dst.orig_val64; |
1874 | 1874 | ||
1875 | if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) || | 1875 | if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) || |
1876 | ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) { | 1876 | ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) { |
1877 | |||
1878 | c->regs[VCPU_REGS_RAX] = (u32) (old >> 0); | 1877 | c->regs[VCPU_REGS_RAX] = (u32) (old >> 0); |
1879 | c->regs[VCPU_REGS_RDX] = (u32) (old >> 32); | 1878 | c->regs[VCPU_REGS_RDX] = (u32) (old >> 32); |
1880 | ctxt->eflags &= ~EFLG_ZF; | 1879 | ctxt->eflags &= ~EFLG_ZF; |
1881 | } else { | 1880 | } else { |
1882 | c->dst.val = ((u64)c->regs[VCPU_REGS_RCX] << 32) | | 1881 | c->dst.val64 = ((u64)c->regs[VCPU_REGS_RCX] << 32) | |
1883 | (u32) c->regs[VCPU_REGS_RBX]; | 1882 | (u32) c->regs[VCPU_REGS_RBX]; |
1884 | 1883 | ||
1885 | ctxt->eflags |= EFLG_ZF; | 1884 | ctxt->eflags |= EFLG_ZF; |
1886 | } | 1885 | } |
@@ -2616,7 +2615,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) | |||
2616 | c->src.valptr, c->src.bytes); | 2615 | c->src.valptr, c->src.bytes); |
2617 | if (rc != X86EMUL_CONTINUE) | 2616 | if (rc != X86EMUL_CONTINUE) |
2618 | goto done; | 2617 | goto done; |
2619 | c->src.orig_val = c->src.val; | 2618 | c->src.orig_val64 = c->src.val64; |
2620 | } | 2619 | } |
2621 | 2620 | ||
2622 | if (c->src2.type == OP_MEM) { | 2621 | if (c->src2.type == OP_MEM) { |
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c index 8d10c063d7f2..4b7b73ce2098 100644 --- a/arch/x86/kvm/i8259.c +++ b/arch/x86/kvm/i8259.c | |||
@@ -64,6 +64,9 @@ static void pic_unlock(struct kvm_pic *s) | |||
64 | if (!found) | 64 | if (!found) |
65 | found = s->kvm->bsp_vcpu; | 65 | found = s->kvm->bsp_vcpu; |
66 | 66 | ||
67 | if (!found) | ||
68 | return; | ||
69 | |||
67 | kvm_vcpu_kick(found); | 70 | kvm_vcpu_kick(found); |
68 | } | 71 | } |
69 | } | 72 | } |
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h index ffed06871c5c..63c314502993 100644 --- a/arch/x86/kvm/irq.h +++ b/arch/x86/kvm/irq.h | |||
@@ -43,7 +43,6 @@ struct kvm_kpic_state { | |||
43 | u8 irr; /* interrupt request register */ | 43 | u8 irr; /* interrupt request register */ |
44 | u8 imr; /* interrupt mask register */ | 44 | u8 imr; /* interrupt mask register */ |
45 | u8 isr; /* interrupt service register */ | 45 | u8 isr; /* interrupt service register */ |
46 | u8 isr_ack; /* interrupt ack detection */ | ||
47 | u8 priority_add; /* highest irq priority */ | 46 | u8 priority_add; /* highest irq priority */ |
48 | u8 irq_base; | 47 | u8 irq_base; |
49 | u8 read_reg_select; | 48 | u8 read_reg_select; |
@@ -56,6 +55,7 @@ struct kvm_kpic_state { | |||
56 | u8 init4; /* true if 4 byte init */ | 55 | u8 init4; /* true if 4 byte init */ |
57 | u8 elcr; /* PIIX edge/trigger selection */ | 56 | u8 elcr; /* PIIX edge/trigger selection */ |
58 | u8 elcr_mask; | 57 | u8 elcr_mask; |
58 | u8 isr_ack; /* interrupt ack detection */ | ||
59 | struct kvm_pic *pics_state; | 59 | struct kvm_pic *pics_state; |
60 | }; | 60 | }; |
61 | 61 | ||
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 9257510b4836..9d5f55848455 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c | |||
@@ -324,9 +324,8 @@ static void lguest_load_gdt(const struct desc_ptr *desc) | |||
324 | } | 324 | } |
325 | 325 | ||
326 | /* | 326 | /* |
327 | * For a single GDT entry which changes, we do the lazy thing: alter our GDT, | 327 | * For a single GDT entry which changes, we simply change our copy and |
328 | * then tell the Host to reload the entire thing. This operation is so rare | 328 | * then tell the host about it. |
329 | * that this naive implementation is reasonable. | ||
330 | */ | 329 | */ |
331 | static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum, | 330 | static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum, |
332 | const void *desc, int type) | 331 | const void *desc, int type) |
@@ -338,9 +337,13 @@ static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum, | |||
338 | } | 337 | } |
339 | 338 | ||
340 | /* | 339 | /* |
341 | * OK, I lied. There are three "thread local storage" GDT entries which change | 340 | * There are three "thread local storage" GDT entries which change |
342 | * on every context switch (these three entries are how glibc implements | 341 | * on every context switch (these three entries are how glibc implements |
343 | * __thread variables). So we have a hypercall specifically for this case. | 342 | * __thread variables). As an optimization, we have a hypercall |
343 | * specifically for this case. | ||
344 | * | ||
345 | * Wouldn't it be nicer to have a general LOAD_GDT_ENTRIES hypercall | ||
346 | * which took a range of entries? | ||
344 | */ | 347 | */ |
345 | static void lguest_load_tls(struct thread_struct *t, unsigned int cpu) | 348 | static void lguest_load_tls(struct thread_struct *t, unsigned int cpu) |
346 | { | 349 | { |
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c index 84e236ce76ba..72fc70cf6184 100644 --- a/arch/x86/mm/iomap_32.c +++ b/arch/x86/mm/iomap_32.c | |||
@@ -74,7 +74,7 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot) | |||
74 | /* | 74 | /* |
75 | * Map 'pfn' using fixed map 'type' and protections 'prot' | 75 | * Map 'pfn' using fixed map 'type' and protections 'prot' |
76 | */ | 76 | */ |
77 | void * | 77 | void __iomem * |
78 | iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot) | 78 | iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot) |
79 | { | 79 | { |
80 | /* | 80 | /* |
@@ -86,12 +86,12 @@ iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot) | |||
86 | if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC)) | 86 | if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC)) |
87 | prot = PAGE_KERNEL_UC_MINUS; | 87 | prot = PAGE_KERNEL_UC_MINUS; |
88 | 88 | ||
89 | return kmap_atomic_prot_pfn(pfn, type, prot); | 89 | return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, type, prot); |
90 | } | 90 | } |
91 | EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn); | 91 | EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn); |
92 | 92 | ||
93 | void | 93 | void |
94 | iounmap_atomic(void *kvaddr, enum km_type type) | 94 | iounmap_atomic(void __iomem *kvaddr, enum km_type type) |
95 | { | 95 | { |
96 | unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; | 96 | unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; |
97 | enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); | 97 | enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); |
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index f6b48f6c5951..f1575c9a2572 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c | |||
@@ -568,8 +568,13 @@ static int __init init_sysfs(void) | |||
568 | int error; | 568 | int error; |
569 | 569 | ||
570 | error = sysdev_class_register(&oprofile_sysclass); | 570 | error = sysdev_class_register(&oprofile_sysclass); |
571 | if (!error) | 571 | if (error) |
572 | error = sysdev_register(&device_oprofile); | 572 | return error; |
573 | |||
574 | error = sysdev_register(&device_oprofile); | ||
575 | if (error) | ||
576 | sysdev_class_unregister(&oprofile_sysclass); | ||
577 | |||
573 | return error; | 578 | return error; |
574 | } | 579 | } |
575 | 580 | ||
@@ -580,8 +585,10 @@ static void exit_sysfs(void) | |||
580 | } | 585 | } |
581 | 586 | ||
582 | #else | 587 | #else |
583 | #define init_sysfs() do { } while (0) | 588 | |
584 | #define exit_sysfs() do { } while (0) | 589 | static inline int init_sysfs(void) { return 0; } |
590 | static inline void exit_sysfs(void) { } | ||
591 | |||
585 | #endif /* CONFIG_PM */ | 592 | #endif /* CONFIG_PM */ |
586 | 593 | ||
587 | static int __init p4_init(char **cpu_type) | 594 | static int __init p4_init(char **cpu_type) |
@@ -664,7 +671,10 @@ static int __init ppro_init(char **cpu_type) | |||
664 | case 14: | 671 | case 14: |
665 | *cpu_type = "i386/core"; | 672 | *cpu_type = "i386/core"; |
666 | break; | 673 | break; |
667 | case 15: case 23: | 674 | case 0x0f: |
675 | case 0x16: | ||
676 | case 0x17: | ||
677 | case 0x1d: | ||
668 | *cpu_type = "i386/core_2"; | 678 | *cpu_type = "i386/core_2"; |
669 | break; | 679 | break; |
670 | case 0x1a: | 680 | case 0x1a: |
@@ -695,6 +705,8 @@ int __init op_nmi_init(struct oprofile_operations *ops) | |||
695 | char *cpu_type = NULL; | 705 | char *cpu_type = NULL; |
696 | int ret = 0; | 706 | int ret = 0; |
697 | 707 | ||
708 | using_nmi = 0; | ||
709 | |||
698 | if (!cpu_has_apic) | 710 | if (!cpu_has_apic) |
699 | return -ENODEV; | 711 | return -ENODEV; |
700 | 712 | ||
@@ -774,7 +786,10 @@ int __init op_nmi_init(struct oprofile_operations *ops) | |||
774 | 786 | ||
775 | mux_init(ops); | 787 | mux_init(ops); |
776 | 788 | ||
777 | init_sysfs(); | 789 | ret = init_sysfs(); |
790 | if (ret) | ||
791 | return ret; | ||
792 | |||
778 | using_nmi = 1; | 793 | using_nmi = 1; |
779 | printk(KERN_INFO "oprofile: using NMI interrupt.\n"); | 794 | printk(KERN_INFO "oprofile: using NMI interrupt.\n"); |
780 | return 0; | 795 | return 0; |
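The oprofile changes turn init_sysfs() into a function that unwinds the class registration when device registration fails, and op_nmi_init() now propagates that error instead of ignoring it. A hedged sketch of the register-then-roll-back pattern with invented register_class()/register_device() helpers:

#include <stdio.h>

/* Made-up helpers standing in for sysdev_class_register()/sysdev_register(). */
static int register_class(void)     { return 0; }
static void unregister_class(void)  { }
static int register_device(void)    { return -1; }   /* pretend this step fails */

static int init_sysfs_like(void)
{
        int error;

        error = register_class();
        if (error)
                return error;

        error = register_device();
        if (error)
                unregister_class();     /* undo the earlier step on failure */

        return error;
}

int main(void)
{
        printf("init_sysfs_like() = %d\n", init_sysfs_like());
        return 0;
}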
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index 1a5353a753fc..b2bb5aa3b054 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c | |||
@@ -489,8 +489,9 @@ static void xen_hvm_setup_cpu_clockevents(void) | |||
489 | __init void xen_hvm_init_time_ops(void) | 489 | __init void xen_hvm_init_time_ops(void) |
490 | { | 490 | { |
491 | /* vector callback is needed otherwise we cannot receive interrupts | 491 | /* vector callback is needed otherwise we cannot receive interrupts |
492 | * on cpu > 0 */ | 492 | * on cpu > 0 and at this point we don't know how many cpus are |
493 | if (!xen_have_vector_callback && num_present_cpus() > 1) | 493 | * available */ |
494 | if (!xen_have_vector_callback) | ||
494 | return; | 495 | return; |
495 | if (!xen_feature(XENFEAT_hvm_safe_pvclock)) { | 496 | if (!xen_feature(XENFEAT_hvm_safe_pvclock)) { |
496 | printk(KERN_INFO "Xen doesn't support pvclock on HVM," | 497 | printk(KERN_INFO "Xen doesn't support pvclock on HVM," |
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index a6809645d212..2fef1ef931a0 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c | |||
@@ -966,7 +966,7 @@ blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup) | |||
966 | 966 | ||
967 | /* Currently we do not support hierarchy deeper than two level (0,1) */ | 967 | /* Currently we do not support hierarchy deeper than two level (0,1) */ |
968 | if (parent != cgroup->top_cgroup) | 968 | if (parent != cgroup->top_cgroup) |
969 | return ERR_PTR(-EINVAL); | 969 | return ERR_PTR(-EPERM); |
970 | 970 | ||
971 | blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL); | 971 | blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL); |
972 | if (!blkcg) | 972 | if (!blkcg) |
diff --git a/block/blk-core.c b/block/blk-core.c index ee1a1e7e63cc..32a1c123dfb3 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
@@ -1198,9 +1198,9 @@ static int __make_request(struct request_queue *q, struct bio *bio) | |||
1198 | int el_ret; | 1198 | int el_ret; |
1199 | unsigned int bytes = bio->bi_size; | 1199 | unsigned int bytes = bio->bi_size; |
1200 | const unsigned short prio = bio_prio(bio); | 1200 | const unsigned short prio = bio_prio(bio); |
1201 | const bool sync = (bio->bi_rw & REQ_SYNC); | 1201 | const bool sync = !!(bio->bi_rw & REQ_SYNC); |
1202 | const bool unplug = (bio->bi_rw & REQ_UNPLUG); | 1202 | const bool unplug = !!(bio->bi_rw & REQ_UNPLUG); |
1203 | const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK; | 1203 | const unsigned long ff = bio->bi_rw & REQ_FAILFAST_MASK; |
1204 | int rw_flags; | 1204 | int rw_flags; |
1205 | 1205 | ||
1206 | if ((bio->bi_rw & REQ_HARDBARRIER) && | 1206 | if ((bio->bi_rw & REQ_HARDBARRIER) && |
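In __make_request() the sync/unplug flags are now normalized with !! and ff becomes unsigned long, so a flag bit that sits high in bi_rw cannot be lost when the masked value is squeezed into a narrower variable. A small stand-alone illustration of why the double negation matters when the destination type is narrower than the flag word; the bit position 33 is arbitrary.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t flags = (uint64_t)1 << 33;     /* a flag that lives above bit 31 */

        unsigned int truncated  = flags & ((uint64_t)1 << 33);     /* upper bits lost -> 0 */
        unsigned int normalized = !!(flags & ((uint64_t)1 << 33)); /* always 0 or 1   -> 1 */

        printf("truncated=%u normalized=%u\n", truncated, normalized);
        return 0;
}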
diff --git a/block/blk-map.c b/block/blk-map.c index c65d7593f7f1..ade0a08c9099 100644 --- a/block/blk-map.c +++ b/block/blk-map.c | |||
@@ -307,7 +307,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, | |||
307 | return PTR_ERR(bio); | 307 | return PTR_ERR(bio); |
308 | 308 | ||
309 | if (rq_data_dir(rq) == WRITE) | 309 | if (rq_data_dir(rq) == WRITE) |
310 | bio->bi_rw |= (1 << REQ_WRITE); | 310 | bio->bi_rw |= REQ_WRITE; |
311 | 311 | ||
312 | if (do_copy) | 312 | if (do_copy) |
313 | rq->cmd_flags |= REQ_COPY_USER; | 313 | rq->cmd_flags |= REQ_COPY_USER; |
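The blk_rq_map_kern() fix reflects the flag unification in this release: REQ_WRITE is now a bit mask rather than a bit number, so the old "1 << REQ_WRITE" would shift by the mask value and set an unrelated bit. A small sketch with illustrative constants (not the kernel's literal values):

    #include <stdio.h>

    #define DEMO_REQ_WRITE_BIT 0                          /* old style: bit number */
    #define DEMO_REQ_WRITE     (1 << DEMO_REQ_WRITE_BIT)  /* new style: bit mask   */

    int main(void)
    {
        unsigned long wrong = 0, right = 0;

        wrong |= 1 << DEMO_REQ_WRITE;   /* shifts by the mask: sets bit 1 */
        right |= DEMO_REQ_WRITE;        /* ORs the mask: sets bit 0       */

        printf("wrong=%#lx right=%#lx\n", wrong, right);
        return 0;
    }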
diff --git a/block/blk-merge.c b/block/blk-merge.c index 3b0cd4249671..eafc94f68d79 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c | |||
@@ -362,6 +362,18 @@ static int attempt_merge(struct request_queue *q, struct request *req, | |||
362 | return 0; | 362 | return 0; |
363 | 363 | ||
364 | /* | 364 | /* |
365 | * Don't merge file system requests and discard requests | ||
366 | */ | ||
367 | if ((req->cmd_flags & REQ_DISCARD) != (next->cmd_flags & REQ_DISCARD)) | ||
368 | return 0; | ||
369 | |||
370 | /* | ||
371 | * Don't merge discard requests and secure discard requests | ||
372 | */ | ||
373 | if ((req->cmd_flags & REQ_SECURE) != (next->cmd_flags & REQ_SECURE)) | ||
374 | return 0; | ||
375 | |||
376 | /* | ||
365 | * not contiguous | 377 | * not contiguous |
366 | */ | 378 | */ |
367 | if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next)) | 379 | if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next)) |
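The two new attempt_merge() checks keep ordinary, discard, and secure-discard requests from being combined: a merged request carries a single set of command flags, so mixing the types would change the meaning of the data. A compact user-space sketch of the compatibility predicate (flag values are stand-ins, not the kernel's):

    #include <stdbool.h>
    #include <stdio.h>

    #define DEMO_REQ_DISCARD (1u << 0)
    #define DEMO_REQ_SECURE  (1u << 1)

    /* Two requests may only merge if they agree on both attributes. */
    static bool demo_merge_compatible(unsigned int a, unsigned int b)
    {
        if ((a & DEMO_REQ_DISCARD) != (b & DEMO_REQ_DISCARD))
            return false;
        if ((a & DEMO_REQ_SECURE) != (b & DEMO_REQ_SECURE))
            return false;
        return true;
    }

    int main(void)
    {
        printf("fs+fs: %d\n", demo_merge_compatible(0, 0));
        printf("fs+discard: %d\n", demo_merge_compatible(0, DEMO_REQ_DISCARD));
        printf("discard+secure: %d\n",
               demo_merge_compatible(DEMO_REQ_DISCARD,
                                     DEMO_REQ_DISCARD | DEMO_REQ_SECURE));
        return 0;
    }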
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 001ab18078f5..0749b89c6885 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c | |||
@@ -511,6 +511,7 @@ int blk_register_queue(struct gendisk *disk) | |||
511 | kobject_uevent(&q->kobj, KOBJ_REMOVE); | 511 | kobject_uevent(&q->kobj, KOBJ_REMOVE); |
512 | kobject_del(&q->kobj); | 512 | kobject_del(&q->kobj); |
513 | blk_trace_remove_sysfs(disk_to_dev(disk)); | 513 | blk_trace_remove_sysfs(disk_to_dev(disk)); |
514 | kobject_put(&dev->kobj); | ||
514 | return ret; | 515 | return ret; |
515 | } | 516 | } |
516 | 517 | ||
diff --git a/block/blk.h b/block/blk.h index 6e7dc87141e4..d6b911ac002c 100644 --- a/block/blk.h +++ b/block/blk.h | |||
@@ -142,14 +142,18 @@ static inline int queue_congestion_off_threshold(struct request_queue *q) | |||
142 | 142 | ||
143 | static inline int blk_cpu_to_group(int cpu) | 143 | static inline int blk_cpu_to_group(int cpu) |
144 | { | 144 | { |
145 | int group = NR_CPUS; | ||
145 | #ifdef CONFIG_SCHED_MC | 146 | #ifdef CONFIG_SCHED_MC |
146 | const struct cpumask *mask = cpu_coregroup_mask(cpu); | 147 | const struct cpumask *mask = cpu_coregroup_mask(cpu); |
147 | return cpumask_first(mask); | 148 | group = cpumask_first(mask); |
148 | #elif defined(CONFIG_SCHED_SMT) | 149 | #elif defined(CONFIG_SCHED_SMT) |
149 | return cpumask_first(topology_thread_cpumask(cpu)); | 150 | group = cpumask_first(topology_thread_cpumask(cpu)); |
150 | #else | 151 | #else |
151 | return cpu; | 152 | return cpu; |
152 | #endif | 153 | #endif |
154 | if (likely(group < NR_CPUS)) | ||
155 | return group; | ||
156 | return cpu; | ||
153 | } | 157 | } |
154 | 158 | ||
155 | /* | 159 | /* |
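blk_cpu_to_group() previously trusted cpumask_first() to hand back a valid CPU; if the sibling mask is empty it returns a value beyond the last valid CPU, which the new code catches by falling back to the CPU itself. A user-space model of the guard (bitmask-based, purely illustrative):

    #include <stdio.h>

    #define DEMO_NR_CPUS 8

    /* Stand-in for cpumask_first(): DEMO_NR_CPUS means "mask is empty". */
    static int demo_first_cpu(unsigned int mask)
    {
        for (int cpu = 0; cpu < DEMO_NR_CPUS; cpu++)
            if (mask & (1u << cpu))
                return cpu;
        return DEMO_NR_CPUS;
    }

    static int demo_cpu_to_group(int cpu, unsigned int sibling_mask)
    {
        int group = demo_first_cpu(sibling_mask);

        if (group < DEMO_NR_CPUS)
            return group;          /* valid group leader found */
        return cpu;                /* empty mask: fall back to the CPU itself */
    }

    int main(void)
    {
        printf("siblings 0x0c -> group %d\n", demo_cpu_to_group(3, 0x0c));
        printf("empty mask   -> group %d\n", demo_cpu_to_group(3, 0x00));
        return 0;
    }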
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index eb4086f7dfef..9eba291eb6fd 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c | |||
@@ -30,6 +30,7 @@ static const int cfq_slice_sync = HZ / 10; | |||
30 | static int cfq_slice_async = HZ / 25; | 30 | static int cfq_slice_async = HZ / 25; |
31 | static const int cfq_slice_async_rq = 2; | 31 | static const int cfq_slice_async_rq = 2; |
32 | static int cfq_slice_idle = HZ / 125; | 32 | static int cfq_slice_idle = HZ / 125; |
33 | static int cfq_group_idle = HZ / 125; | ||
33 | static const int cfq_target_latency = HZ * 3/10; /* 300 ms */ | 34 | static const int cfq_target_latency = HZ * 3/10; /* 300 ms */ |
34 | static const int cfq_hist_divisor = 4; | 35 | static const int cfq_hist_divisor = 4; |
35 | 36 | ||
@@ -147,6 +148,8 @@ struct cfq_queue { | |||
147 | struct cfq_queue *new_cfqq; | 148 | struct cfq_queue *new_cfqq; |
148 | struct cfq_group *cfqg; | 149 | struct cfq_group *cfqg; |
149 | struct cfq_group *orig_cfqg; | 150 | struct cfq_group *orig_cfqg; |
151 | /* Number of sectors dispatched from queue in single dispatch round */ | ||
152 | unsigned long nr_sectors; | ||
150 | }; | 153 | }; |
151 | 154 | ||
152 | /* | 155 | /* |
@@ -198,6 +201,8 @@ struct cfq_group { | |||
198 | struct hlist_node cfqd_node; | 201 | struct hlist_node cfqd_node; |
199 | atomic_t ref; | 202 | atomic_t ref; |
200 | #endif | 203 | #endif |
204 | /* number of requests that are on the dispatch list or inside driver */ | ||
205 | int dispatched; | ||
201 | }; | 206 | }; |
202 | 207 | ||
203 | /* | 208 | /* |
@@ -271,6 +276,7 @@ struct cfq_data { | |||
271 | unsigned int cfq_slice[2]; | 276 | unsigned int cfq_slice[2]; |
272 | unsigned int cfq_slice_async_rq; | 277 | unsigned int cfq_slice_async_rq; |
273 | unsigned int cfq_slice_idle; | 278 | unsigned int cfq_slice_idle; |
279 | unsigned int cfq_group_idle; | ||
274 | unsigned int cfq_latency; | 280 | unsigned int cfq_latency; |
275 | unsigned int cfq_group_isolation; | 281 | unsigned int cfq_group_isolation; |
276 | 282 | ||
@@ -378,6 +384,21 @@ CFQ_CFQQ_FNS(wait_busy); | |||
378 | &cfqg->service_trees[i][j]: NULL) \ | 384 | &cfqg->service_trees[i][j]: NULL) \ |
379 | 385 | ||
380 | 386 | ||
387 | static inline bool iops_mode(struct cfq_data *cfqd) | ||
388 | { | ||
389 | /* | ||
390 | * If we are not idling on queues and it is a NCQ drive, parallel | ||
391 | * execution of requests is on and measuring time is not possible | ||
392 | * in most of the cases until and unless we drive shallower queue | ||
393 | * depths and that becomes a performance bottleneck. In such cases | ||
394 | * switch to start providing fairness in terms of number of IOs. | ||
395 | */ | ||
396 | if (!cfqd->cfq_slice_idle && cfqd->hw_tag) | ||
397 | return true; | ||
398 | else | ||
399 | return false; | ||
400 | } | ||
401 | |||
381 | static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq) | 402 | static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq) |
382 | { | 403 | { |
383 | if (cfq_class_idle(cfqq)) | 404 | if (cfq_class_idle(cfqq)) |
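iops_mode() encodes its rationale in the comment: with queue idling disabled on an NCQ drive, many requests are in flight at once, wall-clock slices stop measuring service received, and fairness is accounted per IO instead. The if/else reads as a single boolean expression; a minimal sketch with a simplified struct (field names illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    struct demo_cfqd {
        unsigned int slice_idle;   /* 0: queue idling disabled       */
        int          hw_tag;       /* non-zero: drive queues (NCQ)   */
    };

    /* Equivalent single-expression form of iops_mode(). */
    static bool demo_iops_mode(const struct demo_cfqd *cfqd)
    {
        return !cfqd->slice_idle && cfqd->hw_tag;
    }

    int main(void)
    {
        struct demo_cfqd ncq_no_idle = { .slice_idle = 0, .hw_tag = 1 };
        struct demo_cfqd idling      = { .slice_idle = 8, .hw_tag = 1 };

        printf("%d %d\n", demo_iops_mode(&ncq_no_idle), demo_iops_mode(&idling));
        return 0;
    }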
@@ -906,7 +927,6 @@ static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq) | |||
906 | slice_used = cfqq->allocated_slice; | 927 | slice_used = cfqq->allocated_slice; |
907 | } | 928 | } |
908 | 929 | ||
909 | cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u", slice_used); | ||
910 | return slice_used; | 930 | return slice_used; |
911 | } | 931 | } |
912 | 932 | ||
@@ -914,19 +934,21 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg, | |||
914 | struct cfq_queue *cfqq) | 934 | struct cfq_queue *cfqq) |
915 | { | 935 | { |
916 | struct cfq_rb_root *st = &cfqd->grp_service_tree; | 936 | struct cfq_rb_root *st = &cfqd->grp_service_tree; |
917 | unsigned int used_sl, charge_sl; | 937 | unsigned int used_sl, charge; |
918 | int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg) | 938 | int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg) |
919 | - cfqg->service_tree_idle.count; | 939 | - cfqg->service_tree_idle.count; |
920 | 940 | ||
921 | BUG_ON(nr_sync < 0); | 941 | BUG_ON(nr_sync < 0); |
922 | used_sl = charge_sl = cfq_cfqq_slice_usage(cfqq); | 942 | used_sl = charge = cfq_cfqq_slice_usage(cfqq); |
923 | 943 | ||
924 | if (!cfq_cfqq_sync(cfqq) && !nr_sync) | 944 | if (iops_mode(cfqd)) |
925 | charge_sl = cfqq->allocated_slice; | 945 | charge = cfqq->slice_dispatch; |
946 | else if (!cfq_cfqq_sync(cfqq) && !nr_sync) | ||
947 | charge = cfqq->allocated_slice; | ||
926 | 948 | ||
927 | /* Can't update vdisktime while group is on service tree */ | 949 | /* Can't update vdisktime while group is on service tree */ |
928 | cfq_rb_erase(&cfqg->rb_node, st); | 950 | cfq_rb_erase(&cfqg->rb_node, st); |
929 | cfqg->vdisktime += cfq_scale_slice(charge_sl, cfqg); | 951 | cfqg->vdisktime += cfq_scale_slice(charge, cfqg); |
930 | __cfq_group_service_tree_add(st, cfqg); | 952 | __cfq_group_service_tree_add(st, cfqg); |
931 | 953 | ||
932 | /* This group is being expired. Save the context */ | 954 | /* This group is being expired. Save the context */ |
@@ -940,6 +962,9 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg, | |||
940 | 962 | ||
941 | cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime, | 963 | cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime, |
942 | st->min_vdisktime); | 964 | st->min_vdisktime); |
965 | cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u" | ||
966 | " sect=%u", used_sl, cfqq->slice_dispatch, charge, | ||
967 | iops_mode(cfqd), cfqq->nr_sectors); | ||
943 | cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl); | 968 | cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl); |
944 | cfq_blkiocg_set_start_empty_time(&cfqg->blkg); | 969 | cfq_blkiocg_set_start_empty_time(&cfqg->blkg); |
945 | } | 970 | } |
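cfq_group_served() now picks the group charge in three tiers: IOs dispatched when in iops mode, the full allocated slice for an async-only group, otherwise the time actually used. A user-space sketch of that selection (struct and field names are illustrative, not the kernel's cfq_queue):

    #include <stdio.h>

    struct demo_queue {
        unsigned int used_slice;       /* time of service actually used  */
        unsigned int allocated_slice;  /* time the queue was granted     */
        unsigned int slice_dispatch;   /* IOs dispatched this round      */
        int          is_sync;
    };

    static unsigned int demo_charge(const struct demo_queue *q,
                                    int iops_mode, int nr_sync)
    {
        if (iops_mode)
            return q->slice_dispatch;      /* fairness in IOs               */
        if (!q->is_sync && !nr_sync)
            return q->allocated_slice;     /* async-only group: full slice  */
        return q->used_slice;              /* default: charge the time used */
    }

    int main(void)
    {
        struct demo_queue q = { .used_slice = 40, .allocated_slice = 100,
                                .slice_dispatch = 12, .is_sync = 1 };

        printf("iops=%u time=%u\n", demo_charge(&q, 1, 1), demo_charge(&q, 0, 1));
        return 0;
    }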
@@ -994,10 +1019,20 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create) | |||
994 | */ | 1019 | */ |
995 | atomic_set(&cfqg->ref, 1); | 1020 | atomic_set(&cfqg->ref, 1); |
996 | 1021 | ||
997 | /* Add group onto cgroup list */ | 1022 | /* |
998 | sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor); | 1023 | * Add group onto cgroup list. It might happen that bdi->dev is |
999 | cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd, | 1024 | * not initialized yet. Initialize this new group without major |
1025 | * and minor info and this info will be filled in once a new thread | ||
1026 | * comes for IO. See code above. | ||
1027 | */ | ||
1028 | if (bdi->dev) { | ||
1029 | sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor); | ||
1030 | cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd, | ||
1000 | MKDEV(major, minor)); | 1031 | MKDEV(major, minor)); |
1032 | } else | ||
1033 | cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd, | ||
1034 | 0); | ||
1035 | |||
1001 | cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev); | 1036 | cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev); |
1002 | 1037 | ||
1003 | /* Add group on cfqd list */ | 1038 | /* Add group on cfqd list */ |
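cfq_find_alloc_cfqg() can run before the backing device has registered, in which case dev_name(bdi->dev) would dereference a NULL device; the group is therefore registered with device number 0 and the major:minor pair is filled in later, as the new comment says. A small user-space sketch of the guarded parse:

    #include <stdio.h>

    /* If the device is not registered yet, fall back to 0:0 and let the
     * real numbers be filled in later. */
    static void demo_parse_dev(const char *dev_name_or_null,
                               unsigned int *major, unsigned int *minor)
    {
        *major = 0;
        *minor = 0;
        if (dev_name_or_null)
            sscanf(dev_name_or_null, "%u:%u", major, minor);
    }

    int main(void)
    {
        unsigned int maj, mnr;

        demo_parse_dev("253:16", &maj, &mnr);
        printf("have dev: %u:%u\n", maj, mnr);

        demo_parse_dev(NULL, &maj, &mnr);   /* bdi->dev not initialized yet */
        printf("no dev yet: %u:%u\n", maj, mnr);
        return 0;
    }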
@@ -1587,6 +1622,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd, | |||
1587 | cfqq->allocated_slice = 0; | 1622 | cfqq->allocated_slice = 0; |
1588 | cfqq->slice_end = 0; | 1623 | cfqq->slice_end = 0; |
1589 | cfqq->slice_dispatch = 0; | 1624 | cfqq->slice_dispatch = 0; |
1625 | cfqq->nr_sectors = 0; | ||
1590 | 1626 | ||
1591 | cfq_clear_cfqq_wait_request(cfqq); | 1627 | cfq_clear_cfqq_wait_request(cfqq); |
1592 | cfq_clear_cfqq_must_dispatch(cfqq); | 1628 | cfq_clear_cfqq_must_dispatch(cfqq); |
@@ -1839,6 +1875,9 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq) | |||
1839 | BUG_ON(!service_tree); | 1875 | BUG_ON(!service_tree); |
1840 | BUG_ON(!service_tree->count); | 1876 | BUG_ON(!service_tree->count); |
1841 | 1877 | ||
1878 | if (!cfqd->cfq_slice_idle) | ||
1879 | return false; | ||
1880 | |||
1842 | /* We never do for idle class queues. */ | 1881 | /* We never do for idle class queues. */ |
1843 | if (prio == IDLE_WORKLOAD) | 1882 | if (prio == IDLE_WORKLOAD) |
1844 | return false; | 1883 | return false; |
@@ -1863,7 +1902,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd) | |||
1863 | { | 1902 | { |
1864 | struct cfq_queue *cfqq = cfqd->active_queue; | 1903 | struct cfq_queue *cfqq = cfqd->active_queue; |
1865 | struct cfq_io_context *cic; | 1904 | struct cfq_io_context *cic; |
1866 | unsigned long sl; | 1905 | unsigned long sl, group_idle = 0; |
1867 | 1906 | ||
1868 | /* | 1907 | /* |
1869 | * SSD device without seek penalty, disable idling. But only do so | 1908 | * SSD device without seek penalty, disable idling. But only do so |
@@ -1879,8 +1918,13 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd) | |||
1879 | /* | 1918 | /* |
1880 | * idle is disabled, either manually or by past process history | 1919 | * idle is disabled, either manually or by past process history |
1881 | */ | 1920 | */ |
1882 | if (!cfqd->cfq_slice_idle || !cfq_should_idle(cfqd, cfqq)) | 1921 | if (!cfq_should_idle(cfqd, cfqq)) { |
1883 | return; | 1922 | /* no queue idling. Check for group idling */ |
1923 | if (cfqd->cfq_group_idle) | ||
1924 | group_idle = cfqd->cfq_group_idle; | ||
1925 | else | ||
1926 | return; | ||
1927 | } | ||
1884 | 1928 | ||
1885 | /* | 1929 | /* |
1886 | * still active requests from this queue, don't idle | 1930 | * still active requests from this queue, don't idle |
@@ -1907,13 +1951,21 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd) | |||
1907 | return; | 1951 | return; |
1908 | } | 1952 | } |
1909 | 1953 | ||
1954 | /* There are other queues in the group, don't do group idle */ | ||
1955 | if (group_idle && cfqq->cfqg->nr_cfqq > 1) | ||
1956 | return; | ||
1957 | |||
1910 | cfq_mark_cfqq_wait_request(cfqq); | 1958 | cfq_mark_cfqq_wait_request(cfqq); |
1911 | 1959 | ||
1912 | sl = cfqd->cfq_slice_idle; | 1960 | if (group_idle) |
1961 | sl = cfqd->cfq_group_idle; | ||
1962 | else | ||
1963 | sl = cfqd->cfq_slice_idle; | ||
1913 | 1964 | ||
1914 | mod_timer(&cfqd->idle_slice_timer, jiffies + sl); | 1965 | mod_timer(&cfqd->idle_slice_timer, jiffies + sl); |
1915 | cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg); | 1966 | cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg); |
1916 | cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl); | 1967 | cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl, |
1968 | group_idle ? 1 : 0); | ||
1917 | } | 1969 | } |
1918 | 1970 | ||
1919 | /* | 1971 | /* |
@@ -1929,9 +1981,11 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq) | |||
1929 | cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq); | 1981 | cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq); |
1930 | cfq_remove_request(rq); | 1982 | cfq_remove_request(rq); |
1931 | cfqq->dispatched++; | 1983 | cfqq->dispatched++; |
1984 | (RQ_CFQG(rq))->dispatched++; | ||
1932 | elv_dispatch_sort(q, rq); | 1985 | elv_dispatch_sort(q, rq); |
1933 | 1986 | ||
1934 | cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++; | 1987 | cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++; |
1988 | cfqq->nr_sectors += blk_rq_sectors(rq); | ||
1935 | cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq), | 1989 | cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq), |
1936 | rq_data_dir(rq), rq_is_sync(rq)); | 1990 | rq_data_dir(rq), rq_is_sync(rq)); |
1937 | } | 1991 | } |
@@ -2198,7 +2252,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) | |||
2198 | cfqq = NULL; | 2252 | cfqq = NULL; |
2199 | goto keep_queue; | 2253 | goto keep_queue; |
2200 | } else | 2254 | } else |
2201 | goto expire; | 2255 | goto check_group_idle; |
2202 | } | 2256 | } |
2203 | 2257 | ||
2204 | /* | 2258 | /* |
@@ -2226,8 +2280,23 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) | |||
2226 | * flight or is idling for a new request, allow either of these | 2280 | * flight or is idling for a new request, allow either of these |
2227 | * conditions to happen (or time out) before selecting a new queue. | 2281 | * conditions to happen (or time out) before selecting a new queue. |
2228 | */ | 2282 | */ |
2229 | if (timer_pending(&cfqd->idle_slice_timer) || | 2283 | if (timer_pending(&cfqd->idle_slice_timer)) { |
2230 | (cfqq->dispatched && cfq_should_idle(cfqd, cfqq))) { | 2284 | cfqq = NULL; |
2285 | goto keep_queue; | ||
2286 | } | ||
2287 | |||
2288 | if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) { | ||
2289 | cfqq = NULL; | ||
2290 | goto keep_queue; | ||
2291 | } | ||
2292 | |||
2293 | /* | ||
2294 | * If group idle is enabled and there are requests dispatched from | ||
2295 | * this group, wait for requests to complete. | ||
2296 | */ | ||
2297 | check_group_idle: | ||
2298 | if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 | ||
2299 | && cfqq->cfqg->dispatched) { | ||
2231 | cfqq = NULL; | 2300 | cfqq = NULL; |
2232 | goto keep_queue; | 2301 | goto keep_queue; |
2233 | } | 2302 | } |
@@ -3375,6 +3444,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq) | |||
3375 | WARN_ON(!cfqq->dispatched); | 3444 | WARN_ON(!cfqq->dispatched); |
3376 | cfqd->rq_in_driver--; | 3445 | cfqd->rq_in_driver--; |
3377 | cfqq->dispatched--; | 3446 | cfqq->dispatched--; |
3447 | (RQ_CFQG(rq))->dispatched--; | ||
3378 | cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg, | 3448 | cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg, |
3379 | rq_start_time_ns(rq), rq_io_start_time_ns(rq), | 3449 | rq_start_time_ns(rq), rq_io_start_time_ns(rq), |
3380 | rq_data_dir(rq), rq_is_sync(rq)); | 3450 | rq_data_dir(rq), rq_is_sync(rq)); |
@@ -3404,7 +3474,10 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq) | |||
3404 | * the queue. | 3474 | * the queue. |
3405 | */ | 3475 | */ |
3406 | if (cfq_should_wait_busy(cfqd, cfqq)) { | 3476 | if (cfq_should_wait_busy(cfqd, cfqq)) { |
3407 | cfqq->slice_end = jiffies + cfqd->cfq_slice_idle; | 3477 | unsigned long extend_sl = cfqd->cfq_slice_idle; |
3478 | if (!cfqd->cfq_slice_idle) | ||
3479 | extend_sl = cfqd->cfq_group_idle; | ||
3480 | cfqq->slice_end = jiffies + extend_sl; | ||
3408 | cfq_mark_cfqq_wait_busy(cfqq); | 3481 | cfq_mark_cfqq_wait_busy(cfqq); |
3409 | cfq_log_cfqq(cfqd, cfqq, "will busy wait"); | 3482 | cfq_log_cfqq(cfqd, cfqq, "will busy wait"); |
3410 | } | 3483 | } |
@@ -3850,6 +3923,7 @@ static void *cfq_init_queue(struct request_queue *q) | |||
3850 | cfqd->cfq_slice[1] = cfq_slice_sync; | 3923 | cfqd->cfq_slice[1] = cfq_slice_sync; |
3851 | cfqd->cfq_slice_async_rq = cfq_slice_async_rq; | 3924 | cfqd->cfq_slice_async_rq = cfq_slice_async_rq; |
3852 | cfqd->cfq_slice_idle = cfq_slice_idle; | 3925 | cfqd->cfq_slice_idle = cfq_slice_idle; |
3926 | cfqd->cfq_group_idle = cfq_group_idle; | ||
3853 | cfqd->cfq_latency = 1; | 3927 | cfqd->cfq_latency = 1; |
3854 | cfqd->cfq_group_isolation = 0; | 3928 | cfqd->cfq_group_isolation = 0; |
3855 | cfqd->hw_tag = -1; | 3929 | cfqd->hw_tag = -1; |
@@ -3922,6 +3996,7 @@ SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1); | |||
3922 | SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0); | 3996 | SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0); |
3923 | SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0); | 3997 | SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0); |
3924 | SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1); | 3998 | SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1); |
3999 | SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1); | ||
3925 | SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1); | 4000 | SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1); |
3926 | SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1); | 4001 | SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1); |
3927 | SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0); | 4002 | SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0); |
@@ -3954,6 +4029,7 @@ STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0); | |||
3954 | STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, | 4029 | STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, |
3955 | UINT_MAX, 0); | 4030 | UINT_MAX, 0); |
3956 | STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1); | 4031 | STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1); |
4032 | STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1); | ||
3957 | STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1); | 4033 | STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1); |
3958 | STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1); | 4034 | STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1); |
3959 | STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, | 4035 | STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, |
@@ -3975,6 +4051,7 @@ static struct elv_fs_entry cfq_attrs[] = { | |||
3975 | CFQ_ATTR(slice_async), | 4051 | CFQ_ATTR(slice_async), |
3976 | CFQ_ATTR(slice_async_rq), | 4052 | CFQ_ATTR(slice_async_rq), |
3977 | CFQ_ATTR(slice_idle), | 4053 | CFQ_ATTR(slice_idle), |
4054 | CFQ_ATTR(group_idle), | ||
3978 | CFQ_ATTR(low_latency), | 4055 | CFQ_ATTR(low_latency), |
3979 | CFQ_ATTR(group_isolation), | 4056 | CFQ_ATTR(group_isolation), |
3980 | __ATTR_NULL | 4057 | __ATTR_NULL |
@@ -4028,6 +4105,12 @@ static int __init cfq_init(void) | |||
4028 | if (!cfq_slice_idle) | 4105 | if (!cfq_slice_idle) |
4029 | cfq_slice_idle = 1; | 4106 | cfq_slice_idle = 1; |
4030 | 4107 | ||
4108 | #ifdef CONFIG_CFQ_GROUP_IOSCHED | ||
4109 | if (!cfq_group_idle) | ||
4110 | cfq_group_idle = 1; | ||
4111 | #else | ||
4112 | cfq_group_idle = 0; | ||
4113 | #endif | ||
4031 | if (cfq_slab_setup()) | 4114 | if (cfq_slab_setup()) |
4032 | return -ENOMEM; | 4115 | return -ENOMEM; |
4033 | 4116 | ||
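Taken together, these cfq-iosched hunks add a group_idle tunable alongside slice_idle: it is exposed through the CFQ_ATTR(group_idle) elevator attribute, defaults to HZ/125 (forced to at least 1 jiffy) when CONFIG_CFQ_GROUP_IOSCHED is set and to 0 otherwise, and is used wherever the code previously assumed slice_idle, such as arming the idle timer and extending the slice in cfq_completed_request(). A minimal sketch of that fallback choice (names illustrative):

    #include <stdio.h>

    struct demo_tunables {
        unsigned long slice_idle;   /* per-queue idle window, 0 = off */
        unsigned long group_idle;   /* per-group idle window, 0 = off */
    };

    /* Prefer per-queue idling; otherwise idle on behalf of the group. */
    static unsigned long demo_idle_window(const struct demo_tunables *t)
    {
        return t->slice_idle ? t->slice_idle : t->group_idle;
    }

    int main(void)
    {
        struct demo_tunables t = { .slice_idle = 0, .group_idle = 8 };

        printf("idle window = %lu\n", demo_idle_window(&t));
        return 0;
    }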
diff --git a/block/elevator.c b/block/elevator.c index ec585c9554d3..205b09a5bd9e 100644 --- a/block/elevator.c +++ b/block/elevator.c | |||
@@ -1009,18 +1009,19 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) | |||
1009 | { | 1009 | { |
1010 | struct elevator_queue *old_elevator, *e; | 1010 | struct elevator_queue *old_elevator, *e; |
1011 | void *data; | 1011 | void *data; |
1012 | int err; | ||
1012 | 1013 | ||
1013 | /* | 1014 | /* |
1014 | * Allocate new elevator | 1015 | * Allocate new elevator |
1015 | */ | 1016 | */ |
1016 | e = elevator_alloc(q, new_e); | 1017 | e = elevator_alloc(q, new_e); |
1017 | if (!e) | 1018 | if (!e) |
1018 | return 0; | 1019 | return -ENOMEM; |
1019 | 1020 | ||
1020 | data = elevator_init_queue(q, e); | 1021 | data = elevator_init_queue(q, e); |
1021 | if (!data) { | 1022 | if (!data) { |
1022 | kobject_put(&e->kobj); | 1023 | kobject_put(&e->kobj); |
1023 | return 0; | 1024 | return -ENOMEM; |
1024 | } | 1025 | } |
1025 | 1026 | ||
1026 | /* | 1027 | /* |
@@ -1043,7 +1044,8 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) | |||
1043 | 1044 | ||
1044 | __elv_unregister_queue(old_elevator); | 1045 | __elv_unregister_queue(old_elevator); |
1045 | 1046 | ||
1046 | if (elv_register_queue(q)) | 1047 | err = elv_register_queue(q); |
1048 | if (err) | ||
1047 | goto fail_register; | 1049 | goto fail_register; |
1048 | 1050 | ||
1049 | /* | 1051 | /* |
@@ -1056,7 +1058,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) | |||
1056 | 1058 | ||
1057 | blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name); | 1059 | blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name); |
1058 | 1060 | ||
1059 | return 1; | 1061 | return 0; |
1060 | 1062 | ||
1061 | fail_register: | 1063 | fail_register: |
1062 | /* | 1064 | /* |
@@ -1071,17 +1073,19 @@ fail_register: | |||
1071 | queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q); | 1073 | queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q); |
1072 | spin_unlock_irq(q->queue_lock); | 1074 | spin_unlock_irq(q->queue_lock); |
1073 | 1075 | ||
1074 | return 0; | 1076 | return err; |
1075 | } | 1077 | } |
1076 | 1078 | ||
1077 | ssize_t elv_iosched_store(struct request_queue *q, const char *name, | 1079 | /* |
1078 | size_t count) | 1080 | * Switch this queue to the given IO scheduler. |
1081 | */ | ||
1082 | int elevator_change(struct request_queue *q, const char *name) | ||
1079 | { | 1083 | { |
1080 | char elevator_name[ELV_NAME_MAX]; | 1084 | char elevator_name[ELV_NAME_MAX]; |
1081 | struct elevator_type *e; | 1085 | struct elevator_type *e; |
1082 | 1086 | ||
1083 | if (!q->elevator) | 1087 | if (!q->elevator) |
1084 | return count; | 1088 | return -ENXIO; |
1085 | 1089 | ||
1086 | strlcpy(elevator_name, name, sizeof(elevator_name)); | 1090 | strlcpy(elevator_name, name, sizeof(elevator_name)); |
1087 | e = elevator_get(strstrip(elevator_name)); | 1091 | e = elevator_get(strstrip(elevator_name)); |
@@ -1092,13 +1096,27 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name, | |||
1092 | 1096 | ||
1093 | if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) { | 1097 | if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) { |
1094 | elevator_put(e); | 1098 | elevator_put(e); |
1095 | return count; | 1099 | return 0; |
1096 | } | 1100 | } |
1097 | 1101 | ||
1098 | if (!elevator_switch(q, e)) | 1102 | return elevator_switch(q, e); |
1099 | printk(KERN_ERR "elevator: switch to %s failed\n", | 1103 | } |
1100 | elevator_name); | 1104 | EXPORT_SYMBOL(elevator_change); |
1101 | return count; | 1105 | |
1106 | ssize_t elv_iosched_store(struct request_queue *q, const char *name, | ||
1107 | size_t count) | ||
1108 | { | ||
1109 | int ret; | ||
1110 | |||
1111 | if (!q->elevator) | ||
1112 | return count; | ||
1113 | |||
1114 | ret = elevator_change(q, name); | ||
1115 | if (!ret) | ||
1116 | return count; | ||
1117 | |||
1118 | printk(KERN_ERR "elevator: switch to %s failed\n", name); | ||
1119 | return ret; | ||
1102 | } | 1120 | } |
1103 | 1121 | ||
1104 | ssize_t elv_iosched_show(struct request_queue *q, char *name) | 1122 | ssize_t elv_iosched_show(struct request_queue *q, char *name) |
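elevator_change() factors the scheduler switch out of the sysfs store path and is exported, so in-kernel users can request a scheduler by name and receive a proper errno (elevator_switch() itself now returns 0 or a negative error instead of 1/0). A hedged kernel-style sketch of a caller; my_driver_queue_setup() and its context are hypothetical, only elevator_change(q, name) comes from this patch:

    #include <linux/kernel.h>
    #include <linux/blkdev.h>
    #include <linux/elevator.h>

    static int my_driver_queue_setup(struct request_queue *q)
    {
        int ret;

        /* Ask for "noop" on this (hypothetical) device; 0 on success,
         * negative errno otherwise. */
        ret = elevator_change(q, "noop");
        if (ret)
            pr_warn("my_driver: could not switch elevator (%d)\n", ret);

        return 0;
    }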
diff --git a/crypto/Kconfig b/crypto/Kconfig index 1cd497d7a15a..e573077f1672 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig | |||
@@ -101,13 +101,13 @@ config CRYPTO_MANAGER2 | |||
101 | select CRYPTO_BLKCIPHER2 | 101 | select CRYPTO_BLKCIPHER2 |
102 | select CRYPTO_PCOMP2 | 102 | select CRYPTO_PCOMP2 |
103 | 103 | ||
104 | config CRYPTO_MANAGER_TESTS | 104 | config CRYPTO_MANAGER_DISABLE_TESTS |
105 | bool "Run algolithms' self-tests" | 105 | bool "Disable run-time self tests" |
106 | default y | 106 | default y |
107 | depends on CRYPTO_MANAGER2 | 107 | depends on CRYPTO_MANAGER2 |
108 | help | 108 | help |
109 | Run cryptomanager's tests for the new crypto algorithms being | 109 | Disable run-time self tests that normally take place at |
110 | registered. | 110 | algorithm registration. |
111 | 111 | ||
112 | config CRYPTO_GF128MUL | 112 | config CRYPTO_GF128MUL |
113 | tristate "GF(2^128) multiplication functions (EXPERIMENTAL)" | 113 | tristate "GF(2^128) multiplication functions (EXPERIMENTAL)" |
diff --git a/crypto/ahash.c b/crypto/ahash.c index b8c59b889c6e..f669822a7a44 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c | |||
@@ -47,8 +47,11 @@ static int hash_walk_next(struct crypto_hash_walk *walk) | |||
47 | walk->data = crypto_kmap(walk->pg, 0); | 47 | walk->data = crypto_kmap(walk->pg, 0); |
48 | walk->data += offset; | 48 | walk->data += offset; |
49 | 49 | ||
50 | if (offset & alignmask) | 50 | if (offset & alignmask) { |
51 | nbytes = alignmask + 1 - (offset & alignmask); | 51 | unsigned int unaligned = alignmask + 1 - (offset & alignmask); |
52 | if (nbytes > unaligned) | ||
53 | nbytes = unaligned; | ||
54 | } | ||
52 | 55 | ||
53 | walk->entrylen -= nbytes; | 56 | walk->entrylen -= nbytes; |
54 | return nbytes; | 57 | return nbytes; |
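The hash_walk_next() fix only ever shrinks the step: the old assignment replaced nbytes with the distance to the next alignment boundary, which could exceed the bytes actually available and walk past the buffer. A user-space sketch of the clamped step with illustrative numbers:

    #include <stdio.h>

    /* Clamp a hash-walk step so it never exceeds either the bytes left
     * or the distance to the next alignment boundary. */
    static unsigned int demo_walk_step(unsigned int nbytes,
                                       unsigned int offset,
                                       unsigned int alignmask)
    {
        if (offset & alignmask) {
            unsigned int unaligned = alignmask + 1 - (offset & alignmask);

            if (nbytes > unaligned)
                nbytes = unaligned;   /* shrink only, never grow */
        }
        return nbytes;
    }

    int main(void)
    {
        /* 2 bytes left, offset 3 within an 8-byte alignment window:
         * the old code would have produced 5 and walked past the data. */
        printf("step=%u\n", demo_walk_step(2, 3, 7));
        return 0;
    }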
diff --git a/crypto/algboss.c b/crypto/algboss.c index 40bd391f34d9..791d194958fa 100644 --- a/crypto/algboss.c +++ b/crypto/algboss.c | |||
@@ -206,13 +206,16 @@ err: | |||
206 | return NOTIFY_OK; | 206 | return NOTIFY_OK; |
207 | } | 207 | } |
208 | 208 | ||
209 | #ifdef CONFIG_CRYPTO_MANAGER_TESTS | ||
210 | static int cryptomgr_test(void *data) | 209 | static int cryptomgr_test(void *data) |
211 | { | 210 | { |
212 | struct crypto_test_param *param = data; | 211 | struct crypto_test_param *param = data; |
213 | u32 type = param->type; | 212 | u32 type = param->type; |
214 | int err = 0; | 213 | int err = 0; |
215 | 214 | ||
215 | #ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS | ||
216 | goto skiptest; | ||
217 | #endif | ||
218 | |||
216 | if (type & CRYPTO_ALG_TESTED) | 219 | if (type & CRYPTO_ALG_TESTED) |
217 | goto skiptest; | 220 | goto skiptest; |
218 | 221 | ||
@@ -267,7 +270,6 @@ err_put_module: | |||
267 | err: | 270 | err: |
268 | return NOTIFY_OK; | 271 | return NOTIFY_OK; |
269 | } | 272 | } |
270 | #endif /* CONFIG_CRYPTO_MANAGER_TESTS */ | ||
271 | 273 | ||
272 | static int cryptomgr_notify(struct notifier_block *this, unsigned long msg, | 274 | static int cryptomgr_notify(struct notifier_block *this, unsigned long msg, |
273 | void *data) | 275 | void *data) |
@@ -275,10 +277,8 @@ static int cryptomgr_notify(struct notifier_block *this, unsigned long msg, | |||
275 | switch (msg) { | 277 | switch (msg) { |
276 | case CRYPTO_MSG_ALG_REQUEST: | 278 | case CRYPTO_MSG_ALG_REQUEST: |
277 | return cryptomgr_schedule_probe(data); | 279 | return cryptomgr_schedule_probe(data); |
278 | #ifdef CONFIG_CRYPTO_MANAGER_TESTS | ||
279 | case CRYPTO_MSG_ALG_REGISTER: | 280 | case CRYPTO_MSG_ALG_REGISTER: |
280 | return cryptomgr_schedule_test(data); | 281 | return cryptomgr_schedule_test(data); |
281 | #endif | ||
282 | } | 282 | } |
283 | 283 | ||
284 | return NOTIFY_DONE; | 284 | return NOTIFY_DONE; |
diff --git a/crypto/testmgr.c b/crypto/testmgr.c index abd980c729eb..fa8c8f78c8d4 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c | |||
@@ -23,7 +23,7 @@ | |||
23 | 23 | ||
24 | #include "internal.h" | 24 | #include "internal.h" |
25 | 25 | ||
26 | #ifndef CONFIG_CRYPTO_MANAGER_TESTS | 26 | #ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS |
27 | 27 | ||
28 | /* a perfect nop */ | 28 | /* a perfect nop */ |
29 | int alg_test(const char *driver, const char *alg, u32 type, u32 mask) | 29 | int alg_test(const char *driver, const char *alg, u32 type, u32 mask) |
@@ -2542,6 +2542,6 @@ non_fips_alg: | |||
2542 | return -EINVAL; | 2542 | return -EINVAL; |
2543 | } | 2543 | } |
2544 | 2544 | ||
2545 | #endif /* CONFIG_CRYPTO_MANAGER_TESTS */ | 2545 | #endif /* CONFIG_CRYPTO_MANAGER_DISABLE_TESTS */ |
2546 | 2546 | ||
2547 | EXPORT_SYMBOL_GPL(alg_test); | 2547 | EXPORT_SYMBOL_GPL(alg_test); |
diff --git a/drivers/Makefile b/drivers/Makefile index ae473445ad6d..a2aea53a75ed 100644 --- a/drivers/Makefile +++ b/drivers/Makefile | |||
@@ -50,7 +50,7 @@ obj-$(CONFIG_SPI) += spi/ | |||
50 | obj-y += net/ | 50 | obj-y += net/ |
51 | obj-$(CONFIG_ATM) += atm/ | 51 | obj-$(CONFIG_ATM) += atm/ |
52 | obj-$(CONFIG_FUSION) += message/ | 52 | obj-$(CONFIG_FUSION) += message/ |
53 | obj-$(CONFIG_FIREWIRE) += firewire/ | 53 | obj-y += firewire/ |
54 | obj-y += ieee1394/ | 54 | obj-y += ieee1394/ |
55 | obj-$(CONFIG_UIO) += uio/ | 55 | obj-$(CONFIG_UIO) += uio/ |
56 | obj-y += cdrom/ | 56 | obj-y += cdrom/ |
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index b811f2173f6f..88681aca88c5 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig | |||
@@ -105,7 +105,7 @@ config ACPI_EC_DEBUGFS | |||
105 | 105 | ||
106 | Be aware that using this interface can confuse your Embedded | 106 | Be aware that using this interface can confuse your Embedded |
107 | Controller in a way that a normal reboot is not enough. You then | 107 | Controller in a way that a normal reboot is not enough. You then |
108 | have to power of your system, and remove the laptop battery for | 108 | have to power off your system, and remove the laptop battery for |
109 | some seconds. | 109 | some seconds. |
110 | An Embedded Controller typically is available on laptops and reads | 110 | An Embedded Controller typically is available on laptops and reads |
111 | sensor values like battery state and temperature. | 111 | sensor values like battery state and temperature. |
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c index b76848c80be3..6b115f6c4313 100644 --- a/drivers/acpi/acpi_pad.c +++ b/drivers/acpi/acpi_pad.c | |||
@@ -382,31 +382,32 @@ static void acpi_pad_remove_sysfs(struct acpi_device *device) | |||
382 | device_remove_file(&device->dev, &dev_attr_rrtime); | 382 | device_remove_file(&device->dev, &dev_attr_rrtime); |
383 | } | 383 | } |
384 | 384 | ||
385 | /* Query firmware how many CPUs should be idle */ | 385 | /* |
386 | static int acpi_pad_pur(acpi_handle handle, int *num_cpus) | 386 | * Query firmware how many CPUs should be idle |
387 | * return -1 on failure | ||
388 | */ | ||
389 | static int acpi_pad_pur(acpi_handle handle) | ||
387 | { | 390 | { |
388 | struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; | 391 | struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; |
389 | union acpi_object *package; | 392 | union acpi_object *package; |
390 | int rev, num, ret = -EINVAL; | 393 | int num = -1; |
391 | 394 | ||
392 | if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PUR", NULL, &buffer))) | 395 | if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PUR", NULL, &buffer))) |
393 | return -EINVAL; | 396 | return num; |
394 | 397 | ||
395 | if (!buffer.length || !buffer.pointer) | 398 | if (!buffer.length || !buffer.pointer) |
396 | return -EINVAL; | 399 | return num; |
397 | 400 | ||
398 | package = buffer.pointer; | 401 | package = buffer.pointer; |
399 | if (package->type != ACPI_TYPE_PACKAGE || package->package.count != 2) | 402 | |
400 | goto out; | 403 | if (package->type == ACPI_TYPE_PACKAGE && |
401 | rev = package->package.elements[0].integer.value; | 404 | package->package.count == 2 && |
402 | num = package->package.elements[1].integer.value; | 405 | package->package.elements[0].integer.value == 1) /* rev 1 */ |
403 | if (rev != 1 || num < 0) | 406 | |
404 | goto out; | 407 | num = package->package.elements[1].integer.value; |
405 | *num_cpus = num; | 408 | |
406 | ret = 0; | ||
407 | out: | ||
408 | kfree(buffer.pointer); | 409 | kfree(buffer.pointer); |
409 | return ret; | 410 | return num; |
410 | } | 411 | } |
411 | 412 | ||
412 | /* Notify firmware how many CPUs are idle */ | 413 | /* Notify firmware how many CPUs are idle */ |
@@ -433,7 +434,8 @@ static void acpi_pad_handle_notify(acpi_handle handle) | |||
433 | uint32_t idle_cpus; | 434 | uint32_t idle_cpus; |
434 | 435 | ||
435 | mutex_lock(&isolated_cpus_lock); | 436 | mutex_lock(&isolated_cpus_lock); |
436 | if (acpi_pad_pur(handle, &num_cpus)) { | 437 | num_cpus = acpi_pad_pur(handle); |
438 | if (num_cpus < 0) { | ||
437 | mutex_unlock(&isolated_cpus_lock); | 439 | mutex_unlock(&isolated_cpus_lock); |
438 | return; | 440 | return; |
439 | } | 441 | } |
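acpi_pad_pur() drops the out-parameter and now returns the requested idle-CPU count directly, with -1 covering every failure (evaluation error, empty buffer, malformed package, wrong revision); the caller bails out on a negative value. A simplified user-space model of the new convention (the struct is illustrative, not a real ACPI object):

    #include <stdio.h>

    struct demo_pur {
        int valid;      /* was a two-element package      */
        int revision;   /* element 0: must be 1           */
        int num_cpus;   /* element 1: requested idle CPUs */
    };

    static int demo_pad_pur(const struct demo_pur *pkg)
    {
        int num = -1;

        if (pkg && pkg->valid && pkg->revision == 1)
            num = pkg->num_cpus;

        return num;     /* caller ignores any negative result */
    }

    int main(void)
    {
        struct demo_pur ok  = { .valid = 1, .revision = 1, .num_cpus = 4 };
        struct demo_pur bad = { .valid = 1, .revision = 2, .num_cpus = 4 };

        printf("ok=%d bad=%d\n", demo_pad_pur(&ok), demo_pad_pur(&bad));
        return 0;
    }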
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index df85b53a674f..7dad9160f209 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h | |||
@@ -854,6 +854,7 @@ struct acpi_bit_register_info { | |||
854 | ACPI_BITMASK_POWER_BUTTON_STATUS | \ | 854 | ACPI_BITMASK_POWER_BUTTON_STATUS | \ |
855 | ACPI_BITMASK_SLEEP_BUTTON_STATUS | \ | 855 | ACPI_BITMASK_SLEEP_BUTTON_STATUS | \ |
856 | ACPI_BITMASK_RT_CLOCK_STATUS | \ | 856 | ACPI_BITMASK_RT_CLOCK_STATUS | \ |
857 | ACPI_BITMASK_PCIEXP_WAKE_DISABLE | \ | ||
857 | ACPI_BITMASK_WAKE_STATUS) | 858 | ACPI_BITMASK_WAKE_STATUS) |
858 | 859 | ||
859 | #define ACPI_BITMASK_TIMER_ENABLE 0x0001 | 860 | #define ACPI_BITMASK_TIMER_ENABLE 0x0001 |
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c index 74c24d517f81..4093522eed45 100644 --- a/drivers/acpi/acpica/exutils.c +++ b/drivers/acpi/acpica/exutils.c | |||
@@ -109,7 +109,7 @@ void acpi_ex_enter_interpreter(void) | |||
109 | * | 109 | * |
110 | * DESCRIPTION: Reacquire the interpreter execution region from within the | 110 | * DESCRIPTION: Reacquire the interpreter execution region from within the |
111 | * interpreter code. Failure to enter the interpreter region is a | 111 | * interpreter code. Failure to enter the interpreter region is a |
112 | * fatal system error. Used in conjuction with | 112 | * fatal system error. Used in conjunction with |
113 | * relinquish_interpreter | 113 | * relinquish_interpreter |
114 | * | 114 | * |
115 | ******************************************************************************/ | 115 | ******************************************************************************/ |
diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c index 22cfcfbd9fff..491191e6cf69 100644 --- a/drivers/acpi/acpica/rsutils.c +++ b/drivers/acpi/acpica/rsutils.c | |||
@@ -149,7 +149,7 @@ acpi_rs_move_data(void *destination, void *source, u16 item_count, u8 move_type) | |||
149 | 149 | ||
150 | /* | 150 | /* |
151 | * 16-, 32-, and 64-bit cases must use the move macros that perform | 151 | * 16-, 32-, and 64-bit cases must use the move macros that perform |
152 | * endian conversion and/or accomodate hardware that cannot perform | 152 | * endian conversion and/or accommodate hardware that cannot perform |
153 | * misaligned memory transfers | 153 | * misaligned memory transfers |
154 | */ | 154 | */ |
155 | case ACPI_RSC_MOVE16: | 155 | case ACPI_RSC_MOVE16: |
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig index 907e350f1c7d..fca34ccfd294 100644 --- a/drivers/acpi/apei/Kconfig +++ b/drivers/acpi/apei/Kconfig | |||
@@ -34,6 +34,6 @@ config ACPI_APEI_ERST_DEBUG | |||
34 | depends on ACPI_APEI | 34 | depends on ACPI_APEI |
35 | help | 35 | help |
36 | ERST is a way provided by APEI to save and retrieve hardware | 36 | ERST is a way provided by APEI to save and retrieve hardware |
37 | error infomation to and from a persistent store. Enable this | 37 | error information to and from a persistent store. Enable this |
38 | if you want to debugging and testing the ERST kernel support | 38 | if you want to debugging and testing the ERST kernel support |
39 | and firmware implementation. | 39 | and firmware implementation. |
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c index 73fd0c7487c1..4a904a4bf05f 100644 --- a/drivers/acpi/apei/apei-base.c +++ b/drivers/acpi/apei/apei-base.c | |||
@@ -445,11 +445,15 @@ EXPORT_SYMBOL_GPL(apei_resources_sub); | |||
445 | int apei_resources_request(struct apei_resources *resources, | 445 | int apei_resources_request(struct apei_resources *resources, |
446 | const char *desc) | 446 | const char *desc) |
447 | { | 447 | { |
448 | struct apei_res *res, *res_bak; | 448 | struct apei_res *res, *res_bak = NULL; |
449 | struct resource *r; | 449 | struct resource *r; |
450 | int rc; | ||
450 | 451 | ||
451 | apei_resources_sub(resources, &apei_resources_all); | 452 | rc = apei_resources_sub(resources, &apei_resources_all); |
453 | if (rc) | ||
454 | return rc; | ||
452 | 455 | ||
456 | rc = -EINVAL; | ||
453 | list_for_each_entry(res, &resources->iomem, list) { | 457 | list_for_each_entry(res, &resources->iomem, list) { |
454 | r = request_mem_region(res->start, res->end - res->start, | 458 | r = request_mem_region(res->start, res->end - res->start, |
455 | desc); | 459 | desc); |
@@ -475,7 +479,11 @@ int apei_resources_request(struct apei_resources *resources, | |||
475 | } | 479 | } |
476 | } | 480 | } |
477 | 481 | ||
478 | apei_resources_merge(&apei_resources_all, resources); | 482 | rc = apei_resources_merge(&apei_resources_all, resources); |
483 | if (rc) { | ||
484 | pr_err(APEI_PFX "Fail to merge resources!\n"); | ||
485 | goto err_unmap_ioport; | ||
486 | } | ||
479 | 487 | ||
480 | return 0; | 488 | return 0; |
481 | err_unmap_ioport: | 489 | err_unmap_ioport: |
@@ -491,12 +499,13 @@ err_unmap_iomem: | |||
491 | break; | 499 | break; |
492 | release_mem_region(res->start, res->end - res->start); | 500 | release_mem_region(res->start, res->end - res->start); |
493 | } | 501 | } |
494 | return -EINVAL; | 502 | return rc; |
495 | } | 503 | } |
496 | EXPORT_SYMBOL_GPL(apei_resources_request); | 504 | EXPORT_SYMBOL_GPL(apei_resources_request); |
497 | 505 | ||
498 | void apei_resources_release(struct apei_resources *resources) | 506 | void apei_resources_release(struct apei_resources *resources) |
499 | { | 507 | { |
508 | int rc; | ||
500 | struct apei_res *res; | 509 | struct apei_res *res; |
501 | 510 | ||
502 | list_for_each_entry(res, &resources->iomem, list) | 511 | list_for_each_entry(res, &resources->iomem, list) |
@@ -504,7 +513,9 @@ void apei_resources_release(struct apei_resources *resources) | |||
504 | list_for_each_entry(res, &resources->ioport, list) | 513 | list_for_each_entry(res, &resources->ioport, list) |
505 | release_region(res->start, res->end - res->start); | 514 | release_region(res->start, res->end - res->start); |
506 | 515 | ||
507 | apei_resources_sub(&apei_resources_all, resources); | 516 | rc = apei_resources_sub(&apei_resources_all, resources); |
517 | if (rc) | ||
518 | pr_err(APEI_PFX "Fail to sub resources!\n"); | ||
508 | } | 519 | } |
509 | EXPORT_SYMBOL_GPL(apei_resources_release); | 520 | EXPORT_SYMBOL_GPL(apei_resources_release); |
510 | 521 | ||
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c index 465c885938ee..cf29df69380b 100644 --- a/drivers/acpi/apei/einj.c +++ b/drivers/acpi/apei/einj.c | |||
@@ -426,7 +426,9 @@ DEFINE_SIMPLE_ATTRIBUTE(error_inject_fops, NULL, | |||
426 | 426 | ||
427 | static int einj_check_table(struct acpi_table_einj *einj_tab) | 427 | static int einj_check_table(struct acpi_table_einj *einj_tab) |
428 | { | 428 | { |
429 | if (einj_tab->header_length != sizeof(struct acpi_table_einj)) | 429 | if ((einj_tab->header_length != |
430 | (sizeof(struct acpi_table_einj) - sizeof(einj_tab->header))) | ||
431 | && (einj_tab->header_length != sizeof(struct acpi_table_einj))) | ||
430 | return -EINVAL; | 432 | return -EINVAL; |
431 | if (einj_tab->header.length < sizeof(struct acpi_table_einj)) | 433 | if (einj_tab->header.length < sizeof(struct acpi_table_einj)) |
432 | return -EINVAL; | 434 | return -EINVAL; |
diff --git a/drivers/acpi/apei/erst-dbg.c b/drivers/acpi/apei/erst-dbg.c index 5281ddda2777..da1228a9a544 100644 --- a/drivers/acpi/apei/erst-dbg.c +++ b/drivers/acpi/apei/erst-dbg.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * APEI Error Record Serialization Table debug support | 2 | * APEI Error Record Serialization Table debug support |
3 | * | 3 | * |
4 | * ERST is a way provided by APEI to save and retrieve hardware error | 4 | * ERST is a way provided by APEI to save and retrieve hardware error |
5 | * infomation to and from a persistent store. This file provide the | 5 | * information to and from a persistent store. This file provide the |
6 | * debugging/testing support for ERST kernel support and firmware | 6 | * debugging/testing support for ERST kernel support and firmware |
7 | * implementation. | 7 | * implementation. |
8 | * | 8 | * |
@@ -111,11 +111,13 @@ retry: | |||
111 | goto out; | 111 | goto out; |
112 | } | 112 | } |
113 | if (len > erst_dbg_buf_len) { | 113 | if (len > erst_dbg_buf_len) { |
114 | kfree(erst_dbg_buf); | 114 | void *p; |
115 | rc = -ENOMEM; | 115 | rc = -ENOMEM; |
116 | erst_dbg_buf = kmalloc(len, GFP_KERNEL); | 116 | p = kmalloc(len, GFP_KERNEL); |
117 | if (!erst_dbg_buf) | 117 | if (!p) |
118 | goto out; | 118 | goto out; |
119 | kfree(erst_dbg_buf); | ||
120 | erst_dbg_buf = p; | ||
119 | erst_dbg_buf_len = len; | 121 | erst_dbg_buf_len = len; |
120 | goto retry; | 122 | goto retry; |
121 | } | 123 | } |
@@ -150,11 +152,13 @@ static ssize_t erst_dbg_write(struct file *filp, const char __user *ubuf, | |||
150 | if (mutex_lock_interruptible(&erst_dbg_mutex)) | 152 | if (mutex_lock_interruptible(&erst_dbg_mutex)) |
151 | return -EINTR; | 153 | return -EINTR; |
152 | if (usize > erst_dbg_buf_len) { | 154 | if (usize > erst_dbg_buf_len) { |
153 | kfree(erst_dbg_buf); | 155 | void *p; |
154 | rc = -ENOMEM; | 156 | rc = -ENOMEM; |
155 | erst_dbg_buf = kmalloc(usize, GFP_KERNEL); | 157 | p = kmalloc(usize, GFP_KERNEL); |
156 | if (!erst_dbg_buf) | 158 | if (!p) |
157 | goto out; | 159 | goto out; |
160 | kfree(erst_dbg_buf); | ||
161 | erst_dbg_buf = p; | ||
158 | erst_dbg_buf_len = usize; | 162 | erst_dbg_buf_len = usize; |
159 | } | 163 | } |
160 | rc = copy_from_user(erst_dbg_buf, ubuf, usize); | 164 | rc = copy_from_user(erst_dbg_buf, ubuf, usize); |
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c index 18645f4e83cd..1211c03149e8 100644 --- a/drivers/acpi/apei/erst.c +++ b/drivers/acpi/apei/erst.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * APEI Error Record Serialization Table support | 2 | * APEI Error Record Serialization Table support |
3 | * | 3 | * |
4 | * ERST is a way provided by APEI to save and retrieve hardware error | 4 | * ERST is a way provided by APEI to save and retrieve hardware error |
5 | * infomation to and from a persistent store. | 5 | * information to and from a persistent store. |
6 | * | 6 | * |
7 | * For more information about ERST, please refer to ACPI Specification | 7 | * For more information about ERST, please refer to ACPI Specification |
8 | * version 4.0, section 17.4. | 8 | * version 4.0, section 17.4. |
@@ -266,13 +266,30 @@ static int erst_exec_move_data(struct apei_exec_context *ctx, | |||
266 | { | 266 | { |
267 | int rc; | 267 | int rc; |
268 | u64 offset; | 268 | u64 offset; |
269 | void *src, *dst; | ||
270 | |||
271 | /* ioremap does not work in interrupt context */ | ||
272 | if (in_interrupt()) { | ||
273 | pr_warning(ERST_PFX | ||
274 | "MOVE_DATA can not be used in interrupt context"); | ||
275 | return -EBUSY; | ||
276 | } | ||
269 | 277 | ||
270 | rc = __apei_exec_read_register(entry, &offset); | 278 | rc = __apei_exec_read_register(entry, &offset); |
271 | if (rc) | 279 | if (rc) |
272 | return rc; | 280 | return rc; |
273 | memmove((void *)ctx->dst_base + offset, | 281 | |
274 | (void *)ctx->src_base + offset, | 282 | src = ioremap(ctx->src_base + offset, ctx->var2); |
275 | ctx->var2); | 283 | if (!src) |
284 | return -ENOMEM; | ||
285 | dst = ioremap(ctx->dst_base + offset, ctx->var2); | ||
286 | if (!dst) | ||
287 | return -ENOMEM; | ||
288 | |||
289 | memmove(dst, src, ctx->var2); | ||
290 | |||
291 | iounmap(src); | ||
292 | iounmap(dst); | ||
276 | 293 | ||
277 | return 0; | 294 | return 0; |
278 | } | 295 | } |
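erst_exec_move_data() now maps the source and destination windows on demand with ioremap() and refuses to run in interrupt context, where ioremap() cannot be used. As written, a failed second ioremap() appears to leave the first mapping in place; a hedged kernel-style sketch of the same copy with symmetric cleanup follows (demo_move_window() is hypothetical, only the map/copy/unmap pattern comes from the patch):

    #include <linux/errno.h>
    #include <linux/io.h>
    #include <linux/string.h>

    static int demo_move_window(phys_addr_t src_base, phys_addr_t dst_base,
                                u64 offset, u64 len)
    {
        void __iomem *src, *dst;
        int rc = 0;

        src = ioremap(src_base + offset, len);
        if (!src)
            return -ENOMEM;

        dst = ioremap(dst_base + offset, len);
        if (!dst) {
            rc = -ENOMEM;
            goto out_unmap_src;     /* do not leak the first mapping */
        }

        memmove(dst, src, len);
        iounmap(dst);
    out_unmap_src:
        iounmap(src);
        return rc;
    }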
@@ -750,7 +767,9 @@ __setup("erst_disable", setup_erst_disable); | |||
750 | 767 | ||
751 | static int erst_check_table(struct acpi_table_erst *erst_tab) | 768 | static int erst_check_table(struct acpi_table_erst *erst_tab) |
752 | { | 769 | { |
753 | if (erst_tab->header_length != sizeof(struct acpi_table_erst)) | 770 | if ((erst_tab->header_length != |
771 | (sizeof(struct acpi_table_erst) - sizeof(erst_tab->header))) | ||
772 | && (erst_tab->header_length != sizeof(struct acpi_table_erst))) | ||
754 | return -EINVAL; | 773 | return -EINVAL; |
755 | if (erst_tab->header.length < sizeof(struct acpi_table_erst)) | 774 | if (erst_tab->header.length < sizeof(struct acpi_table_erst)) |
756 | return -EINVAL; | 775 | return -EINVAL; |
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 385a6059714a..0d505e59214d 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c | |||
@@ -302,7 +302,7 @@ static int __devinit ghes_probe(struct platform_device *ghes_dev) | |||
302 | struct ghes *ghes = NULL; | 302 | struct ghes *ghes = NULL; |
303 | int rc = -EINVAL; | 303 | int rc = -EINVAL; |
304 | 304 | ||
305 | generic = ghes_dev->dev.platform_data; | 305 | generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data; |
306 | if (!generic->enabled) | 306 | if (!generic->enabled) |
307 | return -ENODEV; | 307 | return -ENODEV; |
308 | 308 | ||
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c index 343168d18266..1a3508a7fe03 100644 --- a/drivers/acpi/apei/hest.c +++ b/drivers/acpi/apei/hest.c | |||
@@ -137,20 +137,23 @@ static int hest_parse_ghes_count(struct acpi_hest_header *hest_hdr, void *data) | |||
137 | 137 | ||
138 | static int hest_parse_ghes(struct acpi_hest_header *hest_hdr, void *data) | 138 | static int hest_parse_ghes(struct acpi_hest_header *hest_hdr, void *data) |
139 | { | 139 | { |
140 | struct acpi_hest_generic *generic; | ||
141 | struct platform_device *ghes_dev; | 140 | struct platform_device *ghes_dev; |
142 | struct ghes_arr *ghes_arr = data; | 141 | struct ghes_arr *ghes_arr = data; |
143 | int rc; | 142 | int rc; |
144 | 143 | ||
145 | if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR) | 144 | if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR) |
146 | return 0; | 145 | return 0; |
147 | generic = (struct acpi_hest_generic *)hest_hdr; | 146 | |
148 | if (!generic->enabled) | 147 | if (!((struct acpi_hest_generic *)hest_hdr)->enabled) |
149 | return 0; | 148 | return 0; |
150 | ghes_dev = platform_device_alloc("GHES", hest_hdr->source_id); | 149 | ghes_dev = platform_device_alloc("GHES", hest_hdr->source_id); |
151 | if (!ghes_dev) | 150 | if (!ghes_dev) |
152 | return -ENOMEM; | 151 | return -ENOMEM; |
153 | ghes_dev->dev.platform_data = generic; | 152 | |
153 | rc = platform_device_add_data(ghes_dev, &hest_hdr, sizeof(void *)); | ||
154 | if (rc) | ||
155 | goto err; | ||
156 | |||
154 | rc = platform_device_add(ghes_dev); | 157 | rc = platform_device_add(ghes_dev); |
155 | if (rc) | 158 | if (rc) |
156 | goto err; | 159 | goto err; |
diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c index 8f8bd736d4ff..542e53903891 100644 --- a/drivers/acpi/atomicio.c +++ b/drivers/acpi/atomicio.c | |||
@@ -142,7 +142,7 @@ static void __iomem *acpi_pre_map(phys_addr_t paddr, | |||
142 | list_add_tail_rcu(&map->list, &acpi_iomaps); | 142 | list_add_tail_rcu(&map->list, &acpi_iomaps); |
143 | spin_unlock_irqrestore(&acpi_iomaps_lock, flags); | 143 | spin_unlock_irqrestore(&acpi_iomaps_lock, flags); |
144 | 144 | ||
145 | return vaddr + (paddr - pg_off); | 145 | return map->vaddr + (paddr - map->paddr); |
146 | err_unmap: | 146 | err_unmap: |
147 | iounmap(vaddr); | 147 | iounmap(vaddr); |
148 | return NULL; | 148 | return NULL; |
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index dc58402b0a17..98417201e9ce 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c | |||
@@ -273,7 +273,6 @@ static enum power_supply_property energy_battery_props[] = { | |||
273 | POWER_SUPPLY_PROP_CYCLE_COUNT, | 273 | POWER_SUPPLY_PROP_CYCLE_COUNT, |
274 | POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, | 274 | POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, |
275 | POWER_SUPPLY_PROP_VOLTAGE_NOW, | 275 | POWER_SUPPLY_PROP_VOLTAGE_NOW, |
276 | POWER_SUPPLY_PROP_CURRENT_NOW, | ||
277 | POWER_SUPPLY_PROP_POWER_NOW, | 276 | POWER_SUPPLY_PROP_POWER_NOW, |
278 | POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN, | 277 | POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN, |
279 | POWER_SUPPLY_PROP_ENERGY_FULL, | 278 | POWER_SUPPLY_PROP_ENERGY_FULL, |
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c index 2bb28b9d91c4..f7619600270a 100644 --- a/drivers/acpi/blacklist.c +++ b/drivers/acpi/blacklist.c | |||
@@ -183,6 +183,8 @@ static int __init dmi_disable_osi_vista(const struct dmi_system_id *d) | |||
183 | { | 183 | { |
184 | printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident); | 184 | printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident); |
185 | acpi_osi_setup("!Windows 2006"); | 185 | acpi_osi_setup("!Windows 2006"); |
186 | acpi_osi_setup("!Windows 2006 SP1"); | ||
187 | acpi_osi_setup("!Windows 2006 SP2"); | ||
186 | return 0; | 188 | return 0; |
187 | } | 189 | } |
188 | static int __init dmi_disable_osi_win7(const struct dmi_system_id *d) | 190 | static int __init dmi_disable_osi_win7(const struct dmi_system_id *d) |
@@ -226,6 +228,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = { | |||
226 | }, | 228 | }, |
227 | }, | 229 | }, |
228 | { | 230 | { |
231 | .callback = dmi_disable_osi_vista, | ||
232 | .ident = "Toshiba Satellite L355", | ||
233 | .matches = { | ||
234 | DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), | ||
235 | DMI_MATCH(DMI_PRODUCT_VERSION, "Satellite L355"), | ||
236 | }, | ||
237 | }, | ||
238 | { | ||
229 | .callback = dmi_disable_osi_win7, | 239 | .callback = dmi_disable_osi_win7, |
230 | .ident = "ASUS K50IJ", | 240 | .ident = "ASUS K50IJ", |
231 | .matches = { | 241 | .matches = { |
@@ -233,6 +243,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = { | |||
233 | DMI_MATCH(DMI_PRODUCT_NAME, "K50IJ"), | 243 | DMI_MATCH(DMI_PRODUCT_NAME, "K50IJ"), |
234 | }, | 244 | }, |
235 | }, | 245 | }, |
246 | { | ||
247 | .callback = dmi_disable_osi_vista, | ||
248 | .ident = "Toshiba P305D", | ||
249 | .matches = { | ||
250 | DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), | ||
251 | DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P305D"), | ||
252 | }, | ||
253 | }, | ||
236 | 254 | ||
237 | /* | 255 | /* |
238 | * BIOS invocation of _OSI(Linux) is almost always a BIOS bug. | 256 | * BIOS invocation of _OSI(Linux) is almost always a BIOS bug. |
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 5c221ab535d5..310e3b9749cb 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c | |||
@@ -55,7 +55,7 @@ EXPORT_SYMBOL(acpi_root_dir); | |||
55 | static int set_power_nocheck(const struct dmi_system_id *id) | 55 | static int set_power_nocheck(const struct dmi_system_id *id) |
56 | { | 56 | { |
57 | printk(KERN_NOTICE PREFIX "%s detected - " | 57 | printk(KERN_NOTICE PREFIX "%s detected - " |
58 | "disable power check in power transistion\n", id->ident); | 58 | "disable power check in power transition\n", id->ident); |
59 | acpi_power_nocheck = 1; | 59 | acpi_power_nocheck = 1; |
60 | return 0; | 60 | return 0; |
61 | } | 61 | } |
@@ -80,23 +80,15 @@ static int set_copy_dsdt(const struct dmi_system_id *id) | |||
80 | 80 | ||
81 | static struct dmi_system_id dsdt_dmi_table[] __initdata = { | 81 | static struct dmi_system_id dsdt_dmi_table[] __initdata = { |
82 | /* | 82 | /* |
83 | * Insyde BIOS on some TOSHIBA machines corrupt the DSDT. | 83 | * Invoke DSDT corruption work-around on all Toshiba Satellite. |
84 | * https://bugzilla.kernel.org/show_bug.cgi?id=14679 | 84 | * https://bugzilla.kernel.org/show_bug.cgi?id=14679 |
85 | */ | 85 | */ |
86 | { | 86 | { |
87 | .callback = set_copy_dsdt, | 87 | .callback = set_copy_dsdt, |
88 | .ident = "TOSHIBA Satellite A505", | 88 | .ident = "TOSHIBA Satellite", |
89 | .matches = { | 89 | .matches = { |
90 | DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), | 90 | DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), |
91 | DMI_MATCH(DMI_PRODUCT_NAME, "Satellite A505"), | 91 | DMI_MATCH(DMI_PRODUCT_NAME, "Satellite"), |
92 | }, | ||
93 | }, | ||
94 | { | ||
95 | .callback = set_copy_dsdt, | ||
96 | .ident = "TOSHIBA Satellite L505D", | ||
97 | .matches = { | ||
98 | DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), | ||
99 | DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L505D"), | ||
100 | }, | 92 | }, |
101 | }, | 93 | }, |
102 | {} | 94 | {} |
@@ -1027,7 +1019,7 @@ static int __init acpi_init(void) | |||
1027 | 1019 | ||
1028 | /* | 1020 | /* |
1029 | * If the laptop falls into the DMI check table, the power state check | 1021 | * If the laptop falls into the DMI check table, the power state check |
1030 | * will be disabled in the course of device power transistion. | 1022 | * will be disabled in the course of device power transition. |
1031 | */ | 1023 | */ |
1032 | dmi_check_system(power_nocheck_dmi_table); | 1024 | dmi_check_system(power_nocheck_dmi_table); |
1033 | 1025 | ||
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c index 8a3b840c0bb2..d94d2953c974 100644 --- a/drivers/acpi/fan.c +++ b/drivers/acpi/fan.c | |||
@@ -369,7 +369,9 @@ static void __exit acpi_fan_exit(void) | |||
369 | 369 | ||
370 | acpi_bus_unregister_driver(&acpi_fan_driver); | 370 | acpi_bus_unregister_driver(&acpi_fan_driver); |
371 | 371 | ||
372 | #ifdef CONFIG_ACPI_PROCFS | ||
372 | remove_proc_entry(ACPI_FAN_CLASS, acpi_root_dir); | 373 | remove_proc_entry(ACPI_FAN_CLASS, acpi_root_dir); |
374 | #endif | ||
373 | 375 | ||
374 | return; | 376 | return; |
375 | } | 377 | } |
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index 1f67057af2a5..3ba8d1f44a73 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c | |||
@@ -33,7 +33,6 @@ | |||
33 | #include <linux/pm_runtime.h> | 33 | #include <linux/pm_runtime.h> |
34 | #include <linux/pci.h> | 34 | #include <linux/pci.h> |
35 | #include <linux/pci-acpi.h> | 35 | #include <linux/pci-acpi.h> |
36 | #include <linux/pci-aspm.h> | ||
37 | #include <linux/acpi.h> | 36 | #include <linux/acpi.h> |
38 | #include <linux/slab.h> | 37 | #include <linux/slab.h> |
39 | #include <acpi/acpi_bus.h> | 38 | #include <acpi/acpi_bus.h> |
@@ -226,22 +225,31 @@ static acpi_status acpi_pci_run_osc(acpi_handle handle, | |||
226 | return status; | 225 | return status; |
227 | } | 226 | } |
228 | 227 | ||
229 | static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root, u32 flags) | 228 | static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root, |
229 | u32 support, | ||
230 | u32 *control) | ||
230 | { | 231 | { |
231 | acpi_status status; | 232 | acpi_status status; |
232 | u32 support_set, result, capbuf[3]; | 233 | u32 result, capbuf[3]; |
234 | |||
235 | support &= OSC_PCI_SUPPORT_MASKS; | ||
236 | support |= root->osc_support_set; | ||
233 | 237 | ||
234 | /* do _OSC query for all possible controls */ | ||
235 | support_set = root->osc_support_set | (flags & OSC_PCI_SUPPORT_MASKS); | ||
236 | capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE; | 238 | capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE; |
237 | capbuf[OSC_SUPPORT_TYPE] = support_set; | 239 | capbuf[OSC_SUPPORT_TYPE] = support; |
238 | capbuf[OSC_CONTROL_TYPE] = OSC_PCI_CONTROL_MASKS; | 240 | if (control) { |
241 | *control &= OSC_PCI_CONTROL_MASKS; | ||
242 | capbuf[OSC_CONTROL_TYPE] = *control | root->osc_control_set; | ||
243 | } else { | ||
244 | /* Run _OSC query for all possible controls. */ | ||
245 | capbuf[OSC_CONTROL_TYPE] = OSC_PCI_CONTROL_MASKS; | ||
246 | } | ||
239 | 247 | ||
240 | status = acpi_pci_run_osc(root->device->handle, capbuf, &result); | 248 | status = acpi_pci_run_osc(root->device->handle, capbuf, &result); |
241 | if (ACPI_SUCCESS(status)) { | 249 | if (ACPI_SUCCESS(status)) { |
242 | root->osc_support_set = support_set; | 250 | root->osc_support_set = support; |
243 | root->osc_control_qry = result; | 251 | if (control) |
244 | root->osc_queried = 1; | 252 | *control = result; |
245 | } | 253 | } |
246 | return status; | 254 | return status; |
247 | } | 255 | } |
@@ -255,7 +263,7 @@ static acpi_status acpi_pci_osc_support(struct acpi_pci_root *root, u32 flags) | |||
255 | if (ACPI_FAILURE(status)) | 263 | if (ACPI_FAILURE(status)) |
256 | return status; | 264 | return status; |
257 | mutex_lock(&osc_lock); | 265 | mutex_lock(&osc_lock); |
258 | status = acpi_pci_query_osc(root, flags); | 266 | status = acpi_pci_query_osc(root, flags, NULL); |
259 | mutex_unlock(&osc_lock); | 267 | mutex_unlock(&osc_lock); |
260 | return status; | 268 | return status; |
261 | } | 269 | } |
@@ -365,55 +373,70 @@ out: | |||
365 | EXPORT_SYMBOL_GPL(acpi_get_pci_dev); | 373 | EXPORT_SYMBOL_GPL(acpi_get_pci_dev); |
366 | 374 | ||
367 | /** | 375 | /** |
368 | * acpi_pci_osc_control_set - commit requested control to Firmware | 376 | * acpi_pci_osc_control_set - Request control of PCI root _OSC features. |
369 | * @handle: acpi_handle for the target ACPI object | 377 | * @handle: ACPI handle of a PCI root bridge (or PCIe Root Complex). |
370 | * @flags: driver's requested control bits | 378 | * @mask: Mask of _OSC bits to request control of, place to store control mask. |
379 | * @req: Mask of _OSC bits the control of is essential to the caller. | ||
380 | * | ||
381 | * Run _OSC query for @mask and if that is successful, compare the returned | ||
382 | * mask of control bits with @req. If all of the @req bits are set in the | ||
383 | * returned mask, run _OSC request for it. | ||
371 | * | 384 | * |
372 | * Attempt to take control from Firmware on requested control bits. | 385 | * The variable at the @mask address may be modified regardless of whether or |
386 | * not the function returns success. On success it will contain the mask of | ||
387 | * _OSC bits the BIOS has granted control of, but its contents are meaningless | ||
388 | * on failure. | ||
373 | **/ | 389 | **/ |
374 | acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 flags) | 390 | acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 req) |
375 | { | 391 | { |
392 | struct acpi_pci_root *root; | ||
376 | acpi_status status; | 393 | acpi_status status; |
377 | u32 control_req, result, capbuf[3]; | 394 | u32 ctrl, capbuf[3]; |
378 | acpi_handle tmp; | 395 | acpi_handle tmp; |
379 | struct acpi_pci_root *root; | ||
380 | 396 | ||
381 | status = acpi_get_handle(handle, "_OSC", &tmp); | 397 | if (!mask) |
382 | if (ACPI_FAILURE(status)) | 398 | return AE_BAD_PARAMETER; |
383 | return status; | ||
384 | 399 | ||
385 | control_req = (flags & OSC_PCI_CONTROL_MASKS); | 400 | ctrl = *mask & OSC_PCI_CONTROL_MASKS; |
386 | if (!control_req) | 401 | if ((ctrl & req) != req) |
387 | return AE_TYPE; | 402 | return AE_TYPE; |
388 | 403 | ||
389 | root = acpi_pci_find_root(handle); | 404 | root = acpi_pci_find_root(handle); |
390 | if (!root) | 405 | if (!root) |
391 | return AE_NOT_EXIST; | 406 | return AE_NOT_EXIST; |
392 | 407 | ||
408 | status = acpi_get_handle(handle, "_OSC", &tmp); | ||
409 | if (ACPI_FAILURE(status)) | ||
410 | return status; | ||
411 | |||
393 | mutex_lock(&osc_lock); | 412 | mutex_lock(&osc_lock); |
413 | |||
414 | *mask = ctrl | root->osc_control_set; | ||
394 | /* No need to evaluate _OSC if the control was already granted. */ | 415 | /* No need to evaluate _OSC if the control was already granted. */ |
395 | if ((root->osc_control_set & control_req) == control_req) | 416 | if ((root->osc_control_set & ctrl) == ctrl) |
396 | goto out; | 417 | goto out; |
397 | 418 | ||
398 | /* Need to query controls first before requesting them */ | 419 | /* Need to check the available controls bits before requesting them. */ |
399 | if (!root->osc_queried) { | 420 | while (*mask) { |
400 | status = acpi_pci_query_osc(root, root->osc_support_set); | 421 | status = acpi_pci_query_osc(root, root->osc_support_set, mask); |
401 | if (ACPI_FAILURE(status)) | 422 | if (ACPI_FAILURE(status)) |
402 | goto out; | 423 | goto out; |
424 | if (ctrl == *mask) | ||
425 | break; | ||
426 | ctrl = *mask; | ||
403 | } | 427 | } |
404 | if ((root->osc_control_qry & control_req) != control_req) { | 428 | |
405 | printk(KERN_DEBUG | 429 | if ((ctrl & req) != req) { |
406 | "Firmware did not grant requested _OSC control\n"); | ||
407 | status = AE_SUPPORT; | 430 | status = AE_SUPPORT; |
408 | goto out; | 431 | goto out; |
409 | } | 432 | } |
410 | 433 | ||
411 | capbuf[OSC_QUERY_TYPE] = 0; | 434 | capbuf[OSC_QUERY_TYPE] = 0; |
412 | capbuf[OSC_SUPPORT_TYPE] = root->osc_support_set; | 435 | capbuf[OSC_SUPPORT_TYPE] = root->osc_support_set; |
413 | capbuf[OSC_CONTROL_TYPE] = root->osc_control_set | control_req; | 436 | capbuf[OSC_CONTROL_TYPE] = ctrl; |
414 | status = acpi_pci_run_osc(handle, capbuf, &result); | 437 | status = acpi_pci_run_osc(handle, capbuf, mask); |
415 | if (ACPI_SUCCESS(status)) | 438 | if (ACPI_SUCCESS(status)) |
416 | root->osc_control_set = result; | 439 | root->osc_control_set = *mask; |
417 | out: | 440 | out: |
418 | mutex_unlock(&osc_lock); | 441 | mutex_unlock(&osc_lock); |
419 | return status; | 442 | return status; |
@@ -544,14 +567,6 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device) | |||
544 | if (flags != base_flags) | 567 | if (flags != base_flags) |
545 | acpi_pci_osc_support(root, flags); | 568 | acpi_pci_osc_support(root, flags); |
546 | 569 | ||
547 | status = acpi_pci_osc_control_set(root->device->handle, | ||
548 | OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); | ||
549 | |||
550 | if (ACPI_FAILURE(status)) { | ||
551 | printk(KERN_INFO "Unable to assume PCIe control: Disabling ASPM\n"); | ||
552 | pcie_no_aspm(); | ||
553 | } | ||
554 | |||
555 | pci_acpi_add_bus_pm_notifier(device, root->bus); | 570 | pci_acpi_add_bus_pm_notifier(device, root->bus); |
556 | if (device->wakeup.flags.run_wake) | 571 | if (device->wakeup.flags.run_wake) |
557 | device_set_run_wake(root->bus->bridge, true); | 572 | device_set_run_wake(root->bus->bridge, true); |
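The pci_root.c rework above changes acpi_pci_osc_control_set() from taking a single flags word to taking a pointer to the mask of requested bits plus a mask of bits the caller cannot do without; on success the pointed-to mask is updated with what firmware actually granted. A hedged sketch of a caller under the new convention (the flag choice and function name are illustrative only):

#include <linux/kernel.h>
#include <linux/acpi.h>

/* 'handle' is assumed to be the ACPI handle of a PCIe root bridge */
static acpi_status example_request_pcie_controls(acpi_handle handle)
{
        acpi_status status;
        u32 flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL |
                    OSC_PCI_EXPRESS_NATIVE_HP_CONTROL;

        /* ask for both bits, but insist only on the capability control */
        status = acpi_pci_osc_control_set(handle, &flags,
                                          OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
        if (ACPI_SUCCESS(status))
                pr_info("granted _OSC controls: 0x%08x\n", flags);
        return status;
}

This also explains why the acpi_pci_root_add() hunk drops its unconditional request for PCIe capability control: the decision moves to the callers that know which controls they actually require.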
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index e9699aaed109..b618f888d66b 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c | |||
@@ -29,12 +29,6 @@ static int set_no_mwait(const struct dmi_system_id *id) | |||
29 | 29 | ||
30 | static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = { | 30 | static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = { |
31 | { | 31 | { |
32 | set_no_mwait, "IFL91 board", { | ||
33 | DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"), | ||
34 | DMI_MATCH(DMI_SYS_VENDOR, "ZEPTO"), | ||
35 | DMI_MATCH(DMI_PRODUCT_VERSION, "3215W"), | ||
36 | DMI_MATCH(DMI_BOARD_NAME, "IFL91") }, NULL}, | ||
37 | { | ||
38 | set_no_mwait, "Extensa 5220", { | 32 | set_no_mwait, "Extensa 5220", { |
39 | DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), | 33 | DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), |
40 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | 34 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), |
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index 156021892389..347eb21b2353 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c | |||
@@ -850,7 +850,7 @@ static int __init acpi_processor_init(void) | |||
850 | printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n", | 850 | printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n", |
851 | acpi_idle_driver.name); | 851 | acpi_idle_driver.name); |
852 | } else { | 852 | } else { |
853 | printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s", | 853 | printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s\n", |
854 | cpuidle_get_driver()->name); | 854 | cpuidle_get_driver()->name); |
855 | } | 855 | } |
856 | 856 | ||
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index ba1bd263d903..3a73a93596e8 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c | |||
@@ -447,8 +447,8 @@ int acpi_processor_notify_smm(struct module *calling_module) | |||
447 | if (!try_module_get(calling_module)) | 447 | if (!try_module_get(calling_module)) |
448 | return -EINVAL; | 448 | return -EINVAL; |
449 | 449 | ||
450 | /* is_done is set to negative if an error occured, | 450 | /* is_done is set to negative if an error occurred, |
451 | * and to positive if _no_ error occured, but SMM | 451 | * and to positive if _no_ error occurred, but SMM |
452 | * was already notified. This avoids double notification | 452 | * was already notified. This avoids double notification |
453 | * which might lead to unexpected results... | 453 | * which might lead to unexpected results... |
454 | */ | 454 | */ |
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index cf82989ae756..4754ff6e70e6 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c | |||
@@ -363,6 +363,12 @@ static int __init init_old_suspend_ordering(const struct dmi_system_id *d) | |||
363 | return 0; | 363 | return 0; |
364 | } | 364 | } |
365 | 365 | ||
366 | static int __init init_nvs_nosave(const struct dmi_system_id *d) | ||
367 | { | ||
368 | acpi_nvs_nosave(); | ||
369 | return 0; | ||
370 | } | ||
371 | |||
366 | static struct dmi_system_id __initdata acpisleep_dmi_table[] = { | 372 | static struct dmi_system_id __initdata acpisleep_dmi_table[] = { |
367 | { | 373 | { |
368 | .callback = init_old_suspend_ordering, | 374 | .callback = init_old_suspend_ordering, |
@@ -397,6 +403,22 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = { | |||
397 | DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"), | 403 | DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"), |
398 | }, | 404 | }, |
399 | }, | 405 | }, |
406 | { | ||
407 | .callback = init_nvs_nosave, | ||
408 | .ident = "Sony Vaio VGN-SR11M", | ||
409 | .matches = { | ||
410 | DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), | ||
411 | DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"), | ||
412 | }, | ||
413 | }, | ||
414 | { | ||
415 | .callback = init_nvs_nosave, | ||
416 | .ident = "Everex StepNote Series", | ||
417 | .matches = { | ||
418 | DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."), | ||
419 | DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"), | ||
420 | }, | ||
421 | }, | ||
400 | {}, | 422 | {}, |
401 | }; | 423 | }; |
402 | #endif /* CONFIG_SUSPEND */ | 424 | #endif /* CONFIG_SUSPEND */ |
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c index 68e2e4582fa2..f8588f81048a 100644 --- a/drivers/acpi/sysfs.c +++ b/drivers/acpi/sysfs.c | |||
@@ -100,7 +100,7 @@ static const struct acpi_dlevel acpi_debug_levels[] = { | |||
100 | ACPI_DEBUG_INIT(ACPI_LV_EVENTS), | 100 | ACPI_DEBUG_INIT(ACPI_LV_EVENTS), |
101 | }; | 101 | }; |
102 | 102 | ||
103 | static int param_get_debug_layer(char *buffer, struct kernel_param *kp) | 103 | static int param_get_debug_layer(char *buffer, const struct kernel_param *kp) |
104 | { | 104 | { |
105 | int result = 0; | 105 | int result = 0; |
106 | int i; | 106 | int i; |
@@ -128,7 +128,7 @@ static int param_get_debug_layer(char *buffer, struct kernel_param *kp) | |||
128 | return result; | 128 | return result; |
129 | } | 129 | } |
130 | 130 | ||
131 | static int param_get_debug_level(char *buffer, struct kernel_param *kp) | 131 | static int param_get_debug_level(char *buffer, const struct kernel_param *kp) |
132 | { | 132 | { |
133 | int result = 0; | 133 | int result = 0; |
134 | int i; | 134 | int i; |
@@ -149,10 +149,18 @@ static int param_get_debug_level(char *buffer, struct kernel_param *kp) | |||
149 | return result; | 149 | return result; |
150 | } | 150 | } |
151 | 151 | ||
152 | module_param_call(debug_layer, param_set_uint, param_get_debug_layer, | 152 | static struct kernel_param_ops param_ops_debug_layer = { |
153 | &acpi_dbg_layer, 0644); | 153 | .set = param_set_uint, |
154 | module_param_call(debug_level, param_set_uint, param_get_debug_level, | 154 | .get = param_get_debug_layer, |
155 | &acpi_dbg_level, 0644); | 155 | }; |
156 | |||
157 | static struct kernel_param_ops param_ops_debug_level = { | ||
158 | .set = param_set_uint, | ||
159 | .get = param_get_debug_level, | ||
160 | }; | ||
161 | |||
162 | module_param_cb(debug_layer, ¶m_ops_debug_layer, &acpi_dbg_layer, 0644); | ||
163 | module_param_cb(debug_level, ¶m_ops_debug_level, &acpi_dbg_level, 0644); | ||
156 | 164 | ||
157 | static char trace_method_name[6]; | 165 | static char trace_method_name[6]; |
158 | module_param_string(trace_method_name, trace_method_name, 6, 0644); | 166 | module_param_string(trace_method_name, trace_method_name, 6, 0644); |
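The sysfs.c hunk converts the ACPI debug parameters from the old module_param_call() interface to kernel_param_ops plus module_param_cb(), the same shape any parameter with a custom getter or setter now uses. A minimal sketch with illustrative names:

#include <linux/kernel.h>
#include <linux/moduleparam.h>

static unsigned int example_mask;

/* custom getter: print the value in hex instead of decimal */
static int example_mask_get(char *buffer, const struct kernel_param *kp)
{
        return sprintf(buffer, "0x%08x", *(unsigned int *)kp->arg);
}

static struct kernel_param_ops example_mask_ops = {
        .set = param_set_uint,          /* reuse the stock uint parser */
        .get = example_mask_get,
};

module_param_cb(example_mask, &example_mask_ops, &example_mask, 0644);
MODULE_PARM_DESC(example_mask, "example bitmask, shown in hex");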
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index c5fef01b3c95..b83676126598 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c | |||
@@ -59,8 +59,8 @@ acpi_backlight_cap_match(acpi_handle handle, u32 level, void *context, | |||
59 | "support\n")); | 59 | "support\n")); |
60 | *cap |= ACPI_VIDEO_BACKLIGHT; | 60 | *cap |= ACPI_VIDEO_BACKLIGHT; |
61 | if (ACPI_FAILURE(acpi_get_handle(handle, "_BQC", &h_dummy))) | 61 | if (ACPI_FAILURE(acpi_get_handle(handle, "_BQC", &h_dummy))) |
62 | printk(KERN_WARNING FW_BUG PREFIX "ACPI brightness " | 62 | printk(KERN_WARNING FW_BUG PREFIX "No _BQC method, " |
63 | "control misses _BQC function\n"); | 63 | "cannot determine initial brightness\n"); |
64 | /* We have backlight support, no need to scan further */ | 64 | /* We have backlight support, no need to scan further */ |
65 | return AE_CTRL_TERMINATE; | 65 | return AE_CTRL_TERMINATE; |
66 | } | 66 | } |
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 013727b20417..99d0e5a51148 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c | |||
@@ -90,6 +90,10 @@ static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg); | |||
90 | static int ahci_pci_device_resume(struct pci_dev *pdev); | 90 | static int ahci_pci_device_resume(struct pci_dev *pdev); |
91 | #endif | 91 | #endif |
92 | 92 | ||
93 | static struct scsi_host_template ahci_sht = { | ||
94 | AHCI_SHT("ahci"), | ||
95 | }; | ||
96 | |||
93 | static struct ata_port_operations ahci_vt8251_ops = { | 97 | static struct ata_port_operations ahci_vt8251_ops = { |
94 | .inherits = &ahci_ops, | 98 | .inherits = &ahci_ops, |
95 | .hardreset = ahci_vt8251_hardreset, | 99 | .hardreset = ahci_vt8251_hardreset, |
@@ -253,6 +257,9 @@ static const struct pci_device_id ahci_pci_tbl[] = { | |||
253 | { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */ | 257 | { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */ |
254 | { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */ | 258 | { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */ |
255 | { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */ | 259 | { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */ |
260 | { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */ | ||
261 | { PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */ | ||
262 | { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */ | ||
256 | 263 | ||
257 | /* JMicron 360/1/3/5/6, match class to avoid IDE function */ | 264 | /* JMicron 360/1/3/5/6, match class to avoid IDE function */ |
258 | { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, | 265 | { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, |
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h index 474427b6f99f..e5fdeebf9ef0 100644 --- a/drivers/ata/ahci.h +++ b/drivers/ata/ahci.h | |||
@@ -298,7 +298,17 @@ struct ahci_host_priv { | |||
298 | 298 | ||
299 | extern int ahci_ignore_sss; | 299 | extern int ahci_ignore_sss; |
300 | 300 | ||
301 | extern struct scsi_host_template ahci_sht; | 301 | extern struct device_attribute *ahci_shost_attrs[]; |
302 | extern struct device_attribute *ahci_sdev_attrs[]; | ||
303 | |||
304 | #define AHCI_SHT(drv_name) \ | ||
305 | ATA_NCQ_SHT(drv_name), \ | ||
306 | .can_queue = AHCI_MAX_CMDS - 1, \ | ||
307 | .sg_tablesize = AHCI_MAX_SG, \ | ||
308 | .dma_boundary = AHCI_DMA_BOUNDARY, \ | ||
309 | .shost_attrs = ahci_shost_attrs, \ | ||
310 | .sdev_attrs = ahci_sdev_attrs | ||
311 | |||
302 | extern struct ata_port_operations ahci_ops; | 312 | extern struct ata_port_operations ahci_ops; |
303 | 313 | ||
304 | void ahci_save_initial_config(struct device *dev, | 314 | void ahci_save_initial_config(struct device *dev, |
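With ahci_sht no longer exported from libahci, each AHCI low-level driver now declares its own scsi_host_template through the new AHCI_SHT() macro, as the ahci.c and ahci_platform.c hunks show. A sketch of what a further user would add (the driver name is illustrative):

#include "ahci.h"

static struct scsi_host_template example_ahci_sht = {
        AHCI_SHT("example_ahci"),
};

/* and later, when bringing up the host: */
/* rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED, */
/*                        &example_ahci_sht);                     */

Because AHCI_SHT() expands to a list of designated initializers, a driver can set additional scsi_host_template fields after the macro in the same initializer.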
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c index 4e97f33cca44..84b643270e7a 100644 --- a/drivers/ata/ahci_platform.c +++ b/drivers/ata/ahci_platform.c | |||
@@ -23,6 +23,10 @@ | |||
23 | #include <linux/ahci_platform.h> | 23 | #include <linux/ahci_platform.h> |
24 | #include "ahci.h" | 24 | #include "ahci.h" |
25 | 25 | ||
26 | static struct scsi_host_template ahci_platform_sht = { | ||
27 | AHCI_SHT("ahci_platform"), | ||
28 | }; | ||
29 | |||
26 | static int __init ahci_probe(struct platform_device *pdev) | 30 | static int __init ahci_probe(struct platform_device *pdev) |
27 | { | 31 | { |
28 | struct device *dev = &pdev->dev; | 32 | struct device *dev = &pdev->dev; |
@@ -145,7 +149,7 @@ static int __init ahci_probe(struct platform_device *pdev) | |||
145 | ahci_print_info(host, "platform"); | 149 | ahci_print_info(host, "platform"); |
146 | 150 | ||
147 | rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED, | 151 | rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED, |
148 | &ahci_sht); | 152 | &ahci_platform_sht); |
149 | if (rc) | 153 | if (rc) |
150 | goto err0; | 154 | goto err0; |
151 | 155 | ||
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index 3971bc0a4838..d712675d0a96 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c | |||
@@ -302,6 +302,10 @@ static const struct pci_device_id piix_pci_tbl[] = { | |||
302 | { 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, | 302 | { 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, |
303 | /* SATA Controller IDE (CPT) */ | 303 | /* SATA Controller IDE (CPT) */ |
304 | { 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, | 304 | { 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, |
305 | /* SATA Controller IDE (PBG) */ | ||
306 | { 0x8086, 0x1d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, | ||
307 | /* SATA Controller IDE (PBG) */ | ||
308 | { 0x8086, 0x1d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, | ||
305 | { } /* terminate list */ | 309 | { } /* terminate list */ |
306 | }; | 310 | }; |
307 | 311 | ||
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 666850d31df2..8eea309ea212 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c | |||
@@ -121,7 +121,7 @@ static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL); | |||
121 | static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO, | 121 | static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO, |
122 | ahci_read_em_buffer, ahci_store_em_buffer); | 122 | ahci_read_em_buffer, ahci_store_em_buffer); |
123 | 123 | ||
124 | static struct device_attribute *ahci_shost_attrs[] = { | 124 | struct device_attribute *ahci_shost_attrs[] = { |
125 | &dev_attr_link_power_management_policy, | 125 | &dev_attr_link_power_management_policy, |
126 | &dev_attr_em_message_type, | 126 | &dev_attr_em_message_type, |
127 | &dev_attr_em_message, | 127 | &dev_attr_em_message, |
@@ -132,22 +132,14 @@ static struct device_attribute *ahci_shost_attrs[] = { | |||
132 | &dev_attr_em_buffer, | 132 | &dev_attr_em_buffer, |
133 | NULL | 133 | NULL |
134 | }; | 134 | }; |
135 | EXPORT_SYMBOL_GPL(ahci_shost_attrs); | ||
135 | 136 | ||
136 | static struct device_attribute *ahci_sdev_attrs[] = { | 137 | struct device_attribute *ahci_sdev_attrs[] = { |
137 | &dev_attr_sw_activity, | 138 | &dev_attr_sw_activity, |
138 | &dev_attr_unload_heads, | 139 | &dev_attr_unload_heads, |
139 | NULL | 140 | NULL |
140 | }; | 141 | }; |
141 | 142 | EXPORT_SYMBOL_GPL(ahci_sdev_attrs); | |
142 | struct scsi_host_template ahci_sht = { | ||
143 | ATA_NCQ_SHT("ahci"), | ||
144 | .can_queue = AHCI_MAX_CMDS - 1, | ||
145 | .sg_tablesize = AHCI_MAX_SG, | ||
146 | .dma_boundary = AHCI_DMA_BOUNDARY, | ||
147 | .shost_attrs = ahci_shost_attrs, | ||
148 | .sdev_attrs = ahci_sdev_attrs, | ||
149 | }; | ||
150 | EXPORT_SYMBOL_GPL(ahci_sht); | ||
151 | 143 | ||
152 | struct ata_port_operations ahci_ops = { | 144 | struct ata_port_operations ahci_ops = { |
153 | .inherits = &sata_pmp_port_ops, | 145 | .inherits = &sata_pmp_port_ops, |
@@ -1326,7 +1318,7 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class, | |||
1326 | /* issue the first D2H Register FIS */ | 1318 | /* issue the first D2H Register FIS */ |
1327 | msecs = 0; | 1319 | msecs = 0; |
1328 | now = jiffies; | 1320 | now = jiffies; |
1329 | if (time_after(now, deadline)) | 1321 | if (time_after(deadline, now)) |
1330 | msecs = jiffies_to_msecs(deadline - now); | 1322 | msecs = jiffies_to_msecs(deadline - now); |
1331 | 1323 | ||
1332 | tf.ctl |= ATA_SRST; | 1324 | tf.ctl |= ATA_SRST; |
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index c035b3d041ee..932eaee50245 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
@@ -5418,6 +5418,7 @@ static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg, | |||
5418 | */ | 5418 | */ |
5419 | int ata_host_suspend(struct ata_host *host, pm_message_t mesg) | 5419 | int ata_host_suspend(struct ata_host *host, pm_message_t mesg) |
5420 | { | 5420 | { |
5421 | unsigned int ehi_flags = ATA_EHI_QUIET; | ||
5421 | int rc; | 5422 | int rc; |
5422 | 5423 | ||
5423 | /* | 5424 | /* |
@@ -5426,7 +5427,18 @@ int ata_host_suspend(struct ata_host *host, pm_message_t mesg) | |||
5426 | */ | 5427 | */ |
5427 | ata_lpm_enable(host); | 5428 | ata_lpm_enable(host); |
5428 | 5429 | ||
5429 | rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1); | 5430 | /* |
5431 | * On some hardware, device fails to respond after spun down | ||
5432 | * for suspend. As the device won't be used before being | ||
5433 | * resumed, we don't need to touch the device. Ask EH to skip | ||
5434 | * the usual stuff and proceed directly to suspend. | ||
5435 | * | ||
5436 | * http://thread.gmane.org/gmane.linux.ide/46764 | ||
5437 | */ | ||
5438 | if (mesg.event == PM_EVENT_SUSPEND) | ||
5439 | ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY; | ||
5440 | |||
5441 | rc = ata_host_request_pm(host, mesg, 0, ehi_flags, 1); | ||
5430 | if (rc == 0) | 5442 | if (rc == 0) |
5431 | host->dev->power.power_state = mesg; | 5443 | host->dev->power.power_state = mesg; |
5432 | return rc; | 5444 | return rc; |
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index c9ae299b8342..e48302eae55f 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c | |||
@@ -3235,6 +3235,10 @@ static int ata_eh_skip_recovery(struct ata_link *link) | |||
3235 | if (link->flags & ATA_LFLAG_DISABLED) | 3235 | if (link->flags & ATA_LFLAG_DISABLED) |
3236 | return 1; | 3236 | return 1; |
3237 | 3237 | ||
3238 | /* skip if explicitly requested */ | ||
3239 | if (ehc->i.flags & ATA_EHI_NO_RECOVERY) | ||
3240 | return 1; | ||
3241 | |||
3238 | /* thaw frozen port and recover failed devices */ | 3242 | /* thaw frozen port and recover failed devices */ |
3239 | if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link)) | 3243 | if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link)) |
3240 | return 0; | 3244 | return 0; |
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index 3b82d8ef76f0..e30c537cce32 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c | |||
@@ -418,6 +418,7 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) | |||
418 | if (ioaddr->ctl_addr) | 418 | if (ioaddr->ctl_addr) |
419 | iowrite8(tf->ctl, ioaddr->ctl_addr); | 419 | iowrite8(tf->ctl, ioaddr->ctl_addr); |
420 | ap->last_ctl = tf->ctl; | 420 | ap->last_ctl = tf->ctl; |
421 | ata_wait_idle(ap); | ||
421 | } | 422 | } |
422 | 423 | ||
423 | if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { | 424 | if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { |
@@ -453,6 +454,8 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) | |||
453 | iowrite8(tf->device, ioaddr->device_addr); | 454 | iowrite8(tf->device, ioaddr->device_addr); |
454 | VPRINTK("device 0x%X\n", tf->device); | 455 | VPRINTK("device 0x%X\n", tf->device); |
455 | } | 456 | } |
457 | |||
458 | ata_wait_idle(ap); | ||
456 | } | 459 | } |
457 | EXPORT_SYMBOL_GPL(ata_sff_tf_load); | 460 | EXPORT_SYMBOL_GPL(ata_sff_tf_load); |
458 | 461 | ||
@@ -1042,7 +1045,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) | |||
1042 | int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, | 1045 | int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, |
1043 | u8 status, int in_wq) | 1046 | u8 status, int in_wq) |
1044 | { | 1047 | { |
1045 | struct ata_eh_info *ehi = &ap->link.eh_info; | 1048 | struct ata_link *link = qc->dev->link; |
1049 | struct ata_eh_info *ehi = &link->eh_info; | ||
1046 | unsigned long flags = 0; | 1050 | unsigned long flags = 0; |
1047 | int poll_next; | 1051 | int poll_next; |
1048 | 1052 | ||
@@ -1298,8 +1302,14 @@ fsm_start: | |||
1298 | } | 1302 | } |
1299 | EXPORT_SYMBOL_GPL(ata_sff_hsm_move); | 1303 | EXPORT_SYMBOL_GPL(ata_sff_hsm_move); |
1300 | 1304 | ||
1301 | void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay) | 1305 | void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay) |
1302 | { | 1306 | { |
1307 | struct ata_port *ap = link->ap; | ||
1308 | |||
1309 | WARN_ON((ap->sff_pio_task_link != NULL) && | ||
1310 | (ap->sff_pio_task_link != link)); | ||
1311 | ap->sff_pio_task_link = link; | ||
1312 | |||
1303 | /* may fail if ata_sff_flush_pio_task() in progress */ | 1313 | /* may fail if ata_sff_flush_pio_task() in progress */ |
1304 | queue_delayed_work(ata_sff_wq, &ap->sff_pio_task, | 1314 | queue_delayed_work(ata_sff_wq, &ap->sff_pio_task, |
1305 | msecs_to_jiffies(delay)); | 1315 | msecs_to_jiffies(delay)); |
@@ -1321,14 +1331,18 @@ static void ata_sff_pio_task(struct work_struct *work) | |||
1321 | { | 1331 | { |
1322 | struct ata_port *ap = | 1332 | struct ata_port *ap = |
1323 | container_of(work, struct ata_port, sff_pio_task.work); | 1333 | container_of(work, struct ata_port, sff_pio_task.work); |
1334 | struct ata_link *link = ap->sff_pio_task_link; | ||
1324 | struct ata_queued_cmd *qc; | 1335 | struct ata_queued_cmd *qc; |
1325 | u8 status; | 1336 | u8 status; |
1326 | int poll_next; | 1337 | int poll_next; |
1327 | 1338 | ||
1339 | BUG_ON(ap->sff_pio_task_link == NULL); | ||
1328 | /* qc can be NULL if timeout occurred */ | 1340 | /* qc can be NULL if timeout occurred */ |
1329 | qc = ata_qc_from_tag(ap, ap->link.active_tag); | 1341 | qc = ata_qc_from_tag(ap, link->active_tag); |
1330 | if (!qc) | 1342 | if (!qc) { |
1343 | ap->sff_pio_task_link = NULL; | ||
1331 | return; | 1344 | return; |
1345 | } | ||
1332 | 1346 | ||
1333 | fsm_start: | 1347 | fsm_start: |
1334 | WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE); | 1348 | WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE); |
@@ -1345,11 +1359,16 @@ fsm_start: | |||
1345 | msleep(2); | 1359 | msleep(2); |
1346 | status = ata_sff_busy_wait(ap, ATA_BUSY, 10); | 1360 | status = ata_sff_busy_wait(ap, ATA_BUSY, 10); |
1347 | if (status & ATA_BUSY) { | 1361 | if (status & ATA_BUSY) { |
1348 | ata_sff_queue_pio_task(ap, ATA_SHORT_PAUSE); | 1362 | ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE); |
1349 | return; | 1363 | return; |
1350 | } | 1364 | } |
1351 | } | 1365 | } |
1352 | 1366 | ||
1367 | /* | ||
1368 | * hsm_move() may trigger another command to be processed. | ||
1369 | * clean the link beforehand. | ||
1370 | */ | ||
1371 | ap->sff_pio_task_link = NULL; | ||
1353 | /* move the HSM */ | 1372 | /* move the HSM */ |
1354 | poll_next = ata_sff_hsm_move(ap, qc, status, 1); | 1373 | poll_next = ata_sff_hsm_move(ap, qc, status, 1); |
1355 | 1374 | ||
@@ -1376,6 +1395,7 @@ fsm_start: | |||
1376 | unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) | 1395 | unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) |
1377 | { | 1396 | { |
1378 | struct ata_port *ap = qc->ap; | 1397 | struct ata_port *ap = qc->ap; |
1398 | struct ata_link *link = qc->dev->link; | ||
1379 | 1399 | ||
1380 | /* Use polling pio if the LLD doesn't handle | 1400 | /* Use polling pio if the LLD doesn't handle |
1381 | * interrupt driven pio and atapi CDB interrupt. | 1401 | * interrupt driven pio and atapi CDB interrupt. |
@@ -1396,7 +1416,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) | |||
1396 | ap->hsm_task_state = HSM_ST_LAST; | 1416 | ap->hsm_task_state = HSM_ST_LAST; |
1397 | 1417 | ||
1398 | if (qc->tf.flags & ATA_TFLAG_POLLING) | 1418 | if (qc->tf.flags & ATA_TFLAG_POLLING) |
1399 | ata_sff_queue_pio_task(ap, 0); | 1419 | ata_sff_queue_pio_task(link, 0); |
1400 | 1420 | ||
1401 | break; | 1421 | break; |
1402 | 1422 | ||
@@ -1409,7 +1429,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) | |||
1409 | if (qc->tf.flags & ATA_TFLAG_WRITE) { | 1429 | if (qc->tf.flags & ATA_TFLAG_WRITE) { |
1410 | /* PIO data out protocol */ | 1430 | /* PIO data out protocol */ |
1411 | ap->hsm_task_state = HSM_ST_FIRST; | 1431 | ap->hsm_task_state = HSM_ST_FIRST; |
1412 | ata_sff_queue_pio_task(ap, 0); | 1432 | ata_sff_queue_pio_task(link, 0); |
1413 | 1433 | ||
1414 | /* always send first data block using the | 1434 | /* always send first data block using the |
1415 | * ata_sff_pio_task() codepath. | 1435 | * ata_sff_pio_task() codepath. |
@@ -1419,7 +1439,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) | |||
1419 | ap->hsm_task_state = HSM_ST; | 1439 | ap->hsm_task_state = HSM_ST; |
1420 | 1440 | ||
1421 | if (qc->tf.flags & ATA_TFLAG_POLLING) | 1441 | if (qc->tf.flags & ATA_TFLAG_POLLING) |
1422 | ata_sff_queue_pio_task(ap, 0); | 1442 | ata_sff_queue_pio_task(link, 0); |
1423 | 1443 | ||
1424 | /* if polling, ata_sff_pio_task() handles the | 1444 | /* if polling, ata_sff_pio_task() handles the |
1425 | * rest. otherwise, interrupt handler takes | 1445 | * rest. otherwise, interrupt handler takes |
@@ -1441,7 +1461,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) | |||
1441 | /* send cdb by polling if no cdb interrupt */ | 1461 | /* send cdb by polling if no cdb interrupt */ |
1442 | if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) || | 1462 | if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) || |
1443 | (qc->tf.flags & ATA_TFLAG_POLLING)) | 1463 | (qc->tf.flags & ATA_TFLAG_POLLING)) |
1444 | ata_sff_queue_pio_task(ap, 0); | 1464 | ata_sff_queue_pio_task(link, 0); |
1445 | break; | 1465 | break; |
1446 | 1466 | ||
1447 | default: | 1467 | default: |
@@ -2734,6 +2754,7 @@ EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep); | |||
2734 | unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc) | 2754 | unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc) |
2735 | { | 2755 | { |
2736 | struct ata_port *ap = qc->ap; | 2756 | struct ata_port *ap = qc->ap; |
2757 | struct ata_link *link = qc->dev->link; | ||
2737 | 2758 | ||
2738 | /* defer PIO handling to sff_qc_issue */ | 2759 | /* defer PIO handling to sff_qc_issue */ |
2739 | if (!ata_is_dma(qc->tf.protocol)) | 2760 | if (!ata_is_dma(qc->tf.protocol)) |
@@ -2762,7 +2783,7 @@ unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc) | |||
2762 | 2783 | ||
2763 | /* send cdb by polling if no cdb interrupt */ | 2784 | /* send cdb by polling if no cdb interrupt */ |
2764 | if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) | 2785 | if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) |
2765 | ata_sff_queue_pio_task(ap, 0); | 2786 | ata_sff_queue_pio_task(link, 0); |
2766 | break; | 2787 | break; |
2767 | 2788 | ||
2768 | default: | 2789 | default: |
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c index ba43f0f8c880..2215632e4b31 100644 --- a/drivers/ata/pata_artop.c +++ b/drivers/ata/pata_artop.c | |||
@@ -74,7 +74,8 @@ static int artop6260_pre_reset(struct ata_link *link, unsigned long deadline) | |||
74 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 74 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
75 | 75 | ||
76 | /* Odd numbered device ids are the units with enable bits (the -R cards) */ | 76 | /* Odd numbered device ids are the units with enable bits (the -R cards) */ |
77 | if (pdev->device % 1 && !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no])) | 77 | if ((pdev->device & 1) && |
78 | !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no])) | ||
78 | return -ENOENT; | 79 | return -ENOENT; |
79 | 80 | ||
80 | return ata_sff_prereset(link, deadline); | 81 | return ata_sff_prereset(link, deadline); |
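The pata_artop fix above corrects an operator slip: `pdev->device % 1` is always 0 (any value modulo 1), so the "odd device id means -R card" test could never trigger, while `pdev->device & 1` tests the low bit as intended. A tiny stand-alone illustration in plain C, with arbitrary ids:

#include <stdio.h>

int main(void)
{
        unsigned int ids[] = { 0x0005, 0x0006, 0x0007, 0x0008 };

        for (int i = 0; i < 4; i++)
                printf("id 0x%04x: %% 1 -> %u, & 1 -> %u\n",
                       ids[i], ids[i] % 1, ids[i] & 1);
        /* "% 1" is 0 for every id; "& 1" is 1 only for the odd ones */
        return 0;
}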
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c index 5e659885de16..ac8d7d97e408 100644 --- a/drivers/ata/pata_via.c +++ b/drivers/ata/pata_via.c | |||
@@ -417,6 +417,8 @@ static void via_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) | |||
417 | tf->lbam, | 417 | tf->lbam, |
418 | tf->lbah); | 418 | tf->lbah); |
419 | } | 419 | } |
420 | |||
421 | ata_wait_idle(ap); | ||
420 | } | 422 | } |
421 | 423 | ||
422 | static int via_port_start(struct ata_port *ap) | 424 | static int via_port_start(struct ata_port *ap) |
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index 81982594a014..a9fd9709c262 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c | |||
@@ -2284,7 +2284,7 @@ static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc) | |||
2284 | } | 2284 | } |
2285 | 2285 | ||
2286 | if (qc->tf.flags & ATA_TFLAG_POLLING) | 2286 | if (qc->tf.flags & ATA_TFLAG_POLLING) |
2287 | ata_sff_queue_pio_task(ap, 0); | 2287 | ata_sff_queue_pio_task(link, 0); |
2288 | return 0; | 2288 | return 0; |
2289 | } | 2289 | } |
2290 | 2290 | ||
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 5419a49ff135..276d5a701dc3 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c | |||
@@ -59,6 +59,7 @@ void device_pm_init(struct device *dev) | |||
59 | { | 59 | { |
60 | dev->power.status = DPM_ON; | 60 | dev->power.status = DPM_ON; |
61 | init_completion(&dev->power.completion); | 61 | init_completion(&dev->power.completion); |
62 | complete_all(&dev->power.completion); | ||
62 | dev->power.wakeup_count = 0; | 63 | dev->power.wakeup_count = 0; |
63 | pm_runtime_init(dev); | 64 | pm_runtime_init(dev); |
64 | } | 65 | } |
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 31064df1370a..5e4fadcdece9 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c | |||
@@ -297,6 +297,8 @@ static void enqueue_cmd_and_start_io(ctlr_info_t *h, | |||
297 | spin_lock_irqsave(&h->lock, flags); | 297 | spin_lock_irqsave(&h->lock, flags); |
298 | addQ(&h->reqQ, c); | 298 | addQ(&h->reqQ, c); |
299 | h->Qdepth++; | 299 | h->Qdepth++; |
300 | if (h->Qdepth > h->maxQsinceinit) | ||
301 | h->maxQsinceinit = h->Qdepth; | ||
300 | start_io(h); | 302 | start_io(h); |
301 | spin_unlock_irqrestore(&h->lock, flags); | 303 | spin_unlock_irqrestore(&h->lock, flags); |
302 | } | 304 | } |
@@ -4519,6 +4521,12 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev) | |||
4519 | misc_fw_support = readl(&cfgtable->misc_fw_support); | 4521 | misc_fw_support = readl(&cfgtable->misc_fw_support); |
4520 | use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; | 4522 | use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; |
4521 | 4523 | ||
4524 | /* The doorbell reset seems to cause lockups on some Smart | ||
4525 | * Arrays (e.g. P410, P410i, maybe others). Until this is | ||
4526 | * fixed or at least isolated, avoid the doorbell reset. | ||
4527 | */ | ||
4528 | use_doorbell = 0; | ||
4529 | |||
4522 | rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell); | 4530 | rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell); |
4523 | if (rc) | 4531 | if (rc) |
4524 | goto unmap_cfgtable; | 4532 | goto unmap_cfgtable; |
@@ -4712,6 +4720,9 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, | |||
4712 | h->scatter_list = kmalloc(h->max_commands * | 4720 | h->scatter_list = kmalloc(h->max_commands * |
4713 | sizeof(struct scatterlist *), | 4721 | sizeof(struct scatterlist *), |
4714 | GFP_KERNEL); | 4722 | GFP_KERNEL); |
4723 | if (!h->scatter_list) | ||
4724 | goto clean4; | ||
4725 | |||
4715 | for (k = 0; k < h->nr_cmds; k++) { | 4726 | for (k = 0; k < h->nr_cmds; k++) { |
4716 | h->scatter_list[k] = kmalloc(sizeof(struct scatterlist) * | 4727 | h->scatter_list[k] = kmalloc(sizeof(struct scatterlist) * |
4717 | h->maxsgentries, | 4728 | h->maxsgentries, |
@@ -4781,7 +4792,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, | |||
4781 | clean4: | 4792 | clean4: |
4782 | kfree(h->cmd_pool_bits); | 4793 | kfree(h->cmd_pool_bits); |
4783 | /* Free up sg elements */ | 4794 | /* Free up sg elements */ |
4784 | for (k = 0; k < h->nr_cmds; k++) | 4795 | for (k-- ; k >= 0; k--) |
4785 | kfree(h->scatter_list[k]); | 4796 | kfree(h->scatter_list[k]); |
4786 | kfree(h->scatter_list); | 4797 | kfree(h->scatter_list); |
4787 | cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds); | 4798 | cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds); |
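Two of the cciss hunks above belong together: the probe path now bails out to clean4 when the scatter_list array itself fails to allocate, and the clean4 error path frees only the `k` per-command entries that were actually allocated, walking back down from the failing index, rather than all nr_cmds slots, some of which were never filled. The same unwind pattern in isolation, as a hedged sketch with illustrative names:

#include <linux/slab.h>

static int example_alloc_table(void ***table, int n, size_t sz)
{
        int k;

        *table = kmalloc(n * sizeof(void *), GFP_KERNEL);
        if (!*table)
                return -ENOMEM;

        for (k = 0; k < n; k++) {
                (*table)[k] = kmalloc(sz, GFP_KERNEL);
                if (!(*table)[k])
                        goto unwind;
        }
        return 0;

unwind:
        for (k--; k >= 0; k--)          /* free only what was allocated */
                kfree((*table)[k]);
        kfree(*table);
        *table = NULL;
        return -ENOMEM;
}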
diff --git a/drivers/block/loop.c b/drivers/block/loop.c index f3c636d23718..91797bbbe702 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c | |||
@@ -477,7 +477,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio) | |||
477 | pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset; | 477 | pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset; |
478 | 478 | ||
479 | if (bio_rw(bio) == WRITE) { | 479 | if (bio_rw(bio) == WRITE) { |
480 | bool barrier = (bio->bi_rw & REQ_HARDBARRIER); | 480 | bool barrier = !!(bio->bi_rw & REQ_HARDBARRIER); |
481 | struct file *file = lo->lo_backing_file; | 481 | struct file *file = lo->lo_backing_file; |
482 | 482 | ||
483 | if (barrier) { | 483 | if (barrier) { |
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c index b82c5ce5e9df..76fa3deaee84 100644 --- a/drivers/block/mg_disk.c +++ b/drivers/block/mg_disk.c | |||
@@ -974,8 +974,7 @@ static int mg_probe(struct platform_device *plat_dev) | |||
974 | host->breq->queuedata = host; | 974 | host->breq->queuedata = host; |
975 | 975 | ||
976 | /* mflash is random device, thanx for the noop */ | 976 | /* mflash is random device, thanx for the noop */ |
977 | elevator_exit(host->breq->elevator); | 977 | err = elevator_change(host->breq, "noop"); |
978 | err = elevator_init(host->breq, "noop"); | ||
979 | if (err) { | 978 | if (err) { |
980 | printk(KERN_ERR "%s:%d (elevator_init) fail\n", | 979 | printk(KERN_ERR "%s:%d (elevator_init) fail\n", |
981 | __func__, __LINE__); | 980 | __func__, __LINE__); |
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index b1cbeb59bb76..37a2bb595076 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c | |||
@@ -2369,7 +2369,7 @@ static void pkt_release_dev(struct pktcdvd_device *pd, int flush) | |||
2369 | pkt_shrink_pktlist(pd); | 2369 | pkt_shrink_pktlist(pd); |
2370 | } | 2370 | } |
2371 | 2371 | ||
2372 | static struct pktcdvd_device *pkt_find_dev_from_minor(int dev_minor) | 2372 | static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor) |
2373 | { | 2373 | { |
2374 | if (dev_minor >= MAX_WRITERS) | 2374 | if (dev_minor >= MAX_WRITERS) |
2375 | return NULL; | 2375 | return NULL; |
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index 710af89b176d..cd18493c9527 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <asm/smp.h> | 12 | #include <asm/smp.h> |
13 | #include "agp.h" | 13 | #include "agp.h" |
14 | #include "intel-agp.h" | 14 | #include "intel-agp.h" |
15 | #include <linux/intel-gtt.h> | ||
15 | 16 | ||
16 | #include "intel-gtt.c" | 17 | #include "intel-gtt.c" |
17 | 18 | ||
@@ -805,6 +806,8 @@ static const struct intel_driver_description { | |||
805 | "G45/G43", NULL, &intel_i965_driver }, | 806 | "G45/G43", NULL, &intel_i965_driver }, |
806 | { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG, | 807 | { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG, |
807 | "B43", NULL, &intel_i965_driver }, | 808 | "B43", NULL, &intel_i965_driver }, |
809 | { PCI_DEVICE_ID_INTEL_B43_1_HB, PCI_DEVICE_ID_INTEL_B43_1_IG, | ||
810 | "B43", NULL, &intel_i965_driver }, | ||
808 | { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, | 811 | { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, |
809 | "G41", NULL, &intel_i965_driver }, | 812 | "G41", NULL, &intel_i965_driver }, |
810 | { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, | 813 | { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, |
@@ -815,11 +818,19 @@ static const struct intel_driver_description { | |||
815 | "HD Graphics", NULL, &intel_i965_driver }, | 818 | "HD Graphics", NULL, &intel_i965_driver }, |
816 | { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, | 819 | { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, |
817 | "HD Graphics", NULL, &intel_i965_driver }, | 820 | "HD Graphics", NULL, &intel_i965_driver }, |
818 | { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG, | 821 | { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG, |
822 | "Sandybridge", NULL, &intel_gen6_driver }, | ||
823 | { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG, | ||
824 | "Sandybridge", NULL, &intel_gen6_driver }, | ||
825 | { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG, | ||
826 | "Sandybridge", NULL, &intel_gen6_driver }, | ||
827 | { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG, | ||
828 | "Sandybridge", NULL, &intel_gen6_driver }, | ||
829 | { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG, | ||
819 | "Sandybridge", NULL, &intel_gen6_driver }, | 830 | "Sandybridge", NULL, &intel_gen6_driver }, |
820 | { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG, | 831 | { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG, |
821 | "Sandybridge", NULL, &intel_gen6_driver }, | 832 | "Sandybridge", NULL, &intel_gen6_driver }, |
822 | { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_D0_IG, | 833 | { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG, |
823 | "Sandybridge", NULL, &intel_gen6_driver }, | 834 | "Sandybridge", NULL, &intel_gen6_driver }, |
824 | { 0, 0, NULL, NULL, NULL } | 835 | { 0, 0, NULL, NULL, NULL } |
825 | }; | 836 | }; |
@@ -1044,6 +1055,7 @@ static struct pci_device_id agp_intel_pci_table[] = { | |||
1044 | ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB), | 1055 | ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB), |
1045 | ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB), | 1056 | ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB), |
1046 | ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB), | 1057 | ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB), |
1058 | ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB), | ||
1047 | { } | 1059 | { } |
1048 | }; | 1060 | }; |
1049 | 1061 | ||
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h index 08d47532e605..d09b1ab7e8ab 100644 --- a/drivers/char/agp/intel-agp.h +++ b/drivers/char/agp/intel-agp.h | |||
@@ -1,6 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * Common Intel AGPGART and GTT definitions. | 2 | * Common Intel AGPGART and GTT definitions. |
3 | */ | 3 | */ |
4 | #ifndef _INTEL_AGP_H | ||
5 | #define _INTEL_AGP_H | ||
4 | 6 | ||
5 | /* Intel registers */ | 7 | /* Intel registers */ |
6 | #define INTEL_APSIZE 0xb4 | 8 | #define INTEL_APSIZE 0xb4 |
@@ -184,6 +186,8 @@ | |||
184 | #define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2 | 186 | #define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2 |
185 | #define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40 | 187 | #define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40 |
186 | #define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42 | 188 | #define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42 |
189 | #define PCI_DEVICE_ID_INTEL_B43_1_HB 0x2E90 | ||
190 | #define PCI_DEVICE_ID_INTEL_B43_1_IG 0x2E92 | ||
187 | #define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40 | 191 | #define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40 |
188 | #define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42 | 192 | #define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42 |
189 | #define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00 | 193 | #define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00 |
@@ -200,11 +204,16 @@ | |||
200 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062 | 204 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062 |
201 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a | 205 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a |
202 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046 | 206 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046 |
203 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100 | 207 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100 /* Desktop */ |
204 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG 0x0102 | 208 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG 0x0102 |
205 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104 | 209 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG 0x0112 |
206 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG 0x0106 | 210 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG 0x0122 |
207 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_D0_IG 0x0126 | 211 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104 /* Mobile */ |
212 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG 0x0106 | ||
213 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG 0x0116 | ||
214 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG 0x0126 | ||
215 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB 0x0108 /* Server */ | ||
216 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG 0x010A | ||
208 | 217 | ||
209 | /* cover 915 and 945 variants */ | 218 | /* cover 915 and 945 variants */ |
210 | #define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \ | 219 | #define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \ |
@@ -231,7 +240,8 @@ | |||
231 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB) | 240 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB) |
232 | 241 | ||
233 | #define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \ | 242 | #define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \ |
234 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) | 243 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB || \ |
244 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB) | ||
235 | 245 | ||
236 | #define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \ | 246 | #define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \ |
237 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \ | 247 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \ |
@@ -244,3 +254,5 @@ | |||
244 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \ | 254 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \ |
245 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \ | 255 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \ |
246 | IS_SNB) | 256 | IS_SNB) |
257 | |||
258 | #endif | ||
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index d22ffb811bf2..75e0a3497888 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c | |||
@@ -49,6 +49,26 @@ static struct gatt_mask intel_i810_masks[] = | |||
49 | .type = INTEL_AGP_CACHED_MEMORY} | 49 | .type = INTEL_AGP_CACHED_MEMORY} |
50 | }; | 50 | }; |
51 | 51 | ||
52 | #define INTEL_AGP_UNCACHED_MEMORY 0 | ||
53 | #define INTEL_AGP_CACHED_MEMORY_LLC 1 | ||
54 | #define INTEL_AGP_CACHED_MEMORY_LLC_GFDT 2 | ||
55 | #define INTEL_AGP_CACHED_MEMORY_LLC_MLC 3 | ||
56 | #define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT 4 | ||
57 | |||
58 | static struct gatt_mask intel_gen6_masks[] = | ||
59 | { | ||
60 | {.mask = I810_PTE_VALID | GEN6_PTE_UNCACHED, | ||
61 | .type = INTEL_AGP_UNCACHED_MEMORY }, | ||
62 | {.mask = I810_PTE_VALID | GEN6_PTE_LLC, | ||
63 | .type = INTEL_AGP_CACHED_MEMORY_LLC }, | ||
64 | {.mask = I810_PTE_VALID | GEN6_PTE_LLC | GEN6_PTE_GFDT, | ||
65 | .type = INTEL_AGP_CACHED_MEMORY_LLC_GFDT }, | ||
66 | {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC, | ||
67 | .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC }, | ||
68 | {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC | GEN6_PTE_GFDT, | ||
69 | .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT }, | ||
70 | }; | ||
71 | |||
52 | static struct _intel_private { | 72 | static struct _intel_private { |
53 | struct pci_dev *pcidev; /* device one */ | 73 | struct pci_dev *pcidev; /* device one */ |
54 | u8 __iomem *registers; | 74 | u8 __iomem *registers; |
@@ -178,13 +198,6 @@ static void intel_agp_insert_sg_entries(struct agp_memory *mem, | |||
178 | off_t pg_start, int mask_type) | 198 | off_t pg_start, int mask_type) |
179 | { | 199 | { |
180 | int i, j; | 200 | int i, j; |
181 | u32 cache_bits = 0; | ||
182 | |||
183 | if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || | ||
184 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) | ||
185 | { | ||
186 | cache_bits = GEN6_PTE_LLC_MLC; | ||
187 | } | ||
188 | 201 | ||
189 | for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { | 202 | for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { |
190 | writel(agp_bridge->driver->mask_memory(agp_bridge, | 203 | writel(agp_bridge->driver->mask_memory(agp_bridge, |
@@ -317,6 +330,23 @@ static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge, | |||
317 | return 0; | 330 | return 0; |
318 | } | 331 | } |
319 | 332 | ||
333 | static int intel_gen6_type_to_mask_type(struct agp_bridge_data *bridge, | ||
334 | int type) | ||
335 | { | ||
336 | unsigned int type_mask = type & ~AGP_USER_CACHED_MEMORY_GFDT; | ||
337 | unsigned int gfdt = type & AGP_USER_CACHED_MEMORY_GFDT; | ||
338 | |||
339 | if (type_mask == AGP_USER_UNCACHED_MEMORY) | ||
340 | return INTEL_AGP_UNCACHED_MEMORY; | ||
341 | else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) | ||
342 | return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT : | ||
343 | INTEL_AGP_CACHED_MEMORY_LLC_MLC; | ||
344 | else /* set 'normal'/'cached' to LLC by default */ | ||
345 | return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_GFDT : | ||
346 | INTEL_AGP_CACHED_MEMORY_LLC; | ||
347 | } | ||
348 | |||
349 | |||
320 | static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start, | 350 | static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start, |
321 | int type) | 351 | int type) |
322 | { | 352 | { |
@@ -588,8 +618,7 @@ static void intel_i830_init_gtt_entries(void) | |||
588 | gtt_entries = 0; | 618 | gtt_entries = 0; |
589 | break; | 619 | break; |
590 | } | 620 | } |
591 | } else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || | 621 | } else if (IS_SNB) { |
592 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) { | ||
593 | /* | 622 | /* |
594 | * SandyBridge has new memory control reg at 0x50.w | 623 | * SandyBridge has new memory control reg at 0x50.w |
595 | */ | 624 | */ |
@@ -1068,11 +1097,11 @@ static void intel_i9xx_setup_flush(void) | |||
1068 | intel_i915_setup_chipset_flush(); | 1097 | intel_i915_setup_chipset_flush(); |
1069 | } | 1098 | } |
1070 | 1099 | ||
1071 | if (intel_private.ifp_resource.start) { | 1100 | if (intel_private.ifp_resource.start) |
1072 | intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE); | 1101 | intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE); |
1073 | if (!intel_private.i9xx_flush_page) | 1102 | if (!intel_private.i9xx_flush_page) |
1074 | dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing"); | 1103 | dev_err(&intel_private.pcidev->dev, |
1075 | } | 1104 | "can't ioremap flush page - no chipset flushing\n"); |
1076 | } | 1105 | } |
1077 | 1106 | ||
1078 | static int intel_i9xx_configure(void) | 1107 | static int intel_i9xx_configure(void) |
@@ -1163,7 +1192,7 @@ static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start, | |||
1163 | 1192 | ||
1164 | mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); | 1193 | mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); |
1165 | 1194 | ||
1166 | if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY && | 1195 | if (!IS_SNB && mask_type != 0 && mask_type != AGP_PHYS_MEMORY && |
1167 | mask_type != INTEL_AGP_CACHED_MEMORY) | 1196 | mask_type != INTEL_AGP_CACHED_MEMORY) |
1168 | goto out_err; | 1197 | goto out_err; |
1169 | 1198 | ||
@@ -1333,8 +1362,8 @@ static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge, | |||
1333 | static unsigned long intel_gen6_mask_memory(struct agp_bridge_data *bridge, | 1362 | static unsigned long intel_gen6_mask_memory(struct agp_bridge_data *bridge, |
1334 | dma_addr_t addr, int type) | 1363 | dma_addr_t addr, int type) |
1335 | { | 1364 | { |
1336 | /* Shift high bits down */ | 1365 | /* gen6 has bit11-4 for physical addr bit39-32 */ |
1337 | addr |= (addr >> 28) & 0xff; | 1366 | addr |= (addr >> 28) & 0xff0; |
1338 | 1367 | ||
1339 | /* Type checking must be done elsewhere */ | 1368 | /* Type checking must be done elsewhere */ |
1340 | return addr | bridge->driver->masks[type].mask; | 1369 | return addr | bridge->driver->masks[type].mask; |
@@ -1359,6 +1388,7 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size) | |||
1359 | break; | 1388 | break; |
1360 | case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB: | 1389 | case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB: |
1361 | case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB: | 1390 | case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB: |
1391 | case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB: | ||
1362 | *gtt_offset = MB(2); | 1392 | *gtt_offset = MB(2); |
1363 | 1393 | ||
1364 | pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); | 1394 | pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); |
@@ -1563,7 +1593,7 @@ static const struct agp_bridge_driver intel_gen6_driver = { | |||
1563 | .fetch_size = intel_i9xx_fetch_size, | 1593 | .fetch_size = intel_i9xx_fetch_size, |
1564 | .cleanup = intel_i915_cleanup, | 1594 | .cleanup = intel_i915_cleanup, |
1565 | .mask_memory = intel_gen6_mask_memory, | 1595 | .mask_memory = intel_gen6_mask_memory, |
1566 | .masks = intel_i810_masks, | 1596 | .masks = intel_gen6_masks, |
1567 | .agp_enable = intel_i810_agp_enable, | 1597 | .agp_enable = intel_i810_agp_enable, |
1568 | .cache_flush = global_cache_flush, | 1598 | .cache_flush = global_cache_flush, |
1569 | .create_gatt_table = intel_i965_create_gatt_table, | 1599 | .create_gatt_table = intel_i965_create_gatt_table, |
@@ -1576,7 +1606,7 @@ static const struct agp_bridge_driver intel_gen6_driver = { | |||
1576 | .agp_alloc_pages = agp_generic_alloc_pages, | 1606 | .agp_alloc_pages = agp_generic_alloc_pages, |
1577 | .agp_destroy_page = agp_generic_destroy_page, | 1607 | .agp_destroy_page = agp_generic_destroy_page, |
1578 | .agp_destroy_pages = agp_generic_destroy_pages, | 1608 | .agp_destroy_pages = agp_generic_destroy_pages, |
1579 | .agp_type_to_mask_type = intel_i830_type_to_mask_type, | 1609 | .agp_type_to_mask_type = intel_gen6_type_to_mask_type, |
1580 | .chipset_flush = intel_i915_chipset_flush, | 1610 | .chipset_flush = intel_i915_chipset_flush, |
1581 | #ifdef USE_PCI_DMA_API | 1611 | #ifdef USE_PCI_DMA_API |
1582 | .agp_map_page = intel_agp_map_page, | 1612 | .agp_map_page = intel_agp_map_page, |
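The intel_gen6_mask_memory fix above is about where the high DMA-address bits land in a gen6 GTT entry: bits 39:32 of the address belong in PTE bits 11:4, so after shifting right by 28 the value must be masked with 0xff0. The old 0xff mask dropped address bits 39:36 and let bits 31:28 leak into the low PTE flag bits. A small stand-alone check of the arithmetic:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t addr = (uint64_t)0xab << 32;     /* physical bits 39:32 = 0xab */

        uint64_t old_mask = (addr >> 28) & 0xff;  /* top nibble of the address is lost */
        uint64_t new_mask = (addr >> 28) & 0xff0; /* bits 39:32 end up in PTE bits 11:4 */

        printf("old: 0x%03llx  new: 0x%03llx\n",
               (unsigned long long)old_mask, (unsigned long long)new_mask);
        /* prints old: 0x0b0  new: 0xab0 */
        return 0;
}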
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c index 1acdb2509511..a3f5e381e746 100644 --- a/drivers/char/hw_random/n2-drv.c +++ b/drivers/char/hw_random/n2-drv.c | |||
@@ -387,7 +387,7 @@ static int n2rng_init_control(struct n2rng *np) | |||
387 | 387 | ||
388 | static int n2rng_data_read(struct hwrng *rng, u32 *data) | 388 | static int n2rng_data_read(struct hwrng *rng, u32 *data) |
389 | { | 389 | { |
390 | struct n2rng *np = rng->priv; | 390 | struct n2rng *np = (struct n2rng *) rng->priv; |
391 | unsigned long ra = __pa(&np->test_data); | 391 | unsigned long ra = __pa(&np->test_data); |
392 | int len; | 392 | int len; |
393 | 393 | ||
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 3822b4f49c84..7bd7c45b53ef 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c | |||
@@ -305,6 +305,9 @@ static int num_force_kipmid; | |||
305 | #ifdef CONFIG_PCI | 305 | #ifdef CONFIG_PCI |
306 | static int pci_registered; | 306 | static int pci_registered; |
307 | #endif | 307 | #endif |
308 | #ifdef CONFIG_ACPI | ||
309 | static int pnp_registered; | ||
310 | #endif | ||
308 | #ifdef CONFIG_PPC_OF | 311 | #ifdef CONFIG_PPC_OF |
309 | static int of_registered; | 312 | static int of_registered; |
310 | #endif | 313 | #endif |
@@ -2126,7 +2129,7 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev, | |||
2126 | { | 2129 | { |
2127 | struct acpi_device *acpi_dev; | 2130 | struct acpi_device *acpi_dev; |
2128 | struct smi_info *info; | 2131 | struct smi_info *info; |
2129 | struct resource *res; | 2132 | struct resource *res, *res_second; |
2130 | acpi_handle handle; | 2133 | acpi_handle handle; |
2131 | acpi_status status; | 2134 | acpi_status status; |
2132 | unsigned long long tmp; | 2135 | unsigned long long tmp; |
@@ -2182,13 +2185,13 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev, | |||
2182 | info->io.addr_data = res->start; | 2185 | info->io.addr_data = res->start; |
2183 | 2186 | ||
2184 | info->io.regspacing = DEFAULT_REGSPACING; | 2187 | info->io.regspacing = DEFAULT_REGSPACING; |
2185 | res = pnp_get_resource(dev, | 2188 | res_second = pnp_get_resource(dev, |
2186 | (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? | 2189 | (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? |
2187 | IORESOURCE_IO : IORESOURCE_MEM, | 2190 | IORESOURCE_IO : IORESOURCE_MEM, |
2188 | 1); | 2191 | 1); |
2189 | if (res) { | 2192 | if (res_second) { |
2190 | if (res->start > info->io.addr_data) | 2193 | if (res_second->start > info->io.addr_data) |
2191 | info->io.regspacing = res->start - info->io.addr_data; | 2194 | info->io.regspacing = res_second->start - info->io.addr_data; |
2192 | } | 2195 | } |
2193 | info->io.regsize = DEFAULT_REGSPACING; | 2196 | info->io.regsize = DEFAULT_REGSPACING; |
2194 | info->io.regshift = 0; | 2197 | info->io.regshift = 0; |
@@ -3359,6 +3362,7 @@ static __devinit int init_ipmi_si(void) | |||
3359 | 3362 | ||
3360 | #ifdef CONFIG_ACPI | 3363 | #ifdef CONFIG_ACPI |
3361 | pnp_register_driver(&ipmi_pnp_driver); | 3364 | pnp_register_driver(&ipmi_pnp_driver); |
3365 | pnp_registered = 1; | ||
3362 | #endif | 3366 | #endif |
3363 | 3367 | ||
3364 | #ifdef CONFIG_DMI | 3368 | #ifdef CONFIG_DMI |
@@ -3526,7 +3530,8 @@ static __exit void cleanup_ipmi_si(void) | |||
3526 | pci_unregister_driver(&ipmi_pci_driver); | 3530 | pci_unregister_driver(&ipmi_pci_driver); |
3527 | #endif | 3531 | #endif |
3528 | #ifdef CONFIG_ACPI | 3532 | #ifdef CONFIG_ACPI |
3529 | pnp_unregister_driver(&ipmi_pnp_driver); | 3533 | if (pnp_registered) |
3534 | pnp_unregister_driver(&ipmi_pnp_driver); | ||
3530 | #endif | 3535 | #endif |
3531 | 3536 | ||
3532 | #ifdef CONFIG_PPC_OF | 3537 | #ifdef CONFIG_PPC_OF |
diff --git a/drivers/char/mem.c b/drivers/char/mem.c index a398ecdbd758..1f528fad3516 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c | |||
@@ -788,10 +788,11 @@ static const struct file_operations zero_fops = { | |||
788 | /* | 788 | /* |
789 | * capabilities for /dev/zero | 789 | * capabilities for /dev/zero |
790 | * - permits private mappings, "copies" are taken of the source of zeros | 790 | * - permits private mappings, "copies" are taken of the source of zeros |
791 | * - no writeback happens | ||
791 | */ | 792 | */ |
792 | static struct backing_dev_info zero_bdi = { | 793 | static struct backing_dev_info zero_bdi = { |
793 | .name = "char/mem", | 794 | .name = "char/mem", |
794 | .capabilities = BDI_CAP_MAP_COPY, | 795 | .capabilities = BDI_CAP_MAP_COPY | BDI_CAP_NO_ACCT_AND_WRITEBACK, |
795 | }; | 796 | }; |
796 | 797 | ||
797 | static const struct file_operations full_fops = { | 798 | static const struct file_operations full_fops = { |
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c index 949067a0bd47..613c852ee0fe 100644 --- a/drivers/char/tty_io.c +++ b/drivers/char/tty_io.c | |||
@@ -355,7 +355,7 @@ struct tty_driver *tty_find_polling_driver(char *name, int *line) | |||
355 | if (*stp == '\0') | 355 | if (*stp == '\0') |
356 | stp = NULL; | 356 | stp = NULL; |
357 | 357 | ||
358 | if (tty_line >= 0 && tty_line <= p->num && p->ops && | 358 | if (tty_line >= 0 && tty_line < p->num && p->ops && |
359 | p->ops->poll_init && !p->ops->poll_init(p, tty_line, stp)) { | 359 | p->ops->poll_init && !p->ops->poll_init(p, tty_line, stp)) { |
360 | res = tty_driver_kref_get(p); | 360 | res = tty_driver_kref_get(p); |
361 | *line = tty_line; | 361 | *line = tty_line; |
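
The tty_find_polling_driver() change is a plain off-by-one fix: a driver with p->num lines owns indices 0 .. p->num - 1, so the bound must be strict. A tiny standalone sketch of the corrected check (hypothetical values, for illustration only):

	#include <stdbool.h>
	#include <stdio.h>

	/* Valid line indices for a driver with "num" lines are 0..num-1,
	 * hence "<" rather than "<=" as in the hunk above. */
	static bool tty_line_valid(int tty_line, int num)
	{
		return tty_line >= 0 && tty_line < num;
	}

	int main(void)
	{
		printf("%d\n", tty_line_valid(3, 4));	/* 1: last valid line  */
		printf("%d\n", tty_line_valid(4, 4));	/* 0: one past the end */
		return 0;
	}
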
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 942a9826bd23..c810481a5bc2 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c | |||
@@ -596,6 +596,10 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf, | |||
596 | ssize_t ret; | 596 | ssize_t ret; |
597 | bool nonblock; | 597 | bool nonblock; |
598 | 598 | ||
599 | /* Userspace could be out to fool us */ | ||
600 | if (!count) | ||
601 | return 0; | ||
602 | |||
599 | port = filp->private_data; | 603 | port = filp->private_data; |
600 | 604 | ||
601 | nonblock = filp->f_flags & O_NONBLOCK; | 605 | nonblock = filp->f_flags & O_NONBLOCK; |
@@ -642,7 +646,7 @@ static unsigned int port_fops_poll(struct file *filp, poll_table *wait) | |||
642 | poll_wait(filp, &port->waitqueue, wait); | 646 | poll_wait(filp, &port->waitqueue, wait); |
643 | 647 | ||
644 | ret = 0; | 648 | ret = 0; |
645 | if (port->inbuf) | 649 | if (!will_read_block(port)) |
646 | ret |= POLLIN | POLLRDNORM; | 650 | ret |= POLLIN | POLLRDNORM; |
647 | if (!will_write_block(port)) | 651 | if (!will_write_block(port)) |
648 | ret |= POLLOUT; | 652 | ret |= POLLOUT; |
diff --git a/drivers/char/vt.c b/drivers/char/vt.c index 50590c7f2c01..281aada7b4a1 100644 --- a/drivers/char/vt.c +++ b/drivers/char/vt.c | |||
@@ -906,22 +906,16 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, | |||
906 | * bottom of buffer | 906 | * bottom of buffer |
907 | */ | 907 | */ |
908 | old_origin += (old_rows - new_rows) * old_row_size; | 908 | old_origin += (old_rows - new_rows) * old_row_size; |
909 | end = vc->vc_scr_end; | ||
910 | } else { | 909 | } else { |
911 | /* | 910 | /* |
912 | * Cursor is in no man's land, copy 1/2 screenful | 911 | * Cursor is in no man's land, copy 1/2 screenful |
913 | * from the top and bottom of cursor position | 912 | * from the top and bottom of cursor position |
914 | */ | 913 | */ |
915 | old_origin += (vc->vc_y - new_rows/2) * old_row_size; | 914 | old_origin += (vc->vc_y - new_rows/2) * old_row_size; |
916 | end = old_origin + (old_row_size * new_rows); | ||
917 | } | 915 | } |
918 | } else | 916 | } |
919 | /* | 917 | |
920 | * Cursor near the top, copy contents from the top of buffer | 918 | end = old_origin + old_row_size * min(old_rows, new_rows); |
921 | */ | ||
922 | end = (old_rows > new_rows) ? old_origin + | ||
923 | (old_row_size * new_rows) : | ||
924 | vc->vc_scr_end; | ||
925 | 919 | ||
926 | update_attr(vc); | 920 | update_attr(vc); |
927 | 921 | ||
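
The vc_do_resize() rewrite collapses three copies of the end-of-copy computation into one expression after the if/else: the copy always spans min(old_rows, new_rows) rows starting at old_origin. A sketch of that arithmetic with made-up sizes (illustration only):

	#include <stdio.h>

	#define MIN(a, b) ((a) < (b) ? (a) : (b))

	int main(void)
	{
		unsigned long old_origin   = 0x1000;	/* hypothetical */
		unsigned int  old_row_size = 160;	/* 80 cols * 2 bytes */
		unsigned int  old_rows = 30, new_rows = 25;

		/* as in the hunk: copy min(old_rows, new_rows) rows */
		unsigned long end = old_origin +
				    old_row_size * MIN(old_rows, new_rows);

		printf("copy %lu bytes\n", end - old_origin);	/* 4000 */
		return 0;
	}
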
@@ -3075,8 +3069,7 @@ static int bind_con_driver(const struct consw *csw, int first, int last, | |||
3075 | 3069 | ||
3076 | old_was_color = vc->vc_can_do_color; | 3070 | old_was_color = vc->vc_can_do_color; |
3077 | vc->vc_sw->con_deinit(vc); | 3071 | vc->vc_sw->con_deinit(vc); |
3078 | if (!vc->vc_origin) | 3072 | vc->vc_origin = (unsigned long)vc->vc_screenbuf; |
3079 | vc->vc_origin = (unsigned long)vc->vc_screenbuf; | ||
3080 | visual_init(vc, i, 0); | 3073 | visual_init(vc, i, 0); |
3081 | set_origin(vc); | 3074 | set_origin(vc); |
3082 | update_attr(vc); | 3075 | update_attr(vc); |
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c index 2bbeaaea46e9..38df8c19e74c 100644 --- a/drivers/char/vt_ioctl.c +++ b/drivers/char/vt_ioctl.c | |||
@@ -533,11 +533,14 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, | |||
533 | case KIOCSOUND: | 533 | case KIOCSOUND: |
534 | if (!perm) | 534 | if (!perm) |
535 | goto eperm; | 535 | goto eperm; |
536 | /* FIXME: This is an old broken API but we need to keep it | 536 | /* |
537 | supported and somehow separate the historic advertised | 537 | * The use of PIT_TICK_RATE is historic, it used to be |
538 | tick rate from any real one */ | 538 | * the platform-dependent CLOCK_TICK_RATE between 2.6.12 |
539 | * and 2.6.36, which was a minor but unfortunate ABI | ||
540 | * change. | ||
541 | */ | ||
539 | if (arg) | 542 | if (arg) |
540 | arg = CLOCK_TICK_RATE / arg; | 543 | arg = PIT_TICK_RATE / arg; |
541 | kd_mksound(arg, 0); | 544 | kd_mksound(arg, 0); |
542 | break; | 545 | break; |
543 | 546 | ||
@@ -553,11 +556,8 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, | |||
553 | */ | 556 | */ |
554 | ticks = HZ * ((arg >> 16) & 0xffff) / 1000; | 557 | ticks = HZ * ((arg >> 16) & 0xffff) / 1000; |
555 | count = ticks ? (arg & 0xffff) : 0; | 558 | count = ticks ? (arg & 0xffff) : 0; |
556 | /* FIXME: This is an old broken API but we need to keep it | ||
557 | supported and somehow separate the historic advertised | ||
558 | tick rate from any real one */ | ||
559 | if (count) | 559 | if (count) |
560 | count = CLOCK_TICK_RATE / count; | 560 | count = PIT_TICK_RATE / count; |
561 | kd_mksound(count, ticks); | 561 | kd_mksound(count, ticks); |
562 | break; | 562 | break; |
563 | } | 563 | } |
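
Both KIOCSOUND and KDMKTONE now convert the requested frequency into a PIT divisor with the fixed PIT_TICK_RATE (1193182 Hz) instead of the platform-dependent CLOCK_TICK_RATE. The arithmetic, as a small standalone sketch (illustration only):

	#include <stdio.h>

	#define PIT_TICK_RATE 1193182UL	/* i8253 input clock, Hz */

	/* Frequency in Hz -> PIT count, as in the ioctl hunks above. */
	static unsigned long freq_to_count(unsigned long hz)
	{
		return hz ? PIT_TICK_RATE / hz : 0;
	}

	int main(void)
	{
		/* 440 Hz beep -> divisor 2711 */
		printf("%lu\n", freq_to_count(440));
		return 0;
	}
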
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index c2408bbe9c2e..f508690eb958 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c | |||
@@ -80,7 +80,7 @@ | |||
80 | * Limiting Performance Impact | 80 | * Limiting Performance Impact |
81 | * --------------------------- | 81 | * --------------------------- |
82 | * C states, especially those with large exit latencies, can have a real | 82 | * C states, especially those with large exit latencies, can have a real |
83 | * noticable impact on workloads, which is not acceptable for most sysadmins, | 83 | * noticeable impact on workloads, which is not acceptable for most sysadmins, |
84 | * and in addition, less performance has a power price of its own. | 84 | * and in addition, less performance has a power price of its own. |
85 | * | 85 | * |
86 | * As a general rule of thumb, menu assumes that the following heuristic | 86 | * As a general rule of thumb, menu assumes that the following heuristic |
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c index 8661c84a105d..b98c67664ae7 100644 --- a/drivers/dca/dca-core.c +++ b/drivers/dca/dca-core.c | |||
@@ -39,6 +39,10 @@ static DEFINE_SPINLOCK(dca_lock); | |||
39 | 39 | ||
40 | static LIST_HEAD(dca_domains); | 40 | static LIST_HEAD(dca_domains); |
41 | 41 | ||
42 | static BLOCKING_NOTIFIER_HEAD(dca_provider_chain); | ||
43 | |||
44 | static int dca_providers_blocked; | ||
45 | |||
42 | static struct pci_bus *dca_pci_rc_from_dev(struct device *dev) | 46 | static struct pci_bus *dca_pci_rc_from_dev(struct device *dev) |
43 | { | 47 | { |
44 | struct pci_dev *pdev = to_pci_dev(dev); | 48 | struct pci_dev *pdev = to_pci_dev(dev); |
@@ -70,6 +74,60 @@ static void dca_free_domain(struct dca_domain *domain) | |||
70 | kfree(domain); | 74 | kfree(domain); |
71 | } | 75 | } |
72 | 76 | ||
77 | static int dca_provider_ioat_ver_3_0(struct device *dev) | ||
78 | { | ||
79 | struct pci_dev *pdev = to_pci_dev(dev); | ||
80 | |||
81 | return ((pdev->vendor == PCI_VENDOR_ID_INTEL) && | ||
82 | ((pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG0) || | ||
83 | (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG1) || | ||
84 | (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG2) || | ||
85 | (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG3) || | ||
86 | (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG4) || | ||
87 | (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG5) || | ||
88 | (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG6) || | ||
89 | (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG7))); | ||
90 | } | ||
91 | |||
92 | static void unregister_dca_providers(void) | ||
93 | { | ||
94 | struct dca_provider *dca, *_dca; | ||
95 | struct list_head unregistered_providers; | ||
96 | struct dca_domain *domain; | ||
97 | unsigned long flags; | ||
98 | |||
99 | blocking_notifier_call_chain(&dca_provider_chain, | ||
100 | DCA_PROVIDER_REMOVE, NULL); | ||
101 | |||
102 | INIT_LIST_HEAD(&unregistered_providers); | ||
103 | |||
104 | spin_lock_irqsave(&dca_lock, flags); | ||
105 | |||
106 | if (list_empty(&dca_domains)) { | ||
107 | spin_unlock_irqrestore(&dca_lock, flags); | ||
108 | return; | ||
109 | } | ||
110 | |||
111 | /* at this point only one domain in the list is expected */ | ||
112 | domain = list_first_entry(&dca_domains, struct dca_domain, node); | ||
113 | if (!domain) | ||
114 | return; | ||
115 | |||
116 | list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node) { | ||
117 | list_del(&dca->node); | ||
118 | list_add(&dca->node, &unregistered_providers); | ||
119 | } | ||
120 | |||
121 | dca_free_domain(domain); | ||
122 | |||
123 | spin_unlock_irqrestore(&dca_lock, flags); | ||
124 | |||
125 | list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) { | ||
126 | dca_sysfs_remove_provider(dca); | ||
127 | list_del(&dca->node); | ||
128 | } | ||
129 | } | ||
130 | |||
73 | static struct dca_domain *dca_find_domain(struct pci_bus *rc) | 131 | static struct dca_domain *dca_find_domain(struct pci_bus *rc) |
74 | { | 132 | { |
75 | struct dca_domain *domain; | 133 | struct dca_domain *domain; |
@@ -90,9 +148,13 @@ static struct dca_domain *dca_get_domain(struct device *dev) | |||
90 | domain = dca_find_domain(rc); | 148 | domain = dca_find_domain(rc); |
91 | 149 | ||
92 | if (!domain) { | 150 | if (!domain) { |
93 | domain = dca_allocate_domain(rc); | 151 | if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains)) { |
94 | if (domain) | 152 | dca_providers_blocked = 1; |
95 | list_add(&domain->node, &dca_domains); | 153 | } else { |
154 | domain = dca_allocate_domain(rc); | ||
155 | if (domain) | ||
156 | list_add(&domain->node, &dca_domains); | ||
157 | } | ||
96 | } | 158 | } |
97 | 159 | ||
98 | return domain; | 160 | return domain; |
@@ -293,8 +355,6 @@ void free_dca_provider(struct dca_provider *dca) | |||
293 | } | 355 | } |
294 | EXPORT_SYMBOL_GPL(free_dca_provider); | 356 | EXPORT_SYMBOL_GPL(free_dca_provider); |
295 | 357 | ||
296 | static BLOCKING_NOTIFIER_HEAD(dca_provider_chain); | ||
297 | |||
298 | /** | 358 | /** |
299 | * register_dca_provider - register a dca provider | 359 | * register_dca_provider - register a dca provider |
300 | * @dca - struct created by alloc_dca_provider() | 360 | * @dca - struct created by alloc_dca_provider() |
@@ -306,6 +366,13 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev) | |||
306 | unsigned long flags; | 366 | unsigned long flags; |
307 | struct dca_domain *domain; | 367 | struct dca_domain *domain; |
308 | 368 | ||
369 | spin_lock_irqsave(&dca_lock, flags); | ||
370 | if (dca_providers_blocked) { | ||
371 | spin_unlock_irqrestore(&dca_lock, flags); | ||
372 | return -ENODEV; | ||
373 | } | ||
374 | spin_unlock_irqrestore(&dca_lock, flags); | ||
375 | |||
309 | err = dca_sysfs_add_provider(dca, dev); | 376 | err = dca_sysfs_add_provider(dca, dev); |
310 | if (err) | 377 | if (err) |
311 | return err; | 378 | return err; |
@@ -313,7 +380,13 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev) | |||
313 | spin_lock_irqsave(&dca_lock, flags); | 380 | spin_lock_irqsave(&dca_lock, flags); |
314 | domain = dca_get_domain(dev); | 381 | domain = dca_get_domain(dev); |
315 | if (!domain) { | 382 | if (!domain) { |
316 | spin_unlock_irqrestore(&dca_lock, flags); | 383 | if (dca_providers_blocked) { |
384 | spin_unlock_irqrestore(&dca_lock, flags); | ||
385 | dca_sysfs_remove_provider(dca); | ||
386 | unregister_dca_providers(); | ||
387 | } else { | ||
388 | spin_unlock_irqrestore(&dca_lock, flags); | ||
389 | } | ||
317 | return -ENODEV; | 390 | return -ENODEV; |
318 | } | 391 | } |
319 | list_add(&dca->node, &domain->dca_providers); | 392 | list_add(&dca->node, &domain->dca_providers); |
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 86c5ae9fde34..411d5bf50fc4 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c | |||
@@ -162,7 +162,7 @@ static int mv_is_err_intr(u32 intr_cause) | |||
162 | 162 | ||
163 | static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan) | 163 | static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan) |
164 | { | 164 | { |
165 | u32 val = (1 << (1 + (chan->idx * 16))); | 165 | u32 val = ~(1 << (chan->idx * 16)); |
166 | dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val); | 166 | dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val); |
167 | __raw_writel(val, XOR_INTR_CAUSE(chan)); | 167 | __raw_writel(val, XOR_INTR_CAUSE(chan)); |
168 | } | 168 | } |
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c index fb64cf36ba61..eb6b54dbb806 100644 --- a/drivers/dma/shdma.c +++ b/drivers/dma/shdma.c | |||
@@ -580,7 +580,6 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg( | |||
580 | 580 | ||
581 | sh_chan = to_sh_chan(chan); | 581 | sh_chan = to_sh_chan(chan); |
582 | param = chan->private; | 582 | param = chan->private; |
583 | slave_addr = param->config->addr; | ||
584 | 583 | ||
585 | /* Someone calling slave DMA on a public channel? */ | 584 | /* Someone calling slave DMA on a public channel? */ |
586 | if (!param || !sg_len) { | 585 | if (!param || !sg_len) { |
@@ -589,6 +588,8 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg( | |||
589 | return NULL; | 588 | return NULL; |
590 | } | 589 | } |
591 | 590 | ||
591 | slave_addr = param->config->addr; | ||
592 | |||
592 | /* | 593 | /* |
593 | * if (param != NULL), this is a successfully requested slave channel, | 594 | * if (param != NULL), this is a successfully requested slave channel, |
594 | * therefore param->config != NULL too. | 595 | * therefore param->config != NULL too. |
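
The shdma change is the usual check-before-dereference reordering: param comes from chan->private and may be NULL for a non-slave channel, so param->config->addr may only be read after the NULL test. A minimal sketch of the pattern (hypothetical types, not the driver's real structures):

	#include <stddef.h>
	#include <stdio.h>

	struct cfg   { unsigned long addr; };
	struct slave { struct cfg *config; };

	/* Validate the pointer before touching param->config->addr,
	 * matching the reordering in the hunk above. */
	static unsigned long slave_addr(struct slave *param)
	{
		if (!param)
			return 0;
		return param->config->addr;
	}

	int main(void)
	{
		struct cfg c = { .addr = 0x4000 };
		struct slave s = { .config = &c };

		printf("%lx\n", slave_addr(&s));	/* 4000 */
		printf("%lx\n", slave_addr(NULL));	/* 0    */
		return 0;
	}
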
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index 3630308e7b81..6b21e25f7a84 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c | |||
@@ -339,6 +339,9 @@ static void edac_mc_workq_teardown(struct mem_ctl_info *mci) | |||
339 | { | 339 | { |
340 | int status; | 340 | int status; |
341 | 341 | ||
342 | if (mci->op_state != OP_RUNNING_POLL) | ||
343 | return; | ||
344 | |||
342 | status = cancel_delayed_work(&mci->work); | 345 | status = cancel_delayed_work(&mci->work); |
343 | if (status == 0) { | 346 | if (status == 0) { |
344 | debugf0("%s() not canceled, flush the queue\n", | 347 | debugf0("%s() not canceled, flush the queue\n", |
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c index e0187d16dd7c..0fd5b85a0f75 100644 --- a/drivers/edac/i7core_edac.c +++ b/drivers/edac/i7core_edac.c | |||
@@ -1140,6 +1140,7 @@ static struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = { | |||
1140 | ATTR_COUNTER(0), | 1140 | ATTR_COUNTER(0), |
1141 | ATTR_COUNTER(1), | 1141 | ATTR_COUNTER(1), |
1142 | ATTR_COUNTER(2), | 1142 | ATTR_COUNTER(2), |
1143 | { .attr = { .name = NULL } } | ||
1143 | }; | 1144 | }; |
1144 | 1145 | ||
1145 | static struct mcidev_sysfs_group i7core_udimm_counters = { | 1146 | static struct mcidev_sysfs_group i7core_udimm_counters = { |
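
The i7core_edac fix adds the NULL-name sentinel that the sysfs attribute walker uses as its end marker; without it the loop runs off the end of the counters array. A sketch of sentinel-terminated iteration (simplified struct, not the real mcidev_sysfs_attribute):

	#include <stddef.h>
	#include <stdio.h>

	struct attr { const char *name; };

	/* Iterate until the entry whose name is NULL, the convention the
	 * sentinel added above restores for the udimm counter table. */
	static void walk(const struct attr *attrs)
	{
		for (; attrs->name; attrs++)
			printf("attr %s\n", attrs->name);
	}

	int main(void)
	{
		static const struct attr counters[] = {
			{ "udimm0" }, { "udimm1" }, { "udimm2" },
			{ .name = NULL },		/* sentinel */
		};

		walk(counters);
		return 0;
	}
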
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index be29b0bb2471..1b05896648bc 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c | |||
@@ -263,6 +263,7 @@ static const struct { | |||
263 | {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, QUIRK_NO_MSI}, | 263 | {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, QUIRK_NO_MSI}, |
264 | {PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, | 264 | {PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, |
265 | {PCI_VENDOR_ID_VIA, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, | 265 | {PCI_VENDOR_ID_VIA, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, |
266 | {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, | ||
266 | {PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, QUIRK_BE_HEADERS}, | 267 | {PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, QUIRK_BE_HEADERS}, |
267 | }; | 268 | }; |
268 | 269 | ||
diff --git a/drivers/gpio/sx150x.c b/drivers/gpio/sx150x.c index b42f42ca70c3..823559ab0e24 100644 --- a/drivers/gpio/sx150x.c +++ b/drivers/gpio/sx150x.c | |||
@@ -459,17 +459,33 @@ static int sx150x_init_io(struct sx150x_chip *chip, u8 base, u16 cfg) | |||
459 | return err; | 459 | return err; |
460 | } | 460 | } |
461 | 461 | ||
462 | static int sx150x_init_hw(struct sx150x_chip *chip, | 462 | static int sx150x_reset(struct sx150x_chip *chip) |
463 | struct sx150x_platform_data *pdata) | ||
464 | { | 463 | { |
465 | int err = 0; | 464 | int err; |
466 | 465 | ||
467 | err = i2c_smbus_write_word_data(chip->client, | 466 | err = i2c_smbus_write_byte_data(chip->client, |
468 | chip->dev_cfg->reg_reset, | 467 | chip->dev_cfg->reg_reset, |
469 | 0x3412); | 468 | 0x12); |
470 | if (err < 0) | 469 | if (err < 0) |
471 | return err; | 470 | return err; |
472 | 471 | ||
472 | err = i2c_smbus_write_byte_data(chip->client, | ||
473 | chip->dev_cfg->reg_reset, | ||
474 | 0x34); | ||
475 | return err; | ||
476 | } | ||
477 | |||
478 | static int sx150x_init_hw(struct sx150x_chip *chip, | ||
479 | struct sx150x_platform_data *pdata) | ||
480 | { | ||
481 | int err = 0; | ||
482 | |||
483 | if (pdata->reset_during_probe) { | ||
484 | err = sx150x_reset(chip); | ||
485 | if (err < 0) | ||
486 | return err; | ||
487 | } | ||
488 | |||
473 | err = sx150x_i2c_write(chip->client, | 489 | err = sx150x_i2c_write(chip->client, |
474 | chip->dev_cfg->reg_misc, | 490 | chip->dev_cfg->reg_misc, |
475 | 0x01); | 491 | 0x01); |
diff --git a/drivers/gpu/drm/drm_buffer.c b/drivers/gpu/drm/drm_buffer.c index 55d03ed05000..529a0dbe9fc6 100644 --- a/drivers/gpu/drm/drm_buffer.c +++ b/drivers/gpu/drm/drm_buffer.c | |||
@@ -98,8 +98,8 @@ EXPORT_SYMBOL(drm_buffer_alloc); | |||
98 | * user_data: A pointer the data that is copied to the buffer. | 98 | * user_data: A pointer the data that is copied to the buffer. |
99 | * size: The Number of bytes to copy. | 99 | * size: The Number of bytes to copy. |
100 | */ | 100 | */ |
101 | extern int drm_buffer_copy_from_user(struct drm_buffer *buf, | 101 | int drm_buffer_copy_from_user(struct drm_buffer *buf, |
102 | void __user *user_data, int size) | 102 | void __user *user_data, int size) |
103 | { | 103 | { |
104 | int nr_pages = size / PAGE_SIZE + 1; | 104 | int nr_pages = size / PAGE_SIZE + 1; |
105 | int idx; | 105 | int idx; |
@@ -163,7 +163,7 @@ void *drm_buffer_read_object(struct drm_buffer *buf, | |||
163 | { | 163 | { |
164 | int idx = drm_buffer_index(buf); | 164 | int idx = drm_buffer_index(buf); |
165 | int page = drm_buffer_page(buf); | 165 | int page = drm_buffer_page(buf); |
166 | void *obj = 0; | 166 | void *obj = NULL; |
167 | 167 | ||
168 | if (idx + objsize <= PAGE_SIZE) { | 168 | if (idx + objsize <= PAGE_SIZE) { |
169 | obj = &buf->data[page][idx]; | 169 | obj = &buf->data[page][idx]; |
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 7e31d4348340..dcbeb98f195a 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c | |||
@@ -34,6 +34,9 @@ | |||
34 | #include "drm_crtc_helper.h" | 34 | #include "drm_crtc_helper.h" |
35 | #include "drm_fb_helper.h" | 35 | #include "drm_fb_helper.h" |
36 | 36 | ||
37 | static bool drm_kms_helper_poll = true; | ||
38 | module_param_named(poll, drm_kms_helper_poll, bool, 0600); | ||
39 | |||
37 | static void drm_mode_validate_flag(struct drm_connector *connector, | 40 | static void drm_mode_validate_flag(struct drm_connector *connector, |
38 | int flags) | 41 | int flags) |
39 | { | 42 | { |
@@ -99,8 +102,10 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, | |||
99 | connector->status = connector_status_disconnected; | 102 | connector->status = connector_status_disconnected; |
100 | if (connector->funcs->force) | 103 | if (connector->funcs->force) |
101 | connector->funcs->force(connector); | 104 | connector->funcs->force(connector); |
102 | } else | 105 | } else { |
103 | connector->status = connector->funcs->detect(connector); | 106 | connector->status = connector->funcs->detect(connector, true); |
107 | drm_kms_helper_poll_enable(dev); | ||
108 | } | ||
104 | 109 | ||
105 | if (connector->status == connector_status_disconnected) { | 110 | if (connector->status == connector_status_disconnected) { |
106 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n", | 111 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n", |
@@ -110,11 +115,10 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, | |||
110 | } | 115 | } |
111 | 116 | ||
112 | count = (*connector_funcs->get_modes)(connector); | 117 | count = (*connector_funcs->get_modes)(connector); |
113 | if (!count) { | 118 | if (count == 0 && connector->status == connector_status_connected) |
114 | count = drm_add_modes_noedid(connector, 1024, 768); | 119 | count = drm_add_modes_noedid(connector, 1024, 768); |
115 | if (!count) | 120 | if (count == 0) |
116 | return 0; | 121 | goto prune; |
117 | } | ||
118 | 122 | ||
119 | drm_mode_connector_list_update(connector); | 123 | drm_mode_connector_list_update(connector); |
120 | 124 | ||
@@ -633,13 +637,13 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) | |||
633 | mode_changed = true; | 637 | mode_changed = true; |
634 | 638 | ||
635 | if (mode_changed) { | 639 | if (mode_changed) { |
636 | old_fb = set->crtc->fb; | ||
637 | set->crtc->fb = set->fb; | ||
638 | set->crtc->enabled = (set->mode != NULL); | 640 | set->crtc->enabled = (set->mode != NULL); |
639 | if (set->mode != NULL) { | 641 | if (set->mode != NULL) { |
640 | DRM_DEBUG_KMS("attempting to set mode from" | 642 | DRM_DEBUG_KMS("attempting to set mode from" |
641 | " userspace\n"); | 643 | " userspace\n"); |
642 | drm_mode_debug_printmodeline(set->mode); | 644 | drm_mode_debug_printmodeline(set->mode); |
645 | old_fb = set->crtc->fb; | ||
646 | set->crtc->fb = set->fb; | ||
643 | if (!drm_crtc_helper_set_mode(set->crtc, set->mode, | 647 | if (!drm_crtc_helper_set_mode(set->crtc, set->mode, |
644 | set->x, set->y, | 648 | set->x, set->y, |
645 | old_fb)) { | 649 | old_fb)) { |
@@ -840,6 +844,9 @@ static void output_poll_execute(struct work_struct *work) | |||
840 | enum drm_connector_status old_status, status; | 844 | enum drm_connector_status old_status, status; |
841 | bool repoll = false, changed = false; | 845 | bool repoll = false, changed = false; |
842 | 846 | ||
847 | if (!drm_kms_helper_poll) | ||
848 | return; | ||
849 | |||
843 | mutex_lock(&dev->mode_config.mutex); | 850 | mutex_lock(&dev->mode_config.mutex); |
844 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 851 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
845 | 852 | ||
@@ -859,7 +866,7 @@ static void output_poll_execute(struct work_struct *work) | |||
859 | !(connector->polled & DRM_CONNECTOR_POLL_HPD)) | 866 | !(connector->polled & DRM_CONNECTOR_POLL_HPD)) |
860 | continue; | 867 | continue; |
861 | 868 | ||
862 | status = connector->funcs->detect(connector); | 869 | status = connector->funcs->detect(connector, false); |
863 | if (old_status != status) | 870 | if (old_status != status) |
864 | changed = true; | 871 | changed = true; |
865 | } | 872 | } |
@@ -890,6 +897,9 @@ void drm_kms_helper_poll_enable(struct drm_device *dev) | |||
890 | bool poll = false; | 897 | bool poll = false; |
891 | struct drm_connector *connector; | 898 | struct drm_connector *connector; |
892 | 899 | ||
900 | if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll) | ||
901 | return; | ||
902 | |||
893 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 903 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
894 | if (connector->polled) | 904 | if (connector->polled) |
895 | poll = true; | 905 | poll = true; |
@@ -919,8 +929,10 @@ void drm_helper_hpd_irq_event(struct drm_device *dev) | |||
919 | { | 929 | { |
920 | if (!dev->mode_config.poll_enabled) | 930 | if (!dev->mode_config.poll_enabled) |
921 | return; | 931 | return; |
932 | |||
922 | /* kill timer and schedule immediate execution, this doesn't block */ | 933 | /* kill timer and schedule immediate execution, this doesn't block */ |
923 | cancel_delayed_work(&dev->mode_config.output_poll_work); | 934 | cancel_delayed_work(&dev->mode_config.output_poll_work); |
924 | queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0); | 935 | if (drm_kms_helper_poll) |
936 | queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0); | ||
925 | } | 937 | } |
926 | EXPORT_SYMBOL(drm_helper_hpd_irq_event); | 938 | EXPORT_SYMBOL(drm_helper_hpd_irq_event); |
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index bf92d07510df..5663d2719063 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c | |||
@@ -148,7 +148,7 @@ int drm_gem_object_init(struct drm_device *dev, | |||
148 | return -ENOMEM; | 148 | return -ENOMEM; |
149 | 149 | ||
150 | kref_init(&obj->refcount); | 150 | kref_init(&obj->refcount); |
151 | kref_init(&obj->handlecount); | 151 | atomic_set(&obj->handle_count, 0); |
152 | obj->size = size; | 152 | obj->size = size; |
153 | 153 | ||
154 | atomic_inc(&dev->object_count); | 154 | atomic_inc(&dev->object_count); |
@@ -462,28 +462,6 @@ drm_gem_object_free(struct kref *kref) | |||
462 | } | 462 | } |
463 | EXPORT_SYMBOL(drm_gem_object_free); | 463 | EXPORT_SYMBOL(drm_gem_object_free); |
464 | 464 | ||
465 | /** | ||
466 | * Called after the last reference to the object has been lost. | ||
467 | * Must be called without holding struct_mutex | ||
468 | * | ||
469 | * Frees the object | ||
470 | */ | ||
471 | void | ||
472 | drm_gem_object_free_unlocked(struct kref *kref) | ||
473 | { | ||
474 | struct drm_gem_object *obj = (struct drm_gem_object *) kref; | ||
475 | struct drm_device *dev = obj->dev; | ||
476 | |||
477 | if (dev->driver->gem_free_object_unlocked != NULL) | ||
478 | dev->driver->gem_free_object_unlocked(obj); | ||
479 | else if (dev->driver->gem_free_object != NULL) { | ||
480 | mutex_lock(&dev->struct_mutex); | ||
481 | dev->driver->gem_free_object(obj); | ||
482 | mutex_unlock(&dev->struct_mutex); | ||
483 | } | ||
484 | } | ||
485 | EXPORT_SYMBOL(drm_gem_object_free_unlocked); | ||
486 | |||
487 | static void drm_gem_object_ref_bug(struct kref *list_kref) | 465 | static void drm_gem_object_ref_bug(struct kref *list_kref) |
488 | { | 466 | { |
489 | BUG(); | 467 | BUG(); |
@@ -496,12 +474,8 @@ static void drm_gem_object_ref_bug(struct kref *list_kref) | |||
496 | * called before drm_gem_object_free or we'll be touching | 474 | * called before drm_gem_object_free or we'll be touching |
497 | * freed memory | 475 | * freed memory |
498 | */ | 476 | */ |
499 | void | 477 | void drm_gem_object_handle_free(struct drm_gem_object *obj) |
500 | drm_gem_object_handle_free(struct kref *kref) | ||
501 | { | 478 | { |
502 | struct drm_gem_object *obj = container_of(kref, | ||
503 | struct drm_gem_object, | ||
504 | handlecount); | ||
505 | struct drm_device *dev = obj->dev; | 479 | struct drm_device *dev = obj->dev; |
506 | 480 | ||
507 | /* Remove any name for this object */ | 481 | /* Remove any name for this object */ |
@@ -528,6 +502,10 @@ void drm_gem_vm_open(struct vm_area_struct *vma) | |||
528 | struct drm_gem_object *obj = vma->vm_private_data; | 502 | struct drm_gem_object *obj = vma->vm_private_data; |
529 | 503 | ||
530 | drm_gem_object_reference(obj); | 504 | drm_gem_object_reference(obj); |
505 | |||
506 | mutex_lock(&obj->dev->struct_mutex); | ||
507 | drm_vm_open_locked(vma); | ||
508 | mutex_unlock(&obj->dev->struct_mutex); | ||
531 | } | 509 | } |
532 | EXPORT_SYMBOL(drm_gem_vm_open); | 510 | EXPORT_SYMBOL(drm_gem_vm_open); |
533 | 511 | ||
@@ -535,7 +513,10 @@ void drm_gem_vm_close(struct vm_area_struct *vma) | |||
535 | { | 513 | { |
536 | struct drm_gem_object *obj = vma->vm_private_data; | 514 | struct drm_gem_object *obj = vma->vm_private_data; |
537 | 515 | ||
538 | drm_gem_object_unreference_unlocked(obj); | 516 | mutex_lock(&obj->dev->struct_mutex); |
517 | drm_vm_close_locked(vma); | ||
518 | drm_gem_object_unreference(obj); | ||
519 | mutex_unlock(&obj->dev->struct_mutex); | ||
539 | } | 520 | } |
540 | EXPORT_SYMBOL(drm_gem_vm_close); | 521 | EXPORT_SYMBOL(drm_gem_vm_close); |
541 | 522 | ||
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c index 2ef2c7827243..974e970ce3f8 100644 --- a/drivers/gpu/drm/drm_info.c +++ b/drivers/gpu/drm/drm_info.c | |||
@@ -255,7 +255,7 @@ int drm_gem_one_name_info(int id, void *ptr, void *data) | |||
255 | 255 | ||
256 | seq_printf(m, "%6d %8zd %7d %8d\n", | 256 | seq_printf(m, "%6d %8zd %7d %8d\n", |
257 | obj->name, obj->size, | 257 | obj->name, obj->size, |
258 | atomic_read(&obj->handlecount.refcount), | 258 | atomic_read(&obj->handle_count), |
259 | atomic_read(&obj->refcount.refcount)); | 259 | atomic_read(&obj->refcount.refcount)); |
260 | return 0; | 260 | return 0; |
261 | } | 261 | } |
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c index e20f78b542a7..f5bd9e590c80 100644 --- a/drivers/gpu/drm/drm_pci.c +++ b/drivers/gpu/drm/drm_pci.c | |||
@@ -164,6 +164,8 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent, | |||
164 | dev->hose = pdev->sysdata; | 164 | dev->hose = pdev->sysdata; |
165 | #endif | 165 | #endif |
166 | 166 | ||
167 | mutex_lock(&drm_global_mutex); | ||
168 | |||
167 | if ((ret = drm_fill_in_dev(dev, ent, driver))) { | 169 | if ((ret = drm_fill_in_dev(dev, ent, driver))) { |
168 | printk(KERN_ERR "DRM: Fill_in_dev failed.\n"); | 170 | printk(KERN_ERR "DRM: Fill_in_dev failed.\n"); |
169 | goto err_g2; | 171 | goto err_g2; |
@@ -199,6 +201,7 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent, | |||
199 | driver->name, driver->major, driver->minor, driver->patchlevel, | 201 | driver->name, driver->major, driver->minor, driver->patchlevel, |
200 | driver->date, pci_name(pdev), dev->primary->index); | 202 | driver->date, pci_name(pdev), dev->primary->index); |
201 | 203 | ||
204 | mutex_unlock(&drm_global_mutex); | ||
202 | return 0; | 205 | return 0; |
203 | 206 | ||
204 | err_g4: | 207 | err_g4: |
@@ -210,6 +213,7 @@ err_g2: | |||
210 | pci_disable_device(pdev); | 213 | pci_disable_device(pdev); |
211 | err_g1: | 214 | err_g1: |
212 | kfree(dev); | 215 | kfree(dev); |
216 | mutex_unlock(&drm_global_mutex); | ||
213 | return ret; | 217 | return ret; |
214 | } | 218 | } |
215 | EXPORT_SYMBOL(drm_get_pci_dev); | 219 | EXPORT_SYMBOL(drm_get_pci_dev); |
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c index 460e9a3afa8d..92d1d0fb7b75 100644 --- a/drivers/gpu/drm/drm_platform.c +++ b/drivers/gpu/drm/drm_platform.c | |||
@@ -53,6 +53,8 @@ int drm_get_platform_dev(struct platform_device *platdev, | |||
53 | dev->platformdev = platdev; | 53 | dev->platformdev = platdev; |
54 | dev->dev = &platdev->dev; | 54 | dev->dev = &platdev->dev; |
55 | 55 | ||
56 | mutex_lock(&drm_global_mutex); | ||
57 | |||
56 | ret = drm_fill_in_dev(dev, NULL, driver); | 58 | ret = drm_fill_in_dev(dev, NULL, driver); |
57 | 59 | ||
58 | if (ret) { | 60 | if (ret) { |
@@ -87,6 +89,8 @@ int drm_get_platform_dev(struct platform_device *platdev, | |||
87 | 89 | ||
88 | list_add_tail(&dev->driver_item, &driver->device_list); | 90 | list_add_tail(&dev->driver_item, &driver->device_list); |
89 | 91 | ||
92 | mutex_unlock(&drm_global_mutex); | ||
93 | |||
90 | DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", | 94 | DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", |
91 | driver->name, driver->major, driver->minor, driver->patchlevel, | 95 | driver->name, driver->major, driver->minor, driver->patchlevel, |
92 | driver->date, dev->primary->index); | 96 | driver->date, dev->primary->index); |
@@ -100,6 +104,7 @@ err_g2: | |||
100 | drm_put_minor(&dev->control); | 104 | drm_put_minor(&dev->control); |
101 | err_g1: | 105 | err_g1: |
102 | kfree(dev); | 106 | kfree(dev); |
107 | mutex_unlock(&drm_global_mutex); | ||
103 | return ret; | 108 | return ret; |
104 | } | 109 | } |
105 | EXPORT_SYMBOL(drm_get_platform_dev); | 110 | EXPORT_SYMBOL(drm_get_platform_dev); |
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index 86118a742231..85da4c40694c 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c | |||
@@ -159,7 +159,7 @@ static ssize_t status_show(struct device *device, | |||
159 | struct drm_connector *connector = to_drm_connector(device); | 159 | struct drm_connector *connector = to_drm_connector(device); |
160 | enum drm_connector_status status; | 160 | enum drm_connector_status status; |
161 | 161 | ||
162 | status = connector->funcs->detect(connector); | 162 | status = connector->funcs->detect(connector, true); |
163 | return snprintf(buf, PAGE_SIZE, "%s\n", | 163 | return snprintf(buf, PAGE_SIZE, "%s\n", |
164 | drm_get_connector_status_name(status)); | 164 | drm_get_connector_status_name(status)); |
165 | } | 165 | } |
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c index fda67468e603..5df450683aab 100644 --- a/drivers/gpu/drm/drm_vm.c +++ b/drivers/gpu/drm/drm_vm.c | |||
@@ -433,15 +433,7 @@ static void drm_vm_open(struct vm_area_struct *vma) | |||
433 | mutex_unlock(&dev->struct_mutex); | 433 | mutex_unlock(&dev->struct_mutex); |
434 | } | 434 | } |
435 | 435 | ||
436 | /** | 436 | void drm_vm_close_locked(struct vm_area_struct *vma) |
437 | * \c close method for all virtual memory types. | ||
438 | * | ||
439 | * \param vma virtual memory area. | ||
440 | * | ||
441 | * Search the \p vma private data entry in drm_device::vmalist, unlink it, and | ||
442 | * free it. | ||
443 | */ | ||
444 | static void drm_vm_close(struct vm_area_struct *vma) | ||
445 | { | 437 | { |
446 | struct drm_file *priv = vma->vm_file->private_data; | 438 | struct drm_file *priv = vma->vm_file->private_data; |
447 | struct drm_device *dev = priv->minor->dev; | 439 | struct drm_device *dev = priv->minor->dev; |
@@ -451,7 +443,6 @@ static void drm_vm_close(struct vm_area_struct *vma) | |||
451 | vma->vm_start, vma->vm_end - vma->vm_start); | 443 | vma->vm_start, vma->vm_end - vma->vm_start); |
452 | atomic_dec(&dev->vma_count); | 444 | atomic_dec(&dev->vma_count); |
453 | 445 | ||
454 | mutex_lock(&dev->struct_mutex); | ||
455 | list_for_each_entry_safe(pt, temp, &dev->vmalist, head) { | 446 | list_for_each_entry_safe(pt, temp, &dev->vmalist, head) { |
456 | if (pt->vma == vma) { | 447 | if (pt->vma == vma) { |
457 | list_del(&pt->head); | 448 | list_del(&pt->head); |
@@ -459,6 +450,23 @@ static void drm_vm_close(struct vm_area_struct *vma) | |||
459 | break; | 450 | break; |
460 | } | 451 | } |
461 | } | 452 | } |
453 | } | ||
454 | |||
455 | /** | ||
456 | * \c close method for all virtual memory types. | ||
457 | * | ||
458 | * \param vma virtual memory area. | ||
459 | * | ||
460 | * Search the \p vma private data entry in drm_device::vmalist, unlink it, and | ||
461 | * free it. | ||
462 | */ | ||
463 | static void drm_vm_close(struct vm_area_struct *vma) | ||
464 | { | ||
465 | struct drm_file *priv = vma->vm_file->private_data; | ||
466 | struct drm_device *dev = priv->minor->dev; | ||
467 | |||
468 | mutex_lock(&dev->struct_mutex); | ||
469 | drm_vm_close_locked(vma); | ||
462 | mutex_unlock(&dev->struct_mutex); | 470 | mutex_unlock(&dev->struct_mutex); |
463 | } | 471 | } |
464 | 472 | ||
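
The drm_vm change follows the common *_locked split: the list manipulation moves into drm_vm_close_locked(), which expects the caller to already hold struct_mutex, while drm_vm_close() stays a thin wrapper that takes and drops the mutex. That is what lets drm_gem_vm_close() above call the locked variant while it holds the mutex for the GEM unreference. A generic sketch of the pattern (hypothetical names, pthreads instead of the kernel mutex API):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static int count;

	/* Caller must hold "lock"; does the real work. */
	static void thing_close_locked(void)
	{
		count--;
	}

	/* Convenience wrapper for callers that do not hold the lock. */
	static void thing_close(void)
	{
		pthread_mutex_lock(&lock);
		thing_close_locked();
		pthread_mutex_unlock(&lock);
	}

	int main(void)
	{
		count = 2;

		thing_close();			/* unlocked caller */

		pthread_mutex_lock(&lock);	/* caller already holds lock */
		thing_close_locked();
		pthread_mutex_unlock(&lock);

		printf("%d\n", count);		/* 0 */
		return 0;
	}
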
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c index 61b4caf220fa..fb07e73581e8 100644 --- a/drivers/gpu/drm/i810/i810_dma.c +++ b/drivers/gpu/drm/i810/i810_dma.c | |||
@@ -116,7 +116,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma) | |||
116 | static const struct file_operations i810_buffer_fops = { | 116 | static const struct file_operations i810_buffer_fops = { |
117 | .open = drm_open, | 117 | .open = drm_open, |
118 | .release = drm_release, | 118 | .release = drm_release, |
119 | .unlocked_ioctl = drm_ioctl, | 119 | .unlocked_ioctl = i810_ioctl, |
120 | .mmap = i810_mmap_buffers, | 120 | .mmap = i810_mmap_buffers, |
121 | .fasync = drm_fasync, | 121 | .fasync = drm_fasync, |
122 | }; | 122 | }; |
diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c index 671aa18415ac..cc92c7e6236f 100644 --- a/drivers/gpu/drm/i830/i830_dma.c +++ b/drivers/gpu/drm/i830/i830_dma.c | |||
@@ -118,7 +118,7 @@ static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma) | |||
118 | static const struct file_operations i830_buffer_fops = { | 118 | static const struct file_operations i830_buffer_fops = { |
119 | .open = drm_open, | 119 | .open = drm_open, |
120 | .release = drm_release, | 120 | .release = drm_release, |
121 | .unlocked_ioctl = drm_ioctl, | 121 | .unlocked_ioctl = i830_ioctl, |
122 | .mmap = i830_mmap_buffers, | 122 | .mmap = i830_mmap_buffers, |
123 | .fasync = drm_fasync, | 123 | .fasync = drm_fasync, |
124 | }; | 124 | }; |
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 92d5605a34d1..5e43d7076789 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/slab.h> | 31 | #include <linux/slab.h> |
32 | #include "drmP.h" | 32 | #include "drmP.h" |
33 | #include "drm.h" | 33 | #include "drm.h" |
34 | #include "intel_drv.h" | ||
34 | #include "i915_drm.h" | 35 | #include "i915_drm.h" |
35 | #include "i915_drv.h" | 36 | #include "i915_drv.h" |
36 | 37 | ||
@@ -121,6 +122,54 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) | |||
121 | return 0; | 122 | return 0; |
122 | } | 123 | } |
123 | 124 | ||
125 | static int i915_gem_pageflip_info(struct seq_file *m, void *data) | ||
126 | { | ||
127 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
128 | struct drm_device *dev = node->minor->dev; | ||
129 | unsigned long flags; | ||
130 | struct intel_crtc *crtc; | ||
131 | |||
132 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { | ||
133 | const char *pipe = crtc->pipe ? "B" : "A"; | ||
134 | const char *plane = crtc->plane ? "B" : "A"; | ||
135 | struct intel_unpin_work *work; | ||
136 | |||
137 | spin_lock_irqsave(&dev->event_lock, flags); | ||
138 | work = crtc->unpin_work; | ||
139 | if (work == NULL) { | ||
140 | seq_printf(m, "No flip due on pipe %s (plane %s)\n", | ||
141 | pipe, plane); | ||
142 | } else { | ||
143 | if (!work->pending) { | ||
144 | seq_printf(m, "Flip queued on pipe %s (plane %s)\n", | ||
145 | pipe, plane); | ||
146 | } else { | ||
147 | seq_printf(m, "Flip pending (waiting for vsync) on pipe %s (plane %s)\n", | ||
148 | pipe, plane); | ||
149 | } | ||
150 | if (work->enable_stall_check) | ||
151 | seq_printf(m, "Stall check enabled, "); | ||
152 | else | ||
153 | seq_printf(m, "Stall check waiting for page flip ioctl, "); | ||
154 | seq_printf(m, "%d prepares\n", work->pending); | ||
155 | |||
156 | if (work->old_fb_obj) { | ||
157 | struct drm_i915_gem_object *obj_priv = to_intel_bo(work->old_fb_obj); | ||
158 | if(obj_priv) | ||
159 | seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset ); | ||
160 | } | ||
161 | if (work->pending_flip_obj) { | ||
162 | struct drm_i915_gem_object *obj_priv = to_intel_bo(work->pending_flip_obj); | ||
163 | if(obj_priv) | ||
164 | seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset ); | ||
165 | } | ||
166 | } | ||
167 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
168 | } | ||
169 | |||
170 | return 0; | ||
171 | } | ||
172 | |||
124 | static int i915_gem_request_info(struct seq_file *m, void *data) | 173 | static int i915_gem_request_info(struct seq_file *m, void *data) |
125 | { | 174 | { |
126 | struct drm_info_node *node = (struct drm_info_node *) m->private; | 175 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
@@ -777,6 +826,7 @@ static struct drm_info_list i915_debugfs_list[] = { | |||
777 | {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, | 826 | {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, |
778 | {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, | 827 | {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, |
779 | {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, | 828 | {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, |
829 | {"i915_gem_pageflip", i915_gem_pageflip_info, 0}, | ||
780 | {"i915_gem_request", i915_gem_request_info, 0}, | 830 | {"i915_gem_request", i915_gem_request_info, 0}, |
781 | {"i915_gem_seqno", i915_gem_seqno_info, 0}, | 831 | {"i915_gem_seqno", i915_gem_seqno_info, 0}, |
782 | {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, | 832 | {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index a7ec93e62f81..c74e4e8006d4 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
@@ -620,8 +620,10 @@ static int i915_batchbuffer(struct drm_device *dev, void *data, | |||
620 | ret = copy_from_user(cliprects, batch->cliprects, | 620 | ret = copy_from_user(cliprects, batch->cliprects, |
621 | batch->num_cliprects * | 621 | batch->num_cliprects * |
622 | sizeof(struct drm_clip_rect)); | 622 | sizeof(struct drm_clip_rect)); |
623 | if (ret != 0) | 623 | if (ret != 0) { |
624 | ret = -EFAULT; | ||
624 | goto fail_free; | 625 | goto fail_free; |
626 | } | ||
625 | } | 627 | } |
626 | 628 | ||
627 | mutex_lock(&dev->struct_mutex); | 629 | mutex_lock(&dev->struct_mutex); |
@@ -662,8 +664,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data, | |||
662 | return -ENOMEM; | 664 | return -ENOMEM; |
663 | 665 | ||
664 | ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz); | 666 | ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz); |
665 | if (ret != 0) | 667 | if (ret != 0) { |
668 | ret = -EFAULT; | ||
666 | goto fail_batch_free; | 669 | goto fail_batch_free; |
670 | } | ||
667 | 671 | ||
668 | if (cmdbuf->num_cliprects) { | 672 | if (cmdbuf->num_cliprects) { |
669 | cliprects = kcalloc(cmdbuf->num_cliprects, | 673 | cliprects = kcalloc(cmdbuf->num_cliprects, |
@@ -676,8 +680,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data, | |||
676 | ret = copy_from_user(cliprects, cmdbuf->cliprects, | 680 | ret = copy_from_user(cliprects, cmdbuf->cliprects, |
677 | cmdbuf->num_cliprects * | 681 | cmdbuf->num_cliprects * |
678 | sizeof(struct drm_clip_rect)); | 682 | sizeof(struct drm_clip_rect)); |
679 | if (ret != 0) | 683 | if (ret != 0) { |
684 | ret = -EFAULT; | ||
680 | goto fail_clip_free; | 685 | goto fail_clip_free; |
686 | } | ||
681 | } | 687 | } |
682 | 688 | ||
683 | mutex_lock(&dev->struct_mutex); | 689 | mutex_lock(&dev->struct_mutex); |
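
Both i915 hunks fix the same slip: copy_from_user() returns the number of bytes it could not copy, not an errno, so the caller must translate any non-zero return into -EFAULT before jumping to the error path. A hedged kernel-style sketch of the corrected idiom (not the driver's actual function):

	#include <linux/uaccess.h>	/* copy_from_user() */

	/* Illustration only: a non-zero return from copy_from_user() means
	 * some bytes were left uncopied, so report -EFAULT to the caller. */
	static int copy_rects(void *dst, const void __user *src, size_t len)
	{
		if (copy_from_user(dst, src, len))
			return -EFAULT;
		return 0;
	}
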
@@ -885,7 +891,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev) | |||
885 | int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915; | 891 | int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915; |
886 | u32 temp_lo, temp_hi = 0; | 892 | u32 temp_lo, temp_hi = 0; |
887 | u64 mchbar_addr; | 893 | u64 mchbar_addr; |
888 | int ret = 0; | 894 | int ret; |
889 | 895 | ||
890 | if (IS_I965G(dev)) | 896 | if (IS_I965G(dev)) |
891 | pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi); | 897 | pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi); |
@@ -895,22 +901,23 @@ intel_alloc_mchbar_resource(struct drm_device *dev) | |||
895 | /* If ACPI doesn't have it, assume we need to allocate it ourselves */ | 901 | /* If ACPI doesn't have it, assume we need to allocate it ourselves */ |
896 | #ifdef CONFIG_PNP | 902 | #ifdef CONFIG_PNP |
897 | if (mchbar_addr && | 903 | if (mchbar_addr && |
898 | pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) { | 904 | pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) |
899 | ret = 0; | 905 | return 0; |
900 | goto out; | ||
901 | } | ||
902 | #endif | 906 | #endif |
903 | 907 | ||
904 | /* Get some space for it */ | 908 | /* Get some space for it */ |
905 | ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res, | 909 | dev_priv->mch_res.name = "i915 MCHBAR"; |
910 | dev_priv->mch_res.flags = IORESOURCE_MEM; | ||
911 | ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, | ||
912 | &dev_priv->mch_res, | ||
906 | MCHBAR_SIZE, MCHBAR_SIZE, | 913 | MCHBAR_SIZE, MCHBAR_SIZE, |
907 | PCIBIOS_MIN_MEM, | 914 | PCIBIOS_MIN_MEM, |
908 | 0, pcibios_align_resource, | 915 | 0, pcibios_align_resource, |
909 | dev_priv->bridge_dev); | 916 | dev_priv->bridge_dev); |
910 | if (ret) { | 917 | if (ret) { |
911 | DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret); | 918 | DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret); |
912 | dev_priv->mch_res.start = 0; | 919 | dev_priv->mch_res.start = 0; |
913 | goto out; | 920 | return ret; |
914 | } | 921 | } |
915 | 922 | ||
916 | if (IS_I965G(dev)) | 923 | if (IS_I965G(dev)) |
@@ -919,8 +926,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev) | |||
919 | 926 | ||
920 | pci_write_config_dword(dev_priv->bridge_dev, reg, | 927 | pci_write_config_dword(dev_priv->bridge_dev, reg, |
921 | lower_32_bits(dev_priv->mch_res.start)); | 928 | lower_32_bits(dev_priv->mch_res.start)); |
922 | out: | 929 | return 0; |
923 | return ret; | ||
924 | } | 930 | } |
925 | 931 | ||
926 | /* Setup MCHBAR if possible, return true if we should disable it again */ | 932 | /* Setup MCHBAR if possible, return true if we should disable it again */ |
@@ -1781,9 +1787,9 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) | |||
1781 | } | 1787 | } |
1782 | } | 1788 | } |
1783 | 1789 | ||
1784 | div_u64(diff, diff1); | 1790 | diff = div_u64(diff, diff1); |
1785 | ret = ((m * diff) + c); | 1791 | ret = ((m * diff) + c); |
1786 | div_u64(ret, 10); | 1792 | ret = div_u64(ret, 10); |
1787 | 1793 | ||
1788 | dev_priv->last_count1 = total_count; | 1794 | dev_priv->last_count1 = total_count; |
1789 | dev_priv->last_time1 = now; | 1795 | dev_priv->last_time1 = now; |
@@ -1852,7 +1858,7 @@ void i915_update_gfx_val(struct drm_i915_private *dev_priv) | |||
1852 | 1858 | ||
1853 | /* More magic constants... */ | 1859 | /* More magic constants... */ |
1854 | diff = diff * 1181; | 1860 | diff = diff * 1181; |
1855 | div_u64(diff, diffms * 10); | 1861 | diff = div_u64(diff, diffms * 10); |
1856 | dev_priv->gfx_power = diff; | 1862 | dev_priv->gfx_power = diff; |
1857 | } | 1863 | } |
1858 | 1864 | ||
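
The three div_u64() fixes are all the same bug: div_u64(dividend, divisor) returns the quotient and leaves its first argument untouched, so the old calls silently discarded the result. A hedged kernel-style sketch of the intended use (not a drop-in from the driver):

	#include <linux/math64.h>	/* div_u64() */
	#include <linux/types.h>

	/* Wrong (old code): quotient discarded.
	 *	div_u64(diff, diff1);
	 * Right (as in the hunks above): assign the quotient back.
	 *	diff = div_u64(diff, diff1);
	 */
	static u64 scaled(u64 num, u32 den)
	{
		return div_u64(num, den);	/* quotient is the return value */
	}
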
@@ -2082,6 +2088,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
2082 | goto free_priv; | 2088 | goto free_priv; |
2083 | } | 2089 | } |
2084 | 2090 | ||
2091 | /* overlay on gen2 is broken and can't address above 1G */ | ||
2092 | if (IS_GEN2(dev)) | ||
2093 | dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); | ||
2094 | |||
2085 | dev_priv->regs = ioremap(base, size); | 2095 | dev_priv->regs = ioremap(base, size); |
2086 | if (!dev_priv->regs) { | 2096 | if (!dev_priv->regs) { |
2087 | DRM_ERROR("failed to map registers\n"); | 2097 | DRM_ERROR("failed to map registers\n"); |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 00befce8fbb7..6dbe14cc4f74 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -61,91 +61,86 @@ extern int intel_agp_enabled; | |||
61 | .driver_data = (unsigned long) info } | 61 | .driver_data = (unsigned long) info } |
62 | 62 | ||
63 | static const struct intel_device_info intel_i830_info = { | 63 | static const struct intel_device_info intel_i830_info = { |
64 | .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1, | 64 | .gen = 2, .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1, |
65 | }; | 65 | }; |
66 | 66 | ||
67 | static const struct intel_device_info intel_845g_info = { | 67 | static const struct intel_device_info intel_845g_info = { |
68 | .is_i8xx = 1, | 68 | .gen = 2, .is_i8xx = 1, |
69 | }; | 69 | }; |
70 | 70 | ||
71 | static const struct intel_device_info intel_i85x_info = { | 71 | static const struct intel_device_info intel_i85x_info = { |
72 | .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1, | 72 | .gen = 2, .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1, |
73 | .cursor_needs_physical = 1, | 73 | .cursor_needs_physical = 1, |
74 | }; | 74 | }; |
75 | 75 | ||
76 | static const struct intel_device_info intel_i865g_info = { | 76 | static const struct intel_device_info intel_i865g_info = { |
77 | .is_i8xx = 1, | 77 | .gen = 2, .is_i8xx = 1, |
78 | }; | 78 | }; |
79 | 79 | ||
80 | static const struct intel_device_info intel_i915g_info = { | 80 | static const struct intel_device_info intel_i915g_info = { |
81 | .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1, | 81 | .gen = 3, .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1, |
82 | }; | 82 | }; |
83 | static const struct intel_device_info intel_i915gm_info = { | 83 | static const struct intel_device_info intel_i915gm_info = { |
84 | .is_i9xx = 1, .is_mobile = 1, | 84 | .gen = 3, .is_i9xx = 1, .is_mobile = 1, |
85 | .cursor_needs_physical = 1, | 85 | .cursor_needs_physical = 1, |
86 | }; | 86 | }; |
87 | static const struct intel_device_info intel_i945g_info = { | 87 | static const struct intel_device_info intel_i945g_info = { |
88 | .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1, | 88 | .gen = 3, .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1, |
89 | }; | 89 | }; |
90 | static const struct intel_device_info intel_i945gm_info = { | 90 | static const struct intel_device_info intel_i945gm_info = { |
91 | .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, | 91 | .gen = 3, .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, |
92 | .has_hotplug = 1, .cursor_needs_physical = 1, | 92 | .has_hotplug = 1, .cursor_needs_physical = 1, |
93 | }; | 93 | }; |
94 | 94 | ||
95 | static const struct intel_device_info intel_i965g_info = { | 95 | static const struct intel_device_info intel_i965g_info = { |
96 | .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1, | 96 | .gen = 4, .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1, |
97 | .has_hotplug = 1, | ||
97 | }; | 98 | }; |
98 | 99 | ||
99 | static const struct intel_device_info intel_i965gm_info = { | 100 | static const struct intel_device_info intel_i965gm_info = { |
100 | .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1, | 101 | .gen = 4, .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1, |
101 | .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, | 102 | .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1, |
102 | .has_hotplug = 1, | ||
103 | }; | 103 | }; |
104 | 104 | ||
105 | static const struct intel_device_info intel_g33_info = { | 105 | static const struct intel_device_info intel_g33_info = { |
106 | .is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1, | 106 | .gen = 3, .is_g33 = 1, .is_i9xx = 1, |
107 | .has_hotplug = 1, | 107 | .need_gfx_hws = 1, .has_hotplug = 1, |
108 | }; | 108 | }; |
109 | 109 | ||
110 | static const struct intel_device_info intel_g45_info = { | 110 | static const struct intel_device_info intel_g45_info = { |
111 | .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1, | 111 | .gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1, |
112 | .has_pipe_cxsr = 1, | 112 | .has_pipe_cxsr = 1, .has_hotplug = 1, |
113 | .has_hotplug = 1, | ||
114 | }; | 113 | }; |
115 | 114 | ||
116 | static const struct intel_device_info intel_gm45_info = { | 115 | static const struct intel_device_info intel_gm45_info = { |
117 | .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, | 116 | .gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, |
118 | .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, | 117 | .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, |
119 | .has_pipe_cxsr = 1, | 118 | .has_pipe_cxsr = 1, .has_hotplug = 1, |
120 | .has_hotplug = 1, | ||
121 | }; | 119 | }; |
122 | 120 | ||
123 | static const struct intel_device_info intel_pineview_info = { | 121 | static const struct intel_device_info intel_pineview_info = { |
124 | .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1, | 122 | .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1, |
125 | .need_gfx_hws = 1, | 123 | .need_gfx_hws = 1, .has_hotplug = 1, |
126 | .has_hotplug = 1, | ||
127 | }; | 124 | }; |
128 | 125 | ||
129 | static const struct intel_device_info intel_ironlake_d_info = { | 126 | static const struct intel_device_info intel_ironlake_d_info = { |
130 | .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1, | 127 | .gen = 5, .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, |
131 | .has_pipe_cxsr = 1, | 128 | .need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1, |
132 | .has_hotplug = 1, | ||
133 | }; | 129 | }; |
134 | 130 | ||
135 | static const struct intel_device_info intel_ironlake_m_info = { | 131 | static const struct intel_device_info intel_ironlake_m_info = { |
136 | .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1, | 132 | .gen = 5, .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1, |
137 | .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, | 133 | .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1, |
138 | .has_hotplug = 1, | ||
139 | }; | 134 | }; |
140 | 135 | ||
141 | static const struct intel_device_info intel_sandybridge_d_info = { | 136 | static const struct intel_device_info intel_sandybridge_d_info = { |
142 | .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1, | 137 | .gen = 6, .is_i965g = 1, .is_i9xx = 1, |
143 | .has_hotplug = 1, .is_gen6 = 1, | 138 | .need_gfx_hws = 1, .has_hotplug = 1, |
144 | }; | 139 | }; |
145 | 140 | ||
146 | static const struct intel_device_info intel_sandybridge_m_info = { | 141 | static const struct intel_device_info intel_sandybridge_m_info = { |
147 | .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1, .need_gfx_hws = 1, | 142 | .gen = 6, .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1, |
148 | .has_hotplug = 1, .is_gen6 = 1, | 143 | .need_gfx_hws = 1, .has_hotplug = 1, |
149 | }; | 144 | }; |
150 | 145 | ||
151 | static const struct pci_device_id pciidlist[] = { /* aka */ | 146 | static const struct pci_device_id pciidlist[] = { /* aka */ |
@@ -175,13 +170,18 @@ static const struct pci_device_id pciidlist[] = { /* aka */ | |||
175 | INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), /* G45_G */ | 170 | INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), /* G45_G */ |
176 | INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), /* G41_G */ | 171 | INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), /* G41_G */ |
177 | INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), /* B43_G */ | 172 | INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), /* B43_G */ |
173 | INTEL_VGA_DEVICE(0x2e92, &intel_g45_info), /* B43_G.1 */ | ||
178 | INTEL_VGA_DEVICE(0xa001, &intel_pineview_info), | 174 | INTEL_VGA_DEVICE(0xa001, &intel_pineview_info), |
179 | INTEL_VGA_DEVICE(0xa011, &intel_pineview_info), | 175 | INTEL_VGA_DEVICE(0xa011, &intel_pineview_info), |
180 | INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info), | 176 | INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info), |
181 | INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info), | 177 | INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info), |
182 | INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info), | 178 | INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info), |
179 | INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info), | ||
180 | INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info), | ||
183 | INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info), | 181 | INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info), |
182 | INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info), | ||
184 | INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info), | 183 | INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info), |
184 | INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info), | ||
185 | {0, 0, 0} | 185 | {0, 0, 0} |
186 | }; | 186 | }; |
187 | 187 | ||
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 047cd7ce7e1b..af4a263cf257 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -191,6 +191,7 @@ struct drm_i915_display_funcs { | |||
191 | }; | 191 | }; |
192 | 192 | ||
193 | struct intel_device_info { | 193 | struct intel_device_info { |
194 | u8 gen; | ||
194 | u8 is_mobile : 1; | 195 | u8 is_mobile : 1; |
195 | u8 is_i8xx : 1; | 196 | u8 is_i8xx : 1; |
196 | u8 is_i85x : 1; | 197 | u8 is_i85x : 1; |
@@ -206,7 +207,6 @@ struct intel_device_info { | |||
206 | u8 is_broadwater : 1; | 207 | u8 is_broadwater : 1; |
207 | u8 is_crestline : 1; | 208 | u8 is_crestline : 1; |
208 | u8 is_ironlake : 1; | 209 | u8 is_ironlake : 1; |
209 | u8 is_gen6 : 1; | ||
210 | u8 has_fbc : 1; | 210 | u8 has_fbc : 1; |
211 | u8 has_rc6 : 1; | 211 | u8 has_rc6 : 1; |
212 | u8 has_pipe_cxsr : 1; | 212 | u8 has_pipe_cxsr : 1; |
@@ -1162,7 +1162,6 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove | |||
1162 | #define IS_845G(dev) ((dev)->pci_device == 0x2562) | 1162 | #define IS_845G(dev) ((dev)->pci_device == 0x2562) |
1163 | #define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) | 1163 | #define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) |
1164 | #define IS_I865G(dev) ((dev)->pci_device == 0x2572) | 1164 | #define IS_I865G(dev) ((dev)->pci_device == 0x2572) |
1165 | #define IS_GEN2(dev) (INTEL_INFO(dev)->is_i8xx) | ||
1166 | #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) | 1165 | #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) |
1167 | #define IS_I915GM(dev) ((dev)->pci_device == 0x2592) | 1166 | #define IS_I915GM(dev) ((dev)->pci_device == 0x2592) |
1168 | #define IS_I945G(dev) ((dev)->pci_device == 0x2772) | 1167 | #define IS_I945G(dev) ((dev)->pci_device == 0x2772) |
@@ -1181,27 +1180,13 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove | |||
1181 | #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) | 1180 | #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) |
1182 | #define IS_IRONLAKE(dev) (INTEL_INFO(dev)->is_ironlake) | 1181 | #define IS_IRONLAKE(dev) (INTEL_INFO(dev)->is_ironlake) |
1183 | #define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx) | 1182 | #define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx) |
1184 | #define IS_GEN6(dev) (INTEL_INFO(dev)->is_gen6) | ||
1185 | #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) | 1183 | #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) |
1186 | 1184 | ||
1187 | #define IS_GEN3(dev) (IS_I915G(dev) || \ | 1185 | #define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) |
1188 | IS_I915GM(dev) || \ | 1186 | #define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3) |
1189 | IS_I945G(dev) || \ | 1187 | #define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4) |
1190 | IS_I945GM(dev) || \ | 1188 | #define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) |
1191 | IS_G33(dev) || \ | 1189 | #define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) |
1192 | IS_PINEVIEW(dev)) | ||
1193 | #define IS_GEN4(dev) ((dev)->pci_device == 0x2972 || \ | ||
1194 | (dev)->pci_device == 0x2982 || \ | ||
1195 | (dev)->pci_device == 0x2992 || \ | ||
1196 | (dev)->pci_device == 0x29A2 || \ | ||
1197 | (dev)->pci_device == 0x2A02 || \ | ||
1198 | (dev)->pci_device == 0x2A12 || \ | ||
1199 | (dev)->pci_device == 0x2E02 || \ | ||
1200 | (dev)->pci_device == 0x2E12 || \ | ||
1201 | (dev)->pci_device == 0x2E22 || \ | ||
1202 | (dev)->pci_device == 0x2E32 || \ | ||
1203 | (dev)->pci_device == 0x2A42 || \ | ||
1204 | (dev)->pci_device == 0x2E42) | ||
1205 | 1190 | ||
1206 | #define HAS_BSD(dev) (IS_IRONLAKE(dev) || IS_G4X(dev)) | 1191 | #define HAS_BSD(dev) (IS_IRONLAKE(dev) || IS_G4X(dev)) |
1207 | #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) | 1192 | #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) |
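With the gen field available, the header change above turns generation tests into one comparison: IS_GEN2() through IS_GEN6() read INTEL_INFO(dev)->gen instead of matching a hand-maintained list of PCI IDs (the old IS_GEN4) or a dedicated is_gen6 bit, so adding a new device only touches the info tables. A minimal compilable sketch of the macro shape; the device_info struct stands in for the driver's, and the macros take the info pointer directly rather than a drm_device.

#include <stdio.h>

struct device_info { unsigned char gen; };

/* Stand-ins for the INTEL_INFO(dev)->gen checks; IS_GEN3..IS_GEN5
 * would follow the same pattern. */
#define IS_GEN2(info) ((info)->gen == 2)
#define IS_GEN6(info) ((info)->gen == 6)

int main(void)
{
        struct device_info sandybridge = { .gen = 6 };

        /* One comparison replaces a dozen per-device-ID tests. */
        printf("gen2=%d gen6=%d\n",
               IS_GEN2(&sandybridge), IS_GEN6(&sandybridge));
        return 0;
}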
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index df5a7135c261..90b1d6753b9d 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/slab.h> | 34 | #include <linux/slab.h> |
35 | #include <linux/swap.h> | 35 | #include <linux/swap.h> |
36 | #include <linux/pci.h> | 36 | #include <linux/pci.h> |
37 | #include <linux/intel-gtt.h> | ||
37 | 38 | ||
38 | static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj); | 39 | static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj); |
39 | static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj); | 40 | static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj); |
@@ -135,12 +136,13 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, | |||
135 | return -ENOMEM; | 136 | return -ENOMEM; |
136 | 137 | ||
137 | ret = drm_gem_handle_create(file_priv, obj, &handle); | 138 | ret = drm_gem_handle_create(file_priv, obj, &handle); |
139 | /* drop reference from allocate - handle holds it now */ | ||
138 | drm_gem_object_unreference_unlocked(obj); | 140 | drm_gem_object_unreference_unlocked(obj); |
139 | if (ret) | 141 | if (ret) { |
140 | return ret; | 142 | return ret; |
143 | } | ||
141 | 144 | ||
142 | args->handle = handle; | 145 | args->handle = handle; |
143 | |||
144 | return 0; | 146 | return 0; |
145 | } | 147 | } |
146 | 148 | ||
@@ -467,14 +469,17 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, | |||
467 | return -ENOENT; | 469 | return -ENOENT; |
468 | obj_priv = to_intel_bo(obj); | 470 | obj_priv = to_intel_bo(obj); |
469 | 471 | ||
470 | /* Bounds check source. | 472 | /* Bounds check source. */ |
471 | * | 473 | if (args->offset > obj->size || args->size > obj->size - args->offset) { |
472 | * XXX: This could use review for overflow issues... | 474 | ret = -EINVAL; |
473 | */ | 475 | goto err; |
474 | if (args->offset > obj->size || args->size > obj->size || | 476 | } |
475 | args->offset + args->size > obj->size) { | 477 | |
476 | drm_gem_object_unreference_unlocked(obj); | 478 | if (!access_ok(VERIFY_WRITE, |
477 | return -EINVAL; | 479 | (char __user *)(uintptr_t)args->data_ptr, |
480 | args->size)) { | ||
481 | ret = -EFAULT; | ||
482 | goto err; | ||
478 | } | 483 | } |
479 | 484 | ||
480 | if (i915_gem_object_needs_bit17_swizzle(obj)) { | 485 | if (i915_gem_object_needs_bit17_swizzle(obj)) { |
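The bounds checks rewritten above resolve the old XXX comment about overflow: testing args->offset + args->size > obj->size can wrap when offset and size are both large, silently accepting an out-of-range request, whereas checking offset > obj->size first and then size > obj->size - offset never overflows. The pread/pwrite ioctls also take over the access_ok() user-pointer check that previously lived only in the GTT fast path. A small standalone illustration of the arithmetic; range_ok() is an invented helper, not a kernel function.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True when [offset, offset + size) fits in an object of obj_size
 * bytes, without ever computing offset + size. */
static bool range_ok(uint64_t offset, uint64_t size, uint64_t obj_size)
{
        return offset <= obj_size && size <= obj_size - offset;
}

int main(void)
{
        uint64_t obj_size = 4096;
        uint64_t offset = 16, size = UINT64_MAX - 8;    /* wildly out of range */

        /* Naive test wraps: 16 + (2^64 - 9) == 7, so it wrongly accepts. */
        bool naive_rejects = offset + size > obj_size;
        bool safe_rejects  = !range_ok(offset, size, obj_size);

        printf("naive rejects: %d\n", naive_rejects);   /* 0 - overflow slips by */
        printf("safe rejects:  %d\n", safe_rejects);    /* 1 - caught */
        return 0;
}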
@@ -486,8 +491,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, | |||
486 | file_priv); | 491 | file_priv); |
487 | } | 492 | } |
488 | 493 | ||
494 | err: | ||
489 | drm_gem_object_unreference_unlocked(obj); | 495 | drm_gem_object_unreference_unlocked(obj); |
490 | |||
491 | return ret; | 496 | return ret; |
492 | } | 497 | } |
493 | 498 | ||
@@ -576,8 +581,6 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
576 | 581 | ||
577 | user_data = (char __user *) (uintptr_t) args->data_ptr; | 582 | user_data = (char __user *) (uintptr_t) args->data_ptr; |
578 | remain = args->size; | 583 | remain = args->size; |
579 | if (!access_ok(VERIFY_READ, user_data, remain)) | ||
580 | return -EFAULT; | ||
581 | 584 | ||
582 | 585 | ||
583 | mutex_lock(&dev->struct_mutex); | 586 | mutex_lock(&dev->struct_mutex); |
@@ -930,14 +933,17 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
930 | return -ENOENT; | 933 | return -ENOENT; |
931 | obj_priv = to_intel_bo(obj); | 934 | obj_priv = to_intel_bo(obj); |
932 | 935 | ||
933 | /* Bounds check destination. | 936 | /* Bounds check destination. */ |
934 | * | 937 | if (args->offset > obj->size || args->size > obj->size - args->offset) { |
935 | * XXX: This could use review for overflow issues... | 938 | ret = -EINVAL; |
936 | */ | 939 | goto err; |
937 | if (args->offset > obj->size || args->size > obj->size || | 940 | } |
938 | args->offset + args->size > obj->size) { | 941 | |
939 | drm_gem_object_unreference_unlocked(obj); | 942 | if (!access_ok(VERIFY_READ, |
940 | return -EINVAL; | 943 | (char __user *)(uintptr_t)args->data_ptr, |
944 | args->size)) { | ||
945 | ret = -EFAULT; | ||
946 | goto err; | ||
941 | } | 947 | } |
942 | 948 | ||
943 | /* We can only do the GTT pwrite on untiled buffers, as otherwise | 949 | /* We can only do the GTT pwrite on untiled buffers, as otherwise |
@@ -971,8 +977,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
971 | DRM_INFO("pwrite failed %d\n", ret); | 977 | DRM_INFO("pwrite failed %d\n", ret); |
972 | #endif | 978 | #endif |
973 | 979 | ||
980 | err: | ||
974 | drm_gem_object_unreference_unlocked(obj); | 981 | drm_gem_object_unreference_unlocked(obj); |
975 | |||
976 | return ret; | 982 | return ret; |
977 | } | 983 | } |
978 | 984 | ||
@@ -2347,14 +2353,21 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj) | |||
2347 | 2353 | ||
2348 | reg->obj = obj; | 2354 | reg->obj = obj; |
2349 | 2355 | ||
2350 | if (IS_GEN6(dev)) | 2356 | switch (INTEL_INFO(dev)->gen) { |
2357 | case 6: | ||
2351 | sandybridge_write_fence_reg(reg); | 2358 | sandybridge_write_fence_reg(reg); |
2352 | else if (IS_I965G(dev)) | 2359 | break; |
2360 | case 5: | ||
2361 | case 4: | ||
2353 | i965_write_fence_reg(reg); | 2362 | i965_write_fence_reg(reg); |
2354 | else if (IS_I9XX(dev)) | 2363 | break; |
2364 | case 3: | ||
2355 | i915_write_fence_reg(reg); | 2365 | i915_write_fence_reg(reg); |
2356 | else | 2366 | break; |
2367 | case 2: | ||
2357 | i830_write_fence_reg(reg); | 2368 | i830_write_fence_reg(reg); |
2369 | break; | ||
2370 | } | ||
2358 | 2371 | ||
2359 | trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg, | 2372 | trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg, |
2360 | obj_priv->tiling_mode); | 2373 | obj_priv->tiling_mode); |
@@ -2377,22 +2390,26 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj) | |||
2377 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 2390 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
2378 | struct drm_i915_fence_reg *reg = | 2391 | struct drm_i915_fence_reg *reg = |
2379 | &dev_priv->fence_regs[obj_priv->fence_reg]; | 2392 | &dev_priv->fence_regs[obj_priv->fence_reg]; |
2393 | uint32_t fence_reg; | ||
2380 | 2394 | ||
2381 | if (IS_GEN6(dev)) { | 2395 | switch (INTEL_INFO(dev)->gen) { |
2396 | case 6: | ||
2382 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + | 2397 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + |
2383 | (obj_priv->fence_reg * 8), 0); | 2398 | (obj_priv->fence_reg * 8), 0); |
2384 | } else if (IS_I965G(dev)) { | 2399 | break; |
2400 | case 5: | ||
2401 | case 4: | ||
2385 | I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0); | 2402 | I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0); |
2386 | } else { | 2403 | break; |
2387 | uint32_t fence_reg; | 2404 | case 3: |
2388 | 2405 | if (obj_priv->fence_reg >= 8) | |
2389 | if (obj_priv->fence_reg < 8) | 2406 | fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4; |
2390 | fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4; | ||
2391 | else | 2407 | else |
2392 | fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - | 2408 | case 2: |
2393 | 8) * 4; | 2409 | fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4; |
2394 | 2410 | ||
2395 | I915_WRITE(fence_reg, 0); | 2411 | I915_WRITE(fence_reg, 0); |
2412 | break; | ||
2396 | } | 2413 | } |
2397 | 2414 | ||
2398 | reg->obj = NULL; | 2415 | reg->obj = NULL; |
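Both fence-register paths above now dispatch on INTEL_INFO(dev)->gen with a switch, and gen 5 (Ironlake) deliberately falls through to the gen 4 case because it keeps the 965-style fence layout. A compact sketch of that dispatch shape; the write_fence_*() helpers are placeholders, not driver functions.

#include <stdio.h>

static void write_fence_gen6(void) { puts("SNB 64-bit fence"); }
static void write_fence_i965(void) { puts("965 64-bit fence"); }
static void write_fence_i915(void) { puts("915 32-bit fence"); }
static void write_fence_i830(void) { puts("830 32-bit fence"); }

static void write_fence(unsigned gen)
{
        switch (gen) {
        case 6:
                write_fence_gen6();
                break;
        case 5:         /* Ironlake keeps the 965 fence layout... */
        case 4:         /* ...so it falls through on purpose. */
                write_fence_i965();
                break;
        case 3:
                write_fence_i915();
                break;
        case 2:
                write_fence_i830();
                break;
        }
}

int main(void)
{
        for (unsigned gen = 2; gen <= 6; gen++)
                write_fence(gen);
        return 0;
}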
@@ -3243,6 +3260,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
3243 | (int) reloc->offset, | 3260 | (int) reloc->offset, |
3244 | reloc->read_domains, | 3261 | reloc->read_domains, |
3245 | reloc->write_domain); | 3262 | reloc->write_domain); |
3263 | drm_gem_object_unreference(target_obj); | ||
3264 | i915_gem_object_unpin(obj); | ||
3246 | return -EINVAL; | 3265 | return -EINVAL; |
3247 | } | 3266 | } |
3248 | if (reloc->write_domain & I915_GEM_DOMAIN_CPU || | 3267 | if (reloc->write_domain & I915_GEM_DOMAIN_CPU || |
@@ -3585,6 +3604,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3585 | if (ret != 0) { | 3604 | if (ret != 0) { |
3586 | DRM_ERROR("copy %d cliprects failed: %d\n", | 3605 | DRM_ERROR("copy %d cliprects failed: %d\n", |
3587 | args->num_cliprects, ret); | 3606 | args->num_cliprects, ret); |
3607 | ret = -EFAULT; | ||
3588 | goto pre_mutex_err; | 3608 | goto pre_mutex_err; |
3589 | } | 3609 | } |
3590 | } | 3610 | } |
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 72cae3cccad8..5c428fa3e0b3 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c | |||
@@ -79,6 +79,7 @@ mark_free(struct drm_i915_gem_object *obj_priv, | |||
79 | struct list_head *unwind) | 79 | struct list_head *unwind) |
80 | { | 80 | { |
81 | list_add(&obj_priv->evict_list, unwind); | 81 | list_add(&obj_priv->evict_list, unwind); |
82 | drm_gem_object_reference(&obj_priv->base); | ||
82 | return drm_mm_scan_add_block(obj_priv->gtt_space); | 83 | return drm_mm_scan_add_block(obj_priv->gtt_space); |
83 | } | 84 | } |
84 | 85 | ||
@@ -92,7 +93,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen | |||
92 | { | 93 | { |
93 | drm_i915_private_t *dev_priv = dev->dev_private; | 94 | drm_i915_private_t *dev_priv = dev->dev_private; |
94 | struct list_head eviction_list, unwind_list; | 95 | struct list_head eviction_list, unwind_list; |
95 | struct drm_i915_gem_object *obj_priv, *tmp_obj_priv; | 96 | struct drm_i915_gem_object *obj_priv; |
96 | struct list_head *render_iter, *bsd_iter; | 97 | struct list_head *render_iter, *bsd_iter; |
97 | int ret = 0; | 98 | int ret = 0; |
98 | 99 | ||
@@ -165,6 +166,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen | |||
165 | list_for_each_entry(obj_priv, &unwind_list, evict_list) { | 166 | list_for_each_entry(obj_priv, &unwind_list, evict_list) { |
166 | ret = drm_mm_scan_remove_block(obj_priv->gtt_space); | 167 | ret = drm_mm_scan_remove_block(obj_priv->gtt_space); |
167 | BUG_ON(ret); | 168 | BUG_ON(ret); |
169 | drm_gem_object_unreference(&obj_priv->base); | ||
168 | } | 170 | } |
169 | 171 | ||
170 | /* We expect the caller to unpin, evict all and try again, or give up. | 172 | /* We expect the caller to unpin, evict all and try again, or give up. |
@@ -173,36 +175,34 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen | |||
173 | return -ENOSPC; | 175 | return -ENOSPC; |
174 | 176 | ||
175 | found: | 177 | found: |
178 | 	/* drm_mm doesn't allow any other operations while | ||
179 | * scanning, therefore store to be evicted objects on a | ||
180 | * temporary list. */ | ||
176 | INIT_LIST_HEAD(&eviction_list); | 181 | INIT_LIST_HEAD(&eviction_list); |
177 | list_for_each_entry_safe(obj_priv, tmp_obj_priv, | 182 | while (!list_empty(&unwind_list)) { |
178 | &unwind_list, evict_list) { | 183 | obj_priv = list_first_entry(&unwind_list, |
184 | struct drm_i915_gem_object, | ||
185 | evict_list); | ||
179 | if (drm_mm_scan_remove_block(obj_priv->gtt_space)) { | 186 | if (drm_mm_scan_remove_block(obj_priv->gtt_space)) { |
180 | 			/* drm_mm doesn't allow any other operations while | ||
181 | * scanning, therefore store to be evicted objects on a | ||
182 | * temporary list. */ | ||
183 | list_move(&obj_priv->evict_list, &eviction_list); | 187 | list_move(&obj_priv->evict_list, &eviction_list); |
188 | continue; | ||
184 | } | 189 | } |
190 | list_del(&obj_priv->evict_list); | ||
191 | drm_gem_object_unreference(&obj_priv->base); | ||
185 | } | 192 | } |
186 | 193 | ||
187 | /* Unbinding will emit any required flushes */ | 194 | /* Unbinding will emit any required flushes */ |
188 | list_for_each_entry_safe(obj_priv, tmp_obj_priv, | 195 | while (!list_empty(&eviction_list)) { |
189 | &eviction_list, evict_list) { | 196 | obj_priv = list_first_entry(&eviction_list, |
190 | #if WATCH_LRU | 197 | struct drm_i915_gem_object, |
191 | DRM_INFO("%s: evicting %p\n", __func__, obj); | 198 | evict_list); |
192 | #endif | 199 | if (ret == 0) |
193 | ret = i915_gem_object_unbind(&obj_priv->base); | 200 | ret = i915_gem_object_unbind(&obj_priv->base); |
194 | if (ret) | 201 | list_del(&obj_priv->evict_list); |
195 | return ret; | 202 | drm_gem_object_unreference(&obj_priv->base); |
196 | } | 203 | } |
197 | 204 | ||
198 | /* The just created free hole should be on the top of the free stack | 205 | return ret; |
199 | * maintained by drm_mm, so this BUG_ON actually executes in O(1). | ||
200 | * Furthermore all accessed data has just recently been used, so it | ||
201 | * should be really fast, too. */ | ||
202 | BUG_ON(!drm_mm_search_free(&dev_priv->mm.gtt_space, min_size, | ||
203 | alignment, 0)); | ||
204 | |||
205 | return 0; | ||
206 | } | 206 | } |
207 | 207 | ||
208 | int | 208 | int |
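The eviction rework above pins every candidate with drm_gem_object_reference() when mark_free() puts it on the unwind list and drops that reference again as entries leave either list, so an object cannot be freed while a scan still points at it; the list_for_each_entry_safe walks also become pop-from-the-front loops because unbinding may mutate the lists. A reduced userspace sketch of the hold-a-reference-while-listed idea, using a toy object and linked list rather than the GEM/drm_mm API.

#include <stdio.h>
#include <stdlib.h>

struct obj {
        int refcount;
        struct obj *next;       /* link while on the temporary list */
};

static struct obj *obj_get(struct obj *o) { o->refcount++; return o; }

static void obj_put(struct obj *o)
{
        if (--o->refcount == 0) {
                printf("freeing %p\n", (void *)o);
                free(o);
        }
}

int main(void)
{
        struct obj *list = NULL;

        /* Phase 1: collect candidates, pinning each with an extra ref. */
        for (int i = 0; i < 3; i++) {
                struct obj *o = calloc(1, sizeof(*o));
                o->refcount = 1;        /* the owner's reference */
                o->next = list;
                list = obj_get(o);      /* list's reference keeps it alive */
        }

        /* Phase 2: pop entries one by one; the list's reference guarantees
         * each object is still valid when we work on it here. */
        while (list) {
                struct obj *o = list;
                list = o->next;
                obj_put(o);             /* drop the list's reference */
                obj_put(o);             /* demo only: drop the owner's too */
        }
        return 0;
}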
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 16861b800fee..744225ebb4b2 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -887,6 +887,49 @@ static void i915_handle_error(struct drm_device *dev, bool wedged) | |||
887 | queue_work(dev_priv->wq, &dev_priv->error_work); | 887 | queue_work(dev_priv->wq, &dev_priv->error_work); |
888 | } | 888 | } |
889 | 889 | ||
890 | static void i915_pageflip_stall_check(struct drm_device *dev, int pipe) | ||
891 | { | ||
892 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
893 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | ||
894 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
895 | struct drm_i915_gem_object *obj_priv; | ||
896 | struct intel_unpin_work *work; | ||
897 | unsigned long flags; | ||
898 | bool stall_detected; | ||
899 | |||
900 | /* Ignore early vblank irqs */ | ||
901 | if (intel_crtc == NULL) | ||
902 | return; | ||
903 | |||
904 | spin_lock_irqsave(&dev->event_lock, flags); | ||
905 | work = intel_crtc->unpin_work; | ||
906 | |||
907 | if (work == NULL || work->pending || !work->enable_stall_check) { | ||
908 | /* Either the pending flip IRQ arrived, or we're too early. Don't check */ | ||
909 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
910 | return; | ||
911 | } | ||
912 | |||
913 | /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ | ||
914 | obj_priv = to_intel_bo(work->pending_flip_obj); | ||
915 | if(IS_I965G(dev)) { | ||
916 | int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF; | ||
917 | stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset; | ||
918 | } else { | ||
919 | int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR; | ||
920 | stall_detected = I915_READ(dspaddr) == (obj_priv->gtt_offset + | ||
921 | crtc->y * crtc->fb->pitch + | ||
922 | crtc->x * crtc->fb->bits_per_pixel/8); | ||
923 | } | ||
924 | |||
925 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
926 | |||
927 | if (stall_detected) { | ||
928 | DRM_DEBUG_DRIVER("Pageflip stall detected\n"); | ||
929 | intel_prepare_page_flip(dev, intel_crtc->plane); | ||
930 | } | ||
931 | } | ||
932 | |||
890 | irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | 933 | irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) |
891 | { | 934 | { |
892 | struct drm_device *dev = (struct drm_device *) arg; | 935 | struct drm_device *dev = (struct drm_device *) arg; |
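i915_pageflip_stall_check(), added above, covers the case where a flip completes but its interrupt is lost: on each vblank with a flip still outstanding it reads back the plane's surface register (965+) or base register (earlier parts) and, if the hardware is already scanning out the new buffer, software finishes the flip itself. The pre-965 comparison has to fold the CRTC x/y panning into a byte offset. A tiny standalone version of that address arithmetic; the pitch, bpp and register value below are made up.

#include <stdint.h>
#include <stdio.h>

/* Legacy planes latch a linear byte address: start of the buffer plus
 * the panning offset of the visible origin. */
static uint32_t legacy_plane_address(uint32_t gtt_offset, int x, int y,
                                     uint32_t pitch, int bits_per_pixel)
{
        return gtt_offset + y * pitch + x * bits_per_pixel / 8;
}

int main(void)
{
        uint32_t expected = legacy_plane_address(0x100000, 8, 4, 4096, 32);
        uint32_t dspaddr  = 0x104020;   /* pretend register readback */

        /* A matching readback means the flip really happened and the
         * IRQ was missed, so software can complete the flip itself. */
        printf("stall detected: %d\n", dspaddr == expected);
        return 0;
}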
@@ -1004,15 +1047,19 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
1004 | if (pipea_stats & vblank_status) { | 1047 | if (pipea_stats & vblank_status) { |
1005 | vblank++; | 1048 | vblank++; |
1006 | drm_handle_vblank(dev, 0); | 1049 | drm_handle_vblank(dev, 0); |
1007 | if (!dev_priv->flip_pending_is_done) | 1050 | if (!dev_priv->flip_pending_is_done) { |
1051 | i915_pageflip_stall_check(dev, 0); | ||
1008 | intel_finish_page_flip(dev, 0); | 1052 | intel_finish_page_flip(dev, 0); |
1053 | } | ||
1009 | } | 1054 | } |
1010 | 1055 | ||
1011 | if (pipeb_stats & vblank_status) { | 1056 | if (pipeb_stats & vblank_status) { |
1012 | vblank++; | 1057 | vblank++; |
1013 | drm_handle_vblank(dev, 1); | 1058 | drm_handle_vblank(dev, 1); |
1014 | if (!dev_priv->flip_pending_is_done) | 1059 | if (!dev_priv->flip_pending_is_done) { |
1060 | i915_pageflip_stall_check(dev, 1); | ||
1015 | intel_finish_page_flip(dev, 1); | 1061 | intel_finish_page_flip(dev, 1); |
1062 | } | ||
1016 | } | 1063 | } |
1017 | 1064 | ||
1018 | if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) || | 1065 | if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) || |
@@ -1303,17 +1350,25 @@ void i915_hangcheck_elapsed(unsigned long data) | |||
1303 | i915_seqno_passed(i915_get_gem_seqno(dev, | 1350 | i915_seqno_passed(i915_get_gem_seqno(dev, |
1304 | &dev_priv->render_ring), | 1351 | &dev_priv->render_ring), |
1305 | i915_get_tail_request(dev)->seqno)) { | 1352 | i915_get_tail_request(dev)->seqno)) { |
1353 | bool missed_wakeup = false; | ||
1354 | |||
1306 | dev_priv->hangcheck_count = 0; | 1355 | dev_priv->hangcheck_count = 0; |
1307 | 1356 | ||
1308 | /* Issue a wake-up to catch stuck h/w. */ | 1357 | /* Issue a wake-up to catch stuck h/w. */ |
1309 | if (dev_priv->render_ring.waiting_gem_seqno | | 1358 | if (dev_priv->render_ring.waiting_gem_seqno && |
1310 | dev_priv->bsd_ring.waiting_gem_seqno) { | 1359 | waitqueue_active(&dev_priv->render_ring.irq_queue)) { |
1311 | DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n"); | 1360 | DRM_WAKEUP(&dev_priv->render_ring.irq_queue); |
1312 | if (dev_priv->render_ring.waiting_gem_seqno) | 1361 | missed_wakeup = true; |
1313 | DRM_WAKEUP(&dev_priv->render_ring.irq_queue); | ||
1314 | if (dev_priv->bsd_ring.waiting_gem_seqno) | ||
1315 | DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); | ||
1316 | } | 1362 | } |
1363 | |||
1364 | if (dev_priv->bsd_ring.waiting_gem_seqno && | ||
1365 | waitqueue_active(&dev_priv->bsd_ring.irq_queue)) { | ||
1366 | DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); | ||
1367 | missed_wakeup = true; | ||
1368 | } | ||
1369 | |||
1370 | if (missed_wakeup) | ||
1371 | DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n"); | ||
1317 | return; | 1372 | return; |
1318 | } | 1373 | } |
1319 | 1374 | ||
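The hangcheck change above wakes a ring's waiters only when its wait queue actually has sleepers (waitqueue_active) and prints the missed-IRQ error only once, after checking both rings and only if a wakeup was actually issued. A userspace-flavoured sketch of that control flow; the ring struct and kick_ring() helper are invented for the example.

#include <stdbool.h>
#include <stdio.h>

struct ring {
        const char *name;
        unsigned waiting_seqno;   /* nonzero while a wait is outstanding */
        bool has_waiters;         /* stand-in for waitqueue_active() */
};

/* Wake a ring's waiters only if there are any; report whether we did. */
static bool kick_ring(struct ring *r)
{
        if (r->waiting_seqno && r->has_waiters) {
                printf("waking %s waiters\n", r->name);  /* ~DRM_WAKEUP */
                return true;
        }
        return false;
}

int main(void)
{
        struct ring render = { "render", 42, true };
        struct ring bsd    = { "bsd",     0, false };
        bool missed_wakeup = false;

        missed_wakeup |= kick_ring(&render);
        missed_wakeup |= kick_ring(&bsd);

        if (missed_wakeup)      /* one message, however many rings misfired */
                fprintf(stderr, "Hangcheck: GPU idle, missed IRQ?\n");
        return 0;
}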
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 67e3ec1a6af9..4f5e15577e89 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -319,6 +319,7 @@ | |||
319 | 319 | ||
320 | #define MI_MODE 0x0209c | 320 | #define MI_MODE 0x0209c |
321 | # define VS_TIMER_DISPATCH (1 << 6) | 321 | # define VS_TIMER_DISPATCH (1 << 6) |
322 | # define MI_FLUSH_ENABLE (1 << 11) | ||
322 | 323 | ||
323 | #define SCPD0 0x0209c /* 915+ only */ | 324 | #define SCPD0 0x0209c /* 915+ only */ |
324 | #define IER 0x020a0 | 325 | #define IER 0x020a0 |
@@ -2205,9 +2206,17 @@ | |||
2205 | #define WM1_LP_SR_EN (1<<31) | 2206 | #define WM1_LP_SR_EN (1<<31) |
2206 | #define WM1_LP_LATENCY_SHIFT 24 | 2207 | #define WM1_LP_LATENCY_SHIFT 24 |
2207 | #define WM1_LP_LATENCY_MASK (0x7f<<24) | 2208 | #define WM1_LP_LATENCY_MASK (0x7f<<24) |
2209 | #define WM1_LP_FBC_LP1_MASK (0xf<<20) | ||
2210 | #define WM1_LP_FBC_LP1_SHIFT 20 | ||
2208 | #define WM1_LP_SR_MASK (0x1ff<<8) | 2211 | #define WM1_LP_SR_MASK (0x1ff<<8) |
2209 | #define WM1_LP_SR_SHIFT 8 | 2212 | #define WM1_LP_SR_SHIFT 8 |
2210 | #define WM1_LP_CURSOR_MASK (0x3f) | 2213 | #define WM1_LP_CURSOR_MASK (0x3f) |
2214 | #define WM2_LP_ILK 0x4510c | ||
2215 | #define WM2_LP_EN (1<<31) | ||
2216 | #define WM3_LP_ILK 0x45110 | ||
2217 | #define WM3_LP_EN (1<<31) | ||
2218 | #define WM1S_LP_ILK 0x45120 | ||
2219 | #define WM1S_LP_EN (1<<31) | ||
2211 | 2220 | ||
2212 | /* Memory latency timer register */ | 2221 | /* Memory latency timer register */ |
2213 | #define MLTR_ILK 0x11222 | 2222 | #define MLTR_ILK 0x11222 |
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 2c6b98f2440e..31f08581e93a 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c | |||
@@ -789,16 +789,25 @@ int i915_save_state(struct drm_device *dev) | |||
789 | dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); | 789 | dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); |
790 | 790 | ||
791 | /* Fences */ | 791 | /* Fences */ |
792 | if (IS_I965G(dev)) { | 792 | switch (INTEL_INFO(dev)->gen) { |
793 | case 6: | ||
794 | for (i = 0; i < 16; i++) | ||
795 | dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); | ||
796 | break; | ||
797 | case 5: | ||
798 | case 4: | ||
793 | for (i = 0; i < 16; i++) | 799 | for (i = 0; i < 16; i++) |
794 | dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); | 800 | dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); |
795 | } else { | 801 | break; |
796 | for (i = 0; i < 8; i++) | 802 | case 3: |
797 | dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); | ||
798 | |||
799 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | 803 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) |
800 | for (i = 0; i < 8; i++) | 804 | for (i = 0; i < 8; i++) |
801 | dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); | 805 | dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); |
806 | case 2: | ||
807 | for (i = 0; i < 8; i++) | ||
808 | dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); | ||
809 | break; | ||
810 | |||
802 | } | 811 | } |
803 | 812 | ||
804 | return 0; | 813 | return 0; |
@@ -815,15 +824,24 @@ int i915_restore_state(struct drm_device *dev) | |||
815 | I915_WRITE(HWS_PGA, dev_priv->saveHWS); | 824 | I915_WRITE(HWS_PGA, dev_priv->saveHWS); |
816 | 825 | ||
817 | /* Fences */ | 826 | /* Fences */ |
818 | if (IS_I965G(dev)) { | 827 | switch (INTEL_INFO(dev)->gen) { |
828 | case 6: | ||
829 | for (i = 0; i < 16; i++) | ||
830 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]); | ||
831 | break; | ||
832 | case 5: | ||
833 | case 4: | ||
819 | for (i = 0; i < 16; i++) | 834 | for (i = 0; i < 16; i++) |
820 | I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]); | 835 | I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]); |
821 | } else { | 836 | break; |
822 | for (i = 0; i < 8; i++) | 837 | case 3: |
823 | I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]); | 838 | case 2: |
824 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | 839 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) |
825 | for (i = 0; i < 8; i++) | 840 | for (i = 0; i < 8; i++) |
826 | I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]); | 841 | I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]); |
842 | for (i = 0; i < 8; i++) | ||
843 | I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]); | ||
844 | break; | ||
827 | } | 845 | } |
828 | 846 | ||
829 | i915_restore_display(dev); | 847 | i915_restore_display(dev); |
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 4b7735196cd5..197d4f32585a 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -188,7 +188,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) | |||
188 | 188 | ||
189 | if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, | 189 | if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, |
190 | 1000, 1)) | 190 | 1000, 1)) |
191 | DRM_ERROR("timed out waiting for FORCE_TRIGGER"); | 191 | DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); |
192 | 192 | ||
193 | if (turn_off_dac) { | 193 | if (turn_off_dac) { |
194 | I915_WRITE(PCH_ADPA, temp); | 194 | I915_WRITE(PCH_ADPA, temp); |
@@ -245,7 +245,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) | |||
245 | if (wait_for((I915_READ(PORT_HOTPLUG_EN) & | 245 | if (wait_for((I915_READ(PORT_HOTPLUG_EN) & |
246 | CRT_HOTPLUG_FORCE_DETECT) == 0, | 246 | CRT_HOTPLUG_FORCE_DETECT) == 0, |
247 | 1000, 1)) | 247 | 1000, 1)) |
248 | DRM_ERROR("timed out waiting for FORCE_DETECT to go off"); | 248 | DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off"); |
249 | } | 249 | } |
250 | 250 | ||
251 | stat = I915_READ(PORT_HOTPLUG_STAT); | 251 | stat = I915_READ(PORT_HOTPLUG_STAT); |
@@ -400,7 +400,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder | |||
400 | return status; | 400 | return status; |
401 | } | 401 | } |
402 | 402 | ||
403 | static enum drm_connector_status intel_crt_detect(struct drm_connector *connector) | 403 | static enum drm_connector_status |
404 | intel_crt_detect(struct drm_connector *connector, bool force) | ||
404 | { | 405 | { |
405 | struct drm_device *dev = connector->dev; | 406 | struct drm_device *dev = connector->dev; |
406 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 407 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
@@ -419,6 +420,9 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto | |||
419 | if (intel_crt_detect_ddc(encoder)) | 420 | if (intel_crt_detect_ddc(encoder)) |
420 | return connector_status_connected; | 421 | return connector_status_connected; |
421 | 422 | ||
423 | if (!force) | ||
424 | return connector->status; | ||
425 | |||
422 | /* for pre-945g platforms use load detect */ | 426 | /* for pre-945g platforms use load detect */ |
423 | if (encoder->crtc && encoder->crtc->enabled) { | 427 | if (encoder->crtc && encoder->crtc->enabled) { |
424 | status = intel_crt_load_detect(encoder->crtc, intel_encoder); | 428 | status = intel_crt_load_detect(encoder->crtc, intel_encoder); |
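intel_crt_detect() now takes a force flag: the cheap hotplug and DDC probes still run, but the expensive load-detect path is skipped for background polling, which instead returns the cached connector status. A minimal sketch of that early-out; the enum and struct below loosely mirror the DRM names rather than reproducing them.

#include <stdio.h>

enum connector_status { disconnected, connected, unknown };

struct connector {
        enum connector_status status;   /* last known result */
};

static enum connector_status detect(struct connector *c, int force)
{
        /* Cheap probes (hotplug pins, DDC/EDID) would run here. */

        if (!force)
                return c->status;       /* polling: trust the cache */

        /* Expensive load detection only when explicitly forced. */
        c->status = connected;          /* pretend we measured something */
        return c->status;
}

int main(void)
{
        struct connector crt = { .status = disconnected };

        printf("polled: %d\n", detect(&crt, 0));
        printf("forced: %d\n", detect(&crt, 1));
        return 0;
}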
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 11a3394f5fe1..979228594599 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -990,6 +990,22 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe) | |||
990 | struct drm_i915_private *dev_priv = dev->dev_private; | 990 | struct drm_i915_private *dev_priv = dev->dev_private; |
991 | int pipestat_reg = (pipe == 0 ? PIPEASTAT : PIPEBSTAT); | 991 | int pipestat_reg = (pipe == 0 ? PIPEASTAT : PIPEBSTAT); |
992 | 992 | ||
993 | /* Clear existing vblank status. Note this will clear any other | ||
994 | * sticky status fields as well. | ||
995 | * | ||
996 | * This races with i915_driver_irq_handler() with the result | ||
997 | * that either function could miss a vblank event. Here it is not | ||
998 | * fatal, as we will either wait upon the next vblank interrupt or | ||
999 | * timeout. Generally speaking intel_wait_for_vblank() is only | ||
1000 | * called during modeset at which time the GPU should be idle and | ||
1001 | * should *not* be performing page flips and thus not waiting on | ||
1002 | * vblanks... | ||
1003 | * Currently, the result of us stealing a vblank from the irq | ||
1004 | * handler is that a single frame will be skipped during swapbuffers. | ||
1005 | */ | ||
1006 | I915_WRITE(pipestat_reg, | ||
1007 | I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS); | ||
1008 | |||
993 | /* Wait for vblank interrupt bit to set */ | 1009 | /* Wait for vblank interrupt bit to set */ |
994 | if (wait_for((I915_READ(pipestat_reg) & | 1010 | if (wait_for((I915_READ(pipestat_reg) & |
995 | PIPE_VBLANK_INTERRUPT_STATUS), | 1011 | PIPE_VBLANK_INTERRUPT_STATUS), |
@@ -997,8 +1013,8 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe) | |||
997 | DRM_DEBUG_KMS("vblank wait timed out\n"); | 1013 | DRM_DEBUG_KMS("vblank wait timed out\n"); |
998 | } | 1014 | } |
999 | 1015 | ||
1000 | /** | 1016 | /* |
1001 | * intel_wait_for_vblank_off - wait for vblank after disabling a pipe | 1017 | * intel_wait_for_pipe_off - wait for pipe to turn off |
1002 | * @dev: drm device | 1018 | * @dev: drm device |
1003 | * @pipe: pipe to wait for | 1019 | * @pipe: pipe to wait for |
1004 | * | 1020 | * |
@@ -1006,25 +1022,39 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe) | |||
1006 | * spinning on the vblank interrupt status bit, since we won't actually | 1022 | * spinning on the vblank interrupt status bit, since we won't actually |
1007 | * see an interrupt when the pipe is disabled. | 1023 | * see an interrupt when the pipe is disabled. |
1008 | * | 1024 | * |
1009 | * So this function waits for the display line value to settle (it | 1025 | * On Gen4 and above: |
1010 | * usually ends up stopping at the start of the next frame). | 1026 | * wait for the pipe register state bit to turn off |
1027 | * | ||
1028 | * Otherwise: | ||
1029 | * wait for the display line value to settle (it usually | ||
1030 | * ends up stopping at the start of the next frame). | ||
1031 | * | ||
1011 | */ | 1032 | */ |
1012 | void intel_wait_for_vblank_off(struct drm_device *dev, int pipe) | 1033 | static void intel_wait_for_pipe_off(struct drm_device *dev, int pipe) |
1013 | { | 1034 | { |
1014 | struct drm_i915_private *dev_priv = dev->dev_private; | 1035 | struct drm_i915_private *dev_priv = dev->dev_private; |
1015 | int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL); | 1036 | |
1016 | unsigned long timeout = jiffies + msecs_to_jiffies(100); | 1037 | if (INTEL_INFO(dev)->gen >= 4) { |
1017 | u32 last_line; | 1038 | int pipeconf_reg = (pipe == 0 ? PIPEACONF : PIPEBCONF); |
1018 | 1039 | ||
1019 | /* Wait for the display line to settle */ | 1040 | /* Wait for the Pipe State to go off */ |
1020 | do { | 1041 | if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0, |
1021 | last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK; | 1042 | 100, 0)) |
1022 | mdelay(5); | 1043 | DRM_DEBUG_KMS("pipe_off wait timed out\n"); |
1023 | } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) && | 1044 | } else { |
1024 | time_after(timeout, jiffies)); | 1045 | u32 last_line; |
1025 | 1046 | int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL); | |
1026 | if (time_after(jiffies, timeout)) | 1047 | unsigned long timeout = jiffies + msecs_to_jiffies(100); |
1027 | DRM_DEBUG_KMS("vblank wait timed out\n"); | 1048 | |
1049 | /* Wait for the display line to settle */ | ||
1050 | do { | ||
1051 | last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK; | ||
1052 | mdelay(5); | ||
1053 | } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) && | ||
1054 | time_after(timeout, jiffies)); | ||
1055 | if (time_after(jiffies, timeout)) | ||
1056 | DRM_DEBUG_KMS("pipe_off wait timed out\n"); | ||
1057 | } | ||
1028 | } | 1058 | } |
1029 | 1059 | ||
1030 | /* Parameters have changed, update FBC info */ | 1060 | /* Parameters have changed, update FBC info */ |
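intel_wait_for_pipe_off() above replaces the old wait-for-vblank-off helper with two strategies: gen 4 and newer expose an active bit in PIPECONF that is simply polled until it clears, while older parts poll the scanline register until it stops changing, both under a 100 ms bound. Either way it is a bounded poll loop, sketched below with an invented status register standing in for the hardware.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Pretend register: counts down to 0 and stays there, like a pipe draining. */
static unsigned read_status(void)
{
        static unsigned v = 5;
        return v ? v-- : 0;
}

/* Poll until done(read_status()) holds or timeout_ms elapses. */
static bool wait_for(bool (*done)(unsigned), int timeout_ms)
{
        struct timespec start, now;

        clock_gettime(CLOCK_MONOTONIC, &start);
        for (;;) {
                if (done(read_status()))
                        return true;
                clock_gettime(CLOCK_MONOTONIC, &now);
                long ms = (now.tv_sec - start.tv_sec) * 1000 +
                          (now.tv_nsec - start.tv_nsec) / 1000000;
                if (ms > timeout_ms)
                        return false;
        }
}

static bool pipe_inactive(unsigned status) { return status == 0; }

int main(void)
{
        if (!wait_for(pipe_inactive, 100))
                fprintf(stderr, "pipe_off wait timed out\n");
        else
                puts("pipe is off");
        return 0;
}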
@@ -1486,7 +1516,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
1486 | dspcntr &= ~DISPPLANE_TILED; | 1516 | dspcntr &= ~DISPPLANE_TILED; |
1487 | } | 1517 | } |
1488 | 1518 | ||
1489 | if (IS_IRONLAKE(dev)) | 1519 | if (HAS_PCH_SPLIT(dev)) |
1490 | /* must disable */ | 1520 | /* must disable */ |
1491 | dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; | 1521 | dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; |
1492 | 1522 | ||
@@ -1495,20 +1525,19 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
1495 | Start = obj_priv->gtt_offset; | 1525 | Start = obj_priv->gtt_offset; |
1496 | Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8); | 1526 | Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8); |
1497 | 1527 | ||
1498 | DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y); | 1528 | DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", |
1529 | Start, Offset, x, y, fb->pitch); | ||
1499 | I915_WRITE(dspstride, fb->pitch); | 1530 | I915_WRITE(dspstride, fb->pitch); |
1500 | if (IS_I965G(dev)) { | 1531 | if (IS_I965G(dev)) { |
1501 | I915_WRITE(dspbase, Offset); | ||
1502 | I915_READ(dspbase); | ||
1503 | I915_WRITE(dspsurf, Start); | 1532 | I915_WRITE(dspsurf, Start); |
1504 | I915_READ(dspsurf); | ||
1505 | I915_WRITE(dsptileoff, (y << 16) | x); | 1533 | I915_WRITE(dsptileoff, (y << 16) | x); |
1534 | I915_WRITE(dspbase, Offset); | ||
1506 | } else { | 1535 | } else { |
1507 | I915_WRITE(dspbase, Start + Offset); | 1536 | I915_WRITE(dspbase, Start + Offset); |
1508 | I915_READ(dspbase); | ||
1509 | } | 1537 | } |
1538 | POSTING_READ(dspbase); | ||
1510 | 1539 | ||
1511 | if ((IS_I965G(dev) || plane == 0)) | 1540 | if (IS_I965G(dev) || plane == 0) |
1512 | intel_update_fbc(crtc, &crtc->mode); | 1541 | intel_update_fbc(crtc, &crtc->mode); |
1513 | 1542 | ||
1514 | intel_wait_for_vblank(dev, intel_crtc->pipe); | 1543 | intel_wait_for_vblank(dev, intel_crtc->pipe); |
@@ -1522,7 +1551,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1522 | struct drm_framebuffer *old_fb) | 1551 | struct drm_framebuffer *old_fb) |
1523 | { | 1552 | { |
1524 | struct drm_device *dev = crtc->dev; | 1553 | struct drm_device *dev = crtc->dev; |
1525 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1526 | struct drm_i915_master_private *master_priv; | 1554 | struct drm_i915_master_private *master_priv; |
1527 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1555 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1528 | struct intel_framebuffer *intel_fb; | 1556 | struct intel_framebuffer *intel_fb; |
@@ -1530,13 +1558,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1530 | struct drm_gem_object *obj; | 1558 | struct drm_gem_object *obj; |
1531 | int pipe = intel_crtc->pipe; | 1559 | int pipe = intel_crtc->pipe; |
1532 | int plane = intel_crtc->plane; | 1560 | int plane = intel_crtc->plane; |
1533 | unsigned long Start, Offset; | ||
1534 | int dspbase = (plane == 0 ? DSPAADDR : DSPBADDR); | ||
1535 | int dspsurf = (plane == 0 ? DSPASURF : DSPBSURF); | ||
1536 | int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE; | ||
1537 | int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF); | ||
1538 | int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; | ||
1539 | u32 dspcntr; | ||
1540 | int ret; | 1561 | int ret; |
1541 | 1562 | ||
1542 | /* no fb bound */ | 1563 | /* no fb bound */ |
@@ -1572,71 +1593,18 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1572 | return ret; | 1593 | return ret; |
1573 | } | 1594 | } |
1574 | 1595 | ||
1575 | dspcntr = I915_READ(dspcntr_reg); | 1596 | ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y); |
1576 | /* Mask out pixel format bits in case we change it */ | 1597 | if (ret) { |
1577 | dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; | ||
1578 | switch (crtc->fb->bits_per_pixel) { | ||
1579 | case 8: | ||
1580 | dspcntr |= DISPPLANE_8BPP; | ||
1581 | break; | ||
1582 | case 16: | ||
1583 | if (crtc->fb->depth == 15) | ||
1584 | dspcntr |= DISPPLANE_15_16BPP; | ||
1585 | else | ||
1586 | dspcntr |= DISPPLANE_16BPP; | ||
1587 | break; | ||
1588 | case 24: | ||
1589 | case 32: | ||
1590 | if (crtc->fb->depth == 30) | ||
1591 | dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA; | ||
1592 | else | ||
1593 | dspcntr |= DISPPLANE_32BPP_NO_ALPHA; | ||
1594 | break; | ||
1595 | default: | ||
1596 | DRM_ERROR("Unknown color depth\n"); | ||
1597 | i915_gem_object_unpin(obj); | 1598 | i915_gem_object_unpin(obj); |
1598 | mutex_unlock(&dev->struct_mutex); | 1599 | mutex_unlock(&dev->struct_mutex); |
1599 | return -EINVAL; | 1600 | return ret; |
1600 | } | ||
1601 | if (IS_I965G(dev)) { | ||
1602 | if (obj_priv->tiling_mode != I915_TILING_NONE) | ||
1603 | dspcntr |= DISPPLANE_TILED; | ||
1604 | else | ||
1605 | dspcntr &= ~DISPPLANE_TILED; | ||
1606 | } | ||
1607 | |||
1608 | if (HAS_PCH_SPLIT(dev)) | ||
1609 | /* must disable */ | ||
1610 | dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; | ||
1611 | |||
1612 | I915_WRITE(dspcntr_reg, dspcntr); | ||
1613 | |||
1614 | Start = obj_priv->gtt_offset; | ||
1615 | Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8); | ||
1616 | |||
1617 | DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", | ||
1618 | Start, Offset, x, y, crtc->fb->pitch); | ||
1619 | I915_WRITE(dspstride, crtc->fb->pitch); | ||
1620 | if (IS_I965G(dev)) { | ||
1621 | I915_WRITE(dspsurf, Start); | ||
1622 | I915_WRITE(dsptileoff, (y << 16) | x); | ||
1623 | I915_WRITE(dspbase, Offset); | ||
1624 | } else { | ||
1625 | I915_WRITE(dspbase, Start + Offset); | ||
1626 | } | 1601 | } |
1627 | POSTING_READ(dspbase); | ||
1628 | |||
1629 | if ((IS_I965G(dev) || plane == 0)) | ||
1630 | intel_update_fbc(crtc, &crtc->mode); | ||
1631 | |||
1632 | intel_wait_for_vblank(dev, pipe); | ||
1633 | 1602 | ||
1634 | if (old_fb) { | 1603 | if (old_fb) { |
1635 | intel_fb = to_intel_framebuffer(old_fb); | 1604 | intel_fb = to_intel_framebuffer(old_fb); |
1636 | obj_priv = to_intel_bo(intel_fb->obj); | 1605 | obj_priv = to_intel_bo(intel_fb->obj); |
1637 | i915_gem_object_unpin(intel_fb->obj); | 1606 | i915_gem_object_unpin(intel_fb->obj); |
1638 | } | 1607 | } |
1639 | intel_increase_pllclock(crtc, true); | ||
1640 | 1608 | ||
1641 | mutex_unlock(&dev->struct_mutex); | 1609 | mutex_unlock(&dev->struct_mutex); |
1642 | 1610 | ||
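The large deletion above removes a duplicated copy of the plane-programming sequence (pixel format, tiling, trickle-feed disable, stride/surface/offset writes) from intel_pipe_set_base(), which now delegates to intel_pipe_set_base_atomic() and keeps only the pinning of the new framebuffer and the unwind on error. The general shape of that split, with every driver-specific step stubbed out for the sketch:

#include <stdio.h>

struct fb { int id; };

/* Shared worker: programs the hardware for the given framebuffer.
 * Stands in for intel_pipe_set_base_atomic(); always "succeeds" here. */
static int set_base_atomic(struct fb *fb, int x, int y)
{
        printf("programming plane for fb %d at %d,%d\n", fb->id, x, y);
        return 0;
}

static void pin(struct fb *fb)   { printf("pin fb %d\n", fb->id); }
static void unpin(struct fb *fb) { printf("unpin fb %d\n", fb->id); }

/* Thin caller: owns pinning and error unwinding, delegates programming. */
static int set_base(struct fb *new_fb, struct fb *old_fb, int x, int y)
{
        int ret;

        pin(new_fb);
        ret = set_base_atomic(new_fb, x, y);
        if (ret) {
                unpin(new_fb);          /* undo on failure */
                return ret;
        }
        if (old_fb)
                unpin(old_fb);          /* old buffer no longer scanned out */
        return 0;
}

int main(void)
{
        struct fb a = { 1 }, b = { 2 };

        set_base(&b, &a, 0, 0);
        return 0;
}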
@@ -1911,9 +1879,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1911 | int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; | 1879 | int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; |
1912 | int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; | 1880 | int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; |
1913 | int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF; | 1881 | int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF; |
1914 | int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1; | ||
1915 | int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ; | ||
1916 | int pf_win_pos = (pipe == 0) ? PFA_WIN_POS : PFB_WIN_POS; | ||
1917 | int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; | 1882 | int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; |
1918 | int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; | 1883 | int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; |
1919 | int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B; | 1884 | int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B; |
@@ -1982,15 +1947,19 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1982 | } | 1947 | } |
1983 | 1948 | ||
1984 | /* Enable panel fitting for LVDS */ | 1949 | /* Enable panel fitting for LVDS */ |
1985 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) | 1950 | if (dev_priv->pch_pf_size && |
1986 | || HAS_eDP || intel_pch_has_edp(crtc)) { | 1951 | (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) |
1987 | if (dev_priv->pch_pf_size) { | 1952 | || HAS_eDP || intel_pch_has_edp(crtc))) { |
1988 | temp = I915_READ(pf_ctl_reg); | 1953 | /* Force use of hard-coded filter coefficients |
1989 | I915_WRITE(pf_ctl_reg, temp | PF_ENABLE | PF_FILTER_MED_3x3); | 1954 | * as some pre-programmed values are broken, |
1990 | I915_WRITE(pf_win_pos, dev_priv->pch_pf_pos); | 1955 | * e.g. x201. |
1991 | I915_WRITE(pf_win_size, dev_priv->pch_pf_size); | 1956 | */ |
1992 | } else | 1957 | I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, |
1993 | I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE); | 1958 | PF_ENABLE | PF_FILTER_MED_3x3); |
1959 | I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS, | ||
1960 | dev_priv->pch_pf_pos); | ||
1961 | I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, | ||
1962 | dev_priv->pch_pf_size); | ||
1994 | } | 1963 | } |
1995 | 1964 | ||
1996 | /* Enable CPU pipe */ | 1965 | /* Enable CPU pipe */ |
@@ -2115,7 +2084,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
2115 | I915_WRITE(transconf_reg, temp | TRANS_ENABLE); | 2084 | I915_WRITE(transconf_reg, temp | TRANS_ENABLE); |
2116 | I915_READ(transconf_reg); | 2085 | I915_READ(transconf_reg); |
2117 | 2086 | ||
2118 | if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 10, 0)) | 2087 | if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 100, 1)) |
2119 | DRM_ERROR("failed to enable transcoder\n"); | 2088 | DRM_ERROR("failed to enable transcoder\n"); |
2120 | } | 2089 | } |
2121 | 2090 | ||
@@ -2155,14 +2124,8 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
2155 | udelay(100); | 2124 | udelay(100); |
2156 | 2125 | ||
2157 | /* Disable PF */ | 2126 | /* Disable PF */ |
2158 | temp = I915_READ(pf_ctl_reg); | 2127 | I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0); |
2159 | if ((temp & PF_ENABLE) != 0) { | 2128 | I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, 0); |
2160 | I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE); | ||
2161 | I915_READ(pf_ctl_reg); | ||
2162 | } | ||
2163 | I915_WRITE(pf_win_size, 0); | ||
2164 | POSTING_READ(pf_win_size); | ||
2165 | |||
2166 | 2129 | ||
2167 | /* disable CPU FDI tx and PCH FDI rx */ | 2130 | /* disable CPU FDI tx and PCH FDI rx */ |
2168 | temp = I915_READ(fdi_tx_reg); | 2131 | temp = I915_READ(fdi_tx_reg); |
@@ -2379,13 +2342,13 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
2379 | I915_READ(dspbase_reg); | 2342 | I915_READ(dspbase_reg); |
2380 | } | 2343 | } |
2381 | 2344 | ||
2382 | /* Wait for vblank for the disable to take effect */ | ||
2383 | intel_wait_for_vblank_off(dev, pipe); | ||
2384 | |||
2385 | /* Don't disable pipe A or pipe A PLLs if needed */ | 2345 | /* Don't disable pipe A or pipe A PLLs if needed */ |
2386 | if (pipeconf_reg == PIPEACONF && | 2346 | if (pipeconf_reg == PIPEACONF && |
2387 | (dev_priv->quirks & QUIRK_PIPEA_FORCE)) | 2347 | (dev_priv->quirks & QUIRK_PIPEA_FORCE)) { |
2348 | /* Wait for vblank for the disable to take effect */ | ||
2349 | intel_wait_for_vblank(dev, pipe); | ||
2388 | goto skip_pipe_off; | 2350 | goto skip_pipe_off; |
2351 | } | ||
2389 | 2352 | ||
2390 | /* Next, disable display pipes */ | 2353 | /* Next, disable display pipes */ |
2391 | temp = I915_READ(pipeconf_reg); | 2354 | temp = I915_READ(pipeconf_reg); |
@@ -2394,8 +2357,8 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
2394 | I915_READ(pipeconf_reg); | 2357 | I915_READ(pipeconf_reg); |
2395 | } | 2358 | } |
2396 | 2359 | ||
2397 | /* Wait for vblank for the disable to take effect. */ | 2360 | /* Wait for the pipe to turn off */ |
2398 | intel_wait_for_vblank_off(dev, pipe); | 2361 | intel_wait_for_pipe_off(dev, pipe); |
2399 | 2362 | ||
2400 | temp = I915_READ(dpll_reg); | 2363 | temp = I915_READ(dpll_reg); |
2401 | if ((temp & DPLL_VCO_ENABLE) != 0) { | 2364 | if ((temp & DPLL_VCO_ENABLE) != 0) { |
@@ -2421,6 +2384,9 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
2421 | int pipe = intel_crtc->pipe; | 2384 | int pipe = intel_crtc->pipe; |
2422 | bool enabled; | 2385 | bool enabled; |
2423 | 2386 | ||
2387 | if (intel_crtc->dpms_mode == mode) | ||
2388 | return; | ||
2389 | |||
2424 | intel_crtc->dpms_mode = mode; | 2390 | intel_crtc->dpms_mode = mode; |
2425 | intel_crtc->cursor_on = mode == DRM_MODE_DPMS_ON; | 2391 | intel_crtc->cursor_on = mode == DRM_MODE_DPMS_ON; |
2426 | 2392 | ||
@@ -2511,11 +2477,19 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, | |||
2511 | struct drm_display_mode *adjusted_mode) | 2477 | struct drm_display_mode *adjusted_mode) |
2512 | { | 2478 | { |
2513 | struct drm_device *dev = crtc->dev; | 2479 | struct drm_device *dev = crtc->dev; |
2480 | |||
2514 | if (HAS_PCH_SPLIT(dev)) { | 2481 | if (HAS_PCH_SPLIT(dev)) { |
2515 | /* FDI link clock is fixed at 2.7G */ | 2482 | /* FDI link clock is fixed at 2.7G */ |
2516 | if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4) | 2483 | if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4) |
2517 | return false; | 2484 | return false; |
2518 | } | 2485 | } |
2486 | |||
2487 | /* XXX some encoders set the crtcinfo, others don't. | ||
2488 | * Obviously we need some form of conflict resolution here... | ||
2489 | */ | ||
2490 | if (adjusted_mode->crtc_htotal == 0) | ||
2491 | drm_mode_set_crtcinfo(adjusted_mode, 0); | ||
2492 | |||
2519 | return true; | 2493 | return true; |
2520 | } | 2494 | } |
2521 | 2495 | ||
@@ -2815,14 +2789,8 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz, | |||
2815 | /* Don't promote wm_size to unsigned... */ | 2789 | /* Don't promote wm_size to unsigned... */ |
2816 | if (wm_size > (long)wm->max_wm) | 2790 | if (wm_size > (long)wm->max_wm) |
2817 | wm_size = wm->max_wm; | 2791 | wm_size = wm->max_wm; |
2818 | if (wm_size <= 0) { | 2792 | if (wm_size <= 0) |
2819 | wm_size = wm->default_wm; | 2793 | wm_size = wm->default_wm; |
2820 | DRM_ERROR("Insufficient FIFO for plane, expect flickering:" | ||
2821 | " entries required = %ld, available = %lu.\n", | ||
2822 | entries_required + wm->guard_size, | ||
2823 | wm->fifo_size); | ||
2824 | } | ||
2825 | |||
2826 | return wm_size; | 2794 | return wm_size; |
2827 | } | 2795 | } |
2828 | 2796 | ||
@@ -3436,8 +3404,7 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock, | |||
3436 | reg_value = I915_READ(WM1_LP_ILK); | 3404 | reg_value = I915_READ(WM1_LP_ILK); |
3437 | reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK | | 3405 | reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK | |
3438 | WM1_LP_CURSOR_MASK); | 3406 | WM1_LP_CURSOR_MASK); |
3439 | reg_value |= WM1_LP_SR_EN | | 3407 | reg_value |= (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) | |
3440 | (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) | | ||
3441 | (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm; | 3408 | (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm; |
3442 | 3409 | ||
3443 | I915_WRITE(WM1_LP_ILK, reg_value); | 3410 | I915_WRITE(WM1_LP_ILK, reg_value); |
@@ -3554,10 +3521,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3554 | u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf; | 3521 | u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf; |
3555 | bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false; | 3522 | bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false; |
3556 | bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; | 3523 | bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; |
3557 | bool is_edp = false; | 3524 | struct intel_encoder *has_edp_encoder = NULL; |
3558 | struct drm_mode_config *mode_config = &dev->mode_config; | 3525 | struct drm_mode_config *mode_config = &dev->mode_config; |
3559 | struct drm_encoder *encoder; | 3526 | struct drm_encoder *encoder; |
3560 | struct intel_encoder *intel_encoder = NULL; | ||
3561 | const intel_limit_t *limit; | 3527 | const intel_limit_t *limit; |
3562 | int ret; | 3528 | int ret; |
3563 | struct fdi_m_n m_n = {0}; | 3529 | struct fdi_m_n m_n = {0}; |
@@ -3578,12 +3544,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3578 | drm_vblank_pre_modeset(dev, pipe); | 3544 | drm_vblank_pre_modeset(dev, pipe); |
3579 | 3545 | ||
3580 | list_for_each_entry(encoder, &mode_config->encoder_list, head) { | 3546 | list_for_each_entry(encoder, &mode_config->encoder_list, head) { |
3547 | struct intel_encoder *intel_encoder; | ||
3581 | 3548 | ||
3582 | if (!encoder || encoder->crtc != crtc) | 3549 | if (encoder->crtc != crtc) |
3583 | continue; | 3550 | continue; |
3584 | 3551 | ||
3585 | intel_encoder = enc_to_intel_encoder(encoder); | 3552 | intel_encoder = enc_to_intel_encoder(encoder); |
3586 | |||
3587 | switch (intel_encoder->type) { | 3553 | switch (intel_encoder->type) { |
3588 | case INTEL_OUTPUT_LVDS: | 3554 | case INTEL_OUTPUT_LVDS: |
3589 | is_lvds = true; | 3555 | is_lvds = true; |
@@ -3607,7 +3573,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3607 | is_dp = true; | 3573 | is_dp = true; |
3608 | break; | 3574 | break; |
3609 | case INTEL_OUTPUT_EDP: | 3575 | case INTEL_OUTPUT_EDP: |
3610 | is_edp = true; | 3576 | has_edp_encoder = intel_encoder; |
3611 | break; | 3577 | break; |
3612 | } | 3578 | } |
3613 | 3579 | ||
@@ -3685,10 +3651,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3685 | int lane = 0, link_bw, bpp; | 3651 | int lane = 0, link_bw, bpp; |
3686 | /* eDP doesn't require FDI link, so just set DP M/N | 3652 | /* eDP doesn't require FDI link, so just set DP M/N |
3687 | according to current link config */ | 3653 | according to current link config */ |
3688 | if (is_edp) { | 3654 | if (has_edp_encoder) { |
3689 | target_clock = mode->clock; | 3655 | target_clock = mode->clock; |
3690 | intel_edp_link_config(intel_encoder, | 3656 | intel_edp_link_config(has_edp_encoder, |
3691 | &lane, &link_bw); | 3657 | &lane, &link_bw); |
3692 | } else { | 3658 | } else { |
3693 | /* DP over FDI requires target mode clock | 3659 | /* DP over FDI requires target mode clock |
3694 | instead of link clock */ | 3660 | instead of link clock */ |
@@ -3709,7 +3675,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3709 | temp |= PIPE_8BPC; | 3675 | temp |= PIPE_8BPC; |
3710 | else | 3676 | else |
3711 | temp |= PIPE_6BPC; | 3677 | temp |= PIPE_6BPC; |
3712 | } else if (is_edp || (is_dp && intel_pch_has_edp(crtc))) { | 3678 | } else if (has_edp_encoder || (is_dp && intel_pch_has_edp(crtc))) { |
3713 | switch (dev_priv->edp_bpp/3) { | 3679 | switch (dev_priv->edp_bpp/3) { |
3714 | case 8: | 3680 | case 8: |
3715 | temp |= PIPE_8BPC; | 3681 | temp |= PIPE_8BPC; |
@@ -3782,7 +3748,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3782 | 3748 | ||
3783 | udelay(200); | 3749 | udelay(200); |
3784 | 3750 | ||
3785 | if (is_edp) { | 3751 | if (has_edp_encoder) { |
3786 | if (dev_priv->lvds_use_ssc) { | 3752 | if (dev_priv->lvds_use_ssc) { |
3787 | temp |= DREF_SSC1_ENABLE; | 3753 | temp |= DREF_SSC1_ENABLE; |
3788 | I915_WRITE(PCH_DREF_CONTROL, temp); | 3754 | I915_WRITE(PCH_DREF_CONTROL, temp); |
@@ -3931,7 +3897,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3931 | dpll_reg = pch_dpll_reg; | 3897 | dpll_reg = pch_dpll_reg; |
3932 | } | 3898 | } |
3933 | 3899 | ||
3934 | if (!is_edp) { | 3900 | if (!has_edp_encoder) { |
3935 | I915_WRITE(fp_reg, fp); | 3901 | I915_WRITE(fp_reg, fp); |
3936 | I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); | 3902 | I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); |
3937 | I915_READ(dpll_reg); | 3903 | I915_READ(dpll_reg); |
@@ -4026,7 +3992,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
4026 | } | 3992 | } |
4027 | } | 3993 | } |
4028 | 3994 | ||
4029 | if (!is_edp) { | 3995 | if (!has_edp_encoder) { |
4030 | I915_WRITE(fp_reg, fp); | 3996 | I915_WRITE(fp_reg, fp); |
4031 | I915_WRITE(dpll_reg, dpll); | 3997 | I915_WRITE(dpll_reg, dpll); |
4032 | I915_READ(dpll_reg); | 3998 | I915_READ(dpll_reg); |
@@ -4105,7 +4071,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
4105 | I915_WRITE(link_m1_reg, m_n.link_m); | 4071 | I915_WRITE(link_m1_reg, m_n.link_m); |
4106 | I915_WRITE(link_n1_reg, m_n.link_n); | 4072 | I915_WRITE(link_n1_reg, m_n.link_n); |
4107 | 4073 | ||
4108 | if (is_edp) { | 4074 | if (has_edp_encoder) { |
4109 | ironlake_set_pll_edp(crtc, adjusted_mode->clock); | 4075 | ironlake_set_pll_edp(crtc, adjusted_mode->clock); |
4110 | } else { | 4076 | } else { |
4111 | /* enable FDI RX PLL too */ | 4077 | /* enable FDI RX PLL too */ |
@@ -4911,15 +4877,6 @@ static void intel_crtc_destroy(struct drm_crtc *crtc) | |||
4911 | kfree(intel_crtc); | 4877 | kfree(intel_crtc); |
4912 | } | 4878 | } |
4913 | 4879 | ||
4914 | struct intel_unpin_work { | ||
4915 | struct work_struct work; | ||
4916 | struct drm_device *dev; | ||
4917 | struct drm_gem_object *old_fb_obj; | ||
4918 | struct drm_gem_object *pending_flip_obj; | ||
4919 | struct drm_pending_vblank_event *event; | ||
4920 | int pending; | ||
4921 | }; | ||
4922 | |||
4923 | static void intel_unpin_work_fn(struct work_struct *__work) | 4880 | static void intel_unpin_work_fn(struct work_struct *__work) |
4924 | { | 4881 | { |
4925 | struct intel_unpin_work *work = | 4882 | struct intel_unpin_work *work = |
@@ -5007,7 +4964,8 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane) | |||
5007 | 4964 | ||
5008 | spin_lock_irqsave(&dev->event_lock, flags); | 4965 | spin_lock_irqsave(&dev->event_lock, flags); |
5009 | if (intel_crtc->unpin_work) { | 4966 | if (intel_crtc->unpin_work) { |
5010 | intel_crtc->unpin_work->pending = 1; | 4967 | if ((++intel_crtc->unpin_work->pending) > 1) |
4968 | DRM_ERROR("Prepared flip multiple times\n"); | ||
5011 | } else { | 4969 | } else { |
5012 | DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n"); | 4970 | DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n"); |
5013 | } | 4971 | } |
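
The prepare-flip hunk above replaces the simple pending flag with a counter, so a second prepare for the same unpin work is reported instead of silently overwriting the first. A minimal standalone sketch of that guard, with illustrative names and a caller-held lock assumed:

/* Sketch only: count flip preparations and warn on duplicates.
 * The struct and names are illustrative, not the driver's own. */
#include <stdio.h>

struct flip_work {
    int pending;                    /* times this flip has been prepared */
};

static void prepare_flip(struct flip_work *work)
{
    /* the caller is assumed to hold the lock protecting *work */
    if (++work->pending > 1)
        fprintf(stderr, "Prepared flip multiple times\n");
}

int main(void)
{
    struct flip_work w = { 0 };

    prepare_flip(&w);               /* first prepare: silent */
    prepare_flip(&w);               /* second prepare: warns */
    return 0;
}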
@@ -5026,9 +4984,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
5026 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 4984 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
5027 | struct intel_unpin_work *work; | 4985 | struct intel_unpin_work *work; |
5028 | unsigned long flags, offset; | 4986 | unsigned long flags, offset; |
5029 | int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC; | 4987 | int pipe = intel_crtc->pipe; |
5030 | int ret, pipesrc; | 4988 | u32 pf, pipesrc; |
5031 | u32 flip_mask; | 4989 | int ret; |
5032 | 4990 | ||
5033 | work = kzalloc(sizeof *work, GFP_KERNEL); | 4991 | work = kzalloc(sizeof *work, GFP_KERNEL); |
5034 | if (work == NULL) | 4992 | if (work == NULL) |
@@ -5077,42 +5035,73 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
5077 | atomic_inc(&obj_priv->pending_flip); | 5035 | atomic_inc(&obj_priv->pending_flip); |
5078 | work->pending_flip_obj = obj; | 5036 | work->pending_flip_obj = obj; |
5079 | 5037 | ||
5080 | if (intel_crtc->plane) | ||
5081 | flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; | ||
5082 | else | ||
5083 | flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; | ||
5084 | |||
5085 | if (IS_GEN3(dev) || IS_GEN2(dev)) { | 5038 | if (IS_GEN3(dev) || IS_GEN2(dev)) { |
5039 | u32 flip_mask; | ||
5040 | |||
5041 | if (intel_crtc->plane) | ||
5042 | flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; | ||
5043 | else | ||
5044 | flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; | ||
5045 | |||
5086 | BEGIN_LP_RING(2); | 5046 | BEGIN_LP_RING(2); |
5087 | OUT_RING(MI_WAIT_FOR_EVENT | flip_mask); | 5047 | OUT_RING(MI_WAIT_FOR_EVENT | flip_mask); |
5088 | OUT_RING(0); | 5048 | OUT_RING(0); |
5089 | ADVANCE_LP_RING(); | 5049 | ADVANCE_LP_RING(); |
5090 | } | 5050 | } |
5091 | 5051 | ||
5052 | work->enable_stall_check = true; | ||
5053 | |||
5092 | /* Offset into the new buffer for cases of shared fbs between CRTCs */ | 5054 | /* Offset into the new buffer for cases of shared fbs between CRTCs */ |
5093 | offset = obj_priv->gtt_offset; | 5055 | offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8; |
5094 | offset += (crtc->y * fb->pitch) + (crtc->x * (fb->bits_per_pixel) / 8); | ||
5095 | 5056 | ||
5096 | BEGIN_LP_RING(4); | 5057 | BEGIN_LP_RING(4); |
5097 | if (IS_I965G(dev)) { | 5058 | switch(INTEL_INFO(dev)->gen) { |
5059 | case 2: | ||
5098 | OUT_RING(MI_DISPLAY_FLIP | | 5060 | OUT_RING(MI_DISPLAY_FLIP | |
5099 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | 5061 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); |
5100 | OUT_RING(fb->pitch); | 5062 | OUT_RING(fb->pitch); |
5101 | OUT_RING(offset | obj_priv->tiling_mode); | 5063 | OUT_RING(obj_priv->gtt_offset + offset); |
5102 | pipesrc = I915_READ(pipesrc_reg); | 5064 | OUT_RING(MI_NOOP); |
5103 | OUT_RING(pipesrc & 0x0fff0fff); | 5065 | break; |
5104 | } else if (IS_GEN3(dev)) { | 5066 | |
5067 | case 3: | ||
5105 | OUT_RING(MI_DISPLAY_FLIP_I915 | | 5068 | OUT_RING(MI_DISPLAY_FLIP_I915 | |
5106 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | 5069 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); |
5107 | OUT_RING(fb->pitch); | 5070 | OUT_RING(fb->pitch); |
5108 | OUT_RING(offset); | 5071 | OUT_RING(obj_priv->gtt_offset + offset); |
5109 | OUT_RING(MI_NOOP); | 5072 | OUT_RING(MI_NOOP); |
5110 | } else { | 5073 | break; |
5074 | |||
5075 | case 4: | ||
5076 | case 5: | ||
5077 | /* i965+ uses the linear or tiled offsets from the | ||
5078 | * Display Registers (which do not change across a page-flip) | ||
5079 | * so we need only reprogram the base address. | ||
5080 | */ | ||
5111 | OUT_RING(MI_DISPLAY_FLIP | | 5081 | OUT_RING(MI_DISPLAY_FLIP | |
5112 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | 5082 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); |
5113 | OUT_RING(fb->pitch); | 5083 | OUT_RING(fb->pitch); |
5114 | OUT_RING(offset); | 5084 | OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode); |
5115 | OUT_RING(MI_NOOP); | 5085 | |
5086 | /* XXX Enabling the panel-fitter across page-flip is so far | ||
5087 | * untested on non-native modes, so ignore it for now. | ||
5088 | * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE; | ||
5089 | */ | ||
5090 | pf = 0; | ||
5091 | pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff; | ||
5092 | OUT_RING(pf | pipesrc); | ||
5093 | break; | ||
5094 | |||
5095 | case 6: | ||
5096 | OUT_RING(MI_DISPLAY_FLIP | | ||
5097 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | ||
5098 | OUT_RING(fb->pitch | obj_priv->tiling_mode); | ||
5099 | OUT_RING(obj_priv->gtt_offset); | ||
5100 | |||
5101 | pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE; | ||
5102 | pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff; | ||
5103 | OUT_RING(pf | pipesrc); | ||
5104 | break; | ||
5116 | } | 5105 | } |
5117 | ADVANCE_LP_RING(); | 5106 | ADVANCE_LP_RING(); |
5118 | 5107 | ||
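
The flip emission above also recomputes the panning offset as crtc->y * fb->pitch + crtc->x * bits_per_pixel / 8 and only adds the object's GTT base on the generations that take a full address (gen4+ reuse the offsets already programmed in the display registers). A small standalone sketch of that offset arithmetic, with example values:

/* Sketch: byte offset of a CRTC's top-left pixel inside a framebuffer
 * shared between CRTCs.  The numbers in main() are only examples. */
#include <stdio.h>

static unsigned long fb_pan_offset(unsigned int x, unsigned int y,
                                   unsigned int pitch,          /* bytes per row */
                                   unsigned int bits_per_pixel)
{
    return (unsigned long)y * pitch + (unsigned long)x * bits_per_pixel / 8;
}

int main(void)
{
    /* a 1920-wide XRGB8888 surface: pitch 7680 bytes, 32 bpp */
    printf("offset = %lu bytes\n", fb_pan_offset(1920, 0, 7680, 32));
    return 0;
}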
@@ -5193,7 +5182,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) | |||
5193 | dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; | 5182 | dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; |
5194 | 5183 | ||
5195 | intel_crtc->cursor_addr = 0; | 5184 | intel_crtc->cursor_addr = 0; |
5196 | intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF; | 5185 | intel_crtc->dpms_mode = -1; |
5197 | drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); | 5186 | drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); |
5198 | 5187 | ||
5199 | intel_crtc->busy = false; | 5188 | intel_crtc->busy = false; |
@@ -5701,6 +5690,9 @@ void intel_init_clock_gating(struct drm_device *dev) | |||
5701 | I915_WRITE(DISP_ARB_CTL, | 5690 | I915_WRITE(DISP_ARB_CTL, |
5702 | (I915_READ(DISP_ARB_CTL) | | 5691 | (I915_READ(DISP_ARB_CTL) | |
5703 | DISP_FBC_WM_DIS)); | 5692 | DISP_FBC_WM_DIS)); |
5693 | I915_WRITE(WM3_LP_ILK, 0); | ||
5694 | I915_WRITE(WM2_LP_ILK, 0); | ||
5695 | I915_WRITE(WM1_LP_ILK, 0); | ||
5704 | } | 5696 | } |
5705 | /* | 5697 | /* |
5706 | * Based on the document from hardware guys the following bits | 5698 | * Based on the document from hardware guys the following bits |
@@ -5722,8 +5714,7 @@ void intel_init_clock_gating(struct drm_device *dev) | |||
5722 | ILK_DPFC_DIS2 | | 5714 | ILK_DPFC_DIS2 | |
5723 | ILK_CLK_FBC); | 5715 | ILK_CLK_FBC); |
5724 | } | 5716 | } |
5725 | if (IS_GEN6(dev)) | 5717 | return; |
5726 | return; | ||
5727 | } else if (IS_G4X(dev)) { | 5718 | } else if (IS_G4X(dev)) { |
5728 | uint32_t dspclk_gate; | 5719 | uint32_t dspclk_gate; |
5729 | I915_WRITE(RENCLK_GATE_D1, 0); | 5720 | I915_WRITE(RENCLK_GATE_D1, 0); |
@@ -5784,11 +5775,9 @@ void intel_init_clock_gating(struct drm_device *dev) | |||
5784 | OUT_RING(MI_FLUSH); | 5775 | OUT_RING(MI_FLUSH); |
5785 | ADVANCE_LP_RING(); | 5776 | ADVANCE_LP_RING(); |
5786 | } | 5777 | } |
5787 | } else { | 5778 | } else |
5788 | DRM_DEBUG_KMS("Failed to allocate render context." | 5779 | DRM_DEBUG_KMS("Failed to allocate render context." |
5789 | "Disable RC6\n"); | 5780 | "Disable RC6\n"); |
5790 | return; | ||
5791 | } | ||
5792 | } | 5781 | } |
5793 | 5782 | ||
5794 | if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) { | 5783 | if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) { |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 9caccd03dccb..9ab8708ac6ba 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -239,7 +239,6 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, | |||
239 | uint32_t ch_data = ch_ctl + 4; | 239 | uint32_t ch_data = ch_ctl + 4; |
240 | int i; | 240 | int i; |
241 | int recv_bytes; | 241 | int recv_bytes; |
242 | uint32_t ctl; | ||
243 | uint32_t status; | 242 | uint32_t status; |
244 | uint32_t aux_clock_divider; | 243 | uint32_t aux_clock_divider; |
245 | int try, precharge; | 244 | int try, precharge; |
@@ -263,41 +262,43 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, | |||
263 | else | 262 | else |
264 | precharge = 5; | 263 | precharge = 5; |
265 | 264 | ||
265 | if (I915_READ(ch_ctl) & DP_AUX_CH_CTL_SEND_BUSY) { | ||
266 | DRM_ERROR("dp_aux_ch not started status 0x%08x\n", | ||
267 | I915_READ(ch_ctl)); | ||
268 | return -EBUSY; | ||
269 | } | ||
270 | |||
266 | /* Must try at least 3 times according to DP spec */ | 271 | /* Must try at least 3 times according to DP spec */ |
267 | for (try = 0; try < 5; try++) { | 272 | for (try = 0; try < 5; try++) { |
268 | /* Load the send data into the aux channel data registers */ | 273 | /* Load the send data into the aux channel data registers */ |
269 | for (i = 0; i < send_bytes; i += 4) { | 274 | for (i = 0; i < send_bytes; i += 4) |
270 | uint32_t d = pack_aux(send + i, send_bytes - i); | 275 | I915_WRITE(ch_data + i, |
271 | 276 | pack_aux(send + i, send_bytes - i)); | |
272 | I915_WRITE(ch_data + i, d); | ||
273 | } | ||
274 | |||
275 | ctl = (DP_AUX_CH_CTL_SEND_BUSY | | ||
276 | DP_AUX_CH_CTL_TIME_OUT_400us | | ||
277 | (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | | ||
278 | (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | | ||
279 | (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | | ||
280 | DP_AUX_CH_CTL_DONE | | ||
281 | DP_AUX_CH_CTL_TIME_OUT_ERROR | | ||
282 | DP_AUX_CH_CTL_RECEIVE_ERROR); | ||
283 | 277 | ||
284 | /* Send the command and wait for it to complete */ | 278 | /* Send the command and wait for it to complete */ |
285 | I915_WRITE(ch_ctl, ctl); | 279 | I915_WRITE(ch_ctl, |
286 | (void) I915_READ(ch_ctl); | 280 | DP_AUX_CH_CTL_SEND_BUSY | |
281 | DP_AUX_CH_CTL_TIME_OUT_400us | | ||
282 | (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | | ||
283 | (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | | ||
284 | (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | | ||
285 | DP_AUX_CH_CTL_DONE | | ||
286 | DP_AUX_CH_CTL_TIME_OUT_ERROR | | ||
287 | DP_AUX_CH_CTL_RECEIVE_ERROR); | ||
287 | for (;;) { | 288 | for (;;) { |
288 | udelay(100); | ||
289 | status = I915_READ(ch_ctl); | 289 | status = I915_READ(ch_ctl); |
290 | if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) | 290 | if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) |
291 | break; | 291 | break; |
292 | udelay(100); | ||
292 | } | 293 | } |
293 | 294 | ||
294 | /* Clear done status and any errors */ | 295 | /* Clear done status and any errors */ |
295 | I915_WRITE(ch_ctl, (status | | 296 | I915_WRITE(ch_ctl, |
296 | DP_AUX_CH_CTL_DONE | | 297 | status | |
297 | DP_AUX_CH_CTL_TIME_OUT_ERROR | | 298 | DP_AUX_CH_CTL_DONE | |
298 | DP_AUX_CH_CTL_RECEIVE_ERROR)); | 299 | DP_AUX_CH_CTL_TIME_OUT_ERROR | |
299 | (void) I915_READ(ch_ctl); | 300 | DP_AUX_CH_CTL_RECEIVE_ERROR); |
300 | if ((status & DP_AUX_CH_CTL_TIME_OUT_ERROR) == 0) | 301 | if (status & DP_AUX_CH_CTL_DONE) |
301 | break; | 302 | break; |
302 | } | 303 | } |
303 | 304 | ||
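
The AUX-channel rewrite above bails out if SEND_BUSY is already set, loads the data registers, writes the control word directly, polls for SEND_BUSY to clear, writes the status back to acknowledge DONE and any error bits, and only stops retrying once DONE is actually set. A standalone sketch of that kick/poll/acknowledge shape against a simulated register (bit positions and the fake hardware are illustrative):

/* Sketch of an AUX-style transaction: start it, poll until the
 * hardware is idle, acknowledge status, retry until DONE. */
#include <stdint.h>
#include <stdio.h>

#define CTL_SEND_BUSY   (1u << 31)
#define CTL_DONE        (1u << 30)
#define CTL_RECV_ERROR  (1u << 25)

static uint32_t ch_ctl;             /* stand-in for the channel control register */

static void reg_write(uint32_t v) { ch_ctl = v; }

static uint32_t reg_read(void)
{
    /* the pretend hardware completes instantly and reports success */
    if (ch_ctl & CTL_SEND_BUSY)
        ch_ctl = (ch_ctl & ~CTL_SEND_BUSY) | CTL_DONE;
    return ch_ctl;
}

int main(void)
{
    uint32_t status = 0;
    int try;

    for (try = 0; try < 5; try++) {
        reg_write(CTL_SEND_BUSY);               /* kick off the transfer */

        do {                                    /* wait for the engine to go idle */
            status = reg_read();
        } while (status & CTL_SEND_BUSY);

        /* writing the status back clears DONE and the error bits */
        reg_write(status | CTL_DONE | CTL_RECV_ERROR);

        if (status & CTL_DONE)                  /* stop only on real completion */
            break;
    }

    printf("done after %d attempt(s)\n", try + 1);
    return 0;
}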
@@ -324,15 +325,12 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, | |||
324 | /* Unload any bytes sent back from the other side */ | 325 | /* Unload any bytes sent back from the other side */ |
325 | recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> | 326 | recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> |
326 | DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); | 327 | DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); |
327 | |||
328 | if (recv_bytes > recv_size) | 328 | if (recv_bytes > recv_size) |
329 | recv_bytes = recv_size; | 329 | recv_bytes = recv_size; |
330 | 330 | ||
331 | for (i = 0; i < recv_bytes; i += 4) { | 331 | for (i = 0; i < recv_bytes; i += 4) |
332 | uint32_t d = I915_READ(ch_data + i); | 332 | unpack_aux(I915_READ(ch_data + i), |
333 | 333 | recv + i, recv_bytes - i); | |
334 | unpack_aux(d, recv + i, recv_bytes - i); | ||
335 | } | ||
336 | 334 | ||
337 | return recv_bytes; | 335 | return recv_bytes; |
338 | } | 336 | } |
@@ -1140,18 +1138,14 @@ static bool | |||
1140 | intel_dp_set_link_train(struct intel_dp *intel_dp, | 1138 | intel_dp_set_link_train(struct intel_dp *intel_dp, |
1141 | uint32_t dp_reg_value, | 1139 | uint32_t dp_reg_value, |
1142 | uint8_t dp_train_pat, | 1140 | uint8_t dp_train_pat, |
1143 | uint8_t train_set[4], | 1141 | uint8_t train_set[4]) |
1144 | bool first) | ||
1145 | { | 1142 | { |
1146 | struct drm_device *dev = intel_dp->base.enc.dev; | 1143 | struct drm_device *dev = intel_dp->base.enc.dev; |
1147 | struct drm_i915_private *dev_priv = dev->dev_private; | 1144 | struct drm_i915_private *dev_priv = dev->dev_private; |
1148 | struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc); | ||
1149 | int ret; | 1145 | int ret; |
1150 | 1146 | ||
1151 | I915_WRITE(intel_dp->output_reg, dp_reg_value); | 1147 | I915_WRITE(intel_dp->output_reg, dp_reg_value); |
1152 | POSTING_READ(intel_dp->output_reg); | 1148 | POSTING_READ(intel_dp->output_reg); |
1153 | if (first) | ||
1154 | intel_wait_for_vblank(dev, intel_crtc->pipe); | ||
1155 | 1149 | ||
1156 | intel_dp_aux_native_write_1(intel_dp, | 1150 | intel_dp_aux_native_write_1(intel_dp, |
1157 | DP_TRAINING_PATTERN_SET, | 1151 | DP_TRAINING_PATTERN_SET, |
@@ -1176,10 +1170,15 @@ intel_dp_link_train(struct intel_dp *intel_dp) | |||
1176 | uint8_t voltage; | 1170 | uint8_t voltage; |
1177 | bool clock_recovery = false; | 1171 | bool clock_recovery = false; |
1178 | bool channel_eq = false; | 1172 | bool channel_eq = false; |
1179 | bool first = true; | ||
1180 | int tries; | 1173 | int tries; |
1181 | u32 reg; | 1174 | u32 reg; |
1182 | uint32_t DP = intel_dp->DP; | 1175 | uint32_t DP = intel_dp->DP; |
1176 | struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc); | ||
1177 | |||
1178 | /* Enable output, wait for it to become active */ | ||
1179 | I915_WRITE(intel_dp->output_reg, intel_dp->DP); | ||
1180 | POSTING_READ(intel_dp->output_reg); | ||
1181 | intel_wait_for_vblank(dev, intel_crtc->pipe); | ||
1183 | 1182 | ||
1184 | /* Write the link configuration data */ | 1183 | /* Write the link configuration data */ |
1185 | intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, | 1184 | intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, |
@@ -1212,9 +1211,8 @@ intel_dp_link_train(struct intel_dp *intel_dp) | |||
1212 | reg = DP | DP_LINK_TRAIN_PAT_1; | 1211 | reg = DP | DP_LINK_TRAIN_PAT_1; |
1213 | 1212 | ||
1214 | if (!intel_dp_set_link_train(intel_dp, reg, | 1213 | if (!intel_dp_set_link_train(intel_dp, reg, |
1215 | DP_TRAINING_PATTERN_1, train_set, first)) | 1214 | DP_TRAINING_PATTERN_1, train_set)) |
1216 | break; | 1215 | break; |
1217 | first = false; | ||
1218 | /* Set training pattern 1 */ | 1216 | /* Set training pattern 1 */ |
1219 | 1217 | ||
1220 | udelay(100); | 1218 | udelay(100); |
@@ -1268,8 +1266,7 @@ intel_dp_link_train(struct intel_dp *intel_dp) | |||
1268 | 1266 | ||
1269 | /* channel eq pattern */ | 1267 | /* channel eq pattern */ |
1270 | if (!intel_dp_set_link_train(intel_dp, reg, | 1268 | if (!intel_dp_set_link_train(intel_dp, reg, |
1271 | DP_TRAINING_PATTERN_2, train_set, | 1269 | DP_TRAINING_PATTERN_2, train_set)) |
1272 | false)) | ||
1273 | break; | 1270 | break; |
1274 | 1271 | ||
1275 | udelay(400); | 1272 | udelay(400); |
@@ -1388,7 +1385,7 @@ ironlake_dp_detect(struct drm_connector *connector) | |||
1388 | * \return false if DP port is disconnected. | 1385 | * \return false if DP port is disconnected. |
1389 | */ | 1386 | */ |
1390 | static enum drm_connector_status | 1387 | static enum drm_connector_status |
1391 | intel_dp_detect(struct drm_connector *connector) | 1388 | intel_dp_detect(struct drm_connector *connector, bool force) |
1392 | { | 1389 | { |
1393 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 1390 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
1394 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | 1391 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 0e92aa07b382..8828b3ac6414 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -176,6 +176,16 @@ struct intel_crtc { | |||
176 | #define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc) | 176 | #define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc) |
177 | #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) | 177 | #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) |
178 | 178 | ||
179 | struct intel_unpin_work { | ||
180 | struct work_struct work; | ||
181 | struct drm_device *dev; | ||
182 | struct drm_gem_object *old_fb_obj; | ||
183 | struct drm_gem_object *pending_flip_obj; | ||
184 | struct drm_pending_vblank_event *event; | ||
185 | int pending; | ||
186 | bool enable_stall_check; | ||
187 | }; | ||
188 | |||
179 | struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, | 189 | struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, |
180 | const char *name); | 190 | const char *name); |
181 | void intel_i2c_destroy(struct i2c_adapter *adapter); | 191 | void intel_i2c_destroy(struct i2c_adapter *adapter); |
@@ -219,7 +229,6 @@ extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, | |||
219 | struct drm_crtc *crtc); | 229 | struct drm_crtc *crtc); |
220 | int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, | 230 | int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, |
221 | struct drm_file *file_priv); | 231 | struct drm_file *file_priv); |
222 | extern void intel_wait_for_vblank_off(struct drm_device *dev, int pipe); | ||
223 | extern void intel_wait_for_vblank(struct drm_device *dev, int pipe); | 232 | extern void intel_wait_for_vblank(struct drm_device *dev, int pipe); |
224 | extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); | 233 | extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); |
225 | extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, | 234 | extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, |
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index a399f4b2c1c5..7c9ec1472d46 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c | |||
@@ -221,7 +221,8 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder, | |||
221 | * | 221 | * |
222 | * Unimplemented. | 222 | * Unimplemented. |
223 | */ | 223 | */ |
224 | static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector) | 224 | static enum drm_connector_status |
225 | intel_dvo_detect(struct drm_connector *connector, bool force) | ||
225 | { | 226 | { |
226 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 227 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
227 | struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); | 228 | struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); |
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 7bdc96256bf5..56ad9df2ccb5 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c | |||
@@ -237,8 +237,10 @@ int intel_fbdev_destroy(struct drm_device *dev, | |||
237 | drm_fb_helper_fini(&ifbdev->helper); | 237 | drm_fb_helper_fini(&ifbdev->helper); |
238 | 238 | ||
239 | drm_framebuffer_cleanup(&ifb->base); | 239 | drm_framebuffer_cleanup(&ifb->base); |
240 | if (ifb->obj) | 240 | if (ifb->obj) { |
241 | drm_gem_object_handle_unreference(ifb->obj); | ||
241 | drm_gem_object_unreference(ifb->obj); | 242 | drm_gem_object_unreference(ifb->obj); |
243 | } | ||
242 | 244 | ||
243 | return 0; | 245 | return 0; |
244 | } | 246 | } |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index ccd4c97e6524..926934a482ec 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -139,7 +139,7 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, | |||
139 | } | 139 | } |
140 | 140 | ||
141 | static enum drm_connector_status | 141 | static enum drm_connector_status |
142 | intel_hdmi_detect(struct drm_connector *connector) | 142 | intel_hdmi_detect(struct drm_connector *connector, bool force) |
143 | { | 143 | { |
144 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 144 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
145 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | 145 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index b819c1081147..6ec39a86ed06 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -445,7 +445,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, | |||
445 | * connected and closed means disconnected. We also send hotplug events as | 445 | * connected and closed means disconnected. We also send hotplug events as |
446 | * needed, using lid status notification from the input layer. | 446 | * needed, using lid status notification from the input layer. |
447 | */ | 447 | */ |
448 | static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector) | 448 | static enum drm_connector_status |
449 | intel_lvds_detect(struct drm_connector *connector, bool force) | ||
449 | { | 450 | { |
450 | struct drm_device *dev = connector->dev; | 451 | struct drm_device *dev = connector->dev; |
451 | enum drm_connector_status status = connector_status_connected; | 452 | enum drm_connector_status status = connector_status_connected; |
@@ -540,7 +541,9 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, | |||
540 | * the LID nofication event. | 541 | * the LID nofication event. |
541 | */ | 542 | */ |
542 | if (connector) | 543 | if (connector) |
543 | connector->status = connector->funcs->detect(connector); | 544 | connector->status = connector->funcs->detect(connector, |
545 | false); | ||
546 | |||
544 | /* Don't force modeset on machines where it causes a GPU lockup */ | 547 | /* Don't force modeset on machines where it causes a GPU lockup */ |
545 | if (dmi_check_system(intel_no_modeset_on_lid)) | 548 | if (dmi_check_system(intel_no_modeset_on_lid)) |
546 | return NOTIFY_OK; | 549 | return NOTIFY_OK; |
@@ -875,8 +878,6 @@ void intel_lvds_init(struct drm_device *dev) | |||
875 | 878 | ||
876 | intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); | 879 | intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); |
877 | intel_encoder->crtc_mask = (1 << 1); | 880 | intel_encoder->crtc_mask = (1 << 1); |
878 | if (IS_I965G(dev)) | ||
879 | intel_encoder->crtc_mask |= (1 << 0); | ||
880 | drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); | 881 | drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); |
881 | drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); | 882 | drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); |
882 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; | 883 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; |
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 4f00390d7c61..1d306a458be6 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c | |||
@@ -25,6 +25,8 @@ | |||
25 | * | 25 | * |
26 | * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c | 26 | * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c |
27 | */ | 27 | */ |
28 | |||
29 | #include <linux/seq_file.h> | ||
28 | #include "drmP.h" | 30 | #include "drmP.h" |
29 | #include "drm.h" | 31 | #include "drm.h" |
30 | #include "i915_drm.h" | 32 | #include "i915_drm.h" |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 51e9c9e718c4..cb3508f78bc3 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -220,9 +220,13 @@ static int init_render_ring(struct drm_device *dev, | |||
220 | { | 220 | { |
221 | drm_i915_private_t *dev_priv = dev->dev_private; | 221 | drm_i915_private_t *dev_priv = dev->dev_private; |
222 | int ret = init_ring_common(dev, ring); | 222 | int ret = init_ring_common(dev, ring); |
223 | int mode; | ||
224 | |||
223 | if (IS_I9XX(dev) && !IS_GEN3(dev)) { | 225 | if (IS_I9XX(dev) && !IS_GEN3(dev)) { |
224 | I915_WRITE(MI_MODE, | 226 | mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH; |
225 | (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH); | 227 | if (IS_GEN6(dev)) |
228 | mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE; | ||
229 | I915_WRITE(MI_MODE, mode); | ||
226 | } | 230 | } |
227 | return ret; | 231 | return ret; |
228 | } | 232 | } |
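
MI_MODE, like several other render registers on these parts, appears to take masked writes: the upper 16 bits select which of the lower 16 bits the write affects, which is why each flag is or'd in both shifted and unshifted form above (VS_TIMER_DISPATCH everywhere, plus MI_FLUSH_ENABLE on gen6). A small sketch of that convention with made-up bit positions:

/* Sketch of a masked-bit register value: bits 31..16 act as the
 * write-enable mask for bits 15..0.  Bit positions are examples only. */
#include <stdint.h>
#include <stdio.h>

#define MASKED_ENABLE(bit)   (((uint32_t)(bit) << 16) | (bit))
#define MASKED_DISABLE(bit)  ((uint32_t)(bit) << 16)

#define VS_TIMER_DISPATCH    (1u << 6)      /* illustrative position */
#define MI_FLUSH_ENABLE      (1u << 11)     /* illustrative position */

int main(void)
{
    uint32_t mode = MASKED_ENABLE(VS_TIMER_DISPATCH);

    /* a newer part would also enable MI flushes in the same write */
    mode |= MASKED_ENABLE(MI_FLUSH_ENABLE);

    printf("MI_MODE write: 0x%08x\n", (unsigned int)mode);
    return 0;
}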
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 093e914e8a41..ee73e428a84a 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -1061,8 +1061,9 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, | |||
1061 | if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode)) | 1061 | if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode)) |
1062 | return false; | 1062 | return false; |
1063 | 1063 | ||
1064 | if (!intel_sdvo_set_input_timings_for_mode(intel_sdvo, mode, adjusted_mode)) | 1064 | (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo, |
1065 | return false; | 1065 | mode, |
1066 | adjusted_mode); | ||
1066 | } else if (intel_sdvo->is_lvds) { | 1067 | } else if (intel_sdvo->is_lvds) { |
1067 | drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode, 0); | 1068 | drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode, 0); |
1068 | 1069 | ||
@@ -1070,8 +1071,9 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, | |||
1070 | intel_sdvo->sdvo_lvds_fixed_mode)) | 1071 | intel_sdvo->sdvo_lvds_fixed_mode)) |
1071 | return false; | 1072 | return false; |
1072 | 1073 | ||
1073 | if (!intel_sdvo_set_input_timings_for_mode(intel_sdvo, mode, adjusted_mode)) | 1074 | (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo, |
1074 | return false; | 1075 | mode, |
1076 | adjusted_mode); | ||
1075 | } | 1077 | } |
1076 | 1078 | ||
1077 | /* Make the CRTC code factor in the SDVO pixel multiplier. The | 1079 | /* Make the CRTC code factor in the SDVO pixel multiplier. The |
@@ -1108,10 +1110,9 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1108 | in_out.in0 = intel_sdvo->attached_output; | 1110 | in_out.in0 = intel_sdvo->attached_output; |
1109 | in_out.in1 = 0; | 1111 | in_out.in1 = 0; |
1110 | 1112 | ||
1111 | if (!intel_sdvo_set_value(intel_sdvo, | 1113 | intel_sdvo_set_value(intel_sdvo, |
1112 | SDVO_CMD_SET_IN_OUT_MAP, | 1114 | SDVO_CMD_SET_IN_OUT_MAP, |
1113 | &in_out, sizeof(in_out))) | 1115 | &in_out, sizeof(in_out)); |
1114 | return; | ||
1115 | 1116 | ||
1116 | if (intel_sdvo->is_hdmi) { | 1117 | if (intel_sdvo->is_hdmi) { |
1117 | if (!intel_sdvo_set_avi_infoframe(intel_sdvo, mode)) | 1118 | if (!intel_sdvo_set_avi_infoframe(intel_sdvo, mode)) |
@@ -1122,11 +1123,9 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1122 | 1123 | ||
1123 | /* We have tried to get input timing in mode_fixup, and filled into | 1124 | /* We have tried to get input timing in mode_fixup, and filled into |
1124 | adjusted_mode */ | 1125 | adjusted_mode */ |
1125 | if (intel_sdvo->is_tv || intel_sdvo->is_lvds) { | 1126 | intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); |
1126 | intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); | 1127 | if (intel_sdvo->is_tv || intel_sdvo->is_lvds) |
1127 | input_dtd.part2.sdvo_flags = intel_sdvo->sdvo_flags; | 1128 | input_dtd.part2.sdvo_flags = intel_sdvo->sdvo_flags; |
1128 | } else | ||
1129 | intel_sdvo_get_dtd_from_mode(&input_dtd, mode); | ||
1130 | 1129 | ||
1131 | /* If it's a TV, we already set the output timing in mode_fixup. | 1130 | /* If it's a TV, we already set the output timing in mode_fixup. |
1132 | * Otherwise, the output timing is equal to the input timing. | 1131 | * Otherwise, the output timing is equal to the input timing. |
@@ -1137,8 +1136,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1137 | intel_sdvo->attached_output)) | 1136 | intel_sdvo->attached_output)) |
1138 | return; | 1137 | return; |
1139 | 1138 | ||
1140 | if (!intel_sdvo_set_output_timing(intel_sdvo, &input_dtd)) | 1139 | (void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd); |
1141 | return; | ||
1142 | } | 1140 | } |
1143 | 1141 | ||
1144 | /* Set the input timing to the screen. Assume always input 0. */ | 1142 | /* Set the input timing to the screen. Assume always input 0. */ |
@@ -1165,8 +1163,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1165 | intel_sdvo_set_input_timing(encoder, &input_dtd); | 1163 | intel_sdvo_set_input_timing(encoder, &input_dtd); |
1166 | } | 1164 | } |
1167 | #else | 1165 | #else |
1168 | if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd)) | 1166 | (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd); |
1169 | return; | ||
1170 | #endif | 1167 | #endif |
1171 | 1168 | ||
1172 | sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode); | 1169 | sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode); |
@@ -1420,7 +1417,7 @@ intel_analog_is_connected(struct drm_device *dev) | |||
1420 | if (!analog_connector) | 1417 | if (!analog_connector) |
1421 | return false; | 1418 | return false; |
1422 | 1419 | ||
1423 | if (analog_connector->funcs->detect(analog_connector) == | 1420 | if (analog_connector->funcs->detect(analog_connector, false) == |
1424 | connector_status_disconnected) | 1421 | connector_status_disconnected) |
1425 | return false; | 1422 | return false; |
1426 | 1423 | ||
@@ -1489,7 +1486,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) | |||
1489 | return status; | 1486 | return status; |
1490 | } | 1487 | } |
1491 | 1488 | ||
1492 | static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector) | 1489 | static enum drm_connector_status |
1490 | intel_sdvo_detect(struct drm_connector *connector, bool force) | ||
1493 | { | 1491 | { |
1494 | uint16_t response; | 1492 | uint16_t response; |
1495 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 1493 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
@@ -1932,6 +1930,41 @@ static const struct drm_encoder_funcs intel_sdvo_enc_funcs = { | |||
1932 | .destroy = intel_sdvo_enc_destroy, | 1930 | .destroy = intel_sdvo_enc_destroy, |
1933 | }; | 1931 | }; |
1934 | 1932 | ||
1933 | static void | ||
1934 | intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo) | ||
1935 | { | ||
1936 | uint16_t mask = 0; | ||
1937 | unsigned int num_bits; | ||
1938 | |||
1939 | /* Make a mask of outputs less than or equal to our own priority in the | ||
1940 | * list. | ||
1941 | */ | ||
1942 | switch (sdvo->controlled_output) { | ||
1943 | case SDVO_OUTPUT_LVDS1: | ||
1944 | mask |= SDVO_OUTPUT_LVDS1; | ||
1945 | case SDVO_OUTPUT_LVDS0: | ||
1946 | mask |= SDVO_OUTPUT_LVDS0; | ||
1947 | case SDVO_OUTPUT_TMDS1: | ||
1948 | mask |= SDVO_OUTPUT_TMDS1; | ||
1949 | case SDVO_OUTPUT_TMDS0: | ||
1950 | mask |= SDVO_OUTPUT_TMDS0; | ||
1951 | case SDVO_OUTPUT_RGB1: | ||
1952 | mask |= SDVO_OUTPUT_RGB1; | ||
1953 | case SDVO_OUTPUT_RGB0: | ||
1954 | mask |= SDVO_OUTPUT_RGB0; | ||
1955 | break; | ||
1956 | } | ||
1957 | |||
1958 | /* Count bits to find what number we are in the priority list. */ | ||
1959 | mask &= sdvo->caps.output_flags; | ||
1960 | num_bits = hweight16(mask); | ||
1961 | /* If more than 3 outputs, default to DDC bus 3 for now. */ | ||
1962 | if (num_bits > 3) | ||
1963 | num_bits = 3; | ||
1964 | |||
1965 | /* Corresponds to SDVO_CONTROL_BUS_DDCx */ | ||
1966 | sdvo->ddc_bus = 1 << num_bits; | ||
1967 | } | ||
1935 | 1968 | ||
1936 | /** | 1969 | /** |
1937 | * Choose the appropriate DDC bus for control bus switch command for this | 1970 | * Choose the appropriate DDC bus for control bus switch command for this |
@@ -1951,7 +1984,10 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv, | |||
1951 | else | 1984 | else |
1952 | mapping = &(dev_priv->sdvo_mappings[1]); | 1985 | mapping = &(dev_priv->sdvo_mappings[1]); |
1953 | 1986 | ||
1954 | sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4); | 1987 | if (mapping->initialized) |
1988 | sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4); | ||
1989 | else | ||
1990 | intel_sdvo_guess_ddc_bus(sdvo); | ||
1955 | } | 1991 | } |
1956 | 1992 | ||
1957 | static bool | 1993 | static bool |
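
When the BIOS mapping for the SDVO DDC pin is not initialized, the new intel_sdvo_guess_ddc_bus() above falls through its switch to build a mask of every output at or below this one's priority, intersects it with the advertised output flags, and uses the population count (capped at 3) to pick a SDVO_CONTROL_BUS_DDCx value. A standalone sketch of that mask-and-count idea with illustrative flag values and a shortened priority list:

/* Sketch: guess a DDC bus from an output's place in a fixed priority
 * order.  Flag values and the priority list are illustrative. */
#include <stdint.h>
#include <stdio.h>

#define OUT_TMDS0   0x0001
#define OUT_RGB0    0x0002
#define OUT_TMDS1   0x0100
#define OUT_RGB1    0x0200

static unsigned int popcount16(uint16_t v)
{
    unsigned int n = 0;

    for (; v; v &= v - 1)           /* clear the lowest set bit each pass */
        n++;
    return n;
}

static unsigned int guess_ddc_bus(uint16_t controlled, uint16_t caps)
{
    static const uint16_t priority[] = {
        OUT_TMDS1, OUT_TMDS0, OUT_RGB1, OUT_RGB0,   /* high -> low */
    };
    uint16_t mask = 0;
    unsigned int i, bits;
    int found = 0;

    /* include our own output and everything of lower priority,
     * mirroring the deliberate switch fall-through in the driver */
    for (i = 0; i < sizeof(priority) / sizeof(priority[0]); i++) {
        if (priority[i] == controlled)
            found = 1;
        if (found)
            mask |= priority[i];
    }

    mask &= caps;                   /* only count outputs the device exposes */
    bits = popcount16(mask);
    if (bits > 3)                   /* more than 3 outputs: default to bus 3 */
        bits = 3;
    return 1u << bits;              /* corresponds to a CONTROL_BUS_DDCx value */
}

int main(void)
{
    printf("ddc bus bit: 0x%x\n",
           guess_ddc_bus(OUT_TMDS1, OUT_TMDS0 | OUT_TMDS1));
    return 0;
}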
@@ -2134,8 +2170,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type) | |||
2134 | return true; | 2170 | return true; |
2135 | 2171 | ||
2136 | err: | 2172 | err: |
2137 | intel_sdvo_destroy_enhance_property(connector); | 2173 | intel_sdvo_destroy(connector); |
2138 | kfree(intel_sdvo_connector); | ||
2139 | return false; | 2174 | return false; |
2140 | } | 2175 | } |
2141 | 2176 | ||
@@ -2207,8 +2242,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device) | |||
2207 | return true; | 2242 | return true; |
2208 | 2243 | ||
2209 | err: | 2244 | err: |
2210 | intel_sdvo_destroy_enhance_property(connector); | 2245 | intel_sdvo_destroy(connector); |
2211 | kfree(intel_sdvo_connector); | ||
2212 | return false; | 2246 | return false; |
2213 | } | 2247 | } |
2214 | 2248 | ||
@@ -2486,11 +2520,10 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo, | |||
2486 | uint16_t response; | 2520 | uint16_t response; |
2487 | } enhancements; | 2521 | } enhancements; |
2488 | 2522 | ||
2489 | if (!intel_sdvo_get_value(intel_sdvo, | 2523 | enhancements.response = 0; |
2490 | SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, | 2524 | intel_sdvo_get_value(intel_sdvo, |
2491 | &enhancements, sizeof(enhancements))) | 2525 | SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, |
2492 | return false; | 2526 | &enhancements, sizeof(enhancements)); |
2493 | |||
2494 | if (enhancements.response == 0) { | 2527 | if (enhancements.response == 0) { |
2495 | DRM_DEBUG_KMS("No enhancement is supported\n"); | 2528 | DRM_DEBUG_KMS("No enhancement is supported\n"); |
2496 | return true; | 2529 | return true; |
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index d2029efee982..4a117e318a73 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
@@ -1231,7 +1231,6 @@ intel_tv_detect_type (struct intel_tv *intel_tv) | |||
1231 | struct drm_encoder *encoder = &intel_tv->base.enc; | 1231 | struct drm_encoder *encoder = &intel_tv->base.enc; |
1232 | struct drm_device *dev = encoder->dev; | 1232 | struct drm_device *dev = encoder->dev; |
1233 | struct drm_i915_private *dev_priv = dev->dev_private; | 1233 | struct drm_i915_private *dev_priv = dev->dev_private; |
1234 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | ||
1235 | unsigned long irqflags; | 1234 | unsigned long irqflags; |
1236 | u32 tv_ctl, save_tv_ctl; | 1235 | u32 tv_ctl, save_tv_ctl; |
1237 | u32 tv_dac, save_tv_dac; | 1236 | u32 tv_dac, save_tv_dac; |
@@ -1268,11 +1267,15 @@ intel_tv_detect_type (struct intel_tv *intel_tv) | |||
1268 | DAC_C_0_7_V); | 1267 | DAC_C_0_7_V); |
1269 | I915_WRITE(TV_CTL, tv_ctl); | 1268 | I915_WRITE(TV_CTL, tv_ctl); |
1270 | I915_WRITE(TV_DAC, tv_dac); | 1269 | I915_WRITE(TV_DAC, tv_dac); |
1271 | intel_wait_for_vblank(dev, intel_crtc->pipe); | 1270 | POSTING_READ(TV_DAC); |
1271 | msleep(20); | ||
1272 | |||
1272 | tv_dac = I915_READ(TV_DAC); | 1273 | tv_dac = I915_READ(TV_DAC); |
1273 | I915_WRITE(TV_DAC, save_tv_dac); | 1274 | I915_WRITE(TV_DAC, save_tv_dac); |
1274 | I915_WRITE(TV_CTL, save_tv_ctl); | 1275 | I915_WRITE(TV_CTL, save_tv_ctl); |
1275 | intel_wait_for_vblank(dev, intel_crtc->pipe); | 1276 | POSTING_READ(TV_CTL); |
1277 | msleep(20); | ||
1278 | |||
1276 | /* | 1279 | /* |
1277 | * A B C | 1280 | * A B C |
1278 | * 0 1 1 Composite | 1281 | * 0 1 1 Composite |
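
The TV-type probe above replaces the vblank waits around sampling TV_DAC with a posting read followed by a 20 ms sleep, so the DAC sense lines get time to settle without depending on a running pipe. A rough user-space sketch of the write/flush/settle/sample idea (the register is simulated):

/* Sketch: flush a posted MMIO write by reading the register back,
 * then wait before sampling it.  Everything here is simulated. */
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static volatile uint32_t fake_tv_dac;   /* stand-in for an MMIO register */

static uint32_t write_flush_settle(uint32_t value)
{
    uint32_t flush;

    fake_tv_dac = value;                /* posted write */
    flush = fake_tv_dac;                /* read-back forces completion */
    (void)flush;

    usleep(20 * 1000);                  /* ~20 ms for the sense lines to settle */
    return fake_tv_dac;                 /* sample the detect bits */
}

int main(void)
{
    printf("sampled 0x%08x\n", (unsigned int)write_flush_settle(0xa5));
    return 0;
}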
@@ -1338,7 +1341,7 @@ static void intel_tv_find_better_format(struct drm_connector *connector) | |||
1338 | * we have a pipe programmed in order to probe the TV. | 1341 | * we have a pipe programmed in order to probe the TV. |
1339 | */ | 1342 | */ |
1340 | static enum drm_connector_status | 1343 | static enum drm_connector_status |
1341 | intel_tv_detect(struct drm_connector *connector) | 1344 | intel_tv_detect(struct drm_connector *connector, bool force) |
1342 | { | 1345 | { |
1343 | struct drm_display_mode mode; | 1346 | struct drm_display_mode mode; |
1344 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 1347 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
@@ -1350,7 +1353,7 @@ intel_tv_detect(struct drm_connector *connector) | |||
1350 | 1353 | ||
1351 | if (encoder->crtc && encoder->crtc->enabled) { | 1354 | if (encoder->crtc && encoder->crtc->enabled) { |
1352 | type = intel_tv_detect_type(intel_tv); | 1355 | type = intel_tv_detect_type(intel_tv); |
1353 | } else { | 1356 | } else if (force) { |
1354 | struct drm_crtc *crtc; | 1357 | struct drm_crtc *crtc; |
1355 | int dpms_mode; | 1358 | int dpms_mode; |
1356 | 1359 | ||
@@ -1361,10 +1364,9 @@ intel_tv_detect(struct drm_connector *connector) | |||
1361 | intel_release_load_detect_pipe(&intel_tv->base, connector, | 1364 | intel_release_load_detect_pipe(&intel_tv->base, connector, |
1362 | dpms_mode); | 1365 | dpms_mode); |
1363 | } else | 1366 | } else |
1364 | type = -1; | 1367 | return connector_status_unknown; |
1365 | } | 1368 | } else |
1366 | 1369 | return connector->status; | |
1367 | intel_tv->type = type; | ||
1368 | 1370 | ||
1369 | if (type < 0) | 1371 | if (type < 0) |
1370 | return connector_status_disconnected; | 1372 | return connector_status_disconnected; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index a1473fff06ac..fc737037f751 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c | |||
@@ -168,7 +168,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector, | |||
168 | } | 168 | } |
169 | 169 | ||
170 | static enum drm_connector_status | 170 | static enum drm_connector_status |
171 | nouveau_connector_detect(struct drm_connector *connector) | 171 | nouveau_connector_detect(struct drm_connector *connector, bool force) |
172 | { | 172 | { |
173 | struct drm_device *dev = connector->dev; | 173 | struct drm_device *dev = connector->dev; |
174 | struct nouveau_connector *nv_connector = nouveau_connector(connector); | 174 | struct nouveau_connector *nv_connector = nouveau_connector(connector); |
@@ -246,7 +246,7 @@ detect_analog: | |||
246 | } | 246 | } |
247 | 247 | ||
248 | static enum drm_connector_status | 248 | static enum drm_connector_status |
249 | nouveau_connector_detect_lvds(struct drm_connector *connector) | 249 | nouveau_connector_detect_lvds(struct drm_connector *connector, bool force) |
250 | { | 250 | { |
251 | struct drm_device *dev = connector->dev; | 251 | struct drm_device *dev = connector->dev; |
252 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 252 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
@@ -267,7 +267,7 @@ nouveau_connector_detect_lvds(struct drm_connector *connector) | |||
267 | 267 | ||
268 | /* Try retrieving EDID via DDC */ | 268 | /* Try retrieving EDID via DDC */ |
269 | if (!dev_priv->vbios.fp_no_ddc) { | 269 | if (!dev_priv->vbios.fp_no_ddc) { |
270 | status = nouveau_connector_detect(connector); | 270 | status = nouveau_connector_detect(connector, force); |
271 | if (status == connector_status_connected) | 271 | if (status == connector_status_connected) |
272 | goto out; | 272 | goto out; |
273 | } | 273 | } |
@@ -558,8 +558,10 @@ nouveau_connector_get_modes(struct drm_connector *connector) | |||
558 | if (nv_encoder->dcb->type == OUTPUT_LVDS && | 558 | if (nv_encoder->dcb->type == OUTPUT_LVDS && |
559 | (nv_encoder->dcb->lvdsconf.use_straps_for_mode || | 559 | (nv_encoder->dcb->lvdsconf.use_straps_for_mode || |
560 | dev_priv->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) { | 560 | dev_priv->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) { |
561 | nv_connector->native_mode = drm_mode_create(dev); | 561 | struct drm_display_mode mode; |
562 | nouveau_bios_fp_mode(dev, nv_connector->native_mode); | 562 | |
563 | nouveau_bios_fp_mode(dev, &mode); | ||
564 | nv_connector->native_mode = drm_mode_duplicate(dev, &mode); | ||
563 | } | 565 | } |
564 | 566 | ||
565 | /* Find the native mode if this is a digital panel, if we didn't | 567 | /* Find the native mode if this is a digital panel, if we didn't |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index dbd30b2e43fd..d2047713dc59 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c | |||
@@ -352,6 +352,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev) | |||
352 | 352 | ||
353 | if (nouveau_fb->nvbo) { | 353 | if (nouveau_fb->nvbo) { |
354 | nouveau_bo_unmap(nouveau_fb->nvbo); | 354 | nouveau_bo_unmap(nouveau_fb->nvbo); |
355 | drm_gem_object_handle_unreference_unlocked(nouveau_fb->nvbo->gem); | ||
355 | drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem); | 356 | drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem); |
356 | nouveau_fb->nvbo = NULL; | 357 | nouveau_fb->nvbo = NULL; |
357 | } | 358 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 6b208ffafa8d..87ac21ec23d2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c | |||
@@ -64,16 +64,17 @@ nouveau_fence_update(struct nouveau_channel *chan) | |||
64 | struct nouveau_fence *fence; | 64 | struct nouveau_fence *fence; |
65 | uint32_t sequence; | 65 | uint32_t sequence; |
66 | 66 | ||
67 | spin_lock(&chan->fence.lock); | ||
68 | |||
67 | if (USE_REFCNT) | 69 | if (USE_REFCNT) |
68 | sequence = nvchan_rd32(chan, 0x48); | 70 | sequence = nvchan_rd32(chan, 0x48); |
69 | else | 71 | else |
70 | sequence = atomic_read(&chan->fence.last_sequence_irq); | 72 | sequence = atomic_read(&chan->fence.last_sequence_irq); |
71 | 73 | ||
72 | if (chan->fence.sequence_ack == sequence) | 74 | if (chan->fence.sequence_ack == sequence) |
73 | return; | 75 | goto out; |
74 | chan->fence.sequence_ack = sequence; | 76 | chan->fence.sequence_ack = sequence; |
75 | 77 | ||
76 | spin_lock(&chan->fence.lock); | ||
77 | list_for_each_safe(entry, tmp, &chan->fence.pending) { | 78 | list_for_each_safe(entry, tmp, &chan->fence.pending) { |
78 | fence = list_entry(entry, struct nouveau_fence, entry); | 79 | fence = list_entry(entry, struct nouveau_fence, entry); |
79 | 80 | ||
@@ -85,6 +86,7 @@ nouveau_fence_update(struct nouveau_channel *chan) | |||
85 | if (sequence == chan->fence.sequence_ack) | 86 | if (sequence == chan->fence.sequence_ack) |
86 | break; | 87 | break; |
87 | } | 88 | } |
89 | out: | ||
88 | spin_unlock(&chan->fence.lock); | 90 | spin_unlock(&chan->fence.lock); |
89 | } | 91 | } |
90 | 92 | ||
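
The nouveau_fence_update() change above takes chan->fence.lock before sampling the completed sequence, so the sample and the walk of the pending list happen under one lock, and the early return becomes a goto so the lock is always released. A small standalone sketch of that pattern using a pthread mutex and illustrative data:

/* Sketch: sample a hardware counter and walk its waiter list under
 * the same lock; names and data are illustrative. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t fence_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int hw_sequence;        /* advanced elsewhere (e.g. by an IRQ) */
static unsigned int sequence_ack;       /* last value already processed */

static void fence_update(void)
{
    unsigned int sequence;

    pthread_mutex_lock(&fence_lock);

    sequence = hw_sequence;             /* sampled under the same lock ... */
    if (sequence == sequence_ack)
        goto out;                       /* ... which is never left held on return */
    sequence_ack = sequence;

    /* signal every pending fence up to this sequence here */

out:
    pthread_mutex_unlock(&fence_lock);
}

int main(void)
{
    hw_sequence = 42;
    fence_update();
    printf("acked up to %u\n", sequence_ack);
    return 0;
}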
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 581c67cd7b24..19620a6709f5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c | |||
@@ -167,11 +167,9 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data, | |||
167 | goto out; | 167 | goto out; |
168 | 168 | ||
169 | ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle); | 169 | ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle); |
170 | /* drop reference from allocate - handle holds it now */ | ||
171 | drm_gem_object_unreference_unlocked(nvbo->gem); | ||
170 | out: | 172 | out: |
171 | drm_gem_object_handle_unreference_unlocked(nvbo->gem); | ||
172 | |||
173 | if (ret) | ||
174 | drm_gem_object_unreference_unlocked(nvbo->gem); | ||
175 | return ret; | 173 | return ret; |
176 | } | 174 | } |
177 | 175 | ||
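
The nouveau_gem_ioctl_new() fix above follows the usual GEM convention that a userspace handle owns its own reference: after drm_gem_handle_create() the allocation reference is dropped unconditionally, instead of juggling the handle reference on the error path. A standalone sketch of that reference flow with illustrative types:

/* Sketch of "the handle holds its own reference": the allocation
 * reference is dropped once handle creation has been attempted.
 * Types, names and the failure flag are illustrative. */
#include <stdio.h>

struct gem_object { int refcount; };

static void obj_ref(struct gem_object *o)   { o->refcount++; }
static void obj_unref(struct gem_object *o) { o->refcount--; }

static int handle_create(struct gem_object *o, int *handle, int fail)
{
    if (fail)
        return -1;
    obj_ref(o);                     /* the new handle owns a reference */
    *handle = 1;
    return 0;
}

static int new_object_ioctl(int fail)
{
    struct gem_object obj = { 1 };  /* reference held by the allocation */
    int handle = 0, ret;

    ret = handle_create(&obj, &handle, fail);
    /* drop the allocation reference - the handle, if created, keeps its own */
    obj_unref(&obj);

    printf("ret=%d refcount=%d\n", ret, obj.refcount);
    return ret;
}

int main(void)
{
    new_object_ioctl(0);            /* success: one reference left (the handle's) */
    new_object_ioctl(1);            /* failure: refcount reaches 0, object would be freed */
    return 0;
}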
@@ -245,7 +243,7 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence) | |||
245 | list_del(&nvbo->entry); | 243 | list_del(&nvbo->entry); |
246 | nvbo->reserved_by = NULL; | 244 | nvbo->reserved_by = NULL; |
247 | ttm_bo_unreserve(&nvbo->bo); | 245 | ttm_bo_unreserve(&nvbo->bo); |
248 | drm_gem_object_unreference(nvbo->gem); | 246 | drm_gem_object_unreference_unlocked(nvbo->gem); |
249 | } | 247 | } |
250 | } | 248 | } |
251 | 249 | ||
@@ -300,7 +298,7 @@ retry: | |||
300 | validate_fini(op, NULL); | 298 | validate_fini(op, NULL); |
301 | if (ret == -EAGAIN) | 299 | if (ret == -EAGAIN) |
302 | ret = ttm_bo_wait_unreserved(&nvbo->bo, false); | 300 | ret = ttm_bo_wait_unreserved(&nvbo->bo, false); |
303 | drm_gem_object_unreference(gem); | 301 | drm_gem_object_unreference_unlocked(gem); |
304 | if (ret) { | 302 | if (ret) { |
305 | NV_ERROR(dev, "fail reserve\n"); | 303 | NV_ERROR(dev, "fail reserve\n"); |
306 | return ret; | 304 | return ret; |
@@ -616,8 +614,6 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, | |||
616 | return PTR_ERR(bo); | 614 | return PTR_ERR(bo); |
617 | } | 615 | } |
618 | 616 | ||
619 | mutex_lock(&dev->struct_mutex); | ||
620 | |||
621 | /* Mark push buffers as being used on PFIFO, the validation code | 617 | /* Mark push buffers as being used on PFIFO, the validation code |
622 | * will then make sure that if the pushbuf bo moves, that they | 618 | * will then make sure that if the pushbuf bo moves, that they |
623 | * happen on the kernel channel, which will in turn cause a sync | 619 | * happen on the kernel channel, which will in turn cause a sync |
@@ -731,7 +727,6 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, | |||
731 | out: | 727 | out: |
732 | validate_fini(&op, fence); | 728 | validate_fini(&op, fence); |
733 | nouveau_fence_unref((void**)&fence); | 729 | nouveau_fence_unref((void**)&fence); |
734 | mutex_unlock(&dev->struct_mutex); | ||
735 | kfree(bo); | 730 | kfree(bo); |
736 | kfree(push); | 731 | kfree(push); |
737 | 732 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c index 3ec181ff50ce..3c9964a8fbad 100644 --- a/drivers/gpu/drm/nouveau/nouveau_notifier.c +++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c | |||
@@ -79,6 +79,7 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan) | |||
79 | mutex_lock(&dev->struct_mutex); | 79 | mutex_lock(&dev->struct_mutex); |
80 | nouveau_bo_unpin(chan->notifier_bo); | 80 | nouveau_bo_unpin(chan->notifier_bo); |
81 | mutex_unlock(&dev->struct_mutex); | 81 | mutex_unlock(&dev->struct_mutex); |
82 | drm_gem_object_handle_unreference_unlocked(chan->notifier_bo->gem); | ||
82 | drm_gem_object_unreference_unlocked(chan->notifier_bo->gem); | 83 | drm_gem_object_unreference_unlocked(chan->notifier_bo->gem); |
83 | drm_mm_takedown(&chan->notifier_heap); | 84 | drm_mm_takedown(&chan->notifier_heap); |
84 | } | 85 | } |
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index c95bf9b681dd..91ef93cf1f35 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c | |||
@@ -139,6 +139,8 @@ nv50_instmem_init(struct drm_device *dev) | |||
139 | chan->file_priv = (struct drm_file *)-2; | 139 | chan->file_priv = (struct drm_file *)-2; |
140 | dev_priv->fifos[0] = dev_priv->fifos[127] = chan; | 140 | dev_priv->fifos[0] = dev_priv->fifos[127] = chan; |
141 | 141 | ||
142 | INIT_LIST_HEAD(&chan->ramht_refs); | ||
143 | |||
142 | /* Channel's PRAMIN object + heap */ | 144 | /* Channel's PRAMIN object + heap */ |
143 | ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0, | 145 | ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0, |
144 | NULL, &chan->ramin); | 146 | NULL, &chan->ramin); |
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h index 1bc72c3190a9..fe359a239df3 100644 --- a/drivers/gpu/drm/radeon/atombios.h +++ b/drivers/gpu/drm/radeon/atombios.h | |||
@@ -4999,7 +4999,7 @@ typedef struct _SW_I2C_IO_DATA_PARAMETERS | |||
4999 | #define SW_I2C_CNTL_WRITE1BIT 6 | 4999 | #define SW_I2C_CNTL_WRITE1BIT 6 |
5000 | 5000 | ||
5001 | //==============================VESA definition Portion=============================== | 5001 | //==============================VESA definition Portion=============================== |
5002 | #define VESA_OEM_PRODUCT_REV '01.00' | 5002 | #define VESA_OEM_PRODUCT_REV "01.00" |
5003 | #define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB //refer to VBE spec p.32, no TTY support | 5003 | #define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB //refer to VBE spec p.32, no TTY support |
5004 | #define VESA_MODE_WIN_ATTRIBUTE 7 | 5004 | #define VESA_MODE_WIN_ATTRIBUTE 7 |
5005 | #define VESA_WIN_SIZE 64 | 5005 | #define VESA_WIN_SIZE 64 |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 577239a24fd5..cd0290f946cf 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
@@ -332,6 +332,11 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc, | |||
332 | args.usV_SyncWidth = | 332 | args.usV_SyncWidth = |
333 | cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start); | 333 | cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start); |
334 | 334 | ||
335 | args.ucOverscanRight = radeon_crtc->h_border; | ||
336 | args.ucOverscanLeft = radeon_crtc->h_border; | ||
337 | args.ucOverscanBottom = radeon_crtc->v_border; | ||
338 | args.ucOverscanTop = radeon_crtc->v_border; | ||
339 | |||
335 | if (mode->flags & DRM_MODE_FLAG_NVSYNC) | 340 | if (mode->flags & DRM_MODE_FLAG_NVSYNC) |
336 | misc |= ATOM_VSYNC_POLARITY; | 341 | misc |= ATOM_VSYNC_POLARITY; |
337 | if (mode->flags & DRM_MODE_FLAG_NHSYNC) | 342 | if (mode->flags & DRM_MODE_FLAG_NHSYNC) |
@@ -534,6 +539,21 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
534 | pll->algo = PLL_ALGO_LEGACY; | 539 | pll->algo = PLL_ALGO_LEGACY; |
535 | pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER; | 540 | pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER; |
536 | } | 541 | } |
542 | /* There is some evidence (often anecdotal) that RV515/RV620 LVDS | ||
543 | * (on some boards at least) prefers the legacy algo. I'm not | ||
544 | * sure whether this should handled generically or on a | ||
545 | * case-by-case quirk basis. Both algos should work fine in the | ||
546 | * majority of cases. | ||
547 | */ | ||
548 | if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) && | ||
549 | ((rdev->family == CHIP_RV515) || | ||
550 | (rdev->family == CHIP_RV620))) { | ||
551 | /* allow the user to overrride just in case */ | ||
552 | if (radeon_new_pll == 1) | ||
553 | pll->algo = PLL_ALGO_NEW; | ||
554 | else | ||
555 | pll->algo = PLL_ALGO_LEGACY; | ||
556 | } | ||
537 | } else { | 557 | } else { |
538 | if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) | 558 | if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) |
539 | pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; | 559 | pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; |
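
The RV515/RV620 LVDS hunk above is a quirk: those parts reportedly prefer the legacy PLL algorithm, but the radeon_new_pll module parameter can still force the new one. A rough sketch of a quirk with a user override, using an illustrative chip list and parameter semantics:

/* Sketch: per-chip quirk selection with a module-parameter style
 * override.  The enum values and parameter meaning are examples. */
#include <stdio.h>

enum pll_algo { PLL_ALGO_LEGACY, PLL_ALGO_NEW };
enum chip     { CHIP_RV515, CHIP_RV620, CHIP_OTHER };

/* -1 = auto (honour the quirk), 1 = force the new algorithm */
static int new_pll_param = -1;

static enum pll_algo pick_lvds_pll_algo(enum chip family)
{
    int quirky = (family == CHIP_RV515 || family == CHIP_RV620);

    if (quirky)
        return new_pll_param == 1 ? PLL_ALGO_NEW : PLL_ALGO_LEGACY;
    return PLL_ALGO_NEW;            /* non-quirky parts keep the default here */
}

int main(void)
{
    printf("RV620 LVDS -> %s\n",
           pick_lvds_pll_algo(CHIP_RV620) == PLL_ALGO_LEGACY ? "legacy" : "new");
    return 0;
}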
@@ -1056,11 +1076,11 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
1056 | 1076 | ||
1057 | if (rdev->family >= CHIP_RV770) { | 1077 | if (rdev->family >= CHIP_RV770) { |
1058 | if (radeon_crtc->crtc_id) { | 1078 | if (radeon_crtc->crtc_id) { |
1059 | WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0); | 1079 | WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location)); |
1060 | WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0); | 1080 | WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location)); |
1061 | } else { | 1081 | } else { |
1062 | WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0); | 1082 | WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location)); |
1063 | WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0); | 1083 | WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location)); |
1064 | } | 1084 | } |
1065 | } | 1085 | } |
1066 | WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, | 1086 | WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, |
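
Programming the _HIGH surface address registers with upper_32_bits(fb_location) instead of zero lets RV770+ scan out of buffers placed above 4 GB. A tiny sketch of splitting a 64-bit address across a high/low register pair (the helper definitions mirror the usual kernel macros; the address is only an example):

/* Sketch: split a 64-bit scanout address into the values written to
 * a low/high register pair.  The example address is arbitrary. */
#include <stdint.h>
#include <stdio.h>

#define upper_32_bits(n) ((uint32_t)(((uint64_t)(n)) >> 32))
#define lower_32_bits(n) ((uint32_t)(n))

int main(void)
{
    uint64_t fb_location = 0x123456000ULL;      /* lives above the 4 GB mark */

    printf("HIGH = 0x%08x  LOW = 0x%08x\n",
           (unsigned int)upper_32_bits(fb_location),
           (unsigned int)lower_32_bits(fb_location));
    return 0;
}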
@@ -1197,8 +1217,18 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc, | |||
1197 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | 1217 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
1198 | struct drm_device *dev = crtc->dev; | 1218 | struct drm_device *dev = crtc->dev; |
1199 | struct radeon_device *rdev = dev->dev_private; | 1219 | struct radeon_device *rdev = dev->dev_private; |
1220 | struct drm_encoder *encoder; | ||
1221 | bool is_tvcv = false; | ||
1200 | 1222 | ||
1201 | /* TODO color tiling */ | 1223 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
1224 | /* find tv std */ | ||
1225 | if (encoder->crtc == crtc) { | ||
1226 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
1227 | if (radeon_encoder->active_device & | ||
1228 | (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) | ||
1229 | is_tvcv = true; | ||
1230 | } | ||
1231 | } | ||
1202 | 1232 | ||
1203 | atombios_disable_ss(crtc); | 1233 | atombios_disable_ss(crtc); |
1204 | /* always set DCPLL */ | 1234 | /* always set DCPLL */ |
@@ -1207,9 +1237,14 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc, | |||
1207 | atombios_crtc_set_pll(crtc, adjusted_mode); | 1237 | atombios_crtc_set_pll(crtc, adjusted_mode); |
1208 | atombios_enable_ss(crtc); | 1238 | atombios_enable_ss(crtc); |
1209 | 1239 | ||
1210 | if (ASIC_IS_AVIVO(rdev)) | 1240 | if (ASIC_IS_DCE4(rdev)) |
1211 | atombios_set_crtc_dtd_timing(crtc, adjusted_mode); | 1241 | atombios_set_crtc_dtd_timing(crtc, adjusted_mode); |
1212 | else { | 1242 | else if (ASIC_IS_AVIVO(rdev)) { |
1243 | if (is_tvcv) | ||
1244 | atombios_crtc_set_timing(crtc, adjusted_mode); | ||
1245 | else | ||
1246 | atombios_set_crtc_dtd_timing(crtc, adjusted_mode); | ||
1247 | } else { | ||
1213 | atombios_crtc_set_timing(crtc, adjusted_mode); | 1248 | atombios_crtc_set_timing(crtc, adjusted_mode); |
1214 | if (radeon_crtc->crtc_id == 0) | 1249 | if (radeon_crtc->crtc_id == 0) |
1215 | atombios_set_crtc_dtd_timing(crtc, adjusted_mode); | 1250 | atombios_set_crtc_dtd_timing(crtc, adjusted_mode); |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 957d5067ad9c..79082d4398ae 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -675,6 +675,43 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev) | |||
675 | return 0; | 675 | return 0; |
676 | } | 676 | } |
677 | 677 | ||
678 | static int evergreen_cp_start(struct radeon_device *rdev) | ||
679 | { | ||
680 | int r; | ||
681 | uint32_t cp_me; | ||
682 | |||
683 | r = radeon_ring_lock(rdev, 7); | ||
684 | if (r) { | ||
685 | DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); | ||
686 | return r; | ||
687 | } | ||
688 | radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5)); | ||
689 | radeon_ring_write(rdev, 0x1); | ||
690 | radeon_ring_write(rdev, 0x0); | ||
691 | radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1); | ||
692 | radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); | ||
693 | radeon_ring_write(rdev, 0); | ||
694 | radeon_ring_write(rdev, 0); | ||
695 | radeon_ring_unlock_commit(rdev); | ||
696 | |||
697 | cp_me = 0xff; | ||
698 | WREG32(CP_ME_CNTL, cp_me); | ||
699 | |||
700 | r = radeon_ring_lock(rdev, 4); | ||
701 | if (r) { | ||
702 | DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); | ||
703 | return r; | ||
704 | } | ||
705 | /* init some VGT regs */ | ||
706 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); | ||
707 | radeon_ring_write(rdev, (VGT_VERTEX_REUSE_BLOCK_CNTL - PACKET3_SET_CONTEXT_REG_START) >> 2); | ||
708 | radeon_ring_write(rdev, 0xe); | ||
709 | radeon_ring_write(rdev, 0x10); | ||
710 | radeon_ring_unlock_commit(rdev); | ||
711 | |||
712 | return 0; | ||
713 | } | ||
714 | |||
678 | int evergreen_cp_resume(struct radeon_device *rdev) | 715 | int evergreen_cp_resume(struct radeon_device *rdev) |
679 | { | 716 | { |
680 | u32 tmp; | 717 | u32 tmp; |
@@ -719,7 +756,7 @@ int evergreen_cp_resume(struct radeon_device *rdev) | |||
719 | rdev->cp.rptr = RREG32(CP_RB_RPTR); | 756 | rdev->cp.rptr = RREG32(CP_RB_RPTR); |
720 | rdev->cp.wptr = RREG32(CP_RB_WPTR); | 757 | rdev->cp.wptr = RREG32(CP_RB_WPTR); |
721 | 758 | ||
722 | r600_cp_start(rdev); | 759 | evergreen_cp_start(rdev); |
723 | rdev->cp.ready = true; | 760 | rdev->cp.ready = true; |
724 | r = radeon_ring_test(rdev); | 761 | r = radeon_ring_test(rdev); |
725 | if (r) { | 762 | if (r) { |
@@ -1123,14 +1160,25 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
1123 | EVERGREEN_MAX_BACKENDS_MASK)); | 1160 | EVERGREEN_MAX_BACKENDS_MASK)); |
1124 | break; | 1161 | break; |
1125 | } | 1162 | } |
1126 | } else | 1163 | } else { |
1127 | gb_backend_map = | 1164 | switch (rdev->family) { |
1128 | evergreen_get_tile_pipe_to_backend_map(rdev, | 1165 | case CHIP_CYPRESS: |
1129 | rdev->config.evergreen.max_tile_pipes, | 1166 | case CHIP_HEMLOCK: |
1130 | rdev->config.evergreen.max_backends, | 1167 | gb_backend_map = 0x66442200; |
1131 | ((EVERGREEN_MAX_BACKENDS_MASK << | 1168 | break; |
1132 | rdev->config.evergreen.max_backends) & | 1169 | case CHIP_JUNIPER: |
1133 | EVERGREEN_MAX_BACKENDS_MASK)); | 1170 | gb_backend_map = 0x00006420; |
1171 | break; | ||
1172 | default: | ||
1173 | gb_backend_map = | ||
1174 | evergreen_get_tile_pipe_to_backend_map(rdev, | ||
1175 | rdev->config.evergreen.max_tile_pipes, | ||
1176 | rdev->config.evergreen.max_backends, | ||
1177 | ((EVERGREEN_MAX_BACKENDS_MASK << | ||
1178 | rdev->config.evergreen.max_backends) & | ||
1179 | EVERGREEN_MAX_BACKENDS_MASK)); | ||
1180 | } | ||
1181 | } | ||
1134 | 1182 | ||
1135 | rdev->config.evergreen.tile_config = gb_addr_config; | 1183 | rdev->config.evergreen.tile_config = gb_addr_config; |
1136 | WREG32(GB_BACKEND_MAP, gb_backend_map); | 1184 | WREG32(GB_BACKEND_MAP, gb_backend_map); |
@@ -2054,11 +2102,6 @@ int evergreen_resume(struct radeon_device *rdev) | |||
2054 | */ | 2102 | */ |
2055 | /* post card */ | 2103 | /* post card */ |
2056 | atom_asic_init(rdev->mode_info.atom_context); | 2104 | atom_asic_init(rdev->mode_info.atom_context); |
2057 | /* Initialize clocks */ | ||
2058 | r = radeon_clocks_init(rdev); | ||
2059 | if (r) { | ||
2060 | return r; | ||
2061 | } | ||
2062 | 2105 | ||
2063 | r = evergreen_startup(rdev); | 2106 | r = evergreen_startup(rdev); |
2064 | if (r) { | 2107 | if (r) { |
@@ -2164,9 +2207,6 @@ int evergreen_init(struct radeon_device *rdev) | |||
2164 | radeon_surface_init(rdev); | 2207 | radeon_surface_init(rdev); |
2165 | /* Initialize clocks */ | 2208 | /* Initialize clocks */ |
2166 | radeon_get_clock_info(rdev->ddev); | 2209 | radeon_get_clock_info(rdev->ddev); |
2167 | r = radeon_clocks_init(rdev); | ||
2168 | if (r) | ||
2169 | return r; | ||
2170 | /* Fence driver */ | 2210 | /* Fence driver */ |
2171 | r = radeon_fence_driver_init(rdev); | 2211 | r = radeon_fence_driver_init(rdev); |
2172 | if (r) | 2212 | if (r) |
@@ -2236,7 +2276,6 @@ void evergreen_fini(struct radeon_device *rdev) | |||
2236 | evergreen_pcie_gart_fini(rdev); | 2276 | evergreen_pcie_gart_fini(rdev); |
2237 | radeon_gem_fini(rdev); | 2277 | radeon_gem_fini(rdev); |
2238 | radeon_fence_driver_fini(rdev); | 2278 | radeon_fence_driver_fini(rdev); |
2239 | radeon_clocks_fini(rdev); | ||
2240 | radeon_agp_fini(rdev); | 2279 | radeon_agp_fini(rdev); |
2241 | radeon_bo_fini(rdev); | 2280 | radeon_bo_fini(rdev); |
2242 | radeon_atombios_fini(rdev); | 2281 | radeon_atombios_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index e817a0bb5eb4..e151f16a8f86 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -2020,18 +2020,7 @@ bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *l | |||
2020 | return false; | 2020 | return false; |
2021 | } | 2021 | } |
2022 | elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies); | 2022 | elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies); |
2023 | if (elapsed >= 3000) { | 2023 | if (elapsed >= 10000) { |
2024 | /* very likely the improbable case where current | ||
2025 | * rptr is equal to last recorded, a while ago, rptr | ||
2026 | * this is more likely a false positive update tracking | ||
2027 | * information which should force us to be recall at | ||
2028 | * latter point | ||
2029 | */ | ||
2030 | lockup->last_cp_rptr = cp->rptr; | ||
2031 | lockup->last_jiffies = jiffies; | ||
2032 | return false; | ||
2033 | } | ||
2034 | if (elapsed >= 1000) { | ||
2035 | dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed); | 2024 | dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed); |
2036 | return true; | 2025 | return true; |
2037 | } | 2026 | } |
@@ -3308,13 +3297,14 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) | |||
3308 | unsigned long size; | 3297 | unsigned long size; |
3309 | unsigned prim_walk; | 3298 | unsigned prim_walk; |
3310 | unsigned nverts; | 3299 | unsigned nverts; |
3300 | unsigned num_cb = track->num_cb; | ||
3311 | 3301 | ||
3312 | for (i = 0; i < track->num_cb; i++) { | 3302 | if (!track->zb_cb_clear && !track->color_channel_mask && |
3303 | !track->blend_read_enable) | ||
3304 | num_cb = 0; | ||
3305 | |||
3306 | for (i = 0; i < num_cb; i++) { | ||
3313 | if (track->cb[i].robj == NULL) { | 3307 | if (track->cb[i].robj == NULL) { |
3314 | if (!(track->zb_cb_clear || track->color_channel_mask || | ||
3315 | track->blend_read_enable)) { | ||
3316 | continue; | ||
3317 | } | ||
3318 | DRM_ERROR("[drm] No buffer for color buffer %d !\n", i); | 3308 | DRM_ERROR("[drm] No buffer for color buffer %d !\n", i); |
3319 | return -EINVAL; | 3309 | return -EINVAL; |
3320 | } | 3310 | } |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index d0ebae9dde25..7a04959ba0ee 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -2119,10 +2119,7 @@ int r600_cp_start(struct radeon_device *rdev) | |||
2119 | } | 2119 | } |
2120 | radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5)); | 2120 | radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5)); |
2121 | radeon_ring_write(rdev, 0x1); | 2121 | radeon_ring_write(rdev, 0x1); |
2122 | if (rdev->family >= CHIP_CEDAR) { | 2122 | if (rdev->family >= CHIP_RV770) { |
2123 | radeon_ring_write(rdev, 0x0); | ||
2124 | radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1); | ||
2125 | } else if (rdev->family >= CHIP_RV770) { | ||
2126 | radeon_ring_write(rdev, 0x0); | 2123 | radeon_ring_write(rdev, 0x0); |
2127 | radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1); | 2124 | radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1); |
2128 | } else { | 2125 | } else { |
@@ -2489,11 +2486,6 @@ int r600_resume(struct radeon_device *rdev) | |||
2489 | */ | 2486 | */ |
2490 | /* post card */ | 2487 | /* post card */ |
2491 | atom_asic_init(rdev->mode_info.atom_context); | 2488 | atom_asic_init(rdev->mode_info.atom_context); |
2492 | /* Initialize clocks */ | ||
2493 | r = radeon_clocks_init(rdev); | ||
2494 | if (r) { | ||
2495 | return r; | ||
2496 | } | ||
2497 | 2489 | ||
2498 | r = r600_startup(rdev); | 2490 | r = r600_startup(rdev); |
2499 | if (r) { | 2491 | if (r) { |
@@ -2586,9 +2578,6 @@ int r600_init(struct radeon_device *rdev) | |||
2586 | radeon_surface_init(rdev); | 2578 | radeon_surface_init(rdev); |
2587 | /* Initialize clocks */ | 2579 | /* Initialize clocks */ |
2588 | radeon_get_clock_info(rdev->ddev); | 2580 | radeon_get_clock_info(rdev->ddev); |
2589 | r = radeon_clocks_init(rdev); | ||
2590 | if (r) | ||
2591 | return r; | ||
2592 | /* Fence driver */ | 2581 | /* Fence driver */ |
2593 | r = radeon_fence_driver_init(rdev); | 2582 | r = radeon_fence_driver_init(rdev); |
2594 | if (r) | 2583 | if (r) |
@@ -2663,7 +2652,6 @@ void r600_fini(struct radeon_device *rdev) | |||
2663 | radeon_agp_fini(rdev); | 2652 | radeon_agp_fini(rdev); |
2664 | radeon_gem_fini(rdev); | 2653 | radeon_gem_fini(rdev); |
2665 | radeon_fence_driver_fini(rdev); | 2654 | radeon_fence_driver_fini(rdev); |
2666 | radeon_clocks_fini(rdev); | ||
2667 | radeon_bo_fini(rdev); | 2655 | radeon_bo_fini(rdev); |
2668 | radeon_atombios_fini(rdev); | 2656 | radeon_atombios_fini(rdev); |
2669 | kfree(rdev->bios); | 2657 | kfree(rdev->bios); |
@@ -2741,7 +2729,7 @@ int r600_ib_test(struct radeon_device *rdev) | |||
2741 | if (i < rdev->usec_timeout) { | 2729 | if (i < rdev->usec_timeout) { |
2742 | DRM_INFO("ib test succeeded in %u usecs\n", i); | 2730 | DRM_INFO("ib test succeeded in %u usecs\n", i); |
2743 | } else { | 2731 | } else { |
2744 | DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n", | 2732 | DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n", |
2745 | scratch, tmp); | 2733 | scratch, tmp); |
2746 | r = -EINVAL; | 2734 | r = -EINVAL; |
2747 | } | 2735 | } |
@@ -3540,8 +3528,9 @@ void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo) | |||
3540 | /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read | 3528 | /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read |
3541 | * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL | 3529 | * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL |
3542 | */ | 3530 | */ |
3543 | if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) { | 3531 | if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) && |
3544 | void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; | 3532 | rdev->vram_scratch.ptr) { |
3533 | void __iomem *ptr = (void *)rdev->vram_scratch.ptr; | ||
3545 | u32 tmp; | 3534 | u32 tmp; |
3546 | 3535 | ||
3547 | WREG32(HDP_DEBUG1, 0); | 3536 | WREG32(HDP_DEBUG1, 0); |
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index d13622ae74e9..9ceb2a1ce799 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c | |||
@@ -1,3 +1,28 @@ | |||
1 | /* | ||
2 | * Copyright 2009 Advanced Micro Devices, Inc. | ||
3 | * Copyright 2009 Red Hat Inc. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
6 | * copy of this software and associated documentation files (the "Software"), | ||
7 | * to deal in the Software without restriction, including without limitation | ||
8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
9 | * and/or sell copies of the Software, and to permit persons to whom the | ||
10 | * Software is furnished to do so, subject to the following conditions: | ||
11 | * | ||
12 | * The above copyright notice and this permission notice (including the next | ||
13 | * paragraph) shall be included in all copies or substantial portions of the | ||
14 | * Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
22 | * DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | */ | ||
25 | |||
1 | #include "drmP.h" | 26 | #include "drmP.h" |
2 | #include "drm.h" | 27 | #include "drm.h" |
3 | #include "radeon_drm.h" | 28 | #include "radeon_drm.h" |
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.h b/drivers/gpu/drm/radeon/r600_blit_shaders.h index fdc3b378cbb0..f437d36dd98c 100644 --- a/drivers/gpu/drm/radeon/r600_blit_shaders.h +++ b/drivers/gpu/drm/radeon/r600_blit_shaders.h | |||
@@ -1,3 +1,27 @@ | |||
1 | /* | ||
2 | * Copyright 2009 Advanced Micro Devices, Inc. | ||
3 | * Copyright 2009 Red Hat Inc. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
6 | * copy of this software and associated documentation files (the "Software"), | ||
7 | * to deal in the Software without restriction, including without limitation | ||
8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
9 | * and/or sell copies of the Software, and to permit persons to whom the | ||
10 | * Software is furnished to do so, subject to the following conditions: | ||
11 | * | ||
12 | * The above copyright notice and this permission notice (including the next | ||
13 | * paragraph) shall be included in all copies or substantial portions of the | ||
14 | * Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
22 | * DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | */ | ||
1 | 25 | ||
2 | #ifndef R600_BLIT_SHADERS_H | 26 | #ifndef R600_BLIT_SHADERS_H |
3 | #define R600_BLIT_SHADERS_H | 27 | #define R600_BLIT_SHADERS_H |
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index d8864949e387..250a3a918193 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c | |||
@@ -1170,9 +1170,8 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i | |||
1170 | /* using get ib will give us the offset into the mipmap bo */ | 1170 | /* using get ib will give us the offset into the mipmap bo */ |
1171 | word0 = radeon_get_ib_value(p, idx + 3) << 8; | 1171 | word0 = radeon_get_ib_value(p, idx + 3) << 8; |
1172 | if ((mipmap_size + word0) > radeon_bo_size(mipmap)) { | 1172 | if ((mipmap_size + word0) > radeon_bo_size(mipmap)) { |
1173 | dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n", | 1173 | /*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n", |
1174 | w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(texture)); | 1174 | w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(texture));*/ |
1175 | return -EINVAL; | ||
1176 | } | 1175 | } |
1177 | return 0; | 1176 | return 0; |
1178 | } | 1177 | } |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 3dfcfa3ca425..a168d644bf9e 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -1013,6 +1013,11 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, | |||
1013 | int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, | 1013 | int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, |
1014 | struct drm_file *filp); | 1014 | struct drm_file *filp); |
1015 | 1015 | ||
1016 | /* VRAM scratch page for HDP bug */ | ||
1017 | struct r700_vram_scratch { | ||
1018 | struct radeon_bo *robj; | ||
1019 | volatile uint32_t *ptr; | ||
1020 | }; | ||
1016 | 1021 | ||
1017 | /* | 1022 | /* |
1018 | * Core structure, functions and helpers. | 1023 | * Core structure, functions and helpers. |
@@ -1079,6 +1084,7 @@ struct radeon_device { | |||
1079 | const struct firmware *pfp_fw; /* r6/700 PFP firmware */ | 1084 | const struct firmware *pfp_fw; /* r6/700 PFP firmware */ |
1080 | const struct firmware *rlc_fw; /* r6/700 RLC firmware */ | 1085 | const struct firmware *rlc_fw; /* r6/700 RLC firmware */ |
1081 | struct r600_blit r600_blit; | 1086 | struct r600_blit r600_blit; |
1087 | struct r700_vram_scratch vram_scratch; | ||
1082 | int msi_enabled; /* msi enabled */ | 1088 | int msi_enabled; /* msi enabled */ |
1083 | struct r600_ih ih; /* r6/700 interrupt ring */ | 1089 | struct r600_ih ih; /* r6/700 interrupt ring */ |
1084 | struct workqueue_struct *wq; | 1090 | struct workqueue_struct *wq; |
@@ -1333,8 +1339,6 @@ extern bool radeon_card_posted(struct radeon_device *rdev); | |||
1333 | extern void radeon_update_bandwidth_info(struct radeon_device *rdev); | 1339 | extern void radeon_update_bandwidth_info(struct radeon_device *rdev); |
1334 | extern void radeon_update_display_priority(struct radeon_device *rdev); | 1340 | extern void radeon_update_display_priority(struct radeon_device *rdev); |
1335 | extern bool radeon_boot_test_post_card(struct radeon_device *rdev); | 1341 | extern bool radeon_boot_test_post_card(struct radeon_device *rdev); |
1336 | extern int radeon_clocks_init(struct radeon_device *rdev); | ||
1337 | extern void radeon_clocks_fini(struct radeon_device *rdev); | ||
1338 | extern void radeon_scratch_init(struct radeon_device *rdev); | 1342 | extern void radeon_scratch_init(struct radeon_device *rdev); |
1339 | extern void radeon_surface_init(struct radeon_device *rdev); | 1343 | extern void radeon_surface_init(struct radeon_device *rdev); |
1340 | extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data); | 1344 | extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data); |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index a21bf88e8c2d..25e1dd197791 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
@@ -858,21 +858,3 @@ int radeon_asic_init(struct radeon_device *rdev) | |||
858 | return 0; | 858 | return 0; |
859 | } | 859 | } |
860 | 860 | ||
861 | /* | ||
862 | * Wrapper around modesetting bits. Move to radeon_clocks.c? | ||
863 | */ | ||
864 | int radeon_clocks_init(struct radeon_device *rdev) | ||
865 | { | ||
866 | int r; | ||
867 | |||
868 | r = radeon_static_clocks_init(rdev->ddev); | ||
869 | if (r) { | ||
870 | return r; | ||
871 | } | ||
872 | DRM_INFO("Clocks initialized !\n"); | ||
873 | return 0; | ||
874 | } | ||
875 | |||
876 | void radeon_clocks_fini(struct radeon_device *rdev) | ||
877 | { | ||
878 | } | ||
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 61141981880d..68932ba7b8a4 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
@@ -85,6 +85,19 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev | |||
85 | for (i = 0; i < num_indices; i++) { | 85 | for (i = 0; i < num_indices; i++) { |
86 | gpio = &i2c_info->asGPIO_Info[i]; | 86 | gpio = &i2c_info->asGPIO_Info[i]; |
87 | 87 | ||
88 | /* some evergreen boards have bad data for this entry */ | ||
89 | if (ASIC_IS_DCE4(rdev)) { | ||
90 | if ((i == 7) && | ||
91 | (gpio->usClkMaskRegisterIndex == 0x1936) && | ||
92 | (gpio->sucI2cId.ucAccess == 0)) { | ||
93 | gpio->sucI2cId.ucAccess = 0x97; | ||
94 | gpio->ucDataMaskShift = 8; | ||
95 | gpio->ucDataEnShift = 8; | ||
96 | gpio->ucDataY_Shift = 8; | ||
97 | gpio->ucDataA_Shift = 8; | ||
98 | } | ||
99 | } | ||
100 | |||
88 | if (gpio->sucI2cId.ucAccess == id) { | 101 | if (gpio->sucI2cId.ucAccess == id) { |
89 | i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; | 102 | i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; |
90 | i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; | 103 | i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; |
@@ -147,6 +160,20 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev) | |||
147 | for (i = 0; i < num_indices; i++) { | 160 | for (i = 0; i < num_indices; i++) { |
148 | gpio = &i2c_info->asGPIO_Info[i]; | 161 | gpio = &i2c_info->asGPIO_Info[i]; |
149 | i2c.valid = false; | 162 | i2c.valid = false; |
163 | |||
164 | /* some evergreen boards have bad data for this entry */ | ||
165 | if (ASIC_IS_DCE4(rdev)) { | ||
166 | if ((i == 7) && | ||
167 | (gpio->usClkMaskRegisterIndex == 0x1936) && | ||
168 | (gpio->sucI2cId.ucAccess == 0)) { | ||
169 | gpio->sucI2cId.ucAccess = 0x97; | ||
170 | gpio->ucDataMaskShift = 8; | ||
171 | gpio->ucDataEnShift = 8; | ||
172 | gpio->ucDataY_Shift = 8; | ||
173 | gpio->ucDataA_Shift = 8; | ||
174 | } | ||
175 | } | ||
176 | |||
150 | i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; | 177 | i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; |
151 | i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; | 178 | i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; |
152 | i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; | 179 | i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; |
@@ -290,6 +317,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, | |||
290 | *connector_type = DRM_MODE_CONNECTOR_DVID; | 317 | *connector_type = DRM_MODE_CONNECTOR_DVID; |
291 | } | 318 | } |
292 | 319 | ||
320 | /* MSI K9A2GM V2/V3 board has no HDMI or DVI */ | ||
321 | if ((dev->pdev->device == 0x796e) && | ||
322 | (dev->pdev->subsystem_vendor == 0x1462) && | ||
323 | (dev->pdev->subsystem_device == 0x7302)) { | ||
324 | if ((supported_device == ATOM_DEVICE_DFP2_SUPPORT) || | ||
325 | (supported_device == ATOM_DEVICE_DFP3_SUPPORT)) | ||
326 | return false; | ||
327 | } | ||
328 | |||
293 | /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */ | 329 | /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */ |
294 | if ((dev->pdev->device == 0x7941) && | 330 | if ((dev->pdev->device == 0x7941) && |
295 | (dev->pdev->subsystem_vendor == 0x147b) && | 331 | (dev->pdev->subsystem_vendor == 0x147b) && |
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c index 14448a740ba6..5249af8931e6 100644 --- a/drivers/gpu/drm/radeon/radeon_clocks.c +++ b/drivers/gpu/drm/radeon/radeon_clocks.c | |||
@@ -327,6 +327,14 @@ void radeon_get_clock_info(struct drm_device *dev) | |||
327 | mpll->max_feedback_div = 0xff; | 327 | mpll->max_feedback_div = 0xff; |
328 | mpll->best_vco = 0; | 328 | mpll->best_vco = 0; |
329 | 329 | ||
330 | if (!rdev->clock.default_sclk) | ||
331 | rdev->clock.default_sclk = radeon_get_engine_clock(rdev); | ||
332 | if ((!rdev->clock.default_mclk) && rdev->asic->get_memory_clock) | ||
333 | rdev->clock.default_mclk = radeon_get_memory_clock(rdev); | ||
334 | |||
335 | rdev->pm.current_sclk = rdev->clock.default_sclk; | ||
336 | rdev->pm.current_mclk = rdev->clock.default_mclk; | ||
337 | |||
330 | } | 338 | } |
331 | 339 | ||
332 | /* 10 khz */ | 340 | /* 10 khz */ |
@@ -897,53 +905,3 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable) | |||
897 | } | 905 | } |
898 | } | 906 | } |
899 | 907 | ||
900 | static void radeon_apply_clock_quirks(struct radeon_device *rdev) | ||
901 | { | ||
902 | uint32_t tmp; | ||
903 | |||
904 | /* XXX make sure engine is idle */ | ||
905 | |||
906 | if (rdev->family < CHIP_RS600) { | ||
907 | tmp = RREG32_PLL(RADEON_SCLK_CNTL); | ||
908 | if (ASIC_IS_R300(rdev) || ASIC_IS_RV100(rdev)) | ||
909 | tmp |= RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_VIP; | ||
910 | if ((rdev->family == CHIP_RV250) | ||
911 | || (rdev->family == CHIP_RV280)) | ||
912 | tmp |= | ||
913 | RADEON_SCLK_FORCE_DISP1 | RADEON_SCLK_FORCE_DISP2; | ||
914 | if ((rdev->family == CHIP_RV350) | ||
915 | || (rdev->family == CHIP_RV380)) | ||
916 | tmp |= R300_SCLK_FORCE_VAP; | ||
917 | if (rdev->family == CHIP_R420) | ||
918 | tmp |= R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX; | ||
919 | WREG32_PLL(RADEON_SCLK_CNTL, tmp); | ||
920 | } else if (rdev->family < CHIP_R600) { | ||
921 | tmp = RREG32_PLL(AVIVO_CP_DYN_CNTL); | ||
922 | tmp |= AVIVO_CP_FORCEON; | ||
923 | WREG32_PLL(AVIVO_CP_DYN_CNTL, tmp); | ||
924 | |||
925 | tmp = RREG32_PLL(AVIVO_E2_DYN_CNTL); | ||
926 | tmp |= AVIVO_E2_FORCEON; | ||
927 | WREG32_PLL(AVIVO_E2_DYN_CNTL, tmp); | ||
928 | |||
929 | tmp = RREG32_PLL(AVIVO_IDCT_DYN_CNTL); | ||
930 | tmp |= AVIVO_IDCT_FORCEON; | ||
931 | WREG32_PLL(AVIVO_IDCT_DYN_CNTL, tmp); | ||
932 | } | ||
933 | } | ||
934 | |||
935 | int radeon_static_clocks_init(struct drm_device *dev) | ||
936 | { | ||
937 | struct radeon_device *rdev = dev->dev_private; | ||
938 | |||
939 | /* XXX make sure engine is idle */ | ||
940 | |||
941 | if (radeon_dynclks != -1) { | ||
942 | if (radeon_dynclks) { | ||
943 | if (rdev->asic->set_clock_gating) | ||
944 | radeon_set_clock_gating(rdev, 1); | ||
945 | } | ||
946 | } | ||
947 | radeon_apply_clock_quirks(rdev); | ||
948 | return 0; | ||
949 | } | ||
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index bd74e428bd14..a04b7a6ad95f 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
@@ -1485,6 +1485,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1485 | /* PowerMac8,1 ? */ | 1485 | /* PowerMac8,1 ? */ |
1486 | /* imac g5 isight */ | 1486 | /* imac g5 isight */ |
1487 | rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT; | 1487 | rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT; |
1488 | } else if ((rdev->pdev->device == 0x4a48) && | ||
1489 | (rdev->pdev->subsystem_vendor == 0x1002) && | ||
1490 | (rdev->pdev->subsystem_device == 0x4a48)) { | ||
1491 | /* Mac X800 */ | ||
1492 | rdev->mode_info.connector_table = CT_MAC_X800; | ||
1488 | } else | 1493 | } else |
1489 | #endif /* CONFIG_PPC_PMAC */ | 1494 | #endif /* CONFIG_PPC_PMAC */ |
1490 | #ifdef CONFIG_PPC64 | 1495 | #ifdef CONFIG_PPC64 |
@@ -1961,6 +1966,48 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1961 | CONNECTOR_OBJECT_ID_VGA, | 1966 | CONNECTOR_OBJECT_ID_VGA, |
1962 | &hpd); | 1967 | &hpd); |
1963 | break; | 1968 | break; |
1969 | case CT_MAC_X800: | ||
1970 | DRM_INFO("Connector Table: %d (mac x800)\n", | ||
1971 | rdev->mode_info.connector_table); | ||
1972 | /* DVI - primary dac, internal tmds */ | ||
1973 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); | ||
1974 | hpd.hpd = RADEON_HPD_1; /* ??? */ | ||
1975 | radeon_add_legacy_encoder(dev, | ||
1976 | radeon_get_encoder_enum(dev, | ||
1977 | ATOM_DEVICE_DFP1_SUPPORT, | ||
1978 | 0), | ||
1979 | ATOM_DEVICE_DFP1_SUPPORT); | ||
1980 | radeon_add_legacy_encoder(dev, | ||
1981 | radeon_get_encoder_enum(dev, | ||
1982 | ATOM_DEVICE_CRT1_SUPPORT, | ||
1983 | 1), | ||
1984 | ATOM_DEVICE_CRT1_SUPPORT); | ||
1985 | radeon_add_legacy_connector(dev, 0, | ||
1986 | ATOM_DEVICE_DFP1_SUPPORT | | ||
1987 | ATOM_DEVICE_CRT1_SUPPORT, | ||
1988 | DRM_MODE_CONNECTOR_DVII, &ddc_i2c, | ||
1989 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I, | ||
1990 | &hpd); | ||
1991 | /* DVI - tv dac, dvo */ | ||
1992 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0); | ||
1993 | hpd.hpd = RADEON_HPD_2; /* ??? */ | ||
1994 | radeon_add_legacy_encoder(dev, | ||
1995 | radeon_get_encoder_enum(dev, | ||
1996 | ATOM_DEVICE_DFP2_SUPPORT, | ||
1997 | 0), | ||
1998 | ATOM_DEVICE_DFP2_SUPPORT); | ||
1999 | radeon_add_legacy_encoder(dev, | ||
2000 | radeon_get_encoder_enum(dev, | ||
2001 | ATOM_DEVICE_CRT2_SUPPORT, | ||
2002 | 2), | ||
2003 | ATOM_DEVICE_CRT2_SUPPORT); | ||
2004 | radeon_add_legacy_connector(dev, 1, | ||
2005 | ATOM_DEVICE_DFP2_SUPPORT | | ||
2006 | ATOM_DEVICE_CRT2_SUPPORT, | ||
2007 | DRM_MODE_CONNECTOR_DVII, &ddc_i2c, | ||
2008 | CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I, | ||
2009 | &hpd); | ||
2010 | break; | ||
1964 | default: | 2011 | default: |
1965 | DRM_INFO("Connector table: %d (invalid)\n", | 2012 | DRM_INFO("Connector table: %d (invalid)\n", |
1966 | rdev->mode_info.connector_table); | 2013 | rdev->mode_info.connector_table); |
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 1a5ee392e9c7..ecc1a8fafbfd 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
@@ -481,7 +481,8 @@ static int radeon_lvds_mode_valid(struct drm_connector *connector, | |||
481 | return MODE_OK; | 481 | return MODE_OK; |
482 | } | 482 | } |
483 | 483 | ||
484 | static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connector) | 484 | static enum drm_connector_status |
485 | radeon_lvds_detect(struct drm_connector *connector, bool force) | ||
485 | { | 486 | { |
486 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 487 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
487 | struct drm_encoder *encoder = radeon_best_single_encoder(connector); | 488 | struct drm_encoder *encoder = radeon_best_single_encoder(connector); |
@@ -594,7 +595,8 @@ static int radeon_vga_mode_valid(struct drm_connector *connector, | |||
594 | return MODE_OK; | 595 | return MODE_OK; |
595 | } | 596 | } |
596 | 597 | ||
597 | static enum drm_connector_status radeon_vga_detect(struct drm_connector *connector) | 598 | static enum drm_connector_status |
599 | radeon_vga_detect(struct drm_connector *connector, bool force) | ||
598 | { | 600 | { |
599 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 601 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
600 | struct drm_encoder *encoder; | 602 | struct drm_encoder *encoder; |
@@ -691,7 +693,8 @@ static int radeon_tv_mode_valid(struct drm_connector *connector, | |||
691 | return MODE_OK; | 693 | return MODE_OK; |
692 | } | 694 | } |
693 | 695 | ||
694 | static enum drm_connector_status radeon_tv_detect(struct drm_connector *connector) | 696 | static enum drm_connector_status |
697 | radeon_tv_detect(struct drm_connector *connector, bool force) | ||
695 | { | 698 | { |
696 | struct drm_encoder *encoder; | 699 | struct drm_encoder *encoder; |
697 | struct drm_encoder_helper_funcs *encoder_funcs; | 700 | struct drm_encoder_helper_funcs *encoder_funcs; |
@@ -748,7 +751,8 @@ static int radeon_dvi_get_modes(struct drm_connector *connector) | |||
748 | * we have to check if this analog encoder is shared with anyone else (TV) | 751 | * we have to check if this analog encoder is shared with anyone else (TV) |
749 | * if its shared we have to set the other connector to disconnected. | 752 | * if its shared we have to set the other connector to disconnected. |
750 | */ | 753 | */ |
751 | static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connector) | 754 | static enum drm_connector_status |
755 | radeon_dvi_detect(struct drm_connector *connector, bool force) | ||
752 | { | 756 | { |
753 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 757 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
754 | struct drm_encoder *encoder = NULL; | 758 | struct drm_encoder *encoder = NULL; |
@@ -972,7 +976,8 @@ static int radeon_dp_get_modes(struct drm_connector *connector) | |||
972 | return ret; | 976 | return ret; |
973 | } | 977 | } |
974 | 978 | ||
975 | static enum drm_connector_status radeon_dp_detect(struct drm_connector *connector) | 979 | static enum drm_connector_status |
980 | radeon_dp_detect(struct drm_connector *connector, bool force) | ||
976 | { | 981 | { |
977 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 982 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
978 | enum drm_connector_status ret = connector_status_disconnected; | 983 | enum drm_connector_status ret = connector_status_disconnected; |
@@ -1051,10 +1056,16 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
1051 | uint32_t subpixel_order = SubPixelNone; | 1056 | uint32_t subpixel_order = SubPixelNone; |
1052 | bool shared_ddc = false; | 1057 | bool shared_ddc = false; |
1053 | 1058 | ||
1054 | /* fixme - tv/cv/din */ | ||
1055 | if (connector_type == DRM_MODE_CONNECTOR_Unknown) | 1059 | if (connector_type == DRM_MODE_CONNECTOR_Unknown) |
1056 | return; | 1060 | return; |
1057 | 1061 | ||
1062 | /* if the user selected tv=0 don't try and add the connector */ | ||
1063 | if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) || | ||
1064 | (connector_type == DRM_MODE_CONNECTOR_Composite) || | ||
1065 | (connector_type == DRM_MODE_CONNECTOR_9PinDIN)) && | ||
1066 | (radeon_tv == 0)) | ||
1067 | return; | ||
1068 | |||
1058 | /* see if we already added it */ | 1069 | /* see if we already added it */ |
1059 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 1070 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
1060 | radeon_connector = to_radeon_connector(connector); | 1071 | radeon_connector = to_radeon_connector(connector); |
@@ -1209,19 +1220,17 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
1209 | case DRM_MODE_CONNECTOR_SVIDEO: | 1220 | case DRM_MODE_CONNECTOR_SVIDEO: |
1210 | case DRM_MODE_CONNECTOR_Composite: | 1221 | case DRM_MODE_CONNECTOR_Composite: |
1211 | case DRM_MODE_CONNECTOR_9PinDIN: | 1222 | case DRM_MODE_CONNECTOR_9PinDIN: |
1212 | if (radeon_tv == 1) { | 1223 | drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); |
1213 | drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); | 1224 | drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); |
1214 | drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); | 1225 | radeon_connector->dac_load_detect = true; |
1215 | radeon_connector->dac_load_detect = true; | 1226 | drm_connector_attach_property(&radeon_connector->base, |
1216 | drm_connector_attach_property(&radeon_connector->base, | 1227 | rdev->mode_info.load_detect_property, |
1217 | rdev->mode_info.load_detect_property, | 1228 | 1); |
1218 | 1); | 1229 | drm_connector_attach_property(&radeon_connector->base, |
1219 | drm_connector_attach_property(&radeon_connector->base, | 1230 | rdev->mode_info.tv_std_property, |
1220 | rdev->mode_info.tv_std_property, | 1231 | radeon_atombios_get_tv_info(rdev)); |
1221 | radeon_atombios_get_tv_info(rdev)); | 1232 | /* no HPD on analog connectors */ |
1222 | /* no HPD on analog connectors */ | 1233 | radeon_connector->hpd.hpd = RADEON_HPD_NONE; |
1223 | radeon_connector->hpd.hpd = RADEON_HPD_NONE; | ||
1224 | } | ||
1225 | break; | 1234 | break; |
1226 | case DRM_MODE_CONNECTOR_LVDS: | 1235 | case DRM_MODE_CONNECTOR_LVDS: |
1227 | radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); | 1236 | radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); |
@@ -1272,10 +1281,16 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
1272 | struct radeon_connector *radeon_connector; | 1281 | struct radeon_connector *radeon_connector; |
1273 | uint32_t subpixel_order = SubPixelNone; | 1282 | uint32_t subpixel_order = SubPixelNone; |
1274 | 1283 | ||
1275 | /* fixme - tv/cv/din */ | ||
1276 | if (connector_type == DRM_MODE_CONNECTOR_Unknown) | 1284 | if (connector_type == DRM_MODE_CONNECTOR_Unknown) |
1277 | return; | 1285 | return; |
1278 | 1286 | ||
1287 | /* if the user selected tv=0 don't try and add the connector */ | ||
1288 | if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) || | ||
1289 | (connector_type == DRM_MODE_CONNECTOR_Composite) || | ||
1290 | (connector_type == DRM_MODE_CONNECTOR_9PinDIN)) && | ||
1291 | (radeon_tv == 0)) | ||
1292 | return; | ||
1293 | |||
1279 | /* see if we already added it */ | 1294 | /* see if we already added it */ |
1280 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 1295 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
1281 | radeon_connector = to_radeon_connector(connector); | 1296 | radeon_connector = to_radeon_connector(connector); |
@@ -1347,26 +1362,24 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
1347 | case DRM_MODE_CONNECTOR_SVIDEO: | 1362 | case DRM_MODE_CONNECTOR_SVIDEO: |
1348 | case DRM_MODE_CONNECTOR_Composite: | 1363 | case DRM_MODE_CONNECTOR_Composite: |
1349 | case DRM_MODE_CONNECTOR_9PinDIN: | 1364 | case DRM_MODE_CONNECTOR_9PinDIN: |
1350 | if (radeon_tv == 1) { | 1365 | drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); |
1351 | drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); | 1366 | drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); |
1352 | drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); | 1367 | radeon_connector->dac_load_detect = true; |
1353 | radeon_connector->dac_load_detect = true; | 1368 | /* RS400,RC410,RS480 chipset seems to report a lot |
1354 | /* RS400,RC410,RS480 chipset seems to report a lot | 1369 | * of false positive on load detect, we haven't yet |
1355 | * of false positive on load detect, we haven't yet | 1370 | * found a way to make load detect reliable on those |
1356 | * found a way to make load detect reliable on those | 1371 | * chipset, thus just disable it for TV. |
1357 | * chipset, thus just disable it for TV. | 1372 | */ |
1358 | */ | 1373 | if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) |
1359 | if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) | 1374 | radeon_connector->dac_load_detect = false; |
1360 | radeon_connector->dac_load_detect = false; | 1375 | drm_connector_attach_property(&radeon_connector->base, |
1361 | drm_connector_attach_property(&radeon_connector->base, | 1376 | rdev->mode_info.load_detect_property, |
1362 | rdev->mode_info.load_detect_property, | 1377 | radeon_connector->dac_load_detect); |
1363 | radeon_connector->dac_load_detect); | 1378 | drm_connector_attach_property(&radeon_connector->base, |
1364 | drm_connector_attach_property(&radeon_connector->base, | 1379 | rdev->mode_info.tv_std_property, |
1365 | rdev->mode_info.tv_std_property, | 1380 | radeon_combios_get_tv_info(rdev)); |
1366 | radeon_combios_get_tv_info(rdev)); | 1381 | /* no HPD on analog connectors */ |
1367 | /* no HPD on analog connectors */ | 1382 | radeon_connector->hpd.hpd = RADEON_HPD_NONE; |
1368 | radeon_connector->hpd.hpd = RADEON_HPD_NONE; | ||
1369 | } | ||
1370 | break; | 1383 | break; |
1371 | case DRM_MODE_CONNECTOR_LVDS: | 1384 | case DRM_MODE_CONNECTOR_LVDS: |
1372 | drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); | 1385 | drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 69b3c2291e92..256d204a6d24 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -293,30 +293,20 @@ bool radeon_card_posted(struct radeon_device *rdev) | |||
293 | void radeon_update_bandwidth_info(struct radeon_device *rdev) | 293 | void radeon_update_bandwidth_info(struct radeon_device *rdev) |
294 | { | 294 | { |
295 | fixed20_12 a; | 295 | fixed20_12 a; |
296 | u32 sclk, mclk; | 296 | u32 sclk = rdev->pm.current_sclk; |
297 | u32 mclk = rdev->pm.current_mclk; | ||
297 | 298 | ||
298 | if (rdev->flags & RADEON_IS_IGP) { | 299 | /* sclk/mclk in Mhz */ |
299 | sclk = radeon_get_engine_clock(rdev); | 300 | a.full = dfixed_const(100); |
300 | mclk = rdev->clock.default_mclk; | 301 | rdev->pm.sclk.full = dfixed_const(sclk); |
301 | 302 | rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a); | |
302 | a.full = dfixed_const(100); | 303 | rdev->pm.mclk.full = dfixed_const(mclk); |
303 | rdev->pm.sclk.full = dfixed_const(sclk); | 304 | rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a); |
304 | rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a); | ||
305 | rdev->pm.mclk.full = dfixed_const(mclk); | ||
306 | rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a); | ||
307 | 305 | ||
306 | if (rdev->flags & RADEON_IS_IGP) { | ||
308 | a.full = dfixed_const(16); | 307 | a.full = dfixed_const(16); |
309 | /* core_bandwidth = sclk(Mhz) * 16 */ | 308 | /* core_bandwidth = sclk(Mhz) * 16 */ |
310 | rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a); | 309 | rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a); |
311 | } else { | ||
312 | sclk = radeon_get_engine_clock(rdev); | ||
313 | mclk = radeon_get_memory_clock(rdev); | ||
314 | |||
315 | a.full = dfixed_const(100); | ||
316 | rdev->pm.sclk.full = dfixed_const(sclk); | ||
317 | rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a); | ||
318 | rdev->pm.mclk.full = dfixed_const(mclk); | ||
319 | rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a); | ||
320 | } | 310 | } |
321 | } | 311 | } |
322 | 312 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 6dd434ad2429..b92d2f2fcbed 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -349,6 +349,8 @@ static void radeon_print_display_setup(struct drm_device *dev) | |||
349 | DRM_INFO(" DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]); | 349 | DRM_INFO(" DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]); |
350 | if (devices & ATOM_DEVICE_DFP5_SUPPORT) | 350 | if (devices & ATOM_DEVICE_DFP5_SUPPORT) |
351 | DRM_INFO(" DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]); | 351 | DRM_INFO(" DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]); |
352 | if (devices & ATOM_DEVICE_DFP6_SUPPORT) | ||
353 | DRM_INFO(" DFP6: %s\n", encoder_names[radeon_encoder->encoder_id]); | ||
352 | if (devices & ATOM_DEVICE_TV1_SUPPORT) | 354 | if (devices & ATOM_DEVICE_TV1_SUPPORT) |
353 | DRM_INFO(" TV1: %s\n", encoder_names[radeon_encoder->encoder_id]); | 355 | DRM_INFO(" TV1: %s\n", encoder_names[radeon_encoder->encoder_id]); |
354 | if (devices & ATOM_DEVICE_CV_SUPPORT) | 356 | if (devices & ATOM_DEVICE_CV_SUPPORT) |
@@ -841,8 +843,9 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) | |||
841 | { | 843 | { |
842 | struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); | 844 | struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); |
843 | 845 | ||
844 | if (radeon_fb->obj) | 846 | if (radeon_fb->obj) { |
845 | drm_gem_object_unreference_unlocked(radeon_fb->obj); | 847 | drm_gem_object_unreference_unlocked(radeon_fb->obj); |
848 | } | ||
846 | drm_framebuffer_cleanup(fb); | 849 | drm_framebuffer_cleanup(fb); |
847 | kfree(radeon_fb); | 850 | kfree(radeon_fb); |
848 | } | 851 | } |
@@ -1140,17 +1143,18 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, | |||
1140 | radeon_crtc->rmx_type = radeon_encoder->rmx_type; | 1143 | radeon_crtc->rmx_type = radeon_encoder->rmx_type; |
1141 | else | 1144 | else |
1142 | radeon_crtc->rmx_type = RMX_OFF; | 1145 | radeon_crtc->rmx_type = RMX_OFF; |
1143 | src_v = crtc->mode.vdisplay; | ||
1144 | dst_v = radeon_crtc->native_mode.vdisplay; | ||
1145 | src_h = crtc->mode.hdisplay; | ||
1146 | dst_h = radeon_crtc->native_mode.vdisplay; | ||
1147 | /* copy native mode */ | 1146 | /* copy native mode */ |
1148 | memcpy(&radeon_crtc->native_mode, | 1147 | memcpy(&radeon_crtc->native_mode, |
1149 | &radeon_encoder->native_mode, | 1148 | &radeon_encoder->native_mode, |
1150 | sizeof(struct drm_display_mode)); | 1149 | sizeof(struct drm_display_mode)); |
1150 | src_v = crtc->mode.vdisplay; | ||
1151 | dst_v = radeon_crtc->native_mode.vdisplay; | ||
1152 | src_h = crtc->mode.hdisplay; | ||
1153 | dst_h = radeon_crtc->native_mode.hdisplay; | ||
1151 | 1154 | ||
1152 | /* fix up for overscan on hdmi */ | 1155 | /* fix up for overscan on hdmi */ |
1153 | if (ASIC_IS_AVIVO(rdev) && | 1156 | if (ASIC_IS_AVIVO(rdev) && |
1157 | (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) && | ||
1154 | ((radeon_encoder->underscan_type == UNDERSCAN_ON) || | 1158 | ((radeon_encoder->underscan_type == UNDERSCAN_ON) || |
1155 | ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) && | 1159 | ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) && |
1156 | drm_detect_hdmi_monitor(radeon_connector->edid) && | 1160 | drm_detect_hdmi_monitor(radeon_connector->edid) && |
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index c74a8b20d941..9cdf6a35bc2c 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c | |||
@@ -94,8 +94,10 @@ static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj) | |||
94 | ret = radeon_bo_reserve(rbo, false); | 94 | ret = radeon_bo_reserve(rbo, false); |
95 | if (likely(ret == 0)) { | 95 | if (likely(ret == 0)) { |
96 | radeon_bo_kunmap(rbo); | 96 | radeon_bo_kunmap(rbo); |
97 | radeon_bo_unpin(rbo); | ||
97 | radeon_bo_unreserve(rbo); | 98 | radeon_bo_unreserve(rbo); |
98 | } | 99 | } |
100 | drm_gem_object_handle_unreference(gobj); | ||
99 | drm_gem_object_unreference_unlocked(gobj); | 101 | drm_gem_object_unreference_unlocked(gobj); |
100 | } | 102 | } |
101 | 103 | ||
@@ -325,8 +327,6 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb | |||
325 | { | 327 | { |
326 | struct fb_info *info; | 328 | struct fb_info *info; |
327 | struct radeon_framebuffer *rfb = &rfbdev->rfb; | 329 | struct radeon_framebuffer *rfb = &rfbdev->rfb; |
328 | struct radeon_bo *rbo; | ||
329 | int r; | ||
330 | 330 | ||
331 | if (rfbdev->helper.fbdev) { | 331 | if (rfbdev->helper.fbdev) { |
332 | info = rfbdev->helper.fbdev; | 332 | info = rfbdev->helper.fbdev; |
@@ -338,14 +338,8 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb | |||
338 | } | 338 | } |
339 | 339 | ||
340 | if (rfb->obj) { | 340 | if (rfb->obj) { |
341 | rbo = rfb->obj->driver_private; | 341 | radeonfb_destroy_pinned_object(rfb->obj); |
342 | r = radeon_bo_reserve(rbo, false); | 342 | rfb->obj = NULL; |
343 | if (likely(r == 0)) { | ||
344 | radeon_bo_kunmap(rbo); | ||
345 | radeon_bo_unpin(rbo); | ||
346 | radeon_bo_unreserve(rbo); | ||
347 | } | ||
348 | drm_gem_object_unreference_unlocked(rfb->obj); | ||
349 | } | 343 | } |
350 | drm_fb_helper_fini(&rfbdev->helper); | 344 | drm_fb_helper_fini(&rfbdev->helper); |
351 | drm_framebuffer_cleanup(&rfb->base); | 345 | drm_framebuffer_cleanup(&rfb->base); |
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index c578f265b24c..d1e595d91723 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c | |||
@@ -201,11 +201,11 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data, | |||
201 | return r; | 201 | return r; |
202 | } | 202 | } |
203 | r = drm_gem_handle_create(filp, gobj, &handle); | 203 | r = drm_gem_handle_create(filp, gobj, &handle); |
204 | /* drop reference from allocate - handle holds it now */ | ||
205 | drm_gem_object_unreference_unlocked(gobj); | ||
204 | if (r) { | 206 | if (r) { |
205 | drm_gem_object_unreference_unlocked(gobj); | ||
206 | return r; | 207 | return r; |
207 | } | 208 | } |
208 | drm_gem_object_handle_unreference_unlocked(gobj); | ||
209 | args->handle = handle; | 209 | args->handle = handle; |
210 | return 0; | 210 | return 0; |
211 | } | 211 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index 0416804d8f30..6a13ee38a5b9 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c | |||
@@ -213,7 +213,7 @@ static void post_xfer(struct i2c_adapter *i2c_adap) | |||
213 | 213 | ||
214 | static u32 radeon_get_i2c_prescale(struct radeon_device *rdev) | 214 | static u32 radeon_get_i2c_prescale(struct radeon_device *rdev) |
215 | { | 215 | { |
216 | u32 sclk = radeon_get_engine_clock(rdev); | 216 | u32 sclk = rdev->pm.current_sclk; |
217 | u32 prescale = 0; | 217 | u32 prescale = 0; |
218 | u32 nm; | 218 | u32 nm; |
219 | u8 n, m, loop; | 219 | u8 n, m, loop; |
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 5eee3c41d124..8fbbe1c6ebbd 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c | |||
@@ -203,6 +203,10 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
203 | */ | 203 | */ |
204 | int radeon_driver_firstopen_kms(struct drm_device *dev) | 204 | int radeon_driver_firstopen_kms(struct drm_device *dev) |
205 | { | 205 | { |
206 | struct radeon_device *rdev = dev->dev_private; | ||
207 | |||
208 | if (rdev->powered_down) | ||
209 | return -EINVAL; | ||
206 | return 0; | 210 | return 0; |
207 | } | 211 | } |
208 | 212 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 8f93e2b4b0c8..17a6602b5885 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
@@ -204,7 +204,7 @@ struct radeon_i2c_chan { | |||
204 | 204 | ||
205 | /* mostly for macs, but really any system without connector tables */ | 205 | /* mostly for macs, but really any system without connector tables */ |
206 | enum radeon_connector_table { | 206 | enum radeon_connector_table { |
207 | CT_NONE, | 207 | CT_NONE = 0, |
208 | CT_GENERIC, | 208 | CT_GENERIC, |
209 | CT_IBOOK, | 209 | CT_IBOOK, |
210 | CT_POWERBOOK_EXTERNAL, | 210 | CT_POWERBOOK_EXTERNAL, |
@@ -215,6 +215,7 @@ enum radeon_connector_table { | |||
215 | CT_IMAC_G5_ISIGHT, | 215 | CT_IMAC_G5_ISIGHT, |
216 | CT_EMAC, | 216 | CT_EMAC, |
217 | CT_RN50_POWER, | 217 | CT_RN50_POWER, |
218 | CT_MAC_X800, | ||
218 | }; | 219 | }; |
219 | 220 | ||
220 | enum radeon_dvo_chip { | 221 | enum radeon_dvo_chip { |
@@ -600,7 +601,6 @@ extern bool radeon_get_atom_connector_info_from_supported_devices_table(struct d | |||
600 | void radeon_enc_destroy(struct drm_encoder *encoder); | 601 | void radeon_enc_destroy(struct drm_encoder *encoder); |
601 | void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj); | 602 | void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj); |
602 | void radeon_combios_asic_init(struct drm_device *dev); | 603 | void radeon_combios_asic_init(struct drm_device *dev); |
603 | extern int radeon_static_clocks_init(struct drm_device *dev); | ||
604 | bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, | 604 | bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, |
605 | struct drm_display_mode *mode, | 605 | struct drm_display_mode *mode, |
606 | struct drm_display_mode *adjusted_mode); | 606 | struct drm_display_mode *adjusted_mode); |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index f1c796810117..bfa59db374d2 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
@@ -905,6 +905,54 @@ static void rv770_gpu_init(struct radeon_device *rdev) | |||
905 | 905 | ||
906 | } | 906 | } |
907 | 907 | ||
908 | static int rv770_vram_scratch_init(struct radeon_device *rdev) | ||
909 | { | ||
910 | int r; | ||
911 | u64 gpu_addr; | ||
912 | |||
913 | if (rdev->vram_scratch.robj == NULL) { | ||
914 | r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, | ||
915 | true, RADEON_GEM_DOMAIN_VRAM, | ||
916 | &rdev->vram_scratch.robj); | ||
917 | if (r) { | ||
918 | return r; | ||
919 | } | ||
920 | } | ||
921 | |||
922 | r = radeon_bo_reserve(rdev->vram_scratch.robj, false); | ||
923 | if (unlikely(r != 0)) | ||
924 | return r; | ||
925 | r = radeon_bo_pin(rdev->vram_scratch.robj, | ||
926 | RADEON_GEM_DOMAIN_VRAM, &gpu_addr); | ||
927 | if (r) { | ||
928 | radeon_bo_unreserve(rdev->vram_scratch.robj); | ||
929 | return r; | ||
930 | } | ||
931 | r = radeon_bo_kmap(rdev->vram_scratch.robj, | ||
932 | (void **)&rdev->vram_scratch.ptr); | ||
933 | if (r) | ||
934 | radeon_bo_unpin(rdev->vram_scratch.robj); | ||
935 | radeon_bo_unreserve(rdev->vram_scratch.robj); | ||
936 | |||
937 | return r; | ||
938 | } | ||
939 | |||
940 | static void rv770_vram_scratch_fini(struct radeon_device *rdev) | ||
941 | { | ||
942 | int r; | ||
943 | |||
944 | if (rdev->vram_scratch.robj == NULL) { | ||
945 | return; | ||
946 | } | ||
947 | r = radeon_bo_reserve(rdev->vram_scratch.robj, false); | ||
948 | if (likely(r == 0)) { | ||
949 | radeon_bo_kunmap(rdev->vram_scratch.robj); | ||
950 | radeon_bo_unpin(rdev->vram_scratch.robj); | ||
951 | radeon_bo_unreserve(rdev->vram_scratch.robj); | ||
952 | } | ||
953 | radeon_bo_unref(&rdev->vram_scratch.robj); | ||
954 | } | ||
955 | |||
908 | int rv770_mc_init(struct radeon_device *rdev) | 956 | int rv770_mc_init(struct radeon_device *rdev) |
909 | { | 957 | { |
910 | u32 tmp; | 958 | u32 tmp; |
@@ -970,6 +1018,9 @@ static int rv770_startup(struct radeon_device *rdev) | |||
970 | if (r) | 1018 | if (r) |
971 | return r; | 1019 | return r; |
972 | } | 1020 | } |
1021 | r = rv770_vram_scratch_init(rdev); | ||
1022 | if (r) | ||
1023 | return r; | ||
973 | rv770_gpu_init(rdev); | 1024 | rv770_gpu_init(rdev); |
974 | r = r600_blit_init(rdev); | 1025 | r = r600_blit_init(rdev); |
975 | if (r) { | 1026 | if (r) { |
@@ -1023,11 +1074,6 @@ int rv770_resume(struct radeon_device *rdev) | |||
1023 | */ | 1074 | */ |
1024 | /* post card */ | 1075 | /* post card */ |
1025 | atom_asic_init(rdev->mode_info.atom_context); | 1076 | atom_asic_init(rdev->mode_info.atom_context); |
1026 | /* Initialize clocks */ | ||
1027 | r = radeon_clocks_init(rdev); | ||
1028 | if (r) { | ||
1029 | return r; | ||
1030 | } | ||
1031 | 1077 | ||
1032 | r = rv770_startup(rdev); | 1078 | r = rv770_startup(rdev); |
1033 | if (r) { | 1079 | if (r) { |
@@ -1118,9 +1164,6 @@ int rv770_init(struct radeon_device *rdev) | |||
1118 | radeon_surface_init(rdev); | 1164 | radeon_surface_init(rdev); |
1119 | /* Initialize clocks */ | 1165 | /* Initialize clocks */ |
1120 | radeon_get_clock_info(rdev->ddev); | 1166 | radeon_get_clock_info(rdev->ddev); |
1121 | r = radeon_clocks_init(rdev); | ||
1122 | if (r) | ||
1123 | return r; | ||
1124 | /* Fence driver */ | 1167 | /* Fence driver */ |
1125 | r = radeon_fence_driver_init(rdev); | 1168 | r = radeon_fence_driver_init(rdev); |
1126 | if (r) | 1169 | if (r) |
@@ -1195,9 +1238,9 @@ void rv770_fini(struct radeon_device *rdev) | |||
1195 | r600_irq_fini(rdev); | 1238 | r600_irq_fini(rdev); |
1196 | radeon_irq_kms_fini(rdev); | 1239 | radeon_irq_kms_fini(rdev); |
1197 | rv770_pcie_gart_fini(rdev); | 1240 | rv770_pcie_gart_fini(rdev); |
1241 | rv770_vram_scratch_fini(rdev); | ||
1198 | radeon_gem_fini(rdev); | 1242 | radeon_gem_fini(rdev); |
1199 | radeon_fence_driver_fini(rdev); | 1243 | radeon_fence_driver_fini(rdev); |
1200 | radeon_clocks_fini(rdev); | ||
1201 | radeon_agp_fini(rdev); | 1244 | radeon_agp_fini(rdev); |
1202 | radeon_bo_fini(rdev); | 1245 | radeon_bo_fini(rdev); |
1203 | radeon_atombios_fini(rdev); | 1246 | radeon_atombios_fini(rdev); |
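The rv770 hunk above introduces a paired rv770_vram_scratch_init()/rv770_vram_scratch_fini(): a single GPU page is lazily created in VRAM, pinned and kmapped during startup, then kunmapped, unpinned and unreferenced again on teardown, while the radeon_clocks_init()/radeon_clocks_fini() calls are dropped from the resume, init and fini paths. Stripped of the radeon specifics, this is the usual lazily-created, symmetrically-destroyed scratch object. A plain-C sketch of that shape, with invented names (scratch_init, scratch_fini) that do not come from the patch:

#include <stdio.h>
#include <stdlib.h>

struct scratch {
    void *obj;  /* plays the role of the buffer object */
    void *map;  /* plays the role of the kernel mapping */
};

/* Create the object only once, then "pin" and "map" it. */
static int scratch_init(struct scratch *s)
{
    if (!s->obj) {
        s->obj = malloc(4096);
        if (!s->obj)
            return -1;
    }
    s->map = s->obj;
    return 0;
}

/* Tear down in reverse order; tolerate being called after a failed init. */
static void scratch_fini(struct scratch *s)
{
    if (!s->obj)
        return;
    s->map = NULL;
    free(s->obj);
    s->obj = NULL;
}

int main(void)
{
    struct scratch s = { 0 };

    if (scratch_init(&s))
        return 1;
    printf("scratch ready at %p\n", s.map);
    scratch_fini(&s);
    return 0;
}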
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 7cffb3e04232..3451a82adba7 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c | |||
@@ -351,6 +351,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, | |||
351 | INIT_LIST_HEAD(&fbo->lru); | 351 | INIT_LIST_HEAD(&fbo->lru); |
352 | INIT_LIST_HEAD(&fbo->swap); | 352 | INIT_LIST_HEAD(&fbo->swap); |
353 | fbo->vm_node = NULL; | 353 | fbo->vm_node = NULL; |
354 | atomic_set(&fbo->cpu_writers, 0); | ||
354 | 355 | ||
355 | fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj); | 356 | fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj); |
356 | kref_init(&fbo->list_kref); | 357 | kref_init(&fbo->list_kref); |
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index ca904799f018..b1e02fffd3cc 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c | |||
@@ -69,7 +69,7 @@ struct ttm_page_pool { | |||
69 | spinlock_t lock; | 69 | spinlock_t lock; |
70 | bool fill_lock; | 70 | bool fill_lock; |
71 | struct list_head list; | 71 | struct list_head list; |
72 | int gfp_flags; | 72 | gfp_t gfp_flags; |
73 | unsigned npages; | 73 | unsigned npages; |
74 | char *name; | 74 | char *name; |
75 | unsigned long nfrees; | 75 | unsigned long nfrees; |
@@ -475,7 +475,7 @@ static void ttm_handle_caching_state_failure(struct list_head *pages, | |||
475 | * This function is reentrant if caller updates count depending on number of | 475 | * This function is reentrant if caller updates count depending on number of |
476 | * pages returned in pages array. | 476 | * pages returned in pages array. |
477 | */ | 477 | */ |
478 | static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags, | 478 | static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags, |
479 | int ttm_flags, enum ttm_caching_state cstate, unsigned count) | 479 | int ttm_flags, enum ttm_caching_state cstate, unsigned count) |
480 | { | 480 | { |
481 | struct page **caching_array; | 481 | struct page **caching_array; |
@@ -666,7 +666,7 @@ int ttm_get_pages(struct list_head *pages, int flags, | |||
666 | { | 666 | { |
667 | struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); | 667 | struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); |
668 | struct page *p = NULL; | 668 | struct page *p = NULL; |
669 | int gfp_flags = GFP_USER; | 669 | gfp_t gfp_flags = GFP_USER; |
670 | int r; | 670 | int r; |
671 | 671 | ||
672 | /* set zero flag for page allocation if required */ | 672 | /* set zero flag for page allocation if required */ |
@@ -818,7 +818,7 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages) | |||
818 | return 0; | 818 | return 0; |
819 | } | 819 | } |
820 | 820 | ||
821 | void ttm_page_alloc_fini() | 821 | void ttm_page_alloc_fini(void) |
822 | { | 822 | { |
823 | int i; | 823 | int i; |
824 | 824 | ||
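Two small type corrections in ttm_page_alloc.c: the allocation-flag fields and parameters change from int to gfp_t (the kernel's annotated type for GFP flags, which lets sparse catch flag/integer mixups), and ttm_page_alloc_fini() gains an explicit (void) parameter list. The second one is a pure C subtlety; a minimal standalone illustration, with function names invented for the example:

/*
 * Before C23, "int f()" declares a function with an unspecified parameter
 * list, so a stray call such as f(1, 2) typically still compiles;
 * "int f(void)" is a real prototype and rejects extra arguments.
 */
static int fini_unspecified() { return 0; }
static int fini_prototyped(void) { return 0; }

int main(void)
{
    /* fini_unspecified(1, 2) would typically still compile against the first
     * definition; the same call against fini_prototyped() is an error. */
    return fini_unspecified() + fini_prototyped();
}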
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 72ec2e2b6e97..a96ed6d9d010 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
@@ -148,13 +148,16 @@ static struct pci_device_id vmw_pci_id_list[] = { | |||
148 | {0, 0, 0} | 148 | {0, 0, 0} |
149 | }; | 149 | }; |
150 | 150 | ||
151 | static char *vmw_devname = "vmwgfx"; | 151 | static int enable_fbdev; |
152 | 152 | ||
153 | static int vmw_probe(struct pci_dev *, const struct pci_device_id *); | 153 | static int vmw_probe(struct pci_dev *, const struct pci_device_id *); |
154 | static void vmw_master_init(struct vmw_master *); | 154 | static void vmw_master_init(struct vmw_master *); |
155 | static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, | 155 | static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, |
156 | void *ptr); | 156 | void *ptr); |
157 | 157 | ||
158 | MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev"); | ||
159 | module_param_named(enable_fbdev, enable_fbdev, int, 0600); | ||
160 | |||
158 | static void vmw_print_capabilities(uint32_t capabilities) | 161 | static void vmw_print_capabilities(uint32_t capabilities) |
159 | { | 162 | { |
160 | DRM_INFO("Capabilities:\n"); | 163 | DRM_INFO("Capabilities:\n"); |
@@ -192,8 +195,6 @@ static int vmw_request_device(struct vmw_private *dev_priv) | |||
192 | { | 195 | { |
193 | int ret; | 196 | int ret; |
194 | 197 | ||
195 | vmw_kms_save_vga(dev_priv); | ||
196 | |||
197 | ret = vmw_fifo_init(dev_priv, &dev_priv->fifo); | 198 | ret = vmw_fifo_init(dev_priv, &dev_priv->fifo); |
198 | if (unlikely(ret != 0)) { | 199 | if (unlikely(ret != 0)) { |
199 | DRM_ERROR("Unable to initialize FIFO.\n"); | 200 | DRM_ERROR("Unable to initialize FIFO.\n"); |
@@ -206,9 +207,35 @@ static int vmw_request_device(struct vmw_private *dev_priv) | |||
206 | static void vmw_release_device(struct vmw_private *dev_priv) | 207 | static void vmw_release_device(struct vmw_private *dev_priv) |
207 | { | 208 | { |
208 | vmw_fifo_release(dev_priv, &dev_priv->fifo); | 209 | vmw_fifo_release(dev_priv, &dev_priv->fifo); |
209 | vmw_kms_restore_vga(dev_priv); | ||
210 | } | 210 | } |
211 | 211 | ||
212 | int vmw_3d_resource_inc(struct vmw_private *dev_priv) | ||
213 | { | ||
214 | int ret = 0; | ||
215 | |||
216 | mutex_lock(&dev_priv->release_mutex); | ||
217 | if (unlikely(dev_priv->num_3d_resources++ == 0)) { | ||
218 | ret = vmw_request_device(dev_priv); | ||
219 | if (unlikely(ret != 0)) | ||
220 | --dev_priv->num_3d_resources; | ||
221 | } | ||
222 | mutex_unlock(&dev_priv->release_mutex); | ||
223 | return ret; | ||
224 | } | ||
225 | |||
226 | |||
227 | void vmw_3d_resource_dec(struct vmw_private *dev_priv) | ||
228 | { | ||
229 | int32_t n3d; | ||
230 | |||
231 | mutex_lock(&dev_priv->release_mutex); | ||
232 | if (unlikely(--dev_priv->num_3d_resources == 0)) | ||
233 | vmw_release_device(dev_priv); | ||
234 | n3d = (int32_t) dev_priv->num_3d_resources; | ||
235 | mutex_unlock(&dev_priv->release_mutex); | ||
236 | |||
237 | BUG_ON(n3d < 0); | ||
238 | } | ||
212 | 239 | ||
213 | static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | 240 | static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) |
214 | { | 241 | { |
@@ -228,6 +255,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
228 | dev_priv->last_read_sequence = (uint32_t) -100; | 255 | dev_priv->last_read_sequence = (uint32_t) -100; |
229 | mutex_init(&dev_priv->hw_mutex); | 256 | mutex_init(&dev_priv->hw_mutex); |
230 | mutex_init(&dev_priv->cmdbuf_mutex); | 257 | mutex_init(&dev_priv->cmdbuf_mutex); |
258 | mutex_init(&dev_priv->release_mutex); | ||
231 | rwlock_init(&dev_priv->resource_lock); | 259 | rwlock_init(&dev_priv->resource_lock); |
232 | idr_init(&dev_priv->context_idr); | 260 | idr_init(&dev_priv->context_idr); |
233 | idr_init(&dev_priv->surface_idr); | 261 | idr_init(&dev_priv->surface_idr); |
@@ -244,6 +272,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
244 | dev_priv->vram_start = pci_resource_start(dev->pdev, 1); | 272 | dev_priv->vram_start = pci_resource_start(dev->pdev, 1); |
245 | dev_priv->mmio_start = pci_resource_start(dev->pdev, 2); | 273 | dev_priv->mmio_start = pci_resource_start(dev->pdev, 2); |
246 | 274 | ||
275 | dev_priv->enable_fb = enable_fbdev; | ||
276 | |||
247 | mutex_lock(&dev_priv->hw_mutex); | 277 | mutex_lock(&dev_priv->hw_mutex); |
248 | 278 | ||
249 | vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); | 279 | vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); |
@@ -343,17 +373,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
343 | 373 | ||
344 | dev->dev_private = dev_priv; | 374 | dev->dev_private = dev_priv; |
345 | 375 | ||
346 | if (!dev->devname) | ||
347 | dev->devname = vmw_devname; | ||
348 | |||
349 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { | ||
350 | ret = drm_irq_install(dev); | ||
351 | if (unlikely(ret != 0)) { | ||
352 | DRM_ERROR("Failed installing irq: %d\n", ret); | ||
353 | goto out_no_irq; | ||
354 | } | ||
355 | } | ||
356 | |||
357 | ret = pci_request_regions(dev->pdev, "vmwgfx probe"); | 376 | ret = pci_request_regions(dev->pdev, "vmwgfx probe"); |
358 | dev_priv->stealth = (ret != 0); | 377 | dev_priv->stealth = (ret != 0); |
359 | if (dev_priv->stealth) { | 378 | if (dev_priv->stealth) { |
@@ -369,26 +388,52 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
369 | goto out_no_device; | 388 | goto out_no_device; |
370 | } | 389 | } |
371 | } | 390 | } |
372 | ret = vmw_request_device(dev_priv); | 391 | ret = vmw_kms_init(dev_priv); |
373 | if (unlikely(ret != 0)) | 392 | if (unlikely(ret != 0)) |
374 | goto out_no_device; | 393 | goto out_no_kms; |
375 | vmw_kms_init(dev_priv); | ||
376 | vmw_overlay_init(dev_priv); | 394 | vmw_overlay_init(dev_priv); |
377 | vmw_fb_init(dev_priv); | 395 | if (dev_priv->enable_fb) { |
396 | ret = vmw_3d_resource_inc(dev_priv); | ||
397 | if (unlikely(ret != 0)) | ||
398 | goto out_no_fifo; | ||
399 | vmw_kms_save_vga(dev_priv); | ||
400 | vmw_fb_init(dev_priv); | ||
401 | DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? | ||
402 | "Detected device 3D availability.\n" : | ||
403 | "Detected no device 3D availability.\n"); | ||
404 | } else { | ||
405 | DRM_INFO("Delayed 3D detection since we're not " | ||
406 | "running the device in SVGA mode yet.\n"); | ||
407 | } | ||
408 | |||
409 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { | ||
410 | ret = drm_irq_install(dev); | ||
411 | if (unlikely(ret != 0)) { | ||
412 | DRM_ERROR("Failed installing irq: %d\n", ret); | ||
413 | goto out_no_irq; | ||
414 | } | ||
415 | } | ||
378 | 416 | ||
379 | dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; | 417 | dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; |
380 | register_pm_notifier(&dev_priv->pm_nb); | 418 | register_pm_notifier(&dev_priv->pm_nb); |
381 | 419 | ||
382 | DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? "Have 3D\n" : "No 3D\n"); | ||
383 | |||
384 | return 0; | 420 | return 0; |
385 | 421 | ||
386 | out_no_device: | ||
387 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) | ||
388 | drm_irq_uninstall(dev_priv->dev); | ||
389 | if (dev->devname == vmw_devname) | ||
390 | dev->devname = NULL; | ||
391 | out_no_irq: | 422 | out_no_irq: |
423 | if (dev_priv->enable_fb) { | ||
424 | vmw_fb_close(dev_priv); | ||
425 | vmw_kms_restore_vga(dev_priv); | ||
426 | vmw_3d_resource_dec(dev_priv); | ||
427 | } | ||
428 | out_no_fifo: | ||
429 | vmw_overlay_close(dev_priv); | ||
430 | vmw_kms_close(dev_priv); | ||
431 | out_no_kms: | ||
432 | if (dev_priv->stealth) | ||
433 | pci_release_region(dev->pdev, 2); | ||
434 | else | ||
435 | pci_release_regions(dev->pdev); | ||
436 | out_no_device: | ||
392 | ttm_object_device_release(&dev_priv->tdev); | 437 | ttm_object_device_release(&dev_priv->tdev); |
393 | out_err4: | 438 | out_err4: |
394 | iounmap(dev_priv->mmio_virt); | 439 | iounmap(dev_priv->mmio_virt); |
@@ -415,19 +460,20 @@ static int vmw_driver_unload(struct drm_device *dev) | |||
415 | 460 | ||
416 | unregister_pm_notifier(&dev_priv->pm_nb); | 461 | unregister_pm_notifier(&dev_priv->pm_nb); |
417 | 462 | ||
418 | vmw_fb_close(dev_priv); | 463 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) |
464 | drm_irq_uninstall(dev_priv->dev); | ||
465 | if (dev_priv->enable_fb) { | ||
466 | vmw_fb_close(dev_priv); | ||
467 | vmw_kms_restore_vga(dev_priv); | ||
468 | vmw_3d_resource_dec(dev_priv); | ||
469 | } | ||
419 | vmw_kms_close(dev_priv); | 470 | vmw_kms_close(dev_priv); |
420 | vmw_overlay_close(dev_priv); | 471 | vmw_overlay_close(dev_priv); |
421 | vmw_release_device(dev_priv); | ||
422 | if (dev_priv->stealth) | 472 | if (dev_priv->stealth) |
423 | pci_release_region(dev->pdev, 2); | 473 | pci_release_region(dev->pdev, 2); |
424 | else | 474 | else |
425 | pci_release_regions(dev->pdev); | 475 | pci_release_regions(dev->pdev); |
426 | 476 | ||
427 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) | ||
428 | drm_irq_uninstall(dev_priv->dev); | ||
429 | if (dev->devname == vmw_devname) | ||
430 | dev->devname = NULL; | ||
431 | ttm_object_device_release(&dev_priv->tdev); | 477 | ttm_object_device_release(&dev_priv->tdev); |
432 | iounmap(dev_priv->mmio_virt); | 478 | iounmap(dev_priv->mmio_virt); |
433 | drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start, | 479 | drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start, |
@@ -500,7 +546,7 @@ static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd, | |||
500 | struct drm_ioctl_desc *ioctl = | 546 | struct drm_ioctl_desc *ioctl = |
501 | &vmw_ioctls[nr - DRM_COMMAND_BASE]; | 547 | &vmw_ioctls[nr - DRM_COMMAND_BASE]; |
502 | 548 | ||
503 | if (unlikely(ioctl->cmd != cmd)) { | 549 | if (unlikely(ioctl->cmd_drv != cmd)) { |
504 | DRM_ERROR("Invalid command format, ioctl %d\n", | 550 | DRM_ERROR("Invalid command format, ioctl %d\n", |
505 | nr - DRM_COMMAND_BASE); | 551 | nr - DRM_COMMAND_BASE); |
506 | return -EINVAL; | 552 | return -EINVAL; |
@@ -589,6 +635,16 @@ static int vmw_master_set(struct drm_device *dev, | |||
589 | struct vmw_master *vmaster = vmw_master(file_priv->master); | 635 | struct vmw_master *vmaster = vmw_master(file_priv->master); |
590 | int ret = 0; | 636 | int ret = 0; |
591 | 637 | ||
638 | if (!dev_priv->enable_fb) { | ||
639 | ret = vmw_3d_resource_inc(dev_priv); | ||
640 | if (unlikely(ret != 0)) | ||
641 | return ret; | ||
642 | vmw_kms_save_vga(dev_priv); | ||
643 | mutex_lock(&dev_priv->hw_mutex); | ||
644 | vmw_write(dev_priv, SVGA_REG_TRACES, 0); | ||
645 | mutex_unlock(&dev_priv->hw_mutex); | ||
646 | } | ||
647 | |||
592 | if (active) { | 648 | if (active) { |
593 | BUG_ON(active != &dev_priv->fbdev_master); | 649 | BUG_ON(active != &dev_priv->fbdev_master); |
594 | ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile); | 650 | ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile); |
@@ -617,7 +673,13 @@ static int vmw_master_set(struct drm_device *dev, | |||
617 | return 0; | 673 | return 0; |
618 | 674 | ||
619 | out_no_active_lock: | 675 | out_no_active_lock: |
620 | vmw_release_device(dev_priv); | 676 | if (!dev_priv->enable_fb) { |
677 | mutex_lock(&dev_priv->hw_mutex); | ||
678 | vmw_write(dev_priv, SVGA_REG_TRACES, 1); | ||
679 | mutex_unlock(&dev_priv->hw_mutex); | ||
680 | vmw_kms_restore_vga(dev_priv); | ||
681 | vmw_3d_resource_dec(dev_priv); | ||
682 | } | ||
621 | return ret; | 683 | return ret; |
622 | } | 684 | } |
623 | 685 | ||
@@ -645,11 +707,23 @@ static void vmw_master_drop(struct drm_device *dev, | |||
645 | 707 | ||
646 | ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); | 708 | ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); |
647 | 709 | ||
710 | if (!dev_priv->enable_fb) { | ||
711 | ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM); | ||
712 | if (unlikely(ret != 0)) | ||
713 | DRM_ERROR("Unable to clean VRAM on master drop.\n"); | ||
714 | mutex_lock(&dev_priv->hw_mutex); | ||
715 | vmw_write(dev_priv, SVGA_REG_TRACES, 1); | ||
716 | mutex_unlock(&dev_priv->hw_mutex); | ||
717 | vmw_kms_restore_vga(dev_priv); | ||
718 | vmw_3d_resource_dec(dev_priv); | ||
719 | } | ||
720 | |||
648 | dev_priv->active_master = &dev_priv->fbdev_master; | 721 | dev_priv->active_master = &dev_priv->fbdev_master; |
649 | ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); | 722 | ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); |
650 | ttm_vt_unlock(&dev_priv->fbdev_master.lock); | 723 | ttm_vt_unlock(&dev_priv->fbdev_master.lock); |
651 | 724 | ||
652 | vmw_fb_on(dev_priv); | 725 | if (dev_priv->enable_fb) |
726 | vmw_fb_on(dev_priv); | ||
653 | } | 727 | } |
654 | 728 | ||
655 | 729 | ||
@@ -722,6 +796,7 @@ static struct drm_driver driver = { | |||
722 | .irq_postinstall = vmw_irq_postinstall, | 796 | .irq_postinstall = vmw_irq_postinstall, |
723 | .irq_uninstall = vmw_irq_uninstall, | 797 | .irq_uninstall = vmw_irq_uninstall, |
724 | .irq_handler = vmw_irq_handler, | 798 | .irq_handler = vmw_irq_handler, |
799 | .get_vblank_counter = vmw_get_vblank_counter, | ||
725 | .reclaim_buffers_locked = NULL, | 800 | .reclaim_buffers_locked = NULL, |
726 | .get_map_ofs = drm_core_get_map_ofs, | 801 | .get_map_ofs = drm_core_get_map_ofs, |
727 | .get_reg_ofs = drm_core_get_reg_ofs, | 802 | .get_reg_ofs = drm_core_get_reg_ofs, |
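The vmwgfx rework above makes device bring-up demand driven: fbdev emulation is now opt-in through the new enable_fbdev module parameter, and the SVGA device/FIFO is only kept enabled while at least one 3D resource exists, counted by vmw_3d_resource_inc()/vmw_3d_resource_dec() under the new release_mutex (the first user triggers vmw_request_device(), the last one vmw_release_device()). At its core this is a mutex-protected use count that powers something up on the 0 to 1 transition and down on the 1 to 0 transition; a standalone sketch using pthreads, with invented names:

#include <pthread.h>
#include <stdio.h>

/* Invented stand-ins for powering the device up and down. */
static int power_up(void)    { puts("first user: power up");  return 0; }
static void power_down(void) { puts("last user: power down"); }

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int users;

/* The 0 -> 1 transition brings the resource up; a failed bring-up undoes the count. */
static int resource_get(void)
{
    int ret = 0;

    pthread_mutex_lock(&lock);
    if (users++ == 0) {
        ret = power_up();
        if (ret)
            --users;
    }
    pthread_mutex_unlock(&lock);
    return ret;
}

/* The 1 -> 0 transition takes it back down. */
static void resource_put(void)
{
    pthread_mutex_lock(&lock);
    if (--users == 0)
        power_down();
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    if (resource_get())
        return 1;
    resource_get();     /* second user: no extra power_up */
    resource_put();
    resource_put();     /* last user: power_down runs here */
    return 0;
}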
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 429f917b60bf..58de6393f611 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | |||
@@ -277,6 +277,7 @@ struct vmw_private { | |||
277 | 277 | ||
278 | bool stealth; | 278 | bool stealth; |
279 | bool is_opened; | 279 | bool is_opened; |
280 | bool enable_fb; | ||
280 | 281 | ||
281 | /** | 282 | /** |
282 | * Master management. | 283 | * Master management. |
@@ -285,6 +286,9 @@ struct vmw_private { | |||
285 | struct vmw_master *active_master; | 286 | struct vmw_master *active_master; |
286 | struct vmw_master fbdev_master; | 287 | struct vmw_master fbdev_master; |
287 | struct notifier_block pm_nb; | 288 | struct notifier_block pm_nb; |
289 | |||
290 | struct mutex release_mutex; | ||
291 | uint32_t num_3d_resources; | ||
288 | }; | 292 | }; |
289 | 293 | ||
290 | static inline struct vmw_private *vmw_priv(struct drm_device *dev) | 294 | static inline struct vmw_private *vmw_priv(struct drm_device *dev) |
@@ -319,6 +323,9 @@ static inline uint32_t vmw_read(struct vmw_private *dev_priv, | |||
319 | return val; | 323 | return val; |
320 | } | 324 | } |
321 | 325 | ||
326 | int vmw_3d_resource_inc(struct vmw_private *dev_priv); | ||
327 | void vmw_3d_resource_dec(struct vmw_private *dev_priv); | ||
328 | |||
322 | /** | 329 | /** |
323 | * GMR utilities - vmwgfx_gmr.c | 330 | * GMR utilities - vmwgfx_gmr.c |
324 | */ | 331 | */ |
@@ -511,6 +518,7 @@ void vmw_kms_write_svga(struct vmw_private *vmw_priv, | |||
511 | unsigned bbp, unsigned depth); | 518 | unsigned bbp, unsigned depth); |
512 | int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, | 519 | int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, |
513 | struct drm_file *file_priv); | 520 | struct drm_file *file_priv); |
521 | u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc); | ||
514 | 522 | ||
515 | /** | 523 | /** |
516 | * Overlay control - vmwgfx_overlay.c | 524 | * Overlay control - vmwgfx_overlay.c |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index 870967a97c15..409e172f4abf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | |||
@@ -615,6 +615,11 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv, | |||
615 | if (unlikely(ret != 0)) | 615 | if (unlikely(ret != 0)) |
616 | goto err_unlock; | 616 | goto err_unlock; |
617 | 617 | ||
618 | if (bo->mem.mem_type == TTM_PL_VRAM && | ||
619 | bo->mem.mm_node->start < bo->num_pages) | ||
620 | (void) ttm_bo_validate(bo, &vmw_sys_placement, false, | ||
621 | false, false); | ||
622 | |||
618 | ret = ttm_bo_validate(bo, &ne_placement, false, false, false); | 623 | ret = ttm_bo_validate(bo, &ne_placement, false, false, false); |
619 | 624 | ||
620 | /* Could probably bug on */ | 625 | /* Could probably bug on */ |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c index e6a1eb7ea954..0fe31766e4cf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | |||
@@ -106,6 +106,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | |||
106 | mutex_lock(&dev_priv->hw_mutex); | 106 | mutex_lock(&dev_priv->hw_mutex); |
107 | dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); | 107 | dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); |
108 | dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); | 108 | dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); |
109 | dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES); | ||
109 | vmw_write(dev_priv, SVGA_REG_ENABLE, 1); | 110 | vmw_write(dev_priv, SVGA_REG_ENABLE, 1); |
110 | 111 | ||
111 | min = 4; | 112 | min = 4; |
@@ -175,6 +176,8 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | |||
175 | dev_priv->config_done_state); | 176 | dev_priv->config_done_state); |
176 | vmw_write(dev_priv, SVGA_REG_ENABLE, | 177 | vmw_write(dev_priv, SVGA_REG_ENABLE, |
177 | dev_priv->enable_state); | 178 | dev_priv->enable_state); |
179 | vmw_write(dev_priv, SVGA_REG_TRACES, | ||
180 | dev_priv->traces_state); | ||
178 | 181 | ||
179 | mutex_unlock(&dev_priv->hw_mutex); | 182 | mutex_unlock(&dev_priv->hw_mutex); |
180 | vmw_fence_queue_takedown(&fifo->fence_queue); | 183 | vmw_fence_queue_takedown(&fifo->fence_queue); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 64d7f47df868..e882ba099f0c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
@@ -898,7 +898,19 @@ int vmw_kms_save_vga(struct vmw_private *vmw_priv) | |||
898 | save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH); | 898 | save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH); |
899 | save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT); | 899 | save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT); |
900 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); | 900 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); |
901 | if (i == 0 && vmw_priv->num_displays == 1 && | ||
902 | save->width == 0 && save->height == 0) { | ||
903 | |||
904 | /* | ||
905 | * It should be fairly safe to assume that these | ||
906 | * values are uninitialized. | ||
907 | */ | ||
908 | |||
909 | save->width = vmw_priv->vga_width - save->pos_x; | ||
910 | save->height = vmw_priv->vga_height - save->pos_y; | ||
911 | } | ||
901 | } | 912 | } |
913 | |||
902 | return 0; | 914 | return 0; |
903 | } | 915 | } |
904 | 916 | ||
@@ -984,3 +996,8 @@ out_unlock: | |||
984 | ttm_read_unlock(&vmaster->lock); | 996 | ttm_read_unlock(&vmaster->lock); |
985 | return ret; | 997 | return ret; |
986 | } | 998 | } |
999 | |||
1000 | u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc) | ||
1001 | { | ||
1002 | return 0; | ||
1003 | } | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index 2ff5cf78235f..11cb39e3accb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | |||
@@ -27,6 +27,8 @@ | |||
27 | 27 | ||
28 | #include "vmwgfx_kms.h" | 28 | #include "vmwgfx_kms.h" |
29 | 29 | ||
30 | #define VMWGFX_LDU_NUM_DU 8 | ||
31 | |||
30 | #define vmw_crtc_to_ldu(x) \ | 32 | #define vmw_crtc_to_ldu(x) \ |
31 | container_of(x, struct vmw_legacy_display_unit, base.crtc) | 33 | container_of(x, struct vmw_legacy_display_unit, base.crtc) |
32 | #define vmw_encoder_to_ldu(x) \ | 34 | #define vmw_encoder_to_ldu(x) \ |
@@ -335,7 +337,8 @@ static void vmw_ldu_connector_restore(struct drm_connector *connector) | |||
335 | } | 337 | } |
336 | 338 | ||
337 | static enum drm_connector_status | 339 | static enum drm_connector_status |
338 | vmw_ldu_connector_detect(struct drm_connector *connector) | 340 | vmw_ldu_connector_detect(struct drm_connector *connector, |
341 | bool force) | ||
339 | { | 342 | { |
340 | if (vmw_connector_to_ldu(connector)->pref_active) | 343 | if (vmw_connector_to_ldu(connector)->pref_active) |
341 | return connector_status_connected; | 344 | return connector_status_connected; |
@@ -516,7 +519,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) | |||
516 | 519 | ||
517 | drm_connector_init(dev, connector, &vmw_legacy_connector_funcs, | 520 | drm_connector_init(dev, connector, &vmw_legacy_connector_funcs, |
518 | DRM_MODE_CONNECTOR_LVDS); | 521 | DRM_MODE_CONNECTOR_LVDS); |
519 | connector->status = vmw_ldu_connector_detect(connector); | 522 | connector->status = vmw_ldu_connector_detect(connector, true); |
520 | 523 | ||
521 | drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs, | 524 | drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs, |
522 | DRM_MODE_ENCODER_LVDS); | 525 | DRM_MODE_ENCODER_LVDS); |
@@ -535,6 +538,10 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) | |||
535 | 538 | ||
536 | int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) | 539 | int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) |
537 | { | 540 | { |
541 | struct drm_device *dev = dev_priv->dev; | ||
542 | int i; | ||
543 | int ret; | ||
544 | |||
538 | if (dev_priv->ldu_priv) { | 545 | if (dev_priv->ldu_priv) { |
539 | DRM_INFO("ldu system already on\n"); | 546 | DRM_INFO("ldu system already on\n"); |
540 | return -EINVAL; | 547 | return -EINVAL; |
@@ -552,23 +559,24 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) | |||
552 | 559 | ||
553 | drm_mode_create_dirty_info_property(dev_priv->dev); | 560 | drm_mode_create_dirty_info_property(dev_priv->dev); |
554 | 561 | ||
555 | vmw_ldu_init(dev_priv, 0); | ||
556 | /* for old hardware without multimon only enable one display */ | ||
557 | if (dev_priv->capabilities & SVGA_CAP_MULTIMON) { | 562 | if (dev_priv->capabilities & SVGA_CAP_MULTIMON) { |
558 | vmw_ldu_init(dev_priv, 1); | 563 | for (i = 0; i < VMWGFX_LDU_NUM_DU; ++i) |
559 | vmw_ldu_init(dev_priv, 2); | 564 | vmw_ldu_init(dev_priv, i); |
560 | vmw_ldu_init(dev_priv, 3); | 565 | ret = drm_vblank_init(dev, VMWGFX_LDU_NUM_DU); |
561 | vmw_ldu_init(dev_priv, 4); | 566 | } else { |
562 | vmw_ldu_init(dev_priv, 5); | 567 | /* for old hardware without multimon only enable one display */ |
563 | vmw_ldu_init(dev_priv, 6); | 568 | vmw_ldu_init(dev_priv, 0); |
564 | vmw_ldu_init(dev_priv, 7); | 569 | ret = drm_vblank_init(dev, 1); |
565 | } | 570 | } |
566 | 571 | ||
567 | return 0; | 572 | return ret; |
568 | } | 573 | } |
569 | 574 | ||
570 | int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv) | 575 | int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv) |
571 | { | 576 | { |
577 | struct drm_device *dev = dev_priv->dev; | ||
578 | |||
579 | drm_vblank_cleanup(dev); | ||
572 | if (!dev_priv->ldu_priv) | 580 | if (!dev_priv->ldu_priv) |
573 | return -ENOSYS; | 581 | return -ENOSYS; |
574 | 582 | ||
@@ -610,7 +618,7 @@ int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num, | |||
610 | ldu->pref_height = 600; | 618 | ldu->pref_height = 600; |
611 | ldu->pref_active = false; | 619 | ldu->pref_active = false; |
612 | } | 620 | } |
613 | con->status = vmw_ldu_connector_detect(con); | 621 | con->status = vmw_ldu_connector_detect(con, true); |
614 | } | 622 | } |
615 | 623 | ||
616 | mutex_unlock(&dev->mode_config.mutex); | 624 | mutex_unlock(&dev->mode_config.mutex); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 5f2d5df01e5c..c8c40e9979db 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | |||
@@ -211,6 +211,7 @@ static void vmw_hw_context_destroy(struct vmw_resource *res) | |||
211 | cmd->body.cid = cpu_to_le32(res->id); | 211 | cmd->body.cid = cpu_to_le32(res->id); |
212 | 212 | ||
213 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | 213 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
214 | vmw_3d_resource_dec(dev_priv); | ||
214 | } | 215 | } |
215 | 216 | ||
216 | static int vmw_context_init(struct vmw_private *dev_priv, | 217 | static int vmw_context_init(struct vmw_private *dev_priv, |
@@ -247,6 +248,7 @@ static int vmw_context_init(struct vmw_private *dev_priv, | |||
247 | cmd->body.cid = cpu_to_le32(res->id); | 248 | cmd->body.cid = cpu_to_le32(res->id); |
248 | 249 | ||
249 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | 250 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
251 | (void) vmw_3d_resource_inc(dev_priv); | ||
250 | vmw_resource_activate(res, vmw_hw_context_destroy); | 252 | vmw_resource_activate(res, vmw_hw_context_destroy); |
251 | return 0; | 253 | return 0; |
252 | } | 254 | } |
@@ -406,6 +408,7 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res) | |||
406 | cmd->body.sid = cpu_to_le32(res->id); | 408 | cmd->body.sid = cpu_to_le32(res->id); |
407 | 409 | ||
408 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | 410 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
411 | vmw_3d_resource_dec(dev_priv); | ||
409 | } | 412 | } |
410 | 413 | ||
411 | void vmw_surface_res_free(struct vmw_resource *res) | 414 | void vmw_surface_res_free(struct vmw_resource *res) |
@@ -473,6 +476,7 @@ int vmw_surface_init(struct vmw_private *dev_priv, | |||
473 | } | 476 | } |
474 | 477 | ||
475 | vmw_fifo_commit(dev_priv, submit_size); | 478 | vmw_fifo_commit(dev_priv, submit_size); |
479 | (void) vmw_3d_resource_inc(dev_priv); | ||
476 | vmw_resource_activate(res, vmw_hw_surface_destroy); | 480 | vmw_resource_activate(res, vmw_hw_surface_destroy); |
477 | return 0; | 481 | return 0; |
478 | } | 482 | } |
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c index b87569e96b16..f366f968155a 100644 --- a/drivers/gpu/vga/vgaarb.c +++ b/drivers/gpu/vga/vgaarb.c | |||
@@ -598,7 +598,7 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev, | |||
598 | pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count); | 598 | pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count); |
599 | } | 599 | } |
600 | 600 | ||
601 | void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace) | 601 | static void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace) |
602 | { | 602 | { |
603 | struct vga_device *vgadev; | 603 | struct vga_device *vgadev; |
604 | unsigned long flags; | 604 | unsigned long flags; |
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 0c52899be964..3f7292486024 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c | |||
@@ -1285,8 +1285,11 @@ static const struct hid_device_id hid_blacklist[] = { | |||
1285 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) }, | 1285 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) }, |
1286 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, | 1286 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, |
1287 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, | 1287 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, |
1288 | { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) }, | ||
1289 | { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO) }, | ||
1288 | { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) }, | 1290 | { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) }, |
1289 | { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) }, | 1291 | { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) }, |
1292 | { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) }, | ||
1290 | { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH) }, | 1293 | { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH) }, |
1291 | { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) }, | 1294 | { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) }, |
1292 | { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) }, | 1295 | { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) }, |
@@ -1578,7 +1581,6 @@ static const struct hid_device_id hid_ignore_list[] = { | |||
1578 | { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_24) }, | 1581 | { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_24) }, |
1579 | { HID_USB_DEVICE(USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1) }, | 1582 | { HID_USB_DEVICE(USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1) }, |
1580 | { HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232) }, | 1583 | { HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232) }, |
1581 | { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT)}, | ||
1582 | { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM)}, | 1584 | { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM)}, |
1583 | { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2)}, | 1585 | { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2)}, |
1584 | { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) }, | 1586 | { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) }, |
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 85c6d13c9ffa..765a4f53eb5c 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
@@ -105,6 +105,7 @@ | |||
105 | 105 | ||
106 | #define USB_VENDOR_ID_ASUS 0x0486 | 106 | #define USB_VENDOR_ID_ASUS 0x0486 |
107 | #define USB_DEVICE_ID_ASUS_T91MT 0x0185 | 107 | #define USB_DEVICE_ID_ASUS_T91MT 0x0185 |
108 | #define USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO 0x0186 | ||
108 | 109 | ||
109 | #define USB_VENDOR_ID_ASUSTEK 0x0b05 | 110 | #define USB_VENDOR_ID_ASUSTEK 0x0b05 |
110 | #define USB_DEVICE_ID_ASUSTEK_LCM 0x1726 | 111 | #define USB_DEVICE_ID_ASUSTEK_LCM 0x1726 |
@@ -128,6 +129,7 @@ | |||
128 | 129 | ||
129 | #define USB_VENDOR_ID_BTC 0x046e | 130 | #define USB_VENDOR_ID_BTC 0x046e |
130 | #define USB_DEVICE_ID_BTC_EMPREX_REMOTE 0x5578 | 131 | #define USB_DEVICE_ID_BTC_EMPREX_REMOTE 0x5578 |
132 | #define USB_DEVICE_ID_BTC_EMPREX_REMOTE_2 0x5577 | ||
131 | 133 | ||
132 | #define USB_VENDOR_ID_CANDO 0x2087 | 134 | #define USB_VENDOR_ID_CANDO 0x2087 |
133 | #define USB_DEVICE_ID_CANDO_MULTI_TOUCH 0x0a01 | 135 | #define USB_DEVICE_ID_CANDO_MULTI_TOUCH 0x0a01 |
@@ -149,6 +151,7 @@ | |||
149 | 151 | ||
150 | #define USB_VENDOR_ID_CHICONY 0x04f2 | 152 | #define USB_VENDOR_ID_CHICONY 0x04f2 |
151 | #define USB_DEVICE_ID_CHICONY_TACTICAL_PAD 0x0418 | 153 | #define USB_DEVICE_ID_CHICONY_TACTICAL_PAD 0x0418 |
154 | #define USB_DEVICE_ID_CHICONY_MULTI_TOUCH 0xb19d | ||
152 | 155 | ||
153 | #define USB_VENDOR_ID_CIDC 0x1677 | 156 | #define USB_VENDOR_ID_CIDC 0x1677 |
154 | 157 | ||
@@ -507,6 +510,7 @@ | |||
507 | #define USB_VENDOR_ID_UCLOGIC 0x5543 | 510 | #define USB_VENDOR_ID_UCLOGIC 0x5543 |
508 | #define USB_DEVICE_ID_UCLOGIC_TABLET_PF1209 0x0042 | 511 | #define USB_DEVICE_ID_UCLOGIC_TABLET_PF1209 0x0042 |
509 | #define USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U 0x0003 | 512 | #define USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U 0x0003 |
513 | #define USB_DEVICE_ID_UCLOGIC_TABLET_KNA5 0x6001 | ||
510 | 514 | ||
511 | #define USB_VENDOR_ID_VERNIER 0x08f7 | 515 | #define USB_VENDOR_ID_VERNIER 0x08f7 |
512 | #define USB_DEVICE_ID_VERNIER_LABPRO 0x0001 | 516 | #define USB_DEVICE_ID_VERNIER_LABPRO 0x0001 |
diff --git a/drivers/hid/hid-mosart.c b/drivers/hid/hid-mosart.c index e91437c18906..ac5421d568f1 100644 --- a/drivers/hid/hid-mosart.c +++ b/drivers/hid/hid-mosart.c | |||
@@ -239,6 +239,7 @@ static void mosart_remove(struct hid_device *hdev) | |||
239 | 239 | ||
240 | static const struct hid_device_id mosart_devices[] = { | 240 | static const struct hid_device_id mosart_devices[] = { |
241 | { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) }, | 241 | { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) }, |
242 | { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO) }, | ||
242 | { } | 243 | { } |
243 | }; | 244 | }; |
244 | MODULE_DEVICE_TABLE(hid, mosart_devices); | 245 | MODULE_DEVICE_TABLE(hid, mosart_devices); |
diff --git a/drivers/hid/hid-topseed.c b/drivers/hid/hid-topseed.c index 5771f851f856..956ed9ac19d4 100644 --- a/drivers/hid/hid-topseed.c +++ b/drivers/hid/hid-topseed.c | |||
@@ -64,6 +64,7 @@ static int ts_input_mapping(struct hid_device *hdev, struct hid_input *hi, | |||
64 | static const struct hid_device_id ts_devices[] = { | 64 | static const struct hid_device_id ts_devices[] = { |
65 | { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) }, | 65 | { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) }, |
66 | { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) }, | 66 | { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) }, |
67 | { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) }, | ||
67 | { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) }, | 68 | { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) }, |
68 | { } | 69 | { } |
69 | }; | 70 | }; |
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c index b729c0286679..599041a7f670 100644 --- a/drivers/hid/usbhid/hid-core.c +++ b/drivers/hid/usbhid/hid-core.c | |||
@@ -828,6 +828,7 @@ static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t co | |||
828 | } | 828 | } |
829 | } else { | 829 | } else { |
830 | int skipped_report_id = 0; | 830 | int skipped_report_id = 0; |
831 | int report_id = buf[0]; | ||
831 | if (buf[0] == 0x0) { | 832 | if (buf[0] == 0x0) { |
832 | /* Don't send the Report ID */ | 833 | /* Don't send the Report ID */ |
833 | buf++; | 834 | buf++; |
@@ -837,7 +838,7 @@ static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t co | |||
837 | ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), | 838 | ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), |
838 | HID_REQ_SET_REPORT, | 839 | HID_REQ_SET_REPORT, |
839 | USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, | 840 | USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, |
840 | ((report_type + 1) << 8) | *buf, | 841 | ((report_type + 1) << 8) | report_id, |
841 | interface->desc.bInterfaceNumber, buf, count, | 842 | interface->desc.bInterfaceNumber, buf, count, |
842 | USB_CTRL_SET_TIMEOUT); | 843 | USB_CTRL_SET_TIMEOUT); |
843 | /* count also the report id, if this was a numbered report. */ | 844 | /* count also the report id, if this was a numbered report. */ |
@@ -1445,6 +1446,11 @@ static const struct hid_device_id hid_usb_table[] = { | |||
1445 | { } | 1446 | { } |
1446 | }; | 1447 | }; |
1447 | 1448 | ||
1449 | struct usb_interface *usbhid_find_interface(int minor) | ||
1450 | { | ||
1451 | return usb_find_interface(&hid_driver, minor); | ||
1452 | } | ||
1453 | |||
1448 | static struct hid_driver hid_usb_driver = { | 1454 | static struct hid_driver hid_usb_driver = { |
1449 | .name = "generic-usb", | 1455 | .name = "generic-usb", |
1450 | .id_table = hid_usb_table, | 1456 | .id_table = hid_usb_table, |
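The usbhid_output_raw_report() fix above addresses SET_REPORT for unnumbered reports: the code strips the leading zero report ID by advancing buf, but the control-request wValue was still built from *buf, which by then pointed at payload data instead of the ID. Saving the ID before adjusting the pointer is the whole fix. The pitfall in miniature, with an invented send_report() standing in for the control transfer:

#include <stdio.h>

/* Invented stand-in for the control transfer; it only prints its arguments. */
static void send_report(unsigned char id, const unsigned char *payload, int len)
{
    printf("id=%u len=%d first payload byte=0x%02x\n", id, len, payload[0]);
}

int main(void)
{
    unsigned char buf[] = { 0x00, 0xaa, 0xbb };  /* unnumbered report */
    unsigned char *p = buf;
    int count = sizeof(buf);
    unsigned char report_id = p[0];  /* save the ID before touching p */

    if (report_id == 0x00) {
        p++;      /* do not transmit the ID byte itself... */
        count--;
    }
    send_report(report_id, p, count);  /* ...but do pass the saved ID */
    return 0;
}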
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 2643d3147621..70da3181c8a0 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c | |||
@@ -33,6 +33,7 @@ static const struct hid_blacklist { | |||
33 | { USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR, HID_QUIRK_BADPAD }, | 33 | { USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR, HID_QUIRK_BADPAD }, |
34 | { USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD, HID_QUIRK_BADPAD }, | 34 | { USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD, HID_QUIRK_BADPAD }, |
35 | { USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD }, | 35 | { USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD }, |
36 | { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER, HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET }, | ||
36 | { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH, HID_QUIRK_MULTI_INPUT }, | 37 | { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH, HID_QUIRK_MULTI_INPUT }, |
37 | { USB_VENDOR_ID_MOJO, USB_DEVICE_ID_RETRO_ADAPTER, HID_QUIRK_MULTI_INPUT }, | 38 | { USB_VENDOR_ID_MOJO, USB_DEVICE_ID_RETRO_ADAPTER, HID_QUIRK_MULTI_INPUT }, |
38 | { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, | 39 | { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, |
@@ -69,6 +70,7 @@ static const struct hid_blacklist { | |||
69 | { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET }, | 70 | { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET }, |
70 | { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT }, | 71 | { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT }, |
71 | { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U, HID_QUIRK_MULTI_INPUT }, | 72 | { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U, HID_QUIRK_MULTI_INPUT }, |
73 | { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5, HID_QUIRK_MULTI_INPUT }, | ||
72 | { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS }, | 74 | { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS }, |
73 | { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT }, | 75 | { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT }, |
74 | 76 | ||
@@ -77,6 +79,8 @@ static const struct hid_blacklist { | |||
77 | 79 | ||
78 | { USB_VENDOR_ID_PI_ENGINEERING, USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL, HID_QUIRK_HIDINPUT_FORCE }, | 80 | { USB_VENDOR_ID_PI_ENGINEERING, USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL, HID_QUIRK_HIDINPUT_FORCE }, |
79 | 81 | ||
82 | { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH, HID_QUIRK_MULTI_INPUT }, | ||
83 | |||
80 | { 0, 0 } | 84 | { 0, 0 } |
81 | }; | 85 | }; |
82 | 86 | ||
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index 0a29c51114aa..681e620eb95b 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c | |||
@@ -270,7 +270,7 @@ static int hiddev_open(struct inode *inode, struct file *file) | |||
270 | struct hiddev *hiddev; | 270 | struct hiddev *hiddev; |
271 | int res; | 271 | int res; |
272 | 272 | ||
273 | intf = usb_find_interface(&hiddev_driver, iminor(inode)); | 273 | intf = usbhid_find_interface(iminor(inode)); |
274 | if (!intf) | 274 | if (!intf) |
275 | return -ENODEV; | 275 | return -ENODEV; |
276 | hid = usb_get_intfdata(intf); | 276 | hid = usb_get_intfdata(intf); |
diff --git a/drivers/hid/usbhid/usbhid.h b/drivers/hid/usbhid/usbhid.h index 693fd3e720df..89d2e847dcc6 100644 --- a/drivers/hid/usbhid/usbhid.h +++ b/drivers/hid/usbhid/usbhid.h | |||
@@ -42,6 +42,7 @@ void usbhid_submit_report | |||
42 | (struct hid_device *hid, struct hid_report *report, unsigned char dir); | 42 | (struct hid_device *hid, struct hid_report *report, unsigned char dir); |
43 | int usbhid_get_power(struct hid_device *hid); | 43 | int usbhid_get_power(struct hid_device *hid); |
44 | void usbhid_put_power(struct hid_device *hid); | 44 | void usbhid_put_power(struct hid_device *hid); |
45 | struct usb_interface *usbhid_find_interface(int minor); | ||
45 | 46 | ||
46 | /* iofl flags */ | 47 | /* iofl flags */ |
47 | #define HID_CTRL_RUNNING 1 | 48 | #define HID_CTRL_RUNNING 1 |
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 4d4d09bdec0a..97499d00615a 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig | |||
@@ -409,7 +409,7 @@ config SENSORS_CORETEMP | |||
409 | 409 | ||
410 | config SENSORS_PKGTEMP | 410 | config SENSORS_PKGTEMP |
411 | tristate "Intel processor package temperature sensor" | 411 | tristate "Intel processor package temperature sensor" |
412 | depends on X86 && PCI && EXPERIMENTAL | 412 | depends on X86 && EXPERIMENTAL |
413 | help | 413 | help |
414 | If you say yes here you get support for the package level temperature | 414 | If you say yes here you get support for the package level temperature |
415 | sensor inside your CPU. Check documentation/driver for details. | 415 | sensor inside your CPU. Check documentation/driver for details. |
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c index 15c1a9616af3..0683e6be662c 100644 --- a/drivers/hwmon/adm1031.c +++ b/drivers/hwmon/adm1031.c | |||
@@ -79,7 +79,7 @@ struct adm1031_data { | |||
79 | int chip_type; | 79 | int chip_type; |
80 | char valid; /* !=0 if following fields are valid */ | 80 | char valid; /* !=0 if following fields are valid */ |
81 | unsigned long last_updated; /* In jiffies */ | 81 | unsigned long last_updated; /* In jiffies */ |
82 | unsigned int update_rate; /* In milliseconds */ | 82 | unsigned int update_interval; /* In milliseconds */ |
83 | /* The chan_select_table contains the possible configurations for | 83 | /* The chan_select_table contains the possible configurations for |
84 | * auto fan control. | 84 | * auto fan control. |
85 | */ | 85 | */ |
@@ -743,23 +743,23 @@ static SENSOR_DEVICE_ATTR(temp3_crit_alarm, S_IRUGO, show_alarm, NULL, 12); | |||
743 | static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 13); | 743 | static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 13); |
744 | static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 14); | 744 | static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 14); |
745 | 745 | ||
746 | /* Update Rate */ | 746 | /* Update Interval */ |
747 | static const unsigned int update_rates[] = { | 747 | static const unsigned int update_intervals[] = { |
748 | 16000, 8000, 4000, 2000, 1000, 500, 250, 125, | 748 | 16000, 8000, 4000, 2000, 1000, 500, 250, 125, |
749 | }; | 749 | }; |
750 | 750 | ||
751 | static ssize_t show_update_rate(struct device *dev, | 751 | static ssize_t show_update_interval(struct device *dev, |
752 | struct device_attribute *attr, char *buf) | 752 | struct device_attribute *attr, char *buf) |
753 | { | 753 | { |
754 | struct i2c_client *client = to_i2c_client(dev); | 754 | struct i2c_client *client = to_i2c_client(dev); |
755 | struct adm1031_data *data = i2c_get_clientdata(client); | 755 | struct adm1031_data *data = i2c_get_clientdata(client); |
756 | 756 | ||
757 | return sprintf(buf, "%u\n", data->update_rate); | 757 | return sprintf(buf, "%u\n", data->update_interval); |
758 | } | 758 | } |
759 | 759 | ||
760 | static ssize_t set_update_rate(struct device *dev, | 760 | static ssize_t set_update_interval(struct device *dev, |
761 | struct device_attribute *attr, | 761 | struct device_attribute *attr, |
762 | const char *buf, size_t count) | 762 | const char *buf, size_t count) |
763 | { | 763 | { |
764 | struct i2c_client *client = to_i2c_client(dev); | 764 | struct i2c_client *client = to_i2c_client(dev); |
765 | struct adm1031_data *data = i2c_get_clientdata(client); | 765 | struct adm1031_data *data = i2c_get_clientdata(client); |
@@ -771,12 +771,15 @@ static ssize_t set_update_rate(struct device *dev, | |||
771 | if (err) | 771 | if (err) |
772 | return err; | 772 | return err; |
773 | 773 | ||
774 | /* find the nearest update rate from the table */ | 774 | /* |
775 | for (i = 0; i < ARRAY_SIZE(update_rates) - 1; i++) { | 775 | * Find the nearest update interval from the table. |
776 | if (val >= update_rates[i]) | 776 | * Use it to determine the matching update rate. |
777 | */ | ||
778 | for (i = 0; i < ARRAY_SIZE(update_intervals) - 1; i++) { | ||
779 | if (val >= update_intervals[i]) | ||
777 | break; | 780 | break; |
778 | } | 781 | } |
779 | /* if not found, we point to the last entry (lowest update rate) */ | 782 | /* if not found, we point to the last entry (lowest update interval) */ |
780 | 783 | ||
781 | /* set the new update rate while preserving other settings */ | 784 | /* set the new update rate while preserving other settings */ |
782 | reg = adm1031_read_value(client, ADM1031_REG_FAN_FILTER); | 785 | reg = adm1031_read_value(client, ADM1031_REG_FAN_FILTER); |
@@ -785,14 +788,14 @@ static ssize_t set_update_rate(struct device *dev, | |||
785 | adm1031_write_value(client, ADM1031_REG_FAN_FILTER, reg); | 788 | adm1031_write_value(client, ADM1031_REG_FAN_FILTER, reg); |
786 | 789 | ||
787 | mutex_lock(&data->update_lock); | 790 | mutex_lock(&data->update_lock); |
788 | data->update_rate = update_rates[i]; | 791 | data->update_interval = update_intervals[i]; |
789 | mutex_unlock(&data->update_lock); | 792 | mutex_unlock(&data->update_lock); |
790 | 793 | ||
791 | return count; | 794 | return count; |
792 | } | 795 | } |
793 | 796 | ||
794 | static DEVICE_ATTR(update_rate, S_IRUGO | S_IWUSR, show_update_rate, | 797 | static DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR, show_update_interval, |
795 | set_update_rate); | 798 | set_update_interval); |
796 | 799 | ||
797 | static struct attribute *adm1031_attributes[] = { | 800 | static struct attribute *adm1031_attributes[] = { |
798 | &sensor_dev_attr_fan1_input.dev_attr.attr, | 801 | &sensor_dev_attr_fan1_input.dev_attr.attr, |
@@ -830,7 +833,7 @@ static struct attribute *adm1031_attributes[] = { | |||
830 | 833 | ||
831 | &sensor_dev_attr_auto_fan1_min_pwm.dev_attr.attr, | 834 | &sensor_dev_attr_auto_fan1_min_pwm.dev_attr.attr, |
832 | 835 | ||
833 | &dev_attr_update_rate.attr, | 836 | &dev_attr_update_interval.attr, |
834 | &dev_attr_alarms.attr, | 837 | &dev_attr_alarms.attr, |
835 | 838 | ||
836 | NULL | 839 | NULL |
@@ -981,7 +984,8 @@ static void adm1031_init_client(struct i2c_client *client) | |||
981 | mask = ADM1031_UPDATE_RATE_MASK; | 984 | mask = ADM1031_UPDATE_RATE_MASK; |
982 | read_val = adm1031_read_value(client, ADM1031_REG_FAN_FILTER); | 985 | read_val = adm1031_read_value(client, ADM1031_REG_FAN_FILTER); |
983 | i = (read_val & mask) >> ADM1031_UPDATE_RATE_SHIFT; | 986 | i = (read_val & mask) >> ADM1031_UPDATE_RATE_SHIFT; |
984 | data->update_rate = update_rates[i]; | 987 | /* Save it as update interval */ |
988 | data->update_interval = update_intervals[i]; | ||
985 | } | 989 | } |
986 | 990 | ||
987 | static struct adm1031_data *adm1031_update_device(struct device *dev) | 991 | static struct adm1031_data *adm1031_update_device(struct device *dev) |
@@ -993,7 +997,8 @@ static struct adm1031_data *adm1031_update_device(struct device *dev) | |||
993 | 997 | ||
994 | mutex_lock(&data->update_lock); | 998 | mutex_lock(&data->update_lock); |
995 | 999 | ||
996 | next_update = data->last_updated + msecs_to_jiffies(data->update_rate); | 1000 | next_update = data->last_updated |
1001 | + msecs_to_jiffies(data->update_interval); | ||
997 | if (time_after(jiffies, next_update) || !data->valid) { | 1002 | if (time_after(jiffies, next_update) || !data->valid) { |
998 | 1003 | ||
999 | dev_dbg(&client->dev, "Starting adm1031 update\n"); | 1004 | dev_dbg(&client->dev, "Starting adm1031 update\n"); |
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index de8111114f46..a23b17a78ace 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/pci.h> | 36 | #include <linux/pci.h> |
37 | #include <asm/msr.h> | 37 | #include <asm/msr.h> |
38 | #include <asm/processor.h> | 38 | #include <asm/processor.h> |
39 | #include <asm/smp.h> | ||
39 | 40 | ||
40 | #define DRVNAME "coretemp" | 41 | #define DRVNAME "coretemp" |
41 | 42 | ||
@@ -423,9 +424,18 @@ static int __cpuinit coretemp_device_add(unsigned int cpu) | |||
423 | int err; | 424 | int err; |
424 | struct platform_device *pdev; | 425 | struct platform_device *pdev; |
425 | struct pdev_entry *pdev_entry; | 426 | struct pdev_entry *pdev_entry; |
426 | #ifdef CONFIG_SMP | ||
427 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 427 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
428 | #endif | 428 | |
429 | /* | ||
430 | * CPUID.06H.EAX[0] indicates whether the CPU has thermal | ||
431 | * sensors. We check this bit only, all the early CPUs | ||
432 | * without thermal sensors will be filtered out. | ||
433 | */ | ||
434 | if (!cpu_has(c, X86_FEATURE_DTS)) { | ||
435 | printk(KERN_INFO DRVNAME ": CPU (model=0x%x)" | ||
436 | " has no thermal sensor.\n", c->x86_model); | ||
437 | return 0; | ||
438 | } | ||
429 | 439 | ||
430 | mutex_lock(&pdev_list_mutex); | 440 | mutex_lock(&pdev_list_mutex); |
431 | 441 | ||
@@ -482,14 +492,22 @@ exit: | |||
482 | 492 | ||
483 | static void coretemp_device_remove(unsigned int cpu) | 493 | static void coretemp_device_remove(unsigned int cpu) |
484 | { | 494 | { |
485 | struct pdev_entry *p, *n; | 495 | struct pdev_entry *p; |
496 | unsigned int i; | ||
497 | |||
486 | mutex_lock(&pdev_list_mutex); | 498 | mutex_lock(&pdev_list_mutex); |
487 | list_for_each_entry_safe(p, n, &pdev_list, list) { | 499 | list_for_each_entry(p, &pdev_list, list) { |
488 | if (p->cpu == cpu) { | 500 | if (p->cpu != cpu) |
489 | platform_device_unregister(p->pdev); | 501 | continue; |
490 | list_del(&p->list); | 502 | |
491 | kfree(p); | 503 | platform_device_unregister(p->pdev); |
492 | } | 504 | list_del(&p->list); |
505 | mutex_unlock(&pdev_list_mutex); | ||
506 | kfree(p); | ||
507 | for_each_cpu(i, cpu_sibling_mask(cpu)) | ||
508 | if (i != cpu && !coretemp_device_add(i)) | ||
509 | break; | ||
510 | return; | ||
493 | } | 511 | } |
494 | mutex_unlock(&pdev_list_mutex); | 512 | mutex_unlock(&pdev_list_mutex); |
495 | } | 513 | } |
@@ -527,30 +545,21 @@ static int __init coretemp_init(void) | |||
527 | if (err) | 545 | if (err) |
528 | goto exit; | 546 | goto exit; |
529 | 547 | ||
530 | for_each_online_cpu(i) { | 548 | for_each_online_cpu(i) |
531 | struct cpuinfo_x86 *c = &cpu_data(i); | 549 | coretemp_device_add(i); |
532 | /* | 550 | |
533 | * CPUID.06H.EAX[0] indicates whether the CPU has thermal | 551 | #ifndef CONFIG_HOTPLUG_CPU |
534 | * sensors. We check this bit only, all the early CPUs | ||
535 | * without thermal sensors will be filtered out. | ||
536 | */ | ||
537 | if (c->cpuid_level >= 6 && (cpuid_eax(0x06) & 0x01)) | ||
538 | coretemp_device_add(i); | ||
539 | else { | ||
540 | printk(KERN_INFO DRVNAME ": CPU (model=0x%x)" | ||
541 | " has no thermal sensor.\n", c->x86_model); | ||
542 | } | ||
543 | } | ||
544 | if (list_empty(&pdev_list)) { | 552 | if (list_empty(&pdev_list)) { |
545 | err = -ENODEV; | 553 | err = -ENODEV; |
546 | goto exit_driver_unreg; | 554 | goto exit_driver_unreg; |
547 | } | 555 | } |
556 | #endif | ||
548 | 557 | ||
549 | register_hotcpu_notifier(&coretemp_cpu_notifier); | 558 | register_hotcpu_notifier(&coretemp_cpu_notifier); |
550 | return 0; | 559 | return 0; |
551 | 560 | ||
552 | exit_driver_unreg: | ||
553 | #ifndef CONFIG_HOTPLUG_CPU | 561 | #ifndef CONFIG_HOTPLUG_CPU |
562 | exit_driver_unreg: | ||
554 | platform_driver_unregister(&coretemp_driver); | 563 | platform_driver_unregister(&coretemp_driver); |
555 | #endif | 564 | #endif |
556 | exit: | 565 | exit: |
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c index 5b58b20dead1..8dee3f38fdfb 100644 --- a/drivers/hwmon/emc1403.c +++ b/drivers/hwmon/emc1403.c | |||
@@ -308,7 +308,6 @@ static int emc1403_probe(struct i2c_client *client, | |||
308 | res = sysfs_create_group(&client->dev.kobj, &m_thermal_gr); | 308 | res = sysfs_create_group(&client->dev.kobj, &m_thermal_gr); |
309 | if (res) { | 309 | if (res) { |
310 | dev_warn(&client->dev, "create group failed\n"); | 310 | dev_warn(&client->dev, "create group failed\n"); |
311 | hwmon_device_unregister(data->hwmon_dev); | ||
312 | goto thermal_error1; | 311 | goto thermal_error1; |
313 | } | 312 | } |
314 | data->hwmon_dev = hwmon_device_register(&client->dev); | 313 | data->hwmon_dev = hwmon_device_register(&client->dev); |
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c index 537841ef44b9..75afb3b0e076 100644 --- a/drivers/hwmon/f71882fg.c +++ b/drivers/hwmon/f71882fg.c | |||
@@ -111,7 +111,7 @@ static struct platform_device *f71882fg_pdev; | |||
111 | /* Super-I/O Function prototypes */ | 111 | /* Super-I/O Function prototypes */ |
112 | static inline int superio_inb(int base, int reg); | 112 | static inline int superio_inb(int base, int reg); |
113 | static inline int superio_inw(int base, int reg); | 113 | static inline int superio_inw(int base, int reg); |
114 | static inline void superio_enter(int base); | 114 | static inline int superio_enter(int base); |
115 | static inline void superio_select(int base, int ld); | 115 | static inline void superio_select(int base, int ld); |
116 | static inline void superio_exit(int base); | 116 | static inline void superio_exit(int base); |
117 | 117 | ||
@@ -861,11 +861,20 @@ static int superio_inw(int base, int reg) | |||
861 | return val; | 861 | return val; |
862 | } | 862 | } |
863 | 863 | ||
864 | static inline void superio_enter(int base) | 864 | static inline int superio_enter(int base) |
865 | { | 865 | { |
866 | /* Don't step on other drivers' I/O space by accident */ | ||
867 | if (!request_muxed_region(base, 2, DRVNAME)) { | ||
868 | printk(KERN_ERR DRVNAME ": I/O address 0x%04x already in use\n", | ||
869 | base); | ||
870 | return -EBUSY; | ||
871 | } | ||
872 | |||
866 | /* according to the datasheet the key must be sent twice! */ | 873 | /* according to the datasheet the key must be sent twice! */ |
867 | outb(SIO_UNLOCK_KEY, base); | 874 | outb(SIO_UNLOCK_KEY, base); |
868 | outb(SIO_UNLOCK_KEY, base); | 875 | outb(SIO_UNLOCK_KEY, base); |
876 | |||
877 | return 0; | ||
869 | } | 878 | } |
870 | 879 | ||
871 | static inline void superio_select(int base, int ld) | 880 | static inline void superio_select(int base, int ld) |
@@ -877,6 +886,7 @@ static inline void superio_select(int base, int ld) | |||
877 | static inline void superio_exit(int base) | 886 | static inline void superio_exit(int base) |
878 | { | 887 | { |
879 | outb(SIO_LOCK_KEY, base); | 888 | outb(SIO_LOCK_KEY, base); |
889 | release_region(base, 2); | ||
880 | } | 890 | } |
881 | 891 | ||
882 | static inline int fan_from_reg(u16 reg) | 892 | static inline int fan_from_reg(u16 reg) |
@@ -2175,21 +2185,15 @@ static int f71882fg_remove(struct platform_device *pdev) | |||
2175 | static int __init f71882fg_find(int sioaddr, unsigned short *address, | 2185 | static int __init f71882fg_find(int sioaddr, unsigned short *address, |
2176 | struct f71882fg_sio_data *sio_data) | 2186 | struct f71882fg_sio_data *sio_data) |
2177 | { | 2187 | { |
2178 | int err = -ENODEV; | ||
2179 | u16 devid; | 2188 | u16 devid; |
2180 | 2189 | int err = superio_enter(sioaddr); | |
2181 | /* Don't step on other drivers' I/O space by accident */ | 2190 | if (err) |
2182 | if (!request_region(sioaddr, 2, DRVNAME)) { | 2191 | return err; |
2183 | printk(KERN_ERR DRVNAME ": I/O address 0x%04x already in use\n", | ||
2184 | (int)sioaddr); | ||
2185 | return -EBUSY; | ||
2186 | } | ||
2187 | |||
2188 | superio_enter(sioaddr); | ||
2189 | 2192 | ||
2190 | devid = superio_inw(sioaddr, SIO_REG_MANID); | 2193 | devid = superio_inw(sioaddr, SIO_REG_MANID); |
2191 | if (devid != SIO_FINTEK_ID) { | 2194 | if (devid != SIO_FINTEK_ID) { |
2192 | pr_debug(DRVNAME ": Not a Fintek device\n"); | 2195 | pr_debug(DRVNAME ": Not a Fintek device\n"); |
2196 | err = -ENODEV; | ||
2193 | goto exit; | 2197 | goto exit; |
2194 | } | 2198 | } |
2195 | 2199 | ||
@@ -2213,6 +2217,7 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address, | |||
2213 | default: | 2217 | default: |
2214 | printk(KERN_INFO DRVNAME ": Unsupported Fintek device: %04x\n", | 2218 | printk(KERN_INFO DRVNAME ": Unsupported Fintek device: %04x\n", |
2215 | (unsigned int)devid); | 2219 | (unsigned int)devid); |
2220 | err = -ENODEV; | ||
2216 | goto exit; | 2221 | goto exit; |
2217 | } | 2222 | } |
2218 | 2223 | ||
@@ -2223,12 +2228,14 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address, | |||
2223 | 2228 | ||
2224 | if (!(superio_inb(sioaddr, SIO_REG_ENABLE) & 0x01)) { | 2229 | if (!(superio_inb(sioaddr, SIO_REG_ENABLE) & 0x01)) { |
2225 | printk(KERN_WARNING DRVNAME ": Device not activated\n"); | 2230 | printk(KERN_WARNING DRVNAME ": Device not activated\n"); |
2231 | err = -ENODEV; | ||
2226 | goto exit; | 2232 | goto exit; |
2227 | } | 2233 | } |
2228 | 2234 | ||
2229 | *address = superio_inw(sioaddr, SIO_REG_ADDR); | 2235 | *address = superio_inw(sioaddr, SIO_REG_ADDR); |
2230 | if (*address == 0) { | 2236 | if (*address == 0) { |
2231 | printk(KERN_WARNING DRVNAME ": Base address not set\n"); | 2237 | printk(KERN_WARNING DRVNAME ": Base address not set\n"); |
2238 | err = -ENODEV; | ||
2232 | goto exit; | 2239 | goto exit; |
2233 | } | 2240 | } |
2234 | *address &= ~(REGION_LENGTH - 1); /* Ignore 3 LSB */ | 2241 | *address &= ~(REGION_LENGTH - 1); /* Ignore 3 LSB */ |
@@ -2239,7 +2246,6 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address, | |||
2239 | (int)superio_inb(sioaddr, SIO_REG_DEVREV)); | 2246 | (int)superio_inb(sioaddr, SIO_REG_DEVREV)); |
2240 | exit: | 2247 | exit: |
2241 | superio_exit(sioaddr); | 2248 | superio_exit(sioaddr); |
2242 | release_region(sioaddr, 2); | ||
2243 | return err; | 2249 | return err; |
2244 | } | 2250 | } |
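
For context, a minimal sketch of the access pattern the f71882fg hunks above converge on: reserve the two Super-I/O configuration ports with request_muxed_region() so other Super-I/O drivers can take turns on them, propagate the -EBUSY from superio_enter(), and release the region in superio_exit(). Only request_muxed_region(), release_region() and outb() are taken from the patch; the key values and the driver name below are placeholders.

    #include <linux/ioport.h>
    #include <linux/io.h>
    #include <linux/kernel.h>

    #define EXAMPLE_UNLOCK_KEY	0x87	/* placeholder Super-I/O unlock key */
    #define EXAMPLE_LOCK_KEY	0xaa	/* placeholder lock key */

    static int example_superio_enter(int base)
    {
    	/* Reserve base..base+1; a muxed region may be shared in turns. */
    	if (!request_muxed_region(base, 2, "example-sio")) {
    		pr_err("example-sio: I/O address 0x%04x already in use\n", base);
    		return -EBUSY;
    	}

    	outb(EXAMPLE_UNLOCK_KEY, base);	/* unlock key is written twice */
    	outb(EXAMPLE_UNLOCK_KEY, base);
    	return 0;
    }

    static void example_superio_exit(int base)
    {
    	outb(EXAMPLE_LOCK_KEY, base);	/* lock the configuration space */
    	release_region(base, 2);	/* always pairs with the enter above */
    }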
2245 | 2251 | ||
diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c index 0f58ecc5334d..9638d58f99fd 100644 --- a/drivers/hwmon/f75375s.c +++ b/drivers/hwmon/f75375s.c | |||
@@ -79,7 +79,7 @@ enum chips { f75373, f75375 }; | |||
79 | #define F75375_REG_PWM2_DROP_DUTY 0x6C | 79 | #define F75375_REG_PWM2_DROP_DUTY 0x6C |
80 | 80 | ||
81 | #define FAN_CTRL_LINEAR(nr) (4 + nr) | 81 | #define FAN_CTRL_LINEAR(nr) (4 + nr) |
82 | #define FAN_CTRL_MODE(nr) (5 + ((nr) * 2)) | 82 | #define FAN_CTRL_MODE(nr) (4 + ((nr) * 2)) |
83 | 83 | ||
84 | /* | 84 | /* |
85 | * Data structures and manipulation thereof | 85 | * Data structures and manipulation thereof |
@@ -298,7 +298,7 @@ static int set_pwm_enable_direct(struct i2c_client *client, int nr, int val) | |||
298 | return -EINVAL; | 298 | return -EINVAL; |
299 | 299 | ||
300 | fanmode = f75375_read8(client, F75375_REG_FAN_TIMER); | 300 | fanmode = f75375_read8(client, F75375_REG_FAN_TIMER); |
301 | fanmode = ~(3 << FAN_CTRL_MODE(nr)); | 301 | fanmode &= ~(3 << FAN_CTRL_MODE(nr)); |
302 | 302 | ||
303 | switch (val) { | 303 | switch (val) { |
304 | case 0: /* Full speed */ | 304 | case 0: /* Full speed */ |
@@ -350,7 +350,7 @@ static ssize_t set_pwm_mode(struct device *dev, struct device_attribute *attr, | |||
350 | 350 | ||
351 | mutex_lock(&data->update_lock); | 351 | mutex_lock(&data->update_lock); |
352 | conf = f75375_read8(client, F75375_REG_CONFIG1); | 352 | conf = f75375_read8(client, F75375_REG_CONFIG1); |
353 | conf = ~(1 << FAN_CTRL_LINEAR(nr)); | 353 | conf &= ~(1 << FAN_CTRL_LINEAR(nr)); |
354 | 354 | ||
355 | if (val == 0) | 355 | if (val == 0) |
356 | conf |= (1 << FAN_CTRL_LINEAR(nr)) ; | 356 | conf |= (1 << FAN_CTRL_LINEAR(nr)) ; |
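The two f75375s fixes above restore a read-modify-write: with plain assignment ("reg = ~mask") every other bit read back from the register is thrown away, while "reg &= ~mask" clears only the field being updated before the new value is OR-ed in. A self-contained toy example of the pattern (the field layout is invented, not the F75375 register map):

    #include <stdint.h>
    #include <stdio.h>

    #define MODE_SHIFT(nr)	(4 + (nr) * 2)	/* 2-bit mode field per fan */

    static uint8_t set_fan_mode(uint8_t reg, int nr, unsigned int mode)
    {
    	reg &= ~(3u << MODE_SHIFT(nr));		/* clear only this fan's field */
    	reg |= (mode & 3u) << MODE_SHIFT(nr);	/* then set the new mode */
    	return reg;
    }

    int main(void)
    {
    	uint8_t reg = 0xff;	/* pretend this came back from the chip */

    	/* Prints 0xdf: bits outside fan 0's mode field are preserved. */
    	printf("0x%02x\n", set_fan_mode(reg, 0, 1));
    	return 0;
    }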
diff --git a/drivers/hwmon/hp_accel.c b/drivers/hwmon/hp_accel.c index 7580f55e67e3..36e957532230 100644 --- a/drivers/hwmon/hp_accel.c +++ b/drivers/hwmon/hp_accel.c | |||
@@ -221,6 +221,8 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = { | |||
221 | AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left), | 221 | AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left), |
222 | AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted), | 222 | AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted), |
223 | AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap), | 223 | AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap), |
224 | AXIS_DMI_MATCH("HPB532x", "HP ProBook 532", y_inverted), | ||
225 | AXIS_DMI_MATCH("Mini5102", "HP Mini 5102", xy_rotated_left_usd), | ||
224 | { NULL, } | 226 | { NULL, } |
225 | /* Laptop models without axis info (yet): | 227 | /* Laptop models without axis info (yet): |
226 | * "NC6910" "HP Compaq 6910" | 228 | * "NC6910" "HP Compaq 6910" |
diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c index 6138f036b159..fc591ae53107 100644 --- a/drivers/hwmon/lis3lv02d.c +++ b/drivers/hwmon/lis3lv02d.c | |||
@@ -277,7 +277,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy) | |||
277 | wake_up_interruptible(&lis3_dev.misc_wait); | 277 | wake_up_interruptible(&lis3_dev.misc_wait); |
278 | kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN); | 278 | kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN); |
279 | out: | 279 | out: |
280 | if (lis3_dev.whoami == WAI_8B && lis3_dev.idev && | 280 | if (lis3_dev.pdata && lis3_dev.whoami == WAI_8B && lis3_dev.idev && |
281 | lis3_dev.idev->input->users) | 281 | lis3_dev.idev->input->users) |
282 | return IRQ_WAKE_THREAD; | 282 | return IRQ_WAKE_THREAD; |
283 | return IRQ_HANDLED; | 283 | return IRQ_HANDLED; |
@@ -718,7 +718,7 @@ int lis3lv02d_init_device(struct lis3lv02d *dev) | |||
718 | * io-apic is not configurable (and generates a warning) but I keep it | 718 | * io-apic is not configurable (and generates a warning) but I keep it |
719 | * in case of support for other hardware. | 719 | * in case of support for other hardware. |
720 | */ | 720 | */ |
721 | if (dev->whoami == WAI_8B) | 721 | if (dev->pdata && dev->whoami == WAI_8B) |
722 | thread_fn = lis302dl_interrupt_thread1_8b; | 722 | thread_fn = lis302dl_interrupt_thread1_8b; |
723 | else | 723 | else |
724 | thread_fn = NULL; | 724 | thread_fn = NULL; |
diff --git a/drivers/hwmon/lis3lv02d_i2c.c b/drivers/hwmon/lis3lv02d_i2c.c index dc1f5402c1d7..8e5933b72d19 100644 --- a/drivers/hwmon/lis3lv02d_i2c.c +++ b/drivers/hwmon/lis3lv02d_i2c.c | |||
@@ -121,7 +121,7 @@ static int lis3lv02d_i2c_suspend(struct i2c_client *client, pm_message_t mesg) | |||
121 | { | 121 | { |
122 | struct lis3lv02d *lis3 = i2c_get_clientdata(client); | 122 | struct lis3lv02d *lis3 = i2c_get_clientdata(client); |
123 | 123 | ||
124 | if (!lis3->pdata->wakeup_flags) | 124 | if (!lis3->pdata || !lis3->pdata->wakeup_flags) |
125 | lis3lv02d_poweroff(lis3); | 125 | lis3lv02d_poweroff(lis3); |
126 | return 0; | 126 | return 0; |
127 | } | 127 | } |
@@ -130,7 +130,7 @@ static int lis3lv02d_i2c_resume(struct i2c_client *client) | |||
130 | { | 130 | { |
131 | struct lis3lv02d *lis3 = i2c_get_clientdata(client); | 131 | struct lis3lv02d *lis3 = i2c_get_clientdata(client); |
132 | 132 | ||
133 | if (!lis3->pdata->wakeup_flags) | 133 | if (!lis3->pdata || !lis3->pdata->wakeup_flags) |
134 | lis3lv02d_poweron(lis3); | 134 | lis3lv02d_poweron(lis3); |
135 | return 0; | 135 | return 0; |
136 | } | 136 | } |
diff --git a/drivers/hwmon/lis3lv02d_spi.c b/drivers/hwmon/lis3lv02d_spi.c index 82b16808a274..b9be5e3a22b3 100644 --- a/drivers/hwmon/lis3lv02d_spi.c +++ b/drivers/hwmon/lis3lv02d_spi.c | |||
@@ -92,7 +92,7 @@ static int lis3lv02d_spi_suspend(struct spi_device *spi, pm_message_t mesg) | |||
92 | { | 92 | { |
93 | struct lis3lv02d *lis3 = spi_get_drvdata(spi); | 93 | struct lis3lv02d *lis3 = spi_get_drvdata(spi); |
94 | 94 | ||
95 | if (!lis3->pdata->wakeup_flags) | 95 | if (!lis3->pdata || !lis3->pdata->wakeup_flags) |
96 | lis3lv02d_poweroff(&lis3_dev); | 96 | lis3lv02d_poweroff(&lis3_dev); |
97 | 97 | ||
98 | return 0; | 98 | return 0; |
@@ -102,7 +102,7 @@ static int lis3lv02d_spi_resume(struct spi_device *spi) | |||
102 | { | 102 | { |
103 | struct lis3lv02d *lis3 = spi_get_drvdata(spi); | 103 | struct lis3lv02d *lis3 = spi_get_drvdata(spi); |
104 | 104 | ||
105 | if (!lis3->pdata->wakeup_flags) | 105 | if (!lis3->pdata || !lis3->pdata->wakeup_flags) |
106 | lis3lv02d_poweron(lis3); | 106 | lis3lv02d_poweron(lis3); |
107 | 107 | ||
108 | return 0; | 108 | return 0; |
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c index 94741d42112d..464340f25496 100644 --- a/drivers/hwmon/lm95241.c +++ b/drivers/hwmon/lm95241.c | |||
@@ -91,7 +91,7 @@ static struct lm95241_data *lm95241_update_device(struct device *dev); | |||
91 | struct lm95241_data { | 91 | struct lm95241_data { |
92 | struct device *hwmon_dev; | 92 | struct device *hwmon_dev; |
93 | struct mutex update_lock; | 93 | struct mutex update_lock; |
94 | unsigned long last_updated, rate; /* in jiffies */ | 94 | unsigned long last_updated, interval; /* in jiffies */ |
95 | char valid; /* zero until following fields are valid */ | 95 | char valid; /* zero until following fields are valid */ |
96 | /* registers values */ | 96 | /* registers values */ |
97 | u8 local_h, local_l; /* local */ | 97 | u8 local_h, local_l; /* local */ |
@@ -114,23 +114,23 @@ show_temp(local); | |||
114 | show_temp(remote1); | 114 | show_temp(remote1); |
115 | show_temp(remote2); | 115 | show_temp(remote2); |
116 | 116 | ||
117 | static ssize_t show_rate(struct device *dev, struct device_attribute *attr, | 117 | static ssize_t show_interval(struct device *dev, struct device_attribute *attr, |
118 | char *buf) | 118 | char *buf) |
119 | { | 119 | { |
120 | struct lm95241_data *data = lm95241_update_device(dev); | 120 | struct lm95241_data *data = lm95241_update_device(dev); |
121 | 121 | ||
122 | snprintf(buf, PAGE_SIZE - 1, "%lu\n", 1000 * data->rate / HZ); | 122 | snprintf(buf, PAGE_SIZE - 1, "%lu\n", 1000 * data->interval / HZ); |
123 | return strlen(buf); | 123 | return strlen(buf); |
124 | } | 124 | } |
125 | 125 | ||
126 | static ssize_t set_rate(struct device *dev, struct device_attribute *attr, | 126 | static ssize_t set_interval(struct device *dev, struct device_attribute *attr, |
127 | const char *buf, size_t count) | 127 | const char *buf, size_t count) |
128 | { | 128 | { |
129 | struct i2c_client *client = to_i2c_client(dev); | 129 | struct i2c_client *client = to_i2c_client(dev); |
130 | struct lm95241_data *data = i2c_get_clientdata(client); | 130 | struct lm95241_data *data = i2c_get_clientdata(client); |
131 | 131 | ||
132 | strict_strtol(buf, 10, &data->rate); | 132 | strict_strtol(buf, 10, &data->interval); |
133 | data->rate = data->rate * HZ / 1000; | 133 | data->interval = data->interval * HZ / 1000; |
134 | 134 | ||
135 | return count; | 135 | return count; |
136 | } | 136 | } |
@@ -286,7 +286,8 @@ static DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_min1, set_min1); | |||
286 | static DEVICE_ATTR(temp3_min, S_IWUSR | S_IRUGO, show_min2, set_min2); | 286 | static DEVICE_ATTR(temp3_min, S_IWUSR | S_IRUGO, show_min2, set_min2); |
287 | static DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_max1, set_max1); | 287 | static DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_max1, set_max1); |
288 | static DEVICE_ATTR(temp3_max, S_IWUSR | S_IRUGO, show_max2, set_max2); | 288 | static DEVICE_ATTR(temp3_max, S_IWUSR | S_IRUGO, show_max2, set_max2); |
289 | static DEVICE_ATTR(rate, S_IWUSR | S_IRUGO, show_rate, set_rate); | 289 | static DEVICE_ATTR(update_interval, S_IWUSR | S_IRUGO, show_interval, |
290 | set_interval); | ||
290 | 291 | ||
291 | static struct attribute *lm95241_attributes[] = { | 292 | static struct attribute *lm95241_attributes[] = { |
292 | &dev_attr_temp1_input.attr, | 293 | &dev_attr_temp1_input.attr, |
@@ -298,7 +299,7 @@ static struct attribute *lm95241_attributes[] = { | |||
298 | &dev_attr_temp3_min.attr, | 299 | &dev_attr_temp3_min.attr, |
299 | &dev_attr_temp2_max.attr, | 300 | &dev_attr_temp2_max.attr, |
300 | &dev_attr_temp3_max.attr, | 301 | &dev_attr_temp3_max.attr, |
301 | &dev_attr_rate.attr, | 302 | &dev_attr_update_interval.attr, |
302 | NULL | 303 | NULL |
303 | }; | 304 | }; |
304 | 305 | ||
@@ -376,7 +377,7 @@ static void lm95241_init_client(struct i2c_client *client) | |||
376 | { | 377 | { |
377 | struct lm95241_data *data = i2c_get_clientdata(client); | 378 | struct lm95241_data *data = i2c_get_clientdata(client); |
378 | 379 | ||
379 | data->rate = HZ; /* 1 sec default */ | 380 | data->interval = HZ; /* 1 sec default */ |
380 | data->valid = 0; | 381 | data->valid = 0; |
381 | data->config = CFG_CR0076; | 382 | data->config = CFG_CR0076; |
382 | data->model = 0; | 383 | data->model = 0; |
@@ -410,7 +411,7 @@ static struct lm95241_data *lm95241_update_device(struct device *dev) | |||
410 | 411 | ||
411 | mutex_lock(&data->update_lock); | 412 | mutex_lock(&data->update_lock); |
412 | 413 | ||
413 | if (time_after(jiffies, data->last_updated + data->rate) || | 414 | if (time_after(jiffies, data->last_updated + data->interval) || |
414 | !data->valid) { | 415 | !data->valid) { |
415 | dev_dbg(&client->dev, "Updating lm95241 data.\n"); | 416 | dev_dbg(&client->dev, "Updating lm95241 data.\n"); |
416 | data->local_h = | 417 | data->local_h = |
diff --git a/drivers/hwmon/pkgtemp.c b/drivers/hwmon/pkgtemp.c index 74157fcda6ed..f11903936c8b 100644 --- a/drivers/hwmon/pkgtemp.c +++ b/drivers/hwmon/pkgtemp.c | |||
@@ -33,7 +33,6 @@ | |||
33 | #include <linux/list.h> | 33 | #include <linux/list.h> |
34 | #include <linux/platform_device.h> | 34 | #include <linux/platform_device.h> |
35 | #include <linux/cpu.h> | 35 | #include <linux/cpu.h> |
36 | #include <linux/pci.h> | ||
37 | #include <asm/msr.h> | 36 | #include <asm/msr.h> |
38 | #include <asm/processor.h> | 37 | #include <asm/processor.h> |
39 | 38 | ||
@@ -224,7 +223,7 @@ static int __devinit pkgtemp_probe(struct platform_device *pdev) | |||
224 | 223 | ||
225 | err = sysfs_create_group(&pdev->dev.kobj, &pkgtemp_group); | 224 | err = sysfs_create_group(&pdev->dev.kobj, &pkgtemp_group); |
226 | if (err) | 225 | if (err) |
227 | goto exit_free; | 226 | goto exit_dev; |
228 | 227 | ||
229 | data->hwmon_dev = hwmon_device_register(&pdev->dev); | 228 | data->hwmon_dev = hwmon_device_register(&pdev->dev); |
230 | if (IS_ERR(data->hwmon_dev)) { | 229 | if (IS_ERR(data->hwmon_dev)) { |
@@ -238,6 +237,8 @@ static int __devinit pkgtemp_probe(struct platform_device *pdev) | |||
238 | 237 | ||
239 | exit_class: | 238 | exit_class: |
240 | sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group); | 239 | sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group); |
240 | exit_dev: | ||
241 | device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr); | ||
241 | exit_free: | 242 | exit_free: |
242 | kfree(data); | 243 | kfree(data); |
243 | exit: | 244 | exit: |
@@ -250,6 +251,7 @@ static int __devexit pkgtemp_remove(struct platform_device *pdev) | |||
250 | 251 | ||
251 | hwmon_device_unregister(data->hwmon_dev); | 252 | hwmon_device_unregister(data->hwmon_dev); |
252 | sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group); | 253 | sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group); |
254 | device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr); | ||
253 | platform_set_drvdata(pdev, NULL); | 255 | platform_set_drvdata(pdev, NULL); |
254 | kfree(data); | 256 | kfree(data); |
255 | return 0; | 257 | return 0; |
@@ -281,9 +283,10 @@ static int __cpuinit pkgtemp_device_add(unsigned int cpu) | |||
281 | int err; | 283 | int err; |
282 | struct platform_device *pdev; | 284 | struct platform_device *pdev; |
283 | struct pdev_entry *pdev_entry; | 285 | struct pdev_entry *pdev_entry; |
284 | #ifdef CONFIG_SMP | ||
285 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 286 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
286 | #endif | 287 | |
288 | if (!cpu_has(c, X86_FEATURE_PTS)) | ||
289 | return 0; | ||
287 | 290 | ||
288 | mutex_lock(&pdev_list_mutex); | 291 | mutex_lock(&pdev_list_mutex); |
289 | 292 | ||
@@ -339,17 +342,18 @@ exit: | |||
339 | #ifdef CONFIG_HOTPLUG_CPU | 342 | #ifdef CONFIG_HOTPLUG_CPU |
340 | static void pkgtemp_device_remove(unsigned int cpu) | 343 | static void pkgtemp_device_remove(unsigned int cpu) |
341 | { | 344 | { |
342 | struct pdev_entry *p, *n; | 345 | struct pdev_entry *p; |
343 | unsigned int i; | 346 | unsigned int i; |
344 | int err; | 347 | int err; |
345 | 348 | ||
346 | mutex_lock(&pdev_list_mutex); | 349 | mutex_lock(&pdev_list_mutex); |
347 | list_for_each_entry_safe(p, n, &pdev_list, list) { | 350 | list_for_each_entry(p, &pdev_list, list) { |
348 | if (p->cpu != cpu) | 351 | if (p->cpu != cpu) |
349 | continue; | 352 | continue; |
350 | 353 | ||
351 | platform_device_unregister(p->pdev); | 354 | platform_device_unregister(p->pdev); |
352 | list_del(&p->list); | 355 | list_del(&p->list); |
356 | mutex_unlock(&pdev_list_mutex); | ||
353 | kfree(p); | 357 | kfree(p); |
354 | for_each_cpu(i, cpu_core_mask(cpu)) { | 358 | for_each_cpu(i, cpu_core_mask(cpu)) { |
355 | if (i != cpu) { | 359 | if (i != cpu) { |
@@ -358,7 +362,7 @@ static void pkgtemp_device_remove(unsigned int cpu) | |||
358 | break; | 362 | break; |
359 | } | 363 | } |
360 | } | 364 | } |
361 | break; | 365 | return; |
362 | } | 366 | } |
363 | mutex_unlock(&pdev_list_mutex); | 367 | mutex_unlock(&pdev_list_mutex); |
364 | } | 368 | } |
@@ -399,11 +403,6 @@ static int __init pkgtemp_init(void) | |||
399 | goto exit; | 403 | goto exit; |
400 | 404 | ||
401 | for_each_online_cpu(i) { | 405 | for_each_online_cpu(i) { |
402 | struct cpuinfo_x86 *c = &cpu_data(i); | ||
403 | |||
404 | if (!cpu_has(c, X86_FEATURE_PTS)) | ||
405 | continue; | ||
406 | |||
407 | err = pkgtemp_device_add(i); | 406 | err = pkgtemp_device_add(i); |
408 | if (err) | 407 | if (err) |
409 | goto exit_devices_unreg; | 408 | goto exit_devices_unreg; |
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c index e96e69dd36fb..072c58008a63 100644 --- a/drivers/hwmon/w83627ehf.c +++ b/drivers/hwmon/w83627ehf.c | |||
@@ -127,6 +127,7 @@ superio_enter(int ioreg) | |||
127 | static inline void | 127 | static inline void |
128 | superio_exit(int ioreg) | 128 | superio_exit(int ioreg) |
129 | { | 129 | { |
130 | outb(0xaa, ioreg); | ||
130 | outb(0x02, ioreg); | 131 | outb(0x02, ioreg); |
131 | outb(0x02, ioreg + 1); | 132 | outb(0x02, ioreg + 1); |
132 | } | 133 | } |
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c index 2222c87876b9..b8feac5f2ef4 100644 --- a/drivers/i2c/busses/i2c-davinci.c +++ b/drivers/i2c/busses/i2c-davinci.c | |||
@@ -357,9 +357,6 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop) | |||
357 | 357 | ||
358 | dev->terminate = 0; | 358 | dev->terminate = 0; |
359 | 359 | ||
360 | /* write the data into mode register */ | ||
361 | davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag); | ||
362 | |||
363 | /* | 360 | /* |
364 | * First byte should be set here, not after interrupt, | 361 | * First byte should be set here, not after interrupt, |
365 | * because transmit-data-ready interrupt can come before | 362 | * because transmit-data-ready interrupt can come before |
@@ -371,6 +368,9 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop) | |||
371 | dev->buf_len--; | 368 | dev->buf_len--; |
372 | } | 369 | } |
373 | 370 | ||
371 | /* write the data into mode register; start transmitting */ | ||
372 | davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag); | ||
373 | |||
374 | r = wait_for_completion_interruptible_timeout(&dev->cmd_complete, | 374 | r = wait_for_completion_interruptible_timeout(&dev->cmd_complete, |
375 | dev->adapter.timeout); | 375 | dev->adapter.timeout); |
376 | if (r == 0) { | 376 | if (r == 0) { |
diff --git a/drivers/i2c/busses/i2c-octeon.c b/drivers/i2c/busses/i2c-octeon.c index 0e9f85d0a835..56dbe54e8811 100644 --- a/drivers/i2c/busses/i2c-octeon.c +++ b/drivers/i2c/busses/i2c-octeon.c | |||
@@ -218,7 +218,7 @@ static int octeon_i2c_wait(struct octeon_i2c *i2c) | |||
218 | return result; | 218 | return result; |
219 | } else if (result == 0) { | 219 | } else if (result == 0) { |
220 | dev_dbg(i2c->dev, "%s: timeout\n", __func__); | 220 | dev_dbg(i2c->dev, "%s: timeout\n", __func__); |
221 | result = -ETIMEDOUT; | 221 | return -ETIMEDOUT; |
222 | } | 222 | } |
223 | 223 | ||
224 | return 0; | 224 | return 0; |
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index 7674efb55378..b33c78586bfc 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c | |||
@@ -680,6 +680,8 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) | |||
680 | 680 | ||
681 | if (r == 0) | 681 | if (r == 0) |
682 | r = num; | 682 | r = num; |
683 | |||
684 | omap_i2c_wait_for_bb(dev); | ||
683 | out: | 685 | out: |
684 | omap_i2c_idle(dev); | 686 | omap_i2c_idle(dev); |
685 | return r; | 687 | return r; |
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index 72902e0bbfa7..bf831bf81587 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c | |||
@@ -662,8 +662,8 @@ static int s3c24xx_i2c_clockrate(struct s3c24xx_i2c *i2c, unsigned int *got) | |||
662 | unsigned long sda_delay; | 662 | unsigned long sda_delay; |
663 | 663 | ||
664 | if (pdata->sda_delay) { | 664 | if (pdata->sda_delay) { |
665 | sda_delay = (freq / 1000) * pdata->sda_delay; | 665 | sda_delay = clkin * pdata->sda_delay; |
666 | sda_delay /= 1000000; | 666 | sda_delay = DIV_ROUND_UP(sda_delay, 1000000); |
667 | sda_delay = DIV_ROUND_UP(sda_delay, 5); | 667 | sda_delay = DIV_ROUND_UP(sda_delay, 5); |
668 | if (sda_delay > 3) | 668 | if (sda_delay > 3) |
669 | sda_delay = 3; | 669 | sda_delay = 3; |
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index 4c3d1bfec0c5..068cef0a987a 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c | |||
@@ -1448,19 +1448,13 @@ int ide_host_register(struct ide_host *host, const struct ide_port_info *d, | |||
1448 | if (hwif == NULL) | 1448 | if (hwif == NULL) |
1449 | continue; | 1449 | continue; |
1450 | 1450 | ||
1451 | if (hwif->present) | ||
1452 | hwif_register_devices(hwif); | ||
1453 | } | ||
1454 | |||
1455 | ide_host_for_each_port(i, hwif, host) { | ||
1456 | if (hwif == NULL) | ||
1457 | continue; | ||
1458 | |||
1459 | ide_sysfs_register_port(hwif); | 1451 | ide_sysfs_register_port(hwif); |
1460 | ide_proc_register_port(hwif); | 1452 | ide_proc_register_port(hwif); |
1461 | 1453 | ||
1462 | if (hwif->present) | 1454 | if (hwif->present) { |
1463 | ide_proc_port_register_devices(hwif); | 1455 | ide_proc_port_register_devices(hwif); |
1456 | hwif_register_devices(hwif); | ||
1457 | } | ||
1464 | } | 1458 | } |
1465 | 1459 | ||
1466 | return j ? 0 : -1; | 1460 | return j ? 0 : -1; |
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index a10152bb1427..0906fc5b69b9 100755..100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c | |||
@@ -83,7 +83,7 @@ static unsigned int mwait_substates; | |||
83 | /* Reliable LAPIC Timer States, bit 1 for C1 etc. */ | 83 | /* Reliable LAPIC Timer States, bit 1 for C1 etc. */ |
84 | static unsigned int lapic_timer_reliable_states; | 84 | static unsigned int lapic_timer_reliable_states; |
85 | 85 | ||
86 | static struct cpuidle_device *intel_idle_cpuidle_devices; | 86 | static struct cpuidle_device __percpu *intel_idle_cpuidle_devices; |
87 | static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state); | 87 | static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state); |
88 | 88 | ||
89 | static struct cpuidle_state *cpuidle_state_table; | 89 | static struct cpuidle_state *cpuidle_state_table; |
@@ -108,7 +108,7 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = { | |||
108 | .name = "NHM-C3", | 108 | .name = "NHM-C3", |
109 | .desc = "MWAIT 0x10", | 109 | .desc = "MWAIT 0x10", |
110 | .driver_data = (void *) 0x10, | 110 | .driver_data = (void *) 0x10, |
111 | .flags = CPUIDLE_FLAG_TIME_VALID, | 111 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, |
112 | .exit_latency = 20, | 112 | .exit_latency = 20, |
113 | .power_usage = 500, | 113 | .power_usage = 500, |
114 | .target_residency = 80, | 114 | .target_residency = 80, |
@@ -117,7 +117,7 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = { | |||
117 | .name = "NHM-C6", | 117 | .name = "NHM-C6", |
118 | .desc = "MWAIT 0x20", | 118 | .desc = "MWAIT 0x20", |
119 | .driver_data = (void *) 0x20, | 119 | .driver_data = (void *) 0x20, |
120 | .flags = CPUIDLE_FLAG_TIME_VALID, | 120 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, |
121 | .exit_latency = 200, | 121 | .exit_latency = 200, |
122 | .power_usage = 350, | 122 | .power_usage = 350, |
123 | .target_residency = 800, | 123 | .target_residency = 800, |
@@ -149,7 +149,7 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = { | |||
149 | .name = "ATM-C4", | 149 | .name = "ATM-C4", |
150 | .desc = "MWAIT 0x30", | 150 | .desc = "MWAIT 0x30", |
151 | .driver_data = (void *) 0x30, | 151 | .driver_data = (void *) 0x30, |
152 | .flags = CPUIDLE_FLAG_TIME_VALID, | 152 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, |
153 | .exit_latency = 100, | 153 | .exit_latency = 100, |
154 | .power_usage = 250, | 154 | .power_usage = 250, |
155 | .target_residency = 400, | 155 | .target_residency = 400, |
@@ -159,7 +159,7 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = { | |||
159 | .name = "ATM-C6", | 159 | .name = "ATM-C6", |
160 | .desc = "MWAIT 0x40", | 160 | .desc = "MWAIT 0x40", |
161 | .driver_data = (void *) 0x40, | 161 | .driver_data = (void *) 0x40, |
162 | .flags = CPUIDLE_FLAG_TIME_VALID, | 162 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, |
163 | .exit_latency = 200, | 163 | .exit_latency = 200, |
164 | .power_usage = 150, | 164 | .power_usage = 150, |
165 | .target_residency = 800, | 165 | .target_residency = 800, |
@@ -185,6 +185,16 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state) | |||
185 | 185 | ||
186 | local_irq_disable(); | 186 | local_irq_disable(); |
187 | 187 | ||
188 | /* | ||
189 | * If the state flag indicates that the TLB will be flushed or if this | ||
190 | * is the deepest c-state supported, do a voluntary leave mm to avoid | ||
191 | * costly and mostly unnecessary wakeups for flushing the user TLBs | ||
192 | * associated with the active mm. | ||
193 | */ | ||
194 | if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED || | ||
195 | (&dev->states[dev->state_count - 1] == state)) | ||
196 | leave_mm(cpu); | ||
197 | |||
188 | if (!(lapic_timer_reliable_states & (1 << (cstate)))) | 198 | if (!(lapic_timer_reliable_states & (1 << (cstate)))) |
189 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu); | 199 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu); |
190 | 200 | ||
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h index 8f0caf7d4482..78fbe9ffe7f0 100644 --- a/drivers/infiniband/hw/cxgb3/cxio_hal.h +++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h | |||
@@ -53,7 +53,7 @@ | |||
53 | #define T3_MAX_PBL_SIZE 256 | 53 | #define T3_MAX_PBL_SIZE 256 |
54 | #define T3_MAX_RQ_SIZE 1024 | 54 | #define T3_MAX_RQ_SIZE 1024 |
55 | #define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1) | 55 | #define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1) |
56 | #define T3_MAX_CQ_DEPTH 262144 | 56 | #define T3_MAX_CQ_DEPTH 65536 |
57 | #define T3_MAX_NUM_STAG (1<<15) | 57 | #define T3_MAX_NUM_STAG (1<<15) |
58 | #define T3_MAX_MR_SIZE 0x100000000ULL | 58 | #define T3_MAX_MR_SIZE 0x100000000ULL |
59 | #define T3_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */ | 59 | #define T3_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */ |
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c index d88077a21994..13c88871dc3b 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c | |||
@@ -463,7 +463,8 @@ static int send_connect(struct iwch_ep *ep) | |||
463 | V_MSS_IDX(mtu_idx) | | 463 | V_MSS_IDX(mtu_idx) | |
464 | V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx); | 464 | V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx); |
465 | opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10); | 465 | opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10); |
466 | opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor); | 466 | opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) | |
467 | V_CONG_CONTROL_FLAVOR(cong_flavor); | ||
467 | skb->priority = CPL_PRIORITY_SETUP; | 468 | skb->priority = CPL_PRIORITY_SETUP; |
468 | set_arp_failure_handler(skb, act_open_req_arp_failure); | 469 | set_arp_failure_handler(skb, act_open_req_arp_failure); |
469 | 470 | ||
@@ -1280,7 +1281,8 @@ static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb) | |||
1280 | V_MSS_IDX(mtu_idx) | | 1281 | V_MSS_IDX(mtu_idx) | |
1281 | V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx); | 1282 | V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx); |
1282 | opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10); | 1283 | opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10); |
1283 | opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor); | 1284 | opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) | |
1285 | V_CONG_CONTROL_FLAVOR(cong_flavor); | ||
1284 | 1286 | ||
1285 | rpl = cplhdr(skb); | 1287 | rpl = cplhdr(skb); |
1286 | rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); | 1288 | rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); |
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index 443cea55daac..61e0efd4ccfb 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c | |||
@@ -502,7 +502,9 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
502 | static void nes_retrans_expired(struct nes_cm_node *cm_node) | 502 | static void nes_retrans_expired(struct nes_cm_node *cm_node) |
503 | { | 503 | { |
504 | struct iw_cm_id *cm_id = cm_node->cm_id; | 504 | struct iw_cm_id *cm_id = cm_node->cm_id; |
505 | switch (cm_node->state) { | 505 | enum nes_cm_node_state state = cm_node->state; |
506 | cm_node->state = NES_CM_STATE_CLOSED; | ||
507 | switch (state) { | ||
506 | case NES_CM_STATE_SYN_RCVD: | 508 | case NES_CM_STATE_SYN_RCVD: |
507 | case NES_CM_STATE_CLOSING: | 509 | case NES_CM_STATE_CLOSING: |
508 | rem_ref_cm_node(cm_node->cm_core, cm_node); | 510 | rem_ref_cm_node(cm_node->cm_core, cm_node); |
@@ -511,7 +513,6 @@ static void nes_retrans_expired(struct nes_cm_node *cm_node) | |||
511 | case NES_CM_STATE_FIN_WAIT1: | 513 | case NES_CM_STATE_FIN_WAIT1: |
512 | if (cm_node->cm_id) | 514 | if (cm_node->cm_id) |
513 | cm_id->rem_ref(cm_id); | 515 | cm_id->rem_ref(cm_id); |
514 | cm_node->state = NES_CM_STATE_CLOSED; | ||
515 | send_reset(cm_node, NULL); | 516 | send_reset(cm_node, NULL); |
516 | break; | 517 | break; |
517 | default: | 518 | default: |
@@ -1439,9 +1440,6 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
1439 | break; | 1440 | break; |
1440 | case NES_CM_STATE_MPAREQ_RCVD: | 1441 | case NES_CM_STATE_MPAREQ_RCVD: |
1441 | passive_state = atomic_add_return(1, &cm_node->passive_state); | 1442 | passive_state = atomic_add_return(1, &cm_node->passive_state); |
1442 | if (passive_state == NES_SEND_RESET_EVENT) | ||
1443 | create_event(cm_node, NES_CM_EVENT_RESET); | ||
1444 | cm_node->state = NES_CM_STATE_CLOSED; | ||
1445 | dev_kfree_skb_any(skb); | 1443 | dev_kfree_skb_any(skb); |
1446 | break; | 1444 | break; |
1447 | case NES_CM_STATE_ESTABLISHED: | 1445 | case NES_CM_STATE_ESTABLISHED: |
@@ -1456,6 +1454,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
1456 | case NES_CM_STATE_CLOSED: | 1454 | case NES_CM_STATE_CLOSED: |
1457 | drop_packet(skb); | 1455 | drop_packet(skb); |
1458 | break; | 1456 | break; |
1457 | case NES_CM_STATE_FIN_WAIT2: | ||
1459 | case NES_CM_STATE_FIN_WAIT1: | 1458 | case NES_CM_STATE_FIN_WAIT1: |
1460 | case NES_CM_STATE_LAST_ACK: | 1459 | case NES_CM_STATE_LAST_ACK: |
1461 | cm_node->cm_id->rem_ref(cm_node->cm_id); | 1460 | cm_node->cm_id->rem_ref(cm_node->cm_id); |
@@ -2777,6 +2776,12 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2777 | return -EINVAL; | 2776 | return -EINVAL; |
2778 | } | 2777 | } |
2779 | 2778 | ||
2779 | passive_state = atomic_add_return(1, &cm_node->passive_state); | ||
2780 | if (passive_state == NES_SEND_RESET_EVENT) { | ||
2781 | rem_ref_cm_node(cm_node->cm_core, cm_node); | ||
2782 | return -ECONNRESET; | ||
2783 | } | ||
2784 | |||
2780 | /* associate the node with the QP */ | 2785 | /* associate the node with the QP */ |
2781 | nesqp->cm_node = (void *)cm_node; | 2786 | nesqp->cm_node = (void *)cm_node; |
2782 | cm_node->nesqp = nesqp; | 2787 | cm_node->nesqp = nesqp; |
@@ -2979,9 +2984,6 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2979 | printk(KERN_ERR "%s[%u] OFA CM event_handler returned, " | 2984 | printk(KERN_ERR "%s[%u] OFA CM event_handler returned, " |
2980 | "ret=%d\n", __func__, __LINE__, ret); | 2985 | "ret=%d\n", __func__, __LINE__, ret); |
2981 | 2986 | ||
2982 | passive_state = atomic_add_return(1, &cm_node->passive_state); | ||
2983 | if (passive_state == NES_SEND_RESET_EVENT) | ||
2984 | create_event(cm_node, NES_CM_EVENT_RESET); | ||
2985 | return 0; | 2987 | return 0; |
2986 | } | 2988 | } |
2987 | 2989 | ||
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index f8233c851c69..1980a461c499 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c | |||
@@ -3468,6 +3468,19 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev, | |||
3468 | return; /* Ignore it, wait for close complete */ | 3468 | return; /* Ignore it, wait for close complete */ |
3469 | 3469 | ||
3470 | if (atomic_inc_return(&nesqp->close_timer_started) == 1) { | 3470 | if (atomic_inc_return(&nesqp->close_timer_started) == 1) { |
3471 | if ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) && | ||
3472 | (nesqp->ibqp_state == IB_QPS_RTS) && | ||
3473 | ((nesadapter->eeprom_version >> 16) != NES_A0)) { | ||
3474 | spin_lock_irqsave(&nesqp->lock, flags); | ||
3475 | nesqp->hw_iwarp_state = iwarp_state; | ||
3476 | nesqp->hw_tcp_state = tcp_state; | ||
3477 | nesqp->last_aeq = async_event_id; | ||
3478 | next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING; | ||
3479 | nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING; | ||
3480 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
3481 | nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0); | ||
3482 | nes_cm_disconn(nesqp); | ||
3483 | } | ||
3471 | nesqp->cm_id->add_ref(nesqp->cm_id); | 3484 | nesqp->cm_id->add_ref(nesqp->cm_id); |
3472 | schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp, | 3485 | schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp, |
3473 | NES_TIMER_TYPE_CLOSE, 1, 0); | 3486 | NES_TIMER_TYPE_CLOSE, 1, 0); |
@@ -3477,7 +3490,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev, | |||
3477 | nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount), | 3490 | nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount), |
3478 | async_event_id, nesqp->last_aeq, tcp_state); | 3491 | async_event_id, nesqp->last_aeq, tcp_state); |
3479 | } | 3492 | } |
3480 | |||
3481 | break; | 3493 | break; |
3482 | case NES_AEQE_AEID_LLP_CLOSE_COMPLETE: | 3494 | case NES_AEQE_AEID_LLP_CLOSE_COMPLETE: |
3483 | if (nesqp->term_flags) { | 3495 | if (nesqp->term_flags) { |
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h index aa9183db32b1..1204c3432b63 100644 --- a/drivers/infiniband/hw/nes/nes_hw.h +++ b/drivers/infiniband/hw/nes/nes_hw.h | |||
@@ -45,6 +45,7 @@ | |||
45 | #define NES_PHY_TYPE_KR 9 | 45 | #define NES_PHY_TYPE_KR 9 |
46 | 46 | ||
47 | #define NES_MULTICAST_PF_MAX 8 | 47 | #define NES_MULTICAST_PF_MAX 8 |
48 | #define NES_A0 3 | ||
48 | 49 | ||
49 | enum pci_regs { | 50 | enum pci_regs { |
50 | NES_INT_STAT = 0x0000, | 51 | NES_INT_STAT = 0x0000, |
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c index 6dfdd49cdbcf..10560c796fd6 100644 --- a/drivers/infiniband/hw/nes/nes_nic.c +++ b/drivers/infiniband/hw/nes/nes_nic.c | |||
@@ -1446,14 +1446,14 @@ static int nes_netdev_set_pauseparam(struct net_device *netdev, | |||
1446 | NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200)); | 1446 | NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200)); |
1447 | u32temp |= NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE; | 1447 | u32temp |= NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE; |
1448 | nes_write_indexed(nesdev, | 1448 | nes_write_indexed(nesdev, |
1449 | NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE + (nesdev->mac_index*0x200), u32temp); | 1449 | NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp); |
1450 | nesdev->disable_tx_flow_control = 0; | 1450 | nesdev->disable_tx_flow_control = 0; |
1451 | } else if ((et_pauseparam->tx_pause == 0) && (nesdev->disable_tx_flow_control == 0)) { | 1451 | } else if ((et_pauseparam->tx_pause == 0) && (nesdev->disable_tx_flow_control == 0)) { |
1452 | u32temp = nes_read_indexed(nesdev, | 1452 | u32temp = nes_read_indexed(nesdev, |
1453 | NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200)); | 1453 | NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200)); |
1454 | u32temp &= ~NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE; | 1454 | u32temp &= ~NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE; |
1455 | nes_write_indexed(nesdev, | 1455 | nes_write_indexed(nesdev, |
1456 | NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE + (nesdev->mac_index*0x200), u32temp); | 1456 | NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp); |
1457 | nesdev->disable_tx_flow_control = 1; | 1457 | nesdev->disable_tx_flow_control = 1; |
1458 | } | 1458 | } |
1459 | if ((et_pauseparam->rx_pause == 1) && (nesdev->disable_rx_flow_control == 1)) { | 1459 | if ((et_pauseparam->rx_pause == 1) && (nesdev->disable_rx_flow_control == 1)) { |
diff --git a/drivers/input/input.c b/drivers/input/input.c index a9b025f4147a..ab6982056518 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c | |||
@@ -1599,11 +1599,14 @@ EXPORT_SYMBOL(input_free_device); | |||
1599 | * @dev: input device supporting MT events and finger tracking | 1599 | * @dev: input device supporting MT events and finger tracking |
1600 | * @num_slots: number of slots used by the device | 1600 | * @num_slots: number of slots used by the device |
1601 | * | 1601 | * |
1602 | * This function allocates all necessary memory for MT slot handling | 1602 | * This function allocates all necessary memory for MT slot handling in the |
1603 | * in the input device, and adds ABS_MT_SLOT to the device capabilities. | 1603 | * input device, and adds ABS_MT_SLOT to the device capabilities. All slots |
1604 | * are initially marked as unused by setting ABS_MT_TRACKING_ID to -1. | ||
1604 | */ | 1605 | */ |
1605 | int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots) | 1606 | int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots) |
1606 | { | 1607 | { |
1608 | int i; | ||
1609 | |||
1607 | if (!num_slots) | 1610 | if (!num_slots) |
1608 | return 0; | 1611 | return 0; |
1609 | 1612 | ||
@@ -1614,6 +1617,10 @@ int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots) | |||
1614 | dev->mtsize = num_slots; | 1617 | dev->mtsize = num_slots; |
1615 | input_set_abs_params(dev, ABS_MT_SLOT, 0, num_slots - 1, 0, 0); | 1618 | input_set_abs_params(dev, ABS_MT_SLOT, 0, num_slots - 1, 0, 0); |
1616 | 1619 | ||
1620 | /* Mark slots as 'unused' */ | ||
1621 | for (i = 0; i < num_slots; i++) | ||
1622 | dev->mt[i].abs[ABS_MT_TRACKING_ID - ABS_MT_FIRST] = -1; | ||
1623 | |||
1617 | return 0; | 1624 | return 0; |
1618 | } | 1625 | } |
1619 | EXPORT_SYMBOL(input_mt_create_slots); | 1626 | EXPORT_SYMBOL(input_mt_create_slots); |
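
As background for the input.c hunk above, a hedged sketch of how a driver typically uses the MT slots API: slots are created once at setup, and a contact is later reported into a slot, with ABS_MT_TRACKING_ID set to -1 when the contact lifts, which is the same "unused" state the patch now establishes at creation time. The axis ranges, the two-contact limit and the example_* names are invented for illustration.

    #include <linux/input.h>

    #define EXAMPLE_MAX_CONTACTS	2	/* invented contact limit */

    static int example_setup_mt(struct input_dev *dev)
    {
    	input_set_abs_params(dev, ABS_MT_POSITION_X, 0, 1023, 0, 0);
    	input_set_abs_params(dev, ABS_MT_POSITION_Y, 0, 767, 0, 0);

    	/* Allocates dev->mt[] and adds ABS_MT_SLOT; slots start out unused. */
    	return input_mt_create_slots(dev, EXAMPLE_MAX_CONTACTS);
    }

    static void example_report_contact(struct input_dev *dev, int slot,
    				   int id, int x, int y)
    {
    	input_mt_slot(dev, slot);
    	if (id < 0) {
    		/* Contact lifted: -1 returns the slot to the unused state. */
    		input_report_abs(dev, ABS_MT_TRACKING_ID, -1);
    		return;
    	}
    	input_report_abs(dev, ABS_MT_TRACKING_ID, id);
    	input_report_abs(dev, ABS_MT_POSITION_X, x);
    	input_report_abs(dev, ABS_MT_POSITION_Y, y);
    }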
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c index ea67c49146a3..b95231763911 100644 --- a/drivers/input/mouse/bcm5974.c +++ b/drivers/input/mouse/bcm5974.c | |||
@@ -337,10 +337,14 @@ static void report_finger_data(struct input_dev *input, | |||
337 | const struct bcm5974_config *cfg, | 337 | const struct bcm5974_config *cfg, |
338 | const struct tp_finger *f) | 338 | const struct tp_finger *f) |
339 | { | 339 | { |
340 | input_report_abs(input, ABS_MT_TOUCH_MAJOR, raw2int(f->force_major)); | 340 | input_report_abs(input, ABS_MT_TOUCH_MAJOR, |
341 | input_report_abs(input, ABS_MT_TOUCH_MINOR, raw2int(f->force_minor)); | 341 | raw2int(f->force_major) << 1); |
342 | input_report_abs(input, ABS_MT_WIDTH_MAJOR, raw2int(f->size_major)); | 342 | input_report_abs(input, ABS_MT_TOUCH_MINOR, |
343 | input_report_abs(input, ABS_MT_WIDTH_MINOR, raw2int(f->size_minor)); | 343 | raw2int(f->force_minor) << 1); |
344 | input_report_abs(input, ABS_MT_WIDTH_MAJOR, | ||
345 | raw2int(f->size_major) << 1); | ||
346 | input_report_abs(input, ABS_MT_WIDTH_MINOR, | ||
347 | raw2int(f->size_minor) << 1); | ||
344 | input_report_abs(input, ABS_MT_ORIENTATION, | 348 | input_report_abs(input, ABS_MT_ORIENTATION, |
345 | MAX_FINGER_ORIENTATION - raw2int(f->orientation)); | 349 | MAX_FINGER_ORIENTATION - raw2int(f->orientation)); |
346 | input_report_abs(input, ABS_MT_POSITION_X, raw2int(f->abs_x)); | 350 | input_report_abs(input, ABS_MT_POSITION_X, raw2int(f->abs_x)); |
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index 46e4ba0b9246..f58513160480 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c | |||
@@ -1485,8 +1485,8 @@ static int __init i8042_init(void) | |||
1485 | 1485 | ||
1486 | static void __exit i8042_exit(void) | 1486 | static void __exit i8042_exit(void) |
1487 | { | 1487 | { |
1488 | platform_driver_unregister(&i8042_driver); | ||
1489 | platform_device_unregister(i8042_platform_device); | 1488 | platform_device_unregister(i8042_platform_device); |
1489 | platform_driver_unregister(&i8042_driver); | ||
1490 | i8042_platform_exit(); | 1490 | i8042_platform_exit(); |
1491 | 1491 | ||
1492 | panic_blink = NULL; | 1492 | panic_blink = NULL; |
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index 40d77ba8fdc1..6e29badb969e 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c | |||
@@ -243,10 +243,10 @@ static int wacom_graphire_irq(struct wacom_wac *wacom) | |||
243 | if (features->type == WACOM_G4 || | 243 | if (features->type == WACOM_G4 || |
244 | features->type == WACOM_MO) { | 244 | features->type == WACOM_MO) { |
245 | input_report_abs(input, ABS_DISTANCE, data[6] & 0x3f); | 245 | input_report_abs(input, ABS_DISTANCE, data[6] & 0x3f); |
246 | rw = (signed)(data[7] & 0x04) - (data[7] & 0x03); | 246 | rw = (data[7] & 0x04) - (data[7] & 0x03); |
247 | } else { | 247 | } else { |
248 | input_report_abs(input, ABS_DISTANCE, data[7] & 0x3f); | 248 | input_report_abs(input, ABS_DISTANCE, data[7] & 0x3f); |
249 | rw = -(signed)data[6]; | 249 | rw = -(signed char)data[6]; |
250 | } | 250 | } |
251 | input_report_rel(input, REL_WHEEL, rw); | 251 | input_report_rel(input, REL_WHEEL, rw); |
252 | } | 252 | } |
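
The wacom_wac.c change above matters because data[] is a buffer of unsigned bytes: casting data[6] to a plain (signed) int never sign-extends, so a two's-complement wheel delta of 0xff became 255 (and its negation -255) instead of -1. Going through signed char first recovers the sign. A standalone illustration, compilable on its own:

    #include <stdio.h>

    int main(void)
    {
    	unsigned char data6 = 0xff;	/* wheel moved one notch backwards */

    	int wrong = -(signed)data6;	/* data6 promotes to 255, negated: -255 */
    	int right = -(signed char)data6;/* 0xff reinterpreted as -1, negated: 1 */

    	printf("wrong=%d right=%d\n", wrong, right);
    	return 0;
    }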
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c index 74dce4ba0262..350eb34f049c 100644 --- a/drivers/leds/leds-ns2.c +++ b/drivers/leds/leds-ns2.c | |||
@@ -81,7 +81,7 @@ static int ns2_led_get_mode(struct ns2_led_data *led_dat, | |||
81 | int cmd_level; | 81 | int cmd_level; |
82 | int slow_level; | 82 | int slow_level; |
83 | 83 | ||
84 | read_lock(&led_dat->rw_lock); | 84 | read_lock_irq(&led_dat->rw_lock); |
85 | 85 | ||
86 | cmd_level = gpio_get_value(led_dat->cmd); | 86 | cmd_level = gpio_get_value(led_dat->cmd); |
87 | slow_level = gpio_get_value(led_dat->slow); | 87 | slow_level = gpio_get_value(led_dat->slow); |
@@ -95,7 +95,7 @@ static int ns2_led_get_mode(struct ns2_led_data *led_dat, | |||
95 | } | 95 | } |
96 | } | 96 | } |
97 | 97 | ||
98 | read_unlock(&led_dat->rw_lock); | 98 | read_unlock_irq(&led_dat->rw_lock); |
99 | 99 | ||
100 | return ret; | 100 | return ret; |
101 | } | 101 | } |
@@ -104,8 +104,9 @@ static void ns2_led_set_mode(struct ns2_led_data *led_dat, | |||
104 | enum ns2_led_modes mode) | 104 | enum ns2_led_modes mode) |
105 | { | 105 | { |
106 | int i; | 106 | int i; |
107 | unsigned long flags; | ||
107 | 108 | ||
108 | write_lock(&led_dat->rw_lock); | 109 | write_lock_irqsave(&led_dat->rw_lock, flags); |
109 | 110 | ||
110 | for (i = 0; i < ARRAY_SIZE(ns2_led_modval); i++) { | 111 | for (i = 0; i < ARRAY_SIZE(ns2_led_modval); i++) { |
111 | if (mode == ns2_led_modval[i].mode) { | 112 | if (mode == ns2_led_modval[i].mode) { |
@@ -116,7 +117,7 @@ static void ns2_led_set_mode(struct ns2_led_data *led_dat, | |||
116 | } | 117 | } |
117 | } | 118 | } |
118 | 119 | ||
119 | write_unlock(&led_dat->rw_lock); | 120 | write_unlock_irqrestore(&led_dat->rw_lock, flags); |
120 | } | 121 | } |
121 | 122 | ||
122 | static void ns2_led_set(struct led_classdev *led_cdev, | 123 | static void ns2_led_set(struct led_classdev *led_cdev, |
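
The leds-ns2.c hunks above move the rwlock to IRQ-aware variants: the read side, which only runs in process context, takes read_lock_irq(), while the writer uses write_lock_irqsave() because it may be entered with interrupts already disabled. A minimal sketch of the same discipline around a made-up piece of state (names are placeholders, not the driver's):

    #include <linux/spinlock.h>

    static DEFINE_RWLOCK(example_lock);
    static int example_mode;		/* state protected by example_lock */

    static int example_get_mode(void)
    {
    	int mode;

    	read_lock_irq(&example_lock);	/* process context only, _irq is enough */
    	mode = example_mode;
    	read_unlock_irq(&example_lock);

    	return mode;
    }

    static void example_set_mode(int mode)
    {
    	unsigned long flags;

    	/* Caller may already hold interrupts off, so save and restore them. */
    	write_lock_irqsave(&example_lock, flags);
    	example_mode = mode;
    	write_unlock_irqrestore(&example_lock, flags);
    }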
diff --git a/drivers/md/.gitignore b/drivers/md/.gitignore deleted file mode 100644 index a7afec6b19c6..000000000000 --- a/drivers/md/.gitignore +++ /dev/null | |||
@@ -1,4 +0,0 @@ | |||
1 | mktables | ||
2 | raid6altivec*.c | ||
3 | raid6int*.c | ||
4 | raid6tables.c | ||
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 1ba1e122e948..ed4900ade93a 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c | |||
@@ -1542,8 +1542,7 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector) | |||
1542 | atomic_read(&bitmap->mddev->recovery_active) == 0); | 1542 | atomic_read(&bitmap->mddev->recovery_active) == 0); |
1543 | 1543 | ||
1544 | bitmap->mddev->curr_resync_completed = bitmap->mddev->curr_resync; | 1544 | bitmap->mddev->curr_resync_completed = bitmap->mddev->curr_resync; |
1545 | if (bitmap->mddev->persistent) | 1545 | set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags); |
1546 | set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags); | ||
1547 | sector &= ~((1ULL << CHUNK_BLOCK_SHIFT(bitmap)) - 1); | 1546 | sector &= ~((1ULL << CHUNK_BLOCK_SHIFT(bitmap)) - 1); |
1548 | s = 0; | 1547 | s = 0; |
1549 | while (s < sector && s < bitmap->mddev->resync_max_sectors) { | 1548 | while (s < sector && s < bitmap->mddev->resync_max_sectors) { |
diff --git a/drivers/md/md.c b/drivers/md/md.c index c148b6302154..f20d13e717d5 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -1643,7 +1643,9 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev) | |||
1643 | bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1; | 1643 | bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1; |
1644 | if (rdev->sb_size & bmask) | 1644 | if (rdev->sb_size & bmask) |
1645 | rdev->sb_size = (rdev->sb_size | bmask) + 1; | 1645 | rdev->sb_size = (rdev->sb_size | bmask) + 1; |
1646 | } | 1646 | } else |
1647 | max_dev = le32_to_cpu(sb->max_dev); | ||
1648 | |||
1647 | for (i=0; i<max_dev;i++) | 1649 | for (i=0; i<max_dev;i++) |
1648 | sb->dev_roles[i] = cpu_to_le16(0xfffe); | 1650 | sb->dev_roles[i] = cpu_to_le16(0xfffe); |
1649 | 1651 | ||
@@ -2167,9 +2169,9 @@ repeat: | |||
2167 | rdev->recovery_offset = mddev->curr_resync_completed; | 2169 | rdev->recovery_offset = mddev->curr_resync_completed; |
2168 | 2170 | ||
2169 | } | 2171 | } |
2170 | if (mddev->external || !mddev->persistent) { | 2172 | if (!mddev->persistent) { |
2171 | clear_bit(MD_CHANGE_DEVS, &mddev->flags); | ||
2172 | clear_bit(MD_CHANGE_CLEAN, &mddev->flags); | 2173 | clear_bit(MD_CHANGE_CLEAN, &mddev->flags); |
2174 | clear_bit(MD_CHANGE_DEVS, &mddev->flags); | ||
2173 | wake_up(&mddev->sb_wait); | 2175 | wake_up(&mddev->sb_wait); |
2174 | return; | 2176 | return; |
2175 | } | 2177 | } |
@@ -2178,7 +2180,6 @@ repeat: | |||
2178 | 2180 | ||
2179 | mddev->utime = get_seconds(); | 2181 | mddev->utime = get_seconds(); |
2180 | 2182 | ||
2181 | set_bit(MD_CHANGE_PENDING, &mddev->flags); | ||
2182 | if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags)) | 2183 | if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags)) |
2183 | force_change = 1; | 2184 | force_change = 1; |
2184 | if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags)) | 2185 | if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags)) |
@@ -3371,7 +3372,7 @@ array_state_show(mddev_t *mddev, char *page) | |||
3371 | case 0: | 3372 | case 0: |
3372 | if (mddev->in_sync) | 3373 | if (mddev->in_sync) |
3373 | st = clean; | 3374 | st = clean; |
3374 | else if (test_bit(MD_CHANGE_CLEAN, &mddev->flags)) | 3375 | else if (test_bit(MD_CHANGE_PENDING, &mddev->flags)) |
3375 | st = write_pending; | 3376 | st = write_pending; |
3376 | else if (mddev->safemode) | 3377 | else if (mddev->safemode) |
3377 | st = active_idle; | 3378 | st = active_idle; |
@@ -3452,9 +3453,7 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len) | |||
3452 | mddev->in_sync = 1; | 3453 | mddev->in_sync = 1; |
3453 | if (mddev->safemode == 1) | 3454 | if (mddev->safemode == 1) |
3454 | mddev->safemode = 0; | 3455 | mddev->safemode = 0; |
3455 | if (mddev->persistent) | 3456 | set_bit(MD_CHANGE_CLEAN, &mddev->flags); |
3456 | set_bit(MD_CHANGE_CLEAN, | ||
3457 | &mddev->flags); | ||
3458 | } | 3457 | } |
3459 | err = 0; | 3458 | err = 0; |
3460 | } else | 3459 | } else |
@@ -3466,8 +3465,7 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len) | |||
3466 | case active: | 3465 | case active: |
3467 | if (mddev->pers) { | 3466 | if (mddev->pers) { |
3468 | restart_array(mddev); | 3467 | restart_array(mddev); |
3469 | if (mddev->external) | 3468 | clear_bit(MD_CHANGE_PENDING, &mddev->flags); |
3470 | clear_bit(MD_CHANGE_CLEAN, &mddev->flags); | ||
3471 | wake_up(&mddev->sb_wait); | 3469 | wake_up(&mddev->sb_wait); |
3472 | err = 0; | 3470 | err = 0; |
3473 | } else { | 3471 | } else { |
@@ -6572,6 +6570,7 @@ void md_write_start(mddev_t *mddev, struct bio *bi) | |||
6572 | if (mddev->in_sync) { | 6570 | if (mddev->in_sync) { |
6573 | mddev->in_sync = 0; | 6571 | mddev->in_sync = 0; |
6574 | set_bit(MD_CHANGE_CLEAN, &mddev->flags); | 6572 | set_bit(MD_CHANGE_CLEAN, &mddev->flags); |
6573 | set_bit(MD_CHANGE_PENDING, &mddev->flags); | ||
6575 | md_wakeup_thread(mddev->thread); | 6574 | md_wakeup_thread(mddev->thread); |
6576 | did_change = 1; | 6575 | did_change = 1; |
6577 | } | 6576 | } |
@@ -6580,7 +6579,6 @@ void md_write_start(mddev_t *mddev, struct bio *bi) | |||
6580 | if (did_change) | 6579 | if (did_change) |
6581 | sysfs_notify_dirent_safe(mddev->sysfs_state); | 6580 | sysfs_notify_dirent_safe(mddev->sysfs_state); |
6582 | wait_event(mddev->sb_wait, | 6581 | wait_event(mddev->sb_wait, |
6583 | !test_bit(MD_CHANGE_CLEAN, &mddev->flags) && | ||
6584 | !test_bit(MD_CHANGE_PENDING, &mddev->flags)); | 6582 | !test_bit(MD_CHANGE_PENDING, &mddev->flags)); |
6585 | } | 6583 | } |
6586 | 6584 | ||
@@ -6616,6 +6614,7 @@ int md_allow_write(mddev_t *mddev) | |||
6616 | if (mddev->in_sync) { | 6614 | if (mddev->in_sync) { |
6617 | mddev->in_sync = 0; | 6615 | mddev->in_sync = 0; |
6618 | set_bit(MD_CHANGE_CLEAN, &mddev->flags); | 6616 | set_bit(MD_CHANGE_CLEAN, &mddev->flags); |
6617 | set_bit(MD_CHANGE_PENDING, &mddev->flags); | ||
6619 | if (mddev->safemode_delay && | 6618 | if (mddev->safemode_delay && |
6620 | mddev->safemode == 0) | 6619 | mddev->safemode == 0) |
6621 | mddev->safemode = 1; | 6620 | mddev->safemode = 1; |
@@ -6625,7 +6624,7 @@ int md_allow_write(mddev_t *mddev) | |||
6625 | } else | 6624 | } else |
6626 | spin_unlock_irq(&mddev->write_lock); | 6625 | spin_unlock_irq(&mddev->write_lock); |
6627 | 6626 | ||
6628 | if (test_bit(MD_CHANGE_CLEAN, &mddev->flags)) | 6627 | if (test_bit(MD_CHANGE_PENDING, &mddev->flags)) |
6629 | return -EAGAIN; | 6628 | return -EAGAIN; |
6630 | else | 6629 | else |
6631 | return 0; | 6630 | return 0; |
@@ -6823,8 +6822,7 @@ void md_do_sync(mddev_t *mddev) | |||
6823 | atomic_read(&mddev->recovery_active) == 0); | 6822 | atomic_read(&mddev->recovery_active) == 0); |
6824 | mddev->curr_resync_completed = | 6823 | mddev->curr_resync_completed = |
6825 | mddev->curr_resync; | 6824 | mddev->curr_resync; |
6826 | if (mddev->persistent) | 6825 | set_bit(MD_CHANGE_CLEAN, &mddev->flags); |
6827 | set_bit(MD_CHANGE_CLEAN, &mddev->flags); | ||
6828 | sysfs_notify(&mddev->kobj, NULL, "sync_completed"); | 6826 | sysfs_notify(&mddev->kobj, NULL, "sync_completed"); |
6829 | } | 6827 | } |
6830 | 6828 | ||
@@ -7073,7 +7071,7 @@ void md_check_recovery(mddev_t *mddev) | |||
7073 | if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) | 7071 | if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) |
7074 | return; | 7072 | return; |
7075 | if ( ! ( | 7073 | if ( ! ( |
7076 | (mddev->flags && !mddev->external) || | 7074 | (mddev->flags & ~ (1<<MD_CHANGE_PENDING)) || |
7077 | test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || | 7075 | test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || |
7078 | test_bit(MD_RECOVERY_DONE, &mddev->recovery) || | 7076 | test_bit(MD_RECOVERY_DONE, &mddev->recovery) || |
7079 | (mddev->external == 0 && mddev->safemode == 1) || | 7077 | (mddev->external == 0 && mddev->safemode == 1) || |
@@ -7103,8 +7101,7 @@ void md_check_recovery(mddev_t *mddev) | |||
7103 | mddev->recovery_cp == MaxSector) { | 7101 | mddev->recovery_cp == MaxSector) { |
7104 | mddev->in_sync = 1; | 7102 | mddev->in_sync = 1; |
7105 | did_change = 1; | 7103 | did_change = 1; |
7106 | if (mddev->persistent) | 7104 | set_bit(MD_CHANGE_CLEAN, &mddev->flags); |
7107 | set_bit(MD_CHANGE_CLEAN, &mddev->flags); | ||
7108 | } | 7105 | } |
7109 | if (mddev->safemode == 1) | 7106 | if (mddev->safemode == 1) |
7110 | mddev->safemode = 0; | 7107 | mddev->safemode = 0; |
diff --git a/drivers/md/md.h b/drivers/md/md.h index a953fe2808ae..3931299788dc 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h | |||
@@ -140,7 +140,7 @@ struct mddev_s | |||
140 | unsigned long flags; | 140 | unsigned long flags; |
141 | #define MD_CHANGE_DEVS 0 /* Some device status has changed */ | 141 | #define MD_CHANGE_DEVS 0 /* Some device status has changed */ |
142 | #define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */ | 142 | #define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */ |
143 | #define MD_CHANGE_PENDING 2 /* superblock update in progress */ | 143 | #define MD_CHANGE_PENDING 2 /* switch from 'clean' to 'active' in progress */ |
144 | 144 | ||
145 | int suspended; | 145 | int suspended; |
146 | atomic_t active_io; | 146 | atomic_t active_io; |
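
The md hunks above all revolve around one handshake: a writer marks the array active, raises MD_CHANGE_PENDING, and sleeps on sb_wait until the superblock updater clears the flag and wakes it. The sketch below reconstructs that handshake from the patch; the wrapper function itself is illustrative, not part of the change.

static void example_mark_active_and_wait(mddev_t *mddev)
{
	spin_lock_irq(&mddev->write_lock);
	if (mddev->in_sync) {
		mddev->in_sync = 0;
		set_bit(MD_CHANGE_CLEAN, &mddev->flags);	/* on-disk metadata is now stale */
		set_bit(MD_CHANGE_PENDING, &mddev->flags);	/* clean->active write in flight */
	}
	spin_unlock_irq(&mddev->write_lock);

	md_wakeup_thread(mddev->thread);	/* have the md thread write the superblock */

	/* Block until the clean->active transition has reached stable storage. */
	wait_event(mddev->sb_wait,
		   !test_bit(MD_CHANGE_PENDING, &mddev->flags));
}
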
diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c index 04028a9ee082..428377a5a6f5 100644 --- a/drivers/mfd/max8925-core.c +++ b/drivers/mfd/max8925-core.c | |||
@@ -429,24 +429,25 @@ static void max8925_irq_sync_unlock(unsigned int irq) | |||
429 | irq_tsc = cache_tsc; | 429 | irq_tsc = cache_tsc; |
430 | for (i = 0; i < ARRAY_SIZE(max8925_irqs); i++) { | 430 | for (i = 0; i < ARRAY_SIZE(max8925_irqs); i++) { |
431 | irq_data = &max8925_irqs[i]; | 431 | irq_data = &max8925_irqs[i]; |
432 | /* 1 -- disable, 0 -- enable */ | ||
432 | switch (irq_data->mask_reg) { | 433 | switch (irq_data->mask_reg) { |
433 | case MAX8925_CHG_IRQ1_MASK: | 434 | case MAX8925_CHG_IRQ1_MASK: |
434 | irq_chg[0] &= irq_data->enable; | 435 | irq_chg[0] &= ~irq_data->enable; |
435 | break; | 436 | break; |
436 | case MAX8925_CHG_IRQ2_MASK: | 437 | case MAX8925_CHG_IRQ2_MASK: |
437 | irq_chg[1] &= irq_data->enable; | 438 | irq_chg[1] &= ~irq_data->enable; |
438 | break; | 439 | break; |
439 | case MAX8925_ON_OFF_IRQ1_MASK: | 440 | case MAX8925_ON_OFF_IRQ1_MASK: |
440 | irq_on[0] &= irq_data->enable; | 441 | irq_on[0] &= ~irq_data->enable; |
441 | break; | 442 | break; |
442 | case MAX8925_ON_OFF_IRQ2_MASK: | 443 | case MAX8925_ON_OFF_IRQ2_MASK: |
443 | irq_on[1] &= irq_data->enable; | 444 | irq_on[1] &= ~irq_data->enable; |
444 | break; | 445 | break; |
445 | case MAX8925_RTC_IRQ_MASK: | 446 | case MAX8925_RTC_IRQ_MASK: |
446 | irq_rtc &= irq_data->enable; | 447 | irq_rtc &= ~irq_data->enable; |
447 | break; | 448 | break; |
448 | case MAX8925_TSC_IRQ_MASK: | 449 | case MAX8925_TSC_IRQ_MASK: |
449 | irq_tsc &= irq_data->enable; | 450 | irq_tsc &= ~irq_data->enable; |
450 | break; | 451 | break; |
451 | default: | 452 | default: |
452 | dev_err(chip->dev, "wrong IRQ\n"); | 453 | dev_err(chip->dev, "wrong IRQ\n"); |
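
The max8925 fix is the usual active-low mask convention (the added comment spells it out: 1 disables, 0 enables), so unmasking an interrupt means clearing its bit from the cached mask register rather than AND-ing the enable value in. A minimal standalone sketch of that convention, with illustrative names:

#include <stdint.h>

static uint8_t irq_mask_cache = 0xff;		/* all sources masked after reset */

static void example_irq_unmask(uint8_t enable_bit)
{
	irq_mask_cache &= ~enable_bit;		/* clear the bit -> interrupt delivered */
}

static void example_irq_mask(uint8_t enable_bit)
{
	irq_mask_cache |= enable_bit;		/* set the bit -> interrupt suppressed */
}
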
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c index 7dabe4dbd373..294183b6260b 100644 --- a/drivers/mfd/wm831x-irq.c +++ b/drivers/mfd/wm831x-irq.c | |||
@@ -394,8 +394,13 @@ static int wm831x_irq_set_type(unsigned int irq, unsigned int type) | |||
394 | 394 | ||
395 | irq = irq - wm831x->irq_base; | 395 | irq = irq - wm831x->irq_base; |
396 | 396 | ||
397 | if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11) | 397 | if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11) { |
398 | return -EINVAL; | 398 | /* Ignore internal-only IRQs */ |
399 | if (irq >= 0 && irq < WM831X_NUM_IRQS) | ||
400 | return 0; | ||
401 | else | ||
402 | return -EINVAL; | ||
403 | } | ||
399 | 404 | ||
400 | switch (type) { | 405 | switch (type) { |
401 | case IRQ_TYPE_EDGE_BOTH: | 406 | case IRQ_TYPE_EDGE_BOTH: |
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 0b591b658243..b74331260744 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig | |||
@@ -368,7 +368,7 @@ config VMWARE_BALLOON | |||
368 | If unsure, say N. | 368 | If unsure, say N. |
369 | 369 | ||
370 | To compile this driver as a module, choose M here: the | 370 | To compile this driver as a module, choose M here: the |
371 | module will be called vmware_balloon. | 371 | module will be called vmw_balloon. |
372 | 372 | ||
373 | config ARM_CHARLCD | 373 | config ARM_CHARLCD |
374 | bool "ARM Ltd. Character LCD Driver" | 374 | bool "ARM Ltd. Character LCD Driver" |
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index 255a80dc9d73..42eab95cde2a 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile | |||
@@ -33,5 +33,5 @@ obj-$(CONFIG_IWMC3200TOP) += iwmc3200top/ | |||
33 | obj-$(CONFIG_HMC6352) += hmc6352.o | 33 | obj-$(CONFIG_HMC6352) += hmc6352.o |
34 | obj-y += eeprom/ | 34 | obj-y += eeprom/ |
35 | obj-y += cb710/ | 35 | obj-y += cb710/ |
36 | obj-$(CONFIG_VMWARE_BALLOON) += vmware_balloon.o | 36 | obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o |
37 | obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o | 37 | obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o |
diff --git a/drivers/misc/vmware_balloon.c b/drivers/misc/vmw_balloon.c index 2a1e804a71aa..2a1e804a71aa 100644 --- a/drivers/misc/vmware_balloon.c +++ b/drivers/misc/vmw_balloon.c | |||
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index bd2755e8d9a3..f332c52968b7 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c | |||
@@ -362,9 +362,8 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr, | |||
362 | goto err; | 362 | goto err; |
363 | } | 363 | } |
364 | 364 | ||
365 | err = mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid); | 365 | if (ocr & R4_MEMORY_PRESENT |
366 | 366 | && mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid) == 0) { | |
367 | if (!err) { | ||
368 | card->type = MMC_TYPE_SD_COMBO; | 367 | card->type = MMC_TYPE_SD_COMBO; |
369 | 368 | ||
370 | if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO || | 369 | if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO || |
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c index 5f3a599ead07..87226cd202a5 100644 --- a/drivers/mmc/host/at91_mci.c +++ b/drivers/mmc/host/at91_mci.c | |||
@@ -66,6 +66,7 @@ | |||
66 | #include <linux/clk.h> | 66 | #include <linux/clk.h> |
67 | #include <linux/atmel_pdc.h> | 67 | #include <linux/atmel_pdc.h> |
68 | #include <linux/gfp.h> | 68 | #include <linux/gfp.h> |
69 | #include <linux/highmem.h> | ||
69 | 70 | ||
70 | #include <linux/mmc/host.h> | 71 | #include <linux/mmc/host.h> |
71 | 72 | ||
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c index 9a68ff4353a2..5a950b16d9e6 100644 --- a/drivers/mmc/host/imxmmc.c +++ b/drivers/mmc/host/imxmmc.c | |||
@@ -148,11 +148,12 @@ static int imxmci_start_clock(struct imxmci_host *host) | |||
148 | 148 | ||
149 | while (delay--) { | 149 | while (delay--) { |
150 | reg = readw(host->base + MMC_REG_STATUS); | 150 | reg = readw(host->base + MMC_REG_STATUS); |
151 | if (reg & STATUS_CARD_BUS_CLK_RUN) | 151 | if (reg & STATUS_CARD_BUS_CLK_RUN) { |
152 | /* Check twice before cut */ | 152 | /* Check twice before cut */ |
153 | reg = readw(host->base + MMC_REG_STATUS); | 153 | reg = readw(host->base + MMC_REG_STATUS); |
154 | if (reg & STATUS_CARD_BUS_CLK_RUN) | 154 | if (reg & STATUS_CARD_BUS_CLK_RUN) |
155 | return 0; | 155 | return 0; |
156 | } | ||
156 | 157 | ||
157 | if (test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events)) | 158 | if (test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events)) |
158 | return 0; | 159 | return 0; |
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 4a8776f8afdd..4526d2791f29 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c | |||
@@ -2305,7 +2305,6 @@ static int omap_hsmmc_suspend(struct device *dev) | |||
2305 | int ret = 0; | 2305 | int ret = 0; |
2306 | struct platform_device *pdev = to_platform_device(dev); | 2306 | struct platform_device *pdev = to_platform_device(dev); |
2307 | struct omap_hsmmc_host *host = platform_get_drvdata(pdev); | 2307 | struct omap_hsmmc_host *host = platform_get_drvdata(pdev); |
2308 | pm_message_t state = PMSG_SUSPEND; /* unused by MMC core */ | ||
2309 | 2308 | ||
2310 | if (host && host->suspended) | 2309 | if (host && host->suspended) |
2311 | return 0; | 2310 | return 0; |
@@ -2324,8 +2323,8 @@ static int omap_hsmmc_suspend(struct device *dev) | |||
2324 | } | 2323 | } |
2325 | } | 2324 | } |
2326 | cancel_work_sync(&host->mmc_carddetect_work); | 2325 | cancel_work_sync(&host->mmc_carddetect_work); |
2327 | mmc_host_enable(host->mmc); | ||
2328 | ret = mmc_suspend_host(host->mmc); | 2326 | ret = mmc_suspend_host(host->mmc); |
2327 | mmc_host_enable(host->mmc); | ||
2329 | if (ret == 0) { | 2328 | if (ret == 0) { |
2330 | omap_hsmmc_disable_irq(host); | 2329 | omap_hsmmc_disable_irq(host); |
2331 | OMAP_HSMMC_WRITE(host->base, HCTL, | 2330 | OMAP_HSMMC_WRITE(host->base, HCTL, |
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c index 2e16e0a90a5e..976330de379e 100644 --- a/drivers/mmc/host/s3cmci.c +++ b/drivers/mmc/host/s3cmci.c | |||
@@ -1600,7 +1600,7 @@ static int __devinit s3cmci_probe(struct platform_device *pdev) | |||
1600 | host->pio_active = XFER_NONE; | 1600 | host->pio_active = XFER_NONE; |
1601 | 1601 | ||
1602 | #ifdef CONFIG_MMC_S3C_PIODMA | 1602 | #ifdef CONFIG_MMC_S3C_PIODMA |
1603 | host->dodma = host->pdata->dma; | 1603 | host->dodma = host->pdata->use_dma; |
1604 | #endif | 1604 | #endif |
1605 | 1605 | ||
1606 | host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1606 | host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c index 71ad4163b95e..aacb862ecc8a 100644 --- a/drivers/mmc/host/sdhci-s3c.c +++ b/drivers/mmc/host/sdhci-s3c.c | |||
@@ -241,8 +241,10 @@ static struct sdhci_ops sdhci_s3c_ops = { | |||
241 | static void sdhci_s3c_notify_change(struct platform_device *dev, int state) | 241 | static void sdhci_s3c_notify_change(struct platform_device *dev, int state) |
242 | { | 242 | { |
243 | struct sdhci_host *host = platform_get_drvdata(dev); | 243 | struct sdhci_host *host = platform_get_drvdata(dev); |
244 | unsigned long flags; | ||
245 | |||
244 | if (host) { | 246 | if (host) { |
245 | spin_lock(&host->lock); | 247 | spin_lock_irqsave(&host->lock, flags); |
246 | if (state) { | 248 | if (state) { |
247 | dev_dbg(&dev->dev, "card inserted.\n"); | 249 | dev_dbg(&dev->dev, "card inserted.\n"); |
248 | host->flags &= ~SDHCI_DEVICE_DEAD; | 250 | host->flags &= ~SDHCI_DEVICE_DEAD; |
@@ -253,7 +255,7 @@ static void sdhci_s3c_notify_change(struct platform_device *dev, int state) | |||
253 | host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; | 255 | host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; |
254 | } | 256 | } |
255 | tasklet_schedule(&host->card_tasklet); | 257 | tasklet_schedule(&host->card_tasklet); |
256 | spin_unlock(&host->lock); | 258 | spin_unlock_irqrestore(&host->lock, flags); |
257 | } | 259 | } |
258 | } | 260 | } |
259 | 261 | ||
@@ -481,8 +483,10 @@ static int __devexit sdhci_s3c_remove(struct platform_device *pdev) | |||
481 | sdhci_remove_host(host, 1); | 483 | sdhci_remove_host(host, 1); |
482 | 484 | ||
483 | for (ptr = 0; ptr < 3; ptr++) { | 485 | for (ptr = 0; ptr < 3; ptr++) { |
484 | clk_disable(sc->clk_bus[ptr]); | 486 | if (sc->clk_bus[ptr]) { |
485 | clk_put(sc->clk_bus[ptr]); | 487 | clk_disable(sc->clk_bus[ptr]); |
488 | clk_put(sc->clk_bus[ptr]); | ||
489 | } | ||
486 | } | 490 | } |
487 | clk_disable(sc->clk_io); | 491 | clk_disable(sc->clk_io); |
488 | clk_put(sc->clk_io); | 492 | clk_put(sc->clk_io); |
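
Two defensive patterns appear in the sdhci-s3c hunks: the card-detect notification now takes the host lock with spin_lock_irqsave() because the callback may arrive in interrupt context, and the remove path only releases bus clocks it actually obtained. A hedged sketch of the locking half; the function and parameter names are illustrative, not the driver's:

#include <linux/spinlock.h>

static void example_notify_card_change(spinlock_t *lock, unsigned int *host_flags,
				       int inserted, unsigned int dead_flag)
{
	unsigned long flags;

	/* irqsave variant: safe whether we are called from process or IRQ context */
	spin_lock_irqsave(lock, flags);
	if (inserted)
		*host_flags &= ~dead_flag;	/* card is back, clear the "dead" state */
	else
		*host_flags |= dead_flag;	/* card gone, mark the device dead */
	spin_unlock_irqrestore(lock, flags);
}
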
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c index ee7d0a5a51c4..69d98e3bf6ab 100644 --- a/drivers/mmc/host/tmio_mmc.c +++ b/drivers/mmc/host/tmio_mmc.c | |||
@@ -164,6 +164,7 @@ tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd) | |||
164 | static void tmio_mmc_pio_irq(struct tmio_mmc_host *host) | 164 | static void tmio_mmc_pio_irq(struct tmio_mmc_host *host) |
165 | { | 165 | { |
166 | struct mmc_data *data = host->data; | 166 | struct mmc_data *data = host->data; |
167 | void *sg_virt; | ||
167 | unsigned short *buf; | 168 | unsigned short *buf; |
168 | unsigned int count; | 169 | unsigned int count; |
169 | unsigned long flags; | 170 | unsigned long flags; |
@@ -173,8 +174,8 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host) | |||
173 | return; | 174 | return; |
174 | } | 175 | } |
175 | 176 | ||
176 | buf = (unsigned short *)(tmio_mmc_kmap_atomic(host, &flags) + | 177 | sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags); |
177 | host->sg_off); | 178 | buf = (unsigned short *)(sg_virt + host->sg_off); |
178 | 179 | ||
179 | count = host->sg_ptr->length - host->sg_off; | 180 | count = host->sg_ptr->length - host->sg_off; |
180 | if (count > data->blksz) | 181 | if (count > data->blksz) |
@@ -191,7 +192,7 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host) | |||
191 | 192 | ||
192 | host->sg_off += count; | 193 | host->sg_off += count; |
193 | 194 | ||
194 | tmio_mmc_kunmap_atomic(host, &flags); | 195 | tmio_mmc_kunmap_atomic(sg_virt, &flags); |
195 | 196 | ||
196 | if (host->sg_off == host->sg_ptr->length) | 197 | if (host->sg_off == host->sg_ptr->length) |
197 | tmio_mmc_next_sg(host); | 198 | tmio_mmc_next_sg(host); |
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h index 64f7d5dfc106..0fedc78e3ea5 100644 --- a/drivers/mmc/host/tmio_mmc.h +++ b/drivers/mmc/host/tmio_mmc.h | |||
@@ -82,10 +82,7 @@ | |||
82 | 82 | ||
83 | #define ack_mmc_irqs(host, i) \ | 83 | #define ack_mmc_irqs(host, i) \ |
84 | do { \ | 84 | do { \ |
85 | u32 mask;\ | 85 | sd_ctrl_write32((host), CTL_STATUS, ~(i)); \ |
86 | mask = sd_ctrl_read32((host), CTL_STATUS); \ | ||
87 | mask &= ~((i) & TMIO_MASK_IRQ); \ | ||
88 | sd_ctrl_write32((host), CTL_STATUS, mask); \ | ||
89 | } while (0) | 86 | } while (0) |
90 | 87 | ||
91 | 88 | ||
@@ -177,19 +174,17 @@ static inline int tmio_mmc_next_sg(struct tmio_mmc_host *host) | |||
177 | return --host->sg_len; | 174 | return --host->sg_len; |
178 | } | 175 | } |
179 | 176 | ||
180 | static inline char *tmio_mmc_kmap_atomic(struct tmio_mmc_host *host, | 177 | static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg, |
181 | unsigned long *flags) | 178 | unsigned long *flags) |
182 | { | 179 | { |
183 | struct scatterlist *sg = host->sg_ptr; | ||
184 | |||
185 | local_irq_save(*flags); | 180 | local_irq_save(*flags); |
186 | return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; | 181 | return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; |
187 | } | 182 | } |
188 | 183 | ||
189 | static inline void tmio_mmc_kunmap_atomic(struct tmio_mmc_host *host, | 184 | static inline void tmio_mmc_kunmap_atomic(void *virt, |
190 | unsigned long *flags) | 185 | unsigned long *flags) |
191 | { | 186 | { |
192 | kunmap_atomic(sg_page(host->sg_ptr), KM_BIO_SRC_IRQ); | 187 | kunmap_atomic(virt, KM_BIO_SRC_IRQ); |
193 | local_irq_restore(*flags); | 188 | local_irq_restore(*flags); |
194 | } | 189 | } |
195 | 190 | ||
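
The tmio_mmc rework changes the atomic-map helpers to take the scatterlist directly and to unmap by the returned virtual address, so a caller no longer re-derives the page from host->sg_ptr. A usage sketch under those assumptions (the copy step is left as a placeholder):

static void example_pio_step(struct tmio_mmc_host *host, unsigned int count)
{
	unsigned long flags;
	void *sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
	unsigned short *buf = (unsigned short *)(sg_virt + host->sg_off);

	/* ... transfer 'count' bytes to or from 'buf' here ... */

	host->sg_off += count;
	tmio_mmc_kunmap_atomic(sg_virt, &flags);	/* unmap by the mapped address */
}
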
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c index a382e3dd0a5d..6fbeefa3a766 100644 --- a/drivers/mtd/nand/bf5xx_nand.c +++ b/drivers/mtd/nand/bf5xx_nand.c | |||
@@ -682,7 +682,6 @@ static int __devinit bf5xx_nand_add_partition(struct bf5xx_nand_info *info) | |||
682 | static int __devexit bf5xx_nand_remove(struct platform_device *pdev) | 682 | static int __devexit bf5xx_nand_remove(struct platform_device *pdev) |
683 | { | 683 | { |
684 | struct bf5xx_nand_info *info = to_nand_info(pdev); | 684 | struct bf5xx_nand_info *info = to_nand_info(pdev); |
685 | struct mtd_info *mtd = NULL; | ||
686 | 685 | ||
687 | platform_set_drvdata(pdev, NULL); | 686 | platform_set_drvdata(pdev, NULL); |
688 | 687 | ||
@@ -690,11 +689,7 @@ static int __devexit bf5xx_nand_remove(struct platform_device *pdev) | |||
690 | * and their partitions, then go through freeing the | 689 | * and their partitions, then go through freeing the |
691 | * resources used | 690 | * resources used |
692 | */ | 691 | */ |
693 | mtd = &info->mtd; | 692 | nand_release(&info->mtd); |
694 | if (mtd) { | ||
695 | nand_release(mtd); | ||
696 | kfree(mtd); | ||
697 | } | ||
698 | 693 | ||
699 | peripheral_free_list(bfin_nfc_pin_req); | 694 | peripheral_free_list(bfin_nfc_pin_req); |
700 | bf5xx_nand_dma_remove(info); | 695 | bf5xx_nand_dma_remove(info); |
@@ -710,7 +705,7 @@ static int bf5xx_nand_scan(struct mtd_info *mtd) | |||
710 | struct nand_chip *chip = mtd->priv; | 705 | struct nand_chip *chip = mtd->priv; |
711 | int ret; | 706 | int ret; |
712 | 707 | ||
713 | ret = nand_scan_ident(mtd, 1); | 708 | ret = nand_scan_ident(mtd, 1, NULL); |
714 | if (ret) | 709 | if (ret) |
715 | return ret; | 710 | return ret; |
716 | 711 | ||
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c index fcf8ceb277d4..b2828e84d243 100644 --- a/drivers/mtd/nand/mxc_nand.c +++ b/drivers/mtd/nand/mxc_nand.c | |||
@@ -67,7 +67,9 @@ | |||
67 | #define NFC_V1_V2_CONFIG1_BIG (1 << 5) | 67 | #define NFC_V1_V2_CONFIG1_BIG (1 << 5) |
68 | #define NFC_V1_V2_CONFIG1_RST (1 << 6) | 68 | #define NFC_V1_V2_CONFIG1_RST (1 << 6) |
69 | #define NFC_V1_V2_CONFIG1_CE (1 << 7) | 69 | #define NFC_V1_V2_CONFIG1_CE (1 << 7) |
70 | #define NFC_V1_V2_CONFIG1_ONE_CYCLE (1 << 8) | 70 | #define NFC_V2_CONFIG1_ONE_CYCLE (1 << 8) |
71 | #define NFC_V2_CONFIG1_PPB(x) (((x) & 0x3) << 9) | ||
72 | #define NFC_V2_CONFIG1_FP_INT (1 << 11) | ||
71 | 73 | ||
72 | #define NFC_V1_V2_CONFIG2_INT (1 << 15) | 74 | #define NFC_V1_V2_CONFIG2_INT (1 << 15) |
73 | 75 | ||
@@ -402,16 +404,16 @@ static void send_read_id_v1_v2(struct mxc_nand_host *host) | |||
402 | /* Wait for operation to complete */ | 404 | /* Wait for operation to complete */ |
403 | wait_op_done(host, true); | 405 | wait_op_done(host, true); |
404 | 406 | ||
407 | memcpy(host->data_buf, host->main_area0, 16); | ||
408 | |||
405 | if (this->options & NAND_BUSWIDTH_16) { | 409 | if (this->options & NAND_BUSWIDTH_16) { |
406 | void __iomem *main_buf = host->main_area0; | ||
407 | /* compress the ID info */ | 410 | /* compress the ID info */ |
408 | writeb(readb(main_buf + 2), main_buf + 1); | 411 | host->data_buf[1] = host->data_buf[2]; |
409 | writeb(readb(main_buf + 4), main_buf + 2); | 412 | host->data_buf[2] = host->data_buf[4]; |
410 | writeb(readb(main_buf + 6), main_buf + 3); | 413 | host->data_buf[3] = host->data_buf[6]; |
411 | writeb(readb(main_buf + 8), main_buf + 4); | 414 | host->data_buf[4] = host->data_buf[8]; |
412 | writeb(readb(main_buf + 10), main_buf + 5); | 415 | host->data_buf[5] = host->data_buf[10]; |
413 | } | 416 | } |
414 | memcpy(host->data_buf, host->main_area0, 16); | ||
415 | } | 417 | } |
416 | 418 | ||
417 | static uint16_t get_dev_status_v3(struct mxc_nand_host *host) | 419 | static uint16_t get_dev_status_v3(struct mxc_nand_host *host) |
@@ -729,27 +731,30 @@ static void preset_v1_v2(struct mtd_info *mtd) | |||
729 | { | 731 | { |
730 | struct nand_chip *nand_chip = mtd->priv; | 732 | struct nand_chip *nand_chip = mtd->priv; |
731 | struct mxc_nand_host *host = nand_chip->priv; | 733 | struct mxc_nand_host *host = nand_chip->priv; |
732 | uint16_t tmp; | 734 | uint16_t config1 = 0; |
733 | 735 | ||
734 | /* enable interrupt, disable spare enable */ | 736 | if (nand_chip->ecc.mode == NAND_ECC_HW) |
735 | tmp = readw(NFC_V1_V2_CONFIG1); | 737 | config1 |= NFC_V1_V2_CONFIG1_ECC_EN; |
736 | tmp &= ~NFC_V1_V2_CONFIG1_INT_MSK; | 738 | |
737 | tmp &= ~NFC_V1_V2_CONFIG1_SP_EN; | 739 | if (nfc_is_v21()) |
738 | if (nand_chip->ecc.mode == NAND_ECC_HW) { | 740 | config1 |= NFC_V2_CONFIG1_FP_INT; |
739 | tmp |= NFC_V1_V2_CONFIG1_ECC_EN; | 741 | |
740 | } else { | 742 | if (!cpu_is_mx21()) |
741 | tmp &= ~NFC_V1_V2_CONFIG1_ECC_EN; | 743 | config1 |= NFC_V1_V2_CONFIG1_INT_MSK; |
742 | } | ||
743 | 744 | ||
744 | if (nfc_is_v21() && mtd->writesize) { | 745 | if (nfc_is_v21() && mtd->writesize) { |
746 | uint16_t pages_per_block = mtd->erasesize / mtd->writesize; | ||
747 | |||
745 | host->eccsize = get_eccsize(mtd); | 748 | host->eccsize = get_eccsize(mtd); |
746 | if (host->eccsize == 4) | 749 | if (host->eccsize == 4) |
747 | tmp |= NFC_V2_CONFIG1_ECC_MODE_4; | 750 | config1 |= NFC_V2_CONFIG1_ECC_MODE_4; |
751 | |||
752 | config1 |= NFC_V2_CONFIG1_PPB(ffs(pages_per_block) - 6); | ||
748 | } else { | 753 | } else { |
749 | host->eccsize = 1; | 754 | host->eccsize = 1; |
750 | } | 755 | } |
751 | 756 | ||
752 | writew(tmp, NFC_V1_V2_CONFIG1); | 757 | writew(config1, NFC_V1_V2_CONFIG1); |
753 | /* preset operation */ | 758 | /* preset operation */ |
754 | 759 | ||
755 | /* Unlock the internal RAM Buffer */ | 760 | /* Unlock the internal RAM Buffer */ |
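
The mxc_nand preset path now also programs the pages-per-block field; the NFC_V2_CONFIG1_PPB() encoding stores log2(pages_per_block) - 5 in bits 9..10, so 32, 64, 128 and 256 pages per block map to 0..3. A tiny standalone check of that encoding (illustrative only):

#include <stdio.h>
#include <strings.h>	/* ffs() */

static unsigned int example_ppb_field(unsigned int pages_per_block)
{
	return ((unsigned int)(ffs(pages_per_block) - 6) & 0x3) << 9;
}

int main(void)
{
	printf("%u %u %u %u\n",
	       example_ppb_field(32) >> 9, example_ppb_field(64) >> 9,
	       example_ppb_field(128) >> 9, example_ppb_field(256) >> 9);
	return 0;	/* prints: 0 1 2 3 */
}
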
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index 133d51528f8d..513e0a76a4a7 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c | |||
@@ -413,7 +413,7 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr, | |||
413 | prefetch_status = gpmc_read_status(GPMC_PREFETCH_COUNT); | 413 | prefetch_status = gpmc_read_status(GPMC_PREFETCH_COUNT); |
414 | } while (prefetch_status); | 414 | } while (prefetch_status); |
415 | /* disable and stop the PFPW engine */ | 415 | /* disable and stop the PFPW engine */ |
416 | gpmc_prefetch_reset(); | 416 | gpmc_prefetch_reset(info->gpmc_cs); |
417 | 417 | ||
418 | dma_unmap_single(&info->pdev->dev, dma_addr, len, dir); | 418 | dma_unmap_single(&info->pdev->dev, dma_addr, len, dir); |
419 | return 0; | 419 | return 0; |
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index 4d89f3780207..4d01cda68844 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c | |||
@@ -1320,6 +1320,7 @@ static int pxa3xx_nand_probe(struct platform_device *pdev) | |||
1320 | goto fail_free_irq; | 1320 | goto fail_free_irq; |
1321 | } | 1321 | } |
1322 | 1322 | ||
1323 | #ifdef CONFIG_MTD_PARTITIONS | ||
1323 | if (mtd_has_cmdlinepart()) { | 1324 | if (mtd_has_cmdlinepart()) { |
1324 | static const char *probes[] = { "cmdlinepart", NULL }; | 1325 | static const char *probes[] = { "cmdlinepart", NULL }; |
1325 | struct mtd_partition *parts; | 1326 | struct mtd_partition *parts; |
@@ -1332,6 +1333,9 @@ static int pxa3xx_nand_probe(struct platform_device *pdev) | |||
1332 | } | 1333 | } |
1333 | 1334 | ||
1334 | return add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts); | 1335 | return add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts); |
1336 | #else | ||
1337 | return 0; | ||
1338 | #endif | ||
1335 | 1339 | ||
1336 | fail_free_irq: | 1340 | fail_free_irq: |
1337 | free_irq(irq, info); | 1341 | free_irq(irq, info); |
@@ -1364,7 +1368,9 @@ static int pxa3xx_nand_remove(struct platform_device *pdev) | |||
1364 | platform_set_drvdata(pdev, NULL); | 1368 | platform_set_drvdata(pdev, NULL); |
1365 | 1369 | ||
1366 | del_mtd_device(mtd); | 1370 | del_mtd_device(mtd); |
1371 | #ifdef CONFIG_MTD_PARTITIONS | ||
1367 | del_mtd_partitions(mtd); | 1372 | del_mtd_partitions(mtd); |
1373 | #endif | ||
1368 | irq = platform_get_irq(pdev, 0); | 1374 | irq = platform_get_irq(pdev, 0); |
1369 | if (irq >= 0) | 1375 | if (irq >= 0) |
1370 | free_irq(irq, info); | 1376 | free_irq(irq, info); |
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c index cb443af3d45f..a460f1b748c2 100644 --- a/drivers/mtd/onenand/samsung.c +++ b/drivers/mtd/onenand/samsung.c | |||
@@ -554,14 +554,13 @@ static int s5pc110_dma_ops(void *dst, void *src, size_t count, int direction) | |||
554 | 554 | ||
555 | do { | 555 | do { |
556 | status = readl(base + S5PC110_DMA_TRANS_STATUS); | 556 | status = readl(base + S5PC110_DMA_TRANS_STATUS); |
557 | if (status & S5PC110_DMA_TRANS_STATUS_TE) { | ||
558 | writel(S5PC110_DMA_TRANS_CMD_TEC, | ||
559 | base + S5PC110_DMA_TRANS_CMD); | ||
560 | return -EIO; | ||
561 | } | ||
557 | } while (!(status & S5PC110_DMA_TRANS_STATUS_TD)); | 562 | } while (!(status & S5PC110_DMA_TRANS_STATUS_TD)); |
558 | 563 | ||
559 | if (status & S5PC110_DMA_TRANS_STATUS_TE) { | ||
560 | writel(S5PC110_DMA_TRANS_CMD_TEC, base + S5PC110_DMA_TRANS_CMD); | ||
561 | writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD); | ||
562 | return -EIO; | ||
563 | } | ||
564 | |||
565 | writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD); | 564 | writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD); |
566 | 565 | ||
567 | return 0; | 566 | return 0; |
@@ -571,13 +570,12 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area, | |||
571 | unsigned char *buffer, int offset, size_t count) | 570 | unsigned char *buffer, int offset, size_t count) |
572 | { | 571 | { |
573 | struct onenand_chip *this = mtd->priv; | 572 | struct onenand_chip *this = mtd->priv; |
574 | void __iomem *bufferram; | ||
575 | void __iomem *p; | 573 | void __iomem *p; |
576 | void *buf = (void *) buffer; | 574 | void *buf = (void *) buffer; |
577 | dma_addr_t dma_src, dma_dst; | 575 | dma_addr_t dma_src, dma_dst; |
578 | int err; | 576 | int err; |
579 | 577 | ||
580 | p = bufferram = this->base + area; | 578 | p = this->base + area; |
581 | if (ONENAND_CURRENT_BUFFERRAM(this)) { | 579 | if (ONENAND_CURRENT_BUFFERRAM(this)) { |
582 | if (area == ONENAND_DATARAM) | 580 | if (area == ONENAND_DATARAM) |
583 | p += this->writesize; | 581 | p += this->writesize; |
@@ -621,7 +619,7 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area, | |||
621 | normal: | 619 | normal: |
622 | if (count != mtd->writesize) { | 620 | if (count != mtd->writesize) { |
623 | /* Copy the bufferram to memory to prevent unaligned access */ | 621 | /* Copy the bufferram to memory to prevent unaligned access */ |
624 | memcpy(this->page_buf, bufferram, mtd->writesize); | 622 | memcpy(this->page_buf, p, mtd->writesize); |
625 | p = this->page_buf + offset; | 623 | p = this->page_buf + offset; |
626 | } | 624 | } |
627 | 625 | ||
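
The samsung onenand hunk moves the transfer-error test inside the polling loop, so a failed DMA breaks out with -EIO instead of waiting forever for the "done" bit. Reconstructed from the hunk, the resulting wait looks roughly like this (the wrapper function itself is illustrative):

static int example_wait_for_dma(void __iomem *base)
{
	unsigned int status;

	do {
		status = readl(base + S5PC110_DMA_TRANS_STATUS);
		if (status & S5PC110_DMA_TRANS_STATUS_TE) {
			/* transfer error: acknowledge it and bail out */
			writel(S5PC110_DMA_TRANS_CMD_TEC,
			       base + S5PC110_DMA_TRANS_CMD);
			return -EIO;
		}
	} while (!(status & S5PC110_DMA_TRANS_STATUS_TD));

	/* transfer done: acknowledge completion */
	writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD);
	return 0;
}
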
diff --git a/drivers/mtd/ubi/Kconfig.debug b/drivers/mtd/ubi/Kconfig.debug index 2246f154e2f7..61f6e5e40458 100644 --- a/drivers/mtd/ubi/Kconfig.debug +++ b/drivers/mtd/ubi/Kconfig.debug | |||
@@ -6,7 +6,7 @@ config MTD_UBI_DEBUG | |||
6 | depends on SYSFS | 6 | depends on SYSFS |
7 | depends on MTD_UBI | 7 | depends on MTD_UBI |
8 | select DEBUG_FS | 8 | select DEBUG_FS |
9 | select KALLSYMS_ALL | 9 | select KALLSYMS_ALL if KALLSYMS && DEBUG_KERNEL |
10 | help | 10 | help |
11 | This option enables UBI debugging. | 11 | This option enables UBI debugging. |
12 | 12 | ||
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c index 4dfa6b90c21c..3d2d1a69e9a0 100644 --- a/drivers/mtd/ubi/cdev.c +++ b/drivers/mtd/ubi/cdev.c | |||
@@ -798,18 +798,18 @@ static int rename_volumes(struct ubi_device *ubi, | |||
798 | goto out_free; | 798 | goto out_free; |
799 | } | 799 | } |
800 | 800 | ||
801 | re = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL); | 801 | re1 = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL); |
802 | if (!re) { | 802 | if (!re1) { |
803 | err = -ENOMEM; | 803 | err = -ENOMEM; |
804 | ubi_close_volume(desc); | 804 | ubi_close_volume(desc); |
805 | goto out_free; | 805 | goto out_free; |
806 | } | 806 | } |
807 | 807 | ||
808 | re->remove = 1; | 808 | re1->remove = 1; |
809 | re->desc = desc; | 809 | re1->desc = desc; |
810 | list_add(&re->list, &rename_list); | 810 | list_add(&re1->list, &rename_list); |
811 | dbg_msg("will remove volume %d, name \"%s\"", | 811 | dbg_msg("will remove volume %d, name \"%s\"", |
812 | re->desc->vol->vol_id, re->desc->vol->name); | 812 | re1->desc->vol->vol_id, re1->desc->vol->name); |
813 | } | 813 | } |
814 | 814 | ||
815 | mutex_lock(&ubi->device_mutex); | 815 | mutex_lock(&ubi->device_mutex); |
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c index 372a15ac9995..69b52e9c9489 100644 --- a/drivers/mtd/ubi/scan.c +++ b/drivers/mtd/ubi/scan.c | |||
@@ -843,7 +843,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, | |||
843 | case UBI_COMPAT_DELETE: | 843 | case UBI_COMPAT_DELETE: |
844 | ubi_msg("\"delete\" compatible internal volume %d:%d" | 844 | ubi_msg("\"delete\" compatible internal volume %d:%d" |
845 | " found, will remove it", vol_id, lnum); | 845 | " found, will remove it", vol_id, lnum); |
846 | err = add_to_list(si, pnum, ec, &si->corr); | 846 | err = add_to_list(si, pnum, ec, &si->erase); |
847 | if (err) | 847 | if (err) |
848 | return err; | 848 | return err; |
849 | return 0; | 849 | return 0; |
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index ee7b1d8fbb92..97a435672eaf 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c | |||
@@ -1212,7 +1212,8 @@ int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum) | |||
1212 | retry: | 1212 | retry: |
1213 | spin_lock(&ubi->wl_lock); | 1213 | spin_lock(&ubi->wl_lock); |
1214 | e = ubi->lookuptbl[pnum]; | 1214 | e = ubi->lookuptbl[pnum]; |
1215 | if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub)) { | 1215 | if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) || |
1216 | in_wl_tree(e, &ubi->erroneous)) { | ||
1216 | spin_unlock(&ubi->wl_lock); | 1217 | spin_unlock(&ubi->wl_lock); |
1217 | return 0; | 1218 | return 0; |
1218 | } | 1219 | } |
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c index c685a55fc2f4..179871d9e71f 100644 --- a/drivers/net/3c59x.c +++ b/drivers/net/3c59x.c | |||
@@ -635,6 +635,9 @@ struct vortex_private { | |||
635 | must_free_region:1, /* Flag: if zero, Cardbus owns the I/O region */ | 635 | must_free_region:1, /* Flag: if zero, Cardbus owns the I/O region */ |
636 | large_frames:1, /* accept large frames */ | 636 | large_frames:1, /* accept large frames */ |
637 | handling_irq:1; /* private in_irq indicator */ | 637 | handling_irq:1; /* private in_irq indicator */ |
638 | /* {get|set}_wol operations are already serialized by rtnl. | ||
639 | * no additional locking is required for the enable_wol and acpi_set_WOL() | ||
640 | */ | ||
638 | int drv_flags; | 641 | int drv_flags; |
639 | u16 status_enable; | 642 | u16 status_enable; |
640 | u16 intr_enable; | 643 | u16 intr_enable; |
@@ -647,7 +650,7 @@ struct vortex_private { | |||
647 | u16 io_size; /* Size of PCI region (for release_region) */ | 650 | u16 io_size; /* Size of PCI region (for release_region) */ |
648 | 651 | ||
649 | /* Serialises access to hardware other than MII and variables below. | 652 | /* Serialises access to hardware other than MII and variables below. |
650 | * The lock hierarchy is rtnl_lock > lock > mii_lock > window_lock. */ | 653 | * The lock hierarchy is rtnl_lock > {lock, mii_lock} > window_lock. */ |
651 | spinlock_t lock; | 654 | spinlock_t lock; |
652 | 655 | ||
653 | spinlock_t mii_lock; /* Serialises access to MII */ | 656 | spinlock_t mii_lock; /* Serialises access to MII */ |
@@ -1994,10 +1997,9 @@ vortex_error(struct net_device *dev, int status) | |||
1994 | } | 1997 | } |
1995 | } | 1998 | } |
1996 | 1999 | ||
1997 | if (status & RxEarly) { /* Rx early is unused. */ | 2000 | if (status & RxEarly) /* Rx early is unused. */ |
1998 | vortex_rx(dev); | ||
1999 | iowrite16(AckIntr | RxEarly, ioaddr + EL3_CMD); | 2001 | iowrite16(AckIntr | RxEarly, ioaddr + EL3_CMD); |
2000 | } | 2002 | |
2001 | if (status & StatsFull) { /* Empty statistics. */ | 2003 | if (status & StatsFull) { /* Empty statistics. */ |
2002 | static int DoneDidThat; | 2004 | static int DoneDidThat; |
2003 | if (vortex_debug > 4) | 2005 | if (vortex_debug > 4) |
@@ -2298,7 +2300,12 @@ vortex_interrupt(int irq, void *dev_id) | |||
2298 | if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) { | 2300 | if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) { |
2299 | if (status == 0xffff) | 2301 | if (status == 0xffff) |
2300 | break; | 2302 | break; |
2303 | if (status & RxEarly) | ||
2304 | vortex_rx(dev); | ||
2305 | spin_unlock(&vp->window_lock); | ||
2301 | vortex_error(dev, status); | 2306 | vortex_error(dev, status); |
2307 | spin_lock(&vp->window_lock); | ||
2308 | window_set(vp, 7); | ||
2302 | } | 2309 | } |
2303 | 2310 | ||
2304 | if (--work_done < 0) { | 2311 | if (--work_done < 0) { |
@@ -2935,28 +2942,31 @@ static void vortex_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | |||
2935 | { | 2942 | { |
2936 | struct vortex_private *vp = netdev_priv(dev); | 2943 | struct vortex_private *vp = netdev_priv(dev); |
2937 | 2944 | ||
2938 | spin_lock_irq(&vp->lock); | 2945 | if (!VORTEX_PCI(vp)) |
2946 | return; | ||
2947 | |||
2939 | wol->supported = WAKE_MAGIC; | 2948 | wol->supported = WAKE_MAGIC; |
2940 | 2949 | ||
2941 | wol->wolopts = 0; | 2950 | wol->wolopts = 0; |
2942 | if (vp->enable_wol) | 2951 | if (vp->enable_wol) |
2943 | wol->wolopts |= WAKE_MAGIC; | 2952 | wol->wolopts |= WAKE_MAGIC; |
2944 | spin_unlock_irq(&vp->lock); | ||
2945 | } | 2953 | } |
2946 | 2954 | ||
2947 | static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | 2955 | static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) |
2948 | { | 2956 | { |
2949 | struct vortex_private *vp = netdev_priv(dev); | 2957 | struct vortex_private *vp = netdev_priv(dev); |
2958 | |||
2959 | if (!VORTEX_PCI(vp)) | ||
2960 | return -EOPNOTSUPP; | ||
2961 | |||
2950 | if (wol->wolopts & ~WAKE_MAGIC) | 2962 | if (wol->wolopts & ~WAKE_MAGIC) |
2951 | return -EINVAL; | 2963 | return -EINVAL; |
2952 | 2964 | ||
2953 | spin_lock_irq(&vp->lock); | ||
2954 | if (wol->wolopts & WAKE_MAGIC) | 2965 | if (wol->wolopts & WAKE_MAGIC) |
2955 | vp->enable_wol = 1; | 2966 | vp->enable_wol = 1; |
2956 | else | 2967 | else |
2957 | vp->enable_wol = 0; | 2968 | vp->enable_wol = 0; |
2958 | acpi_set_WOL(dev); | 2969 | acpi_set_WOL(dev); |
2959 | spin_unlock_irq(&vp->lock); | ||
2960 | 2970 | ||
2961 | return 0; | 2971 | return 0; |
2962 | } | 2972 | } |
@@ -2984,7 +2994,6 @@ static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
2984 | { | 2994 | { |
2985 | int err; | 2995 | int err; |
2986 | struct vortex_private *vp = netdev_priv(dev); | 2996 | struct vortex_private *vp = netdev_priv(dev); |
2987 | unsigned long flags; | ||
2988 | pci_power_t state = 0; | 2997 | pci_power_t state = 0; |
2989 | 2998 | ||
2990 | if(VORTEX_PCI(vp)) | 2999 | if(VORTEX_PCI(vp)) |
@@ -2994,9 +3003,7 @@ static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
2994 | 3003 | ||
2995 | if(state != 0) | 3004 | if(state != 0) |
2996 | pci_set_power_state(VORTEX_PCI(vp), PCI_D0); | 3005 | pci_set_power_state(VORTEX_PCI(vp), PCI_D0); |
2997 | spin_lock_irqsave(&vp->lock, flags); | ||
2998 | err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL); | 3006 | err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL); |
2999 | spin_unlock_irqrestore(&vp->lock, flags); | ||
3000 | if(state != 0) | 3007 | if(state != 0) |
3001 | pci_set_power_state(VORTEX_PCI(vp), state); | 3008 | pci_set_power_state(VORTEX_PCI(vp), state); |
3002 | 3009 | ||
@@ -3201,6 +3208,9 @@ static void acpi_set_WOL(struct net_device *dev) | |||
3201 | return; | 3208 | return; |
3202 | } | 3209 | } |
3203 | 3210 | ||
3211 | if (VORTEX_PCI(vp)->current_state < PCI_D3hot) | ||
3212 | return; | ||
3213 | |||
3204 | /* Change the power state to D3; RxEnable doesn't take effect. */ | 3214 | /* Change the power state to D3; RxEnable doesn't take effect. */ |
3205 | pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot); | 3215 | pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot); |
3206 | } | 3216 | } |
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c index 63b9ba0cc67e..c73be2848319 100644 --- a/drivers/net/atlx/atl1.c +++ b/drivers/net/atlx/atl1.c | |||
@@ -1251,6 +1251,12 @@ static void atl1_free_ring_resources(struct atl1_adapter *adapter) | |||
1251 | 1251 | ||
1252 | rrd_ring->desc = NULL; | 1252 | rrd_ring->desc = NULL; |
1253 | rrd_ring->dma = 0; | 1253 | rrd_ring->dma = 0; |
1254 | |||
1255 | adapter->cmb.dma = 0; | ||
1256 | adapter->cmb.cmb = NULL; | ||
1257 | |||
1258 | adapter->smb.dma = 0; | ||
1259 | adapter->smb.smb = NULL; | ||
1254 | } | 1260 | } |
1255 | 1261 | ||
1256 | static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter) | 1262 | static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter) |
@@ -2847,10 +2853,11 @@ static int atl1_resume(struct pci_dev *pdev) | |||
2847 | pci_enable_wake(pdev, PCI_D3cold, 0); | 2853 | pci_enable_wake(pdev, PCI_D3cold, 0); |
2848 | 2854 | ||
2849 | atl1_reset_hw(&adapter->hw); | 2855 | atl1_reset_hw(&adapter->hw); |
2850 | adapter->cmb.cmb->int_stats = 0; | ||
2851 | 2856 | ||
2852 | if (netif_running(netdev)) | 2857 | if (netif_running(netdev)) { |
2858 | adapter->cmb.cmb->int_stats = 0; | ||
2853 | atl1_up(adapter); | 2859 | atl1_up(adapter); |
2860 | } | ||
2854 | netif_device_attach(netdev); | 2861 | netif_device_attach(netdev); |
2855 | 2862 | ||
2856 | return 0; | 2863 | return 0; |
diff --git a/drivers/net/b44.c b/drivers/net/b44.c index 37617abc1647..1e620e287ae0 100644 --- a/drivers/net/b44.c +++ b/drivers/net/b44.c | |||
@@ -848,6 +848,15 @@ static int b44_poll(struct napi_struct *napi, int budget) | |||
848 | b44_tx(bp); | 848 | b44_tx(bp); |
849 | /* spin_unlock(&bp->tx_lock); */ | 849 | /* spin_unlock(&bp->tx_lock); */ |
850 | } | 850 | } |
851 | if (bp->istat & ISTAT_RFO) { /* fast recovery, in ~20msec */ | ||
852 | bp->istat &= ~ISTAT_RFO; | ||
853 | b44_disable_ints(bp); | ||
854 | ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */ | ||
855 | b44_init_rings(bp); | ||
856 | b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY); | ||
857 | netif_wake_queue(bp->dev); | ||
858 | } | ||
859 | |||
851 | spin_unlock_irqrestore(&bp->lock, flags); | 860 | spin_unlock_irqrestore(&bp->lock, flags); |
852 | 861 | ||
853 | work_done = 0; | 862 | work_done = 0; |
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h index 99197bd54da5..53306bf3f401 100644 --- a/drivers/net/benet/be.h +++ b/drivers/net/benet/be.h | |||
@@ -181,6 +181,7 @@ struct be_drvr_stats { | |||
181 | u64 be_rx_bytes_prev; | 181 | u64 be_rx_bytes_prev; |
182 | u64 be_rx_pkts; | 182 | u64 be_rx_pkts; |
183 | u32 be_rx_rate; | 183 | u32 be_rx_rate; |
184 | u32 be_rx_mcast_pkt; | ||
184 | /* number of non ether type II frames dropped where | 185 | /* number of non ether type II frames dropped where |
185 | * frame len > length field of Mac Hdr */ | 186 | * frame len > length field of Mac Hdr */ |
186 | u32 be_802_3_dropped_frames; | 187 | u32 be_802_3_dropped_frames; |
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c index 3d305494a606..34abcc9403d6 100644 --- a/drivers/net/benet/be_cmds.c +++ b/drivers/net/benet/be_cmds.c | |||
@@ -140,10 +140,8 @@ int be_process_mcc(struct be_adapter *adapter, int *status) | |||
140 | while ((compl = be_mcc_compl_get(adapter))) { | 140 | while ((compl = be_mcc_compl_get(adapter))) { |
141 | if (compl->flags & CQE_FLAGS_ASYNC_MASK) { | 141 | if (compl->flags & CQE_FLAGS_ASYNC_MASK) { |
142 | /* Interpret flags as an async trailer */ | 142 | /* Interpret flags as an async trailer */ |
143 | BUG_ON(!is_link_state_evt(compl->flags)); | 143 | if (is_link_state_evt(compl->flags)) |
144 | 144 | be_async_link_state_process(adapter, | |
145 | /* Interpret compl as a async link evt */ | ||
146 | be_async_link_state_process(adapter, | ||
147 | (struct be_async_event_link_state *) compl); | 145 | (struct be_async_event_link_state *) compl); |
148 | } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) { | 146 | } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) { |
149 | *status = be_mcc_compl_process(adapter, compl); | 147 | *status = be_mcc_compl_process(adapter, compl); |
@@ -207,7 +205,7 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db) | |||
207 | 205 | ||
208 | if (msecs > 4000) { | 206 | if (msecs > 4000) { |
209 | dev_err(&adapter->pdev->dev, "mbox poll timed out\n"); | 207 | dev_err(&adapter->pdev->dev, "mbox poll timed out\n"); |
210 | be_dump_ue(adapter); | 208 | be_detect_dump_ue(adapter); |
211 | return -1; | 209 | return -1; |
212 | } | 210 | } |
213 | 211 | ||
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h index bdc10a28cfda..ad1e6fac60c5 100644 --- a/drivers/net/benet/be_cmds.h +++ b/drivers/net/benet/be_cmds.h | |||
@@ -992,5 +992,5 @@ extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, | |||
992 | extern int be_cmd_get_phy_info(struct be_adapter *adapter, | 992 | extern int be_cmd_get_phy_info(struct be_adapter *adapter, |
993 | struct be_dma_mem *cmd); | 993 | struct be_dma_mem *cmd); |
994 | extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain); | 994 | extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain); |
995 | extern void be_dump_ue(struct be_adapter *adapter); | 995 | extern void be_detect_dump_ue(struct be_adapter *adapter); |
996 | 996 | ||
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c index cd16243c7c36..13f0abbc5205 100644 --- a/drivers/net/benet/be_ethtool.c +++ b/drivers/net/benet/be_ethtool.c | |||
@@ -60,6 +60,7 @@ static const struct be_ethtool_stat et_stats[] = { | |||
60 | {DRVSTAT_INFO(be_rx_events)}, | 60 | {DRVSTAT_INFO(be_rx_events)}, |
61 | {DRVSTAT_INFO(be_tx_compl)}, | 61 | {DRVSTAT_INFO(be_tx_compl)}, |
62 | {DRVSTAT_INFO(be_rx_compl)}, | 62 | {DRVSTAT_INFO(be_rx_compl)}, |
63 | {DRVSTAT_INFO(be_rx_mcast_pkt)}, | ||
63 | {DRVSTAT_INFO(be_ethrx_post_fail)}, | 64 | {DRVSTAT_INFO(be_ethrx_post_fail)}, |
64 | {DRVSTAT_INFO(be_802_3_dropped_frames)}, | 65 | {DRVSTAT_INFO(be_802_3_dropped_frames)}, |
65 | {DRVSTAT_INFO(be_802_3_malformed_frames)}, | 66 | {DRVSTAT_INFO(be_802_3_malformed_frames)}, |
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h index 5d38046402b2..a2ec5df0d733 100644 --- a/drivers/net/benet/be_hw.h +++ b/drivers/net/benet/be_hw.h | |||
@@ -167,8 +167,11 @@ | |||
167 | #define FLASH_FCoE_BIOS_START_g3 (13631488) | 167 | #define FLASH_FCoE_BIOS_START_g3 (13631488) |
168 | #define FLASH_REDBOOT_START_g3 (262144) | 168 | #define FLASH_REDBOOT_START_g3 (262144) |
169 | 169 | ||
170 | 170 | /************* Rx Packet Type Encoding **************/ | |
171 | 171 | #define BE_UNICAST_PACKET 0 | |
172 | #define BE_MULTICAST_PACKET 1 | ||
173 | #define BE_BROADCAST_PACKET 2 | ||
174 | #define BE_RSVD_PACKET 3 | ||
172 | 175 | ||
173 | /* | 176 | /* |
174 | * BE descriptors: host memory data structures whose formats | 177 | * BE descriptors: host memory data structures whose formats |
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index 74e146f470c6..6eda7a022256 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c | |||
@@ -247,6 +247,7 @@ void netdev_stats_update(struct be_adapter *adapter) | |||
247 | dev_stats->tx_packets = drvr_stats(adapter)->be_tx_pkts; | 247 | dev_stats->tx_packets = drvr_stats(adapter)->be_tx_pkts; |
248 | dev_stats->rx_bytes = drvr_stats(adapter)->be_rx_bytes; | 248 | dev_stats->rx_bytes = drvr_stats(adapter)->be_rx_bytes; |
249 | dev_stats->tx_bytes = drvr_stats(adapter)->be_tx_bytes; | 249 | dev_stats->tx_bytes = drvr_stats(adapter)->be_tx_bytes; |
250 | dev_stats->multicast = drvr_stats(adapter)->be_rx_mcast_pkt; | ||
250 | 251 | ||
251 | /* bad pkts received */ | 252 | /* bad pkts received */ |
252 | dev_stats->rx_errors = port_stats->rx_crc_errors + | 253 | dev_stats->rx_errors = port_stats->rx_crc_errors + |
@@ -294,7 +295,6 @@ void netdev_stats_update(struct be_adapter *adapter) | |||
294 | /* no space available in linux */ | 295 | /* no space available in linux */ |
295 | dev_stats->tx_dropped = 0; | 296 | dev_stats->tx_dropped = 0; |
296 | 297 | ||
297 | dev_stats->multicast = port_stats->rx_multicast_frames; | ||
298 | dev_stats->collisions = 0; | 298 | dev_stats->collisions = 0; |
299 | 299 | ||
300 | /* detailed tx_errors */ | 300 | /* detailed tx_errors */ |
@@ -848,7 +848,7 @@ static void be_rx_rate_update(struct be_adapter *adapter) | |||
848 | } | 848 | } |
849 | 849 | ||
850 | static void be_rx_stats_update(struct be_adapter *adapter, | 850 | static void be_rx_stats_update(struct be_adapter *adapter, |
851 | u32 pktsize, u16 numfrags) | 851 | u32 pktsize, u16 numfrags, u8 pkt_type) |
852 | { | 852 | { |
853 | struct be_drvr_stats *stats = drvr_stats(adapter); | 853 | struct be_drvr_stats *stats = drvr_stats(adapter); |
854 | 854 | ||
@@ -856,6 +856,9 @@ static void be_rx_stats_update(struct be_adapter *adapter, | |||
856 | stats->be_rx_frags += numfrags; | 856 | stats->be_rx_frags += numfrags; |
857 | stats->be_rx_bytes += pktsize; | 857 | stats->be_rx_bytes += pktsize; |
858 | stats->be_rx_pkts++; | 858 | stats->be_rx_pkts++; |
859 | |||
860 | if (pkt_type == BE_MULTICAST_PACKET) | ||
861 | stats->be_rx_mcast_pkt++; | ||
859 | } | 862 | } |
860 | 863 | ||
861 | static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso) | 864 | static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso) |
@@ -925,9 +928,11 @@ static void skb_fill_rx_data(struct be_adapter *adapter, | |||
925 | u16 rxq_idx, i, j; | 928 | u16 rxq_idx, i, j; |
926 | u32 pktsize, hdr_len, curr_frag_len, size; | 929 | u32 pktsize, hdr_len, curr_frag_len, size; |
927 | u8 *start; | 930 | u8 *start; |
931 | u8 pkt_type; | ||
928 | 932 | ||
929 | rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); | 933 | rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); |
930 | pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp); | 934 | pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp); |
935 | pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp); | ||
931 | 936 | ||
932 | page_info = get_rx_page_info(adapter, rxq_idx); | 937 | page_info = get_rx_page_info(adapter, rxq_idx); |
933 | 938 | ||
@@ -993,7 +998,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter, | |||
993 | BUG_ON(j > MAX_SKB_FRAGS); | 998 | BUG_ON(j > MAX_SKB_FRAGS); |
994 | 999 | ||
995 | done: | 1000 | done: |
996 | be_rx_stats_update(adapter, pktsize, num_rcvd); | 1001 | be_rx_stats_update(adapter, pktsize, num_rcvd, pkt_type); |
997 | } | 1002 | } |
998 | 1003 | ||
999 | /* Process the RX completion indicated by rxcp when GRO is disabled */ | 1004 | /* Process the RX completion indicated by rxcp when GRO is disabled */ |
@@ -1060,6 +1065,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter, | |||
1060 | u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len; | 1065 | u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len; |
1061 | u16 i, rxq_idx = 0, vid, j; | 1066 | u16 i, rxq_idx = 0, vid, j; |
1062 | u8 vtm; | 1067 | u8 vtm; |
1068 | u8 pkt_type; | ||
1063 | 1069 | ||
1064 | num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); | 1070 | num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); |
1065 | /* Is it a flush compl that has no data */ | 1071 | /* Is it a flush compl that has no data */ |
@@ -1070,6 +1076,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter, | |||
1070 | vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp); | 1076 | vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp); |
1071 | rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); | 1077 | rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); |
1072 | vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp); | 1078 | vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp); |
1079 | pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp); | ||
1073 | 1080 | ||
1074 | /* vlanf could be wrongly set in some cards. | 1081 | /* vlanf could be wrongly set in some cards. |
1075 | * ignore if vtm is not set */ | 1082 | * ignore if vtm is not set */ |
@@ -1125,7 +1132,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter, | |||
1125 | vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid); | 1132 | vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid); |
1126 | } | 1133 | } |
1127 | 1134 | ||
1128 | be_rx_stats_update(adapter, pkt_size, num_rcvd); | 1135 | be_rx_stats_update(adapter, pkt_size, num_rcvd, pkt_type); |
1129 | } | 1136 | } |
1130 | 1137 | ||
1131 | static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter) | 1138 | static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter) |
@@ -1743,26 +1750,7 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget) | |||
1743 | return 1; | 1750 | return 1; |
1744 | } | 1751 | } |
1745 | 1752 | ||
1746 | static inline bool be_detect_ue(struct be_adapter *adapter) | 1753 | void be_detect_dump_ue(struct be_adapter *adapter) |
1747 | { | ||
1748 | u32 online0 = 0, online1 = 0; | ||
1749 | |||
1750 | pci_read_config_dword(adapter->pdev, PCICFG_ONLINE0, &online0); | ||
1751 | |||
1752 | pci_read_config_dword(adapter->pdev, PCICFG_ONLINE1, &online1); | ||
1753 | |||
1754 | if (!online0 || !online1) { | ||
1755 | adapter->ue_detected = true; | ||
1756 | dev_err(&adapter->pdev->dev, | ||
1757 | "UE Detected!! online0=%d online1=%d\n", | ||
1758 | online0, online1); | ||
1759 | return true; | ||
1760 | } | ||
1761 | |||
1762 | return false; | ||
1763 | } | ||
1764 | |||
1765 | void be_dump_ue(struct be_adapter *adapter) | ||
1766 | { | 1754 | { |
1767 | u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask; | 1755 | u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask; |
1768 | u32 i; | 1756 | u32 i; |
@@ -1779,6 +1767,11 @@ void be_dump_ue(struct be_adapter *adapter) | |||
1779 | ue_status_lo = (ue_status_lo & (~ue_status_lo_mask)); | 1767 | ue_status_lo = (ue_status_lo & (~ue_status_lo_mask)); |
1780 | ue_status_hi = (ue_status_hi & (~ue_status_hi_mask)); | 1768 | ue_status_hi = (ue_status_hi & (~ue_status_hi_mask)); |
1781 | 1769 | ||
1770 | if (ue_status_lo || ue_status_hi) { | ||
1771 | adapter->ue_detected = true; | ||
1772 | dev_err(&adapter->pdev->dev, "UE Detected!!\n"); | ||
1773 | } | ||
1774 | |||
1782 | if (ue_status_lo) { | 1775 | if (ue_status_lo) { |
1783 | for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) { | 1776 | for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) { |
1784 | if (ue_status_lo & 1) | 1777 | if (ue_status_lo & 1) |
@@ -1814,10 +1807,8 @@ static void be_worker(struct work_struct *work) | |||
1814 | adapter->rx_post_starved = false; | 1807 | adapter->rx_post_starved = false; |
1815 | be_post_rx_frags(adapter); | 1808 | be_post_rx_frags(adapter); |
1816 | } | 1809 | } |
1817 | if (!adapter->ue_detected) { | 1810 | if (!adapter->ue_detected) |
1818 | if (be_detect_ue(adapter)) | 1811 | be_detect_dump_ue(adapter); |
1819 | be_dump_ue(adapter); | ||
1820 | } | ||
1821 | 1812 | ||
1822 | schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); | 1813 | schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); |
1823 | } | 1814 | } |
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 822f586d72af..0ddf4c66afe2 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c | |||
@@ -2466,6 +2466,9 @@ int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct pac | |||
2466 | if (!(dev->flags & IFF_MASTER)) | 2466 | if (!(dev->flags & IFF_MASTER)) |
2467 | goto out; | 2467 | goto out; |
2468 | 2468 | ||
2469 | if (!pskb_may_pull(skb, sizeof(struct lacpdu))) | ||
2470 | goto out; | ||
2471 | |||
2469 | read_lock(&bond->lock); | 2472 | read_lock(&bond->lock); |
2470 | slave = bond_get_slave_by_dev((struct bonding *)netdev_priv(dev), | 2473 | slave = bond_get_slave_by_dev((struct bonding *)netdev_priv(dev), |
2471 | orig_dev); | 2474 | orig_dev); |
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index c746b331771d..26bb118c4533 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c | |||
@@ -362,6 +362,9 @@ static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct | |||
362 | goto out; | 362 | goto out; |
363 | } | 363 | } |
364 | 364 | ||
365 | if (!pskb_may_pull(skb, arp_hdr_len(bond_dev))) | ||
366 | goto out; | ||
367 | |||
365 | if (skb->len < sizeof(struct arp_pkt)) { | 368 | if (skb->len < sizeof(struct arp_pkt)) { |
366 | pr_debug("Packet is too small to be an ARP\n"); | 369 | pr_debug("Packet is too small to be an ARP\n"); |
367 | goto out; | 370 | goto out; |
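
Both bonding receive handlers above gain the same guard: verify with pskb_may_pull() that the header about to be parsed is really present in the skb's linear area before touching it. A short sketch of the idiom with an illustrative function name:

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>

static int example_arp_rx_guard(struct sk_buff *skb, struct net_device *dev)
{
	/* Runt or heavily fragmented packet: refuse before reading the header. */
	if (!pskb_may_pull(skb, arp_hdr_len(dev)))
		return -EINVAL;

	/* arp_hdr(skb) is now safe to dereference */
	return 0;
}
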
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 2cc4cfc31892..3b16f62d5606 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -2797,9 +2797,15 @@ void bond_loadbalance_arp_mon(struct work_struct *work) | |||
2797 | * so it can wait | 2797 | * so it can wait |
2798 | */ | 2798 | */ |
2799 | bond_for_each_slave(bond, slave, i) { | 2799 | bond_for_each_slave(bond, slave, i) { |
2800 | unsigned long trans_start = dev_trans_start(slave->dev); | ||
2801 | |||
2800 | if (slave->link != BOND_LINK_UP) { | 2802 | if (slave->link != BOND_LINK_UP) { |
2801 | if (time_before_eq(jiffies, dev_trans_start(slave->dev) + delta_in_ticks) && | 2803 | if (time_in_range(jiffies, |
2802 | time_before_eq(jiffies, slave->dev->last_rx + delta_in_ticks)) { | 2804 | trans_start - delta_in_ticks, |
2805 | trans_start + delta_in_ticks) && | ||
2806 | time_in_range(jiffies, | ||
2807 | slave->dev->last_rx - delta_in_ticks, | ||
2808 | slave->dev->last_rx + delta_in_ticks)) { | ||
2803 | 2809 | ||
2804 | slave->link = BOND_LINK_UP; | 2810 | slave->link = BOND_LINK_UP; |
2805 | slave->state = BOND_STATE_ACTIVE; | 2811 | slave->state = BOND_STATE_ACTIVE; |
@@ -2827,8 +2833,12 @@ void bond_loadbalance_arp_mon(struct work_struct *work) | |||
2827 | * when the source ip is 0, so don't take the link down | 2833 | * when the source ip is 0, so don't take the link down |
2828 | * if we don't know our ip yet | 2834 | * if we don't know our ip yet |
2829 | */ | 2835 | */ |
2830 | if (time_after_eq(jiffies, dev_trans_start(slave->dev) + 2*delta_in_ticks) || | 2836 | if (!time_in_range(jiffies, |
2831 | (time_after_eq(jiffies, slave->dev->last_rx + 2*delta_in_ticks))) { | 2837 | trans_start - delta_in_ticks, |
2838 | trans_start + 2 * delta_in_ticks) || | ||
2839 | !time_in_range(jiffies, | ||
2840 | slave->dev->last_rx - delta_in_ticks, | ||
2841 | slave->dev->last_rx + 2 * delta_in_ticks)) { | ||
2832 | 2842 | ||
2833 | slave->link = BOND_LINK_DOWN; | 2843 | slave->link = BOND_LINK_DOWN; |
2834 | slave->state = BOND_STATE_BACKUP; | 2844 | slave->state = BOND_STATE_BACKUP; |
@@ -2883,13 +2893,16 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) | |||
2883 | { | 2893 | { |
2884 | struct slave *slave; | 2894 | struct slave *slave; |
2885 | int i, commit = 0; | 2895 | int i, commit = 0; |
2896 | unsigned long trans_start; | ||
2886 | 2897 | ||
2887 | bond_for_each_slave(bond, slave, i) { | 2898 | bond_for_each_slave(bond, slave, i) { |
2888 | slave->new_link = BOND_LINK_NOCHANGE; | 2899 | slave->new_link = BOND_LINK_NOCHANGE; |
2889 | 2900 | ||
2890 | if (slave->link != BOND_LINK_UP) { | 2901 | if (slave->link != BOND_LINK_UP) { |
2891 | if (time_before_eq(jiffies, slave_last_rx(bond, slave) + | 2902 | if (time_in_range(jiffies, |
2892 | delta_in_ticks)) { | 2903 | slave_last_rx(bond, slave) - delta_in_ticks, |
2904 | slave_last_rx(bond, slave) + delta_in_ticks)) { | ||
2905 | |||
2893 | slave->new_link = BOND_LINK_UP; | 2906 | slave->new_link = BOND_LINK_UP; |
2894 | commit++; | 2907 | commit++; |
2895 | } | 2908 | } |
@@ -2902,8 +2915,9 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) | |||
2902 | * active. This avoids bouncing, as the last receive | 2915 | * active. This avoids bouncing, as the last receive |
2903 | * times need a full ARP monitor cycle to be updated. | 2916 | * times need a full ARP monitor cycle to be updated. |
2904 | */ | 2917 | */ |
2905 | if (!time_after_eq(jiffies, slave->jiffies + | 2918 | if (time_in_range(jiffies, |
2906 | 2 * delta_in_ticks)) | 2919 | slave->jiffies - delta_in_ticks, |
2920 | slave->jiffies + 2 * delta_in_ticks)) | ||
2907 | continue; | 2921 | continue; |
2908 | 2922 | ||
2909 | /* | 2923 | /* |
@@ -2921,8 +2935,10 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) | |||
2921 | */ | 2935 | */ |
2922 | if (slave->state == BOND_STATE_BACKUP && | 2936 | if (slave->state == BOND_STATE_BACKUP && |
2923 | !bond->current_arp_slave && | 2937 | !bond->current_arp_slave && |
2924 | time_after(jiffies, slave_last_rx(bond, slave) + | 2938 | !time_in_range(jiffies, |
2925 | 3 * delta_in_ticks)) { | 2939 | slave_last_rx(bond, slave) - delta_in_ticks, |
2940 | slave_last_rx(bond, slave) + 3 * delta_in_ticks)) { | ||
2941 | |||
2926 | slave->new_link = BOND_LINK_DOWN; | 2942 | slave->new_link = BOND_LINK_DOWN; |
2927 | commit++; | 2943 | commit++; |
2928 | } | 2944 | } |
@@ -2933,11 +2949,15 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) | |||
2933 | * - (more than 2*delta since receive AND | 2949 | * - (more than 2*delta since receive AND |
2934 | * the bond has an IP address) | 2950 | * the bond has an IP address) |
2935 | */ | 2951 | */ |
2952 | trans_start = dev_trans_start(slave->dev); | ||
2936 | if ((slave->state == BOND_STATE_ACTIVE) && | 2953 | if ((slave->state == BOND_STATE_ACTIVE) && |
2937 | (time_after_eq(jiffies, dev_trans_start(slave->dev) + | 2954 | (!time_in_range(jiffies, |
2938 | 2 * delta_in_ticks) || | 2955 | trans_start - delta_in_ticks, |
2939 | (time_after_eq(jiffies, slave_last_rx(bond, slave) | 2956 | trans_start + 2 * delta_in_ticks) || |
2940 | + 2 * delta_in_ticks)))) { | 2957 | !time_in_range(jiffies, |
2958 | slave_last_rx(bond, slave) - delta_in_ticks, | ||
2959 | slave_last_rx(bond, slave) + 2 * delta_in_ticks))) { | ||
2960 | |||
2941 | slave->new_link = BOND_LINK_DOWN; | 2961 | slave->new_link = BOND_LINK_DOWN; |
2942 | commit++; | 2962 | commit++; |
2943 | } | 2963 | } |
@@ -2956,6 +2976,7 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks) | |||
2956 | { | 2976 | { |
2957 | struct slave *slave; | 2977 | struct slave *slave; |
2958 | int i; | 2978 | int i; |
2979 | unsigned long trans_start; | ||
2959 | 2980 | ||
2960 | bond_for_each_slave(bond, slave, i) { | 2981 | bond_for_each_slave(bond, slave, i) { |
2961 | switch (slave->new_link) { | 2982 | switch (slave->new_link) { |
@@ -2963,10 +2984,11 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks) | |||
2963 | continue; | 2984 | continue; |
2964 | 2985 | ||
2965 | case BOND_LINK_UP: | 2986 | case BOND_LINK_UP: |
2987 | trans_start = dev_trans_start(slave->dev); | ||
2966 | if ((!bond->curr_active_slave && | 2988 | if ((!bond->curr_active_slave && |
2967 | time_before_eq(jiffies, | 2989 | time_in_range(jiffies, |
2968 | dev_trans_start(slave->dev) + | 2990 | trans_start - delta_in_ticks, |
2969 | delta_in_ticks)) || | 2991 | trans_start + delta_in_ticks)) || |
2970 | bond->curr_active_slave != slave) { | 2992 | bond->curr_active_slave != slave) { |
2971 | slave->link = BOND_LINK_UP; | 2993 | slave->link = BOND_LINK_UP; |
2972 | bond->current_arp_slave = NULL; | 2994 | bond->current_arp_slave = NULL; |
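Note on the bonding hunks above: the one-sided time_after_eq()/time_before_eq() tests are replaced with time_in_range(), so a slave only counts as fresh while jiffies lies in a bounded window around the last transmit/receive stamp; the windowed form stays correct even when the stored stamp is so old that the unsigned subtraction wraps past half the jiffies range. A minimal userspace sketch of the arithmetic (not driver code; helper names and sample values are mine, and it relies on two's-complement wrap the way the kernel time macros do):

#include <stdio.h>
#include <limits.h>

/* Wrap-safe "a is at or after b", same trick as the kernel macros. */
static int after_eq(unsigned long a, unsigned long b)
{
	return (long)(a - b) >= 0;
}

/* Kernel-style time_in_range(): a <= now <= b, modulo wraparound. */
static int in_range(unsigned long now, unsigned long a, unsigned long b)
{
	return after_eq(now, a) && after_eq(b, now);
}

int main(void)
{
	unsigned long delta = 100;    /* stand-in for delta_in_ticks */
	unsigned long now = 5000;
	/* Last transmit more than half the jiffies range ago. */
	unsigned long trans_start = now - (unsigned long)LONG_MAX - 1000;

	/* Old one-sided test: the wrapped difference looks "in the future". */
	int old_says_down = after_eq(now, trans_start + 2 * delta);
	/* New windowed test: the stale stamp falls outside the window. */
	int new_says_down = !in_range(now, trans_start - delta,
				      trans_start + 2 * delta);

	printf("one-sided check marks link down: %d (misses the stale stamp)\n",
	       old_says_down);
	printf("windowed check marks link down:  %d\n", new_says_down);
	return 0;
}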
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c index ad19585d960b..f208712c0b90 100644 --- a/drivers/net/cxgb3/cxgb3_main.c +++ b/drivers/net/cxgb3/cxgb3_main.c | |||
@@ -2296,6 +2296,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr) | |||
2296 | case CHELSIO_GET_QSET_NUM:{ | 2296 | case CHELSIO_GET_QSET_NUM:{ |
2297 | struct ch_reg edata; | 2297 | struct ch_reg edata; |
2298 | 2298 | ||
2299 | memset(&edata, 0, sizeof(struct ch_reg)); | ||
2300 | |||
2299 | edata.cmd = CHELSIO_GET_QSET_NUM; | 2301 | edata.cmd = CHELSIO_GET_QSET_NUM; |
2300 | edata.val = pi->nqsets; | 2302 | edata.val = pi->nqsets; |
2301 | if (copy_to_user(useraddr, &edata, sizeof(edata))) | 2303 | if (copy_to_user(useraddr, &edata, sizeof(edata))) |
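The cxgb3 hunk (like the eql and hso hunks later in this series) zeroes the reply structure before filling it and calling copy_to_user(), so compiler-inserted padding and any unwritten fields cannot carry stale kernel stack bytes to userspace. A small userspace illustration of why assigning every named field is not enough (struct layout and values are illustrative only):

#include <stdio.h>
#include <string.h>

/* A struct with implicit padding between the char and the int. */
struct reply {
	char cmd;
	/* typically 3 padding bytes live here */
	int  val;
};

static void fill_without_clear(struct reply *r)
{
	r->cmd = 1;
	r->val = 42;          /* padding keeps whatever was on the stack */
}

static void fill_with_clear(struct reply *r)
{
	memset(r, 0, sizeof(*r));   /* same pattern as the driver fixes */
	r->cmd = 1;
	r->val = 42;
}

static void dump(const char *tag, const struct reply *r)
{
	const unsigned char *p = (const unsigned char *)r;
	printf("%s:", tag);
	for (unsigned int i = 0; i < sizeof(*r); i++)
		printf(" %02x", p[i]);
	printf("\n");
}

int main(void)
{
	struct reply r;

	fill_without_clear(&r);
	dump("no memset", &r);    /* padding bytes may show garbage */

	fill_with_clear(&r);
	dump("memset   ", &r);    /* every byte is deterministic */
	return 0;
}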
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h index 66ed08f726fb..ba302a5c2c30 100644 --- a/drivers/net/e1000e/hw.h +++ b/drivers/net/e1000e/hw.h | |||
@@ -57,6 +57,7 @@ enum e1e_registers { | |||
57 | E1000_SCTL = 0x00024, /* SerDes Control - RW */ | 57 | E1000_SCTL = 0x00024, /* SerDes Control - RW */ |
58 | E1000_FCAL = 0x00028, /* Flow Control Address Low - RW */ | 58 | E1000_FCAL = 0x00028, /* Flow Control Address Low - RW */ |
59 | E1000_FCAH = 0x0002C, /* Flow Control Address High -RW */ | 59 | E1000_FCAH = 0x0002C, /* Flow Control Address High -RW */ |
60 | E1000_FEXTNVM4 = 0x00024, /* Future Extended NVM 4 - RW */ | ||
60 | E1000_FEXTNVM = 0x00028, /* Future Extended NVM - RW */ | 61 | E1000_FEXTNVM = 0x00028, /* Future Extended NVM - RW */ |
61 | E1000_FCT = 0x00030, /* Flow Control Type - RW */ | 62 | E1000_FCT = 0x00030, /* Flow Control Type - RW */ |
62 | E1000_VET = 0x00038, /* VLAN Ether Type - RW */ | 63 | E1000_VET = 0x00038, /* VLAN Ether Type - RW */ |
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c index 63930d12711c..57b5435599ab 100644 --- a/drivers/net/e1000e/ich8lan.c +++ b/drivers/net/e1000e/ich8lan.c | |||
@@ -105,6 +105,10 @@ | |||
105 | #define E1000_FEXTNVM_SW_CONFIG 1 | 105 | #define E1000_FEXTNVM_SW_CONFIG 1 |
106 | #define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */ | 106 | #define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */ |
107 | 107 | ||
108 | #define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7 | ||
109 | #define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7 | ||
110 | #define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3 | ||
111 | |||
108 | #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL | 112 | #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL |
109 | 113 | ||
110 | #define E1000_ICH_RAR_ENTRIES 7 | 114 | #define E1000_ICH_RAR_ENTRIES 7 |
@@ -125,6 +129,7 @@ | |||
125 | 129 | ||
126 | /* SMBus Address Phy Register */ | 130 | /* SMBus Address Phy Register */ |
127 | #define HV_SMB_ADDR PHY_REG(768, 26) | 131 | #define HV_SMB_ADDR PHY_REG(768, 26) |
132 | #define HV_SMB_ADDR_MASK 0x007F | ||
128 | #define HV_SMB_ADDR_PEC_EN 0x0200 | 133 | #define HV_SMB_ADDR_PEC_EN 0x0200 |
129 | #define HV_SMB_ADDR_VALID 0x0080 | 134 | #define HV_SMB_ADDR_VALID 0x0080 |
130 | 135 | ||
@@ -237,6 +242,8 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link); | |||
237 | static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw); | 242 | static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw); |
238 | static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw); | 243 | static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw); |
239 | static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw); | 244 | static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw); |
245 | static s32 e1000_k1_workaround_lv(struct e1000_hw *hw); | ||
246 | static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate); | ||
240 | 247 | ||
241 | static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg) | 248 | static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg) |
242 | { | 249 | { |
@@ -272,7 +279,7 @@ static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val) | |||
272 | static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) | 279 | static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) |
273 | { | 280 | { |
274 | struct e1000_phy_info *phy = &hw->phy; | 281 | struct e1000_phy_info *phy = &hw->phy; |
275 | u32 ctrl; | 282 | u32 ctrl, fwsm; |
276 | s32 ret_val = 0; | 283 | s32 ret_val = 0; |
277 | 284 | ||
278 | phy->addr = 1; | 285 | phy->addr = 1; |
@@ -294,7 +301,8 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) | |||
294 | * disabled, then toggle the LANPHYPC Value bit to force | 301 | * disabled, then toggle the LANPHYPC Value bit to force |
295 | * the interconnect to PCIe mode. | 302 | * the interconnect to PCIe mode. |
296 | */ | 303 | */ |
297 | if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { | 304 | fwsm = er32(FWSM); |
305 | if (!(fwsm & E1000_ICH_FWSM_FW_VALID)) { | ||
298 | ctrl = er32(CTRL); | 306 | ctrl = er32(CTRL); |
299 | ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE; | 307 | ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE; |
300 | ctrl &= ~E1000_CTRL_LANPHYPC_VALUE; | 308 | ctrl &= ~E1000_CTRL_LANPHYPC_VALUE; |
@@ -303,6 +311,13 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) | |||
303 | ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE; | 311 | ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE; |
304 | ew32(CTRL, ctrl); | 312 | ew32(CTRL, ctrl); |
305 | msleep(50); | 313 | msleep(50); |
314 | |||
315 | /* | ||
316 | * Gate automatic PHY configuration by hardware on | ||
317 | * non-managed 82579 | ||
318 | */ | ||
319 | if (hw->mac.type == e1000_pch2lan) | ||
320 | e1000_gate_hw_phy_config_ich8lan(hw, true); | ||
306 | } | 321 | } |
307 | 322 | ||
308 | /* | 323 | /* |
@@ -315,6 +330,13 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) | |||
315 | if (ret_val) | 330 | if (ret_val) |
316 | goto out; | 331 | goto out; |
317 | 332 | ||
333 | /* Ungate automatic PHY configuration on non-managed 82579 */ | ||
334 | if ((hw->mac.type == e1000_pch2lan) && | ||
335 | !(fwsm & E1000_ICH_FWSM_FW_VALID)) { | ||
336 | msleep(10); | ||
337 | e1000_gate_hw_phy_config_ich8lan(hw, false); | ||
338 | } | ||
339 | |||
318 | phy->id = e1000_phy_unknown; | 340 | phy->id = e1000_phy_unknown; |
319 | ret_val = e1000e_get_phy_id(hw); | 341 | ret_val = e1000e_get_phy_id(hw); |
320 | if (ret_val) | 342 | if (ret_val) |
@@ -561,13 +583,10 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter) | |||
561 | if (mac->type == e1000_ich8lan) | 583 | if (mac->type == e1000_ich8lan) |
562 | e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true); | 584 | e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true); |
563 | 585 | ||
564 | /* Disable PHY configuration by hardware, config by software */ | 586 | /* Gate automatic PHY configuration by hardware on managed 82579 */ |
565 | if (mac->type == e1000_pch2lan) { | 587 | if ((mac->type == e1000_pch2lan) && |
566 | u32 extcnf_ctrl = er32(EXTCNF_CTRL); | 588 | (er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) |
567 | 589 | e1000_gate_hw_phy_config_ich8lan(hw, true); | |
568 | extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG; | ||
569 | ew32(EXTCNF_CTRL, extcnf_ctrl); | ||
570 | } | ||
571 | 590 | ||
572 | return 0; | 591 | return 0; |
573 | } | 592 | } |
@@ -652,6 +671,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) | |||
652 | goto out; | 671 | goto out; |
653 | } | 672 | } |
654 | 673 | ||
674 | if (hw->mac.type == e1000_pch2lan) { | ||
675 | ret_val = e1000_k1_workaround_lv(hw); | ||
676 | if (ret_val) | ||
677 | goto out; | ||
678 | } | ||
679 | |||
655 | /* | 680 | /* |
656 | * Check if there was DownShift, must be checked | 681 | * Check if there was DownShift, must be checked |
657 | * immediately after link-up | 682 | * immediately after link-up |
@@ -895,6 +920,34 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw) | |||
895 | } | 920 | } |
896 | 921 | ||
897 | /** | 922 | /** |
923 | * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states | ||
924 | * @hw: pointer to the HW structure | ||
925 | * | ||
926 | * Assumes semaphore already acquired. | ||
927 | * | ||
928 | **/ | ||
929 | static s32 e1000_write_smbus_addr(struct e1000_hw *hw) | ||
930 | { | ||
931 | u16 phy_data; | ||
932 | u32 strap = er32(STRAP); | ||
933 | s32 ret_val = 0; | ||
934 | |||
935 | strap &= E1000_STRAP_SMBUS_ADDRESS_MASK; | ||
936 | |||
937 | ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data); | ||
938 | if (ret_val) | ||
939 | goto out; | ||
940 | |||
941 | phy_data &= ~HV_SMB_ADDR_MASK; | ||
942 | phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT); | ||
943 | phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID; | ||
944 | ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data); | ||
945 | |||
946 | out: | ||
947 | return ret_val; | ||
948 | } | ||
949 | |||
950 | /** | ||
898 | * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration | 951 | * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration |
899 | * @hw: pointer to the HW structure | 952 | * @hw: pointer to the HW structure |
900 | * | 953 | * |
@@ -903,7 +956,6 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw) | |||
903 | **/ | 956 | **/ |
904 | static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) | 957 | static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) |
905 | { | 958 | { |
906 | struct e1000_adapter *adapter = hw->adapter; | ||
907 | struct e1000_phy_info *phy = &hw->phy; | 959 | struct e1000_phy_info *phy = &hw->phy; |
908 | u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask; | 960 | u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask; |
909 | s32 ret_val = 0; | 961 | s32 ret_val = 0; |
@@ -921,7 +973,8 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) | |||
921 | if (phy->type != e1000_phy_igp_3) | 973 | if (phy->type != e1000_phy_igp_3) |
922 | return ret_val; | 974 | return ret_val; |
923 | 975 | ||
924 | if (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) { | 976 | if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) || |
977 | (hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) { | ||
925 | sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG; | 978 | sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG; |
926 | break; | 979 | break; |
927 | } | 980 | } |
@@ -961,21 +1014,16 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) | |||
961 | cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; | 1014 | cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; |
962 | cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; | 1015 | cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; |
963 | 1016 | ||
964 | if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) && | 1017 | if ((!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) && |
965 | ((hw->mac.type == e1000_pchlan) || | 1018 | (hw->mac.type == e1000_pchlan)) || |
966 | (hw->mac.type == e1000_pch2lan))) { | 1019 | (hw->mac.type == e1000_pch2lan)) { |
967 | /* | 1020 | /* |
968 | * HW configures the SMBus address and LEDs when the | 1021 | * HW configures the SMBus address and LEDs when the |
969 | * OEM and LCD Write Enable bits are set in the NVM. | 1022 | * OEM and LCD Write Enable bits are set in the NVM. |
970 | * When both NVM bits are cleared, SW will configure | 1023 | * When both NVM bits are cleared, SW will configure |
971 | * them instead. | 1024 | * them instead. |
972 | */ | 1025 | */ |
973 | data = er32(STRAP); | 1026 | ret_val = e1000_write_smbus_addr(hw); |
974 | data &= E1000_STRAP_SMBUS_ADDRESS_MASK; | ||
975 | reg_data = data >> E1000_STRAP_SMBUS_ADDRESS_SHIFT; | ||
976 | reg_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID; | ||
977 | ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, | ||
978 | reg_data); | ||
979 | if (ret_val) | 1027 | if (ret_val) |
980 | goto out; | 1028 | goto out; |
981 | 1029 | ||
@@ -1440,10 +1488,6 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) | |||
1440 | goto out; | 1488 | goto out; |
1441 | 1489 | ||
1442 | /* Enable jumbo frame workaround in the PHY */ | 1490 | /* Enable jumbo frame workaround in the PHY */ |
1443 | e1e_rphy(hw, PHY_REG(769, 20), &data); | ||
1444 | ret_val = e1e_wphy(hw, PHY_REG(769, 20), data & ~(1 << 14)); | ||
1445 | if (ret_val) | ||
1446 | goto out; | ||
1447 | e1e_rphy(hw, PHY_REG(769, 23), &data); | 1491 | e1e_rphy(hw, PHY_REG(769, 23), &data); |
1448 | data &= ~(0x7F << 5); | 1492 | data &= ~(0x7F << 5); |
1449 | data |= (0x37 << 5); | 1493 | data |= (0x37 << 5); |
@@ -1452,7 +1496,6 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) | |||
1452 | goto out; | 1496 | goto out; |
1453 | e1e_rphy(hw, PHY_REG(769, 16), &data); | 1497 | e1e_rphy(hw, PHY_REG(769, 16), &data); |
1454 | data &= ~(1 << 13); | 1498 | data &= ~(1 << 13); |
1455 | data |= (1 << 12); | ||
1456 | ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); | 1499 | ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); |
1457 | if (ret_val) | 1500 | if (ret_val) |
1458 | goto out; | 1501 | goto out; |
@@ -1477,7 +1520,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) | |||
1477 | 1520 | ||
1478 | mac_reg = er32(RCTL); | 1521 | mac_reg = er32(RCTL); |
1479 | mac_reg &= ~E1000_RCTL_SECRC; | 1522 | mac_reg &= ~E1000_RCTL_SECRC; |
1480 | ew32(FFLT_DBG, mac_reg); | 1523 | ew32(RCTL, mac_reg); |
1481 | 1524 | ||
1482 | ret_val = e1000e_read_kmrn_reg(hw, | 1525 | ret_val = e1000e_read_kmrn_reg(hw, |
1483 | E1000_KMRNCTRLSTA_CTRL_OFFSET, | 1526 | E1000_KMRNCTRLSTA_CTRL_OFFSET, |
@@ -1503,17 +1546,12 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) | |||
1503 | goto out; | 1546 | goto out; |
1504 | 1547 | ||
1505 | /* Write PHY register values back to h/w defaults */ | 1548 | /* Write PHY register values back to h/w defaults */ |
1506 | e1e_rphy(hw, PHY_REG(769, 20), &data); | ||
1507 | ret_val = e1e_wphy(hw, PHY_REG(769, 20), data & ~(1 << 14)); | ||
1508 | if (ret_val) | ||
1509 | goto out; | ||
1510 | e1e_rphy(hw, PHY_REG(769, 23), &data); | 1549 | e1e_rphy(hw, PHY_REG(769, 23), &data); |
1511 | data &= ~(0x7F << 5); | 1550 | data &= ~(0x7F << 5); |
1512 | ret_val = e1e_wphy(hw, PHY_REG(769, 23), data); | 1551 | ret_val = e1e_wphy(hw, PHY_REG(769, 23), data); |
1513 | if (ret_val) | 1552 | if (ret_val) |
1514 | goto out; | 1553 | goto out; |
1515 | e1e_rphy(hw, PHY_REG(769, 16), &data); | 1554 | e1e_rphy(hw, PHY_REG(769, 16), &data); |
1516 | data &= ~(1 << 12); | ||
1517 | data |= (1 << 13); | 1555 | data |= (1 << 13); |
1518 | ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); | 1556 | ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); |
1519 | if (ret_val) | 1557 | if (ret_val) |
@@ -1559,6 +1597,69 @@ out: | |||
1559 | } | 1597 | } |
1560 | 1598 | ||
1561 | /** | 1599 | /** |
1600 | * e1000_k1_workaround_lv - K1 Si workaround | ||
1601 | * @hw: pointer to the HW structure | ||
1602 | * | ||
1603 | * Workaround to set the K1 beacon duration for 82579 parts | ||
1604 | **/ | ||
1605 | static s32 e1000_k1_workaround_lv(struct e1000_hw *hw) | ||
1606 | { | ||
1607 | s32 ret_val = 0; | ||
1608 | u16 status_reg = 0; | ||
1609 | u32 mac_reg; | ||
1610 | |||
1611 | if (hw->mac.type != e1000_pch2lan) | ||
1612 | goto out; | ||
1613 | |||
1614 | /* Set K1 beacon duration based on 1Gbps speed or otherwise */ | ||
1615 | ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg); | ||
1616 | if (ret_val) | ||
1617 | goto out; | ||
1618 | |||
1619 | if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) | ||
1620 | == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) { | ||
1621 | mac_reg = er32(FEXTNVM4); | ||
1622 | mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; | ||
1623 | |||
1624 | if (status_reg & HV_M_STATUS_SPEED_1000) | ||
1625 | mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC; | ||
1626 | else | ||
1627 | mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC; | ||
1628 | |||
1629 | ew32(FEXTNVM4, mac_reg); | ||
1630 | } | ||
1631 | |||
1632 | out: | ||
1633 | return ret_val; | ||
1634 | } | ||
1635 | |||
1636 | /** | ||
1637 | * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware | ||
1638 | * @hw: pointer to the HW structure | ||
1639 | * @gate: boolean set to true to gate, false to ungate | ||
1640 | * | ||
1641 | * Gate/ungate the automatic PHY configuration via hardware; perform | ||
1642 | * the configuration via software instead. | ||
1643 | **/ | ||
1644 | static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate) | ||
1645 | { | ||
1646 | u32 extcnf_ctrl; | ||
1647 | |||
1648 | if (hw->mac.type != e1000_pch2lan) | ||
1649 | return; | ||
1650 | |||
1651 | extcnf_ctrl = er32(EXTCNF_CTRL); | ||
1652 | |||
1653 | if (gate) | ||
1654 | extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG; | ||
1655 | else | ||
1656 | extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG; | ||
1657 | |||
1658 | ew32(EXTCNF_CTRL, extcnf_ctrl); | ||
1659 | return; | ||
1660 | } | ||
1661 | |||
1662 | /** | ||
1562 | * e1000_lan_init_done_ich8lan - Check for PHY config completion | 1663 | * e1000_lan_init_done_ich8lan - Check for PHY config completion |
1563 | * @hw: pointer to the HW structure | 1664 | * @hw: pointer to the HW structure |
1564 | * | 1665 | * |
@@ -1602,6 +1703,9 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw) | |||
1602 | if (e1000_check_reset_block(hw)) | 1703 | if (e1000_check_reset_block(hw)) |
1603 | goto out; | 1704 | goto out; |
1604 | 1705 | ||
1706 | /* Allow time for h/w to get to quiescent state after reset */ | ||
1707 | msleep(10); | ||
1708 | |||
1605 | /* Perform any necessary post-reset workarounds */ | 1709 | /* Perform any necessary post-reset workarounds */ |
1606 | switch (hw->mac.type) { | 1710 | switch (hw->mac.type) { |
1607 | case e1000_pchlan: | 1711 | case e1000_pchlan: |
@@ -1630,6 +1734,13 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw) | |||
1630 | /* Configure the LCD with the OEM bits in NVM */ | 1734 | /* Configure the LCD with the OEM bits in NVM */ |
1631 | ret_val = e1000_oem_bits_config_ich8lan(hw, true); | 1735 | ret_val = e1000_oem_bits_config_ich8lan(hw, true); |
1632 | 1736 | ||
1737 | /* Ungate automatic PHY configuration on non-managed 82579 */ | ||
1738 | if ((hw->mac.type == e1000_pch2lan) && | ||
1739 | !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { | ||
1740 | msleep(10); | ||
1741 | e1000_gate_hw_phy_config_ich8lan(hw, false); | ||
1742 | } | ||
1743 | |||
1633 | out: | 1744 | out: |
1634 | return ret_val; | 1745 | return ret_val; |
1635 | } | 1746 | } |
@@ -1646,6 +1757,11 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw) | |||
1646 | { | 1757 | { |
1647 | s32 ret_val = 0; | 1758 | s32 ret_val = 0; |
1648 | 1759 | ||
1760 | /* Gate automatic PHY configuration by hardware on non-managed 82579 */ | ||
1761 | if ((hw->mac.type == e1000_pch2lan) && | ||
1762 | !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) | ||
1763 | e1000_gate_hw_phy_config_ich8lan(hw, true); | ||
1764 | |||
1649 | ret_val = e1000e_phy_hw_reset_generic(hw); | 1765 | ret_val = e1000e_phy_hw_reset_generic(hw); |
1650 | if (ret_val) | 1766 | if (ret_val) |
1651 | goto out; | 1767 | goto out; |
@@ -2910,6 +3026,14 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) | |||
2910 | * external PHY is reset. | 3026 | * external PHY is reset. |
2911 | */ | 3027 | */ |
2912 | ctrl |= E1000_CTRL_PHY_RST; | 3028 | ctrl |= E1000_CTRL_PHY_RST; |
3029 | |||
3030 | /* | ||
3031 | * Gate automatic PHY configuration by hardware on | ||
3032 | * non-managed 82579 | ||
3033 | */ | ||
3034 | if ((hw->mac.type == e1000_pch2lan) && | ||
3035 | !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) | ||
3036 | e1000_gate_hw_phy_config_ich8lan(hw, true); | ||
2913 | } | 3037 | } |
2914 | ret_val = e1000_acquire_swflag_ich8lan(hw); | 3038 | ret_val = e1000_acquire_swflag_ich8lan(hw); |
2915 | e_dbg("Issuing a global reset to ich8lan\n"); | 3039 | e_dbg("Issuing a global reset to ich8lan\n"); |
@@ -3460,13 +3584,20 @@ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw) | |||
3460 | void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw) | 3584 | void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw) |
3461 | { | 3585 | { |
3462 | u32 phy_ctrl; | 3586 | u32 phy_ctrl; |
3587 | s32 ret_val; | ||
3463 | 3588 | ||
3464 | phy_ctrl = er32(PHY_CTRL); | 3589 | phy_ctrl = er32(PHY_CTRL); |
3465 | phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE; | 3590 | phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE; |
3466 | ew32(PHY_CTRL, phy_ctrl); | 3591 | ew32(PHY_CTRL, phy_ctrl); |
3467 | 3592 | ||
3468 | if (hw->mac.type >= e1000_pchlan) | 3593 | if (hw->mac.type >= e1000_pchlan) { |
3469 | e1000_phy_hw_reset_ich8lan(hw); | 3594 | e1000_oem_bits_config_ich8lan(hw, true); |
3595 | ret_val = hw->phy.ops.acquire(hw); | ||
3596 | if (ret_val) | ||
3597 | return; | ||
3598 | e1000_write_smbus_addr(hw); | ||
3599 | hw->phy.ops.release(hw); | ||
3600 | } | ||
3470 | } | 3601 | } |
3471 | 3602 | ||
3472 | /** | 3603 | /** |
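The ich8lan changes above repeatedly gate and ungate the automatic hardware PHY configuration around resets on non-managed 82579 parts, and they factor the EXTCNF_CTRL read-modify-write into one helper so every call site stays identical. A compile-able sketch of that helper shape; the register model, bit position and names here are placeholders, not the real EXTCNF_CTRL layout:

#include <stdio.h>
#include <stdint.h>

#define GATE_PHY_CFG (1u << 7)    /* illustrative bit, not the real one */

static uint32_t fake_reg;         /* stand-in for a memory-mapped register */

static uint32_t reg_read(void)        { return fake_reg; }
static void reg_write(uint32_t v)     { fake_reg = v; }

/* Same shape as e1000_gate_hw_phy_config_ich8lan(): one helper that
 * either sets or clears the gate bit with a read-modify-write. */
static void gate_phy_config(int gate)
{
	uint32_t v = reg_read();

	if (gate)
		v |= GATE_PHY_CFG;
	else
		v &= ~GATE_PHY_CFG;

	reg_write(v);
}

int main(void)
{
	gate_phy_config(1);
	printf("gated:   %#x\n", reg_read());
	gate_phy_config(0);
	printf("ungated: %#x\n", reg_read());
	return 0;
}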
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 2b8ef44bd2b1..e561d15c3eb1 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c | |||
@@ -2704,6 +2704,16 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) | |||
2704 | u32 psrctl = 0; | 2704 | u32 psrctl = 0; |
2705 | u32 pages = 0; | 2705 | u32 pages = 0; |
2706 | 2706 | ||
2707 | /* Workaround Si errata on 82579 - configure jumbo frame flow */ | ||
2708 | if (hw->mac.type == e1000_pch2lan) { | ||
2709 | s32 ret_val; | ||
2710 | |||
2711 | if (adapter->netdev->mtu > ETH_DATA_LEN) | ||
2712 | ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true); | ||
2713 | else | ||
2714 | ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false); | ||
2715 | } | ||
2716 | |||
2707 | /* Program MC offset vector base */ | 2717 | /* Program MC offset vector base */ |
2708 | rctl = er32(RCTL); | 2718 | rctl = er32(RCTL); |
2709 | rctl &= ~(3 << E1000_RCTL_MO_SHIFT); | 2719 | rctl &= ~(3 << E1000_RCTL_MO_SHIFT); |
@@ -2744,16 +2754,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) | |||
2744 | e1e_wphy(hw, 22, phy_data); | 2754 | e1e_wphy(hw, 22, phy_data); |
2745 | } | 2755 | } |
2746 | 2756 | ||
2747 | /* Workaround Si errata on 82579 - configure jumbo frame flow */ | ||
2748 | if (hw->mac.type == e1000_pch2lan) { | ||
2749 | s32 ret_val; | ||
2750 | |||
2751 | if (rctl & E1000_RCTL_LPE) | ||
2752 | ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true); | ||
2753 | else | ||
2754 | ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false); | ||
2755 | } | ||
2756 | |||
2757 | /* Setup buffer sizes */ | 2757 | /* Setup buffer sizes */ |
2758 | rctl &= ~E1000_RCTL_SZ_4096; | 2758 | rctl &= ~E1000_RCTL_SZ_4096; |
2759 | rctl |= E1000_RCTL_BSEX; | 2759 | rctl |= E1000_RCTL_BSEX; |
@@ -4833,6 +4833,15 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
4833 | return -EINVAL; | 4833 | return -EINVAL; |
4834 | } | 4834 | } |
4835 | 4835 | ||
4836 | /* Jumbo frame workaround on 82579 requires CRC be stripped */ | ||
4837 | if ((adapter->hw.mac.type == e1000_pch2lan) && | ||
4838 | !(adapter->flags2 & FLAG2_CRC_STRIPPING) && | ||
4839 | (new_mtu > ETH_DATA_LEN)) { | ||
4840 | e_err("Jumbo Frames not supported on 82579 when CRC " | ||
4841 | "stripping is disabled.\n"); | ||
4842 | return -EINVAL; | ||
4843 | } | ||
4844 | |||
4836 | /* 82573 Errata 17 */ | 4845 | /* 82573 Errata 17 */ |
4837 | if (((adapter->hw.mac.type == e1000_82573) || | 4846 | if (((adapter->hw.mac.type == e1000_82573) || |
4838 | (adapter->hw.mac.type == e1000_82574)) && | 4847 | (adapter->hw.mac.type == e1000_82574)) && |
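The e1000e/netdev.c hunks move the 82579 jumbo-frame workaround so it is decided from the requested MTU before RCTL is programmed, and change_mtu() now rejects a jumbo MTU outright when CRC stripping is disabled. A minimal sketch of that validation guard (constants mirror the hunk; the function and flag names are mine):

#include <stdio.h>

#define ETH_DATA_LEN	1500
#define FLAG_CRC_STRIP	0x1

/* Mirrors the new e1000_change_mtu() guard: refuse a jumbo MTU when
 * CRC stripping has been turned off (illustrative only). */
static int validate_mtu(int new_mtu, unsigned int flags)
{
	if (new_mtu > ETH_DATA_LEN && !(flags & FLAG_CRC_STRIP)) {
		fprintf(stderr, "jumbo frames need CRC stripping enabled\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	printf("1500, no strip : %d\n", validate_mtu(1500, 0));
	printf("9000, no strip : %d\n", validate_mtu(9000, 0));
	printf("9000, strip    : %d\n", validate_mtu(9000, FLAG_CRC_STRIP));
	return 0;
}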
diff --git a/drivers/net/eql.c b/drivers/net/eql.c index dda2c7944da9..0cb1cf9cf4b0 100644 --- a/drivers/net/eql.c +++ b/drivers/net/eql.c | |||
@@ -555,6 +555,8 @@ static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mcp) | |||
555 | equalizer_t *eql; | 555 | equalizer_t *eql; |
556 | master_config_t mc; | 556 | master_config_t mc; |
557 | 557 | ||
558 | memset(&mc, 0, sizeof(master_config_t)); | ||
559 | |||
558 | if (eql_is_master(dev)) { | 560 | if (eql_is_master(dev)) { |
559 | eql = netdev_priv(dev); | 561 | eql = netdev_priv(dev); |
560 | mc.max_slaves = eql->max_slaves; | 562 | mc.max_slaves = eql->max_slaves; |
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c index 3506fd6ad726..519e19e23955 100644 --- a/drivers/net/ibm_newemac/core.c +++ b/drivers/net/ibm_newemac/core.c | |||
@@ -2928,7 +2928,7 @@ static int __devinit emac_probe(struct platform_device *ofdev, | |||
2928 | if (dev->emac_irq != NO_IRQ) | 2928 | if (dev->emac_irq != NO_IRQ) |
2929 | irq_dispose_mapping(dev->emac_irq); | 2929 | irq_dispose_mapping(dev->emac_irq); |
2930 | err_free: | 2930 | err_free: |
2931 | kfree(ndev); | 2931 | free_netdev(ndev); |
2932 | err_gone: | 2932 | err_gone: |
2933 | /* if we were on the bootlist, remove us as we won't show up and | 2933 | /* if we were on the bootlist, remove us as we won't show up and |
2934 | * wake up all waiters to notify them in case they were waiting | 2934 | * wake up all waiters to notify them in case they were waiting |
@@ -2971,7 +2971,7 @@ static int __devexit emac_remove(struct platform_device *ofdev) | |||
2971 | if (dev->emac_irq != NO_IRQ) | 2971 | if (dev->emac_irq != NO_IRQ) |
2972 | irq_dispose_mapping(dev->emac_irq); | 2972 | irq_dispose_mapping(dev->emac_irq); |
2973 | 2973 | ||
2974 | kfree(dev->ndev); | 2974 | free_netdev(dev->ndev); |
2975 | 2975 | ||
2976 | return 0; | 2976 | return 0; |
2977 | } | 2977 | } |
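The ibm_newemac hunks (and the rionet and sgiseeq ones below) replace kfree() with free_netdev() on error and removal paths. A net_device from alloc_etherdev() lives inside a larger allocation together with driver-private data, so only the paired destructor frees the right pointer. A toy userspace model of that pairing; the structures and helpers here are invented for illustration, not the real net_device layout:

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct toy_netdev {
	char name[16];
};

struct toy_alloc {
	long refcount;
	struct toy_netdev dev;       /* what the driver actually sees */
	char priv[64];               /* driver-private area follows */
};

static struct toy_netdev *toy_alloc_netdev(void)
{
	struct toy_alloc *a = calloc(1, sizeof(*a));

	return a ? &a->dev : NULL;
}

static void toy_free_netdev(struct toy_netdev *dev)
{
	/* Walk back to the real allocation before freeing. */
	struct toy_alloc *a = (struct toy_alloc *)
		((char *)dev - offsetof(struct toy_alloc, dev));
	free(a);
}

int main(void)
{
	struct toy_netdev *dev = toy_alloc_netdev();

	if (!dev)
		return 1;
	/* free(dev) here would pass an interior pointer that malloc()
	 * never returned; the paired destructor must be used instead. */
	toy_free_netdev(dev);
	return 0;
}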
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c index b4fb07a6f13f..51919fcd50c2 100644 --- a/drivers/net/ks8851.c +++ b/drivers/net/ks8851.c | |||
@@ -503,30 +503,33 @@ static void ks8851_rx_pkts(struct ks8851_net *ks) | |||
503 | ks8851_wrreg16(ks, KS_RXQCR, | 503 | ks8851_wrreg16(ks, KS_RXQCR, |
504 | ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE); | 504 | ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE); |
505 | 505 | ||
506 | if (rxlen > 0) { | 506 | if (rxlen > 4) { |
507 | skb = netdev_alloc_skb(ks->netdev, rxlen + 2 + 8); | 507 | unsigned int rxalign; |
508 | if (!skb) { | 508 | |
509 | /* todo - dump frame and move on */ | 509 | rxlen -= 4; |
510 | } | 510 | rxalign = ALIGN(rxlen, 4); |
511 | skb = netdev_alloc_skb_ip_align(ks->netdev, rxalign); | ||
512 | if (skb) { | ||
511 | 513 | ||
512 | /* two bytes to ensure ip is aligned, and four bytes | 514 | /* 4 bytes of status header + 4 bytes of |
513 | * for the status header and 4 bytes of garbage */ | 515 | * garbage: we put them before ethernet |
514 | skb_reserve(skb, 2 + 4 + 4); | 516 | * header, so that they are copied, |
517 | * but ignored. | ||
518 | */ | ||
515 | 519 | ||
516 | rxpkt = skb_put(skb, rxlen - 4) - 8; | 520 | rxpkt = skb_put(skb, rxlen) - 8; |
517 | 521 | ||
518 | /* align the packet length to 4 bytes, and add 4 bytes | 522 | ks8851_rdfifo(ks, rxpkt, rxalign + 8); |
519 | * as we're getting the rx status header as well */ | ||
520 | ks8851_rdfifo(ks, rxpkt, ALIGN(rxlen, 4) + 8); | ||
521 | 523 | ||
522 | if (netif_msg_pktdata(ks)) | 524 | if (netif_msg_pktdata(ks)) |
523 | ks8851_dbg_dumpkkt(ks, rxpkt); | 525 | ks8851_dbg_dumpkkt(ks, rxpkt); |
524 | 526 | ||
525 | skb->protocol = eth_type_trans(skb, ks->netdev); | 527 | skb->protocol = eth_type_trans(skb, ks->netdev); |
526 | netif_rx(skb); | 528 | netif_rx(skb); |
527 | 529 | ||
528 | ks->netdev->stats.rx_packets++; | 530 | ks->netdev->stats.rx_packets++; |
529 | ks->netdev->stats.rx_bytes += rxlen - 4; | 531 | ks->netdev->stats.rx_bytes += rxlen; |
532 | } | ||
530 | } | 533 | } |
531 | 534 | ||
532 | ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); | 535 | ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); |
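The ks8851 receive rewrite drops the 4-byte CRC from the reported length, allocates with netdev_alloc_skb_ip_align(), and reads a whole 4-byte-aligned burst (plus 8 bytes of status/garbage) from the FIFO in one go. A short sketch of the length arithmetic; the constants follow the hunk, the ALIGN_UP macro is my stand-in for the kernel's ALIGN():

#include <stdio.h>

/* Round x up to the next multiple of a (a must be a power of two). */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned int lens[] = { 60, 61, 62, 63, 64, 1514 };

	for (unsigned int i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
		unsigned int rxlen = lens[i];
		/* Drop the trailing CRC, then pad the FIFO read out to a
		 * 4-byte boundary plus 8 bytes of status header/garbage. */
		unsigned int payload = rxlen > 4 ? rxlen - 4 : 0;
		unsigned int fifo_read = ALIGN_UP(payload, 4) + 8;

		printf("rxlen %4u -> payload %4u, fifo read %4u\n",
		       rxlen, payload, fifo_read);
	}
	return 0;
}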
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c index bdf2149e5296..87f0a93b165c 100644 --- a/drivers/net/ll_temac_main.c +++ b/drivers/net/ll_temac_main.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <linux/of_device.h> | 38 | #include <linux/of_device.h> |
39 | #include <linux/of_mdio.h> | 39 | #include <linux/of_mdio.h> |
40 | #include <linux/of_platform.h> | 40 | #include <linux/of_platform.h> |
41 | #include <linux/of_address.h> | ||
41 | #include <linux/skbuff.h> | 42 | #include <linux/skbuff.h> |
42 | #include <linux/spinlock.h> | 43 | #include <linux/spinlock.h> |
43 | #include <linux/tcp.h> /* needed for sizeof(tcphdr) */ | 44 | #include <linux/tcp.h> /* needed for sizeof(tcphdr) */ |
diff --git a/drivers/net/ll_temac_mdio.c b/drivers/net/ll_temac_mdio.c index 5ae28c975b38..8cf9d4f56bb2 100644 --- a/drivers/net/ll_temac_mdio.c +++ b/drivers/net/ll_temac_mdio.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/phy.h> | 10 | #include <linux/phy.h> |
11 | #include <linux/of.h> | 11 | #include <linux/of.h> |
12 | #include <linux/of_device.h> | 12 | #include <linux/of_device.h> |
13 | #include <linux/of_address.h> | ||
13 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
14 | #include <linux/of_mdio.h> | 15 | #include <linux/of_mdio.h> |
15 | 16 | ||
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c index cabae7bb1fc6..b075a35b85d4 100644 --- a/drivers/net/netxen/netxen_nic_init.c +++ b/drivers/net/netxen/netxen_nic_init.c | |||
@@ -1540,7 +1540,6 @@ netxen_process_rcv(struct netxen_adapter *adapter, | |||
1540 | if (pkt_offset) | 1540 | if (pkt_offset) |
1541 | skb_pull(skb, pkt_offset); | 1541 | skb_pull(skb, pkt_offset); |
1542 | 1542 | ||
1543 | skb->truesize = skb->len + sizeof(struct sk_buff); | ||
1544 | skb->protocol = eth_type_trans(skb, netdev); | 1543 | skb->protocol = eth_type_trans(skb, netdev); |
1545 | 1544 | ||
1546 | napi_gro_receive(&sds_ring->napi, skb); | 1545 | napi_gro_receive(&sds_ring->napi, skb); |
@@ -1602,8 +1601,6 @@ netxen_process_lro(struct netxen_adapter *adapter, | |||
1602 | 1601 | ||
1603 | skb_put(skb, lro_length + data_offset); | 1602 | skb_put(skb, lro_length + data_offset); |
1604 | 1603 | ||
1605 | skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb); | ||
1606 | |||
1607 | skb_pull(skb, l2_hdr_offset); | 1604 | skb_pull(skb, l2_hdr_offset); |
1608 | skb->protocol = eth_type_trans(skb, netdev); | 1605 | skb->protocol = eth_type_trans(skb, netdev); |
1609 | 1606 | ||
diff --git a/drivers/net/niu.c b/drivers/net/niu.c index bc695d53cdcc..fe6983af6918 100644 --- a/drivers/net/niu.c +++ b/drivers/net/niu.c | |||
@@ -7269,32 +7269,28 @@ static int niu_get_ethtool_tcam_all(struct niu *np, | |||
7269 | struct niu_parent *parent = np->parent; | 7269 | struct niu_parent *parent = np->parent; |
7270 | struct niu_tcam_entry *tp; | 7270 | struct niu_tcam_entry *tp; |
7271 | int i, idx, cnt; | 7271 | int i, idx, cnt; |
7272 | u16 n_entries; | ||
7273 | unsigned long flags; | 7272 | unsigned long flags; |
7274 | 7273 | int ret = 0; | |
7275 | 7274 | ||
7276 | /* put the tcam size here */ | 7275 | /* put the tcam size here */ |
7277 | nfc->data = tcam_get_size(np); | 7276 | nfc->data = tcam_get_size(np); |
7278 | 7277 | ||
7279 | niu_lock_parent(np, flags); | 7278 | niu_lock_parent(np, flags); |
7280 | n_entries = nfc->rule_cnt; | ||
7281 | for (cnt = 0, i = 0; i < nfc->data; i++) { | 7279 | for (cnt = 0, i = 0; i < nfc->data; i++) { |
7282 | idx = tcam_get_index(np, i); | 7280 | idx = tcam_get_index(np, i); |
7283 | tp = &parent->tcam[idx]; | 7281 | tp = &parent->tcam[idx]; |
7284 | if (!tp->valid) | 7282 | if (!tp->valid) |
7285 | continue; | 7283 | continue; |
7284 | if (cnt == nfc->rule_cnt) { | ||
7285 | ret = -EMSGSIZE; | ||
7286 | break; | ||
7287 | } | ||
7286 | rule_locs[cnt] = i; | 7288 | rule_locs[cnt] = i; |
7287 | cnt++; | 7289 | cnt++; |
7288 | } | 7290 | } |
7289 | niu_unlock_parent(np, flags); | 7291 | niu_unlock_parent(np, flags); |
7290 | 7292 | ||
7291 | if (n_entries != cnt) { | 7293 | return ret; |
7292 | /* print warning, this should not happen */ | ||
7293 | netdev_info(np->dev, "niu%d: In %s(): n_entries[%d] != cnt[%d]!!!\n", | ||
7294 | np->parent->index, __func__, n_entries, cnt); | ||
7295 | } | ||
7296 | |||
7297 | return 0; | ||
7298 | } | 7294 | } |
7299 | 7295 | ||
7300 | static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd, | 7296 | static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd, |
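The niu hunk stops trusting the caller-supplied rule count after the fact: instead of filling the array and warning when the totals disagree, the loop now refuses to write past the caller's buffer and returns -EMSGSIZE as soon as it would overflow. A minimal sketch of that bounded-enumeration shape (names and data are illustrative; EMSGSIZE is the usual Linux errno value):

#include <stdio.h>

#define EMSGSIZE 90

/* Same shape as the reworked niu_get_ethtool_tcam_all(): never write
 * more entries than the caller-sized array holds, and report overflow. */
static int collect_rules(const int *valid, int table_size,
			 unsigned int *locs, unsigned int max_rules)
{
	unsigned int cnt = 0;

	for (int i = 0; i < table_size; i++) {
		if (!valid[i])
			continue;
		if (cnt == max_rules)
			return -EMSGSIZE;   /* buffer too small, stop early */
		locs[cnt++] = i;
	}
	return 0;
}

int main(void)
{
	int valid[8] = { 1, 0, 1, 1, 0, 1, 1, 0 };
	unsigned int locs[3];

	int ret = collect_rules(valid, 8, locs, 3);
	printf("ret = %d (expect -%d: five valid entries, room for three)\n",
	       ret, EMSGSIZE);
	return 0;
}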
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c index c3edfe4c2651..f9b509a6b09a 100644 --- a/drivers/net/pcmcia/pcnet_cs.c +++ b/drivers/net/pcmcia/pcnet_cs.c | |||
@@ -508,7 +508,8 @@ static int pcnet_confcheck(struct pcmcia_device *p_dev, | |||
508 | unsigned int vcc, | 508 | unsigned int vcc, |
509 | void *priv_data) | 509 | void *priv_data) |
510 | { | 510 | { |
511 | int *has_shmem = priv_data; | 511 | int *priv = priv_data; |
512 | int try = (*priv & 0x1); | ||
512 | int i; | 513 | int i; |
513 | cistpl_io_t *io = &cfg->io; | 514 | cistpl_io_t *io = &cfg->io; |
514 | 515 | ||
@@ -525,77 +526,103 @@ static int pcnet_confcheck(struct pcmcia_device *p_dev, | |||
525 | i = p_dev->resource[1]->end = 0; | 526 | i = p_dev->resource[1]->end = 0; |
526 | } | 527 | } |
527 | 528 | ||
528 | *has_shmem = ((cfg->mem.nwin == 1) && | 529 | *priv &= ((cfg->mem.nwin == 1) && |
529 | (cfg->mem.win[0].len >= 0x4000)); | 530 | (cfg->mem.win[0].len >= 0x4000)) ? 0x10 : ~0x10; |
531 | |||
530 | p_dev->resource[0]->start = io->win[i].base; | 532 | p_dev->resource[0]->start = io->win[i].base; |
531 | p_dev->resource[0]->end = io->win[i].len; | 533 | p_dev->resource[0]->end = io->win[i].len; |
532 | p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK; | 534 | if (!try) |
535 | p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK; | ||
536 | else | ||
537 | p_dev->io_lines = 16; | ||
533 | if (p_dev->resource[0]->end + p_dev->resource[1]->end >= 32) | 538 | if (p_dev->resource[0]->end + p_dev->resource[1]->end >= 32) |
534 | return try_io_port(p_dev); | 539 | return try_io_port(p_dev); |
535 | 540 | ||
536 | return 0; | 541 | return -EINVAL; |
542 | } | ||
543 | |||
544 | static hw_info_t *pcnet_try_config(struct pcmcia_device *link, | ||
545 | int *has_shmem, int try) | ||
546 | { | ||
547 | struct net_device *dev = link->priv; | ||
548 | hw_info_t *local_hw_info; | ||
549 | pcnet_dev_t *info = PRIV(dev); | ||
550 | int priv = try; | ||
551 | int ret; | ||
552 | |||
553 | ret = pcmcia_loop_config(link, pcnet_confcheck, &priv); | ||
554 | if (ret) { | ||
555 | dev_warn(&link->dev, "no useable port range found\n"); | ||
556 | return NULL; | ||
557 | } | ||
558 | *has_shmem = (priv & 0x10); | ||
559 | |||
560 | if (!link->irq) | ||
561 | return NULL; | ||
562 | |||
563 | if (resource_size(link->resource[1]) == 8) { | ||
564 | link->conf.Attributes |= CONF_ENABLE_SPKR; | ||
565 | link->conf.Status = CCSR_AUDIO_ENA; | ||
566 | } | ||
567 | if ((link->manf_id == MANFID_IBM) && | ||
568 | (link->card_id == PRODID_IBM_HOME_AND_AWAY)) | ||
569 | link->conf.ConfigIndex |= 0x10; | ||
570 | |||
571 | ret = pcmcia_request_configuration(link, &link->conf); | ||
572 | if (ret) | ||
573 | return NULL; | ||
574 | |||
575 | dev->irq = link->irq; | ||
576 | dev->base_addr = link->resource[0]->start; | ||
577 | |||
578 | if (info->flags & HAS_MISC_REG) { | ||
579 | if ((if_port == 1) || (if_port == 2)) | ||
580 | dev->if_port = if_port; | ||
581 | else | ||
582 | dev_notice(&link->dev, "invalid if_port requested\n"); | ||
583 | } else | ||
584 | dev->if_port = 0; | ||
585 | |||
586 | if ((link->conf.ConfigBase == 0x03c0) && | ||
587 | (link->manf_id == 0x149) && (link->card_id == 0xc1ab)) { | ||
588 | dev_info(&link->dev, | ||
589 | "this is an AX88190 card - use axnet_cs instead.\n"); | ||
590 | return NULL; | ||
591 | } | ||
592 | |||
593 | local_hw_info = get_hwinfo(link); | ||
594 | if (!local_hw_info) | ||
595 | local_hw_info = get_prom(link); | ||
596 | if (!local_hw_info) | ||
597 | local_hw_info = get_dl10019(link); | ||
598 | if (!local_hw_info) | ||
599 | local_hw_info = get_ax88190(link); | ||
600 | if (!local_hw_info) | ||
601 | local_hw_info = get_hwired(link); | ||
602 | |||
603 | return local_hw_info; | ||
537 | } | 604 | } |
538 | 605 | ||
539 | static int pcnet_config(struct pcmcia_device *link) | 606 | static int pcnet_config(struct pcmcia_device *link) |
540 | { | 607 | { |
541 | struct net_device *dev = link->priv; | 608 | struct net_device *dev = link->priv; |
542 | pcnet_dev_t *info = PRIV(dev); | 609 | pcnet_dev_t *info = PRIV(dev); |
543 | int ret, start_pg, stop_pg, cm_offset; | 610 | int start_pg, stop_pg, cm_offset; |
544 | int has_shmem = 0; | 611 | int has_shmem = 0; |
545 | hw_info_t *local_hw_info; | 612 | hw_info_t *local_hw_info; |
546 | 613 | ||
547 | dev_dbg(&link->dev, "pcnet_config\n"); | 614 | dev_dbg(&link->dev, "pcnet_config\n"); |
548 | 615 | ||
549 | ret = pcmcia_loop_config(link, pcnet_confcheck, &has_shmem); | 616 | local_hw_info = pcnet_try_config(link, &has_shmem, 0); |
550 | if (ret) | 617 | if (!local_hw_info) { |
551 | goto failed; | 618 | /* check whether forcing io_lines to 16 helps... */ |
552 | 619 | pcmcia_disable_device(link); | |
553 | if (!link->irq) | 620 | local_hw_info = pcnet_try_config(link, &has_shmem, 1); |
554 | goto failed; | 621 | if (local_hw_info == NULL) { |
555 | 622 | dev_notice(&link->dev, "unable to read hardware net" | |
556 | if (resource_size(link->resource[1]) == 8) { | 623 | " address for io base %#3lx\n", dev->base_addr); |
557 | link->conf.Attributes |= CONF_ENABLE_SPKR; | 624 | goto failed; |
558 | link->conf.Status = CCSR_AUDIO_ENA; | 625 | } |
559 | } | ||
560 | if ((link->manf_id == MANFID_IBM) && | ||
561 | (link->card_id == PRODID_IBM_HOME_AND_AWAY)) | ||
562 | link->conf.ConfigIndex |= 0x10; | ||
563 | |||
564 | ret = pcmcia_request_configuration(link, &link->conf); | ||
565 | if (ret) | ||
566 | goto failed; | ||
567 | dev->irq = link->irq; | ||
568 | dev->base_addr = link->resource[0]->start; | ||
569 | if (info->flags & HAS_MISC_REG) { | ||
570 | if ((if_port == 1) || (if_port == 2)) | ||
571 | dev->if_port = if_port; | ||
572 | else | ||
573 | printk(KERN_NOTICE "pcnet_cs: invalid if_port requested\n"); | ||
574 | } else { | ||
575 | dev->if_port = 0; | ||
576 | } | ||
577 | |||
578 | if ((link->conf.ConfigBase == 0x03c0) && | ||
579 | (link->manf_id == 0x149) && (link->card_id == 0xc1ab)) { | ||
580 | printk(KERN_INFO "pcnet_cs: this is an AX88190 card!\n"); | ||
581 | printk(KERN_INFO "pcnet_cs: use axnet_cs instead.\n"); | ||
582 | goto failed; | ||
583 | } | ||
584 | |||
585 | local_hw_info = get_hwinfo(link); | ||
586 | if (local_hw_info == NULL) | ||
587 | local_hw_info = get_prom(link); | ||
588 | if (local_hw_info == NULL) | ||
589 | local_hw_info = get_dl10019(link); | ||
590 | if (local_hw_info == NULL) | ||
591 | local_hw_info = get_ax88190(link); | ||
592 | if (local_hw_info == NULL) | ||
593 | local_hw_info = get_hwired(link); | ||
594 | |||
595 | if (local_hw_info == NULL) { | ||
596 | printk(KERN_NOTICE "pcnet_cs: unable to read hardware net" | ||
597 | " address for io base %#3lx\n", dev->base_addr); | ||
598 | goto failed; | ||
599 | } | 626 | } |
600 | 627 | ||
601 | info->flags = local_hw_info->flags; | 628 | info->flags = local_hw_info->flags; |
@@ -1637,6 +1664,7 @@ static struct pcmcia_device_id pcnet_ids[] = { | |||
1637 | PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCETTX", 0x547e66dc, 0x6fc5459b), | 1664 | PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCETTX", 0x547e66dc, 0x6fc5459b), |
1638 | PCMCIA_DEVICE_PROD_ID12("iPort", "10/100 Ethernet Card", 0x56c538d2, 0x11b0ffc0), | 1665 | PCMCIA_DEVICE_PROD_ID12("iPort", "10/100 Ethernet Card", 0x56c538d2, 0x11b0ffc0), |
1639 | PCMCIA_DEVICE_PROD_ID12("KANSAI ELECTRIC CO.,LTD", "KLA-PCM/T", 0xb18dc3b4, 0xcc51a956), | 1666 | PCMCIA_DEVICE_PROD_ID12("KANSAI ELECTRIC CO.,LTD", "KLA-PCM/T", 0xb18dc3b4, 0xcc51a956), |
1667 | PCMCIA_DEVICE_PROD_ID12("KENTRONICS", "KEP-230", 0xaf8144c9, 0x868f6616), | ||
1640 | PCMCIA_DEVICE_PROD_ID12("KCI", "PE520 PCMCIA Ethernet Adapter", 0xa89b87d3, 0x1eb88e64), | 1668 | PCMCIA_DEVICE_PROD_ID12("KCI", "PE520 PCMCIA Ethernet Adapter", 0xa89b87d3, 0x1eb88e64), |
1641 | PCMCIA_DEVICE_PROD_ID12("KINGMAX", "EN10T2T", 0x7bcb459a, 0xa5c81fa5), | 1669 | PCMCIA_DEVICE_PROD_ID12("KINGMAX", "EN10T2T", 0x7bcb459a, 0xa5c81fa5), |
1642 | PCMCIA_DEVICE_PROD_ID12("Kingston", "KNE-PC2", 0x1128e633, 0xce2a89b3), | 1670 | PCMCIA_DEVICE_PROD_ID12("Kingston", "KNE-PC2", 0x1128e633, 0xce2a89b3), |
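The pcnet_cs rework pulls the whole configuration attempt into pcnet_try_config() so that, when the first pass cannot read a hardware address, the driver can disable the device and retry once with io_lines forced to 16. A stripped-down sketch of that try-then-retry control flow; the probe behaviour and values below are invented purely to show the shape:

#include <stdio.h>

/* Stand-in for the hardware probe: pretend the MAC address can only be
 * read when 16 I/O lines are decoded (purely illustrative). */
static int probe_hw(int io_lines)
{
	return io_lines == 16 ? 0 : -1;
}

/* One self-contained configuration attempt, parameterised by the
 * fallback setting, like pcnet_try_config(link, &has_shmem, try). */
static int try_config(int force_io16)
{
	int io_lines = force_io16 ? 16 : 10;  /* first pass uses the CIS value */

	return probe_hw(io_lines);
}

int main(void)
{
	int ret = try_config(0);

	if (ret) {
		printf("first attempt failed, retrying with io_lines=16\n");
		/* the driver calls pcmcia_disable_device() at this point */
		ret = try_config(1);
	}
	printf("final result: %d\n", ret);
	return 0;
}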
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 6a6b8199a0d6..6c58da2b882c 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c | |||
@@ -308,7 +308,7 @@ static int mdio_bus_suspend(struct device *dev) | |||
308 | * may call phy routines that try to grab the same lock, and that may | 308 | * may call phy routines that try to grab the same lock, and that may |
309 | * lead to a deadlock. | 309 | * lead to a deadlock. |
310 | */ | 310 | */ |
311 | if (phydev->attached_dev) | 311 | if (phydev->attached_dev && phydev->adjust_link) |
312 | phy_stop_machine(phydev); | 312 | phy_stop_machine(phydev); |
313 | 313 | ||
314 | if (!mdio_bus_phy_may_suspend(phydev)) | 314 | if (!mdio_bus_phy_may_suspend(phydev)) |
@@ -331,7 +331,7 @@ static int mdio_bus_resume(struct device *dev) | |||
331 | return ret; | 331 | return ret; |
332 | 332 | ||
333 | no_resume: | 333 | no_resume: |
334 | if (phydev->attached_dev) | 334 | if (phydev->attached_dev && phydev->adjust_link) |
335 | phy_start_machine(phydev, NULL); | 335 | phy_start_machine(phydev, NULL); |
336 | 336 | ||
337 | return 0; | 337 | return 0; |
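The mdio_bus suspend/resume hunks only stop or restart the PHY state machine when the attached driver actually registered an adjust_link callback; a PHY can have an attached device without the state machine ever having been started. A tiny sketch of that guarded call; the struct here is a cut-down stand-in, not the real phy_device:

#include <stdio.h>
#include <stddef.h>

struct phy_device {
	void *attached_dev;              /* non-NULL once a netdev is attached */
	void (*adjust_link)(void *);     /* set only when the state machine runs */
};

static void dummy_adjust_link(void *ndev) { (void)ndev; }

/* Mirrors the fix: only stop the PHY state machine if a driver
 * actually started it (i.e. registered adjust_link). */
static void suspend_phy(const struct phy_device *phy)
{
	if (phy->attached_dev && phy->adjust_link)
		printf("stopping PHY state machine\n");
	else
		printf("no state machine to stop\n");
}

int main(void)
{
	int netdev;   /* stand-in for an attached net_device */
	struct phy_device managed   = { &netdev, dummy_adjust_link };
	struct phy_device unmanaged = { &netdev, NULL };

	suspend_phy(&managed);
	suspend_phy(&unmanaged);
	return 0;
}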
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index 6695a51e09e9..736b91703b3e 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c | |||
@@ -1314,8 +1314,13 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb) | |||
1314 | hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN; | 1314 | hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN; |
1315 | i = 0; | 1315 | i = 0; |
1316 | list_for_each_entry(pch, &ppp->channels, clist) { | 1316 | list_for_each_entry(pch, &ppp->channels, clist) { |
1317 | navail += pch->avail = (pch->chan != NULL); | 1317 | if (pch->chan) { |
1318 | pch->speed = pch->chan->speed; | 1318 | pch->avail = 1; |
1319 | navail++; | ||
1320 | pch->speed = pch->chan->speed; | ||
1321 | } else { | ||
1322 | pch->avail = 0; | ||
1323 | } | ||
1319 | if (pch->avail) { | 1324 | if (pch->avail) { |
1320 | if (skb_queue_empty(&pch->file.xq) || | 1325 | if (skb_queue_empty(&pch->file.xq) || |
1321 | !pch->had_frag) { | 1326 | !pch->had_frag) { |
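The ppp_generic hunk splits the old combined assignment so pch->chan->speed is only read after the pointer has been checked; the previous form dereferenced pch->chan unconditionally even when it was NULL. A small sketch of the fixed pattern (structure and field names trimmed down for illustration):

#include <stdio.h>
#include <stddef.h>

struct channel { int speed; };

struct chan_slot {
	struct channel *chan;   /* may be NULL while a channel detaches */
	int avail;
	int speed;
};

/* The fixed pattern from ppp_mp_explode(): only touch chan->speed
 * inside the branch that has already checked the pointer. */
static int count_available(struct chan_slot *slots, int n)
{
	int navail = 0;

	for (int i = 0; i < n; i++) {
		if (slots[i].chan) {
			slots[i].avail = 1;
			slots[i].speed = slots[i].chan->speed;
			navail++;
		} else {
			slots[i].avail = 0;   /* no dereference on this path */
		}
	}
	return navail;
}

int main(void)
{
	struct channel fast = { 100 };
	struct chan_slot slots[2] = { { &fast, 0, 0 }, { NULL, 0, 0 } };

	printf("available channels: %d\n", count_available(slots, 2));
	return 0;
}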
diff --git a/drivers/net/pxa168_eth.c b/drivers/net/pxa168_eth.c index 410ea0a61371..85eddda276bd 100644 --- a/drivers/net/pxa168_eth.c +++ b/drivers/net/pxa168_eth.c | |||
@@ -1606,6 +1606,8 @@ static int pxa168_eth_remove(struct platform_device *pdev) | |||
1606 | 1606 | ||
1607 | iounmap(pep->base); | 1607 | iounmap(pep->base); |
1608 | pep->base = NULL; | 1608 | pep->base = NULL; |
1609 | mdiobus_unregister(pep->smi_bus); | ||
1610 | mdiobus_free(pep->smi_bus); | ||
1609 | unregister_netdev(dev); | 1611 | unregister_netdev(dev); |
1610 | flush_scheduled_work(); | 1612 | flush_scheduled_work(); |
1611 | free_netdev(dev); | 1613 | free_netdev(dev); |
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c index 75ba744b173c..2c7cf0b64811 100644 --- a/drivers/net/qlcnic/qlcnic_init.c +++ b/drivers/net/qlcnic/qlcnic_init.c | |||
@@ -1316,7 +1316,7 @@ qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter, | |||
1316 | return -ENOMEM; | 1316 | return -ENOMEM; |
1317 | } | 1317 | } |
1318 | 1318 | ||
1319 | skb_reserve(skb, 2); | 1319 | skb_reserve(skb, NET_IP_ALIGN); |
1320 | 1320 | ||
1321 | dma = pci_map_single(pdev, skb->data, | 1321 | dma = pci_map_single(pdev, skb->data, |
1322 | rds_ring->dma_size, PCI_DMA_FROMDEVICE); | 1322 | rds_ring->dma_size, PCI_DMA_FROMDEVICE); |
@@ -1404,7 +1404,6 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter, | |||
1404 | if (pkt_offset) | 1404 | if (pkt_offset) |
1405 | skb_pull(skb, pkt_offset); | 1405 | skb_pull(skb, pkt_offset); |
1406 | 1406 | ||
1407 | skb->truesize = skb->len + sizeof(struct sk_buff); | ||
1408 | skb->protocol = eth_type_trans(skb, netdev); | 1407 | skb->protocol = eth_type_trans(skb, netdev); |
1409 | 1408 | ||
1410 | napi_gro_receive(&sds_ring->napi, skb); | 1409 | napi_gro_receive(&sds_ring->napi, skb); |
@@ -1466,8 +1465,6 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter, | |||
1466 | 1465 | ||
1467 | skb_put(skb, lro_length + data_offset); | 1466 | skb_put(skb, lro_length + data_offset); |
1468 | 1467 | ||
1469 | skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb); | ||
1470 | |||
1471 | skb_pull(skb, l2_hdr_offset); | 1468 | skb_pull(skb, l2_hdr_offset); |
1472 | skb->protocol = eth_type_trans(skb, netdev); | 1469 | skb->protocol = eth_type_trans(skb, netdev); |
1473 | 1470 | ||
@@ -1700,8 +1697,6 @@ qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter, | |||
1700 | if (pkt_offset) | 1697 | if (pkt_offset) |
1701 | skb_pull(skb, pkt_offset); | 1698 | skb_pull(skb, pkt_offset); |
1702 | 1699 | ||
1703 | skb->truesize = skb->len + sizeof(struct sk_buff); | ||
1704 | |||
1705 | if (!qlcnic_check_loopback_buff(skb->data)) | 1700 | if (!qlcnic_check_loopback_buff(skb->data)) |
1706 | adapter->diag_cnt++; | 1701 | adapter->diag_cnt++; |
1707 | 1702 | ||
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 078bbf4e6f19..a0da4a17b025 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -2934,7 +2934,7 @@ static const struct rtl_cfg_info { | |||
2934 | .hw_start = rtl_hw_start_8168, | 2934 | .hw_start = rtl_hw_start_8168, |
2935 | .region = 2, | 2935 | .region = 2, |
2936 | .align = 8, | 2936 | .align = 8, |
2937 | .intr_event = SYSErr | LinkChg | RxOverflow | | 2937 | .intr_event = SYSErr | RxFIFOOver | LinkChg | RxOverflow | |
2938 | TxErr | TxOK | RxOK | RxErr, | 2938 | TxErr | TxOK | RxOK | RxErr, |
2939 | .napi_event = TxErr | TxOK | RxOK | RxOverflow, | 2939 | .napi_event = TxErr | TxOK | RxOK | RxOverflow, |
2940 | .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI, | 2940 | .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI, |
@@ -4625,8 +4625,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) | |||
4625 | } | 4625 | } |
4626 | 4626 | ||
4627 | /* Work around for rx fifo overflow */ | 4627 | /* Work around for rx fifo overflow */ |
4628 | if (unlikely(status & RxFIFOOver) && | 4628 | if (unlikely(status & RxFIFOOver)) { |
4629 | (tp->mac_version == RTL_GIGA_MAC_VER_11)) { | ||
4630 | netif_stop_queue(dev); | 4629 | netif_stop_queue(dev); |
4631 | rtl8169_tx_timeout(dev); | 4630 | rtl8169_tx_timeout(dev); |
4632 | break; | 4631 | break; |
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c index 07eb884ff982..44150f2f7bfd 100644 --- a/drivers/net/rionet.c +++ b/drivers/net/rionet.c | |||
@@ -384,7 +384,7 @@ static void rionet_remove(struct rio_dev *rdev) | |||
384 | free_pages((unsigned long)rionet_active, rdev->net->hport->sys_size ? | 384 | free_pages((unsigned long)rionet_active, rdev->net->hport->sys_size ? |
385 | __ilog2(sizeof(void *)) + 4 : 0); | 385 | __ilog2(sizeof(void *)) + 4 : 0); |
386 | unregister_netdev(ndev); | 386 | unregister_netdev(ndev); |
387 | kfree(ndev); | 387 | free_netdev(ndev); |
388 | 388 | ||
389 | list_for_each_entry_safe(peer, tmp, &rionet_peers, node) { | 389 | list_for_each_entry_safe(peer, tmp, &rionet_peers, node) { |
390 | list_del(&peer->node); | 390 | list_del(&peer->node); |
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c index cc4bd8c65f8b..9265315baa0b 100644 --- a/drivers/net/sgiseeq.c +++ b/drivers/net/sgiseeq.c | |||
@@ -804,7 +804,7 @@ static int __devinit sgiseeq_probe(struct platform_device *pdev) | |||
804 | err_out_free_page: | 804 | err_out_free_page: |
805 | free_page((unsigned long) sp->srings); | 805 | free_page((unsigned long) sp->srings); |
806 | err_out_free_dev: | 806 | err_out_free_dev: |
807 | kfree(dev); | 807 | free_netdev(dev); |
808 | 808 | ||
809 | err_out: | 809 | err_out: |
810 | return err; | 810 | return err; |
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c index 0909ae934ad0..8150ba154116 100644 --- a/drivers/net/smsc911x.c +++ b/drivers/net/smsc911x.c | |||
@@ -58,6 +58,7 @@ | |||
58 | 58 | ||
59 | MODULE_LICENSE("GPL"); | 59 | MODULE_LICENSE("GPL"); |
60 | MODULE_VERSION(SMSC_DRV_VERSION); | 60 | MODULE_VERSION(SMSC_DRV_VERSION); |
61 | MODULE_ALIAS("platform:smsc911x"); | ||
61 | 62 | ||
62 | #if USE_DEBUG > 0 | 63 | #if USE_DEBUG > 0 |
63 | static int debug = 16; | 64 | static int debug = 16; |
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c index bbb7951b9c4c..ea0461eb2dbe 100644 --- a/drivers/net/stmmac/stmmac_main.c +++ b/drivers/net/stmmac/stmmac_main.c | |||
@@ -1865,15 +1865,15 @@ static int stmmac_resume(struct platform_device *pdev) | |||
1865 | if (!netif_running(dev)) | 1865 | if (!netif_running(dev)) |
1866 | return 0; | 1866 | return 0; |
1867 | 1867 | ||
1868 | spin_lock(&priv->lock); | ||
1869 | |||
1870 | if (priv->shutdown) { | 1868 | if (priv->shutdown) { |
1871 | /* Re-open the interface and re-init the MAC/DMA | 1869 | /* Re-open the interface and re-init the MAC/DMA |
1872 | and the rings. */ | 1870 | and the rings (i.e. on hibernation stage) */ |
1873 | stmmac_open(dev); | 1871 | stmmac_open(dev); |
1874 | goto out_resume; | 1872 | return 0; |
1875 | } | 1873 | } |
1876 | 1874 | ||
1875 | spin_lock(&priv->lock); | ||
1876 | |||
1877 | /* Power Down bit, into the PM register, is cleared | 1877 | /* Power Down bit, into the PM register, is cleared |
1878 | * automatically as soon as a magic packet or a Wake-up frame | 1878 | * automatically as soon as a magic packet or a Wake-up frame |
1879 | * is received. Anyway, it's better to manually clear | 1879 | * is received. Anyway, it's better to manually clear |
@@ -1901,7 +1901,6 @@ static int stmmac_resume(struct platform_device *pdev) | |||
1901 | 1901 | ||
1902 | netif_start_queue(dev); | 1902 | netif_start_queue(dev); |
1903 | 1903 | ||
1904 | out_resume: | ||
1905 | spin_unlock(&priv->lock); | 1904 | spin_unlock(&priv->lock); |
1906 | return 0; | 1905 | return 0; |
1907 | } | 1906 | } |
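The stmmac resume hunk handles the "re-open after hibernation" case before priv->lock is taken, so stmmac_open() is never entered while the same context already holds the lock it needs. A minimal pthread illustration of why that ordering matters (compile with -lpthread; names and flow are simplified stand-ins, not the driver code):

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for stmmac_open(), which takes the same private lock. */
static void device_open(void)
{
	pthread_mutex_lock(&lock);
	printf("device (re)opened\n");
	pthread_mutex_unlock(&lock);
}

/* Fixed ordering: decide the early-return path first, then take the
 * lock only for the register-restore path. Taking the lock before
 * calling device_open() would self-deadlock on a non-recursive lock. */
static int device_resume(int was_shutdown)
{
	if (was_shutdown) {
		device_open();
		return 0;
	}

	pthread_mutex_lock(&lock);
	printf("restoring registers under the lock\n");
	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	device_resume(1);
	device_resume(0);
	return 0;
}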
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c index 5efa57757a2c..6888e3d41462 100644 --- a/drivers/net/tulip/de2104x.c +++ b/drivers/net/tulip/de2104x.c | |||
@@ -243,6 +243,7 @@ enum { | |||
243 | NWayState = (1 << 14) | (1 << 13) | (1 << 12), | 243 | NWayState = (1 << 14) | (1 << 13) | (1 << 12), |
244 | NWayRestart = (1 << 12), | 244 | NWayRestart = (1 << 12), |
245 | NonselPortActive = (1 << 9), | 245 | NonselPortActive = (1 << 9), |
246 | SelPortActive = (1 << 8), | ||
246 | LinkFailStatus = (1 << 2), | 247 | LinkFailStatus = (1 << 2), |
247 | NetCxnErr = (1 << 1), | 248 | NetCxnErr = (1 << 1), |
248 | }; | 249 | }; |
@@ -363,7 +364,9 @@ static u16 t21040_csr15[] = { 0, 0, 0x0006, 0x0000, 0x0000, }; | |||
363 | 364 | ||
364 | /* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/ | 365 | /* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/ |
365 | static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, }; | 366 | static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, }; |
366 | static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x6F3F, 0x6F3D, }; | 367 | static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, }; |
368 | /* If on-chip autonegotiation is broken, use half-duplex (FF3F) instead */ | ||
369 | static u16 t21041_csr14_brk[] = { 0xFF3F, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, }; | ||
367 | static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, }; | 370 | static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, }; |
368 | 371 | ||
369 | 372 | ||
@@ -1064,6 +1067,9 @@ static void de21041_media_timer (unsigned long data) | |||
1064 | unsigned int carrier; | 1067 | unsigned int carrier; |
1065 | unsigned long flags; | 1068 | unsigned long flags; |
1066 | 1069 | ||
1070 | /* clear port active bits */ | ||
1071 | dw32(SIAStatus, NonselPortActive | SelPortActive); | ||
1072 | |||
1067 | carrier = (status & NetCxnErr) ? 0 : 1; | 1073 | carrier = (status & NetCxnErr) ? 0 : 1; |
1068 | 1074 | ||
1069 | if (carrier) { | 1075 | if (carrier) { |
@@ -1158,14 +1164,29 @@ no_link_yet: | |||
1158 | static void de_media_interrupt (struct de_private *de, u32 status) | 1164 | static void de_media_interrupt (struct de_private *de, u32 status) |
1159 | { | 1165 | { |
1160 | if (status & LinkPass) { | 1166 | if (status & LinkPass) { |
1167 | /* Ignore if current media is AUI or BNC and we can't use TP */ | ||
1168 | if ((de->media_type == DE_MEDIA_AUI || | ||
1169 | de->media_type == DE_MEDIA_BNC) && | ||
1170 | (de->media_lock || | ||
1171 | !de_ok_to_advertise(de, DE_MEDIA_TP_AUTO))) | ||
1172 | return; | ||
1173 | /* If current media is not TP, change it to TP */ | ||
1174 | if ((de->media_type == DE_MEDIA_AUI || | ||
1175 | de->media_type == DE_MEDIA_BNC)) { | ||
1176 | de->media_type = DE_MEDIA_TP_AUTO; | ||
1177 | de_stop_rxtx(de); | ||
1178 | de_set_media(de); | ||
1179 | de_start_rxtx(de); | ||
1180 | } | ||
1161 | de_link_up(de); | 1181 | de_link_up(de); |
1162 | mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK); | 1182 | mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK); |
1163 | return; | 1183 | return; |
1164 | } | 1184 | } |
1165 | 1185 | ||
1166 | BUG_ON(!(status & LinkFail)); | 1186 | BUG_ON(!(status & LinkFail)); |
1167 | 1187 | /* Mark the link as down only if current media is TP */ | |
1168 | if (netif_carrier_ok(de->dev)) { | 1188 | if (netif_carrier_ok(de->dev) && de->media_type != DE_MEDIA_AUI && |
1189 | de->media_type != DE_MEDIA_BNC) { | ||
1169 | de_link_down(de); | 1190 | de_link_down(de); |
1170 | mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK); | 1191 | mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK); |
1171 | } | 1192 | } |
@@ -1229,6 +1250,7 @@ static void de_adapter_sleep (struct de_private *de) | |||
1229 | if (de->de21040) | 1250 | if (de->de21040) |
1230 | return; | 1251 | return; |
1231 | 1252 | ||
1253 | dw32(CSR13, 0); /* Reset phy */ | ||
1232 | pci_read_config_dword(de->pdev, PCIPM, &pmctl); | 1254 | pci_read_config_dword(de->pdev, PCIPM, &pmctl); |
1233 | pmctl |= PM_Sleep; | 1255 | pmctl |= PM_Sleep; |
1234 | pci_write_config_dword(de->pdev, PCIPM, pmctl); | 1256 | pci_write_config_dword(de->pdev, PCIPM, pmctl); |
@@ -1574,12 +1596,15 @@ static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd) | |||
1574 | return 0; /* nothing to change */ | 1596 | return 0; /* nothing to change */ |
1575 | 1597 | ||
1576 | de_link_down(de); | 1598 | de_link_down(de); |
1599 | mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK); | ||
1577 | de_stop_rxtx(de); | 1600 | de_stop_rxtx(de); |
1578 | 1601 | ||
1579 | de->media_type = new_media; | 1602 | de->media_type = new_media; |
1580 | de->media_lock = media_lock; | 1603 | de->media_lock = media_lock; |
1581 | de->media_advertise = ecmd->advertising; | 1604 | de->media_advertise = ecmd->advertising; |
1582 | de_set_media(de); | 1605 | de_set_media(de); |
1606 | if (netif_running(de->dev)) | ||
1607 | de_start_rxtx(de); | ||
1583 | 1608 | ||
1584 | return 0; | 1609 | return 0; |
1585 | } | 1610 | } |
@@ -1911,8 +1936,14 @@ fill_defaults: | |||
1911 | for (i = 0; i < DE_MAX_MEDIA; i++) { | 1936 | for (i = 0; i < DE_MAX_MEDIA; i++) { |
1912 | if (de->media[i].csr13 == 0xffff) | 1937 | if (de->media[i].csr13 == 0xffff) |
1913 | de->media[i].csr13 = t21041_csr13[i]; | 1938 | de->media[i].csr13 = t21041_csr13[i]; |
1914 | if (de->media[i].csr14 == 0xffff) | 1939 | if (de->media[i].csr14 == 0xffff) { |
1915 | de->media[i].csr14 = t21041_csr14[i]; | 1940 | /* autonegotiation is broken at least on some chip |
1941 | revisions - rev. 0x21 works, 0x11 does not */ | ||
1942 | if (de->pdev->revision < 0x20) | ||
1943 | de->media[i].csr14 = t21041_csr14_brk[i]; | ||
1944 | else | ||
1945 | de->media[i].csr14 = t21041_csr14[i]; | ||
1946 | } | ||
1916 | if (de->media[i].csr15 == 0xffff) | 1947 | if (de->media[i].csr15 == 0xffff) |
1917 | de->media[i].csr15 = t21041_csr15[i]; | 1948 | de->media[i].csr15 = t21041_csr15[i]; |
1918 | } | 1949 | } |
@@ -2158,6 +2189,8 @@ static int de_resume (struct pci_dev *pdev) | |||
2158 | dev_err(&dev->dev, "pci_enable_device failed in resume\n"); | 2189 | dev_err(&dev->dev, "pci_enable_device failed in resume\n"); |
2159 | goto out; | 2190 | goto out; |
2160 | } | 2191 | } |
2192 | pci_set_master(pdev); | ||
2193 | de_init_rings(de); | ||
2161 | de_init_hw(de); | 2194 | de_init_hw(de); |
2162 | out_attach: | 2195 | out_attach: |
2163 | netif_device_attach(dev); | 2196 | netif_device_attach(dev); |
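
Note: the de2104x hunks above key the CSR14 defaults off the chip revision. A minimal userspace sketch of that selection follows; the two tables and the 0x20 threshold are copied from the hunk, while pick_csr14() and main() are illustrative only and do not exist in the driver.

#include <stdio.h>
#include <stdint.h>

static uint16_t t21041_csr14[]     = { 0xFFFF, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D };
static uint16_t t21041_csr14_brk[] = { 0xFF3F, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D };

static uint16_t pick_csr14(uint8_t revision, int media)
{
	/* Revisions below 0x20 are treated as having broken on-chip
	 * autonegotiation, so the half-duplex fallback table is used. */
	return revision < 0x20 ? t21041_csr14_brk[media] : t21041_csr14[media];
}

int main(void)
{
	printf("rev 0x11, media 0 -> CSR14 0x%04X\n", pick_csr14(0x11, 0));
	printf("rev 0x21, media 0 -> CSR14 0x%04X\n", pick_csr14(0x21, 0));
	return 0;
}
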
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 6efca66b8766..1cd752f9a6e1 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c | |||
@@ -1652,6 +1652,8 @@ static int hso_get_count(struct hso_serial *serial, | |||
1652 | struct uart_icount cnow; | 1652 | struct uart_icount cnow; |
1653 | struct hso_tiocmget *tiocmget = serial->tiocmget; | 1653 | struct hso_tiocmget *tiocmget = serial->tiocmget; |
1654 | 1654 | ||
1655 | memset(&icount, 0, sizeof(struct serial_icounter_struct)); | ||
1656 | |||
1655 | if (!tiocmget) | 1657 | if (!tiocmget) |
1656 | return -ENOENT; | 1658 | return -ENOENT; |
1657 | spin_lock_irq(&serial->serial_lock); | 1659 | spin_lock_irq(&serial->serial_lock); |
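
Note: the hso_get_count() fix above zeroes the whole counter structure before the driver fills in the fields it knows about, so padding and untouched members never reach user space uninitialized. A small stand-alone sketch of the pattern; the struct below is a made-up stand-in for serial_icounter_struct.

#include <stdio.h>
#include <string.h>

struct icounter_demo {
	int cts, dsr, rng, dcd;
	int reserved[4];	/* would otherwise carry stack garbage */
};

int main(void)
{
	struct icounter_demo icount;

	/* Zero everything first, as the hso fix does. */
	memset(&icount, 0, sizeof(icount));
	icount.cts = 3;
	icount.dcd = 1;

	printf("cts=%d dcd=%d reserved[0]=%d\n",
	       icount.cts, icount.dcd, icount.reserved[0]);
	return 0;
}
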
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c index 8ed30fa35d0a..b2bcf99e6f08 100644 --- a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c | |||
@@ -429,10 +429,6 @@ static const struct net_device_ops ipheth_netdev_ops = { | |||
429 | .ndo_get_stats = &ipheth_stats, | 429 | .ndo_get_stats = &ipheth_stats, |
430 | }; | 430 | }; |
431 | 431 | ||
432 | static struct device_type ipheth_type = { | ||
433 | .name = "wwan", | ||
434 | }; | ||
435 | |||
436 | static int ipheth_probe(struct usb_interface *intf, | 432 | static int ipheth_probe(struct usb_interface *intf, |
437 | const struct usb_device_id *id) | 433 | const struct usb_device_id *id) |
438 | { | 434 | { |
@@ -450,7 +446,7 @@ static int ipheth_probe(struct usb_interface *intf, | |||
450 | 446 | ||
451 | netdev->netdev_ops = &ipheth_netdev_ops; | 447 | netdev->netdev_ops = &ipheth_netdev_ops; |
452 | netdev->watchdog_timeo = IPHETH_TX_TIMEOUT; | 448 | netdev->watchdog_timeo = IPHETH_TX_TIMEOUT; |
453 | strcpy(netdev->name, "wwan%d"); | 449 | strcpy(netdev->name, "eth%d"); |
454 | 450 | ||
455 | dev = netdev_priv(netdev); | 451 | dev = netdev_priv(netdev); |
456 | dev->udev = udev; | 452 | dev->udev = udev; |
@@ -500,7 +496,6 @@ static int ipheth_probe(struct usb_interface *intf, | |||
500 | 496 | ||
501 | SET_NETDEV_DEV(netdev, &intf->dev); | 497 | SET_NETDEV_DEV(netdev, &intf->dev); |
502 | SET_ETHTOOL_OPS(netdev, &ops); | 498 | SET_ETHTOOL_OPS(netdev, &ops); |
503 | SET_NETDEV_DEVTYPE(netdev, &ipheth_type); | ||
504 | 499 | ||
505 | retval = register_netdev(netdev); | 500 | retval = register_netdev(netdev); |
506 | if (retval) { | 501 | if (retval) { |
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index fd69095ef6e3..f53412368ce1 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c | |||
@@ -2824,7 +2824,7 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi | |||
2824 | netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT); | 2824 | netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT); |
2825 | 2825 | ||
2826 | dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | | 2826 | dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | |
2827 | NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM | NETIF_F_SG; | 2827 | NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM; |
2828 | 2828 | ||
2829 | ret = register_netdev(dev); | 2829 | ret = register_netdev(dev); |
2830 | if (ret < 0) | 2830 | if (ret < 0) |
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index 373dcfec689c..d77ce9906b6c 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c | |||
@@ -1327,6 +1327,10 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf, | |||
1327 | PCI_DMA_TODEVICE); | 1327 | PCI_DMA_TODEVICE); |
1328 | 1328 | ||
1329 | rate = ieee80211_get_tx_rate(sc->hw, info); | 1329 | rate = ieee80211_get_tx_rate(sc->hw, info); |
1330 | if (!rate) { | ||
1331 | ret = -EINVAL; | ||
1332 | goto err_unmap; | ||
1333 | } | ||
1330 | 1334 | ||
1331 | if (info->flags & IEEE80211_TX_CTL_NO_ACK) | 1335 | if (info->flags & IEEE80211_TX_CTL_NO_ACK) |
1332 | flags |= AR5K_TXDESC_NOACK; | 1336 | flags |= AR5K_TXDESC_NOACK; |
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index b883b174385b..057fb69ddf7f 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c | |||
@@ -797,7 +797,7 @@ static bool ar9300_uncompress_block(struct ath_hw *ah, | |||
797 | length = block[it+1]; | 797 | length = block[it+1]; |
798 | length &= 0xff; | 798 | length &= 0xff; |
799 | 799 | ||
800 | if (length > 0 && spot >= 0 && spot+length < mdataSize) { | 800 | if (length > 0 && spot >= 0 && spot+length <= mdataSize) { |
801 | ath_print(common, ATH_DBG_EEPROM, | 801 | ath_print(common, ATH_DBG_EEPROM, |
802 | "Restore at %d: spot=%d " | 802 | "Restore at %d: spot=%d " |
803 | "offset=%d length=%d\n", | 803 | "offset=%d length=%d\n", |
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h index 7f48df1e2903..0b09db0f8e7d 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom.h +++ b/drivers/net/wireless/ath/ath9k/eeprom.h | |||
@@ -62,7 +62,7 @@ | |||
62 | 62 | ||
63 | #define SD_NO_CTL 0xE0 | 63 | #define SD_NO_CTL 0xE0 |
64 | #define NO_CTL 0xff | 64 | #define NO_CTL 0xff |
65 | #define CTL_MODE_M 7 | 65 | #define CTL_MODE_M 0xf |
66 | #define CTL_11A 0 | 66 | #define CTL_11A 0 |
67 | #define CTL_11B 1 | 67 | #define CTL_11B 1 |
68 | #define CTL_11G 2 | 68 | #define CTL_11G 2 |
diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h index a1c39526161a..345dd9721b41 100644 --- a/drivers/net/wireless/ath/regd.h +++ b/drivers/net/wireless/ath/regd.h | |||
@@ -31,7 +31,6 @@ enum ctl_group { | |||
31 | #define NO_CTL 0xff | 31 | #define NO_CTL 0xff |
32 | #define SD_NO_CTL 0xE0 | 32 | #define SD_NO_CTL 0xE0 |
33 | #define NO_CTL 0xff | 33 | #define NO_CTL 0xff |
34 | #define CTL_MODE_M 7 | ||
35 | #define CTL_11A 0 | 34 | #define CTL_11A 0 |
36 | #define CTL_11B 1 | 35 | #define CTL_11B 1 |
37 | #define CTL_11G 2 | 36 | #define CTL_11G 2 |
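
Note: the two hunks above widen CTL_MODE_M from a 3-bit to a 4-bit mask, so conformance-test-limit modes with values above 7 are no longer truncated when extracted. A tiny sketch; the 0xE9 sample value is made up purely to show the truncation.

#include <stdio.h>

#define CTL_MODE_M 0xf	/* widened from 7 in the hunk above */

int main(void)
{
	unsigned int ctl = 0xE9;  /* hypothetical: SD_NO_CTL group (0xE0) plus mode 9 */

	printf("old mask 0x7 -> mode %u\n", ctl & 7u);
	printf("new mask 0xf -> mode %u\n", ctl & (unsigned int)CTL_MODE_M);
	return 0;
}
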
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c index 9dd9e64c2b0b..8fd00a6e5120 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c | |||
@@ -1411,7 +1411,7 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) | |||
1411 | clear_bit(STATUS_SCAN_HW, &priv->status); | 1411 | clear_bit(STATUS_SCAN_HW, &priv->status); |
1412 | clear_bit(STATUS_SCANNING, &priv->status); | 1412 | clear_bit(STATUS_SCANNING, &priv->status); |
1413 | /* inform mac80211 scan aborted */ | 1413 | /* inform mac80211 scan aborted */ |
1414 | queue_work(priv->workqueue, &priv->scan_completed); | 1414 | queue_work(priv->workqueue, &priv->abort_scan); |
1415 | } | 1415 | } |
1416 | 1416 | ||
1417 | int iwlagn_manage_ibss_station(struct iwl_priv *priv, | 1417 | int iwlagn_manage_ibss_station(struct iwl_priv *priv, |
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c index 07dbc2796448..e23c4060a0f0 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.c +++ b/drivers/net/wireless/iwlwifi/iwl-core.c | |||
@@ -2613,6 +2613,11 @@ int iwl_force_reset(struct iwl_priv *priv, int mode, bool external) | |||
2613 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | 2613 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) |
2614 | return -EINVAL; | 2614 | return -EINVAL; |
2615 | 2615 | ||
2616 | if (test_bit(STATUS_SCANNING, &priv->status)) { | ||
2617 | IWL_DEBUG_INFO(priv, "scan in progress.\n"); | ||
2618 | return -EINVAL; | ||
2619 | } | ||
2620 | |||
2616 | if (mode >= IWL_MAX_FORCE_RESET) { | 2621 | if (mode >= IWL_MAX_FORCE_RESET) { |
2617 | IWL_DEBUG_INFO(priv, "invalid reset request.\n"); | 2622 | IWL_DEBUG_INFO(priv, "invalid reset request.\n"); |
2618 | return -EINVAL; | 2623 | return -EINVAL; |
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index 59a308b02f95..d31661c1ce77 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c | |||
@@ -3018,7 +3018,7 @@ void iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) | |||
3018 | clear_bit(STATUS_SCANNING, &priv->status); | 3018 | clear_bit(STATUS_SCANNING, &priv->status); |
3019 | 3019 | ||
3020 | /* inform mac80211 scan aborted */ | 3020 | /* inform mac80211 scan aborted */ |
3021 | queue_work(priv->workqueue, &priv->scan_completed); | 3021 | queue_work(priv->workqueue, &priv->abort_scan); |
3022 | } | 3022 | } |
3023 | 3023 | ||
3024 | static void iwl3945_bg_restart(struct work_struct *data) | 3024 | static void iwl3945_bg_restart(struct work_struct *data) |
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c index ba854c70ab94..87b634978b35 100644 --- a/drivers/net/wireless/libertas/if_sdio.c +++ b/drivers/net/wireless/libertas/if_sdio.c | |||
@@ -128,7 +128,7 @@ struct if_sdio_card { | |||
128 | bool helper_allocated; | 128 | bool helper_allocated; |
129 | bool firmware_allocated; | 129 | bool firmware_allocated; |
130 | 130 | ||
131 | u8 buffer[65536]; | 131 | u8 buffer[65536] __attribute__((aligned(4))); |
132 | 132 | ||
133 | spinlock_t lock; | 133 | spinlock_t lock; |
134 | struct if_sdio_packet *packets; | 134 | struct if_sdio_packet *packets; |
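
Note: the if_sdio change above forces the card buffer onto a 4-byte boundary with an alignment attribute, independent of the fields that precede it in the struct. A cut-down stand-in (struct card_demo is illustrative only) showing the effect, built with GCC or Clang:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct card_demo {
	char helper_allocated;
	char firmware_allocated;
	uint8_t buffer[64] __attribute__((aligned(4)));
};

int main(void)
{
	/* Prints 4: the buffer is pushed past the two flag bytes to the
	 * next 4-byte boundary. */
	printf("buffer offset in struct = %zu bytes\n",
	       offsetof(struct card_demo, buffer));
	return 0;
}
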
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c index 173aec3d6e7e..0e937dc0c9c4 100644 --- a/drivers/net/wireless/p54/txrx.c +++ b/drivers/net/wireless/p54/txrx.c | |||
@@ -446,7 +446,7 @@ static void p54_rx_frame_sent(struct p54_common *priv, struct sk_buff *skb) | |||
446 | } | 446 | } |
447 | 447 | ||
448 | if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) && | 448 | if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) && |
449 | (!payload->status)) | 449 | !(payload->status & P54_TX_FAILED)) |
450 | info->flags |= IEEE80211_TX_STAT_ACK; | 450 | info->flags |= IEEE80211_TX_STAT_ACK; |
451 | if (payload->status & P54_TX_PSM_CANCELLED) | 451 | if (payload->status & P54_TX_PSM_CANCELLED) |
452 | info->flags |= IEEE80211_TX_STAT_TX_FILTERED; | 452 | info->flags |= IEEE80211_TX_STAT_TX_FILTERED; |
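
Note: the p54 hunk above stops treating any non-zero status as a transmit failure; only the P54_TX_FAILED bit now clears the ACK flag, so frames whose status carries other bits (such as P54_TX_PSM_CANCELLED, handled just below it) are still reported as acknowledged. A sketch of the difference; the bit values here are hypothetical, the real ones live in the p54 headers.

#include <stdio.h>

#define P54_TX_FAILED		0x01
#define P54_TX_PSM_CANCELLED	0x04

int main(void)
{
	unsigned char status = P54_TX_PSM_CANCELLED;	/* filtered, not failed */

	printf("old test !status            -> ACK %s\n",
	       !status ? "reported" : "missed");
	printf("new test !(status & FAILED) -> ACK %s\n",
	       !(status & P54_TX_FAILED) ? "reported" : "missed");
	return 0;
}
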
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c index a9352b2c7ac4..b7e755f4178a 100644 --- a/drivers/oprofile/buffer_sync.c +++ b/drivers/oprofile/buffer_sync.c | |||
@@ -141,16 +141,6 @@ static struct notifier_block module_load_nb = { | |||
141 | .notifier_call = module_load_notify, | 141 | .notifier_call = module_load_notify, |
142 | }; | 142 | }; |
143 | 143 | ||
144 | |||
145 | static void end_sync(void) | ||
146 | { | ||
147 | end_cpu_work(); | ||
148 | /* make sure we don't leak task structs */ | ||
149 | process_task_mortuary(); | ||
150 | process_task_mortuary(); | ||
151 | } | ||
152 | |||
153 | |||
154 | int sync_start(void) | 144 | int sync_start(void) |
155 | { | 145 | { |
156 | int err; | 146 | int err; |
@@ -158,7 +148,7 @@ int sync_start(void) | |||
158 | if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL)) | 148 | if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL)) |
159 | return -ENOMEM; | 149 | return -ENOMEM; |
160 | 150 | ||
161 | start_cpu_work(); | 151 | mutex_lock(&buffer_mutex); |
162 | 152 | ||
163 | err = task_handoff_register(&task_free_nb); | 153 | err = task_handoff_register(&task_free_nb); |
164 | if (err) | 154 | if (err) |
@@ -173,7 +163,10 @@ int sync_start(void) | |||
173 | if (err) | 163 | if (err) |
174 | goto out4; | 164 | goto out4; |
175 | 165 | ||
166 | start_cpu_work(); | ||
167 | |||
176 | out: | 168 | out: |
169 | mutex_unlock(&buffer_mutex); | ||
177 | return err; | 170 | return err; |
178 | out4: | 171 | out4: |
179 | profile_event_unregister(PROFILE_MUNMAP, &munmap_nb); | 172 | profile_event_unregister(PROFILE_MUNMAP, &munmap_nb); |
@@ -182,7 +175,6 @@ out3: | |||
182 | out2: | 175 | out2: |
183 | task_handoff_unregister(&task_free_nb); | 176 | task_handoff_unregister(&task_free_nb); |
184 | out1: | 177 | out1: |
185 | end_sync(); | ||
186 | free_cpumask_var(marked_cpus); | 178 | free_cpumask_var(marked_cpus); |
187 | goto out; | 179 | goto out; |
188 | } | 180 | } |
@@ -190,11 +182,20 @@ out1: | |||
190 | 182 | ||
191 | void sync_stop(void) | 183 | void sync_stop(void) |
192 | { | 184 | { |
185 | /* flush buffers */ | ||
186 | mutex_lock(&buffer_mutex); | ||
187 | end_cpu_work(); | ||
193 | unregister_module_notifier(&module_load_nb); | 188 | unregister_module_notifier(&module_load_nb); |
194 | profile_event_unregister(PROFILE_MUNMAP, &munmap_nb); | 189 | profile_event_unregister(PROFILE_MUNMAP, &munmap_nb); |
195 | profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb); | 190 | profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb); |
196 | task_handoff_unregister(&task_free_nb); | 191 | task_handoff_unregister(&task_free_nb); |
197 | end_sync(); | 192 | mutex_unlock(&buffer_mutex); |
193 | flush_scheduled_work(); | ||
194 | |||
195 | /* make sure we don't leak task structs */ | ||
196 | process_task_mortuary(); | ||
197 | process_task_mortuary(); | ||
198 | |||
198 | free_cpumask_var(marked_cpus); | 199 | free_cpumask_var(marked_cpus); |
199 | } | 200 | } |
200 | 201 | ||
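
Note: the buffer_sync.c rework above stops the per-CPU work under buffer_mutex but performs the flush (flush_scheduled_work()) only after dropping the mutex, since the work items themselves take buffer_mutex. A toy pthread model of that ordering (build with -pthread); the names are analogies, not kernel APIs.

#include <pthread.h>
#include <stdio.h>
#include <stdbool.h>
#include <unistd.h>

static pthread_mutex_t buffer_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool stop;

static void *worker(void *arg)
{
	(void)arg;
	for (;;) {
		pthread_mutex_lock(&buffer_mutex);
		bool done = stop;
		pthread_mutex_unlock(&buffer_mutex);
		if (done)
			return NULL;
		usleep(1000);	/* pretend to sync buffers */
	}
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);

	pthread_mutex_lock(&buffer_mutex);
	stop = true;			/* end_cpu_work() analogue */
	pthread_mutex_unlock(&buffer_mutex);

	pthread_join(t, NULL);		/* flush_scheduled_work() analogue,
					 * deliberately outside the mutex */
	puts("stopped cleanly");
	return 0;
}
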
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c index 219f79e2210a..f179ac2ea801 100644 --- a/drivers/oprofile/cpu_buffer.c +++ b/drivers/oprofile/cpu_buffer.c | |||
@@ -120,8 +120,6 @@ void end_cpu_work(void) | |||
120 | 120 | ||
121 | cancel_delayed_work(&b->work); | 121 | cancel_delayed_work(&b->work); |
122 | } | 122 | } |
123 | |||
124 | flush_scheduled_work(); | ||
125 | } | 123 | } |
126 | 124 | ||
127 | /* | 125 | /* |
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c index 45fcc1e96df9..3bc72d18b121 100644 --- a/drivers/pci/hotplug/acpi_pcihp.c +++ b/drivers/pci/hotplug/acpi_pcihp.c | |||
@@ -338,9 +338,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags) | |||
338 | acpi_handle chandle, handle; | 338 | acpi_handle chandle, handle; |
339 | struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; | 339 | struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; |
340 | 340 | ||
341 | flags &= (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | | 341 | flags &= OSC_SHPC_NATIVE_HP_CONTROL; |
342 | OSC_SHPC_NATIVE_HP_CONTROL | | ||
343 | OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); | ||
344 | if (!flags) { | 342 | if (!flags) { |
345 | err("Invalid flags %u specified!\n", flags); | 343 | err("Invalid flags %u specified!\n", flags); |
346 | return -EINVAL; | 344 | return -EINVAL; |
@@ -360,7 +358,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags) | |||
360 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); | 358 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); |
361 | dbg("Trying to get hotplug control for %s\n", | 359 | dbg("Trying to get hotplug control for %s\n", |
362 | (char *)string.pointer); | 360 | (char *)string.pointer); |
363 | status = acpi_pci_osc_control_set(handle, flags); | 361 | status = acpi_pci_osc_control_set(handle, &flags, flags); |
364 | if (ACPI_SUCCESS(status)) | 362 | if (ACPI_SUCCESS(status)) |
365 | goto got_one; | 363 | goto got_one; |
366 | if (status == AE_SUPPORT) | 364 | if (status == AE_SUPPORT) |
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index 4ed76b47b6dc..73d513989263 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h | |||
@@ -176,19 +176,11 @@ static inline void pciehp_firmware_init(void) | |||
176 | { | 176 | { |
177 | pciehp_acpi_slot_detection_init(); | 177 | pciehp_acpi_slot_detection_init(); |
178 | } | 178 | } |
179 | |||
180 | static inline int pciehp_get_hp_hw_control_from_firmware(struct pci_dev *dev) | ||
181 | { | ||
182 | int retval; | ||
183 | u32 flags = (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | | ||
184 | OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); | ||
185 | retval = acpi_get_hp_hw_control_from_firmware(dev, flags); | ||
186 | if (retval) | ||
187 | return retval; | ||
188 | return pciehp_acpi_slot_detection_check(dev); | ||
189 | } | ||
190 | #else | 179 | #else |
191 | #define pciehp_firmware_init() do {} while (0) | 180 | #define pciehp_firmware_init() do {} while (0) |
192 | #define pciehp_get_hp_hw_control_from_firmware(dev) 0 | 181 | static inline int pciehp_acpi_slot_detection_check(struct pci_dev *dev) |
182 | { | ||
183 | return 0; | ||
184 | } | ||
193 | #endif /* CONFIG_ACPI */ | 185 | #endif /* CONFIG_ACPI */ |
194 | #endif /* _PCIEHP_H */ | 186 | #endif /* _PCIEHP_H */ |
diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c index 1f4000a5a108..2574700db461 100644 --- a/drivers/pci/hotplug/pciehp_acpi.c +++ b/drivers/pci/hotplug/pciehp_acpi.c | |||
@@ -85,9 +85,7 @@ static int __init dummy_probe(struct pcie_device *dev) | |||
85 | acpi_handle handle; | 85 | acpi_handle handle; |
86 | struct dummy_slot *slot, *tmp; | 86 | struct dummy_slot *slot, *tmp; |
87 | struct pci_dev *pdev = dev->port; | 87 | struct pci_dev *pdev = dev->port; |
88 | /* Note: pciehp_detect_mode != PCIEHP_DETECT_ACPI here */ | 88 | |
89 | if (pciehp_get_hp_hw_control_from_firmware(pdev)) | ||
90 | return -ENODEV; | ||
91 | pos = pci_pcie_cap(pdev); | 89 | pos = pci_pcie_cap(pdev); |
92 | if (!pos) | 90 | if (!pos) |
93 | return -ENODEV; | 91 | return -ENODEV; |
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index 3588ea61b0dd..aa5f3ff629ff 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c | |||
@@ -59,7 +59,7 @@ module_param(pciehp_force, bool, 0644); | |||
59 | MODULE_PARM_DESC(pciehp_debug, "Debugging mode enabled or not"); | 59 | MODULE_PARM_DESC(pciehp_debug, "Debugging mode enabled or not"); |
60 | MODULE_PARM_DESC(pciehp_poll_mode, "Using polling mechanism for hot-plug events or not"); | 60 | MODULE_PARM_DESC(pciehp_poll_mode, "Using polling mechanism for hot-plug events or not"); |
61 | MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds"); | 61 | MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds"); |
62 | MODULE_PARM_DESC(pciehp_force, "Force pciehp, even if _OSC and OSHP are missing"); | 62 | MODULE_PARM_DESC(pciehp_force, "Force pciehp, even if OSHP is missing"); |
63 | 63 | ||
64 | #define PCIE_MODULE_NAME "pciehp" | 64 | #define PCIE_MODULE_NAME "pciehp" |
65 | 65 | ||
@@ -235,7 +235,7 @@ static int pciehp_probe(struct pcie_device *dev) | |||
235 | dev_info(&dev->device, | 235 | dev_info(&dev->device, |
236 | "Bypassing BIOS check for pciehp use on %s\n", | 236 | "Bypassing BIOS check for pciehp use on %s\n", |
237 | pci_name(dev->port)); | 237 | pci_name(dev->port)); |
238 | else if (pciehp_get_hp_hw_control_from_firmware(dev->port)) | 238 | else if (pciehp_acpi_slot_detection_check(dev->port)) |
239 | goto err_out_none; | 239 | goto err_out_none; |
240 | 240 | ||
241 | ctrl = pcie_init(dev); | 241 | ctrl = pcie_init(dev); |
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index c3ceebb5be84..4789f8e8bf7a 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c | |||
@@ -71,6 +71,49 @@ | |||
71 | #define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32)) | 71 | #define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32)) |
72 | #define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64)) | 72 | #define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64)) |
73 | 73 | ||
74 | /* page table handling */ | ||
75 | #define LEVEL_STRIDE (9) | ||
76 | #define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1) | ||
77 | |||
78 | static inline int agaw_to_level(int agaw) | ||
79 | { | ||
80 | return agaw + 2; | ||
81 | } | ||
82 | |||
83 | static inline int agaw_to_width(int agaw) | ||
84 | { | ||
85 | return 30 + agaw * LEVEL_STRIDE; | ||
86 | } | ||
87 | |||
88 | static inline int width_to_agaw(int width) | ||
89 | { | ||
90 | return (width - 30) / LEVEL_STRIDE; | ||
91 | } | ||
92 | |||
93 | static inline unsigned int level_to_offset_bits(int level) | ||
94 | { | ||
95 | return (level - 1) * LEVEL_STRIDE; | ||
96 | } | ||
97 | |||
98 | static inline int pfn_level_offset(unsigned long pfn, int level) | ||
99 | { | ||
100 | return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK; | ||
101 | } | ||
102 | |||
103 | static inline unsigned long level_mask(int level) | ||
104 | { | ||
105 | return -1UL << level_to_offset_bits(level); | ||
106 | } | ||
107 | |||
108 | static inline unsigned long level_size(int level) | ||
109 | { | ||
110 | return 1UL << level_to_offset_bits(level); | ||
111 | } | ||
112 | |||
113 | static inline unsigned long align_to_level(unsigned long pfn, int level) | ||
114 | { | ||
115 | return (pfn + level_size(level) - 1) & level_mask(level); | ||
116 | } | ||
74 | 117 | ||
75 | /* VT-d pages must always be _smaller_ than MM pages. Otherwise things | 118 | /* VT-d pages must always be _smaller_ than MM pages. Otherwise things |
76 | are never going to work. */ | 119 | are never going to work. */ |
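
Note: the helpers moved to the top of intel-iommu.c above encode the VT-d page-table geometry: each level resolves 9 address bits, and the adjusted guest address width (AGAW) maps to a level count and an address width of 30 + 9*agaw bits. A stand-alone rendering of that arithmetic, with values copied from the hunk:

#include <stdio.h>

#define LEVEL_STRIDE (9)

static int agaw_to_level(int agaw) { return agaw + 2; }
static int agaw_to_width(int agaw) { return 30 + agaw * LEVEL_STRIDE; }
static unsigned int level_to_offset_bits(int level) { return (level - 1) * LEVEL_STRIDE; }
static unsigned long level_size(int level) { return 1UL << level_to_offset_bits(level); }

int main(void)
{
	int agaw;

	for (agaw = 0; agaw <= 2; agaw++)
		printf("agaw %d -> %d levels, %d-bit guest address width\n",
		       agaw, agaw_to_level(agaw), agaw_to_width(agaw));
	printf("a level-2 entry covers %lu level-1 pages\n", level_size(2));
	return 0;
}
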
@@ -434,8 +477,6 @@ void free_iova_mem(struct iova *iova) | |||
434 | } | 477 | } |
435 | 478 | ||
436 | 479 | ||
437 | static inline int width_to_agaw(int width); | ||
438 | |||
439 | static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw) | 480 | static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw) |
440 | { | 481 | { |
441 | unsigned long sagaw; | 482 | unsigned long sagaw; |
@@ -646,51 +687,6 @@ out: | |||
646 | spin_unlock_irqrestore(&iommu->lock, flags); | 687 | spin_unlock_irqrestore(&iommu->lock, flags); |
647 | } | 688 | } |
648 | 689 | ||
649 | /* page table handling */ | ||
650 | #define LEVEL_STRIDE (9) | ||
651 | #define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1) | ||
652 | |||
653 | static inline int agaw_to_level(int agaw) | ||
654 | { | ||
655 | return agaw + 2; | ||
656 | } | ||
657 | |||
658 | static inline int agaw_to_width(int agaw) | ||
659 | { | ||
660 | return 30 + agaw * LEVEL_STRIDE; | ||
661 | |||
662 | } | ||
663 | |||
664 | static inline int width_to_agaw(int width) | ||
665 | { | ||
666 | return (width - 30) / LEVEL_STRIDE; | ||
667 | } | ||
668 | |||
669 | static inline unsigned int level_to_offset_bits(int level) | ||
670 | { | ||
671 | return (level - 1) * LEVEL_STRIDE; | ||
672 | } | ||
673 | |||
674 | static inline int pfn_level_offset(unsigned long pfn, int level) | ||
675 | { | ||
676 | return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK; | ||
677 | } | ||
678 | |||
679 | static inline unsigned long level_mask(int level) | ||
680 | { | ||
681 | return -1UL << level_to_offset_bits(level); | ||
682 | } | ||
683 | |||
684 | static inline unsigned long level_size(int level) | ||
685 | { | ||
686 | return 1UL << level_to_offset_bits(level); | ||
687 | } | ||
688 | |||
689 | static inline unsigned long align_to_level(unsigned long pfn, int level) | ||
690 | { | ||
691 | return (pfn + level_size(level) - 1) & level_mask(level); | ||
692 | } | ||
693 | |||
694 | static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, | 690 | static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, |
695 | unsigned long pfn) | 691 | unsigned long pfn) |
696 | { | 692 | { |
@@ -3761,6 +3757,33 @@ static void __devinit quirk_iommu_rwbf(struct pci_dev *dev) | |||
3761 | 3757 | ||
3762 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf); | 3758 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf); |
3763 | 3759 | ||
3760 | #define GGC 0x52 | ||
3761 | #define GGC_MEMORY_SIZE_MASK (0xf << 8) | ||
3762 | #define GGC_MEMORY_SIZE_NONE (0x0 << 8) | ||
3763 | #define GGC_MEMORY_SIZE_1M (0x1 << 8) | ||
3764 | #define GGC_MEMORY_SIZE_2M (0x3 << 8) | ||
3765 | #define GGC_MEMORY_VT_ENABLED (0x8 << 8) | ||
3766 | #define GGC_MEMORY_SIZE_2M_VT (0x9 << 8) | ||
3767 | #define GGC_MEMORY_SIZE_3M_VT (0xa << 8) | ||
3768 | #define GGC_MEMORY_SIZE_4M_VT (0xb << 8) | ||
3769 | |||
3770 | static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev) | ||
3771 | { | ||
3772 | unsigned short ggc; | ||
3773 | |||
3774 | if (pci_read_config_word(dev, GGC, &ggc)) | ||
3775 | return; | ||
3776 | |||
3777 | if (!(ggc & GGC_MEMORY_VT_ENABLED)) { | ||
3778 | printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n"); | ||
3779 | dmar_map_gfx = 0; | ||
3780 | } | ||
3781 | } | ||
3782 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt); | ||
3783 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt); | ||
3784 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt); | ||
3785 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt); | ||
3786 | |||
3764 | /* On Tylersburg chipsets, some BIOSes have been known to enable the | 3787 | /* On Tylersburg chipsets, some BIOSes have been known to enable the |
3765 | ISOCH DMAR unit for the Azalia sound device, but not give it any | 3788 | ISOCH DMAR unit for the Azalia sound device, but not give it any |
3766 | TLB entries, which causes it to deadlock. Check for that. We do | 3789 | TLB entries, which causes it to deadlock. Check for that. We do |
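
Note: the Calpella quirk added above reads the graphics control (GGC) word and disables IOMMU translation for graphics when the BIOS left the VT-enabled bit clear, i.e. when no shadow GTT was allocated. A sketch of the decode step only; the sample GGC values are made up.

#include <stdio.h>

#define GGC_MEMORY_VT_ENABLED	(0x8 << 8)

static void check_ggc(unsigned short ggc)
{
	if (!(ggc & GGC_MEMORY_VT_ENABLED))
		printf("GGC=0x%04x: no shadow GTT, graphics IOMMU would be disabled\n", ggc);
	else
		printf("GGC=0x%04x: shadow GTT present, graphics mapping stays on\n", ggc);
}

int main(void)
{
	check_ggc(0x0130);	/* VT bit clear */
	check_ggc(0x0930);	/* VT bit set   */
	return 0;
}
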
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index ce6a3666b3d9..553d8ee55c1c 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c | |||
@@ -608,7 +608,7 @@ int pci_iov_resource_bar(struct pci_dev *dev, int resno, | |||
608 | * the VF BAR size multiplied by the number of VFs. The alignment | 608 | * the VF BAR size multiplied by the number of VFs. The alignment |
609 | * is just the VF BAR size. | 609 | * is just the VF BAR size. |
610 | */ | 610 | */ |
611 | int pci_sriov_resource_alignment(struct pci_dev *dev, int resno) | 611 | resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno) |
612 | { | 612 | { |
613 | struct resource tmp; | 613 | struct resource tmp; |
614 | enum pci_bar_type type; | 614 | enum pci_bar_type type; |
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 679c39de6a89..6beb11b617a9 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h | |||
@@ -140,8 +140,10 @@ static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { } | |||
140 | 140 | ||
141 | #ifdef CONFIG_PCIEAER | 141 | #ifdef CONFIG_PCIEAER |
142 | void pci_no_aer(void); | 142 | void pci_no_aer(void); |
143 | bool pci_aer_available(void); | ||
143 | #else | 144 | #else |
144 | static inline void pci_no_aer(void) { } | 145 | static inline void pci_no_aer(void) { } |
146 | static inline bool pci_aer_available(void) { return false; } | ||
145 | #endif | 147 | #endif |
146 | 148 | ||
147 | static inline int pci_no_d1d2(struct pci_dev *dev) | 149 | static inline int pci_no_d1d2(struct pci_dev *dev) |
@@ -262,7 +264,8 @@ extern int pci_iov_init(struct pci_dev *dev); | |||
262 | extern void pci_iov_release(struct pci_dev *dev); | 264 | extern void pci_iov_release(struct pci_dev *dev); |
263 | extern int pci_iov_resource_bar(struct pci_dev *dev, int resno, | 265 | extern int pci_iov_resource_bar(struct pci_dev *dev, int resno, |
264 | enum pci_bar_type *type); | 266 | enum pci_bar_type *type); |
265 | extern int pci_sriov_resource_alignment(struct pci_dev *dev, int resno); | 267 | extern resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, |
268 | int resno); | ||
266 | extern void pci_restore_iov_state(struct pci_dev *dev); | 269 | extern void pci_restore_iov_state(struct pci_dev *dev); |
267 | extern int pci_iov_bus_range(struct pci_bus *bus); | 270 | extern int pci_iov_bus_range(struct pci_bus *bus); |
268 | 271 | ||
@@ -318,7 +321,7 @@ static inline int pci_ats_enabled(struct pci_dev *dev) | |||
318 | } | 321 | } |
319 | #endif /* CONFIG_PCI_IOV */ | 322 | #endif /* CONFIG_PCI_IOV */ |
320 | 323 | ||
321 | static inline int pci_resource_alignment(struct pci_dev *dev, | 324 | static inline resource_size_t pci_resource_alignment(struct pci_dev *dev, |
322 | struct resource *res) | 325 | struct resource *res) |
323 | { | 326 | { |
324 | #ifdef CONFIG_PCI_IOV | 327 | #ifdef CONFIG_PCI_IOV |
diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile index ea654545e7c4..00c62df5a9fc 100644 --- a/drivers/pci/pcie/Makefile +++ b/drivers/pci/pcie/Makefile | |||
@@ -6,10 +6,11 @@ | |||
6 | obj-$(CONFIG_PCIEASPM) += aspm.o | 6 | obj-$(CONFIG_PCIEASPM) += aspm.o |
7 | 7 | ||
8 | pcieportdrv-y := portdrv_core.o portdrv_pci.o portdrv_bus.o | 8 | pcieportdrv-y := portdrv_core.o portdrv_pci.o portdrv_bus.o |
9 | pcieportdrv-$(CONFIG_ACPI) += portdrv_acpi.o | ||
9 | 10 | ||
10 | obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o | 11 | obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o |
11 | 12 | ||
12 | # Build PCI Express AER if needed | 13 | # Build PCI Express AER if needed |
13 | obj-$(CONFIG_PCIEAER) += aer/ | 14 | obj-$(CONFIG_PCIEAER) += aer/ |
14 | 15 | ||
15 | obj-$(CONFIG_PCIE_PME) += pme/ | 16 | obj-$(CONFIG_PCIE_PME) += pme.o |
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c index 484cc55194b8..f409948e1a9b 100644 --- a/drivers/pci/pcie/aer/aerdrv.c +++ b/drivers/pci/pcie/aer/aerdrv.c | |||
@@ -72,6 +72,11 @@ void pci_no_aer(void) | |||
72 | pcie_aer_disable = 1; /* has priority over 'forceload' */ | 72 | pcie_aer_disable = 1; /* has priority over 'forceload' */ |
73 | } | 73 | } |
74 | 74 | ||
75 | bool pci_aer_available(void) | ||
76 | { | ||
77 | return !pcie_aer_disable && pci_msi_enabled(); | ||
78 | } | ||
79 | |||
75 | static int set_device_error_reporting(struct pci_dev *dev, void *data) | 80 | static int set_device_error_reporting(struct pci_dev *dev, void *data) |
76 | { | 81 | { |
77 | bool enable = *((bool *)data); | 82 | bool enable = *((bool *)data); |
@@ -411,9 +416,7 @@ static void aer_error_resume(struct pci_dev *dev) | |||
411 | */ | 416 | */ |
412 | static int __init aer_service_init(void) | 417 | static int __init aer_service_init(void) |
413 | { | 418 | { |
414 | if (pcie_aer_disable) | 419 | if (!pci_aer_available()) |
415 | return -ENXIO; | ||
416 | if (!pci_msi_enabled()) | ||
417 | return -ENXIO; | 420 | return -ENXIO; |
418 | return pcie_port_service_register(&aerdriver); | 421 | return pcie_port_service_register(&aerdriver); |
419 | } | 422 | } |
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c index f278d7b0d95d..2bb9b8972211 100644 --- a/drivers/pci/pcie/aer/aerdrv_acpi.c +++ b/drivers/pci/pcie/aer/aerdrv_acpi.c | |||
@@ -19,42 +19,6 @@ | |||
19 | #include <acpi/apei.h> | 19 | #include <acpi/apei.h> |
20 | #include "aerdrv.h" | 20 | #include "aerdrv.h" |
21 | 21 | ||
22 | /** | ||
23 | * aer_osc_setup - run ACPI _OSC method | ||
24 | * @pciedev: pcie_device which AER is being enabled on | ||
25 | * | ||
26 | * @return: Zero on success. Nonzero otherwise. | ||
27 | * | ||
28 | * Invoked when PCIe bus loads AER service driver. To avoid conflict with | ||
29 | * BIOS AER support requires BIOS to yield AER control to OS native driver. | ||
30 | **/ | ||
31 | int aer_osc_setup(struct pcie_device *pciedev) | ||
32 | { | ||
33 | acpi_status status = AE_NOT_FOUND; | ||
34 | struct pci_dev *pdev = pciedev->port; | ||
35 | acpi_handle handle = NULL; | ||
36 | |||
37 | if (acpi_pci_disabled) | ||
38 | return -1; | ||
39 | |||
40 | handle = acpi_find_root_bridge_handle(pdev); | ||
41 | if (handle) { | ||
42 | status = acpi_pci_osc_control_set(handle, | ||
43 | OSC_PCI_EXPRESS_AER_CONTROL | | ||
44 | OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); | ||
45 | } | ||
46 | |||
47 | if (ACPI_FAILURE(status)) { | ||
48 | dev_printk(KERN_DEBUG, &pciedev->device, "AER service couldn't " | ||
49 | "init device: %s\n", | ||
50 | (status == AE_SUPPORT || status == AE_NOT_FOUND) ? | ||
51 | "no _OSC support" : "_OSC failed"); | ||
52 | return -1; | ||
53 | } | ||
54 | |||
55 | return 0; | ||
56 | } | ||
57 | |||
58 | #ifdef CONFIG_ACPI_APEI | 22 | #ifdef CONFIG_ACPI_APEI |
59 | static inline int hest_match_pci(struct acpi_hest_aer_common *p, | 23 | static inline int hest_match_pci(struct acpi_hest_aer_common *p, |
60 | struct pci_dev *pci) | 24 | struct pci_dev *pci) |
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c index fc0b5a93e1de..29e268fadf14 100644 --- a/drivers/pci/pcie/aer/aerdrv_core.c +++ b/drivers/pci/pcie/aer/aerdrv_core.c | |||
@@ -772,22 +772,10 @@ void aer_isr(struct work_struct *work) | |||
772 | */ | 772 | */ |
773 | int aer_init(struct pcie_device *dev) | 773 | int aer_init(struct pcie_device *dev) |
774 | { | 774 | { |
775 | if (pcie_aer_get_firmware_first(dev->port)) { | ||
776 | dev_printk(KERN_DEBUG, &dev->device, | ||
777 | "PCIe errors handled by platform firmware.\n"); | ||
778 | goto out; | ||
779 | } | ||
780 | |||
781 | if (aer_osc_setup(dev)) | ||
782 | goto out; | ||
783 | |||
784 | return 0; | ||
785 | out: | ||
786 | if (forceload) { | 775 | if (forceload) { |
787 | dev_printk(KERN_DEBUG, &dev->device, | 776 | dev_printk(KERN_DEBUG, &dev->device, |
788 | "aerdrv forceload requested.\n"); | 777 | "aerdrv forceload requested.\n"); |
789 | pcie_aer_force_firmware_first(dev->port, 0); | 778 | pcie_aer_force_firmware_first(dev->port, 0); |
790 | return 0; | ||
791 | } | 779 | } |
792 | return -ENXIO; | 780 | return 0; |
793 | } | 781 | } |
diff --git a/drivers/pci/pcie/pme/pcie_pme.c b/drivers/pci/pcie/pme.c index bbdea18693d9..2f3c90407227 100644 --- a/drivers/pci/pcie/pme/pcie_pme.c +++ b/drivers/pci/pcie/pme.c | |||
@@ -23,38 +23,13 @@ | |||
23 | #include <linux/pci-acpi.h> | 23 | #include <linux/pci-acpi.h> |
24 | #include <linux/pm_runtime.h> | 24 | #include <linux/pm_runtime.h> |
25 | 25 | ||
26 | #include "../../pci.h" | 26 | #include "../pci.h" |
27 | #include "pcie_pme.h" | 27 | #include "portdrv.h" |
28 | 28 | ||
29 | #define PCI_EXP_RTSTA_PME 0x10000 /* PME status */ | 29 | #define PCI_EXP_RTSTA_PME 0x10000 /* PME status */ |
30 | #define PCI_EXP_RTSTA_PENDING 0x20000 /* PME pending */ | 30 | #define PCI_EXP_RTSTA_PENDING 0x20000 /* PME pending */ |
31 | 31 | ||
32 | /* | 32 | /* |
33 | * If set, this switch will prevent the PCIe root port PME service driver from | ||
34 | * being registered. Consequently, the interrupt-based PCIe PME signaling will | ||
35 | * not be used by any PCIe root ports in that case. | ||
36 | */ | ||
37 | static bool pcie_pme_disabled = true; | ||
38 | |||
39 | /* | ||
40 | * The PCI Express Base Specification 2.0, Section 6.1.8, states the following: | ||
41 | * "In order to maintain compatibility with non-PCI Express-aware system | ||
42 | * software, system power management logic must be configured by firmware to use | ||
43 | * the legacy mechanism of signaling PME by default. PCI Express-aware system | ||
44 | * software must notify the firmware prior to enabling native, interrupt-based | ||
45 | * PME signaling." However, if the platform doesn't provide us with a suitable | ||
46 | * notification mechanism or the notification fails, it is not clear whether or | ||
47 | * not we are supposed to use the interrupt-based PCIe PME signaling. The | ||
48 | * switch below can be used to indicate the desired behaviour. When set, it | ||
49 | * will make the kernel use the interrupt-based PCIe PME signaling regardless of | ||
50 | * the platform notification status, although the kernel will attempt to notify | ||
51 | * the platform anyway. When unset, it will prevent the kernel from using the | ||
52 | * the interrupt-based PCIe PME signaling if the platform notification fails, | ||
53 | * which is the default. | ||
54 | */ | ||
55 | static bool pcie_pme_force_enable; | ||
56 | |||
57 | /* | ||
58 | * If this switch is set, MSI will not be used for PCIe PME signaling. This | 33 | * If this switch is set, MSI will not be used for PCIe PME signaling. This |
59 | * causes the PCIe port driver to use INTx interrupts only, but it turns out | 34 | * causes the PCIe port driver to use INTx interrupts only, but it turns out |
60 | * that using MSI for PCIe PME signaling doesn't play well with PCIe PME-based | 35 | * that using MSI for PCIe PME signaling doesn't play well with PCIe PME-based |
@@ -64,38 +39,13 @@ bool pcie_pme_msi_disabled; | |||
64 | 39 | ||
65 | static int __init pcie_pme_setup(char *str) | 40 | static int __init pcie_pme_setup(char *str) |
66 | { | 41 | { |
67 | if (!strncmp(str, "auto", 4)) | 42 | if (!strncmp(str, "nomsi", 5)) |
68 | pcie_pme_disabled = false; | 43 | pcie_pme_msi_disabled = true; |
69 | else if (!strncmp(str, "force", 5)) | ||
70 | pcie_pme_force_enable = true; | ||
71 | |||
72 | str = strchr(str, ','); | ||
73 | if (str) { | ||
74 | str++; | ||
75 | str += strspn(str, " \t"); | ||
76 | if (*str && !strcmp(str, "nomsi")) | ||
77 | pcie_pme_msi_disabled = true; | ||
78 | } | ||
79 | 44 | ||
80 | return 1; | 45 | return 1; |
81 | } | 46 | } |
82 | __setup("pcie_pme=", pcie_pme_setup); | 47 | __setup("pcie_pme=", pcie_pme_setup); |
83 | 48 | ||
84 | /** | ||
85 | * pcie_pme_platform_setup - Ensure that the kernel controls the PCIe PME. | ||
86 | * @srv: PCIe PME root port service to use for carrying out the check. | ||
87 | * | ||
88 | * Notify the platform that the native PCIe PME is going to be used and return | ||
89 | * 'true' if the control of the PCIe PME registers has been acquired from the | ||
90 | * platform. | ||
91 | */ | ||
92 | static bool pcie_pme_platform_setup(struct pcie_device *srv) | ||
93 | { | ||
94 | if (!pcie_pme_platform_notify(srv)) | ||
95 | return true; | ||
96 | return pcie_pme_force_enable; | ||
97 | } | ||
98 | |||
99 | struct pcie_pme_service_data { | 49 | struct pcie_pme_service_data { |
100 | spinlock_t lock; | 50 | spinlock_t lock; |
101 | struct pcie_device *srv; | 51 | struct pcie_device *srv; |
@@ -108,7 +58,7 @@ struct pcie_pme_service_data { | |||
108 | * @dev: PCIe root port or event collector. | 58 | * @dev: PCIe root port or event collector. |
109 | * @enable: Enable or disable the interrupt. | 59 | * @enable: Enable or disable the interrupt. |
110 | */ | 60 | */ |
111 | static void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable) | 61 | void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable) |
112 | { | 62 | { |
113 | int rtctl_pos; | 63 | int rtctl_pos; |
114 | u16 rtctl; | 64 | u16 rtctl; |
@@ -417,9 +367,6 @@ static int pcie_pme_probe(struct pcie_device *srv) | |||
417 | struct pcie_pme_service_data *data; | 367 | struct pcie_pme_service_data *data; |
418 | int ret; | 368 | int ret; |
419 | 369 | ||
420 | if (!pcie_pme_platform_setup(srv)) | ||
421 | return -EACCES; | ||
422 | |||
423 | data = kzalloc(sizeof(*data), GFP_KERNEL); | 370 | data = kzalloc(sizeof(*data), GFP_KERNEL); |
424 | if (!data) | 371 | if (!data) |
425 | return -ENOMEM; | 372 | return -ENOMEM; |
@@ -509,8 +456,7 @@ static struct pcie_port_service_driver pcie_pme_driver = { | |||
509 | */ | 456 | */ |
510 | static int __init pcie_pme_service_init(void) | 457 | static int __init pcie_pme_service_init(void) |
511 | { | 458 | { |
512 | return pcie_pme_disabled ? | 459 | return pcie_port_service_register(&pcie_pme_driver); |
513 | -ENODEV : pcie_port_service_register(&pcie_pme_driver); | ||
514 | } | 460 | } |
515 | 461 | ||
516 | module_init(pcie_pme_service_init); | 462 | module_init(pcie_pme_service_init); |
diff --git a/drivers/pci/pcie/pme/Makefile b/drivers/pci/pcie/pme/Makefile deleted file mode 100644 index 8b9238053080..000000000000 --- a/drivers/pci/pcie/pme/Makefile +++ /dev/null | |||
@@ -1,8 +0,0 @@ | |||
1 | # | ||
2 | # Makefile for PCI-Express Root Port PME signaling driver | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_PCIE_PME) += pmedriver.o | ||
6 | |||
7 | pmedriver-objs := pcie_pme.o | ||
8 | pmedriver-$(CONFIG_ACPI) += pcie_pme_acpi.o | ||
diff --git a/drivers/pci/pcie/pme/pcie_pme.h b/drivers/pci/pcie/pme/pcie_pme.h deleted file mode 100644 index b30d2b7c9775..000000000000 --- a/drivers/pci/pcie/pme/pcie_pme.h +++ /dev/null | |||
@@ -1,28 +0,0 @@ | |||
1 | /* | ||
2 | * drivers/pci/pcie/pme/pcie_pme.h | ||
3 | * | ||
4 | * PCI Express Root Port PME signaling support | ||
5 | * | ||
6 | * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. | ||
7 | */ | ||
8 | |||
9 | #ifndef _PCIE_PME_H_ | ||
10 | #define _PCIE_PME_H_ | ||
11 | |||
12 | struct pcie_device; | ||
13 | |||
14 | #ifdef CONFIG_ACPI | ||
15 | extern int pcie_pme_acpi_setup(struct pcie_device *srv); | ||
16 | |||
17 | static inline int pcie_pme_platform_notify(struct pcie_device *srv) | ||
18 | { | ||
19 | return pcie_pme_acpi_setup(srv); | ||
20 | } | ||
21 | #else /* !CONFIG_ACPI */ | ||
22 | static inline int pcie_pme_platform_notify(struct pcie_device *srv) | ||
23 | { | ||
24 | return 0; | ||
25 | } | ||
26 | #endif /* !CONFIG_ACPI */ | ||
27 | |||
28 | #endif | ||
diff --git a/drivers/pci/pcie/pme/pcie_pme_acpi.c b/drivers/pci/pcie/pme/pcie_pme_acpi.c deleted file mode 100644 index 83ab2287ae3f..000000000000 --- a/drivers/pci/pcie/pme/pcie_pme_acpi.c +++ /dev/null | |||
@@ -1,54 +0,0 @@ | |||
1 | /* | ||
2 | * PCIe Native PME support, ACPI-related part | ||
3 | * | ||
4 | * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License V2. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | ||
10 | |||
11 | #include <linux/pci.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/errno.h> | ||
14 | #include <linux/acpi.h> | ||
15 | #include <linux/pci-acpi.h> | ||
16 | #include <linux/pcieport_if.h> | ||
17 | |||
18 | /** | ||
19 | * pcie_pme_acpi_setup - Request the ACPI BIOS to release control over PCIe PME. | ||
20 | * @srv - PCIe PME service for a root port or event collector. | ||
21 | * | ||
22 | * Invoked when the PCIe bus type loads PCIe PME service driver. To avoid | ||
23 | * conflict with the BIOS PCIe support requires the BIOS to yield PCIe PME | ||
24 | * control to the kernel. | ||
25 | */ | ||
26 | int pcie_pme_acpi_setup(struct pcie_device *srv) | ||
27 | { | ||
28 | acpi_status status = AE_NOT_FOUND; | ||
29 | struct pci_dev *port = srv->port; | ||
30 | acpi_handle handle; | ||
31 | int error = 0; | ||
32 | |||
33 | if (acpi_pci_disabled) | ||
34 | return -ENOSYS; | ||
35 | |||
36 | dev_info(&port->dev, "Requesting control of PCIe PME from ACPI BIOS\n"); | ||
37 | |||
38 | handle = acpi_find_root_bridge_handle(port); | ||
39 | if (!handle) | ||
40 | return -EINVAL; | ||
41 | |||
42 | status = acpi_pci_osc_control_set(handle, | ||
43 | OSC_PCI_EXPRESS_PME_CONTROL | | ||
44 | OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); | ||
45 | if (ACPI_FAILURE(status)) { | ||
46 | dev_info(&port->dev, | ||
47 | "Failed to receive control of PCIe PME service: %s\n", | ||
48 | (status == AE_SUPPORT || status == AE_NOT_FOUND) ? | ||
49 | "no _OSC support" : "ACPI _OSC failed"); | ||
50 | error = -ENODEV; | ||
51 | } | ||
52 | |||
53 | return error; | ||
54 | } | ||
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h index 813a5c3427b6..7b5aba0a3291 100644 --- a/drivers/pci/pcie/portdrv.h +++ b/drivers/pci/pcie/portdrv.h | |||
@@ -20,6 +20,9 @@ | |||
20 | 20 | ||
21 | #define get_descriptor_id(type, service) (((type - 4) << 4) | service) | 21 | #define get_descriptor_id(type, service) (((type - 4) << 4) | service) |
22 | 22 | ||
23 | extern bool pcie_ports_disabled; | ||
24 | extern bool pcie_ports_auto; | ||
25 | |||
23 | extern struct bus_type pcie_port_bus_type; | 26 | extern struct bus_type pcie_port_bus_type; |
24 | extern int pcie_port_device_register(struct pci_dev *dev); | 27 | extern int pcie_port_device_register(struct pci_dev *dev); |
25 | #ifdef CONFIG_PM | 28 | #ifdef CONFIG_PM |
@@ -30,6 +33,8 @@ extern void pcie_port_device_remove(struct pci_dev *dev); | |||
30 | extern int __must_check pcie_port_bus_register(void); | 33 | extern int __must_check pcie_port_bus_register(void); |
31 | extern void pcie_port_bus_unregister(void); | 34 | extern void pcie_port_bus_unregister(void); |
32 | 35 | ||
36 | struct pci_dev; | ||
37 | |||
33 | #ifdef CONFIG_PCIE_PME | 38 | #ifdef CONFIG_PCIE_PME |
34 | extern bool pcie_pme_msi_disabled; | 39 | extern bool pcie_pme_msi_disabled; |
35 | 40 | ||
@@ -42,9 +47,26 @@ static inline bool pcie_pme_no_msi(void) | |||
42 | { | 47 | { |
43 | return pcie_pme_msi_disabled; | 48 | return pcie_pme_msi_disabled; |
44 | } | 49 | } |
50 | |||
51 | extern void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable); | ||
45 | #else /* !CONFIG_PCIE_PME */ | 52 | #else /* !CONFIG_PCIE_PME */ |
46 | static inline void pcie_pme_disable_msi(void) {} | 53 | static inline void pcie_pme_disable_msi(void) {} |
47 | static inline bool pcie_pme_no_msi(void) { return false; } | 54 | static inline bool pcie_pme_no_msi(void) { return false; } |
55 | static inline void pcie_pme_interrupt_enable(struct pci_dev *dev, bool en) {} | ||
48 | #endif /* !CONFIG_PCIE_PME */ | 56 | #endif /* !CONFIG_PCIE_PME */ |
49 | 57 | ||
58 | #ifdef CONFIG_ACPI | ||
59 | extern int pcie_port_acpi_setup(struct pci_dev *port, int *mask); | ||
60 | |||
61 | static inline int pcie_port_platform_notify(struct pci_dev *port, int *mask) | ||
62 | { | ||
63 | return pcie_port_acpi_setup(port, mask); | ||
64 | } | ||
65 | #else /* !CONFIG_ACPI */ | ||
66 | static inline int pcie_port_platform_notify(struct pci_dev *port, int *mask) | ||
67 | { | ||
68 | return 0; | ||
69 | } | ||
70 | #endif /* !CONFIG_ACPI */ | ||
71 | |||
50 | #endif /* _PORTDRV_H_ */ | 72 | #endif /* _PORTDRV_H_ */ |
diff --git a/drivers/pci/pcie/portdrv_acpi.c b/drivers/pci/pcie/portdrv_acpi.c new file mode 100644 index 000000000000..b7c4cb1ccb23 --- /dev/null +++ b/drivers/pci/pcie/portdrv_acpi.c | |||
@@ -0,0 +1,77 @@ | |||
1 | /* | ||
2 | * PCIe Port Native Services Support, ACPI-Related Part | ||
3 | * | ||
4 | * Copyright (C) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License V2. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | ||
10 | |||
11 | #include <linux/pci.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/errno.h> | ||
14 | #include <linux/acpi.h> | ||
15 | #include <linux/pci-acpi.h> | ||
16 | #include <linux/pcieport_if.h> | ||
17 | |||
18 | #include "aer/aerdrv.h" | ||
19 | #include "../pci.h" | ||
20 | |||
21 | /** | ||
22 | * pcie_port_acpi_setup - Request the BIOS to release control of PCIe services. | ||
23 | * @port: PCIe Port service for a root port or event collector. | ||
24 | * @srv_mask: Bit mask of services that can be enabled for @port. | ||
25 | * | ||
26 | * Invoked when @port is identified as a PCIe port device. To avoid conflicts | ||
27 | * with the BIOS PCIe port native services support requires the BIOS to yield | ||
28 | * control of these services to the kernel. The mask of services that the BIOS | ||
29 | * allows to be enabled for @port is written to @srv_mask. | ||
30 | * | ||
31 | * NOTE: It turns out that we cannot do that for individual port services | ||
32 | * separately, because that would make some systems work incorrectly. | ||
33 | */ | ||
34 | int pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask) | ||
35 | { | ||
36 | acpi_status status; | ||
37 | acpi_handle handle; | ||
38 | u32 flags; | ||
39 | |||
40 | if (acpi_pci_disabled) | ||
41 | return 0; | ||
42 | |||
43 | handle = acpi_find_root_bridge_handle(port); | ||
44 | if (!handle) | ||
45 | return -EINVAL; | ||
46 | |||
47 | flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL | ||
48 | | OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | ||
49 | | OSC_PCI_EXPRESS_PME_CONTROL; | ||
50 | |||
51 | if (pci_aer_available()) { | ||
52 | if (pcie_aer_get_firmware_first(port)) | ||
53 | dev_dbg(&port->dev, "PCIe errors handled by BIOS.\n"); | ||
54 | else | ||
55 | flags |= OSC_PCI_EXPRESS_AER_CONTROL; | ||
56 | } | ||
57 | |||
58 | status = acpi_pci_osc_control_set(handle, &flags, | ||
59 | OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); | ||
60 | if (ACPI_FAILURE(status)) { | ||
61 | dev_dbg(&port->dev, "ACPI _OSC request failed (code %d)\n", | ||
62 | status); | ||
63 | return -ENODEV; | ||
64 | } | ||
65 | |||
66 | dev_info(&port->dev, "ACPI _OSC control granted for 0x%02x\n", flags); | ||
67 | |||
68 | *srv_mask = PCIE_PORT_SERVICE_VC; | ||
69 | if (flags & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL) | ||
70 | *srv_mask |= PCIE_PORT_SERVICE_HP; | ||
71 | if (flags & OSC_PCI_EXPRESS_PME_CONTROL) | ||
72 | *srv_mask |= PCIE_PORT_SERVICE_PME; | ||
73 | if (flags & OSC_PCI_EXPRESS_AER_CONTROL) | ||
74 | *srv_mask |= PCIE_PORT_SERVICE_AER; | ||
75 | |||
76 | return 0; | ||
77 | } | ||
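
Note: pcie_port_acpi_setup() above asks _OSC for control of the port services in one shot and then translates the granted bits into the port-driver service mask. A minimal userspace sketch of that translation; the flag and service values below are hypothetical stand-ins, the real constants live in linux/pci-acpi.h and linux/pcieport_if.h.

#include <stdio.h>

#define OSC_HP_CONTROL	0x01
#define OSC_PME_CONTROL	0x04
#define OSC_AER_CONTROL	0x08

#define SERVICE_PME	0x01
#define SERVICE_AER	0x02
#define SERVICE_HP	0x04
#define SERVICE_VC	0x08

static int flags_to_services(unsigned int flags)
{
	int mask = SERVICE_VC;		/* VC does not depend on _OSC */

	if (flags & OSC_HP_CONTROL)
		mask |= SERVICE_HP;
	if (flags & OSC_PME_CONTROL)
		mask |= SERVICE_PME;
	if (flags & OSC_AER_CONTROL)
		mask |= SERVICE_AER;
	return mask;
}

int main(void)
{
	printf("granted HP+PME -> service mask 0x%x\n",
	       flags_to_services(OSC_HP_CONTROL | OSC_PME_CONTROL));
	return 0;
}
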
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index e73effbe402c..a9c222d79ebc 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c | |||
@@ -14,6 +14,8 @@ | |||
14 | #include <linux/string.h> | 14 | #include <linux/string.h> |
15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
16 | #include <linux/pcieport_if.h> | 16 | #include <linux/pcieport_if.h> |
17 | #include <linux/aer.h> | ||
18 | #include <linux/pci-aspm.h> | ||
17 | 19 | ||
18 | #include "../pci.h" | 20 | #include "../pci.h" |
19 | #include "portdrv.h" | 21 | #include "portdrv.h" |
@@ -236,24 +238,64 @@ static int get_port_device_capability(struct pci_dev *dev) | |||
236 | int services = 0, pos; | 238 | int services = 0, pos; |
237 | u16 reg16; | 239 | u16 reg16; |
238 | u32 reg32; | 240 | u32 reg32; |
241 | int cap_mask; | ||
242 | int err; | ||
243 | |||
244 | err = pcie_port_platform_notify(dev, &cap_mask); | ||
245 | if (pcie_ports_auto) { | ||
246 | if (err) { | ||
247 | pcie_no_aspm(); | ||
248 | return 0; | ||
249 | } | ||
250 | } else { | ||
251 | cap_mask = PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP | ||
252 | | PCIE_PORT_SERVICE_VC; | ||
253 | if (pci_aer_available()) | ||
254 | cap_mask |= PCIE_PORT_SERVICE_AER; | ||
255 | } | ||
239 | 256 | ||
240 | pos = pci_pcie_cap(dev); | 257 | pos = pci_pcie_cap(dev); |
241 | pci_read_config_word(dev, pos + PCI_EXP_FLAGS, ®16); | 258 | pci_read_config_word(dev, pos + PCI_EXP_FLAGS, ®16); |
242 | /* Hot-Plug Capable */ | 259 | /* Hot-Plug Capable */ |
243 | if (reg16 & PCI_EXP_FLAGS_SLOT) { | 260 | if ((cap_mask & PCIE_PORT_SERVICE_HP) && (reg16 & PCI_EXP_FLAGS_SLOT)) { |
244 | pci_read_config_dword(dev, pos + PCI_EXP_SLTCAP, ®32); | 261 | pci_read_config_dword(dev, pos + PCI_EXP_SLTCAP, ®32); |
245 | if (reg32 & PCI_EXP_SLTCAP_HPC) | 262 | if (reg32 & PCI_EXP_SLTCAP_HPC) { |
246 | services |= PCIE_PORT_SERVICE_HP; | 263 | services |= PCIE_PORT_SERVICE_HP; |
264 | /* | ||
265 | * Disable hot-plug interrupts in case they have been | ||
266 | * enabled by the BIOS and the hot-plug service driver | ||
267 | * is not loaded. | ||
268 | */ | ||
269 | pos += PCI_EXP_SLTCTL; | ||
270 | pci_read_config_word(dev, pos, ®16); | ||
271 | reg16 &= ~(PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE); | ||
272 | pci_write_config_word(dev, pos, reg16); | ||
273 | } | ||
247 | } | 274 | } |
248 | /* AER capable */ | 275 | /* AER capable */ |
249 | if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR)) | 276 | if ((cap_mask & PCIE_PORT_SERVICE_AER) |
277 | && pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR)) { | ||
250 | services |= PCIE_PORT_SERVICE_AER; | 278 | services |= PCIE_PORT_SERVICE_AER; |
279 | /* | ||
280 | * Disable AER on this port in case it's been enabled by the | ||
281 | * BIOS (the AER service driver will enable it when necessary). | ||
282 | */ | ||
283 | pci_disable_pcie_error_reporting(dev); | ||
284 | } | ||
251 | /* VC support */ | 285 | /* VC support */ |
252 | if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VC)) | 286 | if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VC)) |
253 | services |= PCIE_PORT_SERVICE_VC; | 287 | services |= PCIE_PORT_SERVICE_VC; |
254 | /* Root ports are capable of generating PME too */ | 288 | /* Root ports are capable of generating PME too */ |
255 | if (dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) | 289 | if ((cap_mask & PCIE_PORT_SERVICE_PME) |
290 | && dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) { | ||
256 | services |= PCIE_PORT_SERVICE_PME; | 291 | services |= PCIE_PORT_SERVICE_PME; |
292 | /* | ||
293 | * Disable PME interrupt on this port in case it's been enabled | ||
294 | * by the BIOS (the PME service driver will enable it when | ||
295 | * necessary). | ||
296 | */ | ||
297 | pcie_pme_interrupt_enable(dev, false); | ||
298 | } | ||
257 | 299 | ||
258 | return services; | 300 | return services; |
259 | } | 301 | } |
@@ -494,6 +536,9 @@ static void pcie_port_shutdown_service(struct device *dev) {} | |||
494 | */ | 536 | */ |
495 | int pcie_port_service_register(struct pcie_port_service_driver *new) | 537 | int pcie_port_service_register(struct pcie_port_service_driver *new) |
496 | { | 538 | { |
539 | if (pcie_ports_disabled) | ||
540 | return -ENODEV; | ||
541 | |||
497 | new->driver.name = (char *)new->name; | 542 | new->driver.name = (char *)new->name; |
498 | new->driver.bus = &pcie_port_bus_type; | 543 | new->driver.bus = &pcie_port_bus_type; |
499 | new->driver.probe = pcie_port_probe_service; | 544 | new->driver.probe = pcie_port_probe_service; |
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 3debed25e46b..f9033e190fb6 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/pcieport_if.h> | 15 | #include <linux/pcieport_if.h> |
16 | #include <linux/aer.h> | 16 | #include <linux/aer.h> |
17 | #include <linux/dmi.h> | 17 | #include <linux/dmi.h> |
18 | #include <linux/pci-aspm.h> | ||
18 | 19 | ||
19 | #include "portdrv.h" | 20 | #include "portdrv.h" |
20 | #include "aer/aerdrv.h" | 21 | #include "aer/aerdrv.h" |
@@ -29,6 +30,31 @@ MODULE_AUTHOR(DRIVER_AUTHOR); | |||
29 | MODULE_DESCRIPTION(DRIVER_DESC); | 30 | MODULE_DESCRIPTION(DRIVER_DESC); |
30 | MODULE_LICENSE("GPL"); | 31 | MODULE_LICENSE("GPL"); |
31 | 32 | ||
33 | /* If this switch is set, PCIe port native services should not be enabled. */ | ||
34 | bool pcie_ports_disabled; | ||
35 | |||
36 | /* | ||
37 | * If this switch is set, ACPI _OSC will be used to determine whether or not to | ||
38 | * enable PCIe port native services. | ||
39 | */ | ||
40 | bool pcie_ports_auto = true; | ||
41 | |||
42 | static int __init pcie_port_setup(char *str) | ||
43 | { | ||
44 | if (!strncmp(str, "compat", 6)) { | ||
45 | pcie_ports_disabled = true; | ||
46 | } else if (!strncmp(str, "native", 6)) { | ||
47 | pcie_ports_disabled = false; | ||
48 | pcie_ports_auto = false; | ||
49 | } else if (!strncmp(str, "auto", 4)) { | ||
50 | pcie_ports_disabled = false; | ||
51 | pcie_ports_auto = true; | ||
52 | } | ||
53 | |||
54 | return 1; | ||
55 | } | ||
56 | __setup("pcie_ports=", pcie_port_setup); | ||
57 | |||
32 | /* global data */ | 58 | /* global data */ |
33 | 59 | ||
34 | static int pcie_portdrv_restore_config(struct pci_dev *dev) | 60 | static int pcie_portdrv_restore_config(struct pci_dev *dev) |
@@ -301,6 +327,11 @@ static int __init pcie_portdrv_init(void) | |||
301 | { | 327 | { |
302 | int retval; | 328 | int retval; |
303 | 329 | ||
330 | if (pcie_ports_disabled) { | ||
331 | pcie_no_aspm(); | ||
332 | return -EACCES; | ||
333 | } | ||
334 | |||
304 | dmi_check_system(pcie_portdrv_dmi_table); | 335 | dmi_check_system(pcie_portdrv_dmi_table); |
305 | 336 | ||
306 | retval = pcie_port_bus_register(); | 337 | retval = pcie_port_bus_register(); |
@@ -315,11 +346,4 @@ static int __init pcie_portdrv_init(void) | |||
315 | return retval; | 346 | return retval; |
316 | } | 347 | } |
317 | 348 | ||
318 | static void __exit pcie_portdrv_exit(void) | ||
319 | { | ||
320 | pci_unregister_driver(&pcie_portdriver); | ||
321 | pcie_port_bus_unregister(); | ||
322 | } | ||
323 | |||
324 | module_init(pcie_portdrv_init); | 349 | module_init(pcie_portdrv_init); |
325 | module_exit(pcie_portdrv_exit); | ||
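
The new pcie_ports= boot parameter takes three keywords: "compat" disables the native port services entirely, "native" forces them on regardless of ACPI _OSC, and "auto" (the default) leaves the decision to _OSC. The dispatch is a plain strncmp chain; the following is a minimal standalone userspace sketch of the same logic (flag names mirror the kernel globals, the main() harness is purely illustrative):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static bool ports_disabled;      /* mirrors pcie_ports_disabled */
    static bool ports_auto = true;   /* mirrors pcie_ports_auto     */

    /* Parse the value passed after "pcie_ports=", as the __setup handler does. */
    static void parse_pcie_ports(const char *str)
    {
        if (!strncmp(str, "compat", 6)) {
            ports_disabled = true;
        } else if (!strncmp(str, "native", 6)) {
            ports_disabled = false;
            ports_auto = false;
        } else if (!strncmp(str, "auto", 4)) {
            ports_disabled = false;
            ports_auto = true;
        }
    }

    int main(void)
    {
        parse_pcie_ports("compat");
        printf("disabled=%d auto=%d\n", ports_disabled, ports_auto);
        return 0;
    }

Booting with pcie_ports=compat therefore makes pcie_portdrv_init() bail out early (returning -EACCES and calling pcie_no_aspm()), and pcie_port_service_register() refuses any service driver with -ENODEV.
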
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 89ed181cd90c..857ae01734a6 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -163,6 +163,26 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_d | |||
163 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs); | 163 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs); |
164 | 164 | ||
165 | /* | 165 | /* |
166 | * Intel NM10 "TigerPoint" LPC PM1a_STS.BM_STS must be clear | ||
167 | * for some HT machines to use C4 w/o hanging. | ||
168 | */ | ||
169 | static void __devinit quirk_tigerpoint_bm_sts(struct pci_dev *dev) | ||
170 | { | ||
171 | u32 pmbase; | ||
172 | u16 pm1a; | ||
173 | |||
174 | pci_read_config_dword(dev, 0x40, &pmbase); | ||
175 | pmbase = pmbase & 0xff80; | ||
176 | pm1a = inw(pmbase); | ||
177 | |||
178 | if (pm1a & 0x10) { | ||
179 | dev_info(&dev->dev, FW_BUG "TigerPoint LPC.BM_STS cleared\n"); | ||
180 | outw(0x10, pmbase); | ||
181 | } | ||
182 | } | ||
183 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk_tigerpoint_bm_sts); | ||
184 | |||
185 | /* | ||
166 | * Chipsets where PCI->PCI transfers vanish or hang | 186 | * Chipsets where PCI->PCI transfers vanish or hang |
167 | */ | 187 | */ |
168 | static void __devinit quirk_nopcipci(struct pci_dev *dev) | 188 | static void __devinit quirk_nopcipci(struct pci_dev *dev) |
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c index 659eaa0fc48f..968cfea04f74 100644 --- a/drivers/pci/slot.c +++ b/drivers/pci/slot.c | |||
@@ -49,7 +49,7 @@ static ssize_t address_read_file(struct pci_slot *slot, char *buf) | |||
49 | } | 49 | } |
50 | 50 | ||
51 | /* these strings match up with the values in pci_bus_speed */ | 51 | /* these strings match up with the values in pci_bus_speed */ |
52 | static char *pci_bus_speed_strings[] = { | 52 | static const char *pci_bus_speed_strings[] = { |
53 | "33 MHz PCI", /* 0x00 */ | 53 | "33 MHz PCI", /* 0x00 */ |
54 | "66 MHz PCI", /* 0x01 */ | 54 | "66 MHz PCI", /* 0x01 */ |
55 | "66 MHz PCI-X", /* 0x02 */ | 55 | "66 MHz PCI-X", /* 0x02 */ |
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c index 54aa1c238cb3..9ba4dade69a4 100644 --- a/drivers/pcmcia/pcmcia_resource.c +++ b/drivers/pcmcia/pcmcia_resource.c | |||
@@ -163,7 +163,7 @@ static int pcmcia_access_config(struct pcmcia_device *p_dev, | |||
163 | c = p_dev->function_config; | 163 | c = p_dev->function_config; |
164 | 164 | ||
165 | if (!(c->state & CONFIG_LOCKED)) { | 165 | if (!(c->state & CONFIG_LOCKED)) { |
166 | dev_dbg(&s->dev, "Configuration isnt't locked\n"); | 166 | dev_dbg(&p_dev->dev, "Configuration isnt't locked\n"); |
167 | mutex_unlock(&s->ops_mutex); | 167 | mutex_unlock(&s->ops_mutex); |
168 | return -EACCES; | 168 | return -EACCES; |
169 | } | 169 | } |
@@ -220,7 +220,7 @@ int pcmcia_map_mem_page(struct pcmcia_device *p_dev, window_handle_t wh, | |||
220 | s->win[w].card_start = offset; | 220 | s->win[w].card_start = offset; |
221 | ret = s->ops->set_mem_map(s, &s->win[w]); | 221 | ret = s->ops->set_mem_map(s, &s->win[w]); |
222 | if (ret) | 222 | if (ret) |
223 | dev_warn(&s->dev, "failed to set_mem_map\n"); | 223 | dev_warn(&p_dev->dev, "failed to set_mem_map\n"); |
224 | mutex_unlock(&s->ops_mutex); | 224 | mutex_unlock(&s->ops_mutex); |
225 | return ret; | 225 | return ret; |
226 | } /* pcmcia_map_mem_page */ | 226 | } /* pcmcia_map_mem_page */ |
@@ -244,18 +244,18 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev, | |||
244 | c = p_dev->function_config; | 244 | c = p_dev->function_config; |
245 | 245 | ||
246 | if (!(s->state & SOCKET_PRESENT)) { | 246 | if (!(s->state & SOCKET_PRESENT)) { |
247 | dev_dbg(&s->dev, "No card present\n"); | 247 | dev_dbg(&p_dev->dev, "No card present\n"); |
248 | ret = -ENODEV; | 248 | ret = -ENODEV; |
249 | goto unlock; | 249 | goto unlock; |
250 | } | 250 | } |
251 | if (!(c->state & CONFIG_LOCKED)) { | 251 | if (!(c->state & CONFIG_LOCKED)) { |
252 | dev_dbg(&s->dev, "Configuration isnt't locked\n"); | 252 | dev_dbg(&p_dev->dev, "Configuration isnt't locked\n"); |
253 | ret = -EACCES; | 253 | ret = -EACCES; |
254 | goto unlock; | 254 | goto unlock; |
255 | } | 255 | } |
256 | 256 | ||
257 | if (mod->Attributes & (CONF_IRQ_CHANGE_VALID | CONF_VCC_CHANGE_VALID)) { | 257 | if (mod->Attributes & (CONF_IRQ_CHANGE_VALID | CONF_VCC_CHANGE_VALID)) { |
258 | dev_dbg(&s->dev, | 258 | dev_dbg(&p_dev->dev, |
259 | "changing Vcc or IRQ is not allowed at this time\n"); | 259 | "changing Vcc or IRQ is not allowed at this time\n"); |
260 | ret = -EINVAL; | 260 | ret = -EINVAL; |
261 | goto unlock; | 261 | goto unlock; |
@@ -265,20 +265,22 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev, | |||
265 | if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) && | 265 | if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) && |
266 | (mod->Attributes & CONF_VPP2_CHANGE_VALID)) { | 266 | (mod->Attributes & CONF_VPP2_CHANGE_VALID)) { |
267 | if (mod->Vpp1 != mod->Vpp2) { | 267 | if (mod->Vpp1 != mod->Vpp2) { |
268 | dev_dbg(&s->dev, "Vpp1 and Vpp2 must be the same\n"); | 268 | dev_dbg(&p_dev->dev, |
269 | "Vpp1 and Vpp2 must be the same\n"); | ||
269 | ret = -EINVAL; | 270 | ret = -EINVAL; |
270 | goto unlock; | 271 | goto unlock; |
271 | } | 272 | } |
272 | s->socket.Vpp = mod->Vpp1; | 273 | s->socket.Vpp = mod->Vpp1; |
273 | if (s->ops->set_socket(s, &s->socket)) { | 274 | if (s->ops->set_socket(s, &s->socket)) { |
274 | dev_printk(KERN_WARNING, &s->dev, | 275 | dev_printk(KERN_WARNING, &p_dev->dev, |
275 | "Unable to set VPP\n"); | 276 | "Unable to set VPP\n"); |
276 | ret = -EIO; | 277 | ret = -EIO; |
277 | goto unlock; | 278 | goto unlock; |
278 | } | 279 | } |
279 | } else if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) || | 280 | } else if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) || |
280 | (mod->Attributes & CONF_VPP2_CHANGE_VALID)) { | 281 | (mod->Attributes & CONF_VPP2_CHANGE_VALID)) { |
281 | dev_dbg(&s->dev, "changing Vcc is not allowed at this time\n"); | 282 | dev_dbg(&p_dev->dev, |
283 | "changing Vcc is not allowed at this time\n"); | ||
282 | ret = -EINVAL; | 284 | ret = -EINVAL; |
283 | goto unlock; | 285 | goto unlock; |
284 | } | 286 | } |
@@ -401,7 +403,7 @@ int pcmcia_release_window(struct pcmcia_device *p_dev, struct resource *res) | |||
401 | win = &s->win[w]; | 403 | win = &s->win[w]; |
402 | 404 | ||
403 | if (!(p_dev->_win & CLIENT_WIN_REQ(w))) { | 405 | if (!(p_dev->_win & CLIENT_WIN_REQ(w))) { |
404 | dev_dbg(&s->dev, "not releasing unknown window\n"); | 406 | dev_dbg(&p_dev->dev, "not releasing unknown window\n"); |
405 | mutex_unlock(&s->ops_mutex); | 407 | mutex_unlock(&s->ops_mutex); |
406 | return -EINVAL; | 408 | return -EINVAL; |
407 | } | 409 | } |
@@ -439,7 +441,7 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev, | |||
439 | return -ENODEV; | 441 | return -ENODEV; |
440 | 442 | ||
441 | if (req->IntType & INT_CARDBUS) { | 443 | if (req->IntType & INT_CARDBUS) { |
442 | dev_dbg(&s->dev, "IntType may not be INT_CARDBUS\n"); | 444 | dev_dbg(&p_dev->dev, "IntType may not be INT_CARDBUS\n"); |
443 | return -EINVAL; | 445 | return -EINVAL; |
444 | } | 446 | } |
445 | 447 | ||
@@ -447,7 +449,7 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev, | |||
447 | c = p_dev->function_config; | 449 | c = p_dev->function_config; |
448 | if (c->state & CONFIG_LOCKED) { | 450 | if (c->state & CONFIG_LOCKED) { |
449 | mutex_unlock(&s->ops_mutex); | 451 | mutex_unlock(&s->ops_mutex); |
450 | dev_dbg(&s->dev, "Configuration is locked\n"); | 452 | dev_dbg(&p_dev->dev, "Configuration is locked\n"); |
451 | return -EACCES; | 453 | return -EACCES; |
452 | } | 454 | } |
453 | 455 | ||
@@ -455,7 +457,7 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev, | |||
455 | s->socket.Vpp = req->Vpp; | 457 | s->socket.Vpp = req->Vpp; |
456 | if (s->ops->set_socket(s, &s->socket)) { | 458 | if (s->ops->set_socket(s, &s->socket)) { |
457 | mutex_unlock(&s->ops_mutex); | 459 | mutex_unlock(&s->ops_mutex); |
458 | dev_printk(KERN_WARNING, &s->dev, | 460 | dev_printk(KERN_WARNING, &p_dev->dev, |
459 | "Unable to set socket state\n"); | 461 | "Unable to set socket state\n"); |
460 | return -EINVAL; | 462 | return -EINVAL; |
461 | } | 463 | } |
@@ -569,19 +571,20 @@ int pcmcia_request_io(struct pcmcia_device *p_dev) | |||
569 | int ret = -EINVAL; | 571 | int ret = -EINVAL; |
570 | 572 | ||
571 | mutex_lock(&s->ops_mutex); | 573 | mutex_lock(&s->ops_mutex); |
572 | dev_dbg(&s->dev, "pcmcia_request_io: %pR , %pR", &c->io[0], &c->io[1]); | 574 | dev_dbg(&p_dev->dev, "pcmcia_request_io: %pR , %pR", |
575 | &c->io[0], &c->io[1]); | ||
573 | 576 | ||
574 | if (!(s->state & SOCKET_PRESENT)) { | 577 | if (!(s->state & SOCKET_PRESENT)) { |
575 | dev_dbg(&s->dev, "pcmcia_request_io: No card present\n"); | 578 | dev_dbg(&p_dev->dev, "pcmcia_request_io: No card present\n"); |
576 | goto out; | 579 | goto out; |
577 | } | 580 | } |
578 | 581 | ||
579 | if (c->state & CONFIG_LOCKED) { | 582 | if (c->state & CONFIG_LOCKED) { |
580 | dev_dbg(&s->dev, "Configuration is locked\n"); | 583 | dev_dbg(&p_dev->dev, "Configuration is locked\n"); |
581 | goto out; | 584 | goto out; |
582 | } | 585 | } |
583 | if (c->state & CONFIG_IO_REQ) { | 586 | if (c->state & CONFIG_IO_REQ) { |
584 | dev_dbg(&s->dev, "IO already configured\n"); | 587 | dev_dbg(&p_dev->dev, "IO already configured\n"); |
585 | goto out; | 588 | goto out; |
586 | } | 589 | } |
587 | 590 | ||
@@ -592,7 +595,13 @@ int pcmcia_request_io(struct pcmcia_device *p_dev) | |||
592 | if (c->io[1].end) { | 595 | if (c->io[1].end) { |
593 | ret = alloc_io_space(s, &c->io[1], p_dev->io_lines); | 596 | ret = alloc_io_space(s, &c->io[1], p_dev->io_lines); |
594 | if (ret) { | 597 | if (ret) { |
598 | struct resource tmp = c->io[0]; | ||
599 | /* release the previously allocated resource */ | ||
595 | release_io_space(s, &c->io[0]); | 600 | release_io_space(s, &c->io[0]); |
601 | /* but preserve the settings, for they worked... */ | ||
602 | c->io[0].end = resource_size(&tmp); | ||
603 | c->io[0].start = tmp.start; | ||
604 | c->io[0].flags = tmp.flags; | ||
596 | goto out; | 605 | goto out; |
597 | } | 606 | } |
598 | } else | 607 | } else |
@@ -601,7 +610,7 @@ int pcmcia_request_io(struct pcmcia_device *p_dev) | |||
601 | c->state |= CONFIG_IO_REQ; | 610 | c->state |= CONFIG_IO_REQ; |
602 | p_dev->_io = 1; | 611 | p_dev->_io = 1; |
603 | 612 | ||
604 | dev_dbg(&s->dev, "pcmcia_request_io succeeded: %pR , %pR", | 613 | dev_dbg(&p_dev->dev, "pcmcia_request_io succeeded: %pR , %pR", |
605 | &c->io[0], &c->io[1]); | 614 | &c->io[0], &c->io[1]); |
606 | out: | 615 | out: |
607 | mutex_unlock(&s->ops_mutex); | 616 | mutex_unlock(&s->ops_mutex); |
@@ -800,7 +809,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha | |||
800 | int w; | 809 | int w; |
801 | 810 | ||
802 | if (!(s->state & SOCKET_PRESENT)) { | 811 | if (!(s->state & SOCKET_PRESENT)) { |
803 | dev_dbg(&s->dev, "No card present\n"); | 812 | dev_dbg(&p_dev->dev, "No card present\n"); |
804 | return -ENODEV; | 813 | return -ENODEV; |
805 | } | 814 | } |
806 | 815 | ||
@@ -809,12 +818,12 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha | |||
809 | req->Size = s->map_size; | 818 | req->Size = s->map_size; |
810 | align = (s->features & SS_CAP_MEM_ALIGN) ? req->Size : s->map_size; | 819 | align = (s->features & SS_CAP_MEM_ALIGN) ? req->Size : s->map_size; |
811 | if (req->Size & (s->map_size-1)) { | 820 | if (req->Size & (s->map_size-1)) { |
812 | dev_dbg(&s->dev, "invalid map size\n"); | 821 | dev_dbg(&p_dev->dev, "invalid map size\n"); |
813 | return -EINVAL; | 822 | return -EINVAL; |
814 | } | 823 | } |
815 | if ((req->Base && (s->features & SS_CAP_STATIC_MAP)) || | 824 | if ((req->Base && (s->features & SS_CAP_STATIC_MAP)) || |
816 | (req->Base & (align-1))) { | 825 | (req->Base & (align-1))) { |
817 | dev_dbg(&s->dev, "invalid base address\n"); | 826 | dev_dbg(&p_dev->dev, "invalid base address\n"); |
818 | return -EINVAL; | 827 | return -EINVAL; |
819 | } | 828 | } |
820 | if (req->Base) | 829 | if (req->Base) |
@@ -826,7 +835,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha | |||
826 | if (!(s->state & SOCKET_WIN_REQ(w))) | 835 | if (!(s->state & SOCKET_WIN_REQ(w))) |
827 | break; | 836 | break; |
828 | if (w == MAX_WIN) { | 837 | if (w == MAX_WIN) { |
829 | dev_dbg(&s->dev, "all windows are used already\n"); | 838 | dev_dbg(&p_dev->dev, "all windows are used already\n"); |
830 | mutex_unlock(&s->ops_mutex); | 839 | mutex_unlock(&s->ops_mutex); |
831 | return -EINVAL; | 840 | return -EINVAL; |
832 | } | 841 | } |
@@ -837,7 +846,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha | |||
837 | win->res = pcmcia_find_mem_region(req->Base, req->Size, align, | 846 | win->res = pcmcia_find_mem_region(req->Base, req->Size, align, |
838 | 0, s); | 847 | 0, s); |
839 | if (!win->res) { | 848 | if (!win->res) { |
840 | dev_dbg(&s->dev, "allocating mem region failed\n"); | 849 | dev_dbg(&p_dev->dev, "allocating mem region failed\n"); |
841 | mutex_unlock(&s->ops_mutex); | 850 | mutex_unlock(&s->ops_mutex); |
842 | return -EINVAL; | 851 | return -EINVAL; |
843 | } | 852 | } |
@@ -851,7 +860,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha | |||
851 | win->card_start = 0; | 860 | win->card_start = 0; |
852 | 861 | ||
853 | if (s->ops->set_mem_map(s, win) != 0) { | 862 | if (s->ops->set_mem_map(s, win) != 0) { |
854 | dev_dbg(&s->dev, "failed to set memory mapping\n"); | 863 | dev_dbg(&p_dev->dev, "failed to set memory mapping\n"); |
855 | mutex_unlock(&s->ops_mutex); | 864 | mutex_unlock(&s->ops_mutex); |
856 | return -EIO; | 865 | return -EIO; |
857 | } | 866 | } |
@@ -874,7 +883,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha | |||
874 | if (win->res) | 883 | if (win->res) |
875 | request_resource(&iomem_resource, res); | 884 | request_resource(&iomem_resource, res); |
876 | 885 | ||
877 | dev_dbg(&s->dev, "request_window results in %pR\n", res); | 886 | dev_dbg(&p_dev->dev, "request_window results in %pR\n", res); |
878 | 887 | ||
879 | mutex_unlock(&s->ops_mutex); | 888 | mutex_unlock(&s->ops_mutex); |
880 | *wh = res; | 889 | *wh = res; |
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c index b8a869af0f44..deef6656ab7b 100644 --- a/drivers/pcmcia/pd6729.c +++ b/drivers/pcmcia/pd6729.c | |||
@@ -646,7 +646,7 @@ static int __devinit pd6729_pci_probe(struct pci_dev *dev, | |||
646 | if (!pci_resource_start(dev, 0)) { | 646 | if (!pci_resource_start(dev, 0)) { |
647 | dev_warn(&dev->dev, "refusing to load the driver as the " | 647 | dev_warn(&dev->dev, "refusing to load the driver as the " |
648 | "io_base is NULL.\n"); | 648 | "io_base is NULL.\n"); |
649 | goto err_out_free_mem; | 649 | goto err_out_disable; |
650 | } | 650 | } |
651 | 651 | ||
652 | dev_info(&dev->dev, "Cirrus PD6729 PCI to PCMCIA Bridge at 0x%llx " | 652 | dev_info(&dev->dev, "Cirrus PD6729 PCI to PCMCIA Bridge at 0x%llx " |
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index e35ed128bdef..2d61186ad5a2 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c | |||
@@ -3093,7 +3093,8 @@ static const struct tpacpi_quirk tpacpi_hotkey_qtable[] __initconst = { | |||
3093 | TPACPI_Q_IBM('1', 'D', TPACPI_HK_Q_INIMASK), /* X22, X23, X24 */ | 3093 | TPACPI_Q_IBM('1', 'D', TPACPI_HK_Q_INIMASK), /* X22, X23, X24 */ |
3094 | }; | 3094 | }; |
3095 | 3095 | ||
3096 | typedef u16 tpacpi_keymap_t[TPACPI_HOTKEY_MAP_LEN]; | 3096 | typedef u16 tpacpi_keymap_entry_t; |
3097 | typedef tpacpi_keymap_entry_t tpacpi_keymap_t[TPACPI_HOTKEY_MAP_LEN]; | ||
3097 | 3098 | ||
3098 | static int __init hotkey_init(struct ibm_init_struct *iibm) | 3099 | static int __init hotkey_init(struct ibm_init_struct *iibm) |
3099 | { | 3100 | { |
@@ -3230,7 +3231,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) | |||
3230 | }; | 3231 | }; |
3231 | 3232 | ||
3232 | #define TPACPI_HOTKEY_MAP_SIZE sizeof(tpacpi_keymap_t) | 3233 | #define TPACPI_HOTKEY_MAP_SIZE sizeof(tpacpi_keymap_t) |
3233 | #define TPACPI_HOTKEY_MAP_TYPESIZE sizeof(tpacpi_keymap_t[0]) | 3234 | #define TPACPI_HOTKEY_MAP_TYPESIZE sizeof(tpacpi_keymap_entry_t) |
3234 | 3235 | ||
3235 | int res, i; | 3236 | int res, i; |
3236 | int status; | 3237 | int status; |
diff --git a/drivers/power/apm_power.c b/drivers/power/apm_power.c index 936bae560fa1..dc628cb2e762 100644 --- a/drivers/power/apm_power.c +++ b/drivers/power/apm_power.c | |||
@@ -233,6 +233,7 @@ static int calculate_capacity(enum apm_source source) | |||
233 | empty_design_prop = POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN; | 233 | empty_design_prop = POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN; |
234 | now_prop = POWER_SUPPLY_PROP_ENERGY_NOW; | 234 | now_prop = POWER_SUPPLY_PROP_ENERGY_NOW; |
235 | avg_prop = POWER_SUPPLY_PROP_ENERGY_AVG; | 235 | avg_prop = POWER_SUPPLY_PROP_ENERGY_AVG; |
236 | break; | ||
236 | case SOURCE_VOLTAGE: | 237 | case SOURCE_VOLTAGE: |
237 | full_prop = POWER_SUPPLY_PROP_VOLTAGE_MAX; | 238 | full_prop = POWER_SUPPLY_PROP_VOLTAGE_MAX; |
238 | empty_prop = POWER_SUPPLY_PROP_VOLTAGE_MIN; | 239 | empty_prop = POWER_SUPPLY_PROP_VOLTAGE_MIN; |
diff --git a/drivers/power/intel_mid_battery.c b/drivers/power/intel_mid_battery.c index c61ffec2ff10..2a10cd361181 100644 --- a/drivers/power/intel_mid_battery.c +++ b/drivers/power/intel_mid_battery.c | |||
@@ -185,8 +185,8 @@ static int pmic_scu_ipc_battery_property_get(struct battery_property *prop) | |||
185 | { | 185 | { |
186 | u32 data[3]; | 186 | u32 data[3]; |
187 | u8 *p = (u8 *)&data[1]; | 187 | u8 *p = (u8 *)&data[1]; |
188 | int err = intel_scu_ipc_command(IPC_CMD_BATTERY_PROPERTY, | 188 | int err = intel_scu_ipc_command(IPCMSG_BATTERY, |
189 | IPCMSG_BATTERY, NULL, 0, data, 3); | 189 | IPC_CMD_BATTERY_PROPERTY, NULL, 0, data, 3); |
190 | 190 | ||
191 | prop->capacity = data[0]; | 191 | prop->capacity = data[0]; |
192 | prop->crnt = *p++; | 192 | prop->crnt = *p++; |
@@ -207,7 +207,7 @@ static int pmic_scu_ipc_battery_property_get(struct battery_property *prop) | |||
207 | 207 | ||
208 | static int pmic_scu_ipc_set_charger(int charger) | 208 | static int pmic_scu_ipc_set_charger(int charger) |
209 | { | 209 | { |
210 | return intel_scu_ipc_simple_command(charger, IPCMSG_BATTERY); | 210 | return intel_scu_ipc_simple_command(IPCMSG_BATTERY, charger); |
211 | } | 211 | } |
212 | 212 | ||
213 | /** | 213 | /** |
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c index 7d149a8d8d9b..2ce2eb71d0f5 100644 --- a/drivers/regulator/88pm8607.c +++ b/drivers/regulator/88pm8607.c | |||
@@ -215,7 +215,7 @@ static int pm8607_list_voltage(struct regulator_dev *rdev, unsigned index) | |||
215 | struct pm8607_regulator_info *info = rdev_get_drvdata(rdev); | 215 | struct pm8607_regulator_info *info = rdev_get_drvdata(rdev); |
216 | int ret = -EINVAL; | 216 | int ret = -EINVAL; |
217 | 217 | ||
218 | if (info->vol_table && (index < (2 << info->vol_nbits))) { | 218 | if (info->vol_table && (index < (1 << info->vol_nbits))) { |
219 | ret = info->vol_table[index]; | 219 | ret = info->vol_table[index]; |
220 | if (info->slope_double) | 220 | if (info->slope_double) |
221 | ret <<= 1; | 221 | ret <<= 1; |
@@ -233,7 +233,7 @@ static int choose_voltage(struct regulator_dev *rdev, int min_uV, int max_uV) | |||
233 | max_uV = max_uV >> 1; | 233 | max_uV = max_uV >> 1; |
234 | } | 234 | } |
235 | if (info->vol_table) { | 235 | if (info->vol_table) { |
236 | for (i = 0; i < (2 << info->vol_nbits); i++) { | 236 | for (i = 0; i < (1 << info->vol_nbits); i++) { |
237 | if (!info->vol_table[i]) | 237 | if (!info->vol_table[i]) |
238 | break; | 238 | break; |
239 | if ((min_uV <= info->vol_table[i]) | 239 | if ((min_uV <= info->vol_table[i]) |
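
The 88pm8607 change corrects the voltage-table bound: a selector field of vol_nbits bits can encode 1 << vol_nbits entries, not 2 << vol_nbits, so the old bound walked one power of two past the end of the table. A small illustrative check (plain C, hypothetical 3-bit table):

    #include <stdio.h>

    int main(void)
    {
        const int vol_nbits = 3;               /* 3-bit selector field */
        const int table[1 << 3] = { 725000, 750000, 775000, 800000,
                                    825000, 850000, 875000, 900000 };
        int entries_wrong = 2 << vol_nbits;    /* 16: walks past the table */
        int entries_right = 1 << vol_nbits;    /* 8: exactly the table size */

        printf("wrong bound: %d, right bound: %d, table has %zu entries\n",
               entries_wrong, entries_right, sizeof(table) / sizeof(table[0]));
        return 0;
    }
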
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c index 11790990277a..b349266a43de 100644 --- a/drivers/regulator/ab3100.c +++ b/drivers/regulator/ab3100.c | |||
@@ -634,12 +634,9 @@ static int __devinit ab3100_regulators_probe(struct platform_device *pdev) | |||
634 | "%s: failed to register regulator %s err %d\n", | 634 | "%s: failed to register regulator %s err %d\n", |
635 | __func__, ab3100_regulator_desc[i].name, | 635 | __func__, ab3100_regulator_desc[i].name, |
636 | err); | 636 | err); |
637 | i--; | ||
638 | /* remove the already registered regulators */ | 637 | /* remove the already registered regulators */ |
639 | while (i > 0) { | 638 | while (--i >= 0) |
640 | regulator_unregister(ab3100_regulators[i].rdev); | 639 | regulator_unregister(ab3100_regulators[i].rdev); |
641 | i--; | ||
642 | } | ||
643 | return err; | 640 | return err; |
644 | } | 641 | } |
645 | 642 | ||
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c index dc3f1a491675..28c7ae67cec9 100644 --- a/drivers/regulator/ab8500.c +++ b/drivers/regulator/ab8500.c | |||
@@ -157,7 +157,7 @@ static int ab8500_list_voltage(struct regulator_dev *rdev, unsigned selector) | |||
157 | if (info->fixed_uV) | 157 | if (info->fixed_uV) |
158 | return info->fixed_uV; | 158 | return info->fixed_uV; |
159 | 159 | ||
160 | if (selector > info->voltages_len) | 160 | if (selector >= info->voltages_len) |
161 | return -EINVAL; | 161 | return -EINVAL; |
162 | 162 | ||
163 | return info->supported_voltages[selector]; | 163 | return info->supported_voltages[selector]; |
@@ -344,13 +344,14 @@ static inline struct ab8500_regulator_info *find_regulator_info(int id) | |||
344 | static __devinit int ab8500_regulator_probe(struct platform_device *pdev) | 344 | static __devinit int ab8500_regulator_probe(struct platform_device *pdev) |
345 | { | 345 | { |
346 | struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent); | 346 | struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent); |
347 | struct ab8500_platform_data *pdata = dev_get_platdata(ab8500->dev); | 347 | struct ab8500_platform_data *pdata; |
348 | int i, err; | 348 | int i, err; |
349 | 349 | ||
350 | if (!ab8500) { | 350 | if (!ab8500) { |
351 | dev_err(&pdev->dev, "null mfd parent\n"); | 351 | dev_err(&pdev->dev, "null mfd parent\n"); |
352 | return -EINVAL; | 352 | return -EINVAL; |
353 | } | 353 | } |
354 | pdata = dev_get_platdata(ab8500->dev); | ||
354 | 355 | ||
355 | /* register all regulators */ | 356 | /* register all regulators */ |
356 | for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) { | 357 | for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) { |
@@ -368,11 +369,9 @@ static __devinit int ab8500_regulator_probe(struct platform_device *pdev) | |||
368 | dev_err(&pdev->dev, "failed to register regulator %s\n", | 369 | dev_err(&pdev->dev, "failed to register regulator %s\n", |
369 | info->desc.name); | 370 | info->desc.name); |
370 | /* when we fail, un-register all earlier regulators */ | 371 | /* when we fail, un-register all earlier regulators */ |
371 | i--; | 372 | while (--i >= 0) { |
372 | while (i > 0) { | ||
373 | info = &ab8500_regulator_info[i]; | 373 | info = &ab8500_regulator_info[i]; |
374 | regulator_unregister(info->regulator); | 374 | regulator_unregister(info->regulator); |
375 | i--; | ||
376 | } | 375 | } |
377 | return err; | 376 | return err; |
378 | } | 377 | } |
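
Both the ab3100 and ab8500 probe paths switch to the same unwind idiom after a failed registration: while (--i >= 0) visits every regulator registered before the failing index, including index 0, which the previous "i--; while (i > 0)" form skipped. A standalone sketch of the corrected pattern (unregister() is a stand-in for regulator_unregister()):

    #include <stdio.h>

    #define N 4

    static void unregister(int idx)
    {
        printf("unregistering regulator %d\n", idx);
    }

    int main(void)
    {
        int i;

        /* Pretend registration of regulator 2 failed after 0 and 1 succeeded. */
        for (i = 0; i < N; i++) {
            if (i == 2)
                break;          /* registration error */
        }

        /* Roll back everything registered so far, including index 0. */
        while (--i >= 0)
            unregister(i);

        return 0;
    }
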
diff --git a/drivers/regulator/ad5398.c b/drivers/regulator/ad5398.c index d59d2f2314af..df1fb53c09d2 100644 --- a/drivers/regulator/ad5398.c +++ b/drivers/regulator/ad5398.c | |||
@@ -25,7 +25,7 @@ struct ad5398_chip_info { | |||
25 | unsigned int current_level; | 25 | unsigned int current_level; |
26 | unsigned int current_mask; | 26 | unsigned int current_mask; |
27 | unsigned int current_offset; | 27 | unsigned int current_offset; |
28 | struct regulator_dev rdev; | 28 | struct regulator_dev *rdev; |
29 | }; | 29 | }; |
30 | 30 | ||
31 | static int ad5398_calc_current(struct ad5398_chip_info *chip, | 31 | static int ad5398_calc_current(struct ad5398_chip_info *chip, |
@@ -211,7 +211,6 @@ MODULE_DEVICE_TABLE(i2c, ad5398_id); | |||
211 | static int __devinit ad5398_probe(struct i2c_client *client, | 211 | static int __devinit ad5398_probe(struct i2c_client *client, |
212 | const struct i2c_device_id *id) | 212 | const struct i2c_device_id *id) |
213 | { | 213 | { |
214 | struct regulator_dev *rdev; | ||
215 | struct regulator_init_data *init_data = client->dev.platform_data; | 214 | struct regulator_init_data *init_data = client->dev.platform_data; |
216 | struct ad5398_chip_info *chip; | 215 | struct ad5398_chip_info *chip; |
217 | const struct ad5398_current_data_format *df = | 216 | const struct ad5398_current_data_format *df = |
@@ -233,9 +232,10 @@ static int __devinit ad5398_probe(struct i2c_client *client, | |||
233 | chip->current_offset = df->current_offset; | 232 | chip->current_offset = df->current_offset; |
234 | chip->current_mask = (chip->current_level - 1) << chip->current_offset; | 233 | chip->current_mask = (chip->current_level - 1) << chip->current_offset; |
235 | 234 | ||
236 | rdev = regulator_register(&ad5398_reg, &client->dev, init_data, chip); | 235 | chip->rdev = regulator_register(&ad5398_reg, &client->dev, |
237 | if (IS_ERR(rdev)) { | 236 | init_data, chip); |
238 | ret = PTR_ERR(rdev); | 237 | if (IS_ERR(chip->rdev)) { |
238 | ret = PTR_ERR(chip->rdev); | ||
239 | dev_err(&client->dev, "failed to register %s %s\n", | 239 | dev_err(&client->dev, "failed to register %s %s\n", |
240 | id->name, ad5398_reg.name); | 240 | id->name, ad5398_reg.name); |
241 | goto err; | 241 | goto err; |
@@ -254,7 +254,7 @@ static int __devexit ad5398_remove(struct i2c_client *client) | |||
254 | { | 254 | { |
255 | struct ad5398_chip_info *chip = i2c_get_clientdata(client); | 255 | struct ad5398_chip_info *chip = i2c_get_clientdata(client); |
256 | 256 | ||
257 | regulator_unregister(&chip->rdev); | 257 | regulator_unregister(chip->rdev); |
258 | kfree(chip); | 258 | kfree(chip); |
259 | i2c_set_clientdata(client, NULL); | 259 | i2c_set_clientdata(client, NULL); |
260 | 260 | ||
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 422a709d271d..cc8b337b9119 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c | |||
@@ -700,7 +700,7 @@ static void print_constraints(struct regulator_dev *rdev) | |||
700 | constraints->min_uA != constraints->max_uA) { | 700 | constraints->min_uA != constraints->max_uA) { |
701 | ret = _regulator_get_current_limit(rdev); | 701 | ret = _regulator_get_current_limit(rdev); |
702 | if (ret > 0) | 702 | if (ret > 0) |
703 | count += sprintf(buf + count, "at %d uA ", ret / 1000); | 703 | count += sprintf(buf + count, "at %d mA ", ret / 1000); |
704 | } | 704 | } |
705 | 705 | ||
706 | if (constraints->valid_modes_mask & REGULATOR_MODE_FAST) | 706 | if (constraints->valid_modes_mask & REGULATOR_MODE_FAST) |
@@ -2302,8 +2302,10 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc, | |||
2302 | dev_set_name(&rdev->dev, "regulator.%d", | 2302 | dev_set_name(&rdev->dev, "regulator.%d", |
2303 | atomic_inc_return(®ulator_no) - 1); | 2303 | atomic_inc_return(®ulator_no) - 1); |
2304 | ret = device_register(&rdev->dev); | 2304 | ret = device_register(&rdev->dev); |
2305 | if (ret != 0) | 2305 | if (ret != 0) { |
2306 | put_device(&rdev->dev); | ||
2306 | goto clean; | 2307 | goto clean; |
2308 | } | ||
2307 | 2309 | ||
2308 | dev_set_drvdata(&rdev->dev, rdev); | 2310 | dev_set_drvdata(&rdev->dev, rdev); |
2309 | 2311 | ||
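
The print_constraints fix is purely a unit label: _regulator_get_current_limit() reports microamps, and the value is divided by 1000 before printing, so the banner must say mA. With an illustrative value:

    #include <stdio.h>

    int main(void)
    {
        int limit_uA = 150000;                  /* 150 mA expressed in uA */
        printf("at %d mA\n", limit_uA / 1000);  /* prints "at 150 mA"     */
        return 0;
    }
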
diff --git a/drivers/regulator/isl6271a-regulator.c b/drivers/regulator/isl6271a-regulator.c index e49d2bd393f2..d61ecb885a8c 100644 --- a/drivers/regulator/isl6271a-regulator.c +++ b/drivers/regulator/isl6271a-regulator.c | |||
@@ -165,7 +165,7 @@ static int __devinit isl6271a_probe(struct i2c_client *i2c, | |||
165 | mutex_init(&pmic->mtx); | 165 | mutex_init(&pmic->mtx); |
166 | 166 | ||
167 | for (i = 0; i < 3; i++) { | 167 | for (i = 0; i < 3; i++) { |
168 | pmic->rdev[i] = regulator_register(&isl_rd[0], &i2c->dev, | 168 | pmic->rdev[i] = regulator_register(&isl_rd[i], &i2c->dev, |
169 | init_data, pmic); | 169 | init_data, pmic); |
170 | if (IS_ERR(pmic->rdev[i])) { | 170 | if (IS_ERR(pmic->rdev[i])) { |
171 | dev_err(&i2c->dev, "failed to register %s\n", id->name); | 171 | dev_err(&i2c->dev, "failed to register %s\n", id->name); |
diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c index 8867c2710a6d..559cfa271a44 100644 --- a/drivers/regulator/max1586.c +++ b/drivers/regulator/max1586.c | |||
@@ -121,14 +121,14 @@ static int max1586_v6_set(struct regulator_dev *rdev, int min_uV, int max_uV) | |||
121 | if (max_uV < MAX1586_V6_MIN_UV || max_uV > MAX1586_V6_MAX_UV) | 121 | if (max_uV < MAX1586_V6_MIN_UV || max_uV > MAX1586_V6_MAX_UV) |
122 | return -EINVAL; | 122 | return -EINVAL; |
123 | 123 | ||
124 | if (min_uV >= 3000000) | ||
125 | selector = 3; | ||
126 | if (min_uV < 3000000) | ||
127 | selector = 2; | ||
128 | if (min_uV < 2500000) | ||
129 | selector = 1; | ||
130 | if (min_uV < 1800000) | 124 | if (min_uV < 1800000) |
131 | selector = 0; | 125 | selector = 0; |
126 | else if (min_uV < 2500000) | ||
127 | selector = 1; | ||
128 | else if (min_uV < 3000000) | ||
129 | selector = 2; | ||
130 | else if (min_uV >= 3000000) | ||
131 | selector = 3; | ||
132 | 132 | ||
133 | if (max1586_v6_calc_voltage(selector) > max_uV) | 133 | if (max1586_v6_calc_voltage(selector) > max_uV) |
134 | return -EINVAL; | 134 | return -EINVAL; |
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c index 4520ace3f7e7..6b60a9c0366b 100644 --- a/drivers/regulator/max8649.c +++ b/drivers/regulator/max8649.c | |||
@@ -330,7 +330,7 @@ static int __devinit max8649_regulator_probe(struct i2c_client *client, | |||
330 | /* set external clock frequency */ | 330 | /* set external clock frequency */ |
331 | info->extclk_freq = pdata->extclk_freq; | 331 | info->extclk_freq = pdata->extclk_freq; |
332 | max8649_set_bits(info->i2c, MAX8649_SYNC, MAX8649_EXT_MASK, | 332 | max8649_set_bits(info->i2c, MAX8649_SYNC, MAX8649_EXT_MASK, |
333 | info->extclk_freq); | 333 | info->extclk_freq << 6); |
334 | } | 334 | } |
335 | 335 | ||
336 | if (pdata->ramp_timing) { | 336 | if (pdata->ramp_timing) { |
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c index ab67298799f9..a1baf1fbe004 100644 --- a/drivers/regulator/max8998.c +++ b/drivers/regulator/max8998.c | |||
@@ -549,7 +549,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev) | |||
549 | if (!max8998) | 549 | if (!max8998) |
550 | return -ENOMEM; | 550 | return -ENOMEM; |
551 | 551 | ||
552 | size = sizeof(struct regulator_dev *) * (pdata->num_regulators + 1); | 552 | size = sizeof(struct regulator_dev *) * pdata->num_regulators; |
553 | max8998->rdev = kzalloc(size, GFP_KERNEL); | 553 | max8998->rdev = kzalloc(size, GFP_KERNEL); |
554 | if (!max8998->rdev) { | 554 | if (!max8998->rdev) { |
555 | kfree(max8998); | 555 | kfree(max8998); |
@@ -557,7 +557,9 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev) | |||
557 | } | 557 | } |
558 | 558 | ||
559 | rdev = max8998->rdev; | 559 | rdev = max8998->rdev; |
560 | max8998->dev = &pdev->dev; | ||
560 | max8998->iodev = iodev; | 561 | max8998->iodev = iodev; |
562 | max8998->num_regulators = pdata->num_regulators; | ||
561 | platform_set_drvdata(pdev, max8998); | 563 | platform_set_drvdata(pdev, max8998); |
562 | 564 | ||
563 | for (i = 0; i < pdata->num_regulators; i++) { | 565 | for (i = 0; i < pdata->num_regulators; i++) { |
@@ -583,7 +585,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev) | |||
583 | 585 | ||
584 | return 0; | 586 | return 0; |
585 | err: | 587 | err: |
586 | for (i = 0; i <= max8998->num_regulators; i++) | 588 | for (i = 0; i < max8998->num_regulators; i++) |
587 | if (rdev[i]) | 589 | if (rdev[i]) |
588 | regulator_unregister(rdev[i]); | 590 | regulator_unregister(rdev[i]); |
589 | 591 | ||
@@ -599,7 +601,7 @@ static int __devexit max8998_pmic_remove(struct platform_device *pdev) | |||
599 | struct regulator_dev **rdev = max8998->rdev; | 601 | struct regulator_dev **rdev = max8998->rdev; |
600 | int i; | 602 | int i; |
601 | 603 | ||
602 | for (i = 0; i <= max8998->num_regulators; i++) | 604 | for (i = 0; i < max8998->num_regulators; i++) |
603 | if (rdev[i]) | 605 | if (rdev[i]) |
604 | regulator_unregister(rdev[i]); | 606 | regulator_unregister(rdev[i]); |
605 | 607 | ||
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c index c239f42aa4a3..020f5878d7ff 100644 --- a/drivers/regulator/tps6507x-regulator.c +++ b/drivers/regulator/tps6507x-regulator.c | |||
@@ -626,12 +626,6 @@ fail: | |||
626 | return error; | 626 | return error; |
627 | } | 627 | } |
628 | 628 | ||
629 | /** | ||
630 | * tps6507x_remove - TPS6507x driver i2c remove handler | ||
631 | * @client: i2c driver client device structure | ||
632 | * | ||
633 | * Unregister TPS driver as an i2c client device driver | ||
634 | */ | ||
635 | static int __devexit tps6507x_pmic_remove(struct platform_device *pdev) | 629 | static int __devexit tps6507x_pmic_remove(struct platform_device *pdev) |
636 | { | 630 | { |
637 | struct tps6507x_dev *tps6507x_dev = platform_get_drvdata(pdev); | 631 | struct tps6507x_dev *tps6507x_dev = platform_get_drvdata(pdev); |
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c index 8cff1413a147..51237fbb1bbb 100644 --- a/drivers/regulator/tps6586x-regulator.c +++ b/drivers/regulator/tps6586x-regulator.c | |||
@@ -133,7 +133,7 @@ static int tps6586x_ldo_get_voltage(struct regulator_dev *rdev) | |||
133 | mask = ((1 << ri->volt_nbits) - 1) << ri->volt_shift; | 133 | mask = ((1 << ri->volt_nbits) - 1) << ri->volt_shift; |
134 | val = (val & mask) >> ri->volt_shift; | 134 | val = (val & mask) >> ri->volt_shift; |
135 | 135 | ||
136 | if (val > ri->desc.n_voltages) | 136 | if (val >= ri->desc.n_voltages) |
137 | BUG(); | 137 | BUG(); |
138 | 138 | ||
139 | return ri->voltages[val] * 1000; | 139 | return ri->voltages[val] * 1000; |
@@ -150,7 +150,7 @@ static int tps6586x_dvm_set_voltage(struct regulator_dev *rdev, | |||
150 | if (ret) | 150 | if (ret) |
151 | return ret; | 151 | return ret; |
152 | 152 | ||
153 | return tps6586x_set_bits(parent, ri->go_reg, ri->go_bit); | 153 | return tps6586x_set_bits(parent, ri->go_reg, 1 << ri->go_bit); |
154 | } | 154 | } |
155 | 155 | ||
156 | static int tps6586x_regulator_enable(struct regulator_dev *rdev) | 156 | static int tps6586x_regulator_enable(struct regulator_dev *rdev) |
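
The tps6586x change passes a bit mask rather than a bit position to tps6586x_set_bits(): go_reg/go_bit describe where the GO flag lives, so the mask is 1 << go_bit. Handing over the raw index would set bit 0 (or bits 0 and 1 for an index of 3) instead of the intended bit. A minimal illustration with hypothetical register contents:

    #include <stdio.h>

    int main(void)
    {
        unsigned int reg = 0x00;
        unsigned int go_bit = 3;                   /* bit position of the GO flag */

        unsigned int wrong = reg | go_bit;         /* 0x03: sets bits 0 and 1 */
        unsigned int right = reg | (1u << go_bit); /* 0x08: sets bit 3 only   */

        printf("wrong: 0x%02x, right: 0x%02x\n", wrong, right);
        return 0;
    }
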
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c index e686cdb61b97..9edf8f692341 100644 --- a/drivers/regulator/wm831x-ldo.c +++ b/drivers/regulator/wm831x-ldo.c | |||
@@ -215,8 +215,7 @@ static int wm831x_gp_ldo_set_mode(struct regulator_dev *rdev, | |||
215 | 215 | ||
216 | case REGULATOR_MODE_IDLE: | 216 | case REGULATOR_MODE_IDLE: |
217 | ret = wm831x_set_bits(wm831x, ctrl_reg, | 217 | ret = wm831x_set_bits(wm831x, ctrl_reg, |
218 | WM831X_LDO1_LP_MODE, | 218 | WM831X_LDO1_LP_MODE, 0); |
219 | WM831X_LDO1_LP_MODE); | ||
220 | if (ret < 0) | 219 | if (ret < 0) |
221 | return ret; | 220 | return ret; |
222 | 221 | ||
@@ -225,10 +224,12 @@ static int wm831x_gp_ldo_set_mode(struct regulator_dev *rdev, | |||
225 | WM831X_LDO1_ON_MODE); | 224 | WM831X_LDO1_ON_MODE); |
226 | if (ret < 0) | 225 | if (ret < 0) |
227 | return ret; | 226 | return ret; |
227 | break; | ||
228 | 228 | ||
229 | case REGULATOR_MODE_STANDBY: | 229 | case REGULATOR_MODE_STANDBY: |
230 | ret = wm831x_set_bits(wm831x, ctrl_reg, | 230 | ret = wm831x_set_bits(wm831x, ctrl_reg, |
231 | WM831X_LDO1_LP_MODE, 0); | 231 | WM831X_LDO1_LP_MODE, |
232 | WM831X_LDO1_LP_MODE); | ||
232 | if (ret < 0) | 233 | if (ret < 0) |
233 | return ret; | 234 | return ret; |
234 | 235 | ||
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c index 0e6ed7db9364..fe4b8a8a9dfd 100644 --- a/drivers/regulator/wm8350-regulator.c +++ b/drivers/regulator/wm8350-regulator.c | |||
@@ -1129,7 +1129,7 @@ static unsigned int wm8350_dcdc_get_mode(struct regulator_dev *rdev) | |||
1129 | mode = REGULATOR_MODE_NORMAL; | 1129 | mode = REGULATOR_MODE_NORMAL; |
1130 | } else if (!active && !sleep) | 1130 | } else if (!active && !sleep) |
1131 | mode = REGULATOR_MODE_IDLE; | 1131 | mode = REGULATOR_MODE_IDLE; |
1132 | else if (!sleep) | 1132 | else if (sleep) |
1133 | mode = REGULATOR_MODE_STANDBY; | 1133 | mode = REGULATOR_MODE_STANDBY; |
1134 | 1134 | ||
1135 | return mode; | 1135 | return mode; |
diff --git a/drivers/rtc/rtc-ab3100.c b/drivers/rtc/rtc-ab3100.c index d26780ea254b..261a07e0fb24 100644 --- a/drivers/rtc/rtc-ab3100.c +++ b/drivers/rtc/rtc-ab3100.c | |||
@@ -235,6 +235,7 @@ static int __init ab3100_rtc_probe(struct platform_device *pdev) | |||
235 | err = PTR_ERR(rtc); | 235 | err = PTR_ERR(rtc); |
236 | return err; | 236 | return err; |
237 | } | 237 | } |
238 | platform_set_drvdata(pdev, rtc); | ||
238 | 239 | ||
239 | return 0; | 240 | return 0; |
240 | } | 241 | } |
@@ -244,6 +245,7 @@ static int __exit ab3100_rtc_remove(struct platform_device *pdev) | |||
244 | struct rtc_device *rtc = platform_get_drvdata(pdev); | 245 | struct rtc_device *rtc = platform_get_drvdata(pdev); |
245 | 246 | ||
246 | rtc_device_unregister(rtc); | 247 | rtc_device_unregister(rtc); |
248 | platform_set_drvdata(pdev, NULL); | ||
247 | return 0; | 249 | return 0; |
248 | } | 250 | } |
249 | 251 | ||
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c index 72b2bcc2c224..d4fb82d85e9b 100644 --- a/drivers/rtc/rtc-bfin.c +++ b/drivers/rtc/rtc-bfin.c | |||
@@ -426,7 +426,7 @@ static int bfin_rtc_suspend(struct platform_device *pdev, pm_message_t state) | |||
426 | enable_irq_wake(IRQ_RTC); | 426 | enable_irq_wake(IRQ_RTC); |
427 | bfin_rtc_sync_pending(&pdev->dev); | 427 | bfin_rtc_sync_pending(&pdev->dev); |
428 | } else | 428 | } else |
429 | bfin_rtc_int_clear(-1); | 429 | bfin_rtc_int_clear(0); |
430 | 430 | ||
431 | return 0; | 431 | return 0; |
432 | } | 432 | } |
@@ -435,8 +435,17 @@ static int bfin_rtc_resume(struct platform_device *pdev) | |||
435 | { | 435 | { |
436 | if (device_may_wakeup(&pdev->dev)) | 436 | if (device_may_wakeup(&pdev->dev)) |
437 | disable_irq_wake(IRQ_RTC); | 437 | disable_irq_wake(IRQ_RTC); |
438 | else | 438 | |
439 | bfin_write_RTC_ISTAT(-1); | 439 | /* |
440 | * Since only some of the RTC bits are maintained externally in the | ||
441 | * Vbat domain, we need to wait for the RTC MMRs to be synced into | ||
442 | * the core after waking up. This happens every RTC 1HZ. Once that | ||
443 | * has happened, we can go ahead and re-enable the important write | ||
444 | * complete interrupt event. | ||
445 | */ | ||
446 | while (!(bfin_read_RTC_ISTAT() & RTC_ISTAT_SEC)) | ||
447 | continue; | ||
448 | bfin_rtc_int_set(RTC_ISTAT_WRITE_COMPLETE); | ||
440 | 449 | ||
441 | return 0; | 450 | return 0; |
442 | } | 451 | } |
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c index 66377f3e28b8..d60557cae8ef 100644 --- a/drivers/rtc/rtc-m41t80.c +++ b/drivers/rtc/rtc-m41t80.c | |||
@@ -364,7 +364,7 @@ static int m41t80_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *t) | |||
364 | t->time.tm_isdst = -1; | 364 | t->time.tm_isdst = -1; |
365 | t->enabled = !!(reg[M41T80_REG_ALARM_MON] & M41T80_ALMON_AFE); | 365 | t->enabled = !!(reg[M41T80_REG_ALARM_MON] & M41T80_ALMON_AFE); |
366 | t->pending = !!(reg[M41T80_REG_FLAGS] & M41T80_FLAGS_AF); | 366 | t->pending = !!(reg[M41T80_REG_FLAGS] & M41T80_FLAGS_AF); |
367 | return rtc_valid_tm(t); | 367 | return 0; |
368 | } | 368 | } |
369 | 369 | ||
370 | static struct rtc_class_ops m41t80_rtc_ops = { | 370 | static struct rtc_class_ops m41t80_rtc_ops = { |
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c index 6c418fe7f288..b7a6690e5b35 100644 --- a/drivers/rtc/rtc-pl031.c +++ b/drivers/rtc/rtc-pl031.c | |||
@@ -403,7 +403,7 @@ static int pl031_probe(struct amba_device *adev, struct amba_id *id) | |||
403 | } | 403 | } |
404 | 404 | ||
405 | if (request_irq(adev->irq[0], pl031_interrupt, | 405 | if (request_irq(adev->irq[0], pl031_interrupt, |
406 | IRQF_DISABLED | IRQF_SHARED, "rtc-pl031", ldata)) { | 406 | IRQF_DISABLED, "rtc-pl031", ldata)) { |
407 | ret = -EIO; | 407 | ret = -EIO; |
408 | goto out_no_irq; | 408 | goto out_no_irq; |
409 | } | 409 | } |
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index a0d3ec89d412..f57a87f4ae96 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c | |||
@@ -310,11 +310,6 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) | |||
310 | 310 | ||
311 | s3c_rtc_setaie(alrm->enabled); | 311 | s3c_rtc_setaie(alrm->enabled); |
312 | 312 | ||
313 | if (alrm->enabled) | ||
314 | enable_irq_wake(s3c_rtc_alarmno); | ||
315 | else | ||
316 | disable_irq_wake(s3c_rtc_alarmno); | ||
317 | |||
318 | return 0; | 313 | return 0; |
319 | } | 314 | } |
320 | 315 | ||
@@ -587,6 +582,10 @@ static int s3c_rtc_suspend(struct platform_device *pdev, pm_message_t state) | |||
587 | ticnt_en_save &= S3C64XX_RTCCON_TICEN; | 582 | ticnt_en_save &= S3C64XX_RTCCON_TICEN; |
588 | } | 583 | } |
589 | s3c_rtc_enable(pdev, 0); | 584 | s3c_rtc_enable(pdev, 0); |
585 | |||
586 | if (device_may_wakeup(&pdev->dev)) | ||
587 | enable_irq_wake(s3c_rtc_alarmno); | ||
588 | |||
590 | return 0; | 589 | return 0; |
591 | } | 590 | } |
592 | 591 | ||
@@ -600,6 +599,10 @@ static int s3c_rtc_resume(struct platform_device *pdev) | |||
600 | tmp = readb(s3c_rtc_base + S3C2410_RTCCON); | 599 | tmp = readb(s3c_rtc_base + S3C2410_RTCCON); |
601 | writeb(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON); | 600 | writeb(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON); |
602 | } | 601 | } |
602 | |||
603 | if (device_may_wakeup(&pdev->dev)) | ||
604 | disable_irq_wake(s3c_rtc_alarmno); | ||
605 | |||
603 | return 0; | 606 | return 0; |
604 | } | 607 | } |
605 | #else | 608 | #else |
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c index b7de02525ec9..85cf607fc78f 100644 --- a/drivers/s390/char/tape_block.c +++ b/drivers/s390/char/tape_block.c | |||
@@ -217,8 +217,7 @@ tapeblock_setup_device(struct tape_device * device) | |||
217 | if (!blkdat->request_queue) | 217 | if (!blkdat->request_queue) |
218 | return -ENOMEM; | 218 | return -ENOMEM; |
219 | 219 | ||
220 | elevator_exit(blkdat->request_queue->elevator); | 220 | rc = elevator_change(blkdat->request_queue, "noop"); |
221 | rc = elevator_init(blkdat->request_queue, "noop"); | ||
222 | if (rc) | 221 | if (rc) |
223 | goto cleanup_queue; | 222 | goto cleanup_queue; |
224 | 223 | ||
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c index 6edf20b62de5..2c7d2d9be4d0 100644 --- a/drivers/s390/net/ctcm_main.c +++ b/drivers/s390/net/ctcm_main.c | |||
@@ -1154,7 +1154,7 @@ static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv) | |||
1154 | dev_fsm, dev_fsm_len, GFP_KERNEL); | 1154 | dev_fsm, dev_fsm_len, GFP_KERNEL); |
1155 | if (priv->fsm == NULL) { | 1155 | if (priv->fsm == NULL) { |
1156 | CTCMY_DBF_DEV(SETUP, dev, "init_fsm error"); | 1156 | CTCMY_DBF_DEV(SETUP, dev, "init_fsm error"); |
1157 | kfree(dev); | 1157 | free_netdev(dev); |
1158 | return NULL; | 1158 | return NULL; |
1159 | } | 1159 | } |
1160 | fsm_newstate(priv->fsm, DEV_STATE_STOPPED); | 1160 | fsm_newstate(priv->fsm, DEV_STATE_STOPPED); |
@@ -1165,7 +1165,7 @@ static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv) | |||
1165 | grp = ctcmpc_init_mpc_group(priv); | 1165 | grp = ctcmpc_init_mpc_group(priv); |
1166 | if (grp == NULL) { | 1166 | if (grp == NULL) { |
1167 | MPC_DBF_DEV(SETUP, dev, "init_mpc_group error"); | 1167 | MPC_DBF_DEV(SETUP, dev, "init_mpc_group error"); |
1168 | kfree(dev); | 1168 | free_netdev(dev); |
1169 | return NULL; | 1169 | return NULL; |
1170 | } | 1170 | } |
1171 | tasklet_init(&grp->mpc_tasklet2, | 1171 | tasklet_init(&grp->mpc_tasklet2, |
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c index 7d4d2275573c..7f11f3e48e12 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.c +++ b/drivers/scsi/be2iscsi/be_iscsi.c | |||
@@ -300,8 +300,7 @@ int beiscsi_get_host_param(struct Scsi_Host *shost, | |||
300 | enum iscsi_host_param param, char *buf) | 300 | enum iscsi_host_param param, char *buf) |
301 | { | 301 | { |
302 | struct beiscsi_hba *phba = (struct beiscsi_hba *)iscsi_host_priv(shost); | 302 | struct beiscsi_hba *phba = (struct beiscsi_hba *)iscsi_host_priv(shost); |
303 | int len = 0; | 303 | int status = 0; |
304 | int status; | ||
305 | 304 | ||
306 | SE_DEBUG(DBG_LVL_8, "In beiscsi_get_host_param, param= %d\n", param); | 305 | SE_DEBUG(DBG_LVL_8, "In beiscsi_get_host_param, param= %d\n", param); |
307 | switch (param) { | 306 | switch (param) { |
@@ -315,7 +314,7 @@ int beiscsi_get_host_param(struct Scsi_Host *shost, | |||
315 | default: | 314 | default: |
316 | return iscsi_host_get_param(shost, param, buf); | 315 | return iscsi_host_get_param(shost, param, buf); |
317 | } | 316 | } |
318 | return len; | 317 | return status; |
319 | } | 318 | } |
320 | 319 | ||
321 | int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba) | 320 | int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba) |
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index 26350e470bcc..877324fc594c 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c | |||
@@ -368,7 +368,7 @@ int mgmt_open_connection(struct beiscsi_hba *phba, | |||
368 | memset(req, 0, sizeof(*req)); | 368 | memset(req, 0, sizeof(*req)); |
369 | wrb->tag0 |= tag; | 369 | wrb->tag0 |= tag; |
370 | 370 | ||
371 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 1); | 371 | be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1); |
372 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, | 372 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, |
373 | OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD, | 373 | OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD, |
374 | sizeof(*req)); | 374 | sizeof(*req)); |
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c index cd05e049d5f6..d0c82340f0e2 100644 --- a/drivers/scsi/constants.c +++ b/drivers/scsi/constants.c | |||
@@ -1404,13 +1404,13 @@ void scsi_print_sense(char *name, struct scsi_cmnd *cmd) | |||
1404 | { | 1404 | { |
1405 | struct scsi_sense_hdr sshdr; | 1405 | struct scsi_sense_hdr sshdr; |
1406 | 1406 | ||
1407 | scmd_printk(KERN_INFO, cmd, ""); | 1407 | scmd_printk(KERN_INFO, cmd, " "); |
1408 | scsi_decode_sense_buffer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, | 1408 | scsi_decode_sense_buffer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, |
1409 | &sshdr); | 1409 | &sshdr); |
1410 | scsi_show_sense_hdr(&sshdr); | 1410 | scsi_show_sense_hdr(&sshdr); |
1411 | scsi_decode_sense_extras(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, | 1411 | scsi_decode_sense_extras(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, |
1412 | &sshdr); | 1412 | &sshdr); |
1413 | scmd_printk(KERN_INFO, cmd, ""); | 1413 | scmd_printk(KERN_INFO, cmd, " "); |
1414 | scsi_show_extd_sense(sshdr.asc, sshdr.ascq); | 1414 | scsi_show_extd_sense(sshdr.asc, sshdr.ascq); |
1415 | } | 1415 | } |
1416 | EXPORT_SYMBOL(scsi_print_sense); | 1416 | EXPORT_SYMBOL(scsi_print_sense); |
@@ -1453,7 +1453,7 @@ EXPORT_SYMBOL(scsi_show_result); | |||
1453 | 1453 | ||
1454 | void scsi_print_result(struct scsi_cmnd *cmd) | 1454 | void scsi_print_result(struct scsi_cmnd *cmd) |
1455 | { | 1455 | { |
1456 | scmd_printk(KERN_INFO, cmd, ""); | 1456 | scmd_printk(KERN_INFO, cmd, " "); |
1457 | scsi_show_result(cmd->result); | 1457 | scsi_show_result(cmd->result); |
1458 | } | 1458 | } |
1459 | EXPORT_SYMBOL(scsi_print_result); | 1459 | EXPORT_SYMBOL(scsi_print_result); |
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 4f5551b5fe53..c5d0606ad097 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c | |||
@@ -3231,6 +3231,12 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) | |||
3231 | misc_fw_support = readl(&cfgtable->misc_fw_support); | 3231 | misc_fw_support = readl(&cfgtable->misc_fw_support); |
3232 | use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; | 3232 | use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; |
3233 | 3233 | ||
3234 | /* The doorbell reset seems to cause lockups on some Smart | ||
3235 | * Arrays (e.g. P410, P410i, maybe others). Until this is | ||
3236 | * fixed or at least isolated, avoid the doorbell reset. | ||
3237 | */ | ||
3238 | use_doorbell = 0; | ||
3239 | |||
3234 | rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell); | 3240 | rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell); |
3235 | if (rc) | 3241 | if (rc) |
3236 | goto unmap_cfgtable; | 3242 | goto unmap_cfgtable; |
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index fda4de3440c4..e88bbdde49c5 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c | |||
@@ -865,7 +865,7 @@ void osd_req_read(struct osd_request *or, | |||
865 | { | 865 | { |
866 | _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len); | 866 | _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len); |
867 | WARN_ON(or->in.bio || or->in.total_bytes); | 867 | WARN_ON(or->in.bio || or->in.total_bytes); |
868 | WARN_ON(1 == (bio->bi_rw & REQ_WRITE)); | 868 | WARN_ON(bio->bi_rw & REQ_WRITE); |
869 | or->in.bio = bio; | 869 | or->in.bio = bio; |
870 | or->in.total_bytes = len; | 870 | or->in.total_bytes = len; |
871 | } | 871 | } |
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 420238cc794e..114bc5a81171 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c | |||
@@ -1838,26 +1838,33 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) | |||
1838 | 1838 | ||
1839 | qla24xx_disable_vp(vha); | 1839 | qla24xx_disable_vp(vha); |
1840 | 1840 | ||
1841 | vha->flags.delete_progress = 1; | ||
1842 | |||
1841 | fc_remove_host(vha->host); | 1843 | fc_remove_host(vha->host); |
1842 | 1844 | ||
1843 | scsi_remove_host(vha->host); | 1845 | scsi_remove_host(vha->host); |
1844 | 1846 | ||
1845 | qla2x00_free_fcports(vha); | 1847 | if (vha->timer_active) { |
1848 | qla2x00_vp_stop_timer(vha); | ||
1849 | DEBUG15(printk(KERN_INFO "scsi(%ld): timer for the vport[%d]" | ||
1850 | " = %p has stopped\n", vha->host_no, vha->vp_idx, vha)); | ||
1851 | } | ||
1846 | 1852 | ||
1847 | qla24xx_deallocate_vp_id(vha); | 1853 | qla24xx_deallocate_vp_id(vha); |
1848 | 1854 | ||
1855 | /* No pending activities shall be there on the vha now */ | ||
1856 | DEBUG(msleep(random32()%10)); /* Just to see if something falls on | ||
1857 | * the net we have placed below */ | ||
1858 | |||
1859 | BUG_ON(atomic_read(&vha->vref_count)); | ||
1860 | |||
1861 | qla2x00_free_fcports(vha); | ||
1862 | |||
1849 | mutex_lock(&ha->vport_lock); | 1863 | mutex_lock(&ha->vport_lock); |
1850 | ha->cur_vport_count--; | 1864 | ha->cur_vport_count--; |
1851 | clear_bit(vha->vp_idx, ha->vp_idx_map); | 1865 | clear_bit(vha->vp_idx, ha->vp_idx_map); |
1852 | mutex_unlock(&ha->vport_lock); | 1866 | mutex_unlock(&ha->vport_lock); |
1853 | 1867 | ||
1854 | if (vha->timer_active) { | ||
1855 | qla2x00_vp_stop_timer(vha); | ||
1856 | DEBUG15(printk ("scsi(%ld): timer for the vport[%d] = %p " | ||
1857 | "has stopped\n", | ||
1858 | vha->host_no, vha->vp_idx, vha)); | ||
1859 | } | ||
1860 | |||
1861 | if (vha->req->id && !ha->flags.cpu_affinity_enabled) { | 1868 | if (vha->req->id && !ha->flags.cpu_affinity_enabled) { |
1862 | if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS) | 1869 | if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS) |
1863 | qla_printk(KERN_WARNING, ha, | 1870 | qla_printk(KERN_WARNING, ha, |
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h index 6cfc28a25eb3..b74e6b5743dc 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.h +++ b/drivers/scsi/qla2xxx/qla_dbg.h | |||
@@ -29,8 +29,6 @@ | |||
29 | /* #define QL_DEBUG_LEVEL_17 */ /* Output EEH trace messages */ | 29 | /* #define QL_DEBUG_LEVEL_17 */ /* Output EEH trace messages */ |
30 | /* #define QL_DEBUG_LEVEL_18 */ /* Output T10 CRC trace messages */ | 30 | /* #define QL_DEBUG_LEVEL_18 */ /* Output T10 CRC trace messages */ |
31 | 31 | ||
32 | /* #define QL_PRINTK_BUF */ /* Captures printk to buffer */ | ||
33 | |||
34 | /* | 32 | /* |
35 | * Macros use for debugging the driver. | 33 | * Macros use for debugging the driver. |
36 | */ | 34 | */ |
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 3a432ea0c7a3..d2a4e1530708 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h | |||
@@ -2641,6 +2641,7 @@ struct qla_hw_data { | |||
2641 | #define MBX_UPDATE_FLASH_ACTIVE 3 | 2641 | #define MBX_UPDATE_FLASH_ACTIVE 3 |
2642 | 2642 | ||
2643 | struct mutex vport_lock; /* Virtual port synchronization */ | 2643 | struct mutex vport_lock; /* Virtual port synchronization */ |
2644 | spinlock_t vport_slock; /* order is hardware_lock, then vport_slock */ | ||
2644 | struct completion mbx_cmd_comp; /* Serialize mbx access */ | 2645 | struct completion mbx_cmd_comp; /* Serialize mbx access */ |
2645 | struct completion mbx_intr_comp; /* Used for completion notification */ | 2646 | struct completion mbx_intr_comp; /* Used for completion notification */ |
2646 | struct completion dcbx_comp; /* For set port config notification */ | 2647 | struct completion dcbx_comp; /* For set port config notification */ |
@@ -2828,6 +2829,7 @@ typedef struct scsi_qla_host { | |||
2828 | uint32_t management_server_logged_in :1; | 2829 | uint32_t management_server_logged_in :1; |
2829 | uint32_t process_response_queue :1; | 2830 | uint32_t process_response_queue :1; |
2830 | uint32_t difdix_supported:1; | 2831 | uint32_t difdix_supported:1; |
2832 | uint32_t delete_progress:1; | ||
2831 | } flags; | 2833 | } flags; |
2832 | 2834 | ||
2833 | atomic_t loop_state; | 2835 | atomic_t loop_state; |
@@ -2922,6 +2924,8 @@ typedef struct scsi_qla_host { | |||
2922 | struct req_que *req; | 2924 | struct req_que *req; |
2923 | int fw_heartbeat_counter; | 2925 | int fw_heartbeat_counter; |
2924 | int seconds_since_last_heartbeat; | 2926 | int seconds_since_last_heartbeat; |
2927 | |||
2928 | atomic_t vref_count; | ||
2925 | } scsi_qla_host_t; | 2929 | } scsi_qla_host_t; |
2926 | 2930 | ||
2927 | /* | 2931 | /* |
@@ -2932,6 +2936,22 @@ typedef struct scsi_qla_host { | |||
2932 | test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || \ | 2936 | test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || \ |
2933 | atomic_read(&ha->loop_state) == LOOP_DOWN) | 2937 | atomic_read(&ha->loop_state) == LOOP_DOWN) |
2934 | 2938 | ||
2939 | #define QLA_VHA_MARK_BUSY(__vha, __bail) do { \ | ||
2940 | atomic_inc(&__vha->vref_count); \ | ||
2941 | mb(); \ | ||
2942 | if (__vha->flags.delete_progress) { \ | ||
2943 | atomic_dec(&__vha->vref_count); \ | ||
2944 | __bail = 1; \ | ||
2945 | } else { \ | ||
2946 | __bail = 0; \ | ||
2947 | } \ | ||
2948 | } while (0) | ||
2949 | |||
2950 | #define QLA_VHA_MARK_NOT_BUSY(__vha) do { \ | ||
2951 | atomic_dec(&__vha->vref_count); \ | ||
2952 | } while (0) | ||
2953 | |||
2954 | |||
2935 | #define qla_printk(level, ha, format, arg...) \ | 2955 | #define qla_printk(level, ha, format, arg...) \ |
2936 | dev_printk(level , &((ha)->pdev->dev) , format , ## arg) | 2956 | dev_printk(level , &((ha)->pdev->dev) , format , ## arg) |
2937 | 2957 | ||
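
QLA_VHA_MARK_BUSY pairs the new vref_count with the delete_progress flag so that work started against a vport backs off once teardown begins: the count is raised first, the flag is re-checked after a barrier, and the caller drops its reference and bails out if deletion is already in flight; qla24xx_vport_delete() can then assert that the count has drained before freeing fcports. A simplified userspace sketch of that handshake using C11 atomics (not the driver code itself, just the idea):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct vport {
        atomic_int vref_count;
        atomic_bool delete_progress;
    };

    /* Returns true if the caller may proceed; false means teardown has begun. */
    static bool mark_busy(struct vport *v)
    {
        atomic_fetch_add(&v->vref_count, 1);
        if (atomic_load(&v->delete_progress)) {
            atomic_fetch_sub(&v->vref_count, 1);
            return false;
        }
        return true;
    }

    static void mark_not_busy(struct vport *v)
    {
        atomic_fetch_sub(&v->vref_count, 1);
    }

    int main(void)
    {
        struct vport v = { 0 };

        if (mark_busy(&v)) {
            /* ... issue the command against the vport ... */
            mark_not_busy(&v);
        }

        atomic_store(&v.delete_progress, true);
        printf("busy after teardown started: %s\n",
               mark_busy(&v) ? "yes" : "no");   /* prints "no" */
        return 0;
    }
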
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index d863ed2619b5..9c383baebe27 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
@@ -69,21 +69,29 @@ qla2x00_ctx_sp_free(srb_t *sp) | |||
69 | { | 69 | { |
70 | struct srb_ctx *ctx = sp->ctx; | 70 | struct srb_ctx *ctx = sp->ctx; |
71 | struct srb_iocb *iocb = ctx->u.iocb_cmd; | 71 | struct srb_iocb *iocb = ctx->u.iocb_cmd; |
72 | struct scsi_qla_host *vha = sp->fcport->vha; | ||
72 | 73 | ||
73 | del_timer_sync(&iocb->timer); | 74 | del_timer_sync(&iocb->timer); |
74 | kfree(iocb); | 75 | kfree(iocb); |
75 | kfree(ctx); | 76 | kfree(ctx); |
76 | mempool_free(sp, sp->fcport->vha->hw->srb_mempool); | 77 | mempool_free(sp, sp->fcport->vha->hw->srb_mempool); |
78 | |||
79 | QLA_VHA_MARK_NOT_BUSY(vha); | ||
77 | } | 80 | } |
78 | 81 | ||
79 | inline srb_t * | 82 | inline srb_t * |
80 | qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size, | 83 | qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size, |
81 | unsigned long tmo) | 84 | unsigned long tmo) |
82 | { | 85 | { |
83 | srb_t *sp; | 86 | srb_t *sp = NULL; |
84 | struct qla_hw_data *ha = vha->hw; | 87 | struct qla_hw_data *ha = vha->hw; |
85 | struct srb_ctx *ctx; | 88 | struct srb_ctx *ctx; |
86 | struct srb_iocb *iocb; | 89 | struct srb_iocb *iocb; |
90 | uint8_t bail; | ||
91 | |||
92 | QLA_VHA_MARK_BUSY(vha, bail); | ||
93 | if (bail) | ||
94 | return NULL; | ||
87 | 95 | ||
88 | sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL); | 96 | sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL); |
89 | if (!sp) | 97 | if (!sp) |
@@ -116,6 +124,8 @@ qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size, | |||
116 | iocb->timer.function = qla2x00_ctx_sp_timeout; | 124 | iocb->timer.function = qla2x00_ctx_sp_timeout; |
117 | add_timer(&iocb->timer); | 125 | add_timer(&iocb->timer); |
118 | done: | 126 | done: |
127 | if (!sp) | ||
128 | QLA_VHA_MARK_NOT_BUSY(vha); | ||
119 | return sp; | 129 | return sp; |
120 | } | 130 | } |
121 | 131 | ||
@@ -1777,11 +1787,15 @@ qla2x00_init_rings(scsi_qla_host_t *vha) | |||
1777 | qla2x00_init_response_q_entries(rsp); | 1787 | qla2x00_init_response_q_entries(rsp); |
1778 | } | 1788 | } |
1779 | 1789 | ||
1790 | spin_lock_irqsave(&ha->vport_slock, flags); | ||
1780 | /* Clear RSCN queue. */ | 1791 | /* Clear RSCN queue. */ |
1781 | list_for_each_entry(vp, &ha->vp_list, list) { | 1792 | list_for_each_entry(vp, &ha->vp_list, list) { |
1782 | vp->rscn_in_ptr = 0; | 1793 | vp->rscn_in_ptr = 0; |
1783 | vp->rscn_out_ptr = 0; | 1794 | vp->rscn_out_ptr = 0; |
1784 | } | 1795 | } |
1796 | |||
1797 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
1798 | |||
1785 | ha->isp_ops->config_rings(vha); | 1799 | ha->isp_ops->config_rings(vha); |
1786 | 1800 | ||
1787 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 1801 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
@@ -3218,12 +3232,17 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, | |||
3218 | /* Bypass virtual ports of the same host. */ | 3232 | /* Bypass virtual ports of the same host. */ |
3219 | found = 0; | 3233 | found = 0; |
3220 | if (ha->num_vhosts) { | 3234 | if (ha->num_vhosts) { |
3235 | unsigned long flags; | ||
3236 | |||
3237 | spin_lock_irqsave(&ha->vport_slock, flags); | ||
3221 | list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { | 3238 | list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { |
3222 | if (new_fcport->d_id.b24 == vp->d_id.b24) { | 3239 | if (new_fcport->d_id.b24 == vp->d_id.b24) { |
3223 | found = 1; | 3240 | found = 1; |
3224 | break; | 3241 | break; |
3225 | } | 3242 | } |
3226 | } | 3243 | } |
3244 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
3245 | |||
3227 | if (found) | 3246 | if (found) |
3228 | continue; | 3247 | continue; |
3229 | } | 3248 | } |
@@ -3343,6 +3362,7 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev) | |||
3343 | struct qla_hw_data *ha = vha->hw; | 3362 | struct qla_hw_data *ha = vha->hw; |
3344 | struct scsi_qla_host *vp; | 3363 | struct scsi_qla_host *vp; |
3345 | struct scsi_qla_host *tvp; | 3364 | struct scsi_qla_host *tvp; |
3365 | unsigned long flags = 0; | ||
3346 | 3366 | ||
3347 | rval = QLA_SUCCESS; | 3367 | rval = QLA_SUCCESS; |
3348 | 3368 | ||
@@ -3367,6 +3387,8 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev) | |||
3367 | /* Check for loop ID being already in use. */ | 3387 | /* Check for loop ID being already in use. */ |
3368 | found = 0; | 3388 | found = 0; |
3369 | fcport = NULL; | 3389 | fcport = NULL; |
3390 | |||
3391 | spin_lock_irqsave(&ha->vport_slock, flags); | ||
3370 | list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { | 3392 | list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { |
3371 | list_for_each_entry(fcport, &vp->vp_fcports, list) { | 3393 | list_for_each_entry(fcport, &vp->vp_fcports, list) { |
3372 | if (fcport->loop_id == dev->loop_id && | 3394 | if (fcport->loop_id == dev->loop_id && |
@@ -3379,6 +3401,7 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev) | |||
3379 | if (found) | 3401 | if (found) |
3380 | break; | 3402 | break; |
3381 | } | 3403 | } |
3404 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
3382 | 3405 | ||
3383 | /* If not in use then it is free to use. */ | 3406 | /* If not in use then it is free to use. */ |
3384 | if (!found) { | 3407 | if (!found) { |
@@ -3791,14 +3814,27 @@ void | |||
3791 | qla2x00_update_fcports(scsi_qla_host_t *base_vha) | 3814 | qla2x00_update_fcports(scsi_qla_host_t *base_vha) |
3792 | { | 3815 | { |
3793 | fc_port_t *fcport; | 3816 | fc_port_t *fcport; |
3794 | struct scsi_qla_host *tvp, *vha; | 3817 | struct scsi_qla_host *vha; |
3818 | struct qla_hw_data *ha = base_vha->hw; | ||
3819 | unsigned long flags; | ||
3795 | 3820 | ||
3821 | spin_lock_irqsave(&ha->vport_slock, flags); | ||
3796 | /* Go with deferred removal of rport references. */ | 3822 | /* Go with deferred removal of rport references. */ |
3797 | list_for_each_entry_safe(vha, tvp, &base_vha->hw->vp_list, list) | 3823 | list_for_each_entry(vha, &base_vha->hw->vp_list, list) { |
3798 | list_for_each_entry(fcport, &vha->vp_fcports, list) | 3824 | atomic_inc(&vha->vref_count); |
3825 | list_for_each_entry(fcport, &vha->vp_fcports, list) { | ||
3799 | if (fcport && fcport->drport && | 3826 | if (fcport && fcport->drport && |
3800 | atomic_read(&fcport->state) != FCS_UNCONFIGURED) | 3827 | atomic_read(&fcport->state) != FCS_UNCONFIGURED) { |
3828 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
3829 | |||
3801 | qla2x00_rport_del(fcport); | 3830 | qla2x00_rport_del(fcport); |
3831 | |||
3832 | spin_lock_irqsave(&ha->vport_slock, flags); | ||
3833 | } | ||
3834 | } | ||
3835 | atomic_dec(&vha->vref_count); | ||
3836 | } | ||
3837 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
3802 | } | 3838 | } |
3803 | 3839 | ||
3804 | void | 3840 | void |
@@ -3806,7 +3842,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) | |||
3806 | { | 3842 | { |
3807 | struct qla_hw_data *ha = vha->hw; | 3843 | struct qla_hw_data *ha = vha->hw; |
3808 | struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev); | 3844 | struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev); |
3809 | struct scsi_qla_host *tvp; | 3845 | unsigned long flags; |
3810 | 3846 | ||
3811 | vha->flags.online = 0; | 3847 | vha->flags.online = 0; |
3812 | ha->flags.chip_reset_done = 0; | 3848 | ha->flags.chip_reset_done = 0; |
@@ -3824,8 +3860,18 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) | |||
3824 | if (atomic_read(&vha->loop_state) != LOOP_DOWN) { | 3860 | if (atomic_read(&vha->loop_state) != LOOP_DOWN) { |
3825 | atomic_set(&vha->loop_state, LOOP_DOWN); | 3861 | atomic_set(&vha->loop_state, LOOP_DOWN); |
3826 | qla2x00_mark_all_devices_lost(vha, 0); | 3862 | qla2x00_mark_all_devices_lost(vha, 0); |
3827 | list_for_each_entry_safe(vp, tvp, &base_vha->hw->vp_list, list) | 3863 | |
3864 | spin_lock_irqsave(&ha->vport_slock, flags); | ||
3865 | list_for_each_entry(vp, &base_vha->hw->vp_list, list) { | ||
3866 | atomic_inc(&vp->vref_count); | ||
3867 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
3868 | |||
3828 | qla2x00_mark_all_devices_lost(vp, 0); | 3869 | qla2x00_mark_all_devices_lost(vp, 0); |
3870 | |||
3871 | spin_lock_irqsave(&ha->vport_slock, flags); | ||
3872 | atomic_dec(&vp->vref_count); | ||
3873 | } | ||
3874 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
3829 | } else { | 3875 | } else { |
3830 | if (!atomic_read(&vha->loop_down_timer)) | 3876 | if (!atomic_read(&vha->loop_down_timer)) |
3831 | atomic_set(&vha->loop_down_timer, | 3877 | atomic_set(&vha->loop_down_timer, |
@@ -3862,8 +3908,8 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) | |||
3862 | uint8_t status = 0; | 3908 | uint8_t status = 0; |
3863 | struct qla_hw_data *ha = vha->hw; | 3909 | struct qla_hw_data *ha = vha->hw; |
3864 | struct scsi_qla_host *vp; | 3910 | struct scsi_qla_host *vp; |
3865 | struct scsi_qla_host *tvp; | ||
3866 | struct req_que *req = ha->req_q_map[0]; | 3911 | struct req_que *req = ha->req_q_map[0]; |
3912 | unsigned long flags; | ||
3867 | 3913 | ||
3868 | if (vha->flags.online) { | 3914 | if (vha->flags.online) { |
3869 | qla2x00_abort_isp_cleanup(vha); | 3915 | qla2x00_abort_isp_cleanup(vha); |
@@ -3970,10 +4016,21 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) | |||
3970 | DEBUG(printk(KERN_INFO | 4016 | DEBUG(printk(KERN_INFO |
3971 | "qla2x00_abort_isp(%ld): succeeded.\n", | 4017 | "qla2x00_abort_isp(%ld): succeeded.\n", |
3972 | vha->host_no)); | 4018 | vha->host_no)); |
3973 | list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { | 4019 | |
3974 | if (vp->vp_idx) | 4020 | spin_lock_irqsave(&ha->vport_slock, flags); |
4021 | list_for_each_entry(vp, &ha->vp_list, list) { | ||
4022 | if (vp->vp_idx) { | ||
4023 | atomic_inc(&vp->vref_count); | ||
4024 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
4025 | |||
3975 | qla2x00_vp_abort_isp(vp); | 4026 | qla2x00_vp_abort_isp(vp); |
4027 | |||
4028 | spin_lock_irqsave(&ha->vport_slock, flags); | ||
4029 | atomic_dec(&vp->vref_count); | ||
4030 | } | ||
3976 | } | 4031 | } |
4032 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
4033 | |||
3977 | } else { | 4034 | } else { |
3978 | qla_printk(KERN_INFO, ha, | 4035 | qla_printk(KERN_INFO, ha, |
3979 | "qla2x00_abort_isp: **** FAILED ****\n"); | 4036 | "qla2x00_abort_isp: **** FAILED ****\n"); |
@@ -5185,7 +5242,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha) | |||
5185 | struct req_que *req = ha->req_q_map[0]; | 5242 | struct req_que *req = ha->req_q_map[0]; |
5186 | struct rsp_que *rsp = ha->rsp_q_map[0]; | 5243 | struct rsp_que *rsp = ha->rsp_q_map[0]; |
5187 | struct scsi_qla_host *vp; | 5244 | struct scsi_qla_host *vp; |
5188 | struct scsi_qla_host *tvp; | 5245 | unsigned long flags; |
5189 | 5246 | ||
5190 | status = qla2x00_init_rings(vha); | 5247 | status = qla2x00_init_rings(vha); |
5191 | if (!status) { | 5248 | if (!status) { |
@@ -5272,10 +5329,21 @@ qla82xx_restart_isp(scsi_qla_host_t *vha) | |||
5272 | DEBUG(printk(KERN_INFO | 5329 | DEBUG(printk(KERN_INFO |
5273 | "qla82xx_restart_isp(%ld): succeeded.\n", | 5330 | "qla82xx_restart_isp(%ld): succeeded.\n", |
5274 | vha->host_no)); | 5331 | vha->host_no)); |
5275 | list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { | 5332 | |
5276 | if (vp->vp_idx) | 5333 | spin_lock_irqsave(&ha->vport_slock, flags); |
5334 | list_for_each_entry(vp, &ha->vp_list, list) { | ||
5335 | if (vp->vp_idx) { | ||
5336 | atomic_inc(&vp->vref_count); | ||
5337 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
5338 | |||
5277 | qla2x00_vp_abort_isp(vp); | 5339 | qla2x00_vp_abort_isp(vp); |
5340 | |||
5341 | spin_lock_irqsave(&ha->vport_slock, flags); | ||
5342 | atomic_dec(&vp->vref_count); | ||
5343 | } | ||
5278 | } | 5344 | } |
5345 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
5346 | |||
5279 | } else { | 5347 | } else { |
5280 | qla_printk(KERN_INFO, ha, | 5348 | qla_printk(KERN_INFO, ha, |
5281 | "qla82xx_restart_isp: **** FAILED ****\n"); | 5349 | "qla82xx_restart_isp: **** FAILED ****\n"); |
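Note: several qla_init.c hunks above repeat one traversal pattern: walk vp_list under the new vport_slock, pin each entry with vref_count, drop the spinlock around the potentially sleeping per-port call, then re-take it and release the reference. A hedged userspace sketch of that pattern (POSIX spinlocks, hypothetical names, delete path sketched separately below):

#include <pthread.h>
#include <stdatomic.h>

struct vport {
	struct vport *next;
	atomic_int vref_count;
};

static pthread_spinlock_t vport_slock;	/* pthread_spin_init() assumed done at startup */
static struct vport *vp_list;

static void do_blocking_work(struct vport *vp)
{
	(void)vp;	/* stand-in for e.g. a per-port abort/update that may sleep */
}

static void for_each_vport_blocking(void)
{
	struct vport *vp;

	pthread_spin_lock(&vport_slock);
	for (vp = vp_list; vp; vp = vp->next) {
		atomic_fetch_add(&vp->vref_count, 1);	/* pin: delete path waits for this to drop */
		pthread_spin_unlock(&vport_slock);

		do_blocking_work(vp);			/* list lock not held here */

		pthread_spin_lock(&vport_slock);
		atomic_fetch_sub(&vp->vref_count, 1);	/* entry (and vp->next) stayed valid while pinned */
	}
	pthread_spin_unlock(&vport_slock);
}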
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 6982ba70e12a..28f65be19dad 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
@@ -1706,19 +1706,20 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) | |||
1706 | cp->result = DID_ERROR << 16; | 1706 | cp->result = DID_ERROR << 16; |
1707 | break; | 1707 | break; |
1708 | } | 1708 | } |
1709 | } else if (!lscsi_status) { | 1709 | } else { |
1710 | DEBUG2(qla_printk(KERN_INFO, ha, | 1710 | DEBUG2(qla_printk(KERN_INFO, ha, |
1711 | "scsi(%ld:%d:%d) Dropped frame(s) detected (0x%x " | 1711 | "scsi(%ld:%d:%d) Dropped frame(s) detected (0x%x " |
1712 | "of 0x%x bytes).\n", vha->host_no, cp->device->id, | 1712 | "of 0x%x bytes).\n", vha->host_no, cp->device->id, |
1713 | cp->device->lun, resid, scsi_bufflen(cp))); | 1713 | cp->device->lun, resid, scsi_bufflen(cp))); |
1714 | 1714 | ||
1715 | cp->result = DID_ERROR << 16; | 1715 | cp->result = DID_ERROR << 16 | lscsi_status; |
1716 | break; | 1716 | goto check_scsi_status; |
1717 | } | 1717 | } |
1718 | 1718 | ||
1719 | cp->result = DID_OK << 16 | lscsi_status; | 1719 | cp->result = DID_OK << 16 | lscsi_status; |
1720 | logit = 0; | 1720 | logit = 0; |
1721 | 1721 | ||
1722 | check_scsi_status: | ||
1722 | /* | 1723 | /* |
1723 | * Check to see if SCSI Status is non zero. If so report SCSI | 1724 | * Check to see if SCSI Status is non zero. If so report SCSI |
1724 | * Status. | 1725 | * Status. |
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 6009b0c69488..a595ec8264f8 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c | |||
@@ -2913,7 +2913,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, | |||
2913 | uint16_t stat = le16_to_cpu(rptid_entry->vp_idx); | 2913 | uint16_t stat = le16_to_cpu(rptid_entry->vp_idx); |
2914 | struct qla_hw_data *ha = vha->hw; | 2914 | struct qla_hw_data *ha = vha->hw; |
2915 | scsi_qla_host_t *vp; | 2915 | scsi_qla_host_t *vp; |
2916 | scsi_qla_host_t *tvp; | 2916 | unsigned long flags; |
2917 | 2917 | ||
2918 | if (rptid_entry->entry_status != 0) | 2918 | if (rptid_entry->entry_status != 0) |
2919 | return; | 2919 | return; |
@@ -2945,9 +2945,12 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, | |||
2945 | return; | 2945 | return; |
2946 | } | 2946 | } |
2947 | 2947 | ||
2948 | list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) | 2948 | spin_lock_irqsave(&ha->vport_slock, flags); |
2949 | list_for_each_entry(vp, &ha->vp_list, list) | ||
2949 | if (vp_idx == vp->vp_idx) | 2950 | if (vp_idx == vp->vp_idx) |
2950 | break; | 2951 | break; |
2952 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
2953 | |||
2951 | if (!vp) | 2954 | if (!vp) |
2952 | return; | 2955 | return; |
2953 | 2956 | ||
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index 987c5b0ca78e..2b69392a71a1 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c | |||
@@ -30,6 +30,7 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha) | |||
30 | { | 30 | { |
31 | uint32_t vp_id; | 31 | uint32_t vp_id; |
32 | struct qla_hw_data *ha = vha->hw; | 32 | struct qla_hw_data *ha = vha->hw; |
33 | unsigned long flags; | ||
33 | 34 | ||
34 | /* Find an empty slot and assign an vp_id */ | 35 | /* Find an empty slot and assign an vp_id */ |
35 | mutex_lock(&ha->vport_lock); | 36 | mutex_lock(&ha->vport_lock); |
@@ -44,7 +45,11 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha) | |||
44 | set_bit(vp_id, ha->vp_idx_map); | 45 | set_bit(vp_id, ha->vp_idx_map); |
45 | ha->num_vhosts++; | 46 | ha->num_vhosts++; |
46 | vha->vp_idx = vp_id; | 47 | vha->vp_idx = vp_id; |
48 | |||
49 | spin_lock_irqsave(&ha->vport_slock, flags); | ||
47 | list_add_tail(&vha->list, &ha->vp_list); | 50 | list_add_tail(&vha->list, &ha->vp_list); |
51 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
52 | |||
48 | mutex_unlock(&ha->vport_lock); | 53 | mutex_unlock(&ha->vport_lock); |
49 | return vp_id; | 54 | return vp_id; |
50 | } | 55 | } |
@@ -54,12 +59,31 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha) | |||
54 | { | 59 | { |
55 | uint16_t vp_id; | 60 | uint16_t vp_id; |
56 | struct qla_hw_data *ha = vha->hw; | 61 | struct qla_hw_data *ha = vha->hw; |
62 | unsigned long flags = 0; | ||
57 | 63 | ||
58 | mutex_lock(&ha->vport_lock); | 64 | mutex_lock(&ha->vport_lock); |
65 | /* | ||
66 | * Wait for all pending activities to finish before removing vport from | ||
67 | * the list. | ||
68 | * Lock needs to be held for safe removal from the list (it | ||
69 | * ensures no active vp_list traversal while the vport is removed | ||
70 | * from the queue) | ||
71 | */ | ||
72 | spin_lock_irqsave(&ha->vport_slock, flags); | ||
73 | while (atomic_read(&vha->vref_count)) { | ||
74 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
75 | |||
76 | msleep(500); | ||
77 | |||
78 | spin_lock_irqsave(&ha->vport_slock, flags); | ||
79 | } | ||
80 | list_del(&vha->list); | ||
81 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
82 | |||
59 | vp_id = vha->vp_idx; | 83 | vp_id = vha->vp_idx; |
60 | ha->num_vhosts--; | 84 | ha->num_vhosts--; |
61 | clear_bit(vp_id, ha->vp_idx_map); | 85 | clear_bit(vp_id, ha->vp_idx_map); |
62 | list_del(&vha->list); | 86 | |
63 | mutex_unlock(&ha->vport_lock); | 87 | mutex_unlock(&ha->vport_lock); |
64 | } | 88 | } |
65 | 89 | ||
@@ -68,12 +92,17 @@ qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name) | |||
68 | { | 92 | { |
69 | scsi_qla_host_t *vha; | 93 | scsi_qla_host_t *vha; |
70 | struct scsi_qla_host *tvha; | 94 | struct scsi_qla_host *tvha; |
95 | unsigned long flags; | ||
71 | 96 | ||
97 | spin_lock_irqsave(&ha->vport_slock, flags); | ||
72 | /* Locate matching device in database. */ | 98 | /* Locate matching device in database. */ |
73 | list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) { | 99 | list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) { |
74 | if (!memcmp(port_name, vha->port_name, WWN_SIZE)) | 100 | if (!memcmp(port_name, vha->port_name, WWN_SIZE)) { |
101 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
75 | return vha; | 102 | return vha; |
103 | } | ||
76 | } | 104 | } |
105 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
77 | return NULL; | 106 | return NULL; |
78 | } | 107 | } |
79 | 108 | ||
@@ -93,6 +122,12 @@ qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name) | |||
93 | static void | 122 | static void |
94 | qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha) | 123 | qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha) |
95 | { | 124 | { |
125 | /* | ||
126 | * !!! NOTE !!! | ||
127 | * This function, if called in contexts other than vp create, disable | ||
128 | * or delete, please make sure this is synchronized with the | ||
129 | * delete thread. | ||
130 | */ | ||
96 | fc_port_t *fcport; | 131 | fc_port_t *fcport; |
97 | 132 | ||
98 | list_for_each_entry(fcport, &vha->vp_fcports, list) { | 133 | list_for_each_entry(fcport, &vha->vp_fcports, list) { |
@@ -100,7 +135,6 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha) | |||
100 | "loop_id=0x%04x :%x\n", | 135 | "loop_id=0x%04x :%x\n", |
101 | vha->host_no, fcport->loop_id, fcport->vp_idx)); | 136 | vha->host_no, fcport->loop_id, fcport->vp_idx)); |
102 | 137 | ||
103 | atomic_set(&fcport->state, FCS_DEVICE_DEAD); | ||
104 | qla2x00_mark_device_lost(vha, fcport, 0, 0); | 138 | qla2x00_mark_device_lost(vha, fcport, 0, 0); |
105 | atomic_set(&fcport->state, FCS_UNCONFIGURED); | 139 | atomic_set(&fcport->state, FCS_UNCONFIGURED); |
106 | } | 140 | } |
@@ -194,12 +228,17 @@ qla24xx_configure_vp(scsi_qla_host_t *vha) | |||
194 | void | 228 | void |
195 | qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb) | 229 | qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb) |
196 | { | 230 | { |
197 | scsi_qla_host_t *vha, *tvha; | 231 | scsi_qla_host_t *vha; |
198 | struct qla_hw_data *ha = rsp->hw; | 232 | struct qla_hw_data *ha = rsp->hw; |
199 | int i = 0; | 233 | int i = 0; |
234 | unsigned long flags; | ||
200 | 235 | ||
201 | list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) { | 236 | spin_lock_irqsave(&ha->vport_slock, flags); |
237 | list_for_each_entry(vha, &ha->vp_list, list) { | ||
202 | if (vha->vp_idx) { | 238 | if (vha->vp_idx) { |
239 | atomic_inc(&vha->vref_count); | ||
240 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
241 | |||
203 | switch (mb[0]) { | 242 | switch (mb[0]) { |
204 | case MBA_LIP_OCCURRED: | 243 | case MBA_LIP_OCCURRED: |
205 | case MBA_LOOP_UP: | 244 | case MBA_LOOP_UP: |
@@ -215,9 +254,13 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb) | |||
215 | qla2x00_async_event(vha, rsp, mb); | 254 | qla2x00_async_event(vha, rsp, mb); |
216 | break; | 255 | break; |
217 | } | 256 | } |
257 | |||
258 | spin_lock_irqsave(&ha->vport_slock, flags); | ||
259 | atomic_dec(&vha->vref_count); | ||
218 | } | 260 | } |
219 | i++; | 261 | i++; |
220 | } | 262 | } |
263 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
221 | } | 264 | } |
222 | 265 | ||
223 | int | 266 | int |
@@ -297,7 +340,7 @@ qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha) | |||
297 | int ret; | 340 | int ret; |
298 | struct qla_hw_data *ha = vha->hw; | 341 | struct qla_hw_data *ha = vha->hw; |
299 | scsi_qla_host_t *vp; | 342 | scsi_qla_host_t *vp; |
300 | struct scsi_qla_host *tvp; | 343 | unsigned long flags = 0; |
301 | 344 | ||
302 | if (vha->vp_idx) | 345 | if (vha->vp_idx) |
303 | return; | 346 | return; |
@@ -309,10 +352,19 @@ qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha) | |||
309 | if (!(ha->current_topology & ISP_CFG_F)) | 352 | if (!(ha->current_topology & ISP_CFG_F)) |
310 | return; | 353 | return; |
311 | 354 | ||
312 | list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { | 355 | spin_lock_irqsave(&ha->vport_slock, flags); |
313 | if (vp->vp_idx) | 356 | list_for_each_entry(vp, &ha->vp_list, list) { |
357 | if (vp->vp_idx) { | ||
358 | atomic_inc(&vp->vref_count); | ||
359 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
360 | |||
314 | ret = qla2x00_do_dpc_vp(vp); | 361 | ret = qla2x00_do_dpc_vp(vp); |
362 | |||
363 | spin_lock_irqsave(&ha->vport_slock, flags); | ||
364 | atomic_dec(&vp->vref_count); | ||
365 | } | ||
315 | } | 366 | } |
367 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
316 | } | 368 | } |
317 | 369 | ||
318 | int | 370 | int |
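Note: the qla24xx_deallocate_vp_id() hunk shows the other half of the scheme: the vport is only unlinked from vp_list once every outstanding reference has drained, sleeping with the lock dropped in between. A rough userspace sketch (hypothetical names; deletion is assumed to have been flagged so new users bail out, and the real driver also holds vport_lock around this):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <unistd.h>

struct vport {
	struct vport *next;
	atomic_int vref_count;
	atomic_bool delete_progress;
};

static pthread_spinlock_t vport_slock;	/* protects the list; init omitted */

static void vport_remove(struct vport **head, struct vport *vp)
{
	struct vport **pp;

	vp->delete_progress = true;		/* new vport_mark_busy() callers now bail out */

	pthread_spin_lock(&vport_slock);
	while (atomic_load(&vp->vref_count) != 0) {
		pthread_spin_unlock(&vport_slock);
		usleep(500 * 1000);		/* roughly msleep(500): let busy holders finish */
		pthread_spin_lock(&vport_slock);
	}
	for (pp = head; *pp; pp = &(*pp)->next)	/* unlink only once nobody holds it */
		if (*pp == vp) {
			*pp = vp->next;
			break;
		}
	pthread_spin_unlock(&vport_slock);
}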
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index 915b77a6e193..0a71cc71eab2 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c | |||
@@ -2672,6 +2672,19 @@ qla82xx_start_scsi(srb_t *sp) | |||
2672 | sufficient_dsds: | 2672 | sufficient_dsds: |
2673 | req_cnt = 1; | 2673 | req_cnt = 1; |
2674 | 2674 | ||
2675 | if (req->cnt < (req_cnt + 2)) { | ||
2676 | cnt = (uint16_t)RD_REG_DWORD_RELAXED( | ||
2677 | ®->req_q_out[0]); | ||
2678 | if (req->ring_index < cnt) | ||
2679 | req->cnt = cnt - req->ring_index; | ||
2680 | else | ||
2681 | req->cnt = req->length - | ||
2682 | (req->ring_index - cnt); | ||
2683 | } | ||
2684 | |||
2685 | if (req->cnt < (req_cnt + 2)) | ||
2686 | goto queuing_error; | ||
2687 | |||
2675 | ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC); | 2688 | ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC); |
2676 | if (!sp->ctx) { | 2689 | if (!sp->ctx) { |
2677 | DEBUG(printk(KERN_INFO | 2690 | DEBUG(printk(KERN_INFO |
@@ -3307,16 +3320,19 @@ qla82xx_check_fw_alive(scsi_qla_host_t *vha) | |||
3307 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); | 3320 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); |
3308 | } | 3321 | } |
3309 | qla2xxx_wake_dpc(vha); | 3322 | qla2xxx_wake_dpc(vha); |
3323 | ha->flags.fw_hung = 1; | ||
3310 | if (ha->flags.mbox_busy) { | 3324 | if (ha->flags.mbox_busy) { |
3311 | ha->flags.fw_hung = 1; | ||
3312 | ha->flags.mbox_int = 1; | 3325 | ha->flags.mbox_int = 1; |
3313 | DEBUG2(qla_printk(KERN_ERR, ha, | 3326 | DEBUG2(qla_printk(KERN_ERR, ha, |
3314 | "Due to fw hung, doing premature " | 3327 | "Due to fw hung, doing premature " |
3315 | "completion of mbx command\n")); | 3328 | "completion of mbx command\n")); |
3316 | complete(&ha->mbx_intr_comp); | 3329 | if (test_bit(MBX_INTR_WAIT, |
3330 | &ha->mbx_cmd_flags)) | ||
3331 | complete(&ha->mbx_intr_comp); | ||
3317 | } | 3332 | } |
3318 | } | 3333 | } |
3319 | } | 3334 | } else |
3335 | vha->seconds_since_last_heartbeat = 0; | ||
3320 | vha->fw_heartbeat_counter = fw_heartbeat_counter; | 3336 | vha->fw_heartbeat_counter = fw_heartbeat_counter; |
3321 | } | 3337 | } |
3322 | 3338 | ||
@@ -3418,13 +3434,15 @@ void qla82xx_watchdog(scsi_qla_host_t *vha) | |||
3418 | "%s(): Adapter reset needed!\n", __func__); | 3434 | "%s(): Adapter reset needed!\n", __func__); |
3419 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); | 3435 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); |
3420 | qla2xxx_wake_dpc(vha); | 3436 | qla2xxx_wake_dpc(vha); |
3437 | ha->flags.fw_hung = 1; | ||
3421 | if (ha->flags.mbox_busy) { | 3438 | if (ha->flags.mbox_busy) { |
3422 | ha->flags.fw_hung = 1; | ||
3423 | ha->flags.mbox_int = 1; | 3439 | ha->flags.mbox_int = 1; |
3424 | DEBUG2(qla_printk(KERN_ERR, ha, | 3440 | DEBUG2(qla_printk(KERN_ERR, ha, |
3425 | "Need reset, doing premature " | 3441 | "Need reset, doing premature " |
3426 | "completion of mbx command\n")); | 3442 | "completion of mbx command\n")); |
3427 | complete(&ha->mbx_intr_comp); | 3443 | if (test_bit(MBX_INTR_WAIT, |
3444 | &ha->mbx_cmd_flags)) | ||
3445 | complete(&ha->mbx_intr_comp); | ||
3428 | } | 3446 | } |
3429 | } else { | 3447 | } else { |
3430 | qla82xx_check_fw_alive(vha); | 3448 | qla82xx_check_fw_alive(vha); |
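Note: the qla82xx_start_scsi() hunk earlier in this file recomputes the free space in the request ring from the driver's producer index and the consumer pointer read back from the controller before reserving new entries. The arithmetic is the usual circular-buffer free count with a wrap-around case; a small standalone sketch (hypothetical names):

#include <stdint.h>

struct req_ring {
	uint16_t length;	/* total entries in the ring */
	uint16_t ring_index;	/* producer index: next slot the driver writes */
	uint16_t cnt;		/* cached number of free entries */
};

/* 'out' is the consumer index read back from the controller's out-pointer register. */
static void req_ring_update_free(struct req_ring *req, uint16_t out)
{
	if (req->ring_index < out)
		req->cnt = out - req->ring_index;			/* free space up to the consumer */
	else
		req->cnt = req->length - (req->ring_index - out);	/* producer has wrapped past it */
}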
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 8c80b49ac1c4..1e4bff695254 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
@@ -2341,16 +2341,28 @@ probe_out: | |||
2341 | static void | 2341 | static void |
2342 | qla2x00_remove_one(struct pci_dev *pdev) | 2342 | qla2x00_remove_one(struct pci_dev *pdev) |
2343 | { | 2343 | { |
2344 | scsi_qla_host_t *base_vha, *vha, *temp; | 2344 | scsi_qla_host_t *base_vha, *vha; |
2345 | struct qla_hw_data *ha; | 2345 | struct qla_hw_data *ha; |
2346 | unsigned long flags; | ||
2346 | 2347 | ||
2347 | base_vha = pci_get_drvdata(pdev); | 2348 | base_vha = pci_get_drvdata(pdev); |
2348 | ha = base_vha->hw; | 2349 | ha = base_vha->hw; |
2349 | 2350 | ||
2350 | list_for_each_entry_safe(vha, temp, &ha->vp_list, list) { | 2351 | spin_lock_irqsave(&ha->vport_slock, flags); |
2351 | if (vha && vha->fc_vport) | 2352 | list_for_each_entry(vha, &ha->vp_list, list) { |
2353 | atomic_inc(&vha->vref_count); | ||
2354 | |||
2355 | if (vha && vha->fc_vport) { | ||
2356 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
2357 | |||
2352 | fc_vport_terminate(vha->fc_vport); | 2358 | fc_vport_terminate(vha->fc_vport); |
2359 | |||
2360 | spin_lock_irqsave(&ha->vport_slock, flags); | ||
2361 | } | ||
2362 | |||
2363 | atomic_dec(&vha->vref_count); | ||
2353 | } | 2364 | } |
2365 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
2354 | 2366 | ||
2355 | set_bit(UNLOADING, &base_vha->dpc_flags); | 2367 | set_bit(UNLOADING, &base_vha->dpc_flags); |
2356 | 2368 | ||
@@ -2975,10 +2987,17 @@ static struct qla_work_evt * | |||
2975 | qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type) | 2987 | qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type) |
2976 | { | 2988 | { |
2977 | struct qla_work_evt *e; | 2989 | struct qla_work_evt *e; |
2990 | uint8_t bail; | ||
2991 | |||
2992 | QLA_VHA_MARK_BUSY(vha, bail); | ||
2993 | if (bail) | ||
2994 | return NULL; | ||
2978 | 2995 | ||
2979 | e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC); | 2996 | e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC); |
2980 | if (!e) | 2997 | if (!e) { |
2998 | QLA_VHA_MARK_NOT_BUSY(vha); | ||
2981 | return NULL; | 2999 | return NULL; |
3000 | } | ||
2982 | 3001 | ||
2983 | INIT_LIST_HEAD(&e->list); | 3002 | INIT_LIST_HEAD(&e->list); |
2984 | e->type = type; | 3003 | e->type = type; |
@@ -3135,6 +3154,9 @@ qla2x00_do_work(struct scsi_qla_host *vha) | |||
3135 | } | 3154 | } |
3136 | if (e->flags & QLA_EVT_FLAG_FREE) | 3155 | if (e->flags & QLA_EVT_FLAG_FREE) |
3137 | kfree(e); | 3156 | kfree(e); |
3157 | |||
3158 | /* For each work completed decrement vha ref count */ | ||
3159 | QLA_VHA_MARK_NOT_BUSY(vha); | ||
3138 | } | 3160 | } |
3139 | } | 3161 | } |
3140 | 3162 | ||
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index e75ccb91317d..8edbccb3232d 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h | |||
@@ -7,9 +7,9 @@ | |||
7 | /* | 7 | /* |
8 | * Driver version | 8 | * Driver version |
9 | */ | 9 | */ |
10 | #define QLA2XXX_VERSION "8.03.03-k0" | 10 | #define QLA2XXX_VERSION "8.03.04-k0" |
11 | 11 | ||
12 | #define QLA_DRIVER_MAJOR_VER 8 | 12 | #define QLA_DRIVER_MAJOR_VER 8 |
13 | #define QLA_DRIVER_MINOR_VER 3 | 13 | #define QLA_DRIVER_MINOR_VER 3 |
14 | #define QLA_DRIVER_PATCH_VER 3 | 14 | #define QLA_DRIVER_PATCH_VER 4 |
15 | #define QLA_DRIVER_BETA_VER 0 | 15 | #define QLA_DRIVER_BETA_VER 0 |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 9ade720422c6..ee02d3838a0a 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -1011,8 +1011,8 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask) | |||
1011 | 1011 | ||
1012 | err_exit: | 1012 | err_exit: |
1013 | scsi_release_buffers(cmd); | 1013 | scsi_release_buffers(cmd); |
1014 | scsi_put_command(cmd); | ||
1015 | cmd->request->special = NULL; | 1014 | cmd->request->special = NULL; |
1015 | scsi_put_command(cmd); | ||
1016 | return error; | 1016 | return error; |
1017 | } | 1017 | } |
1018 | EXPORT_SYMBOL(scsi_init_io); | 1018 | EXPORT_SYMBOL(scsi_init_io); |
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 2714becc2eaf..ffa0689ee840 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
@@ -870,7 +870,7 @@ static int sd_release(struct gendisk *disk, fmode_t mode) | |||
870 | 870 | ||
871 | SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n")); | 871 | SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n")); |
872 | 872 | ||
873 | if (atomic_dec_return(&sdkp->openers) && sdev->removable) { | 873 | if (atomic_dec_return(&sdkp->openers) == 0 && sdev->removable) { |
874 | if (scsi_block_when_processing_errors(sdev)) | 874 | if (scsi_block_when_processing_errors(sdev)) |
875 | scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW); | 875 | scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW); |
876 | } | 876 | } |
@@ -2625,15 +2625,15 @@ module_exit(exit_sd); | |||
2625 | static void sd_print_sense_hdr(struct scsi_disk *sdkp, | 2625 | static void sd_print_sense_hdr(struct scsi_disk *sdkp, |
2626 | struct scsi_sense_hdr *sshdr) | 2626 | struct scsi_sense_hdr *sshdr) |
2627 | { | 2627 | { |
2628 | sd_printk(KERN_INFO, sdkp, ""); | 2628 | sd_printk(KERN_INFO, sdkp, " "); |
2629 | scsi_show_sense_hdr(sshdr); | 2629 | scsi_show_sense_hdr(sshdr); |
2630 | sd_printk(KERN_INFO, sdkp, ""); | 2630 | sd_printk(KERN_INFO, sdkp, " "); |
2631 | scsi_show_extd_sense(sshdr->asc, sshdr->ascq); | 2631 | scsi_show_extd_sense(sshdr->asc, sshdr->ascq); |
2632 | } | 2632 | } |
2633 | 2633 | ||
2634 | static void sd_print_result(struct scsi_disk *sdkp, int result) | 2634 | static void sd_print_result(struct scsi_disk *sdkp, int result) |
2635 | { | 2635 | { |
2636 | sd_printk(KERN_INFO, sdkp, ""); | 2636 | sd_printk(KERN_INFO, sdkp, " "); |
2637 | scsi_show_result(result); | 2637 | scsi_show_result(result); |
2638 | } | 2638 | } |
2639 | 2639 | ||
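Note: the sd_release() change fixes an inverted condition: atomic_dec_return() yields the post-decrement value, so the old test fired on every close except the last, while the intent is to re-allow medium removal only when the final opener goes away. A tiny illustration (userspace atomics, hypothetical names):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int openers;

/* Old test: atomic_dec_return(&openers)       -> true on every close except the last.
 * New test: atomic_dec_return(&openers) == 0  -> true only when the final opener closes. */
static bool last_close(void)
{
	return atomic_fetch_sub(&openers, 1) - 1 == 0;	/* fetch_sub returns the old value */
}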
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c index a7bc8b7b09ac..2c3e89ddf069 100644 --- a/drivers/scsi/sym53c8xx_2/sym_hipd.c +++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c | |||
@@ -72,10 +72,7 @@ static void sym_printl_hex(u_char *p, int n) | |||
72 | 72 | ||
73 | static void sym_print_msg(struct sym_ccb *cp, char *label, u_char *msg) | 73 | static void sym_print_msg(struct sym_ccb *cp, char *label, u_char *msg) |
74 | { | 74 | { |
75 | if (label) | 75 | sym_print_addr(cp->cmd, "%s: ", label); |
76 | sym_print_addr(cp->cmd, "%s: ", label); | ||
77 | else | ||
78 | sym_print_addr(cp->cmd, ""); | ||
79 | 76 | ||
80 | spi_print_msg(msg); | 77 | spi_print_msg(msg); |
81 | printf("\n"); | 78 | printf("\n"); |
@@ -4558,7 +4555,8 @@ static void sym_int_sir(struct sym_hcb *np) | |||
4558 | switch (np->msgin [2]) { | 4555 | switch (np->msgin [2]) { |
4559 | case M_X_MODIFY_DP: | 4556 | case M_X_MODIFY_DP: |
4560 | if (DEBUG_FLAGS & DEBUG_POINTER) | 4557 | if (DEBUG_FLAGS & DEBUG_POINTER) |
4561 | sym_print_msg(cp, NULL, np->msgin); | 4558 | sym_print_msg(cp, "extended msg ", |
4559 | np->msgin); | ||
4562 | tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) + | 4560 | tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) + |
4563 | (np->msgin[5]<<8) + (np->msgin[6]); | 4561 | (np->msgin[5]<<8) + (np->msgin[6]); |
4564 | sym_modify_dp(np, tp, cp, tmp); | 4562 | sym_modify_dp(np, tp, cp, tmp); |
@@ -4585,7 +4583,7 @@ static void sym_int_sir(struct sym_hcb *np) | |||
4585 | */ | 4583 | */ |
4586 | case M_IGN_RESIDUE: | 4584 | case M_IGN_RESIDUE: |
4587 | if (DEBUG_FLAGS & DEBUG_POINTER) | 4585 | if (DEBUG_FLAGS & DEBUG_POINTER) |
4588 | sym_print_msg(cp, NULL, np->msgin); | 4586 | sym_print_msg(cp, "1 or 2 byte ", np->msgin); |
4589 | if (cp->host_flags & HF_SENSE) | 4587 | if (cp->host_flags & HF_SENSE) |
4590 | OUTL_DSP(np, SCRIPTA_BA(np, clrack)); | 4588 | OUTL_DSP(np, SCRIPTA_BA(np, clrack)); |
4591 | else | 4589 | else |
diff --git a/drivers/serial/amba-pl010.c b/drivers/serial/amba-pl010.c index 50441ffe8e38..2904aa044126 100644 --- a/drivers/serial/amba-pl010.c +++ b/drivers/serial/amba-pl010.c | |||
@@ -472,14 +472,9 @@ pl010_set_termios(struct uart_port *port, struct ktermios *termios, | |||
472 | spin_unlock_irqrestore(&uap->port.lock, flags); | 472 | spin_unlock_irqrestore(&uap->port.lock, flags); |
473 | } | 473 | } |
474 | 474 | ||
475 | static void pl010_set_ldisc(struct uart_port *port) | 475 | static void pl010_set_ldisc(struct uart_port *port, int new) |
476 | { | 476 | { |
477 | int line = port->line; | 477 | if (new == N_PPS) { |
478 | |||
479 | if (line >= port->state->port.tty->driver->num) | ||
480 | return; | ||
481 | |||
482 | if (port->state->port.tty->ldisc->ops->num == N_PPS) { | ||
483 | port->flags |= UPF_HARDPPS_CD; | 478 | port->flags |= UPF_HARDPPS_CD; |
484 | pl010_enable_ms(port); | 479 | pl010_enable_ms(port); |
485 | } else | 480 | } else |
diff --git a/drivers/serial/bfin_sport_uart.c b/drivers/serial/bfin_sport_uart.c index e57fb3d228e2..5318dd3774ae 100644 --- a/drivers/serial/bfin_sport_uart.c +++ b/drivers/serial/bfin_sport_uart.c | |||
@@ -121,7 +121,7 @@ static int sport_uart_setup(struct sport_uart_port *up, int size, int baud_rate) | |||
121 | unsigned int sclk = get_sclk(); | 121 | unsigned int sclk = get_sclk(); |
122 | 122 | ||
123 | /* Set TCR1 and TCR2, TFSR is not enabled for uart */ | 123 | /* Set TCR1 and TCR2, TFSR is not enabled for uart */ |
124 | SPORT_PUT_TCR1(up, (ITFS | TLSBIT | ITCLK)); | 124 | SPORT_PUT_TCR1(up, (LATFS | ITFS | TFSR | TLSBIT | ITCLK)); |
125 | SPORT_PUT_TCR2(up, size + 1); | 125 | SPORT_PUT_TCR2(up, size + 1); |
126 | pr_debug("%s TCR1:%x, TCR2:%x\n", __func__, SPORT_GET_TCR1(up), SPORT_GET_TCR2(up)); | 126 | pr_debug("%s TCR1:%x, TCR2:%x\n", __func__, SPORT_GET_TCR1(up), SPORT_GET_TCR2(up)); |
127 | 127 | ||
diff --git a/drivers/serial/mfd.c b/drivers/serial/mfd.c index bc9af503907f..5dff45c76d32 100644 --- a/drivers/serial/mfd.c +++ b/drivers/serial/mfd.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/init.h> | 27 | #include <linux/init.h> |
28 | #include <linux/console.h> | 28 | #include <linux/console.h> |
29 | #include <linux/sysrq.h> | 29 | #include <linux/sysrq.h> |
30 | #include <linux/slab.h> | ||
30 | #include <linux/serial_reg.h> | 31 | #include <linux/serial_reg.h> |
31 | #include <linux/circ_buf.h> | 32 | #include <linux/circ_buf.h> |
32 | #include <linux/delay.h> | 33 | #include <linux/delay.h> |
@@ -1423,7 +1424,6 @@ static void hsu_global_init(void) | |||
1423 | } | 1424 | } |
1424 | 1425 | ||
1425 | phsu = hsu; | 1426 | phsu = hsu; |
1426 | |||
1427 | hsu_debugfs_init(hsu); | 1427 | hsu_debugfs_init(hsu); |
1428 | return; | 1428 | return; |
1429 | 1429 | ||
@@ -1435,18 +1435,20 @@ err_free_region: | |||
1435 | 1435 | ||
1436 | static void serial_hsu_remove(struct pci_dev *pdev) | 1436 | static void serial_hsu_remove(struct pci_dev *pdev) |
1437 | { | 1437 | { |
1438 | struct hsu_port *hsu; | 1438 | void *priv = pci_get_drvdata(pdev); |
1439 | int i; | 1439 | struct uart_hsu_port *up; |
1440 | 1440 | ||
1441 | hsu = pci_get_drvdata(pdev); | 1441 | if (!priv) |
1442 | if (!hsu) | ||
1443 | return; | 1442 | return; |
1444 | 1443 | ||
1445 | for (i = 0; i < 3; i++) | 1444 | /* For port 0/1/2, priv is the address of uart_hsu_port */ |
1446 | uart_remove_one_port(&serial_hsu_reg, &hsu->port[i].port); | 1445 | if (pdev->device != 0x081E) { |
1446 | up = priv; | ||
1447 | uart_remove_one_port(&serial_hsu_reg, &up->port); | ||
1448 | } | ||
1447 | 1449 | ||
1448 | pci_set_drvdata(pdev, NULL); | 1450 | pci_set_drvdata(pdev, NULL); |
1449 | free_irq(hsu->irq, hsu); | 1451 | free_irq(pdev->irq, priv); |
1450 | pci_disable_device(pdev); | 1452 | pci_disable_device(pdev); |
1451 | } | 1453 | } |
1452 | 1454 | ||
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c index 8dedb266f143..c4399e23565a 100644 --- a/drivers/serial/mpc52xx_uart.c +++ b/drivers/serial/mpc52xx_uart.c | |||
@@ -500,6 +500,7 @@ static int __init mpc512x_psc_fifoc_init(void) | |||
500 | psc_fifoc = of_iomap(np, 0); | 500 | psc_fifoc = of_iomap(np, 0); |
501 | if (!psc_fifoc) { | 501 | if (!psc_fifoc) { |
502 | pr_err("%s: Can't map FIFOC\n", __func__); | 502 | pr_err("%s: Can't map FIFOC\n", __func__); |
503 | of_node_put(np); | ||
503 | return -ENODEV; | 504 | return -ENODEV; |
504 | } | 505 | } |
505 | 506 | ||
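Note: the mpc52xx_uart.c hunk adds the missing of_node_put() on the error path: of_find_* helpers return a device node with its reference count raised, and every exit, successful or not, must drop that reference. A hedged kernel-style sketch of the rule (illustrative only; the helper name and compatible string are placeholders, not the driver's code):

#include <linux/of.h>
#include <linux/of_address.h>

static void __iomem *map_fifo_regs(void)
{
	struct device_node *np;
	void __iomem *base;

	np = of_find_compatible_node(NULL, NULL, "some,fifo-controller");
	if (!np)
		return NULL;

	base = of_iomap(np, 0);	/* may fail and return NULL */
	of_node_put(np);	/* drop the node reference on success and failure alike */
	return base;
}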
diff --git a/drivers/serial/mrst_max3110.c b/drivers/serial/mrst_max3110.c index f6ad1ecbff79..51c15f58e01e 100644 --- a/drivers/serial/mrst_max3110.c +++ b/drivers/serial/mrst_max3110.c | |||
@@ -29,6 +29,7 @@ | |||
29 | 29 | ||
30 | #include <linux/module.h> | 30 | #include <linux/module.h> |
31 | #include <linux/ioport.h> | 31 | #include <linux/ioport.h> |
32 | #include <linux/irq.h> | ||
32 | #include <linux/init.h> | 33 | #include <linux/init.h> |
33 | #include <linux/console.h> | 34 | #include <linux/console.h> |
34 | #include <linux/sysrq.h> | 35 | #include <linux/sysrq.h> |
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c index 141c69554bd4..7d475b2a79e8 100644 --- a/drivers/serial/serial_cs.c +++ b/drivers/serial/serial_cs.c | |||
@@ -335,8 +335,6 @@ static int serial_probe(struct pcmcia_device *link) | |||
335 | info->p_dev = link; | 335 | info->p_dev = link; |
336 | link->priv = info; | 336 | link->priv = info; |
337 | 337 | ||
338 | link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; | ||
339 | link->resource[0]->end = 8; | ||
340 | link->conf.Attributes = CONF_ENABLE_IRQ; | 338 | link->conf.Attributes = CONF_ENABLE_IRQ; |
341 | if (do_sound) { | 339 | if (do_sound) { |
342 | link->conf.Attributes |= CONF_ENABLE_SPKR; | 340 | link->conf.Attributes |= CONF_ENABLE_SPKR; |
@@ -411,6 +409,27 @@ static int setup_serial(struct pcmcia_device *handle, struct serial_info * info, | |||
411 | 409 | ||
412 | /*====================================================================*/ | 410 | /*====================================================================*/ |
413 | 411 | ||
412 | static int pfc_config(struct pcmcia_device *p_dev) | ||
413 | { | ||
414 | unsigned int port = 0; | ||
415 | struct serial_info *info = p_dev->priv; | ||
416 | |||
417 | if ((p_dev->resource[1]->end != 0) && | ||
418 | (resource_size(p_dev->resource[1]) == 8)) { | ||
419 | port = p_dev->resource[1]->start; | ||
420 | info->slave = 1; | ||
421 | } else if ((info->manfid == MANFID_OSITECH) && | ||
422 | (resource_size(p_dev->resource[0]) == 0x40)) { | ||
423 | port = p_dev->resource[0]->start + 0x28; | ||
424 | info->slave = 1; | ||
425 | } | ||
426 | if (info->slave) | ||
427 | return setup_serial(p_dev, info, port, p_dev->irq); | ||
428 | |||
429 | dev_warn(&p_dev->dev, "no usable port range found, giving up\n"); | ||
430 | return -ENODEV; | ||
431 | } | ||
432 | |||
414 | static int simple_config_check(struct pcmcia_device *p_dev, | 433 | static int simple_config_check(struct pcmcia_device *p_dev, |
415 | cistpl_cftable_entry_t *cf, | 434 | cistpl_cftable_entry_t *cf, |
416 | cistpl_cftable_entry_t *dflt, | 435 | cistpl_cftable_entry_t *dflt, |
@@ -461,23 +480,8 @@ static int simple_config(struct pcmcia_device *link) | |||
461 | struct serial_info *info = link->priv; | 480 | struct serial_info *info = link->priv; |
462 | int i = -ENODEV, try; | 481 | int i = -ENODEV, try; |
463 | 482 | ||
464 | /* If the card is already configured, look up the port and irq */ | 483 | link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; |
465 | if (link->function_config) { | 484 | link->resource[0]->end = 8; |
466 | unsigned int port = 0; | ||
467 | if ((link->resource[1]->end != 0) && | ||
468 | (resource_size(link->resource[1]) == 8)) { | ||
469 | port = link->resource[1]->end; | ||
470 | info->slave = 1; | ||
471 | } else if ((info->manfid == MANFID_OSITECH) && | ||
472 | (resource_size(link->resource[0]) == 0x40)) { | ||
473 | port = link->resource[0]->start + 0x28; | ||
474 | info->slave = 1; | ||
475 | } | ||
476 | if (info->slave) { | ||
477 | return setup_serial(link, info, port, | ||
478 | link->irq); | ||
479 | } | ||
480 | } | ||
481 | 485 | ||
482 | /* First pass: look for a config entry that looks normal. | 486 | /* First pass: look for a config entry that looks normal. |
483 | * Two tries: without IO aliases, then with aliases */ | 487 | * Two tries: without IO aliases, then with aliases */ |
@@ -491,8 +495,7 @@ static int simple_config(struct pcmcia_device *link) | |||
491 | if (!pcmcia_loop_config(link, simple_config_check_notpicky, NULL)) | 495 | if (!pcmcia_loop_config(link, simple_config_check_notpicky, NULL)) |
492 | goto found_port; | 496 | goto found_port; |
493 | 497 | ||
494 | printk(KERN_NOTICE | 498 | dev_warn(&link->dev, "no usable port range found, giving up\n"); |
495 | "serial_cs: no usable port range found, giving up\n"); | ||
496 | return -1; | 499 | return -1; |
497 | 500 | ||
498 | found_port: | 501 | found_port: |
@@ -558,6 +561,7 @@ static int multi_config(struct pcmcia_device *link) | |||
558 | int i, base2 = 0; | 561 | int i, base2 = 0; |
559 | 562 | ||
560 | /* First, look for a generic full-sized window */ | 563 | /* First, look for a generic full-sized window */ |
564 | link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; | ||
561 | link->resource[0]->end = info->multi * 8; | 565 | link->resource[0]->end = info->multi * 8; |
562 | if (pcmcia_loop_config(link, multi_config_check, &base2)) { | 566 | if (pcmcia_loop_config(link, multi_config_check, &base2)) { |
563 | /* If that didn't work, look for two windows */ | 567 | /* If that didn't work, look for two windows */ |
@@ -565,15 +569,14 @@ static int multi_config(struct pcmcia_device *link) | |||
565 | info->multi = 2; | 569 | info->multi = 2; |
566 | if (pcmcia_loop_config(link, multi_config_check_notpicky, | 570 | if (pcmcia_loop_config(link, multi_config_check_notpicky, |
567 | &base2)) { | 571 | &base2)) { |
568 | printk(KERN_NOTICE "serial_cs: no usable port range" | 572 | dev_warn(&link->dev, "no usable port range " |
569 | "found, giving up\n"); | 573 | "found, giving up\n"); |
570 | return -ENODEV; | 574 | return -ENODEV; |
571 | } | 575 | } |
572 | } | 576 | } |
573 | 577 | ||
574 | if (!link->irq) | 578 | if (!link->irq) |
575 | dev_warn(&link->dev, | 579 | dev_warn(&link->dev, "no usable IRQ found, continuing...\n"); |
576 | "serial_cs: no usable IRQ found, continuing...\n"); | ||
577 | 580 | ||
578 | /* | 581 | /* |
579 | * Apply any configuration quirks. | 582 | * Apply any configuration quirks. |
@@ -675,6 +678,7 @@ static int serial_config(struct pcmcia_device * link) | |||
675 | multifunction cards that ask for appropriate IO port ranges */ | 678 | multifunction cards that ask for appropriate IO port ranges */ |
676 | if ((info->multi == 0) && | 679 | if ((info->multi == 0) && |
677 | (link->has_func_id) && | 680 | (link->has_func_id) && |
681 | (link->socket->pcmcia_pfc == 0) && | ||
678 | ((link->func_id == CISTPL_FUNCID_MULTI) || | 682 | ((link->func_id == CISTPL_FUNCID_MULTI) || |
679 | (link->func_id == CISTPL_FUNCID_SERIAL))) | 683 | (link->func_id == CISTPL_FUNCID_SERIAL))) |
680 | pcmcia_loop_config(link, serial_check_for_multi, info); | 684 | pcmcia_loop_config(link, serial_check_for_multi, info); |
@@ -685,7 +689,13 @@ static int serial_config(struct pcmcia_device * link) | |||
685 | if (info->quirk && info->quirk->multi != -1) | 689 | if (info->quirk && info->quirk->multi != -1) |
686 | info->multi = info->quirk->multi; | 690 | info->multi = info->quirk->multi; |
687 | 691 | ||
688 | if (info->multi > 1) | 692 | dev_info(&link->dev, |
693 | "trying to set up [0x%04x:0x%04x] (pfc: %d, multi: %d, quirk: %p)\n", | ||
694 | link->manf_id, link->card_id, | ||
695 | link->socket->pcmcia_pfc, info->multi, info->quirk); | ||
696 | if (link->socket->pcmcia_pfc) | ||
697 | i = pfc_config(link); | ||
698 | else if (info->multi > 1) | ||
689 | i = multi_config(link); | 699 | i = multi_config(link); |
690 | else | 700 | else |
691 | i = simple_config(link); | 701 | i = simple_config(link); |
@@ -704,7 +714,7 @@ static int serial_config(struct pcmcia_device * link) | |||
704 | return 0; | 714 | return 0; |
705 | 715 | ||
706 | failed: | 716 | failed: |
707 | dev_warn(&link->dev, "serial_cs: failed to initialize\n"); | 717 | dev_warn(&link->dev, "failed to initialize\n"); |
708 | serial_remove(link); | 718 | serial_remove(link); |
709 | return -ENODEV; | 719 | return -ENODEV; |
710 | } | 720 | } |
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c index acd35d1ebd12..4c37c4e28647 100644 --- a/drivers/spi/amba-pl022.c +++ b/drivers/spi/amba-pl022.c | |||
@@ -503,8 +503,9 @@ static void giveback(struct pl022 *pl022) | |||
503 | msg->state = NULL; | 503 | msg->state = NULL; |
504 | if (msg->complete) | 504 | if (msg->complete) |
505 | msg->complete(msg->context); | 505 | msg->complete(msg->context); |
506 | /* This message is completed, so let's turn off the clock! */ | 506 | /* This message is completed, so let's turn off the clocks! */ |
507 | clk_disable(pl022->clk); | 507 | clk_disable(pl022->clk); |
508 | amba_pclk_disable(pl022->adev); | ||
508 | } | 509 | } |
509 | 510 | ||
510 | /** | 511 | /** |
@@ -1139,9 +1140,10 @@ static void pump_messages(struct work_struct *work) | |||
1139 | /* Setup the SPI using the per chip configuration */ | 1140 | /* Setup the SPI using the per chip configuration */ |
1140 | pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi); | 1141 | pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi); |
1141 | /* | 1142 | /* |
1142 | * We enable the clock here, then the clock will be disabled when | 1143 | * We enable the clocks here, then the clocks will be disabled when |
1143 | * giveback() is called in each method (poll/interrupt/DMA) | 1144 | * giveback() is called in each method (poll/interrupt/DMA) |
1144 | */ | 1145 | */ |
1146 | amba_pclk_enable(pl022->adev); | ||
1145 | clk_enable(pl022->clk); | 1147 | clk_enable(pl022->clk); |
1146 | restore_state(pl022); | 1148 | restore_state(pl022); |
1147 | flush(pl022); | 1149 | flush(pl022); |
@@ -1786,11 +1788,9 @@ pl022_probe(struct amba_device *adev, struct amba_id *id) | |||
1786 | } | 1788 | } |
1787 | 1789 | ||
1788 | /* Disable SSP */ | 1790 | /* Disable SSP */ |
1789 | clk_enable(pl022->clk); | ||
1790 | writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)), | 1791 | writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)), |
1791 | SSP_CR1(pl022->virtbase)); | 1792 | SSP_CR1(pl022->virtbase)); |
1792 | load_ssp_default_config(pl022); | 1793 | load_ssp_default_config(pl022); |
1793 | clk_disable(pl022->clk); | ||
1794 | 1794 | ||
1795 | status = request_irq(adev->irq[0], pl022_interrupt_handler, 0, "pl022", | 1795 | status = request_irq(adev->irq[0], pl022_interrupt_handler, 0, "pl022", |
1796 | pl022); | 1796 | pl022); |
@@ -1818,6 +1818,8 @@ pl022_probe(struct amba_device *adev, struct amba_id *id) | |||
1818 | goto err_spi_register; | 1818 | goto err_spi_register; |
1819 | } | 1819 | } |
1820 | dev_dbg(dev, "probe succeded\n"); | 1820 | dev_dbg(dev, "probe succeded\n"); |
1821 | /* Disable the silicon block pclk and clock it when needed */ | ||
1822 | amba_pclk_disable(adev); | ||
1821 | return 0; | 1823 | return 0; |
1822 | 1824 | ||
1823 | err_spi_register: | 1825 | err_spi_register: |
@@ -1879,9 +1881,9 @@ static int pl022_suspend(struct amba_device *adev, pm_message_t state) | |||
1879 | return status; | 1881 | return status; |
1880 | } | 1882 | } |
1881 | 1883 | ||
1882 | clk_enable(pl022->clk); | 1884 | amba_pclk_enable(adev); |
1883 | load_ssp_default_config(pl022); | 1885 | load_ssp_default_config(pl022); |
1884 | clk_disable(pl022->clk); | 1886 | amba_pclk_disable(adev); |
1885 | dev_dbg(&adev->dev, "suspended\n"); | 1887 | dev_dbg(&adev->dev, "suspended\n"); |
1886 | return 0; | 1888 | return 0; |
1887 | } | 1889 | } |
@@ -1981,7 +1983,7 @@ static int __init pl022_init(void) | |||
1981 | return amba_driver_register(&pl022_driver); | 1983 | return amba_driver_register(&pl022_driver); |
1982 | } | 1984 | } |
1983 | 1985 | ||
1984 | module_init(pl022_init); | 1986 | subsys_initcall(pl022_init); |
1985 | 1987 | ||
1986 | static void __exit pl022_exit(void) | 1988 | static void __exit pl022_exit(void) |
1987 | { | 1989 | { |
diff --git a/drivers/spi/dw_spi.c b/drivers/spi/dw_spi.c index d256cb00604c..56247853c298 100644 --- a/drivers/spi/dw_spi.c +++ b/drivers/spi/dw_spi.c | |||
@@ -181,10 +181,6 @@ static void flush(struct dw_spi *dws) | |||
181 | wait_till_not_busy(dws); | 181 | wait_till_not_busy(dws); |
182 | } | 182 | } |
183 | 183 | ||
184 | static void null_cs_control(u32 command) | ||
185 | { | ||
186 | } | ||
187 | |||
188 | static int null_writer(struct dw_spi *dws) | 184 | static int null_writer(struct dw_spi *dws) |
189 | { | 185 | { |
190 | u8 n_bytes = dws->n_bytes; | 186 | u8 n_bytes = dws->n_bytes; |
@@ -322,7 +318,7 @@ static void giveback(struct dw_spi *dws) | |||
322 | struct spi_transfer, | 318 | struct spi_transfer, |
323 | transfer_list); | 319 | transfer_list); |
324 | 320 | ||
325 | if (!last_transfer->cs_change) | 321 | if (!last_transfer->cs_change && dws->cs_control) |
326 | dws->cs_control(MRST_SPI_DEASSERT); | 322 | dws->cs_control(MRST_SPI_DEASSERT); |
327 | 323 | ||
328 | msg->state = NULL; | 324 | msg->state = NULL; |
@@ -396,6 +392,11 @@ static irqreturn_t interrupt_transfer(struct dw_spi *dws) | |||
396 | static irqreturn_t dw_spi_irq(int irq, void *dev_id) | 392 | static irqreturn_t dw_spi_irq(int irq, void *dev_id) |
397 | { | 393 | { |
398 | struct dw_spi *dws = dev_id; | 394 | struct dw_spi *dws = dev_id; |
395 | u16 irq_status, irq_mask = 0x3f; | ||
396 | |||
397 | irq_status = dw_readw(dws, isr) & irq_mask; | ||
398 | if (!irq_status) | ||
399 | return IRQ_NONE; | ||
399 | 400 | ||
400 | if (!dws->cur_msg) { | 401 | if (!dws->cur_msg) { |
401 | spi_mask_intr(dws, SPI_INT_TXEI); | 402 | spi_mask_intr(dws, SPI_INT_TXEI); |
@@ -544,13 +545,13 @@ static void pump_transfers(unsigned long data) | |||
544 | */ | 545 | */ |
545 | if (dws->cs_control) { | 546 | if (dws->cs_control) { |
546 | if (dws->rx && dws->tx) | 547 | if (dws->rx && dws->tx) |
547 | chip->tmode = 0x00; | 548 | chip->tmode = SPI_TMOD_TR; |
548 | else if (dws->rx) | 549 | else if (dws->rx) |
549 | chip->tmode = 0x02; | 550 | chip->tmode = SPI_TMOD_RO; |
550 | else | 551 | else |
551 | chip->tmode = 0x01; | 552 | chip->tmode = SPI_TMOD_TO; |
552 | 553 | ||
553 | cr0 &= ~(0x3 << SPI_MODE_OFFSET); | 554 | cr0 &= ~SPI_TMOD_MASK; |
554 | cr0 |= (chip->tmode << SPI_TMOD_OFFSET); | 555 | cr0 |= (chip->tmode << SPI_TMOD_OFFSET); |
555 | } | 556 | } |
556 | 557 | ||
@@ -699,9 +700,6 @@ static int dw_spi_setup(struct spi_device *spi) | |||
699 | chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); | 700 | chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); |
700 | if (!chip) | 701 | if (!chip) |
701 | return -ENOMEM; | 702 | return -ENOMEM; |
702 | |||
703 | chip->cs_control = null_cs_control; | ||
704 | chip->enable_dma = 0; | ||
705 | } | 703 | } |
706 | 704 | ||
707 | /* | 705 | /* |
@@ -883,7 +881,7 @@ int __devinit dw_spi_add_host(struct dw_spi *dws) | |||
883 | dws->dma_inited = 0; | 881 | dws->dma_inited = 0; |
884 | dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60); | 882 | dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60); |
885 | 883 | ||
886 | ret = request_irq(dws->irq, dw_spi_irq, 0, | 884 | ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, |
887 | "dw_spi", dws); | 885 | "dw_spi", dws); |
888 | if (ret < 0) { | 886 | if (ret < 0) { |
889 | dev_err(&master->dev, "can not get IRQ\n"); | 887 | dev_err(&master->dev, "can not get IRQ\n"); |
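Note: the dw_spi.c change switches the registration to IRQF_SHARED and makes the handler check its own interrupt status first, returning IRQ_NONE when the line was raised by another device sharing the IRQ. A hedged kernel-style sketch of that contract (register offset, mask, and names are hypothetical):

#include <linux/interrupt.h>
#include <linux/io.h>

#define MY_ISR_REG	0x30	/* hypothetical interrupt-status register offset */
#define MY_IRQ_MASK	0x3f	/* bits this device can raise */

struct my_ctrl {
	void __iomem *regs;
};

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	struct my_ctrl *c = dev_id;
	u16 status = readw(c->regs + MY_ISR_REG) & MY_IRQ_MASK;

	if (!status)
		return IRQ_NONE;	/* not ours: another device on the shared line fired */

	/* ... acknowledge 'status' and service the controller ... */
	return IRQ_HANDLED;
}

/* Registered with: request_irq(irq, my_irq_handler, IRQF_SHARED, "my_ctrl", c); */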
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index a9e5c79ae52a..b5a78a1f4421 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/init.h> | 23 | #include <linux/init.h> |
24 | #include <linux/cache.h> | 24 | #include <linux/cache.h> |
25 | #include <linux/mutex.h> | 25 | #include <linux/mutex.h> |
26 | #include <linux/of_device.h> | ||
26 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
27 | #include <linux/mod_devicetable.h> | 28 | #include <linux/mod_devicetable.h> |
28 | #include <linux/spi/spi.h> | 29 | #include <linux/spi/spi.h> |
@@ -86,6 +87,10 @@ static int spi_match_device(struct device *dev, struct device_driver *drv) | |||
86 | const struct spi_device *spi = to_spi_device(dev); | 87 | const struct spi_device *spi = to_spi_device(dev); |
87 | const struct spi_driver *sdrv = to_spi_driver(drv); | 88 | const struct spi_driver *sdrv = to_spi_driver(drv); |
88 | 89 | ||
90 | /* Attempt an OF style match */ | ||
91 | if (of_driver_match_device(dev, drv)) | ||
92 | return 1; | ||
93 | |||
89 | if (sdrv->id_table) | 94 | if (sdrv->id_table) |
90 | return !!spi_match_id(sdrv->id_table, spi); | 95 | return !!spi_match_id(sdrv->id_table, spi); |
91 | 96 | ||
@@ -554,11 +559,9 @@ done: | |||
554 | EXPORT_SYMBOL_GPL(spi_register_master); | 559 | EXPORT_SYMBOL_GPL(spi_register_master); |
555 | 560 | ||
556 | 561 | ||
557 | static int __unregister(struct device *dev, void *master_dev) | 562 | static int __unregister(struct device *dev, void *null) |
558 | { | 563 | { |
559 | /* note: before about 2.6.14-rc1 this would corrupt memory: */ | 564 | spi_unregister_device(to_spi_device(dev)); |
560 | if (dev != master_dev) | ||
561 | spi_unregister_device(to_spi_device(dev)); | ||
562 | return 0; | 565 | return 0; |
563 | } | 566 | } |
564 | 567 | ||
@@ -576,8 +579,7 @@ void spi_unregister_master(struct spi_master *master) | |||
576 | { | 579 | { |
577 | int dummy; | 580 | int dummy; |
578 | 581 | ||
579 | dummy = device_for_each_child(master->dev.parent, &master->dev, | 582 | dummy = device_for_each_child(&master->dev, NULL, __unregister); |
580 | __unregister); | ||
581 | device_unregister(&master->dev); | 583 | device_unregister(&master->dev); |
582 | } | 584 | } |
583 | EXPORT_SYMBOL_GPL(spi_unregister_master); | 585 | EXPORT_SYMBOL_GPL(spi_unregister_master); |
diff --git a/drivers/spi/spi_gpio.c b/drivers/spi/spi_gpio.c index e24a63498acb..63e51b011d50 100644 --- a/drivers/spi/spi_gpio.c +++ b/drivers/spi/spi_gpio.c | |||
@@ -350,7 +350,7 @@ static int __init spi_gpio_probe(struct platform_device *pdev) | |||
350 | spi_gpio->bitbang.master = spi_master_get(master); | 350 | spi_gpio->bitbang.master = spi_master_get(master); |
351 | spi_gpio->bitbang.chipselect = spi_gpio_chipselect; | 351 | spi_gpio->bitbang.chipselect = spi_gpio_chipselect; |
352 | 352 | ||
353 | if ((master_flags & (SPI_MASTER_NO_RX | SPI_MASTER_NO_RX)) == 0) { | 353 | if ((master_flags & (SPI_MASTER_NO_TX | SPI_MASTER_NO_RX)) == 0) { |
354 | spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0; | 354 | spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0; |
355 | spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_txrx_word_mode1; | 355 | spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_txrx_word_mode1; |
356 | spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_txrx_word_mode2; | 356 | spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_txrx_word_mode2; |
diff --git a/drivers/spi/spi_mpc8xxx.c b/drivers/spi/spi_mpc8xxx.c index d31b57f7baaf..1dd86b835cd8 100644 --- a/drivers/spi/spi_mpc8xxx.c +++ b/drivers/spi/spi_mpc8xxx.c | |||
@@ -408,11 +408,17 @@ static void mpc8xxx_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi) | |||
408 | 408 | ||
409 | xfer_ofs = mspi->xfer_in_progress->len - mspi->count; | 409 | xfer_ofs = mspi->xfer_in_progress->len - mspi->count; |
410 | 410 | ||
411 | out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs); | 411 | if (mspi->rx_dma == mspi->dma_dummy_rx) |
412 | out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma); | ||
413 | else | ||
414 | out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs); | ||
412 | out_be16(&rx_bd->cbd_datlen, 0); | 415 | out_be16(&rx_bd->cbd_datlen, 0); |
413 | out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP); | 416 | out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP); |
414 | 417 | ||
415 | out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs); | 418 | if (mspi->tx_dma == mspi->dma_dummy_tx) |
419 | out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma); | ||
420 | else | ||
421 | out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs); | ||
416 | out_be16(&tx_bd->cbd_datlen, xfer_len); | 422 | out_be16(&tx_bd->cbd_datlen, xfer_len); |
417 | out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP | | 423 | out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP | |
418 | BD_SC_LAST); | 424 | BD_SC_LAST); |
diff --git a/drivers/spi/spi_s3c64xx.c b/drivers/spi/spi_s3c64xx.c index 97365815a729..c3038da2648a 100644 --- a/drivers/spi/spi_s3c64xx.c +++ b/drivers/spi/spi_s3c64xx.c | |||
@@ -200,6 +200,9 @@ static void flush_fifo(struct s3c64xx_spi_driver_data *sdd) | |||
200 | val = readl(regs + S3C64XX_SPI_STATUS); | 200 | val = readl(regs + S3C64XX_SPI_STATUS); |
201 | } while (TX_FIFO_LVL(val, sci) && loops--); | 201 | } while (TX_FIFO_LVL(val, sci) && loops--); |
202 | 202 | ||
203 | if (loops == 0) | ||
204 | dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n"); | ||
205 | |||
203 | /* Flush RxFIFO*/ | 206 | /* Flush RxFIFO*/ |
204 | loops = msecs_to_loops(1); | 207 | loops = msecs_to_loops(1); |
205 | do { | 208 | do { |
@@ -210,6 +213,9 @@ static void flush_fifo(struct s3c64xx_spi_driver_data *sdd) | |||
210 | break; | 213 | break; |
211 | } while (loops--); | 214 | } while (loops--); |
212 | 215 | ||
216 | if (loops == 0) | ||
217 | dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n"); | ||
218 | |||
213 | val = readl(regs + S3C64XX_SPI_CH_CFG); | 219 | val = readl(regs + S3C64XX_SPI_CH_CFG); |
214 | val &= ~S3C64XX_SPI_CH_SW_RST; | 220 | val &= ~S3C64XX_SPI_CH_SW_RST; |
215 | writel(val, regs + S3C64XX_SPI_CH_CFG); | 221 | writel(val, regs + S3C64XX_SPI_CH_CFG); |
@@ -320,16 +326,17 @@ static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd, | |||
320 | 326 | ||
321 | /* millisecs to xfer 'len' bytes @ 'cur_speed' */ | 327 | /* millisecs to xfer 'len' bytes @ 'cur_speed' */ |
322 | ms = xfer->len * 8 * 1000 / sdd->cur_speed; | 328 | ms = xfer->len * 8 * 1000 / sdd->cur_speed; |
323 | ms += 5; /* some tolerance */ | 329 | ms += 10; /* some tolerance */ |
324 | 330 | ||
325 | if (dma_mode) { | 331 | if (dma_mode) { |
326 | val = msecs_to_jiffies(ms) + 10; | 332 | val = msecs_to_jiffies(ms) + 10; |
327 | val = wait_for_completion_timeout(&sdd->xfer_completion, val); | 333 | val = wait_for_completion_timeout(&sdd->xfer_completion, val); |
328 | } else { | 334 | } else { |
335 | u32 status; | ||
329 | val = msecs_to_loops(ms); | 336 | val = msecs_to_loops(ms); |
330 | do { | 337 | do { |
331 | val = readl(regs + S3C64XX_SPI_STATUS); | 338 | status = readl(regs + S3C64XX_SPI_STATUS); |
332 | } while (RX_FIFO_LVL(val, sci) < xfer->len && --val); | 339 | } while (RX_FIFO_LVL(status, sci) < xfer->len && --val); |
333 | } | 340 | } |
334 | 341 | ||
335 | if (!val) | 342 | if (!val) |
@@ -447,8 +454,8 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) | |||
447 | writel(val, regs + S3C64XX_SPI_CLK_CFG); | 454 | writel(val, regs + S3C64XX_SPI_CLK_CFG); |
448 | } | 455 | } |
449 | 456 | ||
450 | void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id, | 457 | static void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id, |
451 | int size, enum s3c2410_dma_buffresult res) | 458 | int size, enum s3c2410_dma_buffresult res) |
452 | { | 459 | { |
453 | struct s3c64xx_spi_driver_data *sdd = buf_id; | 460 | struct s3c64xx_spi_driver_data *sdd = buf_id; |
454 | unsigned long flags; | 461 | unsigned long flags; |
@@ -467,8 +474,8 @@ void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id, | |||
467 | spin_unlock_irqrestore(&sdd->lock, flags); | 474 | spin_unlock_irqrestore(&sdd->lock, flags); |
468 | } | 475 | } |
469 | 476 | ||
470 | void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id, | 477 | static void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id, |
471 | int size, enum s3c2410_dma_buffresult res) | 478 | int size, enum s3c2410_dma_buffresult res) |
472 | { | 479 | { |
473 | struct s3c64xx_spi_driver_data *sdd = buf_id; | 480 | struct s3c64xx_spi_driver_data *sdd = buf_id; |
474 | unsigned long flags; | 481 | unsigned long flags; |
@@ -508,8 +515,9 @@ static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd, | |||
508 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { | 515 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { |
509 | 516 | ||
510 | if (xfer->tx_buf != NULL) { | 517 | if (xfer->tx_buf != NULL) { |
511 | xfer->tx_dma = dma_map_single(dev, xfer->tx_buf, | 518 | xfer->tx_dma = dma_map_single(dev, |
512 | xfer->len, DMA_TO_DEVICE); | 519 | (void *)xfer->tx_buf, xfer->len, |
520 | DMA_TO_DEVICE); | ||
513 | if (dma_mapping_error(dev, xfer->tx_dma)) { | 521 | if (dma_mapping_error(dev, xfer->tx_dma)) { |
514 | dev_err(dev, "dma_map_single Tx failed\n"); | 522 | dev_err(dev, "dma_map_single Tx failed\n"); |
515 | xfer->tx_dma = XFER_DMAADDR_INVALID; | 523 | xfer->tx_dma = XFER_DMAADDR_INVALID; |
@@ -919,6 +927,13 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev) | |||
919 | return -ENODEV; | 927 | return -ENODEV; |
920 | } | 928 | } |
921 | 929 | ||
930 | sci = pdev->dev.platform_data; | ||
931 | if (!sci->src_clk_name) { | ||
932 | dev_err(&pdev->dev, | ||
933 | "Board init must call s3c64xx_spi_set_info()\n"); | ||
934 | return -EINVAL; | ||
935 | } | ||
936 | |||
922 | /* Check for availability of necessary resource */ | 937 | /* Check for availability of necessary resource */ |
923 | 938 | ||
924 | dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0); | 939 | dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0); |
@@ -946,8 +961,6 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev) | |||
946 | return -ENOMEM; | 961 | return -ENOMEM; |
947 | } | 962 | } |
948 | 963 | ||
949 | sci = pdev->dev.platform_data; | ||
950 | |||
951 | platform_set_drvdata(pdev, master); | 964 | platform_set_drvdata(pdev, master); |
952 | 965 | ||
953 | sdd = spi_master_get_devdata(master); | 966 | sdd = spi_master_get_devdata(master); |
@@ -1170,7 +1183,7 @@ static int __init s3c64xx_spi_init(void) | |||
1170 | { | 1183 | { |
1171 | return platform_driver_probe(&s3c64xx_spi_driver, s3c64xx_spi_probe); | 1184 | return platform_driver_probe(&s3c64xx_spi_driver, s3c64xx_spi_probe); |
1172 | } | 1185 | } |
1173 | module_init(s3c64xx_spi_init); | 1186 | subsys_initcall(s3c64xx_spi_init); |
1174 | 1187 | ||
1175 | static void __exit s3c64xx_spi_exit(void) | 1188 | static void __exit s3c64xx_spi_exit(void) |
1176 | { | 1189 | { |
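For reference, the polling budget changed in wait_for_xfer() above works out as ms = xfer->len * 8 * 1000 / sdd->cur_speed plus the tolerance, which the patch raises from 5 ms to 10 ms. With illustrative numbers, a 256-byte transfer at 1 MHz gets 256 * 8 * 1000 / 1000000 ≈ 2 ms of raw transfer time plus 10 ms of slack, i.e. roughly 12 ms before the driver reports an I/O timeout.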
diff --git a/drivers/staging/batman-adv/hard-interface.c b/drivers/staging/batman-adv/hard-interface.c index baa8b05b9e8d..6e973a79aa25 100644 --- a/drivers/staging/batman-adv/hard-interface.c +++ b/drivers/staging/batman-adv/hard-interface.c | |||
@@ -30,7 +30,6 @@ | |||
30 | #include "hash.h" | 30 | #include "hash.h" |
31 | 31 | ||
32 | #include <linux/if_arp.h> | 32 | #include <linux/if_arp.h> |
33 | #include <linux/netfilter_bridge.h> | ||
34 | 33 | ||
35 | #define MIN(x, y) ((x) < (y) ? (x) : (y)) | 34 | #define MIN(x, y) ((x) < (y) ? (x) : (y)) |
36 | 35 | ||
@@ -431,11 +430,6 @@ out: | |||
431 | return NOTIFY_DONE; | 430 | return NOTIFY_DONE; |
432 | } | 431 | } |
433 | 432 | ||
434 | static int batman_skb_recv_finish(struct sk_buff *skb) | ||
435 | { | ||
436 | return NF_ACCEPT; | ||
437 | } | ||
438 | |||
439 | /* receive a packet with the batman ethertype coming on a hard | 433 | /* receive a packet with the batman ethertype coming on a hard |
440 | * interface */ | 434 | * interface */ |
441 | int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, | 435 | int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, |
@@ -456,13 +450,6 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, | |||
456 | if (atomic_read(&module_state) != MODULE_ACTIVE) | 450 | if (atomic_read(&module_state) != MODULE_ACTIVE) |
457 | goto err_free; | 451 | goto err_free; |
458 | 452 | ||
459 | /* if netfilter/ebtables wants to block incoming batman | ||
460 | * packets then give them a chance to do so here */ | ||
461 | ret = NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, dev, NULL, | ||
462 | batman_skb_recv_finish); | ||
463 | if (ret != 1) | ||
464 | goto err_out; | ||
465 | |||
466 | /* packet should hold at least type and version */ | 453 | /* packet should hold at least type and version */ |
467 | if (unlikely(skb_headlen(skb) < 2)) | 454 | if (unlikely(skb_headlen(skb) < 2)) |
468 | goto err_free; | 455 | goto err_free; |
diff --git a/drivers/staging/batman-adv/send.c b/drivers/staging/batman-adv/send.c index 055edee7b4e4..da3c82e47bbd 100644 --- a/drivers/staging/batman-adv/send.c +++ b/drivers/staging/batman-adv/send.c | |||
@@ -29,7 +29,6 @@ | |||
29 | #include "vis.h" | 29 | #include "vis.h" |
30 | #include "aggregation.h" | 30 | #include "aggregation.h" |
31 | 31 | ||
32 | #include <linux/netfilter_bridge.h> | ||
33 | 32 | ||
34 | static void send_outstanding_bcast_packet(struct work_struct *work); | 33 | static void send_outstanding_bcast_packet(struct work_struct *work); |
35 | 34 | ||
@@ -92,12 +91,9 @@ int send_skb_packet(struct sk_buff *skb, | |||
92 | 91 | ||
93 | /* dev_queue_xmit() returns a negative result on error. However on | 92 | /* dev_queue_xmit() returns a negative result on error. However on |
94 | * congestion and traffic shaping, it drops and returns NET_XMIT_DROP | 93 | * congestion and traffic shaping, it drops and returns NET_XMIT_DROP |
95 | * (which is > 0). This will not be treated as an error. | 94 | * (which is > 0). This will not be treated as an error. */ |
96 | * Also, if netfilter/ebtables wants to block outgoing batman | ||
97 | * packets then giving them a chance to do so here */ | ||
98 | 95 | ||
99 | return NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, | 96 | return dev_queue_xmit(skb); |
100 | dev_queue_xmit); | ||
101 | send_skb_err: | 97 | send_skb_err: |
102 | kfree_skb(skb); | 98 | kfree_skb(skb); |
103 | return NET_XMIT_DROP; | 99 | return NET_XMIT_DROP; |
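The surviving comment in send_skb_packet() refers to the documented dev_queue_xmit() return convention: negative errno values are hard errors, while NET_XMIT_* codes (>= 0) such as NET_XMIT_DROP indicate a qdisc drop under congestion or shaping. A hedged sketch of that convention follows; the helper name is invented for illustration.

	#include <linux/netdevice.h>

	/*
	 * Illustrative only: map dev_queue_xmit()'s result onto "error or not".
	 * Negative values are real errors; NET_XMIT_DROP / NET_XMIT_CN mean the
	 * qdisc dropped or flagged congestion, which send_skb_packet()
	 * deliberately does not treat as an error.
	 */
	static int example_xmit_ok(int ret)
	{
		if (ret < 0)
			return ret;
		return 0;
	}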
diff --git a/drivers/staging/comedi/drivers/das08_cs.c b/drivers/staging/comedi/drivers/das08_cs.c index c6aa52f8dcee..48d9fb1227df 100644 --- a/drivers/staging/comedi/drivers/das08_cs.c +++ b/drivers/staging/comedi/drivers/das08_cs.c | |||
@@ -222,7 +222,6 @@ static int das08_pcmcia_config_loop(struct pcmcia_device *p_dev, | |||
222 | p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; | 222 | p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; |
223 | p_dev->resource[0]->flags |= | 223 | p_dev->resource[0]->flags |= |
224 | pcmcia_io_cfg_data_width(io->flags); | 224 | pcmcia_io_cfg_data_width(io->flags); |
225 | p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK; | ||
226 | p_dev->resource[0]->start = io->win[0].base; | 225 | p_dev->resource[0]->start = io->win[0].base; |
227 | p_dev->resource[0]->end = io->win[0].len; | 226 | p_dev->resource[0]->end = io->win[0].len; |
228 | if (io->nwin > 1) { | 227 | if (io->nwin > 1) { |
diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/staging/hv/netvsc_drv.c index 56e11575c977..64a01147ecae 100644 --- a/drivers/staging/hv/netvsc_drv.c +++ b/drivers/staging/hv/netvsc_drv.c | |||
@@ -327,6 +327,9 @@ static const struct net_device_ops device_ops = { | |||
327 | .ndo_stop = netvsc_close, | 327 | .ndo_stop = netvsc_close, |
328 | .ndo_start_xmit = netvsc_start_xmit, | 328 | .ndo_start_xmit = netvsc_start_xmit, |
329 | .ndo_set_multicast_list = netvsc_set_multicast_list, | 329 | .ndo_set_multicast_list = netvsc_set_multicast_list, |
330 | .ndo_change_mtu = eth_change_mtu, | ||
331 | .ndo_validate_addr = eth_validate_addr, | ||
332 | .ndo_set_mac_address = eth_mac_addr, | ||
330 | }; | 333 | }; |
331 | 334 | ||
332 | static int netvsc_probe(struct device *device) | 335 | static int netvsc_probe(struct device *device) |
diff --git a/drivers/staging/hv/ring_buffer.c b/drivers/staging/hv/ring_buffer.c index 17bc7626f70a..d78c569ac94a 100644 --- a/drivers/staging/hv/ring_buffer.c +++ b/drivers/staging/hv/ring_buffer.c | |||
@@ -193,8 +193,7 @@ Description: | |||
193 | static inline u64 | 193 | static inline u64 |
194 | GetRingBufferIndices(struct hv_ring_buffer_info *RingInfo) | 194 | GetRingBufferIndices(struct hv_ring_buffer_info *RingInfo) |
195 | { | 195 | { |
196 | return ((u64)RingInfo->RingBuffer->WriteIndex << 32) | 196 | return (u64)RingInfo->RingBuffer->WriteIndex << 32; |
197 | || RingInfo->RingBuffer->ReadIndex; | ||
198 | } | 197 | } |
199 | 198 | ||
200 | 199 | ||
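The removed second line of GetRingBufferIndices() shows the original bug: the two 32-bit indices were combined with the logical operator ||, which evaluates to 0 or 1 rather than a packed value. The patch keeps only the write index; had the intent been to pack both indices into a single u64, the bitwise form would be the following (illustration only, not what the patch does):

	return ((u64)RingInfo->RingBuffer->WriteIndex << 32) |
		RingInfo->RingBuffer->ReadIndex;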
diff --git a/drivers/staging/hv/storvsc_api.h b/drivers/staging/hv/storvsc_api.h index 0063bde9a4b2..8505a1c5f9ee 100644 --- a/drivers/staging/hv/storvsc_api.h +++ b/drivers/staging/hv/storvsc_api.h | |||
@@ -28,10 +28,10 @@ | |||
28 | #include "vmbus_api.h" | 28 | #include "vmbus_api.h" |
29 | 29 | ||
30 | /* Defines */ | 30 | /* Defines */ |
31 | #define STORVSC_RING_BUFFER_SIZE (10*PAGE_SIZE) | 31 | #define STORVSC_RING_BUFFER_SIZE (20*PAGE_SIZE) |
32 | #define BLKVSC_RING_BUFFER_SIZE (20*PAGE_SIZE) | 32 | #define BLKVSC_RING_BUFFER_SIZE (20*PAGE_SIZE) |
33 | 33 | ||
34 | #define STORVSC_MAX_IO_REQUESTS 64 | 34 | #define STORVSC_MAX_IO_REQUESTS 128 |
35 | 35 | ||
36 | /* | 36 | /* |
37 | * In Hyper-V, each port/path/target maps to 1 scsi host adapter. In | 37 | * In Hyper-V, each port/path/target maps to 1 scsi host adapter. In |
diff --git a/drivers/staging/hv/storvsc_drv.c b/drivers/staging/hv/storvsc_drv.c index 075b61bd492f..62882a437aa4 100644 --- a/drivers/staging/hv/storvsc_drv.c +++ b/drivers/staging/hv/storvsc_drv.c | |||
@@ -495,7 +495,7 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl, | |||
495 | 495 | ||
496 | /* ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE); */ | 496 | /* ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE); */ |
497 | 497 | ||
498 | if (j == 0) | 498 | if (bounce_addr == 0) |
499 | bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0); | 499 | bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0); |
500 | 500 | ||
501 | while (srclen) { | 501 | while (srclen) { |
@@ -556,7 +556,7 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl, | |||
556 | destlen = orig_sgl[i].length; | 556 | destlen = orig_sgl[i].length; |
557 | /* ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE); */ | 557 | /* ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE); */ |
558 | 558 | ||
559 | if (j == 0) | 559 | if (bounce_addr == 0) |
560 | bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0); | 560 | bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0); |
561 | 561 | ||
562 | while (destlen) { | 562 | while (destlen) { |
@@ -615,6 +615,7 @@ static int storvsc_queuecommand(struct scsi_cmnd *scmnd, | |||
615 | unsigned int request_size = 0; | 615 | unsigned int request_size = 0; |
616 | int i; | 616 | int i; |
617 | struct scatterlist *sgl; | 617 | struct scatterlist *sgl; |
618 | unsigned int sg_count = 0; | ||
618 | 619 | ||
619 | DPRINT_DBG(STORVSC_DRV, "scmnd %p dir %d, use_sg %d buf %p len %d " | 620 | DPRINT_DBG(STORVSC_DRV, "scmnd %p dir %d, use_sg %d buf %p len %d " |
620 | "queue depth %d tagged %d", scmnd, scmnd->sc_data_direction, | 621 | "queue depth %d tagged %d", scmnd, scmnd->sc_data_direction, |
@@ -697,6 +698,7 @@ static int storvsc_queuecommand(struct scsi_cmnd *scmnd, | |||
697 | request->DataBuffer.Length = scsi_bufflen(scmnd); | 698 | request->DataBuffer.Length = scsi_bufflen(scmnd); |
698 | if (scsi_sg_count(scmnd)) { | 699 | if (scsi_sg_count(scmnd)) { |
699 | sgl = (struct scatterlist *)scsi_sglist(scmnd); | 700 | sgl = (struct scatterlist *)scsi_sglist(scmnd); |
701 | sg_count = scsi_sg_count(scmnd); | ||
700 | 702 | ||
701 | /* check if we need to bounce the sgl */ | 703 | /* check if we need to bounce the sgl */ |
702 | if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) { | 704 | if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) { |
@@ -731,15 +733,16 @@ static int storvsc_queuecommand(struct scsi_cmnd *scmnd, | |||
731 | scsi_sg_count(scmnd)); | 733 | scsi_sg_count(scmnd)); |
732 | 734 | ||
733 | sgl = cmd_request->bounce_sgl; | 735 | sgl = cmd_request->bounce_sgl; |
736 | sg_count = cmd_request->bounce_sgl_count; | ||
734 | } | 737 | } |
735 | 738 | ||
736 | request->DataBuffer.Offset = sgl[0].offset; | 739 | request->DataBuffer.Offset = sgl[0].offset; |
737 | 740 | ||
738 | for (i = 0; i < scsi_sg_count(scmnd); i++) { | 741 | for (i = 0; i < sg_count; i++) { |
739 | DPRINT_DBG(STORVSC_DRV, "sgl[%d] len %d offset %d\n", | 742 | DPRINT_DBG(STORVSC_DRV, "sgl[%d] len %d offset %d\n", |
740 | i, sgl[i].length, sgl[i].offset); | 743 | i, sgl[i].length, sgl[i].offset); |
741 | request->DataBuffer.PfnArray[i] = | 744 | request->DataBuffer.PfnArray[i] = |
742 | page_to_pfn(sg_page((&sgl[i]))); | 745 | page_to_pfn(sg_page((&sgl[i]))); |
743 | } | 746 | } |
744 | } else if (scsi_sglist(scmnd)) { | 747 | } else if (scsi_sglist(scmnd)) { |
745 | /* ASSERT(scsi_bufflen(scmnd) <= PAGE_SIZE); */ | 748 | /* ASSERT(scsi_bufflen(scmnd) <= PAGE_SIZE); */ |
diff --git a/drivers/staging/octeon/Kconfig b/drivers/staging/octeon/Kconfig index 638ad6b35891..9493128e5fd2 100644 --- a/drivers/staging/octeon/Kconfig +++ b/drivers/staging/octeon/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | config OCTEON_ETHERNET | 1 | config OCTEON_ETHERNET |
2 | tristate "Cavium Networks Octeon Ethernet support" | 2 | tristate "Cavium Networks Octeon Ethernet support" |
3 | depends on CPU_CAVIUM_OCTEON | 3 | depends on CPU_CAVIUM_OCTEON && NETDEVICES |
4 | select PHYLIB | 4 | select PHYLIB |
5 | select MDIO_OCTEON | 5 | select MDIO_OCTEON |
6 | help | 6 | help |
diff --git a/drivers/staging/rt2860/usb_main_dev.c b/drivers/staging/rt2860/usb_main_dev.c index a0fe31de0a6d..ebf9074a9083 100644 --- a/drivers/staging/rt2860/usb_main_dev.c +++ b/drivers/staging/rt2860/usb_main_dev.c | |||
@@ -44,6 +44,7 @@ struct usb_device_id rtusb_usb_id[] = { | |||
44 | {USB_DEVICE(0x07B8, 0x2870)}, /* AboCom */ | 44 | {USB_DEVICE(0x07B8, 0x2870)}, /* AboCom */ |
45 | {USB_DEVICE(0x07B8, 0x2770)}, /* AboCom */ | 45 | {USB_DEVICE(0x07B8, 0x2770)}, /* AboCom */ |
46 | {USB_DEVICE(0x0DF6, 0x0039)}, /* Sitecom 2770 */ | 46 | {USB_DEVICE(0x0DF6, 0x0039)}, /* Sitecom 2770 */ |
47 | {USB_DEVICE(0x0DF6, 0x003F)}, /* Sitecom 2770 */ | ||
47 | {USB_DEVICE(0x083A, 0x7512)}, /* Arcadyan 2770 */ | 48 | {USB_DEVICE(0x083A, 0x7512)}, /* Arcadyan 2770 */ |
48 | {USB_DEVICE(0x0789, 0x0162)}, /* Logitec 2870 */ | 49 | {USB_DEVICE(0x0789, 0x0162)}, /* Logitec 2870 */ |
49 | {USB_DEVICE(0x0789, 0x0163)}, /* Logitec 2870 */ | 50 | {USB_DEVICE(0x0789, 0x0163)}, /* Logitec 2870 */ |
@@ -95,7 +96,8 @@ struct usb_device_id rtusb_usb_id[] = { | |||
95 | {USB_DEVICE(0x050d, 0x815c)}, | 96 | {USB_DEVICE(0x050d, 0x815c)}, |
96 | {USB_DEVICE(0x1482, 0x3C09)}, /* Abocom */ | 97 | {USB_DEVICE(0x1482, 0x3C09)}, /* Abocom */ |
97 | {USB_DEVICE(0x14B2, 0x3C09)}, /* Alpha */ | 98 | {USB_DEVICE(0x14B2, 0x3C09)}, /* Alpha */ |
98 | {USB_DEVICE(0x04E8, 0x2018)}, /* samsung */ | 99 | {USB_DEVICE(0x04E8, 0x2018)}, /* samsung linkstick2 */ |
100 | {USB_DEVICE(0x1690, 0x0740)}, /* Askey */ | ||
99 | {USB_DEVICE(0x5A57, 0x0280)}, /* Zinwell */ | 101 | {USB_DEVICE(0x5A57, 0x0280)}, /* Zinwell */ |
100 | {USB_DEVICE(0x5A57, 0x0282)}, /* Zinwell */ | 102 | {USB_DEVICE(0x5A57, 0x0282)}, /* Zinwell */ |
101 | {USB_DEVICE(0x7392, 0x7718)}, | 103 | {USB_DEVICE(0x7392, 0x7718)}, |
@@ -105,21 +107,34 @@ struct usb_device_id rtusb_usb_id[] = { | |||
105 | {USB_DEVICE(0x1737, 0x0071)}, /* Linksys WUSB600N */ | 107 | {USB_DEVICE(0x1737, 0x0071)}, /* Linksys WUSB600N */ |
106 | {USB_DEVICE(0x0411, 0x00e8)}, /* Buffalo WLI-UC-G300N */ | 108 | {USB_DEVICE(0x0411, 0x00e8)}, /* Buffalo WLI-UC-G300N */ |
107 | {USB_DEVICE(0x050d, 0x815c)}, /* Belkin F5D8053 */ | 109 | {USB_DEVICE(0x050d, 0x815c)}, /* Belkin F5D8053 */ |
110 | {USB_DEVICE(0x100D, 0x9031)}, /* Motorola 2770 */ | ||
108 | #endif /* RT2870 // */ | 111 | #endif /* RT2870 // */ |
109 | #ifdef RT3070 | 112 | #ifdef RT3070 |
110 | {USB_DEVICE(0x148F, 0x3070)}, /* Ralink 3070 */ | 113 | {USB_DEVICE(0x148F, 0x3070)}, /* Ralink 3070 */ |
111 | {USB_DEVICE(0x148F, 0x3071)}, /* Ralink 3071 */ | 114 | {USB_DEVICE(0x148F, 0x3071)}, /* Ralink 3071 */ |
112 | {USB_DEVICE(0x148F, 0x3072)}, /* Ralink 3072 */ | 115 | {USB_DEVICE(0x148F, 0x3072)}, /* Ralink 3072 */ |
113 | {USB_DEVICE(0x0DB0, 0x3820)}, /* Ralink 3070 */ | 116 | {USB_DEVICE(0x0DB0, 0x3820)}, /* Ralink 3070 */ |
117 | {USB_DEVICE(0x0DB0, 0x871C)}, /* Ralink 3070 */ | ||
118 | {USB_DEVICE(0x0DB0, 0x822C)}, /* Ralink 3070 */ | ||
119 | {USB_DEVICE(0x0DB0, 0x871B)}, /* Ralink 3070 */ | ||
120 | {USB_DEVICE(0x0DB0, 0x822B)}, /* Ralink 3070 */ | ||
114 | {USB_DEVICE(0x0DF6, 0x003E)}, /* Sitecom 3070 */ | 121 | {USB_DEVICE(0x0DF6, 0x003E)}, /* Sitecom 3070 */ |
115 | {USB_DEVICE(0x0DF6, 0x0042)}, /* Sitecom 3072 */ | 122 | {USB_DEVICE(0x0DF6, 0x0042)}, /* Sitecom 3072 */ |
123 | {USB_DEVICE(0x0DF6, 0x0048)}, /* Sitecom 3070 */ | ||
124 | {USB_DEVICE(0x0DF6, 0x0047)}, /* Sitecom 3071 */ | ||
116 | {USB_DEVICE(0x14B2, 0x3C12)}, /* AL 3070 */ | 125 | {USB_DEVICE(0x14B2, 0x3C12)}, /* AL 3070 */ |
117 | {USB_DEVICE(0x18C5, 0x0012)}, /* Corega 3070 */ | 126 | {USB_DEVICE(0x18C5, 0x0012)}, /* Corega 3070 */ |
118 | {USB_DEVICE(0x083A, 0x7511)}, /* Arcadyan 3070 */ | 127 | {USB_DEVICE(0x083A, 0x7511)}, /* Arcadyan 3070 */ |
128 | {USB_DEVICE(0x083A, 0xA701)}, /* SMC 3070 */ | ||
129 | {USB_DEVICE(0x083A, 0xA702)}, /* SMC 3072 */ | ||
119 | {USB_DEVICE(0x1740, 0x9703)}, /* EnGenius 3070 */ | 130 | {USB_DEVICE(0x1740, 0x9703)}, /* EnGenius 3070 */ |
120 | {USB_DEVICE(0x1740, 0x9705)}, /* EnGenius 3071 */ | 131 | {USB_DEVICE(0x1740, 0x9705)}, /* EnGenius 3071 */ |
121 | {USB_DEVICE(0x1740, 0x9706)}, /* EnGenius 3072 */ | 132 | {USB_DEVICE(0x1740, 0x9706)}, /* EnGenius 3072 */ |
133 | {USB_DEVICE(0x1740, 0x9707)}, /* EnGenius 3070 */ | ||
134 | {USB_DEVICE(0x1740, 0x9708)}, /* EnGenius 3071 */ | ||
135 | {USB_DEVICE(0x1740, 0x9709)}, /* EnGenius 3072 */ | ||
122 | {USB_DEVICE(0x13D3, 0x3273)}, /* AzureWave 3070 */ | 136 | {USB_DEVICE(0x13D3, 0x3273)}, /* AzureWave 3070 */ |
137 | {USB_DEVICE(0x13D3, 0x3305)}, /* AzureWave 3070*/ | ||
123 | {USB_DEVICE(0x1044, 0x800D)}, /* Gigabyte GN-WB32L 3070 */ | 138 | {USB_DEVICE(0x1044, 0x800D)}, /* Gigabyte GN-WB32L 3070 */ |
124 | {USB_DEVICE(0x2019, 0xAB25)}, /* Planex Communications, Inc. RT3070 */ | 139 | {USB_DEVICE(0x2019, 0xAB25)}, /* Planex Communications, Inc. RT3070 */ |
125 | {USB_DEVICE(0x07B8, 0x3070)}, /* AboCom 3070 */ | 140 | {USB_DEVICE(0x07B8, 0x3070)}, /* AboCom 3070 */ |
@@ -132,14 +147,36 @@ struct usb_device_id rtusb_usb_id[] = { | |||
132 | {USB_DEVICE(0x07D1, 0x3C0D)}, /* D-Link 3070 */ | 147 | {USB_DEVICE(0x07D1, 0x3C0D)}, /* D-Link 3070 */ |
133 | {USB_DEVICE(0x07D1, 0x3C0E)}, /* D-Link 3070 */ | 148 | {USB_DEVICE(0x07D1, 0x3C0E)}, /* D-Link 3070 */ |
134 | {USB_DEVICE(0x07D1, 0x3C0F)}, /* D-Link 3070 */ | 149 | {USB_DEVICE(0x07D1, 0x3C0F)}, /* D-Link 3070 */ |
150 | {USB_DEVICE(0x07D1, 0x3C16)}, /* D-Link 3070 */ | ||
151 | {USB_DEVICE(0x07D1, 0x3C17)}, /* D-Link 8070 */ | ||
135 | {USB_DEVICE(0x1D4D, 0x000C)}, /* Pegatron Corporation 3070 */ | 152 | {USB_DEVICE(0x1D4D, 0x000C)}, /* Pegatron Corporation 3070 */ |
136 | {USB_DEVICE(0x1D4D, 0x000E)}, /* Pegatron Corporation 3070 */ | 153 | {USB_DEVICE(0x1D4D, 0x000E)}, /* Pegatron Corporation 3070 */ |
137 | {USB_DEVICE(0x5A57, 0x5257)}, /* Zinwell 3070 */ | 154 | {USB_DEVICE(0x5A57, 0x5257)}, /* Zinwell 3070 */ |
138 | {USB_DEVICE(0x5A57, 0x0283)}, /* Zinwell 3072 */ | 155 | {USB_DEVICE(0x5A57, 0x0283)}, /* Zinwell 3072 */ |
139 | {USB_DEVICE(0x04BB, 0x0945)}, /* I-O DATA 3072 */ | 156 | {USB_DEVICE(0x04BB, 0x0945)}, /* I-O DATA 3072 */ |
157 | {USB_DEVICE(0x04BB, 0x0947)}, /* I-O DATA 3070 */ | ||
158 | {USB_DEVICE(0x04BB, 0x0948)}, /* I-O DATA 3072 */ | ||
140 | {USB_DEVICE(0x203D, 0x1480)}, /* Encore 3070 */ | 159 | {USB_DEVICE(0x203D, 0x1480)}, /* Encore 3070 */ |
160 | {USB_DEVICE(0x20B8, 0x8888)}, /* PARA INDUSTRIAL 3070 */ | ||
161 | {USB_DEVICE(0x0B05, 0x1784)}, /* Asus 3072 */ | ||
162 | {USB_DEVICE(0x203D, 0x14A9)}, /* Encore 3070*/ | ||
163 | {USB_DEVICE(0x0DB0, 0x899A)}, /* MSI 3070*/ | ||
164 | {USB_DEVICE(0x0DB0, 0x3870)}, /* MSI 3070*/ | ||
165 | {USB_DEVICE(0x0DB0, 0x870A)}, /* MSI 3070*/ | ||
166 | {USB_DEVICE(0x0DB0, 0x6899)}, /* MSI 3070 */ | ||
167 | {USB_DEVICE(0x0DB0, 0x3822)}, /* MSI 3070 */ | ||
168 | {USB_DEVICE(0x0DB0, 0x3871)}, /* MSI 3070 */ | ||
169 | {USB_DEVICE(0x0DB0, 0x871A)}, /* MSI 3070 */ | ||
170 | {USB_DEVICE(0x0DB0, 0x822A)}, /* MSI 3070 */ | ||
171 | {USB_DEVICE(0x0DB0, 0x3821)}, /* Ralink 3070 */ | ||
172 | {USB_DEVICE(0x0DB0, 0x821A)}, /* Ralink 3070 */ | ||
173 | {USB_DEVICE(0x083A, 0xA703)}, /* IO-MAGIC */ | ||
174 | {USB_DEVICE(0x13D3, 0x3307)}, /* Azurewave */ | ||
175 | {USB_DEVICE(0x13D3, 0x3321)}, /* Azurewave */ | ||
176 | {USB_DEVICE(0x07FA, 0x7712)}, /* Edimax */ | ||
177 | {USB_DEVICE(0x0789, 0x0166)}, /* Edimax */ | ||
178 | {USB_DEVICE(0x148F, 0x2070)}, /* Edimax */ | ||
141 | #endif /* RT3070 // */ | 179 | #endif /* RT3070 // */ |
142 | {USB_DEVICE(0x0DF6, 0x003F)}, /* Sitecom WL-608 */ | ||
143 | {USB_DEVICE(0x1737, 0x0077)}, /* Linksys WUSB54GC-EU v3 */ | 180 | {USB_DEVICE(0x1737, 0x0077)}, /* Linksys WUSB54GC-EU v3 */ |
144 | {USB_DEVICE(0x2001, 0x3C09)}, /* D-Link */ | 181 | {USB_DEVICE(0x2001, 0x3C09)}, /* D-Link */ |
145 | {USB_DEVICE(0x2001, 0x3C0A)}, /* D-Link 3072 */ | 182 | {USB_DEVICE(0x2001, 0x3C0A)}, /* D-Link 3072 */ |
diff --git a/drivers/staging/spectra/Kconfig b/drivers/staging/spectra/Kconfig index 5e2ffefb60af..d231ae27299d 100644 --- a/drivers/staging/spectra/Kconfig +++ b/drivers/staging/spectra/Kconfig | |||
@@ -2,6 +2,7 @@ | |||
2 | menuconfig SPECTRA | 2 | menuconfig SPECTRA |
3 | tristate "Denali Spectra Flash Translation Layer" | 3 | tristate "Denali Spectra Flash Translation Layer" |
4 | depends on BLOCK | 4 | depends on BLOCK |
5 | depends on X86_MRST | ||
5 | default n | 6 | default n |
6 | ---help--- | 7 | ---help--- |
7 | Enable the FTL pseudo-filesystem used with the NAND Flash | 8 | Enable the FTL pseudo-filesystem used with the NAND Flash |
diff --git a/drivers/staging/spectra/ffsport.c b/drivers/staging/spectra/ffsport.c index 44a7fbe7eccd..fa21a0fd8e84 100644 --- a/drivers/staging/spectra/ffsport.c +++ b/drivers/staging/spectra/ffsport.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/log2.h> | 28 | #include <linux/log2.h> |
29 | #include <linux/init.h> | 29 | #include <linux/init.h> |
30 | #include <linux/smp_lock.h> | 30 | #include <linux/smp_lock.h> |
31 | #include <linux/slab.h> | ||
31 | 32 | ||
32 | /**** Helper functions used for Div, Remainder operation on u64 ****/ | 33 | /**** Helper functions used for Div, Remainder operation on u64 ****/ |
33 | 34 | ||
diff --git a/drivers/staging/ti-st/st.h b/drivers/staging/ti-st/st.h index 9952579425b9..1b3060eb2921 100644 --- a/drivers/staging/ti-st/st.h +++ b/drivers/staging/ti-st/st.h | |||
@@ -80,5 +80,4 @@ struct st_proto_s { | |||
80 | extern long st_register(struct st_proto_s *); | 80 | extern long st_register(struct st_proto_s *); |
81 | extern long st_unregister(enum proto_type); | 81 | extern long st_unregister(enum proto_type); |
82 | 82 | ||
83 | extern struct platform_device *st_get_plat_device(void); | ||
84 | #endif /* ST_H */ | 83 | #endif /* ST_H */ |
diff --git a/drivers/staging/ti-st/st_core.c b/drivers/staging/ti-st/st_core.c index 063c9b1db1ab..b85d8bfdf600 100644 --- a/drivers/staging/ti-st/st_core.c +++ b/drivers/staging/ti-st/st_core.c | |||
@@ -38,7 +38,6 @@ | |||
38 | #include "st_ll.h" | 38 | #include "st_ll.h" |
39 | #include "st.h" | 39 | #include "st.h" |
40 | 40 | ||
41 | #define VERBOSE | ||
42 | /* strings to be used for rfkill entries and by | 41 | /* strings to be used for rfkill entries and by |
43 | * ST Core to be used for sysfs debug entry | 42 | * ST Core to be used for sysfs debug entry |
44 | */ | 43 | */ |
@@ -581,7 +580,7 @@ long st_register(struct st_proto_s *new_proto) | |||
581 | long err = 0; | 580 | long err = 0; |
582 | unsigned long flags = 0; | 581 | unsigned long flags = 0; |
583 | 582 | ||
584 | st_kim_ref(&st_gdata); | 583 | st_kim_ref(&st_gdata, 0); |
585 | pr_info("%s(%d) ", __func__, new_proto->type); | 584 | pr_info("%s(%d) ", __func__, new_proto->type); |
586 | if (st_gdata == NULL || new_proto == NULL || new_proto->recv == NULL | 585 | if (st_gdata == NULL || new_proto == NULL || new_proto->recv == NULL |
587 | || new_proto->reg_complete_cb == NULL) { | 586 | || new_proto->reg_complete_cb == NULL) { |
@@ -713,7 +712,7 @@ long st_unregister(enum proto_type type) | |||
713 | 712 | ||
714 | pr_debug("%s: %d ", __func__, type); | 713 | pr_debug("%s: %d ", __func__, type); |
715 | 714 | ||
716 | st_kim_ref(&st_gdata); | 715 | st_kim_ref(&st_gdata, 0); |
717 | if (type < ST_BT || type >= ST_MAX) { | 716 | if (type < ST_BT || type >= ST_MAX) { |
718 | pr_err(" protocol %d not supported", type); | 717 | pr_err(" protocol %d not supported", type); |
719 | return -EPROTONOSUPPORT; | 718 | return -EPROTONOSUPPORT; |
@@ -767,7 +766,7 @@ long st_write(struct sk_buff *skb) | |||
767 | #endif | 766 | #endif |
768 | long len; | 767 | long len; |
769 | 768 | ||
770 | st_kim_ref(&st_gdata); | 769 | st_kim_ref(&st_gdata, 0); |
771 | if (unlikely(skb == NULL || st_gdata == NULL | 770 | if (unlikely(skb == NULL || st_gdata == NULL |
772 | || st_gdata->tty == NULL)) { | 771 | || st_gdata->tty == NULL)) { |
773 | pr_err("data/tty unavailable to perform write"); | 772 | pr_err("data/tty unavailable to perform write"); |
@@ -818,7 +817,7 @@ static int st_tty_open(struct tty_struct *tty) | |||
818 | struct st_data_s *st_gdata; | 817 | struct st_data_s *st_gdata; |
819 | pr_info("%s ", __func__); | 818 | pr_info("%s ", __func__); |
820 | 819 | ||
821 | st_kim_ref(&st_gdata); | 820 | st_kim_ref(&st_gdata, 0); |
822 | st_gdata->tty = tty; | 821 | st_gdata->tty = tty; |
823 | tty->disc_data = st_gdata; | 822 | tty->disc_data = st_gdata; |
824 | 823 | ||
diff --git a/drivers/staging/ti-st/st_core.h b/drivers/staging/ti-st/st_core.h index e0c32d149f5f..8601320a679e 100644 --- a/drivers/staging/ti-st/st_core.h +++ b/drivers/staging/ti-st/st_core.h | |||
@@ -117,7 +117,7 @@ int st_core_init(struct st_data_s **); | |||
117 | void st_core_exit(struct st_data_s *); | 117 | void st_core_exit(struct st_data_s *); |
118 | 118 | ||
119 | /* ask for reference from KIM */ | 119 | /* ask for reference from KIM */ |
120 | void st_kim_ref(struct st_data_s **); | 120 | void st_kim_ref(struct st_data_s **, int); |
121 | 121 | ||
122 | #define GPS_STUB_TEST | 122 | #define GPS_STUB_TEST |
123 | #ifdef GPS_STUB_TEST | 123 | #ifdef GPS_STUB_TEST |
diff --git a/drivers/staging/ti-st/st_kim.c b/drivers/staging/ti-st/st_kim.c index b4a6c7fdc4e6..9e99463f76e8 100644 --- a/drivers/staging/ti-st/st_kim.c +++ b/drivers/staging/ti-st/st_kim.c | |||
@@ -72,11 +72,26 @@ const unsigned char *protocol_names[] = { | |||
72 | PROTO_ENTRY(ST_GPS, "GPS"), | 72 | PROTO_ENTRY(ST_GPS, "GPS"), |
73 | }; | 73 | }; |
74 | 74 | ||
75 | #define MAX_ST_DEVICES 3 /* Imagine 1 on each UART for now */ | ||
76 | struct platform_device *st_kim_devices[MAX_ST_DEVICES]; | ||
75 | 77 | ||
76 | /**********************************************************************/ | 78 | /**********************************************************************/ |
77 | /* internal functions */ | 79 | /* internal functions */ |
78 | 80 | ||
79 | /** | 81 | /** |
82 | * st_get_plat_device - | ||
83 | * function which returns the reference to the platform device | ||
84 | * requested by id. As of now only 1 such device exists (id=0) | ||
85 | * the context requesting for reference can get the id to be | ||
86 | * requested by a. The protocol driver which is registering or | ||
87 | * b. the tty device which is opened. | ||
88 | */ | ||
89 | static struct platform_device *st_get_plat_device(int id) | ||
90 | { | ||
91 | return st_kim_devices[id]; | ||
92 | } | ||
93 | |||
94 | /** | ||
80 | * validate_firmware_response - | 95 | * validate_firmware_response - |
81 | * function to return whether the firmware response was proper | 96 | * function to return whether the firmware response was proper |
82 | * in case of error don't complete so that waiting for proper | 97 | * in case of error don't complete so that waiting for proper |
@@ -353,7 +368,7 @@ void st_kim_chip_toggle(enum proto_type type, enum kim_gpio_state state) | |||
353 | struct kim_data_s *kim_gdata; | 368 | struct kim_data_s *kim_gdata; |
354 | pr_info(" %s ", __func__); | 369 | pr_info(" %s ", __func__); |
355 | 370 | ||
356 | kim_pdev = st_get_plat_device(); | 371 | kim_pdev = st_get_plat_device(0); |
357 | kim_gdata = dev_get_drvdata(&kim_pdev->dev); | 372 | kim_gdata = dev_get_drvdata(&kim_pdev->dev); |
358 | 373 | ||
359 | if (kim_gdata->gpios[type] == -1) { | 374 | if (kim_gdata->gpios[type] == -1) { |
@@ -574,12 +589,12 @@ static int kim_toggle_radio(void *data, bool blocked) | |||
574 | * This would enable multiple such platform devices to exist | 589 | * This would enable multiple such platform devices to exist |
575 | * on a given platform | 590 | * on a given platform |
576 | */ | 591 | */ |
577 | void st_kim_ref(struct st_data_s **core_data) | 592 | void st_kim_ref(struct st_data_s **core_data, int id) |
578 | { | 593 | { |
579 | struct platform_device *pdev; | 594 | struct platform_device *pdev; |
580 | struct kim_data_s *kim_gdata; | 595 | struct kim_data_s *kim_gdata; |
581 | /* get kim_gdata reference from platform device */ | 596 | /* get kim_gdata reference from platform device */ |
582 | pdev = st_get_plat_device(); | 597 | pdev = st_get_plat_device(id); |
583 | kim_gdata = dev_get_drvdata(&pdev->dev); | 598 | kim_gdata = dev_get_drvdata(&pdev->dev); |
584 | *core_data = kim_gdata->core_data; | 599 | *core_data = kim_gdata->core_data; |
585 | } | 600 | } |
@@ -623,6 +638,7 @@ static int kim_probe(struct platform_device *pdev) | |||
623 | long *gpios = pdev->dev.platform_data; | 638 | long *gpios = pdev->dev.platform_data; |
624 | struct kim_data_s *kim_gdata; | 639 | struct kim_data_s *kim_gdata; |
625 | 640 | ||
641 | st_kim_devices[pdev->id] = pdev; | ||
626 | kim_gdata = kzalloc(sizeof(struct kim_data_s), GFP_ATOMIC); | 642 | kim_gdata = kzalloc(sizeof(struct kim_data_s), GFP_ATOMIC); |
627 | if (!kim_gdata) { | 643 | if (!kim_gdata) { |
628 | pr_err("no mem to allocate"); | 644 | pr_err("no mem to allocate"); |
diff --git a/drivers/staging/vt6655/wpactl.c b/drivers/staging/vt6655/wpactl.c index 0142338bcafe..4bdb8362de82 100644 --- a/drivers/staging/vt6655/wpactl.c +++ b/drivers/staging/vt6655/wpactl.c | |||
@@ -766,9 +766,14 @@ static int wpa_set_associate(PSDevice pDevice, | |||
766 | DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ie_len = %d\n", param->u.wpa_associate.wpa_ie_len); | 766 | DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ie_len = %d\n", param->u.wpa_associate.wpa_ie_len); |
767 | 767 | ||
768 | 768 | ||
769 | if (param->u.wpa_associate.wpa_ie && | 769 | if (param->u.wpa_associate.wpa_ie_len) { |
770 | copy_from_user(&abyWPAIE[0], param->u.wpa_associate.wpa_ie, param->u.wpa_associate.wpa_ie_len)) | 770 | if (!param->u.wpa_associate.wpa_ie) |
771 | return -EINVAL; | 771 | return -EINVAL; |
772 | if (param->u.wpa_associate.wpa_ie_len > sizeof(abyWPAIE)) | ||
773 | return -EINVAL; | ||
774 | if (copy_from_user(&abyWPAIE[0], param->u.wpa_associate.wpa_ie, param->u.wpa_associate.wpa_ie_len)) | ||
775 | return -EFAULT; | ||
776 | } | ||
772 | 777 | ||
773 | if (param->u.wpa_associate.mode == 1) | 778 | if (param->u.wpa_associate.mode == 1) |
774 | pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA; | 779 | pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA; |
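The wpactl.c hunk above is an instance of the general pattern for copying a caller-supplied, variable-length buffer from user space: validate the pointer and the length against the fixed-size destination first, and reserve -EFAULT for the copy itself. A generic, hedged sketch (all names invented):

	#include <linux/types.h>
	#include <linux/errno.h>
	#include <linux/uaccess.h>

	/* dst is a fixed-size kernel buffer; src and len come from user space. */
	static int example_copy_bounded(u8 *dst, size_t dst_size,
					const void __user *src, size_t len)
	{
		if (!len)
			return 0;		/* nothing to copy */
		if (!src || len > dst_size)
			return -EINVAL;		/* malformed request */
		if (copy_from_user(dst, src, len))
			return -EFAULT;		/* copy itself failed */
		return 0;
	}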
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c index 368c30a9d5ff..4af83d5318f2 100644 --- a/drivers/staging/wlan-ng/cfg80211.c +++ b/drivers/staging/wlan-ng/cfg80211.c | |||
@@ -219,6 +219,7 @@ int prism2_get_key(struct wiphy *wiphy, struct net_device *dev, | |||
219 | return -ENOENT; | 219 | return -ENOENT; |
220 | params.key_len = len; | 220 | params.key_len = len; |
221 | params.key = wlandev->wep_keys[key_index]; | 221 | params.key = wlandev->wep_keys[key_index]; |
222 | params.seq_len = 0; | ||
222 | 223 | ||
223 | callback(cookie, ¶ms); | 224 | callback(cookie, ¶ms); |
224 | 225 | ||
@@ -735,6 +736,8 @@ struct wiphy *wlan_create_wiphy(struct device *dev, wlandevice_t *wlandev) | |||
735 | priv->band.n_channels = ARRAY_SIZE(prism2_channels); | 736 | priv->band.n_channels = ARRAY_SIZE(prism2_channels); |
736 | priv->band.bitrates = priv->rates; | 737 | priv->band.bitrates = priv->rates; |
737 | priv->band.n_bitrates = ARRAY_SIZE(prism2_rates); | 738 | priv->band.n_bitrates = ARRAY_SIZE(prism2_rates); |
739 | priv->band.band = IEEE80211_BAND_2GHZ; | ||
740 | priv->band.ht_cap.ht_supported = false; | ||
738 | wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band; | 741 | wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band; |
739 | 742 | ||
740 | set_wiphy_dev(wiphy, dev); | 743 | set_wiphy_dev(wiphy, dev); |
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c index 77d4d715a789..722c840ac638 100644 --- a/drivers/staging/zram/zram_drv.c +++ b/drivers/staging/zram/zram_drv.c | |||
@@ -769,6 +769,7 @@ static int __init zram_init(void) | |||
769 | free_devices: | 769 | free_devices: |
770 | while (dev_id) | 770 | while (dev_id) |
771 | destroy_device(&devices[--dev_id]); | 771 | destroy_device(&devices[--dev_id]); |
772 | kfree(devices); | ||
772 | unregister: | 773 | unregister: |
773 | unregister_blkdev(zram_major, "zram"); | 774 | unregister_blkdev(zram_major, "zram"); |
774 | out: | 775 | out: |
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c index 593fc5e2d2e6..5af23cc5ea9f 100644 --- a/drivers/usb/atm/cxacru.c +++ b/drivers/usb/atm/cxacru.c | |||
@@ -1127,6 +1127,7 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance, | |||
1127 | { | 1127 | { |
1128 | struct cxacru_data *instance; | 1128 | struct cxacru_data *instance; |
1129 | struct usb_device *usb_dev = interface_to_usbdev(intf); | 1129 | struct usb_device *usb_dev = interface_to_usbdev(intf); |
1130 | struct usb_host_endpoint *cmd_ep = usb_dev->ep_in[CXACRU_EP_CMD]; | ||
1130 | int ret; | 1131 | int ret; |
1131 | 1132 | ||
1132 | /* instance init */ | 1133 | /* instance init */ |
@@ -1171,15 +1172,34 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance, | |||
1171 | goto fail; | 1172 | goto fail; |
1172 | } | 1173 | } |
1173 | 1174 | ||
1174 | usb_fill_int_urb(instance->rcv_urb, | 1175 | if (!cmd_ep) { |
1176 | dbg("cxacru_bind: no command endpoint"); | ||
1177 | ret = -ENODEV; | ||
1178 | goto fail; | ||
1179 | } | ||
1180 | |||
1181 | if ((cmd_ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) | ||
1182 | == USB_ENDPOINT_XFER_INT) { | ||
1183 | usb_fill_int_urb(instance->rcv_urb, | ||
1175 | usb_dev, usb_rcvintpipe(usb_dev, CXACRU_EP_CMD), | 1184 | usb_dev, usb_rcvintpipe(usb_dev, CXACRU_EP_CMD), |
1176 | instance->rcv_buf, PAGE_SIZE, | 1185 | instance->rcv_buf, PAGE_SIZE, |
1177 | cxacru_blocking_completion, &instance->rcv_done, 1); | 1186 | cxacru_blocking_completion, &instance->rcv_done, 1); |
1178 | 1187 | ||
1179 | usb_fill_int_urb(instance->snd_urb, | 1188 | usb_fill_int_urb(instance->snd_urb, |
1180 | usb_dev, usb_sndintpipe(usb_dev, CXACRU_EP_CMD), | 1189 | usb_dev, usb_sndintpipe(usb_dev, CXACRU_EP_CMD), |
1181 | instance->snd_buf, PAGE_SIZE, | 1190 | instance->snd_buf, PAGE_SIZE, |
1182 | cxacru_blocking_completion, &instance->snd_done, 4); | 1191 | cxacru_blocking_completion, &instance->snd_done, 4); |
1192 | } else { | ||
1193 | usb_fill_bulk_urb(instance->rcv_urb, | ||
1194 | usb_dev, usb_rcvbulkpipe(usb_dev, CXACRU_EP_CMD), | ||
1195 | instance->rcv_buf, PAGE_SIZE, | ||
1196 | cxacru_blocking_completion, &instance->rcv_done); | ||
1197 | |||
1198 | usb_fill_bulk_urb(instance->snd_urb, | ||
1199 | usb_dev, usb_sndbulkpipe(usb_dev, CXACRU_EP_CMD), | ||
1200 | instance->snd_buf, PAGE_SIZE, | ||
1201 | cxacru_blocking_completion, &instance->snd_done); | ||
1202 | } | ||
1183 | 1203 | ||
1184 | mutex_init(&instance->cm_serialize); | 1204 | mutex_init(&instance->cm_serialize); |
1185 | 1205 | ||
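cxacru_bind() now inspects the command endpoint's transfer type before deciding between interrupt and bulk URBs. The test is standard USB ch9 descriptor masking; a minimal sketch is below (the helper name is invented), and the kernel's existing usb_endpoint_xfer_int()/usb_endpoint_xfer_bulk() helpers perform the same check.

	#include <linux/types.h>
	#include <linux/usb/ch9.h>

	/* True when the endpoint descriptor declares an interrupt endpoint. */
	static bool example_ep_is_int(const struct usb_endpoint_descriptor *desc)
	{
		return (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
			== USB_ENDPOINT_XFER_INT;
	}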
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 1833b3a71515..bc62fae0680f 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c | |||
@@ -965,7 +965,8 @@ static int acm_probe(struct usb_interface *intf, | |||
965 | } | 965 | } |
966 | 966 | ||
967 | if (!buflen) { | 967 | if (!buflen) { |
968 | if (intf->cur_altsetting->endpoint->extralen && | 968 | if (intf->cur_altsetting->endpoint && |
969 | intf->cur_altsetting->endpoint->extralen && | ||
969 | intf->cur_altsetting->endpoint->extra) { | 970 | intf->cur_altsetting->endpoint->extra) { |
970 | dev_dbg(&intf->dev, | 971 | dev_dbg(&intf->dev, |
971 | "Seeking extra descriptors on endpoint\n"); | 972 | "Seeking extra descriptors on endpoint\n"); |
@@ -1481,6 +1482,11 @@ static int acm_reset_resume(struct usb_interface *intf) | |||
1481 | USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \ | 1482 | USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \ |
1482 | USB_CDC_ACM_PROTO_VENDOR) | 1483 | USB_CDC_ACM_PROTO_VENDOR) |
1483 | 1484 | ||
1485 | #define SAMSUNG_PCSUITE_ACM_INFO(x) \ | ||
1486 | USB_DEVICE_AND_INTERFACE_INFO(0x04e7, x, \ | ||
1487 | USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \ | ||
1488 | USB_CDC_ACM_PROTO_VENDOR) | ||
1489 | |||
1484 | /* | 1490 | /* |
1485 | * USB driver structure. | 1491 | * USB driver structure. |
1486 | */ | 1492 | */ |
@@ -1591,6 +1597,17 @@ static const struct usb_device_id acm_ids[] = { | |||
1591 | { NOKIA_PCSUITE_ACM_INFO(0x0108), }, /* Nokia 5320 XpressMusic 2G */ | 1597 | { NOKIA_PCSUITE_ACM_INFO(0x0108), }, /* Nokia 5320 XpressMusic 2G */ |
1592 | { NOKIA_PCSUITE_ACM_INFO(0x01f5), }, /* Nokia N97, RM-505 */ | 1598 | { NOKIA_PCSUITE_ACM_INFO(0x01f5), }, /* Nokia N97, RM-505 */ |
1593 | { NOKIA_PCSUITE_ACM_INFO(0x02e3), }, /* Nokia 5230, RM-588 */ | 1599 | { NOKIA_PCSUITE_ACM_INFO(0x02e3), }, /* Nokia 5230, RM-588 */ |
1600 | { NOKIA_PCSUITE_ACM_INFO(0x0178), }, /* Nokia E63 */ | ||
1601 | { NOKIA_PCSUITE_ACM_INFO(0x010e), }, /* Nokia E75 */ | ||
1602 | { NOKIA_PCSUITE_ACM_INFO(0x02d9), }, /* Nokia 6760 Slide */ | ||
1603 | { NOKIA_PCSUITE_ACM_INFO(0x01d0), }, /* Nokia E52 */ | ||
1604 | { NOKIA_PCSUITE_ACM_INFO(0x0223), }, /* Nokia E72 */ | ||
1605 | { NOKIA_PCSUITE_ACM_INFO(0x0275), }, /* Nokia X6 */ | ||
1606 | { NOKIA_PCSUITE_ACM_INFO(0x026c), }, /* Nokia N97 Mini */ | ||
1607 | { NOKIA_PCSUITE_ACM_INFO(0x0154), }, /* Nokia 5800 XpressMusic */ | ||
1608 | { NOKIA_PCSUITE_ACM_INFO(0x04ce), }, /* Nokia E90 */ | ||
1609 | { NOKIA_PCSUITE_ACM_INFO(0x01d4), }, /* Nokia E55 */ | ||
1610 | { SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */ | ||
1594 | 1611 | ||
1595 | /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */ | 1612 | /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */ |
1596 | 1613 | ||
@@ -1599,6 +1616,10 @@ static const struct usb_device_id acm_ids[] = { | |||
1599 | .driver_info = NOT_A_MODEM, | 1616 | .driver_info = NOT_A_MODEM, |
1600 | }, | 1617 | }, |
1601 | 1618 | ||
1619 | /* control interfaces without any protocol set */ | ||
1620 | { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, | ||
1621 | USB_CDC_PROTO_NONE) }, | ||
1622 | |||
1602 | /* control interfaces with various AT-command sets */ | 1623 | /* control interfaces with various AT-command sets */ |
1603 | { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, | 1624 | { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, |
1604 | USB_CDC_ACM_PROTO_AT_V25TER) }, | 1625 | USB_CDC_ACM_PROTO_AT_V25TER) }, |
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig index 7e594449600e..9eed5b52d9de 100644 --- a/drivers/usb/core/Kconfig +++ b/drivers/usb/core/Kconfig | |||
@@ -91,12 +91,12 @@ config USB_DYNAMIC_MINORS | |||
91 | If you are unsure about this, say N here. | 91 | If you are unsure about this, say N here. |
92 | 92 | ||
93 | config USB_SUSPEND | 93 | config USB_SUSPEND |
94 | bool "USB runtime power management (suspend/resume and wakeup)" | 94 | bool "USB runtime power management (autosuspend) and wakeup" |
95 | depends on USB && PM_RUNTIME | 95 | depends on USB && PM_RUNTIME |
96 | help | 96 | help |
97 | If you say Y here, you can use driver calls or the sysfs | 97 | If you say Y here, you can use driver calls or the sysfs |
98 | "power/level" file to suspend or resume individual USB | 98 | "power/control" file to enable or disable autosuspend for |
99 | peripherals and to enable or disable autosuspend (see | 99 | individual USB peripherals (see |
100 | Documentation/usb/power-management.txt for more details). | 100 | Documentation/usb/power-management.txt for more details). |
101 | 101 | ||
102 | Also, USB "remote wakeup" signaling is supported, whereby some | 102 | Also, USB "remote wakeup" signaling is supported, whereby some |
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c index f06f5dbc8cdc..1e6ccef2cf0c 100644 --- a/drivers/usb/core/file.c +++ b/drivers/usb/core/file.c | |||
@@ -159,9 +159,9 @@ void usb_major_cleanup(void) | |||
159 | int usb_register_dev(struct usb_interface *intf, | 159 | int usb_register_dev(struct usb_interface *intf, |
160 | struct usb_class_driver *class_driver) | 160 | struct usb_class_driver *class_driver) |
161 | { | 161 | { |
162 | int retval = -EINVAL; | 162 | int retval; |
163 | int minor_base = class_driver->minor_base; | 163 | int minor_base = class_driver->minor_base; |
164 | int minor = 0; | 164 | int minor; |
165 | char name[20]; | 165 | char name[20]; |
166 | char *temp; | 166 | char *temp; |
167 | 167 | ||
@@ -173,12 +173,17 @@ int usb_register_dev(struct usb_interface *intf, | |||
173 | */ | 173 | */ |
174 | minor_base = 0; | 174 | minor_base = 0; |
175 | #endif | 175 | #endif |
176 | intf->minor = -1; | ||
177 | |||
178 | dbg ("looking for a minor, starting at %d", minor_base); | ||
179 | 176 | ||
180 | if (class_driver->fops == NULL) | 177 | if (class_driver->fops == NULL) |
181 | goto exit; | 178 | return -EINVAL; |
179 | if (intf->minor >= 0) | ||
180 | return -EADDRINUSE; | ||
181 | |||
182 | retval = init_usb_class(); | ||
183 | if (retval) | ||
184 | return retval; | ||
185 | |||
186 | dev_dbg(&intf->dev, "looking for a minor, starting at %d", minor_base); | ||
182 | 187 | ||
183 | down_write(&minor_rwsem); | 188 | down_write(&minor_rwsem); |
184 | for (minor = minor_base; minor < MAX_USB_MINORS; ++minor) { | 189 | for (minor = minor_base; minor < MAX_USB_MINORS; ++minor) { |
@@ -186,20 +191,12 @@ int usb_register_dev(struct usb_interface *intf, | |||
186 | continue; | 191 | continue; |
187 | 192 | ||
188 | usb_minors[minor] = class_driver->fops; | 193 | usb_minors[minor] = class_driver->fops; |
189 | 194 | intf->minor = minor; | |
190 | retval = 0; | ||
191 | break; | 195 | break; |
192 | } | 196 | } |
193 | up_write(&minor_rwsem); | 197 | up_write(&minor_rwsem); |
194 | 198 | if (intf->minor < 0) | |
195 | if (retval) | 199 | return -EXFULL; |
196 | goto exit; | ||
197 | |||
198 | retval = init_usb_class(); | ||
199 | if (retval) | ||
200 | goto exit; | ||
201 | |||
202 | intf->minor = minor; | ||
203 | 200 | ||
204 | /* create a usb class device for this usb interface */ | 201 | /* create a usb class device for this usb interface */ |
205 | snprintf(name, sizeof(name), class_driver->name, minor - minor_base); | 202 | snprintf(name, sizeof(name), class_driver->name, minor - minor_base); |
@@ -213,11 +210,11 @@ int usb_register_dev(struct usb_interface *intf, | |||
213 | "%s", temp); | 210 | "%s", temp); |
214 | if (IS_ERR(intf->usb_dev)) { | 211 | if (IS_ERR(intf->usb_dev)) { |
215 | down_write(&minor_rwsem); | 212 | down_write(&minor_rwsem); |
216 | usb_minors[intf->minor] = NULL; | 213 | usb_minors[minor] = NULL; |
214 | intf->minor = -1; | ||
217 | up_write(&minor_rwsem); | 215 | up_write(&minor_rwsem); |
218 | retval = PTR_ERR(intf->usb_dev); | 216 | retval = PTR_ERR(intf->usb_dev); |
219 | } | 217 | } |
220 | exit: | ||
221 | return retval; | 218 | return retval; |
222 | } | 219 | } |
223 | EXPORT_SYMBOL_GPL(usb_register_dev); | 220 | EXPORT_SYMBOL_GPL(usb_register_dev); |
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index fd4c36ea5e46..9f0ce7de0e36 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c | |||
@@ -1724,6 +1724,15 @@ free_interfaces: | |||
1724 | if (ret) | 1724 | if (ret) |
1725 | goto free_interfaces; | 1725 | goto free_interfaces; |
1726 | 1726 | ||
1727 | /* if it's already configured, clear out old state first. | ||
1728 | * getting rid of old interfaces means unbinding their drivers. | ||
1729 | */ | ||
1730 | if (dev->state != USB_STATE_ADDRESS) | ||
1731 | usb_disable_device(dev, 1); /* Skip ep0 */ | ||
1732 | |||
1733 | /* Get rid of pending async Set-Config requests for this device */ | ||
1734 | cancel_async_set_config(dev); | ||
1735 | |||
1727 | /* Make sure we have bandwidth (and available HCD resources) for this | 1736 | /* Make sure we have bandwidth (and available HCD resources) for this |
1728 | * configuration. Remove endpoints from the schedule if we're dropping | 1737 | * configuration. Remove endpoints from the schedule if we're dropping |
1729 | * this configuration to set configuration 0. After this point, the | 1738 | * this configuration to set configuration 0. After this point, the |
@@ -1733,20 +1742,11 @@ free_interfaces: | |||
1733 | mutex_lock(&hcd->bandwidth_mutex); | 1742 | mutex_lock(&hcd->bandwidth_mutex); |
1734 | ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL); | 1743 | ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL); |
1735 | if (ret < 0) { | 1744 | if (ret < 0) { |
1736 | usb_autosuspend_device(dev); | ||
1737 | mutex_unlock(&hcd->bandwidth_mutex); | 1745 | mutex_unlock(&hcd->bandwidth_mutex); |
1746 | usb_autosuspend_device(dev); | ||
1738 | goto free_interfaces; | 1747 | goto free_interfaces; |
1739 | } | 1748 | } |
1740 | 1749 | ||
1741 | /* if it's already configured, clear out old state first. | ||
1742 | * getting rid of old interfaces means unbinding their drivers. | ||
1743 | */ | ||
1744 | if (dev->state != USB_STATE_ADDRESS) | ||
1745 | usb_disable_device(dev, 1); /* Skip ep0 */ | ||
1746 | |||
1747 | /* Get rid of pending async Set-Config requests for this device */ | ||
1748 | cancel_async_set_config(dev); | ||
1749 | |||
1750 | ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), | 1750 | ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), |
1751 | USB_REQ_SET_CONFIGURATION, 0, configuration, 0, | 1751 | USB_REQ_SET_CONFIGURATION, 0, configuration, 0, |
1752 | NULL, 0, USB_CTRL_SET_TIMEOUT); | 1752 | NULL, 0, USB_CTRL_SET_TIMEOUT); |
@@ -1761,8 +1761,8 @@ free_interfaces: | |||
1761 | if (!cp) { | 1761 | if (!cp) { |
1762 | usb_set_device_state(dev, USB_STATE_ADDRESS); | 1762 | usb_set_device_state(dev, USB_STATE_ADDRESS); |
1763 | usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); | 1763 | usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); |
1764 | usb_autosuspend_device(dev); | ||
1765 | mutex_unlock(&hcd->bandwidth_mutex); | 1764 | mutex_unlock(&hcd->bandwidth_mutex); |
1765 | usb_autosuspend_device(dev); | ||
1766 | goto free_interfaces; | 1766 | goto free_interfaces; |
1767 | } | 1767 | } |
1768 | mutex_unlock(&hcd->bandwidth_mutex); | 1768 | mutex_unlock(&hcd->bandwidth_mutex); |
@@ -1802,6 +1802,7 @@ free_interfaces: | |||
1802 | intf->dev.groups = usb_interface_groups; | 1802 | intf->dev.groups = usb_interface_groups; |
1803 | intf->dev.dma_mask = dev->dev.dma_mask; | 1803 | intf->dev.dma_mask = dev->dev.dma_mask; |
1804 | INIT_WORK(&intf->reset_ws, __usb_queue_reset_device); | 1804 | INIT_WORK(&intf->reset_ws, __usb_queue_reset_device); |
1805 | intf->minor = -1; | ||
1805 | device_initialize(&intf->dev); | 1806 | device_initialize(&intf->dev); |
1806 | dev_set_name(&intf->dev, "%d-%s:%d.%d", | 1807 | dev_set_name(&intf->dev, "%d-%s:%d.%d", |
1807 | dev->bus->busnum, dev->devpath, | 1808 | dev->bus->busnum, dev->devpath, |
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c index 020fa5a25fda..972d5ddd1e18 100644 --- a/drivers/usb/gadget/rndis.c +++ b/drivers/usb/gadget/rndis.c | |||
@@ -293,9 +293,13 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len, | |||
293 | /* mandatory */ | 293 | /* mandatory */ |
294 | case OID_GEN_VENDOR_DESCRIPTION: | 294 | case OID_GEN_VENDOR_DESCRIPTION: |
295 | pr_debug("%s: OID_GEN_VENDOR_DESCRIPTION\n", __func__); | 295 | pr_debug("%s: OID_GEN_VENDOR_DESCRIPTION\n", __func__); |
296 | length = strlen (rndis_per_dev_params [configNr].vendorDescr); | 296 | if ( rndis_per_dev_params [configNr].vendorDescr ) { |
297 | memcpy (outbuf, | 297 | length = strlen (rndis_per_dev_params [configNr].vendorDescr); |
298 | rndis_per_dev_params [configNr].vendorDescr, length); | 298 | memcpy (outbuf, |
299 | rndis_per_dev_params [configNr].vendorDescr, length); | ||
300 | } else { | ||
301 | outbuf[0] = 0; | ||
302 | } | ||
299 | retval = 0; | 303 | retval = 0; |
300 | break; | 304 | break; |
301 | 305 | ||
@@ -1148,7 +1152,7 @@ static struct proc_dir_entry *rndis_connect_state [RNDIS_MAX_CONFIGS]; | |||
1148 | #endif /* CONFIG_USB_GADGET_DEBUG_FILES */ | 1152 | #endif /* CONFIG_USB_GADGET_DEBUG_FILES */ |
1149 | 1153 | ||
1150 | 1154 | ||
1151 | int __init rndis_init (void) | 1155 | int rndis_init(void) |
1152 | { | 1156 | { |
1153 | u8 i; | 1157 | u8 i; |
1154 | 1158 | ||
diff --git a/drivers/usb/gadget/rndis.h b/drivers/usb/gadget/rndis.h index c236aaa9dcd1..907c33008118 100644 --- a/drivers/usb/gadget/rndis.h +++ b/drivers/usb/gadget/rndis.h | |||
@@ -262,7 +262,7 @@ int rndis_signal_disconnect (int configNr); | |||
262 | int rndis_state (int configNr); | 262 | int rndis_state (int configNr); |
263 | extern void rndis_set_host_mac (int configNr, const u8 *addr); | 263 | extern void rndis_set_host_mac (int configNr, const u8 *addr); |
264 | 264 | ||
265 | int __devinit rndis_init (void); | 265 | int rndis_init(void); |
266 | void rndis_exit (void); | 266 | void rndis_exit (void); |
267 | 267 | ||
268 | #endif /* _LINUX_RNDIS_H */ | 268 | #endif /* _LINUX_RNDIS_H */ |
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c index 521ebed0118d..a229744a8c7d 100644 --- a/drivers/usb/gadget/s3c-hsotg.c +++ b/drivers/usb/gadget/s3c-hsotg.c | |||
@@ -12,8 +12,6 @@ | |||
12 | * published by the Free Software Foundation. | 12 | * published by the Free Software Foundation. |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #define DEBUG | ||
16 | |||
17 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
18 | #include <linux/module.h> | 16 | #include <linux/module.h> |
19 | #include <linux/spinlock.h> | 17 | #include <linux/spinlock.h> |
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index 58b72d741d93..a1e8d273103f 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c | |||
@@ -119,6 +119,11 @@ static int ehci_pci_setup(struct usb_hcd *hcd) | |||
119 | ehci->broken_periodic = 1; | 119 | ehci->broken_periodic = 1; |
120 | ehci_info(ehci, "using broken periodic workaround\n"); | 120 | ehci_info(ehci, "using broken periodic workaround\n"); |
121 | } | 121 | } |
122 | if (pdev->device == 0x0806 || pdev->device == 0x0811 | ||
123 | || pdev->device == 0x0829) { | ||
124 | ehci_info(ehci, "disable lpm for langwell/penwell\n"); | ||
125 | ehci->has_lpm = 0; | ||
126 | } | ||
122 | break; | 127 | break; |
123 | case PCI_VENDOR_ID_TDI: | 128 | case PCI_VENDOR_ID_TDI: |
124 | if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) { | 129 | if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) { |
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c index 335ee699fd85..ba52be473027 100644 --- a/drivers/usb/host/ehci-ppc-of.c +++ b/drivers/usb/host/ehci-ppc-of.c | |||
@@ -192,17 +192,19 @@ ehci_hcd_ppc_of_probe(struct platform_device *op, const struct of_device_id *mat | |||
192 | } | 192 | } |
193 | 193 | ||
194 | rv = usb_add_hcd(hcd, irq, 0); | 194 | rv = usb_add_hcd(hcd, irq, 0); |
195 | if (rv == 0) | 195 | if (rv) |
196 | return 0; | 196 | goto err_ehci; |
197 | |||
198 | return 0; | ||
197 | 199 | ||
200 | err_ehci: | ||
201 | if (ehci->has_amcc_usb23) | ||
202 | iounmap(ehci->ohci_hcctrl_reg); | ||
198 | iounmap(hcd->regs); | 203 | iounmap(hcd->regs); |
199 | err_ioremap: | 204 | err_ioremap: |
200 | irq_dispose_mapping(irq); | 205 | irq_dispose_mapping(irq); |
201 | err_irq: | 206 | err_irq: |
202 | release_mem_region(hcd->rsrc_start, hcd->rsrc_len); | 207 | release_mem_region(hcd->rsrc_start, hcd->rsrc_len); |
203 | |||
204 | if (ehci->has_amcc_usb23) | ||
205 | iounmap(ehci->ohci_hcctrl_reg); | ||
206 | err_rmr: | 208 | err_rmr: |
207 | usb_put_hcd(hcd); | 209 | usb_put_hcd(hcd); |
208 | 210 | ||
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c index 59dc3d351b60..5ab5bb89bae3 100644 --- a/drivers/usb/musb/cppi_dma.c +++ b/drivers/usb/musb/cppi_dma.c | |||
@@ -322,6 +322,7 @@ cppi_channel_allocate(struct dma_controller *c, | |||
322 | index, transmit ? 'T' : 'R', cppi_ch); | 322 | index, transmit ? 'T' : 'R', cppi_ch); |
323 | cppi_ch->hw_ep = ep; | 323 | cppi_ch->hw_ep = ep; |
324 | cppi_ch->channel.status = MUSB_DMA_STATUS_FREE; | 324 | cppi_ch->channel.status = MUSB_DMA_STATUS_FREE; |
325 | cppi_ch->channel.max_len = 0x7fffffff; | ||
325 | 326 | ||
326 | DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R'); | 327 | DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R'); |
327 | return &cppi_ch->channel; | 328 | return &cppi_ch->channel; |
diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c index c79a5e30d437..9e8639d4e862 100644 --- a/drivers/usb/musb/musb_debugfs.c +++ b/drivers/usb/musb/musb_debugfs.c | |||
@@ -195,15 +195,14 @@ static const struct file_operations musb_regdump_fops = { | |||
195 | 195 | ||
196 | static int musb_test_mode_open(struct inode *inode, struct file *file) | 196 | static int musb_test_mode_open(struct inode *inode, struct file *file) |
197 | { | 197 | { |
198 | file->private_data = inode->i_private; | ||
199 | |||
200 | return single_open(file, musb_test_mode_show, inode->i_private); | 198 | return single_open(file, musb_test_mode_show, inode->i_private); |
201 | } | 199 | } |
202 | 200 | ||
203 | static ssize_t musb_test_mode_write(struct file *file, | 201 | static ssize_t musb_test_mode_write(struct file *file, |
204 | const char __user *ubuf, size_t count, loff_t *ppos) | 202 | const char __user *ubuf, size_t count, loff_t *ppos) |
205 | { | 203 | { |
206 | struct musb *musb = file->private_data; | 204 | struct seq_file *s = file->private_data; |
205 | struct musb *musb = s->private; | ||
207 | u8 test = 0; | 206 | u8 test = 0; |
208 | char buf[18]; | 207 | char buf[18]; |
209 | 208 | ||
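
The musb_debugfs.c hunk above hinges on how single_open() works: it installs a struct seq_file in file->private_data, so the removed assignment of inode->i_private to file->private_data was overwritten at open time and the write handler ended up dereferencing a seq_file as if it were a struct musb. The data passed to single_open() has to be reached through seq_file->private instead. A minimal, self-contained sketch of that idiom follows; the names (my_dev, my_show, my_write, the "state" file) are hypothetical and not taken from this patch set.

#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>

struct my_dev {
	int state;
};

static struct my_dev my_dev;
static struct dentry *my_dentry;

static int my_show(struct seq_file *s, void *unused)
{
	struct my_dev *dev = s->private;	/* data passed to single_open() */

	seq_printf(s, "%d\n", dev->state);
	return 0;
}

static int my_open(struct inode *inode, struct file *file)
{
	/* Do not touch file->private_data here; single_open() owns it. */
	return single_open(file, my_show, inode->i_private);
}

static ssize_t my_write(struct file *file, const char __user *ubuf,
			size_t count, loff_t *ppos)
{
	struct seq_file *s = file->private_data;	/* set by single_open() */
	struct my_dev *dev = s->private;		/* the inode->i_private we passed */
	char buf[16];

	if (count >= sizeof(buf))
		return -EINVAL;
	if (copy_from_user(buf, ubuf, count))
		return -EFAULT;
	buf[count] = '\0';
	dev->state = simple_strtol(buf, NULL, 0);
	return count;
}

static const struct file_operations my_fops = {
	.owner		= THIS_MODULE,
	.open		= my_open,
	.read		= seq_read,
	.write		= my_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init my_init(void)
{
	my_dentry = debugfs_create_file("state", 0644, NULL, &my_dev, &my_fops);
	return 0;
}

static void __exit my_exit(void)
{
	debugfs_remove(my_dentry);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");
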
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index 6fca870e957e..d065e23f123e 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c | |||
@@ -300,6 +300,11 @@ static void txstate(struct musb *musb, struct musb_request *req) | |||
300 | #ifndef CONFIG_MUSB_PIO_ONLY | 300 | #ifndef CONFIG_MUSB_PIO_ONLY |
301 | if (is_dma_capable() && musb_ep->dma) { | 301 | if (is_dma_capable() && musb_ep->dma) { |
302 | struct dma_controller *c = musb->dma_controller; | 302 | struct dma_controller *c = musb->dma_controller; |
303 | size_t request_size; | ||
304 | |||
305 | /* setup DMA, then program endpoint CSR */ | ||
306 | request_size = min_t(size_t, request->length - request->actual, | ||
307 | musb_ep->dma->max_len); | ||
303 | 308 | ||
304 | use_dma = (request->dma != DMA_ADDR_INVALID); | 309 | use_dma = (request->dma != DMA_ADDR_INVALID); |
305 | 310 | ||
@@ -307,11 +312,6 @@ static void txstate(struct musb *musb, struct musb_request *req) | |||
307 | 312 | ||
308 | #ifdef CONFIG_USB_INVENTRA_DMA | 313 | #ifdef CONFIG_USB_INVENTRA_DMA |
309 | { | 314 | { |
310 | size_t request_size; | ||
311 | |||
312 | /* setup DMA, then program endpoint CSR */ | ||
313 | request_size = min_t(size_t, request->length, | ||
314 | musb_ep->dma->max_len); | ||
315 | if (request_size < musb_ep->packet_sz) | 315 | if (request_size < musb_ep->packet_sz) |
316 | musb_ep->dma->desired_mode = 0; | 316 | musb_ep->dma->desired_mode = 0; |
317 | else | 317 | else |
@@ -373,8 +373,8 @@ static void txstate(struct musb *musb, struct musb_request *req) | |||
373 | use_dma = use_dma && c->channel_program( | 373 | use_dma = use_dma && c->channel_program( |
374 | musb_ep->dma, musb_ep->packet_sz, | 374 | musb_ep->dma, musb_ep->packet_sz, |
375 | 0, | 375 | 0, |
376 | request->dma, | 376 | request->dma + request->actual, |
377 | request->length); | 377 | request_size); |
378 | if (!use_dma) { | 378 | if (!use_dma) { |
379 | c->channel_release(musb_ep->dma); | 379 | c->channel_release(musb_ep->dma); |
380 | musb_ep->dma = NULL; | 380 | musb_ep->dma = NULL; |
@@ -386,8 +386,8 @@ static void txstate(struct musb *musb, struct musb_request *req) | |||
386 | use_dma = use_dma && c->channel_program( | 386 | use_dma = use_dma && c->channel_program( |
387 | musb_ep->dma, musb_ep->packet_sz, | 387 | musb_ep->dma, musb_ep->packet_sz, |
388 | request->zero, | 388 | request->zero, |
389 | request->dma, | 389 | request->dma + request->actual, |
390 | request->length); | 390 | request_size); |
391 | #endif | 391 | #endif |
392 | } | 392 | } |
393 | #endif | 393 | #endif |
@@ -501,26 +501,14 @@ void musb_g_tx(struct musb *musb, u8 epnum) | |||
501 | request->zero = 0; | 501 | request->zero = 0; |
502 | } | 502 | } |
503 | 503 | ||
504 | /* ... or if not, then complete it. */ | 504 | if (request->actual == request->length) { |
505 | musb_g_giveback(musb_ep, request, 0); | 505 | musb_g_giveback(musb_ep, request, 0); |
506 | 506 | request = musb_ep->desc ? next_request(musb_ep) : NULL; | |
507 | /* | 507 | if (!request) { |
508 | * Kickstart next transfer if appropriate; | 508 | DBG(4, "%s idle now\n", |
509 | * the packet that just completed might not | 509 | musb_ep->end_point.name); |
510 | * be transmitted for hours or days. | 510 | return; |
511 | * REVISIT for double buffering... | 511 | } |
512 | * FIXME revisit for stalls too... | ||
513 | */ | ||
514 | musb_ep_select(mbase, epnum); | ||
515 | csr = musb_readw(epio, MUSB_TXCSR); | ||
516 | if (csr & MUSB_TXCSR_FIFONOTEMPTY) | ||
517 | return; | ||
518 | |||
519 | request = musb_ep->desc ? next_request(musb_ep) : NULL; | ||
520 | if (!request) { | ||
521 | DBG(4, "%s idle now\n", | ||
522 | musb_ep->end_point.name); | ||
523 | return; | ||
524 | } | 512 | } |
525 | } | 513 | } |
526 | 514 | ||
@@ -568,11 +556,19 @@ static void rxstate(struct musb *musb, struct musb_request *req) | |||
568 | { | 556 | { |
569 | const u8 epnum = req->epnum; | 557 | const u8 epnum = req->epnum; |
570 | struct usb_request *request = &req->request; | 558 | struct usb_request *request = &req->request; |
571 | struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out; | 559 | struct musb_ep *musb_ep; |
572 | void __iomem *epio = musb->endpoints[epnum].regs; | 560 | void __iomem *epio = musb->endpoints[epnum].regs; |
573 | unsigned fifo_count = 0; | 561 | unsigned fifo_count = 0; |
574 | u16 len = musb_ep->packet_sz; | 562 | u16 len; |
575 | u16 csr = musb_readw(epio, MUSB_RXCSR); | 563 | u16 csr = musb_readw(epio, MUSB_RXCSR); |
564 | struct musb_hw_ep *hw_ep = &musb->endpoints[epnum]; | ||
565 | |||
566 | if (hw_ep->is_shared_fifo) | ||
567 | musb_ep = &hw_ep->ep_in; | ||
568 | else | ||
569 | musb_ep = &hw_ep->ep_out; | ||
570 | |||
571 | len = musb_ep->packet_sz; | ||
576 | 572 | ||
577 | /* We shouldn't get here while DMA is active, but we do... */ | 573 | /* We shouldn't get here while DMA is active, but we do... */ |
578 | if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) { | 574 | if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) { |
@@ -647,8 +643,8 @@ static void rxstate(struct musb *musb, struct musb_request *req) | |||
647 | */ | 643 | */ |
648 | 644 | ||
649 | csr |= MUSB_RXCSR_DMAENAB; | 645 | csr |= MUSB_RXCSR_DMAENAB; |
650 | #ifdef USE_MODE1 | ||
651 | csr |= MUSB_RXCSR_AUTOCLEAR; | 646 | csr |= MUSB_RXCSR_AUTOCLEAR; |
647 | #ifdef USE_MODE1 | ||
652 | /* csr |= MUSB_RXCSR_DMAMODE; */ | 648 | /* csr |= MUSB_RXCSR_DMAMODE; */ |
653 | 649 | ||
654 | /* this special sequence (enabling and then | 650 | /* this special sequence (enabling and then |
@@ -663,10 +659,11 @@ static void rxstate(struct musb *musb, struct musb_request *req) | |||
663 | if (request->actual < request->length) { | 659 | if (request->actual < request->length) { |
664 | int transfer_size = 0; | 660 | int transfer_size = 0; |
665 | #ifdef USE_MODE1 | 661 | #ifdef USE_MODE1 |
666 | transfer_size = min(request->length, | 662 | transfer_size = min(request->length - request->actual, |
667 | channel->max_len); | 663 | channel->max_len); |
668 | #else | 664 | #else |
669 | transfer_size = len; | 665 | transfer_size = min(request->length - request->actual, |
666 | (unsigned)len); | ||
670 | #endif | 667 | #endif |
671 | if (transfer_size <= musb_ep->packet_sz) | 668 | if (transfer_size <= musb_ep->packet_sz) |
672 | musb_ep->dma->desired_mode = 0; | 669 | musb_ep->dma->desired_mode = 0; |
@@ -740,9 +737,15 @@ void musb_g_rx(struct musb *musb, u8 epnum) | |||
740 | u16 csr; | 737 | u16 csr; |
741 | struct usb_request *request; | 738 | struct usb_request *request; |
742 | void __iomem *mbase = musb->mregs; | 739 | void __iomem *mbase = musb->mregs; |
743 | struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out; | 740 | struct musb_ep *musb_ep; |
744 | void __iomem *epio = musb->endpoints[epnum].regs; | 741 | void __iomem *epio = musb->endpoints[epnum].regs; |
745 | struct dma_channel *dma; | 742 | struct dma_channel *dma; |
743 | struct musb_hw_ep *hw_ep = &musb->endpoints[epnum]; | ||
744 | |||
745 | if (hw_ep->is_shared_fifo) | ||
746 | musb_ep = &hw_ep->ep_in; | ||
747 | else | ||
748 | musb_ep = &hw_ep->ep_out; | ||
746 | 749 | ||
747 | musb_ep_select(mbase, epnum); | 750 | musb_ep_select(mbase, epnum); |
748 | 751 | ||
@@ -1081,7 +1084,7 @@ struct free_record { | |||
1081 | /* | 1084 | /* |
1082 | * Context: controller locked, IRQs blocked. | 1085 | * Context: controller locked, IRQs blocked. |
1083 | */ | 1086 | */ |
1084 | static void musb_ep_restart(struct musb *musb, struct musb_request *req) | 1087 | void musb_ep_restart(struct musb *musb, struct musb_request *req) |
1085 | { | 1088 | { |
1086 | DBG(3, "<== %s request %p len %u on hw_ep%d\n", | 1089 | DBG(3, "<== %s request %p len %u on hw_ep%d\n", |
1087 | req->tx ? "TX/IN" : "RX/OUT", | 1090 | req->tx ? "TX/IN" : "RX/OUT", |
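
The musb_gadget.c changes above reprogram DMA per chunk: each pass covers at most musb_ep->dma->max_len bytes of what remains (request->length - request->actual) and starts at request->dma + request->actual, instead of always programming the whole request from offset zero. A small userspace sketch of the same bookkeeping, with hypothetical names and sizes, just to show the arithmetic:

#include <stdio.h>
#include <stddef.h>

static size_t next_chunk(size_t length, size_t actual, size_t max_len)
{
	size_t remaining = length - actual;

	return remaining < max_len ? remaining : max_len;
}

int main(void)
{
	size_t length = 10000, actual = 0, max_len = 4096;

	while (actual < length) {
		size_t chunk = next_chunk(length, actual, max_len);

		/* a real driver would program DMA at offset 'actual' here */
		printf("program %zu bytes at offset %zu\n", chunk, actual);
		actual += chunk;
	}
	return 0;
}
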
diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h index c8b140325d82..572b1da7f2dc 100644 --- a/drivers/usb/musb/musb_gadget.h +++ b/drivers/usb/musb/musb_gadget.h | |||
@@ -105,4 +105,6 @@ extern void musb_gadget_cleanup(struct musb *); | |||
105 | 105 | ||
106 | extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int); | 106 | extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int); |
107 | 107 | ||
108 | extern void musb_ep_restart(struct musb *, struct musb_request *); | ||
109 | |||
108 | #endif /* __MUSB_GADGET_H */ | 110 | #endif /* __MUSB_GADGET_H */ |
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c index 59bef8f3a358..6dd03f4c5f49 100644 --- a/drivers/usb/musb/musb_gadget_ep0.c +++ b/drivers/usb/musb/musb_gadget_ep0.c | |||
@@ -261,6 +261,7 @@ __acquires(musb->lock) | |||
261 | ctrlrequest->wIndex & 0x0f; | 261 | ctrlrequest->wIndex & 0x0f; |
262 | struct musb_ep *musb_ep; | 262 | struct musb_ep *musb_ep; |
263 | struct musb_hw_ep *ep; | 263 | struct musb_hw_ep *ep; |
264 | struct musb_request *request; | ||
264 | void __iomem *regs; | 265 | void __iomem *regs; |
265 | int is_in; | 266 | int is_in; |
266 | u16 csr; | 267 | u16 csr; |
@@ -302,6 +303,14 @@ __acquires(musb->lock) | |||
302 | musb_writew(regs, MUSB_RXCSR, csr); | 303 | musb_writew(regs, MUSB_RXCSR, csr); |
303 | } | 304 | } |
304 | 305 | ||
306 | /* Maybe start the first request in the queue */ | ||
307 | request = to_musb_request( | ||
308 | next_request(musb_ep)); | ||
309 | if (!musb_ep->busy && request) { | ||
310 | DBG(3, "restarting the request\n"); | ||
311 | musb_ep_restart(musb, request); | ||
312 | } | ||
313 | |||
305 | /* select ep0 again */ | 314 | /* select ep0 again */ |
306 | musb_ep_select(mbase, 0); | 315 | musb_ep_select(mbase, 0); |
307 | } break; | 316 | } break; |
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index 877d20b1dff9..9e65c47cc98b 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c | |||
@@ -660,6 +660,12 @@ static bool musb_tx_dma_program(struct dma_controller *dma, | |||
660 | 660 | ||
661 | qh->segsize = length; | 661 | qh->segsize = length; |
662 | 662 | ||
663 | /* | ||
664 | * Ensure the data reaches to main memory before starting | ||
665 | * DMA transfer | ||
666 | */ | ||
667 | wmb(); | ||
668 | |||
663 | if (!dma->channel_program(channel, pkt_size, mode, | 669 | if (!dma->channel_program(channel, pkt_size, mode, |
664 | urb->transfer_dma + offset, length)) { | 670 | urb->transfer_dma + offset, length)) { |
665 | dma->channel_release(channel); | 671 | dma->channel_release(channel); |
diff --git a/drivers/usb/otg/twl4030-usb.c b/drivers/usb/otg/twl4030-usb.c index 05aaac1c3861..0bc97698af15 100644 --- a/drivers/usb/otg/twl4030-usb.c +++ b/drivers/usb/otg/twl4030-usb.c | |||
@@ -347,11 +347,20 @@ static void twl4030_i2c_access(struct twl4030_usb *twl, int on) | |||
347 | } | 347 | } |
348 | } | 348 | } |
349 | 349 | ||
350 | static void twl4030_phy_power(struct twl4030_usb *twl, int on) | 350 | static void __twl4030_phy_power(struct twl4030_usb *twl, int on) |
351 | { | 351 | { |
352 | u8 pwr; | 352 | u8 pwr = twl4030_usb_read(twl, PHY_PWR_CTRL); |
353 | |||
354 | if (on) | ||
355 | pwr &= ~PHY_PWR_PHYPWD; | ||
356 | else | ||
357 | pwr |= PHY_PWR_PHYPWD; | ||
353 | 358 | ||
354 | pwr = twl4030_usb_read(twl, PHY_PWR_CTRL); | 359 | WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0); |
360 | } | ||
361 | |||
362 | static void twl4030_phy_power(struct twl4030_usb *twl, int on) | ||
363 | { | ||
355 | if (on) { | 364 | if (on) { |
356 | regulator_enable(twl->usb3v1); | 365 | regulator_enable(twl->usb3v1); |
357 | regulator_enable(twl->usb1v8); | 366 | regulator_enable(twl->usb1v8); |
@@ -365,15 +374,13 @@ static void twl4030_phy_power(struct twl4030_usb *twl, int on) | |||
365 | twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, | 374 | twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, |
366 | VUSB_DEDICATED2); | 375 | VUSB_DEDICATED2); |
367 | regulator_enable(twl->usb1v5); | 376 | regulator_enable(twl->usb1v5); |
368 | pwr &= ~PHY_PWR_PHYPWD; | 377 | __twl4030_phy_power(twl, 1); |
369 | WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0); | ||
370 | twl4030_usb_write(twl, PHY_CLK_CTRL, | 378 | twl4030_usb_write(twl, PHY_CLK_CTRL, |
371 | twl4030_usb_read(twl, PHY_CLK_CTRL) | | 379 | twl4030_usb_read(twl, PHY_CLK_CTRL) | |
372 | (PHY_CLK_CTRL_CLOCKGATING_EN | | 380 | (PHY_CLK_CTRL_CLOCKGATING_EN | |
373 | PHY_CLK_CTRL_CLK32K_EN)); | 381 | PHY_CLK_CTRL_CLK32K_EN)); |
374 | } else { | 382 | } else { |
375 | pwr |= PHY_PWR_PHYPWD; | 383 | __twl4030_phy_power(twl, 0); |
376 | WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0); | ||
377 | regulator_disable(twl->usb1v5); | 384 | regulator_disable(twl->usb1v5); |
378 | regulator_disable(twl->usb1v8); | 385 | regulator_disable(twl->usb1v8); |
379 | regulator_disable(twl->usb3v1); | 386 | regulator_disable(twl->usb3v1); |
@@ -387,19 +394,25 @@ static void twl4030_phy_suspend(struct twl4030_usb *twl, int controller_off) | |||
387 | 394 | ||
388 | twl4030_phy_power(twl, 0); | 395 | twl4030_phy_power(twl, 0); |
389 | twl->asleep = 1; | 396 | twl->asleep = 1; |
397 | dev_dbg(twl->dev, "%s\n", __func__); | ||
390 | } | 398 | } |
391 | 399 | ||
392 | static void twl4030_phy_resume(struct twl4030_usb *twl) | 400 | static void __twl4030_phy_resume(struct twl4030_usb *twl) |
393 | { | 401 | { |
394 | if (!twl->asleep) | ||
395 | return; | ||
396 | |||
397 | twl4030_phy_power(twl, 1); | 402 | twl4030_phy_power(twl, 1); |
398 | twl4030_i2c_access(twl, 1); | 403 | twl4030_i2c_access(twl, 1); |
399 | twl4030_usb_set_mode(twl, twl->usb_mode); | 404 | twl4030_usb_set_mode(twl, twl->usb_mode); |
400 | if (twl->usb_mode == T2_USB_MODE_ULPI) | 405 | if (twl->usb_mode == T2_USB_MODE_ULPI) |
401 | twl4030_i2c_access(twl, 0); | 406 | twl4030_i2c_access(twl, 0); |
407 | } | ||
408 | |||
409 | static void twl4030_phy_resume(struct twl4030_usb *twl) | ||
410 | { | ||
411 | if (!twl->asleep) | ||
412 | return; | ||
413 | __twl4030_phy_resume(twl); | ||
402 | twl->asleep = 0; | 414 | twl->asleep = 0; |
415 | dev_dbg(twl->dev, "%s\n", __func__); | ||
403 | } | 416 | } |
404 | 417 | ||
405 | static int twl4030_usb_ldo_init(struct twl4030_usb *twl) | 418 | static int twl4030_usb_ldo_init(struct twl4030_usb *twl) |
@@ -408,8 +421,8 @@ static int twl4030_usb_ldo_init(struct twl4030_usb *twl) | |||
408 | twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0xC0, PROTECT_KEY); | 421 | twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0xC0, PROTECT_KEY); |
409 | twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0x0C, PROTECT_KEY); | 422 | twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0x0C, PROTECT_KEY); |
410 | 423 | ||
411 | /* put VUSB3V1 LDO in active state */ | 424 | /* Keep VUSB3V1 LDO in sleep state until VBUS/ID change detected*/ |
412 | twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB_DEDICATED2); | 425 | /*twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB_DEDICATED2);*/ |
413 | 426 | ||
414 | /* input to VUSB3V1 LDO is from VBAT, not VBUS */ | 427 | /* input to VUSB3V1 LDO is from VBAT, not VBUS */ |
415 | twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0x14, VUSB_DEDICATED1); | 428 | twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0x14, VUSB_DEDICATED1); |
@@ -502,6 +515,26 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl) | |||
502 | return IRQ_HANDLED; | 515 | return IRQ_HANDLED; |
503 | } | 516 | } |
504 | 517 | ||
518 | static void twl4030_usb_phy_init(struct twl4030_usb *twl) | ||
519 | { | ||
520 | int status; | ||
521 | |||
522 | status = twl4030_usb_linkstat(twl); | ||
523 | if (status >= 0) { | ||
524 | if (status == USB_EVENT_NONE) { | ||
525 | __twl4030_phy_power(twl, 0); | ||
526 | twl->asleep = 1; | ||
527 | } else { | ||
528 | __twl4030_phy_resume(twl); | ||
529 | twl->asleep = 0; | ||
530 | } | ||
531 | |||
532 | blocking_notifier_call_chain(&twl->otg.notifier, status, | ||
533 | twl->otg.gadget); | ||
534 | } | ||
535 | sysfs_notify(&twl->dev->kobj, NULL, "vbus"); | ||
536 | } | ||
537 | |||
505 | static int twl4030_set_suspend(struct otg_transceiver *x, int suspend) | 538 | static int twl4030_set_suspend(struct otg_transceiver *x, int suspend) |
506 | { | 539 | { |
507 | struct twl4030_usb *twl = xceiv_to_twl(x); | 540 | struct twl4030_usb *twl = xceiv_to_twl(x); |
@@ -550,7 +583,6 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev) | |||
550 | struct twl4030_usb_data *pdata = pdev->dev.platform_data; | 583 | struct twl4030_usb_data *pdata = pdev->dev.platform_data; |
551 | struct twl4030_usb *twl; | 584 | struct twl4030_usb *twl; |
552 | int status, err; | 585 | int status, err; |
553 | u8 pwr; | ||
554 | 586 | ||
555 | if (!pdata) { | 587 | if (!pdata) { |
556 | dev_dbg(&pdev->dev, "platform_data not available\n"); | 588 | dev_dbg(&pdev->dev, "platform_data not available\n"); |
@@ -569,10 +601,7 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev) | |||
569 | twl->otg.set_peripheral = twl4030_set_peripheral; | 601 | twl->otg.set_peripheral = twl4030_set_peripheral; |
570 | twl->otg.set_suspend = twl4030_set_suspend; | 602 | twl->otg.set_suspend = twl4030_set_suspend; |
571 | twl->usb_mode = pdata->usb_mode; | 603 | twl->usb_mode = pdata->usb_mode; |
572 | 604 | twl->asleep = 1; | |
573 | pwr = twl4030_usb_read(twl, PHY_PWR_CTRL); | ||
574 | |||
575 | twl->asleep = (pwr & PHY_PWR_PHYPWD); | ||
576 | 605 | ||
577 | /* init spinlock for workqueue */ | 606 | /* init spinlock for workqueue */ |
578 | spin_lock_init(&twl->lock); | 607 | spin_lock_init(&twl->lock); |
@@ -610,15 +639,10 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev) | |||
610 | return status; | 639 | return status; |
611 | } | 640 | } |
612 | 641 | ||
613 | /* The IRQ handler just handles changes from the previous states | 642 | /* Power down phy or make it work according to |
614 | * of the ID and VBUS pins ... in probe() we must initialize that | 643 | * current link state. |
615 | * previous state. The easy way: fake an IRQ. | ||
616 | * | ||
617 | * REVISIT: a real IRQ might have happened already, if PREEMPT is | ||
618 | * enabled. Else the IRQ may not yet be configured or enabled, | ||
619 | * because of scheduling delays. | ||
620 | */ | 644 | */ |
621 | twl4030_usb_irq(twl->irq, twl); | 645 | twl4030_usb_phy_init(twl); |
622 | 646 | ||
623 | dev_info(&pdev->dev, "Initialized TWL4030 USB module\n"); | 647 | dev_info(&pdev->dev, "Initialized TWL4030 USB module\n"); |
624 | return 0; | 648 | return 0; |
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 80bf8333bb03..4f1744c5871f 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c | |||
@@ -56,6 +56,7 @@ static int debug; | |||
56 | static const struct usb_device_id id_table[] = { | 56 | static const struct usb_device_id id_table[] = { |
57 | { USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */ | 57 | { USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */ |
58 | { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ | 58 | { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ |
59 | { USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ | ||
59 | { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */ | 60 | { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */ |
60 | { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */ | 61 | { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */ |
61 | { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */ | 62 | { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */ |
@@ -88,6 +89,7 @@ static const struct usb_device_id id_table[] = { | |||
88 | { USB_DEVICE(0x10C4, 0x8149) }, /* West Mountain Radio Computerized Battery Analyzer */ | 89 | { USB_DEVICE(0x10C4, 0x8149) }, /* West Mountain Radio Computerized Battery Analyzer */ |
89 | { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */ | 90 | { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */ |
90 | { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */ | 91 | { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */ |
92 | { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */ | ||
91 | { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */ | 93 | { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */ |
92 | { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */ | 94 | { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */ |
93 | { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */ | 95 | { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */ |
@@ -109,6 +111,7 @@ static const struct usb_device_id id_table[] = { | |||
109 | { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */ | 111 | { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */ |
110 | { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */ | 112 | { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */ |
111 | { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */ | 113 | { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */ |
114 | { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */ | ||
112 | { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ | 115 | { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ |
113 | { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ | 116 | { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ |
114 | { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */ | 117 | { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */ |
@@ -122,14 +125,14 @@ static const struct usb_device_id id_table[] = { | |||
122 | { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */ | 125 | { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */ |
123 | { USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */ | 126 | { USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */ |
124 | { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */ | 127 | { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */ |
125 | { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */ | ||
126 | { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ | ||
127 | { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ | ||
128 | { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */ | ||
129 | { USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */ | 128 | { USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */ |
130 | { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */ | 129 | { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */ |
131 | { USB_DEVICE(0x16DC, 0x0012) }, /* W-IE-NE-R Plein & Baus GmbH MPOD Multi Channel Power Supply */ | 130 | { USB_DEVICE(0x16DC, 0x0012) }, /* W-IE-NE-R Plein & Baus GmbH MPOD Multi Channel Power Supply */ |
132 | { USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */ | 131 | { USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */ |
132 | { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */ | ||
133 | { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ | ||
134 | { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ | ||
135 | { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */ | ||
133 | { } /* Terminating Entry */ | 136 | { } /* Terminating Entry */ |
134 | }; | 137 | }; |
135 | 138 | ||
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index c792c96f590e..97cc87d654ce 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
@@ -753,6 +753,14 @@ static struct usb_device_id id_table_combined [] = { | |||
753 | { USB_DEVICE(FTDI_VID, SEGWAY_RMP200_PID) }, | 753 | { USB_DEVICE(FTDI_VID, SEGWAY_RMP200_PID) }, |
754 | { USB_DEVICE(IONICS_VID, IONICS_PLUGCOMPUTER_PID), | 754 | { USB_DEVICE(IONICS_VID, IONICS_PLUGCOMPUTER_PID), |
755 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | 755 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, |
756 | { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_24_MASTER_WING_PID) }, | ||
757 | { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_PC_WING_PID) }, | ||
758 | { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_USB_DMX_PID) }, | ||
759 | { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MIDI_TIMECODE_PID) }, | ||
760 | { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MINI_WING_PID) }, | ||
761 | { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MAXI_WING_PID) }, | ||
762 | { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MEDIA_WING_PID) }, | ||
763 | { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_WING_PID) }, | ||
756 | { }, /* Optional parameter entry */ | 764 | { }, /* Optional parameter entry */ |
757 | { } /* Terminating entry */ | 765 | { } /* Terminating entry */ |
758 | }; | 766 | }; |
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 2e95857c9633..15a4583775ad 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h | |||
@@ -135,6 +135,18 @@ | |||
135 | #define FTDI_NDI_AURORA_SCU_PID 0xDA74 /* NDI Aurora SCU */ | 135 | #define FTDI_NDI_AURORA_SCU_PID 0xDA74 /* NDI Aurora SCU */ |
136 | 136 | ||
137 | /* | 137 | /* |
138 | * ChamSys Limited (www.chamsys.co.uk) USB wing/interface product IDs | ||
139 | */ | ||
140 | #define FTDI_CHAMSYS_24_MASTER_WING_PID 0xDAF8 | ||
141 | #define FTDI_CHAMSYS_PC_WING_PID 0xDAF9 | ||
142 | #define FTDI_CHAMSYS_USB_DMX_PID 0xDAFA | ||
143 | #define FTDI_CHAMSYS_MIDI_TIMECODE_PID 0xDAFB | ||
144 | #define FTDI_CHAMSYS_MINI_WING_PID 0xDAFC | ||
145 | #define FTDI_CHAMSYS_MAXI_WING_PID 0xDAFD | ||
146 | #define FTDI_CHAMSYS_MEDIA_WING_PID 0xDAFE | ||
147 | #define FTDI_CHAMSYS_WING_PID 0xDAFF | ||
148 | |||
149 | /* | ||
138 | * Westrex International devices submitted by Cory Lee | 150 | * Westrex International devices submitted by Cory Lee |
139 | */ | 151 | */ |
140 | #define FTDI_WESTREX_MODEL_777_PID 0xDC00 /* Model 777 */ | 152 | #define FTDI_WESTREX_MODEL_777_PID 0xDC00 /* Model 777 */ |
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c index 30922a7e3347..aa665817a272 100644 --- a/drivers/usb/serial/mos7720.c +++ b/drivers/usb/serial/mos7720.c | |||
@@ -2024,6 +2024,9 @@ static int mos7720_ioctl(struct tty_struct *tty, struct file *file, | |||
2024 | 2024 | ||
2025 | case TIOCGICOUNT: | 2025 | case TIOCGICOUNT: |
2026 | cnow = mos7720_port->icount; | 2026 | cnow = mos7720_port->icount; |
2027 | |||
2028 | memset(&icount, 0, sizeof(struct serial_icounter_struct)); | ||
2029 | |||
2027 | icount.cts = cnow.cts; | 2030 | icount.cts = cnow.cts; |
2028 | icount.dsr = cnow.dsr; | 2031 | icount.dsr = cnow.dsr; |
2029 | icount.rng = cnow.rng; | 2032 | icount.rng = cnow.rng; |
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index 585b7e663740..1a42bc213799 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c | |||
@@ -119,16 +119,20 @@ | |||
119 | * by making a change here, in moschip_port_id_table, and in | 119 | * by making a change here, in moschip_port_id_table, and in |
120 | * moschip_id_table_combined | 120 | * moschip_id_table_combined |
121 | */ | 121 | */ |
122 | #define USB_VENDOR_ID_BANDB 0x0856 | 122 | #define USB_VENDOR_ID_BANDB 0x0856 |
123 | #define BANDB_DEVICE_ID_USO9ML2_2 0xAC22 | 123 | #define BANDB_DEVICE_ID_USO9ML2_2 0xAC22 |
124 | #define BANDB_DEVICE_ID_USO9ML2_4 0xAC24 | 124 | #define BANDB_DEVICE_ID_USO9ML2_2P 0xBC00 |
125 | #define BANDB_DEVICE_ID_US9ML2_2 0xAC29 | 125 | #define BANDB_DEVICE_ID_USO9ML2_4 0xAC24 |
126 | #define BANDB_DEVICE_ID_US9ML2_4 0xAC30 | 126 | #define BANDB_DEVICE_ID_USO9ML2_4P 0xBC01 |
127 | #define BANDB_DEVICE_ID_USPTL4_2 0xAC31 | 127 | #define BANDB_DEVICE_ID_US9ML2_2 0xAC29 |
128 | #define BANDB_DEVICE_ID_USPTL4_4 0xAC32 | 128 | #define BANDB_DEVICE_ID_US9ML2_4 0xAC30 |
129 | #define BANDB_DEVICE_ID_USOPTL4_2 0xAC42 | 129 | #define BANDB_DEVICE_ID_USPTL4_2 0xAC31 |
130 | #define BANDB_DEVICE_ID_USOPTL4_4 0xAC44 | 130 | #define BANDB_DEVICE_ID_USPTL4_4 0xAC32 |
131 | #define BANDB_DEVICE_ID_USOPTL2_4 0xAC24 | 131 | #define BANDB_DEVICE_ID_USOPTL4_2 0xAC42 |
132 | #define BANDB_DEVICE_ID_USOPTL4_2P 0xBC02 | ||
133 | #define BANDB_DEVICE_ID_USOPTL4_4 0xAC44 | ||
134 | #define BANDB_DEVICE_ID_USOPTL4_4P 0xBC03 | ||
135 | #define BANDB_DEVICE_ID_USOPTL2_4 0xAC24 | ||
132 | 136 | ||
133 | /* This driver also supports | 137 | /* This driver also supports |
134 | * ATEN UC2324 device using Moschip MCS7840 | 138 | * ATEN UC2324 device using Moschip MCS7840 |
@@ -184,13 +188,17 @@ static const struct usb_device_id moschip_port_id_table[] = { | |||
184 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, | 188 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, |
185 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, | 189 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, |
186 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)}, | 190 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)}, |
191 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P)}, | ||
187 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)}, | 192 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)}, |
193 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P)}, | ||
188 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)}, | 194 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)}, |
189 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)}, | 195 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)}, |
190 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)}, | 196 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)}, |
191 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)}, | 197 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)}, |
192 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, | 198 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, |
199 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P)}, | ||
193 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, | 200 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, |
201 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P)}, | ||
194 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)}, | 202 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)}, |
195 | {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, | 203 | {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, |
196 | {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)}, | 204 | {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)}, |
@@ -201,13 +209,17 @@ static const struct usb_device_id moschip_id_table_combined[] __devinitconst = { | |||
201 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, | 209 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, |
202 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, | 210 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, |
203 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)}, | 211 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)}, |
212 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P)}, | ||
204 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)}, | 213 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)}, |
214 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P)}, | ||
205 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)}, | 215 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)}, |
206 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)}, | 216 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)}, |
207 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)}, | 217 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)}, |
208 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)}, | 218 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)}, |
209 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, | 219 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, |
220 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P)}, | ||
210 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, | 221 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, |
222 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P)}, | ||
211 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)}, | 223 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)}, |
212 | {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, | 224 | {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, |
213 | {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)}, | 225 | {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)}, |
@@ -2273,6 +2285,9 @@ static int mos7840_ioctl(struct tty_struct *tty, struct file *file, | |||
2273 | case TIOCGICOUNT: | 2285 | case TIOCGICOUNT: |
2274 | cnow = mos7840_port->icount; | 2286 | cnow = mos7840_port->icount; |
2275 | smp_rmb(); | 2287 | smp_rmb(); |
2288 | |||
2289 | memset(&icount, 0, sizeof(struct serial_icounter_struct)); | ||
2290 | |||
2276 | icount.cts = cnow.cts; | 2291 | icount.cts = cnow.cts; |
2277 | icount.dsr = cnow.dsr; | 2292 | icount.dsr = cnow.dsr; |
2278 | icount.rng = cnow.rng; | 2293 | icount.rng = cnow.rng; |
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index adcbdb994de3..c46911af282f 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
@@ -164,6 +164,14 @@ static void option_instat_callback(struct urb *urb); | |||
164 | #define YISO_VENDOR_ID 0x0EAB | 164 | #define YISO_VENDOR_ID 0x0EAB |
165 | #define YISO_PRODUCT_U893 0xC893 | 165 | #define YISO_PRODUCT_U893 0xC893 |
166 | 166 | ||
167 | /* | ||
168 | * NOVATEL WIRELESS PRODUCTS | ||
169 | * | ||
170 | * Note from Novatel Wireless: | ||
171 | * If your Novatel modem does not work on linux, don't | ||
172 | * change the option module, but check our website. If | ||
173 | * that does not help, contact ddeschepper@nvtl.com | ||
174 | */ | ||
167 | /* MERLIN EVDO PRODUCTS */ | 175 | /* MERLIN EVDO PRODUCTS */ |
168 | #define NOVATELWIRELESS_PRODUCT_V640 0x1100 | 176 | #define NOVATELWIRELESS_PRODUCT_V640 0x1100 |
169 | #define NOVATELWIRELESS_PRODUCT_V620 0x1110 | 177 | #define NOVATELWIRELESS_PRODUCT_V620 0x1110 |
@@ -185,24 +193,39 @@ static void option_instat_callback(struct urb *urb); | |||
185 | #define NOVATELWIRELESS_PRODUCT_EU730 0x2400 | 193 | #define NOVATELWIRELESS_PRODUCT_EU730 0x2400 |
186 | #define NOVATELWIRELESS_PRODUCT_EU740 0x2410 | 194 | #define NOVATELWIRELESS_PRODUCT_EU740 0x2410 |
187 | #define NOVATELWIRELESS_PRODUCT_EU870D 0x2420 | 195 | #define NOVATELWIRELESS_PRODUCT_EU870D 0x2420 |
188 | |||
189 | /* OVATION PRODUCTS */ | 196 | /* OVATION PRODUCTS */ |
190 | #define NOVATELWIRELESS_PRODUCT_MC727 0x4100 | 197 | #define NOVATELWIRELESS_PRODUCT_MC727 0x4100 |
191 | #define NOVATELWIRELESS_PRODUCT_MC950D 0x4400 | 198 | #define NOVATELWIRELESS_PRODUCT_MC950D 0x4400 |
192 | #define NOVATELWIRELESS_PRODUCT_U727 0x5010 | 199 | /* |
193 | #define NOVATELWIRELESS_PRODUCT_MC727_NEW 0x5100 | 200 | * Note from Novatel Wireless: |
194 | #define NOVATELWIRELESS_PRODUCT_MC760 0x6000 | 201 | * All PID in the 5xxx range are currently reserved for |
202 | * auto-install CDROMs, and should not be added to this | ||
203 | * module. | ||
204 | * | ||
205 | * #define NOVATELWIRELESS_PRODUCT_U727 0x5010 | ||
206 | * #define NOVATELWIRELESS_PRODUCT_MC727_NEW 0x5100 | ||
207 | */ | ||
195 | #define NOVATELWIRELESS_PRODUCT_OVMC760 0x6002 | 208 | #define NOVATELWIRELESS_PRODUCT_OVMC760 0x6002 |
196 | 209 | #define NOVATELWIRELESS_PRODUCT_MC780 0x6010 | |
197 | /* FUTURE NOVATEL PRODUCTS */ | 210 | #define NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED 0x6000 |
198 | #define NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED 0X6001 | 211 | #define NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED 0x6001 |
199 | #define NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED 0X7000 | 212 | #define NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED 0x7000 |
200 | #define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED 0X7001 | 213 | #define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED 0x7001 |
201 | #define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED 0X8000 | 214 | #define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED3 0x7003 |
202 | #define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED 0X8001 | 215 | #define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED4 0x7004 |
203 | #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0X9000 | 216 | #define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED5 0x7005 |
204 | #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0X9001 | 217 | #define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED6 0x7006 |
205 | #define NOVATELWIRELESS_PRODUCT_GLOBAL 0XA001 | 218 | #define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED7 0x7007 |
219 | #define NOVATELWIRELESS_PRODUCT_MC996D 0x7030 | ||
220 | #define NOVATELWIRELESS_PRODUCT_MF3470 0x7041 | ||
221 | #define NOVATELWIRELESS_PRODUCT_MC547 0x7042 | ||
222 | #define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED 0x8000 | ||
223 | #define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED 0x8001 | ||
224 | #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0x9000 | ||
225 | #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001 | ||
226 | #define NOVATELWIRELESS_PRODUCT_G1 0xA001 | ||
227 | #define NOVATELWIRELESS_PRODUCT_G1_M 0xA002 | ||
228 | #define NOVATELWIRELESS_PRODUCT_G2 0xA010 | ||
206 | 229 | ||
207 | /* AMOI PRODUCTS */ | 230 | /* AMOI PRODUCTS */ |
208 | #define AMOI_VENDOR_ID 0x1614 | 231 | #define AMOI_VENDOR_ID 0x1614 |
@@ -490,36 +513,44 @@ static const struct usb_device_id option_ids[] = { | |||
490 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) }, | 513 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) }, |
491 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) }, | 514 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) }, |
492 | { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC) }, | 515 | { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC) }, |
493 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, /* Novatel Merlin V640/XV620 */ | 516 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, |
494 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, /* Novatel Merlin V620/S620 */ | 517 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, |
495 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) }, /* Novatel Merlin EX720/V740/X720 */ | 518 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) }, |
496 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V720) }, /* Novatel Merlin V720/S720/PC720 */ | 519 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V720) }, |
497 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U730) }, /* Novatel U730/U740 (VF version) */ | 520 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U730) }, |
498 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U740) }, /* Novatel U740 */ | 521 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U740) }, |
499 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U870) }, /* Novatel U870 */ | 522 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U870) }, |
500 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_XU870) }, /* Novatel Merlin XU870 HSDPA/3G */ | 523 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_XU870) }, |
501 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_X950D) }, /* Novatel X950D */ | 524 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_X950D) }, |
502 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EV620) }, /* Novatel EV620/ES620 CDMA/EV-DO */ | 525 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EV620) }, |
503 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES720) }, /* Novatel ES620/ES720/U720/USB720 */ | 526 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES720) }, |
504 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E725) }, /* Novatel E725/E726 */ | 527 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E725) }, |
505 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES620) }, /* Novatel Merlin ES620 SM Bus */ | 528 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES620) }, |
506 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU730) }, /* Novatel EU730 and Vodafone EU740 */ | 529 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU730) }, |
507 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU740) }, /* Novatel non-Vodafone EU740 */ | 530 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU740) }, |
508 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, /* Novatel EU850D/EU860D/EU870D */ | 531 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, |
509 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */ | 532 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, |
510 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */ | 533 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, |
511 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727_NEW) }, /* Novatel MC727/U727/USB727 refresh */ | 534 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_OVMC760) }, |
512 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U727) }, /* Novatel MC727/U727/USB727 */ | 535 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC780) }, |
513 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC760) }, /* Novatel MC760/U760/USB760 */ | 536 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED) }, |
514 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_OVMC760) }, /* Novatel Ovation MC760 */ | 537 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED) }, |
515 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED) }, /* Novatel HSPA product */ | 538 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) }, |
516 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) }, /* Novatel EVDO Embedded product */ | 539 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) }, |
517 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) }, /* Novatel HSPA Embedded product */ | 540 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED) }, |
518 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED) }, /* Novatel EVDO product */ | 541 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED3) }, |
519 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED) }, /* Novatel HSPA product */ | 542 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED4) }, |
520 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED) }, /* Novatel EVDO Embedded product */ | 543 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED5) }, |
521 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED) }, /* Novatel HSPA Embedded product */ | 544 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED6) }, |
522 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_GLOBAL) }, /* Novatel Global product */ | 545 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED7) }, |
546 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC996D) }, | ||
547 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MF3470) }, | ||
548 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC547) }, | ||
549 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED) }, | ||
550 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED) }, | ||
551 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1) }, | ||
552 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1_M) }, | ||
553 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) }, | ||
523 | 554 | ||
524 | { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) }, | 555 | { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) }, |
525 | { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) }, | 556 | { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) }, |
diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c index 68c18fdfc6da..e986002b3844 100644 --- a/drivers/usb/serial/ssu100.c +++ b/drivers/usb/serial/ssu100.c | |||
@@ -46,7 +46,7 @@ | |||
46 | #define FULLPWRBIT 0x00000080 | 46 | #define FULLPWRBIT 0x00000080 |
47 | #define NEXT_BOARD_POWER_BIT 0x00000004 | 47 | #define NEXT_BOARD_POWER_BIT 0x00000004 |
48 | 48 | ||
49 | static int debug = 1; | 49 | static int debug; |
50 | 50 | ||
51 | /* Version Information */ | 51 | /* Version Information */ |
52 | #define DRIVER_VERSION "v0.1" | 52 | #define DRIVER_VERSION "v0.1" |
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 29e850a7a2f9..7c8008225ee3 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c | |||
@@ -243,7 +243,7 @@ static int get_rx_bufs(struct vhost_virtqueue *vq, | |||
243 | int r, nlogs = 0; | 243 | int r, nlogs = 0; |
244 | 244 | ||
245 | while (datalen > 0) { | 245 | while (datalen > 0) { |
246 | if (unlikely(headcount >= VHOST_NET_MAX_SG)) { | 246 | if (unlikely(seg >= VHOST_NET_MAX_SG)) { |
247 | r = -ENOBUFS; | 247 | r = -ENOBUFS; |
248 | goto err; | 248 | goto err; |
249 | } | 249 | } |
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index e05557d52999..dd3d6f7406f8 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c | |||
@@ -60,22 +60,25 @@ static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync, | |||
60 | return 0; | 60 | return 0; |
61 | } | 61 | } |
62 | 62 | ||
63 | static void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn) | ||
64 | { | ||
65 | INIT_LIST_HEAD(&work->node); | ||
66 | work->fn = fn; | ||
67 | init_waitqueue_head(&work->done); | ||
68 | work->flushing = 0; | ||
69 | work->queue_seq = work->done_seq = 0; | ||
70 | } | ||
71 | |||
63 | /* Init poll structure */ | 72 | /* Init poll structure */ |
64 | void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn, | 73 | void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn, |
65 | unsigned long mask, struct vhost_dev *dev) | 74 | unsigned long mask, struct vhost_dev *dev) |
66 | { | 75 | { |
67 | struct vhost_work *work = &poll->work; | ||
68 | |||
69 | init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup); | 76 | init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup); |
70 | init_poll_funcptr(&poll->table, vhost_poll_func); | 77 | init_poll_funcptr(&poll->table, vhost_poll_func); |
71 | poll->mask = mask; | 78 | poll->mask = mask; |
72 | poll->dev = dev; | 79 | poll->dev = dev; |
73 | 80 | ||
74 | INIT_LIST_HEAD(&work->node); | 81 | vhost_work_init(&poll->work, fn); |
75 | work->fn = fn; | ||
76 | init_waitqueue_head(&work->done); | ||
77 | work->flushing = 0; | ||
78 | work->queue_seq = work->done_seq = 0; | ||
79 | } | 82 | } |
80 | 83 | ||
81 | /* Start polling a file. We add ourselves to file's wait queue. The caller must | 84 | /* Start polling a file. We add ourselves to file's wait queue. The caller must |
@@ -95,35 +98,38 @@ void vhost_poll_stop(struct vhost_poll *poll) | |||
95 | remove_wait_queue(poll->wqh, &poll->wait); | 98 | remove_wait_queue(poll->wqh, &poll->wait); |
96 | } | 99 | } |
97 | 100 | ||
98 | /* Flush any work that has been scheduled. When calling this, don't hold any | 101 | static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work) |
99 | * locks that are also used by the callback. */ | ||
100 | void vhost_poll_flush(struct vhost_poll *poll) | ||
101 | { | 102 | { |
102 | struct vhost_work *work = &poll->work; | ||
103 | unsigned seq; | 103 | unsigned seq; |
104 | int left; | 104 | int left; |
105 | int flushing; | 105 | int flushing; |
106 | 106 | ||
107 | spin_lock_irq(&poll->dev->work_lock); | 107 | spin_lock_irq(&dev->work_lock); |
108 | seq = work->queue_seq; | 108 | seq = work->queue_seq; |
109 | work->flushing++; | 109 | work->flushing++; |
110 | spin_unlock_irq(&poll->dev->work_lock); | 110 | spin_unlock_irq(&dev->work_lock); |
111 | wait_event(work->done, ({ | 111 | wait_event(work->done, ({ |
112 | spin_lock_irq(&poll->dev->work_lock); | 112 | spin_lock_irq(&dev->work_lock); |
113 | left = seq - work->done_seq <= 0; | 113 | left = seq - work->done_seq <= 0; |
114 | spin_unlock_irq(&poll->dev->work_lock); | 114 | spin_unlock_irq(&dev->work_lock); |
115 | left; | 115 | left; |
116 | })); | 116 | })); |
117 | spin_lock_irq(&poll->dev->work_lock); | 117 | spin_lock_irq(&dev->work_lock); |
118 | flushing = --work->flushing; | 118 | flushing = --work->flushing; |
119 | spin_unlock_irq(&poll->dev->work_lock); | 119 | spin_unlock_irq(&dev->work_lock); |
120 | BUG_ON(flushing < 0); | 120 | BUG_ON(flushing < 0); |
121 | } | 121 | } |
122 | 122 | ||
123 | void vhost_poll_queue(struct vhost_poll *poll) | 123 | /* Flush any work that has been scheduled. When calling this, don't hold any |
124 | * locks that are also used by the callback. */ | ||
125 | void vhost_poll_flush(struct vhost_poll *poll) | ||
126 | { | ||
127 | vhost_work_flush(poll->dev, &poll->work); | ||
128 | } | ||
129 | |||
130 | static inline void vhost_work_queue(struct vhost_dev *dev, | ||
131 | struct vhost_work *work) | ||
124 | { | 132 | { |
125 | struct vhost_dev *dev = poll->dev; | ||
126 | struct vhost_work *work = &poll->work; | ||
127 | unsigned long flags; | 133 | unsigned long flags; |
128 | 134 | ||
129 | spin_lock_irqsave(&dev->work_lock, flags); | 135 | spin_lock_irqsave(&dev->work_lock, flags); |
@@ -135,6 +141,11 @@ void vhost_poll_queue(struct vhost_poll *poll) | |||
135 | spin_unlock_irqrestore(&dev->work_lock, flags); | 141 | spin_unlock_irqrestore(&dev->work_lock, flags); |
136 | } | 142 | } |
137 | 143 | ||
144 | void vhost_poll_queue(struct vhost_poll *poll) | ||
145 | { | ||
146 | vhost_work_queue(poll->dev, &poll->work); | ||
147 | } | ||
148 | |||
138 | static void vhost_vq_reset(struct vhost_dev *dev, | 149 | static void vhost_vq_reset(struct vhost_dev *dev, |
139 | struct vhost_virtqueue *vq) | 150 | struct vhost_virtqueue *vq) |
140 | { | 151 | { |
@@ -236,6 +247,29 @@ long vhost_dev_check_owner(struct vhost_dev *dev) | |||
236 | return dev->mm == current->mm ? 0 : -EPERM; | 247 | return dev->mm == current->mm ? 0 : -EPERM; |
237 | } | 248 | } |
238 | 249 | ||
250 | struct vhost_attach_cgroups_struct { | ||
251 | struct vhost_work work; | ||
252 | struct task_struct *owner; | ||
253 | int ret; | ||
254 | }; | ||
255 | |||
256 | static void vhost_attach_cgroups_work(struct vhost_work *work) | ||
257 | { | ||
258 | struct vhost_attach_cgroups_struct *s; | ||
259 | s = container_of(work, struct vhost_attach_cgroups_struct, work); | ||
260 | s->ret = cgroup_attach_task_all(s->owner, current); | ||
261 | } | ||
262 | |||
263 | static int vhost_attach_cgroups(struct vhost_dev *dev) | ||
264 | { | ||
265 | struct vhost_attach_cgroups_struct attach; | ||
266 | attach.owner = current; | ||
267 | vhost_work_init(&attach.work, vhost_attach_cgroups_work); | ||
268 | vhost_work_queue(dev, &attach.work); | ||
269 | vhost_work_flush(dev, &attach.work); | ||
270 | return attach.ret; | ||
271 | } | ||
272 | |||
239 | /* Caller should have device mutex */ | 273 | /* Caller should have device mutex */ |
240 | static long vhost_dev_set_owner(struct vhost_dev *dev) | 274 | static long vhost_dev_set_owner(struct vhost_dev *dev) |
241 | { | 275 | { |
@@ -255,14 +289,16 @@ static long vhost_dev_set_owner(struct vhost_dev *dev) | |||
255 | } | 289 | } |
256 | 290 | ||
257 | dev->worker = worker; | 291 | dev->worker = worker; |
258 | err = cgroup_attach_task_current_cg(worker); | 292 | wake_up_process(worker); /* avoid contributing to loadavg */ |
293 | |||
294 | err = vhost_attach_cgroups(dev); | ||
259 | if (err) | 295 | if (err) |
260 | goto err_cgroup; | 296 | goto err_cgroup; |
261 | wake_up_process(worker); /* avoid contributing to loadavg */ | ||
262 | 297 | ||
263 | return 0; | 298 | return 0; |
264 | err_cgroup: | 299 | err_cgroup: |
265 | kthread_stop(worker); | 300 | kthread_stop(worker); |
301 | dev->worker = NULL; | ||
266 | err_worker: | 302 | err_worker: |
267 | if (dev->mm) | 303 | if (dev->mm) |
268 | mmput(dev->mm); | 304 | mmput(dev->mm); |
@@ -323,7 +359,10 @@ void vhost_dev_cleanup(struct vhost_dev *dev) | |||
323 | dev->mm = NULL; | 359 | dev->mm = NULL; |
324 | 360 | ||
325 | WARN_ON(!list_empty(&dev->work_list)); | 361 | WARN_ON(!list_empty(&dev->work_list)); |
326 | kthread_stop(dev->worker); | 362 | if (dev->worker) { |
363 | kthread_stop(dev->worker); | ||
364 | dev->worker = NULL; | ||
365 | } | ||
327 | } | 366 | } |
328 | 367 | ||
329 | static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz) | 368 | static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz) |
@@ -819,11 +858,12 @@ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, | |||
819 | if (r < 0) | 858 | if (r < 0) |
820 | return r; | 859 | return r; |
821 | len -= l; | 860 | len -= l; |
822 | if (!len) | 861 | if (!len) { |
862 | if (vq->log_ctx) | ||
863 | eventfd_signal(vq->log_ctx, 1); | ||
823 | return 0; | 864 | return 0; |
865 | } | ||
824 | } | 866 | } |
825 | if (vq->log_ctx) | ||
826 | eventfd_signal(vq->log_ctx, 1); | ||
827 | /* Length written exceeds what we have stored. This is a bug. */ | 867 | /* Length written exceeds what we have stored. This is a bug. */ |
828 | BUG(); | 868 | BUG(); |
829 | return 0; | 869 | return 0; |
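
The vhost change above stops attaching the worker kthread from the ioctl path with cgroup_attach_task_current_cg(); instead it queues a small vhost_work and flushes it, so cgroup_attach_task_all() runs on the worker thread itself and the caller waits for its result. A rough userspace sketch of that queue-then-flush idea, using pthreads stand-ins rather than the vhost API (struct work, queue_and_flush and attach_job are invented names):

/* Userspace sketch, not the vhost code: run a function on a dedicated
 * worker thread and wait for it to finish, mirroring the
 * vhost_work_queue() + vhost_work_flush() pairing used for
 * vhost_attach_cgroups(). */
#include <pthread.h>
#include <stdio.h>

struct work {
    void (*fn)(struct work *w);
    int done;
    int ret;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static struct work *pending;          /* one-slot "queue" for brevity */

static void *worker(void *arg)
{
    (void)arg;
    for (;;) {
        pthread_mutex_lock(&lock);
        while (!pending)
            pthread_cond_wait(&cond, &lock);
        struct work *w = pending;
        pending = NULL;
        pthread_mutex_unlock(&lock);

        w->fn(w);                     /* runs in the worker's context */

        pthread_mutex_lock(&lock);
        w->done = 1;                  /* like done_seq catching up with seq */
        pthread_cond_broadcast(&cond);
        pthread_mutex_unlock(&lock);
    }
    return NULL;
}

static void queue_and_flush(struct work *w)
{
    pthread_mutex_lock(&lock);
    pending = w;
    pthread_cond_broadcast(&cond);
    while (!w->done)                  /* the "flush": wait for completion */
        pthread_cond_wait(&cond, &lock);
    pthread_mutex_unlock(&lock);
}

static void attach_job(struct work *w)
{
    /* stand-in for cgroup_attach_task_all(owner, current) */
    w->ret = 0;
    printf("attach ran on the worker thread\n");
}

int main(void)
{
    pthread_t t;
    struct work w = { .fn = attach_job };

    pthread_create(&t, NULL, worker, NULL);
    queue_and_flush(&w);
    printf("attach result: %d\n", w.ret);
    return 0;
}

The dev->worker = NULL resets in the error path and in vhost_dev_cleanup() then keep kthread_stop() from being called on a worker that was never started or was already torn down.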
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c index 84f842331dfa..7ccc967831f0 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c | |||
@@ -3508,7 +3508,7 @@ static void fbcon_exit(void) | |||
3508 | softback_buf = 0UL; | 3508 | softback_buf = 0UL; |
3509 | 3509 | ||
3510 | for (i = 0; i < FB_MAX; i++) { | 3510 | for (i = 0; i < FB_MAX; i++) { |
3511 | int pending; | 3511 | int pending = 0; |
3512 | 3512 | ||
3513 | mapped = 0; | 3513 | mapped = 0; |
3514 | info = registered_fb[i]; | 3514 | info = registered_fb[i]; |
@@ -3516,7 +3516,8 @@ static void fbcon_exit(void) | |||
3516 | if (info == NULL) | 3516 | if (info == NULL) |
3517 | continue; | 3517 | continue; |
3518 | 3518 | ||
3519 | pending = cancel_work_sync(&info->queue); | 3519 | if (info->queue.func) |
3520 | pending = cancel_work_sync(&info->queue); | ||
3520 | DPRINTK("fbcon: %s pending work\n", (pending ? "canceled" : | 3521 | DPRINTK("fbcon: %s pending work\n", (pending ? "canceled" : |
3521 | "no")); | 3522 | "no")); |
3522 | 3523 | ||
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c index 815f84b07933..70477c2e4b61 100644 --- a/drivers/video/efifb.c +++ b/drivers/video/efifb.c | |||
@@ -13,7 +13,7 @@ | |||
13 | #include <linux/platform_device.h> | 13 | #include <linux/platform_device.h> |
14 | #include <linux/screen_info.h> | 14 | #include <linux/screen_info.h> |
15 | #include <linux/dmi.h> | 15 | #include <linux/dmi.h> |
16 | 16 | #include <linux/pci.h> | |
17 | #include <video/vga.h> | 17 | #include <video/vga.h> |
18 | 18 | ||
19 | static struct fb_var_screeninfo efifb_defined __devinitdata = { | 19 | static struct fb_var_screeninfo efifb_defined __devinitdata = { |
@@ -39,17 +39,31 @@ enum { | |||
39 | M_I20, /* 20-Inch iMac */ | 39 | M_I20, /* 20-Inch iMac */ |
40 | M_I20_SR, /* 20-Inch iMac (Santa Rosa) */ | 40 | M_I20_SR, /* 20-Inch iMac (Santa Rosa) */ |
41 | M_I24, /* 24-Inch iMac */ | 41 | M_I24, /* 24-Inch iMac */ |
42 | M_I24_8_1, /* 24-Inch iMac, 8,1th gen */ | ||
43 | M_I24_10_1, /* 24-Inch iMac, 10,1th gen */ | ||
44 | M_I27_11_1, /* 27-Inch iMac, 11,1th gen */ | ||
42 | M_MINI, /* Mac Mini */ | 45 | M_MINI, /* Mac Mini */ |
46 | M_MINI_3_1, /* Mac Mini, 3,1th gen */ | ||
47 | M_MINI_4_1, /* Mac Mini, 4,1th gen */ | ||
43 | M_MB, /* MacBook */ | 48 | M_MB, /* MacBook */ |
44 | M_MB_2, /* MacBook, 2nd rev. */ | 49 | M_MB_2, /* MacBook, 2nd rev. */ |
45 | M_MB_3, /* MacBook, 3rd rev. */ | 50 | M_MB_3, /* MacBook, 3rd rev. */ |
51 | M_MB_5_1, /* MacBook, 5th rev. */ | ||
52 | M_MB_6_1, /* MacBook, 6th rev. */ | ||
53 | M_MB_7_1, /* MacBook, 7th rev. */ | ||
46 | M_MB_SR, /* MacBook, 2nd gen, (Santa Rosa) */ | 54 | M_MB_SR, /* MacBook, 2nd gen, (Santa Rosa) */ |
47 | M_MBA, /* MacBook Air */ | 55 | M_MBA, /* MacBook Air */ |
48 | M_MBP, /* MacBook Pro */ | 56 | M_MBP, /* MacBook Pro */ |
49 | M_MBP_2, /* MacBook Pro 2nd gen */ | 57 | M_MBP_2, /* MacBook Pro 2nd gen */ |
58 | M_MBP_2_2, /* MacBook Pro 2,2nd gen */ | ||
50 | M_MBP_SR, /* MacBook Pro (Santa Rosa) */ | 59 | M_MBP_SR, /* MacBook Pro (Santa Rosa) */ |
51 | M_MBP_4, /* MacBook Pro, 4th gen */ | 60 | M_MBP_4, /* MacBook Pro, 4th gen */ |
52 | M_MBP_5_1, /* MacBook Pro, 5,1th gen */ | 61 | M_MBP_5_1, /* MacBook Pro, 5,1th gen */ |
62 | M_MBP_5_2, /* MacBook Pro, 5,2th gen */ | ||
63 | M_MBP_5_3, /* MacBook Pro, 5,3rd gen */ | ||
64 | M_MBP_6_1, /* MacBook Pro, 6,1th gen */ | ||
65 | M_MBP_6_2, /* MacBook Pro, 6,2th gen */ | ||
66 | M_MBP_7_1, /* MacBook Pro, 7,1th gen */ | ||
53 | M_UNKNOWN /* placeholder */ | 67 | M_UNKNOWN /* placeholder */ |
54 | }; | 68 | }; |
55 | 69 | ||
@@ -64,14 +78,28 @@ static struct efifb_dmi_info { | |||
64 | [M_I20] = { "i20", 0x80010000, 1728 * 4, 1680, 1050 }, /* guess */ | 78 | [M_I20] = { "i20", 0x80010000, 1728 * 4, 1680, 1050 }, /* guess */ |
65 | [M_I20_SR] = { "imac7", 0x40010000, 1728 * 4, 1680, 1050 }, | 79 | [M_I20_SR] = { "imac7", 0x40010000, 1728 * 4, 1680, 1050 }, |
66 | [M_I24] = { "i24", 0x80010000, 2048 * 4, 1920, 1200 }, /* guess */ | 80 | [M_I24] = { "i24", 0x80010000, 2048 * 4, 1920, 1200 }, /* guess */ |
81 | [M_I24_8_1] = { "imac8", 0xc0060000, 2048 * 4, 1920, 1200 }, | ||
82 | [M_I24_10_1] = { "imac10", 0xc0010000, 2048 * 4, 1920, 1080 }, | ||
83 | [M_I27_11_1] = { "imac11", 0xc0010000, 2560 * 4, 2560, 1440 }, | ||
67 | [M_MINI]= { "mini", 0x80000000, 2048 * 4, 1024, 768 }, | 84 | [M_MINI]= { "mini", 0x80000000, 2048 * 4, 1024, 768 }, |
85 | [M_MINI_3_1] = { "mini31", 0x40010000, 1024 * 4, 1024, 768 }, | ||
86 | [M_MINI_4_1] = { "mini41", 0xc0010000, 2048 * 4, 1920, 1200 }, | ||
68 | [M_MB] = { "macbook", 0x80000000, 2048 * 4, 1280, 800 }, | 87 | [M_MB] = { "macbook", 0x80000000, 2048 * 4, 1280, 800 }, |
88 | [M_MB_5_1] = { "macbook51", 0x80010000, 2048 * 4, 1280, 800 }, | ||
89 | [M_MB_6_1] = { "macbook61", 0x80010000, 2048 * 4, 1280, 800 }, | ||
90 | [M_MB_7_1] = { "macbook71", 0x80010000, 2048 * 4, 1280, 800 }, | ||
69 | [M_MBA] = { "mba", 0x80000000, 2048 * 4, 1280, 800 }, | 91 | [M_MBA] = { "mba", 0x80000000, 2048 * 4, 1280, 800 }, |
70 | [M_MBP] = { "mbp", 0x80010000, 1472 * 4, 1440, 900 }, | 92 | [M_MBP] = { "mbp", 0x80010000, 1472 * 4, 1440, 900 }, |
71 | [M_MBP_2] = { "mbp2", 0, 0, 0, 0 }, /* placeholder */ | 93 | [M_MBP_2] = { "mbp2", 0, 0, 0, 0 }, /* placeholder */ |
94 | [M_MBP_2_2] = { "mbp22", 0x80010000, 1472 * 4, 1440, 900 }, | ||
72 | [M_MBP_SR] = { "mbp3", 0x80030000, 2048 * 4, 1440, 900 }, | 95 | [M_MBP_SR] = { "mbp3", 0x80030000, 2048 * 4, 1440, 900 }, |
73 | [M_MBP_4] = { "mbp4", 0xc0060000, 2048 * 4, 1920, 1200 }, | 96 | [M_MBP_4] = { "mbp4", 0xc0060000, 2048 * 4, 1920, 1200 }, |
74 | [M_MBP_5_1] = { "mbp51", 0xc0010000, 2048 * 4, 1440, 900 }, | 97 | [M_MBP_5_1] = { "mbp51", 0xc0010000, 2048 * 4, 1440, 900 }, |
98 | [M_MBP_5_2] = { "mbp52", 0xc0010000, 2048 * 4, 1920, 1200 }, | ||
99 | [M_MBP_5_3] = { "mbp53", 0xd0010000, 2048 * 4, 1440, 900 }, | ||
100 | [M_MBP_6_1] = { "mbp61", 0x90030000, 2048 * 4, 1920, 1200 }, | ||
101 | [M_MBP_6_2] = { "mbp62", 0x90030000, 2048 * 4, 1680, 1050 }, | ||
102 | [M_MBP_7_1] = { "mbp71", 0xc0010000, 2048 * 4, 1280, 800 }, | ||
75 | [M_UNKNOWN] = { NULL, 0, 0, 0, 0 } | 103 | [M_UNKNOWN] = { NULL, 0, 0, 0, 0 } |
76 | }; | 104 | }; |
77 | 105 | ||
@@ -92,7 +120,12 @@ static const struct dmi_system_id dmi_system_table[] __initconst = { | |||
92 | EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "iMac6,1", M_I24), | 120 | EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "iMac6,1", M_I24), |
93 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac6,1", M_I24), | 121 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac6,1", M_I24), |
94 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac7,1", M_I20_SR), | 122 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac7,1", M_I20_SR), |
123 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac8,1", M_I24_8_1), | ||
124 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac10,1", M_I24_10_1), | ||
125 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac11,1", M_I27_11_1), | ||
95 | EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "Macmini1,1", M_MINI), | 126 | EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "Macmini1,1", M_MINI), |
127 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "Macmini3,1", M_MINI_3_1), | ||
128 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "Macmini4,1", M_MINI_4_1), | ||
96 | EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook1,1", M_MB), | 129 | EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook1,1", M_MB), |
97 | /* At least one of these two will be right; maybe both? */ | 130 | /* At least one of these two will be right; maybe both? */ |
98 | EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook2,1", M_MB), | 131 | EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook2,1", M_MB), |
@@ -101,14 +134,23 @@ static const struct dmi_system_id dmi_system_table[] __initconst = { | |||
101 | EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook3,1", M_MB), | 134 | EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook3,1", M_MB), |
102 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook3,1", M_MB), | 135 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook3,1", M_MB), |
103 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook4,1", M_MB), | 136 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook4,1", M_MB), |
137 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook5,1", M_MB_5_1), | ||
138 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook6,1", M_MB_6_1), | ||
139 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook7,1", M_MB_7_1), | ||
104 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookAir1,1", M_MBA), | 140 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookAir1,1", M_MBA), |
105 | EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro1,1", M_MBP), | 141 | EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro1,1", M_MBP), |
106 | EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,1", M_MBP_2), | 142 | EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,1", M_MBP_2), |
143 | EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,2", M_MBP_2_2), | ||
107 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro2,1", M_MBP_2), | 144 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro2,1", M_MBP_2), |
108 | EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro3,1", M_MBP_SR), | 145 | EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro3,1", M_MBP_SR), |
109 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro3,1", M_MBP_SR), | 146 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro3,1", M_MBP_SR), |
110 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro4,1", M_MBP_4), | 147 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro4,1", M_MBP_4), |
111 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,1", M_MBP_5_1), | 148 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,1", M_MBP_5_1), |
149 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,2", M_MBP_5_2), | ||
150 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,3", M_MBP_5_3), | ||
151 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro6,1", M_MBP_6_1), | ||
152 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro6,2", M_MBP_6_2), | ||
153 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro7,1", M_MBP_7_1), | ||
112 | {}, | 154 | {}, |
113 | }; | 155 | }; |
114 | 156 | ||
@@ -116,7 +158,7 @@ static int set_system(const struct dmi_system_id *id) | |||
116 | { | 158 | { |
117 | struct efifb_dmi_info *info = id->driver_data; | 159 | struct efifb_dmi_info *info = id->driver_data; |
118 | if (info->base == 0) | 160 | if (info->base == 0) |
119 | return -ENODEV; | 161 | return 0; |
120 | 162 | ||
121 | printk(KERN_INFO "efifb: dmi detected %s - framebuffer at %p " | 163 | printk(KERN_INFO "efifb: dmi detected %s - framebuffer at %p " |
122 | "(%dx%d, stride %d)\n", id->ident, | 164 | "(%dx%d, stride %d)\n", id->ident, |
@@ -124,18 +166,55 @@ static int set_system(const struct dmi_system_id *id) | |||
124 | info->stride); | 166 | info->stride); |
125 | 167 | ||
126 | /* Trust the bootloader over the DMI tables */ | 168 | /* Trust the bootloader over the DMI tables */ |
127 | if (screen_info.lfb_base == 0) | 169 | if (screen_info.lfb_base == 0) { |
170 | #if defined(CONFIG_PCI) | ||
171 | struct pci_dev *dev = NULL; | ||
172 | int found_bar = 0; | ||
173 | #endif | ||
128 | screen_info.lfb_base = info->base; | 174 | screen_info.lfb_base = info->base; |
129 | if (screen_info.lfb_linelength == 0) | ||
130 | screen_info.lfb_linelength = info->stride; | ||
131 | if (screen_info.lfb_width == 0) | ||
132 | screen_info.lfb_width = info->width; | ||
133 | if (screen_info.lfb_height == 0) | ||
134 | screen_info.lfb_height = info->height; | ||
135 | if (screen_info.orig_video_isVGA == 0) | ||
136 | screen_info.orig_video_isVGA = VIDEO_TYPE_EFI; | ||
137 | 175 | ||
138 | return 0; | 176 | #if defined(CONFIG_PCI) |
177 | /* make sure that the address in the table is actually on a | ||
178 | * VGA device's PCI BAR */ | ||
179 | |||
180 | for_each_pci_dev(dev) { | ||
181 | int i; | ||
182 | if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) | ||
183 | continue; | ||
184 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { | ||
185 | resource_size_t start, end; | ||
186 | |||
187 | start = pci_resource_start(dev, i); | ||
188 | if (start == 0) | ||
189 | break; | ||
190 | end = pci_resource_end(dev, i); | ||
191 | if (screen_info.lfb_base >= start && | ||
192 | screen_info.lfb_base < end) { | ||
193 | found_bar = 1; | ||
194 | } | ||
195 | } | ||
196 | } | ||
197 | if (!found_bar) | ||
198 | screen_info.lfb_base = 0; | ||
199 | #endif | ||
200 | } | ||
201 | if (screen_info.lfb_base) { | ||
202 | if (screen_info.lfb_linelength == 0) | ||
203 | screen_info.lfb_linelength = info->stride; | ||
204 | if (screen_info.lfb_width == 0) | ||
205 | screen_info.lfb_width = info->width; | ||
206 | if (screen_info.lfb_height == 0) | ||
207 | screen_info.lfb_height = info->height; | ||
208 | if (screen_info.orig_video_isVGA == 0) | ||
209 | screen_info.orig_video_isVGA = VIDEO_TYPE_EFI; | ||
210 | } else { | ||
211 | screen_info.lfb_linelength = 0; | ||
212 | screen_info.lfb_width = 0; | ||
213 | screen_info.lfb_height = 0; | ||
214 | screen_info.orig_video_isVGA = 0; | ||
215 | return 0; | ||
216 | } | ||
217 | return 1; | ||
139 | } | 218 | } |
140 | 219 | ||
141 | static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green, | 220 | static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green, |
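
The efifb hunk only trusts a DMI-supplied framebuffer address if it falls inside a BAR of some PCI display device; otherwise it zeroes screen_info.lfb_base and the derived fields. A minimal sketch of that containment test, with a made-up resource table standing in for the PCI core:

/* Sketch of the check: accept an address only if it lies inside one of
 * the device's apertures. The ranges below are illustrative. */
#include <stdio.h>
#include <stdint.h>

struct resource_range {
    uint64_t start;
    uint64_t end;            /* exclusive here, unlike pci_resource_end() */
};

static int addr_in_ranges(uint64_t addr,
                          const struct resource_range *res, int n)
{
    for (int i = 0; i < n; i++) {
        if (res[i].start == 0)
            continue;        /* unused BAR */
        if (addr >= res[i].start && addr < res[i].end)
            return 1;
    }
    return 0;
}

int main(void)
{
    /* pretend BARs of a VGA-class device */
    const struct resource_range bars[] = {
        { 0xc0000000, 0xd0000000 },
        { 0, 0 },
    };
    uint64_t lfb_base = 0xc0010000;

    if (!addr_in_ranges(lfb_base, bars, 2))
        lfb_base = 0;        /* mirror: screen_info.lfb_base = 0 */

    printf("lfb_base after check: %#llx\n", (unsigned long long)lfb_base);
    return 0;
}

In the driver the walk uses for_each_pci_dev() with pci_resource_start()/pci_resource_end(); the sketch only models the range test itself.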
diff --git a/drivers/video/pxa168fb.c b/drivers/video/pxa168fb.c index c91a7f70f7b0..a31a77ff6f3d 100644 --- a/drivers/video/pxa168fb.c +++ b/drivers/video/pxa168fb.c | |||
@@ -298,8 +298,8 @@ static void set_dma_control0(struct pxa168fb_info *fbi) | |||
298 | * Set bit to enable graphics DMA. | 298 | * Set bit to enable graphics DMA. |
299 | */ | 299 | */ |
300 | x = readl(fbi->reg_base + LCD_SPU_DMA_CTRL0); | 300 | x = readl(fbi->reg_base + LCD_SPU_DMA_CTRL0); |
301 | x |= fbi->active ? 0x00000100 : 0; | 301 | x &= ~CFG_GRA_ENA_MASK; |
302 | fbi->active = 0; | 302 | x |= fbi->active ? CFG_GRA_ENA(1) : CFG_GRA_ENA(0); |
303 | 303 | ||
304 | /* | 304 | /* |
305 | * If we are in a pseudo-color mode, we need to enable | 305 | * If we are in a pseudo-color mode, we need to enable |
@@ -559,7 +559,7 @@ static struct fb_ops pxa168fb_ops = { | |||
559 | .fb_imageblit = cfb_imageblit, | 559 | .fb_imageblit = cfb_imageblit, |
560 | }; | 560 | }; |
561 | 561 | ||
562 | static int __init pxa168fb_init_mode(struct fb_info *info, | 562 | static int __devinit pxa168fb_init_mode(struct fb_info *info, |
563 | struct pxa168fb_mach_info *mi) | 563 | struct pxa168fb_mach_info *mi) |
564 | { | 564 | { |
565 | struct pxa168fb_info *fbi = info->par; | 565 | struct pxa168fb_info *fbi = info->par; |
@@ -599,7 +599,7 @@ static int __init pxa168fb_init_mode(struct fb_info *info, | |||
599 | return ret; | 599 | return ret; |
600 | } | 600 | } |
601 | 601 | ||
602 | static int __init pxa168fb_probe(struct platform_device *pdev) | 602 | static int __devinit pxa168fb_probe(struct platform_device *pdev) |
603 | { | 603 | { |
604 | struct pxa168fb_mach_info *mi; | 604 | struct pxa168fb_mach_info *mi; |
605 | struct fb_info *info = 0; | 605 | struct fb_info *info = 0; |
@@ -792,7 +792,7 @@ static struct platform_driver pxa168fb_driver = { | |||
792 | .probe = pxa168fb_probe, | 792 | .probe = pxa168fb_probe, |
793 | }; | 793 | }; |
794 | 794 | ||
795 | static int __devinit pxa168fb_init(void) | 795 | static int __init pxa168fb_init(void) |
796 | { | 796 | { |
797 | return platform_driver_register(&pxa168fb_driver); | 797 | return platform_driver_register(&pxa168fb_driver); |
798 | } | 798 | } |
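
The pxa168fb fix turns the enable-bit update into a proper read-modify-write: clear the field with its mask before OR-ing in the new value, so clearing fbi->active really disables graphics DMA instead of leaving the old bit set. A tiny sketch of the pattern with illustrative register macros:

/* Illustrative bit layout only; not the real LCD_SPU_DMA_CTRL0 macros. */
#include <stdio.h>
#include <stdint.h>

#define GRA_ENA_MASK  (1u << 8)
#define GRA_ENA(v)    (((v) & 1u) << 8)

static uint32_t set_gra_enable(uint32_t reg, int active)
{
    reg &= ~GRA_ENA_MASK;              /* drop the stale bit first */
    reg |= GRA_ENA(active ? 1 : 0);
    return reg;
}

int main(void)
{
    uint32_t reg = 0xffffffff;         /* pretend the bit was already set */
    printf("disable -> %#x\n", (unsigned)set_gra_enable(reg, 0));
    printf("enable  -> %#x\n", (unsigned)set_gra_enable(reg, 1));
    return 0;
}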
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c index 559bf1727a2b..b52f8e4ef1fd 100644 --- a/drivers/video/sis/sis_main.c +++ b/drivers/video/sis/sis_main.c | |||
@@ -1701,6 +1701,9 @@ static int sisfb_ioctl(struct fb_info *info, unsigned int cmd, | |||
1701 | break; | 1701 | break; |
1702 | 1702 | ||
1703 | case FBIOGET_VBLANK: | 1703 | case FBIOGET_VBLANK: |
1704 | |||
1705 | memset(&sisvbblank, 0, sizeof(struct fb_vblank)); | ||
1706 | |||
1704 | sisvbblank.count = 0; | 1707 | sisvbblank.count = 0; |
1705 | sisvbblank.flags = sisfb_setupvbblankflags(ivideo, &sisvbblank.vcount, &sisvbblank.hcount); | 1708 | sisvbblank.flags = sisfb_setupvbblankflags(ivideo, &sisvbblank.vcount, &sisvbblank.hcount); |
1706 | 1709 | ||
diff --git a/drivers/video/via/ioctl.c b/drivers/video/via/ioctl.c index da03c074e32a..4d553d0b8d7a 100644 --- a/drivers/video/via/ioctl.c +++ b/drivers/video/via/ioctl.c | |||
@@ -25,6 +25,8 @@ int viafb_ioctl_get_viafb_info(u_long arg) | |||
25 | { | 25 | { |
26 | struct viafb_ioctl_info viainfo; | 26 | struct viafb_ioctl_info viainfo; |
27 | 27 | ||
28 | memset(&viainfo, 0, sizeof(struct viafb_ioctl_info)); | ||
29 | |||
28 | viainfo.viafb_id = VIAID; | 30 | viainfo.viafb_id = VIAID; |
29 | viainfo.vendor_id = PCI_VIA_VENDOR_ID; | 31 | viainfo.vendor_id = PCI_VIA_VENDOR_ID; |
30 | 32 | ||
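
Both the sisfb and viafb ioctl hunks zero the reply structure before filling it, the usual way to keep never-written fields and padding bytes from reaching userspace with stale stack contents. A small userspace sketch of the idea; struct reply and fill_reply() are invented names:

#include <stdio.h>
#include <string.h>

struct reply {
    unsigned int count;
    unsigned char flag;
    /* compilers typically insert padding here */
    unsigned int vcount;
    unsigned int reserved[4];          /* never written by the handler */
};

static void fill_reply(struct reply *r)
{
    memset(r, 0, sizeof(*r));          /* the line the patches add */
    r->count = 0;
    r->flag = 1;
    r->vcount = 42;
    /* reserved[] and padding are now guaranteed zero before the
     * equivalent of copy_to_user() runs */
}

int main(void)
{
    struct reply r;
    fill_reply(&r);
    printf("vcount=%u reserved[0]=%u\n", r.vcount, r.reserved[0]);
    return 0;
}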
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index b036677df8c4..24efd8ea41bb 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig | |||
@@ -213,11 +213,11 @@ config OMAP_WATCHDOG | |||
213 | here to enable the OMAP1610/OMAP1710/OMAP2420/OMAP3430/OMAP4430 watchdog timer. | 213 | here to enable the OMAP1610/OMAP1710/OMAP2420/OMAP3430/OMAP4430 watchdog timer. |
214 | 214 | ||
215 | config PNX4008_WATCHDOG | 215 | config PNX4008_WATCHDOG |
216 | tristate "PNX4008 Watchdog" | 216 | tristate "PNX4008 and LPC32XX Watchdog" |
217 | depends on ARCH_PNX4008 | 217 | depends on ARCH_PNX4008 || ARCH_LPC32XX |
218 | help | 218 | help |
219 | Say Y here to include support for the watchdog timer | 219 | Say Y here to include support for the watchdog timer |
220 | in the PNX4008 processor. | 220 | in the PNX4008 or LPC32XX processor. |
221 | This driver can be built as a module by choosing M. The module | 221 | This driver can be built as a module by choosing M. The module |
222 | will be called pnx4008_wdt. | 222 | will be called pnx4008_wdt. |
223 | 223 | ||
diff --git a/drivers/watchdog/sb_wdog.c b/drivers/watchdog/sb_wdog.c index 88c83aa57303..f31493e65b38 100644 --- a/drivers/watchdog/sb_wdog.c +++ b/drivers/watchdog/sb_wdog.c | |||
@@ -305,7 +305,7 @@ static int __init sbwdog_init(void) | |||
305 | if (ret) { | 305 | if (ret) { |
306 | printk(KERN_ERR "%s: failed to request irq 1 - %d\n", | 306 | printk(KERN_ERR "%s: failed to request irq 1 - %d\n", |
307 | ident.identity, ret); | 307 | ident.identity, ret); |
308 | return ret; | 308 | goto out; |
309 | } | 309 | } |
310 | 310 | ||
311 | ret = misc_register(&sbwdog_miscdev); | 311 | ret = misc_register(&sbwdog_miscdev); |
@@ -313,14 +313,20 @@ static int __init sbwdog_init(void) | |||
313 | printk(KERN_INFO "%s: timeout is %ld.%ld secs\n", | 313 | printk(KERN_INFO "%s: timeout is %ld.%ld secs\n", |
314 | ident.identity, | 314 | ident.identity, |
315 | timeout / 1000000, (timeout / 100000) % 10); | 315 | timeout / 1000000, (timeout / 100000) % 10); |
316 | } else | 316 | return 0; |
317 | free_irq(1, (void *)user_dog); | 317 | } |
318 | free_irq(1, (void *)user_dog); | ||
319 | out: | ||
320 | unregister_reboot_notifier(&sbwdog_notifier); | ||
321 | |||
318 | return ret; | 322 | return ret; |
319 | } | 323 | } |
320 | 324 | ||
321 | static void __exit sbwdog_exit(void) | 325 | static void __exit sbwdog_exit(void) |
322 | { | 326 | { |
323 | misc_deregister(&sbwdog_miscdev); | 327 | misc_deregister(&sbwdog_miscdev); |
328 | free_irq(1, (void *)user_dog); | ||
329 | unregister_reboot_notifier(&sbwdog_notifier); | ||
324 | } | 330 | } |
325 | 331 | ||
326 | module_init(sbwdog_init); | 332 | module_init(sbwdog_init); |
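
The sb_wdog change reworks the init error path into the usual reverse-order unwind (free the IRQ, then unregister the reboot notifier) and mirrors the same releases in sbwdog_exit(). A compact sketch of that goto-based unwind pattern with stand-in steps for register_reboot_notifier(), request_irq() and misc_register():

#include <stdio.h>

static int step_a(void) { return 0; }          /* e.g. notifier registration */
static int step_b(void) { return 0; }          /* e.g. request_irq() */
static int step_c(void) { return -1; }         /* e.g. misc_register() fails */
static void undo_a(void) { puts("undo a"); }
static void undo_b(void) { puts("undo b"); }

static int init(void)
{
    int ret;

    ret = step_a();
    if (ret)
        return ret;

    ret = step_b();
    if (ret)
        goto out_a;

    ret = step_c();
    if (ret)
        goto out_b;

    return 0;

out_b:
    undo_b();          /* free_irq() in the real driver */
out_a:
    undo_a();          /* unregister_reboot_notifier() */
    return ret;
}

int main(void)
{
    printf("init() = %d\n", init());
    return 0;
}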
diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c index 458c499c1223..18cdeb4c4258 100644 --- a/drivers/watchdog/ts72xx_wdt.c +++ b/drivers/watchdog/ts72xx_wdt.c | |||
@@ -449,6 +449,9 @@ static __devinit int ts72xx_wdt_probe(struct platform_device *pdev) | |||
449 | wdt->pdev = pdev; | 449 | wdt->pdev = pdev; |
450 | mutex_init(&wdt->lock); | 450 | mutex_init(&wdt->lock); |
451 | 451 | ||
452 | /* make sure that the watchdog is disabled */ | ||
453 | ts72xx_wdt_stop(wdt); | ||
454 | |||
452 | error = misc_register(&ts72xx_wdt_miscdev); | 455 | error = misc_register(&ts72xx_wdt_miscdev); |
453 | if (error) { | 456 | if (error) { |
454 | dev_err(&pdev->dev, "failed to register miscdev\n"); | 457 | dev_err(&pdev->dev, "failed to register miscdev\n"); |
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index 29bac5118877..d409495876f1 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c | |||
@@ -755,7 +755,10 @@ int register_xenstore_notifier(struct notifier_block *nb) | |||
755 | { | 755 | { |
756 | int ret = 0; | 756 | int ret = 0; |
757 | 757 | ||
758 | blocking_notifier_chain_register(&xenstore_chain, nb); | 758 | if (xenstored_ready > 0) |
759 | ret = nb->notifier_call(nb, 0, NULL); | ||
760 | else | ||
761 | blocking_notifier_chain_register(&xenstore_chain, nb); | ||
759 | 762 | ||
760 | return ret; | 763 | return ret; |
761 | } | 764 | } |
@@ -769,7 +772,7 @@ EXPORT_SYMBOL_GPL(unregister_xenstore_notifier); | |||
769 | 772 | ||
770 | void xenbus_probe(struct work_struct *unused) | 773 | void xenbus_probe(struct work_struct *unused) |
771 | { | 774 | { |
772 | BUG_ON((xenstored_ready <= 0)); | 775 | xenstored_ready = 1; |
773 | 776 | ||
774 | /* Enumerate devices in xenstore and watch for changes. */ | 777 | /* Enumerate devices in xenstore and watch for changes. */ |
775 | xenbus_probe_devices(&xenbus_frontend); | 778 | xenbus_probe_devices(&xenbus_frontend); |
@@ -835,8 +838,8 @@ static int __init xenbus_init(void) | |||
835 | xen_store_evtchn = xen_start_info->store_evtchn; | 838 | xen_store_evtchn = xen_start_info->store_evtchn; |
836 | xen_store_mfn = xen_start_info->store_mfn; | 839 | xen_store_mfn = xen_start_info->store_mfn; |
837 | xen_store_interface = mfn_to_virt(xen_store_mfn); | 840 | xen_store_interface = mfn_to_virt(xen_store_mfn); |
841 | xenstored_ready = 1; | ||
838 | } | 842 | } |
839 | xenstored_ready = 1; | ||
840 | } | 843 | } |
841 | 844 | ||
842 | /* Initialize the interface to xenstore. */ | 845 | /* Initialize the interface to xenstore. */ |
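
register_xenstore_notifier() now calls the notifier straight away when xenstored is already up, and only parks it on the chain otherwise, so a late registrant is not silently skipped. A simplified sketch of the call-now-or-queue pattern; the array chain and ready flag below are stand-ins, not the notifier-block API:

#include <stdio.h>

typedef int (*notifier_fn)(void *data);

#define MAX_NOTIFIERS 8
static notifier_fn chain[MAX_NOTIFIERS];
static int chain_len;
static int store_ready;               /* mirrors xenstored_ready */

static int register_notifier(notifier_fn fn)
{
    if (store_ready)
        return fn(NULL);              /* call now, like nb->notifier_call() */
    if (chain_len == MAX_NOTIFIERS)
        return -1;
    chain[chain_len++] = fn;          /* call later when the store comes up */
    return 0;
}

static void store_came_up(void)
{
    store_ready = 1;
    for (int i = 0; i < chain_len; i++)
        chain[i](NULL);
}

static int my_cb(void *data)
{
    (void)data;
    puts("notifier ran");
    return 0;
}

int main(void)
{
    register_notifier(my_cb);         /* before ready: queued */
    store_came_up();                  /* runs the queued callback */
    register_notifier(my_cb);         /* after ready: runs immediately */
    return 0;
}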
diff --git a/fs/9p/fid.c b/fs/9p/fid.c index 358563689064..6406f896bf95 100644 --- a/fs/9p/fid.c +++ b/fs/9p/fid.c | |||
@@ -242,7 +242,8 @@ struct p9_fid *v9fs_fid_lookup(struct dentry *dentry) | |||
242 | } | 242 | } |
243 | kfree(wnames); | 243 | kfree(wnames); |
244 | fid_out: | 244 | fid_out: |
245 | v9fs_fid_add(dentry, fid); | 245 | if (!IS_ERR(fid)) |
246 | v9fs_fid_add(dentry, fid); | ||
246 | err_out: | 247 | err_out: |
247 | up_read(&v9ses->rename_sem); | 248 | up_read(&v9ses->rename_sem); |
248 | return fid; | 249 | return fid; |
diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c index 16c8a2a98c1b..899f168fd19c 100644 --- a/fs/9p/vfs_dir.c +++ b/fs/9p/vfs_dir.c | |||
@@ -292,9 +292,11 @@ int v9fs_dir_release(struct inode *inode, struct file *filp) | |||
292 | 292 | ||
293 | fid = filp->private_data; | 293 | fid = filp->private_data; |
294 | P9_DPRINTK(P9_DEBUG_VFS, | 294 | P9_DPRINTK(P9_DEBUG_VFS, |
295 | "inode: %p filp: %p fid: %d\n", inode, filp, fid->fid); | 295 | "v9fs_dir_release: inode: %p filp: %p fid: %d\n", |
296 | inode, filp, fid ? fid->fid : -1); | ||
296 | filemap_write_and_wait(inode->i_mapping); | 297 | filemap_write_and_wait(inode->i_mapping); |
297 | p9_client_clunk(fid); | 298 | if (fid) |
299 | p9_client_clunk(fid); | ||
298 | return 0; | 300 | return 0; |
299 | } | 301 | } |
300 | 302 | ||
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index c7c23eab9440..9e670d527646 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c | |||
@@ -730,7 +730,10 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int mode, | |||
730 | P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err); | 730 | P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err); |
731 | goto error; | 731 | goto error; |
732 | } | 732 | } |
733 | dentry->d_op = &v9fs_cached_dentry_operations; | 733 | if (v9ses->cache) |
734 | dentry->d_op = &v9fs_cached_dentry_operations; | ||
735 | else | ||
736 | dentry->d_op = &v9fs_dentry_operations; | ||
734 | d_instantiate(dentry, inode); | 737 | d_instantiate(dentry, inode); |
735 | err = v9fs_fid_add(dentry, fid); | 738 | err = v9fs_fid_add(dentry, fid); |
736 | if (err < 0) | 739 | if (err < 0) |
@@ -1128,6 +1131,7 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, | |||
1128 | v9fs_stat2inode(st, dentry->d_inode, dentry->d_inode->i_sb); | 1131 | v9fs_stat2inode(st, dentry->d_inode, dentry->d_inode->i_sb); |
1129 | generic_fillattr(dentry->d_inode, stat); | 1132 | generic_fillattr(dentry->d_inode, stat); |
1130 | 1133 | ||
1134 | p9stat_free(st); | ||
1131 | kfree(st); | 1135 | kfree(st); |
1132 | return 0; | 1136 | return 0; |
1133 | } | 1137 | } |
@@ -1489,6 +1493,7 @@ static int v9fs_readlink(struct dentry *dentry, char *buffer, int buflen) | |||
1489 | 1493 | ||
1490 | retval = strnlen(buffer, buflen); | 1494 | retval = strnlen(buffer, buflen); |
1491 | done: | 1495 | done: |
1496 | p9stat_free(st); | ||
1492 | kfree(st); | 1497 | kfree(st); |
1493 | return retval; | 1498 | return retval; |
1494 | } | 1499 | } |
@@ -1942,7 +1947,7 @@ static const struct inode_operations v9fs_dir_inode_operations_dotu = { | |||
1942 | .unlink = v9fs_vfs_unlink, | 1947 | .unlink = v9fs_vfs_unlink, |
1943 | .mkdir = v9fs_vfs_mkdir, | 1948 | .mkdir = v9fs_vfs_mkdir, |
1944 | .rmdir = v9fs_vfs_rmdir, | 1949 | .rmdir = v9fs_vfs_rmdir, |
1945 | .mknod = v9fs_vfs_mknod_dotl, | 1950 | .mknod = v9fs_vfs_mknod, |
1946 | .rename = v9fs_vfs_rename, | 1951 | .rename = v9fs_vfs_rename, |
1947 | .getattr = v9fs_vfs_getattr, | 1952 | .getattr = v9fs_vfs_getattr, |
1948 | .setattr = v9fs_vfs_setattr, | 1953 | .setattr = v9fs_vfs_setattr, |
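
The two p9stat_free() additions in v9fs_vfs_getattr() and v9fs_readlink() free the strings owned by the stat structure before kfree() releases the structure itself, closing a leak. A small userspace analog of the free-members-then-container rule; struct wstat here is a simplified stand-in for struct p9_wstat:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct wstat {
    char *name;
    char *uid;
};

static void stat_free(struct wstat *st)        /* p9stat_free() analog */
{
    free(st->name);
    free(st->uid);
}

static struct wstat *stat_alloc(const char *name, const char *uid)
{
    struct wstat *st = calloc(1, sizeof(*st));
    if (!st)
        return NULL;
    st->name = strdup(name);
    st->uid = strdup(uid);
    if (!st->name || !st->uid) {
        stat_free(st);
        free(st);
        return NULL;
    }
    return st;
}

int main(void)
{
    struct wstat *st = stat_alloc("file", "user");
    if (!st)
        return 1;
    printf("%s owned by %s\n", st->name, st->uid);
    stat_free(st);     /* free the members ... */
    free(st);          /* ... then the container, as in the patch */
    return 0;
}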
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c index f9311077de68..1d12ba0ed3db 100644 --- a/fs/9p/vfs_super.c +++ b/fs/9p/vfs_super.c | |||
@@ -122,6 +122,10 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags, | |||
122 | fid = v9fs_session_init(v9ses, dev_name, data); | 122 | fid = v9fs_session_init(v9ses, dev_name, data); |
123 | if (IS_ERR(fid)) { | 123 | if (IS_ERR(fid)) { |
124 | retval = PTR_ERR(fid); | 124 | retval = PTR_ERR(fid); |
125 | /* | ||
126 | * we need to call session_close to tear down some | ||
127 | * of the data structure setup by session_init | ||
128 | */ | ||
125 | goto close_session; | 129 | goto close_session; |
126 | } | 130 | } |
127 | 131 | ||
@@ -144,7 +148,6 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags, | |||
144 | retval = -ENOMEM; | 148 | retval = -ENOMEM; |
145 | goto release_sb; | 149 | goto release_sb; |
146 | } | 150 | } |
147 | |||
148 | sb->s_root = root; | 151 | sb->s_root = root; |
149 | 152 | ||
150 | if (v9fs_proto_dotl(v9ses)) { | 153 | if (v9fs_proto_dotl(v9ses)) { |
@@ -152,7 +155,7 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags, | |||
152 | st = p9_client_getattr_dotl(fid, P9_STATS_BASIC); | 155 | st = p9_client_getattr_dotl(fid, P9_STATS_BASIC); |
153 | if (IS_ERR(st)) { | 156 | if (IS_ERR(st)) { |
154 | retval = PTR_ERR(st); | 157 | retval = PTR_ERR(st); |
155 | goto clunk_fid; | 158 | goto release_sb; |
156 | } | 159 | } |
157 | 160 | ||
158 | v9fs_stat2inode_dotl(st, root->d_inode); | 161 | v9fs_stat2inode_dotl(st, root->d_inode); |
@@ -162,7 +165,7 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags, | |||
162 | st = p9_client_stat(fid); | 165 | st = p9_client_stat(fid); |
163 | if (IS_ERR(st)) { | 166 | if (IS_ERR(st)) { |
164 | retval = PTR_ERR(st); | 167 | retval = PTR_ERR(st); |
165 | goto clunk_fid; | 168 | goto release_sb; |
166 | } | 169 | } |
167 | 170 | ||
168 | root->d_inode->i_ino = v9fs_qid2ino(&st->qid); | 171 | root->d_inode->i_ino = v9fs_qid2ino(&st->qid); |
@@ -174,19 +177,24 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags, | |||
174 | 177 | ||
175 | v9fs_fid_add(root, fid); | 178 | v9fs_fid_add(root, fid); |
176 | 179 | ||
177 | P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n"); | 180 | P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n"); |
178 | simple_set_mnt(mnt, sb); | 181 | simple_set_mnt(mnt, sb); |
179 | return 0; | 182 | return 0; |
180 | 183 | ||
181 | clunk_fid: | 184 | clunk_fid: |
182 | p9_client_clunk(fid); | 185 | p9_client_clunk(fid); |
183 | |||
184 | close_session: | 186 | close_session: |
185 | v9fs_session_close(v9ses); | 187 | v9fs_session_close(v9ses); |
186 | kfree(v9ses); | 188 | kfree(v9ses); |
187 | return retval; | 189 | return retval; |
188 | |||
189 | release_sb: | 190 | release_sb: |
191 | /* | ||
192 | * we will do the session_close and root dentry release | ||
193 | * in the below call. But we need to clunk fid, because we haven't | ||
194 | * attached the fid to dentry so it won't get clunked | ||
195 | * automatically. | ||
196 | */ | ||
197 | p9_client_clunk(fid); | ||
190 | deactivate_locked_super(sb); | 198 | deactivate_locked_super(sb); |
191 | return retval; | 199 | return retval; |
192 | } | 200 | } |
diff --git a/fs/aio.c b/fs/aio.c --- a/fs/aio.c +++ b/fs/aio.c | |||
@@ -712,8 +712,16 @@ static ssize_t aio_run_iocb(struct kiocb *iocb) | |||
712 | */ | 712 | */ |
713 | ret = retry(iocb); | 713 | ret = retry(iocb); |
714 | 714 | ||
715 | if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) | 715 | if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) { |
716 | /* | ||
717 | * There's no easy way to restart the syscall since other AIO's | ||
718 | * may be already running. Just fail this IO with EINTR. | ||
719 | */ | ||
720 | if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR || | ||
721 | ret == -ERESTARTNOHAND || ret == -ERESTART_RESTARTBLOCK)) | ||
722 | ret = -EINTR; | ||
716 | aio_complete(iocb, ret, 0); | 723 | aio_complete(iocb, ret, 0); |
724 | } | ||
717 | out: | 725 | out: |
718 | spin_lock_irq(&ctx->ctx_lock); | 726 | spin_lock_irq(&ctx->ctx_lock); |
719 | 727 | ||
@@ -1659,6 +1667,9 @@ long do_io_submit(aio_context_t ctx_id, long nr, | |||
1659 | if (unlikely(nr < 0)) | 1667 | if (unlikely(nr < 0)) |
1660 | return -EINVAL; | 1668 | return -EINVAL; |
1661 | 1669 | ||
1670 | if (unlikely(nr > LONG_MAX/sizeof(*iocbpp))) | ||
1671 | nr = LONG_MAX/sizeof(*iocbpp); | ||
1672 | |||
1662 | if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp))))) | 1673 | if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp))))) |
1663 | return -EFAULT; | 1674 | return -EFAULT; |
1664 | 1675 | ||
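
The do_io_submit() hunk caps nr before it is multiplied by sizeof(*iocbpp) for the access_ok() check, since an oversized nr could make the product wrap and slip past validation. A standalone demonstration of the clamp; the element type and sizes here are illustrative:

#include <stdio.h>
#include <limits.h>

struct iocb_ptr { void *p; };

static long clamp_nr(long nr)
{
    if (nr > (long)(LONG_MAX / sizeof(struct iocb_ptr)))
        nr = LONG_MAX / sizeof(struct iocb_ptr);
    return nr;
}

int main(void)
{
    long nr = LONG_MAX;                          /* hostile request size */
    long clamped = clamp_nr(nr);

    /* without the clamp, nr * sizeof(...) would wrap around */
    printf("clamped nr = %ld, bytes = %lu\n",
           clamped, (unsigned long)clamped * sizeof(struct iocb_ptr));
    return 0;
}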
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c index a7528b913936..fd0cc0bf9a40 100644 --- a/fs/binfmt_misc.c +++ b/fs/binfmt_misc.c | |||
@@ -724,7 +724,7 @@ static int __init init_misc_binfmt(void) | |||
724 | { | 724 | { |
725 | int err = register_filesystem(&bm_fs_type); | 725 | int err = register_filesystem(&bm_fs_type); |
726 | if (!err) { | 726 | if (!err) { |
727 | err = register_binfmt(&misc_format); | 727 | err = insert_binfmt(&misc_format); |
728 | if (err) | 728 | if (err) |
729 | unregister_filesystem(&bm_fs_type); | 729 | unregister_filesystem(&bm_fs_type); |
730 | } | 730 | } |
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c index 612a5c38d3c1..4d0ff5ee27b8 100644 --- a/fs/bio-integrity.c +++ b/fs/bio-integrity.c | |||
@@ -413,10 +413,10 @@ int bio_integrity_prep(struct bio *bio) | |||
413 | 413 | ||
414 | /* Allocate kernel buffer for protection data */ | 414 | /* Allocate kernel buffer for protection data */ |
415 | len = sectors * blk_integrity_tuple_size(bi); | 415 | len = sectors * blk_integrity_tuple_size(bi); |
416 | buf = kmalloc(len, GFP_NOIO | __GFP_NOFAIL | q->bounce_gfp); | 416 | buf = kmalloc(len, GFP_NOIO | q->bounce_gfp); |
417 | if (unlikely(buf == NULL)) { | 417 | if (unlikely(buf == NULL)) { |
418 | printk(KERN_ERR "could not allocate integrity buffer\n"); | 418 | printk(KERN_ERR "could not allocate integrity buffer\n"); |
419 | return -EIO; | 419 | return -ENOMEM; |
420 | } | 420 | } |
421 | 421 | ||
422 | end = (((unsigned long) buf) + len + PAGE_SIZE - 1) >> PAGE_SHIFT; | 422 | end = (((unsigned long) buf) + len + PAGE_SIZE - 1) >> PAGE_SHIFT; |
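
With __GFP_NOFAIL dropped, the integrity-buffer allocation can now fail, and the failure is reported as -ENOMEM rather than being dressed up as an I/O error. A trivial sketch of that errno-style contract; prep_buffer() is an invented stand-in for bio_integrity_prep():

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int prep_buffer(size_t len, void **out)
{
    void *buf = malloc(len);           /* may fail, like plain kmalloc() */
    if (!buf) {
        fprintf(stderr, "could not allocate integrity buffer\n");
        return -ENOMEM;                /* was -EIO before the patch */
    }
    *out = buf;
    return 0;
}

int main(void)
{
    void *buf = NULL;
    int ret = prep_buffer(4096, &buf);

    if (ret == -ENOMEM)
        puts("caller can back off and retry");
    else if (ret == 0)
        puts("buffer ready");
    free(buf);
    return 0;
}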
diff --git a/fs/ceph/Kconfig b/fs/ceph/Kconfig index bc87b9c1d27e..0fcd2640c23f 100644 --- a/fs/ceph/Kconfig +++ b/fs/ceph/Kconfig | |||
@@ -3,6 +3,7 @@ config CEPH_FS | |||
3 | depends on INET && EXPERIMENTAL | 3 | depends on INET && EXPERIMENTAL |
4 | select LIBCRC32C | 4 | select LIBCRC32C |
5 | select CRYPTO_AES | 5 | select CRYPTO_AES |
6 | select CRYPTO | ||
6 | help | 7 | help |
7 | Choose Y or M here to include support for mounting the | 8 | Choose Y or M here to include support for mounting the |
8 | experimental Ceph distributed file system. Ceph is an extremely | 9 | experimental Ceph distributed file system. Ceph is an extremely |
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 4cfce1ee31fa..efbc604001c8 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c | |||
@@ -411,8 +411,8 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc) | |||
411 | if (i_size < page_off + len) | 411 | if (i_size < page_off + len) |
412 | len = i_size - page_off; | 412 | len = i_size - page_off; |
413 | 413 | ||
414 | dout("writepage %p page %p index %lu on %llu~%u\n", | 414 | dout("writepage %p page %p index %lu on %llu~%u snapc %p\n", |
415 | inode, page, page->index, page_off, len); | 415 | inode, page, page->index, page_off, len, snapc); |
416 | 416 | ||
417 | writeback_stat = atomic_long_inc_return(&client->writeback_count); | 417 | writeback_stat = atomic_long_inc_return(&client->writeback_count); |
418 | if (writeback_stat > | 418 | if (writeback_stat > |
@@ -766,7 +766,8 @@ get_more_pages: | |||
766 | /* ok */ | 766 | /* ok */ |
767 | if (locked_pages == 0) { | 767 | if (locked_pages == 0) { |
768 | /* prepare async write request */ | 768 | /* prepare async write request */ |
769 | offset = page->index << PAGE_CACHE_SHIFT; | 769 | offset = (unsigned long long)page->index |
770 | << PAGE_CACHE_SHIFT; | ||
770 | len = wsize; | 771 | len = wsize; |
771 | req = ceph_osdc_new_request(&client->osdc, | 772 | req = ceph_osdc_new_request(&client->osdc, |
772 | &ci->i_layout, | 773 | &ci->i_layout, |
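
The ceph writepages fix widens page->index before the shift: on 32-bit builds the index is a 32-bit unsigned long, so shifting it by PAGE_CACHE_SHIFT first would wrap for offsets at or beyond 4 GiB. A standalone illustration of the difference; the PAGE_SHIFT value is made up for the demo:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

int main(void)
{
    uint32_t index = 0x00200000;       /* page 2M => byte offset 8 GiB */

    uint64_t wrong = index << PAGE_SHIFT;            /* 32-bit shift, wraps */
    uint64_t right = (uint64_t)index << PAGE_SHIFT;  /* widen, then shift */

    printf("without cast: %llu\n", (unsigned long long)wrong);
    printf("with cast   : %llu\n", (unsigned long long)right);
    return 0;
}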
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index a2069b6680ae..73c153092f72 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c | |||
@@ -814,7 +814,7 @@ int __ceph_caps_used(struct ceph_inode_info *ci) | |||
814 | used |= CEPH_CAP_PIN; | 814 | used |= CEPH_CAP_PIN; |
815 | if (ci->i_rd_ref) | 815 | if (ci->i_rd_ref) |
816 | used |= CEPH_CAP_FILE_RD; | 816 | used |= CEPH_CAP_FILE_RD; |
817 | if (ci->i_rdcache_ref || ci->i_rdcache_gen) | 817 | if (ci->i_rdcache_ref || ci->vfs_inode.i_data.nrpages) |
818 | used |= CEPH_CAP_FILE_CACHE; | 818 | used |= CEPH_CAP_FILE_CACHE; |
819 | if (ci->i_wr_ref) | 819 | if (ci->i_wr_ref) |
820 | used |= CEPH_CAP_FILE_WR; | 820 | used |= CEPH_CAP_FILE_WR; |
@@ -1195,10 +1195,14 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap, | |||
1195 | * asynchronously back to the MDS once sync writes complete and dirty | 1195 | * asynchronously back to the MDS once sync writes complete and dirty |
1196 | * data is written out. | 1196 | * data is written out. |
1197 | * | 1197 | * |
1198 | * Unless @again is true, skip cap_snaps that were already sent to | ||
1199 | * the MDS (i.e., during this session). | ||
1200 | * | ||
1198 | * Called under i_lock. Takes s_mutex as needed. | 1201 | * Called under i_lock. Takes s_mutex as needed. |
1199 | */ | 1202 | */ |
1200 | void __ceph_flush_snaps(struct ceph_inode_info *ci, | 1203 | void __ceph_flush_snaps(struct ceph_inode_info *ci, |
1201 | struct ceph_mds_session **psession) | 1204 | struct ceph_mds_session **psession, |
1205 | int again) | ||
1202 | __releases(ci->vfs_inode->i_lock) | 1206 | __releases(ci->vfs_inode->i_lock) |
1203 | __acquires(ci->vfs_inode->i_lock) | 1207 | __acquires(ci->vfs_inode->i_lock) |
1204 | { | 1208 | { |
@@ -1227,7 +1231,7 @@ retry: | |||
1227 | * pages to be written out. | 1231 | * pages to be written out. |
1228 | */ | 1232 | */ |
1229 | if (capsnap->dirty_pages || capsnap->writing) | 1233 | if (capsnap->dirty_pages || capsnap->writing) |
1230 | continue; | 1234 | break; |
1231 | 1235 | ||
1232 | /* | 1236 | /* |
1233 | * if cap writeback already occurred, we should have dropped | 1237 | * if cap writeback already occurred, we should have dropped |
@@ -1240,6 +1244,13 @@ retry: | |||
1240 | dout("no auth cap (migrating?), doing nothing\n"); | 1244 | dout("no auth cap (migrating?), doing nothing\n"); |
1241 | goto out; | 1245 | goto out; |
1242 | } | 1246 | } |
1247 | |||
1248 | /* only flush each capsnap once */ | ||
1249 | if (!again && !list_empty(&capsnap->flushing_item)) { | ||
1250 | dout("already flushed %p, skipping\n", capsnap); | ||
1251 | continue; | ||
1252 | } | ||
1253 | |||
1243 | mds = ci->i_auth_cap->session->s_mds; | 1254 | mds = ci->i_auth_cap->session->s_mds; |
1244 | mseq = ci->i_auth_cap->mseq; | 1255 | mseq = ci->i_auth_cap->mseq; |
1245 | 1256 | ||
@@ -1276,8 +1287,8 @@ retry: | |||
1276 | &session->s_cap_snaps_flushing); | 1287 | &session->s_cap_snaps_flushing); |
1277 | spin_unlock(&inode->i_lock); | 1288 | spin_unlock(&inode->i_lock); |
1278 | 1289 | ||
1279 | dout("flush_snaps %p cap_snap %p follows %lld size %llu\n", | 1290 | dout("flush_snaps %p cap_snap %p follows %lld tid %llu\n", |
1280 | inode, capsnap, next_follows, capsnap->size); | 1291 | inode, capsnap, capsnap->follows, capsnap->flush_tid); |
1281 | send_cap_msg(session, ceph_vino(inode).ino, 0, | 1292 | send_cap_msg(session, ceph_vino(inode).ino, 0, |
1282 | CEPH_CAP_OP_FLUSHSNAP, capsnap->issued, 0, | 1293 | CEPH_CAP_OP_FLUSHSNAP, capsnap->issued, 0, |
1283 | capsnap->dirty, 0, capsnap->flush_tid, 0, mseq, | 1294 | capsnap->dirty, 0, capsnap->flush_tid, 0, mseq, |
@@ -1314,7 +1325,7 @@ static void ceph_flush_snaps(struct ceph_inode_info *ci) | |||
1314 | struct inode *inode = &ci->vfs_inode; | 1325 | struct inode *inode = &ci->vfs_inode; |
1315 | 1326 | ||
1316 | spin_lock(&inode->i_lock); | 1327 | spin_lock(&inode->i_lock); |
1317 | __ceph_flush_snaps(ci, NULL); | 1328 | __ceph_flush_snaps(ci, NULL, 0); |
1318 | spin_unlock(&inode->i_lock); | 1329 | spin_unlock(&inode->i_lock); |
1319 | } | 1330 | } |
1320 | 1331 | ||
@@ -1477,7 +1488,7 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags, | |||
1477 | 1488 | ||
1478 | /* flush snaps first time around only */ | 1489 | /* flush snaps first time around only */ |
1479 | if (!list_empty(&ci->i_cap_snaps)) | 1490 | if (!list_empty(&ci->i_cap_snaps)) |
1480 | __ceph_flush_snaps(ci, &session); | 1491 | __ceph_flush_snaps(ci, &session, 0); |
1481 | goto retry_locked; | 1492 | goto retry_locked; |
1482 | retry: | 1493 | retry: |
1483 | spin_lock(&inode->i_lock); | 1494 | spin_lock(&inode->i_lock); |
@@ -1894,7 +1905,7 @@ static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc, | |||
1894 | if (cap && cap->session == session) { | 1905 | if (cap && cap->session == session) { |
1895 | dout("kick_flushing_caps %p cap %p capsnap %p\n", inode, | 1906 | dout("kick_flushing_caps %p cap %p capsnap %p\n", inode, |
1896 | cap, capsnap); | 1907 | cap, capsnap); |
1897 | __ceph_flush_snaps(ci, &session); | 1908 | __ceph_flush_snaps(ci, &session, 1); |
1898 | } else { | 1909 | } else { |
1899 | pr_err("%p auth cap %p not mds%d ???\n", inode, | 1910 | pr_err("%p auth cap %p not mds%d ???\n", inode, |
1900 | cap, session->s_mds); | 1911 | cap, session->s_mds); |
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 6e4f43ff23ec..a1986eb52045 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c | |||
@@ -1021,11 +1021,15 @@ out_touch: | |||
1021 | static void ceph_dentry_release(struct dentry *dentry) | 1021 | static void ceph_dentry_release(struct dentry *dentry) |
1022 | { | 1022 | { |
1023 | struct ceph_dentry_info *di = ceph_dentry(dentry); | 1023 | struct ceph_dentry_info *di = ceph_dentry(dentry); |
1024 | struct inode *parent_inode = dentry->d_parent->d_inode; | 1024 | struct inode *parent_inode = NULL; |
1025 | u64 snapid = ceph_snap(parent_inode); | 1025 | u64 snapid = CEPH_NOSNAP; |
1026 | 1026 | ||
1027 | if (!IS_ROOT(dentry)) { | ||
1028 | parent_inode = dentry->d_parent->d_inode; | ||
1029 | if (parent_inode) | ||
1030 | snapid = ceph_snap(parent_inode); | ||
1031 | } | ||
1027 | dout("dentry_release %p parent %p\n", dentry, parent_inode); | 1032 | dout("dentry_release %p parent %p\n", dentry, parent_inode); |
1028 | |||
1029 | if (parent_inode && snapid != CEPH_SNAPDIR) { | 1033 | if (parent_inode && snapid != CEPH_SNAPDIR) { |
1030 | struct ceph_inode_info *ci = ceph_inode(parent_inode); | 1034 | struct ceph_inode_info *ci = ceph_inode(parent_inode); |
1031 | 1035 | ||
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index e7cca414da03..62377ec37edf 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c | |||
@@ -845,7 +845,7 @@ static void ceph_set_dentry_offset(struct dentry *dn) | |||
845 | * the caller) if we fail. | 845 | * the caller) if we fail. |
846 | */ | 846 | */ |
847 | static struct dentry *splice_dentry(struct dentry *dn, struct inode *in, | 847 | static struct dentry *splice_dentry(struct dentry *dn, struct inode *in, |
848 | bool *prehash) | 848 | bool *prehash, bool set_offset) |
849 | { | 849 | { |
850 | struct dentry *realdn; | 850 | struct dentry *realdn; |
851 | 851 | ||
@@ -877,7 +877,8 @@ static struct dentry *splice_dentry(struct dentry *dn, struct inode *in, | |||
877 | } | 877 | } |
878 | if ((!prehash || *prehash) && d_unhashed(dn)) | 878 | if ((!prehash || *prehash) && d_unhashed(dn)) |
879 | d_rehash(dn); | 879 | d_rehash(dn); |
880 | ceph_set_dentry_offset(dn); | 880 | if (set_offset) |
881 | ceph_set_dentry_offset(dn); | ||
881 | out: | 882 | out: |
882 | return dn; | 883 | return dn; |
883 | } | 884 | } |
@@ -1062,7 +1063,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, | |||
1062 | d_delete(dn); | 1063 | d_delete(dn); |
1063 | goto done; | 1064 | goto done; |
1064 | } | 1065 | } |
1065 | dn = splice_dentry(dn, in, &have_lease); | 1066 | dn = splice_dentry(dn, in, &have_lease, true); |
1066 | if (IS_ERR(dn)) { | 1067 | if (IS_ERR(dn)) { |
1067 | err = PTR_ERR(dn); | 1068 | err = PTR_ERR(dn); |
1068 | goto done; | 1069 | goto done; |
@@ -1105,7 +1106,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, | |||
1105 | goto done; | 1106 | goto done; |
1106 | } | 1107 | } |
1107 | dout(" linking snapped dir %p to dn %p\n", in, dn); | 1108 | dout(" linking snapped dir %p to dn %p\n", in, dn); |
1108 | dn = splice_dentry(dn, in, NULL); | 1109 | dn = splice_dentry(dn, in, NULL, true); |
1109 | if (IS_ERR(dn)) { | 1110 | if (IS_ERR(dn)) { |
1110 | err = PTR_ERR(dn); | 1111 | err = PTR_ERR(dn); |
1111 | goto done; | 1112 | goto done; |
@@ -1237,7 +1238,7 @@ retry_lookup: | |||
1237 | err = PTR_ERR(in); | 1238 | err = PTR_ERR(in); |
1238 | goto out; | 1239 | goto out; |
1239 | } | 1240 | } |
1240 | dn = splice_dentry(dn, in, NULL); | 1241 | dn = splice_dentry(dn, in, NULL, false); |
1241 | if (IS_ERR(dn)) | 1242 | if (IS_ERR(dn)) |
1242 | dn = NULL; | 1243 | dn = NULL; |
1243 | } | 1244 | } |
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index f091b1351786..fad95f8f2608 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c | |||
@@ -2374,6 +2374,8 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap, | |||
2374 | num_fcntl_locks, | 2374 | num_fcntl_locks, |
2375 | num_flock_locks); | 2375 | num_flock_locks); |
2376 | unlock_kernel(); | 2376 | unlock_kernel(); |
2377 | } else { | ||
2378 | err = ceph_pagelist_append(pagelist, &rec, reclen); | ||
2377 | } | 2379 | } |
2378 | 2380 | ||
2379 | out_free: | 2381 | out_free: |
diff --git a/fs/ceph/pagelist.c b/fs/ceph/pagelist.c index b6859f47d364..46a368b6dce5 100644 --- a/fs/ceph/pagelist.c +++ b/fs/ceph/pagelist.c | |||
@@ -5,10 +5,18 @@ | |||
5 | 5 | ||
6 | #include "pagelist.h" | 6 | #include "pagelist.h" |
7 | 7 | ||
8 | static void ceph_pagelist_unmap_tail(struct ceph_pagelist *pl) | ||
9 | { | ||
10 | struct page *page = list_entry(pl->head.prev, struct page, | ||
11 | lru); | ||
12 | kunmap(page); | ||
13 | } | ||
14 | |||
8 | int ceph_pagelist_release(struct ceph_pagelist *pl) | 15 | int ceph_pagelist_release(struct ceph_pagelist *pl) |
9 | { | 16 | { |
10 | if (pl->mapped_tail) | 17 | if (pl->mapped_tail) |
11 | kunmap(pl->mapped_tail); | 18 | ceph_pagelist_unmap_tail(pl); |
19 | |||
12 | while (!list_empty(&pl->head)) { | 20 | while (!list_empty(&pl->head)) { |
13 | struct page *page = list_first_entry(&pl->head, struct page, | 21 | struct page *page = list_first_entry(&pl->head, struct page, |
14 | lru); | 22 | lru); |
@@ -26,7 +34,7 @@ static int ceph_pagelist_addpage(struct ceph_pagelist *pl) | |||
26 | pl->room += PAGE_SIZE; | 34 | pl->room += PAGE_SIZE; |
27 | list_add_tail(&page->lru, &pl->head); | 35 | list_add_tail(&page->lru, &pl->head); |
28 | if (pl->mapped_tail) | 36 | if (pl->mapped_tail) |
29 | kunmap(pl->mapped_tail); | 37 | ceph_pagelist_unmap_tail(pl); |
30 | pl->mapped_tail = kmap(page); | 38 | pl->mapped_tail = kmap(page); |
31 | return 0; | 39 | return 0; |
32 | } | 40 | } |
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index 4868b9dcac5a..190b6c4a6f2b 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c | |||
@@ -119,6 +119,7 @@ static struct ceph_snap_realm *ceph_create_snap_realm( | |||
119 | INIT_LIST_HEAD(&realm->children); | 119 | INIT_LIST_HEAD(&realm->children); |
120 | INIT_LIST_HEAD(&realm->child_item); | 120 | INIT_LIST_HEAD(&realm->child_item); |
121 | INIT_LIST_HEAD(&realm->empty_item); | 121 | INIT_LIST_HEAD(&realm->empty_item); |
122 | INIT_LIST_HEAD(&realm->dirty_item); | ||
122 | INIT_LIST_HEAD(&realm->inodes_with_caps); | 123 | INIT_LIST_HEAD(&realm->inodes_with_caps); |
123 | spin_lock_init(&realm->inodes_with_caps_lock); | 124 | spin_lock_init(&realm->inodes_with_caps_lock); |
124 | __insert_snap_realm(&mdsc->snap_realms, realm); | 125 | __insert_snap_realm(&mdsc->snap_realms, realm); |
@@ -467,7 +468,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci) | |||
467 | INIT_LIST_HEAD(&capsnap->ci_item); | 468 | INIT_LIST_HEAD(&capsnap->ci_item); |
468 | INIT_LIST_HEAD(&capsnap->flushing_item); | 469 | INIT_LIST_HEAD(&capsnap->flushing_item); |
469 | 470 | ||
470 | capsnap->follows = snapc->seq - 1; | 471 | capsnap->follows = snapc->seq; |
471 | capsnap->issued = __ceph_caps_issued(ci, NULL); | 472 | capsnap->issued = __ceph_caps_issued(ci, NULL); |
472 | capsnap->dirty = dirty; | 473 | capsnap->dirty = dirty; |
473 | 474 | ||
@@ -604,6 +605,7 @@ int ceph_update_snap_trace(struct ceph_mds_client *mdsc, | |||
604 | struct ceph_snap_realm *realm; | 605 | struct ceph_snap_realm *realm; |
605 | int invalidate = 0; | 606 | int invalidate = 0; |
606 | int err = -ENOMEM; | 607 | int err = -ENOMEM; |
608 | LIST_HEAD(dirty_realms); | ||
607 | 609 | ||
608 | dout("update_snap_trace deletion=%d\n", deletion); | 610 | dout("update_snap_trace deletion=%d\n", deletion); |
609 | more: | 611 | more: |
@@ -626,24 +628,6 @@ more: | |||
626 | } | 628 | } |
627 | } | 629 | } |
628 | 630 | ||
629 | if (le64_to_cpu(ri->seq) > realm->seq) { | ||
630 | dout("update_snap_trace updating %llx %p %lld -> %lld\n", | ||
631 | realm->ino, realm, realm->seq, le64_to_cpu(ri->seq)); | ||
632 | /* | ||
633 | * if the realm seq has changed, queue a cap_snap for every | ||
634 | * inode with open caps. we do this _before_ we update | ||
635 | * the realm info so that we prepare for writeback under the | ||
636 | * _previous_ snap context. | ||
637 | * | ||
638 | * ...unless it's a snap deletion! | ||
639 | */ | ||
640 | if (!deletion) | ||
641 | queue_realm_cap_snaps(realm); | ||
642 | } else { | ||
643 | dout("update_snap_trace %llx %p seq %lld unchanged\n", | ||
644 | realm->ino, realm, realm->seq); | ||
645 | } | ||
646 | |||
647 | /* ensure the parent is correct */ | 631 | /* ensure the parent is correct */ |
648 | err = adjust_snap_realm_parent(mdsc, realm, le64_to_cpu(ri->parent)); | 632 | err = adjust_snap_realm_parent(mdsc, realm, le64_to_cpu(ri->parent)); |
649 | if (err < 0) | 633 | if (err < 0) |
@@ -651,6 +635,8 @@ more: | |||
651 | invalidate += err; | 635 | invalidate += err; |
652 | 636 | ||
653 | if (le64_to_cpu(ri->seq) > realm->seq) { | 637 | if (le64_to_cpu(ri->seq) > realm->seq) { |
638 | dout("update_snap_trace updating %llx %p %lld -> %lld\n", | ||
639 | realm->ino, realm, realm->seq, le64_to_cpu(ri->seq)); | ||
654 | /* update realm parameters, snap lists */ | 640 | /* update realm parameters, snap lists */ |
655 | realm->seq = le64_to_cpu(ri->seq); | 641 | realm->seq = le64_to_cpu(ri->seq); |
656 | realm->created = le64_to_cpu(ri->created); | 642 | realm->created = le64_to_cpu(ri->created); |
@@ -668,9 +654,17 @@ more: | |||
668 | if (err < 0) | 654 | if (err < 0) |
669 | goto fail; | 655 | goto fail; |
670 | 656 | ||
657 | /* queue realm for cap_snap creation */ | ||
658 | list_add(&realm->dirty_item, &dirty_realms); | ||
659 | |||
671 | invalidate = 1; | 660 | invalidate = 1; |
672 | } else if (!realm->cached_context) { | 661 | } else if (!realm->cached_context) { |
662 | dout("update_snap_trace %llx %p seq %lld new\n", | ||
663 | realm->ino, realm, realm->seq); | ||
673 | invalidate = 1; | 664 | invalidate = 1; |
665 | } else { | ||
666 | dout("update_snap_trace %llx %p seq %lld unchanged\n", | ||
667 | realm->ino, realm, realm->seq); | ||
674 | } | 668 | } |
675 | 669 | ||
676 | dout("done with %llx %p, invalidated=%d, %p %p\n", realm->ino, | 670 | dout("done with %llx %p, invalidated=%d, %p %p\n", realm->ino, |
@@ -683,6 +677,14 @@ more: | |||
683 | if (invalidate) | 677 | if (invalidate) |
684 | rebuild_snap_realms(realm); | 678 | rebuild_snap_realms(realm); |
685 | 679 | ||
680 | /* | ||
681 | * queue cap snaps _after_ we've built the new snap contexts, | ||
682 | * so that i_head_snapc can be set appropriately. | ||
683 | */ | ||
684 | list_for_each_entry(realm, &dirty_realms, dirty_item) { | ||
685 | queue_realm_cap_snaps(realm); | ||
686 | } | ||
687 | |||
686 | __cleanup_empty_realms(mdsc); | 688 | __cleanup_empty_realms(mdsc); |
687 | return 0; | 689 | return 0; |
688 | 690 | ||
@@ -715,7 +717,7 @@ static void flush_snaps(struct ceph_mds_client *mdsc) | |||
715 | igrab(inode); | 717 | igrab(inode); |
716 | spin_unlock(&mdsc->snap_flush_lock); | 718 | spin_unlock(&mdsc->snap_flush_lock); |
717 | spin_lock(&inode->i_lock); | 719 | spin_lock(&inode->i_lock); |
718 | __ceph_flush_snaps(ci, &session); | 720 | __ceph_flush_snaps(ci, &session, 0); |
719 | spin_unlock(&inode->i_lock); | 721 | spin_unlock(&inode->i_lock); |
720 | iput(inode); | 722 | iput(inode); |
721 | spin_lock(&mdsc->snap_flush_lock); | 723 | spin_lock(&mdsc->snap_flush_lock); |
@@ -816,6 +818,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc, | |||
816 | }; | 818 | }; |
817 | struct inode *inode = ceph_find_inode(sb, vino); | 819 | struct inode *inode = ceph_find_inode(sb, vino); |
818 | struct ceph_inode_info *ci; | 820 | struct ceph_inode_info *ci; |
821 | struct ceph_snap_realm *oldrealm; | ||
819 | 822 | ||
820 | if (!inode) | 823 | if (!inode) |
821 | continue; | 824 | continue; |
@@ -841,18 +844,19 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc, | |||
841 | dout(" will move %p to split realm %llx %p\n", | 844 | dout(" will move %p to split realm %llx %p\n", |
842 | inode, realm->ino, realm); | 845 | inode, realm->ino, realm); |
843 | /* | 846 | /* |
844 | * Remove the inode from the realm's inode | 847 | * Move the inode to the new realm |
845 | * list, but don't add it to the new realm | ||
846 | * yet. We don't want the cap_snap to be | ||
847 | * queued (again) by ceph_update_snap_trace() | ||
848 | * below. Queue it _now_, under the old context. | ||
849 | */ | 848 | */ |
850 | spin_lock(&realm->inodes_with_caps_lock); | 849 | spin_lock(&realm->inodes_with_caps_lock); |
851 | list_del_init(&ci->i_snap_realm_item); | 850 | list_del_init(&ci->i_snap_realm_item); |
851 | list_add(&ci->i_snap_realm_item, | ||
852 | &realm->inodes_with_caps); | ||
853 | oldrealm = ci->i_snap_realm; | ||
854 | ci->i_snap_realm = realm; | ||
852 | spin_unlock(&realm->inodes_with_caps_lock); | 855 | spin_unlock(&realm->inodes_with_caps_lock); |
853 | spin_unlock(&inode->i_lock); | 856 | spin_unlock(&inode->i_lock); |
854 | 857 | ||
855 | ceph_queue_cap_snap(ci); | 858 | ceph_get_snap_realm(mdsc, realm); |
859 | ceph_put_snap_realm(mdsc, oldrealm); | ||
856 | 860 | ||
857 | iput(inode); | 861 | iput(inode); |
858 | continue; | 862 | continue; |
@@ -880,43 +884,9 @@ skip_inode: | |||
880 | ceph_update_snap_trace(mdsc, p, e, | 884 | ceph_update_snap_trace(mdsc, p, e, |
881 | op == CEPH_SNAP_OP_DESTROY); | 885 | op == CEPH_SNAP_OP_DESTROY); |
882 | 886 | ||
883 | if (op == CEPH_SNAP_OP_SPLIT) { | 887 | if (op == CEPH_SNAP_OP_SPLIT) |
884 | /* | ||
885 | * ok, _now_ add the inodes into the new realm. | ||
886 | */ | ||
887 | for (i = 0; i < num_split_inos; i++) { | ||
888 | struct ceph_vino vino = { | ||
889 | .ino = le64_to_cpu(split_inos[i]), | ||
890 | .snap = CEPH_NOSNAP, | ||
891 | }; | ||
892 | struct inode *inode = ceph_find_inode(sb, vino); | ||
893 | struct ceph_inode_info *ci; | ||
894 | |||
895 | if (!inode) | ||
896 | continue; | ||
897 | ci = ceph_inode(inode); | ||
898 | spin_lock(&inode->i_lock); | ||
899 | if (list_empty(&ci->i_snap_realm_item)) { | ||
900 | struct ceph_snap_realm *oldrealm = | ||
901 | ci->i_snap_realm; | ||
902 | |||
903 | dout(" moving %p to split realm %llx %p\n", | ||
904 | inode, realm->ino, realm); | ||
905 | spin_lock(&realm->inodes_with_caps_lock); | ||
906 | list_add(&ci->i_snap_realm_item, | ||
907 | &realm->inodes_with_caps); | ||
908 | ci->i_snap_realm = realm; | ||
909 | spin_unlock(&realm->inodes_with_caps_lock); | ||
910 | ceph_get_snap_realm(mdsc, realm); | ||
911 | ceph_put_snap_realm(mdsc, oldrealm); | ||
912 | } | ||
913 | spin_unlock(&inode->i_lock); | ||
914 | iput(inode); | ||
915 | } | ||
916 | |||
917 | /* we took a reference when we created the realm, above */ | 888 | /* we took a reference when we created the realm, above */ |
918 | ceph_put_snap_realm(mdsc, realm); | 889 | ceph_put_snap_realm(mdsc, realm); |
919 | } | ||
920 | 890 | ||
921 | __cleanup_empty_realms(mdsc); | 891 | __cleanup_empty_realms(mdsc); |
922 | 892 | ||
diff --git a/fs/ceph/super.h b/fs/ceph/super.h index c33897ae5725..b87638e84c4b 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h | |||
@@ -690,6 +690,8 @@ struct ceph_snap_realm { | |||
690 | 690 | ||
691 | struct list_head empty_item; /* if i have ref==0 */ | 691 | struct list_head empty_item; /* if i have ref==0 */ |
692 | 692 | ||
693 | struct list_head dirty_item; /* if realm needs new context */ | ||
694 | |||
693 | /* the current set of snaps for this realm */ | 695 | /* the current set of snaps for this realm */ |
694 | struct ceph_snap_context *cached_context; | 696 | struct ceph_snap_context *cached_context; |
695 | 697 | ||
@@ -826,7 +828,8 @@ extern void ceph_put_cap_refs(struct ceph_inode_info *ci, int had); | |||
826 | extern void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr, | 828 | extern void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr, |
827 | struct ceph_snap_context *snapc); | 829 | struct ceph_snap_context *snapc); |
828 | extern void __ceph_flush_snaps(struct ceph_inode_info *ci, | 830 | extern void __ceph_flush_snaps(struct ceph_inode_info *ci, |
829 | struct ceph_mds_session **psession); | 831 | struct ceph_mds_session **psession, |
832 | int again); | ||
830 | extern void ceph_check_caps(struct ceph_inode_info *ci, int flags, | 833 | extern void ceph_check_caps(struct ceph_inode_info *ci, int flags, |
831 | struct ceph_mds_session *session); | 834 | struct ceph_mds_session *session); |
832 | extern void ceph_check_delayed_caps(struct ceph_mds_client *mdsc); | 835 | extern void ceph_check_delayed_caps(struct ceph_mds_client *mdsc); |
diff --git a/fs/char_dev.c b/fs/char_dev.c index f80a4f25123c..143d393881cb 100644 --- a/fs/char_dev.c +++ b/fs/char_dev.c | |||
@@ -40,7 +40,9 @@ struct backing_dev_info directly_mappable_cdev_bdi = { | |||
40 | #endif | 40 | #endif |
41 | /* permit direct mmap, for read, write or exec */ | 41 | /* permit direct mmap, for read, write or exec */ |
42 | BDI_CAP_MAP_DIRECT | | 42 | BDI_CAP_MAP_DIRECT | |
43 | BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP), | 43 | BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP | |
44 | /* no writeback happens */ | ||
45 | BDI_CAP_NO_ACCT_AND_WRITEBACK), | ||
44 | }; | 46 | }; |
45 | 47 | ||
46 | static struct kobj_map *cdev_map; | 48 | static struct kobj_map *cdev_map; |
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig index 0da1debd499d..917b7d449bb2 100644 --- a/fs/cifs/Kconfig +++ b/fs/cifs/Kconfig | |||
@@ -2,8 +2,6 @@ config CIFS | |||
2 | tristate "CIFS support (advanced network filesystem, SMBFS successor)" | 2 | tristate "CIFS support (advanced network filesystem, SMBFS successor)" |
3 | depends on INET | 3 | depends on INET |
4 | select NLS | 4 | select NLS |
5 | select CRYPTO_MD5 | ||
6 | select CRYPTO_ARC4 | ||
7 | help | 5 | help |
8 | This is the client VFS module for the Common Internet File System | 6 | This is the client VFS module for the Common Internet File System |
9 | (CIFS) protocol which is the successor to the Server Message Block | 7 | (CIFS) protocol which is the successor to the Server Message Block |
diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c index 21f0fbd86989..cfd1ce34e0bc 100644 --- a/fs/cifs/asn1.c +++ b/fs/cifs/asn1.c | |||
@@ -597,13 +597,13 @@ decode_negTokenInit(unsigned char *security_blob, int length, | |||
597 | if (compare_oid(oid, oidlen, MSKRB5_OID, | 597 | if (compare_oid(oid, oidlen, MSKRB5_OID, |
598 | MSKRB5_OID_LEN)) | 598 | MSKRB5_OID_LEN)) |
599 | server->sec_mskerberos = true; | 599 | server->sec_mskerberos = true; |
600 | if (compare_oid(oid, oidlen, KRB5U2U_OID, | 600 | else if (compare_oid(oid, oidlen, KRB5U2U_OID, |
601 | KRB5U2U_OID_LEN)) | 601 | KRB5U2U_OID_LEN)) |
602 | server->sec_kerberosu2u = true; | 602 | server->sec_kerberosu2u = true; |
603 | if (compare_oid(oid, oidlen, KRB5_OID, | 603 | else if (compare_oid(oid, oidlen, KRB5_OID, |
604 | KRB5_OID_LEN)) | 604 | KRB5_OID_LEN)) |
605 | server->sec_kerberos = true; | 605 | server->sec_kerberos = true; |
606 | if (compare_oid(oid, oidlen, NTLMSSP_OID, | 606 | else if (compare_oid(oid, oidlen, NTLMSSP_OID, |
607 | NTLMSSP_OID_LEN)) | 607 | NTLMSSP_OID_LEN)) |
608 | server->sec_ntlmssp = true; | 608 | server->sec_ntlmssp = true; |
609 | 609 | ||
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c index 709f2296bdb4..35042d8f7338 100644 --- a/fs/cifs/cifsencrypt.c +++ b/fs/cifs/cifsencrypt.c | |||
@@ -27,7 +27,6 @@ | |||
27 | #include "md5.h" | 27 | #include "md5.h" |
28 | #include "cifs_unicode.h" | 28 | #include "cifs_unicode.h" |
29 | #include "cifsproto.h" | 29 | #include "cifsproto.h" |
30 | #include "ntlmssp.h" | ||
31 | #include <linux/ctype.h> | 30 | #include <linux/ctype.h> |
32 | #include <linux/random.h> | 31 | #include <linux/random.h> |
33 | 32 | ||
@@ -43,43 +42,21 @@ extern void SMBencrypt(unsigned char *passwd, const unsigned char *c8, | |||
43 | unsigned char *p24); | 42 | unsigned char *p24); |
44 | 43 | ||
45 | static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu, | 44 | static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu, |
46 | struct TCP_Server_Info *server, char *signature) | 45 | const struct mac_key *key, char *signature) |
47 | { | 46 | { |
48 | int rc; | 47 | struct MD5Context context; |
49 | 48 | ||
50 | if (cifs_pdu == NULL || server == NULL || signature == NULL) | 49 | if ((cifs_pdu == NULL) || (signature == NULL) || (key == NULL)) |
51 | return -EINVAL; | 50 | return -EINVAL; |
52 | 51 | ||
53 | if (!server->ntlmssp.sdescmd5) { | 52 | cifs_MD5_init(&context); |
54 | cERROR(1, | 53 | cifs_MD5_update(&context, (char *)&key->data, key->len); |
55 | "cifs_calculate_signature: can't generate signature\n"); | 54 | cifs_MD5_update(&context, cifs_pdu->Protocol, cifs_pdu->smb_buf_length); |
56 | return -1; | ||
57 | } | ||
58 | |||
59 | rc = crypto_shash_init(&server->ntlmssp.sdescmd5->shash); | ||
60 | if (rc) { | ||
61 | cERROR(1, "cifs_calculate_signature: oould not init md5\n"); | ||
62 | return rc; | ||
63 | } | ||
64 | |||
65 | if (server->secType == RawNTLMSSP) | ||
66 | crypto_shash_update(&server->ntlmssp.sdescmd5->shash, | ||
67 | server->session_key.data.ntlmv2.key, | ||
68 | CIFS_NTLMV2_SESSKEY_SIZE); | ||
69 | else | ||
70 | crypto_shash_update(&server->ntlmssp.sdescmd5->shash, | ||
71 | (char *)&server->session_key.data, | ||
72 | server->session_key.len); | ||
73 | |||
74 | crypto_shash_update(&server->ntlmssp.sdescmd5->shash, | ||
75 | cifs_pdu->Protocol, cifs_pdu->smb_buf_length); | ||
76 | 55 | ||
77 | rc = crypto_shash_final(&server->ntlmssp.sdescmd5->shash, signature); | 56 | cifs_MD5_final(signature, &context); |
78 | 57 | return 0; | |
79 | return rc; | ||
80 | } | 58 | } |
81 | 59 | ||
82 | |||
83 | int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server, | 60 | int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server, |
84 | __u32 *pexpected_response_sequence_number) | 61 | __u32 *pexpected_response_sequence_number) |
85 | { | 62 | { |
@@ -101,7 +78,8 @@ int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server, | |||
101 | server->sequence_number++; | 78 | server->sequence_number++; |
102 | spin_unlock(&GlobalMid_Lock); | 79 | spin_unlock(&GlobalMid_Lock); |
103 | 80 | ||
104 | rc = cifs_calculate_signature(cifs_pdu, server, smb_signature); | 81 | rc = cifs_calculate_signature(cifs_pdu, &server->mac_signing_key, |
82 | smb_signature); | ||
105 | if (rc) | 83 | if (rc) |
106 | memset(cifs_pdu->Signature.SecuritySignature, 0, 8); | 84 | memset(cifs_pdu->Signature.SecuritySignature, 0, 8); |
107 | else | 85 | else |
@@ -111,39 +89,21 @@ int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server, | |||
111 | } | 89 | } |
112 | 90 | ||
113 | static int cifs_calc_signature2(const struct kvec *iov, int n_vec, | 91 | static int cifs_calc_signature2(const struct kvec *iov, int n_vec, |
114 | struct TCP_Server_Info *server, char *signature) | 92 | const struct mac_key *key, char *signature) |
115 | { | 93 | { |
94 | struct MD5Context context; | ||
116 | int i; | 95 | int i; |
117 | int rc; | ||
118 | 96 | ||
119 | if (iov == NULL || server == NULL || signature == NULL) | 97 | if ((iov == NULL) || (signature == NULL) || (key == NULL)) |
120 | return -EINVAL; | 98 | return -EINVAL; |
121 | 99 | ||
122 | if (!server->ntlmssp.sdescmd5) { | 100 | cifs_MD5_init(&context); |
123 | cERROR(1, "cifs_calc_signature2: can't generate signature\n"); | 101 | cifs_MD5_update(&context, (char *)&key->data, key->len); |
124 | return -1; | ||
125 | } | ||
126 | |||
127 | rc = crypto_shash_init(&server->ntlmssp.sdescmd5->shash); | ||
128 | if (rc) { | ||
129 | cERROR(1, "cifs_calc_signature2: oould not init md5\n"); | ||
130 | return rc; | ||
131 | } | ||
132 | |||
133 | if (server->secType == RawNTLMSSP) | ||
134 | crypto_shash_update(&server->ntlmssp.sdescmd5->shash, | ||
135 | server->session_key.data.ntlmv2.key, | ||
136 | CIFS_NTLMV2_SESSKEY_SIZE); | ||
137 | else | ||
138 | crypto_shash_update(&server->ntlmssp.sdescmd5->shash, | ||
139 | (char *)&server->session_key.data, | ||
140 | server->session_key.len); | ||
141 | |||
142 | for (i = 0; i < n_vec; i++) { | 102 | for (i = 0; i < n_vec; i++) { |
143 | if (iov[i].iov_len == 0) | 103 | if (iov[i].iov_len == 0) |
144 | continue; | 104 | continue; |
145 | if (iov[i].iov_base == NULL) { | 105 | if (iov[i].iov_base == NULL) { |
146 | cERROR(1, "cifs_calc_signature2: null iovec entry"); | 106 | cERROR(1, "null iovec entry"); |
147 | return -EIO; | 107 | return -EIO; |
148 | } | 108 | } |
149 | /* The first entry includes a length field (which does not get | 109 | /* The first entry includes a length field (which does not get |
@@ -151,18 +111,18 @@ static int cifs_calc_signature2(const struct kvec *iov, int n_vec, | |||
151 | if (i == 0) { | 111 | if (i == 0) { |
152 | if (iov[0].iov_len <= 8) /* cmd field at offset 9 */ | 112 | if (iov[0].iov_len <= 8) /* cmd field at offset 9 */ |
153 | break; /* nothing to sign or corrupt header */ | 113 | break; /* nothing to sign or corrupt header */ |
154 | crypto_shash_update(&server->ntlmssp.sdescmd5->shash, | 114 | cifs_MD5_update(&context, iov[0].iov_base+4, |
155 | iov[i].iov_base + 4, iov[i].iov_len - 4); | 115 | iov[0].iov_len-4); |
156 | } else | 116 | } else |
157 | crypto_shash_update(&server->ntlmssp.sdescmd5->shash, | 117 | cifs_MD5_update(&context, iov[i].iov_base, iov[i].iov_len); |
158 | iov[i].iov_base, iov[i].iov_len); | ||
159 | } | 118 | } |
160 | 119 | ||
161 | rc = crypto_shash_final(&server->ntlmssp.sdescmd5->shash, signature); | 120 | cifs_MD5_final(signature, &context); |
162 | 121 | ||
163 | return rc; | 122 | return 0; |
164 | } | 123 | } |
165 | 124 | ||
125 | |||
166 | int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server, | 126 | int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server, |
167 | __u32 *pexpected_response_sequence_number) | 127 | __u32 *pexpected_response_sequence_number) |
168 | { | 128 | { |
@@ -185,7 +145,8 @@ int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server, | |||
185 | server->sequence_number++; | 145 | server->sequence_number++; |
186 | spin_unlock(&GlobalMid_Lock); | 146 | spin_unlock(&GlobalMid_Lock); |
187 | 147 | ||
188 | rc = cifs_calc_signature2(iov, n_vec, server, smb_signature); | 148 | rc = cifs_calc_signature2(iov, n_vec, &server->mac_signing_key, |
149 | smb_signature); | ||
189 | if (rc) | 150 | if (rc) |
190 | memset(cifs_pdu->Signature.SecuritySignature, 0, 8); | 151 | memset(cifs_pdu->Signature.SecuritySignature, 0, 8); |
191 | else | 152 | else |
@@ -195,14 +156,14 @@ int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server, | |||
195 | } | 156 | } |
196 | 157 | ||
197 | int cifs_verify_signature(struct smb_hdr *cifs_pdu, | 158 | int cifs_verify_signature(struct smb_hdr *cifs_pdu, |
198 | struct TCP_Server_Info *server, | 159 | const struct mac_key *mac_key, |
199 | __u32 expected_sequence_number) | 160 | __u32 expected_sequence_number) |
200 | { | 161 | { |
201 | int rc; | 162 | unsigned int rc; |
202 | char server_response_sig[8]; | 163 | char server_response_sig[8]; |
203 | char what_we_think_sig_should_be[20]; | 164 | char what_we_think_sig_should_be[20]; |
204 | 165 | ||
205 | if (cifs_pdu == NULL || server == NULL) | 166 | if ((cifs_pdu == NULL) || (mac_key == NULL)) |
206 | return -EINVAL; | 167 | return -EINVAL; |
207 | 168 | ||
208 | if (cifs_pdu->Command == SMB_COM_NEGOTIATE) | 169 | if (cifs_pdu->Command == SMB_COM_NEGOTIATE) |
@@ -231,7 +192,7 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu, | |||
231 | cpu_to_le32(expected_sequence_number); | 192 | cpu_to_le32(expected_sequence_number); |
232 | cifs_pdu->Signature.Sequence.Reserved = 0; | 193 | cifs_pdu->Signature.Sequence.Reserved = 0; |
233 | 194 | ||
234 | rc = cifs_calculate_signature(cifs_pdu, server, | 195 | rc = cifs_calculate_signature(cifs_pdu, mac_key, |
235 | what_we_think_sig_should_be); | 196 | what_we_think_sig_should_be); |
236 | 197 | ||
237 | if (rc) | 198 | if (rc) |
@@ -248,7 +209,7 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu, | |||
248 | } | 209 | } |
249 | 210 | ||
250 | /* We fill in key by putting in 40 byte array which was allocated by caller */ | 211 | /* We fill in key by putting in 40 byte array which was allocated by caller */ |
251 | int cifs_calculate_session_key(struct session_key *key, const char *rn, | 212 | int cifs_calculate_mac_key(struct mac_key *key, const char *rn, |
252 | const char *password) | 213 | const char *password) |
253 | { | 214 | { |
254 | char temp_key[16]; | 215 | char temp_key[16]; |
@@ -306,52 +267,38 @@ static int calc_ntlmv2_hash(struct cifsSesInfo *ses, | |||
306 | { | 267 | { |
307 | int rc = 0; | 268 | int rc = 0; |
308 | int len; | 269 | int len; |
309 | char nt_hash[CIFS_NTHASH_SIZE]; | 270 | char nt_hash[16]; |
271 | struct HMACMD5Context *pctxt; | ||
310 | wchar_t *user; | 272 | wchar_t *user; |
311 | wchar_t *domain; | 273 | wchar_t *domain; |
312 | wchar_t *server; | ||
313 | 274 | ||
314 | if (!ses->server->ntlmssp.sdeschmacmd5) { | 275 | pctxt = kmalloc(sizeof(struct HMACMD5Context), GFP_KERNEL); |
315 | cERROR(1, "calc_ntlmv2_hash: can't generate ntlmv2 hash\n"); | 276 | |
316 | return -1; | 277 | if (pctxt == NULL) |
317 | } | 278 | return -ENOMEM; |
318 | 279 | ||
319 | /* calculate md4 hash of password */ | 280 | /* calculate md4 hash of password */ |
320 | E_md4hash(ses->password, nt_hash); | 281 | E_md4hash(ses->password, nt_hash); |
321 | 282 | ||
322 | crypto_shash_setkey(ses->server->ntlmssp.hmacmd5, nt_hash, | 283 | /* convert Domainname to unicode and uppercase */ |
323 | CIFS_NTHASH_SIZE); | 284 | hmac_md5_init_limK_to_64(nt_hash, 16, pctxt); |
324 | |||
325 | rc = crypto_shash_init(&ses->server->ntlmssp.sdeschmacmd5->shash); | ||
326 | if (rc) { | ||
327 | cERROR(1, "calc_ntlmv2_hash: could not init hmacmd5\n"); | ||
328 | return rc; | ||
329 | } | ||
330 | 285 | ||
331 | /* convert ses->userName to unicode and uppercase */ | 286 | /* convert ses->userName to unicode and uppercase */ |
332 | len = strlen(ses->userName); | 287 | len = strlen(ses->userName); |
333 | user = kmalloc(2 + (len * 2), GFP_KERNEL); | 288 | user = kmalloc(2 + (len * 2), GFP_KERNEL); |
334 | if (user == NULL) { | 289 | if (user == NULL) |
335 | cERROR(1, "calc_ntlmv2_hash: user mem alloc failure\n"); | ||
336 | rc = -ENOMEM; | ||
337 | goto calc_exit_2; | 290 | goto calc_exit_2; |
338 | } | ||
339 | len = cifs_strtoUCS((__le16 *)user, ses->userName, len, nls_cp); | 291 | len = cifs_strtoUCS((__le16 *)user, ses->userName, len, nls_cp); |
340 | UniStrupr(user); | 292 | UniStrupr(user); |
341 | 293 | hmac_md5_update((char *)user, 2*len, pctxt); | |
342 | crypto_shash_update(&ses->server->ntlmssp.sdeschmacmd5->shash, | ||
343 | (char *)user, 2 * len); | ||
344 | 294 | ||
345 | /* convert ses->domainName to unicode and uppercase */ | 295 | /* convert ses->domainName to unicode and uppercase */ |
346 | if (ses->domainName) { | 296 | if (ses->domainName) { |
347 | len = strlen(ses->domainName); | 297 | len = strlen(ses->domainName); |
348 | 298 | ||
349 | domain = kmalloc(2 + (len * 2), GFP_KERNEL); | 299 | domain = kmalloc(2 + (len * 2), GFP_KERNEL); |
350 | if (domain == NULL) { | 300 | if (domain == NULL) |
351 | cERROR(1, "calc_ntlmv2_hash: domain mem alloc failure"); | ||
352 | rc = -ENOMEM; | ||
353 | goto calc_exit_1; | 301 | goto calc_exit_1; |
354 | } | ||
355 | len = cifs_strtoUCS((__le16 *)domain, ses->domainName, len, | 302 | len = cifs_strtoUCS((__le16 *)domain, ses->domainName, len, |
356 | nls_cp); | 303 | nls_cp); |
357 | /* the following line was removed since it didn't work well | 304 | /* the following line was removed since it didn't work well |
@@ -359,292 +306,65 @@ static int calc_ntlmv2_hash(struct cifsSesInfo *ses, | |||
359 | Maybe converting the domain name earlier makes sense */ | 306 | Maybe converting the domain name earlier makes sense */ |
360 | /* UniStrupr(domain); */ | 307 | /* UniStrupr(domain); */ |
361 | 308 | ||
362 | crypto_shash_update(&ses->server->ntlmssp.sdeschmacmd5->shash, | 309 | hmac_md5_update((char *)domain, 2*len, pctxt); |
363 | (char *)domain, 2 * len); | ||
364 | 310 | ||
365 | kfree(domain); | 311 | kfree(domain); |
366 | } else if (ses->serverName) { | ||
367 | len = strlen(ses->serverName); | ||
368 | |||
369 | server = kmalloc(2 + (len * 2), GFP_KERNEL); | ||
370 | if (server == NULL) { | ||
371 | cERROR(1, "calc_ntlmv2_hash: server mem alloc failure"); | ||
372 | rc = -ENOMEM; | ||
373 | goto calc_exit_1; | ||
374 | } | ||
375 | len = cifs_strtoUCS((__le16 *)server, ses->serverName, len, | ||
376 | nls_cp); | ||
377 | /* the following line was removed since it didn't work well | ||
378 | with lower cased domain name that passed as an option. | ||
379 | Maybe converting the domain name earlier makes sense */ | ||
380 | /* UniStrupr(domain); */ | ||
381 | |||
382 | crypto_shash_update(&ses->server->ntlmssp.sdeschmacmd5->shash, | ||
383 | (char *)server, 2 * len); | ||
384 | |||
385 | kfree(server); | ||
386 | } | 312 | } |
387 | |||
388 | rc = crypto_shash_final(&ses->server->ntlmssp.sdeschmacmd5->shash, | ||
389 | ses->server->ntlmv2_hash); | ||
390 | |||
391 | calc_exit_1: | 313 | calc_exit_1: |
392 | kfree(user); | 314 | kfree(user); |
393 | calc_exit_2: | 315 | calc_exit_2: |
394 | /* BB FIXME what about bytes 24 through 40 of the signing key? | 316 | /* BB FIXME what about bytes 24 through 40 of the signing key? |
395 | compare with the NTLM example */ | 317 | compare with the NTLM example */ |
318 | hmac_md5_final(ses->server->ntlmv2_hash, pctxt); | ||
396 | 319 | ||
320 | kfree(pctxt); | ||
397 | return rc; | 321 | return rc; |
398 | } | 322 | } |
399 | 323 | ||
400 | static int | 324 | void setup_ntlmv2_rsp(struct cifsSesInfo *ses, char *resp_buf, |
401 | find_domain_name(struct cifsSesInfo *ses) | ||
402 | { | ||
403 | int rc = 0; | ||
404 | unsigned int attrsize; | ||
405 | unsigned int type; | ||
406 | unsigned char *blobptr; | ||
407 | struct ntlmssp2_name *attrptr; | ||
408 | |||
409 | if (ses->server->tiblob) { | ||
410 | blobptr = ses->server->tiblob; | ||
411 | attrptr = (struct ntlmssp2_name *) blobptr; | ||
412 | |||
413 | while ((type = attrptr->type) != 0) { | ||
414 | blobptr += 2; /* advance attr type */ | ||
415 | attrsize = attrptr->length; | ||
416 | blobptr += 2; /* advance attr size */ | ||
417 | if (type == NTLMSSP_AV_NB_DOMAIN_NAME) { | ||
418 | if (!ses->domainName) { | ||
419 | ses->domainName = | ||
420 | kmalloc(attrptr->length + 1, | ||
421 | GFP_KERNEL); | ||
422 | if (!ses->domainName) | ||
423 | return -ENOMEM; | ||
424 | cifs_from_ucs2(ses->domainName, | ||
425 | (__le16 *)blobptr, | ||
426 | attrptr->length, | ||
427 | attrptr->length, | ||
428 | load_nls_default(), false); | ||
429 | } | ||
430 | } | ||
431 | blobptr += attrsize; /* advance attr value */ | ||
432 | attrptr = (struct ntlmssp2_name *) blobptr; | ||
433 | } | ||
434 | } else { | ||
435 | ses->server->tilen = 2 * sizeof(struct ntlmssp2_name); | ||
436 | ses->server->tiblob = kmalloc(ses->server->tilen, GFP_KERNEL); | ||
437 | if (!ses->server->tiblob) { | ||
438 | ses->server->tilen = 0; | ||
439 | cERROR(1, "Challenge target info allocation failure"); | ||
440 | return -ENOMEM; | ||
441 | } | ||
442 | memset(ses->server->tiblob, 0x0, ses->server->tilen); | ||
443 | attrptr = (struct ntlmssp2_name *) ses->server->tiblob; | ||
444 | attrptr->type = cpu_to_le16(NTLMSSP_DOMAIN_TYPE); | ||
445 | } | ||
446 | |||
447 | return rc; | ||
448 | } | ||
449 | |||
450 | static int | ||
451 | CalcNTLMv2_response(const struct TCP_Server_Info *server, | ||
452 | char *v2_session_response) | ||
453 | { | ||
454 | int rc; | ||
455 | |||
456 | if (!server->ntlmssp.sdeschmacmd5) { | ||
457 | cERROR(1, "calc_ntlmv2_hash: can't generate ntlmv2 hash\n"); | ||
458 | return -1; | ||
459 | } | ||
460 | |||
461 | crypto_shash_setkey(server->ntlmssp.hmacmd5, server->ntlmv2_hash, | ||
462 | CIFS_HMAC_MD5_HASH_SIZE); | ||
463 | |||
464 | rc = crypto_shash_init(&server->ntlmssp.sdeschmacmd5->shash); | ||
465 | if (rc) { | ||
466 | cERROR(1, "CalcNTLMv2_response: could not init hmacmd5"); | ||
467 | return rc; | ||
468 | } | ||
469 | |||
470 | memcpy(v2_session_response + CIFS_SERVER_CHALLENGE_SIZE, | ||
471 | server->cryptKey, CIFS_SERVER_CHALLENGE_SIZE); | ||
472 | crypto_shash_update(&server->ntlmssp.sdeschmacmd5->shash, | ||
473 | v2_session_response + CIFS_SERVER_CHALLENGE_SIZE, | ||
474 | sizeof(struct ntlmv2_resp) - CIFS_SERVER_CHALLENGE_SIZE); | ||
475 | |||
476 | if (server->tilen) | ||
477 | crypto_shash_update(&server->ntlmssp.sdeschmacmd5->shash, | ||
478 | server->tiblob, server->tilen); | ||
479 | |||
480 | rc = crypto_shash_final(&server->ntlmssp.sdeschmacmd5->shash, | ||
481 | v2_session_response); | ||
482 | |||
483 | return rc; | ||
484 | } | ||
485 | |||
486 | int | ||
487 | setup_ntlmv2_rsp(struct cifsSesInfo *ses, char *resp_buf, | ||
488 | const struct nls_table *nls_cp) | 325 | const struct nls_table *nls_cp) |
489 | { | 326 | { |
490 | int rc = 0; | 327 | int rc; |
491 | struct ntlmv2_resp *buf = (struct ntlmv2_resp *)resp_buf; | 328 | struct ntlmv2_resp *buf = (struct ntlmv2_resp *)resp_buf; |
329 | struct HMACMD5Context context; | ||
492 | 330 | ||
493 | buf->blob_signature = cpu_to_le32(0x00000101); | 331 | buf->blob_signature = cpu_to_le32(0x00000101); |
494 | buf->reserved = 0; | 332 | buf->reserved = 0; |
495 | buf->time = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME)); | 333 | buf->time = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME)); |
496 | get_random_bytes(&buf->client_chal, sizeof(buf->client_chal)); | 334 | get_random_bytes(&buf->client_chal, sizeof(buf->client_chal)); |
497 | buf->reserved2 = 0; | 335 | buf->reserved2 = 0; |
498 | 336 | buf->names[0].type = cpu_to_le16(NTLMSSP_DOMAIN_TYPE); | |
499 | if (!ses->domainName) { | 337 | buf->names[0].length = 0; |
500 | rc = find_domain_name(ses); | 338 | buf->names[1].type = 0; |
501 | if (rc) { | 339 | buf->names[1].length = 0; |
502 | cERROR(1, "could not get domain/server name rc %d", rc); | ||
503 | return rc; | ||
504 | } | ||
505 | } | ||
506 | 340 | ||
507 | /* calculate buf->ntlmv2_hash */ | 341 | /* calculate buf->ntlmv2_hash */ |
508 | rc = calc_ntlmv2_hash(ses, nls_cp); | 342 | rc = calc_ntlmv2_hash(ses, nls_cp); |
509 | if (rc) { | 343 | if (rc) |
510 | cERROR(1, "could not get v2 hash rc %d", rc); | ||
511 | return rc; | ||
512 | } | ||
513 | rc = CalcNTLMv2_response(ses->server, resp_buf); | ||
514 | if (rc) { | ||
515 | cERROR(1, "could not get v2 hash rc %d", rc); | 344 | cERROR(1, "could not get v2 hash rc %d", rc); |
516 | return rc; | 345 | CalcNTLMv2_response(ses, resp_buf); |
517 | } | ||
518 | |||
519 | if (!ses->server->ntlmssp.sdeschmacmd5) { | ||
520 | cERROR(1, "calc_ntlmv2_hash: can't generate ntlmv2 hash\n"); | ||
521 | return -1; | ||
522 | } | ||
523 | |||
524 | crypto_shash_setkey(ses->server->ntlmssp.hmacmd5, | ||
525 | ses->server->ntlmv2_hash, CIFS_HMAC_MD5_HASH_SIZE); | ||
526 | 346 | ||
527 | rc = crypto_shash_init(&ses->server->ntlmssp.sdeschmacmd5->shash); | 347 | /* now calculate the MAC key for NTLMv2 */ |
528 | if (rc) { | 348 | hmac_md5_init_limK_to_64(ses->server->ntlmv2_hash, 16, &context); |
529 | cERROR(1, "setup_ntlmv2_rsp: could not init hmacmd5\n"); | 349 | hmac_md5_update(resp_buf, 16, &context); |
530 | return rc; | 350 | hmac_md5_final(ses->server->mac_signing_key.data.ntlmv2.key, &context); |
531 | } | ||
532 | 351 | ||
533 | crypto_shash_update(&ses->server->ntlmssp.sdeschmacmd5->shash, | 352 | memcpy(&ses->server->mac_signing_key.data.ntlmv2.resp, resp_buf, |
534 | resp_buf, CIFS_HMAC_MD5_HASH_SIZE); | 353 | sizeof(struct ntlmv2_resp)); |
535 | 354 | ses->server->mac_signing_key.len = 16 + sizeof(struct ntlmv2_resp); | |
536 | rc = crypto_shash_final(&ses->server->ntlmssp.sdeschmacmd5->shash, | ||
537 | ses->server->session_key.data.ntlmv2.key); | ||
538 | |||
539 | memcpy(&ses->server->session_key.data.ntlmv2.resp, resp_buf, | ||
540 | sizeof(struct ntlmv2_resp)); | ||
541 | ses->server->session_key.len = 16 + sizeof(struct ntlmv2_resp); | ||
542 | |||
543 | return rc; | ||
544 | } | 355 | } |
545 | 356 | ||
546 | int | 357 | void CalcNTLMv2_response(const struct cifsSesInfo *ses, |
547 | calc_seckey(struct TCP_Server_Info *server) | 358 | char *v2_session_response) |
548 | { | ||
549 | int rc; | ||
550 | unsigned char sec_key[CIFS_NTLMV2_SESSKEY_SIZE]; | ||
551 | struct crypto_blkcipher *tfm_arc4; | ||
552 | struct scatterlist sgin, sgout; | ||
553 | struct blkcipher_desc desc; | ||
554 | |||
555 | get_random_bytes(sec_key, CIFS_NTLMV2_SESSKEY_SIZE); | ||
556 | |||
557 | tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", | ||
558 | 0, CRYPTO_ALG_ASYNC); | ||
559 | if (!tfm_arc4 || IS_ERR(tfm_arc4)) { | ||
560 | cERROR(1, "could not allocate " "master crypto API arc4\n"); | ||
561 | return 1; | ||
562 | } | ||
563 | |||
564 | desc.tfm = tfm_arc4; | ||
565 | |||
566 | crypto_blkcipher_setkey(tfm_arc4, | ||
567 | server->session_key.data.ntlmv2.key, CIFS_CPHTXT_SIZE); | ||
568 | sg_init_one(&sgin, sec_key, CIFS_CPHTXT_SIZE); | ||
569 | sg_init_one(&sgout, server->ntlmssp.ciphertext, CIFS_CPHTXT_SIZE); | ||
570 | rc = crypto_blkcipher_encrypt(&desc, &sgout, &sgin, CIFS_CPHTXT_SIZE); | ||
571 | |||
572 | if (!rc) | ||
573 | memcpy(server->session_key.data.ntlmv2.key, | ||
574 | sec_key, CIFS_NTLMV2_SESSKEY_SIZE); | ||
575 | |||
576 | crypto_free_blkcipher(tfm_arc4); | ||
577 | |||
578 | return 0; | ||
579 | } | ||
580 | |||
581 | void | ||
582 | cifs_crypto_shash_release(struct TCP_Server_Info *server) | ||
583 | { | ||
584 | if (server->ntlmssp.md5) | ||
585 | crypto_free_shash(server->ntlmssp.md5); | ||
586 | |||
587 | if (server->ntlmssp.hmacmd5) | ||
588 | crypto_free_shash(server->ntlmssp.hmacmd5); | ||
589 | |||
590 | kfree(server->ntlmssp.sdeschmacmd5); | ||
591 | |||
592 | kfree(server->ntlmssp.sdescmd5); | ||
593 | } | ||
594 | |||
595 | int | ||
596 | cifs_crypto_shash_allocate(struct TCP_Server_Info *server) | ||
597 | { | 359 | { |
598 | int rc; | 360 | struct HMACMD5Context context; |
599 | unsigned int size; | 361 | /* rest of v2 struct already generated */ |
600 | 362 | memcpy(v2_session_response + 8, ses->server->cryptKey, 8); | |
601 | server->ntlmssp.hmacmd5 = crypto_alloc_shash("hmac(md5)", 0, 0); | 363 | hmac_md5_init_limK_to_64(ses->server->ntlmv2_hash, 16, &context); |
602 | if (!server->ntlmssp.hmacmd5 || | ||
603 | IS_ERR(server->ntlmssp.hmacmd5)) { | ||
604 | cERROR(1, "could not allocate crypto hmacmd5\n"); | ||
605 | return 1; | ||
606 | } | ||
607 | |||
608 | server->ntlmssp.md5 = crypto_alloc_shash("md5", 0, 0); | ||
609 | if (!server->ntlmssp.md5 || IS_ERR(server->ntlmssp.md5)) { | ||
610 | cERROR(1, "could not allocate crypto md5\n"); | ||
611 | rc = 1; | ||
612 | goto cifs_crypto_shash_allocate_ret1; | ||
613 | } | ||
614 | |||
615 | size = sizeof(struct shash_desc) + | ||
616 | crypto_shash_descsize(server->ntlmssp.hmacmd5); | ||
617 | server->ntlmssp.sdeschmacmd5 = kmalloc(size, GFP_KERNEL); | ||
618 | if (!server->ntlmssp.sdeschmacmd5) { | ||
619 | cERROR(1, "cifs_crypto_shash_allocate: can't alloc hmacmd5\n"); | ||
620 | rc = -ENOMEM; | ||
621 | goto cifs_crypto_shash_allocate_ret2; | ||
622 | } | ||
623 | server->ntlmssp.sdeschmacmd5->shash.tfm = server->ntlmssp.hmacmd5; | ||
624 | server->ntlmssp.sdeschmacmd5->shash.flags = 0x0; | ||
625 | 364 | ||
365 | hmac_md5_update(v2_session_response+8, | ||
366 | sizeof(struct ntlmv2_resp) - 8, &context); | ||
626 | 367 | ||
627 | size = sizeof(struct shash_desc) + | 368 | hmac_md5_final(v2_session_response, &context); |
628 | crypto_shash_descsize(server->ntlmssp.md5); | 369 | /* cifs_dump_mem("v2_sess_rsp: ", v2_session_response, 32); */ |
629 | server->ntlmssp.sdescmd5 = kmalloc(size, GFP_KERNEL); | ||
630 | if (!server->ntlmssp.sdescmd5) { | ||
631 | cERROR(1, "cifs_crypto_shash_allocate: can't alloc md5\n"); | ||
632 | rc = -ENOMEM; | ||
633 | goto cifs_crypto_shash_allocate_ret3; | ||
634 | } | ||
635 | server->ntlmssp.sdescmd5->shash.tfm = server->ntlmssp.md5; | ||
636 | server->ntlmssp.sdescmd5->shash.flags = 0x0; | ||
637 | |||
638 | return 0; | ||
639 | |||
640 | cifs_crypto_shash_allocate_ret3: | ||
641 | kfree(server->ntlmssp.sdeschmacmd5); | ||
642 | |||
643 | cifs_crypto_shash_allocate_ret2: | ||
644 | crypto_free_shash(server->ntlmssp.md5); | ||
645 | |||
646 | cifs_crypto_shash_allocate_ret1: | ||
647 | crypto_free_shash(server->ntlmssp.hmacmd5); | ||
648 | |||
649 | return rc; | ||
650 | } | 370 | } |
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index c9d0cfc086eb..0cdfb8c32ac6 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h | |||
@@ -25,9 +25,6 @@ | |||
25 | #include <linux/workqueue.h> | 25 | #include <linux/workqueue.h> |
26 | #include "cifs_fs_sb.h" | 26 | #include "cifs_fs_sb.h" |
27 | #include "cifsacl.h" | 27 | #include "cifsacl.h" |
28 | #include <crypto/internal/hash.h> | ||
29 | #include <linux/scatterlist.h> | ||
30 | |||
31 | /* | 28 | /* |
32 | * The sizes of various internal tables and strings | 29 | * The sizes of various internal tables and strings |
33 | */ | 30 | */ |
@@ -100,7 +97,7 @@ enum protocolEnum { | |||
100 | /* Netbios frames protocol not supported at this time */ | 97 | /* Netbios frames protocol not supported at this time */ |
101 | }; | 98 | }; |
102 | 99 | ||
103 | struct session_key { | 100 | struct mac_key { |
104 | unsigned int len; | 101 | unsigned int len; |
105 | union { | 102 | union { |
106 | char ntlm[CIFS_SESS_KEY_SIZE + 16]; | 103 | char ntlm[CIFS_SESS_KEY_SIZE + 16]; |
@@ -123,21 +120,6 @@ struct cifs_cred { | |||
123 | struct cifs_ace *aces; | 120 | struct cifs_ace *aces; |
124 | }; | 121 | }; |
125 | 122 | ||
126 | struct sdesc { | ||
127 | struct shash_desc shash; | ||
128 | char ctx[]; | ||
129 | }; | ||
130 | |||
131 | struct ntlmssp_auth { | ||
132 | __u32 client_flags; | ||
133 | __u32 server_flags; | ||
134 | unsigned char ciphertext[CIFS_CPHTXT_SIZE]; | ||
135 | struct crypto_shash *hmacmd5; | ||
136 | struct crypto_shash *md5; | ||
137 | struct sdesc *sdeschmacmd5; | ||
138 | struct sdesc *sdescmd5; | ||
139 | }; | ||
140 | |||
141 | /* | 123 | /* |
142 | ***************************************************************** | 124 | ***************************************************************** |
143 | * Except the CIFS PDUs themselves all the | 125 | * Except the CIFS PDUs themselves all the |
@@ -200,14 +182,11 @@ struct TCP_Server_Info { | |||
200 | /* 16th byte of RFC1001 workstation name is always null */ | 182 | /* 16th byte of RFC1001 workstation name is always null */ |
201 | char workstation_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL]; | 183 | char workstation_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL]; |
202 | __u32 sequence_number; /* needed for CIFS PDU signature */ | 184 | __u32 sequence_number; /* needed for CIFS PDU signature */ |
203 | struct session_key session_key; | 185 | struct mac_key mac_signing_key; |
204 | char ntlmv2_hash[16]; | 186 | char ntlmv2_hash[16]; |
205 | unsigned long lstrp; /* when we got last response from this server */ | 187 | unsigned long lstrp; /* when we got last response from this server */ |
206 | u16 dialect; /* dialect index that server chose */ | 188 | u16 dialect; /* dialect index that server chose */ |
207 | /* extended security flavors that server supports */ | 189 | /* extended security flavors that server supports */ |
208 | unsigned int tilen; /* length of the target info blob */ | ||
209 | unsigned char *tiblob; /* target info blob in challenge response */ | ||
210 | struct ntlmssp_auth ntlmssp; /* various keys, ciphers, flags */ | ||
211 | bool sec_kerberos; /* supports plain Kerberos */ | 190 | bool sec_kerberos; /* supports plain Kerberos */ |
212 | bool sec_mskerberos; /* supports legacy MS Kerberos */ | 191 | bool sec_mskerberos; /* supports legacy MS Kerberos */ |
213 | bool sec_kerberosu2u; /* supports U2U Kerberos */ | 192 | bool sec_kerberosu2u; /* supports U2U Kerberos */ |
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h index 320e0fd0ba7b..14d036d8db11 100644 --- a/fs/cifs/cifspdu.h +++ b/fs/cifs/cifspdu.h | |||
@@ -134,12 +134,6 @@ | |||
134 | * Size of the session key (crypto key encrypted with the password | 134 | * Size of the session key (crypto key encrypted with the password |
135 | */ | 135 | */ |
136 | #define CIFS_SESS_KEY_SIZE (24) | 136 | #define CIFS_SESS_KEY_SIZE (24) |
137 | #define CIFS_CLIENT_CHALLENGE_SIZE (8) | ||
138 | #define CIFS_SERVER_CHALLENGE_SIZE (8) | ||
139 | #define CIFS_HMAC_MD5_HASH_SIZE (16) | ||
140 | #define CIFS_CPHTXT_SIZE (16) | ||
141 | #define CIFS_NTLMV2_SESSKEY_SIZE (16) | ||
142 | #define CIFS_NTHASH_SIZE (16) | ||
143 | 137 | ||
144 | /* | 138 | /* |
145 | * Maximum user name length | 139 | * Maximum user name length |
@@ -669,6 +663,7 @@ struct ntlmv2_resp { | |||
669 | __le64 time; | 663 | __le64 time; |
670 | __u64 client_chal; /* random */ | 664 | __u64 client_chal; /* random */ |
671 | __u32 reserved2; | 665 | __u32 reserved2; |
666 | struct ntlmssp2_name names[2]; | ||
672 | /* array of name entries could follow ending in minimum 4 byte struct */ | 667 | /* array of name entries could follow ending in minimum 4 byte struct */ |
673 | } __attribute__((packed)); | 668 | } __attribute__((packed)); |
674 | 669 | ||
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index 1378d9133844..1d60c655e3e0 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h | |||
@@ -87,8 +87,9 @@ extern unsigned int smbCalcSize_LE(struct smb_hdr *ptr); | |||
87 | extern int decode_negTokenInit(unsigned char *security_blob, int length, | 87 | extern int decode_negTokenInit(unsigned char *security_blob, int length, |
88 | struct TCP_Server_Info *server); | 88 | struct TCP_Server_Info *server); |
89 | extern int cifs_convert_address(struct sockaddr *dst, const char *src, int len); | 89 | extern int cifs_convert_address(struct sockaddr *dst, const char *src, int len); |
90 | extern int cifs_set_port(struct sockaddr *addr, const unsigned short int port); | ||
90 | extern int cifs_fill_sockaddr(struct sockaddr *dst, const char *src, int len, | 91 | extern int cifs_fill_sockaddr(struct sockaddr *dst, const char *src, int len, |
91 | unsigned short int port); | 92 | const unsigned short int port); |
92 | extern int map_smb_to_linux_error(struct smb_hdr *smb, int logErr); | 93 | extern int map_smb_to_linux_error(struct smb_hdr *smb, int logErr); |
93 | extern void header_assemble(struct smb_hdr *, char /* command */ , | 94 | extern void header_assemble(struct smb_hdr *, char /* command */ , |
94 | const struct cifsTconInfo *, int /* length of | 95 | const struct cifsTconInfo *, int /* length of |
@@ -361,15 +362,13 @@ extern int cifs_sign_smb(struct smb_hdr *, struct TCP_Server_Info *, __u32 *); | |||
361 | extern int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *, | 362 | extern int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *, |
362 | __u32 *); | 363 | __u32 *); |
363 | extern int cifs_verify_signature(struct smb_hdr *, | 364 | extern int cifs_verify_signature(struct smb_hdr *, |
364 | struct TCP_Server_Info *server, | 365 | const struct mac_key *mac_key, |
365 | __u32 expected_sequence_number); | 366 | __u32 expected_sequence_number); |
366 | extern int cifs_calculate_session_key(struct session_key *key, const char *rn, | 367 | extern int cifs_calculate_mac_key(struct mac_key *key, const char *rn, |
367 | const char *pass); | 368 | const char *pass); |
368 | extern int setup_ntlmv2_rsp(struct cifsSesInfo *, char *, | 369 | extern void CalcNTLMv2_response(const struct cifsSesInfo *, char *); |
370 | extern void setup_ntlmv2_rsp(struct cifsSesInfo *, char *, | ||
369 | const struct nls_table *); | 371 | const struct nls_table *); |
370 | extern int cifs_crypto_shash_allocate(struct TCP_Server_Info *); | ||
371 | extern void cifs_crypto_shash_release(struct TCP_Server_Info *); | ||
372 | extern int calc_seckey(struct TCP_Server_Info *); | ||
373 | #ifdef CONFIG_CIFS_WEAK_PW_HASH | 372 | #ifdef CONFIG_CIFS_WEAK_PW_HASH |
374 | extern void calc_lanman_hash(const char *password, const char *cryptkey, | 373 | extern void calc_lanman_hash(const char *password, const char *cryptkey, |
375 | bool encrypt, char *lnm_session_key); | 374 | bool encrypt, char *lnm_session_key); |
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 4bda920d1f75..7e83b356cc9e 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c | |||
@@ -232,7 +232,7 @@ static int | |||
232 | small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon, | 232 | small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon, |
233 | void **request_buf) | 233 | void **request_buf) |
234 | { | 234 | { |
235 | int rc = 0; | 235 | int rc; |
236 | 236 | ||
237 | rc = cifs_reconnect_tcon(tcon, smb_command); | 237 | rc = cifs_reconnect_tcon(tcon, smb_command); |
238 | if (rc) | 238 | if (rc) |
@@ -250,7 +250,7 @@ small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon, | |||
250 | if (tcon != NULL) | 250 | if (tcon != NULL) |
251 | cifs_stats_inc(&tcon->num_smbs_sent); | 251 | cifs_stats_inc(&tcon->num_smbs_sent); |
252 | 252 | ||
253 | return rc; | 253 | return 0; |
254 | } | 254 | } |
255 | 255 | ||
256 | int | 256 | int |
@@ -281,16 +281,9 @@ small_smb_init_no_tc(const int smb_command, const int wct, | |||
281 | 281 | ||
282 | /* If the return code is zero, this function must fill in request_buf pointer */ | 282 | /* If the return code is zero, this function must fill in request_buf pointer */ |
283 | static int | 283 | static int |
284 | smb_init(int smb_command, int wct, struct cifsTconInfo *tcon, | 284 | __smb_init(int smb_command, int wct, struct cifsTconInfo *tcon, |
285 | void **request_buf /* returned */ , | 285 | void **request_buf, void **response_buf) |
286 | void **response_buf /* returned */ ) | ||
287 | { | 286 | { |
288 | int rc = 0; | ||
289 | |||
290 | rc = cifs_reconnect_tcon(tcon, smb_command); | ||
291 | if (rc) | ||
292 | return rc; | ||
293 | |||
294 | *request_buf = cifs_buf_get(); | 287 | *request_buf = cifs_buf_get(); |
295 | if (*request_buf == NULL) { | 288 | if (*request_buf == NULL) { |
296 | /* BB should we add a retry in here if not a writepage? */ | 289 | /* BB should we add a retry in here if not a writepage? */ |
@@ -309,7 +302,31 @@ smb_init(int smb_command, int wct, struct cifsTconInfo *tcon, | |||
309 | if (tcon != NULL) | 302 | if (tcon != NULL) |
310 | cifs_stats_inc(&tcon->num_smbs_sent); | 303 | cifs_stats_inc(&tcon->num_smbs_sent); |
311 | 304 | ||
312 | return rc; | 305 | return 0; |
306 | } | ||
307 | |||
308 | /* If the return code is zero, this function must fill in request_buf pointer */ | ||
309 | static int | ||
310 | smb_init(int smb_command, int wct, struct cifsTconInfo *tcon, | ||
311 | void **request_buf, void **response_buf) | ||
312 | { | ||
313 | int rc; | ||
314 | |||
315 | rc = cifs_reconnect_tcon(tcon, smb_command); | ||
316 | if (rc) | ||
317 | return rc; | ||
318 | |||
319 | return __smb_init(smb_command, wct, tcon, request_buf, response_buf); | ||
320 | } | ||
321 | |||
322 | static int | ||
323 | smb_init_no_reconnect(int smb_command, int wct, struct cifsTconInfo *tcon, | ||
324 | void **request_buf, void **response_buf) | ||
325 | { | ||
326 | if (tcon->ses->need_reconnect || tcon->need_reconnect) | ||
327 | return -EHOSTDOWN; | ||
328 | |||
329 | return __smb_init(smb_command, wct, tcon, request_buf, response_buf); | ||
313 | } | 330 | } |
314 | 331 | ||
315 | static int validate_t2(struct smb_t2_rsp *pSMB) | 332 | static int validate_t2(struct smb_t2_rsp *pSMB) |
@@ -604,14 +621,11 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses) | |||
604 | else | 621 | else |
605 | rc = -EINVAL; | 622 | rc = -EINVAL; |
606 | 623 | ||
607 | if (server->secType == Kerberos) { | 624 | if (server->sec_kerberos || server->sec_mskerberos) |
608 | if (!server->sec_kerberos && | 625 | server->secType = Kerberos; |
609 | !server->sec_mskerberos) | 626 | else if (server->sec_ntlmssp) |
610 | rc = -EOPNOTSUPP; | 627 | server->secType = RawNTLMSSP; |
611 | } else if (server->secType == RawNTLMSSP) { | 628 | else |
612 | if (!server->sec_ntlmssp) | ||
613 | rc = -EOPNOTSUPP; | ||
614 | } else | ||
615 | rc = -EOPNOTSUPP; | 629 | rc = -EOPNOTSUPP; |
616 | } | 630 | } |
617 | } else | 631 | } else |
@@ -4537,8 +4551,8 @@ CIFSSMBQFSUnixInfo(const int xid, struct cifsTconInfo *tcon) | |||
4537 | 4551 | ||
4538 | cFYI(1, "In QFSUnixInfo"); | 4552 | cFYI(1, "In QFSUnixInfo"); |
4539 | QFSUnixRetry: | 4553 | QFSUnixRetry: |
4540 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, | 4554 | rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, tcon, |
4541 | (void **) &pSMBr); | 4555 | (void **) &pSMB, (void **) &pSMBr); |
4542 | if (rc) | 4556 | if (rc) |
4543 | return rc; | 4557 | return rc; |
4544 | 4558 | ||
@@ -4607,8 +4621,8 @@ CIFSSMBSetFSUnixInfo(const int xid, struct cifsTconInfo *tcon, __u64 cap) | |||
4607 | cFYI(1, "In SETFSUnixInfo"); | 4621 | cFYI(1, "In SETFSUnixInfo"); |
4608 | SETFSUnixRetry: | 4622 | SETFSUnixRetry: |
4609 | /* BB switch to small buf init to save memory */ | 4623 | /* BB switch to small buf init to save memory */ |
4610 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, | 4624 | rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, tcon, |
4611 | (void **) &pSMBr); | 4625 | (void **) &pSMB, (void **) &pSMBr); |
4612 | if (rc) | 4626 | if (rc) |
4613 | return rc; | 4627 | return rc; |
4614 | 4628 | ||
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index ec0ea4a43bdb..88c84a38bccb 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
@@ -400,7 +400,9 @@ incomplete_rcv: | |||
400 | cFYI(1, "call to reconnect done"); | 400 | cFYI(1, "call to reconnect done"); |
401 | csocket = server->ssocket; | 401 | csocket = server->ssocket; |
402 | continue; | 402 | continue; |
403 | } else if ((length == -ERESTARTSYS) || (length == -EAGAIN)) { | 403 | } else if (length == -ERESTARTSYS || |
404 | length == -EAGAIN || | ||
405 | length == -EINTR) { | ||
404 | msleep(1); /* minimum sleep to prevent looping | 406 | msleep(1); /* minimum sleep to prevent looping |
405 | allowing socket to clear and app threads to set | 407 | allowing socket to clear and app threads to set |
406 | tcpStatus CifsNeedReconnect if server hung */ | 408 | tcpStatus CifsNeedReconnect if server hung */ |
@@ -414,18 +416,6 @@ incomplete_rcv: | |||
414 | } else | 416 | } else |
415 | continue; | 417 | continue; |
416 | } else if (length <= 0) { | 418 | } else if (length <= 0) { |
417 | if (server->tcpStatus == CifsNew) { | ||
418 | cFYI(1, "tcp session abend after SMBnegprot"); | ||
419 | /* some servers kill the TCP session rather than | ||
420 | returning an SMB negprot error, in which | ||
421 | case reconnecting here is not going to help, | ||
422 | and so simply return error to mount */ | ||
423 | break; | ||
424 | } | ||
425 | if (!try_to_freeze() && (length == -EINTR)) { | ||
426 | cFYI(1, "cifsd thread killed"); | ||
427 | break; | ||
428 | } | ||
429 | cFYI(1, "Reconnect after unexpected peek error %d", | 419 | cFYI(1, "Reconnect after unexpected peek error %d", |
430 | length); | 420 | length); |
431 | cifs_reconnect(server); | 421 | cifs_reconnect(server); |
@@ -466,27 +456,19 @@ incomplete_rcv: | |||
466 | an error on SMB negprot response */ | 456 | an error on SMB negprot response */ |
467 | cFYI(1, "Negative RFC1002 Session Response Error 0x%x)", | 457 | cFYI(1, "Negative RFC1002 Session Response Error 0x%x)", |
468 | pdu_length); | 458 | pdu_length); |
469 | if (server->tcpStatus == CifsNew) { | 459 | /* give server a second to clean up */ |
470 | /* if nack on negprot (rather than | 460 | msleep(1000); |
471 | ret of smb negprot error) reconnecting | 461 | /* always try 445 first on reconnect since we get NACK |
472 | not going to help, ret error to mount */ | 462 | * on some if we ever connected to port 139 (the NACK |
473 | break; | 463 | * is since we do not begin with RFC1001 session |
474 | } else { | 464 | * initialize frame) |
475 | /* give server a second to | 465 | */ |
476 | clean up before reconnect attempt */ | 466 | cifs_set_port((struct sockaddr *) |
477 | msleep(1000); | 467 | &server->addr.sockAddr, CIFS_PORT); |
478 | /* always try 445 first on reconnect | 468 | cifs_reconnect(server); |
479 | since we get NACK on some if we ever | 469 | csocket = server->ssocket; |
480 | connected to port 139 (the NACK is | 470 | wake_up(&server->response_q); |
481 | since we do not begin with RFC1001 | 471 | continue; |
482 | session initialize frame) */ | ||
483 | server->addr.sockAddr.sin_port = | ||
484 | htons(CIFS_PORT); | ||
485 | cifs_reconnect(server); | ||
486 | csocket = server->ssocket; | ||
487 | wake_up(&server->response_q); | ||
488 | continue; | ||
489 | } | ||
490 | } else if (temp != (char) 0) { | 472 | } else if (temp != (char) 0) { |
491 | cERROR(1, "Unknown RFC 1002 frame"); | 473 | cERROR(1, "Unknown RFC 1002 frame"); |
492 | cifs_dump_mem(" Received Data: ", (char *)smb_buffer, | 474 | cifs_dump_mem(" Received Data: ", (char *)smb_buffer, |
@@ -522,8 +504,7 @@ incomplete_rcv: | |||
522 | total_read += length) { | 504 | total_read += length) { |
523 | length = kernel_recvmsg(csocket, &smb_msg, &iov, 1, | 505 | length = kernel_recvmsg(csocket, &smb_msg, &iov, 1, |
524 | pdu_length - total_read, 0); | 506 | pdu_length - total_read, 0); |
525 | if ((server->tcpStatus == CifsExiting) || | 507 | if (server->tcpStatus == CifsExiting) { |
526 | (length == -EINTR)) { | ||
527 | /* then will exit */ | 508 | /* then will exit */ |
528 | reconnect = 2; | 509 | reconnect = 2; |
529 | break; | 510 | break; |
@@ -534,8 +515,9 @@ incomplete_rcv: | |||
534 | /* Now we will reread sock */ | 515 | /* Now we will reread sock */ |
535 | reconnect = 1; | 516 | reconnect = 1; |
536 | break; | 517 | break; |
537 | } else if ((length == -ERESTARTSYS) || | 518 | } else if (length == -ERESTARTSYS || |
538 | (length == -EAGAIN)) { | 519 | length == -EAGAIN || |
520 | length == -EINTR) { | ||
539 | msleep(1); /* minimum sleep to prevent looping, | 521 | msleep(1); /* minimum sleep to prevent looping, |
540 | allowing socket to clear and app | 522 | allowing socket to clear and app |
541 | threads to set tcpStatus | 523 | threads to set tcpStatus |
@@ -1708,7 +1690,6 @@ cifs_put_smb_ses(struct cifsSesInfo *ses) | |||
1708 | CIFSSMBLogoff(xid, ses); | 1690 | CIFSSMBLogoff(xid, ses); |
1709 | _FreeXid(xid); | 1691 | _FreeXid(xid); |
1710 | } | 1692 | } |
1711 | cifs_crypto_shash_release(server); | ||
1712 | sesInfoFree(ses); | 1693 | sesInfoFree(ses); |
1713 | cifs_put_tcp_session(server); | 1694 | cifs_put_tcp_session(server); |
1714 | } | 1695 | } |
@@ -1725,9 +1706,6 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) | |||
1725 | if (ses) { | 1706 | if (ses) { |
1726 | cFYI(1, "Existing smb sess found (status=%d)", ses->status); | 1707 | cFYI(1, "Existing smb sess found (status=%d)", ses->status); |
1727 | 1708 | ||
1728 | /* existing SMB ses has a server reference already */ | ||
1729 | cifs_put_tcp_session(server); | ||
1730 | |||
1731 | mutex_lock(&ses->session_mutex); | 1709 | mutex_lock(&ses->session_mutex); |
1732 | rc = cifs_negotiate_protocol(xid, ses); | 1710 | rc = cifs_negotiate_protocol(xid, ses); |
1733 | if (rc) { | 1711 | if (rc) { |
@@ -1750,6 +1728,9 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) | |||
1750 | } | 1728 | } |
1751 | } | 1729 | } |
1752 | mutex_unlock(&ses->session_mutex); | 1730 | mutex_unlock(&ses->session_mutex); |
1731 | |||
1732 | /* existing SMB ses has a server reference already */ | ||
1733 | cifs_put_tcp_session(server); | ||
1753 | FreeXid(xid); | 1734 | FreeXid(xid); |
1754 | return ses; | 1735 | return ses; |
1755 | } | 1736 | } |
@@ -1788,23 +1769,13 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) | |||
1788 | ses->linux_uid = volume_info->linux_uid; | 1769 | ses->linux_uid = volume_info->linux_uid; |
1789 | ses->overrideSecFlg = volume_info->secFlg; | 1770 | ses->overrideSecFlg = volume_info->secFlg; |
1790 | 1771 | ||
1791 | rc = cifs_crypto_shash_allocate(server); | ||
1792 | if (rc) { | ||
1793 | cERROR(1, "could not setup hash structures rc %d", rc); | ||
1794 | goto get_ses_fail; | ||
1795 | } | ||
1796 | server->tilen = 0; | ||
1797 | server->tiblob = NULL; | ||
1798 | |||
1799 | mutex_lock(&ses->session_mutex); | 1772 | mutex_lock(&ses->session_mutex); |
1800 | rc = cifs_negotiate_protocol(xid, ses); | 1773 | rc = cifs_negotiate_protocol(xid, ses); |
1801 | if (!rc) | 1774 | if (!rc) |
1802 | rc = cifs_setup_session(xid, ses, volume_info->local_nls); | 1775 | rc = cifs_setup_session(xid, ses, volume_info->local_nls); |
1803 | mutex_unlock(&ses->session_mutex); | 1776 | mutex_unlock(&ses->session_mutex); |
1804 | if (rc) { | 1777 | if (rc) |
1805 | cifs_crypto_shash_release(ses->server); | ||
1806 | goto get_ses_fail; | 1778 | goto get_ses_fail; |
1807 | } | ||
1808 | 1779 | ||
1809 | /* success, put it on the list */ | 1780 | /* success, put it on the list */ |
1810 | write_lock(&cifs_tcp_ses_lock); | 1781 | write_lock(&cifs_tcp_ses_lock); |
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 86a164f08a74..53cce8cc2224 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c | |||
@@ -801,6 +801,8 @@ retry_iget5_locked: | |||
801 | inode->i_flags |= S_NOATIME | S_NOCMTIME; | 801 | inode->i_flags |= S_NOATIME | S_NOCMTIME; |
802 | if (inode->i_state & I_NEW) { | 802 | if (inode->i_state & I_NEW) { |
803 | inode->i_ino = hash; | 803 | inode->i_ino = hash; |
804 | if (S_ISREG(inode->i_mode)) | ||
805 | inode->i_data.backing_dev_info = sb->s_bdi; | ||
804 | #ifdef CONFIG_CIFS_FSCACHE | 806 | #ifdef CONFIG_CIFS_FSCACHE |
805 | /* initialize per-inode cache cookie pointer */ | 807 | /* initialize per-inode cache cookie pointer */ |
806 | CIFS_I(inode)->fscache = NULL; | 808 | CIFS_I(inode)->fscache = NULL; |
@@ -1462,29 +1464,18 @@ int cifs_rename(struct inode *source_dir, struct dentry *source_dentry, | |||
1462 | { | 1464 | { |
1463 | char *fromName = NULL; | 1465 | char *fromName = NULL; |
1464 | char *toName = NULL; | 1466 | char *toName = NULL; |
1465 | struct cifs_sb_info *cifs_sb_source; | 1467 | struct cifs_sb_info *cifs_sb; |
1466 | struct cifs_sb_info *cifs_sb_target; | ||
1467 | struct cifsTconInfo *tcon; | 1468 | struct cifsTconInfo *tcon; |
1468 | FILE_UNIX_BASIC_INFO *info_buf_source = NULL; | 1469 | FILE_UNIX_BASIC_INFO *info_buf_source = NULL; |
1469 | FILE_UNIX_BASIC_INFO *info_buf_target; | 1470 | FILE_UNIX_BASIC_INFO *info_buf_target; |
1470 | int xid, rc, tmprc; | 1471 | int xid, rc, tmprc; |
1471 | 1472 | ||
1472 | cifs_sb_target = CIFS_SB(target_dir->i_sb); | 1473 | cifs_sb = CIFS_SB(source_dir->i_sb); |
1473 | cifs_sb_source = CIFS_SB(source_dir->i_sb); | 1474 | tcon = cifs_sb->tcon; |
1474 | tcon = cifs_sb_source->tcon; | ||
1475 | 1475 | ||
1476 | xid = GetXid(); | 1476 | xid = GetXid(); |
1477 | 1477 | ||
1478 | /* | 1478 | /* |
1479 | * BB: this might be allowed if same server, but different share. | ||
1480 | * Consider adding support for this | ||
1481 | */ | ||
1482 | if (tcon != cifs_sb_target->tcon) { | ||
1483 | rc = -EXDEV; | ||
1484 | goto cifs_rename_exit; | ||
1485 | } | ||
1486 | |||
1487 | /* | ||
1488 | * we already have the rename sem so we do not need to | 1479 | * we already have the rename sem so we do not need to |
1489 | * grab it again here to protect the path integrity | 1480 | * grab it again here to protect the path integrity |
1490 | */ | 1481 | */ |
@@ -1519,17 +1510,16 @@ int cifs_rename(struct inode *source_dir, struct dentry *source_dentry, | |||
1519 | info_buf_target = info_buf_source + 1; | 1510 | info_buf_target = info_buf_source + 1; |
1520 | tmprc = CIFSSMBUnixQPathInfo(xid, tcon, fromName, | 1511 | tmprc = CIFSSMBUnixQPathInfo(xid, tcon, fromName, |
1521 | info_buf_source, | 1512 | info_buf_source, |
1522 | cifs_sb_source->local_nls, | 1513 | cifs_sb->local_nls, |
1523 | cifs_sb_source->mnt_cifs_flags & | 1514 | cifs_sb->mnt_cifs_flags & |
1524 | CIFS_MOUNT_MAP_SPECIAL_CHR); | 1515 | CIFS_MOUNT_MAP_SPECIAL_CHR); |
1525 | if (tmprc != 0) | 1516 | if (tmprc != 0) |
1526 | goto unlink_target; | 1517 | goto unlink_target; |
1527 | 1518 | ||
1528 | tmprc = CIFSSMBUnixQPathInfo(xid, tcon, | 1519 | tmprc = CIFSSMBUnixQPathInfo(xid, tcon, toName, |
1529 | toName, info_buf_target, | 1520 | info_buf_target, |
1530 | cifs_sb_target->local_nls, | 1521 | cifs_sb->local_nls, |
1531 | /* remap based on source sb */ | 1522 | cifs_sb->mnt_cifs_flags & |
1532 | cifs_sb_source->mnt_cifs_flags & | ||
1533 | CIFS_MOUNT_MAP_SPECIAL_CHR); | 1523 | CIFS_MOUNT_MAP_SPECIAL_CHR); |
1534 | 1524 | ||
1535 | if (tmprc == 0 && (info_buf_source->UniqueId == | 1525 | if (tmprc == 0 && (info_buf_source->UniqueId == |
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c index f97851119e6c..9aad47a2d62f 100644 --- a/fs/cifs/netmisc.c +++ b/fs/cifs/netmisc.c | |||
@@ -206,26 +206,30 @@ cifs_convert_address(struct sockaddr *dst, const char *src, int len) | |||
206 | } | 206 | } |
207 | 207 | ||
208 | int | 208 | int |
209 | cifs_fill_sockaddr(struct sockaddr *dst, const char *src, int len, | 209 | cifs_set_port(struct sockaddr *addr, const unsigned short int port) |
210 | const unsigned short int port) | ||
211 | { | 210 | { |
212 | if (!cifs_convert_address(dst, src, len)) | 211 | switch (addr->sa_family) { |
213 | return 0; | ||
214 | |||
215 | switch (dst->sa_family) { | ||
216 | case AF_INET: | 212 | case AF_INET: |
217 | ((struct sockaddr_in *)dst)->sin_port = htons(port); | 213 | ((struct sockaddr_in *)addr)->sin_port = htons(port); |
218 | break; | 214 | break; |
219 | case AF_INET6: | 215 | case AF_INET6: |
220 | ((struct sockaddr_in6 *)dst)->sin6_port = htons(port); | 216 | ((struct sockaddr_in6 *)addr)->sin6_port = htons(port); |
221 | break; | 217 | break; |
222 | default: | 218 | default: |
223 | return 0; | 219 | return 0; |
224 | } | 220 | } |
225 | |||
226 | return 1; | 221 | return 1; |
227 | } | 222 | } |
228 | 223 | ||
224 | int | ||
225 | cifs_fill_sockaddr(struct sockaddr *dst, const char *src, int len, | ||
226 | const unsigned short int port) | ||
227 | { | ||
228 | if (!cifs_convert_address(dst, src, len)) | ||
229 | return 0; | ||
230 | return cifs_set_port(dst, port); | ||
231 | } | ||
232 | |||
229 | /***************************************************************************** | 233 | /***************************************************************************** |
230 | convert a NT status code to a dos class/code | 234 | convert a NT status code to a dos class/code |
231 | *****************************************************************************/ | 235 | *****************************************************************************/ |
diff --git a/fs/cifs/ntlmssp.h b/fs/cifs/ntlmssp.h index 1db0f0746a5b..49c9a4e75319 100644 --- a/fs/cifs/ntlmssp.h +++ b/fs/cifs/ntlmssp.h | |||
@@ -61,19 +61,6 @@ | |||
61 | #define NTLMSSP_NEGOTIATE_KEY_XCH 0x40000000 | 61 | #define NTLMSSP_NEGOTIATE_KEY_XCH 0x40000000 |
62 | #define NTLMSSP_NEGOTIATE_56 0x80000000 | 62 | #define NTLMSSP_NEGOTIATE_56 0x80000000 |
63 | 63 | ||
64 | /* Define AV Pair Field IDs */ | ||
65 | #define NTLMSSP_AV_EOL 0 | ||
66 | #define NTLMSSP_AV_NB_COMPUTER_NAME 1 | ||
67 | #define NTLMSSP_AV_NB_DOMAIN_NAME 2 | ||
68 | #define NTLMSSP_AV_DNS_COMPUTER_NAME 3 | ||
69 | #define NTLMSSP_AV_DNS_DOMAIN_NAME 4 | ||
70 | #define NTLMSSP_AV_DNS_TREE_NAME 5 | ||
71 | #define NTLMSSP_AV_FLAGS 6 | ||
72 | #define NTLMSSP_AV_TIMESTAMP 7 | ||
73 | #define NTLMSSP_AV_RESTRICTION 8 | ||
74 | #define NTLMSSP_AV_TARGET_NAME 9 | ||
75 | #define NTLMSSP_AV_CHANNEL_BINDINGS 10 | ||
76 | |||
77 | /* Although typedefs are not commonly used for structure definitions */ | 64 | /* Although typedefs are not commonly used for structure definitions */ |
78 | /* in the Linux kernel, in this particular case they are useful */ | 65 | /* in the Linux kernel, in this particular case they are useful */ |
79 | /* to more closely match the standards document for NTLMSSP from */ | 66 | /* to more closely match the standards document for NTLMSSP from */ |
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index 795095f4eac6..0a57cb7db5dd 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c | |||
@@ -383,9 +383,6 @@ static int decode_ascii_ssetup(char **pbcc_area, int bleft, | |||
383 | static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, | 383 | static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, |
384 | struct cifsSesInfo *ses) | 384 | struct cifsSesInfo *ses) |
385 | { | 385 | { |
386 | unsigned int tioffset; /* challeng message target info area */ | ||
387 | unsigned int tilen; /* challeng message target info area length */ | ||
388 | |||
389 | CHALLENGE_MESSAGE *pblob = (CHALLENGE_MESSAGE *)bcc_ptr; | 386 | CHALLENGE_MESSAGE *pblob = (CHALLENGE_MESSAGE *)bcc_ptr; |
390 | 387 | ||
391 | if (blob_len < sizeof(CHALLENGE_MESSAGE)) { | 388 | if (blob_len < sizeof(CHALLENGE_MESSAGE)) { |
@@ -408,20 +405,6 @@ static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, | |||
408 | /* BB spec says that if AvId field of MsvAvTimestamp is populated then | 405 | /* BB spec says that if AvId field of MsvAvTimestamp is populated then |
409 | we must set the MIC field of the AUTHENTICATE_MESSAGE */ | 406 | we must set the MIC field of the AUTHENTICATE_MESSAGE */ |
410 | 407 | ||
411 | ses->server->ntlmssp.server_flags = le32_to_cpu(pblob->NegotiateFlags); | ||
412 | |||
413 | tioffset = cpu_to_le16(pblob->TargetInfoArray.BufferOffset); | ||
414 | tilen = cpu_to_le16(pblob->TargetInfoArray.Length); | ||
415 | ses->server->tilen = tilen; | ||
416 | if (tilen) { | ||
417 | ses->server->tiblob = kmalloc(tilen, GFP_KERNEL); | ||
418 | if (!ses->server->tiblob) { | ||
419 | cERROR(1, "Challenge target info allocation failure"); | ||
420 | return -ENOMEM; | ||
421 | } | ||
422 | memcpy(ses->server->tiblob, bcc_ptr + tioffset, tilen); | ||
423 | } | ||
424 | |||
425 | return 0; | 408 | return 0; |
426 | } | 409 | } |
427 | 410 | ||
@@ -442,13 +425,12 @@ static void build_ntlmssp_negotiate_blob(unsigned char *pbuffer, | |||
442 | /* BB is NTLMV2 session security format easier to use here? */ | 425 | /* BB is NTLMV2 session security format easier to use here? */ |
443 | flags = NTLMSSP_NEGOTIATE_56 | NTLMSSP_REQUEST_TARGET | | 426 | flags = NTLMSSP_NEGOTIATE_56 | NTLMSSP_REQUEST_TARGET | |
444 | NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE | | 427 | NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE | |
445 | NTLMSSP_NEGOTIATE_NTLM; | 428 | NTLMSSP_NEGOTIATE_NT_ONLY | NTLMSSP_NEGOTIATE_NTLM; |
446 | if (ses->server->secMode & | 429 | if (ses->server->secMode & |
447 | (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) { | 430 | (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) |
448 | flags |= NTLMSSP_NEGOTIATE_SIGN | | 431 | flags |= NTLMSSP_NEGOTIATE_SIGN; |
449 | NTLMSSP_NEGOTIATE_KEY_XCH | | 432 | if (ses->server->secMode & SECMODE_SIGN_REQUIRED) |
450 | NTLMSSP_NEGOTIATE_EXTENDED_SEC; | 433 | flags |= NTLMSSP_NEGOTIATE_ALWAYS_SIGN; |
451 | } | ||
452 | 434 | ||
453 | sec_blob->NegotiateFlags |= cpu_to_le32(flags); | 435 | sec_blob->NegotiateFlags |= cpu_to_le32(flags); |
454 | 436 | ||
@@ -469,12 +451,10 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer, | |||
469 | struct cifsSesInfo *ses, | 451 | struct cifsSesInfo *ses, |
470 | const struct nls_table *nls_cp, bool first) | 452 | const struct nls_table *nls_cp, bool first) |
471 | { | 453 | { |
472 | int rc; | ||
473 | unsigned int size; | ||
474 | AUTHENTICATE_MESSAGE *sec_blob = (AUTHENTICATE_MESSAGE *)pbuffer; | 454 | AUTHENTICATE_MESSAGE *sec_blob = (AUTHENTICATE_MESSAGE *)pbuffer; |
475 | __u32 flags; | 455 | __u32 flags; |
476 | unsigned char *tmp; | 456 | unsigned char *tmp; |
477 | struct ntlmv2_resp ntlmv2_response = {}; | 457 | char ntlm_session_key[CIFS_SESS_KEY_SIZE]; |
478 | 458 | ||
479 | memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8); | 459 | memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8); |
480 | sec_blob->MessageType = NtLmAuthenticate; | 460 | sec_blob->MessageType = NtLmAuthenticate; |
@@ -497,25 +477,19 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer, | |||
497 | sec_blob->LmChallengeResponse.Length = 0; | 477 | sec_blob->LmChallengeResponse.Length = 0; |
498 | sec_blob->LmChallengeResponse.MaximumLength = 0; | 478 | sec_blob->LmChallengeResponse.MaximumLength = 0; |
499 | 479 | ||
500 | sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer); | 480 | /* calculate session key, BB what about adding similar ntlmv2 path? */ |
501 | rc = setup_ntlmv2_rsp(ses, (char *)&ntlmv2_response, nls_cp); | 481 | SMBNTencrypt(ses->password, ses->server->cryptKey, ntlm_session_key); |
502 | if (rc) { | 482 | if (first) |
503 | cERROR(1, "error rc: %d during ntlmssp ntlmv2 setup", rc); | 483 | cifs_calculate_mac_key(&ses->server->mac_signing_key, |
504 | goto setup_ntlmv2_ret; | 484 | ntlm_session_key, ses->password); |
505 | } | ||
506 | size = sizeof(struct ntlmv2_resp); | ||
507 | memcpy(tmp, (char *)&ntlmv2_response, size); | ||
508 | tmp += size; | ||
509 | if (ses->server->tilen > 0) { | ||
510 | memcpy(tmp, ses->server->tiblob, ses->server->tilen); | ||
511 | tmp += ses->server->tilen; | ||
512 | } else | ||
513 | ses->server->tilen = 0; | ||
514 | 485 | ||
515 | sec_blob->NtChallengeResponse.Length = cpu_to_le16(size + | 486 | memcpy(tmp, ntlm_session_key, CIFS_SESS_KEY_SIZE); |
516 | ses->server->tilen); | 487 | sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer); |
488 | sec_blob->NtChallengeResponse.Length = cpu_to_le16(CIFS_SESS_KEY_SIZE); | ||
517 | sec_blob->NtChallengeResponse.MaximumLength = | 489 | sec_blob->NtChallengeResponse.MaximumLength = |
518 | cpu_to_le16(size + ses->server->tilen); | 490 | cpu_to_le16(CIFS_SESS_KEY_SIZE); |
491 | |||
492 | tmp += CIFS_SESS_KEY_SIZE; | ||
519 | 493 | ||
520 | if (ses->domainName == NULL) { | 494 | if (ses->domainName == NULL) { |
521 | sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer); | 495 | sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer); |
@@ -527,6 +501,7 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer, | |||
527 | len = cifs_strtoUCS((__le16 *)tmp, ses->domainName, | 501 | len = cifs_strtoUCS((__le16 *)tmp, ses->domainName, |
528 | MAX_USERNAME_SIZE, nls_cp); | 502 | MAX_USERNAME_SIZE, nls_cp); |
529 | len *= 2; /* unicode is 2 bytes each */ | 503 | len *= 2; /* unicode is 2 bytes each */ |
504 | len += 2; /* trailing null */ | ||
530 | sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer); | 505 | sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer); |
531 | sec_blob->DomainName.Length = cpu_to_le16(len); | 506 | sec_blob->DomainName.Length = cpu_to_le16(len); |
532 | sec_blob->DomainName.MaximumLength = cpu_to_le16(len); | 507 | sec_blob->DomainName.MaximumLength = cpu_to_le16(len); |
@@ -543,6 +518,7 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer, | |||
543 | len = cifs_strtoUCS((__le16 *)tmp, ses->userName, | 518 | len = cifs_strtoUCS((__le16 *)tmp, ses->userName, |
544 | MAX_USERNAME_SIZE, nls_cp); | 519 | MAX_USERNAME_SIZE, nls_cp); |
545 | len *= 2; /* unicode is 2 bytes each */ | 520 | len *= 2; /* unicode is 2 bytes each */ |
521 | len += 2; /* trailing null */ | ||
546 | sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer); | 522 | sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer); |
547 | sec_blob->UserName.Length = cpu_to_le16(len); | 523 | sec_blob->UserName.Length = cpu_to_le16(len); |
548 | sec_blob->UserName.MaximumLength = cpu_to_le16(len); | 524 | sec_blob->UserName.MaximumLength = cpu_to_le16(len); |
@@ -554,26 +530,9 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer, | |||
554 | sec_blob->WorkstationName.MaximumLength = 0; | 530 | sec_blob->WorkstationName.MaximumLength = 0; |
555 | tmp += 2; | 531 | tmp += 2; |
556 | 532 | ||
557 | if ((ses->server->ntlmssp.server_flags & NTLMSSP_NEGOTIATE_KEY_XCH) && | 533 | sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer); |
558 | !calc_seckey(ses->server)) { | 534 | sec_blob->SessionKey.Length = 0; |
559 | memcpy(tmp, ses->server->ntlmssp.ciphertext, CIFS_CPHTXT_SIZE); | 535 | sec_blob->SessionKey.MaximumLength = 0; |
560 | sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer); | ||
561 | sec_blob->SessionKey.Length = cpu_to_le16(CIFS_CPHTXT_SIZE); | ||
562 | sec_blob->SessionKey.MaximumLength = | ||
563 | cpu_to_le16(CIFS_CPHTXT_SIZE); | ||
564 | tmp += CIFS_CPHTXT_SIZE; | ||
565 | } else { | ||
566 | sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer); | ||
567 | sec_blob->SessionKey.Length = 0; | ||
568 | sec_blob->SessionKey.MaximumLength = 0; | ||
569 | } | ||
570 | |||
571 | ses->server->sequence_number = 0; | ||
572 | |||
573 | setup_ntlmv2_ret: | ||
574 | if (ses->server->tilen > 0) | ||
575 | kfree(ses->server->tiblob); | ||
576 | |||
577 | return tmp - pbuffer; | 536 | return tmp - pbuffer; |
578 | } | 537 | } |
579 | 538 | ||
@@ -587,14 +546,15 @@ static void setup_ntlmssp_neg_req(SESSION_SETUP_ANDX *pSMB, | |||
587 | return; | 546 | return; |
588 | } | 547 | } |
589 | 548 | ||
590 | static int setup_ntlmssp_auth_req(char *ntlmsspblob, | 549 | static int setup_ntlmssp_auth_req(SESSION_SETUP_ANDX *pSMB, |
591 | struct cifsSesInfo *ses, | 550 | struct cifsSesInfo *ses, |
592 | const struct nls_table *nls, bool first_time) | 551 | const struct nls_table *nls, bool first_time) |
593 | { | 552 | { |
594 | int bloblen; | 553 | int bloblen; |
595 | 554 | ||
596 | bloblen = build_ntlmssp_auth_blob(ntlmsspblob, ses, nls, | 555 | bloblen = build_ntlmssp_auth_blob(&pSMB->req.SecurityBlob[0], ses, nls, |
597 | first_time); | 556 | first_time); |
557 | pSMB->req.SecurityBlobLength = cpu_to_le16(bloblen); | ||
598 | 558 | ||
599 | return bloblen; | 559 | return bloblen; |
600 | } | 560 | } |
@@ -730,7 +690,7 @@ ssetup_ntlmssp_authenticate: | |||
730 | 690 | ||
731 | if (first_time) /* should this be moved into common code | 691 | if (first_time) /* should this be moved into common code |
732 | with similar ntlmv2 path? */ | 692 | with similar ntlmv2 path? */ |
733 | cifs_calculate_session_key(&ses->server->session_key, | 693 | cifs_calculate_mac_key(&ses->server->mac_signing_key, |
734 | ntlm_session_key, ses->password); | 694 | ntlm_session_key, ses->password); |
735 | /* copy session key */ | 695 | /* copy session key */ |
736 | 696 | ||
@@ -769,21 +729,12 @@ ssetup_ntlmssp_authenticate: | |||
769 | cpu_to_le16(sizeof(struct ntlmv2_resp)); | 729 | cpu_to_le16(sizeof(struct ntlmv2_resp)); |
770 | 730 | ||
771 | /* calculate session key */ | 731 | /* calculate session key */ |
772 | rc = setup_ntlmv2_rsp(ses, v2_sess_key, nls_cp); | 732 | setup_ntlmv2_rsp(ses, v2_sess_key, nls_cp); |
773 | if (rc) { | ||
774 | kfree(v2_sess_key); | ||
775 | goto ssetup_exit; | ||
776 | } | ||
777 | /* FIXME: calculate MAC key */ | 733 | /* FIXME: calculate MAC key */ |
778 | memcpy(bcc_ptr, (char *)v2_sess_key, | 734 | memcpy(bcc_ptr, (char *)v2_sess_key, |
779 | sizeof(struct ntlmv2_resp)); | 735 | sizeof(struct ntlmv2_resp)); |
780 | bcc_ptr += sizeof(struct ntlmv2_resp); | 736 | bcc_ptr += sizeof(struct ntlmv2_resp); |
781 | kfree(v2_sess_key); | 737 | kfree(v2_sess_key); |
782 | if (ses->server->tilen > 0) { | ||
783 | memcpy(bcc_ptr, ses->server->tiblob, | ||
784 | ses->server->tilen); | ||
785 | bcc_ptr += ses->server->tilen; | ||
786 | } | ||
787 | if (ses->capabilities & CAP_UNICODE) { | 738 | if (ses->capabilities & CAP_UNICODE) { |
788 | if (iov[0].iov_len % 2) { | 739 | if (iov[0].iov_len % 2) { |
789 | *bcc_ptr = 0; | 740 | *bcc_ptr = 0; |
@@ -814,15 +765,15 @@ ssetup_ntlmssp_authenticate: | |||
814 | } | 765 | } |
815 | /* bail out if key is too long */ | 766 | /* bail out if key is too long */ |
816 | if (msg->sesskey_len > | 767 | if (msg->sesskey_len > |
817 | sizeof(ses->server->session_key.data.krb5)) { | 768 | sizeof(ses->server->mac_signing_key.data.krb5)) { |
818 | cERROR(1, "Kerberos signing key too long (%u bytes)", | 769 | cERROR(1, "Kerberos signing key too long (%u bytes)", |
819 | msg->sesskey_len); | 770 | msg->sesskey_len); |
820 | rc = -EOVERFLOW; | 771 | rc = -EOVERFLOW; |
821 | goto ssetup_exit; | 772 | goto ssetup_exit; |
822 | } | 773 | } |
823 | if (first_time) { | 774 | if (first_time) { |
824 | ses->server->session_key.len = msg->sesskey_len; | 775 | ses->server->mac_signing_key.len = msg->sesskey_len; |
825 | memcpy(ses->server->session_key.data.krb5, | 776 | memcpy(ses->server->mac_signing_key.data.krb5, |
826 | msg->data, msg->sesskey_len); | 777 | msg->data, msg->sesskey_len); |
827 | } | 778 | } |
828 | pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC; | 779 | pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC; |
@@ -864,28 +815,12 @@ ssetup_ntlmssp_authenticate: | |||
864 | if (phase == NtLmNegotiate) { | 815 | if (phase == NtLmNegotiate) { |
865 | setup_ntlmssp_neg_req(pSMB, ses); | 816 | setup_ntlmssp_neg_req(pSMB, ses); |
866 | iov[1].iov_len = sizeof(NEGOTIATE_MESSAGE); | 817 | iov[1].iov_len = sizeof(NEGOTIATE_MESSAGE); |
867 | iov[1].iov_base = &pSMB->req.SecurityBlob[0]; | ||
868 | } else if (phase == NtLmAuthenticate) { | 818 | } else if (phase == NtLmAuthenticate) { |
869 | int blob_len; | 819 | int blob_len; |
870 | char *ntlmsspblob; | 820 | blob_len = setup_ntlmssp_auth_req(pSMB, ses, |
871 | 821 | nls_cp, | |
872 | ntlmsspblob = kmalloc(5 * | 822 | first_time); |
873 | sizeof(struct _AUTHENTICATE_MESSAGE), | ||
874 | GFP_KERNEL); | ||
875 | if (!ntlmsspblob) { | ||
876 | cERROR(1, "Can't allocate NTLMSSP"); | ||
877 | rc = -ENOMEM; | ||
878 | goto ssetup_exit; | ||
879 | } | ||
880 | |||
881 | blob_len = setup_ntlmssp_auth_req(ntlmsspblob, | ||
882 | ses, | ||
883 | nls_cp, | ||
884 | first_time); | ||
885 | iov[1].iov_len = blob_len; | 823 | iov[1].iov_len = blob_len; |
886 | iov[1].iov_base = ntlmsspblob; | ||
887 | pSMB->req.SecurityBlobLength = | ||
888 | cpu_to_le16(blob_len); | ||
889 | /* Make sure that we tell the server that we | 824 | /* Make sure that we tell the server that we |
890 | are using the uid that it just gave us back | 825 | are using the uid that it just gave us back |
891 | on the response (challenge) */ | 826 | on the response (challenge) */ |
@@ -895,6 +830,7 @@ ssetup_ntlmssp_authenticate: | |||
895 | rc = -ENOSYS; | 830 | rc = -ENOSYS; |
896 | goto ssetup_exit; | 831 | goto ssetup_exit; |
897 | } | 832 | } |
833 | iov[1].iov_base = &pSMB->req.SecurityBlob[0]; | ||
898 | /* unicode strings must be word aligned */ | 834 | /* unicode strings must be word aligned */ |
899 | if ((iov[0].iov_len + iov[1].iov_len) % 2) { | 835 | if ((iov[0].iov_len + iov[1].iov_len) % 2) { |
900 | *bcc_ptr = 0; | 836 | *bcc_ptr = 0; |
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index e0588cdf4cc5..82f78c4d6978 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c | |||
@@ -543,7 +543,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, | |||
543 | (ses->server->secMode & (SECMODE_SIGN_REQUIRED | | 543 | (ses->server->secMode & (SECMODE_SIGN_REQUIRED | |
544 | SECMODE_SIGN_ENABLED))) { | 544 | SECMODE_SIGN_ENABLED))) { |
545 | rc = cifs_verify_signature(midQ->resp_buf, | 545 | rc = cifs_verify_signature(midQ->resp_buf, |
546 | ses->server, | 546 | &ses->server->mac_signing_key, |
547 | midQ->sequence_number+1); | 547 | midQ->sequence_number+1); |
548 | if (rc) { | 548 | if (rc) { |
549 | cERROR(1, "Unexpected SMB signature"); | 549 | cERROR(1, "Unexpected SMB signature"); |
@@ -731,7 +731,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, | |||
731 | (ses->server->secMode & (SECMODE_SIGN_REQUIRED | | 731 | (ses->server->secMode & (SECMODE_SIGN_REQUIRED | |
732 | SECMODE_SIGN_ENABLED))) { | 732 | SECMODE_SIGN_ENABLED))) { |
733 | rc = cifs_verify_signature(out_buf, | 733 | rc = cifs_verify_signature(out_buf, |
734 | ses->server, | 734 | &ses->server->mac_signing_key, |
735 | midQ->sequence_number+1); | 735 | midQ->sequence_number+1); |
736 | if (rc) { | 736 | if (rc) { |
737 | cERROR(1, "Unexpected SMB signature"); | 737 | cERROR(1, "Unexpected SMB signature"); |
@@ -981,7 +981,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon, | |||
981 | (ses->server->secMode & (SECMODE_SIGN_REQUIRED | | 981 | (ses->server->secMode & (SECMODE_SIGN_REQUIRED | |
982 | SECMODE_SIGN_ENABLED))) { | 982 | SECMODE_SIGN_ENABLED))) { |
983 | rc = cifs_verify_signature(out_buf, | 983 | rc = cifs_verify_signature(out_buf, |
984 | ses->server, | 984 | &ses->server->mac_signing_key, |
985 | midQ->sequence_number+1); | 985 | midQ->sequence_number+1); |
986 | if (rc) { | 986 | if (rc) { |
987 | cERROR(1, "Unexpected SMB signature"); | 987 | cERROR(1, "Unexpected SMB signature"); |
diff --git a/fs/coda/psdev.c b/fs/coda/psdev.c index de89645777c7..116af7546cf0 100644 --- a/fs/coda/psdev.c +++ b/fs/coda/psdev.c | |||
@@ -184,8 +184,8 @@ static ssize_t coda_psdev_write(struct file *file, const char __user *buf, | |||
184 | } | 184 | } |
185 | 185 | ||
186 | /* adjust outsize. is this useful ?? */ | 186 | /* adjust outsize. is this useful ?? */ |
187 | req->uc_outSize = nbytes; | 187 | req->uc_outSize = nbytes; |
188 | req->uc_flags |= REQ_WRITE; | 188 | req->uc_flags |= CODA_REQ_WRITE; |
189 | count = nbytes; | 189 | count = nbytes; |
190 | 190 | ||
191 | /* Convert filedescriptor into a file handle */ | 191 | /* Convert filedescriptor into a file handle */ |
diff --git a/fs/compat.c b/fs/compat.c index 718c7062aec1..0644a154672b 100644 --- a/fs/compat.c +++ b/fs/compat.c | |||
@@ -1153,7 +1153,7 @@ static ssize_t compat_do_readv_writev(int type, struct file *file, | |||
1153 | { | 1153 | { |
1154 | compat_ssize_t tot_len; | 1154 | compat_ssize_t tot_len; |
1155 | struct iovec iovstack[UIO_FASTIOV]; | 1155 | struct iovec iovstack[UIO_FASTIOV]; |
1156 | struct iovec *iov; | 1156 | struct iovec *iov = iovstack; |
1157 | ssize_t ret; | 1157 | ssize_t ret; |
1158 | io_fn_t fn; | 1158 | io_fn_t fn; |
1159 | iov_fn_t fnv; | 1159 | iov_fn_t fnv; |
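The compat.c change above initializes iov to the on-stack iovstack array so that early error paths can safely test and free it. A small user-space sketch of that "stack buffer first, heap on demand" pattern, with made-up names (FASTIOV_EXAMPLE, get_iovec_example):

#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>

#define FASTIOV_EXAMPLE 8

static struct iovec *get_iovec_example(unsigned long nr, struct iovec *stack)
{
        struct iovec *iov = stack;              /* initialized up front */

        if (nr > FASTIOV_EXAMPLE) {
                iov = malloc(nr * sizeof(*iov));
                if (!iov)
                        return NULL;
        }
        memset(iov, 0, nr * sizeof(*iov));
        return iov;
}

int main(void)
{
        struct iovec stack[FASTIOV_EXAMPLE];
        struct iovec *iov = get_iovec_example(4, stack);

        if (iov && iov != stack)                /* safe on every path */
                free(iov);
        return 0;
}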
diff --git a/fs/direct-io.c b/fs/direct-io.c index 51f270b479b6..48d74c7391d1 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c | |||
@@ -634,7 +634,7 @@ static int dio_send_cur_page(struct dio *dio) | |||
634 | int ret = 0; | 634 | int ret = 0; |
635 | 635 | ||
636 | if (dio->bio) { | 636 | if (dio->bio) { |
637 | loff_t cur_offset = dio->block_in_file << dio->blkbits; | 637 | loff_t cur_offset = dio->cur_page_fs_offset; |
638 | loff_t bio_next_offset = dio->logical_offset_in_bio + | 638 | loff_t bio_next_offset = dio->logical_offset_in_bio + |
639 | dio->bio->bi_size; | 639 | dio->bio->bi_size; |
640 | 640 | ||
@@ -659,7 +659,7 @@ static int dio_send_cur_page(struct dio *dio) | |||
659 | * Submit now if the underlying fs is about to perform a | 659 | * Submit now if the underlying fs is about to perform a |
660 | * metadata read | 660 | * metadata read |
661 | */ | 661 | */ |
662 | if (dio->boundary) | 662 | else if (dio->boundary) |
663 | dio_bio_submit(dio); | 663 | dio_bio_submit(dio); |
664 | } | 664 | } |
665 | 665 | ||
diff --git a/fs/exec.c b/fs/exec.c --- a/fs/exec.c +++ b/fs/exec.c | |||
@@ -376,6 +376,9 @@ static int count(const char __user * const __user * argv, int max) | |||
376 | argv++; | 376 | argv++; |
377 | if (i++ >= max) | 377 | if (i++ >= max) |
378 | return -E2BIG; | 378 | return -E2BIG; |
379 | |||
380 | if (fatal_signal_pending(current)) | ||
381 | return -ERESTARTNOHAND; | ||
379 | cond_resched(); | 382 | cond_resched(); |
380 | } | 383 | } |
381 | } | 384 | } |
@@ -419,6 +422,12 @@ static int copy_strings(int argc, const char __user *const __user *argv, | |||
419 | while (len > 0) { | 422 | while (len > 0) { |
420 | int offset, bytes_to_copy; | 423 | int offset, bytes_to_copy; |
421 | 424 | ||
425 | if (fatal_signal_pending(current)) { | ||
426 | ret = -ERESTARTNOHAND; | ||
427 | goto out; | ||
428 | } | ||
429 | cond_resched(); | ||
430 | |||
422 | offset = pos % PAGE_SIZE; | 431 | offset = pos % PAGE_SIZE; |
423 | if (offset == 0) | 432 | if (offset == 0) |
424 | offset = PAGE_SIZE; | 433 | offset = PAGE_SIZE; |
@@ -594,6 +603,11 @@ int setup_arg_pages(struct linux_binprm *bprm, | |||
594 | #else | 603 | #else |
595 | stack_top = arch_align_stack(stack_top); | 604 | stack_top = arch_align_stack(stack_top); |
596 | stack_top = PAGE_ALIGN(stack_top); | 605 | stack_top = PAGE_ALIGN(stack_top); |
606 | |||
607 | if (unlikely(stack_top < mmap_min_addr) || | ||
608 | unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr)) | ||
609 | return -ENOMEM; | ||
610 | |||
597 | stack_shift = vma->vm_end - stack_top; | 611 | stack_shift = vma->vm_end - stack_top; |
598 | 612 | ||
599 | bprm->p -= stack_shift; | 613 | bprm->p -= stack_shift; |
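The setup_arg_pages() hunk above adds a bounds check so the argument-page VMA always fits between mmap_min_addr and stack_top before the stack shift is computed. A sketch of that arithmetic with made-up example addresses:

#include <stdio.h>

int main(void)
{
        unsigned long mmap_min_addr = 0x10000;          /* example value */
        unsigned long stack_top     = 0x7fff0000;       /* example value */
        unsigned long vma_start     = 0x7ffe0000;       /* example VMA */
        unsigned long vma_end       = 0x7fff0000;

        if (stack_top < mmap_min_addr ||
            vma_end - vma_start >= stack_top - mmap_min_addr)
                printf("would fail with -ENOMEM\n");
        else
                printf("stack shift is safe: %lu bytes of room\n",
                       (stack_top - mmap_min_addr) - (vma_end - vma_start));
        return 0;
}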
diff --git a/fs/fcntl.c b/fs/fcntl.c index 6769fd0f35b8..f8cc34f542c3 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c | |||
@@ -769,11 +769,15 @@ EXPORT_SYMBOL(kill_fasync); | |||
769 | 769 | ||
770 | static int __init fcntl_init(void) | 770 | static int __init fcntl_init(void) |
771 | { | 771 | { |
772 | /* please add new bits here to ensure allocation uniqueness */ | 772 | /* |
773 | BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32( | 773 | * Please add new bits here to ensure allocation uniqueness. |
774 | * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY | ||
775 | * is defined as O_NONBLOCK on some platforms and not on others. | ||
776 | */ | ||
777 | BUILD_BUG_ON(18 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32( | ||
774 | O_RDONLY | O_WRONLY | O_RDWR | | 778 | O_RDONLY | O_WRONLY | O_RDWR | |
775 | O_CREAT | O_EXCL | O_NOCTTY | | 779 | O_CREAT | O_EXCL | O_NOCTTY | |
776 | O_TRUNC | O_APPEND | O_NONBLOCK | | 780 | O_TRUNC | O_APPEND | /* O_NONBLOCK | */ |
777 | __O_SYNC | O_DSYNC | FASYNC | | 781 | __O_SYNC | O_DSYNC | FASYNC | |
778 | O_DIRECT | O_LARGEFILE | O_DIRECTORY | | 782 | O_DIRECT | O_LARGEFILE | O_DIRECTORY | |
779 | O_NOFOLLOW | O_NOATIME | O_CLOEXEC | | 783 | O_NOFOLLOW | O_NOATIME | O_CLOEXEC | |
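The fcntl_init() hunk above relies on a simple property: if every O_* flag occupies its own bit, the popcount of their OR equals the number of nonzero flags. A sketch of that check, using arbitrary stand-in flag values (not the real O_* bits) and assuming a GCC-compatible compiler for __builtin_popcount():

#include <assert.h>
#include <stdio.h>

#define FLAG_A 0x01
#define FLAG_B 0x02
#define FLAG_C 0x04
#define FLAG_D 0x08

int main(void)
{
        unsigned int all = FLAG_A | FLAG_B | FLAG_C | FLAG_D;

        /* 4 distinct flags => 4 set bits; a shared bit would trip this */
        assert(__builtin_popcount(all) == 4);
        printf("all flag bits are unique\n");
        return 0;
}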
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 7d9d06ba184b..ab38fef1c9a1 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c | |||
@@ -52,8 +52,6 @@ struct wb_writeback_work { | |||
52 | #define CREATE_TRACE_POINTS | 52 | #define CREATE_TRACE_POINTS |
53 | #include <trace/events/writeback.h> | 53 | #include <trace/events/writeback.h> |
54 | 54 | ||
55 | #define inode_to_bdi(inode) ((inode)->i_mapping->backing_dev_info) | ||
56 | |||
57 | /* | 55 | /* |
58 | * We don't actually have pdflush, but this one is exported though /proc... | 56 | * We don't actually have pdflush, but this one is exported though /proc... |
59 | */ | 57 | */ |
@@ -71,6 +69,16 @@ int writeback_in_progress(struct backing_dev_info *bdi) | |||
71 | return test_bit(BDI_writeback_running, &bdi->state); | 69 | return test_bit(BDI_writeback_running, &bdi->state); |
72 | } | 70 | } |
73 | 71 | ||
72 | static inline struct backing_dev_info *inode_to_bdi(struct inode *inode) | ||
73 | { | ||
74 | struct super_block *sb = inode->i_sb; | ||
75 | |||
76 | if (strcmp(sb->s_type->name, "bdev") == 0) | ||
77 | return inode->i_mapping->backing_dev_info; | ||
78 | |||
79 | return sb->s_bdi; | ||
80 | } | ||
81 | |||
74 | static void bdi_queue_work(struct backing_dev_info *bdi, | 82 | static void bdi_queue_work(struct backing_dev_info *bdi, |
75 | struct wb_writeback_work *work) | 83 | struct wb_writeback_work *work) |
76 | { | 84 | { |
@@ -808,7 +816,7 @@ int bdi_writeback_thread(void *data) | |||
808 | wb->last_active = jiffies; | 816 | wb->last_active = jiffies; |
809 | 817 | ||
810 | set_current_state(TASK_INTERRUPTIBLE); | 818 | set_current_state(TASK_INTERRUPTIBLE); |
811 | if (!list_empty(&bdi->work_list)) { | 819 | if (!list_empty(&bdi->work_list) || kthread_should_stop()) { |
812 | __set_current_state(TASK_RUNNING); | 820 | __set_current_state(TASK_RUNNING); |
813 | continue; | 821 | continue; |
814 | } | 822 | } |
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 69ad053ffd78..cde755cca564 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c | |||
@@ -276,7 +276,7 @@ static void flush_bg_queue(struct fuse_conn *fc) | |||
276 | * Called with fc->lock, unlocks it | 276 | * Called with fc->lock, unlocks it |
277 | */ | 277 | */ |
278 | static void request_end(struct fuse_conn *fc, struct fuse_req *req) | 278 | static void request_end(struct fuse_conn *fc, struct fuse_req *req) |
279 | __releases(&fc->lock) | 279 | __releases(fc->lock) |
280 | { | 280 | { |
281 | void (*end) (struct fuse_conn *, struct fuse_req *) = req->end; | 281 | void (*end) (struct fuse_conn *, struct fuse_req *) = req->end; |
282 | req->end = NULL; | 282 | req->end = NULL; |
@@ -306,8 +306,8 @@ __releases(&fc->lock) | |||
306 | 306 | ||
307 | static void wait_answer_interruptible(struct fuse_conn *fc, | 307 | static void wait_answer_interruptible(struct fuse_conn *fc, |
308 | struct fuse_req *req) | 308 | struct fuse_req *req) |
309 | __releases(&fc->lock) | 309 | __releases(fc->lock) |
310 | __acquires(&fc->lock) | 310 | __acquires(fc->lock) |
311 | { | 311 | { |
312 | if (signal_pending(current)) | 312 | if (signal_pending(current)) |
313 | return; | 313 | return; |
@@ -325,8 +325,8 @@ static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req) | |||
325 | } | 325 | } |
326 | 326 | ||
327 | static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req) | 327 | static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req) |
328 | __releases(&fc->lock) | 328 | __releases(fc->lock) |
329 | __acquires(&fc->lock) | 329 | __acquires(fc->lock) |
330 | { | 330 | { |
331 | if (!fc->no_interrupt) { | 331 | if (!fc->no_interrupt) { |
332 | /* Any signal may interrupt this */ | 332 | /* Any signal may interrupt this */ |
@@ -905,8 +905,8 @@ static int request_pending(struct fuse_conn *fc) | |||
905 | 905 | ||
906 | /* Wait until a request is available on the pending list */ | 906 | /* Wait until a request is available on the pending list */ |
907 | static void request_wait(struct fuse_conn *fc) | 907 | static void request_wait(struct fuse_conn *fc) |
908 | __releases(&fc->lock) | 908 | __releases(fc->lock) |
909 | __acquires(&fc->lock) | 909 | __acquires(fc->lock) |
910 | { | 910 | { |
911 | DECLARE_WAITQUEUE(wait, current); | 911 | DECLARE_WAITQUEUE(wait, current); |
912 | 912 | ||
@@ -934,7 +934,7 @@ __acquires(&fc->lock) | |||
934 | */ | 934 | */ |
935 | static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs, | 935 | static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs, |
936 | size_t nbytes, struct fuse_req *req) | 936 | size_t nbytes, struct fuse_req *req) |
937 | __releases(&fc->lock) | 937 | __releases(fc->lock) |
938 | { | 938 | { |
939 | struct fuse_in_header ih; | 939 | struct fuse_in_header ih; |
940 | struct fuse_interrupt_in arg; | 940 | struct fuse_interrupt_in arg; |
@@ -1354,7 +1354,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode, | |||
1354 | loff_t file_size; | 1354 | loff_t file_size; |
1355 | unsigned int num; | 1355 | unsigned int num; |
1356 | unsigned int offset; | 1356 | unsigned int offset; |
1357 | size_t total_len; | 1357 | size_t total_len = 0; |
1358 | 1358 | ||
1359 | req = fuse_get_req(fc); | 1359 | req = fuse_get_req(fc); |
1360 | if (IS_ERR(req)) | 1360 | if (IS_ERR(req)) |
@@ -1720,8 +1720,8 @@ static unsigned fuse_dev_poll(struct file *file, poll_table *wait) | |||
1720 | * This function releases and reacquires fc->lock | 1720 | * This function releases and reacquires fc->lock |
1721 | */ | 1721 | */ |
1722 | static void end_requests(struct fuse_conn *fc, struct list_head *head) | 1722 | static void end_requests(struct fuse_conn *fc, struct list_head *head) |
1723 | __releases(&fc->lock) | 1723 | __releases(fc->lock) |
1724 | __acquires(&fc->lock) | 1724 | __acquires(fc->lock) |
1725 | { | 1725 | { |
1726 | while (!list_empty(head)) { | 1726 | while (!list_empty(head)) { |
1727 | struct fuse_req *req; | 1727 | struct fuse_req *req; |
@@ -1744,8 +1744,8 @@ __acquires(&fc->lock) | |||
1744 | * locked). | 1744 | * locked). |
1745 | */ | 1745 | */ |
1746 | static void end_io_requests(struct fuse_conn *fc) | 1746 | static void end_io_requests(struct fuse_conn *fc) |
1747 | __releases(&fc->lock) | 1747 | __releases(fc->lock) |
1748 | __acquires(&fc->lock) | 1748 | __acquires(fc->lock) |
1749 | { | 1749 | { |
1750 | while (!list_empty(&fc->io)) { | 1750 | while (!list_empty(&fc->io)) { |
1751 | struct fuse_req *req = | 1751 | struct fuse_req *req = |
@@ -1769,6 +1769,16 @@ __acquires(&fc->lock) | |||
1769 | } | 1769 | } |
1770 | } | 1770 | } |
1771 | 1771 | ||
1772 | static void end_queued_requests(struct fuse_conn *fc) | ||
1773 | __releases(fc->lock) | ||
1774 | __acquires(fc->lock) | ||
1775 | { | ||
1776 | fc->max_background = UINT_MAX; | ||
1777 | flush_bg_queue(fc); | ||
1778 | end_requests(fc, &fc->pending); | ||
1779 | end_requests(fc, &fc->processing); | ||
1780 | } | ||
1781 | |||
1772 | /* | 1782 | /* |
1773 | * Abort all requests. | 1783 | * Abort all requests. |
1774 | * | 1784 | * |
@@ -1795,8 +1805,7 @@ void fuse_abort_conn(struct fuse_conn *fc) | |||
1795 | fc->connected = 0; | 1805 | fc->connected = 0; |
1796 | fc->blocked = 0; | 1806 | fc->blocked = 0; |
1797 | end_io_requests(fc); | 1807 | end_io_requests(fc); |
1798 | end_requests(fc, &fc->pending); | 1808 | end_queued_requests(fc); |
1799 | end_requests(fc, &fc->processing); | ||
1800 | wake_up_all(&fc->waitq); | 1809 | wake_up_all(&fc->waitq); |
1801 | wake_up_all(&fc->blocked_waitq); | 1810 | wake_up_all(&fc->blocked_waitq); |
1802 | kill_fasync(&fc->fasync, SIGIO, POLL_IN); | 1811 | kill_fasync(&fc->fasync, SIGIO, POLL_IN); |
@@ -1811,8 +1820,9 @@ int fuse_dev_release(struct inode *inode, struct file *file) | |||
1811 | if (fc) { | 1820 | if (fc) { |
1812 | spin_lock(&fc->lock); | 1821 | spin_lock(&fc->lock); |
1813 | fc->connected = 0; | 1822 | fc->connected = 0; |
1814 | end_requests(fc, &fc->pending); | 1823 | fc->blocked = 0; |
1815 | end_requests(fc, &fc->processing); | 1824 | end_queued_requests(fc); |
1825 | wake_up_all(&fc->blocked_waitq); | ||
1816 | spin_unlock(&fc->lock); | 1826 | spin_unlock(&fc->lock); |
1817 | fuse_conn_put(fc); | 1827 | fuse_conn_put(fc); |
1818 | } | 1828 | } |
diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 147c1f71bdb9..c8224587123f 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c | |||
@@ -1144,8 +1144,8 @@ static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req) | |||
1144 | 1144 | ||
1145 | /* Called under fc->lock, may release and reacquire it */ | 1145 | /* Called under fc->lock, may release and reacquire it */ |
1146 | static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req) | 1146 | static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req) |
1147 | __releases(&fc->lock) | 1147 | __releases(fc->lock) |
1148 | __acquires(&fc->lock) | 1148 | __acquires(fc->lock) |
1149 | { | 1149 | { |
1150 | struct fuse_inode *fi = get_fuse_inode(req->inode); | 1150 | struct fuse_inode *fi = get_fuse_inode(req->inode); |
1151 | loff_t size = i_size_read(req->inode); | 1151 | loff_t size = i_size_read(req->inode); |
@@ -1183,8 +1183,8 @@ __acquires(&fc->lock) | |||
1183 | * Called with fc->lock | 1183 | * Called with fc->lock |
1184 | */ | 1184 | */ |
1185 | void fuse_flush_writepages(struct inode *inode) | 1185 | void fuse_flush_writepages(struct inode *inode) |
1186 | __releases(&fc->lock) | 1186 | __releases(fc->lock) |
1187 | __acquires(&fc->lock) | 1187 | __acquires(fc->lock) |
1188 | { | 1188 | { |
1189 | struct fuse_conn *fc = get_fuse_conn(inode); | 1189 | struct fuse_conn *fc = get_fuse_conn(inode); |
1190 | struct fuse_inode *fi = get_fuse_inode(inode); | 1190 | struct fuse_inode *fi = get_fuse_inode(inode); |
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c index cde1248a6225..ac750bd31a6f 100644 --- a/fs/gfs2/log.c +++ b/fs/gfs2/log.c | |||
@@ -932,7 +932,7 @@ int gfs2_logd(void *data) | |||
932 | 932 | ||
933 | do { | 933 | do { |
934 | prepare_to_wait(&sdp->sd_logd_waitq, &wait, | 934 | prepare_to_wait(&sdp->sd_logd_waitq, &wait, |
935 | TASK_UNINTERRUPTIBLE); | 935 | TASK_INTERRUPTIBLE); |
936 | if (!gfs2_ail_flush_reqd(sdp) && | 936 | if (!gfs2_ail_flush_reqd(sdp) && |
937 | !gfs2_jrnl_flush_reqd(sdp) && | 937 | !gfs2_jrnl_flush_reqd(sdp) && |
938 | !kthread_should_stop()) | 938 | !kthread_should_stop()) |
diff --git a/fs/minix/namei.c b/fs/minix/namei.c index e20ee85955d1..f3f3578393a4 100644 --- a/fs/minix/namei.c +++ b/fs/minix/namei.c | |||
@@ -115,7 +115,7 @@ static int minix_mkdir(struct inode * dir, struct dentry *dentry, int mode) | |||
115 | 115 | ||
116 | inode_inc_link_count(dir); | 116 | inode_inc_link_count(dir); |
117 | 117 | ||
118 | inode = minix_new_inode(dir, mode, &err); | 118 | inode = minix_new_inode(dir, S_IFDIR | mode, &err); |
119 | if (!inode) | 119 | if (!inode) |
120 | goto out_dir; | 120 | goto out_dir; |
121 | 121 | ||
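The minix_mkdir() fix above ORs S_IFDIR into the mode before allocating the inode, because the mode from mkdir(2) carries only permission bits. A short sketch showing why the file-type bits matter:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
        mode_t req = 0755;              /* what mkdir(2) passes down */
        mode_t full = S_IFDIR | req;    /* what the inode should hold */

        printf("S_ISDIR(0%o) = %d\n", (unsigned int)req, S_ISDIR(req) != 0);   /* 0 */
        printf("S_ISDIR(0%o) = %d\n", (unsigned int)full, S_ISDIR(full) != 0); /* 1 */
        return 0;
}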
diff --git a/fs/namespace.c b/fs/namespace.c index de402eb6eafb..a72eaabfe8f2 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
@@ -1484,13 +1484,30 @@ out_unlock: | |||
1484 | } | 1484 | } |
1485 | 1485 | ||
1486 | /* | 1486 | /* |
1487 | * Sanity check the flags to change_mnt_propagation. | ||
1488 | */ | ||
1489 | |||
1490 | static int flags_to_propagation_type(int flags) | ||
1491 | { | ||
1492 | int type = flags & ~MS_REC; | ||
1493 | |||
1494 | /* Fail if any non-propagation flags are set */ | ||
1495 | if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE)) | ||
1496 | return 0; | ||
1497 | /* Only one propagation flag should be set */ | ||
1498 | if (!is_power_of_2(type)) | ||
1499 | return 0; | ||
1500 | return type; | ||
1501 | } | ||
1502 | |||
1503 | /* | ||
1487 | * recursively change the type of the mountpoint. | 1504 | * recursively change the type of the mountpoint. |
1488 | */ | 1505 | */ |
1489 | static int do_change_type(struct path *path, int flag) | 1506 | static int do_change_type(struct path *path, int flag) |
1490 | { | 1507 | { |
1491 | struct vfsmount *m, *mnt = path->mnt; | 1508 | struct vfsmount *m, *mnt = path->mnt; |
1492 | int recurse = flag & MS_REC; | 1509 | int recurse = flag & MS_REC; |
1493 | int type = flag & ~MS_REC; | 1510 | int type; |
1494 | int err = 0; | 1511 | int err = 0; |
1495 | 1512 | ||
1496 | if (!capable(CAP_SYS_ADMIN)) | 1513 | if (!capable(CAP_SYS_ADMIN)) |
@@ -1499,6 +1516,10 @@ static int do_change_type(struct path *path, int flag) | |||
1499 | if (path->dentry != path->mnt->mnt_root) | 1516 | if (path->dentry != path->mnt->mnt_root) |
1500 | return -EINVAL; | 1517 | return -EINVAL; |
1501 | 1518 | ||
1519 | type = flags_to_propagation_type(flag); | ||
1520 | if (!type) | ||
1521 | return -EINVAL; | ||
1522 | |||
1502 | down_write(&namespace_sem); | 1523 | down_write(&namespace_sem); |
1503 | if (type == MS_SHARED) { | 1524 | if (type == MS_SHARED) { |
1504 | err = invent_group_ids(mnt, recurse); | 1525 | err = invent_group_ids(mnt, recurse); |
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig index 6c2aad49d731..f7e13db613cb 100644 --- a/fs/nfs/Kconfig +++ b/fs/nfs/Kconfig | |||
@@ -63,6 +63,7 @@ config NFS_V3_ACL | |||
63 | config NFS_V4 | 63 | config NFS_V4 |
64 | bool "NFS client support for NFS version 4" | 64 | bool "NFS client support for NFS version 4" |
65 | depends on NFS_FS | 65 | depends on NFS_FS |
66 | select SUNRPC_GSS | ||
66 | help | 67 | help |
67 | This option enables support for version 4 of the NFS protocol | 68 | This option enables support for version 4 of the NFS protocol |
68 | (RFC 3530) in the kernel's NFS client. | 69 | (RFC 3530) in the kernel's NFS client. |
diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 4e7df2adb212..e7340729af89 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c | |||
@@ -275,7 +275,7 @@ static int nfs_sockaddr_match_ipaddr6(const struct sockaddr *sa1, | |||
275 | sin1->sin6_scope_id != sin2->sin6_scope_id) | 275 | sin1->sin6_scope_id != sin2->sin6_scope_id) |
276 | return 0; | 276 | return 0; |
277 | 277 | ||
278 | return ipv6_addr_equal(&sin1->sin6_addr, &sin1->sin6_addr); | 278 | return ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr); |
279 | } | 279 | } |
280 | #else /* !defined(CONFIG_IPV6) && !defined(CONFIG_IPV6_MODULE) */ | 280 | #else /* !defined(CONFIG_IPV6) && !defined(CONFIG_IPV6_MODULE) */ |
281 | static int nfs_sockaddr_match_ipaddr6(const struct sockaddr *sa1, | 281 | static int nfs_sockaddr_match_ipaddr6(const struct sockaddr *sa1, |
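The one-character nfs/client.c fix above corrects a comparison that compared an address against itself and therefore always matched. A user-space sketch of the corrected comparison; memcmp() here merely stands in for the kernel's ipv6_addr_equal():

#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

static int addrs_equal_example(const struct sockaddr_in6 *a,
                               const struct sockaddr_in6 *b)
{
        /* the fix: compare a against b, not a against a */
        return memcmp(&a->sin6_addr, &b->sin6_addr, sizeof(a->sin6_addr)) == 0;
}

int main(void)
{
        struct sockaddr_in6 a = { .sin6_family = AF_INET6 };
        struct sockaddr_in6 b = a;

        b.sin6_addr.s6_addr[15] = 1;    /* ::1, differs from :: */
        printf("equal: %d\n", addrs_equal_example(&a, &b));    /* prints 0 */
        return 0;
}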
diff --git a/fs/nfs/file.c b/fs/nfs/file.c index eb51bd6201da..05bf3c0dc751 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c | |||
@@ -723,10 +723,6 @@ static int do_vfs_lock(struct file *file, struct file_lock *fl) | |||
723 | default: | 723 | default: |
724 | BUG(); | 724 | BUG(); |
725 | } | 725 | } |
726 | if (res < 0) | ||
727 | dprintk(KERN_WARNING "%s: VFS is out of sync with lock manager" | ||
728 | " - error %d!\n", | ||
729 | __func__, res); | ||
730 | return res; | 726 | return res; |
731 | } | 727 | } |
732 | 728 | ||
diff --git a/fs/nfs/super.c b/fs/nfs/super.c index ec3966e4706b..f4cbf0c306c6 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c | |||
@@ -431,7 +431,15 @@ static int nfs_statfs(struct dentry *dentry, struct kstatfs *buf) | |||
431 | goto out_err; | 431 | goto out_err; |
432 | 432 | ||
433 | error = server->nfs_client->rpc_ops->statfs(server, fh, &res); | 433 | error = server->nfs_client->rpc_ops->statfs(server, fh, &res); |
434 | if (unlikely(error == -ESTALE)) { | ||
435 | struct dentry *pd_dentry; | ||
434 | 436 | ||
437 | pd_dentry = dget_parent(dentry); | ||
438 | if (pd_dentry != NULL) { | ||
439 | nfs_zap_caches(pd_dentry->d_inode); | ||
440 | dput(pd_dentry); | ||
441 | } | ||
442 | } | ||
435 | nfs_free_fattr(res.fattr); | 443 | nfs_free_fattr(res.fattr); |
436 | if (error < 0) | 444 | if (error < 0) |
437 | goto out_err; | 445 | goto out_err; |
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig index 95932f523aef..4264377552e2 100644 --- a/fs/nfsd/Kconfig +++ b/fs/nfsd/Kconfig | |||
@@ -69,6 +69,7 @@ config NFSD_V4 | |||
69 | depends on NFSD && PROC_FS && EXPERIMENTAL | 69 | depends on NFSD && PROC_FS && EXPERIMENTAL |
70 | select NFSD_V3 | 70 | select NFSD_V3 |
71 | select FS_POSIX_ACL | 71 | select FS_POSIX_ACL |
72 | select SUNRPC_GSS | ||
72 | help | 73 | help |
73 | This option enables support in your system's NFS server for | 74 | This option enables support in your system's NFS server for |
74 | version 4 of the NFS protocol (RFC 3530). | 75 | version 4 of the NFS protocol (RFC 3530). |
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 3dfef0623968..cf0d2ffb3c84 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c | |||
@@ -440,7 +440,7 @@ test_share(struct nfs4_stateid *stp, struct nfsd4_open *open) { | |||
440 | 440 | ||
441 | static int nfs4_access_to_omode(u32 access) | 441 | static int nfs4_access_to_omode(u32 access) |
442 | { | 442 | { |
443 | switch (access) { | 443 | switch (access & NFS4_SHARE_ACCESS_BOTH) { |
444 | case NFS4_SHARE_ACCESS_READ: | 444 | case NFS4_SHARE_ACCESS_READ: |
445 | return O_RDONLY; | 445 | return O_RDONLY; |
446 | case NFS4_SHARE_ACCESS_WRITE: | 446 | case NFS4_SHARE_ACCESS_WRITE: |
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c index 4317f177ea7c..ba7c10c917fc 100644 --- a/fs/nilfs2/the_nilfs.c +++ b/fs/nilfs2/the_nilfs.c | |||
@@ -446,6 +446,7 @@ int load_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi) | |||
446 | nilfs_mdt_destroy(nilfs->ns_cpfile); | 446 | nilfs_mdt_destroy(nilfs->ns_cpfile); |
447 | nilfs_mdt_destroy(nilfs->ns_sufile); | 447 | nilfs_mdt_destroy(nilfs->ns_sufile); |
448 | nilfs_mdt_destroy(nilfs->ns_dat); | 448 | nilfs_mdt_destroy(nilfs->ns_dat); |
449 | nilfs_mdt_destroy(nilfs->ns_gc_dat); | ||
449 | 450 | ||
450 | failed: | 451 | failed: |
451 | nilfs_clear_recovery_info(&ri); | 452 | nilfs_clear_recovery_info(&ri); |
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c index a76e0aa5cd3f..391915093fe1 100644 --- a/fs/ocfs2/acl.c +++ b/fs/ocfs2/acl.c | |||
@@ -209,7 +209,10 @@ static int ocfs2_acl_set_mode(struct inode *inode, struct buffer_head *di_bh, | |||
209 | } | 209 | } |
210 | 210 | ||
211 | inode->i_mode = new_mode; | 211 | inode->i_mode = new_mode; |
212 | inode->i_ctime = CURRENT_TIME; | ||
212 | di->i_mode = cpu_to_le16(inode->i_mode); | 213 | di->i_mode = cpu_to_le16(inode->i_mode); |
214 | di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec); | ||
215 | di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec); | ||
213 | 216 | ||
214 | ocfs2_journal_dirty(handle, di_bh); | 217 | ocfs2_journal_dirty(handle, di_bh); |
215 | 218 | ||
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index 215e12ce1d85..592fae5007d1 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c | |||
@@ -6672,7 +6672,7 @@ int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end, | |||
6672 | last_page_bytes = PAGE_ALIGN(end); | 6672 | last_page_bytes = PAGE_ALIGN(end); |
6673 | index = start >> PAGE_CACHE_SHIFT; | 6673 | index = start >> PAGE_CACHE_SHIFT; |
6674 | do { | 6674 | do { |
6675 | pages[numpages] = grab_cache_page(mapping, index); | 6675 | pages[numpages] = find_or_create_page(mapping, index, GFP_NOFS); |
6676 | if (!pages[numpages]) { | 6676 | if (!pages[numpages]) { |
6677 | ret = -ENOMEM; | 6677 | ret = -ENOMEM; |
6678 | mlog_errno(ret); | 6678 | mlog_errno(ret); |
diff --git a/fs/ocfs2/blockcheck.c b/fs/ocfs2/blockcheck.c index ec6d12339593..c7ee03c22226 100644 --- a/fs/ocfs2/blockcheck.c +++ b/fs/ocfs2/blockcheck.c | |||
@@ -439,7 +439,7 @@ int ocfs2_block_check_validate(void *data, size_t blocksize, | |||
439 | 439 | ||
440 | ocfs2_blockcheck_inc_failure(stats); | 440 | ocfs2_blockcheck_inc_failure(stats); |
441 | mlog(ML_ERROR, | 441 | mlog(ML_ERROR, |
442 | "CRC32 failed: stored: %u, computed %u. Applying ECC.\n", | 442 | "CRC32 failed: stored: 0x%x, computed 0x%x. Applying ECC.\n", |
443 | (unsigned int)check.bc_crc32e, (unsigned int)crc); | 443 | (unsigned int)check.bc_crc32e, (unsigned int)crc); |
444 | 444 | ||
445 | /* Ok, try ECC fixups */ | 445 | /* Ok, try ECC fixups */ |
@@ -453,7 +453,7 @@ int ocfs2_block_check_validate(void *data, size_t blocksize, | |||
453 | goto out; | 453 | goto out; |
454 | } | 454 | } |
455 | 455 | ||
456 | mlog(ML_ERROR, "Fixed CRC32 failed: stored: %u, computed %u\n", | 456 | mlog(ML_ERROR, "Fixed CRC32 failed: stored: 0x%x, computed 0x%x\n", |
457 | (unsigned int)check.bc_crc32e, (unsigned int)crc); | 457 | (unsigned int)check.bc_crc32e, (unsigned int)crc); |
458 | 458 | ||
459 | rc = -EIO; | 459 | rc = -EIO; |
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index 1361997cf205..cbe2f057cc28 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c | |||
@@ -977,7 +977,7 @@ static int o2net_tx_can_proceed(struct o2net_node *nn, | |||
977 | int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec, | 977 | int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec, |
978 | size_t caller_veclen, u8 target_node, int *status) | 978 | size_t caller_veclen, u8 target_node, int *status) |
979 | { | 979 | { |
980 | int ret; | 980 | int ret = 0; |
981 | struct o2net_msg *msg = NULL; | 981 | struct o2net_msg *msg = NULL; |
982 | size_t veclen, caller_bytes = 0; | 982 | size_t veclen, caller_bytes = 0; |
983 | struct kvec *vec = NULL; | 983 | struct kvec *vec = NULL; |
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c index f04ebcfffc4a..c49f6de0e7ab 100644 --- a/fs/ocfs2/dir.c +++ b/fs/ocfs2/dir.c | |||
@@ -3931,6 +3931,15 @@ static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir, | |||
3931 | goto out_commit; | 3931 | goto out_commit; |
3932 | } | 3932 | } |
3933 | 3933 | ||
3934 | cpos = split_hash; | ||
3935 | ret = ocfs2_dx_dir_new_cluster(dir, &et, cpos, handle, | ||
3936 | data_ac, meta_ac, new_dx_leaves, | ||
3937 | num_dx_leaves); | ||
3938 | if (ret) { | ||
3939 | mlog_errno(ret); | ||
3940 | goto out_commit; | ||
3941 | } | ||
3942 | |||
3934 | for (i = 0; i < num_dx_leaves; i++) { | 3943 | for (i = 0; i < num_dx_leaves; i++) { |
3935 | ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), | 3944 | ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), |
3936 | orig_dx_leaves[i], | 3945 | orig_dx_leaves[i], |
@@ -3939,15 +3948,14 @@ static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir, | |||
3939 | mlog_errno(ret); | 3948 | mlog_errno(ret); |
3940 | goto out_commit; | 3949 | goto out_commit; |
3941 | } | 3950 | } |
3942 | } | ||
3943 | 3951 | ||
3944 | cpos = split_hash; | 3952 | ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), |
3945 | ret = ocfs2_dx_dir_new_cluster(dir, &et, cpos, handle, | 3953 | new_dx_leaves[i], |
3946 | data_ac, meta_ac, new_dx_leaves, | 3954 | OCFS2_JOURNAL_ACCESS_WRITE); |
3947 | num_dx_leaves); | 3955 | if (ret) { |
3948 | if (ret) { | 3956 | mlog_errno(ret); |
3949 | mlog_errno(ret); | 3957 | goto out_commit; |
3950 | goto out_commit; | 3958 | } |
3951 | } | 3959 | } |
3952 | 3960 | ||
3953 | ocfs2_dx_dir_transfer_leaf(dir, split_hash, handle, tmp_dx_leaf, | 3961 | ocfs2_dx_dir_transfer_leaf(dir, split_hash, handle, tmp_dx_leaf, |
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h index 4b6ae2c13b47..765298908f1d 100644 --- a/fs/ocfs2/dlm/dlmcommon.h +++ b/fs/ocfs2/dlm/dlmcommon.h | |||
@@ -1030,6 +1030,7 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, | |||
1030 | struct dlm_lock_resource *res); | 1030 | struct dlm_lock_resource *res); |
1031 | void dlm_clean_master_list(struct dlm_ctxt *dlm, | 1031 | void dlm_clean_master_list(struct dlm_ctxt *dlm, |
1032 | u8 dead_node); | 1032 | u8 dead_node); |
1033 | void dlm_force_free_mles(struct dlm_ctxt *dlm); | ||
1033 | int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock); | 1034 | int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock); |
1034 | int __dlm_lockres_has_locks(struct dlm_lock_resource *res); | 1035 | int __dlm_lockres_has_locks(struct dlm_lock_resource *res); |
1035 | int __dlm_lockres_unused(struct dlm_lock_resource *res); | 1036 | int __dlm_lockres_unused(struct dlm_lock_resource *res); |
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c index 5efdd37dfe48..901ca52bf86b 100644 --- a/fs/ocfs2/dlm/dlmdebug.c +++ b/fs/ocfs2/dlm/dlmdebug.c | |||
@@ -636,8 +636,14 @@ static void *lockres_seq_start(struct seq_file *m, loff_t *pos) | |||
636 | spin_lock(&dlm->track_lock); | 636 | spin_lock(&dlm->track_lock); |
637 | if (oldres) | 637 | if (oldres) |
638 | track_list = &oldres->tracking; | 638 | track_list = &oldres->tracking; |
639 | else | 639 | else { |
640 | track_list = &dlm->tracking_list; | 640 | track_list = &dlm->tracking_list; |
641 | if (list_empty(track_list)) { | ||
642 | dl = NULL; | ||
643 | spin_unlock(&dlm->track_lock); | ||
644 | goto bail; | ||
645 | } | ||
646 | } | ||
641 | 647 | ||
642 | list_for_each_entry(res, track_list, tracking) { | 648 | list_for_each_entry(res, track_list, tracking) { |
643 | if (&res->tracking == &dlm->tracking_list) | 649 | if (&res->tracking == &dlm->tracking_list) |
@@ -660,6 +666,7 @@ static void *lockres_seq_start(struct seq_file *m, loff_t *pos) | |||
660 | } else | 666 | } else |
661 | dl = NULL; | 667 | dl = NULL; |
662 | 668 | ||
669 | bail: | ||
663 | /* passed to seq_show */ | 670 | /* passed to seq_show */ |
664 | return dl; | 671 | return dl; |
665 | } | 672 | } |
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c index 153abb5abef0..11a5c87fd7f7 100644 --- a/fs/ocfs2/dlm/dlmdomain.c +++ b/fs/ocfs2/dlm/dlmdomain.c | |||
@@ -693,6 +693,7 @@ void dlm_unregister_domain(struct dlm_ctxt *dlm) | |||
693 | 693 | ||
694 | dlm_mark_domain_leaving(dlm); | 694 | dlm_mark_domain_leaving(dlm); |
695 | dlm_leave_domain(dlm); | 695 | dlm_leave_domain(dlm); |
696 | dlm_force_free_mles(dlm); | ||
696 | dlm_complete_dlm_shutdown(dlm); | 697 | dlm_complete_dlm_shutdown(dlm); |
697 | } | 698 | } |
698 | dlm_put(dlm); | 699 | dlm_put(dlm); |
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index ffb4c68dafa4..f564b0e5f80d 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c | |||
@@ -3433,3 +3433,43 @@ void dlm_lockres_release_ast(struct dlm_ctxt *dlm, | |||
3433 | wake_up(&res->wq); | 3433 | wake_up(&res->wq); |
3434 | wake_up(&dlm->migration_wq); | 3434 | wake_up(&dlm->migration_wq); |
3435 | } | 3435 | } |
3436 | |||
3437 | void dlm_force_free_mles(struct dlm_ctxt *dlm) | ||
3438 | { | ||
3439 | int i; | ||
3440 | struct hlist_head *bucket; | ||
3441 | struct dlm_master_list_entry *mle; | ||
3442 | struct hlist_node *tmp, *list; | ||
3443 | |||
3444 | /* | ||
3445 | * We notified all other nodes that we are exiting the domain and | ||
3446 | * marked the dlm state to DLM_CTXT_LEAVING. If any mles are still | ||
3447 | * around we force free them and wake any processes that are waiting | ||
3448 | * on the mles | ||
3449 | */ | ||
3450 | spin_lock(&dlm->spinlock); | ||
3451 | spin_lock(&dlm->master_lock); | ||
3452 | |||
3453 | BUG_ON(dlm->dlm_state != DLM_CTXT_LEAVING); | ||
3454 | BUG_ON((find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 0) < O2NM_MAX_NODES)); | ||
3455 | |||
3456 | for (i = 0; i < DLM_HASH_BUCKETS; i++) { | ||
3457 | bucket = dlm_master_hash(dlm, i); | ||
3458 | hlist_for_each_safe(list, tmp, bucket) { | ||
3459 | mle = hlist_entry(list, struct dlm_master_list_entry, | ||
3460 | master_hash_node); | ||
3461 | if (mle->type != DLM_MLE_BLOCK) { | ||
3462 | mlog(ML_ERROR, "bad mle: %p\n", mle); | ||
3463 | dlm_print_one_mle(mle); | ||
3464 | } | ||
3465 | atomic_set(&mle->woken, 1); | ||
3466 | wake_up(&mle->wq); | ||
3467 | |||
3468 | __dlm_unlink_mle(dlm, mle); | ||
3469 | __dlm_mle_detach_hb_events(dlm, mle); | ||
3470 | __dlm_put_mle(mle); | ||
3471 | } | ||
3472 | } | ||
3473 | spin_unlock(&dlm->master_lock); | ||
3474 | spin_unlock(&dlm->spinlock); | ||
3475 | } | ||
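dlm_force_free_mles() above walks each hash bucket with hlist_for_each_safe() so entries can be unlinked and dropped during the walk. A minimal user-space sketch of the general delete-while-iterating pattern on a plain singly linked list (not the kernel hlist API):

#include <stdio.h>
#include <stdlib.h>

struct node {
        int val;
        struct node *next;
};

int main(void)
{
        struct node *head = NULL, *n, *next;
        int i;

        for (i = 0; i < 3; i++) {
                n = malloc(sizeof(*n));
                if (!n)
                        return 1;
                n->val = i;
                n->next = head;
                head = n;
        }

        for (n = head; n; n = next) {
                next = n->next;         /* saved before the node goes away */
                printf("freeing %d\n", n->val);
                free(n);
        }
        return 0;
}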
diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h index d1ce48e1b3d6..1d596d8c4a4a 100644 --- a/fs/ocfs2/dlmglue.h +++ b/fs/ocfs2/dlmglue.h | |||
@@ -84,6 +84,7 @@ enum { | |||
84 | OI_LS_PARENT, | 84 | OI_LS_PARENT, |
85 | OI_LS_RENAME1, | 85 | OI_LS_RENAME1, |
86 | OI_LS_RENAME2, | 86 | OI_LS_RENAME2, |
87 | OI_LS_REFLINK_TARGET, | ||
87 | }; | 88 | }; |
88 | 89 | ||
89 | int ocfs2_dlm_init(struct ocfs2_super *osb); | 90 | int ocfs2_dlm_init(struct ocfs2_super *osb); |
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 81296b4e3646..9a03c151b5ce 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/writeback.h> | 36 | #include <linux/writeback.h> |
37 | #include <linux/falloc.h> | 37 | #include <linux/falloc.h> |
38 | #include <linux/quotaops.h> | 38 | #include <linux/quotaops.h> |
39 | #include <linux/blkdev.h> | ||
39 | 40 | ||
40 | #define MLOG_MASK_PREFIX ML_INODE | 41 | #define MLOG_MASK_PREFIX ML_INODE |
41 | #include <cluster/masklog.h> | 42 | #include <cluster/masklog.h> |
@@ -190,8 +191,16 @@ static int ocfs2_sync_file(struct file *file, int datasync) | |||
190 | if (err) | 191 | if (err) |
191 | goto bail; | 192 | goto bail; |
192 | 193 | ||
193 | if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) | 194 | if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) { |
195 | /* | ||
196 | * We still have to flush drive's caches to get data to the | ||
197 | * platter | ||
198 | */ | ||
199 | if (osb->s_mount_opt & OCFS2_MOUNT_BARRIER) | ||
200 | blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, | ||
201 | NULL, BLKDEV_IFL_WAIT); | ||
194 | goto bail; | 202 | goto bail; |
203 | } | ||
195 | 204 | ||
196 | journal = osb->journal->j_journal; | 205 | journal = osb->journal->j_journal; |
197 | err = jbd2_journal_force_commit(journal); | 206 | err = jbd2_journal_force_commit(journal); |
@@ -774,7 +783,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from, | |||
774 | BUG_ON(abs_to > (((u64)index + 1) << PAGE_CACHE_SHIFT)); | 783 | BUG_ON(abs_to > (((u64)index + 1) << PAGE_CACHE_SHIFT)); |
775 | BUG_ON(abs_from & (inode->i_blkbits - 1)); | 784 | BUG_ON(abs_from & (inode->i_blkbits - 1)); |
776 | 785 | ||
777 | page = grab_cache_page(mapping, index); | 786 | page = find_or_create_page(mapping, index, GFP_NOFS); |
778 | if (!page) { | 787 | if (!page) { |
779 | ret = -ENOMEM; | 788 | ret = -ENOMEM; |
780 | mlog_errno(ret); | 789 | mlog_errno(ret); |
@@ -2329,7 +2338,7 @@ out_dio: | |||
2329 | BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT)); | 2338 | BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT)); |
2330 | 2339 | ||
2331 | if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) || | 2340 | if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) || |
2332 | ((file->f_flags & O_DIRECT) && has_refcount)) { | 2341 | ((file->f_flags & O_DIRECT) && !direct_io)) { |
2333 | ret = filemap_fdatawrite_range(file->f_mapping, pos, | 2342 | ret = filemap_fdatawrite_range(file->f_mapping, pos, |
2334 | pos + count - 1); | 2343 | pos + count - 1); |
2335 | if (ret < 0) | 2344 | if (ret < 0) |
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c index 0492464916b1..eece3e05d9d0 100644 --- a/fs/ocfs2/inode.c +++ b/fs/ocfs2/inode.c | |||
@@ -488,7 +488,11 @@ static int ocfs2_read_locked_inode(struct inode *inode, | |||
488 | OCFS2_BH_IGNORE_CACHE); | 488 | OCFS2_BH_IGNORE_CACHE); |
489 | } else { | 489 | } else { |
490 | status = ocfs2_read_blocks_sync(osb, args->fi_blkno, 1, &bh); | 490 | status = ocfs2_read_blocks_sync(osb, args->fi_blkno, 1, &bh); |
491 | if (!status) | 491 | /* |
492 | * If buffer is in jbd, then its checksum may not have been | ||
493 | * computed as yet. | ||
494 | */ | ||
495 | if (!status && !buffer_jbd(bh)) | ||
492 | status = ocfs2_validate_inode_block(osb->sb, bh); | 496 | status = ocfs2_validate_inode_block(osb->sb, bh); |
493 | } | 497 | } |
494 | if (status < 0) { | 498 | if (status < 0) { |
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c index af2b8fe1f139..4c18f4ad93b4 100644 --- a/fs/ocfs2/mmap.c +++ b/fs/ocfs2/mmap.c | |||
@@ -74,9 +74,11 @@ static int __ocfs2_page_mkwrite(struct inode *inode, struct buffer_head *di_bh, | |||
74 | /* | 74 | /* |
75 | * Another node might have truncated while we were waiting on | 75 | * Another node might have truncated while we were waiting on |
76 | * cluster locks. | 76 | * cluster locks. |
77 | * We don't check size == 0 before the shift. This is borrowed | ||
78 | * from do_generic_file_read. | ||
77 | */ | 79 | */ |
78 | last_index = size >> PAGE_CACHE_SHIFT; | 80 | last_index = (size - 1) >> PAGE_CACHE_SHIFT; |
79 | if (page->index > last_index) { | 81 | if (unlikely(!size || page->index > last_index)) { |
80 | ret = -EINVAL; | 82 | ret = -EINVAL; |
81 | goto out; | 83 | goto out; |
82 | } | 84 | } |
@@ -107,7 +109,7 @@ static int __ocfs2_page_mkwrite(struct inode *inode, struct buffer_head *di_bh, | |||
107 | * because the "write" would invalidate their data. | 109 | * because the "write" would invalidate their data. |
108 | */ | 110 | */ |
109 | if (page->index == last_index) | 111 | if (page->index == last_index) |
110 | len = size & ~PAGE_CACHE_MASK; | 112 | len = ((size - 1) & ~PAGE_CACHE_MASK) + 1; |
111 | 113 | ||
112 | ret = ocfs2_write_begin_nolock(mapping, pos, len, 0, &locked_page, | 114 | ret = ocfs2_write_begin_nolock(mapping, pos, len, 0, &locked_page, |
113 | &fsdata, di_bh, page); | 115 | &fsdata, di_bh, page); |
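The ocfs2/mmap.c hunk above recomputes the last page index and the byte count in the last page from (size - 1), so a size that is an exact multiple of the page size no longer points one page past the end or yields zero bytes. A sketch of that arithmetic with an assumed 4 KiB page size:

#include <stdio.h>

#define EX_PAGE_SHIFT 12
#define EX_PAGE_SIZE  (1UL << EX_PAGE_SHIFT)
#define EX_PAGE_MASK  (~(EX_PAGE_SIZE - 1))

int main(void)
{
        unsigned long sizes[] = { 1, 4096, 4097, 8192 };
        unsigned long i;

        for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
                unsigned long size = sizes[i];
                unsigned long last_index = (size - 1) >> EX_PAGE_SHIFT;
                unsigned long last_len = ((size - 1) & ~EX_PAGE_MASK) + 1;

                printf("size %5lu: last page %lu holds %lu bytes\n",
                       size, last_index, last_len);
        }
        return 0;
}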
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index f171b51a74f7..a00dda2e4f16 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c | |||
@@ -472,32 +472,23 @@ leave: | |||
472 | return status; | 472 | return status; |
473 | } | 473 | } |
474 | 474 | ||
475 | static int ocfs2_mknod_locked(struct ocfs2_super *osb, | 475 | static int __ocfs2_mknod_locked(struct inode *dir, |
476 | struct inode *dir, | 476 | struct inode *inode, |
477 | struct inode *inode, | 477 | dev_t dev, |
478 | dev_t dev, | 478 | struct buffer_head **new_fe_bh, |
479 | struct buffer_head **new_fe_bh, | 479 | struct buffer_head *parent_fe_bh, |
480 | struct buffer_head *parent_fe_bh, | 480 | handle_t *handle, |
481 | handle_t *handle, | 481 | struct ocfs2_alloc_context *inode_ac, |
482 | struct ocfs2_alloc_context *inode_ac) | 482 | u64 fe_blkno, u64 suballoc_loc, u16 suballoc_bit) |
483 | { | 483 | { |
484 | int status = 0; | 484 | int status = 0; |
485 | struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); | ||
485 | struct ocfs2_dinode *fe = NULL; | 486 | struct ocfs2_dinode *fe = NULL; |
486 | struct ocfs2_extent_list *fel; | 487 | struct ocfs2_extent_list *fel; |
487 | u64 suballoc_loc, fe_blkno = 0; | ||
488 | u16 suballoc_bit; | ||
489 | u16 feat; | 488 | u16 feat; |
490 | 489 | ||
491 | *new_fe_bh = NULL; | 490 | *new_fe_bh = NULL; |
492 | 491 | ||
493 | status = ocfs2_claim_new_inode(handle, dir, parent_fe_bh, | ||
494 | inode_ac, &suballoc_loc, | ||
495 | &suballoc_bit, &fe_blkno); | ||
496 | if (status < 0) { | ||
497 | mlog_errno(status); | ||
498 | goto leave; | ||
499 | } | ||
500 | |||
501 | /* populate as many fields early on as possible - many of | 492 | /* populate as many fields early on as possible - many of |
502 | * these are used by the support functions here and in | 493 | * these are used by the support functions here and in |
503 | * callers. */ | 494 | * callers. */ |
@@ -591,6 +582,34 @@ leave: | |||
591 | return status; | 582 | return status; |
592 | } | 583 | } |
593 | 584 | ||
585 | static int ocfs2_mknod_locked(struct ocfs2_super *osb, | ||
586 | struct inode *dir, | ||
587 | struct inode *inode, | ||
588 | dev_t dev, | ||
589 | struct buffer_head **new_fe_bh, | ||
590 | struct buffer_head *parent_fe_bh, | ||
591 | handle_t *handle, | ||
592 | struct ocfs2_alloc_context *inode_ac) | ||
593 | { | ||
594 | int status = 0; | ||
595 | u64 suballoc_loc, fe_blkno = 0; | ||
596 | u16 suballoc_bit; | ||
597 | |||
598 | *new_fe_bh = NULL; | ||
599 | |||
600 | status = ocfs2_claim_new_inode(handle, dir, parent_fe_bh, | ||
601 | inode_ac, &suballoc_loc, | ||
602 | &suballoc_bit, &fe_blkno); | ||
603 | if (status < 0) { | ||
604 | mlog_errno(status); | ||
605 | return status; | ||
606 | } | ||
607 | |||
608 | return __ocfs2_mknod_locked(dir, inode, dev, new_fe_bh, | ||
609 | parent_fe_bh, handle, inode_ac, | ||
610 | fe_blkno, suballoc_loc, suballoc_bit); | ||
611 | } | ||
612 | |||
594 | static int ocfs2_mkdir(struct inode *dir, | 613 | static int ocfs2_mkdir(struct inode *dir, |
595 | struct dentry *dentry, | 614 | struct dentry *dentry, |
596 | int mode) | 615 | int mode) |
@@ -1852,61 +1871,117 @@ bail: | |||
1852 | return status; | 1871 | return status; |
1853 | } | 1872 | } |
1854 | 1873 | ||
1855 | static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb, | 1874 | static int ocfs2_lookup_lock_orphan_dir(struct ocfs2_super *osb, |
1856 | struct inode **ret_orphan_dir, | 1875 | struct inode **ret_orphan_dir, |
1857 | u64 blkno, | 1876 | struct buffer_head **ret_orphan_dir_bh) |
1858 | char *name, | ||
1859 | struct ocfs2_dir_lookup_result *lookup) | ||
1860 | { | 1877 | { |
1861 | struct inode *orphan_dir_inode; | 1878 | struct inode *orphan_dir_inode; |
1862 | struct buffer_head *orphan_dir_bh = NULL; | 1879 | struct buffer_head *orphan_dir_bh = NULL; |
1863 | int status = 0; | 1880 | int ret = 0; |
1864 | |||
1865 | status = ocfs2_blkno_stringify(blkno, name); | ||
1866 | if (status < 0) { | ||
1867 | mlog_errno(status); | ||
1868 | return status; | ||
1869 | } | ||
1870 | 1881 | ||
1871 | orphan_dir_inode = ocfs2_get_system_file_inode(osb, | 1882 | orphan_dir_inode = ocfs2_get_system_file_inode(osb, |
1872 | ORPHAN_DIR_SYSTEM_INODE, | 1883 | ORPHAN_DIR_SYSTEM_INODE, |
1873 | osb->slot_num); | 1884 | osb->slot_num); |
1874 | if (!orphan_dir_inode) { | 1885 | if (!orphan_dir_inode) { |
1875 | status = -ENOENT; | 1886 | ret = -ENOENT; |
1876 | mlog_errno(status); | 1887 | mlog_errno(ret); |
1877 | return status; | 1888 | return ret; |
1878 | } | 1889 | } |
1879 | 1890 | ||
1880 | mutex_lock(&orphan_dir_inode->i_mutex); | 1891 | mutex_lock(&orphan_dir_inode->i_mutex); |
1881 | 1892 | ||
1882 | status = ocfs2_inode_lock(orphan_dir_inode, &orphan_dir_bh, 1); | 1893 | ret = ocfs2_inode_lock(orphan_dir_inode, &orphan_dir_bh, 1); |
1883 | if (status < 0) { | 1894 | if (ret < 0) { |
1884 | mlog_errno(status); | 1895 | mutex_unlock(&orphan_dir_inode->i_mutex); |
1885 | goto leave; | 1896 | iput(orphan_dir_inode); |
1897 | |||
1898 | mlog_errno(ret); | ||
1899 | return ret; | ||
1886 | } | 1900 | } |
1887 | 1901 | ||
1888 | status = ocfs2_prepare_dir_for_insert(osb, orphan_dir_inode, | 1902 | *ret_orphan_dir = orphan_dir_inode; |
1889 | orphan_dir_bh, name, | 1903 | *ret_orphan_dir_bh = orphan_dir_bh; |
1890 | OCFS2_ORPHAN_NAMELEN, lookup); | ||
1891 | if (status < 0) { | ||
1892 | ocfs2_inode_unlock(orphan_dir_inode, 1); | ||
1893 | 1904 | ||
1894 | mlog_errno(status); | 1905 | return 0; |
1895 | goto leave; | 1906 | } |
1907 | |||
1908 | static int __ocfs2_prepare_orphan_dir(struct inode *orphan_dir_inode, | ||
1909 | struct buffer_head *orphan_dir_bh, | ||
1910 | u64 blkno, | ||
1911 | char *name, | ||
1912 | struct ocfs2_dir_lookup_result *lookup) | ||
1913 | { | ||
1914 | int ret; | ||
1915 | struct ocfs2_super *osb = OCFS2_SB(orphan_dir_inode->i_sb); | ||
1916 | |||
1917 | ret = ocfs2_blkno_stringify(blkno, name); | ||
1918 | if (ret < 0) { | ||
1919 | mlog_errno(ret); | ||
1920 | return ret; | ||
1921 | } | ||
1922 | |||
1923 | ret = ocfs2_prepare_dir_for_insert(osb, orphan_dir_inode, | ||
1924 | orphan_dir_bh, name, | ||
1925 | OCFS2_ORPHAN_NAMELEN, lookup); | ||
1926 | if (ret < 0) { | ||
1927 | mlog_errno(ret); | ||
1928 | return ret; | ||
1929 | } | ||
1930 | |||
1931 | return 0; | ||
1932 | } | ||
1933 | |||
1934 | /** | ||
1935 | * ocfs2_prepare_orphan_dir() - Prepare an orphan directory for | ||
1936 | * insertion of an orphan. | ||
1937 | * @osb: ocfs2 file system | ||
1938 | * @ret_orphan_dir: Orphan dir inode - returned locked! | ||
1939 | * @blkno: Actual block number of the inode to be inserted into orphan dir. | ||
1940 | * @lookup: dir lookup result, to be passed back into functions like | ||
1941 | * ocfs2_orphan_add | ||
1942 | * | ||
1943 | * Returns zero on success and the ret_orphan_dir, name and lookup | ||
1944 | * fields will be populated. | ||
1945 | * | ||
1946 | * Returns non-zero on failure. | ||
1947 | */ | ||
1948 | static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb, | ||
1949 | struct inode **ret_orphan_dir, | ||
1950 | u64 blkno, | ||
1951 | char *name, | ||
1952 | struct ocfs2_dir_lookup_result *lookup) | ||
1953 | { | ||
1954 | struct inode *orphan_dir_inode = NULL; | ||
1955 | struct buffer_head *orphan_dir_bh = NULL; | ||
1956 | int ret = 0; | ||
1957 | |||
1958 | ret = ocfs2_lookup_lock_orphan_dir(osb, &orphan_dir_inode, | ||
1959 | &orphan_dir_bh); | ||
1960 | if (ret < 0) { | ||
1961 | mlog_errno(ret); | ||
1962 | return ret; | ||
1963 | } | ||
1964 | |||
1965 | ret = __ocfs2_prepare_orphan_dir(orphan_dir_inode, orphan_dir_bh, | ||
1966 | blkno, name, lookup); | ||
1967 | if (ret < 0) { | ||
1968 | mlog_errno(ret); | ||
1969 | goto out; | ||
1896 | } | 1970 | } |
1897 | 1971 | ||
1898 | *ret_orphan_dir = orphan_dir_inode; | 1972 | *ret_orphan_dir = orphan_dir_inode; |
1899 | 1973 | ||
1900 | leave: | 1974 | out: |
1901 | if (status) { | 1975 | brelse(orphan_dir_bh); |
1976 | |||
1977 | if (ret) { | ||
1978 | ocfs2_inode_unlock(orphan_dir_inode, 1); | ||
1902 | mutex_unlock(&orphan_dir_inode->i_mutex); | 1979 | mutex_unlock(&orphan_dir_inode->i_mutex); |
1903 | iput(orphan_dir_inode); | 1980 | iput(orphan_dir_inode); |
1904 | } | 1981 | } |
1905 | 1982 | ||
1906 | brelse(orphan_dir_bh); | 1983 | mlog_exit(ret); |
1907 | 1984 | return ret; | |
1908 | mlog_exit(status); | ||
1909 | return status; | ||
1910 | } | 1985 | } |
1911 | 1986 | ||
1912 | static int ocfs2_orphan_add(struct ocfs2_super *osb, | 1987 | static int ocfs2_orphan_add(struct ocfs2_super *osb, |
@@ -2053,6 +2128,99 @@ leave: | |||
2053 | return status; | 2128 | return status; |
2054 | } | 2129 | } |
2055 | 2130 | ||
2131 | /** | ||
2132 | * ocfs2_prep_new_orphaned_file() - Prepare the orphan dir to receive a newly | ||
2133 | * allocated file. This is different from the typical 'add to orphan dir' | ||
2134 | * operation in that the inode does not yet exist. This is a problem because | ||
2135 | * the orphan dir stringifies the inode block number to come up with its | ||
2136 | * dirent. Obviously if the inode does not yet exist we have a chicken and egg | ||
2137 | * problem. This function works around it by calling deeper into the orphan | ||
2138 | * and suballoc code than other callers. Use this only when necessary. | ||
2139 | * @dir: The directory which this inode will ultimately wind up under - not the | ||
2140 | * orphan dir! | ||
2141 | * @dir_bh: buffer_head the @dir inode block | ||
2142 | * @orphan_name: string of length (OCFS2_ORPHAN_NAMELEN + 1). Will be filled | ||
2143 | * with the string to be used for orphan dirent. Pass back to the orphan dir | ||
2144 | * code. | ||
2145 | * @ret_orphan_dir: orphan dir inode returned to be passed back into orphan | ||
2146 | * dir code. | ||
2147 | * @ret_di_blkno: block number where the new inode will be allocated. | ||
2148 | * @orphan_insert: Dir insert context to be passed back into orphan dir code. | ||
2149 | * @ret_inode_ac: Inode alloc context to be passed back to the allocator. | ||
2150 | * | ||
2151 | * Returns zero on success and the ret_orphan_dir, name and lookup | ||
2152 | * fields will be populated. | ||
2153 | * | ||
2154 | * Returns non-zero on failure. | ||
2155 | */ | ||
2156 | static int ocfs2_prep_new_orphaned_file(struct inode *dir, | ||
2157 | struct buffer_head *dir_bh, | ||
2158 | char *orphan_name, | ||
2159 | struct inode **ret_orphan_dir, | ||
2160 | u64 *ret_di_blkno, | ||
2161 | struct ocfs2_dir_lookup_result *orphan_insert, | ||
2162 | struct ocfs2_alloc_context **ret_inode_ac) | ||
2163 | { | ||
2164 | int ret; | ||
2165 | u64 di_blkno; | ||
2166 | struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); | ||
2167 | struct inode *orphan_dir = NULL; | ||
2168 | struct buffer_head *orphan_dir_bh = NULL; | ||
2169 | struct ocfs2_alloc_context *inode_ac = NULL; | ||
2170 | |||
2171 | ret = ocfs2_lookup_lock_orphan_dir(osb, &orphan_dir, &orphan_dir_bh); | ||
2172 | if (ret < 0) { | ||
2173 | mlog_errno(ret); | ||
2174 | return ret; | ||
2175 | } | ||
2176 | |||
2177 | /* reserve an inode spot */ | ||
2178 | ret = ocfs2_reserve_new_inode(osb, &inode_ac); | ||
2179 | if (ret < 0) { | ||
2180 | if (ret != -ENOSPC) | ||
2181 | mlog_errno(ret); | ||
2182 | goto out; | ||
2183 | } | ||
2184 | |||
2185 | ret = ocfs2_find_new_inode_loc(dir, dir_bh, inode_ac, | ||
2186 | &di_blkno); | ||
2187 | if (ret) { | ||
2188 | mlog_errno(ret); | ||
2189 | goto out; | ||
2190 | } | ||
2191 | |||
2192 | ret = __ocfs2_prepare_orphan_dir(orphan_dir, orphan_dir_bh, | ||
2193 | di_blkno, orphan_name, orphan_insert); | ||
2194 | if (ret < 0) { | ||
2195 | mlog_errno(ret); | ||
2196 | goto out; | ||
2197 | } | ||
2198 | |||
2199 | out: | ||
2200 | if (ret == 0) { | ||
2201 | *ret_orphan_dir = orphan_dir; | ||
2202 | *ret_di_blkno = di_blkno; | ||
2203 | *ret_inode_ac = inode_ac; | ||
2204 | /* | ||
2205 | * orphan_name and orphan_insert are already up to | ||
2206 | * date via prepare_orphan_dir | ||
2207 | */ | ||
2208 | } else { | ||
2209 | /* Unroll reserve_new_inode* */ | ||
2210 | if (inode_ac) | ||
2211 | ocfs2_free_alloc_context(inode_ac); | ||
2212 | |||
2213 | /* Unroll orphan dir locking */ | ||
2214 | mutex_unlock(&orphan_dir->i_mutex); | ||
2215 | ocfs2_inode_unlock(orphan_dir, 1); | ||
2216 | iput(orphan_dir); | ||
2217 | } | ||
2218 | |||
2219 | brelse(orphan_dir_bh); | ||
2220 | |||
2221 | return ret; | ||
2222 | } | ||
2223 | |||
2056 | int ocfs2_create_inode_in_orphan(struct inode *dir, | 2224 | int ocfs2_create_inode_in_orphan(struct inode *dir, |
2057 | int mode, | 2225 | int mode, |
2058 | struct inode **new_inode) | 2226 | struct inode **new_inode) |
@@ -2068,6 +2236,8 @@ int ocfs2_create_inode_in_orphan(struct inode *dir, | |||
2068 | struct buffer_head *new_di_bh = NULL; | 2236 | struct buffer_head *new_di_bh = NULL; |
2069 | struct ocfs2_alloc_context *inode_ac = NULL; | 2237 | struct ocfs2_alloc_context *inode_ac = NULL; |
2070 | struct ocfs2_dir_lookup_result orphan_insert = { NULL, }; | 2238 | struct ocfs2_dir_lookup_result orphan_insert = { NULL, }; |
2239 | u64 uninitialized_var(di_blkno), suballoc_loc; | ||
2240 | u16 suballoc_bit; | ||
2071 | 2241 | ||
2072 | status = ocfs2_inode_lock(dir, &parent_di_bh, 1); | 2242 | status = ocfs2_inode_lock(dir, &parent_di_bh, 1); |
2073 | if (status < 0) { | 2243 | if (status < 0) { |
@@ -2076,20 +2246,9 @@ int ocfs2_create_inode_in_orphan(struct inode *dir, | |||
2076 | return status; | 2246 | return status; |
2077 | } | 2247 | } |
2078 | 2248 | ||
2079 | /* | 2249 | status = ocfs2_prep_new_orphaned_file(dir, parent_di_bh, |
2080 | * We give the orphan dir the root blkno to fake an orphan name, | 2250 | orphan_name, &orphan_dir, |
2081 | * and allocate enough space for our insertion. | 2251 | &di_blkno, &orphan_insert, &inode_ac); |
2082 | */ | ||
2083 | status = ocfs2_prepare_orphan_dir(osb, &orphan_dir, | ||
2084 | osb->root_blkno, | ||
2085 | orphan_name, &orphan_insert); | ||
2086 | if (status < 0) { | ||
2087 | mlog_errno(status); | ||
2088 | goto leave; | ||
2089 | } | ||
2090 | |||
2091 | /* reserve an inode spot */ | ||
2092 | status = ocfs2_reserve_new_inode(osb, &inode_ac); | ||
2093 | if (status < 0) { | 2252 | if (status < 0) { |
2094 | if (status != -ENOSPC) | 2253 | if (status != -ENOSPC) |
2095 | mlog_errno(status); | 2254 | mlog_errno(status); |
@@ -2116,17 +2275,20 @@ int ocfs2_create_inode_in_orphan(struct inode *dir, | |||
2116 | goto leave; | 2275 | goto leave; |
2117 | did_quota_inode = 1; | 2276 | did_quota_inode = 1; |
2118 | 2277 | ||
2119 | inode->i_nlink = 0; | 2278 | status = ocfs2_claim_new_inode_at_loc(handle, dir, inode_ac, |
2120 | /* do the real work now. */ | 2279 | &suballoc_loc, |
2121 | status = ocfs2_mknod_locked(osb, dir, inode, | 2280 | &suballoc_bit, di_blkno); |
2122 | 0, &new_di_bh, parent_di_bh, handle, | ||
2123 | inode_ac); | ||
2124 | if (status < 0) { | 2281 | if (status < 0) { |
2125 | mlog_errno(status); | 2282 | mlog_errno(status); |
2126 | goto leave; | 2283 | goto leave; |
2127 | } | 2284 | } |
2128 | 2285 | ||
2129 | status = ocfs2_blkno_stringify(OCFS2_I(inode)->ip_blkno, orphan_name); | 2286 | inode->i_nlink = 0; |
2287 | /* do the real work now. */ | ||
2288 | status = __ocfs2_mknod_locked(dir, inode, | ||
2289 | 0, &new_di_bh, parent_di_bh, handle, | ||
2290 | inode_ac, di_blkno, suballoc_loc, | ||
2291 | suballoc_bit); | ||
2130 | if (status < 0) { | 2292 | if (status < 0) { |
2131 | mlog_errno(status); | 2293 | mlog_errno(status); |
2132 | goto leave; | 2294 | goto leave; |
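Taken together, the namei.c and suballoc.c changes split inode allocation for ocfs2_create_inode_in_orphan() into a find phase and a claim phase, so that the orphan dirent name (derived from the inode's block number) can be prepared before the bits are actually claimed. A hedged sketch of the resulting call order follows; it uses only the functions introduced by this patch, is not compilable on its own, and elides error handling, the orphan dirent insertion and all unwinding:

/* Sketch only: shows the ordering introduced by this patch. */
static int orphan_create_order_sketch(struct inode *dir,
				      struct buffer_head *dir_bh,
				      struct inode *inode,
				      handle_t *handle,
				      char *orphan_name,
				      struct ocfs2_dir_lookup_result *orphan_insert)
{
	struct ocfs2_alloc_context *inode_ac = NULL;
	struct inode *orphan_dir = NULL;
	struct buffer_head *new_di_bh = NULL;
	u64 di_blkno, suballoc_loc;
	u16 suballoc_bit;

	/* 1. Pick (but do not yet claim) the block the new inode will use,
	 *    so its orphan dirent name can be derived up front. */
	ocfs2_prep_new_orphaned_file(dir, dir_bh, orphan_name, &orphan_dir,
				     &di_blkno, orphan_insert, &inode_ac);

	/* 2. With di_blkno known, commit the suballocator bits at exactly
	 *    that location... */
	ocfs2_claim_new_inode_at_loc(handle, dir, inode_ac, &suballoc_loc,
				     &suballoc_bit, di_blkno);

	/* 3. ...and initialize the on-disk inode there. */
	return __ocfs2_mknod_locked(dir, inode, 0, &new_di_bh, dir_bh, handle,
				    inode_ac, di_blkno, suballoc_loc,
				    suballoc_bit);
}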
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h index 33f1c9a8258d..fa31d05e41b7 100644 --- a/fs/ocfs2/ocfs2_fs.h +++ b/fs/ocfs2/ocfs2_fs.h | |||
@@ -235,18 +235,31 @@ | |||
235 | #define OCFS2_HAS_REFCOUNT_FL (0x0010) | 235 | #define OCFS2_HAS_REFCOUNT_FL (0x0010) |
236 | 236 | ||
237 | /* Inode attributes, keep in sync with EXT2 */ | 237 | /* Inode attributes, keep in sync with EXT2 */ |
238 | #define OCFS2_SECRM_FL (0x00000001) /* Secure deletion */ | 238 | #define OCFS2_SECRM_FL FS_SECRM_FL /* Secure deletion */ |
239 | #define OCFS2_UNRM_FL (0x00000002) /* Undelete */ | 239 | #define OCFS2_UNRM_FL FS_UNRM_FL /* Undelete */ |
240 | #define OCFS2_COMPR_FL (0x00000004) /* Compress file */ | 240 | #define OCFS2_COMPR_FL FS_COMPR_FL /* Compress file */ |
241 | #define OCFS2_SYNC_FL (0x00000008) /* Synchronous updates */ | 241 | #define OCFS2_SYNC_FL FS_SYNC_FL /* Synchronous updates */ |
242 | #define OCFS2_IMMUTABLE_FL (0x00000010) /* Immutable file */ | 242 | #define OCFS2_IMMUTABLE_FL FS_IMMUTABLE_FL /* Immutable file */ |
243 | #define OCFS2_APPEND_FL (0x00000020) /* writes to file may only append */ | 243 | #define OCFS2_APPEND_FL FS_APPEND_FL /* writes to file may only append */ |
244 | #define OCFS2_NODUMP_FL (0x00000040) /* do not dump file */ | 244 | #define OCFS2_NODUMP_FL FS_NODUMP_FL /* do not dump file */ |
245 | #define OCFS2_NOATIME_FL (0x00000080) /* do not update atime */ | 245 | #define OCFS2_NOATIME_FL FS_NOATIME_FL /* do not update atime */ |
246 | #define OCFS2_DIRSYNC_FL (0x00010000) /* dirsync behaviour (directories only) */ | 246 | /* Reserved for compression usage... */ |
247 | 247 | #define OCFS2_DIRTY_FL FS_DIRTY_FL | |
248 | #define OCFS2_FL_VISIBLE (0x000100FF) /* User visible flags */ | 248 | #define OCFS2_COMPRBLK_FL FS_COMPRBLK_FL /* One or more compressed clusters */ |
249 | #define OCFS2_FL_MODIFIABLE (0x000100FF) /* User modifiable flags */ | 249 | #define OCFS2_NOCOMP_FL FS_NOCOMP_FL /* Don't compress */ |
250 | #define OCFS2_ECOMPR_FL FS_ECOMPR_FL /* Compression error */ | ||
251 | /* End compression flags --- maybe not all used */ | ||
252 | #define OCFS2_BTREE_FL FS_BTREE_FL /* btree format dir */ | ||
253 | #define OCFS2_INDEX_FL FS_INDEX_FL /* hash-indexed directory */ | ||
254 | #define OCFS2_IMAGIC_FL FS_IMAGIC_FL /* AFS directory */ | ||
255 | #define OCFS2_JOURNAL_DATA_FL FS_JOURNAL_DATA_FL /* Reserved for ext3 */ | ||
256 | #define OCFS2_NOTAIL_FL FS_NOTAIL_FL /* file tail should not be merged */ | ||
257 | #define OCFS2_DIRSYNC_FL FS_DIRSYNC_FL /* dirsync behaviour (directories only) */ | ||
258 | #define OCFS2_TOPDIR_FL FS_TOPDIR_FL /* Top of directory hierarchies*/ | ||
259 | #define OCFS2_RESERVED_FL FS_RESERVED_FL /* reserved for ext2 lib */ | ||
260 | |||
261 | #define OCFS2_FL_VISIBLE FS_FL_USER_VISIBLE /* User visible flags */ | ||
262 | #define OCFS2_FL_MODIFIABLE FS_FL_USER_MODIFIABLE /* User modifiable flags */ | ||
250 | 263 | ||
251 | /* | 264 | /* |
252 | * Extent record flags (e_node.leaf.flags) | 265 | * Extent record flags (e_node.leaf.flags) |
diff --git a/fs/ocfs2/ocfs2_ioctl.h b/fs/ocfs2/ocfs2_ioctl.h index 2d3420af1a83..5d241505690b 100644 --- a/fs/ocfs2/ocfs2_ioctl.h +++ b/fs/ocfs2/ocfs2_ioctl.h | |||
@@ -23,10 +23,10 @@ | |||
23 | /* | 23 | /* |
24 | * ioctl commands | 24 | * ioctl commands |
25 | */ | 25 | */ |
26 | #define OCFS2_IOC_GETFLAGS _IOR('f', 1, long) | 26 | #define OCFS2_IOC_GETFLAGS FS_IOC_GETFLAGS |
27 | #define OCFS2_IOC_SETFLAGS _IOW('f', 2, long) | 27 | #define OCFS2_IOC_SETFLAGS FS_IOC_SETFLAGS |
28 | #define OCFS2_IOC32_GETFLAGS _IOR('f', 1, int) | 28 | #define OCFS2_IOC32_GETFLAGS FS_IOC32_GETFLAGS |
29 | #define OCFS2_IOC32_SETFLAGS _IOW('f', 2, int) | 29 | #define OCFS2_IOC32_SETFLAGS FS_IOC32_SETFLAGS |
30 | 30 | ||
31 | /* | 31 | /* |
32 | * Space reservation / allocation / free ioctls and argument structure | 32 | * Space reservation / allocation / free ioctls and argument structure |
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index 73a11ccfd4c2..efdd75607406 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c | |||
@@ -2960,7 +2960,7 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle, | |||
2960 | if (map_end & (PAGE_CACHE_SIZE - 1)) | 2960 | if (map_end & (PAGE_CACHE_SIZE - 1)) |
2961 | to = map_end & (PAGE_CACHE_SIZE - 1); | 2961 | to = map_end & (PAGE_CACHE_SIZE - 1); |
2962 | 2962 | ||
2963 | page = grab_cache_page(mapping, page_index); | 2963 | page = find_or_create_page(mapping, page_index, GFP_NOFS); |
2964 | 2964 | ||
2965 | /* | 2965 | /* |
2966 | * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, This page | 2966 | * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, This page |
@@ -3179,7 +3179,8 @@ static int ocfs2_cow_sync_writeback(struct super_block *sb, | |||
3179 | if (map_end > end) | 3179 | if (map_end > end) |
3180 | map_end = end; | 3180 | map_end = end; |
3181 | 3181 | ||
3182 | page = grab_cache_page(context->inode->i_mapping, page_index); | 3182 | page = find_or_create_page(context->inode->i_mapping, |
3183 | page_index, GFP_NOFS); | ||
3183 | BUG_ON(!page); | 3184 | BUG_ON(!page); |
3184 | 3185 | ||
3185 | wait_on_page_writeback(page); | 3186 | wait_on_page_writeback(page); |
@@ -4200,8 +4201,9 @@ static int __ocfs2_reflink(struct dentry *old_dentry, | |||
4200 | goto out; | 4201 | goto out; |
4201 | } | 4202 | } |
4202 | 4203 | ||
4203 | mutex_lock(&new_inode->i_mutex); | 4204 | mutex_lock_nested(&new_inode->i_mutex, I_MUTEX_CHILD); |
4204 | ret = ocfs2_inode_lock(new_inode, &new_bh, 1); | 4205 | ret = ocfs2_inode_lock_nested(new_inode, &new_bh, 1, |
4206 | OI_LS_REFLINK_TARGET); | ||
4205 | if (ret) { | 4207 | if (ret) { |
4206 | mlog_errno(ret); | 4208 | mlog_errno(ret); |
4207 | goto out_unlock; | 4209 | goto out_unlock; |
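The reflink target is a second inode locked while the source is already held, so a plain mutex_lock()/ocfs2_inode_lock() on it would trip lockdep's single-class checks; the patch annotates the inner acquisition with a nesting subclass (I_MUTEX_CHILD, OI_LS_REFLINK_TARGET) instead. A generic, hedged illustration of the same lockdep pattern; the struct and field names below are made up for the example:

#include <linux/mutex.h>

struct demo_node {
	struct mutex lock;
};

/* Lock a pair of objects that share one lock class. Without the _nested
 * annotation on the inner lock, lockdep would report a false recursive-
 * locking warning the first time this runs. */
static void demo_lock_pair(struct demo_node *outer, struct demo_node *inner)
{
	mutex_lock(&outer->lock);
	mutex_lock_nested(&inner->lock, SINGLE_DEPTH_NESTING);

	/* ... operate on both objects ... */

	mutex_unlock(&inner->lock);
	mutex_unlock(&outer->lock);
}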
diff --git a/fs/ocfs2/reservations.c b/fs/ocfs2/reservations.c index d8b6e4259b80..3e78db361bc7 100644 --- a/fs/ocfs2/reservations.c +++ b/fs/ocfs2/reservations.c | |||
@@ -732,25 +732,23 @@ int ocfs2_resmap_resv_bits(struct ocfs2_reservation_map *resmap, | |||
732 | struct ocfs2_alloc_reservation *resv, | 732 | struct ocfs2_alloc_reservation *resv, |
733 | int *cstart, int *clen) | 733 | int *cstart, int *clen) |
734 | { | 734 | { |
735 | unsigned int wanted = *clen; | ||
736 | |||
737 | if (resv == NULL || ocfs2_resmap_disabled(resmap)) | 735 | if (resv == NULL || ocfs2_resmap_disabled(resmap)) |
738 | return -ENOSPC; | 736 | return -ENOSPC; |
739 | 737 | ||
740 | spin_lock(&resv_lock); | 738 | spin_lock(&resv_lock); |
741 | 739 | ||
742 | /* | ||
743 | * We don't want to over-allocate for temporary | ||
744 | * windows. Otherwise, we run the risk of fragmenting the | ||
745 | * allocation space. | ||
746 | */ | ||
747 | wanted = ocfs2_resv_window_bits(resmap, resv); | ||
748 | if ((resv->r_flags & OCFS2_RESV_FLAG_TMP) || wanted < *clen) | ||
749 | wanted = *clen; | ||
750 | |||
751 | if (ocfs2_resv_empty(resv)) { | 740 | if (ocfs2_resv_empty(resv)) { |
752 | mlog(0, "empty reservation, find new window\n"); | 741 | /* |
742 | * We don't want to over-allocate for temporary | ||
743 | * windows. Otherwise, we run the risk of fragmenting the | ||
744 | * allocation space. | ||
745 | */ | ||
746 | unsigned int wanted = ocfs2_resv_window_bits(resmap, resv); | ||
753 | 747 | ||
748 | if ((resv->r_flags & OCFS2_RESV_FLAG_TMP) || wanted < *clen) | ||
749 | wanted = *clen; | ||
750 | |||
751 | mlog(0, "empty reservation, find new window\n"); | ||
754 | /* | 752 | /* |
755 | * Try to get a window here. If it works, we must fall | 753 | * Try to get a window here. If it works, we must fall |
756 | * through and test the bitmap. This avoids some | 754 | * through and test the bitmap. This avoids some |
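The rule this reshuffle preserves: only when an empty reservation has to open a brand-new window does the code consider rounding the request up to a full window, and temporary (TMP) reservations always stick to exactly what the caller asked for, so short-lived allocations do not fragment the free space. A hedged restatement of that sizing rule as a standalone helper; the name and signature are illustrative, not the ocfs2 function:

/* Illustrative only. Decide how many bits to search for when opening a
 * new reservation window. */
static unsigned int resv_bits_wanted(unsigned int window_bits,
				     unsigned int clen, int is_tmp)
{
	/* Temporary windows, and windows smaller than the request itself,
	 * fall back to exactly what the caller asked for. */
	if (is_tmp || window_bits < clen)
		return clen;
	return window_bits;
}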
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index a8e6a95a353f..849c2f0e0a0e 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c | |||
@@ -57,11 +57,28 @@ struct ocfs2_suballoc_result { | |||
57 | u64 sr_bg_blkno; /* The bg we allocated from. Set | 57 | u64 sr_bg_blkno; /* The bg we allocated from. Set |
58 | to 0 when a block group is | 58 | to 0 when a block group is |
59 | contiguous. */ | 59 | contiguous. */ |
60 | u64 sr_bg_stable_blkno; /* | ||
61 | * Doesn't change, always | ||
62 | * set to target block | ||
63 | * group descriptor | ||
64 | * block. | ||
65 | */ | ||
60 | u64 sr_blkno; /* The first allocated block */ | 66 | u64 sr_blkno; /* The first allocated block */ |
61 | unsigned int sr_bit_offset; /* The bit in the bg */ | 67 | unsigned int sr_bit_offset; /* The bit in the bg */ |
62 | unsigned int sr_bits; /* How many bits we claimed */ | 68 | unsigned int sr_bits; /* How many bits we claimed */ |
63 | }; | 69 | }; |
64 | 70 | ||
71 | static u64 ocfs2_group_from_res(struct ocfs2_suballoc_result *res) | ||
72 | { | ||
73 | if (res->sr_blkno == 0) | ||
74 | return 0; | ||
75 | |||
76 | if (res->sr_bg_blkno) | ||
77 | return res->sr_bg_blkno; | ||
78 | |||
79 | return ocfs2_which_suballoc_group(res->sr_blkno, res->sr_bit_offset); | ||
80 | } | ||
81 | |||
65 | static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg); | 82 | static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg); |
66 | static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe); | 83 | static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe); |
67 | static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl); | 84 | static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl); |
@@ -138,6 +155,10 @@ void ocfs2_free_ac_resource(struct ocfs2_alloc_context *ac) | |||
138 | brelse(ac->ac_bh); | 155 | brelse(ac->ac_bh); |
139 | ac->ac_bh = NULL; | 156 | ac->ac_bh = NULL; |
140 | ac->ac_resv = NULL; | 157 | ac->ac_resv = NULL; |
158 | if (ac->ac_find_loc_priv) { | ||
159 | kfree(ac->ac_find_loc_priv); | ||
160 | ac->ac_find_loc_priv = NULL; | ||
161 | } | ||
141 | } | 162 | } |
142 | 163 | ||
143 | void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac) | 164 | void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac) |
@@ -336,7 +357,7 @@ out: | |||
336 | static void ocfs2_bg_discontig_add_extent(struct ocfs2_super *osb, | 357 | static void ocfs2_bg_discontig_add_extent(struct ocfs2_super *osb, |
337 | struct ocfs2_group_desc *bg, | 358 | struct ocfs2_group_desc *bg, |
338 | struct ocfs2_chain_list *cl, | 359 | struct ocfs2_chain_list *cl, |
339 | u64 p_blkno, u32 clusters) | 360 | u64 p_blkno, unsigned int clusters) |
340 | { | 361 | { |
341 | struct ocfs2_extent_list *el = &bg->bg_list; | 362 | struct ocfs2_extent_list *el = &bg->bg_list; |
342 | struct ocfs2_extent_rec *rec; | 363 | struct ocfs2_extent_rec *rec; |
@@ -348,7 +369,7 @@ static void ocfs2_bg_discontig_add_extent(struct ocfs2_super *osb, | |||
348 | rec->e_blkno = cpu_to_le64(p_blkno); | 369 | rec->e_blkno = cpu_to_le64(p_blkno); |
349 | rec->e_cpos = cpu_to_le32(le16_to_cpu(bg->bg_bits) / | 370 | rec->e_cpos = cpu_to_le32(le16_to_cpu(bg->bg_bits) / |
350 | le16_to_cpu(cl->cl_bpc)); | 371 | le16_to_cpu(cl->cl_bpc)); |
351 | rec->e_leaf_clusters = cpu_to_le32(clusters); | 372 | rec->e_leaf_clusters = cpu_to_le16(clusters); |
352 | le16_add_cpu(&bg->bg_bits, clusters * le16_to_cpu(cl->cl_bpc)); | 373 | le16_add_cpu(&bg->bg_bits, clusters * le16_to_cpu(cl->cl_bpc)); |
353 | le16_add_cpu(&bg->bg_free_bits_count, | 374 | le16_add_cpu(&bg->bg_free_bits_count, |
354 | clusters * le16_to_cpu(cl->cl_bpc)); | 375 | clusters * le16_to_cpu(cl->cl_bpc)); |
@@ -1678,6 +1699,15 @@ static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac, | |||
1678 | if (!ret) | 1699 | if (!ret) |
1679 | ocfs2_bg_discontig_fix_result(ac, gd, res); | 1700 | ocfs2_bg_discontig_fix_result(ac, gd, res); |
1680 | 1701 | ||
1702 | /* | ||
1703 | * sr_bg_blkno might have been changed by | ||
1704 | * ocfs2_bg_discontig_fix_result | ||
1705 | */ | ||
1706 | res->sr_bg_stable_blkno = group_bh->b_blocknr; | ||
1707 | |||
1708 | if (ac->ac_find_loc_only) | ||
1709 | goto out_loc_only; | ||
1710 | |||
1681 | ret = ocfs2_alloc_dinode_update_counts(alloc_inode, handle, ac->ac_bh, | 1711 | ret = ocfs2_alloc_dinode_update_counts(alloc_inode, handle, ac->ac_bh, |
1682 | res->sr_bits, | 1712 | res->sr_bits, |
1683 | le16_to_cpu(gd->bg_chain)); | 1713 | le16_to_cpu(gd->bg_chain)); |
@@ -1691,6 +1721,7 @@ static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac, | |||
1691 | if (ret < 0) | 1721 | if (ret < 0) |
1692 | mlog_errno(ret); | 1722 | mlog_errno(ret); |
1693 | 1723 | ||
1724 | out_loc_only: | ||
1694 | *bits_left = le16_to_cpu(gd->bg_free_bits_count); | 1725 | *bits_left = le16_to_cpu(gd->bg_free_bits_count); |
1695 | 1726 | ||
1696 | out: | 1727 | out: |
@@ -1708,7 +1739,6 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, | |||
1708 | { | 1739 | { |
1709 | int status; | 1740 | int status; |
1710 | u16 chain; | 1741 | u16 chain; |
1711 | u32 tmp_used; | ||
1712 | u64 next_group; | 1742 | u64 next_group; |
1713 | struct inode *alloc_inode = ac->ac_inode; | 1743 | struct inode *alloc_inode = ac->ac_inode; |
1714 | struct buffer_head *group_bh = NULL; | 1744 | struct buffer_head *group_bh = NULL; |
@@ -1770,6 +1800,11 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, | |||
1770 | if (!status) | 1800 | if (!status) |
1771 | ocfs2_bg_discontig_fix_result(ac, bg, res); | 1801 | ocfs2_bg_discontig_fix_result(ac, bg, res); |
1772 | 1802 | ||
1803 | /* | ||
1804 | * sr_bg_blkno might have been changed by | ||
1805 | * ocfs2_bg_discontig_fix_result | ||
1806 | */ | ||
1807 | res->sr_bg_stable_blkno = group_bh->b_blocknr; | ||
1773 | 1808 | ||
1774 | /* | 1809 | /* |
1775 | * Keep track of previous block descriptor read. When | 1810 | * Keep track of previous block descriptor read. When |
@@ -1796,22 +1831,17 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, | |||
1796 | } | 1831 | } |
1797 | } | 1832 | } |
1798 | 1833 | ||
1799 | /* Ok, claim our bits now: set the info on dinode, chainlist | 1834 | if (ac->ac_find_loc_only) |
1800 | * and then the group */ | 1835 | goto out_loc_only; |
1801 | status = ocfs2_journal_access_di(handle, | 1836 | |
1802 | INODE_CACHE(alloc_inode), | 1837 | status = ocfs2_alloc_dinode_update_counts(alloc_inode, handle, |
1803 | ac->ac_bh, | 1838 | ac->ac_bh, res->sr_bits, |
1804 | OCFS2_JOURNAL_ACCESS_WRITE); | 1839 | chain); |
1805 | if (status < 0) { | 1840 | if (status) { |
1806 | mlog_errno(status); | 1841 | mlog_errno(status); |
1807 | goto bail; | 1842 | goto bail; |
1808 | } | 1843 | } |
1809 | 1844 | ||
1810 | tmp_used = le32_to_cpu(fe->id1.bitmap1.i_used); | ||
1811 | fe->id1.bitmap1.i_used = cpu_to_le32(res->sr_bits + tmp_used); | ||
1812 | le32_add_cpu(&cl->cl_recs[chain].c_free, -res->sr_bits); | ||
1813 | ocfs2_journal_dirty(handle, ac->ac_bh); | ||
1814 | |||
1815 | status = ocfs2_block_group_set_bits(handle, | 1845 | status = ocfs2_block_group_set_bits(handle, |
1816 | alloc_inode, | 1846 | alloc_inode, |
1817 | bg, | 1847 | bg, |
@@ -1826,6 +1856,7 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, | |||
1826 | mlog(0, "Allocated %u bits from suballocator %llu\n", res->sr_bits, | 1856 | mlog(0, "Allocated %u bits from suballocator %llu\n", res->sr_bits, |
1827 | (unsigned long long)le64_to_cpu(fe->i_blkno)); | 1857 | (unsigned long long)le64_to_cpu(fe->i_blkno)); |
1828 | 1858 | ||
1859 | out_loc_only: | ||
1829 | *bits_left = le16_to_cpu(bg->bg_free_bits_count); | 1860 | *bits_left = le16_to_cpu(bg->bg_free_bits_count); |
1830 | bail: | 1861 | bail: |
1831 | brelse(group_bh); | 1862 | brelse(group_bh); |
@@ -1845,6 +1876,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac, | |||
1845 | int status; | 1876 | int status; |
1846 | u16 victim, i; | 1877 | u16 victim, i; |
1847 | u16 bits_left = 0; | 1878 | u16 bits_left = 0; |
1879 | u64 hint = ac->ac_last_group; | ||
1848 | struct ocfs2_chain_list *cl; | 1880 | struct ocfs2_chain_list *cl; |
1849 | struct ocfs2_dinode *fe; | 1881 | struct ocfs2_dinode *fe; |
1850 | 1882 | ||
@@ -1872,7 +1904,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac, | |||
1872 | goto bail; | 1904 | goto bail; |
1873 | } | 1905 | } |
1874 | 1906 | ||
1875 | res->sr_bg_blkno = ac->ac_last_group; | 1907 | res->sr_bg_blkno = hint; |
1876 | if (res->sr_bg_blkno) { | 1908 | if (res->sr_bg_blkno) { |
1877 | /* Attempt to short-circuit the usual search mechanism | 1909 | /* Attempt to short-circuit the usual search mechanism |
1878 | * by jumping straight to the most recently used | 1910 | * by jumping straight to the most recently used |
@@ -1896,8 +1928,10 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac, | |||
1896 | 1928 | ||
1897 | status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits, | 1929 | status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits, |
1898 | res, &bits_left); | 1930 | res, &bits_left); |
1899 | if (!status) | 1931 | if (!status) { |
1932 | hint = ocfs2_group_from_res(res); | ||
1900 | goto set_hint; | 1933 | goto set_hint; |
1934 | } | ||
1901 | if (status < 0 && status != -ENOSPC) { | 1935 | if (status < 0 && status != -ENOSPC) { |
1902 | mlog_errno(status); | 1936 | mlog_errno(status); |
1903 | goto bail; | 1937 | goto bail; |
@@ -1920,8 +1954,10 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac, | |||
1920 | ac->ac_chain = i; | 1954 | ac->ac_chain = i; |
1921 | status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits, | 1955 | status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits, |
1922 | res, &bits_left); | 1956 | res, &bits_left); |
1923 | if (!status) | 1957 | if (!status) { |
1958 | hint = ocfs2_group_from_res(res); | ||
1924 | break; | 1959 | break; |
1960 | } | ||
1925 | if (status < 0 && status != -ENOSPC) { | 1961 | if (status < 0 && status != -ENOSPC) { |
1926 | mlog_errno(status); | 1962 | mlog_errno(status); |
1927 | goto bail; | 1963 | goto bail; |
@@ -1936,7 +1972,7 @@ set_hint: | |||
1936 | if (bits_left < min_bits) | 1972 | if (bits_left < min_bits) |
1937 | ac->ac_last_group = 0; | 1973 | ac->ac_last_group = 0; |
1938 | else | 1974 | else |
1939 | ac->ac_last_group = res->sr_bg_blkno; | 1975 | ac->ac_last_group = hint; |
1940 | } | 1976 | } |
1941 | 1977 | ||
1942 | bail: | 1978 | bail: |
@@ -2016,6 +2052,136 @@ static inline void ocfs2_save_inode_ac_group(struct inode *dir, | |||
2016 | OCFS2_I(dir)->ip_last_used_slot = ac->ac_alloc_slot; | 2052 | OCFS2_I(dir)->ip_last_used_slot = ac->ac_alloc_slot; |
2017 | } | 2053 | } |
2018 | 2054 | ||
2055 | int ocfs2_find_new_inode_loc(struct inode *dir, | ||
2056 | struct buffer_head *parent_fe_bh, | ||
2057 | struct ocfs2_alloc_context *ac, | ||
2058 | u64 *fe_blkno) | ||
2059 | { | ||
2060 | int ret; | ||
2061 | handle_t *handle = NULL; | ||
2062 | struct ocfs2_suballoc_result *res; | ||
2063 | |||
2064 | BUG_ON(!ac); | ||
2065 | BUG_ON(ac->ac_bits_given != 0); | ||
2066 | BUG_ON(ac->ac_bits_wanted != 1); | ||
2067 | BUG_ON(ac->ac_which != OCFS2_AC_USE_INODE); | ||
2068 | |||
2069 | res = kzalloc(sizeof(*res), GFP_NOFS); | ||
2070 | if (res == NULL) { | ||
2071 | ret = -ENOMEM; | ||
2072 | mlog_errno(ret); | ||
2073 | goto out; | ||
2074 | } | ||
2075 | |||
2076 | ocfs2_init_inode_ac_group(dir, parent_fe_bh, ac); | ||
2077 | |||
2078 | /* | ||
2079 | * The handle started here is for chain relink. Alternatively, | ||
2080 | * we could just disable relink for these calls. | ||
2081 | */ | ||
2082 | handle = ocfs2_start_trans(OCFS2_SB(dir->i_sb), OCFS2_SUBALLOC_ALLOC); | ||
2083 | if (IS_ERR(handle)) { | ||
2084 | ret = PTR_ERR(handle); | ||
2085 | handle = NULL; | ||
2086 | mlog_errno(ret); | ||
2087 | goto out; | ||
2088 | } | ||
2089 | |||
2090 | /* | ||
2091 | * This will instruct ocfs2_claim_suballoc_bits and | ||
2092 | * ocfs2_search_one_group to search but save actual allocation | ||
2093 | * for later. | ||
2094 | */ | ||
2095 | ac->ac_find_loc_only = 1; | ||
2096 | |||
2097 | ret = ocfs2_claim_suballoc_bits(ac, handle, 1, 1, res); | ||
2098 | if (ret < 0) { | ||
2099 | mlog_errno(ret); | ||
2100 | goto out; | ||
2101 | } | ||
2102 | |||
2103 | ac->ac_find_loc_priv = res; | ||
2104 | *fe_blkno = res->sr_blkno; | ||
2105 | |||
2106 | out: | ||
2107 | if (handle) | ||
2108 | ocfs2_commit_trans(OCFS2_SB(dir->i_sb), handle); | ||
2109 | |||
2110 | if (ret) | ||
2111 | kfree(res); | ||
2112 | |||
2113 | return ret; | ||
2114 | } | ||
2115 | |||
2116 | int ocfs2_claim_new_inode_at_loc(handle_t *handle, | ||
2117 | struct inode *dir, | ||
2118 | struct ocfs2_alloc_context *ac, | ||
2119 | u64 *suballoc_loc, | ||
2120 | u16 *suballoc_bit, | ||
2121 | u64 di_blkno) | ||
2122 | { | ||
2123 | int ret; | ||
2124 | u16 chain; | ||
2125 | struct ocfs2_suballoc_result *res = ac->ac_find_loc_priv; | ||
2126 | struct buffer_head *bg_bh = NULL; | ||
2127 | struct ocfs2_group_desc *bg; | ||
2128 | struct ocfs2_dinode *di = (struct ocfs2_dinode *) ac->ac_bh->b_data; | ||
2129 | |||
2130 | /* | ||
2131 | * Since di_blkno is being passed back in, we check for any | ||
2132 | * inconsistencies which may have happened between | ||
2133 | * calls. These are code bugs as di_blkno is not expected to | ||
2134 | * change once returned from ocfs2_find_new_inode_loc() | ||
2135 | */ | ||
2136 | BUG_ON(res->sr_blkno != di_blkno); | ||
2137 | |||
2138 | ret = ocfs2_read_group_descriptor(ac->ac_inode, di, | ||
2139 | res->sr_bg_stable_blkno, &bg_bh); | ||
2140 | if (ret) { | ||
2141 | mlog_errno(ret); | ||
2142 | goto out; | ||
2143 | } | ||
2144 | |||
2145 | bg = (struct ocfs2_group_desc *) bg_bh->b_data; | ||
2146 | chain = le16_to_cpu(bg->bg_chain); | ||
2147 | |||
2148 | ret = ocfs2_alloc_dinode_update_counts(ac->ac_inode, handle, | ||
2149 | ac->ac_bh, res->sr_bits, | ||
2150 | chain); | ||
2151 | if (ret) { | ||
2152 | mlog_errno(ret); | ||
2153 | goto out; | ||
2154 | } | ||
2155 | |||
2156 | ret = ocfs2_block_group_set_bits(handle, | ||
2157 | ac->ac_inode, | ||
2158 | bg, | ||
2159 | bg_bh, | ||
2160 | res->sr_bit_offset, | ||
2161 | res->sr_bits); | ||
2162 | if (ret < 0) { | ||
2163 | mlog_errno(ret); | ||
2164 | goto out; | ||
2165 | } | ||
2166 | |||
2167 | mlog(0, "Allocated %u bits from suballocator %llu\n", res->sr_bits, | ||
2168 | (unsigned long long)di_blkno); | ||
2169 | |||
2170 | atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs); | ||
2171 | |||
2172 | BUG_ON(res->sr_bits != 1); | ||
2173 | |||
2174 | *suballoc_loc = res->sr_bg_blkno; | ||
2175 | *suballoc_bit = res->sr_bit_offset; | ||
2176 | ac->ac_bits_given++; | ||
2177 | ocfs2_save_inode_ac_group(dir, ac); | ||
2178 | |||
2179 | out: | ||
2180 | brelse(bg_bh); | ||
2181 | |||
2182 | return ret; | ||
2183 | } | ||
2184 | |||
2019 | int ocfs2_claim_new_inode(handle_t *handle, | 2185 | int ocfs2_claim_new_inode(handle_t *handle, |
2020 | struct inode *dir, | 2186 | struct inode *dir, |
2021 | struct buffer_head *parent_fe_bh, | 2187 | struct buffer_head *parent_fe_bh, |
@@ -2567,7 +2733,8 @@ out: | |||
2567 | * suballoc_bit. | 2733 | * suballoc_bit. |
2568 | */ | 2734 | */ |
2569 | static int ocfs2_get_suballoc_slot_bit(struct ocfs2_super *osb, u64 blkno, | 2735 | static int ocfs2_get_suballoc_slot_bit(struct ocfs2_super *osb, u64 blkno, |
2570 | u16 *suballoc_slot, u16 *suballoc_bit) | 2736 | u16 *suballoc_slot, u64 *group_blkno, |
2737 | u16 *suballoc_bit) | ||
2571 | { | 2738 | { |
2572 | int status; | 2739 | int status; |
2573 | struct buffer_head *inode_bh = NULL; | 2740 | struct buffer_head *inode_bh = NULL; |
@@ -2604,6 +2771,8 @@ static int ocfs2_get_suballoc_slot_bit(struct ocfs2_super *osb, u64 blkno, | |||
2604 | *suballoc_slot = le16_to_cpu(inode_fe->i_suballoc_slot); | 2771 | *suballoc_slot = le16_to_cpu(inode_fe->i_suballoc_slot); |
2605 | if (suballoc_bit) | 2772 | if (suballoc_bit) |
2606 | *suballoc_bit = le16_to_cpu(inode_fe->i_suballoc_bit); | 2773 | *suballoc_bit = le16_to_cpu(inode_fe->i_suballoc_bit); |
2774 | if (group_blkno) | ||
2775 | *group_blkno = le64_to_cpu(inode_fe->i_suballoc_loc); | ||
2607 | 2776 | ||
2608 | bail: | 2777 | bail: |
2609 | brelse(inode_bh); | 2778 | brelse(inode_bh); |
@@ -2621,7 +2790,8 @@ bail: | |||
2621 | */ | 2790 | */ |
2622 | static int ocfs2_test_suballoc_bit(struct ocfs2_super *osb, | 2791 | static int ocfs2_test_suballoc_bit(struct ocfs2_super *osb, |
2623 | struct inode *suballoc, | 2792 | struct inode *suballoc, |
2624 | struct buffer_head *alloc_bh, u64 blkno, | 2793 | struct buffer_head *alloc_bh, |
2794 | u64 group_blkno, u64 blkno, | ||
2625 | u16 bit, int *res) | 2795 | u16 bit, int *res) |
2626 | { | 2796 | { |
2627 | struct ocfs2_dinode *alloc_di; | 2797 | struct ocfs2_dinode *alloc_di; |
@@ -2642,10 +2812,8 @@ static int ocfs2_test_suballoc_bit(struct ocfs2_super *osb, | |||
2642 | goto bail; | 2812 | goto bail; |
2643 | } | 2813 | } |
2644 | 2814 | ||
2645 | if (alloc_di->i_suballoc_loc) | 2815 | bg_blkno = group_blkno ? group_blkno : |
2646 | bg_blkno = le64_to_cpu(alloc_di->i_suballoc_loc); | 2816 | ocfs2_which_suballoc_group(blkno, bit); |
2647 | else | ||
2648 | bg_blkno = ocfs2_which_suballoc_group(blkno, bit); | ||
2649 | status = ocfs2_read_group_descriptor(suballoc, alloc_di, bg_blkno, | 2817 | status = ocfs2_read_group_descriptor(suballoc, alloc_di, bg_blkno, |
2650 | &group_bh); | 2818 | &group_bh); |
2651 | if (status < 0) { | 2819 | if (status < 0) { |
@@ -2680,6 +2848,7 @@ bail: | |||
2680 | int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res) | 2848 | int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res) |
2681 | { | 2849 | { |
2682 | int status; | 2850 | int status; |
2851 | u64 group_blkno = 0; | ||
2683 | u16 suballoc_bit = 0, suballoc_slot = 0; | 2852 | u16 suballoc_bit = 0, suballoc_slot = 0; |
2684 | struct inode *inode_alloc_inode; | 2853 | struct inode *inode_alloc_inode; |
2685 | struct buffer_head *alloc_bh = NULL; | 2854 | struct buffer_head *alloc_bh = NULL; |
@@ -2687,7 +2856,7 @@ int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res) | |||
2687 | mlog_entry("blkno: %llu", (unsigned long long)blkno); | 2856 | mlog_entry("blkno: %llu", (unsigned long long)blkno); |
2688 | 2857 | ||
2689 | status = ocfs2_get_suballoc_slot_bit(osb, blkno, &suballoc_slot, | 2858 | status = ocfs2_get_suballoc_slot_bit(osb, blkno, &suballoc_slot, |
2690 | &suballoc_bit); | 2859 | &group_blkno, &suballoc_bit); |
2691 | if (status < 0) { | 2860 | if (status < 0) { |
2692 | mlog(ML_ERROR, "get alloc slot and bit failed %d\n", status); | 2861 | mlog(ML_ERROR, "get alloc slot and bit failed %d\n", status); |
2693 | goto bail; | 2862 | goto bail; |
@@ -2715,7 +2884,7 @@ int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res) | |||
2715 | } | 2884 | } |
2716 | 2885 | ||
2717 | status = ocfs2_test_suballoc_bit(osb, inode_alloc_inode, alloc_bh, | 2886 | status = ocfs2_test_suballoc_bit(osb, inode_alloc_inode, alloc_bh, |
2718 | blkno, suballoc_bit, res); | 2887 | group_blkno, blkno, suballoc_bit, res); |
2719 | if (status < 0) | 2888 | if (status < 0) |
2720 | mlog(ML_ERROR, "test suballoc bit failed %d\n", status); | 2889 | mlog(ML_ERROR, "test suballoc bit failed %d\n", status); |
2721 | 2890 | ||
diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h index a017dd3ee7d9..b8afabfeede4 100644 --- a/fs/ocfs2/suballoc.h +++ b/fs/ocfs2/suballoc.h | |||
@@ -56,6 +56,9 @@ struct ocfs2_alloc_context { | |||
56 | u64 ac_max_block; /* Highest block number to allocate. 0 is | 56 | u64 ac_max_block; /* Highest block number to allocate. 0 is |
57 | is the same as ~0 - unlimited */ | 57 | is the same as ~0 - unlimited */ |
58 | 58 | ||
59 | int ac_find_loc_only; /* hack for reflink operation ordering */ | ||
60 | struct ocfs2_suballoc_result *ac_find_loc_priv; /* */ | ||
61 | |||
59 | struct ocfs2_alloc_reservation *ac_resv; | 62 | struct ocfs2_alloc_reservation *ac_resv; |
60 | }; | 63 | }; |
61 | 64 | ||
@@ -197,4 +200,22 @@ int ocfs2_lock_allocators(struct inode *inode, struct ocfs2_extent_tree *et, | |||
197 | struct ocfs2_alloc_context **meta_ac); | 200 | struct ocfs2_alloc_context **meta_ac); |
198 | 201 | ||
199 | int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res); | 202 | int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res); |
203 | |||
204 | |||
205 | |||
206 | /* | ||
207 | * The following two interfaces are for ocfs2_create_inode_in_orphan(). | ||
208 | */ | ||
209 | int ocfs2_find_new_inode_loc(struct inode *dir, | ||
210 | struct buffer_head *parent_fe_bh, | ||
211 | struct ocfs2_alloc_context *ac, | ||
212 | u64 *fe_blkno); | ||
213 | |||
214 | int ocfs2_claim_new_inode_at_loc(handle_t *handle, | ||
215 | struct inode *dir, | ||
216 | struct ocfs2_alloc_context *ac, | ||
217 | u64 *suballoc_loc, | ||
218 | u16 *suballoc_bit, | ||
219 | u64 di_blkno); | ||
220 | |||
200 | #endif /* _CHAINALLOC_H_ */ | 221 | #endif /* _CHAINALLOC_H_ */ |
diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c index 32499d213fc4..9975457c981f 100644 --- a/fs/ocfs2/symlink.c +++ b/fs/ocfs2/symlink.c | |||
@@ -128,7 +128,7 @@ static void *ocfs2_fast_follow_link(struct dentry *dentry, | |||
128 | } | 128 | } |
129 | 129 | ||
130 | /* Fast symlinks can't be large */ | 130 | /* Fast symlinks can't be large */ |
131 | len = strlen(target); | 131 | len = strnlen(target, ocfs2_fast_symlink_chars(inode->i_sb)); |
132 | link = kzalloc(len + 1, GFP_NOFS); | 132 | link = kzalloc(len + 1, GFP_NOFS); |
133 | if (!link) { | 133 | if (!link) { |
134 | status = -ENOMEM; | 134 | status = -ENOMEM; |
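The switch from strlen() to strnlen() bounds the walk by the number of bytes a fast (inline) symlink can actually hold, so a corrupted on-disk target that is missing its NUL terminator cannot run the length computation past the inline area. A small, hedged userspace sketch of the same defensive pattern; the buffer size stands in for ocfs2_fast_symlink_chars():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define INLINE_AREA 32	/* illustrative stand-in for ocfs2_fast_symlink_chars() */

/* Copy an untrusted, possibly unterminated target out of a fixed-size
 * inline area into a freshly allocated, NUL-terminated buffer. */
static char *copy_inline_target(const char target[INLINE_AREA])
{
	size_t len = strnlen(target, INLINE_AREA);	/* never reads past the area */
	char *link = calloc(1, len + 1);

	if (!link)
		return NULL;
	memcpy(link, target, len);
	return link;
}

int main(void)
{
	char raw[INLINE_AREA];
	char *link;

	memset(raw, 'A', sizeof(raw));		/* deliberately no terminator */
	link = copy_inline_target(raw);
	if (link) {
		printf("len=%zu\n", strlen(link));	/* 32, not a wild read */
		free(link);
	}
	return 0;
}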
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index d03469f61801..06fa5e77c40e 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c | |||
@@ -1286,13 +1286,11 @@ int ocfs2_xattr_get_nolock(struct inode *inode, | |||
1286 | xis.inode_bh = xbs.inode_bh = di_bh; | 1286 | xis.inode_bh = xbs.inode_bh = di_bh; |
1287 | di = (struct ocfs2_dinode *)di_bh->b_data; | 1287 | di = (struct ocfs2_dinode *)di_bh->b_data; |
1288 | 1288 | ||
1289 | down_read(&oi->ip_xattr_sem); | ||
1290 | ret = ocfs2_xattr_ibody_get(inode, name_index, name, buffer, | 1289 | ret = ocfs2_xattr_ibody_get(inode, name_index, name, buffer, |
1291 | buffer_size, &xis); | 1290 | buffer_size, &xis); |
1292 | if (ret == -ENODATA && di->i_xattr_loc) | 1291 | if (ret == -ENODATA && di->i_xattr_loc) |
1293 | ret = ocfs2_xattr_block_get(inode, name_index, name, buffer, | 1292 | ret = ocfs2_xattr_block_get(inode, name_index, name, buffer, |
1294 | buffer_size, &xbs); | 1293 | buffer_size, &xbs); |
1295 | up_read(&oi->ip_xattr_sem); | ||
1296 | 1294 | ||
1297 | return ret; | 1295 | return ret; |
1298 | } | 1296 | } |
@@ -1316,8 +1314,10 @@ static int ocfs2_xattr_get(struct inode *inode, | |||
1316 | mlog_errno(ret); | 1314 | mlog_errno(ret); |
1317 | return ret; | 1315 | return ret; |
1318 | } | 1316 | } |
1317 | down_read(&OCFS2_I(inode)->ip_xattr_sem); | ||
1319 | ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index, | 1318 | ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index, |
1320 | name, buffer, buffer_size); | 1319 | name, buffer, buffer_size); |
1320 | up_read(&OCFS2_I(inode)->ip_xattr_sem); | ||
1321 | 1321 | ||
1322 | ocfs2_inode_unlock(inode, 0); | 1322 | ocfs2_inode_unlock(inode, 0); |
1323 | 1323 | ||
diff --git a/fs/proc/base.c b/fs/proc/base.c index a1c43e7c8a7b..8e4addaa5424 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
@@ -2675,7 +2675,7 @@ static const struct pid_entry tgid_base_stuff[] = { | |||
2675 | INF("auxv", S_IRUSR, proc_pid_auxv), | 2675 | INF("auxv", S_IRUSR, proc_pid_auxv), |
2676 | ONE("status", S_IRUGO, proc_pid_status), | 2676 | ONE("status", S_IRUGO, proc_pid_status), |
2677 | ONE("personality", S_IRUSR, proc_pid_personality), | 2677 | ONE("personality", S_IRUSR, proc_pid_personality), |
2678 | INF("limits", S_IRUSR, proc_pid_limits), | 2678 | INF("limits", S_IRUGO, proc_pid_limits), |
2679 | #ifdef CONFIG_SCHED_DEBUG | 2679 | #ifdef CONFIG_SCHED_DEBUG |
2680 | REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations), | 2680 | REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations), |
2681 | #endif | 2681 | #endif |
@@ -3011,7 +3011,7 @@ static const struct pid_entry tid_base_stuff[] = { | |||
3011 | INF("auxv", S_IRUSR, proc_pid_auxv), | 3011 | INF("auxv", S_IRUSR, proc_pid_auxv), |
3012 | ONE("status", S_IRUGO, proc_pid_status), | 3012 | ONE("status", S_IRUGO, proc_pid_status), |
3013 | ONE("personality", S_IRUSR, proc_pid_personality), | 3013 | ONE("personality", S_IRUSR, proc_pid_personality), |
3014 | INF("limits", S_IRUSR, proc_pid_limits), | 3014 | INF("limits", S_IRUGO, proc_pid_limits), |
3015 | #ifdef CONFIG_SCHED_DEBUG | 3015 | #ifdef CONFIG_SCHED_DEBUG |
3016 | REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations), | 3016 | REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations), |
3017 | #endif | 3017 | #endif |
diff --git a/fs/proc/page.c b/fs/proc/page.c index 180cf5a0bd67..3b8b45660331 100644 --- a/fs/proc/page.c +++ b/fs/proc/page.c | |||
@@ -146,7 +146,7 @@ u64 stable_page_flags(struct page *page) | |||
146 | u |= kpf_copy_bit(k, KPF_HWPOISON, PG_hwpoison); | 146 | u |= kpf_copy_bit(k, KPF_HWPOISON, PG_hwpoison); |
147 | #endif | 147 | #endif |
148 | 148 | ||
149 | #ifdef CONFIG_IA64_UNCACHED_ALLOCATOR | 149 | #ifdef CONFIG_ARCH_USES_PG_UNCACHED |
150 | u |= kpf_copy_bit(k, KPF_UNCACHED, PG_uncached); | 150 | u |= kpf_copy_bit(k, KPF_UNCACHED, PG_uncached); |
151 | #endif | 151 | #endif |
152 | 152 | ||
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 439fc1f1c1c4..1dbca4e8cc16 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c | |||
@@ -224,7 +224,8 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) | |||
224 | /* We don't show the stack guard page in /proc/maps */ | 224 | /* We don't show the stack guard page in /proc/maps */ |
225 | start = vma->vm_start; | 225 | start = vma->vm_start; |
226 | if (vma->vm_flags & VM_GROWSDOWN) | 226 | if (vma->vm_flags & VM_GROWSDOWN) |
227 | start += PAGE_SIZE; | 227 | if (!vma_stack_continue(vma->vm_prev, vma->vm_start)) |
228 | start += PAGE_SIZE; | ||
228 | 229 | ||
229 | seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n", | 230 | seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n", |
230 | start, | 231 | start, |
@@ -362,13 +363,13 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, | |||
362 | mss->referenced += PAGE_SIZE; | 363 | mss->referenced += PAGE_SIZE; |
363 | mapcount = page_mapcount(page); | 364 | mapcount = page_mapcount(page); |
364 | if (mapcount >= 2) { | 365 | if (mapcount >= 2) { |
365 | if (pte_dirty(ptent)) | 366 | if (pte_dirty(ptent) || PageDirty(page)) |
366 | mss->shared_dirty += PAGE_SIZE; | 367 | mss->shared_dirty += PAGE_SIZE; |
367 | else | 368 | else |
368 | mss->shared_clean += PAGE_SIZE; | 369 | mss->shared_clean += PAGE_SIZE; |
369 | mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount; | 370 | mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount; |
370 | } else { | 371 | } else { |
371 | if (pte_dirty(ptent)) | 372 | if (pte_dirty(ptent) || PageDirty(page)) |
372 | mss->private_dirty += PAGE_SIZE; | 373 | mss->private_dirty += PAGE_SIZE; |
373 | else | 374 | else |
374 | mss->private_clean += PAGE_SIZE; | 375 | mss->private_clean += PAGE_SIZE; |
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c index 91c817ff02c3..2367fb3f70bc 100644 --- a/fs/proc/vmcore.c +++ b/fs/proc/vmcore.c | |||
@@ -163,7 +163,7 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer, | |||
163 | 163 | ||
164 | static const struct file_operations proc_vmcore_operations = { | 164 | static const struct file_operations proc_vmcore_operations = { |
165 | .read = read_vmcore, | 165 | .read = read_vmcore, |
166 | .llseek = generic_file_llseek, | 166 | .llseek = default_llseek, |
167 | }; | 167 | }; |
168 | 168 | ||
169 | static struct vmcore* __init get_new_element(void) | 169 | static struct vmcore* __init get_new_element(void) |
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c index f53505de0712..5cbb81e134ac 100644 --- a/fs/reiserfs/ioctl.c +++ b/fs/reiserfs/ioctl.c | |||
@@ -170,6 +170,7 @@ int reiserfs_prepare_write(struct file *f, struct page *page, | |||
170 | int reiserfs_unpack(struct inode *inode, struct file *filp) | 170 | int reiserfs_unpack(struct inode *inode, struct file *filp) |
171 | { | 171 | { |
172 | int retval = 0; | 172 | int retval = 0; |
173 | int depth; | ||
173 | int index; | 174 | int index; |
174 | struct page *page; | 175 | struct page *page; |
175 | struct address_space *mapping; | 176 | struct address_space *mapping; |
@@ -188,8 +189,8 @@ int reiserfs_unpack(struct inode *inode, struct file *filp) | |||
188 | /* we need to make sure nobody is changing the file size beneath | 189 | /* we need to make sure nobody is changing the file size beneath |
189 | ** us | 190 | ** us |
190 | */ | 191 | */ |
191 | mutex_lock(&inode->i_mutex); | 192 | reiserfs_mutex_lock_safe(&inode->i_mutex, inode->i_sb); |
192 | reiserfs_write_lock(inode->i_sb); | 193 | depth = reiserfs_write_lock_once(inode->i_sb); |
193 | 194 | ||
194 | write_from = inode->i_size & (blocksize - 1); | 195 | write_from = inode->i_size & (blocksize - 1); |
195 | /* if we are on a block boundary, we are already unpacked. */ | 196 | /* if we are on a block boundary, we are already unpacked. */ |
@@ -224,6 +225,6 @@ int reiserfs_unpack(struct inode *inode, struct file *filp) | |||
224 | 225 | ||
225 | out: | 226 | out: |
226 | mutex_unlock(&inode->i_mutex); | 227 | mutex_unlock(&inode->i_mutex); |
227 | reiserfs_write_unlock(inode->i_sb); | 228 | reiserfs_write_unlock_once(inode->i_sb, depth); |
228 | return retval; | 229 | return retval; |
229 | } | 230 | } |
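reiserfs_write_lock_once()/reiserfs_write_unlock_once() exist because the reiserfs "big" write lock may already be held when this ioctl runs; the _once variants record whether this call actually took the lock and only release it in that case, instead of unconditionally acquiring and dropping it. A hedged sketch of the general depth-preserving pattern; the names and fields below are illustrative, not the reiserfs internals:

#include <linux/mutex.h>
#include <linux/sched.h>

struct big_lock {
	struct mutex		mtx;
	struct task_struct	*owner;		/* NULL when unlocked */
	int			depth;		/* -1 when unlocked */
};

static int big_lock_once(struct big_lock *bl)
{
	if (bl->owner != current) {
		mutex_lock(&bl->mtx);
		bl->owner = current;
	}
	return bl->depth++;	/* returns -1 when this call took the lock */
}

static void big_unlock_once(struct big_lock *bl, int depth)
{
	bl->depth--;
	if (depth == -1) {	/* we were the outermost holder */
		bl->owner = NULL;
		mutex_unlock(&bl->mtx);
	}
}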
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c index 1b27b5688f62..da3fefe91a8f 100644 --- a/fs/sysfs/file.c +++ b/fs/sysfs/file.c | |||
@@ -340,7 +340,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file) | |||
340 | char *p; | 340 | char *p; |
341 | 341 | ||
342 | p = d_path(&file->f_path, last_sysfs_file, sizeof(last_sysfs_file)); | 342 | p = d_path(&file->f_path, last_sysfs_file, sizeof(last_sysfs_file)); |
343 | if (p) | 343 | if (!IS_ERR(p)) |
344 | memmove(last_sysfs_file, p, strlen(p) + 1); | 344 | memmove(last_sysfs_file, p, strlen(p) + 1); |
345 | 345 | ||
346 | /* need attr_sd for attr and ops, its parent for kobj */ | 346 | /* need attr_sd for attr and ops, its parent for kobj */ |
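The sysfs fix above hinges on a kernel API convention: d_path() never returns NULL on failure, it returns an encoded error pointer, so "if (p)" always passes and a bogus pointer could be dereferenced. The ERR_PTR/IS_ERR/PTR_ERR idiom the fix relies on looks like this in general; the helper below is a hedged illustration, not sysfs code:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/slab.h>

/* Return a buffer or an encoded errno - never NULL on failure. */
static char *make_buffer(size_t len)
{
	char *buf = kzalloc(len, GFP_KERNEL);

	if (!buf)
		return ERR_PTR(-ENOMEM);
	return buf;
}

static int use_buffer(void)
{
	char *p = make_buffer(64);

	if (IS_ERR(p))		/* NOT "if (!p)": errors come back as ERR_PTR values */
		return PTR_ERR(p);

	/* ... use p ... */
	kfree(p);
	return 0;
}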
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index ea79072f5210..286e36e21dae 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c | |||
@@ -440,12 +440,7 @@ _xfs_buf_find( | |||
440 | ASSERT(btp == bp->b_target); | 440 | ASSERT(btp == bp->b_target); |
441 | if (bp->b_file_offset == range_base && | 441 | if (bp->b_file_offset == range_base && |
442 | bp->b_buffer_length == range_length) { | 442 | bp->b_buffer_length == range_length) { |
443 | /* | ||
444 | * If we look at something, bring it to the | ||
445 | * front of the list for next time. | ||
446 | */ | ||
447 | atomic_inc(&bp->b_hold); | 443 | atomic_inc(&bp->b_hold); |
448 | list_move(&bp->b_hash_list, &hash->bh_list); | ||
449 | goto found; | 444 | goto found; |
450 | } | 445 | } |
451 | } | 446 | } |
@@ -1443,8 +1438,7 @@ xfs_alloc_bufhash( | |||
1443 | { | 1438 | { |
1444 | unsigned int i; | 1439 | unsigned int i; |
1445 | 1440 | ||
1446 | btp->bt_hashshift = external ? 3 : 8; /* 8 or 256 buckets */ | 1441 | btp->bt_hashshift = external ? 3 : 12; /* 8 or 4096 buckets */ |
1447 | btp->bt_hashmask = (1 << btp->bt_hashshift) - 1; | ||
1448 | btp->bt_hash = kmem_zalloc_large((1 << btp->bt_hashshift) * | 1442 | btp->bt_hash = kmem_zalloc_large((1 << btp->bt_hashshift) * |
1449 | sizeof(xfs_bufhash_t)); | 1443 | sizeof(xfs_bufhash_t)); |
1450 | for (i = 0; i < (1 << btp->bt_hashshift); i++) { | 1444 | for (i = 0; i < (1 << btp->bt_hashshift); i++) { |
@@ -1938,7 +1932,8 @@ xfs_buf_init(void) | |||
1938 | if (!xfs_buf_zone) | 1932 | if (!xfs_buf_zone) |
1939 | goto out; | 1933 | goto out; |
1940 | 1934 | ||
1941 | xfslogd_workqueue = create_workqueue("xfslogd"); | 1935 | xfslogd_workqueue = alloc_workqueue("xfslogd", |
1936 | WQ_RESCUER | WQ_HIGHPRI, 1); | ||
1942 | if (!xfslogd_workqueue) | 1937 | if (!xfslogd_workqueue) |
1943 | goto out_free_buf_zone; | 1938 | goto out_free_buf_zone; |
1944 | 1939 | ||
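Replacing create_workqueue() with alloc_workqueue() lets the caller pick the workqueue attributes explicitly: WQ_HIGHPRI for prompt log I/O completion and WQ_RESCUER so the queue keeps a rescuer thread and can make forward progress under memory pressure, with max_active set to 1. A hedged usage sketch of that allocation call; the names are illustrative, and WQ_RESCUER is the flag of this kernel era (later superseded by WQ_MEM_RECLAIM):

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static int demo_wq_init(void)
{
	/* High-priority queue with a rescuer thread, at most one work item
	 * in flight at a time. */
	demo_wq = alloc_workqueue("demo_wq", WQ_RESCUER | WQ_HIGHPRI, 1);
	if (!demo_wq)
		return -ENOMEM;
	return 0;
}

static void demo_wq_exit(void)
{
	destroy_workqueue(demo_wq);
}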
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h index d072e5ff923b..2a05614f0b92 100644 --- a/fs/xfs/linux-2.6/xfs_buf.h +++ b/fs/xfs/linux-2.6/xfs_buf.h | |||
@@ -137,7 +137,6 @@ typedef struct xfs_buftarg { | |||
137 | size_t bt_smask; | 137 | size_t bt_smask; |
138 | 138 | ||
139 | /* per device buffer hash table */ | 139 | /* per device buffer hash table */ |
140 | uint bt_hashmask; | ||
141 | uint bt_hashshift; | 140 | uint bt_hashshift; |
142 | xfs_bufhash_t *bt_hash; | 141 | xfs_bufhash_t *bt_hash; |
143 | 142 | ||
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c index 237f5ffb2ee8..3b9e626f7cd1 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.c +++ b/fs/xfs/linux-2.6/xfs_ioctl.c | |||
@@ -785,6 +785,8 @@ xfs_ioc_fsgetxattr( | |||
785 | { | 785 | { |
786 | struct fsxattr fa; | 786 | struct fsxattr fa; |
787 | 787 | ||
788 | memset(&fa, 0, sizeof(struct fsxattr)); | ||
789 | |||
788 | xfs_ilock(ip, XFS_ILOCK_SHARED); | 790 | xfs_ilock(ip, XFS_ILOCK_SHARED); |
789 | fa.fsx_xflags = xfs_ip2xflags(ip); | 791 | fa.fsx_xflags = xfs_ip2xflags(ip); |
790 | fa.fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog; | 792 | fa.fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog; |
@@ -907,6 +909,13 @@ xfs_ioctl_setattr( | |||
907 | return XFS_ERROR(EIO); | 909 | return XFS_ERROR(EIO); |
908 | 910 | ||
909 | /* | 911 | /* |
912 | * Disallow 32bit project ids because on-disk structure | ||
913 | * is 16bit only. | ||
914 | */ | ||
915 | if ((mask & FSX_PROJID) && (fa->fsx_projid > (__uint16_t)-1)) | ||
916 | return XFS_ERROR(EINVAL); | ||
917 | |||
918 | /* | ||
910 | * If disk quotas is on, we make sure that the dquots do exist on disk, | 919 | * If disk quotas is on, we make sure that the dquots do exist on disk, |
911 | * before we start any other transactions. Trying to do this later | 920 | * before we start any other transactions. Trying to do this later |
912 | * is messy. We don't care to take a readlock to look at the ids | 921 | * is messy. We don't care to take a readlock to look at the ids |
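Two separate hardening points are folded into this hunk: the memset() zeroes padding and any fields of struct fsxattr that are not explicitly set before the structure is copied to user space (otherwise stale kernel stack bytes would leak), and the project-ID check rejects values that cannot be represented in the 16-bit on-disk field, since (__uint16_t)-1 is 0xffff, i.e. 65535. A small hedged userspace demonstration of that bound check:

#include <stdio.h>
#include <stdint.h>

/* Reject ids that do not fit the 16-bit on-disk field. */
static int projid_fits(uint32_t projid)
{
	return projid <= (uint16_t)-1;	/* (uint16_t)-1 == 0xffff == 65535 */
}

int main(void)
{
	uint32_t ids[] = { 0, 65535, 65536, 123456 };

	for (unsigned i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
		printf("%u -> %s\n", ids[i],
		       projid_fits(ids[i]) ? "ok" : "rejected");
	return 0;
}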
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c index 68be25dcd301..b1fc2a6bfe83 100644 --- a/fs/xfs/linux-2.6/xfs_iops.c +++ b/fs/xfs/linux-2.6/xfs_iops.c | |||
@@ -664,7 +664,7 @@ xfs_vn_fiemap( | |||
664 | fieinfo->fi_extents_max + 1; | 664 | fieinfo->fi_extents_max + 1; |
665 | bm.bmv_count = min_t(__s32, bm.bmv_count, | 665 | bm.bmv_count = min_t(__s32, bm.bmv_count, |
666 | (PAGE_SIZE * 16 / sizeof(struct getbmapx))); | 666 | (PAGE_SIZE * 16 / sizeof(struct getbmapx))); |
667 | bm.bmv_iflags = BMV_IF_PREALLOC; | 667 | bm.bmv_iflags = BMV_IF_PREALLOC | BMV_IF_NO_HOLES; |
668 | if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) | 668 | if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) |
669 | bm.bmv_iflags |= BMV_IF_ATTRFORK; | 669 | bm.bmv_iflags |= BMV_IF_ATTRFORK; |
670 | if (!(fieinfo->fi_flags & FIEMAP_FLAG_SYNC)) | 670 | if (!(fieinfo->fi_flags & FIEMAP_FLAG_SYNC)) |
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 23f14e595c18..f90dadd5a968 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c | |||
@@ -5533,12 +5533,24 @@ xfs_getbmap( | |||
5533 | map[i].br_startblock)) | 5533 | map[i].br_startblock)) |
5534 | goto out_free_map; | 5534 | goto out_free_map; |
5535 | 5535 | ||
5536 | nexleft--; | ||
5537 | bmv->bmv_offset = | 5536 | bmv->bmv_offset = |
5538 | out[cur_ext].bmv_offset + | 5537 | out[cur_ext].bmv_offset + |
5539 | out[cur_ext].bmv_length; | 5538 | out[cur_ext].bmv_length; |
5540 | bmv->bmv_length = | 5539 | bmv->bmv_length = |
5541 | max_t(__int64_t, 0, bmvend - bmv->bmv_offset); | 5540 | max_t(__int64_t, 0, bmvend - bmv->bmv_offset); |
5541 | |||
5542 | /* | ||
5543 | * In case we don't want to return the hole, | ||
5544 | * don't increase cur_ext so that we can reuse | ||
5545 | * it in the next loop. | ||
5546 | */ | ||
5547 | if ((iflags & BMV_IF_NO_HOLES) && | ||
5548 | map[i].br_startblock == HOLESTARTBLOCK) { | ||
5549 | memset(&out[cur_ext], 0, sizeof(out[cur_ext])); | ||
5550 | continue; | ||
5551 | } | ||
5552 | |||
5553 | nexleft--; | ||
5542 | bmv->bmv_entries++; | 5554 | bmv->bmv_entries++; |
5543 | cur_ext++; | 5555 | cur_ext++; |
5544 | } | 5556 | } |
diff --git a/fs/xfs/xfs_fs.h b/fs/xfs/xfs_fs.h index 7cf7220e7d5f..87c2e9d02288 100644 --- a/fs/xfs/xfs_fs.h +++ b/fs/xfs/xfs_fs.h | |||
@@ -114,8 +114,10 @@ struct getbmapx { | |||
114 | #define BMV_IF_NO_DMAPI_READ 0x2 /* Do not generate DMAPI read event */ | 114 | #define BMV_IF_NO_DMAPI_READ 0x2 /* Do not generate DMAPI read event */ |
115 | #define BMV_IF_PREALLOC 0x4 /* rtn status BMV_OF_PREALLOC if req */ | 115 | #define BMV_IF_PREALLOC 0x4 /* rtn status BMV_OF_PREALLOC if req */ |
116 | #define BMV_IF_DELALLOC 0x8 /* rtn status BMV_OF_DELALLOC if req */ | 116 | #define BMV_IF_DELALLOC 0x8 /* rtn status BMV_OF_DELALLOC if req */ |
117 | #define BMV_IF_NO_HOLES 0x10 /* Do not return holes */ | ||
117 | #define BMV_IF_VALID \ | 118 | #define BMV_IF_VALID \ |
118 | (BMV_IF_ATTRFORK|BMV_IF_NO_DMAPI_READ|BMV_IF_PREALLOC|BMV_IF_DELALLOC) | 119 | (BMV_IF_ATTRFORK|BMV_IF_NO_DMAPI_READ|BMV_IF_PREALLOC| \ |
120 | BMV_IF_DELALLOC|BMV_IF_NO_HOLES) | ||
119 | 121 | ||
120 | /* bmv_oflags values - returned for each non-header segment */ | 122 | /* bmv_oflags values - returned for each non-header segment */ |
121 | #define BMV_OF_PREALLOC 0x1 /* segment = unwritten pre-allocation */ | 123 | #define BMV_OF_PREALLOC 0x1 /* segment = unwritten pre-allocation */ |
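
BMV_IF_NO_HOLES is a request flag for the XFS_IOC_GETBMAPX ioctl; the fiemap path above sets it so holes are simply not reported back. A userspace sketch of the same ioctl, assuming the xfsprogs header location; offsets and lengths follow the existing getbmap convention of 512-byte units:

/* List a file's extents via XFS_IOC_GETBMAPX, skipping holes. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <xfs/xfs.h>            /* struct getbmapx, BMV_*, XFS_IOC_GETBMAPX */

static int print_extents(int fd)
{
        struct getbmapx *bmv;
        int i, nmaps = 32;

        /* Element 0 carries the request and, on return, bmv_entries. */
        bmv = calloc(nmaps + 1, sizeof(*bmv));
        if (!bmv)
                return -1;

        bmv[0].bmv_offset = 0;
        bmv[0].bmv_length = -1;                 /* -1: map to end of file */
        bmv[0].bmv_count  = nmaps + 1;
        bmv[0].bmv_iflags = BMV_IF_PREALLOC | BMV_IF_NO_HOLES;

        if (ioctl(fd, XFS_IOC_GETBMAPX, bmv) < 0) {
                free(bmv);
                return -1;
        }

        for (i = 1; i <= bmv[0].bmv_entries; i++)
                printf("offset %lld len %lld flags %#x\n",
                       (long long)bmv[i].bmv_offset,
                       (long long)bmv[i].bmv_length,
                       (unsigned)bmv[i].bmv_oflags);
        free(bmv);
        return 0;
}
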
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c index ed575fb4b495..7e206fc1fa36 100644 --- a/fs/xfs/xfs_log_cil.c +++ b/fs/xfs/xfs_log_cil.c | |||
@@ -405,9 +405,15 @@ xlog_cil_push( | |||
405 | new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS); | 405 | new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS); |
406 | new_ctx->ticket = xlog_cil_ticket_alloc(log); | 406 | new_ctx->ticket = xlog_cil_ticket_alloc(log); |
407 | 407 | ||
408 | /* lock out transaction commit, but don't block on background push */ | 408 | /* |
409 | * Lock out transaction commit, but don't block for background pushes | ||
410 | * unless we are well over the CIL space limit. See the definition of | ||
411 | * XLOG_CIL_HARD_SPACE_LIMIT() for the full explanation of the logic | ||
412 | * used here. | ||
413 | */ | ||
409 | if (!down_write_trylock(&cil->xc_ctx_lock)) { | 414 | if (!down_write_trylock(&cil->xc_ctx_lock)) { |
410 | if (!push_seq) | 415 | if (!push_seq && |
416 | cil->xc_ctx->space_used < XLOG_CIL_HARD_SPACE_LIMIT(log)) | ||
411 | goto out_free_ticket; | 417 | goto out_free_ticket; |
412 | down_write(&cil->xc_ctx_lock); | 418 | down_write(&cil->xc_ctx_lock); |
413 | } | 419 | } |
@@ -422,7 +428,7 @@ xlog_cil_push( | |||
422 | goto out_skip; | 428 | goto out_skip; |
423 | 429 | ||
424 | /* check for a previously pushed seqeunce */ | 430 | /* check for a previously pushed seqeunce */ |
425 | if (push_seq < cil->xc_ctx->sequence) | 431 | if (push_seq && push_seq < cil->xc_ctx->sequence) |
426 | goto out_skip; | 432 | goto out_skip; |
427 | 433 | ||
428 | /* | 434 | /* |
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h index ced52b98b322..edcdfe01617f 100644 --- a/fs/xfs/xfs_log_priv.h +++ b/fs/xfs/xfs_log_priv.h | |||
@@ -426,13 +426,13 @@ struct xfs_cil { | |||
426 | }; | 426 | }; |
427 | 427 | ||
428 | /* | 428 | /* |
429 | * The amount of log space we should the CIL to aggregate is difficult to size. | 429 | * The amount of log space we allow the CIL to aggregate is difficult to size. |
430 | * Whatever we chose we have to make we can get a reservation for the log space | 430 | * Whatever we choose, we have to make sure we can get a reservation for the |
431 | * effectively, that it is large enough to capture sufficient relogging to | 431 | * log space effectively, that it is large enough to capture sufficient |
432 | * reduce log buffer IO significantly, but it is not too large for the log or | 432 | * relogging to reduce log buffer IO significantly, but it is not too large for |
433 | * induces too much latency when writing out through the iclogs. We track both | 433 | * the log or induces too much latency when writing out through the iclogs. We |
434 | * space consumed and the number of vectors in the checkpoint context, so we | 434 | * track both space consumed and the number of vectors in the checkpoint |
435 | * need to decide which to use for limiting. | 435 | * context, so we need to decide which to use for limiting. |
436 | * | 436 | * |
437 | * Every log buffer we write out during a push needs a header reserved, which | 437 | * Every log buffer we write out during a push needs a header reserved, which |
438 | * is at least one sector and more for v2 logs. Hence we need a reservation of | 438 | * is at least one sector and more for v2 logs. Hence we need a reservation of |
@@ -459,16 +459,21 @@ struct xfs_cil { | |||
459 | * checkpoint transaction ticket is specific to the checkpoint context, rather | 459 | * checkpoint transaction ticket is specific to the checkpoint context, rather |
460 | * than the CIL itself. | 460 | * than the CIL itself. |
461 | * | 461 | * |
462 | * With dynamic reservations, we can basically make up arbitrary limits for the | 462 | * With dynamic reservations, we can effectively make up arbitrary limits for |
463 | * checkpoint size so long as they don't violate any other size rules. Hence | 463 | * the checkpoint size so long as they don't violate any other size rules. |
464 | * the initial maximum size for the checkpoint transaction will be set to a | 464 | * Recovery imposes a rule that no transaction exceed half the log, so we are |
465 | * quarter of the log or 8MB, which ever is smaller. 8MB is an arbitrary limit | 465 | * limited by that. Furthermore, the log transaction reservation subsystem |
466 | * right now based on the latency of writing out a large amount of data through | 466 | * tries to keep 25% of the log free, so we need to keep below that limit or we |
467 | * the circular iclog buffers. | 467 | * risk running out of free log space to start any new transactions. |
468 | * | ||
469 | * In order to keep background CIL push efficient, we will set a lower | ||
470 | * threshold at which background pushing is attempted without blocking current | ||
471 | * transaction commits. A separate, higher bound defines when CIL pushes are | ||
472 | * enforced to ensure we stay within our maximum checkpoint size bounds. | ||
473 | * threshold, yet give us plenty of space for aggregation on large logs. | ||
468 | */ | 474 | */ |
469 | 475 | #define XLOG_CIL_SPACE_LIMIT(log) (log->l_logsize >> 3) | |
470 | #define XLOG_CIL_SPACE_LIMIT(log) \ | 476 | #define XLOG_CIL_HARD_SPACE_LIMIT(log) (3 * (log->l_logsize >> 4)) |
471 | (min((log->l_logsize >> 2), (8 * 1024 * 1024))) | ||
472 | 477 | ||
473 | /* | 478 | /* |
474 | * The reservation head lsn is not made up of a cycle number and block number. | 479 | * The reservation head lsn is not made up of a cycle number and block number. |
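
The rewritten macros express both thresholds as fixed fractions of the log instead of the old quarter-of-log/8 MB cap: a background push is attempted once the CIL holds more than 1/8 of the log, and committers only block on that push beyond 3/16. A worked example with an illustrative log size:

#include <stdio.h>

/* Evaluate the two CIL thresholds for a hypothetical 128 MiB log. */
int main(void)
{
        unsigned long logsize = 128UL << 20;            /* l_logsize */
        unsigned long soft = logsize >> 3;              /* XLOG_CIL_SPACE_LIMIT */
        unsigned long hard = 3 * (logsize >> 4);        /* XLOG_CIL_HARD_SPACE_LIMIT */

        printf("background push above %lu MiB, commits block above %lu MiB\n",
               soft >> 20, hard >> 20);                 /* 16 MiB and 24 MiB */
        return 0;
}
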
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index 66d585c6917c..4c7c7bfb2b2f 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c | |||
@@ -2299,15 +2299,22 @@ xfs_alloc_file_space( | |||
2299 | e = allocatesize_fsb; | 2299 | e = allocatesize_fsb; |
2300 | } | 2300 | } |
2301 | 2301 | ||
2302 | /* | ||
2303 | * The transaction reservation is limited to a 32-bit block | ||
2304 | * count, hence we need to limit the number of blocks we are | ||
2305 | * trying to reserve to avoid an overflow. We can't allocate | ||
2306 | * more than @nimaps extents, and an extent is limited on disk | ||
2307 | * to MAXEXTLEN (21 bits), so use that to enforce the limit. | ||
2308 | */ | ||
2309 | resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps)); | ||
2302 | if (unlikely(rt)) { | 2310 | if (unlikely(rt)) { |
2303 | resrtextents = qblocks = (uint)(e - s); | 2311 | resrtextents = qblocks = resblks; |
2304 | resrtextents /= mp->m_sb.sb_rextsize; | 2312 | resrtextents /= mp->m_sb.sb_rextsize; |
2305 | resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0); | 2313 | resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0); |
2306 | quota_flag = XFS_QMOPT_RES_RTBLKS; | 2314 | quota_flag = XFS_QMOPT_RES_RTBLKS; |
2307 | } else { | 2315 | } else { |
2308 | resrtextents = 0; | 2316 | resrtextents = 0; |
2309 | resblks = qblocks = \ | 2317 | resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks); |
2310 | XFS_DIOSTRAT_SPACE_RES(mp, (uint)(e - s)); | ||
2311 | quota_flag = XFS_QMOPT_RES_REGBLKS; | 2318 | quota_flag = XFS_QMOPT_RES_REGBLKS; |
2312 | } | 2319 | } |
2313 | 2320 | ||
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index baacd98e7cc6..4de84ce3a927 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h | |||
@@ -377,9 +377,6 @@ struct acpi_pci_root { | |||
377 | 377 | ||
378 | u32 osc_support_set; /* _OSC state of support bits */ | 378 | u32 osc_support_set; /* _OSC state of support bits */ |
379 | u32 osc_control_set; /* _OSC state of control bits */ | 379 | u32 osc_control_set; /* _OSC state of control bits */ |
380 | u32 osc_control_qry; /* the latest _OSC query result */ | ||
381 | |||
382 | u32 osc_queried:1; /* has _OSC control been queried? */ | ||
383 | }; | 380 | }; |
384 | 381 | ||
385 | /* helper */ | 382 | /* helper */ |
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index c0786d446a00..984cdc62e30b 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h | |||
@@ -55,7 +55,7 @@ | |||
55 | extern u8 acpi_gbl_permanent_mmap; | 55 | extern u8 acpi_gbl_permanent_mmap; |
56 | 56 | ||
57 | /* | 57 | /* |
58 | * Globals that are publically available, allowing for | 58 | * Globals that are publicly available, allowing for |
59 | * run time configuration | 59 | * run time configuration |
60 | */ | 60 | */ |
61 | extern u32 acpi_dbg_level; | 61 | extern u32 acpi_dbg_level; |
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h index c7376bf80b06..8ca18e26d7e3 100644 --- a/include/asm-generic/gpio.h +++ b/include/asm-generic/gpio.h | |||
@@ -16,15 +16,27 @@ | |||
16 | * While the GPIO programming interface defines valid GPIO numbers | 16 | * While the GPIO programming interface defines valid GPIO numbers |
17 | * to be in the range 0..MAX_INT, this library restricts them to the | 17 | * to be in the range 0..MAX_INT, this library restricts them to the |
18 | * smaller range 0..ARCH_NR_GPIOS-1. | 18 | * smaller range 0..ARCH_NR_GPIOS-1. |
19 | * | ||
20 | * ARCH_NR_GPIOS is somewhat arbitrary; it usually reflects the sum of | ||
21 | * builtin/SoC GPIOs plus a number of GPIOs on expanders; the latter is | ||
22 | * actually an estimate of a board-specific value. | ||
19 | */ | 23 | */ |
20 | 24 | ||
21 | #ifndef ARCH_NR_GPIOS | 25 | #ifndef ARCH_NR_GPIOS |
22 | #define ARCH_NR_GPIOS 256 | 26 | #define ARCH_NR_GPIOS 256 |
23 | #endif | 27 | #endif |
24 | 28 | ||
29 | /* | ||
30 | * "valid" GPIO numbers are nonnegative and may be passed to | ||
31 | * setup routines like gpio_request(). only some valid numbers | ||
32 | * can successfully be requested and used. | ||
33 | * | ||
34 | * Invalid GPIO numbers are useful for indicating no-such-GPIO in | ||
35 | * platform data and other tables. | ||
36 | */ | ||
37 | |||
25 | static inline int gpio_is_valid(int number) | 38 | static inline int gpio_is_valid(int number) |
26 | { | 39 | { |
27 | /* only some non-negative numbers are valid */ | ||
28 | return ((unsigned)number) < ARCH_NR_GPIOS; | 40 | return ((unsigned)number) < ARCH_NR_GPIOS; |
29 | } | 41 | } |
30 | 42 | ||
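
The added comments document the convention that an invalid (typically negative) GPIO number in platform data means "this board has no such line". A driver-side sketch of consuming such an optional pin; the structure and names are hypothetical:

#include <linux/gpio.h>

struct example_pdata {
        int reset_gpio;         /* boards without a reset line set this to -1 */
};

static int example_claim_reset(struct example_pdata *pdata)
{
        int err;

        /* Negative numbers fail gpio_is_valid(), so the pin is optional. */
        if (!gpio_is_valid(pdata->reset_gpio))
                return 0;

        err = gpio_request(pdata->reset_gpio, "example-reset");
        if (err)
                return err;

        return gpio_direction_output(pdata->reset_gpio, 0);
}
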
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h index b5043a9890d8..08923b684768 100644 --- a/include/asm-generic/percpu.h +++ b/include/asm-generic/percpu.h | |||
@@ -70,11 +70,16 @@ extern void setup_per_cpu_areas(void); | |||
70 | 70 | ||
71 | #else /* ! SMP */ | 71 | #else /* ! SMP */ |
72 | 72 | ||
73 | #define per_cpu(var, cpu) (*((void)(cpu), &(var))) | 73 | #define VERIFY_PERCPU_PTR(__p) ({ \ |
74 | #define __get_cpu_var(var) (var) | 74 | __verify_pcpu_ptr((__p)); \ |
75 | #define __raw_get_cpu_var(var) (var) | 75 | (typeof(*(__p)) __kernel __force *)(__p); \ |
76 | #define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0) | 76 | }) |
77 | #define __this_cpu_ptr(ptr) this_cpu_ptr(ptr) | 77 | |
78 | #define per_cpu(var, cpu) (*((void)(cpu), VERIFY_PERCPU_PTR(&(var)))) | ||
79 | #define __get_cpu_var(var) (*VERIFY_PERCPU_PTR(&(var))) | ||
80 | #define __raw_get_cpu_var(var) (*VERIFY_PERCPU_PTR(&(var))) | ||
81 | #define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0) | ||
82 | #define __this_cpu_ptr(ptr) this_cpu_ptr(ptr) | ||
78 | 83 | ||
79 | #endif /* SMP */ | 84 | #endif /* SMP */ |
80 | 85 | ||
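
On !SMP builds these accessors used to expand to the bare variable, so the __verify_pcpu_ptr() type and address-space check only ran on SMP configurations; routing the UP definitions through VERIFY_PERCPU_PTR() makes both build flavours reject the same mistakes. A small usage sketch with a hypothetical counter:

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, example_hits);

static void example_count(void)
{
        /* get_cpu_var() disables preemption and expands to __get_cpu_var(). */
        get_cpu_var(example_hits)++;
        put_cpu_var(example_hits);
}

static unsigned long example_total(void)
{
        unsigned long sum = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                sum += per_cpu(example_hits, cpu);
        return sum;
}
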
diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 7809d230adee..4c9461a4f9e6 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h | |||
@@ -612,7 +612,7 @@ struct drm_gem_object { | |||
612 | struct kref refcount; | 612 | struct kref refcount; |
613 | 613 | ||
614 | /** Handle count of this object. Each handle also holds a reference */ | 614 | /** Handle count of this object. Each handle also holds a reference */ |
615 | struct kref handlecount; | 615 | atomic_t handle_count; /* number of handles on this object */ |
616 | 616 | ||
617 | /** Related drm device */ | 617 | /** Related drm device */ |
618 | struct drm_device *dev; | 618 | struct drm_device *dev; |
@@ -808,7 +808,6 @@ struct drm_driver { | |||
808 | */ | 808 | */ |
809 | int (*gem_init_object) (struct drm_gem_object *obj); | 809 | int (*gem_init_object) (struct drm_gem_object *obj); |
810 | void (*gem_free_object) (struct drm_gem_object *obj); | 810 | void (*gem_free_object) (struct drm_gem_object *obj); |
811 | void (*gem_free_object_unlocked) (struct drm_gem_object *obj); | ||
812 | 811 | ||
813 | /* vga arb irq handler */ | 812 | /* vga arb irq handler */ |
814 | void (*vgaarb_irq)(struct drm_device *dev, bool state); | 813 | void (*vgaarb_irq)(struct drm_device *dev, bool state); |
@@ -1175,6 +1174,7 @@ extern int drm_release(struct inode *inode, struct file *filp); | |||
1175 | extern int drm_mmap(struct file *filp, struct vm_area_struct *vma); | 1174 | extern int drm_mmap(struct file *filp, struct vm_area_struct *vma); |
1176 | extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma); | 1175 | extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma); |
1177 | extern void drm_vm_open_locked(struct vm_area_struct *vma); | 1176 | extern void drm_vm_open_locked(struct vm_area_struct *vma); |
1177 | extern void drm_vm_close_locked(struct vm_area_struct *vma); | ||
1178 | extern resource_size_t drm_core_get_map_ofs(struct drm_local_map * map); | 1178 | extern resource_size_t drm_core_get_map_ofs(struct drm_local_map * map); |
1179 | extern resource_size_t drm_core_get_reg_ofs(struct drm_device *dev); | 1179 | extern resource_size_t drm_core_get_reg_ofs(struct drm_device *dev); |
1180 | extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); | 1180 | extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); |
@@ -1455,12 +1455,11 @@ int drm_gem_init(struct drm_device *dev); | |||
1455 | void drm_gem_destroy(struct drm_device *dev); | 1455 | void drm_gem_destroy(struct drm_device *dev); |
1456 | void drm_gem_object_release(struct drm_gem_object *obj); | 1456 | void drm_gem_object_release(struct drm_gem_object *obj); |
1457 | void drm_gem_object_free(struct kref *kref); | 1457 | void drm_gem_object_free(struct kref *kref); |
1458 | void drm_gem_object_free_unlocked(struct kref *kref); | ||
1459 | struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev, | 1458 | struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev, |
1460 | size_t size); | 1459 | size_t size); |
1461 | int drm_gem_object_init(struct drm_device *dev, | 1460 | int drm_gem_object_init(struct drm_device *dev, |
1462 | struct drm_gem_object *obj, size_t size); | 1461 | struct drm_gem_object *obj, size_t size); |
1463 | void drm_gem_object_handle_free(struct kref *kref); | 1462 | void drm_gem_object_handle_free(struct drm_gem_object *obj); |
1464 | void drm_gem_vm_open(struct vm_area_struct *vma); | 1463 | void drm_gem_vm_open(struct vm_area_struct *vma); |
1465 | void drm_gem_vm_close(struct vm_area_struct *vma); | 1464 | void drm_gem_vm_close(struct vm_area_struct *vma); |
1466 | int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); | 1465 | int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); |
@@ -1483,8 +1482,12 @@ drm_gem_object_unreference(struct drm_gem_object *obj) | |||
1483 | static inline void | 1482 | static inline void |
1484 | drm_gem_object_unreference_unlocked(struct drm_gem_object *obj) | 1483 | drm_gem_object_unreference_unlocked(struct drm_gem_object *obj) |
1485 | { | 1484 | { |
1486 | if (obj != NULL) | 1485 | if (obj != NULL) { |
1487 | kref_put(&obj->refcount, drm_gem_object_free_unlocked); | 1486 | struct drm_device *dev = obj->dev; |
1487 | mutex_lock(&dev->struct_mutex); | ||
1488 | kref_put(&obj->refcount, drm_gem_object_free); | ||
1489 | mutex_unlock(&dev->struct_mutex); | ||
1490 | } | ||
1488 | } | 1491 | } |
1489 | 1492 | ||
1490 | int drm_gem_handle_create(struct drm_file *file_priv, | 1493 | int drm_gem_handle_create(struct drm_file *file_priv, |
@@ -1495,7 +1498,7 @@ static inline void | |||
1495 | drm_gem_object_handle_reference(struct drm_gem_object *obj) | 1498 | drm_gem_object_handle_reference(struct drm_gem_object *obj) |
1496 | { | 1499 | { |
1497 | drm_gem_object_reference(obj); | 1500 | drm_gem_object_reference(obj); |
1498 | kref_get(&obj->handlecount); | 1501 | atomic_inc(&obj->handle_count); |
1499 | } | 1502 | } |
1500 | 1503 | ||
1501 | static inline void | 1504 | static inline void |
@@ -1504,12 +1507,15 @@ drm_gem_object_handle_unreference(struct drm_gem_object *obj) | |||
1504 | if (obj == NULL) | 1507 | if (obj == NULL) |
1505 | return; | 1508 | return; |
1506 | 1509 | ||
1510 | if (atomic_read(&obj->handle_count) == 0) | ||
1511 | return; | ||
1507 | /* | 1512 | /* |
1508 | * Must bump handle count first as this may be the last | 1513 | * Must bump handle count first as this may be the last |
1509 | * ref, in which case the object would disappear before we | 1514 | * ref, in which case the object would disappear before we |
1510 | * checked for a name | 1515 | * checked for a name |
1511 | */ | 1516 | */ |
1512 | kref_put(&obj->handlecount, drm_gem_object_handle_free); | 1517 | if (atomic_dec_and_test(&obj->handle_count)) |
1518 | drm_gem_object_handle_free(obj); | ||
1513 | drm_gem_object_unreference(obj); | 1519 | drm_gem_object_unreference(obj); |
1514 | } | 1520 | } |
1515 | 1521 | ||
@@ -1519,12 +1525,17 @@ drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj) | |||
1519 | if (obj == NULL) | 1525 | if (obj == NULL) |
1520 | return; | 1526 | return; |
1521 | 1527 | ||
1528 | if (atomic_read(&obj->handle_count) == 0) | ||
1529 | return; | ||
1530 | |||
1522 | /* | 1531 | /* |
1523 | * Must bump handle count first as this may be the last | 1532 | * Must bump handle count first as this may be the last |
1524 | * ref, in which case the object would disappear before we | 1533 | * ref, in which case the object would disappear before we |
1525 | * checked for a name | 1534 | * checked for a name |
1526 | */ | 1535 | */ |
1527 | kref_put(&obj->handlecount, drm_gem_object_handle_free); | 1536 | |
1537 | if (atomic_dec_and_test(&obj->handle_count)) | ||
1538 | drm_gem_object_handle_free(obj); | ||
1528 | drm_gem_object_unreference_unlocked(obj); | 1539 | drm_gem_object_unreference_unlocked(obj); |
1529 | } | 1540 | } |
1530 | 1541 | ||
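
With the handle count demoted from a kref to an atomic_t, drm_gem_object_unreference_unlocked() now takes dev->struct_mutex itself before dropping what may be the last reference. A caller-side sketch of the usual create-handle-then-drop pattern; the wrapper function is hypothetical, the drm_gem_* calls are the ones declared above:

#include "drmP.h"

static int example_gem_create(struct drm_device *dev, struct drm_file *file,
                              size_t size, u32 *handle)
{
        struct drm_gem_object *obj;
        int ret;

        obj = drm_gem_object_alloc(dev, size);
        if (obj == NULL)
                return -ENOMEM;

        ret = drm_gem_handle_create(file, obj, handle);
        /*
         * On success the handle holds its own reference, so drop the
         * allocation reference either way.  The _unlocked variant acquires
         * dev->struct_mutex internally and therefore must not be called
         * with that mutex already held.
         */
        drm_gem_object_unreference_unlocked(obj);
        return ret;
}
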
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index c9f3cc5949a8..3e5a51af757c 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h | |||
@@ -386,7 +386,15 @@ struct drm_connector_funcs { | |||
386 | void (*dpms)(struct drm_connector *connector, int mode); | 386 | void (*dpms)(struct drm_connector *connector, int mode); |
387 | void (*save)(struct drm_connector *connector); | 387 | void (*save)(struct drm_connector *connector); |
388 | void (*restore)(struct drm_connector *connector); | 388 | void (*restore)(struct drm_connector *connector); |
389 | enum drm_connector_status (*detect)(struct drm_connector *connector); | 389 | |
390 | /* Check to see if anything is attached to the connector. | ||
391 | * @force is set to false whilst polling, true when checking the | ||
392 | * connector due to user request. @force can be used by the driver | ||
393 | * to avoid expensive, destructive operations during automated | ||
394 | * probing. | ||
395 | */ | ||
396 | enum drm_connector_status (*detect)(struct drm_connector *connector, | ||
397 | bool force); | ||
390 | int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height); | 398 | int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height); |
391 | int (*set_property)(struct drm_connector *connector, struct drm_property *property, | 399 | int (*set_property)(struct drm_connector *connector, struct drm_property *property, |
392 | uint64_t val); | 400 | uint64_t val); |
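
A sketch of a driver implementing the new callback signature: the cheap hotplug status is always consulted, while the expensive or destructive probe only runs when userspace forces a re-detect. Both helper functions are assumptions, not part of the patch:

static bool example_hpd_asserted(struct drm_connector *connector);     /* assumed */
static bool example_load_detect(struct drm_connector *connector);      /* assumed, destructive */

static enum drm_connector_status
example_connector_detect(struct drm_connector *connector, bool force)
{
        if (example_hpd_asserted(connector))
                return connector_status_connected;

        /* While polling, don't disturb the hardware with load detection. */
        if (!force)
                return connector_status_unknown;

        return example_load_detect(connector) ?
                connector_status_connected : connector_status_disconnected;
}
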
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index 3a9940ef728b..883c1d439899 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h | |||
@@ -85,7 +85,6 @@ | |||
85 | {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ | 85 | {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ |
86 | {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ | 86 | {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ |
87 | {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ | 87 | {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ |
88 | {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ | ||
89 | {0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ | 88 | {0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ |
90 | {0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ | 89 | {0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ |
91 | {0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ | 90 | {0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ |
@@ -103,6 +102,7 @@ | |||
103 | {0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 102 | {0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
104 | {0x1002, 0x5652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 103 | {0x1002, 0x5652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
105 | {0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 104 | {0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
105 | {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ | ||
106 | {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \ | 106 | {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \ |
107 | {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \ | 107 | {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \ |
108 | {0x1002, 0x5954, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ | 108 | {0x1002, 0x5954, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ |
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index ccf94dc5acdf..c227757feb06 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h | |||
@@ -304,8 +304,8 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context); | |||
304 | OSC_PCI_EXPRESS_PME_CONTROL | \ | 304 | OSC_PCI_EXPRESS_PME_CONTROL | \ |
305 | OSC_PCI_EXPRESS_AER_CONTROL | \ | 305 | OSC_PCI_EXPRESS_AER_CONTROL | \ |
306 | OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL) | 306 | OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL) |
307 | 307 | extern acpi_status acpi_pci_osc_control_set(acpi_handle handle, | |
308 | extern acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 flags); | 308 | u32 *mask, u32 req); |
309 | extern void acpi_early_init(void); | 309 | extern void acpi_early_init(void); |
310 | 310 | ||
311 | #else /* !CONFIG_ACPI */ | 311 | #else /* !CONFIG_ACPI */ |
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index ed3e92e41c6e..0c991023ee47 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h | |||
@@ -578,7 +578,12 @@ struct task_struct *cgroup_iter_next(struct cgroup *cgrp, | |||
578 | void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it); | 578 | void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it); |
579 | int cgroup_scan_tasks(struct cgroup_scanner *scan); | 579 | int cgroup_scan_tasks(struct cgroup_scanner *scan); |
580 | int cgroup_attach_task(struct cgroup *, struct task_struct *); | 580 | int cgroup_attach_task(struct cgroup *, struct task_struct *); |
581 | int cgroup_attach_task_current_cg(struct task_struct *); | 581 | int cgroup_attach_task_all(struct task_struct *from, struct task_struct *); |
582 | |||
583 | static inline int cgroup_attach_task_current_cg(struct task_struct *tsk) | ||
584 | { | ||
585 | return cgroup_attach_task_all(current, tsk); | ||
586 | } | ||
582 | 587 | ||
583 | /* | 588 | /* |
584 | * CSS ID is ID for cgroup_subsys_state structs under subsys. This only works | 589 | * CSS ID is ID for cgroup_subsys_state structs under subsys. This only works |
@@ -636,6 +641,11 @@ static inline int cgroupstats_build(struct cgroupstats *stats, | |||
636 | } | 641 | } |
637 | 642 | ||
638 | /* No cgroups - nothing to do */ | 643 | /* No cgroups - nothing to do */ |
644 | static inline int cgroup_attach_task_all(struct task_struct *from, | ||
645 | struct task_struct *t) | ||
646 | { | ||
647 | return 0; | ||
648 | } | ||
639 | static inline int cgroup_attach_task_current_cg(struct task_struct *t) | 649 | static inline int cgroup_attach_task_current_cg(struct task_struct *t) |
640 | { | 650 | { |
641 | return 0; | 651 | return 0; |
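
cgroup_attach_task_all() generalizes the old helper: it moves a task into all the cgroups of another task rather than only those of current, and cgroup_attach_task_current_cg() becomes a trivial wrapper. A sketch of the intended use, modelled on spawning a kernel worker on behalf of a user process; the function and thread names are assumptions:

#include <linux/cgroup.h>
#include <linux/kthread.h>
#include <linux/err.h>

static struct task_struct *example_start_worker(int (*fn)(void *), void *data)
{
        struct task_struct *worker;
        int err;

        worker = kthread_create(fn, data, "example-worker");
        if (IS_ERR(worker))
                return worker;

        /* Put the worker in the same cgroups as the requesting task. */
        err = cgroup_attach_task_all(current, worker);
        if (err) {
                kthread_stop(worker);
                return ERR_PTR(err);
        }

        wake_up_process(worker);
        return worker;
}
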
diff --git a/include/linux/compat.h b/include/linux/compat.h index 9ddc8780e8db..5778b559d59c 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h | |||
@@ -360,5 +360,8 @@ extern ssize_t compat_rw_copy_check_uvector(int type, | |||
360 | const struct compat_iovec __user *uvector, unsigned long nr_segs, | 360 | const struct compat_iovec __user *uvector, unsigned long nr_segs, |
361 | unsigned long fast_segs, struct iovec *fast_pointer, | 361 | unsigned long fast_segs, struct iovec *fast_pointer, |
362 | struct iovec **ret_pointer); | 362 | struct iovec **ret_pointer); |
363 | |||
364 | extern void __user *compat_alloc_user_space(unsigned long len); | ||
365 | |||
363 | #endif /* CONFIG_COMPAT */ | 366 | #endif /* CONFIG_COMPAT */ |
364 | #endif /* _LINUX_COMPAT_H */ | 367 | #endif /* _LINUX_COMPAT_H */ |
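
compat_alloc_user_space() carves a scratch area out of the user stack so a compat entry point can rebuild a structure in its native layout in user memory and then call the regular handler. A sketch of that pattern; both structures and the native entry point are assumptions:

#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#include <linux/uaccess.h>

struct compat_example_args {            /* hypothetical 32-bit layout */
        compat_uptr_t   buf;
        u32             len;
};

struct example_args {                   /* hypothetical native layout */
        void __user     *buf;
        u32             len;
};

long example_native_call(struct example_args __user *uargs);   /* assumed */

static long example_compat_call(struct compat_example_args __user *uargs32)
{
        struct example_args __user *uargs;
        compat_uptr_t buf32;
        u32 len;

        if (get_user(buf32, &uargs32->buf) || get_user(len, &uargs32->len))
                return -EFAULT;

        uargs = compat_alloc_user_space(sizeof(*uargs));
        if (put_user(compat_ptr(buf32), &uargs->buf) ||
            put_user(len, &uargs->len))
                return -EFAULT;

        return example_native_call(uargs);
}
#endif
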
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 36ca9721a0c2..1be416bbbb82 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h | |||
@@ -53,6 +53,7 @@ struct cpuidle_state { | |||
53 | #define CPUIDLE_FLAG_BALANCED (0x40) /* medium latency, moderate savings */ | 53 | #define CPUIDLE_FLAG_BALANCED (0x40) /* medium latency, moderate savings */ |
54 | #define CPUIDLE_FLAG_DEEP (0x80) /* high latency, large savings */ | 54 | #define CPUIDLE_FLAG_DEEP (0x80) /* high latency, large savings */ |
55 | #define CPUIDLE_FLAG_IGNORE (0x100) /* ignore during this idle period */ | 55 | #define CPUIDLE_FLAG_IGNORE (0x100) /* ignore during this idle period */ |
56 | #define CPUIDLE_FLAG_TLB_FLUSHED (0x200) /* tlb will be flushed */ | ||
56 | 57 | ||
57 | #define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000) | 58 | #define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000) |
58 | 59 | ||
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index ce29b8151198..ba8319ae5fcc 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h | |||
@@ -102,6 +102,9 @@ static inline u64 dma_get_mask(struct device *dev) | |||
102 | return DMA_BIT_MASK(32); | 102 | return DMA_BIT_MASK(32); |
103 | } | 103 | } |
104 | 104 | ||
105 | #ifdef ARCH_HAS_DMA_SET_COHERENT_MASK | ||
106 | int dma_set_coherent_mask(struct device *dev, u64 mask); | ||
107 | #else | ||
105 | static inline int dma_set_coherent_mask(struct device *dev, u64 mask) | 108 | static inline int dma_set_coherent_mask(struct device *dev, u64 mask) |
106 | { | 109 | { |
107 | if (!dma_supported(dev, mask)) | 110 | if (!dma_supported(dev, mask)) |
@@ -109,6 +112,7 @@ static inline int dma_set_coherent_mask(struct device *dev, u64 mask) | |||
109 | dev->coherent_dma_mask = mask; | 112 | dev->coherent_dma_mask = mask; |
110 | return 0; | 113 | return 0; |
111 | } | 114 | } |
115 | #endif | ||
112 | 116 | ||
113 | extern u64 dma_get_required_mask(struct device *dev); | 117 | extern u64 dma_get_required_mask(struct device *dev); |
114 | 118 | ||
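
The new ARCH_HAS_DMA_SET_COHERENT_MASK hook lets an architecture supply its own dma_set_coherent_mask() while the generic inline stays the default; callers are unchanged either way. Typical driver-side use, sketched:

#include <linux/dma-mapping.h>

static int example_setup_dma(struct device *dev)
{
        int err;

        err = dma_set_mask(dev, DMA_BIT_MASK(64));
        if (err) {
                err = dma_set_mask(dev, DMA_BIT_MASK(32));
                if (err)
                        return err;
        }

        /* Coherent allocations may have a stricter limit than streaming DMA. */
        return dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
}
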
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index c61d4ca27bcc..e2106495cc11 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
@@ -548,7 +548,7 @@ static inline bool dma_dev_has_pq_continue(struct dma_device *dma) | |||
548 | return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE; | 548 | return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE; |
549 | } | 549 | } |
550 | 550 | ||
551 | static unsigned short dma_dev_to_maxpq(struct dma_device *dma) | 551 | static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma) |
552 | { | 552 | { |
553 | return dma->max_pq & ~DMA_HAS_PQ_CONTINUE; | 553 | return dma->max_pq & ~DMA_HAS_PQ_CONTINUE; |
554 | } | 554 | } |
diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 2c958f4fce1e..926b50322a46 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h | |||
@@ -136,6 +136,7 @@ extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t); | |||
136 | 136 | ||
137 | extern int elevator_init(struct request_queue *, char *); | 137 | extern int elevator_init(struct request_queue *, char *); |
138 | extern void elevator_exit(struct elevator_queue *); | 138 | extern void elevator_exit(struct elevator_queue *); |
139 | extern int elevator_change(struct request_queue *, const char *); | ||
139 | extern int elv_rq_merge_ok(struct request *, struct bio *); | 140 | extern int elv_rq_merge_ok(struct request *, struct bio *); |
140 | 141 | ||
141 | /* | 142 | /* |
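
elevator_change() exposes in-kernel the scheduler switch that was previously reachable only through the sysfs store handler, so a block driver can pick an elevator for its own queue. A minimal sketch; the fallback policy and message are assumptions:

#include <linux/blkdev.h>
#include <linux/elevator.h>

static void example_prefer_noop(struct request_queue *q)
{
        if (elevator_change(q, "noop"))
                printk(KERN_WARNING "example: could not switch to noop elevator\n");
}
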
diff --git a/include/linux/fs.h b/include/linux/fs.h index 76041b614758..63d069bd80b7 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -1093,6 +1093,10 @@ struct file_lock { | |||
1093 | 1093 | ||
1094 | #include <linux/fcntl.h> | 1094 | #include <linux/fcntl.h> |
1095 | 1095 | ||
1096 | /* temporary stubs for BKL removal */ | ||
1097 | #define lock_flocks() lock_kernel() | ||
1098 | #define unlock_flocks() unlock_kernel() | ||
1099 | |||
1096 | extern void send_sigio(struct fown_struct *fown, int fd, int band); | 1100 | extern void send_sigio(struct fown_struct *fown, int fd, int band); |
1097 | 1101 | ||
1098 | #ifdef CONFIG_FILE_LOCKING | 1102 | #ifdef CONFIG_FILE_LOCKING |
diff --git a/include/linux/gpio.h b/include/linux/gpio.h index 03f616b78cfa..e41f7dd1ae67 100644 --- a/include/linux/gpio.h +++ b/include/linux/gpio.h | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/errno.h> | 13 | #include <linux/errno.h> |
14 | 14 | ||
15 | struct device; | 15 | struct device; |
16 | struct gpio_chip; | ||
16 | 17 | ||
17 | /* | 18 | /* |
18 | * Some platforms don't support the GPIO programming interface. | 19 | * Some platforms don't support the GPIO programming interface. |
diff --git a/include/linux/i2c/sx150x.h b/include/linux/i2c/sx150x.h index ee3049cb9ba5..52baa79d69a7 100644 --- a/include/linux/i2c/sx150x.h +++ b/include/linux/i2c/sx150x.h | |||
@@ -63,6 +63,9 @@ | |||
63 | * IRQ lines will appear. Similarly to gpio_base, the expander | 63 | * IRQ lines will appear. Similarly to gpio_base, the expander |
64 | * will create a block of irqs beginning at this number. | 64 | * will create a block of irqs beginning at this number. |
65 | * This value is ignored if irq_summary is < 0. | 65 | * This value is ignored if irq_summary is < 0. |
66 | * @reset_during_probe: If set to true, the driver will trigger a full | ||
67 | * reset of the chip at the beginning of the probe | ||
68 | * in order to place it in a known state. | ||
66 | */ | 69 | */ |
67 | struct sx150x_platform_data { | 70 | struct sx150x_platform_data { |
68 | unsigned gpio_base; | 71 | unsigned gpio_base; |
@@ -73,6 +76,7 @@ struct sx150x_platform_data { | |||
73 | u16 io_polarity; | 76 | u16 io_polarity; |
74 | int irq_summary; | 77 | int irq_summary; |
75 | unsigned irq_base; | 78 | unsigned irq_base; |
79 | bool reset_during_probe; | ||
76 | }; | 80 | }; |
77 | 81 | ||
78 | #endif /* __LINUX_I2C_SX150X_H */ | 82 | #endif /* __LINUX_I2C_SX150X_H */ |
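
reset_during_probe lets a board ask the sx150x driver to fully reset the expander before programming it, so probe starts from a known register state. A hypothetical board-file sketch; the device name, I2C address and GPIO base are assumptions, not taken from the patch:

#include <linux/i2c.h>
#include <linux/i2c/sx150x.h>

static struct sx150x_platform_data example_sx150x_pdata = {
        .gpio_base          = 200,
        .irq_summary        = -1,       /* no summary interrupt wired up */
        .reset_during_probe = true,
};

static struct i2c_board_info example_i2c_devices[] __initdata = {
        {
                I2C_BOARD_INFO("sx1509q", 0x3e),
                .platform_data = &example_sx150x_pdata,
        },
};
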
diff --git a/include/linux/intel-gtt.h b/include/linux/intel-gtt.h new file mode 100644 index 000000000000..1d19ab2afa39 --- /dev/null +++ b/include/linux/intel-gtt.h | |||
@@ -0,0 +1,20 @@ | |||
1 | /* | ||
2 | * Common Intel AGPGART and GTT definitions. | ||
3 | */ | ||
4 | #ifndef _INTEL_GTT_H | ||
5 | #define _INTEL_GTT_H | ||
6 | |||
7 | #include <linux/agp_backend.h> | ||
8 | |||
9 | /* This is for Intel only GTT controls. | ||
10 | * | ||
11 | * Sandybridge: AGP_USER_CACHED_MEMORY default to LLC only | ||
12 | */ | ||
13 | |||
14 | #define AGP_USER_CACHED_MEMORY_LLC_MLC (AGP_USER_TYPES + 2) | ||
15 | #define AGP_USER_UNCACHED_MEMORY (AGP_USER_TYPES + 4) | ||
16 | |||
17 | /* flag for GFDT type */ | ||
18 | #define AGP_USER_CACHED_MEMORY_GFDT (1 << 3) | ||
19 | |||
20 | #endif | ||
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h index 0a6b3d5c490c..7fb592793738 100644 --- a/include/linux/io-mapping.h +++ b/include/linux/io-mapping.h | |||
@@ -79,7 +79,7 @@ io_mapping_free(struct io_mapping *mapping) | |||
79 | } | 79 | } |
80 | 80 | ||
81 | /* Atomic map/unmap */ | 81 | /* Atomic map/unmap */ |
82 | static inline void * | 82 | static inline void __iomem * |
83 | io_mapping_map_atomic_wc(struct io_mapping *mapping, | 83 | io_mapping_map_atomic_wc(struct io_mapping *mapping, |
84 | unsigned long offset, | 84 | unsigned long offset, |
85 | int slot) | 85 | int slot) |
@@ -94,12 +94,12 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping, | |||
94 | } | 94 | } |
95 | 95 | ||
96 | static inline void | 96 | static inline void |
97 | io_mapping_unmap_atomic(void *vaddr, int slot) | 97 | io_mapping_unmap_atomic(void __iomem *vaddr, int slot) |
98 | { | 98 | { |
99 | iounmap_atomic(vaddr, slot); | 99 | iounmap_atomic(vaddr, slot); |
100 | } | 100 | } |
101 | 101 | ||
102 | static inline void * | 102 | static inline void __iomem * |
103 | io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) | 103 | io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) |
104 | { | 104 | { |
105 | resource_size_t phys_addr; | 105 | resource_size_t phys_addr; |
@@ -111,7 +111,7 @@ io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) | |||
111 | } | 111 | } |
112 | 112 | ||
113 | static inline void | 113 | static inline void |
114 | io_mapping_unmap(void *vaddr) | 114 | io_mapping_unmap(void __iomem *vaddr) |
115 | { | 115 | { |
116 | iounmap(vaddr); | 116 | iounmap(vaddr); |
117 | } | 117 | } |
@@ -125,38 +125,38 @@ struct io_mapping; | |||
125 | static inline struct io_mapping * | 125 | static inline struct io_mapping * |
126 | io_mapping_create_wc(resource_size_t base, unsigned long size) | 126 | io_mapping_create_wc(resource_size_t base, unsigned long size) |
127 | { | 127 | { |
128 | return (struct io_mapping *) ioremap_wc(base, size); | 128 | return (struct io_mapping __force *) ioremap_wc(base, size); |
129 | } | 129 | } |
130 | 130 | ||
131 | static inline void | 131 | static inline void |
132 | io_mapping_free(struct io_mapping *mapping) | 132 | io_mapping_free(struct io_mapping *mapping) |
133 | { | 133 | { |
134 | iounmap(mapping); | 134 | iounmap((void __force __iomem *) mapping); |
135 | } | 135 | } |
136 | 136 | ||
137 | /* Atomic map/unmap */ | 137 | /* Atomic map/unmap */ |
138 | static inline void * | 138 | static inline void __iomem * |
139 | io_mapping_map_atomic_wc(struct io_mapping *mapping, | 139 | io_mapping_map_atomic_wc(struct io_mapping *mapping, |
140 | unsigned long offset, | 140 | unsigned long offset, |
141 | int slot) | 141 | int slot) |
142 | { | 142 | { |
143 | return ((char *) mapping) + offset; | 143 | return ((char __force __iomem *) mapping) + offset; |
144 | } | 144 | } |
145 | 145 | ||
146 | static inline void | 146 | static inline void |
147 | io_mapping_unmap_atomic(void *vaddr, int slot) | 147 | io_mapping_unmap_atomic(void __iomem *vaddr, int slot) |
148 | { | 148 | { |
149 | } | 149 | } |
150 | 150 | ||
151 | /* Non-atomic map/unmap */ | 151 | /* Non-atomic map/unmap */ |
152 | static inline void * | 152 | static inline void __iomem * |
153 | io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) | 153 | io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) |
154 | { | 154 | { |
155 | return ((char *) mapping) + offset; | 155 | return ((char __force __iomem *) mapping) + offset; |
156 | } | 156 | } |
157 | 157 | ||
158 | static inline void | 158 | static inline void |
159 | io_mapping_unmap(void *vaddr) | 159 | io_mapping_unmap(void __iomem *vaddr) |
160 | { | 160 | { |
161 | } | 161 | } |
162 | 162 | ||
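
With the __iomem annotations in place, pointers handed out by the io_mapping helpers are I/O-memory cookies: sparse now expects them to be read and written through the io accessors and passed back to the unmap helpers unchanged. A usage sketch assuming the 2.6.36-era atomic-kmap slot argument:

#include <linux/highmem.h>
#include <linux/io-mapping.h>
#include <linux/io.h>

static u32 example_read_dword(struct io_mapping *map, unsigned long offset)
{
        void __iomem *vaddr;
        u32 val;

        vaddr = io_mapping_map_atomic_wc(map, offset & PAGE_MASK, KM_USER0);
        val = readl(vaddr + (offset & ~PAGE_MASK));
        io_mapping_unmap_atomic(vaddr, KM_USER0);
        return val;
}
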
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h index 4aa95f203f3e..62dbee554f60 100644 --- a/include/linux/kfifo.h +++ b/include/linux/kfifo.h | |||
@@ -214,7 +214,7 @@ __kfifo_must_check_helper(unsigned int val) | |||
214 | */ | 214 | */ |
215 | #define kfifo_reset(fifo) \ | 215 | #define kfifo_reset(fifo) \ |
216 | (void)({ \ | 216 | (void)({ \ |
217 | typeof(fifo + 1) __tmp = (fifo); \ | 217 | typeof((fifo) + 1) __tmp = (fifo); \ |
218 | __tmp->kfifo.in = __tmp->kfifo.out = 0; \ | 218 | __tmp->kfifo.in = __tmp->kfifo.out = 0; \ |
219 | }) | 219 | }) |
220 | 220 | ||
@@ -228,7 +228,7 @@ __kfifo_must_check_helper(unsigned int val) | |||
228 | */ | 228 | */ |
229 | #define kfifo_reset_out(fifo) \ | 229 | #define kfifo_reset_out(fifo) \ |
230 | (void)({ \ | 230 | (void)({ \ |
231 | typeof(fifo + 1) __tmp = (fifo); \ | 231 | typeof((fifo) + 1) __tmp = (fifo); \ |
232 | __tmp->kfifo.out = __tmp->kfifo.in; \ | 232 | __tmp->kfifo.out = __tmp->kfifo.in; \ |
233 | }) | 233 | }) |
234 | 234 | ||
@@ -238,7 +238,7 @@ __kfifo_must_check_helper(unsigned int val) | |||
238 | */ | 238 | */ |
239 | #define kfifo_len(fifo) \ | 239 | #define kfifo_len(fifo) \ |
240 | ({ \ | 240 | ({ \ |
241 | typeof(fifo + 1) __tmpl = (fifo); \ | 241 | typeof((fifo) + 1) __tmpl = (fifo); \ |
242 | __tmpl->kfifo.in - __tmpl->kfifo.out; \ | 242 | __tmpl->kfifo.in - __tmpl->kfifo.out; \ |
243 | }) | 243 | }) |
244 | 244 | ||
@@ -248,7 +248,7 @@ __kfifo_must_check_helper(unsigned int val) | |||
248 | */ | 248 | */ |
249 | #define kfifo_is_empty(fifo) \ | 249 | #define kfifo_is_empty(fifo) \ |
250 | ({ \ | 250 | ({ \ |
251 | typeof(fifo + 1) __tmpq = (fifo); \ | 251 | typeof((fifo) + 1) __tmpq = (fifo); \ |
252 | __tmpq->kfifo.in == __tmpq->kfifo.out; \ | 252 | __tmpq->kfifo.in == __tmpq->kfifo.out; \ |
253 | }) | 253 | }) |
254 | 254 | ||
@@ -258,7 +258,7 @@ __kfifo_must_check_helper(unsigned int val) | |||
258 | */ | 258 | */ |
259 | #define kfifo_is_full(fifo) \ | 259 | #define kfifo_is_full(fifo) \ |
260 | ({ \ | 260 | ({ \ |
261 | typeof(fifo + 1) __tmpq = (fifo); \ | 261 | typeof((fifo) + 1) __tmpq = (fifo); \ |
262 | kfifo_len(__tmpq) > __tmpq->kfifo.mask; \ | 262 | kfifo_len(__tmpq) > __tmpq->kfifo.mask; \ |
263 | }) | 263 | }) |
264 | 264 | ||
@@ -269,7 +269,7 @@ __kfifo_must_check_helper(unsigned int val) | |||
269 | #define kfifo_avail(fifo) \ | 269 | #define kfifo_avail(fifo) \ |
270 | __kfifo_must_check_helper( \ | 270 | __kfifo_must_check_helper( \ |
271 | ({ \ | 271 | ({ \ |
272 | typeof(fifo + 1) __tmpq = (fifo); \ | 272 | typeof((fifo) + 1) __tmpq = (fifo); \ |
273 | const size_t __recsize = sizeof(*__tmpq->rectype); \ | 273 | const size_t __recsize = sizeof(*__tmpq->rectype); \ |
274 | unsigned int __avail = kfifo_size(__tmpq) - kfifo_len(__tmpq); \ | 274 | unsigned int __avail = kfifo_size(__tmpq) - kfifo_len(__tmpq); \ |
275 | (__recsize) ? ((__avail <= __recsize) ? 0 : \ | 275 | (__recsize) ? ((__avail <= __recsize) ? 0 : \ |
@@ -284,7 +284,7 @@ __kfifo_must_check_helper( \ | |||
284 | */ | 284 | */ |
285 | #define kfifo_skip(fifo) \ | 285 | #define kfifo_skip(fifo) \ |
286 | (void)({ \ | 286 | (void)({ \ |
287 | typeof(fifo + 1) __tmp = (fifo); \ | 287 | typeof((fifo) + 1) __tmp = (fifo); \ |
288 | const size_t __recsize = sizeof(*__tmp->rectype); \ | 288 | const size_t __recsize = sizeof(*__tmp->rectype); \ |
289 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | 289 | struct __kfifo *__kfifo = &__tmp->kfifo; \ |
290 | if (__recsize) \ | 290 | if (__recsize) \ |
@@ -302,7 +302,7 @@ __kfifo_must_check_helper( \ | |||
302 | #define kfifo_peek_len(fifo) \ | 302 | #define kfifo_peek_len(fifo) \ |
303 | __kfifo_must_check_helper( \ | 303 | __kfifo_must_check_helper( \ |
304 | ({ \ | 304 | ({ \ |
305 | typeof(fifo + 1) __tmp = (fifo); \ | 305 | typeof((fifo) + 1) __tmp = (fifo); \ |
306 | const size_t __recsize = sizeof(*__tmp->rectype); \ | 306 | const size_t __recsize = sizeof(*__tmp->rectype); \ |
307 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | 307 | struct __kfifo *__kfifo = &__tmp->kfifo; \ |
308 | (!__recsize) ? kfifo_len(__tmp) * sizeof(*__tmp->type) : \ | 308 | (!__recsize) ? kfifo_len(__tmp) * sizeof(*__tmp->type) : \ |
@@ -325,7 +325,7 @@ __kfifo_must_check_helper( \ | |||
325 | #define kfifo_alloc(fifo, size, gfp_mask) \ | 325 | #define kfifo_alloc(fifo, size, gfp_mask) \ |
326 | __kfifo_must_check_helper( \ | 326 | __kfifo_must_check_helper( \ |
327 | ({ \ | 327 | ({ \ |
328 | typeof(fifo + 1) __tmp = (fifo); \ | 328 | typeof((fifo) + 1) __tmp = (fifo); \ |
329 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | 329 | struct __kfifo *__kfifo = &__tmp->kfifo; \ |
330 | __is_kfifo_ptr(__tmp) ? \ | 330 | __is_kfifo_ptr(__tmp) ? \ |
331 | __kfifo_alloc(__kfifo, size, sizeof(*__tmp->type), gfp_mask) : \ | 331 | __kfifo_alloc(__kfifo, size, sizeof(*__tmp->type), gfp_mask) : \ |
@@ -339,7 +339,7 @@ __kfifo_must_check_helper( \ | |||
339 | */ | 339 | */ |
340 | #define kfifo_free(fifo) \ | 340 | #define kfifo_free(fifo) \ |
341 | ({ \ | 341 | ({ \ |
342 | typeof(fifo + 1) __tmp = (fifo); \ | 342 | typeof((fifo) + 1) __tmp = (fifo); \ |
343 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | 343 | struct __kfifo *__kfifo = &__tmp->kfifo; \ |
344 | if (__is_kfifo_ptr(__tmp)) \ | 344 | if (__is_kfifo_ptr(__tmp)) \ |
345 | __kfifo_free(__kfifo); \ | 345 | __kfifo_free(__kfifo); \ |
@@ -358,7 +358,7 @@ __kfifo_must_check_helper( \ | |||
358 | */ | 358 | */ |
359 | #define kfifo_init(fifo, buffer, size) \ | 359 | #define kfifo_init(fifo, buffer, size) \ |
360 | ({ \ | 360 | ({ \ |
361 | typeof(fifo + 1) __tmp = (fifo); \ | 361 | typeof((fifo) + 1) __tmp = (fifo); \ |
362 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | 362 | struct __kfifo *__kfifo = &__tmp->kfifo; \ |
363 | __is_kfifo_ptr(__tmp) ? \ | 363 | __is_kfifo_ptr(__tmp) ? \ |
364 | __kfifo_init(__kfifo, buffer, size, sizeof(*__tmp->type)) : \ | 364 | __kfifo_init(__kfifo, buffer, size, sizeof(*__tmp->type)) : \ |
@@ -379,8 +379,8 @@ __kfifo_must_check_helper( \ | |||
379 | */ | 379 | */ |
380 | #define kfifo_put(fifo, val) \ | 380 | #define kfifo_put(fifo, val) \ |
381 | ({ \ | 381 | ({ \ |
382 | typeof(fifo + 1) __tmp = (fifo); \ | 382 | typeof((fifo) + 1) __tmp = (fifo); \ |
383 | typeof(val + 1) __val = (val); \ | 383 | typeof((val) + 1) __val = (val); \ |
384 | unsigned int __ret; \ | 384 | unsigned int __ret; \ |
385 | const size_t __recsize = sizeof(*__tmp->rectype); \ | 385 | const size_t __recsize = sizeof(*__tmp->rectype); \ |
386 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | 386 | struct __kfifo *__kfifo = &__tmp->kfifo; \ |
@@ -421,8 +421,8 @@ __kfifo_must_check_helper( \ | |||
421 | #define kfifo_get(fifo, val) \ | 421 | #define kfifo_get(fifo, val) \ |
422 | __kfifo_must_check_helper( \ | 422 | __kfifo_must_check_helper( \ |
423 | ({ \ | 423 | ({ \ |
424 | typeof(fifo + 1) __tmp = (fifo); \ | 424 | typeof((fifo) + 1) __tmp = (fifo); \ |
425 | typeof(val + 1) __val = (val); \ | 425 | typeof((val) + 1) __val = (val); \ |
426 | unsigned int __ret; \ | 426 | unsigned int __ret; \ |
427 | const size_t __recsize = sizeof(*__tmp->rectype); \ | 427 | const size_t __recsize = sizeof(*__tmp->rectype); \ |
428 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | 428 | struct __kfifo *__kfifo = &__tmp->kfifo; \ |
@@ -462,8 +462,8 @@ __kfifo_must_check_helper( \ | |||
462 | #define kfifo_peek(fifo, val) \ | 462 | #define kfifo_peek(fifo, val) \ |
463 | __kfifo_must_check_helper( \ | 463 | __kfifo_must_check_helper( \ |
464 | ({ \ | 464 | ({ \ |
465 | typeof(fifo + 1) __tmp = (fifo); \ | 465 | typeof((fifo) + 1) __tmp = (fifo); \ |
466 | typeof(val + 1) __val = (val); \ | 466 | typeof((val) + 1) __val = (val); \ |
467 | unsigned int __ret; \ | 467 | unsigned int __ret; \ |
468 | const size_t __recsize = sizeof(*__tmp->rectype); \ | 468 | const size_t __recsize = sizeof(*__tmp->rectype); \ |
469 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | 469 | struct __kfifo *__kfifo = &__tmp->kfifo; \ |
@@ -501,8 +501,8 @@ __kfifo_must_check_helper( \ | |||
501 | */ | 501 | */ |
502 | #define kfifo_in(fifo, buf, n) \ | 502 | #define kfifo_in(fifo, buf, n) \ |
503 | ({ \ | 503 | ({ \ |
504 | typeof(fifo + 1) __tmp = (fifo); \ | 504 | typeof((fifo) + 1) __tmp = (fifo); \ |
505 | typeof(buf + 1) __buf = (buf); \ | 505 | typeof((buf) + 1) __buf = (buf); \ |
506 | unsigned long __n = (n); \ | 506 | unsigned long __n = (n); \ |
507 | const size_t __recsize = sizeof(*__tmp->rectype); \ | 507 | const size_t __recsize = sizeof(*__tmp->rectype); \ |
508 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | 508 | struct __kfifo *__kfifo = &__tmp->kfifo; \ |
@@ -554,8 +554,8 @@ __kfifo_must_check_helper( \ | |||
554 | #define kfifo_out(fifo, buf, n) \ | 554 | #define kfifo_out(fifo, buf, n) \ |
555 | __kfifo_must_check_helper( \ | 555 | __kfifo_must_check_helper( \ |
556 | ({ \ | 556 | ({ \ |
557 | typeof(fifo + 1) __tmp = (fifo); \ | 557 | typeof((fifo) + 1) __tmp = (fifo); \ |
558 | typeof(buf + 1) __buf = (buf); \ | 558 | typeof((buf) + 1) __buf = (buf); \ |
559 | unsigned long __n = (n); \ | 559 | unsigned long __n = (n); \ |
560 | const size_t __recsize = sizeof(*__tmp->rectype); \ | 560 | const size_t __recsize = sizeof(*__tmp->rectype); \ |
561 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | 561 | struct __kfifo *__kfifo = &__tmp->kfifo; \ |
@@ -611,7 +611,7 @@ __kfifo_must_check_helper( \ | |||
611 | #define kfifo_from_user(fifo, from, len, copied) \ | 611 | #define kfifo_from_user(fifo, from, len, copied) \ |
612 | __kfifo_must_check_helper( \ | 612 | __kfifo_must_check_helper( \ |
613 | ({ \ | 613 | ({ \ |
614 | typeof(fifo + 1) __tmp = (fifo); \ | 614 | typeof((fifo) + 1) __tmp = (fifo); \ |
615 | const void __user *__from = (from); \ | 615 | const void __user *__from = (from); \ |
616 | unsigned int __len = (len); \ | 616 | unsigned int __len = (len); \ |
617 | unsigned int *__copied = (copied); \ | 617 | unsigned int *__copied = (copied); \ |
@@ -639,7 +639,7 @@ __kfifo_must_check_helper( \ | |||
639 | #define kfifo_to_user(fifo, to, len, copied) \ | 639 | #define kfifo_to_user(fifo, to, len, copied) \ |
640 | __kfifo_must_check_helper( \ | 640 | __kfifo_must_check_helper( \ |
641 | ({ \ | 641 | ({ \ |
642 | typeof(fifo + 1) __tmp = (fifo); \ | 642 | typeof((fifo) + 1) __tmp = (fifo); \ |
643 | void __user *__to = (to); \ | 643 | void __user *__to = (to); \ |
644 | unsigned int __len = (len); \ | 644 | unsigned int __len = (len); \ |
645 | unsigned int *__copied = (copied); \ | 645 | unsigned int *__copied = (copied); \ |
@@ -666,7 +666,7 @@ __kfifo_must_check_helper( \ | |||
666 | */ | 666 | */ |
667 | #define kfifo_dma_in_prepare(fifo, sgl, nents, len) \ | 667 | #define kfifo_dma_in_prepare(fifo, sgl, nents, len) \ |
668 | ({ \ | 668 | ({ \ |
669 | typeof(fifo + 1) __tmp = (fifo); \ | 669 | typeof((fifo) + 1) __tmp = (fifo); \ |
670 | struct scatterlist *__sgl = (sgl); \ | 670 | struct scatterlist *__sgl = (sgl); \ |
671 | int __nents = (nents); \ | 671 | int __nents = (nents); \ |
672 | unsigned int __len = (len); \ | 672 | unsigned int __len = (len); \ |
@@ -690,7 +690,7 @@ __kfifo_must_check_helper( \ | |||
690 | */ | 690 | */ |
691 | #define kfifo_dma_in_finish(fifo, len) \ | 691 | #define kfifo_dma_in_finish(fifo, len) \ |
692 | (void)({ \ | 692 | (void)({ \ |
693 | typeof(fifo + 1) __tmp = (fifo); \ | 693 | typeof((fifo) + 1) __tmp = (fifo); \ |
694 | unsigned int __len = (len); \ | 694 | unsigned int __len = (len); \ |
695 | const size_t __recsize = sizeof(*__tmp->rectype); \ | 695 | const size_t __recsize = sizeof(*__tmp->rectype); \ |
696 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | 696 | struct __kfifo *__kfifo = &__tmp->kfifo; \ |
@@ -717,7 +717,7 @@ __kfifo_must_check_helper( \ | |||
717 | */ | 717 | */ |
718 | #define kfifo_dma_out_prepare(fifo, sgl, nents, len) \ | 718 | #define kfifo_dma_out_prepare(fifo, sgl, nents, len) \ |
719 | ({ \ | 719 | ({ \ |
720 | typeof(fifo + 1) __tmp = (fifo); \ | 720 | typeof((fifo) + 1) __tmp = (fifo); \ |
721 | struct scatterlist *__sgl = (sgl); \ | 721 | struct scatterlist *__sgl = (sgl); \ |
722 | int __nents = (nents); \ | 722 | int __nents = (nents); \ |
723 | unsigned int __len = (len); \ | 723 | unsigned int __len = (len); \ |
@@ -741,7 +741,7 @@ __kfifo_must_check_helper( \ | |||
741 | */ | 741 | */ |
742 | #define kfifo_dma_out_finish(fifo, len) \ | 742 | #define kfifo_dma_out_finish(fifo, len) \ |
743 | (void)({ \ | 743 | (void)({ \ |
744 | typeof(fifo + 1) __tmp = (fifo); \ | 744 | typeof((fifo) + 1) __tmp = (fifo); \ |
745 | unsigned int __len = (len); \ | 745 | unsigned int __len = (len); \ |
746 | const size_t __recsize = sizeof(*__tmp->rectype); \ | 746 | const size_t __recsize = sizeof(*__tmp->rectype); \ |
747 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | 747 | struct __kfifo *__kfifo = &__tmp->kfifo; \ |
@@ -766,8 +766,8 @@ __kfifo_must_check_helper( \ | |||
766 | #define kfifo_out_peek(fifo, buf, n) \ | 766 | #define kfifo_out_peek(fifo, buf, n) \ |
767 | __kfifo_must_check_helper( \ | 767 | __kfifo_must_check_helper( \ |
768 | ({ \ | 768 | ({ \ |
769 | typeof(fifo + 1) __tmp = (fifo); \ | 769 | typeof((fifo) + 1) __tmp = (fifo); \ |
770 | typeof(buf + 1) __buf = (buf); \ | 770 | typeof((buf) + 1) __buf = (buf); \ |
771 | unsigned long __n = (n); \ | 771 | unsigned long __n = (n); \ |
772 | const size_t __recsize = sizeof(*__tmp->rectype); \ | 772 | const size_t __recsize = sizeof(*__tmp->rectype); \ |
773 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | 773 | struct __kfifo *__kfifo = &__tmp->kfifo; \ |
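
The kfifo changes are pure macro hygiene: every macro parameter is now parenthesized before being used inside typeof() and the expression bodies, so passing a non-trivial expression as the fifo, buffer or value argument expands as intended. For reference, a minimal use of the macros touched here, in the pointer-taking calling convention of this kernel generation; the fifo name is hypothetical:

#include <linux/kfifo.h>

static DEFINE_KFIFO(example_fifo, int, 16);     /* element count must be a power of two */

static void example_produce(int v)
{
        /* kfifo_put() returns the number of elements stored: 0 means full. */
        if (!kfifo_put(&example_fifo, &v))
                pr_debug("example: fifo full, dropped %d\n", v);
}

static int example_consume(int *v)
{
        /* Returns the number of elements copied out: 0 means the fifo was empty. */
        return kfifo_get(&example_fifo, v);
}
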
diff --git a/include/linux/ksm.h b/include/linux/ksm.h index 74d691ee9121..3319a6967626 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h | |||
@@ -16,6 +16,9 @@ | |||
16 | struct stable_node; | 16 | struct stable_node; |
17 | struct mem_cgroup; | 17 | struct mem_cgroup; |
18 | 18 | ||
19 | struct page *ksm_does_need_to_copy(struct page *page, | ||
20 | struct vm_area_struct *vma, unsigned long address); | ||
21 | |||
19 | #ifdef CONFIG_KSM | 22 | #ifdef CONFIG_KSM |
20 | int ksm_madvise(struct vm_area_struct *vma, unsigned long start, | 23 | int ksm_madvise(struct vm_area_struct *vma, unsigned long start, |
21 | unsigned long end, int advice, unsigned long *vm_flags); | 24 | unsigned long end, int advice, unsigned long *vm_flags); |
@@ -70,19 +73,14 @@ static inline void set_page_stable_node(struct page *page, | |||
70 | * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE, | 73 | * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE, |
71 | * but what if the vma was unmerged while the page was swapped out? | 74 | * but what if the vma was unmerged while the page was swapped out? |
72 | */ | 75 | */ |
73 | struct page *ksm_does_need_to_copy(struct page *page, | 76 | static inline int ksm_might_need_to_copy(struct page *page, |
74 | struct vm_area_struct *vma, unsigned long address); | ||
75 | static inline struct page *ksm_might_need_to_copy(struct page *page, | ||
76 | struct vm_area_struct *vma, unsigned long address) | 77 | struct vm_area_struct *vma, unsigned long address) |
77 | { | 78 | { |
78 | struct anon_vma *anon_vma = page_anon_vma(page); | 79 | struct anon_vma *anon_vma = page_anon_vma(page); |
79 | 80 | ||
80 | if (!anon_vma || | 81 | return anon_vma && |
81 | (anon_vma->root == vma->anon_vma->root && | 82 | (anon_vma->root != vma->anon_vma->root || |
82 | page->index == linear_page_index(vma, address))) | 83 | page->index != linear_page_index(vma, address)); |
83 | return page; | ||
84 | |||
85 | return ksm_does_need_to_copy(page, vma, address); | ||
86 | } | 84 | } |
87 | 85 | ||
88 | int page_referenced_ksm(struct page *page, | 86 | int page_referenced_ksm(struct page *page, |
@@ -115,10 +113,10 @@ static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start, | |||
115 | return 0; | 113 | return 0; |
116 | } | 114 | } |
117 | 115 | ||
118 | static inline struct page *ksm_might_need_to_copy(struct page *page, | 116 | static inline int ksm_might_need_to_copy(struct page *page, |
119 | struct vm_area_struct *vma, unsigned long address) | 117 | struct vm_area_struct *vma, unsigned long address) |
120 | { | 118 | { |
121 | return page; | 119 | return 0; |
122 | } | 120 | } |
123 | 121 | ||
124 | static inline int page_referenced_ksm(struct page *page, | 122 | static inline int page_referenced_ksm(struct page *page, |
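
ksm_might_need_to_copy() is reduced to a predicate: it only reports whether the swapcache page might still be shared by KSM in another address space, and the caller decides when to actually perform the copy via ksm_does_need_to_copy() (which is why that declaration moves out of the #ifdef). A caller-side sketch of the new convention; the wrapper is an assumption, the real user is the swap-in fault path:

#include <linux/ksm.h>
#include <linux/mm.h>

static struct page *example_swapin_fixup(struct page *page,
                                         struct vm_area_struct *vma,
                                         unsigned long address)
{
        /* Cheap check first: can this swapcache page be mapped as-is? */
        if (!ksm_might_need_to_copy(page, vma, address))
                return page;

        /* May allocate and copy; returns the page to map, or NULL on OOM. */
        return ksm_does_need_to_copy(page, vma, address);
}
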
diff --git a/include/linux/lglock.h b/include/linux/lglock.h index b288cb713b90..f549056fb20b 100644 --- a/include/linux/lglock.h +++ b/include/linux/lglock.h | |||
@@ -150,7 +150,7 @@ | |||
150 | int i; \ | 150 | int i; \ |
151 | preempt_disable(); \ | 151 | preempt_disable(); \ |
152 | rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \ | 152 | rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \ |
153 | for_each_online_cpu(i) { \ | 153 | for_each_possible_cpu(i) { \ |
154 | arch_spinlock_t *lock; \ | 154 | arch_spinlock_t *lock; \ |
155 | lock = &per_cpu(name##_lock, i); \ | 155 | lock = &per_cpu(name##_lock, i); \ |
156 | arch_spin_lock(lock); \ | 156 | arch_spin_lock(lock); \ |
@@ -161,7 +161,7 @@ | |||
161 | void name##_global_unlock(void) { \ | 161 | void name##_global_unlock(void) { \ |
162 | int i; \ | 162 | int i; \ |
163 | rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \ | 163 | rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \ |
164 | for_each_online_cpu(i) { \ | 164 | for_each_possible_cpu(i) { \ |
165 | arch_spinlock_t *lock; \ | 165 | arch_spinlock_t *lock; \ |
166 | lock = &per_cpu(name##_lock, i); \ | 166 | lock = &per_cpu(name##_lock, i); \ |
167 | arch_spin_unlock(lock); \ | 167 | arch_spin_unlock(lock); \ |
diff --git a/include/linux/libata.h b/include/linux/libata.h index f010f18a0f86..45fb2967b66d 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
@@ -335,6 +335,7 @@ enum { | |||
335 | ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */ | 335 | ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */ |
336 | ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */ | 336 | ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */ |
337 | ATA_EHI_QUIET = (1 << 3), /* be quiet */ | 337 | ATA_EHI_QUIET = (1 << 3), /* be quiet */ |
338 | ATA_EHI_NO_RECOVERY = (1 << 4), /* no recovery */ | ||
338 | 339 | ||
339 | ATA_EHI_DID_SOFTRESET = (1 << 16), /* already soft-reset this port */ | 340 | ATA_EHI_DID_SOFTRESET = (1 << 16), /* already soft-reset this port */ |
340 | ATA_EHI_DID_HARDRESET = (1 << 17), /* already hard-reset this port */ | 341 | ATA_EHI_DID_HARDRESET = (1 << 17), /* already hard-reset this port */ |
@@ -723,6 +724,7 @@ struct ata_port { | |||
723 | struct ata_ioports ioaddr; /* ATA cmd/ctl/dma register blocks */ | 724 | struct ata_ioports ioaddr; /* ATA cmd/ctl/dma register blocks */ |
724 | u8 ctl; /* cache of ATA control register */ | 725 | u8 ctl; /* cache of ATA control register */ |
725 | u8 last_ctl; /* Cache last written value */ | 726 | u8 last_ctl; /* Cache last written value */ |
727 | struct ata_link* sff_pio_task_link; /* link currently used */ | ||
726 | struct delayed_work sff_pio_task; | 728 | struct delayed_work sff_pio_task; |
727 | #ifdef CONFIG_ATA_BMDMA | 729 | #ifdef CONFIG_ATA_BMDMA |
728 | struct ata_bmdma_prd *bmdma_prd; /* BMDMA SG list */ | 730 | struct ata_bmdma_prd *bmdma_prd; /* BMDMA SG list */ |
@@ -1594,7 +1596,7 @@ extern void ata_sff_irq_on(struct ata_port *ap); | |||
1594 | extern void ata_sff_irq_clear(struct ata_port *ap); | 1596 | extern void ata_sff_irq_clear(struct ata_port *ap); |
1595 | extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, | 1597 | extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, |
1596 | u8 status, int in_wq); | 1598 | u8 status, int in_wq); |
1597 | extern void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay); | 1599 | extern void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay); |
1598 | extern unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc); | 1600 | extern unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc); |
1599 | extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc); | 1601 | extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc); |
1600 | extern unsigned int ata_sff_port_intr(struct ata_port *ap, | 1602 | extern unsigned int ata_sff_port_intr(struct ata_port *ap, |
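
An illustrative callsite for the changed prototype (the wrapper function below is hypothetical, not part of the patch): the PIO task is now queued against a specific link, and the port records it in the new sff_pio_task_link field for the worker to pick up.

    #include <linux/libata.h>

    static void example_issue_pio(struct ata_queued_cmd *qc, unsigned long delay)
    {
        struct ata_link *link = qc->dev->link;

        /* old: ata_sff_queue_pio_task(link->ap, delay); */
        ata_sff_queue_pio_task(link, delay);
    }
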
diff --git a/include/linux/mm.h b/include/linux/mm.h index c0cceded7683..7687228dd3b7 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -864,6 +864,12 @@ int set_page_dirty(struct page *page); | |||
864 | int set_page_dirty_lock(struct page *page); | 864 | int set_page_dirty_lock(struct page *page); |
865 | int clear_page_dirty_for_io(struct page *page); | 865 | int clear_page_dirty_for_io(struct page *page); |
866 | 866 | ||
867 | /* Is the vma a continuation of the stack vma above it? */ | ||
868 | static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr) | ||
869 | { | ||
870 | return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); | ||
871 | } | ||
872 | |||
867 | extern unsigned long move_page_tables(struct vm_area_struct *vma, | 873 | extern unsigned long move_page_tables(struct vm_area_struct *vma, |
868 | unsigned long old_addr, struct vm_area_struct *new_vma, | 874 | unsigned long old_addr, struct vm_area_struct *new_vma, |
869 | unsigned long new_addr, unsigned long len); | 875 | unsigned long new_addr, unsigned long len); |
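
A minimal sketch of how the new helper might be used by a guard-page-aware caller (the function below is hypothetical; only vma_stack_continue() itself comes from this hunk):

    #include <linux/mm.h>

    /* Hypothetical: lowest address of a grows-down vma that is safe to touch.
     * If the vma below is not simply more of the same stack, the bottom page
     * is the stack guard page and is left alone. */
    static unsigned long usable_start(struct vm_area_struct *vma)
    {
        unsigned long start = vma->vm_start;

        if ((vma->vm_flags & VM_GROWSDOWN) &&
            !vma_stack_continue(vma->vm_prev, start))
                start += PAGE_SIZE;

        return start;
    }
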
diff --git a/include/linux/mmc/sdio.h b/include/linux/mmc/sdio.h index 329a8faa6e37..245cdacee544 100644 --- a/include/linux/mmc/sdio.h +++ b/include/linux/mmc/sdio.h | |||
@@ -38,6 +38,8 @@ | |||
38 | * [8:0] Byte/block count | 38 | * [8:0] Byte/block count |
39 | */ | 39 | */ |
40 | 40 | ||
41 | #define R4_MEMORY_PRESENT (1 << 27) | ||
42 | |||
41 | /* | 43 | /* |
42 | SDIO status in R5 | 44 | SDIO status in R5 |
43 | Type | 45 | Type |
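
R4 is the response to CMD5 (IO_SEND_OP_COND); bit 27 reports whether the card also carries an SD memory portion. A small illustrative check (the helper name is hypothetical):

    #include <linux/types.h>
    #include <linux/mmc/sdio.h>

    /* True for combo (SDIO + memory) cards, false for SDIO-only cards. */
    static int card_has_memory(u32 r4_ocr)
    {
        return (r4_ocr & R4_MEMORY_PRESENT) != 0;
    }
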
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 6e6e62648a4d..3984c4eb41fd 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -284,6 +284,13 @@ struct zone { | |||
284 | unsigned long watermark[NR_WMARK]; | 284 | unsigned long watermark[NR_WMARK]; |
285 | 285 | ||
286 | /* | 286 | /* |
287 | * When free pages are below this point, additional steps are taken | ||
288 | * when reading the number of free pages to avoid per-cpu counter | ||
289 | * drift allowing watermarks to be breached | ||
290 | */ | ||
291 | unsigned long percpu_drift_mark; | ||
292 | |||
293 | /* | ||
287 | * We don't know if the memory that we're going to allocate will be freeable | 294 | * We don't know if the memory that we're going to allocate will be freeable |
288 | * or/and it will be released eventually, so to avoid totally wasting several | 295 | * or/and it will be released eventually, so to avoid totally wasting several |
289 | * GB of ram we must reserve some of the lower zone memory (otherwise we risk | 296 | * GB of ram we must reserve some of the lower zone memory (otherwise we risk |
@@ -441,6 +448,12 @@ static inline int zone_is_oom_locked(const struct zone *zone) | |||
441 | return test_bit(ZONE_OOM_LOCKED, &zone->flags); | 448 | return test_bit(ZONE_OOM_LOCKED, &zone->flags); |
442 | } | 449 | } |
443 | 450 | ||
451 | #ifdef CONFIG_SMP | ||
452 | unsigned long zone_nr_free_pages(struct zone *zone); | ||
453 | #else | ||
454 | #define zone_nr_free_pages(zone) zone_page_state(zone, NR_FREE_PAGES) | ||
455 | #endif /* CONFIG_SMP */ | ||
456 | |||
444 | /* | 457 | /* |
445 | * The "priority" of VM scanning is how much of the queues we will scan in one | 458 | * The "priority" of VM scanning is how much of the queues we will scan in one |
446 | * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the | 459 | * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the |
diff --git a/include/linux/module.h b/include/linux/module.h index 8a6b9fdc7ffa..aace066bad8f 100644 --- a/include/linux/module.h +++ b/include/linux/module.h | |||
@@ -686,17 +686,16 @@ extern int module_sysfs_initialized; | |||
686 | 686 | ||
687 | 687 | ||
688 | #ifdef CONFIG_GENERIC_BUG | 688 | #ifdef CONFIG_GENERIC_BUG |
689 | int module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *, | 689 | void module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *, |
690 | struct module *); | 690 | struct module *); |
691 | void module_bug_cleanup(struct module *); | 691 | void module_bug_cleanup(struct module *); |
692 | 692 | ||
693 | #else /* !CONFIG_GENERIC_BUG */ | 693 | #else /* !CONFIG_GENERIC_BUG */ |
694 | 694 | ||
695 | static inline int module_bug_finalize(const Elf_Ehdr *hdr, | 695 | static inline void module_bug_finalize(const Elf_Ehdr *hdr, |
696 | const Elf_Shdr *sechdrs, | 696 | const Elf_Shdr *sechdrs, |
697 | struct module *mod) | 697 | struct module *mod) |
698 | { | 698 | { |
699 | return 0; | ||
700 | } | 699 | } |
701 | static inline void module_bug_cleanup(struct module *mod) {} | 700 | static inline void module_bug_cleanup(struct module *mod) {} |
702 | #endif /* CONFIG_GENERIC_BUG */ | 701 | #endif /* CONFIG_GENERIC_BUG */ |
diff --git a/include/linux/mutex.h b/include/linux/mutex.h index 878cab4f5fcc..f363bc8fdc74 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h | |||
@@ -78,6 +78,14 @@ struct mutex_waiter { | |||
78 | # include <linux/mutex-debug.h> | 78 | # include <linux/mutex-debug.h> |
79 | #else | 79 | #else |
80 | # define __DEBUG_MUTEX_INITIALIZER(lockname) | 80 | # define __DEBUG_MUTEX_INITIALIZER(lockname) |
81 | /** | ||
82 | * mutex_init - initialize the mutex | ||
83 | * @mutex: the mutex to be initialized | ||
84 | * | ||
85 | * Initialize the mutex to unlocked state. | ||
86 | * | ||
87 | * It is not allowed to initialize an already locked mutex. | ||
88 | */ | ||
81 | # define mutex_init(mutex) \ | 89 | # define mutex_init(mutex) \ |
82 | do { \ | 90 | do { \ |
83 | static struct lock_class_key __key; \ | 91 | static struct lock_class_key __key; \ |
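
Since the kernel-doc for mutex_init() is new, a short usage sketch may help (plain mutex API, illustrative driver-style code):

    #include <linux/mutex.h>

    struct example_dev {
        struct mutex lock;
        int users;
    };

    static void example_dev_init(struct example_dev *dev)
    {
        mutex_init(&dev->lock);         /* the mutex must start out unlocked */
    }

    static void example_dev_get(struct example_dev *dev)
    {
        mutex_lock(&dev->lock);
        dev->users++;
        mutex_unlock(&dev->lock);
    }
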
diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 59d066936ab9..123566912d73 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h | |||
@@ -27,8 +27,6 @@ | |||
27 | 27 | ||
28 | #define MAX_LINKS 32 | 28 | #define MAX_LINKS 32 |
29 | 29 | ||
30 | struct net; | ||
31 | |||
32 | struct sockaddr_nl { | 30 | struct sockaddr_nl { |
33 | sa_family_t nl_family; /* AF_NETLINK */ | 31 | sa_family_t nl_family; /* AF_NETLINK */ |
34 | unsigned short nl_pad; /* zero */ | 32 | unsigned short nl_pad; /* zero */ |
@@ -151,6 +149,8 @@ struct nlattr { | |||
151 | #include <linux/capability.h> | 149 | #include <linux/capability.h> |
152 | #include <linux/skbuff.h> | 150 | #include <linux/skbuff.h> |
153 | 151 | ||
152 | struct net; | ||
153 | |||
154 | static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb) | 154 | static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb) |
155 | { | 155 | { |
156 | return (struct nlmsghdr *)skb->data; | 156 | return (struct nlmsghdr *)skb->data; |
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index 791d5109f34c..50d8009be86c 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h | |||
@@ -63,20 +63,20 @@ static inline bool netpoll_rx(struct sk_buff *skb) | |||
63 | unsigned long flags; | 63 | unsigned long flags; |
64 | bool ret = false; | 64 | bool ret = false; |
65 | 65 | ||
66 | rcu_read_lock_bh(); | 66 | local_irq_save(flags); |
67 | npinfo = rcu_dereference_bh(skb->dev->npinfo); | 67 | npinfo = rcu_dereference_bh(skb->dev->npinfo); |
68 | 68 | ||
69 | if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags)) | 69 | if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags)) |
70 | goto out; | 70 | goto out; |
71 | 71 | ||
72 | spin_lock_irqsave(&npinfo->rx_lock, flags); | 72 | spin_lock(&npinfo->rx_lock); |
73 | /* check rx_flags again with the lock held */ | 73 | /* check rx_flags again with the lock held */ |
74 | if (npinfo->rx_flags && __netpoll_rx(skb)) | 74 | if (npinfo->rx_flags && __netpoll_rx(skb)) |
75 | ret = true; | 75 | ret = true; |
76 | spin_unlock_irqrestore(&npinfo->rx_lock, flags); | 76 | spin_unlock(&npinfo->rx_lock); |
77 | 77 | ||
78 | out: | 78 | out: |
79 | rcu_read_unlock_bh(); | 79 | local_irq_restore(flags); |
80 | return ret; | 80 | return ret; |
81 | } | 81 | } |
82 | 82 | ||
diff --git a/include/linux/pci.h b/include/linux/pci.h index b1d17956a153..c8d95e369ff4 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
@@ -1214,6 +1214,9 @@ static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus, | |||
1214 | unsigned int devfn) | 1214 | unsigned int devfn) |
1215 | { return NULL; } | 1215 | { return NULL; } |
1216 | 1216 | ||
1217 | static inline int pci_domain_nr(struct pci_bus *bus) | ||
1218 | { return 0; } | ||
1219 | |||
1217 | #define dev_is_pci(d) (false) | 1220 | #define dev_is_pci(d) (false) |
1218 | #define dev_is_pf(d) (false) | 1221 | #define dev_is_pf(d) (false) |
1219 | #define dev_num_vf(d) (0) | 1222 | #define dev_num_vf(d) (0) |
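
The new stub keeps callers like the following compiling when CONFIG_PCI is disabled, with domain 0 assumed (the helper below is hypothetical):

    #include <linux/kernel.h>
    #include <linux/pci.h>

    static void example_report(struct pci_dev *pdev)
    {
        pr_info("device %04x:%02x:%02x.%d\n",
                pci_domain_nr(pdev->bus), pdev->bus->number,
                PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
    }
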
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index f6a3b2d36cad..570fddeb0388 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -393,6 +393,9 @@ | |||
393 | #define PCI_DEVICE_ID_VLSI_82C147 0x0105 | 393 | #define PCI_DEVICE_ID_VLSI_82C147 0x0105 |
394 | #define PCI_DEVICE_ID_VLSI_VAS96011 0x0702 | 394 | #define PCI_DEVICE_ID_VLSI_VAS96011 0x0702 |
395 | 395 | ||
396 | /* AMD RD890 Chipset */ | ||
397 | #define PCI_DEVICE_ID_RD890_IOMMU 0x5a23 | ||
398 | |||
396 | #define PCI_VENDOR_ID_ADL 0x1005 | 399 | #define PCI_VENDOR_ID_ADL 0x1005 |
397 | #define PCI_DEVICE_ID_ADL_2301 0x2301 | 400 | #define PCI_DEVICE_ID_ADL_2301 0x2301 |
398 | 401 | ||
@@ -2300,6 +2303,8 @@ | |||
2300 | #define PCI_DEVICE_ID_P2010 0x0079 | 2303 | #define PCI_DEVICE_ID_P2010 0x0079 |
2301 | #define PCI_DEVICE_ID_P1020E 0x0100 | 2304 | #define PCI_DEVICE_ID_P1020E 0x0100 |
2302 | #define PCI_DEVICE_ID_P1020 0x0101 | 2305 | #define PCI_DEVICE_ID_P1020 0x0101 |
2306 | #define PCI_DEVICE_ID_P1021E 0x0102 | ||
2307 | #define PCI_DEVICE_ID_P1021 0x0103 | ||
2303 | #define PCI_DEVICE_ID_P1011E 0x0108 | 2308 | #define PCI_DEVICE_ID_P1011E 0x0108 |
2304 | #define PCI_DEVICE_ID_P1011 0x0109 | 2309 | #define PCI_DEVICE_ID_P1011 0x0109 |
2305 | #define PCI_DEVICE_ID_P1022E 0x0110 | 2310 | #define PCI_DEVICE_ID_P1022E 0x0110 |
diff --git a/include/linux/percpu.h b/include/linux/percpu.h index b8b9084527b1..49466b13c5c6 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h | |||
@@ -149,7 +149,7 @@ extern void __init percpu_init_late(void); | |||
149 | 149 | ||
150 | #else /* CONFIG_SMP */ | 150 | #else /* CONFIG_SMP */ |
151 | 151 | ||
152 | #define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); }) | 152 | #define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); }) |
153 | 153 | ||
154 | /* can't distinguish from other static vars, always false */ | 154 | /* can't distinguish from other static vars, always false */ |
155 | static inline bool is_kernel_percpu_address(unsigned long addr) | 155 | static inline bool is_kernel_percpu_address(unsigned long addr) |
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index d50ba858cfe0..d1a9193960f1 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h | |||
@@ -274,8 +274,14 @@ static inline int dquot_alloc_space(struct inode *inode, qsize_t nr) | |||
274 | int ret; | 274 | int ret; |
275 | 275 | ||
276 | ret = dquot_alloc_space_nodirty(inode, nr); | 276 | ret = dquot_alloc_space_nodirty(inode, nr); |
277 | if (!ret) | 277 | if (!ret) { |
278 | mark_inode_dirty_sync(inode); | 278 | /* |
279 | * Mark inode fully dirty. Since we are allocating blocks, inode | ||
280 | * would become fully dirty soon anyway and it reportedly | ||
281 | * reduces inode_lock contention. | ||
282 | */ | ||
283 | mark_inode_dirty(inode); | ||
284 | } | ||
279 | return ret; | 285 | return ret; |
280 | } | 286 | } |
281 | 287 | ||
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 9fbc54a2585d..83af1f8d8b74 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
@@ -454,7 +454,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) | |||
454 | * Makes rcu_dereference_check() do the dirty work. | 454 | * Makes rcu_dereference_check() do the dirty work. |
455 | */ | 455 | */ |
456 | #define rcu_dereference_bh(p) \ | 456 | #define rcu_dereference_bh(p) \ |
457 | rcu_dereference_check(p, rcu_read_lock_bh_held()) | 457 | rcu_dereference_check(p, rcu_read_lock_bh_held() || irqs_disabled()) |
458 | 458 | ||
459 | /** | 459 | /** |
460 | * rcu_dereference_sched - fetch RCU-protected pointer, checking for RCU-sched | 460 | * rcu_dereference_sched - fetch RCU-protected pointer, checking for RCU-sched |
diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h index 7415839ac890..5310d27abd2a 100644 --- a/include/linux/semaphore.h +++ b/include/linux/semaphore.h | |||
@@ -26,6 +26,9 @@ struct semaphore { | |||
26 | .wait_list = LIST_HEAD_INIT((name).wait_list), \ | 26 | .wait_list = LIST_HEAD_INIT((name).wait_list), \ |
27 | } | 27 | } |
28 | 28 | ||
29 | #define DEFINE_SEMAPHORE(name) \ | ||
30 | struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1) | ||
31 | |||
29 | #define DECLARE_MUTEX(name) \ | 32 | #define DECLARE_MUTEX(name) \ |
30 | struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1) | 33 | struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1) |
31 | 34 | ||
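
DEFINE_SEMAPHORE() is the replacement spelling for the old DECLARE_MUTEX() when a count-of-one semaphore is wanted; a minimal usage sketch:

    #include <linux/errno.h>
    #include <linux/semaphore.h>

    static DEFINE_SEMAPHORE(example_sem);  /* initialized to count 1 */

    static int example_enter(void)
    {
        if (down_interruptible(&example_sem))
                return -ERESTARTSYS;
        /* ... serialized section ... */
        up(&example_sem);
        return 0;
    }
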
diff --git a/include/linux/serial.h b/include/linux/serial.h index 1ebc694a6d52..ef914061511e 100644 --- a/include/linux/serial.h +++ b/include/linux/serial.h | |||
@@ -77,8 +77,7 @@ struct serial_struct { | |||
77 | #define PORT_16654 11 | 77 | #define PORT_16654 11 |
78 | #define PORT_16850 12 | 78 | #define PORT_16850 12 |
79 | #define PORT_RSA 13 /* RSA-DV II/S card */ | 79 | #define PORT_RSA 13 /* RSA-DV II/S card */ |
80 | #define PORT_U6_16550A 14 | 80 | #define PORT_MAX 13 |
81 | #define PORT_MAX 14 | ||
82 | 81 | ||
83 | #define SERIAL_IO_PORT 0 | 82 | #define SERIAL_IO_PORT 0 |
84 | #define SERIAL_IO_HUB6 1 | 83 | #define SERIAL_IO_HUB6 1 |
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 64458a9a8938..563e23400913 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h | |||
@@ -44,7 +44,8 @@ | |||
44 | #define PORT_RM9000 16 /* PMC-Sierra RM9xxx internal UART */ | 44 | #define PORT_RM9000 16 /* PMC-Sierra RM9xxx internal UART */ |
45 | #define PORT_OCTEON 17 /* Cavium OCTEON internal UART */ | 45 | #define PORT_OCTEON 17 /* Cavium OCTEON internal UART */ |
46 | #define PORT_AR7 18 /* Texas Instruments AR7 internal UART */ | 46 | #define PORT_AR7 18 /* Texas Instruments AR7 internal UART */ |
47 | #define PORT_MAX_8250 18 /* max port ID */ | 47 | #define PORT_U6_16550A 19 /* ST-Ericsson U6xxx internal UART */ |
48 | #define PORT_MAX_8250 19 /* max port ID */ | ||
48 | 49 | ||
49 | /* | 50 | /* |
50 | * ARM specific type numbers. These are not currently guaranteed | 51 | * ARM specific type numbers. These are not currently guaranteed |
diff --git a/include/linux/socket.h b/include/linux/socket.h index a2fada9becb6..a8f56e1ec760 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h | |||
@@ -322,7 +322,7 @@ extern int csum_partial_copy_fromiovecend(unsigned char *kdata, | |||
322 | int offset, | 322 | int offset, |
323 | unsigned int len, __wsum *csump); | 323 | unsigned int len, __wsum *csump); |
324 | 324 | ||
325 | extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode); | 325 | extern long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode); |
326 | extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len); | 326 | extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len); |
327 | extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata, | 327 | extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata, |
328 | int offset, int len); | 328 | int offset, int len); |
diff --git a/include/linux/spi/dw_spi.h b/include/linux/spi/dw_spi.h index cc813f95a2f2..c91302f3a257 100644 --- a/include/linux/spi/dw_spi.h +++ b/include/linux/spi/dw_spi.h | |||
@@ -14,7 +14,9 @@ | |||
14 | #define SPI_MODE_OFFSET 6 | 14 | #define SPI_MODE_OFFSET 6 |
15 | #define SPI_SCPH_OFFSET 6 | 15 | #define SPI_SCPH_OFFSET 6 |
16 | #define SPI_SCOL_OFFSET 7 | 16 | #define SPI_SCOL_OFFSET 7 |
17 | |||
17 | #define SPI_TMOD_OFFSET 8 | 18 | #define SPI_TMOD_OFFSET 8 |
19 | #define SPI_TMOD_MASK (0x3 << SPI_TMOD_OFFSET) | ||
18 | #define SPI_TMOD_TR 0x0 /* xmit & recv */ | 20 | #define SPI_TMOD_TR 0x0 /* xmit & recv */ |
19 | #define SPI_TMOD_TO 0x1 /* xmit only */ | 21 | #define SPI_TMOD_TO 0x1 /* xmit only */ |
20 | #define SPI_TMOD_RO 0x2 /* recv only */ | 22 | #define SPI_TMOD_RO 0x2 /* recv only */ |
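
A short sketch of what the new mask is for: updating only the TMOD field of a CTRL0 value without disturbing the neighbouring bits (the iomem pointer and helper are illustrative):

    #include <linux/io.h>
    #include <linux/spi/dw_spi.h>

    static void example_set_rx_only(void __iomem *ctrl0)
    {
        u32 cr0 = readl(ctrl0);

        cr0 &= ~SPI_TMOD_MASK;                  /* clear the old transfer mode */
        cr0 |= SPI_TMOD_RO << SPI_TMOD_OFFSET;  /* receive-only */
        writel(cr0, ctrl0);
    }
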
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 569dc722a600..85f38a63f098 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h | |||
@@ -30,7 +30,7 @@ struct rpc_inode; | |||
30 | * The high-level client handle | 30 | * The high-level client handle |
31 | */ | 31 | */ |
32 | struct rpc_clnt { | 32 | struct rpc_clnt { |
33 | struct kref cl_kref; /* Number of references */ | 33 | atomic_t cl_count; /* Number of references */ |
34 | struct list_head cl_clients; /* Global list of clients */ | 34 | struct list_head cl_clients; /* Global list of clients */ |
35 | struct list_head cl_tasks; /* List of tasks */ | 35 | struct list_head cl_tasks; /* List of tasks */ |
36 | spinlock_t cl_lock; /* spinlock */ | 36 | spinlock_t cl_lock; /* spinlock */ |
diff --git a/include/linux/swap.h b/include/linux/swap.h index 2fee51a11b73..7cdd63366f88 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h | |||
@@ -19,6 +19,7 @@ struct bio; | |||
19 | #define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */ | 19 | #define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */ |
20 | #define SWAP_FLAG_PRIO_MASK 0x7fff | 20 | #define SWAP_FLAG_PRIO_MASK 0x7fff |
21 | #define SWAP_FLAG_PRIO_SHIFT 0 | 21 | #define SWAP_FLAG_PRIO_SHIFT 0 |
22 | #define SWAP_FLAG_DISCARD 0x10000 /* discard swap cluster after use */ | ||
22 | 23 | ||
23 | static inline int current_is_kswapd(void) | 24 | static inline int current_is_kswapd(void) |
24 | { | 25 | { |
@@ -142,7 +143,7 @@ struct swap_extent { | |||
142 | enum { | 143 | enum { |
143 | SWP_USED = (1 << 0), /* is slot in swap_info[] used? */ | 144 | SWP_USED = (1 << 0), /* is slot in swap_info[] used? */ |
144 | SWP_WRITEOK = (1 << 1), /* ok to write to this swap? */ | 145 | SWP_WRITEOK = (1 << 1), /* ok to write to this swap? */ |
145 | SWP_DISCARDABLE = (1 << 2), /* blkdev supports discard */ | 146 | SWP_DISCARDABLE = (1 << 2), /* swapon+blkdev support discard */ |
146 | SWP_DISCARDING = (1 << 3), /* now discarding a free cluster */ | 147 | SWP_DISCARDING = (1 << 3), /* now discarding a free cluster */ |
147 | SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */ | 148 | SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */ |
148 | SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */ | 149 | SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */ |
@@ -315,6 +316,7 @@ extern long nr_swap_pages; | |||
315 | extern long total_swap_pages; | 316 | extern long total_swap_pages; |
316 | extern void si_swapinfo(struct sysinfo *); | 317 | extern void si_swapinfo(struct sysinfo *); |
317 | extern swp_entry_t get_swap_page(void); | 318 | extern swp_entry_t get_swap_page(void); |
319 | extern swp_entry_t get_swap_page_of_type(int); | ||
318 | extern int valid_swaphandles(swp_entry_t, unsigned long *); | 320 | extern int valid_swaphandles(swp_entry_t, unsigned long *); |
319 | extern int add_swap_count_continuation(swp_entry_t, gfp_t); | 321 | extern int add_swap_count_continuation(swp_entry_t, gfp_t); |
320 | extern void swap_shmem_alloc(swp_entry_t); | 322 | extern void swap_shmem_alloc(swp_entry_t); |
@@ -331,13 +333,6 @@ extern int reuse_swap_page(struct page *); | |||
331 | extern int try_to_free_swap(struct page *); | 333 | extern int try_to_free_swap(struct page *); |
332 | struct backing_dev_info; | 334 | struct backing_dev_info; |
333 | 335 | ||
334 | #ifdef CONFIG_HIBERNATION | ||
335 | void hibernation_freeze_swap(void); | ||
336 | void hibernation_thaw_swap(void); | ||
337 | swp_entry_t get_swap_for_hibernation(int type); | ||
338 | void swap_free_for_hibernation(swp_entry_t val); | ||
339 | #endif | ||
340 | |||
341 | /* linux/mm/thrash.c */ | 336 | /* linux/mm/thrash.c */ |
342 | extern struct mm_struct *swap_token_mm; | 337 | extern struct mm_struct *swap_token_mm; |
343 | extern void grab_swap_token(struct mm_struct *); | 338 | extern void grab_swap_token(struct mm_struct *); |
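
SWAP_FLAG_DISCARD extends the swapon(2) flags word defined just above; an illustrative userspace call (the #ifndef guard is only there because an older libc header may not carry the new constant yet):

    #include <sys/swap.h>

    #ifndef SWAP_FLAG_DISCARD
    # define SWAP_FLAG_DISCARD 0x10000  /* value from the hunk above */
    #endif

    /* Enable a swap device at priority 5 and allow discarding freed clusters. */
    static int enable_swap_with_discard(const char *path)
    {
        int prio = 5;
        int flags = SWAP_FLAG_PREFER |
                    ((prio << SWAP_FLAG_PRIO_SHIFT) & SWAP_FLAG_PRIO_MASK) |
                    SWAP_FLAG_DISCARD;

        return swapon(path, flags);
    }
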
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 7f43ccdc1d38..eaaea37b3b75 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h | |||
@@ -170,6 +170,28 @@ static inline unsigned long zone_page_state(struct zone *zone, | |||
170 | return x; | 170 | return x; |
171 | } | 171 | } |
172 | 172 | ||
173 | /* | ||
174 | * More accurate version that also considers the currently pending | ||
175 | * deltas. For that we need to loop over all cpus to find the current | ||
176 | * deltas. There is no synchronization so the result cannot be | ||
177 | * exactly accurate either. | ||
178 | */ | ||
179 | static inline unsigned long zone_page_state_snapshot(struct zone *zone, | ||
180 | enum zone_stat_item item) | ||
181 | { | ||
182 | long x = atomic_long_read(&zone->vm_stat[item]); | ||
183 | |||
184 | #ifdef CONFIG_SMP | ||
185 | int cpu; | ||
186 | for_each_online_cpu(cpu) | ||
187 | x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item]; | ||
188 | |||
189 | if (x < 0) | ||
190 | x = 0; | ||
191 | #endif | ||
192 | return x; | ||
193 | } | ||
194 | |||
173 | extern unsigned long global_reclaimable_pages(void); | 195 | extern unsigned long global_reclaimable_pages(void); |
174 | extern unsigned long zone_reclaimable_pages(struct zone *zone); | 196 | extern unsigned long zone_reclaimable_pages(struct zone *zone); |
175 | 197 | ||
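
Together with percpu_drift_mark and zone_nr_free_pages() from the mmzone.h hunk above, the intended pattern is roughly the following (a sketch of the likely mm/ side, not quoted from the patch):

    #include <linux/mmzone.h>
    #include <linux/vmstat.h>

    /* Use the cheap counter normally, but once free memory drops below the
     * drift mark, take the expensive snapshot so per-cpu deltas cannot make
     * a nearly-exhausted zone look like it still meets its watermarks. */
    unsigned long zone_nr_free_pages(struct zone *zone)
    {
        unsigned long nr_free = zone_page_state(zone, NR_FREE_PAGES);

        if (nr_free < zone->percpu_drift_mark)
                nr_free = zone_page_state_snapshot(zone, NR_FREE_PAGES);

        return nr_free;
    }
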
diff --git a/include/linux/wait.h b/include/linux/wait.h index 0836ccc57121..3efc9f3f43a0 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h | |||
@@ -614,6 +614,7 @@ int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key); | |||
614 | (wait)->private = current; \ | 614 | (wait)->private = current; \ |
615 | (wait)->func = autoremove_wake_function; \ | 615 | (wait)->func = autoremove_wake_function; \ |
616 | INIT_LIST_HEAD(&(wait)->task_list); \ | 616 | INIT_LIST_HEAD(&(wait)->task_list); \ |
617 | (wait)->flags = 0; \ | ||
617 | } while (0) | 618 | } while (0) |
618 | 619 | ||
619 | /** | 620 | /** |
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 4f9d277bcd9a..25e02c941bac 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h | |||
@@ -25,18 +25,20 @@ typedef void (*work_func_t)(struct work_struct *work); | |||
25 | 25 | ||
26 | enum { | 26 | enum { |
27 | WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */ | 27 | WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */ |
28 | WORK_STRUCT_CWQ_BIT = 1, /* data points to cwq */ | 28 | WORK_STRUCT_DELAYED_BIT = 1, /* work item is delayed */ |
29 | WORK_STRUCT_LINKED_BIT = 2, /* next work is linked to this one */ | 29 | WORK_STRUCT_CWQ_BIT = 2, /* data points to cwq */ |
30 | WORK_STRUCT_LINKED_BIT = 3, /* next work is linked to this one */ | ||
30 | #ifdef CONFIG_DEBUG_OBJECTS_WORK | 31 | #ifdef CONFIG_DEBUG_OBJECTS_WORK |
31 | WORK_STRUCT_STATIC_BIT = 3, /* static initializer (debugobjects) */ | 32 | WORK_STRUCT_STATIC_BIT = 4, /* static initializer (debugobjects) */ |
32 | WORK_STRUCT_COLOR_SHIFT = 4, /* color for workqueue flushing */ | 33 | WORK_STRUCT_COLOR_SHIFT = 5, /* color for workqueue flushing */ |
33 | #else | 34 | #else |
34 | WORK_STRUCT_COLOR_SHIFT = 3, /* color for workqueue flushing */ | 35 | WORK_STRUCT_COLOR_SHIFT = 4, /* color for workqueue flushing */ |
35 | #endif | 36 | #endif |
36 | 37 | ||
37 | WORK_STRUCT_COLOR_BITS = 4, | 38 | WORK_STRUCT_COLOR_BITS = 4, |
38 | 39 | ||
39 | WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT, | 40 | WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT, |
41 | WORK_STRUCT_DELAYED = 1 << WORK_STRUCT_DELAYED_BIT, | ||
40 | WORK_STRUCT_CWQ = 1 << WORK_STRUCT_CWQ_BIT, | 42 | WORK_STRUCT_CWQ = 1 << WORK_STRUCT_CWQ_BIT, |
41 | WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT, | 43 | WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT, |
42 | #ifdef CONFIG_DEBUG_OBJECTS_WORK | 44 | #ifdef CONFIG_DEBUG_OBJECTS_WORK |
@@ -59,8 +61,8 @@ enum { | |||
59 | 61 | ||
60 | /* | 62 | /* |
61 | * Reserve 7 bits off of cwq pointer w/ debugobjects turned | 63 | * Reserve 7 bits off of cwq pointer w/ debugobjects turned |
62 | * off. This makes cwqs aligned to 128 bytes which isn't too | 64 | * off. This makes cwqs aligned to 256 bytes and allows 15 |
63 | * excessive while allowing 15 workqueue flush colors. | 65 | * workqueue flush colors. |
64 | */ | 66 | */ |
65 | WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT + | 67 | WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT + |
66 | WORK_STRUCT_COLOR_BITS, | 68 | WORK_STRUCT_COLOR_BITS, |
@@ -233,6 +235,10 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; } | |||
233 | #define work_clear_pending(work) \ | 235 | #define work_clear_pending(work) \ |
234 | clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) | 236 | clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) |
235 | 237 | ||
238 | /* | ||
239 | * Workqueue flags and constants. For details, please refer to | ||
240 | * Documentation/workqueue.txt. | ||
241 | */ | ||
236 | enum { | 242 | enum { |
237 | WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */ | 243 | WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */ |
238 | WQ_UNBOUND = 1 << 1, /* not bound to any cpu */ | 244 | WQ_UNBOUND = 1 << 1, /* not bound to any cpu */ |
@@ -241,6 +247,8 @@ enum { | |||
241 | WQ_HIGHPRI = 1 << 4, /* high priority */ | 247 | WQ_HIGHPRI = 1 << 4, /* high priority */ |
242 | WQ_CPU_INTENSIVE = 1 << 5, /* cpu intensive workqueue */ | 248 | WQ_CPU_INTENSIVE = 1 << 5, /* cpu intensive workqueue */ |
243 | 249 | ||
250 | WQ_DYING = 1 << 6, /* internal: workqueue is dying */ | ||
251 | |||
244 | WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */ | 252 | WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */ |
245 | WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */ | 253 | WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */ |
246 | WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2, | 254 | WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2, |
diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 45375b41a2a0..4d40c4d0230b 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h | |||
@@ -121,6 +121,7 @@ static inline int addrconf_finite_timeout(unsigned long timeout) | |||
121 | * IPv6 Address Label subsystem (addrlabel.c) | 121 | * IPv6 Address Label subsystem (addrlabel.c) |
122 | */ | 122 | */ |
123 | extern int ipv6_addr_label_init(void); | 123 | extern int ipv6_addr_label_init(void); |
124 | extern void ipv6_addr_label_cleanup(void); | ||
124 | extern void ipv6_addr_label_rtnl_register(void); | 125 | extern void ipv6_addr_label_rtnl_register(void); |
125 | extern u32 ipv6_addr_label(struct net *net, | 126 | extern u32 ipv6_addr_label(struct net *net, |
126 | const struct in6_addr *addr, | 127 | const struct in6_addr *addr, |
diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h index 726cc3536409..ef6c24a529e1 100644 --- a/include/net/cls_cgroup.h +++ b/include/net/cls_cgroup.h | |||
@@ -27,11 +27,17 @@ struct cgroup_cls_state | |||
27 | #ifdef CONFIG_NET_CLS_CGROUP | 27 | #ifdef CONFIG_NET_CLS_CGROUP |
28 | static inline u32 task_cls_classid(struct task_struct *p) | 28 | static inline u32 task_cls_classid(struct task_struct *p) |
29 | { | 29 | { |
30 | int classid; | ||
31 | |||
30 | if (in_interrupt()) | 32 | if (in_interrupt()) |
31 | return 0; | 33 | return 0; |
32 | 34 | ||
33 | return container_of(task_subsys_state(p, net_cls_subsys_id), | 35 | rcu_read_lock(); |
34 | struct cgroup_cls_state, css)->classid; | 36 | classid = container_of(task_subsys_state(p, net_cls_subsys_id), |
37 | struct cgroup_cls_state, css)->classid; | ||
38 | rcu_read_unlock(); | ||
39 | |||
40 | return classid; | ||
35 | } | 41 | } |
36 | #else | 42 | #else |
37 | extern int net_cls_subsys_id; | 43 | extern int net_cls_subsys_id; |
diff --git a/include/net/dst.h b/include/net/dst.h index 81d1413a8701..02386505033d 100644 --- a/include/net/dst.h +++ b/include/net/dst.h | |||
@@ -242,6 +242,7 @@ static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev) | |||
242 | dev->stats.rx_packets++; | 242 | dev->stats.rx_packets++; |
243 | dev->stats.rx_bytes += skb->len; | 243 | dev->stats.rx_bytes += skb->len; |
244 | skb->rxhash = 0; | 244 | skb->rxhash = 0; |
245 | skb_set_queue_mapping(skb, 0); | ||
245 | skb_dst_drop(skb); | 246 | skb_dst_drop(skb); |
246 | nf_reset(skb); | 247 | nf_reset(skb); |
247 | } | 248 | } |
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index a4747a0f7303..f976885f686f 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h | |||
@@ -955,6 +955,9 @@ static inline __wsum ip_vs_check_diff2(__be16 old, __be16 new, __wsum oldsum) | |||
955 | return csum_partial(diff, sizeof(diff), oldsum); | 955 | return csum_partial(diff, sizeof(diff), oldsum); |
956 | } | 956 | } |
957 | 957 | ||
958 | extern void ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, | ||
959 | int outin); | ||
960 | |||
958 | #endif /* __KERNEL__ */ | 961 | #endif /* __KERNEL__ */ |
959 | 962 | ||
960 | #endif /* _NET_IP_VS_H */ | 963 | #endif /* _NET_IP_VS_H */ |
diff --git a/include/net/route.h b/include/net/route.h index bd732d62e1c3..7e5e73bfa4de 100644 --- a/include/net/route.h +++ b/include/net/route.h | |||
@@ -199,6 +199,8 @@ static inline int ip_route_newports(struct rtable **rp, u8 protocol, | |||
199 | fl.fl_ip_sport = sport; | 199 | fl.fl_ip_sport = sport; |
200 | fl.fl_ip_dport = dport; | 200 | fl.fl_ip_dport = dport; |
201 | fl.proto = protocol; | 201 | fl.proto = protocol; |
202 | if (inet_sk(sk)->transparent) | ||
203 | fl.flags |= FLOWI_FLAG_ANYSRC; | ||
202 | ip_rt_put(*rp); | 204 | ip_rt_put(*rp); |
203 | *rp = NULL; | 205 | *rp = NULL; |
204 | security_sk_classify_flow(sk, &fl); | 206 | security_sk_classify_flow(sk, &fl); |
diff --git a/include/net/sock.h b/include/net/sock.h index ac53bfbdfe16..adab9dc58183 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
@@ -752,6 +752,7 @@ struct proto { | |||
752 | /* Keeping track of sk's, looking them up, and port selection methods. */ | 752 | /* Keeping track of sk's, looking them up, and port selection methods. */ |
753 | void (*hash)(struct sock *sk); | 753 | void (*hash)(struct sock *sk); |
754 | void (*unhash)(struct sock *sk); | 754 | void (*unhash)(struct sock *sk); |
755 | void (*rehash)(struct sock *sk); | ||
755 | int (*get_port)(struct sock *sk, unsigned short snum); | 756 | int (*get_port)(struct sock *sk, unsigned short snum); |
756 | 757 | ||
757 | /* Keeping track of sockets in use */ | 758 | /* Keeping track of sockets in use */ |
diff --git a/include/net/tcp.h b/include/net/tcp.h index eaa9582779d0..3e4b33e36602 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h | |||
@@ -475,8 +475,22 @@ extern unsigned int tcp_current_mss(struct sock *sk); | |||
475 | /* Bound MSS / TSO packet size with the half of the window */ | 475 | /* Bound MSS / TSO packet size with the half of the window */ |
476 | static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize) | 476 | static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize) |
477 | { | 477 | { |
478 | if (tp->max_window && pktsize > (tp->max_window >> 1)) | 478 | int cutoff; |
479 | return max(tp->max_window >> 1, 68U - tp->tcp_header_len); | 479 | |
480 | /* When peer uses tiny windows, there is no use in packetizing | ||
481 | * to sub-MSS pieces for the sake of SWS or making sure there | ||
482 | * are enough packets in the pipe for fast recovery. | ||
483 | * | ||
484 | * On the other hand, for extremely large MSS devices, handling | ||
485 | * smaller than MSS windows in this way does make sense. | ||
486 | */ | ||
487 | if (tp->max_window >= 512) | ||
488 | cutoff = (tp->max_window >> 1); | ||
489 | else | ||
490 | cutoff = tp->max_window; | ||
491 | |||
492 | if (cutoff && pktsize > cutoff) | ||
493 | return max_t(int, cutoff, 68U - tp->tcp_header_len); | ||
480 | else | 494 | else |
481 | return pktsize; | 495 | return pktsize; |
482 | } | 496 | } |
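
Two worked cases may make the new cutoff clearer (the numbers are made up; tcp_header_len = 32 is assumed):

    max_window = 300  (tiny, < 512):   cutoff = 300
        pktsize 1460 > 300  ->  return max_t(int, 300, 68 - 32) = 300
        (the old code returned max(150, 36) = 150, splitting an already tiny
         window into even smaller sub-MSS pieces for no benefit)

    max_window = 65535  (>= 512):      cutoff = 32767
        pktsize 1460 <= 32767  ->  return 1460, behaviour unchanged
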
diff --git a/include/net/udp.h b/include/net/udp.h index 7abdf305da50..a184d3496b13 100644 --- a/include/net/udp.h +++ b/include/net/udp.h | |||
@@ -151,6 +151,7 @@ static inline void udp_lib_hash(struct sock *sk) | |||
151 | } | 151 | } |
152 | 152 | ||
153 | extern void udp_lib_unhash(struct sock *sk); | 153 | extern void udp_lib_unhash(struct sock *sk); |
154 | extern void udp_lib_rehash(struct sock *sk, u16 new_hash); | ||
154 | 155 | ||
155 | static inline void udp_lib_close(struct sock *sk, long timeout) | 156 | static inline void udp_lib_close(struct sock *sk, long timeout) |
156 | { | 157 | { |
diff --git a/include/net/xfrm.h b/include/net/xfrm.h index fc8f36dd0f5c..4f53532d4c2f 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h | |||
@@ -298,8 +298,8 @@ struct xfrm_state_afinfo { | |||
298 | const struct xfrm_type *type_map[IPPROTO_MAX]; | 298 | const struct xfrm_type *type_map[IPPROTO_MAX]; |
299 | struct xfrm_mode *mode_map[XFRM_MODE_MAX]; | 299 | struct xfrm_mode *mode_map[XFRM_MODE_MAX]; |
300 | int (*init_flags)(struct xfrm_state *x); | 300 | int (*init_flags)(struct xfrm_state *x); |
301 | void (*init_tempsel)(struct xfrm_state *x, struct flowi *fl, | 301 | void (*init_tempsel)(struct xfrm_selector *sel, struct flowi *fl); |
302 | struct xfrm_tmpl *tmpl, | 302 | void (*init_temprop)(struct xfrm_state *x, struct xfrm_tmpl *tmpl, |
303 | xfrm_address_t *daddr, xfrm_address_t *saddr); | 303 | xfrm_address_t *daddr, xfrm_address_t *saddr); |
304 | int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n); | 304 | int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n); |
305 | int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n); | 305 | int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n); |
diff --git a/ipc/sem.c b/ipc/sem.c --- a/ipc/sem.c +++ b/ipc/sem.c | |||
@@ -743,6 +743,8 @@ static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, | |||
743 | { | 743 | { |
744 | struct semid_ds out; | 744 | struct semid_ds out; |
745 | 745 | ||
746 | memset(&out, 0, sizeof(out)); | ||
747 | |||
746 | ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm); | 748 | ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm); |
747 | 749 | ||
748 | out.sem_otime = in->sem_otime; | 750 | out.sem_otime = in->sem_otime; |
diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 192f88c5b0f9..c9483d8f6140 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c | |||
@@ -1791,19 +1791,20 @@ out: | |||
1791 | } | 1791 | } |
1792 | 1792 | ||
1793 | /** | 1793 | /** |
1794 | * cgroup_attach_task_current_cg - attach task 'tsk' to current task's cgroup | 1794 | * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from' |
1795 | * @from: attach to all cgroups of a given task | ||
1795 | * @tsk: the task to be attached | 1796 | * @tsk: the task to be attached |
1796 | */ | 1797 | */ |
1797 | int cgroup_attach_task_current_cg(struct task_struct *tsk) | 1798 | int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk) |
1798 | { | 1799 | { |
1799 | struct cgroupfs_root *root; | 1800 | struct cgroupfs_root *root; |
1800 | struct cgroup *cur_cg; | ||
1801 | int retval = 0; | 1801 | int retval = 0; |
1802 | 1802 | ||
1803 | cgroup_lock(); | 1803 | cgroup_lock(); |
1804 | for_each_active_root(root) { | 1804 | for_each_active_root(root) { |
1805 | cur_cg = task_cgroup_from_root(current, root); | 1805 | struct cgroup *from_cg = task_cgroup_from_root(from, root); |
1806 | retval = cgroup_attach_task(cur_cg, tsk); | 1806 | |
1807 | retval = cgroup_attach_task(from_cg, tsk); | ||
1807 | if (retval) | 1808 | if (retval) |
1808 | break; | 1809 | break; |
1809 | } | 1810 | } |
@@ -1811,7 +1812,7 @@ int cgroup_attach_task_current_cg(struct task_struct *tsk) | |||
1811 | 1812 | ||
1812 | return retval; | 1813 | return retval; |
1813 | } | 1814 | } |
1814 | EXPORT_SYMBOL_GPL(cgroup_attach_task_current_cg); | 1815 | EXPORT_SYMBOL_GPL(cgroup_attach_task_all); |
1815 | 1816 | ||
1816 | /* | 1817 | /* |
1817 | * Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex | 1818 | * Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex |
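
Call sites convert mechanically; passing current as the new first argument reproduces the old behaviour (the caller below is hypothetical):

    #include <linux/cgroup.h>
    #include <linux/sched.h>

    /* e.g. a subsystem adopting a freshly created helper thread 'tsk' */
    static int example_adopt(struct task_struct *tsk)
    {
        /* old: return cgroup_attach_task_current_cg(tsk); */
        return cgroup_attach_task_all(current, tsk);
    }
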
diff --git a/kernel/compat.c b/kernel/compat.c index e167efce8423..c9e2ec0b34a8 100644 --- a/kernel/compat.c +++ b/kernel/compat.c | |||
@@ -1126,3 +1126,24 @@ compat_sys_sysinfo(struct compat_sysinfo __user *info) | |||
1126 | 1126 | ||
1127 | return 0; | 1127 | return 0; |
1128 | } | 1128 | } |
1129 | |||
1130 | /* | ||
1131 | * Allocate user-space memory for the duration of a single system call, | ||
1132 | * in order to marshall parameters inside a compat thunk. | ||
1133 | */ | ||
1134 | void __user *compat_alloc_user_space(unsigned long len) | ||
1135 | { | ||
1136 | void __user *ptr; | ||
1137 | |||
1138 | /* If len would occupy more than half of the entire compat space... */ | ||
1139 | if (unlikely(len > (((compat_uptr_t)~0) >> 1))) | ||
1140 | return NULL; | ||
1141 | |||
1142 | ptr = arch_compat_alloc_user_space(len); | ||
1143 | |||
1144 | if (unlikely(!access_ok(VERIFY_WRITE, ptr, len))) | ||
1145 | return NULL; | ||
1146 | |||
1147 | return ptr; | ||
1148 | } | ||
1149 | EXPORT_SYMBOL_GPL(compat_alloc_user_space); | ||
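
The usual calling pattern for the now-generic helper is a compat thunk that stages a native-sized copy of its arguments in user space; a sketch with hypothetical structures and a hypothetical sys_example():

    #include <linux/compat.h>
    #include <linux/linkage.h>
    #include <linux/uaccess.h>

    struct example_args        { u64 buf; u64 len; };  /* native layout */
    struct compat_example_args { u32 buf; u32 len; };  /* 32-bit layout */

    extern long sys_example(struct example_args __user *uargs);  /* hypothetical */

    asmlinkage long compat_sys_example(struct compat_example_args __user *uargs32)
    {
        struct compat_example_args c;
        struct example_args __user *uargs;

        if (copy_from_user(&c, uargs32, sizeof(c)))
                return -EFAULT;

        /* Stage a native-sized copy in user space for the regular path. */
        uargs = compat_alloc_user_space(sizeof(*uargs));
        if (!uargs)
                return -EFAULT;

        if (put_user((u64)c.buf, &uargs->buf) ||
            put_user((u64)c.len, &uargs->len))
                return -EFAULT;

        return sys_example(uargs);
    }
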
diff --git a/kernel/debug/kdb/kdb_bp.c b/kernel/debug/kdb/kdb_bp.c index 75bd9b3ebbb7..20059ef4459a 100644 --- a/kernel/debug/kdb/kdb_bp.c +++ b/kernel/debug/kdb/kdb_bp.c | |||
@@ -274,7 +274,6 @@ static int kdb_bp(int argc, const char **argv) | |||
274 | int i, bpno; | 274 | int i, bpno; |
275 | kdb_bp_t *bp, *bp_check; | 275 | kdb_bp_t *bp, *bp_check; |
276 | int diag; | 276 | int diag; |
277 | int free; | ||
278 | char *symname = NULL; | 277 | char *symname = NULL; |
279 | long offset = 0ul; | 278 | long offset = 0ul; |
280 | int nextarg; | 279 | int nextarg; |
@@ -305,7 +304,6 @@ static int kdb_bp(int argc, const char **argv) | |||
305 | /* | 304 | /* |
306 | * Find an empty bp structure to allocate | 305 | * Find an empty bp structure to allocate |
307 | */ | 306 | */ |
308 | free = KDB_MAXBPT; | ||
309 | for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT; bpno++, bp++) { | 307 | for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT; bpno++, bp++) { |
310 | if (bp->bp_free) | 308 | if (bp->bp_free) |
311 | break; | 309 | break; |
diff --git a/kernel/fork.c b/kernel/fork.c index b7e9d60a675d..c445f8cc408d 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -356,10 +356,10 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) | |||
356 | if (IS_ERR(pol)) | 356 | if (IS_ERR(pol)) |
357 | goto fail_nomem_policy; | 357 | goto fail_nomem_policy; |
358 | vma_set_policy(tmp, pol); | 358 | vma_set_policy(tmp, pol); |
359 | tmp->vm_mm = mm; | ||
359 | if (anon_vma_fork(tmp, mpnt)) | 360 | if (anon_vma_fork(tmp, mpnt)) |
360 | goto fail_nomem_anon_vma_fork; | 361 | goto fail_nomem_anon_vma_fork; |
361 | tmp->vm_flags &= ~VM_LOCKED; | 362 | tmp->vm_flags &= ~VM_LOCKED; |
362 | tmp->vm_mm = mm; | ||
363 | tmp->vm_next = tmp->vm_prev = NULL; | 363 | tmp->vm_next = tmp->vm_prev = NULL; |
364 | file = tmp->vm_file; | 364 | file = tmp->vm_file; |
365 | if (file) { | 365 | if (file) { |
diff --git a/kernel/gcov/fs.c b/kernel/gcov/fs.c index ef3c3f88a7a3..f83972b16564 100644 --- a/kernel/gcov/fs.c +++ b/kernel/gcov/fs.c | |||
@@ -33,10 +33,11 @@ | |||
33 | * @children: child nodes | 33 | * @children: child nodes |
34 | * @all: list head for list of all nodes | 34 | * @all: list head for list of all nodes |
35 | * @parent: parent node | 35 | * @parent: parent node |
36 | * @info: associated profiling data structure if not a directory | 36 | * @loaded_info: array of pointers to profiling data sets for loaded object |
37 | * @ghost: when an object file containing profiling data is unloaded we keep a | 37 | * files. |
38 | * copy of the profiling data here to allow collecting coverage data | 38 | * @num_loaded: number of profiling data sets for loaded object files. |
39 | * for cleanup code. Such a node is called a "ghost". | 39 | * @unloaded_info: accumulated copy of profiling data sets for unloaded |
40 | * object files. Used only when gcov_persist=1. | ||
40 | * @dentry: main debugfs entry, either a directory or data file | 41 | * @dentry: main debugfs entry, either a directory or data file |
41 | * @links: associated symbolic links | 42 | * @links: associated symbolic links |
42 | * @name: data file basename | 43 | * @name: data file basename |
@@ -51,10 +52,11 @@ struct gcov_node { | |||
51 | struct list_head children; | 52 | struct list_head children; |
52 | struct list_head all; | 53 | struct list_head all; |
53 | struct gcov_node *parent; | 54 | struct gcov_node *parent; |
54 | struct gcov_info *info; | 55 | struct gcov_info **loaded_info; |
55 | struct gcov_info *ghost; | 56 | struct gcov_info *unloaded_info; |
56 | struct dentry *dentry; | 57 | struct dentry *dentry; |
57 | struct dentry **links; | 58 | struct dentry **links; |
59 | int num_loaded; | ||
58 | char name[0]; | 60 | char name[0]; |
59 | }; | 61 | }; |
60 | 62 | ||
@@ -136,16 +138,37 @@ static const struct seq_operations gcov_seq_ops = { | |||
136 | }; | 138 | }; |
137 | 139 | ||
138 | /* | 140 | /* |
139 | * Return the profiling data set for a given node. This can either be the | 141 | * Return a profiling data set associated with the given node. This is |
140 | * original profiling data structure or a duplicate (also called "ghost") | 142 | * either a data set for a loaded object file or a data set copy in case |
141 | * in case the associated object file has been unloaded. | 143 | * all associated object files have been unloaded. |
142 | */ | 144 | */ |
143 | static struct gcov_info *get_node_info(struct gcov_node *node) | 145 | static struct gcov_info *get_node_info(struct gcov_node *node) |
144 | { | 146 | { |
145 | if (node->info) | 147 | if (node->num_loaded > 0) |
146 | return node->info; | 148 | return node->loaded_info[0]; |
147 | 149 | ||
148 | return node->ghost; | 150 | return node->unloaded_info; |
151 | } | ||
152 | |||
153 | /* | ||
154 | * Return a newly allocated profiling data set which contains the sum of | ||
155 | * all profiling data associated with the given node. | ||
156 | */ | ||
157 | static struct gcov_info *get_accumulated_info(struct gcov_node *node) | ||
158 | { | ||
159 | struct gcov_info *info; | ||
160 | int i = 0; | ||
161 | |||
162 | if (node->unloaded_info) | ||
163 | info = gcov_info_dup(node->unloaded_info); | ||
164 | else | ||
165 | info = gcov_info_dup(node->loaded_info[i++]); | ||
166 | if (!info) | ||
167 | return NULL; | ||
168 | for (; i < node->num_loaded; i++) | ||
169 | gcov_info_add(info, node->loaded_info[i]); | ||
170 | |||
171 | return info; | ||
149 | } | 172 | } |
150 | 173 | ||
151 | /* | 174 | /* |
@@ -163,9 +186,10 @@ static int gcov_seq_open(struct inode *inode, struct file *file) | |||
163 | mutex_lock(&node_lock); | 186 | mutex_lock(&node_lock); |
164 | /* | 187 | /* |
165 | * Read from a profiling data copy to minimize reference tracking | 188 | * Read from a profiling data copy to minimize reference tracking |
166 | * complexity and concurrent access. | 189 | * complexity and concurrent access and to keep accumulating multiple |
190 | * profiling data sets associated with one node simple. | ||
167 | */ | 191 | */ |
168 | info = gcov_info_dup(get_node_info(node)); | 192 | info = get_accumulated_info(node); |
169 | if (!info) | 193 | if (!info) |
170 | goto out_unlock; | 194 | goto out_unlock; |
171 | iter = gcov_iter_new(info); | 195 | iter = gcov_iter_new(info); |
@@ -225,12 +249,25 @@ static struct gcov_node *get_node_by_name(const char *name) | |||
225 | return NULL; | 249 | return NULL; |
226 | } | 250 | } |
227 | 251 | ||
252 | /* | ||
253 | * Reset all profiling data associated with the specified node. | ||
254 | */ | ||
255 | static void reset_node(struct gcov_node *node) | ||
256 | { | ||
257 | int i; | ||
258 | |||
259 | if (node->unloaded_info) | ||
260 | gcov_info_reset(node->unloaded_info); | ||
261 | for (i = 0; i < node->num_loaded; i++) | ||
262 | gcov_info_reset(node->loaded_info[i]); | ||
263 | } | ||
264 | |||
228 | static void remove_node(struct gcov_node *node); | 265 | static void remove_node(struct gcov_node *node); |
229 | 266 | ||
230 | /* | 267 | /* |
231 | * write() implementation for gcov data files. Reset profiling data for the | 268 | * write() implementation for gcov data files. Reset profiling data for the |
232 | * associated file. If the object file has been unloaded (i.e. this is | 269 | * corresponding file. If all associated object files have been unloaded, |
233 | * a "ghost" node), remove the debug fs node as well. | 270 | * remove the debug fs node as well. |
234 | */ | 271 | */ |
235 | static ssize_t gcov_seq_write(struct file *file, const char __user *addr, | 272 | static ssize_t gcov_seq_write(struct file *file, const char __user *addr, |
236 | size_t len, loff_t *pos) | 273 | size_t len, loff_t *pos) |
@@ -245,10 +282,10 @@ static ssize_t gcov_seq_write(struct file *file, const char __user *addr, | |||
245 | node = get_node_by_name(info->filename); | 282 | node = get_node_by_name(info->filename); |
246 | if (node) { | 283 | if (node) { |
247 | /* Reset counts or remove node for unloaded modules. */ | 284 | /* Reset counts or remove node for unloaded modules. */ |
248 | if (node->ghost) | 285 | if (node->num_loaded == 0) |
249 | remove_node(node); | 286 | remove_node(node); |
250 | else | 287 | else |
251 | gcov_info_reset(node->info); | 288 | reset_node(node); |
252 | } | 289 | } |
253 | /* Reset counts for open file. */ | 290 | /* Reset counts for open file. */ |
254 | gcov_info_reset(info); | 291 | gcov_info_reset(info); |
@@ -378,7 +415,10 @@ static void init_node(struct gcov_node *node, struct gcov_info *info, | |||
378 | INIT_LIST_HEAD(&node->list); | 415 | INIT_LIST_HEAD(&node->list); |
379 | INIT_LIST_HEAD(&node->children); | 416 | INIT_LIST_HEAD(&node->children); |
380 | INIT_LIST_HEAD(&node->all); | 417 | INIT_LIST_HEAD(&node->all); |
381 | node->info = info; | 418 | if (node->loaded_info) { |
419 | node->loaded_info[0] = info; | ||
420 | node->num_loaded = 1; | ||
421 | } | ||
382 | node->parent = parent; | 422 | node->parent = parent; |
383 | if (name) | 423 | if (name) |
384 | strcpy(node->name, name); | 424 | strcpy(node->name, name); |
@@ -394,9 +434,13 @@ static struct gcov_node *new_node(struct gcov_node *parent, | |||
394 | struct gcov_node *node; | 434 | struct gcov_node *node; |
395 | 435 | ||
396 | node = kzalloc(sizeof(struct gcov_node) + strlen(name) + 1, GFP_KERNEL); | 436 | node = kzalloc(sizeof(struct gcov_node) + strlen(name) + 1, GFP_KERNEL); |
397 | if (!node) { | 437 | if (!node) |
398 | pr_warning("out of memory\n"); | 438 | goto err_nomem; |
399 | return NULL; | 439 | if (info) { |
440 | node->loaded_info = kcalloc(1, sizeof(struct gcov_info *), | ||
441 | GFP_KERNEL); | ||
442 | if (!node->loaded_info) | ||
443 | goto err_nomem; | ||
400 | } | 444 | } |
401 | init_node(node, info, name, parent); | 445 | init_node(node, info, name, parent); |
402 | /* Differentiate between gcov data file nodes and directory nodes. */ | 446 | /* Differentiate between gcov data file nodes and directory nodes. */ |
@@ -416,6 +460,11 @@ static struct gcov_node *new_node(struct gcov_node *parent, | |||
416 | list_add(&node->all, &all_head); | 460 | list_add(&node->all, &all_head); |
417 | 461 | ||
418 | return node; | 462 | return node; |
463 | |||
464 | err_nomem: | ||
465 | kfree(node); | ||
466 | pr_warning("out of memory\n"); | ||
467 | return NULL; | ||
419 | } | 468 | } |
420 | 469 | ||
421 | /* Remove symbolic links associated with node. */ | 470 | /* Remove symbolic links associated with node. */ |
@@ -441,8 +490,9 @@ static void release_node(struct gcov_node *node) | |||
441 | list_del(&node->all); | 490 | list_del(&node->all); |
442 | debugfs_remove(node->dentry); | 491 | debugfs_remove(node->dentry); |
443 | remove_links(node); | 492 | remove_links(node); |
444 | if (node->ghost) | 493 | kfree(node->loaded_info); |
445 | gcov_info_free(node->ghost); | 494 | if (node->unloaded_info) |
495 | gcov_info_free(node->unloaded_info); | ||
446 | kfree(node); | 496 | kfree(node); |
447 | } | 497 | } |
448 | 498 | ||
@@ -477,7 +527,7 @@ static struct gcov_node *get_child_by_name(struct gcov_node *parent, | |||
477 | 527 | ||
478 | /* | 528 | /* |
479 | * write() implementation for reset file. Reset all profiling data to zero | 529 | * write() implementation for reset file. Reset all profiling data to zero |
480 | * and remove ghost nodes. | 530 | * and remove nodes for which all associated object files are unloaded. |
481 | */ | 531 | */ |
482 | static ssize_t reset_write(struct file *file, const char __user *addr, | 532 | static ssize_t reset_write(struct file *file, const char __user *addr, |
483 | size_t len, loff_t *pos) | 533 | size_t len, loff_t *pos) |
@@ -487,8 +537,8 @@ static ssize_t reset_write(struct file *file, const char __user *addr, | |||
487 | mutex_lock(&node_lock); | 537 | mutex_lock(&node_lock); |
488 | restart: | 538 | restart: |
489 | list_for_each_entry(node, &all_head, all) { | 539 | list_for_each_entry(node, &all_head, all) { |
490 | if (node->info) | 540 | if (node->num_loaded > 0) |
491 | gcov_info_reset(node->info); | 541 | reset_node(node); |
492 | else if (list_empty(&node->children)) { | 542 | else if (list_empty(&node->children)) { |
493 | remove_node(node); | 543 | remove_node(node); |
494 | /* Several nodes may have gone - restart loop. */ | 544 | /* Several nodes may have gone - restart loop. */ |
@@ -564,37 +614,115 @@ err_remove: | |||
564 | } | 614 | } |
565 | 615 | ||
566 | /* | 616 | /* |
567 | * The profiling data set associated with this node is being unloaded. Store a | 617 | * Associate a profiling data set with an existing node. Needs to be called |
568 | * copy of the profiling data and turn this node into a "ghost". | 618 | * with node_lock held. |
569 | */ | 619 | */ |
570 | static int ghost_node(struct gcov_node *node) | 620 | static void add_info(struct gcov_node *node, struct gcov_info *info) |
571 | { | 621 | { |
572 | node->ghost = gcov_info_dup(node->info); | 622 | struct gcov_info **loaded_info; |
573 | if (!node->ghost) { | 623 | int num = node->num_loaded; |
574 | pr_warning("could not save data for '%s' (out of memory)\n", | 624 | |
575 | node->info->filename); | 625 | /* |
576 | return -ENOMEM; | 626 | * Prepare new array. This is done first to simplify cleanup in |
627 | * case the new data set is incompatible, the node only contains | ||
628 | * unloaded data sets and there's not enough memory for the array. | ||
629 | */ | ||
630 | loaded_info = kcalloc(num + 1, sizeof(struct gcov_info *), GFP_KERNEL); | ||
631 | if (!loaded_info) { | ||
632 | pr_warning("could not add '%s' (out of memory)\n", | ||
633 | info->filename); | ||
634 | return; | ||
635 | } | ||
636 | memcpy(loaded_info, node->loaded_info, | ||
637 | num * sizeof(struct gcov_info *)); | ||
638 | loaded_info[num] = info; | ||
639 | /* Check if the new data set is compatible. */ | ||
640 | if (num == 0) { | ||
641 | /* | ||
642 | * A module was unloaded, modified and reloaded. The new | ||
643 | * data set replaces the copy of the last one. | ||
644 | */ | ||
645 | if (!gcov_info_is_compatible(node->unloaded_info, info)) { | ||
646 | pr_warning("discarding saved data for %s " | ||
647 | "(incompatible version)\n", info->filename); | ||
648 | gcov_info_free(node->unloaded_info); | ||
649 | node->unloaded_info = NULL; | ||
650 | } | ||
651 | } else { | ||
652 | /* | ||
653 | * Two different versions of the same object file are loaded. | ||
654 | * The initial one takes precedence. | ||
655 | */ | ||
656 | if (!gcov_info_is_compatible(node->loaded_info[0], info)) { | ||
657 | pr_warning("could not add '%s' (incompatible " | ||
658 | "version)\n", info->filename); | ||
659 | kfree(loaded_info); | ||
660 | return; | ||
661 | } | ||
577 | } | 662 | } |
578 | node->info = NULL; | 663 | /* Overwrite previous array. */ |
664 | kfree(node->loaded_info); | ||
665 | node->loaded_info = loaded_info; | ||
666 | node->num_loaded = num + 1; | ||
667 | } | ||
579 | 668 | ||
580 | return 0; | 669 | /* |
670 | * Return the index of a profiling data set associated with a node. | ||
671 | */ | ||
672 | static int get_info_index(struct gcov_node *node, struct gcov_info *info) | ||
673 | { | ||
674 | int i; | ||
675 | |||
676 | for (i = 0; i < node->num_loaded; i++) { | ||
677 | if (node->loaded_info[i] == info) | ||
678 | return i; | ||
679 | } | ||
680 | return -ENOENT; | ||
581 | } | 681 | } |
582 | 682 | ||
583 | /* | 683 | /* |
584 | * Profiling data for this node has been loaded again. Add profiling data | 684 | * Save the data of a profiling data set which is being unloaded. |
585 | * from previous instantiation and turn this node into a regular node. | ||
586 | */ | 685 | */ |
587 | static void revive_node(struct gcov_node *node, struct gcov_info *info) | 686 | static void save_info(struct gcov_node *node, struct gcov_info *info) |
588 | { | 687 | { |
589 | if (gcov_info_is_compatible(node->ghost, info)) | 688 | if (node->unloaded_info) |
590 | gcov_info_add(info, node->ghost); | 689 | gcov_info_add(node->unloaded_info, info); |
591 | else { | 690 | else { |
592 | pr_warning("discarding saved data for '%s' (version changed)\n", | 691 | node->unloaded_info = gcov_info_dup(info); |
692 | if (!node->unloaded_info) { | ||
693 | pr_warning("could not save data for '%s' " | ||
694 | "(out of memory)\n", info->filename); | ||
695 | } | ||
696 | } | ||
697 | } | ||
698 | |||
699 | /* | ||
700 | * Disassociate a profiling data set from a node. Needs to be called with | ||
701 | * node_lock held. | ||
702 | */ | ||
703 | static void remove_info(struct gcov_node *node, struct gcov_info *info) | ||
704 | { | ||
705 | int i; | ||
706 | |||
707 | i = get_info_index(node, info); | ||
708 | if (i < 0) { | ||
709 | pr_warning("could not remove '%s' (not found)\n", | ||
593 | info->filename); | 710 | info->filename); |
711 | return; | ||
594 | } | 712 | } |
595 | gcov_info_free(node->ghost); | 713 | if (gcov_persist) |
596 | node->ghost = NULL; | 714 | save_info(node, info); |
597 | node->info = info; | 715 | /* Shrink array. */ |
716 | node->loaded_info[i] = node->loaded_info[node->num_loaded - 1]; | ||
717 | node->num_loaded--; | ||
718 | if (node->num_loaded > 0) | ||
719 | return; | ||
720 | /* Last loaded data set was removed. */ | ||
721 | kfree(node->loaded_info); | ||
722 | node->loaded_info = NULL; | ||
723 | node->num_loaded = 0; | ||
724 | if (!node->unloaded_info) | ||
725 | remove_node(node); | ||
598 | } | 726 | } |
599 | 727 | ||
600 | /* | 728 | /* |
@@ -609,30 +737,18 @@ void gcov_event(enum gcov_action action, struct gcov_info *info) | |||
609 | node = get_node_by_name(info->filename); | 737 | node = get_node_by_name(info->filename); |
610 | switch (action) { | 738 | switch (action) { |
611 | case GCOV_ADD: | 739 | case GCOV_ADD: |
612 | /* Add new node or revive ghost. */ | 740 | if (node) |
613 | if (!node) { | 741 | add_info(node, info); |
742 | else | ||
614 | add_node(info); | 743 | add_node(info); |
615 | break; | ||
616 | } | ||
617 | if (gcov_persist) | ||
618 | revive_node(node, info); | ||
619 | else { | ||
620 | pr_warning("could not add '%s' (already exists)\n", | ||
621 | info->filename); | ||
622 | } | ||
623 | break; | 744 | break; |
624 | case GCOV_REMOVE: | 745 | case GCOV_REMOVE: |
625 | /* Remove node or turn into ghost. */ | 746 | if (node) |
626 | if (!node) { | 747 | remove_info(node, info); |
748 | else { | ||
627 | pr_warning("could not remove '%s' (not found)\n", | 749 | pr_warning("could not remove '%s' (not found)\n", |
628 | info->filename); | 750 | info->filename); |
629 | break; | ||
630 | } | 751 | } |
631 | if (gcov_persist) { | ||
632 | if (!ghost_node(node)) | ||
633 | break; | ||
634 | } | ||
635 | remove_node(node); | ||
636 | break; | 752 | break; |
637 | } | 753 | } |
638 | mutex_unlock(&node_lock); | 754 | mutex_unlock(&node_lock); |
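
Editor's note: the gcov hunk above replaces the single node->info pointer with a loaded_info array that grows by one slot each time another copy of the same object file's data is loaded, and it builds the enlarged array before validating compatibility so that any failure leaves the old state intact. A minimal user-space sketch of that grow-with-rollback pattern follows; the function and parameter names are illustrative, not the kernel's.

#include <stdlib.h>
#include <string.h>

/*
 * Append one entry to a heap-allocated pointer array. The enlarged
 * array is prepared first, so a failed allocation (or a later
 * validation failure in the caller) leaves the original array usable.
 * Returns 0 on success, -1 on allocation failure.
 */
int append_entry(void ***array, size_t *num, void *entry)
{
	void **grown = calloc(*num + 1, sizeof(*grown));

	if (!grown)
		return -1;			/* old array untouched */
	if (*num)
		memcpy(grown, *array, *num * sizeof(*grown));
	grown[*num] = entry;

	free(*array);				/* swap in the new array */
	*array = grown;
	(*num)++;
	return 0;
}
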
diff --git a/kernel/groups.c b/kernel/groups.c index 53b1916c9492..253dc0f35cf4 100644 --- a/kernel/groups.c +++ b/kernel/groups.c | |||
@@ -143,10 +143,9 @@ int groups_search(const struct group_info *group_info, gid_t grp) | |||
143 | right = group_info->ngroups; | 143 | right = group_info->ngroups; |
144 | while (left < right) { | 144 | while (left < right) { |
145 | unsigned int mid = (left+right)/2; | 145 | unsigned int mid = (left+right)/2; |
146 | int cmp = grp - GROUP_AT(group_info, mid); | 146 | if (grp > GROUP_AT(group_info, mid)) |
147 | if (cmp > 0) | ||
148 | left = mid + 1; | 147 | left = mid + 1; |
149 | else if (cmp < 0) | 148 | else if (grp < GROUP_AT(group_info, mid)) |
150 | right = mid; | 149 | right = mid; |
151 | else | 150 | else |
152 | return 1; | 151 | return 1; |
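
Editor's note: the groups.c change drops the grp - GROUP_AT(group_info, mid) subtraction because gid_t is unsigned, so the difference wraps instead of going negative and the sign test misbehaves for large values. Below is a small sketch of the same comparison-based binary search over a sorted array of unsigned values.

#include <stddef.h>

/* Return 1 if key is present in the sorted array vals[0..n-1], else 0.
 * Explicit comparisons avoid the unsigned wrap-around that a
 * subtraction-based "cmp" would suffer. */
int contains(const unsigned int *vals, size_t n, unsigned int key)
{
	size_t left = 0, right = n;

	while (left < right) {
		size_t mid = left + (right - left) / 2;

		if (key > vals[mid])
			left = mid + 1;
		else if (key < vals[mid])
			right = mid;
		else
			return 1;
	}
	return 0;
}
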
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index ce669174f355..1decafbb6b1a 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c | |||
@@ -1091,11 +1091,10 @@ EXPORT_SYMBOL_GPL(hrtimer_cancel); | |||
1091 | */ | 1091 | */ |
1092 | ktime_t hrtimer_get_remaining(const struct hrtimer *timer) | 1092 | ktime_t hrtimer_get_remaining(const struct hrtimer *timer) |
1093 | { | 1093 | { |
1094 | struct hrtimer_clock_base *base; | ||
1095 | unsigned long flags; | 1094 | unsigned long flags; |
1096 | ktime_t rem; | 1095 | ktime_t rem; |
1097 | 1096 | ||
1098 | base = lock_hrtimer_base(timer, &flags); | 1097 | lock_hrtimer_base(timer, &flags); |
1099 | rem = hrtimer_expires_remaining(timer); | 1098 | rem = hrtimer_expires_remaining(timer); |
1100 | unlock_hrtimer_base(timer, &flags); | 1099 | unlock_hrtimer_base(timer, &flags); |
1101 | 1100 | ||
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c index d71a987fd2bf..c7c2aed9e2dc 100644 --- a/kernel/hw_breakpoint.c +++ b/kernel/hw_breakpoint.c | |||
@@ -433,7 +433,8 @@ register_user_hw_breakpoint(struct perf_event_attr *attr, | |||
433 | perf_overflow_handler_t triggered, | 433 | perf_overflow_handler_t triggered, |
434 | struct task_struct *tsk) | 434 | struct task_struct *tsk) |
435 | { | 435 | { |
436 | return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered); | 436 | return perf_event_create_kernel_counter(attr, -1, task_pid_vnr(tsk), |
437 | triggered); | ||
437 | } | 438 | } |
438 | EXPORT_SYMBOL_GPL(register_user_hw_breakpoint); | 439 | EXPORT_SYMBOL_GPL(register_user_hw_breakpoint); |
439 | 440 | ||
diff --git a/kernel/kfifo.c b/kernel/kfifo.c index 6b5580c57644..01a0700e873f 100644 --- a/kernel/kfifo.c +++ b/kernel/kfifo.c | |||
@@ -365,8 +365,6 @@ static unsigned int setup_sgl(struct __kfifo *fifo, struct scatterlist *sgl, | |||
365 | n = setup_sgl_buf(sgl, fifo->data + off, nents, l); | 365 | n = setup_sgl_buf(sgl, fifo->data + off, nents, l); |
366 | n += setup_sgl_buf(sgl + n, fifo->data, nents - n, len - l); | 366 | n += setup_sgl_buf(sgl + n, fifo->data, nents - n, len - l); |
367 | 367 | ||
368 | if (n) | ||
369 | sg_mark_end(sgl + n - 1); | ||
370 | return n; | 368 | return n; |
371 | } | 369 | } |
372 | 370 | ||
diff --git a/kernel/module.c b/kernel/module.c index d0b5f8db11b4..ccd641991842 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
@@ -1537,6 +1537,7 @@ static int __unlink_module(void *_mod) | |||
1537 | { | 1537 | { |
1538 | struct module *mod = _mod; | 1538 | struct module *mod = _mod; |
1539 | list_del(&mod->list); | 1539 | list_del(&mod->list); |
1540 | module_bug_cleanup(mod); | ||
1540 | return 0; | 1541 | return 0; |
1541 | } | 1542 | } |
1542 | 1543 | ||
@@ -2625,6 +2626,7 @@ static struct module *load_module(void __user *umod, | |||
2625 | if (err < 0) | 2626 | if (err < 0) |
2626 | goto ddebug; | 2627 | goto ddebug; |
2627 | 2628 | ||
2629 | module_bug_finalize(info.hdr, info.sechdrs, mod); | ||
2628 | list_add_rcu(&mod->list, &modules); | 2630 | list_add_rcu(&mod->list, &modules); |
2629 | mutex_unlock(&module_mutex); | 2631 | mutex_unlock(&module_mutex); |
2630 | 2632 | ||
@@ -2650,6 +2652,8 @@ static struct module *load_module(void __user *umod, | |||
2650 | mutex_lock(&module_mutex); | 2652 | mutex_lock(&module_mutex); |
2651 | /* Unlink carefully: kallsyms could be walking list. */ | 2653 | /* Unlink carefully: kallsyms could be walking list. */ |
2652 | list_del_rcu(&mod->list); | 2654 | list_del_rcu(&mod->list); |
2655 | module_bug_cleanup(mod); | ||
2656 | |||
2653 | ddebug: | 2657 | ddebug: |
2654 | if (!mod->taints) | 2658 | if (!mod->taints) |
2655 | dynamic_debug_remove(info.debug); | 2659 | dynamic_debug_remove(info.debug); |
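
Editor's note: the module.c hunks register the module's BUG table entries (module_bug_finalize) just before the module is published on the global list and remove them again (module_bug_cleanup) both on normal unlink and on the load-failure path, so the registration can never outlive the module. A generic sketch of keeping such a paired setup/teardown balanced on every exit path, using hypothetical stand-in helpers:

struct obj { int aux_registered; int published; };

/* Stand-ins for the real registration steps. */
int register_aux(struct obj *o)    { o->aux_registered = 1; return 0; }
void unregister_aux(struct obj *o) { o->aux_registered = 0; }
int publish(struct obj *o)         { o->published = 1; return 0; }
void unpublish(struct obj *o)      { o->published = 0; }

int load_object(struct obj *o)
{
	int err;

	err = register_aux(o);			/* step 1 */
	if (err)
		return err;

	err = publish(o);			/* step 2 */
	if (err)
		goto err_unregister;		/* undo step 1 */
	return 0;

err_unregister:
	unregister_aux(o);
	return err;
}

void unload_object(struct obj *o)
{
	unpublish(o);				/* tear down in reverse order */
	unregister_aux(o);
}
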
diff --git a/kernel/mutex.c b/kernel/mutex.c index 4c0b7b3e6d2e..200407c1502f 100644 --- a/kernel/mutex.c +++ b/kernel/mutex.c | |||
@@ -36,15 +36,6 @@ | |||
36 | # include <asm/mutex.h> | 36 | # include <asm/mutex.h> |
37 | #endif | 37 | #endif |
38 | 38 | ||
39 | /*** | ||
40 | * mutex_init - initialize the mutex | ||
41 | * @lock: the mutex to be initialized | ||
42 | * @key: the lock_class_key for the class; used by mutex lock debugging | ||
43 | * | ||
44 | * Initialize the mutex to unlocked state. | ||
45 | * | ||
46 | * It is not allowed to initialize an already locked mutex. | ||
47 | */ | ||
48 | void | 39 | void |
49 | __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key) | 40 | __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key) |
50 | { | 41 | { |
@@ -68,7 +59,7 @@ EXPORT_SYMBOL(__mutex_init); | |||
68 | static __used noinline void __sched | 59 | static __used noinline void __sched |
69 | __mutex_lock_slowpath(atomic_t *lock_count); | 60 | __mutex_lock_slowpath(atomic_t *lock_count); |
70 | 61 | ||
71 | /*** | 62 | /** |
72 | * mutex_lock - acquire the mutex | 63 | * mutex_lock - acquire the mutex |
73 | * @lock: the mutex to be acquired | 64 | * @lock: the mutex to be acquired |
74 | * | 65 | * |
@@ -105,7 +96,7 @@ EXPORT_SYMBOL(mutex_lock); | |||
105 | 96 | ||
106 | static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count); | 97 | static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count); |
107 | 98 | ||
108 | /*** | 99 | /** |
109 | * mutex_unlock - release the mutex | 100 | * mutex_unlock - release the mutex |
110 | * @lock: the mutex to be released | 101 | * @lock: the mutex to be released |
111 | * | 102 | * |
@@ -364,8 +355,8 @@ __mutex_lock_killable_slowpath(atomic_t *lock_count); | |||
364 | static noinline int __sched | 355 | static noinline int __sched |
365 | __mutex_lock_interruptible_slowpath(atomic_t *lock_count); | 356 | __mutex_lock_interruptible_slowpath(atomic_t *lock_count); |
366 | 357 | ||
367 | /*** | 358 | /** |
368 | * mutex_lock_interruptible - acquire the mutex, interruptable | 359 | * mutex_lock_interruptible - acquire the mutex, interruptible |
369 | * @lock: the mutex to be acquired | 360 | * @lock: the mutex to be acquired |
370 | * | 361 | * |
371 | * Lock the mutex like mutex_lock(), and return 0 if the mutex has | 362 | * Lock the mutex like mutex_lock(), and return 0 if the mutex has |
@@ -456,15 +447,15 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count) | |||
456 | return prev == 1; | 447 | return prev == 1; |
457 | } | 448 | } |
458 | 449 | ||
459 | /*** | 450 | /** |
460 | * mutex_trylock - try acquire the mutex, without waiting | 451 | * mutex_trylock - try to acquire the mutex, without waiting |
461 | * @lock: the mutex to be acquired | 452 | * @lock: the mutex to be acquired |
462 | * | 453 | * |
463 | * Try to acquire the mutex atomically. Returns 1 if the mutex | 454 | * Try to acquire the mutex atomically. Returns 1 if the mutex |
464 | * has been acquired successfully, and 0 on contention. | 455 | * has been acquired successfully, and 0 on contention. |
465 | * | 456 | * |
466 | * NOTE: this function follows the spin_trylock() convention, so | 457 | * NOTE: this function follows the spin_trylock() convention, so |
467 | * it is negated to the down_trylock() return values! Be careful | 458 | * it is negated from the down_trylock() return values! Be careful |
468 | * about this when converting semaphore users to mutexes. | 459 | * about this when converting semaphore users to mutexes. |
469 | * | 460 | * |
470 | * This function must not be used in interrupt context. The | 461 | * This function must not be used in interrupt context. The |
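
Editor's note: the mutex.c hunks only touch comment markers. kernel-doc extracts blocks that open with /**, so the old /*** openers kept these descriptions out of the generated documentation. For reference, a minimal kernel-doc comment in the recognized form, written for a made-up function (only the comment format matters here):

struct widget;

/**
 * widget_lock - acquire exclusive access to a widget
 * @w: the widget to be locked
 *
 * Sleeps until the widget becomes available. Must not be called
 * from interrupt context.
 */
void widget_lock(struct widget *w);
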
diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 403d1804b198..db5b56064687 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c | |||
@@ -402,11 +402,31 @@ static void perf_group_detach(struct perf_event *event) | |||
402 | } | 402 | } |
403 | } | 403 | } |
404 | 404 | ||
405 | static inline int | ||
406 | event_filter_match(struct perf_event *event) | ||
407 | { | ||
408 | return event->cpu == -1 || event->cpu == smp_processor_id(); | ||
409 | } | ||
410 | |||
405 | static void | 411 | static void |
406 | event_sched_out(struct perf_event *event, | 412 | event_sched_out(struct perf_event *event, |
407 | struct perf_cpu_context *cpuctx, | 413 | struct perf_cpu_context *cpuctx, |
408 | struct perf_event_context *ctx) | 414 | struct perf_event_context *ctx) |
409 | { | 415 | { |
416 | u64 delta; | ||
417 | /* | ||
418 | * An event which could not be activated because of | ||
419 | * filter mismatch still needs to have its timings | ||
420 | * maintained, otherwise bogus information is returned | ||
421 | * via read() for time_enabled, time_running: | ||
422 | */ | ||
423 | if (event->state == PERF_EVENT_STATE_INACTIVE | ||
424 | && !event_filter_match(event)) { | ||
425 | delta = ctx->time - event->tstamp_stopped; | ||
426 | event->tstamp_running += delta; | ||
427 | event->tstamp_stopped = ctx->time; | ||
428 | } | ||
429 | |||
410 | if (event->state != PERF_EVENT_STATE_ACTIVE) | 430 | if (event->state != PERF_EVENT_STATE_ACTIVE) |
411 | return; | 431 | return; |
412 | 432 | ||
@@ -432,9 +452,7 @@ group_sched_out(struct perf_event *group_event, | |||
432 | struct perf_event_context *ctx) | 452 | struct perf_event_context *ctx) |
433 | { | 453 | { |
434 | struct perf_event *event; | 454 | struct perf_event *event; |
435 | 455 | int state = group_event->state; | |
436 | if (group_event->state != PERF_EVENT_STATE_ACTIVE) | ||
437 | return; | ||
438 | 456 | ||
439 | event_sched_out(group_event, cpuctx, ctx); | 457 | event_sched_out(group_event, cpuctx, ctx); |
440 | 458 | ||
@@ -444,7 +462,7 @@ group_sched_out(struct perf_event *group_event, | |||
444 | list_for_each_entry(event, &group_event->sibling_list, group_entry) | 462 | list_for_each_entry(event, &group_event->sibling_list, group_entry) |
445 | event_sched_out(event, cpuctx, ctx); | 463 | event_sched_out(event, cpuctx, ctx); |
446 | 464 | ||
447 | if (group_event->attr.exclusive) | 465 | if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive) |
448 | cpuctx->exclusive = 0; | 466 | cpuctx->exclusive = 0; |
449 | } | 467 | } |
450 | 468 | ||
@@ -5743,15 +5761,15 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) | |||
5743 | { | 5761 | { |
5744 | unsigned int cpu = (long)hcpu; | 5762 | unsigned int cpu = (long)hcpu; |
5745 | 5763 | ||
5746 | switch (action) { | 5764 | switch (action & ~CPU_TASKS_FROZEN) { |
5747 | 5765 | ||
5748 | case CPU_UP_PREPARE: | 5766 | case CPU_UP_PREPARE: |
5749 | case CPU_UP_PREPARE_FROZEN: | 5767 | case CPU_DOWN_FAILED: |
5750 | perf_event_init_cpu(cpu); | 5768 | perf_event_init_cpu(cpu); |
5751 | break; | 5769 | break; |
5752 | 5770 | ||
5771 | case CPU_UP_CANCELED: | ||
5753 | case CPU_DOWN_PREPARE: | 5772 | case CPU_DOWN_PREPARE: |
5754 | case CPU_DOWN_PREPARE_FROZEN: | ||
5755 | perf_event_exit_cpu(cpu); | 5773 | perf_event_exit_cpu(cpu); |
5756 | break; | 5774 | break; |
5757 | 5775 | ||
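
Editor's note: two things change in perf_event.c above. event_sched_out() keeps time_enabled/time_running consistent for events that never passed the CPU filter, and the hotplug notifier masks out CPU_TASKS_FROZEN so the frozen (suspend/resume) variants of each action share the normal case labels. The masking idiom is easy to show in isolation; the constants below are illustrative stand-ins, not the values from <linux/cpu.h>.

#include <stdio.h>

#define TASKS_FROZEN	0x10		/* illustrative, not the kernel value */
#define UP_PREPARE	0x01
#define DOWN_PREPARE	0x02

void handle_action(unsigned long action, unsigned int cpu)
{
	switch (action & ~TASKS_FROZEN) {	/* fold *_FROZEN into the base case */
	case UP_PREPARE:
		printf("cpu%u: prepare\n", cpu);
		break;
	case DOWN_PREPARE:
		printf("cpu%u: going down\n", cpu);
		break;
	}
}

int main(void)
{
	handle_action(UP_PREPARE, 0);
	handle_action(UP_PREPARE | TASKS_FROZEN, 1);	/* takes the same path */
	return 0;
}
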
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c index b7e4c362361b..645e541a45f6 100644 --- a/kernel/pm_qos_params.c +++ b/kernel/pm_qos_params.c | |||
@@ -389,10 +389,12 @@ static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf, | |||
389 | } else if (count == 11) { /* len('0x12345678/0') */ | 389 | } else if (count == 11) { /* len('0x12345678/0') */ |
390 | if (copy_from_user(ascii_value, buf, 11)) | 390 | if (copy_from_user(ascii_value, buf, 11)) |
391 | return -EFAULT; | 391 | return -EFAULT; |
392 | if (strlen(ascii_value) != 10) | ||
393 | return -EINVAL; | ||
392 | x = sscanf(ascii_value, "%x", &value); | 394 | x = sscanf(ascii_value, "%x", &value); |
393 | if (x != 1) | 395 | if (x != 1) |
394 | return -EINVAL; | 396 | return -EINVAL; |
395 | pr_debug(KERN_ERR "%s, %d, 0x%x\n", ascii_value, x, value); | 397 | pr_debug("%s, %d, 0x%x\n", ascii_value, x, value); |
396 | } else | 398 | } else |
397 | return -EINVAL; | 399 | return -EINVAL; |
398 | 400 | ||
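
Editor's note: the pm_qos hunk adds a strlen() check so that only a full 10-character "0x12345678" string reaches sscanf(); a shorter copied-in buffer could otherwise still parse as a plausible value. A user-space sketch of the same validate-then-parse step:

#include <stdio.h>
#include <string.h>

/* Parse a value written exactly as "0x12345678" (10 characters).
 * Returns 0 on success, -1 if the input is malformed. */
int parse_hex32(const char *s, unsigned int *value)
{
	if (strlen(s) != 10)
		return -1;
	if (sscanf(s, "%x", value) != 1)	/* %x accepts the 0x prefix */
		return -1;
	return 0;
}
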
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index c77963938bca..8dc31e02ae12 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c | |||
@@ -338,7 +338,6 @@ int hibernation_snapshot(int platform_mode) | |||
338 | goto Close; | 338 | goto Close; |
339 | 339 | ||
340 | suspend_console(); | 340 | suspend_console(); |
341 | hibernation_freeze_swap(); | ||
342 | saved_mask = clear_gfp_allowed_mask(GFP_IOFS); | 341 | saved_mask = clear_gfp_allowed_mask(GFP_IOFS); |
343 | error = dpm_suspend_start(PMSG_FREEZE); | 342 | error = dpm_suspend_start(PMSG_FREEZE); |
344 | if (error) | 343 | if (error) |
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 5e7edfb05e66..d3f795f01bbc 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c | |||
@@ -1086,7 +1086,6 @@ void swsusp_free(void) | |||
1086 | buffer = NULL; | 1086 | buffer = NULL; |
1087 | alloc_normal = 0; | 1087 | alloc_normal = 0; |
1088 | alloc_highmem = 0; | 1088 | alloc_highmem = 0; |
1089 | hibernation_thaw_swap(); | ||
1090 | } | 1089 | } |
1091 | 1090 | ||
1092 | /* Helper functions used for the shrinking of memory. */ | 1091 | /* Helper functions used for the shrinking of memory. */ |
@@ -1122,9 +1121,19 @@ static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask) | |||
1122 | return nr_alloc; | 1121 | return nr_alloc; |
1123 | } | 1122 | } |
1124 | 1123 | ||
1125 | static unsigned long preallocate_image_memory(unsigned long nr_pages) | 1124 | static unsigned long preallocate_image_memory(unsigned long nr_pages, |
1125 | unsigned long avail_normal) | ||
1126 | { | 1126 | { |
1127 | return preallocate_image_pages(nr_pages, GFP_IMAGE); | 1127 | unsigned long alloc; |
1128 | |||
1129 | if (avail_normal <= alloc_normal) | ||
1130 | return 0; | ||
1131 | |||
1132 | alloc = avail_normal - alloc_normal; | ||
1133 | if (nr_pages < alloc) | ||
1134 | alloc = nr_pages; | ||
1135 | |||
1136 | return preallocate_image_pages(alloc, GFP_IMAGE); | ||
1128 | } | 1137 | } |
1129 | 1138 | ||
1130 | #ifdef CONFIG_HIGHMEM | 1139 | #ifdef CONFIG_HIGHMEM |
@@ -1170,15 +1179,22 @@ static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages, | |||
1170 | */ | 1179 | */ |
1171 | static void free_unnecessary_pages(void) | 1180 | static void free_unnecessary_pages(void) |
1172 | { | 1181 | { |
1173 | unsigned long save_highmem, to_free_normal, to_free_highmem; | 1182 | unsigned long save, to_free_normal, to_free_highmem; |
1174 | 1183 | ||
1175 | to_free_normal = alloc_normal - count_data_pages(); | 1184 | save = count_data_pages(); |
1176 | save_highmem = count_highmem_pages(); | 1185 | if (alloc_normal >= save) { |
1177 | if (alloc_highmem > save_highmem) { | 1186 | to_free_normal = alloc_normal - save; |
1178 | to_free_highmem = alloc_highmem - save_highmem; | 1187 | save = 0; |
1188 | } else { | ||
1189 | to_free_normal = 0; | ||
1190 | save -= alloc_normal; | ||
1191 | } | ||
1192 | save += count_highmem_pages(); | ||
1193 | if (alloc_highmem >= save) { | ||
1194 | to_free_highmem = alloc_highmem - save; | ||
1179 | } else { | 1195 | } else { |
1180 | to_free_highmem = 0; | 1196 | to_free_highmem = 0; |
1181 | to_free_normal -= save_highmem - alloc_highmem; | 1197 | to_free_normal -= save - alloc_highmem; |
1182 | } | 1198 | } |
1183 | 1199 | ||
1184 | memory_bm_position_reset(©_bm); | 1200 | memory_bm_position_reset(©_bm); |
@@ -1259,7 +1275,7 @@ int hibernate_preallocate_memory(void) | |||
1259 | { | 1275 | { |
1260 | struct zone *zone; | 1276 | struct zone *zone; |
1261 | unsigned long saveable, size, max_size, count, highmem, pages = 0; | 1277 | unsigned long saveable, size, max_size, count, highmem, pages = 0; |
1262 | unsigned long alloc, save_highmem, pages_highmem; | 1278 | unsigned long alloc, save_highmem, pages_highmem, avail_normal; |
1263 | struct timeval start, stop; | 1279 | struct timeval start, stop; |
1264 | int error; | 1280 | int error; |
1265 | 1281 | ||
@@ -1296,6 +1312,7 @@ int hibernate_preallocate_memory(void) | |||
1296 | else | 1312 | else |
1297 | count += zone_page_state(zone, NR_FREE_PAGES); | 1313 | count += zone_page_state(zone, NR_FREE_PAGES); |
1298 | } | 1314 | } |
1315 | avail_normal = count; | ||
1299 | count += highmem; | 1316 | count += highmem; |
1300 | count -= totalreserve_pages; | 1317 | count -= totalreserve_pages; |
1301 | 1318 | ||
@@ -1310,12 +1327,21 @@ int hibernate_preallocate_memory(void) | |||
1310 | */ | 1327 | */ |
1311 | if (size >= saveable) { | 1328 | if (size >= saveable) { |
1312 | pages = preallocate_image_highmem(save_highmem); | 1329 | pages = preallocate_image_highmem(save_highmem); |
1313 | pages += preallocate_image_memory(saveable - pages); | 1330 | pages += preallocate_image_memory(saveable - pages, avail_normal); |
1314 | goto out; | 1331 | goto out; |
1315 | } | 1332 | } |
1316 | 1333 | ||
1317 | /* Estimate the minimum size of the image. */ | 1334 | /* Estimate the minimum size of the image. */ |
1318 | pages = minimum_image_size(saveable); | 1335 | pages = minimum_image_size(saveable); |
1336 | /* | ||
1337 | * To avoid excessive pressure on the normal zone, leave room in it to | ||
1338 | * accommodate an image of the minimum size (unless it's already too | ||
1339 | * small, in which case don't preallocate pages from it at all). | ||
1340 | */ | ||
1341 | if (avail_normal > pages) | ||
1342 | avail_normal -= pages; | ||
1343 | else | ||
1344 | avail_normal = 0; | ||
1319 | if (size < pages) | 1345 | if (size < pages) |
1320 | size = min_t(unsigned long, pages, max_size); | 1346 | size = min_t(unsigned long, pages, max_size); |
1321 | 1347 | ||
@@ -1336,16 +1362,34 @@ int hibernate_preallocate_memory(void) | |||
1336 | */ | 1362 | */ |
1337 | pages_highmem = preallocate_image_highmem(highmem / 2); | 1363 | pages_highmem = preallocate_image_highmem(highmem / 2); |
1338 | alloc = (count - max_size) - pages_highmem; | 1364 | alloc = (count - max_size) - pages_highmem; |
1339 | pages = preallocate_image_memory(alloc); | 1365 | pages = preallocate_image_memory(alloc, avail_normal); |
1340 | if (pages < alloc) | 1366 | if (pages < alloc) { |
1341 | goto err_out; | 1367 | /* We have exhausted non-highmem pages, try highmem. */ |
1342 | size = max_size - size; | 1368 | alloc -= pages; |
1343 | alloc = size; | 1369 | pages += pages_highmem; |
1344 | size = preallocate_highmem_fraction(size, highmem, count); | 1370 | pages_highmem = preallocate_image_highmem(alloc); |
1345 | pages_highmem += size; | 1371 | if (pages_highmem < alloc) |
1346 | alloc -= size; | 1372 | goto err_out; |
1347 | pages += preallocate_image_memory(alloc); | 1373 | pages += pages_highmem; |
1348 | pages += pages_highmem; | 1374 | /* |
1375 | * size is the desired number of saveable pages to leave in | ||
1376 | * memory, so try to preallocate (all memory - size) pages. | ||
1377 | */ | ||
1378 | alloc = (count - pages) - size; | ||
1379 | pages += preallocate_image_highmem(alloc); | ||
1380 | } else { | ||
1381 | /* | ||
1382 | * There are approximately max_size saveable pages at this point | ||
1383 | * and we want to reduce this number down to size. | ||
1384 | */ | ||
1385 | alloc = max_size - size; | ||
1386 | size = preallocate_highmem_fraction(alloc, highmem, count); | ||
1387 | pages_highmem += size; | ||
1388 | alloc -= size; | ||
1389 | size = preallocate_image_memory(alloc, avail_normal); | ||
1390 | pages_highmem += preallocate_image_highmem(alloc - size); | ||
1391 | pages += pages_highmem + size; | ||
1392 | } | ||
1349 | 1393 | ||
1350 | /* | 1394 | /* |
1351 | * We only need as many page frames for the image as there are saveable | 1395 | * We only need as many page frames for the image as there are saveable |
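
Editor's note: the new preallocate_image_memory() in snapshot.c caps each request by how much of the normal zone is still untouched (avail_normal minus what has already been allocated), letting highmem absorb the rest. The clamping rule on its own looks like this; names are illustrative:

/* Clamp a page request so the total taken from a zone never exceeds
 * what the zone had available when preallocation started. */
unsigned long clamp_request(unsigned long want,
			    unsigned long avail, unsigned long taken)
{
	unsigned long room;

	if (avail <= taken)
		return 0;			/* zone already exhausted */
	room = avail - taken;
	return want < room ? want : room;
}
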
diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 5d0059eed3e4..e6a5bdf61a37 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c | |||
@@ -136,10 +136,10 @@ sector_t alloc_swapdev_block(int swap) | |||
136 | { | 136 | { |
137 | unsigned long offset; | 137 | unsigned long offset; |
138 | 138 | ||
139 | offset = swp_offset(get_swap_for_hibernation(swap)); | 139 | offset = swp_offset(get_swap_page_of_type(swap)); |
140 | if (offset) { | 140 | if (offset) { |
141 | if (swsusp_extents_insert(offset)) | 141 | if (swsusp_extents_insert(offset)) |
142 | swap_free_for_hibernation(swp_entry(swap, offset)); | 142 | swap_free(swp_entry(swap, offset)); |
143 | else | 143 | else |
144 | return swapdev_block(swap, offset); | 144 | return swapdev_block(swap, offset); |
145 | } | 145 | } |
@@ -163,7 +163,7 @@ void free_all_swap_pages(int swap) | |||
163 | ext = container_of(node, struct swsusp_extent, node); | 163 | ext = container_of(node, struct swsusp_extent, node); |
164 | rb_erase(node, &swsusp_extents); | 164 | rb_erase(node, &swsusp_extents); |
165 | for (offset = ext->start; offset <= ext->end; offset++) | 165 | for (offset = ext->start; offset <= ext->end; offset++) |
166 | swap_free_for_hibernation(swp_entry(swap, offset)); | 166 | swap_free(swp_entry(swap, offset)); |
167 | 167 | ||
168 | kfree(ext); | 168 | kfree(ext); |
169 | } | 169 | } |
diff --git a/kernel/sched.c b/kernel/sched.c index 09b574e7f4df..dc85ceb90832 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -1294,6 +1294,10 @@ static void resched_task(struct task_struct *p) | |||
1294 | static void sched_rt_avg_update(struct rq *rq, u64 rt_delta) | 1294 | static void sched_rt_avg_update(struct rq *rq, u64 rt_delta) |
1295 | { | 1295 | { |
1296 | } | 1296 | } |
1297 | |||
1298 | static void sched_avg_update(struct rq *rq) | ||
1299 | { | ||
1300 | } | ||
1297 | #endif /* CONFIG_SMP */ | 1301 | #endif /* CONFIG_SMP */ |
1298 | 1302 | ||
1299 | #if BITS_PER_LONG == 32 | 1303 | #if BITS_PER_LONG == 32 |
@@ -3182,6 +3186,8 @@ static void update_cpu_load(struct rq *this_rq) | |||
3182 | 3186 | ||
3183 | this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i; | 3187 | this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i; |
3184 | } | 3188 | } |
3189 | |||
3190 | sched_avg_update(this_rq); | ||
3185 | } | 3191 | } |
3186 | 3192 | ||
3187 | static void update_cpu_load_active(struct rq *this_rq) | 3193 | static void update_cpu_load_active(struct rq *this_rq) |
@@ -3507,9 +3513,9 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) | |||
3507 | rtime = nsecs_to_cputime(p->se.sum_exec_runtime); | 3513 | rtime = nsecs_to_cputime(p->se.sum_exec_runtime); |
3508 | 3514 | ||
3509 | if (total) { | 3515 | if (total) { |
3510 | u64 temp; | 3516 | u64 temp = rtime; |
3511 | 3517 | ||
3512 | temp = (u64)(rtime * utime); | 3518 | temp *= utime; |
3513 | do_div(temp, total); | 3519 | do_div(temp, total); |
3514 | utime = (cputime_t)temp; | 3520 | utime = (cputime_t)temp; |
3515 | } else | 3521 | } else |
@@ -3540,9 +3546,9 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) | |||
3540 | rtime = nsecs_to_cputime(cputime.sum_exec_runtime); | 3546 | rtime = nsecs_to_cputime(cputime.sum_exec_runtime); |
3541 | 3547 | ||
3542 | if (total) { | 3548 | if (total) { |
3543 | u64 temp; | 3549 | u64 temp = rtime; |
3544 | 3550 | ||
3545 | temp = (u64)(rtime * cputime.utime); | 3551 | temp *= cputime.utime; |
3546 | do_div(temp, total); | 3552 | do_div(temp, total); |
3547 | utime = (cputime_t)temp; | 3553 | utime = (cputime_t)temp; |
3548 | } else | 3554 | } else |
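
Editor's note: the sched.c hunks replace temp = (u64)(rtime * utime) with a multiply carried out in 64 bits. When cputime_t is 32 bits wide, the product was computed in 32-bit arithmetic and could overflow before the cast, skewing the utime/stime split. A small demonstration of widening before multiplying (assumes total != 0, as the callers in the hunk already guarantee):

#include <stdint.h>
#include <stdio.h>

/* Scale rtime by the ratio utime/total without overflow by widening
 * to 64 bits before the multiplication. */
uint32_t scale_time(uint32_t rtime, uint32_t utime, uint32_t total)
{
	uint64_t temp = rtime;			/* widen first */

	temp *= utime;				/* 64-bit product */
	return (uint32_t)(temp / total);
}

int main(void)
{
	/* 100000 * 90000 overflows 32 bits; the widened form is exact. */
	printf("%u\n", scale_time(100000, 90000, 100000));
	return 0;
}
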
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index ab661ebc4895..db3f674ca49d 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c | |||
@@ -54,13 +54,13 @@ enum sched_tunable_scaling sysctl_sched_tunable_scaling | |||
54 | * Minimal preemption granularity for CPU-bound tasks: | 54 | * Minimal preemption granularity for CPU-bound tasks: |
55 | * (default: 2 msec * (1 + ilog(ncpus)), units: nanoseconds) | 55 | * (default: 2 msec * (1 + ilog(ncpus)), units: nanoseconds) |
56 | */ | 56 | */ |
57 | unsigned int sysctl_sched_min_granularity = 2000000ULL; | 57 | unsigned int sysctl_sched_min_granularity = 750000ULL; |
58 | unsigned int normalized_sysctl_sched_min_granularity = 2000000ULL; | 58 | unsigned int normalized_sysctl_sched_min_granularity = 750000ULL; |
59 | 59 | ||
60 | /* | 60 | /* |
61 | * is kept at sysctl_sched_latency / sysctl_sched_min_granularity | 61 | * is kept at sysctl_sched_latency / sysctl_sched_min_granularity |
62 | */ | 62 | */ |
63 | static unsigned int sched_nr_latency = 3; | 63 | static unsigned int sched_nr_latency = 8; |
64 | 64 | ||
65 | /* | 65 | /* |
66 | * After fork, child runs first. If set to 0 (default) then | 66 | * After fork, child runs first. If set to 0 (default) then |
@@ -1313,7 +1313,7 @@ static struct sched_group * | |||
1313 | find_idlest_group(struct sched_domain *sd, struct task_struct *p, | 1313 | find_idlest_group(struct sched_domain *sd, struct task_struct *p, |
1314 | int this_cpu, int load_idx) | 1314 | int this_cpu, int load_idx) |
1315 | { | 1315 | { |
1316 | struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups; | 1316 | struct sched_group *idlest = NULL, *group = sd->groups; |
1317 | unsigned long min_load = ULONG_MAX, this_load = 0; | 1317 | unsigned long min_load = ULONG_MAX, this_load = 0; |
1318 | int imbalance = 100 + (sd->imbalance_pct-100)/2; | 1318 | int imbalance = 100 + (sd->imbalance_pct-100)/2; |
1319 | 1319 | ||
@@ -1348,7 +1348,6 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, | |||
1348 | 1348 | ||
1349 | if (local_group) { | 1349 | if (local_group) { |
1350 | this_load = avg_load; | 1350 | this_load = avg_load; |
1351 | this = group; | ||
1352 | } else if (avg_load < min_load) { | 1351 | } else if (avg_load < min_load) { |
1353 | min_load = avg_load; | 1352 | min_load = avg_load; |
1354 | idlest = group; | 1353 | idlest = group; |
@@ -2268,8 +2267,6 @@ unsigned long scale_rt_power(int cpu) | |||
2268 | struct rq *rq = cpu_rq(cpu); | 2267 | struct rq *rq = cpu_rq(cpu); |
2269 | u64 total, available; | 2268 | u64 total, available; |
2270 | 2269 | ||
2271 | sched_avg_update(rq); | ||
2272 | |||
2273 | total = sched_avg_period() + (rq->clock - rq->age_stamp); | 2270 | total = sched_avg_period() + (rq->clock - rq->age_stamp); |
2274 | available = total - rq->rt_avg; | 2271 | available = total - rq->rt_avg; |
2275 | 2272 | ||
@@ -3633,7 +3630,7 @@ static inline int nohz_kick_needed(struct rq *rq, int cpu) | |||
3633 | if (time_before(now, nohz.next_balance)) | 3630 | if (time_before(now, nohz.next_balance)) |
3634 | return 0; | 3631 | return 0; |
3635 | 3632 | ||
3636 | if (!rq->nr_running) | 3633 | if (rq->idle_at_tick) |
3637 | return 0; | 3634 | return 0; |
3638 | 3635 | ||
3639 | first_pick_cpu = atomic_read(&nohz.first_pick_cpu); | 3636 | first_pick_cpu = atomic_read(&nohz.first_pick_cpu); |
diff --git a/kernel/smp.c b/kernel/smp.c index 75c970c715d3..ed6aacfcb7ef 100644 --- a/kernel/smp.c +++ b/kernel/smp.c | |||
@@ -365,9 +365,10 @@ call: | |||
365 | EXPORT_SYMBOL_GPL(smp_call_function_any); | 365 | EXPORT_SYMBOL_GPL(smp_call_function_any); |
366 | 366 | ||
367 | /** | 367 | /** |
368 | * __smp_call_function_single(): Run a function on another CPU | 368 | * __smp_call_function_single(): Run a function on a specific CPU |
369 | * @cpu: The CPU to run on. | 369 | * @cpu: The CPU to run on. |
370 | * @data: Pre-allocated and setup data structure | 370 | * @data: Pre-allocated and setup data structure |
371 | * @wait: If true, wait until function has completed on specified CPU. | ||
371 | * | 372 | * |
372 | * Like smp_call_function_single(), but allow caller to pass in a | 373 | * Like smp_call_function_single(), but allow caller to pass in a |
373 | * pre-allocated data structure. Useful for embedding @data inside | 374 | * pre-allocated data structure. Useful for embedding @data inside |
@@ -376,8 +377,10 @@ EXPORT_SYMBOL_GPL(smp_call_function_any); | |||
376 | void __smp_call_function_single(int cpu, struct call_single_data *data, | 377 | void __smp_call_function_single(int cpu, struct call_single_data *data, |
377 | int wait) | 378 | int wait) |
378 | { | 379 | { |
379 | csd_lock(data); | 380 | unsigned int this_cpu; |
381 | unsigned long flags; | ||
380 | 382 | ||
383 | this_cpu = get_cpu(); | ||
381 | /* | 384 | /* |
382 | * Can deadlock when called with interrupts disabled. | 385 | * Can deadlock when called with interrupts disabled. |
383 | * We allow cpu's that are not yet online though, as no one else can | 386 | * We allow cpu's that are not yet online though, as no one else can |
@@ -387,7 +390,15 @@ void __smp_call_function_single(int cpu, struct call_single_data *data, | |||
387 | WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled() | 390 | WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled() |
388 | && !oops_in_progress); | 391 | && !oops_in_progress); |
389 | 392 | ||
390 | generic_exec_single(cpu, data, wait); | 393 | if (cpu == this_cpu) { |
394 | local_irq_save(flags); | ||
395 | data->func(data->info); | ||
396 | local_irq_restore(flags); | ||
397 | } else { | ||
398 | csd_lock(data); | ||
399 | generic_exec_single(cpu, data, wait); | ||
400 | } | ||
401 | put_cpu(); | ||
391 | } | 402 | } |
392 | 403 | ||
393 | /** | 404 | /** |
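
Editor's note: __smp_call_function_single() now runs the callback directly, with interrupts disabled, when the target CPU turns out to be the current one, instead of taking the csd_lock/IPI path to itself. The dispatch decision in isolation, with user-space stand-ins for the IPI machinery:

/* Illustrative stand-ins only; the real code uses csd_lock() and
 * generic_exec_single() to raise an IPI on the remote CPU. */
struct call_data {
	void (*func)(void *info);
	void *info;
};

unsigned int current_cpu(void) { return 0; }	/* stub */

void send_to_remote_cpu(unsigned int cpu, struct call_data *d)
{
	(void)cpu;
	(void)d;				/* would queue d and raise an IPI */
}

void call_on_cpu(unsigned int cpu, struct call_data *d)
{
	if (cpu == current_cpu()) {
		/* Target is this CPU: run the callback right here, in
		 * the same kind of context an IPI handler would give it. */
		d->func(d->info);
	} else {
		send_to_remote_cpu(cpu, d);
	}
}
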
diff --git a/kernel/sys.c b/kernel/sys.c index e9ad44489828..7f5a0cd296a9 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
@@ -931,6 +931,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid) | |||
931 | pgid = pid; | 931 | pgid = pid; |
932 | if (pgid < 0) | 932 | if (pgid < 0) |
933 | return -EINVAL; | 933 | return -EINVAL; |
934 | rcu_read_lock(); | ||
934 | 935 | ||
935 | /* From this point forward we keep holding onto the tasklist lock | 936 | /* From this point forward we keep holding onto the tasklist lock |
936 | * so that our parent does not change from under us. -DaveM | 937 | * so that our parent does not change from under us. -DaveM |
@@ -984,6 +985,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid) | |||
984 | out: | 985 | out: |
985 | /* All paths lead to here, thus we are safe. -DaveM */ | 986 | /* All paths lead to here, thus we are safe. -DaveM */ |
986 | write_unlock_irq(&tasklist_lock); | 987 | write_unlock_irq(&tasklist_lock); |
988 | rcu_read_unlock(); | ||
987 | return err; | 989 | return err; |
988 | } | 990 | } |
989 | 991 | ||
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index ca38e8e3e907..f88552c6d227 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
@@ -1713,10 +1713,7 @@ static __init int sysctl_init(void) | |||
1713 | { | 1713 | { |
1714 | sysctl_set_parent(NULL, root_table); | 1714 | sysctl_set_parent(NULL, root_table); |
1715 | #ifdef CONFIG_SYSCTL_SYSCALL_CHECK | 1715 | #ifdef CONFIG_SYSCTL_SYSCALL_CHECK |
1716 | { | 1716 | sysctl_check_table(current->nsproxy, root_table); |
1717 | int err; | ||
1718 | err = sysctl_check_table(current->nsproxy, root_table); | ||
1719 | } | ||
1720 | #endif | 1717 | #endif |
1721 | return 0; | 1718 | return 0; |
1722 | } | 1719 | } |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 0d88ce9b9fb8..fa7ece649fe1 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -381,12 +381,19 @@ static int function_stat_show(struct seq_file *m, void *v) | |||
381 | { | 381 | { |
382 | struct ftrace_profile *rec = v; | 382 | struct ftrace_profile *rec = v; |
383 | char str[KSYM_SYMBOL_LEN]; | 383 | char str[KSYM_SYMBOL_LEN]; |
384 | int ret = 0; | ||
384 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 385 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
385 | static DEFINE_MUTEX(mutex); | ||
386 | static struct trace_seq s; | 386 | static struct trace_seq s; |
387 | unsigned long long avg; | 387 | unsigned long long avg; |
388 | unsigned long long stddev; | 388 | unsigned long long stddev; |
389 | #endif | 389 | #endif |
390 | mutex_lock(&ftrace_profile_lock); | ||
391 | |||
392 | /* we raced with function_profile_reset() */ | ||
393 | if (unlikely(rec->counter == 0)) { | ||
394 | ret = -EBUSY; | ||
395 | goto out; | ||
396 | } | ||
390 | 397 | ||
391 | kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); | 398 | kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); |
392 | seq_printf(m, " %-30.30s %10lu", str, rec->counter); | 399 | seq_printf(m, " %-30.30s %10lu", str, rec->counter); |
@@ -408,7 +415,6 @@ static int function_stat_show(struct seq_file *m, void *v) | |||
408 | do_div(stddev, (rec->counter - 1) * 1000); | 415 | do_div(stddev, (rec->counter - 1) * 1000); |
409 | } | 416 | } |
410 | 417 | ||
411 | mutex_lock(&mutex); | ||
412 | trace_seq_init(&s); | 418 | trace_seq_init(&s); |
413 | trace_print_graph_duration(rec->time, &s); | 419 | trace_print_graph_duration(rec->time, &s); |
414 | trace_seq_puts(&s, " "); | 420 | trace_seq_puts(&s, " "); |
@@ -416,11 +422,12 @@ static int function_stat_show(struct seq_file *m, void *v) | |||
416 | trace_seq_puts(&s, " "); | 422 | trace_seq_puts(&s, " "); |
417 | trace_print_graph_duration(stddev, &s); | 423 | trace_print_graph_duration(stddev, &s); |
418 | trace_print_seq(m, &s); | 424 | trace_print_seq(m, &s); |
419 | mutex_unlock(&mutex); | ||
420 | #endif | 425 | #endif |
421 | seq_putc(m, '\n'); | 426 | seq_putc(m, '\n'); |
427 | out: | ||
428 | mutex_unlock(&ftrace_profile_lock); | ||
422 | 429 | ||
423 | return 0; | 430 | return ret; |
424 | } | 431 | } |
425 | 432 | ||
426 | static void ftrace_profile_reset(struct ftrace_profile_stat *stat) | 433 | static void ftrace_profile_reset(struct ftrace_profile_stat *stat) |
@@ -1503,6 +1510,8 @@ static void *t_start(struct seq_file *m, loff_t *pos) | |||
1503 | if (*pos > 0) | 1510 | if (*pos > 0) |
1504 | return t_hash_start(m, pos); | 1511 | return t_hash_start(m, pos); |
1505 | iter->flags |= FTRACE_ITER_PRINTALL; | 1512 | iter->flags |= FTRACE_ITER_PRINTALL; |
1513 | /* reset in case of seek/pread */ | ||
1514 | iter->flags &= ~FTRACE_ITER_HASH; | ||
1506 | return iter; | 1515 | return iter; |
1507 | } | 1516 | } |
1508 | 1517 | ||
@@ -2409,7 +2418,7 @@ static const struct file_operations ftrace_filter_fops = { | |||
2409 | .open = ftrace_filter_open, | 2418 | .open = ftrace_filter_open, |
2410 | .read = seq_read, | 2419 | .read = seq_read, |
2411 | .write = ftrace_filter_write, | 2420 | .write = ftrace_filter_write, |
2412 | .llseek = ftrace_regex_lseek, | 2421 | .llseek = no_llseek, |
2413 | .release = ftrace_filter_release, | 2422 | .release = ftrace_filter_release, |
2414 | }; | 2423 | }; |
2415 | 2424 | ||
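
Editor's note: function_stat_show() now holds ftrace_profile_lock for the whole formatting pass and returns -EBUSY when rec->counter is zero, i.e. a concurrent function_profile_reset() cleared the record while it was being read; the per-call static mutex it previously used only protected the trace_seq buffer. The general shape is "validate the record under the same lock the reset path holds", sketched here with pthreads:

#include <pthread.h>

struct stat_rec {
	pthread_mutex_t lock;
	unsigned long counter;
	unsigned long long total_ns;
};

/* Returns the average, or -1 if a concurrent reset cleared the record. */
long long read_average(struct stat_rec *r)
{
	long long avg = -1;

	pthread_mutex_lock(&r->lock);
	if (r->counter != 0)			/* raced with a reset? */
		avg = (long long)(r->total_ns / r->counter);
	pthread_mutex_unlock(&r->lock);
	return avg;
}
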
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 19cccc3c3028..492197e2f86c 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -2985,13 +2985,11 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) | |||
2985 | 2985 | ||
2986 | static void rb_advance_iter(struct ring_buffer_iter *iter) | 2986 | static void rb_advance_iter(struct ring_buffer_iter *iter) |
2987 | { | 2987 | { |
2988 | struct ring_buffer *buffer; | ||
2989 | struct ring_buffer_per_cpu *cpu_buffer; | 2988 | struct ring_buffer_per_cpu *cpu_buffer; |
2990 | struct ring_buffer_event *event; | 2989 | struct ring_buffer_event *event; |
2991 | unsigned length; | 2990 | unsigned length; |
2992 | 2991 | ||
2993 | cpu_buffer = iter->cpu_buffer; | 2992 | cpu_buffer = iter->cpu_buffer; |
2994 | buffer = cpu_buffer->buffer; | ||
2995 | 2993 | ||
2996 | /* | 2994 | /* |
2997 | * Check if we are at the end of the buffer. | 2995 | * Check if we are at the end of the buffer. |
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index 000e6e85b445..31cc4cb0dbf2 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c | |||
@@ -91,6 +91,8 @@ int perf_trace_init(struct perf_event *p_event) | |||
91 | tp_event->class && tp_event->class->reg && | 91 | tp_event->class && tp_event->class->reg && |
92 | try_module_get(tp_event->mod)) { | 92 | try_module_get(tp_event->mod)) { |
93 | ret = perf_trace_event_init(tp_event, p_event); | 93 | ret = perf_trace_event_init(tp_event, p_event); |
94 | if (ret) | ||
95 | module_put(tp_event->mod); | ||
94 | break; | 96 | break; |
95 | } | 97 | } |
96 | } | 98 | } |
@@ -146,6 +148,7 @@ void perf_trace_destroy(struct perf_event *p_event) | |||
146 | } | 148 | } |
147 | } | 149 | } |
148 | out: | 150 | out: |
151 | module_put(tp_event->mod); | ||
149 | mutex_unlock(&event_mutex); | 152 | mutex_unlock(&event_mutex); |
150 | } | 153 | } |
151 | 154 | ||
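
Editor's note: perf_trace_init() takes a module reference with try_module_get() before initializing the event; the hunk above drops that reference again when perf_trace_event_init() fails, and perf_trace_destroy() drops it on teardown, so every successful get ends up matched by exactly one put. A toy version of that pairing, with made-up names:

/* Toy reference counting to show the get/put pairing. */
struct provider { int refs; };

int provider_get(struct provider *p)  { p->refs++; return 1; }
void provider_put(struct provider *p) { p->refs--; }

int attach(struct provider *p, int (*init)(struct provider *))
{
	int ret;

	if (!provider_get(p))
		return -1;

	ret = init(p);
	if (ret)
		provider_put(p);	/* failure: give the reference back */
	return ret;			/* success: detach() puts it later */
}

void detach(struct provider *p)
{
	provider_put(p);
}
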
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 8b27c9849b42..544301d29dee 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
@@ -514,8 +514,8 @@ static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs); | |||
514 | static int kretprobe_dispatcher(struct kretprobe_instance *ri, | 514 | static int kretprobe_dispatcher(struct kretprobe_instance *ri, |
515 | struct pt_regs *regs); | 515 | struct pt_regs *regs); |
516 | 516 | ||
517 | /* Check the name is good for event/group */ | 517 | /* Check the name is good for event/group/fields */ |
518 | static int check_event_name(const char *name) | 518 | static int is_good_name(const char *name) |
519 | { | 519 | { |
520 | if (!isalpha(*name) && *name != '_') | 520 | if (!isalpha(*name) && *name != '_') |
521 | return 0; | 521 | return 0; |
@@ -557,7 +557,7 @@ static struct trace_probe *alloc_trace_probe(const char *group, | |||
557 | else | 557 | else |
558 | tp->rp.kp.pre_handler = kprobe_dispatcher; | 558 | tp->rp.kp.pre_handler = kprobe_dispatcher; |
559 | 559 | ||
560 | if (!event || !check_event_name(event)) { | 560 | if (!event || !is_good_name(event)) { |
561 | ret = -EINVAL; | 561 | ret = -EINVAL; |
562 | goto error; | 562 | goto error; |
563 | } | 563 | } |
@@ -567,7 +567,7 @@ static struct trace_probe *alloc_trace_probe(const char *group, | |||
567 | if (!tp->call.name) | 567 | if (!tp->call.name) |
568 | goto error; | 568 | goto error; |
569 | 569 | ||
570 | if (!group || !check_event_name(group)) { | 570 | if (!group || !is_good_name(group)) { |
571 | ret = -EINVAL; | 571 | ret = -EINVAL; |
572 | goto error; | 572 | goto error; |
573 | } | 573 | } |
@@ -883,7 +883,7 @@ static int create_trace_probe(int argc, char **argv) | |||
883 | int i, ret = 0; | 883 | int i, ret = 0; |
884 | int is_return = 0, is_delete = 0; | 884 | int is_return = 0, is_delete = 0; |
885 | char *symbol = NULL, *event = NULL, *group = NULL; | 885 | char *symbol = NULL, *event = NULL, *group = NULL; |
886 | char *arg, *tmp; | 886 | char *arg; |
887 | unsigned long offset = 0; | 887 | unsigned long offset = 0; |
888 | void *addr = NULL; | 888 | void *addr = NULL; |
889 | char buf[MAX_EVENT_NAME_LEN]; | 889 | char buf[MAX_EVENT_NAME_LEN]; |
@@ -992,26 +992,36 @@ static int create_trace_probe(int argc, char **argv) | |||
992 | /* parse arguments */ | 992 | /* parse arguments */ |
993 | ret = 0; | 993 | ret = 0; |
994 | for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) { | 994 | for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) { |
995 | /* Increment count for freeing args in error case */ | ||
996 | tp->nr_args++; | ||
997 | |||
995 | /* Parse argument name */ | 998 | /* Parse argument name */ |
996 | arg = strchr(argv[i], '='); | 999 | arg = strchr(argv[i], '='); |
997 | if (arg) | 1000 | if (arg) { |
998 | *arg++ = '\0'; | 1001 | *arg++ = '\0'; |
999 | else | 1002 | tp->args[i].name = kstrdup(argv[i], GFP_KERNEL); |
1003 | } else { | ||
1000 | arg = argv[i]; | 1004 | arg = argv[i]; |
1005 | /* If argument name is omitted, set "argN" */ | ||
1006 | snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1); | ||
1007 | tp->args[i].name = kstrdup(buf, GFP_KERNEL); | ||
1008 | } | ||
1001 | 1009 | ||
1002 | tp->args[i].name = kstrdup(argv[i], GFP_KERNEL); | ||
1003 | if (!tp->args[i].name) { | 1010 | if (!tp->args[i].name) { |
1004 | pr_info("Failed to allocate argument%d name '%s'.\n", | 1011 | pr_info("Failed to allocate argument[%d] name.\n", i); |
1005 | i, argv[i]); | ||
1006 | ret = -ENOMEM; | 1012 | ret = -ENOMEM; |
1007 | goto error; | 1013 | goto error; |
1008 | } | 1014 | } |
1009 | tmp = strchr(tp->args[i].name, ':'); | 1015 | |
1010 | if (tmp) | 1016 | if (!is_good_name(tp->args[i].name)) { |
1011 | *tmp = '_'; /* convert : to _ */ | 1017 | pr_info("Invalid argument[%d] name: %s\n", |
1018 | i, tp->args[i].name); | ||
1019 | ret = -EINVAL; | ||
1020 | goto error; | ||
1021 | } | ||
1012 | 1022 | ||
1013 | if (conflict_field_name(tp->args[i].name, tp->args, i)) { | 1023 | if (conflict_field_name(tp->args[i].name, tp->args, i)) { |
1014 | pr_info("Argument%d name '%s' conflicts with " | 1024 | pr_info("Argument[%d] name '%s' conflicts with " |
1015 | "another field.\n", i, argv[i]); | 1025 | "another field.\n", i, argv[i]); |
1016 | ret = -EINVAL; | 1026 | ret = -EINVAL; |
1017 | goto error; | 1027 | goto error; |
@@ -1020,12 +1030,9 @@ static int create_trace_probe(int argc, char **argv) | |||
1020 | /* Parse fetch argument */ | 1030 | /* Parse fetch argument */ |
1021 | ret = parse_probe_arg(arg, tp, &tp->args[i], is_return); | 1031 | ret = parse_probe_arg(arg, tp, &tp->args[i], is_return); |
1022 | if (ret) { | 1032 | if (ret) { |
1023 | pr_info("Parse error at argument%d. (%d)\n", i, ret); | 1033 | pr_info("Parse error at argument[%d]. (%d)\n", i, ret); |
1024 | kfree(tp->args[i].name); | ||
1025 | goto error; | 1034 | goto error; |
1026 | } | 1035 | } |
1027 | |||
1028 | tp->nr_args++; | ||
1029 | } | 1036 | } |
1030 | 1037 | ||
1031 | ret = register_trace_probe(tp); | 1038 | ret = register_trace_probe(tp); |
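
Editor's note: check_event_name() is renamed to is_good_name() and now guards kprobe argument names as well; omitted names default to "argN", and incrementing nr_args at the top of the loop lets the error path free every argument that was touched. The validation rule is "first character alphabetic or '_', the rest alphanumeric or '_'"; only the opening check appears in the hunk, so treat the loop body below as a reconstructed sketch:

#include <ctype.h>

/* Accept C-identifier-like names for events, groups and fields. */
int is_good_name(const char *name)
{
	if (!isalpha((unsigned char)*name) && *name != '_')
		return 0;
	while (*++name != '\0') {
		if (!isalnum((unsigned char)*name) && *name != '_')
			return 0;
	}
	return 1;
}
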
diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 0d53c8e853b1..7f9c3c52ecc1 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c | |||
@@ -122,7 +122,7 @@ static void __touch_watchdog(void) | |||
122 | 122 | ||
123 | void touch_softlockup_watchdog(void) | 123 | void touch_softlockup_watchdog(void) |
124 | { | 124 | { |
125 | __get_cpu_var(watchdog_touch_ts) = 0; | 125 | __raw_get_cpu_var(watchdog_touch_ts) = 0; |
126 | } | 126 | } |
127 | EXPORT_SYMBOL(touch_softlockup_watchdog); | 127 | EXPORT_SYMBOL(touch_softlockup_watchdog); |
128 | 128 | ||
@@ -142,7 +142,14 @@ void touch_all_softlockup_watchdogs(void) | |||
142 | #ifdef CONFIG_HARDLOCKUP_DETECTOR | 142 | #ifdef CONFIG_HARDLOCKUP_DETECTOR |
143 | void touch_nmi_watchdog(void) | 143 | void touch_nmi_watchdog(void) |
144 | { | 144 | { |
145 | __get_cpu_var(watchdog_nmi_touch) = true; | 145 | if (watchdog_enabled) { |
146 | unsigned cpu; | ||
147 | |||
148 | for_each_present_cpu(cpu) { | ||
149 | if (per_cpu(watchdog_nmi_touch, cpu) != true) | ||
150 | per_cpu(watchdog_nmi_touch, cpu) = true; | ||
151 | } | ||
152 | } | ||
146 | touch_softlockup_watchdog(); | 153 | touch_softlockup_watchdog(); |
147 | } | 154 | } |
148 | EXPORT_SYMBOL(touch_nmi_watchdog); | 155 | EXPORT_SYMBOL(touch_nmi_watchdog); |
@@ -433,6 +440,9 @@ static int watchdog_enable(int cpu) | |||
433 | wake_up_process(p); | 440 | wake_up_process(p); |
434 | } | 441 | } |
435 | 442 | ||
443 | /* if any cpu succeeds, watchdog is considered enabled for the system */ | ||
444 | watchdog_enabled = 1; | ||
445 | |||
436 | return 0; | 446 | return 0; |
437 | } | 447 | } |
438 | 448 | ||
@@ -455,9 +465,6 @@ static void watchdog_disable(int cpu) | |||
455 | per_cpu(softlockup_watchdog, cpu) = NULL; | 465 | per_cpu(softlockup_watchdog, cpu) = NULL; |
456 | kthread_stop(p); | 466 | kthread_stop(p); |
457 | } | 467 | } |
458 | |||
459 | /* if any cpu succeeds, watchdog is considered enabled for the system */ | ||
460 | watchdog_enabled = 1; | ||
461 | } | 468 | } |
462 | 469 | ||
463 | static void watchdog_enable_all_cpus(void) | 470 | static void watchdog_enable_all_cpus(void) |
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 8bd600c020e5..f77afd939229 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -1,19 +1,26 @@ | |||
1 | /* | 1 | /* |
2 | * linux/kernel/workqueue.c | 2 | * kernel/workqueue.c - generic async execution with shared worker pool |
3 | * | 3 | * |
4 | * Generic mechanism for defining kernel helper threads for running | 4 | * Copyright (C) 2002 Ingo Molnar |
5 | * arbitrary tasks in process context. | ||
6 | * | 5 | * |
7 | * Started by Ingo Molnar, Copyright (C) 2002 | 6 | * Derived from the taskqueue/keventd code by: |
7 | * David Woodhouse <dwmw2@infradead.org> | ||
8 | * Andrew Morton | ||
9 | * Kai Petzke <wpp@marie.physik.tu-berlin.de> | ||
10 | * Theodore Ts'o <tytso@mit.edu> | ||
8 | * | 11 | * |
9 | * Derived from the taskqueue/keventd code by: | 12 | * Made to use alloc_percpu by Christoph Lameter. |
10 | * | 13 | * |
11 | * David Woodhouse <dwmw2@infradead.org> | 14 | * Copyright (C) 2010 SUSE Linux Products GmbH |
12 | * Andrew Morton | 15 | * Copyright (C) 2010 Tejun Heo <tj@kernel.org> |
13 | * Kai Petzke <wpp@marie.physik.tu-berlin.de> | ||
14 | * Theodore Ts'o <tytso@mit.edu> | ||
15 | * | 16 | * |
16 | * Made to use alloc_percpu by Christoph Lameter. | 17 | * This is the generic async execution mechanism. Work items are |
18 | * executed in process context. The worker pool is shared and | ||
19 | * automatically managed. There is one worker pool for each CPU and | ||
20 | * one extra for works which are better served by workers which are | ||
21 | * not bound to any specific CPU. | ||
22 | * | ||
23 | * Please read Documentation/workqueue.txt for details. | ||
17 | */ | 24 | */ |
18 | 25 | ||
19 | #include <linux/module.h> | 26 | #include <linux/module.h> |
@@ -90,7 +97,8 @@ enum { | |||
90 | /* | 97 | /* |
91 | * Structure fields follow one of the following exclusion rules. | 98 | * Structure fields follow one of the following exclusion rules. |
92 | * | 99 | * |
93 | * I: Set during initialization and read-only afterwards. | 100 | * I: Modifiable by initialization/destruction paths and read-only for |
101 | * everyone else. | ||
94 | * | 102 | * |
95 | * P: Preemption protected. Disabling preemption is enough and should | 103 | * P: Preemption protected. Disabling preemption is enough and should |
96 | * only be modified and accessed from the local cpu. | 104 | * only be modified and accessed from the local cpu. |
@@ -198,7 +206,7 @@ typedef cpumask_var_t mayday_mask_t; | |||
198 | cpumask_test_and_set_cpu((cpu), (mask)) | 206 | cpumask_test_and_set_cpu((cpu), (mask)) |
199 | #define mayday_clear_cpu(cpu, mask) cpumask_clear_cpu((cpu), (mask)) | 207 | #define mayday_clear_cpu(cpu, mask) cpumask_clear_cpu((cpu), (mask)) |
200 | #define for_each_mayday_cpu(cpu, mask) for_each_cpu((cpu), (mask)) | 208 | #define for_each_mayday_cpu(cpu, mask) for_each_cpu((cpu), (mask)) |
201 | #define alloc_mayday_mask(maskp, gfp) alloc_cpumask_var((maskp), (gfp)) | 209 | #define alloc_mayday_mask(maskp, gfp) zalloc_cpumask_var((maskp), (gfp)) |
202 | #define free_mayday_mask(mask) free_cpumask_var((mask)) | 210 | #define free_mayday_mask(mask) free_cpumask_var((mask)) |
203 | #else | 211 | #else |
204 | typedef unsigned long mayday_mask_t; | 212 | typedef unsigned long mayday_mask_t; |
@@ -943,10 +951,14 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, | |||
943 | struct global_cwq *gcwq; | 951 | struct global_cwq *gcwq; |
944 | struct cpu_workqueue_struct *cwq; | 952 | struct cpu_workqueue_struct *cwq; |
945 | struct list_head *worklist; | 953 | struct list_head *worklist; |
954 | unsigned int work_flags; | ||
946 | unsigned long flags; | 955 | unsigned long flags; |
947 | 956 | ||
948 | debug_work_activate(work); | 957 | debug_work_activate(work); |
949 | 958 | ||
959 | if (WARN_ON_ONCE(wq->flags & WQ_DYING)) | ||
960 | return; | ||
961 | |||
950 | /* determine gcwq to use */ | 962 | /* determine gcwq to use */ |
951 | if (!(wq->flags & WQ_UNBOUND)) { | 963 | if (!(wq->flags & WQ_UNBOUND)) { |
952 | struct global_cwq *last_gcwq; | 964 | struct global_cwq *last_gcwq; |
@@ -989,14 +1001,17 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, | |||
989 | BUG_ON(!list_empty(&work->entry)); | 1001 | BUG_ON(!list_empty(&work->entry)); |
990 | 1002 | ||
991 | cwq->nr_in_flight[cwq->work_color]++; | 1003 | cwq->nr_in_flight[cwq->work_color]++; |
1004 | work_flags = work_color_to_flags(cwq->work_color); | ||
992 | 1005 | ||
993 | if (likely(cwq->nr_active < cwq->max_active)) { | 1006 | if (likely(cwq->nr_active < cwq->max_active)) { |
994 | cwq->nr_active++; | 1007 | cwq->nr_active++; |
995 | worklist = gcwq_determine_ins_pos(gcwq, cwq); | 1008 | worklist = gcwq_determine_ins_pos(gcwq, cwq); |
996 | } else | 1009 | } else { |
1010 | work_flags |= WORK_STRUCT_DELAYED; | ||
997 | worklist = &cwq->delayed_works; | 1011 | worklist = &cwq->delayed_works; |
1012 | } | ||
998 | 1013 | ||
999 | insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color)); | 1014 | insert_work(cwq, work, worklist, work_flags); |
1000 | 1015 | ||
1001 | spin_unlock_irqrestore(&gcwq->lock, flags); | 1016 | spin_unlock_irqrestore(&gcwq->lock, flags); |
1002 | } | 1017 | } |
@@ -1215,6 +1230,7 @@ static void worker_leave_idle(struct worker *worker) | |||
1215 | * bound), %false if offline. | 1230 | * bound), %false if offline. |
1216 | */ | 1231 | */ |
1217 | static bool worker_maybe_bind_and_lock(struct worker *worker) | 1232 | static bool worker_maybe_bind_and_lock(struct worker *worker) |
1233 | __acquires(&gcwq->lock) | ||
1218 | { | 1234 | { |
1219 | struct global_cwq *gcwq = worker->gcwq; | 1235 | struct global_cwq *gcwq = worker->gcwq; |
1220 | struct task_struct *task = worker->task; | 1236 | struct task_struct *task = worker->task; |
@@ -1488,6 +1504,8 @@ static void gcwq_mayday_timeout(unsigned long __gcwq) | |||
1488 | * otherwise. | 1504 | * otherwise. |
1489 | */ | 1505 | */ |
1490 | static bool maybe_create_worker(struct global_cwq *gcwq) | 1506 | static bool maybe_create_worker(struct global_cwq *gcwq) |
1507 | __releases(&gcwq->lock) | ||
1508 | __acquires(&gcwq->lock) | ||
1491 | { | 1509 | { |
1492 | if (!need_to_create_worker(gcwq)) | 1510 | if (!need_to_create_worker(gcwq)) |
1493 | return false; | 1511 | return false; |
@@ -1662,6 +1680,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq) | |||
1662 | struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq); | 1680 | struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq); |
1663 | 1681 | ||
1664 | move_linked_works(work, pos, NULL); | 1682 | move_linked_works(work, pos, NULL); |
1683 | __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work)); | ||
1665 | cwq->nr_active++; | 1684 | cwq->nr_active++; |
1666 | } | 1685 | } |
1667 | 1686 | ||
@@ -1669,6 +1688,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq) | |||
1669 | * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight | 1688 | * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight |
1670 | * @cwq: cwq of interest | 1689 | * @cwq: cwq of interest |
1671 | * @color: color of work which left the queue | 1690 | * @color: color of work which left the queue |
1691 | * @delayed: for a delayed work | ||
1672 | * | 1692 | * |
1673 | * A work either has completed or is removed from pending queue, | 1693 | * A work either has completed or is removed from pending queue, |
1674 | * decrement nr_in_flight of its cwq and handle workqueue flushing. | 1694 | * decrement nr_in_flight of its cwq and handle workqueue flushing. |
@@ -1676,19 +1696,22 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq) | |||
1676 | * CONTEXT: | 1696 | * CONTEXT: |
1677 | * spin_lock_irq(gcwq->lock). | 1697 | * spin_lock_irq(gcwq->lock). |
1678 | */ | 1698 | */ |
1679 | static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color) | 1699 | static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color, |
1700 | bool delayed) | ||
1680 | { | 1701 | { |
1681 | /* ignore uncolored works */ | 1702 | /* ignore uncolored works */ |
1682 | if (color == WORK_NO_COLOR) | 1703 | if (color == WORK_NO_COLOR) |
1683 | return; | 1704 | return; |
1684 | 1705 | ||
1685 | cwq->nr_in_flight[color]--; | 1706 | cwq->nr_in_flight[color]--; |
1686 | cwq->nr_active--; | ||
1687 | 1707 | ||
1688 | if (!list_empty(&cwq->delayed_works)) { | 1708 | if (!delayed) { |
1689 | /* one down, submit a delayed one */ | 1709 | cwq->nr_active--; |
1690 | if (cwq->nr_active < cwq->max_active) | 1710 | if (!list_empty(&cwq->delayed_works)) { |
1691 | cwq_activate_first_delayed(cwq); | 1711 | /* one down, submit a delayed one */ |
1712 | if (cwq->nr_active < cwq->max_active) | ||
1713 | cwq_activate_first_delayed(cwq); | ||
1714 | } | ||
1692 | } | 1715 | } |
1693 | 1716 | ||
1694 | /* is flush in progress and are we at the flushing tip? */ | 1717 | /* is flush in progress and are we at the flushing tip? */ |
@@ -1725,6 +1748,8 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color) | |||
1725 | * spin_lock_irq(gcwq->lock) which is released and regrabbed. | 1748 | * spin_lock_irq(gcwq->lock) which is released and regrabbed. |
1726 | */ | 1749 | */ |
1727 | static void process_one_work(struct worker *worker, struct work_struct *work) | 1750 | static void process_one_work(struct worker *worker, struct work_struct *work) |
1751 | __releases(&gcwq->lock) | ||
1752 | __acquires(&gcwq->lock) | ||
1728 | { | 1753 | { |
1729 | struct cpu_workqueue_struct *cwq = get_work_cwq(work); | 1754 | struct cpu_workqueue_struct *cwq = get_work_cwq(work); |
1730 | struct global_cwq *gcwq = cwq->gcwq; | 1755 | struct global_cwq *gcwq = cwq->gcwq; |
@@ -1823,7 +1848,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work) | |||
1823 | hlist_del_init(&worker->hentry); | 1848 | hlist_del_init(&worker->hentry); |
1824 | worker->current_work = NULL; | 1849 | worker->current_work = NULL; |
1825 | worker->current_cwq = NULL; | 1850 | worker->current_cwq = NULL; |
1826 | cwq_dec_nr_in_flight(cwq, work_color); | 1851 | cwq_dec_nr_in_flight(cwq, work_color, false); |
1827 | } | 1852 | } |
1828 | 1853 | ||
1829 | /** | 1854 | /** |
@@ -2388,7 +2413,8 @@ static int try_to_grab_pending(struct work_struct *work) | |||
2388 | debug_work_deactivate(work); | 2413 | debug_work_deactivate(work); |
2389 | list_del_init(&work->entry); | 2414 | list_del_init(&work->entry); |
2390 | cwq_dec_nr_in_flight(get_work_cwq(work), | 2415 | cwq_dec_nr_in_flight(get_work_cwq(work), |
2391 | get_work_color(work)); | 2416 | get_work_color(work), |
2417 | *work_data_bits(work) & WORK_STRUCT_DELAYED); | ||
2392 | ret = 1; | 2418 | ret = 1; |
2393 | } | 2419 | } |
2394 | } | 2420 | } |
@@ -2791,7 +2817,6 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name, | |||
2791 | if (IS_ERR(rescuer->task)) | 2817 | if (IS_ERR(rescuer->task)) |
2792 | goto err; | 2818 | goto err; |
2793 | 2819 | ||
2794 | wq->rescuer = rescuer; | ||
2795 | rescuer->task->flags |= PF_THREAD_BOUND; | 2820 | rescuer->task->flags |= PF_THREAD_BOUND; |
2796 | wake_up_process(rescuer->task); | 2821 | wake_up_process(rescuer->task); |
2797 | } | 2822 | } |
@@ -2833,6 +2858,7 @@ void destroy_workqueue(struct workqueue_struct *wq) | |||
2833 | { | 2858 | { |
2834 | unsigned int cpu; | 2859 | unsigned int cpu; |
2835 | 2860 | ||
2861 | wq->flags |= WQ_DYING; | ||
2836 | flush_workqueue(wq); | 2862 | flush_workqueue(wq); |
2837 | 2863 | ||
2838 | /* | 2864 | /* |
@@ -2857,6 +2883,7 @@ void destroy_workqueue(struct workqueue_struct *wq) | |||
2857 | if (wq->flags & WQ_RESCUER) { | 2883 | if (wq->flags & WQ_RESCUER) { |
2858 | kthread_stop(wq->rescuer->task); | 2884 | kthread_stop(wq->rescuer->task); |
2859 | free_mayday_mask(wq->mayday_mask); | 2885 | free_mayday_mask(wq->mayday_mask); |
2886 | kfree(wq->rescuer); | ||
2860 | } | 2887 | } |
2861 | 2888 | ||
2862 | free_cwqs(wq); | 2889 | free_cwqs(wq); |
@@ -3239,6 +3266,8 @@ static int __cpuinit trustee_thread(void *__gcwq) | |||
3239 | * multiple times. To be used by cpu_callback. | 3266 | * multiple times. To be used by cpu_callback. |
3240 | */ | 3267 | */ |
3241 | static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state) | 3268 | static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state) |
3269 | __releases(&gcwq->lock) | ||
3270 | __acquires(&gcwq->lock) | ||
3242 | { | 3271 | { |
3243 | if (!(gcwq->trustee_state == state || | 3272 | if (!(gcwq->trustee_state == state || |
3244 | gcwq->trustee_state == TRUSTEE_DONE)) { | 3273 | gcwq->trustee_state == TRUSTEE_DONE)) { |
@@ -3545,8 +3574,7 @@ static int __init init_workqueues(void) | |||
3545 | spin_lock_init(&gcwq->lock); | 3574 | spin_lock_init(&gcwq->lock); |
3546 | INIT_LIST_HEAD(&gcwq->worklist); | 3575 | INIT_LIST_HEAD(&gcwq->worklist); |
3547 | gcwq->cpu = cpu; | 3576 | gcwq->cpu = cpu; |
3548 | if (cpu == WORK_CPU_UNBOUND) | 3577 | gcwq->flags |= GCWQ_DISASSOCIATED; |
3549 | gcwq->flags |= GCWQ_DISASSOCIATED; | ||
3550 | 3578 | ||
3551 | INIT_LIST_HEAD(&gcwq->idle_list); | 3579 | INIT_LIST_HEAD(&gcwq->idle_list); |
3552 | for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) | 3580 | for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) |
@@ -3570,6 +3598,8 @@ static int __init init_workqueues(void) | |||
3570 | struct global_cwq *gcwq = get_gcwq(cpu); | 3598 | struct global_cwq *gcwq = get_gcwq(cpu); |
3571 | struct worker *worker; | 3599 | struct worker *worker; |
3572 | 3600 | ||
3601 | if (cpu != WORK_CPU_UNBOUND) | ||
3602 | gcwq->flags &= ~GCWQ_DISASSOCIATED; | ||
3573 | worker = create_worker(gcwq, true); | 3603 | worker = create_worker(gcwq, true); |
3574 | BUG_ON(!worker); | 3604 | BUG_ON(!worker); |
3575 | spin_lock_irq(&gcwq->lock); | 3605 | spin_lock_irq(&gcwq->lock); |
@@ -72,8 +72,8 @@ static const struct bug_entry *module_find_bug(unsigned long bugaddr) | |||
72 | return NULL; | 72 | return NULL; |
73 | } | 73 | } |
74 | 74 | ||
75 | int module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, | 75 | void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, |
76 | struct module *mod) | 76 | struct module *mod) |
77 | { | 77 | { |
78 | char *secstrings; | 78 | char *secstrings; |
79 | unsigned int i; | 79 | unsigned int i; |
@@ -97,8 +97,6 @@ int module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, | |||
97 | * could potentially lead to deadlock and thus be counter-productive. | 97 | * could potentially lead to deadlock and thus be counter-productive. |
98 | */ | 98 | */ |
99 | list_add(&mod->bug_list, &module_bug_list); | 99 | list_add(&mod->bug_list, &module_bug_list); |
100 | |||
101 | return 0; | ||
102 | } | 100 | } |
103 | 101 | ||
104 | void module_bug_cleanup(struct module *mod) | 102 | void module_bug_cleanup(struct module *mod) |
diff --git a/lib/list_sort.c b/lib/list_sort.c index 4b5cb794c38b..a7616fa3162e 100644 --- a/lib/list_sort.c +++ b/lib/list_sort.c | |||
@@ -70,7 +70,7 @@ static void merge_and_restore_back_links(void *priv, | |||
70 | * element comparison is needed, so the client's cmp() | 70 | * element comparison is needed, so the client's cmp() |
71 | * routine can invoke cond_resched() periodically. | 71 | * routine can invoke cond_resched() periodically. |
72 | */ | 72 | */ |
73 | (*cmp)(priv, tail, tail); | 73 | (*cmp)(priv, tail->next, tail->next); |
74 | 74 | ||
75 | tail->next->prev = tail; | 75 | tail->next->prev = tail; |
76 | tail = tail->next; | 76 | tail = tail->next; |
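The dummy call above exists only so a client's cmp() can run cond_resched() during long back-link restoration; the old code handed it tail, which in some cases is still the list head rather than a real element, so a callback that dereferences its arguments could read garbage. Passing tail->next always supplies a genuine node. A minimal, hypothetical callback (element type and field names invented for illustration) that relies on that guarantee:

	struct my_item {
		struct list_head list;
		int key;
	};

	static int my_cmp(void *priv, struct list_head *a, struct list_head *b)
	{
		struct my_item *ia = list_entry(a, struct my_item, list);
		struct my_item *ib = list_entry(b, struct my_item, list);

		cond_resched();		/* the reason the dummy call is made at all */
		return ia->key - ib->key;
	}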
diff --git a/lib/raid6/.gitignore b/lib/raid6/.gitignore new file mode 100644 index 000000000000..162becacf97c --- /dev/null +++ b/lib/raid6/.gitignore | |||
@@ -0,0 +1,4 @@ | |||
1 | mktables | ||
2 | altivec*.c | ||
3 | int*.c | ||
4 | tables.c | ||
diff --git a/lib/scatterlist.c b/lib/scatterlist.c index a5ec42868f99..4ceb05d772ae 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c | |||
@@ -248,8 +248,18 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents, | |||
248 | left -= sg_size; | 248 | left -= sg_size; |
249 | 249 | ||
250 | sg = alloc_fn(alloc_size, gfp_mask); | 250 | sg = alloc_fn(alloc_size, gfp_mask); |
251 | if (unlikely(!sg)) | 251 | if (unlikely(!sg)) { |
252 | return -ENOMEM; | 252 | /* |
253 | * Adjust entry count to reflect that the last | ||
254 | * entry of the previous table won't be used for | ||
255 | * linkage. Without this, sg_kfree() may get | ||
256 | * confused. | ||
257 | */ | ||
258 | if (prv) | ||
259 | table->nents = ++table->orig_nents; | ||
260 | |||
261 | return -ENOMEM; | ||
262 | } | ||
253 | 263 | ||
254 | sg_init_table(sg, alloc_size); | 264 | sg_init_table(sg, alloc_size); |
255 | table->nents = table->orig_nents += sg_size; | 265 | table->nents = table->orig_nents += sg_size; |
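The nents/orig_nents adjustment above only matters on the error path: sg_alloc_table() reacts to a failed __sg_alloc_table() by tearing the partially built table back down, and without the fix-up that teardown could hand sg_kfree() the wrong size for the last chunk. A typical caller (hypothetical, with nents assumed set elsewhere) never sees any of this and simply checks the return value:

	struct sg_table table;
	int ret;

	ret = sg_alloc_table(&table, nents, GFP_KERNEL);
	if (ret)
		return ret;	/* sg_alloc_table() already freed any partial allocation */

	/* ... fill table.sgl and use it ... */

	sg_free_table(&table);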
diff --git a/mm/Kconfig b/mm/Kconfig index f4e516e9c37c..f0fb9124e410 100644 --- a/mm/Kconfig +++ b/mm/Kconfig | |||
@@ -189,7 +189,7 @@ config COMPACTION | |||
189 | config MIGRATION | 189 | config MIGRATION |
190 | bool "Page migration" | 190 | bool "Page migration" |
191 | def_bool y | 191 | def_bool y |
192 | depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE | 192 | depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION |
193 | help | 193 | help |
194 | Allows the migration of the physical location of pages of processes | 194 | Allows the migration of the physical location of pages of processes |
195 | while the virtual addresses are not changed. This is useful in | 195 | while the virtual addresses are not changed. This is useful in |
diff --git a/mm/backing-dev.c b/mm/backing-dev.c index eaa4a5bbe063..65d420499a61 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c | |||
@@ -30,6 +30,7 @@ EXPORT_SYMBOL_GPL(default_backing_dev_info); | |||
30 | 30 | ||
31 | struct backing_dev_info noop_backing_dev_info = { | 31 | struct backing_dev_info noop_backing_dev_info = { |
32 | .name = "noop", | 32 | .name = "noop", |
33 | .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK, | ||
33 | }; | 34 | }; |
34 | EXPORT_SYMBOL_GPL(noop_backing_dev_info); | 35 | EXPORT_SYMBOL_GPL(noop_backing_dev_info); |
35 | 36 | ||
@@ -243,6 +244,7 @@ static int __init default_bdi_init(void) | |||
243 | err = bdi_init(&default_backing_dev_info); | 244 | err = bdi_init(&default_backing_dev_info); |
244 | if (!err) | 245 | if (!err) |
245 | bdi_register(&default_backing_dev_info, NULL, "default"); | 246 | bdi_register(&default_backing_dev_info, NULL, "default"); |
247 | err = bdi_init(&noop_backing_dev_info); | ||
246 | 248 | ||
247 | return err; | 249 | return err; |
248 | } | 250 | } |
@@ -445,8 +447,8 @@ static int bdi_forker_thread(void *ptr) | |||
445 | switch (action) { | 447 | switch (action) { |
446 | case FORK_THREAD: | 448 | case FORK_THREAD: |
447 | __set_current_state(TASK_RUNNING); | 449 | __set_current_state(TASK_RUNNING); |
448 | task = kthread_run(bdi_writeback_thread, &bdi->wb, "flush-%s", | 450 | task = kthread_create(bdi_writeback_thread, &bdi->wb, |
449 | dev_name(bdi->dev)); | 451 | "flush-%s", dev_name(bdi->dev)); |
450 | if (IS_ERR(task)) { | 452 | if (IS_ERR(task)) { |
451 | /* | 453 | /* |
452 | * If thread creation fails, force writeout of | 454 | * If thread creation fails, force writeout of |
@@ -457,10 +459,13 @@ static int bdi_forker_thread(void *ptr) | |||
457 | /* | 459 | /* |
458 | * The spinlock makes sure we do not lose | 460 | * The spinlock makes sure we do not lose |
459 | * wake-ups when racing with 'bdi_queue_work()'. | 461 | * wake-ups when racing with 'bdi_queue_work()'. |
462 | * And as soon as the bdi thread is visible, we | ||
463 | * can start it. | ||
460 | */ | 464 | */ |
461 | spin_lock_bh(&bdi->wb_lock); | 465 | spin_lock_bh(&bdi->wb_lock); |
462 | bdi->wb.task = task; | 466 | bdi->wb.task = task; |
463 | spin_unlock_bh(&bdi->wb_lock); | 467 | spin_unlock_bh(&bdi->wb_lock); |
468 | wake_up_process(task); | ||
464 | } | 469 | } |
465 | break; | 470 | break; |
466 | 471 | ||
diff --git a/mm/bounce.c b/mm/bounce.c index 13b6dad1eed2..1481de68184b 100644 --- a/mm/bounce.c +++ b/mm/bounce.c | |||
@@ -116,8 +116,8 @@ static void copy_to_high_bio_irq(struct bio *to, struct bio *from) | |||
116 | */ | 116 | */ |
117 | vfrom = page_address(fromvec->bv_page) + tovec->bv_offset; | 117 | vfrom = page_address(fromvec->bv_page) + tovec->bv_offset; |
118 | 118 | ||
119 | flush_dcache_page(tovec->bv_page); | ||
120 | bounce_copy_vec(tovec, vfrom); | 119 | bounce_copy_vec(tovec, vfrom); |
120 | flush_dcache_page(tovec->bv_page); | ||
121 | } | 121 | } |
122 | } | 122 | } |
123 | 123 | ||
diff --git a/mm/compaction.c b/mm/compaction.c index 94cce51b0b35..4d709ee59013 100644 --- a/mm/compaction.c +++ b/mm/compaction.c | |||
@@ -214,15 +214,16 @@ static void acct_isolated(struct zone *zone, struct compact_control *cc) | |||
214 | /* Similar to reclaim, but different enough that they don't share logic */ | 214 | /* Similar to reclaim, but different enough that they don't share logic */ |
215 | static bool too_many_isolated(struct zone *zone) | 215 | static bool too_many_isolated(struct zone *zone) |
216 | { | 216 | { |
217 | 217 | unsigned long active, inactive, isolated; | |
218 | unsigned long inactive, isolated; | ||
219 | 218 | ||
220 | inactive = zone_page_state(zone, NR_INACTIVE_FILE) + | 219 | inactive = zone_page_state(zone, NR_INACTIVE_FILE) + |
221 | zone_page_state(zone, NR_INACTIVE_ANON); | 220 | zone_page_state(zone, NR_INACTIVE_ANON); |
221 | active = zone_page_state(zone, NR_ACTIVE_FILE) + | ||
222 | zone_page_state(zone, NR_ACTIVE_ANON); | ||
222 | isolated = zone_page_state(zone, NR_ISOLATED_FILE) + | 223 | isolated = zone_page_state(zone, NR_ISOLATED_FILE) + |
223 | zone_page_state(zone, NR_ISOLATED_ANON); | 224 | zone_page_state(zone, NR_ISOLATED_ANON); |
224 | 225 | ||
225 | return isolated > inactive; | 226 | return isolated > (inactive + active) / 2; |
226 | } | 227 | } |
227 | 228 | ||
228 | /* | 229 | /* |
diff --git a/mm/fremap.c b/mm/fremap.c index 46f5dacf90a2..ec520c7b28df 100644 --- a/mm/fremap.c +++ b/mm/fremap.c | |||
@@ -125,7 +125,6 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, | |||
125 | { | 125 | { |
126 | struct mm_struct *mm = current->mm; | 126 | struct mm_struct *mm = current->mm; |
127 | struct address_space *mapping; | 127 | struct address_space *mapping; |
128 | unsigned long end = start + size; | ||
129 | struct vm_area_struct *vma; | 128 | struct vm_area_struct *vma; |
130 | int err = -EINVAL; | 129 | int err = -EINVAL; |
131 | int has_write_lock = 0; | 130 | int has_write_lock = 0; |
@@ -142,6 +141,10 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, | |||
142 | if (start + size <= start) | 141 | if (start + size <= start) |
143 | return err; | 142 | return err; |
144 | 143 | ||
144 | /* Does pgoff wrap? */ | ||
145 | if (pgoff + (size >> PAGE_SHIFT) < pgoff) | ||
146 | return err; | ||
147 | |||
145 | /* Can we represent this offset inside this architecture's pte's? */ | 148 | /* Can we represent this offset inside this architecture's pte's? */ |
146 | #if PTE_FILE_MAX_BITS < BITS_PER_LONG | 149 | #if PTE_FILE_MAX_BITS < BITS_PER_LONG |
147 | if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS)) | 150 | if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS)) |
@@ -168,7 +171,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, | |||
168 | if (!(vma->vm_flags & VM_CAN_NONLINEAR)) | 171 | if (!(vma->vm_flags & VM_CAN_NONLINEAR)) |
169 | goto out; | 172 | goto out; |
170 | 173 | ||
171 | if (end <= start || start < vma->vm_start || end > vma->vm_end) | 174 | if (start < vma->vm_start || start + size > vma->vm_end) |
172 | goto out; | 175 | goto out; |
173 | 176 | ||
174 | /* Must set VM_NONLINEAR before any pages are populated. */ | 177 | /* Must set VM_NONLINEAR before any pages are populated. */ |
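The added test catches page-offset arithmetic that wraps even when the byte arithmetic does not; a worked example with purely illustrative 32-bit values:

	/*
	 * PAGE_SHIFT == 12, unsigned long is 32 bits:
	 *   pgoff                 = 0xfffff000
	 *   size                  = 0x01000000   (16 MiB, i.e. 0x1000 pages)
	 *   pgoff + (size >> 12)  = 0x100000000  -> wraps to 0, which is < pgoff
	 * so remap_file_pages() now returns -EINVAL here, even though
	 * start + size on its own may look perfectly sane.
	 */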
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index cc5be788a39f..c03273807182 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
@@ -2324,11 +2324,8 @@ retry_avoidcopy: | |||
2324 | * and just make the page writable */ | 2324 | * and just make the page writable */ |
2325 | avoidcopy = (page_mapcount(old_page) == 1); | 2325 | avoidcopy = (page_mapcount(old_page) == 1); |
2326 | if (avoidcopy) { | 2326 | if (avoidcopy) { |
2327 | if (!trylock_page(old_page)) { | 2327 | if (PageAnon(old_page)) |
2328 | if (PageAnon(old_page)) | 2328 | page_move_anon_rmap(old_page, vma, address); |
2329 | page_move_anon_rmap(old_page, vma, address); | ||
2330 | } else | ||
2331 | unlock_page(old_page); | ||
2332 | set_huge_ptep_writable(vma, address, ptep); | 2329 | set_huge_ptep_writable(vma, address, ptep); |
2333 | return 0; | 2330 | return 0; |
2334 | } | 2331 | } |
@@ -2404,7 +2401,7 @@ retry_avoidcopy: | |||
2404 | set_huge_pte_at(mm, address, ptep, | 2401 | set_huge_pte_at(mm, address, ptep, |
2405 | make_huge_pte(vma, new_page, 1)); | 2402 | make_huge_pte(vma, new_page, 1)); |
2406 | page_remove_rmap(old_page); | 2403 | page_remove_rmap(old_page); |
2407 | hugepage_add_anon_rmap(new_page, vma, address); | 2404 | hugepage_add_new_anon_rmap(new_page, vma, address); |
2408 | /* Make the old page be freed below */ | 2405 | /* Make the old page be freed below */ |
2409 | new_page = old_page; | 2406 | new_page = old_page; |
2410 | mmu_notifier_invalidate_range_end(mm, | 2407 | mmu_notifier_invalidate_range_end(mm, |
@@ -2631,10 +2628,16 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, | |||
2631 | vma, address); | 2628 | vma, address); |
2632 | } | 2629 | } |
2633 | 2630 | ||
2634 | if (!pagecache_page) { | 2631 | /* |
2635 | page = pte_page(entry); | 2632 | * hugetlb_cow() requires page locks of pte_page(entry) and |
2633 | * pagecache_page, so here we need to take the former one | ||
2634 | * when page != pagecache_page or !pagecache_page. | ||
2635 | * Note that locking order is always pagecache_page -> page, | ||
2636 | * so no worry about deadlock. | ||
2637 | */ | ||
2638 | page = pte_page(entry); | ||
2639 | if (page != pagecache_page) | ||
2636 | lock_page(page); | 2640 | lock_page(page); |
2637 | } | ||
2638 | 2641 | ||
2639 | spin_lock(&mm->page_table_lock); | 2642 | spin_lock(&mm->page_table_lock); |
2640 | /* Check for a racing update before calling hugetlb_cow */ | 2643 | /* Check for a racing update before calling hugetlb_cow */ |
@@ -2661,9 +2664,8 @@ out_page_table_lock: | |||
2661 | if (pagecache_page) { | 2664 | if (pagecache_page) { |
2662 | unlock_page(pagecache_page); | 2665 | unlock_page(pagecache_page); |
2663 | put_page(pagecache_page); | 2666 | put_page(pagecache_page); |
2664 | } else { | ||
2665 | unlock_page(page); | ||
2666 | } | 2667 | } |
2668 | unlock_page(page); | ||
2667 | 2669 | ||
2668 | out_mutex: | 2670 | out_mutex: |
2669 | mutex_unlock(&hugetlb_instantiation_mutex); | 2671 | mutex_unlock(&hugetlb_instantiation_mutex); |
@@ -712,7 +712,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page, | |||
712 | if (!ptep) | 712 | if (!ptep) |
713 | goto out; | 713 | goto out; |
714 | 714 | ||
715 | if (pte_write(*ptep)) { | 715 | if (pte_write(*ptep) || pte_dirty(*ptep)) { |
716 | pte_t entry; | 716 | pte_t entry; |
717 | 717 | ||
718 | swapped = PageSwapCache(page); | 718 | swapped = PageSwapCache(page); |
@@ -735,7 +735,9 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page, | |||
735 | set_pte_at(mm, addr, ptep, entry); | 735 | set_pte_at(mm, addr, ptep, entry); |
736 | goto out_unlock; | 736 | goto out_unlock; |
737 | } | 737 | } |
738 | entry = pte_wrprotect(entry); | 738 | if (pte_dirty(entry)) |
739 | set_page_dirty(page); | ||
740 | entry = pte_mkclean(pte_wrprotect(entry)); | ||
739 | set_pte_at_notify(mm, addr, ptep, entry); | 741 | set_pte_at_notify(mm, addr, ptep, entry); |
740 | } | 742 | } |
741 | *orig_pte = *ptep; | 743 | *orig_pte = *ptep; |
@@ -1504,8 +1506,6 @@ struct page *ksm_does_need_to_copy(struct page *page, | |||
1504 | { | 1506 | { |
1505 | struct page *new_page; | 1507 | struct page *new_page; |
1506 | 1508 | ||
1507 | unlock_page(page); /* any racers will COW it, not modify it */ | ||
1508 | |||
1509 | new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); | 1509 | new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); |
1510 | if (new_page) { | 1510 | if (new_page) { |
1511 | copy_user_highpage(new_page, page, address, vma); | 1511 | copy_user_highpage(new_page, page, address, vma); |
@@ -1521,7 +1521,6 @@ struct page *ksm_does_need_to_copy(struct page *page, | |||
1521 | add_page_to_unevictable_list(new_page); | 1521 | add_page_to_unevictable_list(new_page); |
1522 | } | 1522 | } |
1523 | 1523 | ||
1524 | page_cache_release(page); | ||
1525 | return new_page; | 1524 | return new_page; |
1526 | } | 1525 | } |
1527 | 1526 | ||
diff --git a/mm/memory.c b/mm/memory.c index 6b2ab1051851..0e18b4d649ec 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -2623,7 +2623,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
2623 | unsigned int flags, pte_t orig_pte) | 2623 | unsigned int flags, pte_t orig_pte) |
2624 | { | 2624 | { |
2625 | spinlock_t *ptl; | 2625 | spinlock_t *ptl; |
2626 | struct page *page; | 2626 | struct page *page, *swapcache = NULL; |
2627 | swp_entry_t entry; | 2627 | swp_entry_t entry; |
2628 | pte_t pte; | 2628 | pte_t pte; |
2629 | struct mem_cgroup *ptr = NULL; | 2629 | struct mem_cgroup *ptr = NULL; |
@@ -2679,10 +2679,25 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
2679 | lock_page(page); | 2679 | lock_page(page); |
2680 | delayacct_clear_flag(DELAYACCT_PF_SWAPIN); | 2680 | delayacct_clear_flag(DELAYACCT_PF_SWAPIN); |
2681 | 2681 | ||
2682 | page = ksm_might_need_to_copy(page, vma, address); | 2682 | /* |
2683 | if (!page) { | 2683 | * Make sure try_to_free_swap or reuse_swap_page or swapoff did not |
2684 | ret = VM_FAULT_OOM; | 2684 | * release the swapcache from under us. The page pin, and pte_same |
2685 | goto out; | 2685 | * test below, are not enough to exclude that. Even if it is still |
2686 | * swapcache, we need to check that the page's swap has not changed. | ||
2687 | */ | ||
2688 | if (unlikely(!PageSwapCache(page) || page_private(page) != entry.val)) | ||
2689 | goto out_page; | ||
2690 | |||
2691 | if (ksm_might_need_to_copy(page, vma, address)) { | ||
2692 | swapcache = page; | ||
2693 | page = ksm_does_need_to_copy(page, vma, address); | ||
2694 | |||
2695 | if (unlikely(!page)) { | ||
2696 | ret = VM_FAULT_OOM; | ||
2697 | page = swapcache; | ||
2698 | swapcache = NULL; | ||
2699 | goto out_page; | ||
2700 | } | ||
2686 | } | 2701 | } |
2687 | 2702 | ||
2688 | if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) { | 2703 | if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) { |
@@ -2735,6 +2750,18 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
2735 | if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page)) | 2750 | if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page)) |
2736 | try_to_free_swap(page); | 2751 | try_to_free_swap(page); |
2737 | unlock_page(page); | 2752 | unlock_page(page); |
2753 | if (swapcache) { | ||
2754 | /* | ||
2755 | * Hold the lock to avoid the swap entry to be reused | ||
2756 | * until we take the PT lock for the pte_same() check | ||
2757 | * (to avoid false positives from pte_same). For | ||
2758 | * further safety release the lock after the swap_free | ||
2759 | * so that the swap count won't change under a | ||
2760 | * parallel locked swapcache. | ||
2761 | */ | ||
2762 | unlock_page(swapcache); | ||
2763 | page_cache_release(swapcache); | ||
2764 | } | ||
2738 | 2765 | ||
2739 | if (flags & FAULT_FLAG_WRITE) { | 2766 | if (flags & FAULT_FLAG_WRITE) { |
2740 | ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte); | 2767 | ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte); |
@@ -2756,6 +2783,10 @@ out_page: | |||
2756 | unlock_page(page); | 2783 | unlock_page(page); |
2757 | out_release: | 2784 | out_release: |
2758 | page_cache_release(page); | 2785 | page_cache_release(page); |
2786 | if (swapcache) { | ||
2787 | unlock_page(swapcache); | ||
2788 | page_cache_release(swapcache); | ||
2789 | } | ||
2759 | return ret; | 2790 | return ret; |
2760 | } | 2791 | } |
2761 | 2792 | ||
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index a4cfcdc00455..dd186c1a5d53 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c | |||
@@ -584,19 +584,19 @@ static inline int pageblock_free(struct page *page) | |||
584 | /* Return the start of the next active pageblock after a given page */ | 584 | /* Return the start of the next active pageblock after a given page */ |
585 | static struct page *next_active_pageblock(struct page *page) | 585 | static struct page *next_active_pageblock(struct page *page) |
586 | { | 586 | { |
587 | int pageblocks_stride; | ||
588 | |||
589 | /* Ensure the starting page is pageblock-aligned */ | 587 | /* Ensure the starting page is pageblock-aligned */ |
590 | BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1)); | 588 | BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1)); |
591 | 589 | ||
592 | /* Move forward by at least 1 * pageblock_nr_pages */ | ||
593 | pageblocks_stride = 1; | ||
594 | |||
595 | /* If the entire pageblock is free, move to the end of free page */ | 590 | /* If the entire pageblock is free, move to the end of free page */ |
596 | if (pageblock_free(page)) | 591 | if (pageblock_free(page)) { |
597 | pageblocks_stride += page_order(page) - pageblock_order; | 592 | int order; |
593 | /* be careful. we don't have locks, page_order can be changed.*/ | ||
594 | order = page_order(page); | ||
595 | if ((order < MAX_ORDER) && (order >= pageblock_order)) | ||
596 | return page + (1 << order); | ||
597 | } | ||
598 | 598 | ||
599 | return page + (pageblocks_stride * pageblock_nr_pages); | 599 | return page + pageblock_nr_pages; |
600 | } | 600 | } |
601 | 601 | ||
602 | /* Checks if this range of memory is likely to be hot-removable. */ | 602 | /* Checks if this range of memory is likely to be hot-removable. */ |
diff --git a/mm/mlock.c b/mm/mlock.c index cbae7c5b9568..b70919ce4f72 100644 --- a/mm/mlock.c +++ b/mm/mlock.c | |||
@@ -135,12 +135,6 @@ void munlock_vma_page(struct page *page) | |||
135 | } | 135 | } |
136 | } | 136 | } |
137 | 137 | ||
138 | /* Is the vma a continuation of the stack vma above it? */ | ||
139 | static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr) | ||
140 | { | ||
141 | return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); | ||
142 | } | ||
143 | |||
144 | static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr) | 138 | static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr) |
145 | { | 139 | { |
146 | return (vma->vm_flags & VM_GROWSDOWN) && | 140 | return (vma->vm_flags & VM_GROWSDOWN) && |
@@ -2009,6 +2009,7 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, | |||
2009 | removed_exe_file_vma(mm); | 2009 | removed_exe_file_vma(mm); |
2010 | fput(new->vm_file); | 2010 | fput(new->vm_file); |
2011 | } | 2011 | } |
2012 | unlink_anon_vmas(new); | ||
2012 | out_free_mpol: | 2013 | out_free_mpol: |
2013 | mpol_put(pol); | 2014 | mpol_put(pol); |
2014 | out_free_vma: | 2015 | out_free_vma: |
diff --git a/mm/mmzone.c b/mm/mmzone.c index f5b7d1760213..e35bfb82c855 100644 --- a/mm/mmzone.c +++ b/mm/mmzone.c | |||
@@ -87,3 +87,24 @@ int memmap_valid_within(unsigned long pfn, | |||
87 | return 1; | 87 | return 1; |
88 | } | 88 | } |
89 | #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */ | 89 | #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */ |
90 | |||
91 | #ifdef CONFIG_SMP | ||
92 | /* Called when a more accurate view of NR_FREE_PAGES is needed */ | ||
93 | unsigned long zone_nr_free_pages(struct zone *zone) | ||
94 | { | ||
95 | unsigned long nr_free_pages = zone_page_state(zone, NR_FREE_PAGES); | ||
96 | |||
97 | /* | ||
98 | * While kswapd is awake, it is considered the zone is under some | ||
99 | * memory pressure. Under pressure, there is a risk that | ||
100 | * per-cpu-counter-drift will allow the min watermark to be breached | ||
101 | * potentially causing a live-lock. While kswapd is awake and | ||
102 | * free pages are low, get a better estimate for free pages | ||
103 | */ | ||
104 | if (nr_free_pages < zone->percpu_drift_mark && | ||
105 | !waitqueue_active(&zone->zone_pgdat->kswapd_wait)) | ||
106 | return zone_page_state_snapshot(zone, NR_FREE_PAGES); | ||
107 | |||
108 | return nr_free_pages; | ||
109 | } | ||
110 | #endif /* CONFIG_SMP */ | ||
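zone_page_state() reads only the global counter, which on SMP can lag behind by up to the per-cpu threshold on every CPU; zone_page_state_snapshot(), added alongside this helper, folds the unflushed per-cpu deltas back in. Its shape is roughly the sketch below; treat it as an approximation of the header, not a verbatim copy.

	static inline unsigned long zone_page_state_snapshot(struct zone *zone,
						enum zone_stat_item item)
	{
		long x = atomic_long_read(&zone->vm_stat[item]);
	#ifdef CONFIG_SMP
		int cpu;

		/* fold in per-cpu deltas that have not been flushed yet */
		for_each_online_cpu(cpu)
			x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

		if (x < 0)
			x = 0;
	#endif
		return x;
	}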
diff --git a/mm/oom_kill.c b/mm/oom_kill.c index fc81cb22869e..4029583a1024 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c | |||
@@ -121,8 +121,8 @@ struct task_struct *find_lock_task_mm(struct task_struct *p) | |||
121 | } | 121 | } |
122 | 122 | ||
123 | /* return true if the task is not adequate as candidate victim task. */ | 123 | /* return true if the task is not adequate as candidate victim task. */ |
124 | static bool oom_unkillable_task(struct task_struct *p, struct mem_cgroup *mem, | 124 | static bool oom_unkillable_task(struct task_struct *p, |
125 | const nodemask_t *nodemask) | 125 | const struct mem_cgroup *mem, const nodemask_t *nodemask) |
126 | { | 126 | { |
127 | if (is_global_init(p)) | 127 | if (is_global_init(p)) |
128 | return true; | 128 | return true; |
@@ -208,8 +208,13 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem, | |||
208 | */ | 208 | */ |
209 | points += p->signal->oom_score_adj; | 209 | points += p->signal->oom_score_adj; |
210 | 210 | ||
211 | if (points < 0) | 211 | /* |
212 | return 0; | 212 | * Never return 0 for an eligible task that may be killed since it's |
213 | * possible that no single user task uses more than 0.1% of memory and | ||
214 | * no single admin task uses more than 3.0%. | ||
215 | */ | ||
216 | if (points <= 0) | ||
217 | return 1; | ||
213 | return (points < 1000) ? points : 1000; | 218 | return (points < 1000) ? points : 1000; |
214 | } | 219 | } |
215 | 220 | ||
@@ -339,26 +344,24 @@ static struct task_struct *select_bad_process(unsigned int *ppoints, | |||
339 | /** | 344 | /** |
340 | * dump_tasks - dump current memory state of all system tasks | 345 | * dump_tasks - dump current memory state of all system tasks |
341 | * @mem: current's memory controller, if constrained | 346 | * @mem: current's memory controller, if constrained |
347 | * @nodemask: nodemask passed to page allocator for mempolicy ooms | ||
342 | * | 348 | * |
343 | * Dumps the current memory state of all system tasks, excluding kernel threads. | 349 | * Dumps the current memory state of all eligible tasks. Tasks not in the same |
350 | * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes | ||
351 | * are not shown. | ||
344 | * State information includes task's pid, uid, tgid, vm size, rss, cpu, oom_adj | 352 | * State information includes task's pid, uid, tgid, vm size, rss, cpu, oom_adj |
345 | * value, oom_score_adj value, and name. | 353 | * value, oom_score_adj value, and name. |
346 | * | 354 | * |
347 | * If the actual is non-NULL, only tasks that are a member of the mem_cgroup are | ||
348 | * shown. | ||
349 | * | ||
350 | * Call with tasklist_lock read-locked. | 355 | * Call with tasklist_lock read-locked. |
351 | */ | 356 | */ |
352 | static void dump_tasks(const struct mem_cgroup *mem) | 357 | static void dump_tasks(const struct mem_cgroup *mem, const nodemask_t *nodemask) |
353 | { | 358 | { |
354 | struct task_struct *p; | 359 | struct task_struct *p; |
355 | struct task_struct *task; | 360 | struct task_struct *task; |
356 | 361 | ||
357 | pr_info("[ pid ] uid tgid total_vm rss cpu oom_adj oom_score_adj name\n"); | 362 | pr_info("[ pid ] uid tgid total_vm rss cpu oom_adj oom_score_adj name\n"); |
358 | for_each_process(p) { | 363 | for_each_process(p) { |
359 | if (p->flags & PF_KTHREAD) | 364 | if (oom_unkillable_task(p, mem, nodemask)) |
360 | continue; | ||
361 | if (mem && !task_in_mem_cgroup(p, mem)) | ||
362 | continue; | 365 | continue; |
363 | 366 | ||
364 | task = find_lock_task_mm(p); | 367 | task = find_lock_task_mm(p); |
@@ -381,7 +384,7 @@ static void dump_tasks(const struct mem_cgroup *mem) | |||
381 | } | 384 | } |
382 | 385 | ||
383 | static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order, | 386 | static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order, |
384 | struct mem_cgroup *mem) | 387 | struct mem_cgroup *mem, const nodemask_t *nodemask) |
385 | { | 388 | { |
386 | task_lock(current); | 389 | task_lock(current); |
387 | pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, " | 390 | pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, " |
@@ -394,7 +397,7 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order, | |||
394 | mem_cgroup_print_oom_info(mem, p); | 397 | mem_cgroup_print_oom_info(mem, p); |
395 | show_mem(); | 398 | show_mem(); |
396 | if (sysctl_oom_dump_tasks) | 399 | if (sysctl_oom_dump_tasks) |
397 | dump_tasks(mem); | 400 | dump_tasks(mem, nodemask); |
398 | } | 401 | } |
399 | 402 | ||
400 | #define K(x) ((x) << (PAGE_SHIFT-10)) | 403 | #define K(x) ((x) << (PAGE_SHIFT-10)) |
@@ -436,7 +439,7 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, | |||
436 | unsigned int victim_points = 0; | 439 | unsigned int victim_points = 0; |
437 | 440 | ||
438 | if (printk_ratelimit()) | 441 | if (printk_ratelimit()) |
439 | dump_header(p, gfp_mask, order, mem); | 442 | dump_header(p, gfp_mask, order, mem, nodemask); |
440 | 443 | ||
441 | /* | 444 | /* |
442 | * If the task is already exiting, don't alarm the sysadmin or kill | 445 | * If the task is already exiting, don't alarm the sysadmin or kill |
@@ -482,7 +485,7 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, | |||
482 | * Determines whether the kernel must panic because of the panic_on_oom sysctl. | 485 | * Determines whether the kernel must panic because of the panic_on_oom sysctl. |
483 | */ | 486 | */ |
484 | static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask, | 487 | static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask, |
485 | int order) | 488 | int order, const nodemask_t *nodemask) |
486 | { | 489 | { |
487 | if (likely(!sysctl_panic_on_oom)) | 490 | if (likely(!sysctl_panic_on_oom)) |
488 | return; | 491 | return; |
@@ -496,7 +499,7 @@ static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask, | |||
496 | return; | 499 | return; |
497 | } | 500 | } |
498 | read_lock(&tasklist_lock); | 501 | read_lock(&tasklist_lock); |
499 | dump_header(NULL, gfp_mask, order, NULL); | 502 | dump_header(NULL, gfp_mask, order, NULL, nodemask); |
500 | read_unlock(&tasklist_lock); | 503 | read_unlock(&tasklist_lock); |
501 | panic("Out of memory: %s panic_on_oom is enabled\n", | 504 | panic("Out of memory: %s panic_on_oom is enabled\n", |
502 | sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide"); | 505 | sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide"); |
@@ -509,7 +512,7 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask) | |||
509 | unsigned int points = 0; | 512 | unsigned int points = 0; |
510 | struct task_struct *p; | 513 | struct task_struct *p; |
511 | 514 | ||
512 | check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0); | 515 | check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0, NULL); |
513 | limit = mem_cgroup_get_limit(mem) >> PAGE_SHIFT; | 516 | limit = mem_cgroup_get_limit(mem) >> PAGE_SHIFT; |
514 | read_lock(&tasklist_lock); | 517 | read_lock(&tasklist_lock); |
515 | retry: | 518 | retry: |
@@ -641,6 +644,7 @@ static void clear_system_oom(void) | |||
641 | void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, | 644 | void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, |
642 | int order, nodemask_t *nodemask) | 645 | int order, nodemask_t *nodemask) |
643 | { | 646 | { |
647 | const nodemask_t *mpol_mask; | ||
644 | struct task_struct *p; | 648 | struct task_struct *p; |
645 | unsigned long totalpages; | 649 | unsigned long totalpages; |
646 | unsigned long freed = 0; | 650 | unsigned long freed = 0; |
@@ -670,7 +674,8 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, | |||
670 | */ | 674 | */ |
671 | constraint = constrained_alloc(zonelist, gfp_mask, nodemask, | 675 | constraint = constrained_alloc(zonelist, gfp_mask, nodemask, |
672 | &totalpages); | 676 | &totalpages); |
673 | check_panic_on_oom(constraint, gfp_mask, order); | 677 | mpol_mask = (constraint == CONSTRAINT_MEMORY_POLICY) ? nodemask : NULL; |
678 | check_panic_on_oom(constraint, gfp_mask, order, mpol_mask); | ||
674 | 679 | ||
675 | read_lock(&tasklist_lock); | 680 | read_lock(&tasklist_lock); |
676 | if (sysctl_oom_kill_allocating_task && | 681 | if (sysctl_oom_kill_allocating_task && |
@@ -688,15 +693,13 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, | |||
688 | } | 693 | } |
689 | 694 | ||
690 | retry: | 695 | retry: |
691 | p = select_bad_process(&points, totalpages, NULL, | 696 | p = select_bad_process(&points, totalpages, NULL, mpol_mask); |
692 | constraint == CONSTRAINT_MEMORY_POLICY ? nodemask : | ||
693 | NULL); | ||
694 | if (PTR_ERR(p) == -1UL) | 697 | if (PTR_ERR(p) == -1UL) |
695 | goto out; | 698 | goto out; |
696 | 699 | ||
697 | /* Found nothing?!?! Either we hang forever, or we panic. */ | 700 | /* Found nothing?!?! Either we hang forever, or we panic. */ |
698 | if (!p) { | 701 | if (!p) { |
699 | dump_header(NULL, gfp_mask, order, NULL); | 702 | dump_header(NULL, gfp_mask, order, NULL, mpol_mask); |
700 | read_unlock(&tasklist_lock); | 703 | read_unlock(&tasklist_lock); |
701 | panic("Out of memory and no killable processes...\n"); | 704 | panic("Out of memory and no killable processes...\n"); |
702 | } | 705 | } |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 768ea486df58..9536017108ec 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -589,13 +589,13 @@ static void free_pcppages_bulk(struct zone *zone, int count, | |||
589 | { | 589 | { |
590 | int migratetype = 0; | 590 | int migratetype = 0; |
591 | int batch_free = 0; | 591 | int batch_free = 0; |
592 | int to_free = count; | ||
592 | 593 | ||
593 | spin_lock(&zone->lock); | 594 | spin_lock(&zone->lock); |
594 | zone->all_unreclaimable = 0; | 595 | zone->all_unreclaimable = 0; |
595 | zone->pages_scanned = 0; | 596 | zone->pages_scanned = 0; |
596 | 597 | ||
597 | __mod_zone_page_state(zone, NR_FREE_PAGES, count); | 598 | while (to_free) { |
598 | while (count) { | ||
599 | struct page *page; | 599 | struct page *page; |
600 | struct list_head *list; | 600 | struct list_head *list; |
601 | 601 | ||
@@ -620,8 +620,9 @@ static void free_pcppages_bulk(struct zone *zone, int count, | |||
620 | /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */ | 620 | /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */ |
621 | __free_one_page(page, zone, 0, page_private(page)); | 621 | __free_one_page(page, zone, 0, page_private(page)); |
622 | trace_mm_page_pcpu_drain(page, 0, page_private(page)); | 622 | trace_mm_page_pcpu_drain(page, 0, page_private(page)); |
623 | } while (--count && --batch_free && !list_empty(list)); | 623 | } while (--to_free && --batch_free && !list_empty(list)); |
624 | } | 624 | } |
625 | __mod_zone_page_state(zone, NR_FREE_PAGES, count); | ||
625 | spin_unlock(&zone->lock); | 626 | spin_unlock(&zone->lock); |
626 | } | 627 | } |
627 | 628 | ||
@@ -632,8 +633,8 @@ static void free_one_page(struct zone *zone, struct page *page, int order, | |||
632 | zone->all_unreclaimable = 0; | 633 | zone->all_unreclaimable = 0; |
633 | zone->pages_scanned = 0; | 634 | zone->pages_scanned = 0; |
634 | 635 | ||
635 | __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order); | ||
636 | __free_one_page(page, zone, order, migratetype); | 636 | __free_one_page(page, zone, order, migratetype); |
637 | __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order); | ||
637 | spin_unlock(&zone->lock); | 638 | spin_unlock(&zone->lock); |
638 | } | 639 | } |
639 | 640 | ||
@@ -1462,7 +1463,7 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark, | |||
1462 | { | 1463 | { |
1463 | /* free_pages my go negative - that's OK */ | 1464 | /* free_pages my go negative - that's OK */ |
1464 | long min = mark; | 1465 | long min = mark; |
1465 | long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1; | 1466 | long free_pages = zone_nr_free_pages(z) - (1 << order) + 1; |
1466 | int o; | 1467 | int o; |
1467 | 1468 | ||
1468 | if (alloc_flags & ALLOC_HIGH) | 1469 | if (alloc_flags & ALLOC_HIGH) |
@@ -1847,6 +1848,7 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, | |||
1847 | struct page *page = NULL; | 1848 | struct page *page = NULL; |
1848 | struct reclaim_state reclaim_state; | 1849 | struct reclaim_state reclaim_state; |
1849 | struct task_struct *p = current; | 1850 | struct task_struct *p = current; |
1851 | bool drained = false; | ||
1850 | 1852 | ||
1851 | cond_resched(); | 1853 | cond_resched(); |
1852 | 1854 | ||
@@ -1865,14 +1867,25 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, | |||
1865 | 1867 | ||
1866 | cond_resched(); | 1868 | cond_resched(); |
1867 | 1869 | ||
1868 | if (order != 0) | 1870 | if (unlikely(!(*did_some_progress))) |
1869 | drain_all_pages(); | 1871 | return NULL; |
1870 | 1872 | ||
1871 | if (likely(*did_some_progress)) | 1873 | retry: |
1872 | page = get_page_from_freelist(gfp_mask, nodemask, order, | 1874 | page = get_page_from_freelist(gfp_mask, nodemask, order, |
1873 | zonelist, high_zoneidx, | 1875 | zonelist, high_zoneidx, |
1874 | alloc_flags, preferred_zone, | 1876 | alloc_flags, preferred_zone, |
1875 | migratetype); | 1877 | migratetype); |
1878 | |||
1879 | /* | ||
1880 | * If an allocation failed after direct reclaim, it could be because | ||
1881 | * pages are pinned on the per-cpu lists. Drain them and try again | ||
1882 | */ | ||
1883 | if (!page && !drained) { | ||
1884 | drain_all_pages(); | ||
1885 | drained = true; | ||
1886 | goto retry; | ||
1887 | } | ||
1888 | |||
1876 | return page; | 1889 | return page; |
1877 | } | 1890 | } |
1878 | 1891 | ||
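The reworked path only drains the per-cpu lists when the post-reclaim allocation actually fails, and then retries exactly once, instead of draining unconditionally for every order > 0 request. Stripped of the allocator details, the control flow reduces to the following illustrative sketch:

	bool drained = false;

	retry:
		page = get_page_from_freelist(...);
		if (page || drained)
			return page;		/* success, or already retried once */

		drain_all_pages();		/* reclaimed pages may sit on per-cpu lists */
		drained = true;
		goto retry;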
@@ -2424,7 +2437,7 @@ void show_free_areas(void) | |||
2424 | " all_unreclaimable? %s" | 2437 | " all_unreclaimable? %s" |
2425 | "\n", | 2438 | "\n", |
2426 | zone->name, | 2439 | zone->name, |
2427 | K(zone_page_state(zone, NR_FREE_PAGES)), | 2440 | K(zone_nr_free_pages(zone)), |
2428 | K(min_wmark_pages(zone)), | 2441 | K(min_wmark_pages(zone)), |
2429 | K(low_wmark_pages(zone)), | 2442 | K(low_wmark_pages(zone)), |
2430 | K(high_wmark_pages(zone)), | 2443 | K(high_wmark_pages(zone)), |
diff --git a/mm/percpu.c b/mm/percpu.c index e61dc2cc5873..c76ef3891e0d 100644 --- a/mm/percpu.c +++ b/mm/percpu.c | |||
@@ -393,7 +393,9 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc) | |||
393 | goto out_unlock; | 393 | goto out_unlock; |
394 | 394 | ||
395 | old_size = chunk->map_alloc * sizeof(chunk->map[0]); | 395 | old_size = chunk->map_alloc * sizeof(chunk->map[0]); |
396 | memcpy(new, chunk->map, old_size); | 396 | old = chunk->map; |
397 | |||
398 | memcpy(new, old, old_size); | ||
397 | 399 | ||
398 | chunk->map_alloc = new_alloc; | 400 | chunk->map_alloc = new_alloc; |
399 | chunk->map = new; | 401 | chunk->map = new; |
@@ -1162,7 +1164,7 @@ static struct pcpu_alloc_info * __init pcpu_build_alloc_info( | |||
1162 | } | 1164 | } |
1163 | 1165 | ||
1164 | /* | 1166 | /* |
1165 | * Don't accept if wastage is over 25%. The | 1167 | * Don't accept if wastage is over 1/3. The |
1166 | * greater-than comparison ensures upa==1 always | 1168 | * greater-than comparison ensures upa==1 always |
1167 | * passes the following check. | 1169 | * passes the following check. |
1168 | */ | 1170 | */ |
@@ -1399,9 +1401,9 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, | |||
1399 | 1401 | ||
1400 | if (pcpu_first_unit_cpu == NR_CPUS) | 1402 | if (pcpu_first_unit_cpu == NR_CPUS) |
1401 | pcpu_first_unit_cpu = cpu; | 1403 | pcpu_first_unit_cpu = cpu; |
1404 | pcpu_last_unit_cpu = cpu; | ||
1402 | } | 1405 | } |
1403 | } | 1406 | } |
1404 | pcpu_last_unit_cpu = cpu; | ||
1405 | pcpu_nr_units = unit; | 1407 | pcpu_nr_units = unit; |
1406 | 1408 | ||
1407 | for_each_possible_cpu(cpu) | 1409 | for_each_possible_cpu(cpu) |
diff --git a/mm/percpu_up.c b/mm/percpu_up.c index c4351c7f57d2..db884fae5721 100644 --- a/mm/percpu_up.c +++ b/mm/percpu_up.c | |||
@@ -14,13 +14,13 @@ void __percpu *__alloc_percpu(size_t size, size_t align) | |||
14 | * percpu sections on SMP for which this path isn't used. | 14 | * percpu sections on SMP for which this path isn't used. |
15 | */ | 15 | */ |
16 | WARN_ON_ONCE(align > SMP_CACHE_BYTES); | 16 | WARN_ON_ONCE(align > SMP_CACHE_BYTES); |
17 | return kzalloc(size, GFP_KERNEL); | 17 | return (void __percpu __force *)kzalloc(size, GFP_KERNEL); |
18 | } | 18 | } |
19 | EXPORT_SYMBOL_GPL(__alloc_percpu); | 19 | EXPORT_SYMBOL_GPL(__alloc_percpu); |
20 | 20 | ||
21 | void free_percpu(void __percpu *p) | 21 | void free_percpu(void __percpu *p) |
22 | { | 22 | { |
23 | kfree(p); | 23 | kfree(this_cpu_ptr(p)); |
24 | } | 24 | } |
25 | EXPORT_SYMBOL_GPL(free_percpu); | 25 | EXPORT_SYMBOL_GPL(free_percpu); |
26 | 26 | ||
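The new casts are purely for sparse: on UP a per-cpu allocation is a single kzalloc'd object, but the pointer still carries the __percpu address space, so it has to be stripped (which is what this_cpu_ptr() does here) before kfree() will accept it. A minimal hypothetical user looks the same on UP and SMP (preemption handling omitted for brevity):

	struct hit_counter {
		unsigned long hits;
	};
	struct hit_counter __percpu *hc;

	hc = alloc_percpu(struct hit_counter);
	if (!hc)
		return -ENOMEM;

	this_cpu_ptr(hc)->hits++;	/* strips the __percpu address space */

	free_percpu(hc);		/* on UP: kfree(this_cpu_ptr(hc)) */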
@@ -381,7 +381,13 @@ vma_address(struct page *page, struct vm_area_struct *vma) | |||
381 | unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) | 381 | unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) |
382 | { | 382 | { |
383 | if (PageAnon(page)) { | 383 | if (PageAnon(page)) { |
384 | if (vma->anon_vma->root != page_anon_vma(page)->root) | 384 | struct anon_vma *page__anon_vma = page_anon_vma(page); |
385 | /* | ||
386 | * Note: swapoff's unuse_vma() is more efficient with this | ||
387 | * check, and needs it to match anon_vma when KSM is active. | ||
388 | */ | ||
389 | if (!vma->anon_vma || !page__anon_vma || | ||
390 | vma->anon_vma->root != page__anon_vma->root) | ||
385 | return -EFAULT; | 391 | return -EFAULT; |
386 | } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) { | 392 | } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) { |
387 | if (!vma->vm_file || | 393 | if (!vma->vm_file || |
@@ -1564,13 +1570,14 @@ static void __hugepage_set_anon_rmap(struct page *page, | |||
1564 | struct vm_area_struct *vma, unsigned long address, int exclusive) | 1570 | struct vm_area_struct *vma, unsigned long address, int exclusive) |
1565 | { | 1571 | { |
1566 | struct anon_vma *anon_vma = vma->anon_vma; | 1572 | struct anon_vma *anon_vma = vma->anon_vma; |
1573 | |||
1567 | BUG_ON(!anon_vma); | 1574 | BUG_ON(!anon_vma); |
1568 | if (!exclusive) { | 1575 | |
1569 | struct anon_vma_chain *avc; | 1576 | if (PageAnon(page)) |
1570 | avc = list_entry(vma->anon_vma_chain.prev, | 1577 | return; |
1571 | struct anon_vma_chain, same_vma); | 1578 | if (!exclusive) |
1572 | anon_vma = avc->anon_vma; | 1579 | anon_vma = anon_vma->root; |
1573 | } | 1580 | |
1574 | anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; | 1581 | anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; |
1575 | page->mapping = (struct address_space *) anon_vma; | 1582 | page->mapping = (struct address_space *) anon_vma; |
1576 | page->index = linear_page_index(vma, address); | 1583 | page->index = linear_page_index(vma, address); |
@@ -1581,6 +1588,8 @@ void hugepage_add_anon_rmap(struct page *page, | |||
1581 | { | 1588 | { |
1582 | struct anon_vma *anon_vma = vma->anon_vma; | 1589 | struct anon_vma *anon_vma = vma->anon_vma; |
1583 | int first; | 1590 | int first; |
1591 | |||
1592 | BUG_ON(!PageLocked(page)); | ||
1584 | BUG_ON(!anon_vma); | 1593 | BUG_ON(!anon_vma); |
1585 | BUG_ON(address < vma->vm_start || address >= vma->vm_end); | 1594 | BUG_ON(address < vma->vm_start || address >= vma->vm_end); |
1586 | first = atomic_inc_and_test(&page->_mapcount); | 1595 | first = atomic_inc_and_test(&page->_mapcount); |
diff --git a/mm/swapfile.c b/mm/swapfile.c index 1f3f9c59a73a..7c703ff2f36f 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c | |||
@@ -47,8 +47,6 @@ long nr_swap_pages; | |||
47 | long total_swap_pages; | 47 | long total_swap_pages; |
48 | static int least_priority; | 48 | static int least_priority; |
49 | 49 | ||
50 | static bool swap_for_hibernation; | ||
51 | |||
52 | static const char Bad_file[] = "Bad swap file entry "; | 50 | static const char Bad_file[] = "Bad swap file entry "; |
53 | static const char Unused_file[] = "Unused swap file entry "; | 51 | static const char Unused_file[] = "Unused swap file entry "; |
54 | static const char Bad_offset[] = "Bad swap offset entry "; | 52 | static const char Bad_offset[] = "Bad swap offset entry "; |
@@ -141,8 +139,7 @@ static int discard_swap(struct swap_info_struct *si) | |||
141 | nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9); | 139 | nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9); |
142 | if (nr_blocks) { | 140 | if (nr_blocks) { |
143 | err = blkdev_issue_discard(si->bdev, start_block, | 141 | err = blkdev_issue_discard(si->bdev, start_block, |
144 | nr_blocks, GFP_KERNEL, | 142 | nr_blocks, GFP_KERNEL, BLKDEV_IFL_WAIT); |
145 | BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER); | ||
146 | if (err) | 143 | if (err) |
147 | return err; | 144 | return err; |
148 | cond_resched(); | 145 | cond_resched(); |
@@ -153,8 +150,7 @@ static int discard_swap(struct swap_info_struct *si) | |||
153 | nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9); | 150 | nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9); |
154 | 151 | ||
155 | err = blkdev_issue_discard(si->bdev, start_block, | 152 | err = blkdev_issue_discard(si->bdev, start_block, |
156 | nr_blocks, GFP_KERNEL, | 153 | nr_blocks, GFP_KERNEL, BLKDEV_IFL_WAIT); |
157 | BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER); | ||
158 | if (err) | 154 | if (err) |
159 | break; | 155 | break; |
160 | 156 | ||
@@ -193,8 +189,7 @@ static void discard_swap_cluster(struct swap_info_struct *si, | |||
193 | start_block <<= PAGE_SHIFT - 9; | 189 | start_block <<= PAGE_SHIFT - 9; |
194 | nr_blocks <<= PAGE_SHIFT - 9; | 190 | nr_blocks <<= PAGE_SHIFT - 9; |
195 | if (blkdev_issue_discard(si->bdev, start_block, | 191 | if (blkdev_issue_discard(si->bdev, start_block, |
196 | nr_blocks, GFP_NOIO, BLKDEV_IFL_WAIT | | 192 | nr_blocks, GFP_NOIO, BLKDEV_IFL_WAIT)) |
197 | BLKDEV_IFL_BARRIER)) | ||
198 | break; | 193 | break; |
199 | } | 194 | } |
200 | 195 | ||
@@ -320,10 +315,8 @@ checks: | |||
320 | if (offset > si->highest_bit) | 315 | if (offset > si->highest_bit) |
321 | scan_base = offset = si->lowest_bit; | 316 | scan_base = offset = si->lowest_bit; |
322 | 317 | ||
323 | /* reuse swap entry of cache-only swap if not hibernation. */ | 318 | /* reuse swap entry of cache-only swap if not busy. */ |
324 | if (vm_swap_full() | 319 | if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) { |
325 | && usage == SWAP_HAS_CACHE | ||
326 | && si->swap_map[offset] == SWAP_HAS_CACHE) { | ||
327 | int swap_was_freed; | 320 | int swap_was_freed; |
328 | spin_unlock(&swap_lock); | 321 | spin_unlock(&swap_lock); |
329 | swap_was_freed = __try_to_reclaim_swap(si, offset); | 322 | swap_was_freed = __try_to_reclaim_swap(si, offset); |
@@ -453,8 +446,6 @@ swp_entry_t get_swap_page(void) | |||
453 | spin_lock(&swap_lock); | 446 | spin_lock(&swap_lock); |
454 | if (nr_swap_pages <= 0) | 447 | if (nr_swap_pages <= 0) |
455 | goto noswap; | 448 | goto noswap; |
456 | if (swap_for_hibernation) | ||
457 | goto noswap; | ||
458 | nr_swap_pages--; | 449 | nr_swap_pages--; |
459 | 450 | ||
460 | for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) { | 451 | for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) { |
@@ -487,6 +478,28 @@ noswap: | |||
487 | return (swp_entry_t) {0}; | 478 | return (swp_entry_t) {0}; |
488 | } | 479 | } |
489 | 480 | ||
481 | /* The only caller of this function is now the suspend routine */ | ||
482 | swp_entry_t get_swap_page_of_type(int type) | ||
483 | { | ||
484 | struct swap_info_struct *si; | ||
485 | pgoff_t offset; | ||
486 | |||
487 | spin_lock(&swap_lock); | ||
488 | si = swap_info[type]; | ||
489 | if (si && (si->flags & SWP_WRITEOK)) { | ||
490 | nr_swap_pages--; | ||
491 | /* This is called for allocating swap entry, not cache */ | ||
492 | offset = scan_swap_map(si, 1); | ||
493 | if (offset) { | ||
494 | spin_unlock(&swap_lock); | ||
495 | return swp_entry(type, offset); | ||
496 | } | ||
497 | nr_swap_pages++; | ||
498 | } | ||
499 | spin_unlock(&swap_lock); | ||
500 | return (swp_entry_t) {0}; | ||
501 | } | ||
502 | |||
490 | static struct swap_info_struct *swap_info_get(swp_entry_t entry) | 503 | static struct swap_info_struct *swap_info_get(swp_entry_t entry) |
491 | { | 504 | { |
492 | struct swap_info_struct *p; | 505 | struct swap_info_struct *p; |
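As the comment says, the only remaining caller is the suspend/hibernation code; kernel/power/swap.c allocates image pages with it roughly as sketched below (reconstructed for illustration, details may differ from the actual file):

	sector_t alloc_swapdev_block(int swap)
	{
		unsigned long offset;

		offset = swp_offset(get_swap_page_of_type(swap));
		if (offset) {
			if (swsusp_extents_insert(offset))
				swap_free(swp_entry(swap, offset));
			else
				return swapdev_block(swap, offset);
		}
		return 0;
	}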
@@ -670,6 +683,24 @@ int try_to_free_swap(struct page *page) | |||
670 | if (page_swapcount(page)) | 683 | if (page_swapcount(page)) |
671 | return 0; | 684 | return 0; |
672 | 685 | ||
686 | /* | ||
687 | * Once hibernation has begun to create its image of memory, | ||
688 | * there's a danger that one of the calls to try_to_free_swap() | ||
689 | * - most probably a call from __try_to_reclaim_swap() while | ||
690 | * hibernation is allocating its own swap pages for the image, | ||
691 | * but conceivably even a call from memory reclaim - will free | ||
692 | * the swap from a page which has already been recorded in the | ||
693 | * image as a clean swapcache page, and then reuse its swap for | ||
694 | * another page of the image. On waking from hibernation, the | ||
695 | * original page might be freed under memory pressure, then | ||
696 | * later read back in from swap, now with the wrong data. | ||
697 | * | ||
698 | * Hibernation clears bits from gfp_allowed_mask to prevent | ||
699 | * memory reclaim from writing to disk, so check that here. | ||
700 | */ | ||
701 | if (!(gfp_allowed_mask & __GFP_IO)) | ||
702 | return 0; | ||
703 | |||
673 | delete_from_swap_cache(page); | 704 | delete_from_swap_cache(page); |
674 | SetPageDirty(page); | 705 | SetPageDirty(page); |
675 | return 1; | 706 | return 1; |
@@ -746,74 +777,6 @@ int mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep) | |||
746 | #endif | 777 | #endif |
747 | 778 | ||
748 | #ifdef CONFIG_HIBERNATION | 779 | #ifdef CONFIG_HIBERNATION |
749 | |||
750 | static pgoff_t hibernation_offset[MAX_SWAPFILES]; | ||
751 | /* | ||
752 | * Once hibernation starts to use swap, we freeze swap_map[]. Otherwise, | ||
753 | * saved swap_map[] image to the disk will be an incomplete because it's | ||
754 | * changing without synchronization with hibernation snap shot. | ||
755 | * At resume, we just make swap_for_hibernation=false. We can forget | ||
756 | * used maps easily. | ||
757 | */ | ||
758 | void hibernation_freeze_swap(void) | ||
759 | { | ||
760 | int i; | ||
761 | |||
762 | spin_lock(&swap_lock); | ||
763 | |||
764 | printk(KERN_INFO "PM: Freeze Swap\n"); | ||
765 | swap_for_hibernation = true; | ||
766 | for (i = 0; i < MAX_SWAPFILES; i++) | ||
767 | hibernation_offset[i] = 1; | ||
768 | spin_unlock(&swap_lock); | ||
769 | } | ||
770 | |||
771 | void hibernation_thaw_swap(void) | ||
772 | { | ||
773 | spin_lock(&swap_lock); | ||
774 | if (swap_for_hibernation) { | ||
775 | printk(KERN_INFO "PM: Thaw Swap\n"); | ||
776 | swap_for_hibernation = false; | ||
777 | } | ||
778 | spin_unlock(&swap_lock); | ||
779 | } | ||
780 | |||
781 | /* | ||
782 | * Because updateing swap_map[] can make not-saved-status-change, | ||
783 | * we use our own easy allocator. | ||
784 | * Please see kernel/power/swap.c, Used swaps are recorded into | ||
785 | * RB-tree. | ||
786 | */ | ||
787 | swp_entry_t get_swap_for_hibernation(int type) | ||
788 | { | ||
789 | pgoff_t off; | ||
790 | swp_entry_t val = {0}; | ||
791 | struct swap_info_struct *si; | ||
792 | |||
793 | spin_lock(&swap_lock); | ||
794 | |||
795 | si = swap_info[type]; | ||
796 | if (!si || !(si->flags & SWP_WRITEOK)) | ||
797 | goto done; | ||
798 | |||
799 | for (off = hibernation_offset[type]; off < si->max; ++off) { | ||
800 | if (!si->swap_map[off]) | ||
801 | break; | ||
802 | } | ||
803 | if (off < si->max) { | ||
804 | val = swp_entry(type, off); | ||
805 | hibernation_offset[type] = off + 1; | ||
806 | } | ||
807 | done: | ||
808 | spin_unlock(&swap_lock); | ||
809 | return val; | ||
810 | } | ||
811 | |||
812 | void swap_free_for_hibernation(swp_entry_t ent) | ||
813 | { | ||
814 | /* Nothing to do */ | ||
815 | } | ||
816 | |||
817 | /* | 780 | /* |
818 | * Find the swap type that corresponds to given device (if any). | 781 | * Find the swap type that corresponds to given device (if any). |
819 | * | 782 | * |
@@ -2084,7 +2047,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) | |||
2084 | p->flags |= SWP_SOLIDSTATE; | 2047 | p->flags |= SWP_SOLIDSTATE; |
2085 | p->cluster_next = 1 + (random32() % p->highest_bit); | 2048 | p->cluster_next = 1 + (random32() % p->highest_bit); |
2086 | } | 2049 | } |
2087 | if (discard_swap(p) == 0) | 2050 | if (discard_swap(p) == 0 && (swap_flags & SWAP_FLAG_DISCARD)) |
2088 | p->flags |= SWP_DISCARDABLE; | 2051 | p->flags |= SWP_DISCARDABLE; |
2089 | } | 2052 | } |
2090 | 2053 | ||
diff --git a/mm/vmscan.c b/mm/vmscan.c index c391c320dbaf..c5dfabf25f11 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -1804,12 +1804,11 @@ static void shrink_zone(int priority, struct zone *zone, | |||
1804 | * If a zone is deemed to be full of pinned pages then just give it a light | 1804 | * If a zone is deemed to be full of pinned pages then just give it a light |
1805 | * scan then give up on it. | 1805 | * scan then give up on it. |
1806 | */ | 1806 | */ |
1807 | static bool shrink_zones(int priority, struct zonelist *zonelist, | 1807 | static void shrink_zones(int priority, struct zonelist *zonelist, |
1808 | struct scan_control *sc) | 1808 | struct scan_control *sc) |
1809 | { | 1809 | { |
1810 | struct zoneref *z; | 1810 | struct zoneref *z; |
1811 | struct zone *zone; | 1811 | struct zone *zone; |
1812 | bool all_unreclaimable = true; | ||
1813 | 1812 | ||
1814 | for_each_zone_zonelist_nodemask(zone, z, zonelist, | 1813 | for_each_zone_zonelist_nodemask(zone, z, zonelist, |
1815 | gfp_zone(sc->gfp_mask), sc->nodemask) { | 1814 | gfp_zone(sc->gfp_mask), sc->nodemask) { |
@@ -1827,8 +1826,38 @@ static bool shrink_zones(int priority, struct zonelist *zonelist, | |||
1827 | } | 1826 | } |
1828 | 1827 | ||
1829 | shrink_zone(priority, zone, sc); | 1828 | shrink_zone(priority, zone, sc); |
1830 | all_unreclaimable = false; | ||
1831 | } | 1829 | } |
1830 | } | ||
1831 | |||
1832 | static bool zone_reclaimable(struct zone *zone) | ||
1833 | { | ||
1834 | return zone->pages_scanned < zone_reclaimable_pages(zone) * 6; | ||
1835 | } | ||
1836 | |||
1837 | /* | ||
1838 | * As hibernation is going on, kswapd is frozen so that it can't mark | ||
1839 | * the zone into all_unreclaimable. It can't handle OOM during hibernation. | ||
1840 | * So let's check zone's unreclaimable in direct reclaim as well as kswapd. | ||
1841 | */ | ||
1842 | static bool all_unreclaimable(struct zonelist *zonelist, | ||
1843 | struct scan_control *sc) | ||
1844 | { | ||
1845 | struct zoneref *z; | ||
1846 | struct zone *zone; | ||
1847 | bool all_unreclaimable = true; | ||
1848 | |||
1849 | for_each_zone_zonelist_nodemask(zone, z, zonelist, | ||
1850 | gfp_zone(sc->gfp_mask), sc->nodemask) { | ||
1851 | if (!populated_zone(zone)) | ||
1852 | continue; | ||
1853 | if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) | ||
1854 | continue; | ||
1855 | if (zone_reclaimable(zone)) { | ||
1856 | all_unreclaimable = false; | ||
1857 | break; | ||
1858 | } | ||
1859 | } | ||
1860 | |||
1832 | return all_unreclaimable; | 1861 | return all_unreclaimable; |
1833 | } | 1862 | } |
1834 | 1863 | ||
@@ -1852,7 +1881,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, | |||
1852 | struct scan_control *sc) | 1881 | struct scan_control *sc) |
1853 | { | 1882 | { |
1854 | int priority; | 1883 | int priority; |
1855 | bool all_unreclaimable; | ||
1856 | unsigned long total_scanned = 0; | 1884 | unsigned long total_scanned = 0; |
1857 | struct reclaim_state *reclaim_state = current->reclaim_state; | 1885 | struct reclaim_state *reclaim_state = current->reclaim_state; |
1858 | struct zoneref *z; | 1886 | struct zoneref *z; |
@@ -1869,7 +1897,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, | |||
1869 | sc->nr_scanned = 0; | 1897 | sc->nr_scanned = 0; |
1870 | if (!priority) | 1898 | if (!priority) |
1871 | disable_swap_token(); | 1899 | disable_swap_token(); |
1872 | all_unreclaimable = shrink_zones(priority, zonelist, sc); | 1900 | shrink_zones(priority, zonelist, sc); |
1873 | /* | 1901 | /* |
1874 | * Don't shrink slabs when reclaiming memory from | 1902 | * Don't shrink slabs when reclaiming memory from |
1875 | * over limit cgroups | 1903 | * over limit cgroups |
@@ -1931,7 +1959,7 @@ out: | |||
1931 | return sc->nr_reclaimed; | 1959 | return sc->nr_reclaimed; |
1932 | 1960 | ||
1933 | /* top priority shrink_zones still had more to do? don't OOM, then */ | 1961 | /* top priority shrink_zones still had more to do? don't OOM, then */ |
1934 | if (scanning_global_lru(sc) && !all_unreclaimable) | 1962 | if (scanning_global_lru(sc) && !all_unreclaimable(zonelist, sc)) |
1935 | return 1; | 1963 | return 1; |
1936 | 1964 | ||
1937 | return 0; | 1965 | return 0; |
@@ -2197,8 +2225,7 @@ loop_again: | |||
2197 | total_scanned += sc.nr_scanned; | 2225 | total_scanned += sc.nr_scanned; |
2198 | if (zone->all_unreclaimable) | 2226 | if (zone->all_unreclaimable) |
2199 | continue; | 2227 | continue; |
2200 | if (nr_slab == 0 && | 2228 | if (nr_slab == 0 && !zone_reclaimable(zone)) |
2201 | zone->pages_scanned >= (zone_reclaimable_pages(zone) * 6)) | ||
2202 | zone->all_unreclaimable = 1; | 2229 | zone->all_unreclaimable = 1; |
2203 | /* | 2230 | /* |
2204 | * If we've done a decent amount of scanning and | 2231 | * If we've done a decent amount of scanning and |
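
The vmscan.c hunk above replaces the all_unreclaimable flag that shrink_zones() used to return with a separate walk built on zone_reclaimable(): a zone still counts as reclaimable while it has been scanned fewer than six times the number of reclaimable pages it holds. A minimal userspace sketch of that heuristic, using hypothetical page counts, might look like this:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the zone_reclaimable() test added in the hunk above: give up
 * on a zone only after it has been scanned six times over. */
static bool zone_reclaimable(unsigned long pages_scanned,
			     unsigned long reclaimable_pages)
{
	return pages_scanned < reclaimable_pages * 6;
}

int main(void)
{
	/* Hypothetical counters, purely for illustration. */
	printf("%d\n", zone_reclaimable(1000, 500));	/* 1: keep scanning */
	printf("%d\n", zone_reclaimable(3001, 500));	/* 0: give up */
	return 0;
}
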
diff --git a/mm/vmstat.c b/mm/vmstat.c index f389168f9a83..355a9e669aaa 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c | |||
@@ -138,11 +138,24 @@ static void refresh_zone_stat_thresholds(void) | |||
138 | int threshold; | 138 | int threshold; |
139 | 139 | ||
140 | for_each_populated_zone(zone) { | 140 | for_each_populated_zone(zone) { |
141 | unsigned long max_drift, tolerate_drift; | ||
142 | |||
141 | threshold = calculate_threshold(zone); | 143 | threshold = calculate_threshold(zone); |
142 | 144 | ||
143 | for_each_online_cpu(cpu) | 145 | for_each_online_cpu(cpu) |
144 | per_cpu_ptr(zone->pageset, cpu)->stat_threshold | 146 | per_cpu_ptr(zone->pageset, cpu)->stat_threshold |
145 | = threshold; | 147 | = threshold; |
148 | |||
149 | /* | ||
150 | * Only set percpu_drift_mark if there is a danger that | ||
151 | * NR_FREE_PAGES reports the low watermark is ok when in fact | ||
152 | * the min watermark could be breached by an allocation | ||
153 | */ | ||
154 | tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone); | ||
155 | max_drift = num_online_cpus() * threshold; | ||
156 | if (max_drift > tolerate_drift) | ||
157 | zone->percpu_drift_mark = high_wmark_pages(zone) + | ||
158 | max_drift; | ||
146 | } | 159 | } |
147 | } | 160 | } |
148 | 161 | ||
@@ -813,7 +826,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat, | |||
813 | "\n scanned %lu" | 826 | "\n scanned %lu" |
814 | "\n spanned %lu" | 827 | "\n spanned %lu" |
815 | "\n present %lu", | 828 | "\n present %lu", |
816 | zone_page_state(zone, NR_FREE_PAGES), | 829 | zone_nr_free_pages(zone), |
817 | min_wmark_pages(zone), | 830 | min_wmark_pages(zone), |
818 | low_wmark_pages(zone), | 831 | low_wmark_pages(zone), |
819 | high_wmark_pages(zone), | 832 | high_wmark_pages(zone), |
@@ -998,6 +1011,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb, | |||
998 | switch (action) { | 1011 | switch (action) { |
999 | case CPU_ONLINE: | 1012 | case CPU_ONLINE: |
1000 | case CPU_ONLINE_FROZEN: | 1013 | case CPU_ONLINE_FROZEN: |
1014 | refresh_zone_stat_thresholds(); | ||
1001 | start_cpu_timer(cpu); | 1015 | start_cpu_timer(cpu); |
1002 | node_set_state(cpu_to_node(cpu), N_CPU); | 1016 | node_set_state(cpu_to_node(cpu), N_CPU); |
1003 | break; | 1017 | break; |
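
The vmstat.c hunk bounds the error that per-CPU counters can introduce into NR_FREE_PAGES: each online CPU may hold up to stat_threshold uncounted pages, so the global counter can drift by num_online_cpus() * threshold. percpu_drift_mark is only set when that drift could hide a breach of the min watermark. A small sketch of the same arithmetic, with assumed watermark and threshold values rather than ones read from a real zone:

#include <stdio.h>

int main(void)
{
	/* Assumed example values for a single zone. */
	unsigned long min_wmark = 1000, low_wmark = 1250, high_wmark = 1500;
	unsigned long threshold = 30, online_cpus = 16;

	unsigned long tolerate_drift = low_wmark - min_wmark;	/* 250 pages */
	unsigned long max_drift = online_cpus * threshold;	/* 480 pages */
	unsigned long percpu_drift_mark = 0;

	if (max_drift > tolerate_drift)
		percpu_drift_mark = high_wmark + max_drift;

	printf("percpu_drift_mark = %lu\n", percpu_drift_mark);	/* 1980 */
	return 0;
}
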
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c index 01ddb0472f86..0eb96f7e44be 100644 --- a/net/8021q/vlan_core.c +++ b/net/8021q/vlan_core.c | |||
@@ -24,8 +24,11 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, | |||
24 | 24 | ||
25 | if (vlan_dev) | 25 | if (vlan_dev) |
26 | skb->dev = vlan_dev; | 26 | skb->dev = vlan_dev; |
27 | else if (vlan_id) | 27 | else if (vlan_id) { |
28 | goto drop; | 28 | if (!(skb->dev->flags & IFF_PROMISC)) |
29 | goto drop; | ||
30 | skb->pkt_type = PACKET_OTHERHOST; | ||
31 | } | ||
29 | 32 | ||
30 | return (polling ? netif_receive_skb(skb) : netif_rx(skb)); | 33 | return (polling ? netif_receive_skb(skb) : netif_rx(skb)); |
31 | 34 | ||
@@ -102,8 +105,11 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp, | |||
102 | 105 | ||
103 | if (vlan_dev) | 106 | if (vlan_dev) |
104 | skb->dev = vlan_dev; | 107 | skb->dev = vlan_dev; |
105 | else if (vlan_id) | 108 | else if (vlan_id) { |
106 | goto drop; | 109 | if (!(skb->dev->flags & IFF_PROMISC)) |
110 | goto drop; | ||
111 | skb->pkt_type = PACKET_OTHERHOST; | ||
112 | } | ||
107 | 113 | ||
108 | for (p = napi->gro_list; p; p = p->next) { | 114 | for (p = napi->gro_list; p; p = p->next) { |
109 | NAPI_GRO_CB(p)->same_flow = | 115 | NAPI_GRO_CB(p)->same_flow = |
diff --git a/net/9p/client.c b/net/9p/client.c index dc6f2f26d023..9eb72505308f 100644 --- a/net/9p/client.c +++ b/net/9p/client.c | |||
@@ -331,8 +331,10 @@ static void p9_tag_cleanup(struct p9_client *c) | |||
331 | } | 331 | } |
332 | } | 332 | } |
333 | 333 | ||
334 | if (c->tagpool) | 334 | if (c->tagpool) { |
335 | p9_idpool_put(0, c->tagpool); /* free reserved tag 0 */ | ||
335 | p9_idpool_destroy(c->tagpool); | 336 | p9_idpool_destroy(c->tagpool); |
337 | } | ||
336 | 338 | ||
337 | /* free requests associated with tags */ | 339 | /* free requests associated with tags */ |
338 | for (row = 0; row < (c->max_tag/P9_ROW_MAXTAG); row++) { | 340 | for (row = 0; row < (c->max_tag/P9_ROW_MAXTAG); row++) { |
@@ -944,6 +946,7 @@ struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames, | |||
944 | int16_t nwqids, count; | 946 | int16_t nwqids, count; |
945 | 947 | ||
946 | err = 0; | 948 | err = 0; |
949 | wqids = NULL; | ||
947 | clnt = oldfid->clnt; | 950 | clnt = oldfid->clnt; |
948 | if (clone) { | 951 | if (clone) { |
949 | fid = p9_fid_create(clnt); | 952 | fid = p9_fid_create(clnt); |
@@ -994,9 +997,11 @@ struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames, | |||
994 | else | 997 | else |
995 | fid->qid = oldfid->qid; | 998 | fid->qid = oldfid->qid; |
996 | 999 | ||
1000 | kfree(wqids); | ||
997 | return fid; | 1001 | return fid; |
998 | 1002 | ||
999 | clunk_fid: | 1003 | clunk_fid: |
1004 | kfree(wqids); | ||
1000 | p9_client_clunk(fid); | 1005 | p9_client_clunk(fid); |
1001 | fid = NULL; | 1006 | fid = NULL; |
1002 | 1007 | ||
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c index 0ea20c30466c..17c5ba7551a5 100644 --- a/net/9p/trans_rdma.c +++ b/net/9p/trans_rdma.c | |||
@@ -426,8 +426,10 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req) | |||
426 | 426 | ||
427 | /* Allocate an fcall for the reply */ | 427 | /* Allocate an fcall for the reply */ |
428 | rpl_context = kmalloc(sizeof *rpl_context, GFP_KERNEL); | 428 | rpl_context = kmalloc(sizeof *rpl_context, GFP_KERNEL); |
429 | if (!rpl_context) | 429 | if (!rpl_context) { |
430 | err = -ENOMEM; | ||
430 | goto err_close; | 431 | goto err_close; |
432 | } | ||
431 | 433 | ||
432 | /* | 434 | /* |
433 | * If the request has a buffer, steal it, otherwise | 435 | * If the request has a buffer, steal it, otherwise |
@@ -445,8 +447,8 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req) | |||
445 | } | 447 | } |
446 | rpl_context->rc = req->rc; | 448 | rpl_context->rc = req->rc; |
447 | if (!rpl_context->rc) { | 449 | if (!rpl_context->rc) { |
448 | kfree(rpl_context); | 450 | err = -ENOMEM; |
449 | goto err_close; | 451 | goto err_free2; |
450 | } | 452 | } |
451 | 453 | ||
452 | /* | 454 | /* |
@@ -458,11 +460,8 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req) | |||
458 | */ | 460 | */ |
459 | if (atomic_inc_return(&rdma->rq_count) <= rdma->rq_depth) { | 461 | if (atomic_inc_return(&rdma->rq_count) <= rdma->rq_depth) { |
460 | err = post_recv(client, rpl_context); | 462 | err = post_recv(client, rpl_context); |
461 | if (err) { | 463 | if (err) |
462 | kfree(rpl_context->rc); | 464 | goto err_free1; |
463 | kfree(rpl_context); | ||
464 | goto err_close; | ||
465 | } | ||
466 | } else | 465 | } else |
467 | atomic_dec(&rdma->rq_count); | 466 | atomic_dec(&rdma->rq_count); |
468 | 467 | ||
@@ -471,8 +470,10 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req) | |||
471 | 470 | ||
472 | /* Post the request */ | 471 | /* Post the request */ |
473 | c = kmalloc(sizeof *c, GFP_KERNEL); | 472 | c = kmalloc(sizeof *c, GFP_KERNEL); |
474 | if (!c) | 473 | if (!c) { |
475 | goto err_close; | 474 | err = -ENOMEM; |
475 | goto err_free1; | ||
476 | } | ||
476 | c->req = req; | 477 | c->req = req; |
477 | 478 | ||
478 | c->busa = ib_dma_map_single(rdma->cm_id->device, | 479 | c->busa = ib_dma_map_single(rdma->cm_id->device, |
@@ -499,9 +500,15 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req) | |||
499 | return ib_post_send(rdma->qp, &wr, &bad_wr); | 500 | return ib_post_send(rdma->qp, &wr, &bad_wr); |
500 | 501 | ||
501 | error: | 502 | error: |
503 | kfree(c); | ||
504 | kfree(rpl_context->rc); | ||
505 | kfree(rpl_context); | ||
502 | P9_DPRINTK(P9_DEBUG_ERROR, "EIO\n"); | 506 | P9_DPRINTK(P9_DEBUG_ERROR, "EIO\n"); |
503 | return -EIO; | 507 | return -EIO; |
504 | 508 | err_free1: | |
509 | kfree(rpl_context->rc); | ||
510 | err_free2: | ||
511 | kfree(rpl_context); | ||
505 | err_close: | 512 | err_close: |
506 | spin_lock_irqsave(&rdma->req_lock, flags); | 513 | spin_lock_irqsave(&rdma->req_lock, flags); |
507 | if (rdma->state < P9_RDMA_CLOSING) { | 514 | if (rdma->state < P9_RDMA_CLOSING) { |
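
The trans_rdma.c hunks replace ad-hoc kfree() calls on each failure path with an ordered ladder of goto labels, so every exit frees exactly what has been allocated so far and nothing else. A self-contained sketch of the same unwind pattern, with plain malloc() buffers standing in for the 9P contexts:

#include <errno.h>
#include <stdlib.h>

static int do_request(void)
{
	void *rpl_context, *rpl_buf, *send_context;
	int err;

	rpl_context = malloc(64);
	if (!rpl_context) {
		err = -ENOMEM;
		goto err_close;		/* nothing allocated yet */
	}

	rpl_buf = malloc(64);
	if (!rpl_buf) {
		err = -ENOMEM;
		goto err_free2;		/* only rpl_context exists */
	}

	send_context = malloc(64);
	if (!send_context) {
		err = -ENOMEM;
		goto err_free1;		/* rpl_context and rpl_buf exist */
	}

	free(send_context);
	free(rpl_buf);
	free(rpl_context);
	return 0;

err_free1:
	free(rpl_buf);
err_free2:
	free(rpl_context);
err_close:
	return err;
}

int main(void)
{
	return do_request() ? 1 : 0;
}
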
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index dcfbe99ff81c..b88515936e4b 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c | |||
@@ -329,7 +329,8 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args) | |||
329 | 329 | ||
330 | mutex_lock(&virtio_9p_lock); | 330 | mutex_lock(&virtio_9p_lock); |
331 | list_for_each_entry(chan, &virtio_chan_list, chan_list) { | 331 | list_for_each_entry(chan, &virtio_chan_list, chan_list) { |
332 | if (!strncmp(devname, chan->tag, chan->tag_len)) { | 332 | if (!strncmp(devname, chan->tag, chan->tag_len) && |
333 | strlen(devname) == chan->tag_len) { | ||
333 | if (!chan->inuse) { | 334 | if (!chan->inuse) { |
334 | chan->inuse = true; | 335 | chan->inuse = true; |
335 | found = 1; | 336 | found = 1; |
diff --git a/net/Kconfig b/net/Kconfig index e330594d3709..e926884c1675 100644 --- a/net/Kconfig +++ b/net/Kconfig | |||
@@ -217,7 +217,7 @@ source "net/dns_resolver/Kconfig" | |||
217 | 217 | ||
218 | config RPS | 218 | config RPS |
219 | boolean | 219 | boolean |
220 | depends on SMP && SYSFS | 220 | depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS |
221 | default y | 221 | default y |
222 | 222 | ||
223 | menu "Network testing" | 223 | menu "Network testing" |
diff --git a/net/atm/br2684.c b/net/atm/br2684.c index 651babdfab38..ad2b232a2055 100644 --- a/net/atm/br2684.c +++ b/net/atm/br2684.c | |||
@@ -399,12 +399,6 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb) | |||
399 | unregister_netdev(net_dev); | 399 | unregister_netdev(net_dev); |
400 | free_netdev(net_dev); | 400 | free_netdev(net_dev); |
401 | } | 401 | } |
402 | read_lock_irq(&devs_lock); | ||
403 | if (list_empty(&br2684_devs)) { | ||
404 | /* last br2684 device */ | ||
405 | unregister_atmdevice_notifier(&atm_dev_notifier); | ||
406 | } | ||
407 | read_unlock_irq(&devs_lock); | ||
408 | return; | 402 | return; |
409 | } | 403 | } |
410 | 404 | ||
@@ -675,7 +669,6 @@ static int br2684_create(void __user *arg) | |||
675 | 669 | ||
676 | if (list_empty(&br2684_devs)) { | 670 | if (list_empty(&br2684_devs)) { |
677 | /* 1st br2684 device */ | 671 | /* 1st br2684 device */ |
678 | register_atmdevice_notifier(&atm_dev_notifier); | ||
679 | brdev->number = 1; | 672 | brdev->number = 1; |
680 | } else | 673 | } else |
681 | brdev->number = BRPRIV(list_entry_brdev(br2684_devs.prev))->number + 1; | 674 | brdev->number = BRPRIV(list_entry_brdev(br2684_devs.prev))->number + 1; |
@@ -815,6 +808,7 @@ static int __init br2684_init(void) | |||
815 | return -ENOMEM; | 808 | return -ENOMEM; |
816 | #endif | 809 | #endif |
817 | register_atm_ioctl(&br2684_ioctl_ops); | 810 | register_atm_ioctl(&br2684_ioctl_ops); |
811 | register_atmdevice_notifier(&atm_dev_notifier); | ||
818 | return 0; | 812 | return 0; |
819 | } | 813 | } |
820 | 814 | ||
@@ -830,9 +824,7 @@ static void __exit br2684_exit(void) | |||
830 | #endif | 824 | #endif |
831 | 825 | ||
832 | 826 | ||
833 | /* if not already empty */ | 827 | unregister_atmdevice_notifier(&atm_dev_notifier); |
834 | if (!list_empty(&br2684_devs)) | ||
835 | unregister_atmdevice_notifier(&atm_dev_notifier); | ||
836 | 828 | ||
837 | while (!list_empty(&br2684_devs)) { | 829 | while (!list_empty(&br2684_devs)) { |
838 | net_dev = list_entry_brdev(br2684_devs.next); | 830 | net_dev = list_entry_brdev(br2684_devs.next); |
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index 5ed00bd7009f..137f23259a93 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c | |||
@@ -761,9 +761,11 @@ static int br_nf_dev_queue_xmit(struct sk_buff *skb) | |||
761 | { | 761 | { |
762 | if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) && | 762 | if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) && |
763 | skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu && | 763 | skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu && |
764 | !skb_is_gso(skb)) | 764 | !skb_is_gso(skb)) { |
765 | /* BUG: Should really parse the IP options here. */ | ||
766 | memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); | ||
765 | return ip_fragment(skb, br_dev_queue_push_xmit); | 767 | return ip_fragment(skb, br_dev_queue_push_xmit); |
766 | else | 768 | } else |
767 | return br_dev_queue_push_xmit(skb); | 769 | return br_dev_queue_push_xmit(skb); |
768 | } | 770 | } |
769 | #else | 771 | #else |
diff --git a/net/core/dev.c b/net/core/dev.c index 3721fbb9a83c..660dd41aaaa6 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -2058,16 +2058,16 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev, | |||
2058 | struct sk_buff *skb) | 2058 | struct sk_buff *skb) |
2059 | { | 2059 | { |
2060 | int queue_index; | 2060 | int queue_index; |
2061 | struct sock *sk = skb->sk; | 2061 | const struct net_device_ops *ops = dev->netdev_ops; |
2062 | 2062 | ||
2063 | queue_index = sk_tx_queue_get(sk); | 2063 | if (ops->ndo_select_queue) { |
2064 | if (queue_index < 0) { | 2064 | queue_index = ops->ndo_select_queue(dev, skb); |
2065 | const struct net_device_ops *ops = dev->netdev_ops; | 2065 | queue_index = dev_cap_txqueue(dev, queue_index); |
2066 | } else { | ||
2067 | struct sock *sk = skb->sk; | ||
2068 | queue_index = sk_tx_queue_get(sk); | ||
2069 | if (queue_index < 0) { | ||
2066 | 2070 | ||
2067 | if (ops->ndo_select_queue) { | ||
2068 | queue_index = ops->ndo_select_queue(dev, skb); | ||
2069 | queue_index = dev_cap_txqueue(dev, queue_index); | ||
2070 | } else { | ||
2071 | queue_index = 0; | 2071 | queue_index = 0; |
2072 | if (dev->real_num_tx_queues > 1) | 2072 | if (dev->real_num_tx_queues > 1) |
2073 | queue_index = skb_tx_hash(dev, skb); | 2073 | queue_index = skb_tx_hash(dev, skb); |
@@ -4845,7 +4845,7 @@ static void rollback_registered_many(struct list_head *head) | |||
4845 | dev = list_first_entry(head, struct net_device, unreg_list); | 4845 | dev = list_first_entry(head, struct net_device, unreg_list); |
4846 | call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev); | 4846 | call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev); |
4847 | 4847 | ||
4848 | synchronize_net(); | 4848 | rcu_barrier(); |
4849 | 4849 | ||
4850 | list_for_each_entry(dev, head, unreg_list) | 4850 | list_for_each_entry(dev, head, unreg_list) |
4851 | dev_put(dev); | 4851 | dev_put(dev); |
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c index 9fbe7f7429b0..6743146e4d6b 100644 --- a/net/core/gen_estimator.c +++ b/net/core/gen_estimator.c | |||
@@ -232,7 +232,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats, | |||
232 | est->last_packets = bstats->packets; | 232 | est->last_packets = bstats->packets; |
233 | est->avpps = rate_est->pps<<10; | 233 | est->avpps = rate_est->pps<<10; |
234 | 234 | ||
235 | spin_lock(&est_tree_lock); | 235 | spin_lock_bh(&est_tree_lock); |
236 | if (!elist[idx].timer.function) { | 236 | if (!elist[idx].timer.function) { |
237 | INIT_LIST_HEAD(&elist[idx].list); | 237 | INIT_LIST_HEAD(&elist[idx].list); |
238 | setup_timer(&elist[idx].timer, est_timer, idx); | 238 | setup_timer(&elist[idx].timer, est_timer, idx); |
@@ -243,7 +243,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats, | |||
243 | 243 | ||
244 | list_add_rcu(&est->list, &elist[idx].list); | 244 | list_add_rcu(&est->list, &elist[idx].list); |
245 | gen_add_node(est); | 245 | gen_add_node(est); |
246 | spin_unlock(&est_tree_lock); | 246 | spin_unlock_bh(&est_tree_lock); |
247 | 247 | ||
248 | return 0; | 248 | return 0; |
249 | } | 249 | } |
@@ -270,7 +270,7 @@ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats, | |||
270 | { | 270 | { |
271 | struct gen_estimator *e; | 271 | struct gen_estimator *e; |
272 | 272 | ||
273 | spin_lock(&est_tree_lock); | 273 | spin_lock_bh(&est_tree_lock); |
274 | while ((e = gen_find_node(bstats, rate_est))) { | 274 | while ((e = gen_find_node(bstats, rate_est))) { |
275 | rb_erase(&e->node, &est_root); | 275 | rb_erase(&e->node, &est_root); |
276 | 276 | ||
@@ -281,7 +281,7 @@ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats, | |||
281 | list_del_rcu(&e->list); | 281 | list_del_rcu(&e->list); |
282 | call_rcu(&e->e_rcu, __gen_kill_estimator); | 282 | call_rcu(&e->e_rcu, __gen_kill_estimator); |
283 | } | 283 | } |
284 | spin_unlock(&est_tree_lock); | 284 | spin_unlock_bh(&est_tree_lock); |
285 | } | 285 | } |
286 | EXPORT_SYMBOL(gen_kill_estimator); | 286 | EXPORT_SYMBOL(gen_kill_estimator); |
287 | 287 | ||
@@ -320,9 +320,9 @@ bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats, | |||
320 | 320 | ||
321 | ASSERT_RTNL(); | 321 | ASSERT_RTNL(); |
322 | 322 | ||
323 | spin_lock(&est_tree_lock); | 323 | spin_lock_bh(&est_tree_lock); |
324 | res = gen_find_node(bstats, rate_est) != NULL; | 324 | res = gen_find_node(bstats, rate_est) != NULL; |
325 | spin_unlock(&est_tree_lock); | 325 | spin_unlock_bh(&est_tree_lock); |
326 | 326 | ||
327 | return res; | 327 | return res; |
328 | } | 328 | } |
diff --git a/net/core/iovec.c b/net/core/iovec.c index 1cd98df412df..e6b133b77ccb 100644 --- a/net/core/iovec.c +++ b/net/core/iovec.c | |||
@@ -35,9 +35,10 @@ | |||
35 | * in any case. | 35 | * in any case. |
36 | */ | 36 | */ |
37 | 37 | ||
38 | int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode) | 38 | long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode) |
39 | { | 39 | { |
40 | int size, err, ct; | 40 | int size, ct; |
41 | long err; | ||
41 | 42 | ||
42 | if (m->msg_namelen) { | 43 | if (m->msg_namelen) { |
43 | if (mode == VERIFY_READ) { | 44 | if (mode == VERIFY_READ) { |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 3a2513f0d0c3..c83b421341c0 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -2573,6 +2573,10 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features) | |||
2573 | __copy_skb_header(nskb, skb); | 2573 | __copy_skb_header(nskb, skb); |
2574 | nskb->mac_len = skb->mac_len; | 2574 | nskb->mac_len = skb->mac_len; |
2575 | 2575 | ||
2576 | /* nskb and skb might have different headroom */ | ||
2577 | if (nskb->ip_summed == CHECKSUM_PARTIAL) | ||
2578 | nskb->csum_start += skb_headroom(nskb) - headroom; | ||
2579 | |||
2576 | skb_reset_mac_header(nskb); | 2580 | skb_reset_mac_header(nskb); |
2577 | skb_set_network_header(nskb, skb->mac_len); | 2581 | skb_set_network_header(nskb, skb->mac_len); |
2578 | nskb->transport_header = (nskb->network_header + | 2582 | nskb->transport_header = (nskb->network_header + |
@@ -2703,7 +2707,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) | |||
2703 | return -E2BIG; | 2707 | return -E2BIG; |
2704 | 2708 | ||
2705 | headroom = skb_headroom(p); | 2709 | headroom = skb_headroom(p); |
2706 | nskb = netdev_alloc_skb(p->dev, headroom + skb_gro_offset(p)); | 2710 | nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC); |
2707 | if (unlikely(!nskb)) | 2711 | if (unlikely(!nskb)) |
2708 | return -ENOMEM; | 2712 | return -ENOMEM; |
2709 | 2713 | ||
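
The skbuff.c hunk fixes checksum offload for segments that end up with a different headroom than the original skb: csum_start is an offset from skb->head, so it must be shifted by the headroom difference to keep pointing at the same packet byte. The adjustment is plain offset arithmetic; a sketch with assumed byte offsets:

#include <stdio.h>

int main(void)
{
	/* Assumed example offsets, all measured in bytes from skb->head. */
	unsigned int headroom = 64;		/* headroom of the original skb */
	unsigned int nskb_headroom = 96;	/* headroom of the new segment */
	unsigned int csum_start = 64 + 14;	/* checksum begins after an assumed
						 * 14-byte Ethernet header */

	/* Same adjustment as in the hunk above. */
	csum_start += nskb_headroom - headroom;

	printf("csum_start = %u\n", csum_start);	/* 110 */
	return 0;
}
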
diff --git a/net/core/sock.c b/net/core/sock.c index b05b9b6ddb87..ef30e9d286e7 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -1351,9 +1351,9 @@ int sock_i_uid(struct sock *sk) | |||
1351 | { | 1351 | { |
1352 | int uid; | 1352 | int uid; |
1353 | 1353 | ||
1354 | read_lock(&sk->sk_callback_lock); | 1354 | read_lock_bh(&sk->sk_callback_lock); |
1355 | uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0; | 1355 | uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0; |
1356 | read_unlock(&sk->sk_callback_lock); | 1356 | read_unlock_bh(&sk->sk_callback_lock); |
1357 | return uid; | 1357 | return uid; |
1358 | } | 1358 | } |
1359 | EXPORT_SYMBOL(sock_i_uid); | 1359 | EXPORT_SYMBOL(sock_i_uid); |
@@ -1362,9 +1362,9 @@ unsigned long sock_i_ino(struct sock *sk) | |||
1362 | { | 1362 | { |
1363 | unsigned long ino; | 1363 | unsigned long ino; |
1364 | 1364 | ||
1365 | read_lock(&sk->sk_callback_lock); | 1365 | read_lock_bh(&sk->sk_callback_lock); |
1366 | ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0; | 1366 | ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0; |
1367 | read_unlock(&sk->sk_callback_lock); | 1367 | read_unlock_bh(&sk->sk_callback_lock); |
1368 | return ino; | 1368 | return ino; |
1369 | } | 1369 | } |
1370 | EXPORT_SYMBOL(sock_i_ino); | 1370 | EXPORT_SYMBOL(sock_i_ino); |
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index 7c3a7d191249..72380a30d1c8 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig | |||
@@ -46,7 +46,7 @@ config IP_ADVANCED_ROUTER | |||
46 | rp_filter on use: | 46 | rp_filter on use: |
47 | 47 | ||
48 | echo 1 > /proc/sys/net/ipv4/conf/<device>/rp_filter | 48 | echo 1 > /proc/sys/net/ipv4/conf/<device>/rp_filter |
49 | and | 49 | or |
50 | echo 1 > /proc/sys/net/ipv4/conf/all/rp_filter | 50 | echo 1 > /proc/sys/net/ipv4/conf/all/rp_filter |
51 | 51 | ||
52 | Note that some distributions enable it in startup scripts. | 52 | Note that some distributions enable it in startup scripts. |
@@ -217,6 +217,7 @@ config NET_IPIP | |||
217 | 217 | ||
218 | config NET_IPGRE | 218 | config NET_IPGRE |
219 | tristate "IP: GRE tunnels over IP" | 219 | tristate "IP: GRE tunnels over IP" |
220 | depends on IPV6 || IPV6=n | ||
220 | help | 221 | help |
221 | Tunneling means encapsulating data of one protocol type within | 222 | Tunneling means encapsulating data of one protocol type within |
222 | another protocol and sending it over a channel that understands the | 223 | another protocol and sending it over a channel that understands the |
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c index f0550941df7b..721a8a37b45c 100644 --- a/net/ipv4/datagram.c +++ b/net/ipv4/datagram.c | |||
@@ -62,8 +62,11 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
62 | } | 62 | } |
63 | if (!inet->inet_saddr) | 63 | if (!inet->inet_saddr) |
64 | inet->inet_saddr = rt->rt_src; /* Update source address */ | 64 | inet->inet_saddr = rt->rt_src; /* Update source address */ |
65 | if (!inet->inet_rcv_saddr) | 65 | if (!inet->inet_rcv_saddr) { |
66 | inet->inet_rcv_saddr = rt->rt_src; | 66 | inet->inet_rcv_saddr = rt->rt_src; |
67 | if (sk->sk_prot->rehash) | ||
68 | sk->sk_prot->rehash(sk); | ||
69 | } | ||
67 | inet->inet_daddr = rt->rt_dst; | 70 | inet->inet_daddr = rt->rt_dst; |
68 | inet->inet_dport = usin->sin_port; | 71 | inet->inet_dport = usin->sin_port; |
69 | sk->sk_state = TCP_ESTABLISHED; | 72 | sk->sk_state = TCP_ESTABLISHED; |
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index a43968918350..7d02a9f999fa 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c | |||
@@ -246,6 +246,7 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif, | |||
246 | 246 | ||
247 | struct fib_result res; | 247 | struct fib_result res; |
248 | int no_addr, rpf, accept_local; | 248 | int no_addr, rpf, accept_local; |
249 | bool dev_match; | ||
249 | int ret; | 250 | int ret; |
250 | struct net *net; | 251 | struct net *net; |
251 | 252 | ||
@@ -273,12 +274,22 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif, | |||
273 | } | 274 | } |
274 | *spec_dst = FIB_RES_PREFSRC(res); | 275 | *spec_dst = FIB_RES_PREFSRC(res); |
275 | fib_combine_itag(itag, &res); | 276 | fib_combine_itag(itag, &res); |
277 | dev_match = false; | ||
278 | |||
276 | #ifdef CONFIG_IP_ROUTE_MULTIPATH | 279 | #ifdef CONFIG_IP_ROUTE_MULTIPATH |
277 | if (FIB_RES_DEV(res) == dev || res.fi->fib_nhs > 1) | 280 | for (ret = 0; ret < res.fi->fib_nhs; ret++) { |
281 | struct fib_nh *nh = &res.fi->fib_nh[ret]; | ||
282 | |||
283 | if (nh->nh_dev == dev) { | ||
284 | dev_match = true; | ||
285 | break; | ||
286 | } | ||
287 | } | ||
278 | #else | 288 | #else |
279 | if (FIB_RES_DEV(res) == dev) | 289 | if (FIB_RES_DEV(res) == dev) |
290 | dev_match = true; | ||
280 | #endif | 291 | #endif |
281 | { | 292 | if (dev_match) { |
282 | ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST; | 293 | ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST; |
283 | fib_res_put(&res); | 294 | fib_res_put(&res); |
284 | return ret; | 295 | return ret; |
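
The fib_frontend.c hunk fixes reverse-path filtering for multipath routes: instead of comparing only FIB_RES_DEV() (or accepting any route with more than one next hop), it walks every next hop of the matched route and accepts the packet if any of them uses the device the packet arrived on. A sketch of that loop over a hypothetical two-nexthop route:

#include <stdbool.h>
#include <stdio.h>

struct fib_nh { int nh_dev; };

/* Hypothetical multipath route with next hops on devices 2 and 5. */
static const struct fib_nh nhs[] = { { .nh_dev = 2 }, { .nh_dev = 5 } };

static bool dev_match(int dev)
{
	unsigned int i;

	for (i = 0; i < sizeof(nhs) / sizeof(nhs[0]); i++)
		if (nhs[i].nh_dev == dev)
			return true;
	return false;
}

int main(void)
{
	printf("%d %d\n", dev_match(5), dev_match(3));	/* 1 0 */
	return 0;
}
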
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 79d057a939ba..4a8e370862bc 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c | |||
@@ -186,7 +186,9 @@ static inline struct tnode *node_parent_rcu(struct node *node) | |||
186 | { | 186 | { |
187 | struct tnode *ret = node_parent(node); | 187 | struct tnode *ret = node_parent(node); |
188 | 188 | ||
189 | return rcu_dereference(ret); | 189 | return rcu_dereference_check(ret, |
190 | rcu_read_lock_held() || | ||
191 | lockdep_rtnl_is_held()); | ||
190 | } | 192 | } |
191 | 193 | ||
192 | /* Same as rcu_assign_pointer | 194 | /* Same as rcu_assign_pointer |
@@ -1753,7 +1755,9 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct node *c) | |||
1753 | 1755 | ||
1754 | static struct leaf *trie_firstleaf(struct trie *t) | 1756 | static struct leaf *trie_firstleaf(struct trie *t) |
1755 | { | 1757 | { |
1756 | struct tnode *n = (struct tnode *) rcu_dereference(t->trie); | 1758 | struct tnode *n = (struct tnode *) rcu_dereference_check(t->trie, |
1759 | rcu_read_lock_held() || | ||
1760 | lockdep_rtnl_is_held()); | ||
1757 | 1761 | ||
1758 | if (!n) | 1762 | if (!n) |
1759 | return NULL; | 1763 | return NULL; |
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index a1ad0e7180d2..1fdcacd36ce7 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c | |||
@@ -834,7 +834,7 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb, | |||
834 | int mark = 0; | 834 | int mark = 0; |
835 | 835 | ||
836 | 836 | ||
837 | if (len == 8) { | 837 | if (len == 8 || IGMP_V2_SEEN(in_dev)) { |
838 | if (ih->code == 0) { | 838 | if (ih->code == 0) { |
839 | /* Alas, old v1 router presents here. */ | 839 | /* Alas, old v1 router presents here. */ |
840 | 840 | ||
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 945b20a5ad50..35c93e8b6a46 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
@@ -45,7 +45,7 @@ | |||
45 | #include <net/netns/generic.h> | 45 | #include <net/netns/generic.h> |
46 | #include <net/rtnetlink.h> | 46 | #include <net/rtnetlink.h> |
47 | 47 | ||
48 | #ifdef CONFIG_IPV6 | 48 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) |
49 | #include <net/ipv6.h> | 49 | #include <net/ipv6.h> |
50 | #include <net/ip6_fib.h> | 50 | #include <net/ip6_fib.h> |
51 | #include <net/ip6_route.h> | 51 | #include <net/ip6_route.h> |
@@ -699,7 +699,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev | |||
699 | if ((dst = rt->rt_gateway) == 0) | 699 | if ((dst = rt->rt_gateway) == 0) |
700 | goto tx_error_icmp; | 700 | goto tx_error_icmp; |
701 | } | 701 | } |
702 | #ifdef CONFIG_IPV6 | 702 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) |
703 | else if (skb->protocol == htons(ETH_P_IPV6)) { | 703 | else if (skb->protocol == htons(ETH_P_IPV6)) { |
704 | struct in6_addr *addr6; | 704 | struct in6_addr *addr6; |
705 | int addr_type; | 705 | int addr_type; |
@@ -774,7 +774,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev | |||
774 | goto tx_error; | 774 | goto tx_error; |
775 | } | 775 | } |
776 | } | 776 | } |
777 | #ifdef CONFIG_IPV6 | 777 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) |
778 | else if (skb->protocol == htons(ETH_P_IPV6)) { | 778 | else if (skb->protocol == htons(ETH_P_IPV6)) { |
779 | struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb); | 779 | struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb); |
780 | 780 | ||
@@ -850,7 +850,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev | |||
850 | if ((iph->ttl = tiph->ttl) == 0) { | 850 | if ((iph->ttl = tiph->ttl) == 0) { |
851 | if (skb->protocol == htons(ETH_P_IP)) | 851 | if (skb->protocol == htons(ETH_P_IP)) |
852 | iph->ttl = old_iph->ttl; | 852 | iph->ttl = old_iph->ttl; |
853 | #ifdef CONFIG_IPV6 | 853 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) |
854 | else if (skb->protocol == htons(ETH_P_IPV6)) | 854 | else if (skb->protocol == htons(ETH_P_IPV6)) |
855 | iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit; | 855 | iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit; |
856 | #endif | 856 | #endif |
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 04b69896df5f..7649d7750075 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
@@ -488,9 +488,8 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
488 | * we can switch to copy when we see the first bad fragment. | 488 | * we can switch to copy when we see the first bad fragment. |
489 | */ | 489 | */ |
490 | if (skb_has_frags(skb)) { | 490 | if (skb_has_frags(skb)) { |
491 | struct sk_buff *frag; | 491 | struct sk_buff *frag, *frag2; |
492 | int first_len = skb_pagelen(skb); | 492 | int first_len = skb_pagelen(skb); |
493 | int truesizes = 0; | ||
494 | 493 | ||
495 | if (first_len - hlen > mtu || | 494 | if (first_len - hlen > mtu || |
496 | ((first_len - hlen) & 7) || | 495 | ((first_len - hlen) & 7) || |
@@ -503,18 +502,18 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
503 | if (frag->len > mtu || | 502 | if (frag->len > mtu || |
504 | ((frag->len & 7) && frag->next) || | 503 | ((frag->len & 7) && frag->next) || |
505 | skb_headroom(frag) < hlen) | 504 | skb_headroom(frag) < hlen) |
506 | goto slow_path; | 505 | goto slow_path_clean; |
507 | 506 | ||
508 | /* Partially cloned skb? */ | 507 | /* Partially cloned skb? */ |
509 | if (skb_shared(frag)) | 508 | if (skb_shared(frag)) |
510 | goto slow_path; | 509 | goto slow_path_clean; |
511 | 510 | ||
512 | BUG_ON(frag->sk); | 511 | BUG_ON(frag->sk); |
513 | if (skb->sk) { | 512 | if (skb->sk) { |
514 | frag->sk = skb->sk; | 513 | frag->sk = skb->sk; |
515 | frag->destructor = sock_wfree; | 514 | frag->destructor = sock_wfree; |
516 | } | 515 | } |
517 | truesizes += frag->truesize; | 516 | skb->truesize -= frag->truesize; |
518 | } | 517 | } |
519 | 518 | ||
520 | /* Everything is OK. Generate! */ | 519 | /* Everything is OK. Generate! */ |
@@ -524,7 +523,6 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
524 | frag = skb_shinfo(skb)->frag_list; | 523 | frag = skb_shinfo(skb)->frag_list; |
525 | skb_frag_list_init(skb); | 524 | skb_frag_list_init(skb); |
526 | skb->data_len = first_len - skb_headlen(skb); | 525 | skb->data_len = first_len - skb_headlen(skb); |
527 | skb->truesize -= truesizes; | ||
528 | skb->len = first_len; | 526 | skb->len = first_len; |
529 | iph->tot_len = htons(first_len); | 527 | iph->tot_len = htons(first_len); |
530 | iph->frag_off = htons(IP_MF); | 528 | iph->frag_off = htons(IP_MF); |
@@ -576,6 +574,15 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
576 | } | 574 | } |
577 | IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS); | 575 | IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS); |
578 | return err; | 576 | return err; |
577 | |||
578 | slow_path_clean: | ||
579 | skb_walk_frags(skb, frag2) { | ||
580 | if (frag2 == frag) | ||
581 | break; | ||
582 | frag2->sk = NULL; | ||
583 | frag2->destructor = NULL; | ||
584 | skb->truesize += frag2->truesize; | ||
585 | } | ||
579 | } | 586 | } |
580 | 587 | ||
581 | slow_path: | 588 | slow_path: |
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 6c40a8c46e79..64b70ad162e3 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c | |||
@@ -1129,6 +1129,9 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, | |||
1129 | case IP_HDRINCL: | 1129 | case IP_HDRINCL: |
1130 | val = inet->hdrincl; | 1130 | val = inet->hdrincl; |
1131 | break; | 1131 | break; |
1132 | case IP_NODEFRAG: | ||
1133 | val = inet->nodefrag; | ||
1134 | break; | ||
1132 | case IP_MTU_DISCOVER: | 1135 | case IP_MTU_DISCOVER: |
1133 | val = inet->pmtudisc; | 1136 | val = inet->pmtudisc; |
1134 | break; | 1137 | break; |
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c index b254dafaf429..43eec80c0e7c 100644 --- a/net/ipv4/netfilter/ipt_REJECT.c +++ b/net/ipv4/netfilter/ipt_REJECT.c | |||
@@ -112,6 +112,7 @@ static void send_reset(struct sk_buff *oldskb, int hook) | |||
112 | /* ip_route_me_harder expects skb->dst to be set */ | 112 | /* ip_route_me_harder expects skb->dst to be set */ |
113 | skb_dst_set_noref(nskb, skb_dst(oldskb)); | 113 | skb_dst_set_noref(nskb, skb_dst(oldskb)); |
114 | 114 | ||
115 | nskb->protocol = htons(ETH_P_IP); | ||
115 | if (ip_route_me_harder(nskb, addr_type)) | 116 | if (ip_route_me_harder(nskb, addr_type)) |
116 | goto free_nskb; | 117 | goto free_nskb; |
117 | 118 | ||
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c index eab8de32f200..f3a9b42b16c6 100644 --- a/net/ipv4/netfilter/nf_defrag_ipv4.c +++ b/net/ipv4/netfilter/nf_defrag_ipv4.c | |||
@@ -66,9 +66,11 @@ static unsigned int ipv4_conntrack_defrag(unsigned int hooknum, | |||
66 | const struct net_device *out, | 66 | const struct net_device *out, |
67 | int (*okfn)(struct sk_buff *)) | 67 | int (*okfn)(struct sk_buff *)) |
68 | { | 68 | { |
69 | struct sock *sk = skb->sk; | ||
69 | struct inet_sock *inet = inet_sk(skb->sk); | 70 | struct inet_sock *inet = inet_sk(skb->sk); |
70 | 71 | ||
71 | if (inet && inet->nodefrag) | 72 | if (sk && (sk->sk_family == PF_INET) && |
73 | inet->nodefrag) | ||
72 | return NF_ACCEPT; | 74 | return NF_ACCEPT; |
73 | 75 | ||
74 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) | 76 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) |
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c index 1679e2c0963d..ee5f419d0a56 100644 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c | |||
@@ -893,13 +893,15 @@ static void fast_csum(__sum16 *csum, | |||
893 | unsigned char s[4]; | 893 | unsigned char s[4]; |
894 | 894 | ||
895 | if (offset & 1) { | 895 | if (offset & 1) { |
896 | s[0] = s[2] = 0; | 896 | s[0] = ~0; |
897 | s[1] = ~*optr; | 897 | s[1] = ~*optr; |
898 | s[2] = 0; | ||
898 | s[3] = *nptr; | 899 | s[3] = *nptr; |
899 | } else { | 900 | } else { |
900 | s[1] = s[3] = 0; | ||
901 | s[0] = ~*optr; | 901 | s[0] = ~*optr; |
902 | s[1] = ~0; | ||
902 | s[2] = *nptr; | 903 | s[2] = *nptr; |
904 | s[3] = 0; | ||
903 | } | 905 | } |
904 | 906 | ||
905 | *csum = csum_fold(csum_partial(s, 4, ~csum_unfold(*csum))); | 907 | *csum = csum_fold(csum_partial(s, 4, ~csum_unfold(*csum))); |
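
The nf_nat_snmp_basic.c hunk corrects an incremental checksum update: the 16-bit word containing the old byte has to be added in one's complement form, so the padding byte next to ~old must be ~0 rather than 0 (while the word carrying the new byte is padded with 0). The update itself follows the familiar RFC 1624 form HC' = ~(~HC + ~m + m'); a userspace sketch with an assumed starting checksum:

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit sum into 16 bits with end-around carry. */
static uint16_t fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* Incremental update of a 16-bit Internet checksum when one aligned
 * 16-bit word changes from m_old to m_new: HC' = ~(~HC + ~m + m').
 * Complementing the whole old word is what the ~0 padding byte in the
 * hunk above achieves. */
static uint16_t csum_update(uint16_t hc, uint16_t m_old, uint16_t m_new)
{
	uint32_t sum = (uint16_t)~hc;

	sum += (uint16_t)~m_old;
	sum += m_new;
	return (uint16_t)~fold(sum);
}

int main(void)
{
	/* Hypothetical word changing from 0x1234 to 0x1284 under an
	 * assumed original checksum of 0xb1e6. */
	printf("0x%04x\n", csum_update(0xb1e6, 0x1234, 0x1284));
	return 0;
}
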
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 3f56b6e6c6aa..ac6559cb54f9 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -1231,7 +1231,7 @@ restart: | |||
1231 | } | 1231 | } |
1232 | 1232 | ||
1233 | if (net_ratelimit()) | 1233 | if (net_ratelimit()) |
1234 | printk(KERN_WARNING "Neighbour table overflow.\n"); | 1234 | printk(KERN_WARNING "ipv4: Neighbour table overflow.\n"); |
1235 | rt_drop(rt); | 1235 | rt_drop(rt); |
1236 | return -ENOBUFS; | 1236 | return -ENOBUFS; |
1237 | } | 1237 | } |
@@ -2738,6 +2738,11 @@ slow_output: | |||
2738 | } | 2738 | } |
2739 | EXPORT_SYMBOL_GPL(__ip_route_output_key); | 2739 | EXPORT_SYMBOL_GPL(__ip_route_output_key); |
2740 | 2740 | ||
2741 | static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie) | ||
2742 | { | ||
2743 | return NULL; | ||
2744 | } | ||
2745 | |||
2741 | static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu) | 2746 | static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu) |
2742 | { | 2747 | { |
2743 | } | 2748 | } |
@@ -2746,7 +2751,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = { | |||
2746 | .family = AF_INET, | 2751 | .family = AF_INET, |
2747 | .protocol = cpu_to_be16(ETH_P_IP), | 2752 | .protocol = cpu_to_be16(ETH_P_IP), |
2748 | .destroy = ipv4_dst_destroy, | 2753 | .destroy = ipv4_dst_destroy, |
2749 | .check = ipv4_dst_check, | 2754 | .check = ipv4_blackhole_dst_check, |
2750 | .update_pmtu = ipv4_rt_blackhole_update_pmtu, | 2755 | .update_pmtu = ipv4_rt_blackhole_update_pmtu, |
2751 | .entries = ATOMIC_INIT(0), | 2756 | .entries = ATOMIC_INIT(0), |
2752 | }; | 2757 | }; |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 3fb1428e526e..f115ea68a4ef 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -386,8 +386,6 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait) | |||
386 | */ | 386 | */ |
387 | 387 | ||
388 | mask = 0; | 388 | mask = 0; |
389 | if (sk->sk_err) | ||
390 | mask = POLLERR; | ||
391 | 389 | ||
392 | /* | 390 | /* |
393 | * POLLHUP is certainly not done right. But poll() doesn't | 391 | * POLLHUP is certainly not done right. But poll() doesn't |
@@ -457,6 +455,11 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait) | |||
457 | if (tp->urg_data & TCP_URG_VALID) | 455 | if (tp->urg_data & TCP_URG_VALID) |
458 | mask |= POLLPRI; | 456 | mask |= POLLPRI; |
459 | } | 457 | } |
458 | /* This barrier is coupled with smp_wmb() in tcp_reset() */ | ||
459 | smp_rmb(); | ||
460 | if (sk->sk_err) | ||
461 | mask |= POLLERR; | ||
462 | |||
460 | return mask; | 463 | return mask; |
461 | } | 464 | } |
462 | EXPORT_SYMBOL(tcp_poll); | 465 | EXPORT_SYMBOL(tcp_poll); |
@@ -940,7 +943,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
940 | sg = sk->sk_route_caps & NETIF_F_SG; | 943 | sg = sk->sk_route_caps & NETIF_F_SG; |
941 | 944 | ||
942 | while (--iovlen >= 0) { | 945 | while (--iovlen >= 0) { |
943 | int seglen = iov->iov_len; | 946 | size_t seglen = iov->iov_len; |
944 | unsigned char __user *from = iov->iov_base; | 947 | unsigned char __user *from = iov->iov_base; |
945 | 948 | ||
946 | iov++; | 949 | iov++; |
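
The tcp.c and tcp_input.c hunks close a race in poll(): tcp_reset() now issues smp_wmb() after storing sk->sk_err, and tcp_poll() re-checks sk->sk_err only after the paired smp_rmb(), so a reader that observes later socket state also observes the error. A loosely analogous userspace sketch of the publish/observe pattern, using C11 fences in place of the kernel barriers:

#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

static int sk_err;			/* written first by the "reset" side */
static atomic_int sk_shutdown;		/* written later, tested first by the reader */

static void reset_side(void)
{
	sk_err = ECONNRESET;
	atomic_thread_fence(memory_order_release);	/* stands in for smp_wmb() */
	atomic_store_explicit(&sk_shutdown, 1, memory_order_relaxed);
}

static void poll_side(void)
{
	if (atomic_load_explicit(&sk_shutdown, memory_order_relaxed)) {
		atomic_thread_fence(memory_order_acquire);	/* stands in for smp_rmb() */
		printf("sk_err = %d\n", sk_err);	/* ECONNRESET is guaranteed visible */
	}
}

int main(void)
{
	reset_side();
	poll_side();
	return 0;
}
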
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index e663b78a2ef6..b55f60f6fcbe 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -2545,7 +2545,8 @@ static void tcp_mark_head_lost(struct sock *sk, int packets) | |||
2545 | cnt += tcp_skb_pcount(skb); | 2545 | cnt += tcp_skb_pcount(skb); |
2546 | 2546 | ||
2547 | if (cnt > packets) { | 2547 | if (cnt > packets) { |
2548 | if (tcp_is_sack(tp) || (oldcnt >= packets)) | 2548 | if ((tcp_is_sack(tp) && !tcp_is_fack(tp)) || |
2549 | (oldcnt >= packets)) | ||
2549 | break; | 2550 | break; |
2550 | 2551 | ||
2551 | mss = skb_shinfo(skb)->gso_size; | 2552 | mss = skb_shinfo(skb)->gso_size; |
@@ -4048,6 +4049,8 @@ static void tcp_reset(struct sock *sk) | |||
4048 | default: | 4049 | default: |
4049 | sk->sk_err = ECONNRESET; | 4050 | sk->sk_err = ECONNRESET; |
4050 | } | 4051 | } |
4052 | /* This barrier is coupled with smp_rmb() in tcp_poll() */ | ||
4053 | smp_wmb(); | ||
4051 | 4054 | ||
4052 | if (!sock_flag(sk, SOCK_DEAD)) | 4055 | if (!sock_flag(sk, SOCK_DEAD)) |
4053 | sk->sk_error_report(sk); | 4056 | sk->sk_error_report(sk); |
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index c35b469e851c..74c54b30600f 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c | |||
@@ -135,13 +135,16 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk) | |||
135 | 135 | ||
136 | /* This function calculates a "timeout" which is equivalent to the timeout of a | 136 | /* This function calculates a "timeout" which is equivalent to the timeout of a |
137 | * TCP connection after "boundary" unsuccessful, exponentially backed-off | 137 | * TCP connection after "boundary" unsuccessful, exponentially backed-off |
138 | * retransmissions with an initial RTO of TCP_RTO_MIN. | 138 | * retransmissions with an initial RTO of TCP_RTO_MIN or TCP_TIMEOUT_INIT if |
139 | * syn_set flag is set. | ||
139 | */ | 140 | */ |
140 | static bool retransmits_timed_out(struct sock *sk, | 141 | static bool retransmits_timed_out(struct sock *sk, |
141 | unsigned int boundary) | 142 | unsigned int boundary, |
143 | bool syn_set) | ||
142 | { | 144 | { |
143 | unsigned int timeout, linear_backoff_thresh; | 145 | unsigned int timeout, linear_backoff_thresh; |
144 | unsigned int start_ts; | 146 | unsigned int start_ts; |
147 | unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN; | ||
145 | 148 | ||
146 | if (!inet_csk(sk)->icsk_retransmits) | 149 | if (!inet_csk(sk)->icsk_retransmits) |
147 | return false; | 150 | return false; |
@@ -151,12 +154,12 @@ static bool retransmits_timed_out(struct sock *sk, | |||
151 | else | 154 | else |
152 | start_ts = tcp_sk(sk)->retrans_stamp; | 155 | start_ts = tcp_sk(sk)->retrans_stamp; |
153 | 156 | ||
154 | linear_backoff_thresh = ilog2(TCP_RTO_MAX/TCP_RTO_MIN); | 157 | linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base); |
155 | 158 | ||
156 | if (boundary <= linear_backoff_thresh) | 159 | if (boundary <= linear_backoff_thresh) |
157 | timeout = ((2 << boundary) - 1) * TCP_RTO_MIN; | 160 | timeout = ((2 << boundary) - 1) * rto_base; |
158 | else | 161 | else |
159 | timeout = ((2 << linear_backoff_thresh) - 1) * TCP_RTO_MIN + | 162 | timeout = ((2 << linear_backoff_thresh) - 1) * rto_base + |
160 | (boundary - linear_backoff_thresh) * TCP_RTO_MAX; | 163 | (boundary - linear_backoff_thresh) * TCP_RTO_MAX; |
161 | 164 | ||
162 | return (tcp_time_stamp - start_ts) >= timeout; | 165 | return (tcp_time_stamp - start_ts) >= timeout; |
@@ -167,14 +170,15 @@ static int tcp_write_timeout(struct sock *sk) | |||
167 | { | 170 | { |
168 | struct inet_connection_sock *icsk = inet_csk(sk); | 171 | struct inet_connection_sock *icsk = inet_csk(sk); |
169 | int retry_until; | 172 | int retry_until; |
170 | bool do_reset; | 173 | bool do_reset, syn_set = 0; |
171 | 174 | ||
172 | if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { | 175 | if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { |
173 | if (icsk->icsk_retransmits) | 176 | if (icsk->icsk_retransmits) |
174 | dst_negative_advice(sk); | 177 | dst_negative_advice(sk); |
175 | retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries; | 178 | retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries; |
179 | syn_set = 1; | ||
176 | } else { | 180 | } else { |
177 | if (retransmits_timed_out(sk, sysctl_tcp_retries1)) { | 181 | if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0)) { |
178 | /* Black hole detection */ | 182 | /* Black hole detection */ |
179 | tcp_mtu_probing(icsk, sk); | 183 | tcp_mtu_probing(icsk, sk); |
180 | 184 | ||
@@ -187,14 +191,14 @@ static int tcp_write_timeout(struct sock *sk) | |||
187 | 191 | ||
188 | retry_until = tcp_orphan_retries(sk, alive); | 192 | retry_until = tcp_orphan_retries(sk, alive); |
189 | do_reset = alive || | 193 | do_reset = alive || |
190 | !retransmits_timed_out(sk, retry_until); | 194 | !retransmits_timed_out(sk, retry_until, 0); |
191 | 195 | ||
192 | if (tcp_out_of_resources(sk, do_reset)) | 196 | if (tcp_out_of_resources(sk, do_reset)) |
193 | return 1; | 197 | return 1; |
194 | } | 198 | } |
195 | } | 199 | } |
196 | 200 | ||
197 | if (retransmits_timed_out(sk, retry_until)) { | 201 | if (retransmits_timed_out(sk, retry_until, syn_set)) { |
198 | /* Has it gone just too far? */ | 202 | /* Has it gone just too far? */ |
199 | tcp_write_err(sk); | 203 | tcp_write_err(sk); |
200 | return 1; | 204 | return 1; |
@@ -436,7 +440,7 @@ out_reset_timer: | |||
436 | icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX); | 440 | icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX); |
437 | } | 441 | } |
438 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX); | 442 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX); |
439 | if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1)) | 443 | if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0)) |
440 | __sk_dst_reset(sk); | 444 | __sk_dst_reset(sk); |
441 | 445 | ||
442 | out:; | 446 | out:; |
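
The tcp_timer.c hunks parameterise retransmits_timed_out() on the initial RTO: SYN retransmissions start from TCP_TIMEOUT_INIT rather than TCP_RTO_MIN, and the total time covered by "boundary" exponentially backed-off retransmissions is computed from that rto_base with each step capped at TCP_RTO_MAX. A userspace sketch of the same formula, with assumed millisecond values for the constants:

#include <stdio.h>

#define TCP_RTO_MIN	 200	/* ms, assumed for illustration */
#define TCP_TIMEOUT_INIT 3000	/* ms, assumed for illustration */
#define TCP_RTO_MAX	 120000	/* ms, assumed for illustration */

static unsigned int ilog2(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

/* Same shape as retransmits_timed_out(): total timeout equivalent to
 * "boundary" retransmissions that double from rto_base up to TCP_RTO_MAX. */
static unsigned int timeout_ms(unsigned int boundary, int syn_set)
{
	unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;
	unsigned int linear_backoff_thresh = ilog2(TCP_RTO_MAX / rto_base);

	if (boundary <= linear_backoff_thresh)
		return ((2 << boundary) - 1) * rto_base;
	return ((2 << linear_backoff_thresh) - 1) * rto_base +
	       (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
}

int main(void)
{
	printf("%u ms\n", timeout_ms(5, 0));	/* data retransmissions */
	printf("%u ms\n", timeout_ms(5, 1));	/* SYN retransmissions */
	return 0;
}
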
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 32e0bef60d0a..fb23c2e63b52 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -1260,6 +1260,49 @@ void udp_lib_unhash(struct sock *sk) | |||
1260 | } | 1260 | } |
1261 | EXPORT_SYMBOL(udp_lib_unhash); | 1261 | EXPORT_SYMBOL(udp_lib_unhash); |
1262 | 1262 | ||
1263 | /* | ||
1264 | * inet_rcv_saddr was changed, so we must rehash the secondary hash | ||
1265 | */ | ||
1266 | void udp_lib_rehash(struct sock *sk, u16 newhash) | ||
1267 | { | ||
1268 | if (sk_hashed(sk)) { | ||
1269 | struct udp_table *udptable = sk->sk_prot->h.udp_table; | ||
1270 | struct udp_hslot *hslot, *hslot2, *nhslot2; | ||
1271 | |||
1272 | hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); | ||
1273 | nhslot2 = udp_hashslot2(udptable, newhash); | ||
1274 | udp_sk(sk)->udp_portaddr_hash = newhash; | ||
1275 | if (hslot2 != nhslot2) { | ||
1276 | hslot = udp_hashslot(udptable, sock_net(sk), | ||
1277 | udp_sk(sk)->udp_port_hash); | ||
1278 | /* we must lock primary chain too */ | ||
1279 | spin_lock_bh(&hslot->lock); | ||
1280 | |||
1281 | spin_lock(&hslot2->lock); | ||
1282 | hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node); | ||
1283 | hslot2->count--; | ||
1284 | spin_unlock(&hslot2->lock); | ||
1285 | |||
1286 | spin_lock(&nhslot2->lock); | ||
1287 | hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node, | ||
1288 | &nhslot2->head); | ||
1289 | nhslot2->count++; | ||
1290 | spin_unlock(&nhslot2->lock); | ||
1291 | |||
1292 | spin_unlock_bh(&hslot->lock); | ||
1293 | } | ||
1294 | } | ||
1295 | } | ||
1296 | EXPORT_SYMBOL(udp_lib_rehash); | ||
1297 | |||
1298 | static void udp_v4_rehash(struct sock *sk) | ||
1299 | { | ||
1300 | u16 new_hash = udp4_portaddr_hash(sock_net(sk), | ||
1301 | inet_sk(sk)->inet_rcv_saddr, | ||
1302 | inet_sk(sk)->inet_num); | ||
1303 | udp_lib_rehash(sk, new_hash); | ||
1304 | } | ||
1305 | |||
1263 | static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | 1306 | static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) |
1264 | { | 1307 | { |
1265 | int rc; | 1308 | int rc; |
@@ -1843,6 +1886,7 @@ struct proto udp_prot = { | |||
1843 | .backlog_rcv = __udp_queue_rcv_skb, | 1886 | .backlog_rcv = __udp_queue_rcv_skb, |
1844 | .hash = udp_lib_hash, | 1887 | .hash = udp_lib_hash, |
1845 | .unhash = udp_lib_unhash, | 1888 | .unhash = udp_lib_unhash, |
1889 | .rehash = udp_v4_rehash, | ||
1846 | .get_port = udp_v4_get_port, | 1890 | .get_port = udp_v4_get_port, |
1847 | .memory_allocated = &udp_memory_allocated, | 1891 | .memory_allocated = &udp_memory_allocated, |
1848 | .sysctl_mem = sysctl_udp_mem, | 1892 | .sysctl_mem = sysctl_udp_mem, |
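
The udp.c hunk, together with the datagram.c changes, rehashes a UDP socket when connect() fills in its receive address: the secondary (address+port) hash changes, so the socket has to move from its old hslot2 chain to the new one under both the primary and secondary slot locks. A toy sketch of why the slot moves, with a made-up stand-in for udp4_portaddr_hash():

#include <stdio.h>

#define SLOTS 16

/* Made-up stand-in for udp4_portaddr_hash(): mix address and port and
 * mask down to a slot index. Purely illustrative. */
static unsigned int portaddr_slot(unsigned int saddr, unsigned int port)
{
	return (saddr * 2654435761u + port) & (SLOTS - 1);
}

int main(void)
{
	unsigned int port = 5353;
	unsigned int before = portaddr_slot(0x00000000, port);	/* bound to 0.0.0.0 */
	unsigned int after = portaddr_slot(0xc0a80001, port);	/* rcv_saddr set by connect() */

	if (before != after)
		printf("socket moves from slot %u to slot %u\n", before, after);
	return 0;
}
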
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index 869078d4eeb9..a580349f0b8a 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c | |||
@@ -61,7 +61,7 @@ static int xfrm4_get_saddr(struct net *net, | |||
61 | 61 | ||
62 | static int xfrm4_get_tos(struct flowi *fl) | 62 | static int xfrm4_get_tos(struct flowi *fl) |
63 | { | 63 | { |
64 | return fl->fl4_tos; | 64 | return IPTOS_RT_MASK & fl->fl4_tos; /* Strip ECN bits */ |
65 | } | 65 | } |
66 | 66 | ||
67 | static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst, | 67 | static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst, |
diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c index 1ef1366a0a03..47947624eccc 100644 --- a/net/ipv4/xfrm4_state.c +++ b/net/ipv4/xfrm4_state.c | |||
@@ -21,21 +21,25 @@ static int xfrm4_init_flags(struct xfrm_state *x) | |||
21 | } | 21 | } |
22 | 22 | ||
23 | static void | 23 | static void |
24 | __xfrm4_init_tempsel(struct xfrm_state *x, struct flowi *fl, | 24 | __xfrm4_init_tempsel(struct xfrm_selector *sel, struct flowi *fl) |
25 | struct xfrm_tmpl *tmpl, | 25 | { |
26 | xfrm_address_t *daddr, xfrm_address_t *saddr) | 26 | sel->daddr.a4 = fl->fl4_dst; |
27 | sel->saddr.a4 = fl->fl4_src; | ||
28 | sel->dport = xfrm_flowi_dport(fl); | ||
29 | sel->dport_mask = htons(0xffff); | ||
30 | sel->sport = xfrm_flowi_sport(fl); | ||
31 | sel->sport_mask = htons(0xffff); | ||
32 | sel->family = AF_INET; | ||
33 | sel->prefixlen_d = 32; | ||
34 | sel->prefixlen_s = 32; | ||
35 | sel->proto = fl->proto; | ||
36 | sel->ifindex = fl->oif; | ||
37 | } | ||
38 | |||
39 | static void | ||
40 | xfrm4_init_temprop(struct xfrm_state *x, struct xfrm_tmpl *tmpl, | ||
41 | xfrm_address_t *daddr, xfrm_address_t *saddr) | ||
27 | { | 42 | { |
28 | x->sel.daddr.a4 = fl->fl4_dst; | ||
29 | x->sel.saddr.a4 = fl->fl4_src; | ||
30 | x->sel.dport = xfrm_flowi_dport(fl); | ||
31 | x->sel.dport_mask = htons(0xffff); | ||
32 | x->sel.sport = xfrm_flowi_sport(fl); | ||
33 | x->sel.sport_mask = htons(0xffff); | ||
34 | x->sel.family = AF_INET; | ||
35 | x->sel.prefixlen_d = 32; | ||
36 | x->sel.prefixlen_s = 32; | ||
37 | x->sel.proto = fl->proto; | ||
38 | x->sel.ifindex = fl->oif; | ||
39 | x->id = tmpl->id; | 43 | x->id = tmpl->id; |
40 | if (x->id.daddr.a4 == 0) | 44 | if (x->id.daddr.a4 == 0) |
41 | x->id.daddr.a4 = daddr->a4; | 45 | x->id.daddr.a4 = daddr->a4; |
@@ -70,6 +74,7 @@ static struct xfrm_state_afinfo xfrm4_state_afinfo = { | |||
70 | .owner = THIS_MODULE, | 74 | .owner = THIS_MODULE, |
71 | .init_flags = xfrm4_init_flags, | 75 | .init_flags = xfrm4_init_flags, |
72 | .init_tempsel = __xfrm4_init_tempsel, | 76 | .init_tempsel = __xfrm4_init_tempsel, |
77 | .init_temprop = xfrm4_init_temprop, | ||
73 | .output = xfrm4_output, | 78 | .output = xfrm4_output, |
74 | .extract_input = xfrm4_extract_input, | 79 | .extract_input = xfrm4_extract_input, |
75 | .extract_output = xfrm4_extract_output, | 80 | .extract_output = xfrm4_extract_output, |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index ab70a3fbcafa..324fac3b6c16 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -4637,10 +4637,12 @@ int __init addrconf_init(void) | |||
4637 | if (err < 0) { | 4637 | if (err < 0) { |
4638 | printk(KERN_CRIT "IPv6 Addrconf:" | 4638 | printk(KERN_CRIT "IPv6 Addrconf:" |
4639 | " cannot initialize default policy table: %d.\n", err); | 4639 | " cannot initialize default policy table: %d.\n", err); |
4640 | return err; | 4640 | goto out; |
4641 | } | 4641 | } |
4642 | 4642 | ||
4643 | register_pernet_subsys(&addrconf_ops); | 4643 | err = register_pernet_subsys(&addrconf_ops); |
4644 | if (err < 0) | ||
4645 | goto out_addrlabel; | ||
4644 | 4646 | ||
4645 | /* The addrconf netdev notifier requires that loopback_dev | 4647 | /* The addrconf netdev notifier requires that loopback_dev |
4646 | * has it's ipv6 private information allocated and setup | 4648 | * has it's ipv6 private information allocated and setup |
@@ -4692,7 +4694,9 @@ errout: | |||
4692 | unregister_netdevice_notifier(&ipv6_dev_notf); | 4694 | unregister_netdevice_notifier(&ipv6_dev_notf); |
4693 | errlo: | 4695 | errlo: |
4694 | unregister_pernet_subsys(&addrconf_ops); | 4696 | unregister_pernet_subsys(&addrconf_ops); |
4695 | 4697 | out_addrlabel: | |
4698 | ipv6_addr_label_cleanup(); | ||
4699 | out: | ||
4696 | return err; | 4700 | return err; |
4697 | } | 4701 | } |
4698 | 4702 | ||
@@ -4703,6 +4707,7 @@ void addrconf_cleanup(void) | |||
4703 | 4707 | ||
4704 | unregister_netdevice_notifier(&ipv6_dev_notf); | 4708 | unregister_netdevice_notifier(&ipv6_dev_notf); |
4705 | unregister_pernet_subsys(&addrconf_ops); | 4709 | unregister_pernet_subsys(&addrconf_ops); |
4710 | ipv6_addr_label_cleanup(); | ||
4706 | 4711 | ||
4707 | rtnl_lock(); | 4712 | rtnl_lock(); |
4708 | 4713 | ||
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c index f0e774cea386..8175f802651b 100644 --- a/net/ipv6/addrlabel.c +++ b/net/ipv6/addrlabel.c | |||
@@ -393,6 +393,11 @@ int __init ipv6_addr_label_init(void) | |||
393 | return register_pernet_subsys(&ipv6_addr_label_ops); | 393 | return register_pernet_subsys(&ipv6_addr_label_ops); |
394 | } | 394 | } |
395 | 395 | ||
396 | void ipv6_addr_label_cleanup(void) | ||
397 | { | ||
398 | unregister_pernet_subsys(&ipv6_addr_label_ops); | ||
399 | } | ||
400 | |||
396 | static const struct nla_policy ifal_policy[IFAL_MAX+1] = { | 401 | static const struct nla_policy ifal_policy[IFAL_MAX+1] = { |
397 | [IFAL_ADDRESS] = { .len = sizeof(struct in6_addr), }, | 402 | [IFAL_ADDRESS] = { .len = sizeof(struct in6_addr), }, |
398 | [IFAL_LABEL] = { .len = sizeof(u32), }, | 403 | [IFAL_LABEL] = { .len = sizeof(u32), }, |
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 7d929a22cbc2..ef371aa01ac5 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c | |||
@@ -105,9 +105,12 @@ ipv4_connected: | |||
105 | if (ipv6_addr_any(&np->saddr)) | 105 | if (ipv6_addr_any(&np->saddr)) |
106 | ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr); | 106 | ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr); |
107 | 107 | ||
108 | if (ipv6_addr_any(&np->rcv_saddr)) | 108 | if (ipv6_addr_any(&np->rcv_saddr)) { |
109 | ipv6_addr_set_v4mapped(inet->inet_rcv_saddr, | 109 | ipv6_addr_set_v4mapped(inet->inet_rcv_saddr, |
110 | &np->rcv_saddr); | 110 | &np->rcv_saddr); |
111 | if (sk->sk_prot->rehash) | ||
112 | sk->sk_prot->rehash(sk); | ||
113 | } | ||
111 | 114 | ||
112 | goto out; | 115 | goto out; |
113 | } | 116 | } |
@@ -181,6 +184,8 @@ ipv4_connected: | |||
181 | if (ipv6_addr_any(&np->rcv_saddr)) { | 184 | if (ipv6_addr_any(&np->rcv_saddr)) { |
182 | ipv6_addr_copy(&np->rcv_saddr, &fl.fl6_src); | 185 | ipv6_addr_copy(&np->rcv_saddr, &fl.fl6_src); |
183 | inet->inet_rcv_saddr = LOOPBACK4_IPV6; | 186 | inet->inet_rcv_saddr = LOOPBACK4_IPV6; |
187 | if (sk->sk_prot->rehash) | ||
188 | sk->sk_prot->rehash(sk); | ||
184 | } | 189 | } |
185 | 190 | ||
186 | ip6_dst_store(sk, dst, | 191 | ip6_dst_store(sk, dst, |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index d40b330c0ee6..980912ed7a38 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -639,7 +639,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
639 | 639 | ||
640 | if (skb_has_frags(skb)) { | 640 | if (skb_has_frags(skb)) { |
641 | int first_len = skb_pagelen(skb); | 641 | int first_len = skb_pagelen(skb); |
642 | int truesizes = 0; | 642 | struct sk_buff *frag2; |
643 | 643 | ||
644 | if (first_len - hlen > mtu || | 644 | if (first_len - hlen > mtu || |
645 | ((first_len - hlen) & 7) || | 645 | ((first_len - hlen) & 7) || |
@@ -651,18 +651,18 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
651 | if (frag->len > mtu || | 651 | if (frag->len > mtu || |
652 | ((frag->len & 7) && frag->next) || | 652 | ((frag->len & 7) && frag->next) || |
653 | skb_headroom(frag) < hlen) | 653 | skb_headroom(frag) < hlen) |
654 | goto slow_path; | 654 | goto slow_path_clean; |
655 | 655 | ||
656 | /* Partially cloned skb? */ | 656 | /* Partially cloned skb? */ |
657 | if (skb_shared(frag)) | 657 | if (skb_shared(frag)) |
658 | goto slow_path; | 658 | goto slow_path_clean; |
659 | 659 | ||
660 | BUG_ON(frag->sk); | 660 | BUG_ON(frag->sk); |
661 | if (skb->sk) { | 661 | if (skb->sk) { |
662 | frag->sk = skb->sk; | 662 | frag->sk = skb->sk; |
663 | frag->destructor = sock_wfree; | 663 | frag->destructor = sock_wfree; |
664 | truesizes += frag->truesize; | ||
665 | } | 664 | } |
665 | skb->truesize -= frag->truesize; | ||
666 | } | 666 | } |
667 | 667 | ||
668 | err = 0; | 668 | err = 0; |
@@ -693,7 +693,6 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
693 | 693 | ||
694 | first_len = skb_pagelen(skb); | 694 | first_len = skb_pagelen(skb); |
695 | skb->data_len = first_len - skb_headlen(skb); | 695 | skb->data_len = first_len - skb_headlen(skb); |
696 | skb->truesize -= truesizes; | ||
697 | skb->len = first_len; | 696 | skb->len = first_len; |
698 | ipv6_hdr(skb)->payload_len = htons(first_len - | 697 | ipv6_hdr(skb)->payload_len = htons(first_len - |
699 | sizeof(struct ipv6hdr)); | 698 | sizeof(struct ipv6hdr)); |
@@ -756,6 +755,15 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
756 | IPSTATS_MIB_FRAGFAILS); | 755 | IPSTATS_MIB_FRAGFAILS); |
757 | dst_release(&rt->dst); | 756 | dst_release(&rt->dst); |
758 | return err; | 757 | return err; |
758 | |||
759 | slow_path_clean: | ||
760 | skb_walk_frags(skb, frag2) { | ||
761 | if (frag2 == frag) | ||
762 | break; | ||
763 | frag2->sk = NULL; | ||
764 | frag2->destructor = NULL; | ||
765 | skb->truesize += frag2->truesize; | ||
766 | } | ||
759 | } | 767 | } |
760 | 768 | ||
761 | slow_path: | 769 | slow_path: |
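
In the ip6_fragment fast path above, each fragment skb on the frag list is temporarily handed the parent socket (frag->sk, frag->destructor = sock_wfree) and the head skb's truesize is reduced as the loop goes; if a later fragment then fails the fast-path checks, the new slow_path_clean label walks the list again and reverts exactly the fragments already claimed. A hedged, standalone sketch of that "claim, then roll back on failure" loop follows; the struct and field names are illustrative only.

/* Sketch of claiming resources across a list and rolling back the
 * already-claimed entries when a later one fails the checks. */
#include <stdbool.h>
#include <stdio.h>

struct frag {
	struct frag *next;
	bool claimed;
	bool ok;        /* does this fragment pass the fast-path checks? */
};

static bool claim_all(struct frag *head)
{
	struct frag *f, *f2;

	for (f = head; f; f = f->next) {
		if (!f->ok)
			goto rollback;   /* fast path cannot be used */
		f->claimed = true;       /* analogous to setting frag->sk */
	}
	return true;

rollback:
	/* Undo only the entries claimed before the failing one. */
	for (f2 = head; f2 != f; f2 = f2->next)
		f2->claimed = false;
	return false;
}

int main(void)
{
	struct frag c = { NULL, false, false };   /* this one fails */
	struct frag b = { &c,   false, true  };
	struct frag a = { &b,   false, true  };

	printf("fast path usable: %d\n", claim_all(&a));
	printf("a.claimed=%d b.claimed=%d c.claimed=%d\n",
	       a.claimed, b.claimed, c.claimed);
	return 0;
}
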
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 13ef5bc05cf5..578f3c1a16db 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c | |||
@@ -113,14 +113,6 @@ static void nf_skb_free(struct sk_buff *skb) | |||
113 | kfree_skb(NFCT_FRAG6_CB(skb)->orig); | 113 | kfree_skb(NFCT_FRAG6_CB(skb)->orig); |
114 | } | 114 | } |
115 | 115 | ||
116 | /* Memory Tracking Functions. */ | ||
117 | static void frag_kfree_skb(struct sk_buff *skb) | ||
118 | { | ||
119 | atomic_sub(skb->truesize, &nf_init_frags.mem); | ||
120 | nf_skb_free(skb); | ||
121 | kfree_skb(skb); | ||
122 | } | ||
123 | |||
124 | /* Destruction primitives. */ | 116 | /* Destruction primitives. */ |
125 | 117 | ||
126 | static __inline__ void fq_put(struct nf_ct_frag6_queue *fq) | 118 | static __inline__ void fq_put(struct nf_ct_frag6_queue *fq) |
@@ -282,66 +274,22 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb, | |||
282 | } | 274 | } |
283 | 275 | ||
284 | found: | 276 | found: |
285 | /* We found where to put this one. Check for overlap with | 277 | /* RFC5722, Section 4: |
286 | * preceding fragment, and, if needed, align things so that | 278 | * When reassembling an IPv6 datagram, if |
287 | * any overlaps are eliminated. | 279 | * one or more of its constituent fragments is determined to be an |
288 | */ | 280 | * overlapping fragment, the entire datagram (and any constituent |
289 | if (prev) { | 281 | * fragments, including those not yet received) MUST be silently |
290 | int i = (NFCT_FRAG6_CB(prev)->offset + prev->len) - offset; | 282 | * discarded. |
291 | |||
292 | if (i > 0) { | ||
293 | offset += i; | ||
294 | if (end <= offset) { | ||
295 | pr_debug("overlap\n"); | ||
296 | goto err; | ||
297 | } | ||
298 | if (!pskb_pull(skb, i)) { | ||
299 | pr_debug("Can't pull\n"); | ||
300 | goto err; | ||
301 | } | ||
302 | if (skb->ip_summed != CHECKSUM_UNNECESSARY) | ||
303 | skb->ip_summed = CHECKSUM_NONE; | ||
304 | } | ||
305 | } | ||
306 | |||
307 | /* Look for overlap with succeeding segments. | ||
308 | * If we can merge fragments, do it. | ||
309 | */ | 283 | */ |
310 | while (next && NFCT_FRAG6_CB(next)->offset < end) { | ||
311 | /* overlap is 'i' bytes */ | ||
312 | int i = end - NFCT_FRAG6_CB(next)->offset; | ||
313 | |||
314 | if (i < next->len) { | ||
315 | /* Eat head of the next overlapped fragment | ||
316 | * and leave the loop. The next ones cannot overlap. | ||
317 | */ | ||
318 | pr_debug("Eat head of the overlapped parts.: %d", i); | ||
319 | if (!pskb_pull(next, i)) | ||
320 | goto err; | ||
321 | 284 | ||
322 | /* next fragment */ | 285 | /* Check for overlap with preceding fragment. */ |
323 | NFCT_FRAG6_CB(next)->offset += i; | 286 | if (prev && |
324 | fq->q.meat -= i; | 287 | (NFCT_FRAG6_CB(prev)->offset + prev->len) - offset > 0) |
325 | if (next->ip_summed != CHECKSUM_UNNECESSARY) | 288 | goto discard_fq; |
326 | next->ip_summed = CHECKSUM_NONE; | ||
327 | break; | ||
328 | } else { | ||
329 | struct sk_buff *free_it = next; | ||
330 | |||
331 | /* Old fragmnet is completely overridden with | ||
332 | * new one drop it. | ||
333 | */ | ||
334 | next = next->next; | ||
335 | 289 | ||
336 | if (prev) | 290 | /* Look for overlap with succeeding segment. */ |
337 | prev->next = next; | 291 | if (next && NFCT_FRAG6_CB(next)->offset < end) |
338 | else | 292 | goto discard_fq; |
339 | fq->q.fragments = next; | ||
340 | |||
341 | fq->q.meat -= free_it->len; | ||
342 | frag_kfree_skb(free_it); | ||
343 | } | ||
344 | } | ||
345 | 293 | ||
346 | NFCT_FRAG6_CB(skb)->offset = offset; | 294 | NFCT_FRAG6_CB(skb)->offset = offset; |
347 | 295 | ||
@@ -371,6 +319,8 @@ found: | |||
371 | write_unlock(&nf_frags.lock); | 319 | write_unlock(&nf_frags.lock); |
372 | return 0; | 320 | return 0; |
373 | 321 | ||
322 | discard_fq: | ||
323 | fq_kill(fq); | ||
374 | err: | 324 | err: |
375 | return -1; | 325 | return -1; |
376 | } | 326 | } |
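
Both IPv6 reassembly paths (this netfilter one and net/ipv6/reassembly.c below) now follow RFC 5722: instead of trimming overlapping fragments, any overlap with the previous or the next queued fragment kills the whole queue via fq_kill(). The sketch below shows just the two interval tests in isolation, using plain integers in place of the skb control-block fields; it compiles standalone and is not the kernel code.

/* Illustrative overlap tests for a new fragment [offset, end) against its
 * neighbours in an offset-sorted queue, as RFC 5722 requires for IPv6. */
#include <stdbool.h>
#include <stdio.h>

static bool overlaps_neighbours(int offset, int end,
				bool have_prev, int prev_off, int prev_len,
				bool have_next, int next_off)
{
	/* Does the preceding fragment end past our start? */
	if (have_prev && prev_off + prev_len - offset > 0)
		return true;
	/* Does the succeeding fragment start before our end? */
	if (have_next && next_off < end)
		return true;
	return false;
}

int main(void)
{
	/* prev covers [0,1280), new fragment claims [1272,2552): overlap -> drop all. */
	printf("%d\n", overlaps_neighbours(1272, 2552, true, 0, 1280, false, 0));
	/* prev covers [0,1280), new fragment claims [1280,2560): no overlap. */
	printf("%d\n", overlaps_neighbours(1280, 2560, true, 0, 1280, false, 0));
	return 0;
}
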
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 545c4141b755..64cfef1b0a4c 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c | |||
@@ -149,13 +149,6 @@ int ip6_frag_match(struct inet_frag_queue *q, void *a) | |||
149 | } | 149 | } |
150 | EXPORT_SYMBOL(ip6_frag_match); | 150 | EXPORT_SYMBOL(ip6_frag_match); |
151 | 151 | ||
152 | /* Memory Tracking Functions. */ | ||
153 | static void frag_kfree_skb(struct netns_frags *nf, struct sk_buff *skb) | ||
154 | { | ||
155 | atomic_sub(skb->truesize, &nf->mem); | ||
156 | kfree_skb(skb); | ||
157 | } | ||
158 | |||
159 | void ip6_frag_init(struct inet_frag_queue *q, void *a) | 152 | void ip6_frag_init(struct inet_frag_queue *q, void *a) |
160 | { | 153 | { |
161 | struct frag_queue *fq = container_of(q, struct frag_queue, q); | 154 | struct frag_queue *fq = container_of(q, struct frag_queue, q); |
@@ -346,58 +339,22 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, | |||
346 | } | 339 | } |
347 | 340 | ||
348 | found: | 341 | found: |
349 | /* We found where to put this one. Check for overlap with | 342 | /* RFC5722, Section 4: |
350 | * preceding fragment, and, if needed, align things so that | 343 | * When reassembling an IPv6 datagram, if |
351 | * any overlaps are eliminated. | 344 | * one or more of its constituent fragments is determined to be an |
345 | * overlapping fragment, the entire datagram (and any constituent | ||
346 | * fragments, including those not yet received) MUST be silently | ||
347 | * discarded. | ||
352 | */ | 348 | */ |
353 | if (prev) { | ||
354 | int i = (FRAG6_CB(prev)->offset + prev->len) - offset; | ||
355 | 349 | ||
356 | if (i > 0) { | 350 | /* Check for overlap with preceding fragment. */ |
357 | offset += i; | 351 | if (prev && |
358 | if (end <= offset) | 352 | (FRAG6_CB(prev)->offset + prev->len) - offset > 0) |
359 | goto err; | 353 | goto discard_fq; |
360 | if (!pskb_pull(skb, i)) | ||
361 | goto err; | ||
362 | if (skb->ip_summed != CHECKSUM_UNNECESSARY) | ||
363 | skb->ip_summed = CHECKSUM_NONE; | ||
364 | } | ||
365 | } | ||
366 | 354 | ||
367 | /* Look for overlap with succeeding segments. | 355 | /* Look for overlap with succeeding segment. */ |
368 | * If we can merge fragments, do it. | 356 | if (next && FRAG6_CB(next)->offset < end) |
369 | */ | 357 | goto discard_fq; |
370 | while (next && FRAG6_CB(next)->offset < end) { | ||
371 | int i = end - FRAG6_CB(next)->offset; /* overlap is 'i' bytes */ | ||
372 | |||
373 | if (i < next->len) { | ||
374 | /* Eat head of the next overlapped fragment | ||
375 | * and leave the loop. The next ones cannot overlap. | ||
376 | */ | ||
377 | if (!pskb_pull(next, i)) | ||
378 | goto err; | ||
379 | FRAG6_CB(next)->offset += i; /* next fragment */ | ||
380 | fq->q.meat -= i; | ||
381 | if (next->ip_summed != CHECKSUM_UNNECESSARY) | ||
382 | next->ip_summed = CHECKSUM_NONE; | ||
383 | break; | ||
384 | } else { | ||
385 | struct sk_buff *free_it = next; | ||
386 | |||
387 | /* Old fragment is completely overridden with | ||
388 | * new one drop it. | ||
389 | */ | ||
390 | next = next->next; | ||
391 | |||
392 | if (prev) | ||
393 | prev->next = next; | ||
394 | else | ||
395 | fq->q.fragments = next; | ||
396 | |||
397 | fq->q.meat -= free_it->len; | ||
398 | frag_kfree_skb(fq->q.net, free_it); | ||
399 | } | ||
400 | } | ||
401 | 358 | ||
402 | FRAG6_CB(skb)->offset = offset; | 359 | FRAG6_CB(skb)->offset = offset; |
403 | 360 | ||
@@ -436,6 +393,8 @@ found: | |||
436 | write_unlock(&ip6_frags.lock); | 393 | write_unlock(&ip6_frags.lock); |
437 | return -1; | 394 | return -1; |
438 | 395 | ||
396 | discard_fq: | ||
397 | fq_kill(fq); | ||
439 | err: | 398 | err: |
440 | IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), | 399 | IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), |
441 | IPSTATS_MIB_REASMFAILS); | 400 | IPSTATS_MIB_REASMFAILS); |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index d126365ac046..8323136bdc54 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -670,7 +670,7 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *dad | |||
670 | 670 | ||
671 | if (net_ratelimit()) | 671 | if (net_ratelimit()) |
672 | printk(KERN_WARNING | 672 | printk(KERN_WARNING |
673 | "Neighbour table overflow.\n"); | 673 | "ipv6: Neighbour table overflow.\n"); |
674 | dst_free(&rt->dst); | 674 | dst_free(&rt->dst); |
675 | return NULL; | 675 | return NULL; |
676 | } | 676 | } |
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 1dd1affdead2..5acb3560ff15 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -111,6 +111,15 @@ int udp_v6_get_port(struct sock *sk, unsigned short snum) | |||
111 | return udp_lib_get_port(sk, snum, ipv6_rcv_saddr_equal, hash2_nulladdr); | 111 | return udp_lib_get_port(sk, snum, ipv6_rcv_saddr_equal, hash2_nulladdr); |
112 | } | 112 | } |
113 | 113 | ||
114 | static void udp_v6_rehash(struct sock *sk) | ||
115 | { | ||
116 | u16 new_hash = udp6_portaddr_hash(sock_net(sk), | ||
117 | &inet6_sk(sk)->rcv_saddr, | ||
118 | inet_sk(sk)->inet_num); | ||
119 | |||
120 | udp_lib_rehash(sk, new_hash); | ||
121 | } | ||
122 | |||
114 | static inline int compute_score(struct sock *sk, struct net *net, | 123 | static inline int compute_score(struct sock *sk, struct net *net, |
115 | unsigned short hnum, | 124 | unsigned short hnum, |
116 | struct in6_addr *saddr, __be16 sport, | 125 | struct in6_addr *saddr, __be16 sport, |
@@ -1447,6 +1456,7 @@ struct proto udpv6_prot = { | |||
1447 | .backlog_rcv = udpv6_queue_rcv_skb, | 1456 | .backlog_rcv = udpv6_queue_rcv_skb, |
1448 | .hash = udp_lib_hash, | 1457 | .hash = udp_lib_hash, |
1449 | .unhash = udp_lib_unhash, | 1458 | .unhash = udp_lib_unhash, |
1459 | .rehash = udp_v6_rehash, | ||
1450 | .get_port = udp_v6_get_port, | 1460 | .get_port = udp_v6_get_port, |
1451 | .memory_allocated = &udp_memory_allocated, | 1461 | .memory_allocated = &udp_memory_allocated, |
1452 | .sysctl_mem = sysctl_udp_mem, | 1462 | .sysctl_mem = sysctl_udp_mem, |
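
The udp_v6_rehash() callback above, together with the datagram.c hunks earlier in this section, handles a connect() that turns a wildcard-bound socket into one with a concrete rcv_saddr: its slot in the address+port hash no longer matches, so sk_prot->rehash re-inserts it under the new key (udp6_portaddr_hash of the new address and the bound port). The toy userspace model below shows why the re-insert is needed; the table, hash function, and struct are invented for the illustration.

/* Toy model: a lookup table keyed by (address, port). If the bound address
 * changes after insertion, the entry must be re-hashed or lookups miss it. */
#include <stdio.h>
#include <string.h>

#define NBUCKETS 8

struct entry {
	char addr[16];
	int  port;
	int  used;
};

static struct entry table[NBUCKETS];

static unsigned int hash(const char *addr, int port)
{
	unsigned int h = (unsigned int)port;
	while (*addr)
		h = h * 31 + (unsigned char)*addr++;
	return h % NBUCKETS;
}

static void insert(const char *addr, int port)
{
	struct entry *e = &table[hash(addr, port)];
	snprintf(e->addr, sizeof(e->addr), "%s", addr);
	e->port = port;
	e->used = 1;
}

static int lookup(const char *addr, int port)
{
	struct entry *e = &table[hash(addr, port)];
	return e->used && e->port == port && strcmp(e->addr, addr) == 0;
}

int main(void)
{
	insert("::", 5000);                 /* wildcard bind */
	/* connect() later fixes the local address to ::1; without a rehash the
	 * entry still sits under the "::" key and the lookup misses. */
	printf("lookup ::1 before rehash: %d\n", lookup("::1", 5000));
	insert("::1", 5000);                /* the "rehash": re-insert under new key */
	printf("lookup ::1 after rehash:  %d\n", lookup("::1", 5000));
	return 0;
}
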
diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c index f417b77fa0e1..a67575d472a3 100644 --- a/net/ipv6/xfrm6_state.c +++ b/net/ipv6/xfrm6_state.c | |||
@@ -20,23 +20,27 @@ | |||
20 | #include <net/addrconf.h> | 20 | #include <net/addrconf.h> |
21 | 21 | ||
22 | static void | 22 | static void |
23 | __xfrm6_init_tempsel(struct xfrm_state *x, struct flowi *fl, | 23 | __xfrm6_init_tempsel(struct xfrm_selector *sel, struct flowi *fl) |
24 | struct xfrm_tmpl *tmpl, | ||
25 | xfrm_address_t *daddr, xfrm_address_t *saddr) | ||
26 | { | 24 | { |
27 | /* Initialize temporary selector matching only | 25 | /* Initialize temporary selector matching only |
28 | * to current session. */ | 26 | * to current session. */ |
29 | ipv6_addr_copy((struct in6_addr *)&x->sel.daddr, &fl->fl6_dst); | 27 | ipv6_addr_copy((struct in6_addr *)&sel->daddr, &fl->fl6_dst); |
30 | ipv6_addr_copy((struct in6_addr *)&x->sel.saddr, &fl->fl6_src); | 28 | ipv6_addr_copy((struct in6_addr *)&sel->saddr, &fl->fl6_src); |
31 | x->sel.dport = xfrm_flowi_dport(fl); | 29 | sel->dport = xfrm_flowi_dport(fl); |
32 | x->sel.dport_mask = htons(0xffff); | 30 | sel->dport_mask = htons(0xffff); |
33 | x->sel.sport = xfrm_flowi_sport(fl); | 31 | sel->sport = xfrm_flowi_sport(fl); |
34 | x->sel.sport_mask = htons(0xffff); | 32 | sel->sport_mask = htons(0xffff); |
35 | x->sel.family = AF_INET6; | 33 | sel->family = AF_INET6; |
36 | x->sel.prefixlen_d = 128; | 34 | sel->prefixlen_d = 128; |
37 | x->sel.prefixlen_s = 128; | 35 | sel->prefixlen_s = 128; |
38 | x->sel.proto = fl->proto; | 36 | sel->proto = fl->proto; |
39 | x->sel.ifindex = fl->oif; | 37 | sel->ifindex = fl->oif; |
38 | } | ||
39 | |||
40 | static void | ||
41 | xfrm6_init_temprop(struct xfrm_state *x, struct xfrm_tmpl *tmpl, | ||
42 | xfrm_address_t *daddr, xfrm_address_t *saddr) | ||
43 | { | ||
40 | x->id = tmpl->id; | 44 | x->id = tmpl->id; |
41 | if (ipv6_addr_any((struct in6_addr*)&x->id.daddr)) | 45 | if (ipv6_addr_any((struct in6_addr*)&x->id.daddr)) |
42 | memcpy(&x->id.daddr, daddr, sizeof(x->sel.daddr)); | 46 | memcpy(&x->id.daddr, daddr, sizeof(x->sel.daddr)); |
@@ -168,6 +172,7 @@ static struct xfrm_state_afinfo xfrm6_state_afinfo = { | |||
168 | .eth_proto = htons(ETH_P_IPV6), | 172 | .eth_proto = htons(ETH_P_IPV6), |
169 | .owner = THIS_MODULE, | 173 | .owner = THIS_MODULE, |
170 | .init_tempsel = __xfrm6_init_tempsel, | 174 | .init_tempsel = __xfrm6_init_tempsel, |
175 | .init_temprop = xfrm6_init_temprop, | ||
171 | .tmpl_sort = __xfrm6_tmpl_sort, | 176 | .tmpl_sort = __xfrm6_tmpl_sort, |
172 | .state_sort = __xfrm6_state_sort, | 177 | .state_sort = __xfrm6_state_sort, |
173 | .output = xfrm6_output, | 178 | .output = xfrm6_output, |
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index 79986a674f6e..fd55b5135de5 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c | |||
@@ -824,8 +824,8 @@ static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
824 | 824 | ||
825 | err = irda_open_tsap(self, addr->sir_lsap_sel, addr->sir_name); | 825 | err = irda_open_tsap(self, addr->sir_lsap_sel, addr->sir_name); |
826 | if (err < 0) { | 826 | if (err < 0) { |
827 | kfree(self->ias_obj->name); | 827 | irias_delete_object(self->ias_obj); |
828 | kfree(self->ias_obj); | 828 | self->ias_obj = NULL; |
829 | goto out; | 829 | goto out; |
830 | } | 830 | } |
831 | 831 | ||
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c index a788f9e9427d..6130f9d9dbe1 100644 --- a/net/irda/irlan/irlan_common.c +++ b/net/irda/irlan/irlan_common.c | |||
@@ -1102,7 +1102,7 @@ int irlan_extract_param(__u8 *buf, char *name, char *value, __u16 *len) | |||
1102 | memcpy(&val_len, buf+n, 2); /* To avoid alignment problems */ | 1102 | memcpy(&val_len, buf+n, 2); /* To avoid alignment problems */ |
1103 | le16_to_cpus(&val_len); n+=2; | 1103 | le16_to_cpus(&val_len); n+=2; |
1104 | 1104 | ||
1105 | if (val_len > 1016) { | 1105 | if (val_len >= 1016) { |
1106 | IRDA_DEBUG(2, "%s(), parameter length too long\n", __func__ ); | 1106 | IRDA_DEBUG(2, "%s(), parameter length too long\n", __func__ ); |
1107 | return -RSP_INVALID_COMMAND_FORMAT; | 1107 | return -RSP_INVALID_COMMAND_FORMAT; |
1108 | } | 1108 | } |
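
The irlan change above tightens the length check from "> 1016" to ">= 1016", presumably because the extracted value is later copied into a 1016-byte buffer with one extra byte consumed (for a terminator), so a value of exactly 1016 bytes must be rejected as well. A tiny sketch of the general rule follows; BUF_SIZE and copy_param() are illustrative, not the irlan code.

/* Sketch: when a copy appends one extra byte (e.g. a NUL terminator),
 * a length equal to the buffer size must be rejected too, hence ">=". */
#include <stdio.h>
#include <string.h>

#define BUF_SIZE 8

static int copy_param(char *dst, const char *src, size_t len)
{
	if (len >= BUF_SIZE)          /* ">" alone would let len == BUF_SIZE through */
		return -1;            /* and the terminator below would overflow   */
	memcpy(dst, src, len);
	dst[len] = '\0';
	return 0;
}

int main(void)
{
	char dst[BUF_SIZE];

	printf("len 7: %d\n", copy_param(dst, "1234567",  7));  /* fits     */
	printf("len 8: %d\n", copy_param(dst, "12345678", 8));  /* rejected */
	return 0;
}
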
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 023ba820236f..582612998211 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c | |||
@@ -1024,7 +1024,8 @@ static int llc_ui_setsockopt(struct socket *sock, int level, int optname, | |||
1024 | { | 1024 | { |
1025 | struct sock *sk = sock->sk; | 1025 | struct sock *sk = sock->sk; |
1026 | struct llc_sock *llc = llc_sk(sk); | 1026 | struct llc_sock *llc = llc_sk(sk); |
1027 | int rc = -EINVAL, opt; | 1027 | unsigned int opt; |
1028 | int rc = -EINVAL; | ||
1028 | 1029 | ||
1029 | lock_sock(sk); | 1030 | lock_sock(sk); |
1030 | if (unlikely(level != SOL_LLC || optlen != sizeof(int))) | 1031 | if (unlikely(level != SOL_LLC || optlen != sizeof(int))) |
diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c index e4dae0244d76..cf4aea3ba30f 100644 --- a/net/llc/llc_station.c +++ b/net/llc/llc_station.c | |||
@@ -689,7 +689,7 @@ static void llc_station_rcv(struct sk_buff *skb) | |||
689 | 689 | ||
690 | int __init llc_station_init(void) | 690 | int __init llc_station_init(void) |
691 | { | 691 | { |
692 | u16 rc = -ENOBUFS; | 692 | int rc = -ENOBUFS; |
693 | struct sk_buff *skb; | 693 | struct sk_buff *skb; |
694 | struct llc_station_state_ev *ev; | 694 | struct llc_station_state_ev *ev; |
695 | 695 | ||
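
The llc fixes above are both signedness issues: llc_ui_setsockopt now reads the option into an unsigned int before range checks, and llc_station_init stops storing -ENOBUFS in a u16, where the negative errno wraps to a large positive value and any later "rc < 0" style check passes silently. The standalone demo below shows the truncation; only the error value is taken from the patch, and it is spelled out literally just for the demo.

/* Demo: a negative errno stored in an unsigned 16-bit variable loses its
 * sign, so error checks of the form "ret < 0" no longer fire. */
#include <stdint.h>
#include <stdio.h>

#define ENOBUFS 105   /* typical Linux value, hard-coded only for the demo */

int main(void)
{
	uint16_t bad  = -ENOBUFS;   /* wraps to 65431 */
	int      good = -ENOBUFS;

	printf("u16 rc = %u, (int)rc < 0? %d\n", (unsigned)bad, (int)bad < 0);
	printf("int rc = %d, rc < 0?      %d\n", good, good < 0);
	return 0;
}
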
diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 798a91b100cc..ded5c3843e06 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c | |||
@@ -732,6 +732,12 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw) | |||
732 | 732 | ||
733 | rtnl_unlock(); | 733 | rtnl_unlock(); |
734 | 734 | ||
735 | /* | ||
736 | * Now all work items will be gone, but the | ||
737 | * timer might still be armed, so delete it | ||
738 | */ | ||
739 | del_timer_sync(&local->work_timer); | ||
740 | |||
735 | cancel_work_sync(&local->reconfig_filter); | 741 | cancel_work_sync(&local->reconfig_filter); |
736 | 742 | ||
737 | ieee80211_clear_tx_pending(local); | 743 | ieee80211_clear_tx_pending(local); |
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index fa0f37e4afe4..28624282c5f3 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -2199,9 +2199,6 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, | |||
2199 | struct net_device *prev_dev = NULL; | 2199 | struct net_device *prev_dev = NULL; |
2200 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); | 2200 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); |
2201 | 2201 | ||
2202 | if (status->flag & RX_FLAG_INTERNAL_CMTR) | ||
2203 | goto out_free_skb; | ||
2204 | |||
2205 | if (skb_headroom(skb) < sizeof(*rthdr) && | 2202 | if (skb_headroom(skb) < sizeof(*rthdr) && |
2206 | pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC)) | 2203 | pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC)) |
2207 | goto out_free_skb; | 2204 | goto out_free_skb; |
@@ -2260,7 +2257,6 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, | |||
2260 | } else | 2257 | } else |
2261 | goto out_free_skb; | 2258 | goto out_free_skb; |
2262 | 2259 | ||
2263 | status->flag |= RX_FLAG_INTERNAL_CMTR; | ||
2264 | return; | 2260 | return; |
2265 | 2261 | ||
2266 | out_free_skb: | 2262 | out_free_skb: |
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 4f8ddba48011..4c2f89df5cce 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c | |||
@@ -924,6 +924,7 @@ handle_response(int af, struct sk_buff *skb, struct ip_vs_protocol *pp, | |||
924 | 924 | ||
925 | ip_vs_out_stats(cp, skb); | 925 | ip_vs_out_stats(cp, skb); |
926 | ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pp); | 926 | ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pp); |
927 | ip_vs_update_conntrack(skb, cp, 0); | ||
927 | ip_vs_conn_put(cp); | 928 | ip_vs_conn_put(cp); |
928 | 929 | ||
929 | skb->ipvs_property = 1; | 930 | skb->ipvs_property = 1; |
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c index f228a17ec649..7e9af5b76d9e 100644 --- a/net/netfilter/ipvs/ip_vs_ftp.c +++ b/net/netfilter/ipvs/ip_vs_ftp.c | |||
@@ -45,6 +45,7 @@ | |||
45 | #include <linux/netfilter.h> | 45 | #include <linux/netfilter.h> |
46 | #include <net/netfilter/nf_conntrack.h> | 46 | #include <net/netfilter/nf_conntrack.h> |
47 | #include <net/netfilter/nf_conntrack_expect.h> | 47 | #include <net/netfilter/nf_conntrack_expect.h> |
48 | #include <net/netfilter/nf_nat.h> | ||
48 | #include <net/netfilter/nf_nat_helper.h> | 49 | #include <net/netfilter/nf_nat_helper.h> |
49 | #include <linux/gfp.h> | 50 | #include <linux/gfp.h> |
50 | #include <net/protocol.h> | 51 | #include <net/protocol.h> |
@@ -359,7 +360,7 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, | |||
359 | buf_len = strlen(buf); | 360 | buf_len = strlen(buf); |
360 | 361 | ||
361 | ct = nf_ct_get(skb, &ctinfo); | 362 | ct = nf_ct_get(skb, &ctinfo); |
362 | if (ct && !nf_ct_is_untracked(ct)) { | 363 | if (ct && !nf_ct_is_untracked(ct) && nfct_nat(ct)) { |
363 | /* If mangling fails this function will return 0 | 364 | /* If mangling fails this function will return 0 |
364 | * which will cause the packet to be dropped. | 365 | * which will cause the packet to be dropped. |
365 | * Mangling can only fail under memory pressure, | 366 | * Mangling can only fail under memory pressure, |
@@ -409,7 +410,6 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp, | |||
409 | union nf_inet_addr to; | 410 | union nf_inet_addr to; |
410 | __be16 port; | 411 | __be16 port; |
411 | struct ip_vs_conn *n_cp; | 412 | struct ip_vs_conn *n_cp; |
412 | struct nf_conn *ct; | ||
413 | 413 | ||
414 | #ifdef CONFIG_IP_VS_IPV6 | 414 | #ifdef CONFIG_IP_VS_IPV6 |
415 | /* This application helper doesn't work with IPv6 yet, | 415 | /* This application helper doesn't work with IPv6 yet, |
@@ -496,11 +496,6 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp, | |||
496 | ip_vs_control_add(n_cp, cp); | 496 | ip_vs_control_add(n_cp, cp); |
497 | } | 497 | } |
498 | 498 | ||
499 | ct = (struct nf_conn *)skb->nfct; | ||
500 | if (ct && ct != &nf_conntrack_untracked) | ||
501 | ip_vs_expect_related(skb, ct, n_cp, | ||
502 | IPPROTO_TCP, &n_cp->dport, 1); | ||
503 | |||
504 | /* | 499 | /* |
505 | * Move tunnel to listen state | 500 | * Move tunnel to listen state |
506 | */ | 501 | */ |
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index 21e1a5e9b9d3..49df6bea6a2d 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c | |||
@@ -349,8 +349,8 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
349 | } | 349 | } |
350 | #endif | 350 | #endif |
351 | 351 | ||
352 | static void | 352 | void |
353 | ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp) | 353 | ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, int outin) |
354 | { | 354 | { |
355 | struct nf_conn *ct = (struct nf_conn *)skb->nfct; | 355 | struct nf_conn *ct = (struct nf_conn *)skb->nfct; |
356 | struct nf_conntrack_tuple new_tuple; | 356 | struct nf_conntrack_tuple new_tuple; |
@@ -365,11 +365,17 @@ ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp) | |||
365 | * real-server we will see RIP->DIP. | 365 | * real-server we will see RIP->DIP. |
366 | */ | 366 | */ |
367 | new_tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple; | 367 | new_tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple; |
368 | new_tuple.src.u3 = cp->daddr; | 368 | if (outin) |
369 | new_tuple.src.u3 = cp->daddr; | ||
370 | else | ||
371 | new_tuple.dst.u3 = cp->vaddr; | ||
369 | /* | 372 | /* |
370 | * This will also take care of UDP and other protocols. | 373 | * This will also take care of UDP and other protocols. |
371 | */ | 374 | */ |
372 | new_tuple.src.u.tcp.port = cp->dport; | 375 | if (outin) |
376 | new_tuple.src.u.tcp.port = cp->dport; | ||
377 | else | ||
378 | new_tuple.dst.u.tcp.port = cp->vport; | ||
373 | nf_conntrack_alter_reply(ct, &new_tuple); | 379 | nf_conntrack_alter_reply(ct, &new_tuple); |
374 | } | 380 | } |
375 | 381 | ||
@@ -428,7 +434,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
428 | 434 | ||
429 | IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT"); | 435 | IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT"); |
430 | 436 | ||
431 | ip_vs_update_conntrack(skb, cp); | 437 | ip_vs_update_conntrack(skb, cp, 1); |
432 | 438 | ||
433 | /* FIXME: when application helper enlarges the packet and the length | 439 | /* FIXME: when application helper enlarges the packet and the length |
434 | is larger than the MTU of outgoing device, there will be still | 440 | is larger than the MTU of outgoing device, there will be still |
@@ -506,7 +512,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
506 | 512 | ||
507 | IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT"); | 513 | IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT"); |
508 | 514 | ||
509 | ip_vs_update_conntrack(skb, cp); | 515 | ip_vs_update_conntrack(skb, cp, 1); |
510 | 516 | ||
511 | /* FIXME: when application helper enlarges the packet and the length | 517 | /* FIXME: when application helper enlarges the packet and the length |
512 | is larger than the MTU of outgoing device, there will be still | 518 | is larger than the MTU of outgoing device, there will be still |
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c index 7dcf7a404190..8d9e4c949b96 100644 --- a/net/netfilter/nf_conntrack_extend.c +++ b/net/netfilter/nf_conntrack_extend.c | |||
@@ -48,15 +48,17 @@ nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp) | |||
48 | { | 48 | { |
49 | unsigned int off, len; | 49 | unsigned int off, len; |
50 | struct nf_ct_ext_type *t; | 50 | struct nf_ct_ext_type *t; |
51 | size_t alloc_size; | ||
51 | 52 | ||
52 | rcu_read_lock(); | 53 | rcu_read_lock(); |
53 | t = rcu_dereference(nf_ct_ext_types[id]); | 54 | t = rcu_dereference(nf_ct_ext_types[id]); |
54 | BUG_ON(t == NULL); | 55 | BUG_ON(t == NULL); |
55 | off = ALIGN(sizeof(struct nf_ct_ext), t->align); | 56 | off = ALIGN(sizeof(struct nf_ct_ext), t->align); |
56 | len = off + t->len; | 57 | len = off + t->len; |
58 | alloc_size = t->alloc_size; | ||
57 | rcu_read_unlock(); | 59 | rcu_read_unlock(); |
58 | 60 | ||
59 | *ext = kzalloc(t->alloc_size, gfp); | 61 | *ext = kzalloc(alloc_size, gfp); |
60 | if (!*ext) | 62 | if (!*ext) |
61 | return NULL; | 63 | return NULL; |
62 | 64 | ||
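
The nf_ct_ext_create change above copies t->alloc_size into a local while still inside rcu_read_lock(); dereferencing the RCU-protected type object after rcu_read_unlock() could race with that type being unregistered and freed. The userspace sketch below uses a pthread rwlock as a rough stand-in for the RCU read side to show the "copy what you need before unlocking" rule; the structures and names are invented for the example.

/* Sketch: take what you need from shared data while the read-side lock is
 * held; after unlocking, the shared object may be gone or replaced. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct ext_type {
	size_t alloc_size;
};

static pthread_rwlock_t type_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct ext_type *registered_type;   /* writers may replace or free this */

static void *ext_create(void)
{
	size_t alloc_size;

	pthread_rwlock_rdlock(&type_lock);
	if (!registered_type) {
		pthread_rwlock_unlock(&type_lock);
		return NULL;
	}
	alloc_size = registered_type->alloc_size;   /* copy while protected */
	pthread_rwlock_unlock(&type_lock);

	/* Safe: the allocation uses the local copy, not the shared pointer. */
	return calloc(1, alloc_size);
}

int main(void)
{
	struct ext_type t = { .alloc_size = 64 };

	registered_type = &t;
	void *ext = ext_create();
	printf("allocated %s\n", ext ? "ok" : "failed");
	free(ext);
	return 0;
}
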
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index 53d892210a04..f64de9544866 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c | |||
@@ -1376,7 +1376,7 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff, | |||
1376 | unsigned int msglen, origlen; | 1376 | unsigned int msglen, origlen; |
1377 | const char *dptr, *end; | 1377 | const char *dptr, *end; |
1378 | s16 diff, tdiff = 0; | 1378 | s16 diff, tdiff = 0; |
1379 | int ret; | 1379 | int ret = NF_ACCEPT; |
1380 | typeof(nf_nat_sip_seq_adjust_hook) nf_nat_sip_seq_adjust; | 1380 | typeof(nf_nat_sip_seq_adjust_hook) nf_nat_sip_seq_adjust; |
1381 | 1381 | ||
1382 | if (ctinfo != IP_CT_ESTABLISHED && | 1382 | if (ctinfo != IP_CT_ESTABLISHED && |
diff --git a/net/netfilter/nf_tproxy_core.c b/net/netfilter/nf_tproxy_core.c index 5490fc37c92d..daab8c4a903c 100644 --- a/net/netfilter/nf_tproxy_core.c +++ b/net/netfilter/nf_tproxy_core.c | |||
@@ -70,7 +70,11 @@ nf_tproxy_destructor(struct sk_buff *skb) | |||
70 | int | 70 | int |
71 | nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk) | 71 | nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk) |
72 | { | 72 | { |
73 | if (inet_sk(sk)->transparent) { | 73 | bool transparent = (sk->sk_state == TCP_TIME_WAIT) ? |
74 | inet_twsk(sk)->tw_transparent : | ||
75 | inet_sk(sk)->transparent; | ||
76 | |||
77 | if (transparent) { | ||
74 | skb_orphan(skb); | 78 | skb_orphan(skb); |
75 | skb->sk = sk; | 79 | skb->sk = sk; |
76 | skb->destructor = nf_tproxy_destructor; | 80 | skb->destructor = nf_tproxy_destructor; |
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 980fe4ad0016..cd96ed3ccee4 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -2102,6 +2102,26 @@ static void __net_exit netlink_net_exit(struct net *net) | |||
2102 | #endif | 2102 | #endif |
2103 | } | 2103 | } |
2104 | 2104 | ||
2105 | static void __init netlink_add_usersock_entry(void) | ||
2106 | { | ||
2107 | unsigned long *listeners; | ||
2108 | int groups = 32; | ||
2109 | |||
2110 | listeners = kzalloc(NLGRPSZ(groups) + sizeof(struct listeners_rcu_head), | ||
2111 | GFP_KERNEL); | ||
2112 | if (!listeners) | ||
2113 | panic("netlink_add_usersock_entry: Cannot allocate listneres\n"); | ||
2114 | |||
2115 | netlink_table_grab(); | ||
2116 | |||
2117 | nl_table[NETLINK_USERSOCK].groups = groups; | ||
2118 | nl_table[NETLINK_USERSOCK].listeners = listeners; | ||
2119 | nl_table[NETLINK_USERSOCK].module = THIS_MODULE; | ||
2120 | nl_table[NETLINK_USERSOCK].registered = 1; | ||
2121 | |||
2122 | netlink_table_ungrab(); | ||
2123 | } | ||
2124 | |||
2105 | static struct pernet_operations __net_initdata netlink_net_ops = { | 2125 | static struct pernet_operations __net_initdata netlink_net_ops = { |
2106 | .init = netlink_net_init, | 2126 | .init = netlink_net_init, |
2107 | .exit = netlink_net_exit, | 2127 | .exit = netlink_net_exit, |
@@ -2150,6 +2170,8 @@ static int __init netlink_proto_init(void) | |||
2150 | hash->rehash_time = jiffies; | 2170 | hash->rehash_time = jiffies; |
2151 | } | 2171 | } |
2152 | 2172 | ||
2173 | netlink_add_usersock_entry(); | ||
2174 | |||
2153 | sock_register(&netlink_family_ops); | 2175 | sock_register(&netlink_family_ops); |
2154 | register_pernet_subsys(&netlink_net_ops); | 2176 | register_pernet_subsys(&netlink_net_ops); |
2155 | /* The netlink device handler may be needed early. */ | 2177 | /* The netlink device handler may be needed early. */ |
diff --git a/net/phonet/pep.c b/net/phonet/pep.c index b2a3ae6cad78..15003021f4f0 100644 --- a/net/phonet/pep.c +++ b/net/phonet/pep.c | |||
@@ -225,12 +225,13 @@ static void pipe_grant_credits(struct sock *sk) | |||
225 | static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb) | 225 | static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb) |
226 | { | 226 | { |
227 | struct pep_sock *pn = pep_sk(sk); | 227 | struct pep_sock *pn = pep_sk(sk); |
228 | struct pnpipehdr *hdr = pnp_hdr(skb); | 228 | struct pnpipehdr *hdr; |
229 | int wake = 0; | 229 | int wake = 0; |
230 | 230 | ||
231 | if (!pskb_may_pull(skb, sizeof(*hdr) + 4)) | 231 | if (!pskb_may_pull(skb, sizeof(*hdr) + 4)) |
232 | return -EINVAL; | 232 | return -EINVAL; |
233 | 233 | ||
234 | hdr = pnp_hdr(skb); | ||
234 | if (hdr->data[0] != PN_PEP_TYPE_COMMON) { | 235 | if (hdr->data[0] != PN_PEP_TYPE_COMMON) { |
235 | LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP type: %u\n", | 236 | LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP type: %u\n", |
236 | (unsigned)hdr->data[0]); | 237 | (unsigned)hdr->data[0]); |
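
The pep.c fix above moves the pnp_hdr(skb) dereference to after pskb_may_pull(): pulling may reallocate the skb's header area, leaving a pointer computed earlier dangling. The standalone sketch below reproduces the same hazard with realloc(); the buffer type and parse flow are illustrative, not the Phonet code.

/* Sketch: recompute pointers into a buffer after any call that may
 * reallocate it (realloc here plays the role of pskb_may_pull()). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf {
	char  *data;
	size_t len;
};

/* May move b->data, just as pskb_may_pull() may move skb->data. */
static int ensure_len(struct buf *b, size_t need)
{
	if (b->len >= need)
		return 0;
	char *p = realloc(b->data, need);
	if (!p)
		return -1;
	memset(p + b->len, 0, need - b->len);
	b->data = p;
	b->len = need;
	return 0;
}

int main(void)
{
	struct buf b = { .data = malloc(4), .len = 4 };
	memcpy(b.data, "hdr", 4);

	/* WRONG order: taking hdr first may leave it dangling after ensure_len().
	 *   char *hdr = b.data; ensure_len(&b, 4096); use(hdr);
	 */

	/* RIGHT order: validate the length first, then take the pointer. */
	if (ensure_len(&b, 4096) == 0) {
		char *hdr = b.data;            /* computed after the "pull" */
		printf("header: %s\n", hdr);
	}
	free(b.data);
	return 0;
}
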
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c index c397524c039c..c519939e8da9 100644 --- a/net/rds/tcp_connect.c +++ b/net/rds/tcp_connect.c | |||
@@ -43,7 +43,7 @@ void rds_tcp_state_change(struct sock *sk) | |||
43 | struct rds_connection *conn; | 43 | struct rds_connection *conn; |
44 | struct rds_tcp_connection *tc; | 44 | struct rds_tcp_connection *tc; |
45 | 45 | ||
46 | read_lock(&sk->sk_callback_lock); | 46 | read_lock_bh(&sk->sk_callback_lock); |
47 | conn = sk->sk_user_data; | 47 | conn = sk->sk_user_data; |
48 | if (conn == NULL) { | 48 | if (conn == NULL) { |
49 | state_change = sk->sk_state_change; | 49 | state_change = sk->sk_state_change; |
@@ -68,7 +68,7 @@ void rds_tcp_state_change(struct sock *sk) | |||
68 | break; | 68 | break; |
69 | } | 69 | } |
70 | out: | 70 | out: |
71 | read_unlock(&sk->sk_callback_lock); | 71 | read_unlock_bh(&sk->sk_callback_lock); |
72 | state_change(sk); | 72 | state_change(sk); |
73 | } | 73 | } |
74 | 74 | ||
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c index 975183fe6950..27844f231d10 100644 --- a/net/rds/tcp_listen.c +++ b/net/rds/tcp_listen.c | |||
@@ -114,7 +114,7 @@ void rds_tcp_listen_data_ready(struct sock *sk, int bytes) | |||
114 | 114 | ||
115 | rdsdebug("listen data ready sk %p\n", sk); | 115 | rdsdebug("listen data ready sk %p\n", sk); |
116 | 116 | ||
117 | read_lock(&sk->sk_callback_lock); | 117 | read_lock_bh(&sk->sk_callback_lock); |
118 | ready = sk->sk_user_data; | 118 | ready = sk->sk_user_data; |
119 | if (ready == NULL) { /* check for teardown race */ | 119 | if (ready == NULL) { /* check for teardown race */ |
120 | ready = sk->sk_data_ready; | 120 | ready = sk->sk_data_ready; |
@@ -131,7 +131,7 @@ void rds_tcp_listen_data_ready(struct sock *sk, int bytes) | |||
131 | queue_work(rds_wq, &rds_tcp_listen_work); | 131 | queue_work(rds_wq, &rds_tcp_listen_work); |
132 | 132 | ||
133 | out: | 133 | out: |
134 | read_unlock(&sk->sk_callback_lock); | 134 | read_unlock_bh(&sk->sk_callback_lock); |
135 | ready(sk, bytes); | 135 | ready(sk, bytes); |
136 | } | 136 | } |
137 | 137 | ||
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c index 1aba6878fa5d..e43797404102 100644 --- a/net/rds/tcp_recv.c +++ b/net/rds/tcp_recv.c | |||
@@ -324,7 +324,7 @@ void rds_tcp_data_ready(struct sock *sk, int bytes) | |||
324 | 324 | ||
325 | rdsdebug("data ready sk %p bytes %d\n", sk, bytes); | 325 | rdsdebug("data ready sk %p bytes %d\n", sk, bytes); |
326 | 326 | ||
327 | read_lock(&sk->sk_callback_lock); | 327 | read_lock_bh(&sk->sk_callback_lock); |
328 | conn = sk->sk_user_data; | 328 | conn = sk->sk_user_data; |
329 | if (conn == NULL) { /* check for teardown race */ | 329 | if (conn == NULL) { /* check for teardown race */ |
330 | ready = sk->sk_data_ready; | 330 | ready = sk->sk_data_ready; |
@@ -338,7 +338,7 @@ void rds_tcp_data_ready(struct sock *sk, int bytes) | |||
338 | if (rds_tcp_read_sock(conn, GFP_ATOMIC, KM_SOFTIRQ0) == -ENOMEM) | 338 | if (rds_tcp_read_sock(conn, GFP_ATOMIC, KM_SOFTIRQ0) == -ENOMEM) |
339 | queue_delayed_work(rds_wq, &conn->c_recv_w, 0); | 339 | queue_delayed_work(rds_wq, &conn->c_recv_w, 0); |
340 | out: | 340 | out: |
341 | read_unlock(&sk->sk_callback_lock); | 341 | read_unlock_bh(&sk->sk_callback_lock); |
342 | ready(sk, bytes); | 342 | ready(sk, bytes); |
343 | } | 343 | } |
344 | 344 | ||
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c index a28b895ff0d1..2f012a07d94d 100644 --- a/net/rds/tcp_send.c +++ b/net/rds/tcp_send.c | |||
@@ -224,7 +224,7 @@ void rds_tcp_write_space(struct sock *sk) | |||
224 | struct rds_connection *conn; | 224 | struct rds_connection *conn; |
225 | struct rds_tcp_connection *tc; | 225 | struct rds_tcp_connection *tc; |
226 | 226 | ||
227 | read_lock(&sk->sk_callback_lock); | 227 | read_lock_bh(&sk->sk_callback_lock); |
228 | conn = sk->sk_user_data; | 228 | conn = sk->sk_user_data; |
229 | if (conn == NULL) { | 229 | if (conn == NULL) { |
230 | write_space = sk->sk_write_space; | 230 | write_space = sk->sk_write_space; |
@@ -244,7 +244,7 @@ void rds_tcp_write_space(struct sock *sk) | |||
244 | queue_delayed_work(rds_wq, &conn->c_send_w, 0); | 244 | queue_delayed_work(rds_wq, &conn->c_send_w, 0); |
245 | 245 | ||
246 | out: | 246 | out: |
247 | read_unlock(&sk->sk_callback_lock); | 247 | read_unlock_bh(&sk->sk_callback_lock); |
248 | 248 | ||
249 | /* | 249 | /* |
250 | * write_space is only called when data leaves tcp's send queue if | 250 | * write_space is only called when data leaves tcp's send queue if |
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index 8e45e76a95f5..d952e7eac188 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c | |||
@@ -679,7 +679,7 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
679 | if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1) | 679 | if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1) |
680 | return -EINVAL; | 680 | return -EINVAL; |
681 | 681 | ||
682 | if (addr->srose_ndigis > ROSE_MAX_DIGIS) | 682 | if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS) |
683 | return -EINVAL; | 683 | return -EINVAL; |
684 | 684 | ||
685 | if ((dev = rose_dev_get(&addr->srose_addr)) == NULL) { | 685 | if ((dev = rose_dev_get(&addr->srose_addr)) == NULL) { |
@@ -739,7 +739,7 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le | |||
739 | if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1) | 739 | if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1) |
740 | return -EINVAL; | 740 | return -EINVAL; |
741 | 741 | ||
742 | if (addr->srose_ndigis > ROSE_MAX_DIGIS) | 742 | if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS) |
743 | return -EINVAL; | 743 | return -EINVAL; |
744 | 744 | ||
745 | /* Source + Destination digis should not exceed ROSE_MAX_DIGIS */ | 745 | /* Source + Destination digis should not exceed ROSE_MAX_DIGIS */ |
diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 537a48732e9e..7ebf7439b478 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c | |||
@@ -350,22 +350,19 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) | |||
350 | { | 350 | { |
351 | unsigned char *b = skb_tail_pointer(skb); | 351 | unsigned char *b = skb_tail_pointer(skb); |
352 | struct tcf_police *police = a->priv; | 352 | struct tcf_police *police = a->priv; |
353 | struct tc_police opt; | 353 | struct tc_police opt = { |
354 | 354 | .index = police->tcf_index, | |
355 | opt.index = police->tcf_index; | 355 | .action = police->tcf_action, |
356 | opt.action = police->tcf_action; | 356 | .mtu = police->tcfp_mtu, |
357 | opt.mtu = police->tcfp_mtu; | 357 | .burst = police->tcfp_burst, |
358 | opt.burst = police->tcfp_burst; | 358 | .refcnt = police->tcf_refcnt - ref, |
359 | opt.refcnt = police->tcf_refcnt - ref; | 359 | .bindcnt = police->tcf_bindcnt - bind, |
360 | opt.bindcnt = police->tcf_bindcnt - bind; | 360 | }; |
361 | |||
361 | if (police->tcfp_R_tab) | 362 | if (police->tcfp_R_tab) |
362 | opt.rate = police->tcfp_R_tab->rate; | 363 | opt.rate = police->tcfp_R_tab->rate; |
363 | else | ||
364 | memset(&opt.rate, 0, sizeof(opt.rate)); | ||
365 | if (police->tcfp_P_tab) | 364 | if (police->tcfp_P_tab) |
366 | opt.peakrate = police->tcfp_P_tab->rate; | 365 | opt.peakrate = police->tcfp_P_tab->rate; |
367 | else | ||
368 | memset(&opt.peakrate, 0, sizeof(opt.peakrate)); | ||
369 | NLA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt); | 366 | NLA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt); |
370 | if (police->tcfp_result) | 367 | if (police->tcfp_result) |
371 | NLA_PUT_U32(skb, TCA_POLICE_RESULT, police->tcfp_result); | 368 | NLA_PUT_U32(skb, TCA_POLICE_RESULT, police->tcfp_result); |
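
The act_police dump above now builds struct tc_police with a designated initializer: any member not named in the initializer list is zero-initialized by the language, which is why the explicit memset() of rate and peakrate in the else branches could be dropped. A minimal illustration follows; struct opt is a stand-in, not struct tc_police.

/* Demo: with a (designated) initializer, every member not mentioned is
 * zero-initialized, so no separate memset() of those fields is needed. */
#include <stdio.h>

struct rate_spec {
	unsigned int rate;
	unsigned int cell_log;
};

struct opt {
	unsigned int index;
	unsigned int action;
	struct rate_spec rate;      /* not named below -> zeroed */
	struct rate_spec peakrate;  /* not named below -> zeroed */
};

int main(void)
{
	struct opt o = {
		.index  = 7,
		.action = 1,
	};

	printf("index=%u action=%u rate=%u peakrate=%u\n",
	       o.index, o.action, o.rate.rate, o.peakrate.rate);
	return 0;
}
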
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index 340662789529..6318e1136b83 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c | |||
@@ -255,10 +255,6 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent, | |||
255 | error = -EINVAL; | 255 | error = -EINVAL; |
256 | goto err_out; | 256 | goto err_out; |
257 | } | 257 | } |
258 | if (!list_empty(&flow->list)) { | ||
259 | error = -EEXIST; | ||
260 | goto err_out; | ||
261 | } | ||
262 | } else { | 258 | } else { |
263 | int i; | 259 | int i; |
264 | unsigned long cl; | 260 | unsigned long cl; |
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index abd904be4287..47496098d35c 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c | |||
@@ -761,8 +761,8 @@ init_vf(struct hfsc_class *cl, unsigned int len) | |||
761 | if (f != cl->cl_f) { | 761 | if (f != cl->cl_f) { |
762 | cl->cl_f = f; | 762 | cl->cl_f = f; |
763 | cftree_update(cl); | 763 | cftree_update(cl); |
764 | update_cfmin(cl->cl_parent); | ||
765 | } | 764 | } |
765 | update_cfmin(cl->cl_parent); | ||
766 | } | 766 | } |
767 | } | 767 | } |
768 | 768 | ||
diff --git a/net/sctp/output.c b/net/sctp/output.c index a646681f5acd..bcc4590ccaf2 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c | |||
@@ -92,7 +92,6 @@ struct sctp_packet *sctp_packet_config(struct sctp_packet *packet, | |||
92 | SCTP_DEBUG_PRINTK("%s: packet:%p vtag:0x%x\n", __func__, | 92 | SCTP_DEBUG_PRINTK("%s: packet:%p vtag:0x%x\n", __func__, |
93 | packet, vtag); | 93 | packet, vtag); |
94 | 94 | ||
95 | sctp_packet_reset(packet); | ||
96 | packet->vtag = vtag; | 95 | packet->vtag = vtag; |
97 | 96 | ||
98 | if (ecn_capable && sctp_packet_empty(packet)) { | 97 | if (ecn_capable && sctp_packet_empty(packet)) { |
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 24b2cd555637..d344dc481ccc 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
@@ -1232,6 +1232,18 @@ out: | |||
1232 | return 0; | 1232 | return 0; |
1233 | } | 1233 | } |
1234 | 1234 | ||
1235 | static bool list_has_sctp_addr(const struct list_head *list, | ||
1236 | union sctp_addr *ipaddr) | ||
1237 | { | ||
1238 | struct sctp_transport *addr; | ||
1239 | |||
1240 | list_for_each_entry(addr, list, transports) { | ||
1241 | if (sctp_cmp_addr_exact(ipaddr, &addr->ipaddr)) | ||
1242 | return true; | ||
1243 | } | ||
1244 | |||
1245 | return false; | ||
1246 | } | ||
1235 | /* A restart is occurring, check to make sure no new addresses | 1247 | /* A restart is occurring, check to make sure no new addresses |
1236 | * are being added as we may be under a takeover attack. | 1248 | * are being added as we may be under a takeover attack. |
1237 | */ | 1249 | */ |
@@ -1240,10 +1252,10 @@ static int sctp_sf_check_restart_addrs(const struct sctp_association *new_asoc, | |||
1240 | struct sctp_chunk *init, | 1252 | struct sctp_chunk *init, |
1241 | sctp_cmd_seq_t *commands) | 1253 | sctp_cmd_seq_t *commands) |
1242 | { | 1254 | { |
1243 | struct sctp_transport *new_addr, *addr; | 1255 | struct sctp_transport *new_addr; |
1244 | int found; | 1256 | int ret = 1; |
1245 | 1257 | ||
1246 | /* Implementor's Guide - Sectin 5.2.2 | 1258 | /* Implementor's Guide - Section 5.2.2 |
1247 | * ... | 1259 | * ... |
1248 | * Before responding the endpoint MUST check to see if the | 1260 | * Before responding the endpoint MUST check to see if the |
1249 | * unexpected INIT adds new addresses to the association. If new | 1261 | * unexpected INIT adds new addresses to the association. If new |
@@ -1254,31 +1266,19 @@ static int sctp_sf_check_restart_addrs(const struct sctp_association *new_asoc, | |||
1254 | /* Search through all current addresses and make sure | 1266 | /* Search through all current addresses and make sure |
1255 | * we aren't adding any new ones. | 1267 | * we aren't adding any new ones. |
1256 | */ | 1268 | */ |
1257 | new_addr = NULL; | ||
1258 | found = 0; | ||
1259 | |||
1260 | list_for_each_entry(new_addr, &new_asoc->peer.transport_addr_list, | 1269 | list_for_each_entry(new_addr, &new_asoc->peer.transport_addr_list, |
1261 | transports) { | 1270 | transports) { |
1262 | found = 0; | 1271 | if (!list_has_sctp_addr(&asoc->peer.transport_addr_list, |
1263 | list_for_each_entry(addr, &asoc->peer.transport_addr_list, | 1272 | &new_addr->ipaddr)) { |
1264 | transports) { | 1273 | sctp_sf_send_restart_abort(&new_addr->ipaddr, init, |
1265 | if (sctp_cmp_addr_exact(&new_addr->ipaddr, | 1274 | commands); |
1266 | &addr->ipaddr)) { | 1275 | ret = 0; |
1267 | found = 1; | ||
1268 | break; | ||
1269 | } | ||
1270 | } | ||
1271 | if (!found) | ||
1272 | break; | 1276 | break; |
1273 | } | 1277 | } |
1274 | |||
1275 | /* If a new address was added, ABORT the sender. */ | ||
1276 | if (!found && new_addr) { | ||
1277 | sctp_sf_send_restart_abort(&new_addr->ipaddr, init, commands); | ||
1278 | } | 1278 | } |
1279 | 1279 | ||
1280 | /* Return success if all addresses were found. */ | 1280 | /* Return success if all addresses were found. */ |
1281 | return found; | 1281 | return ret; |
1282 | } | 1282 | } |
1283 | 1283 | ||
1284 | /* Populate the verification/tie tags based on overlapping INIT | 1284 | /* Populate the verification/tie tags based on overlapping INIT |
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index 36cb66022a27..e9eaaf7d43c1 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c | |||
@@ -38,7 +38,7 @@ static const struct rpc_authops *auth_flavors[RPC_AUTH_MAXFLAVOR] = { | |||
38 | static LIST_HEAD(cred_unused); | 38 | static LIST_HEAD(cred_unused); |
39 | static unsigned long number_cred_unused; | 39 | static unsigned long number_cred_unused; |
40 | 40 | ||
41 | #define MAX_HASHTABLE_BITS (10) | 41 | #define MAX_HASHTABLE_BITS (14) |
42 | static int param_set_hashtbl_sz(const char *val, const struct kernel_param *kp) | 42 | static int param_set_hashtbl_sz(const char *val, const struct kernel_param *kp) |
43 | { | 43 | { |
44 | unsigned long num; | 44 | unsigned long num; |
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index dcfc66bab2bb..12c485982814 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c | |||
@@ -745,17 +745,18 @@ gss_pipe_release(struct inode *inode) | |||
745 | struct rpc_inode *rpci = RPC_I(inode); | 745 | struct rpc_inode *rpci = RPC_I(inode); |
746 | struct gss_upcall_msg *gss_msg; | 746 | struct gss_upcall_msg *gss_msg; |
747 | 747 | ||
748 | restart: | ||
748 | spin_lock(&inode->i_lock); | 749 | spin_lock(&inode->i_lock); |
749 | while (!list_empty(&rpci->in_downcall)) { | 750 | list_for_each_entry(gss_msg, &rpci->in_downcall, list) { |
750 | 751 | ||
751 | gss_msg = list_entry(rpci->in_downcall.next, | 752 | if (!list_empty(&gss_msg->msg.list)) |
752 | struct gss_upcall_msg, list); | 753 | continue; |
753 | gss_msg->msg.errno = -EPIPE; | 754 | gss_msg->msg.errno = -EPIPE; |
754 | atomic_inc(&gss_msg->count); | 755 | atomic_inc(&gss_msg->count); |
755 | __gss_unhash_msg(gss_msg); | 756 | __gss_unhash_msg(gss_msg); |
756 | spin_unlock(&inode->i_lock); | 757 | spin_unlock(&inode->i_lock); |
757 | gss_release_msg(gss_msg); | 758 | gss_release_msg(gss_msg); |
758 | spin_lock(&inode->i_lock); | 759 | goto restart; |
759 | } | 760 | } |
760 | spin_unlock(&inode->i_lock); | 761 | spin_unlock(&inode->i_lock); |
761 | 762 | ||
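
gss_pipe_release above switches from popping the list head in a while loop to list_for_each_entry() with a "goto restart" after re-taking the lock: once the spinlock is dropped to release a message, the list may have changed under us, so the scan starts over rather than trusting a stale cursor. A userspace sketch of the same restart pattern over a mutex-protected singly linked list is below; the node type and helpers are invented for the example.

/* Sketch: when processing a node requires dropping the list lock, restart
 * the scan afterwards instead of continuing with a possibly stale cursor. */
#include <pthread.h>
#include <stdio.h>

struct node {
	struct node *next;
	int needs_work;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void process_unlocked(struct node *n)
{
	/* Expensive work done without the lock held. */
	n->needs_work = 0;
}

static void drain(struct node *head)
{
	struct node *n;

restart:
	pthread_mutex_lock(&list_lock);
	for (n = head; n; n = n->next) {
		if (!n->needs_work)
			continue;
		pthread_mutex_unlock(&list_lock);    /* cursor is now untrusted */
		process_unlocked(n);
		goto restart;                        /* rescan from the top */
	}
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	struct node c = { NULL, 1 };
	struct node b = { &c,   0 };
	struct node a = { &b,   1 };

	drain(&a);
	printf("remaining work: %d %d %d\n",
	       a.needs_work, b.needs_work, c.needs_work);
	return 0;
}
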
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index 032644610524..778e5dfc5144 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c | |||
@@ -237,6 +237,7 @@ get_key(const void *p, const void *end, | |||
237 | if (!supported_gss_krb5_enctype(alg)) { | 237 | if (!supported_gss_krb5_enctype(alg)) { |
238 | printk(KERN_WARNING "gss_kerberos_mech: unsupported " | 238 | printk(KERN_WARNING "gss_kerberos_mech: unsupported " |
239 | "encryption key algorithm %d\n", alg); | 239 | "encryption key algorithm %d\n", alg); |
240 | p = ERR_PTR(-EINVAL); | ||
240 | goto out_err; | 241 | goto out_err; |
241 | } | 242 | } |
242 | p = simple_get_netobj(p, end, &key); | 243 | p = simple_get_netobj(p, end, &key); |
@@ -282,15 +283,19 @@ gss_import_v1_context(const void *p, const void *end, struct krb5_ctx *ctx) | |||
282 | ctx->enctype = ENCTYPE_DES_CBC_RAW; | 283 | ctx->enctype = ENCTYPE_DES_CBC_RAW; |
283 | 284 | ||
284 | ctx->gk5e = get_gss_krb5_enctype(ctx->enctype); | 285 | ctx->gk5e = get_gss_krb5_enctype(ctx->enctype); |
285 | if (ctx->gk5e == NULL) | 286 | if (ctx->gk5e == NULL) { |
287 | p = ERR_PTR(-EINVAL); | ||
286 | goto out_err; | 288 | goto out_err; |
289 | } | ||
287 | 290 | ||
288 | /* The downcall format was designed before we completely understood | 291 | /* The downcall format was designed before we completely understood |
289 | * the uses of the context fields; so it includes some stuff we | 292 | * the uses of the context fields; so it includes some stuff we |
290 | * just give some minimal sanity-checking, and some we ignore | 293 | * just give some minimal sanity-checking, and some we ignore |
291 | * completely (like the next twenty bytes): */ | 294 | * completely (like the next twenty bytes): */ |
292 | if (unlikely(p + 20 > end || p + 20 < p)) | 295 | if (unlikely(p + 20 > end || p + 20 < p)) { |
296 | p = ERR_PTR(-EFAULT); | ||
293 | goto out_err; | 297 | goto out_err; |
298 | } | ||
294 | p += 20; | 299 | p += 20; |
295 | p = simple_get_bytes(p, end, &tmp, sizeof(tmp)); | 300 | p = simple_get_bytes(p, end, &tmp, sizeof(tmp)); |
296 | if (IS_ERR(p)) | 301 | if (IS_ERR(p)) |
@@ -619,6 +624,7 @@ gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx, | |||
619 | if (ctx->seq_send64 != ctx->seq_send) { | 624 | if (ctx->seq_send64 != ctx->seq_send) { |
620 | dprintk("%s: seq_send64 %lx, seq_send %x overflow?\n", __func__, | 625 | dprintk("%s: seq_send64 %lx, seq_send %x overflow?\n", __func__, |
621 | (long unsigned)ctx->seq_send64, ctx->seq_send); | 626 | (long unsigned)ctx->seq_send64, ctx->seq_send); |
627 | p = ERR_PTR(-EINVAL); | ||
622 | goto out_err; | 628 | goto out_err; |
623 | } | 629 | } |
624 | p = simple_get_bytes(p, end, &ctx->enctype, sizeof(ctx->enctype)); | 630 | p = simple_get_bytes(p, end, &ctx->enctype, sizeof(ctx->enctype)); |
diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c index dc3f1f5ed865..adade3d313f2 100644 --- a/net/sunrpc/auth_gss/gss_spkm3_mech.c +++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c | |||
@@ -100,6 +100,7 @@ gss_import_sec_context_spkm3(const void *p, size_t len, | |||
100 | if (version != 1) { | 100 | if (version != 1) { |
101 | dprintk("RPC: unknown spkm3 token format: " | 101 | dprintk("RPC: unknown spkm3 token format: " |
102 | "obsolete nfs-utils?\n"); | 102 | "obsolete nfs-utils?\n"); |
103 | p = ERR_PTR(-EINVAL); | ||
103 | goto out_err_free_ctx; | 104 | goto out_err_free_ctx; |
104 | } | 105 | } |
105 | 106 | ||
@@ -135,8 +136,10 @@ gss_import_sec_context_spkm3(const void *p, size_t len, | |||
135 | if (IS_ERR(p)) | 136 | if (IS_ERR(p)) |
136 | goto out_err_free_intg_alg; | 137 | goto out_err_free_intg_alg; |
137 | 138 | ||
138 | if (p != end) | 139 | if (p != end) { |
140 | p = ERR_PTR(-EFAULT); | ||
139 | goto out_err_free_intg_key; | 141 | goto out_err_free_intg_key; |
142 | } | ||
140 | 143 | ||
141 | ctx_id->internal_ctx_id = ctx; | 144 | ctx_id->internal_ctx_id = ctx; |
142 | 145 | ||
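
The gss_krb5 and spkm3 hunks above all set p to ERR_PTR(-EINVAL) or ERR_PTR(-EFAULT) before jumping to the error label: those error paths derive the function's status from p, so jumping with the old, still-valid pointer would report a garbage status instead of a real negative errno. The compact userspace rendition below re-implements the error-pointer idiom so the convention is visible outside the kernel headers; the parse() helper and the literal errno value are invented for the demo.

/* Minimal userspace rendition of the kernel's error-pointer idiom:
 * errno values are encoded in the topmost 4095 addresses of pointer space. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_ERRNO 4095
#define EINVAL    22    /* spelled out literally only for the demo */

static inline void *ERR_PTR(long error)       { return (void *)error; }
static inline long  PTR_ERR(const void *ptr)  { return (long)ptr; }
static inline bool  IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* A parser that must report failure through its pointer return value. */
static const void *parse(const void *p, bool ok)
{
	if (!ok)
		return ERR_PTR(-EINVAL);   /* never fall through with the old p */
	return p;
}

int main(void)
{
	int datum = 42;
	const void *p;

	p = parse(&datum, true);
	printf("ok:   IS_ERR=%d\n", IS_ERR(p));
	p = parse(&datum, false);
	printf("fail: IS_ERR=%d err=%ld\n", IS_ERR(p), PTR_ERR(p));
	return 0;
}
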
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 2388d83b68ff..fa5549079d79 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
@@ -226,7 +226,7 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru | |||
226 | goto out_no_principal; | 226 | goto out_no_principal; |
227 | } | 227 | } |
228 | 228 | ||
229 | kref_init(&clnt->cl_kref); | 229 | atomic_set(&clnt->cl_count, 1); |
230 | 230 | ||
231 | err = rpc_setup_pipedir(clnt, program->pipe_dir_name); | 231 | err = rpc_setup_pipedir(clnt, program->pipe_dir_name); |
232 | if (err < 0) | 232 | if (err < 0) |
@@ -390,14 +390,14 @@ rpc_clone_client(struct rpc_clnt *clnt) | |||
390 | if (new->cl_principal == NULL) | 390 | if (new->cl_principal == NULL) |
391 | goto out_no_principal; | 391 | goto out_no_principal; |
392 | } | 392 | } |
393 | kref_init(&new->cl_kref); | 393 | atomic_set(&new->cl_count, 1); |
394 | err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name); | 394 | err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name); |
395 | if (err != 0) | 395 | if (err != 0) |
396 | goto out_no_path; | 396 | goto out_no_path; |
397 | if (new->cl_auth) | 397 | if (new->cl_auth) |
398 | atomic_inc(&new->cl_auth->au_count); | 398 | atomic_inc(&new->cl_auth->au_count); |
399 | xprt_get(clnt->cl_xprt); | 399 | xprt_get(clnt->cl_xprt); |
400 | kref_get(&clnt->cl_kref); | 400 | atomic_inc(&clnt->cl_count); |
401 | rpc_register_client(new); | 401 | rpc_register_client(new); |
402 | rpciod_up(); | 402 | rpciod_up(); |
403 | return new; | 403 | return new; |
@@ -465,10 +465,8 @@ EXPORT_SYMBOL_GPL(rpc_shutdown_client); | |||
465 | * Free an RPC client | 465 | * Free an RPC client |
466 | */ | 466 | */ |
467 | static void | 467 | static void |
468 | rpc_free_client(struct kref *kref) | 468 | rpc_free_client(struct rpc_clnt *clnt) |
469 | { | 469 | { |
470 | struct rpc_clnt *clnt = container_of(kref, struct rpc_clnt, cl_kref); | ||
471 | |||
472 | dprintk("RPC: destroying %s client for %s\n", | 470 | dprintk("RPC: destroying %s client for %s\n", |
473 | clnt->cl_protname, clnt->cl_server); | 471 | clnt->cl_protname, clnt->cl_server); |
474 | if (!IS_ERR(clnt->cl_path.dentry)) { | 472 | if (!IS_ERR(clnt->cl_path.dentry)) { |
@@ -495,12 +493,10 @@ out_free: | |||
495 | * Free an RPC client | 493 | * Free an RPC client |
496 | */ | 494 | */ |
497 | static void | 495 | static void |
498 | rpc_free_auth(struct kref *kref) | 496 | rpc_free_auth(struct rpc_clnt *clnt) |
499 | { | 497 | { |
500 | struct rpc_clnt *clnt = container_of(kref, struct rpc_clnt, cl_kref); | ||
501 | |||
502 | if (clnt->cl_auth == NULL) { | 498 | if (clnt->cl_auth == NULL) { |
503 | rpc_free_client(kref); | 499 | rpc_free_client(clnt); |
504 | return; | 500 | return; |
505 | } | 501 | } |
506 | 502 | ||
@@ -509,10 +505,11 @@ rpc_free_auth(struct kref *kref) | |||
509 | * release remaining GSS contexts. This mechanism ensures | 505 | * release remaining GSS contexts. This mechanism ensures |
510 | * that it can do so safely. | 506 | * that it can do so safely. |
511 | */ | 507 | */ |
512 | kref_init(kref); | 508 | atomic_inc(&clnt->cl_count); |
513 | rpcauth_release(clnt->cl_auth); | 509 | rpcauth_release(clnt->cl_auth); |
514 | clnt->cl_auth = NULL; | 510 | clnt->cl_auth = NULL; |
515 | kref_put(kref, rpc_free_client); | 511 | if (atomic_dec_and_test(&clnt->cl_count)) |
512 | rpc_free_client(clnt); | ||
516 | } | 513 | } |
517 | 514 | ||
518 | /* | 515 | /* |
@@ -525,7 +522,8 @@ rpc_release_client(struct rpc_clnt *clnt) | |||
525 | 522 | ||
526 | if (list_empty(&clnt->cl_tasks)) | 523 | if (list_empty(&clnt->cl_tasks)) |
527 | wake_up(&destroy_wait); | 524 | wake_up(&destroy_wait); |
528 | kref_put(&clnt->cl_kref, rpc_free_auth); | 525 | if (atomic_dec_and_test(&clnt->cl_count)) |
526 | rpc_free_auth(clnt); | ||
529 | } | 527 | } |
530 | 528 | ||
531 | /** | 529 | /** |
@@ -588,7 +586,7 @@ void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt) | |||
588 | if (clnt != NULL) { | 586 | if (clnt != NULL) { |
589 | rpc_task_release_client(task); | 587 | rpc_task_release_client(task); |
590 | task->tk_client = clnt; | 588 | task->tk_client = clnt; |
591 | kref_get(&clnt->cl_kref); | 589 | atomic_inc(&clnt->cl_count); |
592 | if (clnt->cl_softrtry) | 590 | if (clnt->cl_softrtry) |
593 | task->tk_flags |= RPC_TASK_SOFT; | 591 | task->tk_flags |= RPC_TASK_SOFT; |
594 | /* Add to the client's list of all tasks */ | 592 | /* Add to the client's list of all tasks */ |
@@ -931,7 +929,7 @@ call_reserveresult(struct rpc_task *task) | |||
931 | task->tk_status = 0; | 929 | task->tk_status = 0; |
932 | if (status >= 0) { | 930 | if (status >= 0) { |
933 | if (task->tk_rqstp) { | 931 | if (task->tk_rqstp) { |
934 | task->tk_action = call_allocate; | 932 | task->tk_action = call_refresh; |
935 | return; | 933 | return; |
936 | } | 934 | } |
937 | 935 | ||
@@ -966,13 +964,54 @@ call_reserveresult(struct rpc_task *task) | |||
966 | } | 964 | } |
967 | 965 | ||
968 | /* | 966 | /* |
969 | * 2. Allocate the buffer. For details, see sched.c:rpc_malloc. | 967 | * 2. Bind and/or refresh the credentials |
968 | */ | ||
969 | static void | ||
970 | call_refresh(struct rpc_task *task) | ||
971 | { | ||
972 | dprint_status(task); | ||
973 | |||
974 | task->tk_action = call_refreshresult; | ||
975 | task->tk_status = 0; | ||
976 | task->tk_client->cl_stats->rpcauthrefresh++; | ||
977 | rpcauth_refreshcred(task); | ||
978 | } | ||
979 | |||
980 | /* | ||
981 | * 2a. Process the results of a credential refresh | ||
982 | */ | ||
983 | static void | ||
984 | call_refreshresult(struct rpc_task *task) | ||
985 | { | ||
986 | int status = task->tk_status; | ||
987 | |||
988 | dprint_status(task); | ||
989 | |||
990 | task->tk_status = 0; | ||
991 | task->tk_action = call_allocate; | ||
992 | if (status >= 0 && rpcauth_uptodatecred(task)) | ||
993 | return; | ||
994 | switch (status) { | ||
995 | case -EACCES: | ||
996 | rpc_exit(task, -EACCES); | ||
997 | return; | ||
998 | case -ENOMEM: | ||
999 | rpc_exit(task, -ENOMEM); | ||
1000 | return; | ||
1001 | case -ETIMEDOUT: | ||
1002 | rpc_delay(task, 3*HZ); | ||
1003 | } | ||
1004 | task->tk_action = call_refresh; | ||
1005 | } | ||
1006 | |||
1007 | /* | ||
1008 | * 2b. Allocate the buffer. For details, see sched.c:rpc_malloc. | ||
970 | * (Note: buffer memory is freed in xprt_release). | 1009 | * (Note: buffer memory is freed in xprt_release). |
971 | */ | 1010 | */ |
972 | static void | 1011 | static void |
973 | call_allocate(struct rpc_task *task) | 1012 | call_allocate(struct rpc_task *task) |
974 | { | 1013 | { |
975 | unsigned int slack = task->tk_client->cl_auth->au_cslack; | 1014 | unsigned int slack = task->tk_rqstp->rq_cred->cr_auth->au_cslack; |
976 | struct rpc_rqst *req = task->tk_rqstp; | 1015 | struct rpc_rqst *req = task->tk_rqstp; |
977 | struct rpc_xprt *xprt = task->tk_xprt; | 1016 | struct rpc_xprt *xprt = task->tk_xprt; |
978 | struct rpc_procinfo *proc = task->tk_msg.rpc_proc; | 1017 | struct rpc_procinfo *proc = task->tk_msg.rpc_proc; |
@@ -980,7 +1019,7 @@ call_allocate(struct rpc_task *task) | |||
980 | dprint_status(task); | 1019 | dprint_status(task); |
981 | 1020 | ||
982 | task->tk_status = 0; | 1021 | task->tk_status = 0; |
983 | task->tk_action = call_refresh; | 1022 | task->tk_action = call_bind; |
984 | 1023 | ||
985 | if (req->rq_buffer) | 1024 | if (req->rq_buffer) |
986 | return; | 1025 | return; |
@@ -1017,47 +1056,6 @@ call_allocate(struct rpc_task *task) | |||
1017 | rpc_exit(task, -ERESTARTSYS); | 1056 | rpc_exit(task, -ERESTARTSYS); |
1018 | } | 1057 | } |
1019 | 1058 | ||
1020 | /* | ||
1021 | * 2a. Bind and/or refresh the credentials | ||
1022 | */ | ||
1023 | static void | ||
1024 | call_refresh(struct rpc_task *task) | ||
1025 | { | ||
1026 | dprint_status(task); | ||
1027 | |||
1028 | task->tk_action = call_refreshresult; | ||
1029 | task->tk_status = 0; | ||
1030 | task->tk_client->cl_stats->rpcauthrefresh++; | ||
1031 | rpcauth_refreshcred(task); | ||
1032 | } | ||
1033 | |||
1034 | /* | ||
1035 | * 2b. Process the results of a credential refresh | ||
1036 | */ | ||
1037 | static void | ||
1038 | call_refreshresult(struct rpc_task *task) | ||
1039 | { | ||
1040 | int status = task->tk_status; | ||
1041 | |||
1042 | dprint_status(task); | ||
1043 | |||
1044 | task->tk_status = 0; | ||
1045 | task->tk_action = call_bind; | ||
1046 | if (status >= 0 && rpcauth_uptodatecred(task)) | ||
1047 | return; | ||
1048 | switch (status) { | ||
1049 | case -EACCES: | ||
1050 | rpc_exit(task, -EACCES); | ||
1051 | return; | ||
1052 | case -ENOMEM: | ||
1053 | rpc_exit(task, -ENOMEM); | ||
1054 | return; | ||
1055 | case -ETIMEDOUT: | ||
1056 | rpc_delay(task, 3*HZ); | ||
1057 | } | ||
1058 | task->tk_action = call_refresh; | ||
1059 | } | ||
1060 | |||
1061 | static inline int | 1059 | static inline int |
1062 | rpc_task_need_encode(struct rpc_task *task) | 1060 | rpc_task_need_encode(struct rpc_task *task) |
1063 | { | 1061 | { |
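
The clnt.c hunks above replace the struct kref embedded in rpc_clnt with a bare atomic counter (cl_count), so rpc_free_auth() can re-take a temporary reference with atomic_inc() while the auth layer tears down its remaining GSS contexts, and they also move credential refresh (call_refresh/call_refreshresult) ahead of buffer allocation in the RPC state machine. Below is a minimal userspace sketch of that get/put pattern only; struct client and its helpers are illustrative stand-ins, not the kernel's rpc_clnt or its API.

/* Minimal userspace sketch of the atomic get/put pattern used above.
 * "client" and its fields are hypothetical stand-ins, not kernel structs. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct client {
	atomic_int count;	/* plays the role of cl_count */
	char *auth;		/* plays the role of cl_auth  */
};

static void free_client(struct client *clnt)
{
	printf("destroying client\n");
	free(clnt);
}

static void free_auth(struct client *clnt)
{
	if (clnt->auth == NULL) {
		free_client(clnt);
		return;
	}
	/* Hold an extra reference while the auth layer runs its own
	 * teardown, mirroring the atomic_inc() in rpc_free_auth(). */
	atomic_fetch_add(&clnt->count, 1);
	free(clnt->auth);		/* stands in for rpcauth_release() */
	clnt->auth = NULL;
	if (atomic_fetch_sub(&clnt->count, 1) == 1)
		free_client(clnt);
}

static void client_get(struct client *clnt)
{
	atomic_fetch_add(&clnt->count, 1);
}

static void client_put(struct client *clnt)
{
	/* atomic_fetch_sub() returns the old value: 1 means we dropped the
	 * last reference, like atomic_dec_and_test() in the patch. */
	if (atomic_fetch_sub(&clnt->count, 1) == 1)
		free_auth(clnt);
}

int main(void)
{
	struct client *clnt = calloc(1, sizeof(*clnt));

	atomic_init(&clnt->count, 1);
	clnt->auth = malloc(16);

	client_get(clnt);	/* e.g. a task taking a reference */
	client_put(clnt);	/* task done */
	client_put(clnt);	/* final put frees auth, then the client */
	return 0;
}

As in the patch, exactly one caller observes the 1 -> 0 transition and performs the teardown, which is what makes the temporary re-increment inside free_auth() safe.
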
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 95ccbcf45d3e..8c8eef2b8f26 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c | |||
@@ -48,7 +48,7 @@ static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head, | |||
48 | return; | 48 | return; |
49 | do { | 49 | do { |
50 | msg = list_entry(head->next, struct rpc_pipe_msg, list); | 50 | msg = list_entry(head->next, struct rpc_pipe_msg, list); |
51 | list_del(&msg->list); | 51 | list_del_init(&msg->list); |
52 | msg->errno = err; | 52 | msg->errno = err; |
53 | destroy_msg(msg); | 53 | destroy_msg(msg); |
54 | } while (!list_empty(head)); | 54 | } while (!list_empty(head)); |
@@ -208,7 +208,7 @@ rpc_pipe_release(struct inode *inode, struct file *filp) | |||
208 | if (msg != NULL) { | 208 | if (msg != NULL) { |
209 | spin_lock(&inode->i_lock); | 209 | spin_lock(&inode->i_lock); |
210 | msg->errno = -EAGAIN; | 210 | msg->errno = -EAGAIN; |
211 | list_del(&msg->list); | 211 | list_del_init(&msg->list); |
212 | spin_unlock(&inode->i_lock); | 212 | spin_unlock(&inode->i_lock); |
213 | rpci->ops->destroy_msg(msg); | 213 | rpci->ops->destroy_msg(msg); |
214 | } | 214 | } |
@@ -268,7 +268,7 @@ rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset) | |||
268 | if (res < 0 || msg->len == msg->copied) { | 268 | if (res < 0 || msg->len == msg->copied) { |
269 | filp->private_data = NULL; | 269 | filp->private_data = NULL; |
270 | spin_lock(&inode->i_lock); | 270 | spin_lock(&inode->i_lock); |
271 | list_del(&msg->list); | 271 | list_del_init(&msg->list); |
272 | spin_unlock(&inode->i_lock); | 272 | spin_unlock(&inode->i_lock); |
273 | rpci->ops->destroy_msg(msg); | 273 | rpci->ops->destroy_msg(msg); |
274 | } | 274 | } |
@@ -371,21 +371,23 @@ rpc_show_info(struct seq_file *m, void *v) | |||
371 | static int | 371 | static int |
372 | rpc_info_open(struct inode *inode, struct file *file) | 372 | rpc_info_open(struct inode *inode, struct file *file) |
373 | { | 373 | { |
374 | struct rpc_clnt *clnt; | 374 | struct rpc_clnt *clnt = NULL; |
375 | int ret = single_open(file, rpc_show_info, NULL); | 375 | int ret = single_open(file, rpc_show_info, NULL); |
376 | 376 | ||
377 | if (!ret) { | 377 | if (!ret) { |
378 | struct seq_file *m = file->private_data; | 378 | struct seq_file *m = file->private_data; |
379 | mutex_lock(&inode->i_mutex); | 379 | |
380 | clnt = RPC_I(inode)->private; | 380 | spin_lock(&file->f_path.dentry->d_lock); |
381 | if (clnt) { | 381 | if (!d_unhashed(file->f_path.dentry)) |
382 | kref_get(&clnt->cl_kref); | 382 | clnt = RPC_I(inode)->private; |
383 | if (clnt != NULL && atomic_inc_not_zero(&clnt->cl_count)) { | ||
384 | spin_unlock(&file->f_path.dentry->d_lock); | ||
383 | m->private = clnt; | 385 | m->private = clnt; |
384 | } else { | 386 | } else { |
387 | spin_unlock(&file->f_path.dentry->d_lock); | ||
385 | single_release(inode, file); | 388 | single_release(inode, file); |
386 | ret = -EINVAL; | 389 | ret = -EINVAL; |
387 | } | 390 | } |
388 | mutex_unlock(&inode->i_mutex); | ||
389 | } | 391 | } |
390 | return ret; | 392 | return ret; |
391 | } | 393 | } |
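
The rpc_pipe.c hunks switch list_del() to list_del_init() so that, once a queued upcall message has been purged, later code can still test or unlink it without following stale pointers, and rpc_info_open() now pins the client with atomic_inc_not_zero() under the dentry lock instead of taking i_mutex. A self-contained sketch of the list_del_init() half, assuming a tiny reimplementation of the list primitives rather than the kernel's <linux/list.h>:

/* Why list_del_init(): after it, the entry's own links are re-initialised,
 * so later code can test list_empty() on the entry or delete it again
 * without touching stale pointers. */
#include <stdbool.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

static void __list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

static void list_del(struct list_head *entry)		/* leaves stale links */
{
	__list_del(entry);
}

static void list_del_init(struct list_head *entry)	/* safe to test again */
{
	__list_del(entry);
	INIT_LIST_HEAD(entry);
}

static bool list_empty(const struct list_head *h) { return h->next == h; }

int main(void)
{
	struct list_head queue, msg;

	INIT_LIST_HEAD(&queue);
	INIT_LIST_HEAD(&msg);
	list_add_tail(&msg, &queue);

	list_del_init(&msg);
	/* A second code path (like rpc_pipe_release after rpc_purge_list)
	 * can now look at the entry without dereferencing stale pointers. */
	printf("after list_del_init: empty=%d\n", list_empty(&msg));

	list_add_tail(&msg, &queue);
	list_del(&msg);
	printf("after plain list_del: empty=%d (still points at old neighbours)\n",
	       list_empty(&msg));
	return 0;
}

With plain list_del() the entry keeps pointing at its old neighbours, which is exactly the state the purge/release race could trip over.
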
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index b6309db56226..fe9306bf10cc 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -800,7 +800,7 @@ static void xs_udp_data_ready(struct sock *sk, int len) | |||
800 | u32 _xid; | 800 | u32 _xid; |
801 | __be32 *xp; | 801 | __be32 *xp; |
802 | 802 | ||
803 | read_lock(&sk->sk_callback_lock); | 803 | read_lock_bh(&sk->sk_callback_lock); |
804 | dprintk("RPC: xs_udp_data_ready...\n"); | 804 | dprintk("RPC: xs_udp_data_ready...\n"); |
805 | if (!(xprt = xprt_from_sock(sk))) | 805 | if (!(xprt = xprt_from_sock(sk))) |
806 | goto out; | 806 | goto out; |
@@ -852,7 +852,7 @@ static void xs_udp_data_ready(struct sock *sk, int len) | |||
852 | dropit: | 852 | dropit: |
853 | skb_free_datagram(sk, skb); | 853 | skb_free_datagram(sk, skb); |
854 | out: | 854 | out: |
855 | read_unlock(&sk->sk_callback_lock); | 855 | read_unlock_bh(&sk->sk_callback_lock); |
856 | } | 856 | } |
857 | 857 | ||
858 | static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc) | 858 | static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc) |
@@ -1229,7 +1229,7 @@ static void xs_tcp_data_ready(struct sock *sk, int bytes) | |||
1229 | 1229 | ||
1230 | dprintk("RPC: xs_tcp_data_ready...\n"); | 1230 | dprintk("RPC: xs_tcp_data_ready...\n"); |
1231 | 1231 | ||
1232 | read_lock(&sk->sk_callback_lock); | 1232 | read_lock_bh(&sk->sk_callback_lock); |
1233 | if (!(xprt = xprt_from_sock(sk))) | 1233 | if (!(xprt = xprt_from_sock(sk))) |
1234 | goto out; | 1234 | goto out; |
1235 | if (xprt->shutdown) | 1235 | if (xprt->shutdown) |
@@ -1248,7 +1248,7 @@ static void xs_tcp_data_ready(struct sock *sk, int bytes) | |||
1248 | read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv); | 1248 | read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv); |
1249 | } while (read > 0); | 1249 | } while (read > 0); |
1250 | out: | 1250 | out: |
1251 | read_unlock(&sk->sk_callback_lock); | 1251 | read_unlock_bh(&sk->sk_callback_lock); |
1252 | } | 1252 | } |
1253 | 1253 | ||
1254 | /* | 1254 | /* |
@@ -1301,7 +1301,7 @@ static void xs_tcp_state_change(struct sock *sk) | |||
1301 | { | 1301 | { |
1302 | struct rpc_xprt *xprt; | 1302 | struct rpc_xprt *xprt; |
1303 | 1303 | ||
1304 | read_lock(&sk->sk_callback_lock); | 1304 | read_lock_bh(&sk->sk_callback_lock); |
1305 | if (!(xprt = xprt_from_sock(sk))) | 1305 | if (!(xprt = xprt_from_sock(sk))) |
1306 | goto out; | 1306 | goto out; |
1307 | dprintk("RPC: xs_tcp_state_change client %p...\n", xprt); | 1307 | dprintk("RPC: xs_tcp_state_change client %p...\n", xprt); |
@@ -1313,7 +1313,7 @@ static void xs_tcp_state_change(struct sock *sk) | |||
1313 | 1313 | ||
1314 | switch (sk->sk_state) { | 1314 | switch (sk->sk_state) { |
1315 | case TCP_ESTABLISHED: | 1315 | case TCP_ESTABLISHED: |
1316 | spin_lock_bh(&xprt->transport_lock); | 1316 | spin_lock(&xprt->transport_lock); |
1317 | if (!xprt_test_and_set_connected(xprt)) { | 1317 | if (!xprt_test_and_set_connected(xprt)) { |
1318 | struct sock_xprt *transport = container_of(xprt, | 1318 | struct sock_xprt *transport = container_of(xprt, |
1319 | struct sock_xprt, xprt); | 1319 | struct sock_xprt, xprt); |
@@ -1327,7 +1327,7 @@ static void xs_tcp_state_change(struct sock *sk) | |||
1327 | 1327 | ||
1328 | xprt_wake_pending_tasks(xprt, -EAGAIN); | 1328 | xprt_wake_pending_tasks(xprt, -EAGAIN); |
1329 | } | 1329 | } |
1330 | spin_unlock_bh(&xprt->transport_lock); | 1330 | spin_unlock(&xprt->transport_lock); |
1331 | break; | 1331 | break; |
1332 | case TCP_FIN_WAIT1: | 1332 | case TCP_FIN_WAIT1: |
1333 | /* The client initiated a shutdown of the socket */ | 1333 | /* The client initiated a shutdown of the socket */ |
@@ -1365,7 +1365,7 @@ static void xs_tcp_state_change(struct sock *sk) | |||
1365 | xs_sock_mark_closed(xprt); | 1365 | xs_sock_mark_closed(xprt); |
1366 | } | 1366 | } |
1367 | out: | 1367 | out: |
1368 | read_unlock(&sk->sk_callback_lock); | 1368 | read_unlock_bh(&sk->sk_callback_lock); |
1369 | } | 1369 | } |
1370 | 1370 | ||
1371 | /** | 1371 | /** |
@@ -1376,7 +1376,7 @@ static void xs_error_report(struct sock *sk) | |||
1376 | { | 1376 | { |
1377 | struct rpc_xprt *xprt; | 1377 | struct rpc_xprt *xprt; |
1378 | 1378 | ||
1379 | read_lock(&sk->sk_callback_lock); | 1379 | read_lock_bh(&sk->sk_callback_lock); |
1380 | if (!(xprt = xprt_from_sock(sk))) | 1380 | if (!(xprt = xprt_from_sock(sk))) |
1381 | goto out; | 1381 | goto out; |
1382 | dprintk("RPC: %s client %p...\n" | 1382 | dprintk("RPC: %s client %p...\n" |
@@ -1384,7 +1384,7 @@ static void xs_error_report(struct sock *sk) | |||
1384 | __func__, xprt, sk->sk_err); | 1384 | __func__, xprt, sk->sk_err); |
1385 | xprt_wake_pending_tasks(xprt, -EAGAIN); | 1385 | xprt_wake_pending_tasks(xprt, -EAGAIN); |
1386 | out: | 1386 | out: |
1387 | read_unlock(&sk->sk_callback_lock); | 1387 | read_unlock_bh(&sk->sk_callback_lock); |
1388 | } | 1388 | } |
1389 | 1389 | ||
1390 | static void xs_write_space(struct sock *sk) | 1390 | static void xs_write_space(struct sock *sk) |
@@ -1416,13 +1416,13 @@ static void xs_write_space(struct sock *sk) | |||
1416 | */ | 1416 | */ |
1417 | static void xs_udp_write_space(struct sock *sk) | 1417 | static void xs_udp_write_space(struct sock *sk) |
1418 | { | 1418 | { |
1419 | read_lock(&sk->sk_callback_lock); | 1419 | read_lock_bh(&sk->sk_callback_lock); |
1420 | 1420 | ||
1421 | /* from net/core/sock.c:sock_def_write_space */ | 1421 | /* from net/core/sock.c:sock_def_write_space */ |
1422 | if (sock_writeable(sk)) | 1422 | if (sock_writeable(sk)) |
1423 | xs_write_space(sk); | 1423 | xs_write_space(sk); |
1424 | 1424 | ||
1425 | read_unlock(&sk->sk_callback_lock); | 1425 | read_unlock_bh(&sk->sk_callback_lock); |
1426 | } | 1426 | } |
1427 | 1427 | ||
1428 | /** | 1428 | /** |
@@ -1437,13 +1437,13 @@ static void xs_udp_write_space(struct sock *sk) | |||
1437 | */ | 1437 | */ |
1438 | static void xs_tcp_write_space(struct sock *sk) | 1438 | static void xs_tcp_write_space(struct sock *sk) |
1439 | { | 1439 | { |
1440 | read_lock(&sk->sk_callback_lock); | 1440 | read_lock_bh(&sk->sk_callback_lock); |
1441 | 1441 | ||
1442 | /* from net/core/stream.c:sk_stream_write_space */ | 1442 | /* from net/core/stream.c:sk_stream_write_space */ |
1443 | if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) | 1443 | if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) |
1444 | xs_write_space(sk); | 1444 | xs_write_space(sk); |
1445 | 1445 | ||
1446 | read_unlock(&sk->sk_callback_lock); | 1446 | read_unlock_bh(&sk->sk_callback_lock); |
1447 | } | 1447 | } |
1448 | 1448 | ||
1449 | static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt) | 1449 | static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt) |
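
Every socket callback in xprtsock.c now takes sk->sk_callback_lock with the _bh variants, and the TCP_ESTABLISHED branch correspondingly drops the _bh suffix from transport_lock because bottom halves are already disabled at that point. A toy userspace model of that nesting rule follows; the bh_disabled counter and the wrapper names only mimic local_bh_disable()/local_bh_enable(), they are not kernel interfaces.

/* Toy model of the locking rule behind the hunks above: once the outer
 * read_lock_bh() has disabled bottom halves, the inner lock can use the
 * plain spin_lock() variant. */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static _Thread_local int bh_disabled;	/* models the per-CPU softirq count */
static pthread_rwlock_t callback_lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t transport_lock = PTHREAD_MUTEX_INITIALIZER;

static void local_bh_disable_(void) { bh_disabled++; }
static void local_bh_enable_(void)  { assert(bh_disabled > 0); bh_disabled--; }

static void read_lock_bh_(pthread_rwlock_t *l)
{
	local_bh_disable_();		/* softirqs off before taking the lock */
	pthread_rwlock_rdlock(l);
}

static void read_unlock_bh_(pthread_rwlock_t *l)
{
	pthread_rwlock_unlock(l);
	local_bh_enable_();
}

static void state_change(void)
{
	read_lock_bh_(&callback_lock);

	/* Equivalent of the TCP_ESTABLISHED branch: the plain lock is enough
	 * here because bottom halves are already off via the outer lock. */
	assert(bh_disabled > 0);
	pthread_mutex_lock(&transport_lock);
	printf("connected, bh_disabled=%d\n", bh_disabled);
	pthread_mutex_unlock(&transport_lock);

	read_unlock_bh_(&callback_lock);
}

int main(void)
{
	state_change();
	assert(bh_disabled == 0);
	return 0;
}
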
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 4414a18c63b4..0b39b2451ea5 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
@@ -692,6 +692,7 @@ static int unix_autobind(struct socket *sock) | |||
692 | static u32 ordernum = 1; | 692 | static u32 ordernum = 1; |
693 | struct unix_address *addr; | 693 | struct unix_address *addr; |
694 | int err; | 694 | int err; |
695 | unsigned int retries = 0; | ||
695 | 696 | ||
696 | mutex_lock(&u->readlock); | 697 | mutex_lock(&u->readlock); |
697 | 698 | ||
@@ -717,9 +718,17 @@ retry: | |||
717 | if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type, | 718 | if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type, |
718 | addr->hash)) { | 719 | addr->hash)) { |
719 | spin_unlock(&unix_table_lock); | 720 | spin_unlock(&unix_table_lock); |
720 | /* Sanity yield. It is unusual case, but yet... */ | 721 | /* |
721 | if (!(ordernum&0xFF)) | 722 | * __unix_find_socket_byname() may take a long time if many names |
722 | yield(); | 723 | * are already in use. |
724 | */ | ||
725 | cond_resched(); | ||
726 | /* Give up if all names seem to be in use. */ | ||
727 | if (retries++ == 0xFFFFF) { | ||
728 | err = -ENOSPC; | ||
729 | kfree(addr); | ||
730 | goto out; | ||
731 | } | ||
723 | goto retry; | 732 | goto retry; |
724 | } | 733 | } |
725 | addr->hash ^= sk->sk_type; | 734 | addr->hash ^= sk->sk_type; |
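
unix_autobind() used to loop forever over candidate names, yielding only every 256 collisions; the hunk above reschedules on every collision and gives up with -ENOSPC once 0xFFFFF retries are exhausted. A hedged sketch of the same bounded-probe loop, with a plain bitmap standing in for __unix_find_socket_byname():

/* Keep probing names, but give up with ENOSPC once a retry budget is
 * exhausted instead of looping (and yielding) forever. */
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NAME_SPACE	0x100000u	/* 2^20 candidate names */
#define MAX_RETRIES	0xFFFFFu	/* same budget as the patch */

static uint8_t in_use[NAME_SPACE / 8];

static bool name_in_use(uint32_t n) { return in_use[n / 8] & (1u << (n % 8)); }
static void claim_name(uint32_t n)  { in_use[n / 8] |= (1u << (n % 8)); }

static int autobind(uint32_t *ordernum, uint32_t *out)
{
	uint32_t retries = 0;

	for (;;) {
		uint32_t candidate = (*ordernum)++ & (NAME_SPACE - 1);

		if (!name_in_use(candidate)) {
			claim_name(candidate);
			*out = candidate;
			return 0;
		}
		/* cond_resched() would go here in the kernel version. */
		if (retries++ == MAX_RETRIES)
			return -ENOSPC;	/* all names appear to be taken */
	}
}

int main(void)
{
	uint32_t ordernum = 1, name;

	memset(in_use, 0xff, sizeof(in_use));	/* worst case: everything taken */
	if (autobind(&ordernum, &name) == -ENOSPC)
		printf("autobind: out of names, gave up after bounded retries\n");

	memset(in_use, 0, sizeof(in_use));	/* normal case */
	if (autobind(&ordernum, &name) == 0)
		printf("autobind: bound to name %05x\n", (unsigned)name);
	return 0;
}
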
diff --git a/net/wireless/core.c b/net/wireless/core.c index 541e2fff5e9c..d6d046b9f6f2 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c | |||
@@ -475,12 +475,10 @@ int wiphy_register(struct wiphy *wiphy) | |||
475 | mutex_lock(&cfg80211_mutex); | 475 | mutex_lock(&cfg80211_mutex); |
476 | 476 | ||
477 | res = device_add(&rdev->wiphy.dev); | 477 | res = device_add(&rdev->wiphy.dev); |
478 | if (res) | 478 | if (res) { |
479 | goto out_unlock; | 479 | mutex_unlock(&cfg80211_mutex); |
480 | 480 | return res; | |
481 | res = rfkill_register(rdev->rfkill); | 481 | } |
482 | if (res) | ||
483 | goto out_rm_dev; | ||
484 | 482 | ||
485 | /* set up regulatory info */ | 483 | /* set up regulatory info */ |
486 | wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE); | 484 | wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE); |
@@ -509,13 +507,18 @@ int wiphy_register(struct wiphy *wiphy) | |||
509 | cfg80211_debugfs_rdev_add(rdev); | 507 | cfg80211_debugfs_rdev_add(rdev); |
510 | mutex_unlock(&cfg80211_mutex); | 508 | mutex_unlock(&cfg80211_mutex); |
511 | 509 | ||
510 | /* | ||
511 | * due to a locking dependency this has to be outside of the | ||
512 | * cfg80211_mutex lock | ||
513 | */ | ||
514 | res = rfkill_register(rdev->rfkill); | ||
515 | if (res) | ||
516 | goto out_rm_dev; | ||
517 | |||
512 | return 0; | 518 | return 0; |
513 | 519 | ||
514 | out_rm_dev: | 520 | out_rm_dev: |
515 | device_del(&rdev->wiphy.dev); | 521 | device_del(&rdev->wiphy.dev); |
516 | |||
517 | out_unlock: | ||
518 | mutex_unlock(&cfg80211_mutex); | ||
519 | return res; | 522 | return res; |
520 | } | 523 | } |
521 | EXPORT_SYMBOL(wiphy_register); | 524 | EXPORT_SYMBOL(wiphy_register); |
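
wiphy_register() now calls rfkill_register() only after cfg80211_mutex has been dropped, and unwinds device_add() by hand if that late registration fails. The sketch below shows the general shape with hypothetical names: do everything that needs your own mutex under it, release it, then make the call that would otherwise create a lock dependency, undoing earlier work on failure.

/* Shape of the wiphy_register() fix above.  All names are stand-ins. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t core_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool device_added;

static int device_add_(void)  { device_added = true;  return 0; }
static void device_del_(void) { device_added = false; }

/* Stand-in for rfkill_register(): may take other subsystem locks, so it
 * must not be called with core_mutex held. */
static int external_register(bool fail) { return fail ? -1 : 0; }

static int register_device(bool make_external_fail)
{
	int res;

	pthread_mutex_lock(&core_mutex);
	res = device_add_();
	if (res) {
		pthread_mutex_unlock(&core_mutex);
		return res;
	}
	/* ... the rest of the setup that really needs core_mutex ... */
	pthread_mutex_unlock(&core_mutex);

	/* Done outside the lock to avoid the lock dependency. */
	res = external_register(make_external_fail);
	if (res) {
		device_del_();		/* unwind the earlier device_add_() */
		return res;
	}
	return 0;
}

int main(void)
{
	printf("ok path: %d\n", register_device(false));
	printf("failure path: %d (device_added=%d)\n",
	       register_device(true), device_added);
	return 0;
}
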
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c index bb5e0a5ecfa1..7e5c3a45f811 100644 --- a/net/wireless/wext-compat.c +++ b/net/wireless/wext-compat.c | |||
@@ -1420,6 +1420,9 @@ int cfg80211_wext_giwessid(struct net_device *dev, | |||
1420 | { | 1420 | { |
1421 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 1421 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
1422 | 1422 | ||
1423 | data->flags = 0; | ||
1424 | data->length = 0; | ||
1425 | |||
1423 | switch (wdev->iftype) { | 1426 | switch (wdev->iftype) { |
1424 | case NL80211_IFTYPE_ADHOC: | 1427 | case NL80211_IFTYPE_ADHOC: |
1425 | return cfg80211_ibss_wext_giwessid(dev, info, data, ssid); | 1428 | return cfg80211_ibss_wext_giwessid(dev, info, data, ssid); |
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c index 0ef17bc42bac..8f5116f5af19 100644 --- a/net/wireless/wext-core.c +++ b/net/wireless/wext-core.c | |||
@@ -782,6 +782,22 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd, | |||
782 | } | 782 | } |
783 | } | 783 | } |
784 | 784 | ||
785 | if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) { | ||
786 | /* | ||
787 | * If this is a GET, but not NOMAX, it means that the extra | ||
788 | * data is not bounded by userspace, but by max_tokens. Thus | ||
789 | * set the length to max_tokens. This matches the extra data | ||
790 | * allocation. | ||
791 | * The driver should fill it with the number of tokens it | ||
792 | * provided, and it may check iwp->length rather than having | ||
793 | * knowledge of max_tokens. If the driver doesn't change the | ||
794 | * iwp->length, this ioctl just copies back max_token tokens | ||
795 | * filled with zeroes. Hopefully the driver isn't claiming | ||
796 | * them to be valid data. | ||
797 | */ | ||
798 | iwp->length = descr->max_tokens; | ||
799 | } | ||
800 | |||
785 | err = handler(dev, info, (union iwreq_data *) iwp, extra); | 801 | err = handler(dev, info, (union iwreq_data *) iwp, extra); |
786 | 802 | ||
787 | iwp->length += essid_compat; | 803 | iwp->length += essid_compat; |
diff --git a/net/wireless/wext-priv.c b/net/wireless/wext-priv.c index 3feb28e41c53..674d426a9d24 100644 --- a/net/wireless/wext-priv.c +++ b/net/wireless/wext-priv.c | |||
@@ -152,7 +152,7 @@ static int ioctl_private_iw_point(struct iw_point *iwp, unsigned int cmd, | |||
152 | } else if (!iwp->pointer) | 152 | } else if (!iwp->pointer) |
153 | return -EFAULT; | 153 | return -EFAULT; |
154 | 154 | ||
155 | extra = kmalloc(extra_size, GFP_KERNEL); | 155 | extra = kzalloc(extra_size, GFP_KERNEL); |
156 | if (!extra) | 156 | if (!extra) |
157 | return -ENOMEM; | 157 | return -ENOMEM; |
158 | 158 | ||
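
Taken together, the wext-compat, wext-core and wext-priv hunks make GET requests safe against sloppy drivers: the flags and length are cleared, the extra buffer is zeroed (kzalloc), and iwp->length is preset to max_tokens, so a handler that never sets the length copies back zeroes rather than uninitialised memory. A small sketch of that convention, with made-up handler names:

/* Caller presets the worst-case length over a zeroed buffer; the handler
 * may shrink it but never has to know max_tokens itself. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct request {
	size_t length;		/* number of valid bytes on return */
	unsigned char *buf;
};

/* A well-behaved handler: fills some data and shrinks the length. */
static int handler_good(struct request *req)
{
	const char msg[] = "ssid-1";

	memcpy(req->buf, msg, sizeof(msg));
	req->length = sizeof(msg);
	return 0;
}

/* A sloppy handler that never touches req->length: with the preset length
 * and the zeroed buffer the caller just copies back zeroes. */
static int handler_sloppy(struct request *req)
{
	(void)req;
	return 0;
}

static void do_get(int (*handler)(struct request *), size_t max_tokens)
{
	struct request req;

	req.buf = calloc(1, max_tokens);	/* kzalloc() stand-in */
	req.length = max_tokens;		/* preset to the worst case */
	handler(&req);
	printf("copying %zu byte(s) back to userspace\n", req.length);
	free(req.buf);
}

int main(void)
{
	do_get(handler_good, 32);
	do_get(handler_sloppy, 32);
	return 0;
}
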
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c index a3cca0a94346..64f2ae1fdc15 100644 --- a/net/xfrm/xfrm_output.c +++ b/net/xfrm/xfrm_output.c | |||
@@ -101,7 +101,7 @@ resume: | |||
101 | err = -EHOSTUNREACH; | 101 | err = -EHOSTUNREACH; |
102 | goto error_nolock; | 102 | goto error_nolock; |
103 | } | 103 | } |
104 | skb_dst_set_noref(skb, dst); | 104 | skb_dst_set(skb, dst_clone(dst)); |
105 | x = dst->xfrm; | 105 | x = dst->xfrm; |
106 | } while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL)); | 106 | } while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL)); |
107 | 107 | ||
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 2b3ed7ad4933..cbab6e1a8c9c 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -1175,9 +1175,8 @@ xfrm_tmpl_resolve_one(struct xfrm_policy *policy, struct flowi *fl, | |||
1175 | tmpl->mode == XFRM_MODE_BEET) { | 1175 | tmpl->mode == XFRM_MODE_BEET) { |
1176 | remote = &tmpl->id.daddr; | 1176 | remote = &tmpl->id.daddr; |
1177 | local = &tmpl->saddr; | 1177 | local = &tmpl->saddr; |
1178 | family = tmpl->encap_family; | 1178 | if (xfrm_addr_any(local, tmpl->encap_family)) { |
1179 | if (xfrm_addr_any(local, family)) { | 1179 | error = xfrm_get_saddr(net, &tmp, remote, tmpl->encap_family); |
1180 | error = xfrm_get_saddr(net, &tmp, remote, family); | ||
1181 | if (error) | 1180 | if (error) |
1182 | goto fail; | 1181 | goto fail; |
1183 | local = &tmp; | 1182 | local = &tmp; |
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 5208b12fbfb4..eb96ce52f178 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c | |||
@@ -656,15 +656,23 @@ void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si) | |||
656 | EXPORT_SYMBOL(xfrm_sad_getinfo); | 656 | EXPORT_SYMBOL(xfrm_sad_getinfo); |
657 | 657 | ||
658 | static int | 658 | static int |
659 | xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl, | 659 | xfrm_init_tempstate(struct xfrm_state *x, struct flowi *fl, |
660 | struct xfrm_tmpl *tmpl, | 660 | struct xfrm_tmpl *tmpl, |
661 | xfrm_address_t *daddr, xfrm_address_t *saddr, | 661 | xfrm_address_t *daddr, xfrm_address_t *saddr, |
662 | unsigned short family) | 662 | unsigned short family) |
663 | { | 663 | { |
664 | struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family); | 664 | struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family); |
665 | if (!afinfo) | 665 | if (!afinfo) |
666 | return -1; | 666 | return -1; |
667 | afinfo->init_tempsel(x, fl, tmpl, daddr, saddr); | 667 | afinfo->init_tempsel(&x->sel, fl); |
668 | |||
669 | if (family != tmpl->encap_family) { | ||
670 | xfrm_state_put_afinfo(afinfo); | ||
671 | afinfo = xfrm_state_get_afinfo(tmpl->encap_family); | ||
672 | if (!afinfo) | ||
673 | return -1; | ||
674 | } | ||
675 | afinfo->init_temprop(x, tmpl, daddr, saddr); | ||
668 | xfrm_state_put_afinfo(afinfo); | 676 | xfrm_state_put_afinfo(afinfo); |
669 | return 0; | 677 | return 0; |
670 | } | 678 | } |
@@ -790,37 +798,38 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr, | |||
790 | int error = 0; | 798 | int error = 0; |
791 | struct xfrm_state *best = NULL; | 799 | struct xfrm_state *best = NULL; |
792 | u32 mark = pol->mark.v & pol->mark.m; | 800 | u32 mark = pol->mark.v & pol->mark.m; |
801 | unsigned short encap_family = tmpl->encap_family; | ||
793 | 802 | ||
794 | to_put = NULL; | 803 | to_put = NULL; |
795 | 804 | ||
796 | spin_lock_bh(&xfrm_state_lock); | 805 | spin_lock_bh(&xfrm_state_lock); |
797 | h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, family); | 806 | h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family); |
798 | hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) { | 807 | hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) { |
799 | if (x->props.family == family && | 808 | if (x->props.family == encap_family && |
800 | x->props.reqid == tmpl->reqid && | 809 | x->props.reqid == tmpl->reqid && |
801 | (mark & x->mark.m) == x->mark.v && | 810 | (mark & x->mark.m) == x->mark.v && |
802 | !(x->props.flags & XFRM_STATE_WILDRECV) && | 811 | !(x->props.flags & XFRM_STATE_WILDRECV) && |
803 | xfrm_state_addr_check(x, daddr, saddr, family) && | 812 | xfrm_state_addr_check(x, daddr, saddr, encap_family) && |
804 | tmpl->mode == x->props.mode && | 813 | tmpl->mode == x->props.mode && |
805 | tmpl->id.proto == x->id.proto && | 814 | tmpl->id.proto == x->id.proto && |
806 | (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) | 815 | (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) |
807 | xfrm_state_look_at(pol, x, fl, family, daddr, saddr, | 816 | xfrm_state_look_at(pol, x, fl, encap_family, daddr, saddr, |
808 | &best, &acquire_in_progress, &error); | 817 | &best, &acquire_in_progress, &error); |
809 | } | 818 | } |
810 | if (best) | 819 | if (best) |
811 | goto found; | 820 | goto found; |
812 | 821 | ||
813 | h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, family); | 822 | h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family); |
814 | hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h_wildcard, bydst) { | 823 | hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h_wildcard, bydst) { |
815 | if (x->props.family == family && | 824 | if (x->props.family == encap_family && |
816 | x->props.reqid == tmpl->reqid && | 825 | x->props.reqid == tmpl->reqid && |
817 | (mark & x->mark.m) == x->mark.v && | 826 | (mark & x->mark.m) == x->mark.v && |
818 | !(x->props.flags & XFRM_STATE_WILDRECV) && | 827 | !(x->props.flags & XFRM_STATE_WILDRECV) && |
819 | xfrm_state_addr_check(x, daddr, saddr, family) && | 828 | xfrm_state_addr_check(x, daddr, saddr, encap_family) && |
820 | tmpl->mode == x->props.mode && | 829 | tmpl->mode == x->props.mode && |
821 | tmpl->id.proto == x->id.proto && | 830 | tmpl->id.proto == x->id.proto && |
822 | (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) | 831 | (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) |
823 | xfrm_state_look_at(pol, x, fl, family, daddr, saddr, | 832 | xfrm_state_look_at(pol, x, fl, encap_family, daddr, saddr, |
824 | &best, &acquire_in_progress, &error); | 833 | &best, &acquire_in_progress, &error); |
825 | } | 834 | } |
826 | 835 | ||
@@ -829,7 +838,7 @@ found: | |||
829 | if (!x && !error && !acquire_in_progress) { | 838 | if (!x && !error && !acquire_in_progress) { |
830 | if (tmpl->id.spi && | 839 | if (tmpl->id.spi && |
831 | (x0 = __xfrm_state_lookup(net, mark, daddr, tmpl->id.spi, | 840 | (x0 = __xfrm_state_lookup(net, mark, daddr, tmpl->id.spi, |
832 | tmpl->id.proto, family)) != NULL) { | 841 | tmpl->id.proto, encap_family)) != NULL) { |
833 | to_put = x0; | 842 | to_put = x0; |
834 | error = -EEXIST; | 843 | error = -EEXIST; |
835 | goto out; | 844 | goto out; |
@@ -839,9 +848,9 @@ found: | |||
839 | error = -ENOMEM; | 848 | error = -ENOMEM; |
840 | goto out; | 849 | goto out; |
841 | } | 850 | } |
842 | /* Initialize temporary selector matching only | 851 | /* Initialize temporary state matching only |
843 | * to current session. */ | 852 | * to current session. */ |
844 | xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family); | 853 | xfrm_init_tempstate(x, fl, tmpl, daddr, saddr, family); |
845 | memcpy(&x->mark, &pol->mark, sizeof(x->mark)); | 854 | memcpy(&x->mark, &pol->mark, sizeof(x->mark)); |
846 | 855 | ||
847 | error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid); | 856 | error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid); |
@@ -856,10 +865,10 @@ found: | |||
856 | x->km.state = XFRM_STATE_ACQ; | 865 | x->km.state = XFRM_STATE_ACQ; |
857 | list_add(&x->km.all, &net->xfrm.state_all); | 866 | list_add(&x->km.all, &net->xfrm.state_all); |
858 | hlist_add_head(&x->bydst, net->xfrm.state_bydst+h); | 867 | hlist_add_head(&x->bydst, net->xfrm.state_bydst+h); |
859 | h = xfrm_src_hash(net, daddr, saddr, family); | 868 | h = xfrm_src_hash(net, daddr, saddr, encap_family); |
860 | hlist_add_head(&x->bysrc, net->xfrm.state_bysrc+h); | 869 | hlist_add_head(&x->bysrc, net->xfrm.state_bysrc+h); |
861 | if (x->id.spi) { | 870 | if (x->id.spi) { |
862 | h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, family); | 871 | h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family); |
863 | hlist_add_head(&x->byspi, net->xfrm.state_byspi+h); | 872 | hlist_add_head(&x->byspi, net->xfrm.state_byspi+h); |
864 | } | 873 | } |
865 | x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires; | 874 | x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires; |
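
The xfrm_state.c changes split temporary-state setup so the selector still follows the flow's family while the addresses, hashing and lookups use the template's encap_family, which differs for interfamily tunnels (for example an IPv6 flow carried in an IPv4 ESP tunnel). A simplified sketch of that per-family dispatch; the afinfo table below is a stand-in for xfrm_state_afinfo, not the real structure.

/* Pick the per-family helpers by encap_family when it differs from the
 * flow's family. */
#include <stdio.h>

enum family { FAM_INET = 2, FAM_INET6 = 10 };

struct afinfo {
	enum family family;
	void (*init_temprop)(const char *who);
};

static void init_temprop_v4(const char *who) { printf("%s: IPv4 address setup\n", who); }
static void init_temprop_v6(const char *who) { printf("%s: IPv6 address setup\n", who); }

static const struct afinfo afinfo_tab[] = {
	{ FAM_INET,  init_temprop_v4 },
	{ FAM_INET6, init_temprop_v6 },
};

static const struct afinfo *get_afinfo(enum family f)
{
	for (unsigned i = 0; i < sizeof(afinfo_tab) / sizeof(afinfo_tab[0]); i++)
		if (afinfo_tab[i].family == f)
			return &afinfo_tab[i];
	return NULL;
}

static int init_tempstate(enum family flow_family, enum family encap_family)
{
	const struct afinfo *afinfo = get_afinfo(flow_family);

	if (!afinfo)
		return -1;
	/* ... selector init uses the flow's family here ... */

	if (encap_family != flow_family) {
		afinfo = get_afinfo(encap_family);	/* switch tables, as the patch does */
		if (!afinfo)
			return -1;
	}
	afinfo->init_temprop("temporary state");
	return 0;
}

int main(void)
{
	/* IPv6 flow carried in an IPv4 ESP tunnel: addresses must be IPv4. */
	return init_tempstate(FAM_INET6, FAM_INET);
}
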
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index b14ed4b1f27c..8bae6b22c846 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c | |||
@@ -1801,7 +1801,7 @@ static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
1801 | struct xfrm_user_expire *ue = nlmsg_data(nlh); | 1801 | struct xfrm_user_expire *ue = nlmsg_data(nlh); |
1802 | struct xfrm_usersa_info *p = &ue->state; | 1802 | struct xfrm_usersa_info *p = &ue->state; |
1803 | struct xfrm_mark m; | 1803 | struct xfrm_mark m; |
1804 | u32 mark = xfrm_mark_get(attrs, &m);; | 1804 | u32 mark = xfrm_mark_get(attrs, &m); |
1805 | 1805 | ||
1806 | x = xfrm_state_lookup(net, mark, &p->id.daddr, p->id.spi, p->id.proto, p->family); | 1806 | x = xfrm_state_lookup(net, mark, &p->id.daddr, p->id.spi, p->id.proto, p->family); |
1807 | 1807 | ||
diff --git a/samples/kfifo/dma-example.c b/samples/kfifo/dma-example.c index ee03a4f0b64f..06473791c08a 100644 --- a/samples/kfifo/dma-example.c +++ b/samples/kfifo/dma-example.c | |||
@@ -24,6 +24,7 @@ static int __init example_init(void) | |||
24 | { | 24 | { |
25 | int i; | 25 | int i; |
26 | unsigned int ret; | 26 | unsigned int ret; |
27 | unsigned int nents; | ||
27 | struct scatterlist sg[10]; | 28 | struct scatterlist sg[10]; |
28 | 29 | ||
29 | printk(KERN_INFO "DMA fifo test start\n"); | 30 | printk(KERN_INFO "DMA fifo test start\n"); |
@@ -61,9 +62,9 @@ static int __init example_init(void) | |||
61 | * byte at the beginning, after the kfifo_skip(). | 62 | * byte at the beginning, after the kfifo_skip(). |
62 | */ | 63 | */ |
63 | sg_init_table(sg, ARRAY_SIZE(sg)); | 64 | sg_init_table(sg, ARRAY_SIZE(sg)); |
64 | ret = kfifo_dma_in_prepare(&fifo, sg, ARRAY_SIZE(sg), FIFO_SIZE); | 65 | nents = kfifo_dma_in_prepare(&fifo, sg, ARRAY_SIZE(sg), FIFO_SIZE); |
65 | printk(KERN_INFO "DMA sgl entries: %d\n", ret); | 66 | printk(KERN_INFO "DMA sgl entries: %d\n", nents); |
66 | if (!ret) { | 67 | if (!nents) { |
67 | /* fifo is full and no sgl was created */ | 68 | /* fifo is full and no sgl was created */ |
68 | printk(KERN_WARNING "error kfifo_dma_in_prepare\n"); | 69 | printk(KERN_WARNING "error kfifo_dma_in_prepare\n"); |
69 | return -EIO; | 70 | return -EIO; |
@@ -71,7 +72,7 @@ static int __init example_init(void) | |||
71 | 72 | ||
72 | /* receive data */ | 73 | /* receive data */ |
73 | printk(KERN_INFO "scatterlist for receive:\n"); | 74 | printk(KERN_INFO "scatterlist for receive:\n"); |
74 | for (i = 0; i < ARRAY_SIZE(sg); i++) { | 75 | for (i = 0; i < nents; i++) { |
75 | printk(KERN_INFO | 76 | printk(KERN_INFO |
76 | "sg[%d] -> " | 77 | "sg[%d] -> " |
77 | "page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n", | 78 | "page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n", |
@@ -91,16 +92,16 @@ static int __init example_init(void) | |||
91 | kfifo_dma_in_finish(&fifo, ret); | 92 | kfifo_dma_in_finish(&fifo, ret); |
92 | 93 | ||
93 | /* Prepare to transmit data, example: 8 bytes */ | 94 | /* Prepare to transmit data, example: 8 bytes */ |
94 | ret = kfifo_dma_out_prepare(&fifo, sg, ARRAY_SIZE(sg), 8); | 95 | nents = kfifo_dma_out_prepare(&fifo, sg, ARRAY_SIZE(sg), 8); |
95 | printk(KERN_INFO "DMA sgl entries: %d\n", ret); | 96 | printk(KERN_INFO "DMA sgl entries: %d\n", nents); |
96 | if (!ret) { | 97 | if (!nents) { |
97 | /* no data was available and no sgl was created */ | 98 | /* no data was available and no sgl was created */ |
98 | printk(KERN_WARNING "error kfifo_dma_out_prepare\n"); | 99 | printk(KERN_WARNING "error kfifo_dma_out_prepare\n"); |
99 | return -EIO; | 100 | return -EIO; |
100 | } | 101 | } |
101 | 102 | ||
102 | printk(KERN_INFO "scatterlist for transmit:\n"); | 103 | printk(KERN_INFO "scatterlist for transmit:\n"); |
103 | for (i = 0; i < ARRAY_SIZE(sg); i++) { | 104 | for (i = 0; i < nents; i++) { |
104 | printk(KERN_INFO | 105 | printk(KERN_INFO |
105 | "sg[%d] -> " | 106 | "sg[%d] -> " |
106 | "page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n", | 107 | "page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n", |
diff --git a/scripts/basic/docproc.c b/scripts/basic/docproc.c index 79ab973fb43a..fc3b18d844af 100644 --- a/scripts/basic/docproc.c +++ b/scripts/basic/docproc.c | |||
@@ -34,12 +34,14 @@ | |||
34 | * | 34 | * |
35 | */ | 35 | */ |
36 | 36 | ||
37 | #define _GNU_SOURCE | ||
37 | #include <stdio.h> | 38 | #include <stdio.h> |
38 | #include <stdlib.h> | 39 | #include <stdlib.h> |
39 | #include <string.h> | 40 | #include <string.h> |
40 | #include <ctype.h> | 41 | #include <ctype.h> |
41 | #include <unistd.h> | 42 | #include <unistd.h> |
42 | #include <limits.h> | 43 | #include <limits.h> |
44 | #include <errno.h> | ||
43 | #include <sys/types.h> | 45 | #include <sys/types.h> |
44 | #include <sys/wait.h> | 46 | #include <sys/wait.h> |
45 | 47 | ||
@@ -54,6 +56,7 @@ typedef void FILEONLY(char * file); | |||
54 | FILEONLY *internalfunctions; | 56 | FILEONLY *internalfunctions; |
55 | FILEONLY *externalfunctions; | 57 | FILEONLY *externalfunctions; |
56 | FILEONLY *symbolsonly; | 58 | FILEONLY *symbolsonly; |
59 | FILEONLY *findall; | ||
57 | 60 | ||
58 | typedef void FILELINE(char * file, char * line); | 61 | typedef void FILELINE(char * file, char * line); |
59 | FILELINE * singlefunctions; | 62 | FILELINE * singlefunctions; |
@@ -65,12 +68,30 @@ FILELINE * docsection; | |||
65 | #define KERNELDOCPATH "scripts/" | 68 | #define KERNELDOCPATH "scripts/" |
66 | #define KERNELDOC "kernel-doc" | 69 | #define KERNELDOC "kernel-doc" |
67 | #define DOCBOOK "-docbook" | 70 | #define DOCBOOK "-docbook" |
71 | #define LIST "-list" | ||
68 | #define FUNCTION "-function" | 72 | #define FUNCTION "-function" |
69 | #define NOFUNCTION "-nofunction" | 73 | #define NOFUNCTION "-nofunction" |
70 | #define NODOCSECTIONS "-no-doc-sections" | 74 | #define NODOCSECTIONS "-no-doc-sections" |
71 | 75 | ||
72 | static char *srctree, *kernsrctree; | 76 | static char *srctree, *kernsrctree; |
73 | 77 | ||
78 | static char **all_list = NULL; | ||
79 | static int all_list_len = 0; | ||
80 | |||
81 | static void consume_symbol(const char *sym) | ||
82 | { | ||
83 | int i; | ||
84 | |||
85 | for (i = 0; i < all_list_len; i++) { | ||
86 | if (!all_list[i]) | ||
87 | continue; | ||
88 | if (strcmp(sym, all_list[i])) | ||
89 | continue; | ||
90 | all_list[i] = NULL; | ||
91 | break; | ||
92 | } | ||
93 | } | ||
94 | |||
74 | static void usage (void) | 95 | static void usage (void) |
75 | { | 96 | { |
76 | fprintf(stderr, "Usage: docproc {doc|depend} file\n"); | 97 | fprintf(stderr, "Usage: docproc {doc|depend} file\n"); |
@@ -248,6 +269,7 @@ static void docfunctions(char * filename, char * type) | |||
248 | struct symfile * sym = &symfilelist[i]; | 269 | struct symfile * sym = &symfilelist[i]; |
249 | for (j=0; j < sym->symbolcnt; j++) { | 270 | for (j=0; j < sym->symbolcnt; j++) { |
250 | vec[idx++] = type; | 271 | vec[idx++] = type; |
272 | consume_symbol(sym->symbollist[j].name); | ||
251 | vec[idx++] = sym->symbollist[j].name; | 273 | vec[idx++] = sym->symbollist[j].name; |
252 | } | 274 | } |
253 | } | 275 | } |
@@ -287,6 +309,11 @@ static void singfunc(char * filename, char * line) | |||
287 | vec[idx++] = &line[i]; | 309 | vec[idx++] = &line[i]; |
288 | } | 310 | } |
289 | } | 311 | } |
312 | for (i = 0; i < idx; i++) { | ||
313 | if (strcmp(vec[i], FUNCTION)) | ||
314 | continue; | ||
315 | consume_symbol(vec[i + 1]); | ||
316 | } | ||
290 | vec[idx++] = filename; | 317 | vec[idx++] = filename; |
291 | vec[idx] = NULL; | 318 | vec[idx] = NULL; |
292 | exec_kernel_doc(vec); | 319 | exec_kernel_doc(vec); |
@@ -306,6 +333,10 @@ static void docsect(char *filename, char *line) | |||
306 | if (*s == '\n') | 333 | if (*s == '\n') |
307 | *s = '\0'; | 334 | *s = '\0'; |
308 | 335 | ||
336 | asprintf(&s, "DOC: %s", line); | ||
337 | consume_symbol(s); | ||
338 | free(s); | ||
339 | |||
309 | vec[0] = KERNELDOC; | 340 | vec[0] = KERNELDOC; |
310 | vec[1] = DOCBOOK; | 341 | vec[1] = DOCBOOK; |
311 | vec[2] = FUNCTION; | 342 | vec[2] = FUNCTION; |
@@ -315,6 +346,84 @@ static void docsect(char *filename, char *line) | |||
315 | exec_kernel_doc(vec); | 346 | exec_kernel_doc(vec); |
316 | } | 347 | } |
317 | 348 | ||
349 | static void find_all_symbols(char *filename) | ||
350 | { | ||
351 | char *vec[4]; /* kerneldoc -list file NULL */ | ||
352 | pid_t pid; | ||
353 | int ret, i, count, start; | ||
354 | char real_filename[PATH_MAX + 1]; | ||
355 | int pipefd[2]; | ||
356 | char *data, *str; | ||
357 | size_t data_len = 0; | ||
358 | |||
359 | vec[0] = KERNELDOC; | ||
360 | vec[1] = LIST; | ||
361 | vec[2] = filename; | ||
362 | vec[3] = NULL; | ||
363 | |||
364 | if (pipe(pipefd)) { | ||
365 | perror("pipe"); | ||
366 | exit(1); | ||
367 | } | ||
368 | |||
369 | switch (pid=fork()) { | ||
370 | case -1: | ||
371 | perror("fork"); | ||
372 | exit(1); | ||
373 | case 0: | ||
374 | close(pipefd[0]); | ||
375 | dup2(pipefd[1], 1); | ||
376 | memset(real_filename, 0, sizeof(real_filename)); | ||
377 | strncat(real_filename, kernsrctree, PATH_MAX); | ||
378 | strncat(real_filename, "/" KERNELDOCPATH KERNELDOC, | ||
379 | PATH_MAX - strlen(real_filename)); | ||
380 | execvp(real_filename, vec); | ||
381 | fprintf(stderr, "exec "); | ||
382 | perror(real_filename); | ||
383 | exit(1); | ||
384 | default: | ||
385 | close(pipefd[1]); | ||
386 | data = malloc(4096); | ||
387 | do { | ||
388 | while ((ret = read(pipefd[0], | ||
389 | data + data_len, | ||
390 | 4096)) > 0) { | ||
391 | data_len += ret; | ||
392 | data = realloc(data, data_len + 4096); | ||
393 | } | ||
394 | } while (ret == -EAGAIN); | ||
395 | if (ret != 0) { | ||
396 | perror("read"); | ||
397 | exit(1); | ||
398 | } | ||
399 | waitpid(pid, &ret ,0); | ||
400 | } | ||
401 | if (WIFEXITED(ret)) | ||
402 | exitstatus |= WEXITSTATUS(ret); | ||
403 | else | ||
404 | exitstatus = 0xff; | ||
405 | |||
406 | count = 0; | ||
407 | /* poor man's strtok, but with counting */ | ||
408 | for (i = 0; i < data_len; i++) { | ||
409 | if (data[i] == '\n') { | ||
410 | count++; | ||
411 | data[i] = '\0'; | ||
412 | } | ||
413 | } | ||
414 | start = all_list_len; | ||
415 | all_list_len += count; | ||
416 | all_list = realloc(all_list, sizeof(char *) * all_list_len); | ||
417 | str = data; | ||
418 | for (i = 0; i < data_len && start != all_list_len; i++) { | ||
419 | if (data[i] == '\0') { | ||
420 | all_list[start] = str; | ||
421 | str = data + i + 1; | ||
422 | start++; | ||
423 | } | ||
424 | } | ||
425 | } | ||
426 | |||
318 | /* | 427 | /* |
319 | * Parse file, calling action specific functions for: | 428 | * Parse file, calling action specific functions for: |
320 | * 1) Lines containing !E | 429 | * 1) Lines containing !E |
@@ -322,7 +431,8 @@ static void docsect(char *filename, char *line) | |||
322 | * 3) Lines containing !D | 431 | * 3) Lines containing !D |
323 | * 4) Lines containing !F | 432 | * 4) Lines containing !F |
324 | * 5) Lines containing !P | 433 | * 5) Lines containing !P |
325 | * 6) Default lines - lines not matching the above | 434 | * 6) Lines containing !C |
435 | * 7) Default lines - lines not matching the above | ||
326 | */ | 436 | */ |
327 | static void parse_file(FILE *infile) | 437 | static void parse_file(FILE *infile) |
328 | { | 438 | { |
@@ -365,6 +475,12 @@ static void parse_file(FILE *infile) | |||
365 | s++; | 475 | s++; |
366 | docsection(line + 2, s); | 476 | docsection(line + 2, s); |
367 | break; | 477 | break; |
478 | case 'C': | ||
479 | while (*s && !isspace(*s)) s++; | ||
480 | *s = '\0'; | ||
481 | if (findall) | ||
482 | findall(line+2); | ||
483 | break; | ||
368 | default: | 484 | default: |
369 | defaultline(line); | 485 | defaultline(line); |
370 | } | 486 | } |
@@ -380,6 +496,7 @@ static void parse_file(FILE *infile) | |||
380 | int main(int argc, char *argv[]) | 496 | int main(int argc, char *argv[]) |
381 | { | 497 | { |
382 | FILE * infile; | 498 | FILE * infile; |
499 | int i; | ||
383 | 500 | ||
384 | srctree = getenv("SRCTREE"); | 501 | srctree = getenv("SRCTREE"); |
385 | if (!srctree) | 502 | if (!srctree) |
@@ -415,6 +532,7 @@ int main(int argc, char *argv[]) | |||
415 | symbolsonly = find_export_symbols; | 532 | symbolsonly = find_export_symbols; |
416 | singlefunctions = noaction2; | 533 | singlefunctions = noaction2; |
417 | docsection = noaction2; | 534 | docsection = noaction2; |
535 | findall = find_all_symbols; | ||
418 | parse_file(infile); | 536 | parse_file(infile); |
419 | 537 | ||
420 | /* Rewind to start from beginning of file again */ | 538 | /* Rewind to start from beginning of file again */ |
@@ -425,8 +543,16 @@ int main(int argc, char *argv[]) | |||
425 | symbolsonly = printline; | 543 | symbolsonly = printline; |
426 | singlefunctions = singfunc; | 544 | singlefunctions = singfunc; |
427 | docsection = docsect; | 545 | docsection = docsect; |
546 | findall = NULL; | ||
428 | 547 | ||
429 | parse_file(infile); | 548 | parse_file(infile); |
549 | |||
550 | for (i = 0; i < all_list_len; i++) { | ||
551 | if (!all_list[i]) | ||
552 | continue; | ||
553 | fprintf(stderr, "Warning: didn't use docs for %s\n", | ||
554 | all_list[i]); | ||
555 | } | ||
430 | } | 556 | } |
431 | else if (strcmp("depend", argv[1]) == 0) | 557 | else if (strcmp("depend", argv[1]) == 0) |
432 | { | 558 | { |
@@ -439,6 +565,7 @@ int main(int argc, char *argv[]) | |||
439 | symbolsonly = adddep; | 565 | symbolsonly = adddep; |
440 | singlefunctions = adddep2; | 566 | singlefunctions = adddep2; |
441 | docsection = adddep2; | 567 | docsection = adddep2; |
568 | findall = adddep; | ||
442 | parse_file(infile); | 569 | parse_file(infile); |
443 | printf("\n"); | 570 | printf("\n"); |
444 | } | 571 | } |
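
docproc now runs kernel-doc -list once per source file to learn every documented symbol, crosses each one off as a template consumes it via !E/!I/!F/!P/!C, and warns at the end about any documentation nothing referenced. The bookkeeping itself is simple; a compact sketch with a fixed symbol array standing in for the pipe to kernel-doc (the symbol names are examples only):

/* Collect, consume, report the rest. */
#include <stdio.h>
#include <string.h>

static const char *all_list[] = {
	"rpc_create", "rpc_shutdown_client", "DOC: RPC client internals",
};
#define ALL_LIST_LEN (sizeof(all_list) / sizeof(all_list[0]))

static void consume_symbol(const char *sym)
{
	for (unsigned i = 0; i < ALL_LIST_LEN; i++) {
		if (all_list[i] && strcmp(sym, all_list[i]) == 0) {
			all_list[i] = NULL;	/* cross it off */
			break;
		}
	}
}

int main(void)
{
	/* Symbols a DocBook template actually pulled in. */
	consume_symbol("rpc_create");

	for (unsigned i = 0; i < ALL_LIST_LEN; i++)
		if (all_list[i])
			fprintf(stderr, "Warning: didn't use docs for %s\n",
				all_list[i]);
	return 0;
}
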
diff --git a/scripts/kernel-doc b/scripts/kernel-doc index 102e1235fd5c..cdb6dc1f6458 100755 --- a/scripts/kernel-doc +++ b/scripts/kernel-doc | |||
@@ -44,12 +44,13 @@ use strict; | |||
44 | # Note: This only supports 'c'. | 44 | # Note: This only supports 'c'. |
45 | 45 | ||
46 | # usage: | 46 | # usage: |
47 | # kernel-doc [ -docbook | -html | -text | -man ] [ -no-doc-sections ] | 47 | # kernel-doc [ -docbook | -html | -text | -man | -list ] [ -no-doc-sections ] |
48 | # [ -function funcname [ -function funcname ...] ] c file(s)s > outputfile | 48 | # [ -function funcname [ -function funcname ...] ] c file(s)s > outputfile |
49 | # or | 49 | # or |
50 | # [ -nofunction funcname [ -function funcname ...] ] c file(s)s > outputfile | 50 | # [ -nofunction funcname [ -function funcname ...] ] c file(s)s > outputfile |
51 | # | 51 | # |
52 | # Set output format using one of -docbook -html -text or -man. Default is man. | 52 | # Set output format using one of -docbook -html -text or -man. Default is man. |
53 | # The -list format is for internal use by docproc. | ||
53 | # | 54 | # |
54 | # -no-doc-sections | 55 | # -no-doc-sections |
55 | # Do not output DOC: sections | 56 | # Do not output DOC: sections |
@@ -210,9 +211,16 @@ my %highlights_text = ( $type_constant, "\$1", | |||
210 | $type_param, "\$1" ); | 211 | $type_param, "\$1" ); |
211 | my $blankline_text = ""; | 212 | my $blankline_text = ""; |
212 | 213 | ||
214 | # list mode | ||
215 | my %highlights_list = ( $type_constant, "\$1", | ||
216 | $type_func, "\$1", | ||
217 | $type_struct, "\$1", | ||
218 | $type_param, "\$1" ); | ||
219 | my $blankline_list = ""; | ||
213 | 220 | ||
214 | sub usage { | 221 | sub usage { |
215 | print "Usage: $0 [ -v ] [ -docbook | -html | -text | -man ] [ -no-doc-sections ]\n"; | 222 | print "Usage: $0 [ -v ] [ -docbook | -html | -text | -man | -list ]\n"; |
223 | print " [ -no-doc-sections ]\n"; | ||
216 | print " [ -function funcname [ -function funcname ...] ]\n"; | 224 | print " [ -function funcname [ -function funcname ...] ]\n"; |
217 | print " [ -nofunction funcname [ -nofunction funcname ...] ]\n"; | 225 | print " [ -nofunction funcname [ -nofunction funcname ...] ]\n"; |
218 | print " c source file(s) > outputfile\n"; | 226 | print " c source file(s) > outputfile\n"; |
@@ -318,6 +326,10 @@ while ($ARGV[0] =~ m/^-(.*)/) { | |||
318 | $output_mode = "xml"; | 326 | $output_mode = "xml"; |
319 | %highlights = %highlights_xml; | 327 | %highlights = %highlights_xml; |
320 | $blankline = $blankline_xml; | 328 | $blankline = $blankline_xml; |
329 | } elsif ($cmd eq "-list") { | ||
330 | $output_mode = "list"; | ||
331 | %highlights = %highlights_list; | ||
332 | $blankline = $blankline_list; | ||
321 | } elsif ($cmd eq "-gnome") { | 333 | } elsif ($cmd eq "-gnome") { |
322 | $output_mode = "gnome"; | 334 | $output_mode = "gnome"; |
323 | %highlights = %highlights_gnome; | 335 | %highlights = %highlights_gnome; |
@@ -1361,6 +1373,42 @@ sub output_blockhead_text(%) { | |||
1361 | } | 1373 | } |
1362 | } | 1374 | } |
1363 | 1375 | ||
1376 | ## list mode output functions | ||
1377 | |||
1378 | sub output_function_list(%) { | ||
1379 | my %args = %{$_[0]}; | ||
1380 | |||
1381 | print $args{'function'} . "\n"; | ||
1382 | } | ||
1383 | |||
1384 | # output enum in list | ||
1385 | sub output_enum_list(%) { | ||
1386 | my %args = %{$_[0]}; | ||
1387 | print $args{'enum'} . "\n"; | ||
1388 | } | ||
1389 | |||
1390 | # output typedef in list | ||
1391 | sub output_typedef_list(%) { | ||
1392 | my %args = %{$_[0]}; | ||
1393 | print $args{'typedef'} . "\n"; | ||
1394 | } | ||
1395 | |||
1396 | # output struct as list | ||
1397 | sub output_struct_list(%) { | ||
1398 | my %args = %{$_[0]}; | ||
1399 | |||
1400 | print $args{'struct'} . "\n"; | ||
1401 | } | ||
1402 | |||
1403 | sub output_blockhead_list(%) { | ||
1404 | my %args = %{$_[0]}; | ||
1405 | my ($parameter, $section); | ||
1406 | |||
1407 | foreach $section (@{$args{'sectionlist'}}) { | ||
1408 | print "DOC: $section\n"; | ||
1409 | } | ||
1410 | } | ||
1411 | |||
1364 | ## | 1412 | ## |
1365 | # generic output function for all types (function, struct/union, typedef, enum); | 1413 | # generic output function for all types (function, struct/union, typedef, enum); |
1366 | # calls the generated, variable output_ function name based on | 1414 | # calls the generated, variable output_ function name based on |
@@ -1679,7 +1727,7 @@ sub check_sections($$$$$$) { | |||
1679 | foreach $px (0 .. $#prms) { | 1727 | foreach $px (0 .. $#prms) { |
1680 | $prm_clean = $prms[$px]; | 1728 | $prm_clean = $prms[$px]; |
1681 | $prm_clean =~ s/\[.*\]//; | 1729 | $prm_clean =~ s/\[.*\]//; |
1682 | $prm_clean =~ s/__attribute__\s*\(\([a-z,_\*\s\(\)]*\)\)//; | 1730 | $prm_clean =~ s/__attribute__\s*\(\([a-z,_\*\s\(\)]*\)\)//i; |
1683 | # ignore array size in a parameter string; | 1731 | # ignore array size in a parameter string; |
1684 | # however, the original param string may contain | 1732 | # however, the original param string may contain |
1685 | # spaces, e.g.: addr[6 + 2] | 1733 | # spaces, e.g.: addr[6 + 2] |
diff --git a/security/apparmor/include/resource.h b/security/apparmor/include/resource.h index 3c88be946494..02baec732bb5 100644 --- a/security/apparmor/include/resource.h +++ b/security/apparmor/include/resource.h | |||
@@ -33,8 +33,8 @@ struct aa_rlimit { | |||
33 | }; | 33 | }; |
34 | 34 | ||
35 | int aa_map_resource(int resource); | 35 | int aa_map_resource(int resource); |
36 | int aa_task_setrlimit(struct aa_profile *profile, unsigned int resource, | 36 | int aa_task_setrlimit(struct aa_profile *profile, struct task_struct *, |
37 | struct rlimit *new_rlim); | 37 | unsigned int resource, struct rlimit *new_rlim); |
38 | 38 | ||
39 | void __aa_transition_rlimits(struct aa_profile *old, struct aa_profile *new); | 39 | void __aa_transition_rlimits(struct aa_profile *old, struct aa_profile *new); |
40 | 40 | ||
diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c index 6e85cdb4303f..506d2baf6147 100644 --- a/security/apparmor/lib.c +++ b/security/apparmor/lib.c | |||
@@ -40,6 +40,7 @@ char *aa_split_fqname(char *fqname, char **ns_name) | |||
40 | *ns_name = NULL; | 40 | *ns_name = NULL; |
41 | if (name[0] == ':') { | 41 | if (name[0] == ':') { |
42 | char *split = strchr(&name[1], ':'); | 42 | char *split = strchr(&name[1], ':'); |
43 | *ns_name = skip_spaces(&name[1]); | ||
43 | if (split) { | 44 | if (split) { |
44 | /* overwrite ':' with \0 */ | 45 | /* overwrite ':' with \0 */ |
45 | *split = 0; | 46 | *split = 0; |
@@ -47,7 +48,6 @@ char *aa_split_fqname(char *fqname, char **ns_name) | |||
47 | } else | 48 | } else |
48 | /* a ns name without a following profile is allowed */ | 49 | /* a ns name without a following profile is allowed */ |
49 | name = NULL; | 50 | name = NULL; |
50 | *ns_name = &name[1]; | ||
51 | } | 51 | } |
52 | if (name && *name == 0) | 52 | if (name && *name == 0) |
53 | name = NULL; | 53 | name = NULL; |
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c index f73e2c204218..cf1de4462ccd 100644 --- a/security/apparmor/lsm.c +++ b/security/apparmor/lsm.c | |||
@@ -614,7 +614,7 @@ static int apparmor_task_setrlimit(struct task_struct *task, | |||
614 | int error = 0; | 614 | int error = 0; |
615 | 615 | ||
616 | if (!unconfined(profile)) | 616 | if (!unconfined(profile)) |
617 | error = aa_task_setrlimit(profile, resource, new_rlim); | 617 | error = aa_task_setrlimit(profile, task, resource, new_rlim); |
618 | 618 | ||
619 | return error; | 619 | return error; |
620 | } | 620 | } |
diff --git a/security/apparmor/path.c b/security/apparmor/path.c index 19358dc14605..82396050f186 100644 --- a/security/apparmor/path.c +++ b/security/apparmor/path.c | |||
@@ -59,8 +59,7 @@ static int d_namespace_path(struct path *path, char *buf, int buflen, | |||
59 | { | 59 | { |
60 | struct path root, tmp; | 60 | struct path root, tmp; |
61 | char *res; | 61 | char *res; |
62 | int deleted, connected; | 62 | int connected, error = 0; |
63 | int error = 0; | ||
64 | 63 | ||
65 | /* Get the root we want to resolve too, released below */ | 64 | /* Get the root we want to resolve too, released below */ |
66 | if (flags & PATH_CHROOT_REL) { | 65 | if (flags & PATH_CHROOT_REL) { |
@@ -74,19 +73,8 @@ static int d_namespace_path(struct path *path, char *buf, int buflen, | |||
74 | } | 73 | } |
75 | 74 | ||
76 | spin_lock(&dcache_lock); | 75 | spin_lock(&dcache_lock); |
77 | /* There is a race window between path lookup here and the | 76 | tmp = root; |
78 | * need to strip the " (deleted) string that __d_path applies | 77 | res = __d_path(path, &tmp, buf, buflen); |
79 | * Detect the race and relookup the path | ||
80 | * | ||
81 | * The stripping of (deleted) is a hack that could be removed | ||
82 | * with an updated __d_path | ||
83 | */ | ||
84 | do { | ||
85 | tmp = root; | ||
86 | deleted = d_unlinked(path->dentry); | ||
87 | res = __d_path(path, &tmp, buf, buflen); | ||
88 | |||
89 | } while (deleted != d_unlinked(path->dentry)); | ||
90 | spin_unlock(&dcache_lock); | 78 | spin_unlock(&dcache_lock); |
91 | 79 | ||
92 | *name = res; | 80 | *name = res; |
@@ -98,21 +86,17 @@ static int d_namespace_path(struct path *path, char *buf, int buflen, | |||
98 | *name = buf; | 86 | *name = buf; |
99 | goto out; | 87 | goto out; |
100 | } | 88 | } |
101 | if (deleted) { | ||
102 | /* On some filesystems, newly allocated dentries appear to the | ||
103 | * security_path hooks as a deleted dentry except without an | ||
104 | * inode allocated. | ||
105 | * | ||
106 | * Remove the appended deleted text and return as string for | ||
107 | * normal mediation, or auditing. The (deleted) string is | ||
108 | * guaranteed to be added in this case, so just strip it. | ||
109 | */ | ||
110 | buf[buflen - 11] = 0; /* - (len(" (deleted)") +\0) */ | ||
111 | 89 | ||
112 | if (path->dentry->d_inode && !(flags & PATH_MEDIATE_DELETED)) { | 90 | /* Handle two cases: |
91 | * 1. A deleted dentry && profile is not allowing mediation of deleted | ||
92 | * 2. On some filesystems, newly allocated dentries appear to the | ||
93 | * security_path hooks as a deleted dentry except without an inode | ||
94 | * allocated. | ||
95 | */ | ||
96 | if (d_unlinked(path->dentry) && path->dentry->d_inode && | ||
97 | !(flags & PATH_MEDIATE_DELETED)) { | ||
113 | error = -ENOENT; | 98 | error = -ENOENT; |
114 | goto out; | 99 | goto out; |
115 | } | ||
116 | } | 100 | } |
117 | 101 | ||
118 | /* Determine if the path is connected to the expected root */ | 102 | /* Determine if the path is connected to the expected root */ |
diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c index 3cdc1ad0787e..52cc865f1464 100644 --- a/security/apparmor/policy.c +++ b/security/apparmor/policy.c | |||
@@ -1151,12 +1151,14 @@ ssize_t aa_remove_profiles(char *fqname, size_t size) | |||
1151 | /* released below */ | 1151 | /* released below */ |
1152 | ns = aa_get_namespace(root); | 1152 | ns = aa_get_namespace(root); |
1153 | 1153 | ||
1154 | write_lock(&ns->lock); | ||
1155 | if (!name) { | 1154 | if (!name) { |
1156 | /* remove namespace - can only happen if fqname[0] == ':' */ | 1155 | /* remove namespace - can only happen if fqname[0] == ':' */ |
1156 | write_lock(&ns->parent->lock); | ||
1157 | __remove_namespace(ns); | 1157 | __remove_namespace(ns); |
1158 | write_unlock(&ns->parent->lock); | ||
1158 | } else { | 1159 | } else { |
1159 | /* remove profile */ | 1160 | /* remove profile */ |
1161 | write_lock(&ns->lock); | ||
1160 | profile = aa_get_profile(__lookup_profile(&ns->base, name)); | 1162 | profile = aa_get_profile(__lookup_profile(&ns->base, name)); |
1161 | if (!profile) { | 1163 | if (!profile) { |
1162 | error = -ENOENT; | 1164 | error = -ENOENT; |
@@ -1165,8 +1167,8 @@ ssize_t aa_remove_profiles(char *fqname, size_t size) | |||
1165 | } | 1167 | } |
1166 | name = profile->base.hname; | 1168 | name = profile->base.hname; |
1167 | __remove_profile(profile); | 1169 | __remove_profile(profile); |
1170 | write_unlock(&ns->lock); | ||
1168 | } | 1171 | } |
1169 | write_unlock(&ns->lock); | ||
1170 | 1172 | ||
1171 | /* don't fail removal if audit fails */ | 1173 | /* don't fail removal if audit fails */ |
1172 | (void) audit_policy(OP_PROF_RM, GFP_KERNEL, name, info, error); | 1174 | (void) audit_policy(OP_PROF_RM, GFP_KERNEL, name, info, error); |
diff --git a/security/apparmor/resource.c b/security/apparmor/resource.c index 4a368f1fd36d..a4136c10b1c6 100644 --- a/security/apparmor/resource.c +++ b/security/apparmor/resource.c | |||
@@ -72,6 +72,7 @@ int aa_map_resource(int resource) | |||
72 | /** | 72 | /** |
73 | * aa_task_setrlimit - test permission to set an rlimit | 73 | * aa_task_setrlimit - test permission to set an rlimit |
74 | * @profile - profile confining the task (NOT NULL) | 74 | * @profile - profile confining the task (NOT NULL) |
75 | * @task - task the resource is being set on | ||
75 | * @resource - the resource being set | 76 | * @resource - the resource being set |
76 | * @new_rlim - the new resource limit (NOT NULL) | 77 | * @new_rlim - the new resource limit (NOT NULL) |
77 | * | 78 | * |
@@ -79,18 +80,21 @@ int aa_map_resource(int resource) | |||
79 | * | 80 | * |
80 | * Returns: 0 or error code if setting resource failed | 81 | * Returns: 0 or error code if setting resource failed |
81 | */ | 82 | */ |
82 | int aa_task_setrlimit(struct aa_profile *profile, unsigned int resource, | 83 | int aa_task_setrlimit(struct aa_profile *profile, struct task_struct *task, |
83 | struct rlimit *new_rlim) | 84 | unsigned int resource, struct rlimit *new_rlim) |
84 | { | 85 | { |
85 | int error = 0; | 86 | int error = 0; |
86 | 87 | ||
87 | if (profile->rlimits.mask & (1 << resource) && | 88 | /* TODO: extend resource control to handle other (non current) |
88 | new_rlim->rlim_max > profile->rlimits.limits[resource].rlim_max) | 89 | * processes. AppArmor rules currently have the implicit assumption |
89 | 90 | * that the task is setting the resource of the current process | |
90 | error = audit_resource(profile, resource, new_rlim->rlim_max, | 91 | */ |
91 | -EACCES); | 92 | if ((task != current->group_leader) || |
93 | (profile->rlimits.mask & (1 << resource) && | ||
94 | new_rlim->rlim_max > profile->rlimits.limits[resource].rlim_max)) | ||
95 | error = -EACCES; | ||
92 | 96 | ||
93 | return error; | 97 | return audit_resource(profile, resource, new_rlim->rlim_max, error); |
94 | } | 98 | } |
95 | 99 | ||
96 | /** | 100 | /** |
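
aa_task_setrlimit() now receives the target task, refuses any attempt to set a limit on a process other than the caller's own group leader (AppArmor rules cannot yet express cross-process resource control), and routes both the allow and deny outcomes through a single audit call. A simplified sketch of that check, with stand-in types and names:

/* Deny cross-process requests outright; always audit the final result. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct profile { unsigned int mask; unsigned long max[16]; };

static int audit_resource(unsigned int resource, unsigned long value, int error)
{
	printf("audit: resource=%u value=%lu error=%d\n", resource, value, error);
	return error;
}

static int task_setrlimit(const struct profile *p, bool target_is_self,
			  unsigned int resource, unsigned long new_max)
{
	int error = 0;

	if (!target_is_self ||
	    ((p->mask & (1u << resource)) && new_max > p->max[resource]))
		error = -EACCES;

	return audit_resource(resource, new_max, error);
}

int main(void)
{
	struct profile p = { .mask = 1u << 3, .max = { [3] = 4096 } };

	task_setrlimit(&p, true, 3, 1024);	/* allowed: within the cap */
	task_setrlimit(&p, true, 3, 8192);	/* denied: above the cap */
	task_setrlimit(&p, false, 3, 1024);	/* denied: not the current task */
	return 0;
}
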
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h index 16d100d3fc38..3fbcd1dda0ef 100644 --- a/security/integrity/ima/ima.h +++ b/security/integrity/ima/ima.h | |||
@@ -35,6 +35,7 @@ enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 }; | |||
35 | #define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS) | 35 | #define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS) |
36 | 36 | ||
37 | /* set during initialization */ | 37 | /* set during initialization */ |
38 | extern int iint_initialized; | ||
38 | extern int ima_initialized; | 39 | extern int ima_initialized; |
39 | extern int ima_used_chip; | 40 | extern int ima_used_chip; |
40 | extern char *ima_hash; | 41 | extern char *ima_hash; |
diff --git a/security/integrity/ima/ima_iint.c b/security/integrity/ima/ima_iint.c index 7625b85c2274..afba4aef812f 100644 --- a/security/integrity/ima/ima_iint.c +++ b/security/integrity/ima/ima_iint.c | |||
@@ -22,9 +22,10 @@ | |||
22 | 22 | ||
23 | RADIX_TREE(ima_iint_store, GFP_ATOMIC); | 23 | RADIX_TREE(ima_iint_store, GFP_ATOMIC); |
24 | DEFINE_SPINLOCK(ima_iint_lock); | 24 | DEFINE_SPINLOCK(ima_iint_lock); |
25 | |||
26 | static struct kmem_cache *iint_cache __read_mostly; | 25 | static struct kmem_cache *iint_cache __read_mostly; |
27 | 26 | ||
27 | int iint_initialized = 0; | ||
28 | |||
28 | /* ima_iint_find_get - return the iint associated with an inode | 29 | /* ima_iint_find_get - return the iint associated with an inode |
29 | * | 30 | * |
30 | * ima_iint_find_get gets a reference to the iint. Caller must | 31 | * ima_iint_find_get gets a reference to the iint. Caller must |
@@ -141,6 +142,7 @@ static int __init ima_iintcache_init(void) | |||
141 | iint_cache = | 142 | iint_cache = |
142 | kmem_cache_create("iint_cache", sizeof(struct ima_iint_cache), 0, | 143 | kmem_cache_create("iint_cache", sizeof(struct ima_iint_cache), 0, |
143 | SLAB_PANIC, init_once); | 144 | SLAB_PANIC, init_once); |
145 | iint_initialized = 1; | ||
144 | return 0; | 146 | return 0; |
145 | } | 147 | } |
146 | security_initcall(ima_iintcache_init); | 148 | security_initcall(ima_iintcache_init); |
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c index f93641382e9f..e662b89d4079 100644 --- a/security/integrity/ima/ima_main.c +++ b/security/integrity/ima/ima_main.c | |||
@@ -148,12 +148,14 @@ void ima_counts_get(struct file *file) | |||
148 | struct ima_iint_cache *iint; | 148 | struct ima_iint_cache *iint; |
149 | int rc; | 149 | int rc; |
150 | 150 | ||
151 | if (!ima_initialized || !S_ISREG(inode->i_mode)) | 151 | if (!iint_initialized || !S_ISREG(inode->i_mode)) |
152 | return; | 152 | return; |
153 | iint = ima_iint_find_get(inode); | 153 | iint = ima_iint_find_get(inode); |
154 | if (!iint) | 154 | if (!iint) |
155 | return; | 155 | return; |
156 | mutex_lock(&iint->mutex); | 156 | mutex_lock(&iint->mutex); |
157 | if (!ima_initialized) | ||
158 | goto out; | ||
157 | rc = ima_must_measure(iint, inode, MAY_READ, FILE_CHECK); | 159 | rc = ima_must_measure(iint, inode, MAY_READ, FILE_CHECK); |
158 | if (rc < 0) | 160 | if (rc < 0) |
159 | goto out; | 161 | goto out; |
@@ -213,7 +215,7 @@ void ima_file_free(struct file *file) | |||
213 | struct inode *inode = file->f_dentry->d_inode; | 215 | struct inode *inode = file->f_dentry->d_inode; |
214 | struct ima_iint_cache *iint; | 216 | struct ima_iint_cache *iint; |
215 | 217 | ||
216 | if (!ima_initialized || !S_ISREG(inode->i_mode)) | 218 | if (!iint_initialized || !S_ISREG(inode->i_mode)) |
217 | return; | 219 | return; |
218 | iint = ima_iint_find_get(inode); | 220 | iint = ima_iint_find_get(inode); |
219 | if (!iint) | 221 | if (!iint) |
@@ -230,7 +232,7 @@ static int process_measurement(struct file *file, const unsigned char *filename, | |||
230 | { | 232 | { |
231 | struct inode *inode = file->f_dentry->d_inode; | 233 | struct inode *inode = file->f_dentry->d_inode; |
232 | struct ima_iint_cache *iint; | 234 | struct ima_iint_cache *iint; |
233 | int rc; | 235 | int rc = 0; |
234 | 236 | ||
235 | if (!ima_initialized || !S_ISREG(inode->i_mode)) | 237 | if (!ima_initialized || !S_ISREG(inode->i_mode)) |
236 | return 0; | 238 | return 0; |
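Editor's note: the fix splits readiness into two flags. iint_initialized is set as soon as the iint cache exists (the security_initcall above), while ima_initialized is only set once the rest of IMA has finished initializing; opens and frees are now counted as soon as the cache is usable, and measurement is skipped under the mutex until full init completes. A hedged standalone sketch of that ordering, with plain booleans standing in for the kernel state:

#include <stdbool.h>
#include <stdio.h>

/* stand-ins for the two kernel flags */
static bool iint_initialized;   /* iint cache ready (early initcall) */
static bool ima_initialized;    /* full IMA setup done (later) */

static void counts_get(const char *path)
{
        if (!iint_initialized)          /* cannot even track the open */
                return;
        printf("%s: open counted\n", path);

        if (!ima_initialized)           /* track, but do not measure yet */
                return;
        printf("%s: measured\n", path);
}

int main(void)
{
        counts_get("/bin/true");        /* nothing: cache not ready */
        iint_initialized = true;
        counts_get("/bin/true");        /* counted only */
        ima_initialized = true;
        counts_get("/bin/true");        /* counted and measured */
        return 0;
}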
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c index b2b0998d6abd..60924f6a52db 100644 --- a/security/keys/keyctl.c +++ b/security/keys/keyctl.c | |||
@@ -1272,6 +1272,7 @@ long keyctl_session_to_parent(void) | |||
1272 | keyring_r = NULL; | 1272 | keyring_r = NULL; |
1273 | 1273 | ||
1274 | me = current; | 1274 | me = current; |
1275 | rcu_read_lock(); | ||
1275 | write_lock_irq(&tasklist_lock); | 1276 | write_lock_irq(&tasklist_lock); |
1276 | 1277 | ||
1277 | parent = me->real_parent; | 1278 | parent = me->real_parent; |
@@ -1304,7 +1305,8 @@ long keyctl_session_to_parent(void) | |||
1304 | goto not_permitted; | 1305 | goto not_permitted; |
1305 | 1306 | ||
1306 | /* the keyrings must have the same UID */ | 1307 | /* the keyrings must have the same UID */ |
1307 | if (pcred->tgcred->session_keyring->uid != mycred->euid || | 1308 | if ((pcred->tgcred->session_keyring && |
1309 | pcred->tgcred->session_keyring->uid != mycred->euid) || | ||
1308 | mycred->tgcred->session_keyring->uid != mycred->euid) | 1310 | mycred->tgcred->session_keyring->uid != mycred->euid) |
1309 | goto not_permitted; | 1311 | goto not_permitted; |
1310 | 1312 | ||
@@ -1319,6 +1321,7 @@ long keyctl_session_to_parent(void) | |||
1319 | set_ti_thread_flag(task_thread_info(parent), TIF_NOTIFY_RESUME); | 1321 | set_ti_thread_flag(task_thread_info(parent), TIF_NOTIFY_RESUME); |
1320 | 1322 | ||
1321 | write_unlock_irq(&tasklist_lock); | 1323 | write_unlock_irq(&tasklist_lock); |
1324 | rcu_read_unlock(); | ||
1322 | if (oldcred) | 1325 | if (oldcred) |
1323 | put_cred(oldcred); | 1326 | put_cred(oldcred); |
1324 | return 0; | 1327 | return 0; |
@@ -1327,6 +1330,7 @@ already_same: | |||
1327 | ret = 0; | 1330 | ret = 0; |
1328 | not_permitted: | 1331 | not_permitted: |
1329 | write_unlock_irq(&tasklist_lock); | 1332 | write_unlock_irq(&tasklist_lock); |
1333 | rcu_read_unlock(); | ||
1330 | put_cred(cred); | 1334 | put_cred(cred); |
1331 | return ret; | 1335 | return ret; |
1332 | 1336 | ||
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c index ef43995119a4..c668b447c725 100644 --- a/security/tomoyo/common.c +++ b/security/tomoyo/common.c | |||
@@ -1416,15 +1416,19 @@ static char *tomoyo_print_header(struct tomoyo_request_info *r) | |||
1416 | const pid_t gpid = task_pid_nr(current); | 1416 | const pid_t gpid = task_pid_nr(current); |
1417 | static const int tomoyo_buffer_len = 4096; | 1417 | static const int tomoyo_buffer_len = 4096; |
1418 | char *buffer = kmalloc(tomoyo_buffer_len, GFP_NOFS); | 1418 | char *buffer = kmalloc(tomoyo_buffer_len, GFP_NOFS); |
1419 | pid_t ppid; | ||
1419 | if (!buffer) | 1420 | if (!buffer) |
1420 | return NULL; | 1421 | return NULL; |
1421 | do_gettimeofday(&tv); | 1422 | do_gettimeofday(&tv); |
1423 | rcu_read_lock(); | ||
1424 | ppid = task_tgid_vnr(current->real_parent); | ||
1425 | rcu_read_unlock(); | ||
1422 | snprintf(buffer, tomoyo_buffer_len - 1, | 1426 | snprintf(buffer, tomoyo_buffer_len - 1, |
1423 | "#timestamp=%lu profile=%u mode=%s (global-pid=%u)" | 1427 | "#timestamp=%lu profile=%u mode=%s (global-pid=%u)" |
1424 | " task={ pid=%u ppid=%u uid=%u gid=%u euid=%u" | 1428 | " task={ pid=%u ppid=%u uid=%u gid=%u euid=%u" |
1425 | " egid=%u suid=%u sgid=%u fsuid=%u fsgid=%u }", | 1429 | " egid=%u suid=%u sgid=%u fsuid=%u fsgid=%u }", |
1426 | tv.tv_sec, r->profile, tomoyo_mode[r->mode], gpid, | 1430 | tv.tv_sec, r->profile, tomoyo_mode[r->mode], gpid, |
1427 | (pid_t) sys_getpid(), (pid_t) sys_getppid(), | 1431 | task_tgid_vnr(current), ppid, |
1428 | current_uid(), current_gid(), current_euid(), | 1432 | current_uid(), current_gid(), current_euid(), |
1429 | current_egid(), current_suid(), current_sgid(), | 1433 | current_egid(), current_suid(), current_sgid(), |
1430 | current_fsuid(), current_fsgid()); | 1434 | current_fsuid(), current_fsgid()); |
diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h index 04454cb7b24a..7c66bd898782 100644 --- a/security/tomoyo/common.h +++ b/security/tomoyo/common.h | |||
@@ -689,9 +689,6 @@ struct tomoyo_profile { | |||
689 | 689 | ||
690 | /********** Function prototypes. **********/ | 690 | /********** Function prototypes. **********/ |
691 | 691 | ||
692 | extern asmlinkage long sys_getpid(void); | ||
693 | extern asmlinkage long sys_getppid(void); | ||
694 | |||
695 | /* Check whether the given string starts with the given keyword. */ | 692 | /* Check whether the given string starts with the given keyword. */ |
696 | bool tomoyo_str_starts(char **src, const char *find); | 693 | bool tomoyo_str_starts(char **src, const char *find); |
697 | /* Get tomoyo_realpath() of current process. */ | 694 | /* Get tomoyo_realpath() of current process. */ |
diff --git a/sound/core/control.c b/sound/core/control.c index 070aab490191..45a818002d99 100644 --- a/sound/core/control.c +++ b/sound/core/control.c | |||
@@ -31,6 +31,7 @@ | |||
31 | 31 | ||
32 | /* max number of user-defined controls */ | 32 | /* max number of user-defined controls */ |
33 | #define MAX_USER_CONTROLS 32 | 33 | #define MAX_USER_CONTROLS 32 |
34 | #define MAX_CONTROL_COUNT 1028 | ||
34 | 35 | ||
35 | struct snd_kctl_ioctl { | 36 | struct snd_kctl_ioctl { |
36 | struct list_head list; /* list of all ioctls */ | 37 | struct list_head list; /* list of all ioctls */ |
@@ -195,6 +196,10 @@ static struct snd_kcontrol *snd_ctl_new(struct snd_kcontrol *control, | |||
195 | 196 | ||
196 | if (snd_BUG_ON(!control || !control->count)) | 197 | if (snd_BUG_ON(!control || !control->count)) |
197 | return NULL; | 198 | return NULL; |
199 | |||
200 | if (control->count > MAX_CONTROL_COUNT) | ||
201 | return NULL; | ||
202 | |||
198 | kctl = kzalloc(sizeof(*kctl) + sizeof(struct snd_kcontrol_volatile) * control->count, GFP_KERNEL); | 203 | kctl = kzalloc(sizeof(*kctl) + sizeof(struct snd_kcontrol_volatile) * control->count, GFP_KERNEL); |
199 | if (kctl == NULL) { | 204 | if (kctl == NULL) { |
200 | snd_printk(KERN_ERR "Cannot allocate control instance\n"); | 205 | snd_printk(KERN_ERR "Cannot allocate control instance\n"); |
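Editor's note: the new MAX_CONTROL_COUNT bound rejects absurd user-supplied counts before the `sizeof(*kctl) + sizeof(volatile) * count` expression can overflow on its way into kzalloc(). A minimal sketch of that guard in plain C, with a hypothetical alloc_ctl() standing in for the real allocation path:

#include <stdio.h>
#include <stdlib.h>

#define MAX_CONTROL_COUNT 1028

struct vol { unsigned int access; };
struct ctl { unsigned int count; struct vol vd[]; };

/* reject oversized counts before the size computation can overflow */
static struct ctl *alloc_ctl(unsigned int count)
{
        struct ctl *c;

        if (count == 0 || count > MAX_CONTROL_COUNT)
                return NULL;

        c = calloc(1, sizeof(*c) + sizeof(struct vol) * count);
        if (c)
                c->count = count;
        return c;
}

int main(void)
{
        struct ctl *ok = alloc_ctl(4);
        struct ctl *bad = alloc_ctl(0x40000000u);

        printf("count=4          -> %s\n", ok ? "ok" : "rejected");
        printf("count=0x40000000 -> %s\n", bad ? "ok" : "rejected");
        free(ok);
        free(bad);
        return 0;
}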
diff --git a/sound/core/pcm.c b/sound/core/pcm.c index 204af48c5cc1..ac242a377aea 100644 --- a/sound/core/pcm.c +++ b/sound/core/pcm.c | |||
@@ -372,14 +372,17 @@ static void snd_pcm_substream_proc_hw_params_read(struct snd_info_entry *entry, | |||
372 | struct snd_info_buffer *buffer) | 372 | struct snd_info_buffer *buffer) |
373 | { | 373 | { |
374 | struct snd_pcm_substream *substream = entry->private_data; | 374 | struct snd_pcm_substream *substream = entry->private_data; |
375 | struct snd_pcm_runtime *runtime = substream->runtime; | 375 | struct snd_pcm_runtime *runtime; |
376 | |||
377 | mutex_lock(&substream->pcm->open_mutex); | ||
378 | runtime = substream->runtime; | ||
376 | if (!runtime) { | 379 | if (!runtime) { |
377 | snd_iprintf(buffer, "closed\n"); | 380 | snd_iprintf(buffer, "closed\n"); |
378 | return; | 381 | goto unlock; |
379 | } | 382 | } |
380 | if (runtime->status->state == SNDRV_PCM_STATE_OPEN) { | 383 | if (runtime->status->state == SNDRV_PCM_STATE_OPEN) { |
381 | snd_iprintf(buffer, "no setup\n"); | 384 | snd_iprintf(buffer, "no setup\n"); |
382 | return; | 385 | goto unlock; |
383 | } | 386 | } |
384 | snd_iprintf(buffer, "access: %s\n", snd_pcm_access_name(runtime->access)); | 387 | snd_iprintf(buffer, "access: %s\n", snd_pcm_access_name(runtime->access)); |
385 | snd_iprintf(buffer, "format: %s\n", snd_pcm_format_name(runtime->format)); | 388 | snd_iprintf(buffer, "format: %s\n", snd_pcm_format_name(runtime->format)); |
@@ -398,20 +401,25 @@ static void snd_pcm_substream_proc_hw_params_read(struct snd_info_entry *entry, | |||
398 | snd_iprintf(buffer, "OSS period frames: %lu\n", (unsigned long)runtime->oss.period_frames); | 401 | snd_iprintf(buffer, "OSS period frames: %lu\n", (unsigned long)runtime->oss.period_frames); |
399 | } | 402 | } |
400 | #endif | 403 | #endif |
404 | unlock: | ||
405 | mutex_unlock(&substream->pcm->open_mutex); | ||
401 | } | 406 | } |
402 | 407 | ||
403 | static void snd_pcm_substream_proc_sw_params_read(struct snd_info_entry *entry, | 408 | static void snd_pcm_substream_proc_sw_params_read(struct snd_info_entry *entry, |
404 | struct snd_info_buffer *buffer) | 409 | struct snd_info_buffer *buffer) |
405 | { | 410 | { |
406 | struct snd_pcm_substream *substream = entry->private_data; | 411 | struct snd_pcm_substream *substream = entry->private_data; |
407 | struct snd_pcm_runtime *runtime = substream->runtime; | 412 | struct snd_pcm_runtime *runtime; |
413 | |||
414 | mutex_lock(&substream->pcm->open_mutex); | ||
415 | runtime = substream->runtime; | ||
408 | if (!runtime) { | 416 | if (!runtime) { |
409 | snd_iprintf(buffer, "closed\n"); | 417 | snd_iprintf(buffer, "closed\n"); |
410 | return; | 418 | goto unlock; |
411 | } | 419 | } |
412 | if (runtime->status->state == SNDRV_PCM_STATE_OPEN) { | 420 | if (runtime->status->state == SNDRV_PCM_STATE_OPEN) { |
413 | snd_iprintf(buffer, "no setup\n"); | 421 | snd_iprintf(buffer, "no setup\n"); |
414 | return; | 422 | goto unlock; |
415 | } | 423 | } |
416 | snd_iprintf(buffer, "tstamp_mode: %s\n", snd_pcm_tstamp_mode_name(runtime->tstamp_mode)); | 424 | snd_iprintf(buffer, "tstamp_mode: %s\n", snd_pcm_tstamp_mode_name(runtime->tstamp_mode)); |
417 | snd_iprintf(buffer, "period_step: %u\n", runtime->period_step); | 425 | snd_iprintf(buffer, "period_step: %u\n", runtime->period_step); |
@@ -421,24 +429,29 @@ static void snd_pcm_substream_proc_sw_params_read(struct snd_info_entry *entry, | |||
421 | snd_iprintf(buffer, "silence_threshold: %lu\n", runtime->silence_threshold); | 429 | snd_iprintf(buffer, "silence_threshold: %lu\n", runtime->silence_threshold); |
422 | snd_iprintf(buffer, "silence_size: %lu\n", runtime->silence_size); | 430 | snd_iprintf(buffer, "silence_size: %lu\n", runtime->silence_size); |
423 | snd_iprintf(buffer, "boundary: %lu\n", runtime->boundary); | 431 | snd_iprintf(buffer, "boundary: %lu\n", runtime->boundary); |
432 | unlock: | ||
433 | mutex_unlock(&substream->pcm->open_mutex); | ||
424 | } | 434 | } |
425 | 435 | ||
426 | static void snd_pcm_substream_proc_status_read(struct snd_info_entry *entry, | 436 | static void snd_pcm_substream_proc_status_read(struct snd_info_entry *entry, |
427 | struct snd_info_buffer *buffer) | 437 | struct snd_info_buffer *buffer) |
428 | { | 438 | { |
429 | struct snd_pcm_substream *substream = entry->private_data; | 439 | struct snd_pcm_substream *substream = entry->private_data; |
430 | struct snd_pcm_runtime *runtime = substream->runtime; | 440 | struct snd_pcm_runtime *runtime; |
431 | struct snd_pcm_status status; | 441 | struct snd_pcm_status status; |
432 | int err; | 442 | int err; |
443 | |||
444 | mutex_lock(&substream->pcm->open_mutex); | ||
445 | runtime = substream->runtime; | ||
433 | if (!runtime) { | 446 | if (!runtime) { |
434 | snd_iprintf(buffer, "closed\n"); | 447 | snd_iprintf(buffer, "closed\n"); |
435 | return; | 448 | goto unlock; |
436 | } | 449 | } |
437 | memset(&status, 0, sizeof(status)); | 450 | memset(&status, 0, sizeof(status)); |
438 | err = snd_pcm_status(substream, &status); | 451 | err = snd_pcm_status(substream, &status); |
439 | if (err < 0) { | 452 | if (err < 0) { |
440 | snd_iprintf(buffer, "error %d\n", err); | 453 | snd_iprintf(buffer, "error %d\n", err); |
441 | return; | 454 | goto unlock; |
442 | } | 455 | } |
443 | snd_iprintf(buffer, "state: %s\n", snd_pcm_state_name(status.state)); | 456 | snd_iprintf(buffer, "state: %s\n", snd_pcm_state_name(status.state)); |
444 | snd_iprintf(buffer, "owner_pid : %d\n", pid_vnr(substream->pid)); | 457 | snd_iprintf(buffer, "owner_pid : %d\n", pid_vnr(substream->pid)); |
@@ -452,6 +465,8 @@ static void snd_pcm_substream_proc_status_read(struct snd_info_entry *entry, | |||
452 | snd_iprintf(buffer, "-----\n"); | 465 | snd_iprintf(buffer, "-----\n"); |
453 | snd_iprintf(buffer, "hw_ptr : %ld\n", runtime->status->hw_ptr); | 466 | snd_iprintf(buffer, "hw_ptr : %ld\n", runtime->status->hw_ptr); |
454 | snd_iprintf(buffer, "appl_ptr : %ld\n", runtime->control->appl_ptr); | 467 | snd_iprintf(buffer, "appl_ptr : %ld\n", runtime->control->appl_ptr); |
468 | unlock: | ||
469 | mutex_unlock(&substream->pcm->open_mutex); | ||
455 | } | 470 | } |
456 | 471 | ||
457 | #ifdef CONFIG_SND_PCM_XRUN_DEBUG | 472 | #ifdef CONFIG_SND_PCM_XRUN_DEBUG |
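Editor's note: each proc read now takes the pcm open_mutex before dereferencing substream->runtime, so a concurrent close cannot pull the runtime out from under the read, and every early exit funnels through a single unlock label. A userspace sketch of that lock / check / goto-unlock shape using pthreads (the names are illustrative, not the ALSA API):

#include <pthread.h>
#include <stdio.h>

struct runtime { int state; };
struct substream {
        pthread_mutex_t open_mutex;
        struct runtime *runtime;        /* NULL once closed */
};

static void proc_read(struct substream *s)
{
        struct runtime *rt;

        pthread_mutex_lock(&s->open_mutex);
        rt = s->runtime;                /* only valid while the lock is held */
        if (!rt) {
                printf("closed\n");
                goto unlock;
        }
        printf("state: %d\n", rt->state);
unlock:
        pthread_mutex_unlock(&s->open_mutex);
}

int main(void)
{
        struct runtime rt = { .state = 3 };
        struct substream s = { PTHREAD_MUTEX_INITIALIZER, &rt };

        proc_read(&s);
        s.runtime = NULL;               /* simulate a close */
        proc_read(&s);
        return 0;
}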
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 134fc6c2e08d..d4eb2ef80784 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c | |||
@@ -1992,6 +1992,8 @@ void snd_pcm_release_substream(struct snd_pcm_substream *substream) | |||
1992 | substream->ops->close(substream); | 1992 | substream->ops->close(substream); |
1993 | substream->hw_opened = 0; | 1993 | substream->hw_opened = 0; |
1994 | } | 1994 | } |
1995 | if (pm_qos_request_active(&substream->latency_pm_qos_req)) | ||
1996 | pm_qos_remove_request(&substream->latency_pm_qos_req); | ||
1995 | if (substream->pcm_release) { | 1997 | if (substream->pcm_release) { |
1996 | substream->pcm_release(substream); | 1998 | substream->pcm_release(substream); |
1997 | substream->pcm_release = NULL; | 1999 | substream->pcm_release = NULL; |
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c index eb68326c37d4..a7868ad4d530 100644 --- a/sound/core/rawmidi.c +++ b/sound/core/rawmidi.c | |||
@@ -829,6 +829,8 @@ static int snd_rawmidi_control_ioctl(struct snd_card *card, | |||
829 | 829 | ||
830 | if (get_user(device, (int __user *)argp)) | 830 | if (get_user(device, (int __user *)argp)) |
831 | return -EFAULT; | 831 | return -EFAULT; |
832 | if (device >= SNDRV_RAWMIDI_DEVICES) /* next device is -1 */ | ||
833 | device = SNDRV_RAWMIDI_DEVICES - 1; | ||
832 | mutex_lock(®ister_mutex); | 834 | mutex_lock(®ister_mutex); |
833 | device = device < 0 ? 0 : device + 1; | 835 | device = device < 0 ? 0 : device + 1; |
834 | while (device < SNDRV_RAWMIDI_DEVICES) { | 836 | while (device < SNDRV_RAWMIDI_DEVICES) { |
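Editor's note: the ioctl searches for the next device starting from a user-supplied index, where -1 means "from the beginning"; clamping anything at or above SNDRV_RAWMIDI_DEVICES to the last slot keeps the `device + 1` step from overflowing on a huge index while preserving the "no further device" result. A small arithmetic sketch (array size and names are illustrative):

#include <stdio.h>

#define RAWMIDI_DEVICES 8

/* returns the first present device after the requested index, or -1 */
static int next_device(const int present[RAWMIDI_DEVICES], int device)
{
        if (device >= RAWMIDI_DEVICES)          /* clamp oversized requests */
                device = RAWMIDI_DEVICES - 1;
        device = device < 0 ? 0 : device + 1;   /* -1 means "first device" */

        while (device < RAWMIDI_DEVICES) {
                if (present[device])
                        return device;
                device++;
        }
        return -1;
}

int main(void)
{
        int present[RAWMIDI_DEVICES] = { 1, 0, 1, 0, 0, 0, 0, 0 };

        printf("after -1 : %d\n", next_device(present, -1));   /* 0 */
        printf("after 0  : %d\n", next_device(present, 0));    /* 2 */
        printf("after 99 : %d\n", next_device(present, 99));   /* -1 */
        return 0;
}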
diff --git a/sound/core/seq/oss/seq_oss_init.c b/sound/core/seq/oss/seq_oss_init.c index 685712276ac9..69cd7b3c362d 100644 --- a/sound/core/seq/oss/seq_oss_init.c +++ b/sound/core/seq/oss/seq_oss_init.c | |||
@@ -281,13 +281,10 @@ snd_seq_oss_open(struct file *file, int level) | |||
281 | return 0; | 281 | return 0; |
282 | 282 | ||
283 | _error: | 283 | _error: |
284 | snd_seq_oss_writeq_delete(dp->writeq); | ||
285 | snd_seq_oss_readq_delete(dp->readq); | ||
286 | snd_seq_oss_synth_cleanup(dp); | 284 | snd_seq_oss_synth_cleanup(dp); |
287 | snd_seq_oss_midi_cleanup(dp); | 285 | snd_seq_oss_midi_cleanup(dp); |
288 | delete_port(dp); | ||
289 | delete_seq_queue(dp->queue); | 286 | delete_seq_queue(dp->queue); |
290 | kfree(dp); | 287 | delete_port(dp); |
291 | 288 | ||
292 | return rc; | 289 | return rc; |
293 | } | 290 | } |
@@ -350,8 +347,10 @@ create_port(struct seq_oss_devinfo *dp) | |||
350 | static int | 347 | static int |
351 | delete_port(struct seq_oss_devinfo *dp) | 348 | delete_port(struct seq_oss_devinfo *dp) |
352 | { | 349 | { |
353 | if (dp->port < 0) | 350 | if (dp->port < 0) { |
351 | kfree(dp); | ||
354 | return 0; | 352 | return 0; |
353 | } | ||
355 | 354 | ||
356 | debug_printk(("delete_port %i\n", dp->port)); | 355 | debug_printk(("delete_port %i\n", dp->port)); |
357 | return snd_seq_event_port_detach(dp->cseq, dp->port); | 356 | return snd_seq_event_port_detach(dp->cseq, dp->port); |
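Editor's note: the reworked error path consolidates teardown so each object is released exactly once: delete_port() now frees the devinfo itself when no port was ever attached, and snd_seq_oss_open() drops its separate kfree and per-queue deletions. A generic sketch of that single-owner hand-off, with hypothetical names and no ALSA calls:

#include <stdio.h>
#include <stdlib.h>

struct devinfo { int port; /* -1 until a port is attached */ };

/* always consumes dp, so callers never free it separately */
static void delete_port(struct devinfo *dp)
{
        if (dp->port >= 0)
                printf("detaching port %d\n", dp->port);
        free(dp);
}

static int open_device(void)
{
        struct devinfo *dp = calloc(1, sizeof(*dp));

        if (!dp)
                return -1;
        dp->port = -1;

        /* ... setup that can fail ... */
        goto error;                     /* simulate a failure */

error:
        delete_port(dp);                /* single owner: no free(dp) here */
        return -1;
}

int main(void)
{
        return open_device() ? 1 : 0;
}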
diff --git a/sound/i2c/other/ak4xxx-adda.c b/sound/i2c/other/ak4xxx-adda.c index 1adb8a3c2b62..42d7844ecd0b 100644 --- a/sound/i2c/other/ak4xxx-adda.c +++ b/sound/i2c/other/ak4xxx-adda.c | |||
@@ -900,7 +900,7 @@ static int proc_init(struct snd_akm4xxx *ak) | |||
900 | return 0; | 900 | return 0; |
901 | } | 901 | } |
902 | #else /* !CONFIG_PROC_FS */ | 902 | #else /* !CONFIG_PROC_FS */ |
903 | static int proc_init(struct snd_akm4xxx *ak) {} | 903 | static int proc_init(struct snd_akm4xxx *ak) { return 0; } |
904 | #endif | 904 | #endif |
905 | 905 | ||
906 | int snd_akm4xxx_build_controls(struct snd_akm4xxx *ak) | 906 | int snd_akm4xxx_build_controls(struct snd_akm4xxx *ak) |
diff --git a/sound/isa/msnd/msnd_pinnacle.c b/sound/isa/msnd/msnd_pinnacle.c index 5f3e68401f90..91d6023a63e5 100644 --- a/sound/isa/msnd/msnd_pinnacle.c +++ b/sound/isa/msnd/msnd_pinnacle.c | |||
@@ -764,9 +764,9 @@ static long io[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; | |||
764 | static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; | 764 | static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; |
765 | static long mem[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; | 765 | static long mem[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; |
766 | 766 | ||
767 | #ifndef MSND_CLASSIC | ||
767 | static long cfg[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; | 768 | static long cfg[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; |
768 | 769 | ||
769 | #ifndef MSND_CLASSIC | ||
770 | /* Extra Peripheral Configuration (Default: Disable) */ | 770 | /* Extra Peripheral Configuration (Default: Disable) */ |
771 | static long ide_io0[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; | 771 | static long ide_io0[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; |
772 | static long ide_io1[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; | 772 | static long ide_io1[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; |
@@ -894,7 +894,11 @@ static int __devinit snd_msnd_isa_probe(struct device *pdev, unsigned int idx) | |||
894 | struct snd_card *card; | 894 | struct snd_card *card; |
895 | struct snd_msnd *chip; | 895 | struct snd_msnd *chip; |
896 | 896 | ||
897 | if (has_isapnp(idx) || cfg[idx] == SNDRV_AUTO_PORT) { | 897 | if (has_isapnp(idx) |
898 | #ifndef MSND_CLASSIC | ||
899 | || cfg[idx] == SNDRV_AUTO_PORT | ||
900 | #endif | ||
901 | ) { | ||
898 | printk(KERN_INFO LOGNAME ": Assuming PnP mode\n"); | 902 | printk(KERN_INFO LOGNAME ": Assuming PnP mode\n"); |
899 | return -ENODEV; | 903 | return -ENODEV; |
900 | } | 904 | } |
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 3827092cc1d2..14829210ef0b 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c | |||
@@ -4536,7 +4536,7 @@ int snd_hda_parse_pin_def_config(struct hda_codec *codec, | |||
4536 | cfg->hp_outs--; | 4536 | cfg->hp_outs--; |
4537 | memmove(cfg->hp_pins + i, cfg->hp_pins + i + 1, | 4537 | memmove(cfg->hp_pins + i, cfg->hp_pins + i + 1, |
4538 | sizeof(cfg->hp_pins[0]) * (cfg->hp_outs - i)); | 4538 | sizeof(cfg->hp_pins[0]) * (cfg->hp_outs - i)); |
4539 | memmove(sequences_hp + i - 1, sequences_hp + i, | 4539 | memmove(sequences_hp + i, sequences_hp + i + 1, |
4540 | sizeof(sequences_hp[0]) * (cfg->hp_outs - i)); | 4540 | sizeof(sequences_hp[0]) * (cfg->hp_outs - i)); |
4541 | } | 4541 | } |
4542 | } | 4542 | } |
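Editor's note: the fix keeps the two parallel arrays in step: when hp_pins[i] is dropped, the matching sequences_hp entry must be removed at the same index i, not i - 1. A standalone illustration of deleting element i from parallel arrays with memmove:

#include <stdio.h>
#include <string.h>

static void remove_at(int pins[], int seqs[], int *n, int i)
{
        (*n)--;
        /* shift both arrays down over the removed slot, same index in each */
        memmove(&pins[i], &pins[i + 1], sizeof(pins[0]) * (*n - i));
        memmove(&seqs[i], &seqs[i + 1], sizeof(seqs[0]) * (*n - i));
}

int main(void)
{
        int pins[] = { 0x14, 0x15, 0x16 };
        int seqs[] = { 1, 2, 3 };
        int n = 3;

        remove_at(pins, seqs, &n, 1);
        for (int i = 0; i < n; i++)
                printf("pin 0x%x seq %d\n", pins[i], seqs[i]);
        return 0;
}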
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 1053fff4bd0a..34940a079051 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
@@ -126,6 +126,7 @@ MODULE_SUPPORTED_DEVICE("{{Intel, ICH6}," | |||
126 | "{Intel, ICH10}," | 126 | "{Intel, ICH10}," |
127 | "{Intel, PCH}," | 127 | "{Intel, PCH}," |
128 | "{Intel, CPT}," | 128 | "{Intel, CPT}," |
129 | "{Intel, PBG}," | ||
129 | "{Intel, SCH}," | 130 | "{Intel, SCH}," |
130 | "{ATI, SB450}," | 131 | "{ATI, SB450}," |
131 | "{ATI, SB600}," | 132 | "{ATI, SB600}," |
@@ -2749,6 +2750,8 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = { | |||
2749 | { PCI_DEVICE(0x8086, 0x3b57), .driver_data = AZX_DRIVER_ICH }, | 2750 | { PCI_DEVICE(0x8086, 0x3b57), .driver_data = AZX_DRIVER_ICH }, |
2750 | /* CPT */ | 2751 | /* CPT */ |
2751 | { PCI_DEVICE(0x8086, 0x1c20), .driver_data = AZX_DRIVER_PCH }, | 2752 | { PCI_DEVICE(0x8086, 0x1c20), .driver_data = AZX_DRIVER_PCH }, |
2753 | /* PBG */ | ||
2754 | { PCI_DEVICE(0x8086, 0x1d20), .driver_data = AZX_DRIVER_PCH }, | ||
2752 | /* SCH */ | 2755 | /* SCH */ |
2753 | { PCI_DEVICE(0x8086, 0x811b), .driver_data = AZX_DRIVER_SCH }, | 2756 | { PCI_DEVICE(0x8086, 0x811b), .driver_data = AZX_DRIVER_SCH }, |
2754 | /* ATI SB 450/600 */ | 2757 | /* ATI SB 450/600 */ |
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c index b697fd2a6f8b..10bbbaf6ebc3 100644 --- a/sound/pci/hda/patch_analog.c +++ b/sound/pci/hda/patch_analog.c | |||
@@ -3641,6 +3641,7 @@ static struct snd_pci_quirk ad1984_cfg_tbl[] = { | |||
3641 | /* Lenovo Thinkpad T61/X61 */ | 3641 | /* Lenovo Thinkpad T61/X61 */ |
3642 | SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo Thinkpad", AD1984_THINKPAD), | 3642 | SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo Thinkpad", AD1984_THINKPAD), |
3643 | SND_PCI_QUIRK(0x1028, 0x0214, "Dell T3400", AD1984_DELL_DESKTOP), | 3643 | SND_PCI_QUIRK(0x1028, 0x0214, "Dell T3400", AD1984_DELL_DESKTOP), |
3644 | SND_PCI_QUIRK(0x1028, 0x0233, "Dell Latitude E6400", AD1984_DELL_DESKTOP), | ||
3644 | {} | 3645 | {} |
3645 | }; | 3646 | }; |
3646 | 3647 | ||
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c index 4ef5efaaaef1..488fd9ade1ba 100644 --- a/sound/pci/hda/patch_cirrus.c +++ b/sound/pci/hda/patch_cirrus.c | |||
@@ -972,6 +972,53 @@ static struct hda_verb cs_coef_init_verbs[] = { | |||
972 | {} /* terminator */ | 972 | {} /* terminator */ |
973 | }; | 973 | }; |
974 | 974 | ||
975 | /* Errata: CS4207 rev C0/C1/C2 Silicon | ||
976 | * | ||
977 | * http://www.cirrus.com/en/pubs/errata/ER880C3.pdf | ||
978 | * | ||
979 | * 6. At high temperature (TA > +85°C), the digital supply current (IVD) | ||
980 | * may be excessive (up to an additional 200 μA), which is most easily | ||
981 | * observed while the part is being held in reset (RESET# active low). | ||
982 | * | ||
983 | * Root Cause: At initial powerup of the device, the logic that drives | ||
984 | * the clock and write enable to the S/PDIF SRC RAMs is not properly | ||
985 | * initialized. | ||
986 | * Certain random patterns will cause a steady leakage current in those | ||
987 | * RAM cells. The issue will resolve once the SRCs are used (turned on). | ||
988 | * | ||
989 | * Workaround: The following verb sequence briefly turns on the S/PDIF SRC | ||
990 | * blocks, which will alleviate the issue. | ||
991 | */ | ||
992 | |||
993 | static struct hda_verb cs_errata_init_verbs[] = { | ||
994 | {0x01, AC_VERB_SET_POWER_STATE, 0x00}, /* AFG: D0 */ | ||
995 | {0x11, AC_VERB_SET_PROC_STATE, 0x01}, /* VPW: processing on */ | ||
996 | |||
997 | {0x11, AC_VERB_SET_COEF_INDEX, 0x0008}, | ||
998 | {0x11, AC_VERB_SET_PROC_COEF, 0x9999}, | ||
999 | {0x11, AC_VERB_SET_COEF_INDEX, 0x0017}, | ||
1000 | {0x11, AC_VERB_SET_PROC_COEF, 0xa412}, | ||
1001 | {0x11, AC_VERB_SET_COEF_INDEX, 0x0001}, | ||
1002 | {0x11, AC_VERB_SET_PROC_COEF, 0x0009}, | ||
1003 | |||
1004 | {0x07, AC_VERB_SET_POWER_STATE, 0x00}, /* S/PDIF Rx: D0 */ | ||
1005 | {0x08, AC_VERB_SET_POWER_STATE, 0x00}, /* S/PDIF Tx: D0 */ | ||
1006 | |||
1007 | {0x11, AC_VERB_SET_COEF_INDEX, 0x0017}, | ||
1008 | {0x11, AC_VERB_SET_PROC_COEF, 0x2412}, | ||
1009 | {0x11, AC_VERB_SET_COEF_INDEX, 0x0008}, | ||
1010 | {0x11, AC_VERB_SET_PROC_COEF, 0x0000}, | ||
1011 | {0x11, AC_VERB_SET_COEF_INDEX, 0x0001}, | ||
1012 | {0x11, AC_VERB_SET_PROC_COEF, 0x0008}, | ||
1013 | {0x11, AC_VERB_SET_PROC_STATE, 0x00}, | ||
1014 | |||
1015 | {0x07, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Rx: D3 */ | ||
1016 | {0x08, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Tx: D3 */ | ||
1017 | /*{0x01, AC_VERB_SET_POWER_STATE, 0x03},*/ /* AFG: D3 This is already handled */ | ||
1018 | |||
1019 | {} /* terminator */ | ||
1020 | }; | ||
1021 | |||
975 | /* SPDIF setup */ | 1022 | /* SPDIF setup */ |
976 | static void init_digital(struct hda_codec *codec) | 1023 | static void init_digital(struct hda_codec *codec) |
977 | { | 1024 | { |
@@ -991,6 +1038,9 @@ static int cs_init(struct hda_codec *codec) | |||
991 | { | 1038 | { |
992 | struct cs_spec *spec = codec->spec; | 1039 | struct cs_spec *spec = codec->spec; |
993 | 1040 | ||
1041 | /* init_verb sequence for C0/C1/C2 errata*/ | ||
1042 | snd_hda_sequence_write(codec, cs_errata_init_verbs); | ||
1043 | |||
994 | snd_hda_sequence_write(codec, cs_coef_init_verbs); | 1044 | snd_hda_sequence_write(codec, cs_coef_init_verbs); |
995 | 1045 | ||
996 | if (spec->gpio_mask) { | 1046 | if (spec->gpio_mask) { |
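Editor's note: the errata workaround is just another terminator-ended verb table handed to snd_hda_sequence_write() ahead of the normal coefficient init. A hedged sketch of how such a table is walked, using a stub writer and illustrative verb values in place of the HDA bus call:

#include <stdio.h>

struct verb { unsigned int nid, verb, param; };

/* stand-in for the codec write; just prints the command */
static void write_verb(const struct verb *v)
{
        printf("nid 0x%02x verb 0x%03x param 0x%04x\n",
               v->nid, v->verb, v->param);
}

/* walk a zero-terminated table in order, like a verb sequence write */
static void sequence_write(const struct verb *seq)
{
        for (; seq->nid; seq++)
                write_verb(seq);
}

static const struct verb errata_init[] = {
        { 0x01, 0x705, 0x00 },          /* e.g. AFG power state D0 */
        { 0x11, 0x703, 0x01 },          /* e.g. processing on */
        /* ... coefficient writes elided ... */
        { 0 }                           /* terminator */
};

int main(void)
{
        sequence_write(errata_init);
        return 0;
}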
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 5cdb80edbd7f..972e7c453b3d 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c | |||
@@ -116,6 +116,7 @@ struct conexant_spec { | |||
116 | unsigned int dell_vostro:1; | 116 | unsigned int dell_vostro:1; |
117 | unsigned int ideapad:1; | 117 | unsigned int ideapad:1; |
118 | unsigned int thinkpad:1; | 118 | unsigned int thinkpad:1; |
119 | unsigned int hp_laptop:1; | ||
119 | 120 | ||
120 | unsigned int ext_mic_present; | 121 | unsigned int ext_mic_present; |
121 | unsigned int recording; | 122 | unsigned int recording; |
@@ -2299,6 +2300,18 @@ static void cxt5066_ideapad_automic(struct hda_codec *codec) | |||
2299 | } | 2300 | } |
2300 | } | 2301 | } |
2301 | 2302 | ||
2303 | /* toggle input of built-in digital mic and mic jack appropriately */ | ||
2304 | static void cxt5066_hp_laptop_automic(struct hda_codec *codec) | ||
2305 | { | ||
2306 | unsigned int present; | ||
2307 | |||
2308 | present = snd_hda_jack_detect(codec, 0x1b); | ||
2309 | snd_printdd("CXT5066: external microphone present=%d\n", present); | ||
2310 | snd_hda_codec_write(codec, 0x17, 0, AC_VERB_SET_CONNECT_SEL, | ||
2311 | present ? 1 : 3); | ||
2312 | } | ||
2313 | |||
2314 | |||
2302 | /* toggle input of built-in digital mic and mic jack appropriately | 2315 | /* toggle input of built-in digital mic and mic jack appropriately |
2303 | order is: external mic -> dock mic -> internal mic */ | 2316 | order is: external mic -> dock mic -> internal mic */ |

2304 | static void cxt5066_thinkpad_automic(struct hda_codec *codec) | 2317 | static void cxt5066_thinkpad_automic(struct hda_codec *codec) |
@@ -2408,6 +2421,20 @@ static void cxt5066_ideapad_event(struct hda_codec *codec, unsigned int res) | |||
2408 | } | 2421 | } |
2409 | 2422 | ||
2410 | /* unsolicited event for jack sensing */ | 2423 | /* unsolicited event for jack sensing */ |
2424 | static void cxt5066_hp_laptop_event(struct hda_codec *codec, unsigned int res) | ||
2425 | { | ||
2426 | snd_printdd("CXT5066_hp_laptop: unsol event %x (%x)\n", res, res >> 26); | ||
2427 | switch (res >> 26) { | ||
2428 | case CONEXANT_HP_EVENT: | ||
2429 | cxt5066_hp_automute(codec); | ||
2430 | break; | ||
2431 | case CONEXANT_MIC_EVENT: | ||
2432 | cxt5066_hp_laptop_automic(codec); | ||
2433 | break; | ||
2434 | } | ||
2435 | } | ||
2436 | |||
2437 | /* unsolicited event for jack sensing */ | ||
2411 | static void cxt5066_thinkpad_event(struct hda_codec *codec, unsigned int res) | 2438 | static void cxt5066_thinkpad_event(struct hda_codec *codec, unsigned int res) |
2412 | { | 2439 | { |
2413 | snd_printdd("CXT5066_thinkpad: unsol event %x (%x)\n", res, res >> 26); | 2440 | snd_printdd("CXT5066_thinkpad: unsol event %x (%x)\n", res, res >> 26); |
@@ -2989,6 +3016,14 @@ static struct hda_verb cxt5066_init_verbs_portd_lo[] = { | |||
2989 | { } /* end */ | 3016 | { } /* end */ |
2990 | }; | 3017 | }; |
2991 | 3018 | ||
3019 | |||
3020 | static struct hda_verb cxt5066_init_verbs_hp_laptop[] = { | ||
3021 | {0x14, AC_VERB_SET_CONNECT_SEL, 0x0}, | ||
3022 | {0x19, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_HP_EVENT}, | ||
3023 | {0x1b, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_MIC_EVENT}, | ||
3024 | { } /* end */ | ||
3025 | }; | ||
3026 | |||
2992 | /* initialize jack-sensing, too */ | 3027 | /* initialize jack-sensing, too */ |
2993 | static int cxt5066_init(struct hda_codec *codec) | 3028 | static int cxt5066_init(struct hda_codec *codec) |
2994 | { | 3029 | { |
@@ -3004,6 +3039,8 @@ static int cxt5066_init(struct hda_codec *codec) | |||
3004 | cxt5066_ideapad_automic(codec); | 3039 | cxt5066_ideapad_automic(codec); |
3005 | else if (spec->thinkpad) | 3040 | else if (spec->thinkpad) |
3006 | cxt5066_thinkpad_automic(codec); | 3041 | cxt5066_thinkpad_automic(codec); |
3042 | else if (spec->hp_laptop) | ||
3043 | cxt5066_hp_laptop_automic(codec); | ||
3007 | } | 3044 | } |
3008 | cxt5066_set_mic_boost(codec); | 3045 | cxt5066_set_mic_boost(codec); |
3009 | return 0; | 3046 | return 0; |
@@ -3031,6 +3068,7 @@ enum { | |||
3031 | CXT5066_DELL_VOSTO, /* Dell Vostro 1015i */ | 3068 | CXT5066_DELL_VOSTO, /* Dell Vostro 1015i */ |
3032 | CXT5066_IDEAPAD, /* Lenovo IdeaPad U150 */ | 3069 | CXT5066_IDEAPAD, /* Lenovo IdeaPad U150 */ |
3033 | CXT5066_THINKPAD, /* Lenovo ThinkPad T410s, others? */ | 3070 | CXT5066_THINKPAD, /* Lenovo ThinkPad T410s, others? */ |
3071 | CXT5066_HP_LAPTOP, /* HP Laptop */ | ||
3034 | CXT5066_MODELS | 3072 | CXT5066_MODELS |
3035 | }; | 3073 | }; |
3036 | 3074 | ||
@@ -3041,6 +3079,7 @@ static const char *cxt5066_models[CXT5066_MODELS] = { | |||
3041 | [CXT5066_DELL_VOSTO] = "dell-vostro", | 3079 | [CXT5066_DELL_VOSTO] = "dell-vostro", |
3042 | [CXT5066_IDEAPAD] = "ideapad", | 3080 | [CXT5066_IDEAPAD] = "ideapad", |
3043 | [CXT5066_THINKPAD] = "thinkpad", | 3081 | [CXT5066_THINKPAD] = "thinkpad", |
3082 | [CXT5066_HP_LAPTOP] = "hp-laptop", | ||
3044 | }; | 3083 | }; |
3045 | 3084 | ||
3046 | static struct snd_pci_quirk cxt5066_cfg_tbl[] = { | 3085 | static struct snd_pci_quirk cxt5066_cfg_tbl[] = { |
@@ -3052,8 +3091,11 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = { | |||
3052 | SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTO), | 3091 | SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTO), |
3053 | SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTO), | 3092 | SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTO), |
3054 | SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD), | 3093 | SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD), |
3094 | SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP), | ||
3095 | SND_PCI_QUIRK(0x1179, 0xff1e, "Toshiba Satellite C650D", CXT5066_IDEAPAD), | ||
3055 | SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5), | 3096 | SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5), |
3056 | SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5), | 3097 | SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5), |
3098 | SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400s", CXT5066_THINKPAD), | ||
3057 | SND_PCI_QUIRK(0x17aa, 0x21b2, "Thinkpad X100e", CXT5066_IDEAPAD), | 3099 | SND_PCI_QUIRK(0x17aa, 0x21b2, "Thinkpad X100e", CXT5066_IDEAPAD), |
3058 | SND_PCI_QUIRK(0x17aa, 0x21b3, "Thinkpad Edge 13 (197)", CXT5066_IDEAPAD), | 3100 | SND_PCI_QUIRK(0x17aa, 0x21b3, "Thinkpad Edge 13 (197)", CXT5066_IDEAPAD), |
3059 | SND_PCI_QUIRK(0x17aa, 0x21b4, "Thinkpad Edge", CXT5066_IDEAPAD), | 3101 | SND_PCI_QUIRK(0x17aa, 0x21b4, "Thinkpad Edge", CXT5066_IDEAPAD), |
@@ -3116,6 +3158,23 @@ static int patch_cxt5066(struct hda_codec *codec) | |||
3116 | spec->num_init_verbs++; | 3158 | spec->num_init_verbs++; |
3117 | spec->dell_automute = 1; | 3159 | spec->dell_automute = 1; |
3118 | break; | 3160 | break; |
3161 | case CXT5066_HP_LAPTOP: | ||
3162 | codec->patch_ops.init = cxt5066_init; | ||
3163 | codec->patch_ops.unsol_event = cxt5066_hp_laptop_event; | ||
3164 | spec->init_verbs[spec->num_init_verbs] = | ||
3165 | cxt5066_init_verbs_hp_laptop; | ||
3166 | spec->num_init_verbs++; | ||
3167 | spec->hp_laptop = 1; | ||
3168 | spec->mixers[spec->num_mixers++] = cxt5066_mixer_master; | ||
3169 | spec->mixers[spec->num_mixers++] = cxt5066_mixers; | ||
3170 | /* no S/PDIF out */ | ||
3171 | spec->multiout.dig_out_nid = 0; | ||
3172 | /* input source automatically selected */ | ||
3173 | spec->input_mux = NULL; | ||
3174 | spec->port_d_mode = 0; | ||
3175 | spec->mic_boost = 3; /* default 30dB gain */ | ||
3176 | break; | ||
3177 | |||
3119 | case CXT5066_OLPC_XO_1_5: | 3178 | case CXT5066_OLPC_XO_1_5: |
3120 | codec->patch_ops.init = cxt5066_olpc_init; | 3179 | codec->patch_ops.init = cxt5066_olpc_init; |
3121 | codec->patch_ops.unsol_event = cxt5066_olpc_unsol_event; | 3180 | codec->patch_ops.unsol_event = cxt5066_olpc_unsol_event; |
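Editor's note: the new CXT5066_HP_LAPTOP model is selected purely by PCI subsystem IDs through the quirk table, as are the added Toshiba and Lenovo entries. A simplified sketch of that table lookup with values taken from the entries above (the real snd_pci_quirk_lookup also handles vendor-only and masked entries):

#include <stdio.h>

struct quirk {
        unsigned short subvendor, subdevice;
        const char *name;
        int model;                      /* illustrative model value */
};

static const struct quirk table[] = {
        { 0x103c, 0x360b, "HP G60", 1 },
        { 0x17aa, 0x20f2, "Lenovo T400s", 2 },
        { 0, 0, NULL, 0 }               /* terminator */
};

/* first entry with matching subsystem IDs wins */
static const struct quirk *lookup(unsigned short sv, unsigned short sd)
{
        const struct quirk *q;

        for (q = table; q->name; q++)
                if (q->subvendor == sv && q->subdevice == sd)
                        return q;
        return NULL;
}

int main(void)
{
        const struct quirk *q = lookup(0x103c, 0x360b);

        printf("%s\n", q ? q->name : "no quirk");
        return 0;
}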
diff --git a/sound/pci/hda/patch_nvhdmi.c b/sound/pci/hda/patch_nvhdmi.c index 69b950d527c3..baa108b9d6aa 100644 --- a/sound/pci/hda/patch_nvhdmi.c +++ b/sound/pci/hda/patch_nvhdmi.c | |||
@@ -84,7 +84,7 @@ static struct hda_verb nvhdmi_basic_init_7x[] = { | |||
84 | #else | 84 | #else |
85 | /* support all rates and formats */ | 85 | /* support all rates and formats */ |
86 | #define SUPPORTED_RATES \ | 86 | #define SUPPORTED_RATES \ |
87 | (SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |\ | 87 | (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |\ |
88 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |\ | 88 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |\ |
89 | SNDRV_PCM_RATE_192000) | 89 | SNDRV_PCM_RATE_192000) |
90 | #define SUPPORTED_MAXBPS 24 | 90 | #define SUPPORTED_MAXBPS 24 |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 627bf9963368..a432e6efd19b 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -1594,12 +1594,22 @@ static void alc_auto_parse_digital(struct hda_codec *codec) | |||
1594 | } | 1594 | } |
1595 | 1595 | ||
1596 | if (spec->autocfg.dig_in_pin) { | 1596 | if (spec->autocfg.dig_in_pin) { |
1597 | hda_nid_t dig_nid; | 1597 | dig_nid = codec->start_nid; |
1598 | err = snd_hda_get_connections(codec, | 1598 | for (i = 0; i < codec->num_nodes; i++, dig_nid++) { |
1599 | spec->autocfg.dig_in_pin, | 1599 | unsigned int wcaps = get_wcaps(codec, dig_nid); |
1600 | &dig_nid, 1); | 1600 | if (get_wcaps_type(wcaps) != AC_WID_AUD_IN) |
1601 | if (err > 0) | 1601 | continue; |
1602 | spec->dig_in_nid = dig_nid; | 1602 | if (!(wcaps & AC_WCAP_DIGITAL)) |
1603 | continue; | ||
1604 | if (!(wcaps & AC_WCAP_CONN_LIST)) | ||
1605 | continue; | ||
1606 | err = get_connection_index(codec, dig_nid, | ||
1607 | spec->autocfg.dig_in_pin); | ||
1608 | if (err >= 0) { | ||
1609 | spec->dig_in_nid = dig_nid; | ||
1610 | break; | ||
1611 | } | ||
1612 | } | ||
1603 | } | 1613 | } |
1604 | } | 1614 | } |
1605 | 1615 | ||
@@ -5334,6 +5344,7 @@ static void fillup_priv_adc_nids(struct hda_codec *codec, hda_nid_t *nids, | |||
5334 | 5344 | ||
5335 | static struct snd_pci_quirk beep_white_list[] = { | 5345 | static struct snd_pci_quirk beep_white_list[] = { |
5336 | SND_PCI_QUIRK(0x1043, 0x829f, "ASUS", 1), | 5346 | SND_PCI_QUIRK(0x1043, 0x829f, "ASUS", 1), |
5347 | SND_PCI_QUIRK(0x1043, 0x83ce, "EeePC", 1), | ||
5337 | SND_PCI_QUIRK(0x8086, 0xd613, "Intel", 1), | 5348 | SND_PCI_QUIRK(0x8086, 0xd613, "Intel", 1), |
5338 | {} | 5349 | {} |
5339 | }; | 5350 | }; |
@@ -14452,6 +14463,7 @@ static void alc269_auto_init(struct hda_codec *codec) | |||
14452 | 14463 | ||
14453 | enum { | 14464 | enum { |
14454 | ALC269_FIXUP_SONY_VAIO, | 14465 | ALC269_FIXUP_SONY_VAIO, |
14466 | ALC269_FIXUP_DELL_M101Z, | ||
14455 | }; | 14467 | }; |
14456 | 14468 | ||
14457 | static const struct hda_verb alc269_sony_vaio_fixup_verbs[] = { | 14469 | static const struct hda_verb alc269_sony_vaio_fixup_verbs[] = { |
@@ -14463,11 +14475,20 @@ static const struct alc_fixup alc269_fixups[] = { | |||
14463 | [ALC269_FIXUP_SONY_VAIO] = { | 14475 | [ALC269_FIXUP_SONY_VAIO] = { |
14464 | .verbs = alc269_sony_vaio_fixup_verbs | 14476 | .verbs = alc269_sony_vaio_fixup_verbs |
14465 | }, | 14477 | }, |
14478 | [ALC269_FIXUP_DELL_M101Z] = { | ||
14479 | .verbs = (const struct hda_verb[]) { | ||
14480 | /* Enables internal speaker */ | ||
14481 | {0x20, AC_VERB_SET_COEF_INDEX, 13}, | ||
14482 | {0x20, AC_VERB_SET_PROC_COEF, 0x4040}, | ||
14483 | {} | ||
14484 | } | ||
14485 | }, | ||
14466 | }; | 14486 | }; |
14467 | 14487 | ||
14468 | static struct snd_pci_quirk alc269_fixup_tbl[] = { | 14488 | static struct snd_pci_quirk alc269_fixup_tbl[] = { |
14469 | SND_PCI_QUIRK(0x104d, 0x9071, "Sony VAIO", ALC269_FIXUP_SONY_VAIO), | 14489 | SND_PCI_QUIRK(0x104d, 0x9071, "Sony VAIO", ALC269_FIXUP_SONY_VAIO), |
14470 | SND_PCI_QUIRK(0x104d, 0x9077, "Sony VAIO", ALC269_FIXUP_SONY_VAIO), | 14490 | SND_PCI_QUIRK(0x104d, 0x9077, "Sony VAIO", ALC269_FIXUP_SONY_VAIO), |
14491 | SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), | ||
14471 | {} | 14492 | {} |
14472 | }; | 14493 | }; |
14473 | 14494 | ||
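Editor's note: instead of asking the pin for its single connection, the digital-input parser now scans every codec node for a digital audio-in widget that has a connection list containing the configured pin. A simplified standalone sketch of that filter loop; the capability bits and the single `conn` field are stand-ins, not the real widget-capability layout:

#include <stdio.h>

#define WCAP_DIGITAL   (1u << 0)        /* illustrative bit values */
#define WCAP_CONN_LIST (1u << 1)
#define WID_AUD_IN     1

struct widget {
        int nid;
        int type;               /* WID_AUD_IN, ... */
        unsigned int caps;      /* WCAP_* bits */
        int conn;               /* nid this widget is connected to */
};

/* scan every widget for a digital audio input wired to the given pin */
static int find_dig_in(const struct widget *w, int count, int pin_nid)
{
        for (int i = 0; i < count; i++) {
                if (w[i].type != WID_AUD_IN)
                        continue;
                if (!(w[i].caps & WCAP_DIGITAL))
                        continue;
                if (!(w[i].caps & WCAP_CONN_LIST))
                        continue;
                if (w[i].conn == pin_nid)
                        return w[i].nid;
        }
        return 0;
}

int main(void)
{
        struct widget w[] = {
                { 0x08, WID_AUD_IN, WCAP_CONN_LIST, 0x18 },
                { 0x0a, WID_AUD_IN, WCAP_DIGITAL | WCAP_CONN_LIST, 0x1f },
        };

        printf("dig-in widget: 0x%x\n", find_dig_in(w, 2, 0x1f));
        return 0;
}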
diff --git a/sound/pci/oxygen/oxygen.c b/sound/pci/oxygen/oxygen.c index 289cb4dacfc7..6c0a11adb2a8 100644 --- a/sound/pci/oxygen/oxygen.c +++ b/sound/pci/oxygen/oxygen.c | |||
@@ -543,6 +543,10 @@ static int __devinit get_oxygen_model(struct oxygen *chip, | |||
543 | chip->model.suspend = claro_suspend; | 543 | chip->model.suspend = claro_suspend; |
544 | chip->model.resume = claro_resume; | 544 | chip->model.resume = claro_resume; |
545 | chip->model.set_adc_params = set_ak5385_params; | 545 | chip->model.set_adc_params = set_ak5385_params; |
546 | chip->model.device_config = PLAYBACK_0_TO_I2S | | ||
547 | PLAYBACK_1_TO_SPDIF | | ||
548 | CAPTURE_0_FROM_I2S_2 | | ||
549 | CAPTURE_1_FROM_SPDIF; | ||
546 | break; | 550 | break; |
547 | } | 551 | } |
548 | if (id->driver_data == MODEL_MERIDIAN || | 552 | if (id->driver_data == MODEL_MERIDIAN || |
diff --git a/sound/pci/oxygen/oxygen.h b/sound/pci/oxygen/oxygen.h index 6147216af744..a3409edcfb50 100644 --- a/sound/pci/oxygen/oxygen.h +++ b/sound/pci/oxygen/oxygen.h | |||
@@ -155,6 +155,7 @@ void oxygen_pci_remove(struct pci_dev *pci); | |||
155 | int oxygen_pci_suspend(struct pci_dev *pci, pm_message_t state); | 155 | int oxygen_pci_suspend(struct pci_dev *pci, pm_message_t state); |
156 | int oxygen_pci_resume(struct pci_dev *pci); | 156 | int oxygen_pci_resume(struct pci_dev *pci); |
157 | #endif | 157 | #endif |
158 | void oxygen_pci_shutdown(struct pci_dev *pci); | ||
158 | 159 | ||
159 | /* oxygen_mixer.c */ | 160 | /* oxygen_mixer.c */ |
160 | 161 | ||
diff --git a/sound/pci/oxygen/oxygen_lib.c b/sound/pci/oxygen/oxygen_lib.c index fad03d64e3ad..7e93cf884437 100644 --- a/sound/pci/oxygen/oxygen_lib.c +++ b/sound/pci/oxygen/oxygen_lib.c | |||
@@ -519,16 +519,21 @@ static void oxygen_init(struct oxygen *chip) | |||
519 | } | 519 | } |
520 | } | 520 | } |
521 | 521 | ||
522 | static void oxygen_card_free(struct snd_card *card) | 522 | static void oxygen_shutdown(struct oxygen *chip) |
523 | { | 523 | { |
524 | struct oxygen *chip = card->private_data; | ||
525 | |||
526 | spin_lock_irq(&chip->reg_lock); | 524 | spin_lock_irq(&chip->reg_lock); |
527 | chip->interrupt_mask = 0; | 525 | chip->interrupt_mask = 0; |
528 | chip->pcm_running = 0; | 526 | chip->pcm_running = 0; |
529 | oxygen_write16(chip, OXYGEN_DMA_STATUS, 0); | 527 | oxygen_write16(chip, OXYGEN_DMA_STATUS, 0); |
530 | oxygen_write16(chip, OXYGEN_INTERRUPT_MASK, 0); | 528 | oxygen_write16(chip, OXYGEN_INTERRUPT_MASK, 0); |
531 | spin_unlock_irq(&chip->reg_lock); | 529 | spin_unlock_irq(&chip->reg_lock); |
530 | } | ||
531 | |||
532 | static void oxygen_card_free(struct snd_card *card) | ||
533 | { | ||
534 | struct oxygen *chip = card->private_data; | ||
535 | |||
536 | oxygen_shutdown(chip); | ||
532 | if (chip->irq >= 0) | 537 | if (chip->irq >= 0) |
533 | free_irq(chip->irq, chip); | 538 | free_irq(chip->irq, chip); |
534 | flush_scheduled_work(); | 539 | flush_scheduled_work(); |
@@ -778,3 +783,13 @@ int oxygen_pci_resume(struct pci_dev *pci) | |||
778 | } | 783 | } |
779 | EXPORT_SYMBOL(oxygen_pci_resume); | 784 | EXPORT_SYMBOL(oxygen_pci_resume); |
780 | #endif /* CONFIG_PM */ | 785 | #endif /* CONFIG_PM */ |
786 | |||
787 | void oxygen_pci_shutdown(struct pci_dev *pci) | ||
788 | { | ||
789 | struct snd_card *card = pci_get_drvdata(pci); | ||
790 | struct oxygen *chip = card->private_data; | ||
791 | |||
792 | oxygen_shutdown(chip); | ||
793 | chip->model.cleanup(chip); | ||
794 | } | ||
795 | EXPORT_SYMBOL(oxygen_pci_shutdown); | ||
diff --git a/sound/pci/oxygen/virtuoso.c b/sound/pci/oxygen/virtuoso.c index f03a2f2cffee..06c863e86e3d 100644 --- a/sound/pci/oxygen/virtuoso.c +++ b/sound/pci/oxygen/virtuoso.c | |||
@@ -95,6 +95,7 @@ static struct pci_driver xonar_driver = { | |||
95 | .suspend = oxygen_pci_suspend, | 95 | .suspend = oxygen_pci_suspend, |
96 | .resume = oxygen_pci_resume, | 96 | .resume = oxygen_pci_resume, |
97 | #endif | 97 | #endif |
98 | .shutdown = oxygen_pci_shutdown, | ||
98 | }; | 99 | }; |
99 | 100 | ||
100 | static int __init alsa_card_xonar_init(void) | 101 | static int __init alsa_card_xonar_init(void) |
diff --git a/sound/pci/oxygen/xonar_wm87x6.c b/sound/pci/oxygen/xonar_wm87x6.c index dbc4b89d74e4..b82c1cfa96f5 100644 --- a/sound/pci/oxygen/xonar_wm87x6.c +++ b/sound/pci/oxygen/xonar_wm87x6.c | |||
@@ -53,6 +53,8 @@ struct xonar_wm87x6 { | |||
53 | struct xonar_generic generic; | 53 | struct xonar_generic generic; |
54 | u16 wm8776_regs[0x17]; | 54 | u16 wm8776_regs[0x17]; |
55 | u16 wm8766_regs[0x10]; | 55 | u16 wm8766_regs[0x10]; |
56 | struct snd_kcontrol *line_adcmux_control; | ||
57 | struct snd_kcontrol *mic_adcmux_control; | ||
56 | struct snd_kcontrol *lc_controls[13]; | 58 | struct snd_kcontrol *lc_controls[13]; |
57 | }; | 59 | }; |
58 | 60 | ||
@@ -193,6 +195,7 @@ static void xonar_ds_init(struct oxygen *chip) | |||
193 | static void xonar_ds_cleanup(struct oxygen *chip) | 195 | static void xonar_ds_cleanup(struct oxygen *chip) |
194 | { | 196 | { |
195 | xonar_disable_output(chip); | 197 | xonar_disable_output(chip); |
198 | wm8776_write(chip, WM8776_RESET, 0); | ||
196 | } | 199 | } |
197 | 200 | ||
198 | static void xonar_ds_suspend(struct oxygen *chip) | 201 | static void xonar_ds_suspend(struct oxygen *chip) |
@@ -603,6 +606,7 @@ static int wm8776_input_mux_put(struct snd_kcontrol *ctl, | |||
603 | { | 606 | { |
604 | struct oxygen *chip = ctl->private_data; | 607 | struct oxygen *chip = ctl->private_data; |
605 | struct xonar_wm87x6 *data = chip->model_data; | 608 | struct xonar_wm87x6 *data = chip->model_data; |
609 | struct snd_kcontrol *other_ctl; | ||
606 | unsigned int mux_bit = ctl->private_value; | 610 | unsigned int mux_bit = ctl->private_value; |
607 | u16 reg; | 611 | u16 reg; |
608 | int changed; | 612 | int changed; |
@@ -610,8 +614,18 @@ static int wm8776_input_mux_put(struct snd_kcontrol *ctl, | |||
610 | mutex_lock(&chip->mutex); | 614 | mutex_lock(&chip->mutex); |
611 | reg = data->wm8776_regs[WM8776_ADCMUX]; | 615 | reg = data->wm8776_regs[WM8776_ADCMUX]; |
612 | if (value->value.integer.value[0]) { | 616 | if (value->value.integer.value[0]) { |
613 | reg &= ~0x003; | ||
614 | reg |= mux_bit; | 617 | reg |= mux_bit; |
618 | /* line-in and mic-in are exclusive */ | ||
619 | mux_bit ^= 3; | ||
620 | if (reg & mux_bit) { | ||
621 | reg &= ~mux_bit; | ||
622 | if (mux_bit == 1) | ||
623 | other_ctl = data->line_adcmux_control; | ||
624 | else | ||
625 | other_ctl = data->mic_adcmux_control; | ||
626 | snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE, | ||
627 | &other_ctl->id); | ||
628 | } | ||
615 | } else | 629 | } else |
616 | reg &= ~mux_bit; | 630 | reg &= ~mux_bit; |
617 | changed = reg != data->wm8776_regs[WM8776_ADCMUX]; | 631 | changed = reg != data->wm8776_regs[WM8776_ADCMUX]; |
@@ -963,7 +977,13 @@ static int xonar_ds_mixer_init(struct oxygen *chip) | |||
963 | err = snd_ctl_add(chip->card, ctl); | 977 | err = snd_ctl_add(chip->card, ctl); |
964 | if (err < 0) | 978 | if (err < 0) |
965 | return err; | 979 | return err; |
980 | if (!strcmp(ctl->id.name, "Line Capture Switch")) | ||
981 | data->line_adcmux_control = ctl; | ||
982 | else if (!strcmp(ctl->id.name, "Mic Capture Switch")) | ||
983 | data->mic_adcmux_control = ctl; | ||
966 | } | 984 | } |
985 | if (!data->line_adcmux_control || !data->mic_adcmux_control) | ||
986 | return -ENXIO; | ||
967 | BUILD_BUG_ON(ARRAY_SIZE(lc_controls) != ARRAY_SIZE(data->lc_controls)); | 987 | BUILD_BUG_ON(ARRAY_SIZE(lc_controls) != ARRAY_SIZE(data->lc_controls)); |
968 | for (i = 0; i < ARRAY_SIZE(lc_controls); ++i) { | 988 | for (i = 0; i < ARRAY_SIZE(lc_controls); ++i) { |
969 | ctl = snd_ctl_new1(&lc_controls[i], chip); | 989 | ctl = snd_ctl_new1(&lc_controls[i], chip); |
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c index b92adef8e81e..d6fa7bfd9aa1 100644 --- a/sound/pci/rme9652/hdsp.c +++ b/sound/pci/rme9652/hdsp.c | |||
@@ -4609,6 +4609,7 @@ static int snd_hdsp_hwdep_ioctl(struct snd_hwdep *hw, struct file *file, unsigne | |||
4609 | if (err < 0) | 4609 | if (err < 0) |
4610 | return err; | 4610 | return err; |
4611 | 4611 | ||
4612 | memset(&info, 0, sizeof(info)); | ||
4612 | spin_lock_irqsave(&hdsp->lock, flags); | 4613 | spin_lock_irqsave(&hdsp->lock, flags); |
4613 | info.pref_sync_ref = (unsigned char)hdsp_pref_sync_ref(hdsp); | 4614 | info.pref_sync_ref = (unsigned char)hdsp_pref_sync_ref(hdsp); |
4614 | info.wordclock_sync_check = (unsigned char)hdsp_wc_sync_check(hdsp); | 4615 | info.wordclock_sync_check = (unsigned char)hdsp_wc_sync_check(hdsp); |
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c index 547b713d7204..0c98ef9156d8 100644 --- a/sound/pci/rme9652/hdspm.c +++ b/sound/pci/rme9652/hdspm.c | |||
@@ -4127,6 +4127,7 @@ static int snd_hdspm_hwdep_ioctl(struct snd_hwdep * hw, struct file *file, | |||
4127 | 4127 | ||
4128 | case SNDRV_HDSPM_IOCTL_GET_CONFIG_INFO: | 4128 | case SNDRV_HDSPM_IOCTL_GET_CONFIG_INFO: |
4129 | 4129 | ||
4130 | memset(&info, 0, sizeof(info)); | ||
4130 | spin_lock_irq(&hdspm->lock); | 4131 | spin_lock_irq(&hdspm->lock); |
4131 | info.pref_sync_ref = hdspm_pref_sync_ref(hdspm); | 4132 | info.pref_sync_ref = hdspm_pref_sync_ref(hdspm); |
4132 | info.wordclock_sync_check = hdspm_wc_sync_check(hdspm); | 4133 | info.wordclock_sync_check = hdspm_wc_sync_check(hdspm); |
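Editor's note: both HDSP drivers now clear the info structure before filling it, so stack bytes in fields and padding the handler never writes cannot leak to userspace through the later copy-out. A minimal illustration of the pattern with a hypothetical structure (no copy_to_user here, just the zeroing):

#include <stdio.h>
#include <string.h>

struct config_info {
        unsigned char pref_sync_ref;
        unsigned char wordclock_sync_check;
        unsigned int reserved[4];       /* never written by the handler */
};

static void fill_info(struct config_info *info)
{
        memset(info, 0, sizeof(*info)); /* no uninitialized bytes escape */
        info->pref_sync_ref = 1;
        info->wordclock_sync_check = 0;
        /* ...remaining fields filled as needed; the rest stays zero... */
}

int main(void)
{
        struct config_info info;

        fill_info(&info);
        printf("reserved[0] = %u\n", info.reserved[0]);  /* always 0 now */
        return 0;
}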
diff --git a/sound/ppc/snd_ps3.c b/sound/ppc/snd_ps3.c index 2f12da4da561..581a670e8261 100644 --- a/sound/ppc/snd_ps3.c +++ b/sound/ppc/snd_ps3.c | |||
@@ -579,7 +579,7 @@ static int snd_ps3_delay_to_bytes(struct snd_pcm_substream *substream, | |||
579 | rate * delay_ms / 1000) | 579 | rate * delay_ms / 1000) |
580 | * substream->runtime->channels; | 580 | * substream->runtime->channels; |
581 | 581 | ||
582 | pr_debug(KERN_ERR "%s: time=%d rate=%d bytes=%ld, frames=%d, ret=%d\n", | 582 | pr_debug("%s: time=%d rate=%d bytes=%ld, frames=%d, ret=%d\n", |
583 | __func__, | 583 | __func__, |
584 | delay_ms, | 584 | delay_ms, |
585 | rate, | 585 | rate, |
diff --git a/sound/soc/s3c24xx/s3c-dma.c b/sound/soc/s3c24xx/s3c-dma.c index 1b61c23ff300..f1b1bc4bacfb 100644 --- a/sound/soc/s3c24xx/s3c-dma.c +++ b/sound/soc/s3c24xx/s3c-dma.c | |||
@@ -94,8 +94,7 @@ static void s3c_dma_enqueue(struct snd_pcm_substream *substream) | |||
94 | 94 | ||
95 | if ((pos + len) > prtd->dma_end) { | 95 | if ((pos + len) > prtd->dma_end) { |
96 | len = prtd->dma_end - pos; | 96 | len = prtd->dma_end - pos; |
97 | pr_debug(KERN_DEBUG "%s: corrected dma len %ld\n", | 97 | pr_debug("%s: corrected dma len %ld\n", __func__, len); |
98 | __func__, len); | ||
99 | } | 98 | } |
100 | 99 | ||
101 | ret = s3c2410_dma_enqueue(prtd->params->channel, | 100 | ret = s3c2410_dma_enqueue(prtd->params->channel, |
diff --git a/sound/soc/sh/migor.c b/sound/soc/sh/migor.c index b823a5c9b9bc..87e2b7fcbf17 100644 --- a/sound/soc/sh/migor.c +++ b/sound/soc/sh/migor.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/firmware.h> | 12 | #include <linux/firmware.h> |
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | 14 | ||
15 | #include <asm/clkdev.h> | ||
15 | #include <asm/clock.h> | 16 | #include <asm/clock.h> |
16 | 17 | ||
17 | #include <cpu/sh7722.h> | 18 | #include <cpu/sh7722.h> |
@@ -40,12 +41,12 @@ static struct clk_ops siumckb_clk_ops = { | |||
40 | }; | 41 | }; |
41 | 42 | ||
42 | static struct clk siumckb_clk = { | 43 | static struct clk siumckb_clk = { |
43 | .name = "siumckb_clk", | ||
44 | .id = -1, | ||
45 | .ops = &siumckb_clk_ops, | 44 | .ops = &siumckb_clk_ops, |
46 | .rate = 0, /* initialised at run-time */ | 45 | .rate = 0, /* initialised at run-time */ |
47 | }; | 46 | }; |
48 | 47 | ||
48 | static struct clk_lookup *siumckb_lookup; | ||
49 | |||
49 | static int migor_hw_params(struct snd_pcm_substream *substream, | 50 | static int migor_hw_params(struct snd_pcm_substream *substream, |
50 | struct snd_pcm_hw_params *params) | 51 | struct snd_pcm_hw_params *params) |
51 | { | 52 | { |
@@ -180,6 +181,13 @@ static int __init migor_init(void) | |||
180 | if (ret < 0) | 181 | if (ret < 0) |
181 | return ret; | 182 | return ret; |
182 | 183 | ||
184 | siumckb_lookup = clkdev_alloc(&siumckb_clk, "siumckb_clk", NULL); | ||
185 | if (!siumckb_lookup) { | ||
186 | ret = -ENOMEM; | ||
187 | goto eclkdevalloc; | ||
188 | } | ||
189 | clkdev_add(siumckb_lookup); | ||
190 | |||
183 | /* Port number used on this machine: port B */ | 191 | /* Port number used on this machine: port B */ |
184 | migor_snd_device = platform_device_alloc("soc-audio", 1); | 192 | migor_snd_device = platform_device_alloc("soc-audio", 1); |
185 | if (!migor_snd_device) { | 193 | if (!migor_snd_device) { |
@@ -200,12 +208,15 @@ static int __init migor_init(void) | |||
200 | epdevadd: | 208 | epdevadd: |
201 | platform_device_put(migor_snd_device); | 209 | platform_device_put(migor_snd_device); |
202 | epdevalloc: | 210 | epdevalloc: |
211 | clkdev_drop(siumckb_lookup); | ||
212 | eclkdevalloc: | ||
203 | clk_unregister(&siumckb_clk); | 213 | clk_unregister(&siumckb_clk); |
204 | return ret; | 214 | return ret; |
205 | } | 215 | } |
206 | 216 | ||
207 | static void __exit migor_exit(void) | 217 | static void __exit migor_exit(void) |
208 | { | 218 | { |
219 | clkdev_drop(siumckb_lookup); | ||
209 | clk_unregister(&siumckb_clk); | 220 | clk_unregister(&siumckb_clk); |
210 | platform_device_unregister(migor_snd_device); | 221 | platform_device_unregister(migor_snd_device); |
211 | } | 222 | } |
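Editor's note: registering the clkdev lookup adds one more resource to unwind, so the init path grows a matching eclkdevalloc label; each failure jumps to the label that releases exactly what was already set up, in reverse order. A generic sketch of that goto ladder with hypothetical setup steps:

#include <stdio.h>

static int setup_a(void) { return 0; }
static int setup_b(void) { return -1; }    /* pretend this step fails */
static void undo_a(void) { puts("undo a"); }

static int init(void)
{
        int ret;

        ret = setup_a();
        if (ret)
                goto err_a;
        ret = setup_b();
        if (ret)
                goto err_b;                /* unwind a, but not b */
        return 0;

err_b:
        undo_a();
err_a:
        return ret;
}

int main(void)
{
        printf("init: %d\n", init());
        return 0;
}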
diff --git a/sound/soc/soc-cache.c b/sound/soc/soc-cache.c index adbc68ce9050..f6b0d2829ea9 100644 --- a/sound/soc/soc-cache.c +++ b/sound/soc/soc-cache.c | |||
@@ -203,8 +203,9 @@ static int snd_soc_8_16_write(struct snd_soc_codec *codec, unsigned int reg, | |||
203 | data[1] = (value >> 8) & 0xff; | 203 | data[1] = (value >> 8) & 0xff; |
204 | data[2] = value & 0xff; | 204 | data[2] = value & 0xff; |
205 | 205 | ||
206 | if (!snd_soc_codec_volatile_register(codec, reg)) | 206 | if (!snd_soc_codec_volatile_register(codec, reg) |
207 | reg_cache[reg] = value; | 207 | && reg < codec->reg_cache_size) |
208 | reg_cache[reg] = value; | ||
208 | 209 | ||
209 | if (codec->cache_only) { | 210 | if (codec->cache_only) { |
210 | codec->cache_sync = 1; | 211 | codec->cache_sync = 1; |
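Editor's note: the cache write is now skipped both for volatile registers and for register numbers beyond reg_cache_size, so an out-of-range register can still reach the hardware without scribbling past the cache array. A small sketch of that bounds-checked cache update with stand-in types:

#include <stdbool.h>
#include <stdio.h>

#define CACHE_SIZE 16

static unsigned short reg_cache[CACHE_SIZE];

static bool is_volatile(unsigned int reg) { return reg == 0x0f; }

static void codec_write(unsigned int reg, unsigned short value)
{
        /* the hardware write would happen here in every case */
        if (!is_volatile(reg) && reg < CACHE_SIZE)
                reg_cache[reg] = value;        /* cache only what fits */
}

int main(void)
{
        codec_write(0x02, 0x1234);   /* cached */
        codec_write(0x0f, 0xffff);   /* volatile: not cached */
        codec_write(0x40, 0xdead);   /* out of range: hardware only */
        printf("cache[2] = 0x%04x\n", reg_cache[0x02]);
        return 0;
}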
diff --git a/sound/usb/card.c b/sound/usb/card.c index 9feb00c831a0..4eabafa5b037 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c | |||
@@ -126,7 +126,7 @@ static void snd_usb_stream_disconnect(struct list_head *head) | |||
126 | for (idx = 0; idx < 2; idx++) { | 126 | for (idx = 0; idx < 2; idx++) { |
127 | subs = &as->substream[idx]; | 127 | subs = &as->substream[idx]; |
128 | if (!subs->num_formats) | 128 | if (!subs->num_formats) |
129 | return; | 129 | continue; |
130 | snd_usb_release_substream_urbs(subs, 1); | 130 | snd_usb_release_substream_urbs(subs, 1); |
131 | subs->interface = -1; | 131 | subs->interface = -1; |
132 | } | 132 | } |
@@ -216,6 +216,11 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif) | |||
216 | } | 216 | } |
217 | 217 | ||
218 | switch (protocol) { | 218 | switch (protocol) { |
219 | default: | ||
220 | snd_printdd(KERN_WARNING "unknown interface protocol %#02x, assuming v1\n", | ||
221 | protocol); | ||
222 | /* fall through */ | ||
223 | |||
219 | case UAC_VERSION_1: { | 224 | case UAC_VERSION_1: { |
220 | struct uac1_ac_header_descriptor *h1 = control_header; | 225 | struct uac1_ac_header_descriptor *h1 = control_header; |
221 | 226 | ||
@@ -253,10 +258,6 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif) | |||
253 | 258 | ||
254 | break; | 259 | break; |
255 | } | 260 | } |
256 | |||
257 | default: | ||
258 | snd_printk(KERN_ERR "unknown protocol version 0x%02x\n", protocol); | ||
259 | return -EINVAL; | ||
260 | } | 261 | } |
261 | 262 | ||
262 | return 0; | 263 | return 0; |
@@ -465,7 +466,13 @@ static void *snd_usb_audio_probe(struct usb_device *dev, | |||
465 | goto __error; | 466 | goto __error; |
466 | } | 467 | } |
467 | 468 | ||
468 | chip->ctrl_intf = alts; | 469 | /* |
470 | * For devices with more than one control interface, we assume the | ||
471 | * first contains the audio controls. We might need a more specific | ||
472 | * check here in the future. | ||
473 | */ | ||
474 | if (!chip->ctrl_intf) | ||
475 | chip->ctrl_intf = alts; | ||
469 | 476 | ||
470 | if (err > 0) { | 477 | if (err > 0) { |
471 | /* create normal USB audio interfaces */ | 478 | /* create normal USB audio interfaces */ |
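Editor's note: two behavioural changes here: the disconnect loop continues to the next substream instead of returning early when one has no formats, and an unknown bInterfaceProtocol is now logged and handled as UAC version 1 instead of aborting the probe. A short sketch of the switch shape, with the default case placed before the v1 label so it falls through (the enum values are local and illustrative):

#include <stdio.h>

enum { UAC_V1 = 0x00, UAC_V2 = 0x20 };

static int parse_control_header(int protocol)
{
        switch (protocol) {
        default:
                printf("unknown protocol %#02x, assuming v1\n", protocol);
                /* fall through */
        case UAC_V1:
                printf("parsing v1 header\n");
                break;
        case UAC_V2:
                printf("parsing v2 header\n");
                break;
        }
        return 0;
}

int main(void)
{
        parse_control_header(UAC_V2);
        parse_control_header(0x7f);     /* unknown: handled as v1 */
        return 0;
}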
diff --git a/sound/usb/clock.c b/sound/usb/clock.c index b853f8df794f..7754a1034545 100644 --- a/sound/usb/clock.c +++ b/sound/usb/clock.c | |||
@@ -295,12 +295,11 @@ int snd_usb_init_sample_rate(struct snd_usb_audio *chip, int iface, | |||
295 | 295 | ||
296 | switch (altsd->bInterfaceProtocol) { | 296 | switch (altsd->bInterfaceProtocol) { |
297 | case UAC_VERSION_1: | 297 | case UAC_VERSION_1: |
298 | default: | ||
298 | return set_sample_rate_v1(chip, iface, alts, fmt, rate); | 299 | return set_sample_rate_v1(chip, iface, alts, fmt, rate); |
299 | 300 | ||
300 | case UAC_VERSION_2: | 301 | case UAC_VERSION_2: |
301 | return set_sample_rate_v2(chip, iface, alts, fmt, rate); | 302 | return set_sample_rate_v2(chip, iface, alts, fmt, rate); |
302 | } | 303 | } |
303 | |||
304 | return -EINVAL; | ||
305 | } | 304 | } |
306 | 305 | ||
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c index 1a701f1e8f50..ef0a07e34844 100644 --- a/sound/usb/endpoint.c +++ b/sound/usb/endpoint.c | |||
@@ -275,6 +275,12 @@ int snd_usb_parse_audio_endpoints(struct snd_usb_audio *chip, int iface_no) | |||
275 | 275 | ||
276 | /* get audio formats */ | 276 | /* get audio formats */ |
277 | switch (protocol) { | 277 | switch (protocol) { |
278 | default: | ||
279 | snd_printdd(KERN_WARNING "%d:%u:%d: unknown interface protocol %#02x, assuming v1\n", | ||
280 | dev->devnum, iface_no, altno, protocol); | ||
281 | protocol = UAC_VERSION_1; | ||
282 | /* fall through */ | ||
283 | |||
278 | case UAC_VERSION_1: { | 284 | case UAC_VERSION_1: { |
279 | struct uac1_as_header_descriptor *as = | 285 | struct uac1_as_header_descriptor *as = |
280 | snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL, UAC_AS_GENERAL); | 286 | snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL, UAC_AS_GENERAL); |
@@ -336,11 +342,6 @@ int snd_usb_parse_audio_endpoints(struct snd_usb_audio *chip, int iface_no) | |||
336 | dev->devnum, iface_no, altno, as->bTerminalLink); | 342 | dev->devnum, iface_no, altno, as->bTerminalLink); |
337 | continue; | 343 | continue; |
338 | } | 344 | } |
339 | |||
340 | default: | ||
341 | snd_printk(KERN_ERR "%d:%u:%d : unknown interface protocol %04x\n", | ||
342 | dev->devnum, iface_no, altno, protocol); | ||
343 | continue; | ||
344 | } | 345 | } |
345 | 346 | ||
346 | /* get format type */ | 347 | /* get format type */ |
diff --git a/sound/usb/format.c b/sound/usb/format.c index 3a1375459c06..69148212aa70 100644 --- a/sound/usb/format.c +++ b/sound/usb/format.c | |||
@@ -49,7 +49,8 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip, | |||
49 | u64 pcm_formats; | 49 | u64 pcm_formats; |
50 | 50 | ||
51 | switch (protocol) { | 51 | switch (protocol) { |
52 | case UAC_VERSION_1: { | 52 | case UAC_VERSION_1: |
53 | default: { | ||
53 | struct uac_format_type_i_discrete_descriptor *fmt = _fmt; | 54 | struct uac_format_type_i_discrete_descriptor *fmt = _fmt; |
54 | sample_width = fmt->bBitResolution; | 55 | sample_width = fmt->bBitResolution; |
55 | sample_bytes = fmt->bSubframeSize; | 56 | sample_bytes = fmt->bSubframeSize; |
@@ -64,9 +65,6 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip, | |||
64 | format <<= 1; | 65 | format <<= 1; |
65 | break; | 66 | break; |
66 | } | 67 | } |
67 | |||
68 | default: | ||
69 | return -EINVAL; | ||
70 | } | 68 | } |
71 | 69 | ||
72 | pcm_formats = 0; | 70 | pcm_formats = 0; |
@@ -384,6 +382,10 @@ static int parse_audio_format_i(struct snd_usb_audio *chip, | |||
384 | * audio class v2 uses class specific EP0 range requests for that. | 382 | * audio class v2 uses class specific EP0 range requests for that. |
385 | */ | 383 | */ |
386 | switch (protocol) { | 384 | switch (protocol) { |
385 | default: | ||
386 | snd_printdd(KERN_WARNING "%d:%u:%d : invalid protocol version %d, assuming v1\n", | ||
387 | chip->dev->devnum, fp->iface, fp->altsetting, protocol); | ||
388 | /* fall through */ | ||
387 | case UAC_VERSION_1: | 389 | case UAC_VERSION_1: |
388 | fp->channels = fmt->bNrChannels; | 390 | fp->channels = fmt->bNrChannels; |
389 | ret = parse_audio_format_rates_v1(chip, fp, (unsigned char *) fmt, 7); | 391 | ret = parse_audio_format_rates_v1(chip, fp, (unsigned char *) fmt, 7); |
@@ -392,10 +394,6 @@ static int parse_audio_format_i(struct snd_usb_audio *chip, | |||
392 | /* fp->channels is already set in this case */ | 394 | /* fp->channels is already set in this case */ |
393 | ret = parse_audio_format_rates_v2(chip, fp); | 395 | ret = parse_audio_format_rates_v2(chip, fp); |
394 | break; | 396 | break; |
395 | default: | ||
396 | snd_printk(KERN_ERR "%d:%u:%d : invalid protocol version %d\n", | ||
397 | chip->dev->devnum, fp->iface, fp->altsetting, protocol); | ||
398 | return -EINVAL; | ||
399 | } | 397 | } |
400 | 398 | ||
401 | if (fp->channels < 1) { | 399 | if (fp->channels < 1) { |
@@ -438,6 +436,10 @@ static int parse_audio_format_ii(struct snd_usb_audio *chip, | |||
438 | fp->channels = 1; | 436 | fp->channels = 1; |
439 | 437 | ||
440 | switch (protocol) { | 438 | switch (protocol) { |
439 | default: | ||
440 | snd_printdd(KERN_WARNING "%d:%u:%d : invalid protocol version %d, assuming v1\n", | ||
441 | chip->dev->devnum, fp->iface, fp->altsetting, protocol); | ||
442 | /* fall through */ | ||
441 | case UAC_VERSION_1: { | 443 | case UAC_VERSION_1: { |
442 | struct uac_format_type_ii_discrete_descriptor *fmt = _fmt; | 444 | struct uac_format_type_ii_discrete_descriptor *fmt = _fmt; |
443 | brate = le16_to_cpu(fmt->wMaxBitRate); | 445 | brate = le16_to_cpu(fmt->wMaxBitRate); |
@@ -456,10 +458,6 @@ static int parse_audio_format_ii(struct snd_usb_audio *chip, | |||
456 | ret = parse_audio_format_rates_v2(chip, fp); | 458 | ret = parse_audio_format_rates_v2(chip, fp); |
457 | break; | 459 | break; |
458 | } | 460 | } |
459 | default: | ||
460 | snd_printk(KERN_ERR "%d:%u:%d : invalid protocol version %d\n", | ||
461 | chip->dev->devnum, fp->iface, fp->altsetting, protocol); | ||
462 | return -EINVAL; | ||
463 | } | 461 | } |
464 | 462 | ||
465 | return ret; | 463 | return ret; |
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index c166db0057d3..3ed3901369ce 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c | |||
@@ -2175,7 +2175,15 @@ int snd_usb_create_mixer(struct snd_usb_audio *chip, int ctrlif, | |||
2175 | } | 2175 | } |
2176 | 2176 | ||
2177 | host_iface = &usb_ifnum_to_if(chip->dev, ctrlif)->altsetting[0]; | 2177 | host_iface = &usb_ifnum_to_if(chip->dev, ctrlif)->altsetting[0]; |
2178 | mixer->protocol = get_iface_desc(host_iface)->bInterfaceProtocol; | 2178 | switch (get_iface_desc(host_iface)->bInterfaceProtocol) { |
2179 | case UAC_VERSION_1: | ||
2180 | default: | ||
2181 | mixer->protocol = UAC_VERSION_1; | ||
2182 | break; | ||
2183 | case UAC_VERSION_2: | ||
2184 | mixer->protocol = UAC_VERSION_2; | ||
2185 | break; | ||
2186 | } | ||
2179 | 2187 | ||
2180 | if ((err = snd_usb_mixer_controls(mixer)) < 0 || | 2188 | if ((err = snd_usb_mixer_controls(mixer)) < 0 || |
2181 | (err = snd_usb_mixer_status_create(mixer)) < 0) | 2189 | (err = snd_usb_mixer_status_create(mixer)) < 0) |
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index 3634cedf9306..3b5135c93062 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c | |||
@@ -173,13 +173,12 @@ int snd_usb_init_pitch(struct snd_usb_audio *chip, int iface, | |||
173 | 173 | ||
174 | switch (altsd->bInterfaceProtocol) { | 174 | switch (altsd->bInterfaceProtocol) { |
175 | case UAC_VERSION_1: | 175 | case UAC_VERSION_1: |
176 | default: | ||
176 | return init_pitch_v1(chip, iface, alts, fmt); | 177 | return init_pitch_v1(chip, iface, alts, fmt); |
177 | 178 | ||
178 | case UAC_VERSION_2: | 179 | case UAC_VERSION_2: |
179 | return init_pitch_v2(chip, iface, alts, fmt); | 180 | return init_pitch_v2(chip, iface, alts, fmt); |
180 | } | 181 | } |
181 | |||
182 | return -EINVAL; | ||
183 | } | 182 | } |
184 | 183 | ||
185 | /* | 184 | /* |
diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 4f1fa77c1feb..1950e19af1cf 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile | |||
@@ -1017,7 +1017,7 @@ builtin-revert.o wt-status.o: wt-status.h | |||
1017 | # we compile into subdirectories. if the target directory is not the source directory, they might not exists. So | 1017 | # we compile into subdirectories. if the target directory is not the source directory, they might not exists. So |
1018 | # we depend the various files onto their directories. | 1018 | # we depend the various files onto their directories. |
1019 | DIRECTORY_DEPS = $(LIB_OBJS) $(BUILTIN_OBJS) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h | 1019 | DIRECTORY_DEPS = $(LIB_OBJS) $(BUILTIN_OBJS) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h |
1020 | $(DIRECTORY_DEPS): $(sort $(dir $(DIRECTORY_DEPS))) | 1020 | $(DIRECTORY_DEPS): | $(sort $(dir $(DIRECTORY_DEPS))) |
1021 | # In the second step, we make a rule to actually create these directories | 1021 | # In the second step, we make a rule to actually create these directories |
1022 | $(sort $(dir $(DIRECTORY_DEPS))): | 1022 | $(sort $(dir $(DIRECTORY_DEPS))): |
1023 | $(QUIET_MKDIR)$(MKDIR) -p $@ 2>/dev/null | 1023 | $(QUIET_MKDIR)$(MKDIR) -p $@ 2>/dev/null |
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h index 624a96c636fd..6de4313924fb 100644 --- a/tools/perf/util/callchain.h +++ b/tools/perf/util/callchain.h | |||
@@ -50,6 +50,7 @@ static inline void callchain_init(struct callchain_node *node) | |||
50 | INIT_LIST_HEAD(&node->children); | 50 | INIT_LIST_HEAD(&node->children); |
51 | INIT_LIST_HEAD(&node->val); | 51 | INIT_LIST_HEAD(&node->val); |
52 | 52 | ||
53 | node->children_hit = 0; | ||
53 | node->parent = NULL; | 54 | node->parent = NULL; |
54 | node->hit = 0; | 55 | node->hit = 0; |
55 | } | 56 | } |
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index e72f05c3bef0..fcc16e4349df 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c | |||
@@ -1539,6 +1539,7 @@ static int convert_to_probe_trace_events(struct perf_probe_event *pev, | |||
1539 | goto error; | 1539 | goto error; |
1540 | } | 1540 | } |
1541 | tev->point.offset = pev->point.offset; | 1541 | tev->point.offset = pev->point.offset; |
1542 | tev->point.retprobe = pev->point.retprobe; | ||
1542 | tev->nargs = pev->nargs; | 1543 | tev->nargs = pev->nargs; |
1543 | if (tev->nargs) { | 1544 | if (tev->nargs) { |
1544 | tev->args = zalloc(sizeof(struct probe_trace_arg) | 1545 | tev->args = zalloc(sizeof(struct probe_trace_arg) |
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index 525136684d4e..32b81f707ff5 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c | |||
@@ -686,6 +686,25 @@ static int find_variable(Dwarf_Die *sp_die, struct probe_finder *pf) | |||
686 | char buf[32], *ptr; | 686 | char buf[32], *ptr; |
687 | int ret, nscopes; | 687 | int ret, nscopes; |
688 | 688 | ||
689 | if (!is_c_varname(pf->pvar->var)) { | ||
690 | /* Copy raw parameters */ | ||
691 | pf->tvar->value = strdup(pf->pvar->var); | ||
692 | if (pf->tvar->value == NULL) | ||
693 | return -ENOMEM; | ||
694 | if (pf->pvar->type) { | ||
695 | pf->tvar->type = strdup(pf->pvar->type); | ||
696 | if (pf->tvar->type == NULL) | ||
697 | return -ENOMEM; | ||
698 | } | ||
699 | if (pf->pvar->name) { | ||
700 | pf->tvar->name = strdup(pf->pvar->name); | ||
701 | if (pf->tvar->name == NULL) | ||
702 | return -ENOMEM; | ||
703 | } else | ||
704 | pf->tvar->name = NULL; | ||
705 | return 0; | ||
706 | } | ||
707 | |||
689 | if (pf->pvar->name) | 708 | if (pf->pvar->name) |
690 | pf->tvar->name = strdup(pf->pvar->name); | 709 | pf->tvar->name = strdup(pf->pvar->name); |
691 | else { | 710 | else { |
@@ -700,19 +719,6 @@ static int find_variable(Dwarf_Die *sp_die, struct probe_finder *pf) | |||
700 | if (pf->tvar->name == NULL) | 719 | if (pf->tvar->name == NULL) |
701 | return -ENOMEM; | 720 | return -ENOMEM; |
702 | 721 | ||
703 | if (!is_c_varname(pf->pvar->var)) { | ||
704 | /* Copy raw parameters */ | ||
705 | pf->tvar->value = strdup(pf->pvar->var); | ||
706 | if (pf->tvar->value == NULL) | ||
707 | return -ENOMEM; | ||
708 | if (pf->pvar->type) { | ||
709 | pf->tvar->type = strdup(pf->pvar->type); | ||
710 | if (pf->tvar->type == NULL) | ||
711 | return -ENOMEM; | ||
712 | } | ||
713 | return 0; | ||
714 | } | ||
715 | |||
716 | pr_debug("Searching '%s' variable in context.\n", | 722 | pr_debug("Searching '%s' variable in context.\n", |
717 | pf->pvar->var); | 723 | pf->pvar->var); |
718 | /* Search child die for local variables and parameters. */ | 724 | /* Search child die for local variables and parameters. */ |
@@ -783,6 +789,16 @@ static int convert_probe_point(Dwarf_Die *sp_die, struct probe_finder *pf) | |||
783 | /* This function has no name. */ | 789 | /* This function has no name. */ |
784 | tev->point.offset = (unsigned long)pf->addr; | 790 | tev->point.offset = (unsigned long)pf->addr; |
785 | 791 | ||
792 | /* Return probe must be on the head of a subprogram */ | ||
793 | if (pf->pev->point.retprobe) { | ||
794 | if (tev->point.offset != 0) { | ||
795 | pr_warning("Return probe must be on the head of" | ||
796 | " a real function\n"); | ||
797 | return -EINVAL; | ||
798 | } | ||
799 | tev->point.retprobe = true; | ||
800 | } | ||
801 | |||
786 | pr_debug("Probe point found: %s+%lu\n", tev->point.symbol, | 802 | pr_debug("Probe point found: %s+%lu\n", tev->point.symbol, |
787 | tev->point.offset); | 803 | tev->point.offset); |
788 | 804 | ||
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 1a367734e016..b2f5ae97f33d 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c | |||
@@ -2268,6 +2268,9 @@ static int setup_list(struct strlist **list, const char *list_str, | |||
2268 | 2268 | ||
2269 | int symbol__init(void) | 2269 | int symbol__init(void) |
2270 | { | 2270 | { |
2271 | if (symbol_conf.initialized) | ||
2272 | return 0; | ||
2273 | |||
2271 | elf_version(EV_CURRENT); | 2274 | elf_version(EV_CURRENT); |
2272 | if (symbol_conf.sort_by_name) | 2275 | if (symbol_conf.sort_by_name) |
2273 | symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) - | 2276 | symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) - |
@@ -2293,6 +2296,7 @@ int symbol__init(void) | |||
2293 | symbol_conf.sym_list_str, "symbol") < 0) | 2296 | symbol_conf.sym_list_str, "symbol") < 0) |
2294 | goto out_free_comm_list; | 2297 | goto out_free_comm_list; |
2295 | 2298 | ||
2299 | symbol_conf.initialized = true; | ||
2296 | return 0; | 2300 | return 0; |
2297 | 2301 | ||
2298 | out_free_dso_list: | 2302 | out_free_dso_list: |
@@ -2304,11 +2308,14 @@ out_free_comm_list: | |||
2304 | 2308 | ||
2305 | void symbol__exit(void) | 2309 | void symbol__exit(void) |
2306 | { | 2310 | { |
2311 | if (!symbol_conf.initialized) | ||
2312 | return; | ||
2307 | strlist__delete(symbol_conf.sym_list); | 2313 | strlist__delete(symbol_conf.sym_list); |
2308 | strlist__delete(symbol_conf.dso_list); | 2314 | strlist__delete(symbol_conf.dso_list); |
2309 | strlist__delete(symbol_conf.comm_list); | 2315 | strlist__delete(symbol_conf.comm_list); |
2310 | vmlinux_path__exit(); | 2316 | vmlinux_path__exit(); |
2311 | symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL; | 2317 | symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL; |
2318 | symbol_conf.initialized = false; | ||
2312 | } | 2319 | } |
2313 | 2320 | ||
2314 | int machines__create_kernel_maps(struct rb_root *self, pid_t pid) | 2321 | int machines__create_kernel_maps(struct rb_root *self, pid_t pid) |
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index b7a8da4af5a0..ea95c2756f05 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h | |||
@@ -69,7 +69,8 @@ struct symbol_conf { | |||
69 | show_nr_samples, | 69 | show_nr_samples, |
70 | use_callchain, | 70 | use_callchain, |
71 | exclude_other, | 71 | exclude_other, |
72 | show_cpu_utilization; | 72 | show_cpu_utilization, |
73 | initialized; | ||
73 | const char *vmlinux_name, | 74 | const char *vmlinux_name, |
74 | *source_prefix, | 75 | *source_prefix, |
75 | *field_sep; | 76 | *field_sep; |
diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c index 7ea983acfaea..f7af2fca965d 100644 --- a/tools/perf/util/trace-event-scripting.c +++ b/tools/perf/util/trace-event-scripting.c | |||
@@ -97,7 +97,7 @@ void setup_python_scripting(void) | |||
97 | register_python_scripting(&python_scripting_unsupported_ops); | 97 | register_python_scripting(&python_scripting_unsupported_ops); |
98 | } | 98 | } |
99 | #else | 99 | #else |
100 | struct scripting_ops python_scripting_ops; | 100 | extern struct scripting_ops python_scripting_ops; |
101 | 101 | ||
102 | void setup_python_scripting(void) | 102 | void setup_python_scripting(void) |
103 | { | 103 | { |
@@ -158,7 +158,7 @@ void setup_perl_scripting(void) | |||
158 | register_perl_scripting(&perl_scripting_unsupported_ops); | 158 | register_perl_scripting(&perl_scripting_unsupported_ops); |
159 | } | 159 | } |
160 | #else | 160 | #else |
161 | struct scripting_ops perl_scripting_ops; | 161 | extern struct scripting_ops perl_scripting_ops; |
162 | 162 | ||
163 | void setup_perl_scripting(void) | 163 | void setup_perl_scripting(void) |
164 | { | 164 | { |
diff --git a/tools/perf/util/ui/browsers/hists.c b/tools/perf/util/ui/browsers/hists.c index dafdf6775d77..6866aa4c41e0 100644 --- a/tools/perf/util/ui/browsers/hists.c +++ b/tools/perf/util/ui/browsers/hists.c | |||
@@ -773,7 +773,7 @@ int hists__browse(struct hists *self, const char *helpline, const char *ev_name) | |||
773 | 773 | ||
774 | switch (key) { | 774 | switch (key) { |
775 | case 'a': | 775 | case 'a': |
776 | if (browser->selection->map == NULL && | 776 | if (browser->selection->map == NULL || |
777 | browser->selection->map->dso->annotate_warned) | 777 | browser->selection->map->dso->annotate_warned) |
778 | continue; | 778 | continue; |
779 | goto do_annotate; | 779 | goto do_annotate; |
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c index 66cf65b510b1..c1f1e3c62984 100644 --- a/virt/kvm/eventfd.c +++ b/virt/kvm/eventfd.c | |||
@@ -218,7 +218,6 @@ kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi) | |||
218 | events = file->f_op->poll(file, &irqfd->pt); | 218 | events = file->f_op->poll(file, &irqfd->pt); |
219 | 219 | ||
220 | list_add_tail(&irqfd->list, &kvm->irqfds.items); | 220 | list_add_tail(&irqfd->list, &kvm->irqfds.items); |
221 | spin_unlock_irq(&kvm->irqfds.lock); | ||
222 | 221 | ||
223 | /* | 222 | /* |
224 | * Check if there was an event already pending on the eventfd | 223 | * Check if there was an event already pending on the eventfd |
@@ -227,6 +226,8 @@ kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi) | |||
227 | if (events & POLLIN) | 226 | if (events & POLLIN) |
228 | schedule_work(&irqfd->inject); | 227 | schedule_work(&irqfd->inject); |
229 | 228 | ||
229 | spin_unlock_irq(&kvm->irqfds.lock); | ||
230 | |||
230 | /* | 231 | /* |
231 | * do not drop the file until the irqfd is fully initialized, otherwise | 232 | * do not drop the file until the irqfd is fully initialized, otherwise |
232 | * we might race against the POLLHUP | 233 | * we might race against the POLLHUP |
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index b78b794c1039..5186e728c53e 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
@@ -1958,10 +1958,10 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val, | |||
1958 | cpu); | 1958 | cpu); |
1959 | hardware_disable(NULL); | 1959 | hardware_disable(NULL); |
1960 | break; | 1960 | break; |
1961 | case CPU_ONLINE: | 1961 | case CPU_STARTING: |
1962 | printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n", | 1962 | printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n", |
1963 | cpu); | 1963 | cpu); |
1964 | smp_call_function_single(cpu, hardware_enable, NULL, 1); | 1964 | hardware_enable(NULL); |
1965 | break; | 1965 | break; |
1966 | } | 1966 | } |
1967 | return NOTIFY_OK; | 1967 | return NOTIFY_OK; |
@@ -1970,10 +1970,12 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val, | |||
1970 | 1970 | ||
1971 | asmlinkage void kvm_handle_fault_on_reboot(void) | 1971 | asmlinkage void kvm_handle_fault_on_reboot(void) |
1972 | { | 1972 | { |
1973 | if (kvm_rebooting) | 1973 | if (kvm_rebooting) { |
1974 | /* spin while reset goes on */ | 1974 | /* spin while reset goes on */ |
1975 | local_irq_enable(); | ||
1975 | while (true) | 1976 | while (true) |
1976 | ; | 1977 | ; |
1978 | } | ||
1977 | /* Fault while not rebooting. We want the trace. */ | 1979 | /* Fault while not rebooting. We want the trace. */ |
1978 | BUG(); | 1980 | BUG(); |
1979 | } | 1981 | } |
@@ -2096,7 +2098,6 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, | |||
2096 | 2098 | ||
2097 | static struct notifier_block kvm_cpu_notifier = { | 2099 | static struct notifier_block kvm_cpu_notifier = { |
2098 | .notifier_call = kvm_cpu_hotplug, | 2100 | .notifier_call = kvm_cpu_hotplug, |
2099 | .priority = 20, /* must be > scheduler priority */ | ||
2100 | }; | 2101 | }; |
2101 | 2102 | ||
2102 | static int vm_stat_get(void *_offset, u64 *val) | 2103 | static int vm_stat_get(void *_offset, u64 *val) |