| author | Chris Wilson <chris@chris-wilson.co.uk> | 2010-09-12 20:02:18 -0400 |
|---|---|---|
| committer | Chris Wilson <chris@chris-wilson.co.uk> | 2010-09-12 20:02:18 -0400 |
| commit | 9277bf4b4f94655eef177d0daffa90a47c51eb62 (patch) | |
| tree | 569b5a13b3f7030a603460d8d9ded035a6f7f48f | |
| parent | 897493504addc5609f04a2c4f73c37ab972c29b2 (diff) | |
| parent | 49553c2ef88749dd502687f4eb9c258bb10a4f44 (diff) | |
Merge remote branch 'linus' into drm-intel-fixes
412 files changed, 4527 insertions, 2149 deletions
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl index ecd35e9d4410..feca0758391e 100644 --- a/Documentation/DocBook/device-drivers.tmpl +++ b/Documentation/DocBook/device-drivers.tmpl | |||
| @@ -46,7 +46,6 @@ | |||
| 46 | 46 | ||
| 47 | <sect1><title>Atomic and pointer manipulation</title> | 47 | <sect1><title>Atomic and pointer manipulation</title> |
| 48 | !Iarch/x86/include/asm/atomic.h | 48 | !Iarch/x86/include/asm/atomic.h |
| 49 | !Iarch/x86/include/asm/unaligned.h | ||
| 50 | </sect1> | 49 | </sect1> |
| 51 | 50 | ||
| 52 | <sect1><title>Delaying, scheduling, and timer routines</title> | 51 | <sect1><title>Delaying, scheduling, and timer routines</title> |
diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl index a20c6f6fffc3..6899f471fb15 100644 --- a/Documentation/DocBook/kernel-api.tmpl +++ b/Documentation/DocBook/kernel-api.tmpl | |||
| @@ -57,7 +57,6 @@ | |||
| 57 | </para> | 57 | </para> |
| 58 | 58 | ||
| 59 | <sect1><title>String Conversions</title> | 59 | <sect1><title>String Conversions</title> |
| 60 | !Ilib/vsprintf.c | ||
| 61 | !Elib/vsprintf.c | 60 | !Elib/vsprintf.c |
| 62 | </sect1> | 61 | </sect1> |
| 63 | <sect1><title>String Manipulation</title> | 62 | <sect1><title>String Manipulation</title> |
diff --git a/Documentation/DocBook/kernel-locking.tmpl b/Documentation/DocBook/kernel-locking.tmpl index 0b1a3f97f285..a0d479d1e1dd 100644 --- a/Documentation/DocBook/kernel-locking.tmpl +++ b/Documentation/DocBook/kernel-locking.tmpl | |||
| @@ -1961,6 +1961,12 @@ machines due to caching. | |||
| 1961 | </sect1> | 1961 | </sect1> |
| 1962 | </chapter> | 1962 | </chapter> |
| 1963 | 1963 | ||
| 1964 | <chapter id="apiref"> | ||
| 1965 | <title>Mutex API reference</title> | ||
| 1966 | !Iinclude/linux/mutex.h | ||
| 1967 | !Ekernel/mutex.c | ||
| 1968 | </chapter> | ||
| 1969 | |||
| 1964 | <chapter id="references"> | 1970 | <chapter id="references"> |
| 1965 | <title>Further reading</title> | 1971 | <title>Further reading</title> |
| 1966 | 1972 | ||
diff --git a/Documentation/DocBook/tracepoint.tmpl b/Documentation/DocBook/tracepoint.tmpl index e8473eae2a20..b57a9ede3224 100644 --- a/Documentation/DocBook/tracepoint.tmpl +++ b/Documentation/DocBook/tracepoint.tmpl | |||
| @@ -104,4 +104,9 @@ | |||
| 104 | <title>Block IO</title> | 104 | <title>Block IO</title> |
| 105 | !Iinclude/trace/events/block.h | 105 | !Iinclude/trace/events/block.h |
| 106 | </chapter> | 106 | </chapter> |
| 107 | |||
| 108 | <chapter id="workqueue"> | ||
| 109 | <title>Workqueue</title> | ||
| 110 | !Iinclude/trace/events/workqueue.h | ||
| 111 | </chapter> | ||
| 107 | </book> | 112 | </book> |
diff --git a/Documentation/block/cfq-iosched.txt b/Documentation/block/cfq-iosched.txt new file mode 100644 index 000000000000..e578feed6d81 --- /dev/null +++ b/Documentation/block/cfq-iosched.txt | |||
| @@ -0,0 +1,45 @@ | |||
| 1 | CFQ ioscheduler tunables | ||
| 2 | ======================== | ||
| 3 | |||
| 4 | slice_idle | ||
| 5 | ---------- | ||
| 6 | This specifies how long CFQ should idle for the next request on certain cfq queues | ||
| 7 | (for sequential workloads) and service trees (for random workloads) before | ||
| 8 | the queue is expired and CFQ selects the next queue to dispatch from. | ||
| 9 | |||
| 10 | By default slice_idle is a non-zero value. That means by default we idle on | ||
| 11 | queues/service trees. This can be very helpful on highly seeky media like | ||
| 12 | single spindle SATA/SAS disks where we can cut down on overall number of | ||
| 13 | seeks and see improved throughput. | ||
| 14 | |||
| 15 | Setting slice_idle to 0 will remove all idling at the queue/service-tree | ||
| 16 | level, and one should see overall improved throughput on faster storage | ||
| 17 | devices like multiple SATA/SAS disks in a hardware RAID configuration. The | ||
| 18 | downside is that the isolation provided from WRITES also goes down and the notion of | ||
| 19 | IO priority becomes weaker. | ||
| 20 | |||
| 21 | So depending on storage and workload, it might be useful to set slice_idle=0. | ||
| 22 | In general I think for SATA/SAS disks and software RAID of SATA/SAS disks | ||
| 23 | keeping slice_idle enabled should be useful. For any configurations where | ||
| 24 | there are multiple spindles behind a single LUN (host-based hardware RAID | ||
| 25 | controller or storage arrays), setting slice_idle=0 might result in better | ||
| 26 | throughput and acceptable latencies. | ||
| 27 | |||
| 28 | CFQ IOPS Mode for group scheduling | ||
| 29 | =================================== | ||
| 30 | The basic CFQ design is to provide priority-based time slices. A higher priority | ||
| 31 | process gets a bigger time slice and a lower priority process gets a smaller time | ||
| 32 | slice. Measuring time becomes harder if storage is fast and supports NCQ, and | ||
| 33 | it would be better to dispatch multiple requests from multiple cfq queues in | ||
| 34 | the request queue at a time. In such a scenario, it is not possible to measure the time | ||
| 35 | consumed by a single queue accurately. | ||
| 36 | |||
| 37 | What is possible, though, is to measure the number of requests dispatched from a | ||
| 38 | single queue and also allow dispatch from multiple cfq queues at the same time. | ||
| 39 | This effectively becomes the fairness in terms of IOPS (IO operations per | ||
| 40 | second). | ||
| 41 | |||
| 42 | If one sets slice_idle=0 and if storage supports NCQ, CFQ internally switches | ||
| 43 | to IOPS mode and starts providing fairness in terms of number of requests | ||
| 44 | dispatched. Note that this mode switching takes effect only for group | ||
| 45 | scheduling. For non-cgroup users nothing should change. | ||
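The tunable described in this new document is exposed through sysfs at run time. As a rough sketch (not part of the patch), and assuming a disk named `sda` whose scheduler is cfq, slice_idle can be cleared from user space like this:

```c
/*
 * Minimal sketch, not part of the patch: clear slice_idle so CFQ switches
 * to IOPS-based accounting on NCQ-capable storage. The disk name "sda" is
 * an assumption; substitute the device actually using the cfq scheduler.
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/block/sda/queue/iosched/slice_idle";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	fputs("0\n", f);	/* 0 disables queue/service-tree idling */
	return fclose(f) ? 1 : 0;
}
```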
diff --git a/Documentation/cgroups/blkio-controller.txt b/Documentation/cgroups/blkio-controller.txt index 48e0b21b0059..6919d62591d9 100644 --- a/Documentation/cgroups/blkio-controller.txt +++ b/Documentation/cgroups/blkio-controller.txt | |||
| @@ -217,6 +217,7 @@ Details of cgroup files | |||
| 217 | CFQ sysfs tunable | 217 | CFQ sysfs tunable |
| 218 | ================= | 218 | ================= |
| 219 | /sys/block/<disk>/queue/iosched/group_isolation | 219 | /sys/block/<disk>/queue/iosched/group_isolation |
| 220 | ----------------------------------------------- | ||
| 220 | 221 | ||
| 221 | If group_isolation=1, it provides stronger isolation between groups at the | 222 | If group_isolation=1, it provides stronger isolation between groups at the |
| 222 | expense of throughput. By default group_isolation is 0. In general that | 223 | expense of throughput. By default group_isolation is 0. In general that |
| @@ -243,6 +244,33 @@ By default one should run with group_isolation=0. If that is not sufficient | |||
| 243 | and one wants stronger isolation between groups, then set group_isolation=1 | 244 | and one wants stronger isolation between groups, then set group_isolation=1 |
| 244 | but this will come at cost of reduced throughput. | 245 | but this will come at cost of reduced throughput. |
| 245 | 246 | ||
| 247 | /sys/block/<disk>/queue/iosched/slice_idle | ||
| 248 | ------------------------------------------ | ||
| 249 | On faster hardware CFQ can be slow, especially with sequential workloads. | ||
| 250 | This happens because CFQ idles on a single queue and a single queue might not | ||
| 251 | drive deeper request queue depths to keep the storage busy. In such scenarios | ||
| 252 | one can try setting slice_idle=0 and that would switch CFQ to IOPS | ||
| 253 | (IO operations per second) mode on NCQ supporting hardware. | ||
| 254 | |||
| 255 | That means CFQ will not idle between cfq queues of a cfq group and hence be | ||
| 256 | able to drive a higher queue depth and achieve better throughput. That also | ||
| 257 | means that cfq provides fairness among groups in terms of IOPS and not in | ||
| 258 | terms of disk time. | ||
| 259 | |||
| 260 | /sys/block/<disk>/queue/iosched/group_idle | ||
| 261 | ------------------------------------------ | ||
| 262 | If one disables idling on individual cfq queues and cfq service trees by | ||
| 263 | setting slice_idle=0, group_idle kicks in. That means CFQ will still idle | ||
| 264 | on the group in an attempt to provide fairness among groups. | ||
| 265 | |||
| 266 | By default group_idle is the same as slice_idle and does not do anything if | ||
| 267 | slice_idle is enabled. | ||
| 268 | |||
| 269 | You can experience an overall throughput drop if you have created multiple | ||
| 270 | groups and put applications in those groups which are not driving enough | ||
| 271 | IO to keep the disk busy. In that case set group_idle=0, and CFQ will not idle | ||
| 272 | on individual groups and throughput should improve. | ||
| 273 | |||
| 246 | What works | 274 | What works |
| 247 | ========== | 275 | ========== |
| 248 | - Currently only sync IO queues are support. All the buffered writes are | 276 | - Currently only sync IO queues are support. All the buffered writes are |
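As a companion sketch (again not part of the patch), the group_idle and slice_idle tunables documented above can be inspected and changed the same way; the path below assumes a disk named `sda`:

```c
/*
 * Sketch only: print the current group_idle value, then disable group
 * idling for workloads whose cgroups do not keep the disk busy.
 * "sda" is an assumed device name.
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/block/sda/queue/iosched/group_idle";
	char buf[32];
	FILE *f = fopen(path, "r");

	if (f) {
		if (fgets(buf, sizeof(buf), f))
			printf("current group_idle: %s", buf);
		fclose(f);
	}

	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return 1;
	}
	fputs("0\n", f);	/* stop idling on individual groups */
	return fclose(f) ? 1 : 0;
}
```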
diff --git a/Documentation/gpio.txt b/Documentation/gpio.txt index d96a6dba5748..9633da01ff46 100644 --- a/Documentation/gpio.txt +++ b/Documentation/gpio.txt | |||
| @@ -109,17 +109,19 @@ use numbers 2000-2063 to identify GPIOs in a bank of I2C GPIO expanders. | |||
| 109 | 109 | ||
| 110 | If you want to initialize a structure with an invalid GPIO number, use | 110 | If you want to initialize a structure with an invalid GPIO number, use |
| 111 | some negative number (perhaps "-EINVAL"); that will never be valid. To | 111 | some negative number (perhaps "-EINVAL"); that will never be valid. To |
| 112 | test if a number could reference a GPIO, you may use this predicate: | 112 | test if such a number from such a structure could reference a GPIO, you |
| 113 | may use this predicate: | ||
| 113 | 114 | ||
| 114 | int gpio_is_valid(int number); | 115 | int gpio_is_valid(int number); |
| 115 | 116 | ||
| 116 | A number that's not valid will be rejected by calls which may request | 117 | A number that's not valid will be rejected by calls which may request |
| 117 | or free GPIOs (see below). Other numbers may also be rejected; for | 118 | or free GPIOs (see below). Other numbers may also be rejected; for |
| 118 | example, a number might be valid but unused on a given board. | 119 | example, a number might be valid but temporarily unused on a given board. |
| 119 | |||
| 120 | Whether a platform supports multiple GPIO controllers is currently a | ||
| 121 | platform-specific implementation issue. | ||
| 122 | 120 | ||
| 121 | Whether a platform supports multiple GPIO controllers is a platform-specific | ||
| 122 | implementation issue, as are whether that support can leave "holes" in the space | ||
| 123 | of GPIO numbers, and whether new controllers can be added at runtime. Such issues | ||
| 124 | can affect things including whether adjacent GPIO numbers are both valid. | ||
| 123 | 125 | ||
| 124 | Using GPIOs | 126 | Using GPIOs |
| 125 | ----------- | 127 | ----------- |
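A short kernel-side sketch of the gpio_is_valid() usage pattern described in the hunk above; the helper name and the idea of pulling the number from platform data are illustrative, not taken from this patch:

```c
/* Illustrative only: a board may hand us -EINVAL to mean "no GPIO wired up". */
#include <linux/gpio.h>

static int example_claim_led(int gpio)	/* e.g. taken from platform data */
{
	if (!gpio_is_valid(gpio))
		return 0;		/* nothing wired up on this board; skip */

	/* A valid-looking number can still be rejected here, e.g. if unused. */
	return gpio_request(gpio, "example-led");
}
```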
| @@ -480,12 +482,16 @@ To support this framework, a platform's Kconfig will "select" either | |||
| 480 | ARCH_REQUIRE_GPIOLIB or ARCH_WANT_OPTIONAL_GPIOLIB | 482 | ARCH_REQUIRE_GPIOLIB or ARCH_WANT_OPTIONAL_GPIOLIB |
| 481 | and arrange that its <asm/gpio.h> includes <asm-generic/gpio.h> and defines | 483 | and arrange that its <asm/gpio.h> includes <asm-generic/gpio.h> and defines |
| 482 | three functions: gpio_get_value(), gpio_set_value(), and gpio_cansleep(). | 484 | three functions: gpio_get_value(), gpio_set_value(), and gpio_cansleep(). |
| 483 | They may also want to provide a custom value for ARCH_NR_GPIOS. | ||
| 484 | 485 | ||
| 485 | ARCH_REQUIRE_GPIOLIB means that the gpio-lib code will always get compiled | 486 | It may also provide a custom value for ARCH_NR_GPIOS, so that it better |
| 487 | reflects the number of GPIOs in actual use on that platform, without | ||
| 488 | wasting static table space. (It should count both built-in/SoC GPIOs and | ||
| 489 | also ones on GPIO expanders.) | ||
| 490 | |||
| 491 | ARCH_REQUIRE_GPIOLIB means that the gpiolib code will always get compiled | ||
| 486 | into the kernel on that architecture. | 492 | into the kernel on that architecture. |
| 487 | 493 | ||
| 488 | ARCH_WANT_OPTIONAL_GPIOLIB means the gpio-lib code defaults to off and the user | 494 | ARCH_WANT_OPTIONAL_GPIOLIB means the gpiolib code defaults to off and the user |
| 489 | can enable it and build it into the kernel optionally. | 495 | can enable it and build it into the kernel optionally. |
| 490 | 496 | ||
| 491 | If neither of these options are selected, the platform does not support | 497 | If neither of these options are selected, the platform does not support |
diff --git a/Documentation/kernel-doc-nano-HOWTO.txt b/Documentation/kernel-doc-nano-HOWTO.txt index 27a52b35d55b..3d8a97747f77 100644 --- a/Documentation/kernel-doc-nano-HOWTO.txt +++ b/Documentation/kernel-doc-nano-HOWTO.txt | |||
| @@ -345,5 +345,10 @@ documentation, in <filename>, for the functions listed. | |||
| 345 | section titled <section title> from <filename>. | 345 | section titled <section title> from <filename>. |
| 346 | Spaces are allowed in <section title>; do not quote the <section title>. | 346 | Spaces are allowed in <section title>; do not quote the <section title>. |
| 347 | 347 | ||
| 348 | !C<filename> is replaced by nothing, but makes the tools check that | ||
| 349 | all DOC: sections and documented functions, symbols, etc. are used. | ||
| 350 | This makes sense to use when you use !F/!P only and want to verify | ||
| 351 | that all documentation is included. | ||
| 352 | |||
| 348 | Tim. | 353 | Tim. |
| 349 | */ <twaugh@redhat.com> | 354 | */ <twaugh@redhat.com> |
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index f084af0cb8e0..8dd7248508a9 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt | |||
| @@ -1974,15 +1974,18 @@ and is between 256 and 4096 characters. It is defined in the file | |||
| 1974 | force Enable ASPM even on devices that claim not to support it. | 1974 | force Enable ASPM even on devices that claim not to support it. |
| 1975 | WARNING: Forcing ASPM on may cause system lockups. | 1975 | WARNING: Forcing ASPM on may cause system lockups. |
| 1976 | 1976 | ||
| 1977 | pcie_ports= [PCIE] PCIe ports handling: | ||
| 1978 | auto Ask the BIOS whether or not to use native PCIe services | ||
| 1979 | associated with PCIe ports (PME, hot-plug, AER). Use | ||
| 1980 | them only if that is allowed by the BIOS. | ||
| 1981 | native Use native PCIe services associated with PCIe ports | ||
| 1982 | unconditionally. | ||
| 1983 | compat Treat PCIe ports as PCI-to-PCI bridges, disable the PCIe | ||
| 1984 | ports driver. | ||
| 1985 | |||
| 1977 | pcie_pme= [PCIE,PM] Native PCIe PME signaling options: | 1986 | pcie_pme= [PCIE,PM] Native PCIe PME signaling options: |
| 1978 | Format: {auto|force}[,nomsi] | ||
| 1979 | auto Use native PCIe PME signaling if the BIOS allows the | ||
| 1980 | kernel to control PCIe config registers of root ports. | ||
| 1981 | force Use native PCIe PME signaling even if the BIOS refuses | ||
| 1982 | to allow the kernel to control the relevant PCIe config | ||
| 1983 | registers. | ||
| 1984 | nomsi Do not use MSI for native PCIe PME signaling (this makes | 1987 | nomsi Do not use MSI for native PCIe PME signaling (this makes |
| 1985 | all PCIe root ports use INTx for everything). | 1988 | all PCIe root ports use INTx for all services). |
| 1986 | 1989 | ||
| 1987 | pcmv= [HW,PCMCIA] BadgePAD 4 | 1990 | pcmv= [HW,PCMCIA] BadgePAD 4 |
| 1988 | 1991 | ||
diff --git a/Documentation/mutex-design.txt b/Documentation/mutex-design.txt index c91ccc0720fa..38c10fd7f411 100644 --- a/Documentation/mutex-design.txt +++ b/Documentation/mutex-design.txt | |||
| @@ -9,7 +9,7 @@ firstly, there's nothing wrong with semaphores. But if the simpler | |||
| 9 | mutex semantics are sufficient for your code, then there are a couple | 9 | mutex semantics are sufficient for your code, then there are a couple |
| 10 | of advantages of mutexes: | 10 | of advantages of mutexes: |
| 11 | 11 | ||
| 12 | - 'struct mutex' is smaller on most architectures: .e.g on x86, | 12 | - 'struct mutex' is smaller on most architectures: E.g. on x86, |
| 13 | 'struct semaphore' is 20 bytes, 'struct mutex' is 16 bytes. | 13 | 'struct semaphore' is 20 bytes, 'struct mutex' is 16 bytes. |
| 14 | A smaller structure size means less RAM footprint, and better | 14 | A smaller structure size means less RAM footprint, and better |
| 15 | CPU-cache utilization. | 15 | CPU-cache utilization. |
| @@ -136,3 +136,4 @@ the APIs of 'struct mutex' have been streamlined: | |||
| 136 | void mutex_lock_nested(struct mutex *lock, unsigned int subclass); | 136 | void mutex_lock_nested(struct mutex *lock, unsigned int subclass); |
| 137 | int mutex_lock_interruptible_nested(struct mutex *lock, | 137 | int mutex_lock_interruptible_nested(struct mutex *lock, |
| 138 | unsigned int subclass); | 138 | unsigned int subclass); |
| 139 | int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); | ||
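For context, a hedged sketch of how the helper added to the API list above is typically used; the surrounding structure and names are invented for illustration:

```c
/*
 * Sketch only: drop a reference and, when it reaches zero, tear the object
 * down under its mutex. atomic_dec_and_mutex_lock() returns 1 with the
 * mutex held iff the counter hit zero; otherwise it returns 0 without
 * taking the lock.
 */
#include <linux/mutex.h>
#include <asm/atomic.h>

struct example_obj {
	atomic_t refcount;
	struct mutex lock;
};

static void example_put(struct example_obj *obj)
{
	if (atomic_dec_and_mutex_lock(&obj->refcount, &obj->lock)) {
		/* ... release resources guarded by obj->lock ... */
		mutex_unlock(&obj->lock);
	}
}
```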
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt index ce46fa1e643e..37c6aad5e590 100644 --- a/Documentation/sound/alsa/HD-Audio-Models.txt +++ b/Documentation/sound/alsa/HD-Audio-Models.txt | |||
| @@ -296,6 +296,7 @@ Conexant 5051 | |||
| 296 | Conexant 5066 | 296 | Conexant 5066 |
| 297 | ============= | 297 | ============= |
| 298 | laptop Basic Laptop config (default) | 298 | laptop Basic Laptop config (default) |
| 299 | hp-laptop HP laptops, e.g. G60 | ||
| 299 | dell-laptop Dell laptops | 300 | dell-laptop Dell laptops |
| 300 | dell-vostro Dell Vostro | 301 | dell-vostro Dell Vostro |
| 301 | olpc-xo-1_5 OLPC XO 1.5 | 302 | olpc-xo-1_5 OLPC XO 1.5 |
diff --git a/MAINTAINERS b/MAINTAINERS index c36f5d76e1a2..e7c528ff1013 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
| @@ -1445,6 +1445,16 @@ S: Maintained | |||
| 1445 | F: Documentation/video4linux/cafe_ccic | 1445 | F: Documentation/video4linux/cafe_ccic |
| 1446 | F: drivers/media/video/cafe_ccic* | 1446 | F: drivers/media/video/cafe_ccic* |
| 1447 | 1447 | ||
| 1448 | CAIF NETWORK LAYER | ||
| 1449 | M: Sjur Braendeland <sjur.brandeland@stericsson.com> | ||
| 1450 | L: netdev@vger.kernel.org | ||
| 1451 | S: Supported | ||
| 1452 | F: Documentation/networking/caif/ | ||
| 1453 | F: drivers/net/caif/ | ||
| 1454 | F: include/linux/caif/ | ||
| 1455 | F: include/net/caif/ | ||
| 1456 | F: net/caif/ | ||
| 1457 | |||
| 1448 | CALGARY x86-64 IOMMU | 1458 | CALGARY x86-64 IOMMU |
| 1449 | M: Muli Ben-Yehuda <muli@il.ibm.com> | 1459 | M: Muli Ben-Yehuda <muli@il.ibm.com> |
| 1450 | M: "Jon D. Mason" <jdmason@kudzu.us> | 1460 | M: "Jon D. Mason" <jdmason@kudzu.us> |
| @@ -2201,6 +2211,12 @@ L: linux-rdma@vger.kernel.org | |||
| 2201 | S: Supported | 2211 | S: Supported |
| 2202 | F: drivers/infiniband/hw/ehca/ | 2212 | F: drivers/infiniband/hw/ehca/ |
| 2203 | 2213 | ||
| 2214 | EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER | ||
| 2215 | M: Breno Leitao <leitao@linux.vnet.ibm.com> | ||
| 2216 | L: netdev@vger.kernel.org | ||
| 2217 | S: Maintained | ||
| 2218 | F: drivers/net/ehea/ | ||
| 2219 | |||
| 2204 | EMBEDDED LINUX | 2220 | EMBEDDED LINUX |
| 2205 | M: Paul Gortmaker <paul.gortmaker@windriver.com> | 2221 | M: Paul Gortmaker <paul.gortmaker@windriver.com> |
| 2206 | M: Matt Mackall <mpm@selenic.com> | 2222 | M: Matt Mackall <mpm@selenic.com> |
| @@ -2781,11 +2797,6 @@ S: Maintained | |||
| 2781 | F: arch/x86/kernel/hpet.c | 2797 | F: arch/x86/kernel/hpet.c |
| 2782 | F: arch/x86/include/asm/hpet.h | 2798 | F: arch/x86/include/asm/hpet.h |
| 2783 | 2799 | ||
| 2784 | HPET: ACPI | ||
| 2785 | M: Bob Picco <bob.picco@hp.com> | ||
| 2786 | S: Maintained | ||
| 2787 | F: drivers/char/hpet.c | ||
| 2788 | |||
| 2789 | HPFS FILESYSTEM | 2800 | HPFS FILESYSTEM |
| 2790 | M: Mikulas Patocka <mikulas@artax.karlin.mff.cuni.cz> | 2801 | M: Mikulas Patocka <mikulas@artax.karlin.mff.cuni.cz> |
| 2791 | W: http://artax.karlin.mff.cuni.cz/~mikulas/vyplody/hpfs/index-e.cgi | 2802 | W: http://artax.karlin.mff.cuni.cz/~mikulas/vyplody/hpfs/index-e.cgi |
| @@ -3398,7 +3409,7 @@ F: drivers/s390/kvm/ | |||
| 3398 | 3409 | ||
| 3399 | KEXEC | 3410 | KEXEC |
| 3400 | M: Eric Biederman <ebiederm@xmission.com> | 3411 | M: Eric Biederman <ebiederm@xmission.com> |
| 3401 | W: http://ftp.kernel.org/pub/linux/kernel/people/horms/kexec-tools/ | 3412 | W: http://kernel.org/pub/linux/utils/kernel/kexec/ |
| 3402 | L: kexec@lists.infradead.org | 3413 | L: kexec@lists.infradead.org |
| 3403 | S: Maintained | 3414 | S: Maintained |
| 3404 | F: include/linux/kexec.h | 3415 | F: include/linux/kexec.h |
| @@ -3923,8 +3934,7 @@ F: Documentation/sound/oss/MultiSound | |||
| 3923 | F: sound/oss/msnd* | 3934 | F: sound/oss/msnd* |
| 3924 | 3935 | ||
| 3925 | MULTITECH MULTIPORT CARD (ISICOM) | 3936 | MULTITECH MULTIPORT CARD (ISICOM) |
| 3926 | M: Jiri Slaby <jirislaby@gmail.com> | 3937 | S: Orphan |
| 3927 | S: Maintained | ||
| 3928 | F: drivers/char/isicom.c | 3938 | F: drivers/char/isicom.c |
| 3929 | F: include/linux/isicom.h | 3939 | F: include/linux/isicom.h |
| 3930 | 3940 | ||
| @@ -4604,7 +4614,7 @@ F: include/linux/preempt.h | |||
| 4604 | PRISM54 WIRELESS DRIVER | 4614 | PRISM54 WIRELESS DRIVER |
| 4605 | M: "Luis R. Rodriguez" <mcgrof@gmail.com> | 4615 | M: "Luis R. Rodriguez" <mcgrof@gmail.com> |
| 4606 | L: linux-wireless@vger.kernel.org | 4616 | L: linux-wireless@vger.kernel.org |
| 4607 | W: http://prism54.org | 4617 | W: http://wireless.kernel.org/en/users/Drivers/p54 |
| 4608 | S: Obsolete | 4618 | S: Obsolete |
| 4609 | F: drivers/net/wireless/prism54/ | 4619 | F: drivers/net/wireless/prism54/ |
| 4610 | 4620 | ||
| @@ -4805,6 +4815,7 @@ RCUTORTURE MODULE | |||
| 4805 | M: Josh Triplett <josh@freedesktop.org> | 4815 | M: Josh Triplett <josh@freedesktop.org> |
| 4806 | M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> | 4816 | M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> |
| 4807 | S: Supported | 4817 | S: Supported |
| 4818 | T: git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git | ||
| 4808 | F: Documentation/RCU/torture.txt | 4819 | F: Documentation/RCU/torture.txt |
| 4809 | F: kernel/rcutorture.c | 4820 | F: kernel/rcutorture.c |
| 4810 | 4821 | ||
| @@ -4829,6 +4840,7 @@ M: Dipankar Sarma <dipankar@in.ibm.com> | |||
| 4829 | M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> | 4840 | M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> |
| 4830 | W: http://www.rdrop.com/users/paulmck/rclock/ | 4841 | W: http://www.rdrop.com/users/paulmck/rclock/ |
| 4831 | S: Supported | 4842 | S: Supported |
| 4843 | T: git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git | ||
| 4832 | F: Documentation/RCU/ | 4844 | F: Documentation/RCU/ |
| 4833 | F: include/linux/rcu* | 4845 | F: include/linux/rcu* |
| 4834 | F: include/linux/srcu* | 4846 | F: include/linux/srcu* |
| @@ -4836,12 +4848,10 @@ F: kernel/rcu* | |||
| 4836 | F: kernel/srcu* | 4848 | F: kernel/srcu* |
| 4837 | X: kernel/rcutorture.c | 4849 | X: kernel/rcutorture.c |
| 4838 | 4850 | ||
| 4839 | REAL TIME CLOCK DRIVER | 4851 | REAL TIME CLOCK DRIVER (LEGACY) |
| 4840 | M: Paul Gortmaker <p_gortmaker@yahoo.com> | 4852 | M: Paul Gortmaker <p_gortmaker@yahoo.com> |
| 4841 | S: Maintained | 4853 | S: Maintained |
| 4842 | F: Documentation/rtc.txt | 4854 | F: drivers/char/rtc.c |
| 4843 | F: drivers/rtc/ | ||
| 4844 | F: include/linux/rtc.h | ||
| 4845 | 4855 | ||
| 4846 | REAL TIME CLOCK (RTC) SUBSYSTEM | 4856 | REAL TIME CLOCK (RTC) SUBSYSTEM |
| 4847 | M: Alessandro Zummo <a.zummo@towertech.it> | 4857 | M: Alessandro Zummo <a.zummo@towertech.it> |
diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | VERSION = 2 | 1 | VERSION = 2 |
| 2 | PATCHLEVEL = 6 | 2 | PATCHLEVEL = 6 |
| 3 | SUBLEVEL = 36 | 3 | SUBLEVEL = 36 |
| 4 | EXTRAVERSION = -rc3 | 4 | EXTRAVERSION = -rc4 |
| 5 | NAME = Sheep on Meth | 5 | NAME = Sheep on Meth |
| 6 | 6 | ||
| 7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h index f199e69a5d0b..ad368a93a46a 100644 --- a/arch/alpha/include/asm/cache.h +++ b/arch/alpha/include/asm/cache.h | |||
| @@ -17,7 +17,6 @@ | |||
| 17 | # define L1_CACHE_SHIFT 5 | 17 | # define L1_CACHE_SHIFT 5 |
| 18 | #endif | 18 | #endif |
| 19 | 19 | ||
| 20 | #define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)) | ||
| 21 | #define SMP_CACHE_BYTES L1_CACHE_BYTES | 20 | #define SMP_CACHE_BYTES L1_CACHE_BYTES |
| 22 | 21 | ||
| 23 | #endif | 22 | #endif |
diff --git a/arch/alpha/kernel/err_marvel.c b/arch/alpha/kernel/err_marvel.c index 52a79dfc13c6..5c905aaaeccd 100644 --- a/arch/alpha/kernel/err_marvel.c +++ b/arch/alpha/kernel/err_marvel.c | |||
| @@ -109,7 +109,7 @@ marvel_print_err_cyc(u64 err_cyc) | |||
| 109 | #define IO7__ERR_CYC__CYCLE__M (0x7) | 109 | #define IO7__ERR_CYC__CYCLE__M (0x7) |
| 110 | 110 | ||
| 111 | printk("%s Packet In Error: %s\n" | 111 | printk("%s Packet In Error: %s\n" |
| 112 | "%s Error in %s, cycle %ld%s%s\n", | 112 | "%s Error in %s, cycle %lld%s%s\n", |
| 113 | err_print_prefix, | 113 | err_print_prefix, |
| 114 | packet_desc[EXTRACT(err_cyc, IO7__ERR_CYC__PACKET)], | 114 | packet_desc[EXTRACT(err_cyc, IO7__ERR_CYC__PACKET)], |
| 115 | err_print_prefix, | 115 | err_print_prefix, |
| @@ -313,7 +313,7 @@ marvel_print_po7_ugbge_sym(u64 ugbge_sym) | |||
| 313 | } | 313 | } |
| 314 | 314 | ||
| 315 | printk("%s Up Hose Garbage Symptom:\n" | 315 | printk("%s Up Hose Garbage Symptom:\n" |
| 316 | "%s Source Port: %ld - Dest PID: %ld - OpCode: %s\n", | 316 | "%s Source Port: %lld - Dest PID: %lld - OpCode: %s\n", |
| 317 | err_print_prefix, | 317 | err_print_prefix, |
| 318 | err_print_prefix, | 318 | err_print_prefix, |
| 319 | EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_SRC_PORT), | 319 | EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_SRC_PORT), |
| @@ -552,7 +552,7 @@ marvel_print_pox_spl_cmplt(u64 spl_cmplt) | |||
| 552 | #define IO7__POX_SPLCMPLT__REM_BYTE_COUNT__M (0xfff) | 552 | #define IO7__POX_SPLCMPLT__REM_BYTE_COUNT__M (0xfff) |
| 553 | 553 | ||
| 554 | printk("%s Split Completion Error:\n" | 554 | printk("%s Split Completion Error:\n" |
| 555 | "%s Source (Bus:Dev:Func): %ld:%ld:%ld\n", | 555 | "%s Source (Bus:Dev:Func): %lld:%lld:%lld\n", |
| 556 | err_print_prefix, | 556 | err_print_prefix, |
| 557 | err_print_prefix, | 557 | err_print_prefix, |
| 558 | EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__SOURCE_BUS), | 558 | EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__SOURCE_BUS), |
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c index 51c39fa41693..85d8e4f58c83 100644 --- a/arch/alpha/kernel/perf_event.c +++ b/arch/alpha/kernel/perf_event.c | |||
| @@ -241,20 +241,20 @@ static inline unsigned long alpha_read_pmc(int idx) | |||
| 241 | static int alpha_perf_event_set_period(struct perf_event *event, | 241 | static int alpha_perf_event_set_period(struct perf_event *event, |
| 242 | struct hw_perf_event *hwc, int idx) | 242 | struct hw_perf_event *hwc, int idx) |
| 243 | { | 243 | { |
| 244 | long left = atomic64_read(&hwc->period_left); | 244 | long left = local64_read(&hwc->period_left); |
| 245 | long period = hwc->sample_period; | 245 | long period = hwc->sample_period; |
| 246 | int ret = 0; | 246 | int ret = 0; |
| 247 | 247 | ||
| 248 | if (unlikely(left <= -period)) { | 248 | if (unlikely(left <= -period)) { |
| 249 | left = period; | 249 | left = period; |
| 250 | atomic64_set(&hwc->period_left, left); | 250 | local64_set(&hwc->period_left, left); |
| 251 | hwc->last_period = period; | 251 | hwc->last_period = period; |
| 252 | ret = 1; | 252 | ret = 1; |
| 253 | } | 253 | } |
| 254 | 254 | ||
| 255 | if (unlikely(left <= 0)) { | 255 | if (unlikely(left <= 0)) { |
| 256 | left += period; | 256 | left += period; |
| 257 | atomic64_set(&hwc->period_left, left); | 257 | local64_set(&hwc->period_left, left); |
| 258 | hwc->last_period = period; | 258 | hwc->last_period = period; |
| 259 | ret = 1; | 259 | ret = 1; |
| 260 | } | 260 | } |
| @@ -269,7 +269,7 @@ static int alpha_perf_event_set_period(struct perf_event *event, | |||
| 269 | if (left > (long)alpha_pmu->pmc_max_period[idx]) | 269 | if (left > (long)alpha_pmu->pmc_max_period[idx]) |
| 270 | left = alpha_pmu->pmc_max_period[idx]; | 270 | left = alpha_pmu->pmc_max_period[idx]; |
| 271 | 271 | ||
| 272 | atomic64_set(&hwc->prev_count, (unsigned long)(-left)); | 272 | local64_set(&hwc->prev_count, (unsigned long)(-left)); |
| 273 | 273 | ||
| 274 | alpha_write_pmc(idx, (unsigned long)(-left)); | 274 | alpha_write_pmc(idx, (unsigned long)(-left)); |
| 275 | 275 | ||
| @@ -300,10 +300,10 @@ static unsigned long alpha_perf_event_update(struct perf_event *event, | |||
| 300 | long delta; | 300 | long delta; |
| 301 | 301 | ||
| 302 | again: | 302 | again: |
| 303 | prev_raw_count = atomic64_read(&hwc->prev_count); | 303 | prev_raw_count = local64_read(&hwc->prev_count); |
| 304 | new_raw_count = alpha_read_pmc(idx); | 304 | new_raw_count = alpha_read_pmc(idx); |
| 305 | 305 | ||
| 306 | if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count, | 306 | if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, |
| 307 | new_raw_count) != prev_raw_count) | 307 | new_raw_count) != prev_raw_count) |
| 308 | goto again; | 308 | goto again; |
| 309 | 309 | ||
| @@ -316,8 +316,8 @@ again: | |||
| 316 | delta += alpha_pmu->pmc_max_period[idx] + 1; | 316 | delta += alpha_pmu->pmc_max_period[idx] + 1; |
| 317 | } | 317 | } |
| 318 | 318 | ||
| 319 | atomic64_add(delta, &event->count); | 319 | local64_add(delta, &event->count); |
| 320 | atomic64_sub(delta, &hwc->period_left); | 320 | local64_sub(delta, &hwc->period_left); |
| 321 | 321 | ||
| 322 | return new_raw_count; | 322 | return new_raw_count; |
| 323 | } | 323 | } |
| @@ -636,7 +636,7 @@ static int __hw_perf_event_init(struct perf_event *event) | |||
| 636 | if (!hwc->sample_period) { | 636 | if (!hwc->sample_period) { |
| 637 | hwc->sample_period = alpha_pmu->pmc_max_period[0]; | 637 | hwc->sample_period = alpha_pmu->pmc_max_period[0]; |
| 638 | hwc->last_period = hwc->sample_period; | 638 | hwc->last_period = hwc->sample_period; |
| 639 | atomic64_set(&hwc->period_left, hwc->sample_period); | 639 | local64_set(&hwc->period_left, hwc->sample_period); |
| 640 | } | 640 | } |
| 641 | 641 | ||
| 642 | return 0; | 642 | return 0; |
diff --git a/arch/alpha/kernel/proto.h b/arch/alpha/kernel/proto.h index 3d2627ec9860..d3e52d3fd592 100644 --- a/arch/alpha/kernel/proto.h +++ b/arch/alpha/kernel/proto.h | |||
| @@ -156,9 +156,6 @@ extern void SMC669_Init(int); | |||
| 156 | /* es1888.c */ | 156 | /* es1888.c */ |
| 157 | extern void es1888_init(void); | 157 | extern void es1888_init(void); |
| 158 | 158 | ||
| 159 | /* ns87312.c */ | ||
| 160 | extern void ns87312_enable_ide(long ide_base); | ||
| 161 | |||
| 162 | /* ../lib/fpreg.c */ | 159 | /* ../lib/fpreg.c */ |
| 163 | extern void alpha_write_fp_reg (unsigned long reg, unsigned long val); | 160 | extern void alpha_write_fp_reg (unsigned long reg, unsigned long val); |
| 164 | extern unsigned long alpha_read_fp_reg (unsigned long reg); | 161 | extern unsigned long alpha_read_fp_reg (unsigned long reg); |
diff --git a/arch/alpha/kernel/sys_cabriolet.c b/arch/alpha/kernel/sys_cabriolet.c index affd0f3f25df..14c8898d19ec 100644 --- a/arch/alpha/kernel/sys_cabriolet.c +++ b/arch/alpha/kernel/sys_cabriolet.c | |||
| @@ -33,7 +33,7 @@ | |||
| 33 | #include "irq_impl.h" | 33 | #include "irq_impl.h" |
| 34 | #include "pci_impl.h" | 34 | #include "pci_impl.h" |
| 35 | #include "machvec_impl.h" | 35 | #include "machvec_impl.h" |
| 36 | 36 | #include "pc873xx.h" | |
| 37 | 37 | ||
| 38 | /* Note mask bit is true for DISABLED irqs. */ | 38 | /* Note mask bit is true for DISABLED irqs. */ |
| 39 | static unsigned long cached_irq_mask = ~0UL; | 39 | static unsigned long cached_irq_mask = ~0UL; |
| @@ -236,17 +236,30 @@ cabriolet_map_irq(struct pci_dev *dev, u8 slot, u8 pin) | |||
| 236 | } | 236 | } |
| 237 | 237 | ||
| 238 | static inline void __init | 238 | static inline void __init |
| 239 | cabriolet_enable_ide(void) | ||
| 240 | { | ||
| 241 | if (pc873xx_probe() == -1) { | ||
| 242 | printk(KERN_ERR "Probing for PC873xx Super IO chip failed.\n"); | ||
| 243 | } else { | ||
| 244 | printk(KERN_INFO "Found %s Super IO chip at 0x%x\n", | ||
| 245 | pc873xx_get_model(), pc873xx_get_base()); | ||
| 246 | |||
| 247 | pc873xx_enable_ide(); | ||
| 248 | } | ||
| 249 | } | ||
| 250 | |||
| 251 | static inline void __init | ||
| 239 | cabriolet_init_pci(void) | 252 | cabriolet_init_pci(void) |
| 240 | { | 253 | { |
| 241 | common_init_pci(); | 254 | common_init_pci(); |
| 242 | ns87312_enable_ide(0x398); | 255 | cabriolet_enable_ide(); |
| 243 | } | 256 | } |
| 244 | 257 | ||
| 245 | static inline void __init | 258 | static inline void __init |
| 246 | cia_cab_init_pci(void) | 259 | cia_cab_init_pci(void) |
| 247 | { | 260 | { |
| 248 | cia_init_pci(); | 261 | cia_init_pci(); |
| 249 | ns87312_enable_ide(0x398); | 262 | cabriolet_enable_ide(); |
| 250 | } | 263 | } |
| 251 | 264 | ||
| 252 | /* | 265 | /* |
diff --git a/arch/alpha/kernel/sys_takara.c b/arch/alpha/kernel/sys_takara.c index 230464885b5c..4da596b6adbb 100644 --- a/arch/alpha/kernel/sys_takara.c +++ b/arch/alpha/kernel/sys_takara.c | |||
| @@ -29,7 +29,7 @@ | |||
| 29 | #include "irq_impl.h" | 29 | #include "irq_impl.h" |
| 30 | #include "pci_impl.h" | 30 | #include "pci_impl.h" |
| 31 | #include "machvec_impl.h" | 31 | #include "machvec_impl.h" |
| 32 | 32 | #include "pc873xx.h" | |
| 33 | 33 | ||
| 34 | /* Note mask bit is true for DISABLED irqs. */ | 34 | /* Note mask bit is true for DISABLED irqs. */ |
| 35 | static unsigned long cached_irq_mask[2] = { -1, -1 }; | 35 | static unsigned long cached_irq_mask[2] = { -1, -1 }; |
| @@ -264,7 +264,14 @@ takara_init_pci(void) | |||
| 264 | alpha_mv.pci_map_irq = takara_map_irq_srm; | 264 | alpha_mv.pci_map_irq = takara_map_irq_srm; |
| 265 | 265 | ||
| 266 | cia_init_pci(); | 266 | cia_init_pci(); |
| 267 | ns87312_enable_ide(0x26e); | 267 | |
| 268 | if (pc873xx_probe() == -1) { | ||
| 269 | printk(KERN_ERR "Probing for PC873xx Super IO chip failed.\n"); | ||
| 270 | } else { | ||
| 271 | printk(KERN_INFO "Found %s Super IO chip at 0x%x\n", | ||
| 272 | pc873xx_get_model(), pc873xx_get_base()); | ||
| 273 | pc873xx_enable_ide(); | ||
| 274 | } | ||
| 268 | } | 275 | } |
| 269 | 276 | ||
| 270 | 277 | ||
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index a7ed21f0136a..553b7cf17bfb 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
| @@ -1576,96 +1576,6 @@ config AUTO_ZRELADDR | |||
| 1576 | 0xf8000000. This assumes the zImage being placed in the first 128MB | 1576 | 0xf8000000. This assumes the zImage being placed in the first 128MB |
| 1577 | from start of memory. | 1577 | from start of memory. |
| 1578 | 1578 | ||
| 1579 | config ZRELADDR | ||
| 1580 | hex "Physical address of the decompressed kernel image" | ||
| 1581 | depends on !AUTO_ZRELADDR | ||
| 1582 | default 0x00008000 if ARCH_BCMRING ||\ | ||
| 1583 | ARCH_CNS3XXX ||\ | ||
| 1584 | ARCH_DOVE ||\ | ||
| 1585 | ARCH_EBSA110 ||\ | ||
| 1586 | ARCH_FOOTBRIDGE ||\ | ||
| 1587 | ARCH_INTEGRATOR ||\ | ||
| 1588 | ARCH_IOP13XX ||\ | ||
| 1589 | ARCH_IOP33X ||\ | ||
| 1590 | ARCH_IXP2000 ||\ | ||
| 1591 | ARCH_IXP23XX ||\ | ||
| 1592 | ARCH_IXP4XX ||\ | ||
| 1593 | ARCH_KIRKWOOD ||\ | ||
| 1594 | ARCH_KS8695 ||\ | ||
| 1595 | ARCH_LOKI ||\ | ||
| 1596 | ARCH_MMP ||\ | ||
| 1597 | ARCH_MV78XX0 ||\ | ||
| 1598 | ARCH_NOMADIK ||\ | ||
| 1599 | ARCH_NUC93X ||\ | ||
| 1600 | ARCH_NS9XXX ||\ | ||
| 1601 | ARCH_ORION5X ||\ | ||
| 1602 | ARCH_SPEAR3XX ||\ | ||
| 1603 | ARCH_SPEAR6XX ||\ | ||
| 1604 | ARCH_U8500 ||\ | ||
| 1605 | ARCH_VERSATILE ||\ | ||
| 1606 | ARCH_W90X900 | ||
| 1607 | default 0x08008000 if ARCH_MX1 ||\ | ||
| 1608 | ARCH_SHARK | ||
| 1609 | default 0x10008000 if ARCH_MSM ||\ | ||
| 1610 | ARCH_OMAP1 ||\ | ||
| 1611 | ARCH_RPC | ||
| 1612 | default 0x20008000 if ARCH_S5P6440 ||\ | ||
| 1613 | ARCH_S5P6442 ||\ | ||
| 1614 | ARCH_S5PC100 ||\ | ||
| 1615 | ARCH_S5PV210 | ||
| 1616 | default 0x30008000 if ARCH_S3C2410 ||\ | ||
| 1617 | ARCH_S3C2400 ||\ | ||
| 1618 | ARCH_S3C2412 ||\ | ||
| 1619 | ARCH_S3C2416 ||\ | ||
| 1620 | ARCH_S3C2440 ||\ | ||
| 1621 | ARCH_S3C2443 | ||
| 1622 | default 0x40008000 if ARCH_STMP378X ||\ | ||
| 1623 | ARCH_STMP37XX ||\ | ||
| 1624 | ARCH_SH7372 ||\ | ||
| 1625 | ARCH_SH7377 ||\ | ||
| 1626 | ARCH_S5PV310 | ||
| 1627 | default 0x50008000 if ARCH_S3C64XX ||\ | ||
| 1628 | ARCH_SH7367 | ||
| 1629 | default 0x60008000 if ARCH_VEXPRESS | ||
| 1630 | default 0x80008000 if ARCH_MX25 ||\ | ||
| 1631 | ARCH_MX3 ||\ | ||
| 1632 | ARCH_NETX ||\ | ||
| 1633 | ARCH_OMAP2PLUS ||\ | ||
| 1634 | ARCH_PNX4008 | ||
| 1635 | default 0x90008000 if ARCH_MX5 ||\ | ||
| 1636 | ARCH_MX91231 | ||
| 1637 | default 0xa0008000 if ARCH_IOP32X ||\ | ||
| 1638 | ARCH_PXA ||\ | ||
| 1639 | MACH_MX27 | ||
| 1640 | default 0xc0008000 if ARCH_LH7A40X ||\ | ||
| 1641 | MACH_MX21 | ||
| 1642 | default 0xf0008000 if ARCH_AAEC2000 ||\ | ||
| 1643 | ARCH_L7200 | ||
| 1644 | default 0xc0028000 if ARCH_CLPS711X | ||
| 1645 | default 0x70008000 if ARCH_AT91 && (ARCH_AT91CAP9 || ARCH_AT91SAM9G45) | ||
| 1646 | default 0x20008000 if ARCH_AT91 && !(ARCH_AT91CAP9 || ARCH_AT91SAM9G45) | ||
| 1647 | default 0xc0008000 if ARCH_DAVINCI && ARCH_DAVINCI_DA8XX | ||
| 1648 | default 0x80008000 if ARCH_DAVINCI && !ARCH_DAVINCI_DA8XX | ||
| 1649 | default 0x00008000 if ARCH_EP93XX && EP93XX_SDCE3_SYNC_PHYS_OFFSET | ||
| 1650 | default 0xc0008000 if ARCH_EP93XX && EP93XX_SDCE0_PHYS_OFFSET | ||
| 1651 | default 0xd0008000 if ARCH_EP93XX && EP93XX_SDCE1_PHYS_OFFSET | ||
| 1652 | default 0xe0008000 if ARCH_EP93XX && EP93XX_SDCE2_PHYS_OFFSET | ||
| 1653 | default 0xf0008000 if ARCH_EP93XX && EP93XX_SDCE3_ASYNC_PHYS_OFFSET | ||
| 1654 | default 0x00008000 if ARCH_GEMINI && GEMINI_MEM_SWAP | ||
| 1655 | default 0x10008000 if ARCH_GEMINI && !GEMINI_MEM_SWAP | ||
| 1656 | default 0x70008000 if ARCH_REALVIEW && REALVIEW_HIGH_PHYS_OFFSET | ||
| 1657 | default 0x00008000 if ARCH_REALVIEW && !REALVIEW_HIGH_PHYS_OFFSET | ||
| 1658 | default 0xc0208000 if ARCH_SA1100 && SA1111 | ||
| 1659 | default 0xc0008000 if ARCH_SA1100 && !SA1111 | ||
| 1660 | default 0x30108000 if ARCH_S3C2410 && PM_H1940 | ||
| 1661 | default 0x28E08000 if ARCH_U300 && MACH_U300_SINGLE_RAM | ||
| 1662 | default 0x48008000 if ARCH_U300 && !MACH_U300_SINGLE_RAM | ||
| 1663 | help | ||
| 1664 | ZRELADDR is the physical address where the decompressed kernel | ||
| 1665 | image will be placed. ZRELADDR has to be specified when the | ||
| 1666 | assumption of AUTO_ZRELADDR is not valid, or when ZBOOT_ROM is | ||
| 1667 | selected. | ||
| 1668 | |||
| 1669 | endmenu | 1579 | endmenu |
| 1670 | 1580 | ||
| 1671 | menu "CPU Power Management" | 1581 | menu "CPU Power Management" |
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile index f705213caa88..4a590f4113e2 100644 --- a/arch/arm/boot/Makefile +++ b/arch/arm/boot/Makefile | |||
| @@ -14,16 +14,18 @@ | |||
| 14 | MKIMAGE := $(srctree)/scripts/mkuboot.sh | 14 | MKIMAGE := $(srctree)/scripts/mkuboot.sh |
| 15 | 15 | ||
| 16 | ifneq ($(MACHINE),) | 16 | ifneq ($(MACHINE),) |
| 17 | -include $(srctree)/$(MACHINE)/Makefile.boot | 17 | include $(srctree)/$(MACHINE)/Makefile.boot |
| 18 | endif | 18 | endif |
| 19 | 19 | ||
| 20 | # Note: the following conditions must always be true: | 20 | # Note: the following conditions must always be true: |
| 21 | # ZRELADDR == virt_to_phys(PAGE_OFFSET + TEXT_OFFSET) | ||
| 21 | # PARAMS_PHYS must be within 4MB of ZRELADDR | 22 | # PARAMS_PHYS must be within 4MB of ZRELADDR |
| 22 | # INITRD_PHYS must be in RAM | 23 | # INITRD_PHYS must be in RAM |
| 24 | ZRELADDR := $(zreladdr-y) | ||
| 23 | PARAMS_PHYS := $(params_phys-y) | 25 | PARAMS_PHYS := $(params_phys-y) |
| 24 | INITRD_PHYS := $(initrd_phys-y) | 26 | INITRD_PHYS := $(initrd_phys-y) |
| 25 | 27 | ||
| 26 | export INITRD_PHYS PARAMS_PHYS | 28 | export ZRELADDR INITRD_PHYS PARAMS_PHYS |
| 27 | 29 | ||
| 28 | targets := Image zImage xipImage bootpImage uImage | 30 | targets := Image zImage xipImage bootpImage uImage |
| 29 | 31 | ||
| @@ -65,7 +67,7 @@ quiet_cmd_uimage = UIMAGE $@ | |||
| 65 | ifeq ($(CONFIG_ZBOOT_ROM),y) | 67 | ifeq ($(CONFIG_ZBOOT_ROM),y) |
| 66 | $(obj)/uImage: LOADADDR=$(CONFIG_ZBOOT_ROM_TEXT) | 68 | $(obj)/uImage: LOADADDR=$(CONFIG_ZBOOT_ROM_TEXT) |
| 67 | else | 69 | else |
| 68 | $(obj)/uImage: LOADADDR=$(CONFIG_ZRELADDR) | 70 | $(obj)/uImage: LOADADDR=$(ZRELADDR) |
| 69 | endif | 71 | endif |
| 70 | 72 | ||
| 71 | ifeq ($(CONFIG_THUMB2_KERNEL),y) | 73 | ifeq ($(CONFIG_THUMB2_KERNEL),y) |
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index 68775e33476c..b23f6bc46cfa 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile | |||
| @@ -79,6 +79,10 @@ endif | |||
| 79 | EXTRA_CFLAGS := -fpic -fno-builtin | 79 | EXTRA_CFLAGS := -fpic -fno-builtin |
| 80 | EXTRA_AFLAGS := -Wa,-march=all | 80 | EXTRA_AFLAGS := -Wa,-march=all |
| 81 | 81 | ||
| 82 | # Supply ZRELADDR to the decompressor via a linker symbol. | ||
| 83 | ifneq ($(CONFIG_AUTO_ZRELADDR),y) | ||
| 84 | LDFLAGS_vmlinux := --defsym zreladdr=$(ZRELADDR) | ||
| 85 | endif | ||
| 82 | ifeq ($(CONFIG_CPU_ENDIAN_BE8),y) | 86 | ifeq ($(CONFIG_CPU_ENDIAN_BE8),y) |
| 83 | LDFLAGS_vmlinux += --be8 | 87 | LDFLAGS_vmlinux += --be8 |
| 84 | endif | 88 | endif |
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index 6af9907c3b5c..6825c34646d4 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S | |||
| @@ -177,7 +177,7 @@ not_angel: | |||
| 177 | and r4, pc, #0xf8000000 | 177 | and r4, pc, #0xf8000000 |
| 178 | add r4, r4, #TEXT_OFFSET | 178 | add r4, r4, #TEXT_OFFSET |
| 179 | #else | 179 | #else |
| 180 | ldr r4, =CONFIG_ZRELADDR | 180 | ldr r4, =zreladdr |
| 181 | #endif | 181 | #endif |
| 182 | subs r0, r0, r1 @ calculate the delta offset | 182 | subs r0, r0, r1 @ calculate the delta offset |
| 183 | 183 | ||
diff --git a/arch/arm/common/it8152.c b/arch/arm/common/it8152.c index 6c0913562455..7974baacafce 100644 --- a/arch/arm/common/it8152.c +++ b/arch/arm/common/it8152.c | |||
| @@ -263,6 +263,14 @@ static int it8152_pci_platform_notify_remove(struct device *dev) | |||
| 263 | return 0; | 263 | return 0; |
| 264 | } | 264 | } |
| 265 | 265 | ||
| 266 | int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size) | ||
| 267 | { | ||
| 268 | dev_dbg(dev, "%s: dma_addr %08x, size %08x\n", | ||
| 269 | __func__, dma_addr, size); | ||
| 270 | return (dev->bus == &pci_bus_type) && | ||
| 271 | ((dma_addr + size - PHYS_OFFSET) >= SZ_64M); | ||
| 272 | } | ||
| 273 | |||
| 266 | int __init it8152_pci_setup(int nr, struct pci_sys_data *sys) | 274 | int __init it8152_pci_setup(int nr, struct pci_sys_data *sys) |
| 267 | { | 275 | { |
| 268 | it8152_io.start = IT8152_IO_BASE + 0x12000; | 276 | it8152_io.start = IT8152_IO_BASE + 0x12000; |
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index c226fe10553e..c568da7dcae4 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h | |||
| @@ -288,15 +288,7 @@ extern void dmabounce_unregister_dev(struct device *); | |||
| 288 | * DMA access and 1 if the buffer needs to be bounced. | 288 | * DMA access and 1 if the buffer needs to be bounced. |
| 289 | * | 289 | * |
| 290 | */ | 290 | */ |
| 291 | #ifdef CONFIG_SA1111 | ||
| 292 | extern int dma_needs_bounce(struct device*, dma_addr_t, size_t); | 291 | extern int dma_needs_bounce(struct device*, dma_addr_t, size_t); |
| 293 | #else | ||
| 294 | static inline int dma_needs_bounce(struct device *dev, dma_addr_t addr, | ||
| 295 | size_t size) | ||
| 296 | { | ||
| 297 | return 0; | ||
| 298 | } | ||
| 299 | #endif | ||
| 300 | 292 | ||
| 301 | /* | 293 | /* |
| 302 | * The DMA API, implemented by dmabounce.c. See below for descriptions. | 294 | * The DMA API, implemented by dmabounce.c. See below for descriptions. |
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h index 48837e6d8887..b5799a3b7117 100644 --- a/arch/arm/include/asm/perf_event.h +++ b/arch/arm/include/asm/perf_event.h | |||
| @@ -17,7 +17,7 @@ | |||
| 17 | * counter interrupts are regular interrupts and not an NMI. This | 17 | * counter interrupts are regular interrupts and not an NMI. This |
| 18 | * means that when we receive the interrupt we can call | 18 | * means that when we receive the interrupt we can call |
| 19 | * perf_event_do_pending() that handles all of the work with | 19 | * perf_event_do_pending() that handles all of the work with |
| 20 | * interrupts enabled. | 20 | * interrupts disabled. |
| 21 | */ | 21 | */ |
| 22 | static inline void | 22 | static inline void |
| 23 | set_perf_event_pending(void) | 23 | set_perf_event_pending(void) |
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h index d02cfb683487..c891eb76c0e3 100644 --- a/arch/arm/include/asm/unistd.h +++ b/arch/arm/include/asm/unistd.h | |||
| @@ -393,6 +393,9 @@ | |||
| 393 | #define __NR_perf_event_open (__NR_SYSCALL_BASE+364) | 393 | #define __NR_perf_event_open (__NR_SYSCALL_BASE+364) |
| 394 | #define __NR_recvmmsg (__NR_SYSCALL_BASE+365) | 394 | #define __NR_recvmmsg (__NR_SYSCALL_BASE+365) |
| 395 | #define __NR_accept4 (__NR_SYSCALL_BASE+366) | 395 | #define __NR_accept4 (__NR_SYSCALL_BASE+366) |
| 396 | #define __NR_fanotify_init (__NR_SYSCALL_BASE+367) | ||
| 397 | #define __NR_fanotify_mark (__NR_SYSCALL_BASE+368) | ||
| 398 | #define __NR_prlimit64 (__NR_SYSCALL_BASE+369) | ||
| 396 | 399 | ||
| 397 | /* | 400 | /* |
| 398 | * The following SWIs are ARM private. | 401 | * The following SWIs are ARM private. |
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S index afeb71fa72cb..5c26eccef998 100644 --- a/arch/arm/kernel/calls.S +++ b/arch/arm/kernel/calls.S | |||
| @@ -376,6 +376,9 @@ | |||
| 376 | CALL(sys_perf_event_open) | 376 | CALL(sys_perf_event_open) |
| 377 | /* 365 */ CALL(sys_recvmmsg) | 377 | /* 365 */ CALL(sys_recvmmsg) |
| 378 | CALL(sys_accept4) | 378 | CALL(sys_accept4) |
| 379 | CALL(sys_fanotify_init) | ||
| 380 | CALL(sys_fanotify_mark) | ||
| 381 | CALL(sys_prlimit64) | ||
| 379 | #ifndef syscalls_counted | 382 | #ifndef syscalls_counted |
| 380 | .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls | 383 | .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls |
| 381 | #define syscalls_counted | 384 | #define syscalls_counted |
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index 417c392ddf1c..ecbb0288e5dd 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c | |||
| @@ -319,8 +319,8 @@ validate_event(struct cpu_hw_events *cpuc, | |||
| 319 | { | 319 | { |
| 320 | struct hw_perf_event fake_event = event->hw; | 320 | struct hw_perf_event fake_event = event->hw; |
| 321 | 321 | ||
| 322 | if (event->pmu && event->pmu != &pmu) | 322 | if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF) |
| 323 | return 0; | 323 | return 1; |
| 324 | 324 | ||
| 325 | return armpmu->get_event_idx(cpuc, &fake_event) >= 0; | 325 | return armpmu->get_event_idx(cpuc, &fake_event) >= 0; |
| 326 | } | 326 | } |
| @@ -1041,8 +1041,8 @@ armv6pmu_handle_irq(int irq_num, | |||
| 1041 | /* | 1041 | /* |
| 1042 | * Handle the pending perf events. | 1042 | * Handle the pending perf events. |
| 1043 | * | 1043 | * |
| 1044 | * Note: this call *must* be run with interrupts enabled. For | 1044 | * Note: this call *must* be run with interrupts disabled. For |
| 1045 | * platforms that can have the PMU interrupts raised as a PMI, this | 1045 | * platforms that can have the PMU interrupts raised as an NMI, this |
| 1046 | * will not work. | 1046 | * will not work. |
| 1047 | */ | 1047 | */ |
| 1048 | perf_event_do_pending(); | 1048 | perf_event_do_pending(); |
| @@ -2017,8 +2017,8 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev) | |||
| 2017 | /* | 2017 | /* |
| 2018 | * Handle the pending perf events. | 2018 | * Handle the pending perf events. |
| 2019 | * | 2019 | * |
| 2020 | * Note: this call *must* be run with interrupts enabled. For | 2020 | * Note: this call *must* be run with interrupts disabled. For |
| 2021 | * platforms that can have the PMU interrupts raised as a PMI, this | 2021 | * platforms that can have the PMU interrupts raised as an NMI, this |
| 2022 | * will not work. | 2022 | * will not work. |
| 2023 | */ | 2023 | */ |
| 2024 | perf_event_do_pending(); | 2024 | perf_event_do_pending(); |
diff --git a/arch/arm/mach-at91/at91sam9g45.c b/arch/arm/mach-at91/at91sam9g45.c index 753c0d31a3d3..c67b47f1c0fd 100644 --- a/arch/arm/mach-at91/at91sam9g45.c +++ b/arch/arm/mach-at91/at91sam9g45.c | |||
| @@ -121,8 +121,8 @@ static struct clk ssc1_clk = { | |||
| 121 | .pmc_mask = 1 << AT91SAM9G45_ID_SSC1, | 121 | .pmc_mask = 1 << AT91SAM9G45_ID_SSC1, |
| 122 | .type = CLK_TYPE_PERIPHERAL, | 122 | .type = CLK_TYPE_PERIPHERAL, |
| 123 | }; | 123 | }; |
| 124 | static struct clk tcb_clk = { | 124 | static struct clk tcb0_clk = { |
| 125 | .name = "tcb_clk", | 125 | .name = "tcb0_clk", |
| 126 | .pmc_mask = 1 << AT91SAM9G45_ID_TCB, | 126 | .pmc_mask = 1 << AT91SAM9G45_ID_TCB, |
| 127 | .type = CLK_TYPE_PERIPHERAL, | 127 | .type = CLK_TYPE_PERIPHERAL, |
| 128 | }; | 128 | }; |
| @@ -192,6 +192,14 @@ static struct clk ohci_clk = { | |||
| 192 | .parent = &uhphs_clk, | 192 | .parent = &uhphs_clk, |
| 193 | }; | 193 | }; |
| 194 | 194 | ||
| 195 | /* One additional fake clock for second TC block */ | ||
| 196 | static struct clk tcb1_clk = { | ||
| 197 | .name = "tcb1_clk", | ||
| 198 | .pmc_mask = 0, | ||
| 199 | .type = CLK_TYPE_PERIPHERAL, | ||
| 200 | .parent = &tcb0_clk, | ||
| 201 | }; | ||
| 202 | |||
| 195 | static struct clk *periph_clocks[] __initdata = { | 203 | static struct clk *periph_clocks[] __initdata = { |
| 196 | &pioA_clk, | 204 | &pioA_clk, |
| 197 | &pioB_clk, | 205 | &pioB_clk, |
| @@ -208,7 +216,7 @@ static struct clk *periph_clocks[] __initdata = { | |||
| 208 | &spi1_clk, | 216 | &spi1_clk, |
| 209 | &ssc0_clk, | 217 | &ssc0_clk, |
| 210 | &ssc1_clk, | 218 | &ssc1_clk, |
| 211 | &tcb_clk, | 219 | &tcb0_clk, |
| 212 | &pwm_clk, | 220 | &pwm_clk, |
| 213 | &tsc_clk, | 221 | &tsc_clk, |
| 214 | &dma_clk, | 222 | &dma_clk, |
| @@ -221,6 +229,7 @@ static struct clk *periph_clocks[] __initdata = { | |||
| 221 | &mmc1_clk, | 229 | &mmc1_clk, |
| 222 | // irq0 | 230 | // irq0 |
| 223 | &ohci_clk, | 231 | &ohci_clk, |
| 232 | &tcb1_clk, | ||
| 224 | }; | 233 | }; |
| 225 | 234 | ||
| 226 | /* | 235 | /* |
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c index 809114d5a5a6..5e71ccd5e7d3 100644 --- a/arch/arm/mach-at91/at91sam9g45_devices.c +++ b/arch/arm/mach-at91/at91sam9g45_devices.c | |||
| @@ -46,7 +46,7 @@ static struct resource hdmac_resources[] = { | |||
| 46 | .end = AT91_BASE_SYS + AT91_DMA + SZ_512 - 1, | 46 | .end = AT91_BASE_SYS + AT91_DMA + SZ_512 - 1, |
| 47 | .flags = IORESOURCE_MEM, | 47 | .flags = IORESOURCE_MEM, |
| 48 | }, | 48 | }, |
| 49 | [2] = { | 49 | [1] = { |
| 50 | .start = AT91SAM9G45_ID_DMA, | 50 | .start = AT91SAM9G45_ID_DMA, |
| 51 | .end = AT91SAM9G45_ID_DMA, | 51 | .end = AT91SAM9G45_ID_DMA, |
| 52 | .flags = IORESOURCE_IRQ, | 52 | .flags = IORESOURCE_IRQ, |
| @@ -835,9 +835,9 @@ static struct platform_device at91sam9g45_tcb1_device = { | |||
| 835 | static void __init at91_add_device_tc(void) | 835 | static void __init at91_add_device_tc(void) |
| 836 | { | 836 | { |
| 837 | /* this chip has one clock and irq for all six TC channels */ | 837 | /* this chip has one clock and irq for all six TC channels */ |
| 838 | at91_clock_associate("tcb_clk", &at91sam9g45_tcb0_device.dev, "t0_clk"); | 838 | at91_clock_associate("tcb0_clk", &at91sam9g45_tcb0_device.dev, "t0_clk"); |
| 839 | platform_device_register(&at91sam9g45_tcb0_device); | 839 | platform_device_register(&at91sam9g45_tcb0_device); |
| 840 | at91_clock_associate("tcb_clk", &at91sam9g45_tcb1_device.dev, "t0_clk"); | 840 | at91_clock_associate("tcb1_clk", &at91sam9g45_tcb1_device.dev, "t0_clk"); |
| 841 | platform_device_register(&at91sam9g45_tcb1_device); | 841 | platform_device_register(&at91sam9g45_tcb1_device); |
| 842 | } | 842 | } |
| 843 | #else | 843 | #else |
diff --git a/arch/arm/mach-at91/board-sam9261ek.c b/arch/arm/mach-at91/board-sam9261ek.c index c4c8865d52d7..65eb0943194f 100644 --- a/arch/arm/mach-at91/board-sam9261ek.c +++ b/arch/arm/mach-at91/board-sam9261ek.c | |||
| @@ -93,11 +93,12 @@ static struct resource dm9000_resource[] = { | |||
| 93 | .start = AT91_PIN_PC11, | 93 | .start = AT91_PIN_PC11, |
| 94 | .end = AT91_PIN_PC11, | 94 | .end = AT91_PIN_PC11, |
| 95 | .flags = IORESOURCE_IRQ | 95 | .flags = IORESOURCE_IRQ |
| 96 | | IORESOURCE_IRQ_LOWEDGE | IORESOURCE_IRQ_HIGHEDGE, | ||
| 96 | } | 97 | } |
| 97 | }; | 98 | }; |
| 98 | 99 | ||
| 99 | static struct dm9000_plat_data dm9000_platdata = { | 100 | static struct dm9000_plat_data dm9000_platdata = { |
| 100 | .flags = DM9000_PLATF_16BITONLY, | 101 | .flags = DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM, |
| 101 | }; | 102 | }; |
| 102 | 103 | ||
| 103 | static struct platform_device dm9000_device = { | 104 | static struct platform_device dm9000_device = { |
| @@ -168,17 +169,6 @@ static struct at91_udc_data __initdata ek_udc_data = { | |||
| 168 | 169 | ||
| 169 | 170 | ||
| 170 | /* | 171 | /* |
| 171 | * MCI (SD/MMC) | ||
| 172 | */ | ||
| 173 | static struct at91_mmc_data __initdata ek_mmc_data = { | ||
| 174 | .wire4 = 1, | ||
| 175 | // .det_pin = ... not connected | ||
| 176 | // .wp_pin = ... not connected | ||
| 177 | // .vcc_pin = ... not connected | ||
| 178 | }; | ||
| 179 | |||
| 180 | |||
| 181 | /* | ||
| 182 | * NAND flash | 172 | * NAND flash |
| 183 | */ | 173 | */ |
| 184 | static struct mtd_partition __initdata ek_nand_partition[] = { | 174 | static struct mtd_partition __initdata ek_nand_partition[] = { |
| @@ -246,6 +236,10 @@ static void __init ek_add_device_nand(void) | |||
| 246 | at91_add_device_nand(&ek_nand_data); | 236 | at91_add_device_nand(&ek_nand_data); |
| 247 | } | 237 | } |
| 248 | 238 | ||
| 239 | /* | ||
| 240 | * SPI related devices | ||
| 241 | */ | ||
| 242 | #if defined(CONFIG_SPI_ATMEL) || defined(CONFIG_SPI_ATMEL_MODULE) | ||
| 249 | 243 | ||
| 250 | /* | 244 | /* |
| 251 | * ADS7846 Touchscreen | 245 | * ADS7846 Touchscreen |
| @@ -356,6 +350,19 @@ static struct spi_board_info ek_spi_devices[] = { | |||
| 356 | #endif | 350 | #endif |
| 357 | }; | 351 | }; |
| 358 | 352 | ||
| 353 | #else /* CONFIG_SPI_ATMEL_* */ | ||
| 354 | /* spi0 and mmc/sd share the same PIO pins: cannot be used at the same time */ | ||
| 355 | |||
| 356 | /* | ||
| 357 | * MCI (SD/MMC) | ||
| 358 | * det_pin, wp_pin and vcc_pin are not connected | ||
| 359 | */ | ||
| 360 | static struct at91_mmc_data __initdata ek_mmc_data = { | ||
| 361 | .wire4 = 1, | ||
| 362 | }; | ||
| 363 | |||
| 364 | #endif /* CONFIG_SPI_ATMEL_* */ | ||
| 365 | |||
| 359 | 366 | ||
| 360 | /* | 367 | /* |
| 361 | * LCD Controller | 368 | * LCD Controller |
diff --git a/arch/arm/mach-at91/clock.c b/arch/arm/mach-at91/clock.c index 7f7da439341f..7525cee3983f 100644 --- a/arch/arm/mach-at91/clock.c +++ b/arch/arm/mach-at91/clock.c | |||
| @@ -501,7 +501,8 @@ postcore_initcall(at91_clk_debugfs_init); | |||
| 501 | int __init clk_register(struct clk *clk) | 501 | int __init clk_register(struct clk *clk) |
| 502 | { | 502 | { |
| 503 | if (clk_is_peripheral(clk)) { | 503 | if (clk_is_peripheral(clk)) { |
| 504 | clk->parent = &mck; | 504 | if (!clk->parent) |
| 505 | clk->parent = &mck; | ||
| 505 | clk->mode = pmc_periph_mode; | 506 | clk->mode = pmc_periph_mode; |
| 506 | list_add_tail(&clk->node, &clocks); | 507 | list_add_tail(&clk->node, &clocks); |
| 507 | } | 508 | } |
diff --git a/arch/arm/mach-ep93xx/clock.c b/arch/arm/mach-ep93xx/clock.c index 8bf3cec98cfa..4566bd1c8660 100644 --- a/arch/arm/mach-ep93xx/clock.c +++ b/arch/arm/mach-ep93xx/clock.c | |||
| @@ -560,4 +560,4 @@ static int __init ep93xx_clock_init(void) | |||
| 560 | clkdev_add_table(clocks, ARRAY_SIZE(clocks)); | 560 | clkdev_add_table(clocks, ARRAY_SIZE(clocks)); |
| 561 | return 0; | 561 | return 0; |
| 562 | } | 562 | } |
| 563 | arch_initcall(ep93xx_clock_init); | 563 | postcore_initcall(ep93xx_clock_init); |
diff --git a/arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c b/arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c index 91931dcb0689..4aaadc753d3e 100644 --- a/arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c +++ b/arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c | |||
| @@ -215,7 +215,7 @@ struct imx_ssi_platform_data eukrea_mbimxsd_ssi_pdata = { | |||
| 215 | * Add platform devices present on this baseboard and init | 215 | * Add platform devices present on this baseboard and init |
| 216 | * them from CPU side as far as required to use them later on | 216 | * them from CPU side as far as required to use them later on |
| 217 | */ | 217 | */ |
| 218 | void __init eukrea_mbimxsd_baseboard_init(void) | 218 | void __init eukrea_mbimxsd25_baseboard_init(void) |
| 219 | { | 219 | { |
| 220 | if (mxc_iomux_v3_setup_multiple_pads(eukrea_mbimxsd_pads, | 220 | if (mxc_iomux_v3_setup_multiple_pads(eukrea_mbimxsd_pads, |
| 221 | ARRAY_SIZE(eukrea_mbimxsd_pads))) | 221 | ARRAY_SIZE(eukrea_mbimxsd_pads))) |
diff --git a/arch/arm/mach-mx25/mach-cpuimx25.c b/arch/arm/mach-mx25/mach-cpuimx25.c index a5f0174290b4..e064bb3d6919 100644 --- a/arch/arm/mach-mx25/mach-cpuimx25.c +++ b/arch/arm/mach-mx25/mach-cpuimx25.c | |||
| @@ -147,8 +147,8 @@ static void __init eukrea_cpuimx25_init(void) | |||
| 147 | if (!otg_mode_host) | 147 | if (!otg_mode_host) |
| 148 | mxc_register_device(&otg_udc_device, &otg_device_pdata); | 148 | mxc_register_device(&otg_udc_device, &otg_device_pdata); |
| 149 | 149 | ||
| 150 | #ifdef CONFIG_MACH_EUKREA_MBIMXSD_BASEBOARD | 150 | #ifdef CONFIG_MACH_EUKREA_MBIMXSD25_BASEBOARD |
| 151 | eukrea_mbimxsd_baseboard_init(); | 151 | eukrea_mbimxsd25_baseboard_init(); |
| 152 | #endif | 152 | #endif |
| 153 | } | 153 | } |
| 154 | 154 | ||
diff --git a/arch/arm/mach-mx3/clock-imx35.c b/arch/arm/mach-mx3/clock-imx35.c index d3af0fdf8475..7a62e744a8b0 100644 --- a/arch/arm/mach-mx3/clock-imx35.c +++ b/arch/arm/mach-mx3/clock-imx35.c | |||
| @@ -155,7 +155,7 @@ static unsigned long get_rate_arm(void) | |||
| 155 | 155 | ||
| 156 | aad = &clk_consumer[(pdr0 >> 16) & 0xf]; | 156 | aad = &clk_consumer[(pdr0 >> 16) & 0xf]; |
| 157 | if (aad->sel) | 157 | if (aad->sel) |
| 158 | fref = fref * 2 / 3; | 158 | fref = fref * 3 / 4; |
| 159 | 159 | ||
| 160 | return fref / aad->arm; | 160 | return fref / aad->arm; |
| 161 | } | 161 | } |
| @@ -164,7 +164,7 @@ static unsigned long get_rate_ahb(struct clk *clk) | |||
| 164 | { | 164 | { |
| 165 | unsigned long pdr0 = __raw_readl(CCM_BASE + CCM_PDR0); | 165 | unsigned long pdr0 = __raw_readl(CCM_BASE + CCM_PDR0); |
| 166 | struct arm_ahb_div *aad; | 166 | struct arm_ahb_div *aad; |
| 167 | unsigned long fref = get_rate_mpll(); | 167 | unsigned long fref = get_rate_arm(); |
| 168 | 168 | ||
| 169 | aad = &clk_consumer[(pdr0 >> 16) & 0xf]; | 169 | aad = &clk_consumer[(pdr0 >> 16) & 0xf]; |
| 170 | 170 | ||
| @@ -176,16 +176,11 @@ static unsigned long get_rate_ipg(struct clk *clk) | |||
| 176 | return get_rate_ahb(NULL) >> 1; | 176 | return get_rate_ahb(NULL) >> 1; |
| 177 | } | 177 | } |
| 178 | 178 | ||
| 179 | static unsigned long get_3_3_div(unsigned long in) | ||
| 180 | { | ||
| 181 | return (((in >> 3) & 0x7) + 1) * ((in & 0x7) + 1); | ||
| 182 | } | ||
| 183 | |||
| 184 | static unsigned long get_rate_uart(struct clk *clk) | 179 | static unsigned long get_rate_uart(struct clk *clk) |
| 185 | { | 180 | { |
| 186 | unsigned long pdr3 = __raw_readl(CCM_BASE + CCM_PDR3); | 181 | unsigned long pdr3 = __raw_readl(CCM_BASE + CCM_PDR3); |
| 187 | unsigned long pdr4 = __raw_readl(CCM_BASE + CCM_PDR4); | 182 | unsigned long pdr4 = __raw_readl(CCM_BASE + CCM_PDR4); |
| 188 | unsigned long div = get_3_3_div(pdr4 >> 10); | 183 | unsigned long div = ((pdr4 >> 10) & 0x3f) + 1; |
| 189 | 184 | ||
| 190 | if (pdr3 & (1 << 14)) | 185 | if (pdr3 & (1 << 14)) |
| 191 | return get_rate_arm() / div; | 186 | return get_rate_arm() / div; |
| @@ -216,7 +211,7 @@ static unsigned long get_rate_sdhc(struct clk *clk) | |||
| 216 | break; | 211 | break; |
| 217 | } | 212 | } |
| 218 | 213 | ||
| 219 | return rate / get_3_3_div(div); | 214 | return rate / (div + 1); |
| 220 | } | 215 | } |
| 221 | 216 | ||
| 222 | static unsigned long get_rate_mshc(struct clk *clk) | 217 | static unsigned long get_rate_mshc(struct clk *clk) |
| @@ -270,7 +265,7 @@ static unsigned long get_rate_csi(struct clk *clk) | |||
| 270 | else | 265 | else |
| 271 | rate = get_rate_ppll(); | 266 | rate = get_rate_ppll(); |
| 272 | 267 | ||
| 273 | return rate / get_3_3_div((pdr2 >> 16) & 0x3f); | 268 | return rate / (((pdr2 >> 16) & 0x3f) + 1); |
| 274 | } | 269 | } |
| 275 | 270 | ||
| 276 | static unsigned long get_rate_otg(struct clk *clk) | 271 | static unsigned long get_rate_otg(struct clk *clk) |
| @@ -283,25 +278,51 @@ static unsigned long get_rate_otg(struct clk *clk) | |||
| 283 | else | 278 | else |
| 284 | rate = get_rate_ppll(); | 279 | rate = get_rate_ppll(); |
| 285 | 280 | ||
| 286 | return rate / get_3_3_div((pdr4 >> 22) & 0x3f); | 281 | return rate / (((pdr4 >> 22) & 0x3f) + 1); |
| 287 | } | 282 | } |
| 288 | 283 | ||
| 289 | static unsigned long get_rate_ipg_per(struct clk *clk) | 284 | static unsigned long get_rate_ipg_per(struct clk *clk) |
| 290 | { | 285 | { |
| 291 | unsigned long pdr0 = __raw_readl(CCM_BASE + CCM_PDR0); | 286 | unsigned long pdr0 = __raw_readl(CCM_BASE + CCM_PDR0); |
| 292 | unsigned long pdr4 = __raw_readl(CCM_BASE + CCM_PDR4); | 287 | unsigned long pdr4 = __raw_readl(CCM_BASE + CCM_PDR4); |
| 293 | unsigned long div1, div2; | 288 | unsigned long div; |
| 294 | 289 | ||
| 295 | if (pdr0 & (1 << 26)) { | 290 | if (pdr0 & (1 << 26)) { |
| 296 | div1 = (pdr4 >> 19) & 0x7; | 291 | div = (pdr4 >> 16) & 0x3f; |
| 297 | div2 = (pdr4 >> 16) & 0x7; | 292 | return get_rate_arm() / (div + 1); |
| 298 | return get_rate_arm() / ((div1 + 1) * (div2 + 1)); | ||
| 299 | } else { | 293 | } else { |
| 300 | div1 = (pdr0 >> 12) & 0x7; | 294 | div = (pdr0 >> 12) & 0x7; |
| 301 | return get_rate_ahb(NULL) / div1; | 295 | return get_rate_ahb(NULL) / (div + 1); |
| 302 | } | 296 | } |
| 303 | } | 297 | } |
| 304 | 298 | ||
| 299 | static unsigned long get_rate_hsp(struct clk *clk) | ||
| 300 | { | ||
| 301 | unsigned long hsp_podf = (__raw_readl(CCM_BASE + CCM_PDR0) >> 20) & 0x03; | ||
| 302 | unsigned long fref = get_rate_mpll(); | ||
| 303 | |||
| 304 | if (fref > 400 * 1000 * 1000) { | ||
| 305 | switch (hsp_podf) { | ||
| 306 | case 0: | ||
| 307 | return fref >> 2; | ||
| 308 | case 1: | ||
| 309 | return fref >> 3; | ||
| 310 | case 2: | ||
| 311 | return fref / 3; | ||
| 312 | } | ||
| 313 | } else { | ||
| 314 | switch (hsp_podf) { | ||
| 315 | case 0: | ||
| 316 | case 2: | ||
| 317 | return fref / 3; | ||
| 318 | case 1: | ||
| 319 | return fref / 6; | ||
| 320 | } | ||
| 321 | } | ||
| 322 | |||
| 323 | return 0; | ||
| 324 | } | ||
| 325 | |||
| 305 | static int clk_cgr_enable(struct clk *clk) | 326 | static int clk_cgr_enable(struct clk *clk) |
| 306 | { | 327 | { |
| 307 | u32 reg; | 328 | u32 reg; |
| @@ -359,7 +380,7 @@ DEFINE_CLOCK(i2c1_clk, 0, CCM_CGR1, 10, get_rate_ipg_per, NULL); | |||
| 359 | DEFINE_CLOCK(i2c2_clk, 1, CCM_CGR1, 12, get_rate_ipg_per, NULL); | 380 | DEFINE_CLOCK(i2c2_clk, 1, CCM_CGR1, 12, get_rate_ipg_per, NULL); |
| 360 | DEFINE_CLOCK(i2c3_clk, 2, CCM_CGR1, 14, get_rate_ipg_per, NULL); | 381 | DEFINE_CLOCK(i2c3_clk, 2, CCM_CGR1, 14, get_rate_ipg_per, NULL); |
| 361 | DEFINE_CLOCK(iomuxc_clk, 0, CCM_CGR1, 16, NULL, NULL); | 382 | DEFINE_CLOCK(iomuxc_clk, 0, CCM_CGR1, 16, NULL, NULL); |
| 362 | DEFINE_CLOCK(ipu_clk, 0, CCM_CGR1, 18, get_rate_ahb, NULL); | 383 | DEFINE_CLOCK(ipu_clk, 0, CCM_CGR1, 18, get_rate_hsp, NULL); |
| 363 | DEFINE_CLOCK(kpp_clk, 0, CCM_CGR1, 20, get_rate_ipg, NULL); | 384 | DEFINE_CLOCK(kpp_clk, 0, CCM_CGR1, 20, get_rate_ipg, NULL); |
| 364 | DEFINE_CLOCK(mlb_clk, 0, CCM_CGR1, 22, get_rate_ahb, NULL); | 385 | DEFINE_CLOCK(mlb_clk, 0, CCM_CGR1, 22, get_rate_ahb, NULL); |
| 365 | DEFINE_CLOCK(mshc_clk, 0, CCM_CGR1, 24, get_rate_mshc, NULL); | 386 | DEFINE_CLOCK(mshc_clk, 0, CCM_CGR1, 24, get_rate_mshc, NULL); |
| @@ -485,10 +506,10 @@ static struct clk_lookup lookups[] = { | |||
| 485 | 506 | ||
| 486 | int __init mx35_clocks_init() | 507 | int __init mx35_clocks_init() |
| 487 | { | 508 | { |
| 488 | unsigned int ll = 0; | 509 | unsigned int cgr2 = 3 << 26, cgr3 = 0; |
| 489 | 510 | ||
| 490 | #if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC) | 511 | #if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC) |
| 491 | ll = (3 << 16); | 512 | cgr2 |= 3 << 16; |
| 492 | #endif | 513 | #endif |
| 493 | 514 | ||
| 494 | clkdev_add_table(lookups, ARRAY_SIZE(lookups)); | 515 | clkdev_add_table(lookups, ARRAY_SIZE(lookups)); |
| @@ -499,8 +520,20 @@ int __init mx35_clocks_init() | |||
| 499 | __raw_writel((3 << 18), CCM_BASE + CCM_CGR0); | 520 | __raw_writel((3 << 18), CCM_BASE + CCM_CGR0); |
| 500 | __raw_writel((3 << 2) | (3 << 4) | (3 << 6) | (3 << 8) | (3 << 16), | 521 | __raw_writel((3 << 2) | (3 << 4) | (3 << 6) | (3 << 8) | (3 << 16), |
| 501 | CCM_BASE + CCM_CGR1); | 522 | CCM_BASE + CCM_CGR1); |
| 502 | __raw_writel((3 << 26) | ll, CCM_BASE + CCM_CGR2); | 523 | |
| 503 | __raw_writel(0, CCM_BASE + CCM_CGR3); | 524 | /* |
| 525 | * Check if we came up in internal boot mode. If yes, we need some | ||
| 526 | * extra clocks turned on, otherwise the MX35 boot ROM code will | ||
| 527 | * hang after a watchdog reset. | ||
| 528 | */ | ||
| 529 | if (!(__raw_readl(CCM_BASE + CCM_RCSR) & (3 << 10))) { | ||
| 530 | /* Additionally turn on UART1, SCC, and IIM clocks */ | ||
| 531 | cgr2 |= 3 << 16 | 3 << 4; | ||
| 532 | cgr3 |= 3 << 2; | ||
| 533 | } | ||
| 534 | |||
| 535 | __raw_writel(cgr2, CCM_BASE + CCM_CGR2); | ||
| 536 | __raw_writel(cgr3, CCM_BASE + CCM_CGR3); | ||
| 504 | 537 | ||
| 505 | mxc_timer_init(&gpt_clk, | 538 | mxc_timer_init(&gpt_clk, |
| 506 | MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR), MX35_INT_GPT); | 539 | MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR), MX35_INT_GPT); |
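The i.MX35 hunks above replace the old 3+3-bit divider helper with flat 6-bit dividers and add `get_rate_hsp()` so the IPU clock derives from the HSP divider rather than the AHB rate. The standalone sketch below redoes that arithmetic outside the kernel, assuming an example 532 MHz MPLL and an example PDR4 field value; only the bit layouts come from the hunk, everything else is illustrative.

```c
/* Recompute a few of the i.MX35 rates shown above in plain C. */
#include <stdio.h>

static unsigned long hsp_rate(unsigned long mpll, unsigned int hsp_podf)
{
    /* Mirrors the two switch tables in get_rate_hsp() above. */
    if (mpll > 400u * 1000 * 1000) {
        switch (hsp_podf) {
        case 0: return mpll >> 2;
        case 1: return mpll >> 3;
        case 2: return mpll / 3;
        }
    } else {
        switch (hsp_podf) {
        case 0:
        case 2: return mpll / 3;
        case 1: return mpll / 6;
        }
    }
    return 0;
}

/* Old helper treated the field as two 3-bit dividers; the fix uses one 6-bit field. */
static unsigned long div_3_3(unsigned long in) { return (((in >> 3) & 0x7) + 1) * ((in & 0x7) + 1); }
static unsigned long div_6(unsigned long in)   { return (in & 0x3f) + 1; }

int main(void)
{
    unsigned long mpll = 532000000;   /* example MPLL rate, not read from hardware */
    unsigned long uart_field = 0x0b;  /* example contents of PDR4[15:10] */

    printf("hsp rate (podf=0): %lu Hz\n", hsp_rate(mpll, 0));
    printf("uart divider, old 3+3 reading: /%lu\n", div_3_3(uart_field));
    printf("uart divider, fixed 6-bit reading: /%lu\n", div_6(uart_field));
    return 0;
}
```

For the same register value the two interpretations give different dividers, which is exactly why the UART, SDHC, CSI and OTG rates were wrong before this fix.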
diff --git a/arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c b/arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c index 1dc5004df866..f8f15e3ac7a0 100644 --- a/arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c +++ b/arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c | |||
| @@ -216,7 +216,7 @@ struct imx_ssi_platform_data eukrea_mbimxsd_ssi_pdata = { | |||
| 216 | * Add platform devices present on this baseboard and init | 216 | * Add platform devices present on this baseboard and init |
| 217 | * them from CPU side as far as required to use them later on | 217 | * them from CPU side as far as required to use them later on |
| 218 | */ | 218 | */ |
| 219 | void __init eukrea_mbimxsd_baseboard_init(void) | 219 | void __init eukrea_mbimxsd35_baseboard_init(void) |
| 220 | { | 220 | { |
| 221 | if (mxc_iomux_v3_setup_multiple_pads(eukrea_mbimxsd_pads, | 221 | if (mxc_iomux_v3_setup_multiple_pads(eukrea_mbimxsd_pads, |
| 222 | ARRAY_SIZE(eukrea_mbimxsd_pads))) | 222 | ARRAY_SIZE(eukrea_mbimxsd_pads))) |
diff --git a/arch/arm/mach-mx3/mach-cpuimx35.c b/arch/arm/mach-mx3/mach-cpuimx35.c index 9770a6a973be..2a4f8b781ba4 100644 --- a/arch/arm/mach-mx3/mach-cpuimx35.c +++ b/arch/arm/mach-mx3/mach-cpuimx35.c | |||
| @@ -201,8 +201,8 @@ static void __init mxc_board_init(void) | |||
| 201 | if (!otg_mode_host) | 201 | if (!otg_mode_host) |
| 202 | mxc_register_device(&mxc_otg_udc_device, &otg_device_pdata); | 202 | mxc_register_device(&mxc_otg_udc_device, &otg_device_pdata); |
| 203 | 203 | ||
| 204 | #ifdef CONFIG_MACH_EUKREA_MBIMXSD_BASEBOARD | 204 | #ifdef CONFIG_MACH_EUKREA_MBIMXSD35_BASEBOARD |
| 205 | eukrea_mbimxsd_baseboard_init(); | 205 | eukrea_mbimxsd35_baseboard_init(); |
| 206 | #endif | 206 | #endif |
| 207 | } | 207 | } |
| 208 | 208 | ||
diff --git a/arch/arm/mach-mx5/clock-mx51.c b/arch/arm/mach-mx5/clock-mx51.c index 6af69def357f..57c10a9926cc 100644 --- a/arch/arm/mach-mx5/clock-mx51.c +++ b/arch/arm/mach-mx5/clock-mx51.c | |||
| @@ -56,7 +56,7 @@ static void _clk_ccgr_disable(struct clk *clk) | |||
| 56 | { | 56 | { |
| 57 | u32 reg; | 57 | u32 reg; |
| 58 | reg = __raw_readl(clk->enable_reg); | 58 | reg = __raw_readl(clk->enable_reg); |
| 59 | reg &= ~(MXC_CCM_CCGRx_MOD_OFF << clk->enable_shift); | 59 | reg &= ~(MXC_CCM_CCGRx_CG_MASK << clk->enable_shift); |
| 60 | __raw_writel(reg, clk->enable_reg); | 60 | __raw_writel(reg, clk->enable_reg); |
| 61 | 61 | ||
| 62 | } | 62 | } |
diff --git a/arch/arm/mach-pxa/cpufreq-pxa2xx.c b/arch/arm/mach-pxa/cpufreq-pxa2xx.c index 268a9bc6be8a..50d5939a78f1 100644 --- a/arch/arm/mach-pxa/cpufreq-pxa2xx.c +++ b/arch/arm/mach-pxa/cpufreq-pxa2xx.c | |||
| @@ -398,7 +398,7 @@ static int pxa_set_target(struct cpufreq_policy *policy, | |||
| 398 | return 0; | 398 | return 0; |
| 399 | } | 399 | } |
| 400 | 400 | ||
| 401 | static __init int pxa_cpufreq_init(struct cpufreq_policy *policy) | 401 | static int pxa_cpufreq_init(struct cpufreq_policy *policy) |
| 402 | { | 402 | { |
| 403 | int i; | 403 | int i; |
| 404 | unsigned int freq; | 404 | unsigned int freq; |
diff --git a/arch/arm/mach-pxa/cpufreq-pxa3xx.c b/arch/arm/mach-pxa/cpufreq-pxa3xx.c index 27fa329d9a8b..0a0d0fe99220 100644 --- a/arch/arm/mach-pxa/cpufreq-pxa3xx.c +++ b/arch/arm/mach-pxa/cpufreq-pxa3xx.c | |||
| @@ -204,7 +204,7 @@ static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy, | |||
| 204 | return 0; | 204 | return 0; |
| 205 | } | 205 | } |
| 206 | 206 | ||
| 207 | static __init int pxa3xx_cpufreq_init(struct cpufreq_policy *policy) | 207 | static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy) |
| 208 | { | 208 | { |
| 209 | int ret = -EINVAL; | 209 | int ret = -EINVAL; |
| 210 | 210 | ||
diff --git a/arch/arm/mach-pxa/include/mach/mfp-pxa300.h b/arch/arm/mach-pxa/include/mach/mfp-pxa300.h index 7139e0dc26d1..4e1287070d21 100644 --- a/arch/arm/mach-pxa/include/mach/mfp-pxa300.h +++ b/arch/arm/mach-pxa/include/mach/mfp-pxa300.h | |||
| @@ -71,10 +71,10 @@ | |||
| 71 | #define GPIO46_CI_DD_7 MFP_CFG_DRV(GPIO46, AF0, DS04X) | 71 | #define GPIO46_CI_DD_7 MFP_CFG_DRV(GPIO46, AF0, DS04X) |
| 72 | #define GPIO47_CI_DD_8 MFP_CFG_DRV(GPIO47, AF1, DS04X) | 72 | #define GPIO47_CI_DD_8 MFP_CFG_DRV(GPIO47, AF1, DS04X) |
| 73 | #define GPIO48_CI_DD_9 MFP_CFG_DRV(GPIO48, AF1, DS04X) | 73 | #define GPIO48_CI_DD_9 MFP_CFG_DRV(GPIO48, AF1, DS04X) |
| 74 | #define GPIO52_CI_HSYNC MFP_CFG_DRV(GPIO52, AF0, DS04X) | ||
| 75 | #define GPIO51_CI_VSYNC MFP_CFG_DRV(GPIO51, AF0, DS04X) | ||
| 76 | #define GPIO49_CI_MCLK MFP_CFG_DRV(GPIO49, AF0, DS04X) | 74 | #define GPIO49_CI_MCLK MFP_CFG_DRV(GPIO49, AF0, DS04X) |
| 77 | #define GPIO50_CI_PCLK MFP_CFG_DRV(GPIO50, AF0, DS04X) | 75 | #define GPIO50_CI_PCLK MFP_CFG_DRV(GPIO50, AF0, DS04X) |
| 76 | #define GPIO51_CI_HSYNC MFP_CFG_DRV(GPIO51, AF0, DS04X) | ||
| 77 | #define GPIO52_CI_VSYNC MFP_CFG_DRV(GPIO52, AF0, DS04X) | ||
| 78 | 78 | ||
| 79 | /* KEYPAD */ | 79 | /* KEYPAD */ |
| 80 | #define GPIO3_KP_DKIN_6 MFP_CFG_LPM(GPIO3, AF2, FLOAT) | 80 | #define GPIO3_KP_DKIN_6 MFP_CFG_LPM(GPIO3, AF2, FLOAT) |
diff --git a/arch/arm/mach-shmobile/Makefile b/arch/arm/mach-shmobile/Makefile index 5e16b4c69222..ae416fe7daf2 100644 --- a/arch/arm/mach-shmobile/Makefile +++ b/arch/arm/mach-shmobile/Makefile | |||
| @@ -3,7 +3,7 @@ | |||
| 3 | # | 3 | # |
| 4 | 4 | ||
| 5 | # Common objects | 5 | # Common objects |
| 6 | obj-y := timer.o console.o clock.o | 6 | obj-y := timer.o console.o clock.o pm_runtime.o |
| 7 | 7 | ||
| 8 | # CPU objects | 8 | # CPU objects |
| 9 | obj-$(CONFIG_ARCH_SH7367) += setup-sh7367.o clock-sh7367.o intc-sh7367.o | 9 | obj-$(CONFIG_ARCH_SH7367) += setup-sh7367.o clock-sh7367.o intc-sh7367.o |
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c index 23d472f9525e..95935c83c306 100644 --- a/arch/arm/mach-shmobile/board-ap4evb.c +++ b/arch/arm/mach-shmobile/board-ap4evb.c | |||
| @@ -25,6 +25,7 @@ | |||
| 25 | #include <linux/platform_device.h> | 25 | #include <linux/platform_device.h> |
| 26 | #include <linux/delay.h> | 26 | #include <linux/delay.h> |
| 27 | #include <linux/mfd/sh_mobile_sdhi.h> | 27 | #include <linux/mfd/sh_mobile_sdhi.h> |
| 28 | #include <linux/mfd/tmio.h> | ||
| 28 | #include <linux/mmc/host.h> | 29 | #include <linux/mmc/host.h> |
| 29 | #include <linux/mtd/mtd.h> | 30 | #include <linux/mtd/mtd.h> |
| 30 | #include <linux/mtd/partitions.h> | 31 | #include <linux/mtd/partitions.h> |
| @@ -39,6 +40,7 @@ | |||
| 39 | #include <linux/sh_clk.h> | 40 | #include <linux/sh_clk.h> |
| 40 | #include <linux/gpio.h> | 41 | #include <linux/gpio.h> |
| 41 | #include <linux/input.h> | 42 | #include <linux/input.h> |
| 43 | #include <linux/leds.h> | ||
| 42 | #include <linux/input/sh_keysc.h> | 44 | #include <linux/input/sh_keysc.h> |
| 43 | #include <linux/usb/r8a66597.h> | 45 | #include <linux/usb/r8a66597.h> |
| 44 | 46 | ||
| @@ -307,6 +309,7 @@ static struct sh_mobile_sdhi_info sdhi1_info = { | |||
| 307 | .dma_slave_tx = SHDMA_SLAVE_SDHI1_TX, | 309 | .dma_slave_tx = SHDMA_SLAVE_SDHI1_TX, |
| 308 | .dma_slave_rx = SHDMA_SLAVE_SDHI1_RX, | 310 | .dma_slave_rx = SHDMA_SLAVE_SDHI1_RX, |
| 309 | .tmio_ocr_mask = MMC_VDD_165_195, | 311 | .tmio_ocr_mask = MMC_VDD_165_195, |
| 312 | .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE, | ||
| 310 | }; | 313 | }; |
| 311 | 314 | ||
| 312 | static struct resource sdhi1_resources[] = { | 315 | static struct resource sdhi1_resources[] = { |
| @@ -558,7 +561,7 @@ static struct resource fsi_resources[] = { | |||
| 558 | 561 | ||
| 559 | static struct platform_device fsi_device = { | 562 | static struct platform_device fsi_device = { |
| 560 | .name = "sh_fsi2", | 563 | .name = "sh_fsi2", |
| 561 | .id = 0, | 564 | .id = -1, |
| 562 | .num_resources = ARRAY_SIZE(fsi_resources), | 565 | .num_resources = ARRAY_SIZE(fsi_resources), |
| 563 | .resource = fsi_resources, | 566 | .resource = fsi_resources, |
| 564 | .dev = { | 567 | .dev = { |
| @@ -650,7 +653,44 @@ static struct platform_device hdmi_device = { | |||
| 650 | }, | 653 | }, |
| 651 | }; | 654 | }; |
| 652 | 655 | ||
| 656 | static struct gpio_led ap4evb_leds[] = { | ||
| 657 | { | ||
| 658 | .name = "led4", | ||
| 659 | .gpio = GPIO_PORT185, | ||
| 660 | .default_state = LEDS_GPIO_DEFSTATE_ON, | ||
| 661 | }, | ||
| 662 | { | ||
| 663 | .name = "led2", | ||
| 664 | .gpio = GPIO_PORT186, | ||
| 665 | .default_state = LEDS_GPIO_DEFSTATE_ON, | ||
| 666 | }, | ||
| 667 | { | ||
| 668 | .name = "led3", | ||
| 669 | .gpio = GPIO_PORT187, | ||
| 670 | .default_state = LEDS_GPIO_DEFSTATE_ON, | ||
| 671 | }, | ||
| 672 | { | ||
| 673 | .name = "led1", | ||
| 674 | .gpio = GPIO_PORT188, | ||
| 675 | .default_state = LEDS_GPIO_DEFSTATE_ON, | ||
| 676 | } | ||
| 677 | }; | ||
| 678 | |||
| 679 | static struct gpio_led_platform_data ap4evb_leds_pdata = { | ||
| 680 | .num_leds = ARRAY_SIZE(ap4evb_leds), | ||
| 681 | .leds = ap4evb_leds, | ||
| 682 | }; | ||
| 683 | |||
| 684 | static struct platform_device leds_device = { | ||
| 685 | .name = "leds-gpio", | ||
| 686 | .id = 0, | ||
| 687 | .dev = { | ||
| 688 | .platform_data = &ap4evb_leds_pdata, | ||
| 689 | }, | ||
| 690 | }; | ||
| 691 | |||
| 653 | static struct platform_device *ap4evb_devices[] __initdata = { | 692 | static struct platform_device *ap4evb_devices[] __initdata = { |
| 693 | &leds_device, | ||
| 654 | &nor_flash_device, | 694 | &nor_flash_device, |
| 655 | &smc911x_device, | 695 | &smc911x_device, |
| 656 | &sdhi0_device, | 696 | &sdhi0_device, |
| @@ -840,20 +880,6 @@ static void __init ap4evb_init(void) | |||
| 840 | gpio_request(GPIO_FN_CS5A, NULL); | 880 | gpio_request(GPIO_FN_CS5A, NULL); |
| 841 | gpio_request(GPIO_FN_IRQ6_39, NULL); | 881 | gpio_request(GPIO_FN_IRQ6_39, NULL); |
| 842 | 882 | ||
| 843 | /* enable LED 1 - 4 */ | ||
| 844 | gpio_request(GPIO_PORT185, NULL); | ||
| 845 | gpio_request(GPIO_PORT186, NULL); | ||
| 846 | gpio_request(GPIO_PORT187, NULL); | ||
| 847 | gpio_request(GPIO_PORT188, NULL); | ||
| 848 | gpio_direction_output(GPIO_PORT185, 1); | ||
| 849 | gpio_direction_output(GPIO_PORT186, 1); | ||
| 850 | gpio_direction_output(GPIO_PORT187, 1); | ||
| 851 | gpio_direction_output(GPIO_PORT188, 1); | ||
| 852 | gpio_export(GPIO_PORT185, 0); | ||
| 853 | gpio_export(GPIO_PORT186, 0); | ||
| 854 | gpio_export(GPIO_PORT187, 0); | ||
| 855 | gpio_export(GPIO_PORT188, 0); | ||
| 856 | |||
| 857 | /* enable Debug switch (S6) */ | 883 | /* enable Debug switch (S6) */ |
| 858 | gpio_request(GPIO_PORT32, NULL); | 884 | gpio_request(GPIO_PORT32, NULL); |
| 859 | gpio_request(GPIO_PORT33, NULL); | 885 | gpio_request(GPIO_PORT33, NULL); |
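The AP4EVB hunks above stop toggling the four LED GPIOs by hand in `ap4evb_init()` and instead describe them in a `gpio_led` table handed to the generic leds-gpio driver. A small sketch of the same data-driven idea follows, using simplified structs as stand-ins for the `<linux/leds.h>` types.

```c
/* Data-driven device description, modelled on the gpio_led table above. */
#include <stdio.h>

struct demo_led {
    const char *name;
    int gpio;
    int default_on;
};

static const struct demo_led board_leds[] = {
    { "led1", 188, 1 },
    { "led2", 186, 1 },
    { "led3", 187, 1 },
    { "led4", 185, 1 },
};

int main(void)
{
    /* A driver walks this table once instead of open-coding
     * gpio_request()/gpio_direction_output() per LED, as the old code did. */
    for (size_t i = 0; i < sizeof(board_leds) / sizeof(board_leds[0]); i++)
        printf("%s: GPIO %d, default %s\n", board_leds[i].name,
               board_leds[i].gpio, board_leds[i].default_on ? "on" : "off");
    return 0;
}
```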
diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c index fb4e9b1d788e..759468992ad2 100644 --- a/arch/arm/mach-shmobile/clock-sh7372.c +++ b/arch/arm/mach-shmobile/clock-sh7372.c | |||
| @@ -286,7 +286,6 @@ static struct clk_ops pllc2_clk_ops = { | |||
| 286 | 286 | ||
| 287 | struct clk pllc2_clk = { | 287 | struct clk pllc2_clk = { |
| 288 | .ops = &pllc2_clk_ops, | 288 | .ops = &pllc2_clk_ops, |
| 289 | .flags = CLK_ENABLE_ON_INIT, | ||
| 290 | .parent = &extal1_div2_clk, | 289 | .parent = &extal1_div2_clk, |
| 291 | .freq_table = pllc2_freq_table, | 290 | .freq_table = pllc2_freq_table, |
| 292 | .parent_table = pllc2_parent, | 291 | .parent_table = pllc2_parent, |
| @@ -395,7 +394,7 @@ static struct clk div6_reparent_clks[DIV6_REPARENT_NR] = { | |||
| 395 | 394 | ||
| 396 | enum { MSTP001, | 395 | enum { MSTP001, |
| 397 | MSTP131, MSTP130, | 396 | MSTP131, MSTP130, |
| 398 | MSTP129, MSTP128, | 397 | MSTP129, MSTP128, MSTP127, MSTP126, |
| 399 | MSTP118, MSTP117, MSTP116, | 398 | MSTP118, MSTP117, MSTP116, |
| 400 | MSTP106, MSTP101, MSTP100, | 399 | MSTP106, MSTP101, MSTP100, |
| 401 | MSTP223, | 400 | MSTP223, |
| @@ -413,6 +412,8 @@ static struct clk mstp_clks[MSTP_NR] = { | |||
| 413 | [MSTP130] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 30, 0), /* VEU2 */ | 412 | [MSTP130] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 30, 0), /* VEU2 */ |
| 414 | [MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* VEU1 */ | 413 | [MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* VEU1 */ |
| 415 | [MSTP128] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 28, 0), /* VEU0 */ | 414 | [MSTP128] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 28, 0), /* VEU0 */ |
| 415 | [MSTP127] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 27, 0), /* CEU */ | ||
| 416 | [MSTP126] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 26, 0), /* CSI2 */ | ||
| 416 | [MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX */ | 417 | [MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX */ |
| 417 | [MSTP117] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 17, 0), /* LCDC1 */ | 418 | [MSTP117] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 17, 0), /* LCDC1 */ |
| 418 | [MSTP116] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 16, 0), /* IIC0 */ | 419 | [MSTP116] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 16, 0), /* IIC0 */ |
| @@ -428,7 +429,7 @@ static struct clk mstp_clks[MSTP_NR] = { | |||
| 428 | [MSTP201] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 1, 0), /* SCIFA3 */ | 429 | [MSTP201] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 1, 0), /* SCIFA3 */ |
| 429 | [MSTP200] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 0, 0), /* SCIFA4 */ | 430 | [MSTP200] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 0, 0), /* SCIFA4 */ |
| 430 | [MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */ | 431 | [MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */ |
| 431 | [MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, CLK_ENABLE_ON_INIT), /* FSIA */ | 432 | [MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, 0), /* FSIA */ |
| 432 | [MSTP323] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 23, 0), /* IIC1 */ | 433 | [MSTP323] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 23, 0), /* IIC1 */ |
| 433 | [MSTP322] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 22, 0), /* USB0 */ | 434 | [MSTP322] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 22, 0), /* USB0 */ |
| 434 | [MSTP314] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 14, 0), /* SDHI0 */ | 435 | [MSTP314] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 14, 0), /* SDHI0 */ |
| @@ -498,6 +499,8 @@ static struct clk_lookup lookups[] = { | |||
| 498 | CLKDEV_DEV_ID("uio_pdrv_genirq.3", &mstp_clks[MSTP130]), /* VEU2 */ | 499 | CLKDEV_DEV_ID("uio_pdrv_genirq.3", &mstp_clks[MSTP130]), /* VEU2 */ |
| 499 | CLKDEV_DEV_ID("uio_pdrv_genirq.2", &mstp_clks[MSTP129]), /* VEU1 */ | 500 | CLKDEV_DEV_ID("uio_pdrv_genirq.2", &mstp_clks[MSTP129]), /* VEU1 */ |
| 500 | CLKDEV_DEV_ID("uio_pdrv_genirq.1", &mstp_clks[MSTP128]), /* VEU0 */ | 501 | CLKDEV_DEV_ID("uio_pdrv_genirq.1", &mstp_clks[MSTP128]), /* VEU0 */ |
| 502 | CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[MSTP127]), /* CEU */ | ||
| 503 | CLKDEV_DEV_ID("sh-mobile-csi2.0", &mstp_clks[MSTP126]), /* CSI2 */ | ||
| 501 | CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */ | 504 | CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */ |
| 502 | CLKDEV_DEV_ID("sh_mobile_lcdc_fb.1", &mstp_clks[MSTP117]), /* LCDC1 */ | 505 | CLKDEV_DEV_ID("sh_mobile_lcdc_fb.1", &mstp_clks[MSTP117]), /* LCDC1 */ |
| 503 | CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* IIC0 */ | 506 | CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* IIC0 */ |
diff --git a/arch/arm/mach-shmobile/clock.c b/arch/arm/mach-shmobile/clock.c index b7c705a213a2..6b7c7c42bc8f 100644 --- a/arch/arm/mach-shmobile/clock.c +++ b/arch/arm/mach-shmobile/clock.c | |||
| @@ -1,8 +1,10 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * SH-Mobile Timer | 2 | * SH-Mobile Clock Framework |
| 3 | * | 3 | * |
| 4 | * Copyright (C) 2010 Magnus Damm | 4 | * Copyright (C) 2010 Magnus Damm |
| 5 | * | 5 | * |
| 6 | * Used together with arch/arm/common/clkdev.c and drivers/sh/clk.c. | ||
| 7 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
| 7 | * it under the terms of the GNU General Public License as published by | 9 | * it under the terms of the GNU General Public License as published by |
| 8 | * the Free Software Foundation; version 2 of the License. | 10 | * the Free Software Foundation; version 2 of the License. |
diff --git a/arch/arm/mach-shmobile/pm_runtime.c b/arch/arm/mach-shmobile/pm_runtime.c new file mode 100644 index 000000000000..94912d3944d3 --- /dev/null +++ b/arch/arm/mach-shmobile/pm_runtime.c | |||
| @@ -0,0 +1,169 @@ | |||
| 1 | /* | ||
| 2 | * arch/arm/mach-shmobile/pm_runtime.c | ||
| 3 | * | ||
| 4 | * Runtime PM support code for SuperH Mobile ARM | ||
| 5 | * | ||
| 6 | * Copyright (C) 2009-2010 Magnus Damm | ||
| 7 | * | ||
| 8 | * This file is subject to the terms and conditions of the GNU General Public | ||
| 9 | * License. See the file "COPYING" in the main directory of this archive | ||
| 10 | * for more details. | ||
| 11 | */ | ||
| 12 | |||
| 13 | #include <linux/init.h> | ||
| 14 | #include <linux/kernel.h> | ||
| 15 | #include <linux/io.h> | ||
| 16 | #include <linux/pm_runtime.h> | ||
| 17 | #include <linux/platform_device.h> | ||
| 18 | #include <linux/clk.h> | ||
| 19 | #include <linux/sh_clk.h> | ||
| 20 | #include <linux/bitmap.h> | ||
| 21 | |||
| 22 | #ifdef CONFIG_PM_RUNTIME | ||
| 23 | #define BIT_ONCE 0 | ||
| 24 | #define BIT_ACTIVE 1 | ||
| 25 | #define BIT_CLK_ENABLED 2 | ||
| 26 | |||
| 27 | struct pm_runtime_data { | ||
| 28 | unsigned long flags; | ||
| 29 | struct clk *clk; | ||
| 30 | }; | ||
| 31 | |||
| 32 | static void __devres_release(struct device *dev, void *res) | ||
| 33 | { | ||
| 34 | struct pm_runtime_data *prd = res; | ||
| 35 | |||
| 36 | dev_dbg(dev, "__devres_release()\n"); | ||
| 37 | |||
| 38 | if (test_bit(BIT_CLK_ENABLED, &prd->flags)) | ||
| 39 | clk_disable(prd->clk); | ||
| 40 | |||
| 41 | if (test_bit(BIT_ACTIVE, &prd->flags)) | ||
| 42 | clk_put(prd->clk); | ||
| 43 | } | ||
| 44 | |||
| 45 | static struct pm_runtime_data *__to_prd(struct device *dev) | ||
| 46 | { | ||
| 47 | return devres_find(dev, __devres_release, NULL, NULL); | ||
| 48 | } | ||
| 49 | |||
| 50 | static void platform_pm_runtime_init(struct device *dev, | ||
| 51 | struct pm_runtime_data *prd) | ||
| 52 | { | ||
| 53 | if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags)) { | ||
| 54 | prd->clk = clk_get(dev, NULL); | ||
| 55 | if (!IS_ERR(prd->clk)) { | ||
| 56 | set_bit(BIT_ACTIVE, &prd->flags); | ||
| 57 | dev_info(dev, "clocks managed by runtime pm\n"); | ||
| 58 | } | ||
| 59 | } | ||
| 60 | } | ||
| 61 | |||
| 62 | static void platform_pm_runtime_bug(struct device *dev, | ||
| 63 | struct pm_runtime_data *prd) | ||
| 64 | { | ||
| 65 | if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags)) | ||
| 66 | dev_err(dev, "runtime pm suspend before resume\n"); | ||
| 67 | } | ||
| 68 | |||
| 69 | int platform_pm_runtime_suspend(struct device *dev) | ||
| 70 | { | ||
| 71 | struct pm_runtime_data *prd = __to_prd(dev); | ||
| 72 | |||
| 73 | dev_dbg(dev, "platform_pm_runtime_suspend()\n"); | ||
| 74 | |||
| 75 | platform_pm_runtime_bug(dev, prd); | ||
| 76 | |||
| 77 | if (prd && test_bit(BIT_ACTIVE, &prd->flags)) { | ||
| 78 | clk_disable(prd->clk); | ||
| 79 | clear_bit(BIT_CLK_ENABLED, &prd->flags); | ||
| 80 | } | ||
| 81 | |||
| 82 | return 0; | ||
| 83 | } | ||
| 84 | |||
| 85 | int platform_pm_runtime_resume(struct device *dev) | ||
| 86 | { | ||
| 87 | struct pm_runtime_data *prd = __to_prd(dev); | ||
| 88 | |||
| 89 | dev_dbg(dev, "platform_pm_runtime_resume()\n"); | ||
| 90 | |||
| 91 | platform_pm_runtime_init(dev, prd); | ||
| 92 | |||
| 93 | if (prd && test_bit(BIT_ACTIVE, &prd->flags)) { | ||
| 94 | clk_enable(prd->clk); | ||
| 95 | set_bit(BIT_CLK_ENABLED, &prd->flags); | ||
| 96 | } | ||
| 97 | |||
| 98 | return 0; | ||
| 99 | } | ||
| 100 | |||
| 101 | int platform_pm_runtime_idle(struct device *dev) | ||
| 102 | { | ||
| 103 | /* suspend synchronously to disable clocks immediately */ | ||
| 104 | return pm_runtime_suspend(dev); | ||
| 105 | } | ||
| 106 | |||
| 107 | static int platform_bus_notify(struct notifier_block *nb, | ||
| 108 | unsigned long action, void *data) | ||
| 109 | { | ||
| 110 | struct device *dev = data; | ||
| 111 | struct pm_runtime_data *prd; | ||
| 112 | |||
| 113 | dev_dbg(dev, "platform_bus_notify() %ld !\n", action); | ||
| 114 | |||
| 115 | if (action == BUS_NOTIFY_BIND_DRIVER) { | ||
| 116 | prd = devres_alloc(__devres_release, sizeof(*prd), GFP_KERNEL); | ||
| 117 | if (prd) | ||
| 118 | devres_add(dev, prd); | ||
| 119 | else | ||
| 120 | dev_err(dev, "unable to alloc memory for runtime pm\n"); | ||
| 121 | } | ||
| 122 | |||
| 123 | return 0; | ||
| 124 | } | ||
| 125 | |||
| 126 | #else /* CONFIG_PM_RUNTIME */ | ||
| 127 | |||
| 128 | static int platform_bus_notify(struct notifier_block *nb, | ||
| 129 | unsigned long action, void *data) | ||
| 130 | { | ||
| 131 | struct device *dev = data; | ||
| 132 | struct clk *clk; | ||
| 133 | |||
| 134 | dev_dbg(dev, "platform_bus_notify() %ld !\n", action); | ||
| 135 | |||
| 136 | switch (action) { | ||
| 137 | case BUS_NOTIFY_BIND_DRIVER: | ||
| 138 | clk = clk_get(dev, NULL); | ||
| 139 | if (!IS_ERR(clk)) { | ||
| 140 | clk_enable(clk); | ||
| 141 | clk_put(clk); | ||
| 142 | dev_info(dev, "runtime pm disabled, clock forced on\n"); | ||
| 143 | } | ||
| 144 | break; | ||
| 145 | case BUS_NOTIFY_UNBOUND_DRIVER: | ||
| 146 | clk = clk_get(dev, NULL); | ||
| 147 | if (!IS_ERR(clk)) { | ||
| 148 | clk_disable(clk); | ||
| 149 | clk_put(clk); | ||
| 150 | dev_info(dev, "runtime pm disabled, clock forced off\n"); | ||
| 151 | } | ||
| 152 | break; | ||
| 153 | } | ||
| 154 | |||
| 155 | return 0; | ||
| 156 | } | ||
| 157 | |||
| 158 | #endif /* CONFIG_PM_RUNTIME */ | ||
| 159 | |||
| 160 | static struct notifier_block platform_bus_notifier = { | ||
| 161 | .notifier_call = platform_bus_notify | ||
| 162 | }; | ||
| 163 | |||
| 164 | static int __init sh_pm_runtime_init(void) | ||
| 165 | { | ||
| 166 | bus_register_notifier(&platform_bus_type, &platform_bus_notifier); | ||
| 167 | return 0; | ||
| 168 | } | ||
| 169 | core_initcall(sh_pm_runtime_init); | ||
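The new pm_runtime.c keeps a tiny per-device state (the ONCE/ACTIVE/CLK_ENABLED bits) in a devres-managed structure and toggles the device clock from the runtime-PM suspend/resume hooks. The toy model below shows only that bit bookkeeping, with `clk_enable()`/`clk_disable()` reduced to stubs; none of the kernel APIs (devres, clk framework, bus notifiers) are used, so treat it as a sketch of the state machine rather than the driver itself.

```c
/* Toy model of the flag bookkeeping in the runtime PM code above. */
#include <stdio.h>

#define BIT_ONCE        (1u << 0)
#define BIT_ACTIVE      (1u << 1)
#define BIT_CLK_ENABLED (1u << 2)

struct dev_state {
    const char *name;
    unsigned int flags;
};

static void clk_enable(struct dev_state *d)  { printf("%s: clk on\n", d->name); }
static void clk_disable(struct dev_state *d) { printf("%s: clk off\n", d->name); }

static void runtime_resume(struct dev_state *d)
{
    if (!(d->flags & BIT_ONCE))            /* first resume: look up the clock */
        d->flags |= BIT_ONCE | BIT_ACTIVE; /* pretend clk_get() succeeded */

    if (d->flags & BIT_ACTIVE) {
        clk_enable(d);
        d->flags |= BIT_CLK_ENABLED;
    }
}

static void runtime_suspend(struct dev_state *d)
{
    if (d->flags & BIT_ACTIVE) {
        clk_disable(d);
        d->flags &= ~BIT_CLK_ENABLED;
    }
}

int main(void)
{
    struct dev_state sdhi = { "sdhi0", 0 };

    runtime_resume(&sdhi);   /* driver bound / device in use: clock comes on */
    runtime_suspend(&sdhi);  /* device idle: clock goes off again */
    return 0;
}
```

The !CONFIG_PM_RUNTIME branch in the patch takes the opposite approach: with no runtime PM available it simply forces the clock on at driver bind and off at unbind.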
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 33c3f570aaa0..a0a2928ae4dd 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig | |||
| @@ -398,7 +398,7 @@ config CPU_V6 | |||
| 398 | # ARMv6k | 398 | # ARMv6k |
| 399 | config CPU_32v6K | 399 | config CPU_32v6K |
| 400 | bool "Support ARM V6K processor extensions" if !SMP | 400 | bool "Support ARM V6K processor extensions" if !SMP |
| 401 | depends on CPU_V6 | 401 | depends on CPU_V6 || CPU_V7 |
| 402 | default y if SMP && !(ARCH_MX3 || ARCH_OMAP2) | 402 | default y if SMP && !(ARCH_MX3 || ARCH_OMAP2) |
| 403 | help | 403 | help |
| 404 | Say Y here if your ARMv6 processor supports the 'K' extension. | 404 | Say Y here if your ARMv6 processor supports the 'K' extension. |
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index c704eed63c5d..4bc43e535d3b 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
| @@ -229,6 +229,8 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot) | |||
| 229 | } | 229 | } |
| 230 | } while (size -= PAGE_SIZE); | 230 | } while (size -= PAGE_SIZE); |
| 231 | 231 | ||
| 232 | dsb(); | ||
| 233 | |||
| 232 | return (void *)c->vm_start; | 234 | return (void *)c->vm_start; |
| 233 | } | 235 | } |
| 234 | return NULL; | 236 | return NULL; |
diff --git a/arch/arm/plat-mxc/Kconfig b/arch/arm/plat-mxc/Kconfig index 0527e65318f4..6785db4179b8 100644 --- a/arch/arm/plat-mxc/Kconfig +++ b/arch/arm/plat-mxc/Kconfig | |||
| @@ -43,6 +43,7 @@ config ARCH_MXC91231 | |||
| 43 | config ARCH_MX5 | 43 | config ARCH_MX5 |
| 44 | bool "MX5-based" | 44 | bool "MX5-based" |
| 45 | select CPU_V7 | 45 | select CPU_V7 |
| 46 | select ARM_L1_CACHE_SHIFT_6 | ||
| 46 | help | 47 | help |
| 47 | This enables support for systems based on the Freescale i.MX51 family | 48 | This enables support for systems based on the Freescale i.MX51 family |
| 48 | 49 | ||
diff --git a/arch/arm/plat-mxc/include/mach/eukrea-baseboards.h b/arch/arm/plat-mxc/include/mach/eukrea-baseboards.h index 634e3f4c454d..656acb45d434 100644 --- a/arch/arm/plat-mxc/include/mach/eukrea-baseboards.h +++ b/arch/arm/plat-mxc/include/mach/eukrea-baseboards.h | |||
| @@ -37,9 +37,9 @@ | |||
| 37 | * mach-mx5/eukrea_mbimx51-baseboard.c for cpuimx51 | 37 | * mach-mx5/eukrea_mbimx51-baseboard.c for cpuimx51 |
| 38 | */ | 38 | */ |
| 39 | 39 | ||
| 40 | extern void eukrea_mbimx25_baseboard_init(void); | 40 | extern void eukrea_mbimxsd25_baseboard_init(void); |
| 41 | extern void eukrea_mbimx27_baseboard_init(void); | 41 | extern void eukrea_mbimx27_baseboard_init(void); |
| 42 | extern void eukrea_mbimx35_baseboard_init(void); | 42 | extern void eukrea_mbimxsd35_baseboard_init(void); |
| 43 | extern void eukrea_mbimx51_baseboard_init(void); | 43 | extern void eukrea_mbimx51_baseboard_init(void); |
| 44 | 44 | ||
| 45 | #endif | 45 | #endif |
diff --git a/arch/arm/plat-mxc/tzic.c b/arch/arm/plat-mxc/tzic.c index b3da9aad4295..3703ab28257f 100644 --- a/arch/arm/plat-mxc/tzic.c +++ b/arch/arm/plat-mxc/tzic.c | |||
| @@ -164,8 +164,9 @@ int tzic_enable_wake(int is_idle) | |||
| 164 | return -EAGAIN; | 164 | return -EAGAIN; |
| 165 | 165 | ||
| 166 | for (i = 0; i < 4; i++) { | 166 | for (i = 0; i < 4; i++) { |
| 167 | v = is_idle ? __raw_readl(TZIC_ENSET0(i)) : wakeup_intr[i]; | 167 | v = is_idle ? __raw_readl(tzic_base + TZIC_ENSET0(i)) : |
| 168 | __raw_writel(v, TZIC_WAKEUP0(i)); | 168 | wakeup_intr[i]; |
| 169 | __raw_writel(v, tzic_base + TZIC_WAKEUP0(i)); | ||
| 169 | } | 170 | } |
| 170 | 171 | ||
| 171 | return 0; | 172 | return 0; |
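The tzic.c fix above adds the missing `tzic_base +` so the ENSET0/WAKEUP0 offsets are applied to the mapped register block instead of being dereferenced bare. The sketch below shows the base-plus-offset access pattern with an ordinary array standing in for the `ioremap()`ed region; the offsets are illustrative, not the real TZIC layout.

```c
/* Base + offset register access, standalone. */
#include <stdio.h>
#include <stdint.h>

#define TZIC_ENSET0(i)  (0x100 + (i) * 4)   /* offsets relative to the base */
#define TZIC_WAKEUP0(i) (0x200 + (i) * 4)

static uint32_t tzic_block[0x400];                 /* stands in for the mapped region */
static uint8_t *tzic_base = (uint8_t *)tzic_block;

static uint32_t demo_readl(const void *addr)    { return *(const volatile uint32_t *)addr; }
static void demo_writel(uint32_t v, void *addr) { *(volatile uint32_t *)addr = v; }

int main(void)
{
    int i;

    for (i = 0; i < 4; i++) {
        /* base + offset, as in the corrected code; the old code effectively
         * used the bare offset as an address. */
        uint32_t v = demo_readl(tzic_base + TZIC_ENSET0(i));
        demo_writel(v, tzic_base + TZIC_WAKEUP0(i));
    }
    printf("copied 4 enable words into the wakeup registers\n");
    return 0;
}
```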
diff --git a/arch/arm/plat-pxa/pwm.c b/arch/arm/plat-pxa/pwm.c index 0732c6c8d511..ef32686feef9 100644 --- a/arch/arm/plat-pxa/pwm.c +++ b/arch/arm/plat-pxa/pwm.c | |||
| @@ -176,7 +176,7 @@ static inline void __add_pwm(struct pwm_device *pwm) | |||
| 176 | 176 | ||
| 177 | static int __devinit pwm_probe(struct platform_device *pdev) | 177 | static int __devinit pwm_probe(struct platform_device *pdev) |
| 178 | { | 178 | { |
| 179 | struct platform_device_id *id = platform_get_device_id(pdev); | 179 | const struct platform_device_id *id = platform_get_device_id(pdev); |
| 180 | struct pwm_device *pwm, *secondary = NULL; | 180 | struct pwm_device *pwm, *secondary = NULL; |
| 181 | struct resource *r; | 181 | struct resource *r; |
| 182 | int ret = 0; | 182 | int ret = 0; |
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types index 48cbdcb6bbd4..55590a4d87c9 100644 --- a/arch/arm/tools/mach-types +++ b/arch/arm/tools/mach-types | |||
| @@ -12,7 +12,7 @@ | |||
| 12 | # | 12 | # |
| 13 | # http://www.arm.linux.org.uk/developer/machines/?action=new | 13 | # http://www.arm.linux.org.uk/developer/machines/?action=new |
| 14 | # | 14 | # |
| 15 | # Last update: Mon Jul 12 21:10:14 2010 | 15 | # Last update: Thu Sep 9 22:43:01 2010 |
| 16 | # | 16 | # |
| 17 | # machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number | 17 | # machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number |
| 18 | # | 18 | # |
| @@ -2622,7 +2622,7 @@ kraken MACH_KRAKEN KRAKEN 2634 | |||
| 2622 | gw2388 MACH_GW2388 GW2388 2635 | 2622 | gw2388 MACH_GW2388 GW2388 2635 |
| 2623 | jadecpu MACH_JADECPU JADECPU 2636 | 2623 | jadecpu MACH_JADECPU JADECPU 2636 |
| 2624 | carlisle MACH_CARLISLE CARLISLE 2637 | 2624 | carlisle MACH_CARLISLE CARLISLE 2637 |
| 2625 | lux_sf9 MACH_LUX_SFT9 LUX_SFT9 2638 | 2625 | lux_sf9 MACH_LUX_SF9 LUX_SF9 2638 |
| 2626 | nemid_tb MACH_NEMID_TB NEMID_TB 2639 | 2626 | nemid_tb MACH_NEMID_TB NEMID_TB 2639 |
| 2627 | terrier MACH_TERRIER TERRIER 2640 | 2627 | terrier MACH_TERRIER TERRIER 2640 |
| 2628 | turbot MACH_TURBOT TURBOT 2641 | 2628 | turbot MACH_TURBOT TURBOT 2641 |
| @@ -2950,3 +2950,97 @@ davinci_dm365_dvr MACH_DAVINCI_DM365_DVR DAVINCI_DM365_DVR 2963 | |||
| 2950 | netviz MACH_NETVIZ NETVIZ 2964 | 2950 | netviz MACH_NETVIZ NETVIZ 2964 |
| 2951 | flexibity MACH_FLEXIBITY FLEXIBITY 2965 | 2951 | flexibity MACH_FLEXIBITY FLEXIBITY 2965 |
| 2952 | wlan_computer MACH_WLAN_COMPUTER WLAN_COMPUTER 2966 | 2952 | wlan_computer MACH_WLAN_COMPUTER WLAN_COMPUTER 2966 |
| 2953 | lpc24xx MACH_LPC24XX LPC24XX 2967 | ||
| 2954 | spica MACH_SPICA SPICA 2968 | ||
| 2955 | gpsdisplay MACH_GPSDISPLAY GPSDISPLAY 2969 | ||
| 2956 | bipnet MACH_BIPNET BIPNET 2970 | ||
| 2957 | overo_ctu_inertial MACH_OVERO_CTU_INERTIAL OVERO_CTU_INERTIAL 2971 | ||
| 2958 | davinci_dm355_mmm MACH_DAVINCI_DM355_MMM DAVINCI_DM355_MMM 2972 | ||
| 2959 | pc9260_v2 MACH_PC9260_V2 PC9260_V2 2973 | ||
| 2960 | ptx7545 MACH_PTX7545 PTX7545 2974 | ||
| 2961 | tm_efdc MACH_TM_EFDC TM_EFDC 2975 | ||
| 2962 | omap3_waldo1 MACH_OMAP3_WALDO1 OMAP3_WALDO1 2977 | ||
| 2963 | flyer MACH_FLYER FLYER 2978 | ||
| 2964 | tornado3240 MACH_TORNADO3240 TORNADO3240 2979 | ||
| 2965 | soli_01 MACH_SOLI_01 SOLI_01 2980 | ||
| 2966 | omapl138_europalc MACH_OMAPL138_EUROPALC OMAPL138_EUROPALC 2981 | ||
| 2967 | helios_v1 MACH_HELIOS_V1 HELIOS_V1 2982 | ||
| 2968 | netspace_lite_v2 MACH_NETSPACE_LITE_V2 NETSPACE_LITE_V2 2983 | ||
| 2969 | ssc MACH_SSC SSC 2984 | ||
| 2970 | premierwave_en MACH_PREMIERWAVE_EN PREMIERWAVE_EN 2985 | ||
| 2971 | wasabi MACH_WASABI WASABI 2986 | ||
| 2972 | vivow MACH_VIVOW VIVOW 2987 | ||
| 2973 | mx50_rdp MACH_MX50_RDP MX50_RDP 2988 | ||
| 2974 | universal MACH_UNIVERSAL UNIVERSAL 2989 | ||
| 2975 | real6410 MACH_REAL6410 REAL6410 2990 | ||
| 2976 | spx_sakura MACH_SPX_SAKURA SPX_SAKURA 2991 | ||
| 2977 | ij3k_2440 MACH_IJ3K_2440 IJ3K_2440 2992 | ||
| 2978 | omap3_bc10 MACH_OMAP3_BC10 OMAP3_BC10 2993 | ||
| 2979 | thebe MACH_THEBE THEBE 2994 | ||
| 2980 | rv082 MACH_RV082 RV082 2995 | ||
| 2981 | armlguest MACH_ARMLGUEST ARMLGUEST 2996 | ||
| 2982 | tjinc1000 MACH_TJINC1000 TJINC1000 2997 | ||
| 2983 | dockstar MACH_DOCKSTAR DOCKSTAR 2998 | ||
| 2984 | ax8008 MACH_AX8008 AX8008 2999 | ||
| 2985 | gnet_sgce MACH_GNET_SGCE GNET_SGCE 3000 | ||
| 2986 | pxwnas_500_1000 MACH_PXWNAS_500_1000 PXWNAS_500_1000 3001 | ||
| 2987 | ea20 MACH_EA20 EA20 3002 | ||
| 2988 | awm2 MACH_AWM2 AWM2 3003 | ||
| 2989 | ti8148evm MACH_TI8148EVM TI8148EVM 3004 | ||
| 2990 | tegra_seaboard MACH_TEGRA_SEABOARD TEGRA_SEABOARD 3005 | ||
| 2991 | linkstation_chlv2 MACH_LINKSTATION_CHLV2 LINKSTATION_CHLV2 3006 | ||
| 2992 | tera_pro2_rack MACH_TERA_PRO2_RACK TERA_PRO2_RACK 3007 | ||
| 2993 | rubys MACH_RUBYS RUBYS 3008 | ||
| 2994 | aquarius MACH_AQUARIUS AQUARIUS 3009 | ||
| 2995 | mx53_ard MACH_MX53_ARD MX53_ARD 3010 | ||
| 2996 | mx53_smd MACH_MX53_SMD MX53_SMD 3011 | ||
| 2997 | lswxl MACH_LSWXL LSWXL 3012 | ||
| 2998 | dove_avng_v3 MACH_DOVE_AVNG_V3 DOVE_AVNG_V3 3013 | ||
| 2999 | sdi_ess_9263 MACH_SDI_ESS_9263 SDI_ESS_9263 3014 | ||
| 3000 | jocpu550 MACH_JOCPU550 JOCPU550 3015 | ||
| 3001 | msm8x60_rumi3 MACH_MSM8X60_RUMI3 MSM8X60_RUMI3 3016 | ||
| 3002 | msm8x60_ffa MACH_MSM8X60_FFA MSM8X60_FFA 3017 | ||
| 3003 | yanomami MACH_YANOMAMI YANOMAMI 3018 | ||
| 3004 | gta04 MACH_GTA04 GTA04 3019 | ||
| 3005 | cm_a510 MACH_CM_A510 CM_A510 3020 | ||
| 3006 | omap3_rfs200 MACH_OMAP3_RFS200 OMAP3_RFS200 3021 | ||
| 3007 | kx33xx MACH_KX33XX KX33XX 3022 | ||
| 3008 | ptx7510 MACH_PTX7510 PTX7510 3023 | ||
| 3009 | top9000 MACH_TOP9000 TOP9000 3024 | ||
| 3010 | teenote MACH_TEENOTE TEENOTE 3025 | ||
| 3011 | ts3 MACH_TS3 TS3 3026 | ||
| 3012 | a0 MACH_A0 A0 3027 | ||
| 3013 | fsm9xxx_surf MACH_FSM9XXX_SURF FSM9XXX_SURF 3028 | ||
| 3014 | fsm9xxx_ffa MACH_FSM9XXX_FFA FSM9XXX_FFA 3029 | ||
| 3015 | frrhwcdma60w MACH_FRRHWCDMA60W FRRHWCDMA60W 3030 | ||
| 3016 | remus MACH_REMUS REMUS 3031 | ||
| 3017 | at91cap7xdk MACH_AT91CAP7XDK AT91CAP7XDK 3032 | ||
| 3018 | at91cap7stk MACH_AT91CAP7STK AT91CAP7STK 3033 | ||
| 3019 | kt_sbc_sam9_1 MACH_KT_SBC_SAM9_1 KT_SBC_SAM9_1 3034 | ||
| 3020 | oratisrouter MACH_ORATISROUTER ORATISROUTER 3035 | ||
| 3021 | armada_xp_db MACH_ARMADA_XP_DB ARMADA_XP_DB 3036 | ||
| 3022 | spdm MACH_SPDM SPDM 3037 | ||
| 3023 | gtib MACH_GTIB GTIB 3038 | ||
| 3024 | dgm3240 MACH_DGM3240 DGM3240 3039 | ||
| 3025 | atlas_i_lpe MACH_ATLAS_I_LPE ATLAS_I_LPE 3040 | ||
| 3026 | htcmega MACH_HTCMEGA HTCMEGA 3041 | ||
| 3027 | tricorder MACH_TRICORDER TRICORDER 3042 | ||
| 3028 | tx28 MACH_TX28 TX28 3043 | ||
| 3029 | bstbrd MACH_BSTBRD BSTBRD 3044 | ||
| 3030 | pwb3090 MACH_PWB3090 PWB3090 3045 | ||
| 3031 | idea6410 MACH_IDEA6410 IDEA6410 3046 | ||
| 3032 | qbc9263 MACH_QBC9263 QBC9263 3047 | ||
| 3033 | borabora MACH_BORABORA BORABORA 3048 | ||
| 3034 | valdez MACH_VALDEZ VALDEZ 3049 | ||
| 3035 | ls9g20 MACH_LS9G20 LS9G20 3050 | ||
| 3036 | mios_v1 MACH_MIOS_V1 MIOS_V1 3051 | ||
| 3037 | s5pc110_crespo MACH_S5PC110_CRESPO S5PC110_CRESPO 3052 | ||
| 3038 | controltek9g20 MACH_CONTROLTEK9G20 CONTROLTEK9G20 3053 | ||
| 3039 | tin307 MACH_TIN307 TIN307 3054 | ||
| 3040 | tin510 MACH_TIN510 TIN510 3055 | ||
| 3041 | bluecheese MACH_BLUECHEESE BLUECHEESE 3057 | ||
| 3042 | tem3x30 MACH_TEM3X30 TEM3X30 3058 | ||
| 3043 | harvest_desoto MACH_HARVEST_DESOTO HARVEST_DESOTO 3059 | ||
| 3044 | msm8x60_qrdc MACH_MSM8X60_QRDC MSM8X60_QRDC 3060 | ||
| 3045 | spear900 MACH_SPEAR900 SPEAR900 3061 | ||
| 3046 | pcontrol_g20 MACH_PCONTROL_G20 PCONTROL_G20 3062 | ||
diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h index e936804b7508..984221abb66d 100644 --- a/arch/h8300/include/asm/atomic.h +++ b/arch/h8300/include/asm/atomic.h | |||
| @@ -18,7 +18,8 @@ | |||
| 18 | 18 | ||
| 19 | static __inline__ int atomic_add_return(int i, atomic_t *v) | 19 | static __inline__ int atomic_add_return(int i, atomic_t *v) |
| 20 | { | 20 | { |
| 21 | int ret,flags; | 21 | unsigned long flags; |
| 22 | int ret; | ||
| 22 | local_irq_save(flags); | 23 | local_irq_save(flags); |
| 23 | ret = v->counter += i; | 24 | ret = v->counter += i; |
| 24 | local_irq_restore(flags); | 25 | local_irq_restore(flags); |
| @@ -30,7 +31,8 @@ static __inline__ int atomic_add_return(int i, atomic_t *v) | |||
| 30 | 31 | ||
| 31 | static __inline__ int atomic_sub_return(int i, atomic_t *v) | 32 | static __inline__ int atomic_sub_return(int i, atomic_t *v) |
| 32 | { | 33 | { |
| 33 | int ret,flags; | 34 | unsigned long flags; |
| 35 | int ret; | ||
| 34 | local_irq_save(flags); | 36 | local_irq_save(flags); |
| 35 | ret = v->counter -= i; | 37 | ret = v->counter -= i; |
| 36 | local_irq_restore(flags); | 38 | local_irq_restore(flags); |
| @@ -42,7 +44,8 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v) | |||
| 42 | 44 | ||
| 43 | static __inline__ int atomic_inc_return(atomic_t *v) | 45 | static __inline__ int atomic_inc_return(atomic_t *v) |
| 44 | { | 46 | { |
| 45 | int ret,flags; | 47 | unsigned long flags; |
| 48 | int ret; | ||
| 46 | local_irq_save(flags); | 49 | local_irq_save(flags); |
| 47 | v->counter++; | 50 | v->counter++; |
| 48 | ret = v->counter; | 51 | ret = v->counter; |
| @@ -64,7 +67,8 @@ static __inline__ int atomic_inc_return(atomic_t *v) | |||
| 64 | 67 | ||
| 65 | static __inline__ int atomic_dec_return(atomic_t *v) | 68 | static __inline__ int atomic_dec_return(atomic_t *v) |
| 66 | { | 69 | { |
| 67 | int ret,flags; | 70 | unsigned long flags; |
| 71 | int ret; | ||
| 68 | local_irq_save(flags); | 72 | local_irq_save(flags); |
| 69 | --v->counter; | 73 | --v->counter; |
| 70 | ret = v->counter; | 74 | ret = v->counter; |
| @@ -76,7 +80,8 @@ static __inline__ int atomic_dec_return(atomic_t *v) | |||
| 76 | 80 | ||
| 77 | static __inline__ int atomic_dec_and_test(atomic_t *v) | 81 | static __inline__ int atomic_dec_and_test(atomic_t *v) |
| 78 | { | 82 | { |
| 79 | int ret,flags; | 83 | unsigned long flags; |
| 84 | int ret; | ||
| 80 | local_irq_save(flags); | 85 | local_irq_save(flags); |
| 81 | --v->counter; | 86 | --v->counter; |
| 82 | ret = v->counter; | 87 | ret = v->counter; |
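The h8300 atomic.h hunks change the interrupt-flags variable from `int` to `unsigned long`, the type `local_irq_save()` expects for the saved status word. A minimal sketch of the corrected save/restore pattern follows; the two macros are stubs that only print, standing in for the real interrupt-masking primitives.

```c
/* The corrected irq-save critical section pattern, with stubbed primitives. */
#include <stdio.h>

#define local_irq_save(f)    do { (f) = 0x80UL; printf("irqs off\n"); } while (0)
#define local_irq_restore(f) do { (void)(f);    printf("irqs back on\n"); } while (0)

static int counter;

static int atomic_add_return_demo(int i)
{
    unsigned long flags;   /* was "int ret, flags;" before the fix */
    int ret;

    local_irq_save(flags);
    ret = counter += i;
    local_irq_restore(flags);
    return ret;
}

int main(void)
{
    printf("counter = %d\n", atomic_add_return_demo(3));
    return 0;
}
```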
diff --git a/arch/h8300/include/asm/system.h b/arch/h8300/include/asm/system.h index d98d97685f06..16bf1560ff68 100644 --- a/arch/h8300/include/asm/system.h +++ b/arch/h8300/include/asm/system.h | |||
| @@ -3,6 +3,8 @@ | |||
| 3 | 3 | ||
| 4 | #include <linux/linkage.h> | 4 | #include <linux/linkage.h> |
| 5 | 5 | ||
| 6 | struct pt_regs; | ||
| 7 | |||
| 6 | /* | 8 | /* |
| 7 | * switch_to(n) should switch tasks to task ptr, first checking that | 9 | * switch_to(n) should switch tasks to task ptr, first checking that |
| 8 | * ptr isn't the current task, in which case it does nothing. This | 10 | * ptr isn't the current task, in which case it does nothing. This |
| @@ -155,6 +157,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz | |||
| 155 | 157 | ||
| 156 | #define arch_align_stack(x) (x) | 158 | #define arch_align_stack(x) (x) |
| 157 | 159 | ||
| 158 | void die(char *str, struct pt_regs *fp, unsigned long err); | 160 | extern void die(const char *str, struct pt_regs *fp, unsigned long err); |
| 159 | 161 | ||
| 160 | #endif /* _H8300_SYSTEM_H */ | 162 | #endif /* _H8300_SYSTEM_H */ |
diff --git a/arch/h8300/kernel/sys_h8300.c b/arch/h8300/kernel/sys_h8300.c index dc1ac0243b78..aaf5e5a48f93 100644 --- a/arch/h8300/kernel/sys_h8300.c +++ b/arch/h8300/kernel/sys_h8300.c | |||
| @@ -56,8 +56,8 @@ int kernel_execve(const char *filename, | |||
| 56 | const char *const envp[]) | 56 | const char *const envp[]) |
| 57 | { | 57 | { |
| 58 | register long res __asm__("er0"); | 58 | register long res __asm__("er0"); |
| 59 | register char *const *_c __asm__("er3") = envp; | 59 | register const char *const *_c __asm__("er3") = envp; |
| 60 | register char *const *_b __asm__("er2") = argv; | 60 | register const char *const *_b __asm__("er2") = argv; |
| 61 | register const char * _a __asm__("er1") = filename; | 61 | register const char * _a __asm__("er1") = filename; |
| 62 | __asm__ __volatile__ ("mov.l %1,er0\n\t" | 62 | __asm__ __volatile__ ("mov.l %1,er0\n\t" |
| 63 | "trapa #0\n\t" | 63 | "trapa #0\n\t" |
diff --git a/arch/h8300/kernel/traps.c b/arch/h8300/kernel/traps.c index 3c0b66bc669e..dfa05bd908b6 100644 --- a/arch/h8300/kernel/traps.c +++ b/arch/h8300/kernel/traps.c | |||
| @@ -96,7 +96,7 @@ static void dump(struct pt_regs *fp) | |||
| 96 | printk("\n\n"); | 96 | printk("\n\n"); |
| 97 | } | 97 | } |
| 98 | 98 | ||
| 99 | void die(char *str, struct pt_regs *fp, unsigned long err) | 99 | void die(const char *str, struct pt_regs *fp, unsigned long err) |
| 100 | { | 100 | { |
| 101 | static int diecount; | 101 | static int diecount; |
| 102 | 102 | ||
diff --git a/arch/m68knommu/kernel/vmlinux.lds.S b/arch/m68knommu/kernel/vmlinux.lds.S index a91b2713451d..ef332136f96d 100644 --- a/arch/m68knommu/kernel/vmlinux.lds.S +++ b/arch/m68knommu/kernel/vmlinux.lds.S | |||
| @@ -150,6 +150,8 @@ SECTIONS { | |||
| 150 | _sdata = . ; | 150 | _sdata = . ; |
| 151 | DATA_DATA | 151 | DATA_DATA |
| 152 | CACHELINE_ALIGNED_DATA(32) | 152 | CACHELINE_ALIGNED_DATA(32) |
| 153 | PAGE_ALIGNED_DATA(PAGE_SIZE) | ||
| 154 | *(.data..shared_aligned) | ||
| 153 | INIT_TASK_DATA(THREAD_SIZE) | 155 | INIT_TASK_DATA(THREAD_SIZE) |
| 154 | _edata = . ; | 156 | _edata = . ; |
| 155 | } > DATA | 157 | } > DATA |
diff --git a/arch/powerpc/include/asm/fsldma.h b/arch/powerpc/include/asm/fsldma.h index a67aeed17d40..debc5ed96d6e 100644 --- a/arch/powerpc/include/asm/fsldma.h +++ b/arch/powerpc/include/asm/fsldma.h | |||
| @@ -11,6 +11,7 @@ | |||
| 11 | #ifndef __ARCH_POWERPC_ASM_FSLDMA_H__ | 11 | #ifndef __ARCH_POWERPC_ASM_FSLDMA_H__ |
| 12 | #define __ARCH_POWERPC_ASM_FSLDMA_H__ | 12 | #define __ARCH_POWERPC_ASM_FSLDMA_H__ |
| 13 | 13 | ||
| 14 | #include <linux/slab.h> | ||
| 14 | #include <linux/dmaengine.h> | 15 | #include <linux/dmaengine.h> |
| 15 | 16 | ||
| 16 | /* | 17 | /* |
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 4d6681dce816..c571cd3c1453 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S | |||
| @@ -575,13 +575,19 @@ __secondary_start: | |||
| 575 | /* Initialize the kernel stack. Just a repeat for iSeries. */ | 575 | /* Initialize the kernel stack. Just a repeat for iSeries. */ |
| 576 | LOAD_REG_ADDR(r3, current_set) | 576 | LOAD_REG_ADDR(r3, current_set) |
| 577 | sldi r28,r24,3 /* get current_set[cpu#] */ | 577 | sldi r28,r24,3 /* get current_set[cpu#] */ |
| 578 | ldx r1,r3,r28 | 578 | ldx r14,r3,r28 |
| 579 | addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD | 579 | addi r14,r14,THREAD_SIZE-STACK_FRAME_OVERHEAD |
| 580 | std r1,PACAKSAVE(r13) | 580 | std r14,PACAKSAVE(r13) |
| 581 | 581 | ||
| 582 | /* Do early setup for that CPU (stab, slb, hash table pointer) */ | 582 | /* Do early setup for that CPU (stab, slb, hash table pointer) */ |
| 583 | bl .early_setup_secondary | 583 | bl .early_setup_secondary |
| 584 | 584 | ||
| 585 | /* | ||
| 586 | * setup the new stack pointer, but *don't* use this until | ||
| 587 | * translation is on. | ||
| 588 | */ | ||
| 589 | mr r1, r14 | ||
| 590 | |||
| 585 | /* Clear backchain so we get nice backtraces */ | 591 | /* Clear backchain so we get nice backtraces */ |
| 586 | li r7,0 | 592 | li r7,0 |
| 587 | mtlr r7 | 593 | mtlr r7 |
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index 6bbd7a604d24..a7a570dcdd57 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S | |||
| @@ -810,6 +810,9 @@ relocate_new_kernel: | |||
| 810 | isync | 810 | isync |
| 811 | sync | 811 | sync |
| 812 | 812 | ||
| 813 | mfspr r3, SPRN_PIR /* current core we are running on */ | ||
| 814 | mr r4, r5 /* load physical address of chunk called */ | ||
| 815 | |||
| 813 | /* jump to the entry point, usually the setup routine */ | 816 | /* jump to the entry point, usually the setup routine */ |
| 814 | mtlr r5 | 817 | mtlr r5 |
| 815 | blrl | 818 | blrl |
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index ce53dfa7130d..8533b3b83f5d 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c | |||
| @@ -577,20 +577,11 @@ void timer_interrupt(struct pt_regs * regs) | |||
| 577 | * some CPUs will continuue to take decrementer exceptions */ | 577 | * some CPUs will continuue to take decrementer exceptions */ |
| 578 | set_dec(DECREMENTER_MAX); | 578 | set_dec(DECREMENTER_MAX); |
| 579 | 579 | ||
| 580 | #ifdef CONFIG_PPC32 | 580 | #if defined(CONFIG_PPC32) && defined(CONFIG_PMAC) |
| 581 | if (atomic_read(&ppc_n_lost_interrupts) != 0) | 581 | if (atomic_read(&ppc_n_lost_interrupts) != 0) |
| 582 | do_IRQ(regs); | 582 | do_IRQ(regs); |
| 583 | #endif | 583 | #endif |
| 584 | 584 | ||
| 585 | now = get_tb_or_rtc(); | ||
| 586 | if (now < decrementer->next_tb) { | ||
| 587 | /* not time for this event yet */ | ||
| 588 | now = decrementer->next_tb - now; | ||
| 589 | if (now <= DECREMENTER_MAX) | ||
| 590 | set_dec((int)now); | ||
| 591 | trace_timer_interrupt_exit(regs); | ||
| 592 | return; | ||
| 593 | } | ||
| 594 | old_regs = set_irq_regs(regs); | 585 | old_regs = set_irq_regs(regs); |
| 595 | irq_enter(); | 586 | irq_enter(); |
| 596 | 587 | ||
| @@ -606,8 +597,16 @@ void timer_interrupt(struct pt_regs * regs) | |||
| 606 | get_lppaca()->int_dword.fields.decr_int = 0; | 597 | get_lppaca()->int_dword.fields.decr_int = 0; |
| 607 | #endif | 598 | #endif |
| 608 | 599 | ||
| 609 | if (evt->event_handler) | 600 | now = get_tb_or_rtc(); |
| 610 | evt->event_handler(evt); | 601 | if (now >= decrementer->next_tb) { |
| 602 | decrementer->next_tb = ~(u64)0; | ||
| 603 | if (evt->event_handler) | ||
| 604 | evt->event_handler(evt); | ||
| 605 | } else { | ||
| 606 | now = decrementer->next_tb - now; | ||
| 607 | if (now <= DECREMENTER_MAX) | ||
| 608 | set_dec((int)now); | ||
| 609 | } | ||
| 611 | 610 | ||
| 612 | #ifdef CONFIG_PPC_ISERIES | 611 | #ifdef CONFIG_PPC_ISERIES |
| 613 | if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending()) | 612 | if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending()) |
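The reworked `timer_interrupt()` above always enters IRQ context and only then decides whether the clockevent handler runs or the decrementer is simply re-armed for the remaining ticks. The standalone sketch below shows just that decision, with example tick values; `DECREMENTER_MAX` mirrors the 31-bit limit used in the hunk.

```c
/* The re-arm decision from the reworked timer_interrupt(), in plain C. */
#include <stdio.h>
#include <stdint.h>

#define DECREMENTER_MAX 0x7fffffffu

static void decide(uint64_t now, uint64_t next_tb)
{
    if (now >= next_tb) {
        printf("now=%llu: event due, run the handler\n",
               (unsigned long long)now);
    } else {
        uint64_t delta = next_tb - now;
        if (delta <= DECREMENTER_MAX)
            printf("now=%llu: re-arm decrementer for %llu ticks\n",
                   (unsigned long long)now, (unsigned long long)delta);
        else
            printf("now=%llu: delta too large, keep the maximum programmed\n",
                   (unsigned long long)now);
    }
}

int main(void)
{
    uint64_t next_tb = 1000000;   /* example next-event timestamp */

    decide(999000, next_tb);      /* early: just reprogram the decrementer */
    decide(1000200, next_tb);     /* on time or late: run the event handler */
    return 0;
}
```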
diff --git a/arch/powerpc/platforms/83xx/mpc837x_mds.c b/arch/powerpc/platforms/83xx/mpc837x_mds.c index f9751c8905be..83068322abd1 100644 --- a/arch/powerpc/platforms/83xx/mpc837x_mds.c +++ b/arch/powerpc/platforms/83xx/mpc837x_mds.c | |||
| @@ -48,8 +48,10 @@ static int mpc837xmds_usb_cfg(void) | |||
| 48 | return -1; | 48 | return -1; |
| 49 | 49 | ||
| 50 | np = of_find_node_by_name(NULL, "usb"); | 50 | np = of_find_node_by_name(NULL, "usb"); |
| 51 | if (!np) | 51 | if (!np) { |
| 52 | return -ENODEV; | 52 | ret = -ENODEV; |
| 53 | goto out; | ||
| 54 | } | ||
| 53 | phy_type = of_get_property(np, "phy_type", NULL); | 55 | phy_type = of_get_property(np, "phy_type", NULL); |
| 54 | if (phy_type && !strcmp(phy_type, "ulpi")) { | 56 | if (phy_type && !strcmp(phy_type, "ulpi")) { |
| 55 | clrbits8(bcsr_regs + 12, BCSR12_USB_SER_PIN); | 57 | clrbits8(bcsr_regs + 12, BCSR12_USB_SER_PIN); |
| @@ -65,8 +67,9 @@ static int mpc837xmds_usb_cfg(void) | |||
| 65 | } | 67 | } |
| 66 | 68 | ||
| 67 | of_node_put(np); | 69 | of_node_put(np); |
| 70 | out: | ||
| 68 | iounmap(bcsr_regs); | 71 | iounmap(bcsr_regs); |
| 69 | return 0; | 72 | return ret; |
| 70 | } | 73 | } |
| 71 | 74 | ||
| 72 | /* ************************************************************************ | 75 | /* ************************************************************************ |
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c index da64be19d099..aa34cac4eb5c 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c | |||
| @@ -357,6 +357,7 @@ static void __init mpc85xx_mds_setup_arch(void) | |||
| 357 | { | 357 | { |
| 358 | #ifdef CONFIG_PCI | 358 | #ifdef CONFIG_PCI |
| 359 | struct pci_controller *hose; | 359 | struct pci_controller *hose; |
| 360 | struct device_node *np; | ||
| 360 | #endif | 361 | #endif |
| 361 | dma_addr_t max = 0xffffffff; | 362 | dma_addr_t max = 0xffffffff; |
| 362 | 363 | ||
diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c index e1467c937450..34e00902ce86 100644 --- a/arch/powerpc/platforms/85xx/p1022_ds.c +++ b/arch/powerpc/platforms/85xx/p1022_ds.c | |||
| @@ -19,7 +19,7 @@ | |||
| 19 | 19 | ||
| 20 | #include <linux/pci.h> | 20 | #include <linux/pci.h> |
| 21 | #include <linux/of_platform.h> | 21 | #include <linux/of_platform.h> |
| 22 | #include <linux/lmb.h> | 22 | #include <linux/memblock.h> |
| 23 | 23 | ||
| 24 | #include <asm/mpic.h> | 24 | #include <asm/mpic.h> |
| 25 | #include <asm/swiotlb.h> | 25 | #include <asm/swiotlb.h> |
| @@ -97,7 +97,7 @@ static void __init p1022_ds_setup_arch(void) | |||
| 97 | #endif | 97 | #endif |
| 98 | 98 | ||
| 99 | #ifdef CONFIG_SWIOTLB | 99 | #ifdef CONFIG_SWIOTLB |
| 100 | if (lmb_end_of_DRAM() > max) { | 100 | if (memblock_end_of_DRAM() > max) { |
| 101 | ppc_swiotlb_enable = 1; | 101 | ppc_swiotlb_enable = 1; |
| 102 | set_pci_dma_ops(&swiotlb_dma_ops); | 102 | set_pci_dma_ops(&swiotlb_dma_ops); |
| 103 | ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb; | 103 | ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb; |
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index 227c1c3d585e..72d8054fa739 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c | |||
| @@ -129,20 +129,35 @@ struct device_node *dlpar_configure_connector(u32 drc_index) | |||
| 129 | struct property *property; | 129 | struct property *property; |
| 130 | struct property *last_property = NULL; | 130 | struct property *last_property = NULL; |
| 131 | struct cc_workarea *ccwa; | 131 | struct cc_workarea *ccwa; |
| 132 | char *data_buf; | ||
| 132 | int cc_token; | 133 | int cc_token; |
| 133 | int rc; | 134 | int rc = -1; |
| 134 | 135 | ||
| 135 | cc_token = rtas_token("ibm,configure-connector"); | 136 | cc_token = rtas_token("ibm,configure-connector"); |
| 136 | if (cc_token == RTAS_UNKNOWN_SERVICE) | 137 | if (cc_token == RTAS_UNKNOWN_SERVICE) |
| 137 | return NULL; | 138 | return NULL; |
| 138 | 139 | ||
| 139 | spin_lock(&rtas_data_buf_lock); | 140 | data_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL); |
| 140 | ccwa = (struct cc_workarea *)&rtas_data_buf[0]; | 141 | if (!data_buf) |
| 142 | return NULL; | ||
| 143 | |||
| 144 | ccwa = (struct cc_workarea *)&data_buf[0]; | ||
| 141 | ccwa->drc_index = drc_index; | 145 | ccwa->drc_index = drc_index; |
| 142 | ccwa->zero = 0; | 146 | ccwa->zero = 0; |
| 143 | 147 | ||
| 144 | rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL); | 148 | do { |
| 145 | while (rc) { | 149 | /* Since we release the rtas_data_buf lock between configure |
| 150 | * connector calls we want to re-populate the rtas_data_buffer | ||
| 151 | * with the contents of the previous call. | ||
| 152 | */ | ||
| 153 | spin_lock(&rtas_data_buf_lock); | ||
| 154 | |||
| 155 | memcpy(rtas_data_buf, data_buf, RTAS_DATA_BUF_SIZE); | ||
| 156 | rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL); | ||
| 157 | memcpy(data_buf, rtas_data_buf, RTAS_DATA_BUF_SIZE); | ||
| 158 | |||
| 159 | spin_unlock(&rtas_data_buf_lock); | ||
| 160 | |||
| 146 | switch (rc) { | 161 | switch (rc) { |
| 147 | case NEXT_SIBLING: | 162 | case NEXT_SIBLING: |
| 148 | dn = dlpar_parse_cc_node(ccwa); | 163 | dn = dlpar_parse_cc_node(ccwa); |
| @@ -197,18 +212,19 @@ struct device_node *dlpar_configure_connector(u32 drc_index) | |||
| 197 | "returned from configure-connector\n", rc); | 212 | "returned from configure-connector\n", rc); |
| 198 | goto cc_error; | 213 | goto cc_error; |
| 199 | } | 214 | } |
| 215 | } while (rc); | ||
| 200 | 216 | ||
| 201 | rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL); | 217 | cc_error: |
| 218 | kfree(data_buf); | ||
| 219 | |||
| 220 | if (rc) { | ||
| 221 | if (first_dn) | ||
| 222 | dlpar_free_cc_nodes(first_dn); | ||
| 223 | |||
| 224 | return NULL; | ||
| 202 | } | 225 | } |
| 203 | 226 | ||
| 204 | spin_unlock(&rtas_data_buf_lock); | ||
| 205 | return first_dn; | 227 | return first_dn; |
| 206 | |||
| 207 | cc_error: | ||
| 208 | if (first_dn) | ||
| 209 | dlpar_free_cc_nodes(first_dn); | ||
| 210 | spin_unlock(&rtas_data_buf_lock); | ||
| 211 | return NULL; | ||
| 212 | } | 228 | } |
| 213 | 229 | ||
| 214 | static struct device_node *derive_parent(const char *path) | 230 | static struct device_node *derive_parent(const char *path) |
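The dlpar.c change above stops holding rtas_data_buf_lock across the whole configure-connector loop: each iteration copies a private buffer into rtas_data_buf, makes the RTAS call, copies the result back out, and only then drops the lock. A small pthread sketch of that copy-in/copy-out pattern, assuming a 64-byte buffer in place of RTAS_DATA_BUF_SIZE and a fake_firmware_call() stand-in for rtas_call():

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define BUF_SIZE 64                     /* stands in for RTAS_DATA_BUF_SIZE */

static pthread_mutex_t shared_lock = PTHREAD_MUTEX_INITIALIZER;
static char shared_buf[BUF_SIZE];       /* stands in for rtas_data_buf */

/* Pretend firmware call that consumes and updates the shared buffer;
 * it reports "more work" until the state byte reaches 3. */
static int fake_firmware_call(void)
{
	shared_buf[0]++;
	return shared_buf[0] < 3;
}

int main(void)
{
	char private_buf[BUF_SIZE] = { 0 }; /* per-caller copy of the state */
	int rc;

	do {
		/* Hold the lock only around the call itself; re-populate
		 * the shared buffer from the private copy each time. */
		pthread_mutex_lock(&shared_lock);
		memcpy(shared_buf, private_buf, BUF_SIZE);
		rc = fake_firmware_call();
		memcpy(private_buf, shared_buf, BUF_SIZE);
		pthread_mutex_unlock(&shared_lock);
	} while (rc);

	printf("final state byte: %d\n", private_buf[0]);
	return 0;
}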
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c index 209384b6e039..4ae933225251 100644 --- a/arch/powerpc/sysdev/fsl_pci.c +++ b/arch/powerpc/sysdev/fsl_pci.c | |||
| @@ -399,6 +399,8 @@ DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1013E, quirk_fsl_pcie_header); | |||
| 399 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1013, quirk_fsl_pcie_header); | 399 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1013, quirk_fsl_pcie_header); |
| 400 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1020E, quirk_fsl_pcie_header); | 400 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1020E, quirk_fsl_pcie_header); |
| 401 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1020, quirk_fsl_pcie_header); | 401 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1020, quirk_fsl_pcie_header); |
| 402 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1021E, quirk_fsl_pcie_header); | ||
| 403 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1021, quirk_fsl_pcie_header); | ||
| 402 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1022E, quirk_fsl_pcie_header); | 404 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1022E, quirk_fsl_pcie_header); |
| 403 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1022, quirk_fsl_pcie_header); | 405 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1022, quirk_fsl_pcie_header); |
| 404 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2010E, quirk_fsl_pcie_header); | 406 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2010E, quirk_fsl_pcie_header); |
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c index 6425abe5b7db..3017532319c8 100644 --- a/arch/powerpc/sysdev/fsl_rio.c +++ b/arch/powerpc/sysdev/fsl_rio.c | |||
| @@ -240,12 +240,13 @@ struct rio_priv { | |||
| 240 | 240 | ||
| 241 | static void __iomem *rio_regs_win; | 241 | static void __iomem *rio_regs_win; |
| 242 | 242 | ||
| 243 | #ifdef CONFIG_E500 | ||
| 243 | static int (*saved_mcheck_exception)(struct pt_regs *regs); | 244 | static int (*saved_mcheck_exception)(struct pt_regs *regs); |
| 244 | 245 | ||
| 245 | static int fsl_rio_mcheck_exception(struct pt_regs *regs) | 246 | static int fsl_rio_mcheck_exception(struct pt_regs *regs) |
| 246 | { | 247 | { |
| 247 | const struct exception_table_entry *entry = NULL; | 248 | const struct exception_table_entry *entry = NULL; |
| 248 | unsigned long reason = (mfspr(SPRN_MCSR) & MCSR_MASK); | 249 | unsigned long reason = mfspr(SPRN_MCSR); |
| 249 | 250 | ||
| 250 | if (reason & MCSR_BUS_RBERR) { | 251 | if (reason & MCSR_BUS_RBERR) { |
| 251 | reason = in_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR)); | 252 | reason = in_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR)); |
| @@ -269,6 +270,7 @@ static int fsl_rio_mcheck_exception(struct pt_regs *regs) | |||
| 269 | else | 270 | else |
| 270 | return cur_cpu_spec->machine_check(regs); | 271 | return cur_cpu_spec->machine_check(regs); |
| 271 | } | 272 | } |
| 273 | #endif | ||
| 272 | 274 | ||
| 273 | /** | 275 | /** |
| 274 | * fsl_rio_doorbell_send - Send a MPC85xx doorbell message | 276 | * fsl_rio_doorbell_send - Send a MPC85xx doorbell message |
| @@ -1517,8 +1519,10 @@ int fsl_rio_setup(struct platform_device *dev) | |||
| 1517 | fsl_rio_doorbell_init(port); | 1519 | fsl_rio_doorbell_init(port); |
| 1518 | fsl_rio_port_write_init(port); | 1520 | fsl_rio_port_write_init(port); |
| 1519 | 1521 | ||
| 1522 | #ifdef CONFIG_E500 | ||
| 1520 | saved_mcheck_exception = ppc_md.machine_check_exception; | 1523 | saved_mcheck_exception = ppc_md.machine_check_exception; |
| 1521 | ppc_md.machine_check_exception = fsl_rio_mcheck_exception; | 1524 | ppc_md.machine_check_exception = fsl_rio_mcheck_exception; |
| 1525 | #endif | ||
| 1522 | /* Ensure that RFXE is set */ | 1526 | /* Ensure that RFXE is set */ |
| 1523 | mtspr(SPRN_HID1, (mfspr(SPRN_HID1) | 0x20000)); | 1527 | mtspr(SPRN_HID1, (mfspr(SPRN_HID1) | 0x20000)); |
| 1524 | 1528 | ||
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c index 3da8014931c9..90020de4dcf2 100644 --- a/arch/powerpc/sysdev/qe_lib/qe.c +++ b/arch/powerpc/sysdev/qe_lib/qe.c | |||
| @@ -640,6 +640,7 @@ unsigned int qe_get_num_of_snums(void) | |||
| 640 | if ((num_of_snums < 28) || (num_of_snums > QE_NUM_OF_SNUM)) { | 640 | if ((num_of_snums < 28) || (num_of_snums > QE_NUM_OF_SNUM)) { |
| 641 | /* No QE ever has fewer than 28 SNUMs */ | 641 | /* No QE ever has fewer than 28 SNUMs */ |
| 642 | pr_err("QE: number of snum is invalid\n"); | 642 | pr_err("QE: number of snum is invalid\n"); |
| 643 | of_node_put(qe); | ||
| 643 | return -EINVAL; | 644 | return -EINVAL; |
| 644 | } | 645 | } |
| 645 | } | 646 | } |
diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c index 50794137d710..675c9e11ada5 100644 --- a/arch/sparc/kernel/sys_sparc_32.c +++ b/arch/sparc/kernel/sys_sparc_32.c | |||
| @@ -166,7 +166,6 @@ sparc_breakpoint (struct pt_regs *regs) | |||
| 166 | { | 166 | { |
| 167 | siginfo_t info; | 167 | siginfo_t info; |
| 168 | 168 | ||
| 169 | lock_kernel(); | ||
| 170 | #ifdef DEBUG_SPARC_BREAKPOINT | 169 | #ifdef DEBUG_SPARC_BREAKPOINT |
| 171 | printk ("TRAP: Entering kernel PC=%x, nPC=%x\n", regs->pc, regs->npc); | 170 | printk ("TRAP: Entering kernel PC=%x, nPC=%x\n", regs->pc, regs->npc); |
| 172 | #endif | 171 | #endif |
| @@ -180,7 +179,6 @@ sparc_breakpoint (struct pt_regs *regs) | |||
| 180 | #ifdef DEBUG_SPARC_BREAKPOINT | 179 | #ifdef DEBUG_SPARC_BREAKPOINT |
| 181 | printk ("TRAP: Returning to space: PC=%x nPC=%x\n", regs->pc, regs->npc); | 180 | printk ("TRAP: Returning to space: PC=%x nPC=%x\n", regs->pc, regs->npc); |
| 182 | #endif | 181 | #endif |
| 183 | unlock_kernel(); | ||
| 184 | } | 182 | } |
| 185 | 183 | ||
| 186 | asmlinkage int | 184 | asmlinkage int |
diff --git a/arch/sparc/kernel/unaligned_32.c b/arch/sparc/kernel/unaligned_32.c index f8514e291e15..12b9f352595f 100644 --- a/arch/sparc/kernel/unaligned_32.c +++ b/arch/sparc/kernel/unaligned_32.c | |||
| @@ -323,7 +323,6 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn) | |||
| 323 | { | 323 | { |
| 324 | enum direction dir; | 324 | enum direction dir; |
| 325 | 325 | ||
| 326 | lock_kernel(); | ||
| 327 | if(!(current->thread.flags & SPARC_FLAG_UNALIGNED) || | 326 | if(!(current->thread.flags & SPARC_FLAG_UNALIGNED) || |
| 328 | (((insn >> 30) & 3) != 3)) | 327 | (((insn >> 30) & 3) != 3)) |
| 329 | goto kill_user; | 328 | goto kill_user; |
| @@ -377,5 +376,5 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn) | |||
| 377 | kill_user: | 376 | kill_user: |
| 378 | user_mna_trap_fault(regs, insn); | 377 | user_mna_trap_fault(regs, insn); |
| 379 | out: | 378 | out: |
| 380 | unlock_kernel(); | 379 | ; |
| 381 | } | 380 | } |
diff --git a/arch/sparc/kernel/windows.c b/arch/sparc/kernel/windows.c index f24d298bda29..b351770cbdd6 100644 --- a/arch/sparc/kernel/windows.c +++ b/arch/sparc/kernel/windows.c | |||
| @@ -112,7 +112,6 @@ void try_to_clear_window_buffer(struct pt_regs *regs, int who) | |||
| 112 | struct thread_info *tp = current_thread_info(); | 112 | struct thread_info *tp = current_thread_info(); |
| 113 | int window; | 113 | int window; |
| 114 | 114 | ||
| 115 | lock_kernel(); | ||
| 116 | flush_user_windows(); | 115 | flush_user_windows(); |
| 117 | for(window = 0; window < tp->w_saved; window++) { | 116 | for(window = 0; window < tp->w_saved; window++) { |
| 118 | unsigned long sp = tp->rwbuf_stkptrs[window]; | 117 | unsigned long sp = tp->rwbuf_stkptrs[window]; |
| @@ -123,5 +122,4 @@ void try_to_clear_window_buffer(struct pt_regs *regs, int who) | |||
| 123 | do_exit(SIGILL); | 122 | do_exit(SIGILL); |
| 124 | } | 123 | } |
| 125 | tp->w_saved = 0; | 124 | tp->w_saved = 0; |
| 126 | unlock_kernel(); | ||
| 127 | } | 125 | } |
diff --git a/arch/x86/include/asm/iomap.h b/arch/x86/include/asm/iomap.h index f35eb45d6576..c4191b3b7056 100644 --- a/arch/x86/include/asm/iomap.h +++ b/arch/x86/include/asm/iomap.h | |||
| @@ -26,11 +26,11 @@ | |||
| 26 | #include <asm/pgtable.h> | 26 | #include <asm/pgtable.h> |
| 27 | #include <asm/tlbflush.h> | 27 | #include <asm/tlbflush.h> |
| 28 | 28 | ||
| 29 | void * | 29 | void __iomem * |
| 30 | iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot); | 30 | iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot); |
| 31 | 31 | ||
| 32 | void | 32 | void |
| 33 | iounmap_atomic(void *kvaddr, enum km_type type); | 33 | iounmap_atomic(void __iomem *kvaddr, enum km_type type); |
| 34 | 34 | ||
| 35 | int | 35 | int |
| 36 | iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot); | 36 | iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot); |
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h index 51cfd730ac5d..1f99ecfc48e1 100644 --- a/arch/x86/include/asm/kvm_emulate.h +++ b/arch/x86/include/asm/kvm_emulate.h | |||
| @@ -152,9 +152,14 @@ struct x86_emulate_ops { | |||
| 152 | struct operand { | 152 | struct operand { |
| 153 | enum { OP_REG, OP_MEM, OP_IMM, OP_NONE } type; | 153 | enum { OP_REG, OP_MEM, OP_IMM, OP_NONE } type; |
| 154 | unsigned int bytes; | 154 | unsigned int bytes; |
| 155 | unsigned long orig_val, *ptr; | 155 | union { |
| 156 | unsigned long orig_val; | ||
| 157 | u64 orig_val64; | ||
| 158 | }; | ||
| 159 | unsigned long *ptr; | ||
| 156 | union { | 160 | union { |
| 157 | unsigned long val; | 161 | unsigned long val; |
| 162 | u64 val64; | ||
| 158 | char valptr[sizeof(unsigned long) + 2]; | 163 | char valptr[sizeof(unsigned long) + 2]; |
| 159 | }; | 164 | }; |
| 160 | }; | 165 | }; |
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h index 404a880ea325..d395540ff894 100644 --- a/arch/x86/include/asm/pci.h +++ b/arch/x86/include/asm/pci.h | |||
| @@ -27,6 +27,9 @@ extern struct pci_bus *pci_scan_bus_on_node(int busno, struct pci_ops *ops, | |||
| 27 | int node); | 27 | int node); |
| 28 | extern struct pci_bus *pci_scan_bus_with_sysdata(int busno); | 28 | extern struct pci_bus *pci_scan_bus_with_sysdata(int busno); |
| 29 | 29 | ||
| 30 | #ifdef CONFIG_PCI | ||
| 31 | |||
| 32 | #ifdef CONFIG_PCI_DOMAINS | ||
| 30 | static inline int pci_domain_nr(struct pci_bus *bus) | 33 | static inline int pci_domain_nr(struct pci_bus *bus) |
| 31 | { | 34 | { |
| 32 | struct pci_sysdata *sd = bus->sysdata; | 35 | struct pci_sysdata *sd = bus->sysdata; |
| @@ -37,13 +40,12 @@ static inline int pci_proc_domain(struct pci_bus *bus) | |||
| 37 | { | 40 | { |
| 38 | return pci_domain_nr(bus); | 41 | return pci_domain_nr(bus); |
| 39 | } | 42 | } |
| 40 | 43 | #endif | |
| 41 | 44 | ||
| 42 | /* Can be used to override the logic in pci_scan_bus for skipping | 45 | /* Can be used to override the logic in pci_scan_bus for skipping |
| 43 | already-configured bus numbers - to be used for buggy BIOSes | 46 | already-configured bus numbers - to be used for buggy BIOSes |
| 44 | or architectures with incomplete PCI setup by the loader */ | 47 | or architectures with incomplete PCI setup by the loader */ |
| 45 | 48 | ||
| 46 | #ifdef CONFIG_PCI | ||
| 47 | extern unsigned int pcibios_assign_all_busses(void); | 49 | extern unsigned int pcibios_assign_all_busses(void); |
| 48 | extern int pci_legacy_init(void); | 50 | extern int pci_legacy_init(void); |
| 49 | # ifdef CONFIG_ACPI | 51 | # ifdef CONFIG_ACPI |
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index 224392d8fe8c..5e975298fa81 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c | |||
| @@ -530,7 +530,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | |||
| 530 | err = -ENOMEM; | 530 | err = -ENOMEM; |
| 531 | goto out; | 531 | goto out; |
| 532 | } | 532 | } |
| 533 | if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL)) { | 533 | if (!zalloc_cpumask_var(&b->cpus, GFP_KERNEL)) { |
| 534 | kfree(b); | 534 | kfree(b); |
| 535 | err = -ENOMEM; | 535 | err = -ENOMEM; |
| 536 | goto out; | 536 | goto out; |
| @@ -543,7 +543,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | |||
| 543 | #ifndef CONFIG_SMP | 543 | #ifndef CONFIG_SMP |
| 544 | cpumask_setall(b->cpus); | 544 | cpumask_setall(b->cpus); |
| 545 | #else | 545 | #else |
| 546 | cpumask_copy(b->cpus, c->llc_shared_map); | 546 | cpumask_set_cpu(cpu, b->cpus); |
| 547 | #endif | 547 | #endif |
| 548 | 548 | ||
| 549 | per_cpu(threshold_banks, cpu)[bank] = b; | 549 | per_cpu(threshold_banks, cpu)[bank] = b; |
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c index c2a8b26d4fea..d9368eeda309 100644 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c | |||
| @@ -202,10 +202,11 @@ static int therm_throt_process(bool new_event, int event, int level) | |||
| 202 | 202 | ||
| 203 | #ifdef CONFIG_SYSFS | 203 | #ifdef CONFIG_SYSFS |
| 204 | /* Add/Remove thermal_throttle interface for CPU device: */ | 204 | /* Add/Remove thermal_throttle interface for CPU device: */ |
| 205 | static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev) | 205 | static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev, |
| 206 | unsigned int cpu) | ||
| 206 | { | 207 | { |
| 207 | int err; | 208 | int err; |
| 208 | struct cpuinfo_x86 *c = &cpu_data(smp_processor_id()); | 209 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
| 209 | 210 | ||
| 210 | err = sysfs_create_group(&sys_dev->kobj, &thermal_attr_group); | 211 | err = sysfs_create_group(&sys_dev->kobj, &thermal_attr_group); |
| 211 | if (err) | 212 | if (err) |
| @@ -251,7 +252,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb, | |||
| 251 | case CPU_UP_PREPARE: | 252 | case CPU_UP_PREPARE: |
| 252 | case CPU_UP_PREPARE_FROZEN: | 253 | case CPU_UP_PREPARE_FROZEN: |
| 253 | mutex_lock(&therm_cpu_lock); | 254 | mutex_lock(&therm_cpu_lock); |
| 254 | err = thermal_throttle_add_dev(sys_dev); | 255 | err = thermal_throttle_add_dev(sys_dev, cpu); |
| 255 | mutex_unlock(&therm_cpu_lock); | 256 | mutex_unlock(&therm_cpu_lock); |
| 256 | WARN_ON(err); | 257 | WARN_ON(err); |
| 257 | break; | 258 | break; |
| @@ -287,7 +288,7 @@ static __init int thermal_throttle_init_device(void) | |||
| 287 | #endif | 288 | #endif |
| 288 | /* connect live CPUs to sysfs */ | 289 | /* connect live CPUs to sysfs */ |
| 289 | for_each_online_cpu(cpu) { | 290 | for_each_online_cpu(cpu) { |
| 290 | err = thermal_throttle_add_dev(get_cpu_sysdev(cpu)); | 291 | err = thermal_throttle_add_dev(get_cpu_sysdev(cpu), cpu); |
| 291 | WARN_ON(err); | 292 | WARN_ON(err); |
| 292 | } | 293 | } |
| 293 | #ifdef CONFIG_HOTPLUG_CPU | 294 | #ifdef CONFIG_HOTPLUG_CPU |
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index f2da20fda02d..3efdf2870a35 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c | |||
| @@ -1154,7 +1154,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs) | |||
| 1154 | /* | 1154 | /* |
| 1155 | * event overflow | 1155 | * event overflow |
| 1156 | */ | 1156 | */ |
| 1157 | handled = 1; | 1157 | handled++; |
| 1158 | data.period = event->hw.last_period; | 1158 | data.period = event->hw.last_period; |
| 1159 | 1159 | ||
| 1160 | if (!x86_perf_event_set_period(event)) | 1160 | if (!x86_perf_event_set_period(event)) |
| @@ -1200,12 +1200,20 @@ void perf_events_lapic_init(void) | |||
| 1200 | apic_write(APIC_LVTPC, APIC_DM_NMI); | 1200 | apic_write(APIC_LVTPC, APIC_DM_NMI); |
| 1201 | } | 1201 | } |
| 1202 | 1202 | ||
| 1203 | struct pmu_nmi_state { | ||
| 1204 | unsigned int marked; | ||
| 1205 | int handled; | ||
| 1206 | }; | ||
| 1207 | |||
| 1208 | static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi); | ||
| 1209 | |||
| 1203 | static int __kprobes | 1210 | static int __kprobes |
| 1204 | perf_event_nmi_handler(struct notifier_block *self, | 1211 | perf_event_nmi_handler(struct notifier_block *self, |
| 1205 | unsigned long cmd, void *__args) | 1212 | unsigned long cmd, void *__args) |
| 1206 | { | 1213 | { |
| 1207 | struct die_args *args = __args; | 1214 | struct die_args *args = __args; |
| 1208 | struct pt_regs *regs; | 1215 | unsigned int this_nmi; |
| 1216 | int handled; | ||
| 1209 | 1217 | ||
| 1210 | if (!atomic_read(&active_events)) | 1218 | if (!atomic_read(&active_events)) |
| 1211 | return NOTIFY_DONE; | 1219 | return NOTIFY_DONE; |
| @@ -1214,22 +1222,47 @@ perf_event_nmi_handler(struct notifier_block *self, | |||
| 1214 | case DIE_NMI: | 1222 | case DIE_NMI: |
| 1215 | case DIE_NMI_IPI: | 1223 | case DIE_NMI_IPI: |
| 1216 | break; | 1224 | break; |
| 1217 | 1225 | case DIE_NMIUNKNOWN: | |
| 1226 | this_nmi = percpu_read(irq_stat.__nmi_count); | ||
| 1227 | if (this_nmi != __get_cpu_var(pmu_nmi).marked) | ||
| 1228 | /* let the kernel handle the unknown nmi */ | ||
| 1229 | return NOTIFY_DONE; | ||
| 1230 | /* | ||
| 1231 | * This one is a PMU back-to-back nmi. Two events | ||
| 1232 | * trigger 'simultaneously' raising two back-to-back | ||
| 1233 | * NMIs. If the first NMI handles both, the latter | ||
| 1234 | * will be empty and daze the CPU. So, we drop it to | ||
| 1235 | * avoid false-positive 'unknown nmi' messages. | ||
| 1236 | */ | ||
| 1237 | return NOTIFY_STOP; | ||
| 1218 | default: | 1238 | default: |
| 1219 | return NOTIFY_DONE; | 1239 | return NOTIFY_DONE; |
| 1220 | } | 1240 | } |
| 1221 | 1241 | ||
| 1222 | regs = args->regs; | ||
| 1223 | |||
| 1224 | apic_write(APIC_LVTPC, APIC_DM_NMI); | 1242 | apic_write(APIC_LVTPC, APIC_DM_NMI); |
| 1225 | /* | 1243 | |
| 1226 | * Can't rely on the handled return value to say it was our NMI, two | 1244 | handled = x86_pmu.handle_irq(args->regs); |
| 1227 | * events could trigger 'simultaneously' raising two back-to-back NMIs. | 1245 | if (!handled) |
| 1228 | * | 1246 | return NOTIFY_DONE; |
| 1229 | * If the first NMI handles both, the latter will be empty and daze | 1247 | |
| 1230 | * the CPU. | 1248 | this_nmi = percpu_read(irq_stat.__nmi_count); |
| 1231 | */ | 1249 | if ((handled > 1) || |
| 1232 | x86_pmu.handle_irq(regs); | 1250 | /* the next nmi could be a back-to-back nmi */ |
| 1251 | ((__get_cpu_var(pmu_nmi).marked == this_nmi) && | ||
| 1252 | (__get_cpu_var(pmu_nmi).handled > 1))) { | ||
| 1253 | /* | ||
| 1254 | * We could have two subsequent back-to-back nmis: The | ||
| 1255 | * first handles more than one counter, the 2nd | ||
| 1256 | * handles only one counter and the 3rd handles no | ||
| 1257 | * counter. | ||
| 1258 | * | ||
| 1259 | * This is the 2nd nmi because the previous was | ||
| 1260 | * handling more than one counter. We will mark the | ||
| 1261 | * next (3rd) and then drop it if unhandled. | ||
| 1262 | */ | ||
| 1263 | __get_cpu_var(pmu_nmi).marked = this_nmi + 1; | ||
| 1264 | __get_cpu_var(pmu_nmi).handled = handled; | ||
| 1265 | } | ||
| 1233 | 1266 | ||
| 1234 | return NOTIFY_STOP; | 1267 | return NOTIFY_STOP; |
| 1235 | } | 1268 | } |
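The perf_event.c rework above keeps per-CPU state recording which NMI count might be the empty half of a back-to-back PMU pair, so a later "unknown" NMI with that count is swallowed instead of producing a false positive. A compact single-CPU sketch of the marking logic; pmu_nmi_state mirrors the struct in the hunk, while nmi_handler() and its return convention are illustrative, not the kernel interface:

#include <stdio.h>

/* Per-CPU in the kernel; one instance is enough for the sketch. */
struct pmu_nmi_state {
	unsigned int marked;    /* NMI count that may arrive with no work left */
	int handled;            /* events handled by the previous NMI */
};

static struct pmu_nmi_state pmu_nmi;
static unsigned int nmi_count;

/* Returns 1 if the NMI is consumed, 0 if it should be reported as unknown. */
static int nmi_handler(int events_handled)
{
	unsigned int this_nmi = ++nmi_count;

	if (events_handled == 0)
		/* Unknown NMI: swallow it only if we predicted it. */
		return this_nmi == pmu_nmi.marked;

	if (events_handled > 1 ||
	    (pmu_nmi.marked == this_nmi && pmu_nmi.handled > 1)) {
		/* More than one counter overflowed (or this is the second
		 * of a back-to-back pair), so the next NMI may have nothing
		 * left to do: mark it in advance. */
		pmu_nmi.marked = this_nmi + 1;
		pmu_nmi.handled = events_handled;
	}
	return 1;
}

int main(void)
{
	printf("%d\n", nmi_handler(2)); /* two counters: marks the next NMI */
	printf("%d\n", nmi_handler(0)); /* empty back-to-back NMI: swallowed */
	printf("%d\n", nmi_handler(0)); /* genuinely unknown NMI: not ours */
	return 0;
}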
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index d8d86d014008..ee05c90012d2 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
| @@ -712,7 +712,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) | |||
| 712 | struct perf_sample_data data; | 712 | struct perf_sample_data data; |
| 713 | struct cpu_hw_events *cpuc; | 713 | struct cpu_hw_events *cpuc; |
| 714 | int bit, loops; | 714 | int bit, loops; |
| 715 | u64 ack, status; | 715 | u64 status; |
| 716 | int handled = 0; | ||
| 716 | 717 | ||
| 717 | perf_sample_data_init(&data, 0); | 718 | perf_sample_data_init(&data, 0); |
| 718 | 719 | ||
| @@ -728,6 +729,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) | |||
| 728 | 729 | ||
| 729 | loops = 0; | 730 | loops = 0; |
| 730 | again: | 731 | again: |
| 732 | intel_pmu_ack_status(status); | ||
| 731 | if (++loops > 100) { | 733 | if (++loops > 100) { |
| 732 | WARN_ONCE(1, "perfevents: irq loop stuck!\n"); | 734 | WARN_ONCE(1, "perfevents: irq loop stuck!\n"); |
| 733 | perf_event_print_debug(); | 735 | perf_event_print_debug(); |
| @@ -736,19 +738,22 @@ again: | |||
| 736 | } | 738 | } |
| 737 | 739 | ||
| 738 | inc_irq_stat(apic_perf_irqs); | 740 | inc_irq_stat(apic_perf_irqs); |
| 739 | ack = status; | ||
| 740 | 741 | ||
| 741 | intel_pmu_lbr_read(); | 742 | intel_pmu_lbr_read(); |
| 742 | 743 | ||
| 743 | /* | 744 | /* |
| 744 | * PEBS overflow sets bit 62 in the global status register | 745 | * PEBS overflow sets bit 62 in the global status register |
| 745 | */ | 746 | */ |
| 746 | if (__test_and_clear_bit(62, (unsigned long *)&status)) | 747 | if (__test_and_clear_bit(62, (unsigned long *)&status)) { |
| 748 | handled++; | ||
| 747 | x86_pmu.drain_pebs(regs); | 749 | x86_pmu.drain_pebs(regs); |
| 750 | } | ||
| 748 | 751 | ||
| 749 | for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { | 752 | for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { |
| 750 | struct perf_event *event = cpuc->events[bit]; | 753 | struct perf_event *event = cpuc->events[bit]; |
| 751 | 754 | ||
| 755 | handled++; | ||
| 756 | |||
| 752 | if (!test_bit(bit, cpuc->active_mask)) | 757 | if (!test_bit(bit, cpuc->active_mask)) |
| 753 | continue; | 758 | continue; |
| 754 | 759 | ||
| @@ -761,8 +766,6 @@ again: | |||
| 761 | x86_pmu_stop(event); | 766 | x86_pmu_stop(event); |
| 762 | } | 767 | } |
| 763 | 768 | ||
| 764 | intel_pmu_ack_status(ack); | ||
| 765 | |||
| 766 | /* | 769 | /* |
| 767 | * Repeat if there is more work to be done: | 770 | * Repeat if there is more work to be done: |
| 768 | */ | 771 | */ |
| @@ -772,7 +775,7 @@ again: | |||
| 772 | 775 | ||
| 773 | done: | 776 | done: |
| 774 | intel_pmu_enable_all(0); | 777 | intel_pmu_enable_all(0); |
| 775 | return 1; | 778 | return handled; |
| 776 | } | 779 | } |
| 777 | 780 | ||
| 778 | static struct event_constraint * | 781 | static struct event_constraint * |
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c index 7e578e9cc58b..b560db3305be 100644 --- a/arch/x86/kernel/cpu/perf_event_p4.c +++ b/arch/x86/kernel/cpu/perf_event_p4.c | |||
| @@ -692,7 +692,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs) | |||
| 692 | inc_irq_stat(apic_perf_irqs); | 692 | inc_irq_stat(apic_perf_irqs); |
| 693 | } | 693 | } |
| 694 | 694 | ||
| 695 | return handled > 0; | 695 | return handled; |
| 696 | } | 696 | } |
| 697 | 697 | ||
| 698 | /* | 698 | /* |
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c index a874495b3673..e2a595257390 100644 --- a/arch/x86/kernel/trampoline.c +++ b/arch/x86/kernel/trampoline.c | |||
| @@ -45,8 +45,7 @@ void __init setup_trampoline_page_table(void) | |||
| 45 | /* Copy kernel address range */ | 45 | /* Copy kernel address range */ |
| 46 | clone_pgd_range(trampoline_pg_dir + KERNEL_PGD_BOUNDARY, | 46 | clone_pgd_range(trampoline_pg_dir + KERNEL_PGD_BOUNDARY, |
| 47 | swapper_pg_dir + KERNEL_PGD_BOUNDARY, | 47 | swapper_pg_dir + KERNEL_PGD_BOUNDARY, |
| 48 | min_t(unsigned long, KERNEL_PGD_PTRS, | 48 | KERNEL_PGD_PTRS); |
| 49 | KERNEL_PGD_BOUNDARY)); | ||
| 50 | 49 | ||
| 51 | /* Initialize low mappings */ | 50 | /* Initialize low mappings */ |
| 52 | clone_pgd_range(trampoline_pg_dir, | 51 | clone_pgd_range(trampoline_pg_dir, |
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index d632934cb638..26a863a9c2a8 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c | |||
| @@ -655,7 +655,7 @@ void restore_sched_clock_state(void) | |||
| 655 | 655 | ||
| 656 | local_irq_save(flags); | 656 | local_irq_save(flags); |
| 657 | 657 | ||
| 658 | get_cpu_var(cyc2ns_offset) = 0; | 658 | __get_cpu_var(cyc2ns_offset) = 0; |
| 659 | offset = cyc2ns_suspend - sched_clock(); | 659 | offset = cyc2ns_suspend - sched_clock(); |
| 660 | 660 | ||
| 661 | for_each_possible_cpu(cpu) | 661 | for_each_possible_cpu(cpu) |
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index b38bd8b92aa6..66ca98aafdd6 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c | |||
| @@ -1870,17 +1870,16 @@ static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt, | |||
| 1870 | struct x86_emulate_ops *ops) | 1870 | struct x86_emulate_ops *ops) |
| 1871 | { | 1871 | { |
| 1872 | struct decode_cache *c = &ctxt->decode; | 1872 | struct decode_cache *c = &ctxt->decode; |
| 1873 | u64 old = c->dst.orig_val; | 1873 | u64 old = c->dst.orig_val64; |
| 1874 | 1874 | ||
| 1875 | if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) || | 1875 | if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) || |
| 1876 | ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) { | 1876 | ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) { |
| 1877 | |||
| 1878 | c->regs[VCPU_REGS_RAX] = (u32) (old >> 0); | 1877 | c->regs[VCPU_REGS_RAX] = (u32) (old >> 0); |
| 1879 | c->regs[VCPU_REGS_RDX] = (u32) (old >> 32); | 1878 | c->regs[VCPU_REGS_RDX] = (u32) (old >> 32); |
| 1880 | ctxt->eflags &= ~EFLG_ZF; | 1879 | ctxt->eflags &= ~EFLG_ZF; |
| 1881 | } else { | 1880 | } else { |
| 1882 | c->dst.val = ((u64)c->regs[VCPU_REGS_RCX] << 32) | | 1881 | c->dst.val64 = ((u64)c->regs[VCPU_REGS_RCX] << 32) | |
| 1883 | (u32) c->regs[VCPU_REGS_RBX]; | 1882 | (u32) c->regs[VCPU_REGS_RBX]; |
| 1884 | 1883 | ||
| 1885 | ctxt->eflags |= EFLG_ZF; | 1884 | ctxt->eflags |= EFLG_ZF; |
| 1886 | } | 1885 | } |
| @@ -2616,7 +2615,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) | |||
| 2616 | c->src.valptr, c->src.bytes); | 2615 | c->src.valptr, c->src.bytes); |
| 2617 | if (rc != X86EMUL_CONTINUE) | 2616 | if (rc != X86EMUL_CONTINUE) |
| 2618 | goto done; | 2617 | goto done; |
| 2619 | c->src.orig_val = c->src.val; | 2618 | c->src.orig_val64 = c->src.val64; |
| 2620 | } | 2619 | } |
| 2621 | 2620 | ||
| 2622 | if (c->src2.type == OP_MEM) { | 2621 | if (c->src2.type == OP_MEM) { |
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c index 8d10c063d7f2..4b7b73ce2098 100644 --- a/arch/x86/kvm/i8259.c +++ b/arch/x86/kvm/i8259.c | |||
| @@ -64,6 +64,9 @@ static void pic_unlock(struct kvm_pic *s) | |||
| 64 | if (!found) | 64 | if (!found) |
| 65 | found = s->kvm->bsp_vcpu; | 65 | found = s->kvm->bsp_vcpu; |
| 66 | 66 | ||
| 67 | if (!found) | ||
| 68 | return; | ||
| 69 | |||
| 67 | kvm_vcpu_kick(found); | 70 | kvm_vcpu_kick(found); |
| 68 | } | 71 | } |
| 69 | } | 72 | } |
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h index ffed06871c5c..63c314502993 100644 --- a/arch/x86/kvm/irq.h +++ b/arch/x86/kvm/irq.h | |||
| @@ -43,7 +43,6 @@ struct kvm_kpic_state { | |||
| 43 | u8 irr; /* interrupt request register */ | 43 | u8 irr; /* interrupt request register */ |
| 44 | u8 imr; /* interrupt mask register */ | 44 | u8 imr; /* interrupt mask register */ |
| 45 | u8 isr; /* interrupt service register */ | 45 | u8 isr; /* interrupt service register */ |
| 46 | u8 isr_ack; /* interrupt ack detection */ | ||
| 47 | u8 priority_add; /* highest irq priority */ | 46 | u8 priority_add; /* highest irq priority */ |
| 48 | u8 irq_base; | 47 | u8 irq_base; |
| 49 | u8 read_reg_select; | 48 | u8 read_reg_select; |
| @@ -56,6 +55,7 @@ struct kvm_kpic_state { | |||
| 56 | u8 init4; /* true if 4 byte init */ | 55 | u8 init4; /* true if 4 byte init */ |
| 57 | u8 elcr; /* PIIX edge/trigger selection */ | 56 | u8 elcr; /* PIIX edge/trigger selection */ |
| 58 | u8 elcr_mask; | 57 | u8 elcr_mask; |
| 58 | u8 isr_ack; /* interrupt ack detection */ | ||
| 59 | struct kvm_pic *pics_state; | 59 | struct kvm_pic *pics_state; |
| 60 | }; | 60 | }; |
| 61 | 61 | ||
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c index 84e236ce76ba..72fc70cf6184 100644 --- a/arch/x86/mm/iomap_32.c +++ b/arch/x86/mm/iomap_32.c | |||
| @@ -74,7 +74,7 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot) | |||
| 74 | /* | 74 | /* |
| 75 | * Map 'pfn' using fixed map 'type' and protections 'prot' | 75 | * Map 'pfn' using fixed map 'type' and protections 'prot' |
| 76 | */ | 76 | */ |
| 77 | void * | 77 | void __iomem * |
| 78 | iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot) | 78 | iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot) |
| 79 | { | 79 | { |
| 80 | /* | 80 | /* |
| @@ -86,12 +86,12 @@ iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot) | |||
| 86 | if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC)) | 86 | if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC)) |
| 87 | prot = PAGE_KERNEL_UC_MINUS; | 87 | prot = PAGE_KERNEL_UC_MINUS; |
| 88 | 88 | ||
| 89 | return kmap_atomic_prot_pfn(pfn, type, prot); | 89 | return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, type, prot); |
| 90 | } | 90 | } |
| 91 | EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn); | 91 | EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn); |
| 92 | 92 | ||
| 93 | void | 93 | void |
| 94 | iounmap_atomic(void *kvaddr, enum km_type type) | 94 | iounmap_atomic(void __iomem *kvaddr, enum km_type type) |
| 95 | { | 95 | { |
| 96 | unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; | 96 | unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; |
| 97 | enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); | 97 | enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); |
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index f6b48f6c5951..cfe4faabb0f6 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c | |||
| @@ -568,8 +568,13 @@ static int __init init_sysfs(void) | |||
| 568 | int error; | 568 | int error; |
| 569 | 569 | ||
| 570 | error = sysdev_class_register(&oprofile_sysclass); | 570 | error = sysdev_class_register(&oprofile_sysclass); |
| 571 | if (!error) | 571 | if (error) |
| 572 | error = sysdev_register(&device_oprofile); | 572 | return error; |
| 573 | |||
| 574 | error = sysdev_register(&device_oprofile); | ||
| 575 | if (error) | ||
| 576 | sysdev_class_unregister(&oprofile_sysclass); | ||
| 577 | |||
| 573 | return error; | 578 | return error; |
| 574 | } | 579 | } |
| 575 | 580 | ||
| @@ -580,8 +585,10 @@ static void exit_sysfs(void) | |||
| 580 | } | 585 | } |
| 581 | 586 | ||
| 582 | #else | 587 | #else |
| 583 | #define init_sysfs() do { } while (0) | 588 | |
| 584 | #define exit_sysfs() do { } while (0) | 589 | static inline int init_sysfs(void) { return 0; } |
| 590 | static inline void exit_sysfs(void) { } | ||
| 591 | |||
| 585 | #endif /* CONFIG_PM */ | 592 | #endif /* CONFIG_PM */ |
| 586 | 593 | ||
| 587 | static int __init p4_init(char **cpu_type) | 594 | static int __init p4_init(char **cpu_type) |
| @@ -695,6 +702,8 @@ int __init op_nmi_init(struct oprofile_operations *ops) | |||
| 695 | char *cpu_type = NULL; | 702 | char *cpu_type = NULL; |
| 696 | int ret = 0; | 703 | int ret = 0; |
| 697 | 704 | ||
| 705 | using_nmi = 0; | ||
| 706 | |||
| 698 | if (!cpu_has_apic) | 707 | if (!cpu_has_apic) |
| 699 | return -ENODEV; | 708 | return -ENODEV; |
| 700 | 709 | ||
| @@ -774,7 +783,10 @@ int __init op_nmi_init(struct oprofile_operations *ops) | |||
| 774 | 783 | ||
| 775 | mux_init(ops); | 784 | mux_init(ops); |
| 776 | 785 | ||
| 777 | init_sysfs(); | 786 | ret = init_sysfs(); |
| 787 | if (ret) | ||
| 788 | return ret; | ||
| 789 | |||
| 778 | using_nmi = 1; | 790 | using_nmi = 1; |
| 779 | printk(KERN_INFO "oprofile: using NMI interrupt.\n"); | 791 | printk(KERN_INFO "oprofile: using NMI interrupt.\n"); |
| 780 | return 0; | 792 | return 0; |
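The nmi_int.c hunk above makes init_sysfs() unwind the class registration when the device registration fails, and op_nmi_init() now checks its result instead of ignoring it. A generic sketch of that register-then-roll-back ordering; register_class(), register_device() and unregister_class() are placeholders, not the oprofile sysdev API:

#include <stdio.h>

static int register_class(void)          { return 0; }            /* 0 = ok */
static int register_device(int fail)     { return fail ? -1 : 0; }
static void unregister_class(void)       { printf("class unregistered\n"); }

/* Register A then B; if B fails, undo A so nothing is left half set up,
 * and return the error to the caller so it can bail out too. */
static int init_sysfs(int fail_device)
{
	int error = register_class();

	if (error)
		return error;

	error = register_device(fail_device);
	if (error)
		unregister_class();     /* roll back the first step */

	return error;
}

int main(void)
{
	printf("ok:   %d\n", init_sysfs(0));
	printf("fail: %d\n", init_sysfs(1));
	return 0;
}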
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index a6809645d212..2fef1ef931a0 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c | |||
| @@ -966,7 +966,7 @@ blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup) | |||
| 966 | 966 | ||
| 967 | /* Currently we do not support hierarchy deeper than two level (0,1) */ | 967 | /* Currently we do not support hierarchy deeper than two level (0,1) */ |
| 968 | if (parent != cgroup->top_cgroup) | 968 | if (parent != cgroup->top_cgroup) |
| 969 | return ERR_PTR(-EINVAL); | 969 | return ERR_PTR(-EPERM); |
| 970 | 970 | ||
| 971 | blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL); | 971 | blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL); |
| 972 | if (!blkcg) | 972 | if (!blkcg) |
diff --git a/block/blk-core.c b/block/blk-core.c index ee1a1e7e63cc..32a1c123dfb3 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
| @@ -1198,9 +1198,9 @@ static int __make_request(struct request_queue *q, struct bio *bio) | |||
| 1198 | int el_ret; | 1198 | int el_ret; |
| 1199 | unsigned int bytes = bio->bi_size; | 1199 | unsigned int bytes = bio->bi_size; |
| 1200 | const unsigned short prio = bio_prio(bio); | 1200 | const unsigned short prio = bio_prio(bio); |
| 1201 | const bool sync = (bio->bi_rw & REQ_SYNC); | 1201 | const bool sync = !!(bio->bi_rw & REQ_SYNC); |
| 1202 | const bool unplug = (bio->bi_rw & REQ_UNPLUG); | 1202 | const bool unplug = !!(bio->bi_rw & REQ_UNPLUG); |
| 1203 | const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK; | 1203 | const unsigned long ff = bio->bi_rw & REQ_FAILFAST_MASK; |
| 1204 | int rw_flags; | 1204 | int rw_flags; |
| 1205 | 1205 | ||
| 1206 | if ((bio->bi_rw & REQ_HARDBARRIER) && | 1206 | if ((bio->bi_rw & REQ_HARDBARRIER) && |
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 001ab18078f5..0749b89c6885 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c | |||
| @@ -511,6 +511,7 @@ int blk_register_queue(struct gendisk *disk) | |||
| 511 | kobject_uevent(&q->kobj, KOBJ_REMOVE); | 511 | kobject_uevent(&q->kobj, KOBJ_REMOVE); |
| 512 | kobject_del(&q->kobj); | 512 | kobject_del(&q->kobj); |
| 513 | blk_trace_remove_sysfs(disk_to_dev(disk)); | 513 | blk_trace_remove_sysfs(disk_to_dev(disk)); |
| 514 | kobject_put(&dev->kobj); | ||
| 514 | return ret; | 515 | return ret; |
| 515 | } | 516 | } |
| 516 | 517 | ||
diff --git a/block/blk.h b/block/blk.h index 6e7dc87141e4..d6b911ac002c 100644 --- a/block/blk.h +++ b/block/blk.h | |||
| @@ -142,14 +142,18 @@ static inline int queue_congestion_off_threshold(struct request_queue *q) | |||
| 142 | 142 | ||
| 143 | static inline int blk_cpu_to_group(int cpu) | 143 | static inline int blk_cpu_to_group(int cpu) |
| 144 | { | 144 | { |
| 145 | int group = NR_CPUS; | ||
| 145 | #ifdef CONFIG_SCHED_MC | 146 | #ifdef CONFIG_SCHED_MC |
| 146 | const struct cpumask *mask = cpu_coregroup_mask(cpu); | 147 | const struct cpumask *mask = cpu_coregroup_mask(cpu); |
| 147 | return cpumask_first(mask); | 148 | group = cpumask_first(mask); |
| 148 | #elif defined(CONFIG_SCHED_SMT) | 149 | #elif defined(CONFIG_SCHED_SMT) |
| 149 | return cpumask_first(topology_thread_cpumask(cpu)); | 150 | group = cpumask_first(topology_thread_cpumask(cpu)); |
| 150 | #else | 151 | #else |
| 151 | return cpu; | 152 | return cpu; |
| 152 | #endif | 153 | #endif |
| 154 | if (likely(group < NR_CPUS)) | ||
| 155 | return group; | ||
| 156 | return cpu; | ||
| 153 | } | 157 | } |
| 154 | 158 | ||
| 155 | /* | 159 | /* |
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index eb4086f7dfef..f65c6f01c475 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c | |||
| @@ -30,6 +30,7 @@ static const int cfq_slice_sync = HZ / 10; | |||
| 30 | static int cfq_slice_async = HZ / 25; | 30 | static int cfq_slice_async = HZ / 25; |
| 31 | static const int cfq_slice_async_rq = 2; | 31 | static const int cfq_slice_async_rq = 2; |
| 32 | static int cfq_slice_idle = HZ / 125; | 32 | static int cfq_slice_idle = HZ / 125; |
| 33 | static int cfq_group_idle = HZ / 125; | ||
| 33 | static const int cfq_target_latency = HZ * 3/10; /* 300 ms */ | 34 | static const int cfq_target_latency = HZ * 3/10; /* 300 ms */ |
| 34 | static const int cfq_hist_divisor = 4; | 35 | static const int cfq_hist_divisor = 4; |
| 35 | 36 | ||
| @@ -147,6 +148,8 @@ struct cfq_queue { | |||
| 147 | struct cfq_queue *new_cfqq; | 148 | struct cfq_queue *new_cfqq; |
| 148 | struct cfq_group *cfqg; | 149 | struct cfq_group *cfqg; |
| 149 | struct cfq_group *orig_cfqg; | 150 | struct cfq_group *orig_cfqg; |
| 151 | /* Number of sectors dispatched from queue in single dispatch round */ | ||
| 152 | unsigned long nr_sectors; | ||
| 150 | }; | 153 | }; |
| 151 | 154 | ||
| 152 | /* | 155 | /* |
| @@ -198,6 +201,8 @@ struct cfq_group { | |||
| 198 | struct hlist_node cfqd_node; | 201 | struct hlist_node cfqd_node; |
| 199 | atomic_t ref; | 202 | atomic_t ref; |
| 200 | #endif | 203 | #endif |
| 204 | /* number of requests that are on the dispatch list or inside driver */ | ||
| 205 | int dispatched; | ||
| 201 | }; | 206 | }; |
| 202 | 207 | ||
| 203 | /* | 208 | /* |
| @@ -271,6 +276,7 @@ struct cfq_data { | |||
| 271 | unsigned int cfq_slice[2]; | 276 | unsigned int cfq_slice[2]; |
| 272 | unsigned int cfq_slice_async_rq; | 277 | unsigned int cfq_slice_async_rq; |
| 273 | unsigned int cfq_slice_idle; | 278 | unsigned int cfq_slice_idle; |
| 279 | unsigned int cfq_group_idle; | ||
| 274 | unsigned int cfq_latency; | 280 | unsigned int cfq_latency; |
| 275 | unsigned int cfq_group_isolation; | 281 | unsigned int cfq_group_isolation; |
| 276 | 282 | ||
| @@ -378,6 +384,21 @@ CFQ_CFQQ_FNS(wait_busy); | |||
| 378 | &cfqg->service_trees[i][j]: NULL) \ | 384 | &cfqg->service_trees[i][j]: NULL) \ |
| 379 | 385 | ||
| 380 | 386 | ||
| 387 | static inline bool iops_mode(struct cfq_data *cfqd) | ||
| 388 | { | ||
| 389 | /* | ||
| 390 | * If we are not idling on queues and it is a NCQ drive, parallel | ||
| 391 | * execution of requests is on and measuring time is not possible | ||
| 392 | * in most of the cases until and unless we drive shallower queue | ||
| 393 | * depths and that becomes a performance bottleneck. In such cases | ||
| 394 | * switch to start providing fairness in terms of number of IOs. | ||
| 395 | */ | ||
| 396 | if (!cfqd->cfq_slice_idle && cfqd->hw_tag) | ||
| 397 | return true; | ||
| 398 | else | ||
| 399 | return false; | ||
| 400 | } | ||
| 401 | |||
| 381 | static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq) | 402 | static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq) |
| 382 | { | 403 | { |
| 383 | if (cfq_class_idle(cfqq)) | 404 | if (cfq_class_idle(cfqq)) |
| @@ -906,7 +927,6 @@ static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq) | |||
| 906 | slice_used = cfqq->allocated_slice; | 927 | slice_used = cfqq->allocated_slice; |
| 907 | } | 928 | } |
| 908 | 929 | ||
| 909 | cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u", slice_used); | ||
| 910 | return slice_used; | 930 | return slice_used; |
| 911 | } | 931 | } |
| 912 | 932 | ||
| @@ -914,19 +934,21 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg, | |||
| 914 | struct cfq_queue *cfqq) | 934 | struct cfq_queue *cfqq) |
| 915 | { | 935 | { |
| 916 | struct cfq_rb_root *st = &cfqd->grp_service_tree; | 936 | struct cfq_rb_root *st = &cfqd->grp_service_tree; |
| 917 | unsigned int used_sl, charge_sl; | 937 | unsigned int used_sl, charge; |
| 918 | int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg) | 938 | int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg) |
| 919 | - cfqg->service_tree_idle.count; | 939 | - cfqg->service_tree_idle.count; |
| 920 | 940 | ||
| 921 | BUG_ON(nr_sync < 0); | 941 | BUG_ON(nr_sync < 0); |
| 922 | used_sl = charge_sl = cfq_cfqq_slice_usage(cfqq); | 942 | used_sl = charge = cfq_cfqq_slice_usage(cfqq); |
| 923 | 943 | ||
| 924 | if (!cfq_cfqq_sync(cfqq) && !nr_sync) | 944 | if (iops_mode(cfqd)) |
| 925 | charge_sl = cfqq->allocated_slice; | 945 | charge = cfqq->slice_dispatch; |
| 946 | else if (!cfq_cfqq_sync(cfqq) && !nr_sync) | ||
| 947 | charge = cfqq->allocated_slice; | ||
| 926 | 948 | ||
| 927 | /* Can't update vdisktime while group is on service tree */ | 949 | /* Can't update vdisktime while group is on service tree */ |
| 928 | cfq_rb_erase(&cfqg->rb_node, st); | 950 | cfq_rb_erase(&cfqg->rb_node, st); |
| 929 | cfqg->vdisktime += cfq_scale_slice(charge_sl, cfqg); | 951 | cfqg->vdisktime += cfq_scale_slice(charge, cfqg); |
| 930 | __cfq_group_service_tree_add(st, cfqg); | 952 | __cfq_group_service_tree_add(st, cfqg); |
| 931 | 953 | ||
| 932 | /* This group is being expired. Save the context */ | 954 | /* This group is being expired. Save the context */ |
| @@ -940,6 +962,9 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg, | |||
| 940 | 962 | ||
| 941 | cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime, | 963 | cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime, |
| 942 | st->min_vdisktime); | 964 | st->min_vdisktime); |
| 965 | cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u" | ||
| 966 | " sect=%u", used_sl, cfqq->slice_dispatch, charge, | ||
| 967 | iops_mode(cfqd), cfqq->nr_sectors); | ||
| 943 | cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl); | 968 | cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl); |
| 944 | cfq_blkiocg_set_start_empty_time(&cfqg->blkg); | 969 | cfq_blkiocg_set_start_empty_time(&cfqg->blkg); |
| 945 | } | 970 | } |
| @@ -1587,6 +1612,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd, | |||
| 1587 | cfqq->allocated_slice = 0; | 1612 | cfqq->allocated_slice = 0; |
| 1588 | cfqq->slice_end = 0; | 1613 | cfqq->slice_end = 0; |
| 1589 | cfqq->slice_dispatch = 0; | 1614 | cfqq->slice_dispatch = 0; |
| 1615 | cfqq->nr_sectors = 0; | ||
| 1590 | 1616 | ||
| 1591 | cfq_clear_cfqq_wait_request(cfqq); | 1617 | cfq_clear_cfqq_wait_request(cfqq); |
| 1592 | cfq_clear_cfqq_must_dispatch(cfqq); | 1618 | cfq_clear_cfqq_must_dispatch(cfqq); |
| @@ -1839,6 +1865,9 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq) | |||
| 1839 | BUG_ON(!service_tree); | 1865 | BUG_ON(!service_tree); |
| 1840 | BUG_ON(!service_tree->count); | 1866 | BUG_ON(!service_tree->count); |
| 1841 | 1867 | ||
| 1868 | if (!cfqd->cfq_slice_idle) | ||
| 1869 | return false; | ||
| 1870 | |||
| 1842 | /* We never do for idle class queues. */ | 1871 | /* We never do for idle class queues. */ |
| 1843 | if (prio == IDLE_WORKLOAD) | 1872 | if (prio == IDLE_WORKLOAD) |
| 1844 | return false; | 1873 | return false; |
| @@ -1863,7 +1892,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd) | |||
| 1863 | { | 1892 | { |
| 1864 | struct cfq_queue *cfqq = cfqd->active_queue; | 1893 | struct cfq_queue *cfqq = cfqd->active_queue; |
| 1865 | struct cfq_io_context *cic; | 1894 | struct cfq_io_context *cic; |
| 1866 | unsigned long sl; | 1895 | unsigned long sl, group_idle = 0; |
| 1867 | 1896 | ||
| 1868 | /* | 1897 | /* |
| 1869 | * SSD device without seek penalty, disable idling. But only do so | 1898 | * SSD device without seek penalty, disable idling. But only do so |
| @@ -1879,8 +1908,13 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd) | |||
| 1879 | /* | 1908 | /* |
| 1880 | * idle is disabled, either manually or by past process history | 1909 | * idle is disabled, either manually or by past process history |
| 1881 | */ | 1910 | */ |
| 1882 | if (!cfqd->cfq_slice_idle || !cfq_should_idle(cfqd, cfqq)) | 1911 | if (!cfq_should_idle(cfqd, cfqq)) { |
| 1883 | return; | 1912 | /* no queue idling. Check for group idling */ |
| 1913 | if (cfqd->cfq_group_idle) | ||
| 1914 | group_idle = cfqd->cfq_group_idle; | ||
| 1915 | else | ||
| 1916 | return; | ||
| 1917 | } | ||
| 1884 | 1918 | ||
| 1885 | /* | 1919 | /* |
| 1886 | * still active requests from this queue, don't idle | 1920 | * still active requests from this queue, don't idle |
| @@ -1907,13 +1941,21 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd) | |||
| 1907 | return; | 1941 | return; |
| 1908 | } | 1942 | } |
| 1909 | 1943 | ||
| 1944 | /* There are other queues in the group, don't do group idle */ | ||
| 1945 | if (group_idle && cfqq->cfqg->nr_cfqq > 1) | ||
| 1946 | return; | ||
| 1947 | |||
| 1910 | cfq_mark_cfqq_wait_request(cfqq); | 1948 | cfq_mark_cfqq_wait_request(cfqq); |
| 1911 | 1949 | ||
| 1912 | sl = cfqd->cfq_slice_idle; | 1950 | if (group_idle) |
| 1951 | sl = cfqd->cfq_group_idle; | ||
| 1952 | else | ||
| 1953 | sl = cfqd->cfq_slice_idle; | ||
| 1913 | 1954 | ||
| 1914 | mod_timer(&cfqd->idle_slice_timer, jiffies + sl); | 1955 | mod_timer(&cfqd->idle_slice_timer, jiffies + sl); |
| 1915 | cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg); | 1956 | cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg); |
| 1916 | cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl); | 1957 | cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl, |
| 1958 | group_idle ? 1 : 0); | ||
| 1917 | } | 1959 | } |
| 1918 | 1960 | ||
| 1919 | /* | 1961 | /* |
| @@ -1929,9 +1971,11 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq) | |||
| 1929 | cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq); | 1971 | cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq); |
| 1930 | cfq_remove_request(rq); | 1972 | cfq_remove_request(rq); |
| 1931 | cfqq->dispatched++; | 1973 | cfqq->dispatched++; |
| 1974 | (RQ_CFQG(rq))->dispatched++; | ||
| 1932 | elv_dispatch_sort(q, rq); | 1975 | elv_dispatch_sort(q, rq); |
| 1933 | 1976 | ||
| 1934 | cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++; | 1977 | cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++; |
| 1978 | cfqq->nr_sectors += blk_rq_sectors(rq); | ||
| 1935 | cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq), | 1979 | cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq), |
| 1936 | rq_data_dir(rq), rq_is_sync(rq)); | 1980 | rq_data_dir(rq), rq_is_sync(rq)); |
| 1937 | } | 1981 | } |
| @@ -2198,7 +2242,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) | |||
| 2198 | cfqq = NULL; | 2242 | cfqq = NULL; |
| 2199 | goto keep_queue; | 2243 | goto keep_queue; |
| 2200 | } else | 2244 | } else |
| 2201 | goto expire; | 2245 | goto check_group_idle; |
| 2202 | } | 2246 | } |
| 2203 | 2247 | ||
| 2204 | /* | 2248 | /* |
| @@ -2226,8 +2270,23 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) | |||
| 2226 | * flight or is idling for a new request, allow either of these | 2270 | * flight or is idling for a new request, allow either of these |
| 2227 | * conditions to happen (or time out) before selecting a new queue. | 2271 | * conditions to happen (or time out) before selecting a new queue. |
| 2228 | */ | 2272 | */ |
| 2229 | if (timer_pending(&cfqd->idle_slice_timer) || | 2273 | if (timer_pending(&cfqd->idle_slice_timer)) { |
| 2230 | (cfqq->dispatched && cfq_should_idle(cfqd, cfqq))) { | 2274 | cfqq = NULL; |
| 2275 | goto keep_queue; | ||
| 2276 | } | ||
| 2277 | |||
| 2278 | if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) { | ||
| 2279 | cfqq = NULL; | ||
| 2280 | goto keep_queue; | ||
| 2281 | } | ||
| 2282 | |||
| 2283 | /* | ||
| 2284 | * If group idle is enabled and there are requests dispatched from | ||
| 2285 | * this group, wait for requests to complete. | ||
| 2286 | */ | ||
| 2287 | check_group_idle: | ||
| 2288 | if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 | ||
| 2289 | && cfqq->cfqg->dispatched) { | ||
| 2231 | cfqq = NULL; | 2290 | cfqq = NULL; |
| 2232 | goto keep_queue; | 2291 | goto keep_queue; |
| 2233 | } | 2292 | } |
| @@ -3375,6 +3434,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq) | |||
| 3375 | WARN_ON(!cfqq->dispatched); | 3434 | WARN_ON(!cfqq->dispatched); |
| 3376 | cfqd->rq_in_driver--; | 3435 | cfqd->rq_in_driver--; |
| 3377 | cfqq->dispatched--; | 3436 | cfqq->dispatched--; |
| 3437 | (RQ_CFQG(rq))->dispatched--; | ||
| 3378 | cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg, | 3438 | cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg, |
| 3379 | rq_start_time_ns(rq), rq_io_start_time_ns(rq), | 3439 | rq_start_time_ns(rq), rq_io_start_time_ns(rq), |
| 3380 | rq_data_dir(rq), rq_is_sync(rq)); | 3440 | rq_data_dir(rq), rq_is_sync(rq)); |
| @@ -3404,7 +3464,10 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq) | |||
| 3404 | * the queue. | 3464 | * the queue. |
| 3405 | */ | 3465 | */ |
| 3406 | if (cfq_should_wait_busy(cfqd, cfqq)) { | 3466 | if (cfq_should_wait_busy(cfqd, cfqq)) { |
| 3407 | cfqq->slice_end = jiffies + cfqd->cfq_slice_idle; | 3467 | unsigned long extend_sl = cfqd->cfq_slice_idle; |
| 3468 | if (!cfqd->cfq_slice_idle) | ||
| 3469 | extend_sl = cfqd->cfq_group_idle; | ||
| 3470 | cfqq->slice_end = jiffies + extend_sl; | ||
| 3408 | cfq_mark_cfqq_wait_busy(cfqq); | 3471 | cfq_mark_cfqq_wait_busy(cfqq); |
| 3409 | cfq_log_cfqq(cfqd, cfqq, "will busy wait"); | 3472 | cfq_log_cfqq(cfqd, cfqq, "will busy wait"); |
| 3410 | } | 3473 | } |
| @@ -3850,6 +3913,7 @@ static void *cfq_init_queue(struct request_queue *q) | |||
| 3850 | cfqd->cfq_slice[1] = cfq_slice_sync; | 3913 | cfqd->cfq_slice[1] = cfq_slice_sync; |
| 3851 | cfqd->cfq_slice_async_rq = cfq_slice_async_rq; | 3914 | cfqd->cfq_slice_async_rq = cfq_slice_async_rq; |
| 3852 | cfqd->cfq_slice_idle = cfq_slice_idle; | 3915 | cfqd->cfq_slice_idle = cfq_slice_idle; |
| 3916 | cfqd->cfq_group_idle = cfq_group_idle; | ||
| 3853 | cfqd->cfq_latency = 1; | 3917 | cfqd->cfq_latency = 1; |
| 3854 | cfqd->cfq_group_isolation = 0; | 3918 | cfqd->cfq_group_isolation = 0; |
| 3855 | cfqd->hw_tag = -1; | 3919 | cfqd->hw_tag = -1; |
| @@ -3922,6 +3986,7 @@ SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1); | |||
| 3922 | SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0); | 3986 | SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0); |
| 3923 | SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0); | 3987 | SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0); |
| 3924 | SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1); | 3988 | SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1); |
| 3989 | SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1); | ||
| 3925 | SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1); | 3990 | SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1); |
| 3926 | SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1); | 3991 | SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1); |
| 3927 | SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0); | 3992 | SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0); |
| @@ -3954,6 +4019,7 @@ STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0); | |||
| 3954 | STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, | 4019 | STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, |
| 3955 | UINT_MAX, 0); | 4020 | UINT_MAX, 0); |
| 3956 | STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1); | 4021 | STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1); |
| 4022 | STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1); | ||
| 3957 | STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1); | 4023 | STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1); |
| 3958 | STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1); | 4024 | STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1); |
| 3959 | STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, | 4025 | STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, |
| @@ -3975,6 +4041,7 @@ static struct elv_fs_entry cfq_attrs[] = { | |||
| 3975 | CFQ_ATTR(slice_async), | 4041 | CFQ_ATTR(slice_async), |
| 3976 | CFQ_ATTR(slice_async_rq), | 4042 | CFQ_ATTR(slice_async_rq), |
| 3977 | CFQ_ATTR(slice_idle), | 4043 | CFQ_ATTR(slice_idle), |
| 4044 | CFQ_ATTR(group_idle), | ||
| 3978 | CFQ_ATTR(low_latency), | 4045 | CFQ_ATTR(low_latency), |
| 3979 | CFQ_ATTR(group_isolation), | 4046 | CFQ_ATTR(group_isolation), |
| 3980 | __ATTR_NULL | 4047 | __ATTR_NULL |
| @@ -4028,6 +4095,12 @@ static int __init cfq_init(void) | |||
| 4028 | if (!cfq_slice_idle) | 4095 | if (!cfq_slice_idle) |
| 4029 | cfq_slice_idle = 1; | 4096 | cfq_slice_idle = 1; |
| 4030 | 4097 | ||
| 4098 | #ifdef CONFIG_CFQ_GROUP_IOSCHED | ||
| 4099 | if (!cfq_group_idle) | ||
| 4100 | cfq_group_idle = 1; | ||
| 4101 | #else | ||
| 4102 | cfq_group_idle = 0; | ||
| 4103 | #endif | ||
| 4031 | if (cfq_slab_setup()) | 4104 | if (cfq_slab_setup()) |
| 4032 | return -ENOMEM; | 4105 | return -ENOMEM; |
| 4033 | 4106 | ||
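The cfq-iosched.c changes above add a group_idle tunable and an iops_mode() helper: with slice idling disabled on an NCQ-capable drive, a group is charged by the number of requests it dispatched rather than by time used. A simplified sketch of how the charge is chosen in cfq_group_served(); the two structs here are trimmed-down stand-ins, not the scheduler's real ones:

#include <stdio.h>
#include <stdbool.h>

/* Minimal view of the state consulted when charging a group. */
struct cfqd { unsigned int slice_idle; int hw_tag; };
struct cfqq {
	unsigned int slice_used, slice_dispatch, allocated_slice;
	bool sync;
};

/* Charge in IOs when not idling on an NCQ drive, otherwise in time. */
static bool iops_mode(const struct cfqd *cfqd)
{
	return !cfqd->slice_idle && cfqd->hw_tag;
}

static unsigned int group_charge(const struct cfqd *cfqd,
				 const struct cfqq *q, int nr_sync)
{
	if (iops_mode(cfqd))
		return q->slice_dispatch;   /* number of requests dispatched */
	if (!q->sync && !nr_sync)
		return q->allocated_slice;  /* async-only group: full slice */
	return q->slice_used;               /* default: time actually used */
}

int main(void)
{
	struct cfqd d = { .slice_idle = 0, .hw_tag = 1 };
	struct cfqq q = { .slice_used = 8, .slice_dispatch = 32,
			  .allocated_slice = 10, .sync = true };

	printf("charge = %u\n", group_charge(&d, &q, 1));   /* 32: iops mode */
	d.slice_idle = 8;
	printf("charge = %u\n", group_charge(&d, &q, 1));   /* 8: time mode */
	return 0;
}

As in the hunk, only the accounting unit changes; the async-only fallback applies when time-based accounting is in effect, and the resulting charge still feeds the group's vdisktime scaling.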
diff --git a/block/elevator.c b/block/elevator.c index ec585c9554d3..205b09a5bd9e 100644 --- a/block/elevator.c +++ b/block/elevator.c | |||
| @@ -1009,18 +1009,19 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) | |||
| 1009 | { | 1009 | { |
| 1010 | struct elevator_queue *old_elevator, *e; | 1010 | struct elevator_queue *old_elevator, *e; |
| 1011 | void *data; | 1011 | void *data; |
| 1012 | int err; | ||
| 1012 | 1013 | ||
| 1013 | /* | 1014 | /* |
| 1014 | * Allocate new elevator | 1015 | * Allocate new elevator |
| 1015 | */ | 1016 | */ |
| 1016 | e = elevator_alloc(q, new_e); | 1017 | e = elevator_alloc(q, new_e); |
| 1017 | if (!e) | 1018 | if (!e) |
| 1018 | return 0; | 1019 | return -ENOMEM; |
| 1019 | 1020 | ||
| 1020 | data = elevator_init_queue(q, e); | 1021 | data = elevator_init_queue(q, e); |
| 1021 | if (!data) { | 1022 | if (!data) { |
| 1022 | kobject_put(&e->kobj); | 1023 | kobject_put(&e->kobj); |
| 1023 | return 0; | 1024 | return -ENOMEM; |
| 1024 | } | 1025 | } |
| 1025 | 1026 | ||
| 1026 | /* | 1027 | /* |
| @@ -1043,7 +1044,8 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) | |||
| 1043 | 1044 | ||
| 1044 | __elv_unregister_queue(old_elevator); | 1045 | __elv_unregister_queue(old_elevator); |
| 1045 | 1046 | ||
| 1046 | if (elv_register_queue(q)) | 1047 | err = elv_register_queue(q); |
| 1048 | if (err) | ||
| 1047 | goto fail_register; | 1049 | goto fail_register; |
| 1048 | 1050 | ||
| 1049 | /* | 1051 | /* |
| @@ -1056,7 +1058,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) | |||
| 1056 | 1058 | ||
| 1057 | blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name); | 1059 | blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name); |
| 1058 | 1060 | ||
| 1059 | return 1; | 1061 | return 0; |
| 1060 | 1062 | ||
| 1061 | fail_register: | 1063 | fail_register: |
| 1062 | /* | 1064 | /* |
| @@ -1071,17 +1073,19 @@ fail_register: | |||
| 1071 | queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q); | 1073 | queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q); |
| 1072 | spin_unlock_irq(q->queue_lock); | 1074 | spin_unlock_irq(q->queue_lock); |
| 1073 | 1075 | ||
| 1074 | return 0; | 1076 | return err; |
| 1075 | } | 1077 | } |
| 1076 | 1078 | ||
| 1077 | ssize_t elv_iosched_store(struct request_queue *q, const char *name, | 1079 | /* |
| 1078 | size_t count) | 1080 | * Switch this queue to the given IO scheduler. |
| 1081 | */ | ||
| 1082 | int elevator_change(struct request_queue *q, const char *name) | ||
| 1079 | { | 1083 | { |
| 1080 | char elevator_name[ELV_NAME_MAX]; | 1084 | char elevator_name[ELV_NAME_MAX]; |
| 1081 | struct elevator_type *e; | 1085 | struct elevator_type *e; |
| 1082 | 1086 | ||
| 1083 | if (!q->elevator) | 1087 | if (!q->elevator) |
| 1084 | return count; | 1088 | return -ENXIO; |
| 1085 | 1089 | ||
| 1086 | strlcpy(elevator_name, name, sizeof(elevator_name)); | 1090 | strlcpy(elevator_name, name, sizeof(elevator_name)); |
| 1087 | e = elevator_get(strstrip(elevator_name)); | 1091 | e = elevator_get(strstrip(elevator_name)); |
| @@ -1092,13 +1096,27 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name, | |||
| 1092 | 1096 | ||
| 1093 | if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) { | 1097 | if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) { |
| 1094 | elevator_put(e); | 1098 | elevator_put(e); |
| 1095 | return count; | 1099 | return 0; |
| 1096 | } | 1100 | } |
| 1097 | 1101 | ||
| 1098 | if (!elevator_switch(q, e)) | 1102 | return elevator_switch(q, e); |
| 1099 | printk(KERN_ERR "elevator: switch to %s failed\n", | 1103 | } |
| 1100 | elevator_name); | 1104 | EXPORT_SYMBOL(elevator_change); |
| 1101 | return count; | 1105 | |
| 1106 | ssize_t elv_iosched_store(struct request_queue *q, const char *name, | ||
| 1107 | size_t count) | ||
| 1108 | { | ||
| 1109 | int ret; | ||
| 1110 | |||
| 1111 | if (!q->elevator) | ||
| 1112 | return count; | ||
| 1113 | |||
| 1114 | ret = elevator_change(q, name); | ||
| 1115 | if (!ret) | ||
| 1116 | return count; | ||
| 1117 | |||
| 1118 | printk(KERN_ERR "elevator: switch to %s failed\n", name); | ||
| 1119 | return ret; | ||
| 1102 | } | 1120 | } |
| 1103 | 1121 | ||
| 1104 | ssize_t elv_iosched_show(struct request_queue *q, char *name) | 1122 | ssize_t elv_iosched_show(struct request_queue *q, char *name) |
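The net effect of the elevator.c hunk: the switch path now propagates real errno values, and drivers can request a scheduler through the exported elevator_change() instead of tearing down and re-initialising the elevator by hand. A minimal usage sketch (function name hypothetical), matching how mg_disk.c is converted later in this same merge:

    #include <linux/blkdev.h>
    #include <linux/elevator.h>

    /* Hypothetical probe-path helper for a random-access device that
     * wants the noop scheduler. */
    static int example_use_noop(struct request_queue *q)
    {
        int err = elevator_change(q, "noop");

        if (err)
            pr_err("example: switch to noop failed: %d\n", err);
        return err;
    }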
diff --git a/crypto/Kconfig b/crypto/Kconfig index 1cd497d7a15a..e573077f1672 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig | |||
| @@ -101,13 +101,13 @@ config CRYPTO_MANAGER2 | |||
| 101 | select CRYPTO_BLKCIPHER2 | 101 | select CRYPTO_BLKCIPHER2 |
| 102 | select CRYPTO_PCOMP2 | 102 | select CRYPTO_PCOMP2 |
| 103 | 103 | ||
| 104 | config CRYPTO_MANAGER_TESTS | 104 | config CRYPTO_MANAGER_DISABLE_TESTS |
| 105 | bool "Run algolithms' self-tests" | 105 | bool "Disable run-time self tests" |
| 106 | default y | 106 | default y |
| 107 | depends on CRYPTO_MANAGER2 | 107 | depends on CRYPTO_MANAGER2 |
| 108 | help | 108 | help |
| 109 | Run cryptomanager's tests for the new crypto algorithms being | 109 | Disable run-time self tests that normally take place at |
| 110 | registered. | 110 | algorithm registration. |
| 111 | 111 | ||
| 112 | config CRYPTO_GF128MUL | 112 | config CRYPTO_GF128MUL |
| 113 | tristate "GF(2^128) multiplication functions (EXPERIMENTAL)" | 113 | tristate "GF(2^128) multiplication functions (EXPERIMENTAL)" |
diff --git a/crypto/ahash.c b/crypto/ahash.c index b8c59b889c6e..f669822a7a44 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c | |||
| @@ -47,8 +47,11 @@ static int hash_walk_next(struct crypto_hash_walk *walk) | |||
| 47 | walk->data = crypto_kmap(walk->pg, 0); | 47 | walk->data = crypto_kmap(walk->pg, 0); |
| 48 | walk->data += offset; | 48 | walk->data += offset; |
| 49 | 49 | ||
| 50 | if (offset & alignmask) | 50 | if (offset & alignmask) { |
| 51 | nbytes = alignmask + 1 - (offset & alignmask); | 51 | unsigned int unaligned = alignmask + 1 - (offset & alignmask); |
| 52 | if (nbytes > unaligned) | ||
| 53 | nbytes = unaligned; | ||
| 54 | } | ||
| 52 | 55 | ||
| 53 | walk->entrylen -= nbytes; | 56 | walk->entrylen -= nbytes; |
| 54 | return nbytes; | 57 | return nbytes; |
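The ahash.c clamp is easier to see with numbers (values illustrative): with alignmask = 3 and offset = 2, the distance to the next alignment boundary is 2 bytes; the old code assigned that distance to nbytes unconditionally, so a walk entry with only 1 byte left would be told 2 bytes were consumed and walk->entrylen would be driven past zero. The new code only ever shrinks nbytes:

    /* Sketch of hash_walk_next()'s clamp with made-up values. */
    unsigned int alignmask = 3, offset = 2, nbytes = 1;

    if (offset & alignmask) {
        unsigned int unaligned = alignmask + 1 - (offset & alignmask); /* 2 */

        /* old: nbytes = unaligned;  claims 2 bytes, overruns the entry */
        if (nbytes > unaligned)     /* new: only reduce, never grow */
            nbytes = unaligned;
    }
    /* here nbytes stays 1, and the entrylen bookkeeping remains correct */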
diff --git a/crypto/algboss.c b/crypto/algboss.c index 40bd391f34d9..791d194958fa 100644 --- a/crypto/algboss.c +++ b/crypto/algboss.c | |||
| @@ -206,13 +206,16 @@ err: | |||
| 206 | return NOTIFY_OK; | 206 | return NOTIFY_OK; |
| 207 | } | 207 | } |
| 208 | 208 | ||
| 209 | #ifdef CONFIG_CRYPTO_MANAGER_TESTS | ||
| 210 | static int cryptomgr_test(void *data) | 209 | static int cryptomgr_test(void *data) |
| 211 | { | 210 | { |
| 212 | struct crypto_test_param *param = data; | 211 | struct crypto_test_param *param = data; |
| 213 | u32 type = param->type; | 212 | u32 type = param->type; |
| 214 | int err = 0; | 213 | int err = 0; |
| 215 | 214 | ||
| 215 | #ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS | ||
| 216 | goto skiptest; | ||
| 217 | #endif | ||
| 218 | |||
| 216 | if (type & CRYPTO_ALG_TESTED) | 219 | if (type & CRYPTO_ALG_TESTED) |
| 217 | goto skiptest; | 220 | goto skiptest; |
| 218 | 221 | ||
| @@ -267,7 +270,6 @@ err_put_module: | |||
| 267 | err: | 270 | err: |
| 268 | return NOTIFY_OK; | 271 | return NOTIFY_OK; |
| 269 | } | 272 | } |
| 270 | #endif /* CONFIG_CRYPTO_MANAGER_TESTS */ | ||
| 271 | 273 | ||
| 272 | static int cryptomgr_notify(struct notifier_block *this, unsigned long msg, | 274 | static int cryptomgr_notify(struct notifier_block *this, unsigned long msg, |
| 273 | void *data) | 275 | void *data) |
| @@ -275,10 +277,8 @@ static int cryptomgr_notify(struct notifier_block *this, unsigned long msg, | |||
| 275 | switch (msg) { | 277 | switch (msg) { |
| 276 | case CRYPTO_MSG_ALG_REQUEST: | 278 | case CRYPTO_MSG_ALG_REQUEST: |
| 277 | return cryptomgr_schedule_probe(data); | 279 | return cryptomgr_schedule_probe(data); |
| 278 | #ifdef CONFIG_CRYPTO_MANAGER_TESTS | ||
| 279 | case CRYPTO_MSG_ALG_REGISTER: | 280 | case CRYPTO_MSG_ALG_REGISTER: |
| 280 | return cryptomgr_schedule_test(data); | 281 | return cryptomgr_schedule_test(data); |
| 281 | #endif | ||
| 282 | } | 282 | } |
| 283 | 283 | ||
| 284 | return NOTIFY_DONE; | 284 | return NOTIFY_DONE; |
diff --git a/crypto/testmgr.c b/crypto/testmgr.c index abd980c729eb..fa8c8f78c8d4 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c | |||
| @@ -23,7 +23,7 @@ | |||
| 23 | 23 | ||
| 24 | #include "internal.h" | 24 | #include "internal.h" |
| 25 | 25 | ||
| 26 | #ifndef CONFIG_CRYPTO_MANAGER_TESTS | 26 | #ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS |
| 27 | 27 | ||
| 28 | /* a perfect nop */ | 28 | /* a perfect nop */ |
| 29 | int alg_test(const char *driver, const char *alg, u32 type, u32 mask) | 29 | int alg_test(const char *driver, const char *alg, u32 type, u32 mask) |
| @@ -2542,6 +2542,6 @@ non_fips_alg: | |||
| 2542 | return -EINVAL; | 2542 | return -EINVAL; |
| 2543 | } | 2543 | } |
| 2544 | 2544 | ||
| 2545 | #endif /* CONFIG_CRYPTO_MANAGER_TESTS */ | 2545 | #endif /* CONFIG_CRYPTO_MANAGER_DISABLE_TESTS */ |
| 2546 | 2546 | ||
| 2547 | EXPORT_SYMBOL_GPL(alg_test); | 2547 | EXPORT_SYMBOL_GPL(alg_test); |
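For completeness, the testmgr.c hunk only flips the preprocessor condition; the region it now compiles when CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is set is the trivial stub already hinted at by the "/* a perfect nop */" comment. Roughly:

    #ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS

    /* a perfect nop: report success without running any self-test */
    int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
    {
        return 0;
    }

    #else
    /* ... full self-test matrix ... */
    #endif /* CONFIG_CRYPTO_MANAGER_DISABLE_TESTS */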
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index 1f67057af2a5..3ba8d1f44a73 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c | |||
| @@ -33,7 +33,6 @@ | |||
| 33 | #include <linux/pm_runtime.h> | 33 | #include <linux/pm_runtime.h> |
| 34 | #include <linux/pci.h> | 34 | #include <linux/pci.h> |
| 35 | #include <linux/pci-acpi.h> | 35 | #include <linux/pci-acpi.h> |
| 36 | #include <linux/pci-aspm.h> | ||
| 37 | #include <linux/acpi.h> | 36 | #include <linux/acpi.h> |
| 38 | #include <linux/slab.h> | 37 | #include <linux/slab.h> |
| 39 | #include <acpi/acpi_bus.h> | 38 | #include <acpi/acpi_bus.h> |
| @@ -226,22 +225,31 @@ static acpi_status acpi_pci_run_osc(acpi_handle handle, | |||
| 226 | return status; | 225 | return status; |
| 227 | } | 226 | } |
| 228 | 227 | ||
| 229 | static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root, u32 flags) | 228 | static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root, |
| 229 | u32 support, | ||
| 230 | u32 *control) | ||
| 230 | { | 231 | { |
| 231 | acpi_status status; | 232 | acpi_status status; |
| 232 | u32 support_set, result, capbuf[3]; | 233 | u32 result, capbuf[3]; |
| 234 | |||
| 235 | support &= OSC_PCI_SUPPORT_MASKS; | ||
| 236 | support |= root->osc_support_set; | ||
| 233 | 237 | ||
| 234 | /* do _OSC query for all possible controls */ | ||
| 235 | support_set = root->osc_support_set | (flags & OSC_PCI_SUPPORT_MASKS); | ||
| 236 | capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE; | 238 | capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE; |
| 237 | capbuf[OSC_SUPPORT_TYPE] = support_set; | 239 | capbuf[OSC_SUPPORT_TYPE] = support; |
| 238 | capbuf[OSC_CONTROL_TYPE] = OSC_PCI_CONTROL_MASKS; | 240 | if (control) { |
| 241 | *control &= OSC_PCI_CONTROL_MASKS; | ||
| 242 | capbuf[OSC_CONTROL_TYPE] = *control | root->osc_control_set; | ||
| 243 | } else { | ||
| 244 | /* Run _OSC query for all possible controls. */ | ||
| 245 | capbuf[OSC_CONTROL_TYPE] = OSC_PCI_CONTROL_MASKS; | ||
| 246 | } | ||
| 239 | 247 | ||
| 240 | status = acpi_pci_run_osc(root->device->handle, capbuf, &result); | 248 | status = acpi_pci_run_osc(root->device->handle, capbuf, &result); |
| 241 | if (ACPI_SUCCESS(status)) { | 249 | if (ACPI_SUCCESS(status)) { |
| 242 | root->osc_support_set = support_set; | 250 | root->osc_support_set = support; |
| 243 | root->osc_control_qry = result; | 251 | if (control) |
| 244 | root->osc_queried = 1; | 252 | *control = result; |
| 245 | } | 253 | } |
| 246 | return status; | 254 | return status; |
| 247 | } | 255 | } |
| @@ -255,7 +263,7 @@ static acpi_status acpi_pci_osc_support(struct acpi_pci_root *root, u32 flags) | |||
| 255 | if (ACPI_FAILURE(status)) | 263 | if (ACPI_FAILURE(status)) |
| 256 | return status; | 264 | return status; |
| 257 | mutex_lock(&osc_lock); | 265 | mutex_lock(&osc_lock); |
| 258 | status = acpi_pci_query_osc(root, flags); | 266 | status = acpi_pci_query_osc(root, flags, NULL); |
| 259 | mutex_unlock(&osc_lock); | 267 | mutex_unlock(&osc_lock); |
| 260 | return status; | 268 | return status; |
| 261 | } | 269 | } |
| @@ -365,55 +373,70 @@ out: | |||
| 365 | EXPORT_SYMBOL_GPL(acpi_get_pci_dev); | 373 | EXPORT_SYMBOL_GPL(acpi_get_pci_dev); |
| 366 | 374 | ||
| 367 | /** | 375 | /** |
| 368 | * acpi_pci_osc_control_set - commit requested control to Firmware | 376 | * acpi_pci_osc_control_set - Request control of PCI root _OSC features. |
| 369 | * @handle: acpi_handle for the target ACPI object | 377 | * @handle: ACPI handle of a PCI root bridge (or PCIe Root Complex). |
| 370 | * @flags: driver's requested control bits | 378 | * @mask: Mask of _OSC bits to request control of, place to store control mask. |
| 379 | * @req: Mask of _OSC bits the control of is essential to the caller. | ||
| 380 | * | ||
| 381 | * Run _OSC query for @mask and if that is successful, compare the returned | ||
| 382 | * mask of control bits with @req. If all of the @req bits are set in the | ||
| 383 | * returned mask, run _OSC request for it. | ||
| 371 | * | 384 | * |
| 372 | * Attempt to take control from Firmware on requested control bits. | 385 | * The variable at the @mask address may be modified regardless of whether or |
| 386 | * not the function returns success. On success it will contain the mask of | ||
| 387 | * _OSC bits the BIOS has granted control of, but its contents are meaningless | ||
| 388 | * on failure. | ||
| 373 | **/ | 389 | **/ |
| 374 | acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 flags) | 390 | acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 req) |
| 375 | { | 391 | { |
| 392 | struct acpi_pci_root *root; | ||
| 376 | acpi_status status; | 393 | acpi_status status; |
| 377 | u32 control_req, result, capbuf[3]; | 394 | u32 ctrl, capbuf[3]; |
| 378 | acpi_handle tmp; | 395 | acpi_handle tmp; |
| 379 | struct acpi_pci_root *root; | ||
| 380 | 396 | ||
| 381 | status = acpi_get_handle(handle, "_OSC", &tmp); | 397 | if (!mask) |
| 382 | if (ACPI_FAILURE(status)) | 398 | return AE_BAD_PARAMETER; |
| 383 | return status; | ||
| 384 | 399 | ||
| 385 | control_req = (flags & OSC_PCI_CONTROL_MASKS); | 400 | ctrl = *mask & OSC_PCI_CONTROL_MASKS; |
| 386 | if (!control_req) | 401 | if ((ctrl & req) != req) |
| 387 | return AE_TYPE; | 402 | return AE_TYPE; |
| 388 | 403 | ||
| 389 | root = acpi_pci_find_root(handle); | 404 | root = acpi_pci_find_root(handle); |
| 390 | if (!root) | 405 | if (!root) |
| 391 | return AE_NOT_EXIST; | 406 | return AE_NOT_EXIST; |
| 392 | 407 | ||
| 408 | status = acpi_get_handle(handle, "_OSC", &tmp); | ||
| 409 | if (ACPI_FAILURE(status)) | ||
| 410 | return status; | ||
| 411 | |||
| 393 | mutex_lock(&osc_lock); | 412 | mutex_lock(&osc_lock); |
| 413 | |||
| 414 | *mask = ctrl | root->osc_control_set; | ||
| 394 | /* No need to evaluate _OSC if the control was already granted. */ | 415 | /* No need to evaluate _OSC if the control was already granted. */ |
| 395 | if ((root->osc_control_set & control_req) == control_req) | 416 | if ((root->osc_control_set & ctrl) == ctrl) |
| 396 | goto out; | 417 | goto out; |
| 397 | 418 | ||
| 398 | /* Need to query controls first before requesting them */ | 419 | /* Need to check the available controls bits before requesting them. */ |
| 399 | if (!root->osc_queried) { | 420 | while (*mask) { |
| 400 | status = acpi_pci_query_osc(root, root->osc_support_set); | 421 | status = acpi_pci_query_osc(root, root->osc_support_set, mask); |
| 401 | if (ACPI_FAILURE(status)) | 422 | if (ACPI_FAILURE(status)) |
| 402 | goto out; | 423 | goto out; |
| 424 | if (ctrl == *mask) | ||
| 425 | break; | ||
| 426 | ctrl = *mask; | ||
| 403 | } | 427 | } |
| 404 | if ((root->osc_control_qry & control_req) != control_req) { | 428 | |
| 405 | printk(KERN_DEBUG | 429 | if ((ctrl & req) != req) { |
| 406 | "Firmware did not grant requested _OSC control\n"); | ||
| 407 | status = AE_SUPPORT; | 430 | status = AE_SUPPORT; |
| 408 | goto out; | 431 | goto out; |
| 409 | } | 432 | } |
| 410 | 433 | ||
| 411 | capbuf[OSC_QUERY_TYPE] = 0; | 434 | capbuf[OSC_QUERY_TYPE] = 0; |
| 412 | capbuf[OSC_SUPPORT_TYPE] = root->osc_support_set; | 435 | capbuf[OSC_SUPPORT_TYPE] = root->osc_support_set; |
| 413 | capbuf[OSC_CONTROL_TYPE] = root->osc_control_set | control_req; | 436 | capbuf[OSC_CONTROL_TYPE] = ctrl; |
| 414 | status = acpi_pci_run_osc(handle, capbuf, &result); | 437 | status = acpi_pci_run_osc(handle, capbuf, mask); |
| 415 | if (ACPI_SUCCESS(status)) | 438 | if (ACPI_SUCCESS(status)) |
| 416 | root->osc_control_set = result; | 439 | root->osc_control_set = *mask; |
| 417 | out: | 440 | out: |
| 418 | mutex_unlock(&osc_lock); | 441 | mutex_unlock(&osc_lock); |
| 419 | return status; | 442 | return status; |
| @@ -544,14 +567,6 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device) | |||
| 544 | if (flags != base_flags) | 567 | if (flags != base_flags) |
| 545 | acpi_pci_osc_support(root, flags); | 568 | acpi_pci_osc_support(root, flags); |
| 546 | 569 | ||
| 547 | status = acpi_pci_osc_control_set(root->device->handle, | ||
| 548 | OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); | ||
| 549 | |||
| 550 | if (ACPI_FAILURE(status)) { | ||
| 551 | printk(KERN_INFO "Unable to assume PCIe control: Disabling ASPM\n"); | ||
| 552 | pcie_no_aspm(); | ||
| 553 | } | ||
| 554 | |||
| 555 | pci_acpi_add_bus_pm_notifier(device, root->bus); | 570 | pci_acpi_add_bus_pm_notifier(device, root->bus); |
| 556 | if (device->wakeup.flags.run_wake) | 571 | if (device->wakeup.flags.run_wake) |
| 557 | device_set_run_wake(root->bus->bridge, true); | 572 | device_set_run_wake(root->bus->bridge, true); |
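A caller-side sketch of the reworked acpi_pci_osc_control_set() (the PCIe call site itself is removed from acpi_pci_root_add() above and moves elsewhere in this series): the second argument is now an in/out mask of _OSC control bits, and the third is the subset the caller treats as mandatory. The function name below is hypothetical; the constants and fallback mirror the deleted pci_root.c code:

    static void example_claim_pcie_control(acpi_handle handle)
    {
        u32 flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL;
        acpi_status status;

        status = acpi_pci_osc_control_set(handle, &flags,
                                          OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
        if (ACPI_FAILURE(status)) {
            printk(KERN_INFO "Unable to assume PCIe control: Disabling ASPM\n");
            pcie_no_aspm();
        }
        /* on success, flags holds every control bit firmware granted */
    }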
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 013727b20417..ff1c945fba98 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c | |||
| @@ -253,6 +253,9 @@ static const struct pci_device_id ahci_pci_tbl[] = { | |||
| 253 | { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */ | 253 | { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */ |
| 254 | { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */ | 254 | { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */ |
| 255 | { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */ | 255 | { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */ |
| 256 | { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */ | ||
| 257 | { PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */ | ||
| 258 | { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */ | ||
| 256 | 259 | ||
| 257 | /* JMicron 360/1/3/5/6, match class to avoid IDE function */ | 260 | /* JMicron 360/1/3/5/6, match class to avoid IDE function */ |
| 258 | { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, | 261 | { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, |
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index 3971bc0a4838..d712675d0a96 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c | |||
| @@ -302,6 +302,10 @@ static const struct pci_device_id piix_pci_tbl[] = { | |||
| 302 | { 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, | 302 | { 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, |
| 303 | /* SATA Controller IDE (CPT) */ | 303 | /* SATA Controller IDE (CPT) */ |
| 304 | { 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, | 304 | { 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, |
| 305 | /* SATA Controller IDE (PBG) */ | ||
| 306 | { 0x8086, 0x1d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, | ||
| 307 | /* SATA Controller IDE (PBG) */ | ||
| 308 | { 0x8086, 0x1d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, | ||
| 305 | { } /* terminate list */ | 309 | { } /* terminate list */ |
| 306 | }; | 310 | }; |
| 307 | 311 | ||
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 666850d31df2..68dc6785472f 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c | |||
| @@ -1326,7 +1326,7 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class, | |||
| 1326 | /* issue the first D2H Register FIS */ | 1326 | /* issue the first D2H Register FIS */ |
| 1327 | msecs = 0; | 1327 | msecs = 0; |
| 1328 | now = jiffies; | 1328 | now = jiffies; |
| 1329 | if (time_after(now, deadline)) | 1329 | if (time_after(deadline, now)) |
| 1330 | msecs = jiffies_to_msecs(deadline - now); | 1330 | msecs = jiffies_to_msecs(deadline - now); |
| 1331 | 1331 | ||
| 1332 | tf.ctl |= ATA_SRST; | 1332 | tf.ctl |= ATA_SRST; |
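The libahci.c change flips an inverted timeout test: time_after(a, b) is true when a is later than b, so the old code only computed a millisecond budget once the deadline had already passed, and the normal case issued the SRST FIS with msecs == 0. The intended pattern:

    /* Remaining budget before the deadline (sketch of the fixed logic). */
    unsigned long now = jiffies;
    unsigned int msecs = 0;

    if (time_after(deadline, now))      /* deadline still in the future */
        msecs = jiffies_to_msecs(deadline - now);
    /* the old, inverted test left msecs at 0 in exactly this situation */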
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index c035b3d041ee..932eaee50245 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
| @@ -5418,6 +5418,7 @@ static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg, | |||
| 5418 | */ | 5418 | */ |
| 5419 | int ata_host_suspend(struct ata_host *host, pm_message_t mesg) | 5419 | int ata_host_suspend(struct ata_host *host, pm_message_t mesg) |
| 5420 | { | 5420 | { |
| 5421 | unsigned int ehi_flags = ATA_EHI_QUIET; | ||
| 5421 | int rc; | 5422 | int rc; |
| 5422 | 5423 | ||
| 5423 | /* | 5424 | /* |
| @@ -5426,7 +5427,18 @@ int ata_host_suspend(struct ata_host *host, pm_message_t mesg) | |||
| 5426 | */ | 5427 | */ |
| 5427 | ata_lpm_enable(host); | 5428 | ata_lpm_enable(host); |
| 5428 | 5429 | ||
| 5429 | rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1); | 5430 | /* |
| 5431 | * On some hardware, device fails to respond after spun down | ||
| 5432 | * for suspend. As the device won't be used before being | ||
| 5433 | * resumed, we don't need to touch the device. Ask EH to skip | ||
| 5434 | * the usual stuff and proceed directly to suspend. | ||
| 5435 | * | ||
| 5436 | * http://thread.gmane.org/gmane.linux.ide/46764 | ||
| 5437 | */ | ||
| 5438 | if (mesg.event == PM_EVENT_SUSPEND) | ||
| 5439 | ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY; | ||
| 5440 | |||
| 5441 | rc = ata_host_request_pm(host, mesg, 0, ehi_flags, 1); | ||
| 5430 | if (rc == 0) | 5442 | if (rc == 0) |
| 5431 | host->dev->power.power_state = mesg; | 5443 | host->dev->power.power_state = mesg; |
| 5432 | return rc; | 5444 | return rc; |
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index c9ae299b8342..e48302eae55f 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c | |||
| @@ -3235,6 +3235,10 @@ static int ata_eh_skip_recovery(struct ata_link *link) | |||
| 3235 | if (link->flags & ATA_LFLAG_DISABLED) | 3235 | if (link->flags & ATA_LFLAG_DISABLED) |
| 3236 | return 1; | 3236 | return 1; |
| 3237 | 3237 | ||
| 3238 | /* skip if explicitly requested */ | ||
| 3239 | if (ehc->i.flags & ATA_EHI_NO_RECOVERY) | ||
| 3240 | return 1; | ||
| 3241 | |||
| 3238 | /* thaw frozen port and recover failed devices */ | 3242 | /* thaw frozen port and recover failed devices */ |
| 3239 | if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link)) | 3243 | if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link)) |
| 3240 | return 0; | 3244 | return 0; |
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index 3b82d8ef76f0..e30c537cce32 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c | |||
| @@ -418,6 +418,7 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) | |||
| 418 | if (ioaddr->ctl_addr) | 418 | if (ioaddr->ctl_addr) |
| 419 | iowrite8(tf->ctl, ioaddr->ctl_addr); | 419 | iowrite8(tf->ctl, ioaddr->ctl_addr); |
| 420 | ap->last_ctl = tf->ctl; | 420 | ap->last_ctl = tf->ctl; |
| 421 | ata_wait_idle(ap); | ||
| 421 | } | 422 | } |
| 422 | 423 | ||
| 423 | if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { | 424 | if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { |
| @@ -453,6 +454,8 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) | |||
| 453 | iowrite8(tf->device, ioaddr->device_addr); | 454 | iowrite8(tf->device, ioaddr->device_addr); |
| 454 | VPRINTK("device 0x%X\n", tf->device); | 455 | VPRINTK("device 0x%X\n", tf->device); |
| 455 | } | 456 | } |
| 457 | |||
| 458 | ata_wait_idle(ap); | ||
| 456 | } | 459 | } |
| 457 | EXPORT_SYMBOL_GPL(ata_sff_tf_load); | 460 | EXPORT_SYMBOL_GPL(ata_sff_tf_load); |
| 458 | 461 | ||
| @@ -1042,7 +1045,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) | |||
| 1042 | int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, | 1045 | int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, |
| 1043 | u8 status, int in_wq) | 1046 | u8 status, int in_wq) |
| 1044 | { | 1047 | { |
| 1045 | struct ata_eh_info *ehi = &ap->link.eh_info; | 1048 | struct ata_link *link = qc->dev->link; |
| 1049 | struct ata_eh_info *ehi = &link->eh_info; | ||
| 1046 | unsigned long flags = 0; | 1050 | unsigned long flags = 0; |
| 1047 | int poll_next; | 1051 | int poll_next; |
| 1048 | 1052 | ||
| @@ -1298,8 +1302,14 @@ fsm_start: | |||
| 1298 | } | 1302 | } |
| 1299 | EXPORT_SYMBOL_GPL(ata_sff_hsm_move); | 1303 | EXPORT_SYMBOL_GPL(ata_sff_hsm_move); |
| 1300 | 1304 | ||
| 1301 | void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay) | 1305 | void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay) |
| 1302 | { | 1306 | { |
| 1307 | struct ata_port *ap = link->ap; | ||
| 1308 | |||
| 1309 | WARN_ON((ap->sff_pio_task_link != NULL) && | ||
| 1310 | (ap->sff_pio_task_link != link)); | ||
| 1311 | ap->sff_pio_task_link = link; | ||
| 1312 | |||
| 1303 | /* may fail if ata_sff_flush_pio_task() in progress */ | 1313 | /* may fail if ata_sff_flush_pio_task() in progress */ |
| 1304 | queue_delayed_work(ata_sff_wq, &ap->sff_pio_task, | 1314 | queue_delayed_work(ata_sff_wq, &ap->sff_pio_task, |
| 1305 | msecs_to_jiffies(delay)); | 1315 | msecs_to_jiffies(delay)); |
| @@ -1321,14 +1331,18 @@ static void ata_sff_pio_task(struct work_struct *work) | |||
| 1321 | { | 1331 | { |
| 1322 | struct ata_port *ap = | 1332 | struct ata_port *ap = |
| 1323 | container_of(work, struct ata_port, sff_pio_task.work); | 1333 | container_of(work, struct ata_port, sff_pio_task.work); |
| 1334 | struct ata_link *link = ap->sff_pio_task_link; | ||
| 1324 | struct ata_queued_cmd *qc; | 1335 | struct ata_queued_cmd *qc; |
| 1325 | u8 status; | 1336 | u8 status; |
| 1326 | int poll_next; | 1337 | int poll_next; |
| 1327 | 1338 | ||
| 1339 | BUG_ON(ap->sff_pio_task_link == NULL); | ||
| 1328 | /* qc can be NULL if timeout occurred */ | 1340 | /* qc can be NULL if timeout occurred */ |
| 1329 | qc = ata_qc_from_tag(ap, ap->link.active_tag); | 1341 | qc = ata_qc_from_tag(ap, link->active_tag); |
| 1330 | if (!qc) | 1342 | if (!qc) { |
| 1343 | ap->sff_pio_task_link = NULL; | ||
| 1331 | return; | 1344 | return; |
| 1345 | } | ||
| 1332 | 1346 | ||
| 1333 | fsm_start: | 1347 | fsm_start: |
| 1334 | WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE); | 1348 | WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE); |
| @@ -1345,11 +1359,16 @@ fsm_start: | |||
| 1345 | msleep(2); | 1359 | msleep(2); |
| 1346 | status = ata_sff_busy_wait(ap, ATA_BUSY, 10); | 1360 | status = ata_sff_busy_wait(ap, ATA_BUSY, 10); |
| 1347 | if (status & ATA_BUSY) { | 1361 | if (status & ATA_BUSY) { |
| 1348 | ata_sff_queue_pio_task(ap, ATA_SHORT_PAUSE); | 1362 | ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE); |
| 1349 | return; | 1363 | return; |
| 1350 | } | 1364 | } |
| 1351 | } | 1365 | } |
| 1352 | 1366 | ||
| 1367 | /* | ||
| 1368 | * hsm_move() may trigger another command to be processed. | ||
| 1369 | * clean the link beforehand. | ||
| 1370 | */ | ||
| 1371 | ap->sff_pio_task_link = NULL; | ||
| 1353 | /* move the HSM */ | 1372 | /* move the HSM */ |
| 1354 | poll_next = ata_sff_hsm_move(ap, qc, status, 1); | 1373 | poll_next = ata_sff_hsm_move(ap, qc, status, 1); |
| 1355 | 1374 | ||
| @@ -1376,6 +1395,7 @@ fsm_start: | |||
| 1376 | unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) | 1395 | unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) |
| 1377 | { | 1396 | { |
| 1378 | struct ata_port *ap = qc->ap; | 1397 | struct ata_port *ap = qc->ap; |
| 1398 | struct ata_link *link = qc->dev->link; | ||
| 1379 | 1399 | ||
| 1380 | /* Use polling pio if the LLD doesn't handle | 1400 | /* Use polling pio if the LLD doesn't handle |
| 1381 | * interrupt driven pio and atapi CDB interrupt. | 1401 | * interrupt driven pio and atapi CDB interrupt. |
| @@ -1396,7 +1416,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) | |||
| 1396 | ap->hsm_task_state = HSM_ST_LAST; | 1416 | ap->hsm_task_state = HSM_ST_LAST; |
| 1397 | 1417 | ||
| 1398 | if (qc->tf.flags & ATA_TFLAG_POLLING) | 1418 | if (qc->tf.flags & ATA_TFLAG_POLLING) |
| 1399 | ata_sff_queue_pio_task(ap, 0); | 1419 | ata_sff_queue_pio_task(link, 0); |
| 1400 | 1420 | ||
| 1401 | break; | 1421 | break; |
| 1402 | 1422 | ||
| @@ -1409,7 +1429,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) | |||
| 1409 | if (qc->tf.flags & ATA_TFLAG_WRITE) { | 1429 | if (qc->tf.flags & ATA_TFLAG_WRITE) { |
| 1410 | /* PIO data out protocol */ | 1430 | /* PIO data out protocol */ |
| 1411 | ap->hsm_task_state = HSM_ST_FIRST; | 1431 | ap->hsm_task_state = HSM_ST_FIRST; |
| 1412 | ata_sff_queue_pio_task(ap, 0); | 1432 | ata_sff_queue_pio_task(link, 0); |
| 1413 | 1433 | ||
| 1414 | /* always send first data block using the | 1434 | /* always send first data block using the |
| 1415 | * ata_sff_pio_task() codepath. | 1435 | * ata_sff_pio_task() codepath. |
| @@ -1419,7 +1439,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) | |||
| 1419 | ap->hsm_task_state = HSM_ST; | 1439 | ap->hsm_task_state = HSM_ST; |
| 1420 | 1440 | ||
| 1421 | if (qc->tf.flags & ATA_TFLAG_POLLING) | 1441 | if (qc->tf.flags & ATA_TFLAG_POLLING) |
| 1422 | ata_sff_queue_pio_task(ap, 0); | 1442 | ata_sff_queue_pio_task(link, 0); |
| 1423 | 1443 | ||
| 1424 | /* if polling, ata_sff_pio_task() handles the | 1444 | /* if polling, ata_sff_pio_task() handles the |
| 1425 | * rest. otherwise, interrupt handler takes | 1445 | * rest. otherwise, interrupt handler takes |
| @@ -1441,7 +1461,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) | |||
| 1441 | /* send cdb by polling if no cdb interrupt */ | 1461 | /* send cdb by polling if no cdb interrupt */ |
| 1442 | if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) || | 1462 | if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) || |
| 1443 | (qc->tf.flags & ATA_TFLAG_POLLING)) | 1463 | (qc->tf.flags & ATA_TFLAG_POLLING)) |
| 1444 | ata_sff_queue_pio_task(ap, 0); | 1464 | ata_sff_queue_pio_task(link, 0); |
| 1445 | break; | 1465 | break; |
| 1446 | 1466 | ||
| 1447 | default: | 1467 | default: |
| @@ -2734,6 +2754,7 @@ EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep); | |||
| 2734 | unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc) | 2754 | unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc) |
| 2735 | { | 2755 | { |
| 2736 | struct ata_port *ap = qc->ap; | 2756 | struct ata_port *ap = qc->ap; |
| 2757 | struct ata_link *link = qc->dev->link; | ||
| 2737 | 2758 | ||
| 2738 | /* defer PIO handling to sff_qc_issue */ | 2759 | /* defer PIO handling to sff_qc_issue */ |
| 2739 | if (!ata_is_dma(qc->tf.protocol)) | 2760 | if (!ata_is_dma(qc->tf.protocol)) |
| @@ -2762,7 +2783,7 @@ unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc) | |||
| 2762 | 2783 | ||
| 2763 | /* send cdb by polling if no cdb interrupt */ | 2784 | /* send cdb by polling if no cdb interrupt */ |
| 2764 | if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) | 2785 | if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) |
| 2765 | ata_sff_queue_pio_task(ap, 0); | 2786 | ata_sff_queue_pio_task(link, 0); |
| 2766 | break; | 2787 | break; |
| 2767 | 2788 | ||
| 2768 | default: | 2789 | default: |
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c index ba43f0f8c880..2215632e4b31 100644 --- a/drivers/ata/pata_artop.c +++ b/drivers/ata/pata_artop.c | |||
| @@ -74,7 +74,8 @@ static int artop6260_pre_reset(struct ata_link *link, unsigned long deadline) | |||
| 74 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 74 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
| 75 | 75 | ||
| 76 | /* Odd numbered device ids are the units with enable bits (the -R cards) */ | 76 | /* Odd numbered device ids are the units with enable bits (the -R cards) */ |
| 77 | if (pdev->device % 1 && !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no])) | 77 | if ((pdev->device & 1) && |
| 78 | !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no])) | ||
| 78 | return -ENOENT; | 79 | return -ENOENT; |
| 79 | 80 | ||
| 80 | return ata_sff_prereset(link, deadline); | 81 | return ata_sff_prereset(link, deadline); |
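The pata_artop fix replaces an always-false test: any integer modulo 1 is 0, so the enable-bit probe for the odd-numbered -R cards was dead code, whereas masking with 1 really does test the low bit of the device id. Illustration only (helper name hypothetical):

    /* device % 1 == 0 for every id, so the old check never fired;
     * device & 1 is set exactly for the odd-numbered (-R) variants. */
    static inline bool artop_is_r_variant(u16 device)
    {
        return device & 1;      /* odd id -> true, even id -> false */
    }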
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c index 5e659885de16..ac8d7d97e408 100644 --- a/drivers/ata/pata_via.c +++ b/drivers/ata/pata_via.c | |||
| @@ -417,6 +417,8 @@ static void via_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) | |||
| 417 | tf->lbam, | 417 | tf->lbam, |
| 418 | tf->lbah); | 418 | tf->lbah); |
| 419 | } | 419 | } |
| 420 | |||
| 421 | ata_wait_idle(ap); | ||
| 420 | } | 422 | } |
| 421 | 423 | ||
| 422 | static int via_port_start(struct ata_port *ap) | 424 | static int via_port_start(struct ata_port *ap) |
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index 81982594a014..a9fd9709c262 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c | |||
| @@ -2284,7 +2284,7 @@ static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc) | |||
| 2284 | } | 2284 | } |
| 2285 | 2285 | ||
| 2286 | if (qc->tf.flags & ATA_TFLAG_POLLING) | 2286 | if (qc->tf.flags & ATA_TFLAG_POLLING) |
| 2287 | ata_sff_queue_pio_task(ap, 0); | 2287 | ata_sff_queue_pio_task(link, 0); |
| 2288 | return 0; | 2288 | return 0; |
| 2289 | } | 2289 | } |
| 2290 | 2290 | ||
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 5419a49ff135..276d5a701dc3 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c | |||
| @@ -59,6 +59,7 @@ void device_pm_init(struct device *dev) | |||
| 59 | { | 59 | { |
| 60 | dev->power.status = DPM_ON; | 60 | dev->power.status = DPM_ON; |
| 61 | init_completion(&dev->power.completion); | 61 | init_completion(&dev->power.completion); |
| 62 | complete_all(&dev->power.completion); | ||
| 62 | dev->power.wakeup_count = 0; | 63 | dev->power.wakeup_count = 0; |
| 63 | pm_runtime_init(dev); | 64 | pm_runtime_init(dev); |
| 64 | } | 65 | } |
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 31064df1370a..6124c2fd2d33 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c | |||
| @@ -297,6 +297,8 @@ static void enqueue_cmd_and_start_io(ctlr_info_t *h, | |||
| 297 | spin_lock_irqsave(&h->lock, flags); | 297 | spin_lock_irqsave(&h->lock, flags); |
| 298 | addQ(&h->reqQ, c); | 298 | addQ(&h->reqQ, c); |
| 299 | h->Qdepth++; | 299 | h->Qdepth++; |
| 300 | if (h->Qdepth > h->maxQsinceinit) | ||
| 301 | h->maxQsinceinit = h->Qdepth; | ||
| 300 | start_io(h); | 302 | start_io(h); |
| 301 | spin_unlock_irqrestore(&h->lock, flags); | 303 | spin_unlock_irqrestore(&h->lock, flags); |
| 302 | } | 304 | } |
| @@ -4519,6 +4521,12 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev) | |||
| 4519 | misc_fw_support = readl(&cfgtable->misc_fw_support); | 4521 | misc_fw_support = readl(&cfgtable->misc_fw_support); |
| 4520 | use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; | 4522 | use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; |
| 4521 | 4523 | ||
| 4524 | /* The doorbell reset seems to cause lockups on some Smart | ||
| 4525 | * Arrays (e.g. P410, P410i, maybe others). Until this is | ||
| 4526 | * fixed or at least isolated, avoid the doorbell reset. | ||
| 4527 | */ | ||
| 4528 | use_doorbell = 0; | ||
| 4529 | |||
| 4522 | rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell); | 4530 | rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell); |
| 4523 | if (rc) | 4531 | if (rc) |
| 4524 | goto unmap_cfgtable; | 4532 | goto unmap_cfgtable; |
| @@ -4712,6 +4720,9 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, | |||
| 4712 | h->scatter_list = kmalloc(h->max_commands * | 4720 | h->scatter_list = kmalloc(h->max_commands * |
| 4713 | sizeof(struct scatterlist *), | 4721 | sizeof(struct scatterlist *), |
| 4714 | GFP_KERNEL); | 4722 | GFP_KERNEL); |
| 4723 | if (!h->scatter_list) | ||
| 4724 | goto clean4; | ||
| 4725 | |||
| 4715 | for (k = 0; k < h->nr_cmds; k++) { | 4726 | for (k = 0; k < h->nr_cmds; k++) { |
| 4716 | h->scatter_list[k] = kmalloc(sizeof(struct scatterlist) * | 4727 | h->scatter_list[k] = kmalloc(sizeof(struct scatterlist) * |
| 4717 | h->maxsgentries, | 4728 | h->maxsgentries, |
diff --git a/drivers/block/loop.c b/drivers/block/loop.c index f3c636d23718..91797bbbe702 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c | |||
| @@ -477,7 +477,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio) | |||
| 477 | pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset; | 477 | pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset; |
| 478 | 478 | ||
| 479 | if (bio_rw(bio) == WRITE) { | 479 | if (bio_rw(bio) == WRITE) { |
| 480 | bool barrier = (bio->bi_rw & REQ_HARDBARRIER); | 480 | bool barrier = !!(bio->bi_rw & REQ_HARDBARRIER); |
| 481 | struct file *file = lo->lo_backing_file; | 481 | struct file *file = lo->lo_backing_file; |
| 482 | 482 | ||
| 483 | if (barrier) { | 483 | if (barrier) { |
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c index b82c5ce5e9df..76fa3deaee84 100644 --- a/drivers/block/mg_disk.c +++ b/drivers/block/mg_disk.c | |||
| @@ -974,8 +974,7 @@ static int mg_probe(struct platform_device *plat_dev) | |||
| 974 | host->breq->queuedata = host; | 974 | host->breq->queuedata = host; |
| 975 | 975 | ||
| 976 | /* mflash is random device, thanx for the noop */ | 976 | /* mflash is random device, thanx for the noop */ |
| 977 | elevator_exit(host->breq->elevator); | 977 | err = elevator_change(host->breq, "noop"); |
| 978 | err = elevator_init(host->breq, "noop"); | ||
| 979 | if (err) { | 978 | if (err) { |
| 980 | printk(KERN_ERR "%s:%d (elevator_init) fail\n", | 979 | printk(KERN_ERR "%s:%d (elevator_init) fail\n", |
| 981 | __func__, __LINE__); | 980 | __func__, __LINE__); |
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c index 1acdb2509511..a3f5e381e746 100644 --- a/drivers/char/hw_random/n2-drv.c +++ b/drivers/char/hw_random/n2-drv.c | |||
| @@ -387,7 +387,7 @@ static int n2rng_init_control(struct n2rng *np) | |||
| 387 | 387 | ||
| 388 | static int n2rng_data_read(struct hwrng *rng, u32 *data) | 388 | static int n2rng_data_read(struct hwrng *rng, u32 *data) |
| 389 | { | 389 | { |
| 390 | struct n2rng *np = rng->priv; | 390 | struct n2rng *np = (struct n2rng *) rng->priv; |
| 391 | unsigned long ra = __pa(&np->test_data); | 391 | unsigned long ra = __pa(&np->test_data); |
| 392 | int len; | 392 | int len; |
| 393 | 393 | ||
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c index 949067a0bd47..613c852ee0fe 100644 --- a/drivers/char/tty_io.c +++ b/drivers/char/tty_io.c | |||
| @@ -355,7 +355,7 @@ struct tty_driver *tty_find_polling_driver(char *name, int *line) | |||
| 355 | if (*stp == '\0') | 355 | if (*stp == '\0') |
| 356 | stp = NULL; | 356 | stp = NULL; |
| 357 | 357 | ||
| 358 | if (tty_line >= 0 && tty_line <= p->num && p->ops && | 358 | if (tty_line >= 0 && tty_line < p->num && p->ops && |
| 359 | p->ops->poll_init && !p->ops->poll_init(p, tty_line, stp)) { | 359 | p->ops->poll_init && !p->ops->poll_init(p, tty_line, stp)) { |
| 360 | res = tty_driver_kref_get(p); | 360 | res = tty_driver_kref_get(p); |
| 361 | *line = tty_line; | 361 | *line = tty_line; |
diff --git a/drivers/char/vt.c b/drivers/char/vt.c index 50590c7f2c01..281aada7b4a1 100644 --- a/drivers/char/vt.c +++ b/drivers/char/vt.c | |||
| @@ -906,22 +906,16 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, | |||
| 906 | * bottom of buffer | 906 | * bottom of buffer |
| 907 | */ | 907 | */ |
| 908 | old_origin += (old_rows - new_rows) * old_row_size; | 908 | old_origin += (old_rows - new_rows) * old_row_size; |
| 909 | end = vc->vc_scr_end; | ||
| 910 | } else { | 909 | } else { |
| 911 | /* | 910 | /* |
| 912 | * Cursor is in no man's land, copy 1/2 screenful | 911 | * Cursor is in no man's land, copy 1/2 screenful |
| 913 | * from the top and bottom of cursor position | 912 | * from the top and bottom of cursor position |
| 914 | */ | 913 | */ |
| 915 | old_origin += (vc->vc_y - new_rows/2) * old_row_size; | 914 | old_origin += (vc->vc_y - new_rows/2) * old_row_size; |
| 916 | end = old_origin + (old_row_size * new_rows); | ||
| 917 | } | 915 | } |
| 918 | } else | 916 | } |
| 919 | /* | 917 | |
| 920 | * Cursor near the top, copy contents from the top of buffer | 918 | end = old_origin + old_row_size * min(old_rows, new_rows); |
| 921 | */ | ||
| 922 | end = (old_rows > new_rows) ? old_origin + | ||
| 923 | (old_row_size * new_rows) : | ||
| 924 | vc->vc_scr_end; | ||
| 925 | 919 | ||
| 926 | update_attr(vc); | 920 | update_attr(vc); |
| 927 | 921 | ||
| @@ -3075,8 +3069,7 @@ static int bind_con_driver(const struct consw *csw, int first, int last, | |||
| 3075 | 3069 | ||
| 3076 | old_was_color = vc->vc_can_do_color; | 3070 | old_was_color = vc->vc_can_do_color; |
| 3077 | vc->vc_sw->con_deinit(vc); | 3071 | vc->vc_sw->con_deinit(vc); |
| 3078 | if (!vc->vc_origin) | 3072 | vc->vc_origin = (unsigned long)vc->vc_screenbuf; |
| 3079 | vc->vc_origin = (unsigned long)vc->vc_screenbuf; | ||
| 3080 | visual_init(vc, i, 0); | 3073 | visual_init(vc, i, 0); |
| 3081 | set_origin(vc); | 3074 | set_origin(vc); |
| 3082 | update_attr(vc); | 3075 | update_attr(vc); |
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c index 2bbeaaea46e9..38df8c19e74c 100644 --- a/drivers/char/vt_ioctl.c +++ b/drivers/char/vt_ioctl.c | |||
| @@ -533,11 +533,14 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, | |||
| 533 | case KIOCSOUND: | 533 | case KIOCSOUND: |
| 534 | if (!perm) | 534 | if (!perm) |
| 535 | goto eperm; | 535 | goto eperm; |
| 536 | /* FIXME: This is an old broken API but we need to keep it | 536 | /* |
| 537 | supported and somehow separate the historic advertised | 537 | * The use of PIT_TICK_RATE is historic, it used to be |
| 538 | tick rate from any real one */ | 538 | * the platform-dependent CLOCK_TICK_RATE between 2.6.12 |
| 539 | * and 2.6.36, which was a minor but unfortunate ABI | ||
| 540 | * change. | ||
| 541 | */ | ||
| 539 | if (arg) | 542 | if (arg) |
| 540 | arg = CLOCK_TICK_RATE / arg; | 543 | arg = PIT_TICK_RATE / arg; |
| 541 | kd_mksound(arg, 0); | 544 | kd_mksound(arg, 0); |
| 542 | break; | 545 | break; |
| 543 | 546 | ||
| @@ -553,11 +556,8 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, | |||
| 553 | */ | 556 | */ |
| 554 | ticks = HZ * ((arg >> 16) & 0xffff) / 1000; | 557 | ticks = HZ * ((arg >> 16) & 0xffff) / 1000; |
| 555 | count = ticks ? (arg & 0xffff) : 0; | 558 | count = ticks ? (arg & 0xffff) : 0; |
| 556 | /* FIXME: This is an old broken API but we need to keep it | ||
| 557 | supported and somehow separate the historic advertised | ||
| 558 | tick rate from any real one */ | ||
| 559 | if (count) | 559 | if (count) |
| 560 | count = CLOCK_TICK_RATE / count; | 560 | count = PIT_TICK_RATE / count; |
| 561 | kd_mksound(count, ticks); | 561 | kd_mksound(count, ticks); |
| 562 | break; | 562 | break; |
| 563 | } | 563 | } |
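A worked example of why the vt_ioctl.c change matters to the KIOCSOUND/KDMKTONE ABI: userspace has always passed the classic PC-speaker divisor, roughly 1193180 divided by the desired frequency, and the kernel divides the same constant back out to recover Hz. Between 2.6.12 and 2.6.36 the kernel divided by the platform CLOCK_TICK_RATE instead, which skewed the tone wherever that rate differs from the PIT rate. A userspace sketch (descriptor and frequency are illustrative):

    #include <sys/ioctl.h>
    #include <linux/kd.h>

    #define PC_SPEAKER_RATE 1193180     /* historic PIT clock, Hz */

    static void beep_440hz(int console_fd)
    {
        /* the argument is a divisor; the kernel computes
         * PIT_TICK_RATE / arg to recover ~440 Hz */
        ioctl(console_fd, KIOCSOUND, PC_SPEAKER_RATE / 440);
        /* ... let it sound, then ... */
        ioctl(console_fd, KIOCSOUND, 0);    /* 0 switches the tone off */
    }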
diff --git a/drivers/gpio/sx150x.c b/drivers/gpio/sx150x.c index b42f42ca70c3..823559ab0e24 100644 --- a/drivers/gpio/sx150x.c +++ b/drivers/gpio/sx150x.c | |||
| @@ -459,17 +459,33 @@ static int sx150x_init_io(struct sx150x_chip *chip, u8 base, u16 cfg) | |||
| 459 | return err; | 459 | return err; |
| 460 | } | 460 | } |
| 461 | 461 | ||
| 462 | static int sx150x_init_hw(struct sx150x_chip *chip, | 462 | static int sx150x_reset(struct sx150x_chip *chip) |
| 463 | struct sx150x_platform_data *pdata) | ||
| 464 | { | 463 | { |
| 465 | int err = 0; | 464 | int err; |
| 466 | 465 | ||
| 467 | err = i2c_smbus_write_word_data(chip->client, | 466 | err = i2c_smbus_write_byte_data(chip->client, |
| 468 | chip->dev_cfg->reg_reset, | 467 | chip->dev_cfg->reg_reset, |
| 469 | 0x3412); | 468 | 0x12); |
| 470 | if (err < 0) | 469 | if (err < 0) |
| 471 | return err; | 470 | return err; |
| 472 | 471 | ||
| 472 | err = i2c_smbus_write_byte_data(chip->client, | ||
| 473 | chip->dev_cfg->reg_reset, | ||
| 474 | 0x34); | ||
| 475 | return err; | ||
| 476 | } | ||
| 477 | |||
| 478 | static int sx150x_init_hw(struct sx150x_chip *chip, | ||
| 479 | struct sx150x_platform_data *pdata) | ||
| 480 | { | ||
| 481 | int err = 0; | ||
| 482 | |||
| 483 | if (pdata->reset_during_probe) { | ||
| 484 | err = sx150x_reset(chip); | ||
| 485 | if (err < 0) | ||
| 486 | return err; | ||
| 487 | } | ||
| 488 | |||
| 473 | err = sx150x_i2c_write(chip->client, | 489 | err = sx150x_i2c_write(chip->client, |
| 474 | chip->dev_cfg->reg_misc, | 490 | chip->dev_cfg->reg_misc, |
| 475 | 0x01); | 491 | 0x01); |
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 7e31d4348340..d2ab01e90a96 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c | |||
| @@ -34,6 +34,9 @@ | |||
| 34 | #include "drm_crtc_helper.h" | 34 | #include "drm_crtc_helper.h" |
| 35 | #include "drm_fb_helper.h" | 35 | #include "drm_fb_helper.h" |
| 36 | 36 | ||
| 37 | static bool drm_kms_helper_poll = true; | ||
| 38 | module_param_named(poll, drm_kms_helper_poll, bool, 0600); | ||
| 39 | |||
| 37 | static void drm_mode_validate_flag(struct drm_connector *connector, | 40 | static void drm_mode_validate_flag(struct drm_connector *connector, |
| 38 | int flags) | 41 | int flags) |
| 39 | { | 42 | { |
| @@ -99,8 +102,10 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, | |||
| 99 | connector->status = connector_status_disconnected; | 102 | connector->status = connector_status_disconnected; |
| 100 | if (connector->funcs->force) | 103 | if (connector->funcs->force) |
| 101 | connector->funcs->force(connector); | 104 | connector->funcs->force(connector); |
| 102 | } else | 105 | } else { |
| 103 | connector->status = connector->funcs->detect(connector); | 106 | connector->status = connector->funcs->detect(connector); |
| 107 | drm_helper_hpd_irq_event(dev); | ||
| 108 | } | ||
| 104 | 109 | ||
| 105 | if (connector->status == connector_status_disconnected) { | 110 | if (connector->status == connector_status_disconnected) { |
| 106 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n", | 111 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n", |
| @@ -110,11 +115,10 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, | |||
| 110 | } | 115 | } |
| 111 | 116 | ||
| 112 | count = (*connector_funcs->get_modes)(connector); | 117 | count = (*connector_funcs->get_modes)(connector); |
| 113 | if (!count) { | 118 | if (count == 0 && connector->status == connector_status_connected) |
| 114 | count = drm_add_modes_noedid(connector, 1024, 768); | 119 | count = drm_add_modes_noedid(connector, 1024, 768); |
| 115 | if (!count) | 120 | if (count == 0) |
| 116 | return 0; | 121 | goto prune; |
| 117 | } | ||
| 118 | 122 | ||
| 119 | drm_mode_connector_list_update(connector); | 123 | drm_mode_connector_list_update(connector); |
| 120 | 124 | ||
| @@ -840,6 +844,9 @@ static void output_poll_execute(struct work_struct *work) | |||
| 840 | enum drm_connector_status old_status, status; | 844 | enum drm_connector_status old_status, status; |
| 841 | bool repoll = false, changed = false; | 845 | bool repoll = false, changed = false; |
| 842 | 846 | ||
| 847 | if (!drm_kms_helper_poll) | ||
| 848 | return; | ||
| 849 | |||
| 843 | mutex_lock(&dev->mode_config.mutex); | 850 | mutex_lock(&dev->mode_config.mutex); |
| 844 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 851 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
| 845 | 852 | ||
| @@ -890,6 +897,9 @@ void drm_kms_helper_poll_enable(struct drm_device *dev) | |||
| 890 | bool poll = false; | 897 | bool poll = false; |
| 891 | struct drm_connector *connector; | 898 | struct drm_connector *connector; |
| 892 | 899 | ||
| 900 | if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll) | ||
| 901 | return; | ||
| 902 | |||
| 893 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 903 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
| 894 | if (connector->polled) | 904 | if (connector->polled) |
| 895 | poll = true; | 905 | poll = true; |
| @@ -919,8 +929,10 @@ void drm_helper_hpd_irq_event(struct drm_device *dev) | |||
| 919 | { | 929 | { |
| 920 | if (!dev->mode_config.poll_enabled) | 930 | if (!dev->mode_config.poll_enabled) |
| 921 | return; | 931 | return; |
| 932 | |||
| 922 | /* kill timer and schedule immediate execution, this doesn't block */ | 933 | /* kill timer and schedule immediate execution, this doesn't block */ |
| 923 | cancel_delayed_work(&dev->mode_config.output_poll_work); | 934 | cancel_delayed_work(&dev->mode_config.output_poll_work); |
| 924 | queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0); | 935 | if (drm_kms_helper_poll) |
| 936 | queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0); | ||
| 925 | } | 937 | } |
| 926 | EXPORT_SYMBOL(drm_helper_hpd_irq_event); | 938 | EXPORT_SYMBOL(drm_helper_hpd_irq_event); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 6b208ffafa8d..87ac21ec23d2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c | |||
| @@ -64,16 +64,17 @@ nouveau_fence_update(struct nouveau_channel *chan) | |||
| 64 | struct nouveau_fence *fence; | 64 | struct nouveau_fence *fence; |
| 65 | uint32_t sequence; | 65 | uint32_t sequence; |
| 66 | 66 | ||
| 67 | spin_lock(&chan->fence.lock); | ||
| 68 | |||
| 67 | if (USE_REFCNT) | 69 | if (USE_REFCNT) |
| 68 | sequence = nvchan_rd32(chan, 0x48); | 70 | sequence = nvchan_rd32(chan, 0x48); |
| 69 | else | 71 | else |
| 70 | sequence = atomic_read(&chan->fence.last_sequence_irq); | 72 | sequence = atomic_read(&chan->fence.last_sequence_irq); |
| 71 | 73 | ||
| 72 | if (chan->fence.sequence_ack == sequence) | 74 | if (chan->fence.sequence_ack == sequence) |
| 73 | return; | 75 | goto out; |
| 74 | chan->fence.sequence_ack = sequence; | 76 | chan->fence.sequence_ack = sequence; |
| 75 | 77 | ||
| 76 | spin_lock(&chan->fence.lock); | ||
| 77 | list_for_each_safe(entry, tmp, &chan->fence.pending) { | 78 | list_for_each_safe(entry, tmp, &chan->fence.pending) { |
| 78 | fence = list_entry(entry, struct nouveau_fence, entry); | 79 | fence = list_entry(entry, struct nouveau_fence, entry); |
| 79 | 80 | ||
| @@ -85,6 +86,7 @@ nouveau_fence_update(struct nouveau_channel *chan) | |||
| 85 | if (sequence == chan->fence.sequence_ack) | 86 | if (sequence == chan->fence.sequence_ack) |
| 86 | break; | 87 | break; |
| 87 | } | 88 | } |
| 89 | out: | ||
| 88 | spin_unlock(&chan->fence.lock); | 90 | spin_unlock(&chan->fence.lock); |
| 89 | } | 91 | } |
| 90 | 92 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 581c67cd7b24..ead7b8fc53fc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c | |||
| @@ -245,7 +245,7 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence) | |||
| 245 | list_del(&nvbo->entry); | 245 | list_del(&nvbo->entry); |
| 246 | nvbo->reserved_by = NULL; | 246 | nvbo->reserved_by = NULL; |
| 247 | ttm_bo_unreserve(&nvbo->bo); | 247 | ttm_bo_unreserve(&nvbo->bo); |
| 248 | drm_gem_object_unreference(nvbo->gem); | 248 | drm_gem_object_unreference_unlocked(nvbo->gem); |
| 249 | } | 249 | } |
| 250 | } | 250 | } |
| 251 | 251 | ||
| @@ -300,7 +300,7 @@ retry: | |||
| 300 | validate_fini(op, NULL); | 300 | validate_fini(op, NULL); |
| 301 | if (ret == -EAGAIN) | 301 | if (ret == -EAGAIN) |
| 302 | ret = ttm_bo_wait_unreserved(&nvbo->bo, false); | 302 | ret = ttm_bo_wait_unreserved(&nvbo->bo, false); |
| 303 | drm_gem_object_unreference(gem); | 303 | drm_gem_object_unreference_unlocked(gem); |
| 304 | if (ret) { | 304 | if (ret) { |
| 305 | NV_ERROR(dev, "fail reserve\n"); | 305 | NV_ERROR(dev, "fail reserve\n"); |
| 306 | return ret; | 306 | return ret; |
| @@ -616,8 +616,6 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, | |||
| 616 | return PTR_ERR(bo); | 616 | return PTR_ERR(bo); |
| 617 | } | 617 | } |
| 618 | 618 | ||
| 619 | mutex_lock(&dev->struct_mutex); | ||
| 620 | |||
| 621 | /* Mark push buffers as being used on PFIFO, the validation code | 619 | /* Mark push buffers as being used on PFIFO, the validation code |
| 622 | * will then make sure that if the pushbuf bo moves, that they | 620 | * will then make sure that if the pushbuf bo moves, that they |
| 623 | * happen on the kernel channel, which will in turn cause a sync | 621 | * happen on the kernel channel, which will in turn cause a sync |
| @@ -731,7 +729,6 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, | |||
| 731 | out: | 729 | out: |
| 732 | validate_fini(&op, fence); | 730 | validate_fini(&op, fence); |
| 733 | nouveau_fence_unref((void**)&fence); | 731 | nouveau_fence_unref((void**)&fence); |
| 734 | mutex_unlock(&dev->struct_mutex); | ||
| 735 | kfree(bo); | 732 | kfree(bo); |
| 736 | kfree(push); | 733 | kfree(push); |
| 737 | 734 | ||
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index c95bf9b681dd..91ef93cf1f35 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c | |||
| @@ -139,6 +139,8 @@ nv50_instmem_init(struct drm_device *dev) | |||
| 139 | chan->file_priv = (struct drm_file *)-2; | 139 | chan->file_priv = (struct drm_file *)-2; |
| 140 | dev_priv->fifos[0] = dev_priv->fifos[127] = chan; | 140 | dev_priv->fifos[0] = dev_priv->fifos[127] = chan; |
| 141 | 141 | ||
| 142 | INIT_LIST_HEAD(&chan->ramht_refs); | ||
| 143 | |||
| 142 | /* Channel's PRAMIN object + heap */ | 144 | /* Channel's PRAMIN object + heap */ |
| 143 | ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0, | 145 | ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0, |
| 144 | NULL, &chan->ramin); | 146 | NULL, &chan->ramin); |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 577239a24fd5..464a81a1990f 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
| @@ -332,6 +332,11 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc, | |||
| 332 | args.usV_SyncWidth = | 332 | args.usV_SyncWidth = |
| 333 | cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start); | 333 | cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start); |
| 334 | 334 | ||
| 335 | args.ucOverscanRight = radeon_crtc->h_border; | ||
| 336 | args.ucOverscanLeft = radeon_crtc->h_border; | ||
| 337 | args.ucOverscanBottom = radeon_crtc->v_border; | ||
| 338 | args.ucOverscanTop = radeon_crtc->v_border; | ||
| 339 | |||
| 335 | if (mode->flags & DRM_MODE_FLAG_NVSYNC) | 340 | if (mode->flags & DRM_MODE_FLAG_NVSYNC) |
| 336 | misc |= ATOM_VSYNC_POLARITY; | 341 | misc |= ATOM_VSYNC_POLARITY; |
| 337 | if (mode->flags & DRM_MODE_FLAG_NHSYNC) | 342 | if (mode->flags & DRM_MODE_FLAG_NHSYNC) |
| @@ -534,6 +539,20 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
| 534 | pll->algo = PLL_ALGO_LEGACY; | 539 | pll->algo = PLL_ALGO_LEGACY; |
| 535 | pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER; | 540 | pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER; |
| 536 | } | 541 | } |
| 542 | /* There is some evidence (often anecdotal) that RV515 LVDS | ||
| 543 | * (on some boards at least) prefers the legacy algo. I'm not | ||
| 544 | * sure whether this should handled generically or on a | ||
| 545 | * case-by-case quirk basis. Both algos should work fine in the | ||
| 546 | * majority of cases. | ||
| 547 | */ | ||
| 548 | if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) && | ||
| 549 | (rdev->family == CHIP_RV515)) { | ||
| 550 | /* allow the user to overrride just in case */ | ||
| 551 | if (radeon_new_pll == 1) | ||
| 552 | pll->algo = PLL_ALGO_NEW; | ||
| 553 | else | ||
| 554 | pll->algo = PLL_ALGO_LEGACY; | ||
| 555 | } | ||
| 537 | } else { | 556 | } else { |
| 538 | if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) | 557 | if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) |
| 539 | pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; | 558 | pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; |
| @@ -1056,11 +1075,11 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
| 1056 | 1075 | ||
| 1057 | if (rdev->family >= CHIP_RV770) { | 1076 | if (rdev->family >= CHIP_RV770) { |
| 1058 | if (radeon_crtc->crtc_id) { | 1077 | if (radeon_crtc->crtc_id) { |
| 1059 | WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0); | 1078 | WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location)); |
| 1060 | WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0); | 1079 | WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location)); |
| 1061 | } else { | 1080 | } else { |
| 1062 | WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0); | 1081 | WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location)); |
| 1063 | WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0); | 1082 | WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location)); |
| 1064 | } | 1083 | } |
| 1065 | } | 1084 | } |
| 1066 | WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, | 1085 | WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, |
| @@ -1197,8 +1216,18 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc, | |||
| 1197 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | 1216 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
| 1198 | struct drm_device *dev = crtc->dev; | 1217 | struct drm_device *dev = crtc->dev; |
| 1199 | struct radeon_device *rdev = dev->dev_private; | 1218 | struct radeon_device *rdev = dev->dev_private; |
| 1219 | struct drm_encoder *encoder; | ||
| 1220 | bool is_tvcv = false; | ||
| 1200 | 1221 | ||
| 1201 | /* TODO color tiling */ | 1222 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
| 1223 | /* find tv std */ | ||
| 1224 | if (encoder->crtc == crtc) { | ||
| 1225 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
| 1226 | if (radeon_encoder->active_device & | ||
| 1227 | (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) | ||
| 1228 | is_tvcv = true; | ||
| 1229 | } | ||
| 1230 | } | ||
| 1202 | 1231 | ||
| 1203 | atombios_disable_ss(crtc); | 1232 | atombios_disable_ss(crtc); |
| 1204 | /* always set DCPLL */ | 1233 | /* always set DCPLL */ |
| @@ -1207,9 +1236,14 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc, | |||
| 1207 | atombios_crtc_set_pll(crtc, adjusted_mode); | 1236 | atombios_crtc_set_pll(crtc, adjusted_mode); |
| 1208 | atombios_enable_ss(crtc); | 1237 | atombios_enable_ss(crtc); |
| 1209 | 1238 | ||
| 1210 | if (ASIC_IS_AVIVO(rdev)) | 1239 | if (ASIC_IS_DCE4(rdev)) |
| 1211 | atombios_set_crtc_dtd_timing(crtc, adjusted_mode); | 1240 | atombios_set_crtc_dtd_timing(crtc, adjusted_mode); |
| 1212 | else { | 1241 | else if (ASIC_IS_AVIVO(rdev)) { |
| 1242 | if (is_tvcv) | ||
| 1243 | atombios_crtc_set_timing(crtc, adjusted_mode); | ||
| 1244 | else | ||
| 1245 | atombios_set_crtc_dtd_timing(crtc, adjusted_mode); | ||
| 1246 | } else { | ||
| 1213 | atombios_crtc_set_timing(crtc, adjusted_mode); | 1247 | atombios_crtc_set_timing(crtc, adjusted_mode); |
| 1214 | if (radeon_crtc->crtc_id == 0) | 1248 | if (radeon_crtc->crtc_id == 0) |
| 1215 | atombios_set_crtc_dtd_timing(crtc, adjusted_mode); | 1249 | atombios_set_crtc_dtd_timing(crtc, adjusted_mode); |
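The comment in the hunk above leaves open whether the RV515 LVDS preference for the legacy PLL algorithm should stay an inline check or become a generic quirk. As a rough illustration only, a table-driven variant might look like the sketch below; the struct, table and helper names are hypothetical, it omits the radeon_new_pll user override shown in the hunk, and only CHIP_RV515, ATOM_DEVICE_LCD_SUPPORT and the PLL_ALGO_* values come from the diff itself.

    /* Hypothetical quirk table for forcing a PLL algorithm on specific
     * ASIC/encoder combinations; not part of the driver. */
    struct radeon_pll_algo_quirk {
        enum radeon_family family;
        u32 active_device_mask;
        int forced_algo;
    };

    static const struct radeon_pll_algo_quirk radeon_pll_algo_quirks[] = {
        { CHIP_RV515, ATOM_DEVICE_LCD_SUPPORT, PLL_ALGO_LEGACY },
        { 0, 0, 0 },    /* sentinel */
    };

    static void radeon_apply_pll_algo_quirks(struct radeon_device *rdev,
                                             struct radeon_encoder *radeon_encoder,
                                             struct radeon_pll *pll)
    {
        const struct radeon_pll_algo_quirk *q;

        for (q = radeon_pll_algo_quirks; q->active_device_mask; q++) {
            if (rdev->family == q->family &&
                (radeon_encoder->active_device & q->active_device_mask))
                pll->algo = q->forced_algo;
        }
    }

Behaviour is the same either way; the inline check simply keeps the change local while the evidence for the quirk is still anecdotal.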
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 957d5067ad9c..b8b7f010b25f 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
| @@ -675,6 +675,43 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev) | |||
| 675 | return 0; | 675 | return 0; |
| 676 | } | 676 | } |
| 677 | 677 | ||
| 678 | static int evergreen_cp_start(struct radeon_device *rdev) | ||
| 679 | { | ||
| 680 | int r; | ||
| 681 | uint32_t cp_me; | ||
| 682 | |||
| 683 | r = radeon_ring_lock(rdev, 7); | ||
| 684 | if (r) { | ||
| 685 | DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); | ||
| 686 | return r; | ||
| 687 | } | ||
| 688 | radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5)); | ||
| 689 | radeon_ring_write(rdev, 0x1); | ||
| 690 | radeon_ring_write(rdev, 0x0); | ||
| 691 | radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1); | ||
| 692 | radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); | ||
| 693 | radeon_ring_write(rdev, 0); | ||
| 694 | radeon_ring_write(rdev, 0); | ||
| 695 | radeon_ring_unlock_commit(rdev); | ||
| 696 | |||
| 697 | cp_me = 0xff; | ||
| 698 | WREG32(CP_ME_CNTL, cp_me); | ||
| 699 | |||
| 700 | r = radeon_ring_lock(rdev, 4); | ||
| 701 | if (r) { | ||
| 702 | DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); | ||
| 703 | return r; | ||
| 704 | } | ||
| 705 | /* init some VGT regs */ | ||
| 706 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); | ||
| 707 | radeon_ring_write(rdev, (VGT_VERTEX_REUSE_BLOCK_CNTL - PACKET3_SET_CONTEXT_REG_START) >> 2); | ||
| 708 | radeon_ring_write(rdev, 0xe); | ||
| 709 | radeon_ring_write(rdev, 0x10); | ||
| 710 | radeon_ring_unlock_commit(rdev); | ||
| 711 | |||
| 712 | return 0; | ||
| 713 | } | ||
| 714 | |||
| 678 | int evergreen_cp_resume(struct radeon_device *rdev) | 715 | int evergreen_cp_resume(struct radeon_device *rdev) |
| 679 | { | 716 | { |
| 680 | u32 tmp; | 717 | u32 tmp; |
| @@ -719,7 +756,7 @@ int evergreen_cp_resume(struct radeon_device *rdev) | |||
| 719 | rdev->cp.rptr = RREG32(CP_RB_RPTR); | 756 | rdev->cp.rptr = RREG32(CP_RB_RPTR); |
| 720 | rdev->cp.wptr = RREG32(CP_RB_WPTR); | 757 | rdev->cp.wptr = RREG32(CP_RB_WPTR); |
| 721 | 758 | ||
| 722 | r600_cp_start(rdev); | 759 | evergreen_cp_start(rdev); |
| 723 | rdev->cp.ready = true; | 760 | rdev->cp.ready = true; |
| 724 | r = radeon_ring_test(rdev); | 761 | r = radeon_ring_test(rdev); |
| 725 | if (r) { | 762 | if (r) { |
| @@ -2054,11 +2091,6 @@ int evergreen_resume(struct radeon_device *rdev) | |||
| 2054 | */ | 2091 | */ |
| 2055 | /* post card */ | 2092 | /* post card */ |
| 2056 | atom_asic_init(rdev->mode_info.atom_context); | 2093 | atom_asic_init(rdev->mode_info.atom_context); |
| 2057 | /* Initialize clocks */ | ||
| 2058 | r = radeon_clocks_init(rdev); | ||
| 2059 | if (r) { | ||
| 2060 | return r; | ||
| 2061 | } | ||
| 2062 | 2094 | ||
| 2063 | r = evergreen_startup(rdev); | 2095 | r = evergreen_startup(rdev); |
| 2064 | if (r) { | 2096 | if (r) { |
| @@ -2164,9 +2196,6 @@ int evergreen_init(struct radeon_device *rdev) | |||
| 2164 | radeon_surface_init(rdev); | 2196 | radeon_surface_init(rdev); |
| 2165 | /* Initialize clocks */ | 2197 | /* Initialize clocks */ |
| 2166 | radeon_get_clock_info(rdev->ddev); | 2198 | radeon_get_clock_info(rdev->ddev); |
| 2167 | r = radeon_clocks_init(rdev); | ||
| 2168 | if (r) | ||
| 2169 | return r; | ||
| 2170 | /* Fence driver */ | 2199 | /* Fence driver */ |
| 2171 | r = radeon_fence_driver_init(rdev); | 2200 | r = radeon_fence_driver_init(rdev); |
| 2172 | if (r) | 2201 | if (r) |
| @@ -2236,7 +2265,6 @@ void evergreen_fini(struct radeon_device *rdev) | |||
| 2236 | evergreen_pcie_gart_fini(rdev); | 2265 | evergreen_pcie_gart_fini(rdev); |
| 2237 | radeon_gem_fini(rdev); | 2266 | radeon_gem_fini(rdev); |
| 2238 | radeon_fence_driver_fini(rdev); | 2267 | radeon_fence_driver_fini(rdev); |
| 2239 | radeon_clocks_fini(rdev); | ||
| 2240 | radeon_agp_fini(rdev); | 2268 | radeon_agp_fini(rdev); |
| 2241 | radeon_bo_fini(rdev); | 2269 | radeon_bo_fini(rdev); |
| 2242 | radeon_atombios_fini(rdev); | 2270 | radeon_atombios_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index d0ebae9dde25..afc18d87fdca 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
| @@ -2119,10 +2119,7 @@ int r600_cp_start(struct radeon_device *rdev) | |||
| 2119 | } | 2119 | } |
| 2120 | radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5)); | 2120 | radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5)); |
| 2121 | radeon_ring_write(rdev, 0x1); | 2121 | radeon_ring_write(rdev, 0x1); |
| 2122 | if (rdev->family >= CHIP_CEDAR) { | 2122 | if (rdev->family >= CHIP_RV770) { |
| 2123 | radeon_ring_write(rdev, 0x0); | ||
| 2124 | radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1); | ||
| 2125 | } else if (rdev->family >= CHIP_RV770) { | ||
| 2126 | radeon_ring_write(rdev, 0x0); | 2123 | radeon_ring_write(rdev, 0x0); |
| 2127 | radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1); | 2124 | radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1); |
| 2128 | } else { | 2125 | } else { |
| @@ -2489,11 +2486,6 @@ int r600_resume(struct radeon_device *rdev) | |||
| 2489 | */ | 2486 | */ |
| 2490 | /* post card */ | 2487 | /* post card */ |
| 2491 | atom_asic_init(rdev->mode_info.atom_context); | 2488 | atom_asic_init(rdev->mode_info.atom_context); |
| 2492 | /* Initialize clocks */ | ||
| 2493 | r = radeon_clocks_init(rdev); | ||
| 2494 | if (r) { | ||
| 2495 | return r; | ||
| 2496 | } | ||
| 2497 | 2489 | ||
| 2498 | r = r600_startup(rdev); | 2490 | r = r600_startup(rdev); |
| 2499 | if (r) { | 2491 | if (r) { |
| @@ -2586,9 +2578,6 @@ int r600_init(struct radeon_device *rdev) | |||
| 2586 | radeon_surface_init(rdev); | 2578 | radeon_surface_init(rdev); |
| 2587 | /* Initialize clocks */ | 2579 | /* Initialize clocks */ |
| 2588 | radeon_get_clock_info(rdev->ddev); | 2580 | radeon_get_clock_info(rdev->ddev); |
| 2589 | r = radeon_clocks_init(rdev); | ||
| 2590 | if (r) | ||
| 2591 | return r; | ||
| 2592 | /* Fence driver */ | 2581 | /* Fence driver */ |
| 2593 | r = radeon_fence_driver_init(rdev); | 2582 | r = radeon_fence_driver_init(rdev); |
| 2594 | if (r) | 2583 | if (r) |
| @@ -2663,7 +2652,6 @@ void r600_fini(struct radeon_device *rdev) | |||
| 2663 | radeon_agp_fini(rdev); | 2652 | radeon_agp_fini(rdev); |
| 2664 | radeon_gem_fini(rdev); | 2653 | radeon_gem_fini(rdev); |
| 2665 | radeon_fence_driver_fini(rdev); | 2654 | radeon_fence_driver_fini(rdev); |
| 2666 | radeon_clocks_fini(rdev); | ||
| 2667 | radeon_bo_fini(rdev); | 2655 | radeon_bo_fini(rdev); |
| 2668 | radeon_atombios_fini(rdev); | 2656 | radeon_atombios_fini(rdev); |
| 2669 | kfree(rdev->bios); | 2657 | kfree(rdev->bios); |
| @@ -3541,7 +3529,7 @@ void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo) | |||
| 3541 | * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL | 3529 | * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL |
| 3542 | */ | 3530 | */ |
| 3543 | if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) { | 3531 | if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) { |
| 3544 | void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; | 3532 | void __iomem *ptr = (void *)rdev->vram_scratch.ptr; |
| 3545 | u32 tmp; | 3533 | u32 tmp; |
| 3546 | 3534 | ||
| 3547 | WREG32(HDP_DEBUG1, 0); | 3535 | WREG32(HDP_DEBUG1, 0); |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 3dfcfa3ca425..a168d644bf9e 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
| @@ -1013,6 +1013,11 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, | |||
| 1013 | int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, | 1013 | int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, |
| 1014 | struct drm_file *filp); | 1014 | struct drm_file *filp); |
| 1015 | 1015 | ||
| 1016 | /* VRAM scratch page for HDP bug */ | ||
| 1017 | struct r700_vram_scratch { | ||
| 1018 | struct radeon_bo *robj; | ||
| 1019 | volatile uint32_t *ptr; | ||
| 1020 | }; | ||
| 1016 | 1021 | ||
| 1017 | /* | 1022 | /* |
| 1018 | * Core structure, functions and helpers. | 1023 | * Core structure, functions and helpers. |
| @@ -1079,6 +1084,7 @@ struct radeon_device { | |||
| 1079 | const struct firmware *pfp_fw; /* r6/700 PFP firmware */ | 1084 | const struct firmware *pfp_fw; /* r6/700 PFP firmware */ |
| 1080 | const struct firmware *rlc_fw; /* r6/700 RLC firmware */ | 1085 | const struct firmware *rlc_fw; /* r6/700 RLC firmware */ |
| 1081 | struct r600_blit r600_blit; | 1086 | struct r600_blit r600_blit; |
| 1087 | struct r700_vram_scratch vram_scratch; | ||
| 1082 | int msi_enabled; /* msi enabled */ | 1088 | int msi_enabled; /* msi enabled */ |
| 1083 | struct r600_ih ih; /* r6/700 interrupt ring */ | 1089 | struct r600_ih ih; /* r6/700 interrupt ring */ |
| 1084 | struct workqueue_struct *wq; | 1090 | struct workqueue_struct *wq; |
| @@ -1333,8 +1339,6 @@ extern bool radeon_card_posted(struct radeon_device *rdev); | |||
| 1333 | extern void radeon_update_bandwidth_info(struct radeon_device *rdev); | 1339 | extern void radeon_update_bandwidth_info(struct radeon_device *rdev); |
| 1334 | extern void radeon_update_display_priority(struct radeon_device *rdev); | 1340 | extern void radeon_update_display_priority(struct radeon_device *rdev); |
| 1335 | extern bool radeon_boot_test_post_card(struct radeon_device *rdev); | 1341 | extern bool radeon_boot_test_post_card(struct radeon_device *rdev); |
| 1336 | extern int radeon_clocks_init(struct radeon_device *rdev); | ||
| 1337 | extern void radeon_clocks_fini(struct radeon_device *rdev); | ||
| 1338 | extern void radeon_scratch_init(struct radeon_device *rdev); | 1342 | extern void radeon_scratch_init(struct radeon_device *rdev); |
| 1339 | extern void radeon_surface_init(struct radeon_device *rdev); | 1343 | extern void radeon_surface_init(struct radeon_device *rdev); |
| 1340 | extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data); | 1344 | extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data); |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index a21bf88e8c2d..25e1dd197791 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
| @@ -858,21 +858,3 @@ int radeon_asic_init(struct radeon_device *rdev) | |||
| 858 | return 0; | 858 | return 0; |
| 859 | } | 859 | } |
| 860 | 860 | ||
| 861 | /* | ||
| 862 | * Wrapper around modesetting bits. Move to radeon_clocks.c? | ||
| 863 | */ | ||
| 864 | int radeon_clocks_init(struct radeon_device *rdev) | ||
| 865 | { | ||
| 866 | int r; | ||
| 867 | |||
| 868 | r = radeon_static_clocks_init(rdev->ddev); | ||
| 869 | if (r) { | ||
| 870 | return r; | ||
| 871 | } | ||
| 872 | DRM_INFO("Clocks initialized !\n"); | ||
| 873 | return 0; | ||
| 874 | } | ||
| 875 | |||
| 876 | void radeon_clocks_fini(struct radeon_device *rdev) | ||
| 877 | { | ||
| 878 | } | ||
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 61141981880d..ebae14c4b768 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
| @@ -85,6 +85,19 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev | |||
| 85 | for (i = 0; i < num_indices; i++) { | 85 | for (i = 0; i < num_indices; i++) { |
| 86 | gpio = &i2c_info->asGPIO_Info[i]; | 86 | gpio = &i2c_info->asGPIO_Info[i]; |
| 87 | 87 | ||
| 88 | /* some evergreen boards have bad data for this entry */ | ||
| 89 | if (ASIC_IS_DCE4(rdev)) { | ||
| 90 | if ((i == 7) && | ||
| 91 | (gpio->usClkMaskRegisterIndex == 0x1936) && | ||
| 92 | (gpio->sucI2cId.ucAccess == 0)) { | ||
| 93 | gpio->sucI2cId.ucAccess = 0x97; | ||
| 94 | gpio->ucDataMaskShift = 8; | ||
| 95 | gpio->ucDataEnShift = 8; | ||
| 96 | gpio->ucDataY_Shift = 8; | ||
| 97 | gpio->ucDataA_Shift = 8; | ||
| 98 | } | ||
| 99 | } | ||
| 100 | |||
| 88 | if (gpio->sucI2cId.ucAccess == id) { | 101 | if (gpio->sucI2cId.ucAccess == id) { |
| 89 | i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; | 102 | i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; |
| 90 | i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; | 103 | i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; |
| @@ -147,6 +160,20 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev) | |||
| 147 | for (i = 0; i < num_indices; i++) { | 160 | for (i = 0; i < num_indices; i++) { |
| 148 | gpio = &i2c_info->asGPIO_Info[i]; | 161 | gpio = &i2c_info->asGPIO_Info[i]; |
| 149 | i2c.valid = false; | 162 | i2c.valid = false; |
| 163 | |||
| 164 | /* some evergreen boards have bad data for this entry */ | ||
| 165 | if (ASIC_IS_DCE4(rdev)) { | ||
| 166 | if ((i == 7) && | ||
| 167 | (gpio->usClkMaskRegisterIndex == 0x1936) && | ||
| 168 | (gpio->sucI2cId.ucAccess == 0)) { | ||
| 169 | gpio->sucI2cId.ucAccess = 0x97; | ||
| 170 | gpio->ucDataMaskShift = 8; | ||
| 171 | gpio->ucDataEnShift = 8; | ||
| 172 | gpio->ucDataY_Shift = 8; | ||
| 173 | gpio->ucDataA_Shift = 8; | ||
| 174 | } | ||
| 175 | } | ||
| 176 | |||
| 150 | i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; | 177 | i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; |
| 151 | i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; | 178 | i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; |
| 152 | i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; | 179 | i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; |
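The same DCE4 GPIO fixup now appears verbatim in both table walks above (radeon_lookup_i2c_gpio() and radeon_atombios_i2c_init()). A minimal sketch of hoisting it into one helper follows; the helper name and the ATOM_GPIO_I2C_ASSIGMENT parameter type are assumptions, while the field names and magic values are taken directly from the hunks.

    /* Hypothetical shared helper for the duplicated evergreen fixup;
     * sketch only, not the driver's actual structure. */
    static void radeon_i2c_gpio_fixup(struct radeon_device *rdev,
                                      ATOM_GPIO_I2C_ASSIGMENT *gpio, int i)
    {
        /* some evergreen boards have bad data for this entry */
        if (ASIC_IS_DCE4(rdev) && (i == 7) &&
            (gpio->usClkMaskRegisterIndex == 0x1936) &&
            (gpio->sucI2cId.ucAccess == 0)) {
            gpio->sucI2cId.ucAccess = 0x97;
            gpio->ucDataMaskShift = 8;
            gpio->ucDataEnShift = 8;
            gpio->ucDataY_Shift = 8;
            gpio->ucDataA_Shift = 8;
        }
    }

Both call sites would then reduce to a single radeon_i2c_gpio_fixup(rdev, gpio, i) line inside their loops.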
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c index 14448a740ba6..5249af8931e6 100644 --- a/drivers/gpu/drm/radeon/radeon_clocks.c +++ b/drivers/gpu/drm/radeon/radeon_clocks.c | |||
| @@ -327,6 +327,14 @@ void radeon_get_clock_info(struct drm_device *dev) | |||
| 327 | mpll->max_feedback_div = 0xff; | 327 | mpll->max_feedback_div = 0xff; |
| 328 | mpll->best_vco = 0; | 328 | mpll->best_vco = 0; |
| 329 | 329 | ||
| 330 | if (!rdev->clock.default_sclk) | ||
| 331 | rdev->clock.default_sclk = radeon_get_engine_clock(rdev); | ||
| 332 | if ((!rdev->clock.default_mclk) && rdev->asic->get_memory_clock) | ||
| 333 | rdev->clock.default_mclk = radeon_get_memory_clock(rdev); | ||
| 334 | |||
| 335 | rdev->pm.current_sclk = rdev->clock.default_sclk; | ||
| 336 | rdev->pm.current_mclk = rdev->clock.default_mclk; | ||
| 337 | |||
| 330 | } | 338 | } |
| 331 | 339 | ||
| 332 | /* 10 khz */ | 340 | /* 10 khz */ |
| @@ -897,53 +905,3 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable) | |||
| 897 | } | 905 | } |
| 898 | } | 906 | } |
| 899 | 907 | ||
| 900 | static void radeon_apply_clock_quirks(struct radeon_device *rdev) | ||
| 901 | { | ||
| 902 | uint32_t tmp; | ||
| 903 | |||
| 904 | /* XXX make sure engine is idle */ | ||
| 905 | |||
| 906 | if (rdev->family < CHIP_RS600) { | ||
| 907 | tmp = RREG32_PLL(RADEON_SCLK_CNTL); | ||
| 908 | if (ASIC_IS_R300(rdev) || ASIC_IS_RV100(rdev)) | ||
| 909 | tmp |= RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_VIP; | ||
| 910 | if ((rdev->family == CHIP_RV250) | ||
| 911 | || (rdev->family == CHIP_RV280)) | ||
| 912 | tmp |= | ||
| 913 | RADEON_SCLK_FORCE_DISP1 | RADEON_SCLK_FORCE_DISP2; | ||
| 914 | if ((rdev->family == CHIP_RV350) | ||
| 915 | || (rdev->family == CHIP_RV380)) | ||
| 916 | tmp |= R300_SCLK_FORCE_VAP; | ||
| 917 | if (rdev->family == CHIP_R420) | ||
| 918 | tmp |= R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX; | ||
| 919 | WREG32_PLL(RADEON_SCLK_CNTL, tmp); | ||
| 920 | } else if (rdev->family < CHIP_R600) { | ||
| 921 | tmp = RREG32_PLL(AVIVO_CP_DYN_CNTL); | ||
| 922 | tmp |= AVIVO_CP_FORCEON; | ||
| 923 | WREG32_PLL(AVIVO_CP_DYN_CNTL, tmp); | ||
| 924 | |||
| 925 | tmp = RREG32_PLL(AVIVO_E2_DYN_CNTL); | ||
| 926 | tmp |= AVIVO_E2_FORCEON; | ||
| 927 | WREG32_PLL(AVIVO_E2_DYN_CNTL, tmp); | ||
| 928 | |||
| 929 | tmp = RREG32_PLL(AVIVO_IDCT_DYN_CNTL); | ||
| 930 | tmp |= AVIVO_IDCT_FORCEON; | ||
| 931 | WREG32_PLL(AVIVO_IDCT_DYN_CNTL, tmp); | ||
| 932 | } | ||
| 933 | } | ||
| 934 | |||
| 935 | int radeon_static_clocks_init(struct drm_device *dev) | ||
| 936 | { | ||
| 937 | struct radeon_device *rdev = dev->dev_private; | ||
| 938 | |||
| 939 | /* XXX make sure engine is idle */ | ||
| 940 | |||
| 941 | if (radeon_dynclks != -1) { | ||
| 942 | if (radeon_dynclks) { | ||
| 943 | if (rdev->asic->set_clock_gating) | ||
| 944 | radeon_set_clock_gating(rdev, 1); | ||
| 945 | } | ||
| 946 | } | ||
| 947 | radeon_apply_clock_quirks(rdev); | ||
| 948 | return 0; | ||
| 949 | } | ||
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 1a5ee392e9c7..a9dd7847d96e 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
| @@ -1051,10 +1051,16 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
| 1051 | uint32_t subpixel_order = SubPixelNone; | 1051 | uint32_t subpixel_order = SubPixelNone; |
| 1052 | bool shared_ddc = false; | 1052 | bool shared_ddc = false; |
| 1053 | 1053 | ||
| 1054 | /* fixme - tv/cv/din */ | ||
| 1055 | if (connector_type == DRM_MODE_CONNECTOR_Unknown) | 1054 | if (connector_type == DRM_MODE_CONNECTOR_Unknown) |
| 1056 | return; | 1055 | return; |
| 1057 | 1056 | ||
| 1057 | /* if the user selected tv=0 don't try and add the connector */ | ||
| 1058 | if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) || | ||
| 1059 | (connector_type == DRM_MODE_CONNECTOR_Composite) || | ||
| 1060 | (connector_type == DRM_MODE_CONNECTOR_9PinDIN)) && | ||
| 1061 | (radeon_tv == 0)) | ||
| 1062 | return; | ||
| 1063 | |||
| 1058 | /* see if we already added it */ | 1064 | /* see if we already added it */ |
| 1059 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 1065 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
| 1060 | radeon_connector = to_radeon_connector(connector); | 1066 | radeon_connector = to_radeon_connector(connector); |
| @@ -1209,19 +1215,17 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
| 1209 | case DRM_MODE_CONNECTOR_SVIDEO: | 1215 | case DRM_MODE_CONNECTOR_SVIDEO: |
| 1210 | case DRM_MODE_CONNECTOR_Composite: | 1216 | case DRM_MODE_CONNECTOR_Composite: |
| 1211 | case DRM_MODE_CONNECTOR_9PinDIN: | 1217 | case DRM_MODE_CONNECTOR_9PinDIN: |
| 1212 | if (radeon_tv == 1) { | 1218 | drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); |
| 1213 | drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); | 1219 | drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); |
| 1214 | drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); | 1220 | radeon_connector->dac_load_detect = true; |
| 1215 | radeon_connector->dac_load_detect = true; | 1221 | drm_connector_attach_property(&radeon_connector->base, |
| 1216 | drm_connector_attach_property(&radeon_connector->base, | 1222 | rdev->mode_info.load_detect_property, |
| 1217 | rdev->mode_info.load_detect_property, | 1223 | 1); |
| 1218 | 1); | 1224 | drm_connector_attach_property(&radeon_connector->base, |
| 1219 | drm_connector_attach_property(&radeon_connector->base, | 1225 | rdev->mode_info.tv_std_property, |
| 1220 | rdev->mode_info.tv_std_property, | 1226 | radeon_atombios_get_tv_info(rdev)); |
| 1221 | radeon_atombios_get_tv_info(rdev)); | 1227 | /* no HPD on analog connectors */ |
| 1222 | /* no HPD on analog connectors */ | 1228 | radeon_connector->hpd.hpd = RADEON_HPD_NONE; |
| 1223 | radeon_connector->hpd.hpd = RADEON_HPD_NONE; | ||
| 1224 | } | ||
| 1225 | break; | 1229 | break; |
| 1226 | case DRM_MODE_CONNECTOR_LVDS: | 1230 | case DRM_MODE_CONNECTOR_LVDS: |
| 1227 | radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); | 1231 | radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); |
| @@ -1272,10 +1276,16 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
| 1272 | struct radeon_connector *radeon_connector; | 1276 | struct radeon_connector *radeon_connector; |
| 1273 | uint32_t subpixel_order = SubPixelNone; | 1277 | uint32_t subpixel_order = SubPixelNone; |
| 1274 | 1278 | ||
| 1275 | /* fixme - tv/cv/din */ | ||
| 1276 | if (connector_type == DRM_MODE_CONNECTOR_Unknown) | 1279 | if (connector_type == DRM_MODE_CONNECTOR_Unknown) |
| 1277 | return; | 1280 | return; |
| 1278 | 1281 | ||
| 1282 | /* if the user selected tv=0 don't try and add the connector */ | ||
| 1283 | if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) || | ||
| 1284 | (connector_type == DRM_MODE_CONNECTOR_Composite) || | ||
| 1285 | (connector_type == DRM_MODE_CONNECTOR_9PinDIN)) && | ||
| 1286 | (radeon_tv == 0)) | ||
| 1287 | return; | ||
| 1288 | |||
| 1279 | /* see if we already added it */ | 1289 | /* see if we already added it */ |
| 1280 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 1290 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
| 1281 | radeon_connector = to_radeon_connector(connector); | 1291 | radeon_connector = to_radeon_connector(connector); |
| @@ -1347,26 +1357,24 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
| 1347 | case DRM_MODE_CONNECTOR_SVIDEO: | 1357 | case DRM_MODE_CONNECTOR_SVIDEO: |
| 1348 | case DRM_MODE_CONNECTOR_Composite: | 1358 | case DRM_MODE_CONNECTOR_Composite: |
| 1349 | case DRM_MODE_CONNECTOR_9PinDIN: | 1359 | case DRM_MODE_CONNECTOR_9PinDIN: |
| 1350 | if (radeon_tv == 1) { | 1360 | drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); |
| 1351 | drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); | 1361 | drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); |
| 1352 | drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); | 1362 | radeon_connector->dac_load_detect = true; |
| 1353 | radeon_connector->dac_load_detect = true; | 1363 | /* RS400,RC410,RS480 chipset seems to report a lot |
| 1354 | /* RS400,RC410,RS480 chipset seems to report a lot | 1364 | * of false positive on load detect, we haven't yet |
| 1355 | * of false positive on load detect, we haven't yet | 1365 | * found a way to make load detect reliable on those |
| 1356 | * found a way to make load detect reliable on those | 1366 | * chipset, thus just disable it for TV. |
| 1357 | * chipset, thus just disable it for TV. | 1367 | */ |
| 1358 | */ | 1368 | if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) |
| 1359 | if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) | 1369 | radeon_connector->dac_load_detect = false; |
| 1360 | radeon_connector->dac_load_detect = false; | 1370 | drm_connector_attach_property(&radeon_connector->base, |
| 1361 | drm_connector_attach_property(&radeon_connector->base, | 1371 | rdev->mode_info.load_detect_property, |
| 1362 | rdev->mode_info.load_detect_property, | 1372 | radeon_connector->dac_load_detect); |
| 1363 | radeon_connector->dac_load_detect); | 1373 | drm_connector_attach_property(&radeon_connector->base, |
| 1364 | drm_connector_attach_property(&radeon_connector->base, | 1374 | rdev->mode_info.tv_std_property, |
| 1365 | rdev->mode_info.tv_std_property, | 1375 | radeon_combios_get_tv_info(rdev)); |
| 1366 | radeon_combios_get_tv_info(rdev)); | 1376 | /* no HPD on analog connectors */ |
| 1367 | /* no HPD on analog connectors */ | 1377 | radeon_connector->hpd.hpd = RADEON_HPD_NONE; |
| 1368 | radeon_connector->hpd.hpd = RADEON_HPD_NONE; | ||
| 1369 | } | ||
| 1370 | break; | 1378 | break; |
| 1371 | case DRM_MODE_CONNECTOR_LVDS: | 1379 | case DRM_MODE_CONNECTOR_LVDS: |
| 1372 | drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); | 1380 | drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 69b3c2291e92..256d204a6d24 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
| @@ -293,30 +293,20 @@ bool radeon_card_posted(struct radeon_device *rdev) | |||
| 293 | void radeon_update_bandwidth_info(struct radeon_device *rdev) | 293 | void radeon_update_bandwidth_info(struct radeon_device *rdev) |
| 294 | { | 294 | { |
| 295 | fixed20_12 a; | 295 | fixed20_12 a; |
| 296 | u32 sclk, mclk; | 296 | u32 sclk = rdev->pm.current_sclk; |
| 297 | u32 mclk = rdev->pm.current_mclk; | ||
| 297 | 298 | ||
| 298 | if (rdev->flags & RADEON_IS_IGP) { | 299 | /* sclk/mclk in Mhz */ |
| 299 | sclk = radeon_get_engine_clock(rdev); | 300 | a.full = dfixed_const(100); |
| 300 | mclk = rdev->clock.default_mclk; | 301 | rdev->pm.sclk.full = dfixed_const(sclk); |
| 301 | 302 | rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a); | |
| 302 | a.full = dfixed_const(100); | 303 | rdev->pm.mclk.full = dfixed_const(mclk); |
| 303 | rdev->pm.sclk.full = dfixed_const(sclk); | 304 | rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a); |
| 304 | rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a); | ||
| 305 | rdev->pm.mclk.full = dfixed_const(mclk); | ||
| 306 | rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a); | ||
| 307 | 305 | ||
| 306 | if (rdev->flags & RADEON_IS_IGP) { | ||
| 308 | a.full = dfixed_const(16); | 307 | a.full = dfixed_const(16); |
| 309 | /* core_bandwidth = sclk(Mhz) * 16 */ | 308 | /* core_bandwidth = sclk(Mhz) * 16 */ |
| 310 | rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a); | 309 | rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a); |
| 311 | } else { | ||
| 312 | sclk = radeon_get_engine_clock(rdev); | ||
| 313 | mclk = radeon_get_memory_clock(rdev); | ||
| 314 | |||
| 315 | a.full = dfixed_const(100); | ||
| 316 | rdev->pm.sclk.full = dfixed_const(sclk); | ||
| 317 | rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a); | ||
| 318 | rdev->pm.mclk.full = dfixed_const(mclk); | ||
| 319 | rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a); | ||
| 320 | } | 310 | } |
| 321 | } | 311 | } |
| 322 | 312 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index 0416804d8f30..6a13ee38a5b9 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c | |||
| @@ -213,7 +213,7 @@ static void post_xfer(struct i2c_adapter *i2c_adap) | |||
| 213 | 213 | ||
| 214 | static u32 radeon_get_i2c_prescale(struct radeon_device *rdev) | 214 | static u32 radeon_get_i2c_prescale(struct radeon_device *rdev) |
| 215 | { | 215 | { |
| 216 | u32 sclk = radeon_get_engine_clock(rdev); | 216 | u32 sclk = rdev->pm.current_sclk; |
| 217 | u32 prescale = 0; | 217 | u32 prescale = 0; |
| 218 | u32 nm; | 218 | u32 nm; |
| 219 | u8 n, m, loop; | 219 | u8 n, m, loop; |
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 8f93e2b4b0c8..efbe975312dc 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
| @@ -600,7 +600,6 @@ extern bool radeon_get_atom_connector_info_from_supported_devices_table(struct d | |||
| 600 | void radeon_enc_destroy(struct drm_encoder *encoder); | 600 | void radeon_enc_destroy(struct drm_encoder *encoder); |
| 601 | void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj); | 601 | void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj); |
| 602 | void radeon_combios_asic_init(struct drm_device *dev); | 602 | void radeon_combios_asic_init(struct drm_device *dev); |
| 603 | extern int radeon_static_clocks_init(struct drm_device *dev); | ||
| 604 | bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, | 603 | bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, |
| 605 | struct drm_display_mode *mode, | 604 | struct drm_display_mode *mode, |
| 606 | struct drm_display_mode *adjusted_mode); | 605 | struct drm_display_mode *adjusted_mode); |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index f1c796810117..bfa59db374d2 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
| @@ -905,6 +905,54 @@ static void rv770_gpu_init(struct radeon_device *rdev) | |||
| 905 | 905 | ||
| 906 | } | 906 | } |
| 907 | 907 | ||
| 908 | static int rv770_vram_scratch_init(struct radeon_device *rdev) | ||
| 909 | { | ||
| 910 | int r; | ||
| 911 | u64 gpu_addr; | ||
| 912 | |||
| 913 | if (rdev->vram_scratch.robj == NULL) { | ||
| 914 | r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, | ||
| 915 | true, RADEON_GEM_DOMAIN_VRAM, | ||
| 916 | &rdev->vram_scratch.robj); | ||
| 917 | if (r) { | ||
| 918 | return r; | ||
| 919 | } | ||
| 920 | } | ||
| 921 | |||
| 922 | r = radeon_bo_reserve(rdev->vram_scratch.robj, false); | ||
| 923 | if (unlikely(r != 0)) | ||
| 924 | return r; | ||
| 925 | r = radeon_bo_pin(rdev->vram_scratch.robj, | ||
| 926 | RADEON_GEM_DOMAIN_VRAM, &gpu_addr); | ||
| 927 | if (r) { | ||
| 928 | radeon_bo_unreserve(rdev->vram_scratch.robj); | ||
| 929 | return r; | ||
| 930 | } | ||
| 931 | r = radeon_bo_kmap(rdev->vram_scratch.robj, | ||
| 932 | (void **)&rdev->vram_scratch.ptr); | ||
| 933 | if (r) | ||
| 934 | radeon_bo_unpin(rdev->vram_scratch.robj); | ||
| 935 | radeon_bo_unreserve(rdev->vram_scratch.robj); | ||
| 936 | |||
| 937 | return r; | ||
| 938 | } | ||
| 939 | |||
| 940 | static void rv770_vram_scratch_fini(struct radeon_device *rdev) | ||
| 941 | { | ||
| 942 | int r; | ||
| 943 | |||
| 944 | if (rdev->vram_scratch.robj == NULL) { | ||
| 945 | return; | ||
| 946 | } | ||
| 947 | r = radeon_bo_reserve(rdev->vram_scratch.robj, false); | ||
| 948 | if (likely(r == 0)) { | ||
| 949 | radeon_bo_kunmap(rdev->vram_scratch.robj); | ||
| 950 | radeon_bo_unpin(rdev->vram_scratch.robj); | ||
| 951 | radeon_bo_unreserve(rdev->vram_scratch.robj); | ||
| 952 | } | ||
| 953 | radeon_bo_unref(&rdev->vram_scratch.robj); | ||
| 954 | } | ||
| 955 | |||
| 908 | int rv770_mc_init(struct radeon_device *rdev) | 956 | int rv770_mc_init(struct radeon_device *rdev) |
| 909 | { | 957 | { |
| 910 | u32 tmp; | 958 | u32 tmp; |
| @@ -970,6 +1018,9 @@ static int rv770_startup(struct radeon_device *rdev) | |||
| 970 | if (r) | 1018 | if (r) |
| 971 | return r; | 1019 | return r; |
| 972 | } | 1020 | } |
| 1021 | r = rv770_vram_scratch_init(rdev); | ||
| 1022 | if (r) | ||
| 1023 | return r; | ||
| 973 | rv770_gpu_init(rdev); | 1024 | rv770_gpu_init(rdev); |
| 974 | r = r600_blit_init(rdev); | 1025 | r = r600_blit_init(rdev); |
| 975 | if (r) { | 1026 | if (r) { |
| @@ -1023,11 +1074,6 @@ int rv770_resume(struct radeon_device *rdev) | |||
| 1023 | */ | 1074 | */ |
| 1024 | /* post card */ | 1075 | /* post card */ |
| 1025 | atom_asic_init(rdev->mode_info.atom_context); | 1076 | atom_asic_init(rdev->mode_info.atom_context); |
| 1026 | /* Initialize clocks */ | ||
| 1027 | r = radeon_clocks_init(rdev); | ||
| 1028 | if (r) { | ||
| 1029 | return r; | ||
| 1030 | } | ||
| 1031 | 1077 | ||
| 1032 | r = rv770_startup(rdev); | 1078 | r = rv770_startup(rdev); |
| 1033 | if (r) { | 1079 | if (r) { |
| @@ -1118,9 +1164,6 @@ int rv770_init(struct radeon_device *rdev) | |||
| 1118 | radeon_surface_init(rdev); | 1164 | radeon_surface_init(rdev); |
| 1119 | /* Initialize clocks */ | 1165 | /* Initialize clocks */ |
| 1120 | radeon_get_clock_info(rdev->ddev); | 1166 | radeon_get_clock_info(rdev->ddev); |
| 1121 | r = radeon_clocks_init(rdev); | ||
| 1122 | if (r) | ||
| 1123 | return r; | ||
| 1124 | /* Fence driver */ | 1167 | /* Fence driver */ |
| 1125 | r = radeon_fence_driver_init(rdev); | 1168 | r = radeon_fence_driver_init(rdev); |
| 1126 | if (r) | 1169 | if (r) |
| @@ -1195,9 +1238,9 @@ void rv770_fini(struct radeon_device *rdev) | |||
| 1195 | r600_irq_fini(rdev); | 1238 | r600_irq_fini(rdev); |
| 1196 | radeon_irq_kms_fini(rdev); | 1239 | radeon_irq_kms_fini(rdev); |
| 1197 | rv770_pcie_gart_fini(rdev); | 1240 | rv770_pcie_gart_fini(rdev); |
| 1241 | rv770_vram_scratch_fini(rdev); | ||
| 1198 | radeon_gem_fini(rdev); | 1242 | radeon_gem_fini(rdev); |
| 1199 | radeon_fence_driver_fini(rdev); | 1243 | radeon_fence_driver_fini(rdev); |
| 1200 | radeon_clocks_fini(rdev); | ||
| 1201 | radeon_agp_fini(rdev); | 1244 | radeon_agp_fini(rdev); |
| 1202 | radeon_bo_fini(rdev); | 1245 | radeon_bo_fini(rdev); |
| 1203 | radeon_atombios_fini(rdev); | 1246 | radeon_atombios_fini(rdev); |
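The new vram_scratch object gives r600_ioctl_wait_idle() a page that is guaranteed to live in VRAM to read back from when flushing the HDP cache on RV770-RV740; the earlier r600.c hunk swaps the GART-table pointer (which is not necessarily mapped on every configuration) for rdev->vram_scratch.ptr. The diff does not show the rest of that function, so the readl() below is an assumption about how the mapped pointer is consumed.

    /* Sketch of the HDP flush path that the scratch page enables. */
    if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) {
        void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
        u32 tmp;

        WREG32(HDP_DEBUG1, 0);
        tmp = readl(ptr);    /* dummy VRAM read flushes the HDP read cache */
    }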
diff --git a/drivers/hwmon/hp_accel.c b/drivers/hwmon/hp_accel.c index 7580f55e67e3..36e957532230 100644 --- a/drivers/hwmon/hp_accel.c +++ b/drivers/hwmon/hp_accel.c | |||
| @@ -221,6 +221,8 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = { | |||
| 221 | AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left), | 221 | AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left), |
| 222 | AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted), | 222 | AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted), |
| 223 | AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap), | 223 | AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap), |
| 224 | AXIS_DMI_MATCH("HPB532x", "HP ProBook 532", y_inverted), | ||
| 225 | AXIS_DMI_MATCH("Mini5102", "HP Mini 5102", xy_rotated_left_usd), | ||
| 224 | { NULL, } | 226 | { NULL, } |
| 225 | /* Laptop models without axis info (yet): | 227 | /* Laptop models without axis info (yet): |
| 226 | * "NC6910" "HP Compaq 6910" | 228 | * "NC6910" "HP Compaq 6910" |
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h index 8f0caf7d4482..78fbe9ffe7f0 100644 --- a/drivers/infiniband/hw/cxgb3/cxio_hal.h +++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h | |||
| @@ -53,7 +53,7 @@ | |||
| 53 | #define T3_MAX_PBL_SIZE 256 | 53 | #define T3_MAX_PBL_SIZE 256 |
| 54 | #define T3_MAX_RQ_SIZE 1024 | 54 | #define T3_MAX_RQ_SIZE 1024 |
| 55 | #define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1) | 55 | #define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1) |
| 56 | #define T3_MAX_CQ_DEPTH 262144 | 56 | #define T3_MAX_CQ_DEPTH 65536 |
| 57 | #define T3_MAX_NUM_STAG (1<<15) | 57 | #define T3_MAX_NUM_STAG (1<<15) |
| 58 | #define T3_MAX_MR_SIZE 0x100000000ULL | 58 | #define T3_MAX_MR_SIZE 0x100000000ULL |
| 59 | #define T3_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */ | 59 | #define T3_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */ |
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index 443cea55daac..61e0efd4ccfb 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c | |||
| @@ -502,7 +502,9 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
| 502 | static void nes_retrans_expired(struct nes_cm_node *cm_node) | 502 | static void nes_retrans_expired(struct nes_cm_node *cm_node) |
| 503 | { | 503 | { |
| 504 | struct iw_cm_id *cm_id = cm_node->cm_id; | 504 | struct iw_cm_id *cm_id = cm_node->cm_id; |
| 505 | switch (cm_node->state) { | 505 | enum nes_cm_node_state state = cm_node->state; |
| 506 | cm_node->state = NES_CM_STATE_CLOSED; | ||
| 507 | switch (state) { | ||
| 506 | case NES_CM_STATE_SYN_RCVD: | 508 | case NES_CM_STATE_SYN_RCVD: |
| 507 | case NES_CM_STATE_CLOSING: | 509 | case NES_CM_STATE_CLOSING: |
| 508 | rem_ref_cm_node(cm_node->cm_core, cm_node); | 510 | rem_ref_cm_node(cm_node->cm_core, cm_node); |
| @@ -511,7 +513,6 @@ static void nes_retrans_expired(struct nes_cm_node *cm_node) | |||
| 511 | case NES_CM_STATE_FIN_WAIT1: | 513 | case NES_CM_STATE_FIN_WAIT1: |
| 512 | if (cm_node->cm_id) | 514 | if (cm_node->cm_id) |
| 513 | cm_id->rem_ref(cm_id); | 515 | cm_id->rem_ref(cm_id); |
| 514 | cm_node->state = NES_CM_STATE_CLOSED; | ||
| 515 | send_reset(cm_node, NULL); | 516 | send_reset(cm_node, NULL); |
| 516 | break; | 517 | break; |
| 517 | default: | 518 | default: |
| @@ -1439,9 +1440,6 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
| 1439 | break; | 1440 | break; |
| 1440 | case NES_CM_STATE_MPAREQ_RCVD: | 1441 | case NES_CM_STATE_MPAREQ_RCVD: |
| 1441 | passive_state = atomic_add_return(1, &cm_node->passive_state); | 1442 | passive_state = atomic_add_return(1, &cm_node->passive_state); |
| 1442 | if (passive_state == NES_SEND_RESET_EVENT) | ||
| 1443 | create_event(cm_node, NES_CM_EVENT_RESET); | ||
| 1444 | cm_node->state = NES_CM_STATE_CLOSED; | ||
| 1445 | dev_kfree_skb_any(skb); | 1443 | dev_kfree_skb_any(skb); |
| 1446 | break; | 1444 | break; |
| 1447 | case NES_CM_STATE_ESTABLISHED: | 1445 | case NES_CM_STATE_ESTABLISHED: |
| @@ -1456,6 +1454,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
| 1456 | case NES_CM_STATE_CLOSED: | 1454 | case NES_CM_STATE_CLOSED: |
| 1457 | drop_packet(skb); | 1455 | drop_packet(skb); |
| 1458 | break; | 1456 | break; |
| 1457 | case NES_CM_STATE_FIN_WAIT2: | ||
| 1459 | case NES_CM_STATE_FIN_WAIT1: | 1458 | case NES_CM_STATE_FIN_WAIT1: |
| 1460 | case NES_CM_STATE_LAST_ACK: | 1459 | case NES_CM_STATE_LAST_ACK: |
| 1461 | cm_node->cm_id->rem_ref(cm_node->cm_id); | 1460 | cm_node->cm_id->rem_ref(cm_node->cm_id); |
| @@ -2777,6 +2776,12 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
| 2777 | return -EINVAL; | 2776 | return -EINVAL; |
| 2778 | } | 2777 | } |
| 2779 | 2778 | ||
| 2779 | passive_state = atomic_add_return(1, &cm_node->passive_state); | ||
| 2780 | if (passive_state == NES_SEND_RESET_EVENT) { | ||
| 2781 | rem_ref_cm_node(cm_node->cm_core, cm_node); | ||
| 2782 | return -ECONNRESET; | ||
| 2783 | } | ||
| 2784 | |||
| 2780 | /* associate the node with the QP */ | 2785 | /* associate the node with the QP */ |
| 2781 | nesqp->cm_node = (void *)cm_node; | 2786 | nesqp->cm_node = (void *)cm_node; |
| 2782 | cm_node->nesqp = nesqp; | 2787 | cm_node->nesqp = nesqp; |
| @@ -2979,9 +2984,6 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
| 2979 | printk(KERN_ERR "%s[%u] OFA CM event_handler returned, " | 2984 | printk(KERN_ERR "%s[%u] OFA CM event_handler returned, " |
| 2980 | "ret=%d\n", __func__, __LINE__, ret); | 2985 | "ret=%d\n", __func__, __LINE__, ret); |
| 2981 | 2986 | ||
| 2982 | passive_state = atomic_add_return(1, &cm_node->passive_state); | ||
| 2983 | if (passive_state == NES_SEND_RESET_EVENT) | ||
| 2984 | create_event(cm_node, NES_CM_EVENT_RESET); | ||
| 2985 | return 0; | 2987 | return 0; |
| 2986 | } | 2988 | } |
| 2987 | 2989 | ||
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index f8233c851c69..1980a461c499 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c | |||
| @@ -3468,6 +3468,19 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev, | |||
| 3468 | return; /* Ignore it, wait for close complete */ | 3468 | return; /* Ignore it, wait for close complete */ |
| 3469 | 3469 | ||
| 3470 | if (atomic_inc_return(&nesqp->close_timer_started) == 1) { | 3470 | if (atomic_inc_return(&nesqp->close_timer_started) == 1) { |
| 3471 | if ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) && | ||
| 3472 | (nesqp->ibqp_state == IB_QPS_RTS) && | ||
| 3473 | ((nesadapter->eeprom_version >> 16) != NES_A0)) { | ||
| 3474 | spin_lock_irqsave(&nesqp->lock, flags); | ||
| 3475 | nesqp->hw_iwarp_state = iwarp_state; | ||
| 3476 | nesqp->hw_tcp_state = tcp_state; | ||
| 3477 | nesqp->last_aeq = async_event_id; | ||
| 3478 | next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING; | ||
| 3479 | nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING; | ||
| 3480 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
| 3481 | nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0); | ||
| 3482 | nes_cm_disconn(nesqp); | ||
| 3483 | } | ||
| 3471 | nesqp->cm_id->add_ref(nesqp->cm_id); | 3484 | nesqp->cm_id->add_ref(nesqp->cm_id); |
| 3472 | schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp, | 3485 | schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp, |
| 3473 | NES_TIMER_TYPE_CLOSE, 1, 0); | 3486 | NES_TIMER_TYPE_CLOSE, 1, 0); |
| @@ -3477,7 +3490,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev, | |||
| 3477 | nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount), | 3490 | nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount), |
| 3478 | async_event_id, nesqp->last_aeq, tcp_state); | 3491 | async_event_id, nesqp->last_aeq, tcp_state); |
| 3479 | } | 3492 | } |
| 3480 | |||
| 3481 | break; | 3493 | break; |
| 3482 | case NES_AEQE_AEID_LLP_CLOSE_COMPLETE: | 3494 | case NES_AEQE_AEID_LLP_CLOSE_COMPLETE: |
| 3483 | if (nesqp->term_flags) { | 3495 | if (nesqp->term_flags) { |
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h index aa9183db32b1..1204c3432b63 100644 --- a/drivers/infiniband/hw/nes/nes_hw.h +++ b/drivers/infiniband/hw/nes/nes_hw.h | |||
| @@ -45,6 +45,7 @@ | |||
| 45 | #define NES_PHY_TYPE_KR 9 | 45 | #define NES_PHY_TYPE_KR 9 |
| 46 | 46 | ||
| 47 | #define NES_MULTICAST_PF_MAX 8 | 47 | #define NES_MULTICAST_PF_MAX 8 |
| 48 | #define NES_A0 3 | ||
| 48 | 49 | ||
| 49 | enum pci_regs { | 50 | enum pci_regs { |
| 50 | NES_INT_STAT = 0x0000, | 51 | NES_INT_STAT = 0x0000, |
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c index 6dfdd49cdbcf..10560c796fd6 100644 --- a/drivers/infiniband/hw/nes/nes_nic.c +++ b/drivers/infiniband/hw/nes/nes_nic.c | |||
| @@ -1446,14 +1446,14 @@ static int nes_netdev_set_pauseparam(struct net_device *netdev, | |||
| 1446 | NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200)); | 1446 | NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200)); |
| 1447 | u32temp |= NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE; | 1447 | u32temp |= NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE; |
| 1448 | nes_write_indexed(nesdev, | 1448 | nes_write_indexed(nesdev, |
| 1449 | NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE + (nesdev->mac_index*0x200), u32temp); | 1449 | NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp); |
| 1450 | nesdev->disable_tx_flow_control = 0; | 1450 | nesdev->disable_tx_flow_control = 0; |
| 1451 | } else if ((et_pauseparam->tx_pause == 0) && (nesdev->disable_tx_flow_control == 0)) { | 1451 | } else if ((et_pauseparam->tx_pause == 0) && (nesdev->disable_tx_flow_control == 0)) { |
| 1452 | u32temp = nes_read_indexed(nesdev, | 1452 | u32temp = nes_read_indexed(nesdev, |
| 1453 | NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200)); | 1453 | NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200)); |
| 1454 | u32temp &= ~NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE; | 1454 | u32temp &= ~NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE; |
| 1455 | nes_write_indexed(nesdev, | 1455 | nes_write_indexed(nesdev, |
| 1456 | NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE + (nesdev->mac_index*0x200), u32temp); | 1456 | NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp); |
| 1457 | nesdev->disable_tx_flow_control = 1; | 1457 | nesdev->disable_tx_flow_control = 1; |
| 1458 | } | 1458 | } |
| 1459 | if ((et_pauseparam->rx_pause == 1) && (nesdev->disable_rx_flow_control == 1)) { | 1459 | if ((et_pauseparam->rx_pause == 1) && (nesdev->disable_rx_flow_control == 1)) { |
diff --git a/drivers/input/input.c b/drivers/input/input.c index a9b025f4147a..ab6982056518 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c | |||
| @@ -1599,11 +1599,14 @@ EXPORT_SYMBOL(input_free_device); | |||
| 1599 | * @dev: input device supporting MT events and finger tracking | 1599 | * @dev: input device supporting MT events and finger tracking |
| 1600 | * @num_slots: number of slots used by the device | 1600 | * @num_slots: number of slots used by the device |
| 1601 | * | 1601 | * |
| 1602 | * This function allocates all necessary memory for MT slot handling | 1602 | * This function allocates all necessary memory for MT slot handling in the |
| 1603 | * in the input device, and adds ABS_MT_SLOT to the device capabilities. | 1603 | * input device, and adds ABS_MT_SLOT to the device capabilities. All slots |
| 1604 | are initially marked as unused by setting ABS_MT_TRACKING_ID to -1. | ||
| 1604 | */ | 1605 | */ |
| 1605 | int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots) | 1606 | int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots) |
| 1606 | { | 1607 | { |
| 1608 | int i; | ||
| 1609 | |||
| 1607 | if (!num_slots) | 1610 | if (!num_slots) |
| 1608 | return 0; | 1611 | return 0; |
| 1609 | 1612 | ||
| @@ -1614,6 +1617,10 @@ int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots) | |||
| 1614 | dev->mtsize = num_slots; | 1617 | dev->mtsize = num_slots; |
| 1615 | input_set_abs_params(dev, ABS_MT_SLOT, 0, num_slots - 1, 0, 0); | 1618 | input_set_abs_params(dev, ABS_MT_SLOT, 0, num_slots - 1, 0, 0); |
| 1616 | 1619 | ||
| 1620 | /* Mark slots as 'unused' */ | ||
| 1621 | for (i = 0; i < num_slots; i++) | ||
| 1622 | dev->mt[i].abs[ABS_MT_TRACKING_ID - ABS_MT_FIRST] = -1; | ||
| 1623 | |||
| 1617 | return 0; | 1624 | return 0; |
| 1618 | } | 1625 | } |
| 1619 | EXPORT_SYMBOL(input_mt_create_slots); | 1626 | EXPORT_SYMBOL(input_mt_create_slots); |
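Initializing every slot's tracking ID to -1 matters because -1 is the MT protocol's "slot unused" marker; without it, userspace could interpret freshly created slots as active contacts. A hedged sketch of the driver side, using only core input calls (the function and values are illustrative, not from any particular driver):

    /* Illustrative MT-slot reporting helper for a driver that called
     * input_mt_create_slots(dev, nr_slots) at setup time. */
    static void example_report_contact(struct input_dev *dev,
                                       int slot, int tracking_id, int x, int y)
    {
        input_report_abs(dev, ABS_MT_SLOT, slot);
        if (tracking_id < 0) {
            /* contact lifted: mark the slot unused again */
            input_report_abs(dev, ABS_MT_TRACKING_ID, -1);
        } else {
            input_report_abs(dev, ABS_MT_TRACKING_ID, tracking_id);
            input_report_abs(dev, ABS_MT_POSITION_X, x);
            input_report_abs(dev, ABS_MT_POSITION_Y, y);
        }
        input_sync(dev);
    }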
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c index ea67c49146a3..b95231763911 100644 --- a/drivers/input/mouse/bcm5974.c +++ b/drivers/input/mouse/bcm5974.c | |||
| @@ -337,10 +337,14 @@ static void report_finger_data(struct input_dev *input, | |||
| 337 | const struct bcm5974_config *cfg, | 337 | const struct bcm5974_config *cfg, |
| 338 | const struct tp_finger *f) | 338 | const struct tp_finger *f) |
| 339 | { | 339 | { |
| 340 | input_report_abs(input, ABS_MT_TOUCH_MAJOR, raw2int(f->force_major)); | 340 | input_report_abs(input, ABS_MT_TOUCH_MAJOR, |
| 341 | input_report_abs(input, ABS_MT_TOUCH_MINOR, raw2int(f->force_minor)); | 341 | raw2int(f->force_major) << 1); |
| 342 | input_report_abs(input, ABS_MT_WIDTH_MAJOR, raw2int(f->size_major)); | 342 | input_report_abs(input, ABS_MT_TOUCH_MINOR, |
| 343 | input_report_abs(input, ABS_MT_WIDTH_MINOR, raw2int(f->size_minor)); | 343 | raw2int(f->force_minor) << 1); |
| 344 | input_report_abs(input, ABS_MT_WIDTH_MAJOR, | ||
| 345 | raw2int(f->size_major) << 1); | ||
| 346 | input_report_abs(input, ABS_MT_WIDTH_MINOR, | ||
| 347 | raw2int(f->size_minor) << 1); | ||
| 344 | input_report_abs(input, ABS_MT_ORIENTATION, | 348 | input_report_abs(input, ABS_MT_ORIENTATION, |
| 345 | MAX_FINGER_ORIENTATION - raw2int(f->orientation)); | 349 | MAX_FINGER_ORIENTATION - raw2int(f->orientation)); |
| 346 | input_report_abs(input, ABS_MT_POSITION_X, raw2int(f->abs_x)); | 350 | input_report_abs(input, ABS_MT_POSITION_X, raw2int(f->abs_x)); |
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index 46e4ba0b9246..f58513160480 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c | |||
| @@ -1485,8 +1485,8 @@ static int __init i8042_init(void) | |||
| 1485 | 1485 | ||
| 1486 | static void __exit i8042_exit(void) | 1486 | static void __exit i8042_exit(void) |
| 1487 | { | 1487 | { |
| 1488 | platform_driver_unregister(&i8042_driver); | ||
| 1489 | platform_device_unregister(i8042_platform_device); | 1488 | platform_device_unregister(i8042_platform_device); |
| 1489 | platform_driver_unregister(&i8042_driver); | ||
| 1490 | i8042_platform_exit(); | 1490 | i8042_platform_exit(); |
| 1491 | 1491 | ||
| 1492 | panic_blink = NULL; | 1492 | panic_blink = NULL; |
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index 40d77ba8fdc1..6e29badb969e 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c | |||
| @@ -243,10 +243,10 @@ static int wacom_graphire_irq(struct wacom_wac *wacom) | |||
| 243 | if (features->type == WACOM_G4 || | 243 | if (features->type == WACOM_G4 || |
| 244 | features->type == WACOM_MO) { | 244 | features->type == WACOM_MO) { |
| 245 | input_report_abs(input, ABS_DISTANCE, data[6] & 0x3f); | 245 | input_report_abs(input, ABS_DISTANCE, data[6] & 0x3f); |
| 246 | rw = (signed)(data[7] & 0x04) - (data[7] & 0x03); | 246 | rw = (data[7] & 0x04) - (data[7] & 0x03); |
| 247 | } else { | 247 | } else { |
| 248 | input_report_abs(input, ABS_DISTANCE, data[7] & 0x3f); | 248 | input_report_abs(input, ABS_DISTANCE, data[7] & 0x3f); |
| 249 | rw = -(signed)data[6]; | 249 | rw = -(signed char)data[6]; |
| 250 | } | 250 | } |
| 251 | input_report_rel(input, REL_WHEEL, rw); | 251 | input_report_rel(input, REL_WHEEL, rw); |
| 252 | } | 252 | } |
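The Graphire wheel fix above is a sign-extension issue: data[6] is an unsigned byte carrying a two's-complement delta, so it must be narrowed to signed char before negation rather than widened straight to int. A small illustrative example (the value is hypothetical; the conversion behaves this way on the two's-complement targets the kernel supports):

    unsigned char data6 = 0xff;        /* tablet reports a -1 wheel step    */
    int old_rw = -(signed)data6;       /* 0xff widens to 255, so rw == -255 */
    int new_rw = -(signed char)data6;  /* 0xff narrows to -1, so rw == 1    */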
diff --git a/drivers/md/.gitignore b/drivers/md/.gitignore deleted file mode 100644 index a7afec6b19c6..000000000000 --- a/drivers/md/.gitignore +++ /dev/null | |||
| @@ -1,4 +0,0 @@ | |||
| 1 | mktables | ||
| 2 | raid6altivec*.c | ||
| 3 | raid6int*.c | ||
| 4 | raid6tables.c | ||
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 1ba1e122e948..ed4900ade93a 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c | |||
| @@ -1542,8 +1542,7 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector) | |||
| 1542 | atomic_read(&bitmap->mddev->recovery_active) == 0); | 1542 | atomic_read(&bitmap->mddev->recovery_active) == 0); |
| 1543 | 1543 | ||
| 1544 | bitmap->mddev->curr_resync_completed = bitmap->mddev->curr_resync; | 1544 | bitmap->mddev->curr_resync_completed = bitmap->mddev->curr_resync; |
| 1545 | if (bitmap->mddev->persistent) | 1545 | set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags); |
| 1546 | set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags); | ||
| 1547 | sector &= ~((1ULL << CHUNK_BLOCK_SHIFT(bitmap)) - 1); | 1546 | sector &= ~((1ULL << CHUNK_BLOCK_SHIFT(bitmap)) - 1); |
| 1548 | s = 0; | 1547 | s = 0; |
| 1549 | while (s < sector && s < bitmap->mddev->resync_max_sectors) { | 1548 | while (s < sector && s < bitmap->mddev->resync_max_sectors) { |
diff --git a/drivers/md/md.c b/drivers/md/md.c index c148b6302154..43cf9cc9c1df 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
| @@ -2167,9 +2167,9 @@ repeat: | |||
| 2167 | rdev->recovery_offset = mddev->curr_resync_completed; | 2167 | rdev->recovery_offset = mddev->curr_resync_completed; |
| 2168 | 2168 | ||
| 2169 | } | 2169 | } |
| 2170 | if (mddev->external || !mddev->persistent) { | 2170 | if (!mddev->persistent) { |
| 2171 | clear_bit(MD_CHANGE_DEVS, &mddev->flags); | ||
| 2172 | clear_bit(MD_CHANGE_CLEAN, &mddev->flags); | 2171 | clear_bit(MD_CHANGE_CLEAN, &mddev->flags); |
| 2172 | clear_bit(MD_CHANGE_DEVS, &mddev->flags); | ||
| 2173 | wake_up(&mddev->sb_wait); | 2173 | wake_up(&mddev->sb_wait); |
| 2174 | return; | 2174 | return; |
| 2175 | } | 2175 | } |
| @@ -2178,7 +2178,6 @@ repeat: | |||
| 2178 | 2178 | ||
| 2179 | mddev->utime = get_seconds(); | 2179 | mddev->utime = get_seconds(); |
| 2180 | 2180 | ||
| 2181 | set_bit(MD_CHANGE_PENDING, &mddev->flags); | ||
| 2182 | if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags)) | 2181 | if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags)) |
| 2183 | force_change = 1; | 2182 | force_change = 1; |
| 2184 | if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags)) | 2183 | if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags)) |
| @@ -3371,7 +3370,7 @@ array_state_show(mddev_t *mddev, char *page) | |||
| 3371 | case 0: | 3370 | case 0: |
| 3372 | if (mddev->in_sync) | 3371 | if (mddev->in_sync) |
| 3373 | st = clean; | 3372 | st = clean; |
| 3374 | else if (test_bit(MD_CHANGE_CLEAN, &mddev->flags)) | 3373 | else if (test_bit(MD_CHANGE_PENDING, &mddev->flags)) |
| 3375 | st = write_pending; | 3374 | st = write_pending; |
| 3376 | else if (mddev->safemode) | 3375 | else if (mddev->safemode) |
| 3377 | st = active_idle; | 3376 | st = active_idle; |
| @@ -3452,9 +3451,7 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len) | |||
| 3452 | mddev->in_sync = 1; | 3451 | mddev->in_sync = 1; |
| 3453 | if (mddev->safemode == 1) | 3452 | if (mddev->safemode == 1) |
| 3454 | mddev->safemode = 0; | 3453 | mddev->safemode = 0; |
| 3455 | if (mddev->persistent) | 3454 | set_bit(MD_CHANGE_CLEAN, &mddev->flags); |
| 3456 | set_bit(MD_CHANGE_CLEAN, | ||
| 3457 | &mddev->flags); | ||
| 3458 | } | 3455 | } |
| 3459 | err = 0; | 3456 | err = 0; |
| 3460 | } else | 3457 | } else |
| @@ -3466,8 +3463,7 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len) | |||
| 3466 | case active: | 3463 | case active: |
| 3467 | if (mddev->pers) { | 3464 | if (mddev->pers) { |
| 3468 | restart_array(mddev); | 3465 | restart_array(mddev); |
| 3469 | if (mddev->external) | 3466 | clear_bit(MD_CHANGE_PENDING, &mddev->flags); |
| 3470 | clear_bit(MD_CHANGE_CLEAN, &mddev->flags); | ||
| 3471 | wake_up(&mddev->sb_wait); | 3467 | wake_up(&mddev->sb_wait); |
| 3472 | err = 0; | 3468 | err = 0; |
| 3473 | } else { | 3469 | } else { |
| @@ -6572,6 +6568,7 @@ void md_write_start(mddev_t *mddev, struct bio *bi) | |||
| 6572 | if (mddev->in_sync) { | 6568 | if (mddev->in_sync) { |
| 6573 | mddev->in_sync = 0; | 6569 | mddev->in_sync = 0; |
| 6574 | set_bit(MD_CHANGE_CLEAN, &mddev->flags); | 6570 | set_bit(MD_CHANGE_CLEAN, &mddev->flags); |
| 6571 | set_bit(MD_CHANGE_PENDING, &mddev->flags); | ||
| 6575 | md_wakeup_thread(mddev->thread); | 6572 | md_wakeup_thread(mddev->thread); |
| 6576 | did_change = 1; | 6573 | did_change = 1; |
| 6577 | } | 6574 | } |
| @@ -6580,7 +6577,6 @@ void md_write_start(mddev_t *mddev, struct bio *bi) | |||
| 6580 | if (did_change) | 6577 | if (did_change) |
| 6581 | sysfs_notify_dirent_safe(mddev->sysfs_state); | 6578 | sysfs_notify_dirent_safe(mddev->sysfs_state); |
| 6582 | wait_event(mddev->sb_wait, | 6579 | wait_event(mddev->sb_wait, |
| 6583 | !test_bit(MD_CHANGE_CLEAN, &mddev->flags) && | ||
| 6584 | !test_bit(MD_CHANGE_PENDING, &mddev->flags)); | 6580 | !test_bit(MD_CHANGE_PENDING, &mddev->flags)); |
| 6585 | } | 6581 | } |
| 6586 | 6582 | ||
| @@ -6616,6 +6612,7 @@ int md_allow_write(mddev_t *mddev) | |||
| 6616 | if (mddev->in_sync) { | 6612 | if (mddev->in_sync) { |
| 6617 | mddev->in_sync = 0; | 6613 | mddev->in_sync = 0; |
| 6618 | set_bit(MD_CHANGE_CLEAN, &mddev->flags); | 6614 | set_bit(MD_CHANGE_CLEAN, &mddev->flags); |
| 6615 | set_bit(MD_CHANGE_PENDING, &mddev->flags); | ||
| 6619 | if (mddev->safemode_delay && | 6616 | if (mddev->safemode_delay && |
| 6620 | mddev->safemode == 0) | 6617 | mddev->safemode == 0) |
| 6621 | mddev->safemode = 1; | 6618 | mddev->safemode = 1; |
| @@ -6625,7 +6622,7 @@ int md_allow_write(mddev_t *mddev) | |||
| 6625 | } else | 6622 | } else |
| 6626 | spin_unlock_irq(&mddev->write_lock); | 6623 | spin_unlock_irq(&mddev->write_lock); |
| 6627 | 6624 | ||
| 6628 | if (test_bit(MD_CHANGE_CLEAN, &mddev->flags)) | 6625 | if (test_bit(MD_CHANGE_PENDING, &mddev->flags)) |
| 6629 | return -EAGAIN; | 6626 | return -EAGAIN; |
| 6630 | else | 6627 | else |
| 6631 | return 0; | 6628 | return 0; |
| @@ -6823,8 +6820,7 @@ void md_do_sync(mddev_t *mddev) | |||
| 6823 | atomic_read(&mddev->recovery_active) == 0); | 6820 | atomic_read(&mddev->recovery_active) == 0); |
| 6824 | mddev->curr_resync_completed = | 6821 | mddev->curr_resync_completed = |
| 6825 | mddev->curr_resync; | 6822 | mddev->curr_resync; |
| 6826 | if (mddev->persistent) | 6823 | set_bit(MD_CHANGE_CLEAN, &mddev->flags); |
| 6827 | set_bit(MD_CHANGE_CLEAN, &mddev->flags); | ||
| 6828 | sysfs_notify(&mddev->kobj, NULL, "sync_completed"); | 6824 | sysfs_notify(&mddev->kobj, NULL, "sync_completed"); |
| 6829 | } | 6825 | } |
| 6830 | 6826 | ||
| @@ -7103,8 +7099,7 @@ void md_check_recovery(mddev_t *mddev) | |||
| 7103 | mddev->recovery_cp == MaxSector) { | 7099 | mddev->recovery_cp == MaxSector) { |
| 7104 | mddev->in_sync = 1; | 7100 | mddev->in_sync = 1; |
| 7105 | did_change = 1; | 7101 | did_change = 1; |
| 7106 | if (mddev->persistent) | 7102 | set_bit(MD_CHANGE_CLEAN, &mddev->flags); |
| 7107 | set_bit(MD_CHANGE_CLEAN, &mddev->flags); | ||
| 7108 | } | 7103 | } |
| 7109 | if (mddev->safemode == 1) | 7104 | if (mddev->safemode == 1) |
| 7110 | mddev->safemode = 0; | 7105 | mddev->safemode = 0; |
diff --git a/drivers/md/md.h b/drivers/md/md.h index a953fe2808ae..3931299788dc 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h | |||
| @@ -140,7 +140,7 @@ struct mddev_s | |||
| 140 | unsigned long flags; | 140 | unsigned long flags; |
| 141 | #define MD_CHANGE_DEVS 0 /* Some device status has changed */ | 141 | #define MD_CHANGE_DEVS 0 /* Some device status has changed */ |
| 142 | #define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */ | 142 | #define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */ |
| 143 | #define MD_CHANGE_PENDING 2 /* superblock update in progress */ | 143 | #define MD_CHANGE_PENDING 2 /* switch from 'clean' to 'active' in progress */ |
| 144 | 144 | ||
| 145 | int suspended; | 145 | int suspended; |
| 146 | atomic_t active_io; | 146 | atomic_t active_io; |
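The md.c/md.h hunks above split the meaning of MD_CHANGE_CLEAN (a clean/active transition happened) from MD_CHANGE_PENDING (a superblock update is still in flight), so waiters and `md_allow_write()` test only the pending bit. The following is a standalone userspace sketch of that idea, not mddev code: the flag numbers mirror md.h, but the helpers are plain C stand-ins for the kernel's atomic bitops.

```c
#include <stdio.h>

#define MD_CHANGE_DEVS    0 /* some device status has changed */
#define MD_CHANGE_CLEAN   1 /* transition to or from 'clean' */
#define MD_CHANGE_PENDING 2 /* switch from 'clean' to 'active' in progress */

static unsigned long flags;

static void set_flag(int nr)   { flags |=  (1UL << nr); }
static void clear_flag(int nr) { flags &= ~(1UL << nr); }
static int  test_flag(int nr)  { return (int)((flags >> nr) & 1UL); }

int main(void)
{
	/* write-start path: record both the clean->active transition and
	 * the superblock update that is now in flight */
	set_flag(MD_CHANGE_CLEAN);
	set_flag(MD_CHANGE_PENDING);

	/* superblock writer finishes: only the pending bit is cleared */
	clear_flag(MD_CHANGE_PENDING);

	/* a waiter keyed on PENDING alone is released here, even though
	 * the CLEAN transition bit may still be set for other consumers */
	printf("write may proceed: %s\n",
	       test_flag(MD_CHANGE_PENDING) ? "no" : "yes");
	return 0;
}
```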
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index bd2755e8d9a3..f332c52968b7 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c | |||
| @@ -362,9 +362,8 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr, | |||
| 362 | goto err; | 362 | goto err; |
| 363 | } | 363 | } |
| 364 | 364 | ||
| 365 | err = mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid); | 365 | if (ocr & R4_MEMORY_PRESENT |
| 366 | 366 | && mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid) == 0) { | |
| 367 | if (!err) { | ||
| 368 | card->type = MMC_TYPE_SD_COMBO; | 367 | card->type = MMC_TYPE_SD_COMBO; |
| 369 | 368 | ||
| 370 | if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO || | 369 | if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO || |
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c index 5f3a599ead07..87226cd202a5 100644 --- a/drivers/mmc/host/at91_mci.c +++ b/drivers/mmc/host/at91_mci.c | |||
| @@ -66,6 +66,7 @@ | |||
| 66 | #include <linux/clk.h> | 66 | #include <linux/clk.h> |
| 67 | #include <linux/atmel_pdc.h> | 67 | #include <linux/atmel_pdc.h> |
| 68 | #include <linux/gfp.h> | 68 | #include <linux/gfp.h> |
| 69 | #include <linux/highmem.h> | ||
| 69 | 70 | ||
| 70 | #include <linux/mmc/host.h> | 71 | #include <linux/mmc/host.h> |
| 71 | 72 | ||
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c index 9a68ff4353a2..5a950b16d9e6 100644 --- a/drivers/mmc/host/imxmmc.c +++ b/drivers/mmc/host/imxmmc.c | |||
| @@ -148,11 +148,12 @@ static int imxmci_start_clock(struct imxmci_host *host) | |||
| 148 | 148 | ||
| 149 | while (delay--) { | 149 | while (delay--) { |
| 150 | reg = readw(host->base + MMC_REG_STATUS); | 150 | reg = readw(host->base + MMC_REG_STATUS); |
| 151 | if (reg & STATUS_CARD_BUS_CLK_RUN) | 151 | if (reg & STATUS_CARD_BUS_CLK_RUN) { |
| 152 | /* Check twice before cut */ | 152 | /* Check twice before cut */ |
| 153 | reg = readw(host->base + MMC_REG_STATUS); | 153 | reg = readw(host->base + MMC_REG_STATUS); |
| 154 | if (reg & STATUS_CARD_BUS_CLK_RUN) | 154 | if (reg & STATUS_CARD_BUS_CLK_RUN) |
| 155 | return 0; | 155 | return 0; |
| 156 | } | ||
| 156 | 157 | ||
| 157 | if (test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events)) | 158 | if (test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events)) |
| 158 | return 0; | 159 | return 0; |
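The imxmmc hunk above only adds braces: without them, an `if` guards just the next statement, so the indented "check twice" block was not actually nested the way the indentation suggested. A minimal, self-contained illustration of that brace-scope hazard (not driver code; the variable names are invented):

```c
#include <stdio.h>

int main(void)
{
	int guarded = 0;
	int also_meant_to_be_guarded = 0;

	if (0)
		guarded = 1;
		also_meant_to_be_guarded = 1;	/* runs regardless of the if */

	printf("guarded=%d, second statement ran=%d\n",
	       guarded, also_meant_to_be_guarded);
	return 0;
}
```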
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 4a8776f8afdd..4526d2791f29 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c | |||
| @@ -2305,7 +2305,6 @@ static int omap_hsmmc_suspend(struct device *dev) | |||
| 2305 | int ret = 0; | 2305 | int ret = 0; |
| 2306 | struct platform_device *pdev = to_platform_device(dev); | 2306 | struct platform_device *pdev = to_platform_device(dev); |
| 2307 | struct omap_hsmmc_host *host = platform_get_drvdata(pdev); | 2307 | struct omap_hsmmc_host *host = platform_get_drvdata(pdev); |
| 2308 | pm_message_t state = PMSG_SUSPEND; /* unused by MMC core */ | ||
| 2309 | 2308 | ||
| 2310 | if (host && host->suspended) | 2309 | if (host && host->suspended) |
| 2311 | return 0; | 2310 | return 0; |
| @@ -2324,8 +2323,8 @@ static int omap_hsmmc_suspend(struct device *dev) | |||
| 2324 | } | 2323 | } |
| 2325 | } | 2324 | } |
| 2326 | cancel_work_sync(&host->mmc_carddetect_work); | 2325 | cancel_work_sync(&host->mmc_carddetect_work); |
| 2327 | mmc_host_enable(host->mmc); | ||
| 2328 | ret = mmc_suspend_host(host->mmc); | 2326 | ret = mmc_suspend_host(host->mmc); |
| 2327 | mmc_host_enable(host->mmc); | ||
| 2329 | if (ret == 0) { | 2328 | if (ret == 0) { |
| 2330 | omap_hsmmc_disable_irq(host); | 2329 | omap_hsmmc_disable_irq(host); |
| 2331 | OMAP_HSMMC_WRITE(host->base, HCTL, | 2330 | OMAP_HSMMC_WRITE(host->base, HCTL, |
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c index 2e16e0a90a5e..976330de379e 100644 --- a/drivers/mmc/host/s3cmci.c +++ b/drivers/mmc/host/s3cmci.c | |||
| @@ -1600,7 +1600,7 @@ static int __devinit s3cmci_probe(struct platform_device *pdev) | |||
| 1600 | host->pio_active = XFER_NONE; | 1600 | host->pio_active = XFER_NONE; |
| 1601 | 1601 | ||
| 1602 | #ifdef CONFIG_MMC_S3C_PIODMA | 1602 | #ifdef CONFIG_MMC_S3C_PIODMA |
| 1603 | host->dodma = host->pdata->dma; | 1603 | host->dodma = host->pdata->use_dma; |
| 1604 | #endif | 1604 | #endif |
| 1605 | 1605 | ||
| 1606 | host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1606 | host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c index ee7d0a5a51c4..69d98e3bf6ab 100644 --- a/drivers/mmc/host/tmio_mmc.c +++ b/drivers/mmc/host/tmio_mmc.c | |||
| @@ -164,6 +164,7 @@ tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd) | |||
| 164 | static void tmio_mmc_pio_irq(struct tmio_mmc_host *host) | 164 | static void tmio_mmc_pio_irq(struct tmio_mmc_host *host) |
| 165 | { | 165 | { |
| 166 | struct mmc_data *data = host->data; | 166 | struct mmc_data *data = host->data; |
| 167 | void *sg_virt; | ||
| 167 | unsigned short *buf; | 168 | unsigned short *buf; |
| 168 | unsigned int count; | 169 | unsigned int count; |
| 169 | unsigned long flags; | 170 | unsigned long flags; |
| @@ -173,8 +174,8 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host) | |||
| 173 | return; | 174 | return; |
| 174 | } | 175 | } |
| 175 | 176 | ||
| 176 | buf = (unsigned short *)(tmio_mmc_kmap_atomic(host, &flags) + | 177 | sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags); |
| 177 | host->sg_off); | 178 | buf = (unsigned short *)(sg_virt + host->sg_off); |
| 178 | 179 | ||
| 179 | count = host->sg_ptr->length - host->sg_off; | 180 | count = host->sg_ptr->length - host->sg_off; |
| 180 | if (count > data->blksz) | 181 | if (count > data->blksz) |
| @@ -191,7 +192,7 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host) | |||
| 191 | 192 | ||
| 192 | host->sg_off += count; | 193 | host->sg_off += count; |
| 193 | 194 | ||
| 194 | tmio_mmc_kunmap_atomic(host, &flags); | 195 | tmio_mmc_kunmap_atomic(sg_virt, &flags); |
| 195 | 196 | ||
| 196 | if (host->sg_off == host->sg_ptr->length) | 197 | if (host->sg_off == host->sg_ptr->length) |
| 197 | tmio_mmc_next_sg(host); | 198 | tmio_mmc_next_sg(host); |
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h index 64f7d5dfc106..0fedc78e3ea5 100644 --- a/drivers/mmc/host/tmio_mmc.h +++ b/drivers/mmc/host/tmio_mmc.h | |||
| @@ -82,10 +82,7 @@ | |||
| 82 | 82 | ||
| 83 | #define ack_mmc_irqs(host, i) \ | 83 | #define ack_mmc_irqs(host, i) \ |
| 84 | do { \ | 84 | do { \ |
| 85 | u32 mask;\ | 85 | sd_ctrl_write32((host), CTL_STATUS, ~(i)); \ |
| 86 | mask = sd_ctrl_read32((host), CTL_STATUS); \ | ||
| 87 | mask &= ~((i) & TMIO_MASK_IRQ); \ | ||
| 88 | sd_ctrl_write32((host), CTL_STATUS, mask); \ | ||
| 89 | } while (0) | 86 | } while (0) |
| 90 | 87 | ||
| 91 | 88 | ||
| @@ -177,19 +174,17 @@ static inline int tmio_mmc_next_sg(struct tmio_mmc_host *host) | |||
| 177 | return --host->sg_len; | 174 | return --host->sg_len; |
| 178 | } | 175 | } |
| 179 | 176 | ||
| 180 | static inline char *tmio_mmc_kmap_atomic(struct tmio_mmc_host *host, | 177 | static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg, |
| 181 | unsigned long *flags) | 178 | unsigned long *flags) |
| 182 | { | 179 | { |
| 183 | struct scatterlist *sg = host->sg_ptr; | ||
| 184 | |||
| 185 | local_irq_save(*flags); | 180 | local_irq_save(*flags); |
| 186 | return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; | 181 | return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; |
| 187 | } | 182 | } |
| 188 | 183 | ||
| 189 | static inline void tmio_mmc_kunmap_atomic(struct tmio_mmc_host *host, | 184 | static inline void tmio_mmc_kunmap_atomic(void *virt, |
| 190 | unsigned long *flags) | 185 | unsigned long *flags) |
| 191 | { | 186 | { |
| 192 | kunmap_atomic(sg_page(host->sg_ptr), KM_BIO_SRC_IRQ); | 187 | kunmap_atomic(virt, KM_BIO_SRC_IRQ); |
| 193 | local_irq_restore(*flags); | 188 | local_irq_restore(*flags); |
| 194 | } | 189 | } |
| 195 | 190 | ||
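The tmio_mmc hunks above change the kmap helpers so that the unmap side takes back the virtual address the map side returned, instead of re-deriving it from `host->sg_ptr`, which the PIO loop may have advanced in the meantime. A userspace API-shape sketch of that symmetry, under the assumption of a fake scatterlist type (none of these names are the driver's):

```c
#include <stdio.h>

struct fake_sg {
	char *page;		/* backing memory for this segment */
	unsigned long offset;
	unsigned long length;
};

/* map: hand the caller a usable virtual address for this segment */
static char *sg_map(struct fake_sg *sg)
{
	return sg->page + sg->offset;
}

/* unmap: operates on exactly what map returned, not on host state,
 * so it stays correct even if the caller moved to the next segment */
static void sg_unmap(void *virt)
{
	(void)virt;	/* nothing to undo in this userspace sketch */
}

int main(void)
{
	char backing[16] = "payload";
	struct fake_sg sg = { backing, 0, sizeof(backing) };

	char *virt = sg_map(&sg);
	printf("%s\n", virt);
	sg_unmap(virt);		/* symmetric with sg_map() */
	return 0;
}
```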
diff --git a/drivers/mtd/ubi/Kconfig.debug b/drivers/mtd/ubi/Kconfig.debug index 2246f154e2f7..61f6e5e40458 100644 --- a/drivers/mtd/ubi/Kconfig.debug +++ b/drivers/mtd/ubi/Kconfig.debug | |||
| @@ -6,7 +6,7 @@ config MTD_UBI_DEBUG | |||
| 6 | depends on SYSFS | 6 | depends on SYSFS |
| 7 | depends on MTD_UBI | 7 | depends on MTD_UBI |
| 8 | select DEBUG_FS | 8 | select DEBUG_FS |
| 9 | select KALLSYMS_ALL | 9 | select KALLSYMS_ALL if KALLSYMS && DEBUG_KERNEL |
| 10 | help | 10 | help |
| 11 | This option enables UBI debugging. | 11 | This option enables UBI debugging. |
| 12 | 12 | ||
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c index 4dfa6b90c21c..3d2d1a69e9a0 100644 --- a/drivers/mtd/ubi/cdev.c +++ b/drivers/mtd/ubi/cdev.c | |||
| @@ -798,18 +798,18 @@ static int rename_volumes(struct ubi_device *ubi, | |||
| 798 | goto out_free; | 798 | goto out_free; |
| 799 | } | 799 | } |
| 800 | 800 | ||
| 801 | re = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL); | 801 | re1 = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL); |
| 802 | if (!re) { | 802 | if (!re1) { |
| 803 | err = -ENOMEM; | 803 | err = -ENOMEM; |
| 804 | ubi_close_volume(desc); | 804 | ubi_close_volume(desc); |
| 805 | goto out_free; | 805 | goto out_free; |
| 806 | } | 806 | } |
| 807 | 807 | ||
| 808 | re->remove = 1; | 808 | re1->remove = 1; |
| 809 | re->desc = desc; | 809 | re1->desc = desc; |
| 810 | list_add(&re->list, &rename_list); | 810 | list_add(&re1->list, &rename_list); |
| 811 | dbg_msg("will remove volume %d, name \"%s\"", | 811 | dbg_msg("will remove volume %d, name \"%s\"", |
| 812 | re->desc->vol->vol_id, re->desc->vol->name); | 812 | re1->desc->vol->vol_id, re1->desc->vol->name); |
| 813 | } | 813 | } |
| 814 | 814 | ||
| 815 | mutex_lock(&ubi->device_mutex); | 815 | mutex_lock(&ubi->device_mutex); |
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c index 372a15ac9995..69b52e9c9489 100644 --- a/drivers/mtd/ubi/scan.c +++ b/drivers/mtd/ubi/scan.c | |||
| @@ -843,7 +843,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, | |||
| 843 | case UBI_COMPAT_DELETE: | 843 | case UBI_COMPAT_DELETE: |
| 844 | ubi_msg("\"delete\" compatible internal volume %d:%d" | 844 | ubi_msg("\"delete\" compatible internal volume %d:%d" |
| 845 | " found, will remove it", vol_id, lnum); | 845 | " found, will remove it", vol_id, lnum); |
| 846 | err = add_to_list(si, pnum, ec, &si->corr); | 846 | err = add_to_list(si, pnum, ec, &si->erase); |
| 847 | if (err) | 847 | if (err) |
| 848 | return err; | 848 | return err; |
| 849 | return 0; | 849 | return 0; |
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index ee7b1d8fbb92..97a435672eaf 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c | |||
| @@ -1212,7 +1212,8 @@ int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum) | |||
| 1212 | retry: | 1212 | retry: |
| 1213 | spin_lock(&ubi->wl_lock); | 1213 | spin_lock(&ubi->wl_lock); |
| 1214 | e = ubi->lookuptbl[pnum]; | 1214 | e = ubi->lookuptbl[pnum]; |
| 1215 | if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub)) { | 1215 | if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) || |
| 1216 | in_wl_tree(e, &ubi->erroneous)) { | ||
| 1216 | spin_unlock(&ubi->wl_lock); | 1217 | spin_unlock(&ubi->wl_lock); |
| 1217 | return 0; | 1218 | return 0; |
| 1218 | } | 1219 | } |
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c index c685a55fc2f4..85671adae455 100644 --- a/drivers/net/3c59x.c +++ b/drivers/net/3c59x.c | |||
| @@ -647,7 +647,7 @@ struct vortex_private { | |||
| 647 | u16 io_size; /* Size of PCI region (for release_region) */ | 647 | u16 io_size; /* Size of PCI region (for release_region) */ |
| 648 | 648 | ||
| 649 | /* Serialises access to hardware other than MII and variables below. | 649 | /* Serialises access to hardware other than MII and variables below. |
| 650 | * The lock hierarchy is rtnl_lock > lock > mii_lock > window_lock. */ | 650 | * The lock hierarchy is rtnl_lock > {lock, mii_lock} > window_lock. */ |
| 651 | spinlock_t lock; | 651 | spinlock_t lock; |
| 652 | 652 | ||
| 653 | spinlock_t mii_lock; /* Serialises access to MII */ | 653 | spinlock_t mii_lock; /* Serialises access to MII */ |
| @@ -1994,10 +1994,9 @@ vortex_error(struct net_device *dev, int status) | |||
| 1994 | } | 1994 | } |
| 1995 | } | 1995 | } |
| 1996 | 1996 | ||
| 1997 | if (status & RxEarly) { /* Rx early is unused. */ | 1997 | if (status & RxEarly) /* Rx early is unused. */ |
| 1998 | vortex_rx(dev); | ||
| 1999 | iowrite16(AckIntr | RxEarly, ioaddr + EL3_CMD); | 1998 | iowrite16(AckIntr | RxEarly, ioaddr + EL3_CMD); |
| 2000 | } | 1999 | |
| 2001 | if (status & StatsFull) { /* Empty statistics. */ | 2000 | if (status & StatsFull) { /* Empty statistics. */ |
| 2002 | static int DoneDidThat; | 2001 | static int DoneDidThat; |
| 2003 | if (vortex_debug > 4) | 2002 | if (vortex_debug > 4) |
| @@ -2298,7 +2297,12 @@ vortex_interrupt(int irq, void *dev_id) | |||
| 2298 | if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) { | 2297 | if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) { |
| 2299 | if (status == 0xffff) | 2298 | if (status == 0xffff) |
| 2300 | break; | 2299 | break; |
| 2300 | if (status & RxEarly) | ||
| 2301 | vortex_rx(dev); | ||
| 2302 | spin_unlock(&vp->window_lock); | ||
| 2301 | vortex_error(dev, status); | 2303 | vortex_error(dev, status); |
| 2304 | spin_lock(&vp->window_lock); | ||
| 2305 | window_set(vp, 7); | ||
| 2302 | } | 2306 | } |
| 2303 | 2307 | ||
| 2304 | if (--work_done < 0) { | 2308 | if (--work_done < 0) { |
| @@ -2984,7 +2988,6 @@ static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
| 2984 | { | 2988 | { |
| 2985 | int err; | 2989 | int err; |
| 2986 | struct vortex_private *vp = netdev_priv(dev); | 2990 | struct vortex_private *vp = netdev_priv(dev); |
| 2987 | unsigned long flags; | ||
| 2988 | pci_power_t state = 0; | 2991 | pci_power_t state = 0; |
| 2989 | 2992 | ||
| 2990 | if(VORTEX_PCI(vp)) | 2993 | if(VORTEX_PCI(vp)) |
| @@ -2994,9 +2997,7 @@ static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
| 2994 | 2997 | ||
| 2995 | if(state != 0) | 2998 | if(state != 0) |
| 2996 | pci_set_power_state(VORTEX_PCI(vp), PCI_D0); | 2999 | pci_set_power_state(VORTEX_PCI(vp), PCI_D0); |
| 2997 | spin_lock_irqsave(&vp->lock, flags); | ||
| 2998 | err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL); | 3000 | err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL); |
| 2999 | spin_unlock_irqrestore(&vp->lock, flags); | ||
| 3000 | if(state != 0) | 3001 | if(state != 0) |
| 3001 | pci_set_power_state(VORTEX_PCI(vp), state); | 3002 | pci_set_power_state(VORTEX_PCI(vp), state); |
| 3002 | 3003 | ||
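The 3c59x hunks above rewrite the lock-hierarchy comment to `rtnl_lock > {lock, mii_lock} > window_lock` and drop `vp->lock` around the MII ioctl, presumably because the MII helpers take `mii_lock` themselves and the two locks are now siblings rather than nested. A pthreads sketch of the general rule being relied on, with assumed names and no claim about the driver's exact call chain:

```c
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER; /* e.g. vp->lock */
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER; /* e.g. mii_lock */

/* Any path that needs both locks takes them in one documented order. */
static void do_both(void)
{
	pthread_mutex_lock(&lock_a);
	pthread_mutex_lock(&lock_b);
	/* ... work that touches both protected domains ... */
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
}

/* Once the hierarchy makes the locks siblings, a path that only needs
 * lock_b may take it alone without first acquiring lock_a. */
static void do_b_only(void)
{
	pthread_mutex_lock(&lock_b);
	/* ... MII-style access ... */
	pthread_mutex_unlock(&lock_b);
}

int main(void)
{
	do_both();
	do_b_only();
	printf("no lock-order inversion\n");
	return 0;
}
```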
diff --git a/drivers/net/b44.c b/drivers/net/b44.c index 37617abc1647..1e620e287ae0 100644 --- a/drivers/net/b44.c +++ b/drivers/net/b44.c | |||
| @@ -848,6 +848,15 @@ static int b44_poll(struct napi_struct *napi, int budget) | |||
| 848 | b44_tx(bp); | 848 | b44_tx(bp); |
| 849 | /* spin_unlock(&bp->tx_lock); */ | 849 | /* spin_unlock(&bp->tx_lock); */ |
| 850 | } | 850 | } |
| 851 | if (bp->istat & ISTAT_RFO) { /* fast recovery, in ~20msec */ | ||
| 852 | bp->istat &= ~ISTAT_RFO; | ||
| 853 | b44_disable_ints(bp); | ||
| 854 | ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */ | ||
| 855 | b44_init_rings(bp); | ||
| 856 | b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY); | ||
| 857 | netif_wake_queue(bp->dev); | ||
| 858 | } | ||
| 859 | |||
| 851 | spin_unlock_irqrestore(&bp->lock, flags); | 860 | spin_unlock_irqrestore(&bp->lock, flags); |
| 852 | 861 | ||
| 853 | work_done = 0; | 862 | work_done = 0; |
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h index 99197bd54da5..53306bf3f401 100644 --- a/drivers/net/benet/be.h +++ b/drivers/net/benet/be.h | |||
| @@ -181,6 +181,7 @@ struct be_drvr_stats { | |||
| 181 | u64 be_rx_bytes_prev; | 181 | u64 be_rx_bytes_prev; |
| 182 | u64 be_rx_pkts; | 182 | u64 be_rx_pkts; |
| 183 | u32 be_rx_rate; | 183 | u32 be_rx_rate; |
| 184 | u32 be_rx_mcast_pkt; | ||
| 184 | /* number of non ether type II frames dropped where | 185 | /* number of non ether type II frames dropped where |
| 185 | * frame len > length field of Mac Hdr */ | 186 | * frame len > length field of Mac Hdr */ |
| 186 | u32 be_802_3_dropped_frames; | 187 | u32 be_802_3_dropped_frames; |
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c index 3d305494a606..34abcc9403d6 100644 --- a/drivers/net/benet/be_cmds.c +++ b/drivers/net/benet/be_cmds.c | |||
| @@ -140,10 +140,8 @@ int be_process_mcc(struct be_adapter *adapter, int *status) | |||
| 140 | while ((compl = be_mcc_compl_get(adapter))) { | 140 | while ((compl = be_mcc_compl_get(adapter))) { |
| 141 | if (compl->flags & CQE_FLAGS_ASYNC_MASK) { | 141 | if (compl->flags & CQE_FLAGS_ASYNC_MASK) { |
| 142 | /* Interpret flags as an async trailer */ | 142 | /* Interpret flags as an async trailer */ |
| 143 | BUG_ON(!is_link_state_evt(compl->flags)); | 143 | if (is_link_state_evt(compl->flags)) |
| 144 | 144 | be_async_link_state_process(adapter, | |
| 145 | /* Interpret compl as a async link evt */ | ||
| 146 | be_async_link_state_process(adapter, | ||
| 147 | (struct be_async_event_link_state *) compl); | 145 | (struct be_async_event_link_state *) compl); |
| 148 | } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) { | 146 | } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) { |
| 149 | *status = be_mcc_compl_process(adapter, compl); | 147 | *status = be_mcc_compl_process(adapter, compl); |
| @@ -207,7 +205,7 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db) | |||
| 207 | 205 | ||
| 208 | if (msecs > 4000) { | 206 | if (msecs > 4000) { |
| 209 | dev_err(&adapter->pdev->dev, "mbox poll timed out\n"); | 207 | dev_err(&adapter->pdev->dev, "mbox poll timed out\n"); |
| 210 | be_dump_ue(adapter); | 208 | be_detect_dump_ue(adapter); |
| 211 | return -1; | 209 | return -1; |
| 212 | } | 210 | } |
| 213 | 211 | ||
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h index bdc10a28cfda..ad1e6fac60c5 100644 --- a/drivers/net/benet/be_cmds.h +++ b/drivers/net/benet/be_cmds.h | |||
| @@ -992,5 +992,5 @@ extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, | |||
| 992 | extern int be_cmd_get_phy_info(struct be_adapter *adapter, | 992 | extern int be_cmd_get_phy_info(struct be_adapter *adapter, |
| 993 | struct be_dma_mem *cmd); | 993 | struct be_dma_mem *cmd); |
| 994 | extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain); | 994 | extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain); |
| 995 | extern void be_dump_ue(struct be_adapter *adapter); | 995 | extern void be_detect_dump_ue(struct be_adapter *adapter); |
| 996 | 996 | ||
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c index cd16243c7c36..13f0abbc5205 100644 --- a/drivers/net/benet/be_ethtool.c +++ b/drivers/net/benet/be_ethtool.c | |||
| @@ -60,6 +60,7 @@ static const struct be_ethtool_stat et_stats[] = { | |||
| 60 | {DRVSTAT_INFO(be_rx_events)}, | 60 | {DRVSTAT_INFO(be_rx_events)}, |
| 61 | {DRVSTAT_INFO(be_tx_compl)}, | 61 | {DRVSTAT_INFO(be_tx_compl)}, |
| 62 | {DRVSTAT_INFO(be_rx_compl)}, | 62 | {DRVSTAT_INFO(be_rx_compl)}, |
| 63 | {DRVSTAT_INFO(be_rx_mcast_pkt)}, | ||
| 63 | {DRVSTAT_INFO(be_ethrx_post_fail)}, | 64 | {DRVSTAT_INFO(be_ethrx_post_fail)}, |
| 64 | {DRVSTAT_INFO(be_802_3_dropped_frames)}, | 65 | {DRVSTAT_INFO(be_802_3_dropped_frames)}, |
| 65 | {DRVSTAT_INFO(be_802_3_malformed_frames)}, | 66 | {DRVSTAT_INFO(be_802_3_malformed_frames)}, |
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h index 5d38046402b2..a2ec5df0d733 100644 --- a/drivers/net/benet/be_hw.h +++ b/drivers/net/benet/be_hw.h | |||
| @@ -167,8 +167,11 @@ | |||
| 167 | #define FLASH_FCoE_BIOS_START_g3 (13631488) | 167 | #define FLASH_FCoE_BIOS_START_g3 (13631488) |
| 168 | #define FLASH_REDBOOT_START_g3 (262144) | 168 | #define FLASH_REDBOOT_START_g3 (262144) |
| 169 | 169 | ||
| 170 | 170 | /************* Rx Packet Type Encoding **************/ | |
| 171 | 171 | #define BE_UNICAST_PACKET 0 | |
| 172 | #define BE_MULTICAST_PACKET 1 | ||
| 173 | #define BE_BROADCAST_PACKET 2 | ||
| 174 | #define BE_RSVD_PACKET 3 | ||
| 172 | 175 | ||
| 173 | /* | 176 | /* |
| 174 | * BE descriptors: host memory data structures whose formats | 177 | * BE descriptors: host memory data structures whose formats |
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index 74e146f470c6..6eda7a022256 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c | |||
| @@ -247,6 +247,7 @@ void netdev_stats_update(struct be_adapter *adapter) | |||
| 247 | dev_stats->tx_packets = drvr_stats(adapter)->be_tx_pkts; | 247 | dev_stats->tx_packets = drvr_stats(adapter)->be_tx_pkts; |
| 248 | dev_stats->rx_bytes = drvr_stats(adapter)->be_rx_bytes; | 248 | dev_stats->rx_bytes = drvr_stats(adapter)->be_rx_bytes; |
| 249 | dev_stats->tx_bytes = drvr_stats(adapter)->be_tx_bytes; | 249 | dev_stats->tx_bytes = drvr_stats(adapter)->be_tx_bytes; |
| 250 | dev_stats->multicast = drvr_stats(adapter)->be_rx_mcast_pkt; | ||
| 250 | 251 | ||
| 251 | /* bad pkts received */ | 252 | /* bad pkts received */ |
| 252 | dev_stats->rx_errors = port_stats->rx_crc_errors + | 253 | dev_stats->rx_errors = port_stats->rx_crc_errors + |
| @@ -294,7 +295,6 @@ void netdev_stats_update(struct be_adapter *adapter) | |||
| 294 | /* no space available in linux */ | 295 | /* no space available in linux */ |
| 295 | dev_stats->tx_dropped = 0; | 296 | dev_stats->tx_dropped = 0; |
| 296 | 297 | ||
| 297 | dev_stats->multicast = port_stats->rx_multicast_frames; | ||
| 298 | dev_stats->collisions = 0; | 298 | dev_stats->collisions = 0; |
| 299 | 299 | ||
| 300 | /* detailed tx_errors */ | 300 | /* detailed tx_errors */ |
| @@ -848,7 +848,7 @@ static void be_rx_rate_update(struct be_adapter *adapter) | |||
| 848 | } | 848 | } |
| 849 | 849 | ||
| 850 | static void be_rx_stats_update(struct be_adapter *adapter, | 850 | static void be_rx_stats_update(struct be_adapter *adapter, |
| 851 | u32 pktsize, u16 numfrags) | 851 | u32 pktsize, u16 numfrags, u8 pkt_type) |
| 852 | { | 852 | { |
| 853 | struct be_drvr_stats *stats = drvr_stats(adapter); | 853 | struct be_drvr_stats *stats = drvr_stats(adapter); |
| 854 | 854 | ||
| @@ -856,6 +856,9 @@ static void be_rx_stats_update(struct be_adapter *adapter, | |||
| 856 | stats->be_rx_frags += numfrags; | 856 | stats->be_rx_frags += numfrags; |
| 857 | stats->be_rx_bytes += pktsize; | 857 | stats->be_rx_bytes += pktsize; |
| 858 | stats->be_rx_pkts++; | 858 | stats->be_rx_pkts++; |
| 859 | |||
| 860 | if (pkt_type == BE_MULTICAST_PACKET) | ||
| 861 | stats->be_rx_mcast_pkt++; | ||
| 859 | } | 862 | } |
| 860 | 863 | ||
| 861 | static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso) | 864 | static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso) |
| @@ -925,9 +928,11 @@ static void skb_fill_rx_data(struct be_adapter *adapter, | |||
| 925 | u16 rxq_idx, i, j; | 928 | u16 rxq_idx, i, j; |
| 926 | u32 pktsize, hdr_len, curr_frag_len, size; | 929 | u32 pktsize, hdr_len, curr_frag_len, size; |
| 927 | u8 *start; | 930 | u8 *start; |
| 931 | u8 pkt_type; | ||
| 928 | 932 | ||
| 929 | rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); | 933 | rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); |
| 930 | pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp); | 934 | pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp); |
| 935 | pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp); | ||
| 931 | 936 | ||
| 932 | page_info = get_rx_page_info(adapter, rxq_idx); | 937 | page_info = get_rx_page_info(adapter, rxq_idx); |
| 933 | 938 | ||
| @@ -993,7 +998,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter, | |||
| 993 | BUG_ON(j > MAX_SKB_FRAGS); | 998 | BUG_ON(j > MAX_SKB_FRAGS); |
| 994 | 999 | ||
| 995 | done: | 1000 | done: |
| 996 | be_rx_stats_update(adapter, pktsize, num_rcvd); | 1001 | be_rx_stats_update(adapter, pktsize, num_rcvd, pkt_type); |
| 997 | } | 1002 | } |
| 998 | 1003 | ||
| 999 | /* Process the RX completion indicated by rxcp when GRO is disabled */ | 1004 | /* Process the RX completion indicated by rxcp when GRO is disabled */ |
| @@ -1060,6 +1065,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter, | |||
| 1060 | u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len; | 1065 | u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len; |
| 1061 | u16 i, rxq_idx = 0, vid, j; | 1066 | u16 i, rxq_idx = 0, vid, j; |
| 1062 | u8 vtm; | 1067 | u8 vtm; |
| 1068 | u8 pkt_type; | ||
| 1063 | 1069 | ||
| 1064 | num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); | 1070 | num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); |
| 1065 | /* Is it a flush compl that has no data */ | 1071 | /* Is it a flush compl that has no data */ |
| @@ -1070,6 +1076,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter, | |||
| 1070 | vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp); | 1076 | vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp); |
| 1071 | rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); | 1077 | rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); |
| 1072 | vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp); | 1078 | vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp); |
| 1079 | pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp); | ||
| 1073 | 1080 | ||
| 1074 | /* vlanf could be wrongly set in some cards. | 1081 | /* vlanf could be wrongly set in some cards. |
| 1075 | * ignore if vtm is not set */ | 1082 | * ignore if vtm is not set */ |
| @@ -1125,7 +1132,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter, | |||
| 1125 | vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid); | 1132 | vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid); |
| 1126 | } | 1133 | } |
| 1127 | 1134 | ||
| 1128 | be_rx_stats_update(adapter, pkt_size, num_rcvd); | 1135 | be_rx_stats_update(adapter, pkt_size, num_rcvd, pkt_type); |
| 1129 | } | 1136 | } |
| 1130 | 1137 | ||
| 1131 | static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter) | 1138 | static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter) |
| @@ -1743,26 +1750,7 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget) | |||
| 1743 | return 1; | 1750 | return 1; |
| 1744 | } | 1751 | } |
| 1745 | 1752 | ||
| 1746 | static inline bool be_detect_ue(struct be_adapter *adapter) | 1753 | void be_detect_dump_ue(struct be_adapter *adapter) |
| 1747 | { | ||
| 1748 | u32 online0 = 0, online1 = 0; | ||
| 1749 | |||
| 1750 | pci_read_config_dword(adapter->pdev, PCICFG_ONLINE0, &online0); | ||
| 1751 | |||
| 1752 | pci_read_config_dword(adapter->pdev, PCICFG_ONLINE1, &online1); | ||
| 1753 | |||
| 1754 | if (!online0 || !online1) { | ||
| 1755 | adapter->ue_detected = true; | ||
| 1756 | dev_err(&adapter->pdev->dev, | ||
| 1757 | "UE Detected!! online0=%d online1=%d\n", | ||
| 1758 | online0, online1); | ||
| 1759 | return true; | ||
| 1760 | } | ||
| 1761 | |||
| 1762 | return false; | ||
| 1763 | } | ||
| 1764 | |||
| 1765 | void be_dump_ue(struct be_adapter *adapter) | ||
| 1766 | { | 1754 | { |
| 1767 | u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask; | 1755 | u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask; |
| 1768 | u32 i; | 1756 | u32 i; |
| @@ -1779,6 +1767,11 @@ void be_dump_ue(struct be_adapter *adapter) | |||
| 1779 | ue_status_lo = (ue_status_lo & (~ue_status_lo_mask)); | 1767 | ue_status_lo = (ue_status_lo & (~ue_status_lo_mask)); |
| 1780 | ue_status_hi = (ue_status_hi & (~ue_status_hi_mask)); | 1768 | ue_status_hi = (ue_status_hi & (~ue_status_hi_mask)); |
| 1781 | 1769 | ||
| 1770 | if (ue_status_lo || ue_status_hi) { | ||
| 1771 | adapter->ue_detected = true; | ||
| 1772 | dev_err(&adapter->pdev->dev, "UE Detected!!\n"); | ||
| 1773 | } | ||
| 1774 | |||
| 1782 | if (ue_status_lo) { | 1775 | if (ue_status_lo) { |
| 1783 | for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) { | 1776 | for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) { |
| 1784 | if (ue_status_lo & 1) | 1777 | if (ue_status_lo & 1) |
| @@ -1814,10 +1807,8 @@ static void be_worker(struct work_struct *work) | |||
| 1814 | adapter->rx_post_starved = false; | 1807 | adapter->rx_post_starved = false; |
| 1815 | be_post_rx_frags(adapter); | 1808 | be_post_rx_frags(adapter); |
| 1816 | } | 1809 | } |
| 1817 | if (!adapter->ue_detected) { | 1810 | if (!adapter->ue_detected) |
| 1818 | if (be_detect_ue(adapter)) | 1811 | be_detect_dump_ue(adapter); |
| 1819 | be_dump_ue(adapter); | ||
| 1820 | } | ||
| 1821 | 1812 | ||
| 1822 | schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); | 1813 | schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); |
| 1823 | } | 1814 | } |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 2cc4cfc31892..3b16f62d5606 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
| @@ -2797,9 +2797,15 @@ void bond_loadbalance_arp_mon(struct work_struct *work) | |||
| 2797 | * so it can wait | 2797 | * so it can wait |
| 2798 | */ | 2798 | */ |
| 2799 | bond_for_each_slave(bond, slave, i) { | 2799 | bond_for_each_slave(bond, slave, i) { |
| 2800 | unsigned long trans_start = dev_trans_start(slave->dev); | ||
| 2801 | |||
| 2800 | if (slave->link != BOND_LINK_UP) { | 2802 | if (slave->link != BOND_LINK_UP) { |
| 2801 | if (time_before_eq(jiffies, dev_trans_start(slave->dev) + delta_in_ticks) && | 2803 | if (time_in_range(jiffies, |
| 2802 | time_before_eq(jiffies, slave->dev->last_rx + delta_in_ticks)) { | 2804 | trans_start - delta_in_ticks, |
| 2805 | trans_start + delta_in_ticks) && | ||
| 2806 | time_in_range(jiffies, | ||
| 2807 | slave->dev->last_rx - delta_in_ticks, | ||
| 2808 | slave->dev->last_rx + delta_in_ticks)) { | ||
| 2803 | 2809 | ||
| 2804 | slave->link = BOND_LINK_UP; | 2810 | slave->link = BOND_LINK_UP; |
| 2805 | slave->state = BOND_STATE_ACTIVE; | 2811 | slave->state = BOND_STATE_ACTIVE; |
| @@ -2827,8 +2833,12 @@ void bond_loadbalance_arp_mon(struct work_struct *work) | |||
| 2827 | * when the source ip is 0, so don't take the link down | 2833 | * when the source ip is 0, so don't take the link down |
| 2828 | * if we don't know our ip yet | 2834 | * if we don't know our ip yet |
| 2829 | */ | 2835 | */ |
| 2830 | if (time_after_eq(jiffies, dev_trans_start(slave->dev) + 2*delta_in_ticks) || | 2836 | if (!time_in_range(jiffies, |
| 2831 | (time_after_eq(jiffies, slave->dev->last_rx + 2*delta_in_ticks))) { | 2837 | trans_start - delta_in_ticks, |
| 2838 | trans_start + 2 * delta_in_ticks) || | ||
| 2839 | !time_in_range(jiffies, | ||
| 2840 | slave->dev->last_rx - delta_in_ticks, | ||
| 2841 | slave->dev->last_rx + 2 * delta_in_ticks)) { | ||
| 2832 | 2842 | ||
| 2833 | slave->link = BOND_LINK_DOWN; | 2843 | slave->link = BOND_LINK_DOWN; |
| 2834 | slave->state = BOND_STATE_BACKUP; | 2844 | slave->state = BOND_STATE_BACKUP; |
| @@ -2883,13 +2893,16 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) | |||
| 2883 | { | 2893 | { |
| 2884 | struct slave *slave; | 2894 | struct slave *slave; |
| 2885 | int i, commit = 0; | 2895 | int i, commit = 0; |
| 2896 | unsigned long trans_start; | ||
| 2886 | 2897 | ||
| 2887 | bond_for_each_slave(bond, slave, i) { | 2898 | bond_for_each_slave(bond, slave, i) { |
| 2888 | slave->new_link = BOND_LINK_NOCHANGE; | 2899 | slave->new_link = BOND_LINK_NOCHANGE; |
| 2889 | 2900 | ||
| 2890 | if (slave->link != BOND_LINK_UP) { | 2901 | if (slave->link != BOND_LINK_UP) { |
| 2891 | if (time_before_eq(jiffies, slave_last_rx(bond, slave) + | 2902 | if (time_in_range(jiffies, |
| 2892 | delta_in_ticks)) { | 2903 | slave_last_rx(bond, slave) - delta_in_ticks, |
| 2904 | slave_last_rx(bond, slave) + delta_in_ticks)) { | ||
| 2905 | |||
| 2893 | slave->new_link = BOND_LINK_UP; | 2906 | slave->new_link = BOND_LINK_UP; |
| 2894 | commit++; | 2907 | commit++; |
| 2895 | } | 2908 | } |
| @@ -2902,8 +2915,9 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) | |||
| 2902 | * active. This avoids bouncing, as the last receive | 2915 | * active. This avoids bouncing, as the last receive |
| 2903 | * times need a full ARP monitor cycle to be updated. | 2916 | * times need a full ARP monitor cycle to be updated. |
| 2904 | */ | 2917 | */ |
| 2905 | if (!time_after_eq(jiffies, slave->jiffies + | 2918 | if (time_in_range(jiffies, |
| 2906 | 2 * delta_in_ticks)) | 2919 | slave->jiffies - delta_in_ticks, |
| 2920 | slave->jiffies + 2 * delta_in_ticks)) | ||
| 2907 | continue; | 2921 | continue; |
| 2908 | 2922 | ||
| 2909 | /* | 2923 | /* |
| @@ -2921,8 +2935,10 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) | |||
| 2921 | */ | 2935 | */ |
| 2922 | if (slave->state == BOND_STATE_BACKUP && | 2936 | if (slave->state == BOND_STATE_BACKUP && |
| 2923 | !bond->current_arp_slave && | 2937 | !bond->current_arp_slave && |
| 2924 | time_after(jiffies, slave_last_rx(bond, slave) + | 2938 | !time_in_range(jiffies, |
| 2925 | 3 * delta_in_ticks)) { | 2939 | slave_last_rx(bond, slave) - delta_in_ticks, |
| 2940 | slave_last_rx(bond, slave) + 3 * delta_in_ticks)) { | ||
| 2941 | |||
| 2926 | slave->new_link = BOND_LINK_DOWN; | 2942 | slave->new_link = BOND_LINK_DOWN; |
| 2927 | commit++; | 2943 | commit++; |
| 2928 | } | 2944 | } |
| @@ -2933,11 +2949,15 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) | |||
| 2933 | * - (more than 2*delta since receive AND | 2949 | * - (more than 2*delta since receive AND |
| 2934 | * the bond has an IP address) | 2950 | * the bond has an IP address) |
| 2935 | */ | 2951 | */ |
| 2952 | trans_start = dev_trans_start(slave->dev); | ||
| 2936 | if ((slave->state == BOND_STATE_ACTIVE) && | 2953 | if ((slave->state == BOND_STATE_ACTIVE) && |
| 2937 | (time_after_eq(jiffies, dev_trans_start(slave->dev) + | 2954 | (!time_in_range(jiffies, |
| 2938 | 2 * delta_in_ticks) || | 2955 | trans_start - delta_in_ticks, |
| 2939 | (time_after_eq(jiffies, slave_last_rx(bond, slave) | 2956 | trans_start + 2 * delta_in_ticks) || |
| 2940 | + 2 * delta_in_ticks)))) { | 2957 | !time_in_range(jiffies, |
| 2958 | slave_last_rx(bond, slave) - delta_in_ticks, | ||
| 2959 | slave_last_rx(bond, slave) + 2 * delta_in_ticks))) { | ||
| 2960 | |||
| 2941 | slave->new_link = BOND_LINK_DOWN; | 2961 | slave->new_link = BOND_LINK_DOWN; |
| 2942 | commit++; | 2962 | commit++; |
| 2943 | } | 2963 | } |
| @@ -2956,6 +2976,7 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks) | |||
| 2956 | { | 2976 | { |
| 2957 | struct slave *slave; | 2977 | struct slave *slave; |
| 2958 | int i; | 2978 | int i; |
| 2979 | unsigned long trans_start; | ||
| 2959 | 2980 | ||
| 2960 | bond_for_each_slave(bond, slave, i) { | 2981 | bond_for_each_slave(bond, slave, i) { |
| 2961 | switch (slave->new_link) { | 2982 | switch (slave->new_link) { |
| @@ -2963,10 +2984,11 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks) | |||
| 2963 | continue; | 2984 | continue; |
| 2964 | 2985 | ||
| 2965 | case BOND_LINK_UP: | 2986 | case BOND_LINK_UP: |
| 2987 | trans_start = dev_trans_start(slave->dev); | ||
| 2966 | if ((!bond->curr_active_slave && | 2988 | if ((!bond->curr_active_slave && |
| 2967 | time_before_eq(jiffies, | 2989 | time_in_range(jiffies, |
| 2968 | dev_trans_start(slave->dev) + | 2990 | trans_start - delta_in_ticks, |
| 2969 | delta_in_ticks)) || | 2991 | trans_start + delta_in_ticks)) || |
| 2970 | bond->curr_active_slave != slave) { | 2992 | bond->curr_active_slave != slave) { |
| 2971 | slave->link = BOND_LINK_UP; | 2993 | slave->link = BOND_LINK_UP; |
| 2972 | bond->current_arp_slave = NULL; | 2994 | bond->current_arp_slave = NULL; |
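The bonding hunks above replace paired `time_before_eq()`/`time_after_eq()` tests with `time_in_range()` checks anchored on the captured `trans_start` and last-rx stamps. A standalone sketch (userspace, `unsigned long` standing in for the wrapping jiffies counter; the helper mirrors the kernel macro in spirit only) of why the unsigned-difference range test stays correct across wraparound:

```c
#include <stdio.h>

typedef unsigned long jiffies_t;

/* true when lo <= now <= hi on a wrapping counter: both differences are
 * computed in unsigned arithmetic, so a wrap between lo and hi is harmless */
static int time_in_range(jiffies_t now, jiffies_t lo, jiffies_t hi)
{
	return (now - lo) <= (hi - lo);
}

int main(void)
{
	jiffies_t delta = 100;
	jiffies_t trans_start = (jiffies_t)-30;	/* just before wraparound */
	jiffies_t now = 40;			/* just after wraparound */

	printf("link considered fresh: %s\n",
	       time_in_range(now, trans_start - delta, trans_start + delta)
	       ? "yes" : "no");
	return 0;
}
```

With the wrap straddling the window, the naive `now <= trans_start + delta` comparison would misclassify the slave, which is the failure mode the range form avoids.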
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c index b4fb07a6f13f..51919fcd50c2 100644 --- a/drivers/net/ks8851.c +++ b/drivers/net/ks8851.c | |||
| @@ -503,30 +503,33 @@ static void ks8851_rx_pkts(struct ks8851_net *ks) | |||
| 503 | ks8851_wrreg16(ks, KS_RXQCR, | 503 | ks8851_wrreg16(ks, KS_RXQCR, |
| 504 | ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE); | 504 | ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE); |
| 505 | 505 | ||
| 506 | if (rxlen > 0) { | 506 | if (rxlen > 4) { |
| 507 | skb = netdev_alloc_skb(ks->netdev, rxlen + 2 + 8); | 507 | unsigned int rxalign; |
| 508 | if (!skb) { | 508 | |
| 509 | /* todo - dump frame and move on */ | 509 | rxlen -= 4; |
| 510 | } | 510 | rxalign = ALIGN(rxlen, 4); |
| 511 | skb = netdev_alloc_skb_ip_align(ks->netdev, rxalign); | ||
| 512 | if (skb) { | ||
| 511 | 513 | ||
| 512 | /* two bytes to ensure ip is aligned, and four bytes | 514 | /* 4 bytes of status header + 4 bytes of |
| 513 | * for the status header and 4 bytes of garbage */ | 515 | * garbage: we put them before ethernet |
| 514 | skb_reserve(skb, 2 + 4 + 4); | 516 | * header, so that they are copied, |
| 517 | * but ignored. | ||
| 518 | */ | ||
| 515 | 519 | ||
| 516 | rxpkt = skb_put(skb, rxlen - 4) - 8; | 520 | rxpkt = skb_put(skb, rxlen) - 8; |
| 517 | 521 | ||
| 518 | /* align the packet length to 4 bytes, and add 4 bytes | 522 | ks8851_rdfifo(ks, rxpkt, rxalign + 8); |
| 519 | * as we're getting the rx status header as well */ | ||
| 520 | ks8851_rdfifo(ks, rxpkt, ALIGN(rxlen, 4) + 8); | ||
| 521 | 523 | ||
| 522 | if (netif_msg_pktdata(ks)) | 524 | if (netif_msg_pktdata(ks)) |
| 523 | ks8851_dbg_dumpkkt(ks, rxpkt); | 525 | ks8851_dbg_dumpkkt(ks, rxpkt); |
| 524 | 526 | ||
| 525 | skb->protocol = eth_type_trans(skb, ks->netdev); | 527 | skb->protocol = eth_type_trans(skb, ks->netdev); |
| 526 | netif_rx(skb); | 528 | netif_rx(skb); |
| 527 | 529 | ||
| 528 | ks->netdev->stats.rx_packets++; | 530 | ks->netdev->stats.rx_packets++; |
| 529 | ks->netdev->stats.rx_bytes += rxlen - 4; | 531 | ks->netdev->stats.rx_bytes += rxlen; |
| 532 | } | ||
| 530 | } | 533 | } |
| 531 | 534 | ||
| 532 | ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); | 535 | ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); |
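The ks8851 hunk above trims the 4-byte status tail from the reported length, rounds the FIFO read up to a 4-byte boundary, and accounts stats with the trimmed length. A small arithmetic sketch of that sizing (names local to the example, not the driver's):

```c
#include <stdio.h>

#define ALIGN4(x)	(((x) + 3u) & ~3u)

int main(void)
{
	unsigned int rxlen = 67;	/* length as reported by the chip */

	if (rxlen > 4) {		/* anything shorter is status only */
		unsigned int payload = rxlen - 4;	      /* drop 4-byte tail */
		unsigned int fifo_read = ALIGN4(payload) + 8; /* header + padding */

		printf("payload=%u bytes, fifo read=%u bytes\n",
		       payload, fifo_read);
	}
	return 0;
}
```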
diff --git a/drivers/net/niu.c b/drivers/net/niu.c index bc695d53cdcc..fe6983af6918 100644 --- a/drivers/net/niu.c +++ b/drivers/net/niu.c | |||
| @@ -7269,32 +7269,28 @@ static int niu_get_ethtool_tcam_all(struct niu *np, | |||
| 7269 | struct niu_parent *parent = np->parent; | 7269 | struct niu_parent *parent = np->parent; |
| 7270 | struct niu_tcam_entry *tp; | 7270 | struct niu_tcam_entry *tp; |
| 7271 | int i, idx, cnt; | 7271 | int i, idx, cnt; |
| 7272 | u16 n_entries; | ||
| 7273 | unsigned long flags; | 7272 | unsigned long flags; |
| 7274 | 7273 | int ret = 0; | |
| 7275 | 7274 | ||
| 7276 | /* put the tcam size here */ | 7275 | /* put the tcam size here */ |
| 7277 | nfc->data = tcam_get_size(np); | 7276 | nfc->data = tcam_get_size(np); |
| 7278 | 7277 | ||
| 7279 | niu_lock_parent(np, flags); | 7278 | niu_lock_parent(np, flags); |
| 7280 | n_entries = nfc->rule_cnt; | ||
| 7281 | for (cnt = 0, i = 0; i < nfc->data; i++) { | 7279 | for (cnt = 0, i = 0; i < nfc->data; i++) { |
| 7282 | idx = tcam_get_index(np, i); | 7280 | idx = tcam_get_index(np, i); |
| 7283 | tp = &parent->tcam[idx]; | 7281 | tp = &parent->tcam[idx]; |
| 7284 | if (!tp->valid) | 7282 | if (!tp->valid) |
| 7285 | continue; | 7283 | continue; |
| 7284 | if (cnt == nfc->rule_cnt) { | ||
| 7285 | ret = -EMSGSIZE; | ||
| 7286 | break; | ||
| 7287 | } | ||
| 7286 | rule_locs[cnt] = i; | 7288 | rule_locs[cnt] = i; |
| 7287 | cnt++; | 7289 | cnt++; |
| 7288 | } | 7290 | } |
| 7289 | niu_unlock_parent(np, flags); | 7291 | niu_unlock_parent(np, flags); |
| 7290 | 7292 | ||
| 7291 | if (n_entries != cnt) { | 7293 | return ret; |
| 7292 | /* print warning, this should not happen */ | ||
| 7293 | netdev_info(np->dev, "niu%d: In %s(): n_entries[%d] != cnt[%d]!!!\n", | ||
| 7294 | np->parent->index, __func__, n_entries, cnt); | ||
| 7295 | } | ||
| 7296 | |||
| 7297 | return 0; | ||
| 7298 | } | 7294 | } |
| 7299 | 7295 | ||
| 7300 | static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd, | 7296 | static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd, |
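The niu hunk above stops filling the caller-provided rule array once it is full and returns -EMSGSIZE, instead of counting past the end and warning afterwards. A userspace sketch of that bounded-fill pattern (invented helper and data, only the error-code convention is taken from the diff):

```c
#include <stdio.h>
#include <errno.h>

/* Copy the indices of valid entries into locs[], at most max_rules of them;
 * report -EMSGSIZE when the caller's buffer is too small. */
static int collect_rules(const int *valid, int total,
			 unsigned int *locs, int max_rules)
{
	int i, cnt = 0;

	for (i = 0; i < total; i++) {
		if (!valid[i])
			continue;
		if (cnt == max_rules)
			return -EMSGSIZE;	/* buffer too small */
		locs[cnt++] = (unsigned int)i;
	}
	return cnt;
}

int main(void)
{
	int valid[] = { 1, 0, 1, 1, 1 };
	unsigned int locs[2];
	int ret = collect_rules(valid, 5, locs, 2);

	printf("ret=%d\n", ret);	/* negative: more rules than slots */
	return 0;
}
```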
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c index c3edfe4c2651..49279b0ee526 100644 --- a/drivers/net/pcmcia/pcnet_cs.c +++ b/drivers/net/pcmcia/pcnet_cs.c | |||
| @@ -1637,6 +1637,7 @@ static struct pcmcia_device_id pcnet_ids[] = { | |||
| 1637 | PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCETTX", 0x547e66dc, 0x6fc5459b), | 1637 | PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCETTX", 0x547e66dc, 0x6fc5459b), |
| 1638 | PCMCIA_DEVICE_PROD_ID12("iPort", "10/100 Ethernet Card", 0x56c538d2, 0x11b0ffc0), | 1638 | PCMCIA_DEVICE_PROD_ID12("iPort", "10/100 Ethernet Card", 0x56c538d2, 0x11b0ffc0), |
| 1639 | PCMCIA_DEVICE_PROD_ID12("KANSAI ELECTRIC CO.,LTD", "KLA-PCM/T", 0xb18dc3b4, 0xcc51a956), | 1639 | PCMCIA_DEVICE_PROD_ID12("KANSAI ELECTRIC CO.,LTD", "KLA-PCM/T", 0xb18dc3b4, 0xcc51a956), |
| 1640 | PCMCIA_DEVICE_PROD_ID12("KENTRONICS", "KEP-230", 0xaf8144c9, 0x868f6616), | ||
| 1640 | PCMCIA_DEVICE_PROD_ID12("KCI", "PE520 PCMCIA Ethernet Adapter", 0xa89b87d3, 0x1eb88e64), | 1641 | PCMCIA_DEVICE_PROD_ID12("KCI", "PE520 PCMCIA Ethernet Adapter", 0xa89b87d3, 0x1eb88e64), |
| 1641 | PCMCIA_DEVICE_PROD_ID12("KINGMAX", "EN10T2T", 0x7bcb459a, 0xa5c81fa5), | 1642 | PCMCIA_DEVICE_PROD_ID12("KINGMAX", "EN10T2T", 0x7bcb459a, 0xa5c81fa5), |
| 1642 | PCMCIA_DEVICE_PROD_ID12("Kingston", "KNE-PC2", 0x1128e633, 0xce2a89b3), | 1643 | PCMCIA_DEVICE_PROD_ID12("Kingston", "KNE-PC2", 0x1128e633, 0xce2a89b3), |
diff --git a/drivers/net/pxa168_eth.c b/drivers/net/pxa168_eth.c index 410ea0a61371..85eddda276bd 100644 --- a/drivers/net/pxa168_eth.c +++ b/drivers/net/pxa168_eth.c | |||
| @@ -1606,6 +1606,8 @@ static int pxa168_eth_remove(struct platform_device *pdev) | |||
| 1606 | 1606 | ||
| 1607 | iounmap(pep->base); | 1607 | iounmap(pep->base); |
| 1608 | pep->base = NULL; | 1608 | pep->base = NULL; |
| 1609 | mdiobus_unregister(pep->smi_bus); | ||
| 1610 | mdiobus_free(pep->smi_bus); | ||
| 1609 | unregister_netdev(dev); | 1611 | unregister_netdev(dev); |
| 1610 | flush_scheduled_work(); | 1612 | flush_scheduled_work(); |
| 1611 | free_netdev(dev); | 1613 | free_netdev(dev); |
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c index bbb7951b9c4c..ea0461eb2dbe 100644 --- a/drivers/net/stmmac/stmmac_main.c +++ b/drivers/net/stmmac/stmmac_main.c | |||
| @@ -1865,15 +1865,15 @@ static int stmmac_resume(struct platform_device *pdev) | |||
| 1865 | if (!netif_running(dev)) | 1865 | if (!netif_running(dev)) |
| 1866 | return 0; | 1866 | return 0; |
| 1867 | 1867 | ||
| 1868 | spin_lock(&priv->lock); | ||
| 1869 | |||
| 1870 | if (priv->shutdown) { | 1868 | if (priv->shutdown) { |
| 1871 | /* Re-open the interface and re-init the MAC/DMA | 1869 | /* Re-open the interface and re-init the MAC/DMA |
| 1872 | and the rings. */ | 1870 | and the rings (i.e. on hibernation stage) */ |
| 1873 | stmmac_open(dev); | 1871 | stmmac_open(dev); |
| 1874 | goto out_resume; | 1872 | return 0; |
| 1875 | } | 1873 | } |
| 1876 | 1874 | ||
| 1875 | spin_lock(&priv->lock); | ||
| 1876 | |||
| 1877 | /* Power Down bit, into the PM register, is cleared | 1877 | /* Power Down bit, into the PM register, is cleared |
| 1878 | * automatically as soon as a magic packet or a Wake-up frame | 1878 | * automatically as soon as a magic packet or a Wake-up frame |
| 1879 | * is received. Anyway, it's better to manually clear | 1879 | * is received. Anyway, it's better to manually clear |
| @@ -1901,7 +1901,6 @@ static int stmmac_resume(struct platform_device *pdev) | |||
| 1901 | 1901 | ||
| 1902 | netif_start_queue(dev); | 1902 | netif_start_queue(dev); |
| 1903 | 1903 | ||
| 1904 | out_resume: | ||
| 1905 | spin_unlock(&priv->lock); | 1904 | spin_unlock(&priv->lock); |
| 1906 | return 0; | 1905 | return 0; |
| 1907 | } | 1906 | } |
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c index 8ed30fa35d0a..b2bcf99e6f08 100644 --- a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c | |||
| @@ -429,10 +429,6 @@ static const struct net_device_ops ipheth_netdev_ops = { | |||
| 429 | .ndo_get_stats = &ipheth_stats, | 429 | .ndo_get_stats = &ipheth_stats, |
| 430 | }; | 430 | }; |
| 431 | 431 | ||
| 432 | static struct device_type ipheth_type = { | ||
| 433 | .name = "wwan", | ||
| 434 | }; | ||
| 435 | |||
| 436 | static int ipheth_probe(struct usb_interface *intf, | 432 | static int ipheth_probe(struct usb_interface *intf, |
| 437 | const struct usb_device_id *id) | 433 | const struct usb_device_id *id) |
| 438 | { | 434 | { |
| @@ -450,7 +446,7 @@ static int ipheth_probe(struct usb_interface *intf, | |||
| 450 | 446 | ||
| 451 | netdev->netdev_ops = &ipheth_netdev_ops; | 447 | netdev->netdev_ops = &ipheth_netdev_ops; |
| 452 | netdev->watchdog_timeo = IPHETH_TX_TIMEOUT; | 448 | netdev->watchdog_timeo = IPHETH_TX_TIMEOUT; |
| 453 | strcpy(netdev->name, "wwan%d"); | 449 | strcpy(netdev->name, "eth%d"); |
| 454 | 450 | ||
| 455 | dev = netdev_priv(netdev); | 451 | dev = netdev_priv(netdev); |
| 456 | dev->udev = udev; | 452 | dev->udev = udev; |
| @@ -500,7 +496,6 @@ static int ipheth_probe(struct usb_interface *intf, | |||
| 500 | 496 | ||
| 501 | SET_NETDEV_DEV(netdev, &intf->dev); | 497 | SET_NETDEV_DEV(netdev, &intf->dev); |
| 502 | SET_ETHTOOL_OPS(netdev, &ops); | 498 | SET_ETHTOOL_OPS(netdev, &ops); |
| 503 | SET_NETDEV_DEVTYPE(netdev, &ipheth_type); | ||
| 504 | 499 | ||
| 505 | retval = register_netdev(netdev); | 500 | retval = register_netdev(netdev); |
| 506 | if (retval) { | 501 | if (retval) { |
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index fd69095ef6e3..f53412368ce1 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c | |||
| @@ -2824,7 +2824,7 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi | |||
| 2824 | netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT); | 2824 | netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT); |
| 2825 | 2825 | ||
| 2826 | dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | | 2826 | dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | |
| 2827 | NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM | NETIF_F_SG; | 2827 | NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM; |
| 2828 | 2828 | ||
| 2829 | ret = register_netdev(dev); | 2829 | ret = register_netdev(dev); |
| 2830 | if (ret < 0) | 2830 | if (ret < 0) |
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index 373dcfec689c..d77ce9906b6c 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c | |||
| @@ -1327,6 +1327,10 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf, | |||
| 1327 | PCI_DMA_TODEVICE); | 1327 | PCI_DMA_TODEVICE); |
| 1328 | 1328 | ||
| 1329 | rate = ieee80211_get_tx_rate(sc->hw, info); | 1329 | rate = ieee80211_get_tx_rate(sc->hw, info); |
| 1330 | if (!rate) { | ||
| 1331 | ret = -EINVAL; | ||
| 1332 | goto err_unmap; | ||
| 1333 | } | ||
| 1330 | 1334 | ||
| 1331 | if (info->flags & IEEE80211_TX_CTL_NO_ACK) | 1335 | if (info->flags & IEEE80211_TX_CTL_NO_ACK) |
| 1332 | flags |= AR5K_TXDESC_NOACK; | 1336 | flags |= AR5K_TXDESC_NOACK; |
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index b883b174385b..057fb69ddf7f 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c | |||
| @@ -797,7 +797,7 @@ static bool ar9300_uncompress_block(struct ath_hw *ah, | |||
| 797 | length = block[it+1]; | 797 | length = block[it+1]; |
| 798 | length &= 0xff; | 798 | length &= 0xff; |
| 799 | 799 | ||
| 800 | if (length > 0 && spot >= 0 && spot+length < mdataSize) { | 800 | if (length > 0 && spot >= 0 && spot+length <= mdataSize) { |
| 801 | ath_print(common, ATH_DBG_EEPROM, | 801 | ath_print(common, ATH_DBG_EEPROM, |
| 802 | "Restore at %d: spot=%d " | 802 | "Restore at %d: spot=%d " |
| 803 | "offset=%d length=%d\n", | 803 | "offset=%d length=%d\n", |
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h index 7f48df1e2903..0b09db0f8e7d 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom.h +++ b/drivers/net/wireless/ath/ath9k/eeprom.h | |||
| @@ -62,7 +62,7 @@ | |||
| 62 | 62 | ||
| 63 | #define SD_NO_CTL 0xE0 | 63 | #define SD_NO_CTL 0xE0 |
| 64 | #define NO_CTL 0xff | 64 | #define NO_CTL 0xff |
| 65 | #define CTL_MODE_M 7 | 65 | #define CTL_MODE_M 0xf |
| 66 | #define CTL_11A 0 | 66 | #define CTL_11A 0 |
| 67 | #define CTL_11B 1 | 67 | #define CTL_11B 1 |
| 68 | #define CTL_11G 2 | 68 | #define CTL_11G 2 |
diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h index a1c39526161a..345dd9721b41 100644 --- a/drivers/net/wireless/ath/regd.h +++ b/drivers/net/wireless/ath/regd.h | |||
| @@ -31,7 +31,6 @@ enum ctl_group { | |||
| 31 | #define NO_CTL 0xff | 31 | #define NO_CTL 0xff |
| 32 | #define SD_NO_CTL 0xE0 | 32 | #define SD_NO_CTL 0xE0 |
| 33 | #define NO_CTL 0xff | 33 | #define NO_CTL 0xff |
| 34 | #define CTL_MODE_M 7 | ||
| 35 | #define CTL_11A 0 | 34 | #define CTL_11A 0 |
| 36 | #define CTL_11B 1 | 35 | #define CTL_11B 1 |
| 37 | #define CTL_11G 2 | 36 | #define CTL_11G 2 |
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c index ba854c70ab94..87b634978b35 100644 --- a/drivers/net/wireless/libertas/if_sdio.c +++ b/drivers/net/wireless/libertas/if_sdio.c | |||
| @@ -128,7 +128,7 @@ struct if_sdio_card { | |||
| 128 | bool helper_allocated; | 128 | bool helper_allocated; |
| 129 | bool firmware_allocated; | 129 | bool firmware_allocated; |
| 130 | 130 | ||
| 131 | u8 buffer[65536]; | 131 | u8 buffer[65536] __attribute__((aligned(4))); |
| 132 | 132 | ||
| 133 | spinlock_t lock; | 133 | spinlock_t lock; |
| 134 | struct if_sdio_packet *packets; | 134 | struct if_sdio_packet *packets; |
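The libertas one-liner above forces the large transfer buffer to a 4-byte boundary; inside a struct, a plain `u8` array otherwise only gets byte alignment. A small sketch showing the effect of the attribute on member placement (struct names are made up for the example):

```c
#include <stdio.h>
#include <stddef.h>

struct card_plain {
	char flag;
	unsigned char buffer[64];
};

struct card_aligned {
	char flag;
	unsigned char buffer[64] __attribute__((aligned(4)));
};

int main(void)
{
	/* plain: buffer starts right after flag (offset 1);
	 * aligned: buffer is pushed to the next 4-byte boundary */
	printf("plain offset   = %zu\n", offsetof(struct card_plain, buffer));
	printf("aligned offset = %zu\n", offsetof(struct card_aligned, buffer));
	return 0;
}
```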
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c index 173aec3d6e7e..0e937dc0c9c4 100644 --- a/drivers/net/wireless/p54/txrx.c +++ b/drivers/net/wireless/p54/txrx.c | |||
| @@ -446,7 +446,7 @@ static void p54_rx_frame_sent(struct p54_common *priv, struct sk_buff *skb) | |||
| 446 | } | 446 | } |
| 447 | 447 | ||
| 448 | if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) && | 448 | if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) && |
| 449 | (!payload->status)) | 449 | !(payload->status & P54_TX_FAILED)) |
| 450 | info->flags |= IEEE80211_TX_STAT_ACK; | 450 | info->flags |= IEEE80211_TX_STAT_ACK; |
| 451 | if (payload->status & P54_TX_PSM_CANCELLED) | 451 | if (payload->status & P54_TX_PSM_CANCELLED) |
| 452 | info->flags |= IEEE80211_TX_STAT_TX_FILTERED; | 452 | info->flags |= IEEE80211_TX_STAT_TX_FILTERED; |
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c index a9352b2c7ac4..b7e755f4178a 100644 --- a/drivers/oprofile/buffer_sync.c +++ b/drivers/oprofile/buffer_sync.c | |||
| @@ -141,16 +141,6 @@ static struct notifier_block module_load_nb = { | |||
| 141 | .notifier_call = module_load_notify, | 141 | .notifier_call = module_load_notify, |
| 142 | }; | 142 | }; |
| 143 | 143 | ||
| 144 | |||
| 145 | static void end_sync(void) | ||
| 146 | { | ||
| 147 | end_cpu_work(); | ||
| 148 | /* make sure we don't leak task structs */ | ||
| 149 | process_task_mortuary(); | ||
| 150 | process_task_mortuary(); | ||
| 151 | } | ||
| 152 | |||
| 153 | |||
| 154 | int sync_start(void) | 144 | int sync_start(void) |
| 155 | { | 145 | { |
| 156 | int err; | 146 | int err; |
| @@ -158,7 +148,7 @@ int sync_start(void) | |||
| 158 | if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL)) | 148 | if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL)) |
| 159 | return -ENOMEM; | 149 | return -ENOMEM; |
| 160 | 150 | ||
| 161 | start_cpu_work(); | 151 | mutex_lock(&buffer_mutex); |
| 162 | 152 | ||
| 163 | err = task_handoff_register(&task_free_nb); | 153 | err = task_handoff_register(&task_free_nb); |
| 164 | if (err) | 154 | if (err) |
| @@ -173,7 +163,10 @@ int sync_start(void) | |||
| 173 | if (err) | 163 | if (err) |
| 174 | goto out4; | 164 | goto out4; |
| 175 | 165 | ||
| 166 | start_cpu_work(); | ||
| 167 | |||
| 176 | out: | 168 | out: |
| 169 | mutex_unlock(&buffer_mutex); | ||
| 177 | return err; | 170 | return err; |
| 178 | out4: | 171 | out4: |
| 179 | profile_event_unregister(PROFILE_MUNMAP, &munmap_nb); | 172 | profile_event_unregister(PROFILE_MUNMAP, &munmap_nb); |
| @@ -182,7 +175,6 @@ out3: | |||
| 182 | out2: | 175 | out2: |
| 183 | task_handoff_unregister(&task_free_nb); | 176 | task_handoff_unregister(&task_free_nb); |
| 184 | out1: | 177 | out1: |
| 185 | end_sync(); | ||
| 186 | free_cpumask_var(marked_cpus); | 178 | free_cpumask_var(marked_cpus); |
| 187 | goto out; | 179 | goto out; |
| 188 | } | 180 | } |
| @@ -190,11 +182,20 @@ out1: | |||
| 190 | 182 | ||
| 191 | void sync_stop(void) | 183 | void sync_stop(void) |
| 192 | { | 184 | { |
| 185 | /* flush buffers */ | ||
| 186 | mutex_lock(&buffer_mutex); | ||
| 187 | end_cpu_work(); | ||
| 193 | unregister_module_notifier(&module_load_nb); | 188 | unregister_module_notifier(&module_load_nb); |
| 194 | profile_event_unregister(PROFILE_MUNMAP, &munmap_nb); | 189 | profile_event_unregister(PROFILE_MUNMAP, &munmap_nb); |
| 195 | profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb); | 190 | profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb); |
| 196 | task_handoff_unregister(&task_free_nb); | 191 | task_handoff_unregister(&task_free_nb); |
| 197 | end_sync(); | 192 | mutex_unlock(&buffer_mutex); |
| 193 | flush_scheduled_work(); | ||
| 194 | |||
| 195 | /* make sure we don't leak task structs */ | ||
| 196 | process_task_mortuary(); | ||
| 197 | process_task_mortuary(); | ||
| 198 | |||
| 198 | free_cpumask_var(marked_cpus); | 199 | free_cpumask_var(marked_cpus); |
| 199 | } | 200 | } |
| 200 | 201 | ||
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c index 219f79e2210a..f179ac2ea801 100644 --- a/drivers/oprofile/cpu_buffer.c +++ b/drivers/oprofile/cpu_buffer.c | |||
| @@ -120,8 +120,6 @@ void end_cpu_work(void) | |||
| 120 | 120 | ||
| 121 | cancel_delayed_work(&b->work); | 121 | cancel_delayed_work(&b->work); |
| 122 | } | 122 | } |
| 123 | |||
| 124 | flush_scheduled_work(); | ||
| 125 | } | 123 | } |
| 126 | 124 | ||
| 127 | /* | 125 | /* |
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c index 45fcc1e96df9..3bc72d18b121 100644 --- a/drivers/pci/hotplug/acpi_pcihp.c +++ b/drivers/pci/hotplug/acpi_pcihp.c | |||
| @@ -338,9 +338,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags) | |||
| 338 | acpi_handle chandle, handle; | 338 | acpi_handle chandle, handle; |
| 339 | struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; | 339 | struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; |
| 340 | 340 | ||
| 341 | flags &= (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | | 341 | flags &= OSC_SHPC_NATIVE_HP_CONTROL; |
| 342 | OSC_SHPC_NATIVE_HP_CONTROL | | ||
| 343 | OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); | ||
| 344 | if (!flags) { | 342 | if (!flags) { |
| 345 | err("Invalid flags %u specified!\n", flags); | 343 | err("Invalid flags %u specified!\n", flags); |
| 346 | return -EINVAL; | 344 | return -EINVAL; |
| @@ -360,7 +358,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags) | |||
| 360 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); | 358 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); |
| 361 | dbg("Trying to get hotplug control for %s\n", | 359 | dbg("Trying to get hotplug control for %s\n", |
| 362 | (char *)string.pointer); | 360 | (char *)string.pointer); |
| 363 | status = acpi_pci_osc_control_set(handle, flags); | 361 | status = acpi_pci_osc_control_set(handle, &flags, flags); |
| 364 | if (ACPI_SUCCESS(status)) | 362 | if (ACPI_SUCCESS(status)) |
| 365 | goto got_one; | 363 | goto got_one; |
| 366 | if (status == AE_SUPPORT) | 364 | if (status == AE_SUPPORT) |
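The hunk above adopts the new three-argument acpi_pci_osc_control_set() interface: the requested control bits go in through a pointer, the granted bits come back through the same pointer, and the last argument names the bits that must be granted for the call to succeed. A minimal caller sketch, assuming a root-bridge ACPI handle and a struct pci_dev *pdev are already available:

	u32 flags = OSC_SHPC_NATIVE_HP_CONTROL;
	acpi_status status;

	/* Request SHPC hot-plug control; on success the granted bits are
	 * written back into flags. */
	status = acpi_pci_osc_control_set(handle, &flags, flags);
	if (ACPI_SUCCESS(status) && (flags & OSC_SHPC_NATIVE_HP_CONTROL))
		dev_info(&pdev->dev, "firmware yielded SHPC hot-plug control\n");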
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index 4ed76b47b6dc..73d513989263 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h | |||
| @@ -176,19 +176,11 @@ static inline void pciehp_firmware_init(void) | |||
| 176 | { | 176 | { |
| 177 | pciehp_acpi_slot_detection_init(); | 177 | pciehp_acpi_slot_detection_init(); |
| 178 | } | 178 | } |
| 179 | |||
| 180 | static inline int pciehp_get_hp_hw_control_from_firmware(struct pci_dev *dev) | ||
| 181 | { | ||
| 182 | int retval; | ||
| 183 | u32 flags = (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | | ||
| 184 | OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); | ||
| 185 | retval = acpi_get_hp_hw_control_from_firmware(dev, flags); | ||
| 186 | if (retval) | ||
| 187 | return retval; | ||
| 188 | return pciehp_acpi_slot_detection_check(dev); | ||
| 189 | } | ||
| 190 | #else | 179 | #else |
| 191 | #define pciehp_firmware_init() do {} while (0) | 180 | #define pciehp_firmware_init() do {} while (0) |
| 192 | #define pciehp_get_hp_hw_control_from_firmware(dev) 0 | 181 | static inline int pciehp_acpi_slot_detection_check(struct pci_dev *dev) |
| 182 | { | ||
| 183 | return 0; | ||
| 184 | } | ||
| 193 | #endif /* CONFIG_ACPI */ | 185 | #endif /* CONFIG_ACPI */ |
| 194 | #endif /* _PCIEHP_H */ | 186 | #endif /* _PCIEHP_H */ |
diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c index 1f4000a5a108..2574700db461 100644 --- a/drivers/pci/hotplug/pciehp_acpi.c +++ b/drivers/pci/hotplug/pciehp_acpi.c | |||
| @@ -85,9 +85,7 @@ static int __init dummy_probe(struct pcie_device *dev) | |||
| 85 | acpi_handle handle; | 85 | acpi_handle handle; |
| 86 | struct dummy_slot *slot, *tmp; | 86 | struct dummy_slot *slot, *tmp; |
| 87 | struct pci_dev *pdev = dev->port; | 87 | struct pci_dev *pdev = dev->port; |
| 88 | /* Note: pciehp_detect_mode != PCIEHP_DETECT_ACPI here */ | 88 | |
| 89 | if (pciehp_get_hp_hw_control_from_firmware(pdev)) | ||
| 90 | return -ENODEV; | ||
| 91 | pos = pci_pcie_cap(pdev); | 89 | pos = pci_pcie_cap(pdev); |
| 92 | if (!pos) | 90 | if (!pos) |
| 93 | return -ENODEV; | 91 | return -ENODEV; |
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index 3588ea61b0dd..aa5f3ff629ff 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c | |||
| @@ -59,7 +59,7 @@ module_param(pciehp_force, bool, 0644); | |||
| 59 | MODULE_PARM_DESC(pciehp_debug, "Debugging mode enabled or not"); | 59 | MODULE_PARM_DESC(pciehp_debug, "Debugging mode enabled or not"); |
| 60 | MODULE_PARM_DESC(pciehp_poll_mode, "Using polling mechanism for hot-plug events or not"); | 60 | MODULE_PARM_DESC(pciehp_poll_mode, "Using polling mechanism for hot-plug events or not"); |
| 61 | MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds"); | 61 | MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds"); |
| 62 | MODULE_PARM_DESC(pciehp_force, "Force pciehp, even if _OSC and OSHP are missing"); | 62 | MODULE_PARM_DESC(pciehp_force, "Force pciehp, even if OSHP is missing"); |
| 63 | 63 | ||
| 64 | #define PCIE_MODULE_NAME "pciehp" | 64 | #define PCIE_MODULE_NAME "pciehp" |
| 65 | 65 | ||
| @@ -235,7 +235,7 @@ static int pciehp_probe(struct pcie_device *dev) | |||
| 235 | dev_info(&dev->device, | 235 | dev_info(&dev->device, |
| 236 | "Bypassing BIOS check for pciehp use on %s\n", | 236 | "Bypassing BIOS check for pciehp use on %s\n", |
| 237 | pci_name(dev->port)); | 237 | pci_name(dev->port)); |
| 238 | else if (pciehp_get_hp_hw_control_from_firmware(dev->port)) | 238 | else if (pciehp_acpi_slot_detection_check(dev->port)) |
| 239 | goto err_out_none; | 239 | goto err_out_none; |
| 240 | 240 | ||
| 241 | ctrl = pcie_init(dev); | 241 | ctrl = pcie_init(dev); |
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 679c39de6a89..7754a678ab15 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h | |||
| @@ -140,8 +140,10 @@ static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { } | |||
| 140 | 140 | ||
| 141 | #ifdef CONFIG_PCIEAER | 141 | #ifdef CONFIG_PCIEAER |
| 142 | void pci_no_aer(void); | 142 | void pci_no_aer(void); |
| 143 | bool pci_aer_available(void); | ||
| 143 | #else | 144 | #else |
| 144 | static inline void pci_no_aer(void) { } | 145 | static inline void pci_no_aer(void) { } |
| 146 | static inline bool pci_aer_available(void) { return false; } | ||
| 145 | #endif | 147 | #endif |
| 146 | 148 | ||
| 147 | static inline int pci_no_d1d2(struct pci_dev *dev) | 149 | static inline int pci_no_d1d2(struct pci_dev *dev) |
diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile index ea654545e7c4..00c62df5a9fc 100644 --- a/drivers/pci/pcie/Makefile +++ b/drivers/pci/pcie/Makefile | |||
| @@ -6,10 +6,11 @@ | |||
| 6 | obj-$(CONFIG_PCIEASPM) += aspm.o | 6 | obj-$(CONFIG_PCIEASPM) += aspm.o |
| 7 | 7 | ||
| 8 | pcieportdrv-y := portdrv_core.o portdrv_pci.o portdrv_bus.o | 8 | pcieportdrv-y := portdrv_core.o portdrv_pci.o portdrv_bus.o |
| 9 | pcieportdrv-$(CONFIG_ACPI) += portdrv_acpi.o | ||
| 9 | 10 | ||
| 10 | obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o | 11 | obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o |
| 11 | 12 | ||
| 12 | # Build PCI Express AER if needed | 13 | # Build PCI Express AER if needed |
| 13 | obj-$(CONFIG_PCIEAER) += aer/ | 14 | obj-$(CONFIG_PCIEAER) += aer/ |
| 14 | 15 | ||
| 15 | obj-$(CONFIG_PCIE_PME) += pme/ | 16 | obj-$(CONFIG_PCIE_PME) += pme.o |
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c index 484cc55194b8..f409948e1a9b 100644 --- a/drivers/pci/pcie/aer/aerdrv.c +++ b/drivers/pci/pcie/aer/aerdrv.c | |||
| @@ -72,6 +72,11 @@ void pci_no_aer(void) | |||
| 72 | pcie_aer_disable = 1; /* has priority over 'forceload' */ | 72 | pcie_aer_disable = 1; /* has priority over 'forceload' */ |
| 73 | } | 73 | } |
| 74 | 74 | ||
| 75 | bool pci_aer_available(void) | ||
| 76 | { | ||
| 77 | return !pcie_aer_disable && pci_msi_enabled(); | ||
| 78 | } | ||
| 79 | |||
| 75 | static int set_device_error_reporting(struct pci_dev *dev, void *data) | 80 | static int set_device_error_reporting(struct pci_dev *dev, void *data) |
| 76 | { | 81 | { |
| 77 | bool enable = *((bool *)data); | 82 | bool enable = *((bool *)data); |
| @@ -411,9 +416,7 @@ static void aer_error_resume(struct pci_dev *dev) | |||
| 411 | */ | 416 | */ |
| 412 | static int __init aer_service_init(void) | 417 | static int __init aer_service_init(void) |
| 413 | { | 418 | { |
| 414 | if (pcie_aer_disable) | 419 | if (!pci_aer_available()) |
| 415 | return -ENXIO; | ||
| 416 | if (!pci_msi_enabled()) | ||
| 417 | return -ENXIO; | 420 | return -ENXIO; |
| 418 | return pcie_port_service_register(&aerdriver); | 421 | return pcie_port_service_register(&aerdriver); |
| 419 | } | 422 | } |
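With pci_aer_available() exported from aerdrv.c, other parts of the PCIe core can test whether native AER is actually usable (not disabled on the command line, MSI enabled) before advertising the service. A sketch of the intended check, assuming a local cap_mask variable as in the port driver code later in this series:

	/* Only offer the AER port service when the AER core can run. */
	if (pci_aer_available())
		cap_mask |= PCIE_PORT_SERVICE_AER;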
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c index f278d7b0d95d..2bb9b8972211 100644 --- a/drivers/pci/pcie/aer/aerdrv_acpi.c +++ b/drivers/pci/pcie/aer/aerdrv_acpi.c | |||
| @@ -19,42 +19,6 @@ | |||
| 19 | #include <acpi/apei.h> | 19 | #include <acpi/apei.h> |
| 20 | #include "aerdrv.h" | 20 | #include "aerdrv.h" |
| 21 | 21 | ||
| 22 | /** | ||
| 23 | * aer_osc_setup - run ACPI _OSC method | ||
| 24 | * @pciedev: pcie_device which AER is being enabled on | ||
| 25 | * | ||
| 26 | * @return: Zero on success. Nonzero otherwise. | ||
| 27 | * | ||
| 28 | * Invoked when PCIe bus loads AER service driver. To avoid conflict with | ||
| 29 | * BIOS AER support requires BIOS to yield AER control to OS native driver. | ||
| 30 | **/ | ||
| 31 | int aer_osc_setup(struct pcie_device *pciedev) | ||
| 32 | { | ||
| 33 | acpi_status status = AE_NOT_FOUND; | ||
| 34 | struct pci_dev *pdev = pciedev->port; | ||
| 35 | acpi_handle handle = NULL; | ||
| 36 | |||
| 37 | if (acpi_pci_disabled) | ||
| 38 | return -1; | ||
| 39 | |||
| 40 | handle = acpi_find_root_bridge_handle(pdev); | ||
| 41 | if (handle) { | ||
| 42 | status = acpi_pci_osc_control_set(handle, | ||
| 43 | OSC_PCI_EXPRESS_AER_CONTROL | | ||
| 44 | OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); | ||
| 45 | } | ||
| 46 | |||
| 47 | if (ACPI_FAILURE(status)) { | ||
| 48 | dev_printk(KERN_DEBUG, &pciedev->device, "AER service couldn't " | ||
| 49 | "init device: %s\n", | ||
| 50 | (status == AE_SUPPORT || status == AE_NOT_FOUND) ? | ||
| 51 | "no _OSC support" : "_OSC failed"); | ||
| 52 | return -1; | ||
| 53 | } | ||
| 54 | |||
| 55 | return 0; | ||
| 56 | } | ||
| 57 | |||
| 58 | #ifdef CONFIG_ACPI_APEI | 22 | #ifdef CONFIG_ACPI_APEI |
| 59 | static inline int hest_match_pci(struct acpi_hest_aer_common *p, | 23 | static inline int hest_match_pci(struct acpi_hest_aer_common *p, |
| 60 | struct pci_dev *pci) | 24 | struct pci_dev *pci) |
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c index fc0b5a93e1de..29e268fadf14 100644 --- a/drivers/pci/pcie/aer/aerdrv_core.c +++ b/drivers/pci/pcie/aer/aerdrv_core.c | |||
| @@ -772,22 +772,10 @@ void aer_isr(struct work_struct *work) | |||
| 772 | */ | 772 | */ |
| 773 | int aer_init(struct pcie_device *dev) | 773 | int aer_init(struct pcie_device *dev) |
| 774 | { | 774 | { |
| 775 | if (pcie_aer_get_firmware_first(dev->port)) { | ||
| 776 | dev_printk(KERN_DEBUG, &dev->device, | ||
| 777 | "PCIe errors handled by platform firmware.\n"); | ||
| 778 | goto out; | ||
| 779 | } | ||
| 780 | |||
| 781 | if (aer_osc_setup(dev)) | ||
| 782 | goto out; | ||
| 783 | |||
| 784 | return 0; | ||
| 785 | out: | ||
| 786 | if (forceload) { | 775 | if (forceload) { |
| 787 | dev_printk(KERN_DEBUG, &dev->device, | 776 | dev_printk(KERN_DEBUG, &dev->device, |
| 788 | "aerdrv forceload requested.\n"); | 777 | "aerdrv forceload requested.\n"); |
| 789 | pcie_aer_force_firmware_first(dev->port, 0); | 778 | pcie_aer_force_firmware_first(dev->port, 0); |
| 790 | return 0; | ||
| 791 | } | 779 | } |
| 792 | return -ENXIO; | 780 | return 0; |
| 793 | } | 781 | } |
diff --git a/drivers/pci/pcie/pme/pcie_pme.c b/drivers/pci/pcie/pme.c index bbdea18693d9..2f3c90407227 100644 --- a/drivers/pci/pcie/pme/pcie_pme.c +++ b/drivers/pci/pcie/pme.c | |||
| @@ -23,38 +23,13 @@ | |||
| 23 | #include <linux/pci-acpi.h> | 23 | #include <linux/pci-acpi.h> |
| 24 | #include <linux/pm_runtime.h> | 24 | #include <linux/pm_runtime.h> |
| 25 | 25 | ||
| 26 | #include "../../pci.h" | 26 | #include "../pci.h" |
| 27 | #include "pcie_pme.h" | 27 | #include "portdrv.h" |
| 28 | 28 | ||
| 29 | #define PCI_EXP_RTSTA_PME 0x10000 /* PME status */ | 29 | #define PCI_EXP_RTSTA_PME 0x10000 /* PME status */ |
| 30 | #define PCI_EXP_RTSTA_PENDING 0x20000 /* PME pending */ | 30 | #define PCI_EXP_RTSTA_PENDING 0x20000 /* PME pending */ |
| 31 | 31 | ||
| 32 | /* | 32 | /* |
| 33 | * If set, this switch will prevent the PCIe root port PME service driver from | ||
| 34 | * being registered. Consequently, the interrupt-based PCIe PME signaling will | ||
| 35 | * not be used by any PCIe root ports in that case. | ||
| 36 | */ | ||
| 37 | static bool pcie_pme_disabled = true; | ||
| 38 | |||
| 39 | /* | ||
| 40 | * The PCI Express Base Specification 2.0, Section 6.1.8, states the following: | ||
| 41 | * "In order to maintain compatibility with non-PCI Express-aware system | ||
| 42 | * software, system power management logic must be configured by firmware to use | ||
| 43 | * the legacy mechanism of signaling PME by default. PCI Express-aware system | ||
| 44 | * software must notify the firmware prior to enabling native, interrupt-based | ||
| 45 | * PME signaling." However, if the platform doesn't provide us with a suitable | ||
| 46 | * notification mechanism or the notification fails, it is not clear whether or | ||
| 47 | * not we are supposed to use the interrupt-based PCIe PME signaling. The | ||
| 48 | * switch below can be used to indicate the desired behaviour. When set, it | ||
| 49 | * will make the kernel use the interrupt-based PCIe PME signaling regardless of | ||
| 50 | * the platform notification status, although the kernel will attempt to notify | ||
| 51 | * the platform anyway. When unset, it will prevent the kernel from using the | ||
| 52 | * the interrupt-based PCIe PME signaling if the platform notification fails, | ||
| 53 | * which is the default. | ||
| 54 | */ | ||
| 55 | static bool pcie_pme_force_enable; | ||
| 56 | |||
| 57 | /* | ||
| 58 | * If this switch is set, MSI will not be used for PCIe PME signaling. This | 33 | * If this switch is set, MSI will not be used for PCIe PME signaling. This |
| 59 | * causes the PCIe port driver to use INTx interrupts only, but it turns out | 34 | * causes the PCIe port driver to use INTx interrupts only, but it turns out |
| 60 | * that using MSI for PCIe PME signaling doesn't play well with PCIe PME-based | 35 | * that using MSI for PCIe PME signaling doesn't play well with PCIe PME-based |
| @@ -64,38 +39,13 @@ bool pcie_pme_msi_disabled; | |||
| 64 | 39 | ||
| 65 | static int __init pcie_pme_setup(char *str) | 40 | static int __init pcie_pme_setup(char *str) |
| 66 | { | 41 | { |
| 67 | if (!strncmp(str, "auto", 4)) | 42 | if (!strncmp(str, "nomsi", 5)) |
| 68 | pcie_pme_disabled = false; | 43 | pcie_pme_msi_disabled = true; |
| 69 | else if (!strncmp(str, "force", 5)) | ||
| 70 | pcie_pme_force_enable = true; | ||
| 71 | |||
| 72 | str = strchr(str, ','); | ||
| 73 | if (str) { | ||
| 74 | str++; | ||
| 75 | str += strspn(str, " \t"); | ||
| 76 | if (*str && !strcmp(str, "nomsi")) | ||
| 77 | pcie_pme_msi_disabled = true; | ||
| 78 | } | ||
| 79 | 44 | ||
| 80 | return 1; | 45 | return 1; |
| 81 | } | 46 | } |
| 82 | __setup("pcie_pme=", pcie_pme_setup); | 47 | __setup("pcie_pme=", pcie_pme_setup); |
| 83 | 48 | ||
| 84 | /** | ||
| 85 | * pcie_pme_platform_setup - Ensure that the kernel controls the PCIe PME. | ||
| 86 | * @srv: PCIe PME root port service to use for carrying out the check. | ||
| 87 | * | ||
| 88 | * Notify the platform that the native PCIe PME is going to be used and return | ||
| 89 | * 'true' if the control of the PCIe PME registers has been acquired from the | ||
| 90 | * platform. | ||
| 91 | */ | ||
| 92 | static bool pcie_pme_platform_setup(struct pcie_device *srv) | ||
| 93 | { | ||
| 94 | if (!pcie_pme_platform_notify(srv)) | ||
| 95 | return true; | ||
| 96 | return pcie_pme_force_enable; | ||
| 97 | } | ||
| 98 | |||
| 99 | struct pcie_pme_service_data { | 49 | struct pcie_pme_service_data { |
| 100 | spinlock_t lock; | 50 | spinlock_t lock; |
| 101 | struct pcie_device *srv; | 51 | struct pcie_device *srv; |
| @@ -108,7 +58,7 @@ struct pcie_pme_service_data { | |||
| 108 | * @dev: PCIe root port or event collector. | 58 | * @dev: PCIe root port or event collector. |
| 109 | * @enable: Enable or disable the interrupt. | 59 | * @enable: Enable or disable the interrupt. |
| 110 | */ | 60 | */ |
| 111 | static void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable) | 61 | void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable) |
| 112 | { | 62 | { |
| 113 | int rtctl_pos; | 63 | int rtctl_pos; |
| 114 | u16 rtctl; | 64 | u16 rtctl; |
| @@ -417,9 +367,6 @@ static int pcie_pme_probe(struct pcie_device *srv) | |||
| 417 | struct pcie_pme_service_data *data; | 367 | struct pcie_pme_service_data *data; |
| 418 | int ret; | 368 | int ret; |
| 419 | 369 | ||
| 420 | if (!pcie_pme_platform_setup(srv)) | ||
| 421 | return -EACCES; | ||
| 422 | |||
| 423 | data = kzalloc(sizeof(*data), GFP_KERNEL); | 370 | data = kzalloc(sizeof(*data), GFP_KERNEL); |
| 424 | if (!data) | 371 | if (!data) |
| 425 | return -ENOMEM; | 372 | return -ENOMEM; |
| @@ -509,8 +456,7 @@ static struct pcie_port_service_driver pcie_pme_driver = { | |||
| 509 | */ | 456 | */ |
| 510 | static int __init pcie_pme_service_init(void) | 457 | static int __init pcie_pme_service_init(void) |
| 511 | { | 458 | { |
| 512 | return pcie_pme_disabled ? | 459 | return pcie_port_service_register(&pcie_pme_driver); |
| 513 | -ENODEV : pcie_port_service_register(&pcie_pme_driver); | ||
| 514 | } | 460 | } |
| 515 | 461 | ||
| 516 | module_init(pcie_pme_service_init); | 462 | module_init(pcie_pme_service_init); |
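After this simplification the pcie_pme= parameter no longer selects an operating mode; the parser recognizes a single keyword. An illustrative boot-line usage:

	pcie_pme=nomsi    (sets pcie_pme_msi_disabled, so PME signaling falls back to INTx)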
diff --git a/drivers/pci/pcie/pme/Makefile b/drivers/pci/pcie/pme/Makefile deleted file mode 100644 index 8b9238053080..000000000000 --- a/drivers/pci/pcie/pme/Makefile +++ /dev/null | |||
| @@ -1,8 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Makefile for PCI-Express Root Port PME signaling driver | ||
| 3 | # | ||
| 4 | |||
| 5 | obj-$(CONFIG_PCIE_PME) += pmedriver.o | ||
| 6 | |||
| 7 | pmedriver-objs := pcie_pme.o | ||
| 8 | pmedriver-$(CONFIG_ACPI) += pcie_pme_acpi.o | ||
diff --git a/drivers/pci/pcie/pme/pcie_pme.h b/drivers/pci/pcie/pme/pcie_pme.h deleted file mode 100644 index b30d2b7c9775..000000000000 --- a/drivers/pci/pcie/pme/pcie_pme.h +++ /dev/null | |||
| @@ -1,28 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * drivers/pci/pcie/pme/pcie_pme.h | ||
| 3 | * | ||
| 4 | * PCI Express Root Port PME signaling support | ||
| 5 | * | ||
| 6 | * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. | ||
| 7 | */ | ||
| 8 | |||
| 9 | #ifndef _PCIE_PME_H_ | ||
| 10 | #define _PCIE_PME_H_ | ||
| 11 | |||
| 12 | struct pcie_device; | ||
| 13 | |||
| 14 | #ifdef CONFIG_ACPI | ||
| 15 | extern int pcie_pme_acpi_setup(struct pcie_device *srv); | ||
| 16 | |||
| 17 | static inline int pcie_pme_platform_notify(struct pcie_device *srv) | ||
| 18 | { | ||
| 19 | return pcie_pme_acpi_setup(srv); | ||
| 20 | } | ||
| 21 | #else /* !CONFIG_ACPI */ | ||
| 22 | static inline int pcie_pme_platform_notify(struct pcie_device *srv) | ||
| 23 | { | ||
| 24 | return 0; | ||
| 25 | } | ||
| 26 | #endif /* !CONFIG_ACPI */ | ||
| 27 | |||
| 28 | #endif | ||
diff --git a/drivers/pci/pcie/pme/pcie_pme_acpi.c b/drivers/pci/pcie/pme/pcie_pme_acpi.c deleted file mode 100644 index 83ab2287ae3f..000000000000 --- a/drivers/pci/pcie/pme/pcie_pme_acpi.c +++ /dev/null | |||
| @@ -1,54 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * PCIe Native PME support, ACPI-related part | ||
| 3 | * | ||
| 4 | * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. | ||
| 5 | * | ||
| 6 | * This file is subject to the terms and conditions of the GNU General Public | ||
| 7 | * License V2. See the file "COPYING" in the main directory of this archive | ||
| 8 | * for more details. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #include <linux/pci.h> | ||
| 12 | #include <linux/kernel.h> | ||
| 13 | #include <linux/errno.h> | ||
| 14 | #include <linux/acpi.h> | ||
| 15 | #include <linux/pci-acpi.h> | ||
| 16 | #include <linux/pcieport_if.h> | ||
| 17 | |||
| 18 | /** | ||
| 19 | * pcie_pme_acpi_setup - Request the ACPI BIOS to release control over PCIe PME. | ||
| 20 | * @srv - PCIe PME service for a root port or event collector. | ||
| 21 | * | ||
| 22 | * Invoked when the PCIe bus type loads PCIe PME service driver. To avoid | ||
| 23 | * conflict with the BIOS PCIe support requires the BIOS to yield PCIe PME | ||
| 24 | * control to the kernel. | ||
| 25 | */ | ||
| 26 | int pcie_pme_acpi_setup(struct pcie_device *srv) | ||
| 27 | { | ||
| 28 | acpi_status status = AE_NOT_FOUND; | ||
| 29 | struct pci_dev *port = srv->port; | ||
| 30 | acpi_handle handle; | ||
| 31 | int error = 0; | ||
| 32 | |||
| 33 | if (acpi_pci_disabled) | ||
| 34 | return -ENOSYS; | ||
| 35 | |||
| 36 | dev_info(&port->dev, "Requesting control of PCIe PME from ACPI BIOS\n"); | ||
| 37 | |||
| 38 | handle = acpi_find_root_bridge_handle(port); | ||
| 39 | if (!handle) | ||
| 40 | return -EINVAL; | ||
| 41 | |||
| 42 | status = acpi_pci_osc_control_set(handle, | ||
| 43 | OSC_PCI_EXPRESS_PME_CONTROL | | ||
| 44 | OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); | ||
| 45 | if (ACPI_FAILURE(status)) { | ||
| 46 | dev_info(&port->dev, | ||
| 47 | "Failed to receive control of PCIe PME service: %s\n", | ||
| 48 | (status == AE_SUPPORT || status == AE_NOT_FOUND) ? | ||
| 49 | "no _OSC support" : "ACPI _OSC failed"); | ||
| 50 | error = -ENODEV; | ||
| 51 | } | ||
| 52 | |||
| 53 | return error; | ||
| 54 | } | ||
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h index 813a5c3427b6..7b5aba0a3291 100644 --- a/drivers/pci/pcie/portdrv.h +++ b/drivers/pci/pcie/portdrv.h | |||
| @@ -20,6 +20,9 @@ | |||
| 20 | 20 | ||
| 21 | #define get_descriptor_id(type, service) (((type - 4) << 4) | service) | 21 | #define get_descriptor_id(type, service) (((type - 4) << 4) | service) |
| 22 | 22 | ||
| 23 | extern bool pcie_ports_disabled; | ||
| 24 | extern bool pcie_ports_auto; | ||
| 25 | |||
| 23 | extern struct bus_type pcie_port_bus_type; | 26 | extern struct bus_type pcie_port_bus_type; |
| 24 | extern int pcie_port_device_register(struct pci_dev *dev); | 27 | extern int pcie_port_device_register(struct pci_dev *dev); |
| 25 | #ifdef CONFIG_PM | 28 | #ifdef CONFIG_PM |
| @@ -30,6 +33,8 @@ extern void pcie_port_device_remove(struct pci_dev *dev); | |||
| 30 | extern int __must_check pcie_port_bus_register(void); | 33 | extern int __must_check pcie_port_bus_register(void); |
| 31 | extern void pcie_port_bus_unregister(void); | 34 | extern void pcie_port_bus_unregister(void); |
| 32 | 35 | ||
| 36 | struct pci_dev; | ||
| 37 | |||
| 33 | #ifdef CONFIG_PCIE_PME | 38 | #ifdef CONFIG_PCIE_PME |
| 34 | extern bool pcie_pme_msi_disabled; | 39 | extern bool pcie_pme_msi_disabled; |
| 35 | 40 | ||
| @@ -42,9 +47,26 @@ static inline bool pcie_pme_no_msi(void) | |||
| 42 | { | 47 | { |
| 43 | return pcie_pme_msi_disabled; | 48 | return pcie_pme_msi_disabled; |
| 44 | } | 49 | } |
| 50 | |||
| 51 | extern void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable); | ||
| 45 | #else /* !CONFIG_PCIE_PME */ | 52 | #else /* !CONFIG_PCIE_PME */ |
| 46 | static inline void pcie_pme_disable_msi(void) {} | 53 | static inline void pcie_pme_disable_msi(void) {} |
| 47 | static inline bool pcie_pme_no_msi(void) { return false; } | 54 | static inline bool pcie_pme_no_msi(void) { return false; } |
| 55 | static inline void pcie_pme_interrupt_enable(struct pci_dev *dev, bool en) {} | ||
| 48 | #endif /* !CONFIG_PCIE_PME */ | 56 | #endif /* !CONFIG_PCIE_PME */ |
| 49 | 57 | ||
| 58 | #ifdef CONFIG_ACPI | ||
| 59 | extern int pcie_port_acpi_setup(struct pci_dev *port, int *mask); | ||
| 60 | |||
| 61 | static inline int pcie_port_platform_notify(struct pci_dev *port, int *mask) | ||
| 62 | { | ||
| 63 | return pcie_port_acpi_setup(port, mask); | ||
| 64 | } | ||
| 65 | #else /* !CONFIG_ACPI */ | ||
| 66 | static inline int pcie_port_platform_notify(struct pci_dev *port, int *mask) | ||
| 67 | { | ||
| 68 | return 0; | ||
| 69 | } | ||
| 70 | #endif /* !CONFIG_ACPI */ | ||
| 71 | |||
| 50 | #endif /* _PORTDRV_H_ */ | 72 | #endif /* _PORTDRV_H_ */ |
diff --git a/drivers/pci/pcie/portdrv_acpi.c b/drivers/pci/pcie/portdrv_acpi.c new file mode 100644 index 000000000000..b7c4cb1ccb23 --- /dev/null +++ b/drivers/pci/pcie/portdrv_acpi.c | |||
| @@ -0,0 +1,77 @@ | |||
| 1 | /* | ||
| 2 | * PCIe Port Native Services Support, ACPI-Related Part | ||
| 3 | * | ||
| 4 | * Copyright (C) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. | ||
| 5 | * | ||
| 6 | * This file is subject to the terms and conditions of the GNU General Public | ||
| 7 | * License V2. See the file "COPYING" in the main directory of this archive | ||
| 8 | * for more details. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #include <linux/pci.h> | ||
| 12 | #include <linux/kernel.h> | ||
| 13 | #include <linux/errno.h> | ||
| 14 | #include <linux/acpi.h> | ||
| 15 | #include <linux/pci-acpi.h> | ||
| 16 | #include <linux/pcieport_if.h> | ||
| 17 | |||
| 18 | #include "aer/aerdrv.h" | ||
| 19 | #include "../pci.h" | ||
| 20 | |||
| 21 | /** | ||
| 22 | * pcie_port_acpi_setup - Request the BIOS to release control of PCIe services. | ||
| 23 | * @port: PCIe Port service for a root port or event collector. | ||
| 24 | * @srv_mask: Bit mask of services that can be enabled for @port. | ||
| 25 | * | ||
| 26 | * Invoked when @port is identified as a PCIe port device. To avoid conflicts | ||
| 28 | * with the BIOS, PCIe port native services support requires the BIOS to yield | ||
| 28 | * control of these services to the kernel. The mask of services that the BIOS | ||
| 29 | * allows to be enabled for @port is written to @srv_mask. | ||
| 30 | * | ||
| 31 | * NOTE: It turns out that we cannot do that for individual port services | ||
| 32 | * separately, because that would make some systems work incorrectly. | ||
| 33 | */ | ||
| 34 | int pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask) | ||
| 35 | { | ||
| 36 | acpi_status status; | ||
| 37 | acpi_handle handle; | ||
| 38 | u32 flags; | ||
| 39 | |||
| 40 | if (acpi_pci_disabled) | ||
| 41 | return 0; | ||
| 42 | |||
| 43 | handle = acpi_find_root_bridge_handle(port); | ||
| 44 | if (!handle) | ||
| 45 | return -EINVAL; | ||
| 46 | |||
| 47 | flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL | ||
| 48 | | OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | ||
| 49 | | OSC_PCI_EXPRESS_PME_CONTROL; | ||
| 50 | |||
| 51 | if (pci_aer_available()) { | ||
| 52 | if (pcie_aer_get_firmware_first(port)) | ||
| 53 | dev_dbg(&port->dev, "PCIe errors handled by BIOS.\n"); | ||
| 54 | else | ||
| 55 | flags |= OSC_PCI_EXPRESS_AER_CONTROL; | ||
| 56 | } | ||
| 57 | |||
| 58 | status = acpi_pci_osc_control_set(handle, &flags, | ||
| 59 | OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); | ||
| 60 | if (ACPI_FAILURE(status)) { | ||
| 61 | dev_dbg(&port->dev, "ACPI _OSC request failed (code %d)\n", | ||
| 62 | status); | ||
| 63 | return -ENODEV; | ||
| 64 | } | ||
| 65 | |||
| 66 | dev_info(&port->dev, "ACPI _OSC control granted for 0x%02x\n", flags); | ||
| 67 | |||
| 68 | *srv_mask = PCIE_PORT_SERVICE_VC; | ||
| 69 | if (flags & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL) | ||
| 70 | *srv_mask |= PCIE_PORT_SERVICE_HP; | ||
| 71 | if (flags & OSC_PCI_EXPRESS_PME_CONTROL) | ||
| 72 | *srv_mask |= PCIE_PORT_SERVICE_PME; | ||
| 73 | if (flags & OSC_PCI_EXPRESS_AER_CONTROL) | ||
| 74 | *srv_mask |= PCIE_PORT_SERVICE_AER; | ||
| 75 | |||
| 76 | return 0; | ||
| 77 | } | ||
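A sketch of how a caller is expected to consume the service mask filled in by pcie_port_acpi_setup() through the pcie_port_platform_notify() wrapper; port is assumed to be the struct pci_dev for the PCIe port, and the pattern mirrors the portdrv_core hunk that follows:

	int cap_mask, services = 0;

	/* Ask the platform which native services it yields for this port. */
	if (pcie_port_platform_notify(port, &cap_mask) == 0) {
		if (cap_mask & PCIE_PORT_SERVICE_HP)
			services |= PCIE_PORT_SERVICE_HP;
		if (cap_mask & PCIE_PORT_SERVICE_PME)
			services |= PCIE_PORT_SERVICE_PME;
		if (cap_mask & PCIE_PORT_SERVICE_AER)
			services |= PCIE_PORT_SERVICE_AER;
	}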
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index e73effbe402c..a9c222d79ebc 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c | |||
| @@ -14,6 +14,8 @@ | |||
| 14 | #include <linux/string.h> | 14 | #include <linux/string.h> |
| 15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
| 16 | #include <linux/pcieport_if.h> | 16 | #include <linux/pcieport_if.h> |
| 17 | #include <linux/aer.h> | ||
| 18 | #include <linux/pci-aspm.h> | ||
| 17 | 19 | ||
| 18 | #include "../pci.h" | 20 | #include "../pci.h" |
| 19 | #include "portdrv.h" | 21 | #include "portdrv.h" |
| @@ -236,24 +238,64 @@ static int get_port_device_capability(struct pci_dev *dev) | |||
| 236 | int services = 0, pos; | 238 | int services = 0, pos; |
| 237 | u16 reg16; | 239 | u16 reg16; |
| 238 | u32 reg32; | 240 | u32 reg32; |
| 241 | int cap_mask; | ||
| 242 | int err; | ||
| 243 | |||
| 244 | err = pcie_port_platform_notify(dev, &cap_mask); | ||
| 245 | if (pcie_ports_auto) { | ||
| 246 | if (err) { | ||
| 247 | pcie_no_aspm(); | ||
| 248 | return 0; | ||
| 249 | } | ||
| 250 | } else { | ||
| 251 | cap_mask = PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP | ||
| 252 | | PCIE_PORT_SERVICE_VC; | ||
| 253 | if (pci_aer_available()) | ||
| 254 | cap_mask |= PCIE_PORT_SERVICE_AER; | ||
| 255 | } | ||
| 239 | 256 | ||
| 240 | pos = pci_pcie_cap(dev); | 257 | pos = pci_pcie_cap(dev); |
| 241 | pci_read_config_word(dev, pos + PCI_EXP_FLAGS, ®16); | 258 | pci_read_config_word(dev, pos + PCI_EXP_FLAGS, ®16); |
| 242 | /* Hot-Plug Capable */ | 259 | /* Hot-Plug Capable */ |
| 243 | if (reg16 & PCI_EXP_FLAGS_SLOT) { | 260 | if ((cap_mask & PCIE_PORT_SERVICE_HP) && (reg16 & PCI_EXP_FLAGS_SLOT)) { |
| 244 | pci_read_config_dword(dev, pos + PCI_EXP_SLTCAP, ®32); | 261 | pci_read_config_dword(dev, pos + PCI_EXP_SLTCAP, ®32); |
| 245 | if (reg32 & PCI_EXP_SLTCAP_HPC) | 262 | if (reg32 & PCI_EXP_SLTCAP_HPC) { |
| 246 | services |= PCIE_PORT_SERVICE_HP; | 263 | services |= PCIE_PORT_SERVICE_HP; |
| 264 | /* | ||
| 265 | * Disable hot-plug interrupts in case they have been | ||
| 266 | * enabled by the BIOS and the hot-plug service driver | ||
| 267 | * is not loaded. | ||
| 268 | */ | ||
| 269 | pos += PCI_EXP_SLTCTL; | ||
| 270 | pci_read_config_word(dev, pos, ®16); | ||
| 271 | reg16 &= ~(PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE); | ||
| 272 | pci_write_config_word(dev, pos, reg16); | ||
| 273 | } | ||
| 247 | } | 274 | } |
| 248 | /* AER capable */ | 275 | /* AER capable */ |
| 249 | if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR)) | 276 | if ((cap_mask & PCIE_PORT_SERVICE_AER) |
| 277 | && pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR)) { | ||
| 250 | services |= PCIE_PORT_SERVICE_AER; | 278 | services |= PCIE_PORT_SERVICE_AER; |
| 279 | /* | ||
| 280 | * Disable AER on this port in case it's been enabled by the | ||
| 281 | * BIOS (the AER service driver will enable it when necessary). | ||
| 282 | */ | ||
| 283 | pci_disable_pcie_error_reporting(dev); | ||
| 284 | } | ||
| 251 | /* VC support */ | 285 | /* VC support */ |
| 252 | if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VC)) | 286 | if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VC)) |
| 253 | services |= PCIE_PORT_SERVICE_VC; | 287 | services |= PCIE_PORT_SERVICE_VC; |
| 254 | /* Root ports are capable of generating PME too */ | 288 | /* Root ports are capable of generating PME too */ |
| 255 | if (dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) | 289 | if ((cap_mask & PCIE_PORT_SERVICE_PME) |
| 290 | && dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) { | ||
| 256 | services |= PCIE_PORT_SERVICE_PME; | 291 | services |= PCIE_PORT_SERVICE_PME; |
| 292 | /* | ||
| 293 | * Disable PME interrupt on this port in case it's been enabled | ||
| 294 | * by the BIOS (the PME service driver will enable it when | ||
| 295 | * necessary). | ||
| 296 | */ | ||
| 297 | pcie_pme_interrupt_enable(dev, false); | ||
| 298 | } | ||
| 257 | 299 | ||
| 258 | return services; | 300 | return services; |
| 259 | } | 301 | } |
| @@ -494,6 +536,9 @@ static void pcie_port_shutdown_service(struct device *dev) {} | |||
| 494 | */ | 536 | */ |
| 495 | int pcie_port_service_register(struct pcie_port_service_driver *new) | 537 | int pcie_port_service_register(struct pcie_port_service_driver *new) |
| 496 | { | 538 | { |
| 539 | if (pcie_ports_disabled) | ||
| 540 | return -ENODEV; | ||
| 541 | |||
| 497 | new->driver.name = (char *)new->name; | 542 | new->driver.name = (char *)new->name; |
| 498 | new->driver.bus = &pcie_port_bus_type; | 543 | new->driver.bus = &pcie_port_bus_type; |
| 499 | new->driver.probe = pcie_port_probe_service; | 544 | new->driver.probe = pcie_port_probe_service; |
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 3debed25e46b..f9033e190fb6 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | #include <linux/pcieport_if.h> | 15 | #include <linux/pcieport_if.h> |
| 16 | #include <linux/aer.h> | 16 | #include <linux/aer.h> |
| 17 | #include <linux/dmi.h> | 17 | #include <linux/dmi.h> |
| 18 | #include <linux/pci-aspm.h> | ||
| 18 | 19 | ||
| 19 | #include "portdrv.h" | 20 | #include "portdrv.h" |
| 20 | #include "aer/aerdrv.h" | 21 | #include "aer/aerdrv.h" |
| @@ -29,6 +30,31 @@ MODULE_AUTHOR(DRIVER_AUTHOR); | |||
| 29 | MODULE_DESCRIPTION(DRIVER_DESC); | 30 | MODULE_DESCRIPTION(DRIVER_DESC); |
| 30 | MODULE_LICENSE("GPL"); | 31 | MODULE_LICENSE("GPL"); |
| 31 | 32 | ||
| 33 | /* If this switch is set, PCIe port native services should not be enabled. */ | ||
| 34 | bool pcie_ports_disabled; | ||
| 35 | |||
| 36 | /* | ||
| 37 | * If this switch is set, ACPI _OSC will be used to determine whether or not to | ||
| 38 | * enable PCIe port native services. | ||
| 39 | */ | ||
| 40 | bool pcie_ports_auto = true; | ||
| 41 | |||
| 42 | static int __init pcie_port_setup(char *str) | ||
| 43 | { | ||
| 44 | if (!strncmp(str, "compat", 6)) { | ||
| 45 | pcie_ports_disabled = true; | ||
| 46 | } else if (!strncmp(str, "native", 6)) { | ||
| 47 | pcie_ports_disabled = false; | ||
| 48 | pcie_ports_auto = false; | ||
| 49 | } else if (!strncmp(str, "auto", 4)) { | ||
| 50 | pcie_ports_disabled = false; | ||
| 51 | pcie_ports_auto = true; | ||
| 52 | } | ||
| 53 | |||
| 54 | return 1; | ||
| 55 | } | ||
| 56 | __setup("pcie_ports=", pcie_port_setup); | ||
| 57 | |||
| 32 | /* global data */ | 58 | /* global data */ |
| 33 | 59 | ||
| 34 | static int pcie_portdrv_restore_config(struct pci_dev *dev) | 60 | static int pcie_portdrv_restore_config(struct pci_dev *dev) |
| @@ -301,6 +327,11 @@ static int __init pcie_portdrv_init(void) | |||
| 301 | { | 327 | { |
| 302 | int retval; | 328 | int retval; |
| 303 | 329 | ||
| 330 | if (pcie_ports_disabled) { | ||
| 331 | pcie_no_aspm(); | ||
| 332 | return -EACCES; | ||
| 333 | } | ||
| 334 | |||
| 304 | dmi_check_system(pcie_portdrv_dmi_table); | 335 | dmi_check_system(pcie_portdrv_dmi_table); |
| 305 | 336 | ||
| 306 | retval = pcie_port_bus_register(); | 337 | retval = pcie_port_bus_register(); |
| @@ -315,11 +346,4 @@ static int __init pcie_portdrv_init(void) | |||
| 315 | return retval; | 346 | return retval; |
| 316 | } | 347 | } |
| 317 | 348 | ||
| 318 | static void __exit pcie_portdrv_exit(void) | ||
| 319 | { | ||
| 320 | pci_unregister_driver(&pcie_portdriver); | ||
| 321 | pcie_port_bus_unregister(); | ||
| 322 | } | ||
| 323 | |||
| 324 | module_init(pcie_portdrv_init); | 349 | module_init(pcie_portdrv_init); |
| 325 | module_exit(pcie_portdrv_exit); | ||
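The new pcie_ports= kernel parameter introduced above accepts three values; an illustrative summary of the parser's behaviour:

	pcie_ports=compat    (pcie_ports_disabled = true; no native port services are registered)
	pcie_ports=native    (use native services without consulting ACPI _OSC; pcie_ports_auto = false)
	pcie_ports=auto      (default; let ACPI _OSC decide which services are enabled)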
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c index 659eaa0fc48f..968cfea04f74 100644 --- a/drivers/pci/slot.c +++ b/drivers/pci/slot.c | |||
| @@ -49,7 +49,7 @@ static ssize_t address_read_file(struct pci_slot *slot, char *buf) | |||
| 49 | } | 49 | } |
| 50 | 50 | ||
| 51 | /* these strings match up with the values in pci_bus_speed */ | 51 | /* these strings match up with the values in pci_bus_speed */ |
| 52 | static char *pci_bus_speed_strings[] = { | 52 | static const char *pci_bus_speed_strings[] = { |
| 53 | "33 MHz PCI", /* 0x00 */ | 53 | "33 MHz PCI", /* 0x00 */ |
| 54 | "66 MHz PCI", /* 0x01 */ | 54 | "66 MHz PCI", /* 0x01 */ |
| 55 | "66 MHz PCI-X", /* 0x02 */ | 55 | "66 MHz PCI-X", /* 0x02 */ |
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c index 72b2bcc2c224..d4fb82d85e9b 100644 --- a/drivers/rtc/rtc-bfin.c +++ b/drivers/rtc/rtc-bfin.c | |||
| @@ -426,7 +426,7 @@ static int bfin_rtc_suspend(struct platform_device *pdev, pm_message_t state) | |||
| 426 | enable_irq_wake(IRQ_RTC); | 426 | enable_irq_wake(IRQ_RTC); |
| 427 | bfin_rtc_sync_pending(&pdev->dev); | 427 | bfin_rtc_sync_pending(&pdev->dev); |
| 428 | } else | 428 | } else |
| 429 | bfin_rtc_int_clear(-1); | 429 | bfin_rtc_int_clear(0); |
| 430 | 430 | ||
| 431 | return 0; | 431 | return 0; |
| 432 | } | 432 | } |
| @@ -435,8 +435,17 @@ static int bfin_rtc_resume(struct platform_device *pdev) | |||
| 435 | { | 435 | { |
| 436 | if (device_may_wakeup(&pdev->dev)) | 436 | if (device_may_wakeup(&pdev->dev)) |
| 437 | disable_irq_wake(IRQ_RTC); | 437 | disable_irq_wake(IRQ_RTC); |
| 438 | else | 438 | |
| 439 | bfin_write_RTC_ISTAT(-1); | 439 | /* |
| 440 | * Since only some of the RTC bits are maintained externally in the | ||
| 441 | * Vbat domain, we need to wait for the RTC MMRs to be synced into | ||
| 442 | * the core after waking up. This happens every RTC 1HZ. Once that | ||
| 443 | * has happened, we can go ahead and re-enable the important write | ||
| 444 | * complete interrupt event. | ||
| 445 | */ | ||
| 446 | while (!(bfin_read_RTC_ISTAT() & RTC_ISTAT_SEC)) | ||
| 447 | continue; | ||
| 448 | bfin_rtc_int_set(RTC_ISTAT_WRITE_COMPLETE); | ||
| 440 | 449 | ||
| 441 | return 0; | 450 | return 0; |
| 442 | } | 451 | } |
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c index 66377f3e28b8..d60557cae8ef 100644 --- a/drivers/rtc/rtc-m41t80.c +++ b/drivers/rtc/rtc-m41t80.c | |||
| @@ -364,7 +364,7 @@ static int m41t80_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *t) | |||
| 364 | t->time.tm_isdst = -1; | 364 | t->time.tm_isdst = -1; |
| 365 | t->enabled = !!(reg[M41T80_REG_ALARM_MON] & M41T80_ALMON_AFE); | 365 | t->enabled = !!(reg[M41T80_REG_ALARM_MON] & M41T80_ALMON_AFE); |
| 366 | t->pending = !!(reg[M41T80_REG_FLAGS] & M41T80_FLAGS_AF); | 366 | t->pending = !!(reg[M41T80_REG_FLAGS] & M41T80_FLAGS_AF); |
| 367 | return rtc_valid_tm(t); | 367 | return 0; |
| 368 | } | 368 | } |
| 369 | 369 | ||
| 370 | static struct rtc_class_ops m41t80_rtc_ops = { | 370 | static struct rtc_class_ops m41t80_rtc_ops = { |
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c index 6c418fe7f288..b7a6690e5b35 100644 --- a/drivers/rtc/rtc-pl031.c +++ b/drivers/rtc/rtc-pl031.c | |||
| @@ -403,7 +403,7 @@ static int pl031_probe(struct amba_device *adev, struct amba_id *id) | |||
| 403 | } | 403 | } |
| 404 | 404 | ||
| 405 | if (request_irq(adev->irq[0], pl031_interrupt, | 405 | if (request_irq(adev->irq[0], pl031_interrupt, |
| 406 | IRQF_DISABLED | IRQF_SHARED, "rtc-pl031", ldata)) { | 406 | IRQF_DISABLED, "rtc-pl031", ldata)) { |
| 407 | ret = -EIO; | 407 | ret = -EIO; |
| 408 | goto out_no_irq; | 408 | goto out_no_irq; |
| 409 | } | 409 | } |
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c index b7de02525ec9..85cf607fc78f 100644 --- a/drivers/s390/char/tape_block.c +++ b/drivers/s390/char/tape_block.c | |||
| @@ -217,8 +217,7 @@ tapeblock_setup_device(struct tape_device * device) | |||
| 217 | if (!blkdat->request_queue) | 217 | if (!blkdat->request_queue) |
| 218 | return -ENOMEM; | 218 | return -ENOMEM; |
| 219 | 219 | ||
| 220 | elevator_exit(blkdat->request_queue->elevator); | 220 | rc = elevator_change(blkdat->request_queue, "noop"); |
| 221 | rc = elevator_init(blkdat->request_queue, "noop"); | ||
| 222 | if (rc) | 221 | if (rc) |
| 223 | goto cleanup_queue; | 222 | goto cleanup_queue; |
| 224 | 223 | ||
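elevator_change() replaces the open-coded elevator_exit()/elevator_init() pair here. A hedged usage sketch, assuming q is an already initialized struct request_queue *:

	/* Switch the queue to the noop scheduler; the helper takes care of
	 * moving away from the currently attached elevator. */
	int rc = elevator_change(q, "noop");
	if (rc)
		printk(KERN_WARNING "cannot switch to noop elevator (%d)\n", rc);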
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c index 7d4d2275573c..7f11f3e48e12 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.c +++ b/drivers/scsi/be2iscsi/be_iscsi.c | |||
| @@ -300,8 +300,7 @@ int beiscsi_get_host_param(struct Scsi_Host *shost, | |||
| 300 | enum iscsi_host_param param, char *buf) | 300 | enum iscsi_host_param param, char *buf) |
| 301 | { | 301 | { |
| 302 | struct beiscsi_hba *phba = (struct beiscsi_hba *)iscsi_host_priv(shost); | 302 | struct beiscsi_hba *phba = (struct beiscsi_hba *)iscsi_host_priv(shost); |
| 303 | int len = 0; | 303 | int status = 0; |
| 304 | int status; | ||
| 305 | 304 | ||
| 306 | SE_DEBUG(DBG_LVL_8, "In beiscsi_get_host_param, param= %d\n", param); | 305 | SE_DEBUG(DBG_LVL_8, "In beiscsi_get_host_param, param= %d\n", param); |
| 307 | switch (param) { | 306 | switch (param) { |
| @@ -315,7 +314,7 @@ int beiscsi_get_host_param(struct Scsi_Host *shost, | |||
| 315 | default: | 314 | default: |
| 316 | return iscsi_host_get_param(shost, param, buf); | 315 | return iscsi_host_get_param(shost, param, buf); |
| 317 | } | 316 | } |
| 318 | return len; | 317 | return status; |
| 319 | } | 318 | } |
| 320 | 319 | ||
| 321 | int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba) | 320 | int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba) |
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index 26350e470bcc..877324fc594c 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c | |||
| @@ -368,7 +368,7 @@ int mgmt_open_connection(struct beiscsi_hba *phba, | |||
| 368 | memset(req, 0, sizeof(*req)); | 368 | memset(req, 0, sizeof(*req)); |
| 369 | wrb->tag0 |= tag; | 369 | wrb->tag0 |= tag; |
| 370 | 370 | ||
| 371 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 1); | 371 | be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1); |
| 372 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, | 372 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, |
| 373 | OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD, | 373 | OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD, |
| 374 | sizeof(*req)); | 374 | sizeof(*req)); |
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c index cd05e049d5f6..d0c82340f0e2 100644 --- a/drivers/scsi/constants.c +++ b/drivers/scsi/constants.c | |||
| @@ -1404,13 +1404,13 @@ void scsi_print_sense(char *name, struct scsi_cmnd *cmd) | |||
| 1404 | { | 1404 | { |
| 1405 | struct scsi_sense_hdr sshdr; | 1405 | struct scsi_sense_hdr sshdr; |
| 1406 | 1406 | ||
| 1407 | scmd_printk(KERN_INFO, cmd, ""); | 1407 | scmd_printk(KERN_INFO, cmd, " "); |
| 1408 | scsi_decode_sense_buffer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, | 1408 | scsi_decode_sense_buffer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, |
| 1409 | &sshdr); | 1409 | &sshdr); |
| 1410 | scsi_show_sense_hdr(&sshdr); | 1410 | scsi_show_sense_hdr(&sshdr); |
| 1411 | scsi_decode_sense_extras(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, | 1411 | scsi_decode_sense_extras(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, |
| 1412 | &sshdr); | 1412 | &sshdr); |
| 1413 | scmd_printk(KERN_INFO, cmd, ""); | 1413 | scmd_printk(KERN_INFO, cmd, " "); |
| 1414 | scsi_show_extd_sense(sshdr.asc, sshdr.ascq); | 1414 | scsi_show_extd_sense(sshdr.asc, sshdr.ascq); |
| 1415 | } | 1415 | } |
| 1416 | EXPORT_SYMBOL(scsi_print_sense); | 1416 | EXPORT_SYMBOL(scsi_print_sense); |
| @@ -1453,7 +1453,7 @@ EXPORT_SYMBOL(scsi_show_result); | |||
| 1453 | 1453 | ||
| 1454 | void scsi_print_result(struct scsi_cmnd *cmd) | 1454 | void scsi_print_result(struct scsi_cmnd *cmd) |
| 1455 | { | 1455 | { |
| 1456 | scmd_printk(KERN_INFO, cmd, ""); | 1456 | scmd_printk(KERN_INFO, cmd, " "); |
| 1457 | scsi_show_result(cmd->result); | 1457 | scsi_show_result(cmd->result); |
| 1458 | } | 1458 | } |
| 1459 | EXPORT_SYMBOL(scsi_print_result); | 1459 | EXPORT_SYMBOL(scsi_print_result); |
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 4f5551b5fe53..c5d0606ad097 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c | |||
| @@ -3231,6 +3231,12 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) | |||
| 3231 | misc_fw_support = readl(&cfgtable->misc_fw_support); | 3231 | misc_fw_support = readl(&cfgtable->misc_fw_support); |
| 3232 | use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; | 3232 | use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; |
| 3233 | 3233 | ||
| 3234 | /* The doorbell reset seems to cause lockups on some Smart | ||
| 3235 | * Arrays (e.g. P410, P410i, maybe others). Until this is | ||
| 3236 | * fixed or at least isolated, avoid the doorbell reset. | ||
| 3237 | */ | ||
| 3238 | use_doorbell = 0; | ||
| 3239 | |||
| 3234 | rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell); | 3240 | rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell); |
| 3235 | if (rc) | 3241 | if (rc) |
| 3236 | goto unmap_cfgtable; | 3242 | goto unmap_cfgtable; |
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index fda4de3440c4..e88bbdde49c5 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c | |||
| @@ -865,7 +865,7 @@ void osd_req_read(struct osd_request *or, | |||
| 865 | { | 865 | { |
| 866 | _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len); | 866 | _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len); |
| 867 | WARN_ON(or->in.bio || or->in.total_bytes); | 867 | WARN_ON(or->in.bio || or->in.total_bytes); |
| 868 | WARN_ON(1 == (bio->bi_rw & REQ_WRITE)); | 868 | WARN_ON(bio->bi_rw & REQ_WRITE); |
| 869 | or->in.bio = bio; | 869 | or->in.bio = bio; |
| 870 | or->in.total_bytes = len; | 870 | or->in.total_bytes = len; |
| 871 | } | 871 | } |
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 420238cc794e..114bc5a81171 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c | |||
| @@ -1838,26 +1838,33 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) | |||
| 1838 | 1838 | ||
| 1839 | qla24xx_disable_vp(vha); | 1839 | qla24xx_disable_vp(vha); |
| 1840 | 1840 | ||
| 1841 | vha->flags.delete_progress = 1; | ||
| 1842 | |||
| 1841 | fc_remove_host(vha->host); | 1843 | fc_remove_host(vha->host); |
| 1842 | 1844 | ||
| 1843 | scsi_remove_host(vha->host); | 1845 | scsi_remove_host(vha->host); |
| 1844 | 1846 | ||
| 1845 | qla2x00_free_fcports(vha); | 1847 | if (vha->timer_active) { |
| 1848 | qla2x00_vp_stop_timer(vha); | ||
| 1849 | DEBUG15(printk(KERN_INFO "scsi(%ld): timer for the vport[%d]" | ||
| 1850 | " = %p has stopped\n", vha->host_no, vha->vp_idx, vha)); | ||
| 1851 | } | ||
| 1846 | 1852 | ||
| 1847 | qla24xx_deallocate_vp_id(vha); | 1853 | qla24xx_deallocate_vp_id(vha); |
| 1848 | 1854 | ||
| 1855 | /* No pending activities shall be there on the vha now */ | ||
| 1856 | DEBUG(msleep(random32()%10)); /* Just to see if something falls on | ||
| 1857 | * the net we have placed below */ | ||
| 1858 | |||
| 1859 | BUG_ON(atomic_read(&vha->vref_count)); | ||
| 1860 | |||
| 1861 | qla2x00_free_fcports(vha); | ||
| 1862 | |||
| 1849 | mutex_lock(&ha->vport_lock); | 1863 | mutex_lock(&ha->vport_lock); |
| 1850 | ha->cur_vport_count--; | 1864 | ha->cur_vport_count--; |
| 1851 | clear_bit(vha->vp_idx, ha->vp_idx_map); | 1865 | clear_bit(vha->vp_idx, ha->vp_idx_map); |
| 1852 | mutex_unlock(&ha->vport_lock); | 1866 | mutex_unlock(&ha->vport_lock); |
| 1853 | 1867 | ||
| 1854 | if (vha->timer_active) { | ||
| 1855 | qla2x00_vp_stop_timer(vha); | ||
| 1856 | DEBUG15(printk ("scsi(%ld): timer for the vport[%d] = %p " | ||
| 1857 | "has stopped\n", | ||
| 1858 | vha->host_no, vha->vp_idx, vha)); | ||
| 1859 | } | ||
| 1860 | |||
| 1861 | if (vha->req->id && !ha->flags.cpu_affinity_enabled) { | 1868 | if (vha->req->id && !ha->flags.cpu_affinity_enabled) { |
| 1862 | if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS) | 1869 | if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS) |
| 1863 | qla_printk(KERN_WARNING, ha, | 1870 | qla_printk(KERN_WARNING, ha, |
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h index 6cfc28a25eb3..b74e6b5743dc 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.h +++ b/drivers/scsi/qla2xxx/qla_dbg.h | |||
| @@ -29,8 +29,6 @@ | |||
| 29 | /* #define QL_DEBUG_LEVEL_17 */ /* Output EEH trace messages */ | 29 | /* #define QL_DEBUG_LEVEL_17 */ /* Output EEH trace messages */ |
| 30 | /* #define QL_DEBUG_LEVEL_18 */ /* Output T10 CRC trace messages */ | 30 | /* #define QL_DEBUG_LEVEL_18 */ /* Output T10 CRC trace messages */ |
| 31 | 31 | ||
| 32 | /* #define QL_PRINTK_BUF */ /* Captures printk to buffer */ | ||
| 33 | |||
| 34 | /* | 32 | /* |
| 35 | * Macros use for debugging the driver. | 33 | * Macros use for debugging the driver. |
| 36 | */ | 34 | */ |
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 3a432ea0c7a3..d2a4e1530708 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h | |||
| @@ -2641,6 +2641,7 @@ struct qla_hw_data { | |||
| 2641 | #define MBX_UPDATE_FLASH_ACTIVE 3 | 2641 | #define MBX_UPDATE_FLASH_ACTIVE 3 |
| 2642 | 2642 | ||
| 2643 | struct mutex vport_lock; /* Virtual port synchronization */ | 2643 | struct mutex vport_lock; /* Virtual port synchronization */ |
| 2644 | spinlock_t vport_slock; /* order is hardware_lock, then vport_slock */ | ||
| 2644 | struct completion mbx_cmd_comp; /* Serialize mbx access */ | 2645 | struct completion mbx_cmd_comp; /* Serialize mbx access */ |
| 2645 | struct completion mbx_intr_comp; /* Used for completion notification */ | 2646 | struct completion mbx_intr_comp; /* Used for completion notification */ |
| 2646 | struct completion dcbx_comp; /* For set port config notification */ | 2647 | struct completion dcbx_comp; /* For set port config notification */ |
| @@ -2828,6 +2829,7 @@ typedef struct scsi_qla_host { | |||
| 2828 | uint32_t management_server_logged_in :1; | 2829 | uint32_t management_server_logged_in :1; |
| 2829 | uint32_t process_response_queue :1; | 2830 | uint32_t process_response_queue :1; |
| 2830 | uint32_t difdix_supported:1; | 2831 | uint32_t difdix_supported:1; |
| 2832 | uint32_t delete_progress:1; | ||
| 2831 | } flags; | 2833 | } flags; |
| 2832 | 2834 | ||
| 2833 | atomic_t loop_state; | 2835 | atomic_t loop_state; |
| @@ -2922,6 +2924,8 @@ typedef struct scsi_qla_host { | |||
| 2922 | struct req_que *req; | 2924 | struct req_que *req; |
| 2923 | int fw_heartbeat_counter; | 2925 | int fw_heartbeat_counter; |
| 2924 | int seconds_since_last_heartbeat; | 2926 | int seconds_since_last_heartbeat; |
| 2927 | |||
| 2928 | atomic_t vref_count; | ||
| 2925 | } scsi_qla_host_t; | 2929 | } scsi_qla_host_t; |
| 2926 | 2930 | ||
| 2927 | /* | 2931 | /* |
| @@ -2932,6 +2936,22 @@ typedef struct scsi_qla_host { | |||
| 2932 | test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || \ | 2936 | test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || \ |
| 2933 | atomic_read(&ha->loop_state) == LOOP_DOWN) | 2937 | atomic_read(&ha->loop_state) == LOOP_DOWN) |
| 2934 | 2938 | ||
| 2939 | #define QLA_VHA_MARK_BUSY(__vha, __bail) do { \ | ||
| 2940 | atomic_inc(&__vha->vref_count); \ | ||
| 2941 | mb(); \ | ||
| 2942 | if (__vha->flags.delete_progress) { \ | ||
| 2943 | atomic_dec(&__vha->vref_count); \ | ||
| 2944 | __bail = 1; \ | ||
| 2945 | } else { \ | ||
| 2946 | __bail = 0; \ | ||
| 2947 | } \ | ||
| 2948 | } while (0) | ||
| 2949 | |||
| 2950 | #define QLA_VHA_MARK_NOT_BUSY(__vha) do { \ | ||
| 2951 | atomic_dec(&__vha->vref_count); \ | ||
| 2952 | } while (0) | ||
| 2953 | |||
| 2954 | |||
| 2935 | #define qla_printk(level, ha, format, arg...) \ | 2955 | #define qla_printk(level, ha, format, arg...) \ |
| 2936 | dev_printk(level , &((ha)->pdev->dev) , format , ## arg) | 2956 | dev_printk(level , &((ha)->pdev->dev) , format , ## arg) |
| 2937 | 2957 | ||
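The new QLA_VHA_MARK_BUSY()/QLA_VHA_MARK_NOT_BUSY() pair implements a simple reference count that lets vport deletion detect in-flight users, while new users bail out once delete_progress is set. The intended usage pattern, as a sketch matching the qla_init.c changes below:

	uint8_t bail;

	QLA_VHA_MARK_BUSY(vha, bail);
	if (bail)
		return NULL;		/* vport deletion already in progress */

	/* ... safely reference vha here ... */

	QLA_VHA_MARK_NOT_BUSY(vha);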
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index d863ed2619b5..9c383baebe27 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
| @@ -69,21 +69,29 @@ qla2x00_ctx_sp_free(srb_t *sp) | |||
| 69 | { | 69 | { |
| 70 | struct srb_ctx *ctx = sp->ctx; | 70 | struct srb_ctx *ctx = sp->ctx; |
| 71 | struct srb_iocb *iocb = ctx->u.iocb_cmd; | 71 | struct srb_iocb *iocb = ctx->u.iocb_cmd; |
| 72 | struct scsi_qla_host *vha = sp->fcport->vha; | ||
| 72 | 73 | ||
| 73 | del_timer_sync(&iocb->timer); | 74 | del_timer_sync(&iocb->timer); |
| 74 | kfree(iocb); | 75 | kfree(iocb); |
| 75 | kfree(ctx); | 76 | kfree(ctx); |
| 76 | mempool_free(sp, sp->fcport->vha->hw->srb_mempool); | 77 | mempool_free(sp, sp->fcport->vha->hw->srb_mempool); |
| 78 | |||
| 79 | QLA_VHA_MARK_NOT_BUSY(vha); | ||
| 77 | } | 80 | } |
| 78 | 81 | ||
| 79 | inline srb_t * | 82 | inline srb_t * |
| 80 | qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size, | 83 | qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size, |
| 81 | unsigned long tmo) | 84 | unsigned long tmo) |
| 82 | { | 85 | { |
| 83 | srb_t *sp; | 86 | srb_t *sp = NULL; |
| 84 | struct qla_hw_data *ha = vha->hw; | 87 | struct qla_hw_data *ha = vha->hw; |
| 85 | struct srb_ctx *ctx; | 88 | struct srb_ctx *ctx; |
| 86 | struct srb_iocb *iocb; | 89 | struct srb_iocb *iocb; |
| 90 | uint8_t bail; | ||
| 91 | |||
| 92 | QLA_VHA_MARK_BUSY(vha, bail); | ||
| 93 | if (bail) | ||
| 94 | return NULL; | ||
| 87 | 95 | ||
| 88 | sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL); | 96 | sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL); |
| 89 | if (!sp) | 97 | if (!sp) |
| @@ -116,6 +124,8 @@ qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size, | |||
| 116 | iocb->timer.function = qla2x00_ctx_sp_timeout; | 124 | iocb->timer.function = qla2x00_ctx_sp_timeout; |
| 117 | add_timer(&iocb->timer); | 125 | add_timer(&iocb->timer); |
| 118 | done: | 126 | done: |
| 127 | if (!sp) | ||
| 128 | QLA_VHA_MARK_NOT_BUSY(vha); | ||
| 119 | return sp; | 129 | return sp; |
| 120 | } | 130 | } |
| 121 | 131 | ||
| @@ -1777,11 +1787,15 @@ qla2x00_init_rings(scsi_qla_host_t *vha) | |||
| 1777 | qla2x00_init_response_q_entries(rsp); | 1787 | qla2x00_init_response_q_entries(rsp); |
| 1778 | } | 1788 | } |
| 1779 | 1789 | ||
| 1790 | spin_lock_irqsave(&ha->vport_slock, flags); | ||
| 1780 | /* Clear RSCN queue. */ | 1791 | /* Clear RSCN queue. */ |
| 1781 | list_for_each_entry(vp, &ha->vp_list, list) { | 1792 | list_for_each_entry(vp, &ha->vp_list, list) { |
| 1782 | vp->rscn_in_ptr = 0; | 1793 | vp->rscn_in_ptr = 0; |
| 1783 | vp->rscn_out_ptr = 0; | 1794 | vp->rscn_out_ptr = 0; |
| 1784 | } | 1795 | } |
| 1796 | |||
| 1797 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
| 1798 | |||
| 1785 | ha->isp_ops->config_rings(vha); | 1799 | ha->isp_ops->config_rings(vha); |
| 1786 | 1800 | ||
| 1787 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 1801 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
| @@ -3218,12 +3232,17 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, | |||
| 3218 | /* Bypass virtual ports of the same host. */ | 3232 | /* Bypass virtual ports of the same host. */ |
| 3219 | found = 0; | 3233 | found = 0; |
| 3220 | if (ha->num_vhosts) { | 3234 | if (ha->num_vhosts) { |
| 3235 | unsigned long flags; | ||
| 3236 | |||
| 3237 | spin_lock_irqsave(&ha->vport_slock, flags); | ||
| 3221 | list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { | 3238 | list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { |
| 3222 | if (new_fcport->d_id.b24 == vp->d_id.b24) { | 3239 | if (new_fcport->d_id.b24 == vp->d_id.b24) { |
| 3223 | found = 1; | 3240 | found = 1; |
| 3224 | break; | 3241 | break; |
| 3225 | } | 3242 | } |
| 3226 | } | 3243 | } |
| 3244 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
| 3245 | |||
| 3227 | if (found) | 3246 | if (found) |
| 3228 | continue; | 3247 | continue; |
| 3229 | } | 3248 | } |
| @@ -3343,6 +3362,7 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev) | |||
| 3343 | struct qla_hw_data *ha = vha->hw; | 3362 | struct qla_hw_data *ha = vha->hw; |
| 3344 | struct scsi_qla_host *vp; | 3363 | struct scsi_qla_host *vp; |
| 3345 | struct scsi_qla_host *tvp; | 3364 | struct scsi_qla_host *tvp; |
| 3365 | unsigned long flags = 0; | ||
| 3346 | 3366 | ||
| 3347 | rval = QLA_SUCCESS; | 3367 | rval = QLA_SUCCESS; |
| 3348 | 3368 | ||
| @@ -3367,6 +3387,8 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev) | |||
| 3367 | /* Check for loop ID being already in use. */ | 3387 | /* Check for loop ID being already in use. */ |
| 3368 | found = 0; | 3388 | found = 0; |
| 3369 | fcport = NULL; | 3389 | fcport = NULL; |
| 3390 | |||
| 3391 | spin_lock_irqsave(&ha->vport_slock, flags); | ||
| 3370 | list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { | 3392 | list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { |
| 3371 | list_for_each_entry(fcport, &vp->vp_fcports, list) { | 3393 | list_for_each_entry(fcport, &vp->vp_fcports, list) { |
| 3372 | if (fcport->loop_id == dev->loop_id && | 3394 | if (fcport->loop_id == dev->loop_id && |
| @@ -3379,6 +3401,7 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev) | |||
| 3379 | if (found) | 3401 | if (found) |
| 3380 | break; | 3402 | break; |
| 3381 | } | 3403 | } |
| 3404 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
| 3382 | 3405 | ||
| 3383 | /* If not in use then it is free to use. */ | 3406 | /* If not in use then it is free to use. */ |
| 3384 | if (!found) { | 3407 | if (!found) { |
| @@ -3791,14 +3814,27 @@ void | |||
| 3791 | qla2x00_update_fcports(scsi_qla_host_t *base_vha) | 3814 | qla2x00_update_fcports(scsi_qla_host_t *base_vha) |
| 3792 | { | 3815 | { |
| 3793 | fc_port_t *fcport; | 3816 | fc_port_t *fcport; |
| 3794 | struct scsi_qla_host *tvp, *vha; | 3817 | struct scsi_qla_host *vha; |
| 3818 | struct qla_hw_data *ha = base_vha->hw; | ||
| 3819 | unsigned long flags; | ||
| 3795 | 3820 | ||
| 3821 | spin_lock_irqsave(&ha->vport_slock, flags); | ||
| 3796 | /* Go with deferred removal of rport references. */ | 3822 | /* Go with deferred removal of rport references. */ |
| 3797 | list_for_each_entry_safe(vha, tvp, &base_vha->hw->vp_list, list) | 3823 | list_for_each_entry(vha, &base_vha->hw->vp_list, list) { |
| 3798 | list_for_each_entry(fcport, &vha->vp_fcports, list) | 3824 | atomic_inc(&vha->vref_count); |
| 3825 | list_for_each_entry(fcport, &vha->vp_fcports, list) { | ||
| 3799 | if (fcport && fcport->drport && | 3826 | if (fcport && fcport->drport && |
| 3800 | atomic_read(&fcport->state) != FCS_UNCONFIGURED) | 3827 | atomic_read(&fcport->state) != FCS_UNCONFIGURED) { |
| 3828 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
| 3829 | |||
| 3801 | qla2x00_rport_del(fcport); | 3830 | qla2x00_rport_del(fcport); |
| 3831 | |||
| 3832 | spin_lock_irqsave(&ha->vport_slock, flags); | ||
| 3833 | } | ||
| 3834 | } | ||
| 3835 | atomic_dec(&vha->vref_count); | ||
| 3836 | } | ||
| 3837 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
| 3802 | } | 3838 | } |
| 3803 | 3839 | ||
| 3804 | void | 3840 | void |
| @@ -3806,7 +3842,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) | |||
| 3806 | { | 3842 | { |
| 3807 | struct qla_hw_data *ha = vha->hw; | 3843 | struct qla_hw_data *ha = vha->hw; |
| 3808 | struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev); | 3844 | struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev); |
| 3809 | struct scsi_qla_host *tvp; | 3845 | unsigned long flags; |
| 3810 | 3846 | ||
| 3811 | vha->flags.online = 0; | 3847 | vha->flags.online = 0; |
| 3812 | ha->flags.chip_reset_done = 0; | 3848 | ha->flags.chip_reset_done = 0; |
| @@ -3824,8 +3860,18 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) | |||
| 3824 | if (atomic_read(&vha->loop_state) != LOOP_DOWN) { | 3860 | if (atomic_read(&vha->loop_state) != LOOP_DOWN) { |
| 3825 | atomic_set(&vha->loop_state, LOOP_DOWN); | 3861 | atomic_set(&vha->loop_state, LOOP_DOWN); |
| 3826 | qla2x00_mark_all_devices_lost(vha, 0); | 3862 | qla2x00_mark_all_devices_lost(vha, 0); |
| 3827 | list_for_each_entry_safe(vp, tvp, &base_vha->hw->vp_list, list) | 3863 | |
| 3864 | spin_lock_irqsave(&ha->vport_slock, flags); | ||
| 3865 | list_for_each_entry(vp, &base_vha->hw->vp_list, list) { | ||
| 3866 | atomic_inc(&vp->vref_count); | ||
| 3867 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
| 3868 | |||
| 3828 | qla2x00_mark_all_devices_lost(vp, 0); | 3869 | qla2x00_mark_all_devices_lost(vp, 0); |
| 3870 | |||
| 3871 | spin_lock_irqsave(&ha->vport_slock, flags); | ||
| 3872 | atomic_dec(&vp->vref_count); | ||
| 3873 | } | ||
| 3874 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
| 3829 | } else { | 3875 | } else { |
| 3830 | if (!atomic_read(&vha->loop_down_timer)) | 3876 | if (!atomic_read(&vha->loop_down_timer)) |
| 3831 | atomic_set(&vha->loop_down_timer, | 3877 | atomic_set(&vha->loop_down_timer, |
| @@ -3862,8 +3908,8 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) | |||
| 3862 | uint8_t status = 0; | 3908 | uint8_t status = 0; |
| 3863 | struct qla_hw_data *ha = vha->hw; | 3909 | struct qla_hw_data *ha = vha->hw; |
| 3864 | struct scsi_qla_host *vp; | 3910 | struct scsi_qla_host *vp; |
| 3865 | struct scsi_qla_host *tvp; | ||
| 3866 | struct req_que *req = ha->req_q_map[0]; | 3911 | struct req_que *req = ha->req_q_map[0]; |
| 3912 | unsigned long flags; | ||
| 3867 | 3913 | ||
| 3868 | if (vha->flags.online) { | 3914 | if (vha->flags.online) { |
| 3869 | qla2x00_abort_isp_cleanup(vha); | 3915 | qla2x00_abort_isp_cleanup(vha); |
| @@ -3970,10 +4016,21 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) | |||
| 3970 | DEBUG(printk(KERN_INFO | 4016 | DEBUG(printk(KERN_INFO |
| 3971 | "qla2x00_abort_isp(%ld): succeeded.\n", | 4017 | "qla2x00_abort_isp(%ld): succeeded.\n", |
| 3972 | vha->host_no)); | 4018 | vha->host_no)); |
| 3973 | list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { | 4019 | |
| 3974 | if (vp->vp_idx) | 4020 | spin_lock_irqsave(&ha->vport_slock, flags); |
| 4021 | list_for_each_entry(vp, &ha->vp_list, list) { | ||
| 4022 | if (vp->vp_idx) { | ||
| 4023 | atomic_inc(&vp->vref_count); | ||
| 4024 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
| 4025 | |||
| 3975 | qla2x00_vp_abort_isp(vp); | 4026 | qla2x00_vp_abort_isp(vp); |
| 4027 | |||
| 4028 | spin_lock_irqsave(&ha->vport_slock, flags); | ||
| 4029 | atomic_dec(&vp->vref_count); | ||
| 4030 | } | ||
| 3976 | } | 4031 | } |
| 4032 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
| 4033 | |||
| 3977 | } else { | 4034 | } else { |
| 3978 | qla_printk(KERN_INFO, ha, | 4035 | qla_printk(KERN_INFO, ha, |
| 3979 | "qla2x00_abort_isp: **** FAILED ****\n"); | 4036 | "qla2x00_abort_isp: **** FAILED ****\n"); |
| @@ -5185,7 +5242,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha) | |||
| 5185 | struct req_que *req = ha->req_q_map[0]; | 5242 | struct req_que *req = ha->req_q_map[0]; |
| 5186 | struct rsp_que *rsp = ha->rsp_q_map[0]; | 5243 | struct rsp_que *rsp = ha->rsp_q_map[0]; |
| 5187 | struct scsi_qla_host *vp; | 5244 | struct scsi_qla_host *vp; |
| 5188 | struct scsi_qla_host *tvp; | 5245 | unsigned long flags; |
| 5189 | 5246 | ||
| 5190 | status = qla2x00_init_rings(vha); | 5247 | status = qla2x00_init_rings(vha); |
| 5191 | if (!status) { | 5248 | if (!status) { |
| @@ -5272,10 +5329,21 @@ qla82xx_restart_isp(scsi_qla_host_t *vha) | |||
| 5272 | DEBUG(printk(KERN_INFO | 5329 | DEBUG(printk(KERN_INFO |
| 5273 | "qla82xx_restart_isp(%ld): succeeded.\n", | 5330 | "qla82xx_restart_isp(%ld): succeeded.\n", |
| 5274 | vha->host_no)); | 5331 | vha->host_no)); |
| 5275 | list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { | 5332 | |
| 5276 | if (vp->vp_idx) | 5333 | spin_lock_irqsave(&ha->vport_slock, flags); |
| 5334 | list_for_each_entry(vp, &ha->vp_list, list) { | ||
| 5335 | if (vp->vp_idx) { | ||
| 5336 | atomic_inc(&vp->vref_count); | ||
| 5337 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
| 5338 | |||
| 5277 | qla2x00_vp_abort_isp(vp); | 5339 | qla2x00_vp_abort_isp(vp); |
| 5340 | |||
| 5341 | spin_lock_irqsave(&ha->vport_slock, flags); | ||
| 5342 | atomic_dec(&vp->vref_count); | ||
| 5343 | } | ||
| 5278 | } | 5344 | } |
| 5345 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
| 5346 | |||
| 5279 | } else { | 5347 | } else { |
| 5280 | qla_printk(KERN_INFO, ha, | 5348 | qla_printk(KERN_INFO, ha, |
| 5281 | "qla82xx_restart_isp: **** FAILED ****\n"); | 5349 | "qla82xx_restart_isp: **** FAILED ****\n"); |
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 6982ba70e12a..28f65be19dad 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
| @@ -1706,19 +1706,20 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) | |||
| 1706 | cp->result = DID_ERROR << 16; | 1706 | cp->result = DID_ERROR << 16; |
| 1707 | break; | 1707 | break; |
| 1708 | } | 1708 | } |
| 1709 | } else if (!lscsi_status) { | 1709 | } else { |
| 1710 | DEBUG2(qla_printk(KERN_INFO, ha, | 1710 | DEBUG2(qla_printk(KERN_INFO, ha, |
| 1711 | "scsi(%ld:%d:%d) Dropped frame(s) detected (0x%x " | 1711 | "scsi(%ld:%d:%d) Dropped frame(s) detected (0x%x " |
| 1712 | "of 0x%x bytes).\n", vha->host_no, cp->device->id, | 1712 | "of 0x%x bytes).\n", vha->host_no, cp->device->id, |
| 1713 | cp->device->lun, resid, scsi_bufflen(cp))); | 1713 | cp->device->lun, resid, scsi_bufflen(cp))); |
| 1714 | 1714 | ||
| 1715 | cp->result = DID_ERROR << 16; | 1715 | cp->result = DID_ERROR << 16 | lscsi_status; |
| 1716 | break; | 1716 | goto check_scsi_status; |
| 1717 | } | 1717 | } |
| 1718 | 1718 | ||
| 1719 | cp->result = DID_OK << 16 | lscsi_status; | 1719 | cp->result = DID_OK << 16 | lscsi_status; |
| 1720 | logit = 0; | 1720 | logit = 0; |
| 1721 | 1721 | ||
| 1722 | check_scsi_status: | ||
| 1722 | /* | 1723 | /* |
| 1723 | * Check to see if SCSI Status is non zero. If so report SCSI | 1724 | * Check to see if SCSI Status is non zero. If so report SCSI |
| 1724 | * Status. | 1725 | * Status. |
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 6009b0c69488..a595ec8264f8 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c | |||
| @@ -2913,7 +2913,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, | |||
| 2913 | uint16_t stat = le16_to_cpu(rptid_entry->vp_idx); | 2913 | uint16_t stat = le16_to_cpu(rptid_entry->vp_idx); |
| 2914 | struct qla_hw_data *ha = vha->hw; | 2914 | struct qla_hw_data *ha = vha->hw; |
| 2915 | scsi_qla_host_t *vp; | 2915 | scsi_qla_host_t *vp; |
| 2916 | scsi_qla_host_t *tvp; | 2916 | unsigned long flags; |
| 2917 | 2917 | ||
| 2918 | if (rptid_entry->entry_status != 0) | 2918 | if (rptid_entry->entry_status != 0) |
| 2919 | return; | 2919 | return; |
| @@ -2945,9 +2945,12 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, | |||
| 2945 | return; | 2945 | return; |
| 2946 | } | 2946 | } |
| 2947 | 2947 | ||
| 2948 | list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) | 2948 | spin_lock_irqsave(&ha->vport_slock, flags); |
| 2949 | list_for_each_entry(vp, &ha->vp_list, list) | ||
| 2949 | if (vp_idx == vp->vp_idx) | 2950 | if (vp_idx == vp->vp_idx) |
| 2950 | break; | 2951 | break; |
| 2952 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
| 2953 | |||
| 2951 | if (!vp) | 2954 | if (!vp) |
| 2952 | return; | 2955 | return; |
| 2953 | 2956 | ||
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index 987c5b0ca78e..2b69392a71a1 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c | |||
| @@ -30,6 +30,7 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha) | |||
| 30 | { | 30 | { |
| 31 | uint32_t vp_id; | 31 | uint32_t vp_id; |
| 32 | struct qla_hw_data *ha = vha->hw; | 32 | struct qla_hw_data *ha = vha->hw; |
| 33 | unsigned long flags; | ||
| 33 | 34 | ||
| 34 | /* Find an empty slot and assign an vp_id */ | 35 | /* Find an empty slot and assign an vp_id */ |
| 35 | mutex_lock(&ha->vport_lock); | 36 | mutex_lock(&ha->vport_lock); |
| @@ -44,7 +45,11 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha) | |||
| 44 | set_bit(vp_id, ha->vp_idx_map); | 45 | set_bit(vp_id, ha->vp_idx_map); |
| 45 | ha->num_vhosts++; | 46 | ha->num_vhosts++; |
| 46 | vha->vp_idx = vp_id; | 47 | vha->vp_idx = vp_id; |
| 48 | |||
| 49 | spin_lock_irqsave(&ha->vport_slock, flags); | ||
| 47 | list_add_tail(&vha->list, &ha->vp_list); | 50 | list_add_tail(&vha->list, &ha->vp_list); |
| 51 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
| 52 | |||
| 48 | mutex_unlock(&ha->vport_lock); | 53 | mutex_unlock(&ha->vport_lock); |
| 49 | return vp_id; | 54 | return vp_id; |
| 50 | } | 55 | } |
| @@ -54,12 +59,31 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha) | |||
| 54 | { | 59 | { |
| 55 | uint16_t vp_id; | 60 | uint16_t vp_id; |
| 56 | struct qla_hw_data *ha = vha->hw; | 61 | struct qla_hw_data *ha = vha->hw; |
| 62 | unsigned long flags = 0; | ||
| 57 | 63 | ||
| 58 | mutex_lock(&ha->vport_lock); | 64 | mutex_lock(&ha->vport_lock); |
| 65 | /* | ||
| 66 | * Wait for all pending activities to finish before removing vport from | ||
| 67 | * the list. | ||
| 68 | * The lock must be held for safe removal from the list (it | ||
| 69 | * ensures no vp_list traversal is active while the vport is | ||
| 70 | * removed from the queue). | ||
| 71 | */ | ||
| 72 | spin_lock_irqsave(&ha->vport_slock, flags); | ||
| 73 | while (atomic_read(&vha->vref_count)) { | ||
| 74 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
| 75 | |||
| 76 | msleep(500); | ||
| 77 | |||
| 78 | spin_lock_irqsave(&ha->vport_slock, flags); | ||
| 79 | } | ||
| 80 | list_del(&vha->list); | ||
| 81 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
| 82 | |||
| 59 | vp_id = vha->vp_idx; | 83 | vp_id = vha->vp_idx; |
| 60 | ha->num_vhosts--; | 84 | ha->num_vhosts--; |
| 61 | clear_bit(vp_id, ha->vp_idx_map); | 85 | clear_bit(vp_id, ha->vp_idx_map); |
| 62 | list_del(&vha->list); | 86 | |
| 63 | mutex_unlock(&ha->vport_lock); | 87 | mutex_unlock(&ha->vport_lock); |
| 64 | } | 88 | } |
| 65 | 89 | ||
| @@ -68,12 +92,17 @@ qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name) | |||
| 68 | { | 92 | { |
| 69 | scsi_qla_host_t *vha; | 93 | scsi_qla_host_t *vha; |
| 70 | struct scsi_qla_host *tvha; | 94 | struct scsi_qla_host *tvha; |
| 95 | unsigned long flags; | ||
| 71 | 96 | ||
| 97 | spin_lock_irqsave(&ha->vport_slock, flags); | ||
| 72 | /* Locate matching device in database. */ | 98 | /* Locate matching device in database. */ |
| 73 | list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) { | 99 | list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) { |
| 74 | if (!memcmp(port_name, vha->port_name, WWN_SIZE)) | 100 | if (!memcmp(port_name, vha->port_name, WWN_SIZE)) { |
| 101 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
| 75 | return vha; | 102 | return vha; |
| 103 | } | ||
| 76 | } | 104 | } |
| 105 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
| 77 | return NULL; | 106 | return NULL; |
| 78 | } | 107 | } |
| 79 | 108 | ||
| @@ -93,6 +122,12 @@ qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name) | |||
| 93 | static void | 122 | static void |
| 94 | qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha) | 123 | qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha) |
| 95 | { | 124 | { |
| 125 | /* | ||
| 126 | * !!! NOTE !!! | ||
| 127 | * If this function is called in contexts other than vp create, | ||
| 128 | * disable or delete, make sure it is synchronized with the | ||
| 129 | * delete thread. | ||
| 130 | */ | ||
| 96 | fc_port_t *fcport; | 131 | fc_port_t *fcport; |
| 97 | 132 | ||
| 98 | list_for_each_entry(fcport, &vha->vp_fcports, list) { | 133 | list_for_each_entry(fcport, &vha->vp_fcports, list) { |
| @@ -100,7 +135,6 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha) | |||
| 100 | "loop_id=0x%04x :%x\n", | 135 | "loop_id=0x%04x :%x\n", |
| 101 | vha->host_no, fcport->loop_id, fcport->vp_idx)); | 136 | vha->host_no, fcport->loop_id, fcport->vp_idx)); |
| 102 | 137 | ||
| 103 | atomic_set(&fcport->state, FCS_DEVICE_DEAD); | ||
| 104 | qla2x00_mark_device_lost(vha, fcport, 0, 0); | 138 | qla2x00_mark_device_lost(vha, fcport, 0, 0); |
| 105 | atomic_set(&fcport->state, FCS_UNCONFIGURED); | 139 | atomic_set(&fcport->state, FCS_UNCONFIGURED); |
| 106 | } | 140 | } |
| @@ -194,12 +228,17 @@ qla24xx_configure_vp(scsi_qla_host_t *vha) | |||
| 194 | void | 228 | void |
| 195 | qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb) | 229 | qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb) |
| 196 | { | 230 | { |
| 197 | scsi_qla_host_t *vha, *tvha; | 231 | scsi_qla_host_t *vha; |
| 198 | struct qla_hw_data *ha = rsp->hw; | 232 | struct qla_hw_data *ha = rsp->hw; |
| 199 | int i = 0; | 233 | int i = 0; |
| 234 | unsigned long flags; | ||
| 200 | 235 | ||
| 201 | list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) { | 236 | spin_lock_irqsave(&ha->vport_slock, flags); |
| 237 | list_for_each_entry(vha, &ha->vp_list, list) { | ||
| 202 | if (vha->vp_idx) { | 238 | if (vha->vp_idx) { |
| 239 | atomic_inc(&vha->vref_count); | ||
| 240 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
| 241 | |||
| 203 | switch (mb[0]) { | 242 | switch (mb[0]) { |
| 204 | case MBA_LIP_OCCURRED: | 243 | case MBA_LIP_OCCURRED: |
| 205 | case MBA_LOOP_UP: | 244 | case MBA_LOOP_UP: |
| @@ -215,9 +254,13 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb) | |||
| 215 | qla2x00_async_event(vha, rsp, mb); | 254 | qla2x00_async_event(vha, rsp, mb); |
| 216 | break; | 255 | break; |
| 217 | } | 256 | } |
| 257 | |||
| 258 | spin_lock_irqsave(&ha->vport_slock, flags); | ||
| 259 | atomic_dec(&vha->vref_count); | ||
| 218 | } | 260 | } |
| 219 | i++; | 261 | i++; |
| 220 | } | 262 | } |
| 263 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
| 221 | } | 264 | } |
| 222 | 265 | ||
| 223 | int | 266 | int |
| @@ -297,7 +340,7 @@ qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha) | |||
| 297 | int ret; | 340 | int ret; |
| 298 | struct qla_hw_data *ha = vha->hw; | 341 | struct qla_hw_data *ha = vha->hw; |
| 299 | scsi_qla_host_t *vp; | 342 | scsi_qla_host_t *vp; |
| 300 | struct scsi_qla_host *tvp; | 343 | unsigned long flags = 0; |
| 301 | 344 | ||
| 302 | if (vha->vp_idx) | 345 | if (vha->vp_idx) |
| 303 | return; | 346 | return; |
| @@ -309,10 +352,19 @@ qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha) | |||
| 309 | if (!(ha->current_topology & ISP_CFG_F)) | 352 | if (!(ha->current_topology & ISP_CFG_F)) |
| 310 | return; | 353 | return; |
| 311 | 354 | ||
| 312 | list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { | 355 | spin_lock_irqsave(&ha->vport_slock, flags); |
| 313 | if (vp->vp_idx) | 356 | list_for_each_entry(vp, &ha->vp_list, list) { |
| 357 | if (vp->vp_idx) { | ||
| 358 | atomic_inc(&vp->vref_count); | ||
| 359 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
| 360 | |||
| 314 | ret = qla2x00_do_dpc_vp(vp); | 361 | ret = qla2x00_do_dpc_vp(vp); |
| 362 | |||
| 363 | spin_lock_irqsave(&ha->vport_slock, flags); | ||
| 364 | atomic_dec(&vp->vref_count); | ||
| 365 | } | ||
| 315 | } | 366 | } |
| 367 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
| 316 | } | 368 | } |
| 317 | 369 | ||
| 318 | int | 370 | int |
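On the teardown side, qla24xx_deallocate_vp_id() above now polls the vport's vref_count until every outstanding reference is gone before list_del(), while QLA_VHA_MARK_BUSY()/QLA_VHA_MARK_NOT_BUSY() — used in qla2x00_get_ctx_sp() earlier and in qla2x00_alloc_work()/qla2x00_do_work() further down — bracket each piece of deferred work with such a reference. The macro bodies are not part of this diff, so the user-space sketch below only models an assumed handshake (increment, then bail out if teardown has begun); the function names and the "deleting" flag are illustrative, not taken from the patch.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <unistd.h>

    /*
     * User-space model of the busy/teardown handshake. The field names mirror
     * the driver, but the real QLA_VHA_MARK_BUSY/NOT_BUSY bodies are not shown
     * in this diff; the "deleting" flag is an assumption of this sketch.
     */
    struct vha {
        atomic_int  vref_count;
        atomic_bool deleting;
    };

    /* "QLA_VHA_MARK_BUSY": take a reference, then bail if teardown started. */
    static bool vha_mark_busy(struct vha *vha)
    {
        atomic_fetch_add(&vha->vref_count, 1);
        if (atomic_load(&vha->deleting)) {
            atomic_fetch_sub(&vha->vref_count, 1);
            return false;             /* caller must not queue new work */
        }
        return true;
    }

    /* "QLA_VHA_MARK_NOT_BUSY": drop the reference once the work is done. */
    static void vha_mark_not_busy(struct vha *vha)
    {
        atomic_fetch_sub(&vha->vref_count, 1);
    }

    /*
     * Deallocation side, as in qla24xx_deallocate_vp_id(): flag the vport,
     * then poll until every outstanding reference has been dropped; only
     * then is it safe to unlink and free the entry.
     */
    static void vha_wait_for_idle(struct vha *vha)
    {
        atomic_store(&vha->deleting, true);
        while (atomic_load(&vha->vref_count))
            usleep(500 * 1000);       /* the driver uses msleep(500) */
    }

    int main(void)
    {
        struct vha v = { 0, false };

        if (vha_mark_busy(&v)) {
            /* ... deferred work that needs the vport to stay around ... */
            vha_mark_not_busy(&v);
        }
        vha_wait_for_idle(&v);
        return 0;
    }

The point of the handshake is simply that the traversal in the previous sketch can never be left holding a pointer to a vport that has already been unlinked and freed.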
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index 915b77a6e193..0a71cc71eab2 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c | |||
| @@ -2672,6 +2672,19 @@ qla82xx_start_scsi(srb_t *sp) | |||
| 2672 | sufficient_dsds: | 2672 | sufficient_dsds: |
| 2673 | req_cnt = 1; | 2673 | req_cnt = 1; |
| 2674 | 2674 | ||
| 2675 | if (req->cnt < (req_cnt + 2)) { | ||
| 2676 | cnt = (uint16_t)RD_REG_DWORD_RELAXED( | ||
| 2677 | ®->req_q_out[0]); | ||
| 2678 | if (req->ring_index < cnt) | ||
| 2679 | req->cnt = cnt - req->ring_index; | ||
| 2680 | else | ||
| 2681 | req->cnt = req->length - | ||
| 2682 | (req->ring_index - cnt); | ||
| 2683 | } | ||
| 2684 | |||
| 2685 | if (req->cnt < (req_cnt + 2)) | ||
| 2686 | goto queuing_error; | ||
| 2687 | |||
| 2675 | ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC); | 2688 | ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC); |
| 2676 | if (!sp->ctx) { | 2689 | if (!sp->ctx) { |
| 2677 | DEBUG(printk(KERN_INFO | 2690 | DEBUG(printk(KERN_INFO |
| @@ -3307,16 +3320,19 @@ qla82xx_check_fw_alive(scsi_qla_host_t *vha) | |||
| 3307 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); | 3320 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); |
| 3308 | } | 3321 | } |
| 3309 | qla2xxx_wake_dpc(vha); | 3322 | qla2xxx_wake_dpc(vha); |
| 3323 | ha->flags.fw_hung = 1; | ||
| 3310 | if (ha->flags.mbox_busy) { | 3324 | if (ha->flags.mbox_busy) { |
| 3311 | ha->flags.fw_hung = 1; | ||
| 3312 | ha->flags.mbox_int = 1; | 3325 | ha->flags.mbox_int = 1; |
| 3313 | DEBUG2(qla_printk(KERN_ERR, ha, | 3326 | DEBUG2(qla_printk(KERN_ERR, ha, |
| 3314 | "Due to fw hung, doing premature " | 3327 | "Due to fw hung, doing premature " |
| 3315 | "completion of mbx command\n")); | 3328 | "completion of mbx command\n")); |
| 3316 | complete(&ha->mbx_intr_comp); | 3329 | if (test_bit(MBX_INTR_WAIT, |
| 3330 | &ha->mbx_cmd_flags)) | ||
| 3331 | complete(&ha->mbx_intr_comp); | ||
| 3317 | } | 3332 | } |
| 3318 | } | 3333 | } |
| 3319 | } | 3334 | } else |
| 3335 | vha->seconds_since_last_heartbeat = 0; | ||
| 3320 | vha->fw_heartbeat_counter = fw_heartbeat_counter; | 3336 | vha->fw_heartbeat_counter = fw_heartbeat_counter; |
| 3321 | } | 3337 | } |
| 3322 | 3338 | ||
| @@ -3418,13 +3434,15 @@ void qla82xx_watchdog(scsi_qla_host_t *vha) | |||
| 3418 | "%s(): Adapter reset needed!\n", __func__); | 3434 | "%s(): Adapter reset needed!\n", __func__); |
| 3419 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); | 3435 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); |
| 3420 | qla2xxx_wake_dpc(vha); | 3436 | qla2xxx_wake_dpc(vha); |
| 3437 | ha->flags.fw_hung = 1; | ||
| 3421 | if (ha->flags.mbox_busy) { | 3438 | if (ha->flags.mbox_busy) { |
| 3422 | ha->flags.fw_hung = 1; | ||
| 3423 | ha->flags.mbox_int = 1; | 3439 | ha->flags.mbox_int = 1; |
| 3424 | DEBUG2(qla_printk(KERN_ERR, ha, | 3440 | DEBUG2(qla_printk(KERN_ERR, ha, |
| 3425 | "Need reset, doing premature " | 3441 | "Need reset, doing premature " |
| 3426 | "completion of mbx command\n")); | 3442 | "completion of mbx command\n")); |
| 3427 | complete(&ha->mbx_intr_comp); | 3443 | if (test_bit(MBX_INTR_WAIT, |
| 3444 | &ha->mbx_cmd_flags)) | ||
| 3445 | complete(&ha->mbx_intr_comp); | ||
| 3428 | } | 3446 | } |
| 3429 | } else { | 3447 | } else { |
| 3430 | qla82xx_check_fw_alive(vha); | 3448 | qla82xx_check_fw_alive(vha); |
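The first qla_nx.c hunk above makes qla82xx_start_scsi() recompute the free space in the request ring from the firmware's out pointer before allocating a command context, instead of trusting a possibly stale req->cnt. The arithmetic is the usual circular-ring calculation; the stand-alone sketch below uses illustrative names, not the driver's structures, and just shows the two cases the check distinguishes.

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Free entries in a circular request ring, given the index the driver will
     * write next (ring_index), the index the consumer has drained up to (out),
     * and the ring length. Mirrors the recalculation added above.
     */
    static uint16_t ring_free(uint16_t ring_index, uint16_t out, uint16_t length)
    {
        if (ring_index < out)
            return out - ring_index;          /* write pointer is behind the out pointer */
        return length - (ring_index - out);   /* write pointer is at or ahead of it      */
    }

    int main(void)
    {
        printf("%u\n", ring_free(120, 10, 128));  /* 128 - 110 = 18 entries free */
        printf("%u\n", ring_free(5, 90, 128));    /* 90 - 5 = 85 entries free    */
        return 0;
    }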
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 8c80b49ac1c4..1e4bff695254 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
| @@ -2341,16 +2341,28 @@ probe_out: | |||
| 2341 | static void | 2341 | static void |
| 2342 | qla2x00_remove_one(struct pci_dev *pdev) | 2342 | qla2x00_remove_one(struct pci_dev *pdev) |
| 2343 | { | 2343 | { |
| 2344 | scsi_qla_host_t *base_vha, *vha, *temp; | 2344 | scsi_qla_host_t *base_vha, *vha; |
| 2345 | struct qla_hw_data *ha; | 2345 | struct qla_hw_data *ha; |
| 2346 | unsigned long flags; | ||
| 2346 | 2347 | ||
| 2347 | base_vha = pci_get_drvdata(pdev); | 2348 | base_vha = pci_get_drvdata(pdev); |
| 2348 | ha = base_vha->hw; | 2349 | ha = base_vha->hw; |
| 2349 | 2350 | ||
| 2350 | list_for_each_entry_safe(vha, temp, &ha->vp_list, list) { | 2351 | spin_lock_irqsave(&ha->vport_slock, flags); |
| 2351 | if (vha && vha->fc_vport) | 2352 | list_for_each_entry(vha, &ha->vp_list, list) { |
| 2353 | atomic_inc(&vha->vref_count); | ||
| 2354 | |||
| 2355 | if (vha && vha->fc_vport) { | ||
| 2356 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
| 2357 | |||
| 2352 | fc_vport_terminate(vha->fc_vport); | 2358 | fc_vport_terminate(vha->fc_vport); |
| 2359 | |||
| 2360 | spin_lock_irqsave(&ha->vport_slock, flags); | ||
| 2361 | } | ||
| 2362 | |||
| 2363 | atomic_dec(&vha->vref_count); | ||
| 2353 | } | 2364 | } |
| 2365 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
| 2354 | 2366 | ||
| 2355 | set_bit(UNLOADING, &base_vha->dpc_flags); | 2367 | set_bit(UNLOADING, &base_vha->dpc_flags); |
| 2356 | 2368 | ||
| @@ -2975,10 +2987,17 @@ static struct qla_work_evt * | |||
| 2975 | qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type) | 2987 | qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type) |
| 2976 | { | 2988 | { |
| 2977 | struct qla_work_evt *e; | 2989 | struct qla_work_evt *e; |
| 2990 | uint8_t bail; | ||
| 2991 | |||
| 2992 | QLA_VHA_MARK_BUSY(vha, bail); | ||
| 2993 | if (bail) | ||
| 2994 | return NULL; | ||
| 2978 | 2995 | ||
| 2979 | e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC); | 2996 | e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC); |
| 2980 | if (!e) | 2997 | if (!e) { |
| 2998 | QLA_VHA_MARK_NOT_BUSY(vha); | ||
| 2981 | return NULL; | 2999 | return NULL; |
| 3000 | } | ||
| 2982 | 3001 | ||
| 2983 | INIT_LIST_HEAD(&e->list); | 3002 | INIT_LIST_HEAD(&e->list); |
| 2984 | e->type = type; | 3003 | e->type = type; |
| @@ -3135,6 +3154,9 @@ qla2x00_do_work(struct scsi_qla_host *vha) | |||
| 3135 | } | 3154 | } |
| 3136 | if (e->flags & QLA_EVT_FLAG_FREE) | 3155 | if (e->flags & QLA_EVT_FLAG_FREE) |
| 3137 | kfree(e); | 3156 | kfree(e); |
| 3157 | |||
| 3158 | /* For each work completed decrement vha ref count */ | ||
| 3159 | QLA_VHA_MARK_NOT_BUSY(vha); | ||
| 3138 | } | 3160 | } |
| 3139 | } | 3161 | } |
| 3140 | 3162 | ||
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index e75ccb91317d..8edbccb3232d 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h | |||
| @@ -7,9 +7,9 @@ | |||
| 7 | /* | 7 | /* |
| 8 | * Driver version | 8 | * Driver version |
| 9 | */ | 9 | */ |
| 10 | #define QLA2XXX_VERSION "8.03.03-k0" | 10 | #define QLA2XXX_VERSION "8.03.04-k0" |
| 11 | 11 | ||
| 12 | #define QLA_DRIVER_MAJOR_VER 8 | 12 | #define QLA_DRIVER_MAJOR_VER 8 |
| 13 | #define QLA_DRIVER_MINOR_VER 3 | 13 | #define QLA_DRIVER_MINOR_VER 3 |
| 14 | #define QLA_DRIVER_PATCH_VER 3 | 14 | #define QLA_DRIVER_PATCH_VER 4 |
| 15 | #define QLA_DRIVER_BETA_VER 0 | 15 | #define QLA_DRIVER_BETA_VER 0 |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 9ade720422c6..ee02d3838a0a 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
| @@ -1011,8 +1011,8 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask) | |||
| 1011 | 1011 | ||
| 1012 | err_exit: | 1012 | err_exit: |
| 1013 | scsi_release_buffers(cmd); | 1013 | scsi_release_buffers(cmd); |
| 1014 | scsi_put_command(cmd); | ||
| 1015 | cmd->request->special = NULL; | 1014 | cmd->request->special = NULL; |
| 1015 | scsi_put_command(cmd); | ||
| 1016 | return error; | 1016 | return error; |
| 1017 | } | 1017 | } |
| 1018 | EXPORT_SYMBOL(scsi_init_io); | 1018 | EXPORT_SYMBOL(scsi_init_io); |
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 2714becc2eaf..ffa0689ee840 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
| @@ -870,7 +870,7 @@ static int sd_release(struct gendisk *disk, fmode_t mode) | |||
| 870 | 870 | ||
| 871 | SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n")); | 871 | SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n")); |
| 872 | 872 | ||
| 873 | if (atomic_dec_return(&sdkp->openers) && sdev->removable) { | 873 | if (atomic_dec_return(&sdkp->openers) == 0 && sdev->removable) { |
| 874 | if (scsi_block_when_processing_errors(sdev)) | 874 | if (scsi_block_when_processing_errors(sdev)) |
| 875 | scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW); | 875 | scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW); |
| 876 | } | 876 | } |
| @@ -2625,15 +2625,15 @@ module_exit(exit_sd); | |||
| 2625 | static void sd_print_sense_hdr(struct scsi_disk *sdkp, | 2625 | static void sd_print_sense_hdr(struct scsi_disk *sdkp, |
| 2626 | struct scsi_sense_hdr *sshdr) | 2626 | struct scsi_sense_hdr *sshdr) |
| 2627 | { | 2627 | { |
| 2628 | sd_printk(KERN_INFO, sdkp, ""); | 2628 | sd_printk(KERN_INFO, sdkp, " "); |
| 2629 | scsi_show_sense_hdr(sshdr); | 2629 | scsi_show_sense_hdr(sshdr); |
| 2630 | sd_printk(KERN_INFO, sdkp, ""); | 2630 | sd_printk(KERN_INFO, sdkp, " "); |
| 2631 | scsi_show_extd_sense(sshdr->asc, sshdr->ascq); | 2631 | scsi_show_extd_sense(sshdr->asc, sshdr->ascq); |
| 2632 | } | 2632 | } |
| 2633 | 2633 | ||
| 2634 | static void sd_print_result(struct scsi_disk *sdkp, int result) | 2634 | static void sd_print_result(struct scsi_disk *sdkp, int result) |
| 2635 | { | 2635 | { |
| 2636 | sd_printk(KERN_INFO, sdkp, ""); | 2636 | sd_printk(KERN_INFO, sdkp, " "); |
| 2637 | scsi_show_result(result); | 2637 | scsi_show_result(result); |
| 2638 | } | 2638 | } |
| 2639 | 2639 | ||
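The sd_release() change above fixes an inverted test: atomic_dec_return() yields the counter's value after the decrement, so the old condition was true for every close except the last one, and medium removal was never re-allowed when the final opener went away. A tiny user-space analogue with C11 atomics (illustrative only):

    #include <stdatomic.h>
    #include <stdio.h>

    int main(void)
    {
        atomic_int openers = 3;

        while (atomic_load(&openers) > 0) {
            /* atomic_dec_return() semantics: the value *after* the decrement. */
            int after = atomic_fetch_sub(&openers, 1) - 1;

            if (after == 0)
                printf("last close: allow medium removal\n");
            else
                printf("still %d opener(s): keep removal blocked\n", after);
        }
        return 0;
    }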
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c index a7bc8b7b09ac..2c3e89ddf069 100644 --- a/drivers/scsi/sym53c8xx_2/sym_hipd.c +++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c | |||
| @@ -72,10 +72,7 @@ static void sym_printl_hex(u_char *p, int n) | |||
| 72 | 72 | ||
| 73 | static void sym_print_msg(struct sym_ccb *cp, char *label, u_char *msg) | 73 | static void sym_print_msg(struct sym_ccb *cp, char *label, u_char *msg) |
| 74 | { | 74 | { |
| 75 | if (label) | 75 | sym_print_addr(cp->cmd, "%s: ", label); |
| 76 | sym_print_addr(cp->cmd, "%s: ", label); | ||
| 77 | else | ||
| 78 | sym_print_addr(cp->cmd, ""); | ||
| 79 | 76 | ||
| 80 | spi_print_msg(msg); | 77 | spi_print_msg(msg); |
| 81 | printf("\n"); | 78 | printf("\n"); |
| @@ -4558,7 +4555,8 @@ static void sym_int_sir(struct sym_hcb *np) | |||
| 4558 | switch (np->msgin [2]) { | 4555 | switch (np->msgin [2]) { |
| 4559 | case M_X_MODIFY_DP: | 4556 | case M_X_MODIFY_DP: |
| 4560 | if (DEBUG_FLAGS & DEBUG_POINTER) | 4557 | if (DEBUG_FLAGS & DEBUG_POINTER) |
| 4561 | sym_print_msg(cp, NULL, np->msgin); | 4558 | sym_print_msg(cp, "extended msg ", |
| 4559 | np->msgin); | ||
| 4562 | tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) + | 4560 | tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) + |
| 4563 | (np->msgin[5]<<8) + (np->msgin[6]); | 4561 | (np->msgin[5]<<8) + (np->msgin[6]); |
| 4564 | sym_modify_dp(np, tp, cp, tmp); | 4562 | sym_modify_dp(np, tp, cp, tmp); |
| @@ -4585,7 +4583,7 @@ static void sym_int_sir(struct sym_hcb *np) | |||
| 4585 | */ | 4583 | */ |
| 4586 | case M_IGN_RESIDUE: | 4584 | case M_IGN_RESIDUE: |
| 4587 | if (DEBUG_FLAGS & DEBUG_POINTER) | 4585 | if (DEBUG_FLAGS & DEBUG_POINTER) |
| 4588 | sym_print_msg(cp, NULL, np->msgin); | 4586 | sym_print_msg(cp, "1 or 2 byte ", np->msgin); |
| 4589 | if (cp->host_flags & HF_SENSE) | 4587 | if (cp->host_flags & HF_SENSE) |
| 4590 | OUTL_DSP(np, SCRIPTA_BA(np, clrack)); | 4588 | OUTL_DSP(np, SCRIPTA_BA(np, clrack)); |
| 4591 | else | 4589 | else |
diff --git a/drivers/serial/bfin_sport_uart.c b/drivers/serial/bfin_sport_uart.c index e57fb3d228e2..5318dd3774ae 100644 --- a/drivers/serial/bfin_sport_uart.c +++ b/drivers/serial/bfin_sport_uart.c | |||
| @@ -121,7 +121,7 @@ static int sport_uart_setup(struct sport_uart_port *up, int size, int baud_rate) | |||
| 121 | unsigned int sclk = get_sclk(); | 121 | unsigned int sclk = get_sclk(); |
| 122 | 122 | ||
| 123 | /* Set TCR1 and TCR2, TFSR is not enabled for uart */ | 123 | /* Set TCR1 and TCR2, TFSR is not enabled for uart */ |
| 124 | SPORT_PUT_TCR1(up, (ITFS | TLSBIT | ITCLK)); | 124 | SPORT_PUT_TCR1(up, (LATFS | ITFS | TFSR | TLSBIT | ITCLK)); |
| 125 | SPORT_PUT_TCR2(up, size + 1); | 125 | SPORT_PUT_TCR2(up, size + 1); |
| 126 | pr_debug("%s TCR1:%x, TCR2:%x\n", __func__, SPORT_GET_TCR1(up), SPORT_GET_TCR2(up)); | 126 | pr_debug("%s TCR1:%x, TCR2:%x\n", __func__, SPORT_GET_TCR1(up), SPORT_GET_TCR2(up)); |
| 127 | 127 | ||
diff --git a/drivers/staging/comedi/drivers/das08_cs.c b/drivers/staging/comedi/drivers/das08_cs.c index c6aa52f8dcee..48d9fb1227df 100644 --- a/drivers/staging/comedi/drivers/das08_cs.c +++ b/drivers/staging/comedi/drivers/das08_cs.c | |||
| @@ -222,7 +222,6 @@ static int das08_pcmcia_config_loop(struct pcmcia_device *p_dev, | |||
| 222 | p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; | 222 | p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; |
| 223 | p_dev->resource[0]->flags |= | 223 | p_dev->resource[0]->flags |= |
| 224 | pcmcia_io_cfg_data_width(io->flags); | 224 | pcmcia_io_cfg_data_width(io->flags); |
| 225 | p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK; | ||
| 226 | p_dev->resource[0]->start = io->win[0].base; | 225 | p_dev->resource[0]->start = io->win[0].base; |
| 227 | p_dev->resource[0]->end = io->win[0].len; | 226 | p_dev->resource[0]->end = io->win[0].len; |
| 228 | if (io->nwin > 1) { | 227 | if (io->nwin > 1) { |
diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/staging/hv/netvsc_drv.c index 56e11575c977..64a01147ecae 100644 --- a/drivers/staging/hv/netvsc_drv.c +++ b/drivers/staging/hv/netvsc_drv.c | |||
| @@ -327,6 +327,9 @@ static const struct net_device_ops device_ops = { | |||
| 327 | .ndo_stop = netvsc_close, | 327 | .ndo_stop = netvsc_close, |
| 328 | .ndo_start_xmit = netvsc_start_xmit, | 328 | .ndo_start_xmit = netvsc_start_xmit, |
| 329 | .ndo_set_multicast_list = netvsc_set_multicast_list, | 329 | .ndo_set_multicast_list = netvsc_set_multicast_list, |
| 330 | .ndo_change_mtu = eth_change_mtu, | ||
| 331 | .ndo_validate_addr = eth_validate_addr, | ||
| 332 | .ndo_set_mac_address = eth_mac_addr, | ||
| 330 | }; | 333 | }; |
| 331 | 334 | ||
| 332 | static int netvsc_probe(struct device *device) | 335 | static int netvsc_probe(struct device *device) |
diff --git a/drivers/staging/hv/ring_buffer.c b/drivers/staging/hv/ring_buffer.c index 17bc7626f70a..d78c569ac94a 100644 --- a/drivers/staging/hv/ring_buffer.c +++ b/drivers/staging/hv/ring_buffer.c | |||
| @@ -193,8 +193,7 @@ Description: | |||
| 193 | static inline u64 | 193 | static inline u64 |
| 194 | GetRingBufferIndices(struct hv_ring_buffer_info *RingInfo) | 194 | GetRingBufferIndices(struct hv_ring_buffer_info *RingInfo) |
| 195 | { | 195 | { |
| 196 | return ((u64)RingInfo->RingBuffer->WriteIndex << 32) | 196 | return (u64)RingInfo->RingBuffer->WriteIndex << 32; |
| 197 | || RingInfo->RingBuffer->ReadIndex; | ||
| 198 | } | 197 | } |
| 199 | 198 | ||
| 200 | 199 | ||
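The ring_buffer.c hunk above removes a line that combined the two ring indices with the logical operator '||', which collapses the expression to 0 or 1 rather than packing ReadIndex into the low 32 bits; the fix simply returns the shifted WriteIndex alone. The snippet below (illustrative, not the Hyper-V code) shows the difference between the two operators for this kind of packing.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t write_index = 0x10, read_index = 0x20;

        /* What the removed line computed: || is logical OR, so the whole
         * expression collapses to 0 or 1 (here 1, because the left side is
         * non-zero). Recent GCC versions warn about '<<' in boolean context. */
        uint64_t wrong  = ((uint64_t)write_index << 32) || read_index;

        /* Packing both 32-bit indices into one u64 needs bitwise OR. */
        uint64_t packed = ((uint64_t)write_index << 32) | read_index;

        printf("wrong=%llu packed=0x%llx\n",
               (unsigned long long)wrong, (unsigned long long)packed);
        return 0;
    }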
diff --git a/drivers/staging/hv/storvsc_api.h b/drivers/staging/hv/storvsc_api.h index 0063bde9a4b2..8505a1c5f9ee 100644 --- a/drivers/staging/hv/storvsc_api.h +++ b/drivers/staging/hv/storvsc_api.h | |||
| @@ -28,10 +28,10 @@ | |||
| 28 | #include "vmbus_api.h" | 28 | #include "vmbus_api.h" |
| 29 | 29 | ||
| 30 | /* Defines */ | 30 | /* Defines */ |
| 31 | #define STORVSC_RING_BUFFER_SIZE (10*PAGE_SIZE) | 31 | #define STORVSC_RING_BUFFER_SIZE (20*PAGE_SIZE) |
| 32 | #define BLKVSC_RING_BUFFER_SIZE (20*PAGE_SIZE) | 32 | #define BLKVSC_RING_BUFFER_SIZE (20*PAGE_SIZE) |
| 33 | 33 | ||
| 34 | #define STORVSC_MAX_IO_REQUESTS 64 | 34 | #define STORVSC_MAX_IO_REQUESTS 128 |
| 35 | 35 | ||
| 36 | /* | 36 | /* |
| 37 | * In Hyper-V, each port/path/target maps to 1 scsi host adapter. In | 37 | * In Hyper-V, each port/path/target maps to 1 scsi host adapter. In |
diff --git a/drivers/staging/hv/storvsc_drv.c b/drivers/staging/hv/storvsc_drv.c index 075b61bd492f..62882a437aa4 100644 --- a/drivers/staging/hv/storvsc_drv.c +++ b/drivers/staging/hv/storvsc_drv.c | |||
| @@ -495,7 +495,7 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl, | |||
| 495 | 495 | ||
| 496 | /* ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE); */ | 496 | /* ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE); */ |
| 497 | 497 | ||
| 498 | if (j == 0) | 498 | if (bounce_addr == 0) |
| 499 | bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0); | 499 | bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0); |
| 500 | 500 | ||
| 501 | while (srclen) { | 501 | while (srclen) { |
| @@ -556,7 +556,7 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl, | |||
| 556 | destlen = orig_sgl[i].length; | 556 | destlen = orig_sgl[i].length; |
| 557 | /* ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE); */ | 557 | /* ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE); */ |
| 558 | 558 | ||
| 559 | if (j == 0) | 559 | if (bounce_addr == 0) |
| 560 | bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0); | 560 | bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0); |
| 561 | 561 | ||
| 562 | while (destlen) { | 562 | while (destlen) { |
| @@ -615,6 +615,7 @@ static int storvsc_queuecommand(struct scsi_cmnd *scmnd, | |||
| 615 | unsigned int request_size = 0; | 615 | unsigned int request_size = 0; |
| 616 | int i; | 616 | int i; |
| 617 | struct scatterlist *sgl; | 617 | struct scatterlist *sgl; |
| 618 | unsigned int sg_count = 0; | ||
| 618 | 619 | ||
| 619 | DPRINT_DBG(STORVSC_DRV, "scmnd %p dir %d, use_sg %d buf %p len %d " | 620 | DPRINT_DBG(STORVSC_DRV, "scmnd %p dir %d, use_sg %d buf %p len %d " |
| 620 | "queue depth %d tagged %d", scmnd, scmnd->sc_data_direction, | 621 | "queue depth %d tagged %d", scmnd, scmnd->sc_data_direction, |
| @@ -697,6 +698,7 @@ static int storvsc_queuecommand(struct scsi_cmnd *scmnd, | |||
| 697 | request->DataBuffer.Length = scsi_bufflen(scmnd); | 698 | request->DataBuffer.Length = scsi_bufflen(scmnd); |
| 698 | if (scsi_sg_count(scmnd)) { | 699 | if (scsi_sg_count(scmnd)) { |
| 699 | sgl = (struct scatterlist *)scsi_sglist(scmnd); | 700 | sgl = (struct scatterlist *)scsi_sglist(scmnd); |
| 701 | sg_count = scsi_sg_count(scmnd); | ||
| 700 | 702 | ||
| 701 | /* check if we need to bounce the sgl */ | 703 | /* check if we need to bounce the sgl */ |
| 702 | if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) { | 704 | if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) { |
| @@ -731,15 +733,16 @@ static int storvsc_queuecommand(struct scsi_cmnd *scmnd, | |||
| 731 | scsi_sg_count(scmnd)); | 733 | scsi_sg_count(scmnd)); |
| 732 | 734 | ||
| 733 | sgl = cmd_request->bounce_sgl; | 735 | sgl = cmd_request->bounce_sgl; |
| 736 | sg_count = cmd_request->bounce_sgl_count; | ||
| 734 | } | 737 | } |
| 735 | 738 | ||
| 736 | request->DataBuffer.Offset = sgl[0].offset; | 739 | request->DataBuffer.Offset = sgl[0].offset; |
| 737 | 740 | ||
| 738 | for (i = 0; i < scsi_sg_count(scmnd); i++) { | 741 | for (i = 0; i < sg_count; i++) { |
| 739 | DPRINT_DBG(STORVSC_DRV, "sgl[%d] len %d offset %d\n", | 742 | DPRINT_DBG(STORVSC_DRV, "sgl[%d] len %d offset %d\n", |
| 740 | i, sgl[i].length, sgl[i].offset); | 743 | i, sgl[i].length, sgl[i].offset); |
| 741 | request->DataBuffer.PfnArray[i] = | 744 | request->DataBuffer.PfnArray[i] = |
| 742 | page_to_pfn(sg_page((&sgl[i]))); | 745 | page_to_pfn(sg_page((&sgl[i]))); |
| 743 | } | 746 | } |
| 744 | } else if (scsi_sglist(scmnd)) { | 747 | } else if (scsi_sglist(scmnd)) { |
| 745 | /* ASSERT(scsi_bufflen(scmnd) <= PAGE_SIZE); */ | 748 | /* ASSERT(scsi_bufflen(scmnd) <= PAGE_SIZE); */ |
diff --git a/drivers/staging/octeon/Kconfig b/drivers/staging/octeon/Kconfig index 638ad6b35891..9493128e5fd2 100644 --- a/drivers/staging/octeon/Kconfig +++ b/drivers/staging/octeon/Kconfig | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | config OCTEON_ETHERNET | 1 | config OCTEON_ETHERNET |
| 2 | tristate "Cavium Networks Octeon Ethernet support" | 2 | tristate "Cavium Networks Octeon Ethernet support" |
| 3 | depends on CPU_CAVIUM_OCTEON | 3 | depends on CPU_CAVIUM_OCTEON && NETDEVICES |
| 4 | select PHYLIB | 4 | select PHYLIB |
| 5 | select MDIO_OCTEON | 5 | select MDIO_OCTEON |
| 6 | help | 6 | help |
diff --git a/drivers/staging/rt2860/usb_main_dev.c b/drivers/staging/rt2860/usb_main_dev.c index a0fe31de0a6d..ebf9074a9083 100644 --- a/drivers/staging/rt2860/usb_main_dev.c +++ b/drivers/staging/rt2860/usb_main_dev.c | |||
| @@ -44,6 +44,7 @@ struct usb_device_id rtusb_usb_id[] = { | |||
| 44 | {USB_DEVICE(0x07B8, 0x2870)}, /* AboCom */ | 44 | {USB_DEVICE(0x07B8, 0x2870)}, /* AboCom */ |
| 45 | {USB_DEVICE(0x07B8, 0x2770)}, /* AboCom */ | 45 | {USB_DEVICE(0x07B8, 0x2770)}, /* AboCom */ |
| 46 | {USB_DEVICE(0x0DF6, 0x0039)}, /* Sitecom 2770 */ | 46 | {USB_DEVICE(0x0DF6, 0x0039)}, /* Sitecom 2770 */ |
| 47 | {USB_DEVICE(0x0DF6, 0x003F)}, /* Sitecom 2770 */ | ||
| 47 | {USB_DEVICE(0x083A, 0x7512)}, /* Arcadyan 2770 */ | 48 | {USB_DEVICE(0x083A, 0x7512)}, /* Arcadyan 2770 */ |
| 48 | {USB_DEVICE(0x0789, 0x0162)}, /* Logitec 2870 */ | 49 | {USB_DEVICE(0x0789, 0x0162)}, /* Logitec 2870 */ |
| 49 | {USB_DEVICE(0x0789, 0x0163)}, /* Logitec 2870 */ | 50 | {USB_DEVICE(0x0789, 0x0163)}, /* Logitec 2870 */ |
| @@ -95,7 +96,8 @@ struct usb_device_id rtusb_usb_id[] = { | |||
| 95 | {USB_DEVICE(0x050d, 0x815c)}, | 96 | {USB_DEVICE(0x050d, 0x815c)}, |
| 96 | {USB_DEVICE(0x1482, 0x3C09)}, /* Abocom */ | 97 | {USB_DEVICE(0x1482, 0x3C09)}, /* Abocom */ |
| 97 | {USB_DEVICE(0x14B2, 0x3C09)}, /* Alpha */ | 98 | {USB_DEVICE(0x14B2, 0x3C09)}, /* Alpha */ |
| 98 | {USB_DEVICE(0x04E8, 0x2018)}, /* samsung */ | 99 | {USB_DEVICE(0x04E8, 0x2018)}, /* samsung linkstick2 */ |
| 100 | {USB_DEVICE(0x1690, 0x0740)}, /* Askey */ | ||
| 99 | {USB_DEVICE(0x5A57, 0x0280)}, /* Zinwell */ | 101 | {USB_DEVICE(0x5A57, 0x0280)}, /* Zinwell */ |
| 100 | {USB_DEVICE(0x5A57, 0x0282)}, /* Zinwell */ | 102 | {USB_DEVICE(0x5A57, 0x0282)}, /* Zinwell */ |
| 101 | {USB_DEVICE(0x7392, 0x7718)}, | 103 | {USB_DEVICE(0x7392, 0x7718)}, |
| @@ -105,21 +107,34 @@ struct usb_device_id rtusb_usb_id[] = { | |||
| 105 | {USB_DEVICE(0x1737, 0x0071)}, /* Linksys WUSB600N */ | 107 | {USB_DEVICE(0x1737, 0x0071)}, /* Linksys WUSB600N */ |
| 106 | {USB_DEVICE(0x0411, 0x00e8)}, /* Buffalo WLI-UC-G300N */ | 108 | {USB_DEVICE(0x0411, 0x00e8)}, /* Buffalo WLI-UC-G300N */ |
| 107 | {USB_DEVICE(0x050d, 0x815c)}, /* Belkin F5D8053 */ | 109 | {USB_DEVICE(0x050d, 0x815c)}, /* Belkin F5D8053 */ |
| 110 | {USB_DEVICE(0x100D, 0x9031)}, /* Motorola 2770 */ | ||
| 108 | #endif /* RT2870 // */ | 111 | #endif /* RT2870 // */ |
| 109 | #ifdef RT3070 | 112 | #ifdef RT3070 |
| 110 | {USB_DEVICE(0x148F, 0x3070)}, /* Ralink 3070 */ | 113 | {USB_DEVICE(0x148F, 0x3070)}, /* Ralink 3070 */ |
| 111 | {USB_DEVICE(0x148F, 0x3071)}, /* Ralink 3071 */ | 114 | {USB_DEVICE(0x148F, 0x3071)}, /* Ralink 3071 */ |
| 112 | {USB_DEVICE(0x148F, 0x3072)}, /* Ralink 3072 */ | 115 | {USB_DEVICE(0x148F, 0x3072)}, /* Ralink 3072 */ |
| 113 | {USB_DEVICE(0x0DB0, 0x3820)}, /* Ralink 3070 */ | 116 | {USB_DEVICE(0x0DB0, 0x3820)}, /* Ralink 3070 */ |
| 117 | {USB_DEVICE(0x0DB0, 0x871C)}, /* Ralink 3070 */ | ||
| 118 | {USB_DEVICE(0x0DB0, 0x822C)}, /* Ralink 3070 */ | ||
| 119 | {USB_DEVICE(0x0DB0, 0x871B)}, /* Ralink 3070 */ | ||
| 120 | {USB_DEVICE(0x0DB0, 0x822B)}, /* Ralink 3070 */ | ||
| 114 | {USB_DEVICE(0x0DF6, 0x003E)}, /* Sitecom 3070 */ | 121 | {USB_DEVICE(0x0DF6, 0x003E)}, /* Sitecom 3070 */ |
| 115 | {USB_DEVICE(0x0DF6, 0x0042)}, /* Sitecom 3072 */ | 122 | {USB_DEVICE(0x0DF6, 0x0042)}, /* Sitecom 3072 */ |
| 123 | {USB_DEVICE(0x0DF6, 0x0048)}, /* Sitecom 3070 */ | ||
| 124 | {USB_DEVICE(0x0DF6, 0x0047)}, /* Sitecom 3071 */ | ||
| 116 | {USB_DEVICE(0x14B2, 0x3C12)}, /* AL 3070 */ | 125 | {USB_DEVICE(0x14B2, 0x3C12)}, /* AL 3070 */ |
| 117 | {USB_DEVICE(0x18C5, 0x0012)}, /* Corega 3070 */ | 126 | {USB_DEVICE(0x18C5, 0x0012)}, /* Corega 3070 */ |
| 118 | {USB_DEVICE(0x083A, 0x7511)}, /* Arcadyan 3070 */ | 127 | {USB_DEVICE(0x083A, 0x7511)}, /* Arcadyan 3070 */ |
| 128 | {USB_DEVICE(0x083A, 0xA701)}, /* SMC 3070 */ | ||
| 129 | {USB_DEVICE(0x083A, 0xA702)}, /* SMC 3072 */ | ||
| 119 | {USB_DEVICE(0x1740, 0x9703)}, /* EnGenius 3070 */ | 130 | {USB_DEVICE(0x1740, 0x9703)}, /* EnGenius 3070 */ |
| 120 | {USB_DEVICE(0x1740, 0x9705)}, /* EnGenius 3071 */ | 131 | {USB_DEVICE(0x1740, 0x9705)}, /* EnGenius 3071 */ |
| 121 | {USB_DEVICE(0x1740, 0x9706)}, /* EnGenius 3072 */ | 132 | {USB_DEVICE(0x1740, 0x9706)}, /* EnGenius 3072 */ |
| 133 | {USB_DEVICE(0x1740, 0x9707)}, /* EnGenius 3070 */ | ||
| 134 | {USB_DEVICE(0x1740, 0x9708)}, /* EnGenius 3071 */ | ||
| 135 | {USB_DEVICE(0x1740, 0x9709)}, /* EnGenius 3072 */ | ||
| 122 | {USB_DEVICE(0x13D3, 0x3273)}, /* AzureWave 3070 */ | 136 | {USB_DEVICE(0x13D3, 0x3273)}, /* AzureWave 3070 */ |
| 137 | {USB_DEVICE(0x13D3, 0x3305)}, /* AzureWave 3070*/ | ||
| 123 | {USB_DEVICE(0x1044, 0x800D)}, /* Gigabyte GN-WB32L 3070 */ | 138 | {USB_DEVICE(0x1044, 0x800D)}, /* Gigabyte GN-WB32L 3070 */ |
| 124 | {USB_DEVICE(0x2019, 0xAB25)}, /* Planex Communications, Inc. RT3070 */ | 139 | {USB_DEVICE(0x2019, 0xAB25)}, /* Planex Communications, Inc. RT3070 */ |
| 125 | {USB_DEVICE(0x07B8, 0x3070)}, /* AboCom 3070 */ | 140 | {USB_DEVICE(0x07B8, 0x3070)}, /* AboCom 3070 */ |
| @@ -132,14 +147,36 @@ struct usb_device_id rtusb_usb_id[] = { | |||
| 132 | {USB_DEVICE(0x07D1, 0x3C0D)}, /* D-Link 3070 */ | 147 | {USB_DEVICE(0x07D1, 0x3C0D)}, /* D-Link 3070 */ |
| 133 | {USB_DEVICE(0x07D1, 0x3C0E)}, /* D-Link 3070 */ | 148 | {USB_DEVICE(0x07D1, 0x3C0E)}, /* D-Link 3070 */ |
| 134 | {USB_DEVICE(0x07D1, 0x3C0F)}, /* D-Link 3070 */ | 149 | {USB_DEVICE(0x07D1, 0x3C0F)}, /* D-Link 3070 */ |
| 150 | {USB_DEVICE(0x07D1, 0x3C16)}, /* D-Link 3070 */ | ||
| 151 | {USB_DEVICE(0x07D1, 0x3C17)}, /* D-Link 8070 */ | ||
| 135 | {USB_DEVICE(0x1D4D, 0x000C)}, /* Pegatron Corporation 3070 */ | 152 | {USB_DEVICE(0x1D4D, 0x000C)}, /* Pegatron Corporation 3070 */ |
| 136 | {USB_DEVICE(0x1D4D, 0x000E)}, /* Pegatron Corporation 3070 */ | 153 | {USB_DEVICE(0x1D4D, 0x000E)}, /* Pegatron Corporation 3070 */ |
| 137 | {USB_DEVICE(0x5A57, 0x5257)}, /* Zinwell 3070 */ | 154 | {USB_DEVICE(0x5A57, 0x5257)}, /* Zinwell 3070 */ |
| 138 | {USB_DEVICE(0x5A57, 0x0283)}, /* Zinwell 3072 */ | 155 | {USB_DEVICE(0x5A57, 0x0283)}, /* Zinwell 3072 */ |
| 139 | {USB_DEVICE(0x04BB, 0x0945)}, /* I-O DATA 3072 */ | 156 | {USB_DEVICE(0x04BB, 0x0945)}, /* I-O DATA 3072 */ |
| 157 | {USB_DEVICE(0x04BB, 0x0947)}, /* I-O DATA 3070 */ | ||
| 158 | {USB_DEVICE(0x04BB, 0x0948)}, /* I-O DATA 3072 */ | ||
| 140 | {USB_DEVICE(0x203D, 0x1480)}, /* Encore 3070 */ | 159 | {USB_DEVICE(0x203D, 0x1480)}, /* Encore 3070 */ |
| 160 | {USB_DEVICE(0x20B8, 0x8888)}, /* PARA INDUSTRIAL 3070 */ | ||
| 161 | {USB_DEVICE(0x0B05, 0x1784)}, /* Asus 3072 */ | ||
| 162 | {USB_DEVICE(0x203D, 0x14A9)}, /* Encore 3070*/ | ||
| 163 | {USB_DEVICE(0x0DB0, 0x899A)}, /* MSI 3070*/ | ||
| 164 | {USB_DEVICE(0x0DB0, 0x3870)}, /* MSI 3070*/ | ||
| 165 | {USB_DEVICE(0x0DB0, 0x870A)}, /* MSI 3070*/ | ||
| 166 | {USB_DEVICE(0x0DB0, 0x6899)}, /* MSI 3070 */ | ||
| 167 | {USB_DEVICE(0x0DB0, 0x3822)}, /* MSI 3070 */ | ||
| 168 | {USB_DEVICE(0x0DB0, 0x3871)}, /* MSI 3070 */ | ||
| 169 | {USB_DEVICE(0x0DB0, 0x871A)}, /* MSI 3070 */ | ||
| 170 | {USB_DEVICE(0x0DB0, 0x822A)}, /* MSI 3070 */ | ||
| 171 | {USB_DEVICE(0x0DB0, 0x3821)}, /* Ralink 3070 */ | ||
| 172 | {USB_DEVICE(0x0DB0, 0x821A)}, /* Ralink 3070 */ | ||
| 173 | {USB_DEVICE(0x083A, 0xA703)}, /* IO-MAGIC */ | ||
| 174 | {USB_DEVICE(0x13D3, 0x3307)}, /* Azurewave */ | ||
| 175 | {USB_DEVICE(0x13D3, 0x3321)}, /* Azurewave */ | ||
| 176 | {USB_DEVICE(0x07FA, 0x7712)}, /* Edimax */ | ||
| 177 | {USB_DEVICE(0x0789, 0x0166)}, /* Edimax */ | ||
| 178 | {USB_DEVICE(0x148F, 0x2070)}, /* Edimax */ | ||
| 141 | #endif /* RT3070 // */ | 179 | #endif /* RT3070 // */ |
| 142 | {USB_DEVICE(0x0DF6, 0x003F)}, /* Sitecom WL-608 */ | ||
| 143 | {USB_DEVICE(0x1737, 0x0077)}, /* Linksys WUSB54GC-EU v3 */ | 180 | {USB_DEVICE(0x1737, 0x0077)}, /* Linksys WUSB54GC-EU v3 */ |
| 144 | {USB_DEVICE(0x2001, 0x3C09)}, /* D-Link */ | 181 | {USB_DEVICE(0x2001, 0x3C09)}, /* D-Link */ |
| 145 | {USB_DEVICE(0x2001, 0x3C0A)}, /* D-Link 3072 */ | 182 | {USB_DEVICE(0x2001, 0x3C0A)}, /* D-Link 3072 */ |
diff --git a/drivers/staging/spectra/Kconfig b/drivers/staging/spectra/Kconfig index 5e2ffefb60af..d231ae27299d 100644 --- a/drivers/staging/spectra/Kconfig +++ b/drivers/staging/spectra/Kconfig | |||
| @@ -2,6 +2,7 @@ | |||
| 2 | menuconfig SPECTRA | 2 | menuconfig SPECTRA |
| 3 | tristate "Denali Spectra Flash Translation Layer" | 3 | tristate "Denali Spectra Flash Translation Layer" |
| 4 | depends on BLOCK | 4 | depends on BLOCK |
| 5 | depends on X86_MRST | ||
| 5 | default n | 6 | default n |
| 6 | ---help--- | 7 | ---help--- |
| 7 | Enable the FTL pseudo-filesystem used with the NAND Flash | 8 | Enable the FTL pseudo-filesystem used with the NAND Flash |
diff --git a/drivers/staging/spectra/ffsport.c b/drivers/staging/spectra/ffsport.c index 44a7fbe7eccd..fa21a0fd8e84 100644 --- a/drivers/staging/spectra/ffsport.c +++ b/drivers/staging/spectra/ffsport.c | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #include <linux/log2.h> | 28 | #include <linux/log2.h> |
| 29 | #include <linux/init.h> | 29 | #include <linux/init.h> |
| 30 | #include <linux/smp_lock.h> | 30 | #include <linux/smp_lock.h> |
| 31 | #include <linux/slab.h> | ||
| 31 | 32 | ||
| 32 | /**** Helper functions used for Div, Remainder operation on u64 ****/ | 33 | /**** Helper functions used for Div, Remainder operation on u64 ****/ |
| 33 | 34 | ||
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c index 368c30a9d5ff..4af83d5318f2 100644 --- a/drivers/staging/wlan-ng/cfg80211.c +++ b/drivers/staging/wlan-ng/cfg80211.c | |||
| @@ -219,6 +219,7 @@ int prism2_get_key(struct wiphy *wiphy, struct net_device *dev, | |||
| 219 | return -ENOENT; | 219 | return -ENOENT; |
| 220 | params.key_len = len; | 220 | params.key_len = len; |
| 221 | params.key = wlandev->wep_keys[key_index]; | 221 | params.key = wlandev->wep_keys[key_index]; |
| 222 | params.seq_len = 0; | ||
| 222 | 223 | ||
| 223 | callback(cookie, ¶ms); | 224 | callback(cookie, ¶ms); |
| 224 | 225 | ||
| @@ -735,6 +736,8 @@ struct wiphy *wlan_create_wiphy(struct device *dev, wlandevice_t *wlandev) | |||
| 735 | priv->band.n_channels = ARRAY_SIZE(prism2_channels); | 736 | priv->band.n_channels = ARRAY_SIZE(prism2_channels); |
| 736 | priv->band.bitrates = priv->rates; | 737 | priv->band.bitrates = priv->rates; |
| 737 | priv->band.n_bitrates = ARRAY_SIZE(prism2_rates); | 738 | priv->band.n_bitrates = ARRAY_SIZE(prism2_rates); |
| 739 | priv->band.band = IEEE80211_BAND_2GHZ; | ||
| 740 | priv->band.ht_cap.ht_supported = false; | ||
| 738 | wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band; | 741 | wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band; |
| 739 | 742 | ||
| 740 | set_wiphy_dev(wiphy, dev); | 743 | set_wiphy_dev(wiphy, dev); |
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c index 77d4d715a789..722c840ac638 100644 --- a/drivers/staging/zram/zram_drv.c +++ b/drivers/staging/zram/zram_drv.c | |||
| @@ -769,6 +769,7 @@ static int __init zram_init(void) | |||
| 769 | free_devices: | 769 | free_devices: |
| 770 | while (dev_id) | 770 | while (dev_id) |
| 771 | destroy_device(&devices[--dev_id]); | 771 | destroy_device(&devices[--dev_id]); |
| 772 | kfree(devices); | ||
| 772 | unregister: | 773 | unregister: |
| 773 | unregister_blkdev(zram_major, "zram"); | 774 | unregister_blkdev(zram_major, "zram"); |
| 774 | out: | 775 | out: |
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c index 593fc5e2d2e6..5af23cc5ea9f 100644 --- a/drivers/usb/atm/cxacru.c +++ b/drivers/usb/atm/cxacru.c | |||
| @@ -1127,6 +1127,7 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance, | |||
| 1127 | { | 1127 | { |
| 1128 | struct cxacru_data *instance; | 1128 | struct cxacru_data *instance; |
| 1129 | struct usb_device *usb_dev = interface_to_usbdev(intf); | 1129 | struct usb_device *usb_dev = interface_to_usbdev(intf); |
| 1130 | struct usb_host_endpoint *cmd_ep = usb_dev->ep_in[CXACRU_EP_CMD]; | ||
| 1130 | int ret; | 1131 | int ret; |
| 1131 | 1132 | ||
| 1132 | /* instance init */ | 1133 | /* instance init */ |
| @@ -1171,15 +1172,34 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance, | |||
| 1171 | goto fail; | 1172 | goto fail; |
| 1172 | } | 1173 | } |
| 1173 | 1174 | ||
| 1174 | usb_fill_int_urb(instance->rcv_urb, | 1175 | if (!cmd_ep) { |
| 1176 | dbg("cxacru_bind: no command endpoint"); | ||
| 1177 | ret = -ENODEV; | ||
| 1178 | goto fail; | ||
| 1179 | } | ||
| 1180 | |||
| 1181 | if ((cmd_ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) | ||
| 1182 | == USB_ENDPOINT_XFER_INT) { | ||
| 1183 | usb_fill_int_urb(instance->rcv_urb, | ||
| 1175 | usb_dev, usb_rcvintpipe(usb_dev, CXACRU_EP_CMD), | 1184 | usb_dev, usb_rcvintpipe(usb_dev, CXACRU_EP_CMD), |
| 1176 | instance->rcv_buf, PAGE_SIZE, | 1185 | instance->rcv_buf, PAGE_SIZE, |
| 1177 | cxacru_blocking_completion, &instance->rcv_done, 1); | 1186 | cxacru_blocking_completion, &instance->rcv_done, 1); |
| 1178 | 1187 | ||
| 1179 | usb_fill_int_urb(instance->snd_urb, | 1188 | usb_fill_int_urb(instance->snd_urb, |
| 1180 | usb_dev, usb_sndintpipe(usb_dev, CXACRU_EP_CMD), | 1189 | usb_dev, usb_sndintpipe(usb_dev, CXACRU_EP_CMD), |
| 1181 | instance->snd_buf, PAGE_SIZE, | 1190 | instance->snd_buf, PAGE_SIZE, |
| 1182 | cxacru_blocking_completion, &instance->snd_done, 4); | 1191 | cxacru_blocking_completion, &instance->snd_done, 4); |
| 1192 | } else { | ||
| 1193 | usb_fill_bulk_urb(instance->rcv_urb, | ||
| 1194 | usb_dev, usb_rcvbulkpipe(usb_dev, CXACRU_EP_CMD), | ||
| 1195 | instance->rcv_buf, PAGE_SIZE, | ||
| 1196 | cxacru_blocking_completion, &instance->rcv_done); | ||
| 1197 | |||
| 1198 | usb_fill_bulk_urb(instance->snd_urb, | ||
| 1199 | usb_dev, usb_sndbulkpipe(usb_dev, CXACRU_EP_CMD), | ||
| 1200 | instance->snd_buf, PAGE_SIZE, | ||
| 1201 | cxacru_blocking_completion, &instance->snd_done); | ||
| 1202 | } | ||
| 1183 | 1203 | ||
| 1184 | mutex_init(&instance->cm_serialize); | 1204 | mutex_init(&instance->cm_serialize); |
| 1185 | 1205 | ||
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 1833b3a71515..bc62fae0680f 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c | |||
| @@ -965,7 +965,8 @@ static int acm_probe(struct usb_interface *intf, | |||
| 965 | } | 965 | } |
| 966 | 966 | ||
| 967 | if (!buflen) { | 967 | if (!buflen) { |
| 968 | if (intf->cur_altsetting->endpoint->extralen && | 968 | if (intf->cur_altsetting->endpoint && |
| 969 | intf->cur_altsetting->endpoint->extralen && | ||
| 969 | intf->cur_altsetting->endpoint->extra) { | 970 | intf->cur_altsetting->endpoint->extra) { |
| 970 | dev_dbg(&intf->dev, | 971 | dev_dbg(&intf->dev, |
| 971 | "Seeking extra descriptors on endpoint\n"); | 972 | "Seeking extra descriptors on endpoint\n"); |
| @@ -1481,6 +1482,11 @@ static int acm_reset_resume(struct usb_interface *intf) | |||
| 1481 | USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \ | 1482 | USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \ |
| 1482 | USB_CDC_ACM_PROTO_VENDOR) | 1483 | USB_CDC_ACM_PROTO_VENDOR) |
| 1483 | 1484 | ||
| 1485 | #define SAMSUNG_PCSUITE_ACM_INFO(x) \ | ||
| 1486 | USB_DEVICE_AND_INTERFACE_INFO(0x04e7, x, \ | ||
| 1487 | USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \ | ||
| 1488 | USB_CDC_ACM_PROTO_VENDOR) | ||
| 1489 | |||
| 1484 | /* | 1490 | /* |
| 1485 | * USB driver structure. | 1491 | * USB driver structure. |
| 1486 | */ | 1492 | */ |
| @@ -1591,6 +1597,17 @@ static const struct usb_device_id acm_ids[] = { | |||
| 1591 | { NOKIA_PCSUITE_ACM_INFO(0x0108), }, /* Nokia 5320 XpressMusic 2G */ | 1597 | { NOKIA_PCSUITE_ACM_INFO(0x0108), }, /* Nokia 5320 XpressMusic 2G */ |
| 1592 | { NOKIA_PCSUITE_ACM_INFO(0x01f5), }, /* Nokia N97, RM-505 */ | 1598 | { NOKIA_PCSUITE_ACM_INFO(0x01f5), }, /* Nokia N97, RM-505 */ |
| 1593 | { NOKIA_PCSUITE_ACM_INFO(0x02e3), }, /* Nokia 5230, RM-588 */ | 1599 | { NOKIA_PCSUITE_ACM_INFO(0x02e3), }, /* Nokia 5230, RM-588 */ |
| 1600 | { NOKIA_PCSUITE_ACM_INFO(0x0178), }, /* Nokia E63 */ | ||
| 1601 | { NOKIA_PCSUITE_ACM_INFO(0x010e), }, /* Nokia E75 */ | ||
| 1602 | { NOKIA_PCSUITE_ACM_INFO(0x02d9), }, /* Nokia 6760 Slide */ | ||
| 1603 | { NOKIA_PCSUITE_ACM_INFO(0x01d0), }, /* Nokia E52 */ | ||
| 1604 | { NOKIA_PCSUITE_ACM_INFO(0x0223), }, /* Nokia E72 */ | ||
| 1605 | { NOKIA_PCSUITE_ACM_INFO(0x0275), }, /* Nokia X6 */ | ||
| 1606 | { NOKIA_PCSUITE_ACM_INFO(0x026c), }, /* Nokia N97 Mini */ | ||
| 1607 | { NOKIA_PCSUITE_ACM_INFO(0x0154), }, /* Nokia 5800 XpressMusic */ | ||
| 1608 | { NOKIA_PCSUITE_ACM_INFO(0x04ce), }, /* Nokia E90 */ | ||
| 1609 | { NOKIA_PCSUITE_ACM_INFO(0x01d4), }, /* Nokia E55 */ | ||
| 1610 | { SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */ | ||
| 1594 | 1611 | ||
| 1595 | /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */ | 1612 | /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */ |
| 1596 | 1613 | ||
| @@ -1599,6 +1616,10 @@ static const struct usb_device_id acm_ids[] = { | |||
| 1599 | .driver_info = NOT_A_MODEM, | 1616 | .driver_info = NOT_A_MODEM, |
| 1600 | }, | 1617 | }, |
| 1601 | 1618 | ||
| 1619 | /* control interfaces without any protocol set */ | ||
| 1620 | { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, | ||
| 1621 | USB_CDC_PROTO_NONE) }, | ||
| 1622 | |||
| 1602 | /* control interfaces with various AT-command sets */ | 1623 | /* control interfaces with various AT-command sets */ |
| 1603 | { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, | 1624 | { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, |
| 1604 | USB_CDC_ACM_PROTO_AT_V25TER) }, | 1625 | USB_CDC_ACM_PROTO_AT_V25TER) }, |
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index fd4c36ea5e46..844683e50383 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c | |||
| @@ -1724,6 +1724,15 @@ free_interfaces: | |||
| 1724 | if (ret) | 1724 | if (ret) |
| 1725 | goto free_interfaces; | 1725 | goto free_interfaces; |
| 1726 | 1726 | ||
| 1727 | /* if it's already configured, clear out old state first. | ||
| 1728 | * getting rid of old interfaces means unbinding their drivers. | ||
| 1729 | */ | ||
| 1730 | if (dev->state != USB_STATE_ADDRESS) | ||
| 1731 | usb_disable_device(dev, 1); /* Skip ep0 */ | ||
| 1732 | |||
| 1733 | /* Get rid of pending async Set-Config requests for this device */ | ||
| 1734 | cancel_async_set_config(dev); | ||
| 1735 | |||
| 1727 | /* Make sure we have bandwidth (and available HCD resources) for this | 1736 | /* Make sure we have bandwidth (and available HCD resources) for this |
| 1728 | * configuration. Remove endpoints from the schedule if we're dropping | 1737 | * configuration. Remove endpoints from the schedule if we're dropping |
| 1729 | * this configuration to set configuration 0. After this point, the | 1738 | * this configuration to set configuration 0. After this point, the |
| @@ -1733,20 +1742,11 @@ free_interfaces: | |||
| 1733 | mutex_lock(&hcd->bandwidth_mutex); | 1742 | mutex_lock(&hcd->bandwidth_mutex); |
| 1734 | ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL); | 1743 | ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL); |
| 1735 | if (ret < 0) { | 1744 | if (ret < 0) { |
| 1736 | usb_autosuspend_device(dev); | ||
| 1737 | mutex_unlock(&hcd->bandwidth_mutex); | 1745 | mutex_unlock(&hcd->bandwidth_mutex); |
| 1746 | usb_autosuspend_device(dev); | ||
| 1738 | goto free_interfaces; | 1747 | goto free_interfaces; |
| 1739 | } | 1748 | } |
| 1740 | 1749 | ||
| 1741 | /* if it's already configured, clear out old state first. | ||
| 1742 | * getting rid of old interfaces means unbinding their drivers. | ||
| 1743 | */ | ||
| 1744 | if (dev->state != USB_STATE_ADDRESS) | ||
| 1745 | usb_disable_device(dev, 1); /* Skip ep0 */ | ||
| 1746 | |||
| 1747 | /* Get rid of pending async Set-Config requests for this device */ | ||
| 1748 | cancel_async_set_config(dev); | ||
| 1749 | |||
| 1750 | ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), | 1750 | ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), |
| 1751 | USB_REQ_SET_CONFIGURATION, 0, configuration, 0, | 1751 | USB_REQ_SET_CONFIGURATION, 0, configuration, 0, |
| 1752 | NULL, 0, USB_CTRL_SET_TIMEOUT); | 1752 | NULL, 0, USB_CTRL_SET_TIMEOUT); |
| @@ -1761,8 +1761,8 @@ free_interfaces: | |||
| 1761 | if (!cp) { | 1761 | if (!cp) { |
| 1762 | usb_set_device_state(dev, USB_STATE_ADDRESS); | 1762 | usb_set_device_state(dev, USB_STATE_ADDRESS); |
| 1763 | usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); | 1763 | usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); |
| 1764 | usb_autosuspend_device(dev); | ||
| 1765 | mutex_unlock(&hcd->bandwidth_mutex); | 1764 | mutex_unlock(&hcd->bandwidth_mutex); |
| 1765 | usb_autosuspend_device(dev); | ||
| 1766 | goto free_interfaces; | 1766 | goto free_interfaces; |
| 1767 | } | 1767 | } |
| 1768 | mutex_unlock(&hcd->bandwidth_mutex); | 1768 | mutex_unlock(&hcd->bandwidth_mutex); |
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c index 020fa5a25fda..972d5ddd1e18 100644 --- a/drivers/usb/gadget/rndis.c +++ b/drivers/usb/gadget/rndis.c | |||
| @@ -293,9 +293,13 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len, | |||
| 293 | /* mandatory */ | 293 | /* mandatory */ |
| 294 | case OID_GEN_VENDOR_DESCRIPTION: | 294 | case OID_GEN_VENDOR_DESCRIPTION: |
| 295 | pr_debug("%s: OID_GEN_VENDOR_DESCRIPTION\n", __func__); | 295 | pr_debug("%s: OID_GEN_VENDOR_DESCRIPTION\n", __func__); |
| 296 | length = strlen (rndis_per_dev_params [configNr].vendorDescr); | 296 | if ( rndis_per_dev_params [configNr].vendorDescr ) { |
| 297 | memcpy (outbuf, | 297 | length = strlen (rndis_per_dev_params [configNr].vendorDescr); |
| 298 | rndis_per_dev_params [configNr].vendorDescr, length); | 298 | memcpy (outbuf, |
| 299 | rndis_per_dev_params [configNr].vendorDescr, length); | ||
| 300 | } else { | ||
| 301 | outbuf[0] = 0; | ||
| 302 | } | ||
| 299 | retval = 0; | 303 | retval = 0; |
| 300 | break; | 304 | break; |
| 301 | 305 | ||
| @@ -1148,7 +1152,7 @@ static struct proc_dir_entry *rndis_connect_state [RNDIS_MAX_CONFIGS]; | |||
| 1148 | #endif /* CONFIG_USB_GADGET_DEBUG_FILES */ | 1152 | #endif /* CONFIG_USB_GADGET_DEBUG_FILES */ |
| 1149 | 1153 | ||
| 1150 | 1154 | ||
| 1151 | int __init rndis_init (void) | 1155 | int rndis_init(void) |
| 1152 | { | 1156 | { |
| 1153 | u8 i; | 1157 | u8 i; |
| 1154 | 1158 | ||
diff --git a/drivers/usb/gadget/rndis.h b/drivers/usb/gadget/rndis.h index c236aaa9dcd1..907c33008118 100644 --- a/drivers/usb/gadget/rndis.h +++ b/drivers/usb/gadget/rndis.h | |||
| @@ -262,7 +262,7 @@ int rndis_signal_disconnect (int configNr); | |||
| 262 | int rndis_state (int configNr); | 262 | int rndis_state (int configNr); |
| 263 | extern void rndis_set_host_mac (int configNr, const u8 *addr); | 263 | extern void rndis_set_host_mac (int configNr, const u8 *addr); |
| 264 | 264 | ||
| 265 | int __devinit rndis_init (void); | 265 | int rndis_init(void); |
| 266 | void rndis_exit (void); | 266 | void rndis_exit (void); |
| 267 | 267 | ||
| 268 | #endif /* _LINUX_RNDIS_H */ | 268 | #endif /* _LINUX_RNDIS_H */ |
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c index 521ebed0118d..a229744a8c7d 100644 --- a/drivers/usb/gadget/s3c-hsotg.c +++ b/drivers/usb/gadget/s3c-hsotg.c | |||
| @@ -12,8 +12,6 @@ | |||
| 12 | * published by the Free Software Foundation. | 12 | * published by the Free Software Foundation. |
| 13 | */ | 13 | */ |
| 14 | 14 | ||
| 15 | #define DEBUG | ||
| 16 | |||
| 17 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
| 18 | #include <linux/module.h> | 16 | #include <linux/module.h> |
| 19 | #include <linux/spinlock.h> | 17 | #include <linux/spinlock.h> |
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c index 335ee699fd85..ba52be473027 100644 --- a/drivers/usb/host/ehci-ppc-of.c +++ b/drivers/usb/host/ehci-ppc-of.c | |||
| @@ -192,17 +192,19 @@ ehci_hcd_ppc_of_probe(struct platform_device *op, const struct of_device_id *mat | |||
| 192 | } | 192 | } |
| 193 | 193 | ||
| 194 | rv = usb_add_hcd(hcd, irq, 0); | 194 | rv = usb_add_hcd(hcd, irq, 0); |
| 195 | if (rv == 0) | 195 | if (rv) |
| 196 | return 0; | 196 | goto err_ehci; |
| 197 | |||
| 198 | return 0; | ||
| 197 | 199 | ||
| 200 | err_ehci: | ||
| 201 | if (ehci->has_amcc_usb23) | ||
| 202 | iounmap(ehci->ohci_hcctrl_reg); | ||
| 198 | iounmap(hcd->regs); | 203 | iounmap(hcd->regs); |
| 199 | err_ioremap: | 204 | err_ioremap: |
| 200 | irq_dispose_mapping(irq); | 205 | irq_dispose_mapping(irq); |
| 201 | err_irq: | 206 | err_irq: |
| 202 | release_mem_region(hcd->rsrc_start, hcd->rsrc_len); | 207 | release_mem_region(hcd->rsrc_start, hcd->rsrc_len); |
| 203 | |||
| 204 | if (ehci->has_amcc_usb23) | ||
| 205 | iounmap(ehci->ohci_hcctrl_reg); | ||
| 206 | err_rmr: | 208 | err_rmr: |
| 207 | usb_put_hcd(hcd); | 209 | usb_put_hcd(hcd); |
| 208 | 210 | ||
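
The ehci-ppc-of change above reworks the probe error path so that each resource acquired before the failure point is released in reverse order under its own label, including the amcc usb23 register mapping that was previously only unmapped on some paths. As a rough userspace illustration of that goto-unwind idiom (not part of the patch; the file name, buffer size and function names here are invented):

    /* Illustrative sketch only: release resources in reverse order of acquisition. */
    #include <stdio.h>
    #include <stdlib.h>

    static int process(const char *path)
    {
        FILE *f;
        char *buf;
        int ret;

        f = fopen(path, "r");
        if (!f)
            return -1;

        buf = malloc(4096);
        if (!buf) {
            ret = -1;
            goto err_buf;
        }

        if (fread(buf, 1, 4096, f) == 0 && ferror(f)) {
            ret = -1;
            goto err_read;
        }

        ret = 0;            /* success and failure share the unwind below */
    err_read:
        free(buf);          /* undo in reverse order of setup */
    err_buf:
        fclose(f);
        return ret;
    }

    int main(void)
    {
        return process("/etc/hostname") ? 1 : 0;
    }
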
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 80bf8333bb03..4f1744c5871f 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c | |||
| @@ -56,6 +56,7 @@ static int debug; | |||
| 56 | static const struct usb_device_id id_table[] = { | 56 | static const struct usb_device_id id_table[] = { |
| 57 | { USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */ | 57 | { USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */ |
| 58 | { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ | 58 | { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ |
| 59 | { USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ | ||
| 59 | { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */ | 60 | { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */ |
| 60 | { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */ | 61 | { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */ |
| 61 | { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */ | 62 | { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */ |
| @@ -88,6 +89,7 @@ static const struct usb_device_id id_table[] = { | |||
| 88 | { USB_DEVICE(0x10C4, 0x8149) }, /* West Mountain Radio Computerized Battery Analyzer */ | 89 | { USB_DEVICE(0x10C4, 0x8149) }, /* West Mountain Radio Computerized Battery Analyzer */ |
| 89 | { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */ | 90 | { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */ |
| 90 | { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */ | 91 | { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */ |
| 92 | { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */ | ||
| 91 | { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */ | 93 | { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */ |
| 92 | { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */ | 94 | { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */ |
| 93 | { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */ | 95 | { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */ |
| @@ -109,6 +111,7 @@ static const struct usb_device_id id_table[] = { | |||
| 109 | { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */ | 111 | { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */ |
| 110 | { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */ | 112 | { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */ |
| 111 | { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */ | 113 | { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */ |
| 114 | { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */ | ||
| 112 | { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ | 115 | { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ |
| 113 | { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ | 116 | { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ |
| 114 | { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */ | 117 | { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */ |
| @@ -122,14 +125,14 @@ static const struct usb_device_id id_table[] = { | |||
| 122 | { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */ | 125 | { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */ |
| 123 | { USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */ | 126 | { USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */ |
| 124 | { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */ | 127 | { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */ |
| 125 | { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */ | ||
| 126 | { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ | ||
| 127 | { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ | ||
| 128 | { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */ | ||
| 129 | { USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */ | 128 | { USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */ |
| 130 | { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */ | 129 | { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */ |
| 131 | { USB_DEVICE(0x16DC, 0x0012) }, /* W-IE-NE-R Plein & Baus GmbH MPOD Multi Channel Power Supply */ | 130 | { USB_DEVICE(0x16DC, 0x0012) }, /* W-IE-NE-R Plein & Baus GmbH MPOD Multi Channel Power Supply */ |
| 132 | { USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */ | 131 | { USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */ |
| 132 | { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */ | ||
| 133 | { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ | ||
| 134 | { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ | ||
| 135 | { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */ | ||
| 133 | { } /* Terminating Entry */ | 136 | { } /* Terminating Entry */ |
| 134 | }; | 137 | }; |
| 135 | 138 | ||
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index c792c96f590e..97cc87d654ce 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
| @@ -753,6 +753,14 @@ static struct usb_device_id id_table_combined [] = { | |||
| 753 | { USB_DEVICE(FTDI_VID, SEGWAY_RMP200_PID) }, | 753 | { USB_DEVICE(FTDI_VID, SEGWAY_RMP200_PID) }, |
| 754 | { USB_DEVICE(IONICS_VID, IONICS_PLUGCOMPUTER_PID), | 754 | { USB_DEVICE(IONICS_VID, IONICS_PLUGCOMPUTER_PID), |
| 755 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | 755 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, |
| 756 | { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_24_MASTER_WING_PID) }, | ||
| 757 | { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_PC_WING_PID) }, | ||
| 758 | { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_USB_DMX_PID) }, | ||
| 759 | { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MIDI_TIMECODE_PID) }, | ||
| 760 | { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MINI_WING_PID) }, | ||
| 761 | { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MAXI_WING_PID) }, | ||
| 762 | { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MEDIA_WING_PID) }, | ||
| 763 | { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_WING_PID) }, | ||
| 756 | { }, /* Optional parameter entry */ | 764 | { }, /* Optional parameter entry */ |
| 757 | { } /* Terminating entry */ | 765 | { } /* Terminating entry */ |
| 758 | }; | 766 | }; |
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 2e95857c9633..15a4583775ad 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h | |||
| @@ -135,6 +135,18 @@ | |||
| 135 | #define FTDI_NDI_AURORA_SCU_PID 0xDA74 /* NDI Aurora SCU */ | 135 | #define FTDI_NDI_AURORA_SCU_PID 0xDA74 /* NDI Aurora SCU */ |
| 136 | 136 | ||
| 137 | /* | 137 | /* |
| 138 | * ChamSys Limited (www.chamsys.co.uk) USB wing/interface product IDs | ||
| 139 | */ | ||
| 140 | #define FTDI_CHAMSYS_24_MASTER_WING_PID 0xDAF8 | ||
| 141 | #define FTDI_CHAMSYS_PC_WING_PID 0xDAF9 | ||
| 142 | #define FTDI_CHAMSYS_USB_DMX_PID 0xDAFA | ||
| 143 | #define FTDI_CHAMSYS_MIDI_TIMECODE_PID 0xDAFB | ||
| 144 | #define FTDI_CHAMSYS_MINI_WING_PID 0xDAFC | ||
| 145 | #define FTDI_CHAMSYS_MAXI_WING_PID 0xDAFD | ||
| 146 | #define FTDI_CHAMSYS_MEDIA_WING_PID 0xDAFE | ||
| 147 | #define FTDI_CHAMSYS_WING_PID 0xDAFF | ||
| 148 | |||
| 149 | /* | ||
| 138 | * Westrex International devices submitted by Cory Lee | 150 | * Westrex International devices submitted by Cory Lee |
| 139 | */ | 151 | */ |
| 140 | #define FTDI_WESTREX_MODEL_777_PID 0xDC00 /* Model 777 */ | 152 | #define FTDI_WESTREX_MODEL_777_PID 0xDC00 /* Model 777 */ |
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index 585b7e663740..1c9b6e9b2386 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c | |||
| @@ -119,16 +119,20 @@ | |||
| 119 | * by making a change here, in moschip_port_id_table, and in | 119 | * by making a change here, in moschip_port_id_table, and in |
| 120 | * moschip_id_table_combined | 120 | * moschip_id_table_combined |
| 121 | */ | 121 | */ |
| 122 | #define USB_VENDOR_ID_BANDB 0x0856 | 122 | #define USB_VENDOR_ID_BANDB 0x0856 |
| 123 | #define BANDB_DEVICE_ID_USO9ML2_2 0xAC22 | 123 | #define BANDB_DEVICE_ID_USO9ML2_2 0xAC22 |
| 124 | #define BANDB_DEVICE_ID_USO9ML2_4 0xAC24 | 124 | #define BANDB_DEVICE_ID_USO9ML2_2P 0xBC00 |
| 125 | #define BANDB_DEVICE_ID_US9ML2_2 0xAC29 | 125 | #define BANDB_DEVICE_ID_USO9ML2_4 0xAC24 |
| 126 | #define BANDB_DEVICE_ID_US9ML2_4 0xAC30 | 126 | #define BANDB_DEVICE_ID_USO9ML2_4P 0xBC01 |
| 127 | #define BANDB_DEVICE_ID_USPTL4_2 0xAC31 | 127 | #define BANDB_DEVICE_ID_US9ML2_2 0xAC29 |
| 128 | #define BANDB_DEVICE_ID_USPTL4_4 0xAC32 | 128 | #define BANDB_DEVICE_ID_US9ML2_4 0xAC30 |
| 129 | #define BANDB_DEVICE_ID_USOPTL4_2 0xAC42 | 129 | #define BANDB_DEVICE_ID_USPTL4_2 0xAC31 |
| 130 | #define BANDB_DEVICE_ID_USOPTL4_4 0xAC44 | 130 | #define BANDB_DEVICE_ID_USPTL4_4 0xAC32 |
| 131 | #define BANDB_DEVICE_ID_USOPTL2_4 0xAC24 | 131 | #define BANDB_DEVICE_ID_USOPTL4_2 0xAC42 |
| 132 | #define BANDB_DEVICE_ID_USOPTL4_2P 0xBC02 | ||
| 133 | #define BANDB_DEVICE_ID_USOPTL4_4 0xAC44 | ||
| 134 | #define BANDB_DEVICE_ID_USOPTL4_4P 0xBC03 | ||
| 135 | #define BANDB_DEVICE_ID_USOPTL2_4 0xAC24 | ||
| 132 | 136 | ||
| 133 | /* This driver also supports | 137 | /* This driver also supports |
| 134 | * ATEN UC2324 device using Moschip MCS7840 | 138 | * ATEN UC2324 device using Moschip MCS7840 |
| @@ -184,13 +188,17 @@ static const struct usb_device_id moschip_port_id_table[] = { | |||
| 184 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, | 188 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, |
| 185 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, | 189 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, |
| 186 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)}, | 190 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)}, |
| 191 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P)}, | ||
| 187 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)}, | 192 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)}, |
| 193 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P)}, | ||
| 188 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)}, | 194 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)}, |
| 189 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)}, | 195 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)}, |
| 190 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)}, | 196 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)}, |
| 191 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)}, | 197 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)}, |
| 192 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, | 198 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, |
| 199 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P)}, | ||
| 193 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, | 200 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, |
| 201 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P)}, | ||
| 194 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)}, | 202 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)}, |
| 195 | {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, | 203 | {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, |
| 196 | {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)}, | 204 | {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)}, |
| @@ -201,13 +209,17 @@ static const struct usb_device_id moschip_id_table_combined[] __devinitconst = { | |||
| 201 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, | 209 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, |
| 202 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, | 210 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, |
| 203 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)}, | 211 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)}, |
| 212 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P)}, | ||
| 204 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)}, | 213 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)}, |
| 214 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P)}, | ||
| 205 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)}, | 215 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)}, |
| 206 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)}, | 216 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)}, |
| 207 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)}, | 217 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)}, |
| 208 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)}, | 218 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)}, |
| 209 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, | 219 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, |
| 220 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P)}, | ||
| 210 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, | 221 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, |
| 222 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P)}, | ||
| 211 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)}, | 223 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)}, |
| 212 | {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, | 224 | {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, |
| 213 | {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)}, | 225 | {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)}, |
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index adcbdb994de3..c46911af282f 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
| @@ -164,6 +164,14 @@ static void option_instat_callback(struct urb *urb); | |||
| 164 | #define YISO_VENDOR_ID 0x0EAB | 164 | #define YISO_VENDOR_ID 0x0EAB |
| 165 | #define YISO_PRODUCT_U893 0xC893 | 165 | #define YISO_PRODUCT_U893 0xC893 |
| 166 | 166 | ||
| 167 | /* | ||
| 168 | * NOVATEL WIRELESS PRODUCTS | ||
| 169 | * | ||
| 170 | * Note from Novatel Wireless: | ||
| 171 | * If your Novatel modem does not work on linux, don't | ||
| 172 | * change the option module, but check our website. If | ||
| 173 | * that does not help, contact ddeschepper@nvtl.com | ||
| 174 | */ | ||
| 167 | /* MERLIN EVDO PRODUCTS */ | 175 | /* MERLIN EVDO PRODUCTS */ |
| 168 | #define NOVATELWIRELESS_PRODUCT_V640 0x1100 | 176 | #define NOVATELWIRELESS_PRODUCT_V640 0x1100 |
| 169 | #define NOVATELWIRELESS_PRODUCT_V620 0x1110 | 177 | #define NOVATELWIRELESS_PRODUCT_V620 0x1110 |
| @@ -185,24 +193,39 @@ static void option_instat_callback(struct urb *urb); | |||
| 185 | #define NOVATELWIRELESS_PRODUCT_EU730 0x2400 | 193 | #define NOVATELWIRELESS_PRODUCT_EU730 0x2400 |
| 186 | #define NOVATELWIRELESS_PRODUCT_EU740 0x2410 | 194 | #define NOVATELWIRELESS_PRODUCT_EU740 0x2410 |
| 187 | #define NOVATELWIRELESS_PRODUCT_EU870D 0x2420 | 195 | #define NOVATELWIRELESS_PRODUCT_EU870D 0x2420 |
| 188 | |||
| 189 | /* OVATION PRODUCTS */ | 196 | /* OVATION PRODUCTS */ |
| 190 | #define NOVATELWIRELESS_PRODUCT_MC727 0x4100 | 197 | #define NOVATELWIRELESS_PRODUCT_MC727 0x4100 |
| 191 | #define NOVATELWIRELESS_PRODUCT_MC950D 0x4400 | 198 | #define NOVATELWIRELESS_PRODUCT_MC950D 0x4400 |
| 192 | #define NOVATELWIRELESS_PRODUCT_U727 0x5010 | 199 | /* |
| 193 | #define NOVATELWIRELESS_PRODUCT_MC727_NEW 0x5100 | 200 | * Note from Novatel Wireless: |
| 194 | #define NOVATELWIRELESS_PRODUCT_MC760 0x6000 | 201 | * All PID in the 5xxx range are currently reserved for |
| 202 | * auto-install CDROMs, and should not be added to this | ||
| 203 | * module. | ||
| 204 | * | ||
| 205 | * #define NOVATELWIRELESS_PRODUCT_U727 0x5010 | ||
| 206 | * #define NOVATELWIRELESS_PRODUCT_MC727_NEW 0x5100 | ||
| 207 | */ | ||
| 195 | #define NOVATELWIRELESS_PRODUCT_OVMC760 0x6002 | 208 | #define NOVATELWIRELESS_PRODUCT_OVMC760 0x6002 |
| 196 | 209 | #define NOVATELWIRELESS_PRODUCT_MC780 0x6010 | |
| 197 | /* FUTURE NOVATEL PRODUCTS */ | 210 | #define NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED 0x6000 |
| 198 | #define NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED 0X6001 | 211 | #define NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED 0x6001 |
| 199 | #define NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED 0X7000 | 212 | #define NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED 0x7000 |
| 200 | #define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED 0X7001 | 213 | #define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED 0x7001 |
| 201 | #define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED 0X8000 | 214 | #define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED3 0x7003 |
| 202 | #define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED 0X8001 | 215 | #define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED4 0x7004 |
| 203 | #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0X9000 | 216 | #define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED5 0x7005 |
| 204 | #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0X9001 | 217 | #define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED6 0x7006 |
| 205 | #define NOVATELWIRELESS_PRODUCT_GLOBAL 0XA001 | 218 | #define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED7 0x7007 |
| 219 | #define NOVATELWIRELESS_PRODUCT_MC996D 0x7030 | ||
| 220 | #define NOVATELWIRELESS_PRODUCT_MF3470 0x7041 | ||
| 221 | #define NOVATELWIRELESS_PRODUCT_MC547 0x7042 | ||
| 222 | #define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED 0x8000 | ||
| 223 | #define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED 0x8001 | ||
| 224 | #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0x9000 | ||
| 225 | #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001 | ||
| 226 | #define NOVATELWIRELESS_PRODUCT_G1 0xA001 | ||
| 227 | #define NOVATELWIRELESS_PRODUCT_G1_M 0xA002 | ||
| 228 | #define NOVATELWIRELESS_PRODUCT_G2 0xA010 | ||
| 206 | 229 | ||
| 207 | /* AMOI PRODUCTS */ | 230 | /* AMOI PRODUCTS */ |
| 208 | #define AMOI_VENDOR_ID 0x1614 | 231 | #define AMOI_VENDOR_ID 0x1614 |
| @@ -490,36 +513,44 @@ static const struct usb_device_id option_ids[] = { | |||
| 490 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) }, | 513 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) }, |
| 491 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) }, | 514 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) }, |
| 492 | { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC) }, | 515 | { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC) }, |
| 493 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, /* Novatel Merlin V640/XV620 */ | 516 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, |
| 494 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, /* Novatel Merlin V620/S620 */ | 517 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, |
| 495 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) }, /* Novatel Merlin EX720/V740/X720 */ | 518 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) }, |
| 496 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V720) }, /* Novatel Merlin V720/S720/PC720 */ | 519 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V720) }, |
| 497 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U730) }, /* Novatel U730/U740 (VF version) */ | 520 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U730) }, |
| 498 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U740) }, /* Novatel U740 */ | 521 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U740) }, |
| 499 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U870) }, /* Novatel U870 */ | 522 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U870) }, |
| 500 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_XU870) }, /* Novatel Merlin XU870 HSDPA/3G */ | 523 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_XU870) }, |
| 501 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_X950D) }, /* Novatel X950D */ | 524 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_X950D) }, |
| 502 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EV620) }, /* Novatel EV620/ES620 CDMA/EV-DO */ | 525 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EV620) }, |
| 503 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES720) }, /* Novatel ES620/ES720/U720/USB720 */ | 526 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES720) }, |
| 504 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E725) }, /* Novatel E725/E726 */ | 527 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E725) }, |
| 505 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES620) }, /* Novatel Merlin ES620 SM Bus */ | 528 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES620) }, |
| 506 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU730) }, /* Novatel EU730 and Vodafone EU740 */ | 529 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU730) }, |
| 507 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU740) }, /* Novatel non-Vodafone EU740 */ | 530 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU740) }, |
| 508 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, /* Novatel EU850D/EU860D/EU870D */ | 531 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, |
| 509 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */ | 532 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, |
| 510 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */ | 533 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, |
| 511 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727_NEW) }, /* Novatel MC727/U727/USB727 refresh */ | 534 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_OVMC760) }, |
| 512 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U727) }, /* Novatel MC727/U727/USB727 */ | 535 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC780) }, |
| 513 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC760) }, /* Novatel MC760/U760/USB760 */ | 536 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED) }, |
| 514 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_OVMC760) }, /* Novatel Ovation MC760 */ | 537 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED) }, |
| 515 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED) }, /* Novatel HSPA product */ | 538 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) }, |
| 516 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) }, /* Novatel EVDO Embedded product */ | 539 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) }, |
| 517 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) }, /* Novatel HSPA Embedded product */ | 540 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED) }, |
| 518 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED) }, /* Novatel EVDO product */ | 541 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED3) }, |
| 519 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED) }, /* Novatel HSPA product */ | 542 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED4) }, |
| 520 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED) }, /* Novatel EVDO Embedded product */ | 543 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED5) }, |
| 521 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED) }, /* Novatel HSPA Embedded product */ | 544 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED6) }, |
| 522 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_GLOBAL) }, /* Novatel Global product */ | 545 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED7) }, |
| 546 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC996D) }, | ||
| 547 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MF3470) }, | ||
| 548 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC547) }, | ||
| 549 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED) }, | ||
| 550 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED) }, | ||
| 551 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1) }, | ||
| 552 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1_M) }, | ||
| 553 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) }, | ||
| 523 | 554 | ||
| 524 | { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) }, | 555 | { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) }, |
| 525 | { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) }, | 556 | { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) }, |
diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c index 68c18fdfc6da..e986002b3844 100644 --- a/drivers/usb/serial/ssu100.c +++ b/drivers/usb/serial/ssu100.c | |||
| @@ -46,7 +46,7 @@ | |||
| 46 | #define FULLPWRBIT 0x00000080 | 46 | #define FULLPWRBIT 0x00000080 |
| 47 | #define NEXT_BOARD_POWER_BIT 0x00000004 | 47 | #define NEXT_BOARD_POWER_BIT 0x00000004 |
| 48 | 48 | ||
| 49 | static int debug = 1; | 49 | static int debug; |
| 50 | 50 | ||
| 51 | /* Version Information */ | 51 | /* Version Information */ |
| 52 | #define DRIVER_VERSION "v0.1" | 52 | #define DRIVER_VERSION "v0.1" |
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index e05557d52999..c579dcc9200c 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c | |||
| @@ -60,22 +60,25 @@ static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync, | |||
| 60 | return 0; | 60 | return 0; |
| 61 | } | 61 | } |
| 62 | 62 | ||
| 63 | static void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn) | ||
| 64 | { | ||
| 65 | INIT_LIST_HEAD(&work->node); | ||
| 66 | work->fn = fn; | ||
| 67 | init_waitqueue_head(&work->done); | ||
| 68 | work->flushing = 0; | ||
| 69 | work->queue_seq = work->done_seq = 0; | ||
| 70 | } | ||
| 71 | |||
| 63 | /* Init poll structure */ | 72 | /* Init poll structure */ |
| 64 | void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn, | 73 | void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn, |
| 65 | unsigned long mask, struct vhost_dev *dev) | 74 | unsigned long mask, struct vhost_dev *dev) |
| 66 | { | 75 | { |
| 67 | struct vhost_work *work = &poll->work; | ||
| 68 | |||
| 69 | init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup); | 76 | init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup); |
| 70 | init_poll_funcptr(&poll->table, vhost_poll_func); | 77 | init_poll_funcptr(&poll->table, vhost_poll_func); |
| 71 | poll->mask = mask; | 78 | poll->mask = mask; |
| 72 | poll->dev = dev; | 79 | poll->dev = dev; |
| 73 | 80 | ||
| 74 | INIT_LIST_HEAD(&work->node); | 81 | vhost_work_init(&poll->work, fn); |
| 75 | work->fn = fn; | ||
| 76 | init_waitqueue_head(&work->done); | ||
| 77 | work->flushing = 0; | ||
| 78 | work->queue_seq = work->done_seq = 0; | ||
| 79 | } | 82 | } |
| 80 | 83 | ||
| 81 | /* Start polling a file. We add ourselves to file's wait queue. The caller must | 84 | /* Start polling a file. We add ourselves to file's wait queue. The caller must |
| @@ -95,35 +98,38 @@ void vhost_poll_stop(struct vhost_poll *poll) | |||
| 95 | remove_wait_queue(poll->wqh, &poll->wait); | 98 | remove_wait_queue(poll->wqh, &poll->wait); |
| 96 | } | 99 | } |
| 97 | 100 | ||
| 98 | /* Flush any work that has been scheduled. When calling this, don't hold any | 101 | static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work) |
| 99 | * locks that are also used by the callback. */ | ||
| 100 | void vhost_poll_flush(struct vhost_poll *poll) | ||
| 101 | { | 102 | { |
| 102 | struct vhost_work *work = &poll->work; | ||
| 103 | unsigned seq; | 103 | unsigned seq; |
| 104 | int left; | 104 | int left; |
| 105 | int flushing; | 105 | int flushing; |
| 106 | 106 | ||
| 107 | spin_lock_irq(&poll->dev->work_lock); | 107 | spin_lock_irq(&dev->work_lock); |
| 108 | seq = work->queue_seq; | 108 | seq = work->queue_seq; |
| 109 | work->flushing++; | 109 | work->flushing++; |
| 110 | spin_unlock_irq(&poll->dev->work_lock); | 110 | spin_unlock_irq(&dev->work_lock); |
| 111 | wait_event(work->done, ({ | 111 | wait_event(work->done, ({ |
| 112 | spin_lock_irq(&poll->dev->work_lock); | 112 | spin_lock_irq(&dev->work_lock); |
| 113 | left = seq - work->done_seq <= 0; | 113 | left = seq - work->done_seq <= 0; |
| 114 | spin_unlock_irq(&poll->dev->work_lock); | 114 | spin_unlock_irq(&dev->work_lock); |
| 115 | left; | 115 | left; |
| 116 | })); | 116 | })); |
| 117 | spin_lock_irq(&poll->dev->work_lock); | 117 | spin_lock_irq(&dev->work_lock); |
| 118 | flushing = --work->flushing; | 118 | flushing = --work->flushing; |
| 119 | spin_unlock_irq(&poll->dev->work_lock); | 119 | spin_unlock_irq(&dev->work_lock); |
| 120 | BUG_ON(flushing < 0); | 120 | BUG_ON(flushing < 0); |
| 121 | } | 121 | } |
| 122 | 122 | ||
| 123 | void vhost_poll_queue(struct vhost_poll *poll) | 123 | /* Flush any work that has been scheduled. When calling this, don't hold any |
| 124 | * locks that are also used by the callback. */ | ||
| 125 | void vhost_poll_flush(struct vhost_poll *poll) | ||
| 126 | { | ||
| 127 | vhost_work_flush(poll->dev, &poll->work); | ||
| 128 | } | ||
| 129 | |||
| 130 | static inline void vhost_work_queue(struct vhost_dev *dev, | ||
| 131 | struct vhost_work *work) | ||
| 124 | { | 132 | { |
| 125 | struct vhost_dev *dev = poll->dev; | ||
| 126 | struct vhost_work *work = &poll->work; | ||
| 127 | unsigned long flags; | 133 | unsigned long flags; |
| 128 | 134 | ||
| 129 | spin_lock_irqsave(&dev->work_lock, flags); | 135 | spin_lock_irqsave(&dev->work_lock, flags); |
| @@ -135,6 +141,11 @@ void vhost_poll_queue(struct vhost_poll *poll) | |||
| 135 | spin_unlock_irqrestore(&dev->work_lock, flags); | 141 | spin_unlock_irqrestore(&dev->work_lock, flags); |
| 136 | } | 142 | } |
| 137 | 143 | ||
| 144 | void vhost_poll_queue(struct vhost_poll *poll) | ||
| 145 | { | ||
| 146 | vhost_work_queue(poll->dev, &poll->work); | ||
| 147 | } | ||
| 148 | |||
| 138 | static void vhost_vq_reset(struct vhost_dev *dev, | 149 | static void vhost_vq_reset(struct vhost_dev *dev, |
| 139 | struct vhost_virtqueue *vq) | 150 | struct vhost_virtqueue *vq) |
| 140 | { | 151 | { |
| @@ -236,6 +247,29 @@ long vhost_dev_check_owner(struct vhost_dev *dev) | |||
| 236 | return dev->mm == current->mm ? 0 : -EPERM; | 247 | return dev->mm == current->mm ? 0 : -EPERM; |
| 237 | } | 248 | } |
| 238 | 249 | ||
| 250 | struct vhost_attach_cgroups_struct { | ||
| 251 | struct vhost_work work; | ||
| 252 | struct task_struct *owner; | ||
| 253 | int ret; | ||
| 254 | }; | ||
| 255 | |||
| 256 | static void vhost_attach_cgroups_work(struct vhost_work *work) | ||
| 257 | { | ||
| 258 | struct vhost_attach_cgroups_struct *s; | ||
| 259 | s = container_of(work, struct vhost_attach_cgroups_struct, work); | ||
| 260 | s->ret = cgroup_attach_task_all(s->owner, current); | ||
| 261 | } | ||
| 262 | |||
| 263 | static int vhost_attach_cgroups(struct vhost_dev *dev) | ||
| 264 | { | ||
| 265 | struct vhost_attach_cgroups_struct attach; | ||
| 266 | attach.owner = current; | ||
| 267 | vhost_work_init(&attach.work, vhost_attach_cgroups_work); | ||
| 268 | vhost_work_queue(dev, &attach.work); | ||
| 269 | vhost_work_flush(dev, &attach.work); | ||
| 270 | return attach.ret; | ||
| 271 | } | ||
| 272 | |||
| 239 | /* Caller should have device mutex */ | 273 | /* Caller should have device mutex */ |
| 240 | static long vhost_dev_set_owner(struct vhost_dev *dev) | 274 | static long vhost_dev_set_owner(struct vhost_dev *dev) |
| 241 | { | 275 | { |
| @@ -255,14 +289,16 @@ static long vhost_dev_set_owner(struct vhost_dev *dev) | |||
| 255 | } | 289 | } |
| 256 | 290 | ||
| 257 | dev->worker = worker; | 291 | dev->worker = worker; |
| 258 | err = cgroup_attach_task_current_cg(worker); | 292 | wake_up_process(worker); /* avoid contributing to loadavg */ |
| 293 | |||
| 294 | err = vhost_attach_cgroups(dev); | ||
| 259 | if (err) | 295 | if (err) |
| 260 | goto err_cgroup; | 296 | goto err_cgroup; |
| 261 | wake_up_process(worker); /* avoid contributing to loadavg */ | ||
| 262 | 297 | ||
| 263 | return 0; | 298 | return 0; |
| 264 | err_cgroup: | 299 | err_cgroup: |
| 265 | kthread_stop(worker); | 300 | kthread_stop(worker); |
| 301 | dev->worker = NULL; | ||
| 266 | err_worker: | 302 | err_worker: |
| 267 | if (dev->mm) | 303 | if (dev->mm) |
| 268 | mmput(dev->mm); | 304 | mmput(dev->mm); |
| @@ -323,7 +359,10 @@ void vhost_dev_cleanup(struct vhost_dev *dev) | |||
| 323 | dev->mm = NULL; | 359 | dev->mm = NULL; |
| 324 | 360 | ||
| 325 | WARN_ON(!list_empty(&dev->work_list)); | 361 | WARN_ON(!list_empty(&dev->work_list)); |
| 326 | kthread_stop(dev->worker); | 362 | if (dev->worker) { |
| 363 | kthread_stop(dev->worker); | ||
| 364 | dev->worker = NULL; | ||
| 365 | } | ||
| 327 | } | 366 | } |
| 328 | 367 | ||
| 329 | static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz) | 368 | static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz) |
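
The vhost change above factors the poll helpers into generic vhost_work_init/queue/flush primitives so that vhost_attach_cgroups() can queue a work item on the worker thread, flush it, and read the return value back out of the work structure. A very rough userspace analogue of that queue-then-flush pattern, with pthreads standing in for the kernel worker and all names invented, might look like:

    /* Illustrative sketch only: run a callback on a worker thread and wait for its result. */
    #include <pthread.h>
    #include <stdio.h>

    struct work {
        void (*fn)(struct work *w);
        int done;
        struct work *next;
    };

    static struct work *queue_head;
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

    static void *worker(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        for (;;) {
            while (!queue_head)
                pthread_cond_wait(&cond, &lock);
            struct work *w = queue_head;
            queue_head = w->next;
            pthread_mutex_unlock(&lock);
            w->fn(w);                      /* run the callback outside the lock */
            pthread_mutex_lock(&lock);
            w->done = 1;                   /* mark completion and wake flushers */
            pthread_cond_broadcast(&cond);
        }
        return NULL;
    }

    static void work_queue(struct work *w)
    {
        pthread_mutex_lock(&lock);
        w->next = queue_head;
        queue_head = w;
        pthread_cond_broadcast(&cond);
        pthread_mutex_unlock(&lock);
    }

    static void work_flush(struct work *w)
    {
        pthread_mutex_lock(&lock);
        while (!w->done)
            pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);
    }

    /* A work item carrying its own return value, like vhost_attach_cgroups(). */
    struct answer_work {
        struct work work;
        int ret;
    };

    static void compute(struct work *w)
    {
        struct answer_work *a = (struct answer_work *)w;
        a->ret = 42;
    }

    int main(void)
    {
        pthread_t tid;
        pthread_create(&tid, NULL, worker, NULL);

        struct answer_work a = { .work = { .fn = compute }, .ret = -1 };
        work_queue(&a.work);
        work_flush(&a.work);               /* after this, a.ret holds the answer */
        printf("worker answered %d\n", a.ret);
        return 0;
    }
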
diff --git a/drivers/video/pxa168fb.c b/drivers/video/pxa168fb.c index c91a7f70f7b0..5d786bd3e304 100644 --- a/drivers/video/pxa168fb.c +++ b/drivers/video/pxa168fb.c | |||
| @@ -559,7 +559,7 @@ static struct fb_ops pxa168fb_ops = { | |||
| 559 | .fb_imageblit = cfb_imageblit, | 559 | .fb_imageblit = cfb_imageblit, |
| 560 | }; | 560 | }; |
| 561 | 561 | ||
| 562 | static int __init pxa168fb_init_mode(struct fb_info *info, | 562 | static int __devinit pxa168fb_init_mode(struct fb_info *info, |
| 563 | struct pxa168fb_mach_info *mi) | 563 | struct pxa168fb_mach_info *mi) |
| 564 | { | 564 | { |
| 565 | struct pxa168fb_info *fbi = info->par; | 565 | struct pxa168fb_info *fbi = info->par; |
| @@ -599,7 +599,7 @@ static int __init pxa168fb_init_mode(struct fb_info *info, | |||
| 599 | return ret; | 599 | return ret; |
| 600 | } | 600 | } |
| 601 | 601 | ||
| 602 | static int __init pxa168fb_probe(struct platform_device *pdev) | 602 | static int __devinit pxa168fb_probe(struct platform_device *pdev) |
| 603 | { | 603 | { |
| 604 | struct pxa168fb_mach_info *mi; | 604 | struct pxa168fb_mach_info *mi; |
| 605 | struct fb_info *info = 0; | 605 | struct fb_info *info = 0; |
| @@ -792,7 +792,7 @@ static struct platform_driver pxa168fb_driver = { | |||
| 792 | .probe = pxa168fb_probe, | 792 | .probe = pxa168fb_probe, |
| 793 | }; | 793 | }; |
| 794 | 794 | ||
| 795 | static int __devinit pxa168fb_init(void) | 795 | static int __init pxa168fb_init(void) |
| 796 | { | 796 | { |
| 797 | return platform_driver_register(&pxa168fb_driver); | 797 | return platform_driver_register(&pxa168fb_driver); |
| 798 | } | 798 | } |
diff --git a/fs/9p/fid.c b/fs/9p/fid.c index 358563689064..6406f896bf95 100644 --- a/fs/9p/fid.c +++ b/fs/9p/fid.c | |||
| @@ -242,7 +242,8 @@ struct p9_fid *v9fs_fid_lookup(struct dentry *dentry) | |||
| 242 | } | 242 | } |
| 243 | kfree(wnames); | 243 | kfree(wnames); |
| 244 | fid_out: | 244 | fid_out: |
| 245 | v9fs_fid_add(dentry, fid); | 245 | if (!IS_ERR(fid)) |
| 246 | v9fs_fid_add(dentry, fid); | ||
| 246 | err_out: | 247 | err_out: |
| 247 | up_read(&v9ses->rename_sem); | 248 | up_read(&v9ses->rename_sem); |
| 248 | return fid; | 249 | return fid; |
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c index a7528b913936..fd0cc0bf9a40 100644 --- a/fs/binfmt_misc.c +++ b/fs/binfmt_misc.c | |||
| @@ -724,7 +724,7 @@ static int __init init_misc_binfmt(void) | |||
| 724 | { | 724 | { |
| 725 | int err = register_filesystem(&bm_fs_type); | 725 | int err = register_filesystem(&bm_fs_type); |
| 726 | if (!err) { | 726 | if (!err) { |
| 727 | err = register_binfmt(&misc_format); | 727 | err = insert_binfmt(&misc_format); |
| 728 | if (err) | 728 | if (err) |
| 729 | unregister_filesystem(&bm_fs_type); | 729 | unregister_filesystem(&bm_fs_type); |
| 730 | } | 730 | } |
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c index 612a5c38d3c1..4d0ff5ee27b8 100644 --- a/fs/bio-integrity.c +++ b/fs/bio-integrity.c | |||
| @@ -413,10 +413,10 @@ int bio_integrity_prep(struct bio *bio) | |||
| 413 | 413 | ||
| 414 | /* Allocate kernel buffer for protection data */ | 414 | /* Allocate kernel buffer for protection data */ |
| 415 | len = sectors * blk_integrity_tuple_size(bi); | 415 | len = sectors * blk_integrity_tuple_size(bi); |
| 416 | buf = kmalloc(len, GFP_NOIO | __GFP_NOFAIL | q->bounce_gfp); | 416 | buf = kmalloc(len, GFP_NOIO | q->bounce_gfp); |
| 417 | if (unlikely(buf == NULL)) { | 417 | if (unlikely(buf == NULL)) { |
| 418 | printk(KERN_ERR "could not allocate integrity buffer\n"); | 418 | printk(KERN_ERR "could not allocate integrity buffer\n"); |
| 419 | return -EIO; | 419 | return -ENOMEM; |
| 420 | } | 420 | } |
| 421 | 421 | ||
| 422 | end = (((unsigned long) buf) + len + PAGE_SIZE - 1) >> PAGE_SHIFT; | 422 | end = (((unsigned long) buf) + len + PAGE_SIZE - 1) >> PAGE_SHIFT; |
diff --git a/fs/direct-io.c b/fs/direct-io.c index 51f270b479b6..48d74c7391d1 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c | |||
| @@ -634,7 +634,7 @@ static int dio_send_cur_page(struct dio *dio) | |||
| 634 | int ret = 0; | 634 | int ret = 0; |
| 635 | 635 | ||
| 636 | if (dio->bio) { | 636 | if (dio->bio) { |
| 637 | loff_t cur_offset = dio->block_in_file << dio->blkbits; | 637 | loff_t cur_offset = dio->cur_page_fs_offset; |
| 638 | loff_t bio_next_offset = dio->logical_offset_in_bio + | 638 | loff_t bio_next_offset = dio->logical_offset_in_bio + |
| 639 | dio->bio->bi_size; | 639 | dio->bio->bi_size; |
| 640 | 640 | ||
| @@ -659,7 +659,7 @@ static int dio_send_cur_page(struct dio *dio) | |||
| 659 | * Submit now if the underlying fs is about to perform a | 659 | * Submit now if the underlying fs is about to perform a |
| 660 | * metadata read | 660 | * metadata read |
| 661 | */ | 661 | */ |
| 662 | if (dio->boundary) | 662 | else if (dio->boundary) |
| 663 | dio_bio_submit(dio); | 663 | dio_bio_submit(dio); |
| 664 | } | 664 | } |
| 665 | 665 | ||
diff --git a/fs/exec.c b/fs/exec.c --- a/fs/exec.c +++ b/fs/exec.c | |||
| @@ -376,6 +376,9 @@ static int count(const char __user * const __user * argv, int max) | |||
| 376 | argv++; | 376 | argv++; |
| 377 | if (i++ >= max) | 377 | if (i++ >= max) |
| 378 | return -E2BIG; | 378 | return -E2BIG; |
| 379 | |||
| 380 | if (fatal_signal_pending(current)) | ||
| 381 | return -ERESTARTNOHAND; | ||
| 379 | cond_resched(); | 382 | cond_resched(); |
| 380 | } | 383 | } |
| 381 | } | 384 | } |
| @@ -419,6 +422,12 @@ static int copy_strings(int argc, const char __user *const __user *argv, | |||
| 419 | while (len > 0) { | 422 | while (len > 0) { |
| 420 | int offset, bytes_to_copy; | 423 | int offset, bytes_to_copy; |
| 421 | 424 | ||
| 425 | if (fatal_signal_pending(current)) { | ||
| 426 | ret = -ERESTARTNOHAND; | ||
| 427 | goto out; | ||
| 428 | } | ||
| 429 | cond_resched(); | ||
| 430 | |||
| 422 | offset = pos % PAGE_SIZE; | 431 | offset = pos % PAGE_SIZE; |
| 423 | if (offset == 0) | 432 | if (offset == 0) |
| 424 | offset = PAGE_SIZE; | 433 | offset = PAGE_SIZE; |
| @@ -594,6 +603,11 @@ int setup_arg_pages(struct linux_binprm *bprm, | |||
| 594 | #else | 603 | #else |
| 595 | stack_top = arch_align_stack(stack_top); | 604 | stack_top = arch_align_stack(stack_top); |
| 596 | stack_top = PAGE_ALIGN(stack_top); | 605 | stack_top = PAGE_ALIGN(stack_top); |
| 606 | |||
| 607 | if (unlikely(stack_top < mmap_min_addr) || | ||
| 608 | unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr)) | ||
| 609 | return -ENOMEM; | ||
| 610 | |||
| 597 | stack_shift = vma->vm_end - stack_top; | 611 | stack_shift = vma->vm_end - stack_top; |
| 598 | 612 | ||
| 599 | bprm->p -= stack_shift; | 613 | bprm->p -= stack_shift; |
diff --git a/fs/fcntl.c b/fs/fcntl.c index 6769fd0f35b8..f8cc34f542c3 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c | |||
| @@ -769,11 +769,15 @@ EXPORT_SYMBOL(kill_fasync); | |||
| 769 | 769 | ||
| 770 | static int __init fcntl_init(void) | 770 | static int __init fcntl_init(void) |
| 771 | { | 771 | { |
| 772 | /* please add new bits here to ensure allocation uniqueness */ | 772 | /* |
| 773 | BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32( | 773 | * Please add new bits here to ensure allocation uniqueness. |
| 774 | * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY | ||
| 775 | * is defined as O_NONBLOCK on some platforms and not on others. | ||
| 776 | */ | ||
| 777 | BUILD_BUG_ON(18 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32( | ||
| 774 | O_RDONLY | O_WRONLY | O_RDWR | | 778 | O_RDONLY | O_WRONLY | O_RDWR | |
| 775 | O_CREAT | O_EXCL | O_NOCTTY | | 779 | O_CREAT | O_EXCL | O_NOCTTY | |
| 776 | O_TRUNC | O_APPEND | O_NONBLOCK | | 780 | O_TRUNC | O_APPEND | /* O_NONBLOCK | */ |
| 777 | __O_SYNC | O_DSYNC | FASYNC | | 781 | __O_SYNC | O_DSYNC | FASYNC | |
| 778 | O_DIRECT | O_LARGEFILE | O_DIRECTORY | | 782 | O_DIRECT | O_LARGEFILE | O_DIRECTORY | |
| 779 | O_NOFOLLOW | O_NOATIME | O_CLOEXEC | | 783 | O_NOFOLLOW | O_NOATIME | O_CLOEXEC | |
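
The fcntl_init() check above relies on a counting trick: if every O_* flag is a distinct single bit, the population count (HWEIGHT32) of their bitwise OR equals the number of flags, so the BUILD_BUG_ON trips as soon as two flags collide or one stops being a single bit. A small standalone sketch of the same idea, with made-up flag values and assuming GCC/Clang fold __builtin_popcount of a constant at compile time:

    /* Illustrative sketch only: compile-time check that flags are distinct single bits. */
    #include <assert.h>

    #define FLAG_READ   0x01
    #define FLAG_WRITE  0x02
    #define FLAG_APPEND 0x04
    #define FLAG_SYNC   0x08

    #define ALL_FLAGS (FLAG_READ | FLAG_WRITE | FLAG_APPEND | FLAG_SYNC)

    /* 4 flags, so the popcount of their OR must be exactly 4. */
    static_assert(__builtin_popcount(ALL_FLAGS) == 4,
                  "flag values overlap or are not single bits");

    int main(void)
    {
        return 0;
    }
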
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 7d9d06ba184b..81e086d8aa57 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c | |||
| @@ -808,7 +808,7 @@ int bdi_writeback_thread(void *data) | |||
| 808 | wb->last_active = jiffies; | 808 | wb->last_active = jiffies; |
| 809 | 809 | ||
| 810 | set_current_state(TASK_INTERRUPTIBLE); | 810 | set_current_state(TASK_INTERRUPTIBLE); |
| 811 | if (!list_empty(&bdi->work_list)) { | 811 | if (!list_empty(&bdi->work_list) || kthread_should_stop()) { |
| 812 | __set_current_state(TASK_RUNNING); | 812 | __set_current_state(TASK_RUNNING); |
| 813 | continue; | 813 | continue; |
| 814 | } | 814 | } |
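
The fs-writeback change above makes bdi_writeback_thread() re-check kthread_should_stop() after marking itself TASK_INTERRUPTIBLE, so a stop request that races with the sleep cannot be lost. A rough pthread sketch of the same rule, with all names invented: every condition that can wake the worker must be re-tested under the lock the wakers take, before the worker actually sleeps.

    /* Illustrative sketch only: re-check both "work pending" and "stop requested" before sleeping. */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static int work_pending;
    static int should_stop;

    static void *writeback_thread(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        while (!should_stop) {
            if (work_pending) {
                work_pending = 0;
                pthread_mutex_unlock(&lock);
                puts("flushing work");     /* do the work without the lock held */
                pthread_mutex_lock(&lock);
                continue;
            }
            /* Nothing to do and not asked to stop: only now is it safe to sleep. */
            pthread_cond_wait(&cond, &lock);
        }
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t tid;
        pthread_create(&tid, NULL, writeback_thread, NULL);

        pthread_mutex_lock(&lock);
        work_pending = 1;                  /* hand the worker some work */
        pthread_cond_broadcast(&cond);
        pthread_mutex_unlock(&lock);

        pthread_mutex_lock(&lock);
        should_stop = 1;                   /* ask the worker to exit ... */
        pthread_cond_broadcast(&cond);     /* ... and wake it so it notices */
        pthread_mutex_unlock(&lock);

        pthread_join(tid, NULL);
        return 0;
    }
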
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 69ad053ffd78..d367af1514ef 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c | |||
| @@ -276,7 +276,7 @@ static void flush_bg_queue(struct fuse_conn *fc) | |||
| 276 | * Called with fc->lock, unlocks it | 276 | * Called with fc->lock, unlocks it |
| 277 | */ | 277 | */ |
| 278 | static void request_end(struct fuse_conn *fc, struct fuse_req *req) | 278 | static void request_end(struct fuse_conn *fc, struct fuse_req *req) |
| 279 | __releases(&fc->lock) | 279 | __releases(fc->lock) |
| 280 | { | 280 | { |
| 281 | void (*end) (struct fuse_conn *, struct fuse_req *) = req->end; | 281 | void (*end) (struct fuse_conn *, struct fuse_req *) = req->end; |
| 282 | req->end = NULL; | 282 | req->end = NULL; |
| @@ -306,8 +306,8 @@ __releases(&fc->lock) | |||
| 306 | 306 | ||
| 307 | static void wait_answer_interruptible(struct fuse_conn *fc, | 307 | static void wait_answer_interruptible(struct fuse_conn *fc, |
| 308 | struct fuse_req *req) | 308 | struct fuse_req *req) |
| 309 | __releases(&fc->lock) | 309 | __releases(fc->lock) |
| 310 | __acquires(&fc->lock) | 310 | __acquires(fc->lock) |
| 311 | { | 311 | { |
| 312 | if (signal_pending(current)) | 312 | if (signal_pending(current)) |
| 313 | return; | 313 | return; |
| @@ -325,8 +325,8 @@ static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req) | |||
| 325 | } | 325 | } |
| 326 | 326 | ||
| 327 | static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req) | 327 | static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req) |
| 328 | __releases(&fc->lock) | 328 | __releases(fc->lock) |
| 329 | __acquires(&fc->lock) | 329 | __acquires(fc->lock) |
| 330 | { | 330 | { |
| 331 | if (!fc->no_interrupt) { | 331 | if (!fc->no_interrupt) { |
| 332 | /* Any signal may interrupt this */ | 332 | /* Any signal may interrupt this */ |
| @@ -905,8 +905,8 @@ static int request_pending(struct fuse_conn *fc) | |||
| 905 | 905 | ||
| 906 | /* Wait until a request is available on the pending list */ | 906 | /* Wait until a request is available on the pending list */ |
| 907 | static void request_wait(struct fuse_conn *fc) | 907 | static void request_wait(struct fuse_conn *fc) |
| 908 | __releases(&fc->lock) | 908 | __releases(fc->lock) |
| 909 | __acquires(&fc->lock) | 909 | __acquires(fc->lock) |
| 910 | { | 910 | { |
| 911 | DECLARE_WAITQUEUE(wait, current); | 911 | DECLARE_WAITQUEUE(wait, current); |
| 912 | 912 | ||
| @@ -934,7 +934,7 @@ __acquires(&fc->lock) | |||
| 934 | */ | 934 | */ |
| 935 | static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs, | 935 | static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs, |
| 936 | size_t nbytes, struct fuse_req *req) | 936 | size_t nbytes, struct fuse_req *req) |
| 937 | __releases(&fc->lock) | 937 | __releases(fc->lock) |
| 938 | { | 938 | { |
| 939 | struct fuse_in_header ih; | 939 | struct fuse_in_header ih; |
| 940 | struct fuse_interrupt_in arg; | 940 | struct fuse_interrupt_in arg; |
| @@ -1720,8 +1720,8 @@ static unsigned fuse_dev_poll(struct file *file, poll_table *wait) | |||
| 1720 | * This function releases and reacquires fc->lock | 1720 | * This function releases and reacquires fc->lock |
| 1721 | */ | 1721 | */ |
| 1722 | static void end_requests(struct fuse_conn *fc, struct list_head *head) | 1722 | static void end_requests(struct fuse_conn *fc, struct list_head *head) |
| 1723 | __releases(&fc->lock) | 1723 | __releases(fc->lock) |
| 1724 | __acquires(&fc->lock) | 1724 | __acquires(fc->lock) |
| 1725 | { | 1725 | { |
| 1726 | while (!list_empty(head)) { | 1726 | while (!list_empty(head)) { |
| 1727 | struct fuse_req *req; | 1727 | struct fuse_req *req; |
| @@ -1744,8 +1744,8 @@ __acquires(&fc->lock) | |||
| 1744 | * locked). | 1744 | * locked). |
| 1745 | */ | 1745 | */ |
| 1746 | static void end_io_requests(struct fuse_conn *fc) | 1746 | static void end_io_requests(struct fuse_conn *fc) |
| 1747 | __releases(&fc->lock) | 1747 | __releases(fc->lock) |
| 1748 | __acquires(&fc->lock) | 1748 | __acquires(fc->lock) |
| 1749 | { | 1749 | { |
| 1750 | while (!list_empty(&fc->io)) { | 1750 | while (!list_empty(&fc->io)) { |
| 1751 | struct fuse_req *req = | 1751 | struct fuse_req *req = |
| @@ -1769,6 +1769,16 @@ __acquires(&fc->lock) | |||
| 1769 | } | 1769 | } |
| 1770 | } | 1770 | } |
| 1771 | 1771 | ||
| 1772 | static void end_queued_requests(struct fuse_conn *fc) | ||
| 1773 | __releases(fc->lock) | ||
| 1774 | __acquires(fc->lock) | ||
| 1775 | { | ||
| 1776 | fc->max_background = UINT_MAX; | ||
| 1777 | flush_bg_queue(fc); | ||
| 1778 | end_requests(fc, &fc->pending); | ||
| 1779 | end_requests(fc, &fc->processing); | ||
| 1780 | } | ||
| 1781 | |||
| 1772 | /* | 1782 | /* |
| 1773 | * Abort all requests. | 1783 | * Abort all requests. |
| 1774 | * | 1784 | * |
| @@ -1795,8 +1805,7 @@ void fuse_abort_conn(struct fuse_conn *fc) | |||
| 1795 | fc->connected = 0; | 1805 | fc->connected = 0; |
| 1796 | fc->blocked = 0; | 1806 | fc->blocked = 0; |
| 1797 | end_io_requests(fc); | 1807 | end_io_requests(fc); |
| 1798 | end_requests(fc, &fc->pending); | 1808 | end_queued_requests(fc); |
| 1799 | end_requests(fc, &fc->processing); | ||
| 1800 | wake_up_all(&fc->waitq); | 1809 | wake_up_all(&fc->waitq); |
| 1801 | wake_up_all(&fc->blocked_waitq); | 1810 | wake_up_all(&fc->blocked_waitq); |
| 1802 | kill_fasync(&fc->fasync, SIGIO, POLL_IN); | 1811 | kill_fasync(&fc->fasync, SIGIO, POLL_IN); |
| @@ -1811,8 +1820,9 @@ int fuse_dev_release(struct inode *inode, struct file *file) | |||
| 1811 | if (fc) { | 1820 | if (fc) { |
| 1812 | spin_lock(&fc->lock); | 1821 | spin_lock(&fc->lock); |
| 1813 | fc->connected = 0; | 1822 | fc->connected = 0; |
| 1814 | end_requests(fc, &fc->pending); | 1823 | fc->blocked = 0; |
| 1815 | end_requests(fc, &fc->processing); | 1824 | end_queued_requests(fc); |
| 1825 | wake_up_all(&fc->blocked_waitq); | ||
| 1816 | spin_unlock(&fc->lock); | 1826 | spin_unlock(&fc->lock); |
| 1817 | fuse_conn_put(fc); | 1827 | fuse_conn_put(fc); |
| 1818 | } | 1828 | } |
diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 147c1f71bdb9..c8224587123f 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c | |||
| @@ -1144,8 +1144,8 @@ static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req) | |||
| 1144 | 1144 | ||
| 1145 | /* Called under fc->lock, may release and reacquire it */ | 1145 | /* Called under fc->lock, may release and reacquire it */ |
| 1146 | static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req) | 1146 | static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req) |
| 1147 | __releases(&fc->lock) | 1147 | __releases(fc->lock) |
| 1148 | __acquires(&fc->lock) | 1148 | __acquires(fc->lock) |
| 1149 | { | 1149 | { |
| 1150 | struct fuse_inode *fi = get_fuse_inode(req->inode); | 1150 | struct fuse_inode *fi = get_fuse_inode(req->inode); |
| 1151 | loff_t size = i_size_read(req->inode); | 1151 | loff_t size = i_size_read(req->inode); |
| @@ -1183,8 +1183,8 @@ __acquires(&fc->lock) | |||
| 1183 | * Called with fc->lock | 1183 | * Called with fc->lock |
| 1184 | */ | 1184 | */ |
| 1185 | void fuse_flush_writepages(struct inode *inode) | 1185 | void fuse_flush_writepages(struct inode *inode) |
| 1186 | __releases(&fc->lock) | 1186 | __releases(fc->lock) |
| 1187 | __acquires(&fc->lock) | 1187 | __acquires(fc->lock) |
| 1188 | { | 1188 | { |
| 1189 | struct fuse_conn *fc = get_fuse_conn(inode); | 1189 | struct fuse_conn *fc = get_fuse_conn(inode); |
| 1190 | struct fuse_inode *fi = get_fuse_inode(inode); | 1190 | struct fuse_inode *fi = get_fuse_inode(inode); |
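The fuse hunks above adjust the sparse lock-context annotations (__releases()/__acquires()) on functions that drop and retake fc->lock, and route the abort and release paths through the new end_queued_requests() helper. A minimal, self-contained sketch of the annotate-drop-retake pattern; the annotation macros are stubbed out so the example builds without kernel headers, and the conn struct is purely illustrative:

#include <pthread.h>
#include <stdio.h>

/* Stand-ins for the kernel's sparse annotations; no-ops for a plain build. */
#define __releases(x)
#define __acquires(x)

struct conn {
	pthread_mutex_t lock;
	int pending;
};

/*
 * Mirrors the fuse pattern: the function is entered with conn->lock held,
 * drops it to do work, and reacquires it before returning.
 */
static void drain_pending(struct conn *c)
	__releases(c->lock)
	__acquires(c->lock)
{
	while (c->pending > 0) {
		int item = c->pending--;

		pthread_mutex_unlock(&c->lock);
		printf("handling item %d outside the lock\n", item);
		pthread_mutex_lock(&c->lock);
	}
}

int main(void)
{
	struct conn c = { PTHREAD_MUTEX_INITIALIZER, 3 };

	pthread_mutex_lock(&c.lock);
	drain_pending(&c);
	pthread_mutex_unlock(&c.lock);
	return 0;
}

With sparse checking enabled, such annotations document that the function is entered and left with the lock held even though it is released internally.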
diff --git a/fs/minix/namei.c b/fs/minix/namei.c index e20ee85955d1..f3f3578393a4 100644 --- a/fs/minix/namei.c +++ b/fs/minix/namei.c | |||
| @@ -115,7 +115,7 @@ static int minix_mkdir(struct inode * dir, struct dentry *dentry, int mode) | |||
| 115 | 115 | ||
| 116 | inode_inc_link_count(dir); | 116 | inode_inc_link_count(dir); |
| 117 | 117 | ||
| 118 | inode = minix_new_inode(dir, mode, &err); | 118 | inode = minix_new_inode(dir, S_IFDIR | mode, &err); |
| 119 | if (!inode) | 119 | if (!inode) |
| 120 | goto out_dir; | 120 | goto out_dir; |
| 121 | 121 | ||
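The minix change passes S_IFDIR | mode into minix_new_inode(), so the directory type bit is part of the mode the new inode is created with rather than only the caller's permission bits. A tiny standalone illustration of how type and permission bits combine:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	mode_t perm = 0755;              /* what a mkdir caller passes in */
	mode_t mode = S_IFDIR | perm;    /* what the on-disk inode needs */

	printf("perm bits: %o\n", (unsigned)perm);      /* 755 */
	printf("full mode: %o\n", (unsigned)mode);      /* 40755 */
	printf("S_ISDIR(): %d\n", S_ISDIR(mode) ? 1 : 0); /* 1 */
	return 0;
}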
diff --git a/fs/namespace.c b/fs/namespace.c index de402eb6eafb..a72eaabfe8f2 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
| @@ -1484,13 +1484,30 @@ out_unlock: | |||
| 1484 | } | 1484 | } |
| 1485 | 1485 | ||
| 1486 | /* | 1486 | /* |
| 1487 | * Sanity check the flags to change_mnt_propagation. | ||
| 1488 | */ | ||
| 1489 | |||
| 1490 | static int flags_to_propagation_type(int flags) | ||
| 1491 | { | ||
| 1492 | int type = flags & ~MS_REC; | ||
| 1493 | |||
| 1494 | /* Fail if any non-propagation flags are set */ | ||
| 1495 | if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE)) | ||
| 1496 | return 0; | ||
| 1497 | /* Only one propagation flag should be set */ | ||
| 1498 | if (!is_power_of_2(type)) | ||
| 1499 | return 0; | ||
| 1500 | return type; | ||
| 1501 | } | ||
| 1502 | |||
| 1503 | /* | ||
| 1487 | * recursively change the type of the mountpoint. | 1504 | * recursively change the type of the mountpoint. |
| 1488 | */ | 1505 | */ |
| 1489 | static int do_change_type(struct path *path, int flag) | 1506 | static int do_change_type(struct path *path, int flag) |
| 1490 | { | 1507 | { |
| 1491 | struct vfsmount *m, *mnt = path->mnt; | 1508 | struct vfsmount *m, *mnt = path->mnt; |
| 1492 | int recurse = flag & MS_REC; | 1509 | int recurse = flag & MS_REC; |
| 1493 | int type = flag & ~MS_REC; | 1510 | int type; |
| 1494 | int err = 0; | 1511 | int err = 0; |
| 1495 | 1512 | ||
| 1496 | if (!capable(CAP_SYS_ADMIN)) | 1513 | if (!capable(CAP_SYS_ADMIN)) |
| @@ -1499,6 +1516,10 @@ static int do_change_type(struct path *path, int flag) | |||
| 1499 | if (path->dentry != path->mnt->mnt_root) | 1516 | if (path->dentry != path->mnt->mnt_root) |
| 1500 | return -EINVAL; | 1517 | return -EINVAL; |
| 1501 | 1518 | ||
| 1519 | type = flags_to_propagation_type(flag); | ||
| 1520 | if (!type) | ||
| 1521 | return -EINVAL; | ||
| 1522 | |||
| 1502 | down_write(&namespace_sem); | 1523 | down_write(&namespace_sem); |
| 1503 | if (type == MS_SHARED) { | 1524 | if (type == MS_SHARED) { |
| 1504 | err = invent_group_ids(mnt, recurse); | 1525 | err = invent_group_ids(mnt, recurse); |
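The new flags_to_propagation_type() helper accepts exactly one of MS_SHARED, MS_PRIVATE, MS_SLAVE or MS_UNBINDABLE (optionally combined with MS_REC) and rejects everything else by masking off MS_REC and requiring the remainder to be a single set bit. A standalone sketch of the same validation; the MS_* constants are defined locally and assumed to match the Linux mount-flag values of this era:

#include <stdio.h>

/* Assumed to mirror the Linux mount-flag values. */
#define MS_REC		(1 << 14)
#define MS_UNBINDABLE	(1 << 17)
#define MS_PRIVATE	(1 << 18)
#define MS_SLAVE	(1 << 19)
#define MS_SHARED	(1 << 20)

static int is_power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

/* Returns the single propagation flag, or 0 if the combination is invalid. */
static int flags_to_propagation_type(int flags)
{
	int type = flags & ~MS_REC;

	if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		return 0;
	if (!is_power_of_2(type))
		return 0;
	return type;
}

int main(void)
{
	printf("%d\n", flags_to_propagation_type(MS_SHARED | MS_REC) != 0); /* 1: valid */
	printf("%d\n", flags_to_propagation_type(MS_SHARED | MS_SLAVE));    /* 0: two flags */
	printf("%d\n", flags_to_propagation_type(0));                       /* 0: none */
	return 0;
}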
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 3dfef0623968..cf0d2ffb3c84 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c | |||
| @@ -440,7 +440,7 @@ test_share(struct nfs4_stateid *stp, struct nfsd4_open *open) { | |||
| 440 | 440 | ||
| 441 | static int nfs4_access_to_omode(u32 access) | 441 | static int nfs4_access_to_omode(u32 access) |
| 442 | { | 442 | { |
| 443 | switch (access) { | 443 | switch (access & NFS4_SHARE_ACCESS_BOTH) { |
| 444 | case NFS4_SHARE_ACCESS_READ: | 444 | case NFS4_SHARE_ACCESS_READ: |
| 445 | return O_RDONLY; | 445 | return O_RDONLY; |
| 446 | case NFS4_SHARE_ACCESS_WRITE: | 446 | case NFS4_SHARE_ACCESS_WRITE: |
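The nfsd change masks the share-access value with NFS4_SHARE_ACCESS_BOTH before the switch, so any additional bits a client sets in that field no longer push a valid read/write request into the default path. A small sketch with the access constants defined locally (assumed to follow RFC 3530); only the read case is visible in the hunk above, so the write/both branches here are the obvious mapping rather than quoted from the source:

#include <stdio.h>
#include <fcntl.h>

/* Assumed NFSv4 share-access constants (RFC 3530). */
#define NFS4_SHARE_ACCESS_READ	0x0001
#define NFS4_SHARE_ACCESS_WRITE	0x0002
#define NFS4_SHARE_ACCESS_BOTH	0x0003

static int access_to_omode(unsigned int access)
{
	switch (access & NFS4_SHARE_ACCESS_BOTH) {
	case NFS4_SHARE_ACCESS_READ:
		return O_RDONLY;
	case NFS4_SHARE_ACCESS_WRITE:
		return O_WRONLY;
	case NFS4_SHARE_ACCESS_BOTH:
		return O_RDWR;
	}
	return -1;	/* neither read nor write was requested */
}

int main(void)
{
	/* A hypothetical extra flag above the access bits is simply ignored. */
	unsigned int access = NFS4_SHARE_ACCESS_READ | 0x10000;

	printf("omode = %d (O_RDONLY = %d)\n", access_to_omode(access), O_RDONLY);
	return 0;
}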
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c index 4317f177ea7c..ba7c10c917fc 100644 --- a/fs/nilfs2/the_nilfs.c +++ b/fs/nilfs2/the_nilfs.c | |||
| @@ -446,6 +446,7 @@ int load_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi) | |||
| 446 | nilfs_mdt_destroy(nilfs->ns_cpfile); | 446 | nilfs_mdt_destroy(nilfs->ns_cpfile); |
| 447 | nilfs_mdt_destroy(nilfs->ns_sufile); | 447 | nilfs_mdt_destroy(nilfs->ns_sufile); |
| 448 | nilfs_mdt_destroy(nilfs->ns_dat); | 448 | nilfs_mdt_destroy(nilfs->ns_dat); |
| 449 | nilfs_mdt_destroy(nilfs->ns_gc_dat); | ||
| 449 | 450 | ||
| 450 | failed: | 451 | failed: |
| 451 | nilfs_clear_recovery_info(&ri); | 452 | nilfs_clear_recovery_info(&ri); |
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index 215e12ce1d85..592fae5007d1 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c | |||
| @@ -6672,7 +6672,7 @@ int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end, | |||
| 6672 | last_page_bytes = PAGE_ALIGN(end); | 6672 | last_page_bytes = PAGE_ALIGN(end); |
| 6673 | index = start >> PAGE_CACHE_SHIFT; | 6673 | index = start >> PAGE_CACHE_SHIFT; |
| 6674 | do { | 6674 | do { |
| 6675 | pages[numpages] = grab_cache_page(mapping, index); | 6675 | pages[numpages] = find_or_create_page(mapping, index, GFP_NOFS); |
| 6676 | if (!pages[numpages]) { | 6676 | if (!pages[numpages]) { |
| 6677 | ret = -ENOMEM; | 6677 | ret = -ENOMEM; |
| 6678 | mlog_errno(ret); | 6678 | mlog_errno(ret); |
diff --git a/fs/ocfs2/blockcheck.c b/fs/ocfs2/blockcheck.c index ec6d12339593..c7ee03c22226 100644 --- a/fs/ocfs2/blockcheck.c +++ b/fs/ocfs2/blockcheck.c | |||
| @@ -439,7 +439,7 @@ int ocfs2_block_check_validate(void *data, size_t blocksize, | |||
| 439 | 439 | ||
| 440 | ocfs2_blockcheck_inc_failure(stats); | 440 | ocfs2_blockcheck_inc_failure(stats); |
| 441 | mlog(ML_ERROR, | 441 | mlog(ML_ERROR, |
| 442 | "CRC32 failed: stored: %u, computed %u. Applying ECC.\n", | 442 | "CRC32 failed: stored: 0x%x, computed 0x%x. Applying ECC.\n", |
| 443 | (unsigned int)check.bc_crc32e, (unsigned int)crc); | 443 | (unsigned int)check.bc_crc32e, (unsigned int)crc); |
| 444 | 444 | ||
| 445 | /* Ok, try ECC fixups */ | 445 | /* Ok, try ECC fixups */ |
| @@ -453,7 +453,7 @@ int ocfs2_block_check_validate(void *data, size_t blocksize, | |||
| 453 | goto out; | 453 | goto out; |
| 454 | } | 454 | } |
| 455 | 455 | ||
| 456 | mlog(ML_ERROR, "Fixed CRC32 failed: stored: %u, computed %u\n", | 456 | mlog(ML_ERROR, "Fixed CRC32 failed: stored: 0x%x, computed 0x%x\n", |
| 457 | (unsigned int)check.bc_crc32e, (unsigned int)crc); | 457 | (unsigned int)check.bc_crc32e, (unsigned int)crc); |
| 458 | 458 | ||
| 459 | rc = -EIO; | 459 | rc = -EIO; |
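The blockcheck messages switch the stored and computed CRC32 values from %u to 0x%x, presumably because hexadecimal output is easier to compare against on-disk hex dumps. A trivial illustration of the difference:

#include <stdio.h>

int main(void)
{
	unsigned int stored = 0xdeadbeefu, computed = 0xdeadbee0u;

	printf("decimal: stored: %u, computed %u\n", stored, computed);
	printf("hex:     stored: 0x%x, computed 0x%x\n", stored, computed);
	return 0;
}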
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 81296b4e3646..9a03c151b5ce 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c | |||
| @@ -36,6 +36,7 @@ | |||
| 36 | #include <linux/writeback.h> | 36 | #include <linux/writeback.h> |
| 37 | #include <linux/falloc.h> | 37 | #include <linux/falloc.h> |
| 38 | #include <linux/quotaops.h> | 38 | #include <linux/quotaops.h> |
| 39 | #include <linux/blkdev.h> | ||
| 39 | 40 | ||
| 40 | #define MLOG_MASK_PREFIX ML_INODE | 41 | #define MLOG_MASK_PREFIX ML_INODE |
| 41 | #include <cluster/masklog.h> | 42 | #include <cluster/masklog.h> |
| @@ -190,8 +191,16 @@ static int ocfs2_sync_file(struct file *file, int datasync) | |||
| 190 | if (err) | 191 | if (err) |
| 191 | goto bail; | 192 | goto bail; |
| 192 | 193 | ||
| 193 | if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) | 194 | if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) { |
| 195 | /* | ||
| 196 | * We still have to flush the drive's caches to get data to the | ||

| 197 | * platter | ||
| 198 | */ | ||
| 199 | if (osb->s_mount_opt & OCFS2_MOUNT_BARRIER) | ||
| 200 | blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, | ||
| 201 | NULL, BLKDEV_IFL_WAIT); | ||
| 194 | goto bail; | 202 | goto bail; |
| 203 | } | ||
| 195 | 204 | ||
| 196 | journal = osb->journal->j_journal; | 205 | journal = osb->journal->j_journal; |
| 197 | err = jbd2_journal_force_commit(journal); | 206 | err = jbd2_journal_force_commit(journal); |
| @@ -774,7 +783,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from, | |||
| 774 | BUG_ON(abs_to > (((u64)index + 1) << PAGE_CACHE_SHIFT)); | 783 | BUG_ON(abs_to > (((u64)index + 1) << PAGE_CACHE_SHIFT)); |
| 775 | BUG_ON(abs_from & (inode->i_blkbits - 1)); | 784 | BUG_ON(abs_from & (inode->i_blkbits - 1)); |
| 776 | 785 | ||
| 777 | page = grab_cache_page(mapping, index); | 786 | page = find_or_create_page(mapping, index, GFP_NOFS); |
| 778 | if (!page) { | 787 | if (!page) { |
| 779 | ret = -ENOMEM; | 788 | ret = -ENOMEM; |
| 780 | mlog_errno(ret); | 789 | mlog_errno(ret); |
| @@ -2329,7 +2338,7 @@ out_dio: | |||
| 2329 | BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT)); | 2338 | BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT)); |
| 2330 | 2339 | ||
| 2331 | if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) || | 2340 | if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) || |
| 2332 | ((file->f_flags & O_DIRECT) && has_refcount)) { | 2341 | ((file->f_flags & O_DIRECT) && !direct_io)) { |
| 2333 | ret = filemap_fdatawrite_range(file->f_mapping, pos, | 2342 | ret = filemap_fdatawrite_range(file->f_mapping, pos, |
| 2334 | pos + count - 1); | 2343 | pos + count - 1); |
| 2335 | if (ret < 0) | 2344 | if (ret < 0) |
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c index 0492464916b1..eece3e05d9d0 100644 --- a/fs/ocfs2/inode.c +++ b/fs/ocfs2/inode.c | |||
| @@ -488,7 +488,11 @@ static int ocfs2_read_locked_inode(struct inode *inode, | |||
| 488 | OCFS2_BH_IGNORE_CACHE); | 488 | OCFS2_BH_IGNORE_CACHE); |
| 489 | } else { | 489 | } else { |
| 490 | status = ocfs2_read_blocks_sync(osb, args->fi_blkno, 1, &bh); | 490 | status = ocfs2_read_blocks_sync(osb, args->fi_blkno, 1, &bh); |
| 491 | if (!status) | 491 | /* |
| 492 | * If buffer is in jbd, then its checksum may not have been | ||
| 493 | * computed as yet. | ||
| 494 | */ | ||
| 495 | if (!status && !buffer_jbd(bh)) | ||
| 492 | status = ocfs2_validate_inode_block(osb->sb, bh); | 496 | status = ocfs2_validate_inode_block(osb->sb, bh); |
| 493 | } | 497 | } |
| 494 | if (status < 0) { | 498 | if (status < 0) { |
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c index af2b8fe1f139..4c18f4ad93b4 100644 --- a/fs/ocfs2/mmap.c +++ b/fs/ocfs2/mmap.c | |||
| @@ -74,9 +74,11 @@ static int __ocfs2_page_mkwrite(struct inode *inode, struct buffer_head *di_bh, | |||
| 74 | /* | 74 | /* |
| 75 | * Another node might have truncated while we were waiting on | 75 | * Another node might have truncated while we were waiting on |
| 76 | * cluster locks. | 76 | * cluster locks. |
| 77 | * We don't check size == 0 before the shift. This is borrowed | ||
| 78 | * from do_generic_file_read. | ||
| 77 | */ | 79 | */ |
| 78 | last_index = size >> PAGE_CACHE_SHIFT; | 80 | last_index = (size - 1) >> PAGE_CACHE_SHIFT; |
| 79 | if (page->index > last_index) { | 81 | if (unlikely(!size || page->index > last_index)) { |
| 80 | ret = -EINVAL; | 82 | ret = -EINVAL; |
| 81 | goto out; | 83 | goto out; |
| 82 | } | 84 | } |
| @@ -107,7 +109,7 @@ static int __ocfs2_page_mkwrite(struct inode *inode, struct buffer_head *di_bh, | |||
| 107 | * because the "write" would invalidate their data. | 109 | * because the "write" would invalidate their data. |
| 108 | */ | 110 | */ |
| 109 | if (page->index == last_index) | 111 | if (page->index == last_index) |
| 110 | len = size & ~PAGE_CACHE_MASK; | 112 | len = ((size - 1) & ~PAGE_CACHE_MASK) + 1; |
| 111 | 113 | ||
| 112 | ret = ocfs2_write_begin_nolock(mapping, pos, len, 0, &locked_page, | 114 | ret = ocfs2_write_begin_nolock(mapping, pos, len, 0, &locked_page, |
| 113 | &fsdata, di_bh, page); | 115 | &fsdata, di_bh, page); |
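The mmap.c hunk recomputes the last page index as (size - 1) >> PAGE_CACHE_SHIFT and the bytes used in that page as ((size - 1) & ~PAGE_CACHE_MASK) + 1, with size == 0 rejected separately, so a size that is an exact multiple of the page size no longer points one page past the end. A worked example, assuming a 4096-byte page size and local PAGE_* macros in place of the kernel's PAGE_CACHE_* ones:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

static void show(unsigned long size)
{
	unsigned long old_last = size >> PAGE_SHIFT;
	unsigned long new_last = (size - 1) >> PAGE_SHIFT;
	unsigned long old_len  = size & ~PAGE_MASK;
	unsigned long new_len  = ((size - 1) & ~PAGE_MASK) + 1;

	printf("size %6lu: last index %lu -> %lu, last-page bytes %lu -> %lu\n",
	       size, old_last, new_last, old_len, new_len);
}

int main(void)
{
	show(4096);	/* exactly one page: index 1 -> 0, bytes 0 -> 4096 */
	show(6000);	/* page and a half:  index 1 -> 1, bytes 1904 -> 1904 */
	return 0;
}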
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index f171b51a74f7..a00dda2e4f16 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c | |||
| @@ -472,32 +472,23 @@ leave: | |||
| 472 | return status; | 472 | return status; |
| 473 | } | 473 | } |
| 474 | 474 | ||
| 475 | static int ocfs2_mknod_locked(struct ocfs2_super *osb, | 475 | static int __ocfs2_mknod_locked(struct inode *dir, |
| 476 | struct inode *dir, | 476 | struct inode *inode, |
| 477 | struct inode *inode, | 477 | dev_t dev, |
| 478 | dev_t dev, | 478 | struct buffer_head **new_fe_bh, |
| 479 | struct buffer_head **new_fe_bh, | 479 | struct buffer_head *parent_fe_bh, |
| 480 | struct buffer_head *parent_fe_bh, | 480 | handle_t *handle, |
| 481 | handle_t *handle, | 481 | struct ocfs2_alloc_context *inode_ac, |
| 482 | struct ocfs2_alloc_context *inode_ac) | 482 | u64 fe_blkno, u64 suballoc_loc, u16 suballoc_bit) |
| 483 | { | 483 | { |
| 484 | int status = 0; | 484 | int status = 0; |
| 485 | struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); | ||
| 485 | struct ocfs2_dinode *fe = NULL; | 486 | struct ocfs2_dinode *fe = NULL; |
| 486 | struct ocfs2_extent_list *fel; | 487 | struct ocfs2_extent_list *fel; |
| 487 | u64 suballoc_loc, fe_blkno = 0; | ||
| 488 | u16 suballoc_bit; | ||
| 489 | u16 feat; | 488 | u16 feat; |
| 490 | 489 | ||
| 491 | *new_fe_bh = NULL; | 490 | *new_fe_bh = NULL; |
| 492 | 491 | ||
| 493 | status = ocfs2_claim_new_inode(handle, dir, parent_fe_bh, | ||
| 494 | inode_ac, &suballoc_loc, | ||
| 495 | &suballoc_bit, &fe_blkno); | ||
| 496 | if (status < 0) { | ||
| 497 | mlog_errno(status); | ||
| 498 | goto leave; | ||
| 499 | } | ||
| 500 | |||
| 501 | /* populate as many fields early on as possible - many of | 492 | /* populate as many fields early on as possible - many of |
| 502 | * these are used by the support functions here and in | 493 | * these are used by the support functions here and in |
| 503 | * callers. */ | 494 | * callers. */ |
| @@ -591,6 +582,34 @@ leave: | |||
| 591 | return status; | 582 | return status; |
| 592 | } | 583 | } |
| 593 | 584 | ||
| 585 | static int ocfs2_mknod_locked(struct ocfs2_super *osb, | ||
| 586 | struct inode *dir, | ||
| 587 | struct inode *inode, | ||
| 588 | dev_t dev, | ||
| 589 | struct buffer_head **new_fe_bh, | ||
| 590 | struct buffer_head *parent_fe_bh, | ||
| 591 | handle_t *handle, | ||
| 592 | struct ocfs2_alloc_context *inode_ac) | ||
| 593 | { | ||
| 594 | int status = 0; | ||
| 595 | u64 suballoc_loc, fe_blkno = 0; | ||
| 596 | u16 suballoc_bit; | ||
| 597 | |||
| 598 | *new_fe_bh = NULL; | ||
| 599 | |||
| 600 | status = ocfs2_claim_new_inode(handle, dir, parent_fe_bh, | ||
| 601 | inode_ac, &suballoc_loc, | ||
| 602 | &suballoc_bit, &fe_blkno); | ||
| 603 | if (status < 0) { | ||
| 604 | mlog_errno(status); | ||
| 605 | return status; | ||
| 606 | } | ||
| 607 | |||
| 608 | return __ocfs2_mknod_locked(dir, inode, dev, new_fe_bh, | ||
| 609 | parent_fe_bh, handle, inode_ac, | ||
| 610 | fe_blkno, suballoc_loc, suballoc_bit); | ||
| 611 | } | ||
| 612 | |||
| 594 | static int ocfs2_mkdir(struct inode *dir, | 613 | static int ocfs2_mkdir(struct inode *dir, |
| 595 | struct dentry *dentry, | 614 | struct dentry *dentry, |
| 596 | int mode) | 615 | int mode) |
| @@ -1852,61 +1871,117 @@ bail: | |||
| 1852 | return status; | 1871 | return status; |
| 1853 | } | 1872 | } |
| 1854 | 1873 | ||
| 1855 | static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb, | 1874 | static int ocfs2_lookup_lock_orphan_dir(struct ocfs2_super *osb, |
| 1856 | struct inode **ret_orphan_dir, | 1875 | struct inode **ret_orphan_dir, |
| 1857 | u64 blkno, | 1876 | struct buffer_head **ret_orphan_dir_bh) |
| 1858 | char *name, | ||
| 1859 | struct ocfs2_dir_lookup_result *lookup) | ||
| 1860 | { | 1877 | { |
| 1861 | struct inode *orphan_dir_inode; | 1878 | struct inode *orphan_dir_inode; |
| 1862 | struct buffer_head *orphan_dir_bh = NULL; | 1879 | struct buffer_head *orphan_dir_bh = NULL; |
| 1863 | int status = 0; | 1880 | int ret = 0; |
| 1864 | |||
| 1865 | status = ocfs2_blkno_stringify(blkno, name); | ||
| 1866 | if (status < 0) { | ||
| 1867 | mlog_errno(status); | ||
| 1868 | return status; | ||
| 1869 | } | ||
| 1870 | 1881 | ||
| 1871 | orphan_dir_inode = ocfs2_get_system_file_inode(osb, | 1882 | orphan_dir_inode = ocfs2_get_system_file_inode(osb, |
| 1872 | ORPHAN_DIR_SYSTEM_INODE, | 1883 | ORPHAN_DIR_SYSTEM_INODE, |
| 1873 | osb->slot_num); | 1884 | osb->slot_num); |
| 1874 | if (!orphan_dir_inode) { | 1885 | if (!orphan_dir_inode) { |
| 1875 | status = -ENOENT; | 1886 | ret = -ENOENT; |
| 1876 | mlog_errno(status); | 1887 | mlog_errno(ret); |
| 1877 | return status; | 1888 | return ret; |
| 1878 | } | 1889 | } |
| 1879 | 1890 | ||
| 1880 | mutex_lock(&orphan_dir_inode->i_mutex); | 1891 | mutex_lock(&orphan_dir_inode->i_mutex); |
| 1881 | 1892 | ||
| 1882 | status = ocfs2_inode_lock(orphan_dir_inode, &orphan_dir_bh, 1); | 1893 | ret = ocfs2_inode_lock(orphan_dir_inode, &orphan_dir_bh, 1); |
| 1883 | if (status < 0) { | 1894 | if (ret < 0) { |
| 1884 | mlog_errno(status); | 1895 | mutex_unlock(&orphan_dir_inode->i_mutex); |
| 1885 | goto leave; | 1896 | iput(orphan_dir_inode); |
| 1897 | |||
| 1898 | mlog_errno(ret); | ||
| 1899 | return ret; | ||
| 1886 | } | 1900 | } |
| 1887 | 1901 | ||
| 1888 | status = ocfs2_prepare_dir_for_insert(osb, orphan_dir_inode, | 1902 | *ret_orphan_dir = orphan_dir_inode; |
| 1889 | orphan_dir_bh, name, | 1903 | *ret_orphan_dir_bh = orphan_dir_bh; |
| 1890 | OCFS2_ORPHAN_NAMELEN, lookup); | ||
| 1891 | if (status < 0) { | ||
| 1892 | ocfs2_inode_unlock(orphan_dir_inode, 1); | ||
| 1893 | 1904 | ||
| 1894 | mlog_errno(status); | 1905 | return 0; |
| 1895 | goto leave; | 1906 | } |
| 1907 | |||
| 1908 | static int __ocfs2_prepare_orphan_dir(struct inode *orphan_dir_inode, | ||
| 1909 | struct buffer_head *orphan_dir_bh, | ||
| 1910 | u64 blkno, | ||
| 1911 | char *name, | ||
| 1912 | struct ocfs2_dir_lookup_result *lookup) | ||
| 1913 | { | ||
| 1914 | int ret; | ||
| 1915 | struct ocfs2_super *osb = OCFS2_SB(orphan_dir_inode->i_sb); | ||
| 1916 | |||
| 1917 | ret = ocfs2_blkno_stringify(blkno, name); | ||
| 1918 | if (ret < 0) { | ||
| 1919 | mlog_errno(ret); | ||
| 1920 | return ret; | ||
| 1921 | } | ||
| 1922 | |||
| 1923 | ret = ocfs2_prepare_dir_for_insert(osb, orphan_dir_inode, | ||
| 1924 | orphan_dir_bh, name, | ||
| 1925 | OCFS2_ORPHAN_NAMELEN, lookup); | ||
| 1926 | if (ret < 0) { | ||
| 1927 | mlog_errno(ret); | ||
| 1928 | return ret; | ||
| 1929 | } | ||
| 1930 | |||
| 1931 | return 0; | ||
| 1932 | } | ||
| 1933 | |||
| 1934 | /** | ||
| 1935 | * ocfs2_prepare_orphan_dir() - Prepare an orphan directory for | ||
| 1936 | * insertion of an orphan. | ||
| 1937 | * @osb: ocfs2 file system | ||
| 1938 | * @ret_orphan_dir: Orphan dir inode - returned locked! | ||
| 1939 | * @blkno: Actual block number of the inode to be inserted into orphan dir. | ||
| 1940 | * @lookup: dir lookup result, to be passed back into functions like | ||
| 1941 | * ocfs2_orphan_add | ||
| 1942 | * | ||
| 1943 | * Returns zero on success and the ret_orphan_dir, name and lookup | ||
| 1944 | * fields will be populated. | ||
| 1945 | * | ||
| 1946 | * Returns non-zero on failure. | ||
| 1947 | */ | ||
| 1948 | static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb, | ||
| 1949 | struct inode **ret_orphan_dir, | ||
| 1950 | u64 blkno, | ||
| 1951 | char *name, | ||
| 1952 | struct ocfs2_dir_lookup_result *lookup) | ||
| 1953 | { | ||
| 1954 | struct inode *orphan_dir_inode = NULL; | ||
| 1955 | struct buffer_head *orphan_dir_bh = NULL; | ||
| 1956 | int ret = 0; | ||
| 1957 | |||
| 1958 | ret = ocfs2_lookup_lock_orphan_dir(osb, &orphan_dir_inode, | ||
| 1959 | &orphan_dir_bh); | ||
| 1960 | if (ret < 0) { | ||
| 1961 | mlog_errno(ret); | ||
| 1962 | return ret; | ||
| 1963 | } | ||
| 1964 | |||
| 1965 | ret = __ocfs2_prepare_orphan_dir(orphan_dir_inode, orphan_dir_bh, | ||
| 1966 | blkno, name, lookup); | ||
| 1967 | if (ret < 0) { | ||
| 1968 | mlog_errno(ret); | ||
| 1969 | goto out; | ||
| 1896 | } | 1970 | } |
| 1897 | 1971 | ||
| 1898 | *ret_orphan_dir = orphan_dir_inode; | 1972 | *ret_orphan_dir = orphan_dir_inode; |
| 1899 | 1973 | ||
| 1900 | leave: | 1974 | out: |
| 1901 | if (status) { | 1975 | brelse(orphan_dir_bh); |
| 1976 | |||
| 1977 | if (ret) { | ||
| 1978 | ocfs2_inode_unlock(orphan_dir_inode, 1); | ||
| 1902 | mutex_unlock(&orphan_dir_inode->i_mutex); | 1979 | mutex_unlock(&orphan_dir_inode->i_mutex); |
| 1903 | iput(orphan_dir_inode); | 1980 | iput(orphan_dir_inode); |
| 1904 | } | 1981 | } |
| 1905 | 1982 | ||
| 1906 | brelse(orphan_dir_bh); | 1983 | mlog_exit(ret); |
| 1907 | 1984 | return ret; | |
| 1908 | mlog_exit(status); | ||
| 1909 | return status; | ||
| 1910 | } | 1985 | } |
| 1911 | 1986 | ||
| 1912 | static int ocfs2_orphan_add(struct ocfs2_super *osb, | 1987 | static int ocfs2_orphan_add(struct ocfs2_super *osb, |
| @@ -2053,6 +2128,99 @@ leave: | |||
| 2053 | return status; | 2128 | return status; |
| 2054 | } | 2129 | } |
| 2055 | 2130 | ||
| 2131 | /** | ||
| 2132 | * ocfs2_prep_new_orphaned_file() - Prepare the orphan dir to receive a newly | ||
| 2133 | * allocated file. This is different from the typical 'add to orphan dir' | ||
| 2134 | * operation in that the inode does not yet exist. This is a problem because | ||
| 2135 | * the orphan dir stringifies the inode block number to come up with its | ||
| 2136 | * dirent. Obviously if the inode does not yet exist we have a chicken and egg | ||
| 2137 | * problem. This function works around it by calling deeper into the orphan | ||
| 2138 | * and suballoc code than other callers. Use this only out of necessity. | ||
| 2139 | * @dir: The directory which this inode will ultimately wind up under - not the | ||
| 2140 | * orphan dir! | ||
| 2141 | * @dir_bh: buffer_head of the @dir inode block | ||
| 2142 | * @orphan_name: string of length (OCFS2_ORPHAN_NAMELEN + 1). Will be filled | ||
| 2143 | * with the string to be used for orphan dirent. Pass back to the orphan dir | ||
| 2144 | * code. | ||
| 2145 | * @ret_orphan_dir: orphan dir inode returned to be passed back into orphan | ||
| 2146 | * dir code. | ||
| 2147 | * @ret_di_blkno: block number where the new inode will be allocated. | ||
| 2148 | * @orphan_insert: Dir insert context to be passed back into orphan dir code. | ||
| 2149 | * @ret_inode_ac: Inode alloc context to be passed back to the allocator. | ||
| 2150 | * | ||
| 2151 | * Returns zero on success and the ret_orphan_dir, name and lookup | ||
| 2152 | * fields will be populated. | ||
| 2153 | * | ||
| 2154 | * Returns non-zero on failure. | ||
| 2155 | */ | ||
| 2156 | static int ocfs2_prep_new_orphaned_file(struct inode *dir, | ||
| 2157 | struct buffer_head *dir_bh, | ||
| 2158 | char *orphan_name, | ||
| 2159 | struct inode **ret_orphan_dir, | ||
| 2160 | u64 *ret_di_blkno, | ||
| 2161 | struct ocfs2_dir_lookup_result *orphan_insert, | ||
| 2162 | struct ocfs2_alloc_context **ret_inode_ac) | ||
| 2163 | { | ||
| 2164 | int ret; | ||
| 2165 | u64 di_blkno; | ||
| 2166 | struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); | ||
| 2167 | struct inode *orphan_dir = NULL; | ||
| 2168 | struct buffer_head *orphan_dir_bh = NULL; | ||
| 2169 | struct ocfs2_alloc_context *inode_ac = NULL; | ||
| 2170 | |||
| 2171 | ret = ocfs2_lookup_lock_orphan_dir(osb, &orphan_dir, &orphan_dir_bh); | ||
| 2172 | if (ret < 0) { | ||
| 2173 | mlog_errno(ret); | ||
| 2174 | return ret; | ||
| 2175 | } | ||
| 2176 | |||
| 2177 | /* reserve an inode spot */ | ||
| 2178 | ret = ocfs2_reserve_new_inode(osb, &inode_ac); | ||
| 2179 | if (ret < 0) { | ||
| 2180 | if (ret != -ENOSPC) | ||
| 2181 | mlog_errno(ret); | ||
| 2182 | goto out; | ||
| 2183 | } | ||
| 2184 | |||
| 2185 | ret = ocfs2_find_new_inode_loc(dir, dir_bh, inode_ac, | ||
| 2186 | &di_blkno); | ||
| 2187 | if (ret) { | ||
| 2188 | mlog_errno(ret); | ||
| 2189 | goto out; | ||
| 2190 | } | ||
| 2191 | |||
| 2192 | ret = __ocfs2_prepare_orphan_dir(orphan_dir, orphan_dir_bh, | ||
| 2193 | di_blkno, orphan_name, orphan_insert); | ||
| 2194 | if (ret < 0) { | ||
| 2195 | mlog_errno(ret); | ||
| 2196 | goto out; | ||
| 2197 | } | ||
| 2198 | |||
| 2199 | out: | ||
| 2200 | if (ret == 0) { | ||
| 2201 | *ret_orphan_dir = orphan_dir; | ||
| 2202 | *ret_di_blkno = di_blkno; | ||
| 2203 | *ret_inode_ac = inode_ac; | ||
| 2204 | /* | ||
| 2205 | * orphan_name and orphan_insert are already up to | ||
| 2206 | * date via prepare_orphan_dir | ||
| 2207 | */ | ||
| 2208 | } else { | ||
| 2209 | /* Unroll reserve_new_inode* */ | ||
| 2210 | if (inode_ac) | ||
| 2211 | ocfs2_free_alloc_context(inode_ac); | ||
| 2212 | |||
| 2213 | /* Unroll orphan dir locking */ | ||
| 2214 | mutex_unlock(&orphan_dir->i_mutex); | ||
| 2215 | ocfs2_inode_unlock(orphan_dir, 1); | ||
| 2216 | iput(orphan_dir); | ||
| 2217 | } | ||
| 2218 | |||
| 2219 | brelse(orphan_dir_bh); | ||
| 2220 | |||
| 2221 | return ret; | ||
| 2222 | } | ||
| 2223 | |||
| 2056 | int ocfs2_create_inode_in_orphan(struct inode *dir, | 2224 | int ocfs2_create_inode_in_orphan(struct inode *dir, |
| 2057 | int mode, | 2225 | int mode, |
| 2058 | struct inode **new_inode) | 2226 | struct inode **new_inode) |
| @@ -2068,6 +2236,8 @@ int ocfs2_create_inode_in_orphan(struct inode *dir, | |||
| 2068 | struct buffer_head *new_di_bh = NULL; | 2236 | struct buffer_head *new_di_bh = NULL; |
| 2069 | struct ocfs2_alloc_context *inode_ac = NULL; | 2237 | struct ocfs2_alloc_context *inode_ac = NULL; |
| 2070 | struct ocfs2_dir_lookup_result orphan_insert = { NULL, }; | 2238 | struct ocfs2_dir_lookup_result orphan_insert = { NULL, }; |
| 2239 | u64 uninitialized_var(di_blkno), suballoc_loc; | ||
| 2240 | u16 suballoc_bit; | ||
| 2071 | 2241 | ||
| 2072 | status = ocfs2_inode_lock(dir, &parent_di_bh, 1); | 2242 | status = ocfs2_inode_lock(dir, &parent_di_bh, 1); |
| 2073 | if (status < 0) { | 2243 | if (status < 0) { |
| @@ -2076,20 +2246,9 @@ int ocfs2_create_inode_in_orphan(struct inode *dir, | |||
| 2076 | return status; | 2246 | return status; |
| 2077 | } | 2247 | } |
| 2078 | 2248 | ||
| 2079 | /* | 2249 | status = ocfs2_prep_new_orphaned_file(dir, parent_di_bh, |
| 2080 | * We give the orphan dir the root blkno to fake an orphan name, | 2250 | orphan_name, &orphan_dir, |
| 2081 | * and allocate enough space for our insertion. | 2251 | &di_blkno, &orphan_insert, &inode_ac); |
| 2082 | */ | ||
| 2083 | status = ocfs2_prepare_orphan_dir(osb, &orphan_dir, | ||
| 2084 | osb->root_blkno, | ||
| 2085 | orphan_name, &orphan_insert); | ||
| 2086 | if (status < 0) { | ||
| 2087 | mlog_errno(status); | ||
| 2088 | goto leave; | ||
| 2089 | } | ||
| 2090 | |||
| 2091 | /* reserve an inode spot */ | ||
| 2092 | status = ocfs2_reserve_new_inode(osb, &inode_ac); | ||
| 2093 | if (status < 0) { | 2252 | if (status < 0) { |
| 2094 | if (status != -ENOSPC) | 2253 | if (status != -ENOSPC) |
| 2095 | mlog_errno(status); | 2254 | mlog_errno(status); |
| @@ -2116,17 +2275,20 @@ int ocfs2_create_inode_in_orphan(struct inode *dir, | |||
| 2116 | goto leave; | 2275 | goto leave; |
| 2117 | did_quota_inode = 1; | 2276 | did_quota_inode = 1; |
| 2118 | 2277 | ||
| 2119 | inode->i_nlink = 0; | 2278 | status = ocfs2_claim_new_inode_at_loc(handle, dir, inode_ac, |
| 2120 | /* do the real work now. */ | 2279 | &suballoc_loc, |
| 2121 | status = ocfs2_mknod_locked(osb, dir, inode, | 2280 | &suballoc_bit, di_blkno); |
| 2122 | 0, &new_di_bh, parent_di_bh, handle, | ||
| 2123 | inode_ac); | ||
| 2124 | if (status < 0) { | 2281 | if (status < 0) { |
| 2125 | mlog_errno(status); | 2282 | mlog_errno(status); |
| 2126 | goto leave; | 2283 | goto leave; |
| 2127 | } | 2284 | } |
| 2128 | 2285 | ||
| 2129 | status = ocfs2_blkno_stringify(OCFS2_I(inode)->ip_blkno, orphan_name); | 2286 | inode->i_nlink = 0; |
| 2287 | /* do the real work now. */ | ||
| 2288 | status = __ocfs2_mknod_locked(dir, inode, | ||
| 2289 | 0, &new_di_bh, parent_di_bh, handle, | ||
| 2290 | inode_ac, di_blkno, suballoc_loc, | ||
| 2291 | suballoc_bit); | ||
| 2130 | if (status < 0) { | 2292 | if (status < 0) { |
| 2131 | mlog_errno(status); | 2293 | mlog_errno(status); |
| 2132 | goto leave; | 2294 | goto leave; |
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index 73a11ccfd4c2..0afeda83120f 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c | |||
| @@ -2960,7 +2960,7 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle, | |||
| 2960 | if (map_end & (PAGE_CACHE_SIZE - 1)) | 2960 | if (map_end & (PAGE_CACHE_SIZE - 1)) |
| 2961 | to = map_end & (PAGE_CACHE_SIZE - 1); | 2961 | to = map_end & (PAGE_CACHE_SIZE - 1); |
| 2962 | 2962 | ||
| 2963 | page = grab_cache_page(mapping, page_index); | 2963 | page = find_or_create_page(mapping, page_index, GFP_NOFS); |
| 2964 | 2964 | ||
| 2965 | /* | 2965 | /* |
| 2966 | * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, This page | 2966 | * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, This page |
| @@ -3179,7 +3179,8 @@ static int ocfs2_cow_sync_writeback(struct super_block *sb, | |||
| 3179 | if (map_end > end) | 3179 | if (map_end > end) |
| 3180 | map_end = end; | 3180 | map_end = end; |
| 3181 | 3181 | ||
| 3182 | page = grab_cache_page(context->inode->i_mapping, page_index); | 3182 | page = find_or_create_page(context->inode->i_mapping, |
| 3183 | page_index, GFP_NOFS); | ||
| 3183 | BUG_ON(!page); | 3184 | BUG_ON(!page); |
| 3184 | 3185 | ||
| 3185 | wait_on_page_writeback(page); | 3186 | wait_on_page_writeback(page); |
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index a8e6a95a353f..8a286f54dca1 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c | |||
| @@ -57,11 +57,28 @@ struct ocfs2_suballoc_result { | |||
| 57 | u64 sr_bg_blkno; /* The bg we allocated from. Set | 57 | u64 sr_bg_blkno; /* The bg we allocated from. Set |
| 58 | to 0 when a block group is | 58 | to 0 when a block group is |
| 59 | contiguous. */ | 59 | contiguous. */ |
| 60 | u64 sr_bg_stable_blkno; /* | ||
| 61 | * Doesn't change, always | ||
| 62 | * set to target block | ||
| 63 | * group descriptor | ||
| 64 | * block. | ||
| 65 | */ | ||
| 60 | u64 sr_blkno; /* The first allocated block */ | 66 | u64 sr_blkno; /* The first allocated block */ |
| 61 | unsigned int sr_bit_offset; /* The bit in the bg */ | 67 | unsigned int sr_bit_offset; /* The bit in the bg */ |
| 62 | unsigned int sr_bits; /* How many bits we claimed */ | 68 | unsigned int sr_bits; /* How many bits we claimed */ |
| 63 | }; | 69 | }; |
| 64 | 70 | ||
| 71 | static u64 ocfs2_group_from_res(struct ocfs2_suballoc_result *res) | ||
| 72 | { | ||
| 73 | if (res->sr_blkno == 0) | ||
| 74 | return 0; | ||
| 75 | |||
| 76 | if (res->sr_bg_blkno) | ||
| 77 | return res->sr_bg_blkno; | ||
| 78 | |||
| 79 | return ocfs2_which_suballoc_group(res->sr_blkno, res->sr_bit_offset); | ||
| 80 | } | ||
| 81 | |||
| 65 | static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg); | 82 | static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg); |
| 66 | static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe); | 83 | static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe); |
| 67 | static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl); | 84 | static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl); |
| @@ -138,6 +155,10 @@ void ocfs2_free_ac_resource(struct ocfs2_alloc_context *ac) | |||
| 138 | brelse(ac->ac_bh); | 155 | brelse(ac->ac_bh); |
| 139 | ac->ac_bh = NULL; | 156 | ac->ac_bh = NULL; |
| 140 | ac->ac_resv = NULL; | 157 | ac->ac_resv = NULL; |
| 158 | if (ac->ac_find_loc_priv) { | ||
| 159 | kfree(ac->ac_find_loc_priv); | ||
| 160 | ac->ac_find_loc_priv = NULL; | ||
| 161 | } | ||
| 141 | } | 162 | } |
| 142 | 163 | ||
| 143 | void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac) | 164 | void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac) |
| @@ -1678,6 +1699,15 @@ static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac, | |||
| 1678 | if (!ret) | 1699 | if (!ret) |
| 1679 | ocfs2_bg_discontig_fix_result(ac, gd, res); | 1700 | ocfs2_bg_discontig_fix_result(ac, gd, res); |
| 1680 | 1701 | ||
| 1702 | /* | ||
| 1703 | * sr_bg_blkno might have been changed by | ||
| 1704 | * ocfs2_bg_discontig_fix_result | ||
| 1705 | */ | ||
| 1706 | res->sr_bg_stable_blkno = group_bh->b_blocknr; | ||
| 1707 | |||
| 1708 | if (ac->ac_find_loc_only) | ||
| 1709 | goto out_loc_only; | ||
| 1710 | |||
| 1681 | ret = ocfs2_alloc_dinode_update_counts(alloc_inode, handle, ac->ac_bh, | 1711 | ret = ocfs2_alloc_dinode_update_counts(alloc_inode, handle, ac->ac_bh, |
| 1682 | res->sr_bits, | 1712 | res->sr_bits, |
| 1683 | le16_to_cpu(gd->bg_chain)); | 1713 | le16_to_cpu(gd->bg_chain)); |
| @@ -1691,6 +1721,7 @@ static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac, | |||
| 1691 | if (ret < 0) | 1721 | if (ret < 0) |
| 1692 | mlog_errno(ret); | 1722 | mlog_errno(ret); |
| 1693 | 1723 | ||
| 1724 | out_loc_only: | ||
| 1694 | *bits_left = le16_to_cpu(gd->bg_free_bits_count); | 1725 | *bits_left = le16_to_cpu(gd->bg_free_bits_count); |
| 1695 | 1726 | ||
| 1696 | out: | 1727 | out: |
| @@ -1708,7 +1739,6 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, | |||
| 1708 | { | 1739 | { |
| 1709 | int status; | 1740 | int status; |
| 1710 | u16 chain; | 1741 | u16 chain; |
| 1711 | u32 tmp_used; | ||
| 1712 | u64 next_group; | 1742 | u64 next_group; |
| 1713 | struct inode *alloc_inode = ac->ac_inode; | 1743 | struct inode *alloc_inode = ac->ac_inode; |
| 1714 | struct buffer_head *group_bh = NULL; | 1744 | struct buffer_head *group_bh = NULL; |
| @@ -1770,6 +1800,11 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, | |||
| 1770 | if (!status) | 1800 | if (!status) |
| 1771 | ocfs2_bg_discontig_fix_result(ac, bg, res); | 1801 | ocfs2_bg_discontig_fix_result(ac, bg, res); |
| 1772 | 1802 | ||
| 1803 | /* | ||
| 1804 | * sr_bg_blkno might have been changed by | ||
| 1805 | * ocfs2_bg_discontig_fix_result | ||
| 1806 | */ | ||
| 1807 | res->sr_bg_stable_blkno = group_bh->b_blocknr; | ||
| 1773 | 1808 | ||
| 1774 | /* | 1809 | /* |
| 1775 | * Keep track of previous block descriptor read. When | 1810 | * Keep track of previous block descriptor read. When |
| @@ -1796,22 +1831,17 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, | |||
| 1796 | } | 1831 | } |
| 1797 | } | 1832 | } |
| 1798 | 1833 | ||
| 1799 | /* Ok, claim our bits now: set the info on dinode, chainlist | 1834 | if (ac->ac_find_loc_only) |
| 1800 | * and then the group */ | 1835 | goto out_loc_only; |
| 1801 | status = ocfs2_journal_access_di(handle, | 1836 | |
| 1802 | INODE_CACHE(alloc_inode), | 1837 | status = ocfs2_alloc_dinode_update_counts(alloc_inode, handle, |
| 1803 | ac->ac_bh, | 1838 | ac->ac_bh, res->sr_bits, |
| 1804 | OCFS2_JOURNAL_ACCESS_WRITE); | 1839 | chain); |
| 1805 | if (status < 0) { | 1840 | if (status) { |
| 1806 | mlog_errno(status); | 1841 | mlog_errno(status); |
| 1807 | goto bail; | 1842 | goto bail; |
| 1808 | } | 1843 | } |
| 1809 | 1844 | ||
| 1810 | tmp_used = le32_to_cpu(fe->id1.bitmap1.i_used); | ||
| 1811 | fe->id1.bitmap1.i_used = cpu_to_le32(res->sr_bits + tmp_used); | ||
| 1812 | le32_add_cpu(&cl->cl_recs[chain].c_free, -res->sr_bits); | ||
| 1813 | ocfs2_journal_dirty(handle, ac->ac_bh); | ||
| 1814 | |||
| 1815 | status = ocfs2_block_group_set_bits(handle, | 1845 | status = ocfs2_block_group_set_bits(handle, |
| 1816 | alloc_inode, | 1846 | alloc_inode, |
| 1817 | bg, | 1847 | bg, |
| @@ -1826,6 +1856,7 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, | |||
| 1826 | mlog(0, "Allocated %u bits from suballocator %llu\n", res->sr_bits, | 1856 | mlog(0, "Allocated %u bits from suballocator %llu\n", res->sr_bits, |
| 1827 | (unsigned long long)le64_to_cpu(fe->i_blkno)); | 1857 | (unsigned long long)le64_to_cpu(fe->i_blkno)); |
| 1828 | 1858 | ||
| 1859 | out_loc_only: | ||
| 1829 | *bits_left = le16_to_cpu(bg->bg_free_bits_count); | 1860 | *bits_left = le16_to_cpu(bg->bg_free_bits_count); |
| 1830 | bail: | 1861 | bail: |
| 1831 | brelse(group_bh); | 1862 | brelse(group_bh); |
| @@ -1845,6 +1876,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac, | |||
| 1845 | int status; | 1876 | int status; |
| 1846 | u16 victim, i; | 1877 | u16 victim, i; |
| 1847 | u16 bits_left = 0; | 1878 | u16 bits_left = 0; |
| 1879 | u64 hint = ac->ac_last_group; | ||
| 1848 | struct ocfs2_chain_list *cl; | 1880 | struct ocfs2_chain_list *cl; |
| 1849 | struct ocfs2_dinode *fe; | 1881 | struct ocfs2_dinode *fe; |
| 1850 | 1882 | ||
| @@ -1872,7 +1904,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac, | |||
| 1872 | goto bail; | 1904 | goto bail; |
| 1873 | } | 1905 | } |
| 1874 | 1906 | ||
| 1875 | res->sr_bg_blkno = ac->ac_last_group; | 1907 | res->sr_bg_blkno = hint; |
| 1876 | if (res->sr_bg_blkno) { | 1908 | if (res->sr_bg_blkno) { |
| 1877 | /* Attempt to short-circuit the usual search mechanism | 1909 | /* Attempt to short-circuit the usual search mechanism |
| 1878 | * by jumping straight to the most recently used | 1910 | * by jumping straight to the most recently used |
| @@ -1896,8 +1928,10 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac, | |||
| 1896 | 1928 | ||
| 1897 | status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits, | 1929 | status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits, |
| 1898 | res, &bits_left); | 1930 | res, &bits_left); |
| 1899 | if (!status) | 1931 | if (!status) { |
| 1932 | hint = ocfs2_group_from_res(res); | ||
| 1900 | goto set_hint; | 1933 | goto set_hint; |
| 1934 | } | ||
| 1901 | if (status < 0 && status != -ENOSPC) { | 1935 | if (status < 0 && status != -ENOSPC) { |
| 1902 | mlog_errno(status); | 1936 | mlog_errno(status); |
| 1903 | goto bail; | 1937 | goto bail; |
| @@ -1920,8 +1954,10 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac, | |||
| 1920 | ac->ac_chain = i; | 1954 | ac->ac_chain = i; |
| 1921 | status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits, | 1955 | status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits, |
| 1922 | res, &bits_left); | 1956 | res, &bits_left); |
| 1923 | if (!status) | 1957 | if (!status) { |
| 1958 | hint = ocfs2_group_from_res(res); | ||
| 1924 | break; | 1959 | break; |
| 1960 | } | ||
| 1925 | if (status < 0 && status != -ENOSPC) { | 1961 | if (status < 0 && status != -ENOSPC) { |
| 1926 | mlog_errno(status); | 1962 | mlog_errno(status); |
| 1927 | goto bail; | 1963 | goto bail; |
| @@ -1936,7 +1972,7 @@ set_hint: | |||
| 1936 | if (bits_left < min_bits) | 1972 | if (bits_left < min_bits) |
| 1937 | ac->ac_last_group = 0; | 1973 | ac->ac_last_group = 0; |
| 1938 | else | 1974 | else |
| 1939 | ac->ac_last_group = res->sr_bg_blkno; | 1975 | ac->ac_last_group = hint; |
| 1940 | } | 1976 | } |
| 1941 | 1977 | ||
| 1942 | bail: | 1978 | bail: |
| @@ -2016,6 +2052,136 @@ static inline void ocfs2_save_inode_ac_group(struct inode *dir, | |||
| 2016 | OCFS2_I(dir)->ip_last_used_slot = ac->ac_alloc_slot; | 2052 | OCFS2_I(dir)->ip_last_used_slot = ac->ac_alloc_slot; |
| 2017 | } | 2053 | } |
| 2018 | 2054 | ||
| 2055 | int ocfs2_find_new_inode_loc(struct inode *dir, | ||
| 2056 | struct buffer_head *parent_fe_bh, | ||
| 2057 | struct ocfs2_alloc_context *ac, | ||
| 2058 | u64 *fe_blkno) | ||
| 2059 | { | ||
| 2060 | int ret; | ||
| 2061 | handle_t *handle = NULL; | ||
| 2062 | struct ocfs2_suballoc_result *res; | ||
| 2063 | |||
| 2064 | BUG_ON(!ac); | ||
| 2065 | BUG_ON(ac->ac_bits_given != 0); | ||
| 2066 | BUG_ON(ac->ac_bits_wanted != 1); | ||
| 2067 | BUG_ON(ac->ac_which != OCFS2_AC_USE_INODE); | ||
| 2068 | |||
| 2069 | res = kzalloc(sizeof(*res), GFP_NOFS); | ||
| 2070 | if (res == NULL) { | ||
| 2071 | ret = -ENOMEM; | ||
| 2072 | mlog_errno(ret); | ||
| 2073 | goto out; | ||
| 2074 | } | ||
| 2075 | |||
| 2076 | ocfs2_init_inode_ac_group(dir, parent_fe_bh, ac); | ||
| 2077 | |||
| 2078 | /* | ||
| 2079 | * The handle started here is for chain relink. Alternatively, | ||
| 2080 | * we could just disable relink for these calls. | ||
| 2081 | */ | ||
| 2082 | handle = ocfs2_start_trans(OCFS2_SB(dir->i_sb), OCFS2_SUBALLOC_ALLOC); | ||
| 2083 | if (IS_ERR(handle)) { | ||
| 2084 | ret = PTR_ERR(handle); | ||
| 2085 | handle = NULL; | ||
| 2086 | mlog_errno(ret); | ||
| 2087 | goto out; | ||
| 2088 | } | ||
| 2089 | |||
| 2090 | /* | ||
| 2091 | * This will instruct ocfs2_claim_suballoc_bits and | ||
| 2092 | * ocfs2_search_one_group to search but save actual allocation | ||
| 2093 | * for later. | ||
| 2094 | */ | ||
| 2095 | ac->ac_find_loc_only = 1; | ||
| 2096 | |||
| 2097 | ret = ocfs2_claim_suballoc_bits(ac, handle, 1, 1, res); | ||
| 2098 | if (ret < 0) { | ||
| 2099 | mlog_errno(ret); | ||
| 2100 | goto out; | ||
| 2101 | } | ||
| 2102 | |||
| 2103 | ac->ac_find_loc_priv = res; | ||
| 2104 | *fe_blkno = res->sr_blkno; | ||
| 2105 | |||
| 2106 | out: | ||
| 2107 | if (handle) | ||
| 2108 | ocfs2_commit_trans(OCFS2_SB(dir->i_sb), handle); | ||
| 2109 | |||
| 2110 | if (ret) | ||
| 2111 | kfree(res); | ||
| 2112 | |||
| 2113 | return ret; | ||
| 2114 | } | ||
| 2115 | |||
| 2116 | int ocfs2_claim_new_inode_at_loc(handle_t *handle, | ||
| 2117 | struct inode *dir, | ||
| 2118 | struct ocfs2_alloc_context *ac, | ||
| 2119 | u64 *suballoc_loc, | ||
| 2120 | u16 *suballoc_bit, | ||
| 2121 | u64 di_blkno) | ||
| 2122 | { | ||
| 2123 | int ret; | ||
| 2124 | u16 chain; | ||
| 2125 | struct ocfs2_suballoc_result *res = ac->ac_find_loc_priv; | ||
| 2126 | struct buffer_head *bg_bh = NULL; | ||
| 2127 | struct ocfs2_group_desc *bg; | ||
| 2128 | struct ocfs2_dinode *di = (struct ocfs2_dinode *) ac->ac_bh->b_data; | ||
| 2129 | |||
| 2130 | /* | ||
| 2131 | * Since di_blkno is being passed back in, we check for any | ||
| 2132 | * inconsistencies which may have happened between | ||
| 2133 | * calls. These are code bugs as di_blkno is not expected to | ||
| 2134 | * change once returned from ocfs2_find_new_inode_loc() | ||
| 2135 | */ | ||
| 2136 | BUG_ON(res->sr_blkno != di_blkno); | ||
| 2137 | |||
| 2138 | ret = ocfs2_read_group_descriptor(ac->ac_inode, di, | ||
| 2139 | res->sr_bg_stable_blkno, &bg_bh); | ||
| 2140 | if (ret) { | ||
| 2141 | mlog_errno(ret); | ||
| 2142 | goto out; | ||
| 2143 | } | ||
| 2144 | |||
| 2145 | bg = (struct ocfs2_group_desc *) bg_bh->b_data; | ||
| 2146 | chain = le16_to_cpu(bg->bg_chain); | ||
| 2147 | |||
| 2148 | ret = ocfs2_alloc_dinode_update_counts(ac->ac_inode, handle, | ||
| 2149 | ac->ac_bh, res->sr_bits, | ||
| 2150 | chain); | ||
| 2151 | if (ret) { | ||
| 2152 | mlog_errno(ret); | ||
| 2153 | goto out; | ||
| 2154 | } | ||
| 2155 | |||
| 2156 | ret = ocfs2_block_group_set_bits(handle, | ||
| 2157 | ac->ac_inode, | ||
| 2158 | bg, | ||
| 2159 | bg_bh, | ||
| 2160 | res->sr_bit_offset, | ||
| 2161 | res->sr_bits); | ||
| 2162 | if (ret < 0) { | ||
| 2163 | mlog_errno(ret); | ||
| 2164 | goto out; | ||
| 2165 | } | ||
| 2166 | |||
| 2167 | mlog(0, "Allocated %u bits from suballocator %llu\n", res->sr_bits, | ||
| 2168 | (unsigned long long)di_blkno); | ||
| 2169 | |||
| 2170 | atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs); | ||
| 2171 | |||
| 2172 | BUG_ON(res->sr_bits != 1); | ||
| 2173 | |||
| 2174 | *suballoc_loc = res->sr_bg_blkno; | ||
| 2175 | *suballoc_bit = res->sr_bit_offset; | ||
| 2176 | ac->ac_bits_given++; | ||
| 2177 | ocfs2_save_inode_ac_group(dir, ac); | ||
| 2178 | |||
| 2179 | out: | ||
| 2180 | brelse(bg_bh); | ||
| 2181 | |||
| 2182 | return ret; | ||
| 2183 | } | ||
| 2184 | |||
| 2019 | int ocfs2_claim_new_inode(handle_t *handle, | 2185 | int ocfs2_claim_new_inode(handle_t *handle, |
| 2020 | struct inode *dir, | 2186 | struct inode *dir, |
| 2021 | struct buffer_head *parent_fe_bh, | 2187 | struct buffer_head *parent_fe_bh, |
| @@ -2567,7 +2733,8 @@ out: | |||
| 2567 | * suballoc_bit. | 2733 | * suballoc_bit. |
| 2568 | */ | 2734 | */ |
| 2569 | static int ocfs2_get_suballoc_slot_bit(struct ocfs2_super *osb, u64 blkno, | 2735 | static int ocfs2_get_suballoc_slot_bit(struct ocfs2_super *osb, u64 blkno, |
| 2570 | u16 *suballoc_slot, u16 *suballoc_bit) | 2736 | u16 *suballoc_slot, u64 *group_blkno, |
| 2737 | u16 *suballoc_bit) | ||
| 2571 | { | 2738 | { |
| 2572 | int status; | 2739 | int status; |
| 2573 | struct buffer_head *inode_bh = NULL; | 2740 | struct buffer_head *inode_bh = NULL; |
| @@ -2604,6 +2771,8 @@ static int ocfs2_get_suballoc_slot_bit(struct ocfs2_super *osb, u64 blkno, | |||
| 2604 | *suballoc_slot = le16_to_cpu(inode_fe->i_suballoc_slot); | 2771 | *suballoc_slot = le16_to_cpu(inode_fe->i_suballoc_slot); |
| 2605 | if (suballoc_bit) | 2772 | if (suballoc_bit) |
| 2606 | *suballoc_bit = le16_to_cpu(inode_fe->i_suballoc_bit); | 2773 | *suballoc_bit = le16_to_cpu(inode_fe->i_suballoc_bit); |
| 2774 | if (group_blkno) | ||
| 2775 | *group_blkno = le64_to_cpu(inode_fe->i_suballoc_loc); | ||
| 2607 | 2776 | ||
| 2608 | bail: | 2777 | bail: |
| 2609 | brelse(inode_bh); | 2778 | brelse(inode_bh); |
| @@ -2621,7 +2790,8 @@ bail: | |||
| 2621 | */ | 2790 | */ |
| 2622 | static int ocfs2_test_suballoc_bit(struct ocfs2_super *osb, | 2791 | static int ocfs2_test_suballoc_bit(struct ocfs2_super *osb, |
| 2623 | struct inode *suballoc, | 2792 | struct inode *suballoc, |
| 2624 | struct buffer_head *alloc_bh, u64 blkno, | 2793 | struct buffer_head *alloc_bh, |
| 2794 | u64 group_blkno, u64 blkno, | ||
| 2625 | u16 bit, int *res) | 2795 | u16 bit, int *res) |
| 2626 | { | 2796 | { |
| 2627 | struct ocfs2_dinode *alloc_di; | 2797 | struct ocfs2_dinode *alloc_di; |
| @@ -2642,10 +2812,8 @@ static int ocfs2_test_suballoc_bit(struct ocfs2_super *osb, | |||
| 2642 | goto bail; | 2812 | goto bail; |
| 2643 | } | 2813 | } |
| 2644 | 2814 | ||
| 2645 | if (alloc_di->i_suballoc_loc) | 2815 | bg_blkno = group_blkno ? group_blkno : |
| 2646 | bg_blkno = le64_to_cpu(alloc_di->i_suballoc_loc); | 2816 | ocfs2_which_suballoc_group(blkno, bit); |
| 2647 | else | ||
| 2648 | bg_blkno = ocfs2_which_suballoc_group(blkno, bit); | ||
| 2649 | status = ocfs2_read_group_descriptor(suballoc, alloc_di, bg_blkno, | 2817 | status = ocfs2_read_group_descriptor(suballoc, alloc_di, bg_blkno, |
| 2650 | &group_bh); | 2818 | &group_bh); |
| 2651 | if (status < 0) { | 2819 | if (status < 0) { |
| @@ -2680,6 +2848,7 @@ bail: | |||
| 2680 | int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res) | 2848 | int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res) |
| 2681 | { | 2849 | { |
| 2682 | int status; | 2850 | int status; |
| 2851 | u64 group_blkno = 0; | ||
| 2683 | u16 suballoc_bit = 0, suballoc_slot = 0; | 2852 | u16 suballoc_bit = 0, suballoc_slot = 0; |
| 2684 | struct inode *inode_alloc_inode; | 2853 | struct inode *inode_alloc_inode; |
| 2685 | struct buffer_head *alloc_bh = NULL; | 2854 | struct buffer_head *alloc_bh = NULL; |
| @@ -2687,7 +2856,7 @@ int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res) | |||
| 2687 | mlog_entry("blkno: %llu", (unsigned long long)blkno); | 2856 | mlog_entry("blkno: %llu", (unsigned long long)blkno); |
| 2688 | 2857 | ||
| 2689 | status = ocfs2_get_suballoc_slot_bit(osb, blkno, &suballoc_slot, | 2858 | status = ocfs2_get_suballoc_slot_bit(osb, blkno, &suballoc_slot, |
| 2690 | &suballoc_bit); | 2859 | &group_blkno, &suballoc_bit); |
| 2691 | if (status < 0) { | 2860 | if (status < 0) { |
| 2692 | mlog(ML_ERROR, "get alloc slot and bit failed %d\n", status); | 2861 | mlog(ML_ERROR, "get alloc slot and bit failed %d\n", status); |
| 2693 | goto bail; | 2862 | goto bail; |
| @@ -2715,7 +2884,7 @@ int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res) | |||
| 2715 | } | 2884 | } |
| 2716 | 2885 | ||
| 2717 | status = ocfs2_test_suballoc_bit(osb, inode_alloc_inode, alloc_bh, | 2886 | status = ocfs2_test_suballoc_bit(osb, inode_alloc_inode, alloc_bh, |
| 2718 | blkno, suballoc_bit, res); | 2887 | group_blkno, blkno, suballoc_bit, res); |
| 2719 | if (status < 0) | 2888 | if (status < 0) |
| 2720 | mlog(ML_ERROR, "test suballoc bit failed %d\n", status); | 2889 | mlog(ML_ERROR, "test suballoc bit failed %d\n", status); |
| 2721 | 2890 | ||
diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h index a017dd3ee7d9..b8afabfeede4 100644 --- a/fs/ocfs2/suballoc.h +++ b/fs/ocfs2/suballoc.h | |||
| @@ -56,6 +56,9 @@ struct ocfs2_alloc_context { | |||
| 56 | u64 ac_max_block; /* Highest block number to allocate. 0 is | 56 | u64 ac_max_block; /* Highest block number to allocate. 0 is |
| 57 | the same as ~0 - unlimited */ | 57 | the same as ~0 - unlimited */ |
| 58 | 58 | ||
| 59 | int ac_find_loc_only; /* hack for reflink operation ordering */ | ||
| 60 | struct ocfs2_suballoc_result *ac_find_loc_priv; /* */ | ||
| 61 | |||
| 59 | struct ocfs2_alloc_reservation *ac_resv; | 62 | struct ocfs2_alloc_reservation *ac_resv; |
| 60 | }; | 63 | }; |
| 61 | 64 | ||
| @@ -197,4 +200,22 @@ int ocfs2_lock_allocators(struct inode *inode, struct ocfs2_extent_tree *et, | |||
| 197 | struct ocfs2_alloc_context **meta_ac); | 200 | struct ocfs2_alloc_context **meta_ac); |
| 198 | 201 | ||
| 199 | int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res); | 202 | int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res); |
| 203 | |||
| 204 | |||
| 205 | |||
| 206 | /* | ||
| 207 | * The following two interfaces are for ocfs2_create_inode_in_orphan(). | ||
| 208 | */ | ||
| 209 | int ocfs2_find_new_inode_loc(struct inode *dir, | ||
| 210 | struct buffer_head *parent_fe_bh, | ||
| 211 | struct ocfs2_alloc_context *ac, | ||
| 212 | u64 *fe_blkno); | ||
| 213 | |||
| 214 | int ocfs2_claim_new_inode_at_loc(handle_t *handle, | ||
| 215 | struct inode *dir, | ||
| 216 | struct ocfs2_alloc_context *ac, | ||
| 217 | u64 *suballoc_loc, | ||
| 218 | u16 *suballoc_bit, | ||
| 219 | u64 di_blkno); | ||
| 220 | |||
| 200 | #endif /* _CHAINALLOC_H_ */ | 221 | #endif /* _CHAINALLOC_H_ */ |
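Taken together, the suballoc changes split inode allocation into two phases for ocfs2_create_inode_in_orphan(): ocfs2_find_new_inode_loc() searches for a block location and stashes the result (ac_find_loc_only / ac_find_loc_priv) without consuming it, so the orphan dirent can be named after the future inode number, and ocfs2_claim_new_inode_at_loc() later commits exactly that location. A generic reserve-then-commit sketch of the idea; the allocator, its types and its names here are hypothetical, not the ocfs2 API:

#include <stdio.h>
#include <assert.h>

/* Hypothetical allocator state: a bitmap of 16 slots. */
struct alloc {
	unsigned int used;	/* bit i set => slot i allocated */
	int found_slot;		/* plays the role of ac_find_loc_priv */
};

/* Phase 1: find a free slot but do not mark it used yet. */
static int find_loc(struct alloc *a)
{
	for (int i = 0; i < 16; i++) {
		if (!(a->used & (1u << i))) {
			a->found_slot = i;
			return i;
		}
	}
	return -1;
}

/* Phase 2: commit the slot found earlier; it must not have changed. */
static void claim_at_loc(struct alloc *a, int slot)
{
	assert(slot == a->found_slot);	/* mirrors the BUG_ON consistency check */
	a->used |= 1u << slot;
}

int main(void)
{
	struct alloc a = { .used = 0x7, .found_slot = -1 };

	int slot = find_loc(&a);	/* name the orphan dirent after this */
	printf("will allocate slot %d\n", slot);

	/* ... prepare the orphan directory entry using "slot" ... */

	claim_at_loc(&a, slot);		/* now actually consume it */
	printf("used mask is now 0x%x\n", a.used);
	return 0;
}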
diff --git a/fs/proc/page.c b/fs/proc/page.c index 180cf5a0bd67..3b8b45660331 100644 --- a/fs/proc/page.c +++ b/fs/proc/page.c | |||
| @@ -146,7 +146,7 @@ u64 stable_page_flags(struct page *page) | |||
| 146 | u |= kpf_copy_bit(k, KPF_HWPOISON, PG_hwpoison); | 146 | u |= kpf_copy_bit(k, KPF_HWPOISON, PG_hwpoison); |
| 147 | #endif | 147 | #endif |
| 148 | 148 | ||
| 149 | #ifdef CONFIG_IA64_UNCACHED_ALLOCATOR | 149 | #ifdef CONFIG_ARCH_USES_PG_UNCACHED |
| 150 | u |= kpf_copy_bit(k, KPF_UNCACHED, PG_uncached); | 150 | u |= kpf_copy_bit(k, KPF_UNCACHED, PG_uncached); |
| 151 | #endif | 151 | #endif |
| 152 | 152 | ||
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 439fc1f1c1c4..271afc48b9a5 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c | |||
| @@ -224,7 +224,8 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) | |||
| 224 | /* We don't show the stack guard page in /proc/maps */ | 224 | /* We don't show the stack guard page in /proc/maps */ |
| 225 | start = vma->vm_start; | 225 | start = vma->vm_start; |
| 226 | if (vma->vm_flags & VM_GROWSDOWN) | 226 | if (vma->vm_flags & VM_GROWSDOWN) |
| 227 | start += PAGE_SIZE; | 227 | if (!vma_stack_continue(vma->vm_prev, vma->vm_start)) |
| 228 | start += PAGE_SIZE; | ||
| 228 | 229 | ||
| 229 | seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n", | 230 | seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n", |
| 230 | start, | 231 | start, |
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c index 1b27b5688f62..da3fefe91a8f 100644 --- a/fs/sysfs/file.c +++ b/fs/sysfs/file.c | |||
| @@ -340,7 +340,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file) | |||
| 340 | char *p; | 340 | char *p; |
| 341 | 341 | ||
| 342 | p = d_path(&file->f_path, last_sysfs_file, sizeof(last_sysfs_file)); | 342 | p = d_path(&file->f_path, last_sysfs_file, sizeof(last_sysfs_file)); |
| 343 | if (p) | 343 | if (!IS_ERR(p)) |
| 344 | memmove(last_sysfs_file, p, strlen(p) + 1); | 344 | memmove(last_sysfs_file, p, strlen(p) + 1); |
| 345 | 345 | ||
| 346 | /* need attr_sd for attr and ops, its parent for kobj */ | 346 | /* need attr_sd for attr and ops, its parent for kobj */ |
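d_path() returns a pointer into the caller's buffer on success but an ERR_PTR()-encoded errno on failure, so the previous non-NULL check also accepted error pointers; the fix tests with IS_ERR(). A standalone sketch of the error-pointer convention, with ERR_PTR()/IS_ERR() defined locally and assumed to mirror include/linux/err.h, and fake_path() standing in for d_path():

#include <stdio.h>
#include <string.h>
#include <errno.h>

#define MAX_ERRNO	4095

/* Assumed to mirror the kernel's ERR_PTR()/IS_ERR() helpers. */
static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical lookup that mimics d_path(): buffer pointer or ERR_PTR. */
static char *fake_path(char *buf, size_t len, int fail)
{
	if (fail)
		return ERR_PTR(-ENAMETOOLONG);
	strncpy(buf, "/sys/kernel/fake", len - 1);
	buf[len - 1] = '\0';
	return buf;
}

int main(void)
{
	char buf[64];
	char *p = fake_path(buf, sizeof(buf), 1);

	if (p)			/* old-style check: passes even for errors */
		printf("plain NULL check: would use %p as a string\n", (void *)p);
	if (!IS_ERR(p))		/* fixed check */
		printf("IS_ERR check: safe to use the string\n");
	else
		printf("IS_ERR check: got error %ld\n", -(long)p);
	return 0;
}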
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index ea79072f5210..286e36e21dae 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c | |||
| @@ -440,12 +440,7 @@ _xfs_buf_find( | |||
| 440 | ASSERT(btp == bp->b_target); | 440 | ASSERT(btp == bp->b_target); |
| 441 | if (bp->b_file_offset == range_base && | 441 | if (bp->b_file_offset == range_base && |
| 442 | bp->b_buffer_length == range_length) { | 442 | bp->b_buffer_length == range_length) { |
| 443 | /* | ||
| 444 | * If we look at something, bring it to the | ||
| 445 | * front of the list for next time. | ||
| 446 | */ | ||
| 447 | atomic_inc(&bp->b_hold); | 443 | atomic_inc(&bp->b_hold); |
| 448 | list_move(&bp->b_hash_list, &hash->bh_list); | ||
| 449 | goto found; | 444 | goto found; |
| 450 | } | 445 | } |
| 451 | } | 446 | } |
| @@ -1443,8 +1438,7 @@ xfs_alloc_bufhash( | |||
| 1443 | { | 1438 | { |
| 1444 | unsigned int i; | 1439 | unsigned int i; |
| 1445 | 1440 | ||
| 1446 | btp->bt_hashshift = external ? 3 : 8; /* 8 or 256 buckets */ | 1441 | btp->bt_hashshift = external ? 3 : 12; /* 8 or 4096 buckets */ |
| 1447 | btp->bt_hashmask = (1 << btp->bt_hashshift) - 1; | ||
| 1448 | btp->bt_hash = kmem_zalloc_large((1 << btp->bt_hashshift) * | 1442 | btp->bt_hash = kmem_zalloc_large((1 << btp->bt_hashshift) * |
| 1449 | sizeof(xfs_bufhash_t)); | 1443 | sizeof(xfs_bufhash_t)); |
| 1450 | for (i = 0; i < (1 << btp->bt_hashshift); i++) { | 1444 | for (i = 0; i < (1 << btp->bt_hashshift); i++) { |
| @@ -1938,7 +1932,8 @@ xfs_buf_init(void) | |||
| 1938 | if (!xfs_buf_zone) | 1932 | if (!xfs_buf_zone) |
| 1939 | goto out; | 1933 | goto out; |
| 1940 | 1934 | ||
| 1941 | xfslogd_workqueue = create_workqueue("xfslogd"); | 1935 | xfslogd_workqueue = alloc_workqueue("xfslogd", |
| 1936 | WQ_RESCUER | WQ_HIGHPRI, 1); | ||
| 1942 | if (!xfslogd_workqueue) | 1937 | if (!xfslogd_workqueue) |
| 1943 | goto out_free_buf_zone; | 1938 | goto out_free_buf_zone; |
| 1944 | 1939 | ||
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h index d072e5ff923b..2a05614f0b92 100644 --- a/fs/xfs/linux-2.6/xfs_buf.h +++ b/fs/xfs/linux-2.6/xfs_buf.h | |||
| @@ -137,7 +137,6 @@ typedef struct xfs_buftarg { | |||
| 137 | size_t bt_smask; | 137 | size_t bt_smask; |
| 138 | 138 | ||
| 139 | /* per device buffer hash table */ | 139 | /* per device buffer hash table */ |
| 140 | uint bt_hashmask; | ||
| 141 | uint bt_hashshift; | 140 | uint bt_hashshift; |
| 142 | xfs_bufhash_t *bt_hash; | 141 | xfs_bufhash_t *bt_hash; |
| 143 | 142 | ||
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c index 237f5ffb2ee8..3b9e626f7cd1 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.c +++ b/fs/xfs/linux-2.6/xfs_ioctl.c | |||
| @@ -785,6 +785,8 @@ xfs_ioc_fsgetxattr( | |||
| 785 | { | 785 | { |
| 786 | struct fsxattr fa; | 786 | struct fsxattr fa; |
| 787 | 787 | ||
| 788 | memset(&fa, 0, sizeof(struct fsxattr)); | ||
| 789 | |||
| 788 | xfs_ilock(ip, XFS_ILOCK_SHARED); | 790 | xfs_ilock(ip, XFS_ILOCK_SHARED); |
| 789 | fa.fsx_xflags = xfs_ip2xflags(ip); | 791 | fa.fsx_xflags = xfs_ip2xflags(ip); |
| 790 | fa.fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog; | 792 | fa.fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog; |
| @@ -907,6 +909,13 @@ xfs_ioctl_setattr( | |||
| 907 | return XFS_ERROR(EIO); | 909 | return XFS_ERROR(EIO); |
| 908 | 910 | ||
| 909 | /* | 911 | /* |
| 912 | * Disallow 32bit project ids because on-disk structure | ||
| 913 | * is 16bit only. | ||
| 914 | */ | ||
| 915 | if ((mask & FSX_PROJID) && (fa->fsx_projid > (__uint16_t)-1)) | ||
| 916 | return XFS_ERROR(EINVAL); | ||
| 917 | |||
| 918 | /* | ||
| 910 | * If disk quotas is on, we make sure that the dquots do exist on disk, | 919 | * If disk quotas is on, we make sure that the dquots do exist on disk, |
| 911 | * before we start any other transactions. Trying to do this later | 920 | * before we start any other transactions. Trying to do this later |
| 912 | * is messy. We don't care to take a readlock to look at the ids | 921 | * is messy. We don't care to take a readlock to look at the ids |
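The project-id check above rests on a small piece of arithmetic worth spelling out: (__uint16_t)-1 is 0xffff, i.e. 65535, so the new test rejects any fsx_projid that cannot fit in the 16-bit on-disk field (65536 and up). The memset() added earlier in the same file ensures the members of struct fsxattr that the ioctl does not explicitly fill, such as its padding, are zeroed rather than copied out to userspace uninitialized.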
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c index 68be25dcd301..b1fc2a6bfe83 100644 --- a/fs/xfs/linux-2.6/xfs_iops.c +++ b/fs/xfs/linux-2.6/xfs_iops.c | |||
| @@ -664,7 +664,7 @@ xfs_vn_fiemap( | |||
| 664 | fieinfo->fi_extents_max + 1; | 664 | fieinfo->fi_extents_max + 1; |
| 665 | bm.bmv_count = min_t(__s32, bm.bmv_count, | 665 | bm.bmv_count = min_t(__s32, bm.bmv_count, |
| 666 | (PAGE_SIZE * 16 / sizeof(struct getbmapx))); | 666 | (PAGE_SIZE * 16 / sizeof(struct getbmapx))); |
| 667 | bm.bmv_iflags = BMV_IF_PREALLOC; | 667 | bm.bmv_iflags = BMV_IF_PREALLOC | BMV_IF_NO_HOLES; |
| 668 | if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) | 668 | if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) |
| 669 | bm.bmv_iflags |= BMV_IF_ATTRFORK; | 669 | bm.bmv_iflags |= BMV_IF_ATTRFORK; |
| 670 | if (!(fieinfo->fi_flags & FIEMAP_FLAG_SYNC)) | 670 | if (!(fieinfo->fi_flags & FIEMAP_FLAG_SYNC)) |
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 23f14e595c18..f90dadd5a968 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c | |||
| @@ -5533,12 +5533,24 @@ xfs_getbmap( | |||
| 5533 | map[i].br_startblock)) | 5533 | map[i].br_startblock)) |
| 5534 | goto out_free_map; | 5534 | goto out_free_map; |
| 5535 | 5535 | ||
| 5536 | nexleft--; | ||
| 5537 | bmv->bmv_offset = | 5536 | bmv->bmv_offset = |
| 5538 | out[cur_ext].bmv_offset + | 5537 | out[cur_ext].bmv_offset + |
| 5539 | out[cur_ext].bmv_length; | 5538 | out[cur_ext].bmv_length; |
| 5540 | bmv->bmv_length = | 5539 | bmv->bmv_length = |
| 5541 | max_t(__int64_t, 0, bmvend - bmv->bmv_offset); | 5540 | max_t(__int64_t, 0, bmvend - bmv->bmv_offset); |
| 5541 | |||
| 5542 | /* | ||
| 5543 | * In case we don't want to return the hole, | ||
| 5544 | * don't increase cur_ext so that we can reuse | ||
| 5545 | * it in the next loop. | ||
| 5546 | */ | ||
| 5547 | if ((iflags & BMV_IF_NO_HOLES) && | ||
| 5548 | map[i].br_startblock == HOLESTARTBLOCK) { | ||
| 5549 | memset(&out[cur_ext], 0, sizeof(out[cur_ext])); | ||
| 5550 | continue; | ||
| 5551 | } | ||
| 5552 | |||
| 5553 | nexleft--; | ||
| 5542 | bmv->bmv_entries++; | 5554 | bmv->bmv_entries++; |
| 5543 | cur_ext++; | 5555 | cur_ext++; |
| 5544 | } | 5556 | } |
diff --git a/fs/xfs/xfs_fs.h b/fs/xfs/xfs_fs.h index 7cf7220e7d5f..87c2e9d02288 100644 --- a/fs/xfs/xfs_fs.h +++ b/fs/xfs/xfs_fs.h | |||
| @@ -114,8 +114,10 @@ struct getbmapx { | |||
| 114 | #define BMV_IF_NO_DMAPI_READ 0x2 /* Do not generate DMAPI read event */ | 114 | #define BMV_IF_NO_DMAPI_READ 0x2 /* Do not generate DMAPI read event */ |
| 115 | #define BMV_IF_PREALLOC 0x4 /* rtn status BMV_OF_PREALLOC if req */ | 115 | #define BMV_IF_PREALLOC 0x4 /* rtn status BMV_OF_PREALLOC if req */ |
| 116 | #define BMV_IF_DELALLOC 0x8 /* rtn status BMV_OF_DELALLOC if req */ | 116 | #define BMV_IF_DELALLOC 0x8 /* rtn status BMV_OF_DELALLOC if req */ |
| 117 | #define BMV_IF_NO_HOLES 0x10 /* Do not return holes */ | ||
| 117 | #define BMV_IF_VALID \ | 118 | #define BMV_IF_VALID \ |
| 118 | (BMV_IF_ATTRFORK|BMV_IF_NO_DMAPI_READ|BMV_IF_PREALLOC|BMV_IF_DELALLOC) | 119 | (BMV_IF_ATTRFORK|BMV_IF_NO_DMAPI_READ|BMV_IF_PREALLOC| \ |
| 120 | BMV_IF_DELALLOC|BMV_IF_NO_HOLES) | ||
| 119 | 121 | ||
| 120 | /* bmv_oflags values - returned for each non-header segment */ | 122 | /* bmv_oflags values - returned for each non-header segment */ |
| 121 | #define BMV_OF_PREALLOC 0x1 /* segment = unwritten pre-allocation */ | 123 | #define BMV_OF_PREALLOC 0x1 /* segment = unwritten pre-allocation */ |
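The new BMV_IF_NO_HOLES flag ties the two preceding hunks together: xfs_getbmap() now reuses the current output slot when it hits a hole, and the fiemap path requests that behavior. A rough sketch of how a userspace getbmapx caller might set the flag; the header path is an assumption (xfsprogs), and since the flag is new in this commit the sketch defines it locally with the value from the header hunk above:

    #include <xfs/xfs.h>            /* assumed userspace xfsprogs header */
    #include <sys/ioctl.h>
    #include <string.h>

    #ifndef BMV_IF_NO_HOLES
    #define BMV_IF_NO_HOLES 0x10    /* value taken from the hunk above */
    #endif

    /* Ask for up to nrecs extent records for the whole file, with hole
     * records skipped entirely (sketch; error handling left to the caller). */
    static int get_extents(int fd, struct getbmapx *recs, int nrecs)
    {
            memset(recs, 0, nrecs * sizeof(*recs));
            recs[0].bmv_length = -1LL;                  /* map to end of file */
            recs[0].bmv_count  = nrecs;                 /* includes header record */
            recs[0].bmv_iflags = BMV_IF_PREALLOC | BMV_IF_NO_HOLES;

            return ioctl(fd, XFS_IOC_GETBMAPX, recs);
    }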
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index 66d585c6917c..4c7c7bfb2b2f 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c | |||
| @@ -2299,15 +2299,22 @@ xfs_alloc_file_space( | |||
| 2299 | e = allocatesize_fsb; | 2299 | e = allocatesize_fsb; |
| 2300 | } | 2300 | } |
| 2301 | 2301 | ||
| 2302 | /* | ||
| 2303 | * The transaction reservation is limited to a 32-bit block | ||
| 2304 | * count, hence we need to limit the number of blocks we are | ||
| 2305 | * trying to reserve to avoid an overflow. We can't allocate | ||
| 2306 | * more than @nimaps extents, and an extent is limited on disk | ||
| 2307 | * to MAXEXTLEN (21 bits), so use that to enforce the limit. | ||
| 2308 | */ | ||
| 2309 | resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps)); | ||
| 2302 | if (unlikely(rt)) { | 2310 | if (unlikely(rt)) { |
| 2303 | resrtextents = qblocks = (uint)(e - s); | 2311 | resrtextents = qblocks = resblks; |
| 2304 | resrtextents /= mp->m_sb.sb_rextsize; | 2312 | resrtextents /= mp->m_sb.sb_rextsize; |
| 2305 | resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0); | 2313 | resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0); |
| 2306 | quota_flag = XFS_QMOPT_RES_RTBLKS; | 2314 | quota_flag = XFS_QMOPT_RES_RTBLKS; |
| 2307 | } else { | 2315 | } else { |
| 2308 | resrtextents = 0; | 2316 | resrtextents = 0; |
| 2309 | resblks = qblocks = \ | 2317 | resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks); |
| 2310 | XFS_DIOSTRAT_SPACE_RES(mp, (uint)(e - s)); | ||
| 2311 | quota_flag = XFS_QMOPT_RES_REGBLKS; | 2318 | quota_flag = XFS_QMOPT_RES_REGBLKS; |
| 2312 | } | 2319 | } |
| 2313 | 2320 | ||
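The reservation clamp above is easier to see with numbers: MAXEXTLEN is a 21-bit extent length, i.e. 0x1fffff = 2,097,151 filesystem blocks per extent, so the reservation is now capped at nimaps * 2,097,151 blocks (for nimaps = 1 that is roughly 8 GiB at a 4 KiB block size as an example). That comfortably fits the 32-bit block count the transaction reservation can express, whereas the old (e - s) value could exceed it for very large preallocations.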
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index baacd98e7cc6..4de84ce3a927 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h | |||
| @@ -377,9 +377,6 @@ struct acpi_pci_root { | |||
| 377 | 377 | ||
| 378 | u32 osc_support_set; /* _OSC state of support bits */ | 378 | u32 osc_support_set; /* _OSC state of support bits */ |
| 379 | u32 osc_control_set; /* _OSC state of control bits */ | 379 | u32 osc_control_set; /* _OSC state of control bits */ |
| 380 | u32 osc_control_qry; /* the latest _OSC query result */ | ||
| 381 | |||
| 382 | u32 osc_queried:1; /* has _OSC control been queried? */ | ||
| 383 | }; | 380 | }; |
| 384 | 381 | ||
| 385 | /* helper */ | 382 | /* helper */ |
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h index c7376bf80b06..8ca18e26d7e3 100644 --- a/include/asm-generic/gpio.h +++ b/include/asm-generic/gpio.h | |||
| @@ -16,15 +16,27 @@ | |||
| 16 | * While the GPIO programming interface defines valid GPIO numbers | 16 | * While the GPIO programming interface defines valid GPIO numbers |
| 17 | * to be in the range 0..MAX_INT, this library restricts them to the | 17 | * to be in the range 0..MAX_INT, this library restricts them to the |
| 18 | * smaller range 0..ARCH_NR_GPIOS-1. | 18 | * smaller range 0..ARCH_NR_GPIOS-1. |
| 19 | * | ||
| 20 | * ARCH_NR_GPIOS is somewhat arbitrary; it usually reflects the sum of | ||
| 21 | * builtin/SoC GPIOs plus a number of GPIOs on expanders; the latter is | ||
| 22 | * actually an estimate of a board-specific value. | ||
| 19 | */ | 23 | */ |
| 20 | 24 | ||
| 21 | #ifndef ARCH_NR_GPIOS | 25 | #ifndef ARCH_NR_GPIOS |
| 22 | #define ARCH_NR_GPIOS 256 | 26 | #define ARCH_NR_GPIOS 256 |
| 23 | #endif | 27 | #endif |
| 24 | 28 | ||
| 29 | /* | ||
| 30 | * "valid" GPIO numbers are nonnegative and may be passed to | ||
| 31 | * setup routines like gpio_request(). only some valid numbers | ||
| 32 | * can successfully be requested and used. | ||
| 33 | * | ||
| 34 | * Invalid GPIO numbers are useful for indicating no-such-GPIO in | ||
| 35 | * platform data and other tables. | ||
| 36 | */ | ||
| 37 | |||
| 25 | static inline int gpio_is_valid(int number) | 38 | static inline int gpio_is_valid(int number) |
| 26 | { | 39 | { |
| 27 | /* only some non-negative numbers are valid */ | ||
| 28 | return ((unsigned)number) < ARCH_NR_GPIOS; | 40 | return ((unsigned)number) < ARCH_NR_GPIOS; |
| 29 | } | 41 | } |
| 30 | 42 | ||
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h index b5043a9890d8..08923b684768 100644 --- a/include/asm-generic/percpu.h +++ b/include/asm-generic/percpu.h | |||
| @@ -70,11 +70,16 @@ extern void setup_per_cpu_areas(void); | |||
| 70 | 70 | ||
| 71 | #else /* ! SMP */ | 71 | #else /* ! SMP */ |
| 72 | 72 | ||
| 73 | #define per_cpu(var, cpu) (*((void)(cpu), &(var))) | 73 | #define VERIFY_PERCPU_PTR(__p) ({ \ |
| 74 | #define __get_cpu_var(var) (var) | 74 | __verify_pcpu_ptr((__p)); \ |
| 75 | #define __raw_get_cpu_var(var) (var) | 75 | (typeof(*(__p)) __kernel __force *)(__p); \ |
| 76 | #define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0) | 76 | }) |
| 77 | #define __this_cpu_ptr(ptr) this_cpu_ptr(ptr) | 77 | |
| 78 | #define per_cpu(var, cpu) (*((void)(cpu), VERIFY_PERCPU_PTR(&(var)))) | ||
| 79 | #define __get_cpu_var(var) (*VERIFY_PERCPU_PTR(&(var))) | ||
| 80 | #define __raw_get_cpu_var(var) (*VERIFY_PERCPU_PTR(&(var))) | ||
| 81 | #define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0) | ||
| 82 | #define __this_cpu_ptr(ptr) this_cpu_ptr(ptr) | ||
| 78 | 83 | ||
| 79 | #endif /* SMP */ | 84 | #endif /* SMP */ |
| 80 | 85 | ||
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index ccf94dc5acdf..c227757feb06 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h | |||
| @@ -304,8 +304,8 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context); | |||
| 304 | OSC_PCI_EXPRESS_PME_CONTROL | \ | 304 | OSC_PCI_EXPRESS_PME_CONTROL | \ |
| 305 | OSC_PCI_EXPRESS_AER_CONTROL | \ | 305 | OSC_PCI_EXPRESS_AER_CONTROL | \ |
| 306 | OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL) | 306 | OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL) |
| 307 | 307 | extern acpi_status acpi_pci_osc_control_set(acpi_handle handle, | |
| 308 | extern acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 flags); | 308 | u32 *mask, u32 req); |
| 309 | extern void acpi_early_init(void); | 309 | extern void acpi_early_init(void); |
| 310 | 310 | ||
| 311 | #else /* !CONFIG_ACPI */ | 311 | #else /* !CONFIG_ACPI */ |
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index ed3e92e41c6e..0c991023ee47 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h | |||
| @@ -578,7 +578,12 @@ struct task_struct *cgroup_iter_next(struct cgroup *cgrp, | |||
| 578 | void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it); | 578 | void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it); |
| 579 | int cgroup_scan_tasks(struct cgroup_scanner *scan); | 579 | int cgroup_scan_tasks(struct cgroup_scanner *scan); |
| 580 | int cgroup_attach_task(struct cgroup *, struct task_struct *); | 580 | int cgroup_attach_task(struct cgroup *, struct task_struct *); |
| 581 | int cgroup_attach_task_current_cg(struct task_struct *); | 581 | int cgroup_attach_task_all(struct task_struct *from, struct task_struct *); |
| 582 | |||
| 583 | static inline int cgroup_attach_task_current_cg(struct task_struct *tsk) | ||
| 584 | { | ||
| 585 | return cgroup_attach_task_all(current, tsk); | ||
| 586 | } | ||
| 582 | 587 | ||
| 583 | /* | 588 | /* |
| 584 | * CSS ID is ID for cgroup_subsys_state structs under subsys. This only works | 589 | * CSS ID is ID for cgroup_subsys_state structs under subsys. This only works |
| @@ -636,6 +641,11 @@ static inline int cgroupstats_build(struct cgroupstats *stats, | |||
| 636 | } | 641 | } |
| 637 | 642 | ||
| 638 | /* No cgroups - nothing to do */ | 643 | /* No cgroups - nothing to do */ |
| 644 | static inline int cgroup_attach_task_all(struct task_struct *from, | ||
| 645 | struct task_struct *t) | ||
| 646 | { | ||
| 647 | return 0; | ||
| 648 | } | ||
| 639 | static inline int cgroup_attach_task_current_cg(struct task_struct *t) | 649 | static inline int cgroup_attach_task_current_cg(struct task_struct *t) |
| 640 | { | 650 | { |
| 641 | return 0; | 651 | return 0; |
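The cgroup change replaces the exported cgroup_attach_task_current_cg() with the more general cgroup_attach_task_all(from, tsk); the old name survives as the static inline wrapper above. A minimal sketch of a caller that moves a newly created kernel thread into the caller's own cgroups (hypothetical code, error handling trimmed):

    #include <linux/cgroup.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>
    #include <linux/err.h>

    static int start_worker(int (*fn)(void *), void *data)
    {
            struct task_struct *tsk = kthread_create(fn, data, "my-worker");

            if (IS_ERR(tsk))
                    return PTR_ERR(tsk);

            /* Same effect as the old cgroup_attach_task_current_cg(tsk). */
            cgroup_attach_task_all(current, tsk);
            wake_up_process(tsk);
            return 0;
    }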
diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 2c958f4fce1e..926b50322a46 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h | |||
| @@ -136,6 +136,7 @@ extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t); | |||
| 136 | 136 | ||
| 137 | extern int elevator_init(struct request_queue *, char *); | 137 | extern int elevator_init(struct request_queue *, char *); |
| 138 | extern void elevator_exit(struct elevator_queue *); | 138 | extern void elevator_exit(struct elevator_queue *); |
| 139 | extern int elevator_change(struct request_queue *, const char *); | ||
| 139 | extern int elv_rq_merge_ok(struct request *, struct bio *); | 140 | extern int elv_rq_merge_ok(struct request *, struct bio *); |
| 140 | 141 | ||
| 141 | /* | 142 | /* |
diff --git a/include/linux/i2c/sx150x.h b/include/linux/i2c/sx150x.h index ee3049cb9ba5..52baa79d69a7 100644 --- a/include/linux/i2c/sx150x.h +++ b/include/linux/i2c/sx150x.h | |||
| @@ -63,6 +63,9 @@ | |||
| 63 | * IRQ lines will appear. Similarly to gpio_base, the expander | 63 | * IRQ lines will appear. Similarly to gpio_base, the expander |
| 64 | * will create a block of irqs beginning at this number. | 64 | * will create a block of irqs beginning at this number. |
| 65 | * This value is ignored if irq_summary is < 0. | 65 | * This value is ignored if irq_summary is < 0. |
| 66 | * @reset_during_probe: If set to true, the driver will trigger a full | ||
| 67 | * reset of the chip at the beginning of the probe | ||
| 68 | * in order to place it in a known state. | ||
| 66 | */ | 69 | */ |
| 67 | struct sx150x_platform_data { | 70 | struct sx150x_platform_data { |
| 68 | unsigned gpio_base; | 71 | unsigned gpio_base; |
| @@ -73,6 +76,7 @@ struct sx150x_platform_data { | |||
| 73 | u16 io_polarity; | 76 | u16 io_polarity; |
| 74 | int irq_summary; | 77 | int irq_summary; |
| 75 | unsigned irq_base; | 78 | unsigned irq_base; |
| 79 | bool reset_during_probe; | ||
| 76 | }; | 80 | }; |
| 77 | 81 | ||
| 78 | #endif /* __LINUX_I2C_SX150X_H */ | 82 | #endif /* __LINUX_I2C_SX150X_H */ |
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h index 0a6b3d5c490c..7fb592793738 100644 --- a/include/linux/io-mapping.h +++ b/include/linux/io-mapping.h | |||
| @@ -79,7 +79,7 @@ io_mapping_free(struct io_mapping *mapping) | |||
| 79 | } | 79 | } |
| 80 | 80 | ||
| 81 | /* Atomic map/unmap */ | 81 | /* Atomic map/unmap */ |
| 82 | static inline void * | 82 | static inline void __iomem * |
| 83 | io_mapping_map_atomic_wc(struct io_mapping *mapping, | 83 | io_mapping_map_atomic_wc(struct io_mapping *mapping, |
| 84 | unsigned long offset, | 84 | unsigned long offset, |
| 85 | int slot) | 85 | int slot) |
| @@ -94,12 +94,12 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping, | |||
| 94 | } | 94 | } |
| 95 | 95 | ||
| 96 | static inline void | 96 | static inline void |
| 97 | io_mapping_unmap_atomic(void *vaddr, int slot) | 97 | io_mapping_unmap_atomic(void __iomem *vaddr, int slot) |
| 98 | { | 98 | { |
| 99 | iounmap_atomic(vaddr, slot); | 99 | iounmap_atomic(vaddr, slot); |
| 100 | } | 100 | } |
| 101 | 101 | ||
| 102 | static inline void * | 102 | static inline void __iomem * |
| 103 | io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) | 103 | io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) |
| 104 | { | 104 | { |
| 105 | resource_size_t phys_addr; | 105 | resource_size_t phys_addr; |
| @@ -111,7 +111,7 @@ io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) | |||
| 111 | } | 111 | } |
| 112 | 112 | ||
| 113 | static inline void | 113 | static inline void |
| 114 | io_mapping_unmap(void *vaddr) | 114 | io_mapping_unmap(void __iomem *vaddr) |
| 115 | { | 115 | { |
| 116 | iounmap(vaddr); | 116 | iounmap(vaddr); |
| 117 | } | 117 | } |
| @@ -125,38 +125,38 @@ struct io_mapping; | |||
| 125 | static inline struct io_mapping * | 125 | static inline struct io_mapping * |
| 126 | io_mapping_create_wc(resource_size_t base, unsigned long size) | 126 | io_mapping_create_wc(resource_size_t base, unsigned long size) |
| 127 | { | 127 | { |
| 128 | return (struct io_mapping *) ioremap_wc(base, size); | 128 | return (struct io_mapping __force *) ioremap_wc(base, size); |
| 129 | } | 129 | } |
| 130 | 130 | ||
| 131 | static inline void | 131 | static inline void |
| 132 | io_mapping_free(struct io_mapping *mapping) | 132 | io_mapping_free(struct io_mapping *mapping) |
| 133 | { | 133 | { |
| 134 | iounmap(mapping); | 134 | iounmap((void __force __iomem *) mapping); |
| 135 | } | 135 | } |
| 136 | 136 | ||
| 137 | /* Atomic map/unmap */ | 137 | /* Atomic map/unmap */ |
| 138 | static inline void * | 138 | static inline void __iomem * |
| 139 | io_mapping_map_atomic_wc(struct io_mapping *mapping, | 139 | io_mapping_map_atomic_wc(struct io_mapping *mapping, |
| 140 | unsigned long offset, | 140 | unsigned long offset, |
| 141 | int slot) | 141 | int slot) |
| 142 | { | 142 | { |
| 143 | return ((char *) mapping) + offset; | 143 | return ((char __force __iomem *) mapping) + offset; |
| 144 | } | 144 | } |
| 145 | 145 | ||
| 146 | static inline void | 146 | static inline void |
| 147 | io_mapping_unmap_atomic(void *vaddr, int slot) | 147 | io_mapping_unmap_atomic(void __iomem *vaddr, int slot) |
| 148 | { | 148 | { |
| 149 | } | 149 | } |
| 150 | 150 | ||
| 151 | /* Non-atomic map/unmap */ | 151 | /* Non-atomic map/unmap */ |
| 152 | static inline void * | 152 | static inline void __iomem * |
| 153 | io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) | 153 | io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) |
| 154 | { | 154 | { |
| 155 | return ((char *) mapping) + offset; | 155 | return ((char __force __iomem *) mapping) + offset; |
| 156 | } | 156 | } |
| 157 | 157 | ||
| 158 | static inline void | 158 | static inline void |
| 159 | io_mapping_unmap(void *vaddr) | 159 | io_mapping_unmap(void __iomem *vaddr) |
| 160 | { | 160 | { |
| 161 | } | 161 | } |
| 162 | 162 | ||
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h index 4aa95f203f3e..62dbee554f60 100644 --- a/include/linux/kfifo.h +++ b/include/linux/kfifo.h | |||
| @@ -214,7 +214,7 @@ __kfifo_must_check_helper(unsigned int val) | |||
| 214 | */ | 214 | */ |
| 215 | #define kfifo_reset(fifo) \ | 215 | #define kfifo_reset(fifo) \ |
| 216 | (void)({ \ | 216 | (void)({ \ |
| 217 | typeof(fifo + 1) __tmp = (fifo); \ | 217 | typeof((fifo) + 1) __tmp = (fifo); \ |
| 218 | __tmp->kfifo.in = __tmp->kfifo.out = 0; \ | 218 | __tmp->kfifo.in = __tmp->kfifo.out = 0; \ |
| 219 | }) | 219 | }) |
| 220 | 220 | ||
| @@ -228,7 +228,7 @@ __kfifo_must_check_helper(unsigned int val) | |||
| 228 | */ | 228 | */ |
| 229 | #define kfifo_reset_out(fifo) \ | 229 | #define kfifo_reset_out(fifo) \ |
| 230 | (void)({ \ | 230 | (void)({ \ |
| 231 | typeof(fifo + 1) __tmp = (fifo); \ | 231 | typeof((fifo) + 1) __tmp = (fifo); \ |
| 232 | __tmp->kfifo.out = __tmp->kfifo.in; \ | 232 | __tmp->kfifo.out = __tmp->kfifo.in; \ |
| 233 | }) | 233 | }) |
| 234 | 234 | ||
| @@ -238,7 +238,7 @@ __kfifo_must_check_helper(unsigned int val) | |||
| 238 | */ | 238 | */ |
| 239 | #define kfifo_len(fifo) \ | 239 | #define kfifo_len(fifo) \ |
| 240 | ({ \ | 240 | ({ \ |
| 241 | typeof(fifo + 1) __tmpl = (fifo); \ | 241 | typeof((fifo) + 1) __tmpl = (fifo); \ |
| 242 | __tmpl->kfifo.in - __tmpl->kfifo.out; \ | 242 | __tmpl->kfifo.in - __tmpl->kfifo.out; \ |
| 243 | }) | 243 | }) |
| 244 | 244 | ||
| @@ -248,7 +248,7 @@ __kfifo_must_check_helper(unsigned int val) | |||
| 248 | */ | 248 | */ |
| 249 | #define kfifo_is_empty(fifo) \ | 249 | #define kfifo_is_empty(fifo) \ |
| 250 | ({ \ | 250 | ({ \ |
| 251 | typeof(fifo + 1) __tmpq = (fifo); \ | 251 | typeof((fifo) + 1) __tmpq = (fifo); \ |
| 252 | __tmpq->kfifo.in == __tmpq->kfifo.out; \ | 252 | __tmpq->kfifo.in == __tmpq->kfifo.out; \ |
| 253 | }) | 253 | }) |
| 254 | 254 | ||
| @@ -258,7 +258,7 @@ __kfifo_must_check_helper(unsigned int val) | |||
| 258 | */ | 258 | */ |
| 259 | #define kfifo_is_full(fifo) \ | 259 | #define kfifo_is_full(fifo) \ |
| 260 | ({ \ | 260 | ({ \ |
| 261 | typeof(fifo + 1) __tmpq = (fifo); \ | 261 | typeof((fifo) + 1) __tmpq = (fifo); \ |
| 262 | kfifo_len(__tmpq) > __tmpq->kfifo.mask; \ | 262 | kfifo_len(__tmpq) > __tmpq->kfifo.mask; \ |
| 263 | }) | 263 | }) |
| 264 | 264 | ||
| @@ -269,7 +269,7 @@ __kfifo_must_check_helper(unsigned int val) | |||
| 269 | #define kfifo_avail(fifo) \ | 269 | #define kfifo_avail(fifo) \ |
| 270 | __kfifo_must_check_helper( \ | 270 | __kfifo_must_check_helper( \ |
| 271 | ({ \ | 271 | ({ \ |
| 272 | typeof(fifo + 1) __tmpq = (fifo); \ | 272 | typeof((fifo) + 1) __tmpq = (fifo); \ |
| 273 | const size_t __recsize = sizeof(*__tmpq->rectype); \ | 273 | const size_t __recsize = sizeof(*__tmpq->rectype); \ |
| 274 | unsigned int __avail = kfifo_size(__tmpq) - kfifo_len(__tmpq); \ | 274 | unsigned int __avail = kfifo_size(__tmpq) - kfifo_len(__tmpq); \ |
| 275 | (__recsize) ? ((__avail <= __recsize) ? 0 : \ | 275 | (__recsize) ? ((__avail <= __recsize) ? 0 : \ |
| @@ -284,7 +284,7 @@ __kfifo_must_check_helper( \ | |||
| 284 | */ | 284 | */ |
| 285 | #define kfifo_skip(fifo) \ | 285 | #define kfifo_skip(fifo) \ |
| 286 | (void)({ \ | 286 | (void)({ \ |
| 287 | typeof(fifo + 1) __tmp = (fifo); \ | 287 | typeof((fifo) + 1) __tmp = (fifo); \ |
| 288 | const size_t __recsize = sizeof(*__tmp->rectype); \ | 288 | const size_t __recsize = sizeof(*__tmp->rectype); \ |
| 289 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | 289 | struct __kfifo *__kfifo = &__tmp->kfifo; \ |
| 290 | if (__recsize) \ | 290 | if (__recsize) \ |
| @@ -302,7 +302,7 @@ __kfifo_must_check_helper( \ | |||
| 302 | #define kfifo_peek_len(fifo) \ | 302 | #define kfifo_peek_len(fifo) \ |
| 303 | __kfifo_must_check_helper( \ | 303 | __kfifo_must_check_helper( \ |
| 304 | ({ \ | 304 | ({ \ |
| 305 | typeof(fifo + 1) __tmp = (fifo); \ | 305 | typeof((fifo) + 1) __tmp = (fifo); \ |
| 306 | const size_t __recsize = sizeof(*__tmp->rectype); \ | 306 | const size_t __recsize = sizeof(*__tmp->rectype); \ |
| 307 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | 307 | struct __kfifo *__kfifo = &__tmp->kfifo; \ |
| 308 | (!__recsize) ? kfifo_len(__tmp) * sizeof(*__tmp->type) : \ | 308 | (!__recsize) ? kfifo_len(__tmp) * sizeof(*__tmp->type) : \ |
| @@ -325,7 +325,7 @@ __kfifo_must_check_helper( \ | |||
| 325 | #define kfifo_alloc(fifo, size, gfp_mask) \ | 325 | #define kfifo_alloc(fifo, size, gfp_mask) \ |
| 326 | __kfifo_must_check_helper( \ | 326 | __kfifo_must_check_helper( \ |
| 327 | ({ \ | 327 | ({ \ |
| 328 | typeof(fifo + 1) __tmp = (fifo); \ | 328 | typeof((fifo) + 1) __tmp = (fifo); \ |
| 329 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | 329 | struct __kfifo *__kfifo = &__tmp->kfifo; \ |
| 330 | __is_kfifo_ptr(__tmp) ? \ | 330 | __is_kfifo_ptr(__tmp) ? \ |
| 331 | __kfifo_alloc(__kfifo, size, sizeof(*__tmp->type), gfp_mask) : \ | 331 | __kfifo_alloc(__kfifo, size, sizeof(*__tmp->type), gfp_mask) : \ |
| @@ -339,7 +339,7 @@ __kfifo_must_check_helper( \ | |||
| 339 | */ | 339 | */ |
| 340 | #define kfifo_free(fifo) \ | 340 | #define kfifo_free(fifo) \ |
| 341 | ({ \ | 341 | ({ \ |
| 342 | typeof(fifo + 1) __tmp = (fifo); \ | 342 | typeof((fifo) + 1) __tmp = (fifo); \ |
| 343 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | 343 | struct __kfifo *__kfifo = &__tmp->kfifo; \ |
| 344 | if (__is_kfifo_ptr(__tmp)) \ | 344 | if (__is_kfifo_ptr(__tmp)) \ |
| 345 | __kfifo_free(__kfifo); \ | 345 | __kfifo_free(__kfifo); \ |
| @@ -358,7 +358,7 @@ __kfifo_must_check_helper( \ | |||
| 358 | */ | 358 | */ |
| 359 | #define kfifo_init(fifo, buffer, size) \ | 359 | #define kfifo_init(fifo, buffer, size) \ |
| 360 | ({ \ | 360 | ({ \ |
| 361 | typeof(fifo + 1) __tmp = (fifo); \ | 361 | typeof((fifo) + 1) __tmp = (fifo); \ |
| 362 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | 362 | struct __kfifo *__kfifo = &__tmp->kfifo; \ |
| 363 | __is_kfifo_ptr(__tmp) ? \ | 363 | __is_kfifo_ptr(__tmp) ? \ |
| 364 | __kfifo_init(__kfifo, buffer, size, sizeof(*__tmp->type)) : \ | 364 | __kfifo_init(__kfifo, buffer, size, sizeof(*__tmp->type)) : \ |
| @@ -379,8 +379,8 @@ __kfifo_must_check_helper( \ | |||
| 379 | */ | 379 | */ |
| 380 | #define kfifo_put(fifo, val) \ | 380 | #define kfifo_put(fifo, val) \ |
| 381 | ({ \ | 381 | ({ \ |
| 382 | typeof(fifo + 1) __tmp = (fifo); \ | 382 | typeof((fifo) + 1) __tmp = (fifo); \ |
| 383 | typeof(val + 1) __val = (val); \ | 383 | typeof((val) + 1) __val = (val); \ |
| 384 | unsigned int __ret; \ | 384 | unsigned int __ret; \ |
| 385 | const size_t __recsize = sizeof(*__tmp->rectype); \ | 385 | const size_t __recsize = sizeof(*__tmp->rectype); \ |
| 386 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | 386 | struct __kfifo *__kfifo = &__tmp->kfifo; \ |
| @@ -421,8 +421,8 @@ __kfifo_must_check_helper( \ | |||
| 421 | #define kfifo_get(fifo, val) \ | 421 | #define kfifo_get(fifo, val) \ |
| 422 | __kfifo_must_check_helper( \ | 422 | __kfifo_must_check_helper( \ |
| 423 | ({ \ | 423 | ({ \ |
| 424 | typeof(fifo + 1) __tmp = (fifo); \ | 424 | typeof((fifo) + 1) __tmp = (fifo); \ |
| 425 | typeof(val + 1) __val = (val); \ | 425 | typeof((val) + 1) __val = (val); \ |
| 426 | unsigned int __ret; \ | 426 | unsigned int __ret; \ |
| 427 | const size_t __recsize = sizeof(*__tmp->rectype); \ | 427 | const size_t __recsize = sizeof(*__tmp->rectype); \ |
| 428 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | 428 | struct __kfifo *__kfifo = &__tmp->kfifo; \ |
| @@ -462,8 +462,8 @@ __kfifo_must_check_helper( \ | |||
| 462 | #define kfifo_peek(fifo, val) \ | 462 | #define kfifo_peek(fifo, val) \ |
| 463 | __kfifo_must_check_helper( \ | 463 | __kfifo_must_check_helper( \ |
| 464 | ({ \ | 464 | ({ \ |
| 465 | typeof(fifo + 1) __tmp = (fifo); \ | 465 | typeof((fifo) + 1) __tmp = (fifo); \ |
| 466 | typeof(val + 1) __val = (val); \ | 466 | typeof((val) + 1) __val = (val); \ |
| 467 | unsigned int __ret; \ | 467 | unsigned int __ret; \ |
| 468 | const size_t __recsize = sizeof(*__tmp->rectype); \ | 468 | const size_t __recsize = sizeof(*__tmp->rectype); \ |
| 469 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | 469 | struct __kfifo *__kfifo = &__tmp->kfifo; \ |
| @@ -501,8 +501,8 @@ __kfifo_must_check_helper( \ | |||
| 501 | */ | 501 | */ |
| 502 | #define kfifo_in(fifo, buf, n) \ | 502 | #define kfifo_in(fifo, buf, n) \ |
| 503 | ({ \ | 503 | ({ \ |
| 504 | typeof(fifo + 1) __tmp = (fifo); \ | 504 | typeof((fifo) + 1) __tmp = (fifo); \ |
| 505 | typeof(buf + 1) __buf = (buf); \ | 505 | typeof((buf) + 1) __buf = (buf); \ |
| 506 | unsigned long __n = (n); \ | 506 | unsigned long __n = (n); \ |
| 507 | const size_t __recsize = sizeof(*__tmp->rectype); \ | 507 | const size_t __recsize = sizeof(*__tmp->rectype); \ |
| 508 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | 508 | struct __kfifo *__kfifo = &__tmp->kfifo; \ |
| @@ -554,8 +554,8 @@ __kfifo_must_check_helper( \ | |||
| 554 | #define kfifo_out(fifo, buf, n) \ | 554 | #define kfifo_out(fifo, buf, n) \ |
| 555 | __kfifo_must_check_helper( \ | 555 | __kfifo_must_check_helper( \ |
| 556 | ({ \ | 556 | ({ \ |
| 557 | typeof(fifo + 1) __tmp = (fifo); \ | 557 | typeof((fifo) + 1) __tmp = (fifo); \ |
| 558 | typeof(buf + 1) __buf = (buf); \ | 558 | typeof((buf) + 1) __buf = (buf); \ |
| 559 | unsigned long __n = (n); \ | 559 | unsigned long __n = (n); \ |
| 560 | const size_t __recsize = sizeof(*__tmp->rectype); \ | 560 | const size_t __recsize = sizeof(*__tmp->rectype); \ |
| 561 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | 561 | struct __kfifo *__kfifo = &__tmp->kfifo; \ |
| @@ -611,7 +611,7 @@ __kfifo_must_check_helper( \ | |||
| 611 | #define kfifo_from_user(fifo, from, len, copied) \ | 611 | #define kfifo_from_user(fifo, from, len, copied) \ |
| 612 | __kfifo_must_check_helper( \ | 612 | __kfifo_must_check_helper( \ |
| 613 | ({ \ | 613 | ({ \ |
| 614 | typeof(fifo + 1) __tmp = (fifo); \ | 614 | typeof((fifo) + 1) __tmp = (fifo); \ |
| 615 | const void __user *__from = (from); \ | 615 | const void __user *__from = (from); \ |
| 616 | unsigned int __len = (len); \ | 616 | unsigned int __len = (len); \ |
| 617 | unsigned int *__copied = (copied); \ | 617 | unsigned int *__copied = (copied); \ |
| @@ -639,7 +639,7 @@ __kfifo_must_check_helper( \ | |||
| 639 | #define kfifo_to_user(fifo, to, len, copied) \ | 639 | #define kfifo_to_user(fifo, to, len, copied) \ |
| 640 | __kfifo_must_check_helper( \ | 640 | __kfifo_must_check_helper( \ |
| 641 | ({ \ | 641 | ({ \ |
| 642 | typeof(fifo + 1) __tmp = (fifo); \ | 642 | typeof((fifo) + 1) __tmp = (fifo); \ |
| 643 | void __user *__to = (to); \ | 643 | void __user *__to = (to); \ |
| 644 | unsigned int __len = (len); \ | 644 | unsigned int __len = (len); \ |
| 645 | unsigned int *__copied = (copied); \ | 645 | unsigned int *__copied = (copied); \ |
| @@ -666,7 +666,7 @@ __kfifo_must_check_helper( \ | |||
| 666 | */ | 666 | */ |
| 667 | #define kfifo_dma_in_prepare(fifo, sgl, nents, len) \ | 667 | #define kfifo_dma_in_prepare(fifo, sgl, nents, len) \ |
| 668 | ({ \ | 668 | ({ \ |
| 669 | typeof(fifo + 1) __tmp = (fifo); \ | 669 | typeof((fifo) + 1) __tmp = (fifo); \ |
| 670 | struct scatterlist *__sgl = (sgl); \ | 670 | struct scatterlist *__sgl = (sgl); \ |
| 671 | int __nents = (nents); \ | 671 | int __nents = (nents); \ |
| 672 | unsigned int __len = (len); \ | 672 | unsigned int __len = (len); \ |
| @@ -690,7 +690,7 @@ __kfifo_must_check_helper( \ | |||
| 690 | */ | 690 | */ |
| 691 | #define kfifo_dma_in_finish(fifo, len) \ | 691 | #define kfifo_dma_in_finish(fifo, len) \ |
| 692 | (void)({ \ | 692 | (void)({ \ |
| 693 | typeof(fifo + 1) __tmp = (fifo); \ | 693 | typeof((fifo) + 1) __tmp = (fifo); \ |
| 694 | unsigned int __len = (len); \ | 694 | unsigned int __len = (len); \ |
| 695 | const size_t __recsize = sizeof(*__tmp->rectype); \ | 695 | const size_t __recsize = sizeof(*__tmp->rectype); \ |
| 696 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | 696 | struct __kfifo *__kfifo = &__tmp->kfifo; \ |
| @@ -717,7 +717,7 @@ __kfifo_must_check_helper( \ | |||
| 717 | */ | 717 | */ |
| 718 | #define kfifo_dma_out_prepare(fifo, sgl, nents, len) \ | 718 | #define kfifo_dma_out_prepare(fifo, sgl, nents, len) \ |
| 719 | ({ \ | 719 | ({ \ |
| 720 | typeof(fifo + 1) __tmp = (fifo); \ | 720 | typeof((fifo) + 1) __tmp = (fifo); \ |
| 721 | struct scatterlist *__sgl = (sgl); \ | 721 | struct scatterlist *__sgl = (sgl); \ |
| 722 | int __nents = (nents); \ | 722 | int __nents = (nents); \ |
| 723 | unsigned int __len = (len); \ | 723 | unsigned int __len = (len); \ |
| @@ -741,7 +741,7 @@ __kfifo_must_check_helper( \ | |||
| 741 | */ | 741 | */ |
| 742 | #define kfifo_dma_out_finish(fifo, len) \ | 742 | #define kfifo_dma_out_finish(fifo, len) \ |
| 743 | (void)({ \ | 743 | (void)({ \ |
| 744 | typeof(fifo + 1) __tmp = (fifo); \ | 744 | typeof((fifo) + 1) __tmp = (fifo); \ |
| 745 | unsigned int __len = (len); \ | 745 | unsigned int __len = (len); \ |
| 746 | const size_t __recsize = sizeof(*__tmp->rectype); \ | 746 | const size_t __recsize = sizeof(*__tmp->rectype); \ |
| 747 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | 747 | struct __kfifo *__kfifo = &__tmp->kfifo; \ |
| @@ -766,8 +766,8 @@ __kfifo_must_check_helper( \ | |||
| 766 | #define kfifo_out_peek(fifo, buf, n) \ | 766 | #define kfifo_out_peek(fifo, buf, n) \ |
| 767 | __kfifo_must_check_helper( \ | 767 | __kfifo_must_check_helper( \ |
| 768 | ({ \ | 768 | ({ \ |
| 769 | typeof(fifo + 1) __tmp = (fifo); \ | 769 | typeof((fifo) + 1) __tmp = (fifo); \ |
| 770 | typeof(buf + 1) __buf = (buf); \ | 770 | typeof((buf) + 1) __buf = (buf); \ |
| 771 | unsigned long __n = (n); \ | 771 | unsigned long __n = (n); \ |
| 772 | const size_t __recsize = sizeof(*__tmp->rectype); \ | 772 | const size_t __recsize = sizeof(*__tmp->rectype); \ |
| 773 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | 773 | struct __kfifo *__kfifo = &__tmp->kfifo; \ |
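All of the kfifo changes are one mechanical fix: macro parameters used inside typeof() and similar expressions are now wrapped in parentheses, so the macros stay well-formed when the argument is an expression rather than a plain identifier. A generic userspace illustration of the rule (deliberately not kfifo code):

    #include <stdio.h>

    /* Without parentheses the argument's operators can rebind against the
     * macro body; with them the argument is treated as a single unit. */
    #define DOUBLE_BAD(x)   (x * 2)
    #define DOUBLE_GOOD(x)  ((x) * 2)

    int main(void)
    {
            printf("%d\n", DOUBLE_BAD(1 + 2));   /* expands to (1 + 2 * 2) = 5 */
            printf("%d\n", DOUBLE_GOOD(1 + 2));  /* expands to ((1 + 2) * 2) = 6 */
            return 0;
    }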
diff --git a/include/linux/ksm.h b/include/linux/ksm.h index 74d691ee9121..3319a6967626 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h | |||
| @@ -16,6 +16,9 @@ | |||
| 16 | struct stable_node; | 16 | struct stable_node; |
| 17 | struct mem_cgroup; | 17 | struct mem_cgroup; |
| 18 | 18 | ||
| 19 | struct page *ksm_does_need_to_copy(struct page *page, | ||
| 20 | struct vm_area_struct *vma, unsigned long address); | ||
| 21 | |||
| 19 | #ifdef CONFIG_KSM | 22 | #ifdef CONFIG_KSM |
| 20 | int ksm_madvise(struct vm_area_struct *vma, unsigned long start, | 23 | int ksm_madvise(struct vm_area_struct *vma, unsigned long start, |
| 21 | unsigned long end, int advice, unsigned long *vm_flags); | 24 | unsigned long end, int advice, unsigned long *vm_flags); |
| @@ -70,19 +73,14 @@ static inline void set_page_stable_node(struct page *page, | |||
| 70 | * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE, | 73 | * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE, |
| 71 | * but what if the vma was unmerged while the page was swapped out? | 74 | * but what if the vma was unmerged while the page was swapped out? |
| 72 | */ | 75 | */ |
| 73 | struct page *ksm_does_need_to_copy(struct page *page, | 76 | static inline int ksm_might_need_to_copy(struct page *page, |
| 74 | struct vm_area_struct *vma, unsigned long address); | ||
| 75 | static inline struct page *ksm_might_need_to_copy(struct page *page, | ||
| 76 | struct vm_area_struct *vma, unsigned long address) | 77 | struct vm_area_struct *vma, unsigned long address) |
| 77 | { | 78 | { |
| 78 | struct anon_vma *anon_vma = page_anon_vma(page); | 79 | struct anon_vma *anon_vma = page_anon_vma(page); |
| 79 | 80 | ||
| 80 | if (!anon_vma || | 81 | return anon_vma && |
| 81 | (anon_vma->root == vma->anon_vma->root && | 82 | (anon_vma->root != vma->anon_vma->root || |
| 82 | page->index == linear_page_index(vma, address))) | 83 | page->index != linear_page_index(vma, address)); |
| 83 | return page; | ||
| 84 | |||
| 85 | return ksm_does_need_to_copy(page, vma, address); | ||
| 86 | } | 84 | } |
| 87 | 85 | ||
| 88 | int page_referenced_ksm(struct page *page, | 86 | int page_referenced_ksm(struct page *page, |
| @@ -115,10 +113,10 @@ static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start, | |||
| 115 | return 0; | 113 | return 0; |
| 116 | } | 114 | } |
| 117 | 115 | ||
| 118 | static inline struct page *ksm_might_need_to_copy(struct page *page, | 116 | static inline int ksm_might_need_to_copy(struct page *page, |
| 119 | struct vm_area_struct *vma, unsigned long address) | 117 | struct vm_area_struct *vma, unsigned long address) |
| 120 | { | 118 | { |
| 121 | return page; | 119 | return 0; |
| 122 | } | 120 | } |
| 123 | 121 | ||
| 124 | static inline int page_referenced_ksm(struct page *page, | 122 | static inline int page_referenced_ksm(struct page *page, |
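The ksm.h rework turns ksm_might_need_to_copy() into a cheap yes/no test and hoists the ksm_does_need_to_copy() declaration out of the CONFIG_KSM block. The caller pattern this implies (an assumption about the swap-in path, not shown in this diff) would be roughly:

    /* Sketch only: decide first, copy only when required; error handling
     * and locking around the copy are omitted here. */
    if (ksm_might_need_to_copy(page, vma, address))
            page = ksm_does_need_to_copy(page, vma, address);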
diff --git a/include/linux/lglock.h b/include/linux/lglock.h index b288cb713b90..f549056fb20b 100644 --- a/include/linux/lglock.h +++ b/include/linux/lglock.h | |||
| @@ -150,7 +150,7 @@ | |||
| 150 | int i; \ | 150 | int i; \ |
| 151 | preempt_disable(); \ | 151 | preempt_disable(); \ |
| 152 | rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \ | 152 | rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \ |
| 153 | for_each_online_cpu(i) { \ | 153 | for_each_possible_cpu(i) { \ |
| 154 | arch_spinlock_t *lock; \ | 154 | arch_spinlock_t *lock; \ |
| 155 | lock = &per_cpu(name##_lock, i); \ | 155 | lock = &per_cpu(name##_lock, i); \ |
| 156 | arch_spin_lock(lock); \ | 156 | arch_spin_lock(lock); \ |
| @@ -161,7 +161,7 @@ | |||
| 161 | void name##_global_unlock(void) { \ | 161 | void name##_global_unlock(void) { \ |
| 162 | int i; \ | 162 | int i; \ |
| 163 | rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \ | 163 | rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \ |
| 164 | for_each_online_cpu(i) { \ | 164 | for_each_possible_cpu(i) { \ |
| 165 | arch_spinlock_t *lock; \ | 165 | arch_spinlock_t *lock; \ |
| 166 | lock = &per_cpu(name##_lock, i); \ | 166 | lock = &per_cpu(name##_lock, i); \ |
| 167 | arch_spin_unlock(lock); \ | 167 | arch_spin_unlock(lock); \ |
diff --git a/include/linux/libata.h b/include/linux/libata.h index f010f18a0f86..45fb2967b66d 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
| @@ -335,6 +335,7 @@ enum { | |||
| 335 | ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */ | 335 | ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */ |
| 336 | ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */ | 336 | ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */ |
| 337 | ATA_EHI_QUIET = (1 << 3), /* be quiet */ | 337 | ATA_EHI_QUIET = (1 << 3), /* be quiet */ |
| 338 | ATA_EHI_NO_RECOVERY = (1 << 4), /* no recovery */ | ||
| 338 | 339 | ||
| 339 | ATA_EHI_DID_SOFTRESET = (1 << 16), /* already soft-reset this port */ | 340 | ATA_EHI_DID_SOFTRESET = (1 << 16), /* already soft-reset this port */ |
| 340 | ATA_EHI_DID_HARDRESET = (1 << 17), /* already soft-reset this port */ | 341 | ATA_EHI_DID_HARDRESET = (1 << 17), /* already soft-reset this port */ |
| @@ -723,6 +724,7 @@ struct ata_port { | |||
| 723 | struct ata_ioports ioaddr; /* ATA cmd/ctl/dma register blocks */ | 724 | struct ata_ioports ioaddr; /* ATA cmd/ctl/dma register blocks */ |
| 724 | u8 ctl; /* cache of ATA control register */ | 725 | u8 ctl; /* cache of ATA control register */ |
| 725 | u8 last_ctl; /* Cache last written value */ | 726 | u8 last_ctl; /* Cache last written value */ |
| 727 | struct ata_link* sff_pio_task_link; /* link currently used */ | ||
| 726 | struct delayed_work sff_pio_task; | 728 | struct delayed_work sff_pio_task; |
| 727 | #ifdef CONFIG_ATA_BMDMA | 729 | #ifdef CONFIG_ATA_BMDMA |
| 728 | struct ata_bmdma_prd *bmdma_prd; /* BMDMA SG list */ | 730 | struct ata_bmdma_prd *bmdma_prd; /* BMDMA SG list */ |
| @@ -1594,7 +1596,7 @@ extern void ata_sff_irq_on(struct ata_port *ap); | |||
| 1594 | extern void ata_sff_irq_clear(struct ata_port *ap); | 1596 | extern void ata_sff_irq_clear(struct ata_port *ap); |
| 1595 | extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, | 1597 | extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, |
| 1596 | u8 status, int in_wq); | 1598 | u8 status, int in_wq); |
| 1597 | extern void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay); | 1599 | extern void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay); |
| 1598 | extern unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc); | 1600 | extern unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc); |
| 1599 | extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc); | 1601 | extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc); |
| 1600 | extern unsigned int ata_sff_port_intr(struct ata_port *ap, | 1602 | extern unsigned int ata_sff_port_intr(struct ata_port *ap, |
diff --git a/include/linux/mm.h b/include/linux/mm.h index e6b1210772ce..74949fbef8c6 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
| @@ -864,6 +864,12 @@ int set_page_dirty(struct page *page); | |||
| 864 | int set_page_dirty_lock(struct page *page); | 864 | int set_page_dirty_lock(struct page *page); |
| 865 | int clear_page_dirty_for_io(struct page *page); | 865 | int clear_page_dirty_for_io(struct page *page); |
| 866 | 866 | ||
| 867 | /* Is the vma a continuation of the stack vma above it? */ | ||
| 868 | static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr) | ||
| 869 | { | ||
| 870 | return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); | ||
| 871 | } | ||
| 872 | |||
| 867 | extern unsigned long move_page_tables(struct vm_area_struct *vma, | 873 | extern unsigned long move_page_tables(struct vm_area_struct *vma, |
| 868 | unsigned long old_addr, struct vm_area_struct *new_vma, | 874 | unsigned long old_addr, struct vm_area_struct *new_vma, |
| 869 | unsigned long new_addr, unsigned long len); | 875 | unsigned long new_addr, unsigned long len); |
diff --git a/include/linux/mmc/sdio.h b/include/linux/mmc/sdio.h index 329a8faa6e37..245cdacee544 100644 --- a/include/linux/mmc/sdio.h +++ b/include/linux/mmc/sdio.h | |||
| @@ -38,6 +38,8 @@ | |||
| 38 | * [8:0] Byte/block count | 38 | * [8:0] Byte/block count |
| 39 | */ | 39 | */ |
| 40 | 40 | ||
| 41 | #define R4_MEMORY_PRESENT (1 << 27) | ||
| 42 | |||
| 41 | /* | 43 | /* |
| 42 | SDIO status in R5 | 44 | SDIO status in R5 |
| 43 | Type | 45 | Type |
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 6e6e62648a4d..3984c4eb41fd 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
| @@ -284,6 +284,13 @@ struct zone { | |||
| 284 | unsigned long watermark[NR_WMARK]; | 284 | unsigned long watermark[NR_WMARK]; |
| 285 | 285 | ||
| 286 | /* | 286 | /* |
| 287 | * When free pages are below this point, additional steps are taken | ||
| 288 | * when reading the number of free pages to avoid per-cpu counter | ||
| 289 | * drift allowing watermarks to be breached | ||
| 290 | */ | ||
| 291 | unsigned long percpu_drift_mark; | ||
| 292 | |||
| 293 | /* | ||
| 287 | * We don't know if the memory that we're going to allocate will be freeable | 294 | * We don't know if the memory that we're going to allocate will be freeable |
| 288 | * or/and it will be released eventually, so to avoid totally wasting several | 295 | * or/and it will be released eventually, so to avoid totally wasting several |
| 289 | * GB of ram we must reserve some of the lower zone memory (otherwise we risk | 296 | * GB of ram we must reserve some of the lower zone memory (otherwise we risk |
| @@ -441,6 +448,12 @@ static inline int zone_is_oom_locked(const struct zone *zone) | |||
| 441 | return test_bit(ZONE_OOM_LOCKED, &zone->flags); | 448 | return test_bit(ZONE_OOM_LOCKED, &zone->flags); |
| 442 | } | 449 | } |
| 443 | 450 | ||
| 451 | #ifdef CONFIG_SMP | ||
| 452 | unsigned long zone_nr_free_pages(struct zone *zone); | ||
| 453 | #else | ||
| 454 | #define zone_nr_free_pages(zone) zone_page_state(zone, NR_FREE_PAGES) | ||
| 455 | #endif /* CONFIG_SMP */ | ||
| 456 | |||
| 444 | /* | 457 | /* |
| 445 | * The "priority" of VM scanning is how much of the queues we will scan in one | 458 | * The "priority" of VM scanning is how much of the queues we will scan in one |
| 446 | * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the | 459 | * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the |
diff --git a/include/linux/mutex.h b/include/linux/mutex.h index 878cab4f5fcc..f363bc8fdc74 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h | |||
| @@ -78,6 +78,14 @@ struct mutex_waiter { | |||
| 78 | # include <linux/mutex-debug.h> | 78 | # include <linux/mutex-debug.h> |
| 79 | #else | 79 | #else |
| 80 | # define __DEBUG_MUTEX_INITIALIZER(lockname) | 80 | # define __DEBUG_MUTEX_INITIALIZER(lockname) |
| 81 | /** | ||
| 82 | * mutex_init - initialize the mutex | ||
| 83 | * @mutex: the mutex to be initialized | ||
| 84 | * | ||
| 85 | * Initialize the mutex to unlocked state. | ||
| 86 | * | ||
| 87 | * It is not allowed to initialize an already locked mutex. | ||
| 88 | */ | ||
| 81 | # define mutex_init(mutex) \ | 89 | # define mutex_init(mutex) \ |
| 82 | do { \ | 90 | do { \ |
| 83 | static struct lock_class_key __key; \ | 91 | static struct lock_class_key __key; \ |
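The new kernel-doc spells out the mutex_init() contract: initialize once, to the unlocked state, and never re-initialize a locked mutex. A minimal usage sketch matching that contract (names hypothetical):

    #include <linux/mutex.h>
    #include <linux/init.h>

    static struct mutex my_lock;

    static int __init my_init(void)
    {
            mutex_init(&my_lock);           /* unlocked from this point on */

            mutex_lock(&my_lock);
            /* ... touch the shared state this mutex protects ... */
            mutex_unlock(&my_lock);
            return 0;
    }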
diff --git a/include/linux/pci.h b/include/linux/pci.h index b1d17956a153..c8d95e369ff4 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
| @@ -1214,6 +1214,9 @@ static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus, | |||
| 1214 | unsigned int devfn) | 1214 | unsigned int devfn) |
| 1215 | { return NULL; } | 1215 | { return NULL; } |
| 1216 | 1216 | ||
| 1217 | static inline int pci_domain_nr(struct pci_bus *bus) | ||
| 1218 | { return 0; } | ||
| 1219 | |||
| 1217 | #define dev_is_pci(d) (false) | 1220 | #define dev_is_pci(d) (false) |
| 1218 | #define dev_is_pf(d) (false) | 1221 | #define dev_is_pf(d) (false) |
| 1219 | #define dev_num_vf(d) (0) | 1222 | #define dev_num_vf(d) (0) |
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index f6a3b2d36cad..10d33309e9a6 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
| @@ -2300,6 +2300,8 @@ | |||
| 2300 | #define PCI_DEVICE_ID_P2010 0x0079 | 2300 | #define PCI_DEVICE_ID_P2010 0x0079 |
| 2301 | #define PCI_DEVICE_ID_P1020E 0x0100 | 2301 | #define PCI_DEVICE_ID_P1020E 0x0100 |
| 2302 | #define PCI_DEVICE_ID_P1020 0x0101 | 2302 | #define PCI_DEVICE_ID_P1020 0x0101 |
| 2303 | #define PCI_DEVICE_ID_P1021E 0x0102 | ||
| 2304 | #define PCI_DEVICE_ID_P1021 0x0103 | ||
| 2303 | #define PCI_DEVICE_ID_P1011E 0x0108 | 2305 | #define PCI_DEVICE_ID_P1011E 0x0108 |
| 2304 | #define PCI_DEVICE_ID_P1011 0x0109 | 2306 | #define PCI_DEVICE_ID_P1011 0x0109 |
| 2305 | #define PCI_DEVICE_ID_P1022E 0x0110 | 2307 | #define PCI_DEVICE_ID_P1022E 0x0110 |
diff --git a/include/linux/percpu.h b/include/linux/percpu.h index b8b9084527b1..49466b13c5c6 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h | |||
| @@ -149,7 +149,7 @@ extern void __init percpu_init_late(void); | |||
| 149 | 149 | ||
| 150 | #else /* CONFIG_SMP */ | 150 | #else /* CONFIG_SMP */ |
| 151 | 151 | ||
| 152 | #define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); }) | 152 | #define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); }) |
| 153 | 153 | ||
| 154 | /* can't distinguish from other static vars, always false */ | 154 | /* can't distinguish from other static vars, always false */ |
| 155 | static inline bool is_kernel_percpu_address(unsigned long addr) | 155 | static inline bool is_kernel_percpu_address(unsigned long addr) |
diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h index 7415839ac890..5310d27abd2a 100644 --- a/include/linux/semaphore.h +++ b/include/linux/semaphore.h | |||
| @@ -26,6 +26,9 @@ struct semaphore { | |||
| 26 | .wait_list = LIST_HEAD_INIT((name).wait_list), \ | 26 | .wait_list = LIST_HEAD_INIT((name).wait_list), \ |
| 27 | } | 27 | } |
| 28 | 28 | ||
| 29 | #define DEFINE_SEMAPHORE(name) \ | ||
| 30 | struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1) | ||
| 31 | |||
| 29 | #define DECLARE_MUTEX(name) \ | 32 | #define DECLARE_MUTEX(name) \ |
| 30 | struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1) | 33 | struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1) |
| 31 | 34 | ||
diff --git a/include/linux/serial.h b/include/linux/serial.h index 1ebc694a6d52..ef914061511e 100644 --- a/include/linux/serial.h +++ b/include/linux/serial.h | |||
| @@ -77,8 +77,7 @@ struct serial_struct { | |||
| 77 | #define PORT_16654 11 | 77 | #define PORT_16654 11 |
| 78 | #define PORT_16850 12 | 78 | #define PORT_16850 12 |
| 79 | #define PORT_RSA 13 /* RSA-DV II/S card */ | 79 | #define PORT_RSA 13 /* RSA-DV II/S card */ |
| 80 | #define PORT_U6_16550A 14 | 80 | #define PORT_MAX 13 |
| 81 | #define PORT_MAX 14 | ||
| 82 | 81 | ||
| 83 | #define SERIAL_IO_PORT 0 | 82 | #define SERIAL_IO_PORT 0 |
| 84 | #define SERIAL_IO_HUB6 1 | 83 | #define SERIAL_IO_HUB6 1 |
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 64458a9a8938..563e23400913 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h | |||
| @@ -44,7 +44,8 @@ | |||
| 44 | #define PORT_RM9000 16 /* PMC-Sierra RM9xxx internal UART */ | 44 | #define PORT_RM9000 16 /* PMC-Sierra RM9xxx internal UART */ |
| 45 | #define PORT_OCTEON 17 /* Cavium OCTEON internal UART */ | 45 | #define PORT_OCTEON 17 /* Cavium OCTEON internal UART */ |
| 46 | #define PORT_AR7 18 /* Texas Instruments AR7 internal UART */ | 46 | #define PORT_AR7 18 /* Texas Instruments AR7 internal UART */ |
| 47 | #define PORT_MAX_8250 18 /* max port ID */ | 47 | #define PORT_U6_16550A 19 /* ST-Ericsson U6xxx internal UART */ |
| 48 | #define PORT_MAX_8250 19 /* max port ID */ | ||
| 48 | 49 | ||
| 49 | /* | 50 | /* |
| 50 | * ARM specific type numbers. These are not currently guaranteed | 51 | * ARM specific type numbers. These are not currently guaranteed |
diff --git a/include/linux/swap.h b/include/linux/swap.h index 2fee51a11b73..7cdd63366f88 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h | |||
| @@ -19,6 +19,7 @@ struct bio; | |||
| 19 | #define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */ | 19 | #define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */ |
| 20 | #define SWAP_FLAG_PRIO_MASK 0x7fff | 20 | #define SWAP_FLAG_PRIO_MASK 0x7fff |
| 21 | #define SWAP_FLAG_PRIO_SHIFT 0 | 21 | #define SWAP_FLAG_PRIO_SHIFT 0 |
| 22 | #define SWAP_FLAG_DISCARD 0x10000 /* discard swap cluster after use */ | ||
| 22 | 23 | ||
| 23 | static inline int current_is_kswapd(void) | 24 | static inline int current_is_kswapd(void) |
| 24 | { | 25 | { |
| @@ -142,7 +143,7 @@ struct swap_extent { | |||
| 142 | enum { | 143 | enum { |
| 143 | SWP_USED = (1 << 0), /* is slot in swap_info[] used? */ | 144 | SWP_USED = (1 << 0), /* is slot in swap_info[] used? */ |
| 144 | SWP_WRITEOK = (1 << 1), /* ok to write to this swap? */ | 145 | SWP_WRITEOK = (1 << 1), /* ok to write to this swap? */ |
| 145 | SWP_DISCARDABLE = (1 << 2), /* blkdev supports discard */ | 146 | SWP_DISCARDABLE = (1 << 2), /* swapon+blkdev support discard */ |
| 146 | SWP_DISCARDING = (1 << 3), /* now discarding a free cluster */ | 147 | SWP_DISCARDING = (1 << 3), /* now discarding a free cluster */ |
| 147 | SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */ | 148 | SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */ |
| 148 | SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */ | 149 | SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */ |
| @@ -315,6 +316,7 @@ extern long nr_swap_pages; | |||
| 315 | extern long total_swap_pages; | 316 | extern long total_swap_pages; |
| 316 | extern void si_swapinfo(struct sysinfo *); | 317 | extern void si_swapinfo(struct sysinfo *); |
| 317 | extern swp_entry_t get_swap_page(void); | 318 | extern swp_entry_t get_swap_page(void); |
| 319 | extern swp_entry_t get_swap_page_of_type(int); | ||
| 318 | extern int valid_swaphandles(swp_entry_t, unsigned long *); | 320 | extern int valid_swaphandles(swp_entry_t, unsigned long *); |
| 319 | extern int add_swap_count_continuation(swp_entry_t, gfp_t); | 321 | extern int add_swap_count_continuation(swp_entry_t, gfp_t); |
| 320 | extern void swap_shmem_alloc(swp_entry_t); | 322 | extern void swap_shmem_alloc(swp_entry_t); |
| @@ -331,13 +333,6 @@ extern int reuse_swap_page(struct page *); | |||
| 331 | extern int try_to_free_swap(struct page *); | 333 | extern int try_to_free_swap(struct page *); |
| 332 | struct backing_dev_info; | 334 | struct backing_dev_info; |
| 333 | 335 | ||
| 334 | #ifdef CONFIG_HIBERNATION | ||
| 335 | void hibernation_freeze_swap(void); | ||
| 336 | void hibernation_thaw_swap(void); | ||
| 337 | swp_entry_t get_swap_for_hibernation(int type); | ||
| 338 | void swap_free_for_hibernation(swp_entry_t val); | ||
| 339 | #endif | ||
| 340 | |||
| 341 | /* linux/mm/thrash.c */ | 336 | /* linux/mm/thrash.c */ |
| 342 | extern struct mm_struct *swap_token_mm; | 337 | extern struct mm_struct *swap_token_mm; |
| 343 | extern void grab_swap_token(struct mm_struct *); | 338 | extern void grab_swap_token(struct mm_struct *); |
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 7f43ccdc1d38..eaaea37b3b75 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h | |||
| @@ -170,6 +170,28 @@ static inline unsigned long zone_page_state(struct zone *zone, | |||
| 170 | return x; | 170 | return x; |
| 171 | } | 171 | } |
| 172 | 172 | ||
| 173 | /* | ||
| 174 | * More accurate version that also considers the currently pending | ||
| 175 | * deltas. For that we need to loop over all cpus to find the current | ||
| 176 | * deltas. There is no synchronization so the result cannot be | ||
| 177 | * exactly accurate either. | ||
| 178 | */ | ||
| 179 | static inline unsigned long zone_page_state_snapshot(struct zone *zone, | ||
| 180 | enum zone_stat_item item) | ||
| 181 | { | ||
| 182 | long x = atomic_long_read(&zone->vm_stat[item]); | ||
| 183 | |||
| 184 | #ifdef CONFIG_SMP | ||
| 185 | int cpu; | ||
| 186 | for_each_online_cpu(cpu) | ||
| 187 | x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item]; | ||
| 188 | |||
| 189 | if (x < 0) | ||
| 190 | x = 0; | ||
| 191 | #endif | ||
| 192 | return x; | ||
| 193 | } | ||
| 194 | |||
| 173 | extern unsigned long global_reclaimable_pages(void); | 195 | extern unsigned long global_reclaimable_pages(void); |
| 174 | extern unsigned long zone_reclaimable_pages(struct zone *zone); | 196 | extern unsigned long zone_reclaimable_pages(struct zone *zone); |
| 175 | 197 | ||
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 4f9d277bcd9a..f11100f96482 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h | |||
| @@ -25,18 +25,20 @@ typedef void (*work_func_t)(struct work_struct *work); | |||
| 25 | 25 | ||
| 26 | enum { | 26 | enum { |
| 27 | WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */ | 27 | WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */ |
| 28 | WORK_STRUCT_CWQ_BIT = 1, /* data points to cwq */ | 28 | WORK_STRUCT_DELAYED_BIT = 1, /* work item is delayed */ |
| 29 | WORK_STRUCT_LINKED_BIT = 2, /* next work is linked to this one */ | 29 | WORK_STRUCT_CWQ_BIT = 2, /* data points to cwq */ |
| 30 | WORK_STRUCT_LINKED_BIT = 3, /* next work is linked to this one */ | ||
| 30 | #ifdef CONFIG_DEBUG_OBJECTS_WORK | 31 | #ifdef CONFIG_DEBUG_OBJECTS_WORK |
| 31 | WORK_STRUCT_STATIC_BIT = 3, /* static initializer (debugobjects) */ | 32 | WORK_STRUCT_STATIC_BIT = 4, /* static initializer (debugobjects) */ |
| 32 | WORK_STRUCT_COLOR_SHIFT = 4, /* color for workqueue flushing */ | 33 | WORK_STRUCT_COLOR_SHIFT = 5, /* color for workqueue flushing */ |
| 33 | #else | 34 | #else |
| 34 | WORK_STRUCT_COLOR_SHIFT = 3, /* color for workqueue flushing */ | 35 | WORK_STRUCT_COLOR_SHIFT = 4, /* color for workqueue flushing */ |
| 35 | #endif | 36 | #endif |
| 36 | 37 | ||
| 37 | WORK_STRUCT_COLOR_BITS = 4, | 38 | WORK_STRUCT_COLOR_BITS = 4, |
| 38 | 39 | ||
| 39 | WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT, | 40 | WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT, |
| 41 | WORK_STRUCT_DELAYED = 1 << WORK_STRUCT_DELAYED_BIT, | ||
| 40 | WORK_STRUCT_CWQ = 1 << WORK_STRUCT_CWQ_BIT, | 42 | WORK_STRUCT_CWQ = 1 << WORK_STRUCT_CWQ_BIT, |
| 41 | WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT, | 43 | WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT, |
| 42 | #ifdef CONFIG_DEBUG_OBJECTS_WORK | 44 | #ifdef CONFIG_DEBUG_OBJECTS_WORK |
| @@ -59,8 +61,8 @@ enum { | |||
| 59 | 61 | ||
| 60 | /* | 62 | /* |
| 61 | * Reserve 7 bits off of cwq pointer w/ debugobjects turned | 63 | * Reserve 7 bits off of cwq pointer w/ debugobjects turned |
| 62 | * off. This makes cwqs aligned to 128 bytes which isn't too | 64 | * off. This makes cwqs aligned to 256 bytes and allows 15 |
| 63 | * excessive while allowing 15 workqueue flush colors. | 65 | * workqueue flush colors. |
| 64 | */ | 66 | */ |
| 65 | WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT + | 67 | WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT + |
| 66 | WORK_STRUCT_COLOR_BITS, | 68 | WORK_STRUCT_COLOR_BITS, |
| @@ -241,6 +243,8 @@ enum { | |||
| 241 | WQ_HIGHPRI = 1 << 4, /* high priority */ | 243 | WQ_HIGHPRI = 1 << 4, /* high priority */ |
| 242 | WQ_CPU_INTENSIVE = 1 << 5, /* cpu instensive workqueue */ | 244 | WQ_CPU_INTENSIVE = 1 << 5, /* cpu instensive workqueue */ |
| 243 | 245 | ||
| 246 | WQ_DYING = 1 << 6, /* internal: workqueue is dying */ | ||
| 247 | |||
| 244 | WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */ | 248 | WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */ |
| 245 | WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */ | 249 | WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */ |
| 246 | WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2, | 250 | WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2, |
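Worked out with debugobjects off, the new values give WORK_STRUCT_COLOR_SHIFT = 4 and WORK_STRUCT_COLOR_BITS = 4, so WORK_STRUCT_FLAG_BITS = 4 + 4 = 8: cwq pointers must be aligned to 1 << 8 = 256 bytes, and the 4 color bits yield (1 << 4) - 1 = 15 usable flush colors, with one of the 16 values presumably reserved to mean "no color". The unchanged "Reserve 7 bits" wording in the surrounding comment appears to be left over from the previous layout, which used only 7 flag bits.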
diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h index 726cc3536409..ef6c24a529e1 100644 --- a/include/net/cls_cgroup.h +++ b/include/net/cls_cgroup.h | |||
| @@ -27,11 +27,17 @@ struct cgroup_cls_state | |||
| 27 | #ifdef CONFIG_NET_CLS_CGROUP | 27 | #ifdef CONFIG_NET_CLS_CGROUP |
| 28 | static inline u32 task_cls_classid(struct task_struct *p) | 28 | static inline u32 task_cls_classid(struct task_struct *p) |
| 29 | { | 29 | { |
| 30 | int classid; | ||
| 31 | |||
| 30 | if (in_interrupt()) | 32 | if (in_interrupt()) |
| 31 | return 0; | 33 | return 0; |
| 32 | 34 | ||
| 33 | return container_of(task_subsys_state(p, net_cls_subsys_id), | 35 | rcu_read_lock(); |
| 34 | struct cgroup_cls_state, css)->classid; | 36 | classid = container_of(task_subsys_state(p, net_cls_subsys_id), |
| 37 | struct cgroup_cls_state, css)->classid; | ||
| 38 | rcu_read_unlock(); | ||
| 39 | |||
| 40 | return classid; | ||
| 35 | } | 41 | } |
| 36 | #else | 42 | #else |
| 37 | extern int net_cls_subsys_id; | 43 | extern int net_cls_subsys_id; |
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index a4747a0f7303..f976885f686f 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h | |||
| @@ -955,6 +955,9 @@ static inline __wsum ip_vs_check_diff2(__be16 old, __be16 new, __wsum oldsum) | |||
| 955 | return csum_partial(diff, sizeof(diff), oldsum); | 955 | return csum_partial(diff, sizeof(diff), oldsum); |
| 956 | } | 956 | } |
| 957 | 957 | ||
| 958 | extern void ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, | ||
| 959 | int outin); | ||
| 960 | |||
| 958 | #endif /* __KERNEL__ */ | 961 | #endif /* __KERNEL__ */ |
| 959 | 962 | ||
| 960 | #endif /* _NET_IP_VS_H */ | 963 | #endif /* _NET_IP_VS_H */ |
diff --git a/include/net/sock.h b/include/net/sock.h index ac53bfbdfe16..adab9dc58183 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
| @@ -752,6 +752,7 @@ struct proto { | |||
| 752 | /* Keeping track of sk's, looking them up, and port selection methods. */ | 752 | /* Keeping track of sk's, looking them up, and port selection methods. */ |
| 753 | void (*hash)(struct sock *sk); | 753 | void (*hash)(struct sock *sk); |
| 754 | void (*unhash)(struct sock *sk); | 754 | void (*unhash)(struct sock *sk); |
| 755 | void (*rehash)(struct sock *sk); | ||
| 755 | int (*get_port)(struct sock *sk, unsigned short snum); | 756 | int (*get_port)(struct sock *sk, unsigned short snum); |
| 756 | 757 | ||
| 757 | /* Keeping track of sockets in use */ | 758 | /* Keeping track of sockets in use */ |
diff --git a/include/net/udp.h b/include/net/udp.h index 7abdf305da50..a184d3496b13 100644 --- a/include/net/udp.h +++ b/include/net/udp.h | |||
| @@ -151,6 +151,7 @@ static inline void udp_lib_hash(struct sock *sk) | |||
| 151 | } | 151 | } |
| 152 | 152 | ||
| 153 | extern void udp_lib_unhash(struct sock *sk); | 153 | extern void udp_lib_unhash(struct sock *sk); |
| 154 | extern void udp_lib_rehash(struct sock *sk, u16 new_hash); | ||
| 154 | 155 | ||
| 155 | static inline void udp_lib_close(struct sock *sk, long timeout) | 156 | static inline void udp_lib_close(struct sock *sk, long timeout) |
| 156 | { | 157 | { |
diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 192f88c5b0f9..c9483d8f6140 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c | |||
| @@ -1791,19 +1791,20 @@ out: | |||
| 1791 | } | 1791 | } |
| 1792 | 1792 | ||
| 1793 | /** | 1793 | /** |
| 1794 | * cgroup_attach_task_current_cg - attach task 'tsk' to current task's cgroup | 1794 | * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from' |
| 1795 | * @from: attach to all cgroups of a given task | ||
| 1795 | * @tsk: the task to be attached | 1796 | * @tsk: the task to be attached |
| 1796 | */ | 1797 | */ |
| 1797 | int cgroup_attach_task_current_cg(struct task_struct *tsk) | 1798 | int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk) |
| 1798 | { | 1799 | { |
| 1799 | struct cgroupfs_root *root; | 1800 | struct cgroupfs_root *root; |
| 1800 | struct cgroup *cur_cg; | ||
| 1801 | int retval = 0; | 1801 | int retval = 0; |
| 1802 | 1802 | ||
| 1803 | cgroup_lock(); | 1803 | cgroup_lock(); |
| 1804 | for_each_active_root(root) { | 1804 | for_each_active_root(root) { |
| 1805 | cur_cg = task_cgroup_from_root(current, root); | 1805 | struct cgroup *from_cg = task_cgroup_from_root(from, root); |
| 1806 | retval = cgroup_attach_task(cur_cg, tsk); | 1806 | |
| 1807 | retval = cgroup_attach_task(from_cg, tsk); | ||
| 1807 | if (retval) | 1808 | if (retval) |
| 1808 | break; | 1809 | break; |
| 1809 | } | 1810 | } |
| @@ -1811,7 +1812,7 @@ int cgroup_attach_task_current_cg(struct task_struct *tsk) | |||
| 1811 | 1812 | ||
| 1812 | return retval; | 1813 | return retval; |
| 1813 | } | 1814 | } |
| 1814 | EXPORT_SYMBOL_GPL(cgroup_attach_task_current_cg); | 1815 | EXPORT_SYMBOL_GPL(cgroup_attach_task_all); |
| 1815 | 1816 | ||
| 1816 | /* | 1817 | /* |
| 1817 | * Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex | 1818 | * Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex |
diff --git a/kernel/debug/kdb/kdb_bp.c b/kernel/debug/kdb/kdb_bp.c index 75bd9b3ebbb7..20059ef4459a 100644 --- a/kernel/debug/kdb/kdb_bp.c +++ b/kernel/debug/kdb/kdb_bp.c | |||
| @@ -274,7 +274,6 @@ static int kdb_bp(int argc, const char **argv) | |||
| 274 | int i, bpno; | 274 | int i, bpno; |
| 275 | kdb_bp_t *bp, *bp_check; | 275 | kdb_bp_t *bp, *bp_check; |
| 276 | int diag; | 276 | int diag; |
| 277 | int free; | ||
| 278 | char *symname = NULL; | 277 | char *symname = NULL; |
| 279 | long offset = 0ul; | 278 | long offset = 0ul; |
| 280 | int nextarg; | 279 | int nextarg; |
| @@ -305,7 +304,6 @@ static int kdb_bp(int argc, const char **argv) | |||
| 305 | /* | 304 | /* |
| 306 | * Find an empty bp structure to allocate | 305 | * Find an empty bp structure to allocate |
| 307 | */ | 306 | */ |
| 308 | free = KDB_MAXBPT; | ||
| 309 | for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT; bpno++, bp++) { | 307 | for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT; bpno++, bp++) { |
| 310 | if (bp->bp_free) | 308 | if (bp->bp_free) |
| 311 | break; | 309 | break; |
diff --git a/kernel/gcov/fs.c b/kernel/gcov/fs.c index ef3c3f88a7a3..f83972b16564 100644 --- a/kernel/gcov/fs.c +++ b/kernel/gcov/fs.c | |||
| @@ -33,10 +33,11 @@ | |||
| 33 | * @children: child nodes | 33 | * @children: child nodes |
| 34 | * @all: list head for list of all nodes | 34 | * @all: list head for list of all nodes |
| 35 | * @parent: parent node | 35 | * @parent: parent node |
| 36 | * @info: associated profiling data structure if not a directory | 36 | * @loaded_info: array of pointers to profiling data sets for loaded object |
| 37 | * @ghost: when an object file containing profiling data is unloaded we keep a | 37 | * files. |
| 38 | * copy of the profiling data here to allow collecting coverage data | 38 | * @num_loaded: number of profiling data sets for loaded object files. |
| 39 | * for cleanup code. Such a node is called a "ghost". | 39 | * @unloaded_info: accumulated copy of profiling data sets for unloaded |
| 40 | * object files. Used only when gcov_persist=1. | ||
| 40 | * @dentry: main debugfs entry, either a directory or data file | 41 | * @dentry: main debugfs entry, either a directory or data file |
| 41 | * @links: associated symbolic links | 42 | * @links: associated symbolic links |
| 42 | * @name: data file basename | 43 | * @name: data file basename |
| @@ -51,10 +52,11 @@ struct gcov_node { | |||
| 51 | struct list_head children; | 52 | struct list_head children; |
| 52 | struct list_head all; | 53 | struct list_head all; |
| 53 | struct gcov_node *parent; | 54 | struct gcov_node *parent; |
| 54 | struct gcov_info *info; | 55 | struct gcov_info **loaded_info; |
| 55 | struct gcov_info *ghost; | 56 | struct gcov_info *unloaded_info; |
| 56 | struct dentry *dentry; | 57 | struct dentry *dentry; |
| 57 | struct dentry **links; | 58 | struct dentry **links; |
| 59 | int num_loaded; | ||
| 58 | char name[0]; | 60 | char name[0]; |
| 59 | }; | 61 | }; |
| 60 | 62 | ||
| @@ -136,16 +138,37 @@ static const struct seq_operations gcov_seq_ops = { | |||
| 136 | }; | 138 | }; |
| 137 | 139 | ||
| 138 | /* | 140 | /* |
| 139 | * Return the profiling data set for a given node. This can either be the | 141 | * Return a profiling data set associated with the given node. This is |
| 140 | * original profiling data structure or a duplicate (also called "ghost") | 142 | * either a data set for a loaded object file or a data set copy in case |
| 141 | * in case the associated object file has been unloaded. | 143 | * all associated object files have been unloaded. |
| 142 | */ | 144 | */ |
| 143 | static struct gcov_info *get_node_info(struct gcov_node *node) | 145 | static struct gcov_info *get_node_info(struct gcov_node *node) |
| 144 | { | 146 | { |
| 145 | if (node->info) | 147 | if (node->num_loaded > 0) |
| 146 | return node->info; | 148 | return node->loaded_info[0]; |
| 147 | 149 | ||
| 148 | return node->ghost; | 150 | return node->unloaded_info; |
| 151 | } | ||
| 152 | |||
| 153 | /* | ||
| 154 | * Return a newly allocated profiling data set which contains the sum of | ||
| 155 | * all profiling data associated with the given node. | ||
| 156 | */ | ||
| 157 | static struct gcov_info *get_accumulated_info(struct gcov_node *node) | ||
| 158 | { | ||
| 159 | struct gcov_info *info; | ||
| 160 | int i = 0; | ||
| 161 | |||
| 162 | if (node->unloaded_info) | ||
| 163 | info = gcov_info_dup(node->unloaded_info); | ||
| 164 | else | ||
| 165 | info = gcov_info_dup(node->loaded_info[i++]); | ||
| 166 | if (!info) | ||
| 167 | return NULL; | ||
| 168 | for (; i < node->num_loaded; i++) | ||
| 169 | gcov_info_add(info, node->loaded_info[i]); | ||
| 170 | |||
| 171 | return info; | ||
| 149 | } | 172 | } |
| 150 | 173 | ||
| 151 | /* | 174 | /* |
| @@ -163,9 +186,10 @@ static int gcov_seq_open(struct inode *inode, struct file *file) | |||
| 163 | mutex_lock(&node_lock); | 186 | mutex_lock(&node_lock); |
| 164 | /* | 187 | /* |
| 165 | * Read from a profiling data copy to minimize reference tracking | 188 | * Read from a profiling data copy to minimize reference tracking |
| 166 | * complexity and concurrent access. | 189 | * complexity and concurrent access and to keep accumulating multiple |
| 190 | * profiling data sets associated with one node simple. | ||
| 167 | */ | 191 | */ |
| 168 | info = gcov_info_dup(get_node_info(node)); | 192 | info = get_accumulated_info(node); |
| 169 | if (!info) | 193 | if (!info) |
| 170 | goto out_unlock; | 194 | goto out_unlock; |
| 171 | iter = gcov_iter_new(info); | 195 | iter = gcov_iter_new(info); |
| @@ -225,12 +249,25 @@ static struct gcov_node *get_node_by_name(const char *name) | |||
| 225 | return NULL; | 249 | return NULL; |
| 226 | } | 250 | } |
| 227 | 251 | ||
| 252 | /* | ||
| 253 | * Reset all profiling data associated with the specified node. | ||
| 254 | */ | ||
| 255 | static void reset_node(struct gcov_node *node) | ||
| 256 | { | ||
| 257 | int i; | ||
| 258 | |||
| 259 | if (node->unloaded_info) | ||
| 260 | gcov_info_reset(node->unloaded_info); | ||
| 261 | for (i = 0; i < node->num_loaded; i++) | ||
| 262 | gcov_info_reset(node->loaded_info[i]); | ||
| 263 | } | ||
| 264 | |||
| 228 | static void remove_node(struct gcov_node *node); | 265 | static void remove_node(struct gcov_node *node); |
| 229 | 266 | ||
| 230 | /* | 267 | /* |
| 231 | * write() implementation for gcov data files. Reset profiling data for the | 268 | * write() implementation for gcov data files. Reset profiling data for the |
| 232 | * associated file. If the object file has been unloaded (i.e. this is | 269 | * corresponding file. If all associated object files have been unloaded, |
| 233 | * a "ghost" node), remove the debug fs node as well. | 270 | * remove the debug fs node as well. |
| 234 | */ | 271 | */ |
| 235 | static ssize_t gcov_seq_write(struct file *file, const char __user *addr, | 272 | static ssize_t gcov_seq_write(struct file *file, const char __user *addr, |
| 236 | size_t len, loff_t *pos) | 273 | size_t len, loff_t *pos) |
| @@ -245,10 +282,10 @@ static ssize_t gcov_seq_write(struct file *file, const char __user *addr, | |||
| 245 | node = get_node_by_name(info->filename); | 282 | node = get_node_by_name(info->filename); |
| 246 | if (node) { | 283 | if (node) { |
| 247 | /* Reset counts or remove node for unloaded modules. */ | 284 | /* Reset counts or remove node for unloaded modules. */ |
| 248 | if (node->ghost) | 285 | if (node->num_loaded == 0) |
| 249 | remove_node(node); | 286 | remove_node(node); |
| 250 | else | 287 | else |
| 251 | gcov_info_reset(node->info); | 288 | reset_node(node); |
| 252 | } | 289 | } |
| 253 | /* Reset counts for open file. */ | 290 | /* Reset counts for open file. */ |
| 254 | gcov_info_reset(info); | 291 | gcov_info_reset(info); |
| @@ -378,7 +415,10 @@ static void init_node(struct gcov_node *node, struct gcov_info *info, | |||
| 378 | INIT_LIST_HEAD(&node->list); | 415 | INIT_LIST_HEAD(&node->list); |
| 379 | INIT_LIST_HEAD(&node->children); | 416 | INIT_LIST_HEAD(&node->children); |
| 380 | INIT_LIST_HEAD(&node->all); | 417 | INIT_LIST_HEAD(&node->all); |
| 381 | node->info = info; | 418 | if (node->loaded_info) { |
| 419 | node->loaded_info[0] = info; | ||
| 420 | node->num_loaded = 1; | ||
| 421 | } | ||
| 382 | node->parent = parent; | 422 | node->parent = parent; |
| 383 | if (name) | 423 | if (name) |
| 384 | strcpy(node->name, name); | 424 | strcpy(node->name, name); |
| @@ -394,9 +434,13 @@ static struct gcov_node *new_node(struct gcov_node *parent, | |||
| 394 | struct gcov_node *node; | 434 | struct gcov_node *node; |
| 395 | 435 | ||
| 396 | node = kzalloc(sizeof(struct gcov_node) + strlen(name) + 1, GFP_KERNEL); | 436 | node = kzalloc(sizeof(struct gcov_node) + strlen(name) + 1, GFP_KERNEL); |
| 397 | if (!node) { | 437 | if (!node) |
| 398 | pr_warning("out of memory\n"); | 438 | goto err_nomem; |
| 399 | return NULL; | 439 | if (info) { |
| 440 | node->loaded_info = kcalloc(1, sizeof(struct gcov_info *), | ||
| 441 | GFP_KERNEL); | ||
| 442 | if (!node->loaded_info) | ||
| 443 | goto err_nomem; | ||
| 400 | } | 444 | } |
| 401 | init_node(node, info, name, parent); | 445 | init_node(node, info, name, parent); |
| 402 | /* Differentiate between gcov data file nodes and directory nodes. */ | 446 | /* Differentiate between gcov data file nodes and directory nodes. */ |
| @@ -416,6 +460,11 @@ static struct gcov_node *new_node(struct gcov_node *parent, | |||
| 416 | list_add(&node->all, &all_head); | 460 | list_add(&node->all, &all_head); |
| 417 | 461 | ||
| 418 | return node; | 462 | return node; |
| 463 | |||
| 464 | err_nomem: | ||
| 465 | kfree(node); | ||
| 466 | pr_warning("out of memory\n"); | ||
| 467 | return NULL; | ||
| 419 | } | 468 | } |
| 420 | 469 | ||
| 421 | /* Remove symbolic links associated with node. */ | 470 | /* Remove symbolic links associated with node. */ |
| @@ -441,8 +490,9 @@ static void release_node(struct gcov_node *node) | |||
| 441 | list_del(&node->all); | 490 | list_del(&node->all); |
| 442 | debugfs_remove(node->dentry); | 491 | debugfs_remove(node->dentry); |
| 443 | remove_links(node); | 492 | remove_links(node); |
| 444 | if (node->ghost) | 493 | kfree(node->loaded_info); |
| 445 | gcov_info_free(node->ghost); | 494 | if (node->unloaded_info) |
| 495 | gcov_info_free(node->unloaded_info); | ||
| 446 | kfree(node); | 496 | kfree(node); |
| 447 | } | 497 | } |
| 448 | 498 | ||
| @@ -477,7 +527,7 @@ static struct gcov_node *get_child_by_name(struct gcov_node *parent, | |||
| 477 | 527 | ||
| 478 | /* | 528 | /* |
| 479 | * write() implementation for reset file. Reset all profiling data to zero | 529 | * write() implementation for reset file. Reset all profiling data to zero |
| 480 | * and remove ghost nodes. | 530 | * and remove nodes for which all associated object files are unloaded. |
| 481 | */ | 531 | */ |
| 482 | static ssize_t reset_write(struct file *file, const char __user *addr, | 532 | static ssize_t reset_write(struct file *file, const char __user *addr, |
| 483 | size_t len, loff_t *pos) | 533 | size_t len, loff_t *pos) |
| @@ -487,8 +537,8 @@ static ssize_t reset_write(struct file *file, const char __user *addr, | |||
| 487 | mutex_lock(&node_lock); | 537 | mutex_lock(&node_lock); |
| 488 | restart: | 538 | restart: |
| 489 | list_for_each_entry(node, &all_head, all) { | 539 | list_for_each_entry(node, &all_head, all) { |
| 490 | if (node->info) | 540 | if (node->num_loaded > 0) |
| 491 | gcov_info_reset(node->info); | 541 | reset_node(node); |
| 492 | else if (list_empty(&node->children)) { | 542 | else if (list_empty(&node->children)) { |
| 493 | remove_node(node); | 543 | remove_node(node); |
| 494 | /* Several nodes may have gone - restart loop. */ | 544 | /* Several nodes may have gone - restart loop. */ |
| @@ -564,37 +614,115 @@ err_remove: | |||
| 564 | } | 614 | } |
| 565 | 615 | ||
| 566 | /* | 616 | /* |
| 567 | * The profiling data set associated with this node is being unloaded. Store a | 617 | * Associate a profiling data set with an existing node. Needs to be called |
| 568 | * copy of the profiling data and turn this node into a "ghost". | 618 | * with node_lock held. |
| 569 | */ | 619 | */ |
| 570 | static int ghost_node(struct gcov_node *node) | 620 | static void add_info(struct gcov_node *node, struct gcov_info *info) |
| 571 | { | 621 | { |
| 572 | node->ghost = gcov_info_dup(node->info); | 622 | struct gcov_info **loaded_info; |
| 573 | if (!node->ghost) { | 623 | int num = node->num_loaded; |
| 574 | pr_warning("could not save data for '%s' (out of memory)\n", | 624 | |
| 575 | node->info->filename); | 625 | /* |
| 576 | return -ENOMEM; | 626 | * Prepare new array. This is done first to simplify cleanup in |
| 627 | * case the new data set is incompatible, the node only contains | ||
| 628 | * unloaded data sets and there's not enough memory for the array. | ||
| 629 | */ | ||
| 630 | loaded_info = kcalloc(num + 1, sizeof(struct gcov_info *), GFP_KERNEL); | ||
| 631 | if (!loaded_info) { | ||
| 632 | pr_warning("could not add '%s' (out of memory)\n", | ||
| 633 | info->filename); | ||
| 634 | return; | ||
| 635 | } | ||
| 636 | memcpy(loaded_info, node->loaded_info, | ||
| 637 | num * sizeof(struct gcov_info *)); | ||
| 638 | loaded_info[num] = info; | ||
| 639 | /* Check if the new data set is compatible. */ | ||
| 640 | if (num == 0) { | ||
| 641 | /* | ||
| 642 | * A module was unloaded, modified and reloaded. The new | ||
| 643 | * data set replaces the copy of the last one. | ||
| 644 | */ | ||
| 645 | if (!gcov_info_is_compatible(node->unloaded_info, info)) { | ||
| 646 | pr_warning("discarding saved data for %s " | ||
| 647 | "(incompatible version)\n", info->filename); | ||
| 648 | gcov_info_free(node->unloaded_info); | ||
| 649 | node->unloaded_info = NULL; | ||
| 650 | } | ||
| 651 | } else { | ||
| 652 | /* | ||
| 653 | * Two different versions of the same object file are loaded. | ||
| 654 | * The initial one takes precedence. | ||
| 655 | */ | ||
| 656 | if (!gcov_info_is_compatible(node->loaded_info[0], info)) { | ||
| 657 | pr_warning("could not add '%s' (incompatible " | ||
| 658 | "version)\n", info->filename); | ||
| 659 | kfree(loaded_info); | ||
| 660 | return; | ||
| 661 | } | ||
| 577 | } | 662 | } |
| 578 | node->info = NULL; | 663 | /* Overwrite previous array. */ |
| 664 | kfree(node->loaded_info); | ||
| 665 | node->loaded_info = loaded_info; | ||
| 666 | node->num_loaded = num + 1; | ||
| 667 | } | ||
| 579 | 668 | ||
| 580 | return 0; | 669 | /* |
| 670 | * Return the index of a profiling data set associated with a node. | ||
| 671 | */ | ||
| 672 | static int get_info_index(struct gcov_node *node, struct gcov_info *info) | ||
| 673 | { | ||
| 674 | int i; | ||
| 675 | |||
| 676 | for (i = 0; i < node->num_loaded; i++) { | ||
| 677 | if (node->loaded_info[i] == info) | ||
| 678 | return i; | ||
| 679 | } | ||
| 680 | return -ENOENT; | ||
| 581 | } | 681 | } |
| 582 | 682 | ||
| 583 | /* | 683 | /* |
| 584 | * Profiling data for this node has been loaded again. Add profiling data | 684 | * Save the data of a profiling data set which is being unloaded. |
| 585 | * from previous instantiation and turn this node into a regular node. | ||
| 586 | */ | 685 | */ |
| 587 | static void revive_node(struct gcov_node *node, struct gcov_info *info) | 686 | static void save_info(struct gcov_node *node, struct gcov_info *info) |
| 588 | { | 687 | { |
| 589 | if (gcov_info_is_compatible(node->ghost, info)) | 688 | if (node->unloaded_info) |
| 590 | gcov_info_add(info, node->ghost); | 689 | gcov_info_add(node->unloaded_info, info); |
| 591 | else { | 690 | else { |
| 592 | pr_warning("discarding saved data for '%s' (version changed)\n", | 691 | node->unloaded_info = gcov_info_dup(info); |
| 692 | if (!node->unloaded_info) { | ||
| 693 | pr_warning("could not save data for '%s' " | ||
| 694 | "(out of memory)\n", info->filename); | ||
| 695 | } | ||
| 696 | } | ||
| 697 | } | ||
| 698 | |||
| 699 | /* | ||
| 700 | * Disassociate a profiling data set from a node. Needs to be called with | ||
| 701 | * node_lock held. | ||
| 702 | */ | ||
| 703 | static void remove_info(struct gcov_node *node, struct gcov_info *info) | ||
| 704 | { | ||
| 705 | int i; | ||
| 706 | |||
| 707 | i = get_info_index(node, info); | ||
| 708 | if (i < 0) { | ||
| 709 | pr_warning("could not remove '%s' (not found)\n", | ||
| 593 | info->filename); | 710 | info->filename); |
| 711 | return; | ||
| 594 | } | 712 | } |
| 595 | gcov_info_free(node->ghost); | 713 | if (gcov_persist) |
| 596 | node->ghost = NULL; | 714 | save_info(node, info); |
| 597 | node->info = info; | 715 | /* Shrink array. */ |
| 716 | node->loaded_info[i] = node->loaded_info[node->num_loaded - 1]; | ||
| 717 | node->num_loaded--; | ||
| 718 | if (node->num_loaded > 0) | ||
| 719 | return; | ||
| 720 | /* Last loaded data set was removed. */ | ||
| 721 | kfree(node->loaded_info); | ||
| 722 | node->loaded_info = NULL; | ||
| 723 | node->num_loaded = 0; | ||
| 724 | if (!node->unloaded_info) | ||
| 725 | remove_node(node); | ||
| 598 | } | 726 | } |
| 599 | 727 | ||
| 600 | /* | 728 | /* |
| @@ -609,30 +737,18 @@ void gcov_event(enum gcov_action action, struct gcov_info *info) | |||
| 609 | node = get_node_by_name(info->filename); | 737 | node = get_node_by_name(info->filename); |
| 610 | switch (action) { | 738 | switch (action) { |
| 611 | case GCOV_ADD: | 739 | case GCOV_ADD: |
| 612 | /* Add new node or revive ghost. */ | 740 | if (node) |
| 613 | if (!node) { | 741 | add_info(node, info); |
| 742 | else | ||
| 614 | add_node(info); | 743 | add_node(info); |
| 615 | break; | ||
| 616 | } | ||
| 617 | if (gcov_persist) | ||
| 618 | revive_node(node, info); | ||
| 619 | else { | ||
| 620 | pr_warning("could not add '%s' (already exists)\n", | ||
| 621 | info->filename); | ||
| 622 | } | ||
| 623 | break; | 744 | break; |
| 624 | case GCOV_REMOVE: | 745 | case GCOV_REMOVE: |
| 625 | /* Remove node or turn into ghost. */ | 746 | if (node) |
| 626 | if (!node) { | 747 | remove_info(node, info); |
| 748 | else { | ||
| 627 | pr_warning("could not remove '%s' (not found)\n", | 749 | pr_warning("could not remove '%s' (not found)\n", |
| 628 | info->filename); | 750 | info->filename); |
| 629 | break; | ||
| 630 | } | 751 | } |
| 631 | if (gcov_persist) { | ||
| 632 | if (!ghost_node(node)) | ||
| 633 | break; | ||
| 634 | } | ||
| 635 | remove_node(node); | ||
| 636 | break; | 752 | break; |
| 637 | } | 753 | } |
| 638 | mutex_unlock(&node_lock); | 754 | mutex_unlock(&node_lock); |
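The gcov/fs.c rework replaces the single info/ghost pointer pair on each node with a loaded_info pointer array plus one unloaded_info accumulator, so several loaded instances of the same object file can be tracked and summed. add_info() builds the enlarged array first and only swaps it in after the compatibility check has passed, which keeps the failure paths from leaving the node half-updated. A minimal user-space sketch of that grow-validate-swap pattern (generic C, not the kernel code; the struct and function names are invented for the example):

#include <stdlib.h>
#include <string.h>

struct demo_node {
	void **loaded;		/* array of pointers to loaded data sets */
	int num_loaded;
};

/* Returns 0 on success, -1 if the replacement array cannot be allocated. */
static int demo_add(struct demo_node *node, void *item)
{
	int num = node->num_loaded;
	void **grown;

	/* Prepare the replacement array before touching the node. */
	grown = calloc(num + 1, sizeof(*grown));
	if (!grown)
		return -1;
	if (num)
		memcpy(grown, node->loaded, num * sizeof(*grown));
	grown[num] = item;

	/* ... compatibility check against grown[0] would go here ... */

	free(node->loaded);	/* only now overwrite the previous array */
	node->loaded = grown;
	node->num_loaded = num + 1;
	return 0;
}

int main(void)
{
	struct demo_node node = { NULL, 0 };
	int dummy;

	return demo_add(&node, &dummy) == 0 && node.num_loaded == 1 ? 0 : 1;
}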
diff --git a/kernel/groups.c b/kernel/groups.c index 53b1916c9492..253dc0f35cf4 100644 --- a/kernel/groups.c +++ b/kernel/groups.c | |||
| @@ -143,10 +143,9 @@ int groups_search(const struct group_info *group_info, gid_t grp) | |||
| 143 | right = group_info->ngroups; | 143 | right = group_info->ngroups; |
| 144 | while (left < right) { | 144 | while (left < right) { |
| 145 | unsigned int mid = (left+right)/2; | 145 | unsigned int mid = (left+right)/2; |
| 146 | int cmp = grp - GROUP_AT(group_info, mid); | 146 | if (grp > GROUP_AT(group_info, mid)) |
| 147 | if (cmp > 0) | ||
| 148 | left = mid + 1; | 147 | left = mid + 1; |
| 149 | else if (cmp < 0) | 148 | else if (grp < GROUP_AT(group_info, mid)) |
| 150 | right = mid; | 149 | right = mid; |
| 151 | else | 150 | else |
| 152 | return 1; | 151 | return 1; |
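The groups.c change avoids forming `grp - GROUP_AT(group_info, mid)` because gid_t is unsigned: when the two ids differ by more than INT_MAX, truncating the difference to int can flip its sign and steer the binary search the wrong way. A self-contained demonstration (gid_t_demo is a stand-in type for the example; the old code's behaviour is shown for common two's-complement targets):

#include <stdio.h>

typedef unsigned int gid_t_demo;	/* stand-in for the kernel's gid_t */

int main(void)
{
	gid_t_demo grp = 0x80000001u;	/* a very large group id */
	gid_t_demo mid = 1u;

	int cmp = grp - mid;		/* old code: wraps to a negative int */
	printf("subtraction says: %s\n",
	       cmp > 0 ? "grp > mid" :
	       cmp < 0 ? "grp < mid (wrong)" : "equal");

	/* new code: compare the values directly, no overflow possible */
	printf("comparison says:  %s\n",
	       grp > mid ? "grp > mid" :
	       grp < mid ? "grp < mid" : "equal");
	return 0;
}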
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index ce669174f355..1decafbb6b1a 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c | |||
| @@ -1091,11 +1091,10 @@ EXPORT_SYMBOL_GPL(hrtimer_cancel); | |||
| 1091 | */ | 1091 | */ |
| 1092 | ktime_t hrtimer_get_remaining(const struct hrtimer *timer) | 1092 | ktime_t hrtimer_get_remaining(const struct hrtimer *timer) |
| 1093 | { | 1093 | { |
| 1094 | struct hrtimer_clock_base *base; | ||
| 1095 | unsigned long flags; | 1094 | unsigned long flags; |
| 1096 | ktime_t rem; | 1095 | ktime_t rem; |
| 1097 | 1096 | ||
| 1098 | base = lock_hrtimer_base(timer, &flags); | 1097 | lock_hrtimer_base(timer, &flags); |
| 1099 | rem = hrtimer_expires_remaining(timer); | 1098 | rem = hrtimer_expires_remaining(timer); |
| 1100 | unlock_hrtimer_base(timer, &flags); | 1099 | unlock_hrtimer_base(timer, &flags); |
| 1101 | 1100 | ||
diff --git a/kernel/mutex.c b/kernel/mutex.c index 4c0b7b3e6d2e..200407c1502f 100644 --- a/kernel/mutex.c +++ b/kernel/mutex.c | |||
| @@ -36,15 +36,6 @@ | |||
| 36 | # include <asm/mutex.h> | 36 | # include <asm/mutex.h> |
| 37 | #endif | 37 | #endif |
| 38 | 38 | ||
| 39 | /*** | ||
| 40 | * mutex_init - initialize the mutex | ||
| 41 | * @lock: the mutex to be initialized | ||
| 42 | * @key: the lock_class_key for the class; used by mutex lock debugging | ||
| 43 | * | ||
| 44 | * Initialize the mutex to unlocked state. | ||
| 45 | * | ||
| 46 | * It is not allowed to initialize an already locked mutex. | ||
| 47 | */ | ||
| 48 | void | 39 | void |
| 49 | __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key) | 40 | __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key) |
| 50 | { | 41 | { |
| @@ -68,7 +59,7 @@ EXPORT_SYMBOL(__mutex_init); | |||
| 68 | static __used noinline void __sched | 59 | static __used noinline void __sched |
| 69 | __mutex_lock_slowpath(atomic_t *lock_count); | 60 | __mutex_lock_slowpath(atomic_t *lock_count); |
| 70 | 61 | ||
| 71 | /*** | 62 | /** |
| 72 | * mutex_lock - acquire the mutex | 63 | * mutex_lock - acquire the mutex |
| 73 | * @lock: the mutex to be acquired | 64 | * @lock: the mutex to be acquired |
| 74 | * | 65 | * |
| @@ -105,7 +96,7 @@ EXPORT_SYMBOL(mutex_lock); | |||
| 105 | 96 | ||
| 106 | static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count); | 97 | static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count); |
| 107 | 98 | ||
| 108 | /*** | 99 | /** |
| 109 | * mutex_unlock - release the mutex | 100 | * mutex_unlock - release the mutex |
| 110 | * @lock: the mutex to be released | 101 | * @lock: the mutex to be released |
| 111 | * | 102 | * |
| @@ -364,8 +355,8 @@ __mutex_lock_killable_slowpath(atomic_t *lock_count); | |||
| 364 | static noinline int __sched | 355 | static noinline int __sched |
| 365 | __mutex_lock_interruptible_slowpath(atomic_t *lock_count); | 356 | __mutex_lock_interruptible_slowpath(atomic_t *lock_count); |
| 366 | 357 | ||
| 367 | /*** | 358 | /** |
| 368 | * mutex_lock_interruptible - acquire the mutex, interruptable | 359 | * mutex_lock_interruptible - acquire the mutex, interruptible |
| 369 | * @lock: the mutex to be acquired | 360 | * @lock: the mutex to be acquired |
| 370 | * | 361 | * |
| 371 | * Lock the mutex like mutex_lock(), and return 0 if the mutex has | 362 | * Lock the mutex like mutex_lock(), and return 0 if the mutex has |
| @@ -456,15 +447,15 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count) | |||
| 456 | return prev == 1; | 447 | return prev == 1; |
| 457 | } | 448 | } |
| 458 | 449 | ||
| 459 | /*** | 450 | /** |
| 460 | * mutex_trylock - try acquire the mutex, without waiting | 451 | * mutex_trylock - try to acquire the mutex, without waiting |
| 461 | * @lock: the mutex to be acquired | 452 | * @lock: the mutex to be acquired |
| 462 | * | 453 | * |
| 463 | * Try to acquire the mutex atomically. Returns 1 if the mutex | 454 | * Try to acquire the mutex atomically. Returns 1 if the mutex |
| 464 | * has been acquired successfully, and 0 on contention. | 455 | * has been acquired successfully, and 0 on contention. |
| 465 | * | 456 | * |
| 466 | * NOTE: this function follows the spin_trylock() convention, so | 457 | * NOTE: this function follows the spin_trylock() convention, so |
| 467 | * it is negated to the down_trylock() return values! Be careful | 458 | * it is negated from the down_trylock() return values! Be careful |
| 468 | * about this when converting semaphore users to mutexes. | 459 | * about this when converting semaphore users to mutexes. |
| 469 | * | 460 | * |
| 470 | * This function must not be used in interrupt context. The | 461 | * This function must not be used in interrupt context. The |
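The mutex.c hunks are documentation-only: kernel-doc comments must open with exactly two asterisks for scripts/kernel-doc (and hence the new "Mutex API reference" chapter added to kernel-locking.tmpl) to extract them, so the stray three-asterisk openers are corrected, and the block above __mutex_init(), which documented the mutex_init() macro rather than the function it was attached to, is dropped. For reference, a minimal comment in the expected shape; sample_add() is only a placeholder:

/**
 * sample_add - add two integers
 * @a: first addend
 * @b: second addend
 *
 * Returns the sum of @a and @b.  The opening marker must be exactly two
 * asterisks; a three-asterisk opener is treated as an ordinary comment
 * and ignored by kernel-doc.
 */
int sample_add(int a, int b)
{
	return a + b;
}

int main(void)
{
	return sample_add(2, 2) == 4 ? 0 : 1;
}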
diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 403d1804b198..db5b56064687 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c | |||
| @@ -402,11 +402,31 @@ static void perf_group_detach(struct perf_event *event) | |||
| 402 | } | 402 | } |
| 403 | } | 403 | } |
| 404 | 404 | ||
| 405 | static inline int | ||
| 406 | event_filter_match(struct perf_event *event) | ||
| 407 | { | ||
| 408 | return event->cpu == -1 || event->cpu == smp_processor_id(); | ||
| 409 | } | ||
| 410 | |||
| 405 | static void | 411 | static void |
| 406 | event_sched_out(struct perf_event *event, | 412 | event_sched_out(struct perf_event *event, |
| 407 | struct perf_cpu_context *cpuctx, | 413 | struct perf_cpu_context *cpuctx, |
| 408 | struct perf_event_context *ctx) | 414 | struct perf_event_context *ctx) |
| 409 | { | 415 | { |
| 416 | u64 delta; | ||
| 417 | /* | ||
| 418 | * An event which could not be activated because of | ||
| 419 | * filter mismatch still needs to have its timings | ||
| 420 | * maintained, otherwise bogus information is returned | ||
| 421 | * via read() for time_enabled, time_running: | ||
| 422 | */ | ||
| 423 | if (event->state == PERF_EVENT_STATE_INACTIVE | ||
| 424 | && !event_filter_match(event)) { | ||
| 425 | delta = ctx->time - event->tstamp_stopped; | ||
| 426 | event->tstamp_running += delta; | ||
| 427 | event->tstamp_stopped = ctx->time; | ||
| 428 | } | ||
| 429 | |||
| 410 | if (event->state != PERF_EVENT_STATE_ACTIVE) | 430 | if (event->state != PERF_EVENT_STATE_ACTIVE) |
| 411 | return; | 431 | return; |
| 412 | 432 | ||
| @@ -432,9 +452,7 @@ group_sched_out(struct perf_event *group_event, | |||
| 432 | struct perf_event_context *ctx) | 452 | struct perf_event_context *ctx) |
| 433 | { | 453 | { |
| 434 | struct perf_event *event; | 454 | struct perf_event *event; |
| 435 | 455 | int state = group_event->state; | |
| 436 | if (group_event->state != PERF_EVENT_STATE_ACTIVE) | ||
| 437 | return; | ||
| 438 | 456 | ||
| 439 | event_sched_out(group_event, cpuctx, ctx); | 457 | event_sched_out(group_event, cpuctx, ctx); |
| 440 | 458 | ||
| @@ -444,7 +462,7 @@ group_sched_out(struct perf_event *group_event, | |||
| 444 | list_for_each_entry(event, &group_event->sibling_list, group_entry) | 462 | list_for_each_entry(event, &group_event->sibling_list, group_entry) |
| 445 | event_sched_out(event, cpuctx, ctx); | 463 | event_sched_out(event, cpuctx, ctx); |
| 446 | 464 | ||
| 447 | if (group_event->attr.exclusive) | 465 | if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive) |
| 448 | cpuctx->exclusive = 0; | 466 | cpuctx->exclusive = 0; |
| 449 | } | 467 | } |
| 450 | 468 | ||
| @@ -5743,15 +5761,15 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) | |||
| 5743 | { | 5761 | { |
| 5744 | unsigned int cpu = (long)hcpu; | 5762 | unsigned int cpu = (long)hcpu; |
| 5745 | 5763 | ||
| 5746 | switch (action) { | 5764 | switch (action & ~CPU_TASKS_FROZEN) { |
| 5747 | 5765 | ||
| 5748 | case CPU_UP_PREPARE: | 5766 | case CPU_UP_PREPARE: |
| 5749 | case CPU_UP_PREPARE_FROZEN: | 5767 | case CPU_DOWN_FAILED: |
| 5750 | perf_event_init_cpu(cpu); | 5768 | perf_event_init_cpu(cpu); |
| 5751 | break; | 5769 | break; |
| 5752 | 5770 | ||
| 5771 | case CPU_UP_CANCELED: | ||
| 5753 | case CPU_DOWN_PREPARE: | 5772 | case CPU_DOWN_PREPARE: |
| 5754 | case CPU_DOWN_PREPARE_FROZEN: | ||
| 5755 | perf_event_exit_cpu(cpu); | 5773 | perf_event_exit_cpu(cpu); |
| 5756 | break; | 5774 | break; |
| 5757 | 5775 | ||
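The perf_event.c notifier now switches on `action & ~CPU_TASKS_FROZEN`, so the _FROZEN variants delivered around suspend/resume fall into the same cases as their normal counterparts, and CPU_UP_CANCELED / CPU_DOWN_FAILED are handled symmetrically with the prepare paths. The masking itself is plain bit arithmetic; a stand-alone sketch (the DEMO_* constants are illustrative values, not copied from the kernel headers):

#include <stdio.h>

#define DEMO_UP_PREPARE		0x03	/* illustrative action codes */
#define DEMO_DOWN_FAILED	0x06
#define DEMO_TASKS_FROZEN	0x10	/* OR'ed in during suspend/resume */

static const char *handle(unsigned long action)
{
	switch (action & ~DEMO_TASKS_FROZEN) {
	case DEMO_UP_PREPARE:
	case DEMO_DOWN_FAILED:
		return "init cpu";
	default:
		return "ignored";
	}
}

int main(void)
{
	/* The frozen variant hits the same case as the normal one. */
	printf("%s\n", handle(DEMO_UP_PREPARE));
	printf("%s\n", handle(DEMO_UP_PREPARE | DEMO_TASKS_FROZEN));
	return 0;
}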
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c index b7e4c362361b..645e541a45f6 100644 --- a/kernel/pm_qos_params.c +++ b/kernel/pm_qos_params.c | |||
| @@ -389,10 +389,12 @@ static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf, | |||
| 389 | } else if (count == 11) { /* len('0x12345678/0') */ | 389 | } else if (count == 11) { /* len('0x12345678/0') */ |
| 390 | if (copy_from_user(ascii_value, buf, 11)) | 390 | if (copy_from_user(ascii_value, buf, 11)) |
| 391 | return -EFAULT; | 391 | return -EFAULT; |
| 392 | if (strlen(ascii_value) != 10) | ||
| 393 | return -EINVAL; | ||
| 392 | x = sscanf(ascii_value, "%x", &value); | 394 | x = sscanf(ascii_value, "%x", &value); |
| 393 | if (x != 1) | 395 | if (x != 1) |
| 394 | return -EINVAL; | 396 | return -EINVAL; |
| 395 | pr_debug(KERN_ERR "%s, %d, 0x%x\n", ascii_value, x, value); | 397 | pr_debug("%s, %d, 0x%x\n", ascii_value, x, value); |
| 396 | } else | 398 | } else |
| 397 | return -EINVAL; | 399 | return -EINVAL; |
| 398 | 400 | ||
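The pm_qos_params.c change only accepts an 11-byte write when strlen() confirms that ten characters actually precede the terminator, so sscanf() never parses a shorter, partially filled buffer; it also drops the misplaced KERN_ERR from pr_debug(), which supplies its own log level. A hedged user-space sketch of the validate-then-parse step (parse_hex32() is an invented helper, not the kernel function):

#include <stdio.h>
#include <string.h>

/* Returns 0 and stores the value on success, -1 on malformed input. */
static int parse_hex32(const char *ascii_value, unsigned int *value)
{
	if (strlen(ascii_value) != 10)	/* expect "0x" plus eight hex digits */
		return -1;
	if (sscanf(ascii_value, "%x", value) != 1)
		return -1;
	return 0;
}

int main(void)
{
	unsigned int v = 0;

	printf("%d\n", parse_hex32("0x12345678", &v));	/* 0, v = 0x12345678 */
	printf("%d\n", parse_hex32("0x1234", &v));	/* -1: too short */
	return 0;
}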
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index c77963938bca..8dc31e02ae12 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c | |||
| @@ -338,7 +338,6 @@ int hibernation_snapshot(int platform_mode) | |||
| 338 | goto Close; | 338 | goto Close; |
| 339 | 339 | ||
| 340 | suspend_console(); | 340 | suspend_console(); |
| 341 | hibernation_freeze_swap(); | ||
| 342 | saved_mask = clear_gfp_allowed_mask(GFP_IOFS); | 341 | saved_mask = clear_gfp_allowed_mask(GFP_IOFS); |
| 343 | error = dpm_suspend_start(PMSG_FREEZE); | 342 | error = dpm_suspend_start(PMSG_FREEZE); |
| 344 | if (error) | 343 | if (error) |
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 5e7edfb05e66..d3f795f01bbc 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c | |||
| @@ -1086,7 +1086,6 @@ void swsusp_free(void) | |||
| 1086 | buffer = NULL; | 1086 | buffer = NULL; |
| 1087 | alloc_normal = 0; | 1087 | alloc_normal = 0; |
| 1088 | alloc_highmem = 0; | 1088 | alloc_highmem = 0; |
| 1089 | hibernation_thaw_swap(); | ||
| 1090 | } | 1089 | } |
| 1091 | 1090 | ||
| 1092 | /* Helper functions used for the shrinking of memory. */ | 1091 | /* Helper functions used for the shrinking of memory. */ |
| @@ -1122,9 +1121,19 @@ static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask) | |||
| 1122 | return nr_alloc; | 1121 | return nr_alloc; |
| 1123 | } | 1122 | } |
| 1124 | 1123 | ||
| 1125 | static unsigned long preallocate_image_memory(unsigned long nr_pages) | 1124 | static unsigned long preallocate_image_memory(unsigned long nr_pages, |
| 1125 | unsigned long avail_normal) | ||
| 1126 | { | 1126 | { |
| 1127 | return preallocate_image_pages(nr_pages, GFP_IMAGE); | 1127 | unsigned long alloc; |
| 1128 | |||
| 1129 | if (avail_normal <= alloc_normal) | ||
| 1130 | return 0; | ||
| 1131 | |||
| 1132 | alloc = avail_normal - alloc_normal; | ||
| 1133 | if (nr_pages < alloc) | ||
| 1134 | alloc = nr_pages; | ||
| 1135 | |||
| 1136 | return preallocate_image_pages(alloc, GFP_IMAGE); | ||
| 1128 | } | 1137 | } |
| 1129 | 1138 | ||
| 1130 | #ifdef CONFIG_HIGHMEM | 1139 | #ifdef CONFIG_HIGHMEM |
| @@ -1170,15 +1179,22 @@ static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages, | |||
| 1170 | */ | 1179 | */ |
| 1171 | static void free_unnecessary_pages(void) | 1180 | static void free_unnecessary_pages(void) |
| 1172 | { | 1181 | { |
| 1173 | unsigned long save_highmem, to_free_normal, to_free_highmem; | 1182 | unsigned long save, to_free_normal, to_free_highmem; |
| 1174 | 1183 | ||
| 1175 | to_free_normal = alloc_normal - count_data_pages(); | 1184 | save = count_data_pages(); |
| 1176 | save_highmem = count_highmem_pages(); | 1185 | if (alloc_normal >= save) { |
| 1177 | if (alloc_highmem > save_highmem) { | 1186 | to_free_normal = alloc_normal - save; |
| 1178 | to_free_highmem = alloc_highmem - save_highmem; | 1187 | save = 0; |
| 1188 | } else { | ||
| 1189 | to_free_normal = 0; | ||
| 1190 | save -= alloc_normal; | ||
| 1191 | } | ||
| 1192 | save += count_highmem_pages(); | ||
| 1193 | if (alloc_highmem >= save) { | ||
| 1194 | to_free_highmem = alloc_highmem - save; | ||
| 1179 | } else { | 1195 | } else { |
| 1180 | to_free_highmem = 0; | 1196 | to_free_highmem = 0; |
| 1181 | to_free_normal -= save_highmem - alloc_highmem; | 1197 | to_free_normal -= save - alloc_highmem; |
| 1182 | } | 1198 | } |
| 1183 | 1199 | ||
| 1184 | memory_bm_position_reset(©_bm); | 1200 | memory_bm_position_reset(©_bm); |
| @@ -1259,7 +1275,7 @@ int hibernate_preallocate_memory(void) | |||
| 1259 | { | 1275 | { |
| 1260 | struct zone *zone; | 1276 | struct zone *zone; |
| 1261 | unsigned long saveable, size, max_size, count, highmem, pages = 0; | 1277 | unsigned long saveable, size, max_size, count, highmem, pages = 0; |
| 1262 | unsigned long alloc, save_highmem, pages_highmem; | 1278 | unsigned long alloc, save_highmem, pages_highmem, avail_normal; |
| 1263 | struct timeval start, stop; | 1279 | struct timeval start, stop; |
| 1264 | int error; | 1280 | int error; |
| 1265 | 1281 | ||
| @@ -1296,6 +1312,7 @@ int hibernate_preallocate_memory(void) | |||
| 1296 | else | 1312 | else |
| 1297 | count += zone_page_state(zone, NR_FREE_PAGES); | 1313 | count += zone_page_state(zone, NR_FREE_PAGES); |
| 1298 | } | 1314 | } |
| 1315 | avail_normal = count; | ||
| 1299 | count += highmem; | 1316 | count += highmem; |
| 1300 | count -= totalreserve_pages; | 1317 | count -= totalreserve_pages; |
| 1301 | 1318 | ||
| @@ -1310,12 +1327,21 @@ int hibernate_preallocate_memory(void) | |||
| 1310 | */ | 1327 | */ |
| 1311 | if (size >= saveable) { | 1328 | if (size >= saveable) { |
| 1312 | pages = preallocate_image_highmem(save_highmem); | 1329 | pages = preallocate_image_highmem(save_highmem); |
| 1313 | pages += preallocate_image_memory(saveable - pages); | 1330 | pages += preallocate_image_memory(saveable - pages, avail_normal); |
| 1314 | goto out; | 1331 | goto out; |
| 1315 | } | 1332 | } |
| 1316 | 1333 | ||
| 1317 | /* Estimate the minimum size of the image. */ | 1334 | /* Estimate the minimum size of the image. */ |
| 1318 | pages = minimum_image_size(saveable); | 1335 | pages = minimum_image_size(saveable); |
| 1336 | /* | ||
| 1337 | * To avoid excessive pressure on the normal zone, leave room in it to | ||
| 1338 | * accommodate an image of the minimum size (unless it's already too | ||
| 1339 | * small, in which case don't preallocate pages from it at all). | ||
| 1340 | */ | ||
| 1341 | if (avail_normal > pages) | ||
| 1342 | avail_normal -= pages; | ||
| 1343 | else | ||
| 1344 | avail_normal = 0; | ||
| 1319 | if (size < pages) | 1345 | if (size < pages) |
| 1320 | size = min_t(unsigned long, pages, max_size); | 1346 | size = min_t(unsigned long, pages, max_size); |
| 1321 | 1347 | ||
| @@ -1336,16 +1362,34 @@ int hibernate_preallocate_memory(void) | |||
| 1336 | */ | 1362 | */ |
| 1337 | pages_highmem = preallocate_image_highmem(highmem / 2); | 1363 | pages_highmem = preallocate_image_highmem(highmem / 2); |
| 1338 | alloc = (count - max_size) - pages_highmem; | 1364 | alloc = (count - max_size) - pages_highmem; |
| 1339 | pages = preallocate_image_memory(alloc); | 1365 | pages = preallocate_image_memory(alloc, avail_normal); |
| 1340 | if (pages < alloc) | 1366 | if (pages < alloc) { |
| 1341 | goto err_out; | 1367 | /* We have exhausted non-highmem pages, try highmem. */ |
| 1342 | size = max_size - size; | 1368 | alloc -= pages; |
| 1343 | alloc = size; | 1369 | pages += pages_highmem; |
| 1344 | size = preallocate_highmem_fraction(size, highmem, count); | 1370 | pages_highmem = preallocate_image_highmem(alloc); |
| 1345 | pages_highmem += size; | 1371 | if (pages_highmem < alloc) |
| 1346 | alloc -= size; | 1372 | goto err_out; |
| 1347 | pages += preallocate_image_memory(alloc); | 1373 | pages += pages_highmem; |
| 1348 | pages += pages_highmem; | 1374 | /* |
| 1375 | * size is the desired number of saveable pages to leave in | ||
| 1376 | * memory, so try to preallocate (all memory - size) pages. | ||
| 1377 | */ | ||
| 1378 | alloc = (count - pages) - size; | ||
| 1379 | pages += preallocate_image_highmem(alloc); | ||
| 1380 | } else { | ||
| 1381 | /* | ||
| 1382 | * There are approximately max_size saveable pages at this point | ||
| 1383 | * and we want to reduce this number down to size. | ||
| 1384 | */ | ||
| 1385 | alloc = max_size - size; | ||
| 1386 | size = preallocate_highmem_fraction(alloc, highmem, count); | ||
| 1387 | pages_highmem += size; | ||
| 1388 | alloc -= size; | ||
| 1389 | size = preallocate_image_memory(alloc, avail_normal); | ||
| 1390 | pages_highmem += preallocate_image_highmem(alloc - size); | ||
| 1391 | pages += pages_highmem + size; | ||
| 1392 | } | ||
| 1349 | 1393 | ||
| 1350 | /* | 1394 | /* |
| 1351 | * We only need as many page frames for the image as there are saveable | 1395 | * We only need as many page frames for the image as there are saveable |
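The snapshot.c changes thread an avail_normal limit through the preallocation path so that, on highmem machines with a small normal zone, hibernation does not grab every lowmem page for the image: preallocate_image_memory() clamps its request to what the normal zone can still spare, and the caller falls back to highmem when the clamp bites. The clamping arithmetic in isolation (clamp_request() is an invented name and returns the clamped request rather than the pages actually allocated):

#include <stdio.h>

static unsigned long clamp_request(unsigned long nr_pages,
				   unsigned long avail_normal,
				   unsigned long alloc_normal)
{
	unsigned long alloc;

	if (avail_normal <= alloc_normal)
		return 0;			/* normal zone exhausted */

	alloc = avail_normal - alloc_normal;	/* pages still available */
	if (nr_pages < alloc)
		alloc = nr_pages;		/* never over-request */

	return alloc;
}

int main(void)
{
	printf("%lu\n", clamp_request(1000, 5000, 4800));	/* 200 */
	printf("%lu\n", clamp_request(1000, 5000, 5200));	/* 0 */
	printf("%lu\n", clamp_request(100, 5000, 4800));	/* 100 */
	return 0;
}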
diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 5d0059eed3e4..e6a5bdf61a37 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c | |||
| @@ -136,10 +136,10 @@ sector_t alloc_swapdev_block(int swap) | |||
| 136 | { | 136 | { |
| 137 | unsigned long offset; | 137 | unsigned long offset; |
| 138 | 138 | ||
| 139 | offset = swp_offset(get_swap_for_hibernation(swap)); | 139 | offset = swp_offset(get_swap_page_of_type(swap)); |
| 140 | if (offset) { | 140 | if (offset) { |
| 141 | if (swsusp_extents_insert(offset)) | 141 | if (swsusp_extents_insert(offset)) |
| 142 | swap_free_for_hibernation(swp_entry(swap, offset)); | 142 | swap_free(swp_entry(swap, offset)); |
| 143 | else | 143 | else |
| 144 | return swapdev_block(swap, offset); | 144 | return swapdev_block(swap, offset); |
| 145 | } | 145 | } |
| @@ -163,7 +163,7 @@ void free_all_swap_pages(int swap) | |||
| 163 | ext = container_of(node, struct swsusp_extent, node); | 163 | ext = container_of(node, struct swsusp_extent, node); |
| 164 | rb_erase(node, &swsusp_extents); | 164 | rb_erase(node, &swsusp_extents); |
| 165 | for (offset = ext->start; offset <= ext->end; offset++) | 165 | for (offset = ext->start; offset <= ext->end; offset++) |
| 166 | swap_free_for_hibernation(swp_entry(swap, offset)); | 166 | swap_free(swp_entry(swap, offset)); |
| 167 | 167 | ||
| 168 | kfree(ext); | 168 | kfree(ext); |
| 169 | } | 169 | } |
diff --git a/kernel/sched.c b/kernel/sched.c index 09b574e7f4df..ed09d4f2a69c 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
| @@ -1294,6 +1294,10 @@ static void resched_task(struct task_struct *p) | |||
| 1294 | static void sched_rt_avg_update(struct rq *rq, u64 rt_delta) | 1294 | static void sched_rt_avg_update(struct rq *rq, u64 rt_delta) |
| 1295 | { | 1295 | { |
| 1296 | } | 1296 | } |
| 1297 | |||
| 1298 | static void sched_avg_update(struct rq *rq) | ||
| 1299 | { | ||
| 1300 | } | ||
| 1297 | #endif /* CONFIG_SMP */ | 1301 | #endif /* CONFIG_SMP */ |
| 1298 | 1302 | ||
| 1299 | #if BITS_PER_LONG == 32 | 1303 | #if BITS_PER_LONG == 32 |
| @@ -3182,6 +3186,8 @@ static void update_cpu_load(struct rq *this_rq) | |||
| 3182 | 3186 | ||
| 3183 | this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i; | 3187 | this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i; |
| 3184 | } | 3188 | } |
| 3189 | |||
| 3190 | sched_avg_update(this_rq); | ||
| 3185 | } | 3191 | } |
| 3186 | 3192 | ||
| 3187 | static void update_cpu_load_active(struct rq *this_rq) | 3193 | static void update_cpu_load_active(struct rq *this_rq) |
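The sched.c hunks move the sched_avg_update() call out of scale_rt_power() and into update_cpu_load(), adding an empty stub for the !CONFIG_SMP build so the common code can call it unconditionally. That stub-instead-of-#ifdef pattern in miniature (CONFIG_SMP_DEMO and the *_demo names are invented for the sketch):

#include <stdio.h>

#ifdef CONFIG_SMP_DEMO
static void sched_avg_update_demo(void)
{
	printf("decay the rt average\n");
}
#else
static void sched_avg_update_demo(void)
{
	/* no-op when the feature is compiled out */
}
#endif

static void update_cpu_load_demo(void)
{
	/* ... per-cpu load bookkeeping would go here ... */
	sched_avg_update_demo();	/* safe to call unconditionally */
}

int main(void)
{
	update_cpu_load_demo();
	return 0;
}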
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index ab661ebc4895..9b5b4f86b742 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c | |||
| @@ -1313,7 +1313,7 @@ static struct sched_group * | |||
| 1313 | find_idlest_group(struct sched_domain *sd, struct task_struct *p, | 1313 | find_idlest_group(struct sched_domain *sd, struct task_struct *p, |
| 1314 | int this_cpu, int load_idx) | 1314 | int this_cpu, int load_idx) |
| 1315 | { | 1315 | { |
| 1316 | struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups; | 1316 | struct sched_group *idlest = NULL, *group = sd->groups; |
| 1317 | unsigned long min_load = ULONG_MAX, this_load = 0; | 1317 | unsigned long min_load = ULONG_MAX, this_load = 0; |
| 1318 | int imbalance = 100 + (sd->imbalance_pct-100)/2; | 1318 | int imbalance = 100 + (sd->imbalance_pct-100)/2; |
| 1319 | 1319 | ||
| @@ -1348,7 +1348,6 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, | |||
| 1348 | 1348 | ||
| 1349 | if (local_group) { | 1349 | if (local_group) { |
| 1350 | this_load = avg_load; | 1350 | this_load = avg_load; |
| 1351 | this = group; | ||
| 1352 | } else if (avg_load < min_load) { | 1351 | } else if (avg_load < min_load) { |
| 1353 | min_load = avg_load; | 1352 | min_load = avg_load; |
| 1354 | idlest = group; | 1353 | idlest = group; |
| @@ -2268,8 +2267,6 @@ unsigned long scale_rt_power(int cpu) | |||
| 2268 | struct rq *rq = cpu_rq(cpu); | 2267 | struct rq *rq = cpu_rq(cpu); |
| 2269 | u64 total, available; | 2268 | u64 total, available; |
| 2270 | 2269 | ||
| 2271 | sched_avg_update(rq); | ||
| 2272 | |||
| 2273 | total = sched_avg_period() + (rq->clock - rq->age_stamp); | 2270 | total = sched_avg_period() + (rq->clock - rq->age_stamp); |
| 2274 | available = total - rq->rt_avg; | 2271 | available = total - rq->rt_avg; |
| 2275 | 2272 | ||
diff --git a/kernel/sys.c b/kernel/sys.c index e9ad44489828..7f5a0cd296a9 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
| @@ -931,6 +931,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid) | |||
| 931 | pgid = pid; | 931 | pgid = pid; |
| 932 | if (pgid < 0) | 932 | if (pgid < 0) |
| 933 | return -EINVAL; | 933 | return -EINVAL; |
| 934 | rcu_read_lock(); | ||
| 934 | 935 | ||
| 935 | /* From this point forward we keep holding onto the tasklist lock | 936 | /* From this point forward we keep holding onto the tasklist lock |
| 936 | * so that our parent does not change from under us. -DaveM | 937 | * so that our parent does not change from under us. -DaveM |
| @@ -984,6 +985,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid) | |||
| 984 | out: | 985 | out: |
| 985 | /* All paths lead to here, thus we are safe. -DaveM */ | 986 | /* All paths lead to here, thus we are safe. -DaveM */ |
| 986 | write_unlock_irq(&tasklist_lock); | 987 | write_unlock_irq(&tasklist_lock); |
| 988 | rcu_read_unlock(); | ||
| 987 | return err; | 989 | return err; |
| 988 | } | 990 | } |
| 989 | 991 | ||
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index ca38e8e3e907..f88552c6d227 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
| @@ -1713,10 +1713,7 @@ static __init int sysctl_init(void) | |||
| 1713 | { | 1713 | { |
| 1714 | sysctl_set_parent(NULL, root_table); | 1714 | sysctl_set_parent(NULL, root_table); |
| 1715 | #ifdef CONFIG_SYSCTL_SYSCALL_CHECK | 1715 | #ifdef CONFIG_SYSCTL_SYSCALL_CHECK |
| 1716 | { | 1716 | sysctl_check_table(current->nsproxy, root_table); |
| 1717 | int err; | ||
| 1718 | err = sysctl_check_table(current->nsproxy, root_table); | ||
| 1719 | } | ||
| 1720 | #endif | 1717 | #endif |
| 1721 | return 0; | 1718 | return 0; |
| 1722 | } | 1719 | } |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 0d88ce9b9fb8..fa7ece649fe1 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
| @@ -381,12 +381,19 @@ static int function_stat_show(struct seq_file *m, void *v) | |||
| 381 | { | 381 | { |
| 382 | struct ftrace_profile *rec = v; | 382 | struct ftrace_profile *rec = v; |
| 383 | char str[KSYM_SYMBOL_LEN]; | 383 | char str[KSYM_SYMBOL_LEN]; |
| 384 | int ret = 0; | ||
| 384 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 385 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
| 385 | static DEFINE_MUTEX(mutex); | ||
| 386 | static struct trace_seq s; | 386 | static struct trace_seq s; |
| 387 | unsigned long long avg; | 387 | unsigned long long avg; |
| 388 | unsigned long long stddev; | 388 | unsigned long long stddev; |
| 389 | #endif | 389 | #endif |
| 390 | mutex_lock(&ftrace_profile_lock); | ||
| 391 | |||
| 392 | /* we raced with function_profile_reset() */ | ||
| 393 | if (unlikely(rec->counter == 0)) { | ||
| 394 | ret = -EBUSY; | ||
| 395 | goto out; | ||
| 396 | } | ||
| 390 | 397 | ||
| 391 | kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); | 398 | kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); |
| 392 | seq_printf(m, " %-30.30s %10lu", str, rec->counter); | 399 | seq_printf(m, " %-30.30s %10lu", str, rec->counter); |
| @@ -408,7 +415,6 @@ static int function_stat_show(struct seq_file *m, void *v) | |||
| 408 | do_div(stddev, (rec->counter - 1) * 1000); | 415 | do_div(stddev, (rec->counter - 1) * 1000); |
| 409 | } | 416 | } |
| 410 | 417 | ||
| 411 | mutex_lock(&mutex); | ||
| 412 | trace_seq_init(&s); | 418 | trace_seq_init(&s); |
| 413 | trace_print_graph_duration(rec->time, &s); | 419 | trace_print_graph_duration(rec->time, &s); |
| 414 | trace_seq_puts(&s, " "); | 420 | trace_seq_puts(&s, " "); |
| @@ -416,11 +422,12 @@ static int function_stat_show(struct seq_file *m, void *v) | |||
| 416 | trace_seq_puts(&s, " "); | 422 | trace_seq_puts(&s, " "); |
| 417 | trace_print_graph_duration(stddev, &s); | 423 | trace_print_graph_duration(stddev, &s); |
| 418 | trace_print_seq(m, &s); | 424 | trace_print_seq(m, &s); |
| 419 | mutex_unlock(&mutex); | ||
| 420 | #endif | 425 | #endif |
| 421 | seq_putc(m, '\n'); | 426 | seq_putc(m, '\n'); |
| 427 | out: | ||
| 428 | mutex_unlock(&ftrace_profile_lock); | ||
| 422 | 429 | ||
| 423 | return 0; | 430 | return ret; |
| 424 | } | 431 | } |
| 425 | 432 | ||
| 426 | static void ftrace_profile_reset(struct ftrace_profile_stat *stat) | 433 | static void ftrace_profile_reset(struct ftrace_profile_stat *stat) |
| @@ -1503,6 +1510,8 @@ static void *t_start(struct seq_file *m, loff_t *pos) | |||
| 1503 | if (*pos > 0) | 1510 | if (*pos > 0) |
| 1504 | return t_hash_start(m, pos); | 1511 | return t_hash_start(m, pos); |
| 1505 | iter->flags |= FTRACE_ITER_PRINTALL; | 1512 | iter->flags |= FTRACE_ITER_PRINTALL; |
| 1513 | /* reset in case of seek/pread */ | ||
| 1514 | iter->flags &= ~FTRACE_ITER_HASH; | ||
| 1506 | return iter; | 1515 | return iter; |
| 1507 | } | 1516 | } |
| 1508 | 1517 | ||
| @@ -2409,7 +2418,7 @@ static const struct file_operations ftrace_filter_fops = { | |||
| 2409 | .open = ftrace_filter_open, | 2418 | .open = ftrace_filter_open, |
| 2410 | .read = seq_read, | 2419 | .read = seq_read, |
| 2411 | .write = ftrace_filter_write, | 2420 | .write = ftrace_filter_write, |
| 2412 | .llseek = ftrace_regex_lseek, | 2421 | .llseek = no_llseek, |
| 2413 | .release = ftrace_filter_release, | 2422 | .release = ftrace_filter_release, |
| 2414 | }; | 2423 | }; |
| 2415 | 2424 | ||
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 19cccc3c3028..492197e2f86c 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
| @@ -2985,13 +2985,11 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) | |||
| 2985 | 2985 | ||
| 2986 | static void rb_advance_iter(struct ring_buffer_iter *iter) | 2986 | static void rb_advance_iter(struct ring_buffer_iter *iter) |
| 2987 | { | 2987 | { |
| 2988 | struct ring_buffer *buffer; | ||
| 2989 | struct ring_buffer_per_cpu *cpu_buffer; | 2988 | struct ring_buffer_per_cpu *cpu_buffer; |
| 2990 | struct ring_buffer_event *event; | 2989 | struct ring_buffer_event *event; |
| 2991 | unsigned length; | 2990 | unsigned length; |
| 2992 | 2991 | ||
| 2993 | cpu_buffer = iter->cpu_buffer; | 2992 | cpu_buffer = iter->cpu_buffer; |
| 2994 | buffer = cpu_buffer->buffer; | ||
| 2995 | 2993 | ||
| 2996 | /* | 2994 | /* |
| 2997 | * Check if we are at the end of the buffer. | 2995 | * Check if we are at the end of the buffer. |
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index 000e6e85b445..31cc4cb0dbf2 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c | |||
| @@ -91,6 +91,8 @@ int perf_trace_init(struct perf_event *p_event) | |||
| 91 | tp_event->class && tp_event->class->reg && | 91 | tp_event->class && tp_event->class->reg && |
| 92 | try_module_get(tp_event->mod)) { | 92 | try_module_get(tp_event->mod)) { |
| 93 | ret = perf_trace_event_init(tp_event, p_event); | 93 | ret = perf_trace_event_init(tp_event, p_event); |
| 94 | if (ret) | ||
| 95 | module_put(tp_event->mod); | ||
| 94 | break; | 96 | break; |
| 95 | } | 97 | } |
| 96 | } | 98 | } |
| @@ -146,6 +148,7 @@ void perf_trace_destroy(struct perf_event *p_event) | |||
| 146 | } | 148 | } |
| 147 | } | 149 | } |
| 148 | out: | 150 | out: |
| 151 | module_put(tp_event->mod); | ||
| 149 | mutex_unlock(&event_mutex); | 152 | mutex_unlock(&event_mutex); |
| 150 | } | 153 | } |
| 151 | 154 | ||
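The trace_event_perf.c fix balances the module reference count: perf_trace_init() takes a reference with try_module_get() and must drop it when perf_trace_event_init() fails, and perf_trace_destroy() now releases the reference taken at init time. The acquire-on-success / release-on-failure shape, sketched with a plain counter standing in for the module refcount:

#include <stdio.h>

static int refcount;

static int ref_get(void)  { refcount++; return 1; }	/* try_module_get() stand-in */
static void ref_put(void) { refcount--; }		/* module_put() stand-in */

static int event_init(int should_fail)		/* pretend init that may fail */
{
	return should_fail ? -1 : 0;
}

static int setup(int should_fail)
{
	int ret;

	if (!ref_get())
		return -1;
	ret = event_init(should_fail);
	if (ret)
		ref_put();	/* failure: give the reference back */
	return ret;		/* success: teardown drops it later */
}

static void teardown(void)
{
	ref_put();		/* matches the reference taken in setup() */
}

int main(void)
{
	setup(1);					/* failing path */
	printf("after failed setup: %d\n", refcount);	/* 0 */
	if (setup(0) == 0)				/* successful path */
		teardown();
	printf("after setup + teardown: %d\n", refcount);	/* 0 */
	return 0;
}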
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 8b27c9849b42..544301d29dee 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
| @@ -514,8 +514,8 @@ static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs); | |||
| 514 | static int kretprobe_dispatcher(struct kretprobe_instance *ri, | 514 | static int kretprobe_dispatcher(struct kretprobe_instance *ri, |
| 515 | struct pt_regs *regs); | 515 | struct pt_regs *regs); |
| 516 | 516 | ||
| 517 | /* Check the name is good for event/group */ | 517 | /* Check the name is good for event/group/fields */ |
| 518 | static int check_event_name(const char *name) | 518 | static int is_good_name(const char *name) |
| 519 | { | 519 | { |
| 520 | if (!isalpha(*name) && *name != '_') | 520 | if (!isalpha(*name) && *name != '_') |
| 521 | return 0; | 521 | return 0; |
| @@ -557,7 +557,7 @@ static struct trace_probe *alloc_trace_probe(const char *group, | |||
| 557 | else | 557 | else |
| 558 | tp->rp.kp.pre_handler = kprobe_dispatcher; | 558 | tp->rp.kp.pre_handler = kprobe_dispatcher; |
| 559 | 559 | ||
| 560 | if (!event || !check_event_name(event)) { | 560 | if (!event || !is_good_name(event)) { |
| 561 | ret = -EINVAL; | 561 | ret = -EINVAL; |
| 562 | goto error; | 562 | goto error; |
| 563 | } | 563 | } |
| @@ -567,7 +567,7 @@ static struct trace_probe *alloc_trace_probe(const char *group, | |||
| 567 | if (!tp->call.name) | 567 | if (!tp->call.name) |
| 568 | goto error; | 568 | goto error; |
| 569 | 569 | ||
| 570 | if (!group || !check_event_name(group)) { | 570 | if (!group || !is_good_name(group)) { |
| 571 | ret = -EINVAL; | 571 | ret = -EINVAL; |
| 572 | goto error; | 572 | goto error; |
| 573 | } | 573 | } |
| @@ -883,7 +883,7 @@ static int create_trace_probe(int argc, char **argv) | |||
| 883 | int i, ret = 0; | 883 | int i, ret = 0; |
| 884 | int is_return = 0, is_delete = 0; | 884 | int is_return = 0, is_delete = 0; |
| 885 | char *symbol = NULL, *event = NULL, *group = NULL; | 885 | char *symbol = NULL, *event = NULL, *group = NULL; |
| 886 | char *arg, *tmp; | 886 | char *arg; |
| 887 | unsigned long offset = 0; | 887 | unsigned long offset = 0; |
| 888 | void *addr = NULL; | 888 | void *addr = NULL; |
| 889 | char buf[MAX_EVENT_NAME_LEN]; | 889 | char buf[MAX_EVENT_NAME_LEN]; |
| @@ -992,26 +992,36 @@ static int create_trace_probe(int argc, char **argv) | |||
| 992 | /* parse arguments */ | 992 | /* parse arguments */ |
| 993 | ret = 0; | 993 | ret = 0; |
| 994 | for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) { | 994 | for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) { |
| 995 | /* Increment count for freeing args in error case */ | ||
| 996 | tp->nr_args++; | ||
| 997 | |||
| 995 | /* Parse argument name */ | 998 | /* Parse argument name */ |
| 996 | arg = strchr(argv[i], '='); | 999 | arg = strchr(argv[i], '='); |
| 997 | if (arg) | 1000 | if (arg) { |
| 998 | *arg++ = '\0'; | 1001 | *arg++ = '\0'; |
| 999 | else | 1002 | tp->args[i].name = kstrdup(argv[i], GFP_KERNEL); |
| 1003 | } else { | ||
| 1000 | arg = argv[i]; | 1004 | arg = argv[i]; |
| 1005 | /* If argument name is omitted, set "argN" */ | ||
| 1006 | snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1); | ||
| 1007 | tp->args[i].name = kstrdup(buf, GFP_KERNEL); | ||
| 1008 | } | ||
| 1001 | 1009 | ||
| 1002 | tp->args[i].name = kstrdup(argv[i], GFP_KERNEL); | ||
| 1003 | if (!tp->args[i].name) { | 1010 | if (!tp->args[i].name) { |
| 1004 | pr_info("Failed to allocate argument%d name '%s'.\n", | 1011 | pr_info("Failed to allocate argument[%d] name.\n", i); |
| 1005 | i, argv[i]); | ||
| 1006 | ret = -ENOMEM; | 1012 | ret = -ENOMEM; |
| 1007 | goto error; | 1013 | goto error; |
| 1008 | } | 1014 | } |
| 1009 | tmp = strchr(tp->args[i].name, ':'); | 1015 | |
| 1010 | if (tmp) | 1016 | if (!is_good_name(tp->args[i].name)) { |
| 1011 | *tmp = '_'; /* convert : to _ */ | 1017 | pr_info("Invalid argument[%d] name: %s\n", |
| 1018 | i, tp->args[i].name); | ||
| 1019 | ret = -EINVAL; | ||
| 1020 | goto error; | ||
| 1021 | } | ||
| 1012 | 1022 | ||
| 1013 | if (conflict_field_name(tp->args[i].name, tp->args, i)) { | 1023 | if (conflict_field_name(tp->args[i].name, tp->args, i)) { |
| 1014 | pr_info("Argument%d name '%s' conflicts with " | 1024 | pr_info("Argument[%d] name '%s' conflicts with " |
| 1015 | "another field.\n", i, argv[i]); | 1025 | "another field.\n", i, argv[i]); |
| 1016 | ret = -EINVAL; | 1026 | ret = -EINVAL; |
| 1017 | goto error; | 1027 | goto error; |
| @@ -1020,12 +1030,9 @@ static int create_trace_probe(int argc, char **argv) | |||
| 1020 | /* Parse fetch argument */ | 1030 | /* Parse fetch argument */ |
| 1021 | ret = parse_probe_arg(arg, tp, &tp->args[i], is_return); | 1031 | ret = parse_probe_arg(arg, tp, &tp->args[i], is_return); |
| 1022 | if (ret) { | 1032 | if (ret) { |
| 1023 | pr_info("Parse error at argument%d. (%d)\n", i, ret); | 1033 | pr_info("Parse error at argument[%d]. (%d)\n", i, ret); |
| 1024 | kfree(tp->args[i].name); | ||
| 1025 | goto error; | 1034 | goto error; |
| 1026 | } | 1035 | } |
| 1027 | |||
| 1028 | tp->nr_args++; | ||
| 1029 | } | 1036 | } |
| 1030 | 1037 | ||
| 1031 | ret = register_trace_probe(tp); | 1038 | ret = register_trace_probe(tp); |
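The trace_kprobe hunks above generalize the old check_event_name() into is_good_name() and apply it to argument names as well, defaulting an omitted name to "argN" and counting arguments up front so the error path can free them. A minimal user-space sketch of that validation and defaulting logic (illustrative only, not the kernel code; buffer sizes are simplified):

/* names must start with a letter or '_', continue with alphanumerics
 * or '_', and an omitted name becomes "argN" (1-based) */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

static int is_good_name(const char *name)
{
    if (!isalpha((unsigned char)*name) && *name != '_')
        return 0;
    while (*++name != '\0') {
        if (!isalnum((unsigned char)*name) && *name != '_')
            return 0;
    }
    return 1;
}

int main(void)
{
    const char *args[] = { "myarg=%ax", "%bx" };
    char name[32];

    for (int i = 0; i < 2; i++) {
        const char *eq = strchr(args[i], '=');

        if (eq)    /* explicit "name=fetcharg" form */
            snprintf(name, sizeof(name), "%.*s", (int)(eq - args[i]), args[i]);
        else       /* name omitted: synthesize argN */
            snprintf(name, sizeof(name), "arg%d", i + 1);
        printf("%s -> %s (%s)\n", args[i], name,
               is_good_name(name) ? "ok" : "rejected");
    }
    return 0;
}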
diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 0d53c8e853b1..7f9c3c52ecc1 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c | |||
| @@ -122,7 +122,7 @@ static void __touch_watchdog(void) | |||
| 122 | 122 | ||
| 123 | void touch_softlockup_watchdog(void) | 123 | void touch_softlockup_watchdog(void) |
| 124 | { | 124 | { |
| 125 | __get_cpu_var(watchdog_touch_ts) = 0; | 125 | __raw_get_cpu_var(watchdog_touch_ts) = 0; |
| 126 | } | 126 | } |
| 127 | EXPORT_SYMBOL(touch_softlockup_watchdog); | 127 | EXPORT_SYMBOL(touch_softlockup_watchdog); |
| 128 | 128 | ||
| @@ -142,7 +142,14 @@ void touch_all_softlockup_watchdogs(void) | |||
| 142 | #ifdef CONFIG_HARDLOCKUP_DETECTOR | 142 | #ifdef CONFIG_HARDLOCKUP_DETECTOR |
| 143 | void touch_nmi_watchdog(void) | 143 | void touch_nmi_watchdog(void) |
| 144 | { | 144 | { |
| 145 | __get_cpu_var(watchdog_nmi_touch) = true; | 145 | if (watchdog_enabled) { |
| 146 | unsigned cpu; | ||
| 147 | |||
| 148 | for_each_present_cpu(cpu) { | ||
| 149 | if (per_cpu(watchdog_nmi_touch, cpu) != true) | ||
| 150 | per_cpu(watchdog_nmi_touch, cpu) = true; | ||
| 151 | } | ||
| 152 | } | ||
| 146 | touch_softlockup_watchdog(); | 153 | touch_softlockup_watchdog(); |
| 147 | } | 154 | } |
| 148 | EXPORT_SYMBOL(touch_nmi_watchdog); | 155 | EXPORT_SYMBOL(touch_nmi_watchdog); |
| @@ -433,6 +440,9 @@ static int watchdog_enable(int cpu) | |||
| 433 | wake_up_process(p); | 440 | wake_up_process(p); |
| 434 | } | 441 | } |
| 435 | 442 | ||
| 443 | /* if any cpu succeeds, watchdog is considered enabled for the system */ | ||
| 444 | watchdog_enabled = 1; | ||
| 445 | |||
| 436 | return 0; | 446 | return 0; |
| 437 | } | 447 | } |
| 438 | 448 | ||
| @@ -455,9 +465,6 @@ static void watchdog_disable(int cpu) | |||
| 455 | per_cpu(softlockup_watchdog, cpu) = NULL; | 465 | per_cpu(softlockup_watchdog, cpu) = NULL; |
| 456 | kthread_stop(p); | 466 | kthread_stop(p); |
| 457 | } | 467 | } |
| 458 | |||
| 459 | /* if any cpu succeeds, watchdog is considered enabled for the system */ | ||
| 460 | watchdog_enabled = 1; | ||
| 461 | } | 468 | } |
| 462 | 469 | ||
| 463 | static void watchdog_enable_all_cpus(void) | 470 | static void watchdog_enable_all_cpus(void) |
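The watchdog change above stops touch_nmi_watchdog() from touching only the local CPU: when the detector is enabled it now marks watchdog_nmi_touch for every present CPU, and watchdog_enabled is set on the enable path instead of the disable path. A user-space analogue of the touch logic (illustrative; fixed CPU count, no real per-cpu machinery):

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

static bool watchdog_enabled = true;
static bool nmi_touch[NR_CPUS];

static void touch_nmi_watchdog(void)
{
    if (watchdog_enabled) {
        /* touch every present CPU so none of them falsely reports
         * a hard lockup across a long, legitimate stall */
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
            if (!nmi_touch[cpu])
                nmi_touch[cpu] = true;
    }
}

int main(void)
{
    touch_nmi_watchdog();
    for (int cpu = 0; cpu < NR_CPUS; cpu++)
        printf("cpu%d touched=%d\n", cpu, nmi_touch[cpu]);
    return 0;
}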
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 8bd600c020e5..727f24e563ae 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
| @@ -90,7 +90,8 @@ enum { | |||
| 90 | /* | 90 | /* |
| 91 | * Structure fields follow one of the following exclusion rules. | 91 | * Structure fields follow one of the following exclusion rules. |
| 92 | * | 92 | * |
| 93 | * I: Set during initialization and read-only afterwards. | 93 | * I: Modifiable by initialization/destruction paths and read-only for |
| 94 | * everyone else. | ||
| 94 | * | 95 | * |
| 95 | * P: Preemption protected. Disabling preemption is enough and should | 96 | * P: Preemption protected. Disabling preemption is enough and should |
| 96 | * only be modified and accessed from the local cpu. | 97 | * only be modified and accessed from the local cpu. |
| @@ -198,7 +199,7 @@ typedef cpumask_var_t mayday_mask_t; | |||
| 198 | cpumask_test_and_set_cpu((cpu), (mask)) | 199 | cpumask_test_and_set_cpu((cpu), (mask)) |
| 199 | #define mayday_clear_cpu(cpu, mask) cpumask_clear_cpu((cpu), (mask)) | 200 | #define mayday_clear_cpu(cpu, mask) cpumask_clear_cpu((cpu), (mask)) |
| 200 | #define for_each_mayday_cpu(cpu, mask) for_each_cpu((cpu), (mask)) | 201 | #define for_each_mayday_cpu(cpu, mask) for_each_cpu((cpu), (mask)) |
| 201 | #define alloc_mayday_mask(maskp, gfp) alloc_cpumask_var((maskp), (gfp)) | 202 | #define alloc_mayday_mask(maskp, gfp) zalloc_cpumask_var((maskp), (gfp)) |
| 202 | #define free_mayday_mask(mask) free_cpumask_var((mask)) | 203 | #define free_mayday_mask(mask) free_cpumask_var((mask)) |
| 203 | #else | 204 | #else |
| 204 | typedef unsigned long mayday_mask_t; | 205 | typedef unsigned long mayday_mask_t; |
| @@ -943,10 +944,14 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, | |||
| 943 | struct global_cwq *gcwq; | 944 | struct global_cwq *gcwq; |
| 944 | struct cpu_workqueue_struct *cwq; | 945 | struct cpu_workqueue_struct *cwq; |
| 945 | struct list_head *worklist; | 946 | struct list_head *worklist; |
| 947 | unsigned int work_flags; | ||
| 946 | unsigned long flags; | 948 | unsigned long flags; |
| 947 | 949 | ||
| 948 | debug_work_activate(work); | 950 | debug_work_activate(work); |
| 949 | 951 | ||
| 952 | if (WARN_ON_ONCE(wq->flags & WQ_DYING)) | ||
| 953 | return; | ||
| 954 | |||
| 950 | /* determine gcwq to use */ | 955 | /* determine gcwq to use */ |
| 951 | if (!(wq->flags & WQ_UNBOUND)) { | 956 | if (!(wq->flags & WQ_UNBOUND)) { |
| 952 | struct global_cwq *last_gcwq; | 957 | struct global_cwq *last_gcwq; |
| @@ -989,14 +994,17 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, | |||
| 989 | BUG_ON(!list_empty(&work->entry)); | 994 | BUG_ON(!list_empty(&work->entry)); |
| 990 | 995 | ||
| 991 | cwq->nr_in_flight[cwq->work_color]++; | 996 | cwq->nr_in_flight[cwq->work_color]++; |
| 997 | work_flags = work_color_to_flags(cwq->work_color); | ||
| 992 | 998 | ||
| 993 | if (likely(cwq->nr_active < cwq->max_active)) { | 999 | if (likely(cwq->nr_active < cwq->max_active)) { |
| 994 | cwq->nr_active++; | 1000 | cwq->nr_active++; |
| 995 | worklist = gcwq_determine_ins_pos(gcwq, cwq); | 1001 | worklist = gcwq_determine_ins_pos(gcwq, cwq); |
| 996 | } else | 1002 | } else { |
| 1003 | work_flags |= WORK_STRUCT_DELAYED; | ||
| 997 | worklist = &cwq->delayed_works; | 1004 | worklist = &cwq->delayed_works; |
| 1005 | } | ||
| 998 | 1006 | ||
| 999 | insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color)); | 1007 | insert_work(cwq, work, worklist, work_flags); |
| 1000 | 1008 | ||
| 1001 | spin_unlock_irqrestore(&gcwq->lock, flags); | 1009 | spin_unlock_irqrestore(&gcwq->lock, flags); |
| 1002 | } | 1010 | } |
| @@ -1215,6 +1223,7 @@ static void worker_leave_idle(struct worker *worker) | |||
| 1215 | * bound), %false if offline. | 1223 | * bound), %false if offline. |
| 1216 | */ | 1224 | */ |
| 1217 | static bool worker_maybe_bind_and_lock(struct worker *worker) | 1225 | static bool worker_maybe_bind_and_lock(struct worker *worker) |
| 1226 | __acquires(&gcwq->lock) | ||
| 1218 | { | 1227 | { |
| 1219 | struct global_cwq *gcwq = worker->gcwq; | 1228 | struct global_cwq *gcwq = worker->gcwq; |
| 1220 | struct task_struct *task = worker->task; | 1229 | struct task_struct *task = worker->task; |
| @@ -1488,6 +1497,8 @@ static void gcwq_mayday_timeout(unsigned long __gcwq) | |||
| 1488 | * otherwise. | 1497 | * otherwise. |
| 1489 | */ | 1498 | */ |
| 1490 | static bool maybe_create_worker(struct global_cwq *gcwq) | 1499 | static bool maybe_create_worker(struct global_cwq *gcwq) |
| 1500 | __releases(&gcwq->lock) | ||
| 1501 | __acquires(&gcwq->lock) | ||
| 1491 | { | 1502 | { |
| 1492 | if (!need_to_create_worker(gcwq)) | 1503 | if (!need_to_create_worker(gcwq)) |
| 1493 | return false; | 1504 | return false; |
| @@ -1662,6 +1673,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq) | |||
| 1662 | struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq); | 1673 | struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq); |
| 1663 | 1674 | ||
| 1664 | move_linked_works(work, pos, NULL); | 1675 | move_linked_works(work, pos, NULL); |
| 1676 | __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work)); | ||
| 1665 | cwq->nr_active++; | 1677 | cwq->nr_active++; |
| 1666 | } | 1678 | } |
| 1667 | 1679 | ||
| @@ -1669,6 +1681,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq) | |||
| 1669 | * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight | 1681 | * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight |
| 1670 | * @cwq: cwq of interest | 1682 | * @cwq: cwq of interest |
| 1671 | * @color: color of work which left the queue | 1683 | * @color: color of work which left the queue |
| 1684 | * @delayed: for a delayed work | ||
| 1672 | * | 1685 | * |
| 1673 | * A work either has completed or is removed from pending queue, | 1686 | * A work either has completed or is removed from pending queue, |
| 1674 | * decrement nr_in_flight of its cwq and handle workqueue flushing. | 1687 | * decrement nr_in_flight of its cwq and handle workqueue flushing. |
| @@ -1676,19 +1689,22 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq) | |||
| 1676 | * CONTEXT: | 1689 | * CONTEXT: |
| 1677 | * spin_lock_irq(gcwq->lock). | 1690 | * spin_lock_irq(gcwq->lock). |
| 1678 | */ | 1691 | */ |
| 1679 | static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color) | 1692 | static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color, |
| 1693 | bool delayed) | ||
| 1680 | { | 1694 | { |
| 1681 | /* ignore uncolored works */ | 1695 | /* ignore uncolored works */ |
| 1682 | if (color == WORK_NO_COLOR) | 1696 | if (color == WORK_NO_COLOR) |
| 1683 | return; | 1697 | return; |
| 1684 | 1698 | ||
| 1685 | cwq->nr_in_flight[color]--; | 1699 | cwq->nr_in_flight[color]--; |
| 1686 | cwq->nr_active--; | ||
| 1687 | 1700 | ||
| 1688 | if (!list_empty(&cwq->delayed_works)) { | 1701 | if (!delayed) { |
| 1689 | /* one down, submit a delayed one */ | 1702 | cwq->nr_active--; |
| 1690 | if (cwq->nr_active < cwq->max_active) | 1703 | if (!list_empty(&cwq->delayed_works)) { |
| 1691 | cwq_activate_first_delayed(cwq); | 1704 | /* one down, submit a delayed one */ |
| 1705 | if (cwq->nr_active < cwq->max_active) | ||
| 1706 | cwq_activate_first_delayed(cwq); | ||
| 1707 | } | ||
| 1692 | } | 1708 | } |
| 1693 | 1709 | ||
| 1694 | /* is flush in progress and are we at the flushing tip? */ | 1710 | /* is flush in progress and are we at the flushing tip? */ |
| @@ -1725,6 +1741,8 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color) | |||
| 1725 | * spin_lock_irq(gcwq->lock) which is released and regrabbed. | 1741 | * spin_lock_irq(gcwq->lock) which is released and regrabbed. |
| 1726 | */ | 1742 | */ |
| 1727 | static void process_one_work(struct worker *worker, struct work_struct *work) | 1743 | static void process_one_work(struct worker *worker, struct work_struct *work) |
| 1744 | __releases(&gcwq->lock) | ||
| 1745 | __acquires(&gcwq->lock) | ||
| 1728 | { | 1746 | { |
| 1729 | struct cpu_workqueue_struct *cwq = get_work_cwq(work); | 1747 | struct cpu_workqueue_struct *cwq = get_work_cwq(work); |
| 1730 | struct global_cwq *gcwq = cwq->gcwq; | 1748 | struct global_cwq *gcwq = cwq->gcwq; |
| @@ -1823,7 +1841,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work) | |||
| 1823 | hlist_del_init(&worker->hentry); | 1841 | hlist_del_init(&worker->hentry); |
| 1824 | worker->current_work = NULL; | 1842 | worker->current_work = NULL; |
| 1825 | worker->current_cwq = NULL; | 1843 | worker->current_cwq = NULL; |
| 1826 | cwq_dec_nr_in_flight(cwq, work_color); | 1844 | cwq_dec_nr_in_flight(cwq, work_color, false); |
| 1827 | } | 1845 | } |
| 1828 | 1846 | ||
| 1829 | /** | 1847 | /** |
| @@ -2388,7 +2406,8 @@ static int try_to_grab_pending(struct work_struct *work) | |||
| 2388 | debug_work_deactivate(work); | 2406 | debug_work_deactivate(work); |
| 2389 | list_del_init(&work->entry); | 2407 | list_del_init(&work->entry); |
| 2390 | cwq_dec_nr_in_flight(get_work_cwq(work), | 2408 | cwq_dec_nr_in_flight(get_work_cwq(work), |
| 2391 | get_work_color(work)); | 2409 | get_work_color(work), |
| 2410 | *work_data_bits(work) & WORK_STRUCT_DELAYED); | ||
| 2392 | ret = 1; | 2411 | ret = 1; |
| 2393 | } | 2412 | } |
| 2394 | } | 2413 | } |
| @@ -2791,7 +2810,6 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name, | |||
| 2791 | if (IS_ERR(rescuer->task)) | 2810 | if (IS_ERR(rescuer->task)) |
| 2792 | goto err; | 2811 | goto err; |
| 2793 | 2812 | ||
| 2794 | wq->rescuer = rescuer; | ||
| 2795 | rescuer->task->flags |= PF_THREAD_BOUND; | 2813 | rescuer->task->flags |= PF_THREAD_BOUND; |
| 2796 | wake_up_process(rescuer->task); | 2814 | wake_up_process(rescuer->task); |
| 2797 | } | 2815 | } |
| @@ -2833,6 +2851,7 @@ void destroy_workqueue(struct workqueue_struct *wq) | |||
| 2833 | { | 2851 | { |
| 2834 | unsigned int cpu; | 2852 | unsigned int cpu; |
| 2835 | 2853 | ||
| 2854 | wq->flags |= WQ_DYING; | ||
| 2836 | flush_workqueue(wq); | 2855 | flush_workqueue(wq); |
| 2837 | 2856 | ||
| 2838 | /* | 2857 | /* |
| @@ -2857,6 +2876,7 @@ void destroy_workqueue(struct workqueue_struct *wq) | |||
| 2857 | if (wq->flags & WQ_RESCUER) { | 2876 | if (wq->flags & WQ_RESCUER) { |
| 2858 | kthread_stop(wq->rescuer->task); | 2877 | kthread_stop(wq->rescuer->task); |
| 2859 | free_mayday_mask(wq->mayday_mask); | 2878 | free_mayday_mask(wq->mayday_mask); |
| 2879 | kfree(wq->rescuer); | ||
| 2860 | } | 2880 | } |
| 2861 | 2881 | ||
| 2862 | free_cwqs(wq); | 2882 | free_cwqs(wq); |
| @@ -3239,6 +3259,8 @@ static int __cpuinit trustee_thread(void *__gcwq) | |||
| 3239 | * multiple times. To be used by cpu_callback. | 3259 | * multiple times. To be used by cpu_callback. |
| 3240 | */ | 3260 | */ |
| 3241 | static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state) | 3261 | static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state) |
| 3262 | __releases(&gcwq->lock) | ||
| 3263 | __acquires(&gcwq->lock) | ||
| 3242 | { | 3264 | { |
| 3243 | if (!(gcwq->trustee_state == state || | 3265 | if (!(gcwq->trustee_state == state || |
| 3244 | gcwq->trustee_state == TRUSTEE_DONE)) { | 3266 | gcwq->trustee_state == TRUSTEE_DONE)) { |
| @@ -3545,8 +3567,7 @@ static int __init init_workqueues(void) | |||
| 3545 | spin_lock_init(&gcwq->lock); | 3567 | spin_lock_init(&gcwq->lock); |
| 3546 | INIT_LIST_HEAD(&gcwq->worklist); | 3568 | INIT_LIST_HEAD(&gcwq->worklist); |
| 3547 | gcwq->cpu = cpu; | 3569 | gcwq->cpu = cpu; |
| 3548 | if (cpu == WORK_CPU_UNBOUND) | 3570 | gcwq->flags |= GCWQ_DISASSOCIATED; |
| 3549 | gcwq->flags |= GCWQ_DISASSOCIATED; | ||
| 3550 | 3571 | ||
| 3551 | INIT_LIST_HEAD(&gcwq->idle_list); | 3572 | INIT_LIST_HEAD(&gcwq->idle_list); |
| 3552 | for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) | 3573 | for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) |
| @@ -3570,6 +3591,8 @@ static int __init init_workqueues(void) | |||
| 3570 | struct global_cwq *gcwq = get_gcwq(cpu); | 3591 | struct global_cwq *gcwq = get_gcwq(cpu); |
| 3571 | struct worker *worker; | 3592 | struct worker *worker; |
| 3572 | 3593 | ||
| 3594 | if (cpu != WORK_CPU_UNBOUND) | ||
| 3595 | gcwq->flags &= ~GCWQ_DISASSOCIATED; | ||
| 3573 | worker = create_worker(gcwq, true); | 3596 | worker = create_worker(gcwq, true); |
| 3574 | BUG_ON(!worker); | 3597 | BUG_ON(!worker); |
| 3575 | spin_lock_irq(&gcwq->lock); | 3598 | spin_lock_irq(&gcwq->lock); |
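The workqueue hunks above tag works that overflow max_active with WORK_STRUCT_DELAYED, clear that tag when a delayed work is activated, and teach cwq_dec_nr_in_flight() not to drop nr_active for works that never held an active slot (plus a WARN when queueing on a dying workqueue). A small user-space model of that accounting (illustrative only, not the workqueue API):

#include <stdbool.h>
#include <stdio.h>

#define MAX_ACTIVE 2

struct work { bool delayed; bool done; };

static int nr_active, nr_delayed;
static struct work *delayed_list[8];

static void queue_work(struct work *w)
{
    if (nr_active < MAX_ACTIVE) {
        nr_active++;
        w->delayed = false;
    } else {
        w->delayed = true;               /* WORK_STRUCT_DELAYED analogue */
        delayed_list[nr_delayed++] = w;
    }
}

static void work_done(struct work *w)
{
    if (!w->delayed) {                   /* only active works release a slot */
        nr_active--;
        if (nr_delayed) {                /* promote one parked work */
            struct work *next = delayed_list[--nr_delayed];
            next->delayed = false;       /* clear the DELAYED mark */
            nr_active++;
        }
    }
    w->done = true;
}

int main(void)
{
    struct work a = {0}, b = {0}, c = {0};

    queue_work(&a);
    queue_work(&b);
    queue_work(&c);                      /* exceeds max_active: parked */
    printf("active=%d delayed=%d\n", nr_active, nr_delayed);
    work_done(&a);                       /* c is promoted here */
    work_done(&c);                       /* c held a slot, so it is released */
    printf("active=%d delayed=%d\n", nr_active, nr_delayed);
    work_done(&b);
    printf("active=%d delayed=%d\n", nr_active, nr_delayed);
    return 0;
}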
diff --git a/lib/raid6/.gitignore b/lib/raid6/.gitignore new file mode 100644 index 000000000000..162becacf97c --- /dev/null +++ b/lib/raid6/.gitignore | |||
| @@ -0,0 +1,4 @@ | |||
| 1 | mktables | ||
| 2 | altivec*.c | ||
| 3 | int*.c | ||
| 4 | tables.c | ||
diff --git a/lib/scatterlist.c b/lib/scatterlist.c index a5ec42868f99..4ceb05d772ae 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c | |||
| @@ -248,8 +248,18 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents, | |||
| 248 | left -= sg_size; | 248 | left -= sg_size; |
| 249 | 249 | ||
| 250 | sg = alloc_fn(alloc_size, gfp_mask); | 250 | sg = alloc_fn(alloc_size, gfp_mask); |
| 251 | if (unlikely(!sg)) | 251 | if (unlikely(!sg)) { |
| 252 | return -ENOMEM; | 252 | /* |
| 253 | * Adjust entry count to reflect that the last | ||
| 254 | * entry of the previous table won't be used for | ||
| 255 | * linkage. Without this, sg_kfree() may get | ||
| 256 | * confused. | ||
| 257 | */ | ||
| 258 | if (prv) | ||
| 259 | table->nents = ++table->orig_nents; | ||
| 260 | |||
| 261 | return -ENOMEM; | ||
| 262 | } | ||
| 253 | 263 | ||
| 254 | sg_init_table(sg, alloc_size); | 264 | sg_init_table(sg, alloc_size); |
| 255 | table->nents = table->orig_nents += sg_size; | 265 | table->nents = table->orig_nents += sg_size; |
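The scatterlist fix above adjusts the entry count when a later chunk of a chained table cannot be allocated, because the previous chunk's last slot was reserved for the chain link and now stays a plain entry. A user-space arithmetic sketch of that bookkeeping (illustrative; MAX_ENTS is an assumed chunk size standing in for SG_MAX_SINGLE_ALLOC):

#include <stdio.h>

#define MAX_ENTS 8U

int main(void)
{
    unsigned int want = 20, left = want, orig_nents = 0;
    unsigned int chunk = 0, fail_at = 3;     /* pretend the 3rd alloc fails */
    int prv = 0;

    while (left) {
        /* non-final chunks donate one slot to the chain link */
        unsigned int sg_size = (left <= MAX_ENTS) ? left : MAX_ENTS - 1;

        left -= sg_size;
        if (++chunk == fail_at) {
            if (prv)
                orig_nents++;            /* reclaim the unused link slot */
            printf("alloc failed: free a table of %u entries\n", orig_nents);
            return 1;
        }
        orig_nents += sg_size;
        prv = 1;
    }
    printf("built a table of %u entries\n", orig_nents);
    return 0;
}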
diff --git a/mm/Kconfig b/mm/Kconfig index f4e516e9c37c..f0fb9124e410 100644 --- a/mm/Kconfig +++ b/mm/Kconfig | |||
| @@ -189,7 +189,7 @@ config COMPACTION | |||
| 189 | config MIGRATION | 189 | config MIGRATION |
| 190 | bool "Page migration" | 190 | bool "Page migration" |
| 191 | def_bool y | 191 | def_bool y |
| 192 | depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE | 192 | depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION |
| 193 | help | 193 | help |
| 194 | Allows the migration of the physical location of pages of processes | 194 | Allows the migration of the physical location of pages of processes |
| 195 | while the virtual addresses are not changed. This is useful in | 195 | while the virtual addresses are not changed. This is useful in |
diff --git a/mm/backing-dev.c b/mm/backing-dev.c index eaa4a5bbe063..c2bf86f470ed 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c | |||
| @@ -445,8 +445,8 @@ static int bdi_forker_thread(void *ptr) | |||
| 445 | switch (action) { | 445 | switch (action) { |
| 446 | case FORK_THREAD: | 446 | case FORK_THREAD: |
| 447 | __set_current_state(TASK_RUNNING); | 447 | __set_current_state(TASK_RUNNING); |
| 448 | task = kthread_run(bdi_writeback_thread, &bdi->wb, "flush-%s", | 448 | task = kthread_create(bdi_writeback_thread, &bdi->wb, |
| 449 | dev_name(bdi->dev)); | 449 | "flush-%s", dev_name(bdi->dev)); |
| 450 | if (IS_ERR(task)) { | 450 | if (IS_ERR(task)) { |
| 451 | /* | 451 | /* |
| 452 | * If thread creation fails, force writeout of | 452 | * If thread creation fails, force writeout of |
| @@ -457,10 +457,13 @@ static int bdi_forker_thread(void *ptr) | |||
| 457 | /* | 457 | /* |
| 458 | * The spinlock makes sure we do not lose | 458 | * The spinlock makes sure we do not lose |
| 459 | * wake-ups when racing with 'bdi_queue_work()'. | 459 | * wake-ups when racing with 'bdi_queue_work()'. |
| 460 | * And as soon as the bdi thread is visible, we | ||
| 461 | * can start it. | ||
| 460 | */ | 462 | */ |
| 461 | spin_lock_bh(&bdi->wb_lock); | 463 | spin_lock_bh(&bdi->wb_lock); |
| 462 | bdi->wb.task = task; | 464 | bdi->wb.task = task; |
| 463 | spin_unlock_bh(&bdi->wb_lock); | 465 | spin_unlock_bh(&bdi->wb_lock); |
| 466 | wake_up_process(task); | ||
| 464 | } | 467 | } |
| 465 | break; | 468 | break; |
| 466 | 469 | ||
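The backing-dev change above switches from kthread_run() to kthread_create() plus a later wake_up_process(), so bdi->wb.task is published under wb_lock before the writeback thread can possibly run. A pthread sketch of the same create-publish-then-release idea (illustrative; the condition-variable gate stands in for a created-but-not-yet-woken kernel thread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t gate = PTHREAD_COND_INITIALIZER;
static int published;                    /* creator has stored the handle */
static pthread_t writeback_task;         /* analogue of bdi->wb.task */

static void *writeback_thread(void *arg)
{
    pthread_mutex_lock(&lock);
    while (!published)                   /* wait until we are visible */
        pthread_cond_wait(&gate, &lock);
    pthread_mutex_unlock(&lock);
    printf("writeback thread running, handle already visible\n");
    return NULL;
}

int main(void)
{
    pthread_t task;

    pthread_create(&task, NULL, writeback_thread, NULL); /* "kthread_create" */

    pthread_mutex_lock(&lock);           /* publish under the lock ... */
    writeback_task = task;
    published = 1;
    pthread_mutex_unlock(&lock);

    pthread_cond_broadcast(&gate);       /* ... then "wake_up_process" */
    pthread_join(task, NULL);
    return 0;
}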
diff --git a/mm/bounce.c b/mm/bounce.c index 13b6dad1eed2..1481de68184b 100644 --- a/mm/bounce.c +++ b/mm/bounce.c | |||
| @@ -116,8 +116,8 @@ static void copy_to_high_bio_irq(struct bio *to, struct bio *from) | |||
| 116 | */ | 116 | */ |
| 117 | vfrom = page_address(fromvec->bv_page) + tovec->bv_offset; | 117 | vfrom = page_address(fromvec->bv_page) + tovec->bv_offset; |
| 118 | 118 | ||
| 119 | flush_dcache_page(tovec->bv_page); | ||
| 120 | bounce_copy_vec(tovec, vfrom); | 119 | bounce_copy_vec(tovec, vfrom); |
| 120 | flush_dcache_page(tovec->bv_page); | ||
| 121 | } | 121 | } |
| 122 | } | 122 | } |
| 123 | 123 | ||
diff --git a/mm/compaction.c b/mm/compaction.c index 94cce51b0b35..4d709ee59013 100644 --- a/mm/compaction.c +++ b/mm/compaction.c | |||
| @@ -214,15 +214,16 @@ static void acct_isolated(struct zone *zone, struct compact_control *cc) | |||
| 214 | /* Similar to reclaim, but different enough that they don't share logic */ | 214 | /* Similar to reclaim, but different enough that they don't share logic */ |
| 215 | static bool too_many_isolated(struct zone *zone) | 215 | static bool too_many_isolated(struct zone *zone) |
| 216 | { | 216 | { |
| 217 | 217 | unsigned long active, inactive, isolated; | |
| 218 | unsigned long inactive, isolated; | ||
| 219 | 218 | ||
| 220 | inactive = zone_page_state(zone, NR_INACTIVE_FILE) + | 219 | inactive = zone_page_state(zone, NR_INACTIVE_FILE) + |
| 221 | zone_page_state(zone, NR_INACTIVE_ANON); | 220 | zone_page_state(zone, NR_INACTIVE_ANON); |
| 221 | active = zone_page_state(zone, NR_ACTIVE_FILE) + | ||
| 222 | zone_page_state(zone, NR_ACTIVE_ANON); | ||
| 222 | isolated = zone_page_state(zone, NR_ISOLATED_FILE) + | 223 | isolated = zone_page_state(zone, NR_ISOLATED_FILE) + |
| 223 | zone_page_state(zone, NR_ISOLATED_ANON); | 224 | zone_page_state(zone, NR_ISOLATED_ANON); |
| 224 | 225 | ||
| 225 | return isolated > inactive; | 226 | return isolated > (inactive + active) / 2; |
| 226 | } | 227 | } |
| 227 | 228 | ||
| 228 | /* | 229 | /* |
diff --git a/mm/ksm.c b/mm/ksm.c --- a/mm/ksm.c +++ b/mm/ksm.c | |||
| @@ -1504,8 +1504,6 @@ struct page *ksm_does_need_to_copy(struct page *page, | |||
| 1504 | { | 1504 | { |
| 1505 | struct page *new_page; | 1505 | struct page *new_page; |
| 1506 | 1506 | ||
| 1507 | unlock_page(page); /* any racers will COW it, not modify it */ | ||
| 1508 | |||
| 1509 | new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); | 1507 | new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); |
| 1510 | if (new_page) { | 1508 | if (new_page) { |
| 1511 | copy_user_highpage(new_page, page, address, vma); | 1509 | copy_user_highpage(new_page, page, address, vma); |
| @@ -1521,7 +1519,6 @@ struct page *ksm_does_need_to_copy(struct page *page, | |||
| 1521 | add_page_to_unevictable_list(new_page); | 1519 | add_page_to_unevictable_list(new_page); |
| 1522 | } | 1520 | } |
| 1523 | 1521 | ||
| 1524 | page_cache_release(page); | ||
| 1525 | return new_page; | 1522 | return new_page; |
| 1526 | } | 1523 | } |
| 1527 | 1524 | ||
diff --git a/mm/memory.c b/mm/memory.c index 6b2ab1051851..71b161b73bb5 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
| @@ -2623,7 +2623,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 2623 | unsigned int flags, pte_t orig_pte) | 2623 | unsigned int flags, pte_t orig_pte) |
| 2624 | { | 2624 | { |
| 2625 | spinlock_t *ptl; | 2625 | spinlock_t *ptl; |
| 2626 | struct page *page; | 2626 | struct page *page, *swapcache = NULL; |
| 2627 | swp_entry_t entry; | 2627 | swp_entry_t entry; |
| 2628 | pte_t pte; | 2628 | pte_t pte; |
| 2629 | struct mem_cgroup *ptr = NULL; | 2629 | struct mem_cgroup *ptr = NULL; |
| @@ -2679,10 +2679,23 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 2679 | lock_page(page); | 2679 | lock_page(page); |
| 2680 | delayacct_clear_flag(DELAYACCT_PF_SWAPIN); | 2680 | delayacct_clear_flag(DELAYACCT_PF_SWAPIN); |
| 2681 | 2681 | ||
| 2682 | page = ksm_might_need_to_copy(page, vma, address); | 2682 | /* |
| 2683 | if (!page) { | 2683 | * Make sure try_to_free_swap didn't release the swapcache |
| 2684 | ret = VM_FAULT_OOM; | 2684 | * from under us. The page pin isn't enough to prevent that. |
| 2685 | goto out; | 2685 | */ |
| 2686 | if (unlikely(!PageSwapCache(page))) | ||
| 2687 | goto out_page; | ||
| 2688 | |||
| 2689 | if (ksm_might_need_to_copy(page, vma, address)) { | ||
| 2690 | swapcache = page; | ||
| 2691 | page = ksm_does_need_to_copy(page, vma, address); | ||
| 2692 | |||
| 2693 | if (unlikely(!page)) { | ||
| 2694 | ret = VM_FAULT_OOM; | ||
| 2695 | page = swapcache; | ||
| 2696 | swapcache = NULL; | ||
| 2697 | goto out_page; | ||
| 2698 | } | ||
| 2686 | } | 2699 | } |
| 2687 | 2700 | ||
| 2688 | if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) { | 2701 | if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) { |
| @@ -2735,6 +2748,18 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 2735 | if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page)) | 2748 | if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page)) |
| 2736 | try_to_free_swap(page); | 2749 | try_to_free_swap(page); |
| 2737 | unlock_page(page); | 2750 | unlock_page(page); |
| 2751 | if (swapcache) { | ||
| 2752 | /* | ||
| 2753 | * Hold the lock to avoid the swap entry to be reused | ||
| 2754 | * until we take the PT lock for the pte_same() check | ||
| 2755 | * (to avoid false positives from pte_same). For | ||
| 2756 | * further safety release the lock after the swap_free | ||
| 2757 | * so that the swap count won't change under a | ||
| 2758 | * parallel locked swapcache. | ||
| 2759 | */ | ||
| 2760 | unlock_page(swapcache); | ||
| 2761 | page_cache_release(swapcache); | ||
| 2762 | } | ||
| 2738 | 2763 | ||
| 2739 | if (flags & FAULT_FLAG_WRITE) { | 2764 | if (flags & FAULT_FLAG_WRITE) { |
| 2740 | ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte); | 2765 | ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte); |
| @@ -2756,6 +2781,10 @@ out_page: | |||
| 2756 | unlock_page(page); | 2781 | unlock_page(page); |
| 2757 | out_release: | 2782 | out_release: |
| 2758 | page_cache_release(page); | 2783 | page_cache_release(page); |
| 2784 | if (swapcache) { | ||
| 2785 | unlock_page(swapcache); | ||
| 2786 | page_cache_release(swapcache); | ||
| 2787 | } | ||
| 2759 | return ret; | 2788 | return ret; |
| 2760 | } | 2789 | } |
| 2761 | 2790 | ||
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index a4cfcdc00455..dd186c1a5d53 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c | |||
| @@ -584,19 +584,19 @@ static inline int pageblock_free(struct page *page) | |||
| 584 | /* Return the start of the next active pageblock after a given page */ | 584 | /* Return the start of the next active pageblock after a given page */ |
| 585 | static struct page *next_active_pageblock(struct page *page) | 585 | static struct page *next_active_pageblock(struct page *page) |
| 586 | { | 586 | { |
| 587 | int pageblocks_stride; | ||
| 588 | |||
| 589 | /* Ensure the starting page is pageblock-aligned */ | 587 | /* Ensure the starting page is pageblock-aligned */ |
| 590 | BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1)); | 588 | BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1)); |
| 591 | 589 | ||
| 592 | /* Move forward by at least 1 * pageblock_nr_pages */ | ||
| 593 | pageblocks_stride = 1; | ||
| 594 | |||
| 595 | /* If the entire pageblock is free, move to the end of free page */ | 590 | /* If the entire pageblock is free, move to the end of free page */ |
| 596 | if (pageblock_free(page)) | 591 | if (pageblock_free(page)) { |
| 597 | pageblocks_stride += page_order(page) - pageblock_order; | 592 | int order; |
| 593 | /* be careful. we don't have locks, page_order can be changed.*/ | ||
| 594 | order = page_order(page); | ||
| 595 | if ((order < MAX_ORDER) && (order >= pageblock_order)) | ||
| 596 | return page + (1 << order); | ||
| 597 | } | ||
| 598 | 598 | ||
| 599 | return page + (pageblocks_stride * pageblock_nr_pages); | 599 | return page + pageblock_nr_pages; |
| 600 | } | 600 | } |
| 601 | 601 | ||
| 602 | /* Checks if this range of memory is likely to be hot-removable. */ | 602 | /* Checks if this range of memory is likely to be hot-removable. */ |
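The memory-hotplug hunk above replaces the unbounded stride computation in next_active_pageblock() with a bounds check, since page_order() is read without locks and can return garbage. A small arithmetic sketch of the new stride decision (illustrative constants):

#include <stdio.h>

#define MAX_ORDER           11
#define PAGEBLOCK_ORDER     10
#define PAGEBLOCK_NR_PAGES  (1UL << PAGEBLOCK_ORDER)

static unsigned long next_pfn(unsigned long pfn, int block_is_free, int order)
{
    /* only trust the racy order if it is still a sane value */
    if (block_is_free && order < MAX_ORDER && order >= PAGEBLOCK_ORDER)
        return pfn + (1UL << order);     /* skip the whole free buddy */
    return pfn + PAGEBLOCK_NR_PAGES;     /* one pageblock at a time */
}

int main(void)
{
    /* racy read returned garbage: stride stays one pageblock */
    printf("%lu\n", next_pfn(0, 1, 57));
    /* sane order: skip the whole order-10 buddy */
    printf("%lu\n", next_pfn(0, 1, 10));
    return 0;
}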
diff --git a/mm/mlock.c b/mm/mlock.c index cbae7c5b9568..b70919ce4f72 100644 --- a/mm/mlock.c +++ b/mm/mlock.c | |||
| @@ -135,12 +135,6 @@ void munlock_vma_page(struct page *page) | |||
| 135 | } | 135 | } |
| 136 | } | 136 | } |
| 137 | 137 | ||
| 138 | /* Is the vma a continuation of the stack vma above it? */ | ||
| 139 | static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr) | ||
| 140 | { | ||
| 141 | return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); | ||
| 142 | } | ||
| 143 | |||
| 144 | static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr) | 138 | static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr) |
| 145 | { | 139 | { |
| 146 | return (vma->vm_flags & VM_GROWSDOWN) && | 140 | return (vma->vm_flags & VM_GROWSDOWN) && |
diff --git a/mm/mmzone.c b/mm/mmzone.c index f5b7d1760213..e35bfb82c855 100644 --- a/mm/mmzone.c +++ b/mm/mmzone.c | |||
| @@ -87,3 +87,24 @@ int memmap_valid_within(unsigned long pfn, | |||
| 87 | return 1; | 87 | return 1; |
| 88 | } | 88 | } |
| 89 | #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */ | 89 | #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */ |
| 90 | |||
| 91 | #ifdef CONFIG_SMP | ||
| 92 | /* Called when a more accurate view of NR_FREE_PAGES is needed */ | ||
| 93 | unsigned long zone_nr_free_pages(struct zone *zone) | ||
| 94 | { | ||
| 95 | unsigned long nr_free_pages = zone_page_state(zone, NR_FREE_PAGES); | ||
| 96 | |||
| 97 | /* | ||
| 98 | * While kswapd is awake, it is considered the zone is under some | ||
| 99 | * memory pressure. Under pressure, there is a risk that | ||
| 100 | * per-cpu-counter-drift will allow the min watermark to be breached | ||
| 101 | * potentially causing a live-lock. While kswapd is awake and | ||
| 102 | * free pages are low, get a better estimate for free pages | ||
| 103 | */ | ||
| 104 | if (nr_free_pages < zone->percpu_drift_mark && | ||
| 105 | !waitqueue_active(&zone->zone_pgdat->kswapd_wait)) | ||
| 106 | return zone_page_state_snapshot(zone, NR_FREE_PAGES); | ||
| 107 | |||
| 108 | return nr_free_pages; | ||
| 109 | } | ||
| 110 | #endif /* CONFIG_SMP */ | ||
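zone_nr_free_pages() above returns the accurate but slower per-cpu snapshot only when NR_FREE_PAGES has dropped below the zone's drift mark while kswapd is awake. A user-space analogue of why the snapshot differs from the cheap read (illustrative numbers; the per-CPU deltas model vmstat differentials that have not been folded back yet):

#include <stdio.h>

#define NR_CPUS    4
#define THRESHOLD  32

static long global_count = 1000;
static long cpu_delta[NR_CPUS] = { 30, -25, 31, -28 };  /* not yet folded */

static long fast_read(void)
{
    return global_count;                 /* like zone_page_state() */
}

static long snapshot_read(void)
{
    long v = global_count;               /* like zone_page_state_snapshot() */

    for (int cpu = 0; cpu < NR_CPUS; cpu++)
        v += cpu_delta[cpu];             /* fold in outstanding deltas */
    return v;
}

int main(void)
{
    printf("fast=%ld snapshot=%ld max drift=%d\n",
           fast_read(), snapshot_read(), NR_CPUS * THRESHOLD);
    return 0;
}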
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index a9649f4b261e..a8cfa9cc6e86 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
| @@ -588,13 +588,13 @@ static void free_pcppages_bulk(struct zone *zone, int count, | |||
| 588 | { | 588 | { |
| 589 | int migratetype = 0; | 589 | int migratetype = 0; |
| 590 | int batch_free = 0; | 590 | int batch_free = 0; |
| 591 | int to_free = count; | ||
| 591 | 592 | ||
| 592 | spin_lock(&zone->lock); | 593 | spin_lock(&zone->lock); |
| 593 | zone->all_unreclaimable = 0; | 594 | zone->all_unreclaimable = 0; |
| 594 | zone->pages_scanned = 0; | 595 | zone->pages_scanned = 0; |
| 595 | 596 | ||
| 596 | __mod_zone_page_state(zone, NR_FREE_PAGES, count); | 597 | while (to_free) { |
| 597 | while (count) { | ||
| 598 | struct page *page; | 598 | struct page *page; |
| 599 | struct list_head *list; | 599 | struct list_head *list; |
| 600 | 600 | ||
| @@ -619,8 +619,9 @@ static void free_pcppages_bulk(struct zone *zone, int count, | |||
| 619 | /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */ | 619 | /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */ |
| 620 | __free_one_page(page, zone, 0, page_private(page)); | 620 | __free_one_page(page, zone, 0, page_private(page)); |
| 621 | trace_mm_page_pcpu_drain(page, 0, page_private(page)); | 621 | trace_mm_page_pcpu_drain(page, 0, page_private(page)); |
| 622 | } while (--count && --batch_free && !list_empty(list)); | 622 | } while (--to_free && --batch_free && !list_empty(list)); |
| 623 | } | 623 | } |
| 624 | __mod_zone_page_state(zone, NR_FREE_PAGES, count); | ||
| 624 | spin_unlock(&zone->lock); | 625 | spin_unlock(&zone->lock); |
| 625 | } | 626 | } |
| 626 | 627 | ||
| @@ -631,8 +632,8 @@ static void free_one_page(struct zone *zone, struct page *page, int order, | |||
| 631 | zone->all_unreclaimable = 0; | 632 | zone->all_unreclaimable = 0; |
| 632 | zone->pages_scanned = 0; | 633 | zone->pages_scanned = 0; |
| 633 | 634 | ||
| 634 | __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order); | ||
| 635 | __free_one_page(page, zone, order, migratetype); | 635 | __free_one_page(page, zone, order, migratetype); |
| 636 | __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order); | ||
| 636 | spin_unlock(&zone->lock); | 637 | spin_unlock(&zone->lock); |
| 637 | } | 638 | } |
| 638 | 639 | ||
| @@ -1461,7 +1462,7 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark, | |||
| 1461 | { | 1462 | { |
| 1462 | /* free_pages may go negative - that's OK */ | 1463 | /* free_pages may go negative - that's OK */ |
| 1463 | long min = mark; | 1464 | long min = mark; |
| 1464 | long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1; | 1465 | long free_pages = zone_nr_free_pages(z) - (1 << order) + 1; |
| 1465 | int o; | 1466 | int o; |
| 1466 | 1467 | ||
| 1467 | if (alloc_flags & ALLOC_HIGH) | 1468 | if (alloc_flags & ALLOC_HIGH) |
| @@ -1846,6 +1847,7 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, | |||
| 1846 | struct page *page = NULL; | 1847 | struct page *page = NULL; |
| 1847 | struct reclaim_state reclaim_state; | 1848 | struct reclaim_state reclaim_state; |
| 1848 | struct task_struct *p = current; | 1849 | struct task_struct *p = current; |
| 1850 | bool drained = false; | ||
| 1849 | 1851 | ||
| 1850 | cond_resched(); | 1852 | cond_resched(); |
| 1851 | 1853 | ||
| @@ -1864,14 +1866,25 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, | |||
| 1864 | 1866 | ||
| 1865 | cond_resched(); | 1867 | cond_resched(); |
| 1866 | 1868 | ||
| 1867 | if (order != 0) | 1869 | if (unlikely(!(*did_some_progress))) |
| 1868 | drain_all_pages(); | 1870 | return NULL; |
| 1869 | 1871 | ||
| 1870 | if (likely(*did_some_progress)) | 1872 | retry: |
| 1871 | page = get_page_from_freelist(gfp_mask, nodemask, order, | 1873 | page = get_page_from_freelist(gfp_mask, nodemask, order, |
| 1872 | zonelist, high_zoneidx, | 1874 | zonelist, high_zoneidx, |
| 1873 | alloc_flags, preferred_zone, | 1875 | alloc_flags, preferred_zone, |
| 1874 | migratetype); | 1876 | migratetype); |
| 1877 | |||
| 1878 | /* | ||
| 1879 | * If an allocation failed after direct reclaim, it could be because | ||
| 1880 | * pages are pinned on the per-cpu lists. Drain them and try again | ||
| 1881 | */ | ||
| 1882 | if (!page && !drained) { | ||
| 1883 | drain_all_pages(); | ||
| 1884 | drained = true; | ||
| 1885 | goto retry; | ||
| 1886 | } | ||
| 1887 | |||
| 1875 | return page; | 1888 | return page; |
| 1876 | } | 1889 | } |
| 1877 | 1890 | ||
| @@ -2423,7 +2436,7 @@ void show_free_areas(void) | |||
| 2423 | " all_unreclaimable? %s" | 2436 | " all_unreclaimable? %s" |
| 2424 | "\n", | 2437 | "\n", |
| 2425 | zone->name, | 2438 | zone->name, |
| 2426 | K(zone_page_state(zone, NR_FREE_PAGES)), | 2439 | K(zone_nr_free_pages(zone)), |
| 2427 | K(min_wmark_pages(zone)), | 2440 | K(min_wmark_pages(zone)), |
| 2428 | K(low_wmark_pages(zone)), | 2441 | K(low_wmark_pages(zone)), |
| 2429 | K(high_wmark_pages(zone)), | 2442 | K(high_wmark_pages(zone)), |
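Besides moving the NR_FREE_PAGES update after the pages are actually freed, the page allocator hunks above make direct reclaim attempt the allocation first and drain the per-CPU page lists at most once, only when that attempt fails. A user-space sketch of that drain-and-retry pattern (illustrative; the counters stand in for per-CPU pagesets and the buddy free list):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static int pcp_cached = 3;   /* pages stuck on per-CPU free lists */
static int free_pages;       /* pages visible to the allocator */

static void *get_page_from_freelist(void)
{
    if (free_pages > 0) {
        free_pages--;
        return (void *)1;    /* stand-in for a struct page */
    }
    return NULL;
}

static void drain_all_pages(void)
{
    free_pages += pcp_cached;  /* give the cached pages back */
    pcp_cached = 0;
}

static void *alloc_after_reclaim(void)
{
    bool drained = false;
    void *page;

retry:
    page = get_page_from_freelist();
    if (!page && !drained) {   /* drain only if the first attempt failed */
        drain_all_pages();
        drained = true;
        goto retry;
    }
    return page;
}

int main(void)
{
    printf("got page: %s\n", alloc_after_reclaim() ? "yes" : "no");
    return 0;
}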
diff --git a/mm/percpu.c b/mm/percpu.c index e61dc2cc5873..58c572b18b07 100644 --- a/mm/percpu.c +++ b/mm/percpu.c | |||
| @@ -393,7 +393,9 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc) | |||
| 393 | goto out_unlock; | 393 | goto out_unlock; |
| 394 | 394 | ||
| 395 | old_size = chunk->map_alloc * sizeof(chunk->map[0]); | 395 | old_size = chunk->map_alloc * sizeof(chunk->map[0]); |
| 396 | memcpy(new, chunk->map, old_size); | 396 | old = chunk->map; |
| 397 | |||
| 398 | memcpy(new, old, old_size); | ||
| 397 | 399 | ||
| 398 | chunk->map_alloc = new_alloc; | 400 | chunk->map_alloc = new_alloc; |
| 399 | chunk->map = new; | 401 | chunk->map = new; |
| @@ -1162,7 +1164,7 @@ static struct pcpu_alloc_info * __init pcpu_build_alloc_info( | |||
| 1162 | } | 1164 | } |
| 1163 | 1165 | ||
| 1164 | /* | 1166 | /* |
| 1165 | * Don't accept if wastage is over 25%. The | 1167 | * Don't accept if wastage is over 1/3. The |
| 1166 | * greater-than comparison ensures upa==1 always | 1168 | * greater-than comparison ensures upa==1 always |
| 1167 | * passes the following check. | 1169 | * passes the following check. |
| 1168 | */ | 1170 | */ |
diff --git a/mm/percpu_up.c b/mm/percpu_up.c index c4351c7f57d2..db884fae5721 100644 --- a/mm/percpu_up.c +++ b/mm/percpu_up.c | |||
| @@ -14,13 +14,13 @@ void __percpu *__alloc_percpu(size_t size, size_t align) | |||
| 14 | * percpu sections on SMP for which this path isn't used. | 14 | * percpu sections on SMP for which this path isn't used. |
| 15 | */ | 15 | */ |
| 16 | WARN_ON_ONCE(align > SMP_CACHE_BYTES); | 16 | WARN_ON_ONCE(align > SMP_CACHE_BYTES); |
| 17 | return kzalloc(size, GFP_KERNEL); | 17 | return (void __percpu __force *)kzalloc(size, GFP_KERNEL); |
| 18 | } | 18 | } |
| 19 | EXPORT_SYMBOL_GPL(__alloc_percpu); | 19 | EXPORT_SYMBOL_GPL(__alloc_percpu); |
| 20 | 20 | ||
| 21 | void free_percpu(void __percpu *p) | 21 | void free_percpu(void __percpu *p) |
| 22 | { | 22 | { |
| 23 | kfree(p); | 23 | kfree(this_cpu_ptr(p)); |
| 24 | } | 24 | } |
| 25 | EXPORT_SYMBOL_GPL(free_percpu); | 25 | EXPORT_SYMBOL_GPL(free_percpu); |
| 26 | 26 | ||
diff --git a/mm/swapfile.c b/mm/swapfile.c index 1f3f9c59a73a..7c703ff2f36f 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c | |||
| @@ -47,8 +47,6 @@ long nr_swap_pages; | |||
| 47 | long total_swap_pages; | 47 | long total_swap_pages; |
| 48 | static int least_priority; | 48 | static int least_priority; |
| 49 | 49 | ||
| 50 | static bool swap_for_hibernation; | ||
| 51 | |||
| 52 | static const char Bad_file[] = "Bad swap file entry "; | 50 | static const char Bad_file[] = "Bad swap file entry "; |
| 53 | static const char Unused_file[] = "Unused swap file entry "; | 51 | static const char Unused_file[] = "Unused swap file entry "; |
| 54 | static const char Bad_offset[] = "Bad swap offset entry "; | 52 | static const char Bad_offset[] = "Bad swap offset entry "; |
| @@ -141,8 +139,7 @@ static int discard_swap(struct swap_info_struct *si) | |||
| 141 | nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9); | 139 | nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9); |
| 142 | if (nr_blocks) { | 140 | if (nr_blocks) { |
| 143 | err = blkdev_issue_discard(si->bdev, start_block, | 141 | err = blkdev_issue_discard(si->bdev, start_block, |
| 144 | nr_blocks, GFP_KERNEL, | 142 | nr_blocks, GFP_KERNEL, BLKDEV_IFL_WAIT); |
| 145 | BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER); | ||
| 146 | if (err) | 143 | if (err) |
| 147 | return err; | 144 | return err; |
| 148 | cond_resched(); | 145 | cond_resched(); |
| @@ -153,8 +150,7 @@ static int discard_swap(struct swap_info_struct *si) | |||
| 153 | nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9); | 150 | nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9); |
| 154 | 151 | ||
| 155 | err = blkdev_issue_discard(si->bdev, start_block, | 152 | err = blkdev_issue_discard(si->bdev, start_block, |
| 156 | nr_blocks, GFP_KERNEL, | 153 | nr_blocks, GFP_KERNEL, BLKDEV_IFL_WAIT); |
| 157 | BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER); | ||
| 158 | if (err) | 154 | if (err) |
| 159 | break; | 155 | break; |
| 160 | 156 | ||
| @@ -193,8 +189,7 @@ static void discard_swap_cluster(struct swap_info_struct *si, | |||
| 193 | start_block <<= PAGE_SHIFT - 9; | 189 | start_block <<= PAGE_SHIFT - 9; |
| 194 | nr_blocks <<= PAGE_SHIFT - 9; | 190 | nr_blocks <<= PAGE_SHIFT - 9; |
| 195 | if (blkdev_issue_discard(si->bdev, start_block, | 191 | if (blkdev_issue_discard(si->bdev, start_block, |
| 196 | nr_blocks, GFP_NOIO, BLKDEV_IFL_WAIT | | 192 | nr_blocks, GFP_NOIO, BLKDEV_IFL_WAIT)) |
| 197 | BLKDEV_IFL_BARRIER)) | ||
| 198 | break; | 193 | break; |
| 199 | } | 194 | } |
| 200 | 195 | ||
| @@ -320,10 +315,8 @@ checks: | |||
| 320 | if (offset > si->highest_bit) | 315 | if (offset > si->highest_bit) |
| 321 | scan_base = offset = si->lowest_bit; | 316 | scan_base = offset = si->lowest_bit; |
| 322 | 317 | ||
| 323 | /* reuse swap entry of cache-only swap if not hibernation. */ | 318 | /* reuse swap entry of cache-only swap if not busy. */ |
| 324 | if (vm_swap_full() | 319 | if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) { |
| 325 | && usage == SWAP_HAS_CACHE | ||
| 326 | && si->swap_map[offset] == SWAP_HAS_CACHE) { | ||
| 327 | int swap_was_freed; | 320 | int swap_was_freed; |
| 328 | spin_unlock(&swap_lock); | 321 | spin_unlock(&swap_lock); |
| 329 | swap_was_freed = __try_to_reclaim_swap(si, offset); | 322 | swap_was_freed = __try_to_reclaim_swap(si, offset); |
| @@ -453,8 +446,6 @@ swp_entry_t get_swap_page(void) | |||
| 453 | spin_lock(&swap_lock); | 446 | spin_lock(&swap_lock); |
| 454 | if (nr_swap_pages <= 0) | 447 | if (nr_swap_pages <= 0) |
| 455 | goto noswap; | 448 | goto noswap; |
| 456 | if (swap_for_hibernation) | ||
| 457 | goto noswap; | ||
| 458 | nr_swap_pages--; | 449 | nr_swap_pages--; |
| 459 | 450 | ||
| 460 | for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) { | 451 | for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) { |
| @@ -487,6 +478,28 @@ noswap: | |||
| 487 | return (swp_entry_t) {0}; | 478 | return (swp_entry_t) {0}; |
| 488 | } | 479 | } |
| 489 | 480 | ||
| 481 | /* The only caller of this function is now the suspend routine */ | ||
| 482 | swp_entry_t get_swap_page_of_type(int type) | ||
| 483 | { | ||
| 484 | struct swap_info_struct *si; | ||
| 485 | pgoff_t offset; | ||
| 486 | |||
| 487 | spin_lock(&swap_lock); | ||
| 488 | si = swap_info[type]; | ||
| 489 | if (si && (si->flags & SWP_WRITEOK)) { | ||
| 490 | nr_swap_pages--; | ||
| 491 | /* This is called for allocating swap entry, not cache */ | ||
| 492 | offset = scan_swap_map(si, 1); | ||
| 493 | if (offset) { | ||
| 494 | spin_unlock(&swap_lock); | ||
| 495 | return swp_entry(type, offset); | ||
| 496 | } | ||
| 497 | nr_swap_pages++; | ||
| 498 | } | ||
| 499 | spin_unlock(&swap_lock); | ||
| 500 | return (swp_entry_t) {0}; | ||
| 501 | } | ||
| 502 | |||
| 490 | static struct swap_info_struct *swap_info_get(swp_entry_t entry) | 503 | static struct swap_info_struct *swap_info_get(swp_entry_t entry) |
| 491 | { | 504 | { |
| 492 | struct swap_info_struct *p; | 505 | struct swap_info_struct *p; |
| @@ -670,6 +683,24 @@ int try_to_free_swap(struct page *page) | |||
| 670 | if (page_swapcount(page)) | 683 | if (page_swapcount(page)) |
| 671 | return 0; | 684 | return 0; |
| 672 | 685 | ||
| 686 | /* | ||
| 687 | * Once hibernation has begun to create its image of memory, | ||
| 688 | * there's a danger that one of the calls to try_to_free_swap() | ||
| 689 | * - most probably a call from __try_to_reclaim_swap() while | ||
| 690 | * hibernation is allocating its own swap pages for the image, | ||
| 691 | * but conceivably even a call from memory reclaim - will free | ||
| 692 | * the swap from a page which has already been recorded in the | ||
| 693 | * image as a clean swapcache page, and then reuse its swap for | ||
| 694 | * another page of the image. On waking from hibernation, the | ||
| 695 | * original page might be freed under memory pressure, then | ||
| 696 | * later read back in from swap, now with the wrong data. | ||
| 697 | * | ||
| 698 | * Hibernation clears bits from gfp_allowed_mask to prevent | ||
| 699 | * memory reclaim from writing to disk, so check that here. | ||
| 700 | */ | ||
| 701 | if (!(gfp_allowed_mask & __GFP_IO)) | ||
| 702 | return 0; | ||
| 703 | |||
| 673 | delete_from_swap_cache(page); | 704 | delete_from_swap_cache(page); |
| 674 | SetPageDirty(page); | 705 | SetPageDirty(page); |
| 675 | return 1; | 706 | return 1; |
| @@ -746,74 +777,6 @@ int mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep) | |||
| 746 | #endif | 777 | #endif |
| 747 | 778 | ||
| 748 | #ifdef CONFIG_HIBERNATION | 779 | #ifdef CONFIG_HIBERNATION |
| 749 | |||
| 750 | static pgoff_t hibernation_offset[MAX_SWAPFILES]; | ||
| 751 | /* | ||
| 752 | * Once hibernation starts to use swap, we freeze swap_map[]. Otherwise, | ||
| 753 | * saved swap_map[] image to the disk will be an incomplete because it's | ||
| 754 | * changing without synchronization with hibernation snap shot. | ||
| 755 | * At resume, we just make swap_for_hibernation=false. We can forget | ||
| 756 | * used maps easily. | ||
| 757 | */ | ||
| 758 | void hibernation_freeze_swap(void) | ||
| 759 | { | ||
| 760 | int i; | ||
| 761 | |||
| 762 | spin_lock(&swap_lock); | ||
| 763 | |||
| 764 | printk(KERN_INFO "PM: Freeze Swap\n"); | ||
| 765 | swap_for_hibernation = true; | ||
| 766 | for (i = 0; i < MAX_SWAPFILES; i++) | ||
| 767 | hibernation_offset[i] = 1; | ||
| 768 | spin_unlock(&swap_lock); | ||
| 769 | } | ||
| 770 | |||
| 771 | void hibernation_thaw_swap(void) | ||
| 772 | { | ||
| 773 | spin_lock(&swap_lock); | ||
| 774 | if (swap_for_hibernation) { | ||
| 775 | printk(KERN_INFO "PM: Thaw Swap\n"); | ||
| 776 | swap_for_hibernation = false; | ||
| 777 | } | ||
| 778 | spin_unlock(&swap_lock); | ||
| 779 | } | ||
| 780 | |||
| 781 | /* | ||
| 782 | * Because updateing swap_map[] can make not-saved-status-change, | ||
| 783 | * we use our own easy allocator. | ||
| 784 | * Please see kernel/power/swap.c, Used swaps are recorded into | ||
| 785 | * RB-tree. | ||
| 786 | */ | ||
| 787 | swp_entry_t get_swap_for_hibernation(int type) | ||
| 788 | { | ||
| 789 | pgoff_t off; | ||
| 790 | swp_entry_t val = {0}; | ||
| 791 | struct swap_info_struct *si; | ||
| 792 | |||
| 793 | spin_lock(&swap_lock); | ||
| 794 | |||
| 795 | si = swap_info[type]; | ||
| 796 | if (!si || !(si->flags & SWP_WRITEOK)) | ||
| 797 | goto done; | ||
| 798 | |||
| 799 | for (off = hibernation_offset[type]; off < si->max; ++off) { | ||
| 800 | if (!si->swap_map[off]) | ||
| 801 | break; | ||
| 802 | } | ||
| 803 | if (off < si->max) { | ||
| 804 | val = swp_entry(type, off); | ||
| 805 | hibernation_offset[type] = off + 1; | ||
| 806 | } | ||
| 807 | done: | ||
| 808 | spin_unlock(&swap_lock); | ||
| 809 | return val; | ||
| 810 | } | ||
| 811 | |||
| 812 | void swap_free_for_hibernation(swp_entry_t ent) | ||
| 813 | { | ||
| 814 | /* Nothing to do */ | ||
| 815 | } | ||
| 816 | |||
| 817 | /* | 780 | /* |
| 818 | * Find the swap type that corresponds to given device (if any). | 781 | * Find the swap type that corresponds to given device (if any). |
| 819 | * | 782 | * |
| @@ -2084,7 +2047,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) | |||
| 2084 | p->flags |= SWP_SOLIDSTATE; | 2047 | p->flags |= SWP_SOLIDSTATE; |
| 2085 | p->cluster_next = 1 + (random32() % p->highest_bit); | 2048 | p->cluster_next = 1 + (random32() % p->highest_bit); |
| 2086 | } | 2049 | } |
| 2087 | if (discard_swap(p) == 0) | 2050 | if (discard_swap(p) == 0 && (swap_flags & SWAP_FLAG_DISCARD)) |
| 2088 | p->flags |= SWP_DISCARDABLE; | 2051 | p->flags |= SWP_DISCARDABLE; |
| 2089 | } | 2052 | } |
| 2090 | 2053 | ||
diff --git a/mm/vmstat.c b/mm/vmstat.c index f389168f9a83..355a9e669aaa 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c | |||
| @@ -138,11 +138,24 @@ static void refresh_zone_stat_thresholds(void) | |||
| 138 | int threshold; | 138 | int threshold; |
| 139 | 139 | ||
| 140 | for_each_populated_zone(zone) { | 140 | for_each_populated_zone(zone) { |
| 141 | unsigned long max_drift, tolerate_drift; | ||
| 142 | |||
| 141 | threshold = calculate_threshold(zone); | 143 | threshold = calculate_threshold(zone); |
| 142 | 144 | ||
| 143 | for_each_online_cpu(cpu) | 145 | for_each_online_cpu(cpu) |
| 144 | per_cpu_ptr(zone->pageset, cpu)->stat_threshold | 146 | per_cpu_ptr(zone->pageset, cpu)->stat_threshold |
| 145 | = threshold; | 147 | = threshold; |
| 148 | |||
| 149 | /* | ||
| 150 | * Only set percpu_drift_mark if there is a danger that | ||
| 151 | * NR_FREE_PAGES reports the low watermark is ok when in fact | ||
| 152 | * the min watermark could be breached by an allocation | ||
| 153 | */ | ||
| 154 | tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone); | ||
| 155 | max_drift = num_online_cpus() * threshold; | ||
| 156 | if (max_drift > tolerate_drift) | ||
| 157 | zone->percpu_drift_mark = high_wmark_pages(zone) + | ||
| 158 | max_drift; | ||
| 146 | } | 159 | } |
| 147 | } | 160 | } |
| 148 | 161 | ||
| @@ -813,7 +826,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat, | |||
| 813 | "\n scanned %lu" | 826 | "\n scanned %lu" |
| 814 | "\n spanned %lu" | 827 | "\n spanned %lu" |
| 815 | "\n present %lu", | 828 | "\n present %lu", |
| 816 | zone_page_state(zone, NR_FREE_PAGES), | 829 | zone_nr_free_pages(zone), |
| 817 | min_wmark_pages(zone), | 830 | min_wmark_pages(zone), |
| 818 | low_wmark_pages(zone), | 831 | low_wmark_pages(zone), |
| 819 | high_wmark_pages(zone), | 832 | high_wmark_pages(zone), |
| @@ -998,6 +1011,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb, | |||
| 998 | switch (action) { | 1011 | switch (action) { |
| 999 | case CPU_ONLINE: | 1012 | case CPU_ONLINE: |
| 1000 | case CPU_ONLINE_FROZEN: | 1013 | case CPU_ONLINE_FROZEN: |
| 1014 | refresh_zone_stat_thresholds(); | ||
| 1001 | start_cpu_timer(cpu); | 1015 | start_cpu_timer(cpu); |
| 1002 | node_set_state(cpu_to_node(cpu), N_CPU); | 1016 | node_set_state(cpu_to_node(cpu), N_CPU); |
| 1003 | break; | 1017 | break; |
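The vmstat hunk above arms zone->percpu_drift_mark only when the worst-case counter drift (online CPUs times the per-cpu threshold) could swallow the gap between the low and min watermarks. A worked example with illustrative numbers:

#include <stdio.h>

int main(void)
{
    unsigned long min_wmark = 1000, low_wmark = 1250, high_wmark = 1500;
    unsigned long threshold = 70, online_cpus = 8;
    unsigned long tolerate_drift = low_wmark - min_wmark;   /* 250 */
    unsigned long max_drift = online_cpus * threshold;      /* 560 */

    if (max_drift > tolerate_drift)
        printf("percpu_drift_mark = %lu\n", high_wmark + max_drift);
    else
        printf("drift cannot breach the min watermark; no mark set\n");
    return 0;
}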
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index 5ed00bd7009f..137f23259a93 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c | |||
| @@ -761,9 +761,11 @@ static int br_nf_dev_queue_xmit(struct sk_buff *skb) | |||
| 761 | { | 761 | { |
| 762 | if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) && | 762 | if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) && |
| 763 | skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu && | 763 | skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu && |
| 764 | !skb_is_gso(skb)) | 764 | !skb_is_gso(skb)) { |
| 765 | /* BUG: Should really parse the IP options here. */ | ||
| 766 | memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); | ||
| 765 | return ip_fragment(skb, br_dev_queue_push_xmit); | 767 | return ip_fragment(skb, br_dev_queue_push_xmit); |
| 766 | else | 768 | } else |
| 767 | return br_dev_queue_push_xmit(skb); | 769 | return br_dev_queue_push_xmit(skb); |
| 768 | } | 770 | } |
| 769 | #else | 771 | #else |
diff --git a/net/core/dev.c b/net/core/dev.c index 3721fbb9a83c..b9b22a3c4c8f 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
| @@ -2058,16 +2058,16 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev, | |||
| 2058 | struct sk_buff *skb) | 2058 | struct sk_buff *skb) |
| 2059 | { | 2059 | { |
| 2060 | int queue_index; | 2060 | int queue_index; |
| 2061 | struct sock *sk = skb->sk; | 2061 | const struct net_device_ops *ops = dev->netdev_ops; |
| 2062 | 2062 | ||
| 2063 | queue_index = sk_tx_queue_get(sk); | 2063 | if (ops->ndo_select_queue) { |
| 2064 | if (queue_index < 0) { | 2064 | queue_index = ops->ndo_select_queue(dev, skb); |
| 2065 | const struct net_device_ops *ops = dev->netdev_ops; | 2065 | queue_index = dev_cap_txqueue(dev, queue_index); |
| 2066 | } else { | ||
| 2067 | struct sock *sk = skb->sk; | ||
| 2068 | queue_index = sk_tx_queue_get(sk); | ||
| 2069 | if (queue_index < 0) { | ||
| 2066 | 2070 | ||
| 2067 | if (ops->ndo_select_queue) { | ||
| 2068 | queue_index = ops->ndo_select_queue(dev, skb); | ||
| 2069 | queue_index = dev_cap_txqueue(dev, queue_index); | ||
| 2070 | } else { | ||
| 2071 | queue_index = 0; | 2071 | queue_index = 0; |
| 2072 | if (dev->real_num_tx_queues > 1) | 2072 | if (dev->real_num_tx_queues > 1) |
| 2073 | queue_index = skb_tx_hash(dev, skb); | 2073 | queue_index = skb_tx_hash(dev, skb); |
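The dev_pick_tx() rework above lets a driver's ndo_select_queue() hook take precedence; the queue index cached on the socket is consulted only when no such hook exists. A simplified sketch of that selection order (illustrative types, not the netdev API; the modulo stands in for dev_cap_txqueue()):

#include <stdio.h>

struct fake_dev {
    int real_num_tx_queues;
    int (*ndo_select_queue)(void);   /* driver hook, may be NULL */
};

static int cached_sk_queue = 5;      /* analogue of sk_tx_queue_get() */

static int pick_tx_queue(const struct fake_dev *dev)
{
    if (dev->ndo_select_queue)       /* driver hook always wins */
        return dev->ndo_select_queue() % dev->real_num_tx_queues;
    if (cached_sk_queue >= 0 && cached_sk_queue < dev->real_num_tx_queues)
        return cached_sk_queue;      /* fall back to the socket's cache */
    return 0;                        /* or a flow hash for multiqueue devices */
}

static int driver_hook(void) { return 2; }

int main(void)
{
    struct fake_dev with_hook = { 8, driver_hook };
    struct fake_dev without_hook = { 8, NULL };

    printf("with hook: %d, without hook: %d\n",
           pick_tx_queue(&with_hook), pick_tx_queue(&without_hook));
    return 0;
}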
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c index 9fbe7f7429b0..6743146e4d6b 100644 --- a/net/core/gen_estimator.c +++ b/net/core/gen_estimator.c | |||
| @@ -232,7 +232,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats, | |||
| 232 | est->last_packets = bstats->packets; | 232 | est->last_packets = bstats->packets; |
| 233 | est->avpps = rate_est->pps<<10; | 233 | est->avpps = rate_est->pps<<10; |
| 234 | 234 | ||
| 235 | spin_lock(&est_tree_lock); | 235 | spin_lock_bh(&est_tree_lock); |
| 236 | if (!elist[idx].timer.function) { | 236 | if (!elist[idx].timer.function) { |
| 237 | INIT_LIST_HEAD(&elist[idx].list); | 237 | INIT_LIST_HEAD(&elist[idx].list); |
| 238 | setup_timer(&elist[idx].timer, est_timer, idx); | 238 | setup_timer(&elist[idx].timer, est_timer, idx); |
| @@ -243,7 +243,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats, | |||
| 243 | 243 | ||
| 244 | list_add_rcu(&est->list, &elist[idx].list); | 244 | list_add_rcu(&est->list, &elist[idx].list); |
| 245 | gen_add_node(est); | 245 | gen_add_node(est); |
| 246 | spin_unlock(&est_tree_lock); | 246 | spin_unlock_bh(&est_tree_lock); |
| 247 | 247 | ||
| 248 | return 0; | 248 | return 0; |
| 249 | } | 249 | } |
| @@ -270,7 +270,7 @@ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats, | |||
| 270 | { | 270 | { |
| 271 | struct gen_estimator *e; | 271 | struct gen_estimator *e; |
| 272 | 272 | ||
| 273 | spin_lock(&est_tree_lock); | 273 | spin_lock_bh(&est_tree_lock); |
| 274 | while ((e = gen_find_node(bstats, rate_est))) { | 274 | while ((e = gen_find_node(bstats, rate_est))) { |
| 275 | rb_erase(&e->node, &est_root); | 275 | rb_erase(&e->node, &est_root); |
| 276 | 276 | ||
| @@ -281,7 +281,7 @@ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats, | |||
| 281 | list_del_rcu(&e->list); | 281 | list_del_rcu(&e->list); |
| 282 | call_rcu(&e->e_rcu, __gen_kill_estimator); | 282 | call_rcu(&e->e_rcu, __gen_kill_estimator); |
| 283 | } | 283 | } |
| 284 | spin_unlock(&est_tree_lock); | 284 | spin_unlock_bh(&est_tree_lock); |
| 285 | } | 285 | } |
| 286 | EXPORT_SYMBOL(gen_kill_estimator); | 286 | EXPORT_SYMBOL(gen_kill_estimator); |
| 287 | 287 | ||
| @@ -320,9 +320,9 @@ bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats, | |||
| 320 | 320 | ||
| 321 | ASSERT_RTNL(); | 321 | ASSERT_RTNL(); |
| 322 | 322 | ||
| 323 | spin_lock(&est_tree_lock); | 323 | spin_lock_bh(&est_tree_lock); |
| 324 | res = gen_find_node(bstats, rate_est) != NULL; | 324 | res = gen_find_node(bstats, rate_est) != NULL; |
| 325 | spin_unlock(&est_tree_lock); | 325 | spin_unlock_bh(&est_tree_lock); |
| 326 | 326 | ||
| 327 | return res; | 327 | return res; |
| 328 | } | 328 | } |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 3a2513f0d0c3..c83b421341c0 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
| @@ -2573,6 +2573,10 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features) | |||
| 2573 | __copy_skb_header(nskb, skb); | 2573 | __copy_skb_header(nskb, skb); |
| 2574 | nskb->mac_len = skb->mac_len; | 2574 | nskb->mac_len = skb->mac_len; |
| 2575 | 2575 | ||
| 2576 | /* nskb and skb might have different headroom */ | ||
| 2577 | if (nskb->ip_summed == CHECKSUM_PARTIAL) | ||
| 2578 | nskb->csum_start += skb_headroom(nskb) - headroom; | ||
| 2579 | |||
| 2576 | skb_reset_mac_header(nskb); | 2580 | skb_reset_mac_header(nskb); |
| 2577 | skb_set_network_header(nskb, skb->mac_len); | 2581 | skb_set_network_header(nskb, skb->mac_len); |
| 2578 | nskb->transport_header = (nskb->network_header + | 2582 | nskb->transport_header = (nskb->network_header + |
| @@ -2703,7 +2707,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) | |||
| 2703 | return -E2BIG; | 2707 | return -E2BIG; |
| 2704 | 2708 | ||
| 2705 | headroom = skb_headroom(p); | 2709 | headroom = skb_headroom(p); |
| 2706 | nskb = netdev_alloc_skb(p->dev, headroom + skb_gro_offset(p)); | 2710 | nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC); |
| 2707 | if (unlikely(!nskb)) | 2711 | if (unlikely(!nskb)) |
| 2708 | return -ENOMEM; | 2712 | return -ENOMEM; |
| 2709 | 2713 | ||
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index 7c3a7d191249..571f8950ed06 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig | |||
| @@ -46,7 +46,7 @@ config IP_ADVANCED_ROUTER | |||
| 46 | rp_filter on use: | 46 | rp_filter on use: |
| 47 | 47 | ||
| 48 | echo 1 > /proc/sys/net/ipv4/conf/<device>/rp_filter | 48 | echo 1 > /proc/sys/net/ipv4/conf/<device>/rp_filter |
| 49 | and | 49 | or |
| 50 | echo 1 > /proc/sys/net/ipv4/conf/all/rp_filter | 50 | echo 1 > /proc/sys/net/ipv4/conf/all/rp_filter |
| 51 | 51 | ||
| 52 | Note that some distributions enable it in startup scripts. | 52 | Note that some distributions enable it in startup scripts. |
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c index f0550941df7b..721a8a37b45c 100644 --- a/net/ipv4/datagram.c +++ b/net/ipv4/datagram.c | |||
| @@ -62,8 +62,11 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
| 62 | } | 62 | } |
| 63 | if (!inet->inet_saddr) | 63 | if (!inet->inet_saddr) |
| 64 | inet->inet_saddr = rt->rt_src; /* Update source address */ | 64 | inet->inet_saddr = rt->rt_src; /* Update source address */ |
| 65 | if (!inet->inet_rcv_saddr) | 65 | if (!inet->inet_rcv_saddr) { |
| 66 | inet->inet_rcv_saddr = rt->rt_src; | 66 | inet->inet_rcv_saddr = rt->rt_src; |
| 67 | if (sk->sk_prot->rehash) | ||
| 68 | sk->sk_prot->rehash(sk); | ||
| 69 | } | ||
| 67 | inet->inet_daddr = rt->rt_dst; | 70 | inet->inet_daddr = rt->rt_dst; |
| 68 | inet->inet_dport = usin->sin_port; | 71 | inet->inet_dport = usin->sin_port; |
| 69 | sk->sk_state = TCP_ESTABLISHED; | 72 | sk->sk_state = TCP_ESTABLISHED; |
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index a43968918350..7d02a9f999fa 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c | |||
| @@ -246,6 +246,7 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif, | |||
| 246 | 246 | ||
| 247 | struct fib_result res; | 247 | struct fib_result res; |
| 248 | int no_addr, rpf, accept_local; | 248 | int no_addr, rpf, accept_local; |
| 249 | bool dev_match; | ||
| 249 | int ret; | 250 | int ret; |
| 250 | struct net *net; | 251 | struct net *net; |
| 251 | 252 | ||
| @@ -273,12 +274,22 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif, | |||
| 273 | } | 274 | } |
| 274 | *spec_dst = FIB_RES_PREFSRC(res); | 275 | *spec_dst = FIB_RES_PREFSRC(res); |
| 275 | fib_combine_itag(itag, &res); | 276 | fib_combine_itag(itag, &res); |
| 277 | dev_match = false; | ||
| 278 | |||
| 276 | #ifdef CONFIG_IP_ROUTE_MULTIPATH | 279 | #ifdef CONFIG_IP_ROUTE_MULTIPATH |
| 277 | if (FIB_RES_DEV(res) == dev || res.fi->fib_nhs > 1) | 280 | for (ret = 0; ret < res.fi->fib_nhs; ret++) { |
| 281 | struct fib_nh *nh = &res.fi->fib_nh[ret]; | ||
| 282 | |||
| 283 | if (nh->nh_dev == dev) { | ||
| 284 | dev_match = true; | ||
| 285 | break; | ||
| 286 | } | ||
| 287 | } | ||
| 278 | #else | 288 | #else |
| 279 | if (FIB_RES_DEV(res) == dev) | 289 | if (FIB_RES_DEV(res) == dev) |
| 290 | dev_match = true; | ||
| 280 | #endif | 291 | #endif |
| 281 | { | 292 | if (dev_match) { |
| 282 | ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST; | 293 | ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST; |
| 283 | fib_res_put(&res); | 294 | fib_res_put(&res); |
| 284 | return ret; | 295 | return ret; |
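The fib_validate_source() hunk above stops treating any multipath route as a match and instead walks every nexthop, accepting the packet only if one of them really uses the ingress device. A runnable sketch of that loop with simplified types (the ifindex values are arbitrary):

#include <stdbool.h>
#include <stdio.h>

struct nh { int dev_ifindex; };

static bool dev_match(const struct nh *nhs, int n, int ingress_ifindex)
{
	for (int i = 0; i < n; i++)
		if (nhs[i].dev_ifindex == ingress_ifindex)
			return true;
	return false;
}

int main(void)
{
	struct nh nhs[] = { { .dev_ifindex = 2 }, { .dev_ifindex = 5 } };

	printf("ifindex 5: %s\n", dev_match(nhs, 2, 5) ? "match" : "no match");
	printf("ifindex 7: %s\n", dev_match(nhs, 2, 7) ? "match" : "no match");
	return 0;
}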
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 79d057a939ba..4a8e370862bc 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c | |||
| @@ -186,7 +186,9 @@ static inline struct tnode *node_parent_rcu(struct node *node) | |||
| 186 | { | 186 | { |
| 187 | struct tnode *ret = node_parent(node); | 187 | struct tnode *ret = node_parent(node); |
| 188 | 188 | ||
| 189 | return rcu_dereference(ret); | 189 | return rcu_dereference_check(ret, |
| 190 | rcu_read_lock_held() || | ||
| 191 | lockdep_rtnl_is_held()); | ||
| 190 | } | 192 | } |
| 191 | 193 | ||
| 192 | /* Same as rcu_assign_pointer | 194 | /* Same as rcu_assign_pointer |
| @@ -1753,7 +1755,9 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct node *c) | |||
| 1753 | 1755 | ||
| 1754 | static struct leaf *trie_firstleaf(struct trie *t) | 1756 | static struct leaf *trie_firstleaf(struct trie *t) |
| 1755 | { | 1757 | { |
| 1756 | struct tnode *n = (struct tnode *) rcu_dereference(t->trie); | 1758 | struct tnode *n = (struct tnode *) rcu_dereference_check(t->trie, |
| 1759 | rcu_read_lock_held() || | ||
| 1760 | lockdep_rtnl_is_held()); | ||
| 1757 | 1761 | ||
| 1758 | if (!n) | 1762 | if (!n) |
| 1759 | return NULL; | 1763 | return NULL; |
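The fib_trie hunks replace rcu_dereference() with rcu_dereference_check() so that lockdep's RCU checking accepts callers that hold the RTNL instead of an RCU read lock. A minimal kernel-style sketch of that pattern, assuming a pointer that may legitimately be read under either form of protection; the structure and function names are illustrative:

#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>

struct cfg { int value; };

static struct cfg *active_cfg;

static struct cfg *get_active_cfg(void)
{
	/* with CONFIG_PROVE_RCU, warn only if neither condition holds */
	return rcu_dereference_check(active_cfg,
				     rcu_read_lock_held() ||
				     lockdep_rtnl_is_held());
}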
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 3f56b6e6c6aa..6298f75d5e93 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
| @@ -2738,6 +2738,11 @@ slow_output: | |||
| 2738 | } | 2738 | } |
| 2739 | EXPORT_SYMBOL_GPL(__ip_route_output_key); | 2739 | EXPORT_SYMBOL_GPL(__ip_route_output_key); |
| 2740 | 2740 | ||
| 2741 | static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie) | ||
| 2742 | { | ||
| 2743 | return NULL; | ||
| 2744 | } | ||
| 2745 | |||
| 2741 | static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu) | 2746 | static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu) |
| 2742 | { | 2747 | { |
| 2743 | } | 2748 | } |
| @@ -2746,7 +2751,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = { | |||
| 2746 | .family = AF_INET, | 2751 | .family = AF_INET, |
| 2747 | .protocol = cpu_to_be16(ETH_P_IP), | 2752 | .protocol = cpu_to_be16(ETH_P_IP), |
| 2748 | .destroy = ipv4_dst_destroy, | 2753 | .destroy = ipv4_dst_destroy, |
| 2749 | .check = ipv4_dst_check, | 2754 | .check = ipv4_blackhole_dst_check, |
| 2750 | .update_pmtu = ipv4_rt_blackhole_update_pmtu, | 2755 | .update_pmtu = ipv4_rt_blackhole_update_pmtu, |
| 2751 | .entries = ATOMIC_INIT(0), | 2756 | .entries = ATOMIC_INIT(0), |
| 2752 | }; | 2757 | }; |
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 32e0bef60d0a..fb23c2e63b52 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
| @@ -1260,6 +1260,49 @@ void udp_lib_unhash(struct sock *sk) | |||
| 1260 | } | 1260 | } |
| 1261 | EXPORT_SYMBOL(udp_lib_unhash); | 1261 | EXPORT_SYMBOL(udp_lib_unhash); |
| 1262 | 1262 | ||
| 1263 | /* | ||
| 1264 | * inet_rcv_saddr was changed, we must rehash secondary hash | ||
| 1265 | */ | ||
| 1266 | void udp_lib_rehash(struct sock *sk, u16 newhash) | ||
| 1267 | { | ||
| 1268 | if (sk_hashed(sk)) { | ||
| 1269 | struct udp_table *udptable = sk->sk_prot->h.udp_table; | ||
| 1270 | struct udp_hslot *hslot, *hslot2, *nhslot2; | ||
| 1271 | |||
| 1272 | hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); | ||
| 1273 | nhslot2 = udp_hashslot2(udptable, newhash); | ||
| 1274 | udp_sk(sk)->udp_portaddr_hash = newhash; | ||
| 1275 | if (hslot2 != nhslot2) { | ||
| 1276 | hslot = udp_hashslot(udptable, sock_net(sk), | ||
| 1277 | udp_sk(sk)->udp_port_hash); | ||
| 1278 | /* we must lock primary chain too */ | ||
| 1279 | spin_lock_bh(&hslot->lock); | ||
| 1280 | |||
| 1281 | spin_lock(&hslot2->lock); | ||
| 1282 | hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node); | ||
| 1283 | hslot2->count--; | ||
| 1284 | spin_unlock(&hslot2->lock); | ||
| 1285 | |||
| 1286 | spin_lock(&nhslot2->lock); | ||
| 1287 | hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node, | ||
| 1288 | &nhslot2->head); | ||
| 1289 | nhslot2->count++; | ||
| 1290 | spin_unlock(&nhslot2->lock); | ||
| 1291 | |||
| 1292 | spin_unlock_bh(&hslot->lock); | ||
| 1293 | } | ||
| 1294 | } | ||
| 1295 | } | ||
| 1296 | EXPORT_SYMBOL(udp_lib_rehash); | ||
| 1297 | |||
| 1298 | static void udp_v4_rehash(struct sock *sk) | ||
| 1299 | { | ||
| 1300 | u16 new_hash = udp4_portaddr_hash(sock_net(sk), | ||
| 1301 | inet_sk(sk)->inet_rcv_saddr, | ||
| 1302 | inet_sk(sk)->inet_num); | ||
| 1303 | udp_lib_rehash(sk, new_hash); | ||
| 1304 | } | ||
| 1305 | |||
| 1263 | static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | 1306 | static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) |
| 1264 | { | 1307 | { |
| 1265 | int rc; | 1308 | int rc; |
| @@ -1843,6 +1886,7 @@ struct proto udp_prot = { | |||
| 1843 | .backlog_rcv = __udp_queue_rcv_skb, | 1886 | .backlog_rcv = __udp_queue_rcv_skb, |
| 1844 | .hash = udp_lib_hash, | 1887 | .hash = udp_lib_hash, |
| 1845 | .unhash = udp_lib_unhash, | 1888 | .unhash = udp_lib_unhash, |
| 1889 | .rehash = udp_v4_rehash, | ||
| 1846 | .get_port = udp_v4_get_port, | 1890 | .get_port = udp_v4_get_port, |
| 1847 | .memory_allocated = &udp_memory_allocated, | 1891 | .memory_allocated = &udp_memory_allocated, |
| 1848 | .sysctl_mem = sysctl_udp_mem, | 1892 | .sysctl_mem = sysctl_udp_mem, |
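udp_lib_rehash() above recomputes the secondary (address+port) hash when a bound socket's local address changes and, only when the slot actually differs, moves the socket between chains while holding the primary chain lock and both slot locks. A much-simplified, runnable userspace sketch of the rehash idea (locking and RCU omitted, hash function invented):

#include <stdio.h>

#define NBUCKETS 8

struct sock_entry {
	unsigned int addr;        /* local address the socket is bound to */
	unsigned short port;
	int bucket;               /* current secondary hash bucket */
};

static int hash2(unsigned int addr, unsigned short port)
{
	return (int)((addr ^ port) % NBUCKETS);
}

static void rehash(struct sock_entry *s, unsigned int new_addr)
{
	int new_bucket;

	s->addr = new_addr;
	new_bucket = hash2(s->addr, s->port);
	if (new_bucket != s->bucket) {
		/* here the kernel unlinks from the old chain and links
		 * into the new one under the appropriate locks */
		printf("moved from bucket %d to %d\n", s->bucket, new_bucket);
		s->bucket = new_bucket;
	}
}

int main(void)
{
	struct sock_entry s = { .addr = 0, .port = 5353, .bucket = 0 };

	s.bucket = hash2(s.addr, s.port);
	rehash(&s, 0x0a000001);   /* connect() filled in 10.0.0.1 as source */
	return 0;
}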
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 7d929a22cbc2..ef371aa01ac5 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c | |||
| @@ -105,9 +105,12 @@ ipv4_connected: | |||
| 105 | if (ipv6_addr_any(&np->saddr)) | 105 | if (ipv6_addr_any(&np->saddr)) |
| 106 | ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr); | 106 | ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr); |
| 107 | 107 | ||
| 108 | if (ipv6_addr_any(&np->rcv_saddr)) | 108 | if (ipv6_addr_any(&np->rcv_saddr)) { |
| 109 | ipv6_addr_set_v4mapped(inet->inet_rcv_saddr, | 109 | ipv6_addr_set_v4mapped(inet->inet_rcv_saddr, |
| 110 | &np->rcv_saddr); | 110 | &np->rcv_saddr); |
| 111 | if (sk->sk_prot->rehash) | ||
| 112 | sk->sk_prot->rehash(sk); | ||
| 113 | } | ||
| 111 | 114 | ||
| 112 | goto out; | 115 | goto out; |
| 113 | } | 116 | } |
| @@ -181,6 +184,8 @@ ipv4_connected: | |||
| 181 | if (ipv6_addr_any(&np->rcv_saddr)) { | 184 | if (ipv6_addr_any(&np->rcv_saddr)) { |
| 182 | ipv6_addr_copy(&np->rcv_saddr, &fl.fl6_src); | 185 | ipv6_addr_copy(&np->rcv_saddr, &fl.fl6_src); |
| 183 | inet->inet_rcv_saddr = LOOPBACK4_IPV6; | 186 | inet->inet_rcv_saddr = LOOPBACK4_IPV6; |
| 187 | if (sk->sk_prot->rehash) | ||
| 188 | sk->sk_prot->rehash(sk); | ||
| 184 | } | 189 | } |
| 185 | 190 | ||
| 186 | ip6_dst_store(sk, dst, | 191 | ip6_dst_store(sk, dst, |
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 13ef5bc05cf5..578f3c1a16db 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c | |||
| @@ -113,14 +113,6 @@ static void nf_skb_free(struct sk_buff *skb) | |||
| 113 | kfree_skb(NFCT_FRAG6_CB(skb)->orig); | 113 | kfree_skb(NFCT_FRAG6_CB(skb)->orig); |
| 114 | } | 114 | } |
| 115 | 115 | ||
| 116 | /* Memory Tracking Functions. */ | ||
| 117 | static void frag_kfree_skb(struct sk_buff *skb) | ||
| 118 | { | ||
| 119 | atomic_sub(skb->truesize, &nf_init_frags.mem); | ||
| 120 | nf_skb_free(skb); | ||
| 121 | kfree_skb(skb); | ||
| 122 | } | ||
| 123 | |||
| 124 | /* Destruction primitives. */ | 116 | /* Destruction primitives. */ |
| 125 | 117 | ||
| 126 | static __inline__ void fq_put(struct nf_ct_frag6_queue *fq) | 118 | static __inline__ void fq_put(struct nf_ct_frag6_queue *fq) |
| @@ -282,66 +274,22 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb, | |||
| 282 | } | 274 | } |
| 283 | 275 | ||
| 284 | found: | 276 | found: |
| 285 | /* We found where to put this one. Check for overlap with | 277 | /* RFC5722, Section 4: |
| 286 | * preceding fragment, and, if needed, align things so that | 278 | * When reassembling an IPv6 datagram, if |
| 287 | * any overlaps are eliminated. | 279 | * one or more its constituent fragments is determined to be an |
| 288 | */ | 280 | * overlapping fragment, the entire datagram (and any constituent |
| 289 | if (prev) { | 281 | * fragments, including those not yet received) MUST be silently |
| 290 | int i = (NFCT_FRAG6_CB(prev)->offset + prev->len) - offset; | 282 | * discarded. |
| 291 | |||
| 292 | if (i > 0) { | ||
| 293 | offset += i; | ||
| 294 | if (end <= offset) { | ||
| 295 | pr_debug("overlap\n"); | ||
| 296 | goto err; | ||
| 297 | } | ||
| 298 | if (!pskb_pull(skb, i)) { | ||
| 299 | pr_debug("Can't pull\n"); | ||
| 300 | goto err; | ||
| 301 | } | ||
| 302 | if (skb->ip_summed != CHECKSUM_UNNECESSARY) | ||
| 303 | skb->ip_summed = CHECKSUM_NONE; | ||
| 304 | } | ||
| 305 | } | ||
| 306 | |||
| 307 | /* Look for overlap with succeeding segments. | ||
| 308 | * If we can merge fragments, do it. | ||
| 309 | */ | 283 | */ |
| 310 | while (next && NFCT_FRAG6_CB(next)->offset < end) { | ||
| 311 | /* overlap is 'i' bytes */ | ||
| 312 | int i = end - NFCT_FRAG6_CB(next)->offset; | ||
| 313 | |||
| 314 | if (i < next->len) { | ||
| 315 | /* Eat head of the next overlapped fragment | ||
| 316 | * and leave the loop. The next ones cannot overlap. | ||
| 317 | */ | ||
| 318 | pr_debug("Eat head of the overlapped parts.: %d", i); | ||
| 319 | if (!pskb_pull(next, i)) | ||
| 320 | goto err; | ||
| 321 | 284 | ||
| 322 | /* next fragment */ | 285 | /* Check for overlap with preceding fragment. */ |
| 323 | NFCT_FRAG6_CB(next)->offset += i; | 286 | if (prev && |
| 324 | fq->q.meat -= i; | 287 | (NFCT_FRAG6_CB(prev)->offset + prev->len) - offset > 0) |
| 325 | if (next->ip_summed != CHECKSUM_UNNECESSARY) | 288 | goto discard_fq; |
| 326 | next->ip_summed = CHECKSUM_NONE; | ||
| 327 | break; | ||
| 328 | } else { | ||
| 329 | struct sk_buff *free_it = next; | ||
| 330 | |||
| 331 | /* Old fragmnet is completely overridden with | ||
| 332 | * new one drop it. | ||
| 333 | */ | ||
| 334 | next = next->next; | ||
| 335 | 289 | ||
| 336 | if (prev) | 290 | /* Look for overlap with succeeding segment. */ |
| 337 | prev->next = next; | 291 | if (next && NFCT_FRAG6_CB(next)->offset < end) |
| 338 | else | 292 | goto discard_fq; |
| 339 | fq->q.fragments = next; | ||
| 340 | |||
| 341 | fq->q.meat -= free_it->len; | ||
| 342 | frag_kfree_skb(free_it); | ||
| 343 | } | ||
| 344 | } | ||
| 345 | 293 | ||
| 346 | NFCT_FRAG6_CB(skb)->offset = offset; | 294 | NFCT_FRAG6_CB(skb)->offset = offset; |
| 347 | 295 | ||
| @@ -371,6 +319,8 @@ found: | |||
| 371 | write_unlock(&nf_frags.lock); | 319 | write_unlock(&nf_frags.lock); |
| 372 | return 0; | 320 | return 0; |
| 373 | 321 | ||
| 322 | discard_fq: | ||
| 323 | fq_kill(fq); | ||
| 374 | err: | 324 | err: |
| 375 | return -1; | 325 | return -1; |
| 376 | } | 326 | } |
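Both IPv6 reassembly paths (this nf_conntrack hunk and the net/ipv6/reassembly.c one below) switch to the RFC 5722 rule: any overlap with a neighbouring fragment discards the whole datagram instead of trimming or merging. A runnable userspace sketch of that check with made-up offsets:

#include <stdbool.h>
#include <stdio.h>

struct frag { int offset; int len; };

/* returns true if the new fragment must cause the whole queue to be dropped */
static bool overlaps(const struct frag *prev, const struct frag *next,
		     int offset, int end)
{
	if (prev && prev->offset + prev->len > offset)
		return true;            /* overlaps the preceding fragment */
	if (next && next->offset < end)
		return true;            /* overlaps the succeeding fragment */
	return false;
}

int main(void)
{
	struct frag prev = { .offset = 0, .len = 1232 };
	struct frag next = { .offset = 2464, .len = 1232 };

	printf("clean insert: %s\n",
	       overlaps(&prev, &next, 1232, 2464) ? "discard" : "queue");
	printf("overlapping insert: %s\n",
	       overlaps(&prev, &next, 1200, 2464) ? "discard" : "queue");
	return 0;
}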
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 545c4141b755..64cfef1b0a4c 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c | |||
| @@ -149,13 +149,6 @@ int ip6_frag_match(struct inet_frag_queue *q, void *a) | |||
| 149 | } | 149 | } |
| 150 | EXPORT_SYMBOL(ip6_frag_match); | 150 | EXPORT_SYMBOL(ip6_frag_match); |
| 151 | 151 | ||
| 152 | /* Memory Tracking Functions. */ | ||
| 153 | static void frag_kfree_skb(struct netns_frags *nf, struct sk_buff *skb) | ||
| 154 | { | ||
| 155 | atomic_sub(skb->truesize, &nf->mem); | ||
| 156 | kfree_skb(skb); | ||
| 157 | } | ||
| 158 | |||
| 159 | void ip6_frag_init(struct inet_frag_queue *q, void *a) | 152 | void ip6_frag_init(struct inet_frag_queue *q, void *a) |
| 160 | { | 153 | { |
| 161 | struct frag_queue *fq = container_of(q, struct frag_queue, q); | 154 | struct frag_queue *fq = container_of(q, struct frag_queue, q); |
| @@ -346,58 +339,22 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, | |||
| 346 | } | 339 | } |
| 347 | 340 | ||
| 348 | found: | 341 | found: |
| 349 | /* We found where to put this one. Check for overlap with | 342 | /* RFC5722, Section 4: |
| 350 | * preceding fragment, and, if needed, align things so that | 343 | * When reassembling an IPv6 datagram, if |
| 351 | * any overlaps are eliminated. | 344 | * one or more its constituent fragments is determined to be an |
| 345 | * overlapping fragment, the entire datagram (and any constituent | ||
| 346 | * fragments, including those not yet received) MUST be silently | ||
| 347 | * discarded. | ||
| 352 | */ | 348 | */ |
| 353 | if (prev) { | ||
| 354 | int i = (FRAG6_CB(prev)->offset + prev->len) - offset; | ||
| 355 | 349 | ||
| 356 | if (i > 0) { | 350 | /* Check for overlap with preceding fragment. */ |
| 357 | offset += i; | 351 | if (prev && |
| 358 | if (end <= offset) | 352 | (FRAG6_CB(prev)->offset + prev->len) - offset > 0) |
| 359 | goto err; | 353 | goto discard_fq; |
| 360 | if (!pskb_pull(skb, i)) | ||
| 361 | goto err; | ||
| 362 | if (skb->ip_summed != CHECKSUM_UNNECESSARY) | ||
| 363 | skb->ip_summed = CHECKSUM_NONE; | ||
| 364 | } | ||
| 365 | } | ||
| 366 | 354 | ||
| 367 | /* Look for overlap with succeeding segments. | 355 | /* Look for overlap with succeeding segment. */ |
| 368 | * If we can merge fragments, do it. | 356 | if (next && FRAG6_CB(next)->offset < end) |
| 369 | */ | 357 | goto discard_fq; |
| 370 | while (next && FRAG6_CB(next)->offset < end) { | ||
| 371 | int i = end - FRAG6_CB(next)->offset; /* overlap is 'i' bytes */ | ||
| 372 | |||
| 373 | if (i < next->len) { | ||
| 374 | /* Eat head of the next overlapped fragment | ||
| 375 | * and leave the loop. The next ones cannot overlap. | ||
| 376 | */ | ||
| 377 | if (!pskb_pull(next, i)) | ||
| 378 | goto err; | ||
| 379 | FRAG6_CB(next)->offset += i; /* next fragment */ | ||
| 380 | fq->q.meat -= i; | ||
| 381 | if (next->ip_summed != CHECKSUM_UNNECESSARY) | ||
| 382 | next->ip_summed = CHECKSUM_NONE; | ||
| 383 | break; | ||
| 384 | } else { | ||
| 385 | struct sk_buff *free_it = next; | ||
| 386 | |||
| 387 | /* Old fragment is completely overridden with | ||
| 388 | * new one drop it. | ||
| 389 | */ | ||
| 390 | next = next->next; | ||
| 391 | |||
| 392 | if (prev) | ||
| 393 | prev->next = next; | ||
| 394 | else | ||
| 395 | fq->q.fragments = next; | ||
| 396 | |||
| 397 | fq->q.meat -= free_it->len; | ||
| 398 | frag_kfree_skb(fq->q.net, free_it); | ||
| 399 | } | ||
| 400 | } | ||
| 401 | 358 | ||
| 402 | FRAG6_CB(skb)->offset = offset; | 359 | FRAG6_CB(skb)->offset = offset; |
| 403 | 360 | ||
| @@ -436,6 +393,8 @@ found: | |||
| 436 | write_unlock(&ip6_frags.lock); | 393 | write_unlock(&ip6_frags.lock); |
| 437 | return -1; | 394 | return -1; |
| 438 | 395 | ||
| 396 | discard_fq: | ||
| 397 | fq_kill(fq); | ||
| 439 | err: | 398 | err: |
| 440 | IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), | 399 | IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), |
| 441 | IPSTATS_MIB_REASMFAILS); | 400 | IPSTATS_MIB_REASMFAILS); |
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 1dd1affdead2..5acb3560ff15 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
| @@ -111,6 +111,15 @@ int udp_v6_get_port(struct sock *sk, unsigned short snum) | |||
| 111 | return udp_lib_get_port(sk, snum, ipv6_rcv_saddr_equal, hash2_nulladdr); | 111 | return udp_lib_get_port(sk, snum, ipv6_rcv_saddr_equal, hash2_nulladdr); |
| 112 | } | 112 | } |
| 113 | 113 | ||
| 114 | static void udp_v6_rehash(struct sock *sk) | ||
| 115 | { | ||
| 116 | u16 new_hash = udp6_portaddr_hash(sock_net(sk), | ||
| 117 | &inet6_sk(sk)->rcv_saddr, | ||
| 118 | inet_sk(sk)->inet_num); | ||
| 119 | |||
| 120 | udp_lib_rehash(sk, new_hash); | ||
| 121 | } | ||
| 122 | |||
| 114 | static inline int compute_score(struct sock *sk, struct net *net, | 123 | static inline int compute_score(struct sock *sk, struct net *net, |
| 115 | unsigned short hnum, | 124 | unsigned short hnum, |
| 116 | struct in6_addr *saddr, __be16 sport, | 125 | struct in6_addr *saddr, __be16 sport, |
| @@ -1447,6 +1456,7 @@ struct proto udpv6_prot = { | |||
| 1447 | .backlog_rcv = udpv6_queue_rcv_skb, | 1456 | .backlog_rcv = udpv6_queue_rcv_skb, |
| 1448 | .hash = udp_lib_hash, | 1457 | .hash = udp_lib_hash, |
| 1449 | .unhash = udp_lib_unhash, | 1458 | .unhash = udp_lib_unhash, |
| 1459 | .rehash = udp_v6_rehash, | ||
| 1450 | .get_port = udp_v6_get_port, | 1460 | .get_port = udp_v6_get_port, |
| 1451 | .memory_allocated = &udp_memory_allocated, | 1461 | .memory_allocated = &udp_memory_allocated, |
| 1452 | .sysctl_mem = sysctl_udp_mem, | 1462 | .sysctl_mem = sysctl_udp_mem, |
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index 79986a674f6e..fd55b5135de5 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c | |||
| @@ -824,8 +824,8 @@ static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
| 824 | 824 | ||
| 825 | err = irda_open_tsap(self, addr->sir_lsap_sel, addr->sir_name); | 825 | err = irda_open_tsap(self, addr->sir_lsap_sel, addr->sir_name); |
| 826 | if (err < 0) { | 826 | if (err < 0) { |
| 827 | kfree(self->ias_obj->name); | 827 | irias_delete_object(self->ias_obj); |
| 828 | kfree(self->ias_obj); | 828 | self->ias_obj = NULL; |
| 829 | goto out; | 829 | goto out; |
| 830 | } | 830 | } |
| 831 | 831 | ||
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c index a788f9e9427d..6130f9d9dbe1 100644 --- a/net/irda/irlan/irlan_common.c +++ b/net/irda/irlan/irlan_common.c | |||
| @@ -1102,7 +1102,7 @@ int irlan_extract_param(__u8 *buf, char *name, char *value, __u16 *len) | |||
| 1102 | memcpy(&val_len, buf+n, 2); /* To avoid alignment problems */ | 1102 | memcpy(&val_len, buf+n, 2); /* To avoid alignment problems */ |
| 1103 | le16_to_cpus(&val_len); n+=2; | 1103 | le16_to_cpus(&val_len); n+=2; |
| 1104 | 1104 | ||
| 1105 | if (val_len > 1016) { | 1105 | if (val_len >= 1016) { |
| 1106 | IRDA_DEBUG(2, "%s(), parameter length to long\n", __func__ ); | 1106 | IRDA_DEBUG(2, "%s(), parameter length to long\n", __func__ ); |
| 1107 | return -RSP_INVALID_COMMAND_FORMAT; | 1107 | return -RSP_INVALID_COMMAND_FORMAT; |
| 1108 | } | 1108 | } |
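The irlan_extract_param() hunk tightens the length check from '>' to '>='. That matters when the destination buffer is exactly 1016 bytes and the copied value is followed by further processing such as NUL termination, so a length equal to the buffer size must be rejected too. The sketch below is a runnable illustration; its buffer handling is an assumption about the driver, not copied from it:

#include <stdio.h>
#include <string.h>

#define VAL_MAX 1016

static int extract_value(char *dst, const unsigned char *src, size_t val_len)
{
	if (val_len >= VAL_MAX)         /* '>' alone would let val_len == VAL_MAX through */
		return -1;
	memcpy(dst, src, val_len);
	dst[val_len] = '\0';            /* needs one byte beyond val_len */
	return 0;
}

int main(void)
{
	char value[VAL_MAX];
	unsigned char input[2048] = { 0 };

	printf("len 1016 -> %d (rejected)\n", extract_value(value, input, 1016));
	printf("len 100  -> %d (accepted)\n", extract_value(value, input, 100));
	return 0;
}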
diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 798a91b100cc..ded5c3843e06 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c | |||
| @@ -732,6 +732,12 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw) | |||
| 732 | 732 | ||
| 733 | rtnl_unlock(); | 733 | rtnl_unlock(); |
| 734 | 734 | ||
| 735 | /* | ||
| 736 | * Now all work items will be gone, but the | ||
| 737 | * timer might still be armed, so delete it | ||
| 738 | */ | ||
| 739 | del_timer_sync(&local->work_timer); | ||
| 740 | |||
| 735 | cancel_work_sync(&local->reconfig_filter); | 741 | cancel_work_sync(&local->reconfig_filter); |
| 736 | 742 | ||
| 737 | ieee80211_clear_tx_pending(local); | 743 | ieee80211_clear_tx_pending(local); |
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 4f8ddba48011..4c2f89df5cce 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c | |||
| @@ -924,6 +924,7 @@ handle_response(int af, struct sk_buff *skb, struct ip_vs_protocol *pp, | |||
| 924 | 924 | ||
| 925 | ip_vs_out_stats(cp, skb); | 925 | ip_vs_out_stats(cp, skb); |
| 926 | ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pp); | 926 | ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pp); |
| 927 | ip_vs_update_conntrack(skb, cp, 0); | ||
| 927 | ip_vs_conn_put(cp); | 928 | ip_vs_conn_put(cp); |
| 928 | 929 | ||
| 929 | skb->ipvs_property = 1; | 930 | skb->ipvs_property = 1; |
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c index f228a17ec649..7e9af5b76d9e 100644 --- a/net/netfilter/ipvs/ip_vs_ftp.c +++ b/net/netfilter/ipvs/ip_vs_ftp.c | |||
| @@ -45,6 +45,7 @@ | |||
| 45 | #include <linux/netfilter.h> | 45 | #include <linux/netfilter.h> |
| 46 | #include <net/netfilter/nf_conntrack.h> | 46 | #include <net/netfilter/nf_conntrack.h> |
| 47 | #include <net/netfilter/nf_conntrack_expect.h> | 47 | #include <net/netfilter/nf_conntrack_expect.h> |
| 48 | #include <net/netfilter/nf_nat.h> | ||
| 48 | #include <net/netfilter/nf_nat_helper.h> | 49 | #include <net/netfilter/nf_nat_helper.h> |
| 49 | #include <linux/gfp.h> | 50 | #include <linux/gfp.h> |
| 50 | #include <net/protocol.h> | 51 | #include <net/protocol.h> |
| @@ -359,7 +360,7 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, | |||
| 359 | buf_len = strlen(buf); | 360 | buf_len = strlen(buf); |
| 360 | 361 | ||
| 361 | ct = nf_ct_get(skb, &ctinfo); | 362 | ct = nf_ct_get(skb, &ctinfo); |
| 362 | if (ct && !nf_ct_is_untracked(ct)) { | 363 | if (ct && !nf_ct_is_untracked(ct) && nfct_nat(ct)) { |
| 363 | /* If mangling fails this function will return 0 | 364 | /* If mangling fails this function will return 0 |
| 364 | * which will cause the packet to be dropped. | 365 | * which will cause the packet to be dropped. |
| 365 | * Mangling can only fail under memory pressure, | 366 | * Mangling can only fail under memory pressure, |
| @@ -409,7 +410,6 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp, | |||
| 409 | union nf_inet_addr to; | 410 | union nf_inet_addr to; |
| 410 | __be16 port; | 411 | __be16 port; |
| 411 | struct ip_vs_conn *n_cp; | 412 | struct ip_vs_conn *n_cp; |
| 412 | struct nf_conn *ct; | ||
| 413 | 413 | ||
| 414 | #ifdef CONFIG_IP_VS_IPV6 | 414 | #ifdef CONFIG_IP_VS_IPV6 |
| 415 | /* This application helper doesn't work with IPv6 yet, | 415 | /* This application helper doesn't work with IPv6 yet, |
| @@ -496,11 +496,6 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp, | |||
| 496 | ip_vs_control_add(n_cp, cp); | 496 | ip_vs_control_add(n_cp, cp); |
| 497 | } | 497 | } |
| 498 | 498 | ||
| 499 | ct = (struct nf_conn *)skb->nfct; | ||
| 500 | if (ct && ct != &nf_conntrack_untracked) | ||
| 501 | ip_vs_expect_related(skb, ct, n_cp, | ||
| 502 | IPPROTO_TCP, &n_cp->dport, 1); | ||
| 503 | |||
| 504 | /* | 499 | /* |
| 505 | * Move tunnel to listen state | 500 | * Move tunnel to listen state |
| 506 | */ | 501 | */ |
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index 21e1a5e9b9d3..49df6bea6a2d 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c | |||
| @@ -349,8 +349,8 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
| 349 | } | 349 | } |
| 350 | #endif | 350 | #endif |
| 351 | 351 | ||
| 352 | static void | 352 | void |
| 353 | ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp) | 353 | ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, int outin) |
| 354 | { | 354 | { |
| 355 | struct nf_conn *ct = (struct nf_conn *)skb->nfct; | 355 | struct nf_conn *ct = (struct nf_conn *)skb->nfct; |
| 356 | struct nf_conntrack_tuple new_tuple; | 356 | struct nf_conntrack_tuple new_tuple; |
| @@ -365,11 +365,17 @@ ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp) | |||
| 365 | * real-server we will see RIP->DIP. | 365 | * real-server we will see RIP->DIP. |
| 366 | */ | 366 | */ |
| 367 | new_tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple; | 367 | new_tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple; |
| 368 | new_tuple.src.u3 = cp->daddr; | 368 | if (outin) |
| 369 | new_tuple.src.u3 = cp->daddr; | ||
| 370 | else | ||
| 371 | new_tuple.dst.u3 = cp->vaddr; | ||
| 369 | /* | 372 | /* |
| 370 | * This will also take care of UDP and other protocols. | 373 | * This will also take care of UDP and other protocols. |
| 371 | */ | 374 | */ |
| 372 | new_tuple.src.u.tcp.port = cp->dport; | 375 | if (outin) |
| 376 | new_tuple.src.u.tcp.port = cp->dport; | ||
| 377 | else | ||
| 378 | new_tuple.dst.u.tcp.port = cp->vport; | ||
| 373 | nf_conntrack_alter_reply(ct, &new_tuple); | 379 | nf_conntrack_alter_reply(ct, &new_tuple); |
| 374 | } | 380 | } |
| 375 | 381 | ||
| @@ -428,7 +434,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
| 428 | 434 | ||
| 429 | IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT"); | 435 | IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT"); |
| 430 | 436 | ||
| 431 | ip_vs_update_conntrack(skb, cp); | 437 | ip_vs_update_conntrack(skb, cp, 1); |
| 432 | 438 | ||
| 433 | /* FIXME: when application helper enlarges the packet and the length | 439 | /* FIXME: when application helper enlarges the packet and the length |
| 434 | is larger than the MTU of outgoing device, there will be still | 440 | is larger than the MTU of outgoing device, there will be still |
| @@ -506,7 +512,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
| 506 | 512 | ||
| 507 | IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT"); | 513 | IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT"); |
| 508 | 514 | ||
| 509 | ip_vs_update_conntrack(skb, cp); | 515 | ip_vs_update_conntrack(skb, cp, 1); |
| 510 | 516 | ||
| 511 | /* FIXME: when application helper enlarges the packet and the length | 517 | /* FIXME: when application helper enlarges the packet and the length |
| 512 | is larger than the MTU of outgoing device, there will be still | 518 | is larger than the MTU of outgoing device, there will be still |
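Taken together, the IPVS hunks make ip_vs_update_conntrack() direction-aware: on the forward (DNAT) path the conntrack reply tuple's source becomes the real server, handle_response() now calls it for the return path so the reply tuple's destination is mapped back to the virtual service, and the FTP helper only mangles connections that actually carry NAT state. A heavily simplified sketch of that branch, using stand-in types rather than the conntrack ones:

struct addr_port { unsigned int addr; unsigned short port; };
struct reply_tuple { struct addr_port src, dst; };

static void update_reply_tuple(struct reply_tuple *t, int outin,
			       struct addr_port real_server,
			       struct addr_port virtual_service)
{
	if (outin)
		t->src = real_server;      /* forward path: expect replies from the real server */
	else
		t->dst = virtual_service;  /* response path: map the reply back to the virtual service */
}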
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 980fe4ad0016..cd96ed3ccee4 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
| @@ -2102,6 +2102,26 @@ static void __net_exit netlink_net_exit(struct net *net) | |||
| 2102 | #endif | 2102 | #endif |
| 2103 | } | 2103 | } |
| 2104 | 2104 | ||
| 2105 | static void __init netlink_add_usersock_entry(void) | ||
| 2106 | { | ||
| 2107 | unsigned long *listeners; | ||
| 2108 | int groups = 32; | ||
| 2109 | |||
| 2110 | listeners = kzalloc(NLGRPSZ(groups) + sizeof(struct listeners_rcu_head), | ||
| 2111 | GFP_KERNEL); | ||
| 2112 | if (!listeners) | ||
| 2113 | panic("netlink_add_usersock_entry: Cannot allocate listneres\n"); | ||
| 2114 | |||
| 2115 | netlink_table_grab(); | ||
| 2116 | |||
| 2117 | nl_table[NETLINK_USERSOCK].groups = groups; | ||
| 2118 | nl_table[NETLINK_USERSOCK].listeners = listeners; | ||
| 2119 | nl_table[NETLINK_USERSOCK].module = THIS_MODULE; | ||
| 2120 | nl_table[NETLINK_USERSOCK].registered = 1; | ||
| 2121 | |||
| 2122 | netlink_table_ungrab(); | ||
| 2123 | } | ||
| 2124 | |||
| 2105 | static struct pernet_operations __net_initdata netlink_net_ops = { | 2125 | static struct pernet_operations __net_initdata netlink_net_ops = { |
| 2106 | .init = netlink_net_init, | 2126 | .init = netlink_net_init, |
| 2107 | .exit = netlink_net_exit, | 2127 | .exit = netlink_net_exit, |
| @@ -2150,6 +2170,8 @@ static int __init netlink_proto_init(void) | |||
| 2150 | hash->rehash_time = jiffies; | 2170 | hash->rehash_time = jiffies; |
| 2151 | } | 2171 | } |
| 2152 | 2172 | ||
| 2173 | netlink_add_usersock_entry(); | ||
| 2174 | |||
| 2153 | sock_register(&netlink_family_ops); | 2175 | sock_register(&netlink_family_ops); |
| 2154 | register_pernet_subsys(&netlink_net_ops); | 2176 | register_pernet_subsys(&netlink_net_ops); |
| 2155 | /* The netlink device handler may be needed early. */ | 2177 | /* The netlink device handler may be needed early. */ |
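netlink_add_usersock_entry() above statically registers NETLINK_USERSOCK with 32 multicast groups and a pre-allocated listeners bitmap, so plain userspace sockets can exchange netlink messages without a kernel-side protocol behind them. A hedged userspace sketch of such a socket; subscribing to groups may still require CAP_NET_ADMIN depending on kernel version, and the group number here is arbitrary:

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>

int main(void)
{
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_USERSOCK);
	struct sockaddr_nl addr = { .nl_family = AF_NETLINK, .nl_groups = 1 };

	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("NETLINK_USERSOCK");
		return 1;
	}
	printf("bound to NETLINK_USERSOCK, multicast group 1\n");
	close(fd);
	return 0;
}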
diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 537a48732e9e..7ebf7439b478 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c | |||
| @@ -350,22 +350,19 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) | |||
| 350 | { | 350 | { |
| 351 | unsigned char *b = skb_tail_pointer(skb); | 351 | unsigned char *b = skb_tail_pointer(skb); |
| 352 | struct tcf_police *police = a->priv; | 352 | struct tcf_police *police = a->priv; |
| 353 | struct tc_police opt; | 353 | struct tc_police opt = { |
| 354 | 354 | .index = police->tcf_index, | |
| 355 | opt.index = police->tcf_index; | 355 | .action = police->tcf_action, |
| 356 | opt.action = police->tcf_action; | 356 | .mtu = police->tcfp_mtu, |
| 357 | opt.mtu = police->tcfp_mtu; | 357 | .burst = police->tcfp_burst, |
| 358 | opt.burst = police->tcfp_burst; | 358 | .refcnt = police->tcf_refcnt - ref, |
| 359 | opt.refcnt = police->tcf_refcnt - ref; | 359 | .bindcnt = police->tcf_bindcnt - bind, |
| 360 | opt.bindcnt = police->tcf_bindcnt - bind; | 360 | }; |
| 361 | |||
| 361 | if (police->tcfp_R_tab) | 362 | if (police->tcfp_R_tab) |
| 362 | opt.rate = police->tcfp_R_tab->rate; | 363 | opt.rate = police->tcfp_R_tab->rate; |
| 363 | else | ||
| 364 | memset(&opt.rate, 0, sizeof(opt.rate)); | ||
| 365 | if (police->tcfp_P_tab) | 364 | if (police->tcfp_P_tab) |
| 366 | opt.peakrate = police->tcfp_P_tab->rate; | 365 | opt.peakrate = police->tcfp_P_tab->rate; |
| 367 | else | ||
| 368 | memset(&opt.peakrate, 0, sizeof(opt.peakrate)); | ||
| 369 | NLA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt); | 366 | NLA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt); |
| 370 | if (police->tcfp_result) | 367 | if (police->tcfp_result) |
| 371 | NLA_PUT_U32(skb, TCA_POLICE_RESULT, police->tcfp_result); | 368 | NLA_PUT_U32(skb, TCA_POLICE_RESULT, police->tcfp_result); |
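The act_police dump conversion above relies on a C designated initializer zero-filling every member that is not explicitly named, which is why the memset() fallbacks for rate and peakrate can be dropped. A runnable illustration with a cut-down structure:

#include <stdio.h>

struct tc_police_like {
	unsigned int index;
	int action;
	unsigned int rate;      /* not named below -> zero */
	unsigned int peakrate;  /* not named below -> zero */
};

int main(void)
{
	struct tc_police_like opt = {
		.index  = 42,
		.action = 1,
	};

	printf("rate=%u peakrate=%u\n", opt.rate, opt.peakrate);  /* prints 0 0 */
	return 0;
}

The same zero-fill guarantee covers any members added to the structure later, which is part of why the initializer form is generally preferred over field-by-field assignment.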
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index abd904be4287..47496098d35c 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c | |||
| @@ -761,8 +761,8 @@ init_vf(struct hfsc_class *cl, unsigned int len) | |||
| 761 | if (f != cl->cl_f) { | 761 | if (f != cl->cl_f) { |
| 762 | cl->cl_f = f; | 762 | cl->cl_f = f; |
| 763 | cftree_update(cl); | 763 | cftree_update(cl); |
| 764 | update_cfmin(cl->cl_parent); | ||
| 765 | } | 764 | } |
| 765 | update_cfmin(cl->cl_parent); | ||
| 766 | } | 766 | } |
| 767 | } | 767 | } |
| 768 | 768 | ||
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 24b2cd555637..d344dc481ccc 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
| @@ -1232,6 +1232,18 @@ out: | |||
| 1232 | return 0; | 1232 | return 0; |
| 1233 | } | 1233 | } |
| 1234 | 1234 | ||
| 1235 | static bool list_has_sctp_addr(const struct list_head *list, | ||
| 1236 | union sctp_addr *ipaddr) | ||
| 1237 | { | ||
| 1238 | struct sctp_transport *addr; | ||
| 1239 | |||
| 1240 | list_for_each_entry(addr, list, transports) { | ||
| 1241 | if (sctp_cmp_addr_exact(ipaddr, &addr->ipaddr)) | ||
| 1242 | return true; | ||
| 1243 | } | ||
| 1244 | |||
| 1245 | return false; | ||
| 1246 | } | ||
| 1235 | /* A restart is occurring, check to make sure no new addresses | 1247 | /* A restart is occurring, check to make sure no new addresses |
| 1236 | * are being added as we may be under a takeover attack. | 1248 | * are being added as we may be under a takeover attack. |
| 1237 | */ | 1249 | */ |
| @@ -1240,10 +1252,10 @@ static int sctp_sf_check_restart_addrs(const struct sctp_association *new_asoc, | |||
| 1240 | struct sctp_chunk *init, | 1252 | struct sctp_chunk *init, |
| 1241 | sctp_cmd_seq_t *commands) | 1253 | sctp_cmd_seq_t *commands) |
| 1242 | { | 1254 | { |
| 1243 | struct sctp_transport *new_addr, *addr; | 1255 | struct sctp_transport *new_addr; |
| 1244 | int found; | 1256 | int ret = 1; |
| 1245 | 1257 | ||
| 1246 | /* Implementor's Guide - Sectin 5.2.2 | 1258 | /* Implementor's Guide - Section 5.2.2 |
| 1247 | * ... | 1259 | * ... |
| 1248 | * Before responding the endpoint MUST check to see if the | 1260 | * Before responding the endpoint MUST check to see if the |
| 1249 | * unexpected INIT adds new addresses to the association. If new | 1261 | * unexpected INIT adds new addresses to the association. If new |
| @@ -1254,31 +1266,19 @@ static int sctp_sf_check_restart_addrs(const struct sctp_association *new_asoc, | |||
| 1254 | /* Search through all current addresses and make sure | 1266 | /* Search through all current addresses and make sure |
| 1255 | * we aren't adding any new ones. | 1267 | * we aren't adding any new ones. |
| 1256 | */ | 1268 | */ |
| 1257 | new_addr = NULL; | ||
| 1258 | found = 0; | ||
| 1259 | |||
| 1260 | list_for_each_entry(new_addr, &new_asoc->peer.transport_addr_list, | 1269 | list_for_each_entry(new_addr, &new_asoc->peer.transport_addr_list, |
| 1261 | transports) { | 1270 | transports) { |
| 1262 | found = 0; | 1271 | if (!list_has_sctp_addr(&asoc->peer.transport_addr_list, |
| 1263 | list_for_each_entry(addr, &asoc->peer.transport_addr_list, | 1272 | &new_addr->ipaddr)) { |
| 1264 | transports) { | 1273 | sctp_sf_send_restart_abort(&new_addr->ipaddr, init, |
| 1265 | if (sctp_cmp_addr_exact(&new_addr->ipaddr, | 1274 | commands); |
| 1266 | &addr->ipaddr)) { | 1275 | ret = 0; |
| 1267 | found = 1; | ||
| 1268 | break; | ||
| 1269 | } | ||
| 1270 | } | ||
| 1271 | if (!found) | ||
| 1272 | break; | 1276 | break; |
| 1273 | } | 1277 | } |
| 1274 | |||
| 1275 | /* If a new address was added, ABORT the sender. */ | ||
| 1276 | if (!found && new_addr) { | ||
| 1277 | sctp_sf_send_restart_abort(&new_addr->ipaddr, init, commands); | ||
| 1278 | } | 1278 | } |
| 1279 | 1279 | ||
| 1280 | /* Return success if all addresses were found. */ | 1280 | /* Return success if all addresses were found. */ |
| 1281 | return found; | 1281 | return ret; |
| 1282 | } | 1282 | } |
| 1283 | 1283 | ||
| 1284 | /* Populate the verification/tie tags based on overlapping INIT | 1284 | /* Populate the verification/tie tags based on overlapping INIT |
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 4414a18c63b4..0b39b2451ea5 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
| @@ -692,6 +692,7 @@ static int unix_autobind(struct socket *sock) | |||
| 692 | static u32 ordernum = 1; | 692 | static u32 ordernum = 1; |
| 693 | struct unix_address *addr; | 693 | struct unix_address *addr; |
| 694 | int err; | 694 | int err; |
| 695 | unsigned int retries = 0; | ||
| 695 | 696 | ||
| 696 | mutex_lock(&u->readlock); | 697 | mutex_lock(&u->readlock); |
| 697 | 698 | ||
| @@ -717,9 +718,17 @@ retry: | |||
| 717 | if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type, | 718 | if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type, |
| 718 | addr->hash)) { | 719 | addr->hash)) { |
| 719 | spin_unlock(&unix_table_lock); | 720 | spin_unlock(&unix_table_lock); |
| 720 | /* Sanity yield. It is unusual case, but yet... */ | 721 | /* |
| 721 | if (!(ordernum&0xFF)) | 722 | * __unix_find_socket_byname() may take long time if many names |
| 722 | yield(); | 723 | * are already in use. |
| 724 | */ | ||
| 725 | cond_resched(); | ||
| 726 | /* Give up if all names seems to be in use. */ | ||
| 727 | if (retries++ == 0xFFFFF) { | ||
| 728 | err = -ENOSPC; | ||
| 729 | kfree(addr); | ||
| 730 | goto out; | ||
| 731 | } | ||
| 723 | goto retry; | 732 | goto retry; |
| 724 | } | 733 | } |
| 725 | addr->hash ^= sk->sk_type; | 734 | addr->hash ^= sk->sk_type; |
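The unix_autobind() change above replaces the occasional yield() with cond_resched() on every failed attempt and bounds the search: after 0xFFFFF collisions it gives up with -ENOSPC instead of looping forever. A runnable userspace sketch of the same bounded-retry shape (the limit here is deliberately small, and sched_yield() stands in for cond_resched()):

#include <sched.h>
#include <stdio.h>

#define MAX_RETRIES 1000        /* the kernel patch uses 0xFFFFF */

static int name_in_use(unsigned int candidate)
{
	(void)candidate;
	return 1;               /* pretend every autobind name is taken */
}

static int autobind(unsigned int *out)
{
	unsigned int ordernum = 1, retries = 0;

	while (name_in_use(ordernum)) {
		sched_yield();  /* userspace stand-in for cond_resched() */
		if (retries++ == MAX_RETRIES)
			return -1;      /* the kernel returns -ENOSPC */
		ordernum++;
	}
	*out = ordernum;
	return 0;
}

int main(void)
{
	unsigned int name = 0;

	printf("autobind: %s\n", autobind(&name) == 0 ? "ok" : "gave up (ENOSPC)");
	return 0;
}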
diff --git a/net/wireless/core.c b/net/wireless/core.c index 541e2fff5e9c..d6d046b9f6f2 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c | |||
| @@ -475,12 +475,10 @@ int wiphy_register(struct wiphy *wiphy) | |||
| 475 | mutex_lock(&cfg80211_mutex); | 475 | mutex_lock(&cfg80211_mutex); |
| 476 | 476 | ||
| 477 | res = device_add(&rdev->wiphy.dev); | 477 | res = device_add(&rdev->wiphy.dev); |
| 478 | if (res) | 478 | if (res) { |
| 479 | goto out_unlock; | 479 | mutex_unlock(&cfg80211_mutex); |
| 480 | 480 | return res; | |
| 481 | res = rfkill_register(rdev->rfkill); | 481 | } |
| 482 | if (res) | ||
| 483 | goto out_rm_dev; | ||
| 484 | 482 | ||
| 485 | /* set up regulatory info */ | 483 | /* set up regulatory info */ |
| 486 | wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE); | 484 | wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE); |
| @@ -509,13 +507,18 @@ int wiphy_register(struct wiphy *wiphy) | |||
| 509 | cfg80211_debugfs_rdev_add(rdev); | 507 | cfg80211_debugfs_rdev_add(rdev); |
| 510 | mutex_unlock(&cfg80211_mutex); | 508 | mutex_unlock(&cfg80211_mutex); |
| 511 | 509 | ||
| 510 | /* | ||
| 511 | * due to a locking dependency this has to be outside of the | ||
| 512 | * cfg80211_mutex lock | ||
| 513 | */ | ||
| 514 | res = rfkill_register(rdev->rfkill); | ||
| 515 | if (res) | ||
| 516 | goto out_rm_dev; | ||
| 517 | |||
| 512 | return 0; | 518 | return 0; |
| 513 | 519 | ||
| 514 | out_rm_dev: | 520 | out_rm_dev: |
| 515 | device_del(&rdev->wiphy.dev); | 521 | device_del(&rdev->wiphy.dev); |
| 516 | |||
| 517 | out_unlock: | ||
| 518 | mutex_unlock(&cfg80211_mutex); | ||
| 519 | return res; | 522 | return res; |
| 520 | } | 523 | } |
| 521 | EXPORT_SYMBOL(wiphy_register); | 524 | EXPORT_SYMBOL(wiphy_register); |
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c index bb5e0a5ecfa1..7e5c3a45f811 100644 --- a/net/wireless/wext-compat.c +++ b/net/wireless/wext-compat.c | |||
| @@ -1420,6 +1420,9 @@ int cfg80211_wext_giwessid(struct net_device *dev, | |||
| 1420 | { | 1420 | { |
| 1421 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 1421 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
| 1422 | 1422 | ||
| 1423 | data->flags = 0; | ||
| 1424 | data->length = 0; | ||
| 1425 | |||
| 1423 | switch (wdev->iftype) { | 1426 | switch (wdev->iftype) { |
| 1424 | case NL80211_IFTYPE_ADHOC: | 1427 | case NL80211_IFTYPE_ADHOC: |
| 1425 | return cfg80211_ibss_wext_giwessid(dev, info, data, ssid); | 1428 | return cfg80211_ibss_wext_giwessid(dev, info, data, ssid); |
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c index 0ef17bc42bac..8f5116f5af19 100644 --- a/net/wireless/wext-core.c +++ b/net/wireless/wext-core.c | |||
| @@ -782,6 +782,22 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd, | |||
| 782 | } | 782 | } |
| 783 | } | 783 | } |
| 784 | 784 | ||
| 785 | if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) { | ||
| 786 | /* | ||
| 787 | * If this is a GET, but not NOMAX, it means that the extra | ||
| 788 | * data is not bounded by userspace, but by max_tokens. Thus | ||
| 789 | * set the length to max_tokens. This matches the extra data | ||
| 790 | * allocation. | ||
| 791 | * The driver should fill it with the number of tokens it | ||
| 792 | * provided, and it may check iwp->length rather than having | ||
| 793 | * knowledge of max_tokens. If the driver doesn't change the | ||
| 794 | * iwp->length, this ioctl just copies back max_token tokens | ||
| 795 | * filled with zeroes. Hopefully the driver isn't claiming | ||
| 796 | * them to be valid data. | ||
| 797 | */ | ||
| 798 | iwp->length = descr->max_tokens; | ||
| 799 | } | ||
| 800 | |||
| 785 | err = handler(dev, info, (union iwreq_data *) iwp, extra); | 801 | err = handler(dev, info, (union iwreq_data *) iwp, extra); |
| 786 | 802 | ||
| 787 | iwp->length += essid_compat; | 803 | iwp->length += essid_compat; |
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index b14ed4b1f27c..8bae6b22c846 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c | |||
| @@ -1801,7 +1801,7 @@ static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
| 1801 | struct xfrm_user_expire *ue = nlmsg_data(nlh); | 1801 | struct xfrm_user_expire *ue = nlmsg_data(nlh); |
| 1802 | struct xfrm_usersa_info *p = &ue->state; | 1802 | struct xfrm_usersa_info *p = &ue->state; |
| 1803 | struct xfrm_mark m; | 1803 | struct xfrm_mark m; |
| 1804 | u32 mark = xfrm_mark_get(attrs, &m);; | 1804 | u32 mark = xfrm_mark_get(attrs, &m); |
| 1805 | 1805 | ||
| 1806 | x = xfrm_state_lookup(net, mark, &p->id.daddr, p->id.spi, p->id.proto, p->family); | 1806 | x = xfrm_state_lookup(net, mark, &p->id.daddr, p->id.spi, p->id.proto, p->family); |
| 1807 | 1807 | ||
diff --git a/scripts/basic/docproc.c b/scripts/basic/docproc.c index 79ab973fb43a..fc3b18d844af 100644 --- a/scripts/basic/docproc.c +++ b/scripts/basic/docproc.c | |||
| @@ -34,12 +34,14 @@ | |||
| 34 | * | 34 | * |
| 35 | */ | 35 | */ |
| 36 | 36 | ||
| 37 | #define _GNU_SOURCE | ||
| 37 | #include <stdio.h> | 38 | #include <stdio.h> |
| 38 | #include <stdlib.h> | 39 | #include <stdlib.h> |
| 39 | #include <string.h> | 40 | #include <string.h> |
| 40 | #include <ctype.h> | 41 | #include <ctype.h> |
| 41 | #include <unistd.h> | 42 | #include <unistd.h> |
| 42 | #include <limits.h> | 43 | #include <limits.h> |
| 44 | #include <errno.h> | ||
| 43 | #include <sys/types.h> | 45 | #include <sys/types.h> |
| 44 | #include <sys/wait.h> | 46 | #include <sys/wait.h> |
| 45 | 47 | ||
| @@ -54,6 +56,7 @@ typedef void FILEONLY(char * file); | |||
| 54 | FILEONLY *internalfunctions; | 56 | FILEONLY *internalfunctions; |
| 55 | FILEONLY *externalfunctions; | 57 | FILEONLY *externalfunctions; |
| 56 | FILEONLY *symbolsonly; | 58 | FILEONLY *symbolsonly; |
| 59 | FILEONLY *findall; | ||
| 57 | 60 | ||
| 58 | typedef void FILELINE(char * file, char * line); | 61 | typedef void FILELINE(char * file, char * line); |
| 59 | FILELINE * singlefunctions; | 62 | FILELINE * singlefunctions; |
| @@ -65,12 +68,30 @@ FILELINE * docsection; | |||
| 65 | #define KERNELDOCPATH "scripts/" | 68 | #define KERNELDOCPATH "scripts/" |
| 66 | #define KERNELDOC "kernel-doc" | 69 | #define KERNELDOC "kernel-doc" |
| 67 | #define DOCBOOK "-docbook" | 70 | #define DOCBOOK "-docbook" |
| 71 | #define LIST "-list" | ||
| 68 | #define FUNCTION "-function" | 72 | #define FUNCTION "-function" |
| 69 | #define NOFUNCTION "-nofunction" | 73 | #define NOFUNCTION "-nofunction" |
| 70 | #define NODOCSECTIONS "-no-doc-sections" | 74 | #define NODOCSECTIONS "-no-doc-sections" |
| 71 | 75 | ||
| 72 | static char *srctree, *kernsrctree; | 76 | static char *srctree, *kernsrctree; |
| 73 | 77 | ||
| 78 | static char **all_list = NULL; | ||
| 79 | static int all_list_len = 0; | ||
| 80 | |||
| 81 | static void consume_symbol(const char *sym) | ||
| 82 | { | ||
| 83 | int i; | ||
| 84 | |||
| 85 | for (i = 0; i < all_list_len; i++) { | ||
| 86 | if (!all_list[i]) | ||
| 87 | continue; | ||
| 88 | if (strcmp(sym, all_list[i])) | ||
| 89 | continue; | ||
| 90 | all_list[i] = NULL; | ||
| 91 | break; | ||
| 92 | } | ||
| 93 | } | ||
| 94 | |||
| 74 | static void usage (void) | 95 | static void usage (void) |
| 75 | { | 96 | { |
| 76 | fprintf(stderr, "Usage: docproc {doc|depend} file\n"); | 97 | fprintf(stderr, "Usage: docproc {doc|depend} file\n"); |
| @@ -248,6 +269,7 @@ static void docfunctions(char * filename, char * type) | |||
| 248 | struct symfile * sym = &symfilelist[i]; | 269 | struct symfile * sym = &symfilelist[i]; |
| 249 | for (j=0; j < sym->symbolcnt; j++) { | 270 | for (j=0; j < sym->symbolcnt; j++) { |
| 250 | vec[idx++] = type; | 271 | vec[idx++] = type; |
| 272 | consume_symbol(sym->symbollist[j].name); | ||
| 251 | vec[idx++] = sym->symbollist[j].name; | 273 | vec[idx++] = sym->symbollist[j].name; |
| 252 | } | 274 | } |
| 253 | } | 275 | } |
| @@ -287,6 +309,11 @@ static void singfunc(char * filename, char * line) | |||
| 287 | vec[idx++] = &line[i]; | 309 | vec[idx++] = &line[i]; |
| 288 | } | 310 | } |
| 289 | } | 311 | } |
| 312 | for (i = 0; i < idx; i++) { | ||
| 313 | if (strcmp(vec[i], FUNCTION)) | ||
| 314 | continue; | ||
| 315 | consume_symbol(vec[i + 1]); | ||
| 316 | } | ||
| 290 | vec[idx++] = filename; | 317 | vec[idx++] = filename; |
| 291 | vec[idx] = NULL; | 318 | vec[idx] = NULL; |
| 292 | exec_kernel_doc(vec); | 319 | exec_kernel_doc(vec); |
| @@ -306,6 +333,10 @@ static void docsect(char *filename, char *line) | |||
| 306 | if (*s == '\n') | 333 | if (*s == '\n') |
| 307 | *s = '\0'; | 334 | *s = '\0'; |
| 308 | 335 | ||
| 336 | asprintf(&s, "DOC: %s", line); | ||
| 337 | consume_symbol(s); | ||
| 338 | free(s); | ||
| 339 | |||
| 309 | vec[0] = KERNELDOC; | 340 | vec[0] = KERNELDOC; |
| 310 | vec[1] = DOCBOOK; | 341 | vec[1] = DOCBOOK; |
| 311 | vec[2] = FUNCTION; | 342 | vec[2] = FUNCTION; |
| @@ -315,6 +346,84 @@ static void docsect(char *filename, char *line) | |||
| 315 | exec_kernel_doc(vec); | 346 | exec_kernel_doc(vec); |
| 316 | } | 347 | } |
| 317 | 348 | ||
| 349 | static void find_all_symbols(char *filename) | ||
| 350 | { | ||
| 351 | char *vec[4]; /* kerneldoc -list file NULL */ | ||
| 352 | pid_t pid; | ||
| 353 | int ret, i, count, start; | ||
| 354 | char real_filename[PATH_MAX + 1]; | ||
| 355 | int pipefd[2]; | ||
| 356 | char *data, *str; | ||
| 357 | size_t data_len = 0; | ||
| 358 | |||
| 359 | vec[0] = KERNELDOC; | ||
| 360 | vec[1] = LIST; | ||
| 361 | vec[2] = filename; | ||
| 362 | vec[3] = NULL; | ||
| 363 | |||
| 364 | if (pipe(pipefd)) { | ||
| 365 | perror("pipe"); | ||
| 366 | exit(1); | ||
| 367 | } | ||
| 368 | |||
| 369 | switch (pid=fork()) { | ||
| 370 | case -1: | ||
| 371 | perror("fork"); | ||
| 372 | exit(1); | ||
| 373 | case 0: | ||
| 374 | close(pipefd[0]); | ||
| 375 | dup2(pipefd[1], 1); | ||
| 376 | memset(real_filename, 0, sizeof(real_filename)); | ||
| 377 | strncat(real_filename, kernsrctree, PATH_MAX); | ||
| 378 | strncat(real_filename, "/" KERNELDOCPATH KERNELDOC, | ||
| 379 | PATH_MAX - strlen(real_filename)); | ||
| 380 | execvp(real_filename, vec); | ||
| 381 | fprintf(stderr, "exec "); | ||
| 382 | perror(real_filename); | ||
| 383 | exit(1); | ||
| 384 | default: | ||
| 385 | close(pipefd[1]); | ||
| 386 | data = malloc(4096); | ||
| 387 | do { | ||
| 388 | while ((ret = read(pipefd[0], | ||
| 389 | data + data_len, | ||
| 390 | 4096)) > 0) { | ||
| 391 | data_len += ret; | ||
| 392 | data = realloc(data, data_len + 4096); | ||
| 393 | } | ||
| 394 | } while (ret == -EAGAIN); | ||
| 395 | if (ret != 0) { | ||
| 396 | perror("read"); | ||
| 397 | exit(1); | ||
| 398 | } | ||
| 399 | waitpid(pid, &ret ,0); | ||
| 400 | } | ||
| 401 | if (WIFEXITED(ret)) | ||
| 402 | exitstatus |= WEXITSTATUS(ret); | ||
| 403 | else | ||
| 404 | exitstatus = 0xff; | ||
| 405 | |||
| 406 | count = 0; | ||
| 407 | /* poor man's strtok, but with counting */ | ||
| 408 | for (i = 0; i < data_len; i++) { | ||
| 409 | if (data[i] == '\n') { | ||
| 410 | count++; | ||
| 411 | data[i] = '\0'; | ||
| 412 | } | ||
| 413 | } | ||
| 414 | start = all_list_len; | ||
| 415 | all_list_len += count; | ||
| 416 | all_list = realloc(all_list, sizeof(char *) * all_list_len); | ||
| 417 | str = data; | ||
| 418 | for (i = 0; i < data_len && start != all_list_len; i++) { | ||
| 419 | if (data[i] == '\0') { | ||
| 420 | all_list[start] = str; | ||
| 421 | str = data + i + 1; | ||
| 422 | start++; | ||
| 423 | } | ||
| 424 | } | ||
| 425 | } | ||
| 426 | |||
| 318 | /* | 427 | /* |
| 319 | * Parse file, calling action specific functions for: | 428 | * Parse file, calling action specific functions for: |
| 320 | * 1) Lines containing !E | 429 | * 1) Lines containing !E |
| @@ -322,7 +431,8 @@ static void docsect(char *filename, char *line) | |||
| 322 | * 3) Lines containing !D | 431 | * 3) Lines containing !D |
| 323 | * 4) Lines containing !F | 432 | * 4) Lines containing !F |
| 324 | * 5) Lines containing !P | 433 | * 5) Lines containing !P |
| 325 | * 6) Default lines - lines not matching the above | 434 | * 6) Lines containing !C |
| 435 | * 7) Default lines - lines not matching the above | ||
| 326 | */ | 436 | */ |
| 327 | static void parse_file(FILE *infile) | 437 | static void parse_file(FILE *infile) |
| 328 | { | 438 | { |
| @@ -365,6 +475,12 @@ static void parse_file(FILE *infile) | |||
| 365 | s++; | 475 | s++; |
| 366 | docsection(line + 2, s); | 476 | docsection(line + 2, s); |
| 367 | break; | 477 | break; |
| 478 | case 'C': | ||
| 479 | while (*s && !isspace(*s)) s++; | ||
| 480 | *s = '\0'; | ||
| 481 | if (findall) | ||
| 482 | findall(line+2); | ||
| 483 | break; | ||
| 368 | default: | 484 | default: |
| 369 | defaultline(line); | 485 | defaultline(line); |
| 370 | } | 486 | } |
| @@ -380,6 +496,7 @@ static void parse_file(FILE *infile) | |||
| 380 | int main(int argc, char *argv[]) | 496 | int main(int argc, char *argv[]) |
| 381 | { | 497 | { |
| 382 | FILE * infile; | 498 | FILE * infile; |
| 499 | int i; | ||
| 383 | 500 | ||
| 384 | srctree = getenv("SRCTREE"); | 501 | srctree = getenv("SRCTREE"); |
| 385 | if (!srctree) | 502 | if (!srctree) |
| @@ -415,6 +532,7 @@ int main(int argc, char *argv[]) | |||
| 415 | symbolsonly = find_export_symbols; | 532 | symbolsonly = find_export_symbols; |
| 416 | singlefunctions = noaction2; | 533 | singlefunctions = noaction2; |
| 417 | docsection = noaction2; | 534 | docsection = noaction2; |
| 535 | findall = find_all_symbols; | ||
| 418 | parse_file(infile); | 536 | parse_file(infile); |
| 419 | 537 | ||
| 420 | /* Rewind to start from beginning of file again */ | 538 | /* Rewind to start from beginning of file again */ |
| @@ -425,8 +543,16 @@ int main(int argc, char *argv[]) | |||
| 425 | symbolsonly = printline; | 543 | symbolsonly = printline; |
| 426 | singlefunctions = singfunc; | 544 | singlefunctions = singfunc; |
| 427 | docsection = docsect; | 545 | docsection = docsect; |
| 546 | findall = NULL; | ||
| 428 | 547 | ||
| 429 | parse_file(infile); | 548 | parse_file(infile); |
| 549 | |||
| 550 | for (i = 0; i < all_list_len; i++) { | ||
| 551 | if (!all_list[i]) | ||
| 552 | continue; | ||
| 553 | fprintf(stderr, "Warning: didn't use docs for %s\n", | ||
| 554 | all_list[i]); | ||
| 555 | } | ||
| 430 | } | 556 | } |
| 431 | else if (strcmp("depend", argv[1]) == 0) | 557 | else if (strcmp("depend", argv[1]) == 0) |
| 432 | { | 558 | { |
| @@ -439,6 +565,7 @@ int main(int argc, char *argv[]) | |||
| 439 | symbolsonly = adddep; | 565 | symbolsonly = adddep; |
| 440 | singlefunctions = adddep2; | 566 | singlefunctions = adddep2; |
| 441 | docsection = adddep2; | 567 | docsection = adddep2; |
| 568 | findall = adddep; | ||
| 442 | parse_file(infile); | 569 | parse_file(infile); |
| 443 | printf("\n"); | 570 | printf("\n"); |
| 444 | } | 571 | } |
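The docproc changes teach it a !C directive: on the first pass it runs kernel-doc -list for each listed file, records every documented symbol, crosses off the ones the later passes actually emit, and finally warns about documentation that was never used. The symbol list is parsed with a "poor man's strtok"; the runnable sketch below reproduces that splitting on a literal string instead of kernel-doc output read from a pipe:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char data[] = "alloc_foo\nfree_foo\nDOC: Foo helpers\n";
	size_t data_len = strlen(data);
	int count = 0;

	/* first pass: count newlines and turn them into terminators */
	for (size_t i = 0; i < data_len; i++) {
		if (data[i] == '\n') {
			count++;
			data[i] = '\0';
		}
	}

	/* second pass: collect the start of each resulting string */
	char **list = calloc(count, sizeof(char *));
	char *str = data;
	for (size_t i = 0, n = 0; i < data_len && n < (size_t)count; i++) {
		if (data[i] == '\0') {
			list[n++] = str;
			str = data + i + 1;
		}
	}

	for (int i = 0; i < count; i++)
		printf("symbol[%d] = %s\n", i, list[i]);
	free(list);
	return 0;
}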
diff --git a/scripts/kernel-doc b/scripts/kernel-doc index 102e1235fd5c..cdb6dc1f6458 100755 --- a/scripts/kernel-doc +++ b/scripts/kernel-doc | |||
| @@ -44,12 +44,13 @@ use strict; | |||
| 44 | # Note: This only supports 'c'. | 44 | # Note: This only supports 'c'. |
| 45 | 45 | ||
| 46 | # usage: | 46 | # usage: |
| 47 | # kernel-doc [ -docbook | -html | -text | -man ] [ -no-doc-sections ] | 47 | # kernel-doc [ -docbook | -html | -text | -man | -list ] [ -no-doc-sections ] |
| 48 | # [ -function funcname [ -function funcname ...] ] c file(s)s > outputfile | 48 | # [ -function funcname [ -function funcname ...] ] c file(s)s > outputfile |
| 49 | # or | 49 | # or |
| 50 | # [ -nofunction funcname [ -function funcname ...] ] c file(s)s > outputfile | 50 | # [ -nofunction funcname [ -function funcname ...] ] c file(s)s > outputfile |
| 51 | # | 51 | # |
| 52 | # Set output format using one of -docbook -html -text or -man. Default is man. | 52 | # Set output format using one of -docbook -html -text or -man. Default is man. |
| 53 | # The -list format is for internal use by docproc. | ||
| 53 | # | 54 | # |
| 54 | # -no-doc-sections | 55 | # -no-doc-sections |
| 55 | # Do not output DOC: sections | 56 | # Do not output DOC: sections |
| @@ -210,9 +211,16 @@ my %highlights_text = ( $type_constant, "\$1", | |||
| 210 | $type_param, "\$1" ); | 211 | $type_param, "\$1" ); |
| 211 | my $blankline_text = ""; | 212 | my $blankline_text = ""; |
| 212 | 213 | ||
| 214 | # list mode | ||
| 215 | my %highlights_list = ( $type_constant, "\$1", | ||
| 216 | $type_func, "\$1", | ||
| 217 | $type_struct, "\$1", | ||
| 218 | $type_param, "\$1" ); | ||
| 219 | my $blankline_list = ""; | ||
| 213 | 220 | ||
| 214 | sub usage { | 221 | sub usage { |
| 215 | print "Usage: $0 [ -v ] [ -docbook | -html | -text | -man ] [ -no-doc-sections ]\n"; | 222 | print "Usage: $0 [ -v ] [ -docbook | -html | -text | -man | -list ]\n"; |
| 223 | print " [ -no-doc-sections ]\n"; | ||
| 216 | print " [ -function funcname [ -function funcname ...] ]\n"; | 224 | print " [ -function funcname [ -function funcname ...] ]\n"; |
| 217 | print " [ -nofunction funcname [ -nofunction funcname ...] ]\n"; | 225 | print " [ -nofunction funcname [ -nofunction funcname ...] ]\n"; |
| 218 | print " c source file(s) > outputfile\n"; | 226 | print " c source file(s) > outputfile\n"; |
| @@ -318,6 +326,10 @@ while ($ARGV[0] =~ m/^-(.*)/) { | |||
| 318 | $output_mode = "xml"; | 326 | $output_mode = "xml"; |
| 319 | %highlights = %highlights_xml; | 327 | %highlights = %highlights_xml; |
| 320 | $blankline = $blankline_xml; | 328 | $blankline = $blankline_xml; |
| 329 | } elsif ($cmd eq "-list") { | ||
| 330 | $output_mode = "list"; | ||
| 331 | %highlights = %highlights_list; | ||
| 332 | $blankline = $blankline_list; | ||
| 321 | } elsif ($cmd eq "-gnome") { | 333 | } elsif ($cmd eq "-gnome") { |
| 322 | $output_mode = "gnome"; | 334 | $output_mode = "gnome"; |
| 323 | %highlights = %highlights_gnome; | 335 | %highlights = %highlights_gnome; |
| @@ -1361,6 +1373,42 @@ sub output_blockhead_text(%) { | |||
| 1361 | } | 1373 | } |
| 1362 | } | 1374 | } |
| 1363 | 1375 | ||
| 1376 | ## list mode output functions | ||
| 1377 | |||
| 1378 | sub output_function_list(%) { | ||
| 1379 | my %args = %{$_[0]}; | ||
| 1380 | |||
| 1381 | print $args{'function'} . "\n"; | ||
| 1382 | } | ||
| 1383 | |||
| 1384 | # output enum in list | ||
| 1385 | sub output_enum_list(%) { | ||
| 1386 | my %args = %{$_[0]}; | ||
| 1387 | print $args{'enum'} . "\n"; | ||
| 1388 | } | ||
| 1389 | |||
| 1390 | # output typedef in list | ||
| 1391 | sub output_typedef_list(%) { | ||
| 1392 | my %args = %{$_[0]}; | ||
| 1393 | print $args{'typedef'} . "\n"; | ||
| 1394 | } | ||
| 1395 | |||
| 1396 | # output struct as list | ||
| 1397 | sub output_struct_list(%) { | ||
| 1398 | my %args = %{$_[0]}; | ||
| 1399 | |||
| 1400 | print $args{'struct'} . "\n"; | ||
| 1401 | } | ||
| 1402 | |||
| 1403 | sub output_blockhead_list(%) { | ||
| 1404 | my %args = %{$_[0]}; | ||
| 1405 | my ($parameter, $section); | ||
| 1406 | |||
| 1407 | foreach $section (@{$args{'sectionlist'}}) { | ||
| 1408 | print "DOC: $section\n"; | ||
| 1409 | } | ||
| 1410 | } | ||
| 1411 | |||
| 1364 | ## | 1412 | ## |
| 1365 | # generic output function for all types (function, struct/union, typedef, enum); | 1413 | # generic output function for all types (function, struct/union, typedef, enum); |
| 1366 | # calls the generated, variable output_ function name based on | 1414 | # calls the generated, variable output_ function name based on |
| @@ -1679,7 +1727,7 @@ sub check_sections($$$$$$) { | |||
| 1679 | foreach $px (0 .. $#prms) { | 1727 | foreach $px (0 .. $#prms) { |
| 1680 | $prm_clean = $prms[$px]; | 1728 | $prm_clean = $prms[$px]; |
| 1681 | $prm_clean =~ s/\[.*\]//; | 1729 | $prm_clean =~ s/\[.*\]//; |
| 1682 | $prm_clean =~ s/__attribute__\s*\(\([a-z,_\*\s\(\)]*\)\)//; | 1730 | $prm_clean =~ s/__attribute__\s*\(\([a-z,_\*\s\(\)]*\)\)//i; |
| 1683 | # ignore array size in a parameter string; | 1731 | # ignore array size in a parameter string; |
| 1684 | # however, the original param string may contain | 1732 | # however, the original param string may contain |
| 1685 | # spaces, e.g.: addr[6 + 2] | 1733 | # spaces, e.g.: addr[6 + 2] |
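
The scripts/kernel-doc hunks above add a `-list` output mode (noted as "for internal use by docproc") that, per the new `output_*_list` handlers, prints only the names of documented items and `DOC:` section headings, one per line, instead of rendered man/HTML/DocBook output. As a rough illustration of what that mode consumes and emits, here is a hypothetical C source file (an editorial example, not part of the patch): for this input, `-list` output would be expected to contain just the name `add_one`, plus a `DOC: ...` line for any DOC blocks present.

```c
/* Editorial example input for kernel-doc.  With the new -list handlers,
 * only the documented name ("add_one") and any "DOC: <section>" headings
 * would be printed for a file like this. */

/**
 * add_one - add one to a value
 * @x: value to increment
 *
 * Return: @x + 1
 */
static int add_one(int x)
{
        return x + 1;
}

int main(void)
{
        return add_one(41) == 42 ? 0 : 1;
}
```
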
diff --git a/security/apparmor/include/resource.h b/security/apparmor/include/resource.h index 3c88be946494..02baec732bb5 100644 --- a/security/apparmor/include/resource.h +++ b/security/apparmor/include/resource.h | |||
| @@ -33,8 +33,8 @@ struct aa_rlimit { | |||
| 33 | }; | 33 | }; |
| 34 | 34 | ||
| 35 | int aa_map_resource(int resource); | 35 | int aa_map_resource(int resource); |
| 36 | int aa_task_setrlimit(struct aa_profile *profile, unsigned int resource, | 36 | int aa_task_setrlimit(struct aa_profile *profile, struct task_struct *, |
| 37 | struct rlimit *new_rlim); | 37 | unsigned int resource, struct rlimit *new_rlim); |
| 38 | 38 | ||
| 39 | void __aa_transition_rlimits(struct aa_profile *old, struct aa_profile *new); | 39 | void __aa_transition_rlimits(struct aa_profile *old, struct aa_profile *new); |
| 40 | 40 | ||
diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c index 6e85cdb4303f..506d2baf6147 100644 --- a/security/apparmor/lib.c +++ b/security/apparmor/lib.c | |||
| @@ -40,6 +40,7 @@ char *aa_split_fqname(char *fqname, char **ns_name) | |||
| 40 | *ns_name = NULL; | 40 | *ns_name = NULL; |
| 41 | if (name[0] == ':') { | 41 | if (name[0] == ':') { |
| 42 | char *split = strchr(&name[1], ':'); | 42 | char *split = strchr(&name[1], ':'); |
| 43 | *ns_name = skip_spaces(&name[1]); | ||
| 43 | if (split) { | 44 | if (split) { |
| 44 | /* overwrite ':' with \0 */ | 45 | /* overwrite ':' with \0 */ |
| 45 | *split = 0; | 46 | *split = 0; |
| @@ -47,7 +48,6 @@ char *aa_split_fqname(char *fqname, char **ns_name) | |||
| 47 | } else | 48 | } else |
| 48 | /* a ns name without a following profile is allowed */ | 49 | /* a ns name without a following profile is allowed */ |
| 49 | name = NULL; | 50 | name = NULL; |
| 50 | *ns_name = &name[1]; | ||
| 51 | } | 51 | } |
| 52 | if (name && *name == 0) | 52 | if (name && *name == 0) |
| 53 | name = NULL; | 53 | name = NULL; |
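
The aa_split_fqname() fix above captures the namespace part (through `skip_spaces()`) before `name` can be reset to NULL for a bare `:ns` string, instead of deriving it afterwards. A standalone userspace model of the fixed ordering; `skip_spaces()` is a local stand-in for the kernel helper, and the post-split handling is simplified rather than copied from the kernel:

```c
#include <ctype.h>
#include <stdio.h>
#include <string.h>

static char *skip_spaces(char *s)
{
        while (isspace((unsigned char)*s))
                s++;
        return s;
}

/* Split "name" or ":ns:name" in place; returns the profile name or NULL,
 * and stores the namespace name (or NULL) through ns_name. */
static char *split_fqname(char *fqname, char **ns_name)
{
        char *name = skip_spaces(fqname);

        *ns_name = NULL;
        if (name[0] == ':') {
                char *split = strchr(&name[1], ':');
                *ns_name = skip_spaces(&name[1]);   /* capture ns first */
                if (split) {
                        *split = 0;                 /* terminate the ns */
                        name = skip_spaces(split + 1);
                } else {
                        name = NULL;   /* a ns without a profile is allowed */
                }
        }
        if (name && *name == 0)
                name = NULL;
        return name;
}

int main(void)
{
        char a[] = ": ns1:profile1", b[] = ":ns2";
        char *ns, *prof;

        prof = split_fqname(a, &ns);
        printf("ns=%s profile=%s\n", ns, prof);                   /* ns1, profile1 */
        prof = split_fqname(b, &ns);
        printf("ns=%s profile=%s\n", ns, prof ? prof : "(none)"); /* ns2, (none)   */
        return 0;
}
```
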
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c index f73e2c204218..cf1de4462ccd 100644 --- a/security/apparmor/lsm.c +++ b/security/apparmor/lsm.c | |||
| @@ -614,7 +614,7 @@ static int apparmor_task_setrlimit(struct task_struct *task, | |||
| 614 | int error = 0; | 614 | int error = 0; |
| 615 | 615 | ||
| 616 | if (!unconfined(profile)) | 616 | if (!unconfined(profile)) |
| 617 | error = aa_task_setrlimit(profile, resource, new_rlim); | 617 | error = aa_task_setrlimit(profile, task, resource, new_rlim); |
| 618 | 618 | ||
| 619 | return error; | 619 | return error; |
| 620 | } | 620 | } |
diff --git a/security/apparmor/path.c b/security/apparmor/path.c index 19358dc14605..82396050f186 100644 --- a/security/apparmor/path.c +++ b/security/apparmor/path.c | |||
| @@ -59,8 +59,7 @@ static int d_namespace_path(struct path *path, char *buf, int buflen, | |||
| 59 | { | 59 | { |
| 60 | struct path root, tmp; | 60 | struct path root, tmp; |
| 61 | char *res; | 61 | char *res; |
| 62 | int deleted, connected; | 62 | int connected, error = 0; |
| 63 | int error = 0; | ||
| 64 | 63 | ||
| 65 | /* Get the root we want to resolve too, released below */ | 64 | /* Get the root we want to resolve too, released below */ |
| 66 | if (flags & PATH_CHROOT_REL) { | 65 | if (flags & PATH_CHROOT_REL) { |
| @@ -74,19 +73,8 @@ static int d_namespace_path(struct path *path, char *buf, int buflen, | |||
| 74 | } | 73 | } |
| 75 | 74 | ||
| 76 | spin_lock(&dcache_lock); | 75 | spin_lock(&dcache_lock); |
| 77 | /* There is a race window between path lookup here and the | 76 | tmp = root; |
| 78 | * need to strip the " (deleted) string that __d_path applies | 77 | res = __d_path(path, &tmp, buf, buflen); |
| 79 | * Detect the race and relookup the path | ||
| 80 | * | ||
| 81 | * The stripping of (deleted) is a hack that could be removed | ||
| 82 | * with an updated __d_path | ||
| 83 | */ | ||
| 84 | do { | ||
| 85 | tmp = root; | ||
| 86 | deleted = d_unlinked(path->dentry); | ||
| 87 | res = __d_path(path, &tmp, buf, buflen); | ||
| 88 | |||
| 89 | } while (deleted != d_unlinked(path->dentry)); | ||
| 90 | spin_unlock(&dcache_lock); | 78 | spin_unlock(&dcache_lock); |
| 91 | 79 | ||
| 92 | *name = res; | 80 | *name = res; |
| @@ -98,21 +86,17 @@ static int d_namespace_path(struct path *path, char *buf, int buflen, | |||
| 98 | *name = buf; | 86 | *name = buf; |
| 99 | goto out; | 87 | goto out; |
| 100 | } | 88 | } |
| 101 | if (deleted) { | ||
| 102 | /* On some filesystems, newly allocated dentries appear to the | ||
| 103 | * security_path hooks as a deleted dentry except without an | ||
| 104 | * inode allocated. | ||
| 105 | * | ||
| 106 | * Remove the appended deleted text and return as string for | ||
| 107 | * normal mediation, or auditing. The (deleted) string is | ||
| 108 | * guaranteed to be added in this case, so just strip it. | ||
| 109 | */ | ||
| 110 | buf[buflen - 11] = 0; /* - (len(" (deleted)") +\0) */ | ||
| 111 | 89 | ||
| 112 | if (path->dentry->d_inode && !(flags & PATH_MEDIATE_DELETED)) { | 90 | /* Handle two cases: |
| 91 | * 1. A deleted dentry && profile is not allowing mediation of deleted | ||
| 92 | * 2. On some filesystems, newly allocated dentries appear to the | ||
| 93 | * security_path hooks as a deleted dentry except without an inode | ||
| 94 | * allocated. | ||
| 95 | */ | ||
| 96 | if (d_unlinked(path->dentry) && path->dentry->d_inode && | ||
| 97 | !(flags & PATH_MEDIATE_DELETED)) { | ||
| 113 | error = -ENOENT; | 98 | error = -ENOENT; |
| 114 | goto out; | 99 | goto out; |
| 115 | } | ||
| 116 | } | 100 | } |
| 117 | 101 | ||
| 118 | /* Determine if the path is connected to the expected root */ | 102 | /* Determine if the path is connected to the expected root */ |
diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c index 3cdc1ad0787e..52cc865f1464 100644 --- a/security/apparmor/policy.c +++ b/security/apparmor/policy.c | |||
| @@ -1151,12 +1151,14 @@ ssize_t aa_remove_profiles(char *fqname, size_t size) | |||
| 1151 | /* released below */ | 1151 | /* released below */ |
| 1152 | ns = aa_get_namespace(root); | 1152 | ns = aa_get_namespace(root); |
| 1153 | 1153 | ||
| 1154 | write_lock(&ns->lock); | ||
| 1155 | if (!name) { | 1154 | if (!name) { |
| 1156 | /* remove namespace - can only happen if fqname[0] == ':' */ | 1155 | /* remove namespace - can only happen if fqname[0] == ':' */ |
| 1156 | write_lock(&ns->parent->lock); | ||
| 1157 | __remove_namespace(ns); | 1157 | __remove_namespace(ns); |
| 1158 | write_unlock(&ns->parent->lock); | ||
| 1158 | } else { | 1159 | } else { |
| 1159 | /* remove profile */ | 1160 | /* remove profile */ |
| 1161 | write_lock(&ns->lock); | ||
| 1160 | profile = aa_get_profile(__lookup_profile(&ns->base, name)); | 1162 | profile = aa_get_profile(__lookup_profile(&ns->base, name)); |
| 1161 | if (!profile) { | 1163 | if (!profile) { |
| 1162 | error = -ENOENT; | 1164 | error = -ENOENT; |
| @@ -1165,8 +1167,8 @@ ssize_t aa_remove_profiles(char *fqname, size_t size) | |||
| 1165 | } | 1167 | } |
| 1166 | name = profile->base.hname; | 1168 | name = profile->base.hname; |
| 1167 | __remove_profile(profile); | 1169 | __remove_profile(profile); |
| 1170 | write_unlock(&ns->lock); | ||
| 1168 | } | 1171 | } |
| 1169 | write_unlock(&ns->lock); | ||
| 1170 | 1172 | ||
| 1171 | /* don't fail removal if audit fails */ | 1173 | /* don't fail removal if audit fails */ |
| 1172 | (void) audit_policy(OP_PROF_RM, GFP_KERNEL, name, info, error); | 1174 | (void) audit_policy(OP_PROF_RM, GFP_KERNEL, name, info, error); |
diff --git a/security/apparmor/resource.c b/security/apparmor/resource.c index 4a368f1fd36d..a4136c10b1c6 100644 --- a/security/apparmor/resource.c +++ b/security/apparmor/resource.c | |||
| @@ -72,6 +72,7 @@ int aa_map_resource(int resource) | |||
| 72 | /** | 72 | /** |
| 73 | * aa_task_setrlimit - test permission to set an rlimit | 73 | * aa_task_setrlimit - test permission to set an rlimit |
| 74 | * @profile - profile confining the task (NOT NULL) | 74 | * @profile - profile confining the task (NOT NULL) |
| 75 | * @task - task the resource is being set on | ||
| 75 | * @resource - the resource being set | 76 | * @resource - the resource being set |
| 76 | * @new_rlim - the new resource limit (NOT NULL) | 77 | * @new_rlim - the new resource limit (NOT NULL) |
| 77 | * | 78 | * |
| @@ -79,18 +80,21 @@ int aa_map_resource(int resource) | |||
| 79 | * | 80 | * |
| 80 | * Returns: 0 or error code if setting resource failed | 81 | * Returns: 0 or error code if setting resource failed |
| 81 | */ | 82 | */ |
| 82 | int aa_task_setrlimit(struct aa_profile *profile, unsigned int resource, | 83 | int aa_task_setrlimit(struct aa_profile *profile, struct task_struct *task, |
| 83 | struct rlimit *new_rlim) | 84 | unsigned int resource, struct rlimit *new_rlim) |
| 84 | { | 85 | { |
| 85 | int error = 0; | 86 | int error = 0; |
| 86 | 87 | ||
| 87 | if (profile->rlimits.mask & (1 << resource) && | 88 | /* TODO: extend resource control to handle other (non current) |
| 88 | new_rlim->rlim_max > profile->rlimits.limits[resource].rlim_max) | 89 | * processes. AppArmor rules currently have the implicit assumption |
| 89 | 90 | * that the task is setting the resource of the current process | |
| 90 | error = audit_resource(profile, resource, new_rlim->rlim_max, | 91 | */ |
| 91 | -EACCES); | 92 | if ((task != current->group_leader) || |
| 93 | (profile->rlimits.mask & (1 << resource) && | ||
| 94 | new_rlim->rlim_max > profile->rlimits.limits[resource].rlim_max)) | ||
| 95 | error = -EACCES; | ||
| 92 | 96 | ||
| 93 | return error; | 97 | return audit_resource(profile, resource, new_rlim->rlim_max, error); |
| 94 | } | 98 | } |
| 95 | 99 | ||
| 96 | /** | 100 | /** |
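
Taken together, the resource.h, lsm.c and resource.c hunks pass the target task into `aa_task_setrlimit()` and deny the operation outright when a confined task tries to set an rlimit on anything other than its own process, while still feeding the result through `audit_resource()`. A userspace model of the new decision, with simplified stand-in types and the task comparison reduced to an `on_self` flag:

```c
/* Model of the new aa_task_setrlimit() decision: the limit may only be
 * changed for the caller's own process, and the requested hard limit must
 * stay within the profile's ceiling when one is configured.  The
 * structures are simplified stand-ins, not the kernel types. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct rlimit_max { unsigned long rlim_max; };

struct profile {
        unsigned long mask;             /* which resources are confined    */
        struct rlimit_max limits[16];   /* per-resource hard-limit ceiling */
};

static int task_setrlimit(const struct profile *p, bool on_self,
                          unsigned int resource, unsigned long new_max)
{
        int error = 0;

        if (!on_self ||
            ((p->mask & (1UL << resource)) &&
             new_max > p->limits[resource].rlim_max))
                error = -EACCES;
        return error;                   /* the kernel also audits this */
}

int main(void)
{
        struct profile p = { .mask = 1UL << 3, .limits[3] = { 1024 } };

        printf("%d\n", task_setrlimit(&p, true, 3, 512));    /* 0       */
        printf("%d\n", task_setrlimit(&p, true, 3, 4096));   /* -EACCES */
        printf("%d\n", task_setrlimit(&p, false, 3, 512));   /* -EACCES */
        return 0;
}
```
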
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h index 16d100d3fc38..3fbcd1dda0ef 100644 --- a/security/integrity/ima/ima.h +++ b/security/integrity/ima/ima.h | |||
| @@ -35,6 +35,7 @@ enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 }; | |||
| 35 | #define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS) | 35 | #define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS) |
| 36 | 36 | ||
| 37 | /* set during initialization */ | 37 | /* set during initialization */ |
| 38 | extern int iint_initialized; | ||
| 38 | extern int ima_initialized; | 39 | extern int ima_initialized; |
| 39 | extern int ima_used_chip; | 40 | extern int ima_used_chip; |
| 40 | extern char *ima_hash; | 41 | extern char *ima_hash; |
diff --git a/security/integrity/ima/ima_iint.c b/security/integrity/ima/ima_iint.c index 7625b85c2274..afba4aef812f 100644 --- a/security/integrity/ima/ima_iint.c +++ b/security/integrity/ima/ima_iint.c | |||
| @@ -22,9 +22,10 @@ | |||
| 22 | 22 | ||
| 23 | RADIX_TREE(ima_iint_store, GFP_ATOMIC); | 23 | RADIX_TREE(ima_iint_store, GFP_ATOMIC); |
| 24 | DEFINE_SPINLOCK(ima_iint_lock); | 24 | DEFINE_SPINLOCK(ima_iint_lock); |
| 25 | |||
| 26 | static struct kmem_cache *iint_cache __read_mostly; | 25 | static struct kmem_cache *iint_cache __read_mostly; |
| 27 | 26 | ||
| 27 | int iint_initialized = 0; | ||
| 28 | |||
| 28 | /* ima_iint_find_get - return the iint associated with an inode | 29 | /* ima_iint_find_get - return the iint associated with an inode |
| 29 | * | 30 | * |
| 30 | * ima_iint_find_get gets a reference to the iint. Caller must | 31 | * ima_iint_find_get gets a reference to the iint. Caller must |
| @@ -141,6 +142,7 @@ static int __init ima_iintcache_init(void) | |||
| 141 | iint_cache = | 142 | iint_cache = |
| 142 | kmem_cache_create("iint_cache", sizeof(struct ima_iint_cache), 0, | 143 | kmem_cache_create("iint_cache", sizeof(struct ima_iint_cache), 0, |
| 143 | SLAB_PANIC, init_once); | 144 | SLAB_PANIC, init_once); |
| 145 | iint_initialized = 1; | ||
| 144 | return 0; | 146 | return 0; |
| 145 | } | 147 | } |
| 146 | security_initcall(ima_iintcache_init); | 148 | security_initcall(ima_iintcache_init); |
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c index f93641382e9f..e662b89d4079 100644 --- a/security/integrity/ima/ima_main.c +++ b/security/integrity/ima/ima_main.c | |||
| @@ -148,12 +148,14 @@ void ima_counts_get(struct file *file) | |||
| 148 | struct ima_iint_cache *iint; | 148 | struct ima_iint_cache *iint; |
| 149 | int rc; | 149 | int rc; |
| 150 | 150 | ||
| 151 | if (!ima_initialized || !S_ISREG(inode->i_mode)) | 151 | if (!iint_initialized || !S_ISREG(inode->i_mode)) |
| 152 | return; | 152 | return; |
| 153 | iint = ima_iint_find_get(inode); | 153 | iint = ima_iint_find_get(inode); |
| 154 | if (!iint) | 154 | if (!iint) |
| 155 | return; | 155 | return; |
| 156 | mutex_lock(&iint->mutex); | 156 | mutex_lock(&iint->mutex); |
| 157 | if (!ima_initialized) | ||
| 158 | goto out; | ||
| 157 | rc = ima_must_measure(iint, inode, MAY_READ, FILE_CHECK); | 159 | rc = ima_must_measure(iint, inode, MAY_READ, FILE_CHECK); |
| 158 | if (rc < 0) | 160 | if (rc < 0) |
| 159 | goto out; | 161 | goto out; |
| @@ -213,7 +215,7 @@ void ima_file_free(struct file *file) | |||
| 213 | struct inode *inode = file->f_dentry->d_inode; | 215 | struct inode *inode = file->f_dentry->d_inode; |
| 214 | struct ima_iint_cache *iint; | 216 | struct ima_iint_cache *iint; |
| 215 | 217 | ||
| 216 | if (!ima_initialized || !S_ISREG(inode->i_mode)) | 218 | if (!iint_initialized || !S_ISREG(inode->i_mode)) |
| 217 | return; | 219 | return; |
| 218 | iint = ima_iint_find_get(inode); | 220 | iint = ima_iint_find_get(inode); |
| 219 | if (!iint) | 221 | if (!iint) |
| @@ -230,7 +232,7 @@ static int process_measurement(struct file *file, const unsigned char *filename, | |||
| 230 | { | 232 | { |
| 231 | struct inode *inode = file->f_dentry->d_inode; | 233 | struct inode *inode = file->f_dentry->d_inode; |
| 232 | struct ima_iint_cache *iint; | 234 | struct ima_iint_cache *iint; |
| 233 | int rc; | 235 | int rc = 0; |
| 234 | 236 | ||
| 235 | if (!ima_initialized || !S_ISREG(inode->i_mode)) | 237 | if (!ima_initialized || !S_ISREG(inode->i_mode)) |
| 236 | return 0; | 238 | return 0; |
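
The IMA hunks introduce a separate `iint_initialized` flag, set by the earlier `ima_iintcache_init()` initcall, so `ima_counts_get()` and `ima_file_free()` only depend on the iint store existing, and the full `ima_initialized` state is re-checked after the iint mutex is taken. A minimal model of that two-stage initialization guard (the flag names mirror the patch; everything else is a stand-in):

```c
/* Two-stage init: the cache/backing store comes up first (stage 1), the
 * full subsystem later (stage 2).  Early hooks only require stage 1; the
 * stage-2 check happens once the per-object state is held. */
#include <stdbool.h>
#include <stdio.h>

static bool iint_initialized;   /* stage 1: iint cache ready */
static bool ima_initialized;    /* stage 2: full IMA ready   */

static void counts_get(const char *file)
{
        if (!iint_initialized)          /* no cache yet: nothing to track */
                return;
        /* ...look up and lock the per-inode iint here... */
        if (!ima_initialized) {         /* cache exists, policy does not  */
                printf("%s: tracked, not measured\n", file);
                return;
        }
        printf("%s: measured\n", file);
}

int main(void)
{
        counts_get("a");                /* ignored: nothing initialized */
        iint_initialized = true;
        counts_get("b");                /* tracked only                 */
        ima_initialized = true;
        counts_get("c");                /* fully measured               */
        return 0;
}
```
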
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c index b2b0998d6abd..60924f6a52db 100644 --- a/security/keys/keyctl.c +++ b/security/keys/keyctl.c | |||
| @@ -1272,6 +1272,7 @@ long keyctl_session_to_parent(void) | |||
| 1272 | keyring_r = NULL; | 1272 | keyring_r = NULL; |
| 1273 | 1273 | ||
| 1274 | me = current; | 1274 | me = current; |
| 1275 | rcu_read_lock(); | ||
| 1275 | write_lock_irq(&tasklist_lock); | 1276 | write_lock_irq(&tasklist_lock); |
| 1276 | 1277 | ||
| 1277 | parent = me->real_parent; | 1278 | parent = me->real_parent; |
| @@ -1304,7 +1305,8 @@ long keyctl_session_to_parent(void) | |||
| 1304 | goto not_permitted; | 1305 | goto not_permitted; |
| 1305 | 1306 | ||
| 1306 | /* the keyrings must have the same UID */ | 1307 | /* the keyrings must have the same UID */ |
| 1307 | if (pcred->tgcred->session_keyring->uid != mycred->euid || | 1308 | if ((pcred->tgcred->session_keyring && |
| 1309 | pcred->tgcred->session_keyring->uid != mycred->euid) || | ||
| 1308 | mycred->tgcred->session_keyring->uid != mycred->euid) | 1310 | mycred->tgcred->session_keyring->uid != mycred->euid) |
| 1309 | goto not_permitted; | 1311 | goto not_permitted; |
| 1310 | 1312 | ||
| @@ -1319,6 +1321,7 @@ long keyctl_session_to_parent(void) | |||
| 1319 | set_ti_thread_flag(task_thread_info(parent), TIF_NOTIFY_RESUME); | 1321 | set_ti_thread_flag(task_thread_info(parent), TIF_NOTIFY_RESUME); |
| 1320 | 1322 | ||
| 1321 | write_unlock_irq(&tasklist_lock); | 1323 | write_unlock_irq(&tasklist_lock); |
| 1324 | rcu_read_unlock(); | ||
| 1322 | if (oldcred) | 1325 | if (oldcred) |
| 1323 | put_cred(oldcred); | 1326 | put_cred(oldcred); |
| 1324 | return 0; | 1327 | return 0; |
| @@ -1327,6 +1330,7 @@ already_same: | |||
| 1327 | ret = 0; | 1330 | ret = 0; |
| 1328 | not_permitted: | 1331 | not_permitted: |
| 1329 | write_unlock_irq(&tasklist_lock); | 1332 | write_unlock_irq(&tasklist_lock); |
| 1333 | rcu_read_unlock(); | ||
| 1330 | put_cred(cred); | 1334 | put_cred(cred); |
| 1331 | return ret; | 1335 | return ret; |
| 1332 | 1336 | ||
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c index eb68326c37d4..a7868ad4d530 100644 --- a/sound/core/rawmidi.c +++ b/sound/core/rawmidi.c | |||
| @@ -829,6 +829,8 @@ static int snd_rawmidi_control_ioctl(struct snd_card *card, | |||
| 829 | 829 | ||
| 830 | if (get_user(device, (int __user *)argp)) | 830 | if (get_user(device, (int __user *)argp)) |
| 831 | return -EFAULT; | 831 | return -EFAULT; |
| 832 | if (device >= SNDRV_RAWMIDI_DEVICES) /* next device is -1 */ | ||
| 833 | device = SNDRV_RAWMIDI_DEVICES - 1; | ||
| 832 | mutex_lock(®ister_mutex); | 834 | mutex_lock(®ister_mutex); |
| 833 | device = device < 0 ? 0 : device + 1; | 835 | device = device < 0 ? 0 : device + 1; |
| 834 | while (device < SNDRV_RAWMIDI_DEVICES) { | 836 | while (device < SNDRV_RAWMIDI_DEVICES) { |
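
The rawmidi hunk clamps an out-of-range device index from the "next device" control ioctl before it is incremented, so an arbitrarily large user-supplied value cannot overflow past the device table; per the added comment, the answer for such input simply becomes -1 (no next device). A userspace model of the clamp-then-search logic, with a fixed boolean table standing in for the registered devices:

```c
/* "Next device" query: dev is the caller's current index (-1 means start
 * from the beginning); the result is the next existing device or -1.
 * Clamping an out-of-range dev before the +1 avoids signed overflow and
 * out-of-bounds indexing on hostile input. */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define NUM_DEVICES 8

static bool device_exists[NUM_DEVICES] = { [0] = true, [3] = true };

static int next_device(int dev)
{
        if (dev >= NUM_DEVICES)         /* next device is -1 */
                dev = NUM_DEVICES - 1;
        dev = dev < 0 ? 0 : dev + 1;
        while (dev < NUM_DEVICES) {
                if (device_exists[dev])
                        return dev;
                dev++;
        }
        return -1;
}

int main(void)
{
        printf("%d\n", next_device(-1));      /* 0               */
        printf("%d\n", next_device(0));       /* 3               */
        printf("%d\n", next_device(3));       /* -1              */
        printf("%d\n", next_device(INT_MAX)); /* -1, no overflow */
        return 0;
}
```
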
diff --git a/sound/core/seq/oss/seq_oss_init.c b/sound/core/seq/oss/seq_oss_init.c index 685712276ac9..69cd7b3c362d 100644 --- a/sound/core/seq/oss/seq_oss_init.c +++ b/sound/core/seq/oss/seq_oss_init.c | |||
| @@ -281,13 +281,10 @@ snd_seq_oss_open(struct file *file, int level) | |||
| 281 | return 0; | 281 | return 0; |
| 282 | 282 | ||
| 283 | _error: | 283 | _error: |
| 284 | snd_seq_oss_writeq_delete(dp->writeq); | ||
| 285 | snd_seq_oss_readq_delete(dp->readq); | ||
| 286 | snd_seq_oss_synth_cleanup(dp); | 284 | snd_seq_oss_synth_cleanup(dp); |
| 287 | snd_seq_oss_midi_cleanup(dp); | 285 | snd_seq_oss_midi_cleanup(dp); |
| 288 | delete_port(dp); | ||
| 289 | delete_seq_queue(dp->queue); | 286 | delete_seq_queue(dp->queue); |
| 290 | kfree(dp); | 287 | delete_port(dp); |
| 291 | 288 | ||
| 292 | return rc; | 289 | return rc; |
| 293 | } | 290 | } |
| @@ -350,8 +347,10 @@ create_port(struct seq_oss_devinfo *dp) | |||
| 350 | static int | 347 | static int |
| 351 | delete_port(struct seq_oss_devinfo *dp) | 348 | delete_port(struct seq_oss_devinfo *dp) |
| 352 | { | 349 | { |
| 353 | if (dp->port < 0) | 350 | if (dp->port < 0) { |
| 351 | kfree(dp); | ||
| 354 | return 0; | 352 | return 0; |
| 353 | } | ||
| 355 | 354 | ||
| 356 | debug_printk(("delete_port %i\n", dp->port)); | 355 | debug_printk(("delete_port %i\n", dp->port)); |
| 357 | return snd_seq_event_port_detach(dp->cseq, dp->port); | 356 | return snd_seq_event_port_detach(dp->cseq, dp->port); |
diff --git a/sound/isa/msnd/msnd_pinnacle.c b/sound/isa/msnd/msnd_pinnacle.c index 5f3e68401f90..91d6023a63e5 100644 --- a/sound/isa/msnd/msnd_pinnacle.c +++ b/sound/isa/msnd/msnd_pinnacle.c | |||
| @@ -764,9 +764,9 @@ static long io[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; | |||
| 764 | static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; | 764 | static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; |
| 765 | static long mem[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; | 765 | static long mem[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; |
| 766 | 766 | ||
| 767 | #ifndef MSND_CLASSIC | ||
| 767 | static long cfg[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; | 768 | static long cfg[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; |
| 768 | 769 | ||
| 769 | #ifndef MSND_CLASSIC | ||
| 770 | /* Extra Peripheral Configuration (Default: Disable) */ | 770 | /* Extra Peripheral Configuration (Default: Disable) */ |
| 771 | static long ide_io0[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; | 771 | static long ide_io0[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; |
| 772 | static long ide_io1[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; | 772 | static long ide_io1[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; |
| @@ -894,7 +894,11 @@ static int __devinit snd_msnd_isa_probe(struct device *pdev, unsigned int idx) | |||
| 894 | struct snd_card *card; | 894 | struct snd_card *card; |
| 895 | struct snd_msnd *chip; | 895 | struct snd_msnd *chip; |
| 896 | 896 | ||
| 897 | if (has_isapnp(idx) || cfg[idx] == SNDRV_AUTO_PORT) { | 897 | if (has_isapnp(idx) |
| 898 | #ifndef MSND_CLASSIC | ||
| 899 | || cfg[idx] == SNDRV_AUTO_PORT | ||
| 900 | #endif | ||
| 901 | ) { | ||
| 898 | printk(KERN_INFO LOGNAME ": Assuming PnP mode\n"); | 902 | printk(KERN_INFO LOGNAME ": Assuming PnP mode\n"); |
| 899 | return -ENODEV; | 903 | return -ENODEV; |
| 900 | } | 904 | } |
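
The msnd_pinnacle change moves the `cfg[]` module-parameter array under `#ifndef MSND_CLASSIC` and wraps its remaining use in the probe path in the same guard, so the classic configuration no longer references a symbol it does not define. A tiny illustration of the pattern; `HAVE_EXTRA_CFG` is a hypothetical switch used here in place of `!defined(MSND_CLASSIC)`:

```c
/* Conditionally compiled configuration: the optional array exists only
 * when the feature is built in, and every reference is wrapped in the
 * same guard so both configurations compile. */
#include <stdbool.h>
#include <stdio.h>

#define HAVE_EXTRA_CFG 1        /* flip to 0 to mimic the "classic" build */
#define AUTO_PORT (-1L)

#if HAVE_EXTRA_CFG
static long cfg[4] = { 0x250, AUTO_PORT, 0x260, AUTO_PORT };
#endif

static bool assume_pnp(int idx, bool has_isapnp)
{
        if (has_isapnp
#if HAVE_EXTRA_CFG
            || cfg[idx] == AUTO_PORT
#endif
           )
                return true;
        return false;
}

int main(void)
{
        printf("%d %d\n", assume_pnp(0, false), assume_pnp(1, false));
        return 0;
}
```
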
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 3827092cc1d2..14829210ef0b 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c | |||
| @@ -4536,7 +4536,7 @@ int snd_hda_parse_pin_def_config(struct hda_codec *codec, | |||
| 4536 | cfg->hp_outs--; | 4536 | cfg->hp_outs--; |
| 4537 | memmove(cfg->hp_pins + i, cfg->hp_pins + i + 1, | 4537 | memmove(cfg->hp_pins + i, cfg->hp_pins + i + 1, |
| 4538 | sizeof(cfg->hp_pins[0]) * (cfg->hp_outs - i)); | 4538 | sizeof(cfg->hp_pins[0]) * (cfg->hp_outs - i)); |
| 4539 | memmove(sequences_hp + i - 1, sequences_hp + i, | 4539 | memmove(sequences_hp + i, sequences_hp + i + 1, |
| 4540 | sizeof(sequences_hp[0]) * (cfg->hp_outs - i)); | 4540 | sizeof(sequences_hp[0]) * (cfg->hp_outs - i)); |
| 4541 | } | 4541 | } |
| 4542 | } | 4542 | } |
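
The hda_codec fix corrects an off-by-one in compacting two parallel arrays: when headphone pin `i` is dropped from `hp_pins`, the matching `sequences_hp` entry must also be removed at index `i`, not `i - 1`, or a neighbouring element is overwritten. A standalone model of removing element `i` from parallel arrays with `memmove()`:

```c
/* Remove element i from two parallel arrays of length *n: shift the tail
 * of *both* arrays left from i + 1 onto i (same index in each), then
 * shrink the count.  Using i - 1 for the second array, as the old code
 * did, would clobber the element before the one being removed. */
#include <stdio.h>
#include <string.h>

static void remove_at(int *pins, int *seqs, int *n, int i)
{
        (*n)--;
        memmove(pins + i, pins + i + 1, sizeof(pins[0]) * (*n - i));
        memmove(seqs + i, seqs + i + 1, sizeof(seqs[0]) * (*n - i));
}

int main(void)
{
        int pins[] = { 10, 11, 12, 13 };
        int seqs[] = { 0, 1, 2, 3 };
        int n = 4, j;

        remove_at(pins, seqs, &n, 1);   /* drop the second entry */
        for (j = 0; j < n; j++)
                printf("pin %d seq %d\n", pins[j], seqs[j]);
        /* expected: 10/0, 12/2, 13/3 */
        return 0;
}
```
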
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c index 4ef5efaaaef1..488fd9ade1ba 100644 --- a/sound/pci/hda/patch_cirrus.c +++ b/sound/pci/hda/patch_cirrus.c | |||
| @@ -972,6 +972,53 @@ static struct hda_verb cs_coef_init_verbs[] = { | |||
| 972 | {} /* terminator */ | 972 | {} /* terminator */ |
| 973 | }; | 973 | }; |
| 974 | 974 | ||
| 975 | /* Errata: CS4207 rev C0/C1/C2 Silicon | ||
| 976 | * | ||
| 977 | * http://www.cirrus.com/en/pubs/errata/ER880C3.pdf | ||
| 978 | * | ||
| 979 | * 6. At high temperature (TA > +85°C), the digital supply current (IVD) | ||
| 980 | * may be excessive (up to an additional 200 μA), which is most easily | ||
| 981 | * observed while the part is being held in reset (RESET# active low). | ||
| 982 | * | ||
| 983 | * Root Cause: At initial powerup of the device, the logic that drives | ||
| 984 | * the clock and write enable to the S/PDIF SRC RAMs is not properly | ||
| 985 | * initialized. | ||
| 986 | * Certain random patterns will cause a steady leakage current in those | ||
| 987 | * RAM cells. The issue will resolve once the SRCs are used (turned on). | ||
| 988 | * | ||
| 989 | * Workaround: The following verb sequence briefly turns on the S/PDIF SRC | ||
| 990 | * blocks, which will alleviate the issue. | ||
| 991 | */ | ||
| 992 | |||
| 993 | static struct hda_verb cs_errata_init_verbs[] = { | ||
| 994 | {0x01, AC_VERB_SET_POWER_STATE, 0x00}, /* AFG: D0 */ | ||
| 995 | {0x11, AC_VERB_SET_PROC_STATE, 0x01}, /* VPW: processing on */ | ||
| 996 | |||
| 997 | {0x11, AC_VERB_SET_COEF_INDEX, 0x0008}, | ||
| 998 | {0x11, AC_VERB_SET_PROC_COEF, 0x9999}, | ||
| 999 | {0x11, AC_VERB_SET_COEF_INDEX, 0x0017}, | ||
| 1000 | {0x11, AC_VERB_SET_PROC_COEF, 0xa412}, | ||
| 1001 | {0x11, AC_VERB_SET_COEF_INDEX, 0x0001}, | ||
| 1002 | {0x11, AC_VERB_SET_PROC_COEF, 0x0009}, | ||
| 1003 | |||
| 1004 | {0x07, AC_VERB_SET_POWER_STATE, 0x00}, /* S/PDIF Rx: D0 */ | ||
| 1005 | {0x08, AC_VERB_SET_POWER_STATE, 0x00}, /* S/PDIF Tx: D0 */ | ||
| 1006 | |||
| 1007 | {0x11, AC_VERB_SET_COEF_INDEX, 0x0017}, | ||
| 1008 | {0x11, AC_VERB_SET_PROC_COEF, 0x2412}, | ||
| 1009 | {0x11, AC_VERB_SET_COEF_INDEX, 0x0008}, | ||
| 1010 | {0x11, AC_VERB_SET_PROC_COEF, 0x0000}, | ||
| 1011 | {0x11, AC_VERB_SET_COEF_INDEX, 0x0001}, | ||
| 1012 | {0x11, AC_VERB_SET_PROC_COEF, 0x0008}, | ||
| 1013 | {0x11, AC_VERB_SET_PROC_STATE, 0x00}, | ||
| 1014 | |||
| 1015 | {0x07, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Rx: D3 */ | ||
| 1016 | {0x08, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Tx: D3 */ | ||
| 1017 | /*{0x01, AC_VERB_SET_POWER_STATE, 0x03},*/ /* AFG: D3 This is already handled */ | ||
| 1018 | |||
| 1019 | {} /* terminator */ | ||
| 1020 | }; | ||
| 1021 | |||
| 975 | /* SPDIF setup */ | 1022 | /* SPDIF setup */ |
| 976 | static void init_digital(struct hda_codec *codec) | 1023 | static void init_digital(struct hda_codec *codec) |
| 977 | { | 1024 | { |
| @@ -991,6 +1038,9 @@ static int cs_init(struct hda_codec *codec) | |||
| 991 | { | 1038 | { |
| 992 | struct cs_spec *spec = codec->spec; | 1039 | struct cs_spec *spec = codec->spec; |
| 993 | 1040 | ||
| 1041 | /* init_verb sequence for C0/C1/C2 errata*/ | ||
| 1042 | snd_hda_sequence_write(codec, cs_errata_init_verbs); | ||
| 1043 | |||
| 994 | snd_hda_sequence_write(codec, cs_coef_init_verbs); | 1044 | snd_hda_sequence_write(codec, cs_coef_init_verbs); |
| 995 | 1045 | ||
| 996 | if (spec->gpio_mask) { | 1046 | if (spec->gpio_mask) { |
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 5cdb80edbd7f..71f9d6475b09 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c | |||
| @@ -116,6 +116,7 @@ struct conexant_spec { | |||
| 116 | unsigned int dell_vostro:1; | 116 | unsigned int dell_vostro:1; |
| 117 | unsigned int ideapad:1; | 117 | unsigned int ideapad:1; |
| 118 | unsigned int thinkpad:1; | 118 | unsigned int thinkpad:1; |
| 119 | unsigned int hp_laptop:1; | ||
| 119 | 120 | ||
| 120 | unsigned int ext_mic_present; | 121 | unsigned int ext_mic_present; |
| 121 | unsigned int recording; | 122 | unsigned int recording; |
| @@ -2299,6 +2300,18 @@ static void cxt5066_ideapad_automic(struct hda_codec *codec) | |||
| 2299 | } | 2300 | } |
| 2300 | } | 2301 | } |
| 2301 | 2302 | ||
| 2303 | /* toggle input of built-in digital mic and mic jack appropriately */ | ||
| 2304 | static void cxt5066_hp_laptop_automic(struct hda_codec *codec) | ||
| 2305 | { | ||
| 2306 | unsigned int present; | ||
| 2307 | |||
| 2308 | present = snd_hda_jack_detect(codec, 0x1b); | ||
| 2309 | snd_printdd("CXT5066: external microphone present=%d\n", present); | ||
| 2310 | snd_hda_codec_write(codec, 0x17, 0, AC_VERB_SET_CONNECT_SEL, | ||
| 2311 | present ? 1 : 3); | ||
| 2312 | } | ||
| 2313 | |||
| 2314 | |||
| 2302 | /* toggle input of built-in digital mic and mic jack appropriately | 2315 | /* toggle input of built-in digital mic and mic jack appropriately |
| 2303 | order is: external mic -> dock mic -> interal mic */ | 2316 | order is: external mic -> dock mic -> interal mic */ |
| 2304 | static void cxt5066_thinkpad_automic(struct hda_codec *codec) | 2317 | static void cxt5066_thinkpad_automic(struct hda_codec *codec) |
| @@ -2408,6 +2421,20 @@ static void cxt5066_ideapad_event(struct hda_codec *codec, unsigned int res) | |||
| 2408 | } | 2421 | } |
| 2409 | 2422 | ||
| 2410 | /* unsolicited event for jack sensing */ | 2423 | /* unsolicited event for jack sensing */ |
| 2424 | static void cxt5066_hp_laptop_event(struct hda_codec *codec, unsigned int res) | ||
| 2425 | { | ||
| 2426 | snd_printdd("CXT5066_hp_laptop: unsol event %x (%x)\n", res, res >> 26); | ||
| 2427 | switch (res >> 26) { | ||
| 2428 | case CONEXANT_HP_EVENT: | ||
| 2429 | cxt5066_hp_automute(codec); | ||
| 2430 | break; | ||
| 2431 | case CONEXANT_MIC_EVENT: | ||
| 2432 | cxt5066_hp_laptop_automic(codec); | ||
| 2433 | break; | ||
| 2434 | } | ||
| 2435 | } | ||
| 2436 | |||
| 2437 | /* unsolicited event for jack sensing */ | ||
| 2411 | static void cxt5066_thinkpad_event(struct hda_codec *codec, unsigned int res) | 2438 | static void cxt5066_thinkpad_event(struct hda_codec *codec, unsigned int res) |
| 2412 | { | 2439 | { |
| 2413 | snd_printdd("CXT5066_thinkpad: unsol event %x (%x)\n", res, res >> 26); | 2440 | snd_printdd("CXT5066_thinkpad: unsol event %x (%x)\n", res, res >> 26); |
| @@ -2989,6 +3016,14 @@ static struct hda_verb cxt5066_init_verbs_portd_lo[] = { | |||
| 2989 | { } /* end */ | 3016 | { } /* end */ |
| 2990 | }; | 3017 | }; |
| 2991 | 3018 | ||
| 3019 | |||
| 3020 | static struct hda_verb cxt5066_init_verbs_hp_laptop[] = { | ||
| 3021 | {0x14, AC_VERB_SET_CONNECT_SEL, 0x0}, | ||
| 3022 | {0x19, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_HP_EVENT}, | ||
| 3023 | {0x1b, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_MIC_EVENT}, | ||
| 3024 | { } /* end */ | ||
| 3025 | }; | ||
| 3026 | |||
| 2992 | /* initialize jack-sensing, too */ | 3027 | /* initialize jack-sensing, too */ |
| 2993 | static int cxt5066_init(struct hda_codec *codec) | 3028 | static int cxt5066_init(struct hda_codec *codec) |
| 2994 | { | 3029 | { |
| @@ -3004,6 +3039,8 @@ static int cxt5066_init(struct hda_codec *codec) | |||
| 3004 | cxt5066_ideapad_automic(codec); | 3039 | cxt5066_ideapad_automic(codec); |
| 3005 | else if (spec->thinkpad) | 3040 | else if (spec->thinkpad) |
| 3006 | cxt5066_thinkpad_automic(codec); | 3041 | cxt5066_thinkpad_automic(codec); |
| 3042 | else if (spec->hp_laptop) | ||
| 3043 | cxt5066_hp_laptop_automic(codec); | ||
| 3007 | } | 3044 | } |
| 3008 | cxt5066_set_mic_boost(codec); | 3045 | cxt5066_set_mic_boost(codec); |
| 3009 | return 0; | 3046 | return 0; |
| @@ -3031,6 +3068,7 @@ enum { | |||
| 3031 | CXT5066_DELL_VOSTO, /* Dell Vostro 1015i */ | 3068 | CXT5066_DELL_VOSTO, /* Dell Vostro 1015i */ |
| 3032 | CXT5066_IDEAPAD, /* Lenovo IdeaPad U150 */ | 3069 | CXT5066_IDEAPAD, /* Lenovo IdeaPad U150 */ |
| 3033 | CXT5066_THINKPAD, /* Lenovo ThinkPad T410s, others? */ | 3070 | CXT5066_THINKPAD, /* Lenovo ThinkPad T410s, others? */ |
| 3071 | CXT5066_HP_LAPTOP, /* HP Laptop */ | ||
| 3034 | CXT5066_MODELS | 3072 | CXT5066_MODELS |
| 3035 | }; | 3073 | }; |
| 3036 | 3074 | ||
| @@ -3041,6 +3079,7 @@ static const char *cxt5066_models[CXT5066_MODELS] = { | |||
| 3041 | [CXT5066_DELL_VOSTO] = "dell-vostro", | 3079 | [CXT5066_DELL_VOSTO] = "dell-vostro", |
| 3042 | [CXT5066_IDEAPAD] = "ideapad", | 3080 | [CXT5066_IDEAPAD] = "ideapad", |
| 3043 | [CXT5066_THINKPAD] = "thinkpad", | 3081 | [CXT5066_THINKPAD] = "thinkpad", |
| 3082 | [CXT5066_HP_LAPTOP] = "hp-laptop", | ||
| 3044 | }; | 3083 | }; |
| 3045 | 3084 | ||
| 3046 | static struct snd_pci_quirk cxt5066_cfg_tbl[] = { | 3085 | static struct snd_pci_quirk cxt5066_cfg_tbl[] = { |
| @@ -3052,8 +3091,10 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = { | |||
| 3052 | SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTO), | 3091 | SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTO), |
| 3053 | SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTO), | 3092 | SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTO), |
| 3054 | SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD), | 3093 | SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD), |
| 3094 | SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP), | ||
| 3055 | SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5), | 3095 | SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5), |
| 3056 | SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5), | 3096 | SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5), |
| 3097 | SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400s", CXT5066_THINKPAD), | ||
| 3057 | SND_PCI_QUIRK(0x17aa, 0x21b2, "Thinkpad X100e", CXT5066_IDEAPAD), | 3098 | SND_PCI_QUIRK(0x17aa, 0x21b2, "Thinkpad X100e", CXT5066_IDEAPAD), |
| 3058 | SND_PCI_QUIRK(0x17aa, 0x21b3, "Thinkpad Edge 13 (197)", CXT5066_IDEAPAD), | 3099 | SND_PCI_QUIRK(0x17aa, 0x21b3, "Thinkpad Edge 13 (197)", CXT5066_IDEAPAD), |
| 3059 | SND_PCI_QUIRK(0x17aa, 0x21b4, "Thinkpad Edge", CXT5066_IDEAPAD), | 3100 | SND_PCI_QUIRK(0x17aa, 0x21b4, "Thinkpad Edge", CXT5066_IDEAPAD), |
| @@ -3116,6 +3157,23 @@ static int patch_cxt5066(struct hda_codec *codec) | |||
| 3116 | spec->num_init_verbs++; | 3157 | spec->num_init_verbs++; |
| 3117 | spec->dell_automute = 1; | 3158 | spec->dell_automute = 1; |
| 3118 | break; | 3159 | break; |
| 3160 | case CXT5066_HP_LAPTOP: | ||
| 3161 | codec->patch_ops.init = cxt5066_init; | ||
| 3162 | codec->patch_ops.unsol_event = cxt5066_hp_laptop_event; | ||
| 3163 | spec->init_verbs[spec->num_init_verbs] = | ||
| 3164 | cxt5066_init_verbs_hp_laptop; | ||
| 3165 | spec->num_init_verbs++; | ||
| 3166 | spec->hp_laptop = 1; | ||
| 3167 | spec->mixers[spec->num_mixers++] = cxt5066_mixer_master; | ||
| 3168 | spec->mixers[spec->num_mixers++] = cxt5066_mixers; | ||
| 3169 | /* no S/PDIF out */ | ||
| 3170 | spec->multiout.dig_out_nid = 0; | ||
| 3171 | /* input source automatically selected */ | ||
| 3172 | spec->input_mux = NULL; | ||
| 3173 | spec->port_d_mode = 0; | ||
| 3174 | spec->mic_boost = 3; /* default 30dB gain */ | ||
| 3175 | break; | ||
| 3176 | |||
| 3119 | case CXT5066_OLPC_XO_1_5: | 3177 | case CXT5066_OLPC_XO_1_5: |
| 3120 | codec->patch_ops.init = cxt5066_olpc_init; | 3178 | codec->patch_ops.init = cxt5066_olpc_init; |
| 3121 | codec->patch_ops.unsol_event = cxt5066_olpc_unsol_event; | 3179 | codec->patch_ops.unsol_event = cxt5066_olpc_unsol_event; |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 627bf9963368..bcbf9160ed81 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
| @@ -5334,6 +5334,7 @@ static void fillup_priv_adc_nids(struct hda_codec *codec, hda_nid_t *nids, | |||
| 5334 | 5334 | ||
| 5335 | static struct snd_pci_quirk beep_white_list[] = { | 5335 | static struct snd_pci_quirk beep_white_list[] = { |
| 5336 | SND_PCI_QUIRK(0x1043, 0x829f, "ASUS", 1), | 5336 | SND_PCI_QUIRK(0x1043, 0x829f, "ASUS", 1), |
| 5337 | SND_PCI_QUIRK(0x1043, 0x83ce, "EeePC", 1), | ||
| 5337 | SND_PCI_QUIRK(0x8086, 0xd613, "Intel", 1), | 5338 | SND_PCI_QUIRK(0x8086, 0xd613, "Intel", 1), |
| 5338 | {} | 5339 | {} |
| 5339 | }; | 5340 | }; |
diff --git a/sound/pci/oxygen/oxygen.h b/sound/pci/oxygen/oxygen.h index 6147216af744..a3409edcfb50 100644 --- a/sound/pci/oxygen/oxygen.h +++ b/sound/pci/oxygen/oxygen.h | |||
| @@ -155,6 +155,7 @@ void oxygen_pci_remove(struct pci_dev *pci); | |||
| 155 | int oxygen_pci_suspend(struct pci_dev *pci, pm_message_t state); | 155 | int oxygen_pci_suspend(struct pci_dev *pci, pm_message_t state); |
| 156 | int oxygen_pci_resume(struct pci_dev *pci); | 156 | int oxygen_pci_resume(struct pci_dev *pci); |
| 157 | #endif | 157 | #endif |
| 158 | void oxygen_pci_shutdown(struct pci_dev *pci); | ||
| 158 | 159 | ||
| 159 | /* oxygen_mixer.c */ | 160 | /* oxygen_mixer.c */ |
| 160 | 161 | ||
diff --git a/sound/pci/oxygen/oxygen_lib.c b/sound/pci/oxygen/oxygen_lib.c index fad03d64e3ad..7e93cf884437 100644 --- a/sound/pci/oxygen/oxygen_lib.c +++ b/sound/pci/oxygen/oxygen_lib.c | |||
| @@ -519,16 +519,21 @@ static void oxygen_init(struct oxygen *chip) | |||
| 519 | } | 519 | } |
| 520 | } | 520 | } |
| 521 | 521 | ||
| 522 | static void oxygen_card_free(struct snd_card *card) | 522 | static void oxygen_shutdown(struct oxygen *chip) |
| 523 | { | 523 | { |
| 524 | struct oxygen *chip = card->private_data; | ||
| 525 | |||
| 526 | spin_lock_irq(&chip->reg_lock); | 524 | spin_lock_irq(&chip->reg_lock); |
| 527 | chip->interrupt_mask = 0; | 525 | chip->interrupt_mask = 0; |
| 528 | chip->pcm_running = 0; | 526 | chip->pcm_running = 0; |
| 529 | oxygen_write16(chip, OXYGEN_DMA_STATUS, 0); | 527 | oxygen_write16(chip, OXYGEN_DMA_STATUS, 0); |
| 530 | oxygen_write16(chip, OXYGEN_INTERRUPT_MASK, 0); | 528 | oxygen_write16(chip, OXYGEN_INTERRUPT_MASK, 0); |
| 531 | spin_unlock_irq(&chip->reg_lock); | 529 | spin_unlock_irq(&chip->reg_lock); |
| 530 | } | ||
| 531 | |||
| 532 | static void oxygen_card_free(struct snd_card *card) | ||
| 533 | { | ||
| 534 | struct oxygen *chip = card->private_data; | ||
| 535 | |||
| 536 | oxygen_shutdown(chip); | ||
| 532 | if (chip->irq >= 0) | 537 | if (chip->irq >= 0) |
| 533 | free_irq(chip->irq, chip); | 538 | free_irq(chip->irq, chip); |
| 534 | flush_scheduled_work(); | 539 | flush_scheduled_work(); |
| @@ -778,3 +783,13 @@ int oxygen_pci_resume(struct pci_dev *pci) | |||
| 778 | } | 783 | } |
| 779 | EXPORT_SYMBOL(oxygen_pci_resume); | 784 | EXPORT_SYMBOL(oxygen_pci_resume); |
| 780 | #endif /* CONFIG_PM */ | 785 | #endif /* CONFIG_PM */ |
| 786 | |||
| 787 | void oxygen_pci_shutdown(struct pci_dev *pci) | ||
| 788 | { | ||
| 789 | struct snd_card *card = pci_get_drvdata(pci); | ||
| 790 | struct oxygen *chip = card->private_data; | ||
| 791 | |||
| 792 | oxygen_shutdown(chip); | ||
| 793 | chip->model.cleanup(chip); | ||
| 794 | } | ||
| 795 | EXPORT_SYMBOL(oxygen_pci_shutdown); | ||
diff --git a/sound/pci/oxygen/virtuoso.c b/sound/pci/oxygen/virtuoso.c index f03a2f2cffee..06c863e86e3d 100644 --- a/sound/pci/oxygen/virtuoso.c +++ b/sound/pci/oxygen/virtuoso.c | |||
| @@ -95,6 +95,7 @@ static struct pci_driver xonar_driver = { | |||
| 95 | .suspend = oxygen_pci_suspend, | 95 | .suspend = oxygen_pci_suspend, |
| 96 | .resume = oxygen_pci_resume, | 96 | .resume = oxygen_pci_resume, |
| 97 | #endif | 97 | #endif |
| 98 | .shutdown = oxygen_pci_shutdown, | ||
| 98 | }; | 99 | }; |
| 99 | 100 | ||
| 100 | static int __init alsa_card_xonar_init(void) | 101 | static int __init alsa_card_xonar_init(void) |
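
The oxygen hunks factor the interrupt/DMA quiescing out of `oxygen_card_free()` into `oxygen_shutdown()` and reuse it (plus the model-specific cleanup hook) from a new PCI `.shutdown` callback registered by virtuoso.c, so the hardware is also silenced on reboot. A small sketch of the shared-teardown pattern, with plain structs and a function pointer standing in for the driver objects:

```c
/* Shared teardown used from two paths: normal card free and system
 * shutdown.  The shutdown path additionally runs the model-specific
 * cleanup hook, mirroring the new oxygen_pci_shutdown(). */
#include <stdio.h>

struct chip {
        const char *name;
        void (*model_cleanup)(struct chip *chip);
};

static void chip_quiesce(struct chip *chip)
{
        /* mask interrupts, stop DMA, etc. */
        printf("%s: quiesced\n", chip->name);
}

static void card_free(struct chip *chip)
{
        chip_quiesce(chip);
        printf("%s: resources released\n", chip->name);
}

static void pci_shutdown(struct chip *chip)
{
        chip_quiesce(chip);
        chip->model_cleanup(chip);      /* put the codec in a safe state */
}

static void xonar_cleanup(struct chip *chip)
{
        printf("%s: codec reset\n", chip->name);
}

int main(void)
{
        struct chip c = { "xonar", xonar_cleanup };

        pci_shutdown(&c);       /* reboot path        */
        card_free(&c);          /* driver unload path */
        return 0;
}
```
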
diff --git a/sound/pci/oxygen/xonar_wm87x6.c b/sound/pci/oxygen/xonar_wm87x6.c index dbc4b89d74e4..b82c1cfa96f5 100644 --- a/sound/pci/oxygen/xonar_wm87x6.c +++ b/sound/pci/oxygen/xonar_wm87x6.c | |||
| @@ -53,6 +53,8 @@ struct xonar_wm87x6 { | |||
| 53 | struct xonar_generic generic; | 53 | struct xonar_generic generic; |
| 54 | u16 wm8776_regs[0x17]; | 54 | u16 wm8776_regs[0x17]; |
| 55 | u16 wm8766_regs[0x10]; | 55 | u16 wm8766_regs[0x10]; |
| 56 | struct snd_kcontrol *line_adcmux_control; | ||
| 57 | struct snd_kcontrol *mic_adcmux_control; | ||
| 56 | struct snd_kcontrol *lc_controls[13]; | 58 | struct snd_kcontrol *lc_controls[13]; |
| 57 | }; | 59 | }; |
| 58 | 60 | ||
| @@ -193,6 +195,7 @@ static void xonar_ds_init(struct oxygen *chip) | |||
| 193 | static void xonar_ds_cleanup(struct oxygen *chip) | 195 | static void xonar_ds_cleanup(struct oxygen *chip) |
| 194 | { | 196 | { |
| 195 | xonar_disable_output(chip); | 197 | xonar_disable_output(chip); |
| 198 | wm8776_write(chip, WM8776_RESET, 0); | ||
| 196 | } | 199 | } |
| 197 | 200 | ||
| 198 | static void xonar_ds_suspend(struct oxygen *chip) | 201 | static void xonar_ds_suspend(struct oxygen *chip) |
| @@ -603,6 +606,7 @@ static int wm8776_input_mux_put(struct snd_kcontrol *ctl, | |||
| 603 | { | 606 | { |
| 604 | struct oxygen *chip = ctl->private_data; | 607 | struct oxygen *chip = ctl->private_data; |
| 605 | struct xonar_wm87x6 *data = chip->model_data; | 608 | struct xonar_wm87x6 *data = chip->model_data; |
| 609 | struct snd_kcontrol *other_ctl; | ||
| 606 | unsigned int mux_bit = ctl->private_value; | 610 | unsigned int mux_bit = ctl->private_value; |
| 607 | u16 reg; | 611 | u16 reg; |
| 608 | int changed; | 612 | int changed; |
| @@ -610,8 +614,18 @@ static int wm8776_input_mux_put(struct snd_kcontrol *ctl, | |||
| 610 | mutex_lock(&chip->mutex); | 614 | mutex_lock(&chip->mutex); |
| 611 | reg = data->wm8776_regs[WM8776_ADCMUX]; | 615 | reg = data->wm8776_regs[WM8776_ADCMUX]; |
| 612 | if (value->value.integer.value[0]) { | 616 | if (value->value.integer.value[0]) { |
| 613 | reg &= ~0x003; | ||
| 614 | reg |= mux_bit; | 617 | reg |= mux_bit; |
| 618 | /* line-in and mic-in are exclusive */ | ||
| 619 | mux_bit ^= 3; | ||
| 620 | if (reg & mux_bit) { | ||
| 621 | reg &= ~mux_bit; | ||
| 622 | if (mux_bit == 1) | ||
| 623 | other_ctl = data->line_adcmux_control; | ||
| 624 | else | ||
| 625 | other_ctl = data->mic_adcmux_control; | ||
| 626 | snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE, | ||
| 627 | &other_ctl->id); | ||
| 628 | } | ||
| 615 | } else | 629 | } else |
| 616 | reg &= ~mux_bit; | 630 | reg &= ~mux_bit; |
| 617 | changed = reg != data->wm8776_regs[WM8776_ADCMUX]; | 631 | changed = reg != data->wm8776_regs[WM8776_ADCMUX]; |
| @@ -963,7 +977,13 @@ static int xonar_ds_mixer_init(struct oxygen *chip) | |||
| 963 | err = snd_ctl_add(chip->card, ctl); | 977 | err = snd_ctl_add(chip->card, ctl); |
| 964 | if (err < 0) | 978 | if (err < 0) |
| 965 | return err; | 979 | return err; |
| 980 | if (!strcmp(ctl->id.name, "Line Capture Switch")) | ||
| 981 | data->line_adcmux_control = ctl; | ||
| 982 | else if (!strcmp(ctl->id.name, "Mic Capture Switch")) | ||
| 983 | data->mic_adcmux_control = ctl; | ||
| 966 | } | 984 | } |
| 985 | if (!data->line_adcmux_control || !data->mic_adcmux_control) | ||
| 986 | return -ENXIO; | ||
| 967 | BUILD_BUG_ON(ARRAY_SIZE(lc_controls) != ARRAY_SIZE(data->lc_controls)); | 987 | BUILD_BUG_ON(ARRAY_SIZE(lc_controls) != ARRAY_SIZE(data->lc_controls)); |
| 968 | for (i = 0; i < ARRAY_SIZE(lc_controls); ++i) { | 988 | for (i = 0; i < ARRAY_SIZE(lc_controls); ++i) { |
| 969 | ctl = snd_ctl_new1(&lc_controls[i], chip); | 989 | ctl = snd_ctl_new1(&lc_controls[i], chip); |
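
The WM87x6 change makes the line-in and mic-in capture switches mutually exclusive: switching one on clears the other input's bit in the WM8776 ADC mux register and notifies the other control so user space sees both changes. A standalone model of the exclusive two-bit update, with the control notification reduced to reporting which input was cleared:

```c
/* Two capture switches share bits 0 (line) and 1 (mic) of one mux
 * register and are mutually exclusive: switching one on clears the other
 * and reports which other control changed as a side effect. */
#include <stdbool.h>
#include <stdio.h>

static unsigned int adcmux;     /* bit 0 = line, bit 1 = mic */

static bool set_input(unsigned int mux_bit, bool on, const char **notified)
{
        unsigned int reg = adcmux;

        *notified = NULL;
        if (on) {
                reg |= mux_bit;
                mux_bit ^= 3;                   /* the other input's bit */
                if (reg & mux_bit) {
                        reg &= ~mux_bit;        /* inputs are exclusive  */
                        *notified = (mux_bit == 1) ? "line" : "mic";
                }
        } else {
                reg &= ~mux_bit;
        }
        if (reg == adcmux)
                return false;                   /* nothing changed */
        adcmux = reg;
        return true;
}

int main(void)
{
        const char *other;

        set_input(1, true, &other);             /* line on              */
        set_input(2, true, &other);             /* mic on, line cleared */
        printf("mux=%#x, notified=%s\n", adcmux, other ? other : "none");
        return 0;
}
```
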
diff --git a/sound/usb/card.c b/sound/usb/card.c index 9feb00c831a0..4eabafa5b037 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c | |||
| @@ -126,7 +126,7 @@ static void snd_usb_stream_disconnect(struct list_head *head) | |||
| 126 | for (idx = 0; idx < 2; idx++) { | 126 | for (idx = 0; idx < 2; idx++) { |
| 127 | subs = &as->substream[idx]; | 127 | subs = &as->substream[idx]; |
| 128 | if (!subs->num_formats) | 128 | if (!subs->num_formats) |
| 129 | return; | 129 | continue; |
| 130 | snd_usb_release_substream_urbs(subs, 1); | 130 | snd_usb_release_substream_urbs(subs, 1); |
| 131 | subs->interface = -1; | 131 | subs->interface = -1; |
| 132 | } | 132 | } |
| @@ -216,6 +216,11 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif) | |||
| 216 | } | 216 | } |
| 217 | 217 | ||
| 218 | switch (protocol) { | 218 | switch (protocol) { |
| 219 | default: | ||
| 220 | snd_printdd(KERN_WARNING "unknown interface protocol %#02x, assuming v1\n", | ||
| 221 | protocol); | ||
| 222 | /* fall through */ | ||
| 223 | |||
| 219 | case UAC_VERSION_1: { | 224 | case UAC_VERSION_1: { |
| 220 | struct uac1_ac_header_descriptor *h1 = control_header; | 225 | struct uac1_ac_header_descriptor *h1 = control_header; |
| 221 | 226 | ||
| @@ -253,10 +258,6 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif) | |||
| 253 | 258 | ||
| 254 | break; | 259 | break; |
| 255 | } | 260 | } |
| 256 | |||
| 257 | default: | ||
| 258 | snd_printk(KERN_ERR "unknown protocol version 0x%02x\n", protocol); | ||
| 259 | return -EINVAL; | ||
| 260 | } | 261 | } |
| 261 | 262 | ||
| 262 | return 0; | 263 | return 0; |
| @@ -465,7 +466,13 @@ static void *snd_usb_audio_probe(struct usb_device *dev, | |||
| 465 | goto __error; | 466 | goto __error; |
| 466 | } | 467 | } |
| 467 | 468 | ||
| 468 | chip->ctrl_intf = alts; | 469 | /* |
| 470 | * For devices with more than one control interface, we assume the | ||
| 471 | * first contains the audio controls. We might need a more specific | ||
| 472 | * check here in the future. | ||
| 473 | */ | ||
| 474 | if (!chip->ctrl_intf) | ||
| 475 | chip->ctrl_intf = alts; | ||
| 469 | 476 | ||
| 470 | if (err > 0) { | 477 | if (err > 0) { |
| 471 | /* create normal USB audio interfaces */ | 478 | /* create normal USB audio interfaces */ |
diff --git a/sound/usb/clock.c b/sound/usb/clock.c index b853f8df794f..7754a1034545 100644 --- a/sound/usb/clock.c +++ b/sound/usb/clock.c | |||
| @@ -295,12 +295,11 @@ int snd_usb_init_sample_rate(struct snd_usb_audio *chip, int iface, | |||
| 295 | 295 | ||
| 296 | switch (altsd->bInterfaceProtocol) { | 296 | switch (altsd->bInterfaceProtocol) { |
| 297 | case UAC_VERSION_1: | 297 | case UAC_VERSION_1: |
| 298 | default: | ||
| 298 | return set_sample_rate_v1(chip, iface, alts, fmt, rate); | 299 | return set_sample_rate_v1(chip, iface, alts, fmt, rate); |
| 299 | 300 | ||
| 300 | case UAC_VERSION_2: | 301 | case UAC_VERSION_2: |
| 301 | return set_sample_rate_v2(chip, iface, alts, fmt, rate); | 302 | return set_sample_rate_v2(chip, iface, alts, fmt, rate); |
| 302 | } | 303 | } |
| 303 | |||
| 304 | return -EINVAL; | ||
| 305 | } | 304 | } |
| 306 | 305 | ||
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c index 1a701f1e8f50..ef0a07e34844 100644 --- a/sound/usb/endpoint.c +++ b/sound/usb/endpoint.c | |||
| @@ -275,6 +275,12 @@ int snd_usb_parse_audio_endpoints(struct snd_usb_audio *chip, int iface_no) | |||
| 275 | 275 | ||
| 276 | /* get audio formats */ | 276 | /* get audio formats */ |
| 277 | switch (protocol) { | 277 | switch (protocol) { |
| 278 | default: | ||
| 279 | snd_printdd(KERN_WARNING "%d:%u:%d: unknown interface protocol %#02x, assuming v1\n", | ||
| 280 | dev->devnum, iface_no, altno, protocol); | ||
| 281 | protocol = UAC_VERSION_1; | ||
| 282 | /* fall through */ | ||
| 283 | |||
| 278 | case UAC_VERSION_1: { | 284 | case UAC_VERSION_1: { |
| 279 | struct uac1_as_header_descriptor *as = | 285 | struct uac1_as_header_descriptor *as = |
| 280 | snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL, UAC_AS_GENERAL); | 286 | snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL, UAC_AS_GENERAL); |
| @@ -336,11 +342,6 @@ int snd_usb_parse_audio_endpoints(struct snd_usb_audio *chip, int iface_no) | |||
| 336 | dev->devnum, iface_no, altno, as->bTerminalLink); | 342 | dev->devnum, iface_no, altno, as->bTerminalLink); |
| 337 | continue; | 343 | continue; |
| 338 | } | 344 | } |
| 339 | |||
| 340 | default: | ||
| 341 | snd_printk(KERN_ERR "%d:%u:%d : unknown interface protocol %04x\n", | ||
| 342 | dev->devnum, iface_no, altno, protocol); | ||
| 343 | continue; | ||
| 344 | } | 345 | } |
| 345 | 346 | ||
| 346 | /* get format type */ | 347 | /* get format type */ |
diff --git a/sound/usb/format.c b/sound/usb/format.c index 3a1375459c06..69148212aa70 100644 --- a/sound/usb/format.c +++ b/sound/usb/format.c | |||
| @@ -49,7 +49,8 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip, | |||
| 49 | u64 pcm_formats; | 49 | u64 pcm_formats; |
| 50 | 50 | ||
| 51 | switch (protocol) { | 51 | switch (protocol) { |
| 52 | case UAC_VERSION_1: { | 52 | case UAC_VERSION_1: |
| 53 | default: { | ||
| 53 | struct uac_format_type_i_discrete_descriptor *fmt = _fmt; | 54 | struct uac_format_type_i_discrete_descriptor *fmt = _fmt; |
| 54 | sample_width = fmt->bBitResolution; | 55 | sample_width = fmt->bBitResolution; |
| 55 | sample_bytes = fmt->bSubframeSize; | 56 | sample_bytes = fmt->bSubframeSize; |
| @@ -64,9 +65,6 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip, | |||
| 64 | format <<= 1; | 65 | format <<= 1; |
| 65 | break; | 66 | break; |
| 66 | } | 67 | } |
| 67 | |||
| 68 | default: | ||
| 69 | return -EINVAL; | ||
| 70 | } | 68 | } |
| 71 | 69 | ||
| 72 | pcm_formats = 0; | 70 | pcm_formats = 0; |
| @@ -384,6 +382,10 @@ static int parse_audio_format_i(struct snd_usb_audio *chip, | |||
| 384 | * audio class v2 uses class specific EP0 range requests for that. | 382 | * audio class v2 uses class specific EP0 range requests for that. |
| 385 | */ | 383 | */ |
| 386 | switch (protocol) { | 384 | switch (protocol) { |
| 385 | default: | ||
| 386 | snd_printdd(KERN_WARNING "%d:%u:%d : invalid protocol version %d, assuming v1\n", | ||
| 387 | chip->dev->devnum, fp->iface, fp->altsetting, protocol); | ||
| 388 | /* fall through */ | ||
| 387 | case UAC_VERSION_1: | 389 | case UAC_VERSION_1: |
| 388 | fp->channels = fmt->bNrChannels; | 390 | fp->channels = fmt->bNrChannels; |
| 389 | ret = parse_audio_format_rates_v1(chip, fp, (unsigned char *) fmt, 7); | 391 | ret = parse_audio_format_rates_v1(chip, fp, (unsigned char *) fmt, 7); |
| @@ -392,10 +394,6 @@ static int parse_audio_format_i(struct snd_usb_audio *chip, | |||
| 392 | /* fp->channels is already set in this case */ | 394 | /* fp->channels is already set in this case */ |
| 393 | ret = parse_audio_format_rates_v2(chip, fp); | 395 | ret = parse_audio_format_rates_v2(chip, fp); |
| 394 | break; | 396 | break; |
| 395 | default: | ||
| 396 | snd_printk(KERN_ERR "%d:%u:%d : invalid protocol version %d\n", | ||
| 397 | chip->dev->devnum, fp->iface, fp->altsetting, protocol); | ||
| 398 | return -EINVAL; | ||
| 399 | } | 397 | } |
| 400 | 398 | ||
| 401 | if (fp->channels < 1) { | 399 | if (fp->channels < 1) { |
| @@ -438,6 +436,10 @@ static int parse_audio_format_ii(struct snd_usb_audio *chip, | |||
| 438 | fp->channels = 1; | 436 | fp->channels = 1; |
| 439 | 437 | ||
| 440 | switch (protocol) { | 438 | switch (protocol) { |
| 439 | default: | ||
| 440 | snd_printdd(KERN_WARNING "%d:%u:%d : invalid protocol version %d, assuming v1\n", | ||
| 441 | chip->dev->devnum, fp->iface, fp->altsetting, protocol); | ||
| 442 | /* fall through */ | ||
| 441 | case UAC_VERSION_1: { | 443 | case UAC_VERSION_1: { |
| 442 | struct uac_format_type_ii_discrete_descriptor *fmt = _fmt; | 444 | struct uac_format_type_ii_discrete_descriptor *fmt = _fmt; |
| 443 | brate = le16_to_cpu(fmt->wMaxBitRate); | 445 | brate = le16_to_cpu(fmt->wMaxBitRate); |
| @@ -456,10 +458,6 @@ static int parse_audio_format_ii(struct snd_usb_audio *chip, | |||
| 456 | ret = parse_audio_format_rates_v2(chip, fp); | 458 | ret = parse_audio_format_rates_v2(chip, fp); |
| 457 | break; | 459 | break; |
| 458 | } | 460 | } |
| 459 | default: | ||
| 460 | snd_printk(KERN_ERR "%d:%u:%d : invalid protocol version %d\n", | ||
| 461 | chip->dev->devnum, fp->iface, fp->altsetting, protocol); | ||
| 462 | return -EINVAL; | ||
| 463 | } | 461 | } |
| 464 | 462 | ||
| 465 | return ret; | 463 | return ret; |
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index c166db0057d3..3ed3901369ce 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c | |||
| @@ -2175,7 +2175,15 @@ int snd_usb_create_mixer(struct snd_usb_audio *chip, int ctrlif, | |||
| 2175 | } | 2175 | } |
| 2176 | 2176 | ||
| 2177 | host_iface = &usb_ifnum_to_if(chip->dev, ctrlif)->altsetting[0]; | 2177 | host_iface = &usb_ifnum_to_if(chip->dev, ctrlif)->altsetting[0]; |
| 2178 | mixer->protocol = get_iface_desc(host_iface)->bInterfaceProtocol; | 2178 | switch (get_iface_desc(host_iface)->bInterfaceProtocol) { |
| 2179 | case UAC_VERSION_1: | ||
| 2180 | default: | ||
| 2181 | mixer->protocol = UAC_VERSION_1; | ||
| 2182 | break; | ||
| 2183 | case UAC_VERSION_2: | ||
| 2184 | mixer->protocol = UAC_VERSION_2; | ||
| 2185 | break; | ||
| 2186 | } | ||
| 2179 | 2187 | ||
| 2180 | if ((err = snd_usb_mixer_controls(mixer)) < 0 || | 2188 | if ((err = snd_usb_mixer_controls(mixer)) < 0 || |
| 2181 | (err = snd_usb_mixer_status_create(mixer)) < 0) | 2189 | (err = snd_usb_mixer_status_create(mixer)) < 0) |
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index 3634cedf9306..3b5135c93062 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c | |||
| @@ -173,13 +173,12 @@ int snd_usb_init_pitch(struct snd_usb_audio *chip, int iface, | |||
| 173 | 173 | ||
| 174 | switch (altsd->bInterfaceProtocol) { | 174 | switch (altsd->bInterfaceProtocol) { |
| 175 | case UAC_VERSION_1: | 175 | case UAC_VERSION_1: |
| 176 | default: | ||
| 176 | return init_pitch_v1(chip, iface, alts, fmt); | 177 | return init_pitch_v1(chip, iface, alts, fmt); |
| 177 | 178 | ||
| 178 | case UAC_VERSION_2: | 179 | case UAC_VERSION_2: |
| 179 | return init_pitch_v2(chip, iface, alts, fmt); | 180 | return init_pitch_v2(chip, iface, alts, fmt); |
| 180 | } | 181 | } |
| 181 | |||
| 182 | return -EINVAL; | ||
| 183 | } | 182 | } |
| 184 | 183 | ||
| 185 | /* | 184 | /* |
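
Across card.c, clock.c, endpoint.c, format.c, mixer.c and pcm.c, the sound/usb hunks stop rejecting unknown `bInterfaceProtocol` values with -EINVAL; an unrecognized protocol now logs a debug warning and is handled as UAC version 1, which keeps pre-standard or quirky devices working. A compact model of the "warn and fall back to v1" dispatch (the UAC_VERSION_* values match the USB audio class header):

```c
/* Dispatch on the audio-class protocol with an explicit fallback: any
 * value that is not a known version is reported and then handled exactly
 * like version 1 instead of being rejected. */
#include <stdio.h>

enum { UAC_VERSION_1 = 0x00, UAC_VERSION_2 = 0x20 };

static int parse_stream(int protocol)
{
        switch (protocol) {
        default:
                fprintf(stderr,
                        "unknown interface protocol %#02x, assuming v1\n",
                        protocol);
                /* fall through */
        case UAC_VERSION_1:
                return 1;       /* class-1 parser */
        case UAC_VERSION_2:
                return 2;       /* class-2 parser */
        }
}

int main(void)
{
        printf("%d %d %d\n",
               parse_stream(UAC_VERSION_1),
               parse_stream(UAC_VERSION_2),
               parse_stream(0x42));     /* unknown: treated as v1 */
        return 0;
}
```
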
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h index 624a96c636fd..6de4313924fb 100644 --- a/tools/perf/util/callchain.h +++ b/tools/perf/util/callchain.h | |||
| @@ -50,6 +50,7 @@ static inline void callchain_init(struct callchain_node *node) | |||
| 50 | INIT_LIST_HEAD(&node->children); | 50 | INIT_LIST_HEAD(&node->children); |
| 51 | INIT_LIST_HEAD(&node->val); | 51 | INIT_LIST_HEAD(&node->val); |
| 52 | 52 | ||
| 53 | node->children_hit = 0; | ||
| 53 | node->parent = NULL; | 54 | node->parent = NULL; |
| 54 | node->hit = 0; | 55 | node->hit = 0; |
| 55 | } | 56 | } |
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index e72f05c3bef0..fcc16e4349df 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c | |||
| @@ -1539,6 +1539,7 @@ static int convert_to_probe_trace_events(struct perf_probe_event *pev, | |||
| 1539 | goto error; | 1539 | goto error; |
| 1540 | } | 1540 | } |
| 1541 | tev->point.offset = pev->point.offset; | 1541 | tev->point.offset = pev->point.offset; |
| 1542 | tev->point.retprobe = pev->point.retprobe; | ||
| 1542 | tev->nargs = pev->nargs; | 1543 | tev->nargs = pev->nargs; |
| 1543 | if (tev->nargs) { | 1544 | if (tev->nargs) { |
| 1544 | tev->args = zalloc(sizeof(struct probe_trace_arg) | 1545 | tev->args = zalloc(sizeof(struct probe_trace_arg) |
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index 525136684d4e..32b81f707ff5 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c | |||
| @@ -686,6 +686,25 @@ static int find_variable(Dwarf_Die *sp_die, struct probe_finder *pf) | |||
| 686 | char buf[32], *ptr; | 686 | char buf[32], *ptr; |
| 687 | int ret, nscopes; | 687 | int ret, nscopes; |
| 688 | 688 | ||
| 689 | if (!is_c_varname(pf->pvar->var)) { | ||
| 690 | /* Copy raw parameters */ | ||
| 691 | pf->tvar->value = strdup(pf->pvar->var); | ||
| 692 | if (pf->tvar->value == NULL) | ||
| 693 | return -ENOMEM; | ||
| 694 | if (pf->pvar->type) { | ||
| 695 | pf->tvar->type = strdup(pf->pvar->type); | ||
| 696 | if (pf->tvar->type == NULL) | ||
| 697 | return -ENOMEM; | ||
| 698 | } | ||
| 699 | if (pf->pvar->name) { | ||
| 700 | pf->tvar->name = strdup(pf->pvar->name); | ||
| 701 | if (pf->tvar->name == NULL) | ||
| 702 | return -ENOMEM; | ||
| 703 | } else | ||
| 704 | pf->tvar->name = NULL; | ||
| 705 | return 0; | ||
| 706 | } | ||
| 707 | |||
| 689 | if (pf->pvar->name) | 708 | if (pf->pvar->name) |
| 690 | pf->tvar->name = strdup(pf->pvar->name); | 709 | pf->tvar->name = strdup(pf->pvar->name); |
| 691 | else { | 710 | else { |
| @@ -700,19 +719,6 @@ static int find_variable(Dwarf_Die *sp_die, struct probe_finder *pf) | |||
| 700 | if (pf->tvar->name == NULL) | 719 | if (pf->tvar->name == NULL) |
| 701 | return -ENOMEM; | 720 | return -ENOMEM; |
| 702 | 721 | ||
| 703 | if (!is_c_varname(pf->pvar->var)) { | ||
| 704 | /* Copy raw parameters */ | ||
| 705 | pf->tvar->value = strdup(pf->pvar->var); | ||
| 706 | if (pf->tvar->value == NULL) | ||
| 707 | return -ENOMEM; | ||
| 708 | if (pf->pvar->type) { | ||
| 709 | pf->tvar->type = strdup(pf->pvar->type); | ||
| 710 | if (pf->tvar->type == NULL) | ||
| 711 | return -ENOMEM; | ||
| 712 | } | ||
| 713 | return 0; | ||
| 714 | } | ||
| 715 | |||
| 716 | pr_debug("Searching '%s' variable in context.\n", | 722 | pr_debug("Searching '%s' variable in context.\n", |
| 717 | pf->pvar->var); | 723 | pf->pvar->var); |
| 718 | /* Search child die for local variables and parameters. */ | 724 | /* Search child die for local variables and parameters. */ |
| @@ -783,6 +789,16 @@ static int convert_probe_point(Dwarf_Die *sp_die, struct probe_finder *pf) | |||
| 783 | /* This function has no name. */ | 789 | /* This function has no name. */ |
| 784 | tev->point.offset = (unsigned long)pf->addr; | 790 | tev->point.offset = (unsigned long)pf->addr; |
| 785 | 791 | ||
| 792 | /* Return probe must be on the head of a subprogram */ | ||
| 793 | if (pf->pev->point.retprobe) { | ||
| 794 | if (tev->point.offset != 0) { | ||
| 795 | pr_warning("Return probe must be on the head of" | ||
| 796 | " a real function\n"); | ||
| 797 | return -EINVAL; | ||
| 798 | } | ||
| 799 | tev->point.retprobe = true; | ||
| 800 | } | ||
| 801 | |||
| 786 | pr_debug("Probe point found: %s+%lu\n", tev->point.symbol, | 802 | pr_debug("Probe point found: %s+%lu\n", tev->point.symbol, |
| 787 | tev->point.offset); | 803 | tev->point.offset); |
| 788 | 804 | ||
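
The probe-finder change propagates the `retprobe` flag into the generated trace event and rejects return probes whose resolved address is not at offset 0 of a real function, since a return probe has to hook the function entry. A tiny model of that validation step, with a simplified probe-point structure:

```c
/* A return probe is only meaningful at the entry (offset 0) of a real
 * function; anything else is rejected before the event is emitted. */
#include <stdbool.h>
#include <stdio.h>

struct probe_point {
        const char *symbol;
        unsigned long offset;
        bool retprobe;
};

static int convert_point(struct probe_point *p, bool want_retprobe)
{
        if (want_retprobe) {
                if (p->offset != 0) {
                        fprintf(stderr, "Return probe must be on the head "
                                        "of a real function\n");
                        return -1;
                }
                p->retprobe = true;
        }
        return 0;
}

int main(void)
{
        struct probe_point a = { "vfs_read", 0, false };
        struct probe_point b = { "vfs_read", 16, false };

        printf("%d %d\n", convert_point(&a, true), convert_point(&b, true));
        return 0;
}
```
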
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 1a367734e016..b2f5ae97f33d 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c | |||
| @@ -2268,6 +2268,9 @@ static int setup_list(struct strlist **list, const char *list_str, | |||
| 2268 | 2268 | ||
| 2269 | int symbol__init(void) | 2269 | int symbol__init(void) |
| 2270 | { | 2270 | { |
| 2271 | if (symbol_conf.initialized) | ||
| 2272 | return 0; | ||
| 2273 | |||
| 2271 | elf_version(EV_CURRENT); | 2274 | elf_version(EV_CURRENT); |
| 2272 | if (symbol_conf.sort_by_name) | 2275 | if (symbol_conf.sort_by_name) |
| 2273 | symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) - | 2276 | symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) - |
| @@ -2293,6 +2296,7 @@ int symbol__init(void) | |||
| 2293 | symbol_conf.sym_list_str, "symbol") < 0) | 2296 | symbol_conf.sym_list_str, "symbol") < 0) |
| 2294 | goto out_free_comm_list; | 2297 | goto out_free_comm_list; |
| 2295 | 2298 | ||
| 2299 | symbol_conf.initialized = true; | ||
| 2296 | return 0; | 2300 | return 0; |
| 2297 | 2301 | ||
| 2298 | out_free_dso_list: | 2302 | out_free_dso_list: |
| @@ -2304,11 +2308,14 @@ out_free_comm_list: | |||
| 2304 | 2308 | ||
| 2305 | void symbol__exit(void) | 2309 | void symbol__exit(void) |
| 2306 | { | 2310 | { |
| 2311 | if (!symbol_conf.initialized) | ||
| 2312 | return; | ||
| 2307 | strlist__delete(symbol_conf.sym_list); | 2313 | strlist__delete(symbol_conf.sym_list); |
| 2308 | strlist__delete(symbol_conf.dso_list); | 2314 | strlist__delete(symbol_conf.dso_list); |
| 2309 | strlist__delete(symbol_conf.comm_list); | 2315 | strlist__delete(symbol_conf.comm_list); |
| 2310 | vmlinux_path__exit(); | 2316 | vmlinux_path__exit(); |
| 2311 | symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL; | 2317 | symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL; |
| 2318 | symbol_conf.initialized = false; | ||
| 2312 | } | 2319 | } |
| 2313 | 2320 | ||
| 2314 | int machines__create_kernel_maps(struct rb_root *self, pid_t pid) | 2321 | int machines__create_kernel_maps(struct rb_root *self, pid_t pid) |
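
The symbol.c hunks above, together with the new symbol_conf.initialized flag added in symbol.h below, make symbol__init() and symbol__exit() safe to call more than once: a second init returns immediately, and exit becomes a no-op unless init actually completed, so the string lists cannot be freed twice or before they exist. Note that the flag is only set once all setup steps have succeeded and is cleared again on exit. A minimal sketch of the same guard pattern, with illustrative names:

	#include <stdbool.h>
	#include <stdlib.h>

	struct subsys_conf {
		bool initialized;
		char *sym_list;		/* stand-in for the real strlist */
	};
	static struct subsys_conf conf;

	int subsys_init(void)
	{
		if (conf.initialized)
			return 0;		/* already set up, nothing to do */
		conf.sym_list = malloc(16);
		if (conf.sym_list == NULL)
			return -1;
		conf.initialized = true;	/* only after setup succeeded */
		return 0;
	}

	void subsys_exit(void)
	{
		if (!conf.initialized)
			return;			/* never initialized, or already torn down */
		free(conf.sym_list);
		conf.sym_list = NULL;
		conf.initialized = false;
	}
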
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index b7a8da4af5a0..ea95c2756f05 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h | |||
| @@ -69,7 +69,8 @@ struct symbol_conf { | |||
| 69 | show_nr_samples, | 69 | show_nr_samples, |
| 70 | use_callchain, | 70 | use_callchain, |
| 71 | exclude_other, | 71 | exclude_other, |
| 72 | show_cpu_utilization; | 72 | show_cpu_utilization, |
| 73 | initialized; | ||
| 73 | const char *vmlinux_name, | 74 | const char *vmlinux_name, |
| 74 | *source_prefix, | 75 | *source_prefix, |
| 75 | *field_sep; | 76 | *field_sep; |
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index b78b794c1039..d4853a54771a 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
| @@ -1958,10 +1958,10 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val, | |||
| 1958 | cpu); | 1958 | cpu); |
| 1959 | hardware_disable(NULL); | 1959 | hardware_disable(NULL); |
| 1960 | break; | 1960 | break; |
| 1961 | case CPU_ONLINE: | 1961 | case CPU_STARTING: |
| 1962 | printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n", | 1962 | printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n", |
| 1963 | cpu); | 1963 | cpu); |
| 1964 | smp_call_function_single(cpu, hardware_enable, NULL, 1); | 1964 | hardware_enable(NULL); |
| 1965 | break; | 1965 | break; |
| 1966 | } | 1966 | } |
| 1967 | return NOTIFY_OK; | 1967 | return NOTIFY_OK; |
| @@ -2096,7 +2096,6 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, | |||
| 2096 | 2096 | ||
| 2097 | static struct notifier_block kvm_cpu_notifier = { | 2097 | static struct notifier_block kvm_cpu_notifier = { |
| 2098 | .notifier_call = kvm_cpu_hotplug, | 2098 | .notifier_call = kvm_cpu_hotplug, |
| 2099 | .priority = 20, /* must be > scheduler priority */ | ||
| 2100 | }; | 2099 | }; |
| 2101 | 2100 | ||
| 2102 | static int vm_stat_get(void *_offset, u64 *val) | 2101 | static int vm_stat_get(void *_offset, u64 *val) |
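
The kvm_main.c hunks above move the enable side of the CPU hotplug handler from CPU_ONLINE to CPU_STARTING. A CPU_STARTING notification runs on the CPU that is coming online, so hardware_enable() can be called directly instead of bouncing through smp_call_function_single(); dropping the explicit .priority above the scheduler is presumably consistent with that earlier notification point. Below is a minimal sketch of the resulting handler shape; the constant values and helper bodies are illustrative stand-ins, and the disable-side case label is not visible in the diff context, so CPU_DYING here is an assumption.

	#include <stdio.h>

	/* Illustrative values only; the real constants come from the
	 * kernel's CPU hotplug headers. */
	#define CPU_DYING	0x0008
	#define CPU_STARTING	0x000A
	#define NOTIFY_OK	0x0001

	/* Stand-ins for the per-CPU virtualization enable/disable hooks. */
	static void hardware_enable(void *junk)  { (void)junk; }
	static void hardware_disable(void *junk) { (void)junk; }

	/* The CPU_STARTING case runs on the CPU that is coming online, so
	 * the enable hook is invoked directly, not via a cross-CPU call. */
	int cpu_hotplug_notify(unsigned long val, int cpu)
	{
		switch (val) {
		case CPU_DYING:
			printf("disabling virtualization on CPU%d\n", cpu);
			hardware_disable(NULL);
			break;
		case CPU_STARTING:
			printf("enabling virtualization on CPU%d\n", cpu);
			hardware_enable(NULL);
			break;
		}
		return NOTIFY_OK;
	}
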
