382 files changed, 6551 insertions, 2751 deletions
diff --git a/Documentation/block/data-integrity.txt b/Documentation/block/data-integrity.txt
index e8ca040ba2cf..2d735b0ae383 100644
--- a/Documentation/block/data-integrity.txt
+++ b/Documentation/block/data-integrity.txt
@@ -50,7 +50,7 @@ encouraged them to allow separation of the data and integrity metadata | |||
50 | scatter-gather lists. | 50 | scatter-gather lists. |
51 | 51 | ||
52 | The controller will interleave the buffers on write and split them on | 52 | The controller will interleave the buffers on write and split them on |
53 | read. This means that the Linux can DMA the data buffers to and from | 53 | read. This means that Linux can DMA the data buffers to and from |
54 | host memory without changes to the page cache. | 54 | host memory without changes to the page cache. |
55 | 55 | ||
56 | Also, the 16-bit CRC checksum mandated by both the SCSI and SATA specs | 56 | Also, the 16-bit CRC checksum mandated by both the SCSI and SATA specs |
@@ -66,7 +66,7 @@ software RAID5). | |||
66 | 66 | ||
67 | The IP checksum is weaker than the CRC in terms of detecting bit | 67 | The IP checksum is weaker than the CRC in terms of detecting bit |
68 | errors. However, the strength is really in the separation of the data | 68 | errors. However, the strength is really in the separation of the data |
69 | buffers and the integrity metadata. These two distinct buffers much | 69 | buffers and the integrity metadata. These two distinct buffers must |
70 | match up for an I/O to complete. | 70 | match up for an I/O to complete. |
71 | 71 | ||
72 | The separation of the data and integrity metadata buffers as well as | 72 | The separation of the data and integrity metadata buffers as well as |
diff --git a/Documentation/cgroups/cpusets.txt b/Documentation/cgroups/cpusets.txt
index f9ca389dddf4..1d7e9784439a 100644
--- a/Documentation/cgroups/cpusets.txt
+++ b/Documentation/cgroups/cpusets.txt
@@ -777,6 +777,18 @@ in cpuset directories: | |||
777 | # /bin/echo 1-4 > cpus -> set cpus list to cpus 1,2,3,4 | 777 | # /bin/echo 1-4 > cpus -> set cpus list to cpus 1,2,3,4 |
778 | # /bin/echo 1,2,3,4 > cpus -> set cpus list to cpus 1,2,3,4 | 778 | # /bin/echo 1,2,3,4 > cpus -> set cpus list to cpus 1,2,3,4 |
779 | 779 | ||
780 | To add a CPU to a cpuset, write the new list of CPUs including the | ||
781 | CPU to be added. To add 6 to the above cpuset: | ||
782 | |||
783 | # /bin/echo 1-4,6 > cpus -> set cpus list to cpus 1,2,3,4,6 | ||
784 | |||
785 | Similarly to remove a CPU from a cpuset, write the new list of CPUs | ||
786 | without the CPU to be removed. | ||
787 | |||
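(Editor's illustration, not part of the patch; it assumes the 1-4,6 list set in the example above.)

 # /bin/echo 1-4 > cpus		-> remove cpu 6, cpus list is now 1,2,3,4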
788 | To remove all the CPUs: | ||
789 | |||
790 | # /bin/echo "" > cpus -> clear cpus list | ||
791 | |||
780 | 2.3 Setting flags | 792 | 2.3 Setting flags |
781 | ----------------- | 793 | ----------------- |
782 | 794 | ||
diff --git a/Documentation/gcov.txt b/Documentation/gcov.txt
index e716aadb3a33..40ec63352760 100644
--- a/Documentation/gcov.txt
+++ b/Documentation/gcov.txt
@@ -188,13 +188,18 @@ Solution: Exclude affected source files from profiling by specifying | |||
188 | GCOV_PROFILE := n or GCOV_PROFILE_basename.o := n in the | 188 | GCOV_PROFILE := n or GCOV_PROFILE_basename.o := n in the |
189 | corresponding Makefile. | 189 | corresponding Makefile. |
190 | 190 | ||
191 | Problem: Files copied from sysfs appear empty or incomplete. | ||
192 | Cause: Due to the way seq_file works, some tools such as cp or tar | ||
193 | may not correctly copy files from sysfs. | ||
194 | Solution: Use 'cat' to read .gcda files and 'cp -d' to copy links. | ||
195 | Alternatively use the mechanism shown in Appendix B. | ||
196 | |||
191 | 197 | ||
192 | Appendix A: gather_on_build.sh | 198 | Appendix A: gather_on_build.sh |
193 | ============================== | 199 | ============================== |
194 | 200 | ||
195 | Sample script to gather coverage meta files on the build machine | 201 | Sample script to gather coverage meta files on the build machine |
196 | (see 6a): | 202 | (see 6a): |
197 | |||
198 | #!/bin/bash | 203 | #!/bin/bash |
199 | 204 | ||
200 | KSRC=$1 | 205 | KSRC=$1 |
@@ -226,7 +231,7 @@ Appendix B: gather_on_test.sh | |||
226 | Sample script to gather coverage data files on the test machine | 231 | Sample script to gather coverage data files on the test machine |
227 | (see 6b): | 232 | (see 6b): |
228 | 233 | ||
229 | #!/bin/bash | 234 | #!/bin/bash -e |
230 | 235 | ||
231 | DEST=$1 | 236 | DEST=$1 |
232 | GCDA=/sys/kernel/debug/gcov | 237 | GCDA=/sys/kernel/debug/gcov |
@@ -236,11 +241,13 @@ if [ -z "$DEST" ] ; then | |||
236 | exit 1 | 241 | exit 1 |
237 | fi | 242 | fi |
238 | 243 | ||
239 | find $GCDA -name '*.gcno' -o -name '*.gcda' | tar cfz $DEST -T - | 244 | TEMPDIR=$(mktemp -d) |
245 | echo Collecting data.. | ||
246 | find $GCDA -type d -exec mkdir -p $TEMPDIR/\{\} \; | ||
247 | find $GCDA -name '*.gcda' -exec sh -c 'cat < $0 > '$TEMPDIR'/$0' {} \; | ||
248 | find $GCDA -name '*.gcno' -exec sh -c 'cp -d $0 '$TEMPDIR'/$0' {} \; | ||
249 | tar czf $DEST -C $TEMPDIR sys | ||
250 | rm -rf $TEMPDIR | ||
240 | 251 | ||
241 | if [ $? -eq 0 ] ; then | 252 | echo "$DEST successfully created, copy to build system and unpack with:" |
242 | echo "$DEST successfully created, copy to build system and unpack with:" | 253 | echo " tar xfz $DEST" |
243 | echo " tar xfz $DEST" | ||
244 | else | ||
245 | echo "Could not create file $DEST" | ||
246 | fi | ||
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index d08759aa0903..d77fbd8b79ac 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1915,6 +1915,12 @@ and is between 256 and 4096 characters. It is defined in the file | |||
1915 | Format: { 0 | 1 } | 1915 | Format: { 0 | 1 } |
1916 | See arch/parisc/kernel/pdc_chassis.c | 1916 | See arch/parisc/kernel/pdc_chassis.c |
1917 | 1917 | ||
1918 | percpu_alloc= [X86] Select which percpu first chunk allocator to use. | ||
1919 | Allowed values are one of "lpage", "embed" and "4k". | ||
1920 | See comments in arch/x86/kernel/setup_percpu.c for | ||
1921 | details on each allocator. This parameter is primarily | ||
1922 | for debugging and performance comparison. | ||
1923 | |||
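(Editor's sketch, not part of the patch.) Like any other entry in this file, the
value is appended to the kernel boot command line, for example:

	percpu_alloc=embed

which forces the "embed" first chunk allocator instead of the one chosen
automatically.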
1918 | pf. [PARIDE] | 1924 | pf. [PARIDE] |
1919 | See Documentation/blockdev/paride.txt. | 1925 | See Documentation/blockdev/paride.txt. |
1920 | 1926 | ||
@@ -2467,7 +2473,8 @@ and is between 256 and 4096 characters. It is defined in the file | |||
2467 | 2473 | ||
2468 | tp720= [HW,PS2] | 2474 | tp720= [HW,PS2] |
2469 | 2475 | ||
2470 | trace_buf_size=nn[KMG] [ftrace] will set tracing buffer size. | 2476 | trace_buf_size=nn[KMG] |
2477 | [FTRACE] will set tracing buffer size. | ||
2471 | 2478 | ||
2472 | trix= [HW,OSS] MediaTrix AudioTrix Pro | 2479 | trix= [HW,OSS] MediaTrix AudioTrix Pro |
2473 | Format: | 2480 | Format: |
diff --git a/Documentation/kmemleak.txt b/Documentation/kmemleak.txt
index 0112da3b9ab8..89068030b01b 100644
--- a/Documentation/kmemleak.txt
+++ b/Documentation/kmemleak.txt
@@ -16,13 +16,17 @@ Usage | |||
16 | ----- | 16 | ----- |
17 | 17 | ||
18 | CONFIG_DEBUG_KMEMLEAK in "Kernel hacking" has to be enabled. A kernel | 18 | CONFIG_DEBUG_KMEMLEAK in "Kernel hacking" has to be enabled. A kernel |
19 | thread scans the memory every 10 minutes (by default) and prints any new | 19 | thread scans the memory every 10 minutes (by default) and prints the |
20 | unreferenced objects found. To trigger an intermediate scan and display | 20 | number of new unreferenced objects found. To display the details of all |
21 | all the possible memory leaks: | 21 | the possible memory leaks: |
22 | 22 | ||
23 | # mount -t debugfs nodev /sys/kernel/debug/ | 23 | # mount -t debugfs nodev /sys/kernel/debug/ |
24 | # cat /sys/kernel/debug/kmemleak | 24 | # cat /sys/kernel/debug/kmemleak |
25 | 25 | ||
26 | To trigger an intermediate memory scan: | ||
27 | |||
28 | # echo scan > /sys/kernel/debug/kmemleak | ||
29 | |||
26 | Note that the orphan objects are listed in the order they were allocated | 30 | Note that the orphan objects are listed in the order they were allocated |
27 | and one object at the beginning of the list may cause other subsequent | 31 | and one object at the beginning of the list may cause other subsequent |
28 | objects to be reported as orphan. | 32 | objects to be reported as orphan. |
@@ -31,16 +35,21 @@ Memory scanning parameters can be modified at run-time by writing to the | |||
31 | /sys/kernel/debug/kmemleak file. The following parameters are supported: | 35 | /sys/kernel/debug/kmemleak file. The following parameters are supported: |
32 | 36 | ||
33 | off - disable kmemleak (irreversible) | 37 | off - disable kmemleak (irreversible) |
34 | stack=on - enable the task stacks scanning | 38 | stack=on - enable the task stacks scanning (default) |
35 | stack=off - disable the tasks stacks scanning | 39 | stack=off - disable the tasks stacks scanning |
36 | scan=on - start the automatic memory scanning thread | 40 | scan=on - start the automatic memory scanning thread (default) |
37 | scan=off - stop the automatic memory scanning thread | 41 | scan=off - stop the automatic memory scanning thread |
38 | scan=<secs> - set the automatic memory scanning period in seconds (0 | 42 | scan=<secs> - set the automatic memory scanning period in seconds |
39 | to disable it) | 43 | (default 600, 0 to stop the automatic scanning) |
44 | scan - trigger a memory scan | ||
40 | 45 | ||
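(Editor's example, derived from the parameter list above; not part of the patch.)
For instance, shortening the automatic scanning period to five minutes:

  # echo scan=300 > /sys/kernel/debug/kmemleak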
41 | Kmemleak can also be disabled at boot-time by passing "kmemleak=off" on | 46 | Kmemleak can also be disabled at boot-time by passing "kmemleak=off" on |
42 | the kernel command line. | 47 | the kernel command line. |
43 | 48 | ||
49 | Memory may be allocated or freed before kmemleak is initialised and | ||
50 | these actions are stored in an early log buffer. The size of this buffer | ||
51 | is configured via the CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE option. | ||
52 | |||
44 | Basic Algorithm | 53 | Basic Algorithm |
45 | --------------- | 54 | --------------- |
46 | 55 | ||
diff --git a/Documentation/leds-lp3944.txt b/Documentation/leds-lp3944.txt
new file mode 100644
index 000000000000..c6eda18b15ef
--- /dev/null
+++ b/Documentation/leds-lp3944.txt
@@ -0,0 +1,50 @@ | |||
1 | Kernel driver lp3944 | ||
2 | ==================== | ||
3 | |||
4 | * National Semiconductor LP3944 Fun-light Chip | ||
5 | Prefix: 'lp3944' | ||
6 | Addresses scanned: None (see the Notes section below) | ||
7 | Datasheet: Publicly available at the National Semiconductor website | ||
8 | http://www.national.com/pf/LP/LP3944.html | ||
9 | |||
10 | Authors: | ||
11 | Antonio Ospite <ospite@studenti.unina.it> | ||
12 | |||
13 | |||
14 | Description | ||
15 | ----------- | ||
16 | The LP3944 is a helper chip that can drive up to 8 leds, with two programmable | ||
17 | DIM modes; it could even be used as a gpio expander but this driver assumes it | ||
18 | is used as a led controller. | ||
19 | |||
20 | The DIM modes are used to set _blink_ patterns for leds, the pattern is | ||
21 | specified supplying two parameters: | ||
22 | - period: from 0s to 1.6s | ||
23 | - duty cycle: percentage of the period the led is on, from 0 to 100 | ||
24 | |||
25 | Setting a led in DIM0 or DIM1 mode makes it blink according to the pattern. | ||
26 | See the datasheet for details. | ||
27 | |||
28 | LP3944 can be found on Motorola A910 smartphone, where it drives the rgb | ||
29 | leds, the camera flash light and the lcds power. | ||
30 | |||
31 | |||
32 | Notes | ||
33 | ----- | ||
34 | The chip is used mainly in embedded contexts, so this driver expects it is | ||
35 | registered using the i2c_board_info mechanism. | ||
36 | |||
37 | To register the chip at address 0x60 on adapter 0, set the platform data | ||
38 | according to include/linux/leds-lp3944.h, set the i2c board info: | ||
39 | |||
40 | static struct i2c_board_info __initdata a910_i2c_board_info[] = { | ||
41 | { | ||
42 | I2C_BOARD_INFO("lp3944", 0x60), | ||
43 | .platform_data = &a910_lp3944_leds, | ||
44 | }, | ||
45 | }; | ||
46 | |||
47 | and register it in the platform init function | ||
48 | |||
49 | i2c_register_board_info(0, a910_i2c_board_info, | ||
50 | ARRAY_SIZE(a910_i2c_board_info)); | ||
diff --git a/Documentation/powerpc/dts-bindings/gpio/led.txt b/Documentation/powerpc/dts-bindings/gpio/led.txt
index 4fe14deedc0a..064db928c3c1 100644
--- a/Documentation/powerpc/dts-bindings/gpio/led.txt
+++ b/Documentation/powerpc/dts-bindings/gpio/led.txt
@@ -16,10 +16,17 @@ LED sub-node properties: | |||
16 | string defining the trigger assigned to the LED. Current triggers are: | 16 | string defining the trigger assigned to the LED. Current triggers are: |
17 | "backlight" - LED will act as a back-light, controlled by the framebuffer | 17 | "backlight" - LED will act as a back-light, controlled by the framebuffer |
18 | system | 18 | system |
19 | "default-on" - LED will turn on | 19 | "default-on" - LED will turn on, but see "default-state" below |
20 | "heartbeat" - LED "double" flashes at a load average based rate | 20 | "heartbeat" - LED "double" flashes at a load average based rate |
21 | "ide-disk" - LED indicates disk activity | 21 | "ide-disk" - LED indicates disk activity |
22 | "timer" - LED flashes at a fixed, configurable rate | 22 | "timer" - LED flashes at a fixed, configurable rate |
23 | - default-state: (optional) The initial state of the LED. Valid | ||
24 | values are "on", "off", and "keep". If the LED is already on or off | ||
25 | and the default-state property is set the to same value, then no | ||
26 | glitch should be produced where the LED momentarily turns off (or | ||
27 | on). The "keep" setting will keep the LED at whatever its current | ||
28 | state is, without producing a glitch. The default is off if this | ||
29 | property is not present. | ||
23 | 30 | ||
24 | Examples: | 31 | Examples: |
25 | 32 | ||
@@ -30,14 +37,22 @@ leds { | |||
30 | gpios = <&mcu_pio 0 1>; /* Active low */ | 37 | gpios = <&mcu_pio 0 1>; /* Active low */ |
31 | linux,default-trigger = "ide-disk"; | 38 | linux,default-trigger = "ide-disk"; |
32 | }; | 39 | }; |
40 | |||
41 | fault { | ||
42 | gpios = <&mcu_pio 1 0>; | ||
43 | /* Keep LED on if BIOS detected hardware fault */ | ||
44 | default-state = "keep"; | ||
45 | }; | ||
33 | }; | 46 | }; |
34 | 47 | ||
35 | run-control { | 48 | run-control { |
36 | compatible = "gpio-leds"; | 49 | compatible = "gpio-leds"; |
37 | red { | 50 | red { |
38 | gpios = <&mpc8572 6 0>; | 51 | gpios = <&mpc8572 6 0>; |
52 | default-state = "off"; | ||
39 | }; | 53 | }; |
40 | green { | 54 | green { |
41 | gpios = <&mpc8572 7 0>; | 55 | gpios = <&mpc8572 7 0>; |
56 | default-state = "on"; | ||
42 | }; | 57 | }; |
43 | } | 58 | } |
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
index 0d8d23581c44..939a3dd58148 100644
--- a/Documentation/sound/alsa/HD-Audio-Models.txt
+++ b/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -240,6 +240,7 @@ AD1986A | |||
240 | laptop-automute 2-channel with EAPD and HP-automute (Lenovo N100) | 240 | laptop-automute 2-channel with EAPD and HP-automute (Lenovo N100) |
241 | ultra 2-channel with EAPD (Samsung Ultra tablet PC) | 241 | ultra 2-channel with EAPD (Samsung Ultra tablet PC) |
242 | samsung 2-channel with EAPD (Samsung R65) | 242 | samsung 2-channel with EAPD (Samsung R65) |
243 | samsung-p50 2-channel with HP-automute (Samsung P50) | ||
243 | 244 | ||
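(Editor's note, not part of the patch.) A model entry such as the new samsung-p50
quirk is normally selected through the driver's "model" option, e.g. with a
modprobe configuration line like:

	options snd-hda-intel model=samsung-p50

assuming the codec is handled by the snd-hda-intel driver.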
244 | AD1988/AD1988B/AD1989A/AD1989B | 245 | AD1988/AD1988B/AD1989A/AD1989B |
245 | ============================== | 246 | ============================== |
diff --git a/Documentation/spi/spidev_test.c b/Documentation/spi/spidev_test.c
index cf0e3ce0d526..c1a5aad3c75a 100644
--- a/Documentation/spi/spidev_test.c
+++ b/Documentation/spi/spidev_test.c
@@ -99,11 +99,13 @@ void parse_opts(int argc, char *argv[]) | |||
99 | { "lsb", 0, 0, 'L' }, | 99 | { "lsb", 0, 0, 'L' }, |
100 | { "cs-high", 0, 0, 'C' }, | 100 | { "cs-high", 0, 0, 'C' }, |
101 | { "3wire", 0, 0, '3' }, | 101 | { "3wire", 0, 0, '3' }, |
102 | { "no-cs", 0, 0, 'N' }, | ||
103 | { "ready", 0, 0, 'R' }, | ||
102 | { NULL, 0, 0, 0 }, | 104 | { NULL, 0, 0, 0 }, |
103 | }; | 105 | }; |
104 | int c; | 106 | int c; |
105 | 107 | ||
106 | c = getopt_long(argc, argv, "D:s:d:b:lHOLC3", lopts, NULL); | 108 | c = getopt_long(argc, argv, "D:s:d:b:lHOLC3NR", lopts, NULL); |
107 | 109 | ||
108 | if (c == -1) | 110 | if (c == -1) |
109 | break; | 111 | break; |
@@ -139,6 +141,12 @@ void parse_opts(int argc, char *argv[]) | |||
139 | case '3': | 141 | case '3': |
140 | mode |= SPI_3WIRE; | 142 | mode |= SPI_3WIRE; |
141 | break; | 143 | break; |
144 | case 'N': | ||
145 | mode |= SPI_NO_CS; | ||
146 | break; | ||
147 | case 'R': | ||
148 | mode |= SPI_READY; | ||
149 | break; | ||
142 | default: | 150 | default: |
143 | print_usage(argv[0]); | 151 | print_usage(argv[0]); |
144 | break; | 152 | break; |
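(Editor's sketch, not part of the patch.) The mode bits collected here, now
including SPI_NO_CS and SPI_READY, are applied to the device elsewhere in
spidev_test.c through the spidev ioctl interface, roughly as follows (fd is
the already-open /dev/spidevB.C descriptor; error handling trimmed):

	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <linux/spi/spidev.h>

	/* push the requested mode byte down to the controller driver */
	if (ioctl(fd, SPI_IOC_WR_MODE, &mode) == -1) {
		perror("can't set spi mode");
		exit(1);
	}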
diff --git a/MAINTAINERS b/MAINTAINERS
index fa2a16def17a..381190c7949c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -867,12 +867,22 @@ M: alex@shark-linux.de | |||
867 | W: http://www.shark-linux.de/shark.html | 867 | W: http://www.shark-linux.de/shark.html |
868 | S: Maintained | 868 | S: Maintained |
869 | 869 | ||
870 | ARM/SAMSUNG ARM ARCHITECTURES | ||
871 | P: Ben Dooks | ||
872 | M: ben-linux@fluff.org | ||
873 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | ||
874 | W: http://www.fluff.org/ben/linux/ | ||
875 | S: Maintained | ||
876 | F: arch/arm/plat-s3c/ | ||
877 | F: arch/arm/plat-s3c24xx/ | ||
878 | |||
870 | ARM/S3C2410 ARM ARCHITECTURE | 879 | ARM/S3C2410 ARM ARCHITECTURE |
871 | P: Ben Dooks | 880 | P: Ben Dooks |
872 | M: ben-linux@fluff.org | 881 | M: ben-linux@fluff.org |
873 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 882 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
874 | W: http://www.fluff.org/ben/linux/ | 883 | W: http://www.fluff.org/ben/linux/ |
875 | S: Maintained | 884 | S: Maintained |
885 | F: arch/arm/mach-s3c2410/ | ||
876 | 886 | ||
877 | ARM/S3C2440 ARM ARCHITECTURE | 887 | ARM/S3C2440 ARM ARCHITECTURE |
878 | P: Ben Dooks | 888 | P: Ben Dooks |
@@ -880,6 +890,39 @@ M: ben-linux@fluff.org | |||
880 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 890 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
881 | W: http://www.fluff.org/ben/linux/ | 891 | W: http://www.fluff.org/ben/linux/ |
882 | S: Maintained | 892 | S: Maintained |
893 | F: arch/arm/mach-s3c2440/ | ||
894 | |||
895 | ARM/S3C2442 ARM ARCHITECTURE | ||
896 | P: Ben Dooks | ||
897 | M: ben-linux@fluff.org | ||
898 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | ||
899 | W: http://www.fluff.org/ben/linux/ | ||
900 | S: Maintained | ||
901 | F: arch/arm/mach-s3c2442/ | ||
902 | |||
903 | ARM/S3C2443 ARM ARCHITECTURE | ||
904 | P: Ben Dooks | ||
905 | M: ben-linux@fluff.org | ||
906 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | ||
907 | W: http://www.fluff.org/ben/linux/ | ||
908 | S: Maintained | ||
909 | F: arch/arm/mach-s3c2443/ | ||
910 | |||
911 | ARM/S3C6400 ARM ARCHITECTURE | ||
912 | P: Ben Dooks | ||
913 | M: ben-linux@fluff.org | ||
914 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | ||
915 | W: http://www.fluff.org/ben/linux/ | ||
916 | S: Maintained | ||
917 | F: arch/arm/mach-s3c6400/ | ||
918 | |||
919 | ARM/S3C6410 ARM ARCHITECTURE | ||
920 | P: Ben Dooks | ||
921 | M: ben-linux@fluff.org | ||
922 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | ||
923 | W: http://www.fluff.org/ben/linux/ | ||
924 | S: Maintained | ||
925 | F: arch/arm/mach-s3c6410/ | ||
883 | 926 | ||
884 | ARM/TECHNOLOGIC SYSTEMS TS7250 MACHINE SUPPORT | 927 | ARM/TECHNOLOGIC SYSTEMS TS7250 MACHINE SUPPORT |
885 | P: Lennert Buytenhek | 928 | P: Lennert Buytenhek |
@@ -2087,9 +2130,9 @@ F: drivers/edac/i5400_edac.c | |||
2087 | 2130 | ||
2088 | EDAC-I82975X | 2131 | EDAC-I82975X |
2089 | P: Ranganathan Desikan | 2132 | P: Ranganathan Desikan |
2090 | M: rdesikan@jetzbroadband.com | 2133 | M: ravi@jetztechnologies.com |
2091 | P: Arvind R. | 2134 | P: Arvind R. |
2092 | M: arvind@acarlab.com | 2135 | M: arvind@jetztechnologies.com |
2093 | L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers) | 2136 | L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers) |
2094 | W: bluesmoke.sourceforge.net | 2137 | W: bluesmoke.sourceforge.net |
2095 | S: Maintained | 2138 | S: Maintained |
@@ -2808,7 +2851,9 @@ S: Maintained | |||
2808 | 2851 | ||
2809 | IA64 (Itanium) PLATFORM | 2852 | IA64 (Itanium) PLATFORM |
2810 | P: Tony Luck | 2853 | P: Tony Luck |
2854 | P: Fenghua Yu | ||
2811 | M: tony.luck@intel.com | 2855 | M: tony.luck@intel.com |
2856 | M: fenghua.yu@intel.com | ||
2812 | L: linux-ia64@vger.kernel.org | 2857 | L: linux-ia64@vger.kernel.org |
2813 | W: http://www.ia64-linux.org/ | 2858 | W: http://www.ia64-linux.org/ |
2814 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6.git | 2859 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6.git |
@@ -2886,7 +2931,7 @@ P: Dmitry Eremin-Solenikov | |||
2886 | M: dbaryshkov@gmail.com | 2931 | M: dbaryshkov@gmail.com |
2887 | P: Sergey Lapin | 2932 | P: Sergey Lapin |
2888 | M: slapin@ossfans.org | 2933 | M: slapin@ossfans.org |
2889 | L: linux-zigbee-devel@lists.sourceforge.net | 2934 | L: linux-zigbee-devel@lists.sourceforge.net (moderated for non-subscribers) |
2890 | W: http://apps.sourceforge.net/trac/linux-zigbee | 2935 | W: http://apps.sourceforge.net/trac/linux-zigbee |
2891 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/lowpan/lowpan.git | 2936 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/lowpan/lowpan.git |
2892 | S: Maintained | 2937 | S: Maintained |
@@ -5533,8 +5578,8 @@ F: drivers/staging/ | |||
5533 | 5578 | ||
5534 | STARFIRE/DURALAN NETWORK DRIVER | 5579 | STARFIRE/DURALAN NETWORK DRIVER |
5535 | P: Ion Badulescu | 5580 | P: Ion Badulescu |
5536 | M: ionut@cs.columbia.edu | 5581 | M: ionut@badula.org |
5537 | S: Maintained | 5582 | S: Odd Fixes |
5538 | F: drivers/net/starfire* | 5583 | F: drivers/net/starfire* |
5539 | 5584 | ||
5540 | STARMODE RADIO IP (STRIP) PROTOCOL DRIVER | 5585 | STARMODE RADIO IP (STRIP) PROTOCOL DRIVER |
@@ -5668,6 +5713,13 @@ F: drivers/misc/tifm* | |||
5668 | F: drivers/mmc/host/tifm_sd.c | 5713 | F: drivers/mmc/host/tifm_sd.c |
5669 | F: include/linux/tifm.h | 5714 | F: include/linux/tifm.h |
5670 | 5715 | ||
5716 | TI TWL4030 SERIES SOC CODEC DRIVER | ||
5717 | P: Peter Ujfalusi | ||
5718 | M: peter.ujfalusi@nokia.com | ||
5719 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) | ||
5720 | S: Maintained | ||
5721 | F: sound/soc/codecs/twl4030* | ||
5722 | |||
5671 | TIPC NETWORK LAYER | 5723 | TIPC NETWORK LAYER |
5672 | P: Per Liden | 5724 | P: Per Liden |
5673 | M: per.liden@ericsson.com | 5725 | M: per.liden@ericsson.com |
diff --git a/arch/alpha/include/asm/percpu.h b/arch/alpha/include/asm/percpu.h
index 06c5c7a4afd3..b663f1f10b6a 100644
--- a/arch/alpha/include/asm/percpu.h
+++ b/arch/alpha/include/asm/percpu.h
@@ -30,7 +30,7 @@ extern unsigned long __per_cpu_offset[NR_CPUS]; | |||
30 | 30 | ||
31 | #ifndef MODULE | 31 | #ifndef MODULE |
32 | #define SHIFT_PERCPU_PTR(var, offset) RELOC_HIDE(&per_cpu_var(var), (offset)) | 32 | #define SHIFT_PERCPU_PTR(var, offset) RELOC_HIDE(&per_cpu_var(var), (offset)) |
33 | #define PER_CPU_ATTRIBUTES | 33 | #define PER_CPU_DEF_ATTRIBUTES |
34 | #else | 34 | #else |
35 | /* | 35 | /* |
36 | * To calculate addresses of locally defined variables, GCC uses 32-bit | 36 | * To calculate addresses of locally defined variables, GCC uses 32-bit |
@@ -49,7 +49,7 @@ extern unsigned long __per_cpu_offset[NR_CPUS]; | |||
49 | : "=&r"(__ptr), "=&r"(tmp_gp)); \ | 49 | : "=&r"(__ptr), "=&r"(tmp_gp)); \ |
50 | (typeof(&per_cpu_var(var)))(__ptr + (offset)); }) | 50 | (typeof(&per_cpu_var(var)))(__ptr + (offset)); }) |
51 | 51 | ||
52 | #define PER_CPU_ATTRIBUTES __used | 52 | #define PER_CPU_DEF_ATTRIBUTES __used |
53 | 53 | ||
54 | #endif /* MODULE */ | 54 | #endif /* MODULE */ |
55 | 55 | ||
@@ -71,7 +71,7 @@ extern unsigned long __per_cpu_offset[NR_CPUS]; | |||
71 | #define __get_cpu_var(var) per_cpu_var(var) | 71 | #define __get_cpu_var(var) per_cpu_var(var) |
72 | #define __raw_get_cpu_var(var) per_cpu_var(var) | 72 | #define __raw_get_cpu_var(var) per_cpu_var(var) |
73 | 73 | ||
74 | #define PER_CPU_ATTRIBUTES | 74 | #define PER_CPU_DEF_ATTRIBUTES |
75 | 75 | ||
76 | #endif /* SMP */ | 76 | #endif /* SMP */ |
77 | 77 | ||
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index a71fd941ade7..a89e4734b8f0 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -99,14 +99,6 @@ config DEBUG_CLPS711X_UART2 | |||
99 | output to the second serial port on these devices. Saying N will | 99 | output to the second serial port on these devices. Saying N will |
100 | cause the debug messages to appear on the first serial port. | 100 | cause the debug messages to appear on the first serial port. |
101 | 101 | ||
102 | config DEBUG_S3C_PORT | ||
103 | depends on DEBUG_LL && PLAT_S3C | ||
104 | bool "Kernel low-level debugging messages via S3C UART" | ||
105 | help | ||
106 | Say Y here if you want debug print routines to go to one of the | ||
107 | S3C internal UARTs. The chosen UART must have been configured | ||
108 | before it is used. | ||
109 | |||
110 | config DEBUG_S3C_UART | 102 | config DEBUG_S3C_UART |
111 | depends on PLAT_S3C | 103 | depends on PLAT_S3C |
112 | int "S3C UART to use for low-level debug" | 104 | int "S3C UART to use for low-level debug" |
diff --git a/arch/arm/configs/s3c2410_defconfig b/arch/arm/configs/s3c2410_defconfig
index 2d58b8fe59be..b49810461e41 100644
--- a/arch/arm/configs/s3c2410_defconfig
+++ b/arch/arm/configs/s3c2410_defconfig
@@ -260,6 +260,7 @@ CONFIG_MACH_NEXCODER_2440=y | |||
260 | CONFIG_SMDK2440_CPU2440=y | 260 | CONFIG_SMDK2440_CPU2440=y |
261 | CONFIG_MACH_AT2440EVB=y | 261 | CONFIG_MACH_AT2440EVB=y |
262 | CONFIG_CPU_S3C2442=y | 262 | CONFIG_CPU_S3C2442=y |
263 | CONFIG_MACH_MINI2440=y | ||
263 | 264 | ||
264 | # | 265 | # |
265 | # S3C2442 Machines | 266 | # S3C2442 Machines |
@@ -2298,7 +2299,6 @@ CONFIG_DEBUG_ERRORS=y | |||
2298 | # CONFIG_DEBUG_STACK_USAGE is not set | 2299 | # CONFIG_DEBUG_STACK_USAGE is not set |
2299 | CONFIG_DEBUG_LL=y | 2300 | CONFIG_DEBUG_LL=y |
2300 | # CONFIG_DEBUG_ICEDCC is not set | 2301 | # CONFIG_DEBUG_ICEDCC is not set |
2301 | CONFIG_DEBUG_S3C_PORT=y | ||
2302 | CONFIG_DEBUG_S3C_UART=0 | 2302 | CONFIG_DEBUG_S3C_UART=0 |
2303 | 2303 | ||
2304 | # | 2304 | # |
diff --git a/arch/arm/configs/s3c6400_defconfig b/arch/arm/configs/s3c6400_defconfig
index 2e8fa50e9a09..32860609e057 100644
--- a/arch/arm/configs/s3c6400_defconfig
+++ b/arch/arm/configs/s3c6400_defconfig
@@ -816,7 +816,6 @@ CONFIG_DEBUG_ERRORS=y | |||
816 | # CONFIG_DEBUG_STACK_USAGE is not set | 816 | # CONFIG_DEBUG_STACK_USAGE is not set |
817 | CONFIG_DEBUG_LL=y | 817 | CONFIG_DEBUG_LL=y |
818 | # CONFIG_DEBUG_ICEDCC is not set | 818 | # CONFIG_DEBUG_ICEDCC is not set |
819 | CONFIG_DEBUG_S3C_PORT=y | ||
820 | CONFIG_DEBUG_S3C_UART=0 | 819 | CONFIG_DEBUG_S3C_UART=0 |
821 | 820 | ||
822 | # | 821 | # |
diff --git a/arch/arm/configs/tct_hammer_defconfig b/arch/arm/configs/tct_hammer_defconfig
index 07dfb98df4f0..9d32faef05f6 100644
--- a/arch/arm/configs/tct_hammer_defconfig
+++ b/arch/arm/configs/tct_hammer_defconfig
@@ -857,7 +857,6 @@ CONFIG_DEBUG_ERRORS=y | |||
857 | # CONFIG_DEBUG_STACK_USAGE is not set | 857 | # CONFIG_DEBUG_STACK_USAGE is not set |
858 | CONFIG_DEBUG_LL=y | 858 | CONFIG_DEBUG_LL=y |
859 | # CONFIG_DEBUG_ICEDCC is not set | 859 | # CONFIG_DEBUG_ICEDCC is not set |
860 | # CONFIG_DEBUG_S3C_PORT is not set | ||
861 | CONFIG_DEBUG_S3C_UART=0 | 860 | CONFIG_DEBUG_S3C_UART=0 |
862 | 861 | ||
863 | # | 862 | # |
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index be962c1349c4..9c746af1bf6e 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -12,7 +12,7 @@ | |||
12 | 12 | ||
13 | /* PAGE_SHIFT determines the page size */ | 13 | /* PAGE_SHIFT determines the page size */ |
14 | #define PAGE_SHIFT 12 | 14 | #define PAGE_SHIFT 12 |
15 | #define PAGE_SIZE (1UL << PAGE_SHIFT) | 15 | #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) |
16 | #define PAGE_MASK (~(PAGE_SIZE-1)) | 16 | #define PAGE_MASK (~(PAGE_SIZE-1)) |
17 | 17 | ||
18 | #ifndef __ASSEMBLY__ | 18 | #ifndef __ASSEMBLY__ |
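(Editor's note, not part of the patch.) The point of _AC(1,UL) is that the same
PAGE_SIZE definition can now be used from assembly as well as C sources: _AC()
drops the type suffix for the assembler and pastes it on for C. Its definition
in include/linux/const.h is roughly:

	#ifdef __ASSEMBLY__
	#define _AC(X,Y)	X		/* assembler: bare constant, no suffix */
	#else
	#define __AC(X,Y)	(X##Y)
	#define _AC(X,Y)	__AC(X,Y)	/* C: expands to 1UL, keeps the unsigned long type */
	#endif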
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 096f600dc8d8..b7c3490eaa24 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -98,17 +98,6 @@ unlock: | |||
98 | return 0; | 98 | return 0; |
99 | } | 99 | } |
100 | 100 | ||
101 | /* Handle bad interrupts */ | ||
102 | static struct irq_desc bad_irq_desc = { | ||
103 | .handle_irq = handle_bad_irq, | ||
104 | .lock = __SPIN_LOCK_UNLOCKED(bad_irq_desc.lock), | ||
105 | }; | ||
106 | |||
107 | #ifdef CONFIG_CPUMASK_OFFSTACK | ||
108 | /* We are not allocating bad_irq_desc.affinity or .pending_mask */ | ||
109 | #error "ARM architecture does not support CONFIG_CPUMASK_OFFSTACK." | ||
110 | #endif | ||
111 | |||
112 | /* | 101 | /* |
113 | * do_IRQ handles all hardware IRQ's. Decoded IRQs should not | 102 | * do_IRQ handles all hardware IRQ's. Decoded IRQs should not |
114 | * come via this function. Instead, they should provide their | 103 | * come via this function. Instead, they should provide their |
@@ -124,10 +113,13 @@ asmlinkage void __exception asm_do_IRQ(unsigned int irq, struct pt_regs *regs) | |||
124 | * Some hardware gives randomly wrong interrupts. Rather | 113 | * Some hardware gives randomly wrong interrupts. Rather |
125 | * than crashing, do something sensible. | 114 | * than crashing, do something sensible. |
126 | */ | 115 | */ |
127 | if (irq >= NR_IRQS) | 116 | if (unlikely(irq >= NR_IRQS)) { |
128 | handle_bad_irq(irq, &bad_irq_desc); | 117 | if (printk_ratelimit()) |
129 | else | 118 | printk(KERN_WARNING "Bad IRQ%u\n", irq); |
119 | ack_bad_irq(irq); | ||
120 | } else { | ||
130 | generic_handle_irq(irq); | 121 | generic_handle_irq(irq); |
122 | } | ||
131 | 123 | ||
132 | /* AT91 specific workaround */ | 124 | /* AT91 specific workaround */ |
133 | irq_finish(irq); | 125 | irq_finish(irq); |
@@ -165,10 +157,6 @@ void __init init_IRQ(void) | |||
165 | for (irq = 0; irq < NR_IRQS; irq++) | 157 | for (irq = 0; irq < NR_IRQS; irq++) |
166 | irq_desc[irq].status |= IRQ_NOREQUEST | IRQ_NOPROBE; | 158 | irq_desc[irq].status |= IRQ_NOREQUEST | IRQ_NOPROBE; |
167 | 159 | ||
168 | #ifdef CONFIG_SMP | ||
169 | cpumask_setall(bad_irq_desc.affinity); | ||
170 | bad_irq_desc.node = smp_processor_id(); | ||
171 | #endif | ||
172 | init_arch_irq(); | 160 | init_arch_irq(); |
173 | } | 161 | } |
174 | 162 | ||
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 4340bf3d2c84..69371028a202 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -6,6 +6,7 @@ | |||
6 | #include <asm-generic/vmlinux.lds.h> | 6 | #include <asm-generic/vmlinux.lds.h> |
7 | #include <asm/thread_info.h> | 7 | #include <asm/thread_info.h> |
8 | #include <asm/memory.h> | 8 | #include <asm/memory.h> |
9 | #include <asm/page.h> | ||
9 | 10 | ||
10 | OUTPUT_ARCH(arm) | 11 | OUTPUT_ARCH(arm) |
11 | ENTRY(stext) | 12 | ENTRY(stext) |
@@ -63,7 +64,7 @@ SECTIONS | |||
63 | usr/built-in.o(.init.ramfs) | 64 | usr/built-in.o(.init.ramfs) |
64 | __initramfs_end = .; | 65 | __initramfs_end = .; |
65 | #endif | 66 | #endif |
66 | . = ALIGN(4096); | 67 | . = ALIGN(PAGE_SIZE); |
67 | __per_cpu_load = .; | 68 | __per_cpu_load = .; |
68 | __per_cpu_start = .; | 69 | __per_cpu_start = .; |
69 | *(.data.percpu.page_aligned) | 70 | *(.data.percpu.page_aligned) |
@@ -73,7 +74,7 @@ SECTIONS | |||
73 | #ifndef CONFIG_XIP_KERNEL | 74 | #ifndef CONFIG_XIP_KERNEL |
74 | __init_begin = _stext; | 75 | __init_begin = _stext; |
75 | INIT_DATA | 76 | INIT_DATA |
76 | . = ALIGN(4096); | 77 | . = ALIGN(PAGE_SIZE); |
77 | __init_end = .; | 78 | __init_end = .; |
78 | #endif | 79 | #endif |
79 | } | 80 | } |
@@ -118,7 +119,7 @@ SECTIONS | |||
118 | *(.got) /* Global offset table */ | 119 | *(.got) /* Global offset table */ |
119 | } | 120 | } |
120 | 121 | ||
121 | RODATA | 122 | RO_DATA(PAGE_SIZE) |
122 | 123 | ||
123 | _etext = .; /* End of text and rodata section */ | 124 | _etext = .; /* End of text and rodata section */ |
124 | 125 | ||
@@ -158,17 +159,17 @@ SECTIONS | |||
158 | *(.data.init_task) | 159 | *(.data.init_task) |
159 | 160 | ||
160 | #ifdef CONFIG_XIP_KERNEL | 161 | #ifdef CONFIG_XIP_KERNEL |
161 | . = ALIGN(4096); | 162 | . = ALIGN(PAGE_SIZE); |
162 | __init_begin = .; | 163 | __init_begin = .; |
163 | INIT_DATA | 164 | INIT_DATA |
164 | . = ALIGN(4096); | 165 | . = ALIGN(PAGE_SIZE); |
165 | __init_end = .; | 166 | __init_end = .; |
166 | #endif | 167 | #endif |
167 | 168 | ||
168 | . = ALIGN(4096); | 169 | . = ALIGN(PAGE_SIZE); |
169 | __nosave_begin = .; | 170 | __nosave_begin = .; |
170 | *(.data.nosave) | 171 | *(.data.nosave) |
171 | . = ALIGN(4096); | 172 | . = ALIGN(PAGE_SIZE); |
172 | __nosave_end = .; | 173 | __nosave_end = .; |
173 | 174 | ||
174 | /* | 175 | /* |
diff --git a/arch/arm/mach-at91/board-sam9g20ek.c b/arch/arm/mach-at91/board-sam9g20ek.c
index cc270beadd5d..a55398ed1211 100644
--- a/arch/arm/mach-at91/board-sam9g20ek.c
+++ b/arch/arm/mach-at91/board-sam9g20ek.c
@@ -24,6 +24,8 @@ | |||
24 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
25 | #include <linux/spi/spi.h> | 25 | #include <linux/spi/spi.h> |
26 | #include <linux/spi/at73c213.h> | 26 | #include <linux/spi/at73c213.h> |
27 | #include <linux/gpio_keys.h> | ||
28 | #include <linux/input.h> | ||
27 | #include <linux/clk.h> | 29 | #include <linux/clk.h> |
28 | 30 | ||
29 | #include <mach/hardware.h> | 31 | #include <mach/hardware.h> |
@@ -218,6 +220,56 @@ static struct gpio_led ek_leds[] = { | |||
218 | } | 220 | } |
219 | }; | 221 | }; |
220 | 222 | ||
223 | |||
224 | /* | ||
225 | * GPIO Buttons | ||
226 | */ | ||
227 | #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) | ||
228 | static struct gpio_keys_button ek_buttons[] = { | ||
229 | { | ||
230 | .gpio = AT91_PIN_PA30, | ||
231 | .code = BTN_3, | ||
232 | .desc = "Button 3", | ||
233 | .active_low = 1, | ||
234 | .wakeup = 1, | ||
235 | }, | ||
236 | { | ||
237 | .gpio = AT91_PIN_PA31, | ||
238 | .code = BTN_4, | ||
239 | .desc = "Button 4", | ||
240 | .active_low = 1, | ||
241 | .wakeup = 1, | ||
242 | } | ||
243 | }; | ||
244 | |||
245 | static struct gpio_keys_platform_data ek_button_data = { | ||
246 | .buttons = ek_buttons, | ||
247 | .nbuttons = ARRAY_SIZE(ek_buttons), | ||
248 | }; | ||
249 | |||
250 | static struct platform_device ek_button_device = { | ||
251 | .name = "gpio-keys", | ||
252 | .id = -1, | ||
253 | .num_resources = 0, | ||
254 | .dev = { | ||
255 | .platform_data = &ek_button_data, | ||
256 | } | ||
257 | }; | ||
258 | |||
259 | static void __init ek_add_device_buttons(void) | ||
260 | { | ||
261 | at91_set_gpio_input(AT91_PIN_PA30, 1); /* btn3 */ | ||
262 | at91_set_deglitch(AT91_PIN_PA30, 1); | ||
263 | at91_set_gpio_input(AT91_PIN_PA31, 1); /* btn4 */ | ||
264 | at91_set_deglitch(AT91_PIN_PA31, 1); | ||
265 | |||
266 | platform_device_register(&ek_button_device); | ||
267 | } | ||
268 | #else | ||
269 | static void __init ek_add_device_buttons(void) {} | ||
270 | #endif | ||
271 | |||
272 | |||
221 | static struct i2c_board_info __initdata ek_i2c_devices[] = { | 273 | static struct i2c_board_info __initdata ek_i2c_devices[] = { |
222 | { | 274 | { |
223 | I2C_BOARD_INFO("24c512", 0x50), | 275 | I2C_BOARD_INFO("24c512", 0x50), |
@@ -245,6 +297,8 @@ static void __init ek_board_init(void) | |||
245 | at91_add_device_i2c(ek_i2c_devices, ARRAY_SIZE(ek_i2c_devices)); | 297 | at91_add_device_i2c(ek_i2c_devices, ARRAY_SIZE(ek_i2c_devices)); |
246 | /* LEDs */ | 298 | /* LEDs */ |
247 | at91_gpio_leds(ek_leds, ARRAY_SIZE(ek_leds)); | 299 | at91_gpio_leds(ek_leds, ARRAY_SIZE(ek_leds)); |
300 | /* Push Buttons */ | ||
301 | ek_add_device_buttons(); | ||
248 | /* PCK0 provides MCLK to the WM8731 */ | 302 | /* PCK0 provides MCLK to the WM8731 */ |
249 | at91_set_B_periph(AT91_PIN_PC1, 0); | 303 | at91_set_B_periph(AT91_PIN_PC1, 0); |
250 | /* SSC (for WM8731) */ | 304 | /* SSC (for WM8731) */ |
diff --git a/arch/arm/mach-at91/board-sam9rlek.c b/arch/arm/mach-at91/board-sam9rlek.c
index 35e12a49d1a6..f6b5672cabd6 100644
--- a/arch/arm/mach-at91/board-sam9rlek.c
+++ b/arch/arm/mach-at91/board-sam9rlek.c
@@ -186,19 +186,21 @@ static struct fb_monspecs at91fb_default_monspecs = { | |||
186 | static void at91_lcdc_power_control(int on) | 186 | static void at91_lcdc_power_control(int on) |
187 | { | 187 | { |
188 | if (on) | 188 | if (on) |
189 | at91_set_gpio_value(AT91_PIN_PA30, 0); /* power up */ | 189 | at91_set_gpio_value(AT91_PIN_PC1, 0); /* power up */ |
190 | else | 190 | else |
191 | at91_set_gpio_value(AT91_PIN_PA30, 1); /* power down */ | 191 | at91_set_gpio_value(AT91_PIN_PC1, 1); /* power down */ |
192 | } | 192 | } |
193 | 193 | ||
194 | /* Driver datas */ | 194 | /* Driver datas */ |
195 | static struct atmel_lcdfb_info __initdata ek_lcdc_data = { | 195 | static struct atmel_lcdfb_info __initdata ek_lcdc_data = { |
196 | .lcdcon_is_backlight = true, | ||
196 | .default_bpp = 16, | 197 | .default_bpp = 16, |
197 | .default_dmacon = ATMEL_LCDC_DMAEN, | 198 | .default_dmacon = ATMEL_LCDC_DMAEN, |
198 | .default_lcdcon2 = AT91SAM9RL_DEFAULT_LCDCON2, | 199 | .default_lcdcon2 = AT91SAM9RL_DEFAULT_LCDCON2, |
199 | .default_monspecs = &at91fb_default_monspecs, | 200 | .default_monspecs = &at91fb_default_monspecs, |
200 | .atmel_lcdfb_power_control = at91_lcdc_power_control, | 201 | .atmel_lcdfb_power_control = at91_lcdc_power_control, |
201 | .guard_time = 1, | 202 | .guard_time = 1, |
203 | .lcd_wiring_mode = ATMEL_LCDC_WIRING_RGB, | ||
202 | }; | 204 | }; |
203 | 205 | ||
204 | #else | 206 | #else |
diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c
index e70fc7c66bbb..ed2a48a9ce74 100644
--- a/arch/arm/mach-omap1/board-nokia770.c
+++ b/arch/arm/mach-omap1/board-nokia770.c
@@ -36,7 +36,6 @@ | |||
36 | #include <mach/hwa742.h> | 36 | #include <mach/hwa742.h> |
37 | #include <mach/lcd_mipid.h> | 37 | #include <mach/lcd_mipid.h> |
38 | #include <mach/mmc.h> | 38 | #include <mach/mmc.h> |
39 | #include <mach/usb.h> | ||
40 | #include <mach/clock.h> | 39 | #include <mach/clock.h> |
41 | 40 | ||
42 | #define ADS7846_PENDOWN_GPIO 15 | 41 | #define ADS7846_PENDOWN_GPIO 15 |
@@ -205,9 +204,11 @@ static int nokia770_mmc_get_cover_state(struct device *dev, int slot) | |||
205 | static struct omap_mmc_platform_data nokia770_mmc2_data = { | 204 | static struct omap_mmc_platform_data nokia770_mmc2_data = { |
206 | .nr_slots = 1, | 205 | .nr_slots = 1, |
207 | .dma_mask = 0xffffffff, | 206 | .dma_mask = 0xffffffff, |
207 | .max_freq = 12000000, | ||
208 | .slots[0] = { | 208 | .slots[0] = { |
209 | .set_power = nokia770_mmc_set_power, | 209 | .set_power = nokia770_mmc_set_power, |
210 | .get_cover_state = nokia770_mmc_get_cover_state, | 210 | .get_cover_state = nokia770_mmc_get_cover_state, |
211 | .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, | ||
211 | .name = "mmcblk", | 212 | .name = "mmcblk", |
212 | }, | 213 | }, |
213 | }; | 214 | }; |
diff --git a/arch/arm/mach-omap1/mailbox.c b/arch/arm/mach-omap1/mailbox.c
index 0af4d6c85b47..6810b4aeb02c 100644
--- a/arch/arm/mach-omap1/mailbox.c
+++ b/arch/arm/mach-omap1/mailbox.c
@@ -203,5 +203,5 @@ module_exit(omap1_mbox_exit); | |||
203 | 203 | ||
204 | MODULE_LICENSE("GPL v2"); | 204 | MODULE_LICENSE("GPL v2"); |
205 | MODULE_DESCRIPTION("omap mailbox: omap1 architecture specific functions"); | 205 | MODULE_DESCRIPTION("omap mailbox: omap1 architecture specific functions"); |
206 | MODULE_AUTHOR("Hiroshi DOYU" <Hiroshi.DOYU@nokia.com>); | 206 | MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>"); |
207 | MODULE_ALIAS("platform:omap1-mailbox"); | 207 | MODULE_ALIAS("platform:omap1-mailbox"); |
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index da93b86234ed..9a0bf6744a05 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -362,6 +362,7 @@ static struct omap_onenand_platform_data board_onenand_data = { | |||
362 | .gpio_irq = 65, | 362 | .gpio_irq = 65, |
363 | .parts = onenand_partitions, | 363 | .parts = onenand_partitions, |
364 | .nr_parts = ARRAY_SIZE(onenand_partitions), | 364 | .nr_parts = ARRAY_SIZE(onenand_partitions), |
365 | .flags = ONENAND_SYNC_READWRITE, | ||
365 | }; | 366 | }; |
366 | 367 | ||
367 | static void __init board_onenand_init(void) | 368 | static void __init board_onenand_init(void) |
diff --git a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c
index 2fd22f9c5f0e..54fec53a48e7 100644
--- a/arch/arm/mach-omap2/gpmc-onenand.c
+++ b/arch/arm/mach-omap2/gpmc-onenand.c
@@ -31,6 +31,8 @@ static struct platform_device gpmc_onenand_device = { | |||
31 | static int omap2_onenand_set_async_mode(int cs, void __iomem *onenand_base) | 31 | static int omap2_onenand_set_async_mode(int cs, void __iomem *onenand_base) |
32 | { | 32 | { |
33 | struct gpmc_timings t; | 33 | struct gpmc_timings t; |
34 | u32 reg; | ||
35 | int err; | ||
34 | 36 | ||
35 | const int t_cer = 15; | 37 | const int t_cer = 15; |
36 | const int t_avdp = 12; | 38 | const int t_avdp = 12; |
@@ -43,6 +45,11 @@ static int omap2_onenand_set_async_mode(int cs, void __iomem *onenand_base) | |||
43 | const int t_wpl = 40; | 45 | const int t_wpl = 40; |
44 | const int t_wph = 30; | 46 | const int t_wph = 30; |
45 | 47 | ||
48 | /* Ensure sync read and sync write are disabled */ | ||
49 | reg = readw(onenand_base + ONENAND_REG_SYS_CFG1); | ||
50 | reg &= ~ONENAND_SYS_CFG1_SYNC_READ & ~ONENAND_SYS_CFG1_SYNC_WRITE; | ||
51 | writew(reg, onenand_base + ONENAND_REG_SYS_CFG1); | ||
52 | |||
46 | memset(&t, 0, sizeof(t)); | 53 | memset(&t, 0, sizeof(t)); |
47 | t.sync_clk = 0; | 54 | t.sync_clk = 0; |
48 | t.cs_on = 0; | 55 | t.cs_on = 0; |
@@ -74,7 +81,16 @@ static int omap2_onenand_set_async_mode(int cs, void __iomem *onenand_base) | |||
74 | GPMC_CONFIG1_DEVICESIZE_16 | | 81 | GPMC_CONFIG1_DEVICESIZE_16 | |
75 | GPMC_CONFIG1_MUXADDDATA); | 82 | GPMC_CONFIG1_MUXADDDATA); |
76 | 83 | ||
77 | return gpmc_cs_set_timings(cs, &t); | 84 | err = gpmc_cs_set_timings(cs, &t); |
85 | if (err) | ||
86 | return err; | ||
87 | |||
88 | /* Ensure sync read and sync write are disabled */ | ||
89 | reg = readw(onenand_base + ONENAND_REG_SYS_CFG1); | ||
90 | reg &= ~ONENAND_SYS_CFG1_SYNC_READ & ~ONENAND_SYS_CFG1_SYNC_WRITE; | ||
91 | writew(reg, onenand_base + ONENAND_REG_SYS_CFG1); | ||
92 | |||
93 | return 0; | ||
78 | } | 94 | } |
79 | 95 | ||
80 | static void set_onenand_cfg(void __iomem *onenand_base, int latency, | 96 | static void set_onenand_cfg(void __iomem *onenand_base, int latency, |
@@ -124,7 +140,8 @@ static int omap2_onenand_set_sync_mode(struct omap_onenand_platform_data *cfg, | |||
124 | } else if (cfg->flags & ONENAND_SYNC_READWRITE) { | 140 | } else if (cfg->flags & ONENAND_SYNC_READWRITE) { |
125 | sync_read = 1; | 141 | sync_read = 1; |
126 | sync_write = 1; | 142 | sync_write = 1; |
127 | } | 143 | } else |
144 | return omap2_onenand_set_async_mode(cs, onenand_base); | ||
128 | 145 | ||
129 | if (!freq) { | 146 | if (!freq) { |
130 | /* Very first call freq is not known */ | 147 | /* Very first call freq is not known */ |
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index 458990e20c60..a98201cc265c 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -48,6 +48,28 @@ int omap_chip_is(struct omap_chip_id oci) | |||
48 | } | 48 | } |
49 | EXPORT_SYMBOL(omap_chip_is); | 49 | EXPORT_SYMBOL(omap_chip_is); |
50 | 50 | ||
51 | int omap_type(void) | ||
52 | { | ||
53 | u32 val = 0; | ||
54 | |||
55 | if (cpu_is_omap24xx()) | ||
56 | val = omap_ctrl_readl(OMAP24XX_CONTROL_STATUS); | ||
57 | else if (cpu_is_omap34xx()) | ||
58 | val = omap_ctrl_readl(OMAP343X_CONTROL_STATUS); | ||
59 | else { | ||
60 | pr_err("Cannot detect omap type!\n"); | ||
61 | goto out; | ||
62 | } | ||
63 | |||
64 | val &= OMAP2_DEVICETYPE_MASK; | ||
65 | val >>= 8; | ||
66 | |||
67 | out: | ||
68 | return val; | ||
69 | } | ||
70 | EXPORT_SYMBOL(omap_type); | ||
71 | |||
72 | |||
51 | /*----------------------------------------------------------------------------*/ | 73 | /*----------------------------------------------------------------------------*/ |
52 | 74 | ||
53 | #define OMAP_TAP_IDCODE 0x0204 | 75 | #define OMAP_TAP_IDCODE 0x0204 |
diff --git a/arch/arm/mach-omap2/mailbox.c b/arch/arm/mach-omap2/mailbox.c
index fd5b8a5925cc..6f71f3730c97 100644
--- a/arch/arm/mach-omap2/mailbox.c
+++ b/arch/arm/mach-omap2/mailbox.c
@@ -282,12 +282,12 @@ static int __devinit omap2_mbox_probe(struct platform_device *pdev) | |||
282 | return -ENOMEM; | 282 | return -ENOMEM; |
283 | 283 | ||
284 | /* DSP or IVA2 IRQ */ | 284 | /* DSP or IVA2 IRQ */ |
285 | mbox_dsp_info.irq = platform_get_irq(pdev, 0); | 285 | ret = platform_get_irq(pdev, 0); |
286 | if (mbox_dsp_info.irq < 0) { | 286 | if (ret < 0) { |
287 | dev_err(&pdev->dev, "invalid irq resource\n"); | 287 | dev_err(&pdev->dev, "invalid irq resource\n"); |
288 | ret = -ENODEV; | ||
289 | goto err_dsp; | 288 | goto err_dsp; |
290 | } | 289 | } |
290 | mbox_dsp_info.irq = ret; | ||
291 | 291 | ||
292 | ret = omap_mbox_register(&pdev->dev, &mbox_dsp_info); | 292 | ret = omap_mbox_register(&pdev->dev, &mbox_dsp_info); |
293 | if (ret) | 293 | if (ret) |
diff --git a/arch/arm/mach-omap2/mmc-twl4030.c b/arch/arm/mach-omap2/mmc-twl4030.c
index 9756a878fd90..1541fd4c8d0f 100644
--- a/arch/arm/mach-omap2/mmc-twl4030.c
+++ b/arch/arm/mach-omap2/mmc-twl4030.c
@@ -263,8 +263,19 @@ static int twl_mmc1_set_power(struct device *dev, int slot, int power_on, | |||
263 | static int twl_mmc23_set_power(struct device *dev, int slot, int power_on, int vdd) | 263 | static int twl_mmc23_set_power(struct device *dev, int slot, int power_on, int vdd) |
264 | { | 264 | { |
265 | int ret = 0; | 265 | int ret = 0; |
266 | struct twl_mmc_controller *c = &hsmmc[1]; | 266 | struct twl_mmc_controller *c = NULL; |
267 | struct omap_mmc_platform_data *mmc = dev->platform_data; | 267 | struct omap_mmc_platform_data *mmc = dev->platform_data; |
268 | int i; | ||
269 | |||
270 | for (i = 1; i < ARRAY_SIZE(hsmmc); i++) { | ||
271 | if (mmc == hsmmc[i].mmc) { | ||
272 | c = &hsmmc[i]; | ||
273 | break; | ||
274 | } | ||
275 | } | ||
276 | |||
277 | if (c == NULL) | ||
278 | return -ENODEV; | ||
268 | 279 | ||
269 | /* If we don't see a Vcc regulator, assume it's a fixed | 280 | /* If we don't see a Vcc regulator, assume it's a fixed |
270 | * voltage always-on regulator. | 281 | * voltage always-on regulator. |
diff --git a/arch/arm/mach-s3c2440/mach-mini2440.c b/arch/arm/mach-s3c2440/mach-mini2440.c
index 6a5bc3021bdb..ec71a6965786 100644
--- a/arch/arm/mach-s3c2440/mach-mini2440.c
+++ b/arch/arm/mach-s3c2440/mach-mini2440.c
@@ -48,8 +48,6 @@ | |||
48 | #include <plat/mci.h> | 48 | #include <plat/mci.h> |
49 | #include <plat/udc.h> | 49 | #include <plat/udc.h> |
50 | 50 | ||
51 | #include <plat/regs-serial.h> | ||
52 | |||
53 | #include <linux/mtd/mtd.h> | 51 | #include <linux/mtd/mtd.h> |
54 | #include <linux/mtd/nand.h> | 52 | #include <linux/mtd/nand.h> |
55 | #include <linux/mtd/nand_ecc.h> | 53 | #include <linux/mtd/nand_ecc.h> |
@@ -275,6 +273,7 @@ static struct s3c2410_nand_set mini2440_nand_sets[] __initdata = { | |||
275 | .nr_chips = 1, | 273 | .nr_chips = 1, |
276 | .nr_partitions = ARRAY_SIZE(mini2440_default_nand_part), | 274 | .nr_partitions = ARRAY_SIZE(mini2440_default_nand_part), |
277 | .partitions = mini2440_default_nand_part, | 275 | .partitions = mini2440_default_nand_part, |
276 | .flash_bbt = 1, /* we use u-boot to create a BBT */ | ||
278 | }, | 277 | }, |
279 | }; | 278 | }; |
280 | 279 | ||
diff --git a/arch/arm/mach-s3c2442/mach-gta02.c b/arch/arm/mach-s3c2442/mach-gta02.c
index e23b581aa0e1..0fb385bd9cd9 100644
--- a/arch/arm/mach-s3c2442/mach-gta02.c
+++ b/arch/arm/mach-s3c2442/mach-gta02.c
@@ -433,8 +433,7 @@ static struct s3c2410_nand_set gta02_nand_sets[] = { | |||
433 | */ | 433 | */ |
434 | .name = "neo1973-nand", | 434 | .name = "neo1973-nand", |
435 | .nr_chips = 1, | 435 | .nr_chips = 1, |
436 | .use_bbt = 1, | 436 | .flash_bbt = 1, |
437 | .force_soft_ecc = 1, | ||
438 | }, | 437 | }, |
439 | }; | 438 | }; |
440 | 439 | ||
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index def14ec265b3..7677a4a1cef2 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -2457,6 +2457,19 @@ static int __init omap_init_dma(void) | |||
2457 | setup_irq(irq, &omap24xx_dma_irq); | 2457 | setup_irq(irq, &omap24xx_dma_irq); |
2458 | } | 2458 | } |
2459 | 2459 | ||
2460 | /* Enable smartidle idlemodes and autoidle */ | ||
2461 | if (cpu_is_omap34xx()) { | ||
2462 | u32 v = dma_read(OCP_SYSCONFIG); | ||
2463 | v &= ~(DMA_SYSCONFIG_MIDLEMODE_MASK | | ||
2464 | DMA_SYSCONFIG_SIDLEMODE_MASK | | ||
2465 | DMA_SYSCONFIG_AUTOIDLE); | ||
2466 | v |= (DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_SMARTIDLE) | | ||
2467 | DMA_SYSCONFIG_SIDLEMODE(DMA_IDLEMODE_SMARTIDLE) | | ||
2468 | DMA_SYSCONFIG_AUTOIDLE); | ||
2469 | dma_write(v , OCP_SYSCONFIG); | ||
2470 | } | ||
2471 | |||
2472 | |||
2460 | /* FIXME: Update LCD DMA to work on 24xx */ | 2473 | /* FIXME: Update LCD DMA to work on 24xx */ |
2461 | if (cpu_class_is_omap1()) { | 2474 | if (cpu_class_is_omap1()) { |
2462 | r = request_irq(INT_DMA_LCD, lcd_dma_irq_handler, 0, | 2475 | r = request_irq(INT_DMA_LCD, lcd_dma_irq_handler, 0, |
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index 7fd89ba8d3b5..26b387c12423 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/arch/arm/plat-omap/gpio.c
@@ -1585,6 +1585,7 @@ static int __init _omap_gpio_init(void) | |||
1585 | __raw_writel(0x00000000, bank->base + OMAP24XX_GPIO_IRQENABLE1); | 1585 | __raw_writel(0x00000000, bank->base + OMAP24XX_GPIO_IRQENABLE1); |
1586 | __raw_writel(0xffffffff, bank->base + OMAP24XX_GPIO_IRQSTATUS1); | 1586 | __raw_writel(0xffffffff, bank->base + OMAP24XX_GPIO_IRQSTATUS1); |
1587 | __raw_writew(0x0015, bank->base + OMAP24XX_GPIO_SYSCONFIG); | 1587 | __raw_writew(0x0015, bank->base + OMAP24XX_GPIO_SYSCONFIG); |
1588 | __raw_writel(0x00000000, bank->base + OMAP24XX_GPIO_DEBOUNCE_EN); | ||
1588 | 1589 | ||
1589 | /* Initialize interface clock ungated, module enabled */ | 1590 | /* Initialize interface clock ungated, module enabled */ |
1590 | __raw_writel(0, bank->base + OMAP24XX_GPIO_CTRL); | 1591 | __raw_writel(0, bank->base + OMAP24XX_GPIO_CTRL); |
diff --git a/arch/arm/plat-omap/include/mach/cpu.h b/arch/arm/plat-omap/include/mach/cpu.h
index fc60c4ebcc28..285eaa3a8275 100644
--- a/arch/arm/plat-omap/include/mach/cpu.h
+++ b/arch/arm/plat-omap/include/mach/cpu.h
@@ -30,6 +30,17 @@ | |||
30 | #ifndef __ASM_ARCH_OMAP_CPU_H | 30 | #ifndef __ASM_ARCH_OMAP_CPU_H |
31 | #define __ASM_ARCH_OMAP_CPU_H | 31 | #define __ASM_ARCH_OMAP_CPU_H |
32 | 32 | ||
33 | /* | ||
34 | * Omap device type i.e. EMU/HS/TST/GP/BAD | ||
35 | */ | ||
36 | #define OMAP2_DEVICE_TYPE_TEST 0 | ||
37 | #define OMAP2_DEVICE_TYPE_EMU 1 | ||
38 | #define OMAP2_DEVICE_TYPE_SEC 2 | ||
39 | #define OMAP2_DEVICE_TYPE_GP 3 | ||
40 | #define OMAP2_DEVICE_TYPE_BAD 4 | ||
41 | |||
42 | int omap_type(void); | ||
43 | |||
33 | struct omap_chip_id { | 44 | struct omap_chip_id { |
34 | u8 oc; | 45 | u8 oc; |
35 | u8 type; | 46 | u8 type; |
@@ -424,17 +435,6 @@ IS_OMAP_TYPE(3430, 0x3430) | |||
424 | 435 | ||
425 | 436 | ||
426 | int omap_chip_is(struct omap_chip_id oci); | 437 | int omap_chip_is(struct omap_chip_id oci); |
427 | int omap_type(void); | ||
428 | |||
429 | /* | ||
430 | * Macro to detect device type i.e. EMU/HS/TST/GP/BAD | ||
431 | */ | ||
432 | #define OMAP2_DEVICE_TYPE_TEST 0 | ||
433 | #define OMAP2_DEVICE_TYPE_EMU 1 | ||
434 | #define OMAP2_DEVICE_TYPE_SEC 2 | ||
435 | #define OMAP2_DEVICE_TYPE_GP 3 | ||
436 | #define OMAP2_DEVICE_TYPE_BAD 4 | ||
437 | |||
438 | void omap2_check_revision(void); | 438 | void omap2_check_revision(void); |
439 | 439 | ||
440 | #endif /* defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3) */ | 440 | #endif /* defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3) */ |
diff --git a/arch/arm/plat-omap/include/mach/dma.h b/arch/arm/plat-omap/include/mach/dma.h
index 8c1eae88737e..7b939cc01962 100644
--- a/arch/arm/plat-omap/include/mach/dma.h
+++ b/arch/arm/plat-omap/include/mach/dma.h
@@ -389,6 +389,21 @@ | |||
389 | #define DMA_THREAD_FIFO_25 (0x02 << 14) | 389 | #define DMA_THREAD_FIFO_25 (0x02 << 14) |
390 | #define DMA_THREAD_FIFO_50 (0x03 << 14) | 390 | #define DMA_THREAD_FIFO_50 (0x03 << 14) |
391 | 391 | ||
392 | /* DMA4_OCP_SYSCONFIG bits */ | ||
393 | #define DMA_SYSCONFIG_MIDLEMODE_MASK (3 << 12) | ||
394 | #define DMA_SYSCONFIG_CLOCKACTIVITY_MASK (3 << 8) | ||
395 | #define DMA_SYSCONFIG_EMUFREE (1 << 5) | ||
396 | #define DMA_SYSCONFIG_SIDLEMODE_MASK (3 << 3) | ||
397 | #define DMA_SYSCONFIG_SOFTRESET (1 << 2) | ||
398 | #define DMA_SYSCONFIG_AUTOIDLE (1 << 0) | ||
399 | |||
400 | #define DMA_SYSCONFIG_MIDLEMODE(n) ((n) << 12) | ||
401 | #define DMA_SYSCONFIG_SIDLEMODE(n) ((n) << 3) | ||
402 | |||
403 | #define DMA_IDLEMODE_SMARTIDLE 0x2 | ||
404 | #define DMA_IDLEMODE_NO_IDLE 0x1 | ||
405 | #define DMA_IDLEMODE_FORCE_IDLE 0x0 | ||
406 | |||
392 | /* Chaining modes*/ | 407 | /* Chaining modes*/ |
393 | #ifndef CONFIG_ARCH_OMAP1 | 408 | #ifndef CONFIG_ARCH_OMAP1 |
394 | #define OMAP_DMA_STATIC_CHAIN 0x1 | 409 | #define OMAP_DMA_STATIC_CHAIN 0x1 |
diff --git a/arch/arm/plat-omap/include/mach/io.h b/arch/arm/plat-omap/include/mach/io.h
index 3b2814720569..73f483d56ca6 100644
--- a/arch/arm/plat-omap/include/mach/io.h
+++ b/arch/arm/plat-omap/include/mach/io.h
@@ -201,7 +201,7 @@ | |||
201 | #define OMAP2_IO_ADDRESS(pa) IOMEM(__OMAP2_IO_ADDRESS(pa)) | 201 | #define OMAP2_IO_ADDRESS(pa) IOMEM(__OMAP2_IO_ADDRESS(pa)) |
202 | 202 | ||
203 | #ifdef __ASSEMBLER__ | 203 | #ifdef __ASSEMBLER__ |
204 | #define IOMEM(x) x | 204 | #define IOMEM(x) (x) |
205 | #else | 205 | #else |
206 | #define IOMEM(x) ((void __force __iomem *)(x)) | 206 | #define IOMEM(x) ((void __force __iomem *)(x)) |
207 | 207 | ||
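(Editor's aside, not part of the patch.) The added parentheses in the
__ASSEMBLER__ branch are ordinary macro hygiene: without them a composite
argument can re-associate with neighbouring operators after expansion. A
hypothetical illustration:

	#define BAD_IOMEM(x)	x
	#define GOOD_IOMEM(x)	(x)

	/* 2 * BAD_IOMEM(BASE + 0x40)   expands to   2 * BASE + 0x40    -- wrong grouping */
	/* 2 * GOOD_IOMEM(BASE + 0x40)  expands to   2 * (BASE + 0x40)  -- as intended    */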
diff --git a/arch/arm/plat-omap/iommu.c b/arch/arm/plat-omap/iommu.c
index 4cf449fa2cb5..4a0301399013 100644
--- a/arch/arm/plat-omap/iommu.c
+++ b/arch/arm/plat-omap/iommu.c
@@ -298,7 +298,7 @@ void flush_iotlb_page(struct iommu *obj, u32 da) | |||
298 | if ((start <= da) && (da < start + bytes)) { | 298 | if ((start <= da) && (da < start + bytes)) { |
299 | dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n", | 299 | dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n", |
300 | __func__, start, da, bytes); | 300 | __func__, start, da, bytes); |
301 | 301 | iotlb_load_cr(obj, &cr); | |
302 | iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY); | 302 | iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY); |
303 | } | 303 | } |
304 | } | 304 | } |
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index 65006df3f1b7..4ea73804d21e 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -133,7 +133,12 @@ void __init omap_detect_sram(void) | |||
133 | if (cpu_is_omap34xx()) { | 133 | if (cpu_is_omap34xx()) { |
134 | omap_sram_base = OMAP3_SRAM_PUB_VA; | 134 | omap_sram_base = OMAP3_SRAM_PUB_VA; |
135 | omap_sram_start = OMAP3_SRAM_PUB_PA; | 135 | omap_sram_start = OMAP3_SRAM_PUB_PA; |
136 | omap_sram_size = 0x8000; /* 32K */ | 136 | if ((omap_type() == OMAP2_DEVICE_TYPE_EMU) || |
137 | (omap_type() == OMAP2_DEVICE_TYPE_SEC)) { | ||
138 | omap_sram_size = 0x7000; /* 28K */ | ||
139 | } else { | ||
140 | omap_sram_size = 0x8000; /* 32K */ | ||
141 | } | ||
137 | } else { | 142 | } else { |
138 | omap_sram_base = OMAP2_SRAM_PUB_VA; | 143 | omap_sram_base = OMAP2_SRAM_PUB_VA; |
139 | omap_sram_start = OMAP2_SRAM_PUB_PA; | 144 | omap_sram_start = OMAP2_SRAM_PUB_PA; |
diff --git a/arch/arm/plat-s3c/Makefile b/arch/arm/plat-s3c/Makefile
index 74bb7cb5da49..0761766b1833 100644
--- a/arch/arm/plat-s3c/Makefile
+++ b/arch/arm/plat-s3c/Makefile
@@ -34,7 +34,7 @@ obj-$(CONFIG_S3C_DEV_HSMMC) += dev-hsmmc.o | |||
34 | obj-$(CONFIG_S3C_DEV_HSMMC1) += dev-hsmmc1.o | 34 | obj-$(CONFIG_S3C_DEV_HSMMC1) += dev-hsmmc1.o |
35 | obj-y += dev-i2c0.o | 35 | obj-y += dev-i2c0.o |
36 | obj-$(CONFIG_S3C_DEV_I2C1) += dev-i2c1.o | 36 | obj-$(CONFIG_S3C_DEV_I2C1) += dev-i2c1.o |
37 | obj-$(CONFIG_SND_S3C24XX_SOC) += dev-audio.o | 37 | obj-$(CONFIG_SND_S3C64XX_SOC_I2S) += dev-audio.o |
38 | obj-$(CONFIG_S3C_DEV_FB) += dev-fb.o | 38 | obj-$(CONFIG_S3C_DEV_FB) += dev-fb.o |
39 | obj-$(CONFIG_S3C_DEV_USB_HOST) += dev-usb.o | 39 | obj-$(CONFIG_S3C_DEV_USB_HOST) += dev-usb.o |
40 | obj-$(CONFIG_S3C_DEV_USB_HSOTG) += dev-usb-hsotg.o | 40 | obj-$(CONFIG_S3C_DEV_USB_HSOTG) += dev-usb-hsotg.o |
diff --git a/arch/arm/plat-s3c/include/plat/devs.h b/arch/arm/plat-s3c/include/plat/devs.h index b5b9c4d46e9a..2e170827e0b0 100644 --- a/arch/arm/plat-s3c/include/plat/devs.h +++ b/arch/arm/plat-s3c/include/plat/devs.h | |||
@@ -37,6 +37,7 @@ extern struct platform_device s3c_device_i2c1; | |||
37 | extern struct platform_device s3c_device_rtc; | 37 | extern struct platform_device s3c_device_rtc; |
38 | extern struct platform_device s3c_device_adc; | 38 | extern struct platform_device s3c_device_adc; |
39 | extern struct platform_device s3c_device_sdi; | 39 | extern struct platform_device s3c_device_sdi; |
40 | extern struct platform_device s3c_device_iis; | ||
40 | extern struct platform_device s3c_device_hwmon; | 41 | extern struct platform_device s3c_device_hwmon; |
41 | extern struct platform_device s3c_device_hsmmc0; | 42 | extern struct platform_device s3c_device_hsmmc0; |
42 | extern struct platform_device s3c_device_hsmmc1; | 43 | extern struct platform_device s3c_device_hsmmc1; |
diff --git a/arch/arm/plat-s3c24xx/Makefile b/arch/arm/plat-s3c24xx/Makefile index 636cb12711df..579a165c2827 100644 --- a/arch/arm/plat-s3c24xx/Makefile +++ b/arch/arm/plat-s3c24xx/Makefile | |||
@@ -29,7 +29,7 @@ obj-$(CONFIG_PM_SIMTEC) += pm-simtec.o | |||
29 | obj-$(CONFIG_PM) += pm.o | 29 | obj-$(CONFIG_PM) += pm.o |
30 | obj-$(CONFIG_PM) += irq-pm.o | 30 | obj-$(CONFIG_PM) += irq-pm.o |
31 | obj-$(CONFIG_PM) += sleep.o | 31 | obj-$(CONFIG_PM) += sleep.o |
32 | obj-$(CONFIG_HAVE_PWM) += pwm.o | 32 | obj-$(CONFIG_S3C24XX_PWM) += pwm.o |
33 | obj-$(CONFIG_S3C2410_CLOCK) += s3c2410-clock.o | 33 | obj-$(CONFIG_S3C2410_CLOCK) += s3c2410-clock.o |
34 | obj-$(CONFIG_S3C2410_DMA) += dma.o | 34 | obj-$(CONFIG_S3C2410_DMA) += dma.o |
35 | obj-$(CONFIG_S3C24XX_ADC) += adc.o | 35 | obj-$(CONFIG_S3C24XX_ADC) += adc.o |
diff --git a/arch/arm/plat-s3c24xx/spi-bus0-gpe11_12_13.c b/arch/arm/plat-s3c24xx/spi-bus0-gpe11_12_13.c index 9edf7894eedd..da7a61728c18 100644 --- a/arch/arm/plat-s3c24xx/spi-bus0-gpe11_12_13.c +++ b/arch/arm/plat-s3c24xx/spi-bus0-gpe11_12_13.c | |||
@@ -12,8 +12,7 @@ | |||
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
15 | 15 | #include <linux/gpio.h> | |
16 | #include <mach/hardware.h> | ||
17 | 16 | ||
18 | #include <mach/spi.h> | 17 | #include <mach/spi.h> |
19 | #include <mach/regs-gpio.h> | 18 | #include <mach/regs-gpio.h> |
diff --git a/arch/arm/plat-s3c24xx/spi-bus1-gpg5_6_7.c b/arch/arm/plat-s3c24xx/spi-bus1-gpg5_6_7.c index f34d0fc69ad8..86b9edc67413 100644 --- a/arch/arm/plat-s3c24xx/spi-bus1-gpg5_6_7.c +++ b/arch/arm/plat-s3c24xx/spi-bus1-gpg5_6_7.c | |||
@@ -12,8 +12,7 @@ | |||
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
15 | 15 | #include <linux/gpio.h> | |
16 | #include <mach/hardware.h> | ||
17 | 16 | ||
18 | #include <mach/spi.h> | 17 | #include <mach/spi.h> |
19 | #include <mach/regs-gpio.h> | 18 | #include <mach/regs-gpio.h> |
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig index 8a5bd7a9c6f5..b86e19c9b5b0 100644 --- a/arch/frv/Kconfig +++ b/arch/frv/Kconfig | |||
@@ -7,6 +7,7 @@ config FRV | |||
7 | default y | 7 | default y |
8 | select HAVE_IDE | 8 | select HAVE_IDE |
9 | select HAVE_ARCH_TRACEHOOK | 9 | select HAVE_ARCH_TRACEHOOK |
10 | select HAVE_PERF_COUNTERS | ||
10 | 11 | ||
11 | config ZONE_DMA | 12 | config ZONE_DMA |
12 | bool | 13 | bool |
diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h index 0409d981fd39..00a57af79afc 100644 --- a/arch/frv/include/asm/atomic.h +++ b/arch/frv/include/asm/atomic.h | |||
@@ -121,10 +121,72 @@ static inline void atomic_dec(atomic_t *v) | |||
121 | #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0) | 121 | #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0) |
122 | #define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0) | 122 | #define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0) |
123 | 123 | ||
124 | /* | ||
125 | * 64-bit atomic ops | ||
126 | */ | ||
127 | typedef struct { | ||
128 | volatile long long counter; | ||
129 | } atomic64_t; | ||
130 | |||
131 | #define ATOMIC64_INIT(i) { (i) } | ||
132 | |||
133 | static inline long long atomic64_read(atomic64_t *v) | ||
134 | { | ||
135 | long long counter; | ||
136 | |||
137 | asm("ldd%I1 %M1,%0" | ||
138 | : "=e"(counter) | ||
139 | : "m"(v->counter)); | ||
140 | return counter; | ||
141 | } | ||
142 | |||
143 | static inline void atomic64_set(atomic64_t *v, long long i) | ||
144 | { | ||
145 | asm volatile("std%I0 %1,%M0" | ||
146 | : "=m"(v->counter) | ||
147 | : "e"(i)); | ||
148 | } | ||
149 | |||
150 | extern long long atomic64_inc_return(atomic64_t *v); | ||
151 | extern long long atomic64_dec_return(atomic64_t *v); | ||
152 | extern long long atomic64_add_return(long long i, atomic64_t *v); | ||
153 | extern long long atomic64_sub_return(long long i, atomic64_t *v); | ||
154 | |||
155 | static inline long long atomic64_add_negative(long long i, atomic64_t *v) | ||
156 | { | ||
157 | return atomic64_add_return(i, v) < 0; | ||
158 | } | ||
159 | |||
160 | static inline void atomic64_add(long long i, atomic64_t *v) | ||
161 | { | ||
162 | atomic64_add_return(i, v); | ||
163 | } | ||
164 | |||
165 | static inline void atomic64_sub(long long i, atomic64_t *v) | ||
166 | { | ||
167 | atomic64_sub_return(i, v); | ||
168 | } | ||
169 | |||
170 | static inline void atomic64_inc(atomic64_t *v) | ||
171 | { | ||
172 | atomic64_inc_return(v); | ||
173 | } | ||
174 | |||
175 | static inline void atomic64_dec(atomic64_t *v) | ||
176 | { | ||
177 | atomic64_dec_return(v); | ||
178 | } | ||
179 | |||
180 | #define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0) | ||
181 | #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) | ||
182 | #define atomic64_inc_and_test(v) (atomic64_inc_return((v)) == 0) | ||
183 | |||
124 | /*****************************************************************************/ | 184 | /*****************************************************************************/ |
125 | /* | 185 | /* |
126 | * exchange value with memory | 186 | * exchange value with memory |
127 | */ | 187 | */ |
188 | extern uint64_t __xchg_64(uint64_t i, volatile void *v); | ||
189 | |||
128 | #ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS | 190 | #ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS |
129 | 191 | ||
130 | #define xchg(ptr, x) \ | 192 | #define xchg(ptr, x) \ |
@@ -174,8 +236,10 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v); | |||
174 | 236 | ||
175 | #define tas(ptr) (xchg((ptr), 1)) | 237 | #define tas(ptr) (xchg((ptr), 1)) |
176 | 238 | ||
177 | #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new)) | 239 | #define atomic_cmpxchg(v, old, new) (cmpxchg(&(v)->counter, old, new)) |
178 | #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) | 240 | #define atomic_xchg(v, new) (xchg(&(v)->counter, new)) |
241 | #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter)) | ||
242 | #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter)) | ||
179 | 243 | ||
180 | static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) | 244 | static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) |
181 | { | 245 | { |
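The atomic.h hunk above adds a 64-bit atomic type (atomic64_t) and its accessors to FRV. As a minimal usage sketch only, not part of the patch, a 64-bit event counter built on the new API could look like the following; the function names note_event()/events_seen() are invented for illustration, everything else is declared in the hunk above.

	#include <asm/atomic.h>

	static atomic64_t event_count = ATOMIC64_INIT(0);

	static void note_event(void)
	{
		atomic64_inc(&event_count);		/* wraps the out-of-line atomic64_inc_return() */
	}

	static long long events_seen(void)
	{
		return atomic64_read(&event_count);	/* single ldd of v->counter */
	}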
diff --git a/arch/frv/include/asm/perf_counter.h b/arch/frv/include/asm/perf_counter.h new file mode 100644 index 000000000000..ccf726e61b2e --- /dev/null +++ b/arch/frv/include/asm/perf_counter.h | |||
@@ -0,0 +1,17 @@ | |||
1 | /* FRV performance counter support | ||
2 | * | ||
3 | * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public Licence | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the Licence, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef _ASM_PERF_COUNTER_H | ||
13 | #define _ASM_PERF_COUNTER_H | ||
14 | |||
15 | #define PERF_COUNTER_INDEX_OFFSET 0 | ||
16 | |||
17 | #endif /* _ASM_PERF_COUNTER_H */ | ||
diff --git a/arch/frv/include/asm/system.h b/arch/frv/include/asm/system.h index 7742ec000cc4..efd22d9077ac 100644 --- a/arch/frv/include/asm/system.h +++ b/arch/frv/include/asm/system.h | |||
@@ -208,6 +208,8 @@ extern void free_initmem(void); | |||
208 | * - if (*ptr == test) then orig = *ptr; *ptr = test; | 208 | * - if (*ptr == test) then orig = *ptr; *ptr = test; |
209 | * - if (*ptr != test) then orig = *ptr; | 209 | * - if (*ptr != test) then orig = *ptr; |
210 | */ | 210 | */ |
211 | extern uint64_t __cmpxchg_64(uint64_t test, uint64_t new, volatile uint64_t *v); | ||
212 | |||
211 | #ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS | 213 | #ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS |
212 | 214 | ||
213 | #define cmpxchg(ptr, test, new) \ | 215 | #define cmpxchg(ptr, test, new) \ |
diff --git a/arch/frv/include/asm/unistd.h b/arch/frv/include/asm/unistd.h index 96d78d5d2c41..4a8fb427ce0a 100644 --- a/arch/frv/include/asm/unistd.h +++ b/arch/frv/include/asm/unistd.h | |||
@@ -341,10 +341,12 @@ | |||
341 | #define __NR_inotify_init1 332 | 341 | #define __NR_inotify_init1 332 |
342 | #define __NR_preadv 333 | 342 | #define __NR_preadv 333 |
343 | #define __NR_pwritev 334 | 343 | #define __NR_pwritev 334 |
344 | #define __NR_rt_tgsigqueueinfo 335 | ||
345 | #define __NR_perf_counter_open 336 | ||
344 | 346 | ||
345 | #ifdef __KERNEL__ | 347 | #ifdef __KERNEL__ |
346 | 348 | ||
347 | #define NR_syscalls 335 | 349 | #define NR_syscalls 337 |
348 | 350 | ||
349 | #define __ARCH_WANT_IPC_PARSE_VERSION | 351 | #define __ARCH_WANT_IPC_PARSE_VERSION |
350 | /* #define __ARCH_WANT_OLD_READDIR */ | 352 | /* #define __ARCH_WANT_OLD_READDIR */ |
diff --git a/arch/frv/kernel/entry.S b/arch/frv/kernel/entry.S index 356e0e327a89..fde1e446b440 100644 --- a/arch/frv/kernel/entry.S +++ b/arch/frv/kernel/entry.S | |||
@@ -1524,5 +1524,7 @@ sys_call_table: | |||
1524 | .long sys_inotify_init1 | 1524 | .long sys_inotify_init1 |
1525 | .long sys_preadv | 1525 | .long sys_preadv |
1526 | .long sys_pwritev | 1526 | .long sys_pwritev |
1527 | .long sys_rt_tgsigqueueinfo /* 335 */ | ||
1528 | .long sys_perf_counter_open | ||
1527 | 1529 | ||
1528 | syscall_table_size = (. - sys_call_table) | 1530 | syscall_table_size = (. - sys_call_table) |
diff --git a/arch/frv/kernel/frv_ksyms.c b/arch/frv/kernel/frv_ksyms.c index 0316b3c50eff..a89803b58b9a 100644 --- a/arch/frv/kernel/frv_ksyms.c +++ b/arch/frv/kernel/frv_ksyms.c | |||
@@ -67,6 +67,10 @@ EXPORT_SYMBOL(atomic_sub_return); | |||
67 | EXPORT_SYMBOL(__xchg_32); | 67 | EXPORT_SYMBOL(__xchg_32); |
68 | EXPORT_SYMBOL(__cmpxchg_32); | 68 | EXPORT_SYMBOL(__cmpxchg_32); |
69 | #endif | 69 | #endif |
70 | EXPORT_SYMBOL(atomic64_add_return); | ||
71 | EXPORT_SYMBOL(atomic64_sub_return); | ||
72 | EXPORT_SYMBOL(__xchg_64); | ||
73 | EXPORT_SYMBOL(__cmpxchg_64); | ||
70 | 74 | ||
71 | EXPORT_SYMBOL(__debug_bug_printk); | 75 | EXPORT_SYMBOL(__debug_bug_printk); |
72 | EXPORT_SYMBOL(__delay_loops_MHz); | 76 | EXPORT_SYMBOL(__delay_loops_MHz); |
diff --git a/arch/frv/lib/Makefile b/arch/frv/lib/Makefile index 08be305c9f44..0a377210c89b 100644 --- a/arch/frv/lib/Makefile +++ b/arch/frv/lib/Makefile | |||
@@ -4,5 +4,5 @@ | |||
4 | 4 | ||
5 | lib-y := \ | 5 | lib-y := \ |
6 | __ashldi3.o __lshrdi3.o __muldi3.o __ashrdi3.o __negdi2.o __ucmpdi2.o \ | 6 | __ashldi3.o __lshrdi3.o __muldi3.o __ashrdi3.o __negdi2.o __ucmpdi2.o \ |
7 | checksum.o memcpy.o memset.o atomic-ops.o \ | 7 | checksum.o memcpy.o memset.o atomic-ops.o atomic64-ops.o \ |
8 | outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o | 8 | outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o perf_counter.o |
diff --git a/arch/frv/lib/atomic-ops.S b/arch/frv/lib/atomic-ops.S index ee0ac905fb08..5e9e6ab5dd0e 100644 --- a/arch/frv/lib/atomic-ops.S +++ b/arch/frv/lib/atomic-ops.S | |||
@@ -163,11 +163,10 @@ __cmpxchg_32: | |||
163 | ld.p @(gr11,gr0),gr8 | 163 | ld.p @(gr11,gr0),gr8 |
164 | orcr cc7,cc7,cc3 | 164 | orcr cc7,cc7,cc3 |
165 | subcc gr8,gr9,gr7,icc0 | 165 | subcc gr8,gr9,gr7,icc0 |
166 | bne icc0,#0,1f | 166 | bnelr icc0,#0 |
167 | cst.p gr10,@(gr11,gr0) ,cc3,#1 | 167 | cst.p gr10,@(gr11,gr0) ,cc3,#1 |
168 | corcc gr29,gr29,gr0 ,cc3,#1 | 168 | corcc gr29,gr29,gr0 ,cc3,#1 |
169 | beq icc3,#0,0b | 169 | beq icc3,#0,0b |
170 | 1: | ||
171 | bralr | 170 | bralr |
172 | 171 | ||
173 | .size __cmpxchg_32, .-__cmpxchg_32 | 172 | .size __cmpxchg_32, .-__cmpxchg_32 |
diff --git a/arch/frv/lib/atomic64-ops.S b/arch/frv/lib/atomic64-ops.S new file mode 100644 index 000000000000..b6194eeac127 --- /dev/null +++ b/arch/frv/lib/atomic64-ops.S | |||
@@ -0,0 +1,162 @@ | |||
1 | /* kernel atomic64 operations | ||
2 | * | ||
3 | * For an explanation of how atomic ops work in this arch, see: | ||
4 | * Documentation/frv/atomic-ops.txt | ||
5 | * | ||
6 | * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved. | ||
7 | * Written by David Howells (dhowells@redhat.com) | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License | ||
11 | * as published by the Free Software Foundation; either version | ||
12 | * 2 of the License, or (at your option) any later version. | ||
13 | */ | ||
14 | |||
15 | #include <asm/spr-regs.h> | ||
16 | |||
17 | .text | ||
18 | .balign 4 | ||
19 | |||
20 | |||
21 | ############################################################################### | ||
22 | # | ||
23 | # long long atomic64_inc_return(atomic64_t *v) | ||
24 | # | ||
25 | ############################################################################### | ||
26 | .globl atomic64_inc_return | ||
27 | .type atomic64_inc_return,@function | ||
28 | atomic64_inc_return: | ||
29 | or.p gr8,gr8,gr10 | ||
30 | 0: | ||
31 | orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */ | ||
32 | ckeq icc3,cc7 | ||
33 | ldd.p @(gr10,gr0),gr8 /* LDD.P/ORCR must be atomic */ | ||
34 | orcr cc7,cc7,cc3 /* set CC3 to true */ | ||
35 | addicc gr9,#1,gr9,icc0 | ||
36 | addxi gr8,#0,gr8,icc0 | ||
37 | cstd.p gr8,@(gr10,gr0) ,cc3,#1 | ||
38 | corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */ | ||
39 | beq icc3,#0,0b | ||
40 | bralr | ||
41 | |||
42 | .size atomic64_inc_return, .-atomic64_inc_return | ||
43 | |||
44 | ############################################################################### | ||
45 | # | ||
46 | # long long atomic64_dec_return(atomic64_t *v) | ||
47 | # | ||
48 | ############################################################################### | ||
49 | .globl atomic64_dec_return | ||
50 | .type atomic64_dec_return,@function | ||
51 | atomic64_dec_return: | ||
52 | or.p gr8,gr8,gr10 | ||
53 | 0: | ||
54 | orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */ | ||
55 | ckeq icc3,cc7 | ||
56 | ldd.p @(gr10,gr0),gr8 /* LDD.P/ORCR must be atomic */ | ||
57 | orcr cc7,cc7,cc3 /* set CC3 to true */ | ||
58 | subicc gr9,#1,gr9,icc0 | ||
59 | subxi gr8,#0,gr8,icc0 | ||
60 | cstd.p gr8,@(gr10,gr0) ,cc3,#1 | ||
61 | corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */ | ||
62 | beq icc3,#0,0b | ||
63 | bralr | ||
64 | |||
65 | .size atomic64_dec_return, .-atomic64_dec_return | ||
66 | |||
67 | ############################################################################### | ||
68 | # | ||
69 | # long long atomic64_add_return(long long i, atomic64_t *v) | ||
70 | # | ||
71 | ############################################################################### | ||
72 | .globl atomic64_add_return | ||
73 | .type atomic64_add_return,@function | ||
74 | atomic64_add_return: | ||
75 | or.p gr8,gr8,gr4 | ||
76 | or gr9,gr9,gr5 | ||
77 | 0: | ||
78 | orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */ | ||
79 | ckeq icc3,cc7 | ||
80 | ldd.p @(gr10,gr0),gr8 /* LDD.P/ORCR must be atomic */ | ||
81 | orcr cc7,cc7,cc3 /* set CC3 to true */ | ||
82 | addcc gr9,gr5,gr9,icc0 | ||
83 | addx gr8,gr4,gr8,icc0 | ||
84 | cstd.p gr8,@(gr10,gr0) ,cc3,#1 | ||
85 | corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */ | ||
86 | beq icc3,#0,0b | ||
87 | bralr | ||
88 | |||
89 | .size atomic64_add_return, .-atomic64_add_return | ||
90 | |||
91 | ############################################################################### | ||
92 | # | ||
93 | # long long atomic64_sub_return(long long i, atomic64_t *v) | ||
94 | # | ||
95 | ############################################################################### | ||
96 | .globl atomic64_sub_return | ||
97 | .type atomic64_sub_return,@function | ||
98 | atomic64_sub_return: | ||
99 | or.p gr8,gr8,gr4 | ||
100 | or gr9,gr9,gr5 | ||
101 | 0: | ||
102 | orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */ | ||
103 | ckeq icc3,cc7 | ||
104 | ldd.p @(gr10,gr0),gr8 /* LDD.P/ORCR must be atomic */ | ||
105 | orcr cc7,cc7,cc3 /* set CC3 to true */ | ||
106 | subcc gr9,gr5,gr9,icc0 | ||
107 | subx gr8,gr4,gr8,icc0 | ||
108 | cstd.p gr8,@(gr10,gr0) ,cc3,#1 | ||
109 | corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */ | ||
110 | beq icc3,#0,0b | ||
111 | bralr | ||
112 | |||
113 | .size atomic64_sub_return, .-atomic64_sub_return | ||
114 | |||
115 | ############################################################################### | ||
116 | # | ||
117 | # uint64_t __xchg_64(uint64_t i, uint64_t *v) | ||
118 | # | ||
119 | ############################################################################### | ||
120 | .globl __xchg_64 | ||
121 | .type __xchg_64,@function | ||
122 | __xchg_64: | ||
123 | or.p gr8,gr8,gr4 | ||
124 | or gr9,gr9,gr5 | ||
125 | 0: | ||
126 | orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */ | ||
127 | ckeq icc3,cc7 | ||
128 | ldd.p @(gr10,gr0),gr8 /* LDD.P/ORCR must be atomic */ | ||
129 | orcr cc7,cc7,cc3 /* set CC3 to true */ | ||
130 | cstd.p gr4,@(gr10,gr0) ,cc3,#1 | ||
131 | corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */ | ||
132 | beq icc3,#0,0b | ||
133 | bralr | ||
134 | |||
135 | .size __xchg_64, .-__xchg_64 | ||
136 | |||
137 | ############################################################################### | ||
138 | # | ||
139 | # uint64_t __cmpxchg_64(uint64_t test, uint64_t new, uint64_t *v) | ||
140 | # | ||
141 | ############################################################################### | ||
142 | .globl __cmpxchg_64 | ||
143 | .type __cmpxchg_64,@function | ||
144 | __cmpxchg_64: | ||
145 | or.p gr8,gr8,gr4 | ||
146 | or gr9,gr9,gr5 | ||
147 | 0: | ||
148 | orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */ | ||
149 | ckeq icc3,cc7 | ||
150 | ldd.p @(gr12,gr0),gr8 /* LDD.P/ORCR must be atomic */ | ||
151 | orcr cc7,cc7,cc3 | ||
152 | subcc gr8,gr4,gr0,icc0 | ||
153 | subcc.p gr9,gr5,gr0,icc1 | ||
154 | bnelr icc0,#0 | ||
155 | bnelr icc1,#0 | ||
156 | cstd.p gr10,@(gr12,gr0) ,cc3,#1 | ||
157 | corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */ | ||
158 | beq icc3,#0,0b | ||
159 | bralr | ||
160 | |||
161 | .size __cmpxchg_64, .-__cmpxchg_64 | ||
162 | |||
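For readers less familiar with FRV assembly, the plain-C sketch below (an illustration, not the kernel implementation) expresses the semantics the __cmpxchg_64 loop above provides: the 64-bit store happens only when the current value equals 'test', and the value found in memory is returned either way. The GCC __atomic builtin is used purely to model that behaviour; the real routine relies on the LDD.P/ORCR/CSTD.P reservation sequence shown above.

	#include <stdint.h>

	static uint64_t cmpxchg_64_sketch(uint64_t test, uint64_t new_val,
					  volatile uint64_t *v)
	{
		uint64_t expected = test;

		/* store new_val only if *v == expected; on failure,
		 * expected is updated to the value found in memory */
		__atomic_compare_exchange_n(v, &expected, new_val, 0,
					    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);

		return expected;	/* == test iff the exchange happened */
	}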
diff --git a/arch/frv/lib/perf_counter.c b/arch/frv/lib/perf_counter.c new file mode 100644 index 000000000000..2000feecd571 --- /dev/null +++ b/arch/frv/lib/perf_counter.c | |||
@@ -0,0 +1,19 @@ | |||
1 | /* Performance counter handling | ||
2 | * | ||
3 | * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public Licence | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the Licence, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/perf_counter.h> | ||
13 | |||
14 | /* | ||
15 | * mark the performance counter as pending | ||
16 | */ | ||
17 | void set_perf_counter_pending(void) | ||
18 | { | ||
19 | } | ||
diff --git a/arch/ia64/kernel/esi.c b/arch/ia64/kernel/esi.c index ebf4e988e78c..d5764a3d74af 100644 --- a/arch/ia64/kernel/esi.c +++ b/arch/ia64/kernel/esi.c | |||
@@ -65,7 +65,7 @@ static int __init esi_init (void) | |||
65 | } | 65 | } |
66 | 66 | ||
67 | if (!esi) | 67 | if (!esi) |
68 | return -ENODEV;; | 68 | return -ENODEV; |
69 | 69 | ||
70 | systab = __va(esi); | 70 | systab = __va(esi); |
71 | 71 | ||
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index abce2468a40b..f1782705b1f7 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c | |||
@@ -5603,7 +5603,7 @@ pfm_interrupt_handler(int irq, void *arg) | |||
5603 | * /proc/perfmon interface, for debug only | 5603 | * /proc/perfmon interface, for debug only |
5604 | */ | 5604 | */ |
5605 | 5605 | ||
5606 | #define PFM_PROC_SHOW_HEADER ((void *)nr_cpu_ids+1) | 5606 | #define PFM_PROC_SHOW_HEADER ((void *)(long)nr_cpu_ids+1) |
5607 | 5607 | ||
5608 | static void * | 5608 | static void * |
5609 | pfm_proc_start(struct seq_file *m, loff_t *pos) | 5609 | pfm_proc_start(struct seq_file *m, loff_t *pos) |
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c index 7053c55b7649..e6676fca4828 100644 --- a/arch/ia64/kernel/salinfo.c +++ b/arch/ia64/kernel/salinfo.c | |||
@@ -192,7 +192,7 @@ struct salinfo_platform_oemdata_parms { | |||
192 | static void | 192 | static void |
193 | salinfo_work_to_do(struct salinfo_data *data) | 193 | salinfo_work_to_do(struct salinfo_data *data) |
194 | { | 194 | { |
195 | down_trylock(&data->mutex); | 195 | (void)(down_trylock(&data->mutex) ?: 0); |
196 | up(&data->mutex); | 196 | up(&data->mutex); |
197 | } | 197 | } |
198 | 198 | ||
diff --git a/arch/ia64/kvm/kvm_lib.c b/arch/ia64/kvm/kvm_lib.c index a85cb611ecd7..f1268b8e6f9e 100644 --- a/arch/ia64/kvm/kvm_lib.c +++ b/arch/ia64/kvm/kvm_lib.c | |||
@@ -11,5 +11,11 @@ | |||
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | #undef CONFIG_MODULES | 13 | #undef CONFIG_MODULES |
14 | #include <linux/module.h> | ||
15 | #undef CONFIG_KALLSYMS | ||
16 | #undef EXPORT_SYMBOL | ||
17 | #undef EXPORT_SYMBOL_GPL | ||
18 | #define EXPORT_SYMBOL(sym) | ||
19 | #define EXPORT_SYMBOL_GPL(sym) | ||
14 | #include "../../../lib/vsprintf.c" | 20 | #include "../../../lib/vsprintf.c" |
15 | #include "../../../lib/ctype.c" | 21 | #include "../../../lib/ctype.c" |
diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c index a8f84da04b49..bb862fb224f2 100644 --- a/arch/ia64/kvm/process.c +++ b/arch/ia64/kvm/process.c | |||
@@ -130,7 +130,7 @@ static void collect_interruption(struct kvm_vcpu *vcpu) | |||
130 | if (vdcr & IA64_DCR_PP) { | 130 | if (vdcr & IA64_DCR_PP) { |
131 | vpsr |= IA64_PSR_PP; | 131 | vpsr |= IA64_PSR_PP; |
132 | } else { | 132 | } else { |
133 | vpsr &= ~IA64_PSR_PP;; | 133 | vpsr &= ~IA64_PSR_PP; |
134 | } | 134 | } |
135 | 135 | ||
136 | vcpu_set_psr(vcpu, vpsr); | 136 | vcpu_set_psr(vcpu, vpsr); |
@@ -594,11 +594,11 @@ static void set_pal_call_data(struct kvm_vcpu *vcpu) | |||
594 | p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30); | 594 | p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30); |
595 | break; | 595 | break; |
596 | case PAL_BRAND_INFO: | 596 | case PAL_BRAND_INFO: |
597 | p->u.pal_data.gr29 = gr29;; | 597 | p->u.pal_data.gr29 = gr29; |
598 | p->u.pal_data.gr30 = kvm_trans_pal_call_args(vcpu, gr30); | 598 | p->u.pal_data.gr30 = kvm_trans_pal_call_args(vcpu, gr30); |
599 | break; | 599 | break; |
600 | default: | 600 | default: |
601 | p->u.pal_data.gr29 = gr29;; | 601 | p->u.pal_data.gr29 = gr29; |
602 | p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30); | 602 | p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30); |
603 | } | 603 | } |
604 | p->u.pal_data.gr28 = gr28; | 604 | p->u.pal_data.gr28 = gr28; |
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c index a2c6c15e4761..46b02cbcc874 100644 --- a/arch/ia64/kvm/vcpu.c +++ b/arch/ia64/kvm/vcpu.c | |||
@@ -406,7 +406,7 @@ void getreg(unsigned long regnum, unsigned long *val, | |||
406 | * Now look at registers in [0-31] range and init correct UNAT | 406 | * Now look at registers in [0-31] range and init correct UNAT |
407 | */ | 407 | */ |
408 | addr = (unsigned long)regs; | 408 | addr = (unsigned long)regs; |
409 | unat = ®s->eml_unat;; | 409 | unat = ®s->eml_unat; |
410 | 410 | ||
411 | addr += gr_info[regnum]; | 411 | addr += gr_info[regnum]; |
412 | 412 | ||
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c index 4290a429bf7c..20b3852f7a6e 100644 --- a/arch/ia64/kvm/vtlb.c +++ b/arch/ia64/kvm/vtlb.c | |||
@@ -135,7 +135,7 @@ struct thash_data *__vtr_lookup(struct kvm_vcpu *vcpu, u64 va, int type) | |||
135 | u64 rid; | 135 | u64 rid; |
136 | 136 | ||
137 | rid = vcpu_get_rr(vcpu, va); | 137 | rid = vcpu_get_rr(vcpu, va); |
138 | rid = rid & RR_RID_MASK;; | 138 | rid = rid & RR_RID_MASK; |
139 | if (type == D_TLB) { | 139 | if (type == D_TLB) { |
140 | if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) { | 140 | if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) { |
141 | for (trp = (struct thash_data *)&vcpu->arch.dtrs, i = 0; | 141 | for (trp = (struct thash_data *)&vcpu->arch.dtrs, i = 0; |
@@ -518,7 +518,7 @@ struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data) | |||
518 | 518 | ||
519 | struct thash_cb *hcb = &v->arch.vtlb; | 519 | struct thash_cb *hcb = &v->arch.vtlb; |
520 | 520 | ||
521 | cch = __vtr_lookup(v, va, is_data);; | 521 | cch = __vtr_lookup(v, va, is_data); |
522 | if (cch) | 522 | if (cch) |
523 | return cch; | 523 | return cch; |
524 | 524 | ||
diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c index 76645cf6ac5d..25831c47c579 100644 --- a/arch/ia64/sn/kernel/io_common.c +++ b/arch/ia64/sn/kernel/io_common.c | |||
@@ -435,7 +435,8 @@ void sn_generate_path(struct pci_bus *pci_bus, char *address) | |||
435 | bricktype = MODULE_GET_BTYPE(moduleid); | 435 | bricktype = MODULE_GET_BTYPE(moduleid); |
436 | if ((bricktype == L1_BRICKTYPE_191010) || | 436 | if ((bricktype == L1_BRICKTYPE_191010) || |
437 | (bricktype == L1_BRICKTYPE_1932)) | 437 | (bricktype == L1_BRICKTYPE_1932)) |
438 | sprintf(address, "%s^%d", address, geo_slot(geoid)); | 438 | sprintf(address + strlen(address), "^%d", |
439 | geo_slot(geoid)); | ||
439 | } | 440 | } |
440 | 441 | ||
441 | void __devinit | 442 | void __devinit |
diff --git a/arch/mn10300/include/asm/unistd.h b/arch/mn10300/include/asm/unistd.h index fef5b434dadc..fad68616af32 100644 --- a/arch/mn10300/include/asm/unistd.h +++ b/arch/mn10300/include/asm/unistd.h | |||
@@ -346,10 +346,12 @@ | |||
346 | #define __NR_inotify_init1 333 | 346 | #define __NR_inotify_init1 333 |
347 | #define __NR_preadv 334 | 347 | #define __NR_preadv 334 |
348 | #define __NR_pwritev 335 | 348 | #define __NR_pwritev 335 |
349 | #define __NR_rt_tgsigqueueinfo 336 | ||
350 | #define __NR_perf_counter_open 337 | ||
349 | 351 | ||
350 | #ifdef __KERNEL__ | 352 | #ifdef __KERNEL__ |
351 | 353 | ||
352 | #define NR_syscalls 326 | 354 | #define NR_syscalls 338 |
353 | 355 | ||
354 | /* | 356 | /* |
355 | * specify the deprecated syscalls we want to support on this arch | 357 | * specify the deprecated syscalls we want to support on this arch |
diff --git a/arch/mn10300/kernel/entry.S b/arch/mn10300/kernel/entry.S index 7408a27199f3..e0d2563af4f2 100644 --- a/arch/mn10300/kernel/entry.S +++ b/arch/mn10300/kernel/entry.S | |||
@@ -722,6 +722,8 @@ ENTRY(sys_call_table) | |||
722 | .long sys_inotify_init1 | 722 | .long sys_inotify_init1 |
723 | .long sys_preadv | 723 | .long sys_preadv |
724 | .long sys_pwritev /* 335 */ | 724 | .long sys_pwritev /* 335 */ |
725 | .long sys_rt_tgsigqueueinfo | ||
726 | .long sys_perf_counter_open | ||
725 | 727 | ||
726 | 728 | ||
727 | nr_syscalls=(.-sys_call_table)/4 | 729 | nr_syscalls=(.-sys_call_table)/4 |
diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_counter.h index 8ccd4e155768..0ea0639fcf75 100644 --- a/arch/powerpc/include/asm/perf_counter.h +++ b/arch/powerpc/include/asm/perf_counter.h | |||
@@ -61,6 +61,8 @@ struct pt_regs; | |||
61 | extern unsigned long perf_misc_flags(struct pt_regs *regs); | 61 | extern unsigned long perf_misc_flags(struct pt_regs *regs); |
62 | extern unsigned long perf_instruction_pointer(struct pt_regs *regs); | 62 | extern unsigned long perf_instruction_pointer(struct pt_regs *regs); |
63 | 63 | ||
64 | #define PERF_COUNTER_INDEX_OFFSET 1 | ||
65 | |||
64 | /* | 66 | /* |
65 | * Only override the default definitions in include/linux/perf_counter.h | 67 | * Only override the default definitions in include/linux/perf_counter.h |
66 | * if we have hardware PMU support. | 68 | * if we have hardware PMU support. |
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index a27d0d5a6f86..1cd02f6073a0 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h | |||
@@ -99,7 +99,9 @@ struct kvm_s390_sie_block { | |||
99 | __u8 reservedd0[48]; /* 0x00d0 */ | 99 | __u8 reservedd0[48]; /* 0x00d0 */ |
100 | __u64 gcr[16]; /* 0x0100 */ | 100 | __u64 gcr[16]; /* 0x0100 */ |
101 | __u64 gbea; /* 0x0180 */ | 101 | __u64 gbea; /* 0x0180 */ |
102 | __u8 reserved188[120]; /* 0x0188 */ | 102 | __u8 reserved188[24]; /* 0x0188 */ |
103 | __u32 fac; /* 0x01a0 */ | ||
104 | __u8 reserved1a4[92]; /* 0x01a4 */ | ||
103 | } __attribute__((packed)); | 105 | } __attribute__((packed)); |
104 | 106 | ||
105 | struct kvm_vcpu_stat { | 107 | struct kvm_vcpu_stat { |
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index c18b21d6991c..90d9d1ba258b 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <asm/lowcore.h> | 25 | #include <asm/lowcore.h> |
26 | #include <asm/pgtable.h> | 26 | #include <asm/pgtable.h> |
27 | #include <asm/nmi.h> | 27 | #include <asm/nmi.h> |
28 | #include <asm/system.h> | ||
28 | #include "kvm-s390.h" | 29 | #include "kvm-s390.h" |
29 | #include "gaccess.h" | 30 | #include "gaccess.h" |
30 | 31 | ||
@@ -69,6 +70,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { | |||
69 | { NULL } | 70 | { NULL } |
70 | }; | 71 | }; |
71 | 72 | ||
73 | static unsigned long long *facilities; | ||
72 | 74 | ||
73 | /* Section: not file related */ | 75 | /* Section: not file related */ |
74 | void kvm_arch_hardware_enable(void *garbage) | 76 | void kvm_arch_hardware_enable(void *garbage) |
@@ -288,6 +290,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | |||
288 | vcpu->arch.sie_block->gmsor = vcpu->kvm->arch.guest_origin; | 290 | vcpu->arch.sie_block->gmsor = vcpu->kvm->arch.guest_origin; |
289 | vcpu->arch.sie_block->ecb = 2; | 291 | vcpu->arch.sie_block->ecb = 2; |
290 | vcpu->arch.sie_block->eca = 0xC1002001U; | 292 | vcpu->arch.sie_block->eca = 0xC1002001U; |
293 | vcpu->arch.sie_block->fac = (int) (long) facilities; | ||
291 | hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); | 294 | hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); |
292 | tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet, | 295 | tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet, |
293 | (unsigned long) vcpu); | 296 | (unsigned long) vcpu); |
@@ -739,11 +742,29 @@ gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn) | |||
739 | 742 | ||
740 | static int __init kvm_s390_init(void) | 743 | static int __init kvm_s390_init(void) |
741 | { | 744 | { |
742 | return kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE); | 745 | int ret; |
746 | ret = kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE); | ||
747 | if (ret) | ||
748 | return ret; | ||
749 | |||
750 | /* | ||
751 | * guests can ask for up to 255+1 double words, we need a full page | ||
752 | * to hold the maximum amount of facilites. On the other hand, we | ||
753 | * only set facilities that are known to work in KVM. | ||
754 | */ | ||
755 | facilities = (unsigned long long *) get_zeroed_page(GFP_DMA); | ||
756 | if (!facilities) { | ||
757 | kvm_exit(); | ||
758 | return -ENOMEM; | ||
759 | } | ||
760 | stfle(facilities, 1); | ||
761 | facilities[0] &= 0xff00fff3f0700000ULL; | ||
762 | return 0; | ||
743 | } | 763 | } |
744 | 764 | ||
745 | static void __exit kvm_s390_exit(void) | 765 | static void __exit kvm_s390_exit(void) |
746 | { | 766 | { |
767 | free_page((unsigned long) facilities); | ||
747 | kvm_exit(); | 768 | kvm_exit(); |
748 | } | 769 | } |
749 | 770 | ||
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 93ecd06e1a74..d426aac8095d 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c | |||
@@ -158,7 +158,7 @@ static int handle_stfl(struct kvm_vcpu *vcpu) | |||
158 | 158 | ||
159 | vcpu->stat.instruction_stfl++; | 159 | vcpu->stat.instruction_stfl++; |
160 | /* only pass the facility bits, which we can handle */ | 160 | /* only pass the facility bits, which we can handle */ |
161 | facility_list &= 0xfe00fff3; | 161 | facility_list &= 0xff00fff3; |
162 | 162 | ||
163 | rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list), | 163 | rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list), |
164 | &facility_list, sizeof(facility_list)); | 164 | &facility_list, sizeof(facility_list)); |
diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug index 8ece0b5bd028..39224b57c6ef 100644 --- a/arch/sh/Kconfig.debug +++ b/arch/sh/Kconfig.debug | |||
@@ -61,10 +61,6 @@ config EARLY_PRINTK | |||
61 | select both the EARLY_SCIF_CONSOLE and SH_STANDARD_BIOS, using | 61 | select both the EARLY_SCIF_CONSOLE and SH_STANDARD_BIOS, using |
62 | the kernel command line option to toggle back and forth. | 62 | the kernel command line option to toggle back and forth. |
63 | 63 | ||
64 | config DEBUG_BOOTMEM | ||
65 | depends on DEBUG_KERNEL | ||
66 | bool "Debug BOOTMEM initialization" | ||
67 | |||
68 | config DEBUG_STACKOVERFLOW | 64 | config DEBUG_STACKOVERFLOW |
69 | bool "Check for stack overflows" | 65 | bool "Check for stack overflows" |
70 | depends on DEBUG_KERNEL && SUPERH32 | 66 | depends on DEBUG_KERNEL && SUPERH32 |
diff --git a/arch/sh/boards/mach-se/7206/io.c b/arch/sh/boards/mach-se/7206/io.c index 9c3a33210d61..180455642a43 100644 --- a/arch/sh/boards/mach-se/7206/io.c +++ b/arch/sh/boards/mach-se/7206/io.c | |||
@@ -50,7 +50,7 @@ unsigned char se7206_inb_p(unsigned long port) | |||
50 | 50 | ||
51 | unsigned short se7206_inw(unsigned long port) | 51 | unsigned short se7206_inw(unsigned long port) |
52 | { | 52 | { |
53 | return *port2adr(port);; | 53 | return *port2adr(port); |
54 | } | 54 | } |
55 | 55 | ||
56 | void se7206_outb(unsigned char value, unsigned long port) | 56 | void se7206_outb(unsigned char value, unsigned long port) |
diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c index 9cd04bd558b8..c050a8d76dfd 100644 --- a/arch/sh/boards/mach-se/7724/setup.c +++ b/arch/sh/boards/mach-se/7724/setup.c | |||
@@ -23,6 +23,8 @@ | |||
23 | #include <media/sh_mobile_ceu.h> | 23 | #include <media/sh_mobile_ceu.h> |
24 | #include <asm/io.h> | 24 | #include <asm/io.h> |
25 | #include <asm/heartbeat.h> | 25 | #include <asm/heartbeat.h> |
26 | #include <asm/sh_eth.h> | ||
27 | #include <asm/clock.h> | ||
26 | #include <asm/sh_keysc.h> | 28 | #include <asm/sh_keysc.h> |
27 | #include <cpu/sh7724.h> | 29 | #include <cpu/sh7724.h> |
28 | #include <mach-se/mach/se7724.h> | 30 | #include <mach-se/mach/se7724.h> |
@@ -272,6 +274,34 @@ static struct platform_device keysc_device = { | |||
272 | }, | 274 | }, |
273 | }; | 275 | }; |
274 | 276 | ||
277 | /* SH Eth */ | ||
278 | static struct resource sh_eth_resources[] = { | ||
279 | [0] = { | ||
280 | .start = SH_ETH_ADDR, | ||
281 | .end = SH_ETH_ADDR + 0x1FC, | ||
282 | .flags = IORESOURCE_MEM, | ||
283 | }, | ||
284 | [1] = { | ||
285 | .start = 91, | ||
286 | .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, | ||
287 | }, | ||
288 | }; | ||
289 | |||
290 | struct sh_eth_plat_data sh_eth_plat = { | ||
291 | .phy = 0x1f, /* SMSC LAN8187 */ | ||
292 | .edmac_endian = EDMAC_LITTLE_ENDIAN, | ||
293 | }; | ||
294 | |||
295 | static struct platform_device sh_eth_device = { | ||
296 | .name = "sh-eth", | ||
297 | .id = 0, | ||
298 | .dev = { | ||
299 | .platform_data = &sh_eth_plat, | ||
300 | }, | ||
301 | .num_resources = ARRAY_SIZE(sh_eth_resources), | ||
302 | .resource = sh_eth_resources, | ||
303 | }; | ||
304 | |||
275 | static struct platform_device *ms7724se_devices[] __initdata = { | 305 | static struct platform_device *ms7724se_devices[] __initdata = { |
276 | &heartbeat_device, | 306 | &heartbeat_device, |
277 | &smc91x_eth_device, | 307 | &smc91x_eth_device, |
@@ -280,8 +310,57 @@ static struct platform_device *ms7724se_devices[] __initdata = { | |||
280 | &ceu0_device, | 310 | &ceu0_device, |
281 | &ceu1_device, | 311 | &ceu1_device, |
282 | &keysc_device, | 312 | &keysc_device, |
313 | &sh_eth_device, | ||
283 | }; | 314 | }; |
284 | 315 | ||
316 | #define EEPROM_OP 0xBA206000 | ||
317 | #define EEPROM_ADR 0xBA206004 | ||
318 | #define EEPROM_DATA 0xBA20600C | ||
319 | #define EEPROM_STAT 0xBA206010 | ||
320 | #define EEPROM_STRT 0xBA206014 | ||
321 | static int __init sh_eth_is_eeprom_ready(void) | ||
322 | { | ||
323 | int t = 10000; | ||
324 | |||
325 | while (t--) { | ||
326 | if (!ctrl_inw(EEPROM_STAT)) | ||
327 | return 1; | ||
328 | cpu_relax(); | ||
329 | } | ||
330 | |||
331 | printk(KERN_ERR "ms7724se can not access to eeprom\n"); | ||
332 | return 0; | ||
333 | } | ||
334 | |||
335 | static void __init sh_eth_init(void) | ||
336 | { | ||
337 | int i; | ||
338 | u16 mac[3]; | ||
339 | |||
340 | /* check EEPROM status */ | ||
341 | if (!sh_eth_is_eeprom_ready()) | ||
342 | return; | ||
343 | |||
344 | /* read MAC addr from EEPROM */ | ||
345 | for (i = 0 ; i < 3 ; i++) { | ||
346 | ctrl_outw(0x0, EEPROM_OP); /* read */ | ||
347 | ctrl_outw(i*2, EEPROM_ADR); | ||
348 | ctrl_outw(0x1, EEPROM_STRT); | ||
349 | if (!sh_eth_is_eeprom_ready()) | ||
350 | return; | ||
351 | |||
352 | mac[i] = ctrl_inw(EEPROM_DATA); | ||
353 | mac[i] = ((mac[i] & 0xFF) << 8) | (mac[i] >> 8); /* swap */ | ||
354 | } | ||
355 | |||
356 | /* reset sh-eth */ | ||
357 | ctrl_outl(0x1, SH_ETH_ADDR + 0x0); | ||
358 | |||
359 | /* set MAC addr */ | ||
360 | ctrl_outl(((mac[0] << 16) | (mac[1])), SH_ETH_MAHR); | ||
361 | ctrl_outl((mac[2]), SH_ETH_MALR); | ||
362 | } | ||
363 | |||
285 | #define SW4140 0xBA201000 | 364 | #define SW4140 0xBA201000 |
286 | #define FPGA_OUT 0xBA200400 | 365 | #define FPGA_OUT 0xBA200400 |
287 | #define PORT_HIZA 0xA4050158 | 366 | #define PORT_HIZA 0xA4050158 |
@@ -302,7 +381,8 @@ static int __init devices_setup(void) | |||
302 | ctrl_outw(ctrl_inw(FPGA_OUT) & | 381 | ctrl_outw(ctrl_inw(FPGA_OUT) & |
303 | ~((1 << 1) | /* LAN */ | 382 | ~((1 << 1) | /* LAN */ |
304 | (1 << 6) | /* VIDEO DAC */ | 383 | (1 << 6) | /* VIDEO DAC */ |
305 | (1 << 12)), /* USB0 */ | 384 | (1 << 12) | /* USB0 */ |
385 | (1 << 14)), /* RMII */ | ||
306 | FPGA_OUT); | 386 | FPGA_OUT); |
307 | 387 | ||
308 | /* enable IRQ 0,1,2 */ | 388 | /* enable IRQ 0,1,2 */ |
@@ -374,7 +454,7 @@ static int __init devices_setup(void) | |||
374 | gpio_request(GPIO_FN_VIO0_CLK, NULL); | 454 | gpio_request(GPIO_FN_VIO0_CLK, NULL); |
375 | gpio_request(GPIO_FN_VIO0_FLD, NULL); | 455 | gpio_request(GPIO_FN_VIO0_FLD, NULL); |
376 | gpio_request(GPIO_FN_VIO0_HD, NULL); | 456 | gpio_request(GPIO_FN_VIO0_HD, NULL); |
377 | platform_resource_setup_memory(&ceu0_device, "ceu", 4 << 20); | 457 | platform_resource_setup_memory(&ceu0_device, "ceu0", 4 << 20); |
378 | 458 | ||
379 | /* enable CEU1 */ | 459 | /* enable CEU1 */ |
380 | gpio_request(GPIO_FN_VIO1_D7, NULL); | 460 | gpio_request(GPIO_FN_VIO1_D7, NULL); |
@@ -389,7 +469,7 @@ static int __init devices_setup(void) | |||
389 | gpio_request(GPIO_FN_VIO1_HD, NULL); | 469 | gpio_request(GPIO_FN_VIO1_HD, NULL); |
390 | gpio_request(GPIO_FN_VIO1_VD, NULL); | 470 | gpio_request(GPIO_FN_VIO1_VD, NULL); |
391 | gpio_request(GPIO_FN_VIO1_CLK, NULL); | 471 | gpio_request(GPIO_FN_VIO1_CLK, NULL); |
392 | platform_resource_setup_memory(&ceu1_device, "ceu", 4 << 20); | 472 | platform_resource_setup_memory(&ceu1_device, "ceu1", 4 << 20); |
393 | 473 | ||
394 | /* KEYSC */ | 474 | /* KEYSC */ |
395 | gpio_request(GPIO_FN_KEYOUT5_IN5, NULL); | 475 | gpio_request(GPIO_FN_KEYOUT5_IN5, NULL); |
@@ -404,6 +484,28 @@ static int __init devices_setup(void) | |||
404 | gpio_request(GPIO_FN_KEYOUT1, NULL); | 484 | gpio_request(GPIO_FN_KEYOUT1, NULL); |
405 | gpio_request(GPIO_FN_KEYOUT0, NULL); | 485 | gpio_request(GPIO_FN_KEYOUT0, NULL); |
406 | 486 | ||
487 | /* | ||
488 | * enable SH-Eth | ||
489 | * | ||
490 | * please remove J33 pin from your board !! | ||
491 | * | ||
492 | * the ms7724 board should not use the GPIO_FN_LNKSTA pin, | ||
493 | * so PTX5 is set up as an input pin here instead | ||
494 | */ | ||
495 | gpio_request(GPIO_FN_RMII_RXD0, NULL); | ||
496 | gpio_request(GPIO_FN_RMII_RXD1, NULL); | ||
497 | gpio_request(GPIO_FN_RMII_TXD0, NULL); | ||
498 | gpio_request(GPIO_FN_RMII_TXD1, NULL); | ||
499 | gpio_request(GPIO_FN_RMII_REF_CLK, NULL); | ||
500 | gpio_request(GPIO_FN_RMII_TX_EN, NULL); | ||
501 | gpio_request(GPIO_FN_RMII_RX_ER, NULL); | ||
502 | gpio_request(GPIO_FN_RMII_CRS_DV, NULL); | ||
503 | gpio_request(GPIO_FN_MDIO, NULL); | ||
504 | gpio_request(GPIO_FN_MDC, NULL); | ||
505 | gpio_request(GPIO_PTX5, NULL); | ||
506 | gpio_direction_input(GPIO_PTX5); | ||
507 | sh_eth_init(); | ||
508 | |||
407 | if (sw & SW41_B) { | 509 | if (sw & SW41_B) { |
408 | /* SVGA */ | 510 | /* SVGA */ |
409 | lcdc_info.ch[0].lcd_cfg.xres = 800; | 511 | lcdc_info.ch[0].lcd_cfg.xres = 800; |
@@ -437,7 +539,7 @@ static int __init devices_setup(void) | |||
437 | } | 539 | } |
438 | 540 | ||
439 | return platform_add_devices(ms7724se_devices, | 541 | return platform_add_devices(ms7724se_devices, |
440 | ARRAY_SIZE(ms7724se_devices)); | 542 | ARRAY_SIZE(ms7724se_devices)); |
441 | } | 543 | } |
442 | device_initcall(devices_setup); | 544 | device_initcall(devices_setup); |
443 | 545 | ||
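The sh_eth_init() helper added earlier in this file's diff reads the MAC address as three 16-bit EEPROM words, byte-swaps each one, and packs them into the controller's address registers. A worked example (with an invented MAC, and assuming the raw EEPROM words come back byte-swapped as the code expects): for 00:11:22:33:44:55 the swapped words are mac[0]=0x0011, mac[1]=0x2233, mac[2]=0x4455, so the register writes reduce to the following.

	/* illustrative values only -- same packing as sh_eth_init() above */
	ctrl_outl(0x00112233, SH_ETH_MAHR);	/* (mac[0] << 16) | mac[1]: upper four MAC bytes */
	ctrl_outl(0x00004455, SH_ETH_MALR);	/* mac[2]: lower two MAC bytes */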
diff --git a/arch/sh/configs/migor_defconfig b/arch/sh/configs/migor_defconfig index da627d22c009..b18cfd39cac6 100644 --- a/arch/sh/configs/migor_defconfig +++ b/arch/sh/configs/migor_defconfig | |||
@@ -309,7 +309,7 @@ CONFIG_ZERO_PAGE_OFFSET=0x00001000 | |||
309 | CONFIG_BOOT_LINK_OFFSET=0x00800000 | 309 | CONFIG_BOOT_LINK_OFFSET=0x00800000 |
310 | CONFIG_ENTRY_OFFSET=0x00001000 | 310 | CONFIG_ENTRY_OFFSET=0x00001000 |
311 | CONFIG_CMDLINE_BOOL=y | 311 | CONFIG_CMDLINE_BOOL=y |
312 | CONFIG_CMDLINE="console=ttySC0,115200 earlyprintk=serial ip=on root=/dev/nfs ip=dhcp" | 312 | CONFIG_CMDLINE="console=tty0 console=ttySC0,115200 earlyprintk=serial ip=on root=/dev/nfs ip=dhcp" |
313 | 313 | ||
314 | # | 314 | # |
315 | # Bus options | 315 | # Bus options |
@@ -858,7 +858,35 @@ CONFIG_VIDEO_SH_MOBILE_CEU=y | |||
858 | # | 858 | # |
859 | # CONFIG_VGASTATE is not set | 859 | # CONFIG_VGASTATE is not set |
860 | # CONFIG_VIDEO_OUTPUT_CONTROL is not set | 860 | # CONFIG_VIDEO_OUTPUT_CONTROL is not set |
861 | # CONFIG_FB is not set | 861 | CONFIG_FB=y |
862 | # CONFIG_FIRMWARE_EDID is not set | ||
863 | # CONFIG_FB_DDC is not set | ||
864 | # CONFIG_FB_BOOT_VESA_SUPPORT is not set | ||
865 | # CONFIG_FB_CFB_FILLRECT is not set | ||
866 | # CONFIG_FB_CFB_COPYAREA is not set | ||
867 | # CONFIG_FB_CFB_IMAGEBLIT is not set | ||
868 | # CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set | ||
869 | CONFIG_FB_SYS_FILLRECT=y | ||
870 | CONFIG_FB_SYS_COPYAREA=y | ||
871 | CONFIG_FB_SYS_IMAGEBLIT=y | ||
872 | # CONFIG_FB_FOREIGN_ENDIAN is not set | ||
873 | CONFIG_FB_SYS_FOPS=y | ||
874 | CONFIG_FB_DEFERRED_IO=y | ||
875 | # CONFIG_FB_SVGALIB is not set | ||
876 | # CONFIG_FB_MACMODES is not set | ||
877 | # CONFIG_FB_BACKLIGHT is not set | ||
878 | # CONFIG_FB_MODE_HELPERS is not set | ||
879 | # CONFIG_FB_TILEBLITTING is not set | ||
880 | |||
881 | # | ||
882 | # Frame buffer hardware drivers | ||
883 | # | ||
884 | # CONFIG_FB_S1D13XXX is not set | ||
885 | CONFIG_FB_SH_MOBILE_LCDC=y | ||
886 | # CONFIG_FB_VIRTUAL is not set | ||
887 | # CONFIG_FB_METRONOME is not set | ||
888 | # CONFIG_FB_MB862XX is not set | ||
889 | # CONFIG_FB_BROADSHEET is not set | ||
862 | # CONFIG_BACKLIGHT_LCD_SUPPORT is not set | 890 | # CONFIG_BACKLIGHT_LCD_SUPPORT is not set |
863 | 891 | ||
864 | # | 892 | # |
@@ -870,6 +898,27 @@ CONFIG_VIDEO_SH_MOBILE_CEU=y | |||
870 | # Console display driver support | 898 | # Console display driver support |
871 | # | 899 | # |
872 | CONFIG_DUMMY_CONSOLE=y | 900 | CONFIG_DUMMY_CONSOLE=y |
901 | CONFIG_FRAMEBUFFER_CONSOLE=y | ||
902 | CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y | ||
903 | # CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set | ||
904 | CONFIG_FONTS=y | ||
905 | # CONFIG_FONT_8x8 is not set | ||
906 | # CONFIG_FONT_8x16 is not set | ||
907 | # CONFIG_FONT_6x11 is not set | ||
908 | # CONFIG_FONT_7x14 is not set | ||
909 | # CONFIG_FONT_PEARL_8x8 is not set | ||
910 | # CONFIG_FONT_ACORN_8x8 is not set | ||
911 | CONFIG_FONT_MINI_4x6=y | ||
912 | # CONFIG_FONT_SUN8x16 is not set | ||
913 | # CONFIG_FONT_SUN12x22 is not set | ||
914 | # CONFIG_FONT_10x18 is not set | ||
915 | CONFIG_LOGO=y | ||
916 | # CONFIG_LOGO_LINUX_MONO is not set | ||
917 | # CONFIG_LOGO_LINUX_VGA16 is not set | ||
918 | # CONFIG_LOGO_LINUX_CLUT224 is not set | ||
919 | # CONFIG_LOGO_SUPERH_MONO is not set | ||
920 | CONFIG_LOGO_SUPERH_VGA16=y | ||
921 | # CONFIG_LOGO_SUPERH_CLUT224 is not set | ||
873 | # CONFIG_SOUND is not set | 922 | # CONFIG_SOUND is not set |
874 | CONFIG_HID_SUPPORT=y | 923 | CONFIG_HID_SUPPORT=y |
875 | CONFIG_HID=y | 924 | CONFIG_HID=y |
diff --git a/arch/sh/configs/se7724_defconfig b/arch/sh/configs/se7724_defconfig index 3840270283e4..3ee783a0a075 100644 --- a/arch/sh/configs/se7724_defconfig +++ b/arch/sh/configs/se7724_defconfig | |||
@@ -1,7 +1,7 @@ | |||
1 | # | 1 | # |
2 | # Automatically generated make config: don't edit | 2 | # Automatically generated make config: don't edit |
3 | # Linux kernel version: 2.6.30 | 3 | # Linux kernel version: 2.6.30 |
4 | # Thu Jun 18 16:09:05 2009 | 4 | # Mon Jun 29 16:28:43 2009 |
5 | # | 5 | # |
6 | CONFIG_SUPERH=y | 6 | CONFIG_SUPERH=y |
7 | CONFIG_SUPERH32=y | 7 | CONFIG_SUPERH32=y |
@@ -14,6 +14,7 @@ CONFIG_GENERIC_HWEIGHT=y | |||
14 | CONFIG_GENERIC_HARDIRQS=y | 14 | CONFIG_GENERIC_HARDIRQS=y |
15 | CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y | 15 | CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y |
16 | CONFIG_GENERIC_IRQ_PROBE=y | 16 | CONFIG_GENERIC_IRQ_PROBE=y |
17 | CONFIG_IRQ_PER_CPU=y | ||
17 | CONFIG_GENERIC_GPIO=y | 18 | CONFIG_GENERIC_GPIO=y |
18 | CONFIG_GENERIC_TIME=y | 19 | CONFIG_GENERIC_TIME=y |
19 | CONFIG_GENERIC_CLOCKEVENTS=y | 20 | CONFIG_GENERIC_CLOCKEVENTS=y |
@@ -28,7 +29,9 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y | |||
28 | # CONFIG_ARCH_HAS_ILOG2_U64 is not set | 29 | # CONFIG_ARCH_HAS_ILOG2_U64 is not set |
29 | CONFIG_ARCH_NO_VIRT_TO_BUS=y | 30 | CONFIG_ARCH_NO_VIRT_TO_BUS=y |
30 | CONFIG_ARCH_HAS_DEFAULT_IDLE=y | 31 | CONFIG_ARCH_HAS_DEFAULT_IDLE=y |
32 | CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y | ||
31 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" | 33 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" |
34 | CONFIG_CONSTRUCTORS=y | ||
32 | 35 | ||
33 | # | 36 | # |
34 | # General setup | 37 | # General setup |
@@ -88,10 +91,12 @@ CONFIG_TIMERFD=y | |||
88 | CONFIG_EVENTFD=y | 91 | CONFIG_EVENTFD=y |
89 | CONFIG_SHMEM=y | 92 | CONFIG_SHMEM=y |
90 | CONFIG_AIO=y | 93 | CONFIG_AIO=y |
94 | CONFIG_HAVE_PERF_COUNTERS=y | ||
91 | 95 | ||
92 | # | 96 | # |
93 | # Performance Counters | 97 | # Performance Counters |
94 | # | 98 | # |
99 | # CONFIG_PERF_COUNTERS is not set | ||
95 | CONFIG_VM_EVENT_COUNTERS=y | 100 | CONFIG_VM_EVENT_COUNTERS=y |
96 | # CONFIG_STRIP_ASM_SYMS is not set | 101 | # CONFIG_STRIP_ASM_SYMS is not set |
97 | CONFIG_COMPAT_BRK=y | 102 | CONFIG_COMPAT_BRK=y |
@@ -107,6 +112,10 @@ CONFIG_HAVE_KRETPROBES=y | |||
107 | CONFIG_HAVE_ARCH_TRACEHOOK=y | 112 | CONFIG_HAVE_ARCH_TRACEHOOK=y |
108 | CONFIG_HAVE_CLK=y | 113 | CONFIG_HAVE_CLK=y |
109 | CONFIG_HAVE_DMA_API_DEBUG=y | 114 | CONFIG_HAVE_DMA_API_DEBUG=y |
115 | |||
116 | # | ||
117 | # GCOV-based kernel profiling | ||
118 | # | ||
110 | # CONFIG_SLOW_WORK is not set | 119 | # CONFIG_SLOW_WORK is not set |
111 | CONFIG_HAVE_GENERIC_DMA_COHERENT=y | 120 | CONFIG_HAVE_GENERIC_DMA_COHERENT=y |
112 | CONFIG_SLABINFO=y | 121 | CONFIG_SLABINFO=y |
@@ -119,7 +128,7 @@ CONFIG_MODULE_UNLOAD=y | |||
119 | # CONFIG_MODVERSIONS is not set | 128 | # CONFIG_MODVERSIONS is not set |
120 | # CONFIG_MODULE_SRCVERSION_ALL is not set | 129 | # CONFIG_MODULE_SRCVERSION_ALL is not set |
121 | CONFIG_BLOCK=y | 130 | CONFIG_BLOCK=y |
122 | # CONFIG_LBD is not set | 131 | CONFIG_LBDAF=y |
123 | # CONFIG_BLK_DEV_BSG is not set | 132 | # CONFIG_BLK_DEV_BSG is not set |
124 | # CONFIG_BLK_DEV_INTEGRITY is not set | 133 | # CONFIG_BLK_DEV_INTEGRITY is not set |
125 | 134 | ||
@@ -584,7 +593,6 @@ CONFIG_SCSI_WAIT_SCAN=m | |||
584 | # CONFIG_SCSI_SRP_ATTRS is not set | 593 | # CONFIG_SCSI_SRP_ATTRS is not set |
585 | CONFIG_SCSI_LOWLEVEL=y | 594 | CONFIG_SCSI_LOWLEVEL=y |
586 | # CONFIG_ISCSI_TCP is not set | 595 | # CONFIG_ISCSI_TCP is not set |
587 | # CONFIG_SCSI_BNX2_ISCSI is not set | ||
588 | # CONFIG_LIBFC is not set | 596 | # CONFIG_LIBFC is not set |
589 | # CONFIG_LIBFCOE is not set | 597 | # CONFIG_LIBFCOE is not set |
590 | # CONFIG_SCSI_DEBUG is not set | 598 | # CONFIG_SCSI_DEBUG is not set |
@@ -624,7 +632,7 @@ CONFIG_NET_ETHERNET=y | |||
624 | CONFIG_MII=y | 632 | CONFIG_MII=y |
625 | # CONFIG_AX88796 is not set | 633 | # CONFIG_AX88796 is not set |
626 | # CONFIG_STNIC is not set | 634 | # CONFIG_STNIC is not set |
627 | # CONFIG_SH_ETH is not set | 635 | CONFIG_SH_ETH=y |
628 | CONFIG_SMC91X=y | 636 | CONFIG_SMC91X=y |
629 | # CONFIG_ENC28J60 is not set | 637 | # CONFIG_ENC28J60 is not set |
630 | # CONFIG_ETHOC is not set | 638 | # CONFIG_ETHOC is not set |
@@ -801,6 +809,11 @@ CONFIG_SPI_BITBANG=y | |||
801 | # | 809 | # |
802 | # CONFIG_SPI_SPIDEV is not set | 810 | # CONFIG_SPI_SPIDEV is not set |
803 | # CONFIG_SPI_TLE62X0 is not set | 811 | # CONFIG_SPI_TLE62X0 is not set |
812 | |||
813 | # | ||
814 | # PPS support | ||
815 | # | ||
816 | # CONFIG_PPS is not set | ||
804 | CONFIG_ARCH_REQUIRE_GPIOLIB=y | 817 | CONFIG_ARCH_REQUIRE_GPIOLIB=y |
805 | CONFIG_GPIOLIB=y | 818 | CONFIG_GPIOLIB=y |
806 | # CONFIG_GPIO_SYSFS is not set | 819 | # CONFIG_GPIO_SYSFS is not set |
@@ -851,6 +864,8 @@ CONFIG_SSB_POSSIBLE=y | |||
851 | # CONFIG_MFD_WM8400 is not set | 864 | # CONFIG_MFD_WM8400 is not set |
852 | # CONFIG_MFD_WM8350_I2C is not set | 865 | # CONFIG_MFD_WM8350_I2C is not set |
853 | # CONFIG_MFD_PCF50633 is not set | 866 | # CONFIG_MFD_PCF50633 is not set |
867 | # CONFIG_AB3100_CORE is not set | ||
868 | # CONFIG_EZX_PCAP is not set | ||
854 | # CONFIG_REGULATOR is not set | 869 | # CONFIG_REGULATOR is not set |
855 | CONFIG_MEDIA_SUPPORT=y | 870 | CONFIG_MEDIA_SUPPORT=y |
856 | 871 | ||
@@ -1196,6 +1211,7 @@ CONFIG_RTC_DRV_PCF8563=y | |||
1196 | # CONFIG_RTC_DRV_S35390A is not set | 1211 | # CONFIG_RTC_DRV_S35390A is not set |
1197 | # CONFIG_RTC_DRV_FM3130 is not set | 1212 | # CONFIG_RTC_DRV_FM3130 is not set |
1198 | # CONFIG_RTC_DRV_RX8581 is not set | 1213 | # CONFIG_RTC_DRV_RX8581 is not set |
1214 | # CONFIG_RTC_DRV_RX8025 is not set | ||
1199 | 1215 | ||
1200 | # | 1216 | # |
1201 | # SPI RTC drivers | 1217 | # SPI RTC drivers |
@@ -1260,6 +1276,7 @@ CONFIG_FS_MBCACHE=y | |||
1260 | # CONFIG_JFS_FS is not set | 1276 | # CONFIG_JFS_FS is not set |
1261 | CONFIG_FS_POSIX_ACL=y | 1277 | CONFIG_FS_POSIX_ACL=y |
1262 | # CONFIG_XFS_FS is not set | 1278 | # CONFIG_XFS_FS is not set |
1279 | # CONFIG_GFS2_FS is not set | ||
1263 | # CONFIG_OCFS2_FS is not set | 1280 | # CONFIG_OCFS2_FS is not set |
1264 | # CONFIG_BTRFS_FS is not set | 1281 | # CONFIG_BTRFS_FS is not set |
1265 | CONFIG_FILE_LOCKING=y | 1282 | CONFIG_FILE_LOCKING=y |
diff --git a/arch/sh/include/asm/perf_counter.h b/arch/sh/include/asm/perf_counter.h index a8153c2aa6fa..61c2b40c802c 100644 --- a/arch/sh/include/asm/perf_counter.h +++ b/arch/sh/include/asm/perf_counter.h | |||
@@ -2,6 +2,6 @@ | |||
2 | #define __ASM_SH_PERF_COUNTER_H | 2 | #define __ASM_SH_PERF_COUNTER_H |
3 | 3 | ||
4 | /* SH only supports software counters through this interface. */ | 4 | /* SH only supports software counters through this interface. */ |
5 | #define set_perf_counter_pending() do { } while (0) | 5 | static inline void set_perf_counter_pending(void) {} |
6 | 6 | ||
7 | #endif /* __ASM_SH_PERF_COUNTER_H */ | 7 | #endif /* __ASM_SH_PERF_COUNTER_H */ |
diff --git a/arch/sh/include/asm/syscall_32.h b/arch/sh/include/asm/syscall_32.h index 5bc34681d994..6f83f2cc45c1 100644 --- a/arch/sh/include/asm/syscall_32.h +++ b/arch/sh/include/asm/syscall_32.h | |||
@@ -3,6 +3,7 @@ | |||
3 | 3 | ||
4 | #include <linux/kernel.h> | 4 | #include <linux/kernel.h> |
5 | #include <linux/sched.h> | 5 | #include <linux/sched.h> |
6 | #include <linux/err.h> | ||
6 | #include <asm/ptrace.h> | 7 | #include <asm/ptrace.h> |
7 | 8 | ||
8 | /* The system call number is given by the user in R3 */ | 9 | /* The system call number is given by the user in R3 */ |
diff --git a/arch/sh/include/mach-se/mach/se7724.h b/arch/sh/include/mach-se/mach/se7724.h index 74164b60d0db..29514a39d0f5 100644 --- a/arch/sh/include/mach-se/mach/se7724.h +++ b/arch/sh/include/mach-se/mach/se7724.h | |||
@@ -20,6 +20,11 @@ | |||
20 | */ | 20 | */ |
21 | #include <asm/addrspace.h> | 21 | #include <asm/addrspace.h> |
22 | 22 | ||
23 | /* SH Eth */ | ||
24 | #define SH_ETH_ADDR (0xA4600000) | ||
25 | #define SH_ETH_MAHR (SH_ETH_ADDR + 0x1C0) | ||
26 | #define SH_ETH_MALR (SH_ETH_ADDR + 0x1C8) | ||
27 | |||
23 | #define PA_LED (0xba203000) /* 8bit LED */ | 28 | #define PA_LED (0xba203000) /* 8bit LED */ |
24 | #define IRQ_MODE (0xba200010) | 29 | #define IRQ_MODE (0xba200010) |
25 | #define IRQ0_SR (0xba200014) | 30 | #define IRQ0_SR (0xba200014) |
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c index cc8ddbdf3d7a..71925946f1e1 100644 --- a/arch/sh/mm/fault_32.c +++ b/arch/sh/mm/fault_32.c | |||
@@ -15,12 +15,28 @@ | |||
15 | #include <linux/mm.h> | 15 | #include <linux/mm.h> |
16 | #include <linux/hardirq.h> | 16 | #include <linux/hardirq.h> |
17 | #include <linux/kprobes.h> | 17 | #include <linux/kprobes.h> |
18 | #include <linux/marker.h> | 18 | #include <linux/perf_counter.h> |
19 | #include <asm/io_trapped.h> | 19 | #include <asm/io_trapped.h> |
20 | #include <asm/system.h> | 20 | #include <asm/system.h> |
21 | #include <asm/mmu_context.h> | 21 | #include <asm/mmu_context.h> |
22 | #include <asm/tlbflush.h> | 22 | #include <asm/tlbflush.h> |
23 | 23 | ||
24 | static inline int notify_page_fault(struct pt_regs *regs, int trap) | ||
25 | { | ||
26 | int ret = 0; | ||
27 | |||
28 | #ifdef CONFIG_KPROBES | ||
29 | if (!user_mode(regs)) { | ||
30 | preempt_disable(); | ||
31 | if (kprobe_running() && kprobe_fault_handler(regs, trap)) | ||
32 | ret = 1; | ||
33 | preempt_enable(); | ||
34 | } | ||
35 | #endif | ||
36 | |||
37 | return ret; | ||
38 | } | ||
39 | |||
24 | /* | 40 | /* |
25 | * This routine handles page faults. It determines the address, | 41 | * This routine handles page faults. It determines the address, |
26 | * and the problem, and then passes it off to one of the appropriate | 42 | * and the problem, and then passes it off to one of the appropriate |
@@ -87,13 +103,16 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, | |||
87 | return; | 103 | return; |
88 | } | 104 | } |
89 | 105 | ||
106 | mm = tsk->mm; | ||
107 | |||
108 | if (unlikely(notify_page_fault(regs, lookup_exception_vector()))) | ||
109 | return; | ||
110 | |||
90 | /* Only enable interrupts if they were on before the fault */ | 111 | /* Only enable interrupts if they were on before the fault */ |
91 | if ((regs->sr & SR_IMASK) != SR_IMASK) { | 112 | if ((regs->sr & SR_IMASK) != SR_IMASK) |
92 | trace_hardirqs_on(); | ||
93 | local_irq_enable(); | 113 | local_irq_enable(); |
94 | } | ||
95 | 114 | ||
96 | mm = tsk->mm; | 115 | perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); |
97 | 116 | ||
98 | /* | 117 | /* |
99 | * If we're in an interrupt or have no user | 118 | * If we're in an interrupt or have no user |
@@ -141,10 +160,15 @@ survive: | |||
141 | goto do_sigbus; | 160 | goto do_sigbus; |
142 | BUG(); | 161 | BUG(); |
143 | } | 162 | } |
144 | if (fault & VM_FAULT_MAJOR) | 163 | if (fault & VM_FAULT_MAJOR) { |
145 | tsk->maj_flt++; | 164 | tsk->maj_flt++; |
146 | else | 165 | perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, |
166 | regs, address); | ||
167 | } else { | ||
147 | tsk->min_flt++; | 168 | tsk->min_flt++; |
169 | perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, | ||
170 | regs, address); | ||
171 | } | ||
148 | 172 | ||
149 | up_read(&mm->mmap_sem); | 173 | up_read(&mm->mmap_sem); |
150 | return; | 174 | return; |
@@ -245,22 +269,6 @@ do_sigbus: | |||
245 | goto no_context; | 269 | goto no_context; |
246 | } | 270 | } |
247 | 271 | ||
248 | static inline int notify_page_fault(struct pt_regs *regs, int trap) | ||
249 | { | ||
250 | int ret = 0; | ||
251 | |||
252 | #ifdef CONFIG_KPROBES | ||
253 | if (!user_mode(regs)) { | ||
254 | preempt_disable(); | ||
255 | if (kprobe_running() && kprobe_fault_handler(regs, trap)) | ||
256 | ret = 1; | ||
257 | preempt_enable(); | ||
258 | } | ||
259 | #endif | ||
260 | |||
261 | return ret; | ||
262 | } | ||
263 | |||
264 | /* | 272 | /* |
265 | * Called with interrupts disabled. | 273 | * Called with interrupts disabled. |
266 | */ | 274 | */ |
@@ -273,12 +281,7 @@ asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs, | |||
273 | pmd_t *pmd; | 281 | pmd_t *pmd; |
274 | pte_t *pte; | 282 | pte_t *pte; |
275 | pte_t entry; | 283 | pte_t entry; |
276 | int ret = 0; | 284 | int ret = 1; |
277 | |||
278 | if (notify_page_fault(regs, lookup_exception_vector())) | ||
279 | goto out; | ||
280 | |||
281 | ret = 1; | ||
282 | 285 | ||
283 | /* | 286 | /* |
284 | * We don't take page faults for P1, P2, and parts of P4, these | 287 | * We don't take page faults for P1, P2, and parts of P4, these |
diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c index fcbb6e135cef..3ce40ea34824 100644 --- a/arch/sh/mm/tlbflush_64.c +++ b/arch/sh/mm/tlbflush_64.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Copyright (C) 2000, 2001 Paolo Alberelli | 4 | * Copyright (C) 2000, 2001 Paolo Alberelli |
5 | * Copyright (C) 2003 Richard Curnow (/proc/tlb, bug fixes) | 5 | * Copyright (C) 2003 Richard Curnow (/proc/tlb, bug fixes) |
6 | * Copyright (C) 2003 Paul Mundt | 6 | * Copyright (C) 2003 - 2009 Paul Mundt |
7 | * | 7 | * |
8 | * This file is subject to the terms and conditions of the GNU General Public | 8 | * This file is subject to the terms and conditions of the GNU General Public |
9 | * License. See the file "COPYING" in the main directory of this archive | 9 | * License. See the file "COPYING" in the main directory of this archive |
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/mman.h> | 20 | #include <linux/mman.h> |
21 | #include <linux/mm.h> | 21 | #include <linux/mm.h> |
22 | #include <linux/smp.h> | 22 | #include <linux/smp.h> |
23 | #include <linux/perf_counter.h> | ||
23 | #include <linux/interrupt.h> | 24 | #include <linux/interrupt.h> |
24 | #include <asm/system.h> | 25 | #include <asm/system.h> |
25 | #include <asm/io.h> | 26 | #include <asm/io.h> |
@@ -115,6 +116,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess, | |||
115 | /* Not an IO address, so reenable interrupts */ | 116 | /* Not an IO address, so reenable interrupts */ |
116 | local_irq_enable(); | 117 | local_irq_enable(); |
117 | 118 | ||
119 | perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); | ||
120 | |||
118 | /* | 121 | /* |
119 | * If we're in an interrupt or have no user | 122 | * If we're in an interrupt or have no user |
120 | * context, we must not take the fault.. | 123 | * context, we must not take the fault.. |
@@ -195,10 +198,16 @@ survive: | |||
195 | goto do_sigbus; | 198 | goto do_sigbus; |
196 | BUG(); | 199 | BUG(); |
197 | } | 200 | } |
198 | if (fault & VM_FAULT_MAJOR) | 201 | |
202 | if (fault & VM_FAULT_MAJOR) { | ||
199 | tsk->maj_flt++; | 203 | tsk->maj_flt++; |
200 | else | 204 | perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, |
205 | regs, address); | ||
206 | } else { | ||
201 | tsk->min_flt++; | 207 | tsk->min_flt++; |
208 | perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, | ||
209 | regs, address); | ||
210 | } | ||
202 | 211 | ||
203 | /* If we get here, the page fault has been handled. Do the TLB refill | 212 | /* If we get here, the page fault has been handled. Do the TLB refill |
204 | now from the newly-setup PTE, to avoid having to fault again right | 213 | now from the newly-setup PTE, to avoid having to fault again right |
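
Both sh fault handlers above now bump tsk->min_flt/maj_flt and emit the PERF_COUNT_SW_PAGE_FAULTS* software events from the same spot. As a minimal user-space sketch (ordinary C, not kernel code), the per-task fault counters maintained here are what getrusage() reports back as ru_minflt/ru_majflt; the 64 MiB buffer size is an arbitrary choice for illustration.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/resource.h>

int main(void)
{
	struct rusage ru;
	size_t sz = 64 << 20;		/* 64 MiB, arbitrary */
	char *p = malloc(sz);

	if (!p)
		return 1;
	memset(p, 0, sz);		/* touch every page -> minor faults */

	if (getrusage(RUSAGE_SELF, &ru) == 0)
		printf("minor faults: %ld, major faults: %ld\n",
		       ru.ru_minflt, ru.ru_majflt);
	free(p);
	return 0;
}
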
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index d1430ef6b4f9..c07f72205909 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -1913,25 +1913,14 @@ config DMAR_DEFAULT_ON | |||
1913 | recommended you say N here while the DMAR code remains | 1913 | recommended you say N here while the DMAR code remains |
1914 | experimental. | 1914 | experimental. |
1915 | 1915 | ||
1916 | config DMAR_GFX_WA | ||
1917 | def_bool y | ||
1918 | prompt "Support for Graphics workaround" | ||
1919 | depends on DMAR | ||
1920 | ---help--- | ||
1921 | Current Graphics drivers tend to use physical address | ||
1922 | for DMA and avoid using DMA APIs. Setting this config | ||
1923 | option permits the IOMMU driver to set a unity map for | ||
1924 | all the OS-visible memory. Hence the driver can continue | ||
1925 | to use physical addresses for DMA. | ||
1926 | |||
1927 | config DMAR_FLOPPY_WA | 1916 | config DMAR_FLOPPY_WA |
1928 | def_bool y | 1917 | def_bool y |
1929 | depends on DMAR | 1918 | depends on DMAR |
1930 | ---help--- | 1919 | ---help--- |
1931 | Floppy disk drivers are know to bypass DMA API calls | 1920 | Floppy disk drivers are known to bypass DMA API calls |
1932 | thereby failing to work when IOMMU is enabled. This | 1921 | thereby failing to work when IOMMU is enabled. This |
1933 | workaround will setup a 1:1 mapping for the first | 1922 | workaround will setup a 1:1 mapping for the first |
1934 | 16M to make floppy (an ISA device) work. | 1923 | 16MiB to make floppy (an ISA device) work. |
1935 | 1924 | ||
1936 | config INTR_REMAP | 1925 | config INTR_REMAP |
1937 | bool "Support for Interrupt Remapping (EXPERIMENTAL)" | 1926 | bool "Support for Interrupt Remapping (EXPERIMENTAL)" |
diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h index 418e632d4a80..7a1065958ba9 100644 --- a/arch/x86/include/asm/boot.h +++ b/arch/x86/include/asm/boot.h | |||
@@ -8,7 +8,7 @@ | |||
8 | 8 | ||
9 | #ifdef __KERNEL__ | 9 | #ifdef __KERNEL__ |
10 | 10 | ||
11 | #include <asm/page_types.h> | 11 | #include <asm/pgtable_types.h> |
12 | 12 | ||
13 | /* Physical address where kernel should be loaded. */ | 13 | /* Physical address where kernel should be loaded. */ |
14 | #define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \ | 14 | #define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \ |
@@ -16,10 +16,10 @@ | |||
16 | & ~(CONFIG_PHYSICAL_ALIGN - 1)) | 16 | & ~(CONFIG_PHYSICAL_ALIGN - 1)) |
17 | 17 | ||
18 | /* Minimum kernel alignment, as a power of two */ | 18 | /* Minimum kernel alignment, as a power of two */ |
19 | #ifdef CONFIG_x86_64 | 19 | #ifdef CONFIG_X86_64 |
20 | #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT | 20 | #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT |
21 | #else | 21 | #else |
22 | #define MIN_KERNEL_ALIGN_LG2 (PAGE_SHIFT+1) | 22 | #define MIN_KERNEL_ALIGN_LG2 (PAGE_SHIFT + THREAD_ORDER) |
23 | #endif | 23 | #endif |
24 | #define MIN_KERNEL_ALIGN (_AC(1, UL) << MIN_KERNEL_ALIGN_LG2) | 24 | #define MIN_KERNEL_ALIGN (_AC(1, UL) << MIN_KERNEL_ALIGN_LG2) |
25 | 25 | ||
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h index 927958d13c19..1ff685ca221c 100644 --- a/arch/x86/include/asm/pci.h +++ b/arch/x86/include/asm/pci.h | |||
@@ -91,7 +91,7 @@ extern void pci_iommu_alloc(void); | |||
91 | 91 | ||
92 | #define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys) | 92 | #define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys) |
93 | 93 | ||
94 | #if defined(CONFIG_X86_64) || defined(CONFIG_DMA_API_DEBUG) | 94 | #if defined(CONFIG_X86_64) || defined(CONFIG_DMAR) || defined(CONFIG_DMA_API_DEBUG) |
95 | 95 | ||
96 | #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ | 96 | #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ |
97 | dma_addr_t ADDR_NAME; | 97 | dma_addr_t ADDR_NAME; |
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 02ecb30982a3..103f1ddb0d85 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h | |||
@@ -42,6 +42,7 @@ | |||
42 | 42 | ||
43 | #else /* ...!ASSEMBLY */ | 43 | #else /* ...!ASSEMBLY */ |
44 | 44 | ||
45 | #include <linux/kernel.h> | ||
45 | #include <linux/stringify.h> | 46 | #include <linux/stringify.h> |
46 | 47 | ||
47 | #ifdef CONFIG_SMP | 48 | #ifdef CONFIG_SMP |
@@ -155,6 +156,15 @@ do { \ | |||
155 | /* We can use this directly for local CPU (faster). */ | 156 | /* We can use this directly for local CPU (faster). */ |
156 | DECLARE_PER_CPU(unsigned long, this_cpu_off); | 157 | DECLARE_PER_CPU(unsigned long, this_cpu_off); |
157 | 158 | ||
159 | #ifdef CONFIG_NEED_MULTIPLE_NODES | ||
160 | void *pcpu_lpage_remapped(void *kaddr); | ||
161 | #else | ||
162 | static inline void *pcpu_lpage_remapped(void *kaddr) | ||
163 | { | ||
164 | return NULL; | ||
165 | } | ||
166 | #endif | ||
167 | |||
158 | #endif /* !__ASSEMBLY__ */ | 168 | #endif /* !__ASSEMBLY__ */ |
159 | 169 | ||
160 | #ifdef CONFIG_SMP | 170 | #ifdef CONFIG_SMP |
diff --git a/arch/x86/include/asm/perf_counter.h b/arch/x86/include/asm/perf_counter.h index 5fb33e160ea0..fa64e401589d 100644 --- a/arch/x86/include/asm/perf_counter.h +++ b/arch/x86/include/asm/perf_counter.h | |||
@@ -87,6 +87,9 @@ union cpuid10_edx { | |||
87 | #ifdef CONFIG_PERF_COUNTERS | 87 | #ifdef CONFIG_PERF_COUNTERS |
88 | extern void init_hw_perf_counters(void); | 88 | extern void init_hw_perf_counters(void); |
89 | extern void perf_counters_lapic_init(void); | 89 | extern void perf_counters_lapic_init(void); |
90 | |||
91 | #define PERF_COUNTER_INDEX_OFFSET 0 | ||
92 | |||
90 | #else | 93 | #else |
91 | static inline void init_hw_perf_counters(void) { } | 94 | static inline void init_hw_perf_counters(void) { } |
92 | static inline void perf_counters_lapic_init(void) { } | 95 | static inline void perf_counters_lapic_init(void) { } |
diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h index 49fb3ecf3bb3..621f56d73121 100644 --- a/arch/x86/include/asm/proto.h +++ b/arch/x86/include/asm/proto.h | |||
@@ -22,7 +22,14 @@ extern int reboot_force; | |||
22 | 22 | ||
23 | long do_arch_prctl(struct task_struct *task, int code, unsigned long addr); | 23 | long do_arch_prctl(struct task_struct *task, int code, unsigned long addr); |
24 | 24 | ||
25 | #define round_up(x, y) (((x) + (y) - 1) & ~((y) - 1)) | 25 | /* |
26 | #define round_down(x, y) ((x) & ~((y) - 1)) | 26 | * This looks more complex than it should be. But we need to |
27 | * get the type for the ~ right in round_down (it needs to be | ||
28 | * as wide as the result!), and we want to evaluate the macro | ||
29 | * arguments just once each. | ||
30 | */ | ||
31 | #define __round_mask(x,y) ((__typeof__(x))((y)-1)) | ||
32 | #define round_up(x,y) ((((x)-1) | __round_mask(x,y))+1) | ||
33 | #define round_down(x,y) ((x) & ~__round_mask(x,y)) | ||
27 | 34 | ||
28 | #endif /* _ASM_X86_PROTO_H */ | 35 | #endif /* _ASM_X86_PROTO_H */ |
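
A stand-alone sketch (user-space C, not the kernel header) of what the reworked macros compute. The cast in __round_mask keeps the inverted mask as wide as x, so round_down() on a 64-bit value with, say, an unsigned int y no longer clears the high bits; each argument is also evaluated only once. The example values are arbitrary, and y must be a power of two.

#include <stdio.h>
#include <stdint.h>

#define __round_mask(x, y) ((__typeof__(x))((y) - 1))
#define round_up(x, y)     ((((x) - 1) | __round_mask(x, y)) + 1)
#define round_down(x, y)   ((x) & ~__round_mask(x, y))

int main(void)
{
	uint64_t addr = 0x100000123ULL;

	printf("%llx\n", (unsigned long long)round_up(addr, 4096));   /* 0x100001000 */
	printf("%llx\n", (unsigned long long)round_down(addr, 4096)); /* 0x100000000 */
	return 0;
}
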
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index e5b27d8f1b47..28e5f5956042 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
@@ -258,13 +258,15 @@ static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c) | |||
258 | { | 258 | { |
259 | #ifdef CONFIG_X86_HT | 259 | #ifdef CONFIG_X86_HT |
260 | unsigned bits; | 260 | unsigned bits; |
261 | int cpu = smp_processor_id(); | ||
261 | 262 | ||
262 | bits = c->x86_coreid_bits; | 263 | bits = c->x86_coreid_bits; |
263 | |||
264 | /* Low order bits define the core id (index of core in socket) */ | 264 | /* Low order bits define the core id (index of core in socket) */ |
265 | c->cpu_core_id = c->initial_apicid & ((1 << bits)-1); | 265 | c->cpu_core_id = c->initial_apicid & ((1 << bits)-1); |
266 | /* Convert the initial APIC ID into the socket ID */ | 266 | /* Convert the initial APIC ID into the socket ID */ |
267 | c->phys_proc_id = c->initial_apicid >> bits; | 267 | c->phys_proc_id = c->initial_apicid >> bits; |
268 | /* use socket ID also for last level cache */ | ||
269 | per_cpu(cpu_llc_id, cpu) = c->phys_proc_id; | ||
268 | #endif | 270 | #endif |
269 | } | 271 | } |
270 | 272 | ||
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 6b26d4deada0..f1961c07af9a 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -848,9 +848,6 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) | |||
848 | #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) | 848 | #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) |
849 | numa_add_cpu(smp_processor_id()); | 849 | numa_add_cpu(smp_processor_id()); |
850 | #endif | 850 | #endif |
851 | |||
852 | /* Cap the iomem address space to what is addressable on all CPUs */ | ||
853 | iomem_resource.end &= (1ULL << c->x86_phys_bits) - 1; | ||
854 | } | 851 | } |
855 | 852 | ||
856 | #ifdef CONFIG_X86_64 | 853 | #ifdef CONFIG_X86_64 |
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 284d1de968bc..af425b83202b 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c | |||
@@ -1117,7 +1117,7 @@ static void mcheck_timer(unsigned long data) | |||
1117 | *n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ)); | 1117 | *n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ)); |
1118 | 1118 | ||
1119 | t->expires = jiffies + *n; | 1119 | t->expires = jiffies + *n; |
1120 | add_timer(t); | 1120 | add_timer_on(t, smp_processor_id()); |
1121 | } | 1121 | } |
1122 | 1122 | ||
1123 | static void mce_do_trigger(struct work_struct *work) | 1123 | static void mce_do_trigger(struct work_struct *work) |
@@ -1321,7 +1321,7 @@ static void mce_init_timer(void) | |||
1321 | return; | 1321 | return; |
1322 | setup_timer(t, mcheck_timer, smp_processor_id()); | 1322 | setup_timer(t, mcheck_timer, smp_processor_id()); |
1323 | t->expires = round_jiffies(jiffies + *n); | 1323 | t->expires = round_jiffies(jiffies + *n); |
1324 | add_timer(t); | 1324 | add_timer_on(t, smp_processor_id()); |
1325 | } | 1325 | } |
1326 | 1326 | ||
1327 | /* | 1327 | /* |
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 76dfef23f789..d4cf4ce19aac 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c | |||
@@ -401,7 +401,7 @@ static const u64 amd_hw_cache_event_ids | |||
401 | [ C(RESULT_MISS) ] = 0x0041, /* Data Cache Misses */ | 401 | [ C(RESULT_MISS) ] = 0x0041, /* Data Cache Misses */ |
402 | }, | 402 | }, |
403 | [ C(OP_WRITE) ] = { | 403 | [ C(OP_WRITE) ] = { |
404 | [ C(RESULT_ACCESS) ] = 0x0042, /* Data Cache Refills from L2 */ | 404 | [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */ |
405 | [ C(RESULT_MISS) ] = 0, | 405 | [ C(RESULT_MISS) ] = 0, |
406 | }, | 406 | }, |
407 | [ C(OP_PREFETCH) ] = { | 407 | [ C(OP_PREFETCH) ] = { |
@@ -912,6 +912,8 @@ x86_perf_counter_set_period(struct perf_counter *counter, | |||
912 | err = checking_wrmsrl(hwc->counter_base + idx, | 912 | err = checking_wrmsrl(hwc->counter_base + idx, |
913 | (u64)(-left) & x86_pmu.counter_mask); | 913 | (u64)(-left) & x86_pmu.counter_mask); |
914 | 914 | ||
915 | perf_counter_update_userpage(counter); | ||
916 | |||
915 | return ret; | 917 | return ret; |
916 | } | 918 | } |
917 | 919 | ||
@@ -969,13 +971,6 @@ fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc) | |||
969 | if (!x86_pmu.num_counters_fixed) | 971 | if (!x86_pmu.num_counters_fixed) |
970 | return -1; | 972 | return -1; |
971 | 973 | ||
972 | /* | ||
973 | * Quirk, IA32_FIXED_CTRs do not work on current Atom processors: | ||
974 | */ | ||
975 | if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && | ||
976 | boot_cpu_data.x86_model == 28) | ||
977 | return -1; | ||
978 | |||
979 | event = hwc->config & ARCH_PERFMON_EVENT_MASK; | 974 | event = hwc->config & ARCH_PERFMON_EVENT_MASK; |
980 | 975 | ||
981 | if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS))) | 976 | if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS))) |
@@ -1041,6 +1036,8 @@ try_generic: | |||
1041 | x86_perf_counter_set_period(counter, hwc, idx); | 1036 | x86_perf_counter_set_period(counter, hwc, idx); |
1042 | x86_pmu.enable(hwc, idx); | 1037 | x86_pmu.enable(hwc, idx); |
1043 | 1038 | ||
1039 | perf_counter_update_userpage(counter); | ||
1040 | |||
1044 | return 0; | 1041 | return 0; |
1045 | } | 1042 | } |
1046 | 1043 | ||
@@ -1133,6 +1130,8 @@ static void x86_pmu_disable(struct perf_counter *counter) | |||
1133 | x86_perf_counter_update(counter, hwc, idx); | 1130 | x86_perf_counter_update(counter, hwc, idx); |
1134 | cpuc->counters[idx] = NULL; | 1131 | cpuc->counters[idx] = NULL; |
1135 | clear_bit(idx, cpuc->used_mask); | 1132 | clear_bit(idx, cpuc->used_mask); |
1133 | |||
1134 | perf_counter_update_userpage(counter); | ||
1136 | } | 1135 | } |
1137 | 1136 | ||
1138 | /* | 1137 | /* |
@@ -1428,8 +1427,6 @@ static int intel_pmu_init(void) | |||
1428 | */ | 1427 | */ |
1429 | x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3); | 1428 | x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3); |
1430 | 1429 | ||
1431 | rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl); | ||
1432 | |||
1433 | /* | 1430 | /* |
1434 | * Install the hw-cache-events table: | 1431 | * Install the hw-cache-events table: |
1435 | */ | 1432 | */ |
@@ -1499,21 +1496,22 @@ void __init init_hw_perf_counters(void) | |||
1499 | pr_cont("%s PMU driver.\n", x86_pmu.name); | 1496 | pr_cont("%s PMU driver.\n", x86_pmu.name); |
1500 | 1497 | ||
1501 | if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) { | 1498 | if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) { |
1502 | x86_pmu.num_counters = X86_PMC_MAX_GENERIC; | ||
1503 | WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!", | 1499 | WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!", |
1504 | x86_pmu.num_counters, X86_PMC_MAX_GENERIC); | 1500 | x86_pmu.num_counters, X86_PMC_MAX_GENERIC); |
1501 | x86_pmu.num_counters = X86_PMC_MAX_GENERIC; | ||
1505 | } | 1502 | } |
1506 | perf_counter_mask = (1 << x86_pmu.num_counters) - 1; | 1503 | perf_counter_mask = (1 << x86_pmu.num_counters) - 1; |
1507 | perf_max_counters = x86_pmu.num_counters; | 1504 | perf_max_counters = x86_pmu.num_counters; |
1508 | 1505 | ||
1509 | if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) { | 1506 | if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) { |
1510 | x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED; | ||
1511 | WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!", | 1507 | WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!", |
1512 | x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED); | 1508 | x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED); |
1509 | x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED; | ||
1513 | } | 1510 | } |
1514 | 1511 | ||
1515 | perf_counter_mask |= | 1512 | perf_counter_mask |= |
1516 | ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED; | 1513 | ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED; |
1514 | x86_pmu.intel_ctrl = perf_counter_mask; | ||
1517 | 1515 | ||
1518 | perf_counters_lapic_init(); | 1516 | perf_counters_lapic_init(); |
1519 | register_die_notifier(&perf_counter_nmi_notifier); | 1517 | register_die_notifier(&perf_counter_nmi_notifier); |
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index 95ea5fa7d444..c8405718a4c3 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include "dumpstack.h" | 22 | #include "dumpstack.h" |
23 | 23 | ||
24 | int panic_on_unrecovered_nmi; | 24 | int panic_on_unrecovered_nmi; |
25 | int panic_on_io_nmi; | ||
25 | unsigned int code_bytes = 64; | 26 | unsigned int code_bytes = 64; |
26 | int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE; | 27 | int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE; |
27 | static int die_counter; | 28 | static int die_counter; |
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 7271fa33d791..c4ca89d9aaf4 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c | |||
@@ -1383,6 +1383,8 @@ static unsigned long ram_alignment(resource_size_t pos) | |||
1383 | return 32*1024*1024; | 1383 | return 32*1024*1024; |
1384 | } | 1384 | } |
1385 | 1385 | ||
1386 | #define MAX_RESOURCE_SIZE ((resource_size_t)-1) | ||
1387 | |||
1386 | void __init e820_reserve_resources_late(void) | 1388 | void __init e820_reserve_resources_late(void) |
1387 | { | 1389 | { |
1388 | int i; | 1390 | int i; |
@@ -1400,17 +1402,19 @@ void __init e820_reserve_resources_late(void) | |||
1400 | * avoid stolen RAM: | 1402 | * avoid stolen RAM: |
1401 | */ | 1403 | */ |
1402 | for (i = 0; i < e820.nr_map; i++) { | 1404 | for (i = 0; i < e820.nr_map; i++) { |
1403 | struct e820entry *entry = &e820_saved.map[i]; | 1405 | struct e820entry *entry = &e820.map[i]; |
1404 | resource_size_t start, end; | 1406 | u64 start, end; |
1405 | 1407 | ||
1406 | if (entry->type != E820_RAM) | 1408 | if (entry->type != E820_RAM) |
1407 | continue; | 1409 | continue; |
1408 | start = entry->addr + entry->size; | 1410 | start = entry->addr + entry->size; |
1409 | end = round_up(start, ram_alignment(start)); | 1411 | end = round_up(start, ram_alignment(start)) - 1; |
1410 | if (start == end) | 1412 | if (end > MAX_RESOURCE_SIZE) |
1413 | end = MAX_RESOURCE_SIZE; | ||
1414 | if (start >= end) | ||
1411 | continue; | 1415 | continue; |
1412 | reserve_region_with_split(&iomem_resource, start, | 1416 | reserve_region_with_split(&iomem_resource, start, end, |
1413 | end - 1, "RAM buffer"); | 1417 | "RAM buffer"); |
1414 | } | 1418 | } |
1415 | } | 1419 | } |
1416 | 1420 | ||
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index 47630479b067..1a041bcf506b 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c | |||
@@ -211,11 +211,11 @@ static __init int iommu_setup(char *p) | |||
211 | #ifdef CONFIG_SWIOTLB | 211 | #ifdef CONFIG_SWIOTLB |
212 | if (!strncmp(p, "soft", 4)) | 212 | if (!strncmp(p, "soft", 4)) |
213 | swiotlb = 1; | 213 | swiotlb = 1; |
214 | #endif | ||
214 | if (!strncmp(p, "pt", 2)) { | 215 | if (!strncmp(p, "pt", 2)) { |
215 | iommu_pass_through = 1; | 216 | iommu_pass_through = 1; |
216 | return 1; | 217 | return 1; |
217 | } | 218 | } |
218 | #endif | ||
219 | 219 | ||
220 | gart_parse_options(p); | 220 | gart_parse_options(p); |
221 | 221 | ||
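
With the #endif moved up, the "pt" token is now parsed whether or not CONFIG_SWIOTLB is built in, so booting with iommu=pt sets iommu_pass_through unconditionally. A minimal user-space sketch of the same strncmp prefix dispatch, with the two flags reduced to plain ints purely for illustration:

#include <stdio.h>
#include <string.h>

static int iommu_pass_through;
static int use_swiotlb;

static void parse_iommu_opt(const char *p)
{
	if (!strncmp(p, "soft", 4))
		use_swiotlb = 1;
	if (!strncmp(p, "pt", 2))
		iommu_pass_through = 1;
}

int main(void)
{
	parse_iommu_opt("pt");
	printf("pass_through=%d swiotlb=%d\n", iommu_pass_through, use_swiotlb);
	return 0;
}
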
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index be5ae80f897f..de2cab132844 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -289,6 +289,20 @@ void * __init extend_brk(size_t size, size_t align) | |||
289 | return ret; | 289 | return ret; |
290 | } | 290 | } |
291 | 291 | ||
292 | #ifdef CONFIG_X86_64 | ||
293 | static void __init init_gbpages(void) | ||
294 | { | ||
295 | if (direct_gbpages && cpu_has_gbpages) | ||
296 | printk(KERN_INFO "Using GB pages for direct mapping\n"); | ||
297 | else | ||
298 | direct_gbpages = 0; | ||
299 | } | ||
300 | #else | ||
301 | static inline void init_gbpages(void) | ||
302 | { | ||
303 | } | ||
304 | #endif | ||
305 | |||
292 | static void __init reserve_brk(void) | 306 | static void __init reserve_brk(void) |
293 | { | 307 | { |
294 | if (_brk_end > _brk_start) | 308 | if (_brk_end > _brk_start) |
@@ -871,6 +885,8 @@ void __init setup_arch(char **cmdline_p) | |||
871 | 885 | ||
872 | reserve_brk(); | 886 | reserve_brk(); |
873 | 887 | ||
888 | init_gbpages(); | ||
889 | |||
874 | /* max_pfn_mapped is updated here */ | 890 | /* max_pfn_mapped is updated here */ |
875 | max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT); | 891 | max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT); |
876 | max_pfn_mapped = max_low_pfn_mapped; | 892 | max_pfn_mapped = max_low_pfn_mapped; |
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 9c3f0823e6aa..29a3eef7cf4a 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c | |||
@@ -124,7 +124,7 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size, | |||
124 | } | 124 | } |
125 | 125 | ||
126 | /* | 126 | /* |
127 | * Remap allocator | 127 | * Large page remap allocator |
128 | * | 128 | * |
129 | * This allocator uses PMD page as unit. A PMD page is allocated for | 129 | * This allocator uses PMD page as unit. A PMD page is allocated for |
130 | * each cpu and each is remapped into vmalloc area using PMD mapping. | 130 | * each cpu and each is remapped into vmalloc area using PMD mapping. |
@@ -137,105 +137,185 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size, | |||
137 | * better than only using 4k mappings while still being NUMA friendly. | 137 | * better than only using 4k mappings while still being NUMA friendly. |
138 | */ | 138 | */ |
139 | #ifdef CONFIG_NEED_MULTIPLE_NODES | 139 | #ifdef CONFIG_NEED_MULTIPLE_NODES |
140 | static size_t pcpur_size __initdata; | 140 | struct pcpul_ent { |
141 | static void **pcpur_ptrs __initdata; | 141 | unsigned int cpu; |
142 | void *ptr; | ||
143 | }; | ||
144 | |||
145 | static size_t pcpul_size; | ||
146 | static struct pcpul_ent *pcpul_map; | ||
147 | static struct vm_struct pcpul_vm; | ||
142 | 148 | ||
143 | static struct page * __init pcpur_get_page(unsigned int cpu, int pageno) | 149 | static struct page * __init pcpul_get_page(unsigned int cpu, int pageno) |
144 | { | 150 | { |
145 | size_t off = (size_t)pageno << PAGE_SHIFT; | 151 | size_t off = (size_t)pageno << PAGE_SHIFT; |
146 | 152 | ||
147 | if (off >= pcpur_size) | 153 | if (off >= pcpul_size) |
148 | return NULL; | 154 | return NULL; |
149 | 155 | ||
150 | return virt_to_page(pcpur_ptrs[cpu] + off); | 156 | return virt_to_page(pcpul_map[cpu].ptr + off); |
151 | } | 157 | } |
152 | 158 | ||
153 | static ssize_t __init setup_pcpu_remap(size_t static_size) | 159 | static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen) |
154 | { | 160 | { |
155 | static struct vm_struct vm; | 161 | size_t map_size, dyn_size; |
156 | size_t ptrs_size, dyn_size; | ||
157 | unsigned int cpu; | 162 | unsigned int cpu; |
163 | int i, j; | ||
158 | ssize_t ret; | 164 | ssize_t ret; |
159 | 165 | ||
160 | /* | 166 | if (!chosen) { |
161 | * If large page isn't supported, there's no benefit in doing | 167 | size_t vm_size = VMALLOC_END - VMALLOC_START; |
162 | * this. Also, on non-NUMA, embedding is better. | 168 | size_t tot_size = num_possible_cpus() * PMD_SIZE; |
163 | * | 169 | |
164 | * NOTE: disabled for now. | 170 | /* on non-NUMA, embedding is better */ |
165 | */ | 171 | if (!pcpu_need_numa()) |
166 | if (true || !cpu_has_pse || !pcpu_need_numa()) | 172 | return -EINVAL; |
173 | |||
174 | /* don't consume more than 20% of vmalloc area */ | ||
175 | if (tot_size > vm_size / 5) { | ||
176 | pr_info("PERCPU: too large chunk size %zuMB for " | ||
177 | "large page remap\n", tot_size >> 20); | ||
178 | return -EINVAL; | ||
179 | } | ||
180 | } | ||
181 | |||
182 | /* need PSE */ | ||
183 | if (!cpu_has_pse) { | ||
184 | pr_warning("PERCPU: lpage allocator requires PSE\n"); | ||
167 | return -EINVAL; | 185 | return -EINVAL; |
186 | } | ||
168 | 187 | ||
169 | /* | 188 | /* |
170 | * Currently supports only single page. Supporting multiple | 189 | * Currently supports only single page. Supporting multiple |
171 | * pages won't be too difficult if it ever becomes necessary. | 190 | * pages won't be too difficult if it ever becomes necessary. |
172 | */ | 191 | */ |
173 | pcpur_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE + | 192 | pcpul_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE + |
174 | PERCPU_DYNAMIC_RESERVE); | 193 | PERCPU_DYNAMIC_RESERVE); |
175 | if (pcpur_size > PMD_SIZE) { | 194 | if (pcpul_size > PMD_SIZE) { |
176 | pr_warning("PERCPU: static data is larger than large page, " | 195 | pr_warning("PERCPU: static data is larger than large page, " |
177 | "can't use large page\n"); | 196 | "can't use large page\n"); |
178 | return -EINVAL; | 197 | return -EINVAL; |
179 | } | 198 | } |
180 | dyn_size = pcpur_size - static_size - PERCPU_FIRST_CHUNK_RESERVE; | 199 | dyn_size = pcpul_size - static_size - PERCPU_FIRST_CHUNK_RESERVE; |
181 | 200 | ||
182 | /* allocate pointer array and alloc large pages */ | 201 | /* allocate pointer array and alloc large pages */ |
183 | ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0])); | 202 | map_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpul_map[0])); |
184 | pcpur_ptrs = alloc_bootmem(ptrs_size); | 203 | pcpul_map = alloc_bootmem(map_size); |
185 | 204 | ||
186 | for_each_possible_cpu(cpu) { | 205 | for_each_possible_cpu(cpu) { |
187 | pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PMD_SIZE, PMD_SIZE); | 206 | pcpul_map[cpu].cpu = cpu; |
188 | if (!pcpur_ptrs[cpu]) | 207 | pcpul_map[cpu].ptr = pcpu_alloc_bootmem(cpu, PMD_SIZE, |
208 | PMD_SIZE); | ||
209 | if (!pcpul_map[cpu].ptr) { | ||
210 | pr_warning("PERCPU: failed to allocate large page " | ||
211 | "for cpu%u\n", cpu); | ||
189 | goto enomem; | 212 | goto enomem; |
213 | } | ||
190 | 214 | ||
191 | /* | 215 | /* |
192 | * Only use pcpur_size bytes and give back the rest. | 216 | * Only use pcpul_size bytes and give back the rest. |
193 | * | 217 | * |
194 | * Ingo: The 2MB up-rounding bootmem is needed to make | 218 | * Ingo: The 2MB up-rounding bootmem is needed to make |
195 | * sure the partial 2MB page is still fully RAM - it's | 219 | * sure the partial 2MB page is still fully RAM - it's |
196 | * not well-specified to have a PAT-incompatible area | 220 | * not well-specified to have a PAT-incompatible area |
197 | * (unmapped RAM, device memory, etc.) in that hole. | 221 | * (unmapped RAM, device memory, etc.) in that hole. |
198 | */ | 222 | */ |
199 | free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size), | 223 | free_bootmem(__pa(pcpul_map[cpu].ptr + pcpul_size), |
200 | PMD_SIZE - pcpur_size); | 224 | PMD_SIZE - pcpul_size); |
201 | 225 | ||
202 | memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size); | 226 | memcpy(pcpul_map[cpu].ptr, __per_cpu_load, static_size); |
203 | } | 227 | } |
204 | 228 | ||
205 | /* allocate address and map */ | 229 | /* allocate address and map */ |
206 | vm.flags = VM_ALLOC; | 230 | pcpul_vm.flags = VM_ALLOC; |
207 | vm.size = num_possible_cpus() * PMD_SIZE; | 231 | pcpul_vm.size = num_possible_cpus() * PMD_SIZE; |
208 | vm_area_register_early(&vm, PMD_SIZE); | 232 | vm_area_register_early(&pcpul_vm, PMD_SIZE); |
209 | 233 | ||
210 | for_each_possible_cpu(cpu) { | 234 | for_each_possible_cpu(cpu) { |
211 | pmd_t *pmd; | 235 | pmd_t *pmd, pmd_v; |
212 | 236 | ||
213 | pmd = populate_extra_pmd((unsigned long)vm.addr | 237 | pmd = populate_extra_pmd((unsigned long)pcpul_vm.addr + |
214 | + cpu * PMD_SIZE); | 238 | cpu * PMD_SIZE); |
215 | set_pmd(pmd, pfn_pmd(page_to_pfn(virt_to_page(pcpur_ptrs[cpu])), | 239 | pmd_v = pfn_pmd(page_to_pfn(virt_to_page(pcpul_map[cpu].ptr)), |
216 | PAGE_KERNEL_LARGE)); | 240 | PAGE_KERNEL_LARGE); |
241 | set_pmd(pmd, pmd_v); | ||
217 | } | 242 | } |
218 | 243 | ||
219 | /* we're ready, commit */ | 244 | /* we're ready, commit */ |
220 | pr_info("PERCPU: Remapped at %p with large pages, static data " | 245 | pr_info("PERCPU: Remapped at %p with large pages, static data " |
221 | "%zu bytes\n", vm.addr, static_size); | 246 | "%zu bytes\n", pcpul_vm.addr, static_size); |
222 | 247 | ||
223 | ret = pcpu_setup_first_chunk(pcpur_get_page, static_size, | 248 | ret = pcpu_setup_first_chunk(pcpul_get_page, static_size, |
224 | PERCPU_FIRST_CHUNK_RESERVE, dyn_size, | 249 | PERCPU_FIRST_CHUNK_RESERVE, dyn_size, |
225 | PMD_SIZE, vm.addr, NULL); | 250 | PMD_SIZE, pcpul_vm.addr, NULL); |
226 | goto out_free_ar; | 251 | |
252 | /* sort pcpul_map array for pcpu_lpage_remapped() */ | ||
253 | for (i = 0; i < num_possible_cpus() - 1; i++) | ||
254 | for (j = i + 1; j < num_possible_cpus(); j++) | ||
255 | if (pcpul_map[i].ptr > pcpul_map[j].ptr) { | ||
256 | struct pcpul_ent tmp = pcpul_map[i]; | ||
257 | pcpul_map[i] = pcpul_map[j]; | ||
258 | pcpul_map[j] = tmp; | ||
259 | } | ||
260 | |||
261 | return ret; | ||
227 | 262 | ||
228 | enomem: | 263 | enomem: |
229 | for_each_possible_cpu(cpu) | 264 | for_each_possible_cpu(cpu) |
230 | if (pcpur_ptrs[cpu]) | 265 | if (pcpul_map[cpu].ptr) |
231 | free_bootmem(__pa(pcpur_ptrs[cpu]), PMD_SIZE); | 266 | free_bootmem(__pa(pcpul_map[cpu].ptr), pcpul_size); |
232 | ret = -ENOMEM; | 267 | free_bootmem(__pa(pcpul_map), map_size); |
233 | out_free_ar: | 268 | return -ENOMEM; |
234 | free_bootmem(__pa(pcpur_ptrs), ptrs_size); | 269 | } |
235 | return ret; | 270 | |
271 | /** | ||
272 | * pcpu_lpage_remapped - determine whether a kaddr is in pcpul recycled area | ||
273 | * @kaddr: the kernel address in question | ||
274 | * | ||
275 | * Determine whether @kaddr falls in the pcpul recycled area. This is | ||
276 | * used by pageattr to detect VM aliases and break up the pcpu PMD | ||
277 | * mapping such that the same physical page is not mapped under | ||
278 | * different attributes. | ||
279 | * | ||
280 | * The recycled area is always at the tail of a partially used PMD | ||
281 | * page. | ||
282 | * | ||
283 | * RETURNS: | ||
284 | * Address of corresponding remapped pcpu address if match is found; | ||
285 | * otherwise, NULL. | ||
286 | */ | ||
287 | void *pcpu_lpage_remapped(void *kaddr) | ||
288 | { | ||
289 | void *pmd_addr = (void *)((unsigned long)kaddr & PMD_MASK); | ||
290 | unsigned long offset = (unsigned long)kaddr & ~PMD_MASK; | ||
291 | int left = 0, right = num_possible_cpus() - 1; | ||
292 | int pos; | ||
293 | |||
294 | /* pcpul in use at all? */ | ||
295 | if (!pcpul_map) | ||
296 | return NULL; | ||
297 | |||
298 | /* okay, perform binary search */ | ||
299 | while (left <= right) { | ||
300 | pos = (left + right) / 2; | ||
301 | |||
302 | if (pcpul_map[pos].ptr < pmd_addr) | ||
303 | left = pos + 1; | ||
304 | else if (pcpul_map[pos].ptr > pmd_addr) | ||
305 | right = pos - 1; | ||
306 | else { | ||
307 | /* it shouldn't be in the area for the first chunk */ | ||
308 | WARN_ON(offset < pcpul_size); | ||
309 | |||
310 | return pcpul_vm.addr + | ||
311 | pcpul_map[pos].cpu * PMD_SIZE + offset; | ||
312 | } | ||
313 | } | ||
314 | |||
315 | return NULL; | ||
236 | } | 316 | } |
237 | #else | 317 | #else |
238 | static ssize_t __init setup_pcpu_remap(size_t static_size) | 318 | static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen) |
239 | { | 319 | { |
240 | return -EINVAL; | 320 | return -EINVAL; |
241 | } | 321 | } |
@@ -249,7 +329,7 @@ static ssize_t __init setup_pcpu_remap(size_t static_size) | |||
249 | * mapping so that it can use PMD mapping without additional TLB | 329 | * mapping so that it can use PMD mapping without additional TLB |
250 | * pressure. | 330 | * pressure. |
251 | */ | 331 | */ |
252 | static ssize_t __init setup_pcpu_embed(size_t static_size) | 332 | static ssize_t __init setup_pcpu_embed(size_t static_size, bool chosen) |
253 | { | 333 | { |
254 | size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE; | 334 | size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE; |
255 | 335 | ||
@@ -258,7 +338,7 @@ static ssize_t __init setup_pcpu_embed(size_t static_size) | |||
258 | * this. Also, embedding allocation doesn't play well with | 338 | * this. Also, embedding allocation doesn't play well with |
259 | * NUMA. | 339 | * NUMA. |
260 | */ | 340 | */ |
261 | if (!cpu_has_pse || pcpu_need_numa()) | 341 | if (!chosen && (!cpu_has_pse || pcpu_need_numa())) |
262 | return -EINVAL; | 342 | return -EINVAL; |
263 | 343 | ||
264 | return pcpu_embed_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE, | 344 | return pcpu_embed_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE, |
@@ -308,8 +388,11 @@ static ssize_t __init setup_pcpu_4k(size_t static_size) | |||
308 | void *ptr; | 388 | void *ptr; |
309 | 389 | ||
310 | ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE); | 390 | ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE); |
311 | if (!ptr) | 391 | if (!ptr) { |
392 | pr_warning("PERCPU: failed to allocate " | ||
393 | "4k page for cpu%u\n", cpu); | ||
312 | goto enomem; | 394 | goto enomem; |
395 | } | ||
313 | 396 | ||
314 | memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE); | 397 | memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE); |
315 | pcpu4k_pages[j++] = virt_to_page(ptr); | 398 | pcpu4k_pages[j++] = virt_to_page(ptr); |
@@ -333,6 +416,16 @@ out_free_ar: | |||
333 | return ret; | 416 | return ret; |
334 | } | 417 | } |
335 | 418 | ||
419 | /* for explicit first chunk allocator selection */ | ||
420 | static char pcpu_chosen_alloc[16] __initdata; | ||
421 | |||
422 | static int __init percpu_alloc_setup(char *str) | ||
423 | { | ||
424 | strncpy(pcpu_chosen_alloc, str, sizeof(pcpu_chosen_alloc) - 1); | ||
425 | return 0; | ||
426 | } | ||
427 | early_param("percpu_alloc", percpu_alloc_setup); | ||
428 | |||
336 | static inline void setup_percpu_segment(int cpu) | 429 | static inline void setup_percpu_segment(int cpu) |
337 | { | 430 | { |
338 | #ifdef CONFIG_X86_32 | 431 | #ifdef CONFIG_X86_32 |
@@ -346,11 +439,6 @@ static inline void setup_percpu_segment(int cpu) | |||
346 | #endif | 439 | #endif |
347 | } | 440 | } |
348 | 441 | ||
349 | /* | ||
350 | * Great future plan: | ||
351 | * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data. | ||
352 | * Always point %gs to its beginning | ||
353 | */ | ||
354 | void __init setup_per_cpu_areas(void) | 442 | void __init setup_per_cpu_areas(void) |
355 | { | 443 | { |
356 | size_t static_size = __per_cpu_end - __per_cpu_start; | 444 | size_t static_size = __per_cpu_end - __per_cpu_start; |
@@ -367,9 +455,26 @@ void __init setup_per_cpu_areas(void) | |||
367 | * of large page mappings. Please read comments on top of | 455 | * of large page mappings. Please read comments on top of |
368 | * each allocator for details. | 456 | * each allocator for details. |
369 | */ | 457 | */ |
370 | ret = setup_pcpu_remap(static_size); | 458 | ret = -EINVAL; |
371 | if (ret < 0) | 459 | if (strlen(pcpu_chosen_alloc)) { |
372 | ret = setup_pcpu_embed(static_size); | 460 | if (strcmp(pcpu_chosen_alloc, "4k")) { |
461 | if (!strcmp(pcpu_chosen_alloc, "lpage")) | ||
462 | ret = setup_pcpu_lpage(static_size, true); | ||
463 | else if (!strcmp(pcpu_chosen_alloc, "embed")) | ||
464 | ret = setup_pcpu_embed(static_size, true); | ||
465 | else | ||
466 | pr_warning("PERCPU: unknown allocator %s " | ||
467 | "specified\n", pcpu_chosen_alloc); | ||
468 | if (ret < 0) | ||
469 | pr_warning("PERCPU: %s allocator failed (%zd), " | ||
470 | "falling back to 4k\n", | ||
471 | pcpu_chosen_alloc, ret); | ||
472 | } | ||
473 | } else { | ||
474 | ret = setup_pcpu_lpage(static_size, false); | ||
475 | if (ret < 0) | ||
476 | ret = setup_pcpu_embed(static_size, false); | ||
477 | } | ||
373 | if (ret < 0) | 478 | if (ret < 0) |
374 | ret = setup_pcpu_4k(static_size); | 479 | ret = setup_pcpu_4k(static_size); |
375 | if (ret < 0) | 480 | if (ret < 0) |
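
pcpu_lpage_remapped() above depends on pcpul_map being sorted by pointer, so that any kernel address can be mapped back to its remapped per-cpu alias with a binary search over the PMD-sized units. A stand-alone sketch of that lookup, with PMD_SIZE replaced by an assumed 2 MiB UNIT_SIZE and the addresses made up for illustration:

#include <stdio.h>
#include <stdint.h>

#define UNIT_SIZE  (2UL << 20)		/* stand-in for PMD_SIZE */
#define UNIT_MASK  (~(UNIT_SIZE - 1))

struct ent { unsigned int cpu; uintptr_t base; };

/* map must be sorted by base; returns the matching entry or NULL */
static const struct ent *lookup(const struct ent *map, int n, uintptr_t addr)
{
	uintptr_t unit = addr & UNIT_MASK;	/* round down to unit start */
	int left = 0, right = n - 1;

	while (left <= right) {
		int pos = (left + right) / 2;

		if (map[pos].base < unit)
			left = pos + 1;
		else if (map[pos].base > unit)
			right = pos - 1;
		else
			return &map[pos];
	}
	return NULL;
}

int main(void)
{
	struct ent map[] = { { 0, 0x40000000 }, { 1, 0x40200000 } };
	const struct ent *e = lookup(map, 2, 0x402abcde);

	printf("%s cpu%u\n", e ? "hit" : "miss", e ? e->cpu : 0);
	return 0;
}

Boot-time selection follows the fallback order visible in setup_per_cpu_areas(): an explicit percpu_alloc=lpage, embed or 4k request is honoured first, otherwise lpage, embed and finally 4k are tried in turn.
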
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c index 124d40c575df..8ccabb8a2f6a 100644 --- a/arch/x86/kernel/tlb_uv.c +++ b/arch/x86/kernel/tlb_uv.c | |||
@@ -711,7 +711,6 @@ uv_activation_descriptor_init(int node, int pnode) | |||
711 | unsigned long pa; | 711 | unsigned long pa; |
712 | unsigned long m; | 712 | unsigned long m; |
713 | unsigned long n; | 713 | unsigned long n; |
714 | unsigned long mmr_image; | ||
715 | struct bau_desc *adp; | 714 | struct bau_desc *adp; |
716 | struct bau_desc *ad2; | 715 | struct bau_desc *ad2; |
717 | 716 | ||
@@ -727,12 +726,8 @@ uv_activation_descriptor_init(int node, int pnode) | |||
727 | n = pa >> uv_nshift; | 726 | n = pa >> uv_nshift; |
728 | m = pa & uv_mmask; | 727 | m = pa & uv_mmask; |
729 | 728 | ||
730 | mmr_image = uv_read_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE); | 729 | uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE, |
731 | if (mmr_image) { | 730 | (n << UV_DESC_BASE_PNODE_SHIFT | m)); |
732 | uv_write_global_mmr64(pnode, (unsigned long) | ||
733 | UVH_LB_BAU_SB_DESCRIPTOR_BASE, | ||
734 | (n << UV_DESC_BASE_PNODE_SHIFT | m)); | ||
735 | } | ||
736 | 731 | ||
737 | /* | 732 | /* |
738 | * initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each | 733 | * initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each |
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index a0f48f5671c0..5204332f475d 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c | |||
@@ -346,6 +346,9 @@ io_check_error(unsigned char reason, struct pt_regs *regs) | |||
346 | printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n"); | 346 | printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n"); |
347 | show_registers(regs); | 347 | show_registers(regs); |
348 | 348 | ||
349 | if (panic_on_io_nmi) | ||
350 | panic("NMI IOCK error: Not continuing"); | ||
351 | |||
349 | /* Re-enable the IOCK line, wait for a few seconds */ | 352 | /* Re-enable the IOCK line, wait for a few seconds */ |
350 | reason = (reason & 0xf) | 8; | 353 | reason = (reason & 0xf) | 8; |
351 | outb(reason, 0x61); | 354 | outb(reason, 0x61); |
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 5c3d6e81a7dc..7030b5f911bf 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -2157,7 +2157,7 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level) | |||
2157 | else | 2157 | else |
2158 | /* 32 bits PSE 4MB page */ | 2158 | /* 32 bits PSE 4MB page */ |
2159 | context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21); | 2159 | context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21); |
2160 | context->rsvd_bits_mask[1][0] = ~0ull; | 2160 | context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[1][0]; |
2161 | break; | 2161 | break; |
2162 | case PT32E_ROOT_LEVEL: | 2162 | case PT32E_ROOT_LEVEL: |
2163 | context->rsvd_bits_mask[0][2] = | 2163 | context->rsvd_bits_mask[0][2] = |
@@ -2170,7 +2170,7 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level) | |||
2170 | context->rsvd_bits_mask[1][1] = exb_bit_rsvd | | 2170 | context->rsvd_bits_mask[1][1] = exb_bit_rsvd | |
2171 | rsvd_bits(maxphyaddr, 62) | | 2171 | rsvd_bits(maxphyaddr, 62) | |
2172 | rsvd_bits(13, 20); /* large page */ | 2172 | rsvd_bits(13, 20); /* large page */ |
2173 | context->rsvd_bits_mask[1][0] = ~0ull; | 2173 | context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[1][0]; |
2174 | break; | 2174 | break; |
2175 | case PT64_ROOT_LEVEL: | 2175 | case PT64_ROOT_LEVEL: |
2176 | context->rsvd_bits_mask[0][3] = exb_bit_rsvd | | 2176 | context->rsvd_bits_mask[0][3] = exb_bit_rsvd | |
@@ -2186,7 +2186,7 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level) | |||
2186 | context->rsvd_bits_mask[1][1] = exb_bit_rsvd | | 2186 | context->rsvd_bits_mask[1][1] = exb_bit_rsvd | |
2187 | rsvd_bits(maxphyaddr, 51) | | 2187 | rsvd_bits(maxphyaddr, 51) | |
2188 | rsvd_bits(13, 20); /* large page */ | 2188 | rsvd_bits(13, 20); /* large page */ |
2189 | context->rsvd_bits_mask[1][0] = ~0ull; | 2189 | context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[1][0]; |
2190 | break; | 2190 | break; |
2191 | } | 2191 | } |
2192 | } | 2192 | } |
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 258e4591e1ca..67785f635399 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h | |||
@@ -281,7 +281,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, | |||
281 | { | 281 | { |
282 | unsigned access = gw->pt_access; | 282 | unsigned access = gw->pt_access; |
283 | struct kvm_mmu_page *shadow_page; | 283 | struct kvm_mmu_page *shadow_page; |
284 | u64 spte, *sptep; | 284 | u64 spte, *sptep = NULL; |
285 | int direct; | 285 | int direct; |
286 | gfn_t table_gfn; | 286 | gfn_t table_gfn; |
287 | int r; | 287 | int r; |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index e770bf349ec4..356a0ce85c68 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -3012,6 +3012,12 @@ static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
3012 | return 1; | 3012 | return 1; |
3013 | } | 3013 | } |
3014 | 3014 | ||
3015 | static int handle_vmx_insn(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
3016 | { | ||
3017 | kvm_queue_exception(vcpu, UD_VECTOR); | ||
3018 | return 1; | ||
3019 | } | ||
3020 | |||
3015 | static int handle_invlpg(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 3021 | static int handle_invlpg(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) |
3016 | { | 3022 | { |
3017 | unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); | 3023 | unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); |
@@ -3198,6 +3204,15 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu, | |||
3198 | [EXIT_REASON_HLT] = handle_halt, | 3204 | [EXIT_REASON_HLT] = handle_halt, |
3199 | [EXIT_REASON_INVLPG] = handle_invlpg, | 3205 | [EXIT_REASON_INVLPG] = handle_invlpg, |
3200 | [EXIT_REASON_VMCALL] = handle_vmcall, | 3206 | [EXIT_REASON_VMCALL] = handle_vmcall, |
3207 | [EXIT_REASON_VMCLEAR] = handle_vmx_insn, | ||
3208 | [EXIT_REASON_VMLAUNCH] = handle_vmx_insn, | ||
3209 | [EXIT_REASON_VMPTRLD] = handle_vmx_insn, | ||
3210 | [EXIT_REASON_VMPTRST] = handle_vmx_insn, | ||
3211 | [EXIT_REASON_VMREAD] = handle_vmx_insn, | ||
3212 | [EXIT_REASON_VMRESUME] = handle_vmx_insn, | ||
3213 | [EXIT_REASON_VMWRITE] = handle_vmx_insn, | ||
3214 | [EXIT_REASON_VMOFF] = handle_vmx_insn, | ||
3215 | [EXIT_REASON_VMON] = handle_vmx_insn, | ||
3201 | [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold, | 3216 | [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold, |
3202 | [EXIT_REASON_APIC_ACCESS] = handle_apic_access, | 3217 | [EXIT_REASON_APIC_ACCESS] = handle_apic_access, |
3203 | [EXIT_REASON_WBINVD] = handle_wbinvd, | 3218 | [EXIT_REASON_WBINVD] = handle_wbinvd, |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 249540f98513..fe5474aec41a 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -898,6 +898,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) | |||
898 | case MSR_VM_HSAVE_PA: | 898 | case MSR_VM_HSAVE_PA: |
899 | case MSR_P6_EVNTSEL0: | 899 | case MSR_P6_EVNTSEL0: |
900 | case MSR_P6_EVNTSEL1: | 900 | case MSR_P6_EVNTSEL1: |
901 | case MSR_K7_EVNTSEL0: | ||
901 | data = 0; | 902 | data = 0; |
902 | break; | 903 | break; |
903 | case MSR_MTRRcap: | 904 | case MSR_MTRRcap: |
diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c index c1b6c232e02b..616de4628d60 100644 --- a/arch/x86/kvm/x86_emulate.c +++ b/arch/x86/kvm/x86_emulate.c | |||
@@ -1361,7 +1361,7 @@ static inline int writeback(struct x86_emulate_ctxt *ctxt, | |||
1361 | return 0; | 1361 | return 0; |
1362 | } | 1362 | } |
1363 | 1363 | ||
1364 | void toggle_interruptibility(struct x86_emulate_ctxt *ctxt, u32 mask) | 1364 | static void toggle_interruptibility(struct x86_emulate_ctxt *ctxt, u32 mask) |
1365 | { | 1365 | { |
1366 | u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(ctxt->vcpu, mask); | 1366 | u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(ctxt->vcpu, mask); |
1367 | /* | 1367 | /* |
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c index f4568605d7d5..ff485d361182 100644 --- a/arch/x86/lib/delay.c +++ b/arch/x86/lib/delay.c | |||
@@ -55,8 +55,10 @@ static void delay_tsc(unsigned long loops) | |||
55 | 55 | ||
56 | preempt_disable(); | 56 | preempt_disable(); |
57 | cpu = smp_processor_id(); | 57 | cpu = smp_processor_id(); |
58 | rdtsc_barrier(); | ||
58 | rdtscl(bclock); | 59 | rdtscl(bclock); |
59 | for (;;) { | 60 | for (;;) { |
61 | rdtsc_barrier(); | ||
60 | rdtscl(now); | 62 | rdtscl(now); |
61 | if ((now - bclock) >= loops) | 63 | if ((now - bclock) >= loops) |
62 | break; | 64 | break; |
@@ -78,6 +80,7 @@ static void delay_tsc(unsigned long loops) | |||
78 | if (unlikely(cpu != smp_processor_id())) { | 80 | if (unlikely(cpu != smp_processor_id())) { |
79 | loops -= (now - bclock); | 81 | loops -= (now - bclock); |
80 | cpu = smp_processor_id(); | 82 | cpu = smp_processor_id(); |
83 | rdtsc_barrier(); | ||
81 | rdtscl(bclock); | 84 | rdtscl(bclock); |
82 | } | 85 | } |
83 | } | 86 | } |
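
delay_tsc() now issues rdtsc_barrier() before each TSC read so the spin loop is not skewed by speculative or out-of-order execution of rdtsc. A rough user-space equivalent, assuming a GCC-style compiler targeting x86 that provides __rdtsc() and _mm_lfence() (lfence only approximates what rdtsc_barrier() does across CPU vendors):

#include <stdio.h>
#include <x86intrin.h>

static unsigned long long read_tsc(void)
{
	_mm_lfence();		/* order the read against earlier instructions */
	return __rdtsc();
}

int main(void)
{
	unsigned long long start = read_tsc();
	unsigned long long now;

	do {
		now = read_tsc();
	} while (now - start < 1000000ULL);	/* spin for ~1M cycles */

	printf("spun %llu cycles\n", now - start);
	return 0;
}
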
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index f53b57e4086f..47ce9a2ce5e7 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c | |||
@@ -177,20 +177,6 @@ static int __meminit save_mr(struct map_range *mr, int nr_range, | |||
177 | return nr_range; | 177 | return nr_range; |
178 | } | 178 | } |
179 | 179 | ||
180 | #ifdef CONFIG_X86_64 | ||
181 | static void __init init_gbpages(void) | ||
182 | { | ||
183 | if (direct_gbpages && cpu_has_gbpages) | ||
184 | printk(KERN_INFO "Using GB pages for direct mapping\n"); | ||
185 | else | ||
186 | direct_gbpages = 0; | ||
187 | } | ||
188 | #else | ||
189 | static inline void init_gbpages(void) | ||
190 | { | ||
191 | } | ||
192 | #endif | ||
193 | |||
194 | /* | 180 | /* |
195 | * Setup the direct mapping of the physical memory at PAGE_OFFSET. | 181 | * Setup the direct mapping of the physical memory at PAGE_OFFSET. |
196 | * This runs before bootmem is initialized and gets pages directly from | 182 | * This runs before bootmem is initialized and gets pages directly from |
@@ -210,9 +196,6 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, | |||
210 | 196 | ||
211 | printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end); | 197 | printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end); |
212 | 198 | ||
213 | if (!after_bootmem) | ||
214 | init_gbpages(); | ||
215 | |||
216 | #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK) | 199 | #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK) |
217 | /* | 200 | /* |
218 | * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages. | 201 | * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages. |
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index c4378f4fd4a5..b177652251a4 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c | |||
@@ -598,6 +598,8 @@ void __init paging_init(void) | |||
598 | 598 | ||
599 | sparse_memory_present_with_active_regions(MAX_NUMNODES); | 599 | sparse_memory_present_with_active_regions(MAX_NUMNODES); |
600 | sparse_init(); | 600 | sparse_init(); |
601 | /* clear the default setting with node 0 */ | ||
602 | nodes_clear(node_states[N_NORMAL_MEMORY]); | ||
601 | free_area_init_nodes(max_zone_pfns); | 603 | free_area_init_nodes(max_zone_pfns); |
602 | } | 604 | } |
603 | 605 | ||
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 3cfe9ced8a4c..1b734d7a8966 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/interrupt.h> | 11 | #include <linux/interrupt.h> |
12 | #include <linux/seq_file.h> | 12 | #include <linux/seq_file.h> |
13 | #include <linux/debugfs.h> | 13 | #include <linux/debugfs.h> |
14 | #include <linux/pfn.h> | ||
14 | 15 | ||
15 | #include <asm/e820.h> | 16 | #include <asm/e820.h> |
16 | #include <asm/processor.h> | 17 | #include <asm/processor.h> |
@@ -681,8 +682,9 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias); | |||
681 | static int cpa_process_alias(struct cpa_data *cpa) | 682 | static int cpa_process_alias(struct cpa_data *cpa) |
682 | { | 683 | { |
683 | struct cpa_data alias_cpa; | 684 | struct cpa_data alias_cpa; |
684 | int ret = 0; | 685 | unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT); |
685 | unsigned long temp_cpa_vaddr, vaddr; | 686 | unsigned long vaddr, remapped; |
687 | int ret; | ||
686 | 688 | ||
687 | if (cpa->pfn >= max_pfn_mapped) | 689 | if (cpa->pfn >= max_pfn_mapped) |
688 | return 0; | 690 | return 0; |
@@ -706,42 +708,55 @@ static int cpa_process_alias(struct cpa_data *cpa) | |||
706 | PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) { | 708 | PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) { |
707 | 709 | ||
708 | alias_cpa = *cpa; | 710 | alias_cpa = *cpa; |
709 | temp_cpa_vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT); | 711 | alias_cpa.vaddr = &laddr; |
710 | alias_cpa.vaddr = &temp_cpa_vaddr; | ||
711 | alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY); | 712 | alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY); |
712 | 713 | ||
713 | |||
714 | ret = __change_page_attr_set_clr(&alias_cpa, 0); | 714 | ret = __change_page_attr_set_clr(&alias_cpa, 0); |
715 | if (ret) | ||
716 | return ret; | ||
715 | } | 717 | } |
716 | 718 | ||
717 | #ifdef CONFIG_X86_64 | 719 | #ifdef CONFIG_X86_64 |
718 | if (ret) | ||
719 | return ret; | ||
720 | /* | 720 | /* |
721 | * No need to redo, when the primary call touched the high | 721 | * If the primary call didn't touch the high mapping already |
722 | * mapping already: | 722 | * and the physical address is inside the kernel map, we need |
723 | */ | ||
724 | if (within(vaddr, (unsigned long) _text, _brk_end)) | ||
725 | return 0; | ||
726 | |||
727 | /* | ||
728 | * If the physical address is inside the kernel map, we need | ||
729 | * to touch the high mapped kernel as well: | 723 | * to touch the high mapped kernel as well: |
730 | */ | 724 | */ |
731 | if (!within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn())) | 725 | if (!within(vaddr, (unsigned long)_text, _brk_end) && |
732 | return 0; | 726 | within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn())) { |
727 | unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) + | ||
728 | __START_KERNEL_map - phys_base; | ||
729 | alias_cpa = *cpa; | ||
730 | alias_cpa.vaddr = &temp_cpa_vaddr; | ||
731 | alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY); | ||
733 | 732 | ||
734 | alias_cpa = *cpa; | 733 | /* |
735 | temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) + __START_KERNEL_map - phys_base; | 734 | * The high mapping range is imprecise, so ignore the |
736 | alias_cpa.vaddr = &temp_cpa_vaddr; | 735 | * return value. |
737 | alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY); | 736 | */ |
737 | __change_page_attr_set_clr(&alias_cpa, 0); | ||
738 | } | ||
739 | #endif | ||
738 | 740 | ||
739 | /* | 741 | /* |
740 | * The high mapping range is imprecise, so ignore the return value. | 742 | * If the PMD page was partially used for per-cpu remapping, |
743 | * the recycled area needs to be split and modified. Because | ||
744 | * the area is always proper subset of a PMD page | ||
745 | * cpa->numpages is guaranteed to be 1 for these areas, so | ||
746 | * there's no need to loop over and check for further remaps. | ||
741 | */ | 747 | */ |
742 | __change_page_attr_set_clr(&alias_cpa, 0); | 748 | remapped = (unsigned long)pcpu_lpage_remapped((void *)laddr); |
743 | #endif | 749 | if (remapped) { |
744 | return ret; | 750 | WARN_ON(cpa->numpages > 1); |
751 | alias_cpa = *cpa; | ||
752 | alias_cpa.vaddr = &remapped; | ||
753 | alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY); | ||
754 | ret = __change_page_attr_set_clr(&alias_cpa, 0); | ||
755 | if (ret) | ||
756 | return ret; | ||
757 | } | ||
758 | |||
759 | return 0; | ||
745 | } | 760 | } |
746 | 761 | ||
747 | static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias) | 762 | static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias) |
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c index d277ef1eea51..b3d20b9cac63 100644 --- a/arch/x86/power/cpu.c +++ b/arch/x86/power/cpu.c | |||
@@ -244,7 +244,7 @@ static void __restore_processor_state(struct saved_context *ctxt) | |||
244 | do_fpu_end(); | 244 | do_fpu_end(); |
245 | mtrr_ap_init(); | 245 | mtrr_ap_init(); |
246 | 246 | ||
247 | #ifdef CONFIG_X86_32 | 247 | #ifdef CONFIG_X86_OLD_MCE |
248 | mcheck_init(&boot_cpu_data); | 248 | mcheck_init(&boot_cpu_data); |
249 | #endif | 249 | #endif |
250 | } | 250 | } |
diff --git a/block/Makefile b/block/Makefile index e9fa4dd690f2..6c54ed0ff755 100644 --- a/block/Makefile +++ b/block/Makefile | |||
@@ -5,7 +5,7 @@ | |||
5 | obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \ | 5 | obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \ |
6 | blk-barrier.o blk-settings.o blk-ioc.o blk-map.o \ | 6 | blk-barrier.o blk-settings.o blk-ioc.o blk-map.o \ |
7 | blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \ | 7 | blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \ |
8 | ioctl.o genhd.o scsi_ioctl.o cmd-filter.o | 8 | ioctl.o genhd.o scsi_ioctl.o |
9 | 9 | ||
10 | obj-$(CONFIG_BLK_DEV_BSG) += bsg.o | 10 | obj-$(CONFIG_BLK_DEV_BSG) += bsg.o |
11 | obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o | 11 | obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o |
diff --git a/block/blk-core.c b/block/blk-core.c index b06cf5c2a829..4b45435c6eaf 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
@@ -595,8 +595,6 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) | |||
595 | 595 | ||
596 | q->sg_reserved_size = INT_MAX; | 596 | q->sg_reserved_size = INT_MAX; |
597 | 597 | ||
598 | blk_set_cmd_filter_defaults(&q->cmd_filter); | ||
599 | |||
600 | /* | 598 | /* |
601 | * all done | 599 | * all done |
602 | */ | 600 | */ |
@@ -1172,6 +1170,11 @@ static int __make_request(struct request_queue *q, struct bio *bio) | |||
1172 | const int unplug = bio_unplug(bio); | 1170 | const int unplug = bio_unplug(bio); |
1173 | int rw_flags; | 1171 | int rw_flags; |
1174 | 1172 | ||
1173 | if (bio_barrier(bio) && bio_has_data(bio) && | ||
1174 | (q->next_ordered == QUEUE_ORDERED_NONE)) { | ||
1175 | bio_endio(bio, -EOPNOTSUPP); | ||
1176 | return 0; | ||
1177 | } | ||
1175 | /* | 1178 | /* |
1176 | * low level driver can indicate that it wants pages above a | 1179 | * low level driver can indicate that it wants pages above a |
1177 | * certain limit bounced to low memory (ie for highmem, or even | 1180 | * certain limit bounced to low memory (ie for highmem, or even |
@@ -1472,11 +1475,6 @@ static inline void __generic_make_request(struct bio *bio) | |||
1472 | err = -EOPNOTSUPP; | 1475 | err = -EOPNOTSUPP; |
1473 | goto end_io; | 1476 | goto end_io; |
1474 | } | 1477 | } |
1475 | if (bio_barrier(bio) && bio_has_data(bio) && | ||
1476 | (q->next_ordered == QUEUE_ORDERED_NONE)) { | ||
1477 | err = -EOPNOTSUPP; | ||
1478 | goto end_io; | ||
1479 | } | ||
1480 | 1478 | ||
1481 | ret = q->make_request_fn(q, bio); | 1479 | ret = q->make_request_fn(q, bio); |
1482 | } while (ret); | 1480 | } while (ret); |
@@ -2365,7 +2363,7 @@ int blk_rq_prep_clone(struct request *rq, struct request *rq_src, | |||
2365 | __bio_clone(bio, bio_src); | 2363 | __bio_clone(bio, bio_src); |
2366 | 2364 | ||
2367 | if (bio_integrity(bio_src) && | 2365 | if (bio_integrity(bio_src) && |
2368 | bio_integrity_clone(bio, bio_src, gfp_mask)) | 2366 | bio_integrity_clone(bio, bio_src, gfp_mask, bs)) |
2369 | goto free_and_out; | 2367 | goto free_and_out; |
2370 | 2368 | ||
2371 | if (bio_ctr && bio_ctr(bio, bio_src, data)) | 2369 | if (bio_ctr && bio_ctr(bio, bio_src, data)) |
diff --git a/block/blk-merge.c b/block/blk-merge.c index 39ce64432ba6..e1999679a4d5 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c | |||
@@ -350,6 +350,12 @@ static int attempt_merge(struct request_queue *q, struct request *req, | |||
350 | if (blk_integrity_rq(req) != blk_integrity_rq(next)) | 350 | if (blk_integrity_rq(req) != blk_integrity_rq(next)) |
351 | return 0; | 351 | return 0; |
352 | 352 | ||
353 | /* don't merge requests of different failfast settings */ | ||
354 | if (blk_failfast_dev(req) != blk_failfast_dev(next) || | ||
355 | blk_failfast_transport(req) != blk_failfast_transport(next) || | ||
356 | blk_failfast_driver(req) != blk_failfast_driver(next)) | ||
357 | return 0; | ||
358 | |||
353 | /* | 359 | /* |
354 | * If we are allowed to merge, then append bio list | 360 | * If we are allowed to merge, then append bio list |
355 | * from next to rq and release next. merge_requests_fn | 361 | * from next to rq and release next. merge_requests_fn |
diff --git a/block/bsg.c b/block/bsg.c index e7d475254248..5f184bb3ff9e 100644 --- a/block/bsg.c +++ b/block/bsg.c | |||
@@ -186,7 +186,7 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq, | |||
186 | return -EFAULT; | 186 | return -EFAULT; |
187 | 187 | ||
188 | if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) { | 188 | if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) { |
189 | if (blk_verify_command(&q->cmd_filter, rq->cmd, has_write_perm)) | 189 | if (blk_verify_command(rq->cmd, has_write_perm)) |
190 | return -EPERM; | 190 | return -EPERM; |
191 | } else if (!capable(CAP_SYS_RAWIO)) | 191 | } else if (!capable(CAP_SYS_RAWIO)) |
192 | return -EPERM; | 192 | return -EPERM; |
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 833ec18eaa63..87276eb83f7f 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c | |||
@@ -71,6 +71,51 @@ struct cfq_rb_root { | |||
71 | #define CFQ_RB_ROOT (struct cfq_rb_root) { RB_ROOT, NULL, } | 71 | #define CFQ_RB_ROOT (struct cfq_rb_root) { RB_ROOT, NULL, } |
72 | 72 | ||
73 | /* | 73 | /* |
74 | * Per process-grouping structure | ||
75 | */ | ||
76 | struct cfq_queue { | ||
77 | /* reference count */ | ||
78 | atomic_t ref; | ||
79 | /* various state flags, see below */ | ||
80 | unsigned int flags; | ||
81 | /* parent cfq_data */ | ||
82 | struct cfq_data *cfqd; | ||
83 | /* service_tree member */ | ||
84 | struct rb_node rb_node; | ||
85 | /* service_tree key */ | ||
86 | unsigned long rb_key; | ||
87 | /* prio tree member */ | ||
88 | struct rb_node p_node; | ||
89 | /* prio tree root we belong to, if any */ | ||
90 | struct rb_root *p_root; | ||
91 | /* sorted list of pending requests */ | ||
92 | struct rb_root sort_list; | ||
93 | /* if fifo isn't expired, next request to serve */ | ||
94 | struct request *next_rq; | ||
95 | /* requests queued in sort_list */ | ||
96 | int queued[2]; | ||
97 | /* currently allocated requests */ | ||
98 | int allocated[2]; | ||
99 | /* fifo list of requests in sort_list */ | ||
100 | struct list_head fifo; | ||
101 | |||
102 | unsigned long slice_end; | ||
103 | long slice_resid; | ||
104 | unsigned int slice_dispatch; | ||
105 | |||
106 | /* pending metadata requests */ | ||
107 | int meta_pending; | ||
108 | /* number of requests that are on the dispatch list or inside driver */ | ||
109 | int dispatched; | ||
110 | |||
111 | /* io prio of this group */ | ||
112 | unsigned short ioprio, org_ioprio; | ||
113 | unsigned short ioprio_class, org_ioprio_class; | ||
114 | |||
115 | pid_t pid; | ||
116 | }; | ||
117 | |||
118 | /* | ||
74 | * Per block device queue structure | 119 | * Per block device queue structure |
75 | */ | 120 | */ |
76 | struct cfq_data { | 121 | struct cfq_data { |
@@ -135,51 +180,11 @@ struct cfq_data { | |||
135 | unsigned int cfq_slice_idle; | 180 | unsigned int cfq_slice_idle; |
136 | 181 | ||
137 | struct list_head cic_list; | 182 | struct list_head cic_list; |
138 | }; | ||
139 | |||
140 | /* | ||
141 | * Per process-grouping structure | ||
142 | */ | ||
143 | struct cfq_queue { | ||
144 | /* reference count */ | ||
145 | atomic_t ref; | ||
146 | /* various state flags, see below */ | ||
147 | unsigned int flags; | ||
148 | /* parent cfq_data */ | ||
149 | struct cfq_data *cfqd; | ||
150 | /* service_tree member */ | ||
151 | struct rb_node rb_node; | ||
152 | /* service_tree key */ | ||
153 | unsigned long rb_key; | ||
154 | /* prio tree member */ | ||
155 | struct rb_node p_node; | ||
156 | /* prio tree root we belong to, if any */ | ||
157 | struct rb_root *p_root; | ||
158 | /* sorted list of pending requests */ | ||
159 | struct rb_root sort_list; | ||
160 | /* if fifo isn't expired, next request to serve */ | ||
161 | struct request *next_rq; | ||
162 | /* requests queued in sort_list */ | ||
163 | int queued[2]; | ||
164 | /* currently allocated requests */ | ||
165 | int allocated[2]; | ||
166 | /* fifo list of requests in sort_list */ | ||
167 | struct list_head fifo; | ||
168 | 183 | ||
169 | unsigned long slice_end; | 184 | /* |
170 | long slice_resid; | 185 | * Fallback dummy cfqq for extreme OOM conditions |
171 | unsigned int slice_dispatch; | 186 | */ |
172 | 187 | struct cfq_queue oom_cfqq; | |
173 | /* pending metadata requests */ | ||
174 | int meta_pending; | ||
175 | /* number of requests that are on the dispatch list or inside driver */ | ||
176 | int dispatched; | ||
177 | |||
178 | /* io prio of this group */ | ||
179 | unsigned short ioprio, org_ioprio; | ||
180 | unsigned short ioprio_class, org_ioprio_class; | ||
181 | |||
182 | pid_t pid; | ||
183 | }; | 188 | }; |
184 | 189 | ||
185 | enum cfqq_state_flags { | 190 | enum cfqq_state_flags { |
@@ -1641,6 +1646,26 @@ static void cfq_ioc_set_ioprio(struct io_context *ioc) | |||
1641 | ioc->ioprio_changed = 0; | 1646 | ioc->ioprio_changed = 0; |
1642 | } | 1647 | } |
1643 | 1648 | ||
1649 | static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq, | ||
1650 | pid_t pid, int is_sync) | ||
1651 | { | ||
1652 | RB_CLEAR_NODE(&cfqq->rb_node); | ||
1653 | RB_CLEAR_NODE(&cfqq->p_node); | ||
1654 | INIT_LIST_HEAD(&cfqq->fifo); | ||
1655 | |||
1656 | atomic_set(&cfqq->ref, 0); | ||
1657 | cfqq->cfqd = cfqd; | ||
1658 | |||
1659 | cfq_mark_cfqq_prio_changed(cfqq); | ||
1660 | |||
1661 | if (is_sync) { | ||
1662 | if (!cfq_class_idle(cfqq)) | ||
1663 | cfq_mark_cfqq_idle_window(cfqq); | ||
1664 | cfq_mark_cfqq_sync(cfqq); | ||
1665 | } | ||
1666 | cfqq->pid = pid; | ||
1667 | } | ||
1668 | |||
1644 | static struct cfq_queue * | 1669 | static struct cfq_queue * |
1645 | cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync, | 1670 | cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync, |
1646 | struct io_context *ioc, gfp_t gfp_mask) | 1671 | struct io_context *ioc, gfp_t gfp_mask) |
@@ -1653,56 +1678,40 @@ retry: | |||
1653 | /* cic always exists here */ | 1678 | /* cic always exists here */ |
1654 | cfqq = cic_to_cfqq(cic, is_sync); | 1679 | cfqq = cic_to_cfqq(cic, is_sync); |
1655 | 1680 | ||
1656 | if (!cfqq) { | 1681 | /* |
1682 | * Always try a new alloc if we fell back to the OOM cfqq | ||
1683 | * originally, since it should just be a temporary situation. | ||
1684 | */ | ||
1685 | if (!cfqq || cfqq == &cfqd->oom_cfqq) { | ||
1686 | cfqq = NULL; | ||
1657 | if (new_cfqq) { | 1687 | if (new_cfqq) { |
1658 | cfqq = new_cfqq; | 1688 | cfqq = new_cfqq; |
1659 | new_cfqq = NULL; | 1689 | new_cfqq = NULL; |
1660 | } else if (gfp_mask & __GFP_WAIT) { | 1690 | } else if (gfp_mask & __GFP_WAIT) { |
1661 | /* | ||
1662 | * Inform the allocator of the fact that we will | ||
1663 | * just repeat this allocation if it fails, to allow | ||
1664 | * the allocator to do whatever it needs to attempt to | ||
1665 | * free memory. | ||
1666 | */ | ||
1667 | spin_unlock_irq(cfqd->queue->queue_lock); | 1691 | spin_unlock_irq(cfqd->queue->queue_lock); |
1668 | new_cfqq = kmem_cache_alloc_node(cfq_pool, | 1692 | new_cfqq = kmem_cache_alloc_node(cfq_pool, |
1669 | gfp_mask | __GFP_NOFAIL | __GFP_ZERO, | 1693 | gfp_mask | __GFP_ZERO, |
1670 | cfqd->queue->node); | 1694 | cfqd->queue->node); |
1671 | spin_lock_irq(cfqd->queue->queue_lock); | 1695 | spin_lock_irq(cfqd->queue->queue_lock); |
1672 | goto retry; | 1696 | if (new_cfqq) |
1697 | goto retry; | ||
1673 | } else { | 1698 | } else { |
1674 | cfqq = kmem_cache_alloc_node(cfq_pool, | 1699 | cfqq = kmem_cache_alloc_node(cfq_pool, |
1675 | gfp_mask | __GFP_ZERO, | 1700 | gfp_mask | __GFP_ZERO, |
1676 | cfqd->queue->node); | 1701 | cfqd->queue->node); |
1677 | if (!cfqq) | ||
1678 | goto out; | ||
1679 | } | 1702 | } |
1680 | 1703 | ||
1681 | RB_CLEAR_NODE(&cfqq->rb_node); | 1704 | if (cfqq) { |
1682 | RB_CLEAR_NODE(&cfqq->p_node); | 1705 | cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync); |
1683 | INIT_LIST_HEAD(&cfqq->fifo); | 1706 | cfq_init_prio_data(cfqq, ioc); |
1684 | 1707 | cfq_log_cfqq(cfqd, cfqq, "alloced"); | |
1685 | atomic_set(&cfqq->ref, 0); | 1708 | } else |
1686 | cfqq->cfqd = cfqd; | 1709 | cfqq = &cfqd->oom_cfqq; |
1687 | |||
1688 | cfq_mark_cfqq_prio_changed(cfqq); | ||
1689 | |||
1690 | cfq_init_prio_data(cfqq, ioc); | ||
1691 | |||
1692 | if (is_sync) { | ||
1693 | if (!cfq_class_idle(cfqq)) | ||
1694 | cfq_mark_cfqq_idle_window(cfqq); | ||
1695 | cfq_mark_cfqq_sync(cfqq); | ||
1696 | } | ||
1697 | cfqq->pid = current->pid; | ||
1698 | cfq_log_cfqq(cfqd, cfqq, "alloced"); | ||
1699 | } | 1710 | } |
1700 | 1711 | ||
1701 | if (new_cfqq) | 1712 | if (new_cfqq) |
1702 | kmem_cache_free(cfq_pool, new_cfqq); | 1713 | kmem_cache_free(cfq_pool, new_cfqq); |
1703 | 1714 | ||
1704 | out: | ||
1705 | WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq); | ||
1706 | return cfqq; | 1715 | return cfqq; |
1707 | } | 1716 | } |
1708 | 1717 | ||
@@ -1735,11 +1744,8 @@ cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct io_context *ioc, | |||
1735 | cfqq = *async_cfqq; | 1744 | cfqq = *async_cfqq; |
1736 | } | 1745 | } |
1737 | 1746 | ||
1738 | if (!cfqq) { | 1747 | if (!cfqq) |
1739 | cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask); | 1748 | cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask); |
1740 | if (!cfqq) | ||
1741 | return NULL; | ||
1742 | } | ||
1743 | 1749 | ||
1744 | /* | 1750 | /* |
1745 | * pin the queue now that it's allocated, scheduler exit will prune it | 1751 | * pin the queue now that it's allocated, scheduler exit will prune it |
@@ -2307,10 +2313,6 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask) | |||
2307 | cfqq = cic_to_cfqq(cic, is_sync); | 2313 | cfqq = cic_to_cfqq(cic, is_sync); |
2308 | if (!cfqq) { | 2314 | if (!cfqq) { |
2309 | cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask); | 2315 | cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask); |
2310 | |||
2311 | if (!cfqq) | ||
2312 | goto queue_fail; | ||
2313 | |||
2314 | cic_set_cfqq(cic, cfqq, is_sync); | 2316 | cic_set_cfqq(cic, cfqq, is_sync); |
2315 | } | 2317 | } |
2316 | 2318 | ||
@@ -2465,6 +2467,14 @@ static void *cfq_init_queue(struct request_queue *q) | |||
2465 | for (i = 0; i < CFQ_PRIO_LISTS; i++) | 2467 | for (i = 0; i < CFQ_PRIO_LISTS; i++) |
2466 | cfqd->prio_trees[i] = RB_ROOT; | 2468 | cfqd->prio_trees[i] = RB_ROOT; |
2467 | 2469 | ||
2470 | /* | ||
2471 | * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues. | ||
2472 | * Grab a permanent reference to it, so that the normal code flow | ||
2473 | * will not attempt to free it. | ||
2474 | */ | ||
2475 | cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0); | ||
2476 | atomic_inc(&cfqd->oom_cfqq.ref); | ||
2477 | |||
2468 | INIT_LIST_HEAD(&cfqd->cic_list); | 2478 | INIT_LIST_HEAD(&cfqd->cic_list); |
2469 | 2479 | ||
2470 | cfqd->queue = q; | 2480 | cfqd->queue = q; |
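The cfq-iosched.c hunks above replace the "allocation failed, return NULL" path with a permanently referenced cfqd->oom_cfqq that cfq_find_alloc_queue() hands back whenever kmem_cache_alloc_node() fails, so callers such as cfq_set_request() no longer need an error path. The following is a minimal user-space C sketch of the same "preallocated fallback object" idea, not the kernel code; the names (queue_ctx, get_queue, oom_queue) are hypothetical stand-ins for the cfq structures.

    #include <stdio.h>
    #include <stdlib.h>

    struct queue_ctx {
            int  id;
            int  refcount;
            char name[16];
    };

    /* Statically allocated fallback, never freed; analogous to cfqd->oom_cfqq. */
    static struct queue_ctx oom_queue = { .id = -1, .refcount = 1, .name = "oom" };

    /* Always returns a usable object: a fresh allocation if possible,
     * otherwise the shared fallback.  Callers need no NULL check. */
    static struct queue_ctx *get_queue(int id)
    {
            struct queue_ctx *q = calloc(1, sizeof(*q));

            if (!q)
                    return &oom_queue;

            q->id = id;
            q->refcount = 1;
            snprintf(q->name, sizeof(q->name), "queue-%d", id);
            return q;
    }

    static void put_queue(struct queue_ctx *q)
    {
            /* The fallback keeps a permanent reference, mirroring the
             * atomic_inc(&cfqd->oom_cfqq.ref) done in cfq_init_queue(). */
            if (q != &oom_queue && --q->refcount == 0)
                    free(q);
    }

    int main(void)
    {
            struct queue_ctx *q = get_queue(42);

            printf("got %s\n", q->name);
            put_queue(q);
            return 0;
    }

The design point the diff makes is that the degraded path stays on the normal code flow: a temporary OOM yields a shared, lower-quality queue rather than an error, and the next request retries a real allocation.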
diff --git a/block/cmd-filter.c b/block/cmd-filter.c deleted file mode 100644 index 572bbc2f900d..000000000000 --- a/block/cmd-filter.c +++ /dev/null | |||
@@ -1,233 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright 2004 Peter M. Jones <pjones@redhat.com> | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public Licens | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111- | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #include <linux/list.h> | ||
21 | #include <linux/genhd.h> | ||
22 | #include <linux/spinlock.h> | ||
23 | #include <linux/capability.h> | ||
24 | #include <linux/bitops.h> | ||
25 | #include <linux/blkdev.h> | ||
26 | |||
27 | #include <scsi/scsi.h> | ||
28 | #include <linux/cdrom.h> | ||
29 | |||
30 | int blk_verify_command(struct blk_cmd_filter *filter, | ||
31 | unsigned char *cmd, fmode_t has_write_perm) | ||
32 | { | ||
33 | /* root can do any command. */ | ||
34 | if (capable(CAP_SYS_RAWIO)) | ||
35 | return 0; | ||
36 | |||
37 | /* if there's no filter set, assume we're filtering everything out */ | ||
38 | if (!filter) | ||
39 | return -EPERM; | ||
40 | |||
41 | /* Anybody who can open the device can do a read-safe command */ | ||
42 | if (test_bit(cmd[0], filter->read_ok)) | ||
43 | return 0; | ||
44 | |||
45 | /* Write-safe commands require a writable open */ | ||
46 | if (test_bit(cmd[0], filter->write_ok) && has_write_perm) | ||
47 | return 0; | ||
48 | |||
49 | return -EPERM; | ||
50 | } | ||
51 | EXPORT_SYMBOL(blk_verify_command); | ||
52 | |||
53 | #if 0 | ||
54 | /* and now, the sysfs stuff */ | ||
55 | static ssize_t rcf_cmds_show(struct blk_cmd_filter *filter, char *page, | ||
56 | int rw) | ||
57 | { | ||
58 | char *npage = page; | ||
59 | unsigned long *okbits; | ||
60 | int i; | ||
61 | |||
62 | if (rw == READ) | ||
63 | okbits = filter->read_ok; | ||
64 | else | ||
65 | okbits = filter->write_ok; | ||
66 | |||
67 | for (i = 0; i < BLK_SCSI_MAX_CMDS; i++) { | ||
68 | if (test_bit(i, okbits)) { | ||
69 | npage += sprintf(npage, "0x%02x", i); | ||
70 | if (i < BLK_SCSI_MAX_CMDS - 1) | ||
71 | sprintf(npage++, " "); | ||
72 | } | ||
73 | } | ||
74 | |||
75 | if (npage != page) | ||
76 | npage += sprintf(npage, "\n"); | ||
77 | |||
78 | return npage - page; | ||
79 | } | ||
80 | |||
81 | static ssize_t rcf_readcmds_show(struct blk_cmd_filter *filter, char *page) | ||
82 | { | ||
83 | return rcf_cmds_show(filter, page, READ); | ||
84 | } | ||
85 | |||
86 | static ssize_t rcf_writecmds_show(struct blk_cmd_filter *filter, | ||
87 | char *page) | ||
88 | { | ||
89 | return rcf_cmds_show(filter, page, WRITE); | ||
90 | } | ||
91 | |||
92 | static ssize_t rcf_cmds_store(struct blk_cmd_filter *filter, | ||
93 | const char *page, size_t count, int rw) | ||
94 | { | ||
95 | unsigned long okbits[BLK_SCSI_CMD_PER_LONG], *target_okbits; | ||
96 | int cmd, set; | ||
97 | char *p, *status; | ||
98 | |||
99 | if (rw == READ) { | ||
100 | memcpy(&okbits, filter->read_ok, sizeof(okbits)); | ||
101 | target_okbits = filter->read_ok; | ||
102 | } else { | ||
103 | memcpy(&okbits, filter->write_ok, sizeof(okbits)); | ||
104 | target_okbits = filter->write_ok; | ||
105 | } | ||
106 | |||
107 | while ((p = strsep((char **)&page, " ")) != NULL) { | ||
108 | set = 1; | ||
109 | |||
110 | if (p[0] == '+') { | ||
111 | p++; | ||
112 | } else if (p[0] == '-') { | ||
113 | set = 0; | ||
114 | p++; | ||
115 | } | ||
116 | |||
117 | cmd = simple_strtol(p, &status, 16); | ||
118 | |||
119 | /* either of these cases means invalid input, so do nothing. */ | ||
120 | if ((status == p) || cmd >= BLK_SCSI_MAX_CMDS) | ||
121 | return -EINVAL; | ||
122 | |||
123 | if (set) | ||
124 | __set_bit(cmd, okbits); | ||
125 | else | ||
126 | __clear_bit(cmd, okbits); | ||
127 | } | ||
128 | |||
129 | memcpy(target_okbits, okbits, sizeof(okbits)); | ||
130 | return count; | ||
131 | } | ||
132 | |||
133 | static ssize_t rcf_readcmds_store(struct blk_cmd_filter *filter, | ||
134 | const char *page, size_t count) | ||
135 | { | ||
136 | return rcf_cmds_store(filter, page, count, READ); | ||
137 | } | ||
138 | |||
139 | static ssize_t rcf_writecmds_store(struct blk_cmd_filter *filter, | ||
140 | const char *page, size_t count) | ||
141 | { | ||
142 | return rcf_cmds_store(filter, page, count, WRITE); | ||
143 | } | ||
144 | |||
145 | struct rcf_sysfs_entry { | ||
146 | struct attribute attr; | ||
147 | ssize_t (*show)(struct blk_cmd_filter *, char *); | ||
148 | ssize_t (*store)(struct blk_cmd_filter *, const char *, size_t); | ||
149 | }; | ||
150 | |||
151 | static struct rcf_sysfs_entry rcf_readcmds_entry = { | ||
152 | .attr = { .name = "read_table", .mode = S_IRUGO | S_IWUSR }, | ||
153 | .show = rcf_readcmds_show, | ||
154 | .store = rcf_readcmds_store, | ||
155 | }; | ||
156 | |||
157 | static struct rcf_sysfs_entry rcf_writecmds_entry = { | ||
158 | .attr = {.name = "write_table", .mode = S_IRUGO | S_IWUSR }, | ||
159 | .show = rcf_writecmds_show, | ||
160 | .store = rcf_writecmds_store, | ||
161 | }; | ||
162 | |||
163 | static struct attribute *default_attrs[] = { | ||
164 | &rcf_readcmds_entry.attr, | ||
165 | &rcf_writecmds_entry.attr, | ||
166 | NULL, | ||
167 | }; | ||
168 | |||
169 | #define to_rcf(atr) container_of((atr), struct rcf_sysfs_entry, attr) | ||
170 | |||
171 | static ssize_t | ||
172 | rcf_attr_show(struct kobject *kobj, struct attribute *attr, char *page) | ||
173 | { | ||
174 | struct rcf_sysfs_entry *entry = to_rcf(attr); | ||
175 | struct blk_cmd_filter *filter; | ||
176 | |||
177 | filter = container_of(kobj, struct blk_cmd_filter, kobj); | ||
178 | if (entry->show) | ||
179 | return entry->show(filter, page); | ||
180 | |||
181 | return 0; | ||
182 | } | ||
183 | |||
184 | static ssize_t | ||
185 | rcf_attr_store(struct kobject *kobj, struct attribute *attr, | ||
186 | const char *page, size_t length) | ||
187 | { | ||
188 | struct rcf_sysfs_entry *entry = to_rcf(attr); | ||
189 | struct blk_cmd_filter *filter; | ||
190 | |||
191 | if (!capable(CAP_SYS_RAWIO)) | ||
192 | return -EPERM; | ||
193 | |||
194 | if (!entry->store) | ||
195 | return -EINVAL; | ||
196 | |||
197 | filter = container_of(kobj, struct blk_cmd_filter, kobj); | ||
198 | return entry->store(filter, page, length); | ||
199 | } | ||
200 | |||
201 | static struct sysfs_ops rcf_sysfs_ops = { | ||
202 | .show = rcf_attr_show, | ||
203 | .store = rcf_attr_store, | ||
204 | }; | ||
205 | |||
206 | static struct kobj_type rcf_ktype = { | ||
207 | .sysfs_ops = &rcf_sysfs_ops, | ||
208 | .default_attrs = default_attrs, | ||
209 | }; | ||
210 | |||
211 | int blk_register_filter(struct gendisk *disk) | ||
212 | { | ||
213 | int ret; | ||
214 | struct blk_cmd_filter *filter = &disk->queue->cmd_filter; | ||
215 | |||
216 | ret = kobject_init_and_add(&filter->kobj, &rcf_ktype, | ||
217 | &disk_to_dev(disk)->kobj, | ||
218 | "%s", "cmd_filter"); | ||
219 | if (ret < 0) | ||
220 | return ret; | ||
221 | |||
222 | return 0; | ||
223 | } | ||
224 | EXPORT_SYMBOL(blk_register_filter); | ||
225 | |||
226 | void blk_unregister_filter(struct gendisk *disk) | ||
227 | { | ||
228 | struct blk_cmd_filter *filter = &disk->queue->cmd_filter; | ||
229 | |||
230 | kobject_put(&filter->kobj); | ||
231 | } | ||
232 | EXPORT_SYMBOL(blk_unregister_filter); | ||
233 | #endif | ||
diff --git a/block/elevator.c b/block/elevator.c index ca861927ba41..6f2375339a99 100644 --- a/block/elevator.c +++ b/block/elevator.c | |||
@@ -100,6 +100,14 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio) | |||
100 | if (bio_integrity(bio) != blk_integrity_rq(rq)) | 100 | if (bio_integrity(bio) != blk_integrity_rq(rq)) |
101 | return 0; | 101 | return 0; |
102 | 102 | ||
103 | /* | ||
104 | * Don't merge if failfast settings don't match | ||
105 | */ | ||
106 | if (bio_failfast_dev(bio) != blk_failfast_dev(rq) || | ||
107 | bio_failfast_transport(bio) != blk_failfast_transport(rq) || | ||
108 | bio_failfast_driver(bio) != blk_failfast_driver(rq)) | ||
109 | return 0; | ||
110 | |||
103 | if (!elv_iosched_allow_merge(rq, bio)) | 111 | if (!elv_iosched_allow_merge(rq, bio)) |
104 | return 0; | 112 | return 0; |
105 | 113 | ||
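Both attempt_merge() in blk-merge.c and elv_rq_merge_ok() above now refuse to merge I/O whose failfast settings differ, so a merged request keeps a single, well-defined retry policy. Below is a self-contained sketch of the flag comparison, assuming simple bit masks; the macro and helper names are illustrative, not the kernel's (the kernel keeps the equivalent bits in the bio and request flag words).

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative failfast bits. */
    #define FAILFAST_DEV        (1u << 0)
    #define FAILFAST_TRANSPORT  (1u << 1)
    #define FAILFAST_DRIVER     (1u << 2)
    #define FAILFAST_MASK       (FAILFAST_DEV | FAILFAST_TRANSPORT | FAILFAST_DRIVER)

    /* Merging is only allowed when every failfast bit matches. */
    static bool failfast_compatible(unsigned int a_flags, unsigned int b_flags)
    {
            return (a_flags & FAILFAST_MASK) == (b_flags & FAILFAST_MASK);
    }

    int main(void)
    {
            unsigned int rq_flags  = FAILFAST_DEV;
            unsigned int bio_flags = FAILFAST_DEV | FAILFAST_TRANSPORT;

            printf("merge allowed: %d\n", failfast_compatible(rq_flags, bio_flags));
            return 0;
    }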
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c index 5f8e798ede4e..f0e0ce0a607d 100644 --- a/block/scsi_ioctl.c +++ b/block/scsi_ioctl.c | |||
@@ -32,6 +32,11 @@ | |||
32 | #include <scsi/scsi_ioctl.h> | 32 | #include <scsi/scsi_ioctl.h> |
33 | #include <scsi/scsi_cmnd.h> | 33 | #include <scsi/scsi_cmnd.h> |
34 | 34 | ||
35 | struct blk_cmd_filter { | ||
36 | unsigned long read_ok[BLK_SCSI_CMD_PER_LONG]; | ||
37 | unsigned long write_ok[BLK_SCSI_CMD_PER_LONG]; | ||
38 | } blk_default_cmd_filter; | ||
39 | |||
35 | /* Command group 3 is reserved and should never be used. */ | 40 | /* Command group 3 is reserved and should never be used. */ |
36 | const unsigned char scsi_command_size_tbl[8] = | 41 | const unsigned char scsi_command_size_tbl[8] = |
37 | { | 42 | { |
@@ -105,7 +110,7 @@ static int sg_emulated_host(struct request_queue *q, int __user *p) | |||
105 | return put_user(1, p); | 110 | return put_user(1, p); |
106 | } | 111 | } |
107 | 112 | ||
108 | void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter) | 113 | static void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter) |
109 | { | 114 | { |
110 | /* Basic read-only commands */ | 115 | /* Basic read-only commands */ |
111 | __set_bit(TEST_UNIT_READY, filter->read_ok); | 116 | __set_bit(TEST_UNIT_READY, filter->read_ok); |
@@ -187,14 +192,37 @@ void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter) | |||
187 | __set_bit(GPCMD_SET_STREAMING, filter->write_ok); | 192 | __set_bit(GPCMD_SET_STREAMING, filter->write_ok); |
188 | __set_bit(GPCMD_SET_READ_AHEAD, filter->write_ok); | 193 | __set_bit(GPCMD_SET_READ_AHEAD, filter->write_ok); |
189 | } | 194 | } |
190 | EXPORT_SYMBOL_GPL(blk_set_cmd_filter_defaults); | 195 | |
196 | int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm) | ||
197 | { | ||
198 | struct blk_cmd_filter *filter = &blk_default_cmd_filter; | ||
199 | |||
200 | /* root can do any command. */ | ||
201 | if (capable(CAP_SYS_RAWIO)) | ||
202 | return 0; | ||
203 | |||
204 | /* if there's no filter set, assume we're filtering everything out */ | ||
205 | if (!filter) | ||
206 | return -EPERM; | ||
207 | |||
208 | /* Anybody who can open the device can do a read-safe command */ | ||
209 | if (test_bit(cmd[0], filter->read_ok)) | ||
210 | return 0; | ||
211 | |||
212 | /* Write-safe commands require a writable open */ | ||
213 | if (test_bit(cmd[0], filter->write_ok) && has_write_perm) | ||
214 | return 0; | ||
215 | |||
216 | return -EPERM; | ||
217 | } | ||
218 | EXPORT_SYMBOL(blk_verify_command); | ||
191 | 219 | ||
192 | static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq, | 220 | static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq, |
193 | struct sg_io_hdr *hdr, fmode_t mode) | 221 | struct sg_io_hdr *hdr, fmode_t mode) |
194 | { | 222 | { |
195 | if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len)) | 223 | if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len)) |
196 | return -EFAULT; | 224 | return -EFAULT; |
197 | if (blk_verify_command(&q->cmd_filter, rq->cmd, mode & FMODE_WRITE)) | 225 | if (blk_verify_command(rq->cmd, mode & FMODE_WRITE)) |
198 | return -EPERM; | 226 | return -EPERM; |
199 | 227 | ||
200 | /* | 228 | /* |
@@ -427,7 +455,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode, | |||
427 | if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len)) | 455 | if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len)) |
428 | goto error; | 456 | goto error; |
429 | 457 | ||
430 | err = blk_verify_command(&q->cmd_filter, rq->cmd, mode & FMODE_WRITE); | 458 | err = blk_verify_command(rq->cmd, mode & FMODE_WRITE); |
431 | if (err) | 459 | if (err) |
432 | goto error; | 460 | goto error; |
433 | 461 | ||
@@ -645,5 +673,10 @@ int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mod | |||
645 | blk_put_queue(q); | 673 | blk_put_queue(q); |
646 | return err; | 674 | return err; |
647 | } | 675 | } |
648 | |||
649 | EXPORT_SYMBOL(scsi_cmd_ioctl); | 676 | EXPORT_SYMBOL(scsi_cmd_ioctl); |
677 | |||
678 | int __init blk_scsi_ioctl_init(void) | ||
679 | { | ||
680 | blk_set_cmd_filter_defaults(&blk_default_cmd_filter); | ||
681 | return 0; | ||
682 | } | ||
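With block/cmd-filter.c removed, scsi_ioctl.c now owns one static blk_default_cmd_filter, populated once at init by blk_scsi_ioctl_init() calling blk_set_cmd_filter_defaults(), and blk_verify_command() drops its per-queue filter argument. The sketch below shows the underlying bitmap allow-list check in plain, runnable user-space C; the bit helpers are stand-ins for the kernel's __set_bit()/test_bit(), and the CAP_SYS_RAWIO root bypass is omitted.

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_CMDS      256
    #define BITS_PER_LONG (8 * sizeof(unsigned long))
    #define CMD_LONGS     (MAX_CMDS / BITS_PER_LONG)

    struct cmd_filter {
            unsigned long read_ok[CMD_LONGS];
            unsigned long write_ok[CMD_LONGS];
    };

    /* Single shared table, analogous to blk_default_cmd_filter. */
    static struct cmd_filter default_filter;

    static void set_ok(unsigned long *map, unsigned int cmd)
    {
            map[cmd / BITS_PER_LONG] |= 1ul << (cmd % BITS_PER_LONG);
    }

    static bool is_ok(const unsigned long *map, unsigned int cmd)
    {
            return map[cmd / BITS_PER_LONG] & (1ul << (cmd % BITS_PER_LONG));
    }

    /* 0 = allowed, -1 = rejected; mirrors the read_ok/write_ok split. */
    static int verify_command(unsigned char opcode, bool has_write_perm)
    {
            if (is_ok(default_filter.read_ok, opcode))
                    return 0;
            if (has_write_perm && is_ok(default_filter.write_ok, opcode))
                    return 0;
            return -1;
    }

    int main(void)
    {
            set_ok(default_filter.read_ok, 0x00);   /* e.g. TEST UNIT READY */
            set_ok(default_filter.write_ok, 0x2a);  /* e.g. WRITE(10) */

            printf("read-safe 0x00:       %d\n", verify_command(0x00, false));
            printf("write 0x2a, no perm:  %d\n", verify_command(0x2a, false));
            printf("write 0x2a, perm:     %d\n", verify_command(0x2a, true));
            return 0;
    }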
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index c7a527c08a09..65a0655e7fc8 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c | |||
@@ -226,8 +226,18 @@ static inline void addQ(struct hlist_head *list, CommandList_struct *c) | |||
226 | 226 | ||
227 | static inline void removeQ(CommandList_struct *c) | 227 | static inline void removeQ(CommandList_struct *c) |
228 | { | 228 | { |
229 | if (WARN_ON(hlist_unhashed(&c->list))) | 229 | /* |
230 | * After kexec/dump some commands might still | ||
231 | * be in flight, which the firmware will try | ||
232 | * to complete. Resetting the firmware doesn't work | ||
233 | * with old fw revisions, so we have to mark | ||
234 | * them off as 'stale' to prevent the driver from | ||
235 | * falling over. | ||
236 | */ | ||
237 | if (WARN_ON(hlist_unhashed(&c->list))) { | ||
238 | c->cmd_type = CMD_MSG_STALE; | ||
230 | return; | 239 | return; |
240 | } | ||
231 | 241 | ||
232 | hlist_del_init(&c->list); | 242 | hlist_del_init(&c->list); |
233 | } | 243 | } |
@@ -4246,7 +4256,8 @@ static void fail_all_cmds(unsigned long ctlr) | |||
4246 | while (!hlist_empty(&h->cmpQ)) { | 4256 | while (!hlist_empty(&h->cmpQ)) { |
4247 | c = hlist_entry(h->cmpQ.first, CommandList_struct, list); | 4257 | c = hlist_entry(h->cmpQ.first, CommandList_struct, list); |
4248 | removeQ(c); | 4258 | removeQ(c); |
4249 | c->err_info->CommandStatus = CMD_HARDWARE_ERR; | 4259 | if (c->cmd_type != CMD_MSG_STALE) |
4260 | c->err_info->CommandStatus = CMD_HARDWARE_ERR; | ||
4250 | if (c->cmd_type == CMD_RWREQ) { | 4261 | if (c->cmd_type == CMD_RWREQ) { |
4251 | complete_command(h, c, 0); | 4262 | complete_command(h, c, 0); |
4252 | } else if (c->cmd_type == CMD_IOCTL_PEND) | 4263 | } else if (c->cmd_type == CMD_IOCTL_PEND) |
diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h index cd665b00c7c5..dbaed1ea0da3 100644 --- a/drivers/block/cciss_cmd.h +++ b/drivers/block/cciss_cmd.h | |||
@@ -274,6 +274,7 @@ typedef struct _ErrorInfo_struct { | |||
274 | #define CMD_SCSI 0x03 | 274 | #define CMD_SCSI 0x03 |
275 | #define CMD_MSG_DONE 0x04 | 275 | #define CMD_MSG_DONE 0x04 |
276 | #define CMD_MSG_TIMEOUT 0x05 | 276 | #define CMD_MSG_TIMEOUT 0x05 |
277 | #define CMD_MSG_STALE 0xff | ||
277 | 278 | ||
278 | /* This structure needs to be divisible by 8 for new | 279 | /* This structure needs to be divisible by 8 for new |
279 | * indexing method. | 280 | * indexing method. |
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 862b40c90181..91b753013780 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c | |||
@@ -3327,7 +3327,10 @@ static inline int set_geometry(unsigned int cmd, struct floppy_struct *g, | |||
3327 | if (!capable(CAP_SYS_ADMIN)) | 3327 | if (!capable(CAP_SYS_ADMIN)) |
3328 | return -EPERM; | 3328 | return -EPERM; |
3329 | mutex_lock(&open_lock); | 3329 | mutex_lock(&open_lock); |
3330 | LOCK_FDC(drive, 1); | 3330 | if (lock_fdc(drive, 1)) { |
3331 | mutex_unlock(&open_lock); | ||
3332 | return -EINTR; | ||
3333 | } | ||
3331 | floppy_type[type] = *g; | 3334 | floppy_type[type] = *g; |
3332 | floppy_type[type].name = "user format"; | 3335 | floppy_type[type].name = "user format"; |
3333 | for (cnt = type << 2; cnt < (type << 2) + 4; cnt++) | 3336 | for (cnt = type << 2; cnt < (type << 2) + 4; cnt++) |
diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c index a19e935847b0..913aa8d3f1c5 100644 --- a/drivers/char/tty_ldisc.c +++ b/drivers/char/tty_ldisc.c | |||
@@ -867,15 +867,22 @@ void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty) | |||
867 | tty_ldisc_wait_idle(tty); | 867 | tty_ldisc_wait_idle(tty); |
868 | 868 | ||
869 | /* | 869 | /* |
870 | * Shutdown the current line discipline, and reset it to N_TTY. | 870 | * Now kill off the ldisc |
871 | * | ||
872 | * FIXME: this MUST get fixed for the new reflocking | ||
873 | */ | 871 | */ |
872 | tty_ldisc_close(tty, tty->ldisc); | ||
873 | tty_ldisc_put(tty->ldisc); | ||
874 | /* Force an oops if we mess this up */ | ||
875 | tty->ldisc = NULL; | ||
876 | |||
877 | /* Ensure the next open requests the N_TTY ldisc */ | ||
878 | tty_set_termios_ldisc(tty, N_TTY); | ||
874 | 879 | ||
875 | tty_ldisc_reinit(tty); | ||
876 | /* This will need doing differently if we need to lock */ | 880 | /* This will need doing differently if we need to lock */ |
877 | if (o_tty) | 881 | if (o_tty) |
878 | tty_ldisc_release(o_tty, NULL); | 882 | tty_ldisc_release(o_tty, NULL); |
883 | |||
884 | /* And the memory resources remaining (buffers, termios) will be | ||
885 | disposed of when the kref hits zero */ | ||
879 | } | 886 | } |
880 | 887 | ||
881 | /** | 888 | /** |
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c index 9ffb05f4095d..93c2322feab7 100644 --- a/drivers/clocksource/sh_tmu.c +++ b/drivers/clocksource/sh_tmu.c | |||
@@ -161,7 +161,7 @@ static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta, | |||
161 | if (periodic) | 161 | if (periodic) |
162 | sh_tmu_write(p, TCOR, delta); | 162 | sh_tmu_write(p, TCOR, delta); |
163 | else | 163 | else |
164 | sh_tmu_write(p, TCOR, 0); | 164 | sh_tmu_write(p, TCOR, 0xffffffff); |
165 | 165 | ||
166 | sh_tmu_write(p, TCNT, delta); | 166 | sh_tmu_write(p, TCNT, delta); |
167 | 167 | ||
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h index 3493c6bdb820..871c13b4c148 100644 --- a/drivers/edac/edac_core.h +++ b/drivers/edac/edac_core.h | |||
@@ -150,6 +150,8 @@ enum mem_type { | |||
150 | MEM_FB_DDR2, /* fully buffered DDR2 */ | 150 | MEM_FB_DDR2, /* fully buffered DDR2 */ |
151 | MEM_RDDR2, /* Registered DDR2 RAM */ | 151 | MEM_RDDR2, /* Registered DDR2 RAM */ |
152 | MEM_XDR, /* Rambus XDR */ | 152 | MEM_XDR, /* Rambus XDR */ |
153 | MEM_DDR3, /* DDR3 RAM */ | ||
154 | MEM_RDDR3, /* Registered DDR3 RAM */ | ||
153 | }; | 155 | }; |
154 | 156 | ||
155 | #define MEM_FLAG_EMPTY BIT(MEM_EMPTY) | 157 | #define MEM_FLAG_EMPTY BIT(MEM_EMPTY) |
@@ -167,6 +169,8 @@ enum mem_type { | |||
167 | #define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2) | 169 | #define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2) |
168 | #define MEM_FLAG_RDDR2 BIT(MEM_RDDR2) | 170 | #define MEM_FLAG_RDDR2 BIT(MEM_RDDR2) |
169 | #define MEM_FLAG_XDR BIT(MEM_XDR) | 171 | #define MEM_FLAG_XDR BIT(MEM_XDR) |
172 | #define MEM_FLAG_DDR3 BIT(MEM_DDR3) | ||
173 | #define MEM_FLAG_RDDR3 BIT(MEM_RDDR3) | ||
170 | 174 | ||
171 | /* chipset Error Detection and Correction capabilities and mode */ | 175 | /* chipset Error Detection and Correction capabilities and mode */ |
172 | enum edac_type { | 176 | enum edac_type { |
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c index ad218fe4942d..e1d4ce083481 100644 --- a/drivers/edac/edac_mc_sysfs.c +++ b/drivers/edac/edac_mc_sysfs.c | |||
@@ -94,7 +94,9 @@ static const char *mem_types[] = { | |||
94 | [MEM_DDR2] = "Unbuffered-DDR2", | 94 | [MEM_DDR2] = "Unbuffered-DDR2", |
95 | [MEM_FB_DDR2] = "FullyBuffered-DDR2", | 95 | [MEM_FB_DDR2] = "FullyBuffered-DDR2", |
96 | [MEM_RDDR2] = "Registered-DDR2", | 96 | [MEM_RDDR2] = "Registered-DDR2", |
97 | [MEM_XDR] = "XDR" | 97 | [MEM_XDR] = "XDR", |
98 | [MEM_DDR3] = "Unbuffered-DDR3", | ||
99 | [MEM_RDDR3] = "Registered-DDR3" | ||
98 | }; | 100 | }; |
99 | 101 | ||
100 | static const char *dev_types[] = { | 102 | static const char *dev_types[] = { |
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c index 7c8c2d72916f..3f2ccfc6407c 100644 --- a/drivers/edac/mpc85xx_edac.c +++ b/drivers/edac/mpc85xx_edac.c | |||
@@ -757,6 +757,9 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci) | |||
757 | case DSC_SDTYPE_DDR2: | 757 | case DSC_SDTYPE_DDR2: |
758 | mtype = MEM_RDDR2; | 758 | mtype = MEM_RDDR2; |
759 | break; | 759 | break; |
760 | case DSC_SDTYPE_DDR3: | ||
761 | mtype = MEM_RDDR3; | ||
762 | break; | ||
760 | default: | 763 | default: |
761 | mtype = MEM_UNKNOWN; | 764 | mtype = MEM_UNKNOWN; |
762 | break; | 765 | break; |
@@ -769,6 +772,9 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci) | |||
769 | case DSC_SDTYPE_DDR2: | 772 | case DSC_SDTYPE_DDR2: |
770 | mtype = MEM_DDR2; | 773 | mtype = MEM_DDR2; |
771 | break; | 774 | break; |
775 | case DSC_SDTYPE_DDR3: | ||
776 | mtype = MEM_DDR3; | ||
777 | break; | ||
772 | default: | 778 | default: |
773 | mtype = MEM_UNKNOWN; | 779 | mtype = MEM_UNKNOWN; |
774 | break; | 780 | break; |
diff --git a/drivers/edac/mpc85xx_edac.h b/drivers/edac/mpc85xx_edac.h index 135b3539a030..52432ee7c4b9 100644 --- a/drivers/edac/mpc85xx_edac.h +++ b/drivers/edac/mpc85xx_edac.h | |||
@@ -53,6 +53,7 @@ | |||
53 | 53 | ||
54 | #define DSC_SDTYPE_DDR 0x02000000 | 54 | #define DSC_SDTYPE_DDR 0x02000000 |
55 | #define DSC_SDTYPE_DDR2 0x03000000 | 55 | #define DSC_SDTYPE_DDR2 0x03000000 |
56 | #define DSC_SDTYPE_DDR3 0x07000000 | ||
56 | #define DSC_X32_EN 0x00000020 | 57 | #define DSC_X32_EN 0x00000020 |
57 | 58 | ||
58 | /* Err_Int_En */ | 59 | /* Err_Int_En */ |
diff --git a/drivers/gpio/pl061.c b/drivers/gpio/pl061.c index aa8e7cb020d9..4ee4c8367a3f 100644 --- a/drivers/gpio/pl061.c +++ b/drivers/gpio/pl061.c | |||
@@ -109,6 +109,16 @@ static void pl061_set_value(struct gpio_chip *gc, unsigned offset, int value) | |||
109 | writeb(!!value << offset, chip->base + (1 << (offset + 2))); | 109 | writeb(!!value << offset, chip->base + (1 << (offset + 2))); |
110 | } | 110 | } |
111 | 111 | ||
112 | static int pl061_to_irq(struct gpio_chip *gc, unsigned offset) | ||
113 | { | ||
114 | struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc); | ||
115 | |||
116 | if (chip->irq_base == (unsigned) -1) | ||
117 | return -EINVAL; | ||
118 | |||
119 | return chip->irq_base + offset; | ||
120 | } | ||
121 | |||
112 | /* | 122 | /* |
113 | * PL061 GPIO IRQ | 123 | * PL061 GPIO IRQ |
114 | */ | 124 | */ |
@@ -200,7 +210,7 @@ static void pl061_irq_handler(unsigned irq, struct irq_desc *desc) | |||
200 | desc->chip->ack(irq); | 210 | desc->chip->ack(irq); |
201 | list_for_each(ptr, chip_list) { | 211 | list_for_each(ptr, chip_list) { |
202 | unsigned long pending; | 212 | unsigned long pending; |
203 | int gpio; | 213 | int offset; |
204 | 214 | ||
205 | chip = list_entry(ptr, struct pl061_gpio, list); | 215 | chip = list_entry(ptr, struct pl061_gpio, list); |
206 | pending = readb(chip->base + GPIOMIS); | 216 | pending = readb(chip->base + GPIOMIS); |
@@ -209,8 +219,8 @@ static void pl061_irq_handler(unsigned irq, struct irq_desc *desc) | |||
209 | if (pending == 0) | 219 | if (pending == 0) |
210 | continue; | 220 | continue; |
211 | 221 | ||
212 | for_each_bit(gpio, &pending, PL061_GPIO_NR) | 222 | for_each_bit(offset, &pending, PL061_GPIO_NR) |
213 | generic_handle_irq(gpio_to_irq(gpio)); | 223 | generic_handle_irq(pl061_to_irq(&chip->gc, offset)); |
214 | } | 224 | } |
215 | desc->chip->unmask(irq); | 225 | desc->chip->unmask(irq); |
216 | } | 226 | } |
@@ -221,7 +231,7 @@ static int __init pl061_probe(struct amba_device *dev, struct amba_id *id) | |||
221 | struct pl061_gpio *chip; | 231 | struct pl061_gpio *chip; |
222 | struct list_head *chip_list; | 232 | struct list_head *chip_list; |
223 | int ret, irq, i; | 233 | int ret, irq, i; |
224 | static unsigned long init_irq[BITS_TO_LONGS(NR_IRQS)]; | 234 | static DECLARE_BITMAP(init_irq, NR_IRQS); |
225 | 235 | ||
226 | pdata = dev->dev.platform_data; | 236 | pdata = dev->dev.platform_data; |
227 | if (pdata == NULL) | 237 | if (pdata == NULL) |
@@ -251,6 +261,7 @@ static int __init pl061_probe(struct amba_device *dev, struct amba_id *id) | |||
251 | chip->gc.direction_output = pl061_direction_output; | 261 | chip->gc.direction_output = pl061_direction_output; |
252 | chip->gc.get = pl061_get_value; | 262 | chip->gc.get = pl061_get_value; |
253 | chip->gc.set = pl061_set_value; | 263 | chip->gc.set = pl061_set_value; |
264 | chip->gc.to_irq = pl061_to_irq; | ||
254 | chip->gc.base = pdata->gpio_base; | 265 | chip->gc.base = pdata->gpio_base; |
255 | chip->gc.ngpio = PL061_GPIO_NR; | 266 | chip->gc.ngpio = PL061_GPIO_NR; |
256 | chip->gc.label = dev_name(&dev->dev); | 267 | chip->gc.label = dev_name(&dev->dev); |
@@ -280,6 +291,7 @@ static int __init pl061_probe(struct amba_device *dev, struct amba_id *id) | |||
280 | if (!test_and_set_bit(irq, init_irq)) { /* list initialized? */ | 291 | if (!test_and_set_bit(irq, init_irq)) { /* list initialized? */ |
281 | chip_list = kmalloc(sizeof(*chip_list), GFP_KERNEL); | 292 | chip_list = kmalloc(sizeof(*chip_list), GFP_KERNEL); |
282 | if (chip_list == NULL) { | 293 | if (chip_list == NULL) { |
294 | clear_bit(irq, init_irq); | ||
283 | ret = -ENOMEM; | 295 | ret = -ENOMEM; |
284 | goto iounmap; | 296 | goto iounmap; |
285 | } | 297 | } |
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c index 77f79d26b264..c509c9916464 100644 --- a/drivers/ide/ide-acpi.c +++ b/drivers/ide/ide-acpi.c | |||
@@ -92,6 +92,11 @@ int ide_acpi_init(void) | |||
92 | return 0; | 92 | return 0; |
93 | } | 93 | } |
94 | 94 | ||
95 | bool ide_port_acpi(ide_hwif_t *hwif) | ||
96 | { | ||
97 | return ide_noacpi == 0 && hwif->acpidata; | ||
98 | } | ||
99 | |||
95 | /** | 100 | /** |
96 | * ide_get_dev_handle - finds acpi_handle and PCI device.function | 101 | * ide_get_dev_handle - finds acpi_handle and PCI device.function |
97 | * @dev: device to locate | 102 | * @dev: device to locate |
@@ -352,9 +357,6 @@ int ide_acpi_exec_tfs(ide_drive_t *drive) | |||
352 | unsigned long gtf_address; | 357 | unsigned long gtf_address; |
353 | unsigned long obj_loc; | 358 | unsigned long obj_loc; |
354 | 359 | ||
355 | if (ide_noacpi) | ||
356 | return 0; | ||
357 | |||
358 | DEBPRINT("call get_GTF, drive=%s port=%d\n", drive->name, drive->dn); | 360 | DEBPRINT("call get_GTF, drive=%s port=%d\n", drive->name, drive->dn); |
359 | 361 | ||
360 | ret = do_drive_get_GTF(drive, &gtf_length, &gtf_address, &obj_loc); | 362 | ret = do_drive_get_GTF(drive, &gtf_length, &gtf_address, &obj_loc); |
@@ -389,16 +391,6 @@ void ide_acpi_get_timing(ide_hwif_t *hwif) | |||
389 | struct acpi_buffer output; | 391 | struct acpi_buffer output; |
390 | union acpi_object *out_obj; | 392 | union acpi_object *out_obj; |
391 | 393 | ||
392 | if (ide_noacpi) | ||
393 | return; | ||
394 | |||
395 | DEBPRINT("ENTER:\n"); | ||
396 | |||
397 | if (!hwif->acpidata) { | ||
398 | DEBPRINT("no ACPI data for %s\n", hwif->name); | ||
399 | return; | ||
400 | } | ||
401 | |||
402 | /* Setting up output buffer for _GTM */ | 394 | /* Setting up output buffer for _GTM */ |
403 | output.length = ACPI_ALLOCATE_BUFFER; | 395 | output.length = ACPI_ALLOCATE_BUFFER; |
404 | output.pointer = NULL; /* ACPI-CA sets this; save/free it later */ | 396 | output.pointer = NULL; /* ACPI-CA sets this; save/free it later */ |
@@ -479,16 +471,6 @@ void ide_acpi_push_timing(ide_hwif_t *hwif) | |||
479 | struct ide_acpi_drive_link *master = &hwif->acpidata->master; | 471 | struct ide_acpi_drive_link *master = &hwif->acpidata->master; |
480 | struct ide_acpi_drive_link *slave = &hwif->acpidata->slave; | 472 | struct ide_acpi_drive_link *slave = &hwif->acpidata->slave; |
481 | 473 | ||
482 | if (ide_noacpi) | ||
483 | return; | ||
484 | |||
485 | DEBPRINT("ENTER:\n"); | ||
486 | |||
487 | if (!hwif->acpidata) { | ||
488 | DEBPRINT("no ACPI data for %s\n", hwif->name); | ||
489 | return; | ||
490 | } | ||
491 | |||
492 | /* Give the GTM buffer + drive Identify data to the channel via the | 474 | /* Give the GTM buffer + drive Identify data to the channel via the |
493 | * _STM method: */ | 475 | * _STM method: */ |
494 | /* setup input parameters buffer for _STM */ | 476 | /* setup input parameters buffer for _STM */ |
@@ -527,16 +509,11 @@ void ide_acpi_set_state(ide_hwif_t *hwif, int on) | |||
527 | ide_drive_t *drive; | 509 | ide_drive_t *drive; |
528 | int i; | 510 | int i; |
529 | 511 | ||
530 | if (ide_noacpi || ide_noacpi_psx) | 512 | if (ide_noacpi_psx) |
531 | return; | 513 | return; |
532 | 514 | ||
533 | DEBPRINT("ENTER:\n"); | 515 | DEBPRINT("ENTER:\n"); |
534 | 516 | ||
535 | if (!hwif->acpidata) { | ||
536 | DEBPRINT("no ACPI data for %s\n", hwif->name); | ||
537 | return; | ||
538 | } | ||
539 | |||
540 | /* channel first and then drives for power on and verse versa for power off */ | 517 | /* channel first and then drives for power on and verse versa for power off */ |
541 | if (on) | 518 | if (on) |
542 | acpi_bus_set_power(hwif->acpidata->obj_handle, ACPI_STATE_D0); | 519 | acpi_bus_set_power(hwif->acpidata->obj_handle, ACPI_STATE_D0); |
@@ -616,7 +593,7 @@ void ide_acpi_port_init_devices(ide_hwif_t *hwif) | |||
616 | drive->name, err); | 593 | drive->name, err); |
617 | } | 594 | } |
618 | 595 | ||
619 | if (!ide_acpionboot) { | 596 | if (ide_noacpi || ide_acpionboot == 0) { |
620 | DEBPRINT("ACPI methods disabled on boot\n"); | 597 | DEBPRINT("ACPI methods disabled on boot\n"); |
621 | return; | 598 | return; |
622 | } | 599 | } |
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index f0ede5953af8..6a9a769bffc1 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c | |||
@@ -592,9 +592,19 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) | |||
592 | } | 592 | } |
593 | } else if (!blk_pc_request(rq)) { | 593 | } else if (!blk_pc_request(rq)) { |
594 | ide_cd_request_sense_fixup(drive, cmd); | 594 | ide_cd_request_sense_fixup(drive, cmd); |
595 | /* complain if we still have data left to transfer */ | 595 | |
596 | uptodate = cmd->nleft ? 0 : 1; | 596 | uptodate = cmd->nleft ? 0 : 1; |
597 | if (uptodate == 0) | 597 | |
598 | /* | ||
599 | * suck out the remaining bytes from the drive in an | ||
600 | * attempt to complete the data xfer. (see BZ#13399) | ||
601 | */ | ||
602 | if (!(stat & ATA_ERR) && !uptodate && thislen) { | ||
603 | ide_pio_bytes(drive, cmd, write, thislen); | ||
604 | uptodate = cmd->nleft ? 0 : 1; | ||
605 | } | ||
606 | |||
607 | if (!uptodate) | ||
598 | rq->cmd_flags |= REQ_FAILED; | 608 | rq->cmd_flags |= REQ_FAILED; |
599 | } | 609 | } |
600 | goto out_end; | 610 | goto out_end; |
diff --git a/drivers/ide/ide-devsets.c b/drivers/ide/ide-devsets.c index 5bf958e5b1d5..1099bf7cf968 100644 --- a/drivers/ide/ide-devsets.c +++ b/drivers/ide/ide-devsets.c | |||
@@ -183,6 +183,6 @@ ide_startstop_t ide_do_devset(ide_drive_t *drive, struct request *rq) | |||
183 | err = setfunc(drive, *(int *)&rq->cmd[1]); | 183 | err = setfunc(drive, *(int *)&rq->cmd[1]); |
184 | if (err) | 184 | if (err) |
185 | rq->errors = err; | 185 | rq->errors = err; |
186 | ide_complete_rq(drive, err, ide_rq_bytes(rq)); | 186 | ide_complete_rq(drive, err, blk_rq_bytes(rq)); |
187 | return ide_stopped; | 187 | return ide_stopped; |
188 | } | 188 | } |
diff --git a/drivers/ide/ide-eh.c b/drivers/ide/ide-eh.c index 2b9141979613..e9abf2c3c335 100644 --- a/drivers/ide/ide-eh.c +++ b/drivers/ide/ide-eh.c | |||
@@ -149,7 +149,7 @@ static inline void ide_complete_drive_reset(ide_drive_t *drive, int err) | |||
149 | if (rq && blk_special_request(rq) && rq->cmd[0] == REQ_DRIVE_RESET) { | 149 | if (rq && blk_special_request(rq) && rq->cmd[0] == REQ_DRIVE_RESET) { |
150 | if (err <= 0 && rq->errors == 0) | 150 | if (err <= 0 && rq->errors == 0) |
151 | rq->errors = -EIO; | 151 | rq->errors = -EIO; |
152 | ide_complete_rq(drive, err ? err : 0, ide_rq_bytes(rq)); | 152 | ide_complete_rq(drive, err ? err : 0, blk_rq_bytes(rq)); |
153 | } | 153 | } |
154 | } | 154 | } |
155 | 155 | ||
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c index 8b3f204f7d73..fefbdfc8db06 100644 --- a/drivers/ide/ide-floppy.c +++ b/drivers/ide/ide-floppy.c | |||
@@ -293,7 +293,7 @@ out_end: | |||
293 | drive->failed_pc = NULL; | 293 | drive->failed_pc = NULL; |
294 | if (blk_fs_request(rq) == 0 && rq->errors == 0) | 294 | if (blk_fs_request(rq) == 0 && rq->errors == 0) |
295 | rq->errors = -EIO; | 295 | rq->errors = -EIO; |
296 | ide_complete_rq(drive, -EIO, ide_rq_bytes(rq)); | 296 | ide_complete_rq(drive, -EIO, blk_rq_bytes(rq)); |
297 | return ide_stopped; | 297 | return ide_stopped; |
298 | } | 298 | } |
299 | 299 | ||
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index 93b7886a2d6e..d5f3c77beadd 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c | |||
@@ -112,16 +112,6 @@ void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err) | |||
112 | } | 112 | } |
113 | } | 113 | } |
114 | 114 | ||
115 | /* obsolete, blk_rq_bytes() should be used instead */ | ||
116 | unsigned int ide_rq_bytes(struct request *rq) | ||
117 | { | ||
118 | if (blk_pc_request(rq)) | ||
119 | return blk_rq_bytes(rq); | ||
120 | else | ||
121 | return blk_rq_cur_sectors(rq) << 9; | ||
122 | } | ||
123 | EXPORT_SYMBOL_GPL(ide_rq_bytes); | ||
124 | |||
125 | int ide_complete_rq(ide_drive_t *drive, int error, unsigned int nr_bytes) | 115 | int ide_complete_rq(ide_drive_t *drive, int error, unsigned int nr_bytes) |
126 | { | 116 | { |
127 | ide_hwif_t *hwif = drive->hwif; | 117 | ide_hwif_t *hwif = drive->hwif; |
@@ -152,14 +142,14 @@ void ide_kill_rq(ide_drive_t *drive, struct request *rq) | |||
152 | 142 | ||
153 | if ((media == ide_floppy || media == ide_tape) && drv_req) { | 143 | if ((media == ide_floppy || media == ide_tape) && drv_req) { |
154 | rq->errors = 0; | 144 | rq->errors = 0; |
155 | ide_complete_rq(drive, 0, blk_rq_bytes(rq)); | ||
156 | } else { | 145 | } else { |
157 | if (media == ide_tape) | 146 | if (media == ide_tape) |
158 | rq->errors = IDE_DRV_ERROR_GENERAL; | 147 | rq->errors = IDE_DRV_ERROR_GENERAL; |
159 | else if (blk_fs_request(rq) == 0 && rq->errors == 0) | 148 | else if (blk_fs_request(rq) == 0 && rq->errors == 0) |
160 | rq->errors = -EIO; | 149 | rq->errors = -EIO; |
161 | ide_complete_rq(drive, -EIO, ide_rq_bytes(rq)); | ||
162 | } | 150 | } |
151 | |||
152 | ide_complete_rq(drive, -EIO, blk_rq_bytes(rq)); | ||
163 | } | 153 | } |
164 | 154 | ||
165 | static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf) | 155 | static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf) |
diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c index 82f252c3ee6e..e246d3d3fbcc 100644 --- a/drivers/ide/ide-ioctls.c +++ b/drivers/ide/ide-ioctls.c | |||
@@ -64,7 +64,8 @@ static int ide_get_identity_ioctl(ide_drive_t *drive, unsigned int cmd, | |||
64 | goto out; | 64 | goto out; |
65 | } | 65 | } |
66 | 66 | ||
67 | id = kmalloc(size, GFP_KERNEL); | 67 | /* ata_id_to_hd_driveid() relies on 'id' to be fully allocated. */ |
68 | id = kmalloc(ATA_ID_WORDS * 2, GFP_KERNEL); | ||
68 | if (id == NULL) { | 69 | if (id == NULL) { |
69 | rc = -ENOMEM; | 70 | rc = -ENOMEM; |
70 | goto out; | 71 | goto out; |
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c index c14ca144cffe..ad7be2669dcb 100644 --- a/drivers/ide/ide-pm.c +++ b/drivers/ide/ide-pm.c | |||
@@ -10,9 +10,11 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg) | |||
10 | struct request_pm_state rqpm; | 10 | struct request_pm_state rqpm; |
11 | int ret; | 11 | int ret; |
12 | 12 | ||
13 | /* call ACPI _GTM only once */ | 13 | if (ide_port_acpi(hwif)) { |
14 | if ((drive->dn & 1) == 0 || pair == NULL) | 14 | /* call ACPI _GTM only once */ |
15 | ide_acpi_get_timing(hwif); | 15 | if ((drive->dn & 1) == 0 || pair == NULL) |
16 | ide_acpi_get_timing(hwif); | ||
17 | } | ||
16 | 18 | ||
17 | memset(&rqpm, 0, sizeof(rqpm)); | 19 | memset(&rqpm, 0, sizeof(rqpm)); |
18 | rq = blk_get_request(drive->queue, READ, __GFP_WAIT); | 20 | rq = blk_get_request(drive->queue, READ, __GFP_WAIT); |
@@ -26,9 +28,11 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg) | |||
26 | ret = blk_execute_rq(drive->queue, NULL, rq, 0); | 28 | ret = blk_execute_rq(drive->queue, NULL, rq, 0); |
27 | blk_put_request(rq); | 29 | blk_put_request(rq); |
28 | 30 | ||
29 | /* call ACPI _PS3 only after both devices are suspended */ | 31 | if (ret == 0 && ide_port_acpi(hwif)) { |
30 | if (ret == 0 && ((drive->dn & 1) || pair == NULL)) | 32 | /* call ACPI _PS3 only after both devices are suspended */ |
31 | ide_acpi_set_state(hwif, 0); | 33 | if ((drive->dn & 1) || pair == NULL) |
34 | ide_acpi_set_state(hwif, 0); | ||
35 | } | ||
32 | 36 | ||
33 | return ret; | 37 | return ret; |
34 | } | 38 | } |
@@ -42,13 +46,15 @@ int generic_ide_resume(struct device *dev) | |||
42 | struct request_pm_state rqpm; | 46 | struct request_pm_state rqpm; |
43 | int err; | 47 | int err; |
44 | 48 | ||
45 | /* call ACPI _PS0 / _STM only once */ | 49 | if (ide_port_acpi(hwif)) { |
46 | if ((drive->dn & 1) == 0 || pair == NULL) { | 50 | /* call ACPI _PS0 / _STM only once */ |
47 | ide_acpi_set_state(hwif, 1); | 51 | if ((drive->dn & 1) == 0 || pair == NULL) { |
48 | ide_acpi_push_timing(hwif); | 52 | ide_acpi_set_state(hwif, 1); |
49 | } | 53 | ide_acpi_push_timing(hwif); |
54 | } | ||
50 | 55 | ||
51 | ide_acpi_exec_tfs(drive); | 56 | ide_acpi_exec_tfs(drive); |
57 | } | ||
52 | 58 | ||
53 | memset(&rqpm, 0, sizeof(rqpm)); | 59 | memset(&rqpm, 0, sizeof(rqpm)); |
54 | rq = blk_get_request(drive->queue, READ, __GFP_WAIT); | 60 | rq = blk_get_request(drive->queue, READ, __GFP_WAIT); |
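The ide-acpi.c and ide-pm.c hunks above consolidate the scattered "if (ide_noacpi) / if (!hwif->acpidata)" checks into one ide_port_acpi() predicate that the suspend/resume paths test once before calling the ACPI helpers. A tiny sketch of that guard-helper pattern, with purely illustrative names standing in for ide_noacpi and hwif->acpidata:

    #include <stdbool.h>
    #include <stdio.h>

    static int acpi_disabled_flag;          /* plays the role of the ide_noacpi param */

    struct port {
            void *acpidata;                 /* non-NULL when ACPI data was found */
    };

    /* One guard, analogous to ide_port_acpi(): callers check once instead of
     * every ACPI helper re-testing the globals internally. */
    static bool port_has_acpi(const struct port *p)
    {
            return acpi_disabled_flag == 0 && p->acpidata != NULL;
    }

    static void suspend_port(struct port *p)
    {
            if (port_has_acpi(p))
                    printf("would run the port's ACPI methods here\n");
            else
                    printf("skipping ACPI methods for this port\n");
    }

    int main(void)
    {
            struct port with    = { .acpidata = &with };
            struct port without = { .acpidata = NULL };

            suspend_port(&with);
            suspend_port(&without);
            return 0;
    }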
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index 9b60b6b684d9..7c8e7122aaa9 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig | |||
@@ -75,6 +75,7 @@ config LEDS_ALIX2 | |||
75 | depends on LEDS_CLASS && X86 && EXPERIMENTAL | 75 | depends on LEDS_CLASS && X86 && EXPERIMENTAL |
76 | help | 76 | help |
77 | This option enables support for the PCEngines ALIX.2 and ALIX.3 LEDs. | 77 | This option enables support for the PCEngines ALIX.2 and ALIX.3 LEDs. |
78 | You have to set leds-alix2.force=1 for boards with Award BIOS. | ||
78 | 79 | ||
79 | config LEDS_H1940 | 80 | config LEDS_H1940 |
80 | tristate "LED Support for iPAQ H1940 device" | 81 | tristate "LED Support for iPAQ H1940 device" |
@@ -145,15 +146,16 @@ config LEDS_GPIO_OF | |||
145 | of_platform devices. For instance, LEDs which are listed in a "dts" | 146 | of_platform devices. For instance, LEDs which are listed in a "dts" |
146 | file. | 147 | file. |
147 | 148 | ||
148 | config LEDS_LP5521 | 149 | config LEDS_LP3944 |
149 | tristate "LED Support for the LP5521 LEDs" | 150 | tristate "LED Support for N.S. LP3944 (Fun Light) I2C chip" |
150 | depends on LEDS_CLASS && I2C | 151 | depends on LEDS_CLASS && I2C |
151 | help | 152 | help |
152 | If you say 'Y' here you get support for the National Semiconductor | 153 | This option enables support for LEDs connected to the National |
153 | LP5521 LED driver used in n8x0 boards. | 154 | Semiconductor LP3944 Lighting Management Unit (LMU) also known as |
155 | Fun Light Chip. | ||
154 | 156 | ||
155 | This driver can be built as a module by choosing 'M'. The module | 157 | To compile this driver as a module, choose M here: the |
156 | will be called leds-lp5521. | 158 | module will be called leds-lp3944. |
157 | 159 | ||
158 | config LEDS_CLEVO_MAIL | 160 | config LEDS_CLEVO_MAIL |
159 | tristate "Mail LED on Clevo notebook" | 161 | tristate "Mail LED on Clevo notebook" |
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile index 2d41c4dcf92f..e8cdcf77a4c3 100644 --- a/drivers/leds/Makefile +++ b/drivers/leds/Makefile | |||
@@ -20,6 +20,7 @@ obj-$(CONFIG_LEDS_COBALT_RAQ) += leds-cobalt-raq.o | |||
20 | obj-$(CONFIG_LEDS_SUNFIRE) += leds-sunfire.o | 20 | obj-$(CONFIG_LEDS_SUNFIRE) += leds-sunfire.o |
21 | obj-$(CONFIG_LEDS_PCA9532) += leds-pca9532.o | 21 | obj-$(CONFIG_LEDS_PCA9532) += leds-pca9532.o |
22 | obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o | 22 | obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o |
23 | obj-$(CONFIG_LEDS_LP3944) += leds-lp3944.o | ||
23 | obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o | 24 | obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o |
24 | obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o | 25 | obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o |
25 | obj-$(CONFIG_LEDS_FSG) += leds-fsg.o | 26 | obj-$(CONFIG_LEDS_FSG) += leds-fsg.o |
diff --git a/drivers/leds/leds-alix2.c b/drivers/leds/leds-alix2.c index ddbd7730dfc8..731d4eef3425 100644 --- a/drivers/leds/leds-alix2.c +++ b/drivers/leds/leds-alix2.c | |||
@@ -14,7 +14,7 @@ | |||
14 | 14 | ||
15 | static int force = 0; | 15 | static int force = 0; |
16 | module_param(force, bool, 0444); | 16 | module_param(force, bool, 0444); |
17 | MODULE_PARM_DESC(force, "Assume system has ALIX.2 style LEDs"); | 17 | MODULE_PARM_DESC(force, "Assume system has ALIX.2/ALIX.3 style LEDs"); |
18 | 18 | ||
19 | struct alix_led { | 19 | struct alix_led { |
20 | struct led_classdev cdev; | 20 | struct led_classdev cdev; |
@@ -155,6 +155,11 @@ static int __init alix_led_init(void) | |||
155 | goto out; | 155 | goto out; |
156 | } | 156 | } |
157 | 157 | ||
158 | /* enable output on GPIO for LED 1,2,3 */ | ||
159 | outl(1 << 6, 0x6104); | ||
160 | outl(1 << 9, 0x6184); | ||
161 | outl(1 << 11, 0x6184); | ||
162 | |||
158 | pdev = platform_device_register_simple(KBUILD_MODNAME, -1, NULL, 0); | 163 | pdev = platform_device_register_simple(KBUILD_MODNAME, -1, NULL, 0); |
159 | if (!IS_ERR(pdev)) { | 164 | if (!IS_ERR(pdev)) { |
160 | ret = platform_driver_probe(&alix_led_driver, alix_led_probe); | 165 | ret = platform_driver_probe(&alix_led_driver, alix_led_probe); |
diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c index 4149ecb3a9b2..779d7f262c04 100644 --- a/drivers/leds/leds-bd2802.c +++ b/drivers/leds/leds-bd2802.c | |||
@@ -97,6 +97,10 @@ struct bd2802_led { | |||
97 | enum led_ids led_id; | 97 | enum led_ids led_id; |
98 | enum led_colors color; | 98 | enum led_colors color; |
99 | enum led_bits state; | 99 | enum led_bits state; |
100 | |||
101 | /* General attributes of RGB LEDs */ | ||
102 | int wave_pattern; | ||
103 | int rgb_current; | ||
100 | }; | 104 | }; |
101 | 105 | ||
102 | 106 | ||
@@ -254,7 +258,7 @@ static void bd2802_set_on(struct bd2802_led *led, enum led_ids id, | |||
254 | bd2802_reset_cancel(led); | 258 | bd2802_reset_cancel(led); |
255 | 259 | ||
256 | reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT1SETUP); | 260 | reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT1SETUP); |
257 | bd2802_write_byte(led->client, reg, BD2802_CURRENT_032); | 261 | bd2802_write_byte(led->client, reg, led->rgb_current); |
258 | reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT2SETUP); | 262 | reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT2SETUP); |
259 | bd2802_write_byte(led->client, reg, BD2802_CURRENT_000); | 263 | bd2802_write_byte(led->client, reg, BD2802_CURRENT_000); |
260 | reg = bd2802_get_reg_addr(id, color, BD2802_REG_WAVEPATTERN); | 264 | reg = bd2802_get_reg_addr(id, color, BD2802_REG_WAVEPATTERN); |
@@ -275,9 +279,9 @@ static void bd2802_set_blink(struct bd2802_led *led, enum led_ids id, | |||
275 | reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT1SETUP); | 279 | reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT1SETUP); |
276 | bd2802_write_byte(led->client, reg, BD2802_CURRENT_000); | 280 | bd2802_write_byte(led->client, reg, BD2802_CURRENT_000); |
277 | reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT2SETUP); | 281 | reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT2SETUP); |
278 | bd2802_write_byte(led->client, reg, BD2802_CURRENT_032); | 282 | bd2802_write_byte(led->client, reg, led->rgb_current); |
279 | reg = bd2802_get_reg_addr(id, color, BD2802_REG_WAVEPATTERN); | 283 | reg = bd2802_get_reg_addr(id, color, BD2802_REG_WAVEPATTERN); |
280 | bd2802_write_byte(led->client, reg, BD2802_PATTERN_HALF); | 284 | bd2802_write_byte(led->client, reg, led->wave_pattern); |
281 | 285 | ||
282 | bd2802_enable(led, id); | 286 | bd2802_enable(led, id); |
283 | bd2802_update_state(led, id, color, BD2802_BLINK); | 287 | bd2802_update_state(led, id, color, BD2802_BLINK); |
@@ -406,7 +410,7 @@ static void bd2802_enable_adv_conf(struct bd2802_led *led) | |||
406 | ret = device_create_file(&led->client->dev, | 410 | ret = device_create_file(&led->client->dev, |
407 | bd2802_addr_attributes[i]); | 411 | bd2802_addr_attributes[i]); |
408 | if (ret) { | 412 | if (ret) { |
409 | dev_err(&led->client->dev, "failed to sysfs file %s\n", | 413 | dev_err(&led->client->dev, "failed: sysfs file %s\n", |
410 | bd2802_addr_attributes[i]->attr.name); | 414 | bd2802_addr_attributes[i]->attr.name); |
411 | goto failed_remove_files; | 415 | goto failed_remove_files; |
412 | } | 416 | } |
@@ -483,6 +487,52 @@ static struct device_attribute bd2802_adv_conf_attr = { | |||
483 | .store = bd2802_store_adv_conf, | 487 | .store = bd2802_store_adv_conf, |
484 | }; | 488 | }; |
485 | 489 | ||
490 | #define BD2802_CONTROL_ATTR(attr_name, name_str) \ | ||
491 | static ssize_t bd2802_show_##attr_name(struct device *dev, \ | ||
492 | struct device_attribute *attr, char *buf) \ | ||
493 | { \ | ||
494 | struct bd2802_led *led = i2c_get_clientdata(to_i2c_client(dev));\ | ||
495 | ssize_t ret; \ | ||
496 | down_read(&led->rwsem); \ | ||
497 | ret = sprintf(buf, "0x%02x\n", led->attr_name); \ | ||
498 | up_read(&led->rwsem); \ | ||
499 | return ret; \ | ||
500 | } \ | ||
501 | static ssize_t bd2802_store_##attr_name(struct device *dev, \ | ||
502 | struct device_attribute *attr, const char *buf, size_t count) \ | ||
503 | { \ | ||
504 | struct bd2802_led *led = i2c_get_clientdata(to_i2c_client(dev));\ | ||
505 | unsigned long val; \ | ||
506 | int ret; \ | ||
507 | if (!count) \ | ||
508 | return -EINVAL; \ | ||
509 | ret = strict_strtoul(buf, 16, &val); \ | ||
510 | if (ret) \ | ||
511 | return ret; \ | ||
512 | down_write(&led->rwsem); \ | ||
513 | led->attr_name = val; \ | ||
514 | up_write(&led->rwsem); \ | ||
515 | return count; \ | ||
516 | } \ | ||
517 | static struct device_attribute bd2802_##attr_name##_attr = { \ | ||
518 | .attr = { \ | ||
519 | .name = name_str, \ | ||
520 | .mode = 0644, \ | ||
521 | .owner = THIS_MODULE \ | ||
522 | }, \ | ||
523 | .show = bd2802_show_##attr_name, \ | ||
524 | .store = bd2802_store_##attr_name, \ | ||
525 | }; | ||
526 | |||
527 | BD2802_CONTROL_ATTR(wave_pattern, "wave_pattern"); | ||
528 | BD2802_CONTROL_ATTR(rgb_current, "rgb_current"); | ||
529 | |||
530 | static struct device_attribute *bd2802_attributes[] = { | ||
531 | &bd2802_adv_conf_attr, | ||
532 | &bd2802_wave_pattern_attr, | ||
533 | &bd2802_rgb_current_attr, | ||
534 | }; | ||
535 | |||
486 | static void bd2802_led_work(struct work_struct *work) | 536 | static void bd2802_led_work(struct work_struct *work) |
487 | { | 537 | { |
488 | struct bd2802_led *led = container_of(work, struct bd2802_led, work); | 538 | struct bd2802_led *led = container_of(work, struct bd2802_led, work); |
@@ -538,7 +588,6 @@ static int bd2802_register_led_classdev(struct bd2802_led *led) | |||
538 | led->cdev_led1r.brightness = LED_OFF; | 588 | led->cdev_led1r.brightness = LED_OFF; |
539 | led->cdev_led1r.brightness_set = bd2802_set_led1r_brightness; | 589 | led->cdev_led1r.brightness_set = bd2802_set_led1r_brightness; |
540 | led->cdev_led1r.blink_set = bd2802_set_led1r_blink; | 590 | led->cdev_led1r.blink_set = bd2802_set_led1r_blink; |
541 | led->cdev_led1r.flags |= LED_CORE_SUSPENDRESUME; | ||
542 | 591 | ||
543 | ret = led_classdev_register(&led->client->dev, &led->cdev_led1r); | 592 | ret = led_classdev_register(&led->client->dev, &led->cdev_led1r); |
544 | if (ret < 0) { | 593 | if (ret < 0) { |
@@ -551,7 +600,6 @@ static int bd2802_register_led_classdev(struct bd2802_led *led) | |||
551 | led->cdev_led1g.brightness = LED_OFF; | 600 | led->cdev_led1g.brightness = LED_OFF; |
552 | led->cdev_led1g.brightness_set = bd2802_set_led1g_brightness; | 601 | led->cdev_led1g.brightness_set = bd2802_set_led1g_brightness; |
553 | led->cdev_led1g.blink_set = bd2802_set_led1g_blink; | 602 | led->cdev_led1g.blink_set = bd2802_set_led1g_blink; |
554 | led->cdev_led1g.flags |= LED_CORE_SUSPENDRESUME; | ||
555 | 603 | ||
556 | ret = led_classdev_register(&led->client->dev, &led->cdev_led1g); | 604 | ret = led_classdev_register(&led->client->dev, &led->cdev_led1g); |
557 | if (ret < 0) { | 605 | if (ret < 0) { |
@@ -564,7 +612,6 @@ static int bd2802_register_led_classdev(struct bd2802_led *led) | |||
564 | led->cdev_led1b.brightness = LED_OFF; | 612 | led->cdev_led1b.brightness = LED_OFF; |
565 | led->cdev_led1b.brightness_set = bd2802_set_led1b_brightness; | 613 | led->cdev_led1b.brightness_set = bd2802_set_led1b_brightness; |
566 | led->cdev_led1b.blink_set = bd2802_set_led1b_blink; | 614 | led->cdev_led1b.blink_set = bd2802_set_led1b_blink; |
567 | led->cdev_led1b.flags |= LED_CORE_SUSPENDRESUME; | ||
568 | 615 | ||
569 | ret = led_classdev_register(&led->client->dev, &led->cdev_led1b); | 616 | ret = led_classdev_register(&led->client->dev, &led->cdev_led1b); |
570 | if (ret < 0) { | 617 | if (ret < 0) { |
@@ -577,7 +624,6 @@ static int bd2802_register_led_classdev(struct bd2802_led *led) | |||
577 | led->cdev_led2r.brightness = LED_OFF; | 624 | led->cdev_led2r.brightness = LED_OFF; |
578 | led->cdev_led2r.brightness_set = bd2802_set_led2r_brightness; | 625 | led->cdev_led2r.brightness_set = bd2802_set_led2r_brightness; |
579 | led->cdev_led2r.blink_set = bd2802_set_led2r_blink; | 626 | led->cdev_led2r.blink_set = bd2802_set_led2r_blink; |
580 | led->cdev_led2r.flags |= LED_CORE_SUSPENDRESUME; | ||
581 | 627 | ||
582 | ret = led_classdev_register(&led->client->dev, &led->cdev_led2r); | 628 | ret = led_classdev_register(&led->client->dev, &led->cdev_led2r); |
583 | if (ret < 0) { | 629 | if (ret < 0) { |
@@ -590,7 +636,6 @@ static int bd2802_register_led_classdev(struct bd2802_led *led) | |||
590 | led->cdev_led2g.brightness = LED_OFF; | 636 | led->cdev_led2g.brightness = LED_OFF; |
591 | led->cdev_led2g.brightness_set = bd2802_set_led2g_brightness; | 637 | led->cdev_led2g.brightness_set = bd2802_set_led2g_brightness; |
592 | led->cdev_led2g.blink_set = bd2802_set_led2g_blink; | 638 | led->cdev_led2g.blink_set = bd2802_set_led2g_blink; |
593 | led->cdev_led2g.flags |= LED_CORE_SUSPENDRESUME; | ||
594 | 639 | ||
595 | ret = led_classdev_register(&led->client->dev, &led->cdev_led2g); | 640 | ret = led_classdev_register(&led->client->dev, &led->cdev_led2g); |
596 | if (ret < 0) { | 641 | if (ret < 0) { |
@@ -640,7 +685,7 @@ static int __devinit bd2802_probe(struct i2c_client *client, | |||
640 | { | 685 | { |
641 | struct bd2802_led *led; | 686 | struct bd2802_led *led; |
642 | struct bd2802_led_platform_data *pdata; | 687 | struct bd2802_led_platform_data *pdata; |
643 | int ret; | 688 | int ret, i; |
644 | 689 | ||
645 | led = kzalloc(sizeof(struct bd2802_led), GFP_KERNEL); | 690 | led = kzalloc(sizeof(struct bd2802_led), GFP_KERNEL); |
646 | if (!led) { | 691 | if (!led) { |
@@ -670,13 +715,20 @@ static int __devinit bd2802_probe(struct i2c_client *client, | |||
670 | /* To save the power, reset BD2802 after detecting */ | 715 | /* To save the power, reset BD2802 after detecting */ |
671 | gpio_set_value(led->pdata->reset_gpio, 0); | 716 | gpio_set_value(led->pdata->reset_gpio, 0); |
672 | 717 | ||
718 | /* Default attributes */ | ||
719 | led->wave_pattern = BD2802_PATTERN_HALF; | ||
720 | led->rgb_current = BD2802_CURRENT_032; | ||
721 | |||
673 | init_rwsem(&led->rwsem); | 722 | init_rwsem(&led->rwsem); |
674 | 723 | ||
675 | ret = device_create_file(&client->dev, &bd2802_adv_conf_attr); | 724 | for (i = 0; i < ARRAY_SIZE(bd2802_attributes); i++) { |
676 | if (ret) { | 725 | ret = device_create_file(&led->client->dev, |
677 | dev_err(&client->dev, "failed to create sysfs file %s\n", | 726 | bd2802_attributes[i]); |
678 | bd2802_adv_conf_attr.attr.name); | 727 | if (ret) { |
679 | goto failed_free; | 728 | dev_err(&led->client->dev, "failed: sysfs file %s\n", |
729 | bd2802_attributes[i]->attr.name); | ||
730 | goto failed_unregister_dev_file; | ||
731 | } | ||
680 | } | 732 | } |
681 | 733 | ||
682 | ret = bd2802_register_led_classdev(led); | 734 | ret = bd2802_register_led_classdev(led); |
@@ -686,7 +738,8 @@ static int __devinit bd2802_probe(struct i2c_client *client, | |||
686 | return 0; | 738 | return 0; |
687 | 739 | ||
688 | failed_unregister_dev_file: | 740 | failed_unregister_dev_file: |
689 | device_remove_file(&client->dev, &bd2802_adv_conf_attr); | 741 | for (i--; i >= 0; i--) |
742 | device_remove_file(&led->client->dev, bd2802_attributes[i]); | ||
690 | failed_free: | 743 | failed_free: |
691 | i2c_set_clientdata(client, NULL); | 744 | i2c_set_clientdata(client, NULL); |
692 | kfree(led); | 745 | kfree(led); |
@@ -697,12 +750,14 @@ failed_free: | |||
697 | static int __exit bd2802_remove(struct i2c_client *client) | 750 | static int __exit bd2802_remove(struct i2c_client *client) |
698 | { | 751 | { |
699 | struct bd2802_led *led = i2c_get_clientdata(client); | 752 | struct bd2802_led *led = i2c_get_clientdata(client); |
753 | int i; | ||
700 | 754 | ||
701 | bd2802_unregister_led_classdev(led); | ||
702 | gpio_set_value(led->pdata->reset_gpio, 0); | 755 | gpio_set_value(led->pdata->reset_gpio, 0); |
756 | bd2802_unregister_led_classdev(led); | ||
703 | if (led->adf_on) | 757 | if (led->adf_on) |
704 | bd2802_disable_adv_conf(led); | 758 | bd2802_disable_adv_conf(led); |
705 | device_remove_file(&client->dev, &bd2802_adv_conf_attr); | 759 | for (i = 0; i < ARRAY_SIZE(bd2802_attributes); i++) |
760 | device_remove_file(&led->client->dev, bd2802_attributes[i]); | ||
706 | i2c_set_clientdata(client, NULL); | 761 | i2c_set_clientdata(client, NULL); |
707 | kfree(led); | 762 | kfree(led); |
708 | 763 | ||
@@ -723,8 +778,7 @@ static int bd2802_resume(struct i2c_client *client) | |||
723 | struct bd2802_led *led = i2c_get_clientdata(client); | 778 | struct bd2802_led *led = i2c_get_clientdata(client); |
724 | 779 | ||
725 | if (!bd2802_is_all_off(led) || led->adf_on) { | 780 | if (!bd2802_is_all_off(led) || led->adf_on) { |
726 | gpio_set_value(led->pdata->reset_gpio, 1); | 781 | bd2802_reset_cancel(led); |
727 | udelay(100); | ||
728 | bd2802_restore_state(led); | 782 | bd2802_restore_state(led); |
729 | } | 783 | } |
730 | 784 | ||
@@ -762,4 +816,4 @@ module_exit(bd2802_exit); | |||
762 | 816 | ||
763 | MODULE_AUTHOR("Kim Kyuwon <q1.kim@samsung.com>"); | 817 | MODULE_AUTHOR("Kim Kyuwon <q1.kim@samsung.com>"); |
764 | MODULE_DESCRIPTION("BD2802 LED driver"); | 818 | MODULE_DESCRIPTION("BD2802 LED driver"); |
765 | MODULE_LICENSE("GPL"); | 819 | MODULE_LICENSE("GPL v2"); |
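Reviewer note on the BD2802_CONTROL_ATTR() macro added above: it token-pastes the attribute name into a show/store pair plus a struct device_attribute, so wave_pattern and rgb_current each become a writable hex sysfs file guarded by the driver's rwsem. For readers unfamiliar with the idiom, here is a minimal, self-contained userspace sketch of the same token-pasting pattern (the demo struct and helper names are invented for illustration and are not the driver's API):

#include <stdio.h>
#include <stdlib.h>

struct demo_led {
	unsigned int wave_pattern;
	unsigned int rgb_current;
};

/* One macro stamps out a show/store pair per named field, the same
 * shape BD2802_CONTROL_ATTR() gives its sysfs attributes. */
#define DEMO_CONTROL_ATTR(field)					\
static void demo_show_##field(const struct demo_led *led)		\
{									\
	printf(#field " = 0x%02x\n", led->field);			\
}									\
static void demo_store_##field(struct demo_led *led, const char *buf)	\
{									\
	led->field = (unsigned int)strtoul(buf, NULL, 16);		\
}

DEMO_CONTROL_ATTR(wave_pattern)
DEMO_CONTROL_ATTR(rgb_current)

int main(void)
{
	struct demo_led led = { 0, 0 };

	demo_store_wave_pattern(&led, "0x30");	/* like: echo 0x30 > wave_pattern */
	demo_store_rgb_current(&led, "0x10");	/* like: echo 0x10 > rgb_current */
	demo_show_wave_pattern(&led);		/* prints: wave_pattern = 0x30 */
	demo_show_rgb_current(&led);		/* prints: rgb_current = 0x10 */
	return 0;
}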
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c index d2109054de85..6b06638eb5b4 100644 --- a/drivers/leds/leds-gpio.c +++ b/drivers/leds/leds-gpio.c | |||
@@ -76,7 +76,7 @@ static int __devinit create_gpio_led(const struct gpio_led *template, | |||
76 | struct gpio_led_data *led_dat, struct device *parent, | 76 | struct gpio_led_data *led_dat, struct device *parent, |
77 | int (*blink_set)(unsigned, unsigned long *, unsigned long *)) | 77 | int (*blink_set)(unsigned, unsigned long *, unsigned long *)) |
78 | { | 78 | { |
79 | int ret; | 79 | int ret, state; |
80 | 80 | ||
81 | /* skip leds that aren't available */ | 81 | /* skip leds that aren't available */ |
82 | if (!gpio_is_valid(template->gpio)) { | 82 | if (!gpio_is_valid(template->gpio)) { |
@@ -99,11 +99,15 @@ static int __devinit create_gpio_led(const struct gpio_led *template, | |||
99 | led_dat->cdev.blink_set = gpio_blink_set; | 99 | led_dat->cdev.blink_set = gpio_blink_set; |
100 | } | 100 | } |
101 | led_dat->cdev.brightness_set = gpio_led_set; | 101 | led_dat->cdev.brightness_set = gpio_led_set; |
102 | led_dat->cdev.brightness = LED_OFF; | 102 | if (template->default_state == LEDS_GPIO_DEFSTATE_KEEP) |
103 | state = !!gpio_get_value(led_dat->gpio) ^ led_dat->active_low; | ||
104 | else | ||
105 | state = (template->default_state == LEDS_GPIO_DEFSTATE_ON); | ||
106 | led_dat->cdev.brightness = state ? LED_FULL : LED_OFF; | ||
103 | if (!template->retain_state_suspended) | 107 | if (!template->retain_state_suspended) |
104 | led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME; | 108 | led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME; |
105 | 109 | ||
106 | ret = gpio_direction_output(led_dat->gpio, led_dat->active_low); | 110 | ret = gpio_direction_output(led_dat->gpio, led_dat->active_low ^ state); |
107 | if (ret < 0) | 111 | if (ret < 0) |
108 | goto err; | 112 | goto err; |
109 | 113 | ||
@@ -129,7 +133,7 @@ static void delete_gpio_led(struct gpio_led_data *led) | |||
129 | } | 133 | } |
130 | 134 | ||
131 | #ifdef CONFIG_LEDS_GPIO_PLATFORM | 135 | #ifdef CONFIG_LEDS_GPIO_PLATFORM |
132 | static int gpio_led_probe(struct platform_device *pdev) | 136 | static int __devinit gpio_led_probe(struct platform_device *pdev) |
133 | { | 137 | { |
134 | struct gpio_led_platform_data *pdata = pdev->dev.platform_data; | 138 | struct gpio_led_platform_data *pdata = pdev->dev.platform_data; |
135 | struct gpio_led_data *leds_data; | 139 | struct gpio_led_data *leds_data; |
@@ -223,12 +227,22 @@ static int __devinit of_gpio_leds_probe(struct of_device *ofdev, | |||
223 | memset(&led, 0, sizeof(led)); | 227 | memset(&led, 0, sizeof(led)); |
224 | for_each_child_of_node(np, child) { | 228 | for_each_child_of_node(np, child) { |
225 | enum of_gpio_flags flags; | 229 | enum of_gpio_flags flags; |
230 | const char *state; | ||
226 | 231 | ||
227 | led.gpio = of_get_gpio_flags(child, 0, &flags); | 232 | led.gpio = of_get_gpio_flags(child, 0, &flags); |
228 | led.active_low = flags & OF_GPIO_ACTIVE_LOW; | 233 | led.active_low = flags & OF_GPIO_ACTIVE_LOW; |
229 | led.name = of_get_property(child, "label", NULL) ? : child->name; | 234 | led.name = of_get_property(child, "label", NULL) ? : child->name; |
230 | led.default_trigger = | 235 | led.default_trigger = |
231 | of_get_property(child, "linux,default-trigger", NULL); | 236 | of_get_property(child, "linux,default-trigger", NULL); |
237 | state = of_get_property(child, "default-state", NULL); | ||
238 | if (state) { | ||
239 | if (!strcmp(state, "keep")) | ||
240 | led.default_state = LEDS_GPIO_DEFSTATE_KEEP; | ||
241 | else if (!strcmp(state, "on")) | ||
242 | led.default_state = LEDS_GPIO_DEFSTATE_ON; | ||
243 | else | ||
244 | led.default_state = LEDS_GPIO_DEFSTATE_OFF; | ||
245 | } | ||
232 | 246 | ||
233 | ret = create_gpio_led(&led, &pdata->led_data[pdata->num_leds++], | 247 | ret = create_gpio_led(&led, &pdata->led_data[pdata->num_leds++], |
234 | &ofdev->dev, NULL); | 248 | &ofdev->dev, NULL); |
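The default_state support added to leds-gpio above lets a board (or a "default-state" device-tree property) start an LED on, off, or left exactly as the firmware programmed the pin: LEDS_GPIO_DEFSTATE_KEEP reads the current GPIO value back, and gpio_direction_output() is then called with that same level, so the LED never glitches when the driver takes over. A board-file sketch using the new field follows; the GPIO number, LED name and the exact platform-data layout from <linux/leds.h> are assumptions made for illustration:

#include <linux/kernel.h>
#include <linux/leds.h>
#include <linux/platform_device.h>

/* Hypothetical wiring: GPIO 42 drives a power LED, active high. */
static struct gpio_led board_leds[] = {
	{
		.name		= "board:green:power",
		.gpio		= 42,
		.active_low	= 0,
		/* keep whatever level the bootloader left on the pin */
		.default_state	= LEDS_GPIO_DEFSTATE_KEEP,
	},
};

static struct gpio_led_platform_data board_leds_pdata = {
	.num_leds	= ARRAY_SIZE(board_leds),
	.leds		= board_leds,
};

static struct platform_device board_leds_device = {
	.name	= "leds-gpio",
	.id	= -1,
	.dev	= {
		.platform_data = &board_leds_pdata,
	},
};

The board's init code would then hand board_leds_device to platform_device_register() as usual.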
diff --git a/drivers/leds/leds-lp3944.c b/drivers/leds/leds-lp3944.c new file mode 100644 index 000000000000..5946208ba26e --- /dev/null +++ b/drivers/leds/leds-lp3944.c | |||
@@ -0,0 +1,466 @@ | |||
1 | /* | ||
2 | * leds-lp3944.c - driver for National Semiconductor LP3944 Funlight Chip | ||
3 | * | ||
4 | * Copyright (C) 2009 Antonio Ospite <ospite@studenti.unina.it> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | /* | ||
13 | * I2C driver for National Semiconductor LP3944 Funlight Chip | ||
14 | * http://www.national.com/pf/LP/LP3944.html | ||
15 | * | ||
16 | * This helper chip can drive up to 8 leds, with two programmable DIM modes; | ||
17 | * it could even be used as a gpio expander but this driver assumes it is used | ||
18 | * as a led controller. | ||
19 | * | ||
20 | * The DIM modes are used to set _blink_ patterns for leds; a pattern is | ||
21 | * specified by supplying two parameters: | ||
22 | * - period: from 0s to 1.6s | ||
23 | * - duty cycle: percentage of the period the led is on, from 0 to 100 | ||
24 | * | ||
25 | * The LP3944 can be found on the Motorola A910 smartphone, where it drives | ||
26 | * the rgb leds, the camera flash light and the display backlights. | ||
27 | */ | ||
28 | |||
29 | #include <linux/module.h> | ||
30 | #include <linux/i2c.h> | ||
31 | #include <linux/leds.h> | ||
32 | #include <linux/mutex.h> | ||
33 | #include <linux/workqueue.h> | ||
34 | #include <linux/leds-lp3944.h> | ||
35 | |||
36 | /* Read Only Registers */ | ||
37 | #define LP3944_REG_INPUT1 0x00 /* LEDs 0-7 InputRegister (Read Only) */ | ||
38 | #define LP3944_REG_REGISTER1 0x01 /* None (Read Only) */ | ||
39 | |||
40 | #define LP3944_REG_PSC0 0x02 /* Frequency Prescaler 0 (R/W) */ | ||
41 | #define LP3944_REG_PWM0 0x03 /* PWM Register 0 (R/W) */ | ||
42 | #define LP3944_REG_PSC1 0x04 /* Frequency Prescaler 1 (R/W) */ | ||
43 | #define LP3944_REG_PWM1 0x05 /* PWM Register 1 (R/W) */ | ||
44 | #define LP3944_REG_LS0 0x06 /* LEDs 0-3 Selector (R/W) */ | ||
45 | #define LP3944_REG_LS1 0x07 /* LEDs 4-7 Selector (R/W) */ | ||
46 | |||
47 | /* These registers are not used to control leds in LP3944, they can store | ||
48 | * arbitrary values which the chip will ignore. | ||
49 | */ | ||
50 | #define LP3944_REG_REGISTER8 0x08 | ||
51 | #define LP3944_REG_REGISTER9 0x09 | ||
52 | |||
53 | #define LP3944_DIM0 0 | ||
54 | #define LP3944_DIM1 1 | ||
55 | |||
56 | /* period in ms */ | ||
57 | #define LP3944_PERIOD_MIN 0 | ||
58 | #define LP3944_PERIOD_MAX 1600 | ||
59 | |||
60 | /* duty cycle is a percentage */ | ||
61 | #define LP3944_DUTY_CYCLE_MIN 0 | ||
62 | #define LP3944_DUTY_CYCLE_MAX 100 | ||
63 | |||
64 | #define ldev_to_led(c) container_of(c, struct lp3944_led_data, ldev) | ||
65 | |||
66 | /* Saved data */ | ||
67 | struct lp3944_led_data { | ||
68 | u8 id; | ||
69 | enum lp3944_type type; | ||
70 | enum lp3944_status status; | ||
71 | struct led_classdev ldev; | ||
72 | struct i2c_client *client; | ||
73 | struct work_struct work; | ||
74 | }; | ||
75 | |||
76 | struct lp3944_data { | ||
77 | struct mutex lock; | ||
78 | struct i2c_client *client; | ||
79 | struct lp3944_led_data leds[LP3944_LEDS_MAX]; | ||
80 | }; | ||
81 | |||
82 | static int lp3944_reg_read(struct i2c_client *client, u8 reg, u8 *value) | ||
83 | { | ||
84 | int tmp; | ||
85 | |||
86 | tmp = i2c_smbus_read_byte_data(client, reg); | ||
87 | if (tmp < 0) | ||
88 | return -EINVAL; | ||
89 | |||
90 | *value = tmp; | ||
91 | |||
92 | return 0; | ||
93 | } | ||
94 | |||
95 | static int lp3944_reg_write(struct i2c_client *client, u8 reg, u8 value) | ||
96 | { | ||
97 | return i2c_smbus_write_byte_data(client, reg, value); | ||
98 | } | ||
99 | |||
100 | /** | ||
101 | * Set the period for DIM status | ||
102 | * | ||
103 | * @client: the i2c client | ||
104 | * @dim: either LP3944_DIM0 or LP3944_DIM1 | ||
105 | * @period: period of a blink, that is a on/off cycle, expressed in ms. | ||
106 | */ | ||
107 | static int lp3944_dim_set_period(struct i2c_client *client, u8 dim, u16 period) | ||
108 | { | ||
109 | u8 psc_reg; | ||
110 | u8 psc_value; | ||
111 | int err; | ||
112 | |||
113 | if (dim == LP3944_DIM0) | ||
114 | psc_reg = LP3944_REG_PSC0; | ||
115 | else if (dim == LP3944_DIM1) | ||
116 | psc_reg = LP3944_REG_PSC1; | ||
117 | else | ||
118 | return -EINVAL; | ||
119 | |||
120 | /* Convert period to Prescaler value */ | ||
121 | if (period > LP3944_PERIOD_MAX) | ||
122 | return -EINVAL; | ||
123 | |||
124 | psc_value = (period * 255) / LP3944_PERIOD_MAX; | ||
125 | |||
126 | err = lp3944_reg_write(client, psc_reg, psc_value); | ||
127 | |||
128 | return err; | ||
129 | } | ||
130 | |||
131 | /** | ||
132 | * Set the duty cycle for DIM status | ||
133 | * | ||
134 | * @client: the i2c client | ||
135 | * @dim: either LP3944_DIM0 or LP3944_DIM1 | ||
136 | * @duty_cycle: percentage of a period during which a led is ON | ||
137 | */ | ||
138 | static int lp3944_dim_set_dutycycle(struct i2c_client *client, u8 dim, | ||
139 | u8 duty_cycle) | ||
140 | { | ||
141 | u8 pwm_reg; | ||
142 | u8 pwm_value; | ||
143 | int err; | ||
144 | |||
145 | if (dim == LP3944_DIM0) | ||
146 | pwm_reg = LP3944_REG_PWM0; | ||
147 | else if (dim == LP3944_DIM1) | ||
148 | pwm_reg = LP3944_REG_PWM1; | ||
149 | else | ||
150 | return -EINVAL; | ||
151 | |||
152 | /* Convert duty cycle to PWM value */ | ||
153 | if (duty_cycle > LP3944_DUTY_CYCLE_MAX) | ||
154 | return -EINVAL; | ||
155 | |||
156 | pwm_value = (duty_cycle * 255) / LP3944_DUTY_CYCLE_MAX; | ||
157 | |||
158 | err = lp3944_reg_write(client, pwm_reg, pwm_value); | ||
159 | |||
160 | return err; | ||
161 | } | ||
162 | |||
163 | /** | ||
164 | * Set the led status | ||
165 | * | ||
166 | * @led: a lp3944_led_data structure | ||
167 | * @status: one of LP3944_LED_STATUS_OFF | ||
168 | * LP3944_LED_STATUS_ON | ||
169 | * LP3944_LED_STATUS_DIM0 | ||
170 | * LP3944_LED_STATUS_DIM1 | ||
171 | */ | ||
172 | static int lp3944_led_set(struct lp3944_led_data *led, u8 status) | ||
173 | { | ||
174 | struct lp3944_data *data = i2c_get_clientdata(led->client); | ||
175 | u8 id = led->id; | ||
176 | u8 reg; | ||
177 | u8 val = 0; | ||
178 | int err; | ||
179 | |||
180 | dev_dbg(&led->client->dev, "%s: %s, status before normalization:%d\n", | ||
181 | __func__, led->ldev.name, status); | ||
182 | |||
183 | switch (id) { | ||
184 | case LP3944_LED0: | ||
185 | case LP3944_LED1: | ||
186 | case LP3944_LED2: | ||
187 | case LP3944_LED3: | ||
188 | reg = LP3944_REG_LS0; | ||
189 | break; | ||
190 | case LP3944_LED4: | ||
191 | case LP3944_LED5: | ||
192 | case LP3944_LED6: | ||
193 | case LP3944_LED7: | ||
194 | id -= LP3944_LED4; | ||
195 | reg = LP3944_REG_LS1; | ||
196 | break; | ||
197 | default: | ||
198 | return -EINVAL; | ||
199 | } | ||
200 | |||
201 | if (status > LP3944_LED_STATUS_DIM1) | ||
202 | return -EINVAL; | ||
203 | |||
204 | /* invert only 0 and 1, leave the other values unchanged; | ||
205 | * remember we are abusing status to set blink patterns | ||
206 | */ | ||
207 | if (led->type == LP3944_LED_TYPE_LED_INVERTED && status < 2) | ||
208 | status = 1 - status; | ||
209 | |||
210 | mutex_lock(&data->lock); | ||
211 | lp3944_reg_read(led->client, reg, &val); | ||
212 | |||
213 | val &= ~(LP3944_LED_STATUS_MASK << (id << 1)); | ||
214 | val |= (status << (id << 1)); | ||
215 | |||
216 | dev_dbg(&led->client->dev, "%s: %s, reg:%d id:%d status:%d val:%#x\n", | ||
217 | __func__, led->ldev.name, reg, id, status, val); | ||
218 | |||
219 | /* set led status */ | ||
220 | err = lp3944_reg_write(led->client, reg, val); | ||
221 | mutex_unlock(&data->lock); | ||
222 | |||
223 | return err; | ||
224 | } | ||
225 | |||
226 | static int lp3944_led_set_blink(struct led_classdev *led_cdev, | ||
227 | unsigned long *delay_on, | ||
228 | unsigned long *delay_off) | ||
229 | { | ||
230 | struct lp3944_led_data *led = ldev_to_led(led_cdev); | ||
231 | u16 period; | ||
232 | u8 duty_cycle; | ||
233 | int err; | ||
234 | |||
235 | /* units are in ms */ | ||
236 | if (*delay_on + *delay_off > LP3944_PERIOD_MAX) | ||
237 | return -EINVAL; | ||
238 | |||
239 | if (*delay_on == 0 && *delay_off == 0) { | ||
240 | /* Special case: the leds subsystem requires a default user | ||
241 | * friendly blink pattern for the LED. Let's blink the led | ||
242 | * slowly (1Hz). | ||
243 | */ | ||
244 | *delay_on = 500; | ||
245 | *delay_off = 500; | ||
246 | } | ||
247 | |||
248 | period = (*delay_on) + (*delay_off); | ||
249 | |||
250 | /* duty_cycle is the percentage of period during which the led is ON */ | ||
251 | duty_cycle = 100 * (*delay_on) / period; | ||
252 | |||
253 | /* invert duty cycle for inverted leds; this has the same effect as | ||
254 | * swapping delay_on and delay_off | ||
255 | */ | ||
256 | if (led->type == LP3944_LED_TYPE_LED_INVERTED) | ||
257 | duty_cycle = 100 - duty_cycle; | ||
258 | |||
259 | /* NOTE: always using the first DIM mode; this means that all leds | ||
260 | * will have the same blinking pattern. | ||
261 | * | ||
262 | * We could find a way later to have two leds blinking in hardware | ||
263 | * with different patterns at the same time, falling back to software | ||
264 | * control for the other ones. | ||
265 | */ | ||
266 | err = lp3944_dim_set_period(led->client, LP3944_DIM0, period); | ||
267 | if (err) | ||
268 | return err; | ||
269 | |||
270 | err = lp3944_dim_set_dutycycle(led->client, LP3944_DIM0, duty_cycle); | ||
271 | if (err) | ||
272 | return err; | ||
273 | |||
274 | dev_dbg(&led->client->dev, "%s: OK hardware accelerated blink!\n", | ||
275 | __func__); | ||
276 | |||
277 | led->status = LP3944_LED_STATUS_DIM0; | ||
278 | schedule_work(&led->work); | ||
279 | |||
280 | return 0; | ||
281 | } | ||
282 | |||
283 | static void lp3944_led_set_brightness(struct led_classdev *led_cdev, | ||
284 | enum led_brightness brightness) | ||
285 | { | ||
286 | struct lp3944_led_data *led = ldev_to_led(led_cdev); | ||
287 | |||
288 | dev_dbg(&led->client->dev, "%s: %s, %d\n", | ||
289 | __func__, led_cdev->name, brightness); | ||
290 | |||
291 | led->status = brightness; | ||
292 | schedule_work(&led->work); | ||
293 | } | ||
294 | |||
295 | static void lp3944_led_work(struct work_struct *work) | ||
296 | { | ||
297 | struct lp3944_led_data *led; | ||
298 | |||
299 | led = container_of(work, struct lp3944_led_data, work); | ||
300 | lp3944_led_set(led, led->status); | ||
301 | } | ||
302 | |||
303 | static int lp3944_configure(struct i2c_client *client, | ||
304 | struct lp3944_data *data, | ||
305 | struct lp3944_platform_data *pdata) | ||
306 | { | ||
307 | int i, err = 0; | ||
308 | |||
309 | for (i = 0; i < pdata->leds_size; i++) { | ||
310 | struct lp3944_led *pled = &pdata->leds[i]; | ||
311 | struct lp3944_led_data *led = &data->leds[i]; | ||
312 | led->client = client; | ||
313 | led->id = i; | ||
314 | |||
315 | switch (pled->type) { | ||
316 | |||
317 | case LP3944_LED_TYPE_LED: | ||
318 | case LP3944_LED_TYPE_LED_INVERTED: | ||
319 | led->type = pled->type; | ||
320 | led->status = pled->status; | ||
321 | led->ldev.name = pled->name; | ||
322 | led->ldev.max_brightness = 1; | ||
323 | led->ldev.brightness_set = lp3944_led_set_brightness; | ||
324 | led->ldev.blink_set = lp3944_led_set_blink; | ||
325 | led->ldev.flags = LED_CORE_SUSPENDRESUME; | ||
326 | |||
327 | INIT_WORK(&led->work, lp3944_led_work); | ||
328 | err = led_classdev_register(&client->dev, &led->ldev); | ||
329 | if (err < 0) { | ||
330 | dev_err(&client->dev, | ||
331 | "couldn't register LED %s\n", | ||
332 | led->ldev.name); | ||
333 | goto exit; | ||
334 | } | ||
335 | |||
336 | /* to expose the default value to userspace */ | ||
337 | led->ldev.brightness = led->status; | ||
338 | |||
339 | /* Set the default led status */ | ||
340 | err = lp3944_led_set(led, led->status); | ||
341 | if (err < 0) { | ||
342 | dev_err(&client->dev, | ||
343 | "%s couldn't set STATUS %d\n", | ||
344 | led->ldev.name, led->status); | ||
345 | goto exit; | ||
346 | } | ||
347 | break; | ||
348 | |||
349 | case LP3944_LED_TYPE_NONE: | ||
350 | default: | ||
351 | break; | ||
352 | |||
353 | } | ||
354 | } | ||
355 | return 0; | ||
356 | |||
357 | exit: | ||
358 | if (i > 0) | ||
359 | for (i = i - 1; i >= 0; i--) | ||
360 | switch (pdata->leds[i].type) { | ||
361 | |||
362 | case LP3944_LED_TYPE_LED: | ||
363 | case LP3944_LED_TYPE_LED_INVERTED: | ||
364 | led_classdev_unregister(&data->leds[i].ldev); | ||
365 | cancel_work_sync(&data->leds[i].work); | ||
366 | break; | ||
367 | |||
368 | case LP3944_LED_TYPE_NONE: | ||
369 | default: | ||
370 | break; | ||
371 | } | ||
372 | |||
373 | return err; | ||
374 | } | ||
375 | |||
376 | static int __devinit lp3944_probe(struct i2c_client *client, | ||
377 | const struct i2c_device_id *id) | ||
378 | { | ||
379 | struct lp3944_platform_data *lp3944_pdata = client->dev.platform_data; | ||
380 | struct lp3944_data *data; | ||
381 | |||
382 | if (lp3944_pdata == NULL) { | ||
383 | dev_err(&client->dev, "no platform data\n"); | ||
384 | return -EINVAL; | ||
385 | } | ||
386 | |||
387 | /* Let's see whether this adapter can support what we need. */ | ||
388 | if (!i2c_check_functionality(client->adapter, | ||
389 | I2C_FUNC_SMBUS_BYTE_DATA)) { | ||
390 | dev_err(&client->dev, "insufficient functionality!\n"); | ||
391 | return -ENODEV; | ||
392 | } | ||
393 | |||
394 | data = kzalloc(sizeof(struct lp3944_data), GFP_KERNEL); | ||
395 | if (!data) | ||
396 | return -ENOMEM; | ||
397 | |||
398 | data->client = client; | ||
399 | i2c_set_clientdata(client, data); | ||
400 | |||
401 | mutex_init(&data->lock); | ||
402 | |||
403 | dev_info(&client->dev, "lp3944 enabled\n"); | ||
404 | |||
405 | lp3944_configure(client, data, lp3944_pdata); | ||
406 | return 0; | ||
407 | } | ||
408 | |||
409 | static int __devexit lp3944_remove(struct i2c_client *client) | ||
410 | { | ||
411 | struct lp3944_platform_data *pdata = client->dev.platform_data; | ||
412 | struct lp3944_data *data = i2c_get_clientdata(client); | ||
413 | int i; | ||
414 | |||
415 | for (i = 0; i < pdata->leds_size; i++) | ||
416 | switch (data->leds[i].type) { | ||
417 | case LP3944_LED_TYPE_LED: | ||
418 | case LP3944_LED_TYPE_LED_INVERTED: | ||
419 | led_classdev_unregister(&data->leds[i].ldev); | ||
420 | cancel_work_sync(&data->leds[i].work); | ||
421 | break; | ||
422 | |||
423 | case LP3944_LED_TYPE_NONE: | ||
424 | default: | ||
425 | break; | ||
426 | } | ||
427 | |||
428 | kfree(data); | ||
429 | i2c_set_clientdata(client, NULL); | ||
430 | |||
431 | return 0; | ||
432 | } | ||
433 | |||
434 | /* lp3944 i2c driver struct */ | ||
435 | static const struct i2c_device_id lp3944_id[] = { | ||
436 | {"lp3944", 0}, | ||
437 | {} | ||
438 | }; | ||
439 | |||
440 | MODULE_DEVICE_TABLE(i2c, lp3944_id); | ||
441 | |||
442 | static struct i2c_driver lp3944_driver = { | ||
443 | .driver = { | ||
444 | .name = "lp3944", | ||
445 | }, | ||
446 | .probe = lp3944_probe, | ||
447 | .remove = __devexit_p(lp3944_remove), | ||
448 | .id_table = lp3944_id, | ||
449 | }; | ||
450 | |||
451 | static int __init lp3944_module_init(void) | ||
452 | { | ||
453 | return i2c_add_driver(&lp3944_driver); | ||
454 | } | ||
455 | |||
456 | static void __exit lp3944_module_exit(void) | ||
457 | { | ||
458 | i2c_del_driver(&lp3944_driver); | ||
459 | } | ||
460 | |||
461 | module_init(lp3944_module_init); | ||
462 | module_exit(lp3944_module_exit); | ||
463 | |||
464 | MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>"); | ||
465 | MODULE_DESCRIPTION("LP3944 Fun Light Chip"); | ||
466 | MODULE_LICENSE("GPL"); | ||
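The new driver above is purely platform-data driven: lp3944_probe() returns -EINVAL when client->dev.platform_data is NULL, so a board has to describe its LED wiring and register the chip on the right I2C bus. A sketch of that registration follows; the 0x60 slave address and the LED names are assumptions for illustration, and the exact layout of the structures comes from <linux/leds-lp3944.h>, which is not part of the hunks shown here:

#include <linux/i2c.h>
#include <linux/leds-lp3944.h>

/* Hypothetical board mapping; outputs left out of the table default to
 * LP3944_LED_TYPE_NONE and are skipped by lp3944_configure(). */
static struct lp3944_platform_data board_lp3944_pdata = {
	.leds_size = LP3944_LEDS_MAX,
	.leds = {
		[LP3944_LED0] = {
			.name	= "board:red:indicator",
			.type	= LP3944_LED_TYPE_LED,
			.status	= LP3944_LED_STATUS_OFF,
		},
		[LP3944_LED1] = {
			.name	= "board:lcd-backlight",
			.type	= LP3944_LED_TYPE_LED_INVERTED,
			.status	= LP3944_LED_STATUS_ON,
		},
	},
};

static struct i2c_board_info board_i2c_devices[] __initdata = {
	{
		/* 0x60 is an assumed slave address for this example */
		I2C_BOARD_INFO("lp3944", 0x60),
		.platform_data = &board_lp3944_pdata,
	},
};

Once the device is registered (i2c_register_board_info() on the bus the chip sits on), the blink interface maps linearly onto the DIM0 registers: a 1000 ms period gives PSC = 1000 * 255 / 1600 = 159 and a 50% duty cycle gives PWM = 50 * 255 / 100 = 127, which is exactly what lp3944_dim_set_period() and lp3944_dim_set_dutycycle() above compute.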
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c index 3937244fdcab..dba8921240f2 100644 --- a/drivers/leds/leds-pca9532.c +++ b/drivers/leds/leds-pca9532.c | |||
@@ -35,7 +35,7 @@ struct pca9532_data { | |||
35 | struct pca9532_led leds[16]; | 35 | struct pca9532_led leds[16]; |
36 | struct mutex update_lock; | 36 | struct mutex update_lock; |
37 | struct input_dev *idev; | 37 | struct input_dev *idev; |
38 | struct work_struct work; | 38 | struct work_struct work; |
39 | u8 pwm[2]; | 39 | u8 pwm[2]; |
40 | u8 psc[2]; | 40 | u8 psc[2]; |
41 | }; | 41 | }; |
@@ -87,14 +87,14 @@ static int pca9532_calcpwm(struct i2c_client *client, int pwm, int blink, | |||
87 | if (b > 0xFF) | 87 | if (b > 0xFF) |
88 | return -EINVAL; | 88 | return -EINVAL; |
89 | data->pwm[pwm] = b; | 89 | data->pwm[pwm] = b; |
90 | data->psc[pwm] = blink; | 90 | data->psc[pwm] = blink; |
91 | return 0; | 91 | return 0; |
92 | } | 92 | } |
93 | 93 | ||
94 | static int pca9532_setpwm(struct i2c_client *client, int pwm) | 94 | static int pca9532_setpwm(struct i2c_client *client, int pwm) |
95 | { | 95 | { |
96 | struct pca9532_data *data = i2c_get_clientdata(client); | 96 | struct pca9532_data *data = i2c_get_clientdata(client); |
97 | mutex_lock(&data->update_lock); | 97 | mutex_lock(&data->update_lock); |
98 | i2c_smbus_write_byte_data(client, PCA9532_REG_PWM(pwm), | 98 | i2c_smbus_write_byte_data(client, PCA9532_REG_PWM(pwm), |
99 | data->pwm[pwm]); | 99 | data->pwm[pwm]); |
100 | i2c_smbus_write_byte_data(client, PCA9532_REG_PSC(pwm), | 100 | i2c_smbus_write_byte_data(client, PCA9532_REG_PSC(pwm), |
@@ -132,11 +132,11 @@ static void pca9532_set_brightness(struct led_classdev *led_cdev, | |||
132 | led->state = PCA9532_ON; | 132 | led->state = PCA9532_ON; |
133 | else { | 133 | else { |
134 | led->state = PCA9532_PWM0; /* Thecus: hardcode one pwm */ | 134 | led->state = PCA9532_PWM0; /* Thecus: hardcode one pwm */ |
135 | err = pca9532_calcpwm(led->client, 0, 0, value); | 135 | err = pca9532_calcpwm(led->client, 0, 0, value); |
136 | if (err) | 136 | if (err) |
137 | return; /* XXX: led api doesn't allow error code? */ | 137 | return; /* XXX: led api doesn't allow error code? */ |
138 | } | 138 | } |
139 | schedule_work(&led->work); | 139 | schedule_work(&led->work); |
140 | } | 140 | } |
141 | 141 | ||
142 | static int pca9532_set_blink(struct led_classdev *led_cdev, | 142 | static int pca9532_set_blink(struct led_classdev *led_cdev, |
@@ -145,7 +145,7 @@ static int pca9532_set_blink(struct led_classdev *led_cdev, | |||
145 | struct pca9532_led *led = ldev_to_led(led_cdev); | 145 | struct pca9532_led *led = ldev_to_led(led_cdev); |
146 | struct i2c_client *client = led->client; | 146 | struct i2c_client *client = led->client; |
147 | int psc; | 147 | int psc; |
148 | int err = 0; | 148 | int err = 0; |
149 | 149 | ||
150 | if (*delay_on == 0 && *delay_off == 0) { | 150 | if (*delay_on == 0 && *delay_off == 0) { |
151 | /* led subsystem ask us for a blink rate */ | 151 | /* led subsystem ask us for a blink rate */ |
@@ -157,11 +157,11 @@ static int pca9532_set_blink(struct led_classdev *led_cdev, | |||
157 | 157 | ||
158 | /* Thecus specific: only use PSC/PWM 0 */ | 158 | /* Thecus specific: only use PSC/PWM 0 */ |
159 | psc = (*delay_on * 152-1)/1000; | 159 | psc = (*delay_on * 152-1)/1000; |
160 | err = pca9532_calcpwm(client, 0, psc, led_cdev->brightness); | 160 | err = pca9532_calcpwm(client, 0, psc, led_cdev->brightness); |
161 | if (err) | 161 | if (err) |
162 | return err; | 162 | return err; |
163 | schedule_work(&led->work); | 163 | schedule_work(&led->work); |
164 | return 0; | 164 | return 0; |
165 | } | 165 | } |
166 | 166 | ||
167 | static int pca9532_event(struct input_dev *dev, unsigned int type, | 167 | static int pca9532_event(struct input_dev *dev, unsigned int type, |
@@ -178,15 +178,15 @@ static int pca9532_event(struct input_dev *dev, unsigned int type, | |||
178 | else | 178 | else |
179 | data->pwm[1] = 0; | 179 | data->pwm[1] = 0; |
180 | 180 | ||
181 | schedule_work(&data->work); | 181 | schedule_work(&data->work); |
182 | 182 | ||
183 | return 0; | 183 | return 0; |
184 | } | 184 | } |
185 | 185 | ||
186 | static void pca9532_input_work(struct work_struct *work) | 186 | static void pca9532_input_work(struct work_struct *work) |
187 | { | 187 | { |
188 | struct pca9532_data *data; | 188 | struct pca9532_data *data; |
189 | data = container_of(work, struct pca9532_data, work); | 189 | data = container_of(work, struct pca9532_data, work); |
190 | mutex_lock(&data->update_lock); | 190 | mutex_lock(&data->update_lock); |
191 | i2c_smbus_write_byte_data(data->client, PCA9532_REG_PWM(1), | 191 | i2c_smbus_write_byte_data(data->client, PCA9532_REG_PWM(1), |
192 | data->pwm[1]); | 192 | data->pwm[1]); |
@@ -195,11 +195,11 @@ static void pca9532_input_work(struct work_struct *work) | |||
195 | 195 | ||
196 | static void pca9532_led_work(struct work_struct *work) | 196 | static void pca9532_led_work(struct work_struct *work) |
197 | { | 197 | { |
198 | struct pca9532_led *led; | 198 | struct pca9532_led *led; |
199 | led = container_of(work, struct pca9532_led, work); | 199 | led = container_of(work, struct pca9532_led, work); |
200 | if (led->state == PCA9532_PWM0) | 200 | if (led->state == PCA9532_PWM0) |
201 | pca9532_setpwm(led->client, 0); | 201 | pca9532_setpwm(led->client, 0); |
202 | pca9532_setled(led); | 202 | pca9532_setled(led); |
203 | } | 203 | } |
204 | 204 | ||
205 | static int pca9532_configure(struct i2c_client *client, | 205 | static int pca9532_configure(struct i2c_client *client, |
@@ -232,7 +232,7 @@ static int pca9532_configure(struct i2c_client *client, | |||
232 | led->ldev.brightness = LED_OFF; | 232 | led->ldev.brightness = LED_OFF; |
233 | led->ldev.brightness_set = pca9532_set_brightness; | 233 | led->ldev.brightness_set = pca9532_set_brightness; |
234 | led->ldev.blink_set = pca9532_set_blink; | 234 | led->ldev.blink_set = pca9532_set_blink; |
235 | INIT_WORK(&led->work, pca9532_led_work); | 235 | INIT_WORK(&led->work, pca9532_led_work); |
236 | err = led_classdev_register(&client->dev, &led->ldev); | 236 | err = led_classdev_register(&client->dev, &led->ldev); |
237 | if (err < 0) { | 237 | if (err < 0) { |
238 | dev_err(&client->dev, | 238 | dev_err(&client->dev, |
@@ -262,11 +262,11 @@ static int pca9532_configure(struct i2c_client *client, | |||
262 | BIT_MASK(SND_TONE); | 262 | BIT_MASK(SND_TONE); |
263 | data->idev->event = pca9532_event; | 263 | data->idev->event = pca9532_event; |
264 | input_set_drvdata(data->idev, data); | 264 | input_set_drvdata(data->idev, data); |
265 | INIT_WORK(&data->work, pca9532_input_work); | 265 | INIT_WORK(&data->work, pca9532_input_work); |
266 | err = input_register_device(data->idev); | 266 | err = input_register_device(data->idev); |
267 | if (err) { | 267 | if (err) { |
268 | input_free_device(data->idev); | 268 | input_free_device(data->idev); |
269 | cancel_work_sync(&data->work); | 269 | cancel_work_sync(&data->work); |
270 | data->idev = NULL; | 270 | data->idev = NULL; |
271 | goto exit; | 271 | goto exit; |
272 | } | 272 | } |
@@ -283,13 +283,13 @@ exit: | |||
283 | break; | 283 | break; |
284 | case PCA9532_TYPE_LED: | 284 | case PCA9532_TYPE_LED: |
285 | led_classdev_unregister(&data->leds[i].ldev); | 285 | led_classdev_unregister(&data->leds[i].ldev); |
286 | cancel_work_sync(&data->leds[i].work); | 286 | cancel_work_sync(&data->leds[i].work); |
287 | break; | 287 | break; |
288 | case PCA9532_TYPE_N2100_BEEP: | 288 | case PCA9532_TYPE_N2100_BEEP: |
289 | if (data->idev != NULL) { | 289 | if (data->idev != NULL) { |
290 | input_unregister_device(data->idev); | 290 | input_unregister_device(data->idev); |
291 | input_free_device(data->idev); | 291 | input_free_device(data->idev); |
292 | cancel_work_sync(&data->work); | 292 | cancel_work_sync(&data->work); |
293 | data->idev = NULL; | 293 | data->idev = NULL; |
294 | } | 294 | } |
295 | break; | 295 | break; |
@@ -340,13 +340,13 @@ static int pca9532_remove(struct i2c_client *client) | |||
340 | break; | 340 | break; |
341 | case PCA9532_TYPE_LED: | 341 | case PCA9532_TYPE_LED: |
342 | led_classdev_unregister(&data->leds[i].ldev); | 342 | led_classdev_unregister(&data->leds[i].ldev); |
343 | cancel_work_sync(&data->leds[i].work); | 343 | cancel_work_sync(&data->leds[i].work); |
344 | break; | 344 | break; |
345 | case PCA9532_TYPE_N2100_BEEP: | 345 | case PCA9532_TYPE_N2100_BEEP: |
346 | if (data->idev != NULL) { | 346 | if (data->idev != NULL) { |
347 | input_unregister_device(data->idev); | 347 | input_unregister_device(data->idev); |
348 | input_free_device(data->idev); | 348 | input_free_device(data->idev); |
349 | cancel_work_sync(&data->work); | 349 | cancel_work_sync(&data->work); |
350 | data->idev = NULL; | 350 | data->idev = NULL; |
351 | } | 351 | } |
352 | break; | 352 | break; |
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h index d4e8979735cb..9c3138265f8e 100644 --- a/drivers/lguest/lg.h +++ b/drivers/lguest/lg.h | |||
@@ -82,7 +82,7 @@ struct lg_cpu { | |||
82 | 82 | ||
83 | struct lg_eventfd { | 83 | struct lg_eventfd { |
84 | unsigned long addr; | 84 | unsigned long addr; |
85 | struct file *event; | 85 | struct eventfd_ctx *event; |
86 | }; | 86 | }; |
87 | 87 | ||
88 | struct lg_eventfd_map { | 88 | struct lg_eventfd_map { |
diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c index 32e297121058..9f9a2953b383 100644 --- a/drivers/lguest/lguest_user.c +++ b/drivers/lguest/lguest_user.c | |||
@@ -50,7 +50,7 @@ static int add_eventfd(struct lguest *lg, unsigned long addr, int fd) | |||
50 | 50 | ||
51 | /* Now append new entry. */ | 51 | /* Now append new entry. */ |
52 | new->map[new->num].addr = addr; | 52 | new->map[new->num].addr = addr; |
53 | new->map[new->num].event = eventfd_fget(fd); | 53 | new->map[new->num].event = eventfd_ctx_fdget(fd); |
54 | if (IS_ERR(new->map[new->num].event)) { | 54 | if (IS_ERR(new->map[new->num].event)) { |
55 | kfree(new); | 55 | kfree(new); |
56 | return PTR_ERR(new->map[new->num].event); | 56 | return PTR_ERR(new->map[new->num].event); |
@@ -357,7 +357,7 @@ static int close(struct inode *inode, struct file *file) | |||
357 | 357 | ||
358 | /* Release any eventfds they registered. */ | 358 | /* Release any eventfds they registered. */ |
359 | for (i = 0; i < lg->eventfds->num; i++) | 359 | for (i = 0; i < lg->eventfds->num; i++) |
360 | fput(lg->eventfds->map[i].event); | 360 | eventfd_ctx_put(lg->eventfds->map[i].event); |
361 | kfree(lg->eventfds); | 361 | kfree(lg->eventfds); |
362 | 362 | ||
363 | /* If lg->dead doesn't contain an error code it will be NULL or a | 363 | /* If lg->dead doesn't contain an error code it will be NULL or a |
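The lguest change above stops pinning a whole struct file for every registered eventfd and instead keeps only the eventfd context, obtained with eventfd_ctx_fdget() and released with eventfd_ctx_put(). A minimal sketch of that acquire/signal/release pattern for an in-kernel consumer (the example_* helpers are invented; error handling is trimmed to the essentials):

#include <linux/eventfd.h>
#include <linux/err.h>

/* Resolve a userspace eventfd file descriptor once, keep only the
 * context, and signal it later from kernel context. */
static struct eventfd_ctx *example_ctx;

static int example_bind_eventfd(int fd)
{
	struct eventfd_ctx *ctx = eventfd_ctx_fdget(fd);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);
	example_ctx = ctx;
	return 0;
}

static void example_notify(void)
{
	/* Adds 1 to the eventfd counter and wakes any reader. */
	eventfd_signal(example_ctx, 1);
}

static void example_unbind_eventfd(void)
{
	eventfd_ctx_put(example_ctx);
	example_ctx = NULL;
}

Because the context is refcounted independently of the file descriptor table, notifications keep working even after the process closes the original fd.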
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c index c3ae51584b12..3710ff88fc10 100644 --- a/drivers/md/dm-exception-store.c +++ b/drivers/md/dm-exception-store.c | |||
@@ -195,7 +195,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, | |||
195 | struct dm_exception_store **store) | 195 | struct dm_exception_store **store) |
196 | { | 196 | { |
197 | int r = 0; | 197 | int r = 0; |
198 | struct dm_exception_store_type *type; | 198 | struct dm_exception_store_type *type = NULL; |
199 | struct dm_exception_store *tmp_store; | 199 | struct dm_exception_store *tmp_store; |
200 | char persistent; | 200 | char persistent; |
201 | 201 | ||
@@ -211,12 +211,15 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, | |||
211 | } | 211 | } |
212 | 212 | ||
213 | persistent = toupper(*argv[1]); | 213 | persistent = toupper(*argv[1]); |
214 | if (persistent != 'P' && persistent != 'N') { | 214 | if (persistent == 'P') |
215 | type = get_type("P"); | ||
216 | else if (persistent == 'N') | ||
217 | type = get_type("N"); | ||
218 | else { | ||
215 | ti->error = "Persistent flag is not P or N"; | 219 | ti->error = "Persistent flag is not P or N"; |
216 | return -EINVAL; | 220 | return -EINVAL; |
217 | } | 221 | } |
218 | 222 | ||
219 | type = get_type(&persistent); | ||
220 | if (!type) { | 223 | if (!type) { |
221 | ti->error = "Exception store type not recognised"; | 224 | ti->error = "Exception store type not recognised"; |
222 | r = -EINVAL; | 225 | r = -EINVAL; |
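The dm-exception-store fix above replaces get_type(&persistent) with get_type("P") or get_type("N"): persistent is a single char, so taking its address does not produce a NUL-terminated string and the string lookup behind get_type() would read past it. A userspace sketch of the same pitfall (the lookup table and names are invented for illustration):

#include <stdio.h>
#include <string.h>

/* Stand-in for the registered exception-store types. */
static const char *lookup(const char *name)
{
	if (!strcmp(name, "P"))
		return "persistent";
	if (!strcmp(name, "N"))
		return "transient";
	return NULL;
}

int main(void)
{
	char persistent = 'P';
	const char *res;

	/* What the old code effectively did: &persistent points at one
	 * char with no terminating NUL, so strcmp() reads whatever
	 * happens to follow it -- a match is pure luck. */
	res = lookup(&persistent);
	printf("by char  : %s\n", res ? res : "(no reliable match)");

	/* What the fix does: pass a real NUL-terminated string. */
	res = lookup("P");
	printf("by string: %s\n", res ? res : "(not found)");
	return 0;
}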
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 4899ebe767c8..2cba557d9e61 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c | |||
@@ -495,7 +495,7 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, | |||
495 | return 0; | 495 | return 0; |
496 | } | 496 | } |
497 | 497 | ||
498 | if (blk_stack_limits(limits, &q->limits, start) < 0) | 498 | if (blk_stack_limits(limits, &q->limits, start << 9) < 0) |
499 | DMWARN("%s: target device %s is misaligned", | 499 | DMWARN("%s: target device %s is misaligned", |
500 | dm_device_name(ti->table->md), bdevname(bdev, b)); | 500 | dm_device_name(ti->table->md), bdevname(bdev, b)); |
501 | 501 | ||
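A note on the unit in the hunk above and in the md hunks below: dm and md track device offsets in 512-byte sectors, while the stacked-limits helpers are handed a byte offset here, so the value is shifted left by 9 (one sector = 512 = 1 << 9 bytes). A start of 8 sectors, for instance, is passed as 8 << 9 = 4096 bytes; the md personalities below do the same with rdev->data_offset << 9.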
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 3c6d4ee8921d..9acd54a5cffb 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -1017,7 +1017,7 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector, | |||
1017 | clone->bi_flags |= 1 << BIO_CLONED; | 1017 | clone->bi_flags |= 1 << BIO_CLONED; |
1018 | 1018 | ||
1019 | if (bio_integrity(bio)) { | 1019 | if (bio_integrity(bio)) { |
1020 | bio_integrity_clone(clone, bio, GFP_NOIO); | 1020 | bio_integrity_clone(clone, bio, GFP_NOIO, bs); |
1021 | bio_integrity_trim(clone, | 1021 | bio_integrity_trim(clone, |
1022 | bio_sector_offset(bio, idx, offset), len); | 1022 | bio_sector_offset(bio, idx, offset), len); |
1023 | } | 1023 | } |
@@ -1045,7 +1045,7 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector, | |||
1045 | clone->bi_flags &= ~(1 << BIO_SEG_VALID); | 1045 | clone->bi_flags &= ~(1 << BIO_SEG_VALID); |
1046 | 1046 | ||
1047 | if (bio_integrity(bio)) { | 1047 | if (bio_integrity(bio)) { |
1048 | bio_integrity_clone(clone, bio, GFP_NOIO); | 1048 | bio_integrity_clone(clone, bio, GFP_NOIO, bs); |
1049 | 1049 | ||
1050 | if (idx != bio->bi_idx || clone->bi_size < bio->bi_size) | 1050 | if (idx != bio->bi_idx || clone->bi_size < bio->bi_size) |
1051 | bio_integrity_trim(clone, | 1051 | bio_integrity_trim(clone, |
diff --git a/drivers/md/linear.c b/drivers/md/linear.c index 15c8b7b25a9b..5810fa906af0 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c | |||
@@ -166,8 +166,8 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks) | |||
166 | rdev->sectors = sectors * mddev->chunk_sectors; | 166 | rdev->sectors = sectors * mddev->chunk_sectors; |
167 | } | 167 | } |
168 | 168 | ||
169 | blk_queue_stack_limits(mddev->queue, | 169 | disk_stack_limits(mddev->gendisk, rdev->bdev, |
170 | rdev->bdev->bd_disk->queue); | 170 | rdev->data_offset << 9); |
171 | /* as we don't honour merge_bvec_fn, we must never risk | 171 | /* as we don't honour merge_bvec_fn, we must never risk |
172 | * violating it, so limit ->max_sector to one PAGE, as | 172 | * violating it, so limit ->max_sector to one PAGE, as |
173 | * a one page request is never in violation. | 173 | * a one page request is never in violation. |
diff --git a/drivers/md/md.c b/drivers/md/md.c index 09be637d52cb..0f4a70c43ffc 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -3573,7 +3573,8 @@ suspend_lo_store(mddev_t *mddev, const char *buf, size_t len) | |||
3573 | char *e; | 3573 | char *e; |
3574 | unsigned long long new = simple_strtoull(buf, &e, 10); | 3574 | unsigned long long new = simple_strtoull(buf, &e, 10); |
3575 | 3575 | ||
3576 | if (mddev->pers->quiesce == NULL) | 3576 | if (mddev->pers == NULL || |
3577 | mddev->pers->quiesce == NULL) | ||
3577 | return -EINVAL; | 3578 | return -EINVAL; |
3578 | if (buf == e || (*e && *e != '\n')) | 3579 | if (buf == e || (*e && *e != '\n')) |
3579 | return -EINVAL; | 3580 | return -EINVAL; |
@@ -3601,7 +3602,8 @@ suspend_hi_store(mddev_t *mddev, const char *buf, size_t len) | |||
3601 | char *e; | 3602 | char *e; |
3602 | unsigned long long new = simple_strtoull(buf, &e, 10); | 3603 | unsigned long long new = simple_strtoull(buf, &e, 10); |
3603 | 3604 | ||
3604 | if (mddev->pers->quiesce == NULL) | 3605 | if (mddev->pers == NULL || |
3606 | mddev->pers->quiesce == NULL) | ||
3605 | return -EINVAL; | 3607 | return -EINVAL; |
3606 | if (buf == e || (*e && *e != '\n')) | 3608 | if (buf == e || (*e && *e != '\n')) |
3607 | return -EINVAL; | 3609 | return -EINVAL; |
@@ -3844,11 +3846,9 @@ static int md_alloc(dev_t dev, char *name) | |||
3844 | flush_scheduled_work(); | 3846 | flush_scheduled_work(); |
3845 | 3847 | ||
3846 | mutex_lock(&disks_mutex); | 3848 | mutex_lock(&disks_mutex); |
3847 | if (mddev->gendisk) { | 3849 | error = -EEXIST; |
3848 | mutex_unlock(&disks_mutex); | 3850 | if (mddev->gendisk) |
3849 | mddev_put(mddev); | 3851 | goto abort; |
3850 | return -EEXIST; | ||
3851 | } | ||
3852 | 3852 | ||
3853 | if (name) { | 3853 | if (name) { |
3854 | /* Need to ensure that 'name' is not a duplicate. | 3854 | /* Need to ensure that 'name' is not a duplicate. |
@@ -3860,17 +3860,15 @@ static int md_alloc(dev_t dev, char *name) | |||
3860 | if (mddev2->gendisk && | 3860 | if (mddev2->gendisk && |
3861 | strcmp(mddev2->gendisk->disk_name, name) == 0) { | 3861 | strcmp(mddev2->gendisk->disk_name, name) == 0) { |
3862 | spin_unlock(&all_mddevs_lock); | 3862 | spin_unlock(&all_mddevs_lock); |
3863 | return -EEXIST; | 3863 | goto abort; |
3864 | } | 3864 | } |
3865 | spin_unlock(&all_mddevs_lock); | 3865 | spin_unlock(&all_mddevs_lock); |
3866 | } | 3866 | } |
3867 | 3867 | ||
3868 | error = -ENOMEM; | ||
3868 | mddev->queue = blk_alloc_queue(GFP_KERNEL); | 3869 | mddev->queue = blk_alloc_queue(GFP_KERNEL); |
3869 | if (!mddev->queue) { | 3870 | if (!mddev->queue) |
3870 | mutex_unlock(&disks_mutex); | 3871 | goto abort; |
3871 | mddev_put(mddev); | ||
3872 | return -ENOMEM; | ||
3873 | } | ||
3874 | mddev->queue->queuedata = mddev; | 3872 | mddev->queue->queuedata = mddev; |
3875 | 3873 | ||
3876 | /* Can be unlocked because the queue is new: no concurrency */ | 3874 | /* Can be unlocked because the queue is new: no concurrency */ |
@@ -3880,11 +3878,9 @@ static int md_alloc(dev_t dev, char *name) | |||
3880 | 3878 | ||
3881 | disk = alloc_disk(1 << shift); | 3879 | disk = alloc_disk(1 << shift); |
3882 | if (!disk) { | 3880 | if (!disk) { |
3883 | mutex_unlock(&disks_mutex); | ||
3884 | blk_cleanup_queue(mddev->queue); | 3881 | blk_cleanup_queue(mddev->queue); |
3885 | mddev->queue = NULL; | 3882 | mddev->queue = NULL; |
3886 | mddev_put(mddev); | 3883 | goto abort; |
3887 | return -ENOMEM; | ||
3888 | } | 3884 | } |
3889 | disk->major = MAJOR(mddev->unit); | 3885 | disk->major = MAJOR(mddev->unit); |
3890 | disk->first_minor = unit << shift; | 3886 | disk->first_minor = unit << shift; |
@@ -3906,16 +3902,22 @@ static int md_alloc(dev_t dev, char *name) | |||
3906 | mddev->gendisk = disk; | 3902 | mddev->gendisk = disk; |
3907 | error = kobject_init_and_add(&mddev->kobj, &md_ktype, | 3903 | error = kobject_init_and_add(&mddev->kobj, &md_ktype, |
3908 | &disk_to_dev(disk)->kobj, "%s", "md"); | 3904 | &disk_to_dev(disk)->kobj, "%s", "md"); |
3909 | mutex_unlock(&disks_mutex); | 3905 | if (error) { |
3910 | if (error) | 3906 | /* This isn't possible, but as kobject_init_and_add is marked |
3907 | * __must_check, we must do something with the result | ||
3908 | */ | ||
3911 | printk(KERN_WARNING "md: cannot register %s/md - name in use\n", | 3909 | printk(KERN_WARNING "md: cannot register %s/md - name in use\n", |
3912 | disk->disk_name); | 3910 | disk->disk_name); |
3913 | else { | 3911 | error = 0; |
3912 | } | ||
3913 | abort: | ||
3914 | mutex_unlock(&disks_mutex); | ||
3915 | if (!error) { | ||
3914 | kobject_uevent(&mddev->kobj, KOBJ_ADD); | 3916 | kobject_uevent(&mddev->kobj, KOBJ_ADD); |
3915 | mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, "array_state"); | 3917 | mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, "array_state"); |
3916 | } | 3918 | } |
3917 | mddev_put(mddev); | 3919 | mddev_put(mddev); |
3918 | return 0; | 3920 | return error; |
3919 | } | 3921 | } |
3920 | 3922 | ||
3921 | static struct kobject *md_probe(dev_t dev, int *part, void *data) | 3923 | static struct kobject *md_probe(dev_t dev, int *part, void *data) |
@@ -6334,10 +6336,16 @@ void md_do_sync(mddev_t *mddev) | |||
6334 | sysfs_notify(&mddev->kobj, NULL, "sync_completed"); | 6336 | sysfs_notify(&mddev->kobj, NULL, "sync_completed"); |
6335 | } | 6337 | } |
6336 | 6338 | ||
6337 | if (j >= mddev->resync_max) | 6339 | while (j >= mddev->resync_max && !kthread_should_stop()) { |
6338 | wait_event(mddev->recovery_wait, | 6340 | /* As this condition is controlled by user-space, |
6339 | mddev->resync_max > j | 6341 | * we can block indefinitely, so use '_interruptible' |
6340 | || kthread_should_stop()); | 6342 | * to avoid triggering warnings. |
6343 | */ | ||
6344 | flush_signals(current); /* just in case */ | ||
6345 | wait_event_interruptible(mddev->recovery_wait, | ||
6346 | mddev->resync_max > j | ||
6347 | || kthread_should_stop()); | ||
6348 | } | ||
6341 | 6349 | ||
6342 | if (kthread_should_stop()) | 6350 | if (kthread_should_stop()) |
6343 | goto interrupted; | 6351 | goto interrupted; |
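On the md_do_sync() hunk above: resync_max is controlled from user space, so the sync thread can legitimately be parked here for an arbitrarily long time, and doing that in an uninterruptible wait_event() would, as the new comment says, trigger the kernel's blocked-task warnings. The change therefore loops around wait_event_interruptible(), calling flush_signals(current) first so a stray pending signal cannot turn the interruptible wait into an immediate return, and re-checking the condition after every wakeup before proceeding or honouring kthread_should_stop().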
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index cbe368fa6598..237fe3fd235c 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c | |||
@@ -294,7 +294,8 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) | |||
294 | for (path = first; path <= last; path++) | 294 | for (path = first; path <= last; path++) |
295 | if ((p=conf->multipaths+path)->rdev == NULL) { | 295 | if ((p=conf->multipaths+path)->rdev == NULL) { |
296 | q = rdev->bdev->bd_disk->queue; | 296 | q = rdev->bdev->bd_disk->queue; |
297 | blk_queue_stack_limits(mddev->queue, q); | 297 | disk_stack_limits(mddev->gendisk, rdev->bdev, |
298 | rdev->data_offset << 9); | ||
298 | 299 | ||
299 | /* as we don't honour merge_bvec_fn, we must never risk | 300 | /* as we don't honour merge_bvec_fn, we must never risk |
300 | * violating it, so limit ->max_sector to one PAGE, as | 301 | * violating it, so limit ->max_sector to one PAGE, as |
@@ -463,9 +464,9 @@ static int multipath_run (mddev_t *mddev) | |||
463 | 464 | ||
464 | disk = conf->multipaths + disk_idx; | 465 | disk = conf->multipaths + disk_idx; |
465 | disk->rdev = rdev; | 466 | disk->rdev = rdev; |
467 | disk_stack_limits(mddev->gendisk, rdev->bdev, | ||
468 | rdev->data_offset << 9); | ||
466 | 469 | ||
467 | blk_queue_stack_limits(mddev->queue, | ||
468 | rdev->bdev->bd_disk->queue); | ||
469 | /* as we don't honour merge_bvec_fn, we must never risk | 470 | /* as we don't honour merge_bvec_fn, we must never risk |
470 | * violating it, not that we ever expect a device with | 471 | * violating it, not that we ever expect a device with |
471 | * a merge_bvec_fn to be involved in multipath */ | 472 | * a merge_bvec_fn to be involved in multipath */ |
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index ab4a489d8695..335f490dcad6 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c | |||
@@ -170,8 +170,8 @@ static int create_strip_zones(mddev_t *mddev) | |||
170 | } | 170 | } |
171 | dev[j] = rdev1; | 171 | dev[j] = rdev1; |
172 | 172 | ||
173 | blk_queue_stack_limits(mddev->queue, | 173 | disk_stack_limits(mddev->gendisk, rdev1->bdev, |
174 | rdev1->bdev->bd_disk->queue); | 174 | rdev1->data_offset << 9); |
175 | /* as we don't honour merge_bvec_fn, we must never risk | 175 | /* as we don't honour merge_bvec_fn, we must never risk |
176 | * violating it, so limit ->max_sector to one PAGE, as | 176 | * violating it, so limit ->max_sector to one PAGE, as |
177 | * a one page request is never in violation. | 177 | * a one page request is never in violation. |
@@ -250,6 +250,11 @@ static int create_strip_zones(mddev_t *mddev) | |||
250 | mddev->chunk_sectors << 9); | 250 | mddev->chunk_sectors << 9); |
251 | goto abort; | 251 | goto abort; |
252 | } | 252 | } |
253 | |||
254 | blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9); | ||
255 | blk_queue_io_opt(mddev->queue, | ||
256 | (mddev->chunk_sectors << 9) * mddev->raid_disks); | ||
257 | |||
253 | printk(KERN_INFO "raid0: done.\n"); | 258 | printk(KERN_INFO "raid0: done.\n"); |
254 | mddev->private = conf; | 259 | mddev->private = conf; |
255 | return 0; | 260 | return 0; |
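
The added raid0 lines export the striping geometry as block-layer topology hints: io_min is one chunk, io_opt is one full stripe across all members. A sketch with hypothetical numbers (64 KiB chunks, four member disks); the helper name is illustrative:

    #include <linux/blkdev.h>
    #include "md.h"                 /* mddev_t from drivers/md */

    /* Hypothetical example: chunk_sectors = 128 (64 KiB), raid_disks = 4:
     *   io_min = 128 << 9        =  65536 bytes  (one chunk)
     *   io_opt = 65536 * 4       = 262144 bytes  (one full stripe)
     * so well-behaved users can issue stripe-sized, stripe-aligned I/O. */
    static void raid0_set_topology_hints(mddev_t *mddev)
    {
            blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
            blk_queue_io_opt(mddev->queue,
                             (mddev->chunk_sectors << 9) * mddev->raid_disks);
    }
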
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 89939a7aef57..0569efba0c02 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -1123,8 +1123,8 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) | |||
1123 | for (mirror = first; mirror <= last; mirror++) | 1123 | for (mirror = first; mirror <= last; mirror++) |
1124 | if ( !(p=conf->mirrors+mirror)->rdev) { | 1124 | if ( !(p=conf->mirrors+mirror)->rdev) { |
1125 | 1125 | ||
1126 | blk_queue_stack_limits(mddev->queue, | 1126 | disk_stack_limits(mddev->gendisk, rdev->bdev, |
1127 | rdev->bdev->bd_disk->queue); | 1127 | rdev->data_offset << 9); |
1128 | /* as we don't honour merge_bvec_fn, we must never risk | 1128 | /* as we don't honour merge_bvec_fn, we must never risk |
1129 | * violating it, so limit ->max_sector to one PAGE, as | 1129 | * violating it, so limit ->max_sector to one PAGE, as |
1130 | * a one page request is never in violation. | 1130 | * a one page request is never in violation. |
@@ -1988,9 +1988,8 @@ static int run(mddev_t *mddev) | |||
1988 | disk = conf->mirrors + disk_idx; | 1988 | disk = conf->mirrors + disk_idx; |
1989 | 1989 | ||
1990 | disk->rdev = rdev; | 1990 | disk->rdev = rdev; |
1991 | 1991 | disk_stack_limits(mddev->gendisk, rdev->bdev, | |
1992 | blk_queue_stack_limits(mddev->queue, | 1992 | rdev->data_offset << 9); |
1993 | rdev->bdev->bd_disk->queue); | ||
1994 | /* as we don't honour merge_bvec_fn, we must never risk | 1993 | /* as we don't honour merge_bvec_fn, we must never risk |
1995 | * violating it, so limit ->max_sector to one PAGE, as | 1994 | * violating it, so limit ->max_sector to one PAGE, as |
1996 | * a one page request is never in violation. | 1995 | * a one page request is never in violation. |
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index ae12ceafe10c..7298a5e5a183 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -1151,8 +1151,8 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) | |||
1151 | for ( ; mirror <= last ; mirror++) | 1151 | for ( ; mirror <= last ; mirror++) |
1152 | if ( !(p=conf->mirrors+mirror)->rdev) { | 1152 | if ( !(p=conf->mirrors+mirror)->rdev) { |
1153 | 1153 | ||
1154 | blk_queue_stack_limits(mddev->queue, | 1154 | disk_stack_limits(mddev->gendisk, rdev->bdev, |
1155 | rdev->bdev->bd_disk->queue); | 1155 | rdev->data_offset << 9); |
1156 | /* as we don't honour merge_bvec_fn, we must never risk | 1156 | /* as we don't honour merge_bvec_fn, we must never risk |
1157 | * violating it, so limit ->max_sector to one PAGE, as | 1157 | * violating it, so limit ->max_sector to one PAGE, as |
1158 | * a one page request is never in violation. | 1158 | * a one page request is never in violation. |
@@ -2044,7 +2044,7 @@ raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks) | |||
2044 | static int run(mddev_t *mddev) | 2044 | static int run(mddev_t *mddev) |
2045 | { | 2045 | { |
2046 | conf_t *conf; | 2046 | conf_t *conf; |
2047 | int i, disk_idx; | 2047 | int i, disk_idx, chunk_size; |
2048 | mirror_info_t *disk; | 2048 | mirror_info_t *disk; |
2049 | mdk_rdev_t *rdev; | 2049 | mdk_rdev_t *rdev; |
2050 | int nc, fc, fo; | 2050 | int nc, fc, fo; |
@@ -2130,6 +2130,14 @@ static int run(mddev_t *mddev) | |||
2130 | spin_lock_init(&conf->device_lock); | 2130 | spin_lock_init(&conf->device_lock); |
2131 | mddev->queue->queue_lock = &conf->device_lock; | 2131 | mddev->queue->queue_lock = &conf->device_lock; |
2132 | 2132 | ||
2133 | chunk_size = mddev->chunk_sectors << 9; | ||
2134 | blk_queue_io_min(mddev->queue, chunk_size); | ||
2135 | if (conf->raid_disks % conf->near_copies) | ||
2136 | blk_queue_io_opt(mddev->queue, chunk_size * conf->raid_disks); | ||
2137 | else | ||
2138 | blk_queue_io_opt(mddev->queue, chunk_size * | ||
2139 | (conf->raid_disks / conf->near_copies)); | ||
2140 | |||
2133 | list_for_each_entry(rdev, &mddev->disks, same_set) { | 2141 | list_for_each_entry(rdev, &mddev->disks, same_set) { |
2134 | disk_idx = rdev->raid_disk; | 2142 | disk_idx = rdev->raid_disk; |
2135 | if (disk_idx >= mddev->raid_disks | 2143 | if (disk_idx >= mddev->raid_disks |
@@ -2138,9 +2146,8 @@ static int run(mddev_t *mddev) | |||
2138 | disk = conf->mirrors + disk_idx; | 2146 | disk = conf->mirrors + disk_idx; |
2139 | 2147 | ||
2140 | disk->rdev = rdev; | 2148 | disk->rdev = rdev; |
2141 | 2149 | disk_stack_limits(mddev->gendisk, rdev->bdev, | |
2142 | blk_queue_stack_limits(mddev->queue, | 2150 | rdev->data_offset << 9); |
2143 | rdev->bdev->bd_disk->queue); | ||
2144 | /* as we don't honour merge_bvec_fn, we must never risk | 2151 | /* as we don't honour merge_bvec_fn, we must never risk |
2145 | * violating it, so limit ->max_sector to one PAGE, as | 2152 | * violating it, so limit ->max_sector to one PAGE, as |
2146 | * a one page request is never in violation. | 2153 | * a one page request is never in violation. |
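
The raid10 version of the same hints accounts for the layout: when near_copies divides raid_disks evenly, one stripe carries raid_disks / near_copies distinct chunks, so that is the io_opt multiplier; otherwise the code falls back to chunk_size * raid_disks. A sketch with hypothetical numbers; conf_t and its fields are the raid10 driver's own (as in the hunk), and the in-tree includes and helper name are assumptions:

    #include <linux/blkdev.h>
    #include "md.h"                 /* mddev_t (assumed in-tree header) */
    #include "raid10.h"             /* conf_t with raid_disks/near_copies */

    /* Hypothetical n2 layout: raid_disks = 4, near_copies = 2, 64 KiB chunk.
     * 4 % 2 == 0, so io_opt = 64 KiB * (4 / 2) = 128 KiB -- the amount of
     * distinct data in one stripe once the extra copies are discounted. */
    static void raid10_set_topology_hints(mddev_t *mddev, conf_t *conf)
    {
            int chunk_size = mddev->chunk_sectors << 9;

            blk_queue_io_min(mddev->queue, chunk_size);
            if (conf->raid_disks % conf->near_copies)
                    blk_queue_io_opt(mddev->queue,
                                     chunk_size * conf->raid_disks);
            else
                    blk_queue_io_opt(mddev->queue, chunk_size *
                                     (conf->raid_disks / conf->near_copies));
    }
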
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index f9f991e6e138..37835538b58e 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -3699,13 +3699,21 @@ static int make_request(struct request_queue *q, struct bio * bi) | |||
3699 | goto retry; | 3699 | goto retry; |
3700 | } | 3700 | } |
3701 | } | 3701 | } |
3702 | /* FIXME what if we get a false positive because these | 3702 | |
3703 | * are being updated. | 3703 | if (bio_data_dir(bi) == WRITE && |
3704 | */ | 3704 | logical_sector >= mddev->suspend_lo && |
3705 | if (logical_sector >= mddev->suspend_lo && | ||
3706 | logical_sector < mddev->suspend_hi) { | 3705 | logical_sector < mddev->suspend_hi) { |
3707 | release_stripe(sh); | 3706 | release_stripe(sh); |
3708 | schedule(); | 3707 | /* As the suspend_* range is controlled by |
3708 | * userspace, we want an interruptible | ||
3709 | * wait. | ||
3710 | */ | ||
3711 | flush_signals(current); | ||
3712 | prepare_to_wait(&conf->wait_for_overlap, | ||
3713 | &w, TASK_INTERRUPTIBLE); | ||
3714 | if (logical_sector >= mddev->suspend_lo && | ||
3715 | logical_sector < mddev->suspend_hi) | ||
3716 | schedule(); | ||
3709 | goto retry; | 3717 | goto retry; |
3710 | } | 3718 | } |
3711 | 3719 | ||
@@ -4452,7 +4460,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev) | |||
4452 | static int run(mddev_t *mddev) | 4460 | static int run(mddev_t *mddev) |
4453 | { | 4461 | { |
4454 | raid5_conf_t *conf; | 4462 | raid5_conf_t *conf; |
4455 | int working_disks = 0; | 4463 | int working_disks = 0, chunk_size; |
4456 | mdk_rdev_t *rdev; | 4464 | mdk_rdev_t *rdev; |
4457 | 4465 | ||
4458 | if (mddev->recovery_cp != MaxSector) | 4466 | if (mddev->recovery_cp != MaxSector) |
@@ -4607,6 +4615,14 @@ static int run(mddev_t *mddev) | |||
4607 | md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); | 4615 | md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); |
4608 | 4616 | ||
4609 | blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec); | 4617 | blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec); |
4618 | chunk_size = mddev->chunk_sectors << 9; | ||
4619 | blk_queue_io_min(mddev->queue, chunk_size); | ||
4620 | blk_queue_io_opt(mddev->queue, chunk_size * | ||
4621 | (conf->raid_disks - conf->max_degraded)); | ||
4622 | |||
4623 | list_for_each_entry(rdev, &mddev->disks, same_set) | ||
4624 | disk_stack_limits(mddev->gendisk, rdev->bdev, | ||
4625 | rdev->data_offset << 9); | ||
4610 | 4626 | ||
4611 | return 0; | 4627 | return 0; |
4612 | abort: | 4628 | abort: |
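
For raid5/6 the optimal size counts only the data-bearing members, raid_disks - max_degraded (one parity device for RAID5, two for RAID6). A sketch with hypothetical numbers; the in-tree includes and helper name are assumptions:

    #include <linux/blkdev.h>
    #include "md.h"                 /* mddev_t (assumed in-tree header) */
    #include "raid5.h"              /* raid5_conf_t with raid_disks/max_degraded */

    /* Hypothetical 4-disk RAID5 with 64 KiB chunks: max_degraded = 1, so
     *   io_opt = 64 KiB * (4 - 1) = 192 KiB
     * i.e. one full stripe of data, the write size that avoids a
     * read-modify-write of the parity block. */
    static void raid5_set_topology_hints(mddev_t *mddev, raid5_conf_t *conf)
    {
            int chunk_size = mddev->chunk_sectors << 9;

            blk_queue_io_min(mddev->queue, chunk_size);
            blk_queue_io_opt(mddev->queue, chunk_size *
                             (conf->raid_disks - conf->max_degraded));
    }
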
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index 240608cc7ae9..a461017ce5ce 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c | |||
@@ -1313,6 +1313,12 @@ static int mmc_spi_probe(struct spi_device *spi) | |||
1313 | struct mmc_spi_host *host; | 1313 | struct mmc_spi_host *host; |
1314 | int status; | 1314 | int status; |
1315 | 1315 | ||
1316 | /* We rely on full duplex transfers, mostly to reduce | ||
1317 | * per-transfer overheads (by making fewer transfers). | ||
1318 | */ | ||
1319 | if (spi->master->flags & SPI_MASTER_HALF_DUPLEX) | ||
1320 | return -EINVAL; | ||
1321 | |||
1316 | /* MMC and SD specs only seem to care that sampling is on the | 1322 | /* MMC and SD specs only seem to care that sampling is on the |
1317 | * rising edge ... meaning SPI modes 0 or 3. So either SPI mode | 1323 | * rising edge ... meaning SPI modes 0 or 3. So either SPI mode |
1318 | * should be legit. We'll use mode 0 since the steady state is 0, | 1324 | * should be legit. We'll use mode 0 since the steady state is 0, |
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c index 5011fa73f918..1479da6d3aa6 100644 --- a/drivers/mtd/cmdlinepart.c +++ b/drivers/mtd/cmdlinepart.c | |||
@@ -194,7 +194,7 @@ static struct mtd_partition * newpart(char *s, | |||
194 | parts[this_part].name = extra_mem; | 194 | parts[this_part].name = extra_mem; |
195 | extra_mem += name_len + 1; | 195 | extra_mem += name_len + 1; |
196 | 196 | ||
197 | dbg(("partition %d: name <%s>, offset %x, size %x, mask flags %x\n", | 197 | dbg(("partition %d: name <%s>, offset %llx, size %llx, mask flags %x\n", |
198 | this_part, | 198 | this_part, |
199 | parts[this_part].name, | 199 | parts[this_part].name, |
200 | parts[this_part].offset, | 200 | parts[this_part].offset, |
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c index 59c46126a5ce..ae5fe91867e1 100644 --- a/drivers/mtd/devices/m25p80.c +++ b/drivers/mtd/devices/m25p80.c | |||
@@ -54,7 +54,7 @@ | |||
54 | #define SR_SRWD 0x80 /* SR write protect */ | 54 | #define SR_SRWD 0x80 /* SR write protect */ |
55 | 55 | ||
56 | /* Define max times to check status register before we give up. */ | 56 | /* Define max times to check status register before we give up. */ |
57 | #define MAX_READY_WAIT_JIFFIES (10 * HZ) /* eg. M25P128 specs 6s max sector erase */ | 57 | #define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */ |
58 | #define CMD_SIZE 4 | 58 | #define CMD_SIZE 4 |
59 | 59 | ||
60 | #ifdef CONFIG_M25PXX_USE_FAST_READ | 60 | #ifdef CONFIG_M25PXX_USE_FAST_READ |
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c index 73f05227dc8c..d8cf29c01cc4 100644 --- a/drivers/mtd/inftlcore.c +++ b/drivers/mtd/inftlcore.c | |||
@@ -226,7 +226,7 @@ static u16 INFTL_findfreeblock(struct INFTLrecord *inftl, int desperate) | |||
226 | if (!desperate && inftl->numfreeEUNs < 2) { | 226 | if (!desperate && inftl->numfreeEUNs < 2) { |
227 | DEBUG(MTD_DEBUG_LEVEL1, "INFTL: there are too few free " | 227 | DEBUG(MTD_DEBUG_LEVEL1, "INFTL: there are too few free " |
228 | "EUNs (%d)\n", inftl->numfreeEUNs); | 228 | "EUNs (%d)\n", inftl->numfreeEUNs); |
229 | return 0xffff; | 229 | return BLOCK_NIL; |
230 | } | 230 | } |
231 | 231 | ||
232 | /* Scan for a free block */ | 232 | /* Scan for a free block */ |
@@ -281,7 +281,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned | |||
281 | silly = MAX_LOOPS; | 281 | silly = MAX_LOOPS; |
282 | while (thisEUN < inftl->nb_blocks) { | 282 | while (thisEUN < inftl->nb_blocks) { |
283 | for (block = 0; block < inftl->EraseSize/SECTORSIZE; block ++) { | 283 | for (block = 0; block < inftl->EraseSize/SECTORSIZE; block ++) { |
284 | if ((BlockMap[block] != 0xffff) || BlockDeleted[block]) | 284 | if ((BlockMap[block] != BLOCK_NIL) || |
285 | BlockDeleted[block]) | ||
285 | continue; | 286 | continue; |
286 | 287 | ||
287 | if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize) | 288 | if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize) |
@@ -525,7 +526,7 @@ static inline u16 INFTL_findwriteunit(struct INFTLrecord *inftl, unsigned block) | |||
525 | if (!silly--) { | 526 | if (!silly--) { |
526 | printk(KERN_WARNING "INFTL: infinite loop in " | 527 | printk(KERN_WARNING "INFTL: infinite loop in " |
527 | "Virtual Unit Chain 0x%x\n", thisVUC); | 528 | "Virtual Unit Chain 0x%x\n", thisVUC); |
528 | return 0xffff; | 529 | return BLOCK_NIL; |
529 | } | 530 | } |
530 | 531 | ||
531 | /* Skip to next block in chain */ | 532 | /* Skip to next block in chain */ |
@@ -549,7 +550,7 @@ hitused: | |||
549 | * waiting to be picked up. We're going to have to fold | 550 | * waiting to be picked up. We're going to have to fold |
550 | * a chain to make room. | 551 | * a chain to make room. |
551 | */ | 552 | */ |
552 | thisEUN = INFTL_makefreeblock(inftl, 0xffff); | 553 | thisEUN = INFTL_makefreeblock(inftl, BLOCK_NIL); |
553 | 554 | ||
554 | /* | 555 | /* |
555 | * Hopefully we free something, lets try again. | 556 | * Hopefully we free something, lets try again. |
@@ -631,7 +632,7 @@ hitused: | |||
631 | 632 | ||
632 | printk(KERN_WARNING "INFTL: error folding to make room for Virtual " | 633 | printk(KERN_WARNING "INFTL: error folding to make room for Virtual " |
633 | "Unit Chain 0x%x\n", thisVUC); | 634 | "Unit Chain 0x%x\n", thisVUC); |
634 | return 0xffff; | 635 | return BLOCK_NIL; |
635 | } | 636 | } |
636 | 637 | ||
637 | /* | 638 | /* |
diff --git a/drivers/mtd/maps/integrator-flash.c b/drivers/mtd/maps/integrator-flash.c index b08a798ee254..2aac41bde8b3 100644 --- a/drivers/mtd/maps/integrator-flash.c +++ b/drivers/mtd/maps/integrator-flash.c | |||
@@ -42,10 +42,8 @@ | |||
42 | #include <mach/hardware.h> | 42 | #include <mach/hardware.h> |
43 | #include <asm/system.h> | 43 | #include <asm/system.h> |
44 | 44 | ||
45 | #define SUBDEV_NAME_SIZE (BUS_ID_SIZE + 2) | ||
46 | |||
47 | struct armflash_subdev_info { | 45 | struct armflash_subdev_info { |
48 | char name[SUBDEV_NAME_SIZE]; | 46 | char *name; |
49 | struct mtd_info *mtd; | 47 | struct mtd_info *mtd; |
50 | struct map_info map; | 48 | struct map_info map; |
51 | struct flash_platform_data *plat; | 49 | struct flash_platform_data *plat; |
@@ -134,6 +132,8 @@ static void armflash_subdev_remove(struct armflash_subdev_info *subdev) | |||
134 | map_destroy(subdev->mtd); | 132 | map_destroy(subdev->mtd); |
135 | if (subdev->map.virt) | 133 | if (subdev->map.virt) |
136 | iounmap(subdev->map.virt); | 134 | iounmap(subdev->map.virt); |
135 | kfree(subdev->name); | ||
136 | subdev->name = NULL; | ||
137 | release_mem_region(subdev->map.phys, subdev->map.size); | 137 | release_mem_region(subdev->map.phys, subdev->map.size); |
138 | } | 138 | } |
139 | 139 | ||
@@ -177,16 +177,22 @@ static int armflash_probe(struct platform_device *dev) | |||
177 | 177 | ||
178 | if (nr == 1) | 178 | if (nr == 1) |
179 | /* No MTD concatenation, just use the default name */ | 179 | /* No MTD concatenation, just use the default name */ |
180 | snprintf(subdev->name, SUBDEV_NAME_SIZE, "%s", | 180 | subdev->name = kstrdup(dev_name(&dev->dev), GFP_KERNEL); |
181 | dev_name(&dev->dev)); | ||
182 | else | 181 | else |
183 | snprintf(subdev->name, SUBDEV_NAME_SIZE, "%s-%d", | 182 | subdev->name = kasprintf(GFP_KERNEL, "%s-%d", |
184 | dev_name(&dev->dev), i); | 183 | dev_name(&dev->dev), i); |
184 | if (!subdev->name) { | ||
185 | err = -ENOMEM; | ||
186 | break; | ||
187 | } | ||
185 | subdev->plat = plat; | 188 | subdev->plat = plat; |
186 | 189 | ||
187 | err = armflash_subdev_probe(subdev, res); | 190 | err = armflash_subdev_probe(subdev, res); |
188 | if (err) | 191 | if (err) { |
192 | kfree(subdev->name); | ||
193 | subdev->name = NULL; | ||
189 | break; | 194 | break; |
195 | } | ||
190 | } | 196 | } |
191 | info->nr_subdev = i; | 197 | info->nr_subdev = i; |
192 | 198 | ||
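
The integrator-flash hunks above replace the fixed SUBDEV_NAME_SIZE buffer (sized from BUS_ID_SIZE) with a dynamically allocated name that is freed on every error path and in armflash_subdev_remove(). A sketch of the allocation half; the helper name is hypothetical:

    #include <linux/device.h>
    #include <linux/platform_device.h>
    #include <linux/kernel.h>       /* kasprintf() */
    #include <linux/string.h>       /* kstrdup() */
    #include <linux/slab.h>         /* kfree() on the error/remove paths */

    /* Build the per-subdevice name: the bare device name when there is a
     * single flash, "<devname>-<i>" when several are concatenated.
     * Returns NULL on allocation failure; the caller must kfree() the
     * string on every error path and when the device is removed. */
    static char *armflash_subdev_name(struct platform_device *dev,
                                      int nr_subdevs, int index)
    {
            if (nr_subdevs == 1)
                    return kstrdup(dev_name(&dev->dev), GFP_KERNEL);
            return kasprintf(GFP_KERNEL, "%s-%d", dev_name(&dev->dev), index);
    }
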
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c index 2802992b39da..20c828ba9405 100644 --- a/drivers/mtd/nand/atmel_nand.c +++ b/drivers/mtd/nand/atmel_nand.c | |||
@@ -534,7 +534,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev) | |||
534 | &num_partitions); | 534 | &num_partitions); |
535 | 535 | ||
536 | if ((!partitions) || (num_partitions == 0)) { | 536 | if ((!partitions) || (num_partitions == 0)) { |
537 | printk(KERN_ERR "atmel_nand: No parititions defined, or unsupported device.\n"); | 537 | printk(KERN_ERR "atmel_nand: No partitions defined, or unsupported device.\n"); |
538 | res = ENXIO; | 538 | res = ENXIO; |
539 | goto err_no_partitions; | 539 | goto err_no_partitions; |
540 | } | 540 | } |
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index 0cd76f89f4b0..ebd07e95b814 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c | |||
@@ -11,6 +11,8 @@ | |||
11 | #include <linux/platform_device.h> | 11 | #include <linux/platform_device.h> |
12 | #include <linux/dma-mapping.h> | 12 | #include <linux/dma-mapping.h> |
13 | #include <linux/delay.h> | 13 | #include <linux/delay.h> |
14 | #include <linux/jiffies.h> | ||
15 | #include <linux/sched.h> | ||
14 | #include <linux/mtd/mtd.h> | 16 | #include <linux/mtd/mtd.h> |
15 | #include <linux/mtd/nand.h> | 17 | #include <linux/mtd/nand.h> |
16 | #include <linux/mtd/partitions.h> | 18 | #include <linux/mtd/partitions.h> |
@@ -541,7 +543,7 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip) | |||
541 | struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, | 543 | struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, |
542 | mtd); | 544 | mtd); |
543 | unsigned long timeo = jiffies; | 545 | unsigned long timeo = jiffies; |
544 | int status, state = this->state; | 546 | int status = NAND_STATUS_FAIL, state = this->state; |
545 | 547 | ||
546 | if (state == FL_ERASING) | 548 | if (state == FL_ERASING) |
547 | timeo += (HZ * 400) / 1000; | 549 | timeo += (HZ * 400) / 1000; |
@@ -556,8 +558,9 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip) | |||
556 | 558 | ||
557 | while (time_before(jiffies, timeo)) { | 559 | while (time_before(jiffies, timeo)) { |
558 | status = __raw_readb(this->IO_ADDR_R); | 560 | status = __raw_readb(this->IO_ADDR_R); |
559 | if (!(status & 0x40)) | 561 | if (status & NAND_STATUS_READY) |
560 | break; | 562 | break; |
563 | cond_resched(); | ||
561 | } | 564 | } |
562 | return status; | 565 | return status; |
563 | } | 566 | } |
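
The omap2 NAND hunk above fixes the polarity of the ready test (the loop now exits when NAND_STATUS_READY, bit 0x40, becomes set), initialises status to NAND_STATUS_FAIL so a timeout is reported as a failure, and adds cond_resched() so the polling loop cannot monopolise the CPU. A condensed sketch; the helper name and timeout parameter are illustrative:

    #include <linux/io.h>
    #include <linux/jiffies.h>
    #include <linux/sched.h>        /* cond_resched() */
    #include <linux/mtd/nand.h>     /* NAND_STATUS_READY, NAND_STATUS_FAIL */

    /* Poll a NAND status register until the ready bit is set or the
     * timeout expires, yielding between reads.  The initialiser makes a
     * timeout read back as a failed status. */
    static int nand_poll_ready(void __iomem *status_reg, unsigned int timeout_ms)
    {
            unsigned long timeo = jiffies + msecs_to_jiffies(timeout_ms);
            int status = NAND_STATUS_FAIL;

            while (time_before(jiffies, timeo)) {
                    status = __raw_readb(status_reg);
                    if (status & NAND_STATUS_READY)
                            break;
                    cond_resched();
            }
            return status;
    }
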
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c index e3f8495a94c2..fb86cacd5bdb 100644 --- a/drivers/mtd/nftlcore.c +++ b/drivers/mtd/nftlcore.c | |||
@@ -208,7 +208,7 @@ static u16 NFTL_findfreeblock(struct NFTLrecord *nftl, int desperate ) | |||
208 | /* Normally, we force a fold to happen before we run out of free blocks completely */ | 208 | /* Normally, we force a fold to happen before we run out of free blocks completely */ |
209 | if (!desperate && nftl->numfreeEUNs < 2) { | 209 | if (!desperate && nftl->numfreeEUNs < 2) { |
210 | DEBUG(MTD_DEBUG_LEVEL1, "NFTL_findfreeblock: there are too few free EUNs\n"); | 210 | DEBUG(MTD_DEBUG_LEVEL1, "NFTL_findfreeblock: there are too few free EUNs\n"); |
211 | return 0xffff; | 211 | return BLOCK_NIL; |
212 | } | 212 | } |
213 | 213 | ||
214 | /* Scan for a free block */ | 214 | /* Scan for a free block */ |
@@ -230,11 +230,11 @@ static u16 NFTL_findfreeblock(struct NFTLrecord *nftl, int desperate ) | |||
230 | printk("Argh! No free blocks found! LastFreeEUN = %d, " | 230 | printk("Argh! No free blocks found! LastFreeEUN = %d, " |
231 | "FirstEUN = %d\n", nftl->LastFreeEUN, | 231 | "FirstEUN = %d\n", nftl->LastFreeEUN, |
232 | le16_to_cpu(nftl->MediaHdr.FirstPhysicalEUN)); | 232 | le16_to_cpu(nftl->MediaHdr.FirstPhysicalEUN)); |
233 | return 0xffff; | 233 | return BLOCK_NIL; |
234 | } | 234 | } |
235 | } while (pot != nftl->LastFreeEUN); | 235 | } while (pot != nftl->LastFreeEUN); |
236 | 236 | ||
237 | return 0xffff; | 237 | return BLOCK_NIL; |
238 | } | 238 | } |
239 | 239 | ||
240 | static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned pendingblock ) | 240 | static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned pendingblock ) |
@@ -431,7 +431,7 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p | |||
431 | 431 | ||
432 | /* add the header so that it is now a valid chain */ | 432 | /* add the header so that it is now a valid chain */ |
433 | oob.u.a.VirtUnitNum = oob.u.a.SpareVirtUnitNum = cpu_to_le16(thisVUC); | 433 | oob.u.a.VirtUnitNum = oob.u.a.SpareVirtUnitNum = cpu_to_le16(thisVUC); |
434 | oob.u.a.ReplUnitNum = oob.u.a.SpareReplUnitNum = 0xffff; | 434 | oob.u.a.ReplUnitNum = oob.u.a.SpareReplUnitNum = BLOCK_NIL; |
435 | 435 | ||
436 | nftl_write_oob(mtd, (nftl->EraseSize * targetEUN) + 8, | 436 | nftl_write_oob(mtd, (nftl->EraseSize * targetEUN) + 8, |
437 | 8, &retlen, (char *)&oob.u); | 437 | 8, &retlen, (char *)&oob.u); |
@@ -515,7 +515,7 @@ static u16 NFTL_makefreeblock( struct NFTLrecord *nftl , unsigned pendingblock) | |||
515 | if (ChainLength < 2) { | 515 | if (ChainLength < 2) { |
516 | printk(KERN_WARNING "No Virtual Unit Chains available for folding. " | 516 | printk(KERN_WARNING "No Virtual Unit Chains available for folding. " |
517 | "Failing request\n"); | 517 | "Failing request\n"); |
518 | return 0xffff; | 518 | return BLOCK_NIL; |
519 | } | 519 | } |
520 | 520 | ||
521 | return NFTL_foldchain (nftl, LongestChain, pendingblock); | 521 | return NFTL_foldchain (nftl, LongestChain, pendingblock); |
@@ -578,7 +578,7 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block) | |||
578 | printk(KERN_WARNING | 578 | printk(KERN_WARNING |
579 | "Infinite loop in Virtual Unit Chain 0x%x\n", | 579 | "Infinite loop in Virtual Unit Chain 0x%x\n", |
580 | thisVUC); | 580 | thisVUC); |
581 | return 0xffff; | 581 | return BLOCK_NIL; |
582 | } | 582 | } |
583 | 583 | ||
584 | /* Skip to next block in chain */ | 584 | /* Skip to next block in chain */ |
@@ -601,7 +601,7 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block) | |||
601 | //u16 startEUN = nftl->EUNtable[thisVUC]; | 601 | //u16 startEUN = nftl->EUNtable[thisVUC]; |
602 | 602 | ||
603 | //printk("Write to VirtualUnitChain %d, calling makefreeblock()\n", thisVUC); | 603 | //printk("Write to VirtualUnitChain %d, calling makefreeblock()\n", thisVUC); |
604 | writeEUN = NFTL_makefreeblock(nftl, 0xffff); | 604 | writeEUN = NFTL_makefreeblock(nftl, BLOCK_NIL); |
605 | 605 | ||
606 | if (writeEUN == BLOCK_NIL) { | 606 | if (writeEUN == BLOCK_NIL) { |
607 | /* OK, we accept that the above comment is | 607 | /* OK, we accept that the above comment is |
@@ -673,7 +673,7 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block) | |||
673 | 673 | ||
674 | printk(KERN_WARNING "Error folding to make room for Virtual Unit Chain 0x%x\n", | 674 | printk(KERN_WARNING "Error folding to make room for Virtual Unit Chain 0x%x\n", |
675 | thisVUC); | 675 | thisVUC); |
676 | return 0xffff; | 676 | return BLOCK_NIL; |
677 | } | 677 | } |
678 | 678 | ||
679 | static int nftl_writeblock(struct mtd_blktrans_dev *mbd, unsigned long block, | 679 | static int nftl_writeblock(struct mtd_blktrans_dev *mbd, unsigned long block, |
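
Both the inftlcore and nftlcore hunks replace the bare 0xffff sentinel with the existing BLOCK_NIL constant, so the "no block / end of chain" value is spelled the same way as the checks that already compare against BLOCK_NIL. A minimal sketch of the idiom, assuming BLOCK_NIL comes from this tree's <linux/mtd/nftl.h>; the helper name and the elided scan are illustrative:

    #include <linux/mtd/nftl.h>     /* struct NFTLrecord, BLOCK_NIL sentinel */

    /* Return the named sentinel rather than a magic 0xffff when no free
     * Erase Unit is available; callers already test against BLOCK_NIL. */
    static u16 example_findfreeblock(struct NFTLrecord *nftl, int desperate)
    {
            if (!desperate && nftl->numfreeEUNs < 2)
                    return BLOCK_NIL;
            /* ... scan for a free block as NFTL_findfreeblock() does ... */
            return nftl->LastFreeEUN;
    }
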
diff --git a/drivers/net/atl1c/atl1c_ethtool.c b/drivers/net/atl1c/atl1c_ethtool.c index e4afbd628c23..607007d75b6f 100644 --- a/drivers/net/atl1c/atl1c_ethtool.c +++ b/drivers/net/atl1c/atl1c_ethtool.c | |||
@@ -281,6 +281,8 @@ static int atl1c_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | |||
281 | if (wol->wolopts & WAKE_PHY) | 281 | if (wol->wolopts & WAKE_PHY) |
282 | adapter->wol |= AT_WUFC_LNKC; | 282 | adapter->wol |= AT_WUFC_LNKC; |
283 | 283 | ||
284 | device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); | ||
285 | |||
284 | return 0; | 286 | return 0; |
285 | } | 287 | } |
286 | 288 | ||
diff --git a/drivers/net/atl1e/atl1e_ethtool.c b/drivers/net/atl1e/atl1e_ethtool.c index 619c6583e1aa..4003955d7a96 100644 --- a/drivers/net/atl1e/atl1e_ethtool.c +++ b/drivers/net/atl1e/atl1e_ethtool.c | |||
@@ -365,6 +365,8 @@ static int atl1e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | |||
365 | if (wol->wolopts & WAKE_PHY) | 365 | if (wol->wolopts & WAKE_PHY) |
366 | adapter->wol |= AT_WUFC_LNKC; | 366 | adapter->wol |= AT_WUFC_LNKC; |
367 | 367 | ||
368 | device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); | ||
369 | |||
368 | return 0; | 370 | return 0; |
369 | } | 371 | } |
370 | 372 | ||
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h index f703758f0a6e..5b4bf3d2cdc2 100644 --- a/drivers/net/benet/be.h +++ b/drivers/net/benet/be.h | |||
@@ -73,7 +73,7 @@ static inline char *nic_name(struct pci_dev *pdev) | |||
73 | #define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST) | 73 | #define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST) |
74 | 74 | ||
75 | #define BE_MAX_LRO_DESCRIPTORS 16 | 75 | #define BE_MAX_LRO_DESCRIPTORS 16 |
76 | #define BE_MAX_FRAGS_PER_FRAME 16 | 76 | #define BE_MAX_FRAGS_PER_FRAME (min((u32) 16, (u32) MAX_SKB_FRAGS)) |
77 | 77 | ||
78 | struct be_dma_mem { | 78 | struct be_dma_mem { |
79 | void *va; | 79 | void *va; |
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c index 9592f22e4c8c..cccc5419ad72 100644 --- a/drivers/net/benet/be_ethtool.c +++ b/drivers/net/benet/be_ethtool.c | |||
@@ -162,8 +162,8 @@ be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce) | |||
162 | return -EINVAL; | 162 | return -EINVAL; |
163 | 163 | ||
164 | adapter->max_rx_coal = coalesce->rx_max_coalesced_frames; | 164 | adapter->max_rx_coal = coalesce->rx_max_coalesced_frames; |
165 | if (adapter->max_rx_coal > MAX_SKB_FRAGS) | 165 | if (adapter->max_rx_coal > BE_MAX_FRAGS_PER_FRAME) |
166 | adapter->max_rx_coal = MAX_SKB_FRAGS - 1; | 166 | adapter->max_rx_coal = BE_MAX_FRAGS_PER_FRAME; |
167 | 167 | ||
168 | /* if AIC is being turned on now, start with an EQD of 0 */ | 168 | /* if AIC is being turned on now, start with an EQD of 0 */ |
169 | if (rx_eq->enable_aic == 0 && | 169 | if (rx_eq->enable_aic == 0 && |
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index 66c10c87f517..308eb09ca56b 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c | |||
@@ -666,7 +666,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter, | |||
666 | { | 666 | { |
667 | struct be_queue_info *rxq = &adapter->rx_obj.q; | 667 | struct be_queue_info *rxq = &adapter->rx_obj.q; |
668 | struct be_rx_page_info *page_info; | 668 | struct be_rx_page_info *page_info; |
669 | u16 rxq_idx, i, num_rcvd; | 669 | u16 rxq_idx, i, num_rcvd, j; |
670 | u32 pktsize, hdr_len, curr_frag_len; | 670 | u32 pktsize, hdr_len, curr_frag_len; |
671 | u8 *start; | 671 | u8 *start; |
672 | 672 | ||
@@ -709,22 +709,33 @@ static void skb_fill_rx_data(struct be_adapter *adapter, | |||
709 | 709 | ||
710 | /* More frags present for this completion */ | 710 | /* More frags present for this completion */ |
711 | pktsize -= curr_frag_len; /* account for above copied frag */ | 711 | pktsize -= curr_frag_len; /* account for above copied frag */ |
712 | for (i = 1; i < num_rcvd; i++) { | 712 | for (i = 1, j = 0; i < num_rcvd; i++) { |
713 | index_inc(&rxq_idx, rxq->len); | 713 | index_inc(&rxq_idx, rxq->len); |
714 | page_info = get_rx_page_info(adapter, rxq_idx); | 714 | page_info = get_rx_page_info(adapter, rxq_idx); |
715 | 715 | ||
716 | curr_frag_len = min(pktsize, rx_frag_size); | 716 | curr_frag_len = min(pktsize, rx_frag_size); |
717 | 717 | ||
718 | skb_shinfo(skb)->frags[i].page = page_info->page; | 718 | /* Coalesce all frags from the same physical page in one slot */ |
719 | skb_shinfo(skb)->frags[i].page_offset = page_info->page_offset; | 719 | if (page_info->page_offset == 0) { |
720 | skb_shinfo(skb)->frags[i].size = curr_frag_len; | 720 | /* Fresh page */ |
721 | j++; | ||
722 | skb_shinfo(skb)->frags[j].page = page_info->page; | ||
723 | skb_shinfo(skb)->frags[j].page_offset = | ||
724 | page_info->page_offset; | ||
725 | skb_shinfo(skb)->frags[j].size = 0; | ||
726 | skb_shinfo(skb)->nr_frags++; | ||
727 | } else { | ||
728 | put_page(page_info->page); | ||
729 | } | ||
730 | |||
731 | skb_shinfo(skb)->frags[j].size += curr_frag_len; | ||
721 | skb->len += curr_frag_len; | 732 | skb->len += curr_frag_len; |
722 | skb->data_len += curr_frag_len; | 733 | skb->data_len += curr_frag_len; |
723 | skb_shinfo(skb)->nr_frags++; | ||
724 | pktsize -= curr_frag_len; | 734 | pktsize -= curr_frag_len; |
725 | 735 | ||
726 | memset(page_info, 0, sizeof(*page_info)); | 736 | memset(page_info, 0, sizeof(*page_info)); |
727 | } | 737 | } |
738 | BUG_ON(j > MAX_SKB_FRAGS); | ||
728 | 739 | ||
729 | done: | 740 | done: |
730 | be_rx_stats_update(adapter, pktsize, num_rcvd); | 741 | be_rx_stats_update(adapter, pktsize, num_rcvd); |
@@ -786,7 +797,7 @@ static void be_rx_compl_process_lro(struct be_adapter *adapter, | |||
786 | struct skb_frag_struct rx_frags[BE_MAX_FRAGS_PER_FRAME]; | 797 | struct skb_frag_struct rx_frags[BE_MAX_FRAGS_PER_FRAME]; |
787 | struct be_queue_info *rxq = &adapter->rx_obj.q; | 798 | struct be_queue_info *rxq = &adapter->rx_obj.q; |
788 | u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len; | 799 | u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len; |
789 | u16 i, rxq_idx = 0, vid; | 800 | u16 i, rxq_idx = 0, vid, j; |
790 | 801 | ||
791 | num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); | 802 | num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); |
792 | pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp); | 803 | pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp); |
@@ -794,20 +805,28 @@ static void be_rx_compl_process_lro(struct be_adapter *adapter, | |||
794 | rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); | 805 | rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); |
795 | 806 | ||
796 | remaining = pkt_size; | 807 | remaining = pkt_size; |
797 | for (i = 0; i < num_rcvd; i++) { | 808 | for (i = 0, j = -1; i < num_rcvd; i++) { |
798 | page_info = get_rx_page_info(adapter, rxq_idx); | 809 | page_info = get_rx_page_info(adapter, rxq_idx); |
799 | 810 | ||
800 | curr_frag_len = min(remaining, rx_frag_size); | 811 | curr_frag_len = min(remaining, rx_frag_size); |
801 | 812 | ||
802 | rx_frags[i].page = page_info->page; | 813 | /* Coalesce all frags from the same physical page in one slot */ |
803 | rx_frags[i].page_offset = page_info->page_offset; | 814 | if (i == 0 || page_info->page_offset == 0) { |
804 | rx_frags[i].size = curr_frag_len; | 815 | /* First frag or Fresh page */ |
805 | remaining -= curr_frag_len; | 816 | j++; |
817 | rx_frags[j].page = page_info->page; | ||
818 | rx_frags[j].page_offset = page_info->page_offset; | ||
819 | rx_frags[j].size = 0; | ||
820 | } else { | ||
821 | put_page(page_info->page); | ||
822 | } | ||
823 | rx_frags[j].size += curr_frag_len; | ||
806 | 824 | ||
825 | remaining -= curr_frag_len; | ||
807 | index_inc(&rxq_idx, rxq->len); | 826 | index_inc(&rxq_idx, rxq->len); |
808 | |||
809 | memset(page_info, 0, sizeof(*page_info)); | 827 | memset(page_info, 0, sizeof(*page_info)); |
810 | } | 828 | } |
829 | BUG_ON(j > MAX_SKB_FRAGS); | ||
811 | 830 | ||
812 | if (likely(!vlanf)) { | 831 | if (likely(!vlanf)) { |
813 | lro_receive_frags(&adapter->rx_obj.lro_mgr, rx_frags, pkt_size, | 832 | lro_receive_frags(&adapter->rx_obj.lro_mgr, rx_frags, pkt_size, |
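
The benet hunks above cap BE_MAX_FRAGS_PER_FRAME at MAX_SKB_FRAGS and coalesce every RX fragment that lands in the same physical page into one skb frag slot, dropping the surplus page reference with put_page(); the added BUG_ON() asserts that the coalesced slot count stays within MAX_SKB_FRAGS. A condensed sketch of the rule used in skb_fill_rx_data(); the helper and its pointer-to-index parameter are illustrative, and the LRO path additionally treats i == 0 as a fresh slot:

    #include <linux/skbuff.h>
    #include <linux/mm.h>           /* put_page() */

    /* Place one received fragment into the skb's frag array.  A fragment
     * that starts at page_offset 0 came from a fresh page and opens a new
     * slot; a later fragment in the same page only grows the current
     * slot, and its extra page reference is dropped.  As in the hunk, the
     * first fragment handled is expected to open (or already occupy) a
     * valid slot, so *j never stays at its initial value when used. */
    static void add_rx_frag(struct sk_buff *skb, int *j, struct page *page,
                            u32 page_offset, u32 frag_len)
    {
            if (page_offset == 0) {
                    (*j)++;
                    skb_shinfo(skb)->frags[*j].page = page;
                    skb_shinfo(skb)->frags[*j].page_offset = page_offset;
                    skb_shinfo(skb)->frags[*j].size = 0;
                    skb_shinfo(skb)->nr_frags++;
            } else {
                    put_page(page);
            }
            skb_shinfo(skb)->frags[*j].size += frag_len;
            skb->len += frag_len;
            skb->data_len += frag_len;
    }
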
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c index fbf1352e9c1c..951714a7f90a 100644 --- a/drivers/net/bnx2x_main.c +++ b/drivers/net/bnx2x_main.c | |||
@@ -8637,6 +8637,14 @@ static int bnx2x_nway_reset(struct net_device *dev) | |||
8637 | return 0; | 8637 | return 0; |
8638 | } | 8638 | } |
8639 | 8639 | ||
8640 | static u32 | ||
8641 | bnx2x_get_link(struct net_device *dev) | ||
8642 | { | ||
8643 | struct bnx2x *bp = netdev_priv(dev); | ||
8644 | |||
8645 | return bp->link_vars.link_up; | ||
8646 | } | ||
8647 | |||
8640 | static int bnx2x_get_eeprom_len(struct net_device *dev) | 8648 | static int bnx2x_get_eeprom_len(struct net_device *dev) |
8641 | { | 8649 | { |
8642 | struct bnx2x *bp = netdev_priv(dev); | 8650 | struct bnx2x *bp = netdev_priv(dev); |
@@ -10034,7 +10042,7 @@ static struct ethtool_ops bnx2x_ethtool_ops = { | |||
10034 | .get_msglevel = bnx2x_get_msglevel, | 10042 | .get_msglevel = bnx2x_get_msglevel, |
10035 | .set_msglevel = bnx2x_set_msglevel, | 10043 | .set_msglevel = bnx2x_set_msglevel, |
10036 | .nway_reset = bnx2x_nway_reset, | 10044 | .nway_reset = bnx2x_nway_reset, |
10037 | .get_link = ethtool_op_get_link, | 10045 | .get_link = bnx2x_get_link, |
10038 | .get_eeprom_len = bnx2x_get_eeprom_len, | 10046 | .get_eeprom_len = bnx2x_get_eeprom_len, |
10039 | .get_eeprom = bnx2x_get_eeprom, | 10047 | .get_eeprom = bnx2x_get_eeprom, |
10040 | .set_eeprom = bnx2x_set_eeprom, | 10048 | .set_eeprom = bnx2x_set_eeprom, |
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c index 58afafbd3b9c..fd5e32cbcb87 100644 --- a/drivers/net/cpmac.c +++ b/drivers/net/cpmac.c | |||
@@ -1097,7 +1097,7 @@ static const struct net_device_ops cpmac_netdev_ops = { | |||
1097 | .ndo_start_xmit = cpmac_start_xmit, | 1097 | .ndo_start_xmit = cpmac_start_xmit, |
1098 | .ndo_tx_timeout = cpmac_tx_timeout, | 1098 | .ndo_tx_timeout = cpmac_tx_timeout, |
1099 | .ndo_set_multicast_list = cpmac_set_multicast_list, | 1099 | .ndo_set_multicast_list = cpmac_set_multicast_list, |
1100 | .ndo_so_ioctl = cpmac_ioctl, | 1100 | .ndo_do_ioctl = cpmac_ioctl, |
1101 | .ndo_set_config = cpmac_config, | 1101 | .ndo_set_config = cpmac_config, |
1102 | .ndo_change_mtu = eth_change_mtu, | 1102 | .ndo_change_mtu = eth_change_mtu, |
1103 | .ndo_validate_addr = eth_validate_addr, | 1103 | .ndo_validate_addr = eth_validate_addr, |
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 5e3356f8eb5a..5b8cbdb4b520 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c | |||
@@ -2185,12 +2185,16 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter, | |||
2185 | /* Free all the Rx ring sk_buffs */ | 2185 | /* Free all the Rx ring sk_buffs */ |
2186 | for (i = 0; i < rx_ring->count; i++) { | 2186 | for (i = 0; i < rx_ring->count; i++) { |
2187 | buffer_info = &rx_ring->buffer_info[i]; | 2187 | buffer_info = &rx_ring->buffer_info[i]; |
2188 | if (buffer_info->skb) { | 2188 | if (buffer_info->dma) { |
2189 | pci_unmap_single(pdev, | 2189 | pci_unmap_single(pdev, |
2190 | buffer_info->dma, | 2190 | buffer_info->dma, |
2191 | buffer_info->length, | 2191 | buffer_info->length, |
2192 | PCI_DMA_FROMDEVICE); | 2192 | PCI_DMA_FROMDEVICE); |
2193 | } | ||
2194 | |||
2195 | buffer_info->dma = 0; | ||
2193 | 2196 | ||
2197 | if (buffer_info->skb) { | ||
2194 | dev_kfree_skb(buffer_info->skb); | 2198 | dev_kfree_skb(buffer_info->skb); |
2195 | buffer_info->skb = NULL; | 2199 | buffer_info->skb = NULL; |
2196 | } | 2200 | } |
@@ -4033,6 +4037,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
4033 | buffer_info->dma, | 4037 | buffer_info->dma, |
4034 | buffer_info->length, | 4038 | buffer_info->length, |
4035 | PCI_DMA_FROMDEVICE); | 4039 | PCI_DMA_FROMDEVICE); |
4040 | buffer_info->dma = 0; | ||
4036 | 4041 | ||
4037 | length = le16_to_cpu(rx_desc->length); | 4042 | length = le16_to_cpu(rx_desc->length); |
4038 | /* !EOP means multiple descriptors were used to store a single | 4043 | /* !EOP means multiple descriptors were used to store a single |
@@ -4222,6 +4227,7 @@ map_skb: | |||
4222 | pci_unmap_single(pdev, buffer_info->dma, | 4227 | pci_unmap_single(pdev, buffer_info->dma, |
4223 | adapter->rx_buffer_len, | 4228 | adapter->rx_buffer_len, |
4224 | PCI_DMA_FROMDEVICE); | 4229 | PCI_DMA_FROMDEVICE); |
4230 | buffer_info->dma = 0; | ||
4225 | 4231 | ||
4226 | break; /* while !buffer_info->skb */ | 4232 | break; /* while !buffer_info->skb */ |
4227 | } | 4233 | } |
@@ -4817,6 +4823,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, | |||
4817 | 4823 | ||
4818 | netif_device_detach(netdev); | 4824 | netif_device_detach(netdev); |
4819 | 4825 | ||
4826 | if (state == pci_channel_io_perm_failure) | ||
4827 | return PCI_ERS_RESULT_DISCONNECT; | ||
4828 | |||
4820 | if (netif_running(netdev)) | 4829 | if (netif_running(netdev)) |
4821 | e1000_down(adapter); | 4830 | e1000_down(adapter); |
4822 | pci_disable_device(pdev); | 4831 | pci_disable_device(pdev); |
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 679885a122b4..63415bb6f48f 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c | |||
@@ -4785,6 +4785,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, | |||
4785 | 4785 | ||
4786 | netif_device_detach(netdev); | 4786 | netif_device_detach(netdev); |
4787 | 4787 | ||
4788 | if (state == pci_channel_io_perm_failure) | ||
4789 | return PCI_ERS_RESULT_DISCONNECT; | ||
4790 | |||
4788 | if (netif_running(netdev)) | 4791 | if (netif_running(netdev)) |
4789 | e1000e_down(adapter); | 4792 | e1000e_down(adapter); |
4790 | pci_disable_device(pdev); | 4793 | pci_disable_device(pdev); |
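
The e1000 and e1000e error handlers above (and the igb one further down) gain the same early exit: on pci_channel_io_perm_failure the device is permanently inaccessible, so the handler returns PCI_ERS_RESULT_DISCONNECT instead of trying to bring the adapter down and disable it. A skeleton of such a handler; the drvdata layout and the teardown placeholder are simplifications:

    #include <linux/pci.h>
    #include <linux/netdevice.h>

    /* Skeleton .error_detected handler following the pattern above.  Real
     * drivers typically fetch their adapter struct from drvdata and stop
     * private watchdogs before disabling the device. */
    static pci_ers_result_t example_io_error_detected(struct pci_dev *pdev,
                                                      pci_channel_state_t state)
    {
            struct net_device *netdev = pci_get_drvdata(pdev);

            netif_device_detach(netdev);

            if (state == pci_channel_io_perm_failure)
                    return PCI_ERS_RESULT_DISCONNECT;

            if (netif_running(netdev)) {
                    /* driver-specific teardown of the data path goes here,
                     * e.g. e1000_down(adapter) in the hunks above */
            }
            pci_disable_device(pdev);

            /* Request a slot reset for recoverable errors. */
            return PCI_ERS_RESULT_NEED_RESET;
    }
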
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c index 3af581303ca2..d167090248e2 100644 --- a/drivers/net/fsl_pq_mdio.c +++ b/drivers/net/fsl_pq_mdio.c | |||
@@ -188,7 +188,7 @@ static int fsl_pq_mdio_find_free(struct mii_bus *new_bus) | |||
188 | } | 188 | } |
189 | 189 | ||
190 | 190 | ||
191 | #ifdef CONFIG_GIANFAR | 191 | #if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE) |
192 | static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs) | 192 | static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs) |
193 | { | 193 | { |
194 | struct gfar __iomem *enet_regs; | 194 | struct gfar __iomem *enet_regs; |
@@ -206,7 +206,7 @@ static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs) | |||
206 | #endif | 206 | #endif |
207 | 207 | ||
208 | 208 | ||
209 | #ifdef CONFIG_UCC_GETH | 209 | #if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE) |
210 | static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id) | 210 | static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id) |
211 | { | 211 | { |
212 | struct device_node *np = NULL; | 212 | struct device_node *np = NULL; |
@@ -291,7 +291,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev, | |||
291 | if (of_device_is_compatible(np, "fsl,gianfar-mdio") || | 291 | if (of_device_is_compatible(np, "fsl,gianfar-mdio") || |
292 | of_device_is_compatible(np, "fsl,gianfar-tbi") || | 292 | of_device_is_compatible(np, "fsl,gianfar-tbi") || |
293 | of_device_is_compatible(np, "gianfar")) { | 293 | of_device_is_compatible(np, "gianfar")) { |
294 | #ifdef CONFIG_GIANFAR | 294 | #if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE) |
295 | tbipa = get_gfar_tbipa(regs); | 295 | tbipa = get_gfar_tbipa(regs); |
296 | #else | 296 | #else |
297 | err = -ENODEV; | 297 | err = -ENODEV; |
@@ -299,7 +299,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev, | |||
299 | #endif | 299 | #endif |
300 | } else if (of_device_is_compatible(np, "fsl,ucc-mdio") || | 300 | } else if (of_device_is_compatible(np, "fsl,ucc-mdio") || |
301 | of_device_is_compatible(np, "ucc_geth_phy")) { | 301 | of_device_is_compatible(np, "ucc_geth_phy")) { |
302 | #ifdef CONFIG_UCC_GETH | 302 | #if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE) |
303 | u32 id; | 303 | u32 id; |
304 | static u32 mii_mng_master; | 304 | static u32 mii_mng_master; |
305 | 305 | ||
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index ea17319624aa..be480292aba1 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c | |||
@@ -4549,11 +4549,12 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring, | |||
4549 | cleaned = true; | 4549 | cleaned = true; |
4550 | cleaned_count++; | 4550 | cleaned_count++; |
4551 | 4551 | ||
4552 | /* this is the fast path for the non-packet split case */ | ||
4552 | if (!adapter->rx_ps_hdr_size) { | 4553 | if (!adapter->rx_ps_hdr_size) { |
4553 | pci_unmap_single(pdev, buffer_info->dma, | 4554 | pci_unmap_single(pdev, buffer_info->dma, |
4554 | adapter->rx_buffer_len + | 4555 | adapter->rx_buffer_len, |
4555 | NET_IP_ALIGN, | ||
4556 | PCI_DMA_FROMDEVICE); | 4556 | PCI_DMA_FROMDEVICE); |
4557 | buffer_info->dma = 0; | ||
4557 | skb_put(skb, length); | 4558 | skb_put(skb, length); |
4558 | goto send_up; | 4559 | goto send_up; |
4559 | } | 4560 | } |
@@ -4570,8 +4571,9 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring, | |||
4570 | 4571 | ||
4571 | if (!skb_shinfo(skb)->nr_frags) { | 4572 | if (!skb_shinfo(skb)->nr_frags) { |
4572 | pci_unmap_single(pdev, buffer_info->dma, | 4573 | pci_unmap_single(pdev, buffer_info->dma, |
4573 | adapter->rx_ps_hdr_size + NET_IP_ALIGN, | 4574 | adapter->rx_ps_hdr_size, |
4574 | PCI_DMA_FROMDEVICE); | 4575 | PCI_DMA_FROMDEVICE); |
4576 | buffer_info->dma = 0; | ||
4575 | skb_put(skb, hlen); | 4577 | skb_put(skb, hlen); |
4576 | } | 4578 | } |
4577 | 4579 | ||
@@ -4713,7 +4715,6 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, | |||
4713 | bufsz = adapter->rx_ps_hdr_size; | 4715 | bufsz = adapter->rx_ps_hdr_size; |
4714 | else | 4716 | else |
4715 | bufsz = adapter->rx_buffer_len; | 4717 | bufsz = adapter->rx_buffer_len; |
4716 | bufsz += NET_IP_ALIGN; | ||
4717 | 4718 | ||
4718 | while (cleaned_count--) { | 4719 | while (cleaned_count--) { |
4719 | rx_desc = E1000_RX_DESC_ADV(*rx_ring, i); | 4720 | rx_desc = E1000_RX_DESC_ADV(*rx_ring, i); |
@@ -4737,7 +4738,7 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, | |||
4737 | } | 4738 | } |
4738 | 4739 | ||
4739 | if (!buffer_info->skb) { | 4740 | if (!buffer_info->skb) { |
4740 | skb = netdev_alloc_skb(netdev, bufsz); | 4741 | skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN); |
4741 | if (!skb) { | 4742 | if (!skb) { |
4742 | adapter->alloc_rx_buff_failed++; | 4743 | adapter->alloc_rx_buff_failed++; |
4743 | goto no_buffers; | 4744 | goto no_buffers; |
@@ -5338,6 +5339,9 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev, | |||
5338 | 5339 | ||
5339 | netif_device_detach(netdev); | 5340 | netif_device_detach(netdev); |
5340 | 5341 | ||
5342 | if (state == pci_channel_io_perm_failure) | ||
5343 | return PCI_ERS_RESULT_DISCONNECT; | ||
5344 | |||
5341 | if (netif_running(netdev)) | 5345 | if (netif_running(netdev)) |
5342 | igb_down(adapter); | 5346 | igb_down(adapter); |
5343 | pci_disable_device(pdev); | 5347 | pci_disable_device(pdev); |
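
The igb RX changes keep NET_IP_ALIGN out of the DMA mapping length: the padding is added only to the skb allocation and then consumed by skb_reserve(), while pci_map_single() and the matching pci_unmap_single() cover exactly the receive buffer size, and buffer_info->dma is cleared once unmapped. A sketch of the allocate-and-map half; the helper name and bufsz parameter are illustrative:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>
    #include <linux/pci.h>

    /* Allocate an RX skb with NET_IP_ALIGN headroom for the stack, but map
     * only the receive buffer itself for DMA -- the unmap path must later
     * use the same bufsz, so the alignment padding stays out of it.
     * Returns 0 (the drivers' "not mapped" marker) on allocation failure. */
    static dma_addr_t alloc_and_map_rx_skb(struct pci_dev *pdev,
                                           struct net_device *netdev,
                                           unsigned int bufsz,
                                           struct sk_buff **out_skb)
    {
            struct sk_buff *skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN);

            if (!skb)
                    return 0;
            skb_reserve(skb, NET_IP_ALIGN);         /* align the IP header */
            *out_skb = skb;
            return pci_map_single(pdev, skb->data, bufsz, PCI_DMA_FROMDEVICE);
    }
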
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c index f3eed6a8fba5..911c082cee5a 100644 --- a/drivers/net/irda/bfin_sir.c +++ b/drivers/net/irda/bfin_sir.c | |||
@@ -677,6 +677,14 @@ static int bfin_sir_init_iobuf(iobuff_t *io, int size) | |||
677 | return 0; | 677 | return 0; |
678 | } | 678 | } |
679 | 679 | ||
680 | static const struct net_device_ops bfin_sir_ndo = { | ||
681 | .ndo_open = bfin_sir_open, | ||
682 | .ndo_stop = bfin_sir_stop, | ||
683 | .ndo_start_xmit = bfin_sir_hard_xmit, | ||
684 | .ndo_do_ioctl = bfin_sir_ioctl, | ||
685 | .ndo_get_stats = bfin_sir_stats, | ||
686 | }; | ||
687 | |||
680 | static int __devinit bfin_sir_probe(struct platform_device *pdev) | 688 | static int __devinit bfin_sir_probe(struct platform_device *pdev) |
681 | { | 689 | { |
682 | struct net_device *dev; | 690 | struct net_device *dev; |
@@ -718,12 +726,8 @@ static int __devinit bfin_sir_probe(struct platform_device *pdev) | |||
718 | if (err) | 726 | if (err) |
719 | goto err_mem_3; | 727 | goto err_mem_3; |
720 | 728 | ||
721 | dev->hard_start_xmit = bfin_sir_hard_xmit; | 729 | dev->netdev_ops = &bfin_sir_ndo; |
722 | dev->open = bfin_sir_open; | 730 | dev->irq = sir_port->irq; |
723 | dev->stop = bfin_sir_stop; | ||
724 | dev->do_ioctl = bfin_sir_ioctl; | ||
725 | dev->get_stats = bfin_sir_stats; | ||
726 | dev->irq = sir_port->irq; | ||
727 | 731 | ||
728 | irda_init_max_qos_capabilies(&self->qos); | 732 | irda_init_max_qos_capabilies(&self->qos); |
729 | 733 | ||
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index 86f4f3e36f27..0f7b6a3a2e68 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c | |||
@@ -139,7 +139,7 @@ static int ixgbe_get_settings(struct net_device *netdev, | |||
139 | ecmd->autoneg = AUTONEG_ENABLE; | 139 | ecmd->autoneg = AUTONEG_ENABLE; |
140 | ecmd->transceiver = XCVR_EXTERNAL; | 140 | ecmd->transceiver = XCVR_EXTERNAL; |
141 | if ((hw->phy.media_type == ixgbe_media_type_copper) || | 141 | if ((hw->phy.media_type == ixgbe_media_type_copper) || |
142 | (hw->mac.type == ixgbe_mac_82599EB)) { | 142 | (hw->phy.multispeed_fiber)) { |
143 | ecmd->supported |= (SUPPORTED_1000baseT_Full | | 143 | ecmd->supported |= (SUPPORTED_1000baseT_Full | |
144 | SUPPORTED_Autoneg); | 144 | SUPPORTED_Autoneg); |
145 | 145 | ||
@@ -217,7 +217,7 @@ static int ixgbe_set_settings(struct net_device *netdev, | |||
217 | s32 err = 0; | 217 | s32 err = 0; |
218 | 218 | ||
219 | if ((hw->phy.media_type == ixgbe_media_type_copper) || | 219 | if ((hw->phy.media_type == ixgbe_media_type_copper) || |
220 | (hw->mac.type == ixgbe_mac_82599EB)) { | 220 | (hw->phy.multispeed_fiber)) { |
221 | /* 10000/copper and 1000/copper must autoneg | 221 | /* 10000/copper and 1000/copper must autoneg |
222 | * this function does not support any duplex forcing, but can | 222 | * this function does not support any duplex forcing, but can |
223 | * limit the advertising of the adapter to only 10000 or 1000 */ | 223 | * limit the advertising of the adapter to only 10000 or 1000 */ |
@@ -245,6 +245,7 @@ static int ixgbe_set_settings(struct net_device *netdev, | |||
245 | } else { | 245 | } else { |
246 | /* in this case we currently only support 10Gb/FULL */ | 246 | /* in this case we currently only support 10Gb/FULL */ |
247 | if ((ecmd->autoneg == AUTONEG_ENABLE) || | 247 | if ((ecmd->autoneg == AUTONEG_ENABLE) || |
248 | (ecmd->advertising != ADVERTISED_10000baseT_Full) || | ||
248 | (ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)) | 249 | (ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)) |
249 | return -EINVAL; | 250 | return -EINVAL; |
250 | } | 251 | } |
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index e756e220db32..5588ef493a3d 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -563,7 +563,6 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, | |||
563 | union ixgbe_adv_rx_desc *rx_desc; | 563 | union ixgbe_adv_rx_desc *rx_desc; |
564 | struct ixgbe_rx_buffer *bi; | 564 | struct ixgbe_rx_buffer *bi; |
565 | unsigned int i; | 565 | unsigned int i; |
566 | unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN; | ||
567 | 566 | ||
568 | i = rx_ring->next_to_use; | 567 | i = rx_ring->next_to_use; |
569 | bi = &rx_ring->rx_buffer_info[i]; | 568 | bi = &rx_ring->rx_buffer_info[i]; |
@@ -593,7 +592,9 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, | |||
593 | 592 | ||
594 | if (!bi->skb) { | 593 | if (!bi->skb) { |
595 | struct sk_buff *skb; | 594 | struct sk_buff *skb; |
596 | skb = netdev_alloc_skb(adapter->netdev, bufsz); | 595 | skb = netdev_alloc_skb(adapter->netdev, |
596 | (rx_ring->rx_buf_len + | ||
597 | NET_IP_ALIGN)); | ||
597 | 598 | ||
598 | if (!skb) { | 599 | if (!skb) { |
599 | adapter->alloc_rx_buff_failed++; | 600 | adapter->alloc_rx_buff_failed++; |
@@ -608,7 +609,8 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, | |||
608 | skb_reserve(skb, NET_IP_ALIGN); | 609 | skb_reserve(skb, NET_IP_ALIGN); |
609 | 610 | ||
610 | bi->skb = skb; | 611 | bi->skb = skb; |
611 | bi->dma = pci_map_single(pdev, skb->data, bufsz, | 612 | bi->dma = pci_map_single(pdev, skb->data, |
613 | rx_ring->rx_buf_len, | ||
612 | PCI_DMA_FROMDEVICE); | 614 | PCI_DMA_FROMDEVICE); |
613 | } | 615 | } |
614 | /* Refresh the desc even if buffer_addrs didn't change because | 616 | /* Refresh the desc even if buffer_addrs didn't change because |
@@ -732,6 +734,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, | |||
732 | pci_unmap_single(pdev, rx_buffer_info->dma, | 734 | pci_unmap_single(pdev, rx_buffer_info->dma, |
733 | rx_ring->rx_buf_len, | 735 | rx_ring->rx_buf_len, |
734 | PCI_DMA_FROMDEVICE); | 736 | PCI_DMA_FROMDEVICE); |
737 | rx_buffer_info->dma = 0; | ||
735 | skb_put(skb, len); | 738 | skb_put(skb, len); |
736 | } | 739 | } |
737 | 740 | ||
@@ -2701,7 +2704,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) | |||
2701 | */ | 2704 | */ |
2702 | err = hw->phy.ops.identify(hw); | 2705 | err = hw->phy.ops.identify(hw); |
2703 | if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { | 2706 | if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { |
2704 | DPRINTK(PROBE, ERR, "PHY not supported on this NIC %d\n", err); | 2707 | dev_err(&adapter->pdev->dev, "failed to initialize because " |
2708 | "an unsupported SFP+ module type was detected.\n" | ||
2709 | "Reload the driver after installing a supported " | ||
2710 | "module.\n"); | ||
2705 | ixgbe_down(adapter); | 2711 | ixgbe_down(adapter); |
2706 | return err; | 2712 | return err; |
2707 | } | 2713 | } |
@@ -2812,9 +2818,11 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, | |||
2812 | } | 2818 | } |
2813 | if (!rx_buffer_info->page) | 2819 | if (!rx_buffer_info->page) |
2814 | continue; | 2820 | continue; |
2815 | pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2, | 2821 | if (rx_buffer_info->page_dma) { |
2816 | PCI_DMA_FROMDEVICE); | 2822 | pci_unmap_page(pdev, rx_buffer_info->page_dma, |
2817 | rx_buffer_info->page_dma = 0; | 2823 | PAGE_SIZE / 2, PCI_DMA_FROMDEVICE); |
2824 | rx_buffer_info->page_dma = 0; | ||
2825 | } | ||
2818 | put_page(rx_buffer_info->page); | 2826 | put_page(rx_buffer_info->page); |
2819 | rx_buffer_info->page = NULL; | 2827 | rx_buffer_info->page = NULL; |
2820 | rx_buffer_info->page_offset = 0; | 2828 | rx_buffer_info->page_offset = 0; |
@@ -3720,10 +3728,11 @@ static void ixgbe_sfp_task(struct work_struct *work) | |||
3720 | goto reschedule; | 3728 | goto reschedule; |
3721 | ret = hw->phy.ops.reset(hw); | 3729 | ret = hw->phy.ops.reset(hw); |
3722 | if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) { | 3730 | if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) { |
3723 | DPRINTK(PROBE, ERR, "failed to initialize because an " | 3731 | dev_err(&adapter->pdev->dev, "failed to initialize " |
3724 | "unsupported SFP+ module type was detected.\n" | 3732 | "because an unsupported SFP+ module type " |
3725 | "Reload the driver after installing a " | 3733 | "was detected.\n" |
3726 | "supported module.\n"); | 3734 | "Reload the driver after installing a " |
3735 | "supported module.\n"); | ||
3727 | unregister_netdev(adapter->netdev); | 3736 | unregister_netdev(adapter->netdev); |
3728 | } else { | 3737 | } else { |
3729 | DPRINTK(PROBE, INFO, "detected SFP+: %d\n", | 3738 | DPRINTK(PROBE, INFO, "detected SFP+: %d\n", |
@@ -4502,7 +4511,8 @@ static void ixgbe_multispeed_fiber_task(struct work_struct *work) | |||
4502 | u32 autoneg; | 4511 | u32 autoneg; |
4503 | 4512 | ||
4504 | adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK; | 4513 | adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK; |
4505 | if (hw->mac.ops.get_link_capabilities) | 4514 | autoneg = hw->phy.autoneg_advertised; |
4515 | if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) | ||
4506 | hw->mac.ops.get_link_capabilities(hw, &autoneg, | 4516 | hw->mac.ops.get_link_capabilities(hw, &autoneg, |
4507 | &hw->mac.autoneg); | 4517 | &hw->mac.autoneg); |
4508 | if (hw->mac.ops.setup_link_speed) | 4518 | if (hw->mac.ops.setup_link_speed) |
@@ -4526,7 +4536,10 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work) | |||
4526 | adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK; | 4536 | adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK; |
4527 | err = hw->phy.ops.identify_sfp(hw); | 4537 | err = hw->phy.ops.identify_sfp(hw); |
4528 | if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { | 4538 | if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { |
4529 | DPRINTK(PROBE, ERR, "PHY not supported on this NIC %d\n", err); | 4539 | dev_err(&adapter->pdev->dev, "failed to initialize because " |
4540 | "an unsupported SFP+ module type was detected.\n" | ||
4541 | "Reload the driver after installing a supported " | ||
4542 | "module.\n"); | ||
4530 | ixgbe_down(adapter); | 4543 | ixgbe_down(adapter); |
4531 | return; | 4544 | return; |
4532 | } | 4545 | } |
@@ -5513,8 +5526,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
5513 | round_jiffies(jiffies + (2 * HZ))); | 5526 | round_jiffies(jiffies + (2 * HZ))); |
5514 | err = 0; | 5527 | err = 0; |
5515 | } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { | 5528 | } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { |
5516 | dev_err(&adapter->pdev->dev, "failed to load because an " | 5529 | dev_err(&adapter->pdev->dev, "failed to initialize because " |
5517 | "unsupported SFP+ module type was detected.\n"); | 5530 | "an unsupported SFP+ module type was detected.\n" |
5531 | "Reload the driver after installing a supported " | ||
5532 | "module.\n"); | ||
5518 | goto err_sw_init; | 5533 | goto err_sw_init; |
5519 | } else if (err) { | 5534 | } else if (err) { |
5520 | dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err); | 5535 | dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err); |
diff --git a/drivers/net/mdio.c b/drivers/net/mdio.c index dc45e9856c35..6851bdb2ce29 100644 --- a/drivers/net/mdio.c +++ b/drivers/net/mdio.c | |||
@@ -14,6 +14,10 @@ | |||
14 | #include <linux/mdio.h> | 14 | #include <linux/mdio.h> |
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | 16 | ||
17 | MODULE_DESCRIPTION("Generic support for MDIO-compatible transceivers"); | ||
18 | MODULE_AUTHOR("Copyright 2006-2009 Solarflare Communications Inc."); | ||
19 | MODULE_LICENSE("GPL"); | ||
20 | |||
17 | /** | 21 | /** |
18 | * mdio45_probe - probe for an MDIO (clause 45) device | 22 | * mdio45_probe - probe for an MDIO (clause 45) device |
19 | * @mdio: MDIO interface | 23 | * @mdio: MDIO interface |
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c index 341882f959f3..a2d82ddb3b4d 100644 --- a/drivers/net/sh_eth.c +++ b/drivers/net/sh_eth.c | |||
@@ -865,8 +865,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev) | |||
865 | struct sh_eth_private *mdp = netdev_priv(ndev); | 865 | struct sh_eth_private *mdp = netdev_priv(ndev); |
866 | struct sh_eth_cpu_data *cd = mdp->cd; | 866 | struct sh_eth_cpu_data *cd = mdp->cd; |
867 | irqreturn_t ret = IRQ_NONE; | 867 | irqreturn_t ret = IRQ_NONE; |
868 | u32 ioaddr, boguscnt = RX_RING_SIZE; | 868 | u32 ioaddr, intr_status = 0; |
869 | u32 intr_status = 0; | ||
870 | 869 | ||
871 | ioaddr = ndev->base_addr; | 870 | ioaddr = ndev->base_addr; |
872 | spin_lock(&mdp->lock); | 871 | spin_lock(&mdp->lock); |
@@ -901,12 +900,6 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev) | |||
901 | if (intr_status & cd->eesr_err_check) | 900 | if (intr_status & cd->eesr_err_check) |
902 | sh_eth_error(ndev, intr_status); | 901 | sh_eth_error(ndev, intr_status); |
903 | 902 | ||
904 | if (--boguscnt < 0) { | ||
905 | printk(KERN_WARNING | ||
906 | "%s: Too much work at interrupt, status=0x%4.4x.\n", | ||
907 | ndev->name, intr_status); | ||
908 | } | ||
909 | |||
910 | other_irq: | 903 | other_irq: |
911 | spin_unlock(&mdp->lock); | 904 | spin_unlock(&mdp->lock); |
912 | 905 | ||
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 7681d28c53d7..daf961ab68bc 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c | |||
@@ -2495,7 +2495,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx) | |||
2495 | if (likely(status >> 16 == (status & 0xffff))) { | 2495 | if (likely(status >> 16 == (status & 0xffff))) { |
2496 | skb = sky2->rx_ring[sky2->rx_next].skb; | 2496 | skb = sky2->rx_ring[sky2->rx_next].skb; |
2497 | skb->ip_summed = CHECKSUM_COMPLETE; | 2497 | skb->ip_summed = CHECKSUM_COMPLETE; |
2498 | skb->csum = status & 0xffff; | 2498 | skb->csum = le16_to_cpu(status); |
2499 | } else { | 2499 | } else { |
2500 | printk(KERN_NOTICE PFX "%s: hardware receive " | 2500 | printk(KERN_NOTICE PFX "%s: hardware receive " |
2501 | "checksum problem (status = %#x)\n", | 2501 | "checksum problem (status = %#x)\n", |
diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c index 80e01778dd3b..cd35d50e46d4 100644 --- a/drivers/net/usb/cdc_eem.c +++ b/drivers/net/usb/cdc_eem.c | |||
@@ -319,7 +319,7 @@ static int eem_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | |||
319 | return crc == crc2; | 319 | return crc == crc2; |
320 | 320 | ||
321 | if (unlikely(crc != crc2)) { | 321 | if (unlikely(crc != crc2)) { |
322 | dev->stats.rx_errors++; | 322 | dev->net->stats.rx_errors++; |
323 | dev_kfree_skb_any(skb2); | 323 | dev_kfree_skb_any(skb2); |
324 | } else | 324 | } else |
325 | usbnet_skb_return(dev, skb2); | 325 | usbnet_skb_return(dev, skb2); |
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c index 7ae82446b93a..1d3730d6690f 100644 --- a/drivers/net/usb/dm9601.c +++ b/drivers/net/usb/dm9601.c | |||
@@ -513,11 +513,11 @@ static int dm9601_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | |||
513 | len = (skb->data[1] | (skb->data[2] << 8)) - 4; | 513 | len = (skb->data[1] | (skb->data[2] << 8)) - 4; |
514 | 514 | ||
515 | if (unlikely(status & 0xbf)) { | 515 | if (unlikely(status & 0xbf)) { |
516 | if (status & 0x01) dev->stats.rx_fifo_errors++; | 516 | if (status & 0x01) dev->net->stats.rx_fifo_errors++; |
517 | if (status & 0x02) dev->stats.rx_crc_errors++; | 517 | if (status & 0x02) dev->net->stats.rx_crc_errors++; |
518 | if (status & 0x04) dev->stats.rx_frame_errors++; | 518 | if (status & 0x04) dev->net->stats.rx_frame_errors++; |
519 | if (status & 0x20) dev->stats.rx_missed_errors++; | 519 | if (status & 0x20) dev->net->stats.rx_missed_errors++; |
520 | if (status & 0x90) dev->stats.rx_length_errors++; | 520 | if (status & 0x90) dev->net->stats.rx_length_errors++; |
521 | return 0; | 521 | return 0; |
522 | } | 522 | } |
523 | 523 | ||
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c index 034e8a73ca6b..aeb1ab03a9ee 100644 --- a/drivers/net/usb/net1080.c +++ b/drivers/net/usb/net1080.c | |||
@@ -433,7 +433,7 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | |||
433 | dbg("rx framesize %d range %d..%d mtu %d", skb->len, | 433 | dbg("rx framesize %d range %d..%d mtu %d", skb->len, |
434 | net->hard_header_len, dev->hard_mtu, net->mtu); | 434 | net->hard_header_len, dev->hard_mtu, net->mtu); |
435 | #endif | 435 | #endif |
436 | dev->stats.rx_frame_errors++; | 436 | dev->net->stats.rx_frame_errors++; |
437 | nc_ensure_sync(dev); | 437 | nc_ensure_sync(dev); |
438 | return 0; | 438 | return 0; |
439 | } | 439 | } |
@@ -442,12 +442,12 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | |||
442 | hdr_len = le16_to_cpup(&header->hdr_len); | 442 | hdr_len = le16_to_cpup(&header->hdr_len); |
443 | packet_len = le16_to_cpup(&header->packet_len); | 443 | packet_len = le16_to_cpup(&header->packet_len); |
444 | if (FRAMED_SIZE(packet_len) > NC_MAX_PACKET) { | 444 | if (FRAMED_SIZE(packet_len) > NC_MAX_PACKET) { |
445 | dev->stats.rx_frame_errors++; | 445 | dev->net->stats.rx_frame_errors++; |
446 | dbg("packet too big, %d", packet_len); | 446 | dbg("packet too big, %d", packet_len); |
447 | nc_ensure_sync(dev); | 447 | nc_ensure_sync(dev); |
448 | return 0; | 448 | return 0; |
449 | } else if (hdr_len < MIN_HEADER) { | 449 | } else if (hdr_len < MIN_HEADER) { |
450 | dev->stats.rx_frame_errors++; | 450 | dev->net->stats.rx_frame_errors++; |
451 | dbg("header too short, %d", hdr_len); | 451 | dbg("header too short, %d", hdr_len); |
452 | nc_ensure_sync(dev); | 452 | nc_ensure_sync(dev); |
453 | return 0; | 453 | return 0; |
@@ -465,21 +465,21 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | |||
465 | 465 | ||
466 | if ((packet_len & 0x01) == 0) { | 466 | if ((packet_len & 0x01) == 0) { |
467 | if (skb->data [packet_len] != PAD_BYTE) { | 467 | if (skb->data [packet_len] != PAD_BYTE) { |
468 | dev->stats.rx_frame_errors++; | 468 | dev->net->stats.rx_frame_errors++; |
469 | dbg("bad pad"); | 469 | dbg("bad pad"); |
470 | return 0; | 470 | return 0; |
471 | } | 471 | } |
472 | skb_trim(skb, skb->len - 1); | 472 | skb_trim(skb, skb->len - 1); |
473 | } | 473 | } |
474 | if (skb->len != packet_len) { | 474 | if (skb->len != packet_len) { |
475 | dev->stats.rx_frame_errors++; | 475 | dev->net->stats.rx_frame_errors++; |
476 | dbg("bad packet len %d (expected %d)", | 476 | dbg("bad packet len %d (expected %d)", |
477 | skb->len, packet_len); | 477 | skb->len, packet_len); |
478 | nc_ensure_sync(dev); | 478 | nc_ensure_sync(dev); |
479 | return 0; | 479 | return 0; |
480 | } | 480 | } |
481 | if (header->packet_id != get_unaligned(&trailer->packet_id)) { | 481 | if (header->packet_id != get_unaligned(&trailer->packet_id)) { |
482 | dev->stats.rx_fifo_errors++; | 482 | dev->net->stats.rx_fifo_errors++; |
483 | dbg("(2+ dropped) rx packet_id mismatch 0x%x 0x%x", | 483 | dbg("(2+ dropped) rx packet_id mismatch 0x%x 0x%x", |
484 | le16_to_cpu(header->packet_id), | 484 | le16_to_cpu(header->packet_id), |
485 | le16_to_cpu(trailer->packet_id)); | 485 | le16_to_cpu(trailer->packet_id)); |
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index 1bf243ef950e..2232232b7989 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c | |||
@@ -487,7 +487,7 @@ int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | |||
487 | if (unlikely(hdr->msg_type != RNDIS_MSG_PACKET | 487 | if (unlikely(hdr->msg_type != RNDIS_MSG_PACKET |
488 | || skb->len < msg_len | 488 | || skb->len < msg_len |
489 | || (data_offset + data_len + 8) > msg_len)) { | 489 | || (data_offset + data_len + 8) > msg_len)) { |
490 | dev->stats.rx_frame_errors++; | 490 | dev->net->stats.rx_frame_errors++; |
491 | devdbg(dev, "bad rndis message %d/%d/%d/%d, len %d", | 491 | devdbg(dev, "bad rndis message %d/%d/%d/%d, len %d", |
492 | le32_to_cpu(hdr->msg_type), | 492 | le32_to_cpu(hdr->msg_type), |
493 | msg_len, data_offset, data_len, skb->len); | 493 | msg_len, data_offset, data_len, skb->len); |
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 89a91f8c22de..fe045896406b 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c | |||
@@ -1108,18 +1108,18 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | |||
1108 | if (unlikely(header & RX_STS_ES_)) { | 1108 | if (unlikely(header & RX_STS_ES_)) { |
1109 | if (netif_msg_rx_err(dev)) | 1109 | if (netif_msg_rx_err(dev)) |
1110 | devdbg(dev, "Error header=0x%08x", header); | 1110 | devdbg(dev, "Error header=0x%08x", header); |
1111 | dev->stats.rx_errors++; | 1111 | dev->net->stats.rx_errors++; |
1112 | dev->stats.rx_dropped++; | 1112 | dev->net->stats.rx_dropped++; |
1113 | 1113 | ||
1114 | if (header & RX_STS_CRC_) { | 1114 | if (header & RX_STS_CRC_) { |
1115 | dev->stats.rx_crc_errors++; | 1115 | dev->net->stats.rx_crc_errors++; |
1116 | } else { | 1116 | } else { |
1117 | if (header & (RX_STS_TL_ | RX_STS_RF_)) | 1117 | if (header & (RX_STS_TL_ | RX_STS_RF_)) |
1118 | dev->stats.rx_frame_errors++; | 1118 | dev->net->stats.rx_frame_errors++; |
1119 | 1119 | ||
1120 | if ((header & RX_STS_LE_) && | 1120 | if ((header & RX_STS_LE_) && |
1121 | (!(header & RX_STS_FT_))) | 1121 | (!(header & RX_STS_FT_))) |
1122 | dev->stats.rx_length_errors++; | 1122 | dev->net->stats.rx_length_errors++; |
1123 | } | 1123 | } |
1124 | } else { | 1124 | } else { |
1125 | /* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */ | 1125 | /* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */ |
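The run of USB network driver changes above (cdc_eem, dm9601, net1080, rndis_host, smsc95xx) all apply the same substitution: counters move from the driver-private dev->stats into the net_device-owned dev->net->stats. A minimal userspace model of that pattern; the struct definitions below are deliberately simplified stand-ins, not the kernel's.

#include <stdio.h>

struct net_device_stats { unsigned long rx_packets, rx_errors; };
struct net_device       { struct net_device_stats stats; };
struct usbnet           { struct net_device *net; };

static void note_rx_error(struct usbnet *dev)
{
	/* count in the structure every reader of the interface already
	 * sees, instead of a private copy that must be synced later */
	dev->net->stats.rx_errors++;
}

int main(void)
{
	struct net_device nd = { { 0, 0 } };
	struct usbnet dev = { &nd };

	note_rx_error(&dev);
	printf("rx_errors reported by the netdev: %lu\n", nd.stats.rx_errors);
	return 0;
}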
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 22c0585a0319..edfd9e10ceba 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c | |||
@@ -234,8 +234,8 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb) | |||
234 | int status; | 234 | int status; |
235 | 235 | ||
236 | skb->protocol = eth_type_trans (skb, dev->net); | 236 | skb->protocol = eth_type_trans (skb, dev->net); |
237 | dev->stats.rx_packets++; | 237 | dev->net->stats.rx_packets++; |
238 | dev->stats.rx_bytes += skb->len; | 238 | dev->net->stats.rx_bytes += skb->len; |
239 | 239 | ||
240 | if (netif_msg_rx_status (dev)) | 240 | if (netif_msg_rx_status (dev)) |
241 | devdbg (dev, "< rx, len %zu, type 0x%x", | 241 | devdbg (dev, "< rx, len %zu, type 0x%x", |
@@ -397,7 +397,7 @@ static inline void rx_process (struct usbnet *dev, struct sk_buff *skb) | |||
397 | if (netif_msg_rx_err (dev)) | 397 | if (netif_msg_rx_err (dev)) |
398 | devdbg (dev, "drop"); | 398 | devdbg (dev, "drop"); |
399 | error: | 399 | error: |
400 | dev->stats.rx_errors++; | 400 | dev->net->stats.rx_errors++; |
401 | skb_queue_tail (&dev->done, skb); | 401 | skb_queue_tail (&dev->done, skb); |
402 | } | 402 | } |
403 | } | 403 | } |
@@ -420,8 +420,8 @@ static void rx_complete (struct urb *urb) | |||
420 | case 0: | 420 | case 0: |
421 | if (skb->len < dev->net->hard_header_len) { | 421 | if (skb->len < dev->net->hard_header_len) { |
422 | entry->state = rx_cleanup; | 422 | entry->state = rx_cleanup; |
423 | dev->stats.rx_errors++; | 423 | dev->net->stats.rx_errors++; |
424 | dev->stats.rx_length_errors++; | 424 | dev->net->stats.rx_length_errors++; |
425 | if (netif_msg_rx_err (dev)) | 425 | if (netif_msg_rx_err (dev)) |
426 | devdbg (dev, "rx length %d", skb->len); | 426 | devdbg (dev, "rx length %d", skb->len); |
427 | } | 427 | } |
@@ -433,7 +433,7 @@ static void rx_complete (struct urb *urb) | |||
433 | * storm, recovering as needed. | 433 | * storm, recovering as needed. |
434 | */ | 434 | */ |
435 | case -EPIPE: | 435 | case -EPIPE: |
436 | dev->stats.rx_errors++; | 436 | dev->net->stats.rx_errors++; |
437 | usbnet_defer_kevent (dev, EVENT_RX_HALT); | 437 | usbnet_defer_kevent (dev, EVENT_RX_HALT); |
438 | // FALLTHROUGH | 438 | // FALLTHROUGH |
439 | 439 | ||
@@ -451,7 +451,7 @@ static void rx_complete (struct urb *urb) | |||
451 | case -EPROTO: | 451 | case -EPROTO: |
452 | case -ETIME: | 452 | case -ETIME: |
453 | case -EILSEQ: | 453 | case -EILSEQ: |
454 | dev->stats.rx_errors++; | 454 | dev->net->stats.rx_errors++; |
455 | if (!timer_pending (&dev->delay)) { | 455 | if (!timer_pending (&dev->delay)) { |
456 | mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES); | 456 | mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES); |
457 | if (netif_msg_link (dev)) | 457 | if (netif_msg_link (dev)) |
@@ -465,12 +465,12 @@ block: | |||
465 | 465 | ||
466 | /* data overrun ... flush fifo? */ | 466 | /* data overrun ... flush fifo? */ |
467 | case -EOVERFLOW: | 467 | case -EOVERFLOW: |
468 | dev->stats.rx_over_errors++; | 468 | dev->net->stats.rx_over_errors++; |
469 | // FALLTHROUGH | 469 | // FALLTHROUGH |
470 | 470 | ||
471 | default: | 471 | default: |
472 | entry->state = rx_cleanup; | 472 | entry->state = rx_cleanup; |
473 | dev->stats.rx_errors++; | 473 | dev->net->stats.rx_errors++; |
474 | if (netif_msg_rx_err (dev)) | 474 | if (netif_msg_rx_err (dev)) |
475 | devdbg (dev, "rx status %d", urb_status); | 475 | devdbg (dev, "rx status %d", urb_status); |
476 | break; | 476 | break; |
@@ -583,8 +583,8 @@ int usbnet_stop (struct net_device *net) | |||
583 | 583 | ||
584 | if (netif_msg_ifdown (dev)) | 584 | if (netif_msg_ifdown (dev)) |
585 | devinfo (dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld", | 585 | devinfo (dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld", |
586 | dev->stats.rx_packets, dev->stats.tx_packets, | 586 | net->stats.rx_packets, net->stats.tx_packets, |
587 | dev->stats.rx_errors, dev->stats.tx_errors | 587 | net->stats.rx_errors, net->stats.tx_errors |
588 | ); | 588 | ); |
589 | 589 | ||
590 | // ensure there are no more active urbs | 590 | // ensure there are no more active urbs |
@@ -891,10 +891,10 @@ static void tx_complete (struct urb *urb) | |||
891 | struct usbnet *dev = entry->dev; | 891 | struct usbnet *dev = entry->dev; |
892 | 892 | ||
893 | if (urb->status == 0) { | 893 | if (urb->status == 0) { |
894 | dev->stats.tx_packets++; | 894 | dev->net->stats.tx_packets++; |
895 | dev->stats.tx_bytes += entry->length; | 895 | dev->net->stats.tx_bytes += entry->length; |
896 | } else { | 896 | } else { |
897 | dev->stats.tx_errors++; | 897 | dev->net->stats.tx_errors++; |
898 | 898 | ||
899 | switch (urb->status) { | 899 | switch (urb->status) { |
900 | case -EPIPE: | 900 | case -EPIPE: |
@@ -1020,7 +1020,7 @@ int usbnet_start_xmit (struct sk_buff *skb, struct net_device *net) | |||
1020 | devdbg (dev, "drop, code %d", retval); | 1020 | devdbg (dev, "drop, code %d", retval); |
1021 | drop: | 1021 | drop: |
1022 | retval = NET_XMIT_SUCCESS; | 1022 | retval = NET_XMIT_SUCCESS; |
1023 | dev->stats.tx_dropped++; | 1023 | dev->net->stats.tx_dropped++; |
1024 | if (skb) | 1024 | if (skb) |
1025 | dev_kfree_skb_any (skb); | 1025 | dev_kfree_skb_any (skb); |
1026 | usb_free_urb (urb); | 1026 | usb_free_urb (urb); |
diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 87197dd9c788..1097c72e44d5 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c | |||
@@ -208,11 +208,14 @@ rx_drop: | |||
208 | 208 | ||
209 | static struct net_device_stats *veth_get_stats(struct net_device *dev) | 209 | static struct net_device_stats *veth_get_stats(struct net_device *dev) |
210 | { | 210 | { |
211 | struct veth_priv *priv = netdev_priv(dev); | 211 | struct veth_priv *priv; |
212 | struct net_device_stats *dev_stats = &dev->stats; | 212 | struct net_device_stats *dev_stats; |
213 | unsigned int cpu; | 213 | int cpu; |
214 | struct veth_net_stats *stats; | 214 | struct veth_net_stats *stats; |
215 | 215 | ||
216 | priv = netdev_priv(dev); | ||
217 | dev_stats = &dev->stats; | ||
218 | |||
216 | dev_stats->rx_packets = 0; | 219 | dev_stats->rx_packets = 0; |
217 | dev_stats->tx_packets = 0; | 220 | dev_stats->tx_packets = 0; |
218 | dev_stats->rx_bytes = 0; | 221 | dev_stats->rx_bytes = 0; |
@@ -220,17 +223,16 @@ static struct net_device_stats *veth_get_stats(struct net_device *dev) | |||
220 | dev_stats->tx_dropped = 0; | 223 | dev_stats->tx_dropped = 0; |
221 | dev_stats->rx_dropped = 0; | 224 | dev_stats->rx_dropped = 0; |
222 | 225 | ||
223 | if (priv->stats) | 226 | for_each_online_cpu(cpu) { |
224 | for_each_online_cpu(cpu) { | 227 | stats = per_cpu_ptr(priv->stats, cpu); |
225 | stats = per_cpu_ptr(priv->stats, cpu); | ||
226 | 228 | ||
227 | dev_stats->rx_packets += stats->rx_packets; | 229 | dev_stats->rx_packets += stats->rx_packets; |
228 | dev_stats->tx_packets += stats->tx_packets; | 230 | dev_stats->tx_packets += stats->tx_packets; |
229 | dev_stats->rx_bytes += stats->rx_bytes; | 231 | dev_stats->rx_bytes += stats->rx_bytes; |
230 | dev_stats->tx_bytes += stats->tx_bytes; | 232 | dev_stats->tx_bytes += stats->tx_bytes; |
231 | dev_stats->tx_dropped += stats->tx_dropped; | 233 | dev_stats->tx_dropped += stats->tx_dropped; |
232 | dev_stats->rx_dropped += stats->rx_dropped; | 234 | dev_stats->rx_dropped += stats->rx_dropped; |
233 | } | 235 | } |
234 | 236 | ||
235 | return dev_stats; | 237 | return dev_stats; |
236 | } | 238 | } |
@@ -257,8 +259,6 @@ static int veth_close(struct net_device *dev) | |||
257 | netif_carrier_off(dev); | 259 | netif_carrier_off(dev); |
258 | netif_carrier_off(priv->peer); | 260 | netif_carrier_off(priv->peer); |
259 | 261 | ||
260 | free_percpu(priv->stats); | ||
261 | priv->stats = NULL; | ||
262 | return 0; | 262 | return 0; |
263 | } | 263 | } |
264 | 264 | ||
@@ -289,6 +289,15 @@ static int veth_dev_init(struct net_device *dev) | |||
289 | return 0; | 289 | return 0; |
290 | } | 290 | } |
291 | 291 | ||
292 | static void veth_dev_free(struct net_device *dev) | ||
293 | { | ||
294 | struct veth_priv *priv; | ||
295 | |||
296 | priv = netdev_priv(dev); | ||
297 | free_percpu(priv->stats); | ||
298 | free_netdev(dev); | ||
299 | } | ||
300 | |||
292 | static const struct net_device_ops veth_netdev_ops = { | 301 | static const struct net_device_ops veth_netdev_ops = { |
293 | .ndo_init = veth_dev_init, | 302 | .ndo_init = veth_dev_init, |
294 | .ndo_open = veth_open, | 303 | .ndo_open = veth_open, |
@@ -306,7 +315,7 @@ static void veth_setup(struct net_device *dev) | |||
306 | dev->netdev_ops = &veth_netdev_ops; | 315 | dev->netdev_ops = &veth_netdev_ops; |
307 | dev->ethtool_ops = &veth_ethtool_ops; | 316 | dev->ethtool_ops = &veth_ethtool_ops; |
308 | dev->features |= NETIF_F_LLTX; | 317 | dev->features |= NETIF_F_LLTX; |
309 | dev->destructor = free_netdev; | 318 | dev->destructor = veth_dev_free; |
310 | } | 319 | } |
311 | 320 | ||
312 | /* | 321 | /* |
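The veth change stops freeing the per-cpu statistics in veth_close() and instead frees them from the new veth_dev_free() destructor, so the buffer lives exactly as long as the net_device itself. A much-simplified userspace model of that lifetime rule; the struct layout and function names below are illustrative, not the kernel's.

#include <stdio.h>
#include <stdlib.h>

struct veth_stats { unsigned long rx_packets; };
struct veth_dev   { struct veth_stats *stats; };	/* allocated once, at init */

static int veth_init(struct veth_dev *dev)
{
	dev->stats = calloc(1, sizeof(*dev->stats));
	return dev->stats ? 0 : -1;
}

static void veth_close(struct veth_dev *dev)
{
	/* the old code freed dev->stats here; a later stats read or
	 * re-open would then touch freed memory */
	(void)dev;
}

static void veth_destroy(struct veth_dev *dev)
{
	free(dev->stats);	/* freed only when the device itself goes away */
	dev->stats = NULL;
}

int main(void)
{
	struct veth_dev dev;

	if (veth_init(&dev))
		return 1;
	veth_close(&dev);
	printf("stats still valid after close: %lu\n", dev.stats->rx_packets);
	veth_destroy(&dev);
	return 0;
}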
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c index 1032d5fdbd42..2597145a066e 100644 --- a/drivers/parport/parport_pc.c +++ b/drivers/parport/parport_pc.c | |||
@@ -2907,6 +2907,7 @@ enum parport_pc_pci_cards { | |||
2907 | netmos_9755, | 2907 | netmos_9755, |
2908 | netmos_9805, | 2908 | netmos_9805, |
2909 | netmos_9815, | 2909 | netmos_9815, |
2910 | netmos_9901, | ||
2910 | quatech_sppxp100, | 2911 | quatech_sppxp100, |
2911 | }; | 2912 | }; |
2912 | 2913 | ||
@@ -2987,7 +2988,7 @@ static struct parport_pc_pci { | |||
2987 | /* netmos_9755 */ { 2, { { 0, 1 }, { 2, 3 },} }, | 2988 | /* netmos_9755 */ { 2, { { 0, 1 }, { 2, 3 },} }, |
2988 | /* netmos_9805 */ { 1, { { 0, -1 }, } }, | 2989 | /* netmos_9805 */ { 1, { { 0, -1 }, } }, |
2989 | /* netmos_9815 */ { 2, { { 0, -1 }, { 2, -1 }, } }, | 2990 | /* netmos_9815 */ { 2, { { 0, -1 }, { 2, -1 }, } }, |
2990 | 2991 | /* netmos_9901 */ { 1, { { 0, -1 }, } }, | |
2991 | /* quatech_sppxp100 */ { 1, { { 0, 1 }, } }, | 2992 | /* quatech_sppxp100 */ { 1, { { 0, 1 }, } }, |
2992 | }; | 2993 | }; |
2993 | 2994 | ||
@@ -3089,6 +3090,8 @@ static const struct pci_device_id parport_pc_pci_tbl[] = { | |||
3089 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9805 }, | 3090 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9805 }, |
3090 | { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9815, | 3091 | { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9815, |
3091 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9815 }, | 3092 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9815 }, |
3093 | { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9901, | ||
3094 | 0xA000, 0x2000, 0, 0, netmos_9901 }, | ||
3092 | /* Quatech SPPXP-100 Parallel port PCI ExpressCard */ | 3095 | /* Quatech SPPXP-100 Parallel port PCI ExpressCard */ |
3093 | { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SPPXP_100, | 3096 | { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SPPXP_100, |
3094 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 }, | 3097 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 }, |
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index e53eacd75c8d..53075424a434 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c | |||
@@ -39,7 +39,6 @@ | |||
39 | #include <linux/sysdev.h> | 39 | #include <linux/sysdev.h> |
40 | #include <asm/cacheflush.h> | 40 | #include <asm/cacheflush.h> |
41 | #include <asm/iommu.h> | 41 | #include <asm/iommu.h> |
42 | #include <asm/e820.h> | ||
43 | #include "pci.h" | 42 | #include "pci.h" |
44 | 43 | ||
45 | #define ROOT_SIZE VTD_PAGE_SIZE | 44 | #define ROOT_SIZE VTD_PAGE_SIZE |
@@ -57,14 +56,32 @@ | |||
57 | #define MAX_AGAW_WIDTH 64 | 56 | #define MAX_AGAW_WIDTH 64 |
58 | 57 | ||
59 | #define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1) | 58 | #define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1) |
59 | #define DOMAIN_MAX_PFN(gaw) ((((u64)1) << (gaw-VTD_PAGE_SHIFT)) - 1) | ||
60 | 60 | ||
61 | #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT) | 61 | #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT) |
62 | #define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32)) | 62 | #define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32)) |
63 | #define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64)) | 63 | #define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64)) |
64 | 64 | ||
65 | #ifndef PHYSICAL_PAGE_MASK | 65 | |
66 | #define PHYSICAL_PAGE_MASK PAGE_MASK | 66 | /* VT-d pages must always be _smaller_ than MM pages. Otherwise things |
67 | #endif | 67 | are never going to work. */ |
68 | static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn) | ||
69 | { | ||
70 | return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT); | ||
71 | } | ||
72 | |||
73 | static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn) | ||
74 | { | ||
75 | return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT); | ||
76 | } | ||
77 | static inline unsigned long page_to_dma_pfn(struct page *pg) | ||
78 | { | ||
79 | return mm_to_dma_pfn(page_to_pfn(pg)); | ||
80 | } | ||
81 | static inline unsigned long virt_to_dma_pfn(void *p) | ||
82 | { | ||
83 | return page_to_dma_pfn(virt_to_page(p)); | ||
84 | } | ||
68 | 85 | ||
69 | /* global iommu list, set NULL for ignored DMAR units */ | 86 | /* global iommu list, set NULL for ignored DMAR units */ |
70 | static struct intel_iommu **g_iommus; | 87 | static struct intel_iommu **g_iommus; |
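The new dma_to_mm_pfn()/mm_to_dma_pfn() helpers above convert between host-page and VT-d-page frame numbers by shifting by the difference of the two page shifts. A worked illustration under an assumed configuration of 16KiB host pages over 4KiB VT-d pages; on x86 both shifts are 12 and the conversions collapse to identities.

#include <stdio.h>

/* Hypothetical page sizes for illustration only. */
#define PAGE_SHIFT     14
#define VTD_PAGE_SHIFT 12

static unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

int main(void)
{
	unsigned long mm_pfn = 3;

	/* one 16KiB host page spans four 4KiB VT-d pages */
	printf("mm pfn %lu spans dma pfns %lu..%lu\n",
	       mm_pfn, mm_to_dma_pfn(mm_pfn), mm_to_dma_pfn(mm_pfn + 1) - 1);
	printf("dma pfn 13 belongs to mm pfn %lu\n", dma_to_mm_pfn(13));
	return 0;
}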
@@ -205,12 +222,17 @@ static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot) | |||
205 | 222 | ||
206 | static inline u64 dma_pte_addr(struct dma_pte *pte) | 223 | static inline u64 dma_pte_addr(struct dma_pte *pte) |
207 | { | 224 | { |
208 | return (pte->val & VTD_PAGE_MASK); | 225 | #ifdef CONFIG_64BIT |
226 | return pte->val & VTD_PAGE_MASK; | ||
227 | #else | ||
228 | /* Must have a full atomic 64-bit read */ | ||
229 | return __cmpxchg64(pte, 0ULL, 0ULL) & VTD_PAGE_MASK; | ||
230 | #endif | ||
209 | } | 231 | } |
210 | 232 | ||
211 | static inline void dma_set_pte_addr(struct dma_pte *pte, u64 addr) | 233 | static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn) |
212 | { | 234 | { |
213 | pte->val |= (addr & VTD_PAGE_MASK); | 235 | pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT; |
214 | } | 236 | } |
215 | 237 | ||
216 | static inline bool dma_pte_present(struct dma_pte *pte) | 238 | static inline bool dma_pte_present(struct dma_pte *pte) |
@@ -218,6 +240,11 @@ static inline bool dma_pte_present(struct dma_pte *pte) | |||
218 | return (pte->val & 3) != 0; | 240 | return (pte->val & 3) != 0; |
219 | } | 241 | } |
220 | 242 | ||
243 | static inline int first_pte_in_page(struct dma_pte *pte) | ||
244 | { | ||
245 | return !((unsigned long)pte & ~VTD_PAGE_MASK); | ||
246 | } | ||
247 | |||
221 | /* | 248 | /* |
222 | * This domain is a statically identity mapping domain. | 249 | * This domain is a statically identity mapping domain. |
223 | * 1. This domain creats a static 1:1 mapping to all usable memory. | 250 | * 1. This domain creats a static 1:1 mapping to all usable memory. |
@@ -245,7 +272,6 @@ struct dmar_domain { | |||
245 | struct iova_domain iovad; /* iova's that belong to this domain */ | 272 | struct iova_domain iovad; /* iova's that belong to this domain */ |
246 | 273 | ||
247 | struct dma_pte *pgd; /* virtual address */ | 274 | struct dma_pte *pgd; /* virtual address */ |
248 | spinlock_t mapping_lock; /* page table lock */ | ||
249 | int gaw; /* max guest address width */ | 275 | int gaw; /* max guest address width */ |
250 | 276 | ||
251 | /* adjusted guest address width, 0 is level 2 30-bit */ | 277 | /* adjusted guest address width, 0 is level 2 30-bit */ |
@@ -649,80 +675,78 @@ static inline int width_to_agaw(int width) | |||
649 | 675 | ||
650 | static inline unsigned int level_to_offset_bits(int level) | 676 | static inline unsigned int level_to_offset_bits(int level) |
651 | { | 677 | { |
652 | return (12 + (level - 1) * LEVEL_STRIDE); | 678 | return (level - 1) * LEVEL_STRIDE; |
653 | } | 679 | } |
654 | 680 | ||
655 | static inline int address_level_offset(u64 addr, int level) | 681 | static inline int pfn_level_offset(unsigned long pfn, int level) |
656 | { | 682 | { |
657 | return ((addr >> level_to_offset_bits(level)) & LEVEL_MASK); | 683 | return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK; |
658 | } | 684 | } |
659 | 685 | ||
660 | static inline u64 level_mask(int level) | 686 | static inline unsigned long level_mask(int level) |
661 | { | 687 | { |
662 | return ((u64)-1 << level_to_offset_bits(level)); | 688 | return -1UL << level_to_offset_bits(level); |
663 | } | 689 | } |
664 | 690 | ||
665 | static inline u64 level_size(int level) | 691 | static inline unsigned long level_size(int level) |
666 | { | 692 | { |
667 | return ((u64)1 << level_to_offset_bits(level)); | 693 | return 1UL << level_to_offset_bits(level); |
668 | } | 694 | } |
669 | 695 | ||
670 | static inline u64 align_to_level(u64 addr, int level) | 696 | static inline unsigned long align_to_level(unsigned long pfn, int level) |
671 | { | 697 | { |
672 | return ((addr + level_size(level) - 1) & level_mask(level)); | 698 | return (pfn + level_size(level) - 1) & level_mask(level); |
673 | } | 699 | } |
674 | 700 | ||
675 | static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr) | 701 | static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, |
702 | unsigned long pfn) | ||
676 | { | 703 | { |
677 | int addr_width = agaw_to_width(domain->agaw); | 704 | int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; |
678 | struct dma_pte *parent, *pte = NULL; | 705 | struct dma_pte *parent, *pte = NULL; |
679 | int level = agaw_to_level(domain->agaw); | 706 | int level = agaw_to_level(domain->agaw); |
680 | int offset; | 707 | int offset; |
681 | unsigned long flags; | ||
682 | 708 | ||
683 | BUG_ON(!domain->pgd); | 709 | BUG_ON(!domain->pgd); |
684 | 710 | BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width); | |
685 | addr &= (((u64)1) << addr_width) - 1; | ||
686 | parent = domain->pgd; | 711 | parent = domain->pgd; |
687 | 712 | ||
688 | spin_lock_irqsave(&domain->mapping_lock, flags); | ||
689 | while (level > 0) { | 713 | while (level > 0) { |
690 | void *tmp_page; | 714 | void *tmp_page; |
691 | 715 | ||
692 | offset = address_level_offset(addr, level); | 716 | offset = pfn_level_offset(pfn, level); |
693 | pte = &parent[offset]; | 717 | pte = &parent[offset]; |
694 | if (level == 1) | 718 | if (level == 1) |
695 | break; | 719 | break; |
696 | 720 | ||
697 | if (!dma_pte_present(pte)) { | 721 | if (!dma_pte_present(pte)) { |
722 | uint64_t pteval; | ||
723 | |||
698 | tmp_page = alloc_pgtable_page(); | 724 | tmp_page = alloc_pgtable_page(); |
699 | 725 | ||
700 | if (!tmp_page) { | 726 | if (!tmp_page) |
701 | spin_unlock_irqrestore(&domain->mapping_lock, | ||
702 | flags); | ||
703 | return NULL; | 727 | return NULL; |
728 | |||
729 | domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE); | ||
730 | pteval = (virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE; | ||
731 | if (cmpxchg64(&pte->val, 0ULL, pteval)) { | ||
732 | /* Someone else set it while we were thinking; use theirs. */ | ||
733 | free_pgtable_page(tmp_page); | ||
734 | } else { | ||
735 | dma_pte_addr(pte); | ||
736 | domain_flush_cache(domain, pte, sizeof(*pte)); | ||
704 | } | 737 | } |
705 | domain_flush_cache(domain, tmp_page, PAGE_SIZE); | ||
706 | dma_set_pte_addr(pte, virt_to_phys(tmp_page)); | ||
707 | /* | ||
708 | * high level table always sets r/w, last level page | ||
709 | * table control read/write | ||
710 | */ | ||
711 | dma_set_pte_readable(pte); | ||
712 | dma_set_pte_writable(pte); | ||
713 | domain_flush_cache(domain, pte, sizeof(*pte)); | ||
714 | } | 738 | } |
715 | parent = phys_to_virt(dma_pte_addr(pte)); | 739 | parent = phys_to_virt(dma_pte_addr(pte)); |
716 | level--; | 740 | level--; |
717 | } | 741 | } |
718 | 742 | ||
719 | spin_unlock_irqrestore(&domain->mapping_lock, flags); | ||
720 | return pte; | 743 | return pte; |
721 | } | 744 | } |
722 | 745 | ||
723 | /* return address's pte at specific level */ | 746 | /* return address's pte at specific level */ |
724 | static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr, | 747 | static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain, |
725 | int level) | 748 | unsigned long pfn, |
749 | int level) | ||
726 | { | 750 | { |
727 | struct dma_pte *parent, *pte = NULL; | 751 | struct dma_pte *parent, *pte = NULL; |
728 | int total = agaw_to_level(domain->agaw); | 752 | int total = agaw_to_level(domain->agaw); |
@@ -730,7 +754,7 @@ static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr, | |||
730 | 754 | ||
731 | parent = domain->pgd; | 755 | parent = domain->pgd; |
732 | while (level <= total) { | 756 | while (level <= total) { |
733 | offset = address_level_offset(addr, total); | 757 | offset = pfn_level_offset(pfn, total); |
734 | pte = &parent[offset]; | 758 | pte = &parent[offset]; |
735 | if (level == total) | 759 | if (level == total) |
736 | return pte; | 760 | return pte; |
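In the hunks above, level_to_offset_bits() now returns a shift relative to the page frame number instead of the byte address (the old "12 +" is gone), and pfn_level_offset() extracts each level's index directly from a pfn. A small worked example, assuming the driver's usual 9-bit stride per level; LEVEL_STRIDE and LEVEL_MASK below are stand-in definitions, not copied from the source.

#include <stdio.h>

#define LEVEL_STRIDE 9
#define LEVEL_MASK   ((1UL << LEVEL_STRIDE) - 1)

static unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;	/* pfn-relative shift */
}

static int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

int main(void)
{
	unsigned long pfn = 0x12345;	/* arbitrary VT-d pfn */
	int level;

	for (level = 1; level <= 3; level++)
		printf("level %d index: %d\n", level, pfn_level_offset(pfn, level));
	return 0;
}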
@@ -743,74 +767,82 @@ static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr, | |||
743 | return NULL; | 767 | return NULL; |
744 | } | 768 | } |
745 | 769 | ||
746 | /* clear one page's page table */ | ||
747 | static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr) | ||
748 | { | ||
749 | struct dma_pte *pte = NULL; | ||
750 | |||
751 | /* get last level pte */ | ||
752 | pte = dma_addr_level_pte(domain, addr, 1); | ||
753 | |||
754 | if (pte) { | ||
755 | dma_clear_pte(pte); | ||
756 | domain_flush_cache(domain, pte, sizeof(*pte)); | ||
757 | } | ||
758 | } | ||
759 | |||
760 | /* clear last level pte, a tlb flush should be followed */ | 770 | /* clear last level pte, a tlb flush should be followed */ |
761 | static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end) | 771 | static void dma_pte_clear_range(struct dmar_domain *domain, |
772 | unsigned long start_pfn, | ||
773 | unsigned long last_pfn) | ||
762 | { | 774 | { |
763 | int addr_width = agaw_to_width(domain->agaw); | 775 | int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; |
764 | int npages; | 776 | struct dma_pte *first_pte, *pte; |
777 | |||
778 | BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width); | ||
779 | BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width); | ||
765 | 780 | ||
766 | start &= (((u64)1) << addr_width) - 1; | 781 | /* we don't need lock here; nobody else touches the iova range */ |
767 | end &= (((u64)1) << addr_width) - 1; | 782 | while (start_pfn <= last_pfn) { |
768 | /* in case it's partial page */ | 783 | first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1); |
769 | start &= PAGE_MASK; | 784 | if (!pte) { |
770 | end = PAGE_ALIGN(end); | 785 | start_pfn = align_to_level(start_pfn + 1, 2); |
771 | npages = (end - start) / VTD_PAGE_SIZE; | 786 | continue; |
787 | } | ||
788 | do { | ||
789 | dma_clear_pte(pte); | ||
790 | start_pfn++; | ||
791 | pte++; | ||
792 | } while (start_pfn <= last_pfn && !first_pte_in_page(pte)); | ||
772 | 793 | ||
773 | /* we don't need lock here, nobody else touches the iova range */ | 794 | domain_flush_cache(domain, first_pte, |
774 | while (npages--) { | 795 | (void *)pte - (void *)first_pte); |
775 | dma_pte_clear_one(domain, start); | ||
776 | start += VTD_PAGE_SIZE; | ||
777 | } | 796 | } |
778 | } | 797 | } |
779 | 798 | ||
780 | /* free page table pages. last level pte should already be cleared */ | 799 | /* free page table pages. last level pte should already be cleared */ |
781 | static void dma_pte_free_pagetable(struct dmar_domain *domain, | 800 | static void dma_pte_free_pagetable(struct dmar_domain *domain, |
782 | u64 start, u64 end) | 801 | unsigned long start_pfn, |
802 | unsigned long last_pfn) | ||
783 | { | 803 | { |
784 | int addr_width = agaw_to_width(domain->agaw); | 804 | int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; |
785 | struct dma_pte *pte; | 805 | struct dma_pte *first_pte, *pte; |
786 | int total = agaw_to_level(domain->agaw); | 806 | int total = agaw_to_level(domain->agaw); |
787 | int level; | 807 | int level; |
788 | u64 tmp; | 808 | unsigned long tmp; |
789 | 809 | ||
790 | start &= (((u64)1) << addr_width) - 1; | 810 | BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width); |
791 | end &= (((u64)1) << addr_width) - 1; | 811 | BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width); |
792 | 812 | ||
793 | /* we don't need lock here, nobody else touches the iova range */ | 813 | /* We don't need lock here; nobody else touches the iova range */ |
794 | level = 2; | 814 | level = 2; |
795 | while (level <= total) { | 815 | while (level <= total) { |
796 | tmp = align_to_level(start, level); | 816 | tmp = align_to_level(start_pfn, level); |
797 | if (tmp >= end || (tmp + level_size(level) > end)) | 817 | |
818 | /* If we can't even clear one PTE at this level, we're done */ | ||
819 | if (tmp + level_size(level) - 1 > last_pfn) | ||
798 | return; | 820 | return; |
799 | 821 | ||
800 | while (tmp < end) { | 822 | while (tmp + level_size(level) - 1 <= last_pfn) { |
801 | pte = dma_addr_level_pte(domain, tmp, level); | 823 | first_pte = pte = dma_pfn_level_pte(domain, tmp, level); |
802 | if (pte) { | 824 | if (!pte) { |
803 | free_pgtable_page( | 825 | tmp = align_to_level(tmp + 1, level + 1); |
804 | phys_to_virt(dma_pte_addr(pte))); | 826 | continue; |
805 | dma_clear_pte(pte); | ||
806 | domain_flush_cache(domain, pte, sizeof(*pte)); | ||
807 | } | 827 | } |
808 | tmp += level_size(level); | 828 | do { |
829 | if (dma_pte_present(pte)) { | ||
830 | free_pgtable_page(phys_to_virt(dma_pte_addr(pte))); | ||
831 | dma_clear_pte(pte); | ||
832 | } | ||
833 | pte++; | ||
834 | tmp += level_size(level); | ||
835 | } while (!first_pte_in_page(pte) && | ||
836 | tmp + level_size(level) - 1 <= last_pfn); | ||
837 | |||
838 | domain_flush_cache(domain, first_pte, | ||
839 | (void *)pte - (void *)first_pte); | ||
840 | |||
809 | } | 841 | } |
810 | level++; | 842 | level++; |
811 | } | 843 | } |
812 | /* free pgd */ | 844 | /* free pgd */ |
813 | if (start == 0 && end >= ((((u64)1) << addr_width) - 1)) { | 845 | if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) { |
814 | free_pgtable_page(domain->pgd); | 846 | free_pgtable_page(domain->pgd); |
815 | domain->pgd = NULL; | 847 | domain->pgd = NULL; |
816 | } | 848 | } |
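The reworked dma_pte_clear_range() and dma_pte_free_pagetable() above walk runs of contiguous PTEs and call domain_flush_cache() once per page-table page (detected with first_pte_in_page()) rather than once per entry. A simplified userspace sketch of that batching loop; PTES_PER_PAGE, table[] and flush() are stand-ins invented for the illustration.

#include <stdio.h>
#include <stdint.h>

#define PTES_PER_PAGE 512	/* assumed: 4KiB table page / 8-byte PTE */

static uint64_t table[2 * PTES_PER_PAGE];	/* two adjacent table pages */

/* Toy stand-in for domain_flush_cache(): just report the span flushed. */
static void flush(void *start, size_t len)
{
	(void)start;
	printf("flush %zu bytes\n", len);
}

static int first_pte_in_page(uint64_t *pte)
{
	/* index-based stand-in for the driver's address-mask test */
	return ((pte - table) % PTES_PER_PAGE) == 0;
}

int main(void)
{
	unsigned long pfn = 100, last_pfn = 900;

	while (pfn <= last_pfn) {
		uint64_t *first = &table[pfn], *pte = first;

		do {
			*pte++ = 0;	/* dma_clear_pte() */
			pfn++;
		} while (pfn <= last_pfn && !first_pte_in_page(pte));

		flush(first, (char *)pte - (char *)first);	/* one flush per run */
	}
	return 0;
}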
@@ -1036,11 +1068,11 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain, | |||
1036 | } | 1068 | } |
1037 | 1069 | ||
1038 | static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, | 1070 | static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, |
1039 | u64 addr, unsigned int pages) | 1071 | unsigned long pfn, unsigned int pages) |
1040 | { | 1072 | { |
1041 | unsigned int mask = ilog2(__roundup_pow_of_two(pages)); | 1073 | unsigned int mask = ilog2(__roundup_pow_of_two(pages)); |
1074 | uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT; | ||
1042 | 1075 | ||
1043 | BUG_ON(addr & (~VTD_PAGE_MASK)); | ||
1044 | BUG_ON(pages == 0); | 1076 | BUG_ON(pages == 0); |
1045 | 1077 | ||
1046 | /* | 1078 | /* |
@@ -1055,7 +1087,12 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, | |||
1055 | else | 1087 | else |
1056 | iommu->flush.flush_iotlb(iommu, did, addr, mask, | 1088 | iommu->flush.flush_iotlb(iommu, did, addr, mask, |
1057 | DMA_TLB_PSI_FLUSH); | 1089 | DMA_TLB_PSI_FLUSH); |
1058 | if (did) | 1090 | |
1091 | /* | ||
1092 | * In caching mode, domain ID 0 is reserved for non-present to present | ||
1093 | * mapping flush. Device IOTLB doesn't need to be flushed in this case. | ||
1094 | */ | ||
1095 | if (!cap_caching_mode(iommu->cap) || did) | ||
1059 | iommu_flush_dev_iotlb(iommu->domains[did], addr, mask); | 1096 | iommu_flush_dev_iotlb(iommu->domains[did], addr, mask); |
1060 | } | 1097 | } |
1061 | 1098 | ||
@@ -1280,7 +1317,6 @@ static void dmar_init_reserved_ranges(void) | |||
1280 | struct pci_dev *pdev = NULL; | 1317 | struct pci_dev *pdev = NULL; |
1281 | struct iova *iova; | 1318 | struct iova *iova; |
1282 | int i; | 1319 | int i; |
1283 | u64 addr, size; | ||
1284 | 1320 | ||
1285 | init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN); | 1321 | init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN); |
1286 | 1322 | ||
@@ -1303,12 +1339,9 @@ static void dmar_init_reserved_ranges(void) | |||
1303 | r = &pdev->resource[i]; | 1339 | r = &pdev->resource[i]; |
1304 | if (!r->flags || !(r->flags & IORESOURCE_MEM)) | 1340 | if (!r->flags || !(r->flags & IORESOURCE_MEM)) |
1305 | continue; | 1341 | continue; |
1306 | addr = r->start; | 1342 | iova = reserve_iova(&reserved_iova_list, |
1307 | addr &= PHYSICAL_PAGE_MASK; | 1343 | IOVA_PFN(r->start), |
1308 | size = r->end - addr; | 1344 | IOVA_PFN(r->end)); |
1309 | size = PAGE_ALIGN(size); | ||
1310 | iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr), | ||
1311 | IOVA_PFN(size + addr) - 1); | ||
1312 | if (!iova) | 1345 | if (!iova) |
1313 | printk(KERN_ERR "Reserve iova failed\n"); | 1346 | printk(KERN_ERR "Reserve iova failed\n"); |
1314 | } | 1347 | } |
@@ -1342,7 +1375,6 @@ static int domain_init(struct dmar_domain *domain, int guest_width) | |||
1342 | unsigned long sagaw; | 1375 | unsigned long sagaw; |
1343 | 1376 | ||
1344 | init_iova_domain(&domain->iovad, DMA_32BIT_PFN); | 1377 | init_iova_domain(&domain->iovad, DMA_32BIT_PFN); |
1345 | spin_lock_init(&domain->mapping_lock); | ||
1346 | spin_lock_init(&domain->iommu_lock); | 1378 | spin_lock_init(&domain->iommu_lock); |
1347 | 1379 | ||
1348 | domain_reserve_special_ranges(domain); | 1380 | domain_reserve_special_ranges(domain); |
@@ -1389,7 +1421,6 @@ static void domain_exit(struct dmar_domain *domain) | |||
1389 | { | 1421 | { |
1390 | struct dmar_drhd_unit *drhd; | 1422 | struct dmar_drhd_unit *drhd; |
1391 | struct intel_iommu *iommu; | 1423 | struct intel_iommu *iommu; |
1392 | u64 end; | ||
1393 | 1424 | ||
1394 | /* Domain 0 is reserved, so dont process it */ | 1425 | /* Domain 0 is reserved, so dont process it */ |
1395 | if (!domain) | 1426 | if (!domain) |
@@ -1398,14 +1429,12 @@ static void domain_exit(struct dmar_domain *domain) | |||
1398 | domain_remove_dev_info(domain); | 1429 | domain_remove_dev_info(domain); |
1399 | /* destroy iovas */ | 1430 | /* destroy iovas */ |
1400 | put_iova_domain(&domain->iovad); | 1431 | put_iova_domain(&domain->iovad); |
1401 | end = DOMAIN_MAX_ADDR(domain->gaw); | ||
1402 | end = end & (~PAGE_MASK); | ||
1403 | 1432 | ||
1404 | /* clear ptes */ | 1433 | /* clear ptes */ |
1405 | dma_pte_clear_range(domain, 0, end); | 1434 | dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); |
1406 | 1435 | ||
1407 | /* free page tables */ | 1436 | /* free page tables */ |
1408 | dma_pte_free_pagetable(domain, 0, end); | 1437 | dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); |
1409 | 1438 | ||
1410 | for_each_active_iommu(iommu, drhd) | 1439 | for_each_active_iommu(iommu, drhd) |
1411 | if (test_bit(iommu->seq_id, &domain->iommu_bmp)) | 1440 | if (test_bit(iommu->seq_id, &domain->iommu_bmp)) |
@@ -1619,42 +1648,86 @@ static int domain_context_mapped(struct pci_dev *pdev) | |||
1619 | tmp->devfn); | 1648 | tmp->devfn); |
1620 | } | 1649 | } |
1621 | 1650 | ||
1622 | static int | 1651 | static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, |
1623 | domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova, | 1652 | struct scatterlist *sg, unsigned long phys_pfn, |
1624 | u64 hpa, size_t size, int prot) | 1653 | unsigned long nr_pages, int prot) |
1625 | { | 1654 | { |
1626 | u64 start_pfn, end_pfn; | 1655 | struct dma_pte *first_pte = NULL, *pte = NULL; |
1627 | struct dma_pte *pte; | 1656 | phys_addr_t uninitialized_var(pteval); |
1628 | int index; | 1657 | int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; |
1629 | int addr_width = agaw_to_width(domain->agaw); | 1658 | unsigned long sg_res; |
1630 | 1659 | ||
1631 | hpa &= (((u64)1) << addr_width) - 1; | 1660 | BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width); |
1632 | 1661 | ||
1633 | if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0) | 1662 | if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0) |
1634 | return -EINVAL; | 1663 | return -EINVAL; |
1635 | iova &= PAGE_MASK; | 1664 | |
1636 | start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT; | 1665 | prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP; |
1637 | end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT; | 1666 | |
1638 | index = 0; | 1667 | if (sg) |
1639 | while (start_pfn < end_pfn) { | 1668 | sg_res = 0; |
1640 | pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index); | 1669 | else { |
1641 | if (!pte) | 1670 | sg_res = nr_pages + 1; |
1642 | return -ENOMEM; | 1671 | pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot; |
1672 | } | ||
1673 | |||
1674 | while (nr_pages--) { | ||
1675 | uint64_t tmp; | ||
1676 | |||
1677 | if (!sg_res) { | ||
1678 | sg_res = (sg->offset + sg->length + VTD_PAGE_SIZE - 1) >> VTD_PAGE_SHIFT; | ||
1679 | sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset; | ||
1680 | sg->dma_length = sg->length; | ||
1681 | pteval = page_to_phys(sg_page(sg)) | prot; | ||
1682 | } | ||
1683 | if (!pte) { | ||
1684 | first_pte = pte = pfn_to_dma_pte(domain, iov_pfn); | ||
1685 | if (!pte) | ||
1686 | return -ENOMEM; | ||
1687 | } | ||
1643 | /* We don't need lock here, nobody else | 1688 | /* We don't need lock here, nobody else |
1644 | * touches the iova range | 1689 | * touches the iova range |
1645 | */ | 1690 | */ |
1646 | BUG_ON(dma_pte_addr(pte)); | 1691 | tmp = cmpxchg64_local(&pte->val, 0ULL, pteval); |
1647 | dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT); | 1692 | if (tmp) { |
1648 | dma_set_pte_prot(pte, prot); | 1693 | static int dumps = 5; |
1649 | if (prot & DMA_PTE_SNP) | 1694 | printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n", |
1650 | dma_set_pte_snp(pte); | 1695 | iov_pfn, tmp, (unsigned long long)pteval); |
1651 | domain_flush_cache(domain, pte, sizeof(*pte)); | 1696 | if (dumps) { |
1652 | start_pfn++; | 1697 | dumps--; |
1653 | index++; | 1698 | debug_dma_dump_mappings(NULL); |
1699 | } | ||
1700 | WARN_ON(1); | ||
1701 | } | ||
1702 | pte++; | ||
1703 | if (!nr_pages || first_pte_in_page(pte)) { | ||
1704 | domain_flush_cache(domain, first_pte, | ||
1705 | (void *)pte - (void *)first_pte); | ||
1706 | pte = NULL; | ||
1707 | } | ||
1708 | iov_pfn++; | ||
1709 | pteval += VTD_PAGE_SIZE; | ||
1710 | sg_res--; | ||
1711 | if (!sg_res) | ||
1712 | sg = sg_next(sg); | ||
1654 | } | 1713 | } |
1655 | return 0; | 1714 | return 0; |
1656 | } | 1715 | } |
1657 | 1716 | ||
1717 | static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn, | ||
1718 | struct scatterlist *sg, unsigned long nr_pages, | ||
1719 | int prot) | ||
1720 | { | ||
1721 | return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot); | ||
1722 | } | ||
1723 | |||
1724 | static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn, | ||
1725 | unsigned long phys_pfn, unsigned long nr_pages, | ||
1726 | int prot) | ||
1727 | { | ||
1728 | return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot); | ||
1729 | } | ||
1730 | |||
1658 | static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn) | 1731 | static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn) |
1659 | { | 1732 | { |
1660 | if (!iommu) | 1733 | if (!iommu) |
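Both pfn_to_dma_pte() and __domain_mapping() above install page-table entries with a compare-and-exchange against an expected value of zero, which is why the old mapping_lock could be dropped: a writer that loses the race simply keeps the entry that is already there. A userspace sketch of that install-or-reuse idiom, using the GCC/Clang __atomic builtin as a stand-in for the kernel's cmpxchg64().

#include <stdint.h>
#include <stdio.h>

/* Returns the previous value, like the kernel helper: 0 means "we installed". */
static uint64_t cmpxchg64(uint64_t *p, uint64_t old_val, uint64_t new_val)
{
	__atomic_compare_exchange_n(p, &old_val, new_val, 0,
				    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	return old_val;
}

int main(void)
{
	uint64_t pte = 0;

	if (cmpxchg64(&pte, 0, 0x1234) == 0)
		printf("installed our entry: 0x%llx\n", (unsigned long long)pte);

	/* a second writer loses the race and must keep the existing entry */
	if (cmpxchg64(&pte, 0, 0x5678) != 0)
		printf("already set, keeping 0x%llx\n", (unsigned long long)pte);

	return 0;
}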
@@ -1845,58 +1918,61 @@ error: | |||
1845 | 1918 | ||
1846 | static int iommu_identity_mapping; | 1919 | static int iommu_identity_mapping; |
1847 | 1920 | ||
1921 | static int iommu_domain_identity_map(struct dmar_domain *domain, | ||
1922 | unsigned long long start, | ||
1923 | unsigned long long end) | ||
1924 | { | ||
1925 | unsigned long first_vpfn = start >> VTD_PAGE_SHIFT; | ||
1926 | unsigned long last_vpfn = end >> VTD_PAGE_SHIFT; | ||
1927 | |||
1928 | if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn), | ||
1929 | dma_to_mm_pfn(last_vpfn))) { | ||
1930 | printk(KERN_ERR "IOMMU: reserve iova failed\n"); | ||
1931 | return -ENOMEM; | ||
1932 | } | ||
1933 | |||
1934 | pr_debug("Mapping reserved region %llx-%llx for domain %d\n", | ||
1935 | start, end, domain->id); | ||
1936 | /* | ||
1937 | * RMRR range might have overlap with physical memory range, | ||
1938 | * clear it first | ||
1939 | */ | ||
1940 | dma_pte_clear_range(domain, first_vpfn, last_vpfn); | ||
1941 | |||
1942 | return domain_pfn_mapping(domain, first_vpfn, first_vpfn, | ||
1943 | last_vpfn - first_vpfn + 1, | ||
1944 | DMA_PTE_READ|DMA_PTE_WRITE); | ||
1945 | } | ||
1946 | |||
1848 | static int iommu_prepare_identity_map(struct pci_dev *pdev, | 1947 | static int iommu_prepare_identity_map(struct pci_dev *pdev, |
1849 | unsigned long long start, | 1948 | unsigned long long start, |
1850 | unsigned long long end) | 1949 | unsigned long long end) |
1851 | { | 1950 | { |
1852 | struct dmar_domain *domain; | 1951 | struct dmar_domain *domain; |
1853 | unsigned long size; | ||
1854 | unsigned long long base; | ||
1855 | int ret; | 1952 | int ret; |
1856 | 1953 | ||
1857 | printk(KERN_INFO | 1954 | printk(KERN_INFO |
1858 | "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n", | 1955 | "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n", |
1859 | pci_name(pdev), start, end); | 1956 | pci_name(pdev), start, end); |
1860 | if (iommu_identity_mapping) | 1957 | |
1861 | domain = si_domain; | 1958 | domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH); |
1862 | else | ||
1863 | /* page table init */ | ||
1864 | domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH); | ||
1865 | if (!domain) | 1959 | if (!domain) |
1866 | return -ENOMEM; | 1960 | return -ENOMEM; |
1867 | 1961 | ||
1868 | /* The address might not be aligned */ | 1962 | ret = iommu_domain_identity_map(domain, start, end); |
1869 | base = start & PAGE_MASK; | ||
1870 | size = end - base; | ||
1871 | size = PAGE_ALIGN(size); | ||
1872 | if (!reserve_iova(&domain->iovad, IOVA_PFN(base), | ||
1873 | IOVA_PFN(base + size) - 1)) { | ||
1874 | printk(KERN_ERR "IOMMU: reserve iova failed\n"); | ||
1875 | ret = -ENOMEM; | ||
1876 | goto error; | ||
1877 | } | ||
1878 | |||
1879 | pr_debug("Mapping reserved region %lx@%llx for %s\n", | ||
1880 | size, base, pci_name(pdev)); | ||
1881 | /* | ||
1882 | * RMRR range might have overlap with physical memory range, | ||
1883 | * clear it first | ||
1884 | */ | ||
1885 | dma_pte_clear_range(domain, base, base + size); | ||
1886 | |||
1887 | ret = domain_page_mapping(domain, base, base, size, | ||
1888 | DMA_PTE_READ|DMA_PTE_WRITE); | ||
1889 | if (ret) | 1963 | if (ret) |
1890 | goto error; | 1964 | goto error; |
1891 | 1965 | ||
1892 | /* context entry init */ | 1966 | /* context entry init */ |
1893 | ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL); | 1967 | ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL); |
1894 | if (!ret) | 1968 | if (ret) |
1895 | return 0; | 1969 | goto error; |
1896 | error: | 1970 | |
1971 | return 0; | ||
1972 | |||
1973 | error: | ||
1897 | domain_exit(domain); | 1974 | domain_exit(domain); |
1898 | return ret; | 1975 | return ret; |
1899 | |||
1900 | } | 1976 | } |
1901 | 1977 | ||
1902 | static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr, | 1978 | static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr, |
@@ -1908,64 +1984,6 @@ static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr, | |||
1908 | rmrr->end_address + 1); | 1984 | rmrr->end_address + 1); |
1909 | } | 1985 | } |
1910 | 1986 | ||
1911 | #ifdef CONFIG_DMAR_GFX_WA | ||
1912 | struct iommu_prepare_data { | ||
1913 | struct pci_dev *pdev; | ||
1914 | int ret; | ||
1915 | }; | ||
1916 | |||
1917 | static int __init iommu_prepare_work_fn(unsigned long start_pfn, | ||
1918 | unsigned long end_pfn, void *datax) | ||
1919 | { | ||
1920 | struct iommu_prepare_data *data; | ||
1921 | |||
1922 | data = (struct iommu_prepare_data *)datax; | ||
1923 | |||
1924 | data->ret = iommu_prepare_identity_map(data->pdev, | ||
1925 | start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT); | ||
1926 | return data->ret; | ||
1927 | |||
1928 | } | ||
1929 | |||
1930 | static int __init iommu_prepare_with_active_regions(struct pci_dev *pdev) | ||
1931 | { | ||
1932 | int nid; | ||
1933 | struct iommu_prepare_data data; | ||
1934 | |||
1935 | data.pdev = pdev; | ||
1936 | data.ret = 0; | ||
1937 | |||
1938 | for_each_online_node(nid) { | ||
1939 | work_with_active_regions(nid, iommu_prepare_work_fn, &data); | ||
1940 | if (data.ret) | ||
1941 | return data.ret; | ||
1942 | } | ||
1943 | return data.ret; | ||
1944 | } | ||
1945 | |||
1946 | static void __init iommu_prepare_gfx_mapping(void) | ||
1947 | { | ||
1948 | struct pci_dev *pdev = NULL; | ||
1949 | int ret; | ||
1950 | |||
1951 | for_each_pci_dev(pdev) { | ||
1952 | if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO || | ||
1953 | !IS_GFX_DEVICE(pdev)) | ||
1954 | continue; | ||
1955 | printk(KERN_INFO "IOMMU: gfx device %s 1-1 mapping\n", | ||
1956 | pci_name(pdev)); | ||
1957 | ret = iommu_prepare_with_active_regions(pdev); | ||
1958 | if (ret) | ||
1959 | printk(KERN_ERR "IOMMU: mapping reserved region failed\n"); | ||
1960 | } | ||
1961 | } | ||
1962 | #else /* !CONFIG_DMAR_GFX_WA */ | ||
1963 | static inline void iommu_prepare_gfx_mapping(void) | ||
1964 | { | ||
1965 | return; | ||
1966 | } | ||
1967 | #endif | ||
1968 | |||
1969 | #ifdef CONFIG_DMAR_FLOPPY_WA | 1987 | #ifdef CONFIG_DMAR_FLOPPY_WA |
1970 | static inline void iommu_prepare_isa(void) | 1988 | static inline void iommu_prepare_isa(void) |
1971 | { | 1989 | { |
@@ -1976,12 +1994,12 @@ static inline void iommu_prepare_isa(void) | |||
1976 | if (!pdev) | 1994 | if (!pdev) |
1977 | return; | 1995 | return; |
1978 | 1996 | ||
1979 | printk(KERN_INFO "IOMMU: Prepare 0-16M unity mapping for LPC\n"); | 1997 | printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n"); |
1980 | ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024); | 1998 | ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024); |
1981 | 1999 | ||
1982 | if (ret) | 2000 | if (ret) |
1983 | printk(KERN_ERR "IOMMU: Failed to create 0-64M identity map, " | 2001 | printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; " |
1984 | "floppy might not work\n"); | 2002 | "floppy might not work\n"); |
1985 | 2003 | ||
1986 | } | 2004 | } |
1987 | #else | 2005 | #else |
@@ -2009,16 +2027,30 @@ static int __init init_context_pass_through(void) | |||
2009 | } | 2027 | } |
2010 | 2028 | ||
2011 | static int md_domain_init(struct dmar_domain *domain, int guest_width); | 2029 | static int md_domain_init(struct dmar_domain *domain, int guest_width); |
2030 | |||
2031 | static int __init si_domain_work_fn(unsigned long start_pfn, | ||
2032 | unsigned long end_pfn, void *datax) | ||
2033 | { | ||
2034 | int *ret = datax; | ||
2035 | |||
2036 | *ret = iommu_domain_identity_map(si_domain, | ||
2037 | (uint64_t)start_pfn << PAGE_SHIFT, | ||
2038 | (uint64_t)end_pfn << PAGE_SHIFT); | ||
2039 | return *ret; | ||
2040 | |||
2041 | } | ||
2042 | |||
2012 | static int si_domain_init(void) | 2043 | static int si_domain_init(void) |
2013 | { | 2044 | { |
2014 | struct dmar_drhd_unit *drhd; | 2045 | struct dmar_drhd_unit *drhd; |
2015 | struct intel_iommu *iommu; | 2046 | struct intel_iommu *iommu; |
2016 | int ret = 0; | 2047 | int nid, ret = 0; |
2017 | 2048 | ||
2018 | si_domain = alloc_domain(); | 2049 | si_domain = alloc_domain(); |
2019 | if (!si_domain) | 2050 | if (!si_domain) |
2020 | return -EFAULT; | 2051 | return -EFAULT; |
2021 | 2052 | ||
2053 | pr_debug("Identity mapping domain is domain %d\n", si_domain->id); | ||
2022 | 2054 | ||
2023 | for_each_active_iommu(iommu, drhd) { | 2055 | for_each_active_iommu(iommu, drhd) { |
2024 | ret = iommu_attach_domain(si_domain, iommu); | 2056 | ret = iommu_attach_domain(si_domain, iommu); |
@@ -2035,6 +2067,12 @@ static int si_domain_init(void) | |||
2035 | 2067 | ||
2036 | si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY; | 2068 | si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY; |
2037 | 2069 | ||
2070 | for_each_online_node(nid) { | ||
2071 | work_with_active_regions(nid, si_domain_work_fn, &ret); | ||
2072 | if (ret) | ||
2073 | return ret; | ||
2074 | } | ||
2075 | |||
2038 | return 0; | 2076 | return 0; |
2039 | } | 2077 | } |
2040 | 2078 | ||
@@ -2081,7 +2119,6 @@ static int domain_add_dev_info(struct dmar_domain *domain, | |||
2081 | 2119 | ||
2082 | static int iommu_prepare_static_identity_mapping(void) | 2120 | static int iommu_prepare_static_identity_mapping(void) |
2083 | { | 2121 | { |
2084 | int i; | ||
2085 | struct pci_dev *pdev = NULL; | 2122 | struct pci_dev *pdev = NULL; |
2086 | int ret; | 2123 | int ret; |
2087 | 2124 | ||
@@ -2089,20 +2126,14 @@ static int iommu_prepare_static_identity_mapping(void) | |||
2089 | if (ret) | 2126 | if (ret) |
2090 | return -EFAULT; | 2127 | return -EFAULT; |
2091 | 2128 | ||
2092 | printk(KERN_INFO "IOMMU: Setting identity map:\n"); | ||
2093 | for_each_pci_dev(pdev) { | 2129 | for_each_pci_dev(pdev) { |
2094 | for (i = 0; i < e820.nr_map; i++) { | 2130 | printk(KERN_INFO "IOMMU: identity mapping for device %s\n", |
2095 | struct e820entry *ei = &e820.map[i]; | 2131 | pci_name(pdev)); |
2096 | 2132 | ||
2097 | if (ei->type == E820_RAM) { | 2133 | ret = domain_context_mapping(si_domain, pdev, |
2098 | ret = iommu_prepare_identity_map(pdev, | 2134 | CONTEXT_TT_MULTI_LEVEL); |
2099 | ei->addr, ei->addr + ei->size); | 2135 | if (ret) |
2100 | if (ret) { | 2136 | return ret; |
2101 | printk(KERN_INFO "1:1 mapping to one domain failed.\n"); | ||
2102 | return -EFAULT; | ||
2103 | } | ||
2104 | } | ||
2105 | } | ||
2106 | ret = domain_add_dev_info(si_domain, pdev); | 2137 | ret = domain_add_dev_info(si_domain, pdev); |
2107 | if (ret) | 2138 | if (ret) |
2108 | return ret; | 2139 | return ret; |
@@ -2293,8 +2324,6 @@ int __init init_dmars(void) | |||
2293 | } | 2324 | } |
2294 | } | 2325 | } |
2295 | 2326 | ||
2296 | iommu_prepare_gfx_mapping(); | ||
2297 | |||
2298 | iommu_prepare_isa(); | 2327 | iommu_prepare_isa(); |
2299 | } | 2328 | } |
2300 | 2329 | ||
@@ -2339,50 +2368,40 @@ error: | |||
2339 | return ret; | 2368 | return ret; |
2340 | } | 2369 | } |
2341 | 2370 | ||
2342 | static inline u64 aligned_size(u64 host_addr, size_t size) | 2371 | static inline unsigned long aligned_nrpages(unsigned long host_addr, |
2343 | { | 2372 | size_t size) |
2344 | u64 addr; | ||
2345 | addr = (host_addr & (~PAGE_MASK)) + size; | ||
2346 | return PAGE_ALIGN(addr); | ||
2347 | } | ||
2348 | |||
2349 | struct iova * | ||
2350 | iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end) | ||
2351 | { | 2373 | { |
2352 | struct iova *piova; | 2374 | host_addr &= ~PAGE_MASK; |
2375 | host_addr += size + PAGE_SIZE - 1; | ||
2353 | 2376 | ||
2354 | /* Make sure it's in range */ | 2377 | return host_addr >> VTD_PAGE_SHIFT; |
2355 | end = min_t(u64, DOMAIN_MAX_ADDR(domain->gaw), end); | ||
2356 | if (!size || (IOVA_START_ADDR + size > end)) | ||
2357 | return NULL; | ||
2358 | |||
2359 | piova = alloc_iova(&domain->iovad, | ||
2360 | size >> PAGE_SHIFT, IOVA_PFN(end), 1); | ||
2361 | return piova; | ||
2362 | } | 2378 | } |
2363 | 2379 | ||
2364 | static struct iova * | 2380 | static struct iova *intel_alloc_iova(struct device *dev, |
2365 | __intel_alloc_iova(struct device *dev, struct dmar_domain *domain, | 2381 | struct dmar_domain *domain, |
2366 | size_t size, u64 dma_mask) | 2382 | unsigned long nrpages, uint64_t dma_mask) |
2367 | { | 2383 | { |
2368 | struct pci_dev *pdev = to_pci_dev(dev); | 2384 | struct pci_dev *pdev = to_pci_dev(dev); |
2369 | struct iova *iova = NULL; | 2385 | struct iova *iova = NULL; |
2370 | 2386 | ||
2371 | if (dma_mask <= DMA_BIT_MASK(32) || dmar_forcedac) | 2387 | /* Restrict dma_mask to the width that the iommu can handle */ |
2372 | iova = iommu_alloc_iova(domain, size, dma_mask); | 2388 | dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask); |
2373 | else { | 2389 | |
2390 | if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) { | ||
2374 | /* | 2391 | /* |
2375 | * First try to allocate an io virtual address in | 2392 | * First try to allocate an io virtual address in |
2376 | * DMA_BIT_MASK(32) and if that fails then try allocating | 2393 | * DMA_BIT_MASK(32) and if that fails then try allocating |
2377 | * from higher range | 2394 | * from higher range |
2378 | */ | 2395 | */ |
2379 | iova = iommu_alloc_iova(domain, size, DMA_BIT_MASK(32)); | 2396 | iova = alloc_iova(&domain->iovad, nrpages, |
2380 | if (!iova) | 2397 | IOVA_PFN(DMA_BIT_MASK(32)), 1); |
2381 | iova = iommu_alloc_iova(domain, size, dma_mask); | 2398 | if (iova) |
2382 | } | 2399 | return iova; |
2383 | 2400 | } | |
2384 | if (!iova) { | 2401 | iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1); |
2385 | printk(KERN_ERR"Allocating iova for %s failed", pci_name(pdev)); | 2402 | if (unlikely(!iova)) { |
2403 | printk(KERN_ERR "Allocating %ld-page iova for %s failed", | ||
2404 | nrpages, pci_name(pdev)); | ||
2386 | return NULL; | 2405 | return NULL; |
2387 | } | 2406 | } |
2388 | 2407 | ||
@@ -2485,14 +2504,12 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, | |||
2485 | return 0; | 2504 | return 0; |
2486 | 2505 | ||
2487 | iommu = domain_get_iommu(domain); | 2506 | iommu = domain_get_iommu(domain); |
2488 | size = aligned_size((u64)paddr, size); | 2507 | size = aligned_nrpages(paddr, size); |
2489 | 2508 | ||
2490 | iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask); | 2509 | iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask); |
2491 | if (!iova) | 2510 | if (!iova) |
2492 | goto error; | 2511 | goto error; |
2493 | 2512 | ||
2494 | start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT; | ||
2495 | |||
2496 | /* | 2513 | /* |
2497 | * Check if DMAR supports zero-length reads on write only | 2514 | * Check if DMAR supports zero-length reads on write only |
2498 | * mappings.. | 2515 | * mappings.. |
@@ -2508,20 +2525,20 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, | |||
2508 | * might have two guest_addr mapping to the same host paddr, but this | 2525 | * might have two guest_addr mapping to the same host paddr, but this |
2509 | * is not a big problem | 2526 | * is not a big problem |
2510 | */ | 2527 | */ |
2511 | ret = domain_page_mapping(domain, start_paddr, | 2528 | ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo), |
2512 | ((u64)paddr) & PHYSICAL_PAGE_MASK, | 2529 | paddr >> VTD_PAGE_SHIFT, size, prot); |
2513 | size, prot); | ||
2514 | if (ret) | 2530 | if (ret) |
2515 | goto error; | 2531 | goto error; |
2516 | 2532 | ||
2517 | /* it's a non-present to present mapping. Only flush if caching mode */ | 2533 | /* it's a non-present to present mapping. Only flush if caching mode */ |
2518 | if (cap_caching_mode(iommu->cap)) | 2534 | if (cap_caching_mode(iommu->cap)) |
2519 | iommu_flush_iotlb_psi(iommu, 0, start_paddr, | 2535 | iommu_flush_iotlb_psi(iommu, 0, mm_to_dma_pfn(iova->pfn_lo), size); |
2520 | size >> VTD_PAGE_SHIFT); | ||
2521 | else | 2536 | else |
2522 | iommu_flush_write_buffer(iommu); | 2537 | iommu_flush_write_buffer(iommu); |
2523 | 2538 | ||
2524 | return start_paddr + ((u64)paddr & (~PAGE_MASK)); | 2539 | start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT; |
2540 | start_paddr += paddr & ~PAGE_MASK; | ||
2541 | return start_paddr; | ||
2525 | 2542 | ||
2526 | error: | 2543 | error: |
2527 | if (iova) | 2544 | if (iova) |
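
The tail of __intel_map_single() now builds the returned handle only after the mapping and flush have succeeded: the IOVA's first page frame is shifted back up to a byte address and the caller's sub-page offset is added on. A worked sketch of that address reconstruction (the pfn and paddr values are made up):

#include <stdio.h>
#include <stdint.h>

#define SKETCH_PAGE_SHIFT 12
#define SKETCH_PAGE_MASK  (~((1ULL << SKETCH_PAGE_SHIFT) - 1))

int main(void)
{
        uint64_t pfn_lo = 0xf0000;          /* first page frame of the IOVA */
        uint64_t paddr  = 0x1234567;        /* physical address being mapped */
        uint64_t dma_addr;

        dma_addr = pfn_lo << SKETCH_PAGE_SHIFT;   /* start of the IOVA range */
        dma_addr += paddr & ~SKETCH_PAGE_MASK;    /* keep the in-page offset  */

        printf("dma_addr = 0x%llx\n", (unsigned long long)dma_addr);
        return 0;
}
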
@@ -2614,7 +2631,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, | |||
2614 | { | 2631 | { |
2615 | struct pci_dev *pdev = to_pci_dev(dev); | 2632 | struct pci_dev *pdev = to_pci_dev(dev); |
2616 | struct dmar_domain *domain; | 2633 | struct dmar_domain *domain; |
2617 | unsigned long start_addr; | 2634 | unsigned long start_pfn, last_pfn; |
2618 | struct iova *iova; | 2635 | struct iova *iova; |
2619 | struct intel_iommu *iommu; | 2636 | struct intel_iommu *iommu; |
2620 | 2637 | ||
@@ -2627,22 +2644,25 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, | |||
2627 | iommu = domain_get_iommu(domain); | 2644 | iommu = domain_get_iommu(domain); |
2628 | 2645 | ||
2629 | iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr)); | 2646 | iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr)); |
2630 | if (!iova) | 2647 | if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n", |
2648 | (unsigned long long)dev_addr)) | ||
2631 | return; | 2649 | return; |
2632 | 2650 | ||
2633 | start_addr = iova->pfn_lo << PAGE_SHIFT; | 2651 | start_pfn = mm_to_dma_pfn(iova->pfn_lo); |
2634 | size = aligned_size((u64)dev_addr, size); | 2652 | last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1; |
2635 | 2653 | ||
2636 | pr_debug("Device %s unmapping: %zx@%llx\n", | 2654 | pr_debug("Device %s unmapping: pfn %lx-%lx\n", |
2637 | pci_name(pdev), size, (unsigned long long)start_addr); | 2655 | pci_name(pdev), start_pfn, last_pfn); |
2638 | 2656 | ||
2639 | /* clear the whole page */ | 2657 | /* clear the whole page */ |
2640 | dma_pte_clear_range(domain, start_addr, start_addr + size); | 2658 | dma_pte_clear_range(domain, start_pfn, last_pfn); |
2659 | |||
2641 | /* free page tables */ | 2660 | /* free page tables */ |
2642 | dma_pte_free_pagetable(domain, start_addr, start_addr + size); | 2661 | dma_pte_free_pagetable(domain, start_pfn, last_pfn); |
2662 | |||
2643 | if (intel_iommu_strict) { | 2663 | if (intel_iommu_strict) { |
2644 | iommu_flush_iotlb_psi(iommu, domain->id, start_addr, | 2664 | iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, |
2645 | size >> VTD_PAGE_SHIFT); | 2665 | last_pfn - start_pfn + 1); |
2646 | /* free iova */ | 2666 | /* free iova */ |
2647 | __free_iova(&domain->iovad, iova); | 2667 | __free_iova(&domain->iovad, iova); |
2648 | } else { | 2668 | } else { |
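
intel_unmap_page() now works on a page-frame range instead of byte addresses: start_pfn comes from the IOVA's first frame and last_pfn from one past its last frame, both scaled through mm_to_dma_pfn(). That helper is not shown in this hunk; a plausible reading, consistent with how it is used, is that it rescales an mm-level PFN to VT-d page granularity, e.g.:

#include <stdio.h>

/* Assumed behaviour of mm_to_dma_pfn() (not shown in the hunk): scale an
 * mm-level PFN by the ratio of the kernel page size to the VT-d page size.
 * With 4 KiB kernel pages the two are identical and the shift is zero. */
#define SKETCH_PAGE_SHIFT     12
#define SKETCH_VTD_PAGE_SHIFT 12
#define mm_to_dma_pfn_sketch(pfn) \
        ((pfn) << (SKETCH_PAGE_SHIFT - SKETCH_VTD_PAGE_SHIFT))

int main(void)
{
        unsigned long pfn_lo = 0xf0000, pfn_hi = 0xf0003;  /* a 4-page IOVA */
        unsigned long start_pfn = mm_to_dma_pfn_sketch(pfn_lo);
        unsigned long last_pfn  = mm_to_dma_pfn_sketch(pfn_hi + 1) - 1;

        printf("clear/free pfns %lx-%lx (%lu pages)\n",
               start_pfn, last_pfn, last_pfn - start_pfn + 1);
        return 0;
}
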
@@ -2700,14 +2720,10 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, | |||
2700 | int nelems, enum dma_data_direction dir, | 2720 | int nelems, enum dma_data_direction dir, |
2701 | struct dma_attrs *attrs) | 2721 | struct dma_attrs *attrs) |
2702 | { | 2722 | { |
2703 | int i; | ||
2704 | struct pci_dev *pdev = to_pci_dev(hwdev); | 2723 | struct pci_dev *pdev = to_pci_dev(hwdev); |
2705 | struct dmar_domain *domain; | 2724 | struct dmar_domain *domain; |
2706 | unsigned long start_addr; | 2725 | unsigned long start_pfn, last_pfn; |
2707 | struct iova *iova; | 2726 | struct iova *iova; |
2708 | size_t size = 0; | ||
2709 | phys_addr_t addr; | ||
2710 | struct scatterlist *sg; | ||
2711 | struct intel_iommu *iommu; | 2727 | struct intel_iommu *iommu; |
2712 | 2728 | ||
2713 | if (iommu_no_mapping(pdev)) | 2729 | if (iommu_no_mapping(pdev)) |
@@ -2719,22 +2735,21 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, | |||
2719 | iommu = domain_get_iommu(domain); | 2735 | iommu = domain_get_iommu(domain); |
2720 | 2736 | ||
2721 | iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address)); | 2737 | iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address)); |
2722 | if (!iova) | 2738 | if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n", |
2739 | (unsigned long long)sglist[0].dma_address)) | ||
2723 | return; | 2740 | return; |
2724 | for_each_sg(sglist, sg, nelems, i) { | ||
2725 | addr = page_to_phys(sg_page(sg)) + sg->offset; | ||
2726 | size += aligned_size((u64)addr, sg->length); | ||
2727 | } | ||
2728 | 2741 | ||
2729 | start_addr = iova->pfn_lo << PAGE_SHIFT; | 2742 | start_pfn = mm_to_dma_pfn(iova->pfn_lo); |
2743 | last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1; | ||
2730 | 2744 | ||
2731 | /* clear the whole page */ | 2745 | /* clear the whole page */ |
2732 | dma_pte_clear_range(domain, start_addr, start_addr + size); | 2746 | dma_pte_clear_range(domain, start_pfn, last_pfn); |
2747 | |||
2733 | /* free page tables */ | 2748 | /* free page tables */ |
2734 | dma_pte_free_pagetable(domain, start_addr, start_addr + size); | 2749 | dma_pte_free_pagetable(domain, start_pfn, last_pfn); |
2735 | 2750 | ||
2736 | iommu_flush_iotlb_psi(iommu, domain->id, start_addr, | 2751 | iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, |
2737 | size >> VTD_PAGE_SHIFT); | 2752 | (last_pfn - start_pfn + 1)); |
2738 | 2753 | ||
2739 | /* free iova */ | 2754 | /* free iova */ |
2740 | __free_iova(&domain->iovad, iova); | 2755 | __free_iova(&domain->iovad, iova); |
@@ -2757,17 +2772,16 @@ static int intel_nontranslate_map_sg(struct device *hddev, | |||
2757 | static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, | 2772 | static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, |
2758 | enum dma_data_direction dir, struct dma_attrs *attrs) | 2773 | enum dma_data_direction dir, struct dma_attrs *attrs) |
2759 | { | 2774 | { |
2760 | phys_addr_t addr; | ||
2761 | int i; | 2775 | int i; |
2762 | struct pci_dev *pdev = to_pci_dev(hwdev); | 2776 | struct pci_dev *pdev = to_pci_dev(hwdev); |
2763 | struct dmar_domain *domain; | 2777 | struct dmar_domain *domain; |
2764 | size_t size = 0; | 2778 | size_t size = 0; |
2765 | int prot = 0; | 2779 | int prot = 0; |
2766 | size_t offset = 0; | 2780 | size_t offset_pfn = 0; |
2767 | struct iova *iova = NULL; | 2781 | struct iova *iova = NULL; |
2768 | int ret; | 2782 | int ret; |
2769 | struct scatterlist *sg; | 2783 | struct scatterlist *sg; |
2770 | unsigned long start_addr; | 2784 | unsigned long start_vpfn; |
2771 | struct intel_iommu *iommu; | 2785 | struct intel_iommu *iommu; |
2772 | 2786 | ||
2773 | BUG_ON(dir == DMA_NONE); | 2787 | BUG_ON(dir == DMA_NONE); |
@@ -2780,12 +2794,10 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne | |||
2780 | 2794 | ||
2781 | iommu = domain_get_iommu(domain); | 2795 | iommu = domain_get_iommu(domain); |
2782 | 2796 | ||
2783 | for_each_sg(sglist, sg, nelems, i) { | 2797 | for_each_sg(sglist, sg, nelems, i) |
2784 | addr = page_to_phys(sg_page(sg)) + sg->offset; | 2798 | size += aligned_nrpages(sg->offset, sg->length); |
2785 | size += aligned_size((u64)addr, sg->length); | ||
2786 | } | ||
2787 | 2799 | ||
2788 | iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask); | 2800 | iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask); |
2789 | if (!iova) { | 2801 | if (!iova) { |
2790 | sglist->dma_length = 0; | 2802 | sglist->dma_length = 0; |
2791 | return 0; | 2803 | return 0; |
@@ -2801,35 +2813,24 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne | |||
2801 | if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) | 2813 | if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) |
2802 | prot |= DMA_PTE_WRITE; | 2814 | prot |= DMA_PTE_WRITE; |
2803 | 2815 | ||
2804 | start_addr = iova->pfn_lo << PAGE_SHIFT; | 2816 | start_vpfn = mm_to_dma_pfn(iova->pfn_lo); |
2805 | offset = 0; | 2817 | |
2806 | for_each_sg(sglist, sg, nelems, i) { | 2818 | ret = domain_sg_mapping(domain, start_vpfn, sglist, mm_to_dma_pfn(size), prot); |
2807 | addr = page_to_phys(sg_page(sg)) + sg->offset; | 2819 | if (unlikely(ret)) { |
2808 | size = aligned_size((u64)addr, sg->length); | 2820 | /* clear the page */ |
2809 | ret = domain_page_mapping(domain, start_addr + offset, | 2821 | dma_pte_clear_range(domain, start_vpfn, |
2810 | ((u64)addr) & PHYSICAL_PAGE_MASK, | 2822 | start_vpfn + size - 1); |
2811 | size, prot); | 2823 | /* free page tables */ |
2812 | if (ret) { | 2824 | dma_pte_free_pagetable(domain, start_vpfn, |
2813 | /* clear the page */ | 2825 | start_vpfn + size - 1); |
2814 | dma_pte_clear_range(domain, start_addr, | 2826 | /* free iova */ |
2815 | start_addr + offset); | 2827 | __free_iova(&domain->iovad, iova); |
2816 | /* free page tables */ | 2828 | return 0; |
2817 | dma_pte_free_pagetable(domain, start_addr, | ||
2818 | start_addr + offset); | ||
2819 | /* free iova */ | ||
2820 | __free_iova(&domain->iovad, iova); | ||
2821 | return 0; | ||
2822 | } | ||
2823 | sg->dma_address = start_addr + offset + | ||
2824 | ((u64)addr & (~PAGE_MASK)); | ||
2825 | sg->dma_length = sg->length; | ||
2826 | offset += size; | ||
2827 | } | 2829 | } |
2828 | 2830 | ||
2829 | /* it's a non-present to present mapping. Only flush if caching mode */ | 2831 | /* it's a non-present to present mapping. Only flush if caching mode */ |
2830 | if (cap_caching_mode(iommu->cap)) | 2832 | if (cap_caching_mode(iommu->cap)) |
2831 | iommu_flush_iotlb_psi(iommu, 0, start_addr, | 2833 | iommu_flush_iotlb_psi(iommu, 0, start_vpfn, offset_pfn); |
2832 | offset >> VTD_PAGE_SHIFT); | ||
2833 | else | 2834 | else |
2834 | iommu_flush_write_buffer(iommu); | 2835 | iommu_flush_write_buffer(iommu); |
2835 | 2836 | ||
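
For scatter-gather maps the total IOVA size is now computed per element from each segment's in-page offset and length, using the same aligned_nrpages() round-up as the single-buffer path, and a single domain_sg_mapping() call replaces the per-segment mapping loop. A sketch of the page-count summation over a toy scatterlist (the struct below is a stand-in, not the kernel's struct scatterlist):

#include <stdio.h>
#include <stddef.h>

#define SKETCH_PAGE_SHIFT 12
#define SKETCH_PAGE_SIZE  (1UL << SKETCH_PAGE_SHIFT)

struct toy_sg { unsigned int offset; unsigned int length; };

static unsigned long nrpages(unsigned long offset, size_t len)
{
        return (offset + len + SKETCH_PAGE_SIZE - 1) >> SKETCH_PAGE_SHIFT;
}

int main(void)
{
        struct toy_sg sgl[] = { { 0x800, 0x1000 }, { 0, 0x3000 }, { 0x10, 0x20 } };
        unsigned long size = 0;

        for (size_t i = 0; i < sizeof(sgl) / sizeof(sgl[0]); i++)
                size += nrpages(sgl[i].offset, sgl[i].length);

        printf("IOVA pages needed: %lu\n", size);  /* 2 + 3 + 1 = 6 */
        return 0;
}
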
@@ -3334,7 +3335,6 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width) | |||
3334 | int adjust_width; | 3335 | int adjust_width; |
3335 | 3336 | ||
3336 | init_iova_domain(&domain->iovad, DMA_32BIT_PFN); | 3337 | init_iova_domain(&domain->iovad, DMA_32BIT_PFN); |
3337 | spin_lock_init(&domain->mapping_lock); | ||
3338 | spin_lock_init(&domain->iommu_lock); | 3338 | spin_lock_init(&domain->iommu_lock); |
3339 | 3339 | ||
3340 | domain_reserve_special_ranges(domain); | 3340 | domain_reserve_special_ranges(domain); |
@@ -3388,8 +3388,6 @@ static void iommu_free_vm_domain(struct dmar_domain *domain) | |||
3388 | 3388 | ||
3389 | static void vm_domain_exit(struct dmar_domain *domain) | 3389 | static void vm_domain_exit(struct dmar_domain *domain) |
3390 | { | 3390 | { |
3391 | u64 end; | ||
3392 | |||
3393 | /* Domain 0 is reserved, so dont process it */ | 3391 | /* Domain 0 is reserved, so dont process it */ |
3394 | if (!domain) | 3392 | if (!domain) |
3395 | return; | 3393 | return; |
@@ -3397,14 +3395,12 @@ static void vm_domain_exit(struct dmar_domain *domain) | |||
3397 | vm_domain_remove_all_dev_info(domain); | 3395 | vm_domain_remove_all_dev_info(domain); |
3398 | /* destroy iovas */ | 3396 | /* destroy iovas */ |
3399 | put_iova_domain(&domain->iovad); | 3397 | put_iova_domain(&domain->iovad); |
3400 | end = DOMAIN_MAX_ADDR(domain->gaw); | ||
3401 | end = end & (~VTD_PAGE_MASK); | ||
3402 | 3398 | ||
3403 | /* clear ptes */ | 3399 | /* clear ptes */ |
3404 | dma_pte_clear_range(domain, 0, end); | 3400 | dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); |
3405 | 3401 | ||
3406 | /* free page tables */ | 3402 | /* free page tables */ |
3407 | dma_pte_free_pagetable(domain, 0, end); | 3403 | dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); |
3408 | 3404 | ||
3409 | iommu_free_vm_domain(domain); | 3405 | iommu_free_vm_domain(domain); |
3410 | free_domain_mem(domain); | 3406 | free_domain_mem(domain); |
@@ -3513,7 +3509,7 @@ static int intel_iommu_map_range(struct iommu_domain *domain, | |||
3513 | if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping) | 3509 | if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping) |
3514 | prot |= DMA_PTE_SNP; | 3510 | prot |= DMA_PTE_SNP; |
3515 | 3511 | ||
3516 | max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size); | 3512 | max_addr = iova + size; |
3517 | if (dmar_domain->max_addr < max_addr) { | 3513 | if (dmar_domain->max_addr < max_addr) { |
3518 | int min_agaw; | 3514 | int min_agaw; |
3519 | u64 end; | 3515 | u64 end; |
@@ -3531,8 +3527,11 @@ static int intel_iommu_map_range(struct iommu_domain *domain, | |||
3531 | } | 3527 | } |
3532 | dmar_domain->max_addr = max_addr; | 3528 | dmar_domain->max_addr = max_addr; |
3533 | } | 3529 | } |
3534 | 3530 | /* Round up size to next multiple of PAGE_SIZE, if it and | |
3535 | ret = domain_page_mapping(dmar_domain, iova, hpa, size, prot); | 3531 | the low bits of hpa would take us onto the next page */ |
3532 | size = aligned_nrpages(hpa, size); | ||
3533 | ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT, | ||
3534 | hpa >> VTD_PAGE_SHIFT, size, prot); | ||
3536 | return ret; | 3535 | return ret; |
3537 | } | 3536 | } |
3538 | 3537 | ||
@@ -3540,15 +3539,12 @@ static void intel_iommu_unmap_range(struct iommu_domain *domain, | |||
3540 | unsigned long iova, size_t size) | 3539 | unsigned long iova, size_t size) |
3541 | { | 3540 | { |
3542 | struct dmar_domain *dmar_domain = domain->priv; | 3541 | struct dmar_domain *dmar_domain = domain->priv; |
3543 | dma_addr_t base; | ||
3544 | 3542 | ||
3545 | /* The address might not be aligned */ | 3543 | dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT, |
3546 | base = iova & VTD_PAGE_MASK; | 3544 | (iova + size - 1) >> VTD_PAGE_SHIFT); |
3547 | size = VTD_PAGE_ALIGN(size); | ||
3548 | dma_pte_clear_range(dmar_domain, base, base + size); | ||
3549 | 3545 | ||
3550 | if (dmar_domain->max_addr == base + size) | 3546 | if (dmar_domain->max_addr == iova + size) |
3551 | dmar_domain->max_addr = base; | 3547 | dmar_domain->max_addr = iova; |
3552 | } | 3548 | } |
3553 | 3549 | ||
3554 | static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, | 3550 | static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, |
@@ -3558,7 +3554,7 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, | |||
3558 | struct dma_pte *pte; | 3554 | struct dma_pte *pte; |
3559 | u64 phys = 0; | 3555 | u64 phys = 0; |
3560 | 3556 | ||
3561 | pte = addr_to_dma_pte(dmar_domain, iova); | 3557 | pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT); |
3562 | if (pte) | 3558 | if (pte) |
3563 | phys = dma_pte_addr(pte); | 3559 | phys = dma_pte_addr(pte); |
3564 | 3560 | ||
diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c index 2287116e9822..46dd440e2315 100644 --- a/drivers/pci/iova.c +++ b/drivers/pci/iova.c | |||
@@ -1,9 +1,19 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2006, Intel Corporation. | 2 | * Copyright © 2006-2009, Intel Corporation. |
3 | * | 3 | * |
4 | * This file is released under the GPLv2. | 4 | * This program is free software; you can redistribute it and/or modify it |
5 | * under the terms and conditions of the GNU General Public License, | ||
6 | * version 2, as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
11 | * more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License along with | ||
14 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | ||
15 | * Place - Suite 330, Boston, MA 02111-1307 USA. | ||
5 | * | 16 | * |
6 | * Copyright (C) 2006-2008 Intel Corporation | ||
7 | * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> | 17 | * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> |
8 | */ | 18 | */ |
9 | 19 | ||
@@ -123,7 +133,15 @@ move_left: | |||
123 | /* Insert the new_iova into domain rbtree by holding writer lock */ | 133 | /* Insert the new_iova into domain rbtree by holding writer lock */ |
124 | /* Add new node and rebalance tree. */ | 134 | /* Add new node and rebalance tree. */ |
125 | { | 135 | { |
126 | struct rb_node **entry = &((prev)), *parent = NULL; | 136 | struct rb_node **entry, *parent = NULL; |
137 | |||
138 | /* If we have 'prev', it's a valid place to start the | ||
139 | insertion. Otherwise, start from the root. */ | ||
140 | if (prev) | ||
141 | entry = &prev; | ||
142 | else | ||
143 | entry = &iovad->rbroot.rb_node; | ||
144 | |||
127 | /* Figure out where to put new node */ | 145 | /* Figure out where to put new node */ |
128 | while (*entry) { | 146 | while (*entry) { |
129 | struct iova *this = container_of(*entry, | 147 | struct iova *this = container_of(*entry, |
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index fee6a4022bc1..46dad12f952f 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig | |||
@@ -355,10 +355,9 @@ config EEEPC_LAPTOP | |||
355 | depends on INPUT | 355 | depends on INPUT |
356 | depends on EXPERIMENTAL | 356 | depends on EXPERIMENTAL |
357 | depends on RFKILL || RFKILL = n | 357 | depends on RFKILL || RFKILL = n |
358 | depends on HOTPLUG_PCI | ||
358 | select BACKLIGHT_CLASS_DEVICE | 359 | select BACKLIGHT_CLASS_DEVICE |
359 | select HWMON | 360 | select HWMON |
360 | select HOTPLUG | ||
361 | select HOTPLUG_PCI if PCI | ||
362 | ---help--- | 361 | ---help--- |
363 | This driver supports the Fn-Fx keys on Eee PC laptops. | 362 | This driver supports the Fn-Fx keys on Eee PC laptops. |
364 | 363 | ||
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c index aafd3e6ebb0d..a118eb0f1e67 100644 --- a/drivers/rtc/rtc-bfin.c +++ b/drivers/rtc/rtc-bfin.c | |||
@@ -1,8 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * Blackfin On-Chip Real Time Clock Driver | 2 | * Blackfin On-Chip Real Time Clock Driver |
3 | * Supports BF52[257]/BF53[123]/BF53[467]/BF54[24789] | 3 | * Supports BF51x/BF52x/BF53[123]/BF53[467]/BF54x |
4 | * | 4 | * |
5 | * Copyright 2004-2008 Analog Devices Inc. | 5 | * Copyright 2004-2009 Analog Devices Inc. |
6 | * | 6 | * |
7 | * Enter bugs at http://blackfin.uclinux.org/ | 7 | * Enter bugs at http://blackfin.uclinux.org/ |
8 | * | 8 | * |
@@ -363,7 +363,7 @@ static int __devinit bfin_rtc_probe(struct platform_device *pdev) | |||
363 | struct bfin_rtc *rtc; | 363 | struct bfin_rtc *rtc; |
364 | struct device *dev = &pdev->dev; | 364 | struct device *dev = &pdev->dev; |
365 | int ret = 0; | 365 | int ret = 0; |
366 | unsigned long timeout; | 366 | unsigned long timeout = jiffies + HZ; |
367 | 367 | ||
368 | dev_dbg_stamp(dev); | 368 | dev_dbg_stamp(dev); |
369 | 369 | ||
@@ -374,32 +374,32 @@ static int __devinit bfin_rtc_probe(struct platform_device *pdev) | |||
374 | platform_set_drvdata(pdev, rtc); | 374 | platform_set_drvdata(pdev, rtc); |
375 | device_init_wakeup(dev, 1); | 375 | device_init_wakeup(dev, 1); |
376 | 376 | ||
377 | /* Register our RTC with the RTC framework */ | ||
378 | rtc->rtc_dev = rtc_device_register(pdev->name, dev, &bfin_rtc_ops, | ||
379 | THIS_MODULE); | ||
380 | if (unlikely(IS_ERR(rtc->rtc_dev))) { | ||
381 | ret = PTR_ERR(rtc->rtc_dev); | ||
382 | goto err; | ||
383 | } | ||
384 | |||
377 | /* Grab the IRQ and init the hardware */ | 385 | /* Grab the IRQ and init the hardware */ |
378 | ret = request_irq(IRQ_RTC, bfin_rtc_interrupt, IRQF_SHARED, pdev->name, dev); | 386 | ret = request_irq(IRQ_RTC, bfin_rtc_interrupt, IRQF_SHARED, pdev->name, dev); |
379 | if (unlikely(ret)) | 387 | if (unlikely(ret)) |
380 | goto err; | 388 | goto err_reg; |
381 | /* sometimes the bootloader touched things, but the write complete was not | 389 | /* sometimes the bootloader touched things, but the write complete was not |
382 | * enabled, so let's just do a quick timeout here since the IRQ will not fire ... | 390 | * enabled, so let's just do a quick timeout here since the IRQ will not fire ... |
383 | */ | 391 | */ |
384 | timeout = jiffies + HZ; | ||
385 | while (bfin_read_RTC_ISTAT() & RTC_ISTAT_WRITE_PENDING) | 392 | while (bfin_read_RTC_ISTAT() & RTC_ISTAT_WRITE_PENDING) |
386 | if (time_after(jiffies, timeout)) | 393 | if (time_after(jiffies, timeout)) |
387 | break; | 394 | break; |
388 | bfin_rtc_reset(dev, RTC_ISTAT_WRITE_COMPLETE); | 395 | bfin_rtc_reset(dev, RTC_ISTAT_WRITE_COMPLETE); |
389 | bfin_write_RTC_SWCNT(0); | 396 | bfin_write_RTC_SWCNT(0); |
390 | 397 | ||
391 | /* Register our RTC with the RTC framework */ | ||
392 | rtc->rtc_dev = rtc_device_register(pdev->name, dev, &bfin_rtc_ops, THIS_MODULE); | ||
393 | if (unlikely(IS_ERR(rtc->rtc_dev))) { | ||
394 | ret = PTR_ERR(rtc->rtc_dev); | ||
395 | goto err_irq; | ||
396 | } | ||
397 | |||
398 | return 0; | 398 | return 0; |
399 | 399 | ||
400 | err_irq: | 400 | err_reg: |
401 | free_irq(IRQ_RTC, dev); | 401 | rtc_device_unregister(rtc->rtc_dev); |
402 | err: | 402 | err: |
403 | kfree(rtc); | 403 | kfree(rtc); |
404 | return ret; | 404 | return ret; |
405 | } | 405 | } |
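
The probe rework above registers the RTC class device before requesting the shared IRQ and renames the error label to match, so a failed request_irq() now unregisters the device instead of freeing an IRQ that was never taken, while the existing err label still frees the private data. A toy userspace sketch of that reverse-order unwinding pattern (every name and return value here is invented):

#include <stdio.h>
#include <stdlib.h>

static int toy_probe(int fail_step)
{
        int ret = 0;
        char *priv = malloc(32);            /* step 1: allocate driver state */

        if (!priv)
                return -12;                 /* stand-in for -ENOMEM */

        if (fail_step == 1) {               /* step 2: device registration fails */
                ret = -5;
                goto err;
        }
        if (fail_step == 2) {               /* step 3: IRQ request fails */
                ret = -16;
                goto err_reg;
        }
        printf("probe ok\n");
        free(priv);                         /* not what a real driver does on */
        return 0;                           /* success; just keeps the toy leak-free */

err_reg:
        printf("undo registration\n");      /* reverse of step 2 */
err:
        free(priv);                         /* reverse of step 1 */
        return ret;
}

int main(void)
{
        printf("ret=%d\n", toy_probe(2));
        return 0;
}
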
diff --git a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c index 74369a3f963b..c399f485aa7d 100644 --- a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c +++ b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c | |||
@@ -13,6 +13,7 @@ | |||
13 | 13 | ||
14 | #include <linux/inet.h> | 14 | #include <linux/inet.h> |
15 | #include <linux/crypto.h> | 15 | #include <linux/crypto.h> |
16 | #include <linux/if_vlan.h> | ||
16 | #include <net/dst.h> | 17 | #include <net/dst.h> |
17 | #include <net/tcp.h> | 18 | #include <net/tcp.h> |
18 | #include <scsi/scsi_cmnd.h> | 19 | #include <scsi/scsi_cmnd.h> |
@@ -184,6 +185,9 @@ static struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev) | |||
184 | struct cxgb3i_adapter *snic; | 185 | struct cxgb3i_adapter *snic; |
185 | int i; | 186 | int i; |
186 | 187 | ||
188 | if (ndev->priv_flags & IFF_802_1Q_VLAN) | ||
189 | ndev = vlan_dev_real_dev(ndev); | ||
190 | |||
187 | read_lock(&cxgb3i_snic_rwlock); | 191 | read_lock(&cxgb3i_snic_rwlock); |
188 | list_for_each_entry(snic, &cxgb3i_snic_list, list_head) { | 192 | list_for_each_entry(snic, &cxgb3i_snic_list, list_head) { |
189 | for (i = 0; i < snic->hba_cnt; i++) { | 193 | for (i = 0; i < snic->hba_cnt; i++) { |
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index a84072865fc2..2c266c01dc5a 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c | |||
@@ -473,16 +473,16 @@ static int __devinit fnic_probe(struct pci_dev *pdev, | |||
473 | * limitation for the device. Try 40-bit first, and | 473 | * limitation for the device. Try 40-bit first, and |
474 | * fail to 32-bit. | 474 | * fail to 32-bit. |
475 | */ | 475 | */ |
476 | err = pci_set_dma_mask(pdev, DMA_40BIT_MASK); | 476 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40)); |
477 | if (err) { | 477 | if (err) { |
478 | err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); | 478 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); |
479 | if (err) { | 479 | if (err) { |
480 | shost_printk(KERN_ERR, fnic->lport->host, | 480 | shost_printk(KERN_ERR, fnic->lport->host, |
481 | "No usable DMA configuration " | 481 | "No usable DMA configuration " |
482 | "aborting\n"); | 482 | "aborting\n"); |
483 | goto err_out_release_regions; | 483 | goto err_out_release_regions; |
484 | } | 484 | } |
485 | err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); | 485 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); |
486 | if (err) { | 486 | if (err) { |
487 | shost_printk(KERN_ERR, fnic->lport->host, | 487 | shost_printk(KERN_ERR, fnic->lport->host, |
488 | "Unable to obtain 32-bit DMA " | 488 | "Unable to obtain 32-bit DMA " |
@@ -490,7 +490,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev, | |||
490 | goto err_out_release_regions; | 490 | goto err_out_release_regions; |
491 | } | 491 | } |
492 | } else { | 492 | } else { |
493 | err = pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK); | 493 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)); |
494 | if (err) { | 494 | if (err) { |
495 | shost_printk(KERN_ERR, fnic->lport->host, | 495 | shost_printk(KERN_ERR, fnic->lport->host, |
496 | "Unable to obtain 40-bit DMA " | 496 | "Unable to obtain 40-bit DMA " |
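
The fnic hunks above swap the deprecated DMA_40BIT_MASK/DMA_32BIT_MASK constants for the DMA_BIT_MASK(n) macro. A standalone sketch of what such an n-bit address mask expands to (the real macro lives in <linux/dma-mapping.h>; this is an equivalent illustration, including the usual n == 64 special case that avoids an undefined 64-bit shift):

#include <stdio.h>
#include <stdint.h>

#define SKETCH_DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
        printf("40-bit mask: 0x%llx\n",
               (unsigned long long)SKETCH_DMA_BIT_MASK(40)); /* 0xffffffffff */
        printf("32-bit mask: 0x%llx\n",
               (unsigned long long)SKETCH_DMA_BIT_MASK(32)); /* 0xffffffff */
        return 0;
}
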
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index eabf36502856..bfc996971b81 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c | |||
@@ -245,7 +245,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic, | |||
245 | struct vnic_wq_copy *wq, | 245 | struct vnic_wq_copy *wq, |
246 | struct fnic_io_req *io_req, | 246 | struct fnic_io_req *io_req, |
247 | struct scsi_cmnd *sc, | 247 | struct scsi_cmnd *sc, |
248 | u32 sg_count) | 248 | int sg_count) |
249 | { | 249 | { |
250 | struct scatterlist *sg; | 250 | struct scatterlist *sg; |
251 | struct fc_rport *rport = starget_to_rport(scsi_target(sc->device)); | 251 | struct fc_rport *rport = starget_to_rport(scsi_target(sc->device)); |
@@ -260,9 +260,6 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic, | |||
260 | char msg[2]; | 260 | char msg[2]; |
261 | 261 | ||
262 | if (sg_count) { | 262 | if (sg_count) { |
263 | BUG_ON(sg_count < 0); | ||
264 | BUG_ON(sg_count > FNIC_MAX_SG_DESC_CNT); | ||
265 | |||
266 | /* For each SGE, create a device desc entry */ | 263 | /* For each SGE, create a device desc entry */ |
267 | desc = io_req->sgl_list; | 264 | desc = io_req->sgl_list; |
268 | for_each_sg(scsi_sglist(sc), sg, sg_count, i) { | 265 | for_each_sg(scsi_sglist(sc), sg, sg_count, i) { |
@@ -344,7 +341,7 @@ int fnic_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) | |||
344 | struct fnic *fnic; | 341 | struct fnic *fnic; |
345 | struct vnic_wq_copy *wq; | 342 | struct vnic_wq_copy *wq; |
346 | int ret; | 343 | int ret; |
347 | u32 sg_count; | 344 | int sg_count; |
348 | unsigned long flags; | 345 | unsigned long flags; |
349 | unsigned long ptr; | 346 | unsigned long ptr; |
350 | 347 | ||
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index 869a11bdccbd..9928704e235f 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c | |||
@@ -1095,9 +1095,14 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct) | |||
1095 | MAX_INDIRECT_BUFS); | 1095 | MAX_INDIRECT_BUFS); |
1096 | hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS; | 1096 | hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS; |
1097 | } | 1097 | } |
1098 | |||
1099 | if (hostdata->madapter_info.os_type == 3) { | ||
1100 | enable_fast_fail(hostdata); | ||
1101 | return; | ||
1102 | } | ||
1098 | } | 1103 | } |
1099 | 1104 | ||
1100 | enable_fast_fail(hostdata); | 1105 | send_srp_login(hostdata); |
1101 | } | 1106 | } |
1102 | 1107 | ||
1103 | /** | 1108 | /** |
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 2eee9e6e4fe8..292c02f810d0 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c | |||
@@ -3670,13 +3670,14 @@ static void | |||
3670 | fc_bsg_goose_queue(struct fc_rport *rport) | 3670 | fc_bsg_goose_queue(struct fc_rport *rport) |
3671 | { | 3671 | { |
3672 | int flagset; | 3672 | int flagset; |
3673 | unsigned long flags; | ||
3673 | 3674 | ||
3674 | if (!rport->rqst_q) | 3675 | if (!rport->rqst_q) |
3675 | return; | 3676 | return; |
3676 | 3677 | ||
3677 | get_device(&rport->dev); | 3678 | get_device(&rport->dev); |
3678 | 3679 | ||
3679 | spin_lock(rport->rqst_q->queue_lock); | 3680 | spin_lock_irqsave(rport->rqst_q->queue_lock, flags); |
3680 | flagset = test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags) && | 3681 | flagset = test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags) && |
3681 | !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags); | 3682 | !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags); |
3682 | if (flagset) | 3683 | if (flagset) |
@@ -3684,7 +3685,7 @@ fc_bsg_goose_queue(struct fc_rport *rport) | |||
3684 | __blk_run_queue(rport->rqst_q); | 3685 | __blk_run_queue(rport->rqst_q); |
3685 | if (flagset) | 3686 | if (flagset) |
3686 | queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q); | 3687 | queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q); |
3687 | spin_unlock(rport->rqst_q->queue_lock); | 3688 | spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags); |
3688 | 3689 | ||
3689 | put_device(&rport->dev); | 3690 | put_device(&rport->dev); |
3690 | } | 3691 | } |
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 8201387b4daa..ef142fd47a83 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c | |||
@@ -210,13 +210,11 @@ static void sg_put_dev(Sg_device *sdp); | |||
210 | static int sg_allow_access(struct file *filp, unsigned char *cmd) | 210 | static int sg_allow_access(struct file *filp, unsigned char *cmd) |
211 | { | 211 | { |
212 | struct sg_fd *sfp = (struct sg_fd *)filp->private_data; | 212 | struct sg_fd *sfp = (struct sg_fd *)filp->private_data; |
213 | struct request_queue *q = sfp->parentdp->device->request_queue; | ||
214 | 213 | ||
215 | if (sfp->parentdp->device->type == TYPE_SCANNER) | 214 | if (sfp->parentdp->device->type == TYPE_SCANNER) |
216 | return 0; | 215 | return 0; |
217 | 216 | ||
218 | return blk_verify_command(&q->cmd_filter, | 217 | return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE); |
219 | cmd, filp->f_mode & FMODE_WRITE); | ||
220 | } | 218 | } |
221 | 219 | ||
222 | static int | 220 | static int |
diff --git a/drivers/scsi/zalon.c b/drivers/scsi/zalon.c index 97f3158fa7b5..27e84e4b1fa9 100644 --- a/drivers/scsi/zalon.c +++ b/drivers/scsi/zalon.c | |||
@@ -134,7 +134,7 @@ zalon_probe(struct parisc_device *dev) | |||
134 | 134 | ||
135 | host = ncr_attach(&zalon7xx_template, unit, &device); | 135 | host = ncr_attach(&zalon7xx_template, unit, &device); |
136 | if (!host) | 136 | if (!host) |
137 | goto fail; | 137 | return -ENODEV; |
138 | 138 | ||
139 | if (request_irq(dev->irq, ncr53c8xx_intr, IRQF_SHARED, "zalon", host)) { | 139 | if (request_irq(dev->irq, ncr53c8xx_intr, IRQF_SHARED, "zalon", host)) { |
140 | dev_printk(KERN_ERR, &dev->dev, "irq problem with %d, detaching\n ", | 140 | dev_printk(KERN_ERR, &dev->dev, "irq problem with %d, detaching\n ", |
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c index a07015d646dd..6160e03f410c 100644 --- a/drivers/serial/8250_pci.c +++ b/drivers/serial/8250_pci.c | |||
@@ -759,6 +759,8 @@ static int pci_netmos_init(struct pci_dev *dev) | |||
759 | /* subdevice 0x00PS means <P> parallel, <S> serial */ | 759 | /* subdevice 0x00PS means <P> parallel, <S> serial */ |
760 | unsigned int num_serial = dev->subsystem_device & 0xf; | 760 | unsigned int num_serial = dev->subsystem_device & 0xf; |
761 | 761 | ||
762 | if (dev->device == PCI_DEVICE_ID_NETMOS_9901) | ||
763 | return 0; | ||
762 | if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM && | 764 | if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM && |
763 | dev->subsystem_device == 0x0299) | 765 | dev->subsystem_device == 0x0299) |
764 | return 0; | 766 | return 0; |
@@ -3557,6 +3559,10 @@ static struct pci_device_id serial_pci_tbl[] = { | |||
3557 | PCI_VENDOR_ID_IBM, 0x0299, | 3559 | PCI_VENDOR_ID_IBM, 0x0299, |
3558 | 0, 0, pbn_b0_bt_2_115200 }, | 3560 | 0, 0, pbn_b0_bt_2_115200 }, |
3559 | 3561 | ||
3562 | { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9901, | ||
3563 | 0xA000, 0x1000, | ||
3564 | 0, 0, pbn_b0_1_115200 }, | ||
3565 | |||
3560 | /* | 3566 | /* |
3561 | * These entries match devices with class COMMUNICATION_SERIAL, | 3567 | * These entries match devices with class COMMUNICATION_SERIAL, |
3562 | * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL | 3568 | * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL |
diff --git a/drivers/spi/omap_uwire.c b/drivers/spi/omap_uwire.c index aa90ddb37066..8980a5640bd9 100644 --- a/drivers/spi/omap_uwire.c +++ b/drivers/spi/omap_uwire.c | |||
@@ -514,6 +514,8 @@ static int __init uwire_probe(struct platform_device *pdev) | |||
514 | /* the spi->mode bits understood by this driver: */ | 514 | /* the spi->mode bits understood by this driver: */ |
515 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; | 515 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; |
516 | 516 | ||
517 | master->flags = SPI_MASTER_HALF_DUPLEX; | ||
518 | |||
517 | master->bus_num = 2; /* "official" */ | 519 | master->bus_num = 2; /* "official" */ |
518 | master->num_chipselect = 4; | 520 | master->num_chipselect = 4; |
519 | master->setup = uwire_setup; | 521 | master->setup = uwire_setup; |
diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi_bitbang.c index 2a5abc08e857..f1db395dd889 100644 --- a/drivers/spi/spi_bitbang.c +++ b/drivers/spi/spi_bitbang.c | |||
@@ -258,6 +258,11 @@ static void bitbang_work(struct work_struct *work) | |||
258 | struct spi_bitbang *bitbang = | 258 | struct spi_bitbang *bitbang = |
259 | container_of(work, struct spi_bitbang, work); | 259 | container_of(work, struct spi_bitbang, work); |
260 | unsigned long flags; | 260 | unsigned long flags; |
261 | int do_setup = -1; | ||
262 | int (*setup_transfer)(struct spi_device *, | ||
263 | struct spi_transfer *); | ||
264 | |||
265 | setup_transfer = bitbang->setup_transfer; | ||
261 | 266 | ||
262 | spin_lock_irqsave(&bitbang->lock, flags); | 267 | spin_lock_irqsave(&bitbang->lock, flags); |
263 | bitbang->busy = 1; | 268 | bitbang->busy = 1; |
@@ -269,8 +274,6 @@ static void bitbang_work(struct work_struct *work) | |||
269 | unsigned tmp; | 274 | unsigned tmp; |
270 | unsigned cs_change; | 275 | unsigned cs_change; |
271 | int status; | 276 | int status; |
272 | int (*setup_transfer)(struct spi_device *, | ||
273 | struct spi_transfer *); | ||
274 | 277 | ||
275 | m = container_of(bitbang->queue.next, struct spi_message, | 278 | m = container_of(bitbang->queue.next, struct spi_message, |
276 | queue); | 279 | queue); |
@@ -287,19 +290,19 @@ static void bitbang_work(struct work_struct *work) | |||
287 | tmp = 0; | 290 | tmp = 0; |
288 | cs_change = 1; | 291 | cs_change = 1; |
289 | status = 0; | 292 | status = 0; |
290 | setup_transfer = NULL; | ||
291 | 293 | ||
292 | list_for_each_entry (t, &m->transfers, transfer_list) { | 294 | list_for_each_entry (t, &m->transfers, transfer_list) { |
293 | 295 | ||
294 | /* override or restore speed and wordsize */ | 296 | /* override speed or wordsize? */ |
295 | if (t->speed_hz || t->bits_per_word) { | 297 | if (t->speed_hz || t->bits_per_word) |
296 | setup_transfer = bitbang->setup_transfer; | 298 | do_setup = 1; |
299 | |||
300 | /* init (-1) or override (1) transfer params */ | ||
301 | if (do_setup != 0) { | ||
297 | if (!setup_transfer) { | 302 | if (!setup_transfer) { |
298 | status = -ENOPROTOOPT; | 303 | status = -ENOPROTOOPT; |
299 | break; | 304 | break; |
300 | } | 305 | } |
301 | } | ||
302 | if (setup_transfer) { | ||
303 | status = setup_transfer(spi, t); | 306 | status = setup_transfer(spi, t); |
304 | if (status < 0) | 307 | if (status < 0) |
305 | break; | 308 | break; |
@@ -363,9 +366,10 @@ static void bitbang_work(struct work_struct *work) | |||
363 | m->status = status; | 366 | m->status = status; |
364 | m->complete(m->context); | 367 | m->complete(m->context); |
365 | 368 | ||
366 | /* restore speed and wordsize */ | 369 | /* restore speed and wordsize if it was overridden */ |
367 | if (setup_transfer) | 370 | if (do_setup == 1) |
368 | setup_transfer(spi, NULL); | 371 | setup_transfer(spi, NULL); |
372 | do_setup = 0; | ||
369 | 373 | ||
370 | /* normally deactivate chipselect ... unless no error and | 374 | /* normally deactivate chipselect ... unless no error and |
371 | * cs_change has hinted that the next message will probably | 375 | * cs_change has hinted that the next message will probably |
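
The bitbang_work() change above replaces the per-message setup_transfer pointer juggling with a small tri-state: do_setup starts at -1 so transfer parameters are programmed at least once, flips to 1 when a transfer overrides speed or word size (forcing a restore at end of message), and drops back to 0 afterwards so untouched transfers skip the call. A standalone sketch of that state machine over one message (the struct and values are invented for illustration):

#include <stdio.h>

struct xfer { unsigned int speed_hz; unsigned int bits_per_word; };

int main(void)
{
        struct xfer msg[] = { { 0, 0 }, { 500000, 0 }, { 0, 0 } };
        int do_setup = -1;

        for (unsigned int i = 0; i < sizeof(msg) / sizeof(msg[0]); i++) {
                if (msg[i].speed_hz || msg[i].bits_per_word)
                        do_setup = 1;                 /* transfer overrides params */
                if (do_setup != 0)                    /* init (-1) or override (1) */
                        printf("transfer %u: call setup_transfer()\n", i);
                else
                        printf("transfer %u: keep current settings\n", i);
        }

        /* end of message: restore defaults only if something was overridden */
        if (do_setup == 1)
                printf("restore: setup_transfer(spi, NULL)\n");
        do_setup = 0;
        return 0;
}
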
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index 5d869c4d3eb2..606e7a40a8da 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c | |||
@@ -58,15 +58,20 @@ static unsigned long minors[N_SPI_MINORS / BITS_PER_LONG]; | |||
58 | 58 | ||
59 | 59 | ||
60 | /* Bit masks for spi_device.mode management. Note that incorrect | 60 | /* Bit masks for spi_device.mode management. Note that incorrect |
61 | * settings for CS_HIGH and 3WIRE can cause *lots* of trouble for other | 61 | * settings for some settings can cause *lots* of trouble for other |
62 | * devices on a shared bus: CS_HIGH, because this device will be | 62 | * devices on a shared bus: |
63 | * active when it shouldn't be; 3WIRE, because when active it won't | ||
64 | * behave as it should. | ||
65 | * | 63 | * |
66 | * REVISIT should changing those two modes be privileged? | 64 | * - CS_HIGH ... this device will be active when it shouldn't be |
65 | * - 3WIRE ... when active, it won't behave as it should | ||
66 | * - NO_CS ... there will be no explicit message boundaries; this | ||
67 | * is completely incompatible with the shared bus model | ||
68 | * - READY ... transfers may proceed when they shouldn't. | ||
69 | * | ||
70 | * REVISIT should changing those flags be privileged? | ||
67 | */ | 71 | */ |
68 | #define SPI_MODE_MASK (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH \ | 72 | #define SPI_MODE_MASK (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH \ |
69 | | SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP) | 73 | | SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP \ |
74 | | SPI_NO_CS | SPI_READY) | ||
70 | 75 | ||
71 | struct spidev_data { | 76 | struct spidev_data { |
72 | dev_t devt; | 77 | dev_t devt; |
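
SPI_MODE_MASK above is the whitelist of mode bits spidev will accept from userspace, now extended with SPI_NO_CS and SPI_READY; such a mask is typically used to reject requests that set any bit outside it rather than passing them to the controller. A sketch of that style of validation with made-up bit values (the real flag definitions live in the SPI headers):

#include <stdio.h>
#include <stdint.h>

#define X_CPHA     (1 << 0)
#define X_CPOL     (1 << 1)
#define X_CS_HIGH  (1 << 2)
#define X_NO_CS    (1 << 6)
#define X_READY    (1 << 7)

#define X_MODE_MASK (X_CPHA | X_CPOL | X_CS_HIGH | X_NO_CS | X_READY)

static int check_mode(uint8_t requested)
{
        if (requested & ~X_MODE_MASK)
                return -1;              /* unknown/unsupported bit set */
        return 0;
}

int main(void)
{
        printf("%d\n", check_mode(X_CPOL | X_CPHA));    /* accepted */
        printf("%d\n", check_mode(1 << 5));             /* rejected */
        return 0;
}
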
diff --git a/drivers/video/atafb.c b/drivers/video/atafb.c index 018850c116c6..497ff8af03ed 100644 --- a/drivers/video/atafb.c +++ b/drivers/video/atafb.c | |||
@@ -2414,7 +2414,10 @@ static int atafb_get_fix(struct fb_fix_screeninfo *fix, struct fb_info *info) | |||
2414 | if (err) | 2414 | if (err) |
2415 | return err; | 2415 | return err; |
2416 | memset(fix, 0, sizeof(struct fb_fix_screeninfo)); | 2416 | memset(fix, 0, sizeof(struct fb_fix_screeninfo)); |
2417 | return fbhw->encode_fix(fix, &par); | 2417 | mutex_lock(&info->mm_lock); |
2418 | err = fbhw->encode_fix(fix, &par); | ||
2419 | mutex_unlock(&info->mm_lock); | ||
2420 | return err; | ||
2418 | } | 2421 | } |
2419 | 2422 | ||
2420 | static int atafb_get_var(struct fb_var_screeninfo *var, struct fb_info *info) | 2423 | static int atafb_get_var(struct fb_var_screeninfo *var, struct fb_info *info) |
@@ -2743,7 +2746,9 @@ static int atafb_set_par(struct fb_info *info) | |||
2743 | 2746 | ||
2744 | /* Decode wanted screen parameters */ | 2747 | /* Decode wanted screen parameters */ |
2745 | fbhw->decode_var(&info->var, par); | 2748 | fbhw->decode_var(&info->var, par); |
2749 | mutex_lock(&info->mm_lock); | ||
2746 | fbhw->encode_fix(&info->fix, par); | 2750 | fbhw->encode_fix(&info->fix, par); |
2751 | mutex_unlock(&info->mm_lock); | ||
2747 | 2752 | ||
2748 | /* Set new videomode */ | 2753 | /* Set new videomode */ |
2749 | ata_set_par(par); | 2754 | ata_set_par(par); |
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c index 5afd64482f55..cb88394ba995 100644 --- a/drivers/video/atmel_lcdfb.c +++ b/drivers/video/atmel_lcdfb.c | |||
@@ -270,7 +270,9 @@ static int atmel_lcdfb_alloc_video_memory(struct atmel_lcdfb_info *sinfo) | |||
270 | 270 | ||
271 | smem_len = (var->xres_virtual * var->yres_virtual | 271 | smem_len = (var->xres_virtual * var->yres_virtual |
272 | * ((var->bits_per_pixel + 7) / 8)); | 272 | * ((var->bits_per_pixel + 7) / 8)); |
273 | mutex_lock(&info->mm_lock); | ||
273 | info->fix.smem_len = max(smem_len, sinfo->smem_len); | 274 | info->fix.smem_len = max(smem_len, sinfo->smem_len); |
275 | mutex_unlock(&info->mm_lock); | ||
274 | 276 | ||
275 | info->screen_base = dma_alloc_writecombine(info->device, info->fix.smem_len, | 277 | info->screen_base = dma_alloc_writecombine(info->device, info->fix.smem_len, |
276 | (dma_addr_t *)&info->fix.smem_start, GFP_KERNEL); | 278 | (dma_addr_t *)&info->fix.smem_start, GFP_KERNEL); |
diff --git a/drivers/video/aty/atyfb.h b/drivers/video/aty/atyfb.h index 7691e73823d3..1f39a62f899b 100644 --- a/drivers/video/aty/atyfb.h +++ b/drivers/video/aty/atyfb.h | |||
@@ -187,6 +187,8 @@ struct atyfb_par { | |||
187 | int mtrr_reg; | 187 | int mtrr_reg; |
188 | #endif | 188 | #endif |
189 | u32 mem_cntl; | 189 | u32 mem_cntl; |
190 | struct crtc saved_crtc; | ||
191 | union aty_pll saved_pll; | ||
190 | }; | 192 | }; |
191 | 193 | ||
192 | /* | 194 | /* |
@@ -217,6 +219,7 @@ struct atyfb_par { | |||
217 | #define M64F_XL_DLL 0x00080000 | 219 | #define M64F_XL_DLL 0x00080000 |
218 | #define M64F_MFB_FORCE_4 0x00100000 | 220 | #define M64F_MFB_FORCE_4 0x00100000 |
219 | #define M64F_HW_TRIPLE 0x00200000 | 221 | #define M64F_HW_TRIPLE 0x00200000 |
222 | #define M64F_XL_MEM 0x00400000 | ||
220 | /* | 223 | /* |
221 | * Register access | 224 | * Register access |
222 | */ | 225 | */ |
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c index 1207c208a30b..63d3739d43a8 100644 --- a/drivers/video/aty/atyfb_base.c +++ b/drivers/video/aty/atyfb_base.c | |||
@@ -66,6 +66,8 @@ | |||
66 | #include <linux/spinlock.h> | 66 | #include <linux/spinlock.h> |
67 | #include <linux/wait.h> | 67 | #include <linux/wait.h> |
68 | #include <linux/backlight.h> | 68 | #include <linux/backlight.h> |
69 | #include <linux/reboot.h> | ||
70 | #include <linux/dmi.h> | ||
69 | 71 | ||
70 | #include <asm/io.h> | 72 | #include <asm/io.h> |
71 | #include <linux/uaccess.h> | 73 | #include <linux/uaccess.h> |
@@ -249,8 +251,6 @@ static int aty_init(struct fb_info *info); | |||
249 | static int store_video_par(char *videopar, unsigned char m64_num); | 251 | static int store_video_par(char *videopar, unsigned char m64_num); |
250 | #endif | 252 | #endif |
251 | 253 | ||
252 | static struct crtc saved_crtc; | ||
253 | static union aty_pll saved_pll; | ||
254 | static void aty_get_crtc(const struct atyfb_par *par, struct crtc *crtc); | 254 | static void aty_get_crtc(const struct atyfb_par *par, struct crtc *crtc); |
255 | 255 | ||
256 | static void aty_set_crtc(const struct atyfb_par *par, const struct crtc *crtc); | 256 | static void aty_set_crtc(const struct atyfb_par *par, const struct crtc *crtc); |
@@ -261,6 +261,8 @@ static void set_off_pitch(struct atyfb_par *par, const struct fb_info *info); | |||
261 | static int read_aty_sense(const struct atyfb_par *par); | 261 | static int read_aty_sense(const struct atyfb_par *par); |
262 | #endif | 262 | #endif |
263 | 263 | ||
264 | static DEFINE_MUTEX(reboot_lock); | ||
265 | static struct fb_info *reboot_info; | ||
264 | 266 | ||
265 | /* | 267 | /* |
266 | * Interface used by the world | 268 | * Interface used by the world |
@@ -361,8 +363,8 @@ static unsigned long phys_guiregbase[FB_MAX] __devinitdata = { 0, }; | |||
361 | #define ATI_CHIP_264GTPRO (ATI_MODERN_SET | M64F_SDRAM_MAGIC_PLL | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D) | 363 | #define ATI_CHIP_264GTPRO (ATI_MODERN_SET | M64F_SDRAM_MAGIC_PLL | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D) |
362 | #define ATI_CHIP_264LTPRO (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D) | 364 | #define ATI_CHIP_264LTPRO (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D) |
363 | 365 | ||
364 | #define ATI_CHIP_264XL (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4) | 366 | #define ATI_CHIP_264XL (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4 | M64F_XL_MEM) |
365 | #define ATI_CHIP_MOBILITY (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4 | M64F_MOBIL_BUS) | 367 | #define ATI_CHIP_MOBILITY (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4 | M64F_XL_MEM | M64F_MOBIL_BUS) |
366 | 368 | ||
367 | static struct { | 369 | static struct { |
368 | u16 pci_id; | 370 | u16 pci_id; |
@@ -539,6 +541,7 @@ static char ram_edo[] __devinitdata = "EDO"; | |||
539 | static char ram_sdram[] __devinitdata = "SDRAM (1:1)"; | 541 | static char ram_sdram[] __devinitdata = "SDRAM (1:1)"; |
540 | static char ram_sgram[] __devinitdata = "SGRAM (1:1)"; | 542 | static char ram_sgram[] __devinitdata = "SGRAM (1:1)"; |
541 | static char ram_sdram32[] __devinitdata = "SDRAM (2:1) (32-bit)"; | 543 | static char ram_sdram32[] __devinitdata = "SDRAM (2:1) (32-bit)"; |
544 | static char ram_wram[] __devinitdata = "WRAM"; | ||
542 | static char ram_off[] __devinitdata = "OFF"; | 545 | static char ram_off[] __devinitdata = "OFF"; |
543 | #endif /* CONFIG_FB_ATY_CT */ | 546 | #endif /* CONFIG_FB_ATY_CT */ |
544 | 547 | ||
@@ -553,6 +556,10 @@ static char *aty_gx_ram[8] __devinitdata = { | |||
553 | #ifdef CONFIG_FB_ATY_CT | 556 | #ifdef CONFIG_FB_ATY_CT |
554 | static char *aty_ct_ram[8] __devinitdata = { | 557 | static char *aty_ct_ram[8] __devinitdata = { |
555 | ram_off, ram_dram, ram_edo, ram_edo, | 558 | ram_off, ram_dram, ram_edo, ram_edo, |
559 | ram_sdram, ram_sgram, ram_wram, ram_resv | ||
560 | }; | ||
561 | static char *aty_xl_ram[8] __devinitdata = { | ||
562 | ram_off, ram_dram, ram_edo, ram_edo, | ||
556 | ram_sdram, ram_sgram, ram_sdram32, ram_resv | 563 | ram_sdram, ram_sgram, ram_sdram32, ram_resv |
557 | }; | 564 | }; |
558 | #endif /* CONFIG_FB_ATY_CT */ | 565 | #endif /* CONFIG_FB_ATY_CT */ |
@@ -760,6 +767,17 @@ static void aty_set_crtc(const struct atyfb_par *par, const struct crtc *crtc) | |||
760 | #endif /* CONFIG_FB_ATY_GENERIC_LCD */ | 767 | #endif /* CONFIG_FB_ATY_GENERIC_LCD */ |
761 | } | 768 | } |
762 | 769 | ||
770 | static u32 calc_line_length(struct atyfb_par *par, u32 vxres, u32 bpp) | ||
771 | { | ||
772 | u32 line_length = vxres * bpp / 8; | ||
773 | |||
774 | if (par->ram_type == SGRAM || | ||
775 | (!M64_HAS(XL_MEM) && par->ram_type == WRAM)) | ||
776 | line_length = (line_length + 63) & ~63; | ||
777 | |||
778 | return line_length; | ||
779 | } | ||
780 | |||
763 | static int aty_var_to_crtc(const struct fb_info *info, | 781 | static int aty_var_to_crtc(const struct fb_info *info, |
764 | const struct fb_var_screeninfo *var, struct crtc *crtc) | 782 | const struct fb_var_screeninfo *var, struct crtc *crtc) |
765 | { | 783 | { |
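
The calc_line_length() helper added above derives the byte pitch from the virtual width and depth, rounding up to a 64-byte boundary on SGRAM (and on WRAM for non-XL parts); the later hunks then use this pitch rather than xres_virtual for the video-RAM check, fix.line_length and the panning offset. A worked example of the round-up:

#include <stdio.h>

static unsigned int round_pitch(unsigned int vxres, unsigned int bpp)
{
        unsigned int line_length = vxres * bpp / 8;

        return (line_length + 63) & ~63u;       /* align up to 64 bytes */
}

int main(void)
{
        /* 800 px at 24 bpp: 2400 bytes raw, 2432 bytes after alignment */
        printf("%u\n", round_pitch(800, 24));
        return 0;
}
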
@@ -769,13 +787,14 @@ static int aty_var_to_crtc(const struct fb_info *info, | |||
769 | u32 h_total, h_disp, h_sync_strt, h_sync_end, h_sync_dly, h_sync_wid, h_sync_pol; | 787 | u32 h_total, h_disp, h_sync_strt, h_sync_end, h_sync_dly, h_sync_wid, h_sync_pol; |
770 | u32 v_total, v_disp, v_sync_strt, v_sync_end, v_sync_wid, v_sync_pol, c_sync; | 788 | u32 v_total, v_disp, v_sync_strt, v_sync_end, v_sync_wid, v_sync_pol, c_sync; |
771 | u32 pix_width, dp_pix_width, dp_chain_mask; | 789 | u32 pix_width, dp_pix_width, dp_chain_mask; |
790 | u32 line_length; | ||
772 | 791 | ||
773 | /* input */ | 792 | /* input */ |
774 | xres = var->xres; | 793 | xres = (var->xres + 7) & ~7; |
775 | yres = var->yres; | 794 | yres = var->yres; |
776 | vxres = var->xres_virtual; | 795 | vxres = (var->xres_virtual + 7) & ~7; |
777 | vyres = var->yres_virtual; | 796 | vyres = var->yres_virtual; |
778 | xoffset = var->xoffset; | 797 | xoffset = (var->xoffset + 7) & ~7; |
779 | yoffset = var->yoffset; | 798 | yoffset = var->yoffset; |
780 | bpp = var->bits_per_pixel; | 799 | bpp = var->bits_per_pixel; |
781 | if (bpp == 16) | 800 | if (bpp == 16) |
@@ -827,7 +846,9 @@ static int aty_var_to_crtc(const struct fb_info *info, | |||
827 | } else | 846 | } else |
828 | FAIL("invalid bpp"); | 847 | FAIL("invalid bpp"); |
829 | 848 | ||
830 | if (vxres * vyres * bpp / 8 > info->fix.smem_len) | 849 | line_length = calc_line_length(par, vxres, bpp); |
850 | |||
851 | if (vyres * line_length > info->fix.smem_len) | ||
831 | FAIL("not enough video RAM"); | 852 | FAIL("not enough video RAM"); |
832 | 853 | ||
833 | h_sync_pol = sync & FB_SYNC_HOR_HIGH_ACT ? 0 : 1; | 854 | h_sync_pol = sync & FB_SYNC_HOR_HIGH_ACT ? 0 : 1; |
@@ -969,7 +990,9 @@ static int aty_var_to_crtc(const struct fb_info *info, | |||
969 | crtc->xoffset = xoffset; | 990 | crtc->xoffset = xoffset; |
970 | crtc->yoffset = yoffset; | 991 | crtc->yoffset = yoffset; |
971 | crtc->bpp = bpp; | 992 | crtc->bpp = bpp; |
972 | crtc->off_pitch = ((yoffset*vxres+xoffset)*bpp/64) | (vxres<<19); | 993 | crtc->off_pitch = |
994 | ((yoffset * line_length + xoffset * bpp / 8) / 8) | | ||
995 | ((line_length / bpp) << 22); | ||
973 | crtc->vline_crnt_vline = 0; | 996 | crtc->vline_crnt_vline = 0; |
974 | 997 | ||
975 | crtc->h_tot_disp = h_total | (h_disp<<16); | 998 | crtc->h_tot_disp = h_total | (h_disp<<16); |
@@ -1394,7 +1417,9 @@ static int atyfb_set_par(struct fb_info *info) | |||
1394 | } | 1417 | } |
1395 | aty_st_8(DAC_MASK, 0xff, par); | 1418 | aty_st_8(DAC_MASK, 0xff, par); |
1396 | 1419 | ||
1397 | info->fix.line_length = var->xres_virtual * var->bits_per_pixel/8; | 1420 | info->fix.line_length = calc_line_length(par, var->xres_virtual, |
1421 | var->bits_per_pixel); | ||
1422 | |||
1398 | info->fix.visual = var->bits_per_pixel <= 8 ? | 1423 | info->fix.visual = var->bits_per_pixel <= 8 ? |
1399 | FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR; | 1424 | FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR; |
1400 | 1425 | ||
@@ -1505,10 +1530,12 @@ static void set_off_pitch(struct atyfb_par *par, const struct fb_info *info) | |||
1505 | { | 1530 | { |
1506 | u32 xoffset = info->var.xoffset; | 1531 | u32 xoffset = info->var.xoffset; |
1507 | u32 yoffset = info->var.yoffset; | 1532 | u32 yoffset = info->var.yoffset; |
1508 | u32 vxres = par->crtc.vxres; | 1533 | u32 line_length = info->fix.line_length; |
1509 | u32 bpp = info->var.bits_per_pixel; | 1534 | u32 bpp = info->var.bits_per_pixel; |
1510 | 1535 | ||
1511 | par->crtc.off_pitch = ((yoffset * vxres + xoffset) * bpp / 64) | (vxres << 19); | 1536 | par->crtc.off_pitch = |
1537 | ((yoffset * line_length + xoffset * bpp / 8) / 8) | | ||
1538 | ((line_length / bpp) << 22); | ||
1512 | } | 1539 | } |
1513 | 1540 | ||
1514 | 1541 | ||
@@ -2201,7 +2228,7 @@ static void __devinit aty_calc_mem_refresh(struct atyfb_par *par, int xclk) | |||
2201 | const int *refresh_tbl; | 2228 | const int *refresh_tbl; |
2202 | int i, size; | 2229 | int i, size; |
2203 | 2230 | ||
2204 | if (IS_XL(par->pci_id) || IS_MOBILITY(par->pci_id)) { | 2231 | if (M64_HAS(XL_MEM)) { |
2205 | refresh_tbl = ragexl_tbl; | 2232 | refresh_tbl = ragexl_tbl; |
2206 | size = ARRAY_SIZE(ragexl_tbl); | 2233 | size = ARRAY_SIZE(ragexl_tbl); |
2207 | } else { | 2234 | } else { |
@@ -2335,7 +2362,10 @@ static int __devinit aty_init(struct fb_info *info) | |||
2335 | par->pll_ops = &aty_pll_ct; | 2362 | par->pll_ops = &aty_pll_ct; |
2336 | par->bus_type = PCI; | 2363 | par->bus_type = PCI; |
2337 | par->ram_type = (aty_ld_le32(CNFG_STAT0, par) & 0x07); | 2364 | par->ram_type = (aty_ld_le32(CNFG_STAT0, par) & 0x07); |
2338 | ramname = aty_ct_ram[par->ram_type]; | 2365 | if (M64_HAS(XL_MEM)) |
2366 | ramname = aty_xl_ram[par->ram_type]; | ||
2367 | else | ||
2368 | ramname = aty_ct_ram[par->ram_type]; | ||
2339 | /* for many chips, the mclk is 67 MHz for SDRAM, 63 MHz otherwise */ | 2369 | /* for many chips, the mclk is 67 MHz for SDRAM, 63 MHz otherwise */ |
2340 | if (par->pll_limits.mclk == 67 && par->ram_type < SDRAM) | 2370 | if (par->pll_limits.mclk == 67 && par->ram_type < SDRAM) |
2341 | par->pll_limits.mclk = 63; | 2371 | par->pll_limits.mclk = 63; |
@@ -2390,9 +2420,9 @@ static int __devinit aty_init(struct fb_info *info) | |||
2390 | #endif /* CONFIG_FB_ATY_CT */ | 2420 | #endif /* CONFIG_FB_ATY_CT */ |
2391 | 2421 | ||
2392 | /* save previous video mode */ | 2422 | /* save previous video mode */ |
2393 | aty_get_crtc(par, &saved_crtc); | 2423 | aty_get_crtc(par, &par->saved_crtc); |
2394 | if(par->pll_ops->get_pll) | 2424 | if(par->pll_ops->get_pll) |
2395 | par->pll_ops->get_pll(info, &saved_pll); | 2425 | par->pll_ops->get_pll(info, &par->saved_pll); |
2396 | 2426 | ||
2397 | par->mem_cntl = aty_ld_le32(MEM_CNTL, par); | 2427 | par->mem_cntl = aty_ld_le32(MEM_CNTL, par); |
2398 | gtb_memsize = M64_HAS(GTB_DSP); | 2428 | gtb_memsize = M64_HAS(GTB_DSP); |
@@ -2667,8 +2697,8 @@ static int __devinit aty_init(struct fb_info *info) | |||
2667 | 2697 | ||
2668 | aty_init_exit: | 2698 | aty_init_exit: |
2669 | /* restore video mode */ | 2699 | /* restore video mode */ |
2670 | aty_set_crtc(par, &saved_crtc); | 2700 | aty_set_crtc(par, &par->saved_crtc); |
2671 | par->pll_ops->set_pll(info, &saved_pll); | 2701 | par->pll_ops->set_pll(info, &par->saved_pll); |
2672 | 2702 | ||
2673 | #ifdef CONFIG_MTRR | 2703 | #ifdef CONFIG_MTRR |
2674 | if (par->mtrr_reg >= 0) { | 2704 | if (par->mtrr_reg >= 0) { |
@@ -3502,6 +3532,11 @@ static int __devinit atyfb_pci_probe(struct pci_dev *pdev, const struct pci_devi | |||
3502 | par->mmap_map[1].prot_flag = _PAGE_E; | 3532 | par->mmap_map[1].prot_flag = _PAGE_E; |
3503 | #endif /* __sparc__ */ | 3533 | #endif /* __sparc__ */ |
3504 | 3534 | ||
3535 | mutex_lock(&reboot_lock); | ||
3536 | if (!reboot_info) | ||
3537 | reboot_info = info; | ||
3538 | mutex_unlock(&reboot_lock); | ||
3539 | |||
3505 | return 0; | 3540 | return 0; |
3506 | 3541 | ||
3507 | err_release_io: | 3542 | err_release_io: |
@@ -3614,8 +3649,8 @@ static void __devexit atyfb_remove(struct fb_info *info) | |||
3614 | struct atyfb_par *par = (struct atyfb_par *) info->par; | 3649 | struct atyfb_par *par = (struct atyfb_par *) info->par; |
3615 | 3650 | ||
3616 | /* restore video mode */ | 3651 | /* restore video mode */ |
3617 | aty_set_crtc(par, &saved_crtc); | 3652 | aty_set_crtc(par, &par->saved_crtc); |
3618 | par->pll_ops->set_pll(info, &saved_pll); | 3653 | par->pll_ops->set_pll(info, &par->saved_pll); |
3619 | 3654 | ||
3620 | unregister_framebuffer(info); | 3655 | unregister_framebuffer(info); |
3621 | 3656 | ||
@@ -3661,6 +3696,11 @@ static void __devexit atyfb_pci_remove(struct pci_dev *pdev) | |||
3661 | { | 3696 | { |
3662 | struct fb_info *info = pci_get_drvdata(pdev); | 3697 | struct fb_info *info = pci_get_drvdata(pdev); |
3663 | 3698 | ||
3699 | mutex_lock(&reboot_lock); | ||
3700 | if (reboot_info == info) | ||
3701 | reboot_info = NULL; | ||
3702 | mutex_unlock(&reboot_lock); | ||
3703 | |||
3664 | atyfb_remove(info); | 3704 | atyfb_remove(info); |
3665 | } | 3705 | } |
3666 | 3706 | ||
@@ -3808,6 +3848,56 @@ static int __init atyfb_setup(char *options) | |||
3808 | } | 3848 | } |
3809 | #endif /* MODULE */ | 3849 | #endif /* MODULE */ |
3810 | 3850 | ||
3851 | static int atyfb_reboot_notify(struct notifier_block *nb, | ||
3852 | unsigned long code, void *unused) | ||
3853 | { | ||
3854 | struct atyfb_par *par; | ||
3855 | |||
3856 | if (code != SYS_RESTART) | ||
3857 | return NOTIFY_DONE; | ||
3858 | |||
3859 | mutex_lock(&reboot_lock); | ||
3860 | |||
3861 | if (!reboot_info) | ||
3862 | goto out; | ||
3863 | |||
3864 | if (!lock_fb_info(reboot_info)) | ||
3865 | goto out; | ||
3866 | |||
3867 | par = reboot_info->par; | ||
3868 | |||
3869 | /* | ||
3870 | * HP OmniBook 500's BIOS doesn't like the state of the | ||
3871 | * hardware after atyfb has been used. Restore the hardware | ||
3872 | * to the original state to allow successful reboots. | ||
3873 | */ | ||
3874 | aty_set_crtc(par, &par->saved_crtc); | ||
3875 | par->pll_ops->set_pll(reboot_info, &par->saved_pll); | ||
3876 | |||
3877 | unlock_fb_info(reboot_info); | ||
3878 | out: | ||
3879 | mutex_unlock(&reboot_lock); | ||
3880 | |||
3881 | return NOTIFY_DONE; | ||
3882 | } | ||
3883 | |||
3884 | static struct notifier_block atyfb_reboot_notifier = { | ||
3885 | .notifier_call = atyfb_reboot_notify, | ||
3886 | }; | ||
3887 | |||
3888 | static const struct dmi_system_id atyfb_reboot_ids[] = { | ||
3889 | { | ||
3890 | .ident = "HP OmniBook 500", | ||
3891 | .matches = { | ||
3892 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
3893 | DMI_MATCH(DMI_PRODUCT_NAME, "HP OmniBook PC"), | ||
3894 | DMI_MATCH(DMI_PRODUCT_VERSION, "HP OmniBook 500 FA"), | ||
3895 | }, | ||
3896 | }, | ||
3897 | |||
3898 | { } | ||
3899 | }; | ||
3900 | |||
3811 | static int __init atyfb_init(void) | 3901 | static int __init atyfb_init(void) |
3812 | { | 3902 | { |
3813 | int err1 = 1, err2 = 1; | 3903 | int err1 = 1, err2 = 1; |
@@ -3826,11 +3916,20 @@ static int __init atyfb_init(void) | |||
3826 | err2 = atyfb_atari_probe(); | 3916 | err2 = atyfb_atari_probe(); |
3827 | #endif | 3917 | #endif |
3828 | 3918 | ||
3829 | return (err1 && err2) ? -ENODEV : 0; | 3919 | if (err1 && err2) |
3920 | return -ENODEV; | ||
3921 | |||
3922 | if (dmi_check_system(atyfb_reboot_ids)) | ||
3923 | register_reboot_notifier(&atyfb_reboot_notifier); | ||
3924 | |||
3925 | return 0; | ||
3830 | } | 3926 | } |
3831 | 3927 | ||
3832 | static void __exit atyfb_exit(void) | 3928 | static void __exit atyfb_exit(void) |
3833 | { | 3929 | { |
3930 | if (dmi_check_system(atyfb_reboot_ids)) | ||
3931 | unregister_reboot_notifier(&atyfb_reboot_notifier); | ||
3932 | |||
3834 | #ifdef CONFIG_PCI | 3933 | #ifdef CONFIG_PCI |
3835 | pci_unregister_driver(&atyfb_driver); | 3934 | pci_unregister_driver(&atyfb_driver); |
3836 | #endif | 3935 | #endif |
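Taken together, the atyfb hunks above amount to a standard quirk pattern: keep the boot-time CRTC/PLL state per device, remember one fb_info under a mutex, and restore the hardware from a reboot notifier that is only registered when a DMI table matches. A condensed sketch of that pattern, with illustrative identifiers rather than atyfb's own:

#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

static int mydrv_reboot_notify(struct notifier_block *nb,
			       unsigned long code, void *unused)
{
	if (code != SYS_RESTART)
		return NOTIFY_DONE;
	/* restore the saved CRTC/PLL state of the remembered device here */
	return NOTIFY_DONE;
}

static struct notifier_block mydrv_reboot_notifier = {
	.notifier_call = mydrv_reboot_notify,
};

static const struct dmi_system_id mydrv_reboot_ids[] = {
	{
		.ident = "Example quirky machine",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Example Product"),
		},
	},
	{ }	/* terminator */
};

static int __init mydrv_init(void)
{
	/* only pay for the notifier on machines that actually need it */
	if (dmi_check_system(mydrv_reboot_ids))
		register_reboot_notifier(&mydrv_reboot_notifier);
	return 0;
}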
diff --git a/drivers/video/aty/mach64_accel.c b/drivers/video/aty/mach64_accel.c index 0cc9724e61a2..51fcc0a2c94a 100644 --- a/drivers/video/aty/mach64_accel.c +++ b/drivers/video/aty/mach64_accel.c | |||
@@ -63,14 +63,17 @@ static void reset_GTC_3D_engine(const struct atyfb_par *par) | |||
63 | void aty_init_engine(struct atyfb_par *par, struct fb_info *info) | 63 | void aty_init_engine(struct atyfb_par *par, struct fb_info *info) |
64 | { | 64 | { |
65 | u32 pitch_value; | 65 | u32 pitch_value; |
66 | u32 vxres; | ||
66 | 67 | ||
67 | /* determine modal information from global mode structure */ | 68 | /* determine modal information from global mode structure */ |
68 | pitch_value = info->var.xres_virtual; | 69 | pitch_value = info->fix.line_length / (info->var.bits_per_pixel / 8); |
70 | vxres = info->var.xres_virtual; | ||
69 | 71 | ||
70 | if (info->var.bits_per_pixel == 24) { | 72 | if (info->var.bits_per_pixel == 24) { |
71 | /* In 24 bpp, the engine is in 8 bpp - this requires that all */ | 73 | /* In 24 bpp, the engine is in 8 bpp - this requires that all */ |
72 | /* horizontal coordinates and widths must be adjusted */ | 74 | /* horizontal coordinates and widths must be adjusted */ |
73 | pitch_value *= 3; | 75 | pitch_value *= 3; |
76 | vxres *= 3; | ||
74 | } | 77 | } |
75 | 78 | ||
76 | /* On GTC (RagePro), we need to reset the 3D engine before */ | 79 | /* On GTC (RagePro), we need to reset the 3D engine before */ |
@@ -133,7 +136,7 @@ void aty_init_engine(struct atyfb_par *par, struct fb_info *info) | |||
133 | aty_st_le32(SC_LEFT, 0, par); | 136 | aty_st_le32(SC_LEFT, 0, par); |
134 | aty_st_le32(SC_TOP, 0, par); | 137 | aty_st_le32(SC_TOP, 0, par); |
135 | aty_st_le32(SC_BOTTOM, par->crtc.vyres - 1, par); | 138 | aty_st_le32(SC_BOTTOM, par->crtc.vyres - 1, par); |
136 | aty_st_le32(SC_RIGHT, pitch_value - 1, par); | 139 | aty_st_le32(SC_RIGHT, vxres - 1, par); |
137 | 140 | ||
138 | /* set background color to minimum value (usually BLACK) */ | 141 | /* set background color to minimum value (usually BLACK) */ |
139 | aty_st_le32(DP_BKGD_CLR, 0, par); | 142 | aty_st_le32(DP_BKGD_CLR, 0, par); |
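The accel engine's pitch now comes from fix.line_length (the real bytes per scanline) rather than xres_virtual, while the scissor still bounds drawing at the virtual width; in 24 bpp both are scaled by 3 because the engine runs in 8 bpp units. In sketch form, assuming the usual fb_info fields:

#include <linux/fb.h>

/* Illustrative helper mirroring the pitch/scissor split above. */
static void example_engine_geometry(struct fb_info *info,
				    u32 *pitch_value, u32 *vxres)
{
	u32 bytespp = info->var.bits_per_pixel / 8;

	*pitch_value = info->fix.line_length / bytespp;	/* pixels per line */
	*vxres = info->var.xres_virtual;

	if (info->var.bits_per_pixel == 24) {
		/* engine works in 8 bpp units: scale horizontal values by 3 */
		*pitch_value *= 3;
		*vxres *= 3;
	}
	/* pitch programs the blit pitch; the scissor right edge is *vxres - 1 */
}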
diff --git a/drivers/video/backlight/tdo24m.c b/drivers/video/backlight/tdo24m.c index 1dae7f8f3c6b..51422fc4f606 100644 --- a/drivers/video/backlight/tdo24m.c +++ b/drivers/video/backlight/tdo24m.c | |||
@@ -356,7 +356,7 @@ static int __devinit tdo24m_probe(struct spi_device *spi) | |||
356 | lcd->power = FB_BLANK_POWERDOWN; | 356 | lcd->power = FB_BLANK_POWERDOWN; |
357 | lcd->mode = MODE_VGA; /* default to VGA */ | 357 | lcd->mode = MODE_VGA; /* default to VGA */ |
358 | 358 | ||
359 | lcd->buf = kmalloc(TDO24M_SPI_BUFF_SIZE, sizeof(GFP_KERNEL)); | 359 | lcd->buf = kmalloc(TDO24M_SPI_BUFF_SIZE, GFP_KERNEL); |
360 | if (lcd->buf == NULL) { | 360 | if (lcd->buf == NULL) { |
361 | kfree(lcd); | 361 | kfree(lcd); |
362 | return -ENOMEM; | 362 | return -ENOMEM; |
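The old call passed sizeof(GFP_KERNEL) where the allocation flags belong; kmalloc() takes a gfp_t mask as its second argument, and since gfp_t is just an integer type the mistake compiles silently while handing the allocator a meaningless flag set instead of GFP_KERNEL. The corrected call, for reference:

	lcd->buf = kmalloc(TDO24M_SPI_BUFF_SIZE, GFP_KERNEL);	/* (size, gfp flags) */
	if (lcd->buf == NULL) {
		kfree(lcd);
		return -ENOMEM;
	}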
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c index f8a09bf8d0cd..53ea05645ff8 100644 --- a/drivers/video/fbmem.c +++ b/drivers/video/fbmem.c | |||
@@ -1310,8 +1310,6 @@ static long fb_compat_ioctl(struct file *file, unsigned int cmd, | |||
1310 | 1310 | ||
1311 | static int | 1311 | static int |
1312 | fb_mmap(struct file *file, struct vm_area_struct * vma) | 1312 | fb_mmap(struct file *file, struct vm_area_struct * vma) |
1313 | __acquires(&info->lock) | ||
1314 | __releases(&info->lock) | ||
1315 | { | 1313 | { |
1316 | int fbidx = iminor(file->f_path.dentry->d_inode); | 1314 | int fbidx = iminor(file->f_path.dentry->d_inode); |
1317 | struct fb_info *info = registered_fb[fbidx]; | 1315 | struct fb_info *info = registered_fb[fbidx]; |
@@ -1325,16 +1323,14 @@ __releases(&info->lock) | |||
1325 | off = vma->vm_pgoff << PAGE_SHIFT; | 1323 | off = vma->vm_pgoff << PAGE_SHIFT; |
1326 | if (!fb) | 1324 | if (!fb) |
1327 | return -ENODEV; | 1325 | return -ENODEV; |
1326 | mutex_lock(&info->mm_lock); | ||
1328 | if (fb->fb_mmap) { | 1327 | if (fb->fb_mmap) { |
1329 | int res; | 1328 | int res; |
1330 | mutex_lock(&info->lock); | ||
1331 | res = fb->fb_mmap(info, vma); | 1329 | res = fb->fb_mmap(info, vma); |
1332 | mutex_unlock(&info->lock); | 1330 | mutex_unlock(&info->mm_lock); |
1333 | return res; | 1331 | return res; |
1334 | } | 1332 | } |
1335 | 1333 | ||
1336 | mutex_lock(&info->lock); | ||
1337 | |||
1338 | /* frame buffer memory */ | 1334 | /* frame buffer memory */ |
1339 | start = info->fix.smem_start; | 1335 | start = info->fix.smem_start; |
1340 | len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.smem_len); | 1336 | len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.smem_len); |
@@ -1342,13 +1338,13 @@ __releases(&info->lock) | |||
1342 | /* memory mapped io */ | 1338 | /* memory mapped io */ |
1343 | off -= len; | 1339 | off -= len; |
1344 | if (info->var.accel_flags) { | 1340 | if (info->var.accel_flags) { |
1345 | mutex_unlock(&info->lock); | 1341 | mutex_unlock(&info->mm_lock); |
1346 | return -EINVAL; | 1342 | return -EINVAL; |
1347 | } | 1343 | } |
1348 | start = info->fix.mmio_start; | 1344 | start = info->fix.mmio_start; |
1349 | len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.mmio_len); | 1345 | len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.mmio_len); |
1350 | } | 1346 | } |
1351 | mutex_unlock(&info->lock); | 1347 | mutex_unlock(&info->mm_lock); |
1352 | start &= PAGE_MASK; | 1348 | start &= PAGE_MASK; |
1353 | if ((vma->vm_end - vma->vm_start + off) > len) | 1349 | if ((vma->vm_end - vma->vm_start + off) > len) |
1354 | return -EINVAL; | 1350 | return -EINVAL; |
@@ -1518,6 +1514,7 @@ register_framebuffer(struct fb_info *fb_info) | |||
1518 | break; | 1514 | break; |
1519 | fb_info->node = i; | 1515 | fb_info->node = i; |
1520 | mutex_init(&fb_info->lock); | 1516 | mutex_init(&fb_info->lock); |
1517 | mutex_init(&fb_info->mm_lock); | ||
1521 | 1518 | ||
1522 | fb_info->dev = device_create(fb_class, fb_info->device, | 1519 | fb_info->dev = device_create(fb_class, fb_info->device, |
1523 | MKDEV(FB_MAJOR, i), NULL, "fb%d", i); | 1520 | MKDEV(FB_MAJOR, i), NULL, "fb%d", i); |
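The fbmem.c hunk introduces fb_info->mm_lock and switches fb_mmap() over to it (the old info->lock and its sparse __acquires/__releases annotations go away); the long run of driver hunks that follows (i810, matrox, omapfb, platinumfb, pxafb, sis, sm501fb, w100fb, fsl-diu, mx3fb) takes the same mutex around every update of fix.smem_start and fix.smem_len. The driver-side half of the contract looks roughly like this, with illustrative names:

#include <linux/fb.h>
#include <linux/mutex.h>

/* Publish a new video memory location/size so a concurrent fb_mmap()
 * always sees a consistent (start, len) pair. */
static void mydrv_publish_vram(struct fb_info *info,
			       unsigned long phys, u32 len)
{
	mutex_lock(&info->mm_lock);
	info->fix.smem_start = phys;
	info->fix.smem_len = len;
	mutex_unlock(&info->mm_lock);
}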
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c index f153c581cbd7..0bf2190928d0 100644 --- a/drivers/video/fsl-diu-fb.c +++ b/drivers/video/fsl-diu-fb.c | |||
@@ -750,24 +750,26 @@ static void update_lcdc(struct fb_info *info) | |||
750 | static int map_video_memory(struct fb_info *info) | 750 | static int map_video_memory(struct fb_info *info) |
751 | { | 751 | { |
752 | phys_addr_t phys; | 752 | phys_addr_t phys; |
753 | u32 smem_len = info->fix.line_length * info->var.yres_virtual; | ||
753 | 754 | ||
754 | pr_debug("info->var.xres_virtual = %d\n", info->var.xres_virtual); | 755 | pr_debug("info->var.xres_virtual = %d\n", info->var.xres_virtual); |
755 | pr_debug("info->var.yres_virtual = %d\n", info->var.yres_virtual); | 756 | pr_debug("info->var.yres_virtual = %d\n", info->var.yres_virtual); |
756 | pr_debug("info->fix.line_length = %d\n", info->fix.line_length); | 757 | pr_debug("info->fix.line_length = %d\n", info->fix.line_length); |
758 | pr_debug("MAP_VIDEO_MEMORY: smem_len = %u\n", smem_len); | ||
757 | 759 | ||
758 | info->fix.smem_len = info->fix.line_length * info->var.yres_virtual; | 760 | info->screen_base = fsl_diu_alloc(smem_len, &phys); |
759 | pr_debug("MAP_VIDEO_MEMORY: smem_len = %d\n", info->fix.smem_len); | ||
760 | info->screen_base = fsl_diu_alloc(info->fix.smem_len, &phys); | ||
761 | if (info->screen_base == NULL) { | 761 | if (info->screen_base == NULL) { |
762 | printk(KERN_ERR "Unable to allocate fb memory\n"); | 762 | printk(KERN_ERR "Unable to allocate fb memory\n"); |
763 | return -ENOMEM; | 763 | return -ENOMEM; |
764 | } | 764 | } |
765 | mutex_lock(&info->mm_lock); | ||
765 | info->fix.smem_start = (unsigned long) phys; | 766 | info->fix.smem_start = (unsigned long) phys; |
767 | info->fix.smem_len = smem_len; | ||
768 | mutex_unlock(&info->mm_lock); | ||
766 | info->screen_size = info->fix.smem_len; | 769 | info->screen_size = info->fix.smem_len; |
767 | 770 | ||
768 | pr_debug("Allocated fb @ paddr=0x%08lx, size=%d.\n", | 771 | pr_debug("Allocated fb @ paddr=0x%08lx, size=%d.\n", |
769 | info->fix.smem_start, | 772 | info->fix.smem_start, info->fix.smem_len); |
770 | info->fix.smem_len); | ||
771 | pr_debug("screen base %p\n", info->screen_base); | 773 | pr_debug("screen base %p\n", info->screen_base); |
772 | 774 | ||
773 | return 0; | 775 | return 0; |
@@ -776,9 +778,11 @@ static int map_video_memory(struct fb_info *info) | |||
776 | static void unmap_video_memory(struct fb_info *info) | 778 | static void unmap_video_memory(struct fb_info *info) |
777 | { | 779 | { |
778 | fsl_diu_free(info->screen_base, info->fix.smem_len); | 780 | fsl_diu_free(info->screen_base, info->fix.smem_len); |
781 | mutex_lock(&info->mm_lock); | ||
779 | info->screen_base = NULL; | 782 | info->screen_base = NULL; |
780 | info->fix.smem_start = 0; | 783 | info->fix.smem_start = 0; |
781 | info->fix.smem_len = 0; | 784 | info->fix.smem_len = 0; |
785 | mutex_unlock(&info->mm_lock); | ||
782 | } | 786 | } |
783 | 787 | ||
784 | /* | 788 | /* |
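The unmap path is the other half of that contract: the published fields are cleared under the same mutex, so a racing fb_mmap() sees a zero-length region instead of a stale physical range. Sketch, again with illustrative names:

static void mydrv_unpublish_vram(struct fb_info *info)
{
	mutex_lock(&info->mm_lock);
	info->screen_base = NULL;
	info->fix.smem_start = 0;
	info->fix.smem_len = 0;
	mutex_unlock(&info->mm_lock);
}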
diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c index 2e940199fc89..71960672d721 100644 --- a/drivers/video/i810/i810_main.c +++ b/drivers/video/i810/i810_main.c | |||
@@ -1090,8 +1090,10 @@ static int encode_fix(struct fb_fix_screeninfo *fix, struct fb_info *info) | |||
1090 | memset(fix, 0, sizeof(struct fb_fix_screeninfo)); | 1090 | memset(fix, 0, sizeof(struct fb_fix_screeninfo)); |
1091 | 1091 | ||
1092 | strcpy(fix->id, "I810"); | 1092 | strcpy(fix->id, "I810"); |
1093 | mutex_lock(&info->mm_lock); | ||
1093 | fix->smem_start = par->fb.physical; | 1094 | fix->smem_start = par->fb.physical; |
1094 | fix->smem_len = par->fb.size; | 1095 | fix->smem_len = par->fb.size; |
1096 | mutex_unlock(&info->mm_lock); | ||
1095 | fix->type = FB_TYPE_PACKED_PIXELS; | 1097 | fix->type = FB_TYPE_PACKED_PIXELS; |
1096 | fix->type_aux = 0; | 1098 | fix->type_aux = 0; |
1097 | fix->xpanstep = 8; | 1099 | fix->xpanstep = 8; |
diff --git a/drivers/video/matrox/matroxfb_base.c b/drivers/video/matrox/matroxfb_base.c index 8e7a275df50c..59c3a2e14913 100644 --- a/drivers/video/matrox/matroxfb_base.c +++ b/drivers/video/matrox/matroxfb_base.c | |||
@@ -724,8 +724,10 @@ static void matroxfb_update_fix(WPMINFO2) | |||
724 | struct fb_fix_screeninfo *fix = &ACCESS_FBINFO(fbcon).fix; | 724 | struct fb_fix_screeninfo *fix = &ACCESS_FBINFO(fbcon).fix; |
725 | DBG(__func__) | 725 | DBG(__func__) |
726 | 726 | ||
727 | mutex_lock(&ACCESS_FBINFO(fbcon).mm_lock); | ||
727 | fix->smem_start = ACCESS_FBINFO(video.base) + ACCESS_FBINFO(curr.ydstorg.bytes); | 728 | fix->smem_start = ACCESS_FBINFO(video.base) + ACCESS_FBINFO(curr.ydstorg.bytes); |
728 | fix->smem_len = ACCESS_FBINFO(video.len_usable) - ACCESS_FBINFO(curr.ydstorg.bytes); | 729 | fix->smem_len = ACCESS_FBINFO(video.len_usable) - ACCESS_FBINFO(curr.ydstorg.bytes); |
730 | mutex_unlock(&ACCESS_FBINFO(fbcon).mm_lock); | ||
729 | } | 731 | } |
730 | 732 | ||
731 | static int matroxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) | 733 | static int matroxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) |
@@ -2081,6 +2083,7 @@ static int matroxfb_probe(struct pci_dev* pdev, const struct pci_device_id* dumm | |||
2081 | spin_lock_init(&ACCESS_FBINFO(lock.accel)); | 2083 | spin_lock_init(&ACCESS_FBINFO(lock.accel)); |
2082 | init_rwsem(&ACCESS_FBINFO(crtc2.lock)); | 2084 | init_rwsem(&ACCESS_FBINFO(crtc2.lock)); |
2083 | init_rwsem(&ACCESS_FBINFO(altout.lock)); | 2085 | init_rwsem(&ACCESS_FBINFO(altout.lock)); |
2086 | mutex_init(&ACCESS_FBINFO(fbcon).mm_lock); | ||
2084 | ACCESS_FBINFO(irq_flags) = 0; | 2087 | ACCESS_FBINFO(irq_flags) = 0; |
2085 | init_waitqueue_head(&ACCESS_FBINFO(crtc1.vsync.wait)); | 2088 | init_waitqueue_head(&ACCESS_FBINFO(crtc1.vsync.wait)); |
2086 | init_waitqueue_head(&ACCESS_FBINFO(crtc2.vsync.wait)); | 2089 | init_waitqueue_head(&ACCESS_FBINFO(crtc2.vsync.wait)); |
diff --git a/drivers/video/matrox/matroxfb_crtc2.c b/drivers/video/matrox/matroxfb_crtc2.c index 7ac4c5f6145d..909e10a11898 100644 --- a/drivers/video/matrox/matroxfb_crtc2.c +++ b/drivers/video/matrox/matroxfb_crtc2.c | |||
@@ -289,13 +289,16 @@ static int matroxfb_dh_release(struct fb_info* info, int user) { | |||
289 | #undef m2info | 289 | #undef m2info |
290 | } | 290 | } |
291 | 291 | ||
292 | static void matroxfb_dh_init_fix(struct matroxfb_dh_fb_info *m2info) { | 292 | static void matroxfb_dh_init_fix(struct matroxfb_dh_fb_info *m2info) |
293 | { | ||
293 | struct fb_fix_screeninfo *fix = &m2info->fbcon.fix; | 294 | struct fb_fix_screeninfo *fix = &m2info->fbcon.fix; |
294 | 295 | ||
295 | strcpy(fix->id, "MATROX DH"); | 296 | strcpy(fix->id, "MATROX DH"); |
296 | 297 | ||
298 | mutex_lock(&m2info->fbcon.mm_lock); | ||
297 | fix->smem_start = m2info->video.base; | 299 | fix->smem_start = m2info->video.base; |
298 | fix->smem_len = m2info->video.len_usable; | 300 | fix->smem_len = m2info->video.len_usable; |
301 | mutex_unlock(&m2info->fbcon.mm_lock); | ||
299 | fix->ypanstep = 1; | 302 | fix->ypanstep = 1; |
300 | fix->ywrapstep = 0; | 303 | fix->ywrapstep = 0; |
301 | fix->xpanstep = 8; /* TBD */ | 304 | fix->xpanstep = 8; /* TBD */ |
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c index b7af5256e887..567fb944bd2a 100644 --- a/drivers/video/mx3fb.c +++ b/drivers/video/mx3fb.c | |||
@@ -669,7 +669,7 @@ static uint32_t bpp_to_pixfmt(int bpp) | |||
669 | } | 669 | } |
670 | 670 | ||
671 | static int mx3fb_blank(int blank, struct fb_info *fbi); | 671 | static int mx3fb_blank(int blank, struct fb_info *fbi); |
672 | static int mx3fb_map_video_memory(struct fb_info *fbi); | 672 | static int mx3fb_map_video_memory(struct fb_info *fbi, unsigned int mem_len); |
673 | static int mx3fb_unmap_video_memory(struct fb_info *fbi); | 673 | static int mx3fb_unmap_video_memory(struct fb_info *fbi); |
674 | 674 | ||
675 | /** | 675 | /** |
@@ -742,8 +742,7 @@ static int mx3fb_set_par(struct fb_info *fbi) | |||
742 | if (fbi->fix.smem_start) | 742 | if (fbi->fix.smem_start) |
743 | mx3fb_unmap_video_memory(fbi); | 743 | mx3fb_unmap_video_memory(fbi); |
744 | 744 | ||
745 | fbi->fix.smem_len = mem_len; | 745 | if (mx3fb_map_video_memory(fbi, mem_len) < 0) { |
746 | if (mx3fb_map_video_memory(fbi) < 0) { | ||
747 | mutex_unlock(&mx3_fbi->mutex); | 746 | mutex_unlock(&mx3_fbi->mutex); |
748 | return -ENOMEM; | 747 | return -ENOMEM; |
749 | } | 748 | } |
@@ -1198,6 +1197,7 @@ static int mx3fb_resume(struct platform_device *pdev) | |||
1198 | /** | 1197 | /** |
1199 | * mx3fb_map_video_memory() - allocates the DRAM memory for the frame buffer. | 1198 | * mx3fb_map_video_memory() - allocates the DRAM memory for the frame buffer. |
1200 | * @fbi: framebuffer information pointer | 1199 | * @fbi: framebuffer information pointer |
1200 | * @mem_len: length of mapped memory | ||
1201 | * @return: Error code indicating success or failure | 1201 | * @return: Error code indicating success or failure |
1202 | * | 1202 | * |
1203 | * This buffer is remapped into a non-cached, non-buffered, memory region to | 1203 | * This buffer is remapped into a non-cached, non-buffered, memory region to |
@@ -1205,23 +1205,26 @@ static int mx3fb_resume(struct platform_device *pdev) | |||
1205 | * area is remapped, all virtual memory access to the video memory should occur | 1205 | * area is remapped, all virtual memory access to the video memory should occur |
1206 | * at the new region. | 1206 | * at the new region. |
1207 | */ | 1207 | */ |
1208 | static int mx3fb_map_video_memory(struct fb_info *fbi) | 1208 | static int mx3fb_map_video_memory(struct fb_info *fbi, unsigned int mem_len) |
1209 | { | 1209 | { |
1210 | int retval = 0; | 1210 | int retval = 0; |
1211 | dma_addr_t addr; | 1211 | dma_addr_t addr; |
1212 | 1212 | ||
1213 | fbi->screen_base = dma_alloc_writecombine(fbi->device, | 1213 | fbi->screen_base = dma_alloc_writecombine(fbi->device, |
1214 | fbi->fix.smem_len, | 1214 | mem_len, |
1215 | &addr, GFP_DMA); | 1215 | &addr, GFP_DMA); |
1216 | 1216 | ||
1217 | if (!fbi->screen_base) { | 1217 | if (!fbi->screen_base) { |
1218 | dev_err(fbi->device, "Cannot allocate %u bytes framebuffer memory\n", | 1218 | dev_err(fbi->device, "Cannot allocate %u bytes framebuffer memory\n", |
1219 | fbi->fix.smem_len); | 1219 | mem_len); |
1220 | retval = -EBUSY; | 1220 | retval = -EBUSY; |
1221 | goto err0; | 1221 | goto err0; |
1222 | } | 1222 | } |
1223 | 1223 | ||
1224 | mutex_lock(&fbi->mm_lock); | ||
1224 | fbi->fix.smem_start = addr; | 1225 | fbi->fix.smem_start = addr; |
1226 | fbi->fix.smem_len = mem_len; | ||
1227 | mutex_unlock(&fbi->mm_lock); | ||
1225 | 1228 | ||
1226 | dev_dbg(fbi->device, "allocated fb @ p=0x%08x, v=0x%p, size=%d.\n", | 1229 | dev_dbg(fbi->device, "allocated fb @ p=0x%08x, v=0x%p, size=%d.\n", |
1227 | (uint32_t) fbi->fix.smem_start, fbi->screen_base, fbi->fix.smem_len); | 1230 | (uint32_t) fbi->fix.smem_start, fbi->screen_base, fbi->fix.smem_len); |
@@ -1251,8 +1254,10 @@ static int mx3fb_unmap_video_memory(struct fb_info *fbi) | |||
1251 | fbi->screen_base, fbi->fix.smem_start); | 1254 | fbi->screen_base, fbi->fix.smem_start); |
1252 | 1255 | ||
1253 | fbi->screen_base = 0; | 1256 | fbi->screen_base = 0; |
1257 | mutex_lock(&fbi->mm_lock); | ||
1254 | fbi->fix.smem_start = 0; | 1258 | fbi->fix.smem_start = 0; |
1255 | fbi->fix.smem_len = 0; | 1259 | fbi->fix.smem_len = 0; |
1260 | mutex_unlock(&fbi->mm_lock); | ||
1256 | return 0; | 1261 | return 0; |
1257 | } | 1262 | } |
1258 | 1263 | ||
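mx3fb makes the ordering explicit: the caller passes the wanted length into the mapping helper, and fix.smem_len is only written, under mm_lock, once dma_alloc_writecombine() has actually succeeded, so a failed allocation can no longer leave a stale length behind. Roughly, with illustrative names:

#include <linux/dma-mapping.h>
#include <linux/fb.h>

static int mydrv_map_video_memory(struct fb_info *fbi, unsigned int mem_len)
{
	dma_addr_t addr;

	fbi->screen_base = dma_alloc_writecombine(fbi->device, mem_len,
						  &addr, GFP_DMA);
	if (!fbi->screen_base)
		return -ENOMEM;

	mutex_lock(&fbi->mm_lock);
	fbi->fix.smem_start = addr;
	fbi->fix.smem_len = mem_len;	/* published only on success */
	mutex_unlock(&fbi->mm_lock);
	return 0;
}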
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c index 060d72fe57cb..4ea99bfc37b4 100644 --- a/drivers/video/omap/omapfb_main.c +++ b/drivers/video/omap/omapfb_main.c | |||
@@ -393,8 +393,10 @@ static void set_fb_fix(struct fb_info *fbi) | |||
393 | 393 | ||
394 | rg = &plane->fbdev->mem_desc.region[plane->idx]; | 394 | rg = &plane->fbdev->mem_desc.region[plane->idx]; |
395 | fbi->screen_base = rg->vaddr; | 395 | fbi->screen_base = rg->vaddr; |
396 | mutex_lock(&fbi->mm_lock); | ||
396 | fix->smem_start = rg->paddr; | 397 | fix->smem_start = rg->paddr; |
397 | fix->smem_len = rg->size; | 398 | fix->smem_len = rg->size; |
399 | mutex_unlock(&fbi->mm_lock); | ||
398 | 400 | ||
399 | fix->type = FB_TYPE_PACKED_PIXELS; | 401 | fix->type = FB_TYPE_PACKED_PIXELS; |
400 | bpp = var->bits_per_pixel; | 402 | bpp = var->bits_per_pixel; |
@@ -886,8 +888,10 @@ static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi) | |||
886 | * plane memory is dealloce'd, the other | 888 | * plane memory is dealloce'd, the other |
887 | * screen parameters in var / fix are invalid. | 889 | * screen parameters in var / fix are invalid. |
888 | */ | 890 | */ |
891 | mutex_lock(&fbi->mm_lock); | ||
889 | fbi->fix.smem_start = 0; | 892 | fbi->fix.smem_start = 0; |
890 | fbi->fix.smem_len = 0; | 893 | fbi->fix.smem_len = 0; |
894 | mutex_unlock(&fbi->mm_lock); | ||
891 | } | 895 | } |
892 | } | 896 | } |
893 | } | 897 | } |
diff --git a/drivers/video/platinumfb.c b/drivers/video/platinumfb.c index 03b3670130a0..bacfabd9ce16 100644 --- a/drivers/video/platinumfb.c +++ b/drivers/video/platinumfb.c | |||
@@ -141,7 +141,9 @@ static int platinumfb_set_par (struct fb_info *info) | |||
141 | offset = 0x10; | 141 | offset = 0x10; |
142 | 142 | ||
143 | info->screen_base = pinfo->frame_buffer + init->fb_offset + offset; | 143 | info->screen_base = pinfo->frame_buffer + init->fb_offset + offset; |
144 | mutex_lock(&info->mm_lock); | ||
144 | info->fix.smem_start = (pinfo->frame_buffer_phys) + init->fb_offset + offset; | 145 | info->fix.smem_start = (pinfo->frame_buffer_phys) + init->fb_offset + offset; |
146 | mutex_unlock(&info->mm_lock); | ||
145 | info->fix.visual = (pinfo->cmode == CMODE_8) ? | 147 | info->fix.visual = (pinfo->cmode == CMODE_8) ? |
146 | FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR; | 148 | FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR; |
147 | info->fix.line_length = vmode_attrs[pinfo->vmode-1].hres * (1<<pinfo->cmode) | 149 | info->fix.line_length = vmode_attrs[pinfo->vmode-1].hres * (1<<pinfo->cmode) |
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c index 0889d50c3288..6506117c134b 100644 --- a/drivers/video/pxafb.c +++ b/drivers/video/pxafb.c | |||
@@ -815,8 +815,10 @@ static int overlayfb_map_video_memory(struct pxafb_layer *ofb) | |||
815 | ofb->video_mem_phys = virt_to_phys(ofb->video_mem); | 815 | ofb->video_mem_phys = virt_to_phys(ofb->video_mem); |
816 | ofb->video_mem_size = size; | 816 | ofb->video_mem_size = size; |
817 | 817 | ||
818 | mutex_lock(&ofb->fb.mm_lock); | ||
818 | ofb->fb.fix.smem_start = ofb->video_mem_phys; | 819 | ofb->fb.fix.smem_start = ofb->video_mem_phys; |
819 | ofb->fb.fix.smem_len = ofb->fb.fix.line_length * var->yres_virtual; | 820 | ofb->fb.fix.smem_len = ofb->fb.fix.line_length * var->yres_virtual; |
821 | mutex_unlock(&ofb->fb.mm_lock); | ||
820 | ofb->fb.screen_base = ofb->video_mem; | 822 | ofb->fb.screen_base = ofb->video_mem; |
821 | return 0; | 823 | return 0; |
822 | } | 824 | } |
diff --git a/drivers/video/sh7760fb.c b/drivers/video/sh7760fb.c index 653bdfee3057..9f6d6e61f0cc 100644 --- a/drivers/video/sh7760fb.c +++ b/drivers/video/sh7760fb.c | |||
@@ -120,18 +120,6 @@ static int sh7760_setcolreg (u_int regno, | |||
120 | return 0; | 120 | return 0; |
121 | } | 121 | } |
122 | 122 | ||
123 | static void encode_fix(struct fb_fix_screeninfo *fix, struct fb_info *info, | ||
124 | unsigned long stride) | ||
125 | { | ||
126 | memset(fix, 0, sizeof(struct fb_fix_screeninfo)); | ||
127 | strcpy(fix->id, "sh7760-lcdc"); | ||
128 | |||
129 | fix->smem_start = (unsigned long)info->screen_base; | ||
130 | fix->smem_len = info->screen_size; | ||
131 | |||
132 | fix->line_length = stride; | ||
133 | } | ||
134 | |||
135 | static int sh7760fb_get_color_info(struct device *dev, | 123 | static int sh7760fb_get_color_info(struct device *dev, |
136 | u16 lddfr, int *bpp, int *gray) | 124 | u16 lddfr, int *bpp, int *gray) |
137 | { | 125 | { |
@@ -334,7 +322,8 @@ static int sh7760fb_set_par(struct fb_info *info) | |||
334 | 322 | ||
335 | iowrite32(ldsarl, par->base + LDSARL); /* mem for lower half of DSTN */ | 323 | iowrite32(ldsarl, par->base + LDSARL); /* mem for lower half of DSTN */ |
336 | 324 | ||
337 | encode_fix(&info->fix, info, stride); | 325 | info->fix.line_length = stride; |
326 | |||
338 | sh7760fb_check_var(&info->var, info); | 327 | sh7760fb_check_var(&info->var, info); |
339 | 328 | ||
340 | sh7760fb_blank(FB_BLANK_UNBLANK, info); /* panel on! */ | 329 | sh7760fb_blank(FB_BLANK_UNBLANK, info); /* panel on! */ |
@@ -435,6 +424,8 @@ static int sh7760fb_alloc_mem(struct fb_info *info) | |||
435 | 424 | ||
436 | info->screen_base = fbmem; | 425 | info->screen_base = fbmem; |
437 | info->screen_size = vram; | 426 | info->screen_size = vram; |
427 | info->fix.smem_start = (unsigned long)info->screen_base; | ||
428 | info->fix.smem_len = info->screen_size; | ||
438 | 429 | ||
439 | return 0; | 430 | return 0; |
440 | } | 431 | } |
@@ -520,6 +511,8 @@ static int __devinit sh7760fb_probe(struct platform_device *pdev) | |||
520 | info->var.transp.length = 0; | 511 | info->var.transp.length = 0; |
521 | info->var.transp.msb_right = 0; | 512 | info->var.transp.msb_right = 0; |
522 | 513 | ||
514 | strcpy(info->fix.id, "sh7760-lcdc"); | ||
515 | |||
523 | /* set the DON2 bit now, before cmap allocation, as it will randomize | 516 | /* set the DON2 bit now, before cmap allocation, as it will randomize |
524 | * palette memory. | 517 | * palette memory. |
525 | */ | 518 | */ |
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c index f10d2fbeda06..da983b720f08 100644 --- a/drivers/video/sh_mobile_lcdcfb.c +++ b/drivers/video/sh_mobile_lcdcfb.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/platform_device.h> | 17 | #include <linux/platform_device.h> |
18 | #include <linux/dma-mapping.h> | 18 | #include <linux/dma-mapping.h> |
19 | #include <linux/interrupt.h> | 19 | #include <linux/interrupt.h> |
20 | #include <linux/vmalloc.h> | ||
20 | #include <video/sh_mobile_lcdc.h> | 21 | #include <video/sh_mobile_lcdc.h> |
21 | #include <asm/atomic.h> | 22 | #include <asm/atomic.h> |
22 | 23 | ||
@@ -33,6 +34,7 @@ struct sh_mobile_lcdc_chan { | |||
33 | struct fb_info info; | 34 | struct fb_info info; |
34 | dma_addr_t dma_handle; | 35 | dma_addr_t dma_handle; |
35 | struct fb_deferred_io defio; | 36 | struct fb_deferred_io defio; |
37 | struct scatterlist *sglist; | ||
36 | unsigned long frame_end; | 38 | unsigned long frame_end; |
37 | wait_queue_head_t frame_end_wait; | 39 | wait_queue_head_t frame_end_wait; |
38 | }; | 40 | }; |
@@ -206,16 +208,38 @@ static void sh_mobile_lcdc_clk_on(struct sh_mobile_lcdc_priv *priv) {} | |||
206 | static void sh_mobile_lcdc_clk_off(struct sh_mobile_lcdc_priv *priv) {} | 208 | static void sh_mobile_lcdc_clk_off(struct sh_mobile_lcdc_priv *priv) {} |
207 | #endif | 209 | #endif |
208 | 210 | ||
211 | static int sh_mobile_lcdc_sginit(struct fb_info *info, | ||
212 | struct list_head *pagelist) | ||
213 | { | ||
214 | struct sh_mobile_lcdc_chan *ch = info->par; | ||
215 | unsigned int nr_pages_max = info->fix.smem_len >> PAGE_SHIFT; | ||
216 | struct page *page; | ||
217 | int nr_pages = 0; | ||
218 | |||
219 | sg_init_table(ch->sglist, nr_pages_max); | ||
220 | |||
221 | list_for_each_entry(page, pagelist, lru) | ||
222 | sg_set_page(&ch->sglist[nr_pages++], page, PAGE_SIZE, 0); | ||
223 | |||
224 | return nr_pages; | ||
225 | } | ||
226 | |||
209 | static void sh_mobile_lcdc_deferred_io(struct fb_info *info, | 227 | static void sh_mobile_lcdc_deferred_io(struct fb_info *info, |
210 | struct list_head *pagelist) | 228 | struct list_head *pagelist) |
211 | { | 229 | { |
212 | struct sh_mobile_lcdc_chan *ch = info->par; | 230 | struct sh_mobile_lcdc_chan *ch = info->par; |
231 | unsigned int nr_pages; | ||
213 | 232 | ||
214 | /* enable clocks before accessing hardware */ | 233 | /* enable clocks before accessing hardware */ |
215 | sh_mobile_lcdc_clk_on(ch->lcdc); | 234 | sh_mobile_lcdc_clk_on(ch->lcdc); |
216 | 235 | ||
236 | nr_pages = sh_mobile_lcdc_sginit(info, pagelist); | ||
237 | dma_map_sg(info->dev, ch->sglist, nr_pages, DMA_TO_DEVICE); | ||
238 | |||
217 | /* trigger panel update */ | 239 | /* trigger panel update */ |
218 | lcdc_write_chan(ch, LDSM2R, 1); | 240 | lcdc_write_chan(ch, LDSM2R, 1); |
241 | |||
242 | dma_unmap_sg(info->dev, ch->sglist, nr_pages, DMA_TO_DEVICE); | ||
219 | } | 243 | } |
220 | 244 | ||
221 | static void sh_mobile_lcdc_deferred_io_touch(struct fb_info *info) | 245 | static void sh_mobile_lcdc_deferred_io_touch(struct fb_info *info) |
@@ -846,21 +870,31 @@ static int __init sh_mobile_lcdc_probe(struct platform_device *pdev) | |||
846 | } | 870 | } |
847 | 871 | ||
848 | for (i = 0; i < j; i++) { | 872 | for (i = 0; i < j; i++) { |
849 | error = register_framebuffer(&priv->ch[i].info); | 873 | struct sh_mobile_lcdc_chan *ch = priv->ch + i; |
874 | |||
875 | info = &ch->info; | ||
876 | |||
877 | if (info->fbdefio) { | ||
878 | priv->ch->sglist = vmalloc(sizeof(struct scatterlist) * | ||
879 | info->fix.smem_len >> PAGE_SHIFT); | ||
880 | if (!priv->ch->sglist) { | ||
881 | dev_err(&pdev->dev, "cannot allocate sglist\n"); | ||
882 | goto err1; | ||
883 | } | ||
884 | } | ||
885 | |||
886 | error = register_framebuffer(info); | ||
850 | if (error < 0) | 887 | if (error < 0) |
851 | goto err1; | 888 | goto err1; |
852 | } | ||
853 | 889 | ||
854 | for (i = 0; i < j; i++) { | ||
855 | info = &priv->ch[i].info; | ||
856 | dev_info(info->dev, | 890 | dev_info(info->dev, |
857 | "registered %s/%s as %dx%d %dbpp.\n", | 891 | "registered %s/%s as %dx%d %dbpp.\n", |
858 | pdev->name, | 892 | pdev->name, |
859 | (priv->ch[i].cfg.chan == LCDC_CHAN_MAINLCD) ? | 893 | (ch->cfg.chan == LCDC_CHAN_MAINLCD) ? |
860 | "mainlcd" : "sublcd", | 894 | "mainlcd" : "sublcd", |
861 | (int) priv->ch[i].cfg.lcd_cfg.xres, | 895 | (int) ch->cfg.lcd_cfg.xres, |
862 | (int) priv->ch[i].cfg.lcd_cfg.yres, | 896 | (int) ch->cfg.lcd_cfg.yres, |
863 | priv->ch[i].cfg.bpp); | 897 | ch->cfg.bpp); |
864 | 898 | ||
865 | /* deferred io mode: disable clock to save power */ | 899 | /* deferred io mode: disable clock to save power */ |
866 | if (info->fbdefio) | 900 | if (info->fbdefio) |
@@ -892,6 +926,9 @@ static int sh_mobile_lcdc_remove(struct platform_device *pdev) | |||
892 | if (!info->device) | 926 | if (!info->device) |
893 | continue; | 927 | continue; |
894 | 928 | ||
929 | if (priv->ch[i].sglist) | ||
930 | vfree(priv->ch[i].sglist); | ||
931 | |||
895 | dma_free_coherent(&pdev->dev, info->fix.smem_len, | 932 | dma_free_coherent(&pdev->dev, info->fix.smem_len, |
896 | info->screen_base, priv->ch[i].dma_handle); | 933 | info->screen_base, priv->ch[i].dma_handle); |
897 | fb_dealloc_cmap(&info->cmap); | 934 | fb_dealloc_cmap(&info->cmap); |
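The sh_mobile_lcdcfb change gives the deferred-io path a proper DMA hand-off: the dirty pages that fb_deferred_io collects are gathered into a scatterlist (vmalloc'ed once at register time, sized for smem_len >> PAGE_SHIFT entries) and mapped to the device before the panel update is triggered. The callback shape, with illustrative names:

#include <linux/dma-mapping.h>
#include <linux/fb.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

struct my_chan {
	struct scatterlist *sglist;	/* one entry per framebuffer page */
};

static void my_deferred_io(struct fb_info *info, struct list_head *pagelist)
{
	struct my_chan *ch = info->par;
	unsigned int nr_pages_max = info->fix.smem_len >> PAGE_SHIFT;
	unsigned int nr_pages = 0;
	struct page *page;

	sg_init_table(ch->sglist, nr_pages_max);
	list_for_each_entry(page, pagelist, lru)
		sg_set_page(&ch->sglist[nr_pages++], page, PAGE_SIZE, 0);

	dma_map_sg(info->dev, ch->sglist, nr_pages, DMA_TO_DEVICE);
	/* ... write the "start panel update" register here ... */
	dma_unmap_sg(info->dev, ch->sglist, nr_pages, DMA_TO_DEVICE);
}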
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c index 7072d19080d5..fd33455389b8 100644 --- a/drivers/video/sis/sis_main.c +++ b/drivers/video/sis/sis_main.c | |||
@@ -1847,8 +1847,10 @@ sisfb_get_fix(struct fb_fix_screeninfo *fix, int con, struct fb_info *info) | |||
1847 | 1847 | ||
1848 | strcpy(fix->id, ivideo->myid); | 1848 | strcpy(fix->id, ivideo->myid); |
1849 | 1849 | ||
1850 | mutex_lock(&info->mm_lock); | ||
1850 | fix->smem_start = ivideo->video_base + ivideo->video_offset; | 1851 | fix->smem_start = ivideo->video_base + ivideo->video_offset; |
1851 | fix->smem_len = ivideo->sisfb_mem; | 1852 | fix->smem_len = ivideo->sisfb_mem; |
1853 | mutex_unlock(&info->mm_lock); | ||
1852 | fix->type = FB_TYPE_PACKED_PIXELS; | 1854 | fix->type = FB_TYPE_PACKED_PIXELS; |
1853 | fix->type_aux = 0; | 1855 | fix->type_aux = 0; |
1854 | fix->visual = (ivideo->video_bpp == 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR; | 1856 | fix->visual = (ivideo->video_bpp == 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR; |
diff --git a/drivers/video/sm501fb.c b/drivers/video/sm501fb.c index eb5d73a06702..98f24f0ec00d 100644 --- a/drivers/video/sm501fb.c +++ b/drivers/video/sm501fb.c | |||
@@ -145,7 +145,7 @@ static inline void sm501fb_sync_regs(struct sm501fb_info *info) | |||
145 | #define SM501_MEMF_ACCEL (8) | 145 | #define SM501_MEMF_ACCEL (8) |
146 | 146 | ||
147 | static int sm501_alloc_mem(struct sm501fb_info *inf, struct sm501_mem *mem, | 147 | static int sm501_alloc_mem(struct sm501fb_info *inf, struct sm501_mem *mem, |
148 | unsigned int why, size_t size) | 148 | unsigned int why, size_t size, u32 smem_len) |
149 | { | 149 | { |
150 | struct sm501fb_par *par; | 150 | struct sm501fb_par *par; |
151 | struct fb_info *fbi; | 151 | struct fb_info *fbi; |
@@ -172,7 +172,7 @@ static int sm501_alloc_mem(struct sm501fb_info *inf, struct sm501_mem *mem, | |||
172 | if (ptr > 0) | 172 | if (ptr > 0) |
173 | ptr &= ~(PAGE_SIZE - 1); | 173 | ptr &= ~(PAGE_SIZE - 1); |
174 | 174 | ||
175 | if (fbi && ptr < fbi->fix.smem_len) | 175 | if (fbi && ptr < smem_len) |
176 | return -ENOMEM; | 176 | return -ENOMEM; |
177 | 177 | ||
178 | break; | 178 | break; |
@@ -197,7 +197,7 @@ static int sm501_alloc_mem(struct sm501fb_info *inf, struct sm501_mem *mem, | |||
197 | 197 | ||
198 | case SM501_MEMF_ACCEL: | 198 | case SM501_MEMF_ACCEL: |
199 | fbi = inf->fb[HEAD_CRT]; | 199 | fbi = inf->fb[HEAD_CRT]; |
200 | ptr = fbi ? fbi->fix.smem_len : 0; | 200 | ptr = fbi ? smem_len : 0; |
201 | 201 | ||
202 | fbi = inf->fb[HEAD_PANEL]; | 202 | fbi = inf->fb[HEAD_PANEL]; |
203 | if (fbi) { | 203 | if (fbi) { |
@@ -413,6 +413,7 @@ static int sm501fb_set_par_common(struct fb_info *info, | |||
413 | unsigned int mem_type; | 413 | unsigned int mem_type; |
414 | unsigned int clock_type; | 414 | unsigned int clock_type; |
415 | unsigned int head_addr; | 415 | unsigned int head_addr; |
416 | unsigned int smem_len; | ||
416 | 417 | ||
417 | dev_dbg(fbi->dev, "%s: %dx%d, bpp = %d, virtual %dx%d\n", | 418 | dev_dbg(fbi->dev, "%s: %dx%d, bpp = %d, virtual %dx%d\n", |
418 | __func__, var->xres, var->yres, var->bits_per_pixel, | 419 | __func__, var->xres, var->yres, var->bits_per_pixel, |
@@ -453,18 +454,20 @@ static int sm501fb_set_par_common(struct fb_info *info, | |||
453 | 454 | ||
454 | /* allocate fb memory within 501 */ | 455 | /* allocate fb memory within 501 */ |
455 | info->fix.line_length = (var->xres_virtual * var->bits_per_pixel)/8; | 456 | info->fix.line_length = (var->xres_virtual * var->bits_per_pixel)/8; |
456 | info->fix.smem_len = info->fix.line_length * var->yres_virtual; | 457 | smem_len = info->fix.line_length * var->yres_virtual; |
457 | 458 | ||
458 | dev_dbg(fbi->dev, "%s: line length = %u\n", __func__, | 459 | dev_dbg(fbi->dev, "%s: line length = %u\n", __func__, |
459 | info->fix.line_length); | 460 | info->fix.line_length); |
460 | 461 | ||
461 | if (sm501_alloc_mem(fbi, &par->screen, mem_type, | 462 | if (sm501_alloc_mem(fbi, &par->screen, mem_type, smem_len, smem_len)) { |
462 | info->fix.smem_len)) { | ||
463 | dev_err(fbi->dev, "no memory available\n"); | 463 | dev_err(fbi->dev, "no memory available\n"); |
464 | return -ENOMEM; | 464 | return -ENOMEM; |
465 | } | 465 | } |
466 | 466 | ||
467 | mutex_lock(&info->mm_lock); | ||
467 | info->fix.smem_start = fbi->fbmem_res->start + par->screen.sm_addr; | 468 | info->fix.smem_start = fbi->fbmem_res->start + par->screen.sm_addr; |
469 | info->fix.smem_len = smem_len; | ||
470 | mutex_unlock(&info->mm_lock); | ||
468 | 471 | ||
469 | info->screen_base = fbi->fbmem + par->screen.sm_addr; | 472 | info->screen_base = fbi->fbmem + par->screen.sm_addr; |
470 | info->screen_size = info->fix.smem_len; | 473 | info->screen_size = info->fix.smem_len; |
@@ -637,7 +640,8 @@ static int sm501fb_set_par_crt(struct fb_info *info) | |||
637 | if ((control & SM501_DC_CRT_CONTROL_SEL) == 0) { | 640 | if ((control & SM501_DC_CRT_CONTROL_SEL) == 0) { |
638 | /* the head is displaying panel data... */ | 641 | /* the head is displaying panel data... */ |
639 | 642 | ||
640 | sm501_alloc_mem(fbi, &par->screen, SM501_MEMF_CRT, 0); | 643 | sm501_alloc_mem(fbi, &par->screen, SM501_MEMF_CRT, 0, |
644 | info->fix.smem_len); | ||
641 | goto out_update; | 645 | goto out_update; |
642 | } | 646 | } |
643 | 647 | ||
@@ -1289,7 +1293,8 @@ static int sm501_init_cursor(struct fb_info *fbi, unsigned int reg_base) | |||
1289 | 1293 | ||
1290 | par->cursor_regs = info->regs + reg_base; | 1294 | par->cursor_regs = info->regs + reg_base; |
1291 | 1295 | ||
1292 | ret = sm501_alloc_mem(info, &par->cursor, SM501_MEMF_CURSOR, 1024); | 1296 | ret = sm501_alloc_mem(info, &par->cursor, SM501_MEMF_CURSOR, 1024, |
1297 | fbi->fix.smem_len); | ||
1293 | if (ret < 0) | 1298 | if (ret < 0) |
1294 | return ret; | 1299 | return ret; |
1295 | 1300 | ||
diff --git a/drivers/video/w100fb.c b/drivers/video/w100fb.c index d0674f1e3f10..8a141c2c637b 100644 --- a/drivers/video/w100fb.c +++ b/drivers/video/w100fb.c | |||
@@ -523,6 +523,7 @@ static int w100fb_set_par(struct fb_info *info) | |||
523 | info->fix.ywrapstep = 0; | 523 | info->fix.ywrapstep = 0; |
524 | info->fix.line_length = par->xres * BITS_PER_PIXEL / 8; | 524 | info->fix.line_length = par->xres * BITS_PER_PIXEL / 8; |
525 | 525 | ||
526 | mutex_lock(&info->mm_lock); | ||
526 | if ((par->xres*par->yres*BITS_PER_PIXEL/8) > (MEM_INT_SIZE+1)) { | 527 | if ((par->xres*par->yres*BITS_PER_PIXEL/8) > (MEM_INT_SIZE+1)) { |
527 | par->extmem_active = 1; | 528 | par->extmem_active = 1; |
528 | info->fix.smem_len = par->mach->mem->size+1; | 529 | info->fix.smem_len = par->mach->mem->size+1; |
@@ -530,6 +531,7 @@ static int w100fb_set_par(struct fb_info *info) | |||
530 | par->extmem_active = 0; | 531 | par->extmem_active = 0; |
531 | info->fix.smem_len = MEM_INT_SIZE+1; | 532 | info->fix.smem_len = MEM_INT_SIZE+1; |
532 | } | 533 | } |
534 | mutex_unlock(&info->mm_lock); | ||
533 | 535 | ||
534 | w100fb_activate_var(par); | 536 | w100fb_activate_var(par); |
535 | } | 537 | } |
diff --git a/fs/afs/flock.c b/fs/afs/flock.c index 210acafe4a9b..3ff8bdd18fb3 100644 --- a/fs/afs/flock.c +++ b/fs/afs/flock.c | |||
@@ -432,7 +432,6 @@ vfs_rejected_lock: | |||
432 | list_del_init(&fl->fl_u.afs.link); | 432 | list_del_init(&fl->fl_u.afs.link); |
433 | if (list_empty(&vnode->granted_locks)) | 433 | if (list_empty(&vnode->granted_locks)) |
434 | afs_defer_unlock(vnode, key); | 434 | afs_defer_unlock(vnode, key); |
435 | spin_unlock(&vnode->lock); | ||
436 | goto abort_attempt; | 435 | goto abort_attempt; |
437 | } | 436 | } |
438 | 437 | ||
diff --git a/fs/aio.c b/fs/aio.c --- a/fs/aio.c +++ b/fs/aio.c | |||
@@ -485,6 +485,8 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req) | |||
485 | { | 485 | { |
486 | assert_spin_locked(&ctx->ctx_lock); | 486 | assert_spin_locked(&ctx->ctx_lock); |
487 | 487 | ||
488 | if (req->ki_eventfd != NULL) | ||
489 | eventfd_ctx_put(req->ki_eventfd); | ||
488 | if (req->ki_dtor) | 490 | if (req->ki_dtor) |
489 | req->ki_dtor(req); | 491 | req->ki_dtor(req); |
490 | if (req->ki_iovec != &req->ki_inline_vec) | 492 | if (req->ki_iovec != &req->ki_inline_vec) |
@@ -509,8 +511,6 @@ static void aio_fput_routine(struct work_struct *data) | |||
509 | /* Complete the fput(s) */ | 511 | /* Complete the fput(s) */ |
510 | if (req->ki_filp != NULL) | 512 | if (req->ki_filp != NULL) |
511 | __fput(req->ki_filp); | 513 | __fput(req->ki_filp); |
512 | if (req->ki_eventfd != NULL) | ||
513 | __fput(req->ki_eventfd); | ||
514 | 514 | ||
515 | /* Link the iocb into the context's free list */ | 515 | /* Link the iocb into the context's free list */ |
516 | spin_lock_irq(&ctx->ctx_lock); | 516 | spin_lock_irq(&ctx->ctx_lock); |
@@ -528,8 +528,6 @@ static void aio_fput_routine(struct work_struct *data) | |||
528 | */ | 528 | */ |
529 | static int __aio_put_req(struct kioctx *ctx, struct kiocb *req) | 529 | static int __aio_put_req(struct kioctx *ctx, struct kiocb *req) |
530 | { | 530 | { |
531 | int schedule_putreq = 0; | ||
532 | |||
533 | dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n", | 531 | dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n", |
534 | req, atomic_long_read(&req->ki_filp->f_count)); | 532 | req, atomic_long_read(&req->ki_filp->f_count)); |
535 | 533 | ||
@@ -549,24 +547,16 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req) | |||
549 | * we would not be holding the last reference to the file*, so | 547 | * we would not be holding the last reference to the file*, so |
550 | * this function will be executed w/out any aio kthread wakeup. | 548 | * this function will be executed w/out any aio kthread wakeup. |
551 | */ | 549 | */ |
552 | if (unlikely(atomic_long_dec_and_test(&req->ki_filp->f_count))) | 550 | if (unlikely(atomic_long_dec_and_test(&req->ki_filp->f_count))) { |
553 | schedule_putreq++; | ||
554 | else | ||
555 | req->ki_filp = NULL; | ||
556 | if (req->ki_eventfd != NULL) { | ||
557 | if (unlikely(atomic_long_dec_and_test(&req->ki_eventfd->f_count))) | ||
558 | schedule_putreq++; | ||
559 | else | ||
560 | req->ki_eventfd = NULL; | ||
561 | } | ||
562 | if (unlikely(schedule_putreq)) { | ||
563 | get_ioctx(ctx); | 551 | get_ioctx(ctx); |
564 | spin_lock(&fput_lock); | 552 | spin_lock(&fput_lock); |
565 | list_add(&req->ki_list, &fput_head); | 553 | list_add(&req->ki_list, &fput_head); |
566 | spin_unlock(&fput_lock); | 554 | spin_unlock(&fput_lock); |
567 | queue_work(aio_wq, &fput_work); | 555 | queue_work(aio_wq, &fput_work); |
568 | } else | 556 | } else { |
557 | req->ki_filp = NULL; | ||
569 | really_put_req(ctx, req); | 558 | really_put_req(ctx, req); |
559 | } | ||
570 | return 1; | 560 | return 1; |
571 | } | 561 | } |
572 | 562 | ||
@@ -1622,7 +1612,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, | |||
1622 | * an eventfd() fd, and will be signaled for each completed | 1612 | * an eventfd() fd, and will be signaled for each completed |
1623 | * event using the eventfd_signal() function. | 1613 | * event using the eventfd_signal() function. |
1624 | */ | 1614 | */ |
1625 | req->ki_eventfd = eventfd_fget((int) iocb->aio_resfd); | 1615 | req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd); |
1626 | if (IS_ERR(req->ki_eventfd)) { | 1616 | if (IS_ERR(req->ki_eventfd)) { |
1627 | ret = PTR_ERR(req->ki_eventfd); | 1617 | ret = PTR_ERR(req->ki_eventfd); |
1628 | req->ki_eventfd = NULL; | 1618 | req->ki_eventfd = NULL; |
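The aio change swaps the struct file reference on ki_eventfd (taken with eventfd_fget() and dropped through the fput work queue with manual f_count juggling) for a struct eventfd_ctx reference, which can be dropped directly in really_put_req(). The lifecycle of such a reference, in sketch form:

#include <linux/err.h>
#include <linux/eventfd.h>

/* Illustrative only: take, signal and drop an eventfd context reference. */
static int example_notify_once(int resfd)
{
	struct eventfd_ctx *ctx = eventfd_ctx_fdget(resfd);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	eventfd_signal(ctx, 1);		/* wake the eventfd reader */
	eventfd_ctx_put(ctx);		/* drop the reference, safe anywhere */
	return 0;
}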
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 9fa212b014a5..b7c1603cd4bd 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c | |||
@@ -1522,11 +1522,11 @@ static int fill_note_info(struct elfhdr *elf, int phdrs, | |||
1522 | info->thread = NULL; | 1522 | info->thread = NULL; |
1523 | 1523 | ||
1524 | psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL); | 1524 | psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL); |
1525 | fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo); | ||
1526 | |||
1527 | if (psinfo == NULL) | 1525 | if (psinfo == NULL) |
1528 | return 0; | 1526 | return 0; |
1529 | 1527 | ||
1528 | fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo); | ||
1529 | |||
1530 | /* | 1530 | /* |
1531 | * Figure out how many notes we're going to need for each thread. | 1531 | * Figure out how many notes we're going to need for each thread. |
1532 | */ | 1532 | */ |
@@ -1929,7 +1929,10 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un | |||
1929 | elf = kmalloc(sizeof(*elf), GFP_KERNEL); | 1929 | elf = kmalloc(sizeof(*elf), GFP_KERNEL); |
1930 | if (!elf) | 1930 | if (!elf) |
1931 | goto out; | 1931 | goto out; |
1932 | 1932 | /* | |
1933 | * The number of segs are recored into ELF header as 16bit value. | ||
1934 | * Please check DEFAULT_MAX_MAP_COUNT definition when you modify here. | ||
1935 | */ | ||
1933 | segs = current->mm->map_count; | 1936 | segs = current->mm->map_count; |
1934 | #ifdef ELF_CORE_EXTRA_PHDRS | 1937 | #ifdef ELF_CORE_EXTRA_PHDRS |
1935 | segs += ELF_CORE_EXTRA_PHDRS; | 1938 | segs += ELF_CORE_EXTRA_PHDRS; |
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c index 31c46a241bac..49a34e7f7306 100644 --- a/fs/bio-integrity.c +++ b/fs/bio-integrity.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * bio-integrity.c - bio data integrity extensions | 2 | * bio-integrity.c - bio data integrity extensions |
3 | * | 3 | * |
4 | * Copyright (C) 2007, 2008 Oracle Corporation | 4 | * Copyright (C) 2007, 2008, 2009 Oracle Corporation |
5 | * Written by: Martin K. Petersen <martin.petersen@oracle.com> | 5 | * Written by: Martin K. Petersen <martin.petersen@oracle.com> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or | 7 | * This program is free software; you can redistribute it and/or |
@@ -25,63 +25,121 @@ | |||
25 | #include <linux/bio.h> | 25 | #include <linux/bio.h> |
26 | #include <linux/workqueue.h> | 26 | #include <linux/workqueue.h> |
27 | 27 | ||
28 | static struct kmem_cache *bio_integrity_slab __read_mostly; | 28 | struct integrity_slab { |
29 | static mempool_t *bio_integrity_pool; | 29 | struct kmem_cache *slab; |
30 | static struct bio_set *integrity_bio_set; | 30 | unsigned short nr_vecs; |
31 | char name[8]; | ||
32 | }; | ||
33 | |||
34 | #define IS(x) { .nr_vecs = x, .name = "bip-"__stringify(x) } | ||
35 | struct integrity_slab bip_slab[BIOVEC_NR_POOLS] __read_mostly = { | ||
36 | IS(1), IS(4), IS(16), IS(64), IS(128), IS(BIO_MAX_PAGES), | ||
37 | }; | ||
38 | #undef IS | ||
39 | |||
31 | static struct workqueue_struct *kintegrityd_wq; | 40 | static struct workqueue_struct *kintegrityd_wq; |
32 | 41 | ||
42 | static inline unsigned int vecs_to_idx(unsigned int nr) | ||
43 | { | ||
44 | switch (nr) { | ||
45 | case 1: | ||
46 | return 0; | ||
47 | case 2 ... 4: | ||
48 | return 1; | ||
49 | case 5 ... 16: | ||
50 | return 2; | ||
51 | case 17 ... 64: | ||
52 | return 3; | ||
53 | case 65 ... 128: | ||
54 | return 4; | ||
55 | case 129 ... BIO_MAX_PAGES: | ||
56 | return 5; | ||
57 | default: | ||
58 | BUG(); | ||
59 | } | ||
60 | } | ||
61 | |||
62 | static inline int use_bip_pool(unsigned int idx) | ||
63 | { | ||
64 | if (idx == BIOVEC_NR_POOLS) | ||
65 | return 1; | ||
66 | |||
67 | return 0; | ||
68 | } | ||
69 | |||
33 | /** | 70 | /** |
34 | * bio_integrity_alloc - Allocate integrity payload and attach it to bio | 71 | * bio_integrity_alloc_bioset - Allocate integrity payload and attach it to bio |
35 | * @bio: bio to attach integrity metadata to | 72 | * @bio: bio to attach integrity metadata to |
36 | * @gfp_mask: Memory allocation mask | 73 | * @gfp_mask: Memory allocation mask |
37 | * @nr_vecs: Number of integrity metadata scatter-gather elements | 74 | * @nr_vecs: Number of integrity metadata scatter-gather elements |
75 | * @bs: bio_set to allocate from | ||
38 | * | 76 | * |
39 | * Description: This function prepares a bio for attaching integrity | 77 | * Description: This function prepares a bio for attaching integrity |
40 | * metadata. nr_vecs specifies the maximum number of pages containing | 78 | * metadata. nr_vecs specifies the maximum number of pages containing |
41 | * integrity metadata that can be attached. | 79 | * integrity metadata that can be attached. |
42 | */ | 80 | */ |
43 | struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio, | 81 | struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *bio, |
44 | gfp_t gfp_mask, | 82 | gfp_t gfp_mask, |
45 | unsigned int nr_vecs) | 83 | unsigned int nr_vecs, |
84 | struct bio_set *bs) | ||
46 | { | 85 | { |
47 | struct bio_integrity_payload *bip; | 86 | struct bio_integrity_payload *bip; |
48 | struct bio_vec *iv; | 87 | unsigned int idx = vecs_to_idx(nr_vecs); |
49 | unsigned long idx; | ||
50 | 88 | ||
51 | BUG_ON(bio == NULL); | 89 | BUG_ON(bio == NULL); |
90 | bip = NULL; | ||
52 | 91 | ||
53 | bip = mempool_alloc(bio_integrity_pool, gfp_mask); | 92 | /* Lower order allocations come straight from slab */ |
54 | if (unlikely(bip == NULL)) { | 93 | if (!use_bip_pool(idx)) |
55 | printk(KERN_ERR "%s: could not alloc bip\n", __func__); | 94 | bip = kmem_cache_alloc(bip_slab[idx].slab, gfp_mask); |
56 | return NULL; | ||
57 | } | ||
58 | 95 | ||
59 | memset(bip, 0, sizeof(*bip)); | 96 | /* Use mempool if lower order alloc failed or max vecs were requested */ |
97 | if (bip == NULL) { | ||
98 | bip = mempool_alloc(bs->bio_integrity_pool, gfp_mask); | ||
60 | 99 | ||
61 | iv = bvec_alloc_bs(gfp_mask, nr_vecs, &idx, integrity_bio_set); | 100 | if (unlikely(bip == NULL)) { |
62 | if (unlikely(iv == NULL)) { | 101 | printk(KERN_ERR "%s: could not alloc bip\n", __func__); |
63 | printk(KERN_ERR "%s: could not alloc bip_vec\n", __func__); | 102 | return NULL; |
64 | mempool_free(bip, bio_integrity_pool); | 103 | } |
65 | return NULL; | ||
66 | } | 104 | } |
67 | 105 | ||
68 | bip->bip_pool = idx; | 106 | memset(bip, 0, sizeof(*bip)); |
69 | bip->bip_vec = iv; | 107 | |
108 | bip->bip_slab = idx; | ||
70 | bip->bip_bio = bio; | 109 | bip->bip_bio = bio; |
71 | bio->bi_integrity = bip; | 110 | bio->bi_integrity = bip; |
72 | 111 | ||
73 | return bip; | 112 | return bip; |
74 | } | 113 | } |
114 | EXPORT_SYMBOL(bio_integrity_alloc_bioset); | ||
115 | |||
116 | /** | ||
117 | * bio_integrity_alloc - Allocate integrity payload and attach it to bio | ||
118 | * @bio: bio to attach integrity metadata to | ||
119 | * @gfp_mask: Memory allocation mask | ||
120 | * @nr_vecs: Number of integrity metadata scatter-gather elements | ||
121 | * | ||
122 | * Description: This function prepares a bio for attaching integrity | ||
123 | * metadata. nr_vecs specifies the maximum number of pages containing | ||
124 | * integrity metadata that can be attached. | ||
125 | */ | ||
126 | struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio, | ||
127 | gfp_t gfp_mask, | ||
128 | unsigned int nr_vecs) | ||
129 | { | ||
130 | return bio_integrity_alloc_bioset(bio, gfp_mask, nr_vecs, fs_bio_set); | ||
131 | } | ||
75 | EXPORT_SYMBOL(bio_integrity_alloc); | 132 | EXPORT_SYMBOL(bio_integrity_alloc); |
76 | 133 | ||
77 | /** | 134 | /** |
78 | * bio_integrity_free - Free bio integrity payload | 135 | * bio_integrity_free - Free bio integrity payload |
79 | * @bio: bio containing bip to be freed | 136 | * @bio: bio containing bip to be freed |
137 | * @bs: bio_set this bio was allocated from | ||
80 | * | 138 | * |
81 | * Description: Used to free the integrity portion of a bio. Usually | 139 | * Description: Used to free the integrity portion of a bio. Usually |
82 | * called from bio_free(). | 140 | * called from bio_free(). |
83 | */ | 141 | */ |
84 | void bio_integrity_free(struct bio *bio) | 142 | void bio_integrity_free(struct bio *bio, struct bio_set *bs) |
85 | { | 143 | { |
86 | struct bio_integrity_payload *bip = bio->bi_integrity; | 144 | struct bio_integrity_payload *bip = bio->bi_integrity; |
87 | 145 | ||
@@ -92,8 +150,10 @@ void bio_integrity_free(struct bio *bio) | |||
92 | && bip->bip_buf != NULL) | 150 | && bip->bip_buf != NULL) |
93 | kfree(bip->bip_buf); | 151 | kfree(bip->bip_buf); |
94 | 152 | ||
95 | bvec_free_bs(integrity_bio_set, bip->bip_vec, bip->bip_pool); | 153 | if (use_bip_pool(bip->bip_slab)) |
96 | mempool_free(bip, bio_integrity_pool); | 154 | mempool_free(bip, bs->bio_integrity_pool); |
155 | else | ||
156 | kmem_cache_free(bip_slab[bip->bip_slab].slab, bip); | ||
97 | 157 | ||
98 | bio->bi_integrity = NULL; | 158 | bio->bi_integrity = NULL; |
99 | } | 159 | } |
@@ -114,7 +174,7 @@ int bio_integrity_add_page(struct bio *bio, struct page *page, | |||
114 | struct bio_integrity_payload *bip = bio->bi_integrity; | 174 | struct bio_integrity_payload *bip = bio->bi_integrity; |
115 | struct bio_vec *iv; | 175 | struct bio_vec *iv; |
116 | 176 | ||
117 | if (bip->bip_vcnt >= bvec_nr_vecs(bip->bip_pool)) { | 177 | if (bip->bip_vcnt >= bvec_nr_vecs(bip->bip_slab)) { |
118 | printk(KERN_ERR "%s: bip_vec full\n", __func__); | 178 | printk(KERN_ERR "%s: bip_vec full\n", __func__); |
119 | return 0; | 179 | return 0; |
120 | } | 180 | } |
@@ -647,8 +707,8 @@ void bio_integrity_split(struct bio *bio, struct bio_pair *bp, int sectors) | |||
647 | bp->iv1 = bip->bip_vec[0]; | 707 | bp->iv1 = bip->bip_vec[0]; |
648 | bp->iv2 = bip->bip_vec[0]; | 708 | bp->iv2 = bip->bip_vec[0]; |
649 | 709 | ||
650 | bp->bip1.bip_vec = &bp->iv1; | 710 | bp->bip1.bip_vec[0] = bp->iv1; |
651 | bp->bip2.bip_vec = &bp->iv2; | 711 | bp->bip2.bip_vec[0] = bp->iv2; |
652 | 712 | ||
653 | bp->iv1.bv_len = sectors * bi->tuple_size; | 713 | bp->iv1.bv_len = sectors * bi->tuple_size; |
654 | bp->iv2.bv_offset += sectors * bi->tuple_size; | 714 | bp->iv2.bv_offset += sectors * bi->tuple_size; |
@@ -667,17 +727,19 @@ EXPORT_SYMBOL(bio_integrity_split); | |||
667 | * @bio: New bio | 727 | * @bio: New bio |
668 | * @bio_src: Original bio | 728 | * @bio_src: Original bio |
669 | * @gfp_mask: Memory allocation mask | 729 | * @gfp_mask: Memory allocation mask |
730 | * @bs: bio_set to allocate bip from | ||
670 | * | 731 | * |
671 | * Description: Called to allocate a bip when cloning a bio | 732 | * Description: Called to allocate a bip when cloning a bio |
672 | */ | 733 | */ |
673 | int bio_integrity_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp_mask) | 734 | int bio_integrity_clone(struct bio *bio, struct bio *bio_src, |
735 | gfp_t gfp_mask, struct bio_set *bs) | ||
674 | { | 736 | { |
675 | struct bio_integrity_payload *bip_src = bio_src->bi_integrity; | 737 | struct bio_integrity_payload *bip_src = bio_src->bi_integrity; |
676 | struct bio_integrity_payload *bip; | 738 | struct bio_integrity_payload *bip; |
677 | 739 | ||
678 | BUG_ON(bip_src == NULL); | 740 | BUG_ON(bip_src == NULL); |
679 | 741 | ||
680 | bip = bio_integrity_alloc(bio, gfp_mask, bip_src->bip_vcnt); | 742 | bip = bio_integrity_alloc_bioset(bio, gfp_mask, bip_src->bip_vcnt, bs); |
681 | 743 | ||
682 | if (bip == NULL) | 744 | if (bip == NULL) |
683 | return -EIO; | 745 | return -EIO; |
@@ -693,25 +755,43 @@ int bio_integrity_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp_mask) | |||
693 | } | 755 | } |
694 | EXPORT_SYMBOL(bio_integrity_clone); | 756 | EXPORT_SYMBOL(bio_integrity_clone); |
695 | 757 | ||
696 | static int __init bio_integrity_init(void) | 758 | int bioset_integrity_create(struct bio_set *bs, int pool_size) |
697 | { | 759 | { |
698 | kintegrityd_wq = create_workqueue("kintegrityd"); | 760 | unsigned int max_slab = vecs_to_idx(BIO_MAX_PAGES); |
761 | |||
762 | bs->bio_integrity_pool = | ||
763 | mempool_create_slab_pool(pool_size, bip_slab[max_slab].slab); | ||
699 | 764 | ||
765 | if (!bs->bio_integrity_pool) | ||
766 | return -1; | ||
767 | |||
768 | return 0; | ||
769 | } | ||
770 | EXPORT_SYMBOL(bioset_integrity_create); | ||
771 | |||
772 | void bioset_integrity_free(struct bio_set *bs) | ||
773 | { | ||
774 | if (bs->bio_integrity_pool) | ||
775 | mempool_destroy(bs->bio_integrity_pool); | ||
776 | } | ||
777 | EXPORT_SYMBOL(bioset_integrity_free); | ||
778 | |||
779 | void __init bio_integrity_init(void) | ||
780 | { | ||
781 | unsigned int i; | ||
782 | |||
783 | kintegrityd_wq = create_workqueue("kintegrityd"); | ||
700 | if (!kintegrityd_wq) | 784 | if (!kintegrityd_wq) |
701 | panic("Failed to create kintegrityd\n"); | 785 | panic("Failed to create kintegrityd\n"); |
702 | 786 | ||
703 | bio_integrity_slab = KMEM_CACHE(bio_integrity_payload, | 787 | for (i = 0 ; i < BIOVEC_NR_POOLS ; i++) { |
704 | SLAB_HWCACHE_ALIGN|SLAB_PANIC); | 788 | unsigned int size; |
705 | 789 | ||
706 | bio_integrity_pool = mempool_create_slab_pool(BIO_POOL_SIZE, | 790 | size = sizeof(struct bio_integrity_payload) |
707 | bio_integrity_slab); | 791 | + bip_slab[i].nr_vecs * sizeof(struct bio_vec); |
708 | if (!bio_integrity_pool) | ||
709 | panic("bio_integrity: can't allocate bip pool\n"); | ||
710 | 792 | ||
711 | integrity_bio_set = bioset_create(BIO_POOL_SIZE, 0); | 793 | bip_slab[i].slab = |
712 | if (!integrity_bio_set) | 794 | kmem_cache_create(bip_slab[i].name, size, 0, |
713 | panic("bio_integrity: can't allocate bio_set\n"); | 795 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); |
714 | 796 | } | |
715 | return 0; | ||
716 | } | 797 | } |
717 | subsys_initcall(bio_integrity_init); | ||
diff --git a/fs/bio.c b/fs/bio.c --- a/fs/bio.c +++ b/fs/bio.c | |||
@@ -238,7 +238,7 @@ void bio_free(struct bio *bio, struct bio_set *bs) | |||
238 | bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio)); | 238 | bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio)); |
239 | 239 | ||
240 | if (bio_integrity(bio)) | 240 | if (bio_integrity(bio)) |
241 | bio_integrity_free(bio); | 241 | bio_integrity_free(bio, bs); |
242 | 242 | ||
243 | /* | 243 | /* |
244 | * If we have front padding, adjust the bio pointer before freeing | 244 | * If we have front padding, adjust the bio pointer before freeing |
@@ -341,7 +341,7 @@ struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs) | |||
341 | static void bio_kmalloc_destructor(struct bio *bio) | 341 | static void bio_kmalloc_destructor(struct bio *bio) |
342 | { | 342 | { |
343 | if (bio_integrity(bio)) | 343 | if (bio_integrity(bio)) |
344 | bio_integrity_free(bio); | 344 | bio_integrity_free(bio, fs_bio_set); |
345 | kfree(bio); | 345 | kfree(bio); |
346 | } | 346 | } |
347 | 347 | ||
@@ -472,7 +472,7 @@ struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask) | |||
472 | if (bio_integrity(bio)) { | 472 | if (bio_integrity(bio)) { |
473 | int ret; | 473 | int ret; |
474 | 474 | ||
475 | ret = bio_integrity_clone(b, bio, gfp_mask); | 475 | ret = bio_integrity_clone(b, bio, gfp_mask, fs_bio_set); |
476 | 476 | ||
477 | if (ret < 0) { | 477 | if (ret < 0) { |
478 | bio_put(b); | 478 | bio_put(b); |
@@ -1539,6 +1539,7 @@ void bioset_free(struct bio_set *bs) | |||
1539 | if (bs->bio_pool) | 1539 | if (bs->bio_pool) |
1540 | mempool_destroy(bs->bio_pool); | 1540 | mempool_destroy(bs->bio_pool); |
1541 | 1541 | ||
1542 | bioset_integrity_free(bs); | ||
1542 | biovec_free_pools(bs); | 1543 | biovec_free_pools(bs); |
1543 | bio_put_slab(bs); | 1544 | bio_put_slab(bs); |
1544 | 1545 | ||
@@ -1579,6 +1580,9 @@ struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad) | |||
1579 | if (!bs->bio_pool) | 1580 | if (!bs->bio_pool) |
1580 | goto bad; | 1581 | goto bad; |
1581 | 1582 | ||
1583 | if (bioset_integrity_create(bs, pool_size)) | ||
1584 | goto bad; | ||
1585 | |||
1582 | if (!biovec_create_pools(bs, pool_size)) | 1586 | if (!biovec_create_pools(bs, pool_size)) |
1583 | return bs; | 1587 | return bs; |
1584 | 1588 | ||
@@ -1616,6 +1620,7 @@ static int __init init_bio(void) | |||
1616 | if (!bio_slabs) | 1620 | if (!bio_slabs) |
1617 | panic("bio: can't allocate bios\n"); | 1621 | panic("bio: can't allocate bios\n"); |
1618 | 1622 | ||
1623 | bio_integrity_init(); | ||
1619 | biovec_init_slabs(); | 1624 | biovec_init_slabs(); |
1620 | 1625 | ||
1621 | fs_bio_set = bioset_create(BIO_POOL_SIZE, 0); | 1626 | fs_bio_set = bioset_create(BIO_POOL_SIZE, 0); |
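
The bio-integrity hunks above move the integrity payload mempool into each bio_set: bioset_integrity_create() backs a pool with the largest bip slab, bioset_integrity_free() tears it down NULL-safely, and bioset_create()/bioset_free() hook both into the existing goto-bad error path. Below is a small userspace sketch of that create/free pairing only; the struct names, sizes and helpers are invented for illustration and are not the kernel slab/mempool API.

/*
 * Userspace sketch of the ownership pattern above: the set owns an
 * optional integrity pool, creation can fail, and teardown destroys
 * only what was actually created.  All names are invented.
 */
#include <stdio.h>
#include <stdlib.h>

struct pool {
    size_t obj_size;
    int nr_objects;
};

struct set {
    struct pool *bio_pool;
    struct pool *integrity_pool;    /* analogue of bs->bio_integrity_pool */
};

static struct pool *pool_create(int nr, size_t obj_size)
{
    struct pool *p = malloc(sizeof(*p));

    if (p) {
        p->obj_size = obj_size;
        p->nr_objects = nr;
    }
    return p;
}

static void pool_destroy(struct pool *p)
{
    free(p);
}

static int set_integrity_create(struct set *s, int pool_size)
{
    /* back the pool with the largest payload size so any request fits,
     * the way bioset_integrity_create() picks bip_slab[max_slab] */
    s->integrity_pool = pool_create(pool_size, 4096);
    return s->integrity_pool ? 0 : -1;
}

static void set_integrity_free(struct set *s)
{
    if (s->integrity_pool)          /* NULL-safe, like bioset_integrity_free() */
        pool_destroy(s->integrity_pool);
}

static struct set *set_create(int pool_size)
{
    struct set *s = calloc(1, sizeof(*s));

    if (!s)
        return NULL;
    s->bio_pool = pool_create(pool_size, 256);
    if (!s->bio_pool)
        goto bad;
    if (set_integrity_create(s, pool_size))     /* the new failure point */
        goto bad;
    return s;
bad:
    set_integrity_free(s);
    pool_destroy(s->bio_pool);      /* free(NULL) is a no-op, so this is safe */
    free(s);
    return NULL;
}

static void set_free(struct set *s)
{
    set_integrity_free(s);          /* mirrors bioset_free() calling bioset_integrity_free() */
    pool_destroy(s->bio_pool);
    free(s);
}

int main(void)
{
    struct set *s = set_create(4);

    printf("set %screated\n", s ? "" : "not ");
    if (s)
        set_free(s);
    return 0;
}
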
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c index 7f88628a1a72..6e4f6c50a120 100644 --- a/fs/btrfs/async-thread.c +++ b/fs/btrfs/async-thread.c | |||
@@ -299,8 +299,8 @@ int btrfs_start_workers(struct btrfs_workers *workers, int num_workers) | |||
299 | "btrfs-%s-%d", workers->name, | 299 | "btrfs-%s-%d", workers->name, |
300 | workers->num_workers + i); | 300 | workers->num_workers + i); |
301 | if (IS_ERR(worker->task)) { | 301 | if (IS_ERR(worker->task)) { |
302 | kfree(worker); | ||
303 | ret = PTR_ERR(worker->task); | 302 | ret = PTR_ERR(worker->task); |
303 | kfree(worker); | ||
304 | goto fail; | 304 | goto fail; |
305 | } | 305 | } |
306 | 306 | ||
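
The btrfs_start_workers() hunk is a use-after-free fix: the old code freed the worker and only then read the error code out of it. A minimal userspace illustration of the ordering rule (read whatever you need from an object before freeing it); the structure and helper are invented stand-ins for the kernel's worker/PTR_ERR()/kfree().

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct worker {
    long task_err;              /* stand-in for the error encoded in worker->task */
};

static int start_worker(void)
{
    struct worker *worker = malloc(sizeof(*worker));
    long ret;

    if (!worker)
        return -ENOMEM;

    worker->task_err = -ENOMEM; /* pretend thread creation failed */

    ret = worker->task_err;     /* read the error out of the object first... */
    free(worker);               /* ...then free it; the old order read freed memory */
    return (int)ret;
}

int main(void)
{
    printf("start_worker() = %d\n", start_worker());
    return 0;
}
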
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 2779c2f5360a..98a873838717 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h | |||
@@ -2074,8 +2074,7 @@ static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans, | |||
2074 | int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path); | 2074 | int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path); |
2075 | int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path); | 2075 | int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path); |
2076 | int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf); | 2076 | int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf); |
2077 | int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root | 2077 | int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref); |
2078 | *root); | ||
2079 | int btrfs_drop_subtree(struct btrfs_trans_handle *trans, | 2078 | int btrfs_drop_subtree(struct btrfs_trans_handle *trans, |
2080 | struct btrfs_root *root, | 2079 | struct btrfs_root *root, |
2081 | struct extent_buffer *node, | 2080 | struct extent_buffer *node, |
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index edc7d208c5ce..a5aca3997d42 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
@@ -990,15 +990,13 @@ static inline int extent_ref_type(u64 parent, u64 owner) | |||
990 | return type; | 990 | return type; |
991 | } | 991 | } |
992 | 992 | ||
993 | static int find_next_key(struct btrfs_path *path, struct btrfs_key *key) | 993 | static int find_next_key(struct btrfs_path *path, int level, |
994 | struct btrfs_key *key) | ||
994 | 995 | ||
995 | { | 996 | { |
996 | int level; | 997 | for (; level < BTRFS_MAX_LEVEL; level++) { |
997 | BUG_ON(!path->keep_locks); | ||
998 | for (level = 0; level < BTRFS_MAX_LEVEL; level++) { | ||
999 | if (!path->nodes[level]) | 998 | if (!path->nodes[level]) |
1000 | break; | 999 | break; |
1001 | btrfs_assert_tree_locked(path->nodes[level]); | ||
1002 | if (path->slots[level] + 1 >= | 1000 | if (path->slots[level] + 1 >= |
1003 | btrfs_header_nritems(path->nodes[level])) | 1001 | btrfs_header_nritems(path->nodes[level])) |
1004 | continue; | 1002 | continue; |
@@ -1158,7 +1156,8 @@ int lookup_inline_extent_backref(struct btrfs_trans_handle *trans, | |||
1158 | * For simplicity, we just do not add new inline back | 1156 | * For simplicity, we just do not add new inline back |
1159 | * ref if there is any kind of item for this block | 1157 | * ref if there is any kind of item for this block |
1160 | */ | 1158 | */ |
1161 | if (find_next_key(path, &key) == 0 && key.objectid == bytenr && | 1159 | if (find_next_key(path, 0, &key) == 0 && |
1160 | key.objectid == bytenr && | ||
1162 | key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) { | 1161 | key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) { |
1163 | err = -EAGAIN; | 1162 | err = -EAGAIN; |
1164 | goto out; | 1163 | goto out; |
@@ -2697,7 +2696,7 @@ again: | |||
2697 | 2696 | ||
2698 | printk(KERN_ERR "no space left, need %llu, %llu delalloc bytes" | 2697 | printk(KERN_ERR "no space left, need %llu, %llu delalloc bytes" |
2699 | ", %llu bytes_used, %llu bytes_reserved, " | 2698 | ", %llu bytes_used, %llu bytes_reserved, " |
2700 | "%llu bytes_pinned, %llu bytes_readonly, %llu may use" | 2699 | "%llu bytes_pinned, %llu bytes_readonly, %llu may use " |
2701 | "%llu total\n", (unsigned long long)bytes, | 2700 | "%llu total\n", (unsigned long long)bytes, |
2702 | (unsigned long long)data_sinfo->bytes_delalloc, | 2701 | (unsigned long long)data_sinfo->bytes_delalloc, |
2703 | (unsigned long long)data_sinfo->bytes_used, | 2702 | (unsigned long long)data_sinfo->bytes_used, |
@@ -4128,6 +4127,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans, | |||
4128 | return buf; | 4127 | return buf; |
4129 | } | 4128 | } |
4130 | 4129 | ||
4130 | #if 0 | ||
4131 | int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans, | 4131 | int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans, |
4132 | struct btrfs_root *root, struct extent_buffer *leaf) | 4132 | struct btrfs_root *root, struct extent_buffer *leaf) |
4133 | { | 4133 | { |
@@ -4171,8 +4171,6 @@ int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans, | |||
4171 | return 0; | 4171 | return 0; |
4172 | } | 4172 | } |
4173 | 4173 | ||
4174 | #if 0 | ||
4175 | |||
4176 | static noinline int cache_drop_leaf_ref(struct btrfs_trans_handle *trans, | 4174 | static noinline int cache_drop_leaf_ref(struct btrfs_trans_handle *trans, |
4177 | struct btrfs_root *root, | 4175 | struct btrfs_root *root, |
4178 | struct btrfs_leaf_ref *ref) | 4176 | struct btrfs_leaf_ref *ref) |
@@ -4553,262 +4551,471 @@ out: | |||
4553 | } | 4551 | } |
4554 | #endif | 4552 | #endif |
4555 | 4553 | ||
4554 | struct walk_control { | ||
4555 | u64 refs[BTRFS_MAX_LEVEL]; | ||
4556 | u64 flags[BTRFS_MAX_LEVEL]; | ||
4557 | struct btrfs_key update_progress; | ||
4558 | int stage; | ||
4559 | int level; | ||
4560 | int shared_level; | ||
4561 | int update_ref; | ||
4562 | int keep_locks; | ||
4563 | }; | ||
4564 | |||
4565 | #define DROP_REFERENCE 1 | ||
4566 | #define UPDATE_BACKREF 2 | ||
4567 | |||
4556 | /* | 4568 | /* |
4557 | * helper function for drop_subtree, this function is similar to | 4569 | * helper to process a tree block while walking down the tree. |
4558 | * walk_down_tree. The main difference is that it checks reference | 4570 | * |
4559 | * counts while tree blocks are locked. | 4571 | * when wc->stage == DROP_REFERENCE, this function checks |
4572 | * reference count of the block. if the block is shared and | ||
4573 | * we need update back refs for the subtree rooted at the | ||
4574 | * block, this function changes wc->stage to UPDATE_BACKREF | ||
4575 | * | ||
4576 | * when wc->stage == UPDATE_BACKREF, this function updates | ||
4577 | * back refs for pointers in the block. | ||
4578 | * | ||
4579 | * NOTE: return value 1 means we should stop walking down. | ||
4560 | */ | 4580 | */ |
4561 | static noinline int walk_down_tree(struct btrfs_trans_handle *trans, | 4581 | static noinline int walk_down_proc(struct btrfs_trans_handle *trans, |
4562 | struct btrfs_root *root, | 4582 | struct btrfs_root *root, |
4563 | struct btrfs_path *path, int *level) | 4583 | struct btrfs_path *path, |
4584 | struct walk_control *wc) | ||
4564 | { | 4585 | { |
4565 | struct extent_buffer *next; | 4586 | int level = wc->level; |
4566 | struct extent_buffer *cur; | 4587 | struct extent_buffer *eb = path->nodes[level]; |
4567 | struct extent_buffer *parent; | 4588 | struct btrfs_key key; |
4568 | u64 bytenr; | 4589 | u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF; |
4569 | u64 ptr_gen; | ||
4570 | u64 refs; | ||
4571 | u64 flags; | ||
4572 | u32 blocksize; | ||
4573 | int ret; | 4590 | int ret; |
4574 | 4591 | ||
4575 | cur = path->nodes[*level]; | 4592 | if (wc->stage == UPDATE_BACKREF && |
4576 | ret = btrfs_lookup_extent_info(trans, root, cur->start, cur->len, | 4593 | btrfs_header_owner(eb) != root->root_key.objectid) |
4577 | &refs, &flags); | 4594 | return 1; |
4578 | BUG_ON(ret); | ||
4579 | if (refs > 1) | ||
4580 | goto out; | ||
4581 | 4595 | ||
4582 | BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)); | 4596 | /* |
4597 | * when reference count of tree block is 1, it won't increase | ||
4598 | * again. once full backref flag is set, we never clear it. | ||
4599 | */ | ||
4600 | if ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) || | ||
4601 | (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag))) { | ||
4602 | BUG_ON(!path->locks[level]); | ||
4603 | ret = btrfs_lookup_extent_info(trans, root, | ||
4604 | eb->start, eb->len, | ||
4605 | &wc->refs[level], | ||
4606 | &wc->flags[level]); | ||
4607 | BUG_ON(ret); | ||
4608 | BUG_ON(wc->refs[level] == 0); | ||
4609 | } | ||
4583 | 4610 | ||
4584 | while (*level >= 0) { | 4611 | if (wc->stage == DROP_REFERENCE && |
4585 | cur = path->nodes[*level]; | 4612 | wc->update_ref && wc->refs[level] > 1) { |
4586 | if (*level == 0) { | 4613 | BUG_ON(eb == root->node); |
4587 | ret = btrfs_drop_leaf_ref(trans, root, cur); | 4614 | BUG_ON(path->slots[level] > 0); |
4588 | BUG_ON(ret); | 4615 | if (level == 0) |
4589 | clean_tree_block(trans, root, cur); | 4616 | btrfs_item_key_to_cpu(eb, &key, path->slots[level]); |
4590 | break; | 4617 | else |
4591 | } | 4618 | btrfs_node_key_to_cpu(eb, &key, path->slots[level]); |
4592 | if (path->slots[*level] >= btrfs_header_nritems(cur)) { | 4619 | if (btrfs_header_owner(eb) == root->root_key.objectid && |
4593 | clean_tree_block(trans, root, cur); | 4620 | btrfs_comp_cpu_keys(&key, &wc->update_progress) >= 0) { |
4594 | break; | 4621 | wc->stage = UPDATE_BACKREF; |
4622 | wc->shared_level = level; | ||
4595 | } | 4623 | } |
4624 | } | ||
4596 | 4625 | ||
4597 | bytenr = btrfs_node_blockptr(cur, path->slots[*level]); | 4626 | if (wc->stage == DROP_REFERENCE) { |
4598 | blocksize = btrfs_level_size(root, *level - 1); | 4627 | if (wc->refs[level] > 1) |
4599 | ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]); | 4628 | return 1; |
4600 | 4629 | ||
4601 | next = read_tree_block(root, bytenr, blocksize, ptr_gen); | 4630 | if (path->locks[level] && !wc->keep_locks) { |
4602 | btrfs_tree_lock(next); | 4631 | btrfs_tree_unlock(eb); |
4603 | btrfs_set_lock_blocking(next); | 4632 | path->locks[level] = 0; |
4633 | } | ||
4634 | return 0; | ||
4635 | } | ||
4604 | 4636 | ||
4605 | ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize, | 4637 | /* wc->stage == UPDATE_BACKREF */ |
4606 | &refs, &flags); | 4638 | if (!(wc->flags[level] & flag)) { |
4639 | BUG_ON(!path->locks[level]); | ||
4640 | ret = btrfs_inc_ref(trans, root, eb, 1); | ||
4607 | BUG_ON(ret); | 4641 | BUG_ON(ret); |
4608 | if (refs > 1) { | 4642 | ret = btrfs_dec_ref(trans, root, eb, 0); |
4609 | parent = path->nodes[*level]; | 4643 | BUG_ON(ret); |
4610 | ret = btrfs_free_extent(trans, root, bytenr, | 4644 | ret = btrfs_set_disk_extent_flags(trans, root, eb->start, |
4611 | blocksize, parent->start, | 4645 | eb->len, flag, 0); |
4612 | btrfs_header_owner(parent), | 4646 | BUG_ON(ret); |
4613 | *level - 1, 0); | 4647 | wc->flags[level] |= flag; |
4648 | } | ||
4649 | |||
4650 | /* | ||
4651 | * the block is shared by multiple trees, so it's not good to | ||
4652 | * keep the tree lock | ||
4653 | */ | ||
4654 | if (path->locks[level] && level > 0) { | ||
4655 | btrfs_tree_unlock(eb); | ||
4656 | path->locks[level] = 0; | ||
4657 | } | ||
4658 | return 0; | ||
4659 | } | ||
4660 | |||
4661 | /* | ||
4662 | * helper to process a tree block while walking up the tree. | ||
4663 | * | ||
4664 | * when wc->stage == DROP_REFERENCE, this function drops | ||
4665 | * reference count on the block. | ||
4666 | * | ||
4667 | * when wc->stage == UPDATE_BACKREF, this function changes | ||
4668 | * wc->stage back to DROP_REFERENCE if we changed wc->stage | ||
4669 | * to UPDATE_BACKREF previously while processing the block. | ||
4670 | * | ||
4671 | * NOTE: return value 1 means we should stop walking up. | ||
4672 | */ | ||
4673 | static noinline int walk_up_proc(struct btrfs_trans_handle *trans, | ||
4674 | struct btrfs_root *root, | ||
4675 | struct btrfs_path *path, | ||
4676 | struct walk_control *wc) | ||
4677 | { | ||
4678 | int ret = 0; | ||
4679 | int level = wc->level; | ||
4680 | struct extent_buffer *eb = path->nodes[level]; | ||
4681 | u64 parent = 0; | ||
4682 | |||
4683 | if (wc->stage == UPDATE_BACKREF) { | ||
4684 | BUG_ON(wc->shared_level < level); | ||
4685 | if (level < wc->shared_level) | ||
4686 | goto out; | ||
4687 | |||
4688 | BUG_ON(wc->refs[level] <= 1); | ||
4689 | ret = find_next_key(path, level + 1, &wc->update_progress); | ||
4690 | if (ret > 0) | ||
4691 | wc->update_ref = 0; | ||
4692 | |||
4693 | wc->stage = DROP_REFERENCE; | ||
4694 | wc->shared_level = -1; | ||
4695 | path->slots[level] = 0; | ||
4696 | |||
4697 | /* | ||
4698 | * check reference count again if the block isn't locked. | ||
4699 | * we should start walking down the tree again if reference | ||
4700 | * count is one. | ||
4701 | */ | ||
4702 | if (!path->locks[level]) { | ||
4703 | BUG_ON(level == 0); | ||
4704 | btrfs_tree_lock(eb); | ||
4705 | btrfs_set_lock_blocking(eb); | ||
4706 | path->locks[level] = 1; | ||
4707 | |||
4708 | ret = btrfs_lookup_extent_info(trans, root, | ||
4709 | eb->start, eb->len, | ||
4710 | &wc->refs[level], | ||
4711 | &wc->flags[level]); | ||
4614 | BUG_ON(ret); | 4712 | BUG_ON(ret); |
4615 | path->slots[*level]++; | 4713 | BUG_ON(wc->refs[level] == 0); |
4616 | btrfs_tree_unlock(next); | 4714 | if (wc->refs[level] == 1) { |
4617 | free_extent_buffer(next); | 4715 | btrfs_tree_unlock(eb); |
4618 | continue; | 4716 | path->locks[level] = 0; |
4717 | return 1; | ||
4718 | } | ||
4719 | } else { | ||
4720 | BUG_ON(level != 0); | ||
4619 | } | 4721 | } |
4722 | } | ||
4620 | 4723 | ||
4621 | BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)); | 4724 | /* wc->stage == DROP_REFERENCE */ |
4725 | BUG_ON(wc->refs[level] > 1 && !path->locks[level]); | ||
4622 | 4726 | ||
4623 | *level = btrfs_header_level(next); | 4727 | if (wc->refs[level] == 1) { |
4624 | path->nodes[*level] = next; | 4728 | if (level == 0) { |
4625 | path->slots[*level] = 0; | 4729 | if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) |
4626 | path->locks[*level] = 1; | 4730 | ret = btrfs_dec_ref(trans, root, eb, 1); |
4627 | cond_resched(); | 4731 | else |
4732 | ret = btrfs_dec_ref(trans, root, eb, 0); | ||
4733 | BUG_ON(ret); | ||
4734 | } | ||
4735 | /* make block locked assertion in clean_tree_block happy */ | ||
4736 | if (!path->locks[level] && | ||
4737 | btrfs_header_generation(eb) == trans->transid) { | ||
4738 | btrfs_tree_lock(eb); | ||
4739 | btrfs_set_lock_blocking(eb); | ||
4740 | path->locks[level] = 1; | ||
4741 | } | ||
4742 | clean_tree_block(trans, root, eb); | ||
4743 | } | ||
4744 | |||
4745 | if (eb == root->node) { | ||
4746 | if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) | ||
4747 | parent = eb->start; | ||
4748 | else | ||
4749 | BUG_ON(root->root_key.objectid != | ||
4750 | btrfs_header_owner(eb)); | ||
4751 | } else { | ||
4752 | if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF) | ||
4753 | parent = path->nodes[level + 1]->start; | ||
4754 | else | ||
4755 | BUG_ON(root->root_key.objectid != | ||
4756 | btrfs_header_owner(path->nodes[level + 1])); | ||
4628 | } | 4757 | } |
4629 | out: | ||
4630 | if (path->nodes[*level] == root->node) | ||
4631 | parent = path->nodes[*level]; | ||
4632 | else | ||
4633 | parent = path->nodes[*level + 1]; | ||
4634 | bytenr = path->nodes[*level]->start; | ||
4635 | blocksize = path->nodes[*level]->len; | ||
4636 | 4758 | ||
4637 | ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent->start, | 4759 | ret = btrfs_free_extent(trans, root, eb->start, eb->len, parent, |
4638 | btrfs_header_owner(parent), *level, 0); | 4760 | root->root_key.objectid, level, 0); |
4639 | BUG_ON(ret); | 4761 | BUG_ON(ret); |
4762 | out: | ||
4763 | wc->refs[level] = 0; | ||
4764 | wc->flags[level] = 0; | ||
4765 | return ret; | ||
4766 | } | ||
4767 | |||
4768 | static noinline int walk_down_tree(struct btrfs_trans_handle *trans, | ||
4769 | struct btrfs_root *root, | ||
4770 | struct btrfs_path *path, | ||
4771 | struct walk_control *wc) | ||
4772 | { | ||
4773 | struct extent_buffer *next; | ||
4774 | struct extent_buffer *cur; | ||
4775 | u64 bytenr; | ||
4776 | u64 ptr_gen; | ||
4777 | u32 blocksize; | ||
4778 | int level = wc->level; | ||
4779 | int ret; | ||
4780 | |||
4781 | while (level >= 0) { | ||
4782 | cur = path->nodes[level]; | ||
4783 | BUG_ON(path->slots[level] >= btrfs_header_nritems(cur)); | ||
4640 | 4784 | ||
4641 | if (path->locks[*level]) { | 4785 | ret = walk_down_proc(trans, root, path, wc); |
4642 | btrfs_tree_unlock(path->nodes[*level]); | 4786 | if (ret > 0) |
4643 | path->locks[*level] = 0; | 4787 | break; |
4788 | |||
4789 | if (level == 0) | ||
4790 | break; | ||
4791 | |||
4792 | bytenr = btrfs_node_blockptr(cur, path->slots[level]); | ||
4793 | blocksize = btrfs_level_size(root, level - 1); | ||
4794 | ptr_gen = btrfs_node_ptr_generation(cur, path->slots[level]); | ||
4795 | |||
4796 | next = read_tree_block(root, bytenr, blocksize, ptr_gen); | ||
4797 | btrfs_tree_lock(next); | ||
4798 | btrfs_set_lock_blocking(next); | ||
4799 | |||
4800 | level--; | ||
4801 | BUG_ON(level != btrfs_header_level(next)); | ||
4802 | path->nodes[level] = next; | ||
4803 | path->slots[level] = 0; | ||
4804 | path->locks[level] = 1; | ||
4805 | wc->level = level; | ||
4644 | } | 4806 | } |
4645 | free_extent_buffer(path->nodes[*level]); | ||
4646 | path->nodes[*level] = NULL; | ||
4647 | *level += 1; | ||
4648 | cond_resched(); | ||
4649 | return 0; | 4807 | return 0; |
4650 | } | 4808 | } |
4651 | 4809 | ||
4652 | /* | ||
4653 | * helper for dropping snapshots. This walks back up the tree in the path | ||
4654 | * to find the first node higher up where we haven't yet gone through | ||
4655 | * all the slots | ||
4656 | */ | ||
4657 | static noinline int walk_up_tree(struct btrfs_trans_handle *trans, | 4810 | static noinline int walk_up_tree(struct btrfs_trans_handle *trans, |
4658 | struct btrfs_root *root, | 4811 | struct btrfs_root *root, |
4659 | struct btrfs_path *path, | 4812 | struct btrfs_path *path, |
4660 | int *level, int max_level) | 4813 | struct walk_control *wc, int max_level) |
4661 | { | 4814 | { |
4662 | struct btrfs_root_item *root_item = &root->root_item; | 4815 | int level = wc->level; |
4663 | int i; | ||
4664 | int slot; | ||
4665 | int ret; | 4816 | int ret; |
4666 | 4817 | ||
4667 | for (i = *level; i < max_level && path->nodes[i]; i++) { | 4818 | path->slots[level] = btrfs_header_nritems(path->nodes[level]); |
4668 | slot = path->slots[i]; | 4819 | while (level < max_level && path->nodes[level]) { |
4669 | if (slot + 1 < btrfs_header_nritems(path->nodes[i])) { | 4820 | wc->level = level; |
4670 | /* | 4821 | if (path->slots[level] + 1 < |
4671 | * there is more work to do in this level. | 4822 | btrfs_header_nritems(path->nodes[level])) { |
4672 | * Update the drop_progress marker to reflect | 4823 | path->slots[level]++; |
4673 | * the work we've done so far, and then bump | ||
4674 | * the slot number | ||
4675 | */ | ||
4676 | path->slots[i]++; | ||
4677 | WARN_ON(*level == 0); | ||
4678 | if (max_level == BTRFS_MAX_LEVEL) { | ||
4679 | btrfs_node_key(path->nodes[i], | ||
4680 | &root_item->drop_progress, | ||
4681 | path->slots[i]); | ||
4682 | root_item->drop_level = i; | ||
4683 | } | ||
4684 | *level = i; | ||
4685 | return 0; | 4824 | return 0; |
4686 | } else { | 4825 | } else { |
4687 | struct extent_buffer *parent; | 4826 | ret = walk_up_proc(trans, root, path, wc); |
4688 | 4827 | if (ret > 0) | |
4689 | /* | 4828 | return 0; |
4690 | * this whole node is done, free our reference | ||
4691 | * on it and go up one level | ||
4692 | */ | ||
4693 | if (path->nodes[*level] == root->node) | ||
4694 | parent = path->nodes[*level]; | ||
4695 | else | ||
4696 | parent = path->nodes[*level + 1]; | ||
4697 | 4829 | ||
4698 | clean_tree_block(trans, root, path->nodes[i]); | 4830 | if (path->locks[level]) { |
4699 | ret = btrfs_free_extent(trans, root, | 4831 | btrfs_tree_unlock(path->nodes[level]); |
4700 | path->nodes[i]->start, | 4832 | path->locks[level] = 0; |
4701 | path->nodes[i]->len, | ||
4702 | parent->start, | ||
4703 | btrfs_header_owner(parent), | ||
4704 | *level, 0); | ||
4705 | BUG_ON(ret); | ||
4706 | if (path->locks[*level]) { | ||
4707 | btrfs_tree_unlock(path->nodes[i]); | ||
4708 | path->locks[i] = 0; | ||
4709 | } | 4833 | } |
4710 | free_extent_buffer(path->nodes[i]); | 4834 | free_extent_buffer(path->nodes[level]); |
4711 | path->nodes[i] = NULL; | 4835 | path->nodes[level] = NULL; |
4712 | *level = i + 1; | 4836 | level++; |
4713 | } | 4837 | } |
4714 | } | 4838 | } |
4715 | return 1; | 4839 | return 1; |
4716 | } | 4840 | } |
4717 | 4841 | ||
4718 | /* | 4842 | /* |
4719 | * drop the reference count on the tree rooted at 'snap'. This traverses | 4843 | * drop a subvolume tree. |
4720 | * the tree freeing any blocks that have a ref count of zero after being | 4844 | * |
4721 | * decremented. | 4845 | * this function traverses the tree freeing any blocks that are only |
4846 | * referenced by the tree. | ||
4847 | * | ||
4848 | * when a shared tree block is found, this function decreases its | ||
4849 | * reference count by one. if update_ref is true, this function | ||
4850 | * also makes sure backrefs for the shared block and all lower level | ||
4851 | * blocks are properly updated. | ||
4722 | */ | 4852 | */ |
4723 | int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root | 4853 | int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref) |
4724 | *root) | ||
4725 | { | 4854 | { |
4726 | int ret = 0; | ||
4727 | int wret; | ||
4728 | int level; | ||
4729 | struct btrfs_path *path; | 4855 | struct btrfs_path *path; |
4730 | int update_count; | 4856 | struct btrfs_trans_handle *trans; |
4857 | struct btrfs_root *tree_root = root->fs_info->tree_root; | ||
4731 | struct btrfs_root_item *root_item = &root->root_item; | 4858 | struct btrfs_root_item *root_item = &root->root_item; |
4859 | struct walk_control *wc; | ||
4860 | struct btrfs_key key; | ||
4861 | int err = 0; | ||
4862 | int ret; | ||
4863 | int level; | ||
4732 | 4864 | ||
4733 | path = btrfs_alloc_path(); | 4865 | path = btrfs_alloc_path(); |
4734 | BUG_ON(!path); | 4866 | BUG_ON(!path); |
4735 | 4867 | ||
4736 | level = btrfs_header_level(root->node); | 4868 | wc = kzalloc(sizeof(*wc), GFP_NOFS); |
4869 | BUG_ON(!wc); | ||
4870 | |||
4871 | trans = btrfs_start_transaction(tree_root, 1); | ||
4872 | |||
4737 | if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) { | 4873 | if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) { |
4874 | level = btrfs_header_level(root->node); | ||
4738 | path->nodes[level] = btrfs_lock_root_node(root); | 4875 | path->nodes[level] = btrfs_lock_root_node(root); |
4739 | btrfs_set_lock_blocking(path->nodes[level]); | 4876 | btrfs_set_lock_blocking(path->nodes[level]); |
4740 | path->slots[level] = 0; | 4877 | path->slots[level] = 0; |
4741 | path->locks[level] = 1; | 4878 | path->locks[level] = 1; |
4879 | memset(&wc->update_progress, 0, | ||
4880 | sizeof(wc->update_progress)); | ||
4742 | } else { | 4881 | } else { |
4743 | struct btrfs_key key; | ||
4744 | struct btrfs_disk_key found_key; | ||
4745 | struct extent_buffer *node; | ||
4746 | |||
4747 | btrfs_disk_key_to_cpu(&key, &root_item->drop_progress); | 4882 | btrfs_disk_key_to_cpu(&key, &root_item->drop_progress); |
4883 | memcpy(&wc->update_progress, &key, | ||
4884 | sizeof(wc->update_progress)); | ||
4885 | |||
4748 | level = root_item->drop_level; | 4886 | level = root_item->drop_level; |
4887 | BUG_ON(level == 0); | ||
4749 | path->lowest_level = level; | 4888 | path->lowest_level = level; |
4750 | wret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | 4889 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); |
4751 | if (wret < 0) { | 4890 | path->lowest_level = 0; |
4752 | ret = wret; | 4891 | if (ret < 0) { |
4892 | err = ret; | ||
4753 | goto out; | 4893 | goto out; |
4754 | } | 4894 | } |
4755 | node = path->nodes[level]; | 4895 | btrfs_node_key_to_cpu(path->nodes[level], &key, |
4756 | btrfs_node_key(node, &found_key, path->slots[level]); | 4896 | path->slots[level]); |
4757 | WARN_ON(memcmp(&found_key, &root_item->drop_progress, | 4897 | WARN_ON(memcmp(&key, &wc->update_progress, sizeof(key))); |
4758 | sizeof(found_key))); | 4898 | |
4759 | /* | 4899 | /* |
4760 | * unlock our path, this is safe because only this | 4900 | * unlock our path, this is safe because only this |
4761 | * function is allowed to delete this snapshot | 4901 | * function is allowed to delete this snapshot |
4762 | */ | 4902 | */ |
4763 | btrfs_unlock_up_safe(path, 0); | 4903 | btrfs_unlock_up_safe(path, 0); |
4904 | |||
4905 | level = btrfs_header_level(root->node); | ||
4906 | while (1) { | ||
4907 | btrfs_tree_lock(path->nodes[level]); | ||
4908 | btrfs_set_lock_blocking(path->nodes[level]); | ||
4909 | |||
4910 | ret = btrfs_lookup_extent_info(trans, root, | ||
4911 | path->nodes[level]->start, | ||
4912 | path->nodes[level]->len, | ||
4913 | &wc->refs[level], | ||
4914 | &wc->flags[level]); | ||
4915 | BUG_ON(ret); | ||
4916 | BUG_ON(wc->refs[level] == 0); | ||
4917 | |||
4918 | if (level == root_item->drop_level) | ||
4919 | break; | ||
4920 | |||
4921 | btrfs_tree_unlock(path->nodes[level]); | ||
4922 | WARN_ON(wc->refs[level] != 1); | ||
4923 | level--; | ||
4924 | } | ||
4764 | } | 4925 | } |
4926 | |||
4927 | wc->level = level; | ||
4928 | wc->shared_level = -1; | ||
4929 | wc->stage = DROP_REFERENCE; | ||
4930 | wc->update_ref = update_ref; | ||
4931 | wc->keep_locks = 0; | ||
4932 | |||
4765 | while (1) { | 4933 | while (1) { |
4766 | unsigned long update; | 4934 | ret = walk_down_tree(trans, root, path, wc); |
4767 | wret = walk_down_tree(trans, root, path, &level); | 4935 | if (ret < 0) { |
4768 | if (wret > 0) | 4936 | err = ret; |
4769 | break; | 4937 | break; |
4770 | if (wret < 0) | 4938 | } |
4771 | ret = wret; | ||
4772 | 4939 | ||
4773 | wret = walk_up_tree(trans, root, path, &level, | 4940 | ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL); |
4774 | BTRFS_MAX_LEVEL); | 4941 | if (ret < 0) { |
4775 | if (wret > 0) | 4942 | err = ret; |
4776 | break; | 4943 | break; |
4777 | if (wret < 0) | 4944 | } |
4778 | ret = wret; | 4945 | |
4779 | if (trans->transaction->in_commit || | 4946 | if (ret > 0) { |
4780 | trans->transaction->delayed_refs.flushing) { | 4947 | BUG_ON(wc->stage != DROP_REFERENCE); |
4781 | ret = -EAGAIN; | ||
4782 | break; | 4948 | break; |
4783 | } | 4949 | } |
4784 | for (update_count = 0; update_count < 16; update_count++) { | 4950 | |
4951 | if (wc->stage == DROP_REFERENCE) { | ||
4952 | level = wc->level; | ||
4953 | btrfs_node_key(path->nodes[level], | ||
4954 | &root_item->drop_progress, | ||
4955 | path->slots[level]); | ||
4956 | root_item->drop_level = level; | ||
4957 | } | ||
4958 | |||
4959 | BUG_ON(wc->level == 0); | ||
4960 | if (trans->transaction->in_commit || | ||
4961 | trans->transaction->delayed_refs.flushing) { | ||
4962 | ret = btrfs_update_root(trans, tree_root, | ||
4963 | &root->root_key, | ||
4964 | root_item); | ||
4965 | BUG_ON(ret); | ||
4966 | |||
4967 | btrfs_end_transaction(trans, tree_root); | ||
4968 | trans = btrfs_start_transaction(tree_root, 1); | ||
4969 | } else { | ||
4970 | unsigned long update; | ||
4785 | update = trans->delayed_ref_updates; | 4971 | update = trans->delayed_ref_updates; |
4786 | trans->delayed_ref_updates = 0; | 4972 | trans->delayed_ref_updates = 0; |
4787 | if (update) | 4973 | if (update) |
4788 | btrfs_run_delayed_refs(trans, root, update); | 4974 | btrfs_run_delayed_refs(trans, tree_root, |
4789 | else | 4975 | update); |
4790 | break; | ||
4791 | } | 4976 | } |
4792 | } | 4977 | } |
4978 | btrfs_release_path(root, path); | ||
4979 | BUG_ON(err); | ||
4980 | |||
4981 | ret = btrfs_del_root(trans, tree_root, &root->root_key); | ||
4982 | BUG_ON(ret); | ||
4983 | |||
4984 | free_extent_buffer(root->node); | ||
4985 | free_extent_buffer(root->commit_root); | ||
4986 | kfree(root); | ||
4793 | out: | 4987 | out: |
4988 | btrfs_end_transaction(trans, tree_root); | ||
4989 | kfree(wc); | ||
4794 | btrfs_free_path(path); | 4990 | btrfs_free_path(path); |
4795 | return ret; | 4991 | return err; |
4796 | } | 4992 | } |
4797 | 4993 | ||
4994 | /* | ||
4995 | * drop subtree rooted at tree block 'node'. | ||
4996 | * | ||
4997 | * NOTE: this function will unlock and release tree block 'node' | ||
4998 | */ | ||
4798 | int btrfs_drop_subtree(struct btrfs_trans_handle *trans, | 4999 | int btrfs_drop_subtree(struct btrfs_trans_handle *trans, |
4799 | struct btrfs_root *root, | 5000 | struct btrfs_root *root, |
4800 | struct extent_buffer *node, | 5001 | struct extent_buffer *node, |
4801 | struct extent_buffer *parent) | 5002 | struct extent_buffer *parent) |
4802 | { | 5003 | { |
4803 | struct btrfs_path *path; | 5004 | struct btrfs_path *path; |
5005 | struct walk_control *wc; | ||
4804 | int level; | 5006 | int level; |
4805 | int parent_level; | 5007 | int parent_level; |
4806 | int ret = 0; | 5008 | int ret = 0; |
4807 | int wret; | 5009 | int wret; |
4808 | 5010 | ||
5011 | BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID); | ||
5012 | |||
4809 | path = btrfs_alloc_path(); | 5013 | path = btrfs_alloc_path(); |
4810 | BUG_ON(!path); | 5014 | BUG_ON(!path); |
4811 | 5015 | ||
5016 | wc = kzalloc(sizeof(*wc), GFP_NOFS); | ||
5017 | BUG_ON(!wc); | ||
5018 | |||
4812 | btrfs_assert_tree_locked(parent); | 5019 | btrfs_assert_tree_locked(parent); |
4813 | parent_level = btrfs_header_level(parent); | 5020 | parent_level = btrfs_header_level(parent); |
4814 | extent_buffer_get(parent); | 5021 | extent_buffer_get(parent); |
@@ -4817,24 +5024,33 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans, | |||
4817 | 5024 | ||
4818 | btrfs_assert_tree_locked(node); | 5025 | btrfs_assert_tree_locked(node); |
4819 | level = btrfs_header_level(node); | 5026 | level = btrfs_header_level(node); |
4820 | extent_buffer_get(node); | ||
4821 | path->nodes[level] = node; | 5027 | path->nodes[level] = node; |
4822 | path->slots[level] = 0; | 5028 | path->slots[level] = 0; |
5029 | path->locks[level] = 1; | ||
5030 | |||
5031 | wc->refs[parent_level] = 1; | ||
5032 | wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF; | ||
5033 | wc->level = level; | ||
5034 | wc->shared_level = -1; | ||
5035 | wc->stage = DROP_REFERENCE; | ||
5036 | wc->update_ref = 0; | ||
5037 | wc->keep_locks = 1; | ||
4823 | 5038 | ||
4824 | while (1) { | 5039 | while (1) { |
4825 | wret = walk_down_tree(trans, root, path, &level); | 5040 | wret = walk_down_tree(trans, root, path, wc); |
4826 | if (wret < 0) | 5041 | if (wret < 0) { |
4827 | ret = wret; | 5042 | ret = wret; |
4828 | if (wret != 0) | ||
4829 | break; | 5043 | break; |
5044 | } | ||
4830 | 5045 | ||
4831 | wret = walk_up_tree(trans, root, path, &level, parent_level); | 5046 | wret = walk_up_tree(trans, root, path, wc, parent_level); |
4832 | if (wret < 0) | 5047 | if (wret < 0) |
4833 | ret = wret; | 5048 | ret = wret; |
4834 | if (wret != 0) | 5049 | if (wret != 0) |
4835 | break; | 5050 | break; |
4836 | } | 5051 | } |
4837 | 5052 | ||
5053 | kfree(wc); | ||
4838 | btrfs_free_path(path); | 5054 | btrfs_free_path(path); |
4839 | return ret; | 5055 | return ret; |
4840 | } | 5056 | } |
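
The extent-tree rewrite above replaces the old recursive drop path with a walk_control state machine: in the DROP_REFERENCE stage the walker stops descending as soon as it meets a block referenced by more than one tree (it only drops that block's reference) and frees exclusively owned blocks on the way back up, while the UPDATE_BACKREF stage additionally rewrites back references when update_ref is set. The sketch below models only the DROP_REFERENCE rule on a toy in-memory tree; it ignores locking, transactions and the UPDATE_BACKREF stage, and all names are invented.

#include <stdio.h>
#include <stdlib.h>

struct node {
    int refs;                   /* how many trees reference this block */
    struct node *child[2];      /* NULL for leaves */
};

/*
 * Drop one reference on the subtree rooted at n.  Blocks owned only by
 * this tree (refs == 1) are freed after their children; shared blocks
 * just lose one reference and are not descended into, the early-exit
 * rule walk_down_proc() applies in the DROP_REFERENCE stage.
 */
static void drop_subtree(struct node *n)
{
    if (!n)
        return;
    if (n->refs > 1) {          /* shared: stop walking down here */
        n->refs--;
        return;
    }
    drop_subtree(n->child[0]);  /* exclusively owned: keep walking down */
    drop_subtree(n->child[1]);
    free(n);                    /* ...and free on the way back up */
}

static struct node *mknode(int refs, struct node *l, struct node *r)
{
    struct node *n = malloc(sizeof(*n));

    n->refs = refs;
    n->child[0] = l;
    n->child[1] = r;
    return n;
}

int main(void)
{
    struct node *shared = mknode(2, NULL, NULL);    /* referenced by two trees */
    struct node *root = mknode(1, mknode(1, NULL, NULL), shared);

    drop_subtree(root);
    printf("shared block now has %d reference(s)\n", shared->refs);     /* prints 1 */
    free(shared);
    return 0;
}
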
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 126477eaecf5..7c3cd248d8d6 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c | |||
@@ -151,7 +151,10 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans, | |||
151 | } | 151 | } |
152 | if (end_pos > isize) { | 152 | if (end_pos > isize) { |
153 | i_size_write(inode, end_pos); | 153 | i_size_write(inode, end_pos); |
154 | btrfs_update_inode(trans, root, inode); | 154 | /* we've only changed i_size in ram, and we haven't updated |
155 | * the disk i_size. There is no need to log the inode | ||
156 | * at this time. | ||
157 | */ | ||
155 | } | 158 | } |
156 | err = btrfs_end_transaction(trans, root); | 159 | err = btrfs_end_transaction(trans, root); |
157 | out_unlock: | 160 | out_unlock: |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index dbe1aabf96cd..7ffa3d34ea19 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -3580,12 +3580,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, | |||
3580 | owner = 1; | 3580 | owner = 1; |
3581 | BTRFS_I(inode)->block_group = | 3581 | BTRFS_I(inode)->block_group = |
3582 | btrfs_find_block_group(root, 0, alloc_hint, owner); | 3582 | btrfs_find_block_group(root, 0, alloc_hint, owner); |
3583 | if ((mode & S_IFREG)) { | ||
3584 | if (btrfs_test_opt(root, NODATASUM)) | ||
3585 | BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM; | ||
3586 | if (btrfs_test_opt(root, NODATACOW)) | ||
3587 | BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW; | ||
3588 | } | ||
3589 | 3583 | ||
3590 | key[0].objectid = objectid; | 3584 | key[0].objectid = objectid; |
3591 | btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY); | 3585 | btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY); |
@@ -3640,6 +3634,13 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, | |||
3640 | 3634 | ||
3641 | btrfs_inherit_iflags(inode, dir); | 3635 | btrfs_inherit_iflags(inode, dir); |
3642 | 3636 | ||
3637 | if ((mode & S_IFREG)) { | ||
3638 | if (btrfs_test_opt(root, NODATASUM)) | ||
3639 | BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM; | ||
3640 | if (btrfs_test_opt(root, NODATACOW)) | ||
3641 | BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW; | ||
3642 | } | ||
3643 | |||
3643 | insert_inode_hash(inode); | 3644 | insert_inode_hash(inode); |
3644 | inode_tree_add(inode); | 3645 | inode_tree_add(inode); |
3645 | return inode; | 3646 | return inode; |
@@ -5082,6 +5083,7 @@ static long btrfs_fallocate(struct inode *inode, int mode, | |||
5082 | u64 mask = BTRFS_I(inode)->root->sectorsize - 1; | 5083 | u64 mask = BTRFS_I(inode)->root->sectorsize - 1; |
5083 | struct extent_map *em; | 5084 | struct extent_map *em; |
5084 | struct btrfs_trans_handle *trans; | 5085 | struct btrfs_trans_handle *trans; |
5086 | struct btrfs_root *root; | ||
5085 | int ret; | 5087 | int ret; |
5086 | 5088 | ||
5087 | alloc_start = offset & ~mask; | 5089 | alloc_start = offset & ~mask; |
@@ -5100,6 +5102,13 @@ static long btrfs_fallocate(struct inode *inode, int mode, | |||
5100 | goto out; | 5102 | goto out; |
5101 | } | 5103 | } |
5102 | 5104 | ||
5105 | root = BTRFS_I(inode)->root; | ||
5106 | |||
5107 | ret = btrfs_check_data_free_space(root, inode, | ||
5108 | alloc_end - alloc_start); | ||
5109 | if (ret) | ||
5110 | goto out; | ||
5111 | |||
5103 | locked_end = alloc_end - 1; | 5112 | locked_end = alloc_end - 1; |
5104 | while (1) { | 5113 | while (1) { |
5105 | struct btrfs_ordered_extent *ordered; | 5114 | struct btrfs_ordered_extent *ordered; |
@@ -5107,7 +5116,7 @@ static long btrfs_fallocate(struct inode *inode, int mode, | |||
5107 | trans = btrfs_start_transaction(BTRFS_I(inode)->root, 1); | 5116 | trans = btrfs_start_transaction(BTRFS_I(inode)->root, 1); |
5108 | if (!trans) { | 5117 | if (!trans) { |
5109 | ret = -EIO; | 5118 | ret = -EIO; |
5110 | goto out; | 5119 | goto out_free; |
5111 | } | 5120 | } |
5112 | 5121 | ||
5113 | /* the extent lock is ordered inside the running | 5122 | /* the extent lock is ordered inside the running |
@@ -5168,6 +5177,8 @@ static long btrfs_fallocate(struct inode *inode, int mode, | |||
5168 | GFP_NOFS); | 5177 | GFP_NOFS); |
5169 | 5178 | ||
5170 | btrfs_end_transaction(trans, BTRFS_I(inode)->root); | 5179 | btrfs_end_transaction(trans, BTRFS_I(inode)->root); |
5180 | out_free: | ||
5181 | btrfs_free_reserved_data_space(root, inode, alloc_end - alloc_start); | ||
5171 | out: | 5182 | out: |
5172 | mutex_unlock(&inode->i_mutex); | 5183 | mutex_unlock(&inode->i_mutex); |
5173 | return ret; | 5184 | return ret; |
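
The btrfs_fallocate() hunk reserves data space up front and adds an out_free label so the reservation is released on every path out of the function, including the early -EIO return. A toy version of that reserve-then-release discipline; the helpers and the simulated failure are invented, and only imitate btrfs_check_data_free_space()/btrfs_free_reserved_data_space().

#include <errno.h>
#include <stdio.h>

static long reserved;

static int reserve_space(long bytes)
{
    reserved += bytes;
    return 0;
}

static void release_space(long bytes)
{
    reserved -= bytes;
}

static int do_fallocate(long bytes, int simulate_failure)
{
    int ret;

    ret = reserve_space(bytes);     /* reserve up front, before any work */
    if (ret)
        goto out;

    if (simulate_failure) {         /* any failure after the reservation... */
        ret = -EIO;
        goto out_free;              /* ...must release it, like the new out_free label */
    }

    /* the actual allocation work would happen here */
    ret = 0;
out_free:
    release_space(bytes);           /* reached on success and on failure */
out:
    return ret;
}

int main(void)
{
    do_fallocate(4096, 1);
    printf("outstanding reservation after failed call: %ld\n", reserved);  /* 0 */
    return 0;
}
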
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index eff18f5b5362..9f4db848db10 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c | |||
@@ -1028,7 +1028,8 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, | |||
1028 | struct btrfs_file_extent_item); | 1028 | struct btrfs_file_extent_item); |
1029 | comp = btrfs_file_extent_compression(leaf, extent); | 1029 | comp = btrfs_file_extent_compression(leaf, extent); |
1030 | type = btrfs_file_extent_type(leaf, extent); | 1030 | type = btrfs_file_extent_type(leaf, extent); |
1031 | if (type == BTRFS_FILE_EXTENT_REG) { | 1031 | if (type == BTRFS_FILE_EXTENT_REG || |
1032 | type == BTRFS_FILE_EXTENT_PREALLOC) { | ||
1032 | disko = btrfs_file_extent_disk_bytenr(leaf, | 1033 | disko = btrfs_file_extent_disk_bytenr(leaf, |
1033 | extent); | 1034 | extent); |
1034 | diskl = btrfs_file_extent_disk_num_bytes(leaf, | 1035 | diskl = btrfs_file_extent_disk_num_bytes(leaf, |
@@ -1051,7 +1052,8 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, | |||
1051 | new_key.objectid = inode->i_ino; | 1052 | new_key.objectid = inode->i_ino; |
1052 | new_key.offset = key.offset + destoff - off; | 1053 | new_key.offset = key.offset + destoff - off; |
1053 | 1054 | ||
1054 | if (type == BTRFS_FILE_EXTENT_REG) { | 1055 | if (type == BTRFS_FILE_EXTENT_REG || |
1056 | type == BTRFS_FILE_EXTENT_PREALLOC) { | ||
1055 | ret = btrfs_insert_empty_item(trans, root, path, | 1057 | ret = btrfs_insert_empty_item(trans, root, path, |
1056 | &new_key, size); | 1058 | &new_key, size); |
1057 | if (ret) | 1059 | if (ret) |
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index b23dc209ae10..008397934778 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c | |||
@@ -1788,7 +1788,7 @@ static void merge_func(struct btrfs_work *work) | |||
1788 | btrfs_end_transaction(trans, root); | 1788 | btrfs_end_transaction(trans, root); |
1789 | } | 1789 | } |
1790 | 1790 | ||
1791 | btrfs_drop_dead_root(reloc_root); | 1791 | btrfs_drop_snapshot(reloc_root, 0); |
1792 | 1792 | ||
1793 | if (atomic_dec_and_test(async->num_pending)) | 1793 | if (atomic_dec_and_test(async->num_pending)) |
1794 | complete(async->done); | 1794 | complete(async->done); |
@@ -2075,9 +2075,6 @@ static int do_relocation(struct btrfs_trans_handle *trans, | |||
2075 | 2075 | ||
2076 | ret = btrfs_drop_subtree(trans, root, eb, upper->eb); | 2076 | ret = btrfs_drop_subtree(trans, root, eb, upper->eb); |
2077 | BUG_ON(ret); | 2077 | BUG_ON(ret); |
2078 | |||
2079 | btrfs_tree_unlock(eb); | ||
2080 | free_extent_buffer(eb); | ||
2081 | } | 2078 | } |
2082 | if (!lowest) { | 2079 | if (!lowest) { |
2083 | btrfs_tree_unlock(upper->eb); | 2080 | btrfs_tree_unlock(upper->eb); |
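
The relocation hunk works because the reworked btrfs_drop_subtree() (see the extent-tree changes above) now unlocks and releases the buffer it is handed, so do_relocation() dropping its own unlock/free is what avoids a double release. A tiny userspace sketch of that "callee consumes its argument" convention, with invented names.

#include <stdio.h>
#include <stdlib.h>

struct buf {
    int id;
};

/* The callee owns the buffer from here on and frees it exactly once. */
static void drop_subtree(struct buf *b)
{
    printf("dropping buffer %d\n", b->id);
    free(b);
}

int main(void)
{
    struct buf *b = malloc(sizeof(*b));

    b->id = 7;
    drop_subtree(b);
    /* no free(b) here: releasing it again would be a double free */
    return 0;
}
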
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 4e83457ea253..2dbf1c1f56ee 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c | |||
@@ -593,6 +593,7 @@ int btrfs_defrag_root(struct btrfs_root *root, int cacheonly) | |||
593 | return 0; | 593 | return 0; |
594 | } | 594 | } |
595 | 595 | ||
596 | #if 0 | ||
596 | /* | 597 | /* |
597 | * when dropping snapshots, we generate a ton of delayed refs, and it makes | 598 | * when dropping snapshots, we generate a ton of delayed refs, and it makes |
598 | * sense not to join the transaction while it is trying to flush the current | 599 | * sense not to join the transaction while it is trying to flush the current |
@@ -681,6 +682,7 @@ int btrfs_drop_dead_root(struct btrfs_root *root) | |||
681 | btrfs_btree_balance_dirty(tree_root, nr); | 682 | btrfs_btree_balance_dirty(tree_root, nr); |
682 | return ret; | 683 | return ret; |
683 | } | 684 | } |
685 | #endif | ||
684 | 686 | ||
685 | /* | 687 | /* |
686 | * new snapshots need to be created at a very specific time in the | 688 | * new snapshots need to be created at a very specific time in the |
@@ -1081,7 +1083,7 @@ int btrfs_clean_old_snapshots(struct btrfs_root *root) | |||
1081 | while (!list_empty(&list)) { | 1083 | while (!list_empty(&list)) { |
1082 | root = list_entry(list.next, struct btrfs_root, root_list); | 1084 | root = list_entry(list.next, struct btrfs_root, root_list); |
1083 | list_del_init(&root->root_list); | 1085 | list_del_init(&root->root_list); |
1084 | btrfs_drop_dead_root(root); | 1086 | btrfs_drop_snapshot(root, 0); |
1085 | } | 1087 | } |
1086 | return 0; | 1088 | return 0; |
1087 | } | 1089 | } |
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index ebdbe62a829c..97ce4bf89d15 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
@@ -493,9 +493,9 @@ static int cifs_reopen_file(struct file *file, bool can_flush) | |||
493 | return -EBADF; | 493 | return -EBADF; |
494 | 494 | ||
495 | xid = GetXid(); | 495 | xid = GetXid(); |
496 | mutex_unlock(&pCifsFile->fh_mutex); | 496 | mutex_lock(&pCifsFile->fh_mutex); |
497 | if (!pCifsFile->invalidHandle) { | 497 | if (!pCifsFile->invalidHandle) { |
498 | mutex_lock(&pCifsFile->fh_mutex); | 498 | mutex_unlock(&pCifsFile->fh_mutex); |
499 | rc = 0; | 499 | rc = 0; |
500 | FreeXid(xid); | 500 | FreeXid(xid); |
501 | return rc; | 501 | return rc; |
@@ -527,7 +527,7 @@ static int cifs_reopen_file(struct file *file, bool can_flush) | |||
527 | if (full_path == NULL) { | 527 | if (full_path == NULL) { |
528 | rc = -ENOMEM; | 528 | rc = -ENOMEM; |
529 | reopen_error_exit: | 529 | reopen_error_exit: |
530 | mutex_lock(&pCifsFile->fh_mutex); | 530 | mutex_unlock(&pCifsFile->fh_mutex); |
531 | FreeXid(xid); | 531 | FreeXid(xid); |
532 | return rc; | 532 | return rc; |
533 | } | 533 | } |
@@ -569,14 +569,14 @@ reopen_error_exit: | |||
569 | cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & | 569 | cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & |
570 | CIFS_MOUNT_MAP_SPECIAL_CHR); | 570 | CIFS_MOUNT_MAP_SPECIAL_CHR); |
571 | if (rc) { | 571 | if (rc) { |
572 | mutex_lock(&pCifsFile->fh_mutex); | 572 | mutex_unlock(&pCifsFile->fh_mutex); |
573 | cFYI(1, ("cifs_open returned 0x%x", rc)); | 573 | cFYI(1, ("cifs_open returned 0x%x", rc)); |
574 | cFYI(1, ("oplock: %d", oplock)); | 574 | cFYI(1, ("oplock: %d", oplock)); |
575 | } else { | 575 | } else { |
576 | reopen_success: | 576 | reopen_success: |
577 | pCifsFile->netfid = netfid; | 577 | pCifsFile->netfid = netfid; |
578 | pCifsFile->invalidHandle = false; | 578 | pCifsFile->invalidHandle = false; |
579 | mutex_lock(&pCifsFile->fh_mutex); | 579 | mutex_unlock(&pCifsFile->fh_mutex); |
580 | pCifsInode = CIFS_I(inode); | 580 | pCifsInode = CIFS_I(inode); |
581 | if (pCifsInode) { | 581 | if (pCifsInode) { |
582 | if (can_flush) { | 582 | if (can_flush) { |
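
The cifs_reopen_file() hunks put the locking back into the usual shape: take fh_mutex once on entry and release it on every exit path (the old code had lock and unlock inverted, so the handle check effectively ran unlocked). A userspace sketch of that discipline with pthreads (build with -pthread); the state variable and return codes are invented.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t fh_mutex = PTHREAD_MUTEX_INITIALIZER;
static int invalid_handle = 1;

/* Lock at entry, unlock on every return path. */
static int reopen_file(int fail_path)
{
    int rc = 0;

    pthread_mutex_lock(&fh_mutex);
    if (!invalid_handle) {              /* nothing to do: someone beat us to it */
        pthread_mutex_unlock(&fh_mutex);
        return 0;
    }

    if (fail_path) {                    /* error exit still releases the mutex */
        pthread_mutex_unlock(&fh_mutex);
        return -1;
    }

    invalid_handle = 0;                 /* handle successfully re-established */
    pthread_mutex_unlock(&fh_mutex);
    return rc;
}

int main(void)
{
    printf("first call:  %d\n", reopen_file(0));
    printf("second call: %d\n", reopen_file(0));
    return 0;
}
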
diff --git a/fs/eventfd.c b/fs/eventfd.c index 3f0e1974abdc..31d12de83a2a 100644 --- a/fs/eventfd.c +++ b/fs/eventfd.c | |||
@@ -14,35 +14,44 @@ | |||
14 | #include <linux/list.h> | 14 | #include <linux/list.h> |
15 | #include <linux/spinlock.h> | 15 | #include <linux/spinlock.h> |
16 | #include <linux/anon_inodes.h> | 16 | #include <linux/anon_inodes.h> |
17 | #include <linux/eventfd.h> | ||
18 | #include <linux/syscalls.h> | 17 | #include <linux/syscalls.h> |
19 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/kref.h> | ||
20 | #include <linux/eventfd.h> | ||
20 | 21 | ||
21 | struct eventfd_ctx { | 22 | struct eventfd_ctx { |
23 | struct kref kref; | ||
22 | wait_queue_head_t wqh; | 24 | wait_queue_head_t wqh; |
23 | /* | 25 | /* |
24 | * Every time that a write(2) is performed on an eventfd, the | 26 | * Every time that a write(2) is performed on an eventfd, the |
25 | * value of the __u64 being written is added to "count" and a | 27 | * value of the __u64 being written is added to "count" and a |
26 | * wakeup is performed on "wqh". A read(2) will return the "count" | 28 | * wakeup is performed on "wqh". A read(2) will return the "count" |
27 | * value to userspace, and will reset "count" to zero. The kernel | 29 | * value to userspace, and will reset "count" to zero. The kernel |
28 | * size eventfd_signal() also, adds to the "count" counter and | 30 | * side eventfd_signal() also adds to the "count" counter and |
29 | * issue a wakeup. | 31 | * issues a wakeup. |
30 | */ | 32 | */ |
31 | __u64 count; | 33 | __u64 count; |
32 | unsigned int flags; | 34 | unsigned int flags; |
33 | }; | 35 | }; |
34 | 36 | ||
35 | /* | 37 | /** |
36 | * Adds "n" to the eventfd counter "count". Returns "n" in case of | 38 | * eventfd_signal - Adds @n to the eventfd counter. |
37 | * success, or a value lower then "n" in case of coutner overflow. | 39 | * @ctx: [in] Pointer to the eventfd context. |
38 | * This function is supposed to be called by the kernel in paths | 40 | * @n: [in] Value of the counter to be added to the eventfd internal counter. |
39 | * that do not allow sleeping. In this function we allow the counter | 41 | * The value cannot be negative. |
40 | * to reach the ULLONG_MAX value, and we signal this as overflow | 42 | * |
41 | * condition by returining a POLLERR to poll(2). | 43 | * This function is supposed to be called by the kernel in paths that do not |
44 | * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX | ||
45 | * value, and we signal this as overflow condition by returining a POLLERR | ||
46 | * to poll(2). | ||
47 | * | ||
48 | * Returns @n in case of success, a non-negative number lower than @n in case | ||
49 | * of overflow, or the following error codes: | ||
50 | * | ||
51 | * -EINVAL : The value of @n is negative. | ||
42 | */ | 52 | */ |
43 | int eventfd_signal(struct file *file, int n) | 53 | int eventfd_signal(struct eventfd_ctx *ctx, int n) |
44 | { | 54 | { |
45 | struct eventfd_ctx *ctx = file->private_data; | ||
46 | unsigned long flags; | 55 | unsigned long flags; |
47 | 56 | ||
48 | if (n < 0) | 57 | if (n < 0) |
@@ -59,9 +68,45 @@ int eventfd_signal(struct file *file, int n) | |||
59 | } | 68 | } |
60 | EXPORT_SYMBOL_GPL(eventfd_signal); | 69 | EXPORT_SYMBOL_GPL(eventfd_signal); |
61 | 70 | ||
71 | static void eventfd_free(struct kref *kref) | ||
72 | { | ||
73 | struct eventfd_ctx *ctx = container_of(kref, struct eventfd_ctx, kref); | ||
74 | |||
75 | kfree(ctx); | ||
76 | } | ||
77 | |||
78 | /** | ||
79 | * eventfd_ctx_get - Acquires a reference to the internal eventfd context. | ||
80 | * @ctx: [in] Pointer to the eventfd context. | ||
81 | * | ||
82 | * Returns: In case of success, returns a pointer to the eventfd context. | ||
83 | */ | ||
84 | struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx) | ||
85 | { | ||
86 | kref_get(&ctx->kref); | ||
87 | return ctx; | ||
88 | } | ||
89 | EXPORT_SYMBOL_GPL(eventfd_ctx_get); | ||
90 | |||
91 | /** | ||
92 | * eventfd_ctx_put - Releases a reference to the internal eventfd context. | ||
93 | * @ctx: [in] Pointer to eventfd context. | ||
94 | * | ||
95 | * The eventfd context reference must have been previously acquired either | ||
96 | * with eventfd_ctx_get() or eventfd_ctx_fdget()). | ||
97 | */ | ||
98 | void eventfd_ctx_put(struct eventfd_ctx *ctx) | ||
99 | { | ||
100 | kref_put(&ctx->kref, eventfd_free); | ||
101 | } | ||
102 | EXPORT_SYMBOL_GPL(eventfd_ctx_put); | ||
103 | |||
62 | static int eventfd_release(struct inode *inode, struct file *file) | 104 | static int eventfd_release(struct inode *inode, struct file *file) |
63 | { | 105 | { |
64 | kfree(file->private_data); | 106 | struct eventfd_ctx *ctx = file->private_data; |
107 | |||
108 | wake_up_poll(&ctx->wqh, POLLHUP); | ||
109 | eventfd_ctx_put(ctx); | ||
65 | return 0; | 110 | return 0; |
66 | } | 111 | } |
67 | 112 | ||
@@ -185,6 +230,16 @@ static const struct file_operations eventfd_fops = { | |||
185 | .write = eventfd_write, | 230 | .write = eventfd_write, |
186 | }; | 231 | }; |
187 | 232 | ||
233 | /** | ||
234 | * eventfd_fget - Acquire a reference of an eventfd file descriptor. | ||
235 | * @fd: [in] Eventfd file descriptor. | ||
236 | * | ||
237 | * Returns a pointer to the eventfd file structure in case of success, or the | ||
238 | * following error pointer: | ||
239 | * | ||
240 | * -EBADF : Invalid @fd file descriptor. | ||
241 | * -EINVAL : The @fd file descriptor is not an eventfd file. | ||
242 | */ | ||
188 | struct file *eventfd_fget(int fd) | 243 | struct file *eventfd_fget(int fd) |
189 | { | 244 | { |
190 | struct file *file; | 245 | struct file *file; |
@@ -201,6 +256,48 @@ struct file *eventfd_fget(int fd) | |||
201 | } | 256 | } |
202 | EXPORT_SYMBOL_GPL(eventfd_fget); | 257 | EXPORT_SYMBOL_GPL(eventfd_fget); |
203 | 258 | ||
259 | /** | ||
260 | * eventfd_ctx_fdget - Acquires a reference to the internal eventfd context. | ||
261 | * @fd: [in] Eventfd file descriptor. | ||
262 | * | ||
263 | * Returns a pointer to the internal eventfd context, otherwise the error | ||
264 | * pointers returned by the following functions: | ||
265 | * | ||
266 | * eventfd_fget | ||
267 | */ | ||
268 | struct eventfd_ctx *eventfd_ctx_fdget(int fd) | ||
269 | { | ||
270 | struct file *file; | ||
271 | struct eventfd_ctx *ctx; | ||
272 | |||
273 | file = eventfd_fget(fd); | ||
274 | if (IS_ERR(file)) | ||
275 | return (struct eventfd_ctx *) file; | ||
276 | ctx = eventfd_ctx_get(file->private_data); | ||
277 | fput(file); | ||
278 | |||
279 | return ctx; | ||
280 | } | ||
281 | EXPORT_SYMBOL_GPL(eventfd_ctx_fdget); | ||
282 | |||
283 | /** | ||
284 | * eventfd_ctx_fileget - Acquires a reference to the internal eventfd context. | ||
285 | * @file: [in] Eventfd file pointer. | ||
286 | * | ||
287 | * Returns a pointer to the internal eventfd context, otherwise the error | ||
288 | * pointer: | ||
289 | * | ||
290 | * -EINVAL : The @file pointer is not an eventfd file. | ||
291 | */ | ||
292 | struct eventfd_ctx *eventfd_ctx_fileget(struct file *file) | ||
293 | { | ||
294 | if (file->f_op != &eventfd_fops) | ||
295 | return ERR_PTR(-EINVAL); | ||
296 | |||
297 | return eventfd_ctx_get(file->private_data); | ||
298 | } | ||
299 | EXPORT_SYMBOL_GPL(eventfd_ctx_fileget); | ||
300 | |||
204 | SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags) | 301 | SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags) |
205 | { | 302 | { |
206 | int fd; | 303 | int fd; |
@@ -217,6 +314,7 @@ SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags) | |||
217 | if (!ctx) | 314 | if (!ctx) |
218 | return -ENOMEM; | 315 | return -ENOMEM; |
219 | 316 | ||
317 | kref_init(&ctx->kref); | ||
220 | init_waitqueue_head(&ctx->wqh); | 318 | init_waitqueue_head(&ctx->wqh); |
221 | ctx->count = count; | 319 | ctx->count = count; |
222 | ctx->flags = flags; | 320 | ctx->flags = flags; |
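
The eventfd changes are an internal refactor (a kref-counted eventfd_ctx that in-kernel users can hold directly, plus a POLLHUP wakeup on release), but the counter semantics the comment describes are easy to observe from userspace: each write(2) adds its 8-byte value to the counter, and a read(2) returns the accumulated value and resets it. A small, runnable demonstration using the regular eventfd(2) syscall.

#include <stdint.h>
#include <stdio.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main(void)
{
    uint64_t val;
    int efd = eventfd(0, 0);            /* counter starts at 0 */

    if (efd < 0) {
        perror("eventfd");
        return 1;
    }

    val = 3;
    if (write(efd, &val, sizeof(val)) != sizeof(val))   /* adds 3 */
        perror("write");
    val = 4;
    if (write(efd, &val, sizeof(val)) != sizeof(val))   /* adds 4 more */
        perror("write");

    if (read(efd, &val, sizeof(val)) == sizeof(val))    /* returns 7 and resets to 0 */
        printf("counter read back: %llu\n", (unsigned long long)val);

    close(efd);
    return 0;
}
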
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c index 6524ecaebb7a..e1dedb0f7873 100644 --- a/fs/ext2/namei.c +++ b/fs/ext2/namei.c | |||
@@ -66,8 +66,16 @@ static struct dentry *ext2_lookup(struct inode * dir, struct dentry *dentry, str | |||
66 | inode = NULL; | 66 | inode = NULL; |
67 | if (ino) { | 67 | if (ino) { |
68 | inode = ext2_iget(dir->i_sb, ino); | 68 | inode = ext2_iget(dir->i_sb, ino); |
69 | if (IS_ERR(inode)) | 69 | if (unlikely(IS_ERR(inode))) { |
70 | return ERR_CAST(inode); | 70 | if (PTR_ERR(inode) == -ESTALE) { |
71 | ext2_error(dir->i_sb, __func__, | ||
72 | "deleted inode referenced: %lu", | ||
73 | ino); | ||
74 | return ERR_PTR(-EIO); | ||
75 | } else { | ||
76 | return ERR_CAST(inode); | ||
77 | } | ||
78 | } | ||
71 | } | 79 | } |
72 | return d_splice_alias(inode, dentry); | 80 | return d_splice_alias(inode, dentry); |
73 | } | 81 | } |
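
The ext2_lookup() hunk turns an -ESTALE from ext2_iget() (a directory entry pointing at a deleted inode) into a logged -EIO instead of passing the confusing error up to the VFS. The error-translation shape, as a userspace sketch with invented helpers.

#include <errno.h>
#include <stdio.h>

/* Hypothetical lookup helper: returns 0 on success or a negative errno. */
static int iget(unsigned long ino)
{
    return ino == 0 ? -ESTALE : 0;      /* pretend inode 0 is a deleted inode */
}

/* Report the corruption and hand the caller -EIO rather than -ESTALE. */
static int lookup(unsigned long ino)
{
    int err = iget(ino);

    if (err == -ESTALE) {
        fprintf(stderr, "deleted inode referenced: %lu\n", ino);
        return -EIO;
    }
    return err;
}

int main(void)
{
    printf("lookup(0) -> %d\n", lookup(0));
    printf("lookup(5) -> %d\n", lookup(5));
    return 0;
}
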
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 8fed2ed12f38..f58ecbc416c8 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c | |||
@@ -849,6 +849,81 @@ err: | |||
849 | return err; | 849 | return err; |
850 | } | 850 | } |
851 | 851 | ||
852 | static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size, | ||
853 | struct fuse_copy_state *cs) | ||
854 | { | ||
855 | struct fuse_notify_inval_inode_out outarg; | ||
856 | int err = -EINVAL; | ||
857 | |||
858 | if (size != sizeof(outarg)) | ||
859 | goto err; | ||
860 | |||
861 | err = fuse_copy_one(cs, &outarg, sizeof(outarg)); | ||
862 | if (err) | ||
863 | goto err; | ||
864 | fuse_copy_finish(cs); | ||
865 | |||
866 | down_read(&fc->killsb); | ||
867 | err = -ENOENT; | ||
868 | if (!fc->sb) | ||
869 | goto err_unlock; | ||
870 | |||
871 | err = fuse_reverse_inval_inode(fc->sb, outarg.ino, | ||
872 | outarg.off, outarg.len); | ||
873 | |||
874 | err_unlock: | ||
875 | up_read(&fc->killsb); | ||
876 | return err; | ||
877 | |||
878 | err: | ||
879 | fuse_copy_finish(cs); | ||
880 | return err; | ||
881 | } | ||
882 | |||
883 | static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size, | ||
884 | struct fuse_copy_state *cs) | ||
885 | { | ||
886 | struct fuse_notify_inval_entry_out outarg; | ||
887 | int err = -EINVAL; | ||
888 | char buf[FUSE_NAME_MAX+1]; | ||
889 | struct qstr name; | ||
890 | |||
891 | if (size < sizeof(outarg)) | ||
892 | goto err; | ||
893 | |||
894 | err = fuse_copy_one(cs, &outarg, sizeof(outarg)); | ||
895 | if (err) | ||
896 | goto err; | ||
897 | |||
898 | err = -ENAMETOOLONG; | ||
899 | if (outarg.namelen > FUSE_NAME_MAX) | ||
900 | goto err; | ||
901 | |||
902 | name.name = buf; | ||
903 | name.len = outarg.namelen; | ||
904 | err = fuse_copy_one(cs, buf, outarg.namelen + 1); | ||
905 | if (err) | ||
906 | goto err; | ||
907 | fuse_copy_finish(cs); | ||
908 | buf[outarg.namelen] = 0; | ||
909 | name.hash = full_name_hash(name.name, name.len); | ||
910 | |||
911 | down_read(&fc->killsb); | ||
912 | err = -ENOENT; | ||
913 | if (!fc->sb) | ||
914 | goto err_unlock; | ||
915 | |||
916 | err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name); | ||
917 | |||
918 | err_unlock: | ||
919 | up_read(&fc->killsb); | ||
920 | return err; | ||
921 | |||
922 | err: | ||
923 | fuse_copy_finish(cs); | ||
924 | return err; | ||
925 | } | ||
926 | |||
852 | static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code, | 927 | static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code, |
853 | unsigned int size, struct fuse_copy_state *cs) | 928 | unsigned int size, struct fuse_copy_state *cs) |
854 | { | 929 | { |
@@ -856,6 +931,12 @@ static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code, | |||
856 | case FUSE_NOTIFY_POLL: | 931 | case FUSE_NOTIFY_POLL: |
857 | return fuse_notify_poll(fc, size, cs); | 932 | return fuse_notify_poll(fc, size, cs); |
858 | 933 | ||
934 | case FUSE_NOTIFY_INVAL_INODE: | ||
935 | return fuse_notify_inval_inode(fc, size, cs); | ||
936 | |||
937 | case FUSE_NOTIFY_INVAL_ENTRY: | ||
938 | return fuse_notify_inval_entry(fc, size, cs); | ||
939 | |||
859 | default: | 940 | default: |
860 | fuse_copy_finish(cs); | 941 | fuse_copy_finish(cs); |
861 | return -EINVAL; | 942 | return -EINVAL; |
@@ -910,7 +991,7 @@ static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov, | |||
910 | unsigned long nr_segs, loff_t pos) | 991 | unsigned long nr_segs, loff_t pos) |
911 | { | 992 | { |
912 | int err; | 993 | int err; |
913 | unsigned nbytes = iov_length(iov, nr_segs); | 994 | size_t nbytes = iov_length(iov, nr_segs); |
914 | struct fuse_req *req; | 995 | struct fuse_req *req; |
915 | struct fuse_out_header oh; | 996 | struct fuse_out_header oh; |
916 | struct fuse_copy_state cs; | 997 | struct fuse_copy_state cs; |
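
fuse_notify() above grows two handlers for unsolicited notifications; each validates the payload size before acting, and anything unrecognized is rejected with -EINVAL. A stripped-down userspace sketch of that dispatch-and-validate shape; the message layout and notify codes here are invented and are not the FUSE wire protocol.

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical, simplified message layout. */
struct inval_inode_msg {
    unsigned long ino;
    long off;
    long len;
};

enum { NOTIFY_POLL = 1, NOTIFY_INVAL_INODE = 2, NOTIFY_INVAL_ENTRY = 3 };

static int notify_inval_inode(const void *buf, size_t size)
{
    struct inval_inode_msg msg;

    if (size != sizeof(msg))            /* reject short or oversized payloads */
        return -EINVAL;
    memcpy(&msg, buf, sizeof(msg));
    printf("invalidate inode %lu off %ld len %ld\n", msg.ino, msg.off, msg.len);
    return 0;
}

static int dispatch_notify(int code, const void *buf, size_t size)
{
    switch (code) {
    case NOTIFY_INVAL_INODE:
        return notify_inval_inode(buf, size);
    default:                            /* unknown codes are rejected */
        return -EINVAL;
    }
}

int main(void)
{
    struct inval_inode_msg msg = { 42, 0, 4096 };

    printf("good payload:  %d\n", dispatch_notify(NOTIFY_INVAL_INODE, &msg, sizeof(msg)));
    printf("short payload: %d\n", dispatch_notify(NOTIFY_INVAL_INODE, &msg, 4));
    return 0;
}
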
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index b3089a083d30..e703654e7f40 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c | |||
@@ -375,7 +375,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode, | |||
375 | struct fuse_conn *fc = get_fuse_conn(dir); | 375 | struct fuse_conn *fc = get_fuse_conn(dir); |
376 | struct fuse_req *req; | 376 | struct fuse_req *req; |
377 | struct fuse_req *forget_req; | 377 | struct fuse_req *forget_req; |
378 | struct fuse_open_in inarg; | 378 | struct fuse_create_in inarg; |
379 | struct fuse_open_out outopen; | 379 | struct fuse_open_out outopen; |
380 | struct fuse_entry_out outentry; | 380 | struct fuse_entry_out outentry; |
381 | struct fuse_file *ff; | 381 | struct fuse_file *ff; |
@@ -399,15 +399,20 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode, | |||
399 | if (!ff) | 399 | if (!ff) |
400 | goto out_put_request; | 400 | goto out_put_request; |
401 | 401 | ||
402 | if (!fc->dont_mask) | ||
403 | mode &= ~current_umask(); | ||
404 | |||
402 | flags &= ~O_NOCTTY; | 405 | flags &= ~O_NOCTTY; |
403 | memset(&inarg, 0, sizeof(inarg)); | 406 | memset(&inarg, 0, sizeof(inarg)); |
404 | memset(&outentry, 0, sizeof(outentry)); | 407 | memset(&outentry, 0, sizeof(outentry)); |
405 | inarg.flags = flags; | 408 | inarg.flags = flags; |
406 | inarg.mode = mode; | 409 | inarg.mode = mode; |
410 | inarg.umask = current_umask(); | ||
407 | req->in.h.opcode = FUSE_CREATE; | 411 | req->in.h.opcode = FUSE_CREATE; |
408 | req->in.h.nodeid = get_node_id(dir); | 412 | req->in.h.nodeid = get_node_id(dir); |
409 | req->in.numargs = 2; | 413 | req->in.numargs = 2; |
410 | req->in.args[0].size = sizeof(inarg); | 414 | req->in.args[0].size = fc->minor < 12 ? sizeof(struct fuse_open_in) : |
415 | sizeof(inarg); | ||
411 | req->in.args[0].value = &inarg; | 416 | req->in.args[0].value = &inarg; |
412 | req->in.args[1].size = entry->d_name.len + 1; | 417 | req->in.args[1].size = entry->d_name.len + 1; |
413 | req->in.args[1].value = entry->d_name.name; | 418 | req->in.args[1].value = entry->d_name.name; |
@@ -546,12 +551,17 @@ static int fuse_mknod(struct inode *dir, struct dentry *entry, int mode, | |||
546 | if (IS_ERR(req)) | 551 | if (IS_ERR(req)) |
547 | return PTR_ERR(req); | 552 | return PTR_ERR(req); |
548 | 553 | ||
554 | if (!fc->dont_mask) | ||
555 | mode &= ~current_umask(); | ||
556 | |||
549 | memset(&inarg, 0, sizeof(inarg)); | 557 | memset(&inarg, 0, sizeof(inarg)); |
550 | inarg.mode = mode; | 558 | inarg.mode = mode; |
551 | inarg.rdev = new_encode_dev(rdev); | 559 | inarg.rdev = new_encode_dev(rdev); |
560 | inarg.umask = current_umask(); | ||
552 | req->in.h.opcode = FUSE_MKNOD; | 561 | req->in.h.opcode = FUSE_MKNOD; |
553 | req->in.numargs = 2; | 562 | req->in.numargs = 2; |
554 | req->in.args[0].size = sizeof(inarg); | 563 | req->in.args[0].size = fc->minor < 12 ? FUSE_COMPAT_MKNOD_IN_SIZE : |
564 | sizeof(inarg); | ||
555 | req->in.args[0].value = &inarg; | 565 | req->in.args[0].value = &inarg; |
556 | req->in.args[1].size = entry->d_name.len + 1; | 566 | req->in.args[1].size = entry->d_name.len + 1; |
557 | req->in.args[1].value = entry->d_name.name; | 567 | req->in.args[1].value = entry->d_name.name; |
@@ -578,8 +588,12 @@ static int fuse_mkdir(struct inode *dir, struct dentry *entry, int mode) | |||
578 | if (IS_ERR(req)) | 588 | if (IS_ERR(req)) |
579 | return PTR_ERR(req); | 589 | return PTR_ERR(req); |
580 | 590 | ||
591 | if (!fc->dont_mask) | ||
592 | mode &= ~current_umask(); | ||
593 | |||
581 | memset(&inarg, 0, sizeof(inarg)); | 594 | memset(&inarg, 0, sizeof(inarg)); |
582 | inarg.mode = mode; | 595 | inarg.mode = mode; |
596 | inarg.umask = current_umask(); | ||
583 | req->in.h.opcode = FUSE_MKDIR; | 597 | req->in.h.opcode = FUSE_MKDIR; |
584 | req->in.numargs = 2; | 598 | req->in.numargs = 2; |
585 | req->in.args[0].size = sizeof(inarg); | 599 | req->in.args[0].size = sizeof(inarg); |
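In the create, mknod and mkdir paths above the kernel now strips the umask only when the daemon did not negotiate FUSE_DONT_MASK, and it forwards the raw umask in the request either way so the filesystem can apply its own policy (for example honouring default ACLs; see the MS_POSIXACL handling in inode.c further down). A minimal pass-through-style sketch of the daemon side, assuming a hypothetical low-level handler that is already given the decoded fuse_mknod_in and a resolved path:

#include <linux/fuse.h>
#include <sys/stat.h>
#include <sys/types.h>

/* Hypothetical MKNOD handler for a daemon that set FUSE_DONT_MASK: the
 * kernel no longer strips the umask, so plain POSIX behaviour is applied
 * here; an ACL-aware filesystem could consult default ACLs instead. */
static int handle_mknod(const struct fuse_mknod_in *in, const char *path)
{
	mode_t mode = in->mode & ~in->umask;	/* umask is the new 7.12 field */

	return mknod(path, mode, in->rdev);
}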
@@ -845,6 +859,43 @@ int fuse_update_attributes(struct inode *inode, struct kstat *stat, | |||
845 | return err; | 859 | return err; |
846 | } | 860 | } |
847 | 861 | ||
862 | int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid, | ||
863 | struct qstr *name) | ||
864 | { | ||
865 | int err = -ENOTDIR; | ||
866 | struct inode *parent; | ||
867 | struct dentry *dir; | ||
868 | struct dentry *entry; | ||
869 | |||
870 | parent = ilookup5(sb, parent_nodeid, fuse_inode_eq, &parent_nodeid); | ||
871 | if (!parent) | ||
872 | return -ENOENT; | ||
873 | |||
874 | mutex_lock(&parent->i_mutex); | ||
875 | if (!S_ISDIR(parent->i_mode)) | ||
876 | goto unlock; | ||
877 | |||
878 | err = -ENOENT; | ||
879 | dir = d_find_alias(parent); | ||
880 | if (!dir) | ||
881 | goto unlock; | ||
882 | |||
883 | entry = d_lookup(dir, name); | ||
884 | dput(dir); | ||
885 | if (!entry) | ||
886 | goto unlock; | ||
887 | |||
888 | fuse_invalidate_attr(parent); | ||
889 | fuse_invalidate_entry(entry); | ||
890 | dput(entry); | ||
891 | err = 0; | ||
892 | |||
893 | unlock: | ||
894 | mutex_unlock(&parent->i_mutex); | ||
895 | iput(parent); | ||
896 | return err; | ||
897 | } | ||
898 | |||
848 | /* | 899 | /* |
849 | * Calling into a user-controlled filesystem gives the filesystem | 900 | * Calling into a user-controlled filesystem gives the filesystem |
850 | * daemon ptrace-like capabilities over the requester process. This | 901 | * daemon ptrace-like capabilities over the requester process. This |
diff --git a/fs/fuse/file.c b/fs/fuse/file.c index fce6ce694fde..cbc464043b6f 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c | |||
@@ -1922,7 +1922,7 @@ unsigned fuse_file_poll(struct file *file, poll_table *wait) | |||
1922 | 1922 | ||
1923 | req = fuse_get_req(fc); | 1923 | req = fuse_get_req(fc); |
1924 | if (IS_ERR(req)) | 1924 | if (IS_ERR(req)) |
1925 | return PTR_ERR(req); | 1925 | return POLLERR; |
1926 | 1926 | ||
1927 | req->in.h.opcode = FUSE_POLL; | 1927 | req->in.h.opcode = FUSE_POLL; |
1928 | req->in.h.nodeid = ff->nodeid; | 1928 | req->in.h.nodeid = ff->nodeid; |
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index aaf2f9ff970e..52b641fc0faf 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h | |||
@@ -446,6 +446,9 @@ struct fuse_conn { | |||
446 | /** Do multi-page cached writes */ | 446 | /** Do multi-page cached writes */ |
447 | unsigned big_writes:1; | 447 | unsigned big_writes:1; |
448 | 448 | ||
449 | /** Don't apply umask to creation modes */ | ||
450 | unsigned dont_mask:1; | ||
451 | |||
449 | /** The number of requests waiting for completion */ | 452 | /** The number of requests waiting for completion */ |
450 | atomic_t num_waiting; | 453 | atomic_t num_waiting; |
451 | 454 | ||
@@ -481,6 +484,12 @@ struct fuse_conn { | |||
481 | 484 | ||
482 | /** Called on final put */ | 485 | /** Called on final put */ |
483 | void (*release)(struct fuse_conn *); | 486 | void (*release)(struct fuse_conn *); |
487 | |||
488 | /** Super block for this connection. */ | ||
489 | struct super_block *sb; | ||
490 | |||
491 | /** Read/write semaphore to hold when accessing sb. */ | ||
492 | struct rw_semaphore killsb; | ||
484 | }; | 493 | }; |
485 | 494 | ||
486 | static inline struct fuse_conn *get_fuse_conn_super(struct super_block *sb) | 495 | static inline struct fuse_conn *get_fuse_conn_super(struct super_block *sb) |
@@ -509,6 +518,11 @@ extern const struct file_operations fuse_dev_operations; | |||
509 | extern const struct dentry_operations fuse_dentry_operations; | 518 | extern const struct dentry_operations fuse_dentry_operations; |
510 | 519 | ||
511 | /** | 520 | /** |
521 | * Inode to nodeid comparison. | ||
522 | */ | ||
523 | int fuse_inode_eq(struct inode *inode, void *_nodeidp); | ||
524 | |||
525 | /** | ||
512 | * Get a filled in inode | 526 | * Get a filled in inode |
513 | */ | 527 | */ |
514 | struct inode *fuse_iget(struct super_block *sb, u64 nodeid, | 528 | struct inode *fuse_iget(struct super_block *sb, u64 nodeid, |
@@ -708,6 +722,19 @@ void fuse_release_nowrite(struct inode *inode); | |||
708 | 722 | ||
709 | u64 fuse_get_attr_version(struct fuse_conn *fc); | 723 | u64 fuse_get_attr_version(struct fuse_conn *fc); |
710 | 724 | ||
725 | /** | ||
726 | * File-system tells the kernel to invalidate cache for the given node id. | ||
727 | */ | ||
728 | int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid, | ||
729 | loff_t offset, loff_t len); | ||
730 | |||
731 | /** | ||
732 | * File-system tells the kernel to invalidate parent attributes and | ||
733 | * the dentry matching parent/name. | ||
734 | */ | ||
735 | int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid, | ||
736 | struct qstr *name); | ||
737 | |||
711 | int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file, | 738 | int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file, |
712 | bool isdir); | 739 | bool isdir); |
713 | ssize_t fuse_direct_io(struct file *file, const char __user *buf, | 740 | ssize_t fuse_direct_io(struct file *file, const char __user *buf, |
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index d8673ccf90b7..f91ccc4a189d 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c | |||
@@ -206,7 +206,7 @@ static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr) | |||
206 | BUG(); | 206 | BUG(); |
207 | } | 207 | } |
208 | 208 | ||
209 | static int fuse_inode_eq(struct inode *inode, void *_nodeidp) | 209 | int fuse_inode_eq(struct inode *inode, void *_nodeidp) |
210 | { | 210 | { |
211 | u64 nodeid = *(u64 *) _nodeidp; | 211 | u64 nodeid = *(u64 *) _nodeidp; |
212 | if (get_node_id(inode) == nodeid) | 212 | if (get_node_id(inode) == nodeid) |
@@ -257,6 +257,31 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid, | |||
257 | return inode; | 257 | return inode; |
258 | } | 258 | } |
259 | 259 | ||
260 | int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid, | ||
261 | loff_t offset, loff_t len) | ||
262 | { | ||
263 | struct inode *inode; | ||
264 | pgoff_t pg_start; | ||
265 | pgoff_t pg_end; | ||
266 | |||
267 | inode = ilookup5(sb, nodeid, fuse_inode_eq, &nodeid); | ||
268 | if (!inode) | ||
269 | return -ENOENT; | ||
270 | |||
271 | fuse_invalidate_attr(inode); | ||
272 | if (offset >= 0) { | ||
273 | pg_start = offset >> PAGE_CACHE_SHIFT; | ||
274 | if (len <= 0) | ||
275 | pg_end = -1; | ||
276 | else | ||
277 | pg_end = (offset + len - 1) >> PAGE_CACHE_SHIFT; | ||
278 | invalidate_inode_pages2_range(inode->i_mapping, | ||
279 | pg_start, pg_end); | ||
280 | } | ||
281 | iput(inode); | ||
282 | return 0; | ||
283 | } | ||
284 | |||
260 | static void fuse_umount_begin(struct super_block *sb) | 285 | static void fuse_umount_begin(struct super_block *sb) |
261 | { | 286 | { |
262 | fuse_abort_conn(get_fuse_conn_super(sb)); | 287 | fuse_abort_conn(get_fuse_conn_super(sb)); |
@@ -480,6 +505,7 @@ void fuse_conn_init(struct fuse_conn *fc) | |||
480 | memset(fc, 0, sizeof(*fc)); | 505 | memset(fc, 0, sizeof(*fc)); |
481 | spin_lock_init(&fc->lock); | 506 | spin_lock_init(&fc->lock); |
482 | mutex_init(&fc->inst_mutex); | 507 | mutex_init(&fc->inst_mutex); |
508 | init_rwsem(&fc->killsb); | ||
483 | atomic_set(&fc->count, 1); | 509 | atomic_set(&fc->count, 1); |
484 | init_waitqueue_head(&fc->waitq); | 510 | init_waitqueue_head(&fc->waitq); |
485 | init_waitqueue_head(&fc->blocked_waitq); | 511 | init_waitqueue_head(&fc->blocked_waitq); |
@@ -725,6 +751,8 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req) | |||
725 | } | 751 | } |
726 | if (arg->flags & FUSE_BIG_WRITES) | 752 | if (arg->flags & FUSE_BIG_WRITES) |
727 | fc->big_writes = 1; | 753 | fc->big_writes = 1; |
754 | if (arg->flags & FUSE_DONT_MASK) | ||
755 | fc->dont_mask = 1; | ||
728 | } else { | 756 | } else { |
729 | ra_pages = fc->max_read / PAGE_CACHE_SIZE; | 757 | ra_pages = fc->max_read / PAGE_CACHE_SIZE; |
730 | fc->no_lock = 1; | 758 | fc->no_lock = 1; |
@@ -748,7 +776,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req) | |||
748 | arg->minor = FUSE_KERNEL_MINOR_VERSION; | 776 | arg->minor = FUSE_KERNEL_MINOR_VERSION; |
749 | arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE; | 777 | arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE; |
750 | arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC | | 778 | arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC | |
751 | FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES; | 779 | FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK; |
752 | req->in.h.opcode = FUSE_INIT; | 780 | req->in.h.opcode = FUSE_INIT; |
753 | req->in.numargs = 1; | 781 | req->in.numargs = 1; |
754 | req->in.args[0].size = sizeof(*arg); | 782 | req->in.args[0].size = sizeof(*arg); |
@@ -860,10 +888,16 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) | |||
860 | fuse_conn_init(fc); | 888 | fuse_conn_init(fc); |
861 | 889 | ||
862 | fc->dev = sb->s_dev; | 890 | fc->dev = sb->s_dev; |
891 | fc->sb = sb; | ||
863 | err = fuse_bdi_init(fc, sb); | 892 | err = fuse_bdi_init(fc, sb); |
864 | if (err) | 893 | if (err) |
865 | goto err_put_conn; | 894 | goto err_put_conn; |
866 | 895 | ||
896 | /* Handle umasking inside the fuse code */ | ||
897 | if (sb->s_flags & MS_POSIXACL) | ||
898 | fc->dont_mask = 1; | ||
899 | sb->s_flags |= MS_POSIXACL; | ||
900 | |||
867 | fc->release = fuse_free_conn; | 901 | fc->release = fuse_free_conn; |
868 | fc->flags = d.flags; | 902 | fc->flags = d.flags; |
869 | fc->user_id = d.user_id; | 903 | fc->user_id = d.user_id; |
@@ -941,12 +975,25 @@ static int fuse_get_sb(struct file_system_type *fs_type, | |||
941 | return get_sb_nodev(fs_type, flags, raw_data, fuse_fill_super, mnt); | 975 | return get_sb_nodev(fs_type, flags, raw_data, fuse_fill_super, mnt); |
942 | } | 976 | } |
943 | 977 | ||
978 | static void fuse_kill_sb_anon(struct super_block *sb) | ||
979 | { | ||
980 | struct fuse_conn *fc = get_fuse_conn_super(sb); | ||
981 | |||
982 | if (fc) { | ||
983 | down_write(&fc->killsb); | ||
984 | fc->sb = NULL; | ||
985 | up_write(&fc->killsb); | ||
986 | } | ||
987 | |||
988 | kill_anon_super(sb); | ||
989 | } | ||
990 | |||
944 | static struct file_system_type fuse_fs_type = { | 991 | static struct file_system_type fuse_fs_type = { |
945 | .owner = THIS_MODULE, | 992 | .owner = THIS_MODULE, |
946 | .name = "fuse", | 993 | .name = "fuse", |
947 | .fs_flags = FS_HAS_SUBTYPE, | 994 | .fs_flags = FS_HAS_SUBTYPE, |
948 | .get_sb = fuse_get_sb, | 995 | .get_sb = fuse_get_sb, |
949 | .kill_sb = kill_anon_super, | 996 | .kill_sb = fuse_kill_sb_anon, |
950 | }; | 997 | }; |
951 | 998 | ||
952 | #ifdef CONFIG_BLOCK | 999 | #ifdef CONFIG_BLOCK |
@@ -958,11 +1005,24 @@ static int fuse_get_sb_blk(struct file_system_type *fs_type, | |||
958 | mnt); | 1005 | mnt); |
959 | } | 1006 | } |
960 | 1007 | ||
1008 | static void fuse_kill_sb_blk(struct super_block *sb) | ||
1009 | { | ||
1010 | struct fuse_conn *fc = get_fuse_conn_super(sb); | ||
1011 | |||
1012 | if (fc) { | ||
1013 | down_write(&fc->killsb); | ||
1014 | fc->sb = NULL; | ||
1015 | up_write(&fc->killsb); | ||
1016 | } | ||
1017 | |||
1018 | kill_block_super(sb); | ||
1019 | } | ||
1020 | |||
961 | static struct file_system_type fuseblk_fs_type = { | 1021 | static struct file_system_type fuseblk_fs_type = { |
962 | .owner = THIS_MODULE, | 1022 | .owner = THIS_MODULE, |
963 | .name = "fuseblk", | 1023 | .name = "fuseblk", |
964 | .get_sb = fuse_get_sb_blk, | 1024 | .get_sb = fuse_get_sb_blk, |
965 | .kill_sb = kill_block_super, | 1025 | .kill_sb = fuse_kill_sb_blk, |
966 | .fs_flags = FS_REQUIRES_DEV | FS_HAS_SUBTYPE, | 1026 | .fs_flags = FS_REQUIRES_DEV | FS_HAS_SUBTYPE, |
967 | }; | 1027 | }; |
968 | 1028 | ||
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c index fe02ad4740e7..032604e5ef2c 100644 --- a/fs/hostfs/hostfs_kern.c +++ b/fs/hostfs/hostfs_kern.c | |||
@@ -972,6 +972,7 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent) | |||
972 | sb->s_blocksize_bits = 10; | 972 | sb->s_blocksize_bits = 10; |
973 | sb->s_magic = HOSTFS_SUPER_MAGIC; | 973 | sb->s_magic = HOSTFS_SUPER_MAGIC; |
974 | sb->s_op = &hostfs_sbops; | 974 | sb->s_op = &hostfs_sbops; |
975 | sb->s_maxbytes = MAX_LFS_FILESIZE; | ||
975 | 976 | ||
976 | /* NULL is printed as <NULL> by sprintf: avoid that. */ | 977 | /* NULL is printed as <NULL> by sprintf: avoid that. */ |
977 | if (req_root == NULL) | 978 | if (req_root == NULL) |
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c index 7515e73e2bfb..696686cc206e 100644 --- a/fs/jffs2/scan.c +++ b/fs/jffs2/scan.c | |||
@@ -130,9 +130,9 @@ int jffs2_scan_medium(struct jffs2_sb_info *c) | |||
130 | if (jffs2_sum_active()) { | 130 | if (jffs2_sum_active()) { |
131 | s = kzalloc(sizeof(struct jffs2_summary), GFP_KERNEL); | 131 | s = kzalloc(sizeof(struct jffs2_summary), GFP_KERNEL); |
132 | if (!s) { | 132 | if (!s) { |
133 | kfree(flashbuf); | ||
134 | JFFS2_WARNING("Can't allocate memory for summary\n"); | 133 | JFFS2_WARNING("Can't allocate memory for summary\n"); |
135 | return -ENOMEM; | 134 | ret = -ENOMEM; |
135 | goto out; | ||
136 | } | 136 | } |
137 | } | 137 | } |
138 | 138 | ||
diff --git a/fs/namei.c b/fs/namei.c index 5b961eb71cbf..f3c5b278895a 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
@@ -1761,6 +1761,10 @@ do_last: | |||
1761 | goto exit; | 1761 | goto exit; |
1762 | } | 1762 | } |
1763 | filp = nameidata_to_filp(&nd, open_flag); | 1763 | filp = nameidata_to_filp(&nd, open_flag); |
1764 | if (IS_ERR(filp)) | ||
1765 | ima_counts_put(&nd.path, | ||
1766 | acc_mode & (MAY_READ | MAY_WRITE | | ||
1767 | MAY_EXEC)); | ||
1764 | mnt_drop_write(nd.path.mnt); | 1768 | mnt_drop_write(nd.path.mnt); |
1765 | if (nd.root.mnt) | 1769 | if (nd.root.mnt) |
1766 | path_put(&nd.root); | 1770 | path_put(&nd.root); |
@@ -1817,6 +1821,9 @@ ok: | |||
1817 | goto exit; | 1821 | goto exit; |
1818 | } | 1822 | } |
1819 | filp = nameidata_to_filp(&nd, open_flag); | 1823 | filp = nameidata_to_filp(&nd, open_flag); |
1824 | if (IS_ERR(filp)) | ||
1825 | ima_counts_put(&nd.path, | ||
1826 | acc_mode & (MAY_READ | MAY_WRITE | MAY_EXEC)); | ||
1820 | /* | 1827 | /* |
1821 | * It is now safe to drop the mnt write | 1828 | * It is now safe to drop the mnt write |
1822 | * because the filp has had a write taken | 1829 | * because the filp has had a write taken |
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c index ff231ad23895..ff27a2965844 100644 --- a/fs/notify/inotify/inotify_user.c +++ b/fs/notify/inotify/inotify_user.c | |||
@@ -296,12 +296,15 @@ static int inotify_fasync(int fd, struct file *file, int on) | |||
296 | static int inotify_release(struct inode *ignored, struct file *file) | 296 | static int inotify_release(struct inode *ignored, struct file *file) |
297 | { | 297 | { |
298 | struct fsnotify_group *group = file->private_data; | 298 | struct fsnotify_group *group = file->private_data; |
299 | struct user_struct *user = group->inotify_data.user; | ||
299 | 300 | ||
300 | fsnotify_clear_marks_by_group(group); | 301 | fsnotify_clear_marks_by_group(group); |
301 | 302 | ||
302 | /* free this group, matching get was inotify_init->fsnotify_obtain_group */ | 303 | /* free this group, matching get was inotify_init->fsnotify_obtain_group */ |
303 | fsnotify_put_group(group); | 304 | fsnotify_put_group(group); |
304 | 305 | ||
306 | atomic_dec(&user->inotify_devs); | ||
307 | |||
305 | return 0; | 308 | return 0; |
306 | } | 309 | } |
307 | 310 | ||
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h index d7d50d7ee51e..aa00800adacc 100644 --- a/include/asm-generic/percpu.h +++ b/include/asm-generic/percpu.h | |||
@@ -97,4 +97,8 @@ extern void setup_per_cpu_areas(void); | |||
97 | #define PER_CPU_ATTRIBUTES | 97 | #define PER_CPU_ATTRIBUTES |
98 | #endif | 98 | #endif |
99 | 99 | ||
100 | #ifndef PER_CPU_DEF_ATTRIBUTES | ||
101 | #define PER_CPU_DEF_ATTRIBUTES | ||
102 | #endif | ||
103 | |||
100 | #endif /* _ASM_GENERIC_PERCPU_H_ */ | 104 | #endif /* _ASM_GENERIC_PERCPU_H_ */ |
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 720af4c72206..a553f1041cf1 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h | |||
@@ -441,7 +441,8 @@ | |||
441 | } | 441 | } |
442 | 442 | ||
443 | #ifdef CONFIG_CONSTRUCTORS | 443 | #ifdef CONFIG_CONSTRUCTORS |
444 | #define KERNEL_CTORS() VMLINUX_SYMBOL(__ctors_start) = .; \ | 444 | #define KERNEL_CTORS() . = ALIGN(8); \ |
445 | VMLINUX_SYMBOL(__ctors_start) = .; \ | ||
445 | *(.ctors) \ | 446 | *(.ctors) \ |
446 | VMLINUX_SYMBOL(__ctors_end) = .; | 447 | VMLINUX_SYMBOL(__ctors_end) = .; |
447 | #else | 448 | #else |
diff --git a/include/linux/aio.h b/include/linux/aio.h index b16a957030f8..47f7d932a01d 100644 --- a/include/linux/aio.h +++ b/include/linux/aio.h | |||
@@ -121,9 +121,9 @@ struct kiocb { | |||
121 | 121 | ||
122 | /* | 122 | /* |
123 | * If the aio_resfd field of the userspace iocb is not zero, | 123 | * If the aio_resfd field of the userspace iocb is not zero, |
124 | * this is the underlying file* to deliver event to. | 124 | * this is the underlying eventfd context to deliver events to. |
125 | */ | 125 | */ |
126 | struct file *ki_eventfd; | 126 | struct eventfd_ctx *ki_eventfd; |
127 | }; | 127 | }; |
128 | 128 | ||
129 | #define is_sync_kiocb(iocb) ((iocb)->ki_key == KIOCB_SYNC_KEY) | 129 | #define is_sync_kiocb(iocb) ((iocb)->ki_key == KIOCB_SYNC_KEY) |
diff --git a/include/linux/bio.h b/include/linux/bio.h index 2a04eb54c0dd..2892b710771c 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h | |||
@@ -319,7 +319,6 @@ static inline int bio_has_allocated_vec(struct bio *bio) | |||
319 | */ | 319 | */ |
320 | struct bio_integrity_payload { | 320 | struct bio_integrity_payload { |
321 | struct bio *bip_bio; /* parent bio */ | 321 | struct bio *bip_bio; /* parent bio */ |
322 | struct bio_vec *bip_vec; /* integrity data vector */ | ||
323 | 322 | ||
324 | sector_t bip_sector; /* virtual start sector */ | 323 | sector_t bip_sector; /* virtual start sector */ |
325 | 324 | ||
@@ -328,11 +327,12 @@ struct bio_integrity_payload { | |||
328 | 327 | ||
329 | unsigned int bip_size; | 328 | unsigned int bip_size; |
330 | 329 | ||
331 | unsigned short bip_pool; /* pool the ivec came from */ | 330 | unsigned short bip_slab; /* slab the bip came from */ |
332 | unsigned short bip_vcnt; /* # of integrity bio_vecs */ | 331 | unsigned short bip_vcnt; /* # of integrity bio_vecs */ |
333 | unsigned short bip_idx; /* current bip_vec index */ | 332 | unsigned short bip_idx; /* current bip_vec index */ |
334 | 333 | ||
335 | struct work_struct bip_work; /* I/O completion */ | 334 | struct work_struct bip_work; /* I/O completion */ |
335 | struct bio_vec bip_vec[0]; /* embedded bvec array */ | ||
336 | }; | 336 | }; |
337 | #endif /* CONFIG_BLK_DEV_INTEGRITY */ | 337 | #endif /* CONFIG_BLK_DEV_INTEGRITY */ |
338 | 338 | ||
@@ -430,6 +430,9 @@ struct bio_set { | |||
430 | unsigned int front_pad; | 430 | unsigned int front_pad; |
431 | 431 | ||
432 | mempool_t *bio_pool; | 432 | mempool_t *bio_pool; |
433 | #if defined(CONFIG_BLK_DEV_INTEGRITY) | ||
434 | mempool_t *bio_integrity_pool; | ||
435 | #endif | ||
433 | mempool_t *bvec_pool; | 436 | mempool_t *bvec_pool; |
434 | }; | 437 | }; |
435 | 438 | ||
@@ -634,8 +637,9 @@ static inline struct bio *bio_list_get(struct bio_list *bl) | |||
634 | 637 | ||
635 | #define bio_integrity(bio) (bio->bi_integrity != NULL) | 638 | #define bio_integrity(bio) (bio->bi_integrity != NULL) |
636 | 639 | ||
640 | extern struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *, gfp_t, unsigned int, struct bio_set *); | ||
637 | extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int); | 641 | extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int); |
638 | extern void bio_integrity_free(struct bio *); | 642 | extern void bio_integrity_free(struct bio *, struct bio_set *); |
639 | extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int); | 643 | extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int); |
640 | extern int bio_integrity_enabled(struct bio *bio); | 644 | extern int bio_integrity_enabled(struct bio *bio); |
641 | extern int bio_integrity_set_tag(struct bio *, void *, unsigned int); | 645 | extern int bio_integrity_set_tag(struct bio *, void *, unsigned int); |
@@ -645,21 +649,27 @@ extern void bio_integrity_endio(struct bio *, int); | |||
645 | extern void bio_integrity_advance(struct bio *, unsigned int); | 649 | extern void bio_integrity_advance(struct bio *, unsigned int); |
646 | extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int); | 650 | extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int); |
647 | extern void bio_integrity_split(struct bio *, struct bio_pair *, int); | 651 | extern void bio_integrity_split(struct bio *, struct bio_pair *, int); |
648 | extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t); | 652 | extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t, struct bio_set *); |
653 | extern int bioset_integrity_create(struct bio_set *, int); | ||
654 | extern void bioset_integrity_free(struct bio_set *); | ||
655 | extern void bio_integrity_init(void); | ||
649 | 656 | ||
650 | #else /* CONFIG_BLK_DEV_INTEGRITY */ | 657 | #else /* CONFIG_BLK_DEV_INTEGRITY */ |
651 | 658 | ||
652 | #define bio_integrity(a) (0) | 659 | #define bio_integrity(a) (0) |
660 | #define bioset_integrity_create(a, b) (0) | ||
653 | #define bio_integrity_prep(a) (0) | 661 | #define bio_integrity_prep(a) (0) |
654 | #define bio_integrity_enabled(a) (0) | 662 | #define bio_integrity_enabled(a) (0) |
655 | #define bio_integrity_clone(a, b, c) (0) | 663 | #define bio_integrity_clone(a, b, c, d) (0) |
656 | #define bio_integrity_free(a) do { } while (0) | 664 | #define bioset_integrity_free(a) do { } while (0) |
665 | #define bio_integrity_free(a, b) do { } while (0) | ||
657 | #define bio_integrity_endio(a, b) do { } while (0) | 666 | #define bio_integrity_endio(a, b) do { } while (0) |
658 | #define bio_integrity_advance(a, b) do { } while (0) | 667 | #define bio_integrity_advance(a, b) do { } while (0) |
659 | #define bio_integrity_trim(a, b, c) do { } while (0) | 668 | #define bio_integrity_trim(a, b, c) do { } while (0) |
660 | #define bio_integrity_split(a, b, c) do { } while (0) | 669 | #define bio_integrity_split(a, b, c) do { } while (0) |
661 | #define bio_integrity_set_tag(a, b, c) do { } while (0) | 670 | #define bio_integrity_set_tag(a, b, c) do { } while (0) |
662 | #define bio_integrity_get_tag(a, b, c) do { } while (0) | 671 | #define bio_integrity_get_tag(a, b, c) do { } while (0) |
672 | #define bio_integrity_init(a) do { } while (0) | ||
663 | 673 | ||
664 | #endif /* CONFIG_BLK_DEV_INTEGRITY */ | 674 | #endif /* CONFIG_BLK_DEV_INTEGRITY */ |
665 | 675 | ||
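With bip_vec now embedded in the payload and integrity payloads drawn from a per-bio_set mempool, a stacking driver that clones integrity-capable bios has to size that pool itself via the new bioset_integrity_create(). A rough sketch under that assumption; the pool sizes and the my_ names are invented:

#include <linux/bio.h>
#include <linux/errno.h>

static struct bio_set *my_bio_set;

static int my_driver_create_bioset(void)
{
	my_bio_set = bioset_create(16, 0);
	if (!my_bio_set)
		return -ENOMEM;

	/* Also populate the integrity mempool for this bio_set */
	if (bioset_integrity_create(my_bio_set, 16)) {
		bioset_free(my_bio_set);
		return -ENOMEM;
	}

	return 0;
}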
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 8963d9149b5f..49ae07951d55 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -301,12 +301,6 @@ struct blk_queue_tag { | |||
301 | #define BLK_SCSI_MAX_CMDS (256) | 301 | #define BLK_SCSI_MAX_CMDS (256) |
302 | #define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8)) | 302 | #define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8)) |
303 | 303 | ||
304 | struct blk_cmd_filter { | ||
305 | unsigned long read_ok[BLK_SCSI_CMD_PER_LONG]; | ||
306 | unsigned long write_ok[BLK_SCSI_CMD_PER_LONG]; | ||
307 | struct kobject kobj; | ||
308 | }; | ||
309 | |||
310 | struct queue_limits { | 304 | struct queue_limits { |
311 | unsigned long bounce_pfn; | 305 | unsigned long bounce_pfn; |
312 | unsigned long seg_boundary_mask; | 306 | unsigned long seg_boundary_mask; |
@@ -445,7 +439,6 @@ struct request_queue | |||
445 | #if defined(CONFIG_BLK_DEV_BSG) | 439 | #if defined(CONFIG_BLK_DEV_BSG) |
446 | struct bsg_class_device bsg_dev; | 440 | struct bsg_class_device bsg_dev; |
447 | #endif | 441 | #endif |
448 | struct blk_cmd_filter cmd_filter; | ||
449 | }; | 442 | }; |
450 | 443 | ||
451 | #define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */ | 444 | #define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */ |
@@ -998,13 +991,7 @@ static inline int sb_issue_discard(struct super_block *sb, | |||
998 | return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL); | 991 | return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL); |
999 | } | 992 | } |
1000 | 993 | ||
1001 | /* | 994 | extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm); |
1002 | * command filter functions | ||
1003 | */ | ||
1004 | extern int blk_verify_command(struct blk_cmd_filter *filter, | ||
1005 | unsigned char *cmd, fmode_t has_write_perm); | ||
1006 | extern void blk_unregister_filter(struct gendisk *disk); | ||
1007 | extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter); | ||
1008 | 995 | ||
1009 | #define MAX_PHYS_SEGMENTS 128 | 996 | #define MAX_PHYS_SEGMENTS 128 |
1010 | #define MAX_HW_SEGMENTS 128 | 997 | #define MAX_HW_SEGMENTS 128 |
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h index f45a8ae5f828..3b85ba6479f4 100644 --- a/include/linux/eventfd.h +++ b/include/linux/eventfd.h | |||
@@ -8,10 +8,8 @@ | |||
8 | #ifndef _LINUX_EVENTFD_H | 8 | #ifndef _LINUX_EVENTFD_H |
9 | #define _LINUX_EVENTFD_H | 9 | #define _LINUX_EVENTFD_H |
10 | 10 | ||
11 | #ifdef CONFIG_EVENTFD | ||
12 | |||
13 | /* For O_CLOEXEC and O_NONBLOCK */ | ||
14 | #include <linux/fcntl.h> | 11 | #include <linux/fcntl.h> |
12 | #include <linux/file.h> | ||
15 | 13 | ||
16 | /* | 14 | /* |
17 | * CAREFUL: Check include/asm-generic/fcntl.h when defining | 15 | * CAREFUL: Check include/asm-generic/fcntl.h when defining |
@@ -27,16 +25,37 @@ | |||
27 | #define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK) | 25 | #define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK) |
28 | #define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE) | 26 | #define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE) |
29 | 27 | ||
28 | #ifdef CONFIG_EVENTFD | ||
29 | |||
30 | struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx); | ||
31 | void eventfd_ctx_put(struct eventfd_ctx *ctx); | ||
30 | struct file *eventfd_fget(int fd); | 32 | struct file *eventfd_fget(int fd); |
31 | int eventfd_signal(struct file *file, int n); | 33 | struct eventfd_ctx *eventfd_ctx_fdget(int fd); |
34 | struct eventfd_ctx *eventfd_ctx_fileget(struct file *file); | ||
35 | int eventfd_signal(struct eventfd_ctx *ctx, int n); | ||
32 | 36 | ||
33 | #else /* CONFIG_EVENTFD */ | 37 | #else /* CONFIG_EVENTFD */ |
34 | 38 | ||
35 | #define eventfd_fget(fd) ERR_PTR(-ENOSYS) | 39 | /* |
36 | static inline int eventfd_signal(struct file *file, int n) | 40 | * Ugly ugly ugly error layer to support modules that use eventfd but |
37 | { return 0; } | 41 | * pretend to work in !CONFIG_EVENTFD configurations. Namely, AIO. |
42 | */ | ||
43 | static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd) | ||
44 | { | ||
45 | return ERR_PTR(-ENOSYS); | ||
46 | } | ||
47 | |||
48 | static inline int eventfd_signal(struct eventfd_ctx *ctx, int n) | ||
49 | { | ||
50 | return -ENOSYS; | ||
51 | } | ||
52 | |||
53 | static inline void eventfd_ctx_put(struct eventfd_ctx *ctx) | ||
54 | { | ||
55 | |||
56 | } | ||
38 | 57 | ||
39 | #endif /* CONFIG_EVENTFD */ | 58 | #endif |
40 | 59 | ||
41 | #endif /* _LINUX_EVENTFD_H */ | 60 | #endif /* _LINUX_EVENTFD_H */ |
42 | 61 | ||
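eventfd_signal() now takes an eventfd_ctx rather than a struct file, so in-kernel users are expected to resolve the descriptor once with eventfd_ctx_fdget() (or eventfd_ctx_fileget()) and release it with eventfd_ctx_put(). A small sketch of that calling convention; the surrounding function is invented:

#include <linux/eventfd.h>
#include <linux/err.h>

static int signal_user_eventfd(int eventfd, int count)
{
	struct eventfd_ctx *ctx;

	ctx = eventfd_ctx_fdget(eventfd);	/* takes a reference on the context */
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	eventfd_signal(ctx, count);		/* add "count" to the eventfd counter */
	eventfd_ctx_put(ctx);			/* drop the reference */

	return 0;
}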
diff --git a/include/linux/fb.h b/include/linux/fb.h index dd68358996b7..f847df9e99b6 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h | |||
@@ -819,6 +819,7 @@ struct fb_info { | |||
819 | int node; | 819 | int node; |
820 | int flags; | 820 | int flags; |
821 | struct mutex lock; /* Lock for open/release/ioctl funcs */ | 821 | struct mutex lock; /* Lock for open/release/ioctl funcs */ |
822 | struct mutex mm_lock; /* Lock for fb_mmap and smem_* fields */ | ||
822 | struct fb_var_screeninfo var; /* Current var */ | 823 | struct fb_var_screeninfo var; /* Current var */ |
823 | struct fb_fix_screeninfo fix; /* Current fix */ | 824 | struct fb_fix_screeninfo fix; /* Current fix */ |
824 | struct fb_monspecs monspecs; /* Current Monitor specs */ | 825 | struct fb_monspecs monspecs; /* Current Monitor specs */ |
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 44848aa830dc..6c3de999fb34 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h | |||
@@ -280,7 +280,7 @@ static inline void __fsnotify_update_dcache_flags(struct dentry *dentry) | |||
280 | assert_spin_locked(&dentry->d_lock); | 280 | assert_spin_locked(&dentry->d_lock); |
281 | 281 | ||
282 | parent = dentry->d_parent; | 282 | parent = dentry->d_parent; |
283 | if (fsnotify_inode_watches_children(parent->d_inode)) | 283 | if (parent->d_inode && fsnotify_inode_watches_children(parent->d_inode)) |
284 | dentry->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED; | 284 | dentry->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED; |
285 | else | 285 | else |
286 | dentry->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED; | 286 | dentry->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED; |
diff --git a/include/linux/fuse.h b/include/linux/fuse.h index d41ed593f79f..cf593bf9fd32 100644 --- a/include/linux/fuse.h +++ b/include/linux/fuse.h | |||
@@ -25,6 +25,11 @@ | |||
25 | * - add IOCTL message | 25 | * - add IOCTL message |
26 | * - add unsolicited notification support | 26 | * - add unsolicited notification support |
27 | * - add POLL message and NOTIFY_POLL notification | 27 | * - add POLL message and NOTIFY_POLL notification |
28 | * | ||
29 | * 7.12 | ||
30 | * - add umask flag to input argument of open, mknod and mkdir | ||
31 | * - add notification messages for invalidation of inodes and | ||
32 | * directory entries | ||
28 | */ | 33 | */ |
29 | 34 | ||
30 | #ifndef _LINUX_FUSE_H | 35 | #ifndef _LINUX_FUSE_H |
@@ -36,7 +41,7 @@ | |||
36 | #define FUSE_KERNEL_VERSION 7 | 41 | #define FUSE_KERNEL_VERSION 7 |
37 | 42 | ||
38 | /** Minor version number of this interface */ | 43 | /** Minor version number of this interface */ |
39 | #define FUSE_KERNEL_MINOR_VERSION 11 | 44 | #define FUSE_KERNEL_MINOR_VERSION 12 |
40 | 45 | ||
41 | /** The node ID of the root inode */ | 46 | /** The node ID of the root inode */ |
42 | #define FUSE_ROOT_ID 1 | 47 | #define FUSE_ROOT_ID 1 |
@@ -112,6 +117,7 @@ struct fuse_file_lock { | |||
112 | * INIT request/reply flags | 117 | * INIT request/reply flags |
113 | * | 118 | * |
114 | * FUSE_EXPORT_SUPPORT: filesystem handles lookups of "." and ".." | 119 | * FUSE_EXPORT_SUPPORT: filesystem handles lookups of "." and ".." |
120 | * FUSE_DONT_MASK: don't apply umask to file mode on create operations | ||
115 | */ | 121 | */ |
116 | #define FUSE_ASYNC_READ (1 << 0) | 122 | #define FUSE_ASYNC_READ (1 << 0) |
117 | #define FUSE_POSIX_LOCKS (1 << 1) | 123 | #define FUSE_POSIX_LOCKS (1 << 1) |
@@ -119,6 +125,7 @@ struct fuse_file_lock { | |||
119 | #define FUSE_ATOMIC_O_TRUNC (1 << 3) | 125 | #define FUSE_ATOMIC_O_TRUNC (1 << 3) |
120 | #define FUSE_EXPORT_SUPPORT (1 << 4) | 126 | #define FUSE_EXPORT_SUPPORT (1 << 4) |
121 | #define FUSE_BIG_WRITES (1 << 5) | 127 | #define FUSE_BIG_WRITES (1 << 5) |
128 | #define FUSE_DONT_MASK (1 << 6) | ||
122 | 129 | ||
123 | /** | 130 | /** |
124 | * CUSE INIT request/reply flags | 131 | * CUSE INIT request/reply flags |
@@ -224,6 +231,8 @@ enum fuse_opcode { | |||
224 | 231 | ||
225 | enum fuse_notify_code { | 232 | enum fuse_notify_code { |
226 | FUSE_NOTIFY_POLL = 1, | 233 | FUSE_NOTIFY_POLL = 1, |
234 | FUSE_NOTIFY_INVAL_INODE = 2, | ||
235 | FUSE_NOTIFY_INVAL_ENTRY = 3, | ||
227 | FUSE_NOTIFY_CODE_MAX, | 236 | FUSE_NOTIFY_CODE_MAX, |
228 | }; | 237 | }; |
229 | 238 | ||
@@ -262,14 +271,18 @@ struct fuse_attr_out { | |||
262 | struct fuse_attr attr; | 271 | struct fuse_attr attr; |
263 | }; | 272 | }; |
264 | 273 | ||
274 | #define FUSE_COMPAT_MKNOD_IN_SIZE 8 | ||
275 | |||
265 | struct fuse_mknod_in { | 276 | struct fuse_mknod_in { |
266 | __u32 mode; | 277 | __u32 mode; |
267 | __u32 rdev; | 278 | __u32 rdev; |
279 | __u32 umask; | ||
280 | __u32 padding; | ||
268 | }; | 281 | }; |
269 | 282 | ||
270 | struct fuse_mkdir_in { | 283 | struct fuse_mkdir_in { |
271 | __u32 mode; | 284 | __u32 mode; |
272 | __u32 padding; | 285 | __u32 umask; |
273 | }; | 286 | }; |
274 | 287 | ||
275 | struct fuse_rename_in { | 288 | struct fuse_rename_in { |
@@ -301,7 +314,14 @@ struct fuse_setattr_in { | |||
301 | 314 | ||
302 | struct fuse_open_in { | 315 | struct fuse_open_in { |
303 | __u32 flags; | 316 | __u32 flags; |
317 | __u32 unused; | ||
318 | }; | ||
319 | |||
320 | struct fuse_create_in { | ||
321 | __u32 flags; | ||
304 | __u32 mode; | 322 | __u32 mode; |
323 | __u32 umask; | ||
324 | __u32 padding; | ||
305 | }; | 325 | }; |
306 | 326 | ||
307 | struct fuse_open_out { | 327 | struct fuse_open_out { |
@@ -508,4 +528,16 @@ struct fuse_dirent { | |||
508 | #define FUSE_DIRENT_SIZE(d) \ | 528 | #define FUSE_DIRENT_SIZE(d) \ |
509 | FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET + (d)->namelen) | 529 | FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET + (d)->namelen) |
510 | 530 | ||
531 | struct fuse_notify_inval_inode_out { | ||
532 | __u64 ino; | ||
533 | __s64 off; | ||
534 | __s64 len; | ||
535 | }; | ||
536 | |||
537 | struct fuse_notify_inval_entry_out { | ||
538 | __u64 parent; | ||
539 | __u32 namelen; | ||
540 | __u32 padding; | ||
541 | }; | ||
542 | |||
511 | #endif /* _LINUX_FUSE_H */ | 543 | #endif /* _LINUX_FUSE_H */ |
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 7400900de94a..54648e625efd 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/list.h> | 21 | #include <linux/list.h> |
22 | #include <linux/wait.h> | 22 | #include <linux/wait.h> |
23 | #include <linux/percpu.h> | 23 | #include <linux/percpu.h> |
24 | #include <linux/timer.h> | ||
24 | 25 | ||
25 | 26 | ||
26 | struct hrtimer_clock_base; | 27 | struct hrtimer_clock_base; |
@@ -447,6 +448,8 @@ extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf, | |||
447 | 448 | ||
448 | static inline void timer_stats_account_hrtimer(struct hrtimer *timer) | 449 | static inline void timer_stats_account_hrtimer(struct hrtimer *timer) |
449 | { | 450 | { |
451 | if (likely(!timer->start_pid)) | ||
452 | return; | ||
450 | timer_stats_update_stats(timer, timer->start_pid, timer->start_site, | 453 | timer_stats_update_stats(timer, timer->start_pid, timer->start_site, |
451 | timer->function, timer->start_comm, 0); | 454 | timer->function, timer->start_comm, 0); |
452 | } | 455 | } |
@@ -456,6 +459,8 @@ extern void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer, | |||
456 | 459 | ||
457 | static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer) | 460 | static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer) |
458 | { | 461 | { |
462 | if (likely(!timer_stats_active)) | ||
463 | return; | ||
459 | __timer_stats_hrtimer_set_start_info(timer, __builtin_return_address(0)); | 464 | __timer_stats_hrtimer_set_start_info(timer, __builtin_return_address(0)); |
460 | } | 465 | } |
461 | 466 | ||
diff --git a/include/linux/ide.h b/include/linux/ide.h index cf1f3888067c..edc93a6d931d 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h | |||
@@ -1062,7 +1062,6 @@ int generic_ide_ioctl(ide_drive_t *, struct block_device *, unsigned, unsigned l | |||
1062 | extern int ide_vlb_clk; | 1062 | extern int ide_vlb_clk; |
1063 | extern int ide_pci_clk; | 1063 | extern int ide_pci_clk; |
1064 | 1064 | ||
1065 | unsigned int ide_rq_bytes(struct request *); | ||
1066 | int ide_end_rq(ide_drive_t *, struct request *, int, unsigned int); | 1065 | int ide_end_rq(ide_drive_t *, struct request *, int, unsigned int); |
1067 | void ide_kill_rq(ide_drive_t *, struct request *); | 1066 | void ide_kill_rq(ide_drive_t *, struct request *); |
1068 | 1067 | ||
@@ -1420,6 +1419,7 @@ static inline void ide_dma_unmap_sg(ide_drive_t *drive, | |||
1420 | 1419 | ||
1421 | #ifdef CONFIG_BLK_DEV_IDEACPI | 1420 | #ifdef CONFIG_BLK_DEV_IDEACPI |
1422 | int ide_acpi_init(void); | 1421 | int ide_acpi_init(void); |
1422 | bool ide_port_acpi(ide_hwif_t *hwif); | ||
1423 | extern int ide_acpi_exec_tfs(ide_drive_t *drive); | 1423 | extern int ide_acpi_exec_tfs(ide_drive_t *drive); |
1424 | extern void ide_acpi_get_timing(ide_hwif_t *hwif); | 1424 | extern void ide_acpi_get_timing(ide_hwif_t *hwif); |
1425 | extern void ide_acpi_push_timing(ide_hwif_t *hwif); | 1425 | extern void ide_acpi_push_timing(ide_hwif_t *hwif); |
@@ -1428,6 +1428,7 @@ void ide_acpi_port_init_devices(ide_hwif_t *); | |||
1428 | extern void ide_acpi_set_state(ide_hwif_t *hwif, int on); | 1428 | extern void ide_acpi_set_state(ide_hwif_t *hwif, int on); |
1429 | #else | 1429 | #else |
1430 | static inline int ide_acpi_init(void) { return 0; } | 1430 | static inline int ide_acpi_init(void) { return 0; } |
1431 | static inline bool ide_port_acpi(ide_hwif_t *hwif) { return 0; } | ||
1431 | static inline int ide_acpi_exec_tfs(ide_drive_t *drive) { return 0; } | 1432 | static inline int ide_acpi_exec_tfs(ide_drive_t *drive) { return 0; } |
1432 | static inline void ide_acpi_get_timing(ide_hwif_t *hwif) { ; } | 1433 | static inline void ide_acpi_get_timing(ide_hwif_t *hwif) { ; } |
1433 | static inline void ide_acpi_push_timing(ide_hwif_t *hwif) { ; } | 1434 | static inline void ide_acpi_push_timing(ide_hwif_t *hwif) { ; } |
diff --git a/include/linux/ima.h b/include/linux/ima.h index b1b827d091a9..0e3f2a4c25f6 100644 --- a/include/linux/ima.h +++ b/include/linux/ima.h | |||
@@ -24,6 +24,7 @@ extern int ima_path_check(struct path *path, int mask, int update_counts); | |||
24 | extern void ima_file_free(struct file *file); | 24 | extern void ima_file_free(struct file *file); |
25 | extern int ima_file_mmap(struct file *file, unsigned long prot); | 25 | extern int ima_file_mmap(struct file *file, unsigned long prot); |
26 | extern void ima_counts_get(struct file *file); | 26 | extern void ima_counts_get(struct file *file); |
27 | extern void ima_counts_put(struct path *path, int mask); | ||
27 | 28 | ||
28 | #else | 29 | #else |
29 | static inline int ima_bprm_check(struct linux_binprm *bprm) | 30 | static inline int ima_bprm_check(struct linux_binprm *bprm) |
@@ -60,5 +61,10 @@ static inline void ima_counts_get(struct file *file) | |||
60 | { | 61 | { |
61 | return; | 62 | return; |
62 | } | 63 | } |
64 | |||
65 | static inline void ima_counts_put(struct path *path, int mask) | ||
66 | { | ||
67 | return; | ||
68 | } | ||
63 | #endif /* CONFIG_IMA_H */ | 69 | #endif /* CONFIG_IMA_H */ |
64 | #endif /* _LINUX_IMA_H */ | 70 | #endif /* _LINUX_IMA_H */ |
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index fac104e7186a..d6320a3e8def 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
@@ -303,6 +303,7 @@ extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in | |||
303 | extern int panic_timeout; | 303 | extern int panic_timeout; |
304 | extern int panic_on_oops; | 304 | extern int panic_on_oops; |
305 | extern int panic_on_unrecovered_nmi; | 305 | extern int panic_on_unrecovered_nmi; |
306 | extern int panic_on_io_nmi; | ||
306 | extern const char *print_tainted(void); | 307 | extern const char *print_tainted(void); |
307 | extern void add_taint(unsigned flag); | 308 | extern void add_taint(unsigned flag); |
308 | extern int test_taint(unsigned flag); | 309 | extern int test_taint(unsigned flag); |
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index aacc5449f586..16713dc672e4 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h | |||
@@ -125,6 +125,7 @@ struct kvm_kernel_irq_routing_entry { | |||
125 | struct kvm { | 125 | struct kvm { |
126 | struct mutex lock; /* protects the vcpus array and APIC accesses */ | 126 | struct mutex lock; /* protects the vcpus array and APIC accesses */ |
127 | spinlock_t mmu_lock; | 127 | spinlock_t mmu_lock; |
128 | spinlock_t requests_lock; | ||
128 | struct rw_semaphore slots_lock; | 129 | struct rw_semaphore slots_lock; |
129 | struct mm_struct *mm; /* userspace tied to this vm */ | 130 | struct mm_struct *mm; /* userspace tied to this vm */ |
130 | int nmemslots; | 131 | int nmemslots; |
diff --git a/include/linux/leds-lp3944.h b/include/linux/leds-lp3944.h new file mode 100644 index 000000000000..afc9f9fd70f5 --- /dev/null +++ b/include/linux/leds-lp3944.h | |||
@@ -0,0 +1,53 @@ | |||
1 | /* | ||
2 | * leds-lp3944.h - platform data structure for lp3944 led controller | ||
3 | * | ||
4 | * Copyright (C) 2009 Antonio Ospite <ospite@studenti.unina.it> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | #ifndef __LINUX_LEDS_LP3944_H | ||
13 | #define __LINUX_LEDS_LP3944_H | ||
14 | |||
15 | #include <linux/leds.h> | ||
16 | #include <linux/workqueue.h> | ||
17 | |||
18 | #define LP3944_LED0 0 | ||
19 | #define LP3944_LED1 1 | ||
20 | #define LP3944_LED2 2 | ||
21 | #define LP3944_LED3 3 | ||
22 | #define LP3944_LED4 4 | ||
23 | #define LP3944_LED5 5 | ||
24 | #define LP3944_LED6 6 | ||
25 | #define LP3944_LED7 7 | ||
26 | #define LP3944_LEDS_MAX 8 | ||
27 | |||
28 | #define LP3944_LED_STATUS_MASK 0x03 | ||
29 | enum lp3944_status { | ||
30 | LP3944_LED_STATUS_OFF = 0x0, | ||
31 | LP3944_LED_STATUS_ON = 0x1, | ||
32 | LP3944_LED_STATUS_DIM0 = 0x2, | ||
33 | LP3944_LED_STATUS_DIM1 = 0x3 | ||
34 | }; | ||
35 | |||
36 | enum lp3944_type { | ||
37 | LP3944_LED_TYPE_NONE, | ||
38 | LP3944_LED_TYPE_LED, | ||
39 | LP3944_LED_TYPE_LED_INVERTED, | ||
40 | }; | ||
41 | |||
42 | struct lp3944_led { | ||
43 | char *name; | ||
44 | enum lp3944_type type; | ||
45 | enum lp3944_status status; | ||
46 | }; | ||
47 | |||
48 | struct lp3944_platform_data { | ||
49 | struct lp3944_led leds[LP3944_LEDS_MAX]; | ||
50 | u8 leds_size; | ||
51 | }; | ||
52 | |||
53 | #endif /* __LINUX_LEDS_LP3944_H */ | ||
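The new header is consumed as I2C platform data by board code. A hypothetical example wiring up two of the eight outputs; the bus address, device name string and LED names are assumptions, not taken from this patch:

#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/leds-lp3944.h>

static struct lp3944_platform_data board_lp3944_pdata = {
	.leds = {
		[LP3944_LED0] = {
			.name   = "board:red:alarm",
			.type   = LP3944_LED_TYPE_LED,
			.status = LP3944_LED_STATUS_OFF,
		},
		[LP3944_LED1] = {
			.name   = "board:green:power",
			.type   = LP3944_LED_TYPE_LED_INVERTED,
			.status = LP3944_LED_STATUS_ON,
		},
		/* remaining outputs default to LP3944_LED_TYPE_NONE */
	},
	.leds_size = LP3944_LEDS_MAX,
};

static struct i2c_board_info board_i2c_devices[] __initdata = {
	{
		I2C_BOARD_INFO("lp3944", 0x60),
		.platform_data = &board_lp3944_pdata,
	},
};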
diff --git a/include/linux/leds.h b/include/linux/leds.h index 376fe07732ea..d8bf9665e70c 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h | |||
@@ -45,7 +45,10 @@ struct led_classdev { | |||
45 | /* Get LED brightness level */ | 45 | /* Get LED brightness level */ |
46 | enum led_brightness (*brightness_get)(struct led_classdev *led_cdev); | 46 | enum led_brightness (*brightness_get)(struct led_classdev *led_cdev); |
47 | 47 | ||
48 | /* Activate hardware accelerated blink */ | 48 | /* Activate hardware accelerated blink, delays are in |
49 | * milliseconds and if none is provided then a sensible default | ||
50 | * should be chosen. The call can adjust the timings if it can't | ||
51 | * match the values specified exactly. */ | ||
49 | int (*blink_set)(struct led_classdev *led_cdev, | 52 | int (*blink_set)(struct led_classdev *led_cdev, |
50 | unsigned long *delay_on, | 53 | unsigned long *delay_on, |
51 | unsigned long *delay_off); | 54 | unsigned long *delay_off); |
@@ -141,9 +144,14 @@ struct gpio_led { | |||
141 | const char *name; | 144 | const char *name; |
142 | const char *default_trigger; | 145 | const char *default_trigger; |
143 | unsigned gpio; | 146 | unsigned gpio; |
144 | u8 active_low : 1; | 147 | unsigned active_low : 1; |
145 | u8 retain_state_suspended : 1; | 148 | unsigned retain_state_suspended : 1; |
149 | unsigned default_state : 2; | ||
150 | /* default_state should be one of LEDS_GPIO_DEFSTATE_(ON|OFF|KEEP) */ | ||
146 | }; | 151 | }; |
152 | #define LEDS_GPIO_DEFSTATE_OFF 0 | ||
153 | #define LEDS_GPIO_DEFSTATE_ON 1 | ||
154 | #define LEDS_GPIO_DEFSTATE_KEEP 2 | ||
147 | 155 | ||
148 | struct gpio_led_platform_data { | 156 | struct gpio_led_platform_data { |
149 | int num_leds; | 157 | int num_leds; |
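struct gpio_led gains a two-bit default_state (and its bitfields become plain unsigned), letting board code choose the initial LED state, including keeping whatever the bootloader set. A made-up board-data example; GPIO numbers and names are placeholders:

#include <linux/kernel.h>
#include <linux/leds.h>

static struct gpio_led board_leds[] = {
	{
		.name            = "board:green:heartbeat",
		.default_trigger = "heartbeat",
		.gpio            = 42,
		.default_state   = LEDS_GPIO_DEFSTATE_OFF,
	},
	{
		.name            = "board:blue:power",
		.gpio            = 43,
		.default_state   = LEDS_GPIO_DEFSTATE_KEEP,	/* leave as the bootloader set it */
	},
};

static struct gpio_led_platform_data board_leds_pdata = {
	.num_leds = ARRAY_SIZE(board_leds),
	.leds     = board_leds,
};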
diff --git a/include/linux/netfilter/xt_conntrack.h b/include/linux/netfilter/xt_conntrack.h index 3430c7751948..7ae05338e94c 100644 --- a/include/linux/netfilter/xt_conntrack.h +++ b/include/linux/netfilter/xt_conntrack.h | |||
@@ -81,4 +81,17 @@ struct xt_conntrack_mtinfo1 { | |||
81 | __u8 state_mask, status_mask; | 81 | __u8 state_mask, status_mask; |
82 | }; | 82 | }; |
83 | 83 | ||
84 | struct xt_conntrack_mtinfo2 { | ||
85 | union nf_inet_addr origsrc_addr, origsrc_mask; | ||
86 | union nf_inet_addr origdst_addr, origdst_mask; | ||
87 | union nf_inet_addr replsrc_addr, replsrc_mask; | ||
88 | union nf_inet_addr repldst_addr, repldst_mask; | ||
89 | __u32 expires_min, expires_max; | ||
90 | __u16 l4proto; | ||
91 | __be16 origsrc_port, origdst_port; | ||
92 | __be16 replsrc_port, repldst_port; | ||
93 | __u16 match_flags, invert_flags; | ||
94 | __u16 state_mask, status_mask; | ||
95 | }; | ||
96 | |||
84 | #endif /*_XT_CONNTRACK_H*/ | 97 | #endif /*_XT_CONNTRACK_H*/ |
diff --git a/include/linux/netfilter/xt_osf.h b/include/linux/netfilter/xt_osf.h index fd2272e0959a..18afa495f973 100644 --- a/include/linux/netfilter/xt_osf.h +++ b/include/linux/netfilter/xt_osf.h | |||
@@ -20,6 +20,8 @@ | |||
20 | #ifndef _XT_OSF_H | 20 | #ifndef _XT_OSF_H |
21 | #define _XT_OSF_H | 21 | #define _XT_OSF_H |
22 | 22 | ||
23 | #include <linux/types.h> | ||
24 | |||
23 | #define MAXGENRELEN 32 | 25 | #define MAXGENRELEN 32 |
24 | 26 | ||
25 | #define XT_OSF_GENRE (1<<0) | 27 | #define XT_OSF_GENRE (1<<0) |
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index a3b000365795..73b46b6b904f 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -2645,6 +2645,7 @@ | |||
2645 | #define PCI_DEVICE_ID_NETMOS_9835 0x9835 | 2645 | #define PCI_DEVICE_ID_NETMOS_9835 0x9835 |
2646 | #define PCI_DEVICE_ID_NETMOS_9845 0x9845 | 2646 | #define PCI_DEVICE_ID_NETMOS_9845 0x9845 |
2647 | #define PCI_DEVICE_ID_NETMOS_9855 0x9855 | 2647 | #define PCI_DEVICE_ID_NETMOS_9855 0x9855 |
2648 | #define PCI_DEVICE_ID_NETMOS_9901 0x9901 | ||
2648 | 2649 | ||
2649 | #define PCI_VENDOR_ID_3COM_2 0xa727 | 2650 | #define PCI_VENDOR_ID_3COM_2 0xa727 |
2650 | 2651 | ||
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h index 8f921d74f49f..68438e18fff4 100644 --- a/include/linux/percpu-defs.h +++ b/include/linux/percpu-defs.h | |||
@@ -24,7 +24,8 @@ | |||
24 | 24 | ||
25 | #define DEFINE_PER_CPU_SECTION(type, name, section) \ | 25 | #define DEFINE_PER_CPU_SECTION(type, name, section) \ |
26 | __attribute__((__section__(PER_CPU_BASE_SECTION section))) \ | 26 | __attribute__((__section__(PER_CPU_BASE_SECTION section))) \ |
27 | PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name | 27 | PER_CPU_ATTRIBUTES PER_CPU_DEF_ATTRIBUTES \ |
28 | __typeof__(type) per_cpu__##name | ||
28 | 29 | ||
29 | /* | 30 | /* |
30 | * Variant on the per-CPU variable declaration/definition theme used for | 31 | * Variant on the per-CPU variable declaration/definition theme used for |
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 89698d8aba5c..5e970c7d3fd5 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h | |||
@@ -178,8 +178,10 @@ struct perf_counter_attr { | |||
178 | mmap : 1, /* include mmap data */ | 178 | mmap : 1, /* include mmap data */ |
179 | comm : 1, /* include comm data */ | 179 | comm : 1, /* include comm data */ |
180 | freq : 1, /* use freq, not period */ | 180 | freq : 1, /* use freq, not period */ |
181 | inherit_stat : 1, /* per task counts */ | ||
182 | enable_on_exec : 1, /* next exec enables */ | ||
181 | 183 | ||
182 | __reserved_1 : 53; | 184 | __reserved_1 : 51; |
183 | 185 | ||
184 | __u32 wakeup_events; /* wakeup every n events */ | 186 | __u32 wakeup_events; /* wakeup every n events */ |
185 | __u32 __reserved_2; | 187 | __u32 __reserved_2; |
@@ -232,6 +234,14 @@ struct perf_counter_mmap_page { | |||
232 | __u32 lock; /* seqlock for synchronization */ | 234 | __u32 lock; /* seqlock for synchronization */ |
233 | __u32 index; /* hardware counter identifier */ | 235 | __u32 index; /* hardware counter identifier */ |
234 | __s64 offset; /* add to hardware counter value */ | 236 | __s64 offset; /* add to hardware counter value */ |
237 | __u64 time_enabled; /* time counter active */ | ||
238 | __u64 time_running; /* time counter on cpu */ | ||
239 | |||
240 | /* | ||
241 | * Hole for extension of the self-monitoring capabilities | ||
242 | */ | ||
243 | |||
244 | __u64 __reserved[123]; /* align to 1k */ | ||
235 | 245 | ||
236 | /* | 246 | /* |
237 | * Control data for the mmap() data buffer. | 247 | * Control data for the mmap() data buffer. |
@@ -253,7 +263,6 @@ struct perf_counter_mmap_page { | |||
253 | #define PERF_EVENT_MISC_KERNEL (1 << 0) | 263 | #define PERF_EVENT_MISC_KERNEL (1 << 0) |
254 | #define PERF_EVENT_MISC_USER (2 << 0) | 264 | #define PERF_EVENT_MISC_USER (2 << 0) |
255 | #define PERF_EVENT_MISC_HYPERVISOR (3 << 0) | 265 | #define PERF_EVENT_MISC_HYPERVISOR (3 << 0) |
256 | #define PERF_EVENT_MISC_OVERFLOW (1 << 2) | ||
257 | 266 | ||
258 | struct perf_event_header { | 267 | struct perf_event_header { |
259 | __u32 type; | 268 | __u32 type; |
@@ -327,9 +336,18 @@ enum perf_event_type { | |||
327 | PERF_EVENT_FORK = 7, | 336 | PERF_EVENT_FORK = 7, |
328 | 337 | ||
329 | /* | 338 | /* |
330 | * When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field | 339 | * struct { |
331 | * will be PERF_SAMPLE_* | 340 | * struct perf_event_header header; |
332 | * | 341 | * u32 pid, tid; |
342 | * u64 value; | ||
343 | * { u64 time_enabled; } && PERF_FORMAT_ENABLED | ||
344 | * { u64 time_running; } && PERF_FORMAT_RUNNING | ||
345 | * { u64 parent_id; } && PERF_FORMAT_ID | ||
346 | * }; | ||
347 | */ | ||
348 | PERF_EVENT_READ = 8, | ||
349 | |||
350 | /* | ||
333 | * struct { | 351 | * struct { |
334 | * struct perf_event_header header; | 352 | * struct perf_event_header header; |
335 | * | 353 | * |
@@ -337,8 +355,9 @@ enum perf_event_type { | |||
337 | * { u32 pid, tid; } && PERF_SAMPLE_TID | 355 | * { u32 pid, tid; } && PERF_SAMPLE_TID |
338 | * { u64 time; } && PERF_SAMPLE_TIME | 356 | * { u64 time; } && PERF_SAMPLE_TIME |
339 | * { u64 addr; } && PERF_SAMPLE_ADDR | 357 | * { u64 addr; } && PERF_SAMPLE_ADDR |
340 | * { u64 config; } && PERF_SAMPLE_CONFIG | 358 | * { u64 id; } && PERF_SAMPLE_ID |
341 | * { u32 cpu, res; } && PERF_SAMPLE_CPU | 359 | * { u32 cpu, res; } && PERF_SAMPLE_CPU |
360 | * { u64 period; } && PERF_SAMPLE_PERIOD | ||
342 | * | 361 | * |
343 | * { u64 nr; | 362 | * { u64 nr; |
344 | * { u64 id, val; } cnt[nr]; } && PERF_SAMPLE_GROUP | 363 | * { u64 id, val; } cnt[nr]; } && PERF_SAMPLE_GROUP |
@@ -347,6 +366,9 @@ enum perf_event_type { | |||
347 | * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN | 366 | * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN |
348 | * }; | 367 | * }; |
349 | */ | 368 | */ |
369 | PERF_EVENT_SAMPLE = 9, | ||
370 | |||
371 | PERF_EVENT_MAX, /* non-ABI */ | ||
350 | }; | 372 | }; |
351 | 373 | ||
352 | enum perf_callchain_context { | 374 | enum perf_callchain_context { |
@@ -582,6 +604,7 @@ struct perf_counter_context { | |||
582 | int nr_counters; | 604 | int nr_counters; |
583 | int nr_active; | 605 | int nr_active; |
584 | int is_active; | 606 | int is_active; |
607 | int nr_stat; | ||
585 | atomic_t refcount; | 608 | atomic_t refcount; |
586 | struct task_struct *task; | 609 | struct task_struct *task; |
587 | 610 | ||
@@ -669,7 +692,16 @@ static inline int is_software_counter(struct perf_counter *counter) | |||
669 | (counter->attr.type != PERF_TYPE_HW_CACHE); | 692 | (counter->attr.type != PERF_TYPE_HW_CACHE); |
670 | } | 693 | } |
671 | 694 | ||
672 | extern void perf_swcounter_event(u32, u64, int, struct pt_regs *, u64); | 695 | extern atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX]; |
696 | |||
697 | extern void __perf_swcounter_event(u32, u64, int, struct pt_regs *, u64); | ||
698 | |||
699 | static inline void | ||
700 | perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr) | ||
701 | { | ||
702 | if (atomic_read(&perf_swcounter_enabled[event])) | ||
703 | __perf_swcounter_event(event, nr, nmi, regs, addr); | ||
704 | } | ||
673 | 705 | ||
674 | extern void __perf_counter_mmap(struct vm_area_struct *vma); | 706 | extern void __perf_counter_mmap(struct vm_area_struct *vma); |
675 | 707 | ||
diff --git a/include/linux/sched.h b/include/linux/sched.h index 4d0754269884..0085d758d645 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -349,8 +349,20 @@ extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner); | |||
349 | struct nsproxy; | 349 | struct nsproxy; |
350 | struct user_namespace; | 350 | struct user_namespace; |
351 | 351 | ||
352 | /* Maximum number of active map areas.. This is a random (large) number */ | 352 | /* |
353 | #define DEFAULT_MAX_MAP_COUNT 65536 | 353 | * Default maximum number of active map areas, this limits the number of vmas |
354 | * per mm struct. Users can override this number via sysctl, but there is a | ||
355 | * problem. | ||
356 | * | ||
357 | * When a program's coredump is generated in ELF format, one section is | ||
358 | * created per vma. In ELF, the number of sections is stored as an unsigned | ||
359 | * short, so it must stay below 65535 at coredump time. Because the kernel | ||
360 | * adds a few informative sections to the program image when generating the | ||
361 | * coredump, we need some margin. The number of extra sections is currently | ||
362 | * 1-3, depending on the arch, so we use 5 as a safe margin here. | ||
363 | */ | ||
364 | #define MAPCOUNT_ELF_CORE_MARGIN (5) | ||
365 | #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN) | ||
354 | 366 | ||
355 | extern int sysctl_max_map_count; | 367 | extern int sysctl_max_map_count; |
356 | 368 | ||
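The new default in the sched.h hunk is chosen so that every vma can still get its own section header in an ELF coredump: e_shnum is a 16-bit field, so the limit is USHORT_MAX less a small margin for the extra sections the kernel adds. A quick arithmetic check is below; USHORT_MAX is defined locally and assumed to mirror the kernel's value of 65535.

    #include <assert.h>
    #include <stdio.h>

    #define USHORT_MAX                  ((unsigned short)~0U)   /* 65535 */
    #define MAPCOUNT_ELF_CORE_MARGIN    (5)
    #define DEFAULT_MAX_MAP_COUNT       (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)

    int main(void)
    {
        assert(DEFAULT_MAX_MAP_COUNT == 65530);
        printf("default max_map_count = %d\n", DEFAULT_MAX_MAP_COUNT);
        return 0;
    }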
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 9c4cd27f4685..c47c4b4da97e 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h | |||
@@ -80,6 +80,8 @@ struct spi_device { | |||
80 | #define SPI_LSB_FIRST 0x08 /* per-word bits-on-wire */ | 80 | #define SPI_LSB_FIRST 0x08 /* per-word bits-on-wire */ |
81 | #define SPI_3WIRE 0x10 /* SI/SO signals shared */ | 81 | #define SPI_3WIRE 0x10 /* SI/SO signals shared */ |
82 | #define SPI_LOOP 0x20 /* loopback mode */ | 82 | #define SPI_LOOP 0x20 /* loopback mode */ |
83 | #define SPI_NO_CS 0x40 /* 1 dev/bus, no chipselect */ | ||
84 | #define SPI_READY 0x80 /* slave pulls low to pause */ | ||
83 | u8 bits_per_word; | 85 | u8 bits_per_word; |
84 | int irq; | 86 | int irq; |
85 | void *controller_state; | 87 | void *controller_state; |
@@ -248,6 +250,10 @@ struct spi_master { | |||
248 | /* spi_device.mode flags understood by this controller driver */ | 250 | /* spi_device.mode flags understood by this controller driver */ |
249 | u16 mode_bits; | 251 | u16 mode_bits; |
250 | 252 | ||
253 | /* other constraints relevant to this driver */ | ||
254 | u16 flags; | ||
255 | #define SPI_MASTER_HALF_DUPLEX BIT(0) /* can't do full duplex */ | ||
256 | |||
251 | /* Setup mode and clock, etc (spi driver may call many times). | 257 | /* Setup mode and clock, etc (spi driver may call many times). |
252 | * | 258 | * |
253 | * IMPORTANT: this may be called when transfers to another | 259 | * IMPORTANT: this may be called when transfers to another |
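SPI_NO_CS and SPI_READY extend the per-device mode bits, and the new master flags and mode_bits fields give a controller a way to advertise what it can actually do. The sketch below shows the kind of rejection check a setup() path might perform against an unsupported mode; the fake_* names and the particular set of accepted bits are illustrative, only the flag values come from the header above.

    #include <errno.h>
    #include <stdio.h>

    #define SPI_CPHA        0x01
    #define SPI_CPOL        0x02
    #define SPI_LSB_FIRST   0x08
    #define SPI_3WIRE       0x10
    #define SPI_LOOP        0x20
    #define SPI_NO_CS       0x40    /* 1 dev/bus, no chipselect */
    #define SPI_READY       0x80    /* slave pulls low to pause */

    struct fake_spi_device {
        unsigned short mode;
    };

    /* mode bits this imaginary controller understands */
    static const unsigned short mode_bits = SPI_CPHA | SPI_CPOL | SPI_NO_CS;

    static int fake_setup(struct fake_spi_device *spi)
    {
        if (spi->mode & ~mode_bits) {
            fprintf(stderr, "unsupported mode bits 0x%x\n",
                    (unsigned)(spi->mode & ~mode_bits));
            return -EINVAL;
        }
        return 0;
    }

    int main(void)
    {
        struct fake_spi_device dev = { .mode = SPI_CPOL | SPI_READY };

        return fake_setup(&dev) ? 1 : 0;    /* rejects SPI_READY here */
    }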
diff --git a/include/linux/spi/spidev.h b/include/linux/spi/spidev.h index 95251ccd5a07..bf0570a84f7a 100644 --- a/include/linux/spi/spidev.h +++ b/include/linux/spi/spidev.h | |||
@@ -40,6 +40,8 @@ | |||
40 | #define SPI_LSB_FIRST 0x08 | 40 | #define SPI_LSB_FIRST 0x08 |
41 | #define SPI_3WIRE 0x10 | 41 | #define SPI_3WIRE 0x10 |
42 | #define SPI_LOOP 0x20 | 42 | #define SPI_LOOP 0x20 |
43 | #define SPI_NO_CS 0x40 | ||
44 | #define SPI_READY 0x80 | ||
43 | 45 | ||
44 | /*---------------------------------------------------------------------------*/ | 46 | /*---------------------------------------------------------------------------*/ |
45 | 47 | ||
diff --git a/include/linux/timer.h b/include/linux/timer.h index ccf882eed8f8..be62ec2ebea5 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h | |||
@@ -190,6 +190,8 @@ extern unsigned long get_next_timer_interrupt(unsigned long now); | |||
190 | */ | 190 | */ |
191 | #ifdef CONFIG_TIMER_STATS | 191 | #ifdef CONFIG_TIMER_STATS |
192 | 192 | ||
193 | extern int timer_stats_active; | ||
194 | |||
193 | #define TIMER_STATS_FLAG_DEFERRABLE 0x1 | 195 | #define TIMER_STATS_FLAG_DEFERRABLE 0x1 |
194 | 196 | ||
195 | extern void init_timer_stats(void); | 197 | extern void init_timer_stats(void); |
@@ -203,6 +205,8 @@ extern void __timer_stats_timer_set_start_info(struct timer_list *timer, | |||
203 | 205 | ||
204 | static inline void timer_stats_timer_set_start_info(struct timer_list *timer) | 206 | static inline void timer_stats_timer_set_start_info(struct timer_list *timer) |
205 | { | 207 | { |
208 | if (likely(!timer_stats_active)) | ||
209 | return; | ||
206 | __timer_stats_timer_set_start_info(timer, __builtin_return_address(0)); | 210 | __timer_stats_timer_set_start_info(timer, __builtin_return_address(0)); |
207 | } | 211 | } |
208 | 212 | ||
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index 5d44059f6d63..310e18a880ff 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h | |||
@@ -42,7 +42,6 @@ struct usbnet { | |||
42 | 42 | ||
43 | /* protocol/interface state */ | 43 | /* protocol/interface state */ |
44 | struct net_device *net; | 44 | struct net_device *net; |
45 | struct net_device_stats stats; | ||
46 | int msg_enable; | 45 | int msg_enable; |
47 | unsigned long data [5]; | 46 | unsigned long data [5]; |
48 | u32 xid; | 47 | u32 xid; |
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index a632689b61b4..cbdd6284996d 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h | |||
@@ -258,8 +258,8 @@ static inline bool nf_ct_kill(struct nf_conn *ct) | |||
258 | /* Update TCP window tracking data when NAT mangles the packet */ | 258 | /* Update TCP window tracking data when NAT mangles the packet */ |
259 | extern void nf_conntrack_tcp_update(const struct sk_buff *skb, | 259 | extern void nf_conntrack_tcp_update(const struct sk_buff *skb, |
260 | unsigned int dataoff, | 260 | unsigned int dataoff, |
261 | struct nf_conn *ct, | 261 | struct nf_conn *ct, int dir, |
262 | int dir); | 262 | s16 offset); |
263 | 263 | ||
264 | /* Fake conntrack entry for untracked connections */ | 264 | /* Fake conntrack entry for untracked connections */ |
265 | extern struct nf_conn nf_conntrack_untracked; | 265 | extern struct nf_conn nf_conntrack_untracked; |
diff --git a/include/net/phonet/pn_dev.h b/include/net/phonet/pn_dev.h index 5054dc5ea2c2..29d126736611 100644 --- a/include/net/phonet/pn_dev.h +++ b/include/net/phonet/pn_dev.h | |||
@@ -45,6 +45,7 @@ int phonet_address_add(struct net_device *dev, u8 addr); | |||
45 | int phonet_address_del(struct net_device *dev, u8 addr); | 45 | int phonet_address_del(struct net_device *dev, u8 addr); |
46 | u8 phonet_address_get(struct net_device *dev, u8 addr); | 46 | u8 phonet_address_get(struct net_device *dev, u8 addr); |
47 | int phonet_address_lookup(struct net *net, u8 addr); | 47 | int phonet_address_lookup(struct net *net, u8 addr); |
48 | void phonet_address_notify(int event, struct net_device *dev, u8 addr); | ||
48 | 49 | ||
49 | #define PN_NO_ADDR 0xff | 50 | #define PN_NO_ADDR 0xff |
50 | 51 | ||
diff --git a/ipc/mqueue.c b/ipc/mqueue.c index e35ba2c3a8d7..c5e68adc6732 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/nsproxy.h> | 32 | #include <linux/nsproxy.h> |
33 | #include <linux/pid.h> | 33 | #include <linux/pid.h> |
34 | #include <linux/ipc_namespace.h> | 34 | #include <linux/ipc_namespace.h> |
35 | #include <linux/ima.h> | ||
35 | 36 | ||
36 | #include <net/sock.h> | 37 | #include <net/sock.h> |
37 | #include "util.h" | 38 | #include "util.h" |
@@ -733,6 +734,7 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, mode_t, mode, | |||
733 | error = PTR_ERR(filp); | 734 | error = PTR_ERR(filp); |
734 | goto out_putfd; | 735 | goto out_putfd; |
735 | } | 736 | } |
737 | ima_counts_get(filp); | ||
736 | 738 | ||
737 | fd_install(fd, filp); | 739 | fd_install(fd, filp); |
738 | goto out_upsem; | 740 | goto out_upsem; |
diff --git a/kernel/Makefile b/kernel/Makefile index 780c8dcf4516..2093a691f1c2 100644 --- a/kernel/Makefile +++ b/kernel/Makefile | |||
@@ -96,6 +96,7 @@ obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o | |||
96 | obj-$(CONFIG_FUNCTION_TRACER) += trace/ | 96 | obj-$(CONFIG_FUNCTION_TRACER) += trace/ |
97 | obj-$(CONFIG_TRACING) += trace/ | 97 | obj-$(CONFIG_TRACING) += trace/ |
98 | obj-$(CONFIG_X86_DS) += trace/ | 98 | obj-$(CONFIG_X86_DS) += trace/ |
99 | obj-$(CONFIG_RING_BUFFER) += trace/ | ||
99 | obj-$(CONFIG_SMP) += sched_cpupri.o | 100 | obj-$(CONFIG_SMP) += sched_cpupri.o |
100 | obj-$(CONFIG_SLOW_WORK) += slow-work.o | 101 | obj-$(CONFIG_SLOW_WORK) += slow-work.o |
101 | obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o | 102 | obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o |
diff --git a/kernel/acct.c b/kernel/acct.c index 7afa31564162..9f3391090b3e 100644 --- a/kernel/acct.c +++ b/kernel/acct.c | |||
@@ -215,6 +215,7 @@ static void acct_file_reopen(struct bsd_acct_struct *acct, struct file *file, | |||
215 | static int acct_on(char *name) | 215 | static int acct_on(char *name) |
216 | { | 216 | { |
217 | struct file *file; | 217 | struct file *file; |
218 | struct vfsmount *mnt; | ||
218 | int error; | 219 | int error; |
219 | struct pid_namespace *ns; | 220 | struct pid_namespace *ns; |
220 | struct bsd_acct_struct *acct = NULL; | 221 | struct bsd_acct_struct *acct = NULL; |
@@ -256,11 +257,12 @@ static int acct_on(char *name) | |||
256 | acct = NULL; | 257 | acct = NULL; |
257 | } | 258 | } |
258 | 259 | ||
259 | mnt_pin(file->f_path.mnt); | 260 | mnt = file->f_path.mnt; |
261 | mnt_pin(mnt); | ||
260 | acct_file_reopen(ns->bacct, file, ns); | 262 | acct_file_reopen(ns->bacct, file, ns); |
261 | spin_unlock(&acct_lock); | 263 | spin_unlock(&acct_lock); |
262 | 264 | ||
263 | mntput(file->f_path.mnt); /* it's pinned, now give up active reference */ | 265 | mntput(mnt); /* it's pinned, now give up active reference */ |
264 | kfree(acct); | 266 | kfree(acct); |
265 | 267 | ||
266 | return 0; | 268 | return 0; |
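The acct_on() hunk is an ordering fix: file->f_path.mnt is copied into a local before acct_file_reopen() runs, because the old file may be released inside that call and must not be dereferenced afterwards; the pinned mount is then dropped through the cached pointer. A toy model of the same discipline, with made-up types standing in for struct file and the mount:

    #include <stdio.h>
    #include <stdlib.h>

    struct fake_file {
        const char *mnt;    /* stand-in for file->f_path.mnt */
    };

    static void reopen_may_free(struct fake_file *file)
    {
        /* models acct_file_reopen(): the old file may be released here */
        free(file);
    }

    static void acct_switch(struct fake_file *file)
    {
        const char *mnt = file->mnt;    /* cache before the call below */

        reopen_may_free(file);          /* 'file' is dead past this point */
        printf("dropping reference on %s\n", mnt);  /* use the copy */
    }

    int main(void)
    {
        struct fake_file *f = malloc(sizeof(*f));

        f->mnt = "example-mount";
        acct_switch(f);
        return 0;
    }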
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 1a933a221ea4..d55a50da2347 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c | |||
@@ -236,6 +236,8 @@ list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx) | |||
236 | 236 | ||
237 | list_add_rcu(&counter->event_entry, &ctx->event_list); | 237 | list_add_rcu(&counter->event_entry, &ctx->event_list); |
238 | ctx->nr_counters++; | 238 | ctx->nr_counters++; |
239 | if (counter->attr.inherit_stat) | ||
240 | ctx->nr_stat++; | ||
239 | } | 241 | } |
240 | 242 | ||
241 | /* | 243 | /* |
@@ -250,6 +252,8 @@ list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx) | |||
250 | if (list_empty(&counter->list_entry)) | 252 | if (list_empty(&counter->list_entry)) |
251 | return; | 253 | return; |
252 | ctx->nr_counters--; | 254 | ctx->nr_counters--; |
255 | if (counter->attr.inherit_stat) | ||
256 | ctx->nr_stat--; | ||
253 | 257 | ||
254 | list_del_init(&counter->list_entry); | 258 | list_del_init(&counter->list_entry); |
255 | list_del_rcu(&counter->event_entry); | 259 | list_del_rcu(&counter->event_entry); |
@@ -1006,6 +1010,81 @@ static int context_equiv(struct perf_counter_context *ctx1, | |||
1006 | && !ctx1->pin_count && !ctx2->pin_count; | 1010 | && !ctx1->pin_count && !ctx2->pin_count; |
1007 | } | 1011 | } |
1008 | 1012 | ||
1013 | static void __perf_counter_read(void *counter); | ||
1014 | |||
1015 | static void __perf_counter_sync_stat(struct perf_counter *counter, | ||
1016 | struct perf_counter *next_counter) | ||
1017 | { | ||
1018 | u64 value; | ||
1019 | |||
1020 | if (!counter->attr.inherit_stat) | ||
1021 | return; | ||
1022 | |||
1023 | /* | ||
1024 | * Update the counter value, we cannot use perf_counter_read() | ||
1025 | * because we're in the middle of a context switch and have IRQs | ||
1026 | * disabled, which upsets smp_call_function_single(), however | ||
1027 | * we know the counter must be on the current CPU, therefore we | ||
1028 | * don't need to use it. | ||
1029 | */ | ||
1030 | switch (counter->state) { | ||
1031 | case PERF_COUNTER_STATE_ACTIVE: | ||
1032 | __perf_counter_read(counter); | ||
1033 | break; | ||
1034 | |||
1035 | case PERF_COUNTER_STATE_INACTIVE: | ||
1036 | update_counter_times(counter); | ||
1037 | break; | ||
1038 | |||
1039 | default: | ||
1040 | break; | ||
1041 | } | ||
1042 | |||
1043 | /* | ||
1044 | * In order to keep per-task stats reliable we need to flip the counter | ||
1045 | * values when we flip the contexts. | ||
1046 | */ | ||
1047 | value = atomic64_read(&next_counter->count); | ||
1048 | value = atomic64_xchg(&counter->count, value); | ||
1049 | atomic64_set(&next_counter->count, value); | ||
1050 | |||
1051 | swap(counter->total_time_enabled, next_counter->total_time_enabled); | ||
1052 | swap(counter->total_time_running, next_counter->total_time_running); | ||
1053 | |||
1054 | /* | ||
1055 | * Since we swizzled the values, update the user visible data too. | ||
1056 | */ | ||
1057 | perf_counter_update_userpage(counter); | ||
1058 | perf_counter_update_userpage(next_counter); | ||
1059 | } | ||
1060 | |||
1061 | #define list_next_entry(pos, member) \ | ||
1062 | list_entry(pos->member.next, typeof(*pos), member) | ||
1063 | |||
1064 | static void perf_counter_sync_stat(struct perf_counter_context *ctx, | ||
1065 | struct perf_counter_context *next_ctx) | ||
1066 | { | ||
1067 | struct perf_counter *counter, *next_counter; | ||
1068 | |||
1069 | if (!ctx->nr_stat) | ||
1070 | return; | ||
1071 | |||
1072 | counter = list_first_entry(&ctx->event_list, | ||
1073 | struct perf_counter, event_entry); | ||
1074 | |||
1075 | next_counter = list_first_entry(&next_ctx->event_list, | ||
1076 | struct perf_counter, event_entry); | ||
1077 | |||
1078 | while (&counter->event_entry != &ctx->event_list && | ||
1079 | &next_counter->event_entry != &next_ctx->event_list) { | ||
1080 | |||
1081 | __perf_counter_sync_stat(counter, next_counter); | ||
1082 | |||
1083 | counter = list_next_entry(counter, event_entry); | ||
1084 | next_counter = list_next_entry(counter, event_entry); | ||
1085 | } | ||
1086 | } | ||
1087 | |||
1009 | /* | 1088 | /* |
1010 | * Called from scheduler to remove the counters of the current task, | 1089 | * Called from scheduler to remove the counters of the current task, |
1011 | * with interrupts disabled. | 1090 | * with interrupts disabled. |
@@ -1061,6 +1140,8 @@ void perf_counter_task_sched_out(struct task_struct *task, | |||
1061 | ctx->task = next; | 1140 | ctx->task = next; |
1062 | next_ctx->task = task; | 1141 | next_ctx->task = task; |
1063 | do_switch = 0; | 1142 | do_switch = 0; |
1143 | |||
1144 | perf_counter_sync_stat(ctx, next_ctx); | ||
1064 | } | 1145 | } |
1065 | spin_unlock(&next_ctx->lock); | 1146 | spin_unlock(&next_ctx->lock); |
1066 | spin_unlock(&ctx->lock); | 1147 | spin_unlock(&ctx->lock); |
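perf_counter_sync_stat(), added above, walks the outgoing and incoming contexts' counter lists in lock-step and swaps each pair's value and time totals, so that with inherit_stat the per-task numbers follow the task across the context flip. The stand-alone sketch below shows that lock-step walk and swap on plain singly linked lists; it models the shape of the loop, not the kernel list API.

    #include <stdio.h>

    struct node {
        long value;
        struct node *next;
    };

    static void swap_values(struct node *a, struct node *b)
    {
        long tmp = a->value;

        a->value = b->value;
        b->value = tmp;
    }

    static void sync_lists(struct node *a, struct node *b)
    {
        while (a && b) {
            swap_values(a, b);
            a = a->next;    /* each cursor advances on its own list */
            b = b->next;
        }
    }

    int main(void)
    {
        struct node a1 = { 10, NULL }, a0 = { 5, &a1 };
        struct node b1 = { 100, NULL }, b0 = { 50, &b1 };

        sync_lists(&a0, &b0);
        printf("%ld %ld / %ld %ld\n", a0.value, a1.value, b0.value, b1.value);
        return 0;
    }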
@@ -1348,9 +1429,56 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu) | |||
1348 | } | 1429 | } |
1349 | 1430 | ||
1350 | /* | 1431 | /* |
1432 | * Enable all of a task's counters that have been marked enable-on-exec. | ||
1433 | * This expects task == current. | ||
1434 | */ | ||
1435 | static void perf_counter_enable_on_exec(struct task_struct *task) | ||
1436 | { | ||
1437 | struct perf_counter_context *ctx; | ||
1438 | struct perf_counter *counter; | ||
1439 | unsigned long flags; | ||
1440 | int enabled = 0; | ||
1441 | |||
1442 | local_irq_save(flags); | ||
1443 | ctx = task->perf_counter_ctxp; | ||
1444 | if (!ctx || !ctx->nr_counters) | ||
1445 | goto out; | ||
1446 | |||
1447 | __perf_counter_task_sched_out(ctx); | ||
1448 | |||
1449 | spin_lock(&ctx->lock); | ||
1450 | |||
1451 | list_for_each_entry(counter, &ctx->counter_list, list_entry) { | ||
1452 | if (!counter->attr.enable_on_exec) | ||
1453 | continue; | ||
1454 | counter->attr.enable_on_exec = 0; | ||
1455 | if (counter->state >= PERF_COUNTER_STATE_INACTIVE) | ||
1456 | continue; | ||
1457 | counter->state = PERF_COUNTER_STATE_INACTIVE; | ||
1458 | counter->tstamp_enabled = | ||
1459 | ctx->time - counter->total_time_enabled; | ||
1460 | enabled = 1; | ||
1461 | } | ||
1462 | |||
1463 | /* | ||
1464 | * Unclone this context if we enabled any counter. | ||
1465 | */ | ||
1466 | if (enabled && ctx->parent_ctx) { | ||
1467 | put_ctx(ctx->parent_ctx); | ||
1468 | ctx->parent_ctx = NULL; | ||
1469 | } | ||
1470 | |||
1471 | spin_unlock(&ctx->lock); | ||
1472 | |||
1473 | perf_counter_task_sched_in(task, smp_processor_id()); | ||
1474 | out: | ||
1475 | local_irq_restore(flags); | ||
1476 | } | ||
1477 | |||
1478 | /* | ||
1351 | * Cross CPU call to read the hardware counter | 1479 | * Cross CPU call to read the hardware counter |
1352 | */ | 1480 | */ |
1353 | static void __read(void *info) | 1481 | static void __perf_counter_read(void *info) |
1354 | { | 1482 | { |
1355 | struct perf_counter *counter = info; | 1483 | struct perf_counter *counter = info; |
1356 | struct perf_counter_context *ctx = counter->ctx; | 1484 | struct perf_counter_context *ctx = counter->ctx; |
@@ -1372,7 +1500,7 @@ static u64 perf_counter_read(struct perf_counter *counter) | |||
1372 | */ | 1500 | */ |
1373 | if (counter->state == PERF_COUNTER_STATE_ACTIVE) { | 1501 | if (counter->state == PERF_COUNTER_STATE_ACTIVE) { |
1374 | smp_call_function_single(counter->oncpu, | 1502 | smp_call_function_single(counter->oncpu, |
1375 | __read, counter, 1); | 1503 | __perf_counter_read, counter, 1); |
1376 | } else if (counter->state == PERF_COUNTER_STATE_INACTIVE) { | 1504 | } else if (counter->state == PERF_COUNTER_STATE_INACTIVE) { |
1377 | update_counter_times(counter); | 1505 | update_counter_times(counter); |
1378 | } | 1506 | } |
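perf_counter_enable_on_exec() gives exec() a hook to turn on counters that were opened disabled with attr.enable_on_exec set, clearing the flag so it fires only once and leaving counters that are already inactive or active alone. The toy below mirrors that decision logic over a flat array; the state names are simplified but keep the kernel's ordering (off below inactive below active).

    #include <stdbool.h>
    #include <stdio.h>

    enum state { STATE_OFF, STATE_INACTIVE, STATE_ACTIVE };

    struct counter {
        enum state state;
        bool enable_on_exec;
    };

    static bool enable_on_exec(struct counter *ctrs, int n)
    {
        bool enabled = false;
        int i;

        for (i = 0; i < n; i++) {
            if (!ctrs[i].enable_on_exec)
                continue;
            ctrs[i].enable_on_exec = false;     /* one-shot, like the hunk */
            if (ctrs[i].state >= STATE_INACTIVE)
                continue;                       /* already armed or counting */
            ctrs[i].state = STATE_INACTIVE;     /* scheduler will activate it */
            enabled = true;
        }
        return enabled;
    }

    int main(void)
    {
        struct counter ctrs[] = {
            { STATE_OFF,      true  },  /* armed for exec */
            { STATE_INACTIVE, false },  /* untouched */
        };

        printf("enabled anything: %d\n", enable_on_exec(ctrs, 2));
        printf("counter 0 state:  %d\n", ctrs[0].state);
        return 0;
    }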
@@ -1508,11 +1636,13 @@ static void free_counter(struct perf_counter *counter) | |||
1508 | { | 1636 | { |
1509 | perf_pending_sync(counter); | 1637 | perf_pending_sync(counter); |
1510 | 1638 | ||
1511 | atomic_dec(&nr_counters); | 1639 | if (!counter->parent) { |
1512 | if (counter->attr.mmap) | 1640 | atomic_dec(&nr_counters); |
1513 | atomic_dec(&nr_mmap_counters); | 1641 | if (counter->attr.mmap) |
1514 | if (counter->attr.comm) | 1642 | atomic_dec(&nr_mmap_counters); |
1515 | atomic_dec(&nr_comm_counters); | 1643 | if (counter->attr.comm) |
1644 | atomic_dec(&nr_comm_counters); | ||
1645 | } | ||
1516 | 1646 | ||
1517 | if (counter->destroy) | 1647 | if (counter->destroy) |
1518 | counter->destroy(counter); | 1648 | counter->destroy(counter); |
@@ -1751,6 +1881,14 @@ int perf_counter_task_disable(void) | |||
1751 | return 0; | 1881 | return 0; |
1752 | } | 1882 | } |
1753 | 1883 | ||
1884 | static int perf_counter_index(struct perf_counter *counter) | ||
1885 | { | ||
1886 | if (counter->state != PERF_COUNTER_STATE_ACTIVE) | ||
1887 | return 0; | ||
1888 | |||
1889 | return counter->hw.idx + 1 - PERF_COUNTER_INDEX_OFFSET; | ||
1890 | } | ||
1891 | |||
1754 | /* | 1892 | /* |
1755 | * Callers need to ensure there can be no nesting of this function, otherwise | 1893 | * Callers need to ensure there can be no nesting of this function, otherwise |
1756 | * the seqlock logic goes bad. We can not serialize this because the arch | 1894 | * the seqlock logic goes bad. We can not serialize this because the arch |
@@ -1775,11 +1913,17 @@ void perf_counter_update_userpage(struct perf_counter *counter) | |||
1775 | preempt_disable(); | 1913 | preempt_disable(); |
1776 | ++userpg->lock; | 1914 | ++userpg->lock; |
1777 | barrier(); | 1915 | barrier(); |
1778 | userpg->index = counter->hw.idx; | 1916 | userpg->index = perf_counter_index(counter); |
1779 | userpg->offset = atomic64_read(&counter->count); | 1917 | userpg->offset = atomic64_read(&counter->count); |
1780 | if (counter->state == PERF_COUNTER_STATE_ACTIVE) | 1918 | if (counter->state == PERF_COUNTER_STATE_ACTIVE) |
1781 | userpg->offset -= atomic64_read(&counter->hw.prev_count); | 1919 | userpg->offset -= atomic64_read(&counter->hw.prev_count); |
1782 | 1920 | ||
1921 | userpg->time_enabled = counter->total_time_enabled + | ||
1922 | atomic64_read(&counter->child_total_time_enabled); | ||
1923 | |||
1924 | userpg->time_running = counter->total_time_running + | ||
1925 | atomic64_read(&counter->child_total_time_running); | ||
1926 | |||
1783 | barrier(); | 1927 | barrier(); |
1784 | ++userpg->lock; | 1928 | ++userpg->lock; |
1785 | preempt_enable(); | 1929 | preempt_enable(); |
@@ -2483,15 +2627,14 @@ static void perf_counter_output(struct perf_counter *counter, int nmi, | |||
2483 | u32 cpu, reserved; | 2627 | u32 cpu, reserved; |
2484 | } cpu_entry; | 2628 | } cpu_entry; |
2485 | 2629 | ||
2486 | header.type = 0; | 2630 | header.type = PERF_EVENT_SAMPLE; |
2487 | header.size = sizeof(header); | 2631 | header.size = sizeof(header); |
2488 | 2632 | ||
2489 | header.misc = PERF_EVENT_MISC_OVERFLOW; | 2633 | header.misc = 0; |
2490 | header.misc |= perf_misc_flags(data->regs); | 2634 | header.misc |= perf_misc_flags(data->regs); |
2491 | 2635 | ||
2492 | if (sample_type & PERF_SAMPLE_IP) { | 2636 | if (sample_type & PERF_SAMPLE_IP) { |
2493 | ip = perf_instruction_pointer(data->regs); | 2637 | ip = perf_instruction_pointer(data->regs); |
2494 | header.type |= PERF_SAMPLE_IP; | ||
2495 | header.size += sizeof(ip); | 2638 | header.size += sizeof(ip); |
2496 | } | 2639 | } |
2497 | 2640 | ||
@@ -2500,7 +2643,6 @@ static void perf_counter_output(struct perf_counter *counter, int nmi, | |||
2500 | tid_entry.pid = perf_counter_pid(counter, current); | 2643 | tid_entry.pid = perf_counter_pid(counter, current); |
2501 | tid_entry.tid = perf_counter_tid(counter, current); | 2644 | tid_entry.tid = perf_counter_tid(counter, current); |
2502 | 2645 | ||
2503 | header.type |= PERF_SAMPLE_TID; | ||
2504 | header.size += sizeof(tid_entry); | 2646 | header.size += sizeof(tid_entry); |
2505 | } | 2647 | } |
2506 | 2648 | ||
@@ -2510,34 +2652,25 @@ static void perf_counter_output(struct perf_counter *counter, int nmi, | |||
2510 | */ | 2652 | */ |
2511 | time = sched_clock(); | 2653 | time = sched_clock(); |
2512 | 2654 | ||
2513 | header.type |= PERF_SAMPLE_TIME; | ||
2514 | header.size += sizeof(u64); | 2655 | header.size += sizeof(u64); |
2515 | } | 2656 | } |
2516 | 2657 | ||
2517 | if (sample_type & PERF_SAMPLE_ADDR) { | 2658 | if (sample_type & PERF_SAMPLE_ADDR) |
2518 | header.type |= PERF_SAMPLE_ADDR; | ||
2519 | header.size += sizeof(u64); | 2659 | header.size += sizeof(u64); |
2520 | } | ||
2521 | 2660 | ||
2522 | if (sample_type & PERF_SAMPLE_ID) { | 2661 | if (sample_type & PERF_SAMPLE_ID) |
2523 | header.type |= PERF_SAMPLE_ID; | ||
2524 | header.size += sizeof(u64); | 2662 | header.size += sizeof(u64); |
2525 | } | ||
2526 | 2663 | ||
2527 | if (sample_type & PERF_SAMPLE_CPU) { | 2664 | if (sample_type & PERF_SAMPLE_CPU) { |
2528 | header.type |= PERF_SAMPLE_CPU; | ||
2529 | header.size += sizeof(cpu_entry); | 2665 | header.size += sizeof(cpu_entry); |
2530 | 2666 | ||
2531 | cpu_entry.cpu = raw_smp_processor_id(); | 2667 | cpu_entry.cpu = raw_smp_processor_id(); |
2532 | } | 2668 | } |
2533 | 2669 | ||
2534 | if (sample_type & PERF_SAMPLE_PERIOD) { | 2670 | if (sample_type & PERF_SAMPLE_PERIOD) |
2535 | header.type |= PERF_SAMPLE_PERIOD; | ||
2536 | header.size += sizeof(u64); | 2671 | header.size += sizeof(u64); |
2537 | } | ||
2538 | 2672 | ||
2539 | if (sample_type & PERF_SAMPLE_GROUP) { | 2673 | if (sample_type & PERF_SAMPLE_GROUP) { |
2540 | header.type |= PERF_SAMPLE_GROUP; | ||
2541 | header.size += sizeof(u64) + | 2674 | header.size += sizeof(u64) + |
2542 | counter->nr_siblings * sizeof(group_entry); | 2675 | counter->nr_siblings * sizeof(group_entry); |
2543 | } | 2676 | } |
@@ -2547,10 +2680,9 @@ static void perf_counter_output(struct perf_counter *counter, int nmi, | |||
2547 | 2680 | ||
2548 | if (callchain) { | 2681 | if (callchain) { |
2549 | callchain_size = (1 + callchain->nr) * sizeof(u64); | 2682 | callchain_size = (1 + callchain->nr) * sizeof(u64); |
2550 | |||
2551 | header.type |= PERF_SAMPLE_CALLCHAIN; | ||
2552 | header.size += callchain_size; | 2683 | header.size += callchain_size; |
2553 | } | 2684 | } else |
2685 | header.size += sizeof(u64); | ||
2554 | } | 2686 | } |
2555 | 2687 | ||
2556 | ret = perf_output_begin(&handle, counter, header.size, nmi, 1); | 2688 | ret = perf_output_begin(&handle, counter, header.size, nmi, 1); |
@@ -2601,13 +2733,79 @@ static void perf_counter_output(struct perf_counter *counter, int nmi, | |||
2601 | } | 2733 | } |
2602 | } | 2734 | } |
2603 | 2735 | ||
2604 | if (callchain) | 2736 | if (sample_type & PERF_SAMPLE_CALLCHAIN) { |
2605 | perf_output_copy(&handle, callchain, callchain_size); | 2737 | if (callchain) |
2738 | perf_output_copy(&handle, callchain, callchain_size); | ||
2739 | else { | ||
2740 | u64 nr = 0; | ||
2741 | perf_output_put(&handle, nr); | ||
2742 | } | ||
2743 | } | ||
2606 | 2744 | ||
2607 | perf_output_end(&handle); | 2745 | perf_output_end(&handle); |
2608 | } | 2746 | } |
2609 | 2747 | ||
2610 | /* | 2748 | /* |
2749 | * read event | ||
2750 | */ | ||
2751 | |||
2752 | struct perf_read_event { | ||
2753 | struct perf_event_header header; | ||
2754 | |||
2755 | u32 pid; | ||
2756 | u32 tid; | ||
2757 | u64 value; | ||
2758 | u64 format[3]; | ||
2759 | }; | ||
2760 | |||
2761 | static void | ||
2762 | perf_counter_read_event(struct perf_counter *counter, | ||
2763 | struct task_struct *task) | ||
2764 | { | ||
2765 | struct perf_output_handle handle; | ||
2766 | struct perf_read_event event = { | ||
2767 | .header = { | ||
2768 | .type = PERF_EVENT_READ, | ||
2769 | .misc = 0, | ||
2770 | .size = sizeof(event) - sizeof(event.format), | ||
2771 | }, | ||
2772 | .pid = perf_counter_pid(counter, task), | ||
2773 | .tid = perf_counter_tid(counter, task), | ||
2774 | .value = atomic64_read(&counter->count), | ||
2775 | }; | ||
2776 | int ret, i = 0; | ||
2777 | |||
2778 | if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { | ||
2779 | event.header.size += sizeof(u64); | ||
2780 | event.format[i++] = counter->total_time_enabled; | ||
2781 | } | ||
2782 | |||
2783 | if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { | ||
2784 | event.header.size += sizeof(u64); | ||
2785 | event.format[i++] = counter->total_time_running; | ||
2786 | } | ||
2787 | |||
2788 | if (counter->attr.read_format & PERF_FORMAT_ID) { | ||
2789 | u64 id; | ||
2790 | |||
2791 | event.header.size += sizeof(u64); | ||
2792 | if (counter->parent) | ||
2793 | id = counter->parent->id; | ||
2794 | else | ||
2795 | id = counter->id; | ||
2796 | |||
2797 | event.format[i++] = id; | ||
2798 | } | ||
2799 | |||
2800 | ret = perf_output_begin(&handle, counter, event.header.size, 0, 0); | ||
2801 | if (ret) | ||
2802 | return; | ||
2803 | |||
2804 | perf_output_copy(&handle, &event, event.header.size); | ||
2805 | perf_output_end(&handle); | ||
2806 | } | ||
2807 | |||
2808 | /* | ||
2611 | * fork tracking | 2809 | * fork tracking |
2612 | */ | 2810 | */ |
2613 | 2811 | ||
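PERF_EVENT_READ records carry a fixed pid/tid/value part followed by up to three optional u64 fields selected by the counter's read_format, with event.header.size grown to match. A consumer sizing the record would mirror that logic; the sketch below assumes the usual bit assignments for the PERF_FORMAT_* flags (enabled time, running time, id in the three lowest bits), which is an assumption rather than something stated in the hunk.

    #include <stdint.h>
    #include <stdio.h>

    #define FORMAT_TOTAL_TIME_ENABLED   (1U << 0)
    #define FORMAT_TOTAL_TIME_RUNNING   (1U << 1)
    #define FORMAT_ID                   (1U << 2)

    static size_t read_event_size(uint64_t read_format)
    {
        size_t size = 8 /* header */ + 4 + 4 /* pid, tid */ + 8 /* value */;

        if (read_format & FORMAT_TOTAL_TIME_ENABLED)
            size += 8;
        if (read_format & FORMAT_TOTAL_TIME_RUNNING)
            size += 8;
        if (read_format & FORMAT_ID)
            size += 8;
        return size;
    }

    int main(void)
    {
        printf("%zu\n", read_event_size(FORMAT_ID));                 /* 32 */
        printf("%zu\n", read_event_size(FORMAT_TOTAL_TIME_ENABLED |
                                        FORMAT_TOTAL_TIME_RUNNING |
                                        FORMAT_ID));                 /* 48 */
        return 0;
    }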
@@ -2798,6 +2996,9 @@ void perf_counter_comm(struct task_struct *task) | |||
2798 | { | 2996 | { |
2799 | struct perf_comm_event comm_event; | 2997 | struct perf_comm_event comm_event; |
2800 | 2998 | ||
2999 | if (task->perf_counter_ctxp) | ||
3000 | perf_counter_enable_on_exec(task); | ||
3001 | |||
2801 | if (!atomic_read(&nr_comm_counters)) | 3002 | if (!atomic_read(&nr_comm_counters)) |
2802 | return; | 3003 | return; |
2803 | 3004 | ||
@@ -3317,8 +3518,8 @@ out: | |||
3317 | put_cpu_var(perf_cpu_context); | 3518 | put_cpu_var(perf_cpu_context); |
3318 | } | 3519 | } |
3319 | 3520 | ||
3320 | void | 3521 | void __perf_swcounter_event(u32 event, u64 nr, int nmi, |
3321 | perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr) | 3522 | struct pt_regs *regs, u64 addr) |
3322 | { | 3523 | { |
3323 | struct perf_sample_data data = { | 3524 | struct perf_sample_data data = { |
3324 | .regs = regs, | 3525 | .regs = regs, |
@@ -3509,9 +3710,21 @@ static const struct pmu *tp_perf_counter_init(struct perf_counter *counter) | |||
3509 | } | 3710 | } |
3510 | #endif | 3711 | #endif |
3511 | 3712 | ||
3713 | atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX]; | ||
3714 | |||
3715 | static void sw_perf_counter_destroy(struct perf_counter *counter) | ||
3716 | { | ||
3717 | u64 event = counter->attr.config; | ||
3718 | |||
3719 | WARN_ON(counter->parent); | ||
3720 | |||
3721 | atomic_dec(&perf_swcounter_enabled[event]); | ||
3722 | } | ||
3723 | |||
3512 | static const struct pmu *sw_perf_counter_init(struct perf_counter *counter) | 3724 | static const struct pmu *sw_perf_counter_init(struct perf_counter *counter) |
3513 | { | 3725 | { |
3514 | const struct pmu *pmu = NULL; | 3726 | const struct pmu *pmu = NULL; |
3727 | u64 event = counter->attr.config; | ||
3515 | 3728 | ||
3516 | /* | 3729 | /* |
3517 | * Software counters (currently) can't in general distinguish | 3730 | * Software counters (currently) can't in general distinguish |
@@ -3520,7 +3733,7 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter) | |||
3520 | * to be kernel events, and page faults are never hypervisor | 3733 | * to be kernel events, and page faults are never hypervisor |
3521 | * events. | 3734 | * events. |
3522 | */ | 3735 | */ |
3523 | switch (counter->attr.config) { | 3736 | switch (event) { |
3524 | case PERF_COUNT_SW_CPU_CLOCK: | 3737 | case PERF_COUNT_SW_CPU_CLOCK: |
3525 | pmu = &perf_ops_cpu_clock; | 3738 | pmu = &perf_ops_cpu_clock; |
3526 | 3739 | ||
@@ -3541,6 +3754,10 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter) | |||
3541 | case PERF_COUNT_SW_PAGE_FAULTS_MAJ: | 3754 | case PERF_COUNT_SW_PAGE_FAULTS_MAJ: |
3542 | case PERF_COUNT_SW_CONTEXT_SWITCHES: | 3755 | case PERF_COUNT_SW_CONTEXT_SWITCHES: |
3543 | case PERF_COUNT_SW_CPU_MIGRATIONS: | 3756 | case PERF_COUNT_SW_CPU_MIGRATIONS: |
3757 | if (!counter->parent) { | ||
3758 | atomic_inc(&perf_swcounter_enabled[event]); | ||
3759 | counter->destroy = sw_perf_counter_destroy; | ||
3760 | } | ||
3544 | pmu = &perf_ops_generic; | 3761 | pmu = &perf_ops_generic; |
3545 | break; | 3762 | break; |
3546 | } | 3763 | } |
@@ -3556,6 +3773,7 @@ perf_counter_alloc(struct perf_counter_attr *attr, | |||
3556 | int cpu, | 3773 | int cpu, |
3557 | struct perf_counter_context *ctx, | 3774 | struct perf_counter_context *ctx, |
3558 | struct perf_counter *group_leader, | 3775 | struct perf_counter *group_leader, |
3776 | struct perf_counter *parent_counter, | ||
3559 | gfp_t gfpflags) | 3777 | gfp_t gfpflags) |
3560 | { | 3778 | { |
3561 | const struct pmu *pmu; | 3779 | const struct pmu *pmu; |
@@ -3591,6 +3809,8 @@ perf_counter_alloc(struct perf_counter_attr *attr, | |||
3591 | counter->ctx = ctx; | 3809 | counter->ctx = ctx; |
3592 | counter->oncpu = -1; | 3810 | counter->oncpu = -1; |
3593 | 3811 | ||
3812 | counter->parent = parent_counter; | ||
3813 | |||
3594 | counter->ns = get_pid_ns(current->nsproxy->pid_ns); | 3814 | counter->ns = get_pid_ns(current->nsproxy->pid_ns); |
3595 | counter->id = atomic64_inc_return(&perf_counter_id); | 3815 | counter->id = atomic64_inc_return(&perf_counter_id); |
3596 | 3816 | ||
@@ -3648,11 +3868,13 @@ done: | |||
3648 | 3868 | ||
3649 | counter->pmu = pmu; | 3869 | counter->pmu = pmu; |
3650 | 3870 | ||
3651 | atomic_inc(&nr_counters); | 3871 | if (!counter->parent) { |
3652 | if (counter->attr.mmap) | 3872 | atomic_inc(&nr_counters); |
3653 | atomic_inc(&nr_mmap_counters); | 3873 | if (counter->attr.mmap) |
3654 | if (counter->attr.comm) | 3874 | atomic_inc(&nr_mmap_counters); |
3655 | atomic_inc(&nr_comm_counters); | 3875 | if (counter->attr.comm) |
3876 | atomic_inc(&nr_comm_counters); | ||
3877 | } | ||
3656 | 3878 | ||
3657 | return counter; | 3879 | return counter; |
3658 | } | 3880 | } |
@@ -3815,7 +4037,7 @@ SYSCALL_DEFINE5(perf_counter_open, | |||
3815 | } | 4037 | } |
3816 | 4038 | ||
3817 | counter = perf_counter_alloc(&attr, cpu, ctx, group_leader, | 4039 | counter = perf_counter_alloc(&attr, cpu, ctx, group_leader, |
3818 | GFP_KERNEL); | 4040 | NULL, GFP_KERNEL); |
3819 | ret = PTR_ERR(counter); | 4041 | ret = PTR_ERR(counter); |
3820 | if (IS_ERR(counter)) | 4042 | if (IS_ERR(counter)) |
3821 | goto err_put_context; | 4043 | goto err_put_context; |
@@ -3881,7 +4103,8 @@ inherit_counter(struct perf_counter *parent_counter, | |||
3881 | 4103 | ||
3882 | child_counter = perf_counter_alloc(&parent_counter->attr, | 4104 | child_counter = perf_counter_alloc(&parent_counter->attr, |
3883 | parent_counter->cpu, child_ctx, | 4105 | parent_counter->cpu, child_ctx, |
3884 | group_leader, GFP_KERNEL); | 4106 | group_leader, parent_counter, |
4107 | GFP_KERNEL); | ||
3885 | if (IS_ERR(child_counter)) | 4108 | if (IS_ERR(child_counter)) |
3886 | return child_counter; | 4109 | return child_counter; |
3887 | get_ctx(child_ctx); | 4110 | get_ctx(child_ctx); |
@@ -3904,12 +4127,6 @@ inherit_counter(struct perf_counter *parent_counter, | |||
3904 | */ | 4127 | */ |
3905 | add_counter_to_ctx(child_counter, child_ctx); | 4128 | add_counter_to_ctx(child_counter, child_ctx); |
3906 | 4129 | ||
3907 | child_counter->parent = parent_counter; | ||
3908 | /* | ||
3909 | * inherit into child's child as well: | ||
3910 | */ | ||
3911 | child_counter->attr.inherit = 1; | ||
3912 | |||
3913 | /* | 4130 | /* |
3914 | * Get a reference to the parent filp - we will fput it | 4131 | * Get a reference to the parent filp - we will fput it |
3915 | * when the child counter exits. This is safe to do because | 4132 | * when the child counter exits. This is safe to do because |
@@ -3953,10 +4170,14 @@ static int inherit_group(struct perf_counter *parent_counter, | |||
3953 | } | 4170 | } |
3954 | 4171 | ||
3955 | static void sync_child_counter(struct perf_counter *child_counter, | 4172 | static void sync_child_counter(struct perf_counter *child_counter, |
3956 | struct perf_counter *parent_counter) | 4173 | struct task_struct *child) |
3957 | { | 4174 | { |
4175 | struct perf_counter *parent_counter = child_counter->parent; | ||
3958 | u64 child_val; | 4176 | u64 child_val; |
3959 | 4177 | ||
4178 | if (child_counter->attr.inherit_stat) | ||
4179 | perf_counter_read_event(child_counter, child); | ||
4180 | |||
3960 | child_val = atomic64_read(&child_counter->count); | 4181 | child_val = atomic64_read(&child_counter->count); |
3961 | 4182 | ||
3962 | /* | 4183 | /* |
@@ -3985,7 +4206,8 @@ static void sync_child_counter(struct perf_counter *child_counter, | |||
3985 | 4206 | ||
3986 | static void | 4207 | static void |
3987 | __perf_counter_exit_task(struct perf_counter *child_counter, | 4208 | __perf_counter_exit_task(struct perf_counter *child_counter, |
3988 | struct perf_counter_context *child_ctx) | 4209 | struct perf_counter_context *child_ctx, |
4210 | struct task_struct *child) | ||
3989 | { | 4211 | { |
3990 | struct perf_counter *parent_counter; | 4212 | struct perf_counter *parent_counter; |
3991 | 4213 | ||
@@ -3999,7 +4221,7 @@ __perf_counter_exit_task(struct perf_counter *child_counter, | |||
3999 | * counters need to be zapped - but otherwise linger. | 4221 | * counters need to be zapped - but otherwise linger. |
4000 | */ | 4222 | */ |
4001 | if (parent_counter) { | 4223 | if (parent_counter) { |
4002 | sync_child_counter(child_counter, parent_counter); | 4224 | sync_child_counter(child_counter, child); |
4003 | free_counter(child_counter); | 4225 | free_counter(child_counter); |
4004 | } | 4226 | } |
4005 | } | 4227 | } |
@@ -4061,7 +4283,7 @@ void perf_counter_exit_task(struct task_struct *child) | |||
4061 | again: | 4283 | again: |
4062 | list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list, | 4284 | list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list, |
4063 | list_entry) | 4285 | list_entry) |
4064 | __perf_counter_exit_task(child_counter, child_ctx); | 4286 | __perf_counter_exit_task(child_counter, child_ctx, child); |
4065 | 4287 | ||
4066 | /* | 4288 | /* |
4067 | * If the last counter was a group counter, it will have appended all | 4289 | * If the last counter was a group counter, it will have appended all |
diff --git a/kernel/pid.c b/kernel/pid.c index 31310b5d3f50..5fa1db48d8b7 100644 --- a/kernel/pid.c +++ b/kernel/pid.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/pid_namespace.h> | 36 | #include <linux/pid_namespace.h> |
37 | #include <linux/init_task.h> | 37 | #include <linux/init_task.h> |
38 | #include <linux/syscalls.h> | 38 | #include <linux/syscalls.h> |
39 | #include <linux/kmemleak.h> | ||
39 | 40 | ||
40 | #define pid_hashfn(nr, ns) \ | 41 | #define pid_hashfn(nr, ns) \ |
41 | hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift) | 42 | hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift) |
@@ -512,6 +513,12 @@ void __init pidhash_init(void) | |||
512 | pid_hash = alloc_bootmem(pidhash_size * sizeof(*(pid_hash))); | 513 | pid_hash = alloc_bootmem(pidhash_size * sizeof(*(pid_hash))); |
513 | if (!pid_hash) | 514 | if (!pid_hash) |
514 | panic("Could not alloc pidhash!\n"); | 515 | panic("Could not alloc pidhash!\n"); |
516 | /* | ||
517 | * pid_hash contains references to allocated struct pid objects and it | ||
518 | * must be scanned by kmemleak to avoid false positives. | ||
519 | */ | ||
520 | kmemleak_alloc(pid_hash, pidhash_size * sizeof(*(pid_hash)), 0, | ||
521 | GFP_KERNEL); | ||
515 | for (i = 0; i < pidhash_size; i++) | 522 | for (i = 0; i < pidhash_size; i++) |
516 | INIT_HLIST_HEAD(&pid_hash[i]); | 523 | INIT_HLIST_HEAD(&pid_hash[i]); |
517 | } | 524 | } |
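The pidhash_init() hunk registers the bootmem-allocated hash table with kmemleak so the scanner will look inside it; otherwise every struct pid it references would appear unreferenced and be reported as a leak. The toy scanner below illustrates that effect (register the table and the false positive disappears); it is an illustration of the idea, not kmemleak's implementation.

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define TRACKED_MAX 16

    struct block { void *start; size_t size; };

    static struct block tracked[TRACKED_MAX];
    static int ntracked;

    static void scanner_register(void *start, size_t size)
    {
        tracked[ntracked].start = start;
        tracked[ntracked].size = size;
        ntracked++;
    }

    static bool scanner_references(void *target)
    {
        int i;
        size_t off;

        for (i = 0; i < ntracked; i++)
            for (off = 0; off + sizeof(void *) <= tracked[i].size;
                 off += sizeof(void *)) {
                void *p;

                memcpy(&p, (char *)tracked[i].start + off, sizeof(p));
                if (p == target)
                    return true;    /* someone points at it */
            }
        return false;               /* would be reported as a leak */
    }

    int main(void)
    {
        static void *hash[4];       /* stand-in for pid_hash */
        int object = 42;            /* stand-in for a struct pid */

        hash[1] = &object;
        printf("before register: leaked=%d\n", !scanner_references(&object));
        scanner_register(hash, sizeof(hash));   /* the kmemleak_alloc() analogue */
        printf("after register:  leaked=%d\n", !scanner_references(&object));
        return 0;
    }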
diff --git a/kernel/resource.c b/kernel/resource.c index ac5f3a36923f..78b087221c15 100644 --- a/kernel/resource.c +++ b/kernel/resource.c | |||
@@ -787,7 +787,7 @@ static int __init reserve_setup(char *str) | |||
787 | static struct resource reserve[MAXRESERVE]; | 787 | static struct resource reserve[MAXRESERVE]; |
788 | 788 | ||
789 | for (;;) { | 789 | for (;;) { |
790 | int io_start, io_num; | 790 | unsigned int io_start, io_num; |
791 | int x = reserved; | 791 | int x = reserved; |
792 | 792 | ||
793 | if (get_option (&str, &io_start) != 2) | 793 | if (get_option (&str, &io_start) != 2) |
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 62e4ff9968b5..98e02328c67d 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
@@ -335,7 +335,10 @@ static struct ctl_table kern_table[] = { | |||
335 | .data = &sysctl_timer_migration, | 335 | .data = &sysctl_timer_migration, |
336 | .maxlen = sizeof(unsigned int), | 336 | .maxlen = sizeof(unsigned int), |
337 | .mode = 0644, | 337 | .mode = 0644, |
338 | .proc_handler = &proc_dointvec, | 338 | .proc_handler = &proc_dointvec_minmax, |
339 | .strategy = &sysctl_intvec, | ||
340 | .extra1 = &zero, | ||
341 | .extra2 = &one, | ||
339 | }, | 342 | }, |
340 | #endif | 343 | #endif |
341 | { | 344 | { |
@@ -744,6 +747,14 @@ static struct ctl_table kern_table[] = { | |||
744 | .proc_handler = &proc_dointvec, | 747 | .proc_handler = &proc_dointvec, |
745 | }, | 748 | }, |
746 | { | 749 | { |
750 | .ctl_name = CTL_UNNUMBERED, | ||
751 | .procname = "panic_on_io_nmi", | ||
752 | .data = &panic_on_io_nmi, | ||
753 | .maxlen = sizeof(int), | ||
754 | .mode = 0644, | ||
755 | .proc_handler = &proc_dointvec, | ||
756 | }, | ||
757 | { | ||
747 | .ctl_name = KERN_BOOTLOADER_TYPE, | 758 | .ctl_name = KERN_BOOTLOADER_TYPE, |
748 | .procname = "bootloader_type", | 759 | .procname = "bootloader_type", |
749 | .data = &bootloader_type, | 760 | .data = &bootloader_type, |
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c index c994530d166d..4cde8b9c716f 100644 --- a/kernel/time/timer_stats.c +++ b/kernel/time/timer_stats.c | |||
@@ -96,7 +96,7 @@ static DEFINE_MUTEX(show_mutex); | |||
96 | /* | 96 | /* |
97 | * Collection status, active/inactive: | 97 | * Collection status, active/inactive: |
98 | */ | 98 | */ |
99 | static int __read_mostly active; | 99 | int __read_mostly timer_stats_active; |
100 | 100 | ||
101 | /* | 101 | /* |
102 | * Beginning/end timestamps of measurement: | 102 | * Beginning/end timestamps of measurement: |
@@ -242,7 +242,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf, | |||
242 | struct entry *entry, input; | 242 | struct entry *entry, input; |
243 | unsigned long flags; | 243 | unsigned long flags; |
244 | 244 | ||
245 | if (likely(!active)) | 245 | if (likely(!timer_stats_active)) |
246 | return; | 246 | return; |
247 | 247 | ||
248 | lock = &per_cpu(lookup_lock, raw_smp_processor_id()); | 248 | lock = &per_cpu(lookup_lock, raw_smp_processor_id()); |
@@ -254,7 +254,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf, | |||
254 | input.timer_flag = timer_flag; | 254 | input.timer_flag = timer_flag; |
255 | 255 | ||
256 | spin_lock_irqsave(lock, flags); | 256 | spin_lock_irqsave(lock, flags); |
257 | if (!active) | 257 | if (!timer_stats_active) |
258 | goto out_unlock; | 258 | goto out_unlock; |
259 | 259 | ||
260 | entry = tstat_lookup(&input, comm); | 260 | entry = tstat_lookup(&input, comm); |
@@ -290,7 +290,7 @@ static int tstats_show(struct seq_file *m, void *v) | |||
290 | /* | 290 | /* |
291 | * If still active then calculate up to now: | 291 | * If still active then calculate up to now: |
292 | */ | 292 | */ |
293 | if (active) | 293 | if (timer_stats_active) |
294 | time_stop = ktime_get(); | 294 | time_stop = ktime_get(); |
295 | 295 | ||
296 | time = ktime_sub(time_stop, time_start); | 296 | time = ktime_sub(time_stop, time_start); |
@@ -368,18 +368,18 @@ static ssize_t tstats_write(struct file *file, const char __user *buf, | |||
368 | mutex_lock(&show_mutex); | 368 | mutex_lock(&show_mutex); |
369 | switch (ctl[0]) { | 369 | switch (ctl[0]) { |
370 | case '0': | 370 | case '0': |
371 | if (active) { | 371 | if (timer_stats_active) { |
372 | active = 0; | 372 | timer_stats_active = 0; |
373 | time_stop = ktime_get(); | 373 | time_stop = ktime_get(); |
374 | sync_access(); | 374 | sync_access(); |
375 | } | 375 | } |
376 | break; | 376 | break; |
377 | case '1': | 377 | case '1': |
378 | if (!active) { | 378 | if (!timer_stats_active) { |
379 | reset_entries(); | 379 | reset_entries(); |
380 | time_start = ktime_get(); | 380 | time_start = ktime_get(); |
381 | smp_mb(); | 381 | smp_mb(); |
382 | active = 1; | 382 | timer_stats_active = 1; |
383 | } | 383 | } |
384 | break; | 384 | break; |
385 | default: | 385 | default: |
diff --git a/kernel/timer.c b/kernel/timer.c index 54d3912f8cad..0b36b9e5cc8b 100644 --- a/kernel/timer.c +++ b/kernel/timer.c | |||
@@ -380,6 +380,8 @@ static void timer_stats_account_timer(struct timer_list *timer) | |||
380 | { | 380 | { |
381 | unsigned int flag = 0; | 381 | unsigned int flag = 0; |
382 | 382 | ||
383 | if (likely(!timer->start_site)) | ||
384 | return; | ||
383 | if (unlikely(tbase_get_deferrable(timer->base))) | 385 | if (unlikely(tbase_get_deferrable(timer->base))) |
384 | flag |= TIMER_STATS_FLAG_DEFERRABLE; | 386 | flag |= TIMER_STATS_FLAG_DEFERRABLE; |
385 | 387 | ||
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 3718d55fb4c3..f3716bf04df6 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -291,7 +291,9 @@ function_stat_next(void *v, int idx) | |||
291 | pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK); | 291 | pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK); |
292 | 292 | ||
293 | again: | 293 | again: |
294 | rec++; | 294 | if (idx != 0) |
295 | rec++; | ||
296 | |||
295 | if ((void *)rec >= (void *)&pg->records[pg->index]) { | 297 | if ((void *)rec >= (void *)&pg->records[pg->index]) { |
296 | pg = pg->next; | 298 | pg = pg->next; |
297 | if (!pg) | 299 | if (!pg) |
@@ -1417,10 +1419,20 @@ static void *t_hash_start(struct seq_file *m, loff_t *pos) | |||
1417 | { | 1419 | { |
1418 | struct ftrace_iterator *iter = m->private; | 1420 | struct ftrace_iterator *iter = m->private; |
1419 | void *p = NULL; | 1421 | void *p = NULL; |
1422 | loff_t l; | ||
1423 | |||
1424 | if (!(iter->flags & FTRACE_ITER_HASH)) | ||
1425 | *pos = 0; | ||
1420 | 1426 | ||
1421 | iter->flags |= FTRACE_ITER_HASH; | 1427 | iter->flags |= FTRACE_ITER_HASH; |
1422 | 1428 | ||
1423 | return t_hash_next(m, p, pos); | 1429 | iter->hidx = 0; |
1430 | for (l = 0; l <= *pos; ) { | ||
1431 | p = t_hash_next(m, p, &l); | ||
1432 | if (!p) | ||
1433 | break; | ||
1434 | } | ||
1435 | return p; | ||
1424 | } | 1436 | } |
1425 | 1437 | ||
1426 | static int t_hash_show(struct seq_file *m, void *v) | 1438 | static int t_hash_show(struct seq_file *m, void *v) |
@@ -1467,8 +1479,6 @@ t_next(struct seq_file *m, void *v, loff_t *pos) | |||
1467 | iter->pg = iter->pg->next; | 1479 | iter->pg = iter->pg->next; |
1468 | iter->idx = 0; | 1480 | iter->idx = 0; |
1469 | goto retry; | 1481 | goto retry; |
1470 | } else { | ||
1471 | iter->idx = -1; | ||
1472 | } | 1482 | } |
1473 | } else { | 1483 | } else { |
1474 | rec = &iter->pg->records[iter->idx++]; | 1484 | rec = &iter->pg->records[iter->idx++]; |
@@ -1497,6 +1507,7 @@ static void *t_start(struct seq_file *m, loff_t *pos) | |||
1497 | { | 1507 | { |
1498 | struct ftrace_iterator *iter = m->private; | 1508 | struct ftrace_iterator *iter = m->private; |
1499 | void *p = NULL; | 1509 | void *p = NULL; |
1510 | loff_t l; | ||
1500 | 1511 | ||
1501 | mutex_lock(&ftrace_lock); | 1512 | mutex_lock(&ftrace_lock); |
1502 | /* | 1513 | /* |
@@ -1508,23 +1519,21 @@ static void *t_start(struct seq_file *m, loff_t *pos) | |||
1508 | if (*pos > 0) | 1519 | if (*pos > 0) |
1509 | return t_hash_start(m, pos); | 1520 | return t_hash_start(m, pos); |
1510 | iter->flags |= FTRACE_ITER_PRINTALL; | 1521 | iter->flags |= FTRACE_ITER_PRINTALL; |
1511 | (*pos)++; | ||
1512 | return iter; | 1522 | return iter; |
1513 | } | 1523 | } |
1514 | 1524 | ||
1515 | if (iter->flags & FTRACE_ITER_HASH) | 1525 | if (iter->flags & FTRACE_ITER_HASH) |
1516 | return t_hash_start(m, pos); | 1526 | return t_hash_start(m, pos); |
1517 | 1527 | ||
1518 | if (*pos > 0) { | 1528 | iter->pg = ftrace_pages_start; |
1519 | if (iter->idx < 0) | 1529 | iter->idx = 0; |
1520 | return p; | 1530 | for (l = 0; l <= *pos; ) { |
1521 | (*pos)--; | 1531 | p = t_next(m, p, &l); |
1522 | iter->idx--; | 1532 | if (!p) |
1533 | break; | ||
1523 | } | 1534 | } |
1524 | 1535 | ||
1525 | p = t_next(m, p, pos); | 1536 | if (!p && iter->flags & FTRACE_ITER_FILTER) |
1526 | |||
1527 | if (!p) | ||
1528 | return t_hash_start(m, pos); | 1537 | return t_hash_start(m, pos); |
1529 | 1538 | ||
1530 | return p; | 1539 | return p; |
@@ -2500,32 +2509,31 @@ int ftrace_graph_count; | |||
2500 | unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly; | 2509 | unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly; |
2501 | 2510 | ||
2502 | static void * | 2511 | static void * |
2503 | g_next(struct seq_file *m, void *v, loff_t *pos) | 2512 | __g_next(struct seq_file *m, loff_t *pos) |
2504 | { | 2513 | { |
2505 | unsigned long *array = m->private; | 2514 | unsigned long *array = m->private; |
2506 | int index = *pos; | ||
2507 | 2515 | ||
2508 | (*pos)++; | 2516 | if (*pos >= ftrace_graph_count) |
2509 | |||
2510 | if (index >= ftrace_graph_count) | ||
2511 | return NULL; | 2517 | return NULL; |
2518 | return &array[*pos]; | ||
2519 | } | ||
2512 | 2520 | ||
2513 | return &array[index]; | 2521 | static void * |
2522 | g_next(struct seq_file *m, void *v, loff_t *pos) | ||
2523 | { | ||
2524 | (*pos)++; | ||
2525 | return __g_next(m, pos); | ||
2514 | } | 2526 | } |
2515 | 2527 | ||
2516 | static void *g_start(struct seq_file *m, loff_t *pos) | 2528 | static void *g_start(struct seq_file *m, loff_t *pos) |
2517 | { | 2529 | { |
2518 | void *p = NULL; | ||
2519 | |||
2520 | mutex_lock(&graph_lock); | 2530 | mutex_lock(&graph_lock); |
2521 | 2531 | ||
2522 | /* Nothing, tell g_show to print all functions are enabled */ | 2532 | /* Nothing, tell g_show to print all functions are enabled */ |
2523 | if (!ftrace_graph_count && !*pos) | 2533 | if (!ftrace_graph_count && !*pos) |
2524 | return (void *)1; | 2534 | return (void *)1; |
2525 | 2535 | ||
2526 | p = g_next(m, p, pos); | 2536 | return __g_next(m, pos); |
2527 | |||
2528 | return p; | ||
2529 | } | 2537 | } |
2530 | 2538 | ||
2531 | static void g_stop(struct seq_file *m, void *p) | 2539 | static void g_stop(struct seq_file *m, void *p) |
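Most of the ftrace.c changes above enforce the standard seq_file discipline: ->start() may be called with any *pos (for example when the file is re-read after a partial read), so it must rewind its private cursor and replay ->next() up to *pos instead of trusting leftover state, and it must not advance *pos itself. A minimal model of that start/next pairing over a plain array:

    #include <stdio.h>

    static const char *items[] = { "alpha", "beta", "gamma" };
    #define NITEMS (long)(sizeof(items) / sizeof(items[0]))

    static long cursor;             /* iterator state, like iter->idx */

    static const char *iter_next(const char *v, long *pos)
    {
        (void)v;
        (*pos)++;
        if (cursor >= NITEMS)
            return NULL;
        return items[cursor++];
    }

    static const char *iter_start(long *pos)
    {
        const char *p = NULL;
        long l;

        cursor = 0;                 /* rewind, do not trust old state */
        for (l = 0; l <= *pos; ) {  /* replay next() up to *pos */
            p = iter_next(p, &l);
            if (!p)
                break;
        }
        return p;
    }

    int main(void)
    {
        long pos;

        for (pos = 0; pos < 4; pos++) {
            const char *p = iter_start(&pos);

            printf("start(%ld) -> %s\n", pos, p ? p : "(end)");
        }
        return 0;
    }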
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 04dac2638258..bf27bb7a63e2 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -1563,6 +1563,8 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer, | |||
1563 | return NULL; | 1563 | return NULL; |
1564 | } | 1564 | } |
1565 | 1565 | ||
1566 | #ifdef CONFIG_TRACING | ||
1567 | |||
1566 | #define TRACE_RECURSIVE_DEPTH 16 | 1568 | #define TRACE_RECURSIVE_DEPTH 16 |
1567 | 1569 | ||
1568 | static int trace_recursive_lock(void) | 1570 | static int trace_recursive_lock(void) |
@@ -1593,6 +1595,13 @@ static void trace_recursive_unlock(void) | |||
1593 | current->trace_recursion--; | 1595 | current->trace_recursion--; |
1594 | } | 1596 | } |
1595 | 1597 | ||
1598 | #else | ||
1599 | |||
1600 | #define trace_recursive_lock() (0) | ||
1601 | #define trace_recursive_unlock() do { } while (0) | ||
1602 | |||
1603 | #endif | ||
1604 | |||
1596 | static DEFINE_PER_CPU(int, rb_need_resched); | 1605 | static DEFINE_PER_CPU(int, rb_need_resched); |
1597 | 1606 | ||
1598 | /** | 1607 | /** |
@@ -3104,6 +3113,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer, | |||
3104 | } | 3113 | } |
3105 | EXPORT_SYMBOL_GPL(ring_buffer_read_page); | 3114 | EXPORT_SYMBOL_GPL(ring_buffer_read_page); |
3106 | 3115 | ||
3116 | #ifdef CONFIG_TRACING | ||
3107 | static ssize_t | 3117 | static ssize_t |
3108 | rb_simple_read(struct file *filp, char __user *ubuf, | 3118 | rb_simple_read(struct file *filp, char __user *ubuf, |
3109 | size_t cnt, loff_t *ppos) | 3119 | size_t cnt, loff_t *ppos) |
@@ -3171,6 +3181,7 @@ static __init int rb_init_debugfs(void) | |||
3171 | } | 3181 | } |
3172 | 3182 | ||
3173 | fs_initcall(rb_init_debugfs); | 3183 | fs_initcall(rb_init_debugfs); |
3184 | #endif | ||
3174 | 3185 | ||
3175 | #ifdef CONFIG_HOTPLUG_CPU | 3186 | #ifdef CONFIG_HOTPLUG_CPU |
3176 | static int rb_cpu_notify(struct notifier_block *self, | 3187 | static int rb_cpu_notify(struct notifier_block *self, |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 076fa6f0ee48..3aa0a0dfdfa8 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -284,13 +284,12 @@ void trace_wake_up(void) | |||
284 | static int __init set_buf_size(char *str) | 284 | static int __init set_buf_size(char *str) |
285 | { | 285 | { |
286 | unsigned long buf_size; | 286 | unsigned long buf_size; |
287 | int ret; | ||
288 | 287 | ||
289 | if (!str) | 288 | if (!str) |
290 | return 0; | 289 | return 0; |
291 | ret = strict_strtoul(str, 0, &buf_size); | 290 | buf_size = memparse(str, &str); |
292 | /* nr_entries can not be zero */ | 291 | /* nr_entries can not be zero */ |
293 | if (ret < 0 || buf_size == 0) | 292 | if (buf_size == 0) |
294 | return 0; | 293 | return 0; |
295 | trace_buf_size = buf_size; | 294 | trace_buf_size = buf_size; |
296 | return 1; | 295 | return 1; |
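set_buf_size() now parses trace_buf_size= with memparse(), so the boot parameter can carry the usual size suffixes instead of only a raw byte count. The user-space sketch below reproduces the K/M/G handling to show what that buys; it is a re-implementation for illustration, not the kernel's memparse().

    #include <stdio.h>
    #include <stdlib.h>

    static unsigned long long parse_size(const char *s)
    {
        char *end;
        unsigned long long v = strtoull(s, &end, 0);

        switch (*end) {
        case 'G': case 'g': v <<= 10;   /* fall through */
        case 'M': case 'm': v <<= 10;   /* fall through */
        case 'K': case 'k': v <<= 10;
        default:
            break;
        }
        return v;
    }

    int main(void)
    {
        printf("%llu\n", parse_size("1441792"));    /* bytes, as before */
        printf("%llu\n", parse_size("1408k"));      /* 1441792 */
        printf("%llu\n", parse_size("16m"));        /* 16777216 */
        return 0;
    }

With this, trace_buf_size=1441792 and trace_buf_size=1408k select the same buffer size.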
@@ -2053,25 +2052,23 @@ static int tracing_open(struct inode *inode, struct file *file) | |||
2053 | static void * | 2052 | static void * |
2054 | t_next(struct seq_file *m, void *v, loff_t *pos) | 2053 | t_next(struct seq_file *m, void *v, loff_t *pos) |
2055 | { | 2054 | { |
2056 | struct tracer *t = m->private; | 2055 | struct tracer *t = v; |
2057 | 2056 | ||
2058 | (*pos)++; | 2057 | (*pos)++; |
2059 | 2058 | ||
2060 | if (t) | 2059 | if (t) |
2061 | t = t->next; | 2060 | t = t->next; |
2062 | 2061 | ||
2063 | m->private = t; | ||
2064 | |||
2065 | return t; | 2062 | return t; |
2066 | } | 2063 | } |
2067 | 2064 | ||
2068 | static void *t_start(struct seq_file *m, loff_t *pos) | 2065 | static void *t_start(struct seq_file *m, loff_t *pos) |
2069 | { | 2066 | { |
2070 | struct tracer *t = m->private; | 2067 | struct tracer *t; |
2071 | loff_t l = 0; | 2068 | loff_t l = 0; |
2072 | 2069 | ||
2073 | mutex_lock(&trace_types_lock); | 2070 | mutex_lock(&trace_types_lock); |
2074 | for (; t && l < *pos; t = t_next(m, t, &l)) | 2071 | for (t = trace_types; t && l < *pos; t = t_next(m, t, &l)) |
2075 | ; | 2072 | ; |
2076 | 2073 | ||
2077 | return t; | 2074 | return t; |
@@ -2107,18 +2104,10 @@ static struct seq_operations show_traces_seq_ops = { | |||
2107 | 2104 | ||
2108 | static int show_traces_open(struct inode *inode, struct file *file) | 2105 | static int show_traces_open(struct inode *inode, struct file *file) |
2109 | { | 2106 | { |
2110 | int ret; | ||
2111 | |||
2112 | if (tracing_disabled) | 2107 | if (tracing_disabled) |
2113 | return -ENODEV; | 2108 | return -ENODEV; |
2114 | 2109 | ||
2115 | ret = seq_open(file, &show_traces_seq_ops); | 2110 | return seq_open(file, &show_traces_seq_ops); |
2116 | if (!ret) { | ||
2117 | struct seq_file *m = file->private_data; | ||
2118 | m->private = trace_types; | ||
2119 | } | ||
2120 | |||
2121 | return ret; | ||
2122 | } | 2111 | } |
2123 | 2112 | ||
2124 | static ssize_t | 2113 | static ssize_t |
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 6e735d4771f8..3548ae5cc780 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -597,6 +597,7 @@ print_graph_function(struct trace_iterator *iter) | |||
597 | 597 | ||
598 | extern struct pid *ftrace_pid_trace; | 598 | extern struct pid *ftrace_pid_trace; |
599 | 599 | ||
600 | #ifdef CONFIG_FUNCTION_TRACER | ||
600 | static inline int ftrace_trace_task(struct task_struct *task) | 601 | static inline int ftrace_trace_task(struct task_struct *task) |
601 | { | 602 | { |
602 | if (!ftrace_pid_trace) | 603 | if (!ftrace_pid_trace) |
@@ -604,6 +605,12 @@ static inline int ftrace_trace_task(struct task_struct *task) | |||
604 | 605 | ||
605 | return test_tsk_trace_trace(task); | 606 | return test_tsk_trace_trace(task); |
606 | } | 607 | } |
608 | #else | ||
609 | static inline int ftrace_trace_task(struct task_struct *task) | ||
610 | { | ||
611 | return 1; | ||
612 | } | ||
613 | #endif | ||
607 | 614 | ||
608 | /* | 615 | /* |
609 | * trace_iterator_flags is an enumeration that defines bit | 616 | * trace_iterator_flags is an enumeration that defines bit |
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index aa08be69a1b6..53c8fd376a88 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
@@ -300,10 +300,18 @@ t_next(struct seq_file *m, void *v, loff_t *pos) | |||
300 | 300 | ||
301 | static void *t_start(struct seq_file *m, loff_t *pos) | 301 | static void *t_start(struct seq_file *m, loff_t *pos) |
302 | { | 302 | { |
303 | struct ftrace_event_call *call = NULL; | ||
304 | loff_t l; | ||
305 | |||
303 | mutex_lock(&event_mutex); | 306 | mutex_lock(&event_mutex); |
304 | if (*pos == 0) | 307 | |
305 | m->private = ftrace_events.next; | 308 | m->private = ftrace_events.next; |
306 | return t_next(m, NULL, pos); | 309 | for (l = 0; l <= *pos; ) { |
310 | call = t_next(m, NULL, &l); | ||
311 | if (!call) | ||
312 | break; | ||
313 | } | ||
314 | return call; | ||
307 | } | 315 | } |
308 | 316 | ||
309 | static void * | 317 | static void * |
@@ -332,10 +340,18 @@ s_next(struct seq_file *m, void *v, loff_t *pos) | |||
332 | 340 | ||
333 | static void *s_start(struct seq_file *m, loff_t *pos) | 341 | static void *s_start(struct seq_file *m, loff_t *pos) |
334 | { | 342 | { |
343 | struct ftrace_event_call *call = NULL; | ||
344 | loff_t l; | ||
345 | |||
335 | mutex_lock(&event_mutex); | 346 | mutex_lock(&event_mutex); |
336 | if (*pos == 0) | 347 | |
337 | m->private = ftrace_events.next; | 348 | m->private = ftrace_events.next; |
338 | return s_next(m, NULL, pos); | 349 | for (l = 0; l <= *pos; ) { |
350 | call = s_next(m, NULL, &l); | ||
351 | if (!call) | ||
352 | break; | ||
353 | } | ||
354 | return call; | ||
339 | } | 355 | } |
340 | 356 | ||
341 | static int t_show(struct seq_file *m, void *v) | 357 | static int t_show(struct seq_file *m, void *v) |
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index 90f134764837..7402144bff21 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c | |||
@@ -302,8 +302,7 @@ ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip, | |||
302 | if (count == -1) | 302 | if (count == -1) |
303 | seq_printf(m, ":unlimited\n"); | 303 | seq_printf(m, ":unlimited\n"); |
304 | else | 304 | else |
305 | seq_printf(m, ":count=%ld", count); | 305 | seq_printf(m, ":count=%ld\n", count); |
306 | seq_putc(m, '\n'); | ||
307 | 306 | ||
308 | return 0; | 307 | return 0; |
309 | } | 308 | } |
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c index 9bece9687b62..7b6278110827 100644 --- a/kernel/trace/trace_printk.c +++ b/kernel/trace/trace_printk.c | |||
@@ -155,25 +155,19 @@ int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap) | |||
155 | EXPORT_SYMBOL_GPL(__ftrace_vprintk); | 155 | EXPORT_SYMBOL_GPL(__ftrace_vprintk); |
156 | 156 | ||
157 | static void * | 157 | static void * |
158 | t_next(struct seq_file *m, void *v, loff_t *pos) | 158 | t_start(struct seq_file *m, loff_t *pos) |
159 | { | 159 | { |
160 | const char **fmt = m->private; | 160 | const char **fmt = __start___trace_bprintk_fmt + *pos; |
161 | const char **next = fmt; | ||
162 | |||
163 | (*pos)++; | ||
164 | 161 | ||
165 | if ((unsigned long)fmt >= (unsigned long)__stop___trace_bprintk_fmt) | 162 | if ((unsigned long)fmt >= (unsigned long)__stop___trace_bprintk_fmt) |
166 | return NULL; | 163 | return NULL; |
167 | |||
168 | next = fmt; | ||
169 | m->private = ++next; | ||
170 | |||
171 | return fmt; | 164 | return fmt; |
172 | } | 165 | } |
173 | 166 | ||
174 | static void *t_start(struct seq_file *m, loff_t *pos) | 167 | static void *t_next(struct seq_file *m, void * v, loff_t *pos) |
175 | { | 168 | { |
176 | return t_next(m, NULL, pos); | 169 | (*pos)++; |
170 | return t_start(m, pos); | ||
177 | } | 171 | } |
178 | 172 | ||
179 | static int t_show(struct seq_file *m, void *v) | 173 | static int t_show(struct seq_file *m, void *v) |
@@ -224,15 +218,7 @@ static const struct seq_operations show_format_seq_ops = { | |||
224 | static int | 218 | static int |
225 | ftrace_formats_open(struct inode *inode, struct file *file) | 219 | ftrace_formats_open(struct inode *inode, struct file *file) |
226 | { | 220 | { |
227 | int ret; | 221 | return seq_open(file, &show_format_seq_ops); |
228 | |||
229 | ret = seq_open(file, &show_format_seq_ops); | ||
230 | if (!ret) { | ||
231 | struct seq_file *m = file->private_data; | ||
232 | |||
233 | m->private = __start___trace_bprintk_fmt; | ||
234 | } | ||
235 | return ret; | ||
236 | } | 222 | } |
237 | 223 | ||
238 | static const struct file_operations ftrace_formats_fops = { | 224 | static const struct file_operations ftrace_formats_fops = { |
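
Here the records live in a contiguous linker section, so the swapped t_start()/t_next() can index straight off *pos and bounds-check against the end marker; no cursor needs to be parked in m->private, which is why ftrace_formats_open() collapses to a bare seq_open(). A userspace sketch of the same scheme, with a stand-in array playing the part of the __start___trace_bprintk_fmt/__stop___trace_bprintk_fmt section symbols (all demo_* and fmt_* names are invented):

    #include <stdio.h>

    /* stand-ins for the __start/__stop section symbols (illustrative only) */
    static const char *bprintk_fmts[] = { "fmt A", "fmt B", "fmt C" };
    static const char **demo_start_sym = bprintk_fmts;
    static const char **demo_stop_sym = bprintk_fmts + 3;

    /* ->start: index straight into the section from *pos, NULL past the end */
    static const char **fmt_start(long *pos)
    {
        const char **fmt = demo_start_sym + *pos;

        if (fmt >= demo_stop_sym)
            return NULL;
        return fmt;
    }

    /* ->next: bump the position and reuse ->start */
    static const char **fmt_next(long *pos)
    {
        (*pos)++;
        return fmt_start(pos);
    }

    int main(void)
    {
        const char **fmt;
        long pos = 0;

        for (fmt = fmt_start(&pos); fmt; fmt = fmt_next(&pos))
            printf("%s\n", *fmt);
        return 0;
    }
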
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c index c00643733f4c..e66f5e493342 100644 --- a/kernel/trace/trace_stat.c +++ b/kernel/trace/trace_stat.c | |||
@@ -199,17 +199,13 @@ static void *stat_seq_start(struct seq_file *s, loff_t *pos) | |||
199 | mutex_lock(&session->stat_mutex); | 199 | mutex_lock(&session->stat_mutex); |
200 | 200 | ||
201 | /* If we are in the beginning of the file, print the headers */ | 201 | /* If we are in the beginning of the file, print the headers */ |
202 | if (!*pos && session->ts->stat_headers) { | 202 | if (!*pos && session->ts->stat_headers) |
203 | (*pos)++; | ||
204 | return SEQ_START_TOKEN; | 203 | return SEQ_START_TOKEN; |
205 | } | ||
206 | 204 | ||
207 | node = rb_first(&session->stat_root); | 205 | node = rb_first(&session->stat_root); |
208 | for (i = 0; node && i < *pos; i++) | 206 | for (i = 0; node && i < *pos; i++) |
209 | node = rb_next(node); | 207 | node = rb_next(node); |
210 | 208 | ||
211 | (*pos)++; | ||
212 | |||
213 | return node; | 209 | return node; |
214 | } | 210 | } |
215 | 211 | ||
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 4c32b1a1a06e..12327b2bb785 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
@@ -359,6 +359,18 @@ config DEBUG_KMEMLEAK | |||
359 | In order to access the kmemleak file, debugfs needs to be | 359 | In order to access the kmemleak file, debugfs needs to be |
360 | mounted (usually at /sys/kernel/debug). | 360 | mounted (usually at /sys/kernel/debug). |
361 | 361 | ||
362 | config DEBUG_KMEMLEAK_EARLY_LOG_SIZE | ||
363 | int "Maximum kmemleak early log entries" | ||
364 | depends on DEBUG_KMEMLEAK | ||
365 | range 200 2000 | ||
366 | default 400 | ||
367 | help | ||
368 | Kmemleak must track all the memory allocations to avoid | ||
369 | reporting false positives. Since memory may be allocated or | ||
370 | freed before kmemleak is initialised, an early log buffer is | ||
371 | used to store these actions. If kmemleak reports "early log | ||
372 | buffer exceeded", please increase this value. | ||
373 | |||
362 | config DEBUG_KMEMLEAK_TEST | 374 | config DEBUG_KMEMLEAK_TEST |
363 | tristate "Simple test for the kernel memory leak detector" | 375 | tristate "Simple test for the kernel memory leak detector" |
364 | depends on DEBUG_KMEMLEAK | 376 | depends on DEBUG_KMEMLEAK |
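
The new option generates the CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE macro that now sizes the static early_log[] array later in this series (replacing the hard-coded 200 entries), and together with the log_early() change an overflow only warns and disables kmemleak instead of stopping it. A small userspace sketch of a config-sized log with that overflow behaviour; the structure fields and the fallback value of 400 are assumptions for illustration only.

    #include <stdio.h>

    /* normally generated by Kconfig into autoconf.h; value assumed here */
    #ifndef CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE
    #define CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE 400
    #endif

    struct demo_early_log {
        int op_type;                     /* illustrative fields, not the real layout */
        const void *ptr;
    };

    static struct demo_early_log early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE];
    static int crt_early_log;
    static int demo_disabled;

    /* record an early action, or warn and disable once the buffer is full */
    static void demo_log_early(int op_type, const void *ptr)
    {
        if (crt_early_log >= (int)(sizeof(early_log) / sizeof(early_log[0]))) {
            fprintf(stderr, "Early log buffer exceeded\n");
            demo_disabled = 1;
            return;
        }
        early_log[crt_early_log].op_type = op_type;
        early_log[crt_early_log].ptr = ptr;
        crt_early_log++;
    }

    int main(void)
    {
        int i;

        for (i = 0; i < CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE + 1; i++)
            demo_log_early(0, &i);
        printf("stored %d entries, disabled=%d\n", crt_early_log, demo_disabled);
        return 0;
    }
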
diff --git a/mm/dmapool.c b/mm/dmapool.c index b1f0885dda22..3df063706f53 100644 --- a/mm/dmapool.c +++ b/mm/dmapool.c | |||
@@ -86,10 +86,12 @@ show_pools(struct device *dev, struct device_attribute *attr, char *buf) | |||
86 | unsigned pages = 0; | 86 | unsigned pages = 0; |
87 | unsigned blocks = 0; | 87 | unsigned blocks = 0; |
88 | 88 | ||
89 | spin_lock_irq(&pool->lock); | ||
89 | list_for_each_entry(page, &pool->page_list, page_list) { | 90 | list_for_each_entry(page, &pool->page_list, page_list) { |
90 | pages++; | 91 | pages++; |
91 | blocks += page->in_use; | 92 | blocks += page->in_use; |
92 | } | 93 | } |
94 | spin_unlock_irq(&pool->lock); | ||
93 | 95 | ||
94 | /* per-pool info, no real statistics yet */ | 96 | /* per-pool info, no real statistics yet */ |
95 | temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n", | 97 | temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n", |
diff --git a/mm/kmemleak.c b/mm/kmemleak.c index c96f2c8700aa..e766e1da09d2 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c | |||
@@ -48,10 +48,10 @@ | |||
48 | * scanned. This list is only modified during a scanning episode when the | 48 | * scanned. This list is only modified during a scanning episode when the |
49 | * scan_mutex is held. At the end of a scan, the gray_list is always empty. | 49 | * scan_mutex is held. At the end of a scan, the gray_list is always empty. |
50 | * Note that the kmemleak_object.use_count is incremented when an object is | 50 | * Note that the kmemleak_object.use_count is incremented when an object is |
51 | * added to the gray_list and therefore cannot be freed | 51 | * added to the gray_list and therefore cannot be freed. This mutex also |
52 | * - kmemleak_mutex (mutex): prevents multiple users of the "kmemleak" debugfs | 52 | * prevents multiple users of the "kmemleak" debugfs file together with |
53 | * file together with modifications to the memory scanning parameters | 53 | * modifications to the memory scanning parameters including the scan_thread |
54 | * including the scan_thread pointer | 54 | * pointer |
55 | * | 55 | * |
56 | * The kmemleak_object structures have a use_count incremented or decremented | 56 | * The kmemleak_object structures have a use_count incremented or decremented |
57 | * using the get_object()/put_object() functions. When the use_count becomes | 57 | * using the get_object()/put_object() functions. When the use_count becomes |
@@ -105,7 +105,6 @@ | |||
105 | #define MAX_TRACE 16 /* stack trace length */ | 105 | #define MAX_TRACE 16 /* stack trace length */ |
106 | #define REPORTS_NR 50 /* maximum number of reported leaks */ | 106 | #define REPORTS_NR 50 /* maximum number of reported leaks */ |
107 | #define MSECS_MIN_AGE 5000 /* minimum object age for reporting */ | 107 | #define MSECS_MIN_AGE 5000 /* minimum object age for reporting */ |
108 | #define MSECS_SCAN_YIELD 10 /* CPU yielding period */ | ||
109 | #define SECS_FIRST_SCAN 60 /* delay before the first scan */ | 108 | #define SECS_FIRST_SCAN 60 /* delay before the first scan */ |
110 | #define SECS_SCAN_WAIT 600 /* subsequent auto scanning delay */ | 109 | #define SECS_SCAN_WAIT 600 /* subsequent auto scanning delay */ |
111 | 110 | ||
@@ -186,19 +185,16 @@ static atomic_t kmemleak_error = ATOMIC_INIT(0); | |||
186 | static unsigned long min_addr = ULONG_MAX; | 185 | static unsigned long min_addr = ULONG_MAX; |
187 | static unsigned long max_addr; | 186 | static unsigned long max_addr; |
188 | 187 | ||
189 | /* used for yielding the CPU to other tasks during scanning */ | ||
190 | static unsigned long next_scan_yield; | ||
191 | static struct task_struct *scan_thread; | 188 | static struct task_struct *scan_thread; |
192 | static unsigned long jiffies_scan_yield; | 189 | /* used to avoid reporting of recently allocated objects */ |
193 | static unsigned long jiffies_min_age; | 190 | static unsigned long jiffies_min_age; |
191 | static unsigned long jiffies_last_scan; | ||
194 | /* delay between automatic memory scannings */ | 192 | /* delay between automatic memory scannings */ |
195 | static signed long jiffies_scan_wait; | 193 | static signed long jiffies_scan_wait; |
196 | /* enables or disables the task stacks scanning */ | 194 | /* enables or disables the task stacks scanning */ |
197 | static int kmemleak_stack_scan; | 195 | static int kmemleak_stack_scan = 1; |
198 | /* mutex protecting the memory scanning */ | 196 | /* protects the memory scanning, parameters and debug/kmemleak file access */ |
199 | static DEFINE_MUTEX(scan_mutex); | 197 | static DEFINE_MUTEX(scan_mutex); |
200 | /* mutex protecting the access to the /sys/kernel/debug/kmemleak file */ | ||
201 | static DEFINE_MUTEX(kmemleak_mutex); | ||
202 | 198 | ||
203 | /* number of leaks reported (for limitation purposes) */ | 199 | /* number of leaks reported (for limitation purposes) */ |
204 | static int reported_leaks; | 200 | static int reported_leaks; |
@@ -235,7 +231,7 @@ struct early_log { | |||
235 | }; | 231 | }; |
236 | 232 | ||
237 | /* early logging buffer and current position */ | 233 | /* early logging buffer and current position */ |
238 | static struct early_log early_log[200]; | 234 | static struct early_log early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE]; |
239 | static int crt_early_log; | 235 | static int crt_early_log; |
240 | 236 | ||
241 | static void kmemleak_disable(void); | 237 | static void kmemleak_disable(void); |
@@ -279,15 +275,6 @@ static int color_gray(const struct kmemleak_object *object) | |||
279 | } | 275 | } |
280 | 276 | ||
281 | /* | 277 | /* |
282 | * Objects are considered referenced if their color is gray and they have not | ||
283 | * been deleted. | ||
284 | */ | ||
285 | static int referenced_object(struct kmemleak_object *object) | ||
286 | { | ||
287 | return (object->flags & OBJECT_ALLOCATED) && color_gray(object); | ||
288 | } | ||
289 | |||
290 | /* | ||
291 | * Objects are considered unreferenced only if their color is white, they have | 278 | * Objects are considered unreferenced only if their color is white, they have |
292 | * not been deleted and have a minimum age to avoid false positives caused by | 279 |
293 | * pointers temporarily stored in CPU registers. | 280 | * pointers temporarily stored in CPU registers. |
@@ -295,42 +282,28 @@ static int referenced_object(struct kmemleak_object *object) | |||
295 | static int unreferenced_object(struct kmemleak_object *object) | 282 | static int unreferenced_object(struct kmemleak_object *object) |
296 | { | 283 | { |
297 | return (object->flags & OBJECT_ALLOCATED) && color_white(object) && | 284 | return (object->flags & OBJECT_ALLOCATED) && color_white(object) && |
298 | time_is_before_eq_jiffies(object->jiffies + jiffies_min_age); | 285 | time_before_eq(object->jiffies + jiffies_min_age, |
286 | jiffies_last_scan); | ||
299 | } | 287 | } |
300 | 288 | ||
301 | /* | 289 | /* |
302 | * Printing of the (un)referenced objects information, either to the seq file | 290 | * Printing of the unreferenced objects information to the seq file. The |
303 | * or to the kernel log. The print_referenced/print_unreferenced functions | 291 | * print_unreferenced function must be called with the object->lock held. |
304 | * must be called with the object->lock held. | ||
305 | */ | 292 | */ |
306 | #define print_helper(seq, x...) do { \ | ||
307 | struct seq_file *s = (seq); \ | ||
308 | if (s) \ | ||
309 | seq_printf(s, x); \ | ||
310 | else \ | ||
311 | pr_info(x); \ | ||
312 | } while (0) | ||
313 | |||
314 | static void print_referenced(struct kmemleak_object *object) | ||
315 | { | ||
316 | pr_info("referenced object 0x%08lx (size %zu)\n", | ||
317 | object->pointer, object->size); | ||
318 | } | ||
319 | |||
320 | static void print_unreferenced(struct seq_file *seq, | 293 | static void print_unreferenced(struct seq_file *seq, |
321 | struct kmemleak_object *object) | 294 | struct kmemleak_object *object) |
322 | { | 295 | { |
323 | int i; | 296 | int i; |
324 | 297 | ||
325 | print_helper(seq, "unreferenced object 0x%08lx (size %zu):\n", | 298 | seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n", |
326 | object->pointer, object->size); | 299 | object->pointer, object->size); |
327 | print_helper(seq, " comm \"%s\", pid %d, jiffies %lu\n", | 300 | seq_printf(seq, " comm \"%s\", pid %d, jiffies %lu\n", |
328 | object->comm, object->pid, object->jiffies); | 301 | object->comm, object->pid, object->jiffies); |
329 | print_helper(seq, " backtrace:\n"); | 302 | seq_printf(seq, " backtrace:\n"); |
330 | 303 | ||
331 | for (i = 0; i < object->trace_len; i++) { | 304 | for (i = 0; i < object->trace_len; i++) { |
332 | void *ptr = (void *)object->trace[i]; | 305 | void *ptr = (void *)object->trace[i]; |
333 | print_helper(seq, " [<%p>] %pS\n", ptr, ptr); | 306 | seq_printf(seq, " [<%p>] %pS\n", ptr, ptr); |
334 | } | 307 | } |
335 | } | 308 | } |
336 | 309 | ||
@@ -554,8 +527,10 @@ static void delete_object(unsigned long ptr) | |||
554 | write_lock_irqsave(&kmemleak_lock, flags); | 527 | write_lock_irqsave(&kmemleak_lock, flags); |
555 | object = lookup_object(ptr, 0); | 528 | object = lookup_object(ptr, 0); |
556 | if (!object) { | 529 | if (!object) { |
530 | #ifdef DEBUG | ||
557 | kmemleak_warn("Freeing unknown object at 0x%08lx\n", | 531 | kmemleak_warn("Freeing unknown object at 0x%08lx\n", |
558 | ptr); | 532 | ptr); |
533 | #endif | ||
559 | write_unlock_irqrestore(&kmemleak_lock, flags); | 534 | write_unlock_irqrestore(&kmemleak_lock, flags); |
560 | return; | 535 | return; |
561 | } | 536 | } |
@@ -571,8 +546,6 @@ static void delete_object(unsigned long ptr) | |||
571 | * cannot be freed when it is being scanned. | 546 | * cannot be freed when it is being scanned. |
572 | */ | 547 | */ |
573 | spin_lock_irqsave(&object->lock, flags); | 548 | spin_lock_irqsave(&object->lock, flags); |
574 | if (object->flags & OBJECT_REPORTED) | ||
575 | print_referenced(object); | ||
576 | object->flags &= ~OBJECT_ALLOCATED; | 549 | object->flags &= ~OBJECT_ALLOCATED; |
577 | spin_unlock_irqrestore(&object->lock, flags); | 550 | spin_unlock_irqrestore(&object->lock, flags); |
578 | put_object(object); | 551 | put_object(object); |
@@ -696,7 +669,8 @@ static void log_early(int op_type, const void *ptr, size_t size, | |||
696 | struct early_log *log; | 669 | struct early_log *log; |
697 | 670 | ||
698 | if (crt_early_log >= ARRAY_SIZE(early_log)) { | 671 | if (crt_early_log >= ARRAY_SIZE(early_log)) { |
699 | kmemleak_stop("Early log buffer exceeded\n"); | 672 | pr_warning("Early log buffer exceeded\n"); |
673 | kmemleak_disable(); | ||
700 | return; | 674 | return; |
701 | } | 675 | } |
702 | 676 | ||
@@ -808,21 +782,6 @@ void kmemleak_no_scan(const void *ptr) | |||
808 | EXPORT_SYMBOL(kmemleak_no_scan); | 782 | EXPORT_SYMBOL(kmemleak_no_scan); |
809 | 783 | ||
810 | /* | 784 | /* |
811 | * Yield the CPU so that other tasks get a chance to run. The yielding is | ||
812 | * rate-limited to avoid excessive number of calls to the schedule() function | ||
813 | * during memory scanning. | ||
814 | */ | ||
815 | static void scan_yield(void) | ||
816 | { | ||
817 | might_sleep(); | ||
818 | |||
819 | if (time_is_before_eq_jiffies(next_scan_yield)) { | ||
820 | schedule(); | ||
821 | next_scan_yield = jiffies + jiffies_scan_yield; | ||
822 | } | ||
823 | } | ||
824 | |||
825 | /* | ||
826 | * Memory scanning is a long process and it needs to be interruptible. This | 785 |
827 | * function checks whether such an interrupt condition occurred. | 786 |
828 | */ | 787 | */ |
@@ -862,15 +821,6 @@ static void scan_block(void *_start, void *_end, | |||
862 | if (scan_should_stop()) | 821 | if (scan_should_stop()) |
863 | break; | 822 | break; |
864 | 823 | ||
865 | /* | ||
866 | * When scanning a memory block with a corresponding | ||
867 | * kmemleak_object, the CPU yielding is handled in the calling | ||
868 | * code since it holds the object->lock to avoid the block | ||
869 | * freeing. | ||
870 | */ | ||
871 | if (!scanned) | ||
872 | scan_yield(); | ||
873 | |||
874 | object = find_and_get_object(pointer, 1); | 824 | object = find_and_get_object(pointer, 1); |
875 | if (!object) | 825 | if (!object) |
876 | continue; | 826 | continue; |
@@ -952,6 +902,9 @@ static void kmemleak_scan(void) | |||
952 | struct kmemleak_object *object, *tmp; | 902 | struct kmemleak_object *object, *tmp; |
953 | struct task_struct *task; | 903 | struct task_struct *task; |
954 | int i; | 904 | int i; |
905 | int new_leaks = 0; | ||
906 | |||
907 | jiffies_last_scan = jiffies; | ||
955 | 908 | ||
956 | /* prepare the kmemleak_object's */ | 909 | /* prepare the kmemleak_object's */ |
957 | rcu_read_lock(); | 910 | rcu_read_lock(); |
@@ -1033,7 +986,7 @@ static void kmemleak_scan(void) | |||
1033 | */ | 986 | */ |
1034 | object = list_entry(gray_list.next, typeof(*object), gray_list); | 987 | object = list_entry(gray_list.next, typeof(*object), gray_list); |
1035 | while (&object->gray_list != &gray_list) { | 988 | while (&object->gray_list != &gray_list) { |
1036 | scan_yield(); | 989 | cond_resched(); |
1037 | 990 | ||
1038 | /* may add new objects to the list */ | 991 | /* may add new objects to the list */ |
1039 | if (!scan_should_stop()) | 992 | if (!scan_should_stop()) |
@@ -1049,6 +1002,32 @@ static void kmemleak_scan(void) | |||
1049 | object = tmp; | 1002 | object = tmp; |
1050 | } | 1003 | } |
1051 | WARN_ON(!list_empty(&gray_list)); | 1004 | WARN_ON(!list_empty(&gray_list)); |
1005 | |||
1006 | /* | ||
1007 | * If scanning was stopped do not report any new unreferenced objects. | ||
1008 | */ | ||
1009 | if (scan_should_stop()) | ||
1010 | return; | ||
1011 | |||
1012 | /* | ||
1013 | * Scanning result reporting. | ||
1014 | */ | ||
1015 | rcu_read_lock(); | ||
1016 | list_for_each_entry_rcu(object, &object_list, object_list) { | ||
1017 | spin_lock_irqsave(&object->lock, flags); | ||
1018 | if (unreferenced_object(object) && | ||
1019 | !(object->flags & OBJECT_REPORTED)) { | ||
1020 | object->flags |= OBJECT_REPORTED; | ||
1021 | new_leaks++; | ||
1022 | } | ||
1023 | spin_unlock_irqrestore(&object->lock, flags); | ||
1024 | } | ||
1025 | rcu_read_unlock(); | ||
1026 | |||
1027 | if (new_leaks) | ||
1028 | pr_info("%d new suspected memory leaks (see " | ||
1029 | "/sys/kernel/debug/kmemleak)\n", new_leaks); | ||
1030 | |||
1052 | } | 1031 | } |
1053 | 1032 | ||
1054 | /* | 1033 | /* |
@@ -1070,36 +1049,12 @@ static int kmemleak_scan_thread(void *arg) | |||
1070 | } | 1049 | } |
1071 | 1050 | ||
1072 | while (!kthread_should_stop()) { | 1051 | while (!kthread_should_stop()) { |
1073 | struct kmemleak_object *object; | ||
1074 | signed long timeout = jiffies_scan_wait; | 1052 | signed long timeout = jiffies_scan_wait; |
1075 | 1053 | ||
1076 | mutex_lock(&scan_mutex); | 1054 | mutex_lock(&scan_mutex); |
1077 | |||
1078 | kmemleak_scan(); | 1055 | kmemleak_scan(); |
1079 | reported_leaks = 0; | ||
1080 | |||
1081 | rcu_read_lock(); | ||
1082 | list_for_each_entry_rcu(object, &object_list, object_list) { | ||
1083 | unsigned long flags; | ||
1084 | |||
1085 | if (reported_leaks >= REPORTS_NR) | ||
1086 | break; | ||
1087 | spin_lock_irqsave(&object->lock, flags); | ||
1088 | if (!(object->flags & OBJECT_REPORTED) && | ||
1089 | unreferenced_object(object)) { | ||
1090 | print_unreferenced(NULL, object); | ||
1091 | object->flags |= OBJECT_REPORTED; | ||
1092 | reported_leaks++; | ||
1093 | } else if ((object->flags & OBJECT_REPORTED) && | ||
1094 | referenced_object(object)) { | ||
1095 | print_referenced(object); | ||
1096 | object->flags &= ~OBJECT_REPORTED; | ||
1097 | } | ||
1098 | spin_unlock_irqrestore(&object->lock, flags); | ||
1099 | } | ||
1100 | rcu_read_unlock(); | ||
1101 | |||
1102 | mutex_unlock(&scan_mutex); | 1056 | mutex_unlock(&scan_mutex); |
1057 | |||
1103 | /* wait before the next scan */ | 1058 | /* wait before the next scan */ |
1104 | while (timeout && !kthread_should_stop()) | 1059 | while (timeout && !kthread_should_stop()) |
1105 | timeout = schedule_timeout_interruptible(timeout); | 1060 | timeout = schedule_timeout_interruptible(timeout); |
@@ -1112,7 +1067,7 @@ static int kmemleak_scan_thread(void *arg) | |||
1112 | 1067 | ||
1113 | /* | 1068 | /* |
1114 | * Start the automatic memory scanning thread. This function must be called | 1069 | * Start the automatic memory scanning thread. This function must be called |
1115 | * with the kmemleak_mutex held. | 1070 | * with the scan_mutex held. |
1116 | */ | 1071 | */ |
1117 | void start_scan_thread(void) | 1072 | void start_scan_thread(void) |
1118 | { | 1073 | { |
@@ -1127,7 +1082,7 @@ void start_scan_thread(void) | |||
1127 | 1082 | ||
1128 | /* | 1083 | /* |
1129 | * Stop the automatic memory scanning thread. This function must be called | 1084 | * Stop the automatic memory scanning thread. This function must be called |
1130 | * with the kmemleak_mutex held. | 1085 | * with the scan_mutex held. |
1131 | */ | 1086 | */ |
1132 | void stop_scan_thread(void) | 1087 | void stop_scan_thread(void) |
1133 | { | 1088 | { |
@@ -1147,10 +1102,8 @@ static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos) | |||
1147 | struct kmemleak_object *object; | 1102 | struct kmemleak_object *object; |
1148 | loff_t n = *pos; | 1103 | loff_t n = *pos; |
1149 | 1104 | ||
1150 | if (!n) { | 1105 | if (!n) |
1151 | kmemleak_scan(); | ||
1152 | reported_leaks = 0; | 1106 | reported_leaks = 0; |
1153 | } | ||
1154 | if (reported_leaks >= REPORTS_NR) | 1107 | if (reported_leaks >= REPORTS_NR) |
1155 | return NULL; | 1108 | return NULL; |
1156 | 1109 | ||
@@ -1211,11 +1164,10 @@ static int kmemleak_seq_show(struct seq_file *seq, void *v) | |||
1211 | unsigned long flags; | 1164 | unsigned long flags; |
1212 | 1165 | ||
1213 | spin_lock_irqsave(&object->lock, flags); | 1166 | spin_lock_irqsave(&object->lock, flags); |
1214 | if (!unreferenced_object(object)) | 1167 | if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object)) { |
1215 | goto out; | 1168 | print_unreferenced(seq, object); |
1216 | print_unreferenced(seq, object); | 1169 | reported_leaks++; |
1217 | reported_leaks++; | 1170 | } |
1218 | out: | ||
1219 | spin_unlock_irqrestore(&object->lock, flags); | 1171 | spin_unlock_irqrestore(&object->lock, flags); |
1220 | return 0; | 1172 | return 0; |
1221 | } | 1173 | } |
@@ -1234,13 +1186,10 @@ static int kmemleak_open(struct inode *inode, struct file *file) | |||
1234 | if (!atomic_read(&kmemleak_enabled)) | 1186 | if (!atomic_read(&kmemleak_enabled)) |
1235 | return -EBUSY; | 1187 | return -EBUSY; |
1236 | 1188 | ||
1237 | ret = mutex_lock_interruptible(&kmemleak_mutex); | 1189 | ret = mutex_lock_interruptible(&scan_mutex); |
1238 | if (ret < 0) | 1190 | if (ret < 0) |
1239 | goto out; | 1191 | goto out; |
1240 | if (file->f_mode & FMODE_READ) { | 1192 | if (file->f_mode & FMODE_READ) { |
1241 | ret = mutex_lock_interruptible(&scan_mutex); | ||
1242 | if (ret < 0) | ||
1243 | goto kmemleak_unlock; | ||
1244 | ret = seq_open(file, &kmemleak_seq_ops); | 1193 | ret = seq_open(file, &kmemleak_seq_ops); |
1245 | if (ret < 0) | 1194 | if (ret < 0) |
1246 | goto scan_unlock; | 1195 | goto scan_unlock; |
@@ -1249,8 +1198,6 @@ static int kmemleak_open(struct inode *inode, struct file *file) | |||
1249 | 1198 | ||
1250 | scan_unlock: | 1199 | scan_unlock: |
1251 | mutex_unlock(&scan_mutex); | 1200 | mutex_unlock(&scan_mutex); |
1252 | kmemleak_unlock: | ||
1253 | mutex_unlock(&kmemleak_mutex); | ||
1254 | out: | 1201 | out: |
1255 | return ret; | 1202 | return ret; |
1256 | } | 1203 | } |
@@ -1259,11 +1206,9 @@ static int kmemleak_release(struct inode *inode, struct file *file) | |||
1259 | { | 1206 | { |
1260 | int ret = 0; | 1207 | int ret = 0; |
1261 | 1208 | ||
1262 | if (file->f_mode & FMODE_READ) { | 1209 | if (file->f_mode & FMODE_READ) |
1263 | seq_release(inode, file); | 1210 | seq_release(inode, file); |
1264 | mutex_unlock(&scan_mutex); | 1211 | mutex_unlock(&scan_mutex); |
1265 | } | ||
1266 | mutex_unlock(&kmemleak_mutex); | ||
1267 | 1212 | ||
1268 | return ret; | 1213 | return ret; |
1269 | } | 1214 | } |
@@ -1278,6 +1223,7 @@ static int kmemleak_release(struct inode *inode, struct file *file) | |||
1278 | * scan=off - stop the automatic memory scanning thread | 1223 | * scan=off - stop the automatic memory scanning thread |
1279 | * scan=... - set the automatic memory scanning period in seconds (0 to | 1224 | * scan=... - set the automatic memory scanning period in seconds (0 to |
1280 | * disable it) | 1225 | * disable it) |
1226 | * scan - trigger a memory scan | ||
1281 | */ | 1227 | */ |
1282 | static ssize_t kmemleak_write(struct file *file, const char __user *user_buf, | 1228 | static ssize_t kmemleak_write(struct file *file, const char __user *user_buf, |
1283 | size_t size, loff_t *ppos) | 1229 | size_t size, loff_t *ppos) |
@@ -1315,7 +1261,9 @@ static ssize_t kmemleak_write(struct file *file, const char __user *user_buf, | |||
1315 | jiffies_scan_wait = msecs_to_jiffies(secs * 1000); | 1261 | jiffies_scan_wait = msecs_to_jiffies(secs * 1000); |
1316 | start_scan_thread(); | 1262 | start_scan_thread(); |
1317 | } | 1263 | } |
1318 | } else | 1264 | } else if (strncmp(buf, "scan", 4) == 0) |
1265 | kmemleak_scan(); | ||
1266 | else | ||
1319 | return -EINVAL; | 1267 | return -EINVAL; |
1320 | 1268 | ||
1321 | /* ignore the rest of the buffer, only one command at a time */ | 1269 | /* ignore the rest of the buffer, only one command at a time */ |
@@ -1340,11 +1288,9 @@ static int kmemleak_cleanup_thread(void *arg) | |||
1340 | { | 1288 | { |
1341 | struct kmemleak_object *object; | 1289 | struct kmemleak_object *object; |
1342 | 1290 | ||
1343 | mutex_lock(&kmemleak_mutex); | 1291 | mutex_lock(&scan_mutex); |
1344 | stop_scan_thread(); | 1292 | stop_scan_thread(); |
1345 | mutex_unlock(&kmemleak_mutex); | ||
1346 | 1293 | ||
1347 | mutex_lock(&scan_mutex); | ||
1348 | rcu_read_lock(); | 1294 | rcu_read_lock(); |
1349 | list_for_each_entry_rcu(object, &object_list, object_list) | 1295 | list_for_each_entry_rcu(object, &object_list, object_list) |
1350 | delete_object(object->pointer); | 1296 | delete_object(object->pointer); |
@@ -1411,7 +1357,6 @@ void __init kmemleak_init(void) | |||
1411 | int i; | 1357 | int i; |
1412 | unsigned long flags; | 1358 | unsigned long flags; |
1413 | 1359 | ||
1414 | jiffies_scan_yield = msecs_to_jiffies(MSECS_SCAN_YIELD); | ||
1415 | jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE); | 1360 | jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE); |
1416 | jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000); | 1361 | jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000); |
1417 | 1362 | ||
@@ -1486,9 +1431,9 @@ static int __init kmemleak_late_init(void) | |||
1486 | &kmemleak_fops); | 1431 | &kmemleak_fops); |
1487 | if (!dentry) | 1432 | if (!dentry) |
1488 | pr_warning("Failed to create the debugfs kmemleak file\n"); | 1433 | pr_warning("Failed to create the debugfs kmemleak file\n"); |
1489 | mutex_lock(&kmemleak_mutex); | 1434 | mutex_lock(&scan_mutex); |
1490 | start_scan_thread(); | 1435 | start_scan_thread(); |
1491 | mutex_unlock(&kmemleak_mutex); | 1436 | mutex_unlock(&scan_mutex); |
1492 | 1437 | ||
1493 | pr_info("Kernel memory leak detector initialized\n"); | 1438 | pr_info("Kernel memory leak detector initialized\n"); |
1494 | 1439 | ||
diff --git a/mm/nommu.c b/mm/nommu.c index bf0cc762a7d2..53cab10fece4 100644 --- a/mm/nommu.c +++ b/mm/nommu.c | |||
@@ -238,6 +238,27 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, | |||
238 | } | 238 | } |
239 | EXPORT_SYMBOL(get_user_pages); | 239 | EXPORT_SYMBOL(get_user_pages); |
240 | 240 | ||
241 | /** | ||
242 | * follow_pfn - look up PFN at a user virtual address | ||
243 | * @vma: memory mapping | ||
244 | * @address: user virtual address | ||
245 | * @pfn: location to store found PFN | ||
246 | * | ||
247 | * Only IO mappings and raw PFN mappings are allowed. | ||
248 | * | ||
249 | * Returns zero and the pfn at @pfn on success, -ve otherwise. | ||
250 | */ | ||
251 | int follow_pfn(struct vm_area_struct *vma, unsigned long address, | ||
252 | unsigned long *pfn) | ||
253 | { | ||
254 | if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) | ||
255 | return -EINVAL; | ||
256 | |||
257 | *pfn = address >> PAGE_SHIFT; | ||
258 | return 0; | ||
259 | } | ||
260 | EXPORT_SYMBOL(follow_pfn); | ||
261 | |||
241 | DEFINE_RWLOCK(vmlist_lock); | 262 | DEFINE_RWLOCK(vmlist_lock); |
242 | struct vm_struct *vmlist; | 263 | struct vm_struct *vmlist; |
243 | 264 | ||
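
follow_pfn() gives nommu builds a counterpart to the MMU-side helper of the same name: anything that is not an I/O or raw-PFN mapping is rejected, otherwise the frame number is derived directly from the address. A hypothetical caller might look like the sketch below; it is not part of this patch, and it assumes process context with current->mm valid and no mm locks already held.

    #include <linux/errno.h>
    #include <linux/mm.h>
    #include <linux/sched.h>

    /* illustrative only: look up the PFN behind a user address */
    static int demo_lookup_user_pfn(unsigned long uaddr, unsigned long *pfn)
    {
        struct vm_area_struct *vma;
        int ret = -EINVAL;

        down_read(&current->mm->mmap_sem);
        vma = find_vma(current->mm, uaddr);
        if (vma && vma->vm_start <= uaddr)
            ret = follow_pfn(vma, uaddr, pfn);
        up_read(&current->mm->mmap_sem);

        return ret;
    }
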
diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 7b0dcea4935b..7687879253b9 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c | |||
@@ -541,8 +541,11 @@ static void balance_dirty_pages(struct address_space *mapping) | |||
541 | * filesystems (i.e. NFS) in which data may have been | 541 | * filesystems (i.e. NFS) in which data may have been |
542 | * written to the server's write cache, but has not yet | 542 | * written to the server's write cache, but has not yet |
543 | * been flushed to permanent storage. | 543 | * been flushed to permanent storage. |
544 | * Only move pages to writeback if this bdi is over its | ||
545 | * threshold otherwise wait until the disk writes catch | ||
546 | * up. | ||
544 | */ | 547 | */ |
545 | if (bdi_nr_reclaimable) { | 548 | if (bdi_nr_reclaimable > bdi_thresh) { |
546 | writeback_inodes(&wbc); | 549 | writeback_inodes(&wbc); |
547 | pages_written += write_chunk - wbc.nr_to_write; | 550 | pages_written += write_chunk - wbc.nr_to_write; |
548 | get_dirty_limits(&background_thresh, &dirty_thresh, | 551 | get_dirty_limits(&background_thresh, &dirty_thresh, |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 5d714f8fb303..e0f2cdf9d8b1 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -4032,6 +4032,8 @@ static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn) | |||
4032 | int i, nid; | 4032 | int i, nid; |
4033 | unsigned long usable_startpfn; | 4033 | unsigned long usable_startpfn; |
4034 | unsigned long kernelcore_node, kernelcore_remaining; | 4034 | unsigned long kernelcore_node, kernelcore_remaining; |
4035 | /* save the state before borrowing the nodemask */ | ||
4036 | nodemask_t saved_node_state = node_states[N_HIGH_MEMORY]; | ||
4035 | unsigned long totalpages = early_calculate_totalpages(); | 4037 | unsigned long totalpages = early_calculate_totalpages(); |
4036 | int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]); | 4038 | int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]); |
4037 | 4039 | ||
@@ -4059,7 +4061,7 @@ static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn) | |||
4059 | 4061 | ||
4060 | /* If kernelcore was not specified, there is no ZONE_MOVABLE */ | 4062 | /* If kernelcore was not specified, there is no ZONE_MOVABLE */ |
4061 | if (!required_kernelcore) | 4063 | if (!required_kernelcore) |
4062 | return; | 4064 | goto out; |
4063 | 4065 | ||
4064 | /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */ | 4066 | /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */ |
4065 | find_usable_zone_for_movable(); | 4067 | find_usable_zone_for_movable(); |
@@ -4158,6 +4160,10 @@ restart: | |||
4158 | for (nid = 0; nid < MAX_NUMNODES; nid++) | 4160 | for (nid = 0; nid < MAX_NUMNODES; nid++) |
4159 | zone_movable_pfn[nid] = | 4161 | zone_movable_pfn[nid] = |
4160 | roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES); | 4162 | roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES); |
4163 | |||
4164 | out: | ||
4165 | /* restore the node_state */ | ||
4166 | node_states[N_HIGH_MEMORY] = saved_node_state; | ||
4161 | } | 4167 | } |
4162 | 4168 | ||
4163 | /* Any regular memory on that node ? */ | 4169 | /* Any regular memory on that node ? */ |
@@ -4242,11 +4248,6 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn) | |||
4242 | early_node_map[i].start_pfn, | 4248 | early_node_map[i].start_pfn, |
4243 | early_node_map[i].end_pfn); | 4249 | early_node_map[i].end_pfn); |
4244 | 4250 | ||
4245 | /* | ||
4246 | * find_zone_movable_pfns_for_nodes/early_calculate_totalpages init | ||
4247 | * that node_mask, clear it at first | ||
4248 | */ | ||
4249 | nodes_clear(node_states[N_HIGH_MEMORY]); | ||
4250 | /* Initialise every node */ | 4251 | /* Initialise every node */ |
4251 | mminit_verify_pageflags_layout(); | 4252 | mminit_verify_pageflags_layout(); |
4252 | setup_nr_node_ids(); | 4253 | setup_nr_node_ids(); |
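
The fix saves node_states[N_HIGH_MEMORY] before the function borrows the nodemask for its kernelcore calculation and restores it through the new out: label on every exit path, including the early return when no kernelcore was requested; that is what lets the nodes_clear() call in free_area_init_nodes() go away. A tiny userspace sketch of the save/borrow/restore-on-all-paths pattern, with made-up mask values:

    #include <stdio.h>

    static unsigned long node_state_demo = 0xf;      /* stand-in for the nodemask */

    /* borrow the global mask for a calculation, restoring it on every exit */
    static void demo_borrow_nodemask(int bail_out_early)
    {
        unsigned long saved = node_state_demo;       /* save before borrowing */

        node_state_demo &= 0x3;                      /* "borrow": narrow it temporarily */

        if (bail_out_early)
            goto out;

        /* ... the real calculation would use the narrowed mask here ... */

    out:
        node_state_demo = saved;                     /* restore on all paths */
    }

    int main(void)
    {
        demo_borrow_nodemask(1);
        printf("after early exit: %#lx\n", node_state_demo);
        demo_borrow_nodemask(0);
        printf("after full run:   %#lx\n", node_state_demo);
        return 0;
    }
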
diff --git a/mm/percpu.c b/mm/percpu.c index c0b2c1a76e81..b70f2acd8853 100644 --- a/mm/percpu.c +++ b/mm/percpu.c | |||
@@ -549,14 +549,14 @@ static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme) | |||
549 | * @chunk: chunk of interest | 549 | * @chunk: chunk of interest |
550 | * @page_start: page index of the first page to unmap | 550 | * @page_start: page index of the first page to unmap |
551 | * @page_end: page index of the last page to unmap + 1 | 551 | * @page_end: page index of the last page to unmap + 1 |
552 | * @flush: whether to flush cache and tlb or not | 552 | * @flush_tlb: whether to flush tlb or not |
553 | * | 553 | * |
554 | * For each cpu, unmap pages [@page_start,@page_end) out of @chunk. | 554 | * For each cpu, unmap pages [@page_start,@page_end) out of @chunk. |
555 | * If @flush is true, vcache is flushed before unmapping and tlb | 555 | * If @flush is true, vcache is flushed before unmapping and tlb |
556 | * after. | 556 | * after. |
557 | */ | 557 | */ |
558 | static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end, | 558 | static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end, |
559 | bool flush) | 559 | bool flush_tlb) |
560 | { | 560 | { |
561 | unsigned int last = num_possible_cpus() - 1; | 561 | unsigned int last = num_possible_cpus() - 1; |
562 | unsigned int cpu; | 562 | unsigned int cpu; |
@@ -569,9 +569,8 @@ static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end, | |||
569 | * the whole region at once rather than doing it for each cpu. | 569 | * the whole region at once rather than doing it for each cpu. |
570 | * This could be an overkill but is more scalable. | 570 | * This could be an overkill but is more scalable. |
571 | */ | 571 | */ |
572 | if (flush) | 572 | flush_cache_vunmap(pcpu_chunk_addr(chunk, 0, page_start), |
573 | flush_cache_vunmap(pcpu_chunk_addr(chunk, 0, page_start), | 573 | pcpu_chunk_addr(chunk, last, page_end)); |
574 | pcpu_chunk_addr(chunk, last, page_end)); | ||
575 | 574 | ||
576 | for_each_possible_cpu(cpu) | 575 | for_each_possible_cpu(cpu) |
577 | unmap_kernel_range_noflush( | 576 | unmap_kernel_range_noflush( |
@@ -579,7 +578,7 @@ static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end, | |||
579 | (page_end - page_start) << PAGE_SHIFT); | 578 | (page_end - page_start) << PAGE_SHIFT); |
580 | 579 | ||
581 | /* ditto as flush_cache_vunmap() */ | 580 | /* ditto as flush_cache_vunmap() */ |
582 | if (flush) | 581 | if (flush_tlb) |
583 | flush_tlb_kernel_range(pcpu_chunk_addr(chunk, 0, page_start), | 582 | flush_tlb_kernel_range(pcpu_chunk_addr(chunk, 0, page_start), |
584 | pcpu_chunk_addr(chunk, last, page_end)); | 583 | pcpu_chunk_addr(chunk, last, page_end)); |
585 | } | 584 | } |
@@ -1234,6 +1233,7 @@ static struct page * __init pcpue_get_page(unsigned int cpu, int pageno) | |||
1234 | ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size, | 1233 | ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size, |
1235 | ssize_t dyn_size, ssize_t unit_size) | 1234 | ssize_t dyn_size, ssize_t unit_size) |
1236 | { | 1235 | { |
1236 | size_t chunk_size; | ||
1237 | unsigned int cpu; | 1237 | unsigned int cpu; |
1238 | 1238 | ||
1239 | /* determine parameters and allocate */ | 1239 | /* determine parameters and allocate */ |
@@ -1248,11 +1248,15 @@ ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size, | |||
1248 | } else | 1248 | } else |
1249 | pcpue_unit_size = max_t(size_t, pcpue_size, PCPU_MIN_UNIT_SIZE); | 1249 | pcpue_unit_size = max_t(size_t, pcpue_size, PCPU_MIN_UNIT_SIZE); |
1250 | 1250 | ||
1251 | pcpue_ptr = __alloc_bootmem_nopanic( | 1251 | chunk_size = pcpue_unit_size * num_possible_cpus(); |
1252 | num_possible_cpus() * pcpue_unit_size, | 1252 | |
1253 | PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); | 1253 | pcpue_ptr = __alloc_bootmem_nopanic(chunk_size, PAGE_SIZE, |
1254 | if (!pcpue_ptr) | 1254 | __pa(MAX_DMA_ADDRESS)); |
1255 | if (!pcpue_ptr) { | ||
1256 | pr_warning("PERCPU: failed to allocate %zu bytes for " | ||
1257 | "embedding\n", chunk_size); | ||
1255 | return -ENOMEM; | 1258 | return -ENOMEM; |
1259 | } | ||
1256 | 1260 | ||
1257 | /* return the leftover and copy */ | 1261 | /* return the leftover and copy */ |
1258 | for_each_possible_cpu(cpu) { | 1262 | for_each_possible_cpu(cpu) { |
diff --git a/net/bridge/br.c b/net/bridge/br.c index 9aac5213105a..e1241c76239a 100644 --- a/net/bridge/br.c +++ b/net/bridge/br.c | |||
@@ -93,7 +93,7 @@ static void __exit br_deinit(void) | |||
93 | 93 | ||
94 | unregister_pernet_subsys(&br_net_ops); | 94 | unregister_pernet_subsys(&br_net_ops); |
95 | 95 | ||
96 | synchronize_net(); | 96 | rcu_barrier(); /* Wait for completion of call_rcu()'s */ |
97 | 97 | ||
98 | br_netfilter_fini(); | 98 | br_netfilter_fini(); |
99 | #if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE) | 99 | #if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE) |
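
Several hunks in this series make the same substitution on teardown paths: synchronize_net()/synchronize_rcu() only waits for a grace period, whereas rcu_barrier() (or rcu_barrier_bh() for call_rcu_bh() users) also waits for every already-queued call_rcu() callback to finish, which must happen before the memory or kmem_cache those callbacks touch is destroyed. A kernel-style sketch of that ordering with invented demo_* names, in the spirit of the nf_conntrack_expect change further below; it is illustrative only and not code from this series.

    #include <linux/kernel.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct demo_obj {
        struct rcu_head rcu;
        /* ... payload ... */
    };

    static struct kmem_cache *demo_cachep;

    static void demo_free_rcu(struct rcu_head *head)
    {
        struct demo_obj *obj = container_of(head, struct demo_obj, rcu);

        kmem_cache_free(demo_cachep, obj);
    }

    /* normal release path: defer the free past any RCU readers */
    static void demo_release(struct demo_obj *obj)
    {
        call_rcu(&obj->rcu, demo_free_rcu);
    }

    static void demo_module_exit(void)
    {
        /* stop producing new objects and callbacks first, then ... */
        rcu_barrier();                   /* wait for every queued demo_free_rcu() */
        kmem_cache_destroy(demo_cachep); /* now nothing can return memory to it */
    }
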
diff --git a/net/core/dev.c b/net/core/dev.c index 60b572812278..70c27e0c7c32 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -2823,9 +2823,11 @@ static void net_rx_action(struct softirq_action *h) | |||
2823 | * move the instance around on the list at-will. | 2823 | * move the instance around on the list at-will. |
2824 | */ | 2824 | */ |
2825 | if (unlikely(work == weight)) { | 2825 | if (unlikely(work == weight)) { |
2826 | if (unlikely(napi_disable_pending(n))) | 2826 | if (unlikely(napi_disable_pending(n))) { |
2827 | __napi_complete(n); | 2827 | local_irq_enable(); |
2828 | else | 2828 | napi_complete(n); |
2829 | local_irq_disable(); | ||
2830 | } else | ||
2829 | list_move_tail(&n->poll_list, list); | 2831 | list_move_tail(&n->poll_list, list); |
2830 | } | 2832 | } |
2831 | 2833 | ||
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index d351b8db0df5..77d40289653c 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c | |||
@@ -2413,6 +2413,8 @@ static void __exit decnet_exit(void) | |||
2413 | proc_net_remove(&init_net, "decnet"); | 2413 | proc_net_remove(&init_net, "decnet"); |
2414 | 2414 | ||
2415 | proto_unregister(&dn_proto); | 2415 | proto_unregister(&dn_proto); |
2416 | |||
2417 | rcu_barrier_bh(); /* Wait for completion of call_rcu_bh()'s */ | ||
2416 | } | 2418 | } |
2417 | module_exit(decnet_exit); | 2419 | module_exit(decnet_exit); |
2418 | #endif | 2420 | #endif |
diff --git a/net/ieee802154/netlink.c b/net/ieee802154/netlink.c index 105ad10876af..27eda9fdf3c2 100644 --- a/net/ieee802154/netlink.c +++ b/net/ieee802154/netlink.c | |||
@@ -276,6 +276,9 @@ static struct net_device *ieee802154_nl_get_dev(struct genl_info *info) | |||
276 | else | 276 | else |
277 | return NULL; | 277 | return NULL; |
278 | 278 | ||
279 | if (!dev) | ||
280 | return NULL; | ||
281 | |||
279 | if (dev->type != ARPHRD_IEEE802154) { | 282 | if (dev->type != ARPHRD_IEEE802154) { |
280 | dev_put(dev); | 283 | dev_put(dev); |
281 | return NULL; | 284 | return NULL; |
@@ -521,3 +524,6 @@ static void __exit ieee802154_nl_exit(void) | |||
521 | } | 524 | } |
522 | module_exit(ieee802154_nl_exit); | 525 | module_exit(ieee802154_nl_exit); |
523 | 526 | ||
527 | MODULE_LICENSE("GPL v2"); | ||
528 | MODULE_DESCRIPTION("ieee 802.15.4 configuration interface"); | ||
529 | |||
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 8a3881e28aca..c29d75d8f1b1 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c | |||
@@ -801,11 +801,8 @@ static int arp_process(struct sk_buff *skb) | |||
801 | * cache. | 801 | * cache. |
802 | */ | 802 | */ |
803 | 803 | ||
804 | /* | 804 | /* Special case: IPv4 duplicate address detection packet (RFC2131) */ |
805 | * Special case: IPv4 duplicate address detection packet (RFC2131) | 805 | if (sip == 0) { |
806 | * and Gratuitous ARP/ARP Announce. (RFC3927, Section 2.4) | ||
807 | */ | ||
808 | if (sip == 0 || tip == sip) { | ||
809 | if (arp->ar_op == htons(ARPOP_REQUEST) && | 806 | if (arp->ar_op == htons(ARPOP_REQUEST) && |
810 | inet_addr_type(net, tip) == RTN_LOCAL && | 807 | inet_addr_type(net, tip) == RTN_LOCAL && |
811 | !arp_ignore(in_dev, sip, tip)) | 808 | !arp_ignore(in_dev, sip, tip)) |
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 012cf5a68581..00a54b246dfe 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c | |||
@@ -1021,6 +1021,9 @@ static void trie_rebalance(struct trie *t, struct tnode *tn) | |||
1021 | (struct node *)tn, wasfull); | 1021 | (struct node *)tn, wasfull); |
1022 | 1022 | ||
1023 | tp = node_parent((struct node *) tn); | 1023 | tp = node_parent((struct node *) tn); |
1024 | if (!tp) | ||
1025 | rcu_assign_pointer(t->trie, (struct node *)tn); | ||
1026 | |||
1024 | tnode_free_flush(); | 1027 | tnode_free_flush(); |
1025 | if (!tp) | 1028 | if (!tp) |
1026 | break; | 1029 | break; |
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 490ce20faf38..db46b4b5b2b9 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c | |||
@@ -440,6 +440,9 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, | |||
440 | /* Remove any debris in the socket control block */ | 440 | /* Remove any debris in the socket control block */ |
441 | memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); | 441 | memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); |
442 | 442 | ||
443 | /* Must drop socket now because of tproxy. */ | ||
444 | skb_orphan(skb); | ||
445 | |||
443 | return NF_HOOK(PF_INET, NF_INET_PRE_ROUTING, skb, dev, NULL, | 446 | return NF_HOOK(PF_INET, NF_INET_PRE_ROUTING, skb, dev, NULL, |
444 | ip_rcv_finish); | 447 | ip_rcv_finish); |
445 | 448 | ||
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c index 155c008626c8..09172a65d9b6 100644 --- a/net/ipv4/netfilter/nf_nat_helper.c +++ b/net/ipv4/netfilter/nf_nat_helper.c | |||
@@ -191,7 +191,8 @@ nf_nat_mangle_tcp_packet(struct sk_buff *skb, | |||
191 | ct, ctinfo); | 191 | ct, ctinfo); |
192 | /* Tell TCP window tracking about seq change */ | 192 | /* Tell TCP window tracking about seq change */ |
193 | nf_conntrack_tcp_update(skb, ip_hdrlen(skb), | 193 | nf_conntrack_tcp_update(skb, ip_hdrlen(skb), |
194 | ct, CTINFO2DIR(ctinfo)); | 194 | ct, CTINFO2DIR(ctinfo), |
195 | (int)rep_len - (int)match_len); | ||
195 | 196 | ||
196 | nf_conntrack_event_cache(IPCT_NATSEQADJ, ct); | 197 | nf_conntrack_event_cache(IPCT_NATSEQADJ, ct); |
197 | } | 198 | } |
@@ -377,6 +378,7 @@ nf_nat_seq_adjust(struct sk_buff *skb, | |||
377 | struct tcphdr *tcph; | 378 | struct tcphdr *tcph; |
378 | int dir; | 379 | int dir; |
379 | __be32 newseq, newack; | 380 | __be32 newseq, newack; |
381 | s16 seqoff, ackoff; | ||
380 | struct nf_conn_nat *nat = nfct_nat(ct); | 382 | struct nf_conn_nat *nat = nfct_nat(ct); |
381 | struct nf_nat_seq *this_way, *other_way; | 383 | struct nf_nat_seq *this_way, *other_way; |
382 | 384 | ||
@@ -390,15 +392,18 @@ nf_nat_seq_adjust(struct sk_buff *skb, | |||
390 | 392 | ||
391 | tcph = (void *)skb->data + ip_hdrlen(skb); | 393 | tcph = (void *)skb->data + ip_hdrlen(skb); |
392 | if (after(ntohl(tcph->seq), this_way->correction_pos)) | 394 | if (after(ntohl(tcph->seq), this_way->correction_pos)) |
393 | newseq = htonl(ntohl(tcph->seq) + this_way->offset_after); | 395 | seqoff = this_way->offset_after; |
394 | else | 396 | else |
395 | newseq = htonl(ntohl(tcph->seq) + this_way->offset_before); | 397 | seqoff = this_way->offset_before; |
396 | 398 | ||
397 | if (after(ntohl(tcph->ack_seq) - other_way->offset_before, | 399 | if (after(ntohl(tcph->ack_seq) - other_way->offset_before, |
398 | other_way->correction_pos)) | 400 | other_way->correction_pos)) |
399 | newack = htonl(ntohl(tcph->ack_seq) - other_way->offset_after); | 401 | ackoff = other_way->offset_after; |
400 | else | 402 | else |
401 | newack = htonl(ntohl(tcph->ack_seq) - other_way->offset_before); | 403 | ackoff = other_way->offset_before; |
404 | |||
405 | newseq = htonl(ntohl(tcph->seq) + seqoff); | ||
406 | newack = htonl(ntohl(tcph->ack_seq) - ackoff); | ||
402 | 407 | ||
403 | inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, 0); | 408 | inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, 0); |
404 | inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack, 0); | 409 | inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack, 0); |
@@ -413,7 +418,7 @@ nf_nat_seq_adjust(struct sk_buff *skb, | |||
413 | if (!nf_nat_sack_adjust(skb, tcph, ct, ctinfo)) | 418 | if (!nf_nat_sack_adjust(skb, tcph, ct, ctinfo)) |
414 | return 0; | 419 | return 0; |
415 | 420 | ||
416 | nf_conntrack_tcp_update(skb, ip_hdrlen(skb), ct, dir); | 421 | nf_conntrack_tcp_update(skb, ip_hdrlen(skb), ct, dir, seqoff); |
417 | 422 | ||
418 | return 1; | 423 | return 1; |
419 | } | 424 | } |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 17b89c523f9d..7870a535dac6 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -903,13 +903,17 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, | |||
903 | iov++; | 903 | iov++; |
904 | 904 | ||
905 | while (seglen > 0) { | 905 | while (seglen > 0) { |
906 | int copy; | 906 | int copy = 0; |
907 | int max = size_goal; | ||
907 | 908 | ||
908 | skb = tcp_write_queue_tail(sk); | 909 | skb = tcp_write_queue_tail(sk); |
910 | if (tcp_send_head(sk)) { | ||
911 | if (skb->ip_summed == CHECKSUM_NONE) | ||
912 | max = mss_now; | ||
913 | copy = max - skb->len; | ||
914 | } | ||
909 | 915 | ||
910 | if (!tcp_send_head(sk) || | 916 | if (copy <= 0) { |
911 | (copy = size_goal - skb->len) <= 0) { | ||
912 | |||
913 | new_segment: | 917 | new_segment: |
914 | /* Allocate new segment. If the interface is SG, | 918 | /* Allocate new segment. If the interface is SG, |
915 | * allocate skb fitting to single page. | 919 | * allocate skb fitting to single page. |
@@ -930,6 +934,7 @@ new_segment: | |||
930 | 934 | ||
931 | skb_entail(sk, skb); | 935 | skb_entail(sk, skb); |
932 | copy = size_goal; | 936 | copy = size_goal; |
937 | max = size_goal; | ||
933 | } | 938 | } |
934 | 939 | ||
935 | /* Try to append data to the end of skb. */ | 940 | /* Try to append data to the end of skb. */ |
@@ -1028,7 +1033,7 @@ new_segment: | |||
1028 | if ((seglen -= copy) == 0 && iovlen == 0) | 1033 | if ((seglen -= copy) == 0 && iovlen == 0) |
1029 | goto out; | 1034 | goto out; |
1030 | 1035 | ||
1031 | if (skb->len < size_goal || (flags & MSG_OOB)) | 1036 | if (skb->len < max || (flags & MSG_OOB)) |
1032 | continue; | 1037 | continue; |
1033 | 1038 | ||
1034 | if (forced_push(tp)) { | 1039 | if (forced_push(tp)) { |
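
The tcp_sendmsg() change caps how much may still be appended to the skb at the tail of the write queue: up to size_goal normally, but only up to mss_now when the skb has no checksum offload (ip_summed == CHECKSUM_NONE), since such an skb must not grow beyond a single segment; a non-positive result forces a fresh segment, and the same cap (max) is reused for the push test lower down. A small arithmetic sketch of that decision, with made-up sizes:

    #include <stdio.h>

    /* bytes that may still be appended to the tail skb; <= 0 means "new segment" */
    static int demo_append_room(int skb_len, int no_csum_offload,
                                int mss_now, int size_goal)
    {
        int max = size_goal;

        if (no_csum_offload)             /* skb->ip_summed == CHECKSUM_NONE above */
            max = mss_now;               /* no offload: keep the skb to one MSS */

        return max - skb_len;
    }

    int main(void)
    {
        int mss_now = 1448, size_goal = 65160;       /* illustrative values */

        printf("offloaded skb, 3000 bytes queued:     room %d\n",
               demo_append_room(3000, 0, mss_now, size_goal));
        printf("CHECKSUM_NONE skb, 1400 bytes queued: room %d\n",
               demo_append_room(1400, 1, mss_now, size_goal));
        printf("CHECKSUM_NONE skb, 1448 bytes queued: room %d\n",
               demo_append_room(1448, 1, mss_now, size_goal));
        return 0;
    }
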
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 43bbba7926ee..f8d67ccc64f3 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c | |||
@@ -128,7 +128,8 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb, | |||
128 | goto kill_with_rst; | 128 | goto kill_with_rst; |
129 | 129 | ||
130 | /* Dup ACK? */ | 130 | /* Dup ACK? */ |
131 | if (!after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) || | 131 | if (!th->ack || |
132 | !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) || | ||
132 | TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) { | 133 | TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) { |
133 | inet_twsk_put(tw); | 134 | inet_twsk_put(tw); |
134 | return TCP_TW_SUCCESS; | 135 | return TCP_TW_SUCCESS; |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 416fc4c2e7eb..5bdf08d312d9 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -725,7 +725,8 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb) | |||
725 | static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, | 725 | static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, |
726 | unsigned int mss_now) | 726 | unsigned int mss_now) |
727 | { | 727 | { |
728 | if (skb->len <= mss_now || !sk_can_gso(sk)) { | 728 | if (skb->len <= mss_now || !sk_can_gso(sk) || |
729 | skb->ip_summed == CHECKSUM_NONE) { | ||
729 | /* Avoid the costly divide in the normal | 730 | /* Avoid the costly divide in the normal |
730 | * non-TSO case. | 731 | * non-TSO case. |
731 | */ | 732 | */ |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 8c1e86afbbf5..3883b4036a74 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -3362,7 +3362,10 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa, | |||
3362 | valid = ifa->valid_lft; | 3362 | valid = ifa->valid_lft; |
3363 | if (preferred != INFINITY_LIFE_TIME) { | 3363 | if (preferred != INFINITY_LIFE_TIME) { |
3364 | long tval = (jiffies - ifa->tstamp)/HZ; | 3364 | long tval = (jiffies - ifa->tstamp)/HZ; |
3365 | preferred -= tval; | 3365 | if (preferred > tval) |
3366 | preferred -= tval; | ||
3367 | else | ||
3368 | preferred = 0; | ||
3366 | if (valid != INFINITY_LIFE_TIME) | 3369 | if (valid != INFINITY_LIFE_TIME) |
3367 | valid -= tval; | 3370 | valid -= tval; |
3368 | } | 3371 | } |
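
preferred is an unsigned lifetime, so unconditionally subtracting the time elapsed since the address's timestamp would wrap to a huge value once the preferred lifetime had already expired; the fix clamps the reported value at zero instead. A tiny sketch of the wrap and the clamp (the numbers are made up):

    #include <stdio.h>

    /* clamp an unsigned remaining lifetime instead of letting it wrap */
    static unsigned int demo_remaining(unsigned int preferred, unsigned int elapsed)
    {
        return preferred > elapsed ? preferred - elapsed : 0;
    }

    int main(void)
    {
        unsigned int preferred = 600, elapsed = 700; /* seconds, made up */

        printf("unclamped: %u\n", preferred - elapsed);  /* wraps to ~4.29e9 */
        printf("clamped:   %u\n", demo_remaining(preferred, elapsed));
        return 0;
    }
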
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 85b3d0036afd..caa0278d30a9 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c | |||
@@ -1284,6 +1284,8 @@ static void __exit inet6_exit(void) | |||
1284 | proto_unregister(&udplitev6_prot); | 1284 | proto_unregister(&udplitev6_prot); |
1285 | proto_unregister(&udpv6_prot); | 1285 | proto_unregister(&udpv6_prot); |
1286 | proto_unregister(&tcpv6_prot); | 1286 | proto_unregister(&tcpv6_prot); |
1287 | |||
1288 | rcu_barrier(); /* Wait for completion of call_rcu()'s */ | ||
1287 | } | 1289 | } |
1288 | module_exit(inet6_exit); | 1290 | module_exit(inet6_exit); |
1289 | 1291 | ||
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index c3a07d75b5f5..6d6a4277c677 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c | |||
@@ -139,6 +139,9 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt | |||
139 | 139 | ||
140 | rcu_read_unlock(); | 140 | rcu_read_unlock(); |
141 | 141 | ||
142 | /* Must drop socket now because of tproxy. */ | ||
143 | skb_orphan(skb); | ||
144 | |||
142 | return NF_HOOK(PF_INET6, NF_INET_PRE_ROUTING, skb, dev, NULL, | 145 | return NF_HOOK(PF_INET6, NF_INET_PRE_ROUTING, skb, dev, NULL, |
143 | ip6_rcv_finish); | 146 | ip6_rcv_finish); |
144 | err: | 147 | err: |
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index fc712e60705d..11cf45bce38a 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c | |||
@@ -494,7 +494,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata) | |||
494 | * should it be using the interface and enqueuing | 494 | * should it be using the interface and enqueuing |
495 | * frames at this very time on another CPU. | 495 | * frames at this very time on another CPU. |
496 | */ | 496 | */ |
497 | synchronize_rcu(); | 497 | rcu_barrier(); /* Wait for RX path and call_rcu()'s */ |
498 | skb_queue_purge(&sdata->u.mesh.skb_queue); | 498 | skb_queue_purge(&sdata->u.mesh.skb_queue); |
499 | } | 499 | } |
500 | 500 | ||
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index afde8f991646..2032dfe25ca8 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c | |||
@@ -617,8 +617,10 @@ err1: | |||
617 | void nf_conntrack_expect_fini(struct net *net) | 617 | void nf_conntrack_expect_fini(struct net *net) |
618 | { | 618 | { |
619 | exp_proc_remove(net); | 619 | exp_proc_remove(net); |
620 | if (net_eq(net, &init_net)) | 620 | if (net_eq(net, &init_net)) { |
621 | rcu_barrier(); /* Wait for call_rcu() before destroy */ | ||
621 | kmem_cache_destroy(nf_ct_expect_cachep); | 622 | kmem_cache_destroy(nf_ct_expect_cachep); |
623 | } | ||
622 | nf_ct_free_hashtable(net->ct.expect_hash, net->ct.expect_vmalloc, | 624 | nf_ct_free_hashtable(net->ct.expect_hash, net->ct.expect_vmalloc, |
623 | nf_ct_expect_hsize); | 625 | nf_ct_expect_hsize); |
624 | } | 626 | } |
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c index 4b2c769d555f..fef95be334bd 100644 --- a/net/netfilter/nf_conntrack_extend.c +++ b/net/netfilter/nf_conntrack_extend.c | |||
@@ -186,6 +186,6 @@ void nf_ct_extend_unregister(struct nf_ct_ext_type *type) | |||
186 | rcu_assign_pointer(nf_ct_ext_types[type->id], NULL); | 186 | rcu_assign_pointer(nf_ct_ext_types[type->id], NULL); |
187 | update_alloc_size(type); | 187 | update_alloc_size(type); |
188 | mutex_unlock(&nf_ct_ext_type_mutex); | 188 | mutex_unlock(&nf_ct_ext_type_mutex); |
189 | synchronize_rcu(); | 189 | rcu_barrier(); /* Wait for completion of call_rcu()'s */ |
190 | } | 190 | } |
191 | EXPORT_SYMBOL_GPL(nf_ct_extend_unregister); | 191 | EXPORT_SYMBOL_GPL(nf_ct_extend_unregister); |
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index 33fc0a443f3d..97a82ba75376 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c | |||
@@ -720,8 +720,8 @@ static bool tcp_in_window(const struct nf_conn *ct, | |||
720 | /* Caller must linearize skb at tcp header. */ | 720 | /* Caller must linearize skb at tcp header. */ |
721 | void nf_conntrack_tcp_update(const struct sk_buff *skb, | 721 | void nf_conntrack_tcp_update(const struct sk_buff *skb, |
722 | unsigned int dataoff, | 722 | unsigned int dataoff, |
723 | struct nf_conn *ct, | 723 | struct nf_conn *ct, int dir, |
724 | int dir) | 724 | s16 offset) |
725 | { | 725 | { |
726 | const struct tcphdr *tcph = (const void *)skb->data + dataoff; | 726 | const struct tcphdr *tcph = (const void *)skb->data + dataoff; |
727 | const struct ip_ct_tcp_state *sender = &ct->proto.tcp.seen[dir]; | 727 | const struct ip_ct_tcp_state *sender = &ct->proto.tcp.seen[dir]; |
@@ -734,7 +734,7 @@ void nf_conntrack_tcp_update(const struct sk_buff *skb, | |||
734 | /* | 734 | /* |
735 | * We have to worry for the ack in the reply packet only... | 735 | * We have to worry for the ack in the reply packet only... |
736 | */ | 736 | */ |
737 | if (after(end, ct->proto.tcp.seen[dir].td_end)) | 737 | if (ct->proto.tcp.seen[dir].td_end + offset == end) |
738 | ct->proto.tcp.seen[dir].td_end = end; | 738 | ct->proto.tcp.seen[dir].td_end = end; |
739 | ct->proto.tcp.last_end = end; | 739 | ct->proto.tcp.last_end = end; |
740 | spin_unlock_bh(&ct->lock); | 740 | spin_unlock_bh(&ct->lock); |
diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c index 0b7139f3dd78..fc581800698e 100644 --- a/net/netfilter/xt_conntrack.c +++ b/net/netfilter/xt_conntrack.c | |||
@@ -129,7 +129,7 @@ conntrack_addrcmp(const union nf_inet_addr *kaddr, | |||
129 | 129 | ||
130 | static inline bool | 130 | static inline bool |
131 | conntrack_mt_origsrc(const struct nf_conn *ct, | 131 | conntrack_mt_origsrc(const struct nf_conn *ct, |
132 | const struct xt_conntrack_mtinfo1 *info, | 132 | const struct xt_conntrack_mtinfo2 *info, |
133 | u_int8_t family) | 133 | u_int8_t family) |
134 | { | 134 | { |
135 | return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3, | 135 | return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3, |
@@ -138,7 +138,7 @@ conntrack_mt_origsrc(const struct nf_conn *ct, | |||
138 | 138 | ||
139 | static inline bool | 139 | static inline bool |
140 | conntrack_mt_origdst(const struct nf_conn *ct, | 140 | conntrack_mt_origdst(const struct nf_conn *ct, |
141 | const struct xt_conntrack_mtinfo1 *info, | 141 | const struct xt_conntrack_mtinfo2 *info, |
142 | u_int8_t family) | 142 | u_int8_t family) |
143 | { | 143 | { |
144 | return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3, | 144 | return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3, |
@@ -147,7 +147,7 @@ conntrack_mt_origdst(const struct nf_conn *ct, | |||
147 | 147 | ||
148 | static inline bool | 148 | static inline bool |
149 | conntrack_mt_replsrc(const struct nf_conn *ct, | 149 | conntrack_mt_replsrc(const struct nf_conn *ct, |
150 | const struct xt_conntrack_mtinfo1 *info, | 150 | const struct xt_conntrack_mtinfo2 *info, |
151 | u_int8_t family) | 151 | u_int8_t family) |
152 | { | 152 | { |
153 | return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3, | 153 | return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3, |
@@ -156,7 +156,7 @@ conntrack_mt_replsrc(const struct nf_conn *ct, | |||
156 | 156 | ||
157 | static inline bool | 157 | static inline bool |
158 | conntrack_mt_repldst(const struct nf_conn *ct, | 158 | conntrack_mt_repldst(const struct nf_conn *ct, |
159 | const struct xt_conntrack_mtinfo1 *info, | 159 | const struct xt_conntrack_mtinfo2 *info, |
160 | u_int8_t family) | 160 | u_int8_t family) |
161 | { | 161 | { |
162 | return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3, | 162 | return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3, |
@@ -164,7 +164,7 @@ conntrack_mt_repldst(const struct nf_conn *ct, | |||
164 | } | 164 | } |
165 | 165 | ||
166 | static inline bool | 166 | static inline bool |
167 | ct_proto_port_check(const struct xt_conntrack_mtinfo1 *info, | 167 | ct_proto_port_check(const struct xt_conntrack_mtinfo2 *info, |
168 | const struct nf_conn *ct) | 168 | const struct nf_conn *ct) |
169 | { | 169 | { |
170 | const struct nf_conntrack_tuple *tuple; | 170 | const struct nf_conntrack_tuple *tuple; |
@@ -204,7 +204,7 @@ ct_proto_port_check(const struct xt_conntrack_mtinfo1 *info, | |||
204 | static bool | 204 | static bool |
205 | conntrack_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 205 | conntrack_mt(const struct sk_buff *skb, const struct xt_match_param *par) |
206 | { | 206 | { |
207 | const struct xt_conntrack_mtinfo1 *info = par->matchinfo; | 207 | const struct xt_conntrack_mtinfo2 *info = par->matchinfo; |
208 | enum ip_conntrack_info ctinfo; | 208 | enum ip_conntrack_info ctinfo; |
209 | const struct nf_conn *ct; | 209 | const struct nf_conn *ct; |
210 | unsigned int statebit; | 210 | unsigned int statebit; |
@@ -278,6 +278,16 @@ conntrack_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
278 | return true; | 278 | return true; |
279 | } | 279 | } |
280 | 280 | ||
281 | static bool | ||
282 | conntrack_mt_v1(const struct sk_buff *skb, const struct xt_match_param *par) | ||
283 | { | ||
284 | const struct xt_conntrack_mtinfo2 *const *info = par->matchinfo; | ||
285 | struct xt_match_param newpar = *par; | ||
286 | |||
287 | newpar.matchinfo = *info; | ||
288 | return conntrack_mt(skb, &newpar); | ||
289 | } | ||
290 | |||
281 | static bool conntrack_mt_check(const struct xt_mtchk_param *par) | 291 | static bool conntrack_mt_check(const struct xt_mtchk_param *par) |
282 | { | 292 | { |
283 | if (nf_ct_l3proto_try_module_get(par->family) < 0) { | 293 | if (nf_ct_l3proto_try_module_get(par->family) < 0) { |
@@ -288,11 +298,45 @@ static bool conntrack_mt_check(const struct xt_mtchk_param *par) | |||
288 | return true; | 298 | return true; |
289 | } | 299 | } |
290 | 300 | ||
301 | static bool conntrack_mt_check_v1(const struct xt_mtchk_param *par) | ||
302 | { | ||
303 | struct xt_conntrack_mtinfo1 *info = par->matchinfo; | ||
304 | struct xt_conntrack_mtinfo2 *up; | ||
305 | int ret = conntrack_mt_check(par); | ||
306 | |||
307 | if (ret < 0) | ||
308 | return ret; | ||
309 | |||
310 | up = kmalloc(sizeof(*up), GFP_KERNEL); | ||
311 | if (up == NULL) { | ||
312 | nf_ct_l3proto_module_put(par->family); | ||
313 | return -ENOMEM; | ||
314 | } | ||
315 | |||
316 | /* | ||
317 | * The strategy here is to minimize the overhead of v1 matching, | ||
318 | * by prebuilding a v2 struct and putting the pointer into the | ||
319 | * v1 dataspace. | ||
320 | */ | ||
321 | memcpy(up, info, offsetof(typeof(*info), state_mask)); | ||
322 | up->state_mask = info->state_mask; | ||
323 | up->status_mask = info->status_mask; | ||
324 | *(void **)info = up; | ||
325 | return true; | ||
326 | } | ||
327 | |||
291 | static void conntrack_mt_destroy(const struct xt_mtdtor_param *par) | 328 | static void conntrack_mt_destroy(const struct xt_mtdtor_param *par) |
292 | { | 329 | { |
293 | nf_ct_l3proto_module_put(par->family); | 330 | nf_ct_l3proto_module_put(par->family); |
294 | } | 331 | } |
295 | 332 | ||
333 | static void conntrack_mt_destroy_v1(const struct xt_mtdtor_param *par) | ||
334 | { | ||
335 | struct xt_conntrack_mtinfo2 **info = par->matchinfo; | ||
336 | kfree(*info); | ||
337 | conntrack_mt_destroy(par); | ||
338 | } | ||
339 | |||
296 | #ifdef CONFIG_COMPAT | 340 | #ifdef CONFIG_COMPAT |
297 | struct compat_xt_conntrack_info | 341 | struct compat_xt_conntrack_info |
298 | { | 342 | { |
@@ -363,6 +407,16 @@ static struct xt_match conntrack_mt_reg[] __read_mostly = { | |||
363 | .revision = 1, | 407 | .revision = 1, |
364 | .family = NFPROTO_UNSPEC, | 408 | .family = NFPROTO_UNSPEC, |
365 | .matchsize = sizeof(struct xt_conntrack_mtinfo1), | 409 | .matchsize = sizeof(struct xt_conntrack_mtinfo1), |
410 | .match = conntrack_mt_v1, | ||
411 | .checkentry = conntrack_mt_check_v1, | ||
412 | .destroy = conntrack_mt_destroy_v1, | ||
413 | .me = THIS_MODULE, | ||
414 | }, | ||
415 | { | ||
416 | .name = "conntrack", | ||
417 | .revision = 2, | ||
418 | .family = NFPROTO_UNSPEC, | ||
419 | .matchsize = sizeof(struct xt_conntrack_mtinfo2), | ||
366 | .match = conntrack_mt, | 420 | .match = conntrack_mt, |
367 | .checkentry = conntrack_mt_check, | 421 | .checkentry = conntrack_mt_check, |
368 | .destroy = conntrack_mt_destroy, | 422 | .destroy = conntrack_mt_destroy, |
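
The xt_conntrack changes above widen the matchinfo to a revision-2 layout and keep revision 1 working by prebuilding a v2 copy at checkentry time and parking its pointer in the v1 data area, as the in-diff comment describes. A standalone sketch of that wrapping pattern, with made-up struct layouts and a union (instead of the kernel's raw pointer cast) to keep it plain C:

    #include <stdio.h>
    #include <stdlib.h>

    struct info_v1 { unsigned int flags; unsigned char state_mask; };   /* made-up v1 layout */
    struct info_v2 { unsigned int flags; unsigned short state_mask; };  /* made-up v2 layout */

    /* The v1 blob handed to the module doubles as storage for a pointer to a
     * prebuilt v2 copy, so the per-packet path only dereferences it. */
    union v1_space {
        struct info_v1 v1;
        struct info_v2 *upgraded;
    };

    static int check_v1(union v1_space *blob)            /* models the v1 checkentry hook */
    {
        struct info_v2 *up = malloc(sizeof(*up));

        if (!up)
            return -1;
        up->flags = blob->v1.flags;                      /* copy the common prefix */
        up->state_mask = blob->v1.state_mask;            /* widen the changed field */
        blob->upgraded = up;                             /* park the v2 pointer in v1 space */
        return 0;
    }

    static int match_v1(const union v1_space *blob)       /* models the v1 match hook */
    {
        const struct info_v2 *info = blob->upgraded;     /* fetch the prebuilt v2 data */

        return info->state_mask != 0;
    }

    static void destroy_v1(union v1_space *blob)          /* models the v1 destroy hook */
    {
        free(blob->upgraded);
    }

    int main(void)
    {
        union v1_space blob = { .v1 = { .flags = 1, .state_mask = 0x3 } };

        if (check_v1(&blob) == 0) {
            printf("match: %d\n", match_v1(&blob));      /* match: 1 */
            destroy_v1(&blob);
        }
        return 0;
    }

The point of prebuilding the copy is that the match hook pays only a pointer dereference per packet rather than converting the old layout every time.
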
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c index 80a322d77909..b0d6ddd82a9d 100644 --- a/net/phonet/pn_dev.c +++ b/net/phonet/pn_dev.c | |||
@@ -69,10 +69,27 @@ static struct phonet_device *__phonet_get(struct net_device *dev) | |||
69 | return NULL; | 69 | return NULL; |
70 | } | 70 | } |
71 | 71 | ||
72 | static void __phonet_device_free(struct phonet_device *pnd) | 72 | static void phonet_device_destroy(struct net_device *dev) |
73 | { | 73 | { |
74 | list_del(&pnd->list); | 74 | struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev)); |
75 | kfree(pnd); | 75 | struct phonet_device *pnd; |
76 | |||
77 | ASSERT_RTNL(); | ||
78 | |||
79 | spin_lock_bh(&pndevs->lock); | ||
80 | pnd = __phonet_get(dev); | ||
81 | if (pnd) | ||
82 | list_del(&pnd->list); | ||
83 | spin_unlock_bh(&pndevs->lock); | ||
84 | |||
85 | if (pnd) { | ||
86 | u8 addr; | ||
87 | |||
88 | for (addr = find_first_bit(pnd->addrs, 64); addr < 64; | ||
89 | addr = find_next_bit(pnd->addrs, 64, 1+addr)) | ||
90 | phonet_address_notify(RTM_DELADDR, dev, addr); | ||
91 | kfree(pnd); | ||
92 | } | ||
76 | } | 93 | } |
77 | 94 | ||
78 | struct net_device *phonet_device_get(struct net *net) | 95 | struct net_device *phonet_device_get(struct net *net) |
@@ -126,8 +143,10 @@ int phonet_address_del(struct net_device *dev, u8 addr) | |||
126 | pnd = __phonet_get(dev); | 143 | pnd = __phonet_get(dev); |
127 | if (!pnd || !test_and_clear_bit(addr >> 2, pnd->addrs)) | 144 | if (!pnd || !test_and_clear_bit(addr >> 2, pnd->addrs)) |
128 | err = -EADDRNOTAVAIL; | 145 | err = -EADDRNOTAVAIL; |
129 | else if (bitmap_empty(pnd->addrs, 64)) | 146 | else if (bitmap_empty(pnd->addrs, 64)) { |
130 | __phonet_device_free(pnd); | 147 | list_del(&pnd->list); |
148 | kfree(pnd); | ||
149 | } | ||
131 | spin_unlock_bh(&pndevs->lock); | 150 | spin_unlock_bh(&pndevs->lock); |
132 | return err; | 151 | return err; |
133 | } | 152 | } |
@@ -181,18 +200,8 @@ static int phonet_device_notify(struct notifier_block *me, unsigned long what, | |||
181 | { | 200 | { |
182 | struct net_device *dev = arg; | 201 | struct net_device *dev = arg; |
183 | 202 | ||
184 | if (what == NETDEV_UNREGISTER) { | 203 | if (what == NETDEV_UNREGISTER) |
185 | struct phonet_device_list *pndevs; | 204 | phonet_device_destroy(dev); |
186 | struct phonet_device *pnd; | ||
187 | |||
188 | /* Destroy phonet-specific device data */ | ||
189 | pndevs = phonet_device_list(dev_net(dev)); | ||
190 | spin_lock_bh(&pndevs->lock); | ||
191 | pnd = __phonet_get(dev); | ||
192 | if (pnd) | ||
193 | __phonet_device_free(pnd); | ||
194 | spin_unlock_bh(&pndevs->lock); | ||
195 | } | ||
196 | return 0; | 205 | return 0; |
197 | 206 | ||
198 | } | 207 | } |
@@ -218,11 +227,12 @@ static int phonet_init_net(struct net *net) | |||
218 | static void phonet_exit_net(struct net *net) | 227 | static void phonet_exit_net(struct net *net) |
219 | { | 228 | { |
220 | struct phonet_net *pnn = net_generic(net, phonet_net_id); | 229 | struct phonet_net *pnn = net_generic(net, phonet_net_id); |
221 | struct phonet_device *pnd, *n; | 230 | struct net_device *dev; |
222 | |||
223 | list_for_each_entry_safe(pnd, n, &pnn->pndevs.list, list) | ||
224 | __phonet_device_free(pnd); | ||
225 | 231 | ||
232 | rtnl_lock(); | ||
233 | for_each_netdev(net, dev) | ||
234 | phonet_device_destroy(dev); | ||
235 | rtnl_unlock(); | ||
226 | kfree(pnn); | 236 | kfree(pnn); |
227 | } | 237 | } |
228 | 238 | ||
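
phonet_device_destroy() above now runs under the RTNL, pulls the entry off the list under the spinlock, and sends an RTM_DELADDR notification for every address bit still set before freeing it. A plain-C sketch of that bitmap walk, with a stand-in for phonet_address_notify() (the kernel uses find_first_bit()/find_next_bit(); a simple loop does the same here):

    #include <stdint.h>
    #include <stdio.h>

    static void notify_deladdr(unsigned int bit)    /* stand-in for phonet_address_notify() */
    {
        printf("RTM_DELADDR for address bit %u\n", bit);
    }

    int main(void)
    {
        uint64_t addrs = 0;

        addrs |= 1ULL << 3;         /* pretend two addresses were configured */
        addrs |= 1ULL << 40;

        for (unsigned int bit = 0; bit < 64; bit++)  /* find_first_bit/find_next_bit equivalent */
            if (addrs & (1ULL << bit))
                notify_deladdr(bit);
        return 0;
    }
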
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c index cec4e5951681..f8b4cee434c2 100644 --- a/net/phonet/pn_netlink.c +++ b/net/phonet/pn_netlink.c | |||
@@ -32,7 +32,7 @@ | |||
32 | static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr, | 32 | static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr, |
33 | u32 pid, u32 seq, int event); | 33 | u32 pid, u32 seq, int event); |
34 | 34 | ||
35 | static void rtmsg_notify(int event, struct net_device *dev, u8 addr) | 35 | void phonet_address_notify(int event, struct net_device *dev, u8 addr) |
36 | { | 36 | { |
37 | struct sk_buff *skb; | 37 | struct sk_buff *skb; |
38 | int err = -ENOBUFS; | 38 | int err = -ENOBUFS; |
@@ -94,7 +94,7 @@ static int addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *attr) | |||
94 | else | 94 | else |
95 | err = phonet_address_del(dev, pnaddr); | 95 | err = phonet_address_del(dev, pnaddr); |
96 | if (!err) | 96 | if (!err) |
97 | rtmsg_notify(nlh->nlmsg_type, dev, pnaddr); | 97 | phonet_address_notify(nlh->nlmsg_type, dev, pnaddr); |
98 | return err; | 98 | return err; |
99 | } | 99 | } |
100 | 100 | ||
diff --git a/net/sctp/output.c b/net/sctp/output.c index b76411444515..b94c21190566 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c | |||
@@ -407,7 +407,7 @@ int sctp_packet_transmit(struct sctp_packet *packet) | |||
407 | } | 407 | } |
408 | dst = dst_clone(tp->dst); | 408 | dst = dst_clone(tp->dst); |
409 | skb_dst_set(nskb, dst); | 409 | skb_dst_set(nskb, dst); |
410 | if (dst) | 410 | if (!dst) |
411 | goto no_route; | 411 | goto no_route; |
412 | 412 | ||
413 | /* Build the SCTP header. */ | 413 | /* Build the SCTP header. */ |
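
The one-line sctp_packet_transmit() fix above inverts a botched test: the no_route error path must be taken when the cloned route is missing, not when it exists. The shape of the corrected check, with a stand-in route type:

    #include <stdio.h>

    struct route { int id; };       /* stand-in for the kernel's dst_entry */

    static int transmit(struct route *dst)
    {
        if (!dst)                   /* the fix: bail out only when no route was found */
            goto no_route;
        return 0;                   /* ... build and send the packet ... */
    no_route:
        return -1;                  /* the kernel path frees the skb and reports an error */
    }

    int main(void)
    {
        struct route r = { 1 };
        printf("%d %d\n", transmit(&r), transmit(NULL));    /* 0 -1 */
        return 0;
    }
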
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c index 843629f55763..adaa81982f74 100644 --- a/net/sunrpc/sunrpc_syms.c +++ b/net/sunrpc/sunrpc_syms.c | |||
@@ -66,6 +66,7 @@ cleanup_sunrpc(void) | |||
66 | #ifdef CONFIG_PROC_FS | 66 | #ifdef CONFIG_PROC_FS |
67 | rpc_proc_exit(); | 67 | rpc_proc_exit(); |
68 | #endif | 68 | #endif |
69 | rcu_barrier(); /* Wait for completion of call_rcu()'s */ | ||
69 | } | 70 | } |
70 | MODULE_LICENSE("GPL"); | 71 | MODULE_LICENSE("GPL"); |
71 | module_init(init_sunrpc); | 72 | module_init(init_sunrpc); |
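
The sunrpc cleanup path above gains an rcu_barrier() so that every pending call_rcu() callback finishes before the module code it points at is unloaded. A kernel-module sketch of that pattern (assumes a kernel build tree; all names here are made up):

    #include <linux/module.h>
    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <linux/rcupdate.h>

    struct item {
        struct rcu_head rcu;
        int value;
    };

    static struct item *obj;

    static void item_free_rcu(struct rcu_head *head)
    {
        kfree(container_of(head, struct item, rcu));
    }

    static int __init demo_init(void)
    {
        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        return obj ? 0 : -ENOMEM;
    }

    static void __exit demo_exit(void)
    {
        if (obj)
            call_rcu(&obj->rcu, item_free_rcu);
        rcu_barrier();  /* wait for item_free_rcu() before the module text goes away */
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
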
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c index d31ccb487730..faf54c6bf96b 100644 --- a/net/xfrm/xfrm_algo.c +++ b/net/xfrm/xfrm_algo.c | |||
@@ -292,8 +292,8 @@ static struct xfrm_algo_desc ealg_list[] = { | |||
292 | } | 292 | } |
293 | }, | 293 | }, |
294 | { | 294 | { |
295 | .name = "cbc(cast128)", | 295 | .name = "cbc(cast5)", |
296 | .compat = "cast128", | 296 | .compat = "cast5", |
297 | 297 | ||
298 | .uinfo = { | 298 | .uinfo = { |
299 | .encr = { | 299 | .encr = { |
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 5f1f86565f16..f2f7c638083e 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c | |||
@@ -668,22 +668,10 @@ static struct xfrm_state *__xfrm_state_lookup(struct net *net, xfrm_address_t *d | |||
668 | hlist_for_each_entry(x, entry, net->xfrm.state_byspi+h, byspi) { | 668 | hlist_for_each_entry(x, entry, net->xfrm.state_byspi+h, byspi) { |
669 | if (x->props.family != family || | 669 | if (x->props.family != family || |
670 | x->id.spi != spi || | 670 | x->id.spi != spi || |
671 | x->id.proto != proto) | 671 | x->id.proto != proto || |
672 | xfrm_addr_cmp(&x->id.daddr, daddr, family)) | ||
672 | continue; | 673 | continue; |
673 | 674 | ||
674 | switch (family) { | ||
675 | case AF_INET: | ||
676 | if (x->id.daddr.a4 != daddr->a4) | ||
677 | continue; | ||
678 | break; | ||
679 | case AF_INET6: | ||
680 | if (!ipv6_addr_equal((struct in6_addr *)daddr, | ||
681 | (struct in6_addr *) | ||
682 | x->id.daddr.a6)) | ||
683 | continue; | ||
684 | break; | ||
685 | } | ||
686 | |||
687 | xfrm_state_hold(x); | 675 | xfrm_state_hold(x); |
688 | return x; | 676 | return x; |
689 | } | 677 | } |
@@ -699,26 +687,11 @@ static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, xfrm_addre | |||
699 | 687 | ||
700 | hlist_for_each_entry(x, entry, net->xfrm.state_bysrc+h, bysrc) { | 688 | hlist_for_each_entry(x, entry, net->xfrm.state_bysrc+h, bysrc) { |
701 | if (x->props.family != family || | 689 | if (x->props.family != family || |
702 | x->id.proto != proto) | 690 | x->id.proto != proto || |
691 | xfrm_addr_cmp(&x->id.daddr, daddr, family) || | ||
692 | xfrm_addr_cmp(&x->props.saddr, saddr, family)) | ||
703 | continue; | 693 | continue; |
704 | 694 | ||
705 | switch (family) { | ||
706 | case AF_INET: | ||
707 | if (x->id.daddr.a4 != daddr->a4 || | ||
708 | x->props.saddr.a4 != saddr->a4) | ||
709 | continue; | ||
710 | break; | ||
711 | case AF_INET6: | ||
712 | if (!ipv6_addr_equal((struct in6_addr *)daddr, | ||
713 | (struct in6_addr *) | ||
714 | x->id.daddr.a6) || | ||
715 | !ipv6_addr_equal((struct in6_addr *)saddr, | ||
716 | (struct in6_addr *) | ||
717 | x->props.saddr.a6)) | ||
718 | continue; | ||
719 | break; | ||
720 | } | ||
721 | |||
722 | xfrm_state_hold(x); | 695 | xfrm_state_hold(x); |
723 | return x; | 696 | return x; |
724 | } | 697 | } |
@@ -1001,25 +974,11 @@ static struct xfrm_state *__find_acq_core(struct net *net, unsigned short family | |||
1001 | x->props.family != family || | 974 | x->props.family != family || |
1002 | x->km.state != XFRM_STATE_ACQ || | 975 | x->km.state != XFRM_STATE_ACQ || |
1003 | x->id.spi != 0 || | 976 | x->id.spi != 0 || |
1004 | x->id.proto != proto) | 977 | x->id.proto != proto || |
978 | xfrm_addr_cmp(&x->id.daddr, daddr, family) || | ||
979 | xfrm_addr_cmp(&x->props.saddr, saddr, family)) | ||
1005 | continue; | 980 | continue; |
1006 | 981 | ||
1007 | switch (family) { | ||
1008 | case AF_INET: | ||
1009 | if (x->id.daddr.a4 != daddr->a4 || | ||
1010 | x->props.saddr.a4 != saddr->a4) | ||
1011 | continue; | ||
1012 | break; | ||
1013 | case AF_INET6: | ||
1014 | if (!ipv6_addr_equal((struct in6_addr *)x->id.daddr.a6, | ||
1015 | (struct in6_addr *)daddr) || | ||
1016 | !ipv6_addr_equal((struct in6_addr *) | ||
1017 | x->props.saddr.a6, | ||
1018 | (struct in6_addr *)saddr)) | ||
1019 | continue; | ||
1020 | break; | ||
1021 | } | ||
1022 | |||
1023 | xfrm_state_hold(x); | 982 | xfrm_state_hold(x); |
1024 | return x; | 983 | return x; |
1025 | } | 984 | } |
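
The three xfrm_state.c hunks above replace open-coded per-family address comparisons with xfrm_addr_cmp(), leaving the lookup loops identical apart from which tuple fields they compare. A userspace sketch of that kind of family-dispatching helper, with its own union standing in for xfrm_address_t:

    #include <arpa/inet.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>

    union xfrm_addr {                       /* stand-in for xfrm_address_t */
        struct in_addr  a4;
        struct in6_addr a6;
    };

    /* returns 0 when equal, non-zero otherwise, like the kernel helper */
    static int addr_cmp(const union xfrm_addr *a, const union xfrm_addr *b,
                        unsigned short family)
    {
        switch (family) {
        case AF_INET:
            return memcmp(&a->a4, &b->a4, sizeof(a->a4));
        case AF_INET6:
            return memcmp(&a->a6, &b->a6, sizeof(a->a6));
        default:
            return -1;
        }
    }

    int main(void)
    {
        union xfrm_addr x = { 0 }, y = { 0 };

        inet_pton(AF_INET, "192.0.2.1", &x.a4);
        inet_pton(AF_INET, "192.0.2.1", &y.a4);
        printf("equal: %d\n", addr_cmp(&x, &y, AF_INET) == 0);  /* equal: 1 */
        return 0;
    }
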
diff --git a/scripts/kernel-doc b/scripts/kernel-doc index ed591e9b7d1d..b52d340d759d 100755 --- a/scripts/kernel-doc +++ b/scripts/kernel-doc | |||
@@ -1426,6 +1426,8 @@ sub dump_struct($$) { | |||
1426 | # strip comments: | 1426 | # strip comments: |
1427 | $members =~ s/\/\*.*?\*\///gos; | 1427 | $members =~ s/\/\*.*?\*\///gos; |
1428 | $nested =~ s/\/\*.*?\*\///gos; | 1428 | $nested =~ s/\/\*.*?\*\///gos; |
1429 | # strip kmemcheck_bitfield_{begin,end}.*; | ||
1430 | $members =~ s/kmemcheck_bitfield_.*?;//gos; | ||
1429 | 1431 | ||
1430 | create_parameterlist($members, ';', $file); | 1432 | create_parameterlist($members, ';', $file); |
1431 | check_sections($file, $declaration_name, "struct", $sectcheck, $struct_actual, $nested); | 1433 | check_sections($file, $declaration_name, "struct", $sectcheck, $struct_actual, $nested); |
@@ -1468,8 +1470,6 @@ sub dump_enum($$) { | |||
1468 | } | 1470 | } |
1469 | 1471 | ||
1470 | } | 1472 | } |
1471 | # strip kmemcheck_bitfield_{begin,end}.*; | ||
1472 | $members =~ s/kmemcheck_bitfield_.*?;//gos; | ||
1473 | 1473 | ||
1474 | output_declaration($declaration_name, | 1474 | output_declaration($declaration_name, |
1475 | 'enum', | 1475 | 'enum', |
diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c index 64f5ddb09ea6..5c113123ed9f 100644 --- a/scripts/pnmtologo.c +++ b/scripts/pnmtologo.c | |||
@@ -237,7 +237,7 @@ static void write_header(void) | |||
237 | fprintf(out, " * Linux logo %s\n", logoname); | 237 | fprintf(out, " * Linux logo %s\n", logoname); |
238 | fputs(" */\n\n", out); | 238 | fputs(" */\n\n", out); |
239 | fputs("#include <linux/linux_logo.h>\n\n", out); | 239 | fputs("#include <linux/linux_logo.h>\n\n", out); |
240 | fprintf(out, "static const unsigned char %s_data[] __initconst = {\n", | 240 | fprintf(out, "static unsigned char %s_data[] __initdata = {\n", |
241 | logoname); | 241 | logoname); |
242 | } | 242 | } |
243 | 243 | ||
@@ -374,7 +374,7 @@ static void write_logo_clut224(void) | |||
374 | fputs("\n};\n\n", out); | 374 | fputs("\n};\n\n", out); |
375 | 375 | ||
376 | /* write logo clut */ | 376 | /* write logo clut */ |
377 | fprintf(out, "static const unsigned char %s_clut[] __initconst = {\n", | 377 | fprintf(out, "static unsigned char %s_clut[] __initdata = {\n", |
378 | logoname); | 378 | logoname); |
379 | write_hex_cnt = 0; | 379 | write_hex_cnt = 0; |
380 | for (i = 0; i < logo_clutsize; i++) { | 380 | for (i = 0; i < logo_clutsize; i++) { |
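
The pnmtologo revert above drops const/__initconst in favour of plain __initdata for the generated logo arrays. The two annotations place init-time data in different sections, and the annotation is normally kept in step with the constness of the array; a tiny sketch of that pairing, with local no-op stand-ins for the kernel macros:

    /* stand-ins for the kernel annotations, used here only for illustration */
    #define __initdata              /* kernel: __section(".init.data"), writable init data */
    #define __initconst             /* kernel: __section(".init.rodata"), read-only init data */

    static unsigned char logo_data[4] __initdata = { 1, 2, 3, 4 };         /* not const */
    static const unsigned char logo_clut[3] __initconst = { 9, 8, 7 };     /* const */

    int main(void)
    {
        return logo_data[0] + logo_clut[0];     /* 10 */
    }
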
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c index 6f611874d10e..101c512564ec 100644 --- a/security/integrity/ima/ima_main.c +++ b/security/integrity/ima/ima_main.c | |||
@@ -238,7 +238,34 @@ out: | |||
238 | } | 238 | } |
239 | 239 | ||
240 | /* | 240 | /* |
241 | * ima_opens_get - increment file counts | 241 | * ima_counts_put - decrement file counts |
242 | * | ||
243 | * File counts are incremented in ima_path_check. On file open | ||
244 | * error, such as ETXTBSY, decrement the counts to prevent | ||
245 | * unnecessary imbalance messages. | ||
246 | */ | ||
247 | void ima_counts_put(struct path *path, int mask) | ||
248 | { | ||
249 | struct inode *inode = path->dentry->d_inode; | ||
250 | struct ima_iint_cache *iint; | ||
251 | |||
252 | if (!ima_initialized || !S_ISREG(inode->i_mode)) | ||
253 | return; | ||
254 | iint = ima_iint_find_insert_get(inode); | ||
255 | if (!iint) | ||
256 | return; | ||
257 | |||
258 | mutex_lock(&iint->mutex); | ||
259 | iint->opencount--; | ||
260 | if ((mask & MAY_WRITE) || (mask == 0)) | ||
261 | iint->writecount--; | ||
262 | else if (mask & (MAY_READ | MAY_EXEC)) | ||
263 | iint->readcount--; | ||
264 | mutex_unlock(&iint->mutex); | ||
265 | } | ||
266 | |||
267 | /* | ||
268 | * ima_counts_get - increment file counts | ||
242 | * | 269 | * |
243 | * - for IPC shm and shmat file. | 270 | * - for IPC shm and shmat file. |
244 | * - for nfsd exported files. | 271 | * - for nfsd exported files. |
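
The new ima_counts_put() above is the mirror of the existing increment path: when an open fails after the counts were already taken (ETXTBSY, for instance), the open/read/write counters are dropped again so the imbalance checks stay quiet. A userspace model of that balanced get/put, with made-up names and locally defined MAY_* flags:

    #include <stdio.h>

    #define MAY_EXEC  0x1
    #define MAY_WRITE 0x2
    #define MAY_READ  0x4

    struct counts { int open, read, write; };

    static void counts_get(struct counts *c, int mask)
    {
        c->open++;
        if ((mask & MAY_WRITE) || mask == 0)
            c->write++;
        else if (mask & (MAY_READ | MAY_EXEC))
            c->read++;
    }

    static void counts_put(struct counts *c, int mask)  /* exact mirror of counts_get() */
    {
        c->open--;
        if ((mask & MAY_WRITE) || mask == 0)
            c->write--;
        else if (mask & (MAY_READ | MAY_EXEC))
            c->read--;
    }

    int main(void)
    {
        struct counts c = { 0, 0, 0 };

        counts_get(&c, MAY_WRITE);
        counts_put(&c, MAY_WRITE);      /* e.g. the open failed with ETXTBSY */
        printf("open=%d read=%d write=%d\n", c.open, c.read, c.write);  /* all zero */
        return 0;
    }
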
diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c index 7ec94314ac0c..a0880e9c8e05 100644 --- a/security/integrity/ima/ima_queue.c +++ b/security/integrity/ima/ima_queue.c | |||
@@ -134,7 +134,8 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation, | |||
134 | } | 134 | } |
135 | out: | 135 | out: |
136 | mutex_unlock(&ima_extend_list_mutex); | 136 | mutex_unlock(&ima_extend_list_mutex); |
137 | integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, entry->template_name, | 137 | integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, |
138 | entry->template.file_name, | ||
138 | op, audit_cause, result, audit_info); | 139 | op, audit_cause, result, audit_info); |
139 | return result; | 140 | return result; |
140 | } | 141 | } |
diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c index de83608719ea..3ee0269e5bd0 100644 --- a/sound/isa/cmi8330.c +++ b/sound/isa/cmi8330.c | |||
@@ -338,7 +338,7 @@ static int __devinit snd_cmi8330_pnp(int dev, struct snd_cmi8330 *acard, | |||
338 | return -EBUSY; | 338 | return -EBUSY; |
339 | 339 | ||
340 | acard->mpu = pnp_request_card_device(card, id->devs[2].id, NULL); | 340 | acard->mpu = pnp_request_card_device(card, id->devs[2].id, NULL); |
341 | if (acard->play == NULL) | 341 | if (acard->mpu == NULL) |
342 | return -EBUSY; | 342 | return -EBUSY; |
343 | 343 | ||
344 | pdev = acard->cap; | 344 | pdev = acard->cap; |
diff --git a/sound/oss/kahlua.c b/sound/oss/kahlua.c index c180598f1710..89466b056be7 100644 --- a/sound/oss/kahlua.c +++ b/sound/oss/kahlua.c | |||
@@ -199,7 +199,7 @@ MODULE_LICENSE("GPL"); | |||
199 | */ | 199 | */ |
200 | 200 | ||
201 | static struct pci_device_id id_tbl[] = { | 201 | static struct pci_device_id id_tbl[] = { |
202 | { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_AUDIO, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, | 202 | { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5530_AUDIO), 0 }, |
203 | { } | 203 | { } |
204 | }; | 204 | }; |
205 | 205 | ||
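
This and many of the sound hunks that follow convert seven-field pci_device_id initializers to PCI_VDEVICE(), which fills the vendor/device pair, wildcards the subsystem IDs, zeroes the class fields and leaves only driver_data to spell out. A small local re-creation of the idea (the real macro in <linux/pci.h> also pastes the PCI_VENDOR_ID_<name> prefix; this simplified copy takes a numeric vendor ID, and the IDs below are made up):

    #include <stdio.h>

    #define PCI_ANY_ID (~0u)

    /* vendor, device, wildcard subsystem IDs, zero class fields; driver_data follows */
    #define MY_PCI_VDEVICE(vend, dev) \
        (vend), (dev), PCI_ANY_ID, PCI_ANY_ID, 0, 0

    struct pci_id {                 /* same field order as struct pci_device_id */
        unsigned int vendor, device;
        unsigned int subvendor, subdevice;
        unsigned int class, class_mask;
        unsigned long driver_data;
    };

    static const struct pci_id tbl[] = {
        { MY_PCI_VDEVICE(0x1234, 0x5678), 0 },  /* one row, driver_data = 0 */
        { 0, }
    };

    int main(void)
    {
        printf("vendor=%#x subvendor=%#x data=%lu\n",
               tbl[0].vendor, tbl[0].subvendor, tbl[0].driver_data);
        return 0;
    }
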
diff --git a/sound/oss/mpu401.c b/sound/oss/mpu401.c index 6c0a770ed054..1b2316f35b1f 100644 --- a/sound/oss/mpu401.c +++ b/sound/oss/mpu401.c | |||
@@ -926,31 +926,21 @@ static struct midi_operations mpu401_midi_operations[MAX_MIDI_DEV]; | |||
926 | static void mpu401_chk_version(int n, struct mpu_config *devc) | 926 | static void mpu401_chk_version(int n, struct mpu_config *devc) |
927 | { | 927 | { |
928 | int tmp; | 928 | int tmp; |
929 | unsigned long flags; | ||
930 | 929 | ||
931 | devc->version = devc->revision = 0; | 930 | devc->version = devc->revision = 0; |
932 | 931 | ||
933 | spin_lock_irqsave(&devc->lock,flags); | 932 | tmp = mpu_cmd(n, 0xAC, 0); |
934 | if ((tmp = mpu_cmd(n, 0xAC, 0)) < 0) | 933 | if (tmp < 0) |
935 | { | ||
936 | spin_unlock_irqrestore(&devc->lock,flags); | ||
937 | return; | 934 | return; |
938 | } | ||
939 | if ((tmp & 0xf0) > 0x20) /* Why it's larger than 2.x ??? */ | 935 | if ((tmp & 0xf0) > 0x20) /* Why it's larger than 2.x ??? */ |
940 | { | ||
941 | spin_unlock_irqrestore(&devc->lock,flags); | ||
942 | return; | 936 | return; |
943 | } | ||
944 | devc->version = tmp; | 937 | devc->version = tmp; |
945 | 938 | ||
946 | if ((tmp = mpu_cmd(n, 0xAD, 0)) < 0) | 939 | if ((tmp = mpu_cmd(n, 0xAD, 0)) < 0) { |
947 | { | ||
948 | devc->version = 0; | 940 | devc->version = 0; |
949 | spin_unlock_irqrestore(&devc->lock,flags); | ||
950 | return; | 941 | return; |
951 | } | 942 | } |
952 | devc->revision = tmp; | 943 | devc->revision = tmp; |
953 | spin_unlock_irqrestore(&devc->lock,flags); | ||
954 | } | 944 | } |
955 | 945 | ||
956 | int attach_mpu401(struct address_info *hw_config, struct module *owner) | 946 | int attach_mpu401(struct address_info *hw_config, struct module *owner) |
diff --git a/sound/pci/atiixp.c b/sound/pci/atiixp.c index 71515ddb4593..d6752dff2a44 100644 --- a/sound/pci/atiixp.c +++ b/sound/pci/atiixp.c | |||
@@ -287,10 +287,10 @@ struct atiixp { | |||
287 | /* | 287 | /* |
288 | */ | 288 | */ |
289 | static struct pci_device_id snd_atiixp_ids[] = { | 289 | static struct pci_device_id snd_atiixp_ids[] = { |
290 | { 0x1002, 0x4341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* SB200 */ | 290 | { PCI_VDEVICE(ATI, 0x4341), 0 }, /* SB200 */ |
291 | { 0x1002, 0x4361, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* SB300 */ | 291 | { PCI_VDEVICE(ATI, 0x4361), 0 }, /* SB300 */ |
292 | { 0x1002, 0x4370, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* SB400 */ | 292 | { PCI_VDEVICE(ATI, 0x4370), 0 }, /* SB400 */ |
293 | { 0x1002, 0x4382, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* SB600 */ | 293 | { PCI_VDEVICE(ATI, 0x4382), 0 }, /* SB600 */ |
294 | { 0, } | 294 | { 0, } |
295 | }; | 295 | }; |
296 | 296 | ||
diff --git a/sound/pci/atiixp_modem.c b/sound/pci/atiixp_modem.c index c3136cccc559..e7e147bf8eb2 100644 --- a/sound/pci/atiixp_modem.c +++ b/sound/pci/atiixp_modem.c | |||
@@ -262,8 +262,8 @@ struct atiixp_modem { | |||
262 | /* | 262 | /* |
263 | */ | 263 | */ |
264 | static struct pci_device_id snd_atiixp_ids[] = { | 264 | static struct pci_device_id snd_atiixp_ids[] = { |
265 | { 0x1002, 0x434d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* SB200 */ | 265 | { PCI_VDEVICE(ATI, 0x434d), 0 }, /* SB200 */ |
266 | { 0x1002, 0x4378, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* SB400 */ | 266 | { PCI_VDEVICE(ATI, 0x4378), 0 }, /* SB400 */ |
267 | { 0, } | 267 | { 0, } |
268 | }; | 268 | }; |
269 | 269 | ||
diff --git a/sound/pci/au88x0/au8810.c b/sound/pci/au88x0/au8810.c index fce22c7af0ea..c0e8c6b295cb 100644 --- a/sound/pci/au88x0/au8810.c +++ b/sound/pci/au88x0/au8810.c | |||
@@ -1,8 +1,7 @@ | |||
1 | #include "au8810.h" | 1 | #include "au8810.h" |
2 | #include "au88x0.h" | 2 | #include "au88x0.h" |
3 | static struct pci_device_id snd_vortex_ids[] = { | 3 | static struct pci_device_id snd_vortex_ids[] = { |
4 | {PCI_VENDOR_ID_AUREAL, PCI_DEVICE_ID_AUREAL_ADVANTAGE, | 4 | {PCI_VDEVICE(AUREAL, PCI_DEVICE_ID_AUREAL_ADVANTAGE), 1,}, |
5 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1,}, | ||
6 | {0,} | 5 | {0,} |
7 | }; | 6 | }; |
8 | 7 | ||
diff --git a/sound/pci/au88x0/au8820.c b/sound/pci/au88x0/au8820.c index d1fbcce07257..a6527330df58 100644 --- a/sound/pci/au88x0/au8820.c +++ b/sound/pci/au88x0/au8820.c | |||
@@ -1,8 +1,7 @@ | |||
1 | #include "au8820.h" | 1 | #include "au8820.h" |
2 | #include "au88x0.h" | 2 | #include "au88x0.h" |
3 | static struct pci_device_id snd_vortex_ids[] = { | 3 | static struct pci_device_id snd_vortex_ids[] = { |
4 | {PCI_VENDOR_ID_AUREAL, PCI_DEVICE_ID_AUREAL_VORTEX_1, | 4 | {PCI_VDEVICE(AUREAL, PCI_DEVICE_ID_AUREAL_VORTEX_1), 0,}, |
5 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0,}, | ||
6 | {0,} | 5 | {0,} |
7 | }; | 6 | }; |
8 | 7 | ||
diff --git a/sound/pci/au88x0/au8830.c b/sound/pci/au88x0/au8830.c index d4f2717c14fb..6c702ad4352a 100644 --- a/sound/pci/au88x0/au8830.c +++ b/sound/pci/au88x0/au8830.c | |||
@@ -1,8 +1,7 @@ | |||
1 | #include "au8830.h" | 1 | #include "au8830.h" |
2 | #include "au88x0.h" | 2 | #include "au88x0.h" |
3 | static struct pci_device_id snd_vortex_ids[] = { | 3 | static struct pci_device_id snd_vortex_ids[] = { |
4 | {PCI_VENDOR_ID_AUREAL, PCI_DEVICE_ID_AUREAL_VORTEX_2, | 4 | {PCI_VDEVICE(AUREAL, PCI_DEVICE_ID_AUREAL_VORTEX_2), 0,}, |
5 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0,}, | ||
6 | {0,} | 5 | {0,} |
7 | }; | 6 | }; |
8 | 7 | ||
diff --git a/sound/pci/ca0106/ca0106_main.c b/sound/pci/ca0106/ca0106_main.c index 57b992a5c057..f24bf1ecb36d 100644 --- a/sound/pci/ca0106/ca0106_main.c +++ b/sound/pci/ca0106/ca0106_main.c | |||
@@ -1876,7 +1876,7 @@ static int snd_ca0106_resume(struct pci_dev *pci) | |||
1876 | 1876 | ||
1877 | // PCI IDs | 1877 | // PCI IDs |
1878 | static struct pci_device_id snd_ca0106_ids[] = { | 1878 | static struct pci_device_id snd_ca0106_ids[] = { |
1879 | { 0x1102, 0x0007, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* Audigy LS or Live 24bit */ | 1879 | { PCI_VDEVICE(CREATIVE, 0x0007), 0 }, /* Audigy LS or Live 24bit */ |
1880 | { 0, } | 1880 | { 0, } |
1881 | }; | 1881 | }; |
1882 | MODULE_DEVICE_TABLE(pci, snd_ca0106_ids); | 1882 | MODULE_DEVICE_TABLE(pci, snd_ca0106_ids); |
diff --git a/sound/pci/cmipci.c b/sound/pci/cmipci.c index 449fe02f666e..ddcd4a9fd7e6 100644 --- a/sound/pci/cmipci.c +++ b/sound/pci/cmipci.c | |||
@@ -2797,11 +2797,11 @@ static inline void snd_cmipci_proc_init(struct cmipci *cm) {} | |||
2797 | 2797 | ||
2798 | 2798 | ||
2799 | static struct pci_device_id snd_cmipci_ids[] = { | 2799 | static struct pci_device_id snd_cmipci_ids[] = { |
2800 | {PCI_VENDOR_ID_CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8338A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, | 2800 | {PCI_VDEVICE(CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8338A), 0}, |
2801 | {PCI_VENDOR_ID_CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8338B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, | 2801 | {PCI_VDEVICE(CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8338B), 0}, |
2802 | {PCI_VENDOR_ID_CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8738, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, | 2802 | {PCI_VDEVICE(CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8738), 0}, |
2803 | {PCI_VENDOR_ID_CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8738B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, | 2803 | {PCI_VDEVICE(CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8738B), 0}, |
2804 | {PCI_VENDOR_ID_AL, PCI_DEVICE_ID_CMEDIA_CM8738, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, | 2804 | {PCI_VDEVICE(AL, PCI_DEVICE_ID_CMEDIA_CM8738), 0}, |
2805 | {0,}, | 2805 | {0,}, |
2806 | }; | 2806 | }; |
2807 | 2807 | ||
diff --git a/sound/pci/cs4281.c b/sound/pci/cs4281.c index f6286f84a221..e2e0359bb056 100644 --- a/sound/pci/cs4281.c +++ b/sound/pci/cs4281.c | |||
@@ -495,7 +495,7 @@ struct cs4281 { | |||
495 | static irqreturn_t snd_cs4281_interrupt(int irq, void *dev_id); | 495 | static irqreturn_t snd_cs4281_interrupt(int irq, void *dev_id); |
496 | 496 | ||
497 | static struct pci_device_id snd_cs4281_ids[] = { | 497 | static struct pci_device_id snd_cs4281_ids[] = { |
498 | { 0x1013, 0x6005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* CS4281 */ | 498 | { PCI_VDEVICE(CIRRUS, 0x6005), 0, }, /* CS4281 */ |
499 | { 0, } | 499 | { 0, } |
500 | }; | 500 | }; |
501 | 501 | ||
diff --git a/sound/pci/cs46xx/cs46xx.c b/sound/pci/cs46xx/cs46xx.c index c9b3e3d48cbc..033aec430117 100644 --- a/sound/pci/cs46xx/cs46xx.c +++ b/sound/pci/cs46xx/cs46xx.c | |||
@@ -65,9 +65,9 @@ module_param_array(mmap_valid, bool, NULL, 0444); | |||
65 | MODULE_PARM_DESC(mmap_valid, "Support OSS mmap."); | 65 | MODULE_PARM_DESC(mmap_valid, "Support OSS mmap."); |
66 | 66 | ||
67 | static struct pci_device_id snd_cs46xx_ids[] = { | 67 | static struct pci_device_id snd_cs46xx_ids[] = { |
68 | { 0x1013, 0x6001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* CS4280 */ | 68 | { PCI_VDEVICE(CIRRUS, 0x6001), 0, }, /* CS4280 */ |
69 | { 0x1013, 0x6003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* CS4612 */ | 69 | { PCI_VDEVICE(CIRRUS, 0x6003), 0, }, /* CS4612 */ |
70 | { 0x1013, 0x6004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* CS4615 */ | 70 | { PCI_VDEVICE(CIRRUS, 0x6004), 0, }, /* CS4615 */ |
71 | { 0, } | 71 | { 0, } |
72 | }; | 72 | }; |
73 | 73 | ||
diff --git a/sound/pci/emu10k1/emu10k1.c b/sound/pci/emu10k1/emu10k1.c index c7f3b994101c..168af67d938e 100644 --- a/sound/pci/emu10k1/emu10k1.c +++ b/sound/pci/emu10k1/emu10k1.c | |||
@@ -77,9 +77,9 @@ MODULE_PARM_DESC(subsystem, "Force card subsystem model."); | |||
77 | * Class 0401: 1102:0008 (rev 00) Subsystem: 1102:1001 -> Audigy2 Value Model:SB0400 | 77 | * Class 0401: 1102:0008 (rev 00) Subsystem: 1102:1001 -> Audigy2 Value Model:SB0400 |
78 | */ | 78 | */ |
79 | static struct pci_device_id snd_emu10k1_ids[] = { | 79 | static struct pci_device_id snd_emu10k1_ids[] = { |
80 | { 0x1102, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* EMU10K1 */ | 80 | { PCI_VDEVICE(CREATIVE, 0x0002), 0 }, /* EMU10K1 */ |
81 | { 0x1102, 0x0004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, /* Audigy */ | 81 | { PCI_VDEVICE(CREATIVE, 0x0004), 1 }, /* Audigy */ |
82 | { 0x1102, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, /* Audigy 2 Value SB0400 */ | 82 | { PCI_VDEVICE(CREATIVE, 0x0008), 1 }, /* Audigy 2 Value SB0400 */ |
83 | { 0, } | 83 | { 0, } |
84 | }; | 84 | }; |
85 | 85 | ||
diff --git a/sound/pci/emu10k1/emu10k1x.c b/sound/pci/emu10k1/emu10k1x.c index 4d3ad793e98f..36e08bd2b3cc 100644 --- a/sound/pci/emu10k1/emu10k1x.c +++ b/sound/pci/emu10k1/emu10k1x.c | |||
@@ -1607,7 +1607,7 @@ static void __devexit snd_emu10k1x_remove(struct pci_dev *pci) | |||
1607 | 1607 | ||
1608 | // PCI IDs | 1608 | // PCI IDs |
1609 | static struct pci_device_id snd_emu10k1x_ids[] = { | 1609 | static struct pci_device_id snd_emu10k1x_ids[] = { |
1610 | { 0x1102, 0x0006, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* Dell OEM version (EMU10K1) */ | 1610 | { PCI_VDEVICE(CREATIVE, 0x0006), 0 }, /* Dell OEM version (EMU10K1) */ |
1611 | { 0, } | 1611 | { 0, } |
1612 | }; | 1612 | }; |
1613 | MODULE_DEVICE_TABLE(pci, snd_emu10k1x_ids); | 1613 | MODULE_DEVICE_TABLE(pci, snd_emu10k1x_ids); |
diff --git a/sound/pci/ens1370.c b/sound/pci/ens1370.c index 18f4d1e98c46..2b82c5c723e1 100644 --- a/sound/pci/ens1370.c +++ b/sound/pci/ens1370.c | |||
@@ -445,12 +445,12 @@ static irqreturn_t snd_audiopci_interrupt(int irq, void *dev_id); | |||
445 | 445 | ||
446 | static struct pci_device_id snd_audiopci_ids[] = { | 446 | static struct pci_device_id snd_audiopci_ids[] = { |
447 | #ifdef CHIP1370 | 447 | #ifdef CHIP1370 |
448 | { 0x1274, 0x5000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* ES1370 */ | 448 | { PCI_VDEVICE(ENSONIQ, 0x5000), 0, }, /* ES1370 */ |
449 | #endif | 449 | #endif |
450 | #ifdef CHIP1371 | 450 | #ifdef CHIP1371 |
451 | { 0x1274, 0x1371, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* ES1371 */ | 451 | { PCI_VDEVICE(ENSONIQ, 0x1371), 0, }, /* ES1371 */ |
452 | { 0x1274, 0x5880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* ES1373 - CT5880 */ | 452 | { PCI_VDEVICE(ENSONIQ, 0x5880), 0, }, /* ES1373 - CT5880 */ |
453 | { 0x1102, 0x8938, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* Ectiva EV1938 */ | 453 | { PCI_VDEVICE(ECTIVA, 0x8938), 0, }, /* Ectiva EV1938 */ |
454 | #endif | 454 | #endif |
455 | { 0, } | 455 | { 0, } |
456 | }; | 456 | }; |
diff --git a/sound/pci/es1938.c b/sound/pci/es1938.c index fbd2ac09aa34..820318ee62c1 100644 --- a/sound/pci/es1938.c +++ b/sound/pci/es1938.c | |||
@@ -244,7 +244,7 @@ struct es1938 { | |||
244 | static irqreturn_t snd_es1938_interrupt(int irq, void *dev_id); | 244 | static irqreturn_t snd_es1938_interrupt(int irq, void *dev_id); |
245 | 245 | ||
246 | static struct pci_device_id snd_es1938_ids[] = { | 246 | static struct pci_device_id snd_es1938_ids[] = { |
247 | { 0x125d, 0x1969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* Solo-1 */ | 247 | { PCI_VDEVICE(ESS, 0x1969), 0, }, /* Solo-1 */ |
248 | { 0, } | 248 | { 0, } |
249 | }; | 249 | }; |
250 | 250 | ||
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c index 84cc49ca9148..1988582d1ab8 100644 --- a/sound/pci/hda/patch_analog.c +++ b/sound/pci/hda/patch_analog.c | |||
@@ -72,6 +72,7 @@ struct ad198x_spec { | |||
72 | hda_nid_t private_dac_nids[AUTO_CFG_MAX_OUTS]; | 72 | hda_nid_t private_dac_nids[AUTO_CFG_MAX_OUTS]; |
73 | 73 | ||
74 | unsigned int jack_present :1; | 74 | unsigned int jack_present :1; |
75 | unsigned int inv_jack_detect:1; | ||
75 | 76 | ||
76 | #ifdef CONFIG_SND_HDA_POWER_SAVE | 77 | #ifdef CONFIG_SND_HDA_POWER_SAVE |
77 | struct hda_loopback_check loopback; | 78 | struct hda_loopback_check loopback; |
@@ -669,39 +670,13 @@ static struct hda_input_mux ad1986a_automic_capture_source = { | |||
669 | }, | 670 | }, |
670 | }; | 671 | }; |
671 | 672 | ||
672 | static struct snd_kcontrol_new ad1986a_laptop_eapd_mixers[] = { | 673 | static struct snd_kcontrol_new ad1986a_laptop_master_mixers[] = { |
673 | HDA_BIND_VOL("Master Playback Volume", &ad1986a_laptop_master_vol), | 674 | HDA_BIND_VOL("Master Playback Volume", &ad1986a_laptop_master_vol), |
674 | HDA_BIND_SW("Master Playback Switch", &ad1986a_laptop_master_sw), | 675 | HDA_BIND_SW("Master Playback Switch", &ad1986a_laptop_master_sw), |
675 | HDA_CODEC_VOLUME("PCM Playback Volume", 0x03, 0x0, HDA_OUTPUT), | ||
676 | HDA_CODEC_MUTE("PCM Playback Switch", 0x03, 0x0, HDA_OUTPUT), | ||
677 | HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x17, 0, HDA_OUTPUT), | ||
678 | HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x17, 0, HDA_OUTPUT), | ||
679 | HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT), | ||
680 | HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT), | ||
681 | HDA_CODEC_VOLUME("Mic Boost", 0x0f, 0x0, HDA_OUTPUT), | ||
682 | HDA_CODEC_VOLUME("Capture Volume", 0x12, 0x0, HDA_OUTPUT), | ||
683 | HDA_CODEC_MUTE("Capture Switch", 0x12, 0x0, HDA_OUTPUT), | ||
684 | { | ||
685 | .iface = SNDRV_CTL_ELEM_IFACE_MIXER, | ||
686 | .name = "Capture Source", | ||
687 | .info = ad198x_mux_enum_info, | ||
688 | .get = ad198x_mux_enum_get, | ||
689 | .put = ad198x_mux_enum_put, | ||
690 | }, | ||
691 | { | ||
692 | .iface = SNDRV_CTL_ELEM_IFACE_MIXER, | ||
693 | .name = "External Amplifier", | ||
694 | .info = ad198x_eapd_info, | ||
695 | .get = ad198x_eapd_get, | ||
696 | .put = ad198x_eapd_put, | ||
697 | .private_value = 0x1b | (1 << 8), /* port-D, inversed */ | ||
698 | }, | ||
699 | { } /* end */ | 676 | { } /* end */ |
700 | }; | 677 | }; |
701 | 678 | ||
702 | static struct snd_kcontrol_new ad1986a_samsung_mixers[] = { | 679 | static struct snd_kcontrol_new ad1986a_laptop_eapd_mixers[] = { |
703 | HDA_BIND_VOL("Master Playback Volume", &ad1986a_laptop_master_vol), | ||
704 | HDA_BIND_SW("Master Playback Switch", &ad1986a_laptop_master_sw), | ||
705 | HDA_CODEC_VOLUME("PCM Playback Volume", 0x03, 0x0, HDA_OUTPUT), | 680 | HDA_CODEC_VOLUME("PCM Playback Volume", 0x03, 0x0, HDA_OUTPUT), |
706 | HDA_CODEC_MUTE("PCM Playback Switch", 0x03, 0x0, HDA_OUTPUT), | 681 | HDA_CODEC_MUTE("PCM Playback Switch", 0x03, 0x0, HDA_OUTPUT), |
707 | HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT), | 682 | HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT), |
@@ -727,6 +702,12 @@ static struct snd_kcontrol_new ad1986a_samsung_mixers[] = { | |||
727 | { } /* end */ | 702 | { } /* end */ |
728 | }; | 703 | }; |
729 | 704 | ||
705 | static struct snd_kcontrol_new ad1986a_laptop_intmic_mixers[] = { | ||
706 | HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x17, 0, HDA_OUTPUT), | ||
707 | HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x17, 0, HDA_OUTPUT), | ||
708 | { } /* end */ | ||
709 | }; | ||
710 | |||
730 | /* re-connect the mic boost input according to the jack sensing */ | 711 | /* re-connect the mic boost input according to the jack sensing */ |
731 | static void ad1986a_automic(struct hda_codec *codec) | 712 | static void ad1986a_automic(struct hda_codec *codec) |
732 | { | 713 | { |
@@ -776,8 +757,9 @@ static void ad1986a_hp_automute(struct hda_codec *codec) | |||
776 | unsigned int present; | 757 | unsigned int present; |
777 | 758 | ||
778 | present = snd_hda_codec_read(codec, 0x1a, 0, AC_VERB_GET_PIN_SENSE, 0); | 759 | present = snd_hda_codec_read(codec, 0x1a, 0, AC_VERB_GET_PIN_SENSE, 0); |
779 | /* Lenovo N100 seems to report the reversed bit for HP jack-sensing */ | 760 | spec->jack_present = !!(present & 0x80000000); |
780 | spec->jack_present = !(present & 0x80000000); | 761 | if (spec->inv_jack_detect) |
762 | spec->jack_present = !spec->jack_present; | ||
781 | ad1986a_update_hp(codec); | 763 | ad1986a_update_hp(codec); |
782 | } | 764 | } |
783 | 765 | ||
@@ -816,7 +798,7 @@ static int ad1986a_hp_master_sw_put(struct snd_kcontrol *kcontrol, | |||
816 | return change; | 798 | return change; |
817 | } | 799 | } |
818 | 800 | ||
819 | static struct snd_kcontrol_new ad1986a_laptop_automute_mixers[] = { | 801 | static struct snd_kcontrol_new ad1986a_automute_master_mixers[] = { |
820 | HDA_BIND_VOL("Master Playback Volume", &ad1986a_laptop_master_vol), | 802 | HDA_BIND_VOL("Master Playback Volume", &ad1986a_laptop_master_vol), |
821 | { | 803 | { |
822 | .iface = SNDRV_CTL_ELEM_IFACE_MIXER, | 804 | .iface = SNDRV_CTL_ELEM_IFACE_MIXER, |
@@ -826,33 +808,10 @@ static struct snd_kcontrol_new ad1986a_laptop_automute_mixers[] = { | |||
826 | .put = ad1986a_hp_master_sw_put, | 808 | .put = ad1986a_hp_master_sw_put, |
827 | .private_value = HDA_COMPOSE_AMP_VAL(0x1a, 3, 0, HDA_OUTPUT), | 809 | .private_value = HDA_COMPOSE_AMP_VAL(0x1a, 3, 0, HDA_OUTPUT), |
828 | }, | 810 | }, |
829 | HDA_CODEC_VOLUME("PCM Playback Volume", 0x03, 0x0, HDA_OUTPUT), | ||
830 | HDA_CODEC_MUTE("PCM Playback Switch", 0x03, 0x0, HDA_OUTPUT), | ||
831 | HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x17, 0x0, HDA_OUTPUT), | ||
832 | HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x17, 0x0, HDA_OUTPUT), | ||
833 | HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT), | ||
834 | HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT), | ||
835 | HDA_CODEC_VOLUME("Mic Boost", 0x0f, 0x0, HDA_OUTPUT), | ||
836 | HDA_CODEC_VOLUME("Capture Volume", 0x12, 0x0, HDA_OUTPUT), | ||
837 | HDA_CODEC_MUTE("Capture Switch", 0x12, 0x0, HDA_OUTPUT), | ||
838 | { | ||
839 | .iface = SNDRV_CTL_ELEM_IFACE_MIXER, | ||
840 | .name = "Capture Source", | ||
841 | .info = ad198x_mux_enum_info, | ||
842 | .get = ad198x_mux_enum_get, | ||
843 | .put = ad198x_mux_enum_put, | ||
844 | }, | ||
845 | { | ||
846 | .iface = SNDRV_CTL_ELEM_IFACE_MIXER, | ||
847 | .name = "External Amplifier", | ||
848 | .info = ad198x_eapd_info, | ||
849 | .get = ad198x_eapd_get, | ||
850 | .put = ad198x_eapd_put, | ||
851 | .private_value = 0x1b | (1 << 8), /* port-D, inversed */ | ||
852 | }, | ||
853 | { } /* end */ | 811 | { } /* end */ |
854 | }; | 812 | }; |
855 | 813 | ||
814 | |||
856 | /* | 815 | /* |
857 | * initialization verbs | 816 | * initialization verbs |
858 | */ | 817 | */ |
@@ -981,6 +940,27 @@ static struct hda_verb ad1986a_hp_init_verbs[] = { | |||
981 | {} | 940 | {} |
982 | }; | 941 | }; |
983 | 942 | ||
943 | static void ad1986a_samsung_p50_unsol_event(struct hda_codec *codec, | ||
944 | unsigned int res) | ||
945 | { | ||
946 | switch (res >> 26) { | ||
947 | case AD1986A_HP_EVENT: | ||
948 | ad1986a_hp_automute(codec); | ||
949 | break; | ||
950 | case AD1986A_MIC_EVENT: | ||
951 | ad1986a_automic(codec); | ||
952 | break; | ||
953 | } | ||
954 | } | ||
955 | |||
956 | static int ad1986a_samsung_p50_init(struct hda_codec *codec) | ||
957 | { | ||
958 | ad198x_init(codec); | ||
959 | ad1986a_hp_automute(codec); | ||
960 | ad1986a_automic(codec); | ||
961 | return 0; | ||
962 | } | ||
963 | |||
984 | 964 | ||
985 | /* models */ | 965 | /* models */ |
986 | enum { | 966 | enum { |
@@ -991,6 +971,7 @@ enum { | |||
991 | AD1986A_LAPTOP_AUTOMUTE, | 971 | AD1986A_LAPTOP_AUTOMUTE, |
992 | AD1986A_ULTRA, | 972 | AD1986A_ULTRA, |
993 | AD1986A_SAMSUNG, | 973 | AD1986A_SAMSUNG, |
974 | AD1986A_SAMSUNG_P50, | ||
994 | AD1986A_MODELS | 975 | AD1986A_MODELS |
995 | }; | 976 | }; |
996 | 977 | ||
@@ -1002,6 +983,7 @@ static const char *ad1986a_models[AD1986A_MODELS] = { | |||
1002 | [AD1986A_LAPTOP_AUTOMUTE] = "laptop-automute", | 983 | [AD1986A_LAPTOP_AUTOMUTE] = "laptop-automute", |
1003 | [AD1986A_ULTRA] = "ultra", | 984 | [AD1986A_ULTRA] = "ultra", |
1004 | [AD1986A_SAMSUNG] = "samsung", | 985 | [AD1986A_SAMSUNG] = "samsung", |
986 | [AD1986A_SAMSUNG_P50] = "samsung-p50", | ||
1005 | }; | 987 | }; |
1006 | 988 | ||
1007 | static struct snd_pci_quirk ad1986a_cfg_tbl[] = { | 989 | static struct snd_pci_quirk ad1986a_cfg_tbl[] = { |
@@ -1024,6 +1006,7 @@ static struct snd_pci_quirk ad1986a_cfg_tbl[] = { | |||
1024 | SND_PCI_QUIRK(0x1179, 0xff40, "Toshiba", AD1986A_LAPTOP_EAPD), | 1006 | SND_PCI_QUIRK(0x1179, 0xff40, "Toshiba", AD1986A_LAPTOP_EAPD), |
1025 | SND_PCI_QUIRK(0x144d, 0xb03c, "Samsung R55", AD1986A_3STACK), | 1007 | SND_PCI_QUIRK(0x144d, 0xb03c, "Samsung R55", AD1986A_3STACK), |
1026 | SND_PCI_QUIRK(0x144d, 0xc01e, "FSC V2060", AD1986A_LAPTOP), | 1008 | SND_PCI_QUIRK(0x144d, 0xc01e, "FSC V2060", AD1986A_LAPTOP), |
1009 | SND_PCI_QUIRK(0x144d, 0xc024, "Samsung P50", AD1986A_SAMSUNG_P50), | ||
1027 | SND_PCI_QUIRK(0x144d, 0xc027, "Samsung Q1", AD1986A_ULTRA), | 1010 | SND_PCI_QUIRK(0x144d, 0xc027, "Samsung Q1", AD1986A_ULTRA), |
1028 | SND_PCI_QUIRK_MASK(0x144d, 0xff00, 0xc000, "Samsung", AD1986A_SAMSUNG), | 1011 | SND_PCI_QUIRK_MASK(0x144d, 0xff00, 0xc000, "Samsung", AD1986A_SAMSUNG), |
1029 | SND_PCI_QUIRK(0x144d, 0xc504, "Samsung Q35", AD1986A_3STACK), | 1012 | SND_PCI_QUIRK(0x144d, 0xc504, "Samsung Q35", AD1986A_3STACK), |
@@ -1111,7 +1094,10 @@ static int patch_ad1986a(struct hda_codec *codec) | |||
1111 | spec->multiout.dac_nids = ad1986a_laptop_dac_nids; | 1094 | spec->multiout.dac_nids = ad1986a_laptop_dac_nids; |
1112 | break; | 1095 | break; |
1113 | case AD1986A_LAPTOP_EAPD: | 1096 | case AD1986A_LAPTOP_EAPD: |
1114 | spec->mixers[0] = ad1986a_laptop_eapd_mixers; | 1097 | spec->num_mixers = 3; |
1098 | spec->mixers[0] = ad1986a_laptop_master_mixers; | ||
1099 | spec->mixers[1] = ad1986a_laptop_eapd_mixers; | ||
1100 | spec->mixers[2] = ad1986a_laptop_intmic_mixers; | ||
1115 | spec->num_init_verbs = 2; | 1101 | spec->num_init_verbs = 2; |
1116 | spec->init_verbs[1] = ad1986a_eapd_init_verbs; | 1102 | spec->init_verbs[1] = ad1986a_eapd_init_verbs; |
1117 | spec->multiout.max_channels = 2; | 1103 | spec->multiout.max_channels = 2; |
@@ -1122,7 +1108,9 @@ static int patch_ad1986a(struct hda_codec *codec) | |||
1122 | spec->input_mux = &ad1986a_laptop_eapd_capture_source; | 1108 | spec->input_mux = &ad1986a_laptop_eapd_capture_source; |
1123 | break; | 1109 | break; |
1124 | case AD1986A_SAMSUNG: | 1110 | case AD1986A_SAMSUNG: |
1125 | spec->mixers[0] = ad1986a_samsung_mixers; | 1111 | spec->num_mixers = 2; |
1112 | spec->mixers[0] = ad1986a_laptop_master_mixers; | ||
1113 | spec->mixers[1] = ad1986a_laptop_eapd_mixers; | ||
1126 | spec->num_init_verbs = 3; | 1114 | spec->num_init_verbs = 3; |
1127 | spec->init_verbs[1] = ad1986a_eapd_init_verbs; | 1115 | spec->init_verbs[1] = ad1986a_eapd_init_verbs; |
1128 | spec->init_verbs[2] = ad1986a_automic_verbs; | 1116 | spec->init_verbs[2] = ad1986a_automic_verbs; |
@@ -1135,8 +1123,28 @@ static int patch_ad1986a(struct hda_codec *codec) | |||
1135 | codec->patch_ops.unsol_event = ad1986a_automic_unsol_event; | 1123 | codec->patch_ops.unsol_event = ad1986a_automic_unsol_event; |
1136 | codec->patch_ops.init = ad1986a_automic_init; | 1124 | codec->patch_ops.init = ad1986a_automic_init; |
1137 | break; | 1125 | break; |
1126 | case AD1986A_SAMSUNG_P50: | ||
1127 | spec->num_mixers = 2; | ||
1128 | spec->mixers[0] = ad1986a_automute_master_mixers; | ||
1129 | spec->mixers[1] = ad1986a_laptop_eapd_mixers; | ||
1130 | spec->num_init_verbs = 4; | ||
1131 | spec->init_verbs[1] = ad1986a_eapd_init_verbs; | ||
1132 | spec->init_verbs[2] = ad1986a_automic_verbs; | ||
1133 | spec->init_verbs[3] = ad1986a_hp_init_verbs; | ||
1134 | spec->multiout.max_channels = 2; | ||
1135 | spec->multiout.num_dacs = 1; | ||
1136 | spec->multiout.dac_nids = ad1986a_laptop_dac_nids; | ||
1137 | if (!is_jack_available(codec, 0x25)) | ||
1138 | spec->multiout.dig_out_nid = 0; | ||
1139 | spec->input_mux = &ad1986a_automic_capture_source; | ||
1140 | codec->patch_ops.unsol_event = ad1986a_samsung_p50_unsol_event; | ||
1141 | codec->patch_ops.init = ad1986a_samsung_p50_init; | ||
1142 | break; | ||
1138 | case AD1986A_LAPTOP_AUTOMUTE: | 1143 | case AD1986A_LAPTOP_AUTOMUTE: |
1139 | spec->mixers[0] = ad1986a_laptop_automute_mixers; | 1144 | spec->num_mixers = 3; |
1145 | spec->mixers[0] = ad1986a_automute_master_mixers; | ||
1146 | spec->mixers[1] = ad1986a_laptop_eapd_mixers; | ||
1147 | spec->mixers[2] = ad1986a_laptop_intmic_mixers; | ||
1140 | spec->num_init_verbs = 3; | 1148 | spec->num_init_verbs = 3; |
1141 | spec->init_verbs[1] = ad1986a_eapd_init_verbs; | 1149 | spec->init_verbs[1] = ad1986a_eapd_init_verbs; |
1142 | spec->init_verbs[2] = ad1986a_hp_init_verbs; | 1150 | spec->init_verbs[2] = ad1986a_hp_init_verbs; |
@@ -1148,6 +1156,10 @@ static int patch_ad1986a(struct hda_codec *codec) | |||
1148 | spec->input_mux = &ad1986a_laptop_eapd_capture_source; | 1156 | spec->input_mux = &ad1986a_laptop_eapd_capture_source; |
1149 | codec->patch_ops.unsol_event = ad1986a_hp_unsol_event; | 1157 | codec->patch_ops.unsol_event = ad1986a_hp_unsol_event; |
1150 | codec->patch_ops.init = ad1986a_hp_init; | 1158 | codec->patch_ops.init = ad1986a_hp_init; |
1159 | /* Lenovo N100 seems to report the reversed bit | ||
1160 | * for HP jack-sensing | ||
1161 | */ | ||
1162 | spec->inv_jack_detect = 1; | ||
1151 | break; | 1163 | break; |
1152 | case AD1986A_ULTRA: | 1164 | case AD1986A_ULTRA: |
1153 | spec->mixers[0] = ad1986a_laptop_eapd_mixers; | 1165 | spec->mixers[0] = ad1986a_laptop_eapd_mixers; |
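
The patch_analog changes above split the shared mixer arrays, add a samsung-p50 model, and turn the hard-coded Lenovo N100 inversion into an inv_jack_detect flag that only that model sets. A minimal model of the resulting jack-presence check, with a stand-in presence bit:

    #include <stdio.h>

    #define PRESENCE_BIT 0x80000000u        /* stand-in for the HP pin-sense presence bit */

    static int jack_present(unsigned int sense, int inv_jack_detect)
    {
        int present = !!(sense & PRESENCE_BIT);

        return inv_jack_detect ? !present : present;    /* inverted only where flagged */
    }

    int main(void)
    {
        printf("%d %d\n", jack_present(PRESENCE_BIT, 0),
               jack_present(PRESENCE_BIT, 1));           /* 1 0 */
        return 0;
    }
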
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 334533197425..3a8e58c483df 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -945,12 +945,13 @@ static void alc_fix_pll_init(struct hda_codec *codec, hda_nid_t nid, | |||
945 | static void alc_automute_pin(struct hda_codec *codec) | 945 | static void alc_automute_pin(struct hda_codec *codec) |
946 | { | 946 | { |
947 | struct alc_spec *spec = codec->spec; | 947 | struct alc_spec *spec = codec->spec; |
948 | unsigned int present; | 948 | unsigned int present, pincap; |
949 | unsigned int nid = spec->autocfg.hp_pins[0]; | 949 | unsigned int nid = spec->autocfg.hp_pins[0]; |
950 | int i; | 950 | int i; |
951 | 951 | ||
952 | /* need to execute and sync at first */ | 952 | pincap = snd_hda_query_pin_caps(codec, nid); |
953 | snd_hda_codec_read(codec, nid, 0, AC_VERB_SET_PIN_SENSE, 0); | 953 | if (pincap & AC_PINCAP_TRIG_REQ) /* need trigger? */ |
954 | snd_hda_codec_read(codec, nid, 0, AC_VERB_SET_PIN_SENSE, 0); | ||
954 | present = snd_hda_codec_read(codec, nid, 0, | 955 | present = snd_hda_codec_read(codec, nid, 0, |
955 | AC_VERB_GET_PIN_SENSE, 0); | 956 | AC_VERB_GET_PIN_SENSE, 0); |
956 | spec->jack_present = (present & AC_PINSENSE_PRESENCE) != 0; | 957 | spec->jack_present = (present & AC_PINSENSE_PRESENCE) != 0; |
@@ -1392,7 +1393,7 @@ static struct hda_verb alc888_fujitsu_xa3530_verbs[] = { | |||
1392 | static void alc_automute_amp(struct hda_codec *codec) | 1393 | static void alc_automute_amp(struct hda_codec *codec) |
1393 | { | 1394 | { |
1394 | struct alc_spec *spec = codec->spec; | 1395 | struct alc_spec *spec = codec->spec; |
1395 | unsigned int val, mute; | 1396 | unsigned int val, mute, pincap; |
1396 | hda_nid_t nid; | 1397 | hda_nid_t nid; |
1397 | int i; | 1398 | int i; |
1398 | 1399 | ||
@@ -1401,6 +1402,10 @@ static void alc_automute_amp(struct hda_codec *codec) | |||
1401 | nid = spec->autocfg.hp_pins[i]; | 1402 | nid = spec->autocfg.hp_pins[i]; |
1402 | if (!nid) | 1403 | if (!nid) |
1403 | break; | 1404 | break; |
1405 | pincap = snd_hda_query_pin_caps(codec, nid); | ||
1406 | if (pincap & AC_PINCAP_TRIG_REQ) /* need trigger? */ | ||
1407 | snd_hda_codec_read(codec, nid, 0, | ||
1408 | AC_VERB_SET_PIN_SENSE, 0); | ||
1404 | val = snd_hda_codec_read(codec, nid, 0, | 1409 | val = snd_hda_codec_read(codec, nid, 0, |
1405 | AC_VERB_GET_PIN_SENSE, 0); | 1410 | AC_VERB_GET_PIN_SENSE, 0); |
1406 | if (val & AC_PINSENSE_PRESENCE) { | 1411 | if (val & AC_PINSENSE_PRESENCE) { |
@@ -1471,6 +1476,10 @@ static struct hda_verb alc888_acer_aspire_4930g_verbs[] = { | |||
1471 | static struct hda_verb alc888_acer_aspire_6530g_verbs[] = { | 1476 | static struct hda_verb alc888_acer_aspire_6530g_verbs[] = { |
1472 | /* Bias voltage on for external mic port */ | 1477 | /* Bias voltage on for external mic port */ |
1473 | {0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN | PIN_VREF80}, | 1478 | {0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN | PIN_VREF80}, |
1479 | /* Front Mic: set to PIN_IN (empty by default) */ | ||
1480 | {0x12, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN}, | ||
1481 | /* Unselect Front Mic by default in input mixer 3 */ | ||
1482 | {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0xb)}, | ||
1474 | /* Enable unsolicited event for HP jack */ | 1483 | /* Enable unsolicited event for HP jack */ |
1475 | {0x15, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN}, | 1484 | {0x15, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN}, |
1476 | /* Enable speaker output */ | 1485 | /* Enable speaker output */ |
@@ -1560,18 +1569,22 @@ static struct hda_input_mux alc888_2_capture_sources[2] = { | |||
1560 | static struct hda_input_mux alc888_acer_aspire_6530_sources[2] = { | 1569 | static struct hda_input_mux alc888_acer_aspire_6530_sources[2] = { |
1561 | /* Interal mic only available on one ADC */ | 1570 | /* Interal mic only available on one ADC */ |
1562 | { | 1571 | { |
1563 | .num_items = 3, | 1572 | .num_items = 5, |
1564 | .items = { | 1573 | .items = { |
1565 | { "Ext Mic", 0x0 }, | 1574 | { "Ext Mic", 0x0 }, |
1575 | { "Line In", 0x2 }, | ||
1566 | { "CD", 0x4 }, | 1576 | { "CD", 0x4 }, |
1577 | { "Input Mix", 0xa }, | ||
1567 | { "Int Mic", 0xb }, | 1578 | { "Int Mic", 0xb }, |
1568 | }, | 1579 | }, |
1569 | }, | 1580 | }, |
1570 | { | 1581 | { |
1571 | .num_items = 2, | 1582 | .num_items = 4, |
1572 | .items = { | 1583 | .items = { |
1573 | { "Ext Mic", 0x0 }, | 1584 | { "Ext Mic", 0x0 }, |
1585 | { "Line In", 0x2 }, | ||
1574 | { "CD", 0x4 }, | 1586 | { "CD", 0x4 }, |
1587 | { "Input Mix", 0xa }, | ||
1575 | }, | 1588 | }, |
1576 | } | 1589 | } |
1577 | }; | 1590 | }; |
@@ -1639,6 +1652,17 @@ static void alc888_acer_aspire_4930g_init_hook(struct hda_codec *codec) | |||
1639 | alc_automute_amp(codec); | 1652 | alc_automute_amp(codec); |
1640 | } | 1653 | } |
1641 | 1654 | ||
1655 | static void alc888_acer_aspire_6530g_init_hook(struct hda_codec *codec) | ||
1656 | { | ||
1657 | struct alc_spec *spec = codec->spec; | ||
1658 | |||
1659 | spec->autocfg.hp_pins[0] = 0x15; | ||
1660 | spec->autocfg.speaker_pins[0] = 0x14; | ||
1661 | spec->autocfg.speaker_pins[1] = 0x16; | ||
1662 | spec->autocfg.speaker_pins[2] = 0x17; | ||
1663 | alc_automute_amp(codec); | ||
1664 | } | ||
1665 | |||
1642 | static void alc889_acer_aspire_8930g_init_hook(struct hda_codec *codec) | 1666 | static void alc889_acer_aspire_8930g_init_hook(struct hda_codec *codec) |
1643 | { | 1667 | { |
1644 | struct alc_spec *spec = codec->spec; | 1668 | struct alc_spec *spec = codec->spec; |
@@ -8189,6 +8213,8 @@ static struct snd_kcontrol_new alc888_acer_aspire_6530_mixer[] = { | |||
8189 | HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT), | 8213 | HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT), |
8190 | HDA_CODEC_VOLUME("LFE Playback Volume", 0x0f, 0x0, HDA_OUTPUT), | 8214 | HDA_CODEC_VOLUME("LFE Playback Volume", 0x0f, 0x0, HDA_OUTPUT), |
8191 | HDA_BIND_MUTE("LFE Playback Switch", 0x0f, 2, HDA_INPUT), | 8215 | HDA_BIND_MUTE("LFE Playback Switch", 0x0f, 2, HDA_INPUT), |
8216 | HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT), | ||
8217 | HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT), | ||
8192 | HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT), | 8218 | HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT), |
8193 | HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT), | 8219 | HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT), |
8194 | HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), | 8220 | HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), |
@@ -9064,7 +9090,7 @@ static struct snd_pci_quirk alc883_cfg_tbl[] = { | |||
9064 | SND_PCI_QUIRK(0x1025, 0x0157, "Acer X3200", ALC883_AUTO), | 9090 | SND_PCI_QUIRK(0x1025, 0x0157, "Acer X3200", ALC883_AUTO), |
9065 | SND_PCI_QUIRK(0x1025, 0x0158, "Acer AX1700-U3700A", ALC883_AUTO), | 9091 | SND_PCI_QUIRK(0x1025, 0x0158, "Acer AX1700-U3700A", ALC883_AUTO), |
9066 | SND_PCI_QUIRK(0x1025, 0x015e, "Acer Aspire 6930G", | 9092 | SND_PCI_QUIRK(0x1025, 0x015e, "Acer Aspire 6930G", |
9067 | ALC888_ACER_ASPIRE_4930G), | 9093 | ALC888_ACER_ASPIRE_6530G), |
9068 | SND_PCI_QUIRK(0x1025, 0x0166, "Acer Aspire 6530G", | 9094 | SND_PCI_QUIRK(0x1025, 0x0166, "Acer Aspire 6530G", |
9069 | ALC888_ACER_ASPIRE_6530G), | 9095 | ALC888_ACER_ASPIRE_6530G), |
9070 | /* default Acer -- disabled as it causes more problems. | 9096 | /* default Acer -- disabled as it causes more problems. |
@@ -9317,7 +9343,7 @@ static struct alc_config_preset alc883_presets[] = { | |||
9317 | ARRAY_SIZE(alc888_2_capture_sources), | 9343 | ARRAY_SIZE(alc888_2_capture_sources), |
9318 | .input_mux = alc888_acer_aspire_6530_sources, | 9344 | .input_mux = alc888_acer_aspire_6530_sources, |
9319 | .unsol_event = alc_automute_amp_unsol_event, | 9345 | .unsol_event = alc_automute_amp_unsol_event, |
9320 | .init_hook = alc888_acer_aspire_4930g_init_hook, | 9346 | .init_hook = alc888_acer_aspire_6530g_init_hook, |
9321 | }, | 9347 | }, |
9322 | [ALC888_ACER_ASPIRE_8930G] = { | 9348 | [ALC888_ACER_ASPIRE_8930G] = { |
9323 | .mixers = { alc888_base_mixer, | 9349 | .mixers = { alc888_base_mixer, |
@@ -12437,6 +12463,8 @@ static int alc268_parse_auto_config(struct hda_codec *codec) | |||
12437 | if (err < 0) | 12463 | if (err < 0) |
12438 | return err; | 12464 | return err; |
12439 | 12465 | ||
12466 | alc_ssid_check(codec, 0x15, 0x1b, 0x14); | ||
12467 | |||
12440 | return 1; | 12468 | return 1; |
12441 | } | 12469 | } |
12442 | 12470 | ||
@@ -13345,6 +13373,8 @@ static int alc269_parse_auto_config(struct hda_codec *codec) | |||
13345 | if (!spec->cap_mixer && !spec->no_analog) | 13373 | if (!spec->cap_mixer && !spec->no_analog) |
13346 | set_capture_mixer(spec); | 13374 | set_capture_mixer(spec); |
13347 | 13375 | ||
13376 | alc_ssid_check(codec, 0x15, 0x1b, 0x14); | ||
13377 | |||
13348 | return 1; | 13378 | return 1; |
13349 | } | 13379 | } |
13350 | 13380 | ||
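
Both patch_realtek hunks above stop sending the pin-sense trigger unconditionally: the pin capabilities are queried first and AC_VERB_SET_PIN_SENSE is issued only when AC_PINCAP_TRIG_REQ says the pin needs it. A standalone model of that query-then-maybe-trigger flow, with stand-in bit values:

    #include <stdio.h>

    #define PINCAP_TRIG_REQ   (1u << 1)     /* stand-in bit: trigger required */
    #define PINSENSE_PRESENCE (1u << 31)    /* stand-in bit: jack present */

    struct pin { unsigned int caps; unsigned int sense; int triggered; };

    static unsigned int read_pin_sense(struct pin *p)
    {
        if (p->caps & PINCAP_TRIG_REQ)      /* only poke pins that ask for a trigger */
            p->triggered = 1;               /* models AC_VERB_SET_PIN_SENSE */
        return p->sense;                    /* models AC_VERB_GET_PIN_SENSE */
    }

    int main(void)
    {
        struct pin hp = { .caps = PINCAP_TRIG_REQ, .sense = PINSENSE_PRESENCE };

        printf("present=%d triggered=%d\n",
               !!(read_pin_sense(&hp) & PINSENSE_PRESENCE), hp.triggered); /* 1 1 */
        return 0;
    }
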
diff --git a/sound/pci/ice1712/ice1712.c b/sound/pci/ice1712/ice1712.c index 0d0cdbdb4486..cecf1ffeeaaa 100644 --- a/sound/pci/ice1712/ice1712.c +++ b/sound/pci/ice1712/ice1712.c | |||
@@ -107,7 +107,7 @@ MODULE_PARM_DESC(dxr_enable, "Enable DXR support for Terratec DMX6FIRE."); | |||
107 | 107 | ||
108 | 108 | ||
109 | static const struct pci_device_id snd_ice1712_ids[] = { | 109 | static const struct pci_device_id snd_ice1712_ids[] = { |
110 | { PCI_VENDOR_ID_ICE, PCI_DEVICE_ID_ICE_1712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* ICE1712 */ | 110 | { PCI_VDEVICE(ICE, PCI_DEVICE_ID_ICE_1712), 0 }, /* ICE1712 */ |
111 | { 0, } | 111 | { 0, } |
112 | }; | 112 | }; |
113 | 113 | ||
diff --git a/sound/pci/ice1712/ice1724.c b/sound/pci/ice1712/ice1724.c index 36ade77cf371..cc84a831eb21 100644 --- a/sound/pci/ice1712/ice1724.c +++ b/sound/pci/ice1712/ice1724.c | |||
@@ -93,7 +93,7 @@ MODULE_PARM_DESC(model, "Use the given board model."); | |||
93 | 93 | ||
94 | /* Both VT1720 and VT1724 have the same PCI IDs */ | 94 | /* Both VT1720 and VT1724 have the same PCI IDs */ |
95 | static const struct pci_device_id snd_vt1724_ids[] = { | 95 | static const struct pci_device_id snd_vt1724_ids[] = { |
96 | { PCI_VENDOR_ID_ICE, PCI_DEVICE_ID_VT1724, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, | 96 | { PCI_VDEVICE(ICE, PCI_DEVICE_ID_VT1724), 0 }, |
97 | { 0, } | 97 | { 0, } |
98 | }; | 98 | }; |
99 | 99 | ||
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c index 8aa5687f392a..171ada535209 100644 --- a/sound/pci/intel8x0.c +++ b/sound/pci/intel8x0.c | |||
@@ -421,29 +421,29 @@ struct intel8x0 { | |||
421 | }; | 421 | }; |
422 | 422 | ||
423 | static struct pci_device_id snd_intel8x0_ids[] = { | 423 | static struct pci_device_id snd_intel8x0_ids[] = { |
424 | { 0x8086, 0x2415, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* 82801AA */ | 424 | { PCI_VDEVICE(INTEL, 0x2415), DEVICE_INTEL }, /* 82801AA */ |
425 | { 0x8086, 0x2425, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* 82901AB */ | 425 | { PCI_VDEVICE(INTEL, 0x2425), DEVICE_INTEL }, /* 82901AB */ |
426 | { 0x8086, 0x2445, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* 82801BA */ | 426 | { PCI_VDEVICE(INTEL, 0x2445), DEVICE_INTEL }, /* 82801BA */ |
427 | { 0x8086, 0x2485, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* ICH3 */ | 427 | { PCI_VDEVICE(INTEL, 0x2485), DEVICE_INTEL }, /* ICH3 */ |
428 | { 0x8086, 0x24c5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL_ICH4 }, /* ICH4 */ | 428 | { PCI_VDEVICE(INTEL, 0x24c5), DEVICE_INTEL_ICH4 }, /* ICH4 */ |
429 | { 0x8086, 0x24d5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL_ICH4 }, /* ICH5 */ | 429 | { PCI_VDEVICE(INTEL, 0x24d5), DEVICE_INTEL_ICH4 }, /* ICH5 */ |
430 | { 0x8086, 0x25a6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL_ICH4 }, /* ESB */ | 430 | { PCI_VDEVICE(INTEL, 0x25a6), DEVICE_INTEL_ICH4 }, /* ESB */ |
431 | { 0x8086, 0x266e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL_ICH4 }, /* ICH6 */ | 431 | { PCI_VDEVICE(INTEL, 0x266e), DEVICE_INTEL_ICH4 }, /* ICH6 */ |
432 | { 0x8086, 0x27de, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL_ICH4 }, /* ICH7 */ | 432 | { PCI_VDEVICE(INTEL, 0x27de), DEVICE_INTEL_ICH4 }, /* ICH7 */ |
433 | { 0x8086, 0x2698, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL_ICH4 }, /* ESB2 */ | 433 | { PCI_VDEVICE(INTEL, 0x2698), DEVICE_INTEL_ICH4 }, /* ESB2 */ |
434 | { 0x8086, 0x7195, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* 440MX */ | 434 | { PCI_VDEVICE(INTEL, 0x7195), DEVICE_INTEL }, /* 440MX */ |
435 | { 0x1039, 0x7012, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_SIS }, /* SI7012 */ | 435 | { PCI_VDEVICE(SI, 0x7012), DEVICE_SIS }, /* SI7012 */ |
436 | { 0x10de, 0x01b1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_NFORCE }, /* NFORCE */ | 436 | { PCI_VDEVICE(NVIDIA, 0x01b1), DEVICE_NFORCE }, /* NFORCE */ |
437 | { 0x10de, 0x003a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_NFORCE }, /* MCP04 */ | 437 | { PCI_VDEVICE(NVIDIA, 0x003a), DEVICE_NFORCE }, /* MCP04 */ |
438 | { 0x10de, 0x006a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_NFORCE }, /* NFORCE2 */ | 438 | { PCI_VDEVICE(NVIDIA, 0x006a), DEVICE_NFORCE }, /* NFORCE2 */ |
439 | { 0x10de, 0x0059, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_NFORCE }, /* CK804 */ | 439 | { PCI_VDEVICE(NVIDIA, 0x0059), DEVICE_NFORCE }, /* CK804 */ |
440 | { 0x10de, 0x008a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_NFORCE }, /* CK8 */ | 440 | { PCI_VDEVICE(NVIDIA, 0x008a), DEVICE_NFORCE }, /* CK8 */ |
441 | { 0x10de, 0x00da, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_NFORCE }, /* NFORCE3 */ | 441 | { PCI_VDEVICE(NVIDIA, 0x00da), DEVICE_NFORCE }, /* NFORCE3 */ |
442 | { 0x10de, 0x00ea, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_NFORCE }, /* CK8S */ | 442 | { PCI_VDEVICE(NVIDIA, 0x00ea), DEVICE_NFORCE }, /* CK8S */ |
443 | { 0x10de, 0x026b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_NFORCE }, /* MCP51 */ | 443 | { PCI_VDEVICE(NVIDIA, 0x026b), DEVICE_NFORCE }, /* MCP51 */ |
444 | { 0x1022, 0x746d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* AMD8111 */ | 444 | { PCI_VDEVICE(AMD, 0x746d), DEVICE_INTEL }, /* AMD8111 */ |
445 | { 0x1022, 0x7445, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* AMD768 */ | 445 | { PCI_VDEVICE(AMD, 0x7445), DEVICE_INTEL }, /* AMD768 */ |
446 | { 0x10b9, 0x5455, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_ALI }, /* Ali5455 */ | 446 | { PCI_VDEVICE(AL, 0x5455), DEVICE_ALI }, /* Ali5455 */ |
447 | { 0, } | 447 | { 0, } |
448 | }; | 448 | }; |
449 | 449 | ||
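Editor's note: the intel8x0 table above, like the ice1712/ice1724 tables before it and the intel8x0m, mixart, nm256, rme32, rme96, sonicvibes, via82xx and ymfpci tables below, is converted from open-coded seven-field initializers to PCI_VDEVICE(). The conversion is purely mechanical; the macro from include/linux/pci.h of this era expands roughly as sketched below (quoted from memory, so treat the exact form as approximate).

    /* approximate definition, for reference only */
    #define PCI_VDEVICE(vendor, device)             \
            PCI_VENDOR_ID_##vendor, (device),       \
            PCI_ANY_ID, PCI_ANY_ID, 0, 0

    /* hence the two forms in the hunk above are equivalent: */
    { 0x8086, 0x2415, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL },
    { PCI_VDEVICE(INTEL, 0x2415), DEVICE_INTEL },   /* PCI_VENDOR_ID_INTEL == 0x8086 */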
diff --git a/sound/pci/intel8x0m.c b/sound/pci/intel8x0m.c index 6ec0fc50d6be..9e7d12e7673f 100644 --- a/sound/pci/intel8x0m.c +++ b/sound/pci/intel8x0m.c | |||
@@ -220,24 +220,24 @@ struct intel8x0m { | |||
220 | }; | 220 | }; |
221 | 221 | ||
222 | static struct pci_device_id snd_intel8x0m_ids[] = { | 222 | static struct pci_device_id snd_intel8x0m_ids[] = { |
223 | { 0x8086, 0x2416, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* 82801AA */ | 223 | { PCI_VDEVICE(INTEL, 0x2416), DEVICE_INTEL }, /* 82801AA */ |
224 | { 0x8086, 0x2426, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* 82901AB */ | 224 | { PCI_VDEVICE(INTEL, 0x2426), DEVICE_INTEL }, /* 82901AB */ |
225 | { 0x8086, 0x2446, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* 82801BA */ | 225 | { PCI_VDEVICE(INTEL, 0x2446), DEVICE_INTEL }, /* 82801BA */ |
226 | { 0x8086, 0x2486, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* ICH3 */ | 226 | { PCI_VDEVICE(INTEL, 0x2486), DEVICE_INTEL }, /* ICH3 */ |
227 | { 0x8086, 0x24c6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* ICH4 */ | 227 | { PCI_VDEVICE(INTEL, 0x24c6), DEVICE_INTEL }, /* ICH4 */ |
228 | { 0x8086, 0x24d6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* ICH5 */ | 228 | { PCI_VDEVICE(INTEL, 0x24d6), DEVICE_INTEL }, /* ICH5 */ |
229 | { 0x8086, 0x266d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* ICH6 */ | 229 | { PCI_VDEVICE(INTEL, 0x266d), DEVICE_INTEL }, /* ICH6 */ |
230 | { 0x8086, 0x27dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* ICH7 */ | 230 | { PCI_VDEVICE(INTEL, 0x27dd), DEVICE_INTEL }, /* ICH7 */ |
231 | { 0x8086, 0x7196, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* 440MX */ | 231 | { PCI_VDEVICE(INTEL, 0x7196), DEVICE_INTEL }, /* 440MX */ |
232 | { 0x1022, 0x7446, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* AMD768 */ | 232 | { PCI_VDEVICE(AMD, 0x7446), DEVICE_INTEL }, /* AMD768 */ |
233 | { 0x1039, 0x7013, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_SIS }, /* SI7013 */ | 233 | { PCI_VDEVICE(SI, 0x7013), DEVICE_SIS }, /* SI7013 */ |
234 | { 0x10de, 0x01c1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_NFORCE }, /* NFORCE */ | 234 | { PCI_VDEVICE(NVIDIA, 0x01c1), DEVICE_NFORCE }, /* NFORCE */ |
235 | { 0x10de, 0x0069, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_NFORCE }, /* NFORCE2 */ | 235 | { PCI_VDEVICE(NVIDIA, 0x0069), DEVICE_NFORCE }, /* NFORCE2 */ |
236 | { 0x10de, 0x0089, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_NFORCE }, /* NFORCE2s */ | 236 | { PCI_VDEVICE(NVIDIA, 0x0089), DEVICE_NFORCE }, /* NFORCE2s */ |
237 | { 0x10de, 0x00d9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_NFORCE }, /* NFORCE3 */ | 237 | { PCI_VDEVICE(NVIDIA, 0x00d9), DEVICE_NFORCE }, /* NFORCE3 */ |
238 | #if 0 | 238 | #if 0 |
239 | { 0x1022, 0x746d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* AMD8111 */ | 239 | { PCI_VDEVICE(AMD, 0x746d), DEVICE_INTEL }, /* AMD8111 */ |
240 | { 0x10b9, 0x5455, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_ALI }, /* Ali5455 */ | 240 | { PCI_VDEVICE(AL, 0x5455), DEVICE_ALI }, /* Ali5455 */ |
241 | #endif | 241 | #endif |
242 | { 0, } | 242 | { 0, } |
243 | }; | 243 | }; |
diff --git a/sound/pci/lx6464es/lx6464es.c b/sound/pci/lx6464es/lx6464es.c index 18da2ef04d09..11b8c6514b3d 100644 --- a/sound/pci/lx6464es/lx6464es.c +++ b/sound/pci/lx6464es/lx6464es.c | |||
@@ -654,13 +654,12 @@ static int __devinit lx_init_ethersound_config(struct lx6464es *chip) | |||
654 | int i; | 654 | int i; |
655 | u32 orig_conf_es = lx_dsp_reg_read(chip, eReg_CONFES); | 655 | u32 orig_conf_es = lx_dsp_reg_read(chip, eReg_CONFES); |
656 | 656 | ||
657 | u32 default_conf_es = (64 << IOCR_OUTPUTS_OFFSET) | | 657 | /* configure 64 io channels */ |
658 | u32 conf_es = (orig_conf_es & CONFES_READ_PART_MASK) | | ||
658 | (64 << IOCR_INPUTS_OFFSET) | | 659 | (64 << IOCR_INPUTS_OFFSET) | |
660 | (64 << IOCR_OUTPUTS_OFFSET) | | ||
659 | (FREQ_RATIO_SINGLE_MODE << FREQ_RATIO_OFFSET); | 661 | (FREQ_RATIO_SINGLE_MODE << FREQ_RATIO_OFFSET); |
660 | 662 | ||
661 | u32 conf_es = (orig_conf_es & CONFES_READ_PART_MASK) | ||
662 | | (default_conf_es & CONFES_WRITE_PART_MASK); | ||
663 | |||
664 | snd_printdd("->lx_init_ethersound\n"); | 663 | snd_printdd("->lx_init_ethersound\n"); |
665 | 664 | ||
666 | chip->freq_ratio = FREQ_RATIO_SINGLE_MODE; | 665 | chip->freq_ratio = FREQ_RATIO_SINGLE_MODE; |
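Editor's note: the side-by-side rendering of the lx6464es hunk above is hard to follow. The net effect is that the intermediate default_conf_es value is dropped and the EtherSound config word is composed in a single read-modify-write expression; a clean rendering of the resulting lines (all identifiers come from the hunk itself) is:

    /* keep the read-only part of CONFES, configure 64 inputs, 64 outputs
     * and single-rate mode in one expression */
    u32 orig_conf_es = lx_dsp_reg_read(chip, eReg_CONFES);
    u32 conf_es = (orig_conf_es & CONFES_READ_PART_MASK) |
                  (64 << IOCR_INPUTS_OFFSET) |
                  (64 << IOCR_OUTPUTS_OFFSET) |
                  (FREQ_RATIO_SINGLE_MODE << FREQ_RATIO_OFFSET);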
diff --git a/sound/pci/mixart/mixart.c b/sound/pci/mixart/mixart.c index 82bc5b9e7629..a83d1968a845 100644 --- a/sound/pci/mixart/mixart.c +++ b/sound/pci/mixart/mixart.c | |||
@@ -61,7 +61,7 @@ MODULE_PARM_DESC(enable, "Enable Digigram " CARD_NAME " soundcard."); | |||
61 | */ | 61 | */ |
62 | 62 | ||
63 | static struct pci_device_id snd_mixart_ids[] = { | 63 | static struct pci_device_id snd_mixart_ids[] = { |
64 | { 0x1057, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* MC8240 */ | 64 | { PCI_VDEVICE(MOTOROLA, 0x0003), 0, }, /* MC8240 */ |
65 | { 0, } | 65 | { 0, } |
66 | }; | 66 | }; |
67 | 67 | ||
diff --git a/sound/pci/nm256/nm256.c b/sound/pci/nm256/nm256.c index 522a040855d4..97a0731331a1 100644 --- a/sound/pci/nm256/nm256.c +++ b/sound/pci/nm256/nm256.c | |||
@@ -263,9 +263,9 @@ struct nm256 { | |||
263 | * PCI ids | 263 | * PCI ids |
264 | */ | 264 | */ |
265 | static struct pci_device_id snd_nm256_ids[] = { | 265 | static struct pci_device_id snd_nm256_ids[] = { |
266 | {PCI_VENDOR_ID_NEOMAGIC, PCI_DEVICE_ID_NEOMAGIC_NM256AV_AUDIO, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, | 266 | {PCI_VDEVICE(NEOMAGIC, PCI_DEVICE_ID_NEOMAGIC_NM256AV_AUDIO), 0}, |
267 | {PCI_VENDOR_ID_NEOMAGIC, PCI_DEVICE_ID_NEOMAGIC_NM256ZX_AUDIO, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, | 267 | {PCI_VDEVICE(NEOMAGIC, PCI_DEVICE_ID_NEOMAGIC_NM256ZX_AUDIO), 0}, |
268 | {PCI_VENDOR_ID_NEOMAGIC, PCI_DEVICE_ID_NEOMAGIC_NM256XL_PLUS_AUDIO, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, | 268 | {PCI_VDEVICE(NEOMAGIC, PCI_DEVICE_ID_NEOMAGIC_NM256XL_PLUS_AUDIO), 0}, |
269 | {0,}, | 269 | {0,}, |
270 | }; | 270 | }; |
271 | 271 | ||
diff --git a/sound/pci/oxygen/oxygen_mixer.c b/sound/pci/oxygen/oxygen_mixer.c index 304da169bfdc..5401c547c4e3 100644 --- a/sound/pci/oxygen/oxygen_mixer.c +++ b/sound/pci/oxygen/oxygen_mixer.c | |||
@@ -575,8 +575,10 @@ static int ac97_switch_put(struct snd_kcontrol *ctl, | |||
575 | static int ac97_volume_info(struct snd_kcontrol *ctl, | 575 | static int ac97_volume_info(struct snd_kcontrol *ctl, |
576 | struct snd_ctl_elem_info *info) | 576 | struct snd_ctl_elem_info *info) |
577 | { | 577 | { |
578 | int stereo = (ctl->private_value >> 16) & 1; | ||
579 | |||
578 | info->type = SNDRV_CTL_ELEM_TYPE_INTEGER; | 580 | info->type = SNDRV_CTL_ELEM_TYPE_INTEGER; |
579 | info->count = 2; | 581 | info->count = stereo ? 2 : 1; |
580 | info->value.integer.min = 0; | 582 | info->value.integer.min = 0; |
581 | info->value.integer.max = 0x1f; | 583 | info->value.integer.max = 0x1f; |
582 | return 0; | 584 | return 0; |
@@ -587,6 +589,7 @@ static int ac97_volume_get(struct snd_kcontrol *ctl, | |||
587 | { | 589 | { |
588 | struct oxygen *chip = ctl->private_data; | 590 | struct oxygen *chip = ctl->private_data; |
589 | unsigned int codec = (ctl->private_value >> 24) & 1; | 591 | unsigned int codec = (ctl->private_value >> 24) & 1; |
592 | int stereo = (ctl->private_value >> 16) & 1; | ||
590 | unsigned int index = ctl->private_value & 0xff; | 593 | unsigned int index = ctl->private_value & 0xff; |
591 | u16 reg; | 594 | u16 reg; |
592 | 595 | ||
@@ -594,7 +597,8 @@ static int ac97_volume_get(struct snd_kcontrol *ctl, | |||
594 | reg = oxygen_read_ac97(chip, codec, index); | 597 | reg = oxygen_read_ac97(chip, codec, index); |
595 | mutex_unlock(&chip->mutex); | 598 | mutex_unlock(&chip->mutex); |
596 | value->value.integer.value[0] = 31 - (reg & 0x1f); | 599 | value->value.integer.value[0] = 31 - (reg & 0x1f); |
597 | value->value.integer.value[1] = 31 - ((reg >> 8) & 0x1f); | 600 | if (stereo) |
601 | value->value.integer.value[1] = 31 - ((reg >> 8) & 0x1f); | ||
598 | return 0; | 602 | return 0; |
599 | } | 603 | } |
600 | 604 | ||
@@ -603,6 +607,7 @@ static int ac97_volume_put(struct snd_kcontrol *ctl, | |||
603 | { | 607 | { |
604 | struct oxygen *chip = ctl->private_data; | 608 | struct oxygen *chip = ctl->private_data; |
605 | unsigned int codec = (ctl->private_value >> 24) & 1; | 609 | unsigned int codec = (ctl->private_value >> 24) & 1; |
610 | int stereo = (ctl->private_value >> 16) & 1; | ||
606 | unsigned int index = ctl->private_value & 0xff; | 611 | unsigned int index = ctl->private_value & 0xff; |
607 | u16 oldreg, newreg; | 612 | u16 oldreg, newreg; |
608 | int change; | 613 | int change; |
@@ -612,8 +617,11 @@ static int ac97_volume_put(struct snd_kcontrol *ctl, | |||
612 | newreg = oldreg; | 617 | newreg = oldreg; |
613 | newreg = (newreg & ~0x1f) | | 618 | newreg = (newreg & ~0x1f) | |
614 | (31 - (value->value.integer.value[0] & 0x1f)); | 619 | (31 - (value->value.integer.value[0] & 0x1f)); |
615 | newreg = (newreg & ~0x1f00) | | 620 | if (stereo) |
616 | ((31 - (value->value.integer.value[0] & 0x1f)) << 8); | 621 | newreg = (newreg & ~0x1f00) | |
622 | ((31 - (value->value.integer.value[1] & 0x1f)) << 8); | ||
623 | else | ||
624 | newreg = (newreg & ~0x1f00) | ((newreg & 0x1f) << 8); | ||
617 | change = newreg != oldreg; | 625 | change = newreg != oldreg; |
618 | if (change) | 626 | if (change) |
619 | oxygen_write_ac97(chip, codec, index, newreg); | 627 | oxygen_write_ac97(chip, codec, index, newreg); |
@@ -673,7 +681,7 @@ static int ac97_fp_rec_volume_put(struct snd_kcontrol *ctl, | |||
673 | .private_value = ((codec) << 24) | ((invert) << 16) | \ | 681 | .private_value = ((codec) << 24) | ((invert) << 16) | \ |
674 | ((bitnr) << 8) | (index), \ | 682 | ((bitnr) << 8) | (index), \ |
675 | } | 683 | } |
676 | #define AC97_VOLUME(xname, codec, index) { \ | 684 | #define AC97_VOLUME(xname, codec, index, stereo) { \ |
677 | .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \ | 685 | .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \ |
678 | .name = xname, \ | 686 | .name = xname, \ |
679 | .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | \ | 687 | .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | \ |
@@ -682,7 +690,7 @@ static int ac97_fp_rec_volume_put(struct snd_kcontrol *ctl, | |||
682 | .get = ac97_volume_get, \ | 690 | .get = ac97_volume_get, \ |
683 | .put = ac97_volume_put, \ | 691 | .put = ac97_volume_put, \ |
684 | .tlv = { .p = ac97_db_scale, }, \ | 692 | .tlv = { .p = ac97_db_scale, }, \ |
685 | .private_value = ((codec) << 24) | (index), \ | 693 | .private_value = ((codec) << 24) | ((stereo) << 16) | (index), \ |
686 | } | 694 | } |
687 | 695 | ||
688 | static DECLARE_TLV_DB_SCALE(monitor_db_scale, -1000, 1000, 0); | 696 | static DECLARE_TLV_DB_SCALE(monitor_db_scale, -1000, 1000, 0); |
@@ -882,18 +890,18 @@ static const struct { | |||
882 | }; | 890 | }; |
883 | 891 | ||
884 | static const struct snd_kcontrol_new ac97_controls[] = { | 892 | static const struct snd_kcontrol_new ac97_controls[] = { |
885 | AC97_VOLUME("Mic Capture Volume", 0, AC97_MIC), | 893 | AC97_VOLUME("Mic Capture Volume", 0, AC97_MIC, 0), |
886 | AC97_SWITCH("Mic Capture Switch", 0, AC97_MIC, 15, 1), | 894 | AC97_SWITCH("Mic Capture Switch", 0, AC97_MIC, 15, 1), |
887 | AC97_SWITCH("Mic Boost (+20dB)", 0, AC97_MIC, 6, 0), | 895 | AC97_SWITCH("Mic Boost (+20dB)", 0, AC97_MIC, 6, 0), |
888 | AC97_SWITCH("Line Capture Switch", 0, AC97_LINE, 15, 1), | 896 | AC97_SWITCH("Line Capture Switch", 0, AC97_LINE, 15, 1), |
889 | AC97_VOLUME("CD Capture Volume", 0, AC97_CD), | 897 | AC97_VOLUME("CD Capture Volume", 0, AC97_CD, 1), |
890 | AC97_SWITCH("CD Capture Switch", 0, AC97_CD, 15, 1), | 898 | AC97_SWITCH("CD Capture Switch", 0, AC97_CD, 15, 1), |
891 | AC97_VOLUME("Aux Capture Volume", 0, AC97_AUX), | 899 | AC97_VOLUME("Aux Capture Volume", 0, AC97_AUX, 1), |
892 | AC97_SWITCH("Aux Capture Switch", 0, AC97_AUX, 15, 1), | 900 | AC97_SWITCH("Aux Capture Switch", 0, AC97_AUX, 15, 1), |
893 | }; | 901 | }; |
894 | 902 | ||
895 | static const struct snd_kcontrol_new ac97_fp_controls[] = { | 903 | static const struct snd_kcontrol_new ac97_fp_controls[] = { |
896 | AC97_VOLUME("Front Panel Playback Volume", 1, AC97_HEADPHONE), | 904 | AC97_VOLUME("Front Panel Playback Volume", 1, AC97_HEADPHONE, 1), |
897 | AC97_SWITCH("Front Panel Playback Switch", 1, AC97_HEADPHONE, 15, 1), | 905 | AC97_SWITCH("Front Panel Playback Switch", 1, AC97_HEADPHONE, 15, 1), |
898 | { | 906 | { |
899 | .iface = SNDRV_CTL_ELEM_IFACE_MIXER, | 907 | .iface = SNDRV_CTL_ELEM_IFACE_MIXER, |
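Editor's note: the oxygen_mixer hunks above add a stereo flag to the AC'97 volume controls so that mono registers such as AC97_MIC report a single channel while stereo registers keep two. The flag rides in bit 16 of the control's private_value, next to the codec number and register index. The helpers below are purely illustrative (the driver open-codes these shifts inline) and only make the volume controls' bit layout explicit.

    /* illustrative only - the driver open-codes these shifts inline */
    #define AC97_CTL_PRIV(codec, stereo, index) \
            (((codec) << 24) | ((stereo) << 16) | (index))

    static inline unsigned int priv_codec(unsigned long pv)  { return (pv >> 24) & 1; }
    static inline int          priv_stereo(unsigned long pv) { return (pv >> 16) & 1; }
    static inline unsigned int priv_index(unsigned long pv)  { return pv & 0xff; }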
diff --git a/sound/pci/rme32.c b/sound/pci/rme32.c index d7b966e7c4cf..f977dba7cbd0 100644 --- a/sound/pci/rme32.c +++ b/sound/pci/rme32.c | |||
@@ -227,12 +227,9 @@ struct rme32 { | |||
227 | }; | 227 | }; |
228 | 228 | ||
229 | static struct pci_device_id snd_rme32_ids[] = { | 229 | static struct pci_device_id snd_rme32_ids[] = { |
230 | {PCI_VENDOR_ID_XILINX_RME, PCI_DEVICE_ID_RME_DIGI32, | 230 | {PCI_VDEVICE(XILINX_RME, PCI_DEVICE_ID_RME_DIGI32), 0,}, |
231 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0,}, | 231 | {PCI_VDEVICE(XILINX_RME, PCI_DEVICE_ID_RME_DIGI32_8), 0,}, |
232 | {PCI_VENDOR_ID_XILINX_RME, PCI_DEVICE_ID_RME_DIGI32_8, | 232 | {PCI_VDEVICE(XILINX_RME, PCI_DEVICE_ID_RME_DIGI32_PRO), 0,}, |
233 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0,}, | ||
234 | {PCI_VENDOR_ID_XILINX_RME, PCI_DEVICE_ID_RME_DIGI32_PRO, | ||
235 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0,}, | ||
236 | {0,} | 233 | {0,} |
237 | }; | 234 | }; |
238 | 235 | ||
diff --git a/sound/pci/rme96.c b/sound/pci/rme96.c index 55fb1c131f58..2ba5c0fd55db 100644 --- a/sound/pci/rme96.c +++ b/sound/pci/rme96.c | |||
@@ -232,14 +232,10 @@ struct rme96 { | |||
232 | }; | 232 | }; |
233 | 233 | ||
234 | static struct pci_device_id snd_rme96_ids[] = { | 234 | static struct pci_device_id snd_rme96_ids[] = { |
235 | { PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_RME_DIGI96, | 235 | { PCI_VDEVICE(XILINX, PCI_DEVICE_ID_RME_DIGI96), 0, }, |
236 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, | 236 | { PCI_VDEVICE(XILINX, PCI_DEVICE_ID_RME_DIGI96_8), 0, }, |
237 | { PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_RME_DIGI96_8, | 237 | { PCI_VDEVICE(XILINX, PCI_DEVICE_ID_RME_DIGI96_8_PRO), 0, }, |
238 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, | 238 | { PCI_VDEVICE(XILINX, PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST), 0, }, |
239 | { PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_RME_DIGI96_8_PRO, | ||
240 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, | ||
241 | { PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST, | ||
242 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, | ||
243 | { 0, } | 239 | { 0, } |
244 | }; | 240 | }; |
245 | 241 | ||
diff --git a/sound/pci/sonicvibes.c b/sound/pci/sonicvibes.c index 7dc60ad4772e..1f6406c4534d 100644 --- a/sound/pci/sonicvibes.c +++ b/sound/pci/sonicvibes.c | |||
@@ -243,7 +243,7 @@ struct sonicvibes { | |||
243 | }; | 243 | }; |
244 | 244 | ||
245 | static struct pci_device_id snd_sonic_ids[] = { | 245 | static struct pci_device_id snd_sonic_ids[] = { |
246 | { 0x5333, 0xca00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, | 246 | { PCI_VDEVICE(S3, 0xca00), 0, }, |
247 | { 0, } | 247 | { 0, } |
248 | }; | 248 | }; |
249 | 249 | ||
diff --git a/sound/pci/via82xx.c b/sound/pci/via82xx.c index 949fcaf6b70e..acfa4760da49 100644 --- a/sound/pci/via82xx.c +++ b/sound/pci/via82xx.c | |||
@@ -402,9 +402,9 @@ struct via82xx { | |||
402 | 402 | ||
403 | static struct pci_device_id snd_via82xx_ids[] = { | 403 | static struct pci_device_id snd_via82xx_ids[] = { |
404 | /* 0x1106, 0x3058 */ | 404 | /* 0x1106, 0x3058 */ |
405 | { PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPE_CARD_VIA686, }, /* 686A */ | 405 | { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C686_5), TYPE_CARD_VIA686, }, /* 686A */ |
406 | /* 0x1106, 0x3059 */ | 406 | /* 0x1106, 0x3059 */ |
407 | { PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233_5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPE_CARD_VIA8233, }, /* VT8233 */ | 407 | { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_8233_5), TYPE_CARD_VIA8233, }, /* VT8233 */ |
408 | { 0, } | 408 | { 0, } |
409 | }; | 409 | }; |
410 | 410 | ||
diff --git a/sound/pci/via82xx_modem.c b/sound/pci/via82xx_modem.c index 0d54e3503c1e..47eb61561dfc 100644 --- a/sound/pci/via82xx_modem.c +++ b/sound/pci/via82xx_modem.c | |||
@@ -261,7 +261,7 @@ struct via82xx_modem { | |||
261 | }; | 261 | }; |
262 | 262 | ||
263 | static struct pci_device_id snd_via82xx_modem_ids[] = { | 263 | static struct pci_device_id snd_via82xx_modem_ids[] = { |
264 | { 0x1106, 0x3068, PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPE_CARD_VIA82XX_MODEM, }, | 264 | { PCI_VDEVICE(VIA, 0x3068), TYPE_CARD_VIA82XX_MODEM, }, |
265 | { 0, } | 265 | { 0, } |
266 | }; | 266 | }; |
267 | 267 | ||
diff --git a/sound/pci/ymfpci/ymfpci.c b/sound/pci/ymfpci/ymfpci.c index 4af66661f9b0..e6b18b90d451 100644 --- a/sound/pci/ymfpci/ymfpci.c +++ b/sound/pci/ymfpci/ymfpci.c | |||
@@ -67,12 +67,12 @@ module_param_array(rear_switch, bool, NULL, 0444); | |||
67 | MODULE_PARM_DESC(rear_switch, "Enable shared rear/line-in switch"); | 67 | MODULE_PARM_DESC(rear_switch, "Enable shared rear/line-in switch"); |
68 | 68 | ||
69 | static struct pci_device_id snd_ymfpci_ids[] = { | 69 | static struct pci_device_id snd_ymfpci_ids[] = { |
70 | { 0x1073, 0x0004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* YMF724 */ | 70 | { PCI_VDEVICE(YAMAHA, 0x0004), 0, }, /* YMF724 */ |
71 | { 0x1073, 0x000d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* YMF724F */ | 71 | { PCI_VDEVICE(YAMAHA, 0x000d), 0, }, /* YMF724F */ |
72 | { 0x1073, 0x000a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* YMF740 */ | 72 | { PCI_VDEVICE(YAMAHA, 0x000a), 0, }, /* YMF740 */ |
73 | { 0x1073, 0x000c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* YMF740C */ | 73 | { PCI_VDEVICE(YAMAHA, 0x000c), 0, }, /* YMF740C */ |
74 | { 0x1073, 0x0010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* YMF744 */ | 74 | { PCI_VDEVICE(YAMAHA, 0x0010), 0, }, /* YMF744 */ |
75 | { 0x1073, 0x0012, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* YMF754 */ | 75 | { PCI_VDEVICE(YAMAHA, 0x0012), 0, }, /* YMF754 */ |
76 | { 0, } | 76 | { 0, } |
77 | }; | 77 | }; |
78 | 78 | ||
diff --git a/tools/perf/CREDITS b/tools/perf/CREDITS new file mode 100644 index 000000000000..c2ddcb3acbd0 --- /dev/null +++ b/tools/perf/CREDITS | |||
@@ -0,0 +1,30 @@ | |||
1 | Most of the infrastructure that 'perf' uses here has been reused | ||
2 | from the Git project, as of version: | ||
3 | |||
4 | 66996ec: Sync with 1.6.2.4 | ||
5 | |||
6 | Here is an (incomplete!) list of main contributors to those files | ||
7 | in util/* and elsewhere: | ||
8 | |||
9 | Alex Riesen | ||
10 | Christian Couder | ||
11 | Dmitry Potapov | ||
12 | Jeff King | ||
13 | Johannes Schindelin | ||
14 | Johannes Sixt | ||
15 | Junio C Hamano | ||
16 | Linus Torvalds | ||
17 | Matthias Kestenholz | ||
18 | Michal Ostrowski | ||
19 | Miklos Vajna | ||
20 | Petr Baudis | ||
21 | Pierre Habouzit | ||
22 | René Scharfe | ||
23 | Samuel Tardieu | ||
24 | Shawn O. Pearce | ||
25 | Steffen Prohaska | ||
26 | Steve Haslam | ||
27 | |||
28 | Thanks guys! | ||
29 | |||
30 | The full history of the files can be found in the upstream Git commits. | ||
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt index 52d3fc6846a9..8aa3f8c88707 100644 --- a/tools/perf/Documentation/perf-report.txt +++ b/tools/perf/Documentation/perf-report.txt | |||
@@ -13,13 +13,25 @@ SYNOPSIS | |||
13 | DESCRIPTION | 13 | DESCRIPTION |
14 | ----------- | 14 | ----------- |
15 | This command displays the performance counter profile information recorded | 15 | This command displays the performance counter profile information recorded |
16 | via perf report. | 16 | via perf record. |
17 | 17 | ||
18 | OPTIONS | 18 | OPTIONS |
19 | ------- | 19 | ------- |
20 | -i:: | 20 | -i:: |
21 | --input=:: | 21 | --input=:: |
22 | Input file name. (default: perf.data) | 22 | Input file name. (default: perf.data) |
23 | -d:: | ||
24 | --dsos=:: | ||
25 | Only consider symbols in these dsos. CSV that understands | ||
26 | file://filename entries. | ||
27 | -C:: | ||
28 | --comms=:: | ||
29 | Only consider symbols in these comms. CSV that understands | ||
30 | file://filename entries. | ||
31 | -S:: | ||
32 | --symbols=:: | ||
33 | Only consider these symbols. CSV that understands | ||
34 | file://filename entries. | ||
23 | 35 | ||
24 | SEE ALSO | 36 | SEE ALSO |
25 | -------- | 37 | -------- |
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt index c368a72721d7..0d74346d21ab 100644 --- a/tools/perf/Documentation/perf-stat.txt +++ b/tools/perf/Documentation/perf-stat.txt | |||
@@ -8,8 +8,8 @@ perf-stat - Run a command and gather performance counter statistics | |||
8 | SYNOPSIS | 8 | SYNOPSIS |
9 | -------- | 9 | -------- |
10 | [verse] | 10 | [verse] |
11 | 'perf stat' [-e <EVENT> | --event=EVENT] [-l] [-a] <command> | 11 | 'perf stat' [-e <EVENT> | --event=EVENT] [-S] [-a] <command> |
12 | 'perf stat' [-e <EVENT> | --event=EVENT] [-l] [-a] -- <command> [<options>] | 12 | 'perf stat' [-e <EVENT> | --event=EVENT] [-S] [-a] -- <command> [<options>] |
13 | 13 | ||
14 | DESCRIPTION | 14 | DESCRIPTION |
15 | ----------- | 15 | ----------- |
@@ -40,7 +40,7 @@ OPTIONS | |||
40 | -a:: | 40 | -a:: |
41 | system-wide collection | 41 | system-wide collection |
42 | 42 | ||
43 | -l:: | 43 | -S:: |
44 | scale counter values | 44 | scale counter values |
45 | 45 | ||
46 | EXAMPLES | 46 | EXAMPLES |
diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 36d7eef49913..9c6d0ae3708e 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile | |||
@@ -290,7 +290,7 @@ LIB_FILE=libperf.a | |||
290 | 290 | ||
291 | LIB_H += ../../include/linux/perf_counter.h | 291 | LIB_H += ../../include/linux/perf_counter.h |
292 | LIB_H += perf.h | 292 | LIB_H += perf.h |
293 | LIB_H += types.h | 293 | LIB_H += util/types.h |
294 | LIB_H += util/list.h | 294 | LIB_H += util/list.h |
295 | LIB_H += util/rbtree.h | 295 | LIB_H += util/rbtree.h |
296 | LIB_H += util/levenshtein.h | 296 | LIB_H += util/levenshtein.h |
@@ -301,6 +301,7 @@ LIB_H += util/util.h | |||
301 | LIB_H += util/help.h | 301 | LIB_H += util/help.h |
302 | LIB_H += util/strbuf.h | 302 | LIB_H += util/strbuf.h |
303 | LIB_H += util/string.h | 303 | LIB_H += util/string.h |
304 | LIB_H += util/strlist.h | ||
304 | LIB_H += util/run-command.h | 305 | LIB_H += util/run-command.h |
305 | LIB_H += util/sigchain.h | 306 | LIB_H += util/sigchain.h |
306 | LIB_H += util/symbol.h | 307 | LIB_H += util/symbol.h |
@@ -322,12 +323,15 @@ LIB_OBJS += util/run-command.o | |||
322 | LIB_OBJS += util/quote.o | 323 | LIB_OBJS += util/quote.o |
323 | LIB_OBJS += util/strbuf.o | 324 | LIB_OBJS += util/strbuf.o |
324 | LIB_OBJS += util/string.o | 325 | LIB_OBJS += util/string.o |
326 | LIB_OBJS += util/strlist.o | ||
325 | LIB_OBJS += util/usage.o | 327 | LIB_OBJS += util/usage.o |
326 | LIB_OBJS += util/wrapper.o | 328 | LIB_OBJS += util/wrapper.o |
327 | LIB_OBJS += util/sigchain.o | 329 | LIB_OBJS += util/sigchain.o |
328 | LIB_OBJS += util/symbol.o | 330 | LIB_OBJS += util/symbol.o |
329 | LIB_OBJS += util/color.o | 331 | LIB_OBJS += util/color.o |
330 | LIB_OBJS += util/pager.o | 332 | LIB_OBJS += util/pager.o |
333 | LIB_OBJS += util/header.o | ||
334 | LIB_OBJS += util/callchain.o | ||
331 | 335 | ||
332 | BUILTIN_OBJS += builtin-annotate.o | 336 | BUILTIN_OBJS += builtin-annotate.o |
333 | BUILTIN_OBJS += builtin-help.o | 337 | BUILTIN_OBJS += builtin-help.o |
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 7e58e3ad1508..722c0f54e549 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c | |||
@@ -855,7 +855,7 @@ static unsigned long total = 0, | |||
855 | total_unknown = 0; | 855 | total_unknown = 0; |
856 | 856 | ||
857 | static int | 857 | static int |
858 | process_overflow_event(event_t *event, unsigned long offset, unsigned long head) | 858 | process_sample_event(event_t *event, unsigned long offset, unsigned long head) |
859 | { | 859 | { |
860 | char level; | 860 | char level; |
861 | int show = 0; | 861 | int show = 0; |
@@ -1013,10 +1013,10 @@ process_period_event(event_t *event, unsigned long offset, unsigned long head) | |||
1013 | static int | 1013 | static int |
1014 | process_event(event_t *event, unsigned long offset, unsigned long head) | 1014 | process_event(event_t *event, unsigned long offset, unsigned long head) |
1015 | { | 1015 | { |
1016 | if (event->header.misc & PERF_EVENT_MISC_OVERFLOW) | ||
1017 | return process_overflow_event(event, offset, head); | ||
1018 | |||
1019 | switch (event->header.type) { | 1016 | switch (event->header.type) { |
1017 | case PERF_EVENT_SAMPLE: | ||
1018 | return process_sample_event(event, offset, head); | ||
1019 | |||
1020 | case PERF_EVENT_MMAP: | 1020 | case PERF_EVENT_MMAP: |
1021 | return process_mmap_event(event, offset, head); | 1021 | return process_mmap_event(event, offset, head); |
1022 | 1022 | ||
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index d7ebbd757543..d18546f37d7c 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c | |||
@@ -14,6 +14,8 @@ | |||
14 | #include "util/parse-events.h" | 14 | #include "util/parse-events.h" |
15 | #include "util/string.h" | 15 | #include "util/string.h" |
16 | 16 | ||
17 | #include "util/header.h" | ||
18 | |||
17 | #include <unistd.h> | 19 | #include <unistd.h> |
18 | #include <sched.h> | 20 | #include <sched.h> |
19 | 21 | ||
@@ -39,6 +41,8 @@ static int force = 0; | |||
39 | static int append_file = 0; | 41 | static int append_file = 0; |
40 | static int call_graph = 0; | 42 | static int call_graph = 0; |
41 | static int verbose = 0; | 43 | static int verbose = 0; |
44 | static int inherit_stat = 0; | ||
45 | static int no_samples = 0; | ||
42 | 46 | ||
43 | static long samples; | 47 | static long samples; |
44 | static struct timeval last_read; | 48 | static struct timeval last_read; |
@@ -52,7 +56,8 @@ static int nr_poll; | |||
52 | static int nr_cpu; | 56 | static int nr_cpu; |
53 | 57 | ||
54 | static int file_new = 1; | 58 | static int file_new = 1; |
55 | static struct perf_file_header file_header; | 59 | |
60 | struct perf_header *header; | ||
56 | 61 | ||
57 | struct mmap_event { | 62 | struct mmap_event { |
58 | struct perf_event_header header; | 63 | struct perf_event_header header; |
@@ -306,12 +311,11 @@ static void pid_synthesize_mmap_samples(pid_t pid) | |||
306 | continue; | 311 | continue; |
307 | pbf += n + 3; | 312 | pbf += n + 3; |
308 | if (*pbf == 'x') { /* vm_exec */ | 313 | if (*pbf == 'x') { /* vm_exec */ |
309 | char *execname = strrchr(bf, ' '); | 314 | char *execname = strchr(bf, '/'); |
310 | 315 | ||
311 | if (execname == NULL || execname[1] != '/') | 316 | if (execname == NULL) |
312 | continue; | 317 | continue; |
313 | 318 | ||
314 | execname += 1; | ||
315 | size = strlen(execname); | 319 | size = strlen(execname); |
316 | execname[size - 1] = '\0'; /* Remove \n */ | 320 | execname[size - 1] = '\0'; /* Remove \n */ |
317 | memcpy(mmap_ev.filename, execname, size); | 321 | memcpy(mmap_ev.filename, execname, size); |
@@ -329,7 +333,7 @@ static void pid_synthesize_mmap_samples(pid_t pid) | |||
329 | fclose(fp); | 333 | fclose(fp); |
330 | } | 334 | } |
331 | 335 | ||
332 | static void synthesize_samples(void) | 336 | static void synthesize_all(void) |
333 | { | 337 | { |
334 | DIR *proc; | 338 | DIR *proc; |
335 | struct dirent dirent, *next; | 339 | struct dirent dirent, *next; |
@@ -353,10 +357,35 @@ static void synthesize_samples(void) | |||
353 | 357 | ||
354 | static int group_fd; | 358 | static int group_fd; |
355 | 359 | ||
360 | static struct perf_header_attr *get_header_attr(struct perf_counter_attr *a, int nr) | ||
361 | { | ||
362 | struct perf_header_attr *h_attr; | ||
363 | |||
364 | if (nr < header->attrs) { | ||
365 | h_attr = header->attr[nr]; | ||
366 | } else { | ||
367 | h_attr = perf_header_attr__new(a); | ||
368 | perf_header__add_attr(header, h_attr); | ||
369 | } | ||
370 | |||
371 | return h_attr; | ||
372 | } | ||
373 | |||
356 | static void create_counter(int counter, int cpu, pid_t pid) | 374 | static void create_counter(int counter, int cpu, pid_t pid) |
357 | { | 375 | { |
358 | struct perf_counter_attr *attr = attrs + counter; | 376 | struct perf_counter_attr *attr = attrs + counter; |
359 | int track = 1; | 377 | struct perf_header_attr *h_attr; |
378 | int track = !counter; /* only the first counter needs these */ | ||
379 | struct { | ||
380 | u64 count; | ||
381 | u64 time_enabled; | ||
382 | u64 time_running; | ||
383 | u64 id; | ||
384 | } read_data; | ||
385 | |||
386 | attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | | ||
387 | PERF_FORMAT_TOTAL_TIME_RUNNING | | ||
388 | PERF_FORMAT_ID; | ||
360 | 389 | ||
361 | attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID; | 390 | attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID; |
362 | 391 | ||
@@ -366,25 +395,20 @@ static void create_counter(int counter, int cpu, pid_t pid) | |||
366 | attr->sample_freq = freq; | 395 | attr->sample_freq = freq; |
367 | } | 396 | } |
368 | 397 | ||
398 | if (no_samples) | ||
399 | attr->sample_freq = 0; | ||
400 | |||
401 | if (inherit_stat) | ||
402 | attr->inherit_stat = 1; | ||
403 | |||
369 | if (call_graph) | 404 | if (call_graph) |
370 | attr->sample_type |= PERF_SAMPLE_CALLCHAIN; | 405 | attr->sample_type |= PERF_SAMPLE_CALLCHAIN; |
371 | 406 | ||
372 | if (file_new) { | ||
373 | file_header.sample_type = attr->sample_type; | ||
374 | } else { | ||
375 | if (file_header.sample_type != attr->sample_type) { | ||
376 | fprintf(stderr, "incompatible append\n"); | ||
377 | exit(-1); | ||
378 | } | ||
379 | } | ||
380 | |||
381 | attr->mmap = track; | 407 | attr->mmap = track; |
382 | attr->comm = track; | 408 | attr->comm = track; |
383 | attr->inherit = (cpu < 0) && inherit; | 409 | attr->inherit = (cpu < 0) && inherit; |
384 | attr->disabled = 1; | 410 | attr->disabled = 1; |
385 | 411 | ||
386 | track = 0; /* only the first counter needs these */ | ||
387 | |||
388 | try_again: | 412 | try_again: |
389 | fd[nr_cpu][counter] = sys_perf_counter_open(attr, pid, cpu, group_fd, 0); | 413 | fd[nr_cpu][counter] = sys_perf_counter_open(attr, pid, cpu, group_fd, 0); |
390 | 414 | ||
@@ -415,6 +439,22 @@ try_again: | |||
415 | exit(-1); | 439 | exit(-1); |
416 | } | 440 | } |
417 | 441 | ||
442 | h_attr = get_header_attr(attr, counter); | ||
443 | |||
444 | if (!file_new) { | ||
445 | if (memcmp(&h_attr->attr, attr, sizeof(*attr))) { | ||
446 | fprintf(stderr, "incompatible append\n"); | ||
447 | exit(-1); | ||
448 | } | ||
449 | } | ||
450 | |||
451 | if (read(fd[nr_cpu][counter], &read_data, sizeof(read_data)) == -1) { | ||
452 | perror("Unable to read perf file descriptor\n"); | ||
453 | exit(-1); | ||
454 | } | ||
455 | |||
456 | perf_header_attr__add_id(h_attr, read_data.id); | ||
457 | |||
418 | assert(fd[nr_cpu][counter] >= 0); | 458 | assert(fd[nr_cpu][counter] >= 0); |
419 | fcntl(fd[nr_cpu][counter], F_SETFL, O_NONBLOCK); | 459 | fcntl(fd[nr_cpu][counter], F_SETFL, O_NONBLOCK); |
420 | 460 | ||
@@ -445,11 +485,6 @@ static void open_counters(int cpu, pid_t pid) | |||
445 | { | 485 | { |
446 | int counter; | 486 | int counter; |
447 | 487 | ||
448 | if (pid > 0) { | ||
449 | pid_synthesize_comm_event(pid, 0); | ||
450 | pid_synthesize_mmap_samples(pid); | ||
451 | } | ||
452 | |||
453 | group_fd = -1; | 488 | group_fd = -1; |
454 | for (counter = 0; counter < nr_counters; counter++) | 489 | for (counter = 0; counter < nr_counters; counter++) |
455 | create_counter(counter, cpu, pid); | 490 | create_counter(counter, cpu, pid); |
@@ -459,17 +494,16 @@ static void open_counters(int cpu, pid_t pid) | |||
459 | 494 | ||
460 | static void atexit_header(void) | 495 | static void atexit_header(void) |
461 | { | 496 | { |
462 | file_header.data_size += bytes_written; | 497 | header->data_size += bytes_written; |
463 | 498 | ||
464 | if (pwrite(output, &file_header, sizeof(file_header), 0) == -1) | 499 | perf_header__write(header, output); |
465 | perror("failed to write on file headers"); | ||
466 | } | 500 | } |
467 | 501 | ||
468 | static int __cmd_record(int argc, const char **argv) | 502 | static int __cmd_record(int argc, const char **argv) |
469 | { | 503 | { |
470 | int i, counter; | 504 | int i, counter; |
471 | struct stat st; | 505 | struct stat st; |
472 | pid_t pid; | 506 | pid_t pid = 0; |
473 | int flags; | 507 | int flags; |
474 | int ret; | 508 | int ret; |
475 | 509 | ||
@@ -500,22 +534,31 @@ static int __cmd_record(int argc, const char **argv) | |||
500 | exit(-1); | 534 | exit(-1); |
501 | } | 535 | } |
502 | 536 | ||
503 | if (!file_new) { | 537 | if (!file_new) |
504 | if (read(output, &file_header, sizeof(file_header)) == -1) { | 538 | header = perf_header__read(output); |
505 | perror("failed to read file headers"); | 539 | else |
506 | exit(-1); | 540 | header = perf_header__new(); |
507 | } | ||
508 | |||
509 | lseek(output, file_header.data_size, SEEK_CUR); | ||
510 | } | ||
511 | 541 | ||
512 | atexit(atexit_header); | 542 | atexit(atexit_header); |
513 | 543 | ||
514 | if (!system_wide) { | 544 | if (!system_wide) { |
515 | open_counters(-1, target_pid != -1 ? target_pid : getpid()); | 545 | pid = target_pid; |
546 | if (pid == -1) | ||
547 | pid = getpid(); | ||
548 | |||
549 | open_counters(-1, pid); | ||
516 | } else for (i = 0; i < nr_cpus; i++) | 550 | } else for (i = 0; i < nr_cpus; i++) |
517 | open_counters(i, target_pid); | 551 | open_counters(i, target_pid); |
518 | 552 | ||
553 | if (file_new) | ||
554 | perf_header__write(header, output); | ||
555 | |||
556 | if (!system_wide) { | ||
557 | pid_synthesize_comm_event(pid, 0); | ||
558 | pid_synthesize_mmap_samples(pid); | ||
559 | } else | ||
560 | synthesize_all(); | ||
561 | |||
519 | if (target_pid == -1 && argc) { | 562 | if (target_pid == -1 && argc) { |
520 | pid = fork(); | 563 | pid = fork(); |
521 | if (pid < 0) | 564 | if (pid < 0) |
@@ -539,10 +582,7 @@ static int __cmd_record(int argc, const char **argv) | |||
539 | } | 582 | } |
540 | } | 583 | } |
541 | 584 | ||
542 | if (system_wide) | 585 | for (;;) { |
543 | synthesize_samples(); | ||
544 | |||
545 | while (!done) { | ||
546 | int hits = samples; | 586 | int hits = samples; |
547 | 587 | ||
548 | for (i = 0; i < nr_cpu; i++) { | 588 | for (i = 0; i < nr_cpu; i++) { |
@@ -550,8 +590,11 @@ static int __cmd_record(int argc, const char **argv) | |||
550 | mmap_read(&mmap_array[i][counter]); | 590 | mmap_read(&mmap_array[i][counter]); |
551 | } | 591 | } |
552 | 592 | ||
553 | if (hits == samples) | 593 | if (hits == samples) { |
594 | if (done) | ||
595 | break; | ||
554 | ret = poll(event_array, nr_poll, 100); | 596 | ret = poll(event_array, nr_poll, 100); |
597 | } | ||
555 | } | 598 | } |
556 | 599 | ||
557 | /* | 600 | /* |
@@ -600,6 +643,10 @@ static const struct option options[] = { | |||
600 | "do call-graph (stack chain/backtrace) recording"), | 643 | "do call-graph (stack chain/backtrace) recording"), |
601 | OPT_BOOLEAN('v', "verbose", &verbose, | 644 | OPT_BOOLEAN('v', "verbose", &verbose, |
602 | "be more verbose (show counter open errors, etc)"), | 645 | "be more verbose (show counter open errors, etc)"), |
646 | OPT_BOOLEAN('s', "stat", &inherit_stat, | ||
647 | "per thread counts"), | ||
648 | OPT_BOOLEAN('n', "no-samples", &no_samples, | ||
649 | "don't sample"), | ||
603 | OPT_END() | 650 | OPT_END() |
604 | }; | 651 | }; |
605 | 652 | ||
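Editor's note: in the builtin-record.c hunks above, each counter now requests PERF_FORMAT_TOTAL_TIME_ENABLED, PERF_FORMAT_TOTAL_TIME_RUNNING and PERF_FORMAT_ID, reads the counter once, and registers the returned id with the header attr so that 'perf report' can later match samples to attributes. The anonymous read_data struct works because, for a single (non-group) counter with those three format bits set, the read() result is laid out as sketched below; the struct name here is illustrative.

    /* layout of a read() on the counter fd for a single counter with
     * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING |
     * PERF_FORMAT_ID set (struct name is illustrative) */
    struct single_counter_read {
            u64 count;          /* current event count                 */
            u64 time_enabled;   /* ns the counter has been enabled     */
            u64 time_running;   /* ns it actually ran on the PMU       */
            u64 id;             /* kernel-assigned id, recorded via
                                 * perf_header_attr__add_id()          */
    };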
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 5eb5566f0c95..135b7837e6bf 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c | |||
@@ -15,8 +15,11 @@ | |||
15 | #include "util/rbtree.h" | 15 | #include "util/rbtree.h" |
16 | #include "util/symbol.h" | 16 | #include "util/symbol.h" |
17 | #include "util/string.h" | 17 | #include "util/string.h" |
18 | #include "util/callchain.h" | ||
19 | #include "util/strlist.h" | ||
18 | 20 | ||
19 | #include "perf.h" | 21 | #include "perf.h" |
22 | #include "util/header.h" | ||
20 | 23 | ||
21 | #include "util/parse-options.h" | 24 | #include "util/parse-options.h" |
22 | #include "util/parse-events.h" | 25 | #include "util/parse-events.h" |
@@ -30,6 +33,8 @@ static char *vmlinux = NULL; | |||
30 | 33 | ||
31 | static char default_sort_order[] = "comm,dso"; | 34 | static char default_sort_order[] = "comm,dso"; |
32 | static char *sort_order = default_sort_order; | 35 | static char *sort_order = default_sort_order; |
36 | static char *dso_list_str, *comm_list_str, *sym_list_str; | ||
37 | static struct strlist *dso_list, *comm_list, *sym_list; | ||
33 | 38 | ||
34 | static int input; | 39 | static int input; |
35 | static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV; | 40 | static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV; |
@@ -51,6 +56,9 @@ static char *parent_pattern = default_parent_pattern; | |||
51 | static regex_t parent_regex; | 56 | static regex_t parent_regex; |
52 | 57 | ||
53 | static int exclude_other = 1; | 58 | static int exclude_other = 1; |
59 | static int callchain; | ||
60 | |||
61 | static u64 sample_type; | ||
54 | 62 | ||
55 | struct ip_event { | 63 | struct ip_event { |
56 | struct perf_event_header header; | 64 | struct perf_event_header header; |
@@ -59,11 +67,6 @@ struct ip_event { | |||
59 | unsigned char __more_data[]; | 67 | unsigned char __more_data[]; |
60 | }; | 68 | }; |
61 | 69 | ||
62 | struct ip_callchain { | ||
63 | u64 nr; | ||
64 | u64 ips[0]; | ||
65 | }; | ||
66 | |||
67 | struct mmap_event { | 70 | struct mmap_event { |
68 | struct perf_event_header header; | 71 | struct perf_event_header header; |
69 | u32 pid, tid; | 72 | u32 pid, tid; |
@@ -97,6 +100,13 @@ struct lost_event { | |||
97 | u64 lost; | 100 | u64 lost; |
98 | }; | 101 | }; |
99 | 102 | ||
103 | struct read_event { | ||
104 | struct perf_event_header header; | ||
105 | u32 pid,tid; | ||
106 | u64 value; | ||
107 | u64 format[3]; | ||
108 | }; | ||
109 | |||
100 | typedef union event_union { | 110 | typedef union event_union { |
101 | struct perf_event_header header; | 111 | struct perf_event_header header; |
102 | struct ip_event ip; | 112 | struct ip_event ip; |
@@ -105,6 +115,7 @@ typedef union event_union { | |||
105 | struct fork_event fork; | 115 | struct fork_event fork; |
106 | struct period_event period; | 116 | struct period_event period; |
107 | struct lost_event lost; | 117 | struct lost_event lost; |
118 | struct read_event read; | ||
108 | } event_t; | 119 | } event_t; |
109 | 120 | ||
110 | static LIST_HEAD(dsos); | 121 | static LIST_HEAD(dsos); |
@@ -229,7 +240,7 @@ static u64 vdso__map_ip(struct map *map, u64 ip) | |||
229 | 240 | ||
230 | static inline int is_anon_memory(const char *filename) | 241 | static inline int is_anon_memory(const char *filename) |
231 | { | 242 | { |
232 | return strcmp(filename, "//anon") == 0; | 243 | return strcmp(filename, "//anon") == 0; |
233 | } | 244 | } |
234 | 245 | ||
235 | static struct map *map__new(struct mmap_event *event) | 246 | static struct map *map__new(struct mmap_event *event) |
@@ -400,9 +411,27 @@ static void thread__insert_map(struct thread *self, struct map *map) | |||
400 | 411 | ||
401 | list_for_each_entry_safe(pos, tmp, &self->maps, node) { | 412 | list_for_each_entry_safe(pos, tmp, &self->maps, node) { |
402 | if (map__overlap(pos, map)) { | 413 | if (map__overlap(pos, map)) { |
403 | list_del_init(&pos->node); | 414 | if (verbose >= 2) { |
404 | /* XXX leaks dsos */ | 415 | printf("overlapping maps:\n"); |
405 | free(pos); | 416 | map__fprintf(map, stdout); |
417 | map__fprintf(pos, stdout); | ||
418 | } | ||
419 | |||
420 | if (map->start <= pos->start && map->end > pos->start) | ||
421 | pos->start = map->end; | ||
422 | |||
423 | if (map->end >= pos->end && map->start < pos->end) | ||
424 | pos->end = map->start; | ||
425 | |||
426 | if (verbose >= 2) { | ||
427 | printf("after collision:\n"); | ||
428 | map__fprintf(pos, stdout); | ||
429 | } | ||
430 | |||
431 | if (pos->start >= pos->end) { | ||
432 | list_del_init(&pos->node); | ||
433 | free(pos); | ||
434 | } | ||
406 | } | 435 | } |
407 | } | 436 | } |
408 | 437 | ||
@@ -464,17 +493,19 @@ static size_t threads__fprintf(FILE *fp) | |||
464 | static struct rb_root hist; | 493 | static struct rb_root hist; |
465 | 494 | ||
466 | struct hist_entry { | 495 | struct hist_entry { |
467 | struct rb_node rb_node; | 496 | struct rb_node rb_node; |
468 | 497 | ||
469 | struct thread *thread; | 498 | struct thread *thread; |
470 | struct map *map; | 499 | struct map *map; |
471 | struct dso *dso; | 500 | struct dso *dso; |
472 | struct symbol *sym; | 501 | struct symbol *sym; |
473 | struct symbol *parent; | 502 | struct symbol *parent; |
474 | u64 ip; | 503 | u64 ip; |
475 | char level; | 504 | char level; |
476 | 505 | struct callchain_node callchain; | |
477 | u64 count; | 506 | struct rb_root sorted_chain; |
507 | |||
508 | u64 count; | ||
478 | }; | 509 | }; |
479 | 510 | ||
480 | /* | 511 | /* |
@@ -745,6 +776,48 @@ hist_entry__collapse(struct hist_entry *left, struct hist_entry *right) | |||
745 | } | 776 | } |
746 | 777 | ||
747 | static size_t | 778 | static size_t |
779 | callchain__fprintf(FILE *fp, struct callchain_node *self, u64 total_samples) | ||
780 | { | ||
781 | struct callchain_list *chain; | ||
782 | size_t ret = 0; | ||
783 | |||
784 | if (!self) | ||
785 | return 0; | ||
786 | |||
787 | ret += callchain__fprintf(fp, self->parent, total_samples); | ||
788 | |||
789 | |||
790 | list_for_each_entry(chain, &self->val, list) | ||
791 | ret += fprintf(fp, " %p\n", (void *)chain->ip); | ||
792 | |||
793 | return ret; | ||
794 | } | ||
795 | |||
796 | static size_t | ||
797 | hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self, | ||
798 | u64 total_samples) | ||
799 | { | ||
800 | struct rb_node *rb_node; | ||
801 | struct callchain_node *chain; | ||
802 | size_t ret = 0; | ||
803 | |||
804 | rb_node = rb_first(&self->sorted_chain); | ||
805 | while (rb_node) { | ||
806 | double percent; | ||
807 | |||
808 | chain = rb_entry(rb_node, struct callchain_node, rb_node); | ||
809 | percent = chain->hit * 100.0 / total_samples; | ||
810 | ret += fprintf(fp, " %6.2f%%\n", percent); | ||
811 | ret += callchain__fprintf(fp, chain, total_samples); | ||
812 | ret += fprintf(fp, "\n"); | ||
813 | rb_node = rb_next(rb_node); | ||
814 | } | ||
815 | |||
816 | return ret; | ||
817 | } | ||
818 | |||
819 | |||
820 | static size_t | ||
748 | hist_entry__fprintf(FILE *fp, struct hist_entry *self, u64 total_samples) | 821 | hist_entry__fprintf(FILE *fp, struct hist_entry *self, u64 total_samples) |
749 | { | 822 | { |
750 | struct sort_entry *se; | 823 | struct sort_entry *se; |
@@ -784,6 +857,9 @@ hist_entry__fprintf(FILE *fp, struct hist_entry *self, u64 total_samples) | |||
784 | 857 | ||
785 | ret += fprintf(fp, "\n"); | 858 | ret += fprintf(fp, "\n"); |
786 | 859 | ||
860 | if (callchain) | ||
861 | hist_entry_callchain__fprintf(fp, self, total_samples); | ||
862 | |||
787 | return ret; | 863 | return ret; |
788 | } | 864 | } |
789 | 865 | ||
@@ -797,7 +873,7 @@ resolve_symbol(struct thread *thread, struct map **mapp, | |||
797 | { | 873 | { |
798 | struct dso *dso = dsop ? *dsop : NULL; | 874 | struct dso *dso = dsop ? *dsop : NULL; |
799 | struct map *map = mapp ? *mapp : NULL; | 875 | struct map *map = mapp ? *mapp : NULL; |
800 | uint64_t ip = *ipp; | 876 | u64 ip = *ipp; |
801 | 877 | ||
802 | if (!thread) | 878 | if (!thread) |
803 | return NULL; | 879 | return NULL; |
@@ -814,7 +890,6 @@ resolve_symbol(struct thread *thread, struct map **mapp, | |||
814 | *mapp = map; | 890 | *mapp = map; |
815 | got_map: | 891 | got_map: |
816 | ip = map->map_ip(map, ip); | 892 | ip = map->map_ip(map, ip); |
817 | *ipp = ip; | ||
818 | 893 | ||
819 | dso = map->dso; | 894 | dso = map->dso; |
820 | } else { | 895 | } else { |
@@ -828,6 +903,8 @@ got_map: | |||
828 | dso = kernel_dso; | 903 | dso = kernel_dso; |
829 | } | 904 | } |
830 | dprintf(" ...... dso: %s\n", dso ? dso->name : "<not found>"); | 905 | dprintf(" ...... dso: %s\n", dso ? dso->name : "<not found>"); |
906 | dprintf(" ...... map: %Lx -> %Lx\n", *ipp, ip); | ||
907 | *ipp = ip; | ||
831 | 908 | ||
832 | if (dsop) | 909 | if (dsop) |
833 | *dsop = dso; | 910 | *dsop = dso; |
@@ -867,6 +944,7 @@ hist_entry__add(struct thread *thread, struct map *map, struct dso *dso, | |||
867 | .level = level, | 944 | .level = level, |
868 | .count = count, | 945 | .count = count, |
869 | .parent = NULL, | 946 | .parent = NULL, |
947 | .sorted_chain = RB_ROOT | ||
870 | }; | 948 | }; |
871 | int cmp; | 949 | int cmp; |
872 | 950 | ||
@@ -909,6 +987,8 @@ hist_entry__add(struct thread *thread, struct map *map, struct dso *dso, | |||
909 | 987 | ||
910 | if (!cmp) { | 988 | if (!cmp) { |
911 | he->count += count; | 989 | he->count += count; |
990 | if (callchain) | ||
991 | append_chain(&he->callchain, chain); | ||
912 | return 0; | 992 | return 0; |
913 | } | 993 | } |
914 | 994 | ||
@@ -922,6 +1002,10 @@ hist_entry__add(struct thread *thread, struct map *map, struct dso *dso, | |||
922 | if (!he) | 1002 | if (!he) |
923 | return -ENOMEM; | 1003 | return -ENOMEM; |
924 | *he = entry; | 1004 | *he = entry; |
1005 | if (callchain) { | ||
1006 | callchain_init(&he->callchain); | ||
1007 | append_chain(&he->callchain, chain); | ||
1008 | } | ||
925 | rb_link_node(&he->rb_node, parent, p); | 1009 | rb_link_node(&he->rb_node, parent, p); |
926 | rb_insert_color(&he->rb_node, &hist); | 1010 | rb_insert_color(&he->rb_node, &hist); |
927 | 1011 | ||
@@ -998,6 +1082,9 @@ static void output__insert_entry(struct hist_entry *he) | |||
998 | struct rb_node *parent = NULL; | 1082 | struct rb_node *parent = NULL; |
999 | struct hist_entry *iter; | 1083 | struct hist_entry *iter; |
1000 | 1084 | ||
1085 | if (callchain) | ||
1086 | sort_chain_to_rbtree(&he->sorted_chain, &he->callchain); | ||
1087 | |||
1001 | while (*p != NULL) { | 1088 | while (*p != NULL) { |
1002 | parent = *p; | 1089 | parent = *p; |
1003 | iter = rb_entry(parent, struct hist_entry, rb_node); | 1090 | iter = rb_entry(parent, struct hist_entry, rb_node); |
@@ -1115,7 +1202,7 @@ static int validate_chain(struct ip_callchain *chain, event_t *event) | |||
1115 | } | 1202 | } |
1116 | 1203 | ||
1117 | static int | 1204 | static int |
1118 | process_overflow_event(event_t *event, unsigned long offset, unsigned long head) | 1205 | process_sample_event(event_t *event, unsigned long offset, unsigned long head) |
1119 | { | 1206 | { |
1120 | char level; | 1207 | char level; |
1121 | int show = 0; | 1208 | int show = 0; |
@@ -1127,12 +1214,12 @@ process_overflow_event(event_t *event, unsigned long offset, unsigned long head) | |||
1127 | void *more_data = event->ip.__more_data; | 1214 | void *more_data = event->ip.__more_data; |
1128 | struct ip_callchain *chain = NULL; | 1215 | struct ip_callchain *chain = NULL; |
1129 | 1216 | ||
1130 | if (event->header.type & PERF_SAMPLE_PERIOD) { | 1217 | if (sample_type & PERF_SAMPLE_PERIOD) { |
1131 | period = *(u64 *)more_data; | 1218 | period = *(u64 *)more_data; |
1132 | more_data += sizeof(u64); | 1219 | more_data += sizeof(u64); |
1133 | } | 1220 | } |
1134 | 1221 | ||
1135 | dprintf("%p [%p]: PERF_EVENT (IP, %d): %d: %p period: %Ld\n", | 1222 | dprintf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d: %p period: %Ld\n", |
1136 | (void *)(offset + head), | 1223 | (void *)(offset + head), |
1137 | (void *)(long)(event->header.size), | 1224 | (void *)(long)(event->header.size), |
1138 | event->header.misc, | 1225 | event->header.misc, |
@@ -1140,7 +1227,7 @@ process_overflow_event(event_t *event, unsigned long offset, unsigned long head) | |||
1140 | (void *)(long)ip, | 1227 | (void *)(long)ip, |
1141 | (long long)period); | 1228 | (long long)period); |
1142 | 1229 | ||
1143 | if (event->header.type & PERF_SAMPLE_CALLCHAIN) { | 1230 | if (sample_type & PERF_SAMPLE_CALLCHAIN) { |
1144 | int i; | 1231 | int i; |
1145 | 1232 | ||
1146 | chain = (void *)more_data; | 1233 | chain = (void *)more_data; |
@@ -1166,6 +1253,9 @@ process_overflow_event(event_t *event, unsigned long offset, unsigned long head) | |||
1166 | return -1; | 1253 | return -1; |
1167 | } | 1254 | } |
1168 | 1255 | ||
1256 | if (comm_list && !strlist__has_entry(comm_list, thread->comm)) | ||
1257 | return 0; | ||
1258 | |||
1169 | if (event->header.misc & PERF_EVENT_MISC_KERNEL) { | 1259 | if (event->header.misc & PERF_EVENT_MISC_KERNEL) { |
1170 | show = SHOW_KERNEL; | 1260 | show = SHOW_KERNEL; |
1171 | level = 'k'; | 1261 | level = 'k'; |
@@ -1188,6 +1278,12 @@ process_overflow_event(event_t *event, unsigned long offset, unsigned long head) | |||
1188 | if (show & show_mask) { | 1278 | if (show & show_mask) { |
1189 | struct symbol *sym = resolve_symbol(thread, &map, &dso, &ip); | 1279 | struct symbol *sym = resolve_symbol(thread, &map, &dso, &ip); |
1190 | 1280 | ||
1281 | if (dso_list && dso && dso->name && !strlist__has_entry(dso_list, dso->name)) | ||
1282 | return 0; | ||
1283 | |||
1284 | if (sym_list && sym && !strlist__has_entry(sym_list, sym->name)) | ||
1285 | return 0; | ||
1286 | |||
1191 | if (hist_entry__add(thread, map, dso, sym, ip, chain, level, period)) { | 1287 | if (hist_entry__add(thread, map, dso, sym, ip, chain, level, period)) { |
1192 | eprintf("problem incrementing symbol count, skipping event\n"); | 1288 | eprintf("problem incrementing symbol count, skipping event\n"); |
1193 | return -1; | 1289 | return -1; |
@@ -1328,14 +1424,27 @@ static void trace_event(event_t *event) | |||
1328 | } | 1424 | } |
1329 | 1425 | ||
1330 | static int | 1426 | static int |
1427 | process_read_event(event_t *event, unsigned long offset, unsigned long head) | ||
1428 | { | ||
1429 | dprintf("%p [%p]: PERF_EVENT_READ: %d %d %Lu\n", | ||
1430 | (void *)(offset + head), | ||
1431 | (void *)(long)(event->header.size), | ||
1432 | event->read.pid, | ||
1433 | event->read.tid, | ||
1434 | event->read.value); | ||
1435 | |||
1436 | return 0; | ||
1437 | } | ||
1438 | |||
1439 | static int | ||
1331 | process_event(event_t *event, unsigned long offset, unsigned long head) | 1440 | process_event(event_t *event, unsigned long offset, unsigned long head) |
1332 | { | 1441 | { |
1333 | trace_event(event); | 1442 | trace_event(event); |
1334 | 1443 | ||
1335 | if (event->header.misc & PERF_EVENT_MISC_OVERFLOW) | ||
1336 | return process_overflow_event(event, offset, head); | ||
1337 | |||
1338 | switch (event->header.type) { | 1444 | switch (event->header.type) { |
1445 | case PERF_EVENT_SAMPLE: | ||
1446 | return process_sample_event(event, offset, head); | ||
1447 | |||
1339 | case PERF_EVENT_MMAP: | 1448 | case PERF_EVENT_MMAP: |
1340 | return process_mmap_event(event, offset, head); | 1449 | return process_mmap_event(event, offset, head); |
1341 | 1450 | ||
@@ -1351,6 +1460,9 @@ process_event(event_t *event, unsigned long offset, unsigned long head) | |||
1351 | case PERF_EVENT_LOST: | 1460 | case PERF_EVENT_LOST: |
1352 | return process_lost_event(event, offset, head); | 1461 | return process_lost_event(event, offset, head); |
1353 | 1462 | ||
1463 | case PERF_EVENT_READ: | ||
1464 | return process_read_event(event, offset, head); | ||
1465 | |||
1354 | /* | 1466 | /* |
1355 | * We dont process them right now but they are fine: | 1467 | * We dont process them right now but they are fine: |
1356 | */ | 1468 | */ |
@@ -1366,13 +1478,30 @@ process_event(event_t *event, unsigned long offset, unsigned long head) | |||
1366 | return 0; | 1478 | return 0; |
1367 | } | 1479 | } |
1368 | 1480 | ||
1369 | static struct perf_file_header file_header; | 1481 | static struct perf_header *header; |
1482 | |||
1483 | static u64 perf_header__sample_type(void) | ||
1484 | { | ||
1485 | u64 sample_type = 0; | ||
1486 | int i; | ||
1487 | |||
1488 | for (i = 0; i < header->attrs; i++) { | ||
1489 | struct perf_header_attr *attr = header->attr[i]; | ||
1490 | |||
1491 | if (!sample_type) | ||
1492 | sample_type = attr->attr.sample_type; | ||
1493 | else if (sample_type != attr->attr.sample_type) | ||
1494 | die("non matching sample_type"); | ||
1495 | } | ||
1496 | |||
1497 | return sample_type; | ||
1498 | } | ||
1370 | 1499 | ||
1371 | static int __cmd_report(void) | 1500 | static int __cmd_report(void) |
1372 | { | 1501 | { |
1373 | int ret, rc = EXIT_FAILURE; | 1502 | int ret, rc = EXIT_FAILURE; |
1374 | unsigned long offset = 0; | 1503 | unsigned long offset = 0; |
1375 | unsigned long head = sizeof(file_header); | 1504 | unsigned long head, shift; |
1376 | struct stat stat; | 1505 | struct stat stat; |
1377 | event_t *event; | 1506 | event_t *event; |
1378 | uint32_t size; | 1507 | uint32_t size; |
@@ -1400,13 +1529,12 @@ static int __cmd_report(void) | |||
1400 | exit(0); | 1529 | exit(0); |
1401 | } | 1530 | } |
1402 | 1531 | ||
1403 | if (read(input, &file_header, sizeof(file_header)) == -1) { | 1532 | header = perf_header__read(input); |
1404 | perror("failed to read file headers"); | 1533 | head = header->data_offset; |
1405 | exit(-1); | ||
1406 | } | ||
1407 | 1534 | ||
1408 | if (sort__has_parent && | 1535 | sample_type = perf_header__sample_type(); |
1409 | !(file_header.sample_type & PERF_SAMPLE_CALLCHAIN)) { | 1536 | |
1537 | if (sort__has_parent && !(sample_type & PERF_SAMPLE_CALLCHAIN)) { | ||
1410 | fprintf(stderr, "selected --sort parent, but no callchain data\n"); | 1538 | fprintf(stderr, "selected --sort parent, but no callchain data\n"); |
1411 | exit(-1); | 1539 | exit(-1); |
1412 | } | 1540 | } |
@@ -1426,6 +1554,11 @@ static int __cmd_report(void) | |||
1426 | cwd = NULL; | 1554 | cwd = NULL; |
1427 | cwdlen = 0; | 1555 | cwdlen = 0; |
1428 | } | 1556 | } |
1557 | |||
1558 | shift = page_size * (head / page_size); | ||
1559 | offset += shift; | ||
1560 | head -= shift; | ||
1561 | |||
1429 | remap: | 1562 | remap: |
1430 | buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ, | 1563 | buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ, |
1431 | MAP_SHARED, input, offset); | 1564 | MAP_SHARED, input, offset); |
@@ -1442,9 +1575,10 @@ more: | |||
1442 | size = 8; | 1575 | size = 8; |
1443 | 1576 | ||
1444 | if (head + event->header.size >= page_size * mmap_window) { | 1577 | if (head + event->header.size >= page_size * mmap_window) { |
1445 | unsigned long shift = page_size * (head / page_size); | ||
1446 | int ret; | 1578 | int ret; |
1447 | 1579 | ||
1580 | shift = page_size * (head / page_size); | ||
1581 | |||
1448 | ret = munmap(buf, page_size * mmap_window); | 1582 | ret = munmap(buf, page_size * mmap_window); |
1449 | assert(ret == 0); | 1583 | assert(ret == 0); |
1450 | 1584 | ||
@@ -1482,7 +1616,7 @@ more: | |||
1482 | 1616 | ||
1483 | head += size; | 1617 | head += size; |
1484 | 1618 | ||
1485 | if (offset + head >= sizeof(file_header) + file_header.data_size) | 1619 | if (offset + head >= header->data_offset + header->data_size) |
1486 | goto done; | 1620 | goto done; |
1487 | 1621 | ||
1488 | if (offset + head < stat.st_size) | 1622 | if (offset + head < stat.st_size) |
@@ -1536,6 +1670,13 @@ static const struct option options[] = { | |||
1536 | "regex filter to identify parent, see: '--sort parent'"), | 1670 | "regex filter to identify parent, see: '--sort parent'"), |
1537 | OPT_BOOLEAN('x', "exclude-other", &exclude_other, | 1671 | OPT_BOOLEAN('x', "exclude-other", &exclude_other, |
1538 | "Only display entries with parent-match"), | 1672 | "Only display entries with parent-match"), |
1673 | OPT_BOOLEAN('c', "callchain", &callchain, "Display callchains"), | ||
1674 | OPT_STRING('d', "dsos", &dso_list_str, "dso[,dso...]", | ||
1675 | "only consider symbols in these dsos"), | ||
1676 | OPT_STRING('C', "comms", &comm_list_str, "comm[,comm...]", | ||
1677 | "only consider symbols in these comms"), | ||
1678 | OPT_STRING('S', "symbols", &sym_list_str, "symbol[,symbol...]", | ||
1679 | "only consider these symbols"), | ||
1539 | OPT_END() | 1680 | OPT_END() |
1540 | }; | 1681 | }; |
1541 | 1682 | ||
@@ -1554,6 +1695,19 @@ static void setup_sorting(void) | |||
1554 | free(str); | 1695 | free(str); |
1555 | } | 1696 | } |
1556 | 1697 | ||
1698 | static void setup_list(struct strlist **list, const char *list_str, | ||
1699 | const char *list_name) | ||
1700 | { | ||
1701 | if (list_str) { | ||
1702 | *list = strlist__new(true, list_str); | ||
1703 | if (!*list) { | ||
1704 | fprintf(stderr, "problems parsing %s list\n", | ||
1705 | list_name); | ||
1706 | exit(129); | ||
1707 | } | ||
1708 | } | ||
1709 | } | ||
1710 | |||
1557 | int cmd_report(int argc, const char **argv, const char *prefix) | 1711 | int cmd_report(int argc, const char **argv, const char *prefix) |
1558 | { | 1712 | { |
1559 | symbol__init(); | 1713 | symbol__init(); |
@@ -1575,6 +1729,10 @@ int cmd_report(int argc, const char **argv, const char *prefix) | |||
1575 | if (argc) | 1729 | if (argc) |
1576 | usage_with_options(report_usage, options); | 1730 | usage_with_options(report_usage, options); |
1577 | 1731 | ||
1732 | setup_list(&dso_list, dso_list_str, "dso"); | ||
1733 | setup_list(&comm_list, comm_list_str, "comm"); | ||
1734 | setup_list(&sym_list, sym_list_str, "symbol"); | ||
1735 | |||
1578 | setup_pager(); | 1736 | setup_pager(); |
1579 | 1737 | ||
1580 | return __cmd_report(); | 1738 | return __cmd_report(); |
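The report hunks above wire up three new allow-lists (-d/--dsos, -C/--comms, -S/--symbols): each option string is parsed once by setup_list() into a strlist, and the sample-processing path then drops any event whose dso or symbol is not in the corresponding list. A self-contained sketch of that filter shape, using a plain array in place of perf's strlist (filter__new and filter__accepts are names made up for this illustration):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct filter {
	char **entries;
	int nr;
};

/* split "a,b,c" into an allow-list; NULL or an empty string means "no filter" */
static struct filter *filter__new(const char *list_str)
{
	struct filter *f;
	char *copy, *tok;

	if (!list_str || !*list_str)
		return NULL;

	f = calloc(1, sizeof(*f));
	copy = strdup(list_str);
	if (!f || !copy)
		exit(129);		/* out of memory */

	for (tok = strtok(copy, ","); tok; tok = strtok(NULL, ",")) {
		f->entries = realloc(f->entries, (f->nr + 1) * sizeof(char *));
		if (!f->entries)
			exit(129);
		f->entries[f->nr++] = strdup(tok);
	}
	free(copy);
	return f;
}

/* mirror the report checks: no list means accept everything,
 * otherwise the name must be present */
static int filter__accepts(struct filter *f, const char *name)
{
	int i;

	if (!f || !name)
		return 1;
	for (i = 0; i < f->nr; i++)
		if (!strcmp(f->entries[i], name))
			return 1;
	return 0;
}

int main(void)
{
	struct filter *dso_list = filter__new("libc-2.9.so,[kernel]");

	printf("%d\n", filter__accepts(dso_list, "libc-2.9.so"));	/* prints 1 */
	printf("%d\n", filter__accepts(dso_list, "ld-2.9.so"));		/* prints 0 */
	return 0;
}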
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 6d3eeac1ea25..2e03524a1de0 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c | |||
@@ -32,6 +32,7 @@ | |||
32 | * Wu Fengguang <fengguang.wu@intel.com> | 32 | * Wu Fengguang <fengguang.wu@intel.com> |
33 | * Mike Galbraith <efault@gmx.de> | 33 | * Mike Galbraith <efault@gmx.de> |
34 | * Paul Mackerras <paulus@samba.org> | 34 | * Paul Mackerras <paulus@samba.org> |
35 | * Jaswinder Singh Rajput <jaswinder@kernel.org> | ||
35 | * | 36 | * |
36 | * Released under the GPL v2. (and only v2, not any later version) | 37 | * Released under the GPL v2. (and only v2, not any later version) |
37 | */ | 38 | */ |
@@ -45,7 +46,7 @@ | |||
45 | #include <sys/prctl.h> | 46 | #include <sys/prctl.h> |
46 | #include <math.h> | 47 | #include <math.h> |
47 | 48 | ||
48 | static struct perf_counter_attr default_attrs[MAX_COUNTERS] = { | 49 | static struct perf_counter_attr default_attrs[] = { |
49 | 50 | ||
50 | { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK }, | 51 | { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK }, |
51 | { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES}, | 52 | { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES}, |
@@ -59,42 +60,28 @@ static struct perf_counter_attr default_attrs[MAX_COUNTERS] = { | |||
59 | 60 | ||
60 | }; | 61 | }; |
61 | 62 | ||
63 | #define MAX_RUN 100 | ||
64 | |||
62 | static int system_wide = 0; | 65 | static int system_wide = 0; |
63 | static int inherit = 1; | ||
64 | static int verbose = 0; | 66 | static int verbose = 0; |
65 | |||
66 | static int fd[MAX_NR_CPUS][MAX_COUNTERS]; | ||
67 | |||
68 | static int target_pid = -1; | ||
69 | static int nr_cpus = 0; | 67 | static int nr_cpus = 0; |
70 | static unsigned int page_size; | 68 | static int run_idx = 0; |
71 | 69 | ||
70 | static int run_count = 1; | ||
71 | static int inherit = 1; | ||
72 | static int scale = 1; | 72 | static int scale = 1; |
73 | static int target_pid = -1; | ||
74 | static int null_run = 0; | ||
73 | 75 | ||
74 | static const unsigned int default_count[] = { | 76 | static int fd[MAX_NR_CPUS][MAX_COUNTERS]; |
75 | 1000000, | ||
76 | 1000000, | ||
77 | 10000, | ||
78 | 10000, | ||
79 | 1000000, | ||
80 | 10000, | ||
81 | }; | ||
82 | |||
83 | #define MAX_RUN 100 | ||
84 | |||
85 | static int run_count = 1; | ||
86 | static int run_idx = 0; | ||
87 | |||
88 | static u64 event_res[MAX_RUN][MAX_COUNTERS][3]; | ||
89 | static u64 event_scaled[MAX_RUN][MAX_COUNTERS]; | ||
90 | |||
91 | //static u64 event_hist[MAX_RUN][MAX_COUNTERS][3]; | ||
92 | |||
93 | 77 | ||
94 | static u64 runtime_nsecs[MAX_RUN]; | 78 | static u64 runtime_nsecs[MAX_RUN]; |
95 | static u64 walltime_nsecs[MAX_RUN]; | 79 | static u64 walltime_nsecs[MAX_RUN]; |
96 | static u64 runtime_cycles[MAX_RUN]; | 80 | static u64 runtime_cycles[MAX_RUN]; |
97 | 81 | ||
82 | static u64 event_res[MAX_RUN][MAX_COUNTERS][3]; | ||
83 | static u64 event_scaled[MAX_RUN][MAX_COUNTERS]; | ||
84 | |||
98 | static u64 event_res_avg[MAX_COUNTERS][3]; | 85 | static u64 event_res_avg[MAX_COUNTERS][3]; |
99 | static u64 event_res_noise[MAX_COUNTERS][3]; | 86 | static u64 event_res_noise[MAX_COUNTERS][3]; |
100 | 87 | ||
@@ -109,7 +96,10 @@ static u64 walltime_nsecs_noise; | |||
109 | static u64 runtime_cycles_avg; | 96 | static u64 runtime_cycles_avg; |
110 | static u64 runtime_cycles_noise; | 97 | static u64 runtime_cycles_noise; |
111 | 98 | ||
112 | static void create_perf_stat_counter(int counter) | 99 | #define ERR_PERF_OPEN \ |
100 | "Error: counter %d, sys_perf_counter_open() syscall returned with %d (%s)\n" | ||
101 | |||
102 | static void create_perf_stat_counter(int counter, int pid) | ||
113 | { | 103 | { |
114 | struct perf_counter_attr *attr = attrs + counter; | 104 | struct perf_counter_attr *attr = attrs + counter; |
115 | 105 | ||
@@ -119,20 +109,21 @@ static void create_perf_stat_counter(int counter) | |||
119 | 109 | ||
120 | if (system_wide) { | 110 | if (system_wide) { |
121 | int cpu; | 111 | int cpu; |
122 | for (cpu = 0; cpu < nr_cpus; cpu ++) { | 112 | for (cpu = 0; cpu < nr_cpus; cpu++) { |
123 | fd[cpu][counter] = sys_perf_counter_open(attr, -1, cpu, -1, 0); | 113 | fd[cpu][counter] = sys_perf_counter_open(attr, -1, cpu, -1, 0); |
124 | if (fd[cpu][counter] < 0 && verbose) { | 114 | if (fd[cpu][counter] < 0 && verbose) |
125 | printf("Error: counter %d, sys_perf_counter_open() syscall returned with %d (%s)\n", counter, fd[cpu][counter], strerror(errno)); | 115 | fprintf(stderr, ERR_PERF_OPEN, counter, |
126 | } | 116 | fd[cpu][counter], strerror(errno)); |
127 | } | 117 | } |
128 | } else { | 118 | } else { |
129 | attr->inherit = inherit; | 119 | attr->inherit = inherit; |
130 | attr->disabled = 1; | 120 | attr->disabled = 1; |
131 | 121 | attr->enable_on_exec = 1; | |
132 | fd[0][counter] = sys_perf_counter_open(attr, 0, -1, -1, 0); | 122 | |
133 | if (fd[0][counter] < 0 && verbose) { | 123 | fd[0][counter] = sys_perf_counter_open(attr, pid, -1, -1, 0); |
134 | printf("Error: counter %d, sys_perf_counter_open() syscall returned with %d (%s)\n", counter, fd[0][counter], strerror(errno)); | 124 | if (fd[0][counter] < 0 && verbose) |
135 | } | 125 | fprintf(stderr, ERR_PERF_OPEN, counter, |
126 | fd[0][counter], strerror(errno)); | ||
136 | } | 127 | } |
137 | } | 128 | } |
138 | 129 | ||
@@ -168,7 +159,7 @@ static void read_counter(int counter) | |||
168 | count[0] = count[1] = count[2] = 0; | 159 | count[0] = count[1] = count[2] = 0; |
169 | 160 | ||
170 | nv = scale ? 3 : 1; | 161 | nv = scale ? 3 : 1; |
171 | for (cpu = 0; cpu < nr_cpus; cpu ++) { | 162 | for (cpu = 0; cpu < nr_cpus; cpu++) { |
172 | if (fd[cpu][counter] < 0) | 163 | if (fd[cpu][counter] < 0) |
173 | continue; | 164 | continue; |
174 | 165 | ||
@@ -215,32 +206,67 @@ static int run_perf_stat(int argc, const char **argv) | |||
215 | int status = 0; | 206 | int status = 0; |
216 | int counter; | 207 | int counter; |
217 | int pid; | 208 | int pid; |
209 | int child_ready_pipe[2], go_pipe[2]; | ||
210 | char buf; | ||
218 | 211 | ||
219 | if (!system_wide) | 212 | if (!system_wide) |
220 | nr_cpus = 1; | 213 | nr_cpus = 1; |
221 | 214 | ||
222 | for (counter = 0; counter < nr_counters; counter++) | 215 | if (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0) { |
223 | create_perf_stat_counter(counter); | 216 | perror("failed to create pipes"); |
224 | 217 | exit(1); | |
225 | /* | 218 | } |
226 | * Enable counters and exec the command: | ||
227 | */ | ||
228 | t0 = rdclock(); | ||
229 | prctl(PR_TASK_PERF_COUNTERS_ENABLE); | ||
230 | 219 | ||
231 | if ((pid = fork()) < 0) | 220 | if ((pid = fork()) < 0) |
232 | perror("failed to fork"); | 221 | perror("failed to fork"); |
233 | 222 | ||
234 | if (!pid) { | 223 | if (!pid) { |
235 | if (execvp(argv[0], (char **)argv)) { | 224 | close(child_ready_pipe[0]); |
236 | perror(argv[0]); | 225 | close(go_pipe[1]); |
237 | exit(-1); | 226 | fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC); |
238 | } | 227 | |
228 | /* | ||
229 | * Do a dummy execvp to get the PLT entry resolved, | ||
230 | * so we avoid the resolver overhead on the real | ||
231 | * execvp call. | ||
232 | */ | ||
233 | execvp("", (char **)argv); | ||
234 | |||
235 | /* | ||
236 | * Tell the parent we're ready to go | ||
237 | */ | ||
238 | close(child_ready_pipe[1]); | ||
239 | |||
240 | /* | ||
241 | * Wait until the parent tells us to go. | ||
242 | */ | ||
243 | read(go_pipe[0], &buf, 1); | ||
244 | |||
245 | execvp(argv[0], (char **)argv); | ||
246 | |||
247 | perror(argv[0]); | ||
248 | exit(-1); | ||
239 | } | 249 | } |
240 | 250 | ||
251 | /* | ||
252 | * Wait for the child to be ready to exec. | ||
253 | */ | ||
254 | close(child_ready_pipe[1]); | ||
255 | close(go_pipe[0]); | ||
256 | read(child_ready_pipe[0], &buf, 1); | ||
257 | close(child_ready_pipe[0]); | ||
258 | |||
259 | for (counter = 0; counter < nr_counters; counter++) | ||
260 | create_perf_stat_counter(counter, pid); | ||
261 | |||
262 | /* | ||
263 | * Enable counters and exec the command: | ||
264 | */ | ||
265 | t0 = rdclock(); | ||
266 | |||
267 | close(go_pipe[1]); | ||
241 | wait(&status); | 268 | wait(&status); |
242 | 269 | ||
243 | prctl(PR_TASK_PERF_COUNTERS_DISABLE); | ||
244 | t1 = rdclock(); | 270 | t1 = rdclock(); |
245 | 271 | ||
246 | walltime_nsecs[run_idx] = t1 - t0; | 272 | walltime_nsecs[run_idx] = t1 - t0; |
@@ -262,7 +288,7 @@ static void nsec_printout(int counter, u64 *count, u64 *noise) | |||
262 | { | 288 | { |
263 | double msecs = (double)count[0] / 1000000; | 289 | double msecs = (double)count[0] / 1000000; |
264 | 290 | ||
265 | fprintf(stderr, " %14.6f %-20s", msecs, event_name(counter)); | 291 | fprintf(stderr, " %14.6f %-24s", msecs, event_name(counter)); |
266 | 292 | ||
267 | if (attrs[counter].type == PERF_TYPE_SOFTWARE && | 293 | if (attrs[counter].type == PERF_TYPE_SOFTWARE && |
268 | attrs[counter].config == PERF_COUNT_SW_TASK_CLOCK) { | 294 | attrs[counter].config == PERF_COUNT_SW_TASK_CLOCK) { |
@@ -276,7 +302,7 @@ static void nsec_printout(int counter, u64 *count, u64 *noise) | |||
276 | 302 | ||
277 | static void abs_printout(int counter, u64 *count, u64 *noise) | 303 | static void abs_printout(int counter, u64 *count, u64 *noise) |
278 | { | 304 | { |
279 | fprintf(stderr, " %14Ld %-20s", count[0], event_name(counter)); | 305 | fprintf(stderr, " %14Ld %-24s", count[0], event_name(counter)); |
280 | 306 | ||
281 | if (runtime_cycles_avg && | 307 | if (runtime_cycles_avg && |
282 | attrs[counter].type == PERF_TYPE_HARDWARE && | 308 | attrs[counter].type == PERF_TYPE_HARDWARE && |
@@ -306,7 +332,7 @@ static void print_counter(int counter) | |||
306 | scaled = event_scaled_avg[counter]; | 332 | scaled = event_scaled_avg[counter]; |
307 | 333 | ||
308 | if (scaled == -1) { | 334 | if (scaled == -1) { |
309 | fprintf(stderr, " %14s %-20s\n", | 335 | fprintf(stderr, " %14s %-24s\n", |
310 | "<not counted>", event_name(counter)); | 336 | "<not counted>", event_name(counter)); |
311 | return; | 337 | return; |
312 | } | 338 | } |
@@ -364,8 +390,11 @@ static void calc_avg(void) | |||
364 | event_res_avg[j]+1, event_res[i][j]+1); | 390 | event_res_avg[j]+1, event_res[i][j]+1); |
365 | update_avg("counter/2", j, | 391 | update_avg("counter/2", j, |
366 | event_res_avg[j]+2, event_res[i][j]+2); | 392 | event_res_avg[j]+2, event_res[i][j]+2); |
367 | update_avg("scaled", j, | 393 | if (event_scaled[i][j] != -1) |
368 | event_scaled_avg + j, event_scaled[i]+j); | 394 | update_avg("scaled", j, |
395 | event_scaled_avg + j, event_scaled[i]+j); | ||
396 | else | ||
397 | event_scaled_avg[j] = -1; | ||
369 | } | 398 | } |
370 | } | 399 | } |
371 | runtime_nsecs_avg /= run_count; | 400 | runtime_nsecs_avg /= run_count; |
@@ -429,11 +458,14 @@ static void print_stat(int argc, const char **argv) | |||
429 | for (counter = 0; counter < nr_counters; counter++) | 458 | for (counter = 0; counter < nr_counters; counter++) |
430 | print_counter(counter); | 459 | print_counter(counter); |
431 | 460 | ||
432 | |||
433 | fprintf(stderr, "\n"); | 461 | fprintf(stderr, "\n"); |
434 | fprintf(stderr, " %14.9f seconds time elapsed.\n", | 462 | fprintf(stderr, " %14.9f seconds time elapsed", |
435 | (double)walltime_nsecs_avg/1e9); | 463 | (double)walltime_nsecs_avg/1e9); |
436 | fprintf(stderr, "\n"); | 464 | if (run_count > 1) { |
465 | fprintf(stderr, " ( +- %7.3f%% )", | ||
466 | 100.0*(double)walltime_nsecs_noise/(double)walltime_nsecs_avg); | ||
467 | } | ||
468 | fprintf(stderr, "\n\n"); | ||
437 | } | 469 | } |
438 | 470 | ||
439 | static volatile int signr = -1; | 471 | static volatile int signr = -1; |
@@ -466,13 +498,15 @@ static const struct option options[] = { | |||
466 | OPT_INTEGER('p', "pid", &target_pid, | 498 | OPT_INTEGER('p', "pid", &target_pid, |
467 | "stat events on existing pid"), | 499 | "stat events on existing pid"), |
468 | OPT_BOOLEAN('a', "all-cpus", &system_wide, | 500 | OPT_BOOLEAN('a', "all-cpus", &system_wide, |
469 | "system-wide collection from all CPUs"), | 501 | "system-wide collection from all CPUs"), |
470 | OPT_BOOLEAN('S', "scale", &scale, | 502 | OPT_BOOLEAN('S', "scale", &scale, |
471 | "scale/normalize counters"), | 503 | "scale/normalize counters"), |
472 | OPT_BOOLEAN('v', "verbose", &verbose, | 504 | OPT_BOOLEAN('v', "verbose", &verbose, |
473 | "be more verbose (show counter open errors, etc)"), | 505 | "be more verbose (show counter open errors, etc)"), |
474 | OPT_INTEGER('r', "repeat", &run_count, | 506 | OPT_INTEGER('r', "repeat", &run_count, |
475 | "repeat command and print average + stddev (max: 100)"), | 507 | "repeat command and print average + stddev (max: 100)"), |
508 | OPT_BOOLEAN('n', "null", &null_run, | ||
509 | "null run - dont start any counters"), | ||
476 | OPT_END() | 510 | OPT_END() |
477 | }; | 511 | }; |
478 | 512 | ||
@@ -480,18 +514,17 @@ int cmd_stat(int argc, const char **argv, const char *prefix) | |||
480 | { | 514 | { |
481 | int status; | 515 | int status; |
482 | 516 | ||
483 | page_size = sysconf(_SC_PAGE_SIZE); | ||
484 | |||
485 | memcpy(attrs, default_attrs, sizeof(attrs)); | ||
486 | |||
487 | argc = parse_options(argc, argv, options, stat_usage, 0); | 517 | argc = parse_options(argc, argv, options, stat_usage, 0); |
488 | if (!argc) | 518 | if (!argc) |
489 | usage_with_options(stat_usage, options); | 519 | usage_with_options(stat_usage, options); |
490 | if (run_count <= 0 || run_count > MAX_RUN) | 520 | if (run_count <= 0 || run_count > MAX_RUN) |
491 | usage_with_options(stat_usage, options); | 521 | usage_with_options(stat_usage, options); |
492 | 522 | ||
493 | if (!nr_counters) | 523 | /* Set attrs and nr_counters if no event is selected and !null_run */ |
494 | nr_counters = 8; | 524 | if (!null_run && !nr_counters) { |
525 | memcpy(attrs, default_attrs, sizeof(default_attrs)); | ||
526 | nr_counters = ARRAY_SIZE(default_attrs); | ||
527 | } | ||
495 | 528 | ||
496 | nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); | 529 | nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); |
497 | assert(nr_cpus <= MAX_NR_CPUS); | 530 | assert(nr_cpus <= MAX_NR_CPUS); |
@@ -511,7 +544,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix) | |||
511 | status = 0; | 544 | status = 0; |
512 | for (run_idx = 0; run_idx < run_count; run_idx++) { | 545 | for (run_idx = 0; run_idx < run_count; run_idx++) { |
513 | if (run_count != 1 && verbose) | 546 | if (run_count != 1 && verbose) |
514 | fprintf(stderr, "[ perf stat: executing run #%d ... ]\n", run_idx+1); | 547 | fprintf(stderr, "[ perf stat: executing run #%d ... ]\n", run_idx + 1); |
515 | status = run_perf_stat(argc, argv); | 548 | status = run_perf_stat(argc, argv); |
516 | } | 549 | } |
517 | 550 | ||
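The run_perf_stat() rework above drops the prctl enable/disable pair: the child now parks on a pipe read until the parent has opened counters on its pid (with enable_on_exec set), so only the measured command is counted. A minimal, self-contained sketch of that fork-and-pipe handshake, with a placeholder comment where the counter setup sits in the patch; no perf syscalls are used here, and the dummy execvp("") PLT warm-up from the patch is left out:

#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/wait.h>

int main(int argc, char **argv)
{
	int child_ready[2], go[2];
	char buf;
	pid_t pid;

	if (argc < 2) {
		fprintf(stderr, "usage: %s command [args...]\n", argv[0]);
		return 1;
	}
	if (pipe(child_ready) < 0 || pipe(go) < 0) {
		perror("failed to create pipes");
		return 1;
	}

	pid = fork();
	if (pid < 0) {
		perror("failed to fork");
		return 1;
	}
	if (!pid) {
		close(child_ready[0]);
		close(go[1]);
		/* the blocking fd goes away by itself once we exec */
		fcntl(go[0], F_SETFD, FD_CLOEXEC);

		/* tell the parent we are ready by closing our write end... */
		close(child_ready[1]);
		/* ...and park until the parent closes its end of go[] */
		read(go[0], &buf, 1);

		execvp(argv[1], argv + 1);
		perror(argv[1]);
		_exit(127);
	}

	close(child_ready[1]);
	close(go[0]);
	/* returns as soon as the child has signalled readiness */
	read(child_ready[0], &buf, 1);
	close(child_ready[0]);

	/* counter setup against 'pid' would happen here, before the
	 * workload starts running */

	close(go[1]);			/* release the child */
	wait(NULL);
	return 0;
}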
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 5352b5e352ed..cf0d21f1ae10 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c | |||
@@ -392,11 +392,11 @@ static void record_ip(u64 ip, int counter) | |||
392 | samples--; | 392 | samples--; |
393 | } | 393 | } |
394 | 394 | ||
395 | static void process_event(u64 ip, int counter) | 395 | static void process_event(u64 ip, int counter, int user) |
396 | { | 396 | { |
397 | samples++; | 397 | samples++; |
398 | 398 | ||
399 | if (ip < min_ip || ip > max_ip) { | 399 | if (user) { |
400 | userspace_samples++; | 400 | userspace_samples++; |
401 | return; | 401 | return; |
402 | } | 402 | } |
@@ -509,9 +509,10 @@ static void mmap_read_counter(struct mmap_data *md) | |||
509 | 509 | ||
510 | old += size; | 510 | old += size; |
511 | 511 | ||
512 | if (event->header.misc & PERF_EVENT_MISC_OVERFLOW) { | 512 | if (event->header.type == PERF_EVENT_SAMPLE) { |
513 | if (event->header.type & PERF_SAMPLE_IP) | 513 | int user = |
514 | process_event(event->ip.ip, md->counter); | 514 | (event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK) == PERF_EVENT_MISC_USER; |
515 | process_event(event->ip.ip, md->counter, user); | ||
515 | } | 516 | } |
516 | } | 517 | } |
517 | 518 | ||
diff --git a/tools/perf/perf.h b/tools/perf/perf.h index ceb68aa51f7f..8f729aedc1a3 100644 --- a/tools/perf/perf.h +++ b/tools/perf/perf.h | |||
@@ -19,13 +19,23 @@ | |||
19 | #define cpu_relax() asm volatile("" ::: "memory"); | 19 | #define cpu_relax() asm volatile("" ::: "memory"); |
20 | #endif | 20 | #endif |
21 | 21 | ||
22 | #ifdef __sh__ | ||
23 | #include "../../arch/sh/include/asm/unistd.h" | ||
24 | #if defined(__SH4A__) || defined(__SH5__) | ||
25 | # define rmb() asm volatile("synco" ::: "memory") | ||
26 | #else | ||
27 | # define rmb() asm volatile("" ::: "memory") | ||
28 | #endif | ||
29 | #define cpu_relax() asm volatile("" ::: "memory") | ||
30 | #endif | ||
31 | |||
22 | #include <time.h> | 32 | #include <time.h> |
23 | #include <unistd.h> | 33 | #include <unistd.h> |
24 | #include <sys/types.h> | 34 | #include <sys/types.h> |
25 | #include <sys/syscall.h> | 35 | #include <sys/syscall.h> |
26 | 36 | ||
27 | #include "../../include/linux/perf_counter.h" | 37 | #include "../../include/linux/perf_counter.h" |
28 | #include "types.h" | 38 | #include "util/types.h" |
29 | 39 | ||
30 | /* | 40 | /* |
31 | * prctl(PR_TASK_PERF_COUNTERS_DISABLE) will (cheaply) disable all | 41 | * prctl(PR_TASK_PERF_COUNTERS_DISABLE) will (cheaply) disable all |
@@ -72,10 +82,9 @@ sys_perf_counter_open(struct perf_counter_attr *attr, | |||
72 | #define MAX_COUNTERS 256 | 82 | #define MAX_COUNTERS 256 |
73 | #define MAX_NR_CPUS 256 | 83 | #define MAX_NR_CPUS 256 |
74 | 84 | ||
75 | struct perf_file_header { | 85 | struct ip_callchain { |
76 | u64 version; | 86 | u64 nr; |
77 | u64 sample_type; | 87 | u64 ips[0]; |
78 | u64 data_size; | ||
79 | }; | 88 | }; |
80 | 89 | ||
81 | #endif | 90 | #endif |
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c new file mode 100644 index 000000000000..ad3c28578961 --- /dev/null +++ b/tools/perf/util/callchain.c | |||
@@ -0,0 +1,174 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com> | ||
3 | * | ||
4 | * Handle the callchains from the stream in an ad-hoc radix tree and then | ||
5 | * sort them in an rbtree. | ||
6 | * | ||
7 | */ | ||
8 | |||
9 | #include <stdlib.h> | ||
10 | #include <stdio.h> | ||
11 | #include <stdbool.h> | ||
12 | #include <errno.h> | ||
13 | |||
14 | #include "callchain.h" | ||
15 | |||
16 | |||
17 | static void rb_insert_callchain(struct rb_root *root, struct callchain_node *chain) | ||
18 | { | ||
19 | struct rb_node **p = &root->rb_node; | ||
20 | struct rb_node *parent = NULL; | ||
21 | struct callchain_node *rnode; | ||
22 | |||
23 | while (*p) { | ||
24 | parent = *p; | ||
25 | rnode = rb_entry(parent, struct callchain_node, rb_node); | ||
26 | |||
27 | if (rnode->hit < chain->hit) | ||
28 | p = &(*p)->rb_left; | ||
29 | else | ||
30 | p = &(*p)->rb_right; | ||
31 | } | ||
32 | |||
33 | rb_link_node(&chain->rb_node, parent, p); | ||
34 | rb_insert_color(&chain->rb_node, root); | ||
35 | } | ||
36 | |||
37 | /* | ||
38 | * Once we get every callchains from the stream, we can now | ||
39 | * sort them by hit | ||
40 | */ | ||
41 | void sort_chain_to_rbtree(struct rb_root *rb_root, struct callchain_node *node) | ||
42 | { | ||
43 | struct callchain_node *child; | ||
44 | |||
45 | list_for_each_entry(child, &node->children, brothers) | ||
46 | sort_chain_to_rbtree(rb_root, child); | ||
47 | |||
48 | if (node->hit) | ||
49 | rb_insert_callchain(rb_root, node); | ||
50 | } | ||
51 | |||
52 | static struct callchain_node *create_child(struct callchain_node *parent) | ||
53 | { | ||
54 | struct callchain_node *new; | ||
55 | |||
56 | new = malloc(sizeof(*new)); | ||
57 | if (!new) { | ||
58 | perror("not enough memory to create child for code path tree"); | ||
59 | return NULL; | ||
60 | } | ||
61 | new->parent = parent; | ||
62 | INIT_LIST_HEAD(&new->children); | ||
63 | INIT_LIST_HEAD(&new->val); | ||
64 | list_add_tail(&new->brothers, &parent->children); | ||
65 | |||
66 | return new; | ||
67 | } | ||
68 | |||
69 | static void | ||
70 | fill_node(struct callchain_node *node, struct ip_callchain *chain, int start) | ||
71 | { | ||
72 | int i; | ||
73 | |||
74 | for (i = start; i < chain->nr; i++) { | ||
75 | struct callchain_list *call; | ||
76 | |||
77 | call = malloc(sizeof(*call)); | ||
78 | if (!call) { | ||
79 | perror("not enough memory for the code path tree"); | ||
80 | return; | ||
81 | } | ||
82 | call->ip = chain->ips[i]; | ||
83 | list_add_tail(&call->list, &node->val); | ||
84 | } | ||
85 | node->val_nr = i - start; | ||
86 | } | ||
87 | |||
88 | static void add_child(struct callchain_node *parent, struct ip_callchain *chain) | ||
89 | { | ||
90 | struct callchain_node *new; | ||
91 | |||
92 | new = create_child(parent); | ||
93 | fill_node(new, chain, parent->val_nr); | ||
94 | |||
95 | new->hit = 1; | ||
96 | } | ||
97 | |||
98 | static void | ||
99 | split_add_child(struct callchain_node *parent, struct ip_callchain *chain, | ||
100 | struct callchain_list *to_split, int idx) | ||
101 | { | ||
102 | struct callchain_node *new; | ||
103 | |||
104 | /* split */ | ||
105 | new = create_child(parent); | ||
106 | list_move_tail(&to_split->list, &new->val); | ||
107 | new->hit = parent->hit; | ||
108 | parent->hit = 0; | ||
109 | parent->val_nr = idx; | ||
110 | |||
111 | /* create the new one */ | ||
112 | add_child(parent, chain); | ||
113 | } | ||
114 | |||
115 | static int | ||
116 | __append_chain(struct callchain_node *root, struct ip_callchain *chain, | ||
117 | int start); | ||
118 | |||
119 | static int | ||
120 | __append_chain_children(struct callchain_node *root, struct ip_callchain *chain) | ||
121 | { | ||
122 | struct callchain_node *rnode; | ||
123 | |||
124 | /* lookup in the children */ | ||
125 | list_for_each_entry(rnode, &root->children, brothers) { | ||
126 | int ret = __append_chain(rnode, chain, root->val_nr); | ||
127 | if (!ret) | ||
128 | return 0; | ||
129 | } | ||
130 | return -1; | ||
131 | } | ||
132 | |||
133 | static int | ||
134 | __append_chain(struct callchain_node *root, struct ip_callchain *chain, | ||
135 | int start) | ||
136 | { | ||
137 | struct callchain_list *cnode; | ||
138 | int i = start; | ||
139 | bool found = false; | ||
140 | |||
141 | /* lookup in the current node */ | ||
142 | list_for_each_entry(cnode, &root->val, list) { | ||
143 | if (cnode->ip != chain->ips[i++]) | ||
144 | break; | ||
145 | if (!found) | ||
146 | found = true; | ||
147 | if (i == chain->nr) | ||
148 | break; | ||
149 | } | ||
150 | |||
151 | /* no match here, rely on the parent */ | ||
152 | if (!found) | ||
153 | return -1; | ||
154 | |||
155 | /* we match only a part of the node. Split it and add the new chain */ | ||
156 | if (i < root->val_nr) { | ||
157 | split_add_child(root, chain, cnode, i); | ||
158 | return 0; | ||
159 | } | ||
160 | |||
161 | /* we match 100% of the path, increment the hit */ | ||
162 | if (i == root->val_nr) { | ||
163 | root->hit++; | ||
164 | return 0; | ||
165 | } | ||
166 | |||
167 | return __append_chain_children(root, chain); | ||
168 | } | ||
169 | |||
170 | void append_chain(struct callchain_node *root, struct ip_callchain *chain) | ||
171 | { | ||
172 | if (__append_chain_children(root, chain) == -1) | ||
173 | add_child(root, chain); | ||
174 | } | ||
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h new file mode 100644 index 000000000000..fa1cd2f71fd3 --- /dev/null +++ b/tools/perf/util/callchain.h | |||
@@ -0,0 +1,33 @@ | |||
1 | #ifndef __PERF_CALLCHAIN_H | ||
2 | #define __PERF_CALLCHAIN_H | ||
3 | |||
4 | #include "../perf.h" | ||
5 | #include "list.h" | ||
6 | #include "rbtree.h" | ||
7 | |||
8 | |||
9 | struct callchain_node { | ||
10 | struct callchain_node *parent; | ||
11 | struct list_head brothers; | ||
12 | struct list_head children; | ||
13 | struct list_head val; | ||
14 | struct rb_node rb_node; | ||
15 | int val_nr; | ||
16 | int hit; | ||
17 | }; | ||
18 | |||
19 | struct callchain_list { | ||
20 | unsigned long ip; | ||
21 | struct list_head list; | ||
22 | }; | ||
23 | |||
24 | static inline void callchain_init(struct callchain_node *node) | ||
25 | { | ||
26 | INIT_LIST_HEAD(&node->brothers); | ||
27 | INIT_LIST_HEAD(&node->children); | ||
28 | INIT_LIST_HEAD(&node->val); | ||
29 | } | ||
30 | |||
31 | void append_chain(struct callchain_node *root, struct ip_callchain *chain); | ||
32 | void sort_chain_to_rbtree(struct rb_root *rb_root, struct callchain_node *node); | ||
33 | #endif | ||
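Taken together, callchain.c/.h give builtin-report a two-phase flow: append_chain() folds each PERF_SAMPLE_CALLCHAIN payload into the ad-hoc radix tree as it streams by, and sort_chain_to_rbtree() ranks the accumulated branches by hit count once the file is exhausted. A sketch of that flow; it only builds inside tools/perf (it leans on util/callchain.h and the bundled list/rbtree helpers), and the sample addresses are invented:

#include <stdio.h>
#include "util/callchain.h"

static struct callchain_node chain_root;	/* static, so hit/val_nr start at 0 */

static void report_chains_example(void)
{
	/* one recorded chain: three invented instruction pointers */
	struct {
		u64 nr;
		u64 ips[3];
	} sample = { 3, { 0xffffffff81000100ULL, 0xffffffff81012345ULL, 0x400400ULL } };

	struct rb_root sorted = RB_ROOT;
	struct rb_node *nd;

	callchain_init(&chain_root);

	/* phase 1: fold every sampled chain into the radix tree */
	append_chain(&chain_root, (struct ip_callchain *)&sample);
	append_chain(&chain_root, (struct ip_callchain *)&sample);	/* same path: hit becomes 2 */

	/* phase 2: once the stream ends, rank the branches by hit count */
	sort_chain_to_rbtree(&sorted, &chain_root);

	for (nd = rb_first(&sorted); nd; nd = rb_next(nd)) {
		struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);

		printf("%d hits over %d ips\n", node->hit, node->val_nr);
	}
}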
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c new file mode 100644 index 000000000000..450384b3bbe5 --- /dev/null +++ b/tools/perf/util/header.c | |||
@@ -0,0 +1,242 @@ | |||
1 | #include <sys/types.h> | ||
2 | #include <unistd.h> | ||
3 | #include <stdio.h> | ||
4 | #include <stdlib.h> | ||
5 | |||
6 | #include "util.h" | ||
7 | #include "header.h" | ||
8 | |||
9 | /* | ||
10 | * | ||
11 | */ | ||
12 | |||
13 | struct perf_header_attr *perf_header_attr__new(struct perf_counter_attr *attr) | ||
14 | { | ||
15 | struct perf_header_attr *self = malloc(sizeof(*self)); | ||
16 | |||
17 | if (!self) | ||
18 | die("nomem"); | ||
19 | |||
20 | self->attr = *attr; | ||
21 | self->ids = 0; | ||
22 | self->size = 1; | ||
23 | self->id = malloc(sizeof(u64)); | ||
24 | |||
25 | if (!self->id) | ||
26 | die("nomem"); | ||
27 | |||
28 | return self; | ||
29 | } | ||
30 | |||
31 | void perf_header_attr__add_id(struct perf_header_attr *self, u64 id) | ||
32 | { | ||
33 | int pos = self->ids; | ||
34 | |||
35 | self->ids++; | ||
36 | if (self->ids > self->size) { | ||
37 | self->size *= 2; | ||
38 | self->id = realloc(self->id, self->size * sizeof(u64)); | ||
39 | if (!self->id) | ||
40 | die("nomem"); | ||
41 | } | ||
42 | self->id[pos] = id; | ||
43 | } | ||
44 | |||
45 | /* | ||
46 | * | ||
47 | */ | ||
48 | |||
49 | struct perf_header *perf_header__new(void) | ||
50 | { | ||
51 | struct perf_header *self = malloc(sizeof(*self)); | ||
52 | |||
53 | if (!self) | ||
54 | die("nomem"); | ||
55 | |||
56 | self->frozen = 0; | ||
57 | |||
58 | self->attrs = 0; | ||
59 | self->size = 1; | ||
60 | self->attr = malloc(sizeof(void *)); | ||
61 | |||
62 | if (!self->attr) | ||
63 | die("nomem"); | ||
64 | |||
65 | self->data_offset = 0; | ||
66 | self->data_size = 0; | ||
67 | |||
68 | return self; | ||
69 | } | ||
70 | |||
71 | void perf_header__add_attr(struct perf_header *self, | ||
72 | struct perf_header_attr *attr) | ||
73 | { | ||
74 | int pos = self->attrs; | ||
75 | |||
76 | if (self->frozen) | ||
77 | die("frozen"); | ||
78 | |||
79 | self->attrs++; | ||
80 | if (self->attrs > self->size) { | ||
81 | self->size *= 2; | ||
82 | self->attr = realloc(self->attr, self->size * sizeof(void *)); | ||
83 | if (!self->attr) | ||
84 | die("nomem"); | ||
85 | } | ||
86 | self->attr[pos] = attr; | ||
87 | } | ||
88 | |||
89 | static const char *__perf_magic = "PERFFILE"; | ||
90 | |||
91 | #define PERF_MAGIC (*(u64 *)__perf_magic) | ||
92 | |||
93 | struct perf_file_section { | ||
94 | u64 offset; | ||
95 | u64 size; | ||
96 | }; | ||
97 | |||
98 | struct perf_file_attr { | ||
99 | struct perf_counter_attr attr; | ||
100 | struct perf_file_section ids; | ||
101 | }; | ||
102 | |||
103 | struct perf_file_header { | ||
104 | u64 magic; | ||
105 | u64 size; | ||
106 | u64 attr_size; | ||
107 | struct perf_file_section attrs; | ||
108 | struct perf_file_section data; | ||
109 | }; | ||
110 | |||
111 | static void do_write(int fd, void *buf, size_t size) | ||
112 | { | ||
113 | while (size) { | ||
114 | int ret = write(fd, buf, size); | ||
115 | |||
116 | if (ret < 0) | ||
117 | die("failed to write"); | ||
118 | |||
119 | size -= ret; | ||
120 | buf += ret; | ||
121 | } | ||
122 | } | ||
123 | |||
124 | void perf_header__write(struct perf_header *self, int fd) | ||
125 | { | ||
126 | struct perf_file_header f_header; | ||
127 | struct perf_file_attr f_attr; | ||
128 | struct perf_header_attr *attr; | ||
129 | int i; | ||
130 | |||
131 | lseek(fd, sizeof(f_header), SEEK_SET); | ||
132 | |||
133 | |||
134 | for (i = 0; i < self->attrs; i++) { | ||
135 | attr = self->attr[i]; | ||
136 | |||
137 | attr->id_offset = lseek(fd, 0, SEEK_CUR); | ||
138 | do_write(fd, attr->id, attr->ids * sizeof(u64)); | ||
139 | } | ||
140 | |||
141 | |||
142 | self->attr_offset = lseek(fd, 0, SEEK_CUR); | ||
143 | |||
144 | for (i = 0; i < self->attrs; i++) { | ||
145 | attr = self->attr[i]; | ||
146 | |||
147 | f_attr = (struct perf_file_attr){ | ||
148 | .attr = attr->attr, | ||
149 | .ids = { | ||
150 | .offset = attr->id_offset, | ||
151 | .size = attr->ids * sizeof(u64), | ||
152 | } | ||
153 | }; | ||
154 | do_write(fd, &f_attr, sizeof(f_attr)); | ||
155 | } | ||
156 | |||
157 | |||
158 | self->data_offset = lseek(fd, 0, SEEK_CUR); | ||
159 | |||
160 | f_header = (struct perf_file_header){ | ||
161 | .magic = PERF_MAGIC, | ||
162 | .size = sizeof(f_header), | ||
163 | .attr_size = sizeof(f_attr), | ||
164 | .attrs = { | ||
165 | .offset = self->attr_offset, | ||
166 | .size = self->attrs * sizeof(f_attr), | ||
167 | }, | ||
168 | .data = { | ||
169 | .offset = self->data_offset, | ||
170 | .size = self->data_size, | ||
171 | }, | ||
172 | }; | ||
173 | |||
174 | lseek(fd, 0, SEEK_SET); | ||
175 | do_write(fd, &f_header, sizeof(f_header)); | ||
176 | lseek(fd, self->data_offset + self->data_size, SEEK_SET); | ||
177 | |||
178 | self->frozen = 1; | ||
179 | } | ||
180 | |||
181 | static void do_read(int fd, void *buf, size_t size) | ||
182 | { | ||
183 | while (size) { | ||
184 | int ret = read(fd, buf, size); | ||
185 | |||
186 | if (ret < 0) | ||
187 | die("failed to read"); | ||
188 | |||
189 | size -= ret; | ||
190 | buf += ret; | ||
191 | } | ||
192 | } | ||
193 | |||
194 | struct perf_header *perf_header__read(int fd) | ||
195 | { | ||
196 | struct perf_header *self = perf_header__new(); | ||
197 | struct perf_file_header f_header; | ||
198 | struct perf_file_attr f_attr; | ||
199 | u64 f_id; | ||
200 | |||
201 | int nr_attrs, nr_ids, i, j; | ||
202 | |||
203 | lseek(fd, 0, SEEK_SET); | ||
204 | do_read(fd, &f_header, sizeof(f_header)); | ||
205 | |||
206 | if (f_header.magic != PERF_MAGIC || | ||
207 | f_header.size != sizeof(f_header) || | ||
208 | f_header.attr_size != sizeof(f_attr)) | ||
209 | die("incompatible file format"); | ||
210 | |||
211 | nr_attrs = f_header.attrs.size / sizeof(f_attr); | ||
212 | lseek(fd, f_header.attrs.offset, SEEK_SET); | ||
213 | |||
214 | for (i = 0; i < nr_attrs; i++) { | ||
215 | struct perf_header_attr *attr; | ||
216 | off_t tmp = lseek(fd, 0, SEEK_CUR); | ||
217 | |||
218 | do_read(fd, &f_attr, sizeof(f_attr)); | ||
219 | |||
220 | attr = perf_header_attr__new(&f_attr.attr); | ||
221 | |||
222 | nr_ids = f_attr.ids.size / sizeof(u64); | ||
223 | lseek(fd, f_attr.ids.offset, SEEK_SET); | ||
224 | |||
225 | for (j = 0; j < nr_ids; j++) { | ||
226 | do_read(fd, &f_id, sizeof(f_id)); | ||
227 | |||
228 | perf_header_attr__add_id(attr, f_id); | ||
229 | } | ||
230 | perf_header__add_attr(self, attr); | ||
231 | lseek(fd, tmp, SEEK_SET); | ||
232 | } | ||
233 | |||
234 | self->data_offset = f_header.data.offset; | ||
235 | self->data_size = f_header.data.size; | ||
236 | |||
237 | lseek(fd, self->data_offset + self->data_size, SEEK_SET); | ||
238 | |||
239 | self->frozen = 1; | ||
240 | |||
241 | return self; | ||
242 | } | ||
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h new file mode 100644 index 000000000000..b5ef53ad4c7a --- /dev/null +++ b/tools/perf/util/header.h | |||
@@ -0,0 +1,37 @@ | |||
1 | #ifndef _PERF_HEADER_H | ||
2 | #define _PERF_HEADER_H | ||
3 | |||
4 | #include "../../../include/linux/perf_counter.h" | ||
5 | #include <sys/types.h> | ||
6 | #include "types.h" | ||
7 | |||
8 | struct perf_header_attr { | ||
9 | struct perf_counter_attr attr; | ||
10 | int ids, size; | ||
11 | u64 *id; | ||
12 | off_t id_offset; | ||
13 | }; | ||
14 | |||
15 | struct perf_header { | ||
16 | int frozen; | ||
17 | int attrs, size; | ||
18 | struct perf_header_attr **attr; | ||
19 | off_t attr_offset; | ||
20 | u64 data_offset; | ||
21 | u64 data_size; | ||
22 | }; | ||
23 | |||
24 | struct perf_header *perf_header__read(int fd); | ||
25 | void perf_header__write(struct perf_header *self, int fd); | ||
26 | |||
27 | void perf_header__add_attr(struct perf_header *self, | ||
28 | struct perf_header_attr *attr); | ||
29 | |||
30 | struct perf_header_attr * | ||
31 | perf_header_attr__new(struct perf_counter_attr *attr); | ||
32 | void perf_header_attr__add_id(struct perf_header_attr *self, u64 id); | ||
33 | |||
34 | |||
35 | struct perf_header *perf_header__new(void); | ||
36 | |||
37 | #endif /* _PERF_HEADER_H */ | ||
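header.c/.h replace the old fixed perf_file_header with a self-describing layout: per-attr id arrays, then the attr table, then the data section, with a small file header at offset 0 pointing at each. A sketch of the intended round trip; it only builds inside tools/perf, and the file name, id value and attr choice are illustrative:

#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>
#include "util/header.h"

static void header_roundtrip_example(void)
{
	struct perf_counter_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_header_attr *hattr = perf_header_attr__new(&attr);
	struct perf_header *header = perf_header__new();
	int fd = open("perf.data.example", O_RDWR | O_CREAT | O_TRUNC, 0644);

	perf_header_attr__add_id(hattr, 42);		/* one counter id */
	perf_header__add_attr(header, hattr);

	/* writes the id arrays and attr table, then rewinds to write the
	 * file header, and leaves the file offset at data_offset so the
	 * event stream can follow */
	perf_header__write(header, fd);

	/* a reader starts from just the fd; data_offset/data_size say
	 * where the event records live */
	header = perf_header__read(fd);
	printf("%d attrs, data at offset %llu\n", header->attrs,
	       (unsigned long long)header->data_offset);

	close(fd);
}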
diff --git a/tools/perf/util/help.c b/tools/perf/util/help.c index 6653f7dd1d78..17a00e0df2c4 100644 --- a/tools/perf/util/help.c +++ b/tools/perf/util/help.c | |||
@@ -126,21 +126,6 @@ static int is_executable(const char *name) | |||
126 | !S_ISREG(st.st_mode)) | 126 | !S_ISREG(st.st_mode)) |
127 | return 0; | 127 | return 0; |
128 | 128 | ||
129 | #ifdef __MINGW32__ | ||
130 | /* cannot trust the executable bit, peek into the file instead */ | ||
131 | char buf[3] = { 0 }; | ||
132 | int n; | ||
133 | int fd = open(name, O_RDONLY); | ||
134 | st.st_mode &= ~S_IXUSR; | ||
135 | if (fd >= 0) { | ||
136 | n = read(fd, buf, 2); | ||
137 | if (n == 2) | ||
138 | /* DOS executables start with "MZ" */ | ||
139 | if (!strcmp(buf, "#!") || !strcmp(buf, "MZ")) | ||
140 | st.st_mode |= S_IXUSR; | ||
141 | close(fd); | ||
142 | } | ||
143 | #endif | ||
144 | return st.st_mode & S_IXUSR; | 129 | return st.st_mode & S_IXUSR; |
145 | } | 130 | } |
146 | 131 | ||
diff --git a/tools/perf/util/pager.c b/tools/perf/util/pager.c index a28bccae5458..1915de20dcac 100644 --- a/tools/perf/util/pager.c +++ b/tools/perf/util/pager.c | |||
@@ -9,7 +9,6 @@ | |||
9 | 9 | ||
10 | static int spawned_pager; | 10 | static int spawned_pager; |
11 | 11 | ||
12 | #ifndef __MINGW32__ | ||
13 | static void pager_preexec(void) | 12 | static void pager_preexec(void) |
14 | { | 13 | { |
15 | /* | 14 | /* |
@@ -24,7 +23,6 @@ static void pager_preexec(void) | |||
24 | 23 | ||
25 | setenv("LESS", "FRSX", 0); | 24 | setenv("LESS", "FRSX", 0); |
26 | } | 25 | } |
27 | #endif | ||
28 | 26 | ||
29 | static const char *pager_argv[] = { "sh", "-c", NULL, NULL }; | 27 | static const char *pager_argv[] = { "sh", "-c", NULL, NULL }; |
30 | static struct child_process pager_process; | 28 | static struct child_process pager_process; |
@@ -70,9 +68,8 @@ void setup_pager(void) | |||
70 | pager_argv[2] = pager; | 68 | pager_argv[2] = pager; |
71 | pager_process.argv = pager_argv; | 69 | pager_process.argv = pager_argv; |
72 | pager_process.in = -1; | 70 | pager_process.in = -1; |
73 | #ifndef __MINGW32__ | ||
74 | pager_process.preexec_cb = pager_preexec; | 71 | pager_process.preexec_cb = pager_preexec; |
75 | #endif | 72 | |
76 | if (start_command(&pager_process)) | 73 | if (start_command(&pager_process)) |
77 | return; | 74 | return; |
78 | 75 | ||
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 35d04da38d6a..4d042f104cdc 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c | |||
@@ -16,32 +16,28 @@ struct event_symbol { | |||
16 | u8 type; | 16 | u8 type; |
17 | u64 config; | 17 | u64 config; |
18 | char *symbol; | 18 | char *symbol; |
19 | char *alias; | ||
19 | }; | 20 | }; |
20 | 21 | ||
21 | #define C(x, y) .type = PERF_TYPE_##x, .config = PERF_COUNT_##y | 22 | #define CHW(x) .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_##x |
22 | #define CR(x, y) .type = PERF_TYPE_##x, .config = y | 23 | #define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x |
23 | 24 | ||
24 | static struct event_symbol event_symbols[] = { | 25 | static struct event_symbol event_symbols[] = { |
25 | { C(HARDWARE, HW_CPU_CYCLES), "cpu-cycles", }, | 26 | { CHW(CPU_CYCLES), "cpu-cycles", "cycles" }, |
26 | { C(HARDWARE, HW_CPU_CYCLES), "cycles", }, | 27 | { CHW(INSTRUCTIONS), "instructions", "" }, |
27 | { C(HARDWARE, HW_INSTRUCTIONS), "instructions", }, | 28 | { CHW(CACHE_REFERENCES), "cache-references", "" }, |
28 | { C(HARDWARE, HW_CACHE_REFERENCES), "cache-references", }, | 29 | { CHW(CACHE_MISSES), "cache-misses", "" }, |
29 | { C(HARDWARE, HW_CACHE_MISSES), "cache-misses", }, | 30 | { CHW(BRANCH_INSTRUCTIONS), "branch-instructions", "branches" }, |
30 | { C(HARDWARE, HW_BRANCH_INSTRUCTIONS),"branch-instructions", }, | 31 | { CHW(BRANCH_MISSES), "branch-misses", "" }, |
31 | { C(HARDWARE, HW_BRANCH_INSTRUCTIONS),"branches", }, | 32 | { CHW(BUS_CYCLES), "bus-cycles", "" }, |
32 | { C(HARDWARE, HW_BRANCH_MISSES), "branch-misses", }, | 33 | |
33 | { C(HARDWARE, HW_BUS_CYCLES), "bus-cycles", }, | 34 | { CSW(CPU_CLOCK), "cpu-clock", "" }, |
34 | 35 | { CSW(TASK_CLOCK), "task-clock", "" }, | |
35 | { C(SOFTWARE, SW_CPU_CLOCK), "cpu-clock", }, | 36 | { CSW(PAGE_FAULTS), "page-faults", "faults" }, |
36 | { C(SOFTWARE, SW_TASK_CLOCK), "task-clock", }, | 37 | { CSW(PAGE_FAULTS_MIN), "minor-faults", "" }, |
37 | { C(SOFTWARE, SW_PAGE_FAULTS), "page-faults", }, | 38 | { CSW(PAGE_FAULTS_MAJ), "major-faults", "" }, |
38 | { C(SOFTWARE, SW_PAGE_FAULTS), "faults", }, | 39 | { CSW(CONTEXT_SWITCHES), "context-switches", "cs" }, |
39 | { C(SOFTWARE, SW_PAGE_FAULTS_MIN), "minor-faults", }, | 40 | { CSW(CPU_MIGRATIONS), "cpu-migrations", "migrations" }, |
40 | { C(SOFTWARE, SW_PAGE_FAULTS_MAJ), "major-faults", }, | ||
41 | { C(SOFTWARE, SW_CONTEXT_SWITCHES), "context-switches", }, | ||
42 | { C(SOFTWARE, SW_CONTEXT_SWITCHES), "cs", }, | ||
43 | { C(SOFTWARE, SW_CPU_MIGRATIONS), "cpu-migrations", }, | ||
44 | { C(SOFTWARE, SW_CPU_MIGRATIONS), "migrations", }, | ||
45 | }; | 41 | }; |
46 | 42 | ||
47 | #define __PERF_COUNTER_FIELD(config, name) \ | 43 | #define __PERF_COUNTER_FIELD(config, name) \ |
@@ -74,26 +70,70 @@ static char *sw_event_names[] = { | |||
74 | 70 | ||
75 | #define MAX_ALIASES 8 | 71 | #define MAX_ALIASES 8 |
76 | 72 | ||
77 | static char *hw_cache [][MAX_ALIASES] = { | 73 | static char *hw_cache[][MAX_ALIASES] = { |
78 | { "L1-data" , "l1-d", "l1d" }, | 74 | { "L1-d$", "l1-d", "l1d", "L1-data", }, |
79 | { "L1-instruction" , "l1-i", "l1i" }, | 75 | { "L1-i$", "l1-i", "l1i", "L1-instruction", }, |
80 | { "L2" , "l2" }, | 76 | { "LLC", "L2" }, |
81 | { "Data-TLB" , "dtlb", "d-tlb" }, | 77 | { "dTLB", "d-tlb", "Data-TLB", }, |
82 | { "Instruction-TLB" , "itlb", "i-tlb" }, | 78 | { "iTLB", "i-tlb", "Instruction-TLB", }, |
83 | { "Branch" , "bpu" , "btb", "bpc" }, | 79 | { "branch", "branches", "bpu", "btb", "bpc", }, |
84 | }; | 80 | }; |
85 | 81 | ||
86 | static char *hw_cache_op [][MAX_ALIASES] = { | 82 | static char *hw_cache_op[][MAX_ALIASES] = { |
87 | { "Load" , "read" }, | 83 | { "load", "loads", "read", }, |
88 | { "Store" , "write" }, | 84 | { "store", "stores", "write", }, |
89 | { "Prefetch" , "speculative-read", "speculative-load" }, | 85 | { "prefetch", "prefetches", "speculative-read", "speculative-load", }, |
90 | }; | 86 | }; |
91 | 87 | ||
92 | static char *hw_cache_result [][MAX_ALIASES] = { | 88 | static char *hw_cache_result[][MAX_ALIASES] = { |
93 | { "Reference" , "ops", "access" }, | 89 | { "refs", "Reference", "ops", "access", }, |
94 | { "Miss" }, | 90 | { "misses", "miss", }, |
95 | }; | 91 | }; |
96 | 92 | ||
93 | #define C(x) PERF_COUNT_HW_CACHE_##x | ||
94 | #define CACHE_READ (1 << C(OP_READ)) | ||
95 | #define CACHE_WRITE (1 << C(OP_WRITE)) | ||
96 | #define CACHE_PREFETCH (1 << C(OP_PREFETCH)) | ||
97 | #define COP(x) (1 << x) | ||
98 | |||
99 | /* | ||
100 | * cache operation stat | ||
101 | * L1I : Read and prefetch only | ||
102 | * ITLB and BPU : Read-only | ||
103 | */ | ||
104 | static unsigned long hw_cache_stat[C(MAX)] = { | ||
105 | [C(L1D)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH), | ||
106 | [C(L1I)] = (CACHE_READ | CACHE_PREFETCH), | ||
107 | [C(LL)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH), | ||
108 | [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH), | ||
109 | [C(ITLB)] = (CACHE_READ), | ||
110 | [C(BPU)] = (CACHE_READ), | ||
111 | }; | ||
112 | |||
113 | static int is_cache_op_valid(u8 cache_type, u8 cache_op) | ||
114 | { | ||
115 | if (hw_cache_stat[cache_type] & COP(cache_op)) | ||
116 | return 1; /* valid */ | ||
117 | else | ||
118 | return 0; /* invalid */ | ||
119 | } | ||
120 | |||
121 | static char *event_cache_name(u8 cache_type, u8 cache_op, u8 cache_result) | ||
122 | { | ||
123 | static char name[50]; | ||
124 | |||
125 | if (cache_result) { | ||
126 | sprintf(name, "%s-%s-%s", hw_cache[cache_type][0], | ||
127 | hw_cache_op[cache_op][0], | ||
128 | hw_cache_result[cache_result][0]); | ||
129 | } else { | ||
130 | sprintf(name, "%s-%s", hw_cache[cache_type][0], | ||
131 | hw_cache_op[cache_op][1]); | ||
132 | } | ||
133 | |||
134 | return name; | ||
135 | } | ||
136 | |||
97 | char *event_name(int counter) | 137 | char *event_name(int counter) |
98 | { | 138 | { |
99 | u64 config = attrs[counter].config; | 139 | u64 config = attrs[counter].config; |
@@ -113,7 +153,6 @@ char *event_name(int counter) | |||
113 | 153 | ||
114 | case PERF_TYPE_HW_CACHE: { | 154 | case PERF_TYPE_HW_CACHE: { |
115 | u8 cache_type, cache_op, cache_result; | 155 | u8 cache_type, cache_op, cache_result; |
116 | static char name[100]; | ||
117 | 156 | ||
118 | cache_type = (config >> 0) & 0xff; | 157 | cache_type = (config >> 0) & 0xff; |
119 | if (cache_type > PERF_COUNT_HW_CACHE_MAX) | 158 | if (cache_type > PERF_COUNT_HW_CACHE_MAX) |
@@ -127,12 +166,10 @@ char *event_name(int counter) | |||
127 | if (cache_result > PERF_COUNT_HW_CACHE_RESULT_MAX) | 166 | if (cache_result > PERF_COUNT_HW_CACHE_RESULT_MAX) |
128 | return "unknown-ext-hardware-cache-result"; | 167 | return "unknown-ext-hardware-cache-result"; |
129 | 168 | ||
130 | sprintf(name, "%s-Cache-%s-%ses", | 169 | if (!is_cache_op_valid(cache_type, cache_op)) |
131 | hw_cache[cache_type][0], | 170 | return "invalid-cache"; |
132 | hw_cache_op[cache_op][0], | ||
133 | hw_cache_result[cache_result][0]); | ||
134 | 171 | ||
135 | return name; | 172 | return event_cache_name(cache_type, cache_op, cache_result); |
136 | } | 173 | } |
137 | 174 | ||
138 | case PERF_TYPE_SOFTWARE: | 175 | case PERF_TYPE_SOFTWARE: |
@@ -163,7 +200,8 @@ static int parse_aliases(const char *str, char *names[][MAX_ALIASES], int size) | |||
163 | return -1; | 200 | return -1; |
164 | } | 201 | } |
165 | 202 | ||
166 | static int parse_generic_hw_symbols(const char *str, struct perf_counter_attr *attr) | 203 | static int |
204 | parse_generic_hw_symbols(const char *str, struct perf_counter_attr *attr) | ||
167 | { | 205 | { |
168 | int cache_type = -1, cache_op = 0, cache_result = 0; | 206 | int cache_type = -1, cache_op = 0, cache_result = 0; |
169 | 207 | ||
@@ -182,6 +220,9 @@ static int parse_generic_hw_symbols(const char *str, struct perf_counter_attr *a | |||
182 | if (cache_op == -1) | 220 | if (cache_op == -1) |
183 | cache_op = PERF_COUNT_HW_CACHE_OP_READ; | 221 | cache_op = PERF_COUNT_HW_CACHE_OP_READ; |
184 | 222 | ||
223 | if (!is_cache_op_valid(cache_type, cache_op)) | ||
224 | return -EINVAL; | ||
225 | |||
185 | cache_result = parse_aliases(str, hw_cache_result, | 226 | cache_result = parse_aliases(str, hw_cache_result, |
186 | PERF_COUNT_HW_CACHE_RESULT_MAX); | 227 | PERF_COUNT_HW_CACHE_RESULT_MAX); |
187 | /* | 228 | /* |
@@ -196,6 +237,19 @@ static int parse_generic_hw_symbols(const char *str, struct perf_counter_attr *a | |||
196 | return 0; | 237 | return 0; |
197 | } | 238 | } |
198 | 239 | ||
240 | static int check_events(const char *str, unsigned int i) | ||
241 | { | ||
242 | if (!strncmp(str, event_symbols[i].symbol, | ||
243 | strlen(event_symbols[i].symbol))) | ||
244 | return 1; | ||
245 | |||
246 | if (strlen(event_symbols[i].alias)) | ||
247 | if (!strncmp(str, event_symbols[i].alias, | ||
248 | strlen(event_symbols[i].alias))) | ||
249 | return 1; | ||
250 | return 0; | ||
251 | } | ||
252 | |||
199 | /* | 253 | /* |
200 | * Each event can have multiple symbolic names. | 254 | * Each event can have multiple symbolic names. |
201 | * Symbolic names are (almost) exactly matched. | 255 | * Symbolic names are (almost) exactly matched. |
@@ -235,9 +289,7 @@ static int parse_event_symbols(const char *str, struct perf_counter_attr *attr) | |||
235 | } | 289 | } |
236 | 290 | ||
237 | for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { | 291 | for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { |
238 | if (!strncmp(str, event_symbols[i].symbol, | 292 | if (check_events(str, i)) { |
239 | strlen(event_symbols[i].symbol))) { | ||
240 | |||
241 | attr->type = event_symbols[i].type; | 293 | attr->type = event_symbols[i].type; |
242 | attr->config = event_symbols[i].config; | 294 | attr->config = event_symbols[i].config; |
243 | 295 | ||
@@ -289,6 +341,7 @@ void print_events(void) | |||
289 | { | 341 | { |
290 | struct event_symbol *syms = event_symbols; | 342 | struct event_symbol *syms = event_symbols; |
291 | unsigned int i, type, prev_type = -1; | 343 | unsigned int i, type, prev_type = -1; |
344 | char name[40]; | ||
292 | 345 | ||
293 | fprintf(stderr, "\n"); | 346 | fprintf(stderr, "\n"); |
294 | fprintf(stderr, "List of pre-defined events (to be used in -e):\n"); | 347 | fprintf(stderr, "List of pre-defined events (to be used in -e):\n"); |
@@ -301,14 +354,18 @@ void print_events(void) | |||
301 | if (type != prev_type) | 354 | if (type != prev_type) |
302 | fprintf(stderr, "\n"); | 355 | fprintf(stderr, "\n"); |
303 | 356 | ||
304 | fprintf(stderr, " %-30s [%s]\n", syms->symbol, | 357 | if (strlen(syms->alias)) |
358 | sprintf(name, "%s OR %s", syms->symbol, syms->alias); | ||
359 | else | ||
360 | strcpy(name, syms->symbol); | ||
361 | fprintf(stderr, " %-40s [%s]\n", name, | ||
305 | event_type_descriptors[type]); | 362 | event_type_descriptors[type]); |
306 | 363 | ||
307 | prev_type = type; | 364 | prev_type = type; |
308 | } | 365 | } |
309 | 366 | ||
310 | fprintf(stderr, "\n"); | 367 | fprintf(stderr, "\n"); |
311 | fprintf(stderr, " %-30s [raw hardware event descriptor]\n", | 368 | fprintf(stderr, " %-40s [raw hardware event descriptor]\n", |
312 | "rNNN"); | 369 | "rNNN"); |
313 | fprintf(stderr, "\n"); | 370 | fprintf(stderr, "\n"); |
314 | 371 | ||
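parse-events.c folds the duplicated symbol rows into one table with an alias column, so "cycles", "branches", "faults", "cs" and "migrations" become aliases rather than separate entries, and check_events() accepts a prefix match on either spelling. The same lookup in isolation; the table below is a made-up subset and matches() stands in for check_events():

#include <stdio.h>
#include <string.h>

struct event_symbol {
	const char *symbol;
	const char *alias;	/* "" when the event has no short form */
};

static struct event_symbol event_symbols[] = {
	{ "cpu-cycles",			"cycles"	},
	{ "branch-instructions",	"branches"	},
	{ "context-switches",		"cs"		},
	{ "page-faults",		"faults"	},
};

/* prefix match against either spelling, as check_events() does */
static int matches(const char *str, const struct event_symbol *sym)
{
	if (!strncmp(str, sym->symbol, strlen(sym->symbol)))
		return 1;
	if (strlen(sym->alias) &&
	    !strncmp(str, sym->alias, strlen(sym->alias)))
		return 1;
	return 0;
}

int main(void)
{
	const char *request = "cs";
	unsigned int i;

	for (i = 0; i < sizeof(event_symbols) / sizeof(event_symbols[0]); i++)
		if (matches(request, &event_symbols[i]))
			printf("'%s' selects %s\n", request, event_symbols[i].symbol);
	return 0;
}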
diff --git a/tools/perf/util/run-command.c b/tools/perf/util/run-command.c index b2f5e854f40a..a3935343091a 100644 --- a/tools/perf/util/run-command.c +++ b/tools/perf/util/run-command.c | |||
@@ -65,7 +65,6 @@ int start_command(struct child_process *cmd) | |||
65 | cmd->err = fderr[0]; | 65 | cmd->err = fderr[0]; |
66 | } | 66 | } |
67 | 67 | ||
68 | #ifndef __MINGW32__ | ||
69 | fflush(NULL); | 68 | fflush(NULL); |
70 | cmd->pid = fork(); | 69 | cmd->pid = fork(); |
71 | if (!cmd->pid) { | 70 | if (!cmd->pid) { |
@@ -118,71 +117,6 @@ int start_command(struct child_process *cmd) | |||
118 | } | 117 | } |
119 | exit(127); | 118 | exit(127); |
120 | } | 119 | } |
121 | #else | ||
122 | int s0 = -1, s1 = -1, s2 = -1; /* backups of stdin, stdout, stderr */ | ||
123 | const char **sargv = cmd->argv; | ||
124 | char **env = environ; | ||
125 | |||
126 | if (cmd->no_stdin) { | ||
127 | s0 = dup(0); | ||
128 | dup_devnull(0); | ||
129 | } else if (need_in) { | ||
130 | s0 = dup(0); | ||
131 | dup2(fdin[0], 0); | ||
132 | } else if (cmd->in) { | ||
133 | s0 = dup(0); | ||
134 | dup2(cmd->in, 0); | ||
135 | } | ||
136 | |||
137 | if (cmd->no_stderr) { | ||
138 | s2 = dup(2); | ||
139 | dup_devnull(2); | ||
140 | } else if (need_err) { | ||
141 | s2 = dup(2); | ||
142 | dup2(fderr[1], 2); | ||
143 | } | ||
144 | |||
145 | if (cmd->no_stdout) { | ||
146 | s1 = dup(1); | ||
147 | dup_devnull(1); | ||
148 | } else if (cmd->stdout_to_stderr) { | ||
149 | s1 = dup(1); | ||
150 | dup2(2, 1); | ||
151 | } else if (need_out) { | ||
152 | s1 = dup(1); | ||
153 | dup2(fdout[1], 1); | ||
154 | } else if (cmd->out > 1) { | ||
155 | s1 = dup(1); | ||
156 | dup2(cmd->out, 1); | ||
157 | } | ||
158 | |||
159 | if (cmd->dir) | ||
160 | die("chdir in start_command() not implemented"); | ||
161 | if (cmd->env) { | ||
162 | env = copy_environ(); | ||
163 | for (; *cmd->env; cmd->env++) | ||
164 | env = env_setenv(env, *cmd->env); | ||
165 | } | ||
166 | |||
167 | if (cmd->perf_cmd) { | ||
168 | cmd->argv = prepare_perf_cmd(cmd->argv); | ||
169 | } | ||
170 | |||
171 | cmd->pid = mingw_spawnvpe(cmd->argv[0], cmd->argv, env); | ||
172 | |||
173 | if (cmd->env) | ||
174 | free_environ(env); | ||
175 | if (cmd->perf_cmd) | ||
176 | free(cmd->argv); | ||
177 | |||
178 | cmd->argv = sargv; | ||
179 | if (s0 >= 0) | ||
180 | dup2(s0, 0), close(s0); | ||
181 | if (s1 >= 0) | ||
182 | dup2(s1, 1), close(s1); | ||
183 | if (s2 >= 0) | ||
184 | dup2(s2, 2), close(s2); | ||
185 | #endif | ||
186 | 120 | ||
187 | if (cmd->pid < 0) { | 121 | if (cmd->pid < 0) { |
188 | int err = errno; | 122 | int err = errno; |
@@ -288,14 +222,6 @@ int run_command_v_opt_cd_env(const char **argv, int opt, const char *dir, const | |||
288 | return run_command(&cmd); | 222 | return run_command(&cmd); |
289 | } | 223 | } |
290 | 224 | ||
291 | #ifdef __MINGW32__ | ||
292 | static __stdcall unsigned run_thread(void *data) | ||
293 | { | ||
294 | struct async *async = data; | ||
295 | return async->proc(async->fd_for_proc, async->data); | ||
296 | } | ||
297 | #endif | ||
298 | |||
299 | int start_async(struct async *async) | 225 | int start_async(struct async *async) |
300 | { | 226 | { |
301 | int pipe_out[2]; | 227 | int pipe_out[2]; |
@@ -304,7 +230,6 @@ int start_async(struct async *async) | |||
304 | return error("cannot create pipe: %s", strerror(errno)); | 230 | return error("cannot create pipe: %s", strerror(errno)); |
305 | async->out = pipe_out[0]; | 231 | async->out = pipe_out[0]; |
306 | 232 | ||
307 | #ifndef __MINGW32__ | ||
308 | /* Flush stdio before fork() to avoid cloning buffers */ | 233 | /* Flush stdio before fork() to avoid cloning buffers */ |
309 | fflush(NULL); | 234 | fflush(NULL); |
310 | 235 | ||
@@ -319,33 +244,17 @@ int start_async(struct async *async) | |||
319 | exit(!!async->proc(pipe_out[1], async->data)); | 244 | exit(!!async->proc(pipe_out[1], async->data)); |
320 | } | 245 | } |
321 | close(pipe_out[1]); | 246 | close(pipe_out[1]); |
322 | #else | 247 | |
323 | async->fd_for_proc = pipe_out[1]; | ||
324 | async->tid = (HANDLE) _beginthreadex(NULL, 0, run_thread, async, 0, NULL); | ||
325 | if (!async->tid) { | ||
326 | error("cannot create thread: %s", strerror(errno)); | ||
327 | close_pair(pipe_out); | ||
328 | return -1; | ||
329 | } | ||
330 | #endif | ||
331 | return 0; | 248 | return 0; |
332 | } | 249 | } |
333 | 250 | ||
334 | int finish_async(struct async *async) | 251 | int finish_async(struct async *async) |
335 | { | 252 | { |
336 | #ifndef __MINGW32__ | ||
337 | int ret = 0; | 253 | int ret = 0; |
338 | 254 | ||
339 | if (wait_or_whine(async->pid)) | 255 | if (wait_or_whine(async->pid)) |
340 | ret = error("waitpid (async) failed"); | 256 | ret = error("waitpid (async) failed"); |
341 | #else | 257 | |
342 | DWORD ret = 0; | ||
343 | if (WaitForSingleObject(async->tid, INFINITE) != WAIT_OBJECT_0) | ||
344 | ret = error("waiting for thread failed: %lu", GetLastError()); | ||
345 | else if (!GetExitCodeThread(async->tid, &ret)) | ||
346 | ret = error("cannot get thread exit code: %lu", GetLastError()); | ||
347 | CloseHandle(async->tid); | ||
348 | #endif | ||
349 | return ret; | 258 | return ret; |
350 | } | 259 | } |
351 | 260 | ||
diff --git a/tools/perf/util/run-command.h b/tools/perf/util/run-command.h index 328289f23669..cc1837deba88 100644 --- a/tools/perf/util/run-command.h +++ b/tools/perf/util/run-command.h | |||
@@ -79,12 +79,7 @@ struct async { | |||
79 | int (*proc)(int fd, void *data); | 79 | int (*proc)(int fd, void *data); |
80 | void *data; | 80 | void *data; |
81 | int out; /* caller reads from here and closes it */ | 81 | int out; /* caller reads from here and closes it */ |
82 | #ifndef __MINGW32__ | ||
83 | pid_t pid; | 82 | pid_t pid; |
84 | #else | ||
85 | HANDLE tid; | ||
86 | int fd_for_proc; | ||
87 | #endif | ||
88 | }; | 83 | }; |
89 | 84 | ||
90 | int start_async(struct async *async); | 85 | int start_async(struct async *async); |
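With the MinGW thread fallback removed, struct async is back to a plain fork-and-pipe helper: the caller supplies a proc callback that writes to the fd it is handed, reads the result from async->out, and reaps the child with finish_async(). A minimal usage sketch follows; the produce() callback, message text, and buffer size are illustrative and not part of the patch:

    #include <unistd.h>
    #include "run-command.h"

    /* Whatever the callback writes to 'fd' becomes readable from as.out. */
    static int produce(int fd, void *data)
    {
            const char msg[] = "hello from the async child\n";

            (void)data;
            return write(fd, msg, sizeof(msg) - 1) < 0;
    }

    static int demo(void)
    {
            struct async as = { .proc = produce, .data = NULL };
            char buf[256];
            ssize_t n;

            if (start_async(&as))
                    return -1;                        /* pipe or fork failed */

            while ((n = read(as.out, buf, sizeof(buf))) > 0)
                    (void)write(STDOUT_FILENO, buf, (size_t)n);

            close(as.out);                            /* caller closes 'out' */
            return finish_async(&as);                 /* waits for the child */
    }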
diff --git a/tools/perf/util/strbuf.c b/tools/perf/util/strbuf.c index eaba09306802..464e7ca898cf 100644 --- a/tools/perf/util/strbuf.c +++ b/tools/perf/util/strbuf.c | |||
@@ -259,7 +259,7 @@ size_t strbuf_fread(struct strbuf *sb, size_t size, FILE *f) | |||
259 | res = fread(sb->buf + sb->len, 1, size, f); | 259 | res = fread(sb->buf + sb->len, 1, size, f); |
260 | if (res > 0) | 260 | if (res > 0) |
261 | strbuf_setlen(sb, sb->len + res); | 261 | strbuf_setlen(sb, sb->len + res); |
262 | else if (res < 0 && oldalloc == 0) | 262 | else if (oldalloc == 0) |
263 | strbuf_release(sb); | 263 | strbuf_release(sb); |
264 | return res; | 264 | return res; |
265 | } | 265 | } |
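The strbuf_fread() change removes a comparison that could never be true: res comes from fread(), whose size_t return value is never negative, so the cleanup branch was unreachable. With the fix, a strbuf that was unallocated on entry is returned to that state whenever the read produces nothing. Below is a usage sketch of the function as it now behaves; STRBUF_INIT is assumed to be the git-derived initializer carried in perf's strbuf.h, and the 4 KiB chunk size is arbitrary:

    #include <stdio.h>
    #include "strbuf.h"

    /* Read an entire stream into 'sb'; 0 on EOF, -1 on stream error. */
    static int slurp(FILE *f, struct strbuf *sb)
    {
            for (;;) {
                    size_t n = strbuf_fread(sb, 4096, f);

                    if (n == 0)
                            return ferror(f) ? -1 : 0;
            }
    }

    /*
     * struct strbuf sb = STRBUF_INIT;
     * if (slurp(stdin, &sb) == 0)
     *         ... use sb.buf / sb.len ...
     * strbuf_release(&sb);
     */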
diff --git a/tools/perf/util/string.h b/tools/perf/util/string.h index 37b03255b425..3dca2f654cd0 100644 --- a/tools/perf/util/string.h +++ b/tools/perf/util/string.h | |||
@@ -1,7 +1,7 @@ | |||
1 | #ifndef _PERF_STRING_H_ | 1 | #ifndef _PERF_STRING_H_ |
2 | #define _PERF_STRING_H_ | 2 | #define _PERF_STRING_H_ |
3 | 3 | ||
4 | #include "../types.h" | 4 | #include "types.h" |
5 | 5 | ||
6 | int hex2u64(const char *ptr, u64 *val); | 6 | int hex2u64(const char *ptr, u64 *val); |
7 | 7 | ||
diff --git a/tools/perf/util/strlist.c b/tools/perf/util/strlist.c new file mode 100644 index 000000000000..025a78edfffe --- /dev/null +++ b/tools/perf/util/strlist.c | |||
@@ -0,0 +1,184 @@ | |||
1 | /* | ||
2 | * (c) 2009 Arnaldo Carvalho de Melo <acme@redhat.com> | ||
3 | * | ||
4 | * Licensed under the GPLv2. | ||
5 | */ | ||
6 | |||
7 | #include "strlist.h" | ||
8 | #include <errno.h> | ||
9 | #include <stdio.h> | ||
10 | #include <stdlib.h> | ||
11 | #include <string.h> | ||
12 | |||
13 | static struct str_node *str_node__new(const char *s, bool dupstr) | ||
14 | { | ||
15 | struct str_node *self = malloc(sizeof(*self)); | ||
16 | |||
17 | if (self != NULL) { | ||
18 | if (dupstr) { | ||
19 | s = strdup(s); | ||
20 | if (s == NULL) | ||
21 | goto out_delete; | ||
22 | } | ||
23 | self->s = s; | ||
24 | } | ||
25 | |||
26 | return self; | ||
27 | |||
28 | out_delete: | ||
29 | free(self); | ||
30 | return NULL; | ||
31 | } | ||
32 | |||
33 | static void str_node__delete(struct str_node *self, bool dupstr) | ||
34 | { | ||
35 | if (dupstr) | ||
36 | free((void *)self->s); | ||
37 | free(self); | ||
38 | } | ||
39 | |||
40 | int strlist__add(struct strlist *self, const char *new_entry) | ||
41 | { | ||
42 | struct rb_node **p = &self->entries.rb_node; | ||
43 | struct rb_node *parent = NULL; | ||
44 | struct str_node *sn; | ||
45 | |||
46 | while (*p != NULL) { | ||
47 | int rc; | ||
48 | |||
49 | parent = *p; | ||
50 | sn = rb_entry(parent, struct str_node, rb_node); | ||
51 | rc = strcmp(sn->s, new_entry); | ||
52 | |||
53 | if (rc > 0) | ||
54 | p = &(*p)->rb_left; | ||
55 | else if (rc < 0) | ||
56 | p = &(*p)->rb_right; | ||
57 | else | ||
58 | return -EEXIST; | ||
59 | } | ||
60 | |||
61 | sn = str_node__new(new_entry, self->dupstr); | ||
62 | if (sn == NULL) | ||
63 | return -ENOMEM; | ||
64 | |||
65 | rb_link_node(&sn->rb_node, parent, p); | ||
66 | rb_insert_color(&sn->rb_node, &self->entries); | ||
67 | |||
68 | return 0; | ||
69 | } | ||
70 | |||
71 | int strlist__load(struct strlist *self, const char *filename) | ||
72 | { | ||
73 | char entry[1024]; | ||
74 | int err; | ||
75 | FILE *fp = fopen(filename, "r"); | ||
76 | |||
77 | if (fp == NULL) | ||
78 | return errno; | ||
79 | |||
80 | while (fgets(entry, sizeof(entry), fp) != NULL) { | ||
81 | const size_t len = strlen(entry); | ||
82 | |||
83 | if (len == 0) | ||
84 | continue; | ||
85 | entry[len - 1] = '\0'; | ||
86 | |||
87 | err = strlist__add(self, entry); | ||
88 | if (err != 0) | ||
89 | goto out; | ||
90 | } | ||
91 | |||
92 | err = 0; | ||
93 | out: | ||
94 | fclose(fp); | ||
95 | return err; | ||
96 | } | ||
97 | |||
98 | void strlist__remove(struct strlist *self, struct str_node *sn) | ||
99 | { | ||
100 | rb_erase(&sn->rb_node, &self->entries); | ||
101 | str_node__delete(sn, self->dupstr); | ||
102 | } | ||
103 | |||
104 | bool strlist__has_entry(struct strlist *self, const char *entry) | ||
105 | { | ||
106 | struct rb_node **p = &self->entries.rb_node; | ||
107 | struct rb_node *parent = NULL; | ||
108 | |||
109 | while (*p != NULL) { | ||
110 | struct str_node *sn; | ||
111 | int rc; | ||
112 | |||
113 | parent = *p; | ||
114 | sn = rb_entry(parent, struct str_node, rb_node); | ||
115 | rc = strcmp(sn->s, entry); | ||
116 | |||
117 | if (rc > 0) | ||
118 | p = &(*p)->rb_left; | ||
119 | else if (rc < 0) | ||
120 | p = &(*p)->rb_right; | ||
121 | else | ||
122 | return true; | ||
123 | } | ||
124 | |||
125 | return false; | ||
126 | } | ||
127 | |||
128 | static int strlist__parse_list_entry(struct strlist *self, const char *s) | ||
129 | { | ||
130 | if (strncmp(s, "file://", 7) == 0) | ||
131 | return strlist__load(self, s + 7); | ||
132 | |||
133 | return strlist__add(self, s); | ||
134 | } | ||
135 | |||
136 | int strlist__parse_list(struct strlist *self, const char *s) | ||
137 | { | ||
138 | char *sep; | ||
139 | int err; | ||
140 | |||
141 | while ((sep = strchr(s, ',')) != NULL) { | ||
142 | *sep = '\0'; | ||
143 | err = strlist__parse_list_entry(self, s); | ||
144 | *sep = ','; | ||
145 | if (err != 0) | ||
146 | return err; | ||
147 | s = sep + 1; | ||
148 | } | ||
149 | |||
150 | return *s ? strlist__parse_list_entry(self, s) : 0; | ||
151 | } | ||
152 | |||
153 | struct strlist *strlist__new(bool dupstr, const char *slist) | ||
154 | { | ||
155 | struct strlist *self = malloc(sizeof(*self)); | ||
156 | |||
157 | if (self != NULL) { | ||
158 | self->entries = RB_ROOT; | ||
159 | self->dupstr = dupstr; | ||
160 | if (slist && strlist__parse_list(self, slist) != 0) | ||
161 | goto out_error; | ||
162 | } | ||
163 | |||
164 | return self; | ||
165 | out_error: | ||
166 | free(self); | ||
167 | return NULL; | ||
168 | } | ||
169 | |||
170 | void strlist__delete(struct strlist *self) | ||
171 | { | ||
172 | if (self != NULL) { | ||
173 | struct str_node *pos; | ||
174 | struct rb_node *next = rb_first(&self->entries); | ||
175 | |||
176 | while (next) { | ||
177 | pos = rb_entry(next, struct str_node, rb_node); | ||
178 | next = rb_next(&pos->rb_node); | ||
179 | strlist__remove(self, pos); | ||
180 | } | ||
181 | self->entries = RB_ROOT; | ||
182 | free(self); | ||
183 | } | ||
184 | } | ||
diff --git a/tools/perf/util/strlist.h b/tools/perf/util/strlist.h new file mode 100644 index 000000000000..2fb117fb4b67 --- /dev/null +++ b/tools/perf/util/strlist.h | |||
@@ -0,0 +1,32 @@ | |||
1 | #ifndef STRLIST_H_ | ||
2 | #define STRLIST_H_ | ||
3 | |||
4 | #include "rbtree.h" | ||
5 | #include <stdbool.h> | ||
6 | |||
7 | struct str_node { | ||
8 | struct rb_node rb_node; | ||
9 | const char *s; | ||
10 | }; | ||
11 | |||
12 | struct strlist { | ||
13 | struct rb_root entries; | ||
14 | bool dupstr; | ||
15 | }; | ||
16 | |||
17 | struct strlist *strlist__new(bool dupstr, const char *slist); | ||
18 | void strlist__delete(struct strlist *self); | ||
19 | |||
20 | void strlist__remove(struct strlist *self, struct str_node *sn); | ||
21 | int strlist__load(struct strlist *self, const char *filename); | ||
22 | int strlist__add(struct strlist *self, const char *str); | ||
23 | |||
24 | bool strlist__has_entry(struct strlist *self, const char *entry); | ||
25 | |||
26 | static inline bool strlist__empty(const struct strlist *self) | ||
27 | { | ||
28 | return rb_first(&self->entries) == NULL; | ||
29 | } | ||
30 | |||
31 | int strlist__parse_list(struct strlist *self, const char *s); | ||
32 | #endif /* STRLIST_H_ */ | ||
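The new strlist.c/strlist.h pair adds a small rbtree-backed, sorted set of strings. A usage sketch follows; the entry names are arbitrary examples, with dupstr == true the list keeps its own copies of the strings, and a "file://..." entry passed to strlist__parse_list() would instead be read line by line via strlist__load():

    #include <stdio.h>
    #include "strlist.h"

    int main(void)
    {
            /* Parse a comma-separated list, duplicating each string. */
            struct strlist *names = strlist__new(true, "sched_switch,sched_wakeup");

            if (names == NULL)
                    return 1;

            strlist__add(names, "irq_handler_entry");   /* -EEXIST on duplicates */

            if (strlist__has_entry(names, "sched_switch"))
                    printf("sched_switch is present\n");

            printf("empty: %s\n", strlist__empty(names) ? "yes" : "no");

            strlist__delete(names);                     /* frees nodes and copies */
            return 0;
    }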
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 86e14375e74e..78c2efde01b7 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c | |||
@@ -520,7 +520,9 @@ static int dso__load_sym(struct dso *self, int fd, const char *name, | |||
520 | nr_syms = shdr.sh_size / shdr.sh_entsize; | 520 | nr_syms = shdr.sh_size / shdr.sh_entsize; |
521 | 521 | ||
522 | memset(&sym, 0, sizeof(sym)); | 522 | memset(&sym, 0, sizeof(sym)); |
523 | 523 | self->prelinked = elf_section_by_name(elf, &ehdr, &shdr, | |
524 | ".gnu.prelink_undo", | ||
525 | NULL) != NULL; | ||
524 | elf_symtab__for_each_symbol(syms, nr_syms, index, sym) { | 526 | elf_symtab__for_each_symbol(syms, nr_syms, index, sym) { |
525 | struct symbol *f; | 527 | struct symbol *f; |
526 | u64 obj_start; | 528 | u64 obj_start; |
@@ -535,7 +537,13 @@ static int dso__load_sym(struct dso *self, int fd, const char *name, | |||
535 | gelf_getshdr(sec, &shdr); | 537 | gelf_getshdr(sec, &shdr); |
536 | obj_start = sym.st_value; | 538 | obj_start = sym.st_value; |
537 | 539 | ||
538 | sym.st_value -= shdr.sh_addr - shdr.sh_offset; | 540 | if (self->prelinked) { |
541 | if (verbose >= 2) | ||
542 | printf("adjusting symbol: st_value: %Lx sh_addr: %Lx sh_offset: %Lx\n", | ||
543 | (u64)sym.st_value, (u64)shdr.sh_addr, (u64)shdr.sh_offset); | ||
544 | |||
545 | sym.st_value -= shdr.sh_addr - shdr.sh_offset; | ||
546 | } | ||
539 | 547 | ||
540 | f = symbol__new(sym.st_value, sym.st_size, | 548 | f = symbol__new(sym.st_value, sym.st_size, |
541 | elf_sym__name(&sym, symstrs), | 549 | elf_sym__name(&sym, symstrs), |
@@ -569,6 +577,8 @@ int dso__load(struct dso *self, symbol_filter_t filter, int verbose) | |||
569 | if (!name) | 577 | if (!name) |
570 | return -1; | 578 | return -1; |
571 | 579 | ||
580 | self->prelinked = 0; | ||
581 | |||
572 | if (strncmp(self->name, "/tmp/perf-", 10) == 0) | 582 | if (strncmp(self->name, "/tmp/perf-", 10) == 0) |
573 | return dso__load_perf_map(self, filter, verbose); | 583 | return dso__load_perf_map(self, filter, verbose); |
574 | 584 | ||
@@ -629,7 +639,7 @@ int dso__load_kernel(struct dso *self, const char *vmlinux, | |||
629 | if (vmlinux) | 639 | if (vmlinux) |
630 | err = dso__load_vmlinux(self, vmlinux, filter, verbose); | 640 | err = dso__load_vmlinux(self, vmlinux, filter, verbose); |
631 | 641 | ||
632 | if (err) | 642 | if (err < 0) |
633 | err = dso__load_kallsyms(self, filter, verbose); | 643 | err = dso__load_kallsyms(self, filter, verbose); |
634 | 644 | ||
635 | return err; | 645 | return err; |
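The prelink handling above keys off the .gnu.prelink_undo section that prelink leaves behind in binaries it has modified; only for such DSOs is st_value rebased from the section's virtual address to its file offset. For reference, a standalone sketch of the same detection using plain libelf; elf_is_prelinked() is a hypothetical helper, and elf_getshdrstrndx() is assumed to be provided by the installed libelf:

    #include <fcntl.h>
    #include <gelf.h>
    #include <libelf.h>
    #include <stdbool.h>
    #include <string.h>
    #include <unistd.h>

    static bool elf_is_prelinked(const char *path)
    {
            bool found = false;
            Elf_Scn *scn = NULL;
            size_t shstrndx;
            Elf *elf;
            int fd = open(path, O_RDONLY);

            if (fd < 0)
                    return false;

            elf_version(EV_CURRENT);
            elf = elf_begin(fd, ELF_C_READ, NULL);
            if (elf == NULL)
                    goto out_close;
            if (elf_getshdrstrndx(elf, &shstrndx) != 0)
                    goto out_end;

            /* Walk the section headers looking for .gnu.prelink_undo. */
            while (!found && (scn = elf_nextscn(elf, scn)) != NULL) {
                    GElf_Shdr shdr;
                    const char *name;

                    if (gelf_getshdr(scn, &shdr) == NULL)
                            break;
                    name = elf_strptr(elf, shstrndx, shdr.sh_name);
                    if (name && strcmp(name, ".gnu.prelink_undo") == 0)
                            found = true;
            }
    out_end:
            elf_end(elf);
    out_close:
            close(fd);
            return found;
    }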
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index ea332e56e458..2c48ace8203b 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h | |||
@@ -2,7 +2,7 @@ | |||
2 | #define _PERF_SYMBOL_ 1 | 2 | #define _PERF_SYMBOL_ 1 |
3 | 3 | ||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | #include "../types.h" | 5 | #include "types.h" |
6 | #include "list.h" | 6 | #include "list.h" |
7 | #include "rbtree.h" | 7 | #include "rbtree.h" |
8 | 8 | ||
@@ -20,8 +20,9 @@ struct symbol { | |||
20 | struct dso { | 20 | struct dso { |
21 | struct list_head node; | 21 | struct list_head node; |
22 | struct rb_root syms; | 22 | struct rb_root syms; |
23 | unsigned int sym_priv_size; | ||
24 | struct symbol *(*find_symbol)(struct dso *, u64 ip); | 23 | struct symbol *(*find_symbol)(struct dso *, u64 ip); |
24 | unsigned int sym_priv_size; | ||
25 | unsigned char prelinked; | ||
25 | char name[0]; | 26 | char name[0]; |
26 | }; | 27 | }; |
27 | 28 | ||
diff --git a/tools/perf/types.h b/tools/perf/util/types.h index 5e75f9005940..5e75f9005940 100644 --- a/tools/perf/types.h +++ b/tools/perf/util/types.h | |||
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h index b8cfed776d81..b4be6071c105 100644 --- a/tools/perf/util/util.h +++ b/tools/perf/util/util.h | |||
@@ -67,7 +67,6 @@ | |||
67 | #include <assert.h> | 67 | #include <assert.h> |
68 | #include <regex.h> | 68 | #include <regex.h> |
69 | #include <utime.h> | 69 | #include <utime.h> |
70 | #ifndef __MINGW32__ | ||
71 | #include <sys/wait.h> | 70 | #include <sys/wait.h> |
72 | #include <sys/poll.h> | 71 | #include <sys/poll.h> |
73 | #include <sys/socket.h> | 72 | #include <sys/socket.h> |
@@ -81,20 +80,6 @@ | |||
81 | #include <netdb.h> | 80 | #include <netdb.h> |
82 | #include <pwd.h> | 81 | #include <pwd.h> |
83 | #include <inttypes.h> | 82 | #include <inttypes.h> |
84 | #if defined(__CYGWIN__) | ||
85 | #undef _XOPEN_SOURCE | ||
86 | #include <grp.h> | ||
87 | #define _XOPEN_SOURCE 600 | ||
88 | #include "compat/cygwin.h" | ||
89 | #else | ||
90 | #undef _ALL_SOURCE /* AIX 5.3L defines a struct list with _ALL_SOURCE. */ | ||
91 | #include <grp.h> | ||
92 | #define _ALL_SOURCE 1 | ||
93 | #endif | ||
94 | #else /* __MINGW32__ */ | ||
95 | /* pull in Windows compatibility stuff */ | ||
96 | #include "compat/mingw.h" | ||
97 | #endif /* __MINGW32__ */ | ||
98 | 83 | ||
99 | #ifndef NO_ICONV | 84 | #ifndef NO_ICONV |
100 | #include <iconv.h> | 85 | #include <iconv.h> |
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 764554350ed8..2884baf1d5f9 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
@@ -746,6 +746,7 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req) | |||
746 | cpumask_clear(cpus); | 746 | cpumask_clear(cpus); |
747 | 747 | ||
748 | me = get_cpu(); | 748 | me = get_cpu(); |
749 | spin_lock(&kvm->requests_lock); | ||
749 | for (i = 0; i < KVM_MAX_VCPUS; ++i) { | 750 | for (i = 0; i < KVM_MAX_VCPUS; ++i) { |
750 | vcpu = kvm->vcpus[i]; | 751 | vcpu = kvm->vcpus[i]; |
751 | if (!vcpu) | 752 | if (!vcpu) |
@@ -762,6 +763,7 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req) | |||
762 | smp_call_function_many(cpus, ack_flush, NULL, 1); | 763 | smp_call_function_many(cpus, ack_flush, NULL, 1); |
763 | else | 764 | else |
764 | called = false; | 765 | called = false; |
766 | spin_unlock(&kvm->requests_lock); | ||
765 | put_cpu(); | 767 | put_cpu(); |
766 | free_cpumask_var(cpus); | 768 | free_cpumask_var(cpus); |
767 | return called; | 769 | return called; |
@@ -982,6 +984,7 @@ static struct kvm *kvm_create_vm(void) | |||
982 | kvm->mm = current->mm; | 984 | kvm->mm = current->mm; |
983 | atomic_inc(&kvm->mm->mm_count); | 985 | atomic_inc(&kvm->mm->mm_count); |
984 | spin_lock_init(&kvm->mmu_lock); | 986 | spin_lock_init(&kvm->mmu_lock); |
987 | spin_lock_init(&kvm->requests_lock); | ||
985 | kvm_io_bus_init(&kvm->pio_bus); | 988 | kvm_io_bus_init(&kvm->pio_bus); |
986 | mutex_init(&kvm->lock); | 989 | mutex_init(&kvm->lock); |
987 | kvm_io_bus_init(&kvm->mmio_bus); | 990 | kvm_io_bus_init(&kvm->mmio_bus); |
@@ -1194,6 +1197,8 @@ int __kvm_set_memory_region(struct kvm *kvm, | |||
1194 | if (!new.dirty_bitmap) | 1197 | if (!new.dirty_bitmap) |
1195 | goto out_free; | 1198 | goto out_free; |
1196 | memset(new.dirty_bitmap, 0, dirty_bytes); | 1199 | memset(new.dirty_bitmap, 0, dirty_bytes); |
1200 | if (old.npages) | ||
1201 | kvm_arch_flush_shadow(kvm); | ||
1197 | } | 1202 | } |
1198 | #endif /* not defined CONFIG_S390 */ | 1203 | #endif /* not defined CONFIG_S390 */ |
1199 | 1204 | ||