596 files changed, 10459 insertions, 3823 deletions
diff --git a/Documentation/ABI/obsolete/proc-pid-oom_adj b/Documentation/ABI/obsolete/proc-pid-oom_adj new file mode 100644 index 000000000000..cf63f264ce0f --- /dev/null +++ b/Documentation/ABI/obsolete/proc-pid-oom_adj | |||
@@ -0,0 +1,22 @@ | |||
1 | What: /proc/<pid>/oom_adj | ||
2 | When: August 2012 | ||
3 | Why: /proc/<pid>/oom_adj allows userspace to influence the oom killer's | ||
4 | badness heuristic used to determine which task to kill when the kernel | ||
5 | is out of memory. | ||
6 | |||
7 | The badness heuristic has been rewritten since the introduction of | ||
8 | this tunable such that its meaning is deprecated. The value was | ||
9 | implemented as a bitshift on a score generated by the badness() | ||
10 | function that did not have any precise units of measure. With the | ||
11 | rewrite, the score is given as a proportion of available memory to the | ||
12 | task allocating pages, so a bitshift which grows the score | ||
13 | exponentially cannot be tuned with fine granularity. | ||
14 | |||
15 | A much more powerful interface, /proc/<pid>/oom_score_adj, was | ||
16 | introduced with the oom killer rewrite that allows users to increase or | ||
17 | decrease the badness() score linearly. This interface will replace | ||
18 | /proc/<pid>/oom_adj. | ||
19 | |||
20 | A warning will be emitted to the kernel log if an application uses this | ||
21 | deprecated interface. After it is printed once, future warnings will be | ||
22 | suppressed until the kernel is rebooted. | ||
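As an illustration of the replacement interface described above (a sketch, not taken from the kernel tree), a minimal userspace helper that writes a linear adjustment to /proc/<pid>/oom_score_adj; the helper name is hypothetical, and the documented valid range is -1000..1000:

#include <stdio.h>
#include <sys/types.h>

/* Hypothetical helper: apply a linear oom_score_adj (-1000..1000) to a task.
 * Negative values make the task less likely to be killed, positive values
 * more likely. */
static int set_oom_score_adj(pid_t pid, int adj)
{
	char path[64];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%d/oom_score_adj", (int)pid);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%d\n", adj);
	return fclose(f);
}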
diff --git a/Documentation/arm/OMAP/DSS b/Documentation/arm/OMAP/DSS index 0af0e9eed5d6..888ae7b83ae4 100644 --- a/Documentation/arm/OMAP/DSS +++ b/Documentation/arm/OMAP/DSS | |||
@@ -255,9 +255,10 @@ framebuffer parameters. | |||
255 | Kernel boot arguments | 255 | Kernel boot arguments |
256 | --------------------- | 256 | --------------------- |
257 | 257 | ||
258 | vram=<size> | 258 | vram=<size>[,<physaddr>] |
259 | - Amount of total VRAM to preallocate. For example, "10M". omapfb | 259 | - Amount of total VRAM to preallocate and optionally a physical start |
260 | allocates memory for framebuffers from VRAM. | 260 | memory address. For example, "10M". omapfb allocates memory for |
261 | framebuffers from VRAM. | ||
261 | 262 | ||
262 | omapfb.mode=<display>:<mode>[,...] | 263 | omapfb.mode=<display>:<mode>[,...] |
263 | - Default video mode for specified displays. For example, | 264 | - Default video mode for specified displays. For example, |
diff --git a/Documentation/block/switching-sched.txt b/Documentation/block/switching-sched.txt index d5af3f630814..71cfbdc0f74d 100644 --- a/Documentation/block/switching-sched.txt +++ b/Documentation/block/switching-sched.txt | |||
@@ -16,7 +16,7 @@ you can do so by typing: | |||
16 | As of the Linux 2.6.10 kernel, it is now possible to change the | 16 | As of the Linux 2.6.10 kernel, it is now possible to change the |
17 | IO scheduler for a given block device on the fly (thus making it possible, | 17 | IO scheduler for a given block device on the fly (thus making it possible, |
18 | for instance, to set the CFQ scheduler for the system default, but | 18 | for instance, to set the CFQ scheduler for the system default, but |
19 | set a specific device to use the anticipatory or noop schedulers - which | 19 | set a specific device to use the deadline or noop schedulers - which |
20 | can improve that device's throughput). | 20 | can improve that device's throughput). |
21 | 21 | ||
22 | To set a specific scheduler, simply do this: | 22 | To set a specific scheduler, simply do this: |
@@ -31,7 +31,7 @@ a "cat /sys/block/DEV/queue/scheduler" - the list of valid names | |||
31 | will be displayed, with the currently selected scheduler in brackets: | 31 | will be displayed, with the currently selected scheduler in brackets: |
32 | 32 | ||
33 | # cat /sys/block/hda/queue/scheduler | 33 | # cat /sys/block/hda/queue/scheduler |
34 | noop anticipatory deadline [cfq] | 34 | noop deadline [cfq] |
35 | # echo anticipatory > /sys/block/hda/queue/scheduler | 35 | # echo deadline > /sys/block/hda/queue/scheduler |
36 | # cat /sys/block/hda/queue/scheduler | 36 | # cat /sys/block/hda/queue/scheduler |
37 | noop [anticipatory] deadline cfq | 37 | noop [deadline] cfq |
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt index d8f36f984faa..6c2f55e05f13 100644 --- a/Documentation/feature-removal-schedule.txt +++ b/Documentation/feature-removal-schedule.txt | |||
@@ -554,3 +554,13 @@ Why: This is a legacy interface which has been replaced by a more | |||
554 | Who: NeilBrown <neilb@suse.de> | 554 | Who: NeilBrown <neilb@suse.de> |
555 | 555 | ||
556 | ---------------------------- | 556 | ---------------------------- |
557 | |||
558 | What: i2c_adapter.id | ||
559 | When: June 2011 | ||
560 | Why: This field is deprecated. I2C device drivers shouldn't change their | ||
561 | behavior based on the underlying I2C adapter. Instead, the I2C | ||
562 | adapter driver should instantiate the I2C devices and provide the | ||
563 | needed platform-specific information. | ||
564 | Who: Jean Delvare <khali@linux-fr.org> | ||
565 | |||
566 | ---------------------------- | ||
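The entry above says adapter drivers (or board code) should instantiate I2C devices themselves rather than have client drivers branch on i2c_adapter.id. A hedged sketch of the usual board-code approach using i2c_register_board_info(); the device name, address and bus number are invented for the example:

#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>

/* Example board code: declare the devices that sit on I2C bus 0 so that
 * they are instantiated when the adapter for that bus is registered. */
static struct i2c_board_info __initdata example_i2c_bus0_devices[] = {
	{ I2C_BOARD_INFO("example-sensor", 0x48), },
};

static void __init example_board_declare_i2c(void)
{
	i2c_register_board_info(0, example_i2c_bus0_devices,
				ARRAY_SIZE(example_i2c_bus0_devices));
}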
diff --git a/Documentation/filesystems/xfs-delayed-logging-design.txt b/Documentation/filesystems/xfs-delayed-logging-design.txt index 96d0df28bed3..7445bf335dae 100644 --- a/Documentation/filesystems/xfs-delayed-logging-design.txt +++ b/Documentation/filesystems/xfs-delayed-logging-design.txt | |||
@@ -794,17 +794,6 @@ designed. | |||
794 | 794 | ||
795 | Roadmap: | 795 | Roadmap: |
796 | 796 | ||
797 | 2.6.37 Remove experimental tag from mount option | ||
798 | => should be roughly 6 months after initial merge | ||
799 | => enough time to: | ||
800 | => gain confidence and fix problems reported by early | ||
801 | adopters (a.k.a. guinea pigs) | ||
802 | => address worst performance regressions and undesired | ||
803 | behaviours | ||
804 | => start tuning/optimising code for parallelism | ||
805 | => start tuning/optimising algorithms consuming | ||
806 | excessive CPU time | ||
807 | |||
808 | 2.6.39 Switch default mount option to use delayed logging | 797 | 2.6.39 Switch default mount option to use delayed logging |
809 | => should be roughly 12 months after initial merge | 798 | => should be roughly 12 months after initial merge |
810 | => enough time to shake out remaining problems before next round of | 799 | => enough time to shake out remaining problems before next round of |
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index ed45e9802aa8..92e83e53148f 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt | |||
@@ -706,7 +706,7 @@ and is between 256 and 4096 characters. It is defined in the file | |||
706 | arch/x86/kernel/cpu/cpufreq/elanfreq.c. | 706 | arch/x86/kernel/cpu/cpufreq/elanfreq.c. |
707 | 707 | ||
708 | elevator= [IOSCHED] | 708 | elevator= [IOSCHED] |
709 | Format: {"anticipatory" | "cfq" | "deadline" | "noop"} | 709 | Format: {"cfq" | "deadline" | "noop"} |
710 | See Documentation/block/as-iosched.txt and | 710 | See Documentation/block/as-iosched.txt and |
711 | Documentation/block/deadline-iosched.txt for details. | 711 | Documentation/block/deadline-iosched.txt for details. |
712 | 712 | ||
diff --git a/Documentation/leds-class.txt b/Documentation/leds-class.txt index 8fd5ca2ae32d..58b266bd1846 100644 --- a/Documentation/leds-class.txt +++ b/Documentation/leds-class.txt | |||
@@ -60,15 +60,18 @@ Hardware accelerated blink of LEDs | |||
60 | 60 | ||
61 | Some LEDs can be programmed to blink without any CPU interaction. To | 61 | Some LEDs can be programmed to blink without any CPU interaction. To |
62 | support this feature, a LED driver can optionally implement the | 62 | support this feature, a LED driver can optionally implement the |
63 | blink_set() function (see <linux/leds.h>). If implemented, triggers can | 63 | blink_set() function (see <linux/leds.h>). To set an LED to blinking, |
64 | attempt to use it before falling back to software timers. The blink_set() | 64 | however, it is better to use the API function led_blink_set(), |
65 | function should return 0 if the blink setting is supported, or -EINVAL | 65 | as it will check and implement software fallback if necessary. |
66 | otherwise, which means that LED blinking will be handled by software. | 66 | |
67 | 67 | To turn off blinking again, use the API function led_brightness_set() | |
68 | The blink_set() function should choose a user friendly blinking | 68 | as that will not just set the LED brightness but also stop any software |
69 | value if it is called with *delay_on==0 && *delay_off==0 parameters. In | 69 | timers that may have been required for blinking. |
70 | this case the driver should give back the chosen value through delay_on | 70 | |
71 | and delay_off parameters to the leds subsystem. | 71 | The blink_set() function should choose a user friendly blinking value |
72 | if it is called with *delay_on==0 && *delay_off==0 parameters. In this | ||
73 | case the driver should give back the chosen value through delay_on and | ||
74 | delay_off parameters to the leds subsystem. | ||
72 | 75 | ||
73 | Setting the brightness to zero with brightness_set() callback function | 76 | Setting the brightness to zero with brightness_set() callback function |
74 | should completely turn off the LED and cancel the previously programmed | 77 | should completely turn off the LED and cancel the previously programmed |
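To make the recommended calls concrete, a small hedged sketch (not part of the patch) of starting a blink with led_blink_set() and stopping it with led_brightness_set(); the 500 ms on/off times are arbitrary:

#include <linux/leds.h>

/* Blink an LED at roughly 1 Hz. If the driver implements blink_set(),
 * the hardware does the blinking; otherwise the LED core falls back to
 * a software timer. Passing *delay_on == *delay_off == 0 instead would
 * let the driver pick its preferred rate. */
static void example_start_blink(struct led_classdev *led)
{
	unsigned long delay_on = 500;	/* ms */
	unsigned long delay_off = 500;	/* ms */

	led_blink_set(led, &delay_on, &delay_off);
}

/* Stop blinking: setting a brightness also cancels any software timer. */
static void example_stop_blink(struct led_classdev *led)
{
	led_brightness_set(led, LED_OFF);
}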
diff --git a/Documentation/leds/leds-lp5521.txt b/Documentation/leds/leds-lp5521.txt new file mode 100644 index 000000000000..c4d8d151e0fe --- /dev/null +++ b/Documentation/leds/leds-lp5521.txt | |||
@@ -0,0 +1,88 @@ | |||
1 | Kernel driver for lp5521 | ||
2 | ======================== | ||
3 | |||
4 | * National Semiconductor LP5521 led driver chip | ||
5 | * Datasheet: http://www.national.com/pf/LP/LP5521.html | ||
6 | |||
7 | Authors: Mathias Nyman, Yuri Zaporozhets, Samu Onkalo | ||
8 | Contact: Samu Onkalo (samu.p.onkalo-at-nokia.com) | ||
9 | |||
10 | Description | ||
11 | ----------- | ||
12 | |||
13 | LP5521 can drive up to 3 channels. Leds can be controlled directly via | ||
14 | the led class control interface. Channels have generic names: | ||
15 | lp5521:channelx, where x is 0 .. 2 | ||
16 | |||
17 | All three channels can also be controlled using the engine micro programs. | ||
18 | More details of the instructions can be found in the public data sheet. | ||
19 | |||
20 | Control interface for the engines: | ||
21 | x is 1 .. 3 | ||
22 | enginex_mode : disabled, load, run | ||
23 | enginex_load : store program (visible only in engine load mode) | ||
24 | |||
25 | Example (start to blink the channel 2 led): | ||
26 | cd /sys/class/leds/lp5521:channel2/device | ||
27 | echo "load" > engine3_mode | ||
28 | echo "037f4d0003ff6000" > engine3_load | ||
29 | echo "run" > engine3_mode | ||
30 | |||
31 | stop the engine: | ||
32 | echo "disabled" > engine3_mode | ||
33 | |||
34 | sysfs contains a selftest entry. | ||
35 | The test communicates with the chip and checks that | ||
36 | the clock mode is automatically set to the requested one. | ||
37 | |||
38 | Each channel has its own led current settings. | ||
39 | /sys/class/leds/lp5521:channel0/led_current - RW | ||
40 | /sys/class/leds/lp5521:channel0/max_current - RO | ||
41 | Format: 10x mA, i.e. 10 means 1.0 mA | ||
42 | |||
43 | example platform data: | ||
44 | |||
45 | Note: chan_nr can have values between 0 and 2. | ||
46 | |||
47 | static struct lp5521_led_config lp5521_led_config[] = { | ||
48 | { | ||
49 | .chan_nr = 0, | ||
50 | .led_current = 50, | ||
51 | .max_current = 130, | ||
52 | }, { | ||
53 | .chan_nr = 1, | ||
54 | .led_current = 0, | ||
55 | .max_current = 130, | ||
56 | }, { | ||
57 | .chan_nr = 2, | ||
58 | .led_current = 0, | ||
59 | .max_current = 130, | ||
60 | } | ||
61 | }; | ||
62 | |||
63 | static int lp5521_setup(void) | ||
64 | { | ||
65 | /* setup HW resources */ | ||
66 | } | ||
67 | |||
68 | static void lp5521_release(void) | ||
69 | { | ||
70 | /* Release HW resources */ | ||
71 | } | ||
72 | |||
73 | static void lp5521_enable(bool state) | ||
74 | { | ||
75 | /* Control of chip enable signal */ | ||
76 | } | ||
77 | |||
78 | static struct lp5521_platform_data lp5521_platform_data = { | ||
79 | .led_config = lp5521_led_config, | ||
80 | .num_channels = ARRAY_SIZE(lp5521_led_config), | ||
81 | .clock_mode = LP5521_CLOCK_EXT, | ||
82 | .setup_resources = lp5521_setup, | ||
83 | .release_resources = lp5521_release, | ||
84 | .enable = lp5521_enable, | ||
85 | }; | ||
86 | |||
87 | If the current is set to 0 in the platform data, that channel is | ||
88 | disabled and it is not visible in the sysfs. | ||
diff --git a/Documentation/leds/leds-lp5523.txt b/Documentation/leds/leds-lp5523.txt new file mode 100644 index 000000000000..fad2feb8b7ce --- /dev/null +++ b/Documentation/leds/leds-lp5523.txt | |||
@@ -0,0 +1,83 @@ | |||
1 | Kernel driver for lp5523 | ||
2 | ======================== | ||
3 | |||
4 | * National Semiconductor LP5523 led driver chip | ||
5 | * Datasheet: http://www.national.com/pf/LP/LP5523.html | ||
6 | |||
7 | Authors: Mathias Nyman, Yuri Zaporozhets, Samu Onkalo | ||
8 | Contact: Samu Onkalo (samu.p.onkalo-at-nokia.com) | ||
9 | |||
10 | Description | ||
11 | ----------- | ||
12 | LP5523 can drive up to 9 channels. Leds can be controlled directly via | ||
13 | the led class control interface. Channels have generic names: | ||
14 | lp5523:channelx where x is 0...8 | ||
15 | |||
16 | The chip provides 3 engines. Each engine can control channels without | ||
17 | interaction from the main CPU. Details of the micro engine code can be found | ||
18 | in the public data sheet. Leds can be muxed to different channels. | ||
19 | |||
20 | Control interface for the engines: | ||
21 | x is 1 .. 3 | ||
22 | enginex_mode : disabled, load, run | ||
23 | enginex_load : microcode load (visible only in load mode) | ||
24 | enginex_leds : led mux control (visible only in load mode) | ||
25 | |||
26 | cd /sys/class/leds/lp5523:channel2/device | ||
27 | echo "load" > engine3_mode | ||
28 | echo "9d80400004ff05ff437f0000" > engine3_load | ||
29 | echo "111111111" > engine3_leds | ||
30 | echo "run" > engine3_mode | ||
31 | |||
32 | sysfs contains a selftest entry. It measures each channel | ||
33 | voltage level and checks if it looks reasonable. If the level is too high, | ||
34 | the led is missing; if the level is too low, there is a short circuit. | ||
35 | |||
36 | Selftest always uses the current from the platform data. | ||
37 | |||
38 | Each channel contains led current settings. | ||
39 | /sys/class/leds/lp5523:channel2/led_current - RW | ||
40 | /sys/class/leds/lp5523:channel2/max_current - RO | ||
41 | Format: 10x mA, i.e. 10 means 1.0 mA | ||
42 | |||
43 | Example platform data: | ||
44 | |||
45 | Note - chan_nr can have values between 0 and 8. | ||
46 | |||
47 | static struct lp5523_led_config lp5523_led_config[] = { | ||
48 | { | ||
49 | .chan_nr = 0, | ||
50 | .led_current = 50, | ||
51 | .max_current = 130, | ||
52 | }, | ||
53 | ... | ||
54 | }, { | ||
55 | .chan_nr = 8, | ||
56 | .led_current = 50, | ||
57 | .max_current = 130, | ||
58 | } | ||
59 | }; | ||
60 | |||
61 | static int lp5523_setup(void) | ||
62 | { | ||
63 | /* Setup HW resources */ | ||
64 | } | ||
65 | |||
66 | static void lp5523_release(void) | ||
67 | { | ||
68 | /* Release HW resources */ | ||
69 | } | ||
70 | |||
71 | static void lp5523_enable(bool state) | ||
72 | { | ||
73 | /* Control chip enable signal */ | ||
74 | } | ||
75 | |||
76 | static struct lp5523_platform_data lp5523_platform_data = { | ||
77 | .led_config = lp5523_led_config, | ||
78 | .num_channels = ARRAY_SIZE(lp5523_led_config), | ||
79 | .clock_mode = LP5523_CLOCK_EXT, | ||
80 | .setup_resources = lp5523_setup, | ||
81 | .release_resources = lp5523_release, | ||
82 | .enable = lp5523_enable, | ||
83 | }; | ||
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index c7165f4cb792..fe95105992c5 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt | |||
@@ -20,6 +20,15 @@ ip_no_pmtu_disc - BOOLEAN | |||
20 | min_pmtu - INTEGER | 20 | min_pmtu - INTEGER |
21 | default 562 - minimum discovered Path MTU | 21 | default 562 - minimum discovered Path MTU |
22 | 22 | ||
23 | route/max_size - INTEGER | ||
24 | Maximum number of routes allowed in the kernel. Increase | ||
25 | this when using large numbers of interfaces and/or routes. | ||
26 | |||
27 | neigh/default/gc_thresh3 - INTEGER | ||
28 | Maximum number of neighbor entries allowed. Increase this | ||
29 | when using large numbers of interfaces and when communicating | ||
30 | with large numbers of directly-connected peers. | ||
31 | |||
23 | mtu_expires - INTEGER | 32 | mtu_expires - INTEGER |
24 | Time, in seconds, that cached PMTU information is kept. | 33 | Time, in seconds, that cached PMTU information is kept. |
25 | 34 | ||
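The two new tunables documented above live under /proc/sys like the rest of this file; a hedged C sketch of raising them at startup by writing the paths named in the entries (the chosen values are arbitrary examples, not recommendations):

#include <stdio.h>

/* Write a single integer to a /proc/sys file; returns 0 on success. */
static int write_sysctl(const char *path, long value)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%ld\n", value);
	return fclose(f);
}

static void example_scale_up(void)
{
	write_sysctl("/proc/sys/net/ipv4/route/max_size", 1048576);
	write_sysctl("/proc/sys/net/ipv4/neigh/default/gc_thresh3", 8192);
}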
diff --git a/Documentation/rbtree.txt b/Documentation/rbtree.txt index 221f38be98f4..19f8278c3854 100644 --- a/Documentation/rbtree.txt +++ b/Documentation/rbtree.txt | |||
@@ -21,8 +21,8 @@ three rotations, respectively, to balance the tree), with slightly slower | |||
21 | To quote Linux Weekly News: | 21 | To quote Linux Weekly News: |
22 | 22 | ||
23 | There are a number of red-black trees in use in the kernel. | 23 | There are a number of red-black trees in use in the kernel. |
24 | The anticipatory, deadline, and CFQ I/O schedulers all employ | 24 | The deadline and CFQ I/O schedulers employ rbtrees to |
25 | rbtrees to track requests; the packet CD/DVD driver does the same. | 25 | track requests; the packet CD/DVD driver does the same. |
26 | The high-resolution timer code uses an rbtree to organize outstanding | 26 | The high-resolution timer code uses an rbtree to organize outstanding |
27 | timer requests. The ext3 filesystem tracks directory entries in a | 27 | timer requests. The ext3 filesystem tracks directory entries in a |
28 | red-black tree. Virtual memory areas (VMAs) are tracked with red-black | 28 | red-black tree. Virtual memory areas (VMAs) are tracked with red-black |
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt index 3894eaa23486..209e1584c3dc 100644 --- a/Documentation/sysctl/kernel.txt +++ b/Documentation/sysctl/kernel.txt | |||
@@ -28,6 +28,7 @@ show up in /proc/sys/kernel: | |||
28 | - core_uses_pid | 28 | - core_uses_pid |
29 | - ctrl-alt-del | 29 | - ctrl-alt-del |
30 | - dentry-state | 30 | - dentry-state |
31 | - dmesg_restrict | ||
31 | - domainname | 32 | - domainname |
32 | - hostname | 33 | - hostname |
33 | - hotplug | 34 | - hotplug |
@@ -213,6 +214,19 @@ to decide what to do with it. | |||
213 | 214 | ||
214 | ============================================================== | 215 | ============================================================== |
215 | 216 | ||
217 | dmesg_restrict: | ||
218 | |||
219 | This toggle indicates whether unprivileged users are prevented from using | ||
220 | dmesg(8) to view messages from the kernel's log buffer. When | ||
221 | dmesg_restrict is set to (0) there are no restrictions. When | ||
222 | dmesg_restrict is set to (1), users must have CAP_SYS_ADMIN to use | ||
223 | dmesg(8). | ||
224 | |||
225 | The kernel config option CONFIG_SECURITY_DMESG_RESTRICT sets the default | ||
226 | value of dmesg_restrict. | ||
227 | |||
228 | ============================================================== | ||
229 | |||
216 | domainname & hostname: | 230 | domainname & hostname: |
217 | 231 | ||
218 | These files can be used to set the NIS/YP domainname and the | 232 | These files can be used to set the NIS/YP domainname and the |
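As an illustration of the dmesg_restrict behaviour documented above (a sketch, not from the patch): when the toggle is 1, an unprivileged read of the log buffer via klogctl(3)/syslog(2) fails with EPERM.

#include <errno.h>
#include <stdio.h>
#include <sys/klog.h>

int main(void)
{
	char buf[4096];
	int n = klogctl(3 /* SYSLOG_ACTION_READ_ALL */, buf, sizeof(buf));

	if (n < 0 && errno == EPERM)
		printf("dmesg is restricted; CAP_SYS_ADMIN is required\n");
	else if (n >= 0)
		printf("read %d bytes of kernel log\n", n);
	return 0;
}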
diff --git a/MAINTAINERS b/MAINTAINERS index 0094224ca79b..8e6548dbd5db 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -161,7 +161,7 @@ M: Greg Kroah-Hartman <gregkh@suse.de> | |||
161 | L: linux-serial@vger.kernel.org | 161 | L: linux-serial@vger.kernel.org |
162 | W: http://serial.sourceforge.net | 162 | W: http://serial.sourceforge.net |
163 | S: Maintained | 163 | S: Maintained |
164 | T: quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/ | 164 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty-2.6.git |
165 | F: drivers/serial/8250* | 165 | F: drivers/serial/8250* |
166 | F: include/linux/serial_8250.h | 166 | F: include/linux/serial_8250.h |
167 | 167 | ||
@@ -945,7 +945,7 @@ M: Magnus Damm <magnus.damm@gmail.com> | |||
945 | L: linux-sh@vger.kernel.org | 945 | L: linux-sh@vger.kernel.org |
946 | W: http://oss.renesas.com | 946 | W: http://oss.renesas.com |
947 | Q: http://patchwork.kernel.org/project/linux-sh/list/ | 947 | Q: http://patchwork.kernel.org/project/linux-sh/list/ |
948 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/lethal/genesis-2.6.git | 948 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6.git rmobile-latest |
949 | S: Supported | 949 | S: Supported |
950 | F: arch/arm/mach-shmobile/ | 950 | F: arch/arm/mach-shmobile/ |
951 | F: drivers/sh/ | 951 | F: drivers/sh/ |
@@ -2435,6 +2435,7 @@ F: drivers/net/wan/sdla.c | |||
2435 | FRAMEBUFFER LAYER | 2435 | FRAMEBUFFER LAYER |
2436 | L: linux-fbdev@vger.kernel.org | 2436 | L: linux-fbdev@vger.kernel.org |
2437 | W: http://linux-fbdev.sourceforge.net/ | 2437 | W: http://linux-fbdev.sourceforge.net/ |
2438 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/lethal/fbdev-2.6.git | ||
2438 | S: Orphan | 2439 | S: Orphan |
2439 | F: Documentation/fb/ | 2440 | F: Documentation/fb/ |
2440 | F: drivers/video/fb* | 2441 | F: drivers/video/fb* |
@@ -5676,7 +5677,7 @@ S: Maintained | |||
5676 | 5677 | ||
5677 | STAGING SUBSYSTEM | 5678 | STAGING SUBSYSTEM |
5678 | M: Greg Kroah-Hartman <gregkh@suse.de> | 5679 | M: Greg Kroah-Hartman <gregkh@suse.de> |
5679 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging-next-2.6.git | 5680 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging-2.6.git |
5680 | L: devel@driverdev.osuosl.org | 5681 | L: devel@driverdev.osuosl.org |
5681 | S: Maintained | 5682 | S: Maintained |
5682 | F: drivers/staging/ | 5683 | F: drivers/staging/ |
@@ -5705,7 +5706,7 @@ M: Paul Mundt <lethal@linux-sh.org> | |||
5705 | L: linux-sh@vger.kernel.org | 5706 | L: linux-sh@vger.kernel.org |
5706 | W: http://www.linux-sh.org | 5707 | W: http://www.linux-sh.org |
5707 | Q: http://patchwork.kernel.org/project/linux-sh/list/ | 5708 | Q: http://patchwork.kernel.org/project/linux-sh/list/ |
5708 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6.git | 5709 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6.git sh-latest |
5709 | S: Supported | 5710 | S: Supported |
5710 | F: Documentation/sh/ | 5711 | F: Documentation/sh/ |
5711 | F: arch/sh/ | 5712 | F: arch/sh/ |
@@ -5910,7 +5911,7 @@ S: Maintained | |||
5910 | TTY LAYER | 5911 | TTY LAYER |
5911 | M: Greg Kroah-Hartman <gregkh@suse.de> | 5912 | M: Greg Kroah-Hartman <gregkh@suse.de> |
5912 | S: Maintained | 5913 | S: Maintained |
5913 | T: quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/ | 5914 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty-2.6.git |
5914 | F: drivers/char/tty_* | 5915 | F: drivers/char/tty_* |
5915 | F: drivers/serial/serial_core.c | 5916 | F: drivers/serial/serial_core.c |
5916 | F: include/linux/serial_core.h | 5917 | F: include/linux/serial_core.h |
@@ -6233,7 +6234,7 @@ USB SUBSYSTEM | |||
6233 | M: Greg Kroah-Hartman <gregkh@suse.de> | 6234 | M: Greg Kroah-Hartman <gregkh@suse.de> |
6234 | L: linux-usb@vger.kernel.org | 6235 | L: linux-usb@vger.kernel.org |
6235 | W: http://www.linux-usb.org | 6236 | W: http://www.linux-usb.org |
6236 | T: quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/ | 6237 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6.git |
6237 | S: Supported | 6238 | S: Supported |
6238 | F: Documentation/usb/ | 6239 | F: Documentation/usb/ |
6239 | F: drivers/net/usb/ | 6240 | F: drivers/net/usb/ |
@@ -6598,14 +6599,14 @@ F: drivers/platform/x86 | |||
6598 | 6599 | ||
6599 | XEN PCI SUBSYSTEM | 6600 | XEN PCI SUBSYSTEM |
6600 | M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> | 6601 | M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> |
6601 | L: xen-devel@lists.xensource.com | 6602 | L: xen-devel@lists.xensource.com (moderated for non-subscribers) |
6602 | S: Supported | 6603 | S: Supported |
6603 | F: arch/x86/pci/*xen* | 6604 | F: arch/x86/pci/*xen* |
6604 | F: drivers/pci/*xen* | 6605 | F: drivers/pci/*xen* |
6605 | 6606 | ||
6606 | XEN SWIOTLB SUBSYSTEM | 6607 | XEN SWIOTLB SUBSYSTEM |
6607 | M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> | 6608 | M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> |
6608 | L: xen-devel@lists.xensource.com | 6609 | L: xen-devel@lists.xensource.com (moderated for non-subscribers) |
6609 | S: Supported | 6610 | S: Supported |
6610 | F: arch/x86/xen/*swiotlb* | 6611 | F: arch/x86/xen/*swiotlb* |
6611 | F: drivers/xen/*swiotlb* | 6612 | F: drivers/xen/*swiotlb* |
@@ -6613,7 +6614,7 @@ F: drivers/xen/*swiotlb* | |||
6613 | XEN HYPERVISOR INTERFACE | 6614 | XEN HYPERVISOR INTERFACE |
6614 | M: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com> | 6615 | M: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com> |
6615 | M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> | 6616 | M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> |
6616 | L: xen-devel@lists.xen.org | 6617 | L: xen-devel@lists.xensource.com (moderated for non-subscribers) |
6617 | L: virtualization@lists.osdl.org | 6618 | L: virtualization@lists.osdl.org |
6618 | S: Supported | 6619 | S: Supported |
6619 | F: arch/x86/xen/ | 6620 | F: arch/x86/xen/ |
@@ -1,7 +1,7 @@ | |||
1 | VERSION = 2 | 1 | VERSION = 2 |
2 | PATCHLEVEL = 6 | 2 | PATCHLEVEL = 6 |
3 | SUBLEVEL = 37 | 3 | SUBLEVEL = 37 |
4 | EXTRAVERSION = -rc1 | 4 | EXTRAVERSION = -rc2 |
5 | NAME = Flesh-Eating Bats with Fangs | 5 | NAME = Flesh-Eating Bats with Fangs |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index a19a5266d5fc..db524e75c4a2 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
@@ -6,7 +6,7 @@ config ARM | |||
6 | select HAVE_MEMBLOCK | 6 | select HAVE_MEMBLOCK |
7 | select RTC_LIB | 7 | select RTC_LIB |
8 | select SYS_SUPPORTS_APM_EMULATION | 8 | select SYS_SUPPORTS_APM_EMULATION |
9 | select GENERIC_ATOMIC64 if (!CPU_32v6K) | 9 | select GENERIC_ATOMIC64 if (!CPU_32v6K || !AEABI) |
10 | select HAVE_OPROFILE if (HAVE_PERF_EVENTS) | 10 | select HAVE_OPROFILE if (HAVE_PERF_EVENTS) |
11 | select HAVE_ARCH_KGDB | 11 | select HAVE_ARCH_KGDB |
12 | select HAVE_KPROBES if (!XIP_KERNEL) | 12 | select HAVE_KPROBES if (!XIP_KERNEL) |
@@ -646,7 +646,7 @@ config ARCH_S3C2410 | |||
646 | select ARCH_HAS_CPUFREQ | 646 | select ARCH_HAS_CPUFREQ |
647 | select HAVE_CLK | 647 | select HAVE_CLK |
648 | select ARCH_USES_GETTIMEOFFSET | 648 | select ARCH_USES_GETTIMEOFFSET |
649 | select HAVE_S3C2410_I2C | 649 | select HAVE_S3C2410_I2C if I2C |
650 | help | 650 | help |
651 | Samsung S3C2410X CPU based systems, such as the Simtec Electronics | 651 | Samsung S3C2410X CPU based systems, such as the Simtec Electronics |
652 | BAST (<http://www.simtec.co.uk/products/EB110ITX/>), the IPAQ 1940 or | 652 | BAST (<http://www.simtec.co.uk/products/EB110ITX/>), the IPAQ 1940 or |
@@ -676,8 +676,8 @@ config ARCH_S3C64XX | |||
676 | select S3C_DEV_NAND | 676 | select S3C_DEV_NAND |
677 | select USB_ARCH_HAS_OHCI | 677 | select USB_ARCH_HAS_OHCI |
678 | select SAMSUNG_GPIOLIB_4BIT | 678 | select SAMSUNG_GPIOLIB_4BIT |
679 | select HAVE_S3C2410_I2C | 679 | select HAVE_S3C2410_I2C if I2C |
680 | select HAVE_S3C2410_WATCHDOG | 680 | select HAVE_S3C2410_WATCHDOG if WATCHDOG |
681 | help | 681 | help |
682 | Samsung S3C64XX series based systems | 682 | Samsung S3C64XX series based systems |
683 | 683 | ||
@@ -686,10 +686,10 @@ config ARCH_S5P64X0 | |||
686 | select CPU_V6 | 686 | select CPU_V6 |
687 | select GENERIC_GPIO | 687 | select GENERIC_GPIO |
688 | select HAVE_CLK | 688 | select HAVE_CLK |
689 | select HAVE_S3C2410_WATCHDOG | 689 | select HAVE_S3C2410_WATCHDOG if WATCHDOG |
690 | select ARCH_USES_GETTIMEOFFSET | 690 | select ARCH_USES_GETTIMEOFFSET |
691 | select HAVE_S3C2410_I2C | 691 | select HAVE_S3C2410_I2C if I2C |
692 | select HAVE_S3C_RTC | 692 | select HAVE_S3C_RTC if RTC_CLASS |
693 | help | 693 | help |
694 | Samsung S5P64X0 CPU based systems, such as the Samsung SMDK6440, | 694 | Samsung S5P64X0 CPU based systems, such as the Samsung SMDK6440, |
695 | SMDK6450. | 695 | SMDK6450. |
@@ -700,7 +700,7 @@ config ARCH_S5P6442 | |||
700 | select GENERIC_GPIO | 700 | select GENERIC_GPIO |
701 | select HAVE_CLK | 701 | select HAVE_CLK |
702 | select ARCH_USES_GETTIMEOFFSET | 702 | select ARCH_USES_GETTIMEOFFSET |
703 | select HAVE_S3C2410_WATCHDOG | 703 | select HAVE_S3C2410_WATCHDOG if WATCHDOG |
704 | help | 704 | help |
705 | Samsung S5P6442 CPU based systems | 705 | Samsung S5P6442 CPU based systems |
706 | 706 | ||
@@ -711,9 +711,9 @@ config ARCH_S5PC100 | |||
711 | select CPU_V7 | 711 | select CPU_V7 |
712 | select ARM_L1_CACHE_SHIFT_6 | 712 | select ARM_L1_CACHE_SHIFT_6 |
713 | select ARCH_USES_GETTIMEOFFSET | 713 | select ARCH_USES_GETTIMEOFFSET |
714 | select HAVE_S3C2410_I2C | 714 | select HAVE_S3C2410_I2C if I2C |
715 | select HAVE_S3C_RTC | 715 | select HAVE_S3C_RTC if RTC_CLASS |
716 | select HAVE_S3C2410_WATCHDOG | 716 | select HAVE_S3C2410_WATCHDOG if WATCHDOG |
717 | help | 717 | help |
718 | Samsung S5PC100 series based systems | 718 | Samsung S5PC100 series based systems |
719 | 719 | ||
@@ -726,9 +726,9 @@ config ARCH_S5PV210 | |||
726 | select ARM_L1_CACHE_SHIFT_6 | 726 | select ARM_L1_CACHE_SHIFT_6 |
727 | select ARCH_HAS_CPUFREQ | 727 | select ARCH_HAS_CPUFREQ |
728 | select ARCH_USES_GETTIMEOFFSET | 728 | select ARCH_USES_GETTIMEOFFSET |
729 | select HAVE_S3C2410_I2C | 729 | select HAVE_S3C2410_I2C if I2C |
730 | select HAVE_S3C_RTC | 730 | select HAVE_S3C_RTC if RTC_CLASS |
731 | select HAVE_S3C2410_WATCHDOG | 731 | select HAVE_S3C2410_WATCHDOG if WATCHDOG |
732 | help | 732 | help |
733 | Samsung S5PV210/S5PC110 series based systems | 733 | Samsung S5PV210/S5PC110 series based systems |
734 | 734 | ||
@@ -739,9 +739,9 @@ config ARCH_S5PV310 | |||
739 | select GENERIC_GPIO | 739 | select GENERIC_GPIO |
740 | select HAVE_CLK | 740 | select HAVE_CLK |
741 | select GENERIC_CLOCKEVENTS | 741 | select GENERIC_CLOCKEVENTS |
742 | select HAVE_S3C_RTC | 742 | select HAVE_S3C_RTC if RTC_CLASS |
743 | select HAVE_S3C2410_I2C | 743 | select HAVE_S3C2410_I2C if I2C |
744 | select HAVE_S3C2410_WATCHDOG | 744 | select HAVE_S3C2410_WATCHDOG if WATCHDOG |
745 | help | 745 | help |
746 | Samsung S5PV310 series based systems | 746 | Samsung S5PV310 series based systems |
747 | 747 | ||
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c index ada6359160eb..772f95f1aecd 100644 --- a/arch/arm/common/gic.c +++ b/arch/arm/common/gic.c | |||
@@ -251,15 +251,16 @@ void __init gic_dist_init(unsigned int gic_nr, void __iomem *base, | |||
251 | writel(cpumask, base + GIC_DIST_TARGET + i * 4 / 4); | 251 | writel(cpumask, base + GIC_DIST_TARGET + i * 4 / 4); |
252 | 252 | ||
253 | /* | 253 | /* |
254 | * Set priority on all interrupts. | 254 | * Set priority on all global interrupts. |
255 | */ | 255 | */ |
256 | for (i = 0; i < max_irq; i += 4) | 256 | for (i = 32; i < max_irq; i += 4) |
257 | writel(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4); | 257 | writel(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4); |
258 | 258 | ||
259 | /* | 259 | /* |
260 | * Disable all interrupts. | 260 | * Disable all interrupts. Leave the PPI and SGIs alone |
261 | * as these enables are banked registers. | ||
261 | */ | 262 | */ |
262 | for (i = 0; i < max_irq; i += 32) | 263 | for (i = 32; i < max_irq; i += 32) |
263 | writel(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32); | 264 | writel(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32); |
264 | 265 | ||
265 | /* | 266 | /* |
@@ -277,11 +278,30 @@ void __init gic_dist_init(unsigned int gic_nr, void __iomem *base, | |||
277 | 278 | ||
278 | void __cpuinit gic_cpu_init(unsigned int gic_nr, void __iomem *base) | 279 | void __cpuinit gic_cpu_init(unsigned int gic_nr, void __iomem *base) |
279 | { | 280 | { |
281 | void __iomem *dist_base; | ||
282 | int i; | ||
283 | |||
280 | if (gic_nr >= MAX_GIC_NR) | 284 | if (gic_nr >= MAX_GIC_NR) |
281 | BUG(); | 285 | BUG(); |
282 | 286 | ||
287 | dist_base = gic_data[gic_nr].dist_base; | ||
288 | BUG_ON(!dist_base); | ||
289 | |||
283 | gic_data[gic_nr].cpu_base = base; | 290 | gic_data[gic_nr].cpu_base = base; |
284 | 291 | ||
292 | /* | ||
293 | * Deal with the banked PPI and SGI interrupts - disable all | ||
294 | * PPI interrupts, ensure all SGI interrupts are enabled. | ||
295 | */ | ||
296 | writel(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR); | ||
297 | writel(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET); | ||
298 | |||
299 | /* | ||
300 | * Set priority on PPI and SGI interrupts | ||
301 | */ | ||
302 | for (i = 0; i < 32; i += 4) | ||
303 | writel(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4); | ||
304 | |||
285 | writel(0xf0, base + GIC_CPU_PRIMASK); | 305 | writel(0xf0, base + GIC_CPU_PRIMASK); |
286 | writel(1, base + GIC_CPU_CTRL); | 306 | writel(1, base + GIC_CPU_CTRL); |
287 | } | 307 | } |
diff --git a/arch/arm/include/asm/hardware/it8152.h b/arch/arm/include/asm/hardware/it8152.h index 6700c7fc7ebd..21fa272301f8 100644 --- a/arch/arm/include/asm/hardware/it8152.h +++ b/arch/arm/include/asm/hardware/it8152.h | |||
@@ -75,7 +75,7 @@ extern unsigned long it8152_base_address; | |||
75 | IT8152_PD_IRQ(1) USB (USBR) | 75 | IT8152_PD_IRQ(1) USB (USBR) |
76 | IT8152_PD_IRQ(0) Audio controller (ACR) | 76 | IT8152_PD_IRQ(0) Audio controller (ACR) |
77 | */ | 77 | */ |
78 | #define IT8152_IRQ(x) (IRQ_BOARD_END + (x)) | 78 | #define IT8152_IRQ(x) (IRQ_BOARD_START + (x)) |
79 | 79 | ||
80 | /* IRQ-sources in 3 groups - local devices, LPC (serial), and external PCI */ | 80 | /* IRQ-sources in 3 groups - local devices, LPC (serial), and external PCI */ |
81 | #define IT8152_LD_IRQ_COUNT 9 | 81 | #define IT8152_LD_IRQ_COUNT 9 |
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 54593b0c241b..21e3a4ab3b8c 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c | |||
@@ -748,8 +748,7 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr, | |||
748 | breakpoint_handler(addr, regs); | 748 | breakpoint_handler(addr, regs); |
749 | break; | 749 | break; |
750 | case ARM_ENTRY_ASYNC_WATCHPOINT: | 750 | case ARM_ENTRY_ASYNC_WATCHPOINT: |
751 | WARN_ON("Asynchronous watchpoint exception taken. " | 751 | WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n"); |
752 | "Debugging results may be unreliable"); | ||
753 | case ARM_ENTRY_SYNC_WATCHPOINT: | 752 | case ARM_ENTRY_SYNC_WATCHPOINT: |
754 | watchpoint_handler(addr, regs); | 753 | watchpoint_handler(addr, regs); |
755 | break; | 754 | break; |
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index 49643b1467e6..07a50357492a 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c | |||
@@ -1749,7 +1749,7 @@ static inline int armv7_pmnc_has_overflowed(unsigned long pmnc) | |||
1749 | static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc, | 1749 | static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc, |
1750 | enum armv7_counters counter) | 1750 | enum armv7_counters counter) |
1751 | { | 1751 | { |
1752 | int ret; | 1752 | int ret = 0; |
1753 | 1753 | ||
1754 | if (counter == ARMV7_CYCLE_COUNTER) | 1754 | if (counter == ARMV7_CYCLE_COUNTER) |
1755 | ret = pmnc & ARMV7_FLAG_C; | 1755 | ret = pmnc & ARMV7_FLAG_C; |
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c index 20b7411e47fd..c2e112e1a05f 100644 --- a/arch/arm/kernel/stacktrace.c +++ b/arch/arm/kernel/stacktrace.c | |||
@@ -28,7 +28,7 @@ int notrace unwind_frame(struct stackframe *frame) | |||
28 | 28 | ||
29 | /* only go to a higher address on the stack */ | 29 | /* only go to a higher address on the stack */ |
30 | low = frame->sp; | 30 | low = frame->sp; |
31 | high = ALIGN(low, THREAD_SIZE) + THREAD_SIZE; | 31 | high = ALIGN(low, THREAD_SIZE); |
32 | 32 | ||
33 | /* check current frame pointer is within bounds */ | 33 | /* check current frame pointer is within bounds */ |
34 | if (fp < (low + 12) || fp + 4 >= high) | 34 | if (fp < (low + 12) || fp + 4 >= high) |
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index cda78d59aa31..446aee97436f 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c | |||
@@ -53,10 +53,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long); | |||
53 | void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame) | 53 | void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame) |
54 | { | 54 | { |
55 | #ifdef CONFIG_KALLSYMS | 55 | #ifdef CONFIG_KALLSYMS |
56 | char sym1[KSYM_SYMBOL_LEN], sym2[KSYM_SYMBOL_LEN]; | 56 | printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from); |
57 | sprint_symbol(sym1, where); | ||
58 | sprint_symbol(sym2, from); | ||
59 | printk("[<%08lx>] (%s) from [<%08lx>] (%s)\n", where, sym1, from, sym2); | ||
60 | #else | 57 | #else |
61 | printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from); | 58 | printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from); |
62 | #endif | 59 | #endif |
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c index 2a161765f6d5..d2cb0b3c9872 100644 --- a/arch/arm/kernel/unwind.c +++ b/arch/arm/kernel/unwind.c | |||
@@ -279,7 +279,7 @@ int unwind_frame(struct stackframe *frame) | |||
279 | 279 | ||
280 | /* only go to a higher address on the stack */ | 280 | /* only go to a higher address on the stack */ |
281 | low = frame->sp; | 281 | low = frame->sp; |
282 | high = ALIGN(low, THREAD_SIZE) + THREAD_SIZE; | 282 | high = ALIGN(low, THREAD_SIZE); |
283 | 283 | ||
284 | pr_debug("%s(pc = %08lx lr = %08lx sp = %08lx)\n", __func__, | 284 | pr_debug("%s(pc = %08lx lr = %08lx sp = %08lx)\n", __func__, |
285 | frame->pc, frame->lr, frame->sp); | 285 | frame->pc, frame->lr, frame->sp); |
diff --git a/arch/arm/mach-ep93xx/include/mach/dma.h b/arch/arm/mach-ep93xx/include/mach/dma.h index 3a5961d3f3b1..5e31b2b25da9 100644 --- a/arch/arm/mach-ep93xx/include/mach/dma.h +++ b/arch/arm/mach-ep93xx/include/mach/dma.h | |||
@@ -1,5 +1,13 @@ | |||
1 | /* | 1 | /** |
2 | * arch/arm/mach-ep93xx/include/mach/dma.h | 2 | * DOC: EP93xx DMA M2P memory to peripheral and peripheral to memory engine |
3 | * | ||
4 | * The EP93xx DMA M2P subsystem handles DMA transfers between memory and | ||
5 | * peripherals. DMA M2P channels are available for audio, UARTs and IrDA. | ||
6 | * See chapter 10 of the EP93xx users guide for full details on the DMA M2P | ||
7 | * engine. | ||
8 | * | ||
9 | * See sound/soc/ep93xx/ep93xx-pcm.c for an example use of the DMA M2P code. | ||
10 | * | ||
3 | */ | 11 | */ |
4 | 12 | ||
5 | #ifndef __ASM_ARCH_DMA_H | 13 | #ifndef __ASM_ARCH_DMA_H |
@@ -8,12 +16,34 @@ | |||
8 | #include <linux/list.h> | 16 | #include <linux/list.h> |
9 | #include <linux/types.h> | 17 | #include <linux/types.h> |
10 | 18 | ||
19 | /** | ||
20 | * struct ep93xx_dma_buffer - Information about a buffer to be transferred | ||
21 | * using the DMA M2P engine | ||
22 | * | ||
23 | * @list: Entry in DMA buffer list | ||
24 | * @bus_addr: Physical address of the buffer | ||
25 | * @size: Size of the buffer in bytes | ||
26 | */ | ||
11 | struct ep93xx_dma_buffer { | 27 | struct ep93xx_dma_buffer { |
12 | struct list_head list; | 28 | struct list_head list; |
13 | u32 bus_addr; | 29 | u32 bus_addr; |
14 | u16 size; | 30 | u16 size; |
15 | }; | 31 | }; |
16 | 32 | ||
33 | /** | ||
34 | * struct ep93xx_dma_m2p_client - Information about a DMA M2P client | ||
35 | * | ||
36 | * @name: Unique name for this client | ||
37 | * @flags: Client flags | ||
38 | * @cookie: User data to pass to callback functions | ||
39 | * @buffer_started: Non NULL function to call when a transfer is started. | ||
40 | * The arguments are the user data cookie and the DMA | ||
41 | * buffer which is starting. | ||
42 | * @buffer_finished: Non NULL function to call when a transfer is completed. | ||
43 | * The arguments are the user data cookie, the DMA buffer | ||
44 | * which has completed, and a boolean flag indicating if | ||
45 | * the transfer had an error. | ||
46 | */ | ||
17 | struct ep93xx_dma_m2p_client { | 47 | struct ep93xx_dma_m2p_client { |
18 | char *name; | 48 | char *name; |
19 | u8 flags; | 49 | u8 flags; |
@@ -24,10 +54,11 @@ struct ep93xx_dma_m2p_client { | |||
24 | struct ep93xx_dma_buffer *buf, | 54 | struct ep93xx_dma_buffer *buf, |
25 | int bytes, int error); | 55 | int bytes, int error); |
26 | 56 | ||
27 | /* Internal to the DMA code. */ | 57 | /* private: Internal use only */ |
28 | void *channel; | 58 | void *channel; |
29 | }; | 59 | }; |
30 | 60 | ||
61 | /* DMA M2P ports */ | ||
31 | #define EP93XX_DMA_M2P_PORT_I2S1 0x00 | 62 | #define EP93XX_DMA_M2P_PORT_I2S1 0x00 |
32 | #define EP93XX_DMA_M2P_PORT_I2S2 0x01 | 63 | #define EP93XX_DMA_M2P_PORT_I2S2 0x01 |
33 | #define EP93XX_DMA_M2P_PORT_AAC1 0x02 | 64 | #define EP93XX_DMA_M2P_PORT_AAC1 0x02 |
@@ -39,18 +70,80 @@ struct ep93xx_dma_m2p_client { | |||
39 | #define EP93XX_DMA_M2P_PORT_UART3 0x08 | 70 | #define EP93XX_DMA_M2P_PORT_UART3 0x08 |
40 | #define EP93XX_DMA_M2P_PORT_IRDA 0x09 | 71 | #define EP93XX_DMA_M2P_PORT_IRDA 0x09 |
41 | #define EP93XX_DMA_M2P_PORT_MASK 0x0f | 72 | #define EP93XX_DMA_M2P_PORT_MASK 0x0f |
42 | #define EP93XX_DMA_M2P_TX 0x00 | ||
43 | #define EP93XX_DMA_M2P_RX 0x10 | ||
44 | #define EP93XX_DMA_M2P_ABORT_ON_ERROR 0x20 | ||
45 | #define EP93XX_DMA_M2P_IGNORE_ERROR 0x40 | ||
46 | #define EP93XX_DMA_M2P_ERROR_MASK 0x60 | ||
47 | 73 | ||
48 | int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *m2p); | 74 | /* DMA M2P client flags */ |
75 | #define EP93XX_DMA_M2P_TX 0x00 /* Memory to peripheral */ | ||
76 | #define EP93XX_DMA_M2P_RX 0x10 /* Peripheral to memory */ | ||
77 | |||
78 | /* | ||
79 | * DMA M2P client error handling flags. See the EP93xx users guide | ||
80 | * documentation on the DMA M2P CONTROL register for more details | ||
81 | */ | ||
82 | #define EP93XX_DMA_M2P_ABORT_ON_ERROR 0x20 /* Abort on peripheral error */ | ||
83 | #define EP93XX_DMA_M2P_IGNORE_ERROR 0x40 /* Ignore peripheral errors */ | ||
84 | #define EP93XX_DMA_M2P_ERROR_MASK 0x60 /* Mask of error bits */ | ||
85 | |||
86 | /** | ||
87 | * ep93xx_dma_m2p_client_register - Register a client with the DMA M2P | ||
88 | * subsystem | ||
89 | * | ||
90 | * @m2p: Client information to register | ||
91 | * returns 0 on success | ||
92 | * | ||
93 | * The DMA M2P subsystem allocates a channel and an interrupt line for the DMA | ||
94 | * client | ||
95 | */ | ||
96 | int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *m2p); | ||
97 | |||
98 | /** | ||
99 | * ep93xx_dma_m2p_client_unregister - Unregister a client from the DMA M2P | ||
100 | * subsystem | ||
101 | * | ||
102 | * @m2p: Client to unregister | ||
103 | * | ||
104 | * Any transfers currently in progress will be completed in hardware, but | ||
105 | * ignored in software. | ||
106 | */ | ||
49 | void ep93xx_dma_m2p_client_unregister(struct ep93xx_dma_m2p_client *m2p); | 107 | void ep93xx_dma_m2p_client_unregister(struct ep93xx_dma_m2p_client *m2p); |
108 | |||
109 | /** | ||
110 | * ep93xx_dma_m2p_submit - Submit a DMA M2P transfer | ||
111 | * | ||
112 | * @m2p: DMA Client to submit the transfer on | ||
113 | * @buf: DMA Buffer to submit | ||
114 | * | ||
115 | * If the current or next transfer positions are free on the M2P client then | ||
116 | * the transfer is started immediately. If not, the transfer is added to the | ||
117 | * list of pending transfers. This function must not be called from the | ||
118 | * buffer_finished callback for an M2P channel. | ||
119 | * | ||
120 | */ | ||
50 | void ep93xx_dma_m2p_submit(struct ep93xx_dma_m2p_client *m2p, | 121 | void ep93xx_dma_m2p_submit(struct ep93xx_dma_m2p_client *m2p, |
51 | struct ep93xx_dma_buffer *buf); | 122 | struct ep93xx_dma_buffer *buf); |
123 | |||
124 | /** | ||
125 | * ep93xx_dma_m2p_submit_recursive - Put a DMA transfer on the pending list | ||
126 | * for an M2P channel | ||
127 | * | ||
128 | * @m2p: DMA Client to submit the transfer on | ||
129 | * @buf: DMA Buffer to submit | ||
130 | * | ||
131 | * This function must only be called from the buffer_finished callback for an | ||
132 | * M2P channel. It is commonly used to add the next transfer in a chained list | ||
133 | * of DMA transfers. | ||
134 | */ | ||
52 | void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client *m2p, | 135 | void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client *m2p, |
53 | struct ep93xx_dma_buffer *buf); | 136 | struct ep93xx_dma_buffer *buf); |
137 | |||
138 | /** | ||
139 | * ep93xx_dma_m2p_flush - Flush all pending transfers on a DMA M2P client | ||
140 | * | ||
141 | * @m2p: DMA client to flush transfers on | ||
142 | * | ||
143 | * Any transfers currently in progress will be completed in hardware, but | ||
144 | * ignored in software. | ||
145 | * | ||
146 | */ | ||
54 | void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client *m2p); | 147 | void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client *m2p); |
55 | 148 | ||
56 | #endif /* __ASM_ARCH_DMA_H */ | 149 | #endif /* __ASM_ARCH_DMA_H */ |
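Putting the kernel-doc above together, an illustrative (unchecked) sketch of an M2P client that registers, submits a first buffer, and re-queues it from the completion callback; the client name, port and error policy are arbitrary example choices:

#include <linux/types.h>
#include <mach/dma.h>

static struct ep93xx_dma_buffer example_buf;

static void example_buffer_started(void *cookie, struct ep93xx_dma_buffer *buf)
{
	/* The transfer described by buf has been handed to the hardware. */
}

/* Runs in the completion path, so only the recursive submit is allowed here. */
static void example_buffer_finished(void *cookie, struct ep93xx_dma_buffer *buf,
				    int bytes, int error)
{
	struct ep93xx_dma_m2p_client *m2p = cookie;

	if (!error)
		ep93xx_dma_m2p_submit_recursive(m2p, buf);
}

static struct ep93xx_dma_m2p_client example_client = {
	.name		 = "example-i2s-tx",
	.flags		 = EP93XX_DMA_M2P_PORT_I2S1 | EP93XX_DMA_M2P_TX |
			   EP93XX_DMA_M2P_IGNORE_ERROR,
	.cookie		 = &example_client,
	.buffer_started	 = example_buffer_started,
	.buffer_finished = example_buffer_finished,
};

static int example_dma_start(u32 phys, u16 len)
{
	int err = ep93xx_dma_m2p_client_register(&example_client);

	if (err)
		return err;

	example_buf.bus_addr = phys;
	example_buf.size = len;
	ep93xx_dma_m2p_submit(&example_client, &example_buf);
	return 0;
}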
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c index 51ff23b72d3a..3688123b5ad8 100644 --- a/arch/arm/mach-kirkwood/common.c +++ b/arch/arm/mach-kirkwood/common.c | |||
@@ -854,10 +854,9 @@ int __init kirkwood_find_tclk(void) | |||
854 | 854 | ||
855 | kirkwood_pcie_id(&dev, &rev); | 855 | kirkwood_pcie_id(&dev, &rev); |
856 | 856 | ||
857 | if ((dev == MV88F6281_DEV_ID && (rev == MV88F6281_REV_A0 || | 857 | if (dev == MV88F6281_DEV_ID || dev == MV88F6282_DEV_ID) |
858 | rev == MV88F6281_REV_A1)) || | 858 | if (((readl(SAMPLE_AT_RESET) >> 21) & 1) == 0) |
859 | (dev == MV88F6282_DEV_ID)) | 859 | return 200000000; |
860 | return 200000000; | ||
861 | 860 | ||
862 | return 166666667; | 861 | return 166666667; |
863 | } | 862 | } |
diff --git a/arch/arm/mach-kirkwood/d2net_v2-setup.c b/arch/arm/mach-kirkwood/d2net_v2-setup.c index 4aa86e4a152c..a31c9499ab36 100644 --- a/arch/arm/mach-kirkwood/d2net_v2-setup.c +++ b/arch/arm/mach-kirkwood/d2net_v2-setup.c | |||
@@ -225,5 +225,5 @@ MACHINE_START(D2NET_V2, "LaCie d2 Network v2") | |||
225 | .init_machine = d2net_v2_init, | 225 | .init_machine = d2net_v2_init, |
226 | .map_io = kirkwood_map_io, | 226 | .map_io = kirkwood_map_io, |
227 | .init_irq = kirkwood_init_irq, | 227 | .init_irq = kirkwood_init_irq, |
228 | .timer = &lacie_v2_timer, | 228 | .timer = &kirkwood_timer, |
229 | MACHINE_END | 229 | MACHINE_END |
diff --git a/arch/arm/mach-kirkwood/lacie_v2-common.c b/arch/arm/mach-kirkwood/lacie_v2-common.c index d3ea1b6c8a02..285edab776e9 100644 --- a/arch/arm/mach-kirkwood/lacie_v2-common.c +++ b/arch/arm/mach-kirkwood/lacie_v2-common.c | |||
@@ -111,17 +111,3 @@ void __init lacie_v2_hdd_power_init(int hdd_num) | |||
111 | pr_err("Failed to power up HDD%d\n", i + 1); | 111 | pr_err("Failed to power up HDD%d\n", i + 1); |
112 | } | 112 | } |
113 | } | 113 | } |
114 | |||
115 | /***************************************************************************** | ||
116 | * Timer | ||
117 | ****************************************************************************/ | ||
118 | |||
119 | static void lacie_v2_timer_init(void) | ||
120 | { | ||
121 | kirkwood_tclk = 166666667; | ||
122 | orion_time_init(IRQ_KIRKWOOD_BRIDGE, kirkwood_tclk); | ||
123 | } | ||
124 | |||
125 | struct sys_timer lacie_v2_timer = { | ||
126 | .init = lacie_v2_timer_init, | ||
127 | }; | ||
diff --git a/arch/arm/mach-kirkwood/lacie_v2-common.h b/arch/arm/mach-kirkwood/lacie_v2-common.h index af521315b87b..fc64f578536e 100644 --- a/arch/arm/mach-kirkwood/lacie_v2-common.h +++ b/arch/arm/mach-kirkwood/lacie_v2-common.h | |||
@@ -13,6 +13,4 @@ void lacie_v2_register_flash(void); | |||
13 | void lacie_v2_register_i2c_devices(void); | 13 | void lacie_v2_register_i2c_devices(void); |
14 | void lacie_v2_hdd_power_init(int hdd_num); | 14 | void lacie_v2_hdd_power_init(int hdd_num); |
15 | 15 | ||
16 | extern struct sys_timer lacie_v2_timer; | ||
17 | |||
18 | #endif | 16 | #endif |
diff --git a/arch/arm/mach-kirkwood/mpp.c b/arch/arm/mach-kirkwood/mpp.c index 065187d177c6..27901f702feb 100644 --- a/arch/arm/mach-kirkwood/mpp.c +++ b/arch/arm/mach-kirkwood/mpp.c | |||
@@ -59,7 +59,7 @@ void __init kirkwood_mpp_conf(unsigned int *mpp_list) | |||
59 | } | 59 | } |
60 | printk("\n"); | 60 | printk("\n"); |
61 | 61 | ||
62 | while (*mpp_list) { | 62 | for ( ; *mpp_list; mpp_list++) { |
63 | unsigned int num = MPP_NUM(*mpp_list); | 63 | unsigned int num = MPP_NUM(*mpp_list); |
64 | unsigned int sel = MPP_SEL(*mpp_list); | 64 | unsigned int sel = MPP_SEL(*mpp_list); |
65 | int shift, gpio_mode; | 65 | int shift, gpio_mode; |
@@ -88,8 +88,6 @@ void __init kirkwood_mpp_conf(unsigned int *mpp_list) | |||
88 | if (sel != 0) | 88 | if (sel != 0) |
89 | gpio_mode = 0; | 89 | gpio_mode = 0; |
90 | orion_gpio_set_valid(num, gpio_mode); | 90 | orion_gpio_set_valid(num, gpio_mode); |
91 | |||
92 | mpp_list++; | ||
93 | } | 91 | } |
94 | 92 | ||
95 | printk(KERN_DEBUG " final MPP regs:"); | 93 | printk(KERN_DEBUG " final MPP regs:"); |
diff --git a/arch/arm/mach-kirkwood/netspace_v2-setup.c b/arch/arm/mach-kirkwood/netspace_v2-setup.c index 5ea66f1f4178..65ee21fd2f3b 100644 --- a/arch/arm/mach-kirkwood/netspace_v2-setup.c +++ b/arch/arm/mach-kirkwood/netspace_v2-setup.c | |||
@@ -262,7 +262,7 @@ MACHINE_START(NETSPACE_V2, "LaCie Network Space v2") | |||
262 | .init_machine = netspace_v2_init, | 262 | .init_machine = netspace_v2_init, |
263 | .map_io = kirkwood_map_io, | 263 | .map_io = kirkwood_map_io, |
264 | .init_irq = kirkwood_init_irq, | 264 | .init_irq = kirkwood_init_irq, |
265 | .timer = &lacie_v2_timer, | 265 | .timer = &kirkwood_timer, |
266 | MACHINE_END | 266 | MACHINE_END |
267 | #endif | 267 | #endif |
268 | 268 | ||
@@ -272,7 +272,7 @@ MACHINE_START(INETSPACE_V2, "LaCie Internet Space v2") | |||
272 | .init_machine = netspace_v2_init, | 272 | .init_machine = netspace_v2_init, |
273 | .map_io = kirkwood_map_io, | 273 | .map_io = kirkwood_map_io, |
274 | .init_irq = kirkwood_init_irq, | 274 | .init_irq = kirkwood_init_irq, |
275 | .timer = &lacie_v2_timer, | 275 | .timer = &kirkwood_timer, |
276 | MACHINE_END | 276 | MACHINE_END |
277 | #endif | 277 | #endif |
278 | 278 | ||
@@ -282,6 +282,6 @@ MACHINE_START(NETSPACE_MAX_V2, "LaCie Network Space Max v2") | |||
282 | .init_machine = netspace_v2_init, | 282 | .init_machine = netspace_v2_init, |
283 | .map_io = kirkwood_map_io, | 283 | .map_io = kirkwood_map_io, |
284 | .init_irq = kirkwood_init_irq, | 284 | .init_irq = kirkwood_init_irq, |
285 | .timer = &lacie_v2_timer, | 285 | .timer = &kirkwood_timer, |
286 | MACHINE_END | 286 | MACHINE_END |
287 | #endif | 287 | #endif |
diff --git a/arch/arm/mach-kirkwood/netxbig_v2-setup.c b/arch/arm/mach-kirkwood/netxbig_v2-setup.c index a1b45d501aef..93afd3c8bfd8 100644 --- a/arch/arm/mach-kirkwood/netxbig_v2-setup.c +++ b/arch/arm/mach-kirkwood/netxbig_v2-setup.c | |||
@@ -403,7 +403,7 @@ MACHINE_START(NET2BIG_V2, "LaCie 2Big Network v2") | |||
403 | .init_machine = netxbig_v2_init, | 403 | .init_machine = netxbig_v2_init, |
404 | .map_io = kirkwood_map_io, | 404 | .map_io = kirkwood_map_io, |
405 | .init_irq = kirkwood_init_irq, | 405 | .init_irq = kirkwood_init_irq, |
406 | .timer = &lacie_v2_timer, | 406 | .timer = &kirkwood_timer, |
407 | MACHINE_END | 407 | MACHINE_END |
408 | #endif | 408 | #endif |
409 | 409 | ||
@@ -413,6 +413,6 @@ MACHINE_START(NET5BIG_V2, "LaCie 5Big Network v2") | |||
413 | .init_machine = netxbig_v2_init, | 413 | .init_machine = netxbig_v2_init, |
414 | .map_io = kirkwood_map_io, | 414 | .map_io = kirkwood_map_io, |
415 | .init_irq = kirkwood_init_irq, | 415 | .init_irq = kirkwood_init_irq, |
416 | .timer = &lacie_v2_timer, | 416 | .timer = &kirkwood_timer, |
417 | MACHINE_END | 417 | MACHINE_END |
418 | #endif | 418 | #endif |
diff --git a/arch/arm/mach-kirkwood/ts41x-setup.c b/arch/arm/mach-kirkwood/ts41x-setup.c index 8be09a0ce4ac..3587a281d993 100644 --- a/arch/arm/mach-kirkwood/ts41x-setup.c +++ b/arch/arm/mach-kirkwood/ts41x-setup.c | |||
@@ -27,6 +27,10 @@ | |||
27 | #include "mpp.h" | 27 | #include "mpp.h" |
28 | #include "tsx1x-common.h" | 28 | #include "tsx1x-common.h" |
29 | 29 | ||
30 | /* for the PCIe reset workaround */ | ||
31 | #include <plat/pcie.h> | ||
32 | |||
33 | |||
30 | #define QNAP_TS41X_JUMPER_JP1 45 | 34 | #define QNAP_TS41X_JUMPER_JP1 45 |
31 | 35 | ||
32 | static struct i2c_board_info __initdata qnap_ts41x_i2c_rtc = { | 36 | static struct i2c_board_info __initdata qnap_ts41x_i2c_rtc = { |
@@ -140,8 +144,16 @@ static void __init qnap_ts41x_init(void) | |||
140 | 144 | ||
141 | static int __init ts41x_pci_init(void) | 145 | static int __init ts41x_pci_init(void) |
142 | { | 146 | { |
143 | if (machine_is_ts41x()) | 147 | if (machine_is_ts41x()) { |
148 | /* | ||
149 | * Without this explicit reset, the PCIe SATA controller | ||
150 | * (Marvell 88sx7042/sata_mv) is known to stop working | ||
151 | * after a few minutes. | ||
152 | */ | ||
153 | orion_pcie_reset((void __iomem *)PCIE_VIRT_BASE); | ||
154 | |||
144 | kirkwood_pcie_init(KW_PCIE0); | 155 | kirkwood_pcie_init(KW_PCIE0); |
156 | } | ||
145 | 157 | ||
146 | return 0; | 158 | return 0; |
147 | } | 159 | } |
diff --git a/arch/arm/mach-mmp/include/mach/cputype.h b/arch/arm/mach-mmp/include/mach/cputype.h index f43a68b213f1..8a3b56dfd35d 100644 --- a/arch/arm/mach-mmp/include/mach/cputype.h +++ b/arch/arm/mach-mmp/include/mach/cputype.h | |||
@@ -46,7 +46,8 @@ static inline int cpu_is_pxa910(void) | |||
46 | #ifdef CONFIG_CPU_MMP2 | 46 | #ifdef CONFIG_CPU_MMP2 |
47 | static inline int cpu_is_mmp2(void) | 47 | static inline int cpu_is_mmp2(void) |
48 | { | 48 | { |
49 | return (((cpu_readid_id() >> 8) & 0xff) == 0x58); | 49 | return (((read_cpuid_id() >> 8) & 0xff) == 0x58); |
50 | } | ||
50 | #else | 51 | #else |
51 | #define cpu_is_mmp2() (0) | 52 | #define cpu_is_mmp2() (0) |
52 | #endif | 53 | #endif |
diff --git a/arch/arm/mach-mv78xx0/mpp.c b/arch/arm/mach-mv78xx0/mpp.c index 354ac514eb89..84db2dfc475c 100644 --- a/arch/arm/mach-mv78xx0/mpp.c +++ b/arch/arm/mach-mv78xx0/mpp.c | |||
@@ -54,7 +54,7 @@ void __init mv78xx0_mpp_conf(unsigned int *mpp_list) | |||
54 | } | 54 | } |
55 | printk("\n"); | 55 | printk("\n"); |
56 | 56 | ||
57 | while (*mpp_list) { | 57 | for ( ; *mpp_list; mpp_list++) { |
58 | unsigned int num = MPP_NUM(*mpp_list); | 58 | unsigned int num = MPP_NUM(*mpp_list); |
59 | unsigned int sel = MPP_SEL(*mpp_list); | 59 | unsigned int sel = MPP_SEL(*mpp_list); |
60 | int shift, gpio_mode; | 60 | int shift, gpio_mode; |
@@ -83,8 +83,6 @@ void __init mv78xx0_mpp_conf(unsigned int *mpp_list) | |||
83 | if (sel != 0) | 83 | if (sel != 0) |
84 | gpio_mode = 0; | 84 | gpio_mode = 0; |
85 | orion_gpio_set_valid(num, gpio_mode); | 85 | orion_gpio_set_valid(num, gpio_mode); |
86 | |||
87 | mpp_list++; | ||
88 | } | 86 | } |
89 | 87 | ||
90 | printk(KERN_DEBUG " final MPP regs:"); | 88 | printk(KERN_DEBUG " final MPP regs:"); |
diff --git a/arch/arm/mach-omap1/devices.c b/arch/arm/mach-omap1/devices.c index ea0d80a89da7..e7f9ee63dce5 100644 --- a/arch/arm/mach-omap1/devices.c +++ b/arch/arm/mach-omap1/devices.c | |||
@@ -321,10 +321,9 @@ static struct platform_device omap_wdt_device = { | |||
321 | static int __init omap_init_wdt(void) | 321 | static int __init omap_init_wdt(void) |
322 | { | 322 | { |
323 | if (!cpu_is_omap16xx()) | 323 | if (!cpu_is_omap16xx()) |
324 | return; | 324 | return -ENODEV; |
325 | 325 | ||
326 | platform_device_register(&omap_wdt_device); | 326 | return platform_device_register(&omap_wdt_device); |
327 | return 0; | ||
328 | } | 327 | } |
329 | subsys_initcall(omap_init_wdt); | 328 | subsys_initcall(omap_init_wdt); |
330 | #endif | 329 | #endif |
diff --git a/arch/arm/mach-omap1/include/mach/camera.h b/arch/arm/mach-omap1/include/mach/camera.h index fd54b452eb22..847d00f0bb0a 100644 --- a/arch/arm/mach-omap1/include/mach/camera.h +++ b/arch/arm/mach-omap1/include/mach/camera.h | |||
@@ -1,6 +1,8 @@ | |||
1 | #ifndef __ASM_ARCH_CAMERA_H_ | 1 | #ifndef __ASM_ARCH_CAMERA_H_ |
2 | #define __ASM_ARCH_CAMERA_H_ | 2 | #define __ASM_ARCH_CAMERA_H_ |
3 | 3 | ||
4 | #include <media/omap1_camera.h> | ||
5 | |||
4 | void omap1_camera_init(void *); | 6 | void omap1_camera_init(void *); |
5 | 7 | ||
6 | static inline void omap1_set_camera_info(struct omap1_cam_platform_data *info) | 8 | static inline void omap1_set_camera_info(struct omap1_cam_platform_data *info) |
diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c index 067f4379c87f..53ac762518bd 100644 --- a/arch/arm/mach-omap2/board-devkit8000.c +++ b/arch/arm/mach-omap2/board-devkit8000.c | |||
@@ -242,9 +242,6 @@ static int devkit8000_twl_gpio_setup(struct device *dev, | |||
242 | mmc[0].gpio_cd = gpio + 0; | 242 | mmc[0].gpio_cd = gpio + 0; |
243 | omap2_hsmmc_init(mmc); | 243 | omap2_hsmmc_init(mmc); |
244 | 244 | ||
245 | /* link regulators to MMC adapters */ | ||
246 | devkit8000_vmmc1_supply.dev = mmc[0].dev; | ||
247 | |||
248 | /* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */ | 245 | /* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */ |
249 | gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1; | 246 | gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1; |
250 | 247 | ||
diff --git a/arch/arm/mach-orion5x/mpp.c b/arch/arm/mach-orion5x/mpp.c index bc4c3b9aaf83..db485d3b8144 100644 --- a/arch/arm/mach-orion5x/mpp.c +++ b/arch/arm/mach-orion5x/mpp.c | |||
@@ -127,7 +127,7 @@ void __init orion5x_mpp_conf(struct orion5x_mpp_mode *mode) | |||
127 | /* Initialize gpiolib. */ | 127 | /* Initialize gpiolib. */ |
128 | orion_gpio_init(); | 128 | orion_gpio_init(); |
129 | 129 | ||
130 | while (mode->mpp >= 0) { | 130 | for ( ; mode->mpp >= 0; mode++) { |
131 | u32 *reg; | 131 | u32 *reg; |
132 | int num_type; | 132 | int num_type; |
133 | int shift; | 133 | int shift; |
@@ -160,8 +160,6 @@ void __init orion5x_mpp_conf(struct orion5x_mpp_mode *mode) | |||
160 | orion_gpio_set_unused(mode->mpp); | 160 | orion_gpio_set_unused(mode->mpp); |
161 | 161 | ||
162 | orion_gpio_set_valid(mode->mpp, !!(mode->type == MPP_GPIO)); | 162 | orion_gpio_set_valid(mode->mpp, !!(mode->type == MPP_GPIO)); |
163 | |||
164 | mode++; | ||
165 | } | 163 | } |
166 | 164 | ||
167 | writel(mpp_0_7_ctrl, MPP_0_7_CTRL); | 165 | writel(mpp_0_7_ctrl, MPP_0_7_CTRL); |
diff --git a/arch/arm/mach-orion5x/ts78xx-setup.c b/arch/arm/mach-orion5x/ts78xx-setup.c index 16f1bd5324be..c1c1cd04bdde 100644 --- a/arch/arm/mach-orion5x/ts78xx-setup.c +++ b/arch/arm/mach-orion5x/ts78xx-setup.c | |||
@@ -239,7 +239,7 @@ static struct platform_nand_data ts78xx_ts_nand_data = { | |||
239 | static struct resource ts78xx_ts_nand_resources = { | 239 | static struct resource ts78xx_ts_nand_resources = { |
240 | .start = TS_NAND_DATA, | 240 | .start = TS_NAND_DATA, |
241 | .end = TS_NAND_DATA + 4, | 241 | .end = TS_NAND_DATA + 4, |
242 | .flags = IORESOURCE_IO, | 242 | .flags = IORESOURCE_MEM, |
243 | }; | 243 | }; |
244 | 244 | ||
245 | static struct platform_device ts78xx_ts_nand_device = { | 245 | static struct platform_device ts78xx_ts_nand_device = { |
diff --git a/arch/arm/mach-pxa/cm-x2xx.c b/arch/arm/mach-pxa/cm-x2xx.c index ac5598ce9724..d34b99febeb9 100644 --- a/arch/arm/mach-pxa/cm-x2xx.c +++ b/arch/arm/mach-pxa/cm-x2xx.c | |||
@@ -476,8 +476,6 @@ static void __init cmx2xx_init(void) | |||
476 | 476 | ||
477 | static void __init cmx2xx_init_irq(void) | 477 | static void __init cmx2xx_init_irq(void) |
478 | { | 478 | { |
479 | pxa27x_init_irq(); | ||
480 | |||
481 | if (cpu_is_pxa25x()) { | 479 | if (cpu_is_pxa25x()) { |
482 | pxa25x_init_irq(); | 480 | pxa25x_init_irq(); |
483 | cmx2xx_pci_init_irq(CMX255_GPIO_IT8152_IRQ); | 481 | cmx2xx_pci_init_irq(CMX255_GPIO_IT8152_IRQ); |
diff --git a/arch/arm/mach-pxa/saar.c b/arch/arm/mach-pxa/saar.c index 4b521e045d75..ffa50e633ee6 100644 --- a/arch/arm/mach-pxa/saar.c +++ b/arch/arm/mach-pxa/saar.c | |||
@@ -116,7 +116,7 @@ static struct platform_device smc91x_device = { | |||
116 | }, | 116 | }, |
117 | }; | 117 | }; |
118 | 118 | ||
119 | #if defined(CONFIG_FB_PXA) || (CONFIG_FB_PXA_MODULE) | 119 | #if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE) |
120 | static uint16_t lcd_power_on[] = { | 120 | static uint16_t lcd_power_on[] = { |
121 | /* single frame */ | 121 | /* single frame */ |
122 | SMART_CMD_NOOP, | 122 | SMART_CMD_NOOP, |
diff --git a/arch/arm/mach-s3c64xx/Kconfig b/arch/arm/mach-s3c64xx/Kconfig index 1ca7bdc6485c..579d2f0f4dd0 100644 --- a/arch/arm/mach-s3c64xx/Kconfig +++ b/arch/arm/mach-s3c64xx/Kconfig | |||
@@ -143,7 +143,7 @@ config MACH_SMDK6410 | |||
143 | select S3C_DEV_USB_HSOTG | 143 | select S3C_DEV_USB_HSOTG |
144 | select S3C_DEV_WDT | 144 | select S3C_DEV_WDT |
145 | select SAMSUNG_DEV_KEYPAD | 145 | select SAMSUNG_DEV_KEYPAD |
146 | select HAVE_S3C2410_WATCHDOG | 146 | select HAVE_S3C2410_WATCHDOG if WATCHDOG |
147 | select S3C64XX_SETUP_SDHCI | 147 | select S3C64XX_SETUP_SDHCI |
148 | select S3C64XX_SETUP_I2C1 | 148 | select S3C64XX_SETUP_I2C1 |
149 | select S3C64XX_SETUP_IDE | 149 | select S3C64XX_SETUP_IDE |
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c index 32d9e2816e56..d3260542b943 100644 --- a/arch/arm/mach-shmobile/board-ap4evb.c +++ b/arch/arm/mach-shmobile/board-ap4evb.c | |||
@@ -163,11 +163,13 @@ static struct mtd_partition nor_flash_partitions[] = { | |||
163 | .name = "loader", | 163 | .name = "loader", |
164 | .offset = 0x00000000, | 164 | .offset = 0x00000000, |
165 | .size = 512 * 1024, | 165 | .size = 512 * 1024, |
166 | .mask_flags = MTD_WRITEABLE, | ||
166 | }, | 167 | }, |
167 | { | 168 | { |
168 | .name = "bootenv", | 169 | .name = "bootenv", |
169 | .offset = MTDPART_OFS_APPEND, | 170 | .offset = MTDPART_OFS_APPEND, |
170 | .size = 512 * 1024, | 171 | .size = 512 * 1024, |
172 | .mask_flags = MTD_WRITEABLE, | ||
171 | }, | 173 | }, |
172 | { | 174 | { |
173 | .name = "kernel_ro", | 175 | .name = "kernel_ro", |
@@ -581,6 +583,10 @@ static int fsi_set_rate(int is_porta, int rate) | |||
581 | return -EINVAL; | 583 | return -EINVAL; |
582 | 584 | ||
583 | switch (rate) { | 585 | switch (rate) { |
586 | case 44100: | ||
587 | clk_set_rate(fsib_clk, clk_round_rate(fsib_clk, 11283000)); | ||
588 | ret = SH_FSI_ACKMD_256 | SH_FSI_BPFMD_64; | ||
589 | break; | ||
584 | case 48000: | 590 | case 48000: |
585 | clk_set_rate(fsib_clk, clk_round_rate(fsib_clk, 85428000)); | 591 | clk_set_rate(fsib_clk, clk_round_rate(fsib_clk, 85428000)); |
586 | clk_set_rate(fdiv_clk, clk_round_rate(fdiv_clk, 12204000)); | 592 | clk_set_rate(fdiv_clk, clk_round_rate(fdiv_clk, 12204000)); |
diff --git a/arch/arm/mach-shmobile/intc-sh7372.c b/arch/arm/mach-shmobile/intc-sh7372.c index 4cd3cae38e72..30b2f400666a 100644 --- a/arch/arm/mach-shmobile/intc-sh7372.c +++ b/arch/arm/mach-shmobile/intc-sh7372.c | |||
@@ -98,7 +98,7 @@ static struct intc_vect intca_vectors[] __initdata = { | |||
98 | INTC_VECT(IRQ14A, 0x03c0), INTC_VECT(IRQ15A, 0x03e0), | 98 | INTC_VECT(IRQ14A, 0x03c0), INTC_VECT(IRQ15A, 0x03e0), |
99 | INTC_VECT(IRQ16A, 0x3200), INTC_VECT(IRQ17A, 0x3220), | 99 | INTC_VECT(IRQ16A, 0x3200), INTC_VECT(IRQ17A, 0x3220), |
100 | INTC_VECT(IRQ18A, 0x3240), INTC_VECT(IRQ19A, 0x3260), | 100 | INTC_VECT(IRQ18A, 0x3240), INTC_VECT(IRQ19A, 0x3260), |
101 | INTC_VECT(IRQ20A, 0x3280), INTC_VECT(IRQ31A, 0x32a0), | 101 | INTC_VECT(IRQ20A, 0x3280), INTC_VECT(IRQ21A, 0x32a0), |
102 | INTC_VECT(IRQ22A, 0x32c0), INTC_VECT(IRQ23A, 0x32e0), | 102 | INTC_VECT(IRQ22A, 0x32c0), INTC_VECT(IRQ23A, 0x32e0), |
103 | INTC_VECT(IRQ24A, 0x3300), INTC_VECT(IRQ25A, 0x3320), | 103 | INTC_VECT(IRQ24A, 0x3300), INTC_VECT(IRQ25A, 0x3320), |
104 | INTC_VECT(IRQ26A, 0x3340), INTC_VECT(IRQ27A, 0x3360), | 104 | INTC_VECT(IRQ26A, 0x3340), INTC_VECT(IRQ27A, 0x3360), |
diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c index c2e405a9e025..fd25ccd7272f 100644 --- a/arch/arm/mach-vexpress/ct-ca9x4.c +++ b/arch/arm/mach-vexpress/ct-ca9x4.c | |||
@@ -54,7 +54,9 @@ static struct map_desc ct_ca9x4_io_desc[] __initdata = { | |||
54 | 54 | ||
55 | static void __init ct_ca9x4_map_io(void) | 55 | static void __init ct_ca9x4_map_io(void) |
56 | { | 56 | { |
57 | #ifdef CONFIG_LOCAL_TIMERS | ||
57 | twd_base = MMIO_P2V(A9_MPCORE_TWD); | 58 | twd_base = MMIO_P2V(A9_MPCORE_TWD); |
59 | #endif | ||
58 | v2m_map_io(ct_ca9x4_io_desc, ARRAY_SIZE(ct_ca9x4_io_desc)); | 60 | v2m_map_io(ct_ca9x4_io_desc, ARRAY_SIZE(ct_ca9x4_io_desc)); |
59 | } | 61 | } |
60 | 62 | ||
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index e4dd0646e859..ac6a36142fcd 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
@@ -198,7 +198,7 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot) | |||
198 | * fragmentation of the DMA space, and also prevents allocations | 198 | * fragmentation of the DMA space, and also prevents allocations |
199 | * smaller than a section from crossing a section boundary. | 199 | * smaller than a section from crossing a section boundary. |
200 | */ | 200 | */ |
201 | bit = fls(size - 1) + 1; | 201 | bit = fls(size - 1); |
202 | if (bit > SECTION_SHIFT) | 202 | if (bit > SECTION_SHIFT) |
203 | bit = SECTION_SHIFT; | 203 | bit = SECTION_SHIFT; |
204 | align = 1 << bit; | 204 | align = 1 << bit; |
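The alignment fix above drops a stray "+ 1"; a worked example of the intent, with illustrative numbers only:

    /* For a 64 KiB (0x10000) request: fls(0x10000 - 1) == 16, so
     * align == 1 << 16 == 64 KiB, the request size rounded up to a power
     * of two. The old "fls(size - 1) + 1" gave 17, i.e. a needlessly
     * doubled 128 KiB alignment (capped at SECTION_SHIFT). */
    bit = fls(size - 1);
    if (bit > SECTION_SHIFT)
            bit = SECTION_SHIFT;
    align = 1 << bit;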
diff --git a/arch/arm/plat-omap/devices.c b/arch/arm/plat-omap/devices.c index 6f42a18b8aa4..fc819120978d 100644 --- a/arch/arm/plat-omap/devices.c +++ b/arch/arm/plat-omap/devices.c | |||
@@ -284,12 +284,14 @@ void __init omap_dsp_reserve_sdram_memblock(void) | |||
284 | if (!size) | 284 | if (!size) |
285 | return; | 285 | return; |
286 | 286 | ||
287 | paddr = __memblock_alloc_base(size, SZ_1M, MEMBLOCK_REAL_LIMIT); | 287 | paddr = memblock_alloc(size, SZ_1M); |
288 | if (!paddr) { | 288 | if (!paddr) { |
289 | pr_err("%s: failed to reserve %x bytes\n", | 289 | pr_err("%s: failed to reserve %x bytes\n", |
290 | __func__, size); | 290 | __func__, size); |
291 | return; | 291 | return; |
292 | } | 292 | } |
293 | memblock_free(paddr, size); | ||
294 | memblock_remove(paddr, size); | ||
293 | 295 | ||
294 | omap_dsp_phys_mempool_base = paddr; | 296 | omap_dsp_phys_mempool_base = paddr; |
295 | } | 297 | } |
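The hunk above replaces __memblock_alloc_base() with plain memblock_alloc() plus an explicit free/remove pair; a condensed sketch of the resulting idiom, using the same calls as the hunk:

    /* Allocate a 1 MiB-aligned block to pick a valid physical range, then
     * give it back to memblock and remove it from the kernel's memory map
     * so the range stays out of the page allocator and can be handed to
     * the DSP. */
    paddr = memblock_alloc(size, SZ_1M);
    if (!paddr)
            return;                 /* reservation failed; the real code pr_err()s */
    memblock_free(paddr, size);
    memblock_remove(paddr, size);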
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c index f5c5b8da9a87..2c2826571d45 100644 --- a/arch/arm/plat-omap/dma.c +++ b/arch/arm/plat-omap/dma.c | |||
@@ -1983,6 +1983,8 @@ static int omap2_dma_handle_ch(int ch) | |||
1983 | 1983 | ||
1984 | dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(ch)); | 1984 | dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(ch)); |
1985 | dma_write(1 << ch, IRQSTATUS_L0); | 1985 | dma_write(1 << ch, IRQSTATUS_L0); |
1986 | /* read back the register to flush the write */ | ||
1987 | dma_read(IRQSTATUS_L0); | ||
1986 | 1988 | ||
1987 | /* If the ch is not chained then chain_id will be -1 */ | 1989 | /* If the ch is not chained then chain_id will be -1 */ |
1988 | if (dma_chan[ch].chain_id != -1) { | 1990 | if (dma_chan[ch].chain_id != -1) { |
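The added read-back is the usual fix for posted MMIO writes; a condensed sketch of the sequence, with register names as in the hunk:

    dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(ch));   /* clear channel status     */
    dma_write(1 << ch, IRQSTATUS_L0);               /* ack the L0 interrupt bit */
    dma_read(IRQSTATUS_L0);                         /* read back: flushes the
                                                     * posted write so the ack
                                                     * lands before we return   */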
diff --git a/arch/arm/plat-orion/include/plat/pcie.h b/arch/arm/plat-orion/include/plat/pcie.h index 3ebfef72b4e7..cc99163e73fd 100644 --- a/arch/arm/plat-orion/include/plat/pcie.h +++ b/arch/arm/plat-orion/include/plat/pcie.h | |||
@@ -11,12 +11,15 @@ | |||
11 | #ifndef __PLAT_PCIE_H | 11 | #ifndef __PLAT_PCIE_H |
12 | #define __PLAT_PCIE_H | 12 | #define __PLAT_PCIE_H |
13 | 13 | ||
14 | struct pci_bus; | ||
15 | |||
14 | u32 orion_pcie_dev_id(void __iomem *base); | 16 | u32 orion_pcie_dev_id(void __iomem *base); |
15 | u32 orion_pcie_rev(void __iomem *base); | 17 | u32 orion_pcie_rev(void __iomem *base); |
16 | int orion_pcie_link_up(void __iomem *base); | 18 | int orion_pcie_link_up(void __iomem *base); |
17 | int orion_pcie_x4_mode(void __iomem *base); | 19 | int orion_pcie_x4_mode(void __iomem *base); |
18 | int orion_pcie_get_local_bus_nr(void __iomem *base); | 20 | int orion_pcie_get_local_bus_nr(void __iomem *base); |
19 | void orion_pcie_set_local_bus_nr(void __iomem *base, int nr); | 21 | void orion_pcie_set_local_bus_nr(void __iomem *base, int nr); |
22 | void orion_pcie_reset(void __iomem *base); | ||
20 | void orion_pcie_setup(void __iomem *base, | 23 | void orion_pcie_setup(void __iomem *base, |
21 | struct mbus_dram_target_info *dram); | 24 | struct mbus_dram_target_info *dram); |
22 | int orion_pcie_rd_conf(void __iomem *base, struct pci_bus *bus, | 25 | int orion_pcie_rd_conf(void __iomem *base, struct pci_bus *bus, |
diff --git a/arch/arm/plat-orion/pcie.c b/arch/arm/plat-orion/pcie.c index 779553a1595e..af2d733c50b5 100644 --- a/arch/arm/plat-orion/pcie.c +++ b/arch/arm/plat-orion/pcie.c | |||
@@ -182,11 +182,6 @@ void __init orion_pcie_setup(void __iomem *base, | |||
182 | u32 mask; | 182 | u32 mask; |
183 | 183 | ||
184 | /* | 184 | /* |
185 | * soft reset PCIe unit | ||
186 | */ | ||
187 | orion_pcie_reset(base); | ||
188 | |||
189 | /* | ||
190 | * Point PCIe unit MBUS decode windows to DRAM space. | 185 | * Point PCIe unit MBUS decode windows to DRAM space. |
191 | */ | 186 | */ |
192 | orion_pcie_setup_wins(base, dram); | 187 | orion_pcie_setup_wins(base, dram); |
diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c index 3a078ad3aa44..331de723c676 100644 --- a/arch/ia64/hp/sim/simscsi.c +++ b/arch/ia64/hp/sim/simscsi.c | |||
@@ -202,7 +202,7 @@ simscsi_readwrite10 (struct scsi_cmnd *sc, int mode) | |||
202 | } | 202 | } |
203 | 203 | ||
204 | static int | 204 | static int |
205 | simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) | 205 | simscsi_queuecommand_lck (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) |
206 | { | 206 | { |
207 | unsigned int target_id = sc->device->id; | 207 | unsigned int target_id = sc->device->id; |
208 | char fname[MAX_ROOT_LEN+16]; | 208 | char fname[MAX_ROOT_LEN+16]; |
@@ -326,6 +326,8 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) | |||
326 | return 0; | 326 | return 0; |
327 | } | 327 | } |
328 | 328 | ||
329 | static DEF_SCSI_QCMD(simscsi_queuecommand) | ||
330 | |||
329 | static int | 331 | static int |
330 | simscsi_host_reset (struct scsi_cmnd *sc) | 332 | simscsi_host_reset (struct scsi_cmnd *sc) |
331 | { | 333 | { |
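This is the SCSI host-lock push-down conversion: the driver's handler gains an _lck suffix and DEF_SCSI_QCMD() generates a wrapper under the original name that takes the host lock around it. A sketch of the pattern with placeholder foo_* names:

    static int foo_queuecommand_lck(struct scsi_cmnd *sc,
                                    void (*done)(struct scsi_cmnd *))
    {
            /* driver logic unchanged; still runs with the host lock held */
            return 0;
    }

    /* expands to foo_queuecommand(), which wraps the _lck variant */
    static DEF_SCSI_QCMD(foo_queuecommand)

The struct scsi_host_template entry keeps pointing at foo_queuecommand, so no other part of the driver changes.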
diff --git a/arch/s390/Kconfig.debug b/arch/s390/Kconfig.debug index 45e0c6199f36..05221b13ffb1 100644 --- a/arch/s390/Kconfig.debug +++ b/arch/s390/Kconfig.debug | |||
@@ -6,6 +6,18 @@ config TRACE_IRQFLAGS_SUPPORT | |||
6 | 6 | ||
7 | source "lib/Kconfig.debug" | 7 | source "lib/Kconfig.debug" |
8 | 8 | ||
9 | config STRICT_DEVMEM | ||
10 | def_bool y | ||
11 | prompt "Filter access to /dev/mem" | ||
12 | ---help--- | ||
13 | This option restricts access to /dev/mem. If this option is | ||
14 | disabled, you allow userspace access to all memory, including | ||
15 | kernel and userspace memory. Accidental memory access is likely | ||
16 | to be disastrous. | ||
17 | Memory access is required for experts who want to debug the kernel. | ||
18 | |||
19 | If you are unsure, say Y. | ||
20 | |||
9 | config DEBUG_STRICT_USER_COPY_CHECKS | 21 | config DEBUG_STRICT_USER_COPY_CHECKS |
10 | bool "Strict user copy size checks" | 22 | bool "Strict user copy size checks" |
11 | ---help--- | 23 | ---help--- |
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h index a8729ea7e9ac..3c987e9ec8d6 100644 --- a/arch/s390/include/asm/page.h +++ b/arch/s390/include/asm/page.h | |||
@@ -130,6 +130,11 @@ struct page; | |||
130 | void arch_free_page(struct page *page, int order); | 130 | void arch_free_page(struct page *page, int order); |
131 | void arch_alloc_page(struct page *page, int order); | 131 | void arch_alloc_page(struct page *page, int order); |
132 | 132 | ||
133 | static inline int devmem_is_allowed(unsigned long pfn) | ||
134 | { | ||
135 | return 0; | ||
136 | } | ||
137 | |||
133 | #define HAVE_ARCH_FREE_PAGE | 138 | #define HAVE_ARCH_FREE_PAGE |
134 | #define HAVE_ARCH_ALLOC_PAGE | 139 | #define HAVE_ARCH_ALLOC_PAGE |
135 | 140 | ||
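The stub above implements the hook behind the new STRICT_DEVMEM option. A hedged sketch (not the actual drivers/char/mem.c code) of how such a hook is typically consulted, checking every pfn of a requested /dev/mem range:

    static int range_is_allowed_sketch(unsigned long pfn, unsigned long nr_pages)
    {
            unsigned long i;

            for (i = 0; i < nr_pages; i++)
                    if (!devmem_is_allowed(pfn + i))
                            return 0;       /* the s390 stub returns 0, so
                                             * every /dev/mem access is
                                             * rejected when filtering is on */
            return 1;
    }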
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index d60fc4398516..2564793ec2b6 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <asm/sections.h> | 30 | #include <asm/sections.h> |
31 | #include <linux/module.h> | 31 | #include <linux/module.h> |
32 | #include <linux/slab.h> | 32 | #include <linux/slab.h> |
33 | #include <linux/hardirq.h> | ||
33 | 34 | ||
34 | DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; | 35 | DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; |
35 | DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); | 36 | DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); |
@@ -212,7 +213,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs) | |||
212 | /* Set the PER control regs, turns on single step for this address */ | 213 | /* Set the PER control regs, turns on single step for this address */ |
213 | __ctl_load(kprobe_per_regs, 9, 11); | 214 | __ctl_load(kprobe_per_regs, 9, 11); |
214 | regs->psw.mask |= PSW_MASK_PER; | 215 | regs->psw.mask |= PSW_MASK_PER; |
215 | regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK); | 216 | regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT); |
216 | } | 217 | } |
217 | 218 | ||
218 | static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) | 219 | static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) |
@@ -239,7 +240,7 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, | |||
239 | __get_cpu_var(current_kprobe) = p; | 240 | __get_cpu_var(current_kprobe) = p; |
240 | /* Save the interrupt and per flags */ | 241 | /* Save the interrupt and per flags */ |
241 | kcb->kprobe_saved_imask = regs->psw.mask & | 242 | kcb->kprobe_saved_imask = regs->psw.mask & |
242 | (PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK); | 243 | (PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT); |
243 | /* Save the control regs that govern PER */ | 244 | /* Save the control regs that govern PER */ |
244 | __ctl_store(kcb->kprobe_saved_ctl, 9, 11); | 245 | __ctl_store(kcb->kprobe_saved_ctl, 9, 11); |
245 | } | 246 | } |
@@ -316,8 +317,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) | |||
316 | return 1; | 317 | return 1; |
317 | 318 | ||
318 | ss_probe: | 319 | ss_probe: |
319 | if (regs->psw.mask & (PSW_MASK_PER | PSW_MASK_IO)) | ||
320 | local_irq_disable(); | ||
321 | prepare_singlestep(p, regs); | 320 | prepare_singlestep(p, regs); |
322 | kcb->kprobe_status = KPROBE_HIT_SS; | 321 | kcb->kprobe_status = KPROBE_HIT_SS; |
323 | return 1; | 322 | return 1; |
@@ -350,6 +349,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p, | |||
350 | struct hlist_node *node, *tmp; | 349 | struct hlist_node *node, *tmp; |
351 | unsigned long flags, orig_ret_address = 0; | 350 | unsigned long flags, orig_ret_address = 0; |
352 | unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; | 351 | unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; |
352 | kprobe_opcode_t *correct_ret_addr = NULL; | ||
353 | 353 | ||
354 | INIT_HLIST_HEAD(&empty_rp); | 354 | INIT_HLIST_HEAD(&empty_rp); |
355 | kretprobe_hash_lock(current, &head, &flags); | 355 | kretprobe_hash_lock(current, &head, &flags); |
@@ -372,10 +372,32 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p, | |||
372 | /* another task is sharing our hash bucket */ | 372 | /* another task is sharing our hash bucket */ |
373 | continue; | 373 | continue; |
374 | 374 | ||
375 | if (ri->rp && ri->rp->handler) | 375 | orig_ret_address = (unsigned long)ri->ret_addr; |
376 | ri->rp->handler(ri, regs); | 376 | |
377 | if (orig_ret_address != trampoline_address) | ||
378 | /* | ||
379 | * This is the real return address. Any other | ||
380 | * instances associated with this task are for | ||
381 | * other calls deeper on the call stack | ||
382 | */ | ||
383 | break; | ||
384 | } | ||
385 | |||
386 | kretprobe_assert(ri, orig_ret_address, trampoline_address); | ||
387 | |||
388 | correct_ret_addr = ri->ret_addr; | ||
389 | hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { | ||
390 | if (ri->task != current) | ||
391 | /* another task is sharing our hash bucket */ | ||
392 | continue; | ||
377 | 393 | ||
378 | orig_ret_address = (unsigned long)ri->ret_addr; | 394 | orig_ret_address = (unsigned long)ri->ret_addr; |
395 | |||
396 | if (ri->rp && ri->rp->handler) { | ||
397 | ri->ret_addr = correct_ret_addr; | ||
398 | ri->rp->handler(ri, regs); | ||
399 | } | ||
400 | |||
379 | recycle_rp_inst(ri, &empty_rp); | 401 | recycle_rp_inst(ri, &empty_rp); |
380 | 402 | ||
381 | if (orig_ret_address != trampoline_address) { | 403 | if (orig_ret_address != trampoline_address) { |
@@ -387,7 +409,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p, | |||
387 | break; | 409 | break; |
388 | } | 410 | } |
389 | } | 411 | } |
390 | kretprobe_assert(ri, orig_ret_address, trampoline_address); | 412 | |
391 | regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE; | 413 | regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE; |
392 | 414 | ||
393 | reset_current_kprobe(); | 415 | reset_current_kprobe(); |
@@ -465,8 +487,6 @@ static int __kprobes post_kprobe_handler(struct pt_regs *regs) | |||
465 | goto out; | 487 | goto out; |
466 | } | 488 | } |
467 | reset_current_kprobe(); | 489 | reset_current_kprobe(); |
468 | if (regs->psw.mask & (PSW_MASK_PER | PSW_MASK_IO)) | ||
469 | local_irq_enable(); | ||
470 | out: | 490 | out: |
471 | preempt_enable_no_resched(); | 491 | preempt_enable_no_resched(); |
472 | 492 | ||
@@ -482,7 +502,7 @@ out: | |||
482 | return 1; | 502 | return 1; |
483 | } | 503 | } |
484 | 504 | ||
485 | int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) | 505 | static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr) |
486 | { | 506 | { |
487 | struct kprobe *cur = kprobe_running(); | 507 | struct kprobe *cur = kprobe_running(); |
488 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | 508 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); |
@@ -508,8 +528,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) | |||
508 | restore_previous_kprobe(kcb); | 528 | restore_previous_kprobe(kcb); |
509 | else { | 529 | else { |
510 | reset_current_kprobe(); | 530 | reset_current_kprobe(); |
511 | if (regs->psw.mask & (PSW_MASK_PER | PSW_MASK_IO)) | ||
512 | local_irq_enable(); | ||
513 | } | 531 | } |
514 | preempt_enable_no_resched(); | 532 | preempt_enable_no_resched(); |
515 | break; | 533 | break; |
@@ -553,6 +571,18 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) | |||
553 | return 0; | 571 | return 0; |
554 | } | 572 | } |
555 | 573 | ||
574 | int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) | ||
575 | { | ||
576 | int ret; | ||
577 | |||
578 | if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT)) | ||
579 | local_irq_disable(); | ||
580 | ret = kprobe_trap_handler(regs, trapnr); | ||
581 | if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT)) | ||
582 | local_irq_restore(regs->psw.mask & ~PSW_MASK_PER); | ||
583 | return ret; | ||
584 | } | ||
585 | |||
556 | /* | 586 | /* |
557 | * Wrapper routine to for handling exceptions. | 587 | * Wrapper routine to for handling exceptions. |
558 | */ | 588 | */ |
@@ -560,8 +590,12 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, | |||
560 | unsigned long val, void *data) | 590 | unsigned long val, void *data) |
561 | { | 591 | { |
562 | struct die_args *args = (struct die_args *)data; | 592 | struct die_args *args = (struct die_args *)data; |
593 | struct pt_regs *regs = args->regs; | ||
563 | int ret = NOTIFY_DONE; | 594 | int ret = NOTIFY_DONE; |
564 | 595 | ||
596 | if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT)) | ||
597 | local_irq_disable(); | ||
598 | |||
565 | switch (val) { | 599 | switch (val) { |
566 | case DIE_BPT: | 600 | case DIE_BPT: |
567 | if (kprobe_handler(args->regs)) | 601 | if (kprobe_handler(args->regs)) |
@@ -572,16 +606,17 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, | |||
572 | ret = NOTIFY_STOP; | 606 | ret = NOTIFY_STOP; |
573 | break; | 607 | break; |
574 | case DIE_TRAP: | 608 | case DIE_TRAP: |
575 | /* kprobe_running() needs smp_processor_id() */ | 609 | if (!preemptible() && kprobe_running() && |
576 | preempt_disable(); | 610 | kprobe_trap_handler(args->regs, args->trapnr)) |
577 | if (kprobe_running() && | ||
578 | kprobe_fault_handler(args->regs, args->trapnr)) | ||
579 | ret = NOTIFY_STOP; | 611 | ret = NOTIFY_STOP; |
580 | preempt_enable(); | ||
581 | break; | 612 | break; |
582 | default: | 613 | default: |
583 | break; | 614 | break; |
584 | } | 615 | } |
616 | |||
617 | if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT)) | ||
618 | local_irq_restore(regs->psw.mask & ~PSW_MASK_PER); | ||
619 | |||
585 | return ret; | 620 | return ret; |
586 | } | 621 | } |
587 | 622 | ||
@@ -595,6 +630,7 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) | |||
595 | 630 | ||
596 | /* setup return addr to the jprobe handler routine */ | 631 | /* setup return addr to the jprobe handler routine */ |
597 | regs->psw.addr = (unsigned long)(jp->entry) | PSW_ADDR_AMODE; | 632 | regs->psw.addr = (unsigned long)(jp->entry) | PSW_ADDR_AMODE; |
633 | regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT); | ||
598 | 634 | ||
599 | /* r14 is the function return address */ | 635 | /* r14 is the function return address */ |
600 | kcb->jprobe_saved_r14 = (unsigned long)regs->gprs[14]; | 636 | kcb->jprobe_saved_r14 = (unsigned long)regs->gprs[14]; |
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c index 38e641cdd977..45b405ca2567 100644 --- a/arch/s390/mm/gup.c +++ b/arch/s390/mm/gup.c | |||
@@ -20,18 +20,17 @@ | |||
20 | static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr, | 20 | static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr, |
21 | unsigned long end, int write, struct page **pages, int *nr) | 21 | unsigned long end, int write, struct page **pages, int *nr) |
22 | { | 22 | { |
23 | unsigned long mask, result; | 23 | unsigned long mask; |
24 | pte_t *ptep, pte; | 24 | pte_t *ptep, pte; |
25 | struct page *page; | 25 | struct page *page; |
26 | 26 | ||
27 | result = write ? 0 : _PAGE_RO; | 27 | mask = (write ? _PAGE_RO : 0) | _PAGE_INVALID | _PAGE_SPECIAL; |
28 | mask = result | _PAGE_INVALID | _PAGE_SPECIAL; | ||
29 | 28 | ||
30 | ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr); | 29 | ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr); |
31 | do { | 30 | do { |
32 | pte = *ptep; | 31 | pte = *ptep; |
33 | barrier(); | 32 | barrier(); |
34 | if ((pte_val(pte) & mask) != result) | 33 | if ((pte_val(pte) & mask) != 0) |
35 | return 0; | 34 | return 0; |
36 | VM_BUG_ON(!pfn_valid(pte_pfn(pte))); | 35 | VM_BUG_ON(!pfn_valid(pte_pfn(pte))); |
37 | page = pte_page(pte); | 36 | page = pte_page(pte); |
diff --git a/arch/um/include/asm/ptrace-generic.h b/arch/um/include/asm/ptrace-generic.h index 2cd899f75a3c..b7c5bab9bd77 100644 --- a/arch/um/include/asm/ptrace-generic.h +++ b/arch/um/include/asm/ptrace-generic.h | |||
@@ -38,8 +38,8 @@ struct pt_regs { | |||
38 | 38 | ||
39 | struct task_struct; | 39 | struct task_struct; |
40 | 40 | ||
41 | extern long subarch_ptrace(struct task_struct *child, long request, long addr, | 41 | extern long subarch_ptrace(struct task_struct *child, long request, |
42 | long data); | 42 | unsigned long addr, unsigned long data); |
43 | extern unsigned long getreg(struct task_struct *child, int regno); | 43 | extern unsigned long getreg(struct task_struct *child, int regno); |
44 | extern int putreg(struct task_struct *child, int regno, unsigned long value); | 44 | extern int putreg(struct task_struct *child, int regno, unsigned long value); |
45 | extern int get_fpregs(struct user_i387_struct __user *buf, | 45 | extern int get_fpregs(struct user_i387_struct __user *buf, |
diff --git a/arch/um/kernel/ptrace.c b/arch/um/kernel/ptrace.c index a5e33f29bbeb..701b672c1122 100644 --- a/arch/um/kernel/ptrace.c +++ b/arch/um/kernel/ptrace.c | |||
@@ -122,7 +122,7 @@ long arch_ptrace(struct task_struct *child, long request, | |||
122 | break; | 122 | break; |
123 | 123 | ||
124 | case PTRACE_SET_THREAD_AREA: | 124 | case PTRACE_SET_THREAD_AREA: |
125 | ret = ptrace_set_thread_area(child, addr, datavp); | 125 | ret = ptrace_set_thread_area(child, addr, vp); |
126 | break; | 126 | break; |
127 | 127 | ||
128 | case PTRACE_FAULTINFO: { | 128 | case PTRACE_FAULTINFO: { |
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 286de34b0ed6..f6ce0bda3b98 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h | |||
@@ -141,13 +141,13 @@ static inline void native_apic_msr_write(u32 reg, u32 v) | |||
141 | 141 | ||
142 | static inline u32 native_apic_msr_read(u32 reg) | 142 | static inline u32 native_apic_msr_read(u32 reg) |
143 | { | 143 | { |
144 | u32 low, high; | 144 | u64 msr; |
145 | 145 | ||
146 | if (reg == APIC_DFR) | 146 | if (reg == APIC_DFR) |
147 | return -1; | 147 | return -1; |
148 | 148 | ||
149 | rdmsr(APIC_BASE_MSR + (reg >> 4), low, high); | 149 | rdmsrl(APIC_BASE_MSR + (reg >> 4), msr); |
150 | return low; | 150 | return (u32)msr; |
151 | } | 151 | } |
152 | 152 | ||
153 | static inline void native_x2apic_wait_icr_idle(void) | 153 | static inline void native_x2apic_wait_icr_idle(void) |
@@ -181,12 +181,12 @@ extern void enable_x2apic(void); | |||
181 | extern void x2apic_icr_write(u32 low, u32 id); | 181 | extern void x2apic_icr_write(u32 low, u32 id); |
182 | static inline int x2apic_enabled(void) | 182 | static inline int x2apic_enabled(void) |
183 | { | 183 | { |
184 | int msr, msr2; | 184 | u64 msr; |
185 | 185 | ||
186 | if (!cpu_has_x2apic) | 186 | if (!cpu_has_x2apic) |
187 | return 0; | 187 | return 0; |
188 | 188 | ||
189 | rdmsr(MSR_IA32_APICBASE, msr, msr2); | 189 | rdmsrl(MSR_IA32_APICBASE, msr); |
190 | if (msr & X2APIC_ENABLE) | 190 | if (msr & X2APIC_ENABLE) |
191 | return 1; | 191 | return 1; |
192 | return 0; | 192 | return 0; |
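Both hunks above replace the two-output rdmsr() with rdmsrl(), which reads the MSR into a single u64; a condensed sketch of the resulting pattern:

    static int x2apic_enabled_sketch(void)
    {
            u64 msr;

            rdmsrl(MSR_IA32_APICBASE, msr);         /* one 64-bit read          */
            return !!(msr & X2APIC_ENABLE);         /* flag test on the full u64;
                                                     * callers needing the old
                                                     * "low" half use (u32)msr  */
    }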
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h index b2f2d2e05cec..6d90adf4428a 100644 --- a/arch/x86/include/asm/uv/uv_mmrs.h +++ b/arch/x86/include/asm/uv/uv_mmrs.h | |||
@@ -806,6 +806,78 @@ union uvh_node_present_table_u { | |||
806 | }; | 806 | }; |
807 | 807 | ||
808 | /* ========================================================================= */ | 808 | /* ========================================================================= */ |
809 | /* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR */ | ||
810 | /* ========================================================================= */ | ||
811 | #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x16000c8UL | ||
812 | |||
813 | #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24 | ||
814 | #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL | ||
815 | #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48 | ||
816 | #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL | ||
817 | #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63 | ||
818 | #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL | ||
819 | |||
820 | union uvh_rh_gam_alias210_overlay_config_0_mmr_u { | ||
821 | unsigned long v; | ||
822 | struct uvh_rh_gam_alias210_overlay_config_0_mmr_s { | ||
823 | unsigned long rsvd_0_23: 24; /* */ | ||
824 | unsigned long base : 8; /* RW */ | ||
825 | unsigned long rsvd_32_47: 16; /* */ | ||
826 | unsigned long m_alias : 5; /* RW */ | ||
827 | unsigned long rsvd_53_62: 10; /* */ | ||
828 | unsigned long enable : 1; /* RW */ | ||
829 | } s; | ||
830 | }; | ||
831 | |||
832 | /* ========================================================================= */ | ||
833 | /* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR */ | ||
834 | /* ========================================================================= */ | ||
835 | #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x16000d8UL | ||
836 | |||
837 | #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24 | ||
838 | #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL | ||
839 | #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48 | ||
840 | #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL | ||
841 | #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63 | ||
842 | #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL | ||
843 | |||
844 | union uvh_rh_gam_alias210_overlay_config_1_mmr_u { | ||
845 | unsigned long v; | ||
846 | struct uvh_rh_gam_alias210_overlay_config_1_mmr_s { | ||
847 | unsigned long rsvd_0_23: 24; /* */ | ||
848 | unsigned long base : 8; /* RW */ | ||
849 | unsigned long rsvd_32_47: 16; /* */ | ||
850 | unsigned long m_alias : 5; /* RW */ | ||
851 | unsigned long rsvd_53_62: 10; /* */ | ||
852 | unsigned long enable : 1; /* RW */ | ||
853 | } s; | ||
854 | }; | ||
855 | |||
856 | /* ========================================================================= */ | ||
857 | /* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR */ | ||
858 | /* ========================================================================= */ | ||
859 | #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x16000e8UL | ||
860 | |||
861 | #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24 | ||
862 | #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL | ||
863 | #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48 | ||
864 | #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL | ||
865 | #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63 | ||
866 | #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL | ||
867 | |||
868 | union uvh_rh_gam_alias210_overlay_config_2_mmr_u { | ||
869 | unsigned long v; | ||
870 | struct uvh_rh_gam_alias210_overlay_config_2_mmr_s { | ||
871 | unsigned long rsvd_0_23: 24; /* */ | ||
872 | unsigned long base : 8; /* RW */ | ||
873 | unsigned long rsvd_32_47: 16; /* */ | ||
874 | unsigned long m_alias : 5; /* RW */ | ||
875 | unsigned long rsvd_53_62: 10; /* */ | ||
876 | unsigned long enable : 1; /* RW */ | ||
877 | } s; | ||
878 | }; | ||
879 | |||
880 | /* ========================================================================= */ | ||
809 | /* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR */ | 881 | /* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR */ |
810 | /* ========================================================================= */ | 882 | /* ========================================================================= */ |
811 | #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL | 883 | #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL |
@@ -857,6 +929,29 @@ union uvh_rh_gam_alias210_redirect_config_2_mmr_u { | |||
857 | }; | 929 | }; |
858 | 930 | ||
859 | /* ========================================================================= */ | 931 | /* ========================================================================= */ |
932 | /* UVH_RH_GAM_CONFIG_MMR */ | ||
933 | /* ========================================================================= */ | ||
934 | #define UVH_RH_GAM_CONFIG_MMR 0x1600000UL | ||
935 | |||
936 | #define UVH_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0 | ||
937 | #define UVH_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL | ||
938 | #define UVH_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6 | ||
939 | #define UVH_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL | ||
940 | #define UVH_RH_GAM_CONFIG_MMR_MMIOL_CFG_SHFT 12 | ||
941 | #define UVH_RH_GAM_CONFIG_MMR_MMIOL_CFG_MASK 0x0000000000001000UL | ||
942 | |||
943 | union uvh_rh_gam_config_mmr_u { | ||
944 | unsigned long v; | ||
945 | struct uvh_rh_gam_config_mmr_s { | ||
946 | unsigned long m_skt : 6; /* RW */ | ||
947 | unsigned long n_skt : 4; /* RW */ | ||
948 | unsigned long rsvd_10_11: 2; /* */ | ||
949 | unsigned long mmiol_cfg : 1; /* RW */ | ||
950 | unsigned long rsvd_13_63: 51; /* */ | ||
951 | } s; | ||
952 | }; | ||
953 | |||
954 | /* ========================================================================= */ | ||
860 | /* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */ | 955 | /* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */ |
861 | /* ========================================================================= */ | 956 | /* ========================================================================= */ |
862 | #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL | 957 | #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL |
@@ -987,97 +1082,5 @@ union uvh_rtc1_int_config_u { | |||
987 | } s; | 1082 | } s; |
988 | }; | 1083 | }; |
989 | 1084 | ||
990 | /* ========================================================================= */ | ||
991 | /* UVH_SI_ADDR_MAP_CONFIG */ | ||
992 | /* ========================================================================= */ | ||
993 | #define UVH_SI_ADDR_MAP_CONFIG 0xc80000UL | ||
994 | |||
995 | #define UVH_SI_ADDR_MAP_CONFIG_M_SKT_SHFT 0 | ||
996 | #define UVH_SI_ADDR_MAP_CONFIG_M_SKT_MASK 0x000000000000003fUL | ||
997 | #define UVH_SI_ADDR_MAP_CONFIG_N_SKT_SHFT 8 | ||
998 | #define UVH_SI_ADDR_MAP_CONFIG_N_SKT_MASK 0x0000000000000f00UL | ||
999 | |||
1000 | union uvh_si_addr_map_config_u { | ||
1001 | unsigned long v; | ||
1002 | struct uvh_si_addr_map_config_s { | ||
1003 | unsigned long m_skt : 6; /* RW */ | ||
1004 | unsigned long rsvd_6_7: 2; /* */ | ||
1005 | unsigned long n_skt : 4; /* RW */ | ||
1006 | unsigned long rsvd_12_63: 52; /* */ | ||
1007 | } s; | ||
1008 | }; | ||
1009 | |||
1010 | /* ========================================================================= */ | ||
1011 | /* UVH_SI_ALIAS0_OVERLAY_CONFIG */ | ||
1012 | /* ========================================================================= */ | ||
1013 | #define UVH_SI_ALIAS0_OVERLAY_CONFIG 0xc80008UL | ||
1014 | |||
1015 | #define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_SHFT 24 | ||
1016 | #define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL | ||
1017 | #define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_SHFT 48 | ||
1018 | #define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL | ||
1019 | #define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_SHFT 63 | ||
1020 | #define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL | ||
1021 | |||
1022 | union uvh_si_alias0_overlay_config_u { | ||
1023 | unsigned long v; | ||
1024 | struct uvh_si_alias0_overlay_config_s { | ||
1025 | unsigned long rsvd_0_23: 24; /* */ | ||
1026 | unsigned long base : 8; /* RW */ | ||
1027 | unsigned long rsvd_32_47: 16; /* */ | ||
1028 | unsigned long m_alias : 5; /* RW */ | ||
1029 | unsigned long rsvd_53_62: 10; /* */ | ||
1030 | unsigned long enable : 1; /* RW */ | ||
1031 | } s; | ||
1032 | }; | ||
1033 | |||
1034 | /* ========================================================================= */ | ||
1035 | /* UVH_SI_ALIAS1_OVERLAY_CONFIG */ | ||
1036 | /* ========================================================================= */ | ||
1037 | #define UVH_SI_ALIAS1_OVERLAY_CONFIG 0xc80010UL | ||
1038 | |||
1039 | #define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_SHFT 24 | ||
1040 | #define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL | ||
1041 | #define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_SHFT 48 | ||
1042 | #define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL | ||
1043 | #define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_SHFT 63 | ||
1044 | #define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL | ||
1045 | |||
1046 | union uvh_si_alias1_overlay_config_u { | ||
1047 | unsigned long v; | ||
1048 | struct uvh_si_alias1_overlay_config_s { | ||
1049 | unsigned long rsvd_0_23: 24; /* */ | ||
1050 | unsigned long base : 8; /* RW */ | ||
1051 | unsigned long rsvd_32_47: 16; /* */ | ||
1052 | unsigned long m_alias : 5; /* RW */ | ||
1053 | unsigned long rsvd_53_62: 10; /* */ | ||
1054 | unsigned long enable : 1; /* RW */ | ||
1055 | } s; | ||
1056 | }; | ||
1057 | |||
1058 | /* ========================================================================= */ | ||
1059 | /* UVH_SI_ALIAS2_OVERLAY_CONFIG */ | ||
1060 | /* ========================================================================= */ | ||
1061 | #define UVH_SI_ALIAS2_OVERLAY_CONFIG 0xc80018UL | ||
1062 | |||
1063 | #define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_SHFT 24 | ||
1064 | #define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL | ||
1065 | #define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_SHFT 48 | ||
1066 | #define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL | ||
1067 | #define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_SHFT 63 | ||
1068 | #define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL | ||
1069 | |||
1070 | union uvh_si_alias2_overlay_config_u { | ||
1071 | unsigned long v; | ||
1072 | struct uvh_si_alias2_overlay_config_s { | ||
1073 | unsigned long rsvd_0_23: 24; /* */ | ||
1074 | unsigned long base : 8; /* RW */ | ||
1075 | unsigned long rsvd_32_47: 16; /* */ | ||
1076 | unsigned long m_alias : 5; /* RW */ | ||
1077 | unsigned long rsvd_53_62: 10; /* */ | ||
1078 | unsigned long enable : 1; /* RW */ | ||
1079 | } s; | ||
1080 | }; | ||
1081 | |||
1082 | 1085 | ||
1083 | #endif /* _ASM_X86_UV_UV_MMRS_H */ | 1086 | #endif /* __ASM_UV_MMRS_X86_H__ */ |
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 850657d1b0ed..3f838d537392 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c | |||
@@ -52,7 +52,6 @@ | |||
52 | #include <asm/mce.h> | 52 | #include <asm/mce.h> |
53 | #include <asm/kvm_para.h> | 53 | #include <asm/kvm_para.h> |
54 | #include <asm/tsc.h> | 54 | #include <asm/tsc.h> |
55 | #include <asm/atomic.h> | ||
56 | 55 | ||
57 | unsigned int num_processors; | 56 | unsigned int num_processors; |
58 | 57 | ||
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index ed4118de249e..194539aea175 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c | |||
@@ -379,14 +379,14 @@ struct redir_addr { | |||
379 | #define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT | 379 | #define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT |
380 | 380 | ||
381 | static __initdata struct redir_addr redir_addrs[] = { | 381 | static __initdata struct redir_addr redir_addrs[] = { |
382 | {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_SI_ALIAS0_OVERLAY_CONFIG}, | 382 | {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR}, |
383 | {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_SI_ALIAS1_OVERLAY_CONFIG}, | 383 | {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR}, |
384 | {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_SI_ALIAS2_OVERLAY_CONFIG}, | 384 | {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR}, |
385 | }; | 385 | }; |
386 | 386 | ||
387 | static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size) | 387 | static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size) |
388 | { | 388 | { |
389 | union uvh_si_alias0_overlay_config_u alias; | 389 | union uvh_rh_gam_alias210_overlay_config_2_mmr_u alias; |
390 | union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect; | 390 | union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect; |
391 | int i; | 391 | int i; |
392 | 392 | ||
@@ -660,7 +660,7 @@ void uv_nmi_init(void) | |||
660 | 660 | ||
661 | void __init uv_system_init(void) | 661 | void __init uv_system_init(void) |
662 | { | 662 | { |
663 | union uvh_si_addr_map_config_u m_n_config; | 663 | union uvh_rh_gam_config_mmr_u m_n_config; |
664 | union uvh_node_id_u node_id; | 664 | union uvh_node_id_u node_id; |
665 | unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size; | 665 | unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size; |
666 | int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val; | 666 | int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val; |
@@ -670,7 +670,7 @@ void __init uv_system_init(void) | |||
670 | 670 | ||
671 | map_low_mmrs(); | 671 | map_low_mmrs(); |
672 | 672 | ||
673 | m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG); | 673 | m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR ); |
674 | m_val = m_n_config.s.m_skt; | 674 | m_val = m_n_config.s.m_skt; |
675 | n_val = m_n_config.s.n_skt; | 675 | n_val = m_n_config.s.n_skt; |
676 | mmr_base = | 676 | mmr_base = |
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index 46d58448c3af..e421b8cd6944 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c | |||
@@ -280,11 +280,11 @@ static struct amd_nb *amd_alloc_nb(int cpu, int nb_id) | |||
280 | struct amd_nb *nb; | 280 | struct amd_nb *nb; |
281 | int i; | 281 | int i; |
282 | 282 | ||
283 | nb = kmalloc(sizeof(struct amd_nb), GFP_KERNEL); | 283 | nb = kmalloc_node(sizeof(struct amd_nb), GFP_KERNEL | __GFP_ZERO, |
284 | cpu_to_node(cpu)); | ||
284 | if (!nb) | 285 | if (!nb) |
285 | return NULL; | 286 | return NULL; |
286 | 287 | ||
287 | memset(nb, 0, sizeof(*nb)); | ||
288 | nb->nb_id = nb_id; | 288 | nb->nb_id = nb_id; |
289 | 289 | ||
290 | /* | 290 | /* |
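The allocation change above folds kmalloc() + memset() into one node-aware call; a minimal sketch of the idiom, as used in the hunk:

    /* __GFP_ZERO returns zeroed memory, and cpu_to_node(cpu) keeps the
     * per-northbridge structure on the NUMA node of the CPU that uses it. */
    nb = kmalloc_node(sizeof(*nb), GFP_KERNEL | __GFP_ZERO, cpu_to_node(cpu));
    if (!nb)
            return NULL;
    nb->nb_id = nb_id;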
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c index e1af7c055c7d..ce0cb4721c9a 100644 --- a/arch/x86/kernel/microcode_amd.c +++ b/arch/x86/kernel/microcode_amd.c | |||
@@ -212,7 +212,7 @@ static int install_equiv_cpu_table(const u8 *buf) | |||
212 | return 0; | 212 | return 0; |
213 | } | 213 | } |
214 | 214 | ||
215 | equiv_cpu_table = (struct equiv_cpu_entry *) vmalloc(size); | 215 | equiv_cpu_table = vmalloc(size); |
216 | if (!equiv_cpu_table) { | 216 | if (!equiv_cpu_table) { |
217 | pr_err("failed to allocate equivalent CPU table\n"); | 217 | pr_err("failed to allocate equivalent CPU table\n"); |
218 | return 0; | 218 | return 0; |
diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c index 71825806cd44..6da143c2a6b8 100644 --- a/arch/x86/kernel/mmconf-fam10h_64.c +++ b/arch/x86/kernel/mmconf-fam10h_64.c | |||
@@ -217,13 +217,13 @@ void __cpuinit fam10h_check_enable_mmcfg(void) | |||
217 | wrmsrl(address, val); | 217 | wrmsrl(address, val); |
218 | } | 218 | } |
219 | 219 | ||
220 | static int __devinit set_check_enable_amd_mmconf(const struct dmi_system_id *d) | 220 | static int __init set_check_enable_amd_mmconf(const struct dmi_system_id *d) |
221 | { | 221 | { |
222 | pci_probe |= PCI_CHECK_ENABLE_AMD_MMCONF; | 222 | pci_probe |= PCI_CHECK_ENABLE_AMD_MMCONF; |
223 | return 0; | 223 | return 0; |
224 | } | 224 | } |
225 | 225 | ||
226 | static const struct dmi_system_id __cpuinitconst mmconf_dmi_table[] = { | 226 | static const struct dmi_system_id __initconst mmconf_dmi_table[] = { |
227 | { | 227 | { |
228 | .callback = set_check_enable_amd_mmconf, | 228 | .callback = set_check_enable_amd_mmconf, |
229 | .ident = "Sun Microsystems Machine", | 229 | .ident = "Sun Microsystems Machine", |
@@ -234,7 +234,8 @@ static const struct dmi_system_id __cpuinitconst mmconf_dmi_table[] = { | |||
234 | {} | 234 | {} |
235 | }; | 235 | }; |
236 | 236 | ||
237 | void __cpuinit check_enable_amd_mmconf_dmi(void) | 237 | /* Called from a __cpuinit function, but only on the BSP. */ |
238 | void __ref check_enable_amd_mmconf_dmi(void) | ||
238 | { | 239 | { |
239 | dmi_check_system(mmconf_dmi_table); | 240 | dmi_check_system(mmconf_dmi_table); |
240 | } | 241 | } |
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c index bab3b9e6f66d..008b91eefa18 100644 --- a/arch/x86/kernel/pvclock.c +++ b/arch/x86/kernel/pvclock.c | |||
@@ -41,44 +41,6 @@ void pvclock_set_flags(u8 flags) | |||
41 | valid_flags = flags; | 41 | valid_flags = flags; |
42 | } | 42 | } |
43 | 43 | ||
44 | /* | ||
45 | * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction, | ||
46 | * yielding a 64-bit result. | ||
47 | */ | ||
48 | static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift) | ||
49 | { | ||
50 | u64 product; | ||
51 | #ifdef __i386__ | ||
52 | u32 tmp1, tmp2; | ||
53 | #endif | ||
54 | |||
55 | if (shift < 0) | ||
56 | delta >>= -shift; | ||
57 | else | ||
58 | delta <<= shift; | ||
59 | |||
60 | #ifdef __i386__ | ||
61 | __asm__ ( | ||
62 | "mul %5 ; " | ||
63 | "mov %4,%%eax ; " | ||
64 | "mov %%edx,%4 ; " | ||
65 | "mul %5 ; " | ||
66 | "xor %5,%5 ; " | ||
67 | "add %4,%%eax ; " | ||
68 | "adc %5,%%edx ; " | ||
69 | : "=A" (product), "=r" (tmp1), "=r" (tmp2) | ||
70 | : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) ); | ||
71 | #elif defined(__x86_64__) | ||
72 | __asm__ ( | ||
73 | "mul %%rdx ; shrd $32,%%rdx,%%rax" | ||
74 | : "=a" (product) : "0" (delta), "d" ((u64)mul_frac) ); | ||
75 | #else | ||
76 | #error implement me! | ||
77 | #endif | ||
78 | |||
79 | return product; | ||
80 | } | ||
81 | |||
82 | static u64 pvclock_get_nsec_offset(struct pvclock_shadow_time *shadow) | 44 | static u64 pvclock_get_nsec_offset(struct pvclock_shadow_time *shadow) |
83 | { | 45 | { |
84 | u64 delta = native_read_tsc() - shadow->tsc_timestamp; | 46 | u64 delta = native_read_tsc() - shadow->tsc_timestamp; |
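The removed helper scales a TSC delta by a 32-bit fixed-point fraction, keeping the high bits of the up-to-96-bit product. A portable sketch of the same arithmetic, assuming a compiler with 128-bit integer support:

    static inline u64 scale_delta_sketch(u64 delta, u32 mul_frac, int shift)
    {
            if (shift < 0)
                    delta >>= -shift;
            else
                    delta <<= shift;

            /* (delta * mul_frac) is up to 96 bits; keep the product divided
             * by 2^32, matching what the inline asm above computes. */
            return (u64)(((unsigned __int128)delta * mul_frac) >> 32);
    }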
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 49358481c733..12cdbb17ad18 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c | |||
@@ -251,7 +251,7 @@ static void __cpuinit calculate_tlb_offset(void) | |||
251 | } | 251 | } |
252 | } | 252 | } |
253 | 253 | ||
254 | static int tlb_cpuhp_notify(struct notifier_block *n, | 254 | static int __cpuinit tlb_cpuhp_notify(struct notifier_block *n, |
255 | unsigned long action, void *hcpu) | 255 | unsigned long action, void *hcpu) |
256 | { | 256 | { |
257 | switch (action & 0xf) { | 257 | switch (action & 0xf) { |
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index 15466c096ba5..0972315c3860 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c | |||
@@ -138,7 +138,6 @@ setup_resource(struct acpi_resource *acpi_res, void *data) | |||
138 | struct acpi_resource_address64 addr; | 138 | struct acpi_resource_address64 addr; |
139 | acpi_status status; | 139 | acpi_status status; |
140 | unsigned long flags; | 140 | unsigned long flags; |
141 | struct resource *root, *conflict; | ||
142 | u64 start, end; | 141 | u64 start, end; |
143 | 142 | ||
144 | status = resource_to_addr(acpi_res, &addr); | 143 | status = resource_to_addr(acpi_res, &addr); |
@@ -146,12 +145,10 @@ setup_resource(struct acpi_resource *acpi_res, void *data) | |||
146 | return AE_OK; | 145 | return AE_OK; |
147 | 146 | ||
148 | if (addr.resource_type == ACPI_MEMORY_RANGE) { | 147 | if (addr.resource_type == ACPI_MEMORY_RANGE) { |
149 | root = &iomem_resource; | ||
150 | flags = IORESOURCE_MEM; | 148 | flags = IORESOURCE_MEM; |
151 | if (addr.info.mem.caching == ACPI_PREFETCHABLE_MEMORY) | 149 | if (addr.info.mem.caching == ACPI_PREFETCHABLE_MEMORY) |
152 | flags |= IORESOURCE_PREFETCH; | 150 | flags |= IORESOURCE_PREFETCH; |
153 | } else if (addr.resource_type == ACPI_IO_RANGE) { | 151 | } else if (addr.resource_type == ACPI_IO_RANGE) { |
154 | root = &ioport_resource; | ||
155 | flags = IORESOURCE_IO; | 152 | flags = IORESOURCE_IO; |
156 | } else | 153 | } else |
157 | return AE_OK; | 154 | return AE_OK; |
@@ -172,25 +169,90 @@ setup_resource(struct acpi_resource *acpi_res, void *data) | |||
172 | return AE_OK; | 169 | return AE_OK; |
173 | } | 170 | } |
174 | 171 | ||
175 | conflict = insert_resource_conflict(root, res); | 172 | info->res_num++; |
176 | if (conflict) { | 173 | if (addr.translation_offset) |
177 | dev_err(&info->bridge->dev, | 174 | dev_info(&info->bridge->dev, "host bridge window %pR " |
178 | "address space collision: host bridge window %pR " | 175 | "(PCI address [%#llx-%#llx])\n", |
179 | "conflicts with %s %pR\n", | 176 | res, res->start - addr.translation_offset, |
180 | res, conflict->name, conflict); | 177 | res->end - addr.translation_offset); |
181 | } else { | 178 | else |
182 | pci_bus_add_resource(info->bus, res, 0); | 179 | dev_info(&info->bridge->dev, "host bridge window %pR\n", res); |
183 | info->res_num++; | 180 | |
184 | if (addr.translation_offset) | 181 | return AE_OK; |
185 | dev_info(&info->bridge->dev, "host bridge window %pR " | 182 | } |
186 | "(PCI address [%#llx-%#llx])\n", | 183 | |
187 | res, res->start - addr.translation_offset, | 184 | static bool resource_contains(struct resource *res, resource_size_t point) |
188 | res->end - addr.translation_offset); | 185 | { |
186 | if (res->start <= point && point <= res->end) | ||
187 | return true; | ||
188 | return false; | ||
189 | } | ||
190 | |||
191 | static void coalesce_windows(struct pci_root_info *info, int type) | ||
192 | { | ||
193 | int i, j; | ||
194 | struct resource *res1, *res2; | ||
195 | |||
196 | for (i = 0; i < info->res_num; i++) { | ||
197 | res1 = &info->res[i]; | ||
198 | if (!(res1->flags & type)) | ||
199 | continue; | ||
200 | |||
201 | for (j = i + 1; j < info->res_num; j++) { | ||
202 | res2 = &info->res[j]; | ||
203 | if (!(res2->flags & type)) | ||
204 | continue; | ||
205 | |||
206 | /* | ||
207 | * I don't like throwing away windows because then | ||
208 | * our resources no longer match the ACPI _CRS, but | ||
209 | * the kernel resource tree doesn't allow overlaps. | ||
210 | */ | ||
211 | if (resource_contains(res1, res2->start) || | ||
212 | resource_contains(res1, res2->end) || | ||
213 | resource_contains(res2, res1->start) || | ||
214 | resource_contains(res2, res1->end)) { | ||
215 | res1->start = min(res1->start, res2->start); | ||
216 | res1->end = max(res1->end, res2->end); | ||
217 | dev_info(&info->bridge->dev, | ||
218 | "host bridge window expanded to %pR; %pR ignored\n", | ||
219 | res1, res2); | ||
220 | res2->flags = 0; | ||
221 | } | ||
222 | } | ||
223 | } | ||
224 | } | ||
225 | |||
226 | static void add_resources(struct pci_root_info *info) | ||
227 | { | ||
228 | int i; | ||
229 | struct resource *res, *root, *conflict; | ||
230 | |||
231 | if (!pci_use_crs) | ||
232 | return; | ||
233 | |||
234 | coalesce_windows(info, IORESOURCE_MEM); | ||
235 | coalesce_windows(info, IORESOURCE_IO); | ||
236 | |||
237 | for (i = 0; i < info->res_num; i++) { | ||
238 | res = &info->res[i]; | ||
239 | |||
240 | if (res->flags & IORESOURCE_MEM) | ||
241 | root = &iomem_resource; | ||
242 | else if (res->flags & IORESOURCE_IO) | ||
243 | root = &ioport_resource; | ||
189 | else | 244 | else |
190 | dev_info(&info->bridge->dev, | 245 | continue; |
191 | "host bridge window %pR\n", res); | 246 | |
247 | conflict = insert_resource_conflict(root, res); | ||
248 | if (conflict) | ||
249 | dev_err(&info->bridge->dev, | ||
250 | "address space collision: host bridge window %pR " | ||
251 | "conflicts with %s %pR\n", | ||
252 | res, conflict->name, conflict); | ||
253 | else | ||
254 | pci_bus_add_resource(info->bus, res, 0); | ||
192 | } | 255 | } |
193 | return AE_OK; | ||
194 | } | 256 | } |
195 | 257 | ||
196 | static void | 258 | static void |
@@ -224,6 +286,7 @@ get_current_resources(struct acpi_device *device, int busnum, | |||
224 | acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource, | 286 | acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource, |
225 | &info); | 287 | &info); |
226 | 288 | ||
289 | add_resources(&info); | ||
227 | return; | 290 | return; |
228 | 291 | ||
229 | name_alloc_fail: | 292 | name_alloc_fail: |
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c index 117f5b8daf75..d7b5109f7a9c 100644 --- a/arch/x86/pci/xen.c +++ b/arch/x86/pci/xen.c | |||
@@ -147,8 +147,10 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | |||
147 | irq = xen_allocate_pirq(v[i], 0, /* not sharable */ | 147 | irq = xen_allocate_pirq(v[i], 0, /* not sharable */ |
148 | (type == PCI_CAP_ID_MSIX) ? | 148 | (type == PCI_CAP_ID_MSIX) ? |
149 | "pcifront-msi-x" : "pcifront-msi"); | 149 | "pcifront-msi-x" : "pcifront-msi"); |
150 | if (irq < 0) | 150 | if (irq < 0) { |
151 | return -1; | 151 | ret = -1; |
152 | goto free; | ||
153 | } | ||
152 | 154 | ||
153 | ret = set_irq_msi(irq, msidesc); | 155 | ret = set_irq_msi(irq, msidesc); |
154 | if (ret) | 156 | if (ret) |
@@ -164,7 +166,7 @@ error: | |||
164 | if (ret == -ENODEV) | 166 | if (ret == -ENODEV) |
165 | dev_err(&dev->dev, "Xen PCI frontend has not registered" \ | 167 | dev_err(&dev->dev, "Xen PCI frontend has not registered" \ |
166 | " MSI/MSI-X support!\n"); | 168 | " MSI/MSI-X support!\n"); |
167 | 169 | free: | |
168 | kfree(v); | 170 | kfree(v); |
169 | return ret; | 171 | return ret; |
170 | } | 172 | } |
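The Xen MSI change above converts an early return into a goto so the temporary vector array is always freed on the error path. A small userspace sketch of that single-exit cleanup pattern, with made-up acquire()/release() helpers (a fuller version would also release the entries acquired before the failure):

#include <stdlib.h>

static int acquire(int i) { return i == 2 ? -1 : i; }	/* pretend the third one fails */
static void release(int h) { (void)h; }

static int setup_all(int n)
{
	int *v = malloc(n * sizeof(*v));
	int i, ret = 0;

	if (!v)
		return -1;
	for (i = 0; i < n; i++) {
		v[i] = acquire(i);
		if (v[i] < 0) {
			ret = -1;
			goto free;	/* was: return -1, which leaked v */
		}
	}
	for (i = 0; i < n; i++)
		release(v[i]);
free:
	free(v);
	return ret;
}

int main(void)
{
	return setup_all(4) ? 1 : 0;
}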
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index 20ea20a39e2a..a318194002b5 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c | |||
@@ -1343,8 +1343,8 @@ uv_activation_descriptor_init(int node, int pnode) | |||
1343 | * each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR) | 1343 | * each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR) |
1344 | * per cpu; and up to 32 (UV_ADP_SIZE) cpu's per uvhub | 1344 | * per cpu; and up to 32 (UV_ADP_SIZE) cpu's per uvhub |
1345 | */ | 1345 | */ |
1346 | bau_desc = (struct bau_desc *)kmalloc_node(sizeof(struct bau_desc)* | 1346 | bau_desc = kmalloc_node(sizeof(struct bau_desc) * UV_ADP_SIZE |
1347 | UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node); | 1347 | * UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node); |
1348 | BUG_ON(!bau_desc); | 1348 | BUG_ON(!bau_desc); |
1349 | 1349 | ||
1350 | pa = uv_gpa(bau_desc); /* need the real nasid*/ | 1350 | pa = uv_gpa(bau_desc); /* need the real nasid*/ |
@@ -1402,9 +1402,9 @@ uv_payload_queue_init(int node, int pnode) | |||
1402 | struct bau_payload_queue_entry *pqp_malloc; | 1402 | struct bau_payload_queue_entry *pqp_malloc; |
1403 | struct bau_control *bcp; | 1403 | struct bau_control *bcp; |
1404 | 1404 | ||
1405 | pqp = (struct bau_payload_queue_entry *) kmalloc_node( | 1405 | pqp = kmalloc_node((DEST_Q_SIZE + 1) |
1406 | (DEST_Q_SIZE + 1) * sizeof(struct bau_payload_queue_entry), | 1406 | * sizeof(struct bau_payload_queue_entry), |
1407 | GFP_KERNEL, node); | 1407 | GFP_KERNEL, node); |
1408 | BUG_ON(!pqp); | 1408 | BUG_ON(!pqp); |
1409 | pqp_malloc = pqp; | 1409 | pqp_malloc = pqp; |
1410 | 1410 | ||
@@ -1520,8 +1520,7 @@ static void __init uv_init_per_cpu(int nuvhubs) | |||
1520 | 1520 | ||
1521 | timeout_us = calculate_destination_timeout(); | 1521 | timeout_us = calculate_destination_timeout(); |
1522 | 1522 | ||
1523 | uvhub_descs = (struct uvhub_desc *) | 1523 | uvhub_descs = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL); |
1524 | kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL); | ||
1525 | memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc)); | 1524 | memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc)); |
1526 | uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL); | 1525 | uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL); |
1527 | for_each_present_cpu(cpu) { | 1526 | for_each_present_cpu(cpu) { |
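The tlb_uv.c changes drop the casts on the kmalloc_node()/kmalloc() return values: the allocators return void *, which converts implicitly in C, so the casts only add noise. The same holds for plain malloc(), as in this trivial sketch:

#include <stdlib.h>

struct desc { int data[16]; };

int main(void)
{
	struct desc *d = malloc(8 * sizeof(struct desc));	/* no (struct desc *) cast needed */

	free(d);
	return 0;
}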
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index c237b810b03f..21ed8d7f75a5 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
@@ -2126,7 +2126,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, | |||
2126 | { | 2126 | { |
2127 | pmd_t *kernel_pmd; | 2127 | pmd_t *kernel_pmd; |
2128 | 2128 | ||
2129 | level2_kernel_pgt = extend_brk(sizeof(pmd_t *) * PTRS_PER_PMD, PAGE_SIZE); | 2129 | level2_kernel_pgt = extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE); |
2130 | 2130 | ||
2131 | max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) + | 2131 | max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) + |
2132 | xen_start_info->nr_pt_frames * PAGE_SIZE + | 2132 | xen_start_info->nr_pt_frames * PAGE_SIZE + |
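The one-character mmu.c fix matters because sizeof(pmd_t *) is the size of a pointer while sizeof(pmd_t) is the size of a page-table entry; on configurations where an entry is wider than a pointer (32-bit PAE, for example) the old expression under-allocates the brk area. A hedged illustration with a generic wide entry type, not the real pmd_t layout:

#include <stdio.h>

typedef struct { unsigned long long lo, hi; } entry_t;	/* generic wide entry, stand-in only */
#define ENTRIES 512

int main(void)
{
	printf("array of entries : %zu bytes\n", ENTRIES * sizeof(entry_t));
	printf("array of pointers: %zu bytes\n", ENTRIES * sizeof(entry_t *));
	return 0;
}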
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index b1dbdaa23ecc..769c4b01fa32 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c | |||
@@ -118,16 +118,18 @@ static unsigned long __init xen_return_unused_memory(unsigned long max_pfn, | |||
118 | const struct e820map *e820) | 118 | const struct e820map *e820) |
119 | { | 119 | { |
120 | phys_addr_t max_addr = PFN_PHYS(max_pfn); | 120 | phys_addr_t max_addr = PFN_PHYS(max_pfn); |
121 | phys_addr_t last_end = 0; | 121 | phys_addr_t last_end = ISA_END_ADDRESS; |
122 | unsigned long released = 0; | 122 | unsigned long released = 0; |
123 | int i; | 123 | int i; |
124 | 124 | ||
125 | /* Free any unused memory above the low 1Mbyte. */ | ||
125 | for (i = 0; i < e820->nr_map && last_end < max_addr; i++) { | 126 | for (i = 0; i < e820->nr_map && last_end < max_addr; i++) { |
126 | phys_addr_t end = e820->map[i].addr; | 127 | phys_addr_t end = e820->map[i].addr; |
127 | end = min(max_addr, end); | 128 | end = min(max_addr, end); |
128 | 129 | ||
129 | released += xen_release_chunk(last_end, end); | 130 | if (last_end < end) |
130 | last_end = e820->map[i].addr + e820->map[i].size; | 131 | released += xen_release_chunk(last_end, end); |
132 | last_end = max(last_end, e820->map[i].addr + e820->map[i].size); | ||
131 | } | 133 | } |
132 | 134 | ||
133 | if (last_end < max_addr) | 135 | if (last_end < max_addr) |
@@ -164,6 +166,7 @@ char * __init xen_memory_setup(void) | |||
164 | XENMEM_memory_map; | 166 | XENMEM_memory_map; |
165 | rc = HYPERVISOR_memory_op(op, &memmap); | 167 | rc = HYPERVISOR_memory_op(op, &memmap); |
166 | if (rc == -ENOSYS) { | 168 | if (rc == -ENOSYS) { |
169 | BUG_ON(xen_initial_domain()); | ||
167 | memmap.nr_entries = 1; | 170 | memmap.nr_entries = 1; |
168 | map[0].addr = 0ULL; | 171 | map[0].addr = 0ULL; |
169 | map[0].size = mem_end; | 172 | map[0].size = mem_end; |
@@ -201,12 +204,13 @@ char * __init xen_memory_setup(void) | |||
201 | } | 204 | } |
202 | 205 | ||
203 | /* | 206 | /* |
204 | * Even though this is normal, usable memory under Xen, reserve | 207 | * In domU, the ISA region is normal, usable memory, but we |
205 | * ISA memory anyway because too many things think they can poke | 208 | * reserve ISA memory anyway because too many things poke |
206 | * about in there. | 209 | * about in there. |
207 | * | 210 | * |
208 | * In a dom0 kernel, this region is identity mapped with the | 211 | * In Dom0, the host E820 information can leave gaps in the |
209 | * hardware ISA area, so it really is out of bounds. | 212 | * ISA range, which would cause us to release those pages. To |
213 | * avoid this, we unconditionally reserve them here. | ||
210 | */ | 214 | */ |
211 | e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, | 215 | e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, |
212 | E820_RESERVED); | 216 | E820_RESERVED); |
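The setup.c hunk starts the scan of unused memory at the end of the ISA hole and only releases a chunk when the current e820 entry really begins past the last end seen, so overlapping or out-of-order entries can no longer cause low memory to be handed back. A simplified userspace walk of an e820-style map with invented addresses:

#include <stdio.h>

struct region { unsigned long addr, size; };

#define FLOOR 0x100000UL	/* 1 MiB, playing the role of ISA_END_ADDRESS */

static unsigned long release_chunk(unsigned long s, unsigned long e)
{
	printf("release %#lx-%#lx\n", s, e);
	return (e - s) >> 12;	/* pages, assuming 4 KiB */
}

int main(void)
{
	struct region map[] = {
		{ 0x0,        0x9f000    },
		{ 0x100000,   0x3ff00000 },
		{ 0x50000000, 0x1000000  },
	};
	unsigned long last_end = FLOOR, released = 0;
	unsigned long max_addr = 0x80000000UL;
	unsigned int i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]) && last_end < max_addr; i++) {
		unsigned long end = map[i].addr < max_addr ? map[i].addr : max_addr;

		if (last_end < end)	/* only release a real gap */
			released += release_chunk(last_end, end);
		if (map[i].addr + map[i].size > last_end)	/* never move backwards */
			last_end = map[i].addr + map[i].size;
	}
	if (last_end < max_addr)
		released += release_chunk(last_end, max_addr);
	printf("released %lu pages\n", released);
	return 0;
}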
diff --git a/block/blk-core.c b/block/blk-core.c index f0834e2f5727..4ce953f1b390 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
@@ -1194,13 +1194,6 @@ static int __make_request(struct request_queue *q, struct bio *bio) | |||
1194 | int where = ELEVATOR_INSERT_SORT; | 1194 | int where = ELEVATOR_INSERT_SORT; |
1195 | int rw_flags; | 1195 | int rw_flags; |
1196 | 1196 | ||
1197 | /* REQ_HARDBARRIER is no more */ | ||
1198 | if (WARN_ONCE(bio->bi_rw & REQ_HARDBARRIER, | ||
1199 | "block: HARDBARRIER is deprecated, use FLUSH/FUA instead\n")) { | ||
1200 | bio_endio(bio, -EOPNOTSUPP); | ||
1201 | return 0; | ||
1202 | } | ||
1203 | |||
1204 | /* | 1197 | /* |
1205 | * low level driver can indicate that it wants pages above a | 1198 | * low level driver can indicate that it wants pages above a |
1206 | * certain limit bounced to low memory (ie for highmem, or even | 1199 | * certain limit bounced to low memory (ie for highmem, or even |
@@ -1351,7 +1344,7 @@ static void handle_bad_sector(struct bio *bio) | |||
1351 | bdevname(bio->bi_bdev, b), | 1344 | bdevname(bio->bi_bdev, b), |
1352 | bio->bi_rw, | 1345 | bio->bi_rw, |
1353 | (unsigned long long)bio->bi_sector + bio_sectors(bio), | 1346 | (unsigned long long)bio->bi_sector + bio_sectors(bio), |
1354 | (long long)(bio->bi_bdev->bd_inode->i_size >> 9)); | 1347 | (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9)); |
1355 | 1348 | ||
1356 | set_bit(BIO_EOF, &bio->bi_flags); | 1349 | set_bit(BIO_EOF, &bio->bi_flags); |
1357 | } | 1350 | } |
@@ -1404,7 +1397,7 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors) | |||
1404 | return 0; | 1397 | return 0; |
1405 | 1398 | ||
1406 | /* Test device or partition size, when known. */ | 1399 | /* Test device or partition size, when known. */ |
1407 | maxsector = bio->bi_bdev->bd_inode->i_size >> 9; | 1400 | maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9; |
1408 | if (maxsector) { | 1401 | if (maxsector) { |
1409 | sector_t sector = bio->bi_sector; | 1402 | sector_t sector = bio->bi_sector; |
1410 | 1403 | ||
diff --git a/block/blk-ioc.c b/block/blk-ioc.c index d22c4c55c406..3c7a339fe381 100644 --- a/block/blk-ioc.c +++ b/block/blk-ioc.c | |||
@@ -153,20 +153,6 @@ struct io_context *get_io_context(gfp_t gfp_flags, int node) | |||
153 | } | 153 | } |
154 | EXPORT_SYMBOL(get_io_context); | 154 | EXPORT_SYMBOL(get_io_context); |
155 | 155 | ||
156 | void copy_io_context(struct io_context **pdst, struct io_context **psrc) | ||
157 | { | ||
158 | struct io_context *src = *psrc; | ||
159 | struct io_context *dst = *pdst; | ||
160 | |||
161 | if (src) { | ||
162 | BUG_ON(atomic_long_read(&src->refcount) == 0); | ||
163 | atomic_long_inc(&src->refcount); | ||
164 | put_io_context(dst); | ||
165 | *pdst = src; | ||
166 | } | ||
167 | } | ||
168 | EXPORT_SYMBOL(copy_io_context); | ||
169 | |||
170 | static int __init blk_ioc_init(void) | 156 | static int __init blk_ioc_init(void) |
171 | { | 157 | { |
172 | iocontext_cachep = kmem_cache_create("blkdev_ioc", | 158 | iocontext_cachep = kmem_cache_create("blkdev_ioc", |
diff --git a/block/blk-map.c b/block/blk-map.c index d4a586d8691e..5d5dbe47c228 100644 --- a/block/blk-map.c +++ b/block/blk-map.c | |||
@@ -205,6 +205,8 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, | |||
205 | unaligned = 1; | 205 | unaligned = 1; |
206 | break; | 206 | break; |
207 | } | 207 | } |
208 | if (!iov[i].iov_len) | ||
209 | return -EINVAL; | ||
208 | } | 210 | } |
209 | 211 | ||
210 | if (unaligned || (q->dma_pad_mask & len) || map_data) | 212 | if (unaligned || (q->dma_pad_mask & len) || map_data) |
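The blk-map.c check rejects any zero-length segment in the user-supplied iovec before attempting to map it. A userspace sketch of the same validation:

#include <stdio.h>
#include <sys/uio.h>

static int check_iov(const struct iovec *iov, int count)
{
	int i;

	for (i = 0; i < count; i++)
		if (iov[i].iov_len == 0)
			return -1;	/* -EINVAL in the kernel */
	return 0;
}

int main(void)
{
	char a[8], b[4];
	struct iovec iov[] = { { a, sizeof(a) }, { b, 0 } };

	printf("%d\n", check_iov(iov, 2));	/* prints -1 */
	return 0;
}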
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c index 119f07b74dc0..58c6ee5b010c 100644 --- a/block/compat_ioctl.c +++ b/block/compat_ioctl.c | |||
@@ -744,13 +744,13 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg) | |||
744 | bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE; | 744 | bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE; |
745 | return 0; | 745 | return 0; |
746 | case BLKGETSIZE: | 746 | case BLKGETSIZE: |
747 | size = bdev->bd_inode->i_size; | 747 | size = i_size_read(bdev->bd_inode); |
748 | if ((size >> 9) > ~0UL) | 748 | if ((size >> 9) > ~0UL) |
749 | return -EFBIG; | 749 | return -EFBIG; |
750 | return compat_put_ulong(arg, size >> 9); | 750 | return compat_put_ulong(arg, size >> 9); |
751 | 751 | ||
752 | case BLKGETSIZE64_32: | 752 | case BLKGETSIZE64_32: |
753 | return compat_put_u64(arg, bdev->bd_inode->i_size); | 753 | return compat_put_u64(arg, i_size_read(bdev->bd_inode)); |
754 | 754 | ||
755 | case BLKTRACESETUP32: | 755 | case BLKTRACESETUP32: |
756 | case BLKTRACESTART: /* compatible */ | 756 | case BLKTRACESTART: /* compatible */ |
diff --git a/block/elevator.c b/block/elevator.c index 282e8308f7e2..2569512830d3 100644 --- a/block/elevator.c +++ b/block/elevator.c | |||
@@ -429,7 +429,7 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq) | |||
429 | q->nr_sorted--; | 429 | q->nr_sorted--; |
430 | 430 | ||
431 | boundary = q->end_sector; | 431 | boundary = q->end_sector; |
432 | stop_flags = REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED; | 432 | stop_flags = REQ_SOFTBARRIER | REQ_STARTED; |
433 | list_for_each_prev(entry, &q->queue_head) { | 433 | list_for_each_prev(entry, &q->queue_head) { |
434 | struct request *pos = list_entry_rq(entry); | 434 | struct request *pos = list_entry_rq(entry); |
435 | 435 | ||
@@ -691,7 +691,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where) | |||
691 | void __elv_add_request(struct request_queue *q, struct request *rq, int where, | 691 | void __elv_add_request(struct request_queue *q, struct request *rq, int where, |
692 | int plug) | 692 | int plug) |
693 | { | 693 | { |
694 | if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) { | 694 | if (rq->cmd_flags & REQ_SOFTBARRIER) { |
695 | /* barriers are scheduling boundary, update end_sector */ | 695 | /* barriers are scheduling boundary, update end_sector */ |
696 | if (rq->cmd_type == REQ_TYPE_FS || | 696 | if (rq->cmd_type == REQ_TYPE_FS || |
697 | (rq->cmd_flags & REQ_DISCARD)) { | 697 | (rq->cmd_flags & REQ_DISCARD)) { |
diff --git a/block/ioctl.c b/block/ioctl.c index d724ceb1d465..3d866d0037f2 100644 --- a/block/ioctl.c +++ b/block/ioctl.c | |||
@@ -125,7 +125,7 @@ static int blk_ioctl_discard(struct block_device *bdev, uint64_t start, | |||
125 | start >>= 9; | 125 | start >>= 9; |
126 | len >>= 9; | 126 | len >>= 9; |
127 | 127 | ||
128 | if (start + len > (bdev->bd_inode->i_size >> 9)) | 128 | if (start + len > (i_size_read(bdev->bd_inode) >> 9)) |
129 | return -EINVAL; | 129 | return -EINVAL; |
130 | if (secure) | 130 | if (secure) |
131 | flags |= BLKDEV_DISCARD_SECURE; | 131 | flags |= BLKDEV_DISCARD_SECURE; |
@@ -242,6 +242,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, | |||
242 | * We need to set the startsect first, the driver may | 242 | * We need to set the startsect first, the driver may |
243 | * want to override it. | 243 | * want to override it. |
244 | */ | 244 | */ |
245 | memset(&geo, 0, sizeof(geo)); | ||
245 | geo.start = get_start_sect(bdev); | 246 | geo.start = get_start_sect(bdev); |
246 | ret = disk->fops->getgeo(bdev, &geo); | 247 | ret = disk->fops->getgeo(bdev, &geo); |
247 | if (ret) | 248 | if (ret) |
@@ -307,12 +308,12 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, | |||
307 | ret = blkdev_reread_part(bdev); | 308 | ret = blkdev_reread_part(bdev); |
308 | break; | 309 | break; |
309 | case BLKGETSIZE: | 310 | case BLKGETSIZE: |
310 | size = bdev->bd_inode->i_size; | 311 | size = i_size_read(bdev->bd_inode); |
311 | if ((size >> 9) > ~0UL) | 312 | if ((size >> 9) > ~0UL) |
312 | return -EFBIG; | 313 | return -EFBIG; |
313 | return put_ulong(arg, size >> 9); | 314 | return put_ulong(arg, size >> 9); |
314 | case BLKGETSIZE64: | 315 | case BLKGETSIZE64: |
315 | return put_u64(arg, bdev->bd_inode->i_size); | 316 | return put_u64(arg, i_size_read(bdev->bd_inode)); |
316 | case BLKTRACESTART: | 317 | case BLKTRACESTART: |
317 | case BLKTRACESTOP: | 318 | case BLKTRACESTOP: |
318 | case BLKTRACESETUP: | 319 | case BLKTRACESETUP: |
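The memset() added to the HDIO_GETGEO path above clears the geometry struct before the driver fills it, so fields a driver leaves untouched can no longer leak stale kernel stack bytes to userspace. A minimal sketch of the pattern with invented types:

#include <string.h>

struct geometry { unsigned char heads, sectors; unsigned short cylinders; unsigned long start; };

static void fill_partial(struct geometry *g)
{
	g->heads = 255;		/* driver fills only some fields */
	g->sectors = 63;
}

static int get_geometry(struct geometry *out)
{
	struct geometry geo;

	memset(&geo, 0, sizeof(geo));	/* nothing stale survives in unfilled fields or padding */
	fill_partial(&geo);
	*out = geo;			/* stands in for copy_to_user() */
	return 0;
}

int main(void)
{
	struct geometry g;

	return get_geometry(&g);
}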
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c index a8b5a10eb5b0..4f4230b79bb6 100644 --- a/block/scsi_ioctl.c +++ b/block/scsi_ioctl.c | |||
@@ -321,33 +321,47 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk, | |||
321 | if (hdr->iovec_count) { | 321 | if (hdr->iovec_count) { |
322 | const int size = sizeof(struct sg_iovec) * hdr->iovec_count; | 322 | const int size = sizeof(struct sg_iovec) * hdr->iovec_count; |
323 | size_t iov_data_len; | 323 | size_t iov_data_len; |
324 | struct sg_iovec *iov; | 324 | struct sg_iovec *sg_iov; |
325 | struct iovec *iov; | ||
326 | int i; | ||
325 | 327 | ||
326 | iov = kmalloc(size, GFP_KERNEL); | 328 | sg_iov = kmalloc(size, GFP_KERNEL); |
327 | if (!iov) { | 329 | if (!sg_iov) { |
328 | ret = -ENOMEM; | 330 | ret = -ENOMEM; |
329 | goto out; | 331 | goto out; |
330 | } | 332 | } |
331 | 333 | ||
332 | if (copy_from_user(iov, hdr->dxferp, size)) { | 334 | if (copy_from_user(sg_iov, hdr->dxferp, size)) { |
333 | kfree(iov); | 335 | kfree(sg_iov); |
334 | ret = -EFAULT; | 336 | ret = -EFAULT; |
335 | goto out; | 337 | goto out; |
336 | } | 338 | } |
337 | 339 | ||
340 | /* | ||
341 | * Sum up the vecs, making sure they don't overflow | ||
342 | */ | ||
343 | iov = (struct iovec *) sg_iov; | ||
344 | iov_data_len = 0; | ||
345 | for (i = 0; i < hdr->iovec_count; i++) { | ||
346 | if (iov_data_len + iov[i].iov_len < iov_data_len) { | ||
347 | kfree(sg_iov); | ||
348 | ret = -EINVAL; | ||
349 | goto out; | ||
350 | } | ||
351 | iov_data_len += iov[i].iov_len; | ||
352 | } | ||
353 | |||
338 | /* SG_IO howto says that the shorter of the two wins */ | 354 | /* SG_IO howto says that the shorter of the two wins */ |
339 | iov_data_len = iov_length((struct iovec *)iov, | ||
340 | hdr->iovec_count); | ||
341 | if (hdr->dxfer_len < iov_data_len) { | 355 | if (hdr->dxfer_len < iov_data_len) { |
342 | hdr->iovec_count = iov_shorten((struct iovec *)iov, | 356 | hdr->iovec_count = iov_shorten(iov, |
343 | hdr->iovec_count, | 357 | hdr->iovec_count, |
344 | hdr->dxfer_len); | 358 | hdr->dxfer_len); |
345 | iov_data_len = hdr->dxfer_len; | 359 | iov_data_len = hdr->dxfer_len; |
346 | } | 360 | } |
347 | 361 | ||
348 | ret = blk_rq_map_user_iov(q, rq, NULL, iov, hdr->iovec_count, | 362 | ret = blk_rq_map_user_iov(q, rq, NULL, sg_iov, hdr->iovec_count, |
349 | iov_data_len, GFP_KERNEL); | 363 | iov_data_len, GFP_KERNEL); |
350 | kfree(iov); | 364 | kfree(sg_iov); |
351 | } else if (hdr->dxfer_len) | 365 | } else if (hdr->dxfer_len) |
352 | ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len, | 366 | ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len, |
353 | GFP_KERNEL); | 367 | GFP_KERNEL); |
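The SG_IO change sums the iovec lengths itself and bails out if the running total wraps, instead of trusting iov_length(); that closes an integer-overflow hole in the transfer-length calculation. A userspace sketch of the overflow check:

#include <stdio.h>
#include <sys/uio.h>

static int iov_total(const struct iovec *iov, int count, size_t *total)
{
	size_t sum = 0;
	int i;

	for (i = 0; i < count; i++) {
		if (sum + iov[i].iov_len < sum)	/* wrapped around: overflow */
			return -1;
		sum += iov[i].iov_len;
	}
	*total = sum;
	return 0;
}

int main(void)
{
	char buf[16];
	struct iovec iov[] = { { buf, sizeof(buf) }, { buf, (size_t)-1 } };
	size_t total = 0;

	printf("%d\n", iov_total(iov, 2, &total));	/* prints -1 */
	return 0;
}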
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index de3078215fe6..75586f1f86e7 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c | |||
@@ -504,7 +504,6 @@ err: | |||
504 | 504 | ||
505 | static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt) | 505 | static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt) |
506 | { | 506 | { |
507 | kobject_put(&pcrypt->pinst->kobj); | ||
508 | free_cpumask_var(pcrypt->cb_cpumask->mask); | 507 | free_cpumask_var(pcrypt->cb_cpumask->mask); |
509 | kfree(pcrypt->cb_cpumask); | 508 | kfree(pcrypt->cb_cpumask); |
510 | 509 | ||
diff --git a/drivers/acpi/debugfs.c b/drivers/acpi/debugfs.c index 6355b575ee5a..5df67f1d6c61 100644 --- a/drivers/acpi/debugfs.c +++ b/drivers/acpi/debugfs.c | |||
@@ -80,7 +80,7 @@ int __init acpi_debugfs_init(void) | |||
80 | if (!acpi_dir) | 80 | if (!acpi_dir) |
81 | goto err; | 81 | goto err; |
82 | 82 | ||
83 | cm_dentry = debugfs_create_file("custom_method", S_IWUGO, | 83 | cm_dentry = debugfs_create_file("custom_method", S_IWUSR, |
84 | acpi_dir, NULL, &cm_fops); | 84 | acpi_dir, NULL, &cm_fops); |
85 | if (!cm_dentry) | 85 | if (!cm_dentry) |
86 | goto err; | 86 | goto err; |
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index d050e073e570..19835d39289d 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c | |||
@@ -2552,8 +2552,11 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc) | |||
2552 | * | 2552 | * |
2553 | * If door lock fails, always clear sdev->locked to | 2553 | * If door lock fails, always clear sdev->locked to |
2554 | * avoid this infinite loop. | 2554 | * avoid this infinite loop. |
2555 | * | ||
2556 | * This may happen before SCSI scan is complete. Make | ||
2557 | * sure qc->dev->sdev isn't NULL before dereferencing. | ||
2555 | */ | 2558 | */ |
2556 | if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL) | 2559 | if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL && qc->dev->sdev) |
2557 | qc->dev->sdev->locked = 0; | 2560 | qc->dev->sdev->locked = 0; |
2558 | 2561 | ||
2559 | qc->scsicmd->result = SAM_STAT_CHECK_CONDITION; | 2562 | qc->scsicmd->result = SAM_STAT_CHECK_CONDITION; |
@@ -3180,7 +3183,7 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, | |||
3180 | * Return value from __ata_scsi_queuecmd() if @cmd can be queued, | 3183 | * Return value from __ata_scsi_queuecmd() if @cmd can be queued, |
3181 | * 0 otherwise. | 3184 | * 0 otherwise. |
3182 | */ | 3185 | */ |
3183 | int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) | 3186 | static int ata_scsi_queuecmd_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) |
3184 | { | 3187 | { |
3185 | struct ata_port *ap; | 3188 | struct ata_port *ap; |
3186 | struct ata_device *dev; | 3189 | struct ata_device *dev; |
@@ -3208,6 +3211,8 @@ int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) | |||
3208 | return rc; | 3211 | return rc; |
3209 | } | 3212 | } |
3210 | 3213 | ||
3214 | DEF_SCSI_QCMD(ata_scsi_queuecmd) | ||
3215 | |||
3211 | /** | 3216 | /** |
3212 | * ata_scsi_simulate - simulate SCSI command on ATA device | 3217 | * ata_scsi_simulate - simulate SCSI command on ATA device |
3213 | * @dev: the target device | 3218 | * @dev: the target device |
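The rename above is part of the host-lock push-down in the SCSI midlayer: the old entry point, which was called with the Scsi_Host lock held and took a (cmd, done) pair, becomes a static *_lck helper, and DEF_SCSI_QCMD() generates the new ata_scsi_queuecmd(struct Scsi_Host *, struct scsi_cmnd *) wrapper, which takes shost->host_lock and calls the helper with cmd->scsi_done. The same conversion is applied to cciss_scsi_queue_command further down; the macro's expansion is described here from memory rather than quoted from scsi_host.h, so treat the details as approximate.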
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c index eaf194138f21..6bd9425ba5ab 100644 --- a/drivers/ata/pata_legacy.c +++ b/drivers/ata/pata_legacy.c | |||
@@ -142,7 +142,7 @@ static int autospeed; /* Chip present which snoops speed changes */ | |||
142 | static int pio_mask = ATA_PIO4; /* PIO range for autospeed devices */ | 142 | static int pio_mask = ATA_PIO4; /* PIO range for autospeed devices */ |
143 | static int iordy_mask = 0xFFFFFFFF; /* Use iordy if available */ | 143 | static int iordy_mask = 0xFFFFFFFF; /* Use iordy if available */ |
144 | 144 | ||
145 | #ifdef PATA_WINBOND_VLB_MODULE | 145 | #ifdef CONFIG_PATA_WINBOND_VLB_MODULE |
146 | static int winbond = 1; /* Set to probe Winbond controllers, | 146 | static int winbond = 1; /* Set to probe Winbond controllers, |
147 | give I/O port if non standard */ | 147 | give I/O port if non standard */ |
148 | #else | 148 | #else |
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c index 74b829817891..fa1b95a9a7ff 100644 --- a/drivers/ata/pata_octeon_cf.c +++ b/drivers/ata/pata_octeon_cf.c | |||
@@ -653,8 +653,6 @@ static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance) | |||
653 | 653 | ||
654 | ap = host->ports[i]; | 654 | ap = host->ports[i]; |
655 | ocd = ap->dev->platform_data; | 655 | ocd = ap->dev->platform_data; |
656 | |||
657 | ocd = ap->dev->platform_data; | ||
658 | cf_port = ap->private_data; | 656 | cf_port = ap->private_data; |
659 | dma_int.u64 = | 657 | dma_int.u64 = |
660 | cvmx_read_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine)); | 658 | cvmx_read_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine)); |
diff --git a/drivers/atm/solos-attrlist.c b/drivers/atm/solos-attrlist.c index 1a9332e4efe0..9a676ee30824 100644 --- a/drivers/atm/solos-attrlist.c +++ b/drivers/atm/solos-attrlist.c | |||
@@ -1,6 +1,7 @@ | |||
1 | SOLOS_ATTR_RO(DriverVersion) | 1 | SOLOS_ATTR_RO(DriverVersion) |
2 | SOLOS_ATTR_RO(APIVersion) | 2 | SOLOS_ATTR_RO(APIVersion) |
3 | SOLOS_ATTR_RO(FirmwareVersion) | 3 | SOLOS_ATTR_RO(FirmwareVersion) |
4 | SOLOS_ATTR_RO(Version) | ||
4 | // SOLOS_ATTR_RO(DspVersion) | 5 | // SOLOS_ATTR_RO(DspVersion) |
5 | // SOLOS_ATTR_RO(CommonHandshake) | 6 | // SOLOS_ATTR_RO(CommonHandshake) |
6 | SOLOS_ATTR_RO(Connected) | 7 | SOLOS_ATTR_RO(Connected) |
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c index f46138ab38b6..2e08c996fd30 100644 --- a/drivers/atm/solos-pci.c +++ b/drivers/atm/solos-pci.c | |||
@@ -1161,6 +1161,14 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
1161 | dev_info(&dev->dev, "Solos FPGA Version %d.%02d svn-%d\n", | 1161 | dev_info(&dev->dev, "Solos FPGA Version %d.%02d svn-%d\n", |
1162 | major_ver, minor_ver, fpga_ver); | 1162 | major_ver, minor_ver, fpga_ver); |
1163 | 1163 | ||
1164 | if (fpga_ver < 37 && (fpga_upgrade || firmware_upgrade || | ||
1165 | db_fpga_upgrade || db_firmware_upgrade)) { | ||
1166 | dev_warn(&dev->dev, | ||
1167 | "FPGA too old; cannot upgrade flash. Use JTAG.\n"); | ||
1168 | fpga_upgrade = firmware_upgrade = 0; | ||
1169 | db_fpga_upgrade = db_firmware_upgrade = 0; | ||
1170 | } | ||
1171 | |||
1164 | if (card->fpga_version >= DMA_SUPPORTED){ | 1172 | if (card->fpga_version >= DMA_SUPPORTED){ |
1165 | card->using_dma = 1; | 1173 | card->using_dma = 1; |
1166 | } else { | 1174 | } else { |
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c index 541e18879965..528f6318ded1 100644 --- a/drivers/block/aoe/aoeblk.c +++ b/drivers/block/aoe/aoeblk.c | |||
@@ -180,9 +180,6 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio) | |||
180 | BUG(); | 180 | BUG(); |
181 | bio_endio(bio, -ENXIO); | 181 | bio_endio(bio, -ENXIO); |
182 | return 0; | 182 | return 0; |
183 | } else if (bio->bi_rw & REQ_HARDBARRIER) { | ||
184 | bio_endio(bio, -EOPNOTSUPP); | ||
185 | return 0; | ||
186 | } else if (bio->bi_io_vec == NULL) { | 183 | } else if (bio->bi_io_vec == NULL) { |
187 | printk(KERN_ERR "aoe: bi_io_vec is NULL\n"); | 184 | printk(KERN_ERR "aoe: bi_io_vec is NULL\n"); |
188 | BUG(); | 185 | BUG(); |
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 2cc4dda46279..a67d0a611a8a 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c | |||
@@ -113,6 +113,8 @@ static struct board_type products[] = { | |||
113 | {0x409D0E11, "Smart Array 6400 EM", &SA5_access}, | 113 | {0x409D0E11, "Smart Array 6400 EM", &SA5_access}, |
114 | {0x40910E11, "Smart Array 6i", &SA5_access}, | 114 | {0x40910E11, "Smart Array 6i", &SA5_access}, |
115 | {0x3225103C, "Smart Array P600", &SA5_access}, | 115 | {0x3225103C, "Smart Array P600", &SA5_access}, |
116 | {0x3223103C, "Smart Array P800", &SA5_access}, | ||
117 | {0x3234103C, "Smart Array P400", &SA5_access}, | ||
116 | {0x3235103C, "Smart Array P400i", &SA5_access}, | 118 | {0x3235103C, "Smart Array P400i", &SA5_access}, |
117 | {0x3211103C, "Smart Array E200i", &SA5_access}, | 119 | {0x3211103C, "Smart Array E200i", &SA5_access}, |
118 | {0x3212103C, "Smart Array E200", &SA5_access}, | 120 | {0x3212103C, "Smart Array E200", &SA5_access}, |
@@ -3753,7 +3755,7 @@ static void __devinit cciss_wait_for_mode_change_ack(ctlr_info_t *h) | |||
3753 | for (i = 0; i < MAX_CONFIG_WAIT; i++) { | 3755 | for (i = 0; i < MAX_CONFIG_WAIT; i++) { |
3754 | if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq)) | 3756 | if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq)) |
3755 | break; | 3757 | break; |
3756 | msleep(10); | 3758 | usleep_range(10000, 20000); |
3757 | } | 3759 | } |
3758 | } | 3760 | } |
3759 | 3761 | ||
@@ -3937,10 +3939,9 @@ static int __devinit cciss_lookup_board_id(struct pci_dev *pdev, u32 *board_id) | |||
3937 | *board_id = ((subsystem_device_id << 16) & 0xffff0000) | | 3939 | *board_id = ((subsystem_device_id << 16) & 0xffff0000) | |
3938 | subsystem_vendor_id; | 3940 | subsystem_vendor_id; |
3939 | 3941 | ||
3940 | for (i = 0; i < ARRAY_SIZE(products); i++) { | 3942 | for (i = 0; i < ARRAY_SIZE(products); i++) |
3941 | if (*board_id == products[i].board_id) | 3943 | if (*board_id == products[i].board_id) |
3942 | return i; | 3944 | return i; |
3943 | } | ||
3944 | dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x, ignoring.\n", | 3945 | dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x, ignoring.\n", |
3945 | *board_id); | 3946 | *board_id); |
3946 | return -ENODEV; | 3947 | return -ENODEV; |
@@ -3971,18 +3972,31 @@ static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev, | |||
3971 | return -ENODEV; | 3972 | return -ENODEV; |
3972 | } | 3973 | } |
3973 | 3974 | ||
3974 | static int __devinit cciss_wait_for_board_ready(ctlr_info_t *h) | 3975 | static int __devinit cciss_wait_for_board_state(struct pci_dev *pdev, |
3976 | void __iomem *vaddr, int wait_for_ready) | ||
3977 | #define BOARD_READY 1 | ||
3978 | #define BOARD_NOT_READY 0 | ||
3975 | { | 3979 | { |
3976 | int i; | 3980 | int i, iterations; |
3977 | u32 scratchpad; | 3981 | u32 scratchpad; |
3978 | 3982 | ||
3979 | for (i = 0; i < CCISS_BOARD_READY_ITERATIONS; i++) { | 3983 | if (wait_for_ready) |
3980 | scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); | 3984 | iterations = CCISS_BOARD_READY_ITERATIONS; |
3981 | if (scratchpad == CCISS_FIRMWARE_READY) | 3985 | else |
3982 | return 0; | 3986 | iterations = CCISS_BOARD_NOT_READY_ITERATIONS; |
3987 | |||
3988 | for (i = 0; i < iterations; i++) { | ||
3989 | scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET); | ||
3990 | if (wait_for_ready) { | ||
3991 | if (scratchpad == CCISS_FIRMWARE_READY) | ||
3992 | return 0; | ||
3993 | } else { | ||
3994 | if (scratchpad != CCISS_FIRMWARE_READY) | ||
3995 | return 0; | ||
3996 | } | ||
3983 | msleep(CCISS_BOARD_READY_POLL_INTERVAL_MSECS); | 3997 | msleep(CCISS_BOARD_READY_POLL_INTERVAL_MSECS); |
3984 | } | 3998 | } |
3985 | dev_warn(&h->pdev->dev, "board not ready, timed out.\n"); | 3999 | dev_warn(&pdev->dev, "board not ready, timed out.\n"); |
3986 | return -ENODEV; | 4000 | return -ENODEV; |
3987 | } | 4001 | } |
3988 | 4002 | ||
@@ -4031,6 +4045,11 @@ static int __devinit cciss_find_cfgtables(ctlr_info_t *h) | |||
4031 | static void __devinit cciss_get_max_perf_mode_cmds(struct ctlr_info *h) | 4045 | static void __devinit cciss_get_max_perf_mode_cmds(struct ctlr_info *h) |
4032 | { | 4046 | { |
4033 | h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands)); | 4047 | h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands)); |
4048 | |||
4049 | /* Limit commands in memory limited kdump scenario. */ | ||
4050 | if (reset_devices && h->max_commands > 32) | ||
4051 | h->max_commands = 32; | ||
4052 | |||
4034 | if (h->max_commands < 16) { | 4053 | if (h->max_commands < 16) { |
4035 | dev_warn(&h->pdev->dev, "Controller reports " | 4054 | dev_warn(&h->pdev->dev, "Controller reports " |
4036 | "max supported commands of %d, an obvious lie. " | 4055 | "max supported commands of %d, an obvious lie. " |
@@ -4148,7 +4167,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h) | |||
4148 | err = -ENOMEM; | 4167 | err = -ENOMEM; |
4149 | goto err_out_free_res; | 4168 | goto err_out_free_res; |
4150 | } | 4169 | } |
4151 | err = cciss_wait_for_board_ready(h); | 4170 | err = cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); |
4152 | if (err) | 4171 | if (err) |
4153 | goto err_out_free_res; | 4172 | goto err_out_free_res; |
4154 | err = cciss_find_cfgtables(h); | 4173 | err = cciss_find_cfgtables(h); |
@@ -4313,36 +4332,6 @@ static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, u | |||
4313 | #define cciss_soft_reset_controller(p) cciss_message(p, 1, 0) | 4332 | #define cciss_soft_reset_controller(p) cciss_message(p, 1, 0) |
4314 | #define cciss_noop(p) cciss_message(p, 3, 0) | 4333 | #define cciss_noop(p) cciss_message(p, 3, 0) |
4315 | 4334 | ||
4316 | static __devinit int cciss_reset_msi(struct pci_dev *pdev) | ||
4317 | { | ||
4318 | /* the #defines are stolen from drivers/pci/msi.h. */ | ||
4319 | #define msi_control_reg(base) (base + PCI_MSI_FLAGS) | ||
4320 | #define PCI_MSIX_FLAGS_ENABLE (1 << 15) | ||
4321 | |||
4322 | int pos; | ||
4323 | u16 control = 0; | ||
4324 | |||
4325 | pos = pci_find_capability(pdev, PCI_CAP_ID_MSI); | ||
4326 | if (pos) { | ||
4327 | pci_read_config_word(pdev, msi_control_reg(pos), &control); | ||
4328 | if (control & PCI_MSI_FLAGS_ENABLE) { | ||
4329 | dev_info(&pdev->dev, "resetting MSI\n"); | ||
4330 | pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSI_FLAGS_ENABLE); | ||
4331 | } | ||
4332 | } | ||
4333 | |||
4334 | pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); | ||
4335 | if (pos) { | ||
4336 | pci_read_config_word(pdev, msi_control_reg(pos), &control); | ||
4337 | if (control & PCI_MSIX_FLAGS_ENABLE) { | ||
4338 | dev_info(&pdev->dev, "resetting MSI-X\n"); | ||
4339 | pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSIX_FLAGS_ENABLE); | ||
4340 | } | ||
4341 | } | ||
4342 | |||
4343 | return 0; | ||
4344 | } | ||
4345 | |||
4346 | static int cciss_controller_hard_reset(struct pci_dev *pdev, | 4335 | static int cciss_controller_hard_reset(struct pci_dev *pdev, |
4347 | void * __iomem vaddr, bool use_doorbell) | 4336 | void * __iomem vaddr, bool use_doorbell) |
4348 | { | 4337 | { |
@@ -4397,17 +4386,17 @@ static int cciss_controller_hard_reset(struct pci_dev *pdev, | |||
4397 | * states or using the doorbell register. */ | 4386 | * states or using the doorbell register. */ |
4398 | static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev) | 4387 | static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev) |
4399 | { | 4388 | { |
4400 | u16 saved_config_space[32]; | ||
4401 | u64 cfg_offset; | 4389 | u64 cfg_offset; |
4402 | u32 cfg_base_addr; | 4390 | u32 cfg_base_addr; |
4403 | u64 cfg_base_addr_index; | 4391 | u64 cfg_base_addr_index; |
4404 | void __iomem *vaddr; | 4392 | void __iomem *vaddr; |
4405 | unsigned long paddr; | 4393 | unsigned long paddr; |
4406 | u32 misc_fw_support, active_transport; | 4394 | u32 misc_fw_support, active_transport; |
4407 | int rc, i; | 4395 | int rc; |
4408 | CfgTable_struct __iomem *cfgtable; | 4396 | CfgTable_struct __iomem *cfgtable; |
4409 | bool use_doorbell; | 4397 | bool use_doorbell; |
4410 | u32 board_id; | 4398 | u32 board_id; |
4399 | u16 command_register; | ||
4411 | 4400 | ||
4412 | /* For controllers as old as the p600, this is very nearly | 4401 | /* For controllers as old as the p600, this is very nearly |
4413 | * the same thing as | 4402 | * the same thing as |
@@ -4417,14 +4406,6 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev) | |||
4417 | * pci_set_power_state(pci_dev, PCI_D0); | 4406 | * pci_set_power_state(pci_dev, PCI_D0); |
4418 | * pci_restore_state(pci_dev); | 4407 | * pci_restore_state(pci_dev); |
4419 | * | 4408 | * |
4420 | * but we can't use these nice canned kernel routines on | ||
4421 | * kexec, because they also check the MSI/MSI-X state in PCI | ||
4422 | * configuration space and do the wrong thing when it is | ||
4423 | * set/cleared. Also, the pci_save/restore_state functions | ||
4424 | * violate the ordering requirements for restoring the | ||
4425 | * configuration space from the CCISS document (see the | ||
4426 | * comment below). So we roll our own .... | ||
4427 | * | ||
4428 | * For controllers newer than the P600, the pci power state | 4409 | * For controllers newer than the P600, the pci power state |
4429 | * method of resetting doesn't work so we have another way | 4410 | * method of resetting doesn't work so we have another way |
4430 | * using the doorbell register. | 4411 | * using the doorbell register. |
@@ -4443,8 +4424,13 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev) | |||
4443 | return -ENODEV; | 4424 | return -ENODEV; |
4444 | } | 4425 | } |
4445 | 4426 | ||
4446 | for (i = 0; i < 32; i++) | 4427 | /* Save the PCI command register */ |
4447 | pci_read_config_word(pdev, 2*i, &saved_config_space[i]); | 4428 | pci_read_config_word(pdev, 4, &command_register); |
4429 | /* Turn the board off. This is so that later pci_restore_state() | ||
4430 | * won't turn the board on before the rest of config space is ready. | ||
4431 | */ | ||
4432 | pci_disable_device(pdev); | ||
4433 | pci_save_state(pdev); | ||
4448 | 4434 | ||
4449 | /* find the first memory BAR, so we can find the cfg table */ | 4435 | /* find the first memory BAR, so we can find the cfg table */ |
4450 | rc = cciss_pci_find_memory_BAR(pdev, &paddr); | 4436 | rc = cciss_pci_find_memory_BAR(pdev, &paddr); |
@@ -4479,26 +4465,32 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev) | |||
4479 | rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell); | 4465 | rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell); |
4480 | if (rc) | 4466 | if (rc) |
4481 | goto unmap_cfgtable; | 4467 | goto unmap_cfgtable; |
4482 | 4468 | pci_restore_state(pdev); | |
4483 | /* Restore the PCI configuration space. The Open CISS | 4469 | rc = pci_enable_device(pdev); |
4484 | * Specification says, "Restore the PCI Configuration | 4470 | if (rc) { |
4485 | * Registers, offsets 00h through 60h. It is important to | 4471 | dev_warn(&pdev->dev, "failed to enable device.\n"); |
4486 | * restore the command register, 16-bits at offset 04h, | 4472 | goto unmap_cfgtable; |
4487 | * last. Do not restore the configuration status register, | ||
4488 | * 16-bits at offset 06h." Note that the offset is 2*i. | ||
4489 | */ | ||
4490 | for (i = 0; i < 32; i++) { | ||
4491 | if (i == 2 || i == 3) | ||
4492 | continue; | ||
4493 | pci_write_config_word(pdev, 2*i, saved_config_space[i]); | ||
4494 | } | 4473 | } |
4495 | wmb(); | 4474 | pci_write_config_word(pdev, 4, command_register); |
4496 | pci_write_config_word(pdev, 4, saved_config_space[2]); | ||
4497 | 4475 | ||
4498 | /* Some devices (notably the HP Smart Array 5i Controller) | 4476 | /* Some devices (notably the HP Smart Array 5i Controller) |
4499 | need a little pause here */ | 4477 | need a little pause here */ |
4500 | msleep(CCISS_POST_RESET_PAUSE_MSECS); | 4478 | msleep(CCISS_POST_RESET_PAUSE_MSECS); |
4501 | 4479 | ||
4480 | /* Wait for board to become not ready, then ready. */ | ||
4481 | dev_info(&pdev->dev, "Waiting for board to become ready.\n"); | ||
4482 | rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY); | ||
4483 | if (rc) /* Don't bail, might be E500, etc. which can't be reset */ | ||
4484 | dev_warn(&pdev->dev, | ||
4485 | "failed waiting for board to become not ready\n"); | ||
4486 | rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_READY); | ||
4487 | if (rc) { | ||
4488 | dev_warn(&pdev->dev, | ||
4489 | "failed waiting for board to become ready\n"); | ||
4490 | goto unmap_cfgtable; | ||
4491 | } | ||
4492 | dev_info(&pdev->dev, "board ready.\n"); | ||
4493 | |||
4502 | /* Controller should be in simple mode at this point. If it's not, | 4494 | /* Controller should be in simple mode at this point. If it's not, |
4503 | * It means we're on one of those controllers which doesn't support | 4495 | * It means we're on one of those controllers which doesn't support |
4504 | * the doorbell reset method and on which the PCI power management reset | 4496 | * the doorbell reset method and on which the PCI power management reset |
@@ -4539,8 +4531,6 @@ static __devinit int cciss_init_reset_devices(struct pci_dev *pdev) | |||
4539 | return 0; /* just try to do the kdump anyhow. */ | 4531 | return 0; /* just try to do the kdump anyhow. */ |
4540 | if (rc) | 4532 | if (rc) |
4541 | return -ENODEV; | 4533 | return -ENODEV; |
4542 | if (cciss_reset_msi(pdev)) | ||
4543 | return -ENODEV; | ||
4544 | 4534 | ||
4545 | /* Now try to get the controller to respond to a no-op */ | 4535 | /* Now try to get the controller to respond to a no-op */ |
4546 | for (i = 0; i < CCISS_POST_RESET_NOOP_RETRIES; i++) { | 4536 | for (i = 0; i < CCISS_POST_RESET_NOOP_RETRIES; i++) { |
@@ -4936,7 +4926,8 @@ static void __exit cciss_cleanup(void) | |||
4936 | } | 4926 | } |
4937 | } | 4927 | } |
4938 | kthread_stop(cciss_scan_thread); | 4928 | kthread_stop(cciss_scan_thread); |
4939 | remove_proc_entry("driver/cciss", NULL); | 4929 | if (proc_cciss) |
4930 | remove_proc_entry("driver/cciss", NULL); | ||
4940 | bus_unregister(&cciss_bus_type); | 4931 | bus_unregister(&cciss_bus_type); |
4941 | } | 4932 | } |
4942 | 4933 | ||
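cciss_wait_for_board_ready() is generalised above into cciss_wait_for_board_state(), which can wait either for the firmware-ready value to appear or to disappear; the kdump reset path uses both in turn, and the PCI state is now saved and restored with pci_save_state()/pci_restore_state() plus an explicit command-register save. A userspace sketch of the two-way polling helper, with a simulated scratchpad register and a made-up ready value:

#include <stdio.h>
#include <unistd.h>

#define FIRMWARE_READY 0xffff0000u	/* made-up sentinel, not the real CCISS value */

static unsigned int read_scratchpad(void)
{
	static int calls;

	return ++calls < 3 ? 0 : FIRMWARE_READY;	/* simulated: ready on the third poll */
}

static int wait_for_state(int wait_for_ready, int iterations)
{
	int i;

	for (i = 0; i < iterations; i++) {
		unsigned int v = read_scratchpad();

		if (wait_for_ready ? v == FIRMWARE_READY : v != FIRMWARE_READY)
			return 0;
		usleep(100 * 1000);	/* poll interval */
	}
	return -1;	/* timed out */
}

int main(void)
{
	printf("%d\n", wait_for_state(1, 10));	/* prints 0 once "ready" appears */
	return 0;
}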
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h index ae340ffc8f81..4b8933d778f1 100644 --- a/drivers/block/cciss.h +++ b/drivers/block/cciss.h | |||
@@ -200,10 +200,14 @@ struct ctlr_info | |||
200 | * the above. | 200 | * the above. |
201 | */ | 201 | */ |
202 | #define CCISS_BOARD_READY_WAIT_SECS (120) | 202 | #define CCISS_BOARD_READY_WAIT_SECS (120) |
203 | #define CCISS_BOARD_NOT_READY_WAIT_SECS (10) | ||
203 | #define CCISS_BOARD_READY_POLL_INTERVAL_MSECS (100) | 204 | #define CCISS_BOARD_READY_POLL_INTERVAL_MSECS (100) |
204 | #define CCISS_BOARD_READY_ITERATIONS \ | 205 | #define CCISS_BOARD_READY_ITERATIONS \ |
205 | ((CCISS_BOARD_READY_WAIT_SECS * 1000) / \ | 206 | ((CCISS_BOARD_READY_WAIT_SECS * 1000) / \ |
206 | CCISS_BOARD_READY_POLL_INTERVAL_MSECS) | 207 | CCISS_BOARD_READY_POLL_INTERVAL_MSECS) |
208 | #define CCISS_BOARD_NOT_READY_ITERATIONS \ | ||
209 | ((CCISS_BOARD_NOT_READY_WAIT_SECS * 1000) / \ | ||
210 | CCISS_BOARD_READY_POLL_INTERVAL_MSECS) | ||
207 | #define CCISS_POST_RESET_PAUSE_MSECS (3000) | 211 | #define CCISS_POST_RESET_PAUSE_MSECS (3000) |
208 | #define CCISS_POST_RESET_NOOP_INTERVAL_MSECS (1000) | 212 | #define CCISS_POST_RESET_NOOP_INTERVAL_MSECS (1000) |
209 | #define CCISS_POST_RESET_NOOP_RETRIES (12) | 213 | #define CCISS_POST_RESET_NOOP_RETRIES (12) |
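With the shared 100 ms poll interval, these macros evaluate to 1200 iterations for the 120 s ready wait and 100 iterations for the new 10 s not-ready wait.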
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c index 575495f3c4b8..727d0225b7d0 100644 --- a/drivers/block/cciss_scsi.c +++ b/drivers/block/cciss_scsi.c | |||
@@ -62,8 +62,8 @@ static int cciss_scsi_proc_info( | |||
62 | int length, /* length of data in buffer */ | 62 | int length, /* length of data in buffer */ |
63 | int func); /* 0 == read, 1 == write */ | 63 | int func); /* 0 == read, 1 == write */ |
64 | 64 | ||
65 | static int cciss_scsi_queue_command (struct scsi_cmnd *cmd, | 65 | static int cciss_scsi_queue_command (struct Scsi_Host *h, |
66 | void (* done)(struct scsi_cmnd *)); | 66 | struct scsi_cmnd *cmd); |
67 | static int cciss_eh_device_reset_handler(struct scsi_cmnd *); | 67 | static int cciss_eh_device_reset_handler(struct scsi_cmnd *); |
68 | static int cciss_eh_abort_handler(struct scsi_cmnd *); | 68 | static int cciss_eh_abort_handler(struct scsi_cmnd *); |
69 | 69 | ||
@@ -1406,7 +1406,7 @@ static void cciss_scatter_gather(ctlr_info_t *h, CommandList_struct *c, | |||
1406 | 1406 | ||
1407 | 1407 | ||
1408 | static int | 1408 | static int |
1409 | cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *)) | 1409 | cciss_scsi_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) |
1410 | { | 1410 | { |
1411 | ctlr_info_t *h; | 1411 | ctlr_info_t *h; |
1412 | int rc; | 1412 | int rc; |
@@ -1504,6 +1504,8 @@ cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd | |||
1504 | return 0; | 1504 | return 0; |
1505 | } | 1505 | } |
1506 | 1506 | ||
1507 | static DEF_SCSI_QCMD(cciss_scsi_queue_command) | ||
1508 | |||
1507 | static void cciss_unregister_scsi(ctlr_info_t *h) | 1509 | static void cciss_unregister_scsi(ctlr_info_t *h) |
1508 | { | 1510 | { |
1509 | struct cciss_scsi_adapter_data_t *sa; | 1511 | struct cciss_scsi_adapter_data_t *sa; |
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c index ac04ef97eac2..ba95cba192be 100644 --- a/drivers/block/drbd/drbd_actlog.c +++ b/drivers/block/drbd/drbd_actlog.c | |||
@@ -78,11 +78,10 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev, | |||
78 | init_completion(&md_io.event); | 78 | init_completion(&md_io.event); |
79 | md_io.error = 0; | 79 | md_io.error = 0; |
80 | 80 | ||
81 | if ((rw & WRITE) && !test_bit(MD_NO_BARRIER, &mdev->flags)) | 81 | if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags)) |
82 | rw |= REQ_HARDBARRIER; | 82 | rw |= REQ_FUA; |
83 | rw |= REQ_UNPLUG | REQ_SYNC; | 83 | rw |= REQ_UNPLUG | REQ_SYNC; |
84 | 84 | ||
85 | retry: | ||
86 | bio = bio_alloc(GFP_NOIO, 1); | 85 | bio = bio_alloc(GFP_NOIO, 1); |
87 | bio->bi_bdev = bdev->md_bdev; | 86 | bio->bi_bdev = bdev->md_bdev; |
88 | bio->bi_sector = sector; | 87 | bio->bi_sector = sector; |
@@ -100,17 +99,6 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev, | |||
100 | wait_for_completion(&md_io.event); | 99 | wait_for_completion(&md_io.event); |
101 | ok = bio_flagged(bio, BIO_UPTODATE) && md_io.error == 0; | 100 | ok = bio_flagged(bio, BIO_UPTODATE) && md_io.error == 0; |
102 | 101 | ||
103 | /* check for unsupported barrier op. | ||
104 | * would rather check on EOPNOTSUPP, but that is not reliable. | ||
105 | * don't try again for ANY return value != 0 */ | ||
106 | if (unlikely((bio->bi_rw & REQ_HARDBARRIER) && !ok)) { | ||
107 | /* Try again with no barrier */ | ||
108 | dev_warn(DEV, "Barriers not supported on meta data device - disabling\n"); | ||
109 | set_bit(MD_NO_BARRIER, &mdev->flags); | ||
110 | rw &= ~REQ_HARDBARRIER; | ||
111 | bio_put(bio); | ||
112 | goto retry; | ||
113 | } | ||
114 | out: | 102 | out: |
115 | bio_put(bio); | 103 | bio_put(bio); |
116 | return ok; | 104 | return ok; |
@@ -284,18 +272,32 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused) | |||
284 | u32 xor_sum = 0; | 272 | u32 xor_sum = 0; |
285 | 273 | ||
286 | if (!get_ldev(mdev)) { | 274 | if (!get_ldev(mdev)) { |
287 | dev_err(DEV, "get_ldev() failed in w_al_write_transaction\n"); | 275 | dev_err(DEV, |
276 | "disk is %s, cannot start al transaction (-%d +%d)\n", | ||
277 | drbd_disk_str(mdev->state.disk), evicted, new_enr); | ||
288 | complete(&((struct update_al_work *)w)->event); | 278 | complete(&((struct update_al_work *)w)->event); |
289 | return 1; | 279 | return 1; |
290 | } | 280 | } |
291 | /* do we have to do a bitmap write, first? | 281 | /* do we have to do a bitmap write, first? |
292 | * TODO reduce maximum latency: | 282 | * TODO reduce maximum latency: |
293 | * submit both bios, then wait for both, | 283 | * submit both bios, then wait for both, |
294 | * instead of doing two synchronous sector writes. */ | 284 | * instead of doing two synchronous sector writes. |
285 | * For now, we must not write the transaction, | ||
286 | * if we cannot write out the bitmap of the evicted extent. */ | ||
295 | if (mdev->state.conn < C_CONNECTED && evicted != LC_FREE) | 287 | if (mdev->state.conn < C_CONNECTED && evicted != LC_FREE) |
296 | drbd_bm_write_sect(mdev, evicted/AL_EXT_PER_BM_SECT); | 288 | drbd_bm_write_sect(mdev, evicted/AL_EXT_PER_BM_SECT); |
297 | 289 | ||
298 | mutex_lock(&mdev->md_io_mutex); /* protects md_io_page, al_tr_cycle, ... */ | 290 | /* The bitmap write may have failed, causing a state change. */ |
291 | if (mdev->state.disk < D_INCONSISTENT) { | ||
292 | dev_err(DEV, | ||
293 | "disk is %s, cannot write al transaction (-%d +%d)\n", | ||
294 | drbd_disk_str(mdev->state.disk), evicted, new_enr); | ||
295 | complete(&((struct update_al_work *)w)->event); | ||
296 | put_ldev(mdev); | ||
297 | return 1; | ||
298 | } | ||
299 | |||
300 | mutex_lock(&mdev->md_io_mutex); /* protects md_io_buffer, al_tr_cycle, ... */ | ||
299 | buffer = (struct al_transaction *)page_address(mdev->md_io_page); | 301 | buffer = (struct al_transaction *)page_address(mdev->md_io_page); |
300 | 302 | ||
301 | buffer->magic = __constant_cpu_to_be32(DRBD_MAGIC); | 303 | buffer->magic = __constant_cpu_to_be32(DRBD_MAGIC); |
@@ -739,7 +741,7 @@ void drbd_al_apply_to_bm(struct drbd_conf *mdev) | |||
739 | unsigned int enr; | 741 | unsigned int enr; |
740 | unsigned long add = 0; | 742 | unsigned long add = 0; |
741 | char ppb[10]; | 743 | char ppb[10]; |
742 | int i; | 744 | int i, tmp; |
743 | 745 | ||
744 | wait_event(mdev->al_wait, lc_try_lock(mdev->act_log)); | 746 | wait_event(mdev->al_wait, lc_try_lock(mdev->act_log)); |
745 | 747 | ||
@@ -747,7 +749,9 @@ void drbd_al_apply_to_bm(struct drbd_conf *mdev) | |||
747 | enr = lc_element_by_index(mdev->act_log, i)->lc_number; | 749 | enr = lc_element_by_index(mdev->act_log, i)->lc_number; |
748 | if (enr == LC_FREE) | 750 | if (enr == LC_FREE) |
749 | continue; | 751 | continue; |
750 | add += drbd_bm_ALe_set_all(mdev, enr); | 752 | tmp = drbd_bm_ALe_set_all(mdev, enr); |
753 | dynamic_dev_dbg(DEV, "AL: set %d bits in extent %u\n", tmp, enr); | ||
754 | add += tmp; | ||
751 | } | 755 | } |
752 | 756 | ||
753 | lc_unlock(mdev->act_log); | 757 | lc_unlock(mdev->act_log); |
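With REQ_HARDBARRIER gone, drbd's metadata writes above request REQ_FUA instead, falling back (via the renamed MD_NO_FUA flag) to plain writes once a FUA/flush attempt fails, so the retry-without-barrier loop is no longer needed. A tiny sketch of the flag composition, using placeholder values rather than the real REQ_* constants:

#include <stdio.h>

#define REQ_WRITE	(1u << 0)	/* placeholder bits, not the kernel's values */
#define REQ_SYNC	(1u << 1)
#define REQ_UNPLUG	(1u << 2)
#define REQ_FUA		(1u << 3)

int main(void)
{
	unsigned int rw = REQ_WRITE;
	int md_no_fua = 0;		/* set once a FUA/flush attempt has failed */

	if ((rw & REQ_WRITE) && !md_no_fua)
		rw |= REQ_FUA;		/* was: rw |= REQ_HARDBARRIER */
	rw |= REQ_UNPLUG | REQ_SYNC;

	printf("flags: %#x\n", rw);
	return 0;
}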
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 9bdcf4393c0a..1ea1a34e78b2 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h | |||
@@ -114,11 +114,11 @@ struct drbd_conf; | |||
114 | #define D_ASSERT(exp) if (!(exp)) \ | 114 | #define D_ASSERT(exp) if (!(exp)) \ |
115 | dev_err(DEV, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__) | 115 | dev_err(DEV, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__) |
116 | 116 | ||
117 | #define ERR_IF(exp) if (({ \ | 117 | #define ERR_IF(exp) if (({ \ |
118 | int _b = (exp) != 0; \ | 118 | int _b = (exp) != 0; \ |
119 | if (_b) dev_err(DEV, "%s: (%s) in %s:%d\n", \ | 119 | if (_b) dev_err(DEV, "ASSERT FAILED: %s: (%s) in %s:%d\n", \ |
120 | __func__, #exp, __FILE__, __LINE__); \ | 120 | __func__, #exp, __FILE__, __LINE__); \ |
121 | _b; \ | 121 | _b; \ |
122 | })) | 122 | })) |
123 | 123 | ||
124 | /* Defines to control fault insertion */ | 124 | /* Defines to control fault insertion */ |
@@ -749,17 +749,12 @@ struct drbd_epoch { | |||
749 | 749 | ||
750 | /* drbd_epoch flag bits */ | 750 | /* drbd_epoch flag bits */ |
751 | enum { | 751 | enum { |
752 | DE_BARRIER_IN_NEXT_EPOCH_ISSUED, | ||
753 | DE_BARRIER_IN_NEXT_EPOCH_DONE, | ||
754 | DE_CONTAINS_A_BARRIER, | ||
755 | DE_HAVE_BARRIER_NUMBER, | 752 | DE_HAVE_BARRIER_NUMBER, |
756 | DE_IS_FINISHING, | ||
757 | }; | 753 | }; |
758 | 754 | ||
759 | enum epoch_event { | 755 | enum epoch_event { |
760 | EV_PUT, | 756 | EV_PUT, |
761 | EV_GOT_BARRIER_NR, | 757 | EV_GOT_BARRIER_NR, |
762 | EV_BARRIER_DONE, | ||
763 | EV_BECAME_LAST, | 758 | EV_BECAME_LAST, |
764 | EV_CLEANUP = 32, /* used as flag */ | 759 | EV_CLEANUP = 32, /* used as flag */ |
765 | }; | 760 | }; |
@@ -801,11 +796,6 @@ enum { | |||
801 | __EE_CALL_AL_COMPLETE_IO, | 796 | __EE_CALL_AL_COMPLETE_IO, |
802 | __EE_MAY_SET_IN_SYNC, | 797 | __EE_MAY_SET_IN_SYNC, |
803 | 798 | ||
804 | /* This epoch entry closes an epoch using a barrier. | ||
805 | * On sucessful completion, the epoch is released, | ||
806 | * and the P_BARRIER_ACK send. */ | ||
807 | __EE_IS_BARRIER, | ||
808 | |||
809 | /* In case a barrier failed, | 799 | /* In case a barrier failed, |
810 | * we need to resubmit without the barrier flag. */ | 800 | * we need to resubmit without the barrier flag. */ |
811 | __EE_RESUBMITTED, | 801 | __EE_RESUBMITTED, |
@@ -820,7 +810,6 @@ enum { | |||
820 | }; | 810 | }; |
821 | #define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO) | 811 | #define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO) |
822 | #define EE_MAY_SET_IN_SYNC (1<<__EE_MAY_SET_IN_SYNC) | 812 | #define EE_MAY_SET_IN_SYNC (1<<__EE_MAY_SET_IN_SYNC) |
823 | #define EE_IS_BARRIER (1<<__EE_IS_BARRIER) | ||
824 | #define EE_RESUBMITTED (1<<__EE_RESUBMITTED) | 813 | #define EE_RESUBMITTED (1<<__EE_RESUBMITTED) |
825 | #define EE_WAS_ERROR (1<<__EE_WAS_ERROR) | 814 | #define EE_WAS_ERROR (1<<__EE_WAS_ERROR) |
826 | #define EE_HAS_DIGEST (1<<__EE_HAS_DIGEST) | 815 | #define EE_HAS_DIGEST (1<<__EE_HAS_DIGEST) |
@@ -843,16 +832,15 @@ enum { | |||
843 | * Gets cleared when the state.conn | 832 | * Gets cleared when the state.conn |
844 | * goes into C_CONNECTED state. */ | 833 | * goes into C_CONNECTED state. */ |
845 | WRITE_BM_AFTER_RESYNC, /* A kmalloc() during resync failed */ | 834 | WRITE_BM_AFTER_RESYNC, /* A kmalloc() during resync failed */ |
846 | NO_BARRIER_SUPP, /* underlying block device doesn't implement barriers */ | ||
847 | CONSIDER_RESYNC, | 835 | CONSIDER_RESYNC, |
848 | 836 | ||
849 | MD_NO_BARRIER, /* meta data device does not support barriers, | 837 | MD_NO_FUA, /* User wants us to not use FUA/FLUSH on meta data dev */
850 | so don't even try */ | ||
851 | SUSPEND_IO, /* suspend application io */ | 838 | SUSPEND_IO, /* suspend application io */ |
852 | BITMAP_IO, /* suspend application io; | 839 | BITMAP_IO, /* suspend application io; |
853 | once no more io in flight, start bitmap io */ | 840 | once no more io in flight, start bitmap io */ |
854 | BITMAP_IO_QUEUED, /* Started bitmap IO */ | 841 | BITMAP_IO_QUEUED, /* Started bitmap IO */ |
855 | GO_DISKLESS, /* Disk failed, local_cnt reached zero, we are going diskless */ | 842 | GO_DISKLESS, /* Disk is being detached, on io-error or admin request. */ |
843 | WAS_IO_ERROR, /* Local disk failed returned IO error */ | ||
856 | RESYNC_AFTER_NEG, /* Resync after online grow after the attach&negotiate finished. */ | 844 | RESYNC_AFTER_NEG, /* Resync after online grow after the attach&negotiate finished. */ |
857 | NET_CONGESTED, /* The data socket is congested */ | 845 | NET_CONGESTED, /* The data socket is congested */ |
858 | 846 | ||
@@ -947,7 +935,6 @@ enum write_ordering_e { | |||
947 | WO_none, | 935 | WO_none, |
948 | WO_drain_io, | 936 | WO_drain_io, |
949 | WO_bdev_flush, | 937 | WO_bdev_flush, |
950 | WO_bio_barrier | ||
951 | }; | 938 | }; |
952 | 939 | ||
953 | struct fifo_buffer { | 940 | struct fifo_buffer { |
@@ -1281,6 +1268,7 @@ extern int drbd_bmio_set_n_write(struct drbd_conf *mdev); | |||
1281 | extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev); | 1268 | extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev); |
1282 | extern int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why); | 1269 | extern int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why); |
1283 | extern void drbd_go_diskless(struct drbd_conf *mdev); | 1270 | extern void drbd_go_diskless(struct drbd_conf *mdev); |
1271 | extern void drbd_ldev_destroy(struct drbd_conf *mdev); | ||
1284 | 1272 | ||
1285 | 1273 | ||
1286 | /* Meta data layout | 1274 | /* Meta data layout |
@@ -1798,17 +1786,17 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach, | |||
1798 | case EP_PASS_ON: | 1786 | case EP_PASS_ON: |
1799 | if (!forcedetach) { | 1787 | if (!forcedetach) { |
1800 | if (__ratelimit(&drbd_ratelimit_state)) | 1788 | if (__ratelimit(&drbd_ratelimit_state)) |
1801 | dev_err(DEV, "Local IO failed in %s." | 1789 | dev_err(DEV, "Local IO failed in %s.\n", where); |
1802 | "Passing error on...\n", where); | ||
1803 | break; | 1790 | break; |
1804 | } | 1791 | } |
1805 | /* NOTE fall through to detach case if forcedetach set */ | 1792 | /* NOTE fall through to detach case if forcedetach set */ |
1806 | case EP_DETACH: | 1793 | case EP_DETACH: |
1807 | case EP_CALL_HELPER: | 1794 | case EP_CALL_HELPER: |
1795 | set_bit(WAS_IO_ERROR, &mdev->flags); | ||
1808 | if (mdev->state.disk > D_FAILED) { | 1796 | if (mdev->state.disk > D_FAILED) { |
1809 | _drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL); | 1797 | _drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL); |
1810 | dev_err(DEV, "Local IO failed in %s." | 1798 | dev_err(DEV, |
1811 | "Detaching...\n", where); | 1799 | "Local IO failed in %s. Detaching...\n", where); |
1812 | } | 1800 | } |
1813 | break; | 1801 | break; |
1814 | } | 1802 | } |
@@ -1874,7 +1862,7 @@ static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev) | |||
1874 | static inline sector_t drbd_get_capacity(struct block_device *bdev) | 1862 | static inline sector_t drbd_get_capacity(struct block_device *bdev) |
1875 | { | 1863 | { |
1876 | /* return bdev ? get_capacity(bdev->bd_disk) : 0; */ | 1864 | /* return bdev ? get_capacity(bdev->bd_disk) : 0; */ |
1877 | return bdev ? bdev->bd_inode->i_size >> 9 : 0; | 1865 | return bdev ? i_size_read(bdev->bd_inode) >> 9 : 0; |
1878 | } | 1866 | } |
1879 | 1867 | ||
1880 | /** | 1868 | /** |
@@ -2127,7 +2115,11 @@ static inline void put_ldev(struct drbd_conf *mdev) | |||
2127 | __release(local); | 2115 | __release(local); |
2128 | D_ASSERT(i >= 0); | 2116 | D_ASSERT(i >= 0); |
2129 | if (i == 0) { | 2117 | if (i == 0) { |
2118 | if (mdev->state.disk == D_DISKLESS) | ||
2119 | /* even internal references gone, safe to destroy */ | ||
2120 | drbd_ldev_destroy(mdev); | ||
2130 | if (mdev->state.disk == D_FAILED) | 2121 | if (mdev->state.disk == D_FAILED) |
2122 | /* all application IO references gone. */ | ||
2131 | drbd_go_diskless(mdev); | 2123 | drbd_go_diskless(mdev); |
2132 | wake_up(&mdev->misc_wait); | 2124 | wake_up(&mdev->misc_wait); |
2133 | } | 2125 | } |
@@ -2138,6 +2130,10 @@ static inline int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_stat | |||
2138 | { | 2130 | { |
2139 | int io_allowed; | 2131 | int io_allowed; |
2140 | 2132 | ||
2133 | /* never get a reference while D_DISKLESS */ | ||
2134 | if (mdev->state.disk == D_DISKLESS) | ||
2135 | return 0; | ||
2136 | |||
2141 | atomic_inc(&mdev->local_cnt); | 2137 | atomic_inc(&mdev->local_cnt); |
2142 | io_allowed = (mdev->state.disk >= mins); | 2138 | io_allowed = (mdev->state.disk >= mins); |
2143 | if (!io_allowed) | 2139 | if (!io_allowed) |
@@ -2406,12 +2402,12 @@ static inline void drbd_md_flush(struct drbd_conf *mdev) | |||
2406 | { | 2402 | { |
2407 | int r; | 2403 | int r; |
2408 | 2404 | ||
2409 | if (test_bit(MD_NO_BARRIER, &mdev->flags)) | 2405 | if (test_bit(MD_NO_FUA, &mdev->flags)) |
2410 | return; | 2406 | return; |
2411 | 2407 | ||
2412 | r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_KERNEL, NULL); | 2408 | r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_KERNEL, NULL); |
2413 | if (r) { | 2409 | if (r) { |
2414 | set_bit(MD_NO_BARRIER, &mdev->flags); | 2410 | set_bit(MD_NO_FUA, &mdev->flags); |
2415 | dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r); | 2411 | dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r); |
2416 | } | 2412 | } |
2417 | } | 2413 | } |
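The header changes tie the disk state machine to the local reference count: no new reference can be taken once the device is D_DISKLESS, and the last put triggers either drbd_go_diskless() (when D_FAILED) or drbd_ldev_destroy() (when D_DISKLESS). A much-simplified userspace sketch of that interplay, with invented names:

#include <stdio.h>

enum disk_state { D_DISKLESS, D_FAILED, D_UP };

struct dev { int refs; enum disk_state disk; };

static int get_ref(struct dev *d)
{
	if (d->disk == D_DISKLESS)	/* never get a reference while diskless */
		return 0;
	d->refs++;
	return 1;
}

static void put_ref(struct dev *d)
{
	if (--d->refs)
		return;
	if (d->disk == D_DISKLESS)
		printf("destroy backing device\n");	/* drbd_ldev_destroy() */
	else if (d->disk == D_FAILED)
		printf("go diskless\n");		/* drbd_go_diskless() */
}

int main(void)
{
	struct dev d = { .refs = 1, .disk = D_FAILED };

	if (get_ref(&d))	/* still possible: disk is not yet DISKLESS */
		put_ref(&d);	/* back to one reference, no action */
	put_ref(&d);		/* last reference while FAILED -> go diskless */
	return 0;
}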
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 25c7a73c5062..6be5401d0e88 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c | |||
@@ -835,6 +835,15 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state | |||
835 | ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_TEAR_DOWN) | 835 | ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_TEAR_DOWN) |
836 | ns.conn = os.conn; | 836 | ns.conn = os.conn; |
837 | 837 | ||
838 | /* we cannot fail (again) if we already detached */ | ||
839 | if (ns.disk == D_FAILED && os.disk == D_DISKLESS) | ||
840 | ns.disk = D_DISKLESS; | ||
841 | |||
842 | /* if we are only D_ATTACHING yet, | ||
843 | * we can (and should) go directly to D_DISKLESS. */ | ||
844 | if (ns.disk == D_FAILED && os.disk == D_ATTACHING) | ||
845 | ns.disk = D_DISKLESS; | ||
846 | |||
838 | /* After C_DISCONNECTING only C_STANDALONE may follow */ | 847 | /* After C_DISCONNECTING only C_STANDALONE may follow */ |
839 | if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE) | 848 | if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE) |
840 | ns.conn = os.conn; | 849 | ns.conn = os.conn; |
@@ -1056,7 +1065,15 @@ int __drbd_set_state(struct drbd_conf *mdev, | |||
1056 | !test_and_set_bit(CONFIG_PENDING, &mdev->flags)) | 1065 | !test_and_set_bit(CONFIG_PENDING, &mdev->flags)) |
1057 | set_bit(DEVICE_DYING, &mdev->flags); | 1066 | set_bit(DEVICE_DYING, &mdev->flags); |
1058 | 1067 | ||
1059 | mdev->state.i = ns.i; | 1068 | /* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference |
1069 | * on the ldev here, to be sure the transition -> D_DISKLESS resp. | ||
1070 | * drbd_ldev_destroy() won't happen before our corresponding | ||
1071 | * after_state_ch works run, where we put_ldev again. */ | ||
1072 | if ((os.disk != D_FAILED && ns.disk == D_FAILED) || | ||
1073 | (os.disk != D_DISKLESS && ns.disk == D_DISKLESS)) | ||
1074 | atomic_inc(&mdev->local_cnt); | ||
1075 | |||
1076 | mdev->state = ns; | ||
1060 | wake_up(&mdev->misc_wait); | 1077 | wake_up(&mdev->misc_wait); |
1061 | wake_up(&mdev->state_wait); | 1078 | wake_up(&mdev->state_wait); |
1062 | 1079 | ||
@@ -1268,7 +1285,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, | |||
1268 | if (test_bit(NEW_CUR_UUID, &mdev->flags)) { | 1285 | if (test_bit(NEW_CUR_UUID, &mdev->flags)) { |
1269 | drbd_uuid_new_current(mdev); | 1286 | drbd_uuid_new_current(mdev); |
1270 | clear_bit(NEW_CUR_UUID, &mdev->flags); | 1287 | clear_bit(NEW_CUR_UUID, &mdev->flags); |
1271 | drbd_md_sync(mdev); | ||
1272 | } | 1288 | } |
1273 | spin_lock_irq(&mdev->req_lock); | 1289 | spin_lock_irq(&mdev->req_lock); |
1274 | _drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL); | 1290 | _drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL); |
@@ -1365,63 +1381,64 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, | |||
1365 | os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT) | 1381 | os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT) |
1366 | drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write from invalidate"); | 1382 | drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write from invalidate"); |
1367 | 1383 | ||
1368 | /* first half of local IO error */ | 1384 | /* first half of local IO error, failure to attach, |
1369 | if (os.disk > D_FAILED && ns.disk == D_FAILED) { | 1385 | * or administrative detach */ |
1370 | enum drbd_io_error_p eh = EP_PASS_ON; | 1386 | if (os.disk != D_FAILED && ns.disk == D_FAILED) { |
1387 | enum drbd_io_error_p eh; | ||
1388 | int was_io_error; | ||
1389 | /* corresponding get_ldev was in __drbd_set_state, to serialize | ||
1390 | * our cleanup here with the transition to D_DISKLESS, | ||
1391 | * so it is safe to dereference ldev here. | ||
1392 | eh = mdev->ldev->dc.on_io_error; | ||
1393 | was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags); | ||
1394 | |||
1395 | /* current state still has to be D_FAILED, | ||
1396 | * there is only one way out: to D_DISKLESS, | ||
1397 | * and that may only happen after our put_ldev below. */ | ||
1398 | if (mdev->state.disk != D_FAILED) | ||
1399 | dev_err(DEV, | ||
1400 | "ASSERT FAILED: disk is %s during detach\n", | ||
1401 | drbd_disk_str(mdev->state.disk)); | ||
1371 | 1402 | ||
1372 | if (drbd_send_state(mdev)) | 1403 | if (drbd_send_state(mdev)) |
1373 | dev_warn(DEV, "Notified peer that my disk is broken.\n"); | 1404 | dev_warn(DEV, "Notified peer that I am detaching my disk\n"); |
1374 | else | 1405 | else |
1375 | dev_err(DEV, "Sending state for drbd_io_error() failed\n"); | 1406 | dev_err(DEV, "Sending state for detaching disk failed\n"); |
1376 | 1407 | ||
1377 | drbd_rs_cancel_all(mdev); | 1408 | drbd_rs_cancel_all(mdev); |
1378 | 1409 | ||
1379 | if (get_ldev_if_state(mdev, D_FAILED)) { | 1410 | /* In case we want to get something to stable storage still, |
1380 | eh = mdev->ldev->dc.on_io_error; | 1411 | * this may be the last chance. |
1381 | put_ldev(mdev); | 1412 | * Following put_ldev may transition to D_DISKLESS. */ |
1382 | } | 1413 | drbd_md_sync(mdev); |
1383 | if (eh == EP_CALL_HELPER) | 1414 | put_ldev(mdev); |
1415 | |||
1416 | if (was_io_error && eh == EP_CALL_HELPER) | ||
1384 | drbd_khelper(mdev, "local-io-error"); | 1417 | drbd_khelper(mdev, "local-io-error"); |
1385 | } | 1418 | } |
1386 | 1419 | ||
1420 | /* second half of local IO error, failure to attach, | ||
1421 | * or administrative detach, | ||
1422 | * after local_cnt references have reached zero again */ | ||
1423 | if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) { | ||
1424 | /* We must still be diskless, | ||
1425 | * re-attach has to be serialized with this! */ | ||
1426 | if (mdev->state.disk != D_DISKLESS) | ||
1427 | dev_err(DEV, | ||
1428 | "ASSERT FAILED: disk is %s while going diskless\n", | ||
1429 | drbd_disk_str(mdev->state.disk)); | ||
1387 | 1430 | ||
1388 | /* second half of local IO error handling, | 1431 | mdev->rs_total = 0; |
1389 | * after local_cnt references have reached zero: */ | 1432 | mdev->rs_failed = 0; |
1390 | if (os.disk == D_FAILED && ns.disk == D_DISKLESS) { | 1433 | atomic_set(&mdev->rs_pending_cnt, 0); |
1391 | mdev->rs_total = 0; | ||
1392 | mdev->rs_failed = 0; | ||
1393 | atomic_set(&mdev->rs_pending_cnt, 0); | ||
1394 | } | ||
1395 | |||
1396 | if (os.disk > D_DISKLESS && ns.disk == D_DISKLESS) { | ||
1397 | /* We must still be diskless, | ||
1398 | * re-attach has to be serialized with this! */ | ||
1399 | if (mdev->state.disk != D_DISKLESS) | ||
1400 | dev_err(DEV, | ||
1401 | "ASSERT FAILED: disk is %s while going diskless\n", | ||
1402 | drbd_disk_str(mdev->state.disk)); | ||
1403 | 1434 | ||
1404 | /* we cannot assert local_cnt == 0 here, as get_ldev_if_state | ||
1405 | * will inc/dec it frequently. Since we became D_DISKLESS, no | ||
1406 | * one has touched the protected members anymore, though, so we | ||
1407 | * are safe to free them here. */ | ||
1408 | if (drbd_send_state(mdev)) | 1435 | if (drbd_send_state(mdev)) |
1409 | dev_warn(DEV, "Notified peer that I detached my disk.\n"); | 1436 | dev_warn(DEV, "Notified peer that I'm now diskless.\n"); |
1410 | else | 1437 | else |
1411 | dev_err(DEV, "Sending state for detach failed\n"); | 1438 | dev_err(DEV, "Sending state for being diskless failed\n"); |
1412 | 1439 | /* corresponding get_ldev in __drbd_set_state | |
1413 | lc_destroy(mdev->resync); | 1440 | * this may finally trigger drbd_ldev_destroy. */
1414 | mdev->resync = NULL; | 1441 | put_ldev(mdev); |
1415 | lc_destroy(mdev->act_log); | ||
1416 | mdev->act_log = NULL; | ||
1417 | __no_warn(local, | ||
1418 | drbd_free_bc(mdev->ldev); | ||
1419 | mdev->ldev = NULL;); | ||
1420 | |||
1421 | if (mdev->md_io_tmpp) { | ||
1422 | __free_page(mdev->md_io_tmpp); | ||
1423 | mdev->md_io_tmpp = NULL; | ||
1424 | } | ||
1425 | } | 1442 | } |
1426 | 1443 | ||
1427 | /* Disks got bigger while they were detached */ | 1444 | /* Disks got bigger while they were detached */ |
@@ -2772,11 +2789,6 @@ void drbd_init_set_defaults(struct drbd_conf *mdev) | |||
2772 | 2789 | ||
2773 | drbd_set_defaults(mdev); | 2790 | drbd_set_defaults(mdev); |
2774 | 2791 | ||
2775 | /* for now, we do NOT yet support it, | ||
2776 | * even though we start some framework | ||
2777 | * to eventually support barriers */ | ||
2778 | set_bit(NO_BARRIER_SUPP, &mdev->flags); | ||
2779 | |||
2780 | atomic_set(&mdev->ap_bio_cnt, 0); | 2792 | atomic_set(&mdev->ap_bio_cnt, 0); |
2781 | atomic_set(&mdev->ap_pending_cnt, 0); | 2793 | atomic_set(&mdev->ap_pending_cnt, 0); |
2782 | atomic_set(&mdev->rs_pending_cnt, 0); | 2794 | atomic_set(&mdev->rs_pending_cnt, 0); |
@@ -2842,7 +2854,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev) | |||
2842 | drbd_thread_init(mdev, &mdev->asender, drbd_asender); | 2854 | drbd_thread_init(mdev, &mdev->asender, drbd_asender); |
2843 | 2855 | ||
2844 | mdev->agreed_pro_version = PRO_VERSION_MAX; | 2856 | mdev->agreed_pro_version = PRO_VERSION_MAX; |
2845 | mdev->write_ordering = WO_bio_barrier; | 2857 | mdev->write_ordering = WO_bdev_flush; |
2846 | mdev->resync_wenr = LC_FREE; | 2858 | mdev->resync_wenr = LC_FREE; |
2847 | } | 2859 | } |
2848 | 2860 | ||
@@ -2899,7 +2911,6 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev) | |||
2899 | D_ASSERT(list_empty(&mdev->resync_work.list)); | 2911 | D_ASSERT(list_empty(&mdev->resync_work.list)); |
2900 | D_ASSERT(list_empty(&mdev->unplug_work.list)); | 2912 | D_ASSERT(list_empty(&mdev->unplug_work.list)); |
2901 | D_ASSERT(list_empty(&mdev->go_diskless.list)); | 2913 | D_ASSERT(list_empty(&mdev->go_diskless.list)); |
2902 | |||
2903 | } | 2914 | } |
2904 | 2915 | ||
2905 | 2916 | ||
@@ -3660,6 +3671,8 @@ void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local) | |||
3660 | 3671 | ||
3661 | get_random_bytes(&val, sizeof(u64)); | 3672 | get_random_bytes(&val, sizeof(u64)); |
3662 | _drbd_uuid_set(mdev, UI_CURRENT, val); | 3673 | _drbd_uuid_set(mdev, UI_CURRENT, val); |
3674 | /* get it to stable storage _now_ */ | ||
3675 | drbd_md_sync(mdev); | ||
3663 | } | 3676 | } |
3664 | 3677 | ||
3665 | void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local) | 3678 | void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local) |
@@ -3756,19 +3769,31 @@ static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused) | |||
3756 | return 1; | 3769 | return 1; |
3757 | } | 3770 | } |
3758 | 3771 | ||
3772 | void drbd_ldev_destroy(struct drbd_conf *mdev) | ||
3773 | { | ||
3774 | lc_destroy(mdev->resync); | ||
3775 | mdev->resync = NULL; | ||
3776 | lc_destroy(mdev->act_log); | ||
3777 | mdev->act_log = NULL; | ||
3778 | __no_warn(local, | ||
3779 | drbd_free_bc(mdev->ldev); | ||
3780 | mdev->ldev = NULL;); | ||
3781 | |||
3782 | if (mdev->md_io_tmpp) { | ||
3783 | __free_page(mdev->md_io_tmpp); | ||
3784 | mdev->md_io_tmpp = NULL; | ||
3785 | } | ||
3786 | clear_bit(GO_DISKLESS, &mdev->flags); | ||
3787 | } | ||
3788 | |||
3759 | static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused) | 3789 | static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused) |
3760 | { | 3790 | { |
3761 | D_ASSERT(mdev->state.disk == D_FAILED); | 3791 | D_ASSERT(mdev->state.disk == D_FAILED); |
3762 | /* we cannot assert local_cnt == 0 here, as get_ldev_if_state will | 3792 | /* we cannot assert local_cnt == 0 here, as get_ldev_if_state will |
3763 | * inc/dec it frequently. Once we are D_DISKLESS, no one will touch | 3793 | * inc/dec it frequently. Once we are D_DISKLESS, no one will touch |
3764 | * the protected members anymore, though, so in the after_state_ch work | 3794 | * the protected members anymore, though, so once put_ldev reaches zero |
3765 | * it will be safe to free them. */ | 3795 | * again, it will be safe to free them. */ |
3766 | drbd_force_state(mdev, NS(disk, D_DISKLESS)); | 3796 | drbd_force_state(mdev, NS(disk, D_DISKLESS)); |
3767 | /* We need to wait for return of references checked out while we still | ||
3768 | * have been D_FAILED, though (drbd_md_sync, bitmap io). */ | ||
3769 | wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt)); | ||
3770 | |||
3771 | clear_bit(GO_DISKLESS, &mdev->flags); | ||
3772 | return 1; | 3797 | return 1; |
3773 | } | 3798 | } |
3774 | 3799 | ||
@@ -3777,9 +3802,6 @@ void drbd_go_diskless(struct drbd_conf *mdev) | |||
3777 | D_ASSERT(mdev->state.disk == D_FAILED); | 3802 | D_ASSERT(mdev->state.disk == D_FAILED); |
3778 | if (!test_and_set_bit(GO_DISKLESS, &mdev->flags)) | 3803 | if (!test_and_set_bit(GO_DISKLESS, &mdev->flags)) |
3779 | drbd_queue_work(&mdev->data.work, &mdev->go_diskless); | 3804 | drbd_queue_work(&mdev->data.work, &mdev->go_diskless); |
3780 | /* don't drbd_queue_work_front, | ||
3781 | * we need to serialize with the after_state_ch work | ||
3782 | * of the -> D_FAILED transition. */ | ||
3783 | } | 3805 | } |
3784 | 3806 | ||
3785 | /** | 3807 | /** |
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 87925e97e613..29e5c70e4e26 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c | |||
@@ -870,6 +870,11 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp | |||
870 | retcode = ERR_DISK_CONFIGURED; | 870 | retcode = ERR_DISK_CONFIGURED; |
871 | goto fail; | 871 | goto fail; |
872 | } | 872 | } |
873 | /* It may just now have detached because of IO error. Make sure | ||
874 | * drbd_ldev_destroy is done already, we may end up here very fast, | ||
875 | * e.g. if someone calls attach from the on-io-error handler, | ||
876 | * to realize a "hot spare" feature (not that I'd recommend that) */ | ||
877 | wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt)); | ||
873 | 878 | ||
874 | /* allocation not in the IO path, cqueue thread context */ | 879 | /* allocation not in the IO path, cqueue thread context */ |
875 | nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL); | 880 | nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL); |
@@ -1098,9 +1103,9 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp | |||
1098 | /* Reset the "barriers don't work" bits here, then force meta data to | 1103 | /* Reset the "barriers don't work" bits here, then force meta data to |
1099 | * be written, to ensure we determine if barriers are supported. */ | 1104 | * be written, to ensure we determine if barriers are supported. */ |
1100 | if (nbc->dc.no_md_flush) | 1105 | if (nbc->dc.no_md_flush) |
1101 | set_bit(MD_NO_BARRIER, &mdev->flags); | 1106 | set_bit(MD_NO_FUA, &mdev->flags); |
1102 | else | 1107 | else |
1103 | clear_bit(MD_NO_BARRIER, &mdev->flags); | 1108 | clear_bit(MD_NO_FUA, &mdev->flags); |
1104 | 1109 | ||
1105 | /* Point of no return reached. | 1110 | /* Point of no return reached. |
1106 | * Devices and memory are no longer released by error cleanup below. | 1111 | * Devices and memory are no longer released by error cleanup below. |
@@ -1112,8 +1117,8 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp | |||
1112 | nbc = NULL; | 1117 | nbc = NULL; |
1113 | resync_lru = NULL; | 1118 | resync_lru = NULL; |
1114 | 1119 | ||
1115 | mdev->write_ordering = WO_bio_barrier; | 1120 | mdev->write_ordering = WO_bdev_flush; |
1116 | drbd_bump_write_ordering(mdev, WO_bio_barrier); | 1121 | drbd_bump_write_ordering(mdev, WO_bdev_flush); |
1117 | 1122 | ||
1118 | if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY)) | 1123 | if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY)) |
1119 | set_bit(CRASHED_PRIMARY, &mdev->flags); | 1124 | set_bit(CRASHED_PRIMARY, &mdev->flags); |
@@ -1262,7 +1267,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp | |||
1262 | force_diskless_dec: | 1267 | force_diskless_dec: |
1263 | put_ldev(mdev); | 1268 | put_ldev(mdev); |
1264 | force_diskless: | 1269 | force_diskless: |
1265 | drbd_force_state(mdev, NS(disk, D_DISKLESS)); | 1270 | drbd_force_state(mdev, NS(disk, D_FAILED)); |
1266 | drbd_md_sync(mdev); | 1271 | drbd_md_sync(mdev); |
1267 | release_bdev2_fail: | 1272 | release_bdev2_fail: |
1268 | if (nbc) | 1273 | if (nbc) |
@@ -1285,10 +1290,19 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp | |||
1285 | return 0; | 1290 | return 0; |
1286 | } | 1291 | } |
1287 | 1292 | ||
1293 | /* Detaching the disk is a process in multiple stages. First we need to lock | ||
1294 | * out application IO, in-flight IO, IO stuck in drbd_al_begin_io. | ||
1295 | * Then we transition to D_DISKLESS, and wait for put_ldev() to return all | ||
1296 | * internal references as well. | ||
1297 | * Only then have we finally detached. */ | ||
1288 | static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | 1298 | static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, |
1289 | struct drbd_nl_cfg_reply *reply) | 1299 | struct drbd_nl_cfg_reply *reply) |
1290 | { | 1300 | { |
1301 | drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */ | ||
1291 | reply->ret_code = drbd_request_state(mdev, NS(disk, D_DISKLESS)); | 1302 | reply->ret_code = drbd_request_state(mdev, NS(disk, D_DISKLESS)); |
1303 | if (mdev->state.disk == D_DISKLESS) | ||
1304 | wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt)); | ||
1305 | drbd_resume_io(mdev); | ||
1292 | return 0; | 1306 | return 0; |
1293 | } | 1307 | } |
1294 | 1308 | ||
@@ -1953,7 +1967,6 @@ static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp | |||
1953 | if (test_bit(NEW_CUR_UUID, &mdev->flags)) { | 1967 | if (test_bit(NEW_CUR_UUID, &mdev->flags)) { |
1954 | drbd_uuid_new_current(mdev); | 1968 | drbd_uuid_new_current(mdev); |
1955 | clear_bit(NEW_CUR_UUID, &mdev->flags); | 1969 | clear_bit(NEW_CUR_UUID, &mdev->flags); |
1956 | drbd_md_sync(mdev); | ||
1957 | } | 1970 | } |
1958 | drbd_suspend_io(mdev); | 1971 | drbd_suspend_io(mdev); |
1959 | reply->ret_code = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0)); | 1972 | reply->ret_code = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0)); |
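With the detach path reworked here, tearing down the disk becomes an explicit hand-shake: drbd_suspend_io() keeps new requests out of drbd_al_begin_io(), the state request moves the disk towards D_DISKLESS, and only after wait_event() has seen local_cnt reach zero may the detach complete (or a subsequent attach proceed). Below is a rough userspace analogue of that wait-for-references idiom, using pthread primitives in place of wait_event()/wake_up(); the names are hypothetical, not kernel API.

#include <pthread.h>
#include <stdatomic.h>

struct refgate {
	pthread_mutex_t lock;
	pthread_cond_t  zero;
	atomic_int      cnt;		/* analogue of mdev->local_cnt */
};

/* Drop one reference; if it was the last, wake anyone waiting for idle. */
static void refgate_put(struct refgate *g)
{
	if (atomic_fetch_sub(&g->cnt, 1) == 1) {
		pthread_mutex_lock(&g->lock);
		pthread_cond_broadcast(&g->zero);
		pthread_mutex_unlock(&g->lock);
	}
}

/* Analogue of wait_event(misc_wait, !atomic_read(&local_cnt)):
 * sleep until every outstanding reference has been returned. */
static void refgate_wait_idle(struct refgate *g)
{
	pthread_mutex_lock(&g->lock);
	while (atomic_load(&g->cnt) != 0)
		pthread_cond_wait(&g->zero, &g->lock);
	pthread_mutex_unlock(&g->lock);
}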
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c index ad325c5d0ce1..7e6ac307e2de 100644 --- a/drivers/block/drbd/drbd_proc.c +++ b/drivers/block/drbd/drbd_proc.c | |||
@@ -158,7 +158,6 @@ static int drbd_seq_show(struct seq_file *seq, void *v) | |||
158 | [WO_none] = 'n', | 158 | [WO_none] = 'n', |
159 | [WO_drain_io] = 'd', | 159 | [WO_drain_io] = 'd', |
160 | [WO_bdev_flush] = 'f', | 160 | [WO_bdev_flush] = 'f', |
161 | [WO_bio_barrier] = 'b', | ||
162 | }; | 161 | }; |
163 | 162 | ||
164 | seq_printf(seq, "version: " REL_VERSION " (api:%d/proto:%d-%d)\n%s\n", | 163 | seq_printf(seq, "version: " REL_VERSION " (api:%d/proto:%d-%d)\n%s\n", |
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index efd6169acf2f..d299fe9e78c8 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c | |||
@@ -49,11 +49,6 @@ | |||
49 | 49 | ||
50 | #include "drbd_vli.h" | 50 | #include "drbd_vli.h" |
51 | 51 | ||
52 | struct flush_work { | ||
53 | struct drbd_work w; | ||
54 | struct drbd_epoch *epoch; | ||
55 | }; | ||
56 | |||
57 | enum finish_epoch { | 52 | enum finish_epoch { |
58 | FE_STILL_LIVE, | 53 | FE_STILL_LIVE, |
59 | FE_DESTROYED, | 54 | FE_DESTROYED, |
@@ -66,16 +61,6 @@ static int drbd_do_auth(struct drbd_conf *mdev); | |||
66 | static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event); | 61 | static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event); |
67 | static int e_end_block(struct drbd_conf *, struct drbd_work *, int); | 62 | static int e_end_block(struct drbd_conf *, struct drbd_work *, int); |
68 | 63 | ||
69 | static struct drbd_epoch *previous_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch) | ||
70 | { | ||
71 | struct drbd_epoch *prev; | ||
72 | spin_lock(&mdev->epoch_lock); | ||
73 | prev = list_entry(epoch->list.prev, struct drbd_epoch, list); | ||
74 | if (prev == epoch || prev == mdev->current_epoch) | ||
75 | prev = NULL; | ||
76 | spin_unlock(&mdev->epoch_lock); | ||
77 | return prev; | ||
78 | } | ||
79 | 64 | ||
80 | #define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN) | 65 | #define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN) |
81 | 66 | ||
@@ -981,7 +966,7 @@ static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsi | |||
981 | return TRUE; | 966 | return TRUE; |
982 | } | 967 | } |
983 | 968 | ||
984 | static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch) | 969 | static void drbd_flush(struct drbd_conf *mdev) |
985 | { | 970 | { |
986 | int rv; | 971 | int rv; |
987 | 972 | ||
@@ -997,24 +982,6 @@ static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct d | |||
997 | } | 982 | } |
998 | put_ldev(mdev); | 983 | put_ldev(mdev); |
999 | } | 984 | } |
1000 | |||
1001 | return drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE); | ||
1002 | } | ||
1003 | |||
1004 | static int w_flush(struct drbd_conf *mdev, struct drbd_work *w, int cancel) | ||
1005 | { | ||
1006 | struct flush_work *fw = (struct flush_work *)w; | ||
1007 | struct drbd_epoch *epoch = fw->epoch; | ||
1008 | |||
1009 | kfree(w); | ||
1010 | |||
1011 | if (!test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags)) | ||
1012 | drbd_flush_after_epoch(mdev, epoch); | ||
1013 | |||
1014 | drbd_may_finish_epoch(mdev, epoch, EV_PUT | | ||
1015 | (mdev->state.conn < C_CONNECTED ? EV_CLEANUP : 0)); | ||
1016 | |||
1017 | return 1; | ||
1018 | } | 985 | } |
1019 | 986 | ||
1020 | /** | 987 | /** |
@@ -1027,15 +994,13 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev, | |||
1027 | struct drbd_epoch *epoch, | 994 | struct drbd_epoch *epoch, |
1028 | enum epoch_event ev) | 995 | enum epoch_event ev) |
1029 | { | 996 | { |
1030 | int finish, epoch_size; | 997 | int epoch_size; |
1031 | struct drbd_epoch *next_epoch; | 998 | struct drbd_epoch *next_epoch; |
1032 | int schedule_flush = 0; | ||
1033 | enum finish_epoch rv = FE_STILL_LIVE; | 999 | enum finish_epoch rv = FE_STILL_LIVE; |
1034 | 1000 | ||
1035 | spin_lock(&mdev->epoch_lock); | 1001 | spin_lock(&mdev->epoch_lock); |
1036 | do { | 1002 | do { |
1037 | next_epoch = NULL; | 1003 | next_epoch = NULL; |
1038 | finish = 0; | ||
1039 | 1004 | ||
1040 | epoch_size = atomic_read(&epoch->epoch_size); | 1005 | epoch_size = atomic_read(&epoch->epoch_size); |
1041 | 1006 | ||
@@ -1045,16 +1010,6 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev, | |||
1045 | break; | 1010 | break; |
1046 | case EV_GOT_BARRIER_NR: | 1011 | case EV_GOT_BARRIER_NR: |
1047 | set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags); | 1012 | set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags); |
1048 | |||
1049 | /* Special case: If we just switched from WO_bio_barrier to | ||
1050 | WO_bdev_flush we should not finish the current epoch */ | ||
1051 | if (test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags) && epoch_size == 1 && | ||
1052 | mdev->write_ordering != WO_bio_barrier && | ||
1053 | epoch == mdev->current_epoch) | ||
1054 | clear_bit(DE_CONTAINS_A_BARRIER, &epoch->flags); | ||
1055 | break; | ||
1056 | case EV_BARRIER_DONE: | ||
1057 | set_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags); | ||
1058 | break; | 1013 | break; |
1059 | case EV_BECAME_LAST: | 1014 | case EV_BECAME_LAST: |
1060 | /* nothing to do*/ | 1015 | /* nothing to do*/ |
@@ -1063,23 +1018,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev, | |||
1063 | 1018 | ||
1064 | if (epoch_size != 0 && | 1019 | if (epoch_size != 0 && |
1065 | atomic_read(&epoch->active) == 0 && | 1020 | atomic_read(&epoch->active) == 0 && |
1066 | test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) && | 1021 | test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) { |
1067 | epoch->list.prev == &mdev->current_epoch->list && | ||
1068 | !test_bit(DE_IS_FINISHING, &epoch->flags)) { | ||
1069 | /* Nearly all conditions are met to finish that epoch... */ | ||
1070 | if (test_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags) || | ||
1071 | mdev->write_ordering == WO_none || | ||
1072 | (epoch_size == 1 && test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) || | ||
1073 | ev & EV_CLEANUP) { | ||
1074 | finish = 1; | ||
1075 | set_bit(DE_IS_FINISHING, &epoch->flags); | ||
1076 | } else if (!test_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags) && | ||
1077 | mdev->write_ordering == WO_bio_barrier) { | ||
1078 | atomic_inc(&epoch->active); | ||
1079 | schedule_flush = 1; | ||
1080 | } | ||
1081 | } | ||
1082 | if (finish) { | ||
1083 | if (!(ev & EV_CLEANUP)) { | 1022 | if (!(ev & EV_CLEANUP)) { |
1084 | spin_unlock(&mdev->epoch_lock); | 1023 | spin_unlock(&mdev->epoch_lock); |
1085 | drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size); | 1024 | drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size); |
@@ -1102,6 +1041,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev, | |||
1102 | /* atomic_set(&epoch->active, 0); is already zero */ | 1041 | /* atomic_set(&epoch->active, 0); is already zero */ |
1103 | if (rv == FE_STILL_LIVE) | 1042 | if (rv == FE_STILL_LIVE) |
1104 | rv = FE_RECYCLED; | 1043 | rv = FE_RECYCLED; |
1044 | wake_up(&mdev->ee_wait); | ||
1105 | } | 1045 | } |
1106 | } | 1046 | } |
1107 | 1047 | ||
@@ -1113,22 +1053,6 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev, | |||
1113 | 1053 | ||
1114 | spin_unlock(&mdev->epoch_lock); | 1054 | spin_unlock(&mdev->epoch_lock); |
1115 | 1055 | ||
1116 | if (schedule_flush) { | ||
1117 | struct flush_work *fw; | ||
1118 | fw = kmalloc(sizeof(*fw), GFP_ATOMIC); | ||
1119 | if (fw) { | ||
1120 | fw->w.cb = w_flush; | ||
1121 | fw->epoch = epoch; | ||
1122 | drbd_queue_work(&mdev->data.work, &fw->w); | ||
1123 | } else { | ||
1124 | dev_warn(DEV, "Could not kmalloc a flush_work obj\n"); | ||
1125 | set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags); | ||
1126 | /* That is not a recursion, only one level */ | ||
1127 | drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE); | ||
1128 | drbd_may_finish_epoch(mdev, epoch, EV_PUT); | ||
1129 | } | ||
1130 | } | ||
1131 | |||
1132 | return rv; | 1056 | return rv; |
1133 | } | 1057 | } |
1134 | 1058 | ||
@@ -1144,19 +1068,16 @@ void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) | |||
1144 | [WO_none] = "none", | 1068 | [WO_none] = "none", |
1145 | [WO_drain_io] = "drain", | 1069 | [WO_drain_io] = "drain", |
1146 | [WO_bdev_flush] = "flush", | 1070 | [WO_bdev_flush] = "flush", |
1147 | [WO_bio_barrier] = "barrier", | ||
1148 | }; | 1071 | }; |
1149 | 1072 | ||
1150 | pwo = mdev->write_ordering; | 1073 | pwo = mdev->write_ordering; |
1151 | wo = min(pwo, wo); | 1074 | wo = min(pwo, wo); |
1152 | if (wo == WO_bio_barrier && mdev->ldev->dc.no_disk_barrier) | ||
1153 | wo = WO_bdev_flush; | ||
1154 | if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush) | 1075 | if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush) |
1155 | wo = WO_drain_io; | 1076 | wo = WO_drain_io; |
1156 | if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain) | 1077 | if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain) |
1157 | wo = WO_none; | 1078 | wo = WO_none; |
1158 | mdev->write_ordering = wo; | 1079 | mdev->write_ordering = wo; |
1159 | if (pwo != mdev->write_ordering || wo == WO_bio_barrier) | 1080 | if (pwo != mdev->write_ordering || wo == WO_bdev_flush) |
1160 | dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]); | 1081 | dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]); |
1161 | } | 1082 | } |
1162 | 1083 | ||
@@ -1192,7 +1113,7 @@ next_bio: | |||
1192 | bio->bi_sector = sector; | 1113 | bio->bi_sector = sector; |
1193 | bio->bi_bdev = mdev->ldev->backing_bdev; | 1114 | bio->bi_bdev = mdev->ldev->backing_bdev; |
1194 | /* we special case some flags in the multi-bio case, see below | 1115 | /* we special case some flags in the multi-bio case, see below |
1195 | * (REQ_UNPLUG, REQ_HARDBARRIER) */ | 1116 | * (REQ_UNPLUG) */ |
1196 | bio->bi_rw = rw; | 1117 | bio->bi_rw = rw; |
1197 | bio->bi_private = e; | 1118 | bio->bi_private = e; |
1198 | bio->bi_end_io = drbd_endio_sec; | 1119 | bio->bi_end_io = drbd_endio_sec; |
@@ -1226,11 +1147,6 @@ next_bio: | |||
1226 | bio->bi_rw &= ~REQ_UNPLUG; | 1147 | bio->bi_rw &= ~REQ_UNPLUG; |
1227 | 1148 | ||
1228 | drbd_generic_make_request(mdev, fault_type, bio); | 1149 | drbd_generic_make_request(mdev, fault_type, bio); |
1229 | |||
1230 | /* strip off REQ_HARDBARRIER, | ||
1231 | * unless it is the first or last bio */ | ||
1232 | if (bios && bios->bi_next) | ||
1233 | bios->bi_rw &= ~REQ_HARDBARRIER; | ||
1234 | } while (bios); | 1150 | } while (bios); |
1235 | maybe_kick_lo(mdev); | 1151 | maybe_kick_lo(mdev); |
1236 | return 0; | 1152 | return 0; |
@@ -1244,45 +1160,9 @@ fail: | |||
1244 | return -ENOMEM; | 1160 | return -ENOMEM; |
1245 | } | 1161 | } |
1246 | 1162 | ||
1247 | /** | ||
1248 | * w_e_reissue() - Worker callback; Resubmit a bio, without REQ_HARDBARRIER set | ||
1249 | * @mdev: DRBD device. | ||
1250 | * @w: work object. | ||
1251 | * @cancel: The connection will be closed anyways (unused in this callback) | ||
1252 | */ | ||
1253 | int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __releases(local) | ||
1254 | { | ||
1255 | struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w; | ||
1256 | /* We leave DE_CONTAINS_A_BARRIER and EE_IS_BARRIER in place, | ||
1257 | (and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch) | ||
1258 | so that we can finish that epoch in drbd_may_finish_epoch(). | ||
1259 | That is necessary if we already have a long chain of Epochs, before | ||
1260 | we realize that REQ_HARDBARRIER is actually not supported */ | ||
1261 | |||
1262 | /* As long as the -ENOTSUPP on the barrier is reported immediately | ||
1263 | that will never trigger. If it is reported late, we will just | ||
1264 | print that warning and continue correctly for all future requests | ||
1265 | with WO_bdev_flush */ | ||
1266 | if (previous_epoch(mdev, e->epoch)) | ||
1267 | dev_warn(DEV, "Write ordering was not enforced (one time event)\n"); | ||
1268 | |||
1269 | /* we still have a local reference, | ||
1270 | * get_ldev was done in receive_Data. */ | ||
1271 | |||
1272 | e->w.cb = e_end_block; | ||
1273 | if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_DT_WR) != 0) { | ||
1274 | /* drbd_submit_ee fails for one reason only: | ||
1275 | * if was not able to allocate sufficient bios. | ||
1276 | * requeue, try again later. */ | ||
1277 | e->w.cb = w_e_reissue; | ||
1278 | drbd_queue_work(&mdev->data.work, &e->w); | ||
1279 | } | ||
1280 | return 1; | ||
1281 | } | ||
1282 | |||
1283 | static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) | 1163 | static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) |
1284 | { | 1164 | { |
1285 | int rv, issue_flush; | 1165 | int rv; |
1286 | struct p_barrier *p = &mdev->data.rbuf.barrier; | 1166 | struct p_barrier *p = &mdev->data.rbuf.barrier; |
1287 | struct drbd_epoch *epoch; | 1167 | struct drbd_epoch *epoch; |
1288 | 1168 | ||
@@ -1300,44 +1180,40 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign | |||
1300 | * Therefore we must send the barrier_ack after the barrier request was | 1180 | * Therefore we must send the barrier_ack after the barrier request was |
1301 | * completed. */ | 1181 | * completed. */ |
1302 | switch (mdev->write_ordering) { | 1182 | switch (mdev->write_ordering) { |
1303 | case WO_bio_barrier: | ||
1304 | case WO_none: | 1183 | case WO_none: |
1305 | if (rv == FE_RECYCLED) | 1184 | if (rv == FE_RECYCLED) |
1306 | return TRUE; | 1185 | return TRUE; |
1307 | break; | 1186 | |
1187 | /* receiver context, in the writeout path of the other node. | ||
1188 | * avoid potential distributed deadlock */ | ||
1189 | epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO); | ||
1190 | if (epoch) | ||
1191 | break; | ||
1192 | else | ||
1193 | dev_warn(DEV, "Allocation of an epoch failed, slowing down\n"); | ||
1194 | /* Fall through */ | ||
1308 | 1195 | ||
1309 | case WO_bdev_flush: | 1196 | case WO_bdev_flush: |
1310 | case WO_drain_io: | 1197 | case WO_drain_io: |
1311 | if (rv == FE_STILL_LIVE) { | ||
1312 | set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags); | ||
1313 | drbd_wait_ee_list_empty(mdev, &mdev->active_ee); | ||
1314 | rv = drbd_flush_after_epoch(mdev, mdev->current_epoch); | ||
1315 | } | ||
1316 | if (rv == FE_RECYCLED) | ||
1317 | return TRUE; | ||
1318 | |||
1319 | /* The asender will send all the ACKs and barrier ACKs out, since | ||
1320 | all EEs moved from the active_ee to the done_ee. We need to | ||
1321 | provide a new epoch object for the EEs that come in soon */ | ||
1322 | break; | ||
1323 | } | ||
1324 | |||
1325 | /* receiver context, in the writeout path of the other node. | ||
1326 | * avoid potential distributed deadlock */ | ||
1327 | epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO); | ||
1328 | if (!epoch) { | ||
1329 | dev_warn(DEV, "Allocation of an epoch failed, slowing down\n"); | ||
1330 | issue_flush = !test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags); | ||
1331 | drbd_wait_ee_list_empty(mdev, &mdev->active_ee); | 1198 | drbd_wait_ee_list_empty(mdev, &mdev->active_ee); |
1332 | if (issue_flush) { | 1199 | drbd_flush(mdev); |
1333 | rv = drbd_flush_after_epoch(mdev, mdev->current_epoch); | 1200 | |
1334 | if (rv == FE_RECYCLED) | 1201 | if (atomic_read(&mdev->current_epoch->epoch_size)) { |
1335 | return TRUE; | 1202 | epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO); |
1203 | if (epoch) | ||
1204 | break; | ||
1336 | } | 1205 | } |
1337 | 1206 | ||
1338 | drbd_wait_ee_list_empty(mdev, &mdev->done_ee); | 1207 | epoch = mdev->current_epoch; |
1208 | wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0); | ||
1209 | |||
1210 | D_ASSERT(atomic_read(&epoch->active) == 0); | ||
1211 | D_ASSERT(epoch->flags == 0); | ||
1339 | 1212 | ||
1340 | return TRUE; | 1213 | return TRUE; |
1214 | default: | ||
1215 | dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering); | ||
1216 | return FALSE; | ||
1341 | } | 1217 | } |
1342 | 1218 | ||
1343 | epoch->flags = 0; | 1219 | epoch->flags = 0; |
@@ -1652,15 +1528,8 @@ static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel) | |||
1652 | { | 1528 | { |
1653 | struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w; | 1529 | struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w; |
1654 | sector_t sector = e->sector; | 1530 | sector_t sector = e->sector; |
1655 | struct drbd_epoch *epoch; | ||
1656 | int ok = 1, pcmd; | 1531 | int ok = 1, pcmd; |
1657 | 1532 | ||
1658 | if (e->flags & EE_IS_BARRIER) { | ||
1659 | epoch = previous_epoch(mdev, e->epoch); | ||
1660 | if (epoch) | ||
1661 | drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE + (cancel ? EV_CLEANUP : 0)); | ||
1662 | } | ||
1663 | |||
1664 | if (mdev->net_conf->wire_protocol == DRBD_PROT_C) { | 1533 | if (mdev->net_conf->wire_protocol == DRBD_PROT_C) { |
1665 | if (likely((e->flags & EE_WAS_ERROR) == 0)) { | 1534 | if (likely((e->flags & EE_WAS_ERROR) == 0)) { |
1666 | pcmd = (mdev->state.conn >= C_SYNC_SOURCE && | 1535 | pcmd = (mdev->state.conn >= C_SYNC_SOURCE && |
@@ -1817,27 +1686,6 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
1817 | e->epoch = mdev->current_epoch; | 1686 | e->epoch = mdev->current_epoch; |
1818 | atomic_inc(&e->epoch->epoch_size); | 1687 | atomic_inc(&e->epoch->epoch_size); |
1819 | atomic_inc(&e->epoch->active); | 1688 | atomic_inc(&e->epoch->active); |
1820 | |||
1821 | if (mdev->write_ordering == WO_bio_barrier && atomic_read(&e->epoch->epoch_size) == 1) { | ||
1822 | struct drbd_epoch *epoch; | ||
1823 | /* Issue a barrier if we start a new epoch, and the previous epoch | ||
1824 | was not a epoch containing a single request which already was | ||
1825 | a Barrier. */ | ||
1826 | epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list); | ||
1827 | if (epoch == e->epoch) { | ||
1828 | set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags); | ||
1829 | rw |= REQ_HARDBARRIER; | ||
1830 | e->flags |= EE_IS_BARRIER; | ||
1831 | } else { | ||
1832 | if (atomic_read(&epoch->epoch_size) > 1 || | ||
1833 | !test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) { | ||
1834 | set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags); | ||
1835 | set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags); | ||
1836 | rw |= REQ_HARDBARRIER; | ||
1837 | e->flags |= EE_IS_BARRIER; | ||
1838 | } | ||
1839 | } | ||
1840 | } | ||
1841 | spin_unlock(&mdev->epoch_lock); | 1689 | spin_unlock(&mdev->epoch_lock); |
1842 | 1690 | ||
1843 | dp_flags = be32_to_cpu(p->dp_flags); | 1691 | dp_flags = be32_to_cpu(p->dp_flags); |
@@ -1995,10 +1843,11 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
1995 | break; | 1843 | break; |
1996 | } | 1844 | } |
1997 | 1845 | ||
1998 | if (mdev->state.pdsk == D_DISKLESS) { | 1846 | if (mdev->state.pdsk < D_INCONSISTENT) { |
1999 | /* In case we have the only disk of the cluster, */ | 1847 | /* In case we have the only disk of the cluster, */ |
2000 | drbd_set_out_of_sync(mdev, e->sector, e->size); | 1848 | drbd_set_out_of_sync(mdev, e->sector, e->size); |
2001 | e->flags |= EE_CALL_AL_COMPLETE_IO; | 1849 | e->flags |= EE_CALL_AL_COMPLETE_IO; |
1850 | e->flags &= ~EE_MAY_SET_IN_SYNC; | ||
2002 | drbd_al_begin_io(mdev, e->sector); | 1851 | drbd_al_begin_io(mdev, e->sector); |
2003 | } | 1852 | } |
2004 | 1853 | ||
@@ -3362,7 +3211,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
3362 | if (ns.conn == C_MASK) { | 3211 | if (ns.conn == C_MASK) { |
3363 | ns.conn = C_CONNECTED; | 3212 | ns.conn = C_CONNECTED; |
3364 | if (mdev->state.disk == D_NEGOTIATING) { | 3213 | if (mdev->state.disk == D_NEGOTIATING) { |
3365 | drbd_force_state(mdev, NS(disk, D_DISKLESS)); | 3214 | drbd_force_state(mdev, NS(disk, D_FAILED)); |
3366 | } else if (peer_state.disk == D_NEGOTIATING) { | 3215 | } else if (peer_state.disk == D_NEGOTIATING) { |
3367 | dev_err(DEV, "Disk attach process on the peer node was aborted.\n"); | 3216 | dev_err(DEV, "Disk attach process on the peer node was aborted.\n"); |
3368 | peer_state.disk = D_DISKLESS; | 3217 | peer_state.disk = D_DISKLESS; |
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 9e91a2545fc8..11a75d32a2e2 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c | |||
@@ -258,7 +258,7 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m) | |||
258 | if (!hlist_unhashed(&req->colision)) | 258 | if (!hlist_unhashed(&req->colision)) |
259 | hlist_del(&req->colision); | 259 | hlist_del(&req->colision); |
260 | else | 260 | else |
261 | D_ASSERT((s & RQ_NET_MASK) == 0); | 261 | D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0); |
262 | 262 | ||
263 | /* for writes we need to do some extra housekeeping */ | 263 | /* for writes we need to do some extra housekeeping */ |
264 | if (rw == WRITE) | 264 | if (rw == WRITE) |
@@ -813,7 +813,8 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio) | |||
813 | mdev->state.conn >= C_CONNECTED)); | 813 | mdev->state.conn >= C_CONNECTED)); |
814 | 814 | ||
815 | if (!(local || remote) && !is_susp(mdev->state)) { | 815 | if (!(local || remote) && !is_susp(mdev->state)) { |
816 | dev_err(DEV, "IO ERROR: neither local nor remote disk\n"); | 816 | if (__ratelimit(&drbd_ratelimit_state)) |
817 | dev_err(DEV, "IO ERROR: neither local nor remote disk\n"); | ||
817 | goto fail_free_complete; | 818 | goto fail_free_complete; |
818 | } | 819 | } |
819 | 820 | ||
@@ -942,12 +943,21 @@ allocate_barrier: | |||
942 | if (local) { | 943 | if (local) { |
943 | req->private_bio->bi_bdev = mdev->ldev->backing_bdev; | 944 | req->private_bio->bi_bdev = mdev->ldev->backing_bdev; |
944 | 945 | ||
945 | if (FAULT_ACTIVE(mdev, rw == WRITE ? DRBD_FAULT_DT_WR | 946 | /* State may have changed since we grabbed our reference on the |
946 | : rw == READ ? DRBD_FAULT_DT_RD | 947 | * mdev->ldev member. Double check, and short-circuit to endio. |
947 | : DRBD_FAULT_DT_RA)) | 948 | * In case the last activity log transaction failed to get on |
949 | * stable storage, and this is a WRITE, we may not even submit | ||
950 | * this bio. */ | ||
951 | if (get_ldev(mdev)) { | ||
952 | if (FAULT_ACTIVE(mdev, rw == WRITE ? DRBD_FAULT_DT_WR | ||
953 | : rw == READ ? DRBD_FAULT_DT_RD | ||
954 | : DRBD_FAULT_DT_RA)) | ||
955 | bio_endio(req->private_bio, -EIO); | ||
956 | else | ||
957 | generic_make_request(req->private_bio); | ||
958 | put_ldev(mdev); | ||
959 | } else | ||
948 | bio_endio(req->private_bio, -EIO); | 960 | bio_endio(req->private_bio, -EIO); |
949 | else | ||
950 | generic_make_request(req->private_bio); | ||
951 | } | 961 | } |
952 | 962 | ||
953 | /* we need to plug ALWAYS since we possibly need to kick lo_dev. | 963 | /* we need to plug ALWAYS since we possibly need to kick lo_dev. |
@@ -1022,20 +1032,6 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio) | |||
1022 | return 0; | 1032 | return 0; |
1023 | } | 1033 | } |
1024 | 1034 | ||
1025 | /* Reject barrier requests if we know the underlying device does | ||
1026 | * not support them. | ||
1027 | * XXX: Need to get this info from peer as well some how so we | ||
1028 | * XXX: reject if EITHER side/data/metadata area does not support them. | ||
1029 | * | ||
1030 | * because of those XXX, this is not yet enabled, | ||
1031 | * i.e. in drbd_init_set_defaults we set the NO_BARRIER_SUPP bit. | ||
1032 | */ | ||
1033 | if (unlikely(bio->bi_rw & REQ_HARDBARRIER) && test_bit(NO_BARRIER_SUPP, &mdev->flags)) { | ||
1034 | /* dev_warn(DEV, "Rejecting barrier request as underlying device does not support\n"); */ | ||
1035 | bio_endio(bio, -EOPNOTSUPP); | ||
1036 | return 0; | ||
1037 | } | ||
1038 | |||
1039 | /* | 1035 | /* |
1040 | * what we "blindly" assume: | 1036 | * what we "blindly" assume: |
1041 | */ | 1037 | */ |
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index 108d58015cd1..b0551ba7ad0c 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c | |||
@@ -102,12 +102,6 @@ void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local) | |||
102 | put_ldev(mdev); | 102 | put_ldev(mdev); |
103 | } | 103 | } |
104 | 104 | ||
105 | static int is_failed_barrier(int ee_flags) | ||
106 | { | ||
107 | return (ee_flags & (EE_IS_BARRIER|EE_WAS_ERROR|EE_RESUBMITTED)) | ||
108 | == (EE_IS_BARRIER|EE_WAS_ERROR); | ||
109 | } | ||
110 | |||
111 | /* writes on behalf of the partner, or resync writes, | 105 | /* writes on behalf of the partner, or resync writes, |
112 | * "submitted" by the receiver, final stage. */ | 106 | * "submitted" by the receiver, final stage. */ |
113 | static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(local) | 107 | static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(local) |
@@ -119,21 +113,6 @@ static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(lo | |||
119 | int is_syncer_req; | 113 | int is_syncer_req; |
120 | int do_al_complete_io; | 114 | int do_al_complete_io; |
121 | 115 | ||
122 | /* if this is a failed barrier request, disable use of barriers, | ||
123 | * and schedule for resubmission */ | ||
124 | if (is_failed_barrier(e->flags)) { | ||
125 | drbd_bump_write_ordering(mdev, WO_bdev_flush); | ||
126 | spin_lock_irqsave(&mdev->req_lock, flags); | ||
127 | list_del(&e->w.list); | ||
128 | e->flags = (e->flags & ~EE_WAS_ERROR) | EE_RESUBMITTED; | ||
129 | e->w.cb = w_e_reissue; | ||
130 | /* put_ldev actually happens below, once we come here again. */ | ||
131 | __release(local); | ||
132 | spin_unlock_irqrestore(&mdev->req_lock, flags); | ||
133 | drbd_queue_work(&mdev->data.work, &e->w); | ||
134 | return; | ||
135 | } | ||
136 | |||
137 | D_ASSERT(e->block_id != ID_VACANT); | 116 | D_ASSERT(e->block_id != ID_VACANT); |
138 | 117 | ||
139 | /* after we moved e to done_ee, | 118 | /* after we moved e to done_ee, |
@@ -925,7 +904,7 @@ out: | |||
925 | drbd_md_sync(mdev); | 904 | drbd_md_sync(mdev); |
926 | 905 | ||
927 | if (test_and_clear_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags)) { | 906 | if (test_and_clear_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags)) { |
928 | dev_warn(DEV, "Writing the whole bitmap, due to failed kmalloc\n"); | 907 | dev_info(DEV, "Writing the whole bitmap\n"); |
929 | drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished"); | 908 | drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished"); |
930 | } | 909 | } |
931 | 910 | ||
diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 1e5284ef65fa..7ea0bea2f7e3 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c | |||
@@ -481,12 +481,6 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio) | |||
481 | if (bio_rw(bio) == WRITE) { | 481 | if (bio_rw(bio) == WRITE) { |
482 | struct file *file = lo->lo_backing_file; | 482 | struct file *file = lo->lo_backing_file; |
483 | 483 | ||
484 | /* REQ_HARDBARRIER is deprecated */ | ||
485 | if (bio->bi_rw & REQ_HARDBARRIER) { | ||
486 | ret = -EOPNOTSUPP; | ||
487 | goto out; | ||
488 | } | ||
489 | |||
490 | if (bio->bi_rw & REQ_FLUSH) { | 484 | if (bio->bi_rw & REQ_FLUSH) { |
491 | ret = vfs_fsync(file, 0); | 485 | ret = vfs_fsync(file, 0); |
492 | if (unlikely(ret && ret != -EINVAL)) { | 486 | if (unlikely(ret && ret != -EINVAL)) { |
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 06e2812ba124..255035cfc88a 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c | |||
@@ -289,8 +289,6 @@ static int blkif_queue_request(struct request *req) | |||
289 | 289 | ||
290 | ring_req->operation = rq_data_dir(req) ? | 290 | ring_req->operation = rq_data_dir(req) ? |
291 | BLKIF_OP_WRITE : BLKIF_OP_READ; | 291 | BLKIF_OP_WRITE : BLKIF_OP_READ; |
292 | if (req->cmd_flags & REQ_HARDBARRIER) | ||
293 | ring_req->operation = BLKIF_OP_WRITE_BARRIER; | ||
294 | 292 | ||
295 | ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg); | 293 | ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg); |
296 | BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST); | 294 | BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST); |
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index d120a5c1c093..ab3894f742c3 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c | |||
@@ -68,6 +68,9 @@ static struct usb_device_id btusb_table[] = { | |||
68 | /* Apple MacBookPro6,2 */ | 68 | /* Apple MacBookPro6,2 */ |
69 | { USB_DEVICE(0x05ac, 0x8218) }, | 69 | { USB_DEVICE(0x05ac, 0x8218) }, |
70 | 70 | ||
71 | /* Apple MacBookAir3,1, MacBookAir3,2 */ | ||
72 | { USB_DEVICE(0x05ac, 0x821b) }, | ||
73 | |||
71 | /* AVM BlueFRITZ! USB v2.0 */ | 74 | /* AVM BlueFRITZ! USB v2.0 */ |
72 | { USB_DEVICE(0x057c, 0x3800) }, | 75 | { USB_DEVICE(0x057c, 0x3800) }, |
73 | 76 | ||
@@ -1029,6 +1032,8 @@ static int btusb_probe(struct usb_interface *intf, | |||
1029 | 1032 | ||
1030 | usb_set_intfdata(intf, data); | 1033 | usb_set_intfdata(intf, data); |
1031 | 1034 | ||
1035 | usb_enable_autosuspend(interface_to_usbdev(intf)); | ||
1036 | |||
1032 | return 0; | 1037 | return 0; |
1033 | } | 1038 | } |
1034 | 1039 | ||
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 6b6760ea2435..9272c38dd3c6 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c | |||
@@ -1210,14 +1210,14 @@ static void gen6_write_entry(dma_addr_t addr, unsigned int entry, | |||
1210 | unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT; | 1210 | unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT; |
1211 | u32 pte_flags; | 1211 | u32 pte_flags; |
1212 | 1212 | ||
1213 | if (type_mask == AGP_USER_UNCACHED_MEMORY) | 1213 | if (type_mask == AGP_USER_MEMORY) |
1214 | pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID; | 1214 | pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID; |
1215 | else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) { | 1215 | else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) { |
1216 | pte_flags = GEN6_PTE_LLC | I810_PTE_VALID; | 1216 | pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID; |
1217 | if (gfdt) | 1217 | if (gfdt) |
1218 | pte_flags |= GEN6_PTE_GFDT; | 1218 | pte_flags |= GEN6_PTE_GFDT; |
1219 | } else { /* set 'normal'/'cached' to LLC by default */ | 1219 | } else { /* set 'normal'/'cached' to LLC by default */ |
1220 | pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID; | 1220 | pte_flags = GEN6_PTE_LLC | I810_PTE_VALID; |
1221 | if (gfdt) | 1221 | if (gfdt) |
1222 | pte_flags |= GEN6_PTE_GFDT; | 1222 | pte_flags |= GEN6_PTE_GFDT; |
1223 | } | 1223 | } |
diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c index b0a70461a12c..c0bd6f472c52 100644 --- a/drivers/char/amiserial.c +++ b/drivers/char/amiserial.c | |||
@@ -1299,7 +1299,6 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file, | |||
1299 | { | 1299 | { |
1300 | struct async_struct * info = tty->driver_data; | 1300 | struct async_struct * info = tty->driver_data; |
1301 | struct async_icount cprev, cnow; /* kernel counter temps */ | 1301 | struct async_icount cprev, cnow; /* kernel counter temps */ |
1302 | struct serial_icounter_struct icount; | ||
1303 | void __user *argp = (void __user *)arg; | 1302 | void __user *argp = (void __user *)arg; |
1304 | unsigned long flags; | 1303 | unsigned long flags; |
1305 | 1304 | ||
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c index 3bc0eef88717..d72433f2d310 100644 --- a/drivers/char/i8k.c +++ b/drivers/char/i8k.c | |||
@@ -120,7 +120,7 @@ static int i8k_smm(struct smm_regs *regs) | |||
120 | int eax = regs->eax; | 120 | int eax = regs->eax; |
121 | 121 | ||
122 | #if defined(CONFIG_X86_64) | 122 | #if defined(CONFIG_X86_64) |
123 | asm("pushq %%rax\n\t" | 123 | asm volatile("pushq %%rax\n\t" |
124 | "movl 0(%%rax),%%edx\n\t" | 124 | "movl 0(%%rax),%%edx\n\t" |
125 | "pushq %%rdx\n\t" | 125 | "pushq %%rdx\n\t" |
126 | "movl 4(%%rax),%%ebx\n\t" | 126 | "movl 4(%%rax),%%ebx\n\t" |
@@ -146,7 +146,7 @@ static int i8k_smm(struct smm_regs *regs) | |||
146 | : "a"(regs) | 146 | : "a"(regs) |
147 | : "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory"); | 147 | : "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory"); |
148 | #else | 148 | #else |
149 | asm("pushl %%eax\n\t" | 149 | asm volatile("pushl %%eax\n\t" |
150 | "movl 0(%%eax),%%edx\n\t" | 150 | "movl 0(%%eax),%%edx\n\t" |
151 | "push %%edx\n\t" | 151 | "push %%edx\n\t" |
152 | "movl 4(%%eax),%%ebx\n\t" | 152 | "movl 4(%%eax),%%ebx\n\t" |
@@ -167,7 +167,8 @@ static int i8k_smm(struct smm_regs *regs) | |||
167 | "movl %%edx,0(%%eax)\n\t" | 167 | "movl %%edx,0(%%eax)\n\t" |
168 | "lahf\n\t" | 168 | "lahf\n\t" |
169 | "shrl $8,%%eax\n\t" | 169 | "shrl $8,%%eax\n\t" |
170 | "andl $1,%%eax\n":"=a"(rc) | 170 | "andl $1,%%eax\n" |
171 | :"=a"(rc) | ||
171 | : "a"(regs) | 172 | : "a"(regs) |
172 | : "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory"); | 173 | : "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory"); |
173 | #endif | 174 | #endif |
diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c index dd3f9b1f11b4..294d03e8c61a 100644 --- a/drivers/char/nozomi.c +++ b/drivers/char/nozomi.c | |||
@@ -1828,7 +1828,6 @@ static int ntty_ioctl(struct tty_struct *tty, struct file *file, | |||
1828 | unsigned int cmd, unsigned long arg) | 1828 | unsigned int cmd, unsigned long arg) |
1829 | { | 1829 | { |
1830 | struct port *port = tty->driver_data; | 1830 | struct port *port = tty->driver_data; |
1831 | void __user *argp = (void __user *)arg; | ||
1832 | int rval = -ENOIOCTLCMD; | 1831 | int rval = -ENOIOCTLCMD; |
1833 | 1832 | ||
1834 | DBG1("******** IOCTL, cmd: %d", cmd); | 1833 | DBG1("******** IOCTL, cmd: %d", cmd); |
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c index d962f25dcc2a..777181a2e603 100644 --- a/drivers/char/pcmcia/cm4000_cs.c +++ b/drivers/char/pcmcia/cm4000_cs.c | |||
@@ -979,8 +979,9 @@ static ssize_t cmm_read(struct file *filp, __user char *buf, size_t count, | |||
979 | if (dev->flags0 & 1) { | 979 | if (dev->flags0 & 1) { |
980 | set_bit(IS_CMM_ABSENT, &dev->flags); | 980 | set_bit(IS_CMM_ABSENT, &dev->flags); |
981 | rc = -ENODEV; | 981 | rc = -ENODEV; |
982 | } else { | ||
983 | rc = -EIO; | ||
982 | } | 984 | } |
983 | rc = -EIO; | ||
984 | goto release_io; | 985 | goto release_io; |
985 | } | 986 | } |
986 | 987 | ||
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c index bfc10f89d951..eaa41992fbe2 100644 --- a/drivers/char/pcmcia/synclink_cs.c +++ b/drivers/char/pcmcia/synclink_cs.c | |||
@@ -2796,6 +2796,7 @@ static const struct tty_operations mgslpc_ops = { | |||
2796 | .hangup = mgslpc_hangup, | 2796 | .hangup = mgslpc_hangup, |
2797 | .tiocmget = tiocmget, | 2797 | .tiocmget = tiocmget, |
2798 | .tiocmset = tiocmset, | 2798 | .tiocmset = tiocmset, |
2799 | .get_icount = mgslpc_get_icount, | ||
2799 | .proc_fops = &mgslpc_proc_fops, | 2800 | .proc_fops = &mgslpc_proc_fops, |
2800 | }; | 2801 | }; |
2801 | 2802 | ||
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index 88ee01510ec0..76141262ea1d 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c | |||
@@ -1832,7 +1832,7 @@ static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node, | |||
1832 | return -ENODEV; | 1832 | return -ENODEV; |
1833 | 1833 | ||
1834 | ino = mdesc_get_property(mdesc, node, "ino", &ino_len); | 1834 | ino = mdesc_get_property(mdesc, node, "ino", &ino_len); |
1835 | if (!intr) | 1835 | if (!ino) |
1836 | return -ENODEV; | 1836 | return -ENODEV; |
1837 | 1837 | ||
1838 | if (intr_len != ino_len) | 1838 | if (intr_len != ino_len) |
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index 2e992bc8015b..8a515baa38f7 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c | |||
@@ -286,7 +286,7 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, | |||
286 | if (initial) | 286 | if (initial) |
287 | asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ | 287 | asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ |
288 | : "+S" (input), "+D" (output), "+a" (iv) | 288 | : "+S" (input), "+D" (output), "+a" (iv) |
289 | : "d" (control_word), "b" (key), "c" (count)); | 289 | : "d" (control_word), "b" (key), "c" (initial)); |
290 | 290 | ||
291 | asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ | 291 | asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ |
292 | : "+S" (input), "+D" (output), "+a" (iv) | 292 | : "+S" (input), "+D" (output), "+a" (iv) |
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c index bfae4b309791..afa576a75a8e 100644 --- a/drivers/firewire/sbp2.c +++ b/drivers/firewire/sbp2.c | |||
@@ -1468,7 +1468,7 @@ static int sbp2_map_scatterlist(struct sbp2_command_orb *orb, | |||
1468 | 1468 | ||
1469 | /* SCSI stack integration */ | 1469 | /* SCSI stack integration */ |
1470 | 1470 | ||
1471 | static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done) | 1471 | static int sbp2_scsi_queuecommand_lck(struct scsi_cmnd *cmd, scsi_done_fn_t done) |
1472 | { | 1472 | { |
1473 | struct sbp2_logical_unit *lu = cmd->device->hostdata; | 1473 | struct sbp2_logical_unit *lu = cmd->device->hostdata; |
1474 | struct fw_device *device = target_device(lu->tgt); | 1474 | struct fw_device *device = target_device(lu->tgt); |
@@ -1534,6 +1534,8 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done) | |||
1534 | return retval; | 1534 | return retval; |
1535 | } | 1535 | } |
1536 | 1536 | ||
1537 | static DEF_SCSI_QCMD(sbp2_scsi_queuecommand) | ||
1538 | |||
1537 | static int sbp2_scsi_slave_alloc(struct scsi_device *sdev) | 1539 | static int sbp2_scsi_slave_alloc(struct scsi_device *sdev) |
1538 | { | 1540 | { |
1539 | struct sbp2_logical_unit *lu = sdev->hostdata; | 1541 | struct sbp2_logical_unit *lu = sdev->hostdata; |
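The sbp2 hunk is part of the tree-wide queuecommand locking conversion: the driver's routine is renamed to sbp2_scsi_queuecommand_lck() and DEF_SCSI_QCMD() supplies the new-style entry point, which takes the SCSI host lock itself before calling the old-style function, so unconverted drivers keep their previous locking assumptions. The snippet below shows the approximate shape of such a wrapper; it is an illustrative simplification, not the macro's literal expansion from <scsi/scsi_host.h>.

static int foo_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));

/* Approximate wrapper generated by DEF_SCSI_QCMD(foo): the two-argument
 * queuecommand prototype, with the host lock reacquired around the call
 * into the legacy foo_lck() routine. */
static int foo(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(shost->host_lock, flags);
	rc = foo_lck(cmd, cmd->scsi_done);
	spin_unlock_irqrestore(shost->host_lock, flags);
	return rc;
}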
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index dcbeb98f195a..f7af91cb273d 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c | |||
@@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder, | |||
276 | struct drm_crtc *tmp; | 276 | struct drm_crtc *tmp; |
277 | int crtc_mask = 1; | 277 | int crtc_mask = 1; |
278 | 278 | ||
279 | WARN(!crtc, "checking null crtc?"); | 279 | WARN(!crtc, "checking null crtc?\n"); |
280 | 280 | ||
281 | dev = crtc->dev; | 281 | dev = crtc->dev; |
282 | 282 | ||
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index c1a26217a530..a245d17165ae 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c | |||
@@ -240,7 +240,7 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf, | |||
240 | .addr = DDC_ADDR, | 240 | .addr = DDC_ADDR, |
241 | .flags = I2C_M_RD, | 241 | .flags = I2C_M_RD, |
242 | .len = len, | 242 | .len = len, |
243 | .buf = buf + start, | 243 | .buf = buf, |
244 | } | 244 | } |
245 | }; | 245 | }; |
246 | 246 | ||
@@ -253,7 +253,7 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf, | |||
253 | static u8 * | 253 | static u8 * |
254 | drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) | 254 | drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) |
255 | { | 255 | { |
256 | int i, j = 0; | 256 | int i, j = 0, valid_extensions = 0; |
257 | u8 *block, *new; | 257 | u8 *block, *new; |
258 | 258 | ||
259 | if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL) | 259 | if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL) |
@@ -280,14 +280,28 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) | |||
280 | 280 | ||
281 | for (j = 1; j <= block[0x7e]; j++) { | 281 | for (j = 1; j <= block[0x7e]; j++) { |
282 | for (i = 0; i < 4; i++) { | 282 | for (i = 0; i < 4; i++) { |
283 | if (drm_do_probe_ddc_edid(adapter, block, j, | 283 | if (drm_do_probe_ddc_edid(adapter, |
284 | EDID_LENGTH)) | 284 | block + (valid_extensions + 1) * EDID_LENGTH, |
285 | j, EDID_LENGTH)) | ||
285 | goto out; | 286 | goto out; |
286 | if (drm_edid_block_valid(block + j * EDID_LENGTH)) | 287 | if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH)) { |
288 | valid_extensions++; | ||
287 | break; | 289 | break; |
290 | } | ||
288 | } | 291 | } |
289 | if (i == 4) | 292 | if (i == 4) |
290 | goto carp; | 293 | dev_warn(connector->dev->dev, |
294 | "%s: Ignoring invalid EDID block %d.\n", | ||
295 | drm_get_connector_name(connector), j); | ||
296 | } | ||
297 | |||
298 | if (valid_extensions != block[0x7e]) { | ||
299 | block[EDID_LENGTH-1] += block[0x7e] - valid_extensions; | ||
300 | block[0x7e] = valid_extensions; | ||
301 | new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL); | ||
302 | if (!new) | ||
303 | goto out; | ||
304 | block = new; | ||
291 | } | 305 | } |
292 | 306 | ||
293 | return block; | 307 | return block; |
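Instead of aborting on the first unreadable or corrupt extension block, the reworked loop above packs the valid extensions together and then patches the base block so it stays self-consistent: byte 0x7e advertises how many extensions follow, and the last byte is a checksum that makes the whole 128-byte block sum to zero. A hedged, self-contained sketch of just that fix-up step (stand-alone C; the kernel's read/validate helpers are not repeated here):

    #include <stdint.h>

    #define EDID_LENGTH 128

    /*
     * Dropping invalid extension blocks lowers the count stored at offset
     * 0x7e; whatever is subtracted there has to be added back to the
     * checksum byte at offset 127 so the base block still sums to 0 mod 256.
     */
    static void edid_fixup_extension_count(uint8_t *base, uint8_t valid_extensions)
    {
            uint8_t dropped = base[0x7e] - valid_extensions;

            base[EDID_LENGTH - 1] += dropped;   /* compensate the checksum */
            base[0x7e] = valid_extensions;      /* advertise only the kept blocks */
    }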
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 3467dd420760..80745f85902c 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -44,7 +44,7 @@ unsigned int i915_fbpercrtc = 0; | |||
44 | module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400); | 44 | module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400); |
45 | 45 | ||
46 | unsigned int i915_powersave = 1; | 46 | unsigned int i915_powersave = 1; |
47 | module_param_named(powersave, i915_powersave, int, 0400); | 47 | module_param_named(powersave, i915_powersave, int, 0600); |
48 | 48 | ||
49 | unsigned int i915_lvds_downclock = 0; | 49 | unsigned int i915_lvds_downclock = 0; |
50 | module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); | 50 | module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 2c2c19b6285e..90414ae86afc 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -1321,6 +1321,7 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg, | |||
1321 | 1321 | ||
1322 | #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) | 1322 | #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) |
1323 | #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) | 1323 | #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) |
1324 | #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) | ||
1324 | 1325 | ||
1325 | #define PRIMARY_RINGBUFFER_SIZE (128*1024) | 1326 | #define PRIMARY_RINGBUFFER_SIZE (128*1024) |
1326 | 1327 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 8eb8453208b5..ef188e391406 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -2172,7 +2172,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj) | |||
2172 | static int i915_ring_idle(struct drm_device *dev, | 2172 | static int i915_ring_idle(struct drm_device *dev, |
2173 | struct intel_ring_buffer *ring) | 2173 | struct intel_ring_buffer *ring) |
2174 | { | 2174 | { |
2175 | if (list_empty(&ring->gpu_write_list)) | 2175 | if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list)) |
2176 | return 0; | 2176 | return 0; |
2177 | 2177 | ||
2178 | i915_gem_flush_ring(dev, NULL, ring, | 2178 | i915_gem_flush_ring(dev, NULL, ring, |
@@ -2190,9 +2190,7 @@ i915_gpu_idle(struct drm_device *dev) | |||
2190 | int ret; | 2190 | int ret; |
2191 | 2191 | ||
2192 | lists_empty = (list_empty(&dev_priv->mm.flushing_list) && | 2192 | lists_empty = (list_empty(&dev_priv->mm.flushing_list) && |
2193 | list_empty(&dev_priv->render_ring.active_list) && | 2193 | list_empty(&dev_priv->mm.active_list)); |
2194 | list_empty(&dev_priv->bsd_ring.active_list) && | ||
2195 | list_empty(&dev_priv->blt_ring.active_list)); | ||
2196 | if (lists_empty) | 2194 | if (lists_empty) |
2197 | return 0; | 2195 | return 0; |
2198 | 2196 | ||
@@ -3108,7 +3106,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, | |||
3108 | * write domain | 3106 | * write domain |
3109 | */ | 3107 | */ |
3110 | if (obj->write_domain && | 3108 | if (obj->write_domain && |
3111 | obj->write_domain != obj->pending_read_domains) { | 3109 | (obj->write_domain != obj->pending_read_domains || |
3110 | obj_priv->ring != ring)) { | ||
3112 | flush_domains |= obj->write_domain; | 3111 | flush_domains |= obj->write_domain; |
3113 | invalidate_domains |= | 3112 | invalidate_domains |= |
3114 | obj->pending_read_domains & ~obj->write_domain; | 3113 | obj->pending_read_domains & ~obj->write_domain; |
@@ -3497,6 +3496,52 @@ i915_gem_execbuffer_pin(struct drm_device *dev, | |||
3497 | return 0; | 3496 | return 0; |
3498 | } | 3497 | } |
3499 | 3498 | ||
3499 | static int | ||
3500 | i915_gem_execbuffer_move_to_gpu(struct drm_device *dev, | ||
3501 | struct drm_file *file, | ||
3502 | struct intel_ring_buffer *ring, | ||
3503 | struct drm_gem_object **objects, | ||
3504 | int count) | ||
3505 | { | ||
3506 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3507 | int ret, i; | ||
3508 | |||
3509 | /* Zero the global flush/invalidate flags. These | ||
3510 | * will be modified as new domains are computed | ||
3511 | * for each object | ||
3512 | */ | ||
3513 | dev->invalidate_domains = 0; | ||
3514 | dev->flush_domains = 0; | ||
3515 | dev_priv->mm.flush_rings = 0; | ||
3516 | for (i = 0; i < count; i++) | ||
3517 | i915_gem_object_set_to_gpu_domain(objects[i], ring); | ||
3518 | |||
3519 | if (dev->invalidate_domains | dev->flush_domains) { | ||
3520 | #if WATCH_EXEC | ||
3521 | DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n", | ||
3522 | __func__, | ||
3523 | dev->invalidate_domains, | ||
3524 | dev->flush_domains); | ||
3525 | #endif | ||
3526 | i915_gem_flush(dev, file, | ||
3527 | dev->invalidate_domains, | ||
3528 | dev->flush_domains, | ||
3529 | dev_priv->mm.flush_rings); | ||
3530 | } | ||
3531 | |||
3532 | for (i = 0; i < count; i++) { | ||
3533 | struct drm_i915_gem_object *obj = to_intel_bo(objects[i]); | ||
3534 | /* XXX replace with semaphores */ | ||
3535 | if (obj->ring && ring != obj->ring) { | ||
3536 | ret = i915_gem_object_wait_rendering(&obj->base, true); | ||
3537 | if (ret) | ||
3538 | return ret; | ||
3539 | } | ||
3540 | } | ||
3541 | |||
3542 | return 0; | ||
3543 | } | ||
3544 | |||
3500 | /* Throttle our rendering by waiting until the ring has completed our requests | 3545 | /* Throttle our rendering by waiting until the ring has completed our requests |
3501 | * emitted over 20 msec ago. | 3546 | * emitted over 20 msec ago. |
3502 | * | 3547 | * |
@@ -3757,33 +3802,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3757 | goto err; | 3802 | goto err; |
3758 | } | 3803 | } |
3759 | 3804 | ||
3760 | /* Zero the global flush/invalidate flags. These | 3805 | ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring, |
3761 | * will be modified as new domains are computed | 3806 | object_list, args->buffer_count); |
3762 | * for each object | 3807 | if (ret) |
3763 | */ | 3808 | goto err; |
3764 | dev->invalidate_domains = 0; | ||
3765 | dev->flush_domains = 0; | ||
3766 | dev_priv->mm.flush_rings = 0; | ||
3767 | |||
3768 | for (i = 0; i < args->buffer_count; i++) { | ||
3769 | struct drm_gem_object *obj = object_list[i]; | ||
3770 | |||
3771 | /* Compute new gpu domains and update invalidate/flush */ | ||
3772 | i915_gem_object_set_to_gpu_domain(obj, ring); | ||
3773 | } | ||
3774 | |||
3775 | if (dev->invalidate_domains | dev->flush_domains) { | ||
3776 | #if WATCH_EXEC | ||
3777 | DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n", | ||
3778 | __func__, | ||
3779 | dev->invalidate_domains, | ||
3780 | dev->flush_domains); | ||
3781 | #endif | ||
3782 | i915_gem_flush(dev, file, | ||
3783 | dev->invalidate_domains, | ||
3784 | dev->flush_domains, | ||
3785 | dev_priv->mm.flush_rings); | ||
3786 | } | ||
3787 | 3809 | ||
3788 | for (i = 0; i < args->buffer_count; i++) { | 3810 | for (i = 0; i < args->buffer_count; i++) { |
3789 | struct drm_gem_object *obj = object_list[i]; | 3811 | struct drm_gem_object *obj = object_list[i]; |
@@ -4043,8 +4065,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) | |||
4043 | alignment = i915_gem_get_gtt_alignment(obj); | 4065 | alignment = i915_gem_get_gtt_alignment(obj); |
4044 | if (obj_priv->gtt_offset & (alignment - 1)) { | 4066 | if (obj_priv->gtt_offset & (alignment - 1)) { |
4045 | WARN(obj_priv->pin_count, | 4067 | WARN(obj_priv->pin_count, |
4046 | "bo is already pinned with incorrect alignment:" | 4068 | "bo is already pinned with incorrect alignment: offset=%x, req.alignment=%x\n", |
4047 | " offset=%x, req.alignment=%x\n", | ||
4048 | obj_priv->gtt_offset, alignment); | 4069 | obj_priv->gtt_offset, alignment); |
4049 | ret = i915_gem_object_unbind(obj); | 4070 | ret = i915_gem_object_unbind(obj); |
4050 | if (ret) | 4071 | if (ret) |
@@ -4856,17 +4877,24 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, | |||
4856 | struct drm_file *file_priv) | 4877 | struct drm_file *file_priv) |
4857 | { | 4878 | { |
4858 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 4879 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
4859 | void *obj_addr; | 4880 | void *vaddr = obj_priv->phys_obj->handle->vaddr + args->offset; |
4860 | int ret; | 4881 | char __user *user_data = (char __user *) (uintptr_t) args->data_ptr; |
4861 | char __user *user_data; | ||
4862 | 4882 | ||
4863 | user_data = (char __user *) (uintptr_t) args->data_ptr; | 4883 | DRM_DEBUG_DRIVER("vaddr %p, %lld\n", vaddr, args->size); |
4864 | obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset; | ||
4865 | 4884 | ||
4866 | DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size); | 4885 | if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { |
4867 | ret = copy_from_user(obj_addr, user_data, args->size); | 4886 | unsigned long unwritten; |
4868 | if (ret) | 4887 | |
4869 | return -EFAULT; | 4888 | /* The physical object once assigned is fixed for the lifetime |
4889 | * of the obj, so we can safely drop the lock and continue | ||
4890 | * to access vaddr. | ||
4891 | */ | ||
4892 | mutex_unlock(&dev->struct_mutex); | ||
4893 | unwritten = copy_from_user(vaddr, user_data, args->size); | ||
4894 | mutex_lock(&dev->struct_mutex); | ||
4895 | if (unwritten) | ||
4896 | return -EFAULT; | ||
4897 | } | ||
4870 | 4898 | ||
4871 | drm_agp_chipset_flush(dev); | 4899 | drm_agp_chipset_flush(dev); |
4872 | return 0; | 4900 | return 0; |
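The rewritten pwrite path above first tries a non-faulting copy while struct_mutex is held and only falls back to the sleeping copy_from_user() after dropping the lock, relying on the fact that the phys object's vaddr stays valid for the object's lifetime. A hedged sketch of that two-step pattern (generic names, not the i915 function itself):

    /*
     * Fast path: an atomic, non-sleeping copy attempt while the lock is
     * held.  Slow path: drop the lock so copy_from_user() may fault the
     * user pages in and sleep, which is safe because 'dst' cannot
     * disappear in the meantime.
     */
    static int locked_write(struct mutex *lock, void *dst,
                            const void __user *src, size_t len)
    {
            int ret = 0;

            if (__copy_from_user_inatomic_nocache(dst, src, len) == 0)
                    return 0;       /* copied everything without faulting */

            mutex_unlock(lock);
            if (copy_from_user(dst, src, len))
                    ret = -EFAULT;
            mutex_lock(lock);
            return ret;
    }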
@@ -4900,9 +4928,7 @@ i915_gpu_is_active(struct drm_device *dev) | |||
4900 | int lists_empty; | 4928 | int lists_empty; |
4901 | 4929 | ||
4902 | lists_empty = list_empty(&dev_priv->mm.flushing_list) && | 4930 | lists_empty = list_empty(&dev_priv->mm.flushing_list) && |
4903 | list_empty(&dev_priv->render_ring.active_list) && | 4931 | list_empty(&dev_priv->mm.active_list); |
4904 | list_empty(&dev_priv->bsd_ring.active_list) && | ||
4905 | list_empty(&dev_priv->blt_ring.active_list); | ||
4906 | 4932 | ||
4907 | return !lists_empty; | 4933 | return !lists_empty; |
4908 | } | 4934 | } |
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 43a4013f53fa..d8ae7d1d0cc6 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c | |||
@@ -165,9 +165,7 @@ i915_gem_evict_everything(struct drm_device *dev) | |||
165 | 165 | ||
166 | lists_empty = (list_empty(&dev_priv->mm.inactive_list) && | 166 | lists_empty = (list_empty(&dev_priv->mm.inactive_list) && |
167 | list_empty(&dev_priv->mm.flushing_list) && | 167 | list_empty(&dev_priv->mm.flushing_list) && |
168 | list_empty(&dev_priv->render_ring.active_list) && | 168 | list_empty(&dev_priv->mm.active_list)); |
169 | list_empty(&dev_priv->bsd_ring.active_list) && | ||
170 | list_empty(&dev_priv->blt_ring.active_list)); | ||
171 | if (lists_empty) | 169 | if (lists_empty) |
172 | return -ENOSPC; | 170 | return -ENOSPC; |
173 | 171 | ||
@@ -184,9 +182,7 @@ i915_gem_evict_everything(struct drm_device *dev) | |||
184 | 182 | ||
185 | lists_empty = (list_empty(&dev_priv->mm.inactive_list) && | 183 | lists_empty = (list_empty(&dev_priv->mm.inactive_list) && |
186 | list_empty(&dev_priv->mm.flushing_list) && | 184 | list_empty(&dev_priv->mm.flushing_list) && |
187 | list_empty(&dev_priv->render_ring.active_list) && | 185 | list_empty(&dev_priv->mm.active_list)); |
188 | list_empty(&dev_priv->bsd_ring.active_list) && | ||
189 | list_empty(&dev_priv->blt_ring.active_list)); | ||
190 | BUG_ON(!lists_empty); | 186 | BUG_ON(!lists_empty); |
191 | 187 | ||
192 | return 0; | 188 | return 0; |
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 989c19d2d959..454c064f8ef7 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c | |||
@@ -862,8 +862,10 @@ int i915_restore_state(struct drm_device *dev) | |||
862 | /* Clock gating state */ | 862 | /* Clock gating state */ |
863 | intel_init_clock_gating(dev); | 863 | intel_init_clock_gating(dev); |
864 | 864 | ||
865 | if (HAS_PCH_SPLIT(dev)) | 865 | if (HAS_PCH_SPLIT(dev)) { |
866 | ironlake_enable_drps(dev); | 866 | ironlake_enable_drps(dev); |
867 | intel_init_emon(dev); | ||
868 | } | ||
867 | 869 | ||
868 | /* Cache mode state */ | 870 | /* Cache mode state */ |
869 | I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); | 871 | I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 990f065374b2..48d8fd686ea9 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -1681,6 +1681,37 @@ static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock) | |||
1681 | udelay(500); | 1681 | udelay(500); |
1682 | } | 1682 | } |
1683 | 1683 | ||
1684 | static void intel_fdi_normal_train(struct drm_crtc *crtc) | ||
1685 | { | ||
1686 | struct drm_device *dev = crtc->dev; | ||
1687 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1688 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
1689 | int pipe = intel_crtc->pipe; | ||
1690 | u32 reg, temp; | ||
1691 | |||
1692 | /* enable normal train */ | ||
1693 | reg = FDI_TX_CTL(pipe); | ||
1694 | temp = I915_READ(reg); | ||
1695 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
1696 | temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; | ||
1697 | I915_WRITE(reg, temp); | ||
1698 | |||
1699 | reg = FDI_RX_CTL(pipe); | ||
1700 | temp = I915_READ(reg); | ||
1701 | if (HAS_PCH_CPT(dev)) { | ||
1702 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; | ||
1703 | temp |= FDI_LINK_TRAIN_NORMAL_CPT; | ||
1704 | } else { | ||
1705 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
1706 | temp |= FDI_LINK_TRAIN_NONE; | ||
1707 | } | ||
1708 | I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); | ||
1709 | |||
1710 | /* wait one idle pattern time */ | ||
1711 | POSTING_READ(reg); | ||
1712 | udelay(1000); | ||
1713 | } | ||
1714 | |||
1684 | /* The FDI link training functions for ILK/Ibexpeak. */ | 1715 | /* The FDI link training functions for ILK/Ibexpeak. */ |
1685 | static void ironlake_fdi_link_train(struct drm_crtc *crtc) | 1716 | static void ironlake_fdi_link_train(struct drm_crtc *crtc) |
1686 | { | 1717 | { |
@@ -1767,27 +1798,6 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) | |||
1767 | 1798 | ||
1768 | DRM_DEBUG_KMS("FDI train done\n"); | 1799 | DRM_DEBUG_KMS("FDI train done\n"); |
1769 | 1800 | ||
1770 | /* enable normal train */ | ||
1771 | reg = FDI_TX_CTL(pipe); | ||
1772 | temp = I915_READ(reg); | ||
1773 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
1774 | temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; | ||
1775 | I915_WRITE(reg, temp); | ||
1776 | |||
1777 | reg = FDI_RX_CTL(pipe); | ||
1778 | temp = I915_READ(reg); | ||
1779 | if (HAS_PCH_CPT(dev)) { | ||
1780 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; | ||
1781 | temp |= FDI_LINK_TRAIN_NORMAL_CPT; | ||
1782 | } else { | ||
1783 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
1784 | temp |= FDI_LINK_TRAIN_NONE; | ||
1785 | } | ||
1786 | I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); | ||
1787 | |||
1788 | /* wait one idle pattern time */ | ||
1789 | POSTING_READ(reg); | ||
1790 | udelay(1000); | ||
1791 | } | 1801 | } |
1792 | 1802 | ||
1793 | static const int const snb_b_fdi_train_param [] = { | 1803 | static const int const snb_b_fdi_train_param [] = { |
@@ -2090,6 +2100,8 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) | |||
2090 | I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe))); | 2100 | I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe))); |
2091 | I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe))); | 2101 | I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe))); |
2092 | 2102 | ||
2103 | intel_fdi_normal_train(crtc); | ||
2104 | |||
2093 | /* For PCH DP, enable TRANS_DP_CTL */ | 2105 | /* For PCH DP, enable TRANS_DP_CTL */ |
2094 | if (HAS_PCH_CPT(dev) && | 2106 | if (HAS_PCH_CPT(dev) && |
2095 | intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { | 2107 | intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { |
@@ -2200,9 +2212,10 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) | |||
2200 | udelay(100); | 2212 | udelay(100); |
2201 | 2213 | ||
2202 | /* Ironlake workaround, disable clock pointer after downing FDI */ | 2214 | /* Ironlake workaround, disable clock pointer after downing FDI */ |
2203 | I915_WRITE(FDI_RX_CHICKEN(pipe), | 2215 | if (HAS_PCH_IBX(dev)) |
2204 | I915_READ(FDI_RX_CHICKEN(pipe) & | 2216 | I915_WRITE(FDI_RX_CHICKEN(pipe), |
2205 | ~FDI_RX_PHASE_SYNC_POINTER_ENABLE)); | 2217 | I915_READ(FDI_RX_CHICKEN(pipe) & |
2218 | ~FDI_RX_PHASE_SYNC_POINTER_ENABLE)); | ||
2206 | 2219 | ||
2207 | /* still set train pattern 1 */ | 2220 | /* still set train pattern 1 */ |
2208 | reg = FDI_TX_CTL(pipe); | 2221 | reg = FDI_TX_CTL(pipe); |
@@ -5581,20 +5594,19 @@ void ironlake_enable_drps(struct drm_device *dev) | |||
5581 | fmin = (rgvmodectl & MEMMODE_FMIN_MASK); | 5594 | fmin = (rgvmodectl & MEMMODE_FMIN_MASK); |
5582 | fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> | 5595 | fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> |
5583 | MEMMODE_FSTART_SHIFT; | 5596 | MEMMODE_FSTART_SHIFT; |
5584 | fstart = fmax; | ||
5585 | 5597 | ||
5586 | vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >> | 5598 | vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >> |
5587 | PXVFREQ_PX_SHIFT; | 5599 | PXVFREQ_PX_SHIFT; |
5588 | 5600 | ||
5589 | dev_priv->fmax = fstart; /* IPS callback will increase this */ | 5601 | dev_priv->fmax = fmax; /* IPS callback will increase this */ |
5590 | dev_priv->fstart = fstart; | 5602 | dev_priv->fstart = fstart; |
5591 | 5603 | ||
5592 | dev_priv->max_delay = fmax; | 5604 | dev_priv->max_delay = fstart; |
5593 | dev_priv->min_delay = fmin; | 5605 | dev_priv->min_delay = fmin; |
5594 | dev_priv->cur_delay = fstart; | 5606 | dev_priv->cur_delay = fstart; |
5595 | 5607 | ||
5596 | DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", fmax, fmin, | 5608 | DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", |
5597 | fstart); | 5609 | fmax, fmin, fstart); |
5598 | 5610 | ||
5599 | I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); | 5611 | I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); |
5600 | 5612 | ||
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 891f4f1d63b1..c8e005553310 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -1517,7 +1517,7 @@ g4x_dp_detect(struct intel_dp *intel_dp) | |||
1517 | status = connector_status_connected; | 1517 | status = connector_status_connected; |
1518 | } | 1518 | } |
1519 | 1519 | ||
1520 | return bit; | 1520 | return status; |
1521 | } | 1521 | } |
1522 | 1522 | ||
1523 | /** | 1523 | /** |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 9af9f86a8765..21551fe74541 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -296,6 +296,7 @@ extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, | |||
296 | extern void intel_init_clock_gating(struct drm_device *dev); | 296 | extern void intel_init_clock_gating(struct drm_device *dev); |
297 | extern void ironlake_enable_drps(struct drm_device *dev); | 297 | extern void ironlake_enable_drps(struct drm_device *dev); |
298 | extern void ironlake_disable_drps(struct drm_device *dev); | 298 | extern void ironlake_disable_drps(struct drm_device *dev); |
299 | extern void intel_init_emon(struct drm_device *dev); | ||
299 | 300 | ||
300 | extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, | 301 | extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, |
301 | struct drm_gem_object *obj, | 302 | struct drm_gem_object *obj, |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index f1a649990ea9..4324a326f98e 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -481,11 +481,8 @@ static int intel_lvds_get_modes(struct drm_connector *connector) | |||
481 | struct drm_device *dev = connector->dev; | 481 | struct drm_device *dev = connector->dev; |
482 | struct drm_display_mode *mode; | 482 | struct drm_display_mode *mode; |
483 | 483 | ||
484 | if (intel_lvds->edid) { | 484 | if (intel_lvds->edid) |
485 | drm_mode_connector_update_edid_property(connector, | ||
486 | intel_lvds->edid); | ||
487 | return drm_add_edid_modes(connector, intel_lvds->edid); | 485 | return drm_add_edid_modes(connector, intel_lvds->edid); |
488 | } | ||
489 | 486 | ||
490 | mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode); | 487 | mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode); |
491 | if (mode == 0) | 488 | if (mode == 0) |
@@ -939,7 +936,16 @@ void intel_lvds_init(struct drm_device *dev) | |||
939 | */ | 936 | */ |
940 | intel_lvds->edid = drm_get_edid(connector, | 937 | intel_lvds->edid = drm_get_edid(connector, |
941 | &dev_priv->gmbus[pin].adapter); | 938 | &dev_priv->gmbus[pin].adapter); |
942 | 939 | if (intel_lvds->edid) { | |
940 | if (drm_add_edid_modes(connector, | ||
941 | intel_lvds->edid)) { | ||
942 | drm_mode_connector_update_edid_property(connector, | ||
943 | intel_lvds->edid); | ||
944 | } else { | ||
945 | kfree(intel_lvds->edid); | ||
946 | intel_lvds->edid = NULL; | ||
947 | } | ||
948 | } | ||
943 | if (!intel_lvds->edid) { | 949 | if (!intel_lvds->edid) { |
944 | /* Didn't get an EDID, so | 950 | /* Didn't get an EDID, so |
945 | * Set wide sync ranges so we get all modes | 951 | * Set wide sync ranges so we get all modes |
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index 917c7dc3cd6b..9b0d9a867aea 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c | |||
@@ -512,6 +512,6 @@ int intel_opregion_setup(struct drm_device *dev) | |||
512 | return 0; | 512 | return 0; |
513 | 513 | ||
514 | err_out: | 514 | err_out: |
515 | iounmap(opregion->header); | 515 | iounmap(base); |
516 | return err; | 516 | return err; |
517 | } | 517 | } |
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index afb96d25219a..02ff0a481f47 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c | |||
@@ -946,7 +946,9 @@ static int check_overlay_src(struct drm_device *dev, | |||
946 | { | 946 | { |
947 | int uv_hscale = uv_hsubsampling(rec->flags); | 947 | int uv_hscale = uv_hsubsampling(rec->flags); |
948 | int uv_vscale = uv_vsubsampling(rec->flags); | 948 | int uv_vscale = uv_vsubsampling(rec->flags); |
949 | u32 stride_mask, depth, tmp; | 949 | u32 stride_mask; |
950 | int depth; | ||
951 | u32 tmp; | ||
950 | 952 | ||
951 | /* check src dimensions */ | 953 | /* check src dimensions */ |
952 | if (IS_845G(dev) || IS_I830(dev)) { | 954 | if (IS_845G(dev) || IS_I830(dev)) { |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 09f2dc353ae2..b83306f9244b 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -177,7 +177,7 @@ static int init_ring_common(struct drm_device *dev, | |||
177 | 177 | ||
178 | I915_WRITE_CTL(ring, | 178 | I915_WRITE_CTL(ring, |
179 | ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES) | 179 | ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES) |
180 | | RING_NO_REPORT | RING_VALID); | 180 | | RING_REPORT_64K | RING_VALID); |
181 | 181 | ||
182 | head = I915_READ_HEAD(ring) & HEAD_ADDR; | 182 | head = I915_READ_HEAD(ring) & HEAD_ADDR; |
183 | /* If the head is still not zero, the ring is dead */ | 183 | /* If the head is still not zero, the ring is dead */ |
@@ -654,6 +654,10 @@ void intel_cleanup_ring_buffer(struct drm_device *dev, | |||
654 | i915_gem_object_unpin(ring->gem_object); | 654 | i915_gem_object_unpin(ring->gem_object); |
655 | drm_gem_object_unreference(ring->gem_object); | 655 | drm_gem_object_unreference(ring->gem_object); |
656 | ring->gem_object = NULL; | 656 | ring->gem_object = NULL; |
657 | |||
658 | if (ring->cleanup) | ||
659 | ring->cleanup(ring); | ||
660 | |||
657 | cleanup_status_page(dev, ring); | 661 | cleanup_status_page(dev, ring); |
658 | } | 662 | } |
659 | 663 | ||
@@ -688,6 +692,17 @@ int intel_wait_ring_buffer(struct drm_device *dev, | |||
688 | { | 692 | { |
689 | unsigned long end; | 693 | unsigned long end; |
690 | drm_i915_private_t *dev_priv = dev->dev_private; | 694 | drm_i915_private_t *dev_priv = dev->dev_private; |
695 | u32 head; | ||
696 | |||
697 | head = intel_read_status_page(ring, 4); | ||
698 | if (head) { | ||
699 | ring->head = head & HEAD_ADDR; | ||
700 | ring->space = ring->head - (ring->tail + 8); | ||
701 | if (ring->space < 0) | ||
702 | ring->space += ring->size; | ||
703 | if (ring->space >= n) | ||
704 | return 0; | ||
705 | } | ||
691 | 706 | ||
692 | trace_i915_ring_wait_begin (dev); | 707 | trace_i915_ring_wait_begin (dev); |
693 | end = jiffies + 3 * HZ; | 708 | end = jiffies + 3 * HZ; |
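The new fast path above re-reads the head pointer that the GPU publishes in its status page and recomputes free space before entering the polling loop. The wraparound arithmetic deserves a spelled-out version; a hedged, self-contained sketch (plain C, not the driver's struct layout):

    #include <stdint.h>

    /*
     * Free bytes in a circular ring.  Eight bytes are held back so the tail
     * can never advance onto the head: head == tail must always mean
     * "empty", never "full", matching the convention in the hunk above.
     */
    static int ring_space(uint32_t head, uint32_t tail, uint32_t size)
    {
            int space = (int)head - (int)(tail + 8);

            if (space < 0)
                    space += size;  /* negative only because the tail wrapped */
            return space;
    }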
@@ -854,19 +869,125 @@ blt_ring_put_user_irq(struct drm_device *dev, | |||
854 | /* do nothing */ | 869 | /* do nothing */ |
855 | } | 870 | } |
856 | 871 | ||
872 | |||
873 | /* Workaround for some steppings of SNB: | ||
874 | * each time the BLT engine ring tail is moved, | ||
875 | * the first command in the ring to be parsed | ||
876 | * should be MI_BATCH_BUFFER_START | ||
877 | */ | ||
878 | #define NEED_BLT_WORKAROUND(dev) \ | ||
879 | (IS_GEN6(dev) && (dev->pdev->revision < 8)) | ||
880 | |||
881 | static inline struct drm_i915_gem_object * | ||
882 | to_blt_workaround(struct intel_ring_buffer *ring) | ||
883 | { | ||
884 | return ring->private; | ||
885 | } | ||
886 | |||
887 | static int blt_ring_init(struct drm_device *dev, | ||
888 | struct intel_ring_buffer *ring) | ||
889 | { | ||
890 | if (NEED_BLT_WORKAROUND(dev)) { | ||
891 | struct drm_i915_gem_object *obj; | ||
892 | u32 __iomem *ptr; | ||
893 | int ret; | ||
894 | |||
895 | obj = to_intel_bo(i915_gem_alloc_object(dev, 4096)); | ||
896 | if (obj == NULL) | ||
897 | return -ENOMEM; | ||
898 | |||
899 | ret = i915_gem_object_pin(&obj->base, 4096); | ||
900 | if (ret) { | ||
901 | drm_gem_object_unreference(&obj->base); | ||
902 | return ret; | ||
903 | } | ||
904 | |||
905 | ptr = kmap(obj->pages[0]); | ||
906 | iowrite32(MI_BATCH_BUFFER_END, ptr); | ||
907 | iowrite32(MI_NOOP, ptr+1); | ||
908 | kunmap(obj->pages[0]); | ||
909 | |||
910 | ret = i915_gem_object_set_to_gtt_domain(&obj->base, false); | ||
911 | if (ret) { | ||
912 | i915_gem_object_unpin(&obj->base); | ||
913 | drm_gem_object_unreference(&obj->base); | ||
914 | return ret; | ||
915 | } | ||
916 | |||
917 | ring->private = obj; | ||
918 | } | ||
919 | |||
920 | return init_ring_common(dev, ring); | ||
921 | } | ||
922 | |||
923 | static void blt_ring_begin(struct drm_device *dev, | ||
924 | struct intel_ring_buffer *ring, | ||
925 | int num_dwords) | ||
926 | { | ||
927 | if (ring->private) { | ||
928 | intel_ring_begin(dev, ring, num_dwords+2); | ||
929 | intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START); | ||
930 | intel_ring_emit(dev, ring, to_blt_workaround(ring)->gtt_offset); | ||
931 | } else | ||
932 | intel_ring_begin(dev, ring, 4); | ||
933 | } | ||
934 | |||
935 | static void blt_ring_flush(struct drm_device *dev, | ||
936 | struct intel_ring_buffer *ring, | ||
937 | u32 invalidate_domains, | ||
938 | u32 flush_domains) | ||
939 | { | ||
940 | blt_ring_begin(dev, ring, 4); | ||
941 | intel_ring_emit(dev, ring, MI_FLUSH_DW); | ||
942 | intel_ring_emit(dev, ring, 0); | ||
943 | intel_ring_emit(dev, ring, 0); | ||
944 | intel_ring_emit(dev, ring, 0); | ||
945 | intel_ring_advance(dev, ring); | ||
946 | } | ||
947 | |||
948 | static u32 | ||
949 | blt_ring_add_request(struct drm_device *dev, | ||
950 | struct intel_ring_buffer *ring, | ||
951 | u32 flush_domains) | ||
952 | { | ||
953 | u32 seqno = i915_gem_get_seqno(dev); | ||
954 | |||
955 | blt_ring_begin(dev, ring, 4); | ||
956 | intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX); | ||
957 | intel_ring_emit(dev, ring, | ||
958 | I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | ||
959 | intel_ring_emit(dev, ring, seqno); | ||
960 | intel_ring_emit(dev, ring, MI_USER_INTERRUPT); | ||
961 | intel_ring_advance(dev, ring); | ||
962 | |||
963 | DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno); | ||
964 | return seqno; | ||
965 | } | ||
966 | |||
967 | static void blt_ring_cleanup(struct intel_ring_buffer *ring) | ||
968 | { | ||
969 | if (!ring->private) | ||
970 | return; | ||
971 | |||
972 | i915_gem_object_unpin(ring->private); | ||
973 | drm_gem_object_unreference(ring->private); | ||
974 | ring->private = NULL; | ||
975 | } | ||
976 | |||
857 | static const struct intel_ring_buffer gen6_blt_ring = { | 977 | static const struct intel_ring_buffer gen6_blt_ring = { |
858 | .name = "blt ring", | 978 | .name = "blt ring", |
859 | .id = RING_BLT, | 979 | .id = RING_BLT, |
860 | .mmio_base = BLT_RING_BASE, | 980 | .mmio_base = BLT_RING_BASE, |
861 | .size = 32 * PAGE_SIZE, | 981 | .size = 32 * PAGE_SIZE, |
862 | .init = init_ring_common, | 982 | .init = blt_ring_init, |
863 | .write_tail = ring_write_tail, | 983 | .write_tail = ring_write_tail, |
864 | .flush = gen6_ring_flush, | 984 | .flush = blt_ring_flush, |
865 | .add_request = ring_add_request, | 985 | .add_request = blt_ring_add_request, |
866 | .get_seqno = ring_status_page_get_seqno, | 986 | .get_seqno = ring_status_page_get_seqno, |
867 | .user_irq_get = blt_ring_get_user_irq, | 987 | .user_irq_get = blt_ring_get_user_irq, |
868 | .user_irq_put = blt_ring_put_user_irq, | 988 | .user_irq_put = blt_ring_put_user_irq, |
869 | .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer, | 989 | .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer, |
990 | .cleanup = blt_ring_cleanup, | ||
870 | }; | 991 | }; |
871 | 992 | ||
872 | int intel_init_render_ring_buffer(struct drm_device *dev) | 993 | int intel_init_render_ring_buffer(struct drm_device *dev) |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index a05aff0e5764..3126c2681983 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
@@ -63,6 +63,7 @@ struct intel_ring_buffer { | |||
63 | struct drm_i915_gem_execbuffer2 *exec, | 63 | struct drm_i915_gem_execbuffer2 *exec, |
64 | struct drm_clip_rect *cliprects, | 64 | struct drm_clip_rect *cliprects, |
65 | uint64_t exec_offset); | 65 | uint64_t exec_offset); |
66 | void (*cleanup)(struct intel_ring_buffer *ring); | ||
66 | 67 | ||
67 | /** | 68 | /** |
68 | * List of objects currently involved in rendering from the | 69 | * List of objects currently involved in rendering from the |
@@ -98,6 +99,8 @@ struct intel_ring_buffer { | |||
98 | 99 | ||
99 | wait_queue_head_t irq_queue; | 100 | wait_queue_head_t irq_queue; |
100 | drm_local_map_t map; | 101 | drm_local_map_t map; |
102 | |||
103 | void *private; | ||
101 | }; | 104 | }; |
102 | 105 | ||
103 | static inline u32 | 106 | static inline u32 |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index f12a5b3ec050..488c36c8f5e6 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -2033,7 +2033,7 @@ int evergreen_irq_set(struct radeon_device *rdev) | |||
2033 | u32 grbm_int_cntl = 0; | 2033 | u32 grbm_int_cntl = 0; |
2034 | 2034 | ||
2035 | if (!rdev->irq.installed) { | 2035 | if (!rdev->irq.installed) { |
2036 | WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); | 2036 | WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); |
2037 | return -EINVAL; | 2037 | return -EINVAL; |
2038 | } | 2038 | } |
2039 | /* don't enable anything if the ih is disabled */ | 2039 | /* don't enable anything if the ih is disabled */ |
@@ -2295,6 +2295,7 @@ restart_ih: | |||
2295 | case 0: /* D1 vblank */ | 2295 | case 0: /* D1 vblank */ |
2296 | if (disp_int & LB_D1_VBLANK_INTERRUPT) { | 2296 | if (disp_int & LB_D1_VBLANK_INTERRUPT) { |
2297 | drm_handle_vblank(rdev->ddev, 0); | 2297 | drm_handle_vblank(rdev->ddev, 0); |
2298 | rdev->pm.vblank_sync = true; | ||
2298 | wake_up(&rdev->irq.vblank_queue); | 2299 | wake_up(&rdev->irq.vblank_queue); |
2299 | disp_int &= ~LB_D1_VBLANK_INTERRUPT; | 2300 | disp_int &= ~LB_D1_VBLANK_INTERRUPT; |
2300 | DRM_DEBUG("IH: D1 vblank\n"); | 2301 | DRM_DEBUG("IH: D1 vblank\n"); |
@@ -2316,6 +2317,7 @@ restart_ih: | |||
2316 | case 0: /* D2 vblank */ | 2317 | case 0: /* D2 vblank */ |
2317 | if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) { | 2318 | if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) { |
2318 | drm_handle_vblank(rdev->ddev, 1); | 2319 | drm_handle_vblank(rdev->ddev, 1); |
2320 | rdev->pm.vblank_sync = true; | ||
2319 | wake_up(&rdev->irq.vblank_queue); | 2321 | wake_up(&rdev->irq.vblank_queue); |
2320 | disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; | 2322 | disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; |
2321 | DRM_DEBUG("IH: D2 vblank\n"); | 2323 | DRM_DEBUG("IH: D2 vblank\n"); |
@@ -2337,6 +2339,7 @@ restart_ih: | |||
2337 | case 0: /* D3 vblank */ | 2339 | case 0: /* D3 vblank */ |
2338 | if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) { | 2340 | if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) { |
2339 | drm_handle_vblank(rdev->ddev, 2); | 2341 | drm_handle_vblank(rdev->ddev, 2); |
2342 | rdev->pm.vblank_sync = true; | ||
2340 | wake_up(&rdev->irq.vblank_queue); | 2343 | wake_up(&rdev->irq.vblank_queue); |
2341 | disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; | 2344 | disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; |
2342 | DRM_DEBUG("IH: D3 vblank\n"); | 2345 | DRM_DEBUG("IH: D3 vblank\n"); |
@@ -2358,6 +2361,7 @@ restart_ih: | |||
2358 | case 0: /* D4 vblank */ | 2361 | case 0: /* D4 vblank */ |
2359 | if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) { | 2362 | if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) { |
2360 | drm_handle_vblank(rdev->ddev, 3); | 2363 | drm_handle_vblank(rdev->ddev, 3); |
2364 | rdev->pm.vblank_sync = true; | ||
2361 | wake_up(&rdev->irq.vblank_queue); | 2365 | wake_up(&rdev->irq.vblank_queue); |
2362 | disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; | 2366 | disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; |
2363 | DRM_DEBUG("IH: D4 vblank\n"); | 2367 | DRM_DEBUG("IH: D4 vblank\n"); |
@@ -2379,6 +2383,7 @@ restart_ih: | |||
2379 | case 0: /* D5 vblank */ | 2383 | case 0: /* D5 vblank */ |
2380 | if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) { | 2384 | if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) { |
2381 | drm_handle_vblank(rdev->ddev, 4); | 2385 | drm_handle_vblank(rdev->ddev, 4); |
2386 | rdev->pm.vblank_sync = true; | ||
2382 | wake_up(&rdev->irq.vblank_queue); | 2387 | wake_up(&rdev->irq.vblank_queue); |
2383 | disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; | 2388 | disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; |
2384 | DRM_DEBUG("IH: D5 vblank\n"); | 2389 | DRM_DEBUG("IH: D5 vblank\n"); |
@@ -2400,6 +2405,7 @@ restart_ih: | |||
2400 | case 0: /* D6 vblank */ | 2405 | case 0: /* D6 vblank */ |
2401 | if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) { | 2406 | if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) { |
2402 | drm_handle_vblank(rdev->ddev, 5); | 2407 | drm_handle_vblank(rdev->ddev, 5); |
2408 | rdev->pm.vblank_sync = true; | ||
2403 | wake_up(&rdev->irq.vblank_queue); | 2409 | wake_up(&rdev->irq.vblank_queue); |
2404 | disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; | 2410 | disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; |
2405 | DRM_DEBUG("IH: D6 vblank\n"); | 2411 | DRM_DEBUG("IH: D6 vblank\n"); |
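Each vblank interrupt case now latches rdev->pm.vblank_sync before waking rdev->irq.vblank_queue, so code that clears the flag and sleeps on that queue can reliably detect that a vblank has occurred. A hedged sketch of the two halves of that handshake (the waiting side below illustrates the pattern and is not a verbatim copy of the radeon power-management code):

    /* producer, in the IRQ handler: set the flag, then wake the sleeper */
    rdev->pm.vblank_sync = true;
    wake_up(&rdev->irq.vblank_queue);

    /* consumer, before reclocking: arm the flag, then wait (bounded) */
    rdev->pm.vblank_sync = false;
    wait_event_timeout(rdev->irq.vblank_queue,
                       rdev->pm.vblank_sync,
                       msecs_to_jiffies(200));  /* cap chosen for this sketch */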
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 0e8f28a68927..8e10aa9f74b0 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -442,7 +442,7 @@ int r100_pci_gart_init(struct radeon_device *rdev) | |||
442 | int r; | 442 | int r; |
443 | 443 | ||
444 | if (rdev->gart.table.ram.ptr) { | 444 | if (rdev->gart.table.ram.ptr) { |
445 | WARN(1, "R100 PCI GART already initialized.\n"); | 445 | WARN(1, "R100 PCI GART already initialized\n"); |
446 | return 0; | 446 | return 0; |
447 | } | 447 | } |
448 | /* Initialize common gart structure */ | 448 | /* Initialize common gart structure */ |
@@ -516,7 +516,7 @@ int r100_irq_set(struct radeon_device *rdev) | |||
516 | uint32_t tmp = 0; | 516 | uint32_t tmp = 0; |
517 | 517 | ||
518 | if (!rdev->irq.installed) { | 518 | if (!rdev->irq.installed) { |
519 | WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); | 519 | WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); |
520 | WREG32(R_000040_GEN_INT_CNTL, 0); | 520 | WREG32(R_000040_GEN_INT_CNTL, 0); |
521 | return -EINVAL; | 521 | return -EINVAL; |
522 | } | 522 | } |
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 34527e600fe9..cde1d3480d93 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
@@ -91,7 +91,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev) | |||
91 | int r; | 91 | int r; |
92 | 92 | ||
93 | if (rdev->gart.table.vram.robj) { | 93 | if (rdev->gart.table.vram.robj) { |
94 | WARN(1, "RV370 PCIE GART already initialized.\n"); | 94 | WARN(1, "RV370 PCIE GART already initialized\n"); |
95 | return 0; | 95 | return 0; |
96 | } | 96 | } |
97 | /* Initialize common gart structure */ | 97 | /* Initialize common gart structure */ |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 33952a12f0a3..0f806cc7dc75 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -97,14 +97,8 @@ u32 rv6xx_get_temp(struct radeon_device *rdev) | |||
97 | { | 97 | { |
98 | u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >> | 98 | u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >> |
99 | ASIC_T_SHIFT; | 99 | ASIC_T_SHIFT; |
100 | u32 actual_temp = 0; | ||
101 | 100 | ||
102 | if ((temp >> 7) & 1) | 101 | return temp * 1000; |
103 | actual_temp = 0; | ||
104 | else | ||
105 | actual_temp = (temp >> 1) & 0xff; | ||
106 | |||
107 | return actual_temp * 1000; | ||
108 | } | 102 | } |
109 | 103 | ||
110 | void r600_pm_get_dynpm_state(struct radeon_device *rdev) | 104 | void r600_pm_get_dynpm_state(struct radeon_device *rdev) |
@@ -919,7 +913,7 @@ int r600_pcie_gart_init(struct radeon_device *rdev) | |||
919 | int r; | 913 | int r; |
920 | 914 | ||
921 | if (rdev->gart.table.vram.robj) { | 915 | if (rdev->gart.table.vram.robj) { |
922 | WARN(1, "R600 PCIE GART already initialized.\n"); | 916 | WARN(1, "R600 PCIE GART already initialized\n"); |
923 | return 0; | 917 | return 0; |
924 | } | 918 | } |
925 | /* Initialize common gart structure */ | 919 | /* Initialize common gart structure */ |
@@ -2995,7 +2989,7 @@ int r600_irq_set(struct radeon_device *rdev) | |||
2995 | u32 hdmi1, hdmi2; | 2989 | u32 hdmi1, hdmi2; |
2996 | 2990 | ||
2997 | if (!rdev->irq.installed) { | 2991 | if (!rdev->irq.installed) { |
2998 | WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); | 2992 | WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); |
2999 | return -EINVAL; | 2993 | return -EINVAL; |
3000 | } | 2994 | } |
3001 | /* don't enable anything if the ih is disabled */ | 2995 | /* don't enable anything if the ih is disabled */ |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 04cac7ec9039..87ead090c7d5 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
@@ -526,8 +526,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
526 | if (crev < 2) | 526 | if (crev < 2) |
527 | return false; | 527 | return false; |
528 | 528 | ||
529 | router.valid = false; | ||
530 | |||
531 | obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset); | 529 | obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset); |
532 | path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *) | 530 | path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *) |
533 | (ctx->bios + data_offset + | 531 | (ctx->bios + data_offset + |
@@ -624,6 +622,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
624 | if (connector_type == DRM_MODE_CONNECTOR_Unknown) | 622 | if (connector_type == DRM_MODE_CONNECTOR_Unknown) |
625 | continue; | 623 | continue; |
626 | 624 | ||
625 | router.ddc_valid = false; | ||
626 | router.cd_valid = false; | ||
627 | for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) { | 627 | for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) { |
628 | uint8_t grph_obj_id, grph_obj_num, grph_obj_type; | 628 | uint8_t grph_obj_id, grph_obj_num, grph_obj_type; |
629 | 629 | ||
@@ -647,9 +647,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
647 | usDeviceTag)); | 647 | usDeviceTag)); |
648 | 648 | ||
649 | } else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) { | 649 | } else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) { |
650 | router.valid = false; | ||
651 | for (k = 0; k < router_obj->ucNumberOfObjects; k++) { | 650 | for (k = 0; k < router_obj->ucNumberOfObjects; k++) { |
652 | u16 router_obj_id = le16_to_cpu(router_obj->asObjects[j].usObjectID); | 651 | u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID); |
653 | if (le16_to_cpu(path->usGraphicObjIds[j]) == router_obj_id) { | 652 | if (le16_to_cpu(path->usGraphicObjIds[j]) == router_obj_id) { |
654 | ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *) | 653 | ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *) |
655 | (ctx->bios + data_offset + | 654 | (ctx->bios + data_offset + |
@@ -657,6 +656,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
657 | ATOM_I2C_RECORD *i2c_record; | 656 | ATOM_I2C_RECORD *i2c_record; |
658 | ATOM_I2C_ID_CONFIG_ACCESS *i2c_config; | 657 | ATOM_I2C_ID_CONFIG_ACCESS *i2c_config; |
659 | ATOM_ROUTER_DDC_PATH_SELECT_RECORD *ddc_path; | 658 | ATOM_ROUTER_DDC_PATH_SELECT_RECORD *ddc_path; |
659 | ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *cd_path; | ||
660 | ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *router_src_dst_table = | 660 | ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *router_src_dst_table = |
661 | (ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *) | 661 | (ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *) |
662 | (ctx->bios + data_offset + | 662 | (ctx->bios + data_offset + |
@@ -690,10 +690,18 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
690 | case ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE: | 690 | case ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE: |
691 | ddc_path = (ATOM_ROUTER_DDC_PATH_SELECT_RECORD *) | 691 | ddc_path = (ATOM_ROUTER_DDC_PATH_SELECT_RECORD *) |
692 | record; | 692 | record; |
693 | router.valid = true; | 693 | router.ddc_valid = true; |
694 | router.mux_type = ddc_path->ucMuxType; | 694 | router.ddc_mux_type = ddc_path->ucMuxType; |
695 | router.mux_control_pin = ddc_path->ucMuxControlPin; | 695 | router.ddc_mux_control_pin = ddc_path->ucMuxControlPin; |
696 | router.mux_state = ddc_path->ucMuxState[enum_id]; | 696 | router.ddc_mux_state = ddc_path->ucMuxState[enum_id]; |
697 | break; | ||
698 | case ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE: | ||
699 | cd_path = (ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *) | ||
700 | record; | ||
701 | router.cd_valid = true; | ||
702 | router.cd_mux_type = cd_path->ucMuxType; | ||
703 | router.cd_mux_control_pin = cd_path->ucMuxControlPin; | ||
704 | router.cd_mux_state = cd_path->ucMuxState[enum_id]; | ||
697 | break; | 705 | break; |
698 | } | 706 | } |
699 | record = (ATOM_COMMON_RECORD_HEADER *) | 707 | record = (ATOM_COMMON_RECORD_HEADER *) |
@@ -860,7 +868,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct | |||
860 | size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE; | 868 | size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE; |
861 | struct radeon_router router; | 869 | struct radeon_router router; |
862 | 870 | ||
863 | router.valid = false; | 871 | router.ddc_valid = false; |
872 | router.cd_valid = false; | ||
864 | 873 | ||
865 | bios_connectors = kzalloc(bc_size, GFP_KERNEL); | 874 | bios_connectors = kzalloc(bc_size, GFP_KERNEL); |
866 | if (!bios_connectors) | 875 | if (!bios_connectors) |
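The renames above split the single valid/mux_* router description into separate DDC and clock/data (CD) mux descriptions, because the two paths can be switched independently on boards that carry such routers. A hedged reconstruction of the struct radeon_router fields these hunks imply — only the field names come from the patch; the types are assumptions:

    struct radeon_router {
            u32 router_id;
            struct radeon_i2c_bus_rec i2c_info;
            /* DDC line mux */
            bool ddc_valid;
            u8 ddc_mux_type;
            u8 ddc_mux_control_pin;
            u8 ddc_mux_state;
            /* clock/data line mux */
            bool cd_valid;
            u8 cd_mux_type;
            u8 cd_mux_control_pin;
            u8 cd_mux_state;
    };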
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 4dac4b0a02ee..fe6c74780f18 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
@@ -183,13 +183,13 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector, | |||
183 | continue; | 183 | continue; |
184 | 184 | ||
185 | if (priority == true) { | 185 | if (priority == true) { |
186 | DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict)); | 186 | DRM_DEBUG_KMS("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict)); |
187 | DRM_INFO("in favor of %s\n", drm_get_connector_name(connector)); | 187 | DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(connector)); |
188 | conflict->status = connector_status_disconnected; | 188 | conflict->status = connector_status_disconnected; |
189 | radeon_connector_update_scratch_regs(conflict, connector_status_disconnected); | 189 | radeon_connector_update_scratch_regs(conflict, connector_status_disconnected); |
190 | } else { | 190 | } else { |
191 | DRM_INFO("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector)); | 191 | DRM_DEBUG_KMS("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector)); |
192 | DRM_INFO("in favor of %s\n", drm_get_connector_name(conflict)); | 192 | DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(conflict)); |
193 | current_status = connector_status_disconnected; | 193 | current_status = connector_status_disconnected; |
194 | } | 194 | } |
195 | break; | 195 | break; |
@@ -432,13 +432,13 @@ static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder, | |||
432 | mode->vdisplay == native_mode->vdisplay) { | 432 | mode->vdisplay == native_mode->vdisplay) { |
433 | *native_mode = *mode; | 433 | *native_mode = *mode; |
434 | drm_mode_set_crtcinfo(native_mode, CRTC_INTERLACE_HALVE_V); | 434 | drm_mode_set_crtcinfo(native_mode, CRTC_INTERLACE_HALVE_V); |
435 | DRM_INFO("Determined LVDS native mode details from EDID\n"); | 435 | DRM_DEBUG_KMS("Determined LVDS native mode details from EDID\n"); |
436 | break; | 436 | break; |
437 | } | 437 | } |
438 | } | 438 | } |
439 | } | 439 | } |
440 | if (!native_mode->clock) { | 440 | if (!native_mode->clock) { |
441 | DRM_INFO("No LVDS native mode details, disabling RMX\n"); | 441 | DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n"); |
442 | radeon_encoder->rmx_type = RMX_OFF; | 442 | radeon_encoder->rmx_type = RMX_OFF; |
443 | } | 443 | } |
444 | } | 444 | } |
@@ -1116,7 +1116,7 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
1116 | radeon_connector->shared_ddc = true; | 1116 | radeon_connector->shared_ddc = true; |
1117 | shared_ddc = true; | 1117 | shared_ddc = true; |
1118 | } | 1118 | } |
1119 | if (radeon_connector->router_bus && router->valid && | 1119 | if (radeon_connector->router_bus && router->ddc_valid && |
1120 | (radeon_connector->router.router_id == router->router_id)) { | 1120 | (radeon_connector->router.router_id == router->router_id)) { |
1121 | radeon_connector->shared_ddc = false; | 1121 | radeon_connector->shared_ddc = false; |
1122 | shared_ddc = false; | 1122 | shared_ddc = false; |
@@ -1136,7 +1136,7 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
1136 | radeon_connector->connector_object_id = connector_object_id; | 1136 | radeon_connector->connector_object_id = connector_object_id; |
1137 | radeon_connector->hpd = *hpd; | 1137 | radeon_connector->hpd = *hpd; |
1138 | radeon_connector->router = *router; | 1138 | radeon_connector->router = *router; |
1139 | if (router->valid) { | 1139 | if (router->ddc_valid || router->cd_valid) { |
1140 | radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info); | 1140 | radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info); |
1141 | if (!radeon_connector->router_bus) | 1141 | if (!radeon_connector->router_bus) |
1142 | goto failed; | 1142 | goto failed; |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 0383631da69c..1df4dc6c063c 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -315,10 +315,14 @@ static void radeon_print_display_setup(struct drm_device *dev) | |||
315 | radeon_connector->ddc_bus->rec.en_data_reg, | 315 | radeon_connector->ddc_bus->rec.en_data_reg, |
316 | radeon_connector->ddc_bus->rec.y_clk_reg, | 316 | radeon_connector->ddc_bus->rec.y_clk_reg, |
317 | radeon_connector->ddc_bus->rec.y_data_reg); | 317 | radeon_connector->ddc_bus->rec.y_data_reg); |
318 | if (radeon_connector->router_bus) | 318 | if (radeon_connector->router.ddc_valid) |
319 | DRM_INFO(" DDC Router 0x%x/0x%x\n", | 319 | DRM_INFO(" DDC Router 0x%x/0x%x\n", |
320 | radeon_connector->router.mux_control_pin, | 320 | radeon_connector->router.ddc_mux_control_pin, |
321 | radeon_connector->router.mux_state); | 321 | radeon_connector->router.ddc_mux_state); |
322 | if (radeon_connector->router.cd_valid) | ||
323 | DRM_INFO(" Clock/Data Router 0x%x/0x%x\n", | ||
324 | radeon_connector->router.cd_mux_control_pin, | ||
325 | radeon_connector->router.cd_mux_state); | ||
322 | } else { | 326 | } else { |
323 | if (connector->connector_type == DRM_MODE_CONNECTOR_VGA || | 327 | if (connector->connector_type == DRM_MODE_CONNECTOR_VGA || |
324 | connector->connector_type == DRM_MODE_CONNECTOR_DVII || | 328 | connector->connector_type == DRM_MODE_CONNECTOR_DVII || |
@@ -398,8 +402,8 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) | |||
398 | int ret = 0; | 402 | int ret = 0; |
399 | 403 | ||
400 | /* on hw with routers, select right port */ | 404 | /* on hw with routers, select right port */ |
401 | if (radeon_connector->router.valid) | 405 | if (radeon_connector->router.ddc_valid) |
402 | radeon_router_select_port(radeon_connector); | 406 | radeon_router_select_ddc_port(radeon_connector); |
403 | 407 | ||
404 | if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || | 408 | if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || |
405 | (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) { | 409 | (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) { |
@@ -432,8 +436,8 @@ static int radeon_ddc_dump(struct drm_connector *connector) | |||
432 | int ret = 0; | 436 | int ret = 0; |
433 | 437 | ||
434 | /* on hw with routers, select right port */ | 438 | /* on hw with routers, select right port */ |
435 | if (radeon_connector->router.valid) | 439 | if (radeon_connector->router.ddc_valid) |
436 | radeon_router_select_port(radeon_connector); | 440 | radeon_router_select_ddc_port(radeon_connector); |
437 | 441 | ||
438 | if (!radeon_connector->ddc_bus) | 442 | if (!radeon_connector->ddc_bus) |
439 | return -1; | 443 | return -1; |
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index ae58b6849a2e..f678257c42e6 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c | |||
@@ -1520,6 +1520,7 @@ radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connec | |||
1520 | static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) | 1520 | static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) |
1521 | { | 1521 | { |
1522 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 1522 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
1523 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | ||
1523 | 1524 | ||
1524 | if (radeon_encoder->active_device & | 1525 | if (radeon_encoder->active_device & |
1525 | (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) { | 1526 | (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) { |
@@ -1531,6 +1532,13 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) | |||
1531 | radeon_atom_output_lock(encoder, true); | 1532 | radeon_atom_output_lock(encoder, true); |
1532 | radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); | 1533 | radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); |
1533 | 1534 | ||
1535 | /* select the clock/data port if it uses a router */ | ||
1536 | if (connector) { | ||
1537 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
1538 | if (radeon_connector->router.cd_valid) | ||
1539 | radeon_router_select_cd_port(radeon_connector); | ||
1540 | } | ||
1541 | |||
1534 | /* this is needed for the pll/ss setup to work correctly in some cases */ | 1542 | /* this is needed for the pll/ss setup to work correctly in some cases */ |
1535 | atombios_set_encoder_crtc_source(encoder); | 1543 | atombios_set_encoder_crtc_source(encoder); |
1536 | } | 1544 | } |
@@ -1547,6 +1555,23 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder) | |||
1547 | struct radeon_device *rdev = dev->dev_private; | 1555 | struct radeon_device *rdev = dev->dev_private; |
1548 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 1556 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
1549 | struct radeon_encoder_atom_dig *dig; | 1557 | struct radeon_encoder_atom_dig *dig; |
1558 | |||
1559 | /* check for pre-DCE3 cards with shared encoders; | ||
1560 | * can't really use the links individually, so don't disable | ||
1561 | * the encoder if it's in use by another connector | ||
1562 | */ | ||
1563 | if (!ASIC_IS_DCE3(rdev)) { | ||
1564 | struct drm_encoder *other_encoder; | ||
1565 | struct radeon_encoder *other_radeon_encoder; | ||
1566 | |||
1567 | list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) { | ||
1568 | other_radeon_encoder = to_radeon_encoder(other_encoder); | ||
1569 | if ((radeon_encoder->encoder_id == other_radeon_encoder->encoder_id) && | ||
1570 | drm_helper_encoder_in_use(other_encoder)) | ||
1571 | goto disable_done; | ||
1572 | } | ||
1573 | } | ||
1574 | |||
1550 | radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); | 1575 | radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); |
1551 | 1576 | ||
1552 | switch (radeon_encoder->encoder_id) { | 1577 | switch (radeon_encoder->encoder_id) { |
@@ -1586,6 +1611,7 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder) | |||
1586 | break; | 1611 | break; |
1587 | } | 1612 | } |
1588 | 1613 | ||
1614 | disable_done: | ||
1589 | if (radeon_encoder_is_digital(encoder)) { | 1615 | if (radeon_encoder_is_digital(encoder)) { |
1590 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) | 1616 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) |
1591 | r600_hdmi_disable(encoder); | 1617 | r600_hdmi_disable(encoder); |
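Two separate fixes land in radeon_encoders.c. First, encoder prepare now selects the clock/data leg of the router (router.cd_valid / radeon_router_select_cd_port) before mode setting, mirroring the DDC selection done on the connector side. Second, pre-DCE3 ASICs share a physical encoder block between connectors, so the disable path jumps to the new disable_done label - skipping the power-down switch but keeping the HDMI teardown - whenever another in-use encoder reports the same encoder_id. Condensed sketch of that guard, as added above:

    /* pre-DCE3 only: leave a shared encoder powered if anyone else uses it */
    list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
            other_radeon_encoder = to_radeon_encoder(other_encoder);
            if (radeon_encoder->encoder_id == other_radeon_encoder->encoder_id &&
                drm_helper_encoder_in_use(other_encoder))
                    goto disable_done;
    }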
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 216392d0353b..daacb281dfaf 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c | |||
@@ -240,7 +240,8 @@ retry: | |||
240 | */ | 240 | */ |
241 | if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) { | 241 | if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) { |
242 | /* good news we believe it's a lockup */ | 242 | /* good news we believe it's a lockup */ |
243 | WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n", fence->seq, seq); | 243 | WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n", |
244 | fence->seq, seq); | ||
244 | /* FIXME: what should we do ? marking everyone | 245 | /* FIXME: what should we do ? marking everyone |
245 | * as signaled for now | 246 | * as signaled for now |
246 | */ | 247 | */ |
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index 6a13ee38a5b9..0cfbba02c4d0 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c | |||
@@ -53,8 +53,8 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector) | |||
53 | }; | 53 | }; |
54 | 54 | ||
55 | /* on hw with routers, select right port */ | 55 | /* on hw with routers, select right port */ |
56 | if (radeon_connector->router.valid) | 56 | if (radeon_connector->router.ddc_valid) |
57 | radeon_router_select_port(radeon_connector); | 57 | radeon_router_select_ddc_port(radeon_connector); |
58 | 58 | ||
59 | ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2); | 59 | ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2); |
60 | if (ret == 2) | 60 | if (ret == 2) |
@@ -1084,26 +1084,51 @@ void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c_bus, | |||
1084 | addr, val); | 1084 | addr, val); |
1085 | } | 1085 | } |
1086 | 1086 | ||
1087 | /* router switching */ | 1087 | /* ddc router switching */ |
1088 | void radeon_router_select_port(struct radeon_connector *radeon_connector) | 1088 | void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector) |
1089 | { | 1089 | { |
1090 | u8 val; | 1090 | u8 val; |
1091 | 1091 | ||
1092 | if (!radeon_connector->router.valid) | 1092 | if (!radeon_connector->router.ddc_valid) |
1093 | return; | 1093 | return; |
1094 | 1094 | ||
1095 | radeon_i2c_get_byte(radeon_connector->router_bus, | 1095 | radeon_i2c_get_byte(radeon_connector->router_bus, |
1096 | radeon_connector->router.i2c_addr, | 1096 | radeon_connector->router.i2c_addr, |
1097 | 0x3, &val); | 1097 | 0x3, &val); |
1098 | val &= radeon_connector->router.mux_control_pin; | 1098 | val &= ~radeon_connector->router.ddc_mux_control_pin; |
1099 | radeon_i2c_put_byte(radeon_connector->router_bus, | 1099 | radeon_i2c_put_byte(radeon_connector->router_bus, |
1100 | radeon_connector->router.i2c_addr, | 1100 | radeon_connector->router.i2c_addr, |
1101 | 0x3, val); | 1101 | 0x3, val); |
1102 | radeon_i2c_get_byte(radeon_connector->router_bus, | 1102 | radeon_i2c_get_byte(radeon_connector->router_bus, |
1103 | radeon_connector->router.i2c_addr, | 1103 | radeon_connector->router.i2c_addr, |
1104 | 0x1, &val); | 1104 | 0x1, &val); |
1105 | val &= radeon_connector->router.mux_control_pin; | 1105 | val &= ~radeon_connector->router.ddc_mux_control_pin; |
1106 | val |= radeon_connector->router.mux_state; | 1106 | val |= radeon_connector->router.ddc_mux_state; |
1107 | radeon_i2c_put_byte(radeon_connector->router_bus, | ||
1108 | radeon_connector->router.i2c_addr, | ||
1109 | 0x1, val); | ||
1110 | } | ||
1111 | |||
1112 | /* clock/data router switching */ | ||
1113 | void radeon_router_select_cd_port(struct radeon_connector *radeon_connector) | ||
1114 | { | ||
1115 | u8 val; | ||
1116 | |||
1117 | if (!radeon_connector->router.cd_valid) | ||
1118 | return; | ||
1119 | |||
1120 | radeon_i2c_get_byte(radeon_connector->router_bus, | ||
1121 | radeon_connector->router.i2c_addr, | ||
1122 | 0x3, &val); | ||
1123 | val &= ~radeon_connector->router.cd_mux_control_pin; | ||
1124 | radeon_i2c_put_byte(radeon_connector->router_bus, | ||
1125 | radeon_connector->router.i2c_addr, | ||
1126 | 0x3, val); | ||
1127 | radeon_i2c_get_byte(radeon_connector->router_bus, | ||
1128 | radeon_connector->router.i2c_addr, | ||
1129 | 0x1, &val); | ||
1130 | val &= ~radeon_connector->router.cd_mux_control_pin; | ||
1131 | val |= radeon_connector->router.cd_mux_state; | ||
1107 | radeon_i2c_put_byte(radeon_connector->router_bus, | 1132 | radeon_i2c_put_byte(radeon_connector->router_bus, |
1108 | radeon_connector->router.i2c_addr, | 1133 | radeon_connector->router.i2c_addr, |
1109 | 0x1, val); | 1134 | 0x1, val); |
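The original mux programming and-ed the register value with the control-pin mask (val &= mux_control_pin), which keeps only the mux bit instead of clearing it; the rewritten helper clears the pin and then ors in the requested state - the usual read-modify-write for a mux field - and a second helper, radeon_router_select_cd_port(), does the same for the clock/data lines. The matching struct split (ddc_* vs cd_* fields) follows in radeon_mode.h below. A generic sketch of the corrected pattern (read_reg/write_reg stand in for the radeon_i2c_get_byte/put_byte calls):

    u8 val = read_reg(router_bus, 0x1);
    val &= ~mux_control_pin;      /* clear the mux control bit(s) first */
    val |= mux_state;             /* then select the requested port */
    write_reg(router_bus, 0x1, val);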
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 92457163d070..680f57644e86 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
@@ -401,13 +401,19 @@ struct radeon_hpd { | |||
401 | }; | 401 | }; |
402 | 402 | ||
403 | struct radeon_router { | 403 | struct radeon_router { |
404 | bool valid; | ||
405 | u32 router_id; | 404 | u32 router_id; |
406 | struct radeon_i2c_bus_rec i2c_info; | 405 | struct radeon_i2c_bus_rec i2c_info; |
407 | u8 i2c_addr; | 406 | u8 i2c_addr; |
408 | u8 mux_type; | 407 | /* i2c mux */ |
409 | u8 mux_control_pin; | 408 | bool ddc_valid; |
410 | u8 mux_state; | 409 | u8 ddc_mux_type; |
410 | u8 ddc_mux_control_pin; | ||
411 | u8 ddc_mux_state; | ||
412 | /* clock/data mux */ | ||
413 | bool cd_valid; | ||
414 | u8 cd_mux_type; | ||
415 | u8 cd_mux_control_pin; | ||
416 | u8 cd_mux_state; | ||
411 | }; | 417 | }; |
412 | 418 | ||
413 | struct radeon_connector { | 419 | struct radeon_connector { |
@@ -488,7 +494,8 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c, | |||
488 | u8 slave_addr, | 494 | u8 slave_addr, |
489 | u8 addr, | 495 | u8 addr, |
490 | u8 val); | 496 | u8 val); |
491 | extern void radeon_router_select_port(struct radeon_connector *radeon_connector); | 497 | extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector); |
498 | extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector); | ||
492 | extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector); | 499 | extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector); |
493 | extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); | 500 | extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); |
494 | 501 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index d7ab91416410..8eb183466015 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
@@ -102,6 +102,8 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj, | |||
102 | type = ttm_bo_type_device; | 102 | type = ttm_bo_type_device; |
103 | } | 103 | } |
104 | *bo_ptr = NULL; | 104 | *bo_ptr = NULL; |
105 | |||
106 | retry: | ||
105 | bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL); | 107 | bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL); |
106 | if (bo == NULL) | 108 | if (bo == NULL) |
107 | return -ENOMEM; | 109 | return -ENOMEM; |
@@ -109,8 +111,6 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj, | |||
109 | bo->gobj = gobj; | 111 | bo->gobj = gobj; |
110 | bo->surface_reg = -1; | 112 | bo->surface_reg = -1; |
111 | INIT_LIST_HEAD(&bo->list); | 113 | INIT_LIST_HEAD(&bo->list); |
112 | |||
113 | retry: | ||
114 | radeon_ttm_placement_from_domain(bo, domain); | 114 | radeon_ttm_placement_from_domain(bo, domain); |
115 | /* Kernel allocation are uninterruptible */ | 115 | /* Kernel allocation are uninterruptible */ |
116 | mutex_lock(&rdev->vram_mutex); | 116 | mutex_lock(&rdev->vram_mutex); |
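Moving the retry label above the kzalloc() means every pass through the placement fallback starts with a freshly allocated radeon_bo. This matters because a failed ttm_bo_init() ends up destroying the object it was given (see the ttm_bo.c hunk below, which closes the one error path that previously did not), so re-using the old pointer on retry would be a use-after-free. Simplified sketch of the corrected flow, with the ttm_bo_init() arguments elided and "retryable" standing for the driver's existing VRAM-to-GTT fallback test:

    retry:
            bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
            if (bo == NULL)
                    return -ENOMEM;
            radeon_ttm_placement_from_domain(bo, domain);
            r = ttm_bo_init(/* bdev, &bo->tbo, size, placement, ...,
                               &radeon_ttm_bo_destroy */);
            if (r) {
                    /* bo is already gone: ttm_bo_init destroyed it */
                    if (retryable)
                            goto retry;     /* allocates a new bo */
                    return r;
            }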
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index fe95bb35317e..01c2c736a1da 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
@@ -689,7 +689,8 @@ static int radeon_ttm_backend_bind(struct ttm_backend *backend, | |||
689 | gtt = container_of(backend, struct radeon_ttm_backend, backend); | 689 | gtt = container_of(backend, struct radeon_ttm_backend, backend); |
690 | gtt->offset = bo_mem->start << PAGE_SHIFT; | 690 | gtt->offset = bo_mem->start << PAGE_SHIFT; |
691 | if (!gtt->num_pages) { | 691 | if (!gtt->num_pages) { |
692 | WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", gtt->num_pages, bo_mem, backend); | 692 | WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", |
693 | gtt->num_pages, bo_mem, backend); | ||
693 | } | 694 | } |
694 | r = radeon_gart_bind(gtt->rdev, gtt->offset, | 695 | r = radeon_gart_bind(gtt->rdev, gtt->offset, |
695 | gtt->num_pages, gtt->pages); | 696 | gtt->num_pages, gtt->pages); |
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index f683e51a2a06..5512e4e5e636 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c | |||
@@ -78,7 +78,7 @@ int rs400_gart_init(struct radeon_device *rdev) | |||
78 | int r; | 78 | int r; |
79 | 79 | ||
80 | if (rdev->gart.table.ram.ptr) { | 80 | if (rdev->gart.table.ram.ptr) { |
81 | WARN(1, "RS400 GART already initialized.\n"); | 81 | WARN(1, "RS400 GART already initialized\n"); |
82 | return 0; | 82 | return 0; |
83 | } | 83 | } |
84 | /* Check gart size */ | 84 | /* Check gart size */ |
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index b091a1f6fa4e..f1c6e02c2e6b 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
@@ -375,7 +375,7 @@ int rs600_gart_init(struct radeon_device *rdev) | |||
375 | int r; | 375 | int r; |
376 | 376 | ||
377 | if (rdev->gart.table.vram.robj) { | 377 | if (rdev->gart.table.vram.robj) { |
378 | WARN(1, "RS600 GART already initialized.\n"); | 378 | WARN(1, "RS600 GART already initialized\n"); |
379 | return 0; | 379 | return 0; |
380 | } | 380 | } |
381 | /* Initialize common gart structure */ | 381 | /* Initialize common gart structure */ |
@@ -505,7 +505,7 @@ int rs600_irq_set(struct radeon_device *rdev) | |||
505 | ~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1); | 505 | ~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1); |
506 | 506 | ||
507 | if (!rdev->irq.installed) { | 507 | if (!rdev->irq.installed) { |
508 | WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); | 508 | WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); |
509 | WREG32(R_000040_GEN_INT_CNTL, 0); | 509 | WREG32(R_000040_GEN_INT_CNTL, 0); |
510 | return -EINVAL; | 510 | return -EINVAL; |
511 | } | 511 | } |
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index a1cb783c7131..3ca77dc03915 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
@@ -27,14 +27,6 @@ | |||
27 | /* | 27 | /* |
28 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> | 28 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> |
29 | */ | 29 | */ |
30 | /* Notes: | ||
31 | * | ||
32 | * We store bo pointer in drm_mm_node struct so we know which bo own a | ||
33 | * specific node. There is no protection on the pointer, thus to make | ||
34 | * sure things don't go berserk you have to access this pointer while | ||
35 | * holding the global lru lock and make sure anytime you free a node you | ||
36 | * reset the pointer to NULL. | ||
37 | */ | ||
38 | 30 | ||
39 | #include "ttm/ttm_module.h" | 31 | #include "ttm/ttm_module.h" |
40 | #include "ttm/ttm_bo_driver.h" | 32 | #include "ttm/ttm_bo_driver.h" |
@@ -45,6 +37,7 @@ | |||
45 | #include <linux/mm.h> | 37 | #include <linux/mm.h> |
46 | #include <linux/file.h> | 38 | #include <linux/file.h> |
47 | #include <linux/module.h> | 39 | #include <linux/module.h> |
40 | #include <asm/atomic.h> | ||
48 | 41 | ||
49 | #define TTM_ASSERT_LOCKED(param) | 42 | #define TTM_ASSERT_LOCKED(param) |
50 | #define TTM_DEBUG(fmt, arg...) | 43 | #define TTM_DEBUG(fmt, arg...) |
@@ -452,6 +445,11 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) | |||
452 | ttm_bo_mem_put(bo, &bo->mem); | 445 | ttm_bo_mem_put(bo, &bo->mem); |
453 | 446 | ||
454 | atomic_set(&bo->reserved, 0); | 447 | atomic_set(&bo->reserved, 0); |
448 | |||
449 | /* | ||
450 | * Make processes trying to reserve really pick it up. | ||
451 | */ | ||
452 | smp_mb__after_atomic_dec(); | ||
455 | wake_up_all(&bo->event_queue); | 453 | wake_up_all(&bo->event_queue); |
456 | } | 454 | } |
457 | 455 | ||
@@ -460,7 +458,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) | |||
460 | struct ttm_bo_device *bdev = bo->bdev; | 458 | struct ttm_bo_device *bdev = bo->bdev; |
461 | struct ttm_bo_global *glob = bo->glob; | 459 | struct ttm_bo_global *glob = bo->glob; |
462 | struct ttm_bo_driver *driver; | 460 | struct ttm_bo_driver *driver; |
463 | void *sync_obj; | 461 | void *sync_obj = NULL; |
464 | void *sync_obj_arg; | 462 | void *sync_obj_arg; |
465 | int put_count; | 463 | int put_count; |
466 | int ret; | 464 | int ret; |
@@ -495,17 +493,20 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) | |||
495 | spin_lock(&glob->lru_lock); | 493 | spin_lock(&glob->lru_lock); |
496 | } | 494 | } |
497 | queue: | 495 | queue: |
498 | sync_obj = bo->sync_obj; | ||
499 | sync_obj_arg = bo->sync_obj_arg; | ||
500 | driver = bdev->driver; | 496 | driver = bdev->driver; |
497 | if (bo->sync_obj) | ||
498 | sync_obj = driver->sync_obj_ref(bo->sync_obj); | ||
499 | sync_obj_arg = bo->sync_obj_arg; | ||
501 | 500 | ||
502 | kref_get(&bo->list_kref); | 501 | kref_get(&bo->list_kref); |
503 | list_add_tail(&bo->ddestroy, &bdev->ddestroy); | 502 | list_add_tail(&bo->ddestroy, &bdev->ddestroy); |
504 | spin_unlock(&glob->lru_lock); | 503 | spin_unlock(&glob->lru_lock); |
505 | spin_unlock(&bo->lock); | 504 | spin_unlock(&bo->lock); |
506 | 505 | ||
507 | if (sync_obj) | 506 | if (sync_obj) { |
508 | driver->sync_obj_flush(sync_obj, sync_obj_arg); | 507 | driver->sync_obj_flush(sync_obj, sync_obj_arg); |
508 | driver->sync_obj_unref(&sync_obj); | ||
509 | } | ||
509 | schedule_delayed_work(&bdev->wq, | 510 | schedule_delayed_work(&bdev->wq, |
510 | ((HZ / 100) < 1) ? 1 : HZ / 100); | 511 | ((HZ / 100) < 1) ? 1 : HZ / 100); |
511 | } | 512 | } |
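In the queued-destroy path, bo->sync_obj is only guaranteed to stay alive while bo->lock is held, yet the old code saved the bare pointer and flushed it after dropping the locks. The fix takes a driver reference while still under the lock and drops it once the flush is done, with sync_obj initialized to NULL so the unfenced case falls through cleanly. Condensed from the hunk above:

    if (bo->sync_obj)
            sync_obj = driver->sync_obj_ref(bo->sync_obj);   /* under bo->lock */
    /* queue the bo on bdev->ddestroy, drop lru_lock and bo->lock */
    if (sync_obj) {
            driver->sync_obj_flush(sync_obj, sync_obj_arg);
            driver->sync_obj_unref(&sync_obj);               /* drop our ref */
    }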
@@ -822,7 +823,6 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, | |||
822 | bool no_wait_gpu) | 823 | bool no_wait_gpu) |
823 | { | 824 | { |
824 | struct ttm_bo_device *bdev = bo->bdev; | 825 | struct ttm_bo_device *bdev = bo->bdev; |
825 | struct ttm_bo_global *glob = bdev->glob; | ||
826 | struct ttm_mem_type_manager *man = &bdev->man[mem_type]; | 826 | struct ttm_mem_type_manager *man = &bdev->man[mem_type]; |
827 | int ret; | 827 | int ret; |
828 | 828 | ||
@@ -832,12 +832,6 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, | |||
832 | return ret; | 832 | return ret; |
833 | if (mem->mm_node) | 833 | if (mem->mm_node) |
834 | break; | 834 | break; |
835 | spin_lock(&glob->lru_lock); | ||
836 | if (list_empty(&man->lru)) { | ||
837 | spin_unlock(&glob->lru_lock); | ||
838 | break; | ||
839 | } | ||
840 | spin_unlock(&glob->lru_lock); | ||
841 | ret = ttm_mem_evict_first(bdev, mem_type, interruptible, | 835 | ret = ttm_mem_evict_first(bdev, mem_type, interruptible, |
842 | no_wait_reserve, no_wait_gpu); | 836 | no_wait_reserve, no_wait_gpu); |
843 | if (unlikely(ret != 0)) | 837 | if (unlikely(ret != 0)) |
@@ -1125,35 +1119,9 @@ EXPORT_SYMBOL(ttm_bo_validate); | |||
1125 | int ttm_bo_check_placement(struct ttm_buffer_object *bo, | 1119 | int ttm_bo_check_placement(struct ttm_buffer_object *bo, |
1126 | struct ttm_placement *placement) | 1120 | struct ttm_placement *placement) |
1127 | { | 1121 | { |
1128 | int i; | 1122 | BUG_ON((placement->fpfn || placement->lpfn) && |
1123 | (bo->mem.num_pages > (placement->lpfn - placement->fpfn))); | ||
1129 | 1124 | ||
1130 | if (placement->fpfn || placement->lpfn) { | ||
1131 | if (bo->mem.num_pages > (placement->lpfn - placement->fpfn)) { | ||
1132 | printk(KERN_ERR TTM_PFX "Page number range to small " | ||
1133 | "Need %lu pages, range is [%u, %u]\n", | ||
1134 | bo->mem.num_pages, placement->fpfn, | ||
1135 | placement->lpfn); | ||
1136 | return -EINVAL; | ||
1137 | } | ||
1138 | } | ||
1139 | for (i = 0; i < placement->num_placement; i++) { | ||
1140 | if (!capable(CAP_SYS_ADMIN)) { | ||
1141 | if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) { | ||
1142 | printk(KERN_ERR TTM_PFX "Need to be root to " | ||
1143 | "modify NO_EVICT status.\n"); | ||
1144 | return -EINVAL; | ||
1145 | } | ||
1146 | } | ||
1147 | } | ||
1148 | for (i = 0; i < placement->num_busy_placement; i++) { | ||
1149 | if (!capable(CAP_SYS_ADMIN)) { | ||
1150 | if (placement->busy_placement[i] & TTM_PL_FLAG_NO_EVICT) { | ||
1151 | printk(KERN_ERR TTM_PFX "Need to be root to " | ||
1152 | "modify NO_EVICT status.\n"); | ||
1153 | return -EINVAL; | ||
1154 | } | ||
1155 | } | ||
1156 | } | ||
1157 | return 0; | 1125 | return 0; |
1158 | } | 1126 | } |
1159 | 1127 | ||
@@ -1176,6 +1144,10 @@ int ttm_bo_init(struct ttm_bo_device *bdev, | |||
1176 | num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; | 1144 | num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; |
1177 | if (num_pages == 0) { | 1145 | if (num_pages == 0) { |
1178 | printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n"); | 1146 | printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n"); |
1147 | if (destroy) | ||
1148 | (*destroy)(bo); | ||
1149 | else | ||
1150 | kfree(bo); | ||
1179 | return -EINVAL; | 1151 | return -EINVAL; |
1180 | } | 1152 | } |
1181 | bo->destroy = destroy; | 1153 | bo->destroy = destroy; |
@@ -1369,18 +1341,9 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, | |||
1369 | int ret = -EINVAL; | 1341 | int ret = -EINVAL; |
1370 | struct ttm_mem_type_manager *man; | 1342 | struct ttm_mem_type_manager *man; |
1371 | 1343 | ||
1372 | if (type >= TTM_NUM_MEM_TYPES) { | 1344 | BUG_ON(type >= TTM_NUM_MEM_TYPES); |
1373 | printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type); | ||
1374 | return ret; | ||
1375 | } | ||
1376 | |||
1377 | man = &bdev->man[type]; | 1345 | man = &bdev->man[type]; |
1378 | if (man->has_type) { | 1346 | BUG_ON(man->has_type); |
1379 | printk(KERN_ERR TTM_PFX | ||
1380 | "Memory manager already initialized for type %d\n", | ||
1381 | type); | ||
1382 | return ret; | ||
1383 | } | ||
1384 | 1347 | ||
1385 | ret = bdev->driver->init_mem_type(bdev, type, man); | 1348 | ret = bdev->driver->init_mem_type(bdev, type, man); |
1386 | if (ret) | 1349 | if (ret) |
@@ -1389,13 +1352,6 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, | |||
1389 | 1352 | ||
1390 | ret = 0; | 1353 | ret = 0; |
1391 | if (type != TTM_PL_SYSTEM) { | 1354 | if (type != TTM_PL_SYSTEM) { |
1392 | if (!p_size) { | ||
1393 | printk(KERN_ERR TTM_PFX | ||
1394 | "Zero size memory manager type %d\n", | ||
1395 | type); | ||
1396 | return ret; | ||
1397 | } | ||
1398 | |||
1399 | ret = (*man->func->init)(man, p_size); | 1355 | ret = (*man->func->init)(man, p_size); |
1400 | if (ret) | 1356 | if (ret) |
1401 | return ret; | 1357 | return ret; |
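The remaining ttm_bo.c changes tighten API contracts: ttm_bo_check_placement() and ttm_bo_init_mm() now BUG_ON() conditions that can only be driver programming errors, ttm_bo_mem_force_space() stops peeking at the LRU list under the global lock and trusts ttm_mem_evict_first()'s return value, and ttm_bo_init() destroys (or kfree()s) the buffer object itself when it rejects a zero-size request. Assuming the usual TTM destroy-callback contract, the practical consequence is that ttm_bo_init() owns the object on every failure path, so a caller looks like:

    bo = kzalloc(sizeof(*bo), GFP_KERNEL);
    if (!bo)
            return -ENOMEM;
    r = ttm_bo_init(/* bdev, &bo->tbo, size, ..., destroy_fn */);
    if (r)
            return r;   /* do not free bo here; ttm_bo_init already did */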
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c index 7410c190c891..038e947d00f9 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_manager.c +++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /************************************************************************** | 1 | /************************************************************************** |
2 | * | 2 | * |
3 | * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA | 3 | * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA |
4 | * All Rights Reserved. | 4 | * All Rights Reserved. |
5 | * | 5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a | 6 | * Permission is hereby granted, free of charge, to any person obtaining a |
@@ -31,20 +31,29 @@ | |||
31 | #include "ttm/ttm_module.h" | 31 | #include "ttm/ttm_module.h" |
32 | #include "ttm/ttm_bo_driver.h" | 32 | #include "ttm/ttm_bo_driver.h" |
33 | #include "ttm/ttm_placement.h" | 33 | #include "ttm/ttm_placement.h" |
34 | #include <linux/jiffies.h> | 34 | #include "drm_mm.h" |
35 | #include <linux/slab.h> | 35 | #include <linux/slab.h> |
36 | #include <linux/sched.h> | 36 | #include <linux/spinlock.h> |
37 | #include <linux/mm.h> | ||
38 | #include <linux/file.h> | ||
39 | #include <linux/module.h> | 37 | #include <linux/module.h> |
40 | 38 | ||
39 | /** | ||
40 | * Currently we use a spinlock for the lock, but a mutex *may* be | ||
41 | * more appropriate to reduce scheduling latency if the range manager | ||
42 | * ends up with very fragmented allocation patterns. | ||
43 | */ | ||
44 | |||
45 | struct ttm_range_manager { | ||
46 | struct drm_mm mm; | ||
47 | spinlock_t lock; | ||
48 | }; | ||
49 | |||
41 | static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, | 50 | static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, |
42 | struct ttm_buffer_object *bo, | 51 | struct ttm_buffer_object *bo, |
43 | struct ttm_placement *placement, | 52 | struct ttm_placement *placement, |
44 | struct ttm_mem_reg *mem) | 53 | struct ttm_mem_reg *mem) |
45 | { | 54 | { |
46 | struct ttm_bo_global *glob = man->bdev->glob; | 55 | struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; |
47 | struct drm_mm *mm = man->priv; | 56 | struct drm_mm *mm = &rman->mm; |
48 | struct drm_mm_node *node = NULL; | 57 | struct drm_mm_node *node = NULL; |
49 | unsigned long lpfn; | 58 | unsigned long lpfn; |
50 | int ret; | 59 | int ret; |
@@ -57,19 +66,19 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, | |||
57 | if (unlikely(ret)) | 66 | if (unlikely(ret)) |
58 | return ret; | 67 | return ret; |
59 | 68 | ||
60 | spin_lock(&glob->lru_lock); | 69 | spin_lock(&rman->lock); |
61 | node = drm_mm_search_free_in_range(mm, | 70 | node = drm_mm_search_free_in_range(mm, |
62 | mem->num_pages, mem->page_alignment, | 71 | mem->num_pages, mem->page_alignment, |
63 | placement->fpfn, lpfn, 1); | 72 | placement->fpfn, lpfn, 1); |
64 | if (unlikely(node == NULL)) { | 73 | if (unlikely(node == NULL)) { |
65 | spin_unlock(&glob->lru_lock); | 74 | spin_unlock(&rman->lock); |
66 | return 0; | 75 | return 0; |
67 | } | 76 | } |
68 | node = drm_mm_get_block_atomic_range(node, mem->num_pages, | 77 | node = drm_mm_get_block_atomic_range(node, mem->num_pages, |
69 | mem->page_alignment, | 78 | mem->page_alignment, |
70 | placement->fpfn, | 79 | placement->fpfn, |
71 | lpfn); | 80 | lpfn); |
72 | spin_unlock(&glob->lru_lock); | 81 | spin_unlock(&rman->lock); |
73 | } while (node == NULL); | 82 | } while (node == NULL); |
74 | 83 | ||
75 | mem->mm_node = node; | 84 | mem->mm_node = node; |
@@ -80,12 +89,12 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, | |||
80 | static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man, | 89 | static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man, |
81 | struct ttm_mem_reg *mem) | 90 | struct ttm_mem_reg *mem) |
82 | { | 91 | { |
83 | struct ttm_bo_global *glob = man->bdev->glob; | 92 | struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; |
84 | 93 | ||
85 | if (mem->mm_node) { | 94 | if (mem->mm_node) { |
86 | spin_lock(&glob->lru_lock); | 95 | spin_lock(&rman->lock); |
87 | drm_mm_put_block(mem->mm_node); | 96 | drm_mm_put_block(mem->mm_node); |
88 | spin_unlock(&glob->lru_lock); | 97 | spin_unlock(&rman->lock); |
89 | mem->mm_node = NULL; | 98 | mem->mm_node = NULL; |
90 | } | 99 | } |
91 | } | 100 | } |
@@ -93,49 +102,49 @@ static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man, | |||
93 | static int ttm_bo_man_init(struct ttm_mem_type_manager *man, | 102 | static int ttm_bo_man_init(struct ttm_mem_type_manager *man, |
94 | unsigned long p_size) | 103 | unsigned long p_size) |
95 | { | 104 | { |
96 | struct drm_mm *mm; | 105 | struct ttm_range_manager *rman; |
97 | int ret; | 106 | int ret; |
98 | 107 | ||
99 | mm = kzalloc(sizeof(*mm), GFP_KERNEL); | 108 | rman = kzalloc(sizeof(*rman), GFP_KERNEL); |
100 | if (!mm) | 109 | if (!rman) |
101 | return -ENOMEM; | 110 | return -ENOMEM; |
102 | 111 | ||
103 | ret = drm_mm_init(mm, 0, p_size); | 112 | ret = drm_mm_init(&rman->mm, 0, p_size); |
104 | if (ret) { | 113 | if (ret) { |
105 | kfree(mm); | 114 | kfree(rman); |
106 | return ret; | 115 | return ret; |
107 | } | 116 | } |
108 | 117 | ||
109 | man->priv = mm; | 118 | spin_lock_init(&rman->lock); |
119 | man->priv = rman; | ||
110 | return 0; | 120 | return 0; |
111 | } | 121 | } |
112 | 122 | ||
113 | static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man) | 123 | static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man) |
114 | { | 124 | { |
115 | struct ttm_bo_global *glob = man->bdev->glob; | 125 | struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; |
116 | struct drm_mm *mm = man->priv; | 126 | struct drm_mm *mm = &rman->mm; |
117 | int ret = 0; | ||
118 | 127 | ||
119 | spin_lock(&glob->lru_lock); | 128 | spin_lock(&rman->lock); |
120 | if (drm_mm_clean(mm)) { | 129 | if (drm_mm_clean(mm)) { |
121 | drm_mm_takedown(mm); | 130 | drm_mm_takedown(mm); |
122 | kfree(mm); | 131 | spin_unlock(&rman->lock); |
132 | kfree(rman); | ||
123 | man->priv = NULL; | 133 | man->priv = NULL; |
124 | } else | 134 | return 0; |
125 | ret = -EBUSY; | 135 | } |
126 | spin_unlock(&glob->lru_lock); | 136 | spin_unlock(&rman->lock); |
127 | return ret; | 137 | return -EBUSY; |
128 | } | 138 | } |
129 | 139 | ||
130 | static void ttm_bo_man_debug(struct ttm_mem_type_manager *man, | 140 | static void ttm_bo_man_debug(struct ttm_mem_type_manager *man, |
131 | const char *prefix) | 141 | const char *prefix) |
132 | { | 142 | { |
133 | struct ttm_bo_global *glob = man->bdev->glob; | 143 | struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; |
134 | struct drm_mm *mm = man->priv; | ||
135 | 144 | ||
136 | spin_lock(&glob->lru_lock); | 145 | spin_lock(&rman->lock); |
137 | drm_mm_debug_table(mm, prefix); | 146 | drm_mm_debug_table(&rman->mm, prefix); |
138 | spin_unlock(&glob->lru_lock); | 147 | spin_unlock(&rman->lock); |
139 | } | 148 | } |
140 | 149 | ||
141 | const struct ttm_mem_type_manager_func ttm_bo_manager_func = { | 150 | const struct ttm_mem_type_manager_func ttm_bo_manager_func = { |
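The range manager stops borrowing the device's global lru_lock: its drm_mm is wrapped in a private ttm_range_manager with its own spinlock (the new header comment notes a mutex may eventually suit heavily fragmented workloads better), and takedown now releases the lock before freeing the structure. The allocation fast path thus becomes:

    struct ttm_range_manager *rman = man->priv;

    spin_lock(&rman->lock);
    node = drm_mm_search_free_in_range(&rman->mm, mem->num_pages,
                                       mem->page_alignment,
                                       placement->fpfn, lpfn, 1);
    if (node)
            node = drm_mm_get_block_atomic_range(node, mem->num_pages,
                                                 mem->page_alignment,
                                                 placement->fpfn, lpfn);
    spin_unlock(&rman->lock);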
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index a7bab87a548b..af789dc869b9 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c | |||
@@ -440,10 +440,8 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) | |||
440 | return ret; | 440 | return ret; |
441 | 441 | ||
442 | ret = be->func->bind(be, bo_mem); | 442 | ret = be->func->bind(be, bo_mem); |
443 | if (ret) { | 443 | if (unlikely(ret != 0)) |
444 | printk(KERN_ERR TTM_PFX "Couldn't bind backend.\n"); | ||
445 | return ret; | 444 | return ret; |
446 | } | ||
447 | 445 | ||
448 | ttm->state = tt_bound; | 446 | ttm->state = tt_bound; |
449 | 447 | ||
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c index 9b5b4d9dd62c..3e038a394c51 100644 --- a/drivers/gpu/drm/via/via_dmablit.c +++ b/drivers/gpu/drm/via/via_dmablit.c | |||
@@ -235,9 +235,9 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer) | |||
235 | vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) - | 235 | vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) - |
236 | first_pfn + 1; | 236 | first_pfn + 1; |
237 | 237 | ||
238 | if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages))) | 238 | vsg->pages = vzalloc(sizeof(struct page *) * vsg->num_pages); |
239 | if (NULL == vsg->pages) | ||
239 | return -ENOMEM; | 240 | return -ENOMEM; |
240 | memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages); | ||
241 | down_read(¤t->mm->mmap_sem); | 241 | down_read(¤t->mm->mmap_sem); |
242 | ret = get_user_pages(current, current->mm, | 242 | ret = get_user_pages(current, current->mm, |
243 | (unsigned long)xfer->mem_addr, | 243 | (unsigned long)xfer->mem_addr, |
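via_lock_all_dma_pages() switches from vmalloc() plus an explicit memset() to vzalloc(), which allocates and zeroes the page-pointer array in one step:

    vsg->pages = vzalloc(sizeof(struct page *) * vsg->num_pages);
    if (vsg->pages == NULL)
            return -ENOMEM;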
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 51d9f9f1d7f2..76954e3528c1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
@@ -691,6 +691,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, | |||
691 | 691 | ||
692 | fence_rep.error = ret; | 692 | fence_rep.error = ret; |
693 | fence_rep.fence_seq = (uint64_t) sequence; | 693 | fence_rep.fence_seq = (uint64_t) sequence; |
694 | fence_rep.pad64 = 0; | ||
694 | 695 | ||
695 | user_fence_rep = (struct drm_vmw_fence_rep __user *) | 696 | user_fence_rep = (struct drm_vmw_fence_rep __user *) |
696 | (unsigned long)arg->fence_rep; | 697 | (unsigned long)arg->fence_rep; |
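fence_rep is a stack structure that is copied out to the user-supplied fence_rep pointer, so the previously untouched pad64 field could leak kernel stack bytes to userspace; explicitly zeroing it (or, equivalently, memset(&fence_rep, 0, sizeof(fence_rep)) before filling the fields) closes that hole.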
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 87c6e6156d7d..cceeb42789b6 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
@@ -720,6 +720,8 @@ static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb) | |||
720 | &vmw_vram_ne_placement, | 720 | &vmw_vram_ne_placement, |
721 | false, &vmw_dmabuf_bo_free); | 721 | false, &vmw_dmabuf_bo_free); |
722 | vmw_overlay_resume_all(dev_priv); | 722 | vmw_overlay_resume_all(dev_priv); |
723 | if (unlikely(ret != 0)) | ||
724 | vfbs->buffer = NULL; | ||
723 | 725 | ||
724 | return ret; | 726 | return ret; |
725 | } | 727 | } |
@@ -730,6 +732,9 @@ static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb) | |||
730 | struct vmw_framebuffer_surface *vfbs = | 732 | struct vmw_framebuffer_surface *vfbs = |
731 | vmw_framebuffer_to_vfbs(&vfb->base); | 733 | vmw_framebuffer_to_vfbs(&vfb->base); |
732 | 734 | ||
735 | if (unlikely(vfbs->buffer == NULL)) | ||
736 | return 0; | ||
737 | |||
733 | bo = &vfbs->buffer->base; | 738 | bo = &vfbs->buffer->base; |
734 | ttm_bo_unref(&bo); | 739 | ttm_bo_unref(&bo); |
735 | vfbs->buffer = NULL; | 740 | vfbs->buffer = NULL; |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index a01c47ddb5bc..29113c9b26a8 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | |||
@@ -557,7 +557,7 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) | |||
557 | return -EINVAL; | 557 | return -EINVAL; |
558 | } | 558 | } |
559 | 559 | ||
560 | dev_priv->ldu_priv = kmalloc(GFP_KERNEL, sizeof(*dev_priv->ldu_priv)); | 560 | dev_priv->ldu_priv = kmalloc(sizeof(*dev_priv->ldu_priv), GFP_KERNEL); |
561 | 561 | ||
562 | if (!dev_priv->ldu_priv) | 562 | if (!dev_priv->ldu_priv) |
563 | return -ENOMEM; | 563 | return -ENOMEM; |
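The kmalloc() arguments were swapped: the prototype is void *kmalloc(size_t size, gfp_t flags), so the old call requested a buffer whose length was the numeric value of GFP_KERNEL and passed the structure size as allocation flags. Corrected form, as in the hunk:

    dev_priv->ldu_priv = kmalloc(sizeof(*dev_priv->ldu_priv), GFP_KERNEL);
    if (!dev_priv->ldu_priv)
            return -ENOMEM;

The identical swap is fixed in vmwgfx_overlay.c just below.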
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c index df2036ed18d5..f1a52f9e7298 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | |||
@@ -585,7 +585,7 @@ int vmw_overlay_init(struct vmw_private *dev_priv) | |||
585 | return -ENOSYS; | 585 | return -ENOSYS; |
586 | } | 586 | } |
587 | 587 | ||
588 | overlay = kmalloc(GFP_KERNEL, sizeof(*overlay)); | 588 | overlay = kmalloc(sizeof(*overlay), GFP_KERNEL); |
589 | if (!overlay) | 589 | if (!overlay) |
590 | return -ENOMEM; | 590 | return -ENOMEM; |
591 | 591 | ||
diff --git a/drivers/gpu/stub/Kconfig b/drivers/gpu/stub/Kconfig index 742c423567cf..0e1edd7311ff 100644 --- a/drivers/gpu/stub/Kconfig +++ b/drivers/gpu/stub/Kconfig | |||
@@ -3,6 +3,9 @@ config STUB_POULSBO | |||
3 | depends on PCI | 3 | depends on PCI |
4 | # Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled | 4 | # Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled |
5 | # but for select to work, need to select ACPI_VIDEO's dependencies, ick | 5 | # but for select to work, need to select ACPI_VIDEO's dependencies, ick |
6 | select VIDEO_OUTPUT_CONTROL if ACPI | ||
7 | select BACKLIGHT_CLASS_DEVICE if ACPI | ||
8 | select INPUT if ACPI | ||
6 | select ACPI_VIDEO if ACPI | 9 | select ACPI_VIDEO if ACPI |
7 | help | 10 | help |
8 | Choose this option if you have a system that has Intel GMA500 | 11 | Choose this option if you have a system that has Intel GMA500 |
diff --git a/drivers/hwmon/ad7414.c b/drivers/hwmon/ad7414.c index 1e4c21fc1a89..86d822aa9bbf 100644 --- a/drivers/hwmon/ad7414.c +++ b/drivers/hwmon/ad7414.c | |||
@@ -178,11 +178,13 @@ static int ad7414_probe(struct i2c_client *client, | |||
178 | { | 178 | { |
179 | struct ad7414_data *data; | 179 | struct ad7414_data *data; |
180 | int conf; | 180 | int conf; |
181 | int err = 0; | 181 | int err; |
182 | 182 | ||
183 | if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA | | 183 | if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA | |
184 | I2C_FUNC_SMBUS_READ_WORD_DATA)) | 184 | I2C_FUNC_SMBUS_READ_WORD_DATA)) { |
185 | err = -EOPNOTSUPP; | ||
185 | goto exit; | 186 | goto exit; |
187 | } | ||
186 | 188 | ||
187 | data = kzalloc(sizeof(struct ad7414_data), GFP_KERNEL); | 189 | data = kzalloc(sizeof(struct ad7414_data), GFP_KERNEL); |
188 | if (!data) { | 190 | if (!data) { |
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c index 9e775717abb7..87d92a56a939 100644 --- a/drivers/hwmon/adt7470.c +++ b/drivers/hwmon/adt7470.c | |||
@@ -1286,8 +1286,10 @@ static int adt7470_probe(struct i2c_client *client, | |||
1286 | init_completion(&data->auto_update_stop); | 1286 | init_completion(&data->auto_update_stop); |
1287 | data->auto_update = kthread_run(adt7470_update_thread, client, | 1287 | data->auto_update = kthread_run(adt7470_update_thread, client, |
1288 | dev_name(data->hwmon_dev)); | 1288 | dev_name(data->hwmon_dev)); |
1289 | if (IS_ERR(data->auto_update)) | 1289 | if (IS_ERR(data->auto_update)) { |
1290 | err = PTR_ERR(data->auto_update); | ||
1290 | goto exit_unregister; | 1291 | goto exit_unregister; |
1292 | } | ||
1291 | 1293 | ||
1292 | return 0; | 1294 | return 0; |
1293 | 1295 | ||
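Both hwmon probe fixes address error paths that jumped to the exit label with an unset or stale error code: ad7414_probe() now returns -EOPNOTSUPP when the adapter lacks the required SMBus byte/word functionality, and adt7470_probe() propagates PTR_ERR() when kthread_run() fails. The resulting pattern:

    data->auto_update = kthread_run(adt7470_update_thread, client,
                                    dev_name(data->hwmon_dev));
    if (IS_ERR(data->auto_update)) {
            err = PTR_ERR(data->auto_update);   /* report the real error */
            goto exit_unregister;
    }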
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c index fa9708c2d723..4033974d1bb3 100644 --- a/drivers/hwmon/amc6821.c +++ b/drivers/hwmon/amc6821.c | |||
@@ -4,7 +4,7 @@ | |||
4 | Copyright (C) 2009 T. Mertelj <tomaz.mertelj@guest.arnes.si> | 4 | Copyright (C) 2009 T. Mertelj <tomaz.mertelj@guest.arnes.si> |
5 | 5 | ||
6 | Based on max6650.c: | 6 | Based on max6650.c: |
7 | Copyright (C) 2007 Hans J. Koch <hjk@linutronix.de> | 7 | Copyright (C) 2007 Hans J. Koch <hjk@hansjkoch.de> |
8 | 8 | ||
9 | This program is free software; you can redistribute it and/or modify | 9 | This program is free software; you can redistribute it and/or modify |
10 | it under the terms of the GNU General Public License as published by | 10 | it under the terms of the GNU General Public License as published by |
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c index aa701a183707..f141a1de519c 100644 --- a/drivers/hwmon/gpio-fan.c +++ b/drivers/hwmon/gpio-fan.c | |||
@@ -376,10 +376,6 @@ static int fan_ctrl_init(struct gpio_fan_data *fan_data, | |||
376 | } | 376 | } |
377 | } | 377 | } |
378 | 378 | ||
379 | err = sysfs_create_group(&pdev->dev.kobj, &gpio_fan_ctrl_group); | ||
380 | if (err) | ||
381 | goto err_free_gpio; | ||
382 | |||
383 | fan_data->num_ctrl = num_ctrl; | 379 | fan_data->num_ctrl = num_ctrl; |
384 | fan_data->ctrl = ctrl; | 380 | fan_data->ctrl = ctrl; |
385 | fan_data->num_speed = pdata->num_speed; | 381 | fan_data->num_speed = pdata->num_speed; |
@@ -391,6 +387,10 @@ static int fan_ctrl_init(struct gpio_fan_data *fan_data, | |||
391 | goto err_free_gpio; | 387 | goto err_free_gpio; |
392 | } | 388 | } |
393 | 389 | ||
390 | err = sysfs_create_group(&pdev->dev.kobj, &gpio_fan_ctrl_group); | ||
391 | if (err) | ||
392 | goto err_free_gpio; | ||
393 | |||
394 | return 0; | 394 | return 0; |
395 | 395 | ||
396 | err_free_gpio: | 396 | err_free_gpio: |
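fan_ctrl_init() used to register the gpio_fan_ctrl_group sysfs attributes before fan_data->num_ctrl, ->ctrl and ->num_speed were filled in, leaving a window where userspace could read the new files against half-initialized data; the group is now created as the last step, after the fields it dereferences are set and after the remaining setup that can still fail via err_free_gpio.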
diff --git a/drivers/hwmon/lm93.c b/drivers/hwmon/lm93.c index 6669255aadcf..c9ed14eba5a6 100644 --- a/drivers/hwmon/lm93.c +++ b/drivers/hwmon/lm93.c | |||
@@ -20,7 +20,7 @@ | |||
20 | Adapted to 2.6.20 by Carsten Emde <cbe@osadl.org> | 20 | Adapted to 2.6.20 by Carsten Emde <cbe@osadl.org> |
21 | Copyright (c) 2006 Carsten Emde, Open Source Automation Development Lab | 21 | Copyright (c) 2006 Carsten Emde, Open Source Automation Development Lab |
22 | 22 | ||
23 | Modified for mainline integration by Hans J. Koch <hjk@linutronix.de> | 23 | Modified for mainline integration by Hans J. Koch <hjk@hansjkoch.de> |
24 | Copyright (c) 2007 Hans J. Koch, Linutronix GmbH | 24 | Copyright (c) 2007 Hans J. Koch, Linutronix GmbH |
25 | 25 | ||
26 | This program is free software; you can redistribute it and/or modify | 26 | This program is free software; you can redistribute it and/or modify |
@@ -2629,7 +2629,7 @@ static void __exit lm93_exit(void) | |||
2629 | } | 2629 | } |
2630 | 2630 | ||
2631 | MODULE_AUTHOR("Mark M. Hoffman <mhoffman@lightlink.com>, " | 2631 | MODULE_AUTHOR("Mark M. Hoffman <mhoffman@lightlink.com>, " |
2632 | "Hans J. Koch <hjk@linutronix.de"); | 2632 | "Hans J. Koch <hjk@hansjkoch.de>"); |
2633 | MODULE_DESCRIPTION("LM93 driver"); | 2633 | MODULE_DESCRIPTION("LM93 driver"); |
2634 | MODULE_LICENSE("GPL"); | 2634 | MODULE_LICENSE("GPL"); |
2635 | 2635 | ||
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c index 464340f25496..4546d82f024a 100644 --- a/drivers/hwmon/lm95241.c +++ b/drivers/hwmon/lm95241.c | |||
@@ -128,9 +128,12 @@ static ssize_t set_interval(struct device *dev, struct device_attribute *attr, | |||
128 | { | 128 | { |
129 | struct i2c_client *client = to_i2c_client(dev); | 129 | struct i2c_client *client = to_i2c_client(dev); |
130 | struct lm95241_data *data = i2c_get_clientdata(client); | 130 | struct lm95241_data *data = i2c_get_clientdata(client); |
131 | unsigned long val; | ||
131 | 132 | ||
132 | strict_strtol(buf, 10, &data->interval); | 133 | if (strict_strtoul(buf, 10, &val) < 0) |
133 | data->interval = data->interval * HZ / 1000; | 134 | return -EINVAL; |
135 | |||
136 | data->interval = val * HZ / 1000; | ||
134 | 137 | ||
135 | return count; | 138 | return count; |
136 | } | 139 | } |
@@ -188,7 +191,9 @@ static ssize_t set_type##flag(struct device *dev, \ | |||
188 | struct lm95241_data *data = i2c_get_clientdata(client); \ | 191 | struct lm95241_data *data = i2c_get_clientdata(client); \ |
189 | \ | 192 | \ |
190 | long val; \ | 193 | long val; \ |
191 | strict_strtol(buf, 10, &val); \ | 194 | \ |
195 | if (strict_strtol(buf, 10, &val) < 0) \ | ||
196 | return -EINVAL; \ | ||
192 | \ | 197 | \ |
193 | if ((val == 1) || (val == 2)) { \ | 198 | if ((val == 1) || (val == 2)) { \ |
194 | \ | 199 | \ |
@@ -227,7 +232,9 @@ static ssize_t set_min##flag(struct device *dev, \ | |||
227 | struct lm95241_data *data = i2c_get_clientdata(client); \ | 232 | struct lm95241_data *data = i2c_get_clientdata(client); \ |
228 | \ | 233 | \ |
229 | long val; \ | 234 | long val; \ |
230 | strict_strtol(buf, 10, &val); \ | 235 | \ |
236 | if (strict_strtol(buf, 10, &val) < 0) \ | ||
237 | return -EINVAL;\ | ||
231 | \ | 238 | \ |
232 | mutex_lock(&data->update_lock); \ | 239 | mutex_lock(&data->update_lock); \ |
233 | \ | 240 | \ |
@@ -256,7 +263,9 @@ static ssize_t set_max##flag(struct device *dev, \ | |||
256 | struct lm95241_data *data = i2c_get_clientdata(client); \ | 263 | struct lm95241_data *data = i2c_get_clientdata(client); \ |
257 | \ | 264 | \ |
258 | long val; \ | 265 | long val; \ |
259 | strict_strtol(buf, 10, &val); \ | 266 | \ |
267 | if (strict_strtol(buf, 10, &val) < 0) \ | ||
268 | return -EINVAL; \ | ||
260 | \ | 269 | \ |
261 | mutex_lock(&data->update_lock); \ | 270 | mutex_lock(&data->update_lock); \ |
262 | \ | 271 | \ |
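The lm95241 store() callbacks previously ignored strict_strtol()'s return value and, for the update interval, parsed straight into the shared data->interval field. They now parse into a local variable, reject malformed input with -EINVAL, and only then update driver state:

    unsigned long val;

    if (strict_strtoul(buf, 10, &val) < 0)
            return -EINVAL;
    data->interval = val * HZ / 1000;
    return count;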
diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c index a0160ee5caef..9a11532ecae8 100644 --- a/drivers/hwmon/max6650.c +++ b/drivers/hwmon/max6650.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * max6650.c - Part of lm_sensors, Linux kernel modules for hardware | 2 | * max6650.c - Part of lm_sensors, Linux kernel modules for hardware |
3 | * monitoring. | 3 | * monitoring. |
4 | * | 4 | * |
5 | * (C) 2007 by Hans J. Koch <hjk@linutronix.de> | 5 | * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de> |
6 | * | 6 | * |
7 | * based on code written by John Morris <john.morris@spirentcom.com> | 7 | * based on code written by John Morris <john.morris@spirentcom.com> |
8 | * Copyright (c) 2003 Spirent Communications | 8 | * Copyright (c) 2003 Spirent Communications |
diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c index 1d840aa83782..cdbc7448491e 100644 --- a/drivers/hwmon/w83795.c +++ b/drivers/hwmon/w83795.c | |||
@@ -165,10 +165,14 @@ static const u8 IN_LSB_SHIFT_IDX[][2] = { | |||
165 | 165 | ||
166 | #define W83795_REG_VID_CTRL 0x6A | 166 | #define W83795_REG_VID_CTRL 0x6A |
167 | 167 | ||
168 | #define W83795_REG_ALARM_CTRL 0x40 | ||
169 | #define ALARM_CTRL_RTSACS (1 << 7) | ||
168 | #define W83795_REG_ALARM(index) (0x41 + (index)) | 170 | #define W83795_REG_ALARM(index) (0x41 + (index)) |
171 | #define W83795_REG_CLR_CHASSIS 0x4D | ||
169 | #define W83795_REG_BEEP(index) (0x50 + (index)) | 172 | #define W83795_REG_BEEP(index) (0x50 + (index)) |
170 | 173 | ||
171 | #define W83795_REG_CLR_CHASSIS 0x4D | 174 | #define W83795_REG_OVT_CFG 0x58 |
175 | #define OVT_CFG_SEL (1 << 7) | ||
172 | 176 | ||
173 | 177 | ||
174 | #define W83795_REG_FCMS1 0x201 | 178 | #define W83795_REG_FCMS1 0x201 |
@@ -178,6 +182,14 @@ static const u8 IN_LSB_SHIFT_IDX[][2] = { | |||
178 | 182 | ||
179 | #define W83795_REG_TSS(index) (0x209 + (index)) | 183 | #define W83795_REG_TSS(index) (0x209 + (index)) |
180 | 184 | ||
185 | #define TSS_MAP_RESERVED 0xff | ||
186 | static const u8 tss_map[4][6] = { | ||
187 | { 0, 1, 2, 3, 4, 5}, | ||
188 | { 6, 7, 8, 9, 0, 1}, | ||
189 | {10, 11, 12, 13, 2, 3}, | ||
190 | { 4, 5, 4, 5, TSS_MAP_RESERVED, TSS_MAP_RESERVED}, | ||
191 | }; | ||
192 | |||
181 | #define PWM_OUTPUT 0 | 193 | #define PWM_OUTPUT 0 |
182 | #define PWM_FREQ 1 | 194 | #define PWM_FREQ 1 |
183 | #define PWM_START 2 | 195 | #define PWM_START 2 |
@@ -369,6 +381,7 @@ struct w83795_data { | |||
369 | u8 setup_pwm[3]; /* Register value */ | 381 | u8 setup_pwm[3]; /* Register value */ |
370 | 382 | ||
371 | u8 alarms[6]; /* Register value */ | 383 | u8 alarms[6]; /* Register value */ |
384 | u8 enable_beep; | ||
372 | u8 beeps[6]; /* Register value */ | 385 | u8 beeps[6]; /* Register value */ |
373 | 386 | ||
374 | char valid; | 387 | char valid; |
@@ -499,8 +512,11 @@ static void w83795_update_limits(struct i2c_client *client) | |||
499 | } | 512 | } |
500 | 513 | ||
501 | /* Read beep settings */ | 514 | /* Read beep settings */ |
502 | for (i = 0; i < ARRAY_SIZE(data->beeps); i++) | 515 | if (data->enable_beep) { |
503 | data->beeps[i] = w83795_read(client, W83795_REG_BEEP(i)); | 516 | for (i = 0; i < ARRAY_SIZE(data->beeps); i++) |
517 | data->beeps[i] = | ||
518 | w83795_read(client, W83795_REG_BEEP(i)); | ||
519 | } | ||
504 | 520 | ||
505 | data->valid_limits = 1; | 521 | data->valid_limits = 1; |
506 | } | 522 | } |
@@ -577,6 +593,7 @@ static struct w83795_data *w83795_update_device(struct device *dev) | |||
577 | struct i2c_client *client = to_i2c_client(dev); | 593 | struct i2c_client *client = to_i2c_client(dev); |
578 | struct w83795_data *data = i2c_get_clientdata(client); | 594 | struct w83795_data *data = i2c_get_clientdata(client); |
579 | u16 tmp; | 595 | u16 tmp; |
596 | u8 intrusion; | ||
580 | int i; | 597 | int i; |
581 | 598 | ||
582 | mutex_lock(&data->update_lock); | 599 | mutex_lock(&data->update_lock); |
@@ -648,9 +665,24 @@ static struct w83795_data *w83795_update_device(struct device *dev) | |||
648 | w83795_read(client, W83795_REG_PWM(i, PWM_OUTPUT)); | 665 | w83795_read(client, W83795_REG_PWM(i, PWM_OUTPUT)); |
649 | } | 666 | } |
650 | 667 | ||
651 | /* update alarm */ | 668 | /* Update intrusion and alarms |
669 | * It is important to read intrusion first, because reading from | ||
670 | * register SMI STS6 clears the interrupt status temporarily. */ | ||
671 | tmp = w83795_read(client, W83795_REG_ALARM_CTRL); | ||
672 | /* Switch to interrupt status for intrusion if needed */ | ||
673 | if (tmp & ALARM_CTRL_RTSACS) | ||
674 | w83795_write(client, W83795_REG_ALARM_CTRL, | ||
675 | tmp & ~ALARM_CTRL_RTSACS); | ||
676 | intrusion = w83795_read(client, W83795_REG_ALARM(5)) & (1 << 6); | ||
677 | /* Switch to real-time alarms */ | ||
678 | w83795_write(client, W83795_REG_ALARM_CTRL, tmp | ALARM_CTRL_RTSACS); | ||
652 | for (i = 0; i < ARRAY_SIZE(data->alarms); i++) | 679 | for (i = 0; i < ARRAY_SIZE(data->alarms); i++) |
653 | data->alarms[i] = w83795_read(client, W83795_REG_ALARM(i)); | 680 | data->alarms[i] = w83795_read(client, W83795_REG_ALARM(i)); |
681 | data->alarms[5] |= intrusion; | ||
682 | /* Restore original configuration if needed */ | ||
683 | if (!(tmp & ALARM_CTRL_RTSACS)) | ||
684 | w83795_write(client, W83795_REG_ALARM_CTRL, | ||
685 | tmp & ~ALARM_CTRL_RTSACS); | ||
654 | 686 | ||
655 | data->last_updated = jiffies; | 687 | data->last_updated = jiffies; |
656 | data->valid = 1; | 688 | data->valid = 1; |
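The W83795 exposes its status registers in two views selected by ALARM_CTRL_RTSACS: interrupt (SMI) status, where the case-open/intrusion bit is sampled but which the read itself clears, and real-time status, which is what the ordinary alarm files should report. The update path therefore reads the intrusion bit first in interrupt mode, flips RTSACS to read the six real-time alarm registers, ORs the saved intrusion bit back into alarms[5], and finally restores whichever mode was configured before, exactly as the in-line comment describes. The chassis-clear store below also re-reads ALARM(5) and invalidates the cache so the cleared state shows up immediately.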
@@ -730,6 +762,10 @@ store_chassis_clear(struct device *dev, | |||
730 | val = w83795_read(client, W83795_REG_CLR_CHASSIS); | 762 | val = w83795_read(client, W83795_REG_CLR_CHASSIS); |
731 | val |= 0x80; | 763 | val |= 0x80; |
732 | w83795_write(client, W83795_REG_CLR_CHASSIS, val); | 764 | w83795_write(client, W83795_REG_CLR_CHASSIS, val); |
765 | |||
766 | /* Clear status and force cache refresh */ | ||
767 | w83795_read(client, W83795_REG_ALARM(5)); | ||
768 | data->valid = 0; | ||
733 | mutex_unlock(&data->update_lock); | 769 | mutex_unlock(&data->update_lock); |
734 | return count; | 770 | return count; |
735 | } | 771 | } |
@@ -857,20 +893,20 @@ show_pwm_enable(struct device *dev, struct device_attribute *attr, char *buf) | |||
857 | int index = sensor_attr->index; | 893 | int index = sensor_attr->index; |
858 | u8 tmp; | 894 | u8 tmp; |
859 | 895 | ||
860 | if (1 == (data->pwm_fcms[0] & (1 << index))) { | 896 | /* Speed cruise mode */ |
897 | if (data->pwm_fcms[0] & (1 << index)) { | ||
861 | tmp = 2; | 898 | tmp = 2; |
862 | goto out; | 899 | goto out; |
863 | } | 900 | } |
901 | /* Thermal cruise or SmartFan IV mode */ | ||
864 | for (tmp = 0; tmp < 6; tmp++) { | 902 | for (tmp = 0; tmp < 6; tmp++) { |
865 | if (data->pwm_tfmr[tmp] & (1 << index)) { | 903 | if (data->pwm_tfmr[tmp] & (1 << index)) { |
866 | tmp = 3; | 904 | tmp = 3; |
867 | goto out; | 905 | goto out; |
868 | } | 906 | } |
869 | } | 907 | } |
870 | if (data->pwm_fomc & (1 << index)) | 908 | /* Manual mode */ |
871 | tmp = 0; | 909 | tmp = 1; |
872 | else | ||
873 | tmp = 1; | ||
874 | 910 | ||
875 | out: | 911 | out: |
876 | return sprintf(buf, "%u\n", tmp); | 912 | return sprintf(buf, "%u\n", tmp); |
@@ -890,23 +926,21 @@ store_pwm_enable(struct device *dev, struct device_attribute *attr, | |||
890 | 926 | ||
891 | if (strict_strtoul(buf, 10, &val) < 0) | 927 | if (strict_strtoul(buf, 10, &val) < 0) |
892 | return -EINVAL; | 928 | return -EINVAL; |
893 | if (val > 2) | 929 | if (val < 1 || val > 2) |
894 | return -EINVAL; | 930 | return -EINVAL; |
895 | 931 | ||
896 | mutex_lock(&data->update_lock); | 932 | mutex_lock(&data->update_lock); |
897 | switch (val) { | 933 | switch (val) { |
898 | case 0: | ||
899 | case 1: | 934 | case 1: |
935 | /* Clear speed cruise mode bits */ | ||
900 | data->pwm_fcms[0] &= ~(1 << index); | 936 | data->pwm_fcms[0] &= ~(1 << index); |
901 | w83795_write(client, W83795_REG_FCMS1, data->pwm_fcms[0]); | 937 | w83795_write(client, W83795_REG_FCMS1, data->pwm_fcms[0]); |
938 | /* Clear thermal cruise mode bits */ | ||
902 | for (i = 0; i < 6; i++) { | 939 | for (i = 0; i < 6; i++) { |
903 | data->pwm_tfmr[i] &= ~(1 << index); | 940 | data->pwm_tfmr[i] &= ~(1 << index); |
904 | w83795_write(client, W83795_REG_TFMR(i), | 941 | w83795_write(client, W83795_REG_TFMR(i), |
905 | data->pwm_tfmr[i]); | 942 | data->pwm_tfmr[i]); |
906 | } | 943 | } |
907 | data->pwm_fomc |= 1 << index; | ||
908 | data->pwm_fomc ^= val << index; | ||
909 | w83795_write(client, W83795_REG_FOMC, data->pwm_fomc); | ||
910 | break; | 944 | break; |
911 | case 2: | 945 | case 2: |
912 | data->pwm_fcms[0] |= (1 << index); | 946 | data->pwm_fcms[0] |= (1 << index); |
@@ -918,23 +952,60 @@ store_pwm_enable(struct device *dev, struct device_attribute *attr, | |||
918 | } | 952 | } |
919 | 953 | ||
920 | static ssize_t | 954 | static ssize_t |
955 | show_pwm_mode(struct device *dev, struct device_attribute *attr, char *buf) | ||
956 | { | ||
957 | struct w83795_data *data = w83795_update_pwm_config(dev); | ||
958 | int index = to_sensor_dev_attr_2(attr)->index; | ||
959 | unsigned int mode; | ||
960 | |||
961 | if (data->pwm_fomc & (1 << index)) | ||
962 | mode = 0; /* DC */ | ||
963 | else | ||
964 | mode = 1; /* PWM */ | ||
965 | |||
966 | return sprintf(buf, "%u\n", mode); | ||
967 | } | ||
968 | |||
969 | /* | ||
970 | * Check whether a given temperature source can ever be useful. | ||
971 | * Returns the number of selectable temperature channels which are | ||
972 | * enabled. | ||
973 | */ | ||
974 | static int w83795_tss_useful(const struct w83795_data *data, int tsrc) | ||
975 | { | ||
976 | int useful = 0, i; | ||
977 | |||
978 | for (i = 0; i < 4; i++) { | ||
979 | if (tss_map[i][tsrc] == TSS_MAP_RESERVED) | ||
980 | continue; | ||
981 | if (tss_map[i][tsrc] < 6) /* Analog */ | ||
982 | useful += (data->has_temp >> tss_map[i][tsrc]) & 1; | ||
983 | else /* Digital */ | ||
984 | useful += (data->has_dts >> (tss_map[i][tsrc] - 6)) & 1; | ||
985 | } | ||
986 | |||
987 | return useful; | ||
988 | } | ||
989 | |||
990 | static ssize_t | ||
921 | show_temp_src(struct device *dev, struct device_attribute *attr, char *buf) | 991 | show_temp_src(struct device *dev, struct device_attribute *attr, char *buf) |
922 | { | 992 | { |
923 | struct sensor_device_attribute_2 *sensor_attr = | 993 | struct sensor_device_attribute_2 *sensor_attr = |
924 | to_sensor_dev_attr_2(attr); | 994 | to_sensor_dev_attr_2(attr); |
925 | struct w83795_data *data = w83795_update_pwm_config(dev); | 995 | struct w83795_data *data = w83795_update_pwm_config(dev); |
926 | int index = sensor_attr->index; | 996 | int index = sensor_attr->index; |
927 | u8 val = index / 2; | 997 | u8 tmp = data->temp_src[index / 2]; |
928 | u8 tmp = data->temp_src[val]; | ||
929 | 998 | ||
930 | if (index & 1) | 999 | if (index & 1) |
931 | val = 4; | 1000 | tmp >>= 4; /* Pick high nibble */ |
932 | else | 1001 | else |
933 | val = 0; | 1002 | tmp &= 0x0f; /* Pick low nibble */ |
934 | tmp >>= val; | ||
935 | tmp &= 0x0f; | ||
936 | 1003 | ||
937 | return sprintf(buf, "%u\n", tmp); | 1004 | /* Look-up the actual temperature channel number */ |
1005 | if (tmp >= 4 || tss_map[tmp][index] == TSS_MAP_RESERVED) | ||
1006 | return -EINVAL; /* Shouldn't happen */ | ||
1007 | |||
1008 | return sprintf(buf, "%u\n", (unsigned int)tss_map[tmp][index] + 1); | ||
938 | } | 1009 | } |
939 | 1010 | ||
940 | static ssize_t | 1011 | static ssize_t |
@@ -946,12 +1017,21 @@ store_temp_src(struct device *dev, struct device_attribute *attr, | |||
946 | struct sensor_device_attribute_2 *sensor_attr = | 1017 | struct sensor_device_attribute_2 *sensor_attr = |
947 | to_sensor_dev_attr_2(attr); | 1018 | to_sensor_dev_attr_2(attr); |
948 | int index = sensor_attr->index; | 1019 | int index = sensor_attr->index; |
949 | unsigned long tmp; | 1020 | int tmp; |
1021 | unsigned long channel; | ||
950 | u8 val = index / 2; | 1022 | u8 val = index / 2; |
951 | 1023 | ||
952 | if (strict_strtoul(buf, 10, &tmp) < 0) | 1024 | if (strict_strtoul(buf, 10, &channel) < 0 || |
1025 | channel < 1 || channel > 14) | ||
1026 | return -EINVAL; | ||
1027 | |||
1028 | /* Check if request can be fulfilled */ | ||
1029 | for (tmp = 0; tmp < 4; tmp++) { | ||
1030 | if (tss_map[tmp][index] == channel - 1) | ||
1031 | break; | ||
1032 | } | ||
1033 | if (tmp == 4) /* No match */ | ||
953 | return -EINVAL; | 1034 | return -EINVAL; |
954 | tmp = SENSORS_LIMIT(tmp, 0, 15); | ||
955 | 1035 | ||
956 | mutex_lock(&data->update_lock); | 1036 | mutex_lock(&data->update_lock); |
957 | if (index & 1) { | 1037 | if (index & 1) { |
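Temperature source selection is now driven by the tss_map table: the 4-bit value stored in a W83795_REG_TSS() nibble is a row index into tss_map, whose entry for that source slot gives the real 0-based temperature channel, and the sysfs files accept and report 1-based channel numbers from 1 to 14. Worked example for temp2_source_sel (index 1): column 1 of tss_map holds {1, 7, 11, 5}, so writing "12" matches tss_map[2][1] == 11, nibble value 2 is programmed, and a subsequent read reports 12 again; a channel not present in that column is rejected with -EINVAL. The new w83795_tss_useful() helper walks the same column and counts how many selectable channels are enabled (has_temp for analog sources, has_dts for digital ones), presumably so the per-temperature source_sel attributes - now grouped in the w83795_tss[] array below - are only created when the selector can actually do something.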
@@ -1515,7 +1595,7 @@ store_sf_setup(struct device *dev, struct device_attribute *attr, | |||
1515 | 1595 | ||
1516 | #define NOT_USED -1 | 1596 | #define NOT_USED -1 |
1517 | 1597 | ||
1518 | /* Don't change the attribute order, _max and _min are accessed by index | 1598 | /* Don't change the attribute order, _max, _min and _beep are accessed by index |
1519 | * somewhere else in the code */ | 1599 | * somewhere else in the code */ |
1520 | #define SENSOR_ATTR_IN(index) { \ | 1600 | #define SENSOR_ATTR_IN(index) { \ |
1521 | SENSOR_ATTR_2(in##index##_input, S_IRUGO, show_in, NULL, \ | 1601 | SENSOR_ATTR_2(in##index##_input, S_IRUGO, show_in, NULL, \ |
@@ -1530,6 +1610,8 @@ store_sf_setup(struct device *dev, struct device_attribute *attr, | |||
1530 | show_alarm_beep, store_beep, BEEP_ENABLE, \ | 1610 | show_alarm_beep, store_beep, BEEP_ENABLE, \ |
1531 | index + ((index > 14) ? 1 : 0)) } | 1611 | index + ((index > 14) ? 1 : 0)) } |
1532 | 1612 | ||
1613 | /* Don't change the attribute order, _beep is accessed by index | ||
1614 | * somewhere else in the code */ | ||
1533 | #define SENSOR_ATTR_FAN(index) { \ | 1615 | #define SENSOR_ATTR_FAN(index) { \ |
1534 | SENSOR_ATTR_2(fan##index##_input, S_IRUGO, show_fan, \ | 1616 | SENSOR_ATTR_2(fan##index##_input, S_IRUGO, show_fan, \ |
1535 | NULL, FAN_INPUT, index - 1), \ | 1617 | NULL, FAN_INPUT, index - 1), \ |
@@ -1553,9 +1635,13 @@ store_sf_setup(struct device *dev, struct device_attribute *attr, | |||
1553 | show_pwm, store_pwm, PWM_FREQ, index - 1), \ | 1635 | show_pwm, store_pwm, PWM_FREQ, index - 1), \ |
1554 | SENSOR_ATTR_2(pwm##index##_enable, S_IWUSR | S_IRUGO, \ | 1636 | SENSOR_ATTR_2(pwm##index##_enable, S_IWUSR | S_IRUGO, \ |
1555 | show_pwm_enable, store_pwm_enable, NOT_USED, index - 1), \ | 1637 | show_pwm_enable, store_pwm_enable, NOT_USED, index - 1), \ |
1638 | SENSOR_ATTR_2(pwm##index##_mode, S_IRUGO, \ | ||
1639 | show_pwm_mode, NULL, NOT_USED, index - 1), \ | ||
1556 | SENSOR_ATTR_2(fan##index##_target, S_IWUSR | S_IRUGO, \ | 1640 | SENSOR_ATTR_2(fan##index##_target, S_IWUSR | S_IRUGO, \ |
1557 | show_fanin, store_fanin, FANIN_TARGET, index - 1) } | 1641 | show_fanin, store_fanin, FANIN_TARGET, index - 1) } |
1558 | 1642 | ||
1643 | /* Don't change the attribute order, _beep is accessed by index | ||
1644 | * somewhere else in the code */ | ||
1559 | #define SENSOR_ATTR_DTS(index) { \ | 1645 | #define SENSOR_ATTR_DTS(index) { \ |
1560 | SENSOR_ATTR_2(temp##index##_type, S_IRUGO , \ | 1646 | SENSOR_ATTR_2(temp##index##_type, S_IRUGO , \ |
1561 | show_dts_mode, NULL, NOT_USED, index - 7), \ | 1647 | show_dts_mode, NULL, NOT_USED, index - 7), \ |
@@ -1574,6 +1660,8 @@ store_sf_setup(struct device *dev, struct device_attribute *attr, | |||
1574 | SENSOR_ATTR_2(temp##index##_beep, S_IWUSR | S_IRUGO, \ | 1660 | SENSOR_ATTR_2(temp##index##_beep, S_IWUSR | S_IRUGO, \ |
1575 | show_alarm_beep, store_beep, BEEP_ENABLE, index + 17) } | 1661 | show_alarm_beep, store_beep, BEEP_ENABLE, index + 17) } |
1576 | 1662 | ||
1663 | /* Don't change the attribute order, _beep is accessed by index | ||
1664 | * somewhere else in the code */ | ||
1577 | #define SENSOR_ATTR_TEMP(index) { \ | 1665 | #define SENSOR_ATTR_TEMP(index) { \ |
1578 | SENSOR_ATTR_2(temp##index##_type, S_IRUGO | (index < 4 ? S_IWUSR : 0), \ | 1666 | SENSOR_ATTR_2(temp##index##_type, S_IRUGO | (index < 4 ? S_IWUSR : 0), \ |
1579 | show_temp_mode, store_temp_mode, NOT_USED, index - 1), \ | 1667 | show_temp_mode, store_temp_mode, NOT_USED, index - 1), \ |
@@ -1593,8 +1681,6 @@ store_sf_setup(struct device *dev, struct device_attribute *attr, | |||
1593 | SENSOR_ATTR_2(temp##index##_beep, S_IWUSR | S_IRUGO, \ | 1681 | SENSOR_ATTR_2(temp##index##_beep, S_IWUSR | S_IRUGO, \ |
1594 | show_alarm_beep, store_beep, BEEP_ENABLE, \ | 1682 | show_alarm_beep, store_beep, BEEP_ENABLE, \ |
1595 | index + (index > 4 ? 11 : 17)), \ | 1683 | index + (index > 4 ? 11 : 17)), \ |
1596 | SENSOR_ATTR_2(temp##index##_source_sel, S_IWUSR | S_IRUGO, \ | ||
1597 | show_temp_src, store_temp_src, NOT_USED, index - 1), \ | ||
1598 | SENSOR_ATTR_2(temp##index##_pwm_enable, S_IWUSR | S_IRUGO, \ | 1684 | SENSOR_ATTR_2(temp##index##_pwm_enable, S_IWUSR | S_IRUGO, \ |
1599 | show_temp_pwm_enable, store_temp_pwm_enable, \ | 1685 | show_temp_pwm_enable, store_temp_pwm_enable, \ |
1600 | TEMP_PWM_ENABLE, index - 1), \ | 1686 | TEMP_PWM_ENABLE, index - 1), \ |
@@ -1680,7 +1766,7 @@ static const struct sensor_device_attribute_2 w83795_fan[][4] = { | |||
1680 | SENSOR_ATTR_FAN(14), | 1766 | SENSOR_ATTR_FAN(14), |
1681 | }; | 1767 | }; |
1682 | 1768 | ||
1683 | static const struct sensor_device_attribute_2 w83795_temp[][29] = { | 1769 | static const struct sensor_device_attribute_2 w83795_temp[][28] = { |
1684 | SENSOR_ATTR_TEMP(1), | 1770 | SENSOR_ATTR_TEMP(1), |
1685 | SENSOR_ATTR_TEMP(2), | 1771 | SENSOR_ATTR_TEMP(2), |
1686 | SENSOR_ATTR_TEMP(3), | 1772 | SENSOR_ATTR_TEMP(3), |
@@ -1700,7 +1786,7 @@ static const struct sensor_device_attribute_2 w83795_dts[][8] = { | |||
1700 | SENSOR_ATTR_DTS(14), | 1786 | SENSOR_ATTR_DTS(14), |
1701 | }; | 1787 | }; |
1702 | 1788 | ||
1703 | static const struct sensor_device_attribute_2 w83795_pwm[][7] = { | 1789 | static const struct sensor_device_attribute_2 w83795_pwm[][8] = { |
1704 | SENSOR_ATTR_PWM(1), | 1790 | SENSOR_ATTR_PWM(1), |
1705 | SENSOR_ATTR_PWM(2), | 1791 | SENSOR_ATTR_PWM(2), |
1706 | SENSOR_ATTR_PWM(3), | 1792 | SENSOR_ATTR_PWM(3), |
@@ -1711,13 +1797,24 @@ static const struct sensor_device_attribute_2 w83795_pwm[][7] = { | |||
1711 | SENSOR_ATTR_PWM(8), | 1797 | SENSOR_ATTR_PWM(8), |
1712 | }; | 1798 | }; |
1713 | 1799 | ||
1800 | static const struct sensor_device_attribute_2 w83795_tss[6] = { | ||
1801 | SENSOR_ATTR_2(temp1_source_sel, S_IWUSR | S_IRUGO, | ||
1802 | show_temp_src, store_temp_src, NOT_USED, 0), | ||
1803 | SENSOR_ATTR_2(temp2_source_sel, S_IWUSR | S_IRUGO, | ||
1804 | show_temp_src, store_temp_src, NOT_USED, 1), | ||
1805 | SENSOR_ATTR_2(temp3_source_sel, S_IWUSR | S_IRUGO, | ||
1806 | show_temp_src, store_temp_src, NOT_USED, 2), | ||
1807 | SENSOR_ATTR_2(temp4_source_sel, S_IWUSR | S_IRUGO, | ||
1808 | show_temp_src, store_temp_src, NOT_USED, 3), | ||
1809 | SENSOR_ATTR_2(temp5_source_sel, S_IWUSR | S_IRUGO, | ||
1810 | show_temp_src, store_temp_src, NOT_USED, 4), | ||
1811 | SENSOR_ATTR_2(temp6_source_sel, S_IWUSR | S_IRUGO, | ||
1812 | show_temp_src, store_temp_src, NOT_USED, 5), | ||
1813 | }; | ||
1814 | |||
1714 | static const struct sensor_device_attribute_2 sda_single_files[] = { | 1815 | static const struct sensor_device_attribute_2 sda_single_files[] = { |
1715 | SENSOR_ATTR_2(intrusion0_alarm, S_IWUSR | S_IRUGO, show_alarm_beep, | 1816 | SENSOR_ATTR_2(intrusion0_alarm, S_IWUSR | S_IRUGO, show_alarm_beep, |
1716 | store_chassis_clear, ALARM_STATUS, 46), | 1817 | store_chassis_clear, ALARM_STATUS, 46), |
1717 | SENSOR_ATTR_2(intrusion0_beep, S_IWUSR | S_IRUGO, show_alarm_beep, | ||
1718 | store_beep, BEEP_ENABLE, 46), | ||
1719 | SENSOR_ATTR_2(beep_enable, S_IWUSR | S_IRUGO, show_alarm_beep, | ||
1720 | store_beep, BEEP_ENABLE, 47), | ||
1721 | #ifdef CONFIG_SENSORS_W83795_FANCTRL | 1818 | #ifdef CONFIG_SENSORS_W83795_FANCTRL |
1722 | SENSOR_ATTR_2(speed_cruise_tolerance, S_IWUSR | S_IRUGO, show_fanin, | 1819 | SENSOR_ATTR_2(speed_cruise_tolerance, S_IWUSR | S_IRUGO, show_fanin, |
1723 | store_fanin, FANIN_TOL, NOT_USED), | 1820 | store_fanin, FANIN_TOL, NOT_USED), |
@@ -1730,6 +1827,13 @@ static const struct sensor_device_attribute_2 sda_single_files[] = { | |||
1730 | #endif | 1827 | #endif |
1731 | }; | 1828 | }; |
1732 | 1829 | ||
1830 | static const struct sensor_device_attribute_2 sda_beep_files[] = { | ||
1831 | SENSOR_ATTR_2(intrusion0_beep, S_IWUSR | S_IRUGO, show_alarm_beep, | ||
1832 | store_beep, BEEP_ENABLE, 46), | ||
1833 | SENSOR_ATTR_2(beep_enable, S_IWUSR | S_IRUGO, show_alarm_beep, | ||
1834 | store_beep, BEEP_ENABLE, 47), | ||
1835 | }; | ||
1836 | |||
1733 | /* | 1837 | /* |
1734 | * Driver interface | 1838 | * Driver interface |
1735 | */ | 1839 | */ |
@@ -1859,6 +1963,8 @@ static int w83795_handle_files(struct device *dev, int (*fn)(struct device *, | |||
1859 | if (!(data->has_in & (1 << i))) | 1963 | if (!(data->has_in & (1 << i))) |
1860 | continue; | 1964 | continue; |
1861 | for (j = 0; j < ARRAY_SIZE(w83795_in[0]); j++) { | 1965 | for (j = 0; j < ARRAY_SIZE(w83795_in[0]); j++) { |
1966 | if (j == 4 && !data->enable_beep) | ||
1967 | continue; | ||
1862 | err = fn(dev, &w83795_in[i][j].dev_attr); | 1968 | err = fn(dev, &w83795_in[i][j].dev_attr); |
1863 | if (err) | 1969 | if (err) |
1864 | return err; | 1970 | return err; |
@@ -1869,18 +1975,37 @@ static int w83795_handle_files(struct device *dev, int (*fn)(struct device *, | |||
1869 | if (!(data->has_fan & (1 << i))) | 1975 | if (!(data->has_fan & (1 << i))) |
1870 | continue; | 1976 | continue; |
1871 | for (j = 0; j < ARRAY_SIZE(w83795_fan[0]); j++) { | 1977 | for (j = 0; j < ARRAY_SIZE(w83795_fan[0]); j++) { |
1978 | if (j == 3 && !data->enable_beep) | ||
1979 | continue; | ||
1872 | err = fn(dev, &w83795_fan[i][j].dev_attr); | 1980 | err = fn(dev, &w83795_fan[i][j].dev_attr); |
1873 | if (err) | 1981 | if (err) |
1874 | return err; | 1982 | return err; |
1875 | } | 1983 | } |
1876 | } | 1984 | } |
1877 | 1985 | ||
1986 | for (i = 0; i < ARRAY_SIZE(w83795_tss); i++) { | ||
1987 | j = w83795_tss_useful(data, i); | ||
1988 | if (!j) | ||
1989 | continue; | ||
1990 | err = fn(dev, &w83795_tss[i].dev_attr); | ||
1991 | if (err) | ||
1992 | return err; | ||
1993 | } | ||
1994 | |||
1878 | for (i = 0; i < ARRAY_SIZE(sda_single_files); i++) { | 1995 | for (i = 0; i < ARRAY_SIZE(sda_single_files); i++) { |
1879 | err = fn(dev, &sda_single_files[i].dev_attr); | 1996 | err = fn(dev, &sda_single_files[i].dev_attr); |
1880 | if (err) | 1997 | if (err) |
1881 | return err; | 1998 | return err; |
1882 | } | 1999 | } |
1883 | 2000 | ||
2001 | if (data->enable_beep) { | ||
2002 | for (i = 0; i < ARRAY_SIZE(sda_beep_files); i++) { | ||
2003 | err = fn(dev, &sda_beep_files[i].dev_attr); | ||
2004 | if (err) | ||
2005 | return err; | ||
2006 | } | ||
2007 | } | ||
2008 | |||
1884 | #ifdef CONFIG_SENSORS_W83795_FANCTRL | 2009 | #ifdef CONFIG_SENSORS_W83795_FANCTRL |
1885 | for (i = 0; i < data->has_pwm; i++) { | 2010 | for (i = 0; i < data->has_pwm; i++) { |
1886 | for (j = 0; j < ARRAY_SIZE(w83795_pwm[0]); j++) { | 2011 | for (j = 0; j < ARRAY_SIZE(w83795_pwm[0]); j++) { |
@@ -1899,6 +2024,8 @@ static int w83795_handle_files(struct device *dev, int (*fn)(struct device *, | |||
1899 | #else | 2024 | #else |
1900 | for (j = 0; j < 8; j++) { | 2025 | for (j = 0; j < 8; j++) { |
1901 | #endif | 2026 | #endif |
2027 | if (j == 7 && !data->enable_beep) | ||
2028 | continue; | ||
1902 | err = fn(dev, &w83795_temp[i][j].dev_attr); | 2029 | err = fn(dev, &w83795_temp[i][j].dev_attr); |
1903 | if (err) | 2030 | if (err) |
1904 | return err; | 2031 | return err; |
@@ -1910,6 +2037,8 @@ static int w83795_handle_files(struct device *dev, int (*fn)(struct device *, | |||
1910 | if (!(data->has_dts & (1 << i))) | 2037 | if (!(data->has_dts & (1 << i))) |
1911 | continue; | 2038 | continue; |
1912 | for (j = 0; j < ARRAY_SIZE(w83795_dts[0]); j++) { | 2039 | for (j = 0; j < ARRAY_SIZE(w83795_dts[0]); j++) { |
2040 | if (j == 7 && !data->enable_beep) | ||
2041 | continue; | ||
1913 | err = fn(dev, &w83795_dts[i][j].dev_attr); | 2042 | err = fn(dev, &w83795_dts[i][j].dev_attr); |
1914 | if (err) | 2043 | if (err) |
1915 | return err; | 2044 | return err; |
@@ -2049,6 +2178,18 @@ static int w83795_probe(struct i2c_client *client, | |||
2049 | else | 2178 | else |
2050 | data->has_pwm = 2; | 2179 | data->has_pwm = 2; |
2051 | 2180 | ||
2181 | /* Check if BEEP pin is available */ | ||
2182 | if (data->chip_type == w83795g) { | ||
2183 | /* The W83795G has a dedicated BEEP pin */ | ||
2184 | data->enable_beep = 1; | ||
2185 | } else { | ||
2186 | /* The W83795ADG has a shared pin for OVT# and BEEP, so you | ||
2187 | * can't have both */ | ||
2188 | tmp = w83795_read(client, W83795_REG_OVT_CFG); | ||
2189 | if ((tmp & OVT_CFG_SEL) == 0) | ||
2190 | data->enable_beep = 1; | ||
2191 | } | ||
2192 | |||
2052 | err = w83795_handle_files(dev, device_create_file); | 2193 | err = w83795_handle_files(dev, device_create_file); |
2053 | if (err) | 2194 | if (err) |
2054 | goto exit_remove; | 2195 | goto exit_remove; |
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index d231f683f576..6b4cc567645b 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c | |||
@@ -848,6 +848,18 @@ static int i2c_register_adapter(struct i2c_adapter *adap) | |||
848 | goto out_list; | 848 | goto out_list; |
849 | } | 849 | } |
850 | 850 | ||
851 | /* Sanity checks */ | ||
852 | if (unlikely(adap->name[0] == '\0')) { | ||
853 | pr_err("i2c-core: Attempt to register an adapter with " | ||
854 | "no name!\n"); | ||
855 | return -EINVAL; | ||
856 | } | ||
857 | if (unlikely(!adap->algo)) { | ||
858 | pr_err("i2c-core: Attempt to register adapter '%s' with " | ||
859 | "no algo!\n", adap->name); | ||
860 | return -EINVAL; | ||
861 | } | ||
862 | |||
851 | rt_mutex_init(&adap->bus_lock); | 863 | rt_mutex_init(&adap->bus_lock); |
852 | mutex_init(&adap->userspace_clients_lock); | 864 | mutex_init(&adap->userspace_clients_lock); |
853 | INIT_LIST_HEAD(&adap->userspace_clients); | 865 | INIT_LIST_HEAD(&adap->userspace_clients); |
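A minimal registration sketch showing what the two new i2c-core checks require; the bus driver, my_xfer(), my_func() and the adapter name below are illustrative assumptions, not part of this patch:

#include <linux/i2c.h>
#include <linux/module.h>

/* Hypothetical transfer hook; a real driver would drive its controller here. */
static int my_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	return num;
}

static u32 my_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C;
}

static const struct i2c_algorithm my_algo = {
	.master_xfer   = my_xfer,
	.functionality = my_func,
};

static struct i2c_adapter my_adapter = {
	.owner = THIS_MODULE,
	.algo  = &my_algo,	/* a NULL .algo is now rejected with -EINVAL */
	.name  = "my-i2c-bus",	/* and so is an empty .name */
};

/* With the checks above, i2c_add_adapter(&my_adapter) fails cleanly at
 * registration time instead of leaving a half-initialised adapter around. */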
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c index d32a4843fc3a..d7a4833be416 100644 --- a/drivers/i2c/i2c-mux.c +++ b/drivers/i2c/i2c-mux.c | |||
@@ -120,7 +120,6 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent, | |||
120 | snprintf(priv->adap.name, sizeof(priv->adap.name), | 120 | snprintf(priv->adap.name, sizeof(priv->adap.name), |
121 | "i2c-%d-mux (chan_id %d)", i2c_adapter_id(parent), chan_id); | 121 | "i2c-%d-mux (chan_id %d)", i2c_adapter_id(parent), chan_id); |
122 | priv->adap.owner = THIS_MODULE; | 122 | priv->adap.owner = THIS_MODULE; |
123 | priv->adap.id = parent->id; | ||
124 | priv->adap.algo = &priv->algo; | 123 | priv->adap.algo = &priv->algo; |
125 | priv->adap.algo_data = priv; | 124 | priv->adap.algo_data = priv; |
126 | priv->adap.dev.parent = &parent->dev; | 125 | priv->adap.dev.parent = &parent->dev; |
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index cfc1d65c4577..1e1e347a7715 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c | |||
@@ -1123,7 +1123,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr) | |||
1123 | } | 1123 | } |
1124 | } | 1124 | } |
1125 | 1125 | ||
1126 | static int srp_queuecommand(struct scsi_cmnd *scmnd, | 1126 | static int srp_queuecommand_lck(struct scsi_cmnd *scmnd, |
1127 | void (*done)(struct scsi_cmnd *)) | 1127 | void (*done)(struct scsi_cmnd *)) |
1128 | { | 1128 | { |
1129 | struct srp_target_port *target = host_to_target(scmnd->device->host); | 1129 | struct srp_target_port *target = host_to_target(scmnd->device->host); |
@@ -1196,6 +1196,8 @@ err: | |||
1196 | return SCSI_MLQUEUE_HOST_BUSY; | 1196 | return SCSI_MLQUEUE_HOST_BUSY; |
1197 | } | 1197 | } |
1198 | 1198 | ||
1199 | static DEF_SCSI_QCMD(srp_queuecommand) | ||
1200 | |||
1199 | static int srp_alloc_iu_bufs(struct srp_target_port *target) | 1201 | static int srp_alloc_iu_bufs(struct srp_target_port *target) |
1200 | { | 1202 | { |
1201 | int i; | 1203 | int i; |
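The DEF_SCSI_QCMD() helper used above generates the lock-taking wrapper for the renamed _lck routine. Roughly, the generated srp_queuecommand() behaves like the following paraphrased sketch (not the exact macro body):

static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	unsigned long flags;
	int rc;

	/* Take the host lock that queuecommand callers used to hold,
	 * then call the old-style implementation. */
	spin_lock_irqsave(shost->host_lock, flags);
	rc = srp_queuecommand_lck(cmd, cmd->scsi_done);
	spin_unlock_irqrestore(shost->host_lock, flags);
	return rc;
}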
diff --git a/drivers/input/input.c b/drivers/input/input.c index d092ef9291da..7f26ca6ecf75 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c | |||
@@ -74,6 +74,7 @@ static int input_defuzz_abs_event(int value, int old_val, int fuzz) | |||
74 | * dev->event_lock held and interrupts disabled. | 74 | * dev->event_lock held and interrupts disabled. |
75 | */ | 75 | */ |
76 | static void input_pass_event(struct input_dev *dev, | 76 | static void input_pass_event(struct input_dev *dev, |
77 | struct input_handler *src_handler, | ||
77 | unsigned int type, unsigned int code, int value) | 78 | unsigned int type, unsigned int code, int value) |
78 | { | 79 | { |
79 | struct input_handler *handler; | 80 | struct input_handler *handler; |
@@ -92,6 +93,15 @@ static void input_pass_event(struct input_dev *dev, | |||
92 | continue; | 93 | continue; |
93 | 94 | ||
94 | handler = handle->handler; | 95 | handler = handle->handler; |
96 | |||
97 | /* | ||
98 | * If this is the handler that injected this | ||
99 | * particular event we want to skip it to avoid | ||
100 | * filters firing again and again. | ||
101 | */ | ||
102 | if (handler == src_handler) | ||
103 | continue; | ||
104 | |||
95 | if (!handler->filter) { | 105 | if (!handler->filter) { |
96 | if (filtered) | 106 | if (filtered) |
97 | break; | 107 | break; |
@@ -121,7 +131,7 @@ static void input_repeat_key(unsigned long data) | |||
121 | if (test_bit(dev->repeat_key, dev->key) && | 131 | if (test_bit(dev->repeat_key, dev->key) && |
122 | is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) { | 132 | is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) { |
123 | 133 | ||
124 | input_pass_event(dev, EV_KEY, dev->repeat_key, 2); | 134 | input_pass_event(dev, NULL, EV_KEY, dev->repeat_key, 2); |
125 | 135 | ||
126 | if (dev->sync) { | 136 | if (dev->sync) { |
127 | /* | 137 | /* |
@@ -130,7 +140,7 @@ static void input_repeat_key(unsigned long data) | |||
130 | * Otherwise assume that the driver will send | 140 | * Otherwise assume that the driver will send |
131 | * SYN_REPORT once it's done. | 141 | * SYN_REPORT once it's done. |
132 | */ | 142 | */ |
133 | input_pass_event(dev, EV_SYN, SYN_REPORT, 1); | 143 | input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1); |
134 | } | 144 | } |
135 | 145 | ||
136 | if (dev->rep[REP_PERIOD]) | 146 | if (dev->rep[REP_PERIOD]) |
@@ -163,6 +173,7 @@ static void input_stop_autorepeat(struct input_dev *dev) | |||
163 | #define INPUT_PASS_TO_ALL (INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE) | 173 | #define INPUT_PASS_TO_ALL (INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE) |
164 | 174 | ||
165 | static int input_handle_abs_event(struct input_dev *dev, | 175 | static int input_handle_abs_event(struct input_dev *dev, |
176 | struct input_handler *src_handler, | ||
166 | unsigned int code, int *pval) | 177 | unsigned int code, int *pval) |
167 | { | 178 | { |
168 | bool is_mt_event; | 179 | bool is_mt_event; |
@@ -206,13 +217,15 @@ static int input_handle_abs_event(struct input_dev *dev, | |||
206 | /* Flush pending "slot" event */ | 217 | /* Flush pending "slot" event */ |
207 | if (is_mt_event && dev->slot != input_abs_get_val(dev, ABS_MT_SLOT)) { | 218 | if (is_mt_event && dev->slot != input_abs_get_val(dev, ABS_MT_SLOT)) { |
208 | input_abs_set_val(dev, ABS_MT_SLOT, dev->slot); | 219 | input_abs_set_val(dev, ABS_MT_SLOT, dev->slot); |
209 | input_pass_event(dev, EV_ABS, ABS_MT_SLOT, dev->slot); | 220 | input_pass_event(dev, src_handler, |
221 | EV_ABS, ABS_MT_SLOT, dev->slot); | ||
210 | } | 222 | } |
211 | 223 | ||
212 | return INPUT_PASS_TO_HANDLERS; | 224 | return INPUT_PASS_TO_HANDLERS; |
213 | } | 225 | } |
214 | 226 | ||
215 | static void input_handle_event(struct input_dev *dev, | 227 | static void input_handle_event(struct input_dev *dev, |
228 | struct input_handler *src_handler, | ||
216 | unsigned int type, unsigned int code, int value) | 229 | unsigned int type, unsigned int code, int value) |
217 | { | 230 | { |
218 | int disposition = INPUT_IGNORE_EVENT; | 231 | int disposition = INPUT_IGNORE_EVENT; |
@@ -265,7 +278,8 @@ static void input_handle_event(struct input_dev *dev, | |||
265 | 278 | ||
266 | case EV_ABS: | 279 | case EV_ABS: |
267 | if (is_event_supported(code, dev->absbit, ABS_MAX)) | 280 | if (is_event_supported(code, dev->absbit, ABS_MAX)) |
268 | disposition = input_handle_abs_event(dev, code, &value); | 281 | disposition = input_handle_abs_event(dev, src_handler, |
282 | code, &value); | ||
269 | 283 | ||
270 | break; | 284 | break; |
271 | 285 | ||
@@ -323,7 +337,7 @@ static void input_handle_event(struct input_dev *dev, | |||
323 | dev->event(dev, type, code, value); | 337 | dev->event(dev, type, code, value); |
324 | 338 | ||
325 | if (disposition & INPUT_PASS_TO_HANDLERS) | 339 | if (disposition & INPUT_PASS_TO_HANDLERS) |
326 | input_pass_event(dev, type, code, value); | 340 | input_pass_event(dev, src_handler, type, code, value); |
327 | } | 341 | } |
328 | 342 | ||
329 | /** | 343 | /** |
@@ -352,7 +366,7 @@ void input_event(struct input_dev *dev, | |||
352 | 366 | ||
353 | spin_lock_irqsave(&dev->event_lock, flags); | 367 | spin_lock_irqsave(&dev->event_lock, flags); |
354 | add_input_randomness(type, code, value); | 368 | add_input_randomness(type, code, value); |
355 | input_handle_event(dev, type, code, value); | 369 | input_handle_event(dev, NULL, type, code, value); |
356 | spin_unlock_irqrestore(&dev->event_lock, flags); | 370 | spin_unlock_irqrestore(&dev->event_lock, flags); |
357 | } | 371 | } |
358 | } | 372 | } |
@@ -382,7 +396,8 @@ void input_inject_event(struct input_handle *handle, | |||
382 | rcu_read_lock(); | 396 | rcu_read_lock(); |
383 | grab = rcu_dereference(dev->grab); | 397 | grab = rcu_dereference(dev->grab); |
384 | if (!grab || grab == handle) | 398 | if (!grab || grab == handle) |
385 | input_handle_event(dev, type, code, value); | 399 | input_handle_event(dev, handle->handler, |
400 | type, code, value); | ||
386 | rcu_read_unlock(); | 401 | rcu_read_unlock(); |
387 | 402 | ||
388 | spin_unlock_irqrestore(&dev->event_lock, flags); | 403 | spin_unlock_irqrestore(&dev->event_lock, flags); |
@@ -595,10 +610,10 @@ static void input_dev_release_keys(struct input_dev *dev) | |||
595 | for (code = 0; code <= KEY_MAX; code++) { | 610 | for (code = 0; code <= KEY_MAX; code++) { |
596 | if (is_event_supported(code, dev->keybit, KEY_MAX) && | 611 | if (is_event_supported(code, dev->keybit, KEY_MAX) && |
597 | __test_and_clear_bit(code, dev->key)) { | 612 | __test_and_clear_bit(code, dev->key)) { |
598 | input_pass_event(dev, EV_KEY, code, 0); | 613 | input_pass_event(dev, NULL, EV_KEY, code, 0); |
599 | } | 614 | } |
600 | } | 615 | } |
601 | input_pass_event(dev, EV_SYN, SYN_REPORT, 1); | 616 | input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1); |
602 | } | 617 | } |
603 | } | 618 | } |
604 | 619 | ||
@@ -873,9 +888,9 @@ int input_set_keycode(struct input_dev *dev, | |||
873 | !is_event_supported(old_keycode, dev->keybit, KEY_MAX) && | 888 | !is_event_supported(old_keycode, dev->keybit, KEY_MAX) && |
874 | __test_and_clear_bit(old_keycode, dev->key)) { | 889 | __test_and_clear_bit(old_keycode, dev->key)) { |
875 | 890 | ||
876 | input_pass_event(dev, EV_KEY, old_keycode, 0); | 891 | input_pass_event(dev, NULL, EV_KEY, old_keycode, 0); |
877 | if (dev->sync) | 892 | if (dev->sync) |
878 | input_pass_event(dev, EV_SYN, SYN_REPORT, 1); | 893 | input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1); |
879 | } | 894 | } |
880 | 895 | ||
881 | out: | 896 | out: |
@@ -1565,8 +1580,7 @@ static int input_dev_uevent(struct device *device, struct kobj_uevent_env *env) | |||
1565 | } \ | 1580 | } \ |
1566 | } while (0) | 1581 | } while (0) |
1567 | 1582 | ||
1568 | #ifdef CONFIG_PM | 1583 | static void input_dev_toggle(struct input_dev *dev, bool activate) |
1569 | static void input_dev_reset(struct input_dev *dev, bool activate) | ||
1570 | { | 1584 | { |
1571 | if (!dev->event) | 1585 | if (!dev->event) |
1572 | return; | 1586 | return; |
@@ -1580,12 +1594,44 @@ static void input_dev_reset(struct input_dev *dev, bool activate) | |||
1580 | } | 1594 | } |
1581 | } | 1595 | } |
1582 | 1596 | ||
1597 | /** | ||
1598 | * input_reset_device() - reset/restore the state of input device | ||
1599 | * @dev: input device whose state needs to be reset | ||
1600 | * | ||
1601 | * This function tries to reset the state of an opened input device and | ||
1602 | * bring the internal state and the state of the hardware in sync with each other. | ||
1603 | * We mark all keys as released, restore LED state, repeat rate, etc. | ||
1604 | */ | ||
1605 | void input_reset_device(struct input_dev *dev) | ||
1606 | { | ||
1607 | mutex_lock(&dev->mutex); | ||
1608 | |||
1609 | if (dev->users) { | ||
1610 | input_dev_toggle(dev, true); | ||
1611 | |||
1612 | /* | ||
1613 | * Keys that have been pressed at suspend time are unlikely | ||
1614 | * to be still pressed when we resume. | ||
1615 | */ | ||
1616 | spin_lock_irq(&dev->event_lock); | ||
1617 | input_dev_release_keys(dev); | ||
1618 | spin_unlock_irq(&dev->event_lock); | ||
1619 | } | ||
1620 | |||
1621 | mutex_unlock(&dev->mutex); | ||
1622 | } | ||
1623 | EXPORT_SYMBOL(input_reset_device); | ||
1624 | |||
1625 | #ifdef CONFIG_PM | ||
1583 | static int input_dev_suspend(struct device *dev) | 1626 | static int input_dev_suspend(struct device *dev) |
1584 | { | 1627 | { |
1585 | struct input_dev *input_dev = to_input_dev(dev); | 1628 | struct input_dev *input_dev = to_input_dev(dev); |
1586 | 1629 | ||
1587 | mutex_lock(&input_dev->mutex); | 1630 | mutex_lock(&input_dev->mutex); |
1588 | input_dev_reset(input_dev, false); | 1631 | |
1632 | if (input_dev->users) | ||
1633 | input_dev_toggle(input_dev, false); | ||
1634 | |||
1589 | mutex_unlock(&input_dev->mutex); | 1635 | mutex_unlock(&input_dev->mutex); |
1590 | 1636 | ||
1591 | return 0; | 1637 | return 0; |
@@ -1595,18 +1641,7 @@ static int input_dev_resume(struct device *dev) | |||
1595 | { | 1641 | { |
1596 | struct input_dev *input_dev = to_input_dev(dev); | 1642 | struct input_dev *input_dev = to_input_dev(dev); |
1597 | 1643 | ||
1598 | mutex_lock(&input_dev->mutex); | 1644 | input_reset_device(input_dev); |
1599 | input_dev_reset(input_dev, true); | ||
1600 | |||
1601 | /* | ||
1602 | * Keys that have been pressed at suspend time are unlikely | ||
1603 | * to be still pressed when we resume. | ||
1604 | */ | ||
1605 | spin_lock_irq(&input_dev->event_lock); | ||
1606 | input_dev_release_keys(input_dev); | ||
1607 | spin_unlock_irq(&input_dev->event_lock); | ||
1608 | |||
1609 | mutex_unlock(&input_dev->mutex); | ||
1610 | 1645 | ||
1611 | return 0; | 1646 | return 0; |
1612 | } | 1647 | } |
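input_reset_device(), exported above, gives drivers a way to resynchronise the input core with the hardware, for example after a resume or firmware reset. A minimal sketch of a resume path using it; the my_kbd structure and my_kbd_hw_init() are illustrative assumptions, not from this patch:

static int my_kbd_resume(struct device *dev)
{
	struct my_kbd *kbd = dev_get_drvdata(dev);

	my_kbd_hw_init(kbd);		/* reprogram the controller */
	input_reset_device(kbd->input);	/* restore LEDs/repeat rate and release
					 * keys held across the power transition */
	return 0;
}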
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c index b92d1cd5cba1..af45d275f686 100644 --- a/drivers/input/keyboard/adp5588-keys.c +++ b/drivers/input/keyboard/adp5588-keys.c | |||
@@ -4,7 +4,7 @@ | |||
4 | * I2C QWERTY Keypad and IO Expander | 4 | * I2C QWERTY Keypad and IO Expander |
5 | * Bugs: Enter bugs at http://blackfin.uclinux.org/ | 5 | * Bugs: Enter bugs at http://blackfin.uclinux.org/ |
6 | * | 6 | * |
7 | * Copyright (C) 2008-2009 Analog Devices Inc. | 7 | * Copyright (C) 2008-2010 Analog Devices Inc. |
8 | * Licensed under the GPL-2 or later. | 8 | * Licensed under the GPL-2 or later. |
9 | */ | 9 | */ |
10 | 10 | ||
@@ -24,29 +24,6 @@ | |||
24 | 24 | ||
25 | #include <linux/i2c/adp5588.h> | 25 | #include <linux/i2c/adp5588.h> |
26 | 26 | ||
27 | /* Configuration Register1 */ | ||
28 | #define AUTO_INC (1 << 7) | ||
29 | #define GPIEM_CFG (1 << 6) | ||
30 | #define OVR_FLOW_M (1 << 5) | ||
31 | #define INT_CFG (1 << 4) | ||
32 | #define OVR_FLOW_IEN (1 << 3) | ||
33 | #define K_LCK_IM (1 << 2) | ||
34 | #define GPI_IEN (1 << 1) | ||
35 | #define KE_IEN (1 << 0) | ||
36 | |||
37 | /* Interrupt Status Register */ | ||
38 | #define CMP2_INT (1 << 5) | ||
39 | #define CMP1_INT (1 << 4) | ||
40 | #define OVR_FLOW_INT (1 << 3) | ||
41 | #define K_LCK_INT (1 << 2) | ||
42 | #define GPI_INT (1 << 1) | ||
43 | #define KE_INT (1 << 0) | ||
44 | |||
45 | /* Key Lock and Event Counter Register */ | ||
46 | #define K_LCK_EN (1 << 6) | ||
47 | #define LCK21 0x30 | ||
48 | #define KEC 0xF | ||
49 | |||
50 | /* Key Event Register xy */ | 27 | /* Key Event Register xy */ |
51 | #define KEY_EV_PRESSED (1 << 7) | 28 | #define KEY_EV_PRESSED (1 << 7) |
52 | #define KEY_EV_MASK (0x7F) | 29 | #define KEY_EV_MASK (0x7F) |
@@ -55,10 +32,6 @@ | |||
55 | 32 | ||
56 | #define KEYP_MAX_EVENT 10 | 33 | #define KEYP_MAX_EVENT 10 |
57 | 34 | ||
58 | #define MAXGPIO 18 | ||
59 | #define ADP_BANK(offs) ((offs) >> 3) | ||
60 | #define ADP_BIT(offs) (1u << ((offs) & 0x7)) | ||
61 | |||
62 | /* | 35 | /* |
63 | * Early pre 4.0 Silicon required to delay readout by at least 25ms, | 36 | * Early pre 4.0 Silicon required to delay readout by at least 25ms, |
64 | * since the Event Counter Register updated 25ms after the interrupt | 37 | * since the Event Counter Register updated 25ms after the interrupt |
@@ -75,7 +48,7 @@ struct adp5588_kpad { | |||
75 | const struct adp5588_gpi_map *gpimap; | 48 | const struct adp5588_gpi_map *gpimap; |
76 | unsigned short gpimapsize; | 49 | unsigned short gpimapsize; |
77 | #ifdef CONFIG_GPIOLIB | 50 | #ifdef CONFIG_GPIOLIB |
78 | unsigned char gpiomap[MAXGPIO]; | 51 | unsigned char gpiomap[ADP5588_MAXGPIO]; |
79 | bool export_gpio; | 52 | bool export_gpio; |
80 | struct gpio_chip gc; | 53 | struct gpio_chip gc; |
81 | struct mutex gpio_lock; /* Protect cached dir, dat_out */ | 54 | struct mutex gpio_lock; /* Protect cached dir, dat_out */ |
@@ -103,8 +76,8 @@ static int adp5588_write(struct i2c_client *client, u8 reg, u8 val) | |||
103 | static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned off) | 76 | static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned off) |
104 | { | 77 | { |
105 | struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc); | 78 | struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc); |
106 | unsigned int bank = ADP_BANK(kpad->gpiomap[off]); | 79 | unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]); |
107 | unsigned int bit = ADP_BIT(kpad->gpiomap[off]); | 80 | unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]); |
108 | 81 | ||
109 | return !!(adp5588_read(kpad->client, GPIO_DAT_STAT1 + bank) & bit); | 82 | return !!(adp5588_read(kpad->client, GPIO_DAT_STAT1 + bank) & bit); |
110 | } | 83 | } |
@@ -113,8 +86,8 @@ static void adp5588_gpio_set_value(struct gpio_chip *chip, | |||
113 | unsigned off, int val) | 86 | unsigned off, int val) |
114 | { | 87 | { |
115 | struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc); | 88 | struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc); |
116 | unsigned int bank = ADP_BANK(kpad->gpiomap[off]); | 89 | unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]); |
117 | unsigned int bit = ADP_BIT(kpad->gpiomap[off]); | 90 | unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]); |
118 | 91 | ||
119 | mutex_lock(&kpad->gpio_lock); | 92 | mutex_lock(&kpad->gpio_lock); |
120 | 93 | ||
@@ -132,8 +105,8 @@ static void adp5588_gpio_set_value(struct gpio_chip *chip, | |||
132 | static int adp5588_gpio_direction_input(struct gpio_chip *chip, unsigned off) | 105 | static int adp5588_gpio_direction_input(struct gpio_chip *chip, unsigned off) |
133 | { | 106 | { |
134 | struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc); | 107 | struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc); |
135 | unsigned int bank = ADP_BANK(kpad->gpiomap[off]); | 108 | unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]); |
136 | unsigned int bit = ADP_BIT(kpad->gpiomap[off]); | 109 | unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]); |
137 | int ret; | 110 | int ret; |
138 | 111 | ||
139 | mutex_lock(&kpad->gpio_lock); | 112 | mutex_lock(&kpad->gpio_lock); |
@@ -150,8 +123,8 @@ static int adp5588_gpio_direction_output(struct gpio_chip *chip, | |||
150 | unsigned off, int val) | 123 | unsigned off, int val) |
151 | { | 124 | { |
152 | struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc); | 125 | struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc); |
153 | unsigned int bank = ADP_BANK(kpad->gpiomap[off]); | 126 | unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]); |
154 | unsigned int bit = ADP_BIT(kpad->gpiomap[off]); | 127 | unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]); |
155 | int ret; | 128 | int ret; |
156 | 129 | ||
157 | mutex_lock(&kpad->gpio_lock); | 130 | mutex_lock(&kpad->gpio_lock); |
@@ -176,7 +149,7 @@ static int adp5588_gpio_direction_output(struct gpio_chip *chip, | |||
176 | static int __devinit adp5588_build_gpiomap(struct adp5588_kpad *kpad, | 149 | static int __devinit adp5588_build_gpiomap(struct adp5588_kpad *kpad, |
177 | const struct adp5588_kpad_platform_data *pdata) | 150 | const struct adp5588_kpad_platform_data *pdata) |
178 | { | 151 | { |
179 | bool pin_used[MAXGPIO]; | 152 | bool pin_used[ADP5588_MAXGPIO]; |
180 | int n_unused = 0; | 153 | int n_unused = 0; |
181 | int i; | 154 | int i; |
182 | 155 | ||
@@ -191,7 +164,7 @@ static int __devinit adp5588_build_gpiomap(struct adp5588_kpad *kpad, | |||
191 | for (i = 0; i < kpad->gpimapsize; i++) | 164 | for (i = 0; i < kpad->gpimapsize; i++) |
192 | pin_used[kpad->gpimap[i].pin - GPI_PIN_BASE] = true; | 165 | pin_used[kpad->gpimap[i].pin - GPI_PIN_BASE] = true; |
193 | 166 | ||
194 | for (i = 0; i < MAXGPIO; i++) | 167 | for (i = 0; i < ADP5588_MAXGPIO; i++) |
195 | if (!pin_used[i]) | 168 | if (!pin_used[i]) |
196 | kpad->gpiomap[n_unused++] = i; | 169 | kpad->gpiomap[n_unused++] = i; |
197 | 170 | ||
@@ -234,7 +207,7 @@ static int __devinit adp5588_gpio_add(struct adp5588_kpad *kpad) | |||
234 | return error; | 207 | return error; |
235 | } | 208 | } |
236 | 209 | ||
237 | for (i = 0; i <= ADP_BANK(MAXGPIO); i++) { | 210 | for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) { |
238 | kpad->dat_out[i] = adp5588_read(kpad->client, | 211 | kpad->dat_out[i] = adp5588_read(kpad->client, |
239 | GPIO_DAT_OUT1 + i); | 212 | GPIO_DAT_OUT1 + i); |
240 | kpad->dir[i] = adp5588_read(kpad->client, GPIO_DIR1 + i); | 213 | kpad->dir[i] = adp5588_read(kpad->client, GPIO_DIR1 + i); |
@@ -318,11 +291,11 @@ static void adp5588_work(struct work_struct *work) | |||
318 | 291 | ||
319 | status = adp5588_read(client, INT_STAT); | 292 | status = adp5588_read(client, INT_STAT); |
320 | 293 | ||
321 | if (status & OVR_FLOW_INT) /* Unlikely and should never happen */ | 294 | if (status & ADP5588_OVR_FLOW_INT) /* Unlikely and should never happen */ |
322 | dev_err(&client->dev, "Event Overflow Error\n"); | 295 | dev_err(&client->dev, "Event Overflow Error\n"); |
323 | 296 | ||
324 | if (status & KE_INT) { | 297 | if (status & ADP5588_KE_INT) { |
325 | ev_cnt = adp5588_read(client, KEY_LCK_EC_STAT) & KEC; | 298 | ev_cnt = adp5588_read(client, KEY_LCK_EC_STAT) & ADP5588_KEC; |
326 | if (ev_cnt) { | 299 | if (ev_cnt) { |
327 | adp5588_report_events(kpad, ev_cnt); | 300 | adp5588_report_events(kpad, ev_cnt); |
328 | input_sync(kpad->input); | 301 | input_sync(kpad->input); |
@@ -360,7 +333,7 @@ static int __devinit adp5588_setup(struct i2c_client *client) | |||
360 | if (pdata->en_keylock) { | 333 | if (pdata->en_keylock) { |
361 | ret |= adp5588_write(client, UNLOCK1, pdata->unlock_key1); | 334 | ret |= adp5588_write(client, UNLOCK1, pdata->unlock_key1); |
362 | ret |= adp5588_write(client, UNLOCK2, pdata->unlock_key2); | 335 | ret |= adp5588_write(client, UNLOCK2, pdata->unlock_key2); |
363 | ret |= adp5588_write(client, KEY_LCK_EC_STAT, K_LCK_EN); | 336 | ret |= adp5588_write(client, KEY_LCK_EC_STAT, ADP5588_K_LCK_EN); |
364 | } | 337 | } |
365 | 338 | ||
366 | for (i = 0; i < KEYP_MAX_EVENT; i++) | 339 | for (i = 0; i < KEYP_MAX_EVENT; i++) |
@@ -384,7 +357,7 @@ static int __devinit adp5588_setup(struct i2c_client *client) | |||
384 | } | 357 | } |
385 | 358 | ||
386 | if (gpio_data) { | 359 | if (gpio_data) { |
387 | for (i = 0; i <= ADP_BANK(MAXGPIO); i++) { | 360 | for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) { |
388 | int pull_mask = gpio_data->pullup_dis_mask; | 361 | int pull_mask = gpio_data->pullup_dis_mask; |
389 | 362 | ||
390 | ret |= adp5588_write(client, GPIO_PULL1 + i, | 363 | ret |= adp5588_write(client, GPIO_PULL1 + i, |
@@ -392,11 +365,14 @@ static int __devinit adp5588_setup(struct i2c_client *client) | |||
392 | } | 365 | } |
393 | } | 366 | } |
394 | 367 | ||
395 | ret |= adp5588_write(client, INT_STAT, CMP2_INT | CMP1_INT | | 368 | ret |= adp5588_write(client, INT_STAT, |
396 | OVR_FLOW_INT | K_LCK_INT | | 369 | ADP5588_CMP2_INT | ADP5588_CMP1_INT | |
397 | GPI_INT | KE_INT); /* Status is W1C */ | 370 | ADP5588_OVR_FLOW_INT | ADP5588_K_LCK_INT | |
371 | ADP5588_GPI_INT | ADP5588_KE_INT); /* Status is W1C */ | ||
398 | 372 | ||
399 | ret |= adp5588_write(client, CFG, INT_CFG | OVR_FLOW_IEN | KE_IEN); | 373 | ret |= adp5588_write(client, CFG, ADP5588_INT_CFG | |
374 | ADP5588_OVR_FLOW_IEN | | ||
375 | ADP5588_KE_IEN); | ||
400 | 376 | ||
401 | if (ret < 0) { | 377 | if (ret < 0) { |
402 | dev_err(&client->dev, "Write Error\n"); | 378 | dev_err(&client->dev, "Write Error\n"); |
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c index d358ef8623f4..11478eb2c27d 100644 --- a/drivers/input/keyboard/atkbd.c +++ b/drivers/input/keyboard/atkbd.c | |||
@@ -63,6 +63,10 @@ static bool atkbd_extra; | |||
63 | module_param_named(extra, atkbd_extra, bool, 0); | 63 | module_param_named(extra, atkbd_extra, bool, 0); |
64 | MODULE_PARM_DESC(extra, "Enable extra LEDs and keys on IBM RapidAcces, EzKey and similar keyboards"); | 64 | MODULE_PARM_DESC(extra, "Enable extra LEDs and keys on IBM RapidAcces, EzKey and similar keyboards"); |
65 | 65 | ||
66 | static bool atkbd_terminal; | ||
67 | module_param_named(terminal, atkbd_terminal, bool, 0); | ||
68 | MODULE_PARM_DESC(terminal, "Enable break codes on an IBM Terminal keyboard connected via AT/PS2"); | ||
69 | |||
66 | /* | 70 | /* |
67 | * Scancode to keycode tables. These are just the default setting, and | 71 | * Scancode to keycode tables. These are just the default setting, and |
68 | * are loadable via a userland utility. | 72 | * are loadable via a userland utility. |
@@ -136,7 +140,8 @@ static const unsigned short atkbd_unxlate_table[128] = { | |||
136 | #define ATKBD_CMD_ENABLE 0x00f4 | 140 | #define ATKBD_CMD_ENABLE 0x00f4 |
137 | #define ATKBD_CMD_RESET_DIS 0x00f5 /* Reset to defaults and disable */ | 141 | #define ATKBD_CMD_RESET_DIS 0x00f5 /* Reset to defaults and disable */ |
138 | #define ATKBD_CMD_RESET_DEF 0x00f6 /* Reset to defaults */ | 142 | #define ATKBD_CMD_RESET_DEF 0x00f6 /* Reset to defaults */ |
139 | #define ATKBD_CMD_SETALL_MBR 0x00fa | 143 | #define ATKBD_CMD_SETALL_MB 0x00f8 /* Set all keys to give break codes */ |
144 | #define ATKBD_CMD_SETALL_MBR 0x00fa /* ... and repeat */ | ||
140 | #define ATKBD_CMD_RESET_BAT 0x02ff | 145 | #define ATKBD_CMD_RESET_BAT 0x02ff |
141 | #define ATKBD_CMD_RESEND 0x00fe | 146 | #define ATKBD_CMD_RESEND 0x00fe |
142 | #define ATKBD_CMD_EX_ENABLE 0x10ea | 147 | #define ATKBD_CMD_EX_ENABLE 0x10ea |
@@ -764,6 +769,11 @@ static int atkbd_select_set(struct atkbd *atkbd, int target_set, int allow_extra | |||
764 | } | 769 | } |
765 | } | 770 | } |
766 | 771 | ||
772 | if (atkbd_terminal) { | ||
773 | ps2_command(ps2dev, param, ATKBD_CMD_SETALL_MB); | ||
774 | return 3; | ||
775 | } | ||
776 | |||
767 | if (target_set != 3) | 777 | if (target_set != 3) |
768 | return 2; | 778 | return 2; |
769 | 779 | ||
diff --git a/drivers/input/misc/pcf8574_keypad.c b/drivers/input/misc/pcf8574_keypad.c index 4b42ffc0532a..d1583aea1721 100644 --- a/drivers/input/misc/pcf8574_keypad.c +++ b/drivers/input/misc/pcf8574_keypad.c | |||
@@ -127,14 +127,6 @@ static int __devinit pcf8574_kp_probe(struct i2c_client *client, const struct i2 | |||
127 | idev->id.product = 0x0001; | 127 | idev->id.product = 0x0001; |
128 | idev->id.version = 0x0100; | 128 | idev->id.version = 0x0100; |
129 | 129 | ||
130 | input_set_drvdata(idev, lp); | ||
131 | |||
132 | ret = input_register_device(idev); | ||
133 | if (ret) { | ||
134 | dev_err(&client->dev, "input_register_device() failed\n"); | ||
135 | goto fail_register; | ||
136 | } | ||
137 | |||
138 | lp->laststate = read_state(lp); | 130 | lp->laststate = read_state(lp); |
139 | 131 | ||
140 | ret = request_threaded_irq(client->irq, NULL, pcf8574_kp_irq_handler, | 132 | ret = request_threaded_irq(client->irq, NULL, pcf8574_kp_irq_handler, |
@@ -142,16 +134,21 @@ static int __devinit pcf8574_kp_probe(struct i2c_client *client, const struct i2 | |||
142 | DRV_NAME, lp); | 134 | DRV_NAME, lp); |
143 | if (ret) { | 135 | if (ret) { |
144 | dev_err(&client->dev, "IRQ %d is not free\n", client->irq); | 136 | dev_err(&client->dev, "IRQ %d is not free\n", client->irq); |
145 | goto fail_irq; | 137 | goto fail_free_device; |
138 | } | ||
139 | |||
140 | ret = input_register_device(idev); | ||
141 | if (ret) { | ||
142 | dev_err(&client->dev, "input_register_device() failed\n"); | ||
143 | goto fail_free_irq; | ||
146 | } | 144 | } |
147 | 145 | ||
148 | i2c_set_clientdata(client, lp); | 146 | i2c_set_clientdata(client, lp); |
149 | return 0; | 147 | return 0; |
150 | 148 | ||
151 | fail_irq: | 149 | fail_free_irq: |
152 | input_unregister_device(idev); | 150 | free_irq(client->irq, lp); |
153 | fail_register: | 151 | fail_free_device: |
154 | input_set_drvdata(idev, NULL); | ||
155 | input_free_device(idev); | 152 | input_free_device(idev); |
156 | fail_allocate: | 153 | fail_allocate: |
157 | kfree(lp); | 154 | kfree(lp); |
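The pcf8574 reordering above follows the usual probe pattern: request the interrupt only once everything its handler touches is set up, register the input device last, and unwind in strict reverse order on failure. A generic sketch of that shape; all names below are illustrative, not from this patch:

static int example_probe(struct i2c_client *client)
{
	struct example_kp *kp;
	int err;

	kp = kzalloc(sizeof(*kp), GFP_KERNEL);
	if (!kp)
		return -ENOMEM;

	kp->idev = input_allocate_device();
	if (!kp->idev) {
		err = -ENOMEM;
		goto err_free_kp;
	}

	err = request_threaded_irq(client->irq, NULL, example_irq,
				   IRQF_TRIGGER_LOW | IRQF_ONESHOT,
				   "example-keypad", kp);
	if (err)
		goto err_free_idev;

	err = input_register_device(kp->idev);
	if (err)
		goto err_free_irq;

	i2c_set_clientdata(client, kp);
	return 0;

err_free_irq:
	free_irq(client->irq, kp);
err_free_idev:
	input_free_device(kp->idev);
err_free_kp:
	kfree(kp);
	return err;
}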
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index ed7ad7416b24..a5475b577086 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h | |||
@@ -351,6 +351,17 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { | |||
351 | }, | 351 | }, |
352 | }, | 352 | }, |
353 | { | 353 | { |
354 | /* | ||
355 | * Most (all?) VAIOs do not have external PS/2 ports, nor | ||
356 | * do they implement active multiplexing properly, and | ||
357 | * MUX discovery usually messes up keyboard/touchpad. | ||
358 | */ | ||
359 | .matches = { | ||
360 | DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), | ||
361 | DMI_MATCH(DMI_BOARD_NAME, "VAIO"), | ||
362 | }, | ||
363 | }, | ||
364 | { | ||
354 | /* Amoi M636/A737 */ | 365 | /* Amoi M636/A737 */ |
355 | .matches = { | 366 | .matches = { |
356 | DMI_MATCH(DMI_SYS_VENDOR, "Amoi Electronics CO.,LTD."), | 367 | DMI_MATCH(DMI_SYS_VENDOR, "Amoi Electronics CO.,LTD."), |
diff --git a/drivers/input/tablet/acecad.c b/drivers/input/tablet/acecad.c index aea9a9399a36..d94f7e9aa997 100644 --- a/drivers/input/tablet/acecad.c +++ b/drivers/input/tablet/acecad.c | |||
@@ -229,12 +229,13 @@ static int usb_acecad_probe(struct usb_interface *intf, const struct usb_device_ | |||
229 | 229 | ||
230 | err = input_register_device(acecad->input); | 230 | err = input_register_device(acecad->input); |
231 | if (err) | 231 | if (err) |
232 | goto fail2; | 232 | goto fail3; |
233 | 233 | ||
234 | usb_set_intfdata(intf, acecad); | 234 | usb_set_intfdata(intf, acecad); |
235 | 235 | ||
236 | return 0; | 236 | return 0; |
237 | 237 | ||
238 | fail3: usb_free_urb(acecad->irq); | ||
238 | fail2: usb_free_coherent(dev, 8, acecad->data, acecad->data_dma); | 239 | fail2: usb_free_coherent(dev, 8, acecad->data, acecad->data_dma); |
239 | fail1: input_free_device(input_dev); | 240 | fail1: input_free_device(input_dev); |
240 | kfree(acecad); | 241 | kfree(acecad); |
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index cc2a88d5192f..77b8fd20cd90 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig | |||
@@ -10,7 +10,7 @@ menuconfig NEW_LEDS | |||
10 | if NEW_LEDS | 10 | if NEW_LEDS |
11 | 11 | ||
12 | config LEDS_CLASS | 12 | config LEDS_CLASS |
13 | tristate "LED Class Support" | 13 | bool "LED Class Support" |
14 | help | 14 | help |
15 | This option enables the led sysfs class in /sys/class/leds. You'll | 15 | This option enables the led sysfs class in /sys/class/leds. You'll |
16 | need this to do anything useful with LEDs. If unsure, say N. | 16 | need this to do anything useful with LEDs. If unsure, say N. |
@@ -176,6 +176,24 @@ config LEDS_LP3944 | |||
176 | To compile this driver as a module, choose M here: the | 176 | To compile this driver as a module, choose M here: the |
177 | module will be called leds-lp3944. | 177 | module will be called leds-lp3944. |
178 | 178 | ||
179 | config LEDS_LP5521 | ||
180 | tristate "LED Support for N.S. LP5521 LED driver chip" | ||
181 | depends on LEDS_CLASS && I2C | ||
182 | help | ||
183 | If you say yes here you get support for the National Semiconductor | ||
184 | LP5521 LED driver. It is a 3 channel chip with programmable engines. | ||
185 | The driver provides direct control via the LED class and an interface | ||
186 | for programming the engines. | ||
187 | |||
188 | config LEDS_LP5523 | ||
189 | tristate "LED Support for N.S. LP5523 LED driver chip" | ||
190 | depends on LEDS_CLASS && I2C | ||
191 | help | ||
192 | If you say yes here you get support for the National Semiconductor | ||
193 | LP5523 LED driver. It is a 9 channel chip with programmable engines. | ||
194 | The driver provides direct control via the LED class and an interface | ||
195 | for programming the engines. | ||
196 | |||
179 | config LEDS_CLEVO_MAIL | 197 | config LEDS_CLEVO_MAIL |
180 | tristate "Mail LED on Clevo notebook" | 198 | tristate "Mail LED on Clevo notebook" |
181 | depends on X86 && SERIO_I8042 && DMI | 199 | depends on X86 && SERIO_I8042 && DMI |
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile index 9c96db40ef6d..aae6989ff6b6 100644 --- a/drivers/leds/Makefile +++ b/drivers/leds/Makefile | |||
@@ -23,6 +23,8 @@ obj-$(CONFIG_LEDS_SUNFIRE) += leds-sunfire.o | |||
23 | obj-$(CONFIG_LEDS_PCA9532) += leds-pca9532.o | 23 | obj-$(CONFIG_LEDS_PCA9532) += leds-pca9532.o |
24 | obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o | 24 | obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o |
25 | obj-$(CONFIG_LEDS_LP3944) += leds-lp3944.o | 25 | obj-$(CONFIG_LEDS_LP3944) += leds-lp3944.o |
26 | obj-$(CONFIG_LEDS_LP5521) += leds-lp5521.o | ||
27 | obj-$(CONFIG_LEDS_LP5523) += leds-lp5523.o | ||
26 | obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o | 28 | obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o |
27 | obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o | 29 | obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o |
28 | obj-$(CONFIG_LEDS_FSG) += leds-fsg.o | 30 | obj-$(CONFIG_LEDS_FSG) += leds-fsg.o |
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c index 260660076507..211e21f34bd5 100644 --- a/drivers/leds/led-class.c +++ b/drivers/leds/led-class.c | |||
@@ -81,6 +81,79 @@ static struct device_attribute led_class_attrs[] = { | |||
81 | __ATTR_NULL, | 81 | __ATTR_NULL, |
82 | }; | 82 | }; |
83 | 83 | ||
84 | static void led_timer_function(unsigned long data) | ||
85 | { | ||
86 | struct led_classdev *led_cdev = (void *)data; | ||
87 | unsigned long brightness; | ||
88 | unsigned long delay; | ||
89 | |||
90 | if (!led_cdev->blink_delay_on || !led_cdev->blink_delay_off) { | ||
91 | led_set_brightness(led_cdev, LED_OFF); | ||
92 | return; | ||
93 | } | ||
94 | |||
95 | brightness = led_get_brightness(led_cdev); | ||
96 | if (!brightness) { | ||
97 | /* Time to switch the LED on. */ | ||
98 | brightness = led_cdev->blink_brightness; | ||
99 | delay = led_cdev->blink_delay_on; | ||
100 | } else { | ||
101 | /* Store the current brightness value to be able | ||
102 | * to restore it when the delay_off period is over. | ||
103 | */ | ||
104 | led_cdev->blink_brightness = brightness; | ||
105 | brightness = LED_OFF; | ||
106 | delay = led_cdev->blink_delay_off; | ||
107 | } | ||
108 | |||
109 | led_set_brightness(led_cdev, brightness); | ||
110 | |||
111 | mod_timer(&led_cdev->blink_timer, jiffies + msecs_to_jiffies(delay)); | ||
112 | } | ||
113 | |||
114 | static void led_stop_software_blink(struct led_classdev *led_cdev) | ||
115 | { | ||
116 | /* deactivate previous settings */ | ||
117 | del_timer_sync(&led_cdev->blink_timer); | ||
118 | led_cdev->blink_delay_on = 0; | ||
119 | led_cdev->blink_delay_off = 0; | ||
120 | } | ||
121 | |||
122 | static void led_set_software_blink(struct led_classdev *led_cdev, | ||
123 | unsigned long delay_on, | ||
124 | unsigned long delay_off) | ||
125 | { | ||
126 | int current_brightness; | ||
127 | |||
128 | current_brightness = led_get_brightness(led_cdev); | ||
129 | if (current_brightness) | ||
130 | led_cdev->blink_brightness = current_brightness; | ||
131 | if (!led_cdev->blink_brightness) | ||
132 | led_cdev->blink_brightness = led_cdev->max_brightness; | ||
133 | |||
134 | if (delay_on == led_cdev->blink_delay_on && | ||
135 | delay_off == led_cdev->blink_delay_off) | ||
136 | return; | ||
137 | |||
138 | led_stop_software_blink(led_cdev); | ||
139 | |||
140 | led_cdev->blink_delay_on = delay_on; | ||
141 | led_cdev->blink_delay_off = delay_off; | ||
142 | |||
143 | /* never on - don't blink */ | ||
144 | if (!delay_on) | ||
145 | return; | ||
146 | |||
147 | /* never off - just set to brightness */ | ||
148 | if (!delay_off) { | ||
149 | led_set_brightness(led_cdev, led_cdev->blink_brightness); | ||
150 | return; | ||
151 | } | ||
152 | |||
153 | mod_timer(&led_cdev->blink_timer, jiffies + 1); | ||
154 | } | ||
155 | |||
156 | |||
84 | /** | 157 | /** |
85 | * led_classdev_suspend - suspend an led_classdev. | 158 | * led_classdev_suspend - suspend an led_classdev. |
86 | * @led_cdev: the led_classdev to suspend. | 159 | * @led_cdev: the led_classdev to suspend. |
@@ -148,6 +221,10 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev) | |||
148 | 221 | ||
149 | led_update_brightness(led_cdev); | 222 | led_update_brightness(led_cdev); |
150 | 223 | ||
224 | init_timer(&led_cdev->blink_timer); | ||
225 | led_cdev->blink_timer.function = led_timer_function; | ||
226 | led_cdev->blink_timer.data = (unsigned long)led_cdev; | ||
227 | |||
151 | #ifdef CONFIG_LEDS_TRIGGERS | 228 | #ifdef CONFIG_LEDS_TRIGGERS |
152 | led_trigger_set_default(led_cdev); | 229 | led_trigger_set_default(led_cdev); |
153 | #endif | 230 | #endif |
@@ -157,7 +234,6 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev) | |||
157 | 234 | ||
158 | return 0; | 235 | return 0; |
159 | } | 236 | } |
160 | |||
161 | EXPORT_SYMBOL_GPL(led_classdev_register); | 237 | EXPORT_SYMBOL_GPL(led_classdev_register); |
162 | 238 | ||
163 | /** | 239 | /** |
@@ -175,6 +251,9 @@ void led_classdev_unregister(struct led_classdev *led_cdev) | |||
175 | up_write(&led_cdev->trigger_lock); | 251 | up_write(&led_cdev->trigger_lock); |
176 | #endif | 252 | #endif |
177 | 253 | ||
254 | /* Stop blinking */ | ||
255 | led_brightness_set(led_cdev, LED_OFF); | ||
256 | |||
178 | device_unregister(led_cdev->dev); | 257 | device_unregister(led_cdev->dev); |
179 | 258 | ||
180 | down_write(&leds_list_lock); | 259 | down_write(&leds_list_lock); |
@@ -183,6 +262,30 @@ void led_classdev_unregister(struct led_classdev *led_cdev) | |||
183 | } | 262 | } |
184 | EXPORT_SYMBOL_GPL(led_classdev_unregister); | 263 | EXPORT_SYMBOL_GPL(led_classdev_unregister); |
185 | 264 | ||
265 | void led_blink_set(struct led_classdev *led_cdev, | ||
266 | unsigned long *delay_on, | ||
267 | unsigned long *delay_off) | ||
268 | { | ||
269 | if (led_cdev->blink_set && | ||
270 | led_cdev->blink_set(led_cdev, delay_on, delay_off)) | ||
271 | return; | ||
272 | |||
273 | /* blink with 1 Hz as default if nothing specified */ | ||
274 | if (!*delay_on && !*delay_off) | ||
275 | *delay_on = *delay_off = 500; | ||
276 | |||
277 | led_set_software_blink(led_cdev, *delay_on, *delay_off); | ||
278 | } | ||
279 | EXPORT_SYMBOL(led_blink_set); | ||
280 | |||
281 | void led_brightness_set(struct led_classdev *led_cdev, | ||
282 | enum led_brightness brightness) | ||
283 | { | ||
284 | led_stop_software_blink(led_cdev); | ||
285 | led_cdev->brightness_set(led_cdev, brightness); | ||
286 | } | ||
287 | EXPORT_SYMBOL(led_brightness_set); | ||
288 | |||
186 | static int __init leds_init(void) | 289 | static int __init leds_init(void) |
187 | { | 290 | { |
188 | leds_class = class_create(THIS_MODULE, "leds"); | 291 | leds_class = class_create(THIS_MODULE, "leds"); |
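led_blink_set() and led_brightness_set(), added above, are meant to be called by triggers and other in-kernel users: the first prefers the driver's hardware blink_set() hook and falls back to the new software timer, the second stops any software blink before applying a fixed brightness. A minimal trigger sketch using them; the trigger name and functions are illustrative assumptions:

#include <linux/leds.h>

static void my_trig_activate(struct led_classdev *led_cdev)
{
	unsigned long delay_on = 0, delay_off = 0;

	/* Passing 0/0 requests the 1 Hz default (500 ms on, 500 ms off). */
	led_blink_set(led_cdev, &delay_on, &delay_off);
}

static void my_trig_deactivate(struct led_classdev *led_cdev)
{
	/* Cancels the software blink timer, then forces the LED off. */
	led_brightness_set(led_cdev, LED_OFF);
}

static struct led_trigger my_blink_trigger = {
	.name       = "my-blink",
	.activate   = my_trig_activate,
	.deactivate = my_trig_deactivate,
};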
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c index f1c00db88b5e..c41eb6180c9c 100644 --- a/drivers/leds/led-triggers.c +++ b/drivers/leds/led-triggers.c | |||
@@ -113,7 +113,7 @@ void led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trigger) | |||
113 | if (led_cdev->trigger->deactivate) | 113 | if (led_cdev->trigger->deactivate) |
114 | led_cdev->trigger->deactivate(led_cdev); | 114 | led_cdev->trigger->deactivate(led_cdev); |
115 | led_cdev->trigger = NULL; | 115 | led_cdev->trigger = NULL; |
116 | led_set_brightness(led_cdev, LED_OFF); | 116 | led_brightness_set(led_cdev, LED_OFF); |
117 | } | 117 | } |
118 | if (trigger) { | 118 | if (trigger) { |
119 | write_lock_irqsave(&trigger->leddev_list_lock, flags); | 119 | write_lock_irqsave(&trigger->leddev_list_lock, flags); |
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c index ea57e05d08f3..4d9fa38d9ff6 100644 --- a/drivers/leds/leds-gpio.c +++ b/drivers/leds/leds-gpio.c | |||
@@ -316,7 +316,7 @@ static struct of_platform_driver of_gpio_leds_driver = { | |||
316 | 316 | ||
317 | static int __init gpio_led_init(void) | 317 | static int __init gpio_led_init(void) |
318 | { | 318 | { |
319 | int ret; | 319 | int ret = 0; |
320 | 320 | ||
321 | #ifdef CONFIG_LEDS_GPIO_PLATFORM | 321 | #ifdef CONFIG_LEDS_GPIO_PLATFORM |
322 | ret = platform_driver_register(&gpio_led_driver); | 322 | ret = platform_driver_register(&gpio_led_driver); |
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c new file mode 100644 index 000000000000..3782f31f06d2 --- /dev/null +++ b/drivers/leds/leds-lp5521.c | |||
@@ -0,0 +1,821 @@ | |||
1 | /* | ||
2 | * LP5521 LED chip driver. | ||
3 | * | ||
4 | * Copyright (C) 2010 Nokia Corporation | ||
5 | * | ||
6 | * Contact: Samu Onkalo <samu.p.onkalo@nokia.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License | ||
10 | * version 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but | ||
13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
15 | * General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
20 | * 02110-1301 USA | ||
21 | */ | ||
22 | |||
23 | #include <linux/module.h> | ||
24 | #include <linux/init.h> | ||
25 | #include <linux/i2c.h> | ||
26 | #include <linux/mutex.h> | ||
27 | #include <linux/gpio.h> | ||
28 | #include <linux/interrupt.h> | ||
29 | #include <linux/delay.h> | ||
30 | #include <linux/ctype.h> | ||
31 | #include <linux/spinlock.h> | ||
32 | #include <linux/wait.h> | ||
33 | #include <linux/leds.h> | ||
34 | #include <linux/leds-lp5521.h> | ||
35 | #include <linux/workqueue.h> | ||
36 | #include <linux/slab.h> | ||
37 | |||
38 | #define LP5521_PROGRAM_LENGTH 32 /* in bytes */ | ||
39 | |||
40 | #define LP5521_MAX_LEDS 3 /* Maximum number of LEDs */ | ||
41 | #define LP5521_MAX_ENGINES 3 /* Maximum number of engines */ | ||
42 | |||
43 | #define LP5521_ENG_MASK_BASE 0x30 /* 00110000 */ | ||
44 | #define LP5521_ENG_STATUS_MASK 0x07 /* 00000111 */ | ||
45 | |||
46 | #define LP5521_CMD_LOAD 0x15 /* 00010101 */ | ||
47 | #define LP5521_CMD_RUN 0x2a /* 00101010 */ | ||
48 | #define LP5521_CMD_DIRECT 0x3f /* 00111111 */ | ||
49 | #define LP5521_CMD_DISABLED 0x00 /* 00000000 */ | ||
50 | |||
51 | /* Registers */ | ||
52 | #define LP5521_REG_ENABLE 0x00 | ||
53 | #define LP5521_REG_OP_MODE 0x01 | ||
54 | #define LP5521_REG_R_PWM 0x02 | ||
55 | #define LP5521_REG_G_PWM 0x03 | ||
56 | #define LP5521_REG_B_PWM 0x04 | ||
57 | #define LP5521_REG_R_CURRENT 0x05 | ||
58 | #define LP5521_REG_G_CURRENT 0x06 | ||
59 | #define LP5521_REG_B_CURRENT 0x07 | ||
60 | #define LP5521_REG_CONFIG 0x08 | ||
61 | #define LP5521_REG_R_CHANNEL_PC 0x09 | ||
62 | #define LP5521_REG_G_CHANNEL_PC 0x0A | ||
63 | #define LP5521_REG_B_CHANNEL_PC 0x0B | ||
64 | #define LP5521_REG_STATUS 0x0C | ||
65 | #define LP5521_REG_RESET 0x0D | ||
66 | #define LP5521_REG_GPO 0x0E | ||
67 | #define LP5521_REG_R_PROG_MEM 0x10 | ||
68 | #define LP5521_REG_G_PROG_MEM 0x30 | ||
69 | #define LP5521_REG_B_PROG_MEM 0x50 | ||
70 | |||
71 | #define LP5521_PROG_MEM_BASE LP5521_REG_R_PROG_MEM | ||
72 | #define LP5521_PROG_MEM_SIZE 0x20 | ||
73 | |||
74 | /* Base register to set LED current */ | ||
75 | #define LP5521_REG_LED_CURRENT_BASE LP5521_REG_R_CURRENT | ||
76 | |||
77 | /* Base register to set the brightness */ | ||
78 | #define LP5521_REG_LED_PWM_BASE LP5521_REG_R_PWM | ||
79 | |||
80 | /* Bits in ENABLE register */ | ||
81 | #define LP5521_MASTER_ENABLE 0x40 /* Chip master enable */ | ||
82 | #define LP5521_LOGARITHMIC_PWM 0x80 /* Logarithmic PWM adjustment */ | ||
83 | #define LP5521_EXEC_RUN 0x2A | ||
84 | |||
85 | /* Bits in CONFIG register */ | ||
86 | #define LP5521_PWM_HF 0x40 /* PWM: 0 = 256Hz, 1 = 558Hz */ | ||
87 | #define LP5521_PWRSAVE_EN 0x20 /* 1 = Power save mode */ | ||
88 | #define LP5521_CP_MODE_OFF 0 /* Charge pump (CP) off */ | ||
89 | #define LP5521_CP_MODE_BYPASS 8 /* CP forced to bypass mode */ | ||
90 | #define LP5521_CP_MODE_1X5 0x10 /* CP forced to 1.5x mode */ | ||
91 | #define LP5521_CP_MODE_AUTO 0x18 /* Automatic mode selection */ | ||
92 | #define LP5521_R_TO_BATT 4 /* R out: 0 = CP, 1 = Vbat */ | ||
93 | #define LP5521_CLK_SRC_EXT 0 /* Ext-clk source (CLK_32K) */ | ||
94 | #define LP5521_CLK_INT 1 /* Internal clock */ | ||
95 | #define LP5521_CLK_AUTO 2 /* Automatic clock selection */ | ||
96 | |||
97 | /* Status */ | ||
98 | #define LP5521_EXT_CLK_USED 0x08 | ||
99 | |||
100 | struct lp5521_engine { | ||
101 | const struct attribute_group *attributes; | ||
102 | int id; | ||
103 | u8 mode; | ||
104 | u8 prog_page; | ||
105 | u8 engine_mask; | ||
106 | }; | ||
107 | |||
108 | struct lp5521_led { | ||
109 | int id; | ||
110 | u8 chan_nr; | ||
111 | u8 led_current; | ||
112 | u8 max_current; | ||
113 | struct led_classdev cdev; | ||
114 | struct work_struct brightness_work; | ||
115 | u8 brightness; | ||
116 | }; | ||
117 | |||
118 | struct lp5521_chip { | ||
119 | struct lp5521_platform_data *pdata; | ||
120 | struct mutex lock; /* Serialize control */ | ||
121 | struct i2c_client *client; | ||
122 | struct lp5521_engine engines[LP5521_MAX_ENGINES]; | ||
123 | struct lp5521_led leds[LP5521_MAX_LEDS]; | ||
124 | u8 num_channels; | ||
125 | u8 num_leds; | ||
126 | }; | ||
127 | |||
128 | #define cdev_to_led(c) container_of(c, struct lp5521_led, cdev) | ||
129 | #define engine_to_lp5521(eng) container_of((eng), struct lp5521_chip, \ | ||
130 | engines[(eng)->id - 1]) | ||
131 | #define led_to_lp5521(led) container_of((led), struct lp5521_chip, \ | ||
132 | leds[(led)->id]) | ||
133 | |||
134 | static void lp5521_led_brightness_work(struct work_struct *work); | ||
135 | |||
136 | static inline int lp5521_write(struct i2c_client *client, u8 reg, u8 value) | ||
137 | { | ||
138 | return i2c_smbus_write_byte_data(client, reg, value); | ||
139 | } | ||
140 | |||
141 | static int lp5521_read(struct i2c_client *client, u8 reg, u8 *buf) | ||
142 | { | ||
143 | s32 ret; | ||
144 | |||
145 | ret = i2c_smbus_read_byte_data(client, reg); | ||
146 | if (ret < 0) | ||
147 | return -EIO; | ||
148 | |||
149 | *buf = ret; | ||
150 | return 0; | ||
151 | } | ||
152 | |||
153 | static int lp5521_set_engine_mode(struct lp5521_engine *engine, u8 mode) | ||
154 | { | ||
155 | struct lp5521_chip *chip = engine_to_lp5521(engine); | ||
156 | struct i2c_client *client = chip->client; | ||
157 | int ret; | ||
158 | u8 engine_state; | ||
159 | |||
160 | /* Only transitions between RUN and DIRECT modes are handled here */ | ||
161 | if (mode == LP5521_CMD_LOAD) | ||
162 | return 0; | ||
163 | |||
164 | if (mode == LP5521_CMD_DISABLED) | ||
165 | mode = LP5521_CMD_DIRECT; | ||
166 | |||
167 | ret = lp5521_read(client, LP5521_REG_OP_MODE, &engine_state); | ||
168 | |||
169 | /* set mode only for this engine */ | ||
170 | engine_state &= ~(engine->engine_mask); | ||
171 | mode &= engine->engine_mask; | ||
172 | engine_state |= mode; | ||
173 | ret |= lp5521_write(client, LP5521_REG_OP_MODE, engine_state); | ||
174 | |||
175 | return ret; | ||
176 | } | ||
177 | |||
178 | static int lp5521_load_program(struct lp5521_engine *eng, const u8 *pattern) | ||
179 | { | ||
180 | struct lp5521_chip *chip = engine_to_lp5521(eng); | ||
181 | struct i2c_client *client = chip->client; | ||
182 | int ret; | ||
183 | int addr; | ||
184 | u8 mode; | ||
185 | |||
186 | /* move current engine to direct mode and remember the state */ | ||
187 | ret = lp5521_set_engine_mode(eng, LP5521_CMD_DIRECT); | ||
188 | usleep_range(1000, 10000); | ||
189 | ret |= lp5521_read(client, LP5521_REG_OP_MODE, &mode); | ||
190 | |||
191 | /* For loading, put all the engines into load mode */ | ||
192 | lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT); | ||
193 | usleep_range(1000, 10000); | ||
194 | lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_LOAD); | ||
195 | usleep_range(1000, 10000); | ||
196 | |||
197 | addr = LP5521_PROG_MEM_BASE + eng->prog_page * LP5521_PROG_MEM_SIZE; | ||
198 | i2c_smbus_write_i2c_block_data(client, | ||
199 | addr, | ||
200 | LP5521_PROG_MEM_SIZE, | ||
201 | pattern); | ||
202 | |||
203 | ret |= lp5521_write(client, LP5521_REG_OP_MODE, mode); | ||
204 | return ret; | ||
205 | } | ||
206 | |||
207 | static int lp5521_set_led_current(struct lp5521_chip *chip, int led, u8 curr) | ||
208 | { | ||
209 | return lp5521_write(chip->client, | ||
210 | LP5521_REG_LED_CURRENT_BASE + chip->leds[led].chan_nr, | ||
211 | curr); | ||
212 | } | ||
213 | |||
214 | static void lp5521_init_engine(struct lp5521_chip *chip, | ||
215 | const struct attribute_group *attr_group) | ||
216 | { | ||
217 | int i; | ||
218 | for (i = 0; i < ARRAY_SIZE(chip->engines); i++) { | ||
219 | chip->engines[i].id = i + 1; | ||
220 | chip->engines[i].engine_mask = LP5521_ENG_MASK_BASE >> (i * 2); | ||
221 | chip->engines[i].prog_page = i; | ||
222 | chip->engines[i].attributes = &attr_group[i]; | ||
223 | } | ||
224 | } | ||
225 | |||
226 | static int lp5521_configure(struct i2c_client *client, | ||
227 | const struct attribute_group *attr_group) | ||
228 | { | ||
229 | struct lp5521_chip *chip = i2c_get_clientdata(client); | ||
230 | int ret; | ||
231 | |||
232 | lp5521_init_engine(chip, attr_group); | ||
233 | |||
234 | lp5521_write(client, LP5521_REG_RESET, 0xff); | ||
235 | |||
236 | usleep_range(10000, 20000); | ||
237 | |||
238 | /* Set all PWMs to direct control mode */ | ||
239 | ret = lp5521_write(client, LP5521_REG_OP_MODE, 0x3F); | ||
240 | |||
241 | /* Enable auto-powersave, set charge pump to auto, red to battery */ | ||
242 | ret |= lp5521_write(client, LP5521_REG_CONFIG, | ||
243 | LP5521_PWRSAVE_EN | LP5521_CP_MODE_AUTO | LP5521_R_TO_BATT); | ||
244 | |||
245 | /* Initialize all channel PWMs to zero -> LEDs off */ | ||
246 | ret |= lp5521_write(client, LP5521_REG_R_PWM, 0); | ||
247 | ret |= lp5521_write(client, LP5521_REG_G_PWM, 0); | ||
248 | ret |= lp5521_write(client, LP5521_REG_B_PWM, 0); | ||
249 | |||
250 | /* Engines are set to run state when OP_MODE enables them */ | ||
251 | ret |= lp5521_write(client, LP5521_REG_ENABLE, | ||
252 | LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM | | ||
253 | LP5521_EXEC_RUN); | ||
254 | /* enable takes 500us */ | ||
255 | usleep_range(500, 20000); | ||
256 | |||
257 | return ret; | ||
258 | } | ||
259 | |||
260 | static int lp5521_run_selftest(struct lp5521_chip *chip, char *buf) | ||
261 | { | ||
262 | int ret; | ||
263 | u8 status; | ||
264 | |||
265 | ret = lp5521_read(chip->client, LP5521_REG_STATUS, &status); | ||
266 | if (ret < 0) | ||
267 | return ret; | ||
268 | |||
269 | /* Check that ext clock is really in use if requested */ | ||
270 | if (chip->pdata && chip->pdata->clock_mode == LP5521_CLOCK_EXT) | ||
271 | if ((status & LP5521_EXT_CLK_USED) == 0) | ||
272 | return -EIO; | ||
273 | return 0; | ||
274 | } | ||
275 | |||
276 | static void lp5521_set_brightness(struct led_classdev *cdev, | ||
277 | enum led_brightness brightness) | ||
278 | { | ||
279 | struct lp5521_led *led = cdev_to_led(cdev); | ||
280 | led->brightness = (u8)brightness; | ||
281 | schedule_work(&led->brightness_work); | ||
282 | } | ||
283 | |||
284 | static void lp5521_led_brightness_work(struct work_struct *work) | ||
285 | { | ||
286 | struct lp5521_led *led = container_of(work, | ||
287 | struct lp5521_led, | ||
288 | brightness_work); | ||
289 | struct lp5521_chip *chip = led_to_lp5521(led); | ||
290 | struct i2c_client *client = chip->client; | ||
291 | |||
292 | mutex_lock(&chip->lock); | ||
293 | lp5521_write(client, LP5521_REG_LED_PWM_BASE + led->chan_nr, | ||
294 | led->brightness); | ||
295 | mutex_unlock(&chip->lock); | ||
296 | } | ||
297 | |||
298 | /* Detect the chip by setting its ENABLE register and reading it back. */ | ||
299 | static int lp5521_detect(struct i2c_client *client) | ||
300 | { | ||
301 | int ret; | ||
302 | u8 buf; | ||
303 | |||
304 | ret = lp5521_write(client, LP5521_REG_ENABLE, | ||
305 | LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM); | ||
306 | if (ret) | ||
307 | return ret; | ||
308 | usleep_range(1000, 10000); | ||
309 | ret = lp5521_read(client, LP5521_REG_ENABLE, &buf); | ||
310 | if (ret) | ||
311 | return ret; | ||
312 | if (buf != (LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM)) | ||
313 | return -ENODEV; | ||
314 | |||
315 | return 0; | ||
316 | } | ||
317 | |||
318 | /* Set engine mode and create appropriate sysfs attributes, if required. */ | ||
319 | static int lp5521_set_mode(struct lp5521_engine *engine, u8 mode) | ||
320 | { | ||
321 | struct lp5521_chip *chip = engine_to_lp5521(engine); | ||
322 | struct i2c_client *client = chip->client; | ||
323 | struct device *dev = &client->dev; | ||
324 | int ret = 0; | ||
325 | |||
326 | /* If already in the requested mode, do nothing (except for run) */ | ||
327 | if (mode == engine->mode && mode != LP5521_CMD_RUN) | ||
328 | return 0; | ||
329 | |||
330 | if (mode == LP5521_CMD_RUN) { | ||
331 | ret = lp5521_set_engine_mode(engine, LP5521_CMD_RUN); | ||
332 | } else if (mode == LP5521_CMD_LOAD) { | ||
333 | lp5521_set_engine_mode(engine, LP5521_CMD_DISABLED); | ||
334 | lp5521_set_engine_mode(engine, LP5521_CMD_LOAD); | ||
335 | |||
336 | ret = sysfs_create_group(&dev->kobj, engine->attributes); | ||
337 | if (ret) | ||
338 | return ret; | ||
339 | } else if (mode == LP5521_CMD_DISABLED) { | ||
340 | lp5521_set_engine_mode(engine, LP5521_CMD_DISABLED); | ||
341 | } | ||
342 | |||
343 | /* remove load attribute from sysfs if not in load mode */ | ||
344 | if (engine->mode == LP5521_CMD_LOAD && mode != LP5521_CMD_LOAD) | ||
345 | sysfs_remove_group(&dev->kobj, engine->attributes); | ||
346 | |||
347 | engine->mode = mode; | ||
348 | |||
349 | return ret; | ||
350 | } | ||
351 | |||
352 | static int lp5521_do_store_load(struct lp5521_engine *engine, | ||
353 | const char *buf, size_t len) | ||
354 | { | ||
355 | struct lp5521_chip *chip = engine_to_lp5521(engine); | ||
356 | struct i2c_client *client = chip->client; | ||
357 | int ret, nrchars, offset = 0, i = 0; | ||
358 | char c[3]; | ||
359 | unsigned cmd; | ||
360 | u8 pattern[LP5521_PROGRAM_LENGTH] = {0}; | ||
361 | |||
362 | while ((offset < len - 1) && (i < LP5521_PROGRAM_LENGTH)) { | ||
363 | /* separate sscanfs because a length limit works only for %s */ | ||
364 | ret = sscanf(buf + offset, "%2s%n ", c, &nrchars); | ||
365 | ret = sscanf(c, "%2x", &cmd); | ||
366 | if (ret != 1) | ||
367 | goto fail; | ||
368 | pattern[i] = (u8)cmd; | ||
369 | |||
370 | offset += nrchars; | ||
371 | i++; | ||
372 | } | ||
373 | |||
374 | /* Each instruction is 16 bits long. Check that the length is even */ | ||
375 | if (i % 2) | ||
376 | goto fail; | ||
377 | |||
378 | mutex_lock(&chip->lock); | ||
379 | ret = lp5521_load_program(engine, pattern); | ||
380 | mutex_unlock(&chip->lock); | ||
381 | |||
382 | if (ret) { | ||
383 | dev_err(&client->dev, "failed loading pattern\n"); | ||
384 | return ret; | ||
385 | } | ||
386 | |||
387 | return len; | ||
388 | fail: | ||
389 | dev_err(&client->dev, "wrong pattern format\n"); | ||
390 | return -EINVAL; | ||
391 | } | ||
392 | |||
393 | static ssize_t store_engine_load(struct device *dev, | ||
394 | struct device_attribute *attr, | ||
395 | const char *buf, size_t len, int nr) | ||
396 | { | ||
397 | struct i2c_client *client = to_i2c_client(dev); | ||
398 | struct lp5521_chip *chip = i2c_get_clientdata(client); | ||
399 | return lp5521_do_store_load(&chip->engines[nr - 1], buf, len); | ||
400 | } | ||
401 | |||
402 | #define store_load(nr) \ | ||
403 | static ssize_t store_engine##nr##_load(struct device *dev, \ | ||
404 | struct device_attribute *attr, \ | ||
405 | const char *buf, size_t len) \ | ||
406 | { \ | ||
407 | return store_engine_load(dev, attr, buf, len, nr); \ | ||
408 | } | ||
409 | store_load(1) | ||
410 | store_load(2) | ||
411 | store_load(3) | ||
412 | |||
413 | static ssize_t show_engine_mode(struct device *dev, | ||
414 | struct device_attribute *attr, | ||
415 | char *buf, int nr) | ||
416 | { | ||
417 | struct i2c_client *client = to_i2c_client(dev); | ||
418 | struct lp5521_chip *chip = i2c_get_clientdata(client); | ||
419 | switch (chip->engines[nr - 1].mode) { | ||
420 | case LP5521_CMD_RUN: | ||
421 | return sprintf(buf, "run\n"); | ||
422 | case LP5521_CMD_LOAD: | ||
423 | return sprintf(buf, "load\n"); | ||
424 | case LP5521_CMD_DISABLED: | ||
425 | return sprintf(buf, "disabled\n"); | ||
426 | default: | ||
427 | return sprintf(buf, "disabled\n"); | ||
428 | } | ||
429 | } | ||
430 | |||
431 | #define show_mode(nr) \ | ||
432 | static ssize_t show_engine##nr##_mode(struct device *dev, \ | ||
433 | struct device_attribute *attr, \ | ||
434 | char *buf) \ | ||
435 | { \ | ||
436 | return show_engine_mode(dev, attr, buf, nr); \ | ||
437 | } | ||
438 | show_mode(1) | ||
439 | show_mode(2) | ||
440 | show_mode(3) | ||
441 | |||
442 | static ssize_t store_engine_mode(struct device *dev, | ||
443 | struct device_attribute *attr, | ||
444 | const char *buf, size_t len, int nr) | ||
445 | { | ||
446 | struct i2c_client *client = to_i2c_client(dev); | ||
447 | struct lp5521_chip *chip = i2c_get_clientdata(client); | ||
448 | struct lp5521_engine *engine = &chip->engines[nr - 1]; | ||
449 | mutex_lock(&chip->lock); | ||
450 | |||
451 | if (!strncmp(buf, "run", 3)) | ||
452 | lp5521_set_mode(engine, LP5521_CMD_RUN); | ||
453 | else if (!strncmp(buf, "load", 4)) | ||
454 | lp5521_set_mode(engine, LP5521_CMD_LOAD); | ||
455 | else if (!strncmp(buf, "disabled", 8)) | ||
456 | lp5521_set_mode(engine, LP5521_CMD_DISABLED); | ||
457 | |||
458 | mutex_unlock(&chip->lock); | ||
459 | return len; | ||
460 | } | ||
461 | |||
462 | #define store_mode(nr) \ | ||
463 | static ssize_t store_engine##nr##_mode(struct device *dev, \ | ||
464 | struct device_attribute *attr, \ | ||
465 | const char *buf, size_t len) \ | ||
466 | { \ | ||
467 | return store_engine_mode(dev, attr, buf, len, nr); \ | ||
468 | } | ||
469 | store_mode(1) | ||
470 | store_mode(2) | ||
471 | store_mode(3) | ||
472 | |||
473 | static ssize_t show_max_current(struct device *dev, | ||
474 | struct device_attribute *attr, | ||
475 | char *buf) | ||
476 | { | ||
477 | struct led_classdev *led_cdev = dev_get_drvdata(dev); | ||
478 | struct lp5521_led *led = cdev_to_led(led_cdev); | ||
479 | |||
480 | return sprintf(buf, "%d\n", led->max_current); | ||
481 | } | ||
482 | |||
483 | static ssize_t show_current(struct device *dev, | ||
484 | struct device_attribute *attr, | ||
485 | char *buf) | ||
486 | { | ||
487 | struct led_classdev *led_cdev = dev_get_drvdata(dev); | ||
488 | struct lp5521_led *led = cdev_to_led(led_cdev); | ||
489 | |||
490 | return sprintf(buf, "%d\n", led->led_current); | ||
491 | } | ||
492 | |||
493 | static ssize_t store_current(struct device *dev, | ||
494 | struct device_attribute *attr, | ||
495 | const char *buf, size_t len) | ||
496 | { | ||
497 | struct led_classdev *led_cdev = dev_get_drvdata(dev); | ||
498 | struct lp5521_led *led = cdev_to_led(led_cdev); | ||
499 | struct lp5521_chip *chip = led_to_lp5521(led); | ||
500 | ssize_t ret; | ||
501 | unsigned long curr; | ||
502 | |||
503 | if (strict_strtoul(buf, 0, &curr)) | ||
504 | return -EINVAL; | ||
505 | |||
506 | if (curr > led->max_current) | ||
507 | return -EINVAL; | ||
508 | |||
509 | mutex_lock(&chip->lock); | ||
510 | ret = lp5521_set_led_current(chip, led->id, curr); | ||
511 | mutex_unlock(&chip->lock); | ||
512 | |||
513 | if (ret < 0) | ||
514 | return ret; | ||
515 | |||
516 | led->led_current = (u8)curr; | ||
517 | |||
518 | return len; | ||
519 | } | ||
520 | |||
521 | static ssize_t lp5521_selftest(struct device *dev, | ||
522 | struct device_attribute *attr, | ||
523 | char *buf) | ||
524 | { | ||
525 | struct i2c_client *client = to_i2c_client(dev); | ||
526 | struct lp5521_chip *chip = i2c_get_clientdata(client); | ||
527 | int ret; | ||
528 | |||
529 | mutex_lock(&chip->lock); | ||
530 | ret = lp5521_run_selftest(chip, buf); | ||
531 | mutex_unlock(&chip->lock); | ||
532 | return sprintf(buf, "%s\n", ret ? "FAIL" : "OK"); | ||
533 | } | ||
534 | |||
535 | /* led class device attributes */ | ||
536 | static DEVICE_ATTR(led_current, S_IRUGO | S_IWUGO, show_current, store_current); | ||
537 | static DEVICE_ATTR(max_current, S_IRUGO , show_max_current, NULL); | ||
538 | |||
539 | static struct attribute *lp5521_led_attributes[] = { | ||
540 | &dev_attr_led_current.attr, | ||
541 | &dev_attr_max_current.attr, | ||
542 | NULL, | ||
543 | }; | ||
544 | |||
545 | static struct attribute_group lp5521_led_attribute_group = { | ||
546 | .attrs = lp5521_led_attributes | ||
547 | }; | ||
548 | |||
549 | /* device attributes */ | ||
550 | static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUGO, | ||
551 | show_engine1_mode, store_engine1_mode); | ||
552 | static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUGO, | ||
553 | show_engine2_mode, store_engine2_mode); | ||
554 | static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUGO, | ||
555 | show_engine3_mode, store_engine3_mode); | ||
556 | static DEVICE_ATTR(engine1_load, S_IWUGO, NULL, store_engine1_load); | ||
557 | static DEVICE_ATTR(engine2_load, S_IWUGO, NULL, store_engine2_load); | ||
558 | static DEVICE_ATTR(engine3_load, S_IWUGO, NULL, store_engine3_load); | ||
559 | static DEVICE_ATTR(selftest, S_IRUGO, lp5521_selftest, NULL); | ||
560 | |||
561 | static struct attribute *lp5521_attributes[] = { | ||
562 | &dev_attr_engine1_mode.attr, | ||
563 | &dev_attr_engine2_mode.attr, | ||
564 | &dev_attr_engine3_mode.attr, | ||
565 | &dev_attr_selftest.attr, | ||
566 | NULL | ||
567 | }; | ||
568 | |||
569 | static struct attribute *lp5521_engine1_attributes[] = { | ||
570 | &dev_attr_engine1_load.attr, | ||
571 | NULL | ||
572 | }; | ||
573 | |||
574 | static struct attribute *lp5521_engine2_attributes[] = { | ||
575 | &dev_attr_engine2_load.attr, | ||
576 | NULL | ||
577 | }; | ||
578 | |||
579 | static struct attribute *lp5521_engine3_attributes[] = { | ||
580 | &dev_attr_engine3_load.attr, | ||
581 | NULL | ||
582 | }; | ||
583 | |||
584 | static const struct attribute_group lp5521_group = { | ||
585 | .attrs = lp5521_attributes, | ||
586 | }; | ||
587 | |||
588 | static const struct attribute_group lp5521_engine_group[] = { | ||
589 | {.attrs = lp5521_engine1_attributes }, | ||
590 | {.attrs = lp5521_engine2_attributes }, | ||
591 | {.attrs = lp5521_engine3_attributes }, | ||
592 | }; | ||
593 | |||
594 | static int lp5521_register_sysfs(struct i2c_client *client) | ||
595 | { | ||
596 | struct device *dev = &client->dev; | ||
597 | return sysfs_create_group(&dev->kobj, &lp5521_group); | ||
598 | } | ||
599 | |||
600 | static void lp5521_unregister_sysfs(struct i2c_client *client) | ||
601 | { | ||
602 | struct lp5521_chip *chip = i2c_get_clientdata(client); | ||
603 | struct device *dev = &client->dev; | ||
604 | int i; | ||
605 | |||
606 | sysfs_remove_group(&dev->kobj, &lp5521_group); | ||
607 | |||
608 | for (i = 0; i < ARRAY_SIZE(chip->engines); i++) { | ||
609 | if (chip->engines[i].mode == LP5521_CMD_LOAD) | ||
610 | sysfs_remove_group(&dev->kobj, | ||
611 | chip->engines[i].attributes); | ||
612 | } | ||
613 | |||
614 | for (i = 0; i < chip->num_leds; i++) | ||
615 | sysfs_remove_group(&chip->leds[i].cdev.dev->kobj, | ||
616 | &lp5521_led_attribute_group); | ||
617 | } | ||
618 | |||
619 | static int __init lp5521_init_led(struct lp5521_led *led, | ||
620 | struct i2c_client *client, | ||
621 | int chan, struct lp5521_platform_data *pdata) | ||
622 | { | ||
623 | struct device *dev = &client->dev; | ||
624 | char name[32]; | ||
625 | int res; | ||
626 | |||
627 | if (chan >= LP5521_MAX_LEDS) | ||
628 | return -EINVAL; | ||
629 | |||
630 | if (pdata->led_config[chan].led_current == 0) | ||
631 | return 0; | ||
632 | |||
633 | led->led_current = pdata->led_config[chan].led_current; | ||
634 | led->max_current = pdata->led_config[chan].max_current; | ||
635 | led->chan_nr = pdata->led_config[chan].chan_nr; | ||
636 | |||
637 | if (led->chan_nr >= LP5521_MAX_LEDS) { | ||
638 | dev_err(dev, "Use channel numbers between 0 and %d\n", | ||
639 | LP5521_MAX_LEDS - 1); | ||
640 | return -EINVAL; | ||
641 | } | ||
642 | |||
643 | snprintf(name, sizeof(name), "%s:channel%d", client->name, chan); | ||
644 | led->cdev.brightness_set = lp5521_set_brightness; | ||
645 | led->cdev.name = name; | ||
646 | res = led_classdev_register(dev, &led->cdev); | ||
647 | if (res < 0) { | ||
648 | dev_err(dev, "couldn't register led on channel %d\n", chan); | ||
649 | return res; | ||
650 | } | ||
651 | |||
652 | res = sysfs_create_group(&led->cdev.dev->kobj, | ||
653 | &lp5521_led_attribute_group); | ||
654 | if (res < 0) { | ||
655 | dev_err(dev, "couldn't register current attribute\n"); | ||
656 | led_classdev_unregister(&led->cdev); | ||
657 | return res; | ||
658 | } | ||
659 | return 0; | ||
660 | } | ||
661 | |||
662 | static int lp5521_probe(struct i2c_client *client, | ||
663 | const struct i2c_device_id *id) | ||
664 | { | ||
665 | struct lp5521_chip *chip; | ||
666 | struct lp5521_platform_data *pdata; | ||
667 | int ret, i, led; | ||
668 | |||
669 | chip = kzalloc(sizeof(*chip), GFP_KERNEL); | ||
670 | if (!chip) | ||
671 | return -ENOMEM; | ||
672 | |||
673 | i2c_set_clientdata(client, chip); | ||
674 | chip->client = client; | ||
675 | |||
676 | pdata = client->dev.platform_data; | ||
677 | |||
678 | if (!pdata) { | ||
679 | dev_err(&client->dev, "no platform data\n"); | ||
680 | ret = -EINVAL; | ||
681 | goto fail1; | ||
682 | } | ||
683 | |||
684 | mutex_init(&chip->lock); | ||
685 | |||
686 | chip->pdata = pdata; | ||
687 | |||
688 | if (pdata->setup_resources) { | ||
689 | ret = pdata->setup_resources(); | ||
690 | if (ret < 0) | ||
691 | goto fail1; | ||
692 | } | ||
693 | |||
694 | if (pdata->enable) { | ||
695 | pdata->enable(0); | ||
696 | usleep_range(1000, 10000); | ||
697 | pdata->enable(1); | ||
698 | usleep_range(1000, 10000); /* Spec says min 500us */ | ||
699 | } | ||
700 | |||
701 | ret = lp5521_detect(client); | ||
702 | |||
703 | if (ret) { | ||
704 | dev_err(&client->dev, "Chip not found\n"); | ||
705 | goto fail2; | ||
706 | } | ||
707 | |||
708 | dev_info(&client->dev, "%s programmable led chip found\n", id->name); | ||
709 | |||
710 | ret = lp5521_configure(client, lp5521_engine_group); | ||
711 | if (ret < 0) { | ||
712 | dev_err(&client->dev, "error configuring chip\n"); | ||
713 | goto fail2; | ||
714 | } | ||
715 | |||
716 | /* Initialize leds */ | ||
717 | chip->num_channels = pdata->num_channels; | ||
718 | chip->num_leds = 0; | ||
719 | led = 0; | ||
720 | for (i = 0; i < pdata->num_channels; i++) { | ||
721 | /* Do not initialize channels that are not connected */ | ||
722 | if (pdata->led_config[i].led_current == 0) | ||
723 | continue; | ||
724 | |||
725 | ret = lp5521_init_led(&chip->leds[led], client, i, pdata); | ||
726 | if (ret) { | ||
727 | dev_err(&client->dev, "error initializing leds\n"); | ||
728 | goto fail3; | ||
729 | } | ||
730 | chip->num_leds++; | ||
731 | |||
732 | chip->leds[led].id = led; | ||
733 | /* Set initial LED current */ | ||
734 | lp5521_set_led_current(chip, led, | ||
735 | chip->leds[led].led_current); | ||
736 | |||
737 | INIT_WORK(&(chip->leds[led].brightness_work), | ||
738 | lp5521_led_brightness_work); | ||
739 | |||
740 | led++; | ||
741 | } | ||
742 | |||
743 | ret = lp5521_register_sysfs(client); | ||
744 | if (ret) { | ||
745 | dev_err(&client->dev, "registering sysfs failed\n"); | ||
746 | goto fail3; | ||
747 | } | ||
748 | return ret; | ||
749 | fail3: | ||
750 | for (i = 0; i < chip->num_leds; i++) { | ||
751 | led_classdev_unregister(&chip->leds[i].cdev); | ||
752 | cancel_work_sync(&chip->leds[i].brightness_work); | ||
753 | } | ||
754 | fail2: | ||
755 | if (pdata->enable) | ||
756 | pdata->enable(0); | ||
757 | if (pdata->release_resources) | ||
758 | pdata->release_resources(); | ||
759 | fail1: | ||
760 | kfree(chip); | ||
761 | return ret; | ||
762 | } | ||
763 | |||
764 | static int lp5521_remove(struct i2c_client *client) | ||
765 | { | ||
766 | struct lp5521_chip *chip = i2c_get_clientdata(client); | ||
767 | int i; | ||
768 | |||
769 | lp5521_unregister_sysfs(client); | ||
770 | |||
771 | for (i = 0; i < chip->num_leds; i++) { | ||
772 | led_classdev_unregister(&chip->leds[i].cdev); | ||
773 | cancel_work_sync(&chip->leds[i].brightness_work); | ||
774 | } | ||
775 | |||
776 | if (chip->pdata->enable) | ||
777 | chip->pdata->enable(0); | ||
778 | if (chip->pdata->release_resources) | ||
779 | chip->pdata->release_resources(); | ||
780 | kfree(chip); | ||
781 | return 0; | ||
782 | } | ||
783 | |||
784 | static const struct i2c_device_id lp5521_id[] = { | ||
785 | { "lp5521", 0 }, /* Three channel chip */ | ||
786 | { } | ||
787 | }; | ||
788 | MODULE_DEVICE_TABLE(i2c, lp5521_id); | ||
789 | |||
790 | static struct i2c_driver lp5521_driver = { | ||
791 | .driver = { | ||
792 | .name = "lp5521", | ||
793 | }, | ||
794 | .probe = lp5521_probe, | ||
795 | .remove = lp5521_remove, | ||
796 | .id_table = lp5521_id, | ||
797 | }; | ||
798 | |||
799 | static int __init lp5521_init(void) | ||
800 | { | ||
801 | int ret; | ||
802 | |||
803 | ret = i2c_add_driver(&lp5521_driver); | ||
804 | |||
805 | if (ret < 0) | ||
806 | printk(KERN_ALERT "Adding lp5521 driver failed\n"); | ||
807 | |||
808 | return ret; | ||
809 | } | ||
810 | |||
811 | static void __exit lp5521_exit(void) | ||
812 | { | ||
813 | i2c_del_driver(&lp5521_driver); | ||
814 | } | ||
815 | |||
816 | module_init(lp5521_init); | ||
817 | module_exit(lp5521_exit); | ||
818 | |||
819 | MODULE_AUTHOR("Mathias Nyman, Yuri Zaporozhets, Samu Onkalo"); | ||
820 | MODULE_DESCRIPTION("LP5521 LED engine"); | ||
821 | MODULE_LICENSE("GPL v2"); | ||
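For illustration, here is a minimal userspace sketch (not part of this commit) of how the engine1_mode and engine1_load attributes created by the driver above could be exercised. The i2c bus number and chip address in LP5521_SYSFS are hypothetical and depend on how the board instantiates the lp5521 client, and the pattern bytes are an arbitrary placeholder rather than a meaningful LP5521 program.

/* Userspace sketch, illustration only -- assumptions noted above. */
#include <stdio.h>
#include <stdlib.h>

#define LP5521_SYSFS "/sys/bus/i2c/devices/2-0032"	/* hypothetical path */

static int write_attr(const char *attr, const char *val)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", LP5521_SYSFS, attr);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	/* Switch engine 1 to load mode; this also creates engine1_load. */
	if (write_attr("engine1_mode", "load"))
		return EXIT_FAILURE;
	/* Store a pattern: whitespace-separated hex bytes, even count. */
	if (write_attr("engine1_load", "40 00 0d 7f 7f 00 42 ff"))
		return EXIT_FAILURE;
	/* Start executing the loaded pattern. */
	if (write_attr("engine1_mode", "run"))
		return EXIT_FAILURE;
	return EXIT_SUCCESS;
}

The per-LED led_current and max_current attributes shown earlier are created on the corresponding LED class device rather than on the i2c client device.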
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c new file mode 100644 index 000000000000..1e11fcc08b28 --- /dev/null +++ b/drivers/leds/leds-lp5523.c | |||
@@ -0,0 +1,1065 @@ | |||
1 | /* | ||
2 | * lp5523.c - LP5523 LED Driver | ||
3 | * | ||
4 | * Copyright (C) 2010 Nokia Corporation | ||
5 | * | ||
6 | * Contact: Samu Onkalo <samu.p.onkalo@nokia.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License | ||
10 | * version 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but | ||
13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
15 | * General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
20 | * 02110-1301 USA | ||
21 | */ | ||
22 | |||
23 | #include <linux/module.h> | ||
24 | #include <linux/init.h> | ||
25 | #include <linux/i2c.h> | ||
26 | #include <linux/mutex.h> | ||
27 | #include <linux/gpio.h> | ||
28 | #include <linux/interrupt.h> | ||
29 | #include <linux/delay.h> | ||
30 | #include <linux/ctype.h> | ||
31 | #include <linux/spinlock.h> | ||
32 | #include <linux/wait.h> | ||
33 | #include <linux/leds.h> | ||
34 | #include <linux/leds-lp5523.h> | ||
35 | #include <linux/workqueue.h> | ||
36 | #include <linux/slab.h> | ||
37 | |||
38 | #define LP5523_REG_ENABLE 0x00 | ||
39 | #define LP5523_REG_OP_MODE 0x01 | ||
40 | #define LP5523_REG_RATIOMETRIC_MSB 0x02 | ||
41 | #define LP5523_REG_RATIOMETRIC_LSB 0x03 | ||
42 | #define LP5523_REG_ENABLE_LEDS_MSB 0x04 | ||
43 | #define LP5523_REG_ENABLE_LEDS_LSB 0x05 | ||
44 | #define LP5523_REG_LED_CNTRL_BASE 0x06 | ||
45 | #define LP5523_REG_LED_PWM_BASE 0x16 | ||
46 | #define LP5523_REG_LED_CURRENT_BASE 0x26 | ||
47 | #define LP5523_REG_CONFIG 0x36 | ||
48 | #define LP5523_REG_CHANNEL1_PC 0x37 | ||
49 | #define LP5523_REG_CHANNEL2_PC 0x38 | ||
50 | #define LP5523_REG_CHANNEL3_PC 0x39 | ||
51 | #define LP5523_REG_STATUS 0x3a | ||
52 | #define LP5523_REG_GPO 0x3b | ||
53 | #define LP5523_REG_VARIABLE 0x3c | ||
54 | #define LP5523_REG_RESET 0x3d | ||
55 | #define LP5523_REG_TEMP_CTRL 0x3e | ||
56 | #define LP5523_REG_TEMP_READ 0x3f | ||
57 | #define LP5523_REG_TEMP_WRITE 0x40 | ||
58 | #define LP5523_REG_LED_TEST_CTRL 0x41 | ||
59 | #define LP5523_REG_LED_TEST_ADC 0x42 | ||
60 | #define LP5523_REG_ENG1_VARIABLE 0x45 | ||
61 | #define LP5523_REG_ENG2_VARIABLE 0x46 | ||
62 | #define LP5523_REG_ENG3_VARIABLE 0x47 | ||
63 | #define LP5523_REG_MASTER_FADER1 0x48 | ||
64 | #define LP5523_REG_MASTER_FADER2 0x49 | ||
65 | #define LP5523_REG_MASTER_FADER3 0x4a | ||
66 | #define LP5523_REG_CH1_PROG_START 0x4c | ||
67 | #define LP5523_REG_CH2_PROG_START 0x4d | ||
68 | #define LP5523_REG_CH3_PROG_START 0x4e | ||
69 | #define LP5523_REG_PROG_PAGE_SEL 0x4f | ||
70 | #define LP5523_REG_PROG_MEM 0x50 | ||
71 | |||
72 | #define LP5523_CMD_LOAD 0x15 /* 00010101 */ | ||
73 | #define LP5523_CMD_RUN 0x2a /* 00101010 */ | ||
74 | #define LP5523_CMD_DISABLED 0x00 /* 00000000 */ | ||
75 | |||
76 | #define LP5523_ENABLE 0x40 | ||
77 | #define LP5523_AUTO_INC 0x40 | ||
78 | #define LP5523_PWR_SAVE 0x20 | ||
79 | #define LP5523_PWM_PWR_SAVE 0x04 | ||
80 | #define LP5523_CP_1 0x08 | ||
81 | #define LP5523_CP_1_5 0x10 | ||
82 | #define LP5523_CP_AUTO 0x18 | ||
83 | #define LP5523_INT_CLK 0x01 | ||
84 | #define LP5523_AUTO_CLK 0x02 | ||
85 | #define LP5523_EN_LEDTEST 0x80 | ||
86 | #define LP5523_LEDTEST_DONE 0x80 | ||
87 | |||
88 | #define LP5523_DEFAULT_CURRENT 50 /* microAmps */ | ||
89 | #define LP5523_PROGRAM_LENGTH 32 /* in bytes */ | ||
90 | #define LP5523_PROGRAM_PAGES 6 | ||
91 | #define LP5523_ADC_SHORTCIRC_LIM 80 | ||
92 | |||
93 | #define LP5523_LEDS 9 | ||
94 | #define LP5523_ENGINES 3 | ||
95 | |||
96 | #define LP5523_ENG_MASK_BASE 0x30 /* 00110000 */ | ||
97 | |||
98 | #define LP5523_ENG_STATUS_MASK 0x07 /* 00000111 */ | ||
99 | |||
100 | #define LP5523_IRQ_FLAGS IRQF_TRIGGER_FALLING | ||
101 | |||
102 | #define LP5523_EXT_CLK_USED 0x08 | ||
103 | |||
104 | #define LED_ACTIVE(mux, led) (!!(mux & (0x0001 << led))) | ||
105 | #define SHIFT_MASK(id) (((id) - 1) * 2) | ||
106 | |||
107 | struct lp5523_engine { | ||
108 | const struct attribute_group *attributes; | ||
109 | int id; | ||
110 | u8 mode; | ||
111 | u8 prog_page; | ||
112 | u8 mux_page; | ||
113 | u16 led_mux; | ||
114 | u8 engine_mask; | ||
115 | }; | ||
116 | |||
117 | struct lp5523_led { | ||
118 | int id; | ||
119 | u8 chan_nr; | ||
120 | u8 led_current; | ||
121 | u8 max_current; | ||
122 | struct led_classdev cdev; | ||
123 | struct work_struct brightness_work; | ||
124 | u8 brightness; | ||
125 | }; | ||
126 | |||
127 | struct lp5523_chip { | ||
128 | struct mutex lock; /* Serialize control */ | ||
129 | struct i2c_client *client; | ||
130 | struct lp5523_engine engines[LP5523_ENGINES]; | ||
131 | struct lp5523_led leds[LP5523_LEDS]; | ||
132 | struct lp5523_platform_data *pdata; | ||
133 | u8 num_channels; | ||
134 | u8 num_leds; | ||
135 | }; | ||
136 | |||
137 | #define cdev_to_led(c) container_of(c, struct lp5523_led, cdev) | ||
138 | |||
139 | static struct lp5523_chip *engine_to_lp5523(struct lp5523_engine *engine) | ||
140 | { | ||
141 | return container_of(engine, struct lp5523_chip, | ||
142 | engines[engine->id - 1]); | ||
143 | } | ||
144 | |||
145 | static struct lp5523_chip *led_to_lp5523(struct lp5523_led *led) | ||
146 | { | ||
147 | return container_of(led, struct lp5523_chip, | ||
148 | leds[led->id]); | ||
149 | } | ||
150 | |||
151 | static int lp5523_set_mode(struct lp5523_engine *engine, u8 mode); | ||
152 | static int lp5523_set_engine_mode(struct lp5523_engine *engine, u8 mode); | ||
153 | static int lp5523_load_program(struct lp5523_engine *engine, u8 *pattern); | ||
154 | |||
155 | static void lp5523_led_brightness_work(struct work_struct *work); | ||
156 | |||
157 | static int lp5523_write(struct i2c_client *client, u8 reg, u8 value) | ||
158 | { | ||
159 | return i2c_smbus_write_byte_data(client, reg, value); | ||
160 | } | ||
161 | |||
162 | static int lp5523_read(struct i2c_client *client, u8 reg, u8 *buf) | ||
163 | { | ||
164 | s32 ret = i2c_smbus_read_byte_data(client, reg); | ||
165 | |||
166 | if (ret < 0) | ||
167 | return -EIO; | ||
168 | |||
169 | *buf = ret; | ||
170 | return 0; | ||
171 | } | ||
172 | |||
173 | static int lp5523_detect(struct i2c_client *client) | ||
174 | { | ||
175 | int ret; | ||
176 | u8 buf; | ||
177 | |||
178 | ret = lp5523_write(client, LP5523_REG_ENABLE, 0x40); | ||
179 | if (ret) | ||
180 | return ret; | ||
181 | ret = lp5523_read(client, LP5523_REG_ENABLE, &buf); | ||
182 | if (ret) | ||
183 | return ret; | ||
184 | if (buf == 0x40) | ||
185 | return 0; | ||
186 | else | ||
187 | return -ENODEV; | ||
188 | } | ||
189 | |||
190 | static int lp5523_configure(struct i2c_client *client) | ||
191 | { | ||
192 | struct lp5523_chip *chip = i2c_get_clientdata(client); | ||
193 | int ret = 0; | ||
194 | u8 status; | ||
195 | |||
196 | /* One pattern per engine, setting the LED mux start and stop addresses */ | ||
197 | u8 pattern[][LP5523_PROGRAM_LENGTH] = { | ||
198 | { 0x9c, 0x30, 0x9c, 0xb0, 0x9d, 0x80, 0xd8, 0x00, 0}, | ||
199 | { 0x9c, 0x40, 0x9c, 0xc0, 0x9d, 0x80, 0xd8, 0x00, 0}, | ||
200 | { 0x9c, 0x50, 0x9c, 0xd0, 0x9d, 0x80, 0xd8, 0x00, 0}, | ||
201 | }; | ||
202 | |||
203 | lp5523_write(client, LP5523_REG_RESET, 0xff); | ||
204 | |||
205 | usleep_range(10000, 100000); | ||
206 | |||
207 | ret |= lp5523_write(client, LP5523_REG_ENABLE, LP5523_ENABLE); | ||
208 | /* Chip startup time after reset is 500 us */ | ||
209 | usleep_range(1000, 10000); | ||
210 | |||
211 | ret |= lp5523_write(client, LP5523_REG_CONFIG, | ||
212 | LP5523_AUTO_INC | LP5523_PWR_SAVE | | ||
213 | LP5523_CP_AUTO | LP5523_AUTO_CLK | | ||
214 | LP5523_PWM_PWR_SAVE); | ||
215 | |||
216 | /* turn on all leds */ | ||
217 | ret |= lp5523_write(client, LP5523_REG_ENABLE_LEDS_MSB, 0x01); | ||
218 | ret |= lp5523_write(client, LP5523_REG_ENABLE_LEDS_LSB, 0xff); | ||
219 | |||
220 | /* hardcode 32 bytes of program memory for each engine */ | ||
221 | ret |= lp5523_write(client, LP5523_REG_CH1_PROG_START, 0x00); | ||
222 | ret |= lp5523_write(client, LP5523_REG_CH2_PROG_START, 0x10); | ||
223 | ret |= lp5523_write(client, LP5523_REG_CH3_PROG_START, 0x20); | ||
224 | |||
225 | /* write the led mux address space for each engine */ | ||
226 | ret |= lp5523_load_program(&chip->engines[0], pattern[0]); | ||
227 | ret |= lp5523_load_program(&chip->engines[1], pattern[1]); | ||
228 | ret |= lp5523_load_program(&chip->engines[2], pattern[2]); | ||
229 | |||
230 | if (ret) { | ||
231 | dev_err(&client->dev, "could not load mux programs\n"); | ||
232 | return -1; | ||
233 | } | ||
234 | |||
235 | /* set all engines exec state and mode to run 00101010 */ | ||
236 | ret |= lp5523_write(client, LP5523_REG_ENABLE, | ||
237 | (LP5523_CMD_RUN | LP5523_ENABLE)); | ||
238 | |||
239 | ret |= lp5523_write(client, LP5523_REG_OP_MODE, LP5523_CMD_RUN); | ||
240 | |||
241 | if (ret) { | ||
242 | dev_err(&client->dev, "could not start mux programs\n"); | ||
243 | return -1; | ||
244 | } | ||
245 | |||
246 | /* Wait 3ms and check the engine status */ | ||
247 | usleep_range(3000, 20000); | ||
248 | lp5523_read(client, LP5523_REG_STATUS, &status); | ||
249 | status &= LP5523_ENG_STATUS_MASK; | ||
250 | |||
251 | if (status == LP5523_ENG_STATUS_MASK) { | ||
252 | dev_dbg(&client->dev, "all engines configured\n"); | ||
253 | } else { | ||
254 | dev_info(&client->dev, "status == %x\n", status); | ||
255 | dev_err(&client->dev, "could not configure LED engine\n"); | ||
256 | return -1; | ||
257 | } | ||
258 | |||
259 | dev_info(&client->dev, "disabling engines\n"); | ||
260 | |||
261 | ret |= lp5523_write(client, LP5523_REG_OP_MODE, LP5523_CMD_DISABLED); | ||
262 | |||
263 | return ret; | ||
264 | } | ||
265 | |||
266 | static int lp5523_set_engine_mode(struct lp5523_engine *engine, u8 mode) | ||
267 | { | ||
268 | struct lp5523_chip *chip = engine_to_lp5523(engine); | ||
269 | struct i2c_client *client = chip->client; | ||
270 | int ret; | ||
271 | u8 engine_state; | ||
272 | |||
273 | ret = lp5523_read(client, LP5523_REG_OP_MODE, &engine_state); | ||
274 | if (ret) | ||
275 | goto fail; | ||
276 | |||
277 | engine_state &= ~(engine->engine_mask); | ||
278 | |||
279 | /* set mode only for this engine */ | ||
280 | mode &= engine->engine_mask; | ||
281 | |||
282 | engine_state |= mode; | ||
283 | |||
284 | ret |= lp5523_write(client, LP5523_REG_OP_MODE, engine_state); | ||
285 | fail: | ||
286 | return ret; | ||
287 | } | ||
288 | |||
289 | static int lp5523_load_mux(struct lp5523_engine *engine, u16 mux) | ||
290 | { | ||
291 | struct lp5523_chip *chip = engine_to_lp5523(engine); | ||
292 | struct i2c_client *client = chip->client; | ||
293 | int ret = 0; | ||
294 | |||
295 | ret |= lp5523_set_engine_mode(engine, LP5523_CMD_LOAD); | ||
296 | |||
297 | ret |= lp5523_write(client, LP5523_REG_PROG_PAGE_SEL, engine->mux_page); | ||
298 | ret |= lp5523_write(client, LP5523_REG_PROG_MEM, | ||
299 | (u8)(mux >> 8)); | ||
300 | ret |= lp5523_write(client, LP5523_REG_PROG_MEM + 1, (u8)(mux)); | ||
301 | engine->led_mux = mux; | ||
302 | |||
303 | return ret; | ||
304 | } | ||
305 | |||
306 | static int lp5523_load_program(struct lp5523_engine *engine, u8 *pattern) | ||
307 | { | ||
308 | struct lp5523_chip *chip = engine_to_lp5523(engine); | ||
309 | struct i2c_client *client = chip->client; | ||
310 | |||
311 | int ret = 0; | ||
312 | |||
313 | ret |= lp5523_set_engine_mode(engine, LP5523_CMD_LOAD); | ||
314 | |||
315 | ret |= lp5523_write(client, LP5523_REG_PROG_PAGE_SEL, | ||
316 | engine->prog_page); | ||
317 | ret |= i2c_smbus_write_i2c_block_data(client, LP5523_REG_PROG_MEM, | ||
318 | LP5523_PROGRAM_LENGTH, pattern); | ||
319 | |||
320 | return ret; | ||
321 | } | ||
322 | |||
323 | static int lp5523_run_program(struct lp5523_engine *engine) | ||
324 | { | ||
325 | struct lp5523_chip *chip = engine_to_lp5523(engine); | ||
326 | struct i2c_client *client = chip->client; | ||
327 | int ret; | ||
328 | |||
329 | ret = lp5523_write(client, LP5523_REG_ENABLE, | ||
330 | LP5523_CMD_RUN | LP5523_ENABLE); | ||
331 | if (ret) | ||
332 | goto fail; | ||
333 | |||
334 | ret = lp5523_set_engine_mode(engine, LP5523_CMD_RUN); | ||
335 | fail: | ||
336 | return ret; | ||
337 | } | ||
338 | |||
339 | static int lp5523_mux_parse(const char *buf, u16 *mux, size_t len) | ||
340 | { | ||
341 | int i; | ||
342 | u16 tmp_mux = 0; | ||
343 | len = len < LP5523_LEDS ? len : LP5523_LEDS; | ||
344 | for (i = 0; i < len; i++) { | ||
345 | switch (buf[i]) { | ||
346 | case '1': | ||
347 | tmp_mux |= (1 << i); | ||
348 | break; | ||
349 | case '0': | ||
350 | break; | ||
351 | case '\n': | ||
352 | i = len; | ||
353 | break; | ||
354 | default: | ||
355 | return -1; | ||
356 | } | ||
357 | } | ||
358 | *mux = tmp_mux; | ||
359 | |||
360 | return 0; | ||
361 | } | ||
362 | |||
363 | static void lp5523_mux_to_array(u16 led_mux, char *array) | ||
364 | { | ||
365 | int i, pos = 0; | ||
366 | for (i = 0; i < LP5523_LEDS; i++) | ||
367 | pos += sprintf(array + pos, "%x", LED_ACTIVE(led_mux, i)); | ||
368 | |||
369 | array[pos] = '\0'; | ||
370 | } | ||
371 | |||
372 | /*--------------------------------------------------------------*/ | ||
373 | /* Sysfs interface */ | ||
374 | /*--------------------------------------------------------------*/ | ||
375 | |||
376 | static ssize_t show_engine_leds(struct device *dev, | ||
377 | struct device_attribute *attr, | ||
378 | char *buf, int nr) | ||
379 | { | ||
380 | struct i2c_client *client = to_i2c_client(dev); | ||
381 | struct lp5523_chip *chip = i2c_get_clientdata(client); | ||
382 | char mux[LP5523_LEDS + 1]; | ||
383 | |||
384 | lp5523_mux_to_array(chip->engines[nr - 1].led_mux, mux); | ||
385 | |||
386 | return sprintf(buf, "%s\n", mux); | ||
387 | } | ||
388 | |||
389 | #define show_leds(nr) \ | ||
390 | static ssize_t show_engine##nr##_leds(struct device *dev, \ | ||
391 | struct device_attribute *attr, \ | ||
392 | char *buf) \ | ||
393 | { \ | ||
394 | return show_engine_leds(dev, attr, buf, nr); \ | ||
395 | } | ||
396 | show_leds(1) | ||
397 | show_leds(2) | ||
398 | show_leds(3) | ||
399 | |||
400 | static ssize_t store_engine_leds(struct device *dev, | ||
401 | struct device_attribute *attr, | ||
402 | const char *buf, size_t len, int nr) | ||
403 | { | ||
404 | struct i2c_client *client = to_i2c_client(dev); | ||
405 | struct lp5523_chip *chip = i2c_get_clientdata(client); | ||
406 | u16 mux = 0; | ||
407 | |||
408 | if (lp5523_mux_parse(buf, &mux, len)) | ||
409 | return -EINVAL; | ||
410 | |||
411 | if (lp5523_load_mux(&chip->engines[nr - 1], mux)) | ||
412 | return -EINVAL; | ||
413 | |||
414 | return len; | ||
415 | } | ||
416 | |||
417 | #define store_leds(nr) \ | ||
418 | static ssize_t store_engine##nr##_leds(struct device *dev, \ | ||
419 | struct device_attribute *attr, \ | ||
420 | const char *buf, size_t len) \ | ||
421 | { \ | ||
422 | return store_engine_leds(dev, attr, buf, len, nr); \ | ||
423 | } | ||
424 | store_leds(1) | ||
425 | store_leds(2) | ||
426 | store_leds(3) | ||
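For reference, the mux string accepted by the engineN_leds attributes above uses one '0'/'1' character per channel, with the leftmost character selecting channel 0; parsing stops at a newline. A standalone sketch of that convention (illustration only, not driver code):

#include <stdio.h>

int main(void)
{
	const char *buf = "000000010";	/* enable channel 7 only */
	unsigned int mux = 0;
	int i;

	for (i = 0; buf[i] == '0' || buf[i] == '1'; i++)
		if (buf[i] == '1')
			mux |= 1u << i;

	printf("mux = 0x%04x\n", mux);	/* prints "mux = 0x0080" */
	return 0;
}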
427 | |||
428 | static ssize_t lp5523_selftest(struct device *dev, | ||
429 | struct device_attribute *attr, | ||
430 | char *buf) | ||
431 | { | ||
432 | struct i2c_client *client = to_i2c_client(dev); | ||
433 | struct lp5523_chip *chip = i2c_get_clientdata(client); | ||
434 | int i, ret, pos = 0; | ||
435 | int led = 0; | ||
436 | u8 status, adc, vdd; | ||
437 | |||
438 | mutex_lock(&chip->lock); | ||
439 | |||
440 | ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status); | ||
441 | if (ret < 0) | ||
442 | goto fail; | ||
443 | |||
444 | /* Check that ext clock is really in use if requested */ | ||
445 | if ((chip->pdata) && (chip->pdata->clock_mode == LP5523_CLOCK_EXT)) | ||
446 | if ((status & LP5523_EXT_CLK_USED) == 0) | ||
447 | goto fail; | ||
448 | |||
449 | /* Measure VDD (i.e. VBAT) first (channel 16 corresponds to VDD) */ | ||
450 | lp5523_write(chip->client, LP5523_REG_LED_TEST_CTRL, | ||
451 | LP5523_EN_LEDTEST | 16); | ||
452 | usleep_range(3000, 10000); | ||
453 | ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status); | ||
454 | if (!(status & LP5523_LEDTEST_DONE)) | ||
455 | usleep_range(3000, 10000); | ||
456 | |||
457 | ret |= lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &vdd); | ||
458 | vdd--; /* Allow for some fluctuation in the measurement */ | ||
459 | |||
460 | for (i = 0; i < LP5523_LEDS; i++) { | ||
461 | /* Skip non-existing channels */ | ||
462 | if (chip->pdata->led_config[i].led_current == 0) | ||
463 | continue; | ||
464 | |||
465 | /* Set default current */ | ||
466 | lp5523_write(chip->client, | ||
467 | LP5523_REG_LED_CURRENT_BASE + i, | ||
468 | chip->pdata->led_config[i].led_current); | ||
469 | |||
470 | lp5523_write(chip->client, LP5523_REG_LED_PWM_BASE + i, 0xff); | ||
471 | /* let current stabilize 2ms before measurements start */ | ||
472 | usleep_range(2000, 10000); | ||
473 | lp5523_write(chip->client, | ||
474 | LP5523_REG_LED_TEST_CTRL, | ||
475 | LP5523_EN_LEDTEST | i); | ||
476 | /* ledtest takes 2.7ms */ | ||
477 | usleep_range(3000, 10000); | ||
478 | ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status); | ||
479 | if (!(status & LP5523_LEDTEST_DONE)) | ||
480 | usleep_range(3000, 10000); | ||
481 | ret |= lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &adc); | ||
482 | |||
483 | if (adc >= vdd || adc < LP5523_ADC_SHORTCIRC_LIM) | ||
484 | pos += sprintf(buf + pos, "LED %d FAIL\n", i); | ||
485 | |||
486 | lp5523_write(chip->client, LP5523_REG_LED_PWM_BASE + i, 0x00); | ||
487 | |||
488 | /* Restore current */ | ||
489 | lp5523_write(chip->client, | ||
490 | LP5523_REG_LED_CURRENT_BASE + i, | ||
491 | chip->leds[led].led_current); | ||
492 | led++; | ||
493 | } | ||
494 | if (pos == 0) | ||
495 | pos = sprintf(buf, "OK\n"); | ||
496 | goto release_lock; | ||
497 | fail: | ||
498 | pos = sprintf(buf, "FAIL\n"); | ||
499 | |||
500 | release_lock: | ||
501 | mutex_unlock(&chip->lock); | ||
502 | |||
503 | return pos; | ||
504 | } | ||
505 | |||
506 | static void lp5523_set_brightness(struct led_classdev *cdev, | ||
507 | enum led_brightness brightness) | ||
508 | { | ||
509 | struct lp5523_led *led = cdev_to_led(cdev); | ||
510 | |||
511 | led->brightness = (u8)brightness; | ||
512 | |||
513 | schedule_work(&led->brightness_work); | ||
514 | } | ||
515 | |||
516 | static void lp5523_led_brightness_work(struct work_struct *work) | ||
517 | { | ||
518 | struct lp5523_led *led = container_of(work, | ||
519 | struct lp5523_led, | ||
520 | brightness_work); | ||
521 | struct lp5523_chip *chip = led_to_lp5523(led); | ||
522 | struct i2c_client *client = chip->client; | ||
523 | |||
524 | mutex_lock(&chip->lock); | ||
525 | |||
526 | lp5523_write(client, LP5523_REG_LED_PWM_BASE + led->chan_nr, | ||
527 | led->brightness); | ||
528 | |||
529 | mutex_unlock(&chip->lock); | ||
530 | } | ||
531 | |||
532 | static int lp5523_do_store_load(struct lp5523_engine *engine, | ||
533 | const char *buf, size_t len) | ||
534 | { | ||
535 | struct lp5523_chip *chip = engine_to_lp5523(engine); | ||
536 | struct i2c_client *client = chip->client; | ||
537 | int ret, nrchars, offset = 0, i = 0; | ||
538 | char c[3]; | ||
539 | unsigned cmd; | ||
540 | u8 pattern[LP5523_PROGRAM_LENGTH] = {0}; | ||
541 | |||
542 | while ((offset < len - 1) && (i < LP5523_PROGRAM_LENGTH)) { | ||
543 | /* separate sscanfs because a length limit works only for %s */ | ||
544 | ret = sscanf(buf + offset, "%2s%n ", c, &nrchars); | ||
545 | ret = sscanf(c, "%2x", &cmd); | ||
546 | if (ret != 1) | ||
547 | goto fail; | ||
548 | pattern[i] = (u8)cmd; | ||
549 | |||
550 | offset += nrchars; | ||
551 | i++; | ||
552 | } | ||
553 | |||
554 | /* Each instruction is 16 bits long. Check that the length is even */ | ||
555 | if (i % 2) | ||
556 | goto fail; | ||
557 | |||
558 | mutex_lock(&chip->lock); | ||
559 | |||
560 | ret = lp5523_load_program(engine, pattern); | ||
561 | mutex_unlock(&chip->lock); | ||
562 | |||
563 | if (ret) { | ||
564 | dev_err(&client->dev, "failed loading pattern\n"); | ||
565 | return ret; | ||
566 | } | ||
567 | |||
568 | return len; | ||
569 | fail: | ||
570 | dev_err(&client->dev, "wrong pattern format\n"); | ||
571 | return -EINVAL; | ||
572 | } | ||
573 | |||
574 | static ssize_t store_engine_load(struct device *dev, | ||
575 | struct device_attribute *attr, | ||
576 | const char *buf, size_t len, int nr) | ||
577 | { | ||
578 | struct i2c_client *client = to_i2c_client(dev); | ||
579 | struct lp5523_chip *chip = i2c_get_clientdata(client); | ||
580 | return lp5523_do_store_load(&chip->engines[nr - 1], buf, len); | ||
581 | } | ||
582 | |||
583 | #define store_load(nr) \ | ||
584 | static ssize_t store_engine##nr##_load(struct device *dev, \ | ||
585 | struct device_attribute *attr, \ | ||
586 | const char *buf, size_t len) \ | ||
587 | { \ | ||
588 | return store_engine_load(dev, attr, buf, len, nr); \ | ||
589 | } | ||
590 | store_load(1) | ||
591 | store_load(2) | ||
592 | store_load(3) | ||
593 | |||
594 | static ssize_t show_engine_mode(struct device *dev, | ||
595 | struct device_attribute *attr, | ||
596 | char *buf, int nr) | ||
597 | { | ||
598 | struct i2c_client *client = to_i2c_client(dev); | ||
599 | struct lp5523_chip *chip = i2c_get_clientdata(client); | ||
600 | switch (chip->engines[nr - 1].mode) { | ||
601 | case LP5523_CMD_RUN: | ||
602 | return sprintf(buf, "run\n"); | ||
603 | case LP5523_CMD_LOAD: | ||
604 | return sprintf(buf, "load\n"); | ||
605 | case LP5523_CMD_DISABLED: | ||
606 | return sprintf(buf, "disabled\n"); | ||
607 | default: | ||
608 | return sprintf(buf, "disabled\n"); | ||
609 | } | ||
610 | } | ||
611 | |||
612 | #define show_mode(nr) \ | ||
613 | static ssize_t show_engine##nr##_mode(struct device *dev, \ | ||
614 | struct device_attribute *attr, \ | ||
615 | char *buf) \ | ||
616 | { \ | ||
617 | return show_engine_mode(dev, attr, buf, nr); \ | ||
618 | } | ||
619 | show_mode(1) | ||
620 | show_mode(2) | ||
621 | show_mode(3) | ||
622 | |||
623 | static ssize_t store_engine_mode(struct device *dev, | ||
624 | struct device_attribute *attr, | ||
625 | const char *buf, size_t len, int nr) | ||
626 | { | ||
627 | struct i2c_client *client = to_i2c_client(dev); | ||
628 | struct lp5523_chip *chip = i2c_get_clientdata(client); | ||
629 | struct lp5523_engine *engine = &chip->engines[nr - 1]; | ||
630 | mutex_lock(&chip->lock); | ||
631 | |||
632 | if (!strncmp(buf, "run", 3)) | ||
633 | lp5523_set_mode(engine, LP5523_CMD_RUN); | ||
634 | else if (!strncmp(buf, "load", 4)) | ||
635 | lp5523_set_mode(engine, LP5523_CMD_LOAD); | ||
636 | else if (!strncmp(buf, "disabled", 8)) | ||
637 | lp5523_set_mode(engine, LP5523_CMD_DISABLED); | ||
638 | |||
639 | mutex_unlock(&chip->lock); | ||
640 | return len; | ||
641 | } | ||
642 | |||
643 | #define store_mode(nr) \ | ||
644 | static ssize_t store_engine##nr##_mode(struct device *dev, \ | ||
645 | struct device_attribute *attr, \ | ||
646 | const char *buf, size_t len) \ | ||
647 | { \ | ||
648 | return store_engine_mode(dev, attr, buf, len, nr); \ | ||
649 | } | ||
650 | store_mode(1) | ||
651 | store_mode(2) | ||
652 | store_mode(3) | ||
653 | |||
654 | static ssize_t show_max_current(struct device *dev, | ||
655 | struct device_attribute *attr, | ||
656 | char *buf) | ||
657 | { | ||
658 | struct led_classdev *led_cdev = dev_get_drvdata(dev); | ||
659 | struct lp5523_led *led = cdev_to_led(led_cdev); | ||
660 | |||
661 | return sprintf(buf, "%d\n", led->max_current); | ||
662 | } | ||
663 | |||
664 | static ssize_t show_current(struct device *dev, | ||
665 | struct device_attribute *attr, | ||
666 | char *buf) | ||
667 | { | ||
668 | struct led_classdev *led_cdev = dev_get_drvdata(dev); | ||
669 | struct lp5523_led *led = cdev_to_led(led_cdev); | ||
670 | |||
671 | return sprintf(buf, "%d\n", led->led_current); | ||
672 | } | ||
673 | |||
674 | static ssize_t store_current(struct device *dev, | ||
675 | struct device_attribute *attr, | ||
676 | const char *buf, size_t len) | ||
677 | { | ||
678 | struct led_classdev *led_cdev = dev_get_drvdata(dev); | ||
679 | struct lp5523_led *led = cdev_to_led(led_cdev); | ||
680 | struct lp5523_chip *chip = led_to_lp5523(led); | ||
681 | ssize_t ret; | ||
682 | unsigned long curr; | ||
683 | |||
684 | if (strict_strtoul(buf, 0, &curr)) | ||
685 | return -EINVAL; | ||
686 | |||
687 | if (curr > led->max_current) | ||
688 | return -EINVAL; | ||
689 | |||
690 | mutex_lock(&chip->lock); | ||
691 | ret = lp5523_write(chip->client, | ||
692 | LP5523_REG_LED_CURRENT_BASE + led->chan_nr, | ||
693 | (u8)curr); | ||
694 | mutex_unlock(&chip->lock); | ||
695 | |||
696 | if (ret < 0) | ||
697 | return ret; | ||
698 | |||
699 | led->led_current = (u8)curr; | ||
700 | |||
701 | return len; | ||
702 | } | ||
703 | |||
704 | /* led class device attributes */ | ||
705 | static DEVICE_ATTR(led_current, S_IRUGO | S_IWUGO, show_current, store_current); | ||
706 | static DEVICE_ATTR(max_current, S_IRUGO , show_max_current, NULL); | ||
707 | |||
708 | static struct attribute *lp5523_led_attributes[] = { | ||
709 | &dev_attr_led_current.attr, | ||
710 | &dev_attr_max_current.attr, | ||
711 | NULL, | ||
712 | }; | ||
713 | |||
714 | static struct attribute_group lp5523_led_attribute_group = { | ||
715 | .attrs = lp5523_led_attributes | ||
716 | }; | ||
717 | |||
718 | /* device attributes */ | ||
719 | static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUGO, | ||
720 | show_engine1_mode, store_engine1_mode); | ||
721 | static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUGO, | ||
722 | show_engine2_mode, store_engine2_mode); | ||
723 | static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUGO, | ||
724 | show_engine3_mode, store_engine3_mode); | ||
725 | static DEVICE_ATTR(engine1_leds, S_IRUGO | S_IWUGO, | ||
726 | show_engine1_leds, store_engine1_leds); | ||
727 | static DEVICE_ATTR(engine2_leds, S_IRUGO | S_IWUGO, | ||
728 | show_engine2_leds, store_engine2_leds); | ||
729 | static DEVICE_ATTR(engine3_leds, S_IRUGO | S_IWUGO, | ||
730 | show_engine3_leds, store_engine3_leds); | ||
731 | static DEVICE_ATTR(engine1_load, S_IWUGO, NULL, store_engine1_load); | ||
732 | static DEVICE_ATTR(engine2_load, S_IWUGO, NULL, store_engine2_load); | ||
733 | static DEVICE_ATTR(engine3_load, S_IWUGO, NULL, store_engine3_load); | ||
734 | static DEVICE_ATTR(selftest, S_IRUGO, lp5523_selftest, NULL); | ||
735 | |||
736 | static struct attribute *lp5523_attributes[] = { | ||
737 | &dev_attr_engine1_mode.attr, | ||
738 | &dev_attr_engine2_mode.attr, | ||
739 | &dev_attr_engine3_mode.attr, | ||
740 | &dev_attr_selftest.attr, | ||
741 | NULL | ||
742 | }; | ||
743 | |||
744 | static struct attribute *lp5523_engine1_attributes[] = { | ||
745 | &dev_attr_engine1_load.attr, | ||
746 | &dev_attr_engine1_leds.attr, | ||
747 | NULL | ||
748 | }; | ||
749 | |||
750 | static struct attribute *lp5523_engine2_attributes[] = { | ||
751 | &dev_attr_engine2_load.attr, | ||
752 | &dev_attr_engine2_leds.attr, | ||
753 | NULL | ||
754 | }; | ||
755 | |||
756 | static struct attribute *lp5523_engine3_attributes[] = { | ||
757 | &dev_attr_engine3_load.attr, | ||
758 | &dev_attr_engine3_leds.attr, | ||
759 | NULL | ||
760 | }; | ||
761 | |||
762 | static const struct attribute_group lp5523_group = { | ||
763 | .attrs = lp5523_attributes, | ||
764 | }; | ||
765 | |||
766 | static const struct attribute_group lp5523_engine_group[] = { | ||
767 | {.attrs = lp5523_engine1_attributes }, | ||
768 | {.attrs = lp5523_engine2_attributes }, | ||
769 | {.attrs = lp5523_engine3_attributes }, | ||
770 | }; | ||
771 | |||
772 | static int lp5523_register_sysfs(struct i2c_client *client) | ||
773 | { | ||
774 | struct device *dev = &client->dev; | ||
775 | int ret; | ||
776 | |||
777 | ret = sysfs_create_group(&dev->kobj, &lp5523_group); | ||
778 | if (ret < 0) | ||
779 | return ret; | ||
780 | |||
781 | return 0; | ||
782 | } | ||
783 | |||
784 | static void lp5523_unregister_sysfs(struct i2c_client *client) | ||
785 | { | ||
786 | struct lp5523_chip *chip = i2c_get_clientdata(client); | ||
787 | struct device *dev = &client->dev; | ||
788 | int i; | ||
789 | |||
790 | sysfs_remove_group(&dev->kobj, &lp5523_group); | ||
791 | |||
792 | for (i = 0; i < ARRAY_SIZE(chip->engines); i++) | ||
793 | if (chip->engines[i].mode == LP5523_CMD_LOAD) | ||
794 | sysfs_remove_group(&dev->kobj, &lp5523_engine_group[i]); | ||
795 | |||
796 | for (i = 0; i < chip->num_leds; i++) | ||
797 | sysfs_remove_group(&chip->leds[i].cdev.dev->kobj, | ||
798 | &lp5523_led_attribute_group); | ||
799 | } | ||
800 | |||
801 | /*--------------------------------------------------------------*/ | ||
802 | /* Set chip operating mode */ | ||
803 | /*--------------------------------------------------------------*/ | ||
804 | static int lp5523_set_mode(struct lp5523_engine *engine, u8 mode) | ||
805 | { | ||
806 | /* engine to chip */ | ||
807 | struct lp5523_chip *chip = engine_to_lp5523(engine); | ||
808 | struct i2c_client *client = chip->client; | ||
809 | struct device *dev = &client->dev; | ||
810 | int ret = 0; | ||
811 | |||
812 | /* If already in the requested mode, do nothing (except for run) */ | ||
813 | if (mode == engine->mode && mode != LP5523_CMD_RUN) | ||
814 | return 0; | ||
815 | |||
816 | if (mode == LP5523_CMD_RUN) { | ||
817 | ret = lp5523_run_program(engine); | ||
818 | } else if (mode == LP5523_CMD_LOAD) { | ||
819 | lp5523_set_engine_mode(engine, LP5523_CMD_DISABLED); | ||
820 | lp5523_set_engine_mode(engine, LP5523_CMD_LOAD); | ||
821 | |||
822 | ret = sysfs_create_group(&dev->kobj, engine->attributes); | ||
823 | if (ret) | ||
824 | return ret; | ||
825 | } else if (mode == LP5523_CMD_DISABLED) { | ||
826 | lp5523_set_engine_mode(engine, LP5523_CMD_DISABLED); | ||
827 | } | ||
828 | |||
829 | /* remove load attribute from sysfs if not in load mode */ | ||
830 | if (engine->mode == LP5523_CMD_LOAD && mode != LP5523_CMD_LOAD) | ||
831 | sysfs_remove_group(&dev->kobj, engine->attributes); | ||
832 | |||
833 | engine->mode = mode; | ||
834 | |||
835 | return ret; | ||
836 | } | ||
837 | |||
838 | /*--------------------------------------------------------------*/ | ||
839 | /* Probe, Attach, Remove */ | ||
840 | /*--------------------------------------------------------------*/ | ||
841 | static int __init lp5523_init_engine(struct lp5523_engine *engine, int id) | ||
842 | { | ||
843 | if (id < 1 || id > LP5523_ENGINES) | ||
844 | return -1; | ||
845 | engine->id = id; | ||
846 | engine->engine_mask = LP5523_ENG_MASK_BASE >> SHIFT_MASK(id); | ||
847 | engine->prog_page = id - 1; | ||
848 | engine->mux_page = id + 2; | ||
849 | engine->attributes = &lp5523_engine_group[id - 1]; | ||
850 | |||
851 | return 0; | ||
852 | } | ||
853 | |||
854 | static int __init lp5523_init_led(struct lp5523_led *led, struct device *dev, | ||
855 | int chan, struct lp5523_platform_data *pdata) | ||
856 | { | ||
857 | char name[32]; | ||
858 | int res; | ||
859 | |||
860 | if (chan >= LP5523_LEDS) | ||
861 | return -EINVAL; | ||
862 | |||
863 | if (pdata->led_config[chan].led_current) { | ||
864 | led->led_current = pdata->led_config[chan].led_current; | ||
865 | led->max_current = pdata->led_config[chan].max_current; | ||
866 | led->chan_nr = pdata->led_config[chan].chan_nr; | ||
867 | |||
868 | if (led->chan_nr >= LP5523_LEDS) { | ||
869 | dev_err(dev, "Use channel numbers between 0 and %d\n", | ||
870 | LP5523_LEDS - 1); | ||
871 | return -EINVAL; | ||
872 | } | ||
873 | |||
874 | snprintf(name, 32, "lp5523:channel%d", chan); | ||
875 | |||
876 | led->cdev.name = name; | ||
877 | led->cdev.brightness_set = lp5523_set_brightness; | ||
878 | res = led_classdev_register(dev, &led->cdev); | ||
879 | if (res < 0) { | ||
880 | dev_err(dev, "couldn't register led on channel %d\n", | ||
881 | chan); | ||
882 | return res; | ||
883 | } | ||
884 | res = sysfs_create_group(&led->cdev.dev->kobj, | ||
885 | &lp5523_led_attribute_group); | ||
886 | if (res < 0) { | ||
887 | dev_err(dev, "couldn't register current attribute\n"); | ||
888 | led_classdev_unregister(&led->cdev); | ||
889 | return res; | ||
890 | } | ||
891 | } else { | ||
892 | led->led_current = 0; | ||
893 | } | ||
894 | return 0; | ||
895 | } | ||
896 | |||
897 | static struct i2c_driver lp5523_driver; | ||
898 | |||
899 | static int lp5523_probe(struct i2c_client *client, | ||
900 | const struct i2c_device_id *id) | ||
901 | { | ||
902 | struct lp5523_chip *chip; | ||
903 | struct lp5523_platform_data *pdata; | ||
904 | int ret, i, led; | ||
905 | |||
906 | chip = kzalloc(sizeof(*chip), GFP_KERNEL); | ||
907 | if (!chip) | ||
908 | return -ENOMEM; | ||
909 | |||
910 | i2c_set_clientdata(client, chip); | ||
911 | chip->client = client; | ||
912 | |||
913 | pdata = client->dev.platform_data; | ||
914 | |||
915 | if (!pdata) { | ||
916 | dev_err(&client->dev, "no platform data\n"); | ||
917 | ret = -EINVAL; | ||
918 | goto fail1; | ||
919 | } | ||
920 | |||
921 | mutex_init(&chip->lock); | ||
922 | |||
923 | chip->pdata = pdata; | ||
924 | |||
925 | if (pdata->setup_resources) { | ||
926 | ret = pdata->setup_resources(); | ||
927 | if (ret < 0) | ||
928 | goto fail1; | ||
929 | } | ||
930 | |||
931 | if (pdata->enable) { | ||
932 | pdata->enable(0); | ||
933 | usleep_range(1000, 10000); | ||
934 | pdata->enable(1); | ||
935 | usleep_range(1000, 10000); /* Spec says min 500us */ | ||
936 | } | ||
937 | |||
938 | ret = lp5523_detect(client); | ||
939 | if (ret) | ||
940 | goto fail2; | ||
941 | |||
942 | dev_info(&client->dev, "LP5523 Programmable led chip found\n"); | ||
943 | |||
944 | /* Initialize engines */ | ||
945 | for (i = 0; i < ARRAY_SIZE(chip->engines); i++) { | ||
946 | ret = lp5523_init_engine(&chip->engines[i], i + 1); | ||
947 | if (ret) { | ||
948 | dev_err(&client->dev, "error initializing engine\n"); | ||
949 | goto fail2; | ||
950 | } | ||
951 | } | ||
952 | ret = lp5523_configure(client); | ||
953 | if (ret < 0) { | ||
954 | dev_err(&client->dev, "error configuring chip\n"); | ||
955 | goto fail2; | ||
956 | } | ||
957 | |||
958 | /* Initialize leds */ | ||
959 | chip->num_channels = pdata->num_channels; | ||
960 | chip->num_leds = 0; | ||
961 | led = 0; | ||
962 | for (i = 0; i < pdata->num_channels; i++) { | ||
963 | /* Do not initialize channels that are not connected */ | ||
964 | if (pdata->led_config[i].led_current == 0) | ||
965 | continue; | ||
966 | |||
967 | ret = lp5523_init_led(&chip->leds[led], &client->dev, i, pdata); | ||
968 | if (ret) { | ||
969 | dev_err(&client->dev, "error initializing leds\n"); | ||
970 | goto fail3; | ||
971 | } | ||
972 | chip->num_leds++; | ||
973 | |||
974 | chip->leds[led].id = led; | ||
975 | /* Set LED current */ | ||
976 | lp5523_write(client, | ||
977 | LP5523_REG_LED_CURRENT_BASE + chip->leds[led].chan_nr, | ||
978 | chip->leds[led].led_current); | ||
979 | |||
980 | INIT_WORK(&(chip->leds[led].brightness_work), | ||
981 | lp5523_led_brightness_work); | ||
982 | |||
983 | led++; | ||
984 | } | ||
985 | |||
986 | ret = lp5523_register_sysfs(client); | ||
987 | if (ret) { | ||
988 | dev_err(&client->dev, "registering sysfs failed\n"); | ||
989 | goto fail3; | ||
990 | } | ||
991 | return ret; | ||
992 | fail3: | ||
993 | for (i = 0; i < chip->num_leds; i++) { | ||
994 | led_classdev_unregister(&chip->leds[i].cdev); | ||
995 | cancel_work_sync(&chip->leds[i].brightness_work); | ||
996 | } | ||
997 | fail2: | ||
998 | if (pdata->enable) | ||
999 | pdata->enable(0); | ||
1000 | if (pdata->release_resources) | ||
1001 | pdata->release_resources(); | ||
1002 | fail1: | ||
1003 | kfree(chip); | ||
1004 | return ret; | ||
1005 | } | ||
1006 | |||
1007 | static int lp5523_remove(struct i2c_client *client) | ||
1008 | { | ||
1009 | struct lp5523_chip *chip = i2c_get_clientdata(client); | ||
1010 | int i; | ||
1011 | |||
1012 | lp5523_unregister_sysfs(client); | ||
1013 | |||
1014 | for (i = 0; i < chip->num_leds; i++) { | ||
1015 | led_classdev_unregister(&chip->leds[i].cdev); | ||
1016 | cancel_work_sync(&chip->leds[i].brightness_work); | ||
1017 | } | ||
1018 | |||
1019 | if (chip->pdata->enable) | ||
1020 | chip->pdata->enable(0); | ||
1021 | if (chip->pdata->release_resources) | ||
1022 | chip->pdata->release_resources(); | ||
1023 | kfree(chip); | ||
1024 | return 0; | ||
1025 | } | ||
1026 | |||
1027 | static const struct i2c_device_id lp5523_id[] = { | ||
1028 | { "lp5523", 0 }, | ||
1029 | { } | ||
1030 | }; | ||
1031 | |||
1032 | MODULE_DEVICE_TABLE(i2c, lp5523_id); | ||
1033 | |||
1034 | static struct i2c_driver lp5523_driver = { | ||
1035 | .driver = { | ||
1036 | .name = "lp5523", | ||
1037 | }, | ||
1038 | .probe = lp5523_probe, | ||
1039 | .remove = lp5523_remove, | ||
1040 | .id_table = lp5523_id, | ||
1041 | }; | ||
1042 | |||
1043 | static int __init lp5523_init(void) | ||
1044 | { | ||
1045 | int ret; | ||
1046 | |||
1047 | ret = i2c_add_driver(&lp5523_driver); | ||
1048 | |||
1049 | if (ret < 0) | ||
1050 | printk(KERN_ALERT "Adding lp5523 driver failed\n"); | ||
1051 | |||
1052 | return ret; | ||
1053 | } | ||
1054 | |||
1055 | static void __exit lp5523_exit(void) | ||
1056 | { | ||
1057 | i2c_del_driver(&lp5523_driver); | ||
1058 | } | ||
1059 | |||
1060 | module_init(lp5523_init); | ||
1061 | module_exit(lp5523_exit); | ||
1062 | |||
1063 | MODULE_AUTHOR("Mathias Nyman <mathias.nyman@nokia.com>"); | ||
1064 | MODULE_DESCRIPTION("LP5523 LED engine"); | ||
1065 | MODULE_LICENSE("GPL"); | ||
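Note on the lp5523 probe above: the driver takes everything it needs from client->dev.platform_data -- per-channel current limits in led_config[] (channels with led_current == 0 are skipped as unconnected) plus optional setup_resources()/release_resources()/enable() callbacks used around chip power-up. A minimal, hypothetical board-file sketch follows; the struct and field names are inferred from the probe code in this patch rather than quoted from the platform header, so treat it as an illustration only.

    /* Hypothetical board-support sketch; names inferred from the probe code
     * above, not copied from a real board file. */
    static struct lp5523_led_config board_lp5523_leds[] = {
            { .chan_nr = 0, .led_current = 50, .max_current = 130 },
            { .chan_nr = 1, .led_current = 50, .max_current = 130 },
    };

    static void board_lp5523_enable(bool state)
    {
            gpio_set_value(BOARD_LP5523_EN_GPIO, state);    /* assumed GPIO */
    }

    static struct lp5523_platform_data board_lp5523_pdata = {
            .led_config     = board_lp5523_leds,
            .num_channels   = ARRAY_SIZE(board_lp5523_leds),
            .enable         = board_lp5523_enable,
    };

    /* registered from board code along the lines of:
     *   I2C_BOARD_INFO("lp5523", 0x32), .platform_data = &board_lp5523_pdata */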
diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c index 82b77bd482ff..b09bcbeade9c 100644 --- a/drivers/leds/ledtrig-timer.c +++ b/drivers/leds/ledtrig-timer.c | |||
@@ -12,73 +12,25 @@ | |||
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/jiffies.h> | ||
16 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
17 | #include <linux/init.h> | 16 | #include <linux/init.h> |
18 | #include <linux/list.h> | ||
19 | #include <linux/spinlock.h> | ||
20 | #include <linux/device.h> | 17 | #include <linux/device.h> |
21 | #include <linux/sysdev.h> | ||
22 | #include <linux/timer.h> | ||
23 | #include <linux/ctype.h> | 18 | #include <linux/ctype.h> |
24 | #include <linux/leds.h> | 19 | #include <linux/leds.h> |
25 | #include <linux/slab.h> | ||
26 | #include "leds.h" | 20 | #include "leds.h" |
27 | 21 | ||
28 | struct timer_trig_data { | ||
29 | int brightness_on; /* LED brightness during "on" period. | ||
30 | * (LED_OFF < brightness_on <= LED_FULL) | ||
31 | */ | ||
32 | unsigned long delay_on; /* milliseconds on */ | ||
33 | unsigned long delay_off; /* milliseconds off */ | ||
34 | struct timer_list timer; | ||
35 | }; | ||
36 | |||
37 | static void led_timer_function(unsigned long data) | ||
38 | { | ||
39 | struct led_classdev *led_cdev = (struct led_classdev *) data; | ||
40 | struct timer_trig_data *timer_data = led_cdev->trigger_data; | ||
41 | unsigned long brightness; | ||
42 | unsigned long delay; | ||
43 | |||
44 | if (!timer_data->delay_on || !timer_data->delay_off) { | ||
45 | led_set_brightness(led_cdev, LED_OFF); | ||
46 | return; | ||
47 | } | ||
48 | |||
49 | brightness = led_get_brightness(led_cdev); | ||
50 | if (!brightness) { | ||
51 | /* Time to switch the LED on. */ | ||
52 | brightness = timer_data->brightness_on; | ||
53 | delay = timer_data->delay_on; | ||
54 | } else { | ||
55 | /* Store the current brightness value to be able | ||
56 | * to restore it when the delay_off period is over. | ||
57 | */ | ||
58 | timer_data->brightness_on = brightness; | ||
59 | brightness = LED_OFF; | ||
60 | delay = timer_data->delay_off; | ||
61 | } | ||
62 | |||
63 | led_set_brightness(led_cdev, brightness); | ||
64 | |||
65 | mod_timer(&timer_data->timer, jiffies + msecs_to_jiffies(delay)); | ||
66 | } | ||
67 | |||
68 | static ssize_t led_delay_on_show(struct device *dev, | 22 | static ssize_t led_delay_on_show(struct device *dev, |
69 | struct device_attribute *attr, char *buf) | 23 | struct device_attribute *attr, char *buf) |
70 | { | 24 | { |
71 | struct led_classdev *led_cdev = dev_get_drvdata(dev); | 25 | struct led_classdev *led_cdev = dev_get_drvdata(dev); |
72 | struct timer_trig_data *timer_data = led_cdev->trigger_data; | ||
73 | 26 | ||
74 | return sprintf(buf, "%lu\n", timer_data->delay_on); | 27 | return sprintf(buf, "%lu\n", led_cdev->blink_delay_on); |
75 | } | 28 | } |
76 | 29 | ||
77 | static ssize_t led_delay_on_store(struct device *dev, | 30 | static ssize_t led_delay_on_store(struct device *dev, |
78 | struct device_attribute *attr, const char *buf, size_t size) | 31 | struct device_attribute *attr, const char *buf, size_t size) |
79 | { | 32 | { |
80 | struct led_classdev *led_cdev = dev_get_drvdata(dev); | 33 | struct led_classdev *led_cdev = dev_get_drvdata(dev); |
81 | struct timer_trig_data *timer_data = led_cdev->trigger_data; | ||
82 | int ret = -EINVAL; | 34 | int ret = -EINVAL; |
83 | char *after; | 35 | char *after; |
84 | unsigned long state = simple_strtoul(buf, &after, 10); | 36 | unsigned long state = simple_strtoul(buf, &after, 10); |
@@ -88,21 +40,7 @@ static ssize_t led_delay_on_store(struct device *dev, | |||
88 | count++; | 40 | count++; |
89 | 41 | ||
90 | if (count == size) { | 42 | if (count == size) { |
91 | if (timer_data->delay_on != state) { | 43 | led_blink_set(led_cdev, &state, &led_cdev->blink_delay_off); |
92 | /* the new value differs from the previous */ | ||
93 | timer_data->delay_on = state; | ||
94 | |||
95 | /* deactivate previous settings */ | ||
96 | del_timer_sync(&timer_data->timer); | ||
97 | |||
98 | /* try to activate hardware acceleration, if any */ | ||
99 | if (!led_cdev->blink_set || | ||
100 | led_cdev->blink_set(led_cdev, | ||
101 | &timer_data->delay_on, &timer_data->delay_off)) { | ||
102 | /* no hardware acceleration, blink via timer */ | ||
103 | mod_timer(&timer_data->timer, jiffies + 1); | ||
104 | } | ||
105 | } | ||
106 | ret = count; | 44 | ret = count; |
107 | } | 45 | } |
108 | 46 | ||
@@ -113,16 +51,14 @@ static ssize_t led_delay_off_show(struct device *dev, | |||
113 | struct device_attribute *attr, char *buf) | 51 | struct device_attribute *attr, char *buf) |
114 | { | 52 | { |
115 | struct led_classdev *led_cdev = dev_get_drvdata(dev); | 53 | struct led_classdev *led_cdev = dev_get_drvdata(dev); |
116 | struct timer_trig_data *timer_data = led_cdev->trigger_data; | ||
117 | 54 | ||
118 | return sprintf(buf, "%lu\n", timer_data->delay_off); | 55 | return sprintf(buf, "%lu\n", led_cdev->blink_delay_off); |
119 | } | 56 | } |
120 | 57 | ||
121 | static ssize_t led_delay_off_store(struct device *dev, | 58 | static ssize_t led_delay_off_store(struct device *dev, |
122 | struct device_attribute *attr, const char *buf, size_t size) | 59 | struct device_attribute *attr, const char *buf, size_t size) |
123 | { | 60 | { |
124 | struct led_classdev *led_cdev = dev_get_drvdata(dev); | 61 | struct led_classdev *led_cdev = dev_get_drvdata(dev); |
125 | struct timer_trig_data *timer_data = led_cdev->trigger_data; | ||
126 | int ret = -EINVAL; | 62 | int ret = -EINVAL; |
127 | char *after; | 63 | char *after; |
128 | unsigned long state = simple_strtoul(buf, &after, 10); | 64 | unsigned long state = simple_strtoul(buf, &after, 10); |
@@ -132,21 +68,7 @@ static ssize_t led_delay_off_store(struct device *dev, | |||
132 | count++; | 68 | count++; |
133 | 69 | ||
134 | if (count == size) { | 70 | if (count == size) { |
135 | if (timer_data->delay_off != state) { | 71 | led_blink_set(led_cdev, &led_cdev->blink_delay_on, &state); |
136 | /* the new value differs from the previous */ | ||
137 | timer_data->delay_off = state; | ||
138 | |||
139 | /* deactivate previous settings */ | ||
140 | del_timer_sync(&timer_data->timer); | ||
141 | |||
142 | /* try to activate hardware acceleration, if any */ | ||
143 | if (!led_cdev->blink_set || | ||
144 | led_cdev->blink_set(led_cdev, | ||
145 | &timer_data->delay_on, &timer_data->delay_off)) { | ||
146 | /* no hardware acceleration, blink via timer */ | ||
147 | mod_timer(&timer_data->timer, jiffies + 1); | ||
148 | } | ||
149 | } | ||
150 | ret = count; | 72 | ret = count; |
151 | } | 73 | } |
152 | 74 | ||
@@ -158,60 +80,34 @@ static DEVICE_ATTR(delay_off, 0644, led_delay_off_show, led_delay_off_store); | |||
158 | 80 | ||
159 | static void timer_trig_activate(struct led_classdev *led_cdev) | 81 | static void timer_trig_activate(struct led_classdev *led_cdev) |
160 | { | 82 | { |
161 | struct timer_trig_data *timer_data; | ||
162 | int rc; | 83 | int rc; |
163 | 84 | ||
164 | timer_data = kzalloc(sizeof(struct timer_trig_data), GFP_KERNEL); | 85 | led_cdev->trigger_data = NULL; |
165 | if (!timer_data) | ||
166 | return; | ||
167 | |||
168 | timer_data->brightness_on = led_get_brightness(led_cdev); | ||
169 | if (timer_data->brightness_on == LED_OFF) | ||
170 | timer_data->brightness_on = led_cdev->max_brightness; | ||
171 | led_cdev->trigger_data = timer_data; | ||
172 | |||
173 | init_timer(&timer_data->timer); | ||
174 | timer_data->timer.function = led_timer_function; | ||
175 | timer_data->timer.data = (unsigned long) led_cdev; | ||
176 | 86 | ||
177 | rc = device_create_file(led_cdev->dev, &dev_attr_delay_on); | 87 | rc = device_create_file(led_cdev->dev, &dev_attr_delay_on); |
178 | if (rc) | 88 | if (rc) |
179 | goto err_out; | 89 | return; |
180 | rc = device_create_file(led_cdev->dev, &dev_attr_delay_off); | 90 | rc = device_create_file(led_cdev->dev, &dev_attr_delay_off); |
181 | if (rc) | 91 | if (rc) |
182 | goto err_out_delayon; | 92 | goto err_out_delayon; |
183 | 93 | ||
184 | /* If there is hardware support for blinking, start one | 94 | led_cdev->trigger_data = (void *)1; |
185 | * user friendly blink rate chosen by the driver. | ||
186 | */ | ||
187 | if (led_cdev->blink_set) | ||
188 | led_cdev->blink_set(led_cdev, | ||
189 | &timer_data->delay_on, &timer_data->delay_off); | ||
190 | 95 | ||
191 | return; | 96 | return; |
192 | 97 | ||
193 | err_out_delayon: | 98 | err_out_delayon: |
194 | device_remove_file(led_cdev->dev, &dev_attr_delay_on); | 99 | device_remove_file(led_cdev->dev, &dev_attr_delay_on); |
195 | err_out: | ||
196 | led_cdev->trigger_data = NULL; | ||
197 | kfree(timer_data); | ||
198 | } | 100 | } |
199 | 101 | ||
200 | static void timer_trig_deactivate(struct led_classdev *led_cdev) | 102 | static void timer_trig_deactivate(struct led_classdev *led_cdev) |
201 | { | 103 | { |
202 | struct timer_trig_data *timer_data = led_cdev->trigger_data; | 104 | if (led_cdev->trigger_data) { |
203 | unsigned long on = 0, off = 0; | ||
204 | |||
205 | if (timer_data) { | ||
206 | device_remove_file(led_cdev->dev, &dev_attr_delay_on); | 105 | device_remove_file(led_cdev->dev, &dev_attr_delay_on); |
207 | device_remove_file(led_cdev->dev, &dev_attr_delay_off); | 106 | device_remove_file(led_cdev->dev, &dev_attr_delay_off); |
208 | del_timer_sync(&timer_data->timer); | ||
209 | kfree(timer_data); | ||
210 | } | 107 | } |
211 | 108 | ||
212 | /* If there is hardware support for blinking, stop it */ | 109 | /* Stop blinking */ |
213 | if (led_cdev->blink_set) | 110 | led_brightness_set(led_cdev, LED_OFF); |
214 | led_cdev->blink_set(led_cdev, &on, &off); | ||
215 | } | 111 | } |
216 | 112 | ||
217 | static struct led_trigger timer_led_trigger = { | 113 | static struct led_trigger timer_led_trigger = { |
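With this change the timer trigger no longer runs its own software timer: writes to delay_on/delay_off are handed straight to the LED core via led_blink_set(), which uses the driver's blink_set() hook when hardware blinking is available and otherwise falls back to the core's software blink timer, recording the effective periods in blink_delay_on/blink_delay_off. A short, hypothetical caller is sketched below to show the calling convention (the periods are passed by pointer so the core or the driver may round them).

    /* Hypothetical in-kernel caller of the core blink API this trigger now
     * delegates to. */
    static void example_start_blink(struct led_classdev *cdev)
    {
            unsigned long on = 500, off = 500;      /* milliseconds */

            /* The core (or a hardware blink_set hook) may adjust these; the
             * resulting values are what the delay_on/delay_off sysfs files
             * above report back. */
            led_blink_set(cdev, &on, &off);
    }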
diff --git a/drivers/macintosh/adb-iop.c b/drivers/macintosh/adb-iop.c index 444696625171..f5f4da3d0b67 100644 --- a/drivers/macintosh/adb-iop.c +++ b/drivers/macintosh/adb-iop.c | |||
@@ -80,7 +80,7 @@ static void adb_iop_end_req(struct adb_request *req, int state) | |||
80 | static void adb_iop_complete(struct iop_msg *msg) | 80 | static void adb_iop_complete(struct iop_msg *msg) |
81 | { | 81 | { |
82 | struct adb_request *req; | 82 | struct adb_request *req; |
83 | uint flags; | 83 | unsigned long flags; |
84 | 84 | ||
85 | local_irq_save(flags); | 85 | local_irq_save(flags); |
86 | 86 | ||
@@ -103,7 +103,7 @@ static void adb_iop_listen(struct iop_msg *msg) | |||
103 | { | 103 | { |
104 | struct adb_iopmsg *amsg = (struct adb_iopmsg *) msg->message; | 104 | struct adb_iopmsg *amsg = (struct adb_iopmsg *) msg->message; |
105 | struct adb_request *req; | 105 | struct adb_request *req; |
106 | uint flags; | 106 | unsigned long flags; |
107 | #ifdef DEBUG_ADB_IOP | 107 | #ifdef DEBUG_ADB_IOP |
108 | int i; | 108 | int i; |
109 | #endif | 109 | #endif |
diff --git a/drivers/md/md.c b/drivers/md/md.c index 4e957f3140a8..324a3663fcda 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -706,7 +706,7 @@ static struct mdk_personality *find_pers(int level, char *clevel) | |||
706 | /* return the offset of the super block in 512byte sectors */ | 706 | /* return the offset of the super block in 512byte sectors */ |
707 | static inline sector_t calc_dev_sboffset(struct block_device *bdev) | 707 | static inline sector_t calc_dev_sboffset(struct block_device *bdev) |
708 | { | 708 | { |
709 | sector_t num_sectors = bdev->bd_inode->i_size / 512; | 709 | sector_t num_sectors = i_size_read(bdev->bd_inode) / 512; |
710 | return MD_NEW_SIZE_SECTORS(num_sectors); | 710 | return MD_NEW_SIZE_SECTORS(num_sectors); |
711 | } | 711 | } |
712 | 712 | ||
@@ -1386,7 +1386,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) | |||
1386 | */ | 1386 | */ |
1387 | switch(minor_version) { | 1387 | switch(minor_version) { |
1388 | case 0: | 1388 | case 0: |
1389 | sb_start = rdev->bdev->bd_inode->i_size >> 9; | 1389 | sb_start = i_size_read(rdev->bdev->bd_inode) >> 9; |
1390 | sb_start -= 8*2; | 1390 | sb_start -= 8*2; |
1391 | sb_start &= ~(sector_t)(4*2-1); | 1391 | sb_start &= ~(sector_t)(4*2-1); |
1392 | break; | 1392 | break; |
@@ -1472,7 +1472,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) | |||
1472 | ret = 0; | 1472 | ret = 0; |
1473 | } | 1473 | } |
1474 | if (minor_version) | 1474 | if (minor_version) |
1475 | rdev->sectors = (rdev->bdev->bd_inode->i_size >> 9) - | 1475 | rdev->sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) - |
1476 | le64_to_cpu(sb->data_offset); | 1476 | le64_to_cpu(sb->data_offset); |
1477 | else | 1477 | else |
1478 | rdev->sectors = rdev->sb_start; | 1478 | rdev->sectors = rdev->sb_start; |
@@ -1680,7 +1680,7 @@ super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors) | |||
1680 | return 0; /* component must fit device */ | 1680 | return 0; /* component must fit device */ |
1681 | if (rdev->sb_start < rdev->data_offset) { | 1681 | if (rdev->sb_start < rdev->data_offset) { |
1682 | /* minor versions 1 and 2; superblock before data */ | 1682 | /* minor versions 1 and 2; superblock before data */ |
1683 | max_sectors = rdev->bdev->bd_inode->i_size >> 9; | 1683 | max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9; |
1684 | max_sectors -= rdev->data_offset; | 1684 | max_sectors -= rdev->data_offset; |
1685 | if (!num_sectors || num_sectors > max_sectors) | 1685 | if (!num_sectors || num_sectors > max_sectors) |
1686 | num_sectors = max_sectors; | 1686 | num_sectors = max_sectors; |
@@ -1690,7 +1690,7 @@ super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors) | |||
1690 | } else { | 1690 | } else { |
1691 | /* minor version 0; superblock after data */ | 1691 | /* minor version 0; superblock after data */ |
1692 | sector_t sb_start; | 1692 | sector_t sb_start; |
1693 | sb_start = (rdev->bdev->bd_inode->i_size >> 9) - 8*2; | 1693 | sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2; |
1694 | sb_start &= ~(sector_t)(4*2 - 1); | 1694 | sb_start &= ~(sector_t)(4*2 - 1); |
1695 | max_sectors = rdev->sectors + sb_start - rdev->sb_start; | 1695 | max_sectors = rdev->sectors + sb_start - rdev->sb_start; |
1696 | if (!num_sectors || num_sectors > max_sectors) | 1696 | if (!num_sectors || num_sectors > max_sectors) |
@@ -2584,7 +2584,7 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) | |||
2584 | if (!sectors) | 2584 | if (!sectors) |
2585 | return -EBUSY; | 2585 | return -EBUSY; |
2586 | } else if (!sectors) | 2586 | } else if (!sectors) |
2587 | sectors = (rdev->bdev->bd_inode->i_size >> 9) - | 2587 | sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) - |
2588 | rdev->data_offset; | 2588 | rdev->data_offset; |
2589 | } | 2589 | } |
2590 | if (sectors < my_mddev->dev_sectors) | 2590 | if (sectors < my_mddev->dev_sectors) |
@@ -2797,7 +2797,7 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi | |||
2797 | 2797 | ||
2798 | kobject_init(&rdev->kobj, &rdev_ktype); | 2798 | kobject_init(&rdev->kobj, &rdev_ktype); |
2799 | 2799 | ||
2800 | size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; | 2800 | size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS; |
2801 | if (!size) { | 2801 | if (!size) { |
2802 | printk(KERN_WARNING | 2802 | printk(KERN_WARNING |
2803 | "md: %s has zero or unknown size, marking faulty!\n", | 2803 | "md: %s has zero or unknown size, marking faulty!\n", |
@@ -5235,8 +5235,8 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) | |||
5235 | 5235 | ||
5236 | if (!mddev->persistent) { | 5236 | if (!mddev->persistent) { |
5237 | printk(KERN_INFO "md: nonpersistent superblock ...\n"); | 5237 | printk(KERN_INFO "md: nonpersistent superblock ...\n"); |
5238 | rdev->sb_start = rdev->bdev->bd_inode->i_size / 512; | 5238 | rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512; |
5239 | } else | 5239 | } else |
5240 | rdev->sb_start = calc_dev_sboffset(rdev->bdev); | 5240 | rdev->sb_start = calc_dev_sboffset(rdev->bdev); |
5241 | rdev->sectors = rdev->sb_start; | 5241 | rdev->sectors = rdev->sb_start; |
5242 | 5242 | ||
@@ -5306,7 +5306,7 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev) | |||
5306 | if (mddev->persistent) | 5306 | if (mddev->persistent) |
5307 | rdev->sb_start = calc_dev_sboffset(rdev->bdev); | 5307 | rdev->sb_start = calc_dev_sboffset(rdev->bdev); |
5308 | else | 5308 | else |
5309 | rdev->sb_start = rdev->bdev->bd_inode->i_size / 512; | 5309 | rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512; |
5310 | 5310 | ||
5311 | rdev->sectors = rdev->sb_start; | 5311 | rdev->sectors = rdev->sb_start; |
5312 | 5312 | ||
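The md.c hunks above all make the same substitution: open-coded reads of bdev->bd_inode->i_size become i_size_read(), the accessor that returns a consistent 64-bit inode size even on 32-bit SMP/PREEMPT kernels where a plain load of a loff_t can tear. An illustrative helper (not part of the patch) showing the idiom:

    /* Illustrative only: device size in 512-byte sectors via the tear-safe
     * accessor used throughout the md.c changes above. */
    static inline sector_t example_bdev_sectors(struct block_device *bdev)
    {
            return i_size_read(bdev->bd_inode) >> 9;
    }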
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig index bad2cedb8d96..a28541b2b1a2 100644 --- a/drivers/media/Kconfig +++ b/drivers/media/Kconfig | |||
@@ -19,7 +19,6 @@ comment "Multimedia core support" | |||
19 | 19 | ||
20 | config VIDEO_DEV | 20 | config VIDEO_DEV |
21 | tristate "Video For Linux" | 21 | tristate "Video For Linux" |
22 | depends on BKL # used in many drivers for ioctl handling, need to kill | ||
23 | ---help--- | 22 | ---help--- |
24 | V4L core support for video capture and overlay devices, webcams and | 23 | V4L core support for video capture and overlay devices, webcams and |
25 | AM/FM radio cards. | 24 | AM/FM radio cards. |
diff --git a/drivers/media/common/saa7146_i2c.c b/drivers/media/common/saa7146_i2c.c index 3d88542612ea..74ee172b5bc9 100644 --- a/drivers/media/common/saa7146_i2c.c +++ b/drivers/media/common/saa7146_i2c.c | |||
@@ -391,7 +391,6 @@ static int saa7146_i2c_xfer(struct i2c_adapter* adapter, struct i2c_msg *msg, in | |||
391 | 391 | ||
392 | /*****************************************************************************/ | 392 | /*****************************************************************************/ |
393 | /* i2c-adapter helper functions */ | 393 | /* i2c-adapter helper functions */ |
394 | #include <linux/i2c-id.h> | ||
395 | 394 | ||
396 | /* exported algorithm data */ | 395 | /* exported algorithm data */ |
397 | static struct i2c_algorithm saa7146_algo = { | 396 | static struct i2c_algorithm saa7146_algo = { |
diff --git a/drivers/media/dvb/frontends/dibx000_common.c b/drivers/media/dvb/frontends/dibx000_common.c index a4991026254d..2311c0a3406c 100644 --- a/drivers/media/dvb/frontends/dibx000_common.c +++ b/drivers/media/dvb/frontends/dibx000_common.c | |||
@@ -130,6 +130,7 @@ static int i2c_adapter_init(struct i2c_adapter *i2c_adap, | |||
130 | struct dibx000_i2c_master *mst) | 130 | struct dibx000_i2c_master *mst) |
131 | { | 131 | { |
132 | strncpy(i2c_adap->name, name, sizeof(i2c_adap->name)); | 132 | strncpy(i2c_adap->name, name, sizeof(i2c_adap->name)); |
133 | i2c_adap->algo = algo; | ||
133 | i2c_adap->algo_data = NULL; | 134 | i2c_adap->algo_data = NULL; |
134 | i2c_set_adapdata(i2c_adap, mst); | 135 | i2c_set_adapdata(i2c_adap, mst); |
135 | if (i2c_add_adapter(i2c_adap) < 0) | 136 | if (i2c_add_adapter(i2c_adap) < 0) |
diff --git a/drivers/media/video/cafe_ccic.c b/drivers/media/video/cafe_ccic.c index 2934770dacc3..7bc36670071a 100644 --- a/drivers/media/video/cafe_ccic.c +++ b/drivers/media/video/cafe_ccic.c | |||
@@ -2065,8 +2065,9 @@ static int cafe_pci_probe(struct pci_dev *pdev, | |||
2065 | sensor_cfg.clock_speed = 45; | 2065 | sensor_cfg.clock_speed = 45; |
2066 | 2066 | ||
2067 | cam->sensor_addr = 0x42; | 2067 | cam->sensor_addr = 0x42; |
2068 | cam->sensor = v4l2_i2c_new_subdev(&cam->v4l2_dev, &cam->i2c_adapter, | 2068 | cam->sensor = v4l2_i2c_new_subdev_cfg(&cam->v4l2_dev, &cam->i2c_adapter, |
2069 | NULL, "ov7670", cam->sensor_addr, NULL); | 2069 | "ov7670", "ov7670", 0, &sensor_cfg, cam->sensor_addr, |
2070 | NULL); | ||
2070 | if (cam->sensor == NULL) { | 2071 | if (cam->sensor == NULL) { |
2071 | ret = -ENODEV; | 2072 | ret = -ENODEV; |
2072 | goto out_smbus; | 2073 | goto out_smbus; |
diff --git a/drivers/media/video/cx231xx/cx231xx-417.c b/drivers/media/video/cx231xx/cx231xx-417.c index aab21f3ce472..4c7cac3b6254 100644 --- a/drivers/media/video/cx231xx/cx231xx-417.c +++ b/drivers/media/video/cx231xx/cx231xx-417.c | |||
@@ -31,7 +31,6 @@ | |||
31 | #include <linux/delay.h> | 31 | #include <linux/delay.h> |
32 | #include <linux/device.h> | 32 | #include <linux/device.h> |
33 | #include <linux/firmware.h> | 33 | #include <linux/firmware.h> |
34 | #include <linux/smp_lock.h> | ||
35 | #include <linux/vmalloc.h> | 34 | #include <linux/vmalloc.h> |
36 | #include <media/v4l2-common.h> | 35 | #include <media/v4l2-common.h> |
37 | #include <media/v4l2-ioctl.h> | 36 | #include <media/v4l2-ioctl.h> |
@@ -1927,10 +1926,9 @@ static int mpeg_open(struct file *file) | |||
1927 | dev = h; | 1926 | dev = h; |
1928 | } | 1927 | } |
1929 | 1928 | ||
1930 | if (dev == NULL) { | 1929 | if (dev == NULL) |
1931 | unlock_kernel(); | ||
1932 | return -ENODEV; | 1930 | return -ENODEV; |
1933 | } | 1931 | |
1934 | mutex_lock(&dev->lock); | 1932 | mutex_lock(&dev->lock); |
1935 | 1933 | ||
1936 | /* allocate + initialize per filehandle data */ | 1934 | /* allocate + initialize per filehandle data */ |
diff --git a/drivers/media/video/cx23885/cx23885-417.c b/drivers/media/video/cx23885/cx23885-417.c index a6cc12f8736c..9a98dc55f657 100644 --- a/drivers/media/video/cx23885/cx23885-417.c +++ b/drivers/media/video/cx23885/cx23885-417.c | |||
@@ -31,7 +31,6 @@ | |||
31 | #include <linux/delay.h> | 31 | #include <linux/delay.h> |
32 | #include <linux/device.h> | 32 | #include <linux/device.h> |
33 | #include <linux/firmware.h> | 33 | #include <linux/firmware.h> |
34 | #include <linux/smp_lock.h> | ||
35 | #include <linux/slab.h> | 34 | #include <linux/slab.h> |
36 | #include <media/v4l2-common.h> | 35 | #include <media/v4l2-common.h> |
37 | #include <media/v4l2-ioctl.h> | 36 | #include <media/v4l2-ioctl.h> |
@@ -1576,12 +1575,8 @@ static int mpeg_open(struct file *file) | |||
1576 | 1575 | ||
1577 | /* allocate + initialize per filehandle data */ | 1576 | /* allocate + initialize per filehandle data */ |
1578 | fh = kzalloc(sizeof(*fh), GFP_KERNEL); | 1577 | fh = kzalloc(sizeof(*fh), GFP_KERNEL); |
1579 | if (NULL == fh) { | 1578 | if (!fh) |
1580 | unlock_kernel(); | ||
1581 | return -ENOMEM; | 1579 | return -ENOMEM; |
1582 | } | ||
1583 | |||
1584 | lock_kernel(); | ||
1585 | 1580 | ||
1586 | file->private_data = fh; | 1581 | file->private_data = fh; |
1587 | fh->dev = dev; | 1582 | fh->dev = dev; |
@@ -1592,8 +1587,6 @@ static int mpeg_open(struct file *file) | |||
1592 | V4L2_FIELD_INTERLACED, | 1587 | V4L2_FIELD_INTERLACED, |
1593 | sizeof(struct cx23885_buffer), | 1588 | sizeof(struct cx23885_buffer), |
1594 | fh, NULL); | 1589 | fh, NULL); |
1595 | unlock_kernel(); | ||
1596 | |||
1597 | return 0; | 1590 | return 0; |
1598 | } | 1591 | } |
1599 | 1592 | ||
diff --git a/drivers/media/video/cx23885/cx23885-video.c b/drivers/media/video/cx23885/cx23885-video.c index 93af9c65b484..3cc9f462d08d 100644 --- a/drivers/media/video/cx23885/cx23885-video.c +++ b/drivers/media/video/cx23885/cx23885-video.c | |||
@@ -26,7 +26,6 @@ | |||
26 | #include <linux/kmod.h> | 26 | #include <linux/kmod.h> |
27 | #include <linux/kernel.h> | 27 | #include <linux/kernel.h> |
28 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
29 | #include <linux/smp_lock.h> | ||
30 | #include <linux/interrupt.h> | 29 | #include <linux/interrupt.h> |
31 | #include <linux/delay.h> | 30 | #include <linux/delay.h> |
32 | #include <linux/kthread.h> | 31 | #include <linux/kthread.h> |
@@ -743,8 +742,6 @@ static int video_open(struct file *file) | |||
743 | if (NULL == fh) | 742 | if (NULL == fh) |
744 | return -ENOMEM; | 743 | return -ENOMEM; |
745 | 744 | ||
746 | lock_kernel(); | ||
747 | |||
748 | file->private_data = fh; | 745 | file->private_data = fh; |
749 | fh->dev = dev; | 746 | fh->dev = dev; |
750 | fh->radio = radio; | 747 | fh->radio = radio; |
@@ -762,8 +759,6 @@ static int video_open(struct file *file) | |||
762 | 759 | ||
763 | dprintk(1, "post videobuf_queue_init()\n"); | 760 | dprintk(1, "post videobuf_queue_init()\n"); |
764 | 761 | ||
765 | unlock_kernel(); | ||
766 | |||
767 | return 0; | 762 | return 0; |
768 | } | 763 | } |
769 | 764 | ||
diff --git a/drivers/media/video/imx074.c b/drivers/media/video/imx074.c index 380e459f899d..27b5dfdfbb93 100644 --- a/drivers/media/video/imx074.c +++ b/drivers/media/video/imx074.c | |||
@@ -451,7 +451,6 @@ static int imx074_probe(struct i2c_client *client, | |||
451 | ret = imx074_video_probe(icd, client); | 451 | ret = imx074_video_probe(icd, client); |
452 | if (ret < 0) { | 452 | if (ret < 0) { |
453 | icd->ops = NULL; | 453 | icd->ops = NULL; |
454 | i2c_set_clientdata(client, NULL); | ||
455 | kfree(priv); | 454 | kfree(priv); |
456 | return ret; | 455 | return ret; |
457 | } | 456 | } |
@@ -468,7 +467,6 @@ static int imx074_remove(struct i2c_client *client) | |||
468 | icd->ops = NULL; | 467 | icd->ops = NULL; |
469 | if (icl->free_bus) | 468 | if (icl->free_bus) |
470 | icl->free_bus(icl); | 469 | icl->free_bus(icl); |
471 | i2c_set_clientdata(client, NULL); | ||
472 | client->driver = NULL; | 470 | client->driver = NULL; |
473 | kfree(priv); | 471 | kfree(priv); |
474 | 472 | ||
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c index 5a000c65ae98..ce4a75375909 100644 --- a/drivers/media/video/ir-kbd-i2c.c +++ b/drivers/media/video/ir-kbd-i2c.c | |||
@@ -44,7 +44,6 @@ | |||
44 | #include <linux/errno.h> | 44 | #include <linux/errno.h> |
45 | #include <linux/slab.h> | 45 | #include <linux/slab.h> |
46 | #include <linux/i2c.h> | 46 | #include <linux/i2c.h> |
47 | #include <linux/i2c-id.h> | ||
48 | #include <linux/workqueue.h> | 47 | #include <linux/workqueue.h> |
49 | 48 | ||
50 | #include <media/ir-core.h> | 49 | #include <media/ir-core.h> |
diff --git a/drivers/media/video/mx2_camera.c b/drivers/media/video/mx2_camera.c index 4a27862da30d..072bd2d1cfad 100644 --- a/drivers/media/video/mx2_camera.c +++ b/drivers/media/video/mx2_camera.c | |||
@@ -31,6 +31,7 @@ | |||
31 | 31 | ||
32 | #include <media/v4l2-common.h> | 32 | #include <media/v4l2-common.h> |
33 | #include <media/v4l2-dev.h> | 33 | #include <media/v4l2-dev.h> |
34 | #include <media/videobuf-core.h> | ||
34 | #include <media/videobuf-dma-contig.h> | 35 | #include <media/videobuf-dma-contig.h> |
35 | #include <media/soc_camera.h> | 36 | #include <media/soc_camera.h> |
36 | #include <media/soc_mediabus.h> | 37 | #include <media/soc_mediabus.h> |
@@ -903,8 +904,6 @@ static int mx2_camera_set_crop(struct soc_camera_device *icd, | |||
903 | static int mx2_camera_set_fmt(struct soc_camera_device *icd, | 904 | static int mx2_camera_set_fmt(struct soc_camera_device *icd, |
904 | struct v4l2_format *f) | 905 | struct v4l2_format *f) |
905 | { | 906 | { |
906 | struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); | ||
907 | struct mx2_camera_dev *pcdev = ici->priv; | ||
908 | struct v4l2_subdev *sd = soc_camera_to_subdev(icd); | 907 | struct v4l2_subdev *sd = soc_camera_to_subdev(icd); |
909 | const struct soc_camera_format_xlate *xlate; | 908 | const struct soc_camera_format_xlate *xlate; |
910 | struct v4l2_pix_format *pix = &f->fmt.pix; | 909 | struct v4l2_pix_format *pix = &f->fmt.pix; |
@@ -943,8 +942,6 @@ static int mx2_camera_set_fmt(struct soc_camera_device *icd, | |||
943 | static int mx2_camera_try_fmt(struct soc_camera_device *icd, | 942 | static int mx2_camera_try_fmt(struct soc_camera_device *icd, |
944 | struct v4l2_format *f) | 943 | struct v4l2_format *f) |
945 | { | 944 | { |
946 | struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); | ||
947 | struct mx2_camera_dev *pcdev = ici->priv; | ||
948 | struct v4l2_subdev *sd = soc_camera_to_subdev(icd); | 945 | struct v4l2_subdev *sd = soc_camera_to_subdev(icd); |
949 | const struct soc_camera_format_xlate *xlate; | 946 | const struct soc_camera_format_xlate *xlate; |
950 | struct v4l2_pix_format *pix = &f->fmt.pix; | 947 | struct v4l2_pix_format *pix = &f->fmt.pix; |
@@ -1024,13 +1021,13 @@ static int mx2_camera_querycap(struct soc_camera_host *ici, | |||
1024 | return 0; | 1021 | return 0; |
1025 | } | 1022 | } |
1026 | 1023 | ||
1027 | static int mx2_camera_reqbufs(struct soc_camera_file *icf, | 1024 | static int mx2_camera_reqbufs(struct soc_camera_device *icd, |
1028 | struct v4l2_requestbuffers *p) | 1025 | struct v4l2_requestbuffers *p) |
1029 | { | 1026 | { |
1030 | int i; | 1027 | int i; |
1031 | 1028 | ||
1032 | for (i = 0; i < p->count; i++) { | 1029 | for (i = 0; i < p->count; i++) { |
1033 | struct mx2_buffer *buf = container_of(icf->vb_vidq.bufs[i], | 1030 | struct mx2_buffer *buf = container_of(icd->vb_vidq.bufs[i], |
1034 | struct mx2_buffer, vb); | 1031 | struct mx2_buffer, vb); |
1035 | INIT_LIST_HEAD(&buf->vb.queue); | 1032 | INIT_LIST_HEAD(&buf->vb.queue); |
1036 | } | 1033 | } |
@@ -1151,9 +1148,9 @@ err_out: | |||
1151 | 1148 | ||
1152 | static unsigned int mx2_camera_poll(struct file *file, poll_table *pt) | 1149 | static unsigned int mx2_camera_poll(struct file *file, poll_table *pt) |
1153 | { | 1150 | { |
1154 | struct soc_camera_file *icf = file->private_data; | 1151 | struct soc_camera_device *icd = file->private_data; |
1155 | 1152 | ||
1156 | return videobuf_poll_stream(file, &icf->vb_vidq, pt); | 1153 | return videobuf_poll_stream(file, &icd->vb_vidq, pt); |
1157 | } | 1154 | } |
1158 | 1155 | ||
1159 | static struct soc_camera_host_ops mx2_soc_camera_host_ops = { | 1156 | static struct soc_camera_host_ops mx2_soc_camera_host_ops = { |
diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c index 29c5fc348133..aa871c2936b3 100644 --- a/drivers/media/video/mx3_camera.c +++ b/drivers/media/video/mx3_camera.c | |||
@@ -27,6 +27,7 @@ | |||
27 | 27 | ||
28 | #include <mach/ipu.h> | 28 | #include <mach/ipu.h> |
29 | #include <mach/mx3_camera.h> | 29 | #include <mach/mx3_camera.h> |
30 | #include <mach/dma.h> | ||
30 | 31 | ||
31 | #define MX3_CAM_DRV_NAME "mx3-camera" | 32 | #define MX3_CAM_DRV_NAME "mx3-camera" |
32 | 33 | ||
@@ -638,6 +639,9 @@ static bool chan_filter(struct dma_chan *chan, void *arg) | |||
638 | struct dma_chan_request *rq = arg; | 639 | struct dma_chan_request *rq = arg; |
639 | struct mx3_camera_pdata *pdata; | 640 | struct mx3_camera_pdata *pdata; |
640 | 641 | ||
642 | if (!imx_dma_is_ipu(chan)) | ||
643 | return false; | ||
644 | |||
641 | if (!rq) | 645 | if (!rq) |
642 | return false; | 646 | return false; |
643 | 647 | ||
diff --git a/drivers/media/video/omap1_camera.c b/drivers/media/video/omap1_camera.c index 7c30e62b50db..cbfd07f2d9da 100644 --- a/drivers/media/video/omap1_camera.c +++ b/drivers/media/video/omap1_camera.c | |||
@@ -235,7 +235,7 @@ static void free_buffer(struct videobuf_queue *vq, struct omap1_cam_buf *buf, | |||
235 | 235 | ||
236 | BUG_ON(in_interrupt()); | 236 | BUG_ON(in_interrupt()); |
237 | 237 | ||
238 | videobuf_waiton(vb, 0, 0); | 238 | videobuf_waiton(vq, vb, 0, 0); |
239 | 239 | ||
240 | if (vb_mode == OMAP1_CAM_DMA_CONTIG) { | 240 | if (vb_mode == OMAP1_CAM_DMA_CONTIG) { |
241 | videobuf_dma_contig_free(vq, vb); | 241 | videobuf_dma_contig_free(vq, vb); |
@@ -504,7 +504,7 @@ static void omap1_videobuf_queue(struct videobuf_queue *vq, | |||
504 | * empty. Since the transfer of the DMA programming register set | 504 | * empty. Since the transfer of the DMA programming register set |
505 | * content to the DMA working register set is done automatically | 505 | * content to the DMA working register set is done automatically |
506 | * by the DMA hardware, this can pretty well happen while we | 506 | * by the DMA hardware, this can pretty well happen while we |
507 | * are keeping the lock here. Levae fetching it from the queue | 507 | * are keeping the lock here. Leave fetching it from the queue |
508 | * to be done when the next DMA interrupt occurs instead. | 508 | * to be done when the next DMA interrupt occurs instead. |
509 | */ | 509 | */ |
510 | return; | 510 | return; |
@@ -1365,12 +1365,12 @@ static void omap1_cam_init_videobuf(struct videobuf_queue *q, | |||
1365 | videobuf_queue_dma_contig_init(q, &omap1_videobuf_ops, | 1365 | videobuf_queue_dma_contig_init(q, &omap1_videobuf_ops, |
1366 | icd->dev.parent, &pcdev->lock, | 1366 | icd->dev.parent, &pcdev->lock, |
1367 | V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE, | 1367 | V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE, |
1368 | sizeof(struct omap1_cam_buf), icd); | 1368 | sizeof(struct omap1_cam_buf), icd, NULL); |
1369 | else | 1369 | else |
1370 | videobuf_queue_sg_init(q, &omap1_videobuf_ops, | 1370 | videobuf_queue_sg_init(q, &omap1_videobuf_ops, |
1371 | icd->dev.parent, &pcdev->lock, | 1371 | icd->dev.parent, &pcdev->lock, |
1372 | V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE, | 1372 | V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE, |
1373 | sizeof(struct omap1_cam_buf), icd); | 1373 | sizeof(struct omap1_cam_buf), icd, NULL); |
1374 | 1374 | ||
1375 | /* use videobuf mode (auto)selected with the module parameter */ | 1375 | /* use videobuf mode (auto)selected with the module parameter */ |
1376 | pcdev->vb_mode = sg_mode ? OMAP1_CAM_DMA_SG : OMAP1_CAM_DMA_CONTIG; | 1376 | pcdev->vb_mode = sg_mode ? OMAP1_CAM_DMA_SG : OMAP1_CAM_DMA_CONTIG; |
@@ -1386,7 +1386,7 @@ static void omap1_cam_init_videobuf(struct videobuf_queue *q, | |||
1386 | } | 1386 | } |
1387 | } | 1387 | } |
1388 | 1388 | ||
1389 | static int omap1_cam_reqbufs(struct soc_camera_file *icf, | 1389 | static int omap1_cam_reqbufs(struct soc_camera_device *icd, |
1390 | struct v4l2_requestbuffers *p) | 1390 | struct v4l2_requestbuffers *p) |
1391 | { | 1391 | { |
1392 | int i; | 1392 | int i; |
@@ -1398,7 +1398,7 @@ static int omap1_cam_reqbufs(struct soc_camera_file *icf, | |||
1398 | * it hadn't triggered | 1398 | * it hadn't triggered |
1399 | */ | 1399 | */ |
1400 | for (i = 0; i < p->count; i++) { | 1400 | for (i = 0; i < p->count; i++) { |
1401 | struct omap1_cam_buf *buf = container_of(icf->vb_vidq.bufs[i], | 1401 | struct omap1_cam_buf *buf = container_of(icd->vb_vidq.bufs[i], |
1402 | struct omap1_cam_buf, vb); | 1402 | struct omap1_cam_buf, vb); |
1403 | buf->inwork = 0; | 1403 | buf->inwork = 0; |
1404 | INIT_LIST_HEAD(&buf->vb.queue); | 1404 | INIT_LIST_HEAD(&buf->vb.queue); |
@@ -1485,10 +1485,10 @@ static int omap1_cam_set_bus_param(struct soc_camera_device *icd, | |||
1485 | 1485 | ||
1486 | static unsigned int omap1_cam_poll(struct file *file, poll_table *pt) | 1486 | static unsigned int omap1_cam_poll(struct file *file, poll_table *pt) |
1487 | { | 1487 | { |
1488 | struct soc_camera_file *icf = file->private_data; | 1488 | struct soc_camera_device *icd = file->private_data; |
1489 | struct omap1_cam_buf *buf; | 1489 | struct omap1_cam_buf *buf; |
1490 | 1490 | ||
1491 | buf = list_entry(icf->vb_vidq.stream.next, struct omap1_cam_buf, | 1491 | buf = list_entry(icd->vb_vidq.stream.next, struct omap1_cam_buf, |
1492 | vb.stream); | 1492 | vb.stream); |
1493 | 1493 | ||
1494 | poll_wait(file, &buf->vb.done, pt); | 1494 | poll_wait(file, &buf->vb.done, pt); |
diff --git a/drivers/media/video/ov6650.c b/drivers/media/video/ov6650.c index b7cfeab0948c..cf93de988068 100644 --- a/drivers/media/video/ov6650.c +++ b/drivers/media/video/ov6650.c | |||
@@ -754,7 +754,7 @@ static int ov6650_g_fmt(struct v4l2_subdev *sd, | |||
754 | 754 | ||
755 | static bool is_unscaled_ok(int width, int height, struct v4l2_rect *rect) | 755 | static bool is_unscaled_ok(int width, int height, struct v4l2_rect *rect) |
756 | { | 756 | { |
757 | return (width > rect->width >> 1 || height > rect->height >> 1); | 757 | return width > rect->width >> 1 || height > rect->height >> 1; |
758 | } | 758 | } |
759 | 759 | ||
760 | static u8 to_clkrc(struct v4l2_fract *timeperframe, | 760 | static u8 to_clkrc(struct v4l2_fract *timeperframe, |
@@ -840,8 +840,6 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) | |||
840 | coma_mask |= COMA_BW | COMA_BYTE_SWAP | COMA_WORD_SWAP; | 840 | coma_mask |= COMA_BW | COMA_BYTE_SWAP | COMA_WORD_SWAP; |
841 | coma_set |= COMA_RAW_RGB | COMA_RGB; | 841 | coma_set |= COMA_RAW_RGB | COMA_RGB; |
842 | break; | 842 | break; |
843 | case 0: | ||
844 | break; | ||
845 | default: | 843 | default: |
846 | dev_err(&client->dev, "Pixel format not handled: 0x%x\n", code); | 844 | dev_err(&client->dev, "Pixel format not handled: 0x%x\n", code); |
847 | return -EINVAL; | 845 | return -EINVAL; |
@@ -1176,7 +1174,6 @@ static int ov6650_probe(struct i2c_client *client, | |||
1176 | 1174 | ||
1177 | if (ret) { | 1175 | if (ret) { |
1178 | icd->ops = NULL; | 1176 | icd->ops = NULL; |
1179 | i2c_set_clientdata(client, NULL); | ||
1180 | kfree(priv); | 1177 | kfree(priv); |
1181 | } | 1178 | } |
1182 | 1179 | ||
@@ -1187,7 +1184,6 @@ static int ov6650_remove(struct i2c_client *client) | |||
1187 | { | 1184 | { |
1188 | struct ov6650 *priv = to_ov6650(client); | 1185 | struct ov6650 *priv = to_ov6650(client); |
1189 | 1186 | ||
1190 | i2c_set_clientdata(client, NULL); | ||
1191 | kfree(priv); | 1187 | kfree(priv); |
1192 | return 0; | 1188 | return 0; |
1193 | } | 1189 | } |
diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c index 10a6cbf6a790..0911cb580e18 100644 --- a/drivers/media/video/saa7134/saa7134-cards.c +++ b/drivers/media/video/saa7134/saa7134-cards.c | |||
@@ -6661,6 +6661,18 @@ struct pci_device_id saa7134_pci_tbl[] = { | |||
6661 | .subdevice = 0x2804, | 6661 | .subdevice = 0x2804, |
6662 | .driver_data = SAA7134_BOARD_TECHNOTREND_BUDGET_T3000, | 6662 | .driver_data = SAA7134_BOARD_TECHNOTREND_BUDGET_T3000, |
6663 | }, { | 6663 | }, { |
6664 | .vendor = PCI_VENDOR_ID_PHILIPS, | ||
6665 | .device = PCI_DEVICE_ID_PHILIPS_SAA7133, | ||
6666 | .subvendor = 0x5ace, /* Beholder Intl. Ltd. */ | ||
6667 | .subdevice = 0x7190, | ||
6668 | .driver_data = SAA7134_BOARD_BEHOLD_H7, | ||
6669 | }, { | ||
6670 | .vendor = PCI_VENDOR_ID_PHILIPS, | ||
6671 | .device = PCI_DEVICE_ID_PHILIPS_SAA7133, | ||
6672 | .subvendor = 0x5ace, /* Beholder Intl. Ltd. */ | ||
6673 | .subdevice = 0x7090, | ||
6674 | .driver_data = SAA7134_BOARD_BEHOLD_A7, | ||
6675 | }, { | ||
6664 | /* --- boards without eeprom + subsystem ID --- */ | 6676 | /* --- boards without eeprom + subsystem ID --- */ |
6665 | .vendor = PCI_VENDOR_ID_PHILIPS, | 6677 | .vendor = PCI_VENDOR_ID_PHILIPS, |
6666 | .device = PCI_DEVICE_ID_PHILIPS_SAA7134, | 6678 | .device = PCI_DEVICE_ID_PHILIPS_SAA7134, |
@@ -6698,18 +6710,6 @@ struct pci_device_id saa7134_pci_tbl[] = { | |||
6698 | .subvendor = PCI_ANY_ID, | 6710 | .subvendor = PCI_ANY_ID, |
6699 | .subdevice = PCI_ANY_ID, | 6711 | .subdevice = PCI_ANY_ID, |
6700 | .driver_data = SAA7134_BOARD_UNKNOWN, | 6712 | .driver_data = SAA7134_BOARD_UNKNOWN, |
6701 | }, { | ||
6702 | .vendor = PCI_VENDOR_ID_PHILIPS, | ||
6703 | .device = PCI_DEVICE_ID_PHILIPS_SAA7133, | ||
6704 | .subvendor = 0x5ace, /* Beholder Intl. Ltd. */ | ||
6705 | .subdevice = 0x7190, | ||
6706 | .driver_data = SAA7134_BOARD_BEHOLD_H7, | ||
6707 | }, { | ||
6708 | .vendor = PCI_VENDOR_ID_PHILIPS, | ||
6709 | .device = PCI_DEVICE_ID_PHILIPS_SAA7133, | ||
6710 | .subvendor = 0x5ace, /* Beholder Intl. Ltd. */ | ||
6711 | .subdevice = 0x7090, | ||
6712 | .driver_data = SAA7134_BOARD_BEHOLD_A7, | ||
6713 | },{ | 6713 | },{ |
6714 | /* --- end of list --- */ | 6714 | /* --- end of list --- */ |
6715 | } | 6715 | } |
diff --git a/drivers/media/video/se401.c b/drivers/media/video/se401.c index 41d0166c0f95..41360d7c3e96 100644 --- a/drivers/media/video/se401.c +++ b/drivers/media/video/se401.c | |||
@@ -31,7 +31,6 @@ static const char version[] = "0.24"; | |||
31 | #include <linux/init.h> | 31 | #include <linux/init.h> |
32 | #include <linux/vmalloc.h> | 32 | #include <linux/vmalloc.h> |
33 | #include <linux/slab.h> | 33 | #include <linux/slab.h> |
34 | #include <linux/smp_lock.h> | ||
35 | #include <linux/pagemap.h> | 34 | #include <linux/pagemap.h> |
36 | #include <linux/usb.h> | 35 | #include <linux/usb.h> |
37 | #include "se401.h" | 36 | #include "se401.h" |
@@ -951,9 +950,9 @@ static int se401_open(struct file *file) | |||
951 | struct usb_se401 *se401 = (struct usb_se401 *)dev; | 950 | struct usb_se401 *se401 = (struct usb_se401 *)dev; |
952 | int err = 0; | 951 | int err = 0; |
953 | 952 | ||
954 | lock_kernel(); | 953 | mutex_lock(&se401->lock); |
955 | if (se401->user) { | 954 | if (se401->user) { |
956 | unlock_kernel(); | 955 | mutex_unlock(&se401->lock); |
957 | return -EBUSY; | 956 | return -EBUSY; |
958 | } | 957 | } |
959 | se401->fbuf = rvmalloc(se401->maxframesize * SE401_NUMFRAMES); | 958 | se401->fbuf = rvmalloc(se401->maxframesize * SE401_NUMFRAMES); |
@@ -962,7 +961,7 @@ static int se401_open(struct file *file) | |||
962 | else | 961 | else |
963 | err = -ENOMEM; | 962 | err = -ENOMEM; |
964 | se401->user = !err; | 963 | se401->user = !err; |
965 | unlock_kernel(); | 964 | mutex_unlock(&se401->lock); |
966 | 965 | ||
967 | return err; | 966 | return err; |
968 | } | 967 | } |
diff --git a/drivers/media/video/stk-webcam.c b/drivers/media/video/stk-webcam.c index f07a0f6b71c4..b5afe5f841ce 100644 --- a/drivers/media/video/stk-webcam.c +++ b/drivers/media/video/stk-webcam.c | |||
@@ -27,7 +27,6 @@ | |||
27 | #include <linux/kernel.h> | 27 | #include <linux/kernel.h> |
28 | #include <linux/errno.h> | 28 | #include <linux/errno.h> |
29 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
30 | #include <linux/smp_lock.h> | ||
31 | 30 | ||
32 | #include <linux/usb.h> | 31 | #include <linux/usb.h> |
33 | #include <linux/mm.h> | 32 | #include <linux/mm.h> |
@@ -673,14 +672,11 @@ static int v4l_stk_open(struct file *fp) | |||
673 | vdev = video_devdata(fp); | 672 | vdev = video_devdata(fp); |
674 | dev = vdev_to_camera(vdev); | 673 | dev = vdev_to_camera(vdev); |
675 | 674 | ||
676 | lock_kernel(); | ||
677 | if (dev == NULL || !is_present(dev)) { | 675 | if (dev == NULL || !is_present(dev)) { |
678 | unlock_kernel(); | ||
679 | return -ENXIO; | 676 | return -ENXIO; |
680 | } | 677 | } |
681 | fp->private_data = dev; | 678 | fp->private_data = dev; |
682 | usb_autopm_get_interface(dev->interface); | 679 | usb_autopm_get_interface(dev->interface); |
683 | unlock_kernel(); | ||
684 | 680 | ||
685 | return 0; | 681 | return 0; |
686 | } | 682 | } |
diff --git a/drivers/media/video/tlg2300/pd-main.c b/drivers/media/video/tlg2300/pd-main.c index 4555f4a5f4c8..c91424c0c135 100644 --- a/drivers/media/video/tlg2300/pd-main.c +++ b/drivers/media/video/tlg2300/pd-main.c | |||
@@ -36,7 +36,6 @@ | |||
36 | #include <linux/string.h> | 36 | #include <linux/string.h> |
37 | #include <linux/types.h> | 37 | #include <linux/types.h> |
38 | #include <linux/firmware.h> | 38 | #include <linux/firmware.h> |
39 | #include <linux/smp_lock.h> | ||
40 | 39 | ||
41 | #include "vendorcmds.h" | 40 | #include "vendorcmds.h" |
42 | #include "pd-common.h" | 41 | #include "pd-common.h" |
@@ -485,15 +484,11 @@ static void poseidon_disconnect(struct usb_interface *interface) | |||
485 | /*unregister v4l2 device */ | 484 | /*unregister v4l2 device */ |
486 | v4l2_device_unregister(&pd->v4l2_dev); | 485 | v4l2_device_unregister(&pd->v4l2_dev); |
487 | 486 | ||
488 | lock_kernel(); | 487 | pd_dvb_usb_device_exit(pd); |
489 | { | 488 | poseidon_fm_exit(pd); |
490 | pd_dvb_usb_device_exit(pd); | ||
491 | poseidon_fm_exit(pd); | ||
492 | 489 | ||
493 | poseidon_audio_free(pd); | 490 | poseidon_audio_free(pd); |
494 | pd_video_exit(pd); | 491 | pd_video_exit(pd); |
495 | } | ||
496 | unlock_kernel(); | ||
497 | 492 | ||
498 | usb_set_intfdata(interface, NULL); | 493 | usb_set_intfdata(interface, NULL); |
499 | kref_put(&pd->kref, poseidon_delete); | 494 | kref_put(&pd->kref, poseidon_delete); |
diff --git a/drivers/media/video/usbvideo/vicam.c b/drivers/media/video/usbvideo/vicam.c index 5d6fd01f918a..dc17cce2fbb6 100644 --- a/drivers/media/video/usbvideo/vicam.c +++ b/drivers/media/video/usbvideo/vicam.c | |||
@@ -43,7 +43,6 @@ | |||
43 | #include <linux/vmalloc.h> | 43 | #include <linux/vmalloc.h> |
44 | #include <linux/mm.h> | 44 | #include <linux/mm.h> |
45 | #include <linux/slab.h> | 45 | #include <linux/slab.h> |
46 | #include <linux/smp_lock.h> | ||
47 | #include <linux/mutex.h> | 46 | #include <linux/mutex.h> |
48 | #include <linux/firmware.h> | 47 | #include <linux/firmware.h> |
49 | #include <linux/ihex.h> | 48 | #include <linux/ihex.h> |
@@ -483,29 +482,28 @@ vicam_open(struct file *file) | |||
483 | return -EINVAL; | 482 | return -EINVAL; |
484 | } | 483 | } |
485 | 484 | ||
486 | /* the videodev_lock held above us protects us from | 485 | /* cam_lock/open_count protects us from simultaneous opens |
487 | * simultaneous opens...for now. we probably shouldn't | 486 | * ... for now. we probably shouldn't rely on this fact forever. |
488 | * rely on this fact forever. | ||
489 | */ | 487 | */ |
490 | 488 | ||
491 | lock_kernel(); | 489 | mutex_lock(&cam->cam_lock); |
492 | if (cam->open_count > 0) { | 490 | if (cam->open_count > 0) { |
493 | printk(KERN_INFO | 491 | printk(KERN_INFO |
494 | "vicam_open called on already opened camera"); | 492 | "vicam_open called on already opened camera"); |
495 | unlock_kernel(); | 493 | mutex_unlock(&cam->cam_lock); |
496 | return -EBUSY; | 494 | return -EBUSY; |
497 | } | 495 | } |
498 | 496 | ||
499 | cam->raw_image = kmalloc(VICAM_MAX_READ_SIZE, GFP_KERNEL); | 497 | cam->raw_image = kmalloc(VICAM_MAX_READ_SIZE, GFP_KERNEL); |
500 | if (!cam->raw_image) { | 498 | if (!cam->raw_image) { |
501 | unlock_kernel(); | 499 | mutex_unlock(&cam->cam_lock); |
502 | return -ENOMEM; | 500 | return -ENOMEM; |
503 | } | 501 | } |
504 | 502 | ||
505 | cam->framebuf = rvmalloc(VICAM_MAX_FRAME_SIZE * VICAM_FRAMES); | 503 | cam->framebuf = rvmalloc(VICAM_MAX_FRAME_SIZE * VICAM_FRAMES); |
506 | if (!cam->framebuf) { | 504 | if (!cam->framebuf) { |
507 | kfree(cam->raw_image); | 505 | kfree(cam->raw_image); |
508 | unlock_kernel(); | 506 | mutex_unlock(&cam->cam_lock); |
509 | return -ENOMEM; | 507 | return -ENOMEM; |
510 | } | 508 | } |
511 | 509 | ||
@@ -513,10 +511,17 @@ vicam_open(struct file *file) | |||
513 | if (!cam->cntrlbuf) { | 511 | if (!cam->cntrlbuf) { |
514 | kfree(cam->raw_image); | 512 | kfree(cam->raw_image); |
515 | rvfree(cam->framebuf, VICAM_MAX_FRAME_SIZE * VICAM_FRAMES); | 513 | rvfree(cam->framebuf, VICAM_MAX_FRAME_SIZE * VICAM_FRAMES); |
516 | unlock_kernel(); | 514 | mutex_unlock(&cam->cam_lock); |
517 | return -ENOMEM; | 515 | return -ENOMEM; |
518 | } | 516 | } |
519 | 517 | ||
518 | cam->needsDummyRead = 1; | ||
519 | cam->open_count++; | ||
520 | |||
521 | file->private_data = cam; | ||
522 | mutex_unlock(&cam->cam_lock); | ||
523 | |||
524 | |||
520 | // First upload firmware, then turn the camera on | 525 | // First upload firmware, then turn the camera on |
521 | 526 | ||
522 | if (!cam->is_initialized) { | 527 | if (!cam->is_initialized) { |
@@ -527,12 +532,6 @@ vicam_open(struct file *file) | |||
527 | 532 | ||
528 | set_camera_power(cam, 1); | 533 | set_camera_power(cam, 1); |
529 | 534 | ||
530 | cam->needsDummyRead = 1; | ||
531 | cam->open_count++; | ||
532 | |||
533 | file->private_data = cam; | ||
534 | unlock_kernel(); | ||
535 | |||
536 | return 0; | 535 | return 0; |
537 | } | 536 | } |
538 | 537 | ||
diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c index 0ca7978654b5..03f7f4670e9b 100644 --- a/drivers/media/video/v4l2-dev.c +++ b/drivers/media/video/v4l2-dev.c | |||
@@ -25,7 +25,6 @@ | |||
25 | #include <linux/init.h> | 25 | #include <linux/init.h> |
26 | #include <linux/kmod.h> | 26 | #include <linux/kmod.h> |
27 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
28 | #include <linux/smp_lock.h> | ||
29 | #include <asm/uaccess.h> | 28 | #include <asm/uaccess.h> |
30 | #include <asm/system.h> | 29 | #include <asm/system.h> |
31 | 30 | ||
@@ -247,10 +246,12 @@ static long v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | |||
247 | mutex_unlock(vdev->lock); | 246 | mutex_unlock(vdev->lock); |
248 | } else if (vdev->fops->ioctl) { | 247 | } else if (vdev->fops->ioctl) { |
249 | /* TODO: convert all drivers to unlocked_ioctl */ | 248 | /* TODO: convert all drivers to unlocked_ioctl */ |
250 | lock_kernel(); | 249 | static DEFINE_MUTEX(v4l2_ioctl_mutex); |
250 | |||
251 | mutex_lock(&v4l2_ioctl_mutex); | ||
251 | if (video_is_registered(vdev)) | 252 | if (video_is_registered(vdev)) |
252 | ret = vdev->fops->ioctl(filp, cmd, arg); | 253 | ret = vdev->fops->ioctl(filp, cmd, arg); |
253 | unlock_kernel(); | 254 | mutex_unlock(&v4l2_ioctl_mutex); |
254 | } else | 255 | } else |
255 | ret = -ENOTTY; | 256 | ret = -ENOTTY; |
256 | 257 | ||
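Here the lock_kernel()/unlock_kernel() pair around the legacy .ioctl path is replaced by a file-local v4l2_ioctl_mutex, so drivers still using .ioctl are serialized against one another without taking the BKL. A driver escapes this global mutex by switching to .unlocked_ioctl and handling its own locking; one option, sketched below with hypothetical driver names, is to let the V4L2 core serialize on vdev->lock (the per-device mechanism visible in the surrounding context lines).

    /* Hypothetical conversion away from the legacy .ioctl path above. */
    struct mydrv {
            struct video_device vdev;
            struct mutex lock;
    };

    static const struct v4l2_file_operations mydrv_fops = {
            .owner          = THIS_MODULE,
            .unlocked_ioctl = video_ioctl2,
    };

    static int mydrv_register(struct mydrv *dev)
    {
            mutex_init(&dev->lock);
            dev->vdev.fops = &mydrv_fops;
            dev->vdev.lock = &dev->lock;    /* core takes this around ioctls */
            /* dev->vdev.ioctl_ops and .release assumed to be set elsewhere */
            return video_register_device(&dev->vdev, VFL_TYPE_GRABBER, -1);
    }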
diff --git a/drivers/media/video/zoran/zoran.h b/drivers/media/video/zoran/zoran.h index 37fe16181e3c..27f05551183f 100644 --- a/drivers/media/video/zoran/zoran.h +++ b/drivers/media/video/zoran/zoran.h | |||
@@ -388,6 +388,7 @@ struct zoran { | |||
388 | struct videocodec *vfe; /* video front end */ | 388 | struct videocodec *vfe; /* video front end */ |
389 | 389 | ||
390 | struct mutex resource_lock; /* prevent evil stuff */ | 390 | struct mutex resource_lock; /* prevent evil stuff */ |
391 | struct mutex other_lock; /* please merge with above */ | ||
391 | 392 | ||
392 | u8 initialized; /* flag if zoran has been correctly initialized */ | 393 | u8 initialized; /* flag if zoran has been correctly initialized */ |
393 | int user; /* number of current users */ | 394 | int user; /* number of current users */ |
diff --git a/drivers/media/video/zoran/zoran_card.c b/drivers/media/video/zoran/zoran_card.c index 0aac376c3f7a..7e6d62467eaa 100644 --- a/drivers/media/video/zoran/zoran_card.c +++ b/drivers/media/video/zoran/zoran_card.c | |||
@@ -1227,6 +1227,7 @@ static int __devinit zoran_probe(struct pci_dev *pdev, | |||
1227 | snprintf(ZR_DEVNAME(zr), sizeof(ZR_DEVNAME(zr)), "MJPEG[%u]", zr->id); | 1227 | snprintf(ZR_DEVNAME(zr), sizeof(ZR_DEVNAME(zr)), "MJPEG[%u]", zr->id); |
1228 | spin_lock_init(&zr->spinlock); | 1228 | spin_lock_init(&zr->spinlock); |
1229 | mutex_init(&zr->resource_lock); | 1229 | mutex_init(&zr->resource_lock); |
1230 | mutex_init(&zr->other_lock); | ||
1230 | if (pci_enable_device(pdev)) | 1231 | if (pci_enable_device(pdev)) |
1231 | goto zr_unreg; | 1232 | goto zr_unreg; |
1232 | pci_read_config_byte(zr->pci_dev, PCI_CLASS_REVISION, &zr->revision); | 1233 | pci_read_config_byte(zr->pci_dev, PCI_CLASS_REVISION, &zr->revision); |
diff --git a/drivers/media/video/zoran/zoran_driver.c b/drivers/media/video/zoran/zoran_driver.c index 401082b853f0..67a52e844ae6 100644 --- a/drivers/media/video/zoran/zoran_driver.c +++ b/drivers/media/video/zoran/zoran_driver.c | |||
@@ -49,7 +49,6 @@ | |||
49 | #include <linux/module.h> | 49 | #include <linux/module.h> |
50 | #include <linux/delay.h> | 50 | #include <linux/delay.h> |
51 | #include <linux/slab.h> | 51 | #include <linux/slab.h> |
52 | #include <linux/smp_lock.h> | ||
53 | #include <linux/pci.h> | 52 | #include <linux/pci.h> |
54 | #include <linux/vmalloc.h> | 53 | #include <linux/vmalloc.h> |
55 | #include <linux/wait.h> | 54 | #include <linux/wait.h> |
@@ -913,7 +912,7 @@ static int zoran_open(struct file *file) | |||
913 | dprintk(2, KERN_INFO "%s: %s(%s, pid=[%d]), users(-)=%d\n", | 912 | dprintk(2, KERN_INFO "%s: %s(%s, pid=[%d]), users(-)=%d\n", |
914 | ZR_DEVNAME(zr), __func__, current->comm, task_pid_nr(current), zr->user + 1); | 913 | ZR_DEVNAME(zr), __func__, current->comm, task_pid_nr(current), zr->user + 1); |
915 | 914 | ||
916 | lock_kernel(); | 915 | mutex_lock(&zr->other_lock); |
917 | 916 | ||
918 | if (zr->user >= 2048) { | 917 | if (zr->user >= 2048) { |
919 | dprintk(1, KERN_ERR "%s: too many users (%d) on device\n", | 918 | dprintk(1, KERN_ERR "%s: too many users (%d) on device\n", |
@@ -963,14 +962,14 @@ static int zoran_open(struct file *file) | |||
963 | file->private_data = fh; | 962 | file->private_data = fh; |
964 | fh->zr = zr; | 963 | fh->zr = zr; |
965 | zoran_open_init_session(fh); | 964 | zoran_open_init_session(fh); |
966 | unlock_kernel(); | 965 | mutex_unlock(&zr->other_lock); |
967 | 966 | ||
968 | return 0; | 967 | return 0; |
969 | 968 | ||
970 | fail_fh: | 969 | fail_fh: |
971 | kfree(fh); | 970 | kfree(fh); |
972 | fail_unlock: | 971 | fail_unlock: |
973 | unlock_kernel(); | 972 | mutex_unlock(&zr->other_lock); |
974 | 973 | ||
975 | dprintk(2, KERN_INFO "%s: open failed (%d), users(-)=%d\n", | 974 | dprintk(2, KERN_INFO "%s: open failed (%d), users(-)=%d\n", |
976 | ZR_DEVNAME(zr), res, zr->user); | 975 | ZR_DEVNAME(zr), res, zr->user); |
@@ -989,7 +988,7 @@ zoran_close(struct file *file) | |||
989 | 988 | ||
990 | /* kernel locks (fs/device.c), so don't do that ourselves | 989 | /* kernel locks (fs/device.c), so don't do that ourselves |
991 | * (prevents deadlocks) */ | 990 | * (prevents deadlocks) */ |
992 | /*mutex_lock(&zr->resource_lock);*/ | 991 | mutex_lock(&zr->other_lock); |
993 | 992 | ||
994 | zoran_close_end_session(fh); | 993 | zoran_close_end_session(fh); |
995 | 994 | ||
@@ -1023,6 +1022,7 @@ zoran_close(struct file *file) | |||
1023 | encoder_call(zr, video, s_routing, 2, 0, 0); | 1022 | encoder_call(zr, video, s_routing, 2, 0, 0); |
1024 | } | 1023 | } |
1025 | } | 1024 | } |
1025 | mutex_unlock(&zr->other_lock); | ||
1026 | 1026 | ||
1027 | file->private_data = NULL; | 1027 | file->private_data = NULL; |
1028 | kfree(fh->overlay_mask); | 1028 | kfree(fh->overlay_mask); |
@@ -3370,11 +3370,26 @@ static const struct v4l2_ioctl_ops zoran_ioctl_ops = { | |||
3370 | #endif | 3370 | #endif |
3371 | }; | 3371 | }; |
3372 | 3372 | ||
3373 | /* please use zr->resource_lock consistently and kill this wrapper */ | ||
3374 | static long zoran_ioctl(struct file *file, unsigned int cmd, | ||
3375 | unsigned long arg) | ||
3376 | { | ||
3377 | struct zoran_fh *fh = file->private_data; | ||
3378 | struct zoran *zr = fh->zr; | ||
3379 | int ret; | ||
3380 | |||
3381 | mutex_lock(&zr->other_lock); | ||
3382 | ret = video_ioctl2(file, cmd, arg); | ||
3383 | mutex_unlock(&zr->other_lock); | ||
3384 | |||
3385 | return ret; | ||
3386 | } | ||
3387 | |||
3373 | static const struct v4l2_file_operations zoran_fops = { | 3388 | static const struct v4l2_file_operations zoran_fops = { |
3374 | .owner = THIS_MODULE, | 3389 | .owner = THIS_MODULE, |
3375 | .open = zoran_open, | 3390 | .open = zoran_open, |
3376 | .release = zoran_close, | 3391 | .release = zoran_close, |
3377 | .ioctl = video_ioctl2, | 3392 | .unlocked_ioctl = zoran_ioctl, |
3378 | .read = zoran_read, | 3393 | .read = zoran_read, |
3379 | .write = zoran_write, | 3394 | .write = zoran_write, |
3380 | .mmap = zoran_mmap, | 3395 | .mmap = zoran_mmap, |
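The zoran hunks above are a standard BKL-removal conversion: lock_kernel()/unlock_kernel() in open/close is replaced by a per-device mutex (other_lock), and the locked .ioctl entry point gives way to an .unlocked_ioctl wrapper that takes the same mutex around video_ioctl2(). A minimal sketch of the pattern for a hypothetical V4L2 driver (the foo_* names are illustrative only, not part of the zoran patch) looks like:

	struct foo_dev {
		struct mutex lock;	/* serializes open/release/ioctl, replaces the BKL */
	};

	static long foo_unlocked_ioctl(struct file *file, unsigned int cmd,
				       unsigned long arg)
	{
		struct foo_dev *dev = video_drvdata(file);
		long ret;

		mutex_lock(&dev->lock);
		ret = video_ioctl2(file, cmd, arg);	/* dispatch to the v4l2_ioctl_ops */
		mutex_unlock(&dev->lock);
		return ret;
	}

	static const struct v4l2_file_operations foo_fops = {
		.owner		= THIS_MODULE,
		.unlocked_ioctl	= foo_unlocked_ioctl,	/* instead of .ioctl = video_ioctl2 */
	};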
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c index e15220ff52fc..d784c36707c0 100644 --- a/drivers/message/fusion/mptfc.c +++ b/drivers/message/fusion/mptfc.c | |||
@@ -97,8 +97,7 @@ static u8 mptfcInternalCtx = MPT_MAX_PROTOCOL_DRIVERS; | |||
97 | 97 | ||
98 | static int mptfc_target_alloc(struct scsi_target *starget); | 98 | static int mptfc_target_alloc(struct scsi_target *starget); |
99 | static int mptfc_slave_alloc(struct scsi_device *sdev); | 99 | static int mptfc_slave_alloc(struct scsi_device *sdev); |
100 | static int mptfc_qcmd(struct scsi_cmnd *SCpnt, | 100 | static int mptfc_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt); |
101 | void (*done)(struct scsi_cmnd *)); | ||
102 | static void mptfc_target_destroy(struct scsi_target *starget); | 101 | static void mptfc_target_destroy(struct scsi_target *starget); |
103 | static void mptfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout); | 102 | static void mptfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout); |
104 | static void __devexit mptfc_remove(struct pci_dev *pdev); | 103 | static void __devexit mptfc_remove(struct pci_dev *pdev); |
@@ -650,7 +649,7 @@ mptfc_slave_alloc(struct scsi_device *sdev) | |||
650 | } | 649 | } |
651 | 650 | ||
652 | static int | 651 | static int |
653 | mptfc_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | 652 | mptfc_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) |
654 | { | 653 | { |
655 | struct mptfc_rport_info *ri; | 654 | struct mptfc_rport_info *ri; |
656 | struct fc_rport *rport = starget_to_rport(scsi_target(SCpnt->device)); | 655 | struct fc_rport *rport = starget_to_rport(scsi_target(SCpnt->device)); |
@@ -681,6 +680,8 @@ mptfc_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | |||
681 | return mptscsih_qcmd(SCpnt,done); | 680 | return mptscsih_qcmd(SCpnt,done); |
682 | } | 681 | } |
683 | 682 | ||
683 | static DEF_SCSI_QCMD(mptfc_qcmd) | ||
684 | |||
684 | /* | 685 | /* |
685 | * mptfc_display_port_link_speed - displaying link speed | 686 | * mptfc_display_port_link_speed - displaying link speed |
686 | * @ioc: Pointer to MPT_ADAPTER structure | 687 | * @ioc: Pointer to MPT_ADAPTER structure |
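The mptfc change above, and the mptsas/mptspi/i2o_scsi changes that follow, all apply the same host-lock push-down recipe: the original queuecommand body is renamed with an _lck suffix, and a DEF_SCSI_QCMD() wrapper supplies the new two-argument entry point now that the SCSI midlayer calls ->queuecommand() without the host lock held. The helper macro from <scsi/scsi_host.h> expands to roughly the following (an approximation from memory, not a verbatim copy):

	#define DEF_SCSI_QCMD(func_name) \
		int func_name(struct Scsi_Host *shost, struct scsi_cmnd *cmd)	\
		{								\
			unsigned long irq_flags;				\
			int rc;							\
			spin_lock_irqsave(shost->host_lock, irq_flags);	\
			scsi_cmd_get_serial(shost, cmd);			\
			rc = func_name##_lck(cmd, cmd->scsi_done);		\
			spin_unlock_irqrestore(shost->host_lock, irq_flags);	\
			return rc;						\
		}

Drivers audited for lock-free queueing can later drop the wrapper and implement the two-argument form directly.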
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index 83a5115f0251..d48c2c6058e1 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c | |||
@@ -1889,7 +1889,7 @@ mptsas_slave_alloc(struct scsi_device *sdev) | |||
1889 | } | 1889 | } |
1890 | 1890 | ||
1891 | static int | 1891 | static int |
1892 | mptsas_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | 1892 | mptsas_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) |
1893 | { | 1893 | { |
1894 | MPT_SCSI_HOST *hd; | 1894 | MPT_SCSI_HOST *hd; |
1895 | MPT_ADAPTER *ioc; | 1895 | MPT_ADAPTER *ioc; |
@@ -1913,6 +1913,8 @@ mptsas_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | |||
1913 | return mptscsih_qcmd(SCpnt,done); | 1913 | return mptscsih_qcmd(SCpnt,done); |
1914 | } | 1914 | } |
1915 | 1915 | ||
1916 | static DEF_SCSI_QCMD(mptsas_qcmd) | ||
1917 | |||
1916 | /** | 1918 | /** |
1917 | * mptsas_mptsas_eh_timed_out - resets the scsi_cmnd timeout | 1919 | * mptsas_mptsas_eh_timed_out - resets the scsi_cmnd timeout |
1918 | * if the device under question is currently in the | 1920 | * if the device under question is currently in the |
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c index 0e2803155ae2..6d9568d2ec59 100644 --- a/drivers/message/fusion/mptspi.c +++ b/drivers/message/fusion/mptspi.c | |||
@@ -780,7 +780,7 @@ static int mptspi_slave_configure(struct scsi_device *sdev) | |||
780 | } | 780 | } |
781 | 781 | ||
782 | static int | 782 | static int |
783 | mptspi_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | 783 | mptspi_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) |
784 | { | 784 | { |
785 | struct _MPT_SCSI_HOST *hd = shost_priv(SCpnt->device->host); | 785 | struct _MPT_SCSI_HOST *hd = shost_priv(SCpnt->device->host); |
786 | VirtDevice *vdevice = SCpnt->device->hostdata; | 786 | VirtDevice *vdevice = SCpnt->device->hostdata; |
@@ -805,6 +805,8 @@ mptspi_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | |||
805 | return mptscsih_qcmd(SCpnt,done); | 805 | return mptscsih_qcmd(SCpnt,done); |
806 | } | 806 | } |
807 | 807 | ||
808 | static DEF_SCSI_QCMD(mptspi_qcmd) | ||
809 | |||
808 | static void mptspi_slave_destroy(struct scsi_device *sdev) | 810 | static void mptspi_slave_destroy(struct scsi_device *sdev) |
809 | { | 811 | { |
810 | struct scsi_target *starget = scsi_target(sdev); | 812 | struct scsi_target *starget = scsi_target(sdev); |
diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c index ea6b2197da8a..97bdf82ec905 100644 --- a/drivers/message/i2o/i2o_scsi.c +++ b/drivers/message/i2o/i2o_scsi.c | |||
@@ -506,7 +506,7 @@ static struct i2o_driver i2o_scsi_driver = { | |||
506 | * Locks: takes the controller lock on error path only | 506 | * Locks: takes the controller lock on error path only |
507 | */ | 507 | */ |
508 | 508 | ||
509 | static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt, | 509 | static int i2o_scsi_queuecommand_lck(struct scsi_cmnd *SCpnt, |
510 | void (*done) (struct scsi_cmnd *)) | 510 | void (*done) (struct scsi_cmnd *)) |
511 | { | 511 | { |
512 | struct i2o_controller *c; | 512 | struct i2o_controller *c; |
@@ -688,7 +688,9 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt, | |||
688 | 688 | ||
689 | exit: | 689 | exit: |
690 | return rc; | 690 | return rc; |
691 | }; | 691 | } |
692 | |||
693 | static DEF_SCSI_QCMD(i2o_scsi_queuecommand) | ||
692 | 694 | ||
693 | /** | 695 | /** |
694 | * i2o_scsi_abort - abort a running command | 696 | * i2o_scsi_abort - abort a running command |
diff --git a/drivers/misc/apds9802als.c b/drivers/misc/apds9802als.c index f9b91ba8900c..644d4cd071cc 100644 --- a/drivers/misc/apds9802als.c +++ b/drivers/misc/apds9802als.c | |||
@@ -123,7 +123,7 @@ static ssize_t als_sensing_range_store(struct device *dev, | |||
123 | { | 123 | { |
124 | struct i2c_client *client = to_i2c_client(dev); | 124 | struct i2c_client *client = to_i2c_client(dev); |
125 | struct als_data *data = i2c_get_clientdata(client); | 125 | struct als_data *data = i2c_get_clientdata(client); |
126 | unsigned int ret_val; | 126 | int ret_val; |
127 | unsigned long val; | 127 | unsigned long val; |
128 | 128 | ||
129 | if (strict_strtoul(buf, 10, &val)) | 129 | if (strict_strtoul(buf, 10, &val)) |
@@ -251,7 +251,6 @@ static int apds9802als_probe(struct i2c_client *client, | |||
251 | 251 | ||
252 | return res; | 252 | return res; |
253 | als_error1: | 253 | als_error1: |
254 | i2c_set_clientdata(client, NULL); | ||
255 | kfree(data); | 254 | kfree(data); |
256 | return res; | 255 | return res; |
257 | } | 256 | } |
diff --git a/drivers/misc/bh1770glc.c b/drivers/misc/bh1770glc.c index cee632e645e1..d79a972f2c79 100644 --- a/drivers/misc/bh1770glc.c +++ b/drivers/misc/bh1770glc.c | |||
@@ -649,7 +649,7 @@ static ssize_t bh1770_power_state_store(struct device *dev, | |||
649 | { | 649 | { |
650 | struct bh1770_chip *chip = dev_get_drvdata(dev); | 650 | struct bh1770_chip *chip = dev_get_drvdata(dev); |
651 | unsigned long value; | 651 | unsigned long value; |
652 | size_t ret; | 652 | ssize_t ret; |
653 | 653 | ||
654 | if (strict_strtoul(buf, 0, &value)) | 654 | if (strict_strtoul(buf, 0, &value)) |
655 | return -EINVAL; | 655 | return -EINVAL; |
@@ -659,8 +659,12 @@ static ssize_t bh1770_power_state_store(struct device *dev, | |||
659 | pm_runtime_get_sync(dev); | 659 | pm_runtime_get_sync(dev); |
660 | 660 | ||
661 | ret = bh1770_lux_rate(chip, chip->lux_rate_index); | 661 | ret = bh1770_lux_rate(chip, chip->lux_rate_index); |
662 | ret |= bh1770_lux_interrupt_control(chip, BH1770_ENABLE); | 662 | if (ret < 0) { |
663 | pm_runtime_put(dev); | ||
664 | goto leave; | ||
665 | } | ||
663 | 666 | ||
667 | ret = bh1770_lux_interrupt_control(chip, BH1770_ENABLE); | ||
664 | if (ret < 0) { | 668 | if (ret < 0) { |
665 | pm_runtime_put(dev); | 669 | pm_runtime_put(dev); |
666 | goto leave; | 670 | goto leave; |
diff --git a/drivers/misc/isl29020.c b/drivers/misc/isl29020.c index 34fe835921c4..ca47e6285075 100644 --- a/drivers/misc/isl29020.c +++ b/drivers/misc/isl29020.c | |||
@@ -87,7 +87,7 @@ static ssize_t als_sensing_range_store(struct device *dev, | |||
87 | struct device_attribute *attr, const char *buf, size_t count) | 87 | struct device_attribute *attr, const char *buf, size_t count) |
88 | { | 88 | { |
89 | struct i2c_client *client = to_i2c_client(dev); | 89 | struct i2c_client *client = to_i2c_client(dev); |
90 | unsigned int ret_val; | 90 | int ret_val; |
91 | unsigned long val; | 91 | unsigned long val; |
92 | 92 | ||
93 | if (strict_strtoul(buf, 10, &val)) | 93 | if (strict_strtoul(buf, 10, &val)) |
@@ -106,6 +106,8 @@ static ssize_t als_sensing_range_store(struct device *dev, | |||
106 | val = 4; | 106 | val = 4; |
107 | 107 | ||
108 | ret_val = i2c_smbus_read_byte_data(client, 0x00); | 108 | ret_val = i2c_smbus_read_byte_data(client, 0x00); |
109 | if (ret_val < 0) | ||
110 | return ret_val; | ||
109 | 111 | ||
110 | ret_val &= 0xFC; /* reset the bits before setting them */ | 112 | ret_val &= 0xFC; /* reset the bits before setting them */ |
111 | ret_val |= val - 1; | 113 | ret_val |= val - 1; |
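The apds9802als and isl29020 fixes above make ret_val a signed int so the negative errno returned by i2c_smbus_read_byte_data() is actually detectable, instead of being silently folded into the register value. The corrected read-modify-write sequence looks roughly like this (a sketch; the 0x00 register and 0xFC mask come from the hunk above, while the write-back and return path are assumed):

	int ret_val;

	ret_val = i2c_smbus_read_byte_data(client, 0x00);
	if (ret_val < 0)		/* SMBus helpers return a negative errno on failure */
		return ret_val;

	ret_val &= 0xFC;		/* clear the sensing-range bits */
	ret_val |= val - 1;		/* program the requested range */

	ret_val = i2c_smbus_write_byte_data(client, 0x00, ret_val);
	if (ret_val < 0)
		return ret_val;

	return count;			/* sysfs store: report the bytes consumed */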
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c index 459614d2d7bc..94d5f59d5a6f 100644 --- a/drivers/net/bnx2x/bnx2x_cmn.c +++ b/drivers/net/bnx2x/bnx2x_cmn.c | |||
@@ -1680,7 +1680,7 @@ static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb) | |||
1680 | rc = XMIT_PLAIN; | 1680 | rc = XMIT_PLAIN; |
1681 | 1681 | ||
1682 | else { | 1682 | else { |
1683 | if (skb->protocol == htons(ETH_P_IPV6)) { | 1683 | if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) { |
1684 | rc = XMIT_CSUM_V6; | 1684 | rc = XMIT_CSUM_V6; |
1685 | if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) | 1685 | if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) |
1686 | rc |= XMIT_CSUM_TCP; | 1686 | rc |= XMIT_CSUM_TCP; |
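The bnx2x change above, and the ixgbe changes further down, stop testing skb->protocol directly and use vlan_get_protocol(skb) instead, so that a VLAN header present in the packet data no longer hides the encapsulated EtherType from the checksum/TSO/FCoE classification. The helper behaves approximately like the sketch below (simplified from the if_vlan.h inline; the real version reads the header more defensively):

	static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
	{
		/* Out-of-band (hardware-accelerated) tag, or no 802.1Q header at all:
		 * skb->protocol already names the real payload protocol. */
		if (vlan_tx_tag_present(skb) ||
		    skb->protocol != cpu_to_be16(ETH_P_8021Q))
			return skb->protocol;

		/* In-band 802.1Q header: report the encapsulated protocol instead. */
		return ((const struct vlan_ethhdr *)skb->data)->h_vlan_encapsulated_proto;
	}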
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c index 6de5e2e448a5..c3449bbc585a 100644 --- a/drivers/net/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/cxgb4vf/cxgb4vf_main.c | |||
@@ -753,7 +753,9 @@ static int cxgb4vf_open(struct net_device *dev) | |||
753 | if (err) | 753 | if (err) |
754 | return err; | 754 | return err; |
755 | set_bit(pi->port_id, &adapter->open_device_map); | 755 | set_bit(pi->port_id, &adapter->open_device_map); |
756 | link_start(dev); | 756 | err = link_start(dev); |
757 | if (err) | ||
758 | return err; | ||
757 | netif_tx_start_all_queues(dev); | 759 | netif_tx_start_all_queues(dev); |
758 | return 0; | 760 | return 0; |
759 | } | 761 | } |
@@ -1103,18 +1105,6 @@ static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr) | |||
1103 | return 0; | 1105 | return 0; |
1104 | } | 1106 | } |
1105 | 1107 | ||
1106 | /* | ||
1107 | * Return a TX Queue on which to send the specified skb. | ||
1108 | */ | ||
1109 | static u16 cxgb4vf_select_queue(struct net_device *dev, struct sk_buff *skb) | ||
1110 | { | ||
1111 | /* | ||
1112 | * XXX For now just use the default hash but we probably want to | ||
1113 | * XXX look at other possibilities ... | ||
1114 | */ | ||
1115 | return skb_tx_hash(dev, skb); | ||
1116 | } | ||
1117 | |||
1118 | #ifdef CONFIG_NET_POLL_CONTROLLER | 1108 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1119 | /* | 1109 | /* |
1120 | * Poll all of our receive queues. This is called outside of normal interrupt | 1110 | * Poll all of our receive queues. This is called outside of normal interrupt |
@@ -2075,6 +2065,22 @@ static int adap_init0(struct adapter *adapter) | |||
2075 | } | 2065 | } |
2076 | 2066 | ||
2077 | /* | 2067 | /* |
2068 | * Some environments do not properly handle PCIE FLRs -- e.g. in Linux | ||
2069 | * 2.6.31 and later we can't call pci_reset_function() in order to | ||
2070 | * issue an FLR because of a self-deadlock on the device semaphore. | ||
2071 | * Meanwhile, the OS infrastructure doesn't issue FLRs in all the | ||
2072 | * cases where they're needed -- for instance, some versions of KVM | ||
2073 | * fail to reset "Assigned Devices" when the VM reboots. Therefore we | ||
2074 | * use the firmware-based reset in order to reset any per-function | ||
2075 | * state. | ||
2076 | */ | ||
2077 | err = t4vf_fw_reset(adapter); | ||
2078 | if (err < 0) { | ||
2079 | dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err); | ||
2080 | return err; | ||
2081 | } | ||
2082 | |||
2083 | /* | ||
2078 | * Grab basic operational parameters. These will predominantly have | 2084 | * Grab basic operational parameters. These will predominantly have |
2079 | * been set up by the Physical Function Driver or will be hard coded | 2085 | * been set up by the Physical Function Driver or will be hard coded |
2080 | * into the adapter. We just have to live with them ... Note that | 2086 | * into the adapter. We just have to live with them ... Note that |
@@ -2417,7 +2423,6 @@ static const struct net_device_ops cxgb4vf_netdev_ops = { | |||
2417 | .ndo_get_stats = cxgb4vf_get_stats, | 2423 | .ndo_get_stats = cxgb4vf_get_stats, |
2418 | .ndo_set_rx_mode = cxgb4vf_set_rxmode, | 2424 | .ndo_set_rx_mode = cxgb4vf_set_rxmode, |
2419 | .ndo_set_mac_address = cxgb4vf_set_mac_addr, | 2425 | .ndo_set_mac_address = cxgb4vf_set_mac_addr, |
2420 | .ndo_select_queue = cxgb4vf_select_queue, | ||
2421 | .ndo_validate_addr = eth_validate_addr, | 2426 | .ndo_validate_addr = eth_validate_addr, |
2422 | .ndo_do_ioctl = cxgb4vf_do_ioctl, | 2427 | .ndo_do_ioctl = cxgb4vf_do_ioctl, |
2423 | .ndo_change_mtu = cxgb4vf_change_mtu, | 2428 | .ndo_change_mtu = cxgb4vf_change_mtu, |
@@ -2624,7 +2629,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev, | |||
2624 | netdev->do_ioctl = cxgb4vf_do_ioctl; | 2629 | netdev->do_ioctl = cxgb4vf_do_ioctl; |
2625 | netdev->change_mtu = cxgb4vf_change_mtu; | 2630 | netdev->change_mtu = cxgb4vf_change_mtu; |
2626 | netdev->set_mac_address = cxgb4vf_set_mac_addr; | 2631 | netdev->set_mac_address = cxgb4vf_set_mac_addr; |
2627 | netdev->select_queue = cxgb4vf_select_queue; | ||
2628 | #ifdef CONFIG_NET_POLL_CONTROLLER | 2632 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2629 | netdev->poll_controller = cxgb4vf_poll_controller; | 2633 | netdev->poll_controller = cxgb4vf_poll_controller; |
2630 | #endif | 2634 | #endif |
@@ -2843,6 +2847,14 @@ static struct pci_device_id cxgb4vf_pci_tbl[] = { | |||
2843 | CH_DEVICE(0x4800, 0), /* T440-dbg */ | 2847 | CH_DEVICE(0x4800, 0), /* T440-dbg */ |
2844 | CH_DEVICE(0x4801, 0), /* T420-cr */ | 2848 | CH_DEVICE(0x4801, 0), /* T420-cr */ |
2845 | CH_DEVICE(0x4802, 0), /* T422-cr */ | 2849 | CH_DEVICE(0x4802, 0), /* T422-cr */ |
2850 | CH_DEVICE(0x4803, 0), /* T440-cr */ | ||
2851 | CH_DEVICE(0x4804, 0), /* T420-bch */ | ||
2852 | CH_DEVICE(0x4805, 0), /* T440-bch */ | ||
2853 | CH_DEVICE(0x4806, 0), /* T460-ch */ | ||
2854 | CH_DEVICE(0x4807, 0), /* T420-so */ | ||
2855 | CH_DEVICE(0x4808, 0), /* T420-cx */ | ||
2856 | CH_DEVICE(0x4809, 0), /* T420-bt */ | ||
2857 | CH_DEVICE(0x480a, 0), /* T404-bt */ | ||
2846 | { 0, } | 2858 | { 0, } |
2847 | }; | 2859 | }; |
2848 | 2860 | ||
diff --git a/drivers/net/cxgb4vf/sge.c b/drivers/net/cxgb4vf/sge.c index f10864ddafbe..ecf0770bf0ff 100644 --- a/drivers/net/cxgb4vf/sge.c +++ b/drivers/net/cxgb4vf/sge.c | |||
@@ -154,13 +154,14 @@ enum { | |||
154 | */ | 154 | */ |
155 | RX_COPY_THRES = 256, | 155 | RX_COPY_THRES = 256, |
156 | RX_PULL_LEN = 128, | 156 | RX_PULL_LEN = 128, |
157 | }; | ||
158 | 157 | ||
159 | /* | 158 | /* |
160 | * Can't define this in the above enum because PKTSHIFT isn't a constant in | 159 | * Main body length for sk_buffs used for RX Ethernet packets with |
161 | * the VF Driver ... | 160 | * fragments. Should be >= RX_PULL_LEN but possibly bigger to give |
162 | */ | 161 | * pskb_may_pull() some room. |
163 | #define RX_PKT_PULL_LEN (RX_PULL_LEN + PKTSHIFT) | 162 | */ |
163 | RX_SKB_LEN = 512, | ||
164 | }; | ||
164 | 165 | ||
165 | /* | 166 | /* |
166 | * Software state per TX descriptor. | 167 | * Software state per TX descriptor. |
@@ -1355,6 +1356,67 @@ out_free: | |||
1355 | } | 1356 | } |
1356 | 1357 | ||
1357 | /** | 1358 | /** |
1359 | * t4vf_pktgl_to_skb - build an sk_buff from a packet gather list | ||
1360 | * @gl: the gather list | ||
1361 | * @skb_len: size of sk_buff main body if it carries fragments | ||
1362 | * @pull_len: amount of data to move to the sk_buff's main body | ||
1363 | * | ||
1364 | * Builds an sk_buff from the given packet gather list. Returns the | ||
1365 | * sk_buff or %NULL if sk_buff allocation failed. | ||
1366 | */ | ||
1367 | struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl, | ||
1368 | unsigned int skb_len, unsigned int pull_len) | ||
1369 | { | ||
1370 | struct sk_buff *skb; | ||
1371 | struct skb_shared_info *ssi; | ||
1372 | |||
1373 | /* | ||
1374 | * If the ingress packet is small enough, allocate an skb large enough | ||
1375 | * for all of the data and copy it inline. Otherwise, allocate an skb | ||
1376 | * with enough room to pull in the header and reference the rest of | ||
1377 | * the data via the skb fragment list. | ||
1378 | * | ||
1379 | * Below we rely on RX_COPY_THRES being less than the smallest Rx | ||
1380 | * buffer size, which is expected since buffers are at least | ||
1381 | * PAGE_SIZEd. In this case packets up to RX_COPY_THRES have only one | ||
1382 | * fragment. | ||
1383 | */ | ||
1384 | if (gl->tot_len <= RX_COPY_THRES) { | ||
1385 | /* small packets have only one fragment */ | ||
1386 | skb = alloc_skb(gl->tot_len, GFP_ATOMIC); | ||
1387 | if (unlikely(!skb)) | ||
1388 | goto out; | ||
1389 | __skb_put(skb, gl->tot_len); | ||
1390 | skb_copy_to_linear_data(skb, gl->va, gl->tot_len); | ||
1391 | } else { | ||
1392 | skb = alloc_skb(skb_len, GFP_ATOMIC); | ||
1393 | if (unlikely(!skb)) | ||
1394 | goto out; | ||
1395 | __skb_put(skb, pull_len); | ||
1396 | skb_copy_to_linear_data(skb, gl->va, pull_len); | ||
1397 | |||
1398 | ssi = skb_shinfo(skb); | ||
1399 | ssi->frags[0].page = gl->frags[0].page; | ||
1400 | ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len; | ||
1401 | ssi->frags[0].size = gl->frags[0].size - pull_len; | ||
1402 | if (gl->nfrags > 1) | ||
1403 | memcpy(&ssi->frags[1], &gl->frags[1], | ||
1404 | (gl->nfrags-1) * sizeof(skb_frag_t)); | ||
1405 | ssi->nr_frags = gl->nfrags; | ||
1406 | |||
1407 | skb->len = gl->tot_len; | ||
1408 | skb->data_len = skb->len - pull_len; | ||
1409 | skb->truesize += skb->data_len; | ||
1410 | |||
1411 | /* Get a reference for the last page, we don't own it */ | ||
1412 | get_page(gl->frags[gl->nfrags - 1].page); | ||
1413 | } | ||
1414 | |||
1415 | out: | ||
1416 | return skb; | ||
1417 | } | ||
1418 | |||
1419 | /** | ||
1358 | * t4vf_pktgl_free - free a packet gather list | 1420 | * t4vf_pktgl_free - free a packet gather list |
1359 | * @gl: the gather list | 1421 | * @gl: the gather list |
1360 | * | 1422 | * |
@@ -1463,10 +1525,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp, | |||
1463 | { | 1525 | { |
1464 | struct sk_buff *skb; | 1526 | struct sk_buff *skb; |
1465 | struct port_info *pi; | 1527 | struct port_info *pi; |
1466 | struct skb_shared_info *ssi; | ||
1467 | const struct cpl_rx_pkt *pkt = (void *)&rsp[1]; | 1528 | const struct cpl_rx_pkt *pkt = (void *)&rsp[1]; |
1468 | bool csum_ok = pkt->csum_calc && !pkt->err_vec; | 1529 | bool csum_ok = pkt->csum_calc && !pkt->err_vec; |
1469 | unsigned int len = be16_to_cpu(pkt->len); | ||
1470 | struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); | 1530 | struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); |
1471 | 1531 | ||
1472 | /* | 1532 | /* |
@@ -1481,42 +1541,14 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp, | |||
1481 | } | 1541 | } |
1482 | 1542 | ||
1483 | /* | 1543 | /* |
1484 | * If the ingress packet is small enough, allocate an skb large enough | 1544 | * Convert the Packet Gather List into an skb. |
1485 | * for all of the data and copy it inline. Otherwise, allocate an skb | ||
1486 | * with enough room to pull in the header and reference the rest of | ||
1487 | * the data via the skb fragment list. | ||
1488 | */ | 1545 | */ |
1489 | if (len <= RX_COPY_THRES) { | 1546 | skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN); |
1490 | /* small packets have only one fragment */ | 1547 | if (unlikely(!skb)) { |
1491 | skb = alloc_skb(gl->frags[0].size, GFP_ATOMIC); | 1548 | t4vf_pktgl_free(gl); |
1492 | if (!skb) | 1549 | rxq->stats.rx_drops++; |
1493 | goto nomem; | 1550 | return 0; |
1494 | __skb_put(skb, gl->frags[0].size); | ||
1495 | skb_copy_to_linear_data(skb, gl->va, gl->frags[0].size); | ||
1496 | } else { | ||
1497 | skb = alloc_skb(RX_PKT_PULL_LEN, GFP_ATOMIC); | ||
1498 | if (!skb) | ||
1499 | goto nomem; | ||
1500 | __skb_put(skb, RX_PKT_PULL_LEN); | ||
1501 | skb_copy_to_linear_data(skb, gl->va, RX_PKT_PULL_LEN); | ||
1502 | |||
1503 | ssi = skb_shinfo(skb); | ||
1504 | ssi->frags[0].page = gl->frags[0].page; | ||
1505 | ssi->frags[0].page_offset = (gl->frags[0].page_offset + | ||
1506 | RX_PKT_PULL_LEN); | ||
1507 | ssi->frags[0].size = gl->frags[0].size - RX_PKT_PULL_LEN; | ||
1508 | if (gl->nfrags > 1) | ||
1509 | memcpy(&ssi->frags[1], &gl->frags[1], | ||
1510 | (gl->nfrags-1) * sizeof(skb_frag_t)); | ||
1511 | ssi->nr_frags = gl->nfrags; | ||
1512 | skb->len = len + PKTSHIFT; | ||
1513 | skb->data_len = skb->len - RX_PKT_PULL_LEN; | ||
1514 | skb->truesize += skb->data_len; | ||
1515 | |||
1516 | /* Get a reference for the last page, we don't own it */ | ||
1517 | get_page(gl->frags[gl->nfrags - 1].page); | ||
1518 | } | 1551 | } |
1519 | |||
1520 | __skb_pull(skb, PKTSHIFT); | 1552 | __skb_pull(skb, PKTSHIFT); |
1521 | skb->protocol = eth_type_trans(skb, rspq->netdev); | 1553 | skb->protocol = eth_type_trans(skb, rspq->netdev); |
1522 | skb_record_rx_queue(skb, rspq->idx); | 1554 | skb_record_rx_queue(skb, rspq->idx); |
@@ -1549,11 +1581,6 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp, | |||
1549 | netif_receive_skb(skb); | 1581 | netif_receive_skb(skb); |
1550 | 1582 | ||
1551 | return 0; | 1583 | return 0; |
1552 | |||
1553 | nomem: | ||
1554 | t4vf_pktgl_free(gl); | ||
1555 | rxq->stats.rx_drops++; | ||
1556 | return 0; | ||
1557 | } | 1584 | } |
1558 | 1585 | ||
1559 | /** | 1586 | /** |
@@ -1679,6 +1706,7 @@ int process_responses(struct sge_rspq *rspq, int budget) | |||
1679 | } | 1706 | } |
1680 | len = RSPD_LEN(len); | 1707 | len = RSPD_LEN(len); |
1681 | } | 1708 | } |
1709 | gl.tot_len = len; | ||
1682 | 1710 | ||
1683 | /* | 1711 | /* |
1684 | * Gather packet fragments. | 1712 | * Gather packet fragments. |
diff --git a/drivers/net/cxgb4vf/t4vf_common.h b/drivers/net/cxgb4vf/t4vf_common.h index 873cb7d86c57..a65c80aed1f2 100644 --- a/drivers/net/cxgb4vf/t4vf_common.h +++ b/drivers/net/cxgb4vf/t4vf_common.h | |||
@@ -235,6 +235,7 @@ static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd, | |||
235 | int __devinit t4vf_wait_dev_ready(struct adapter *); | 235 | int __devinit t4vf_wait_dev_ready(struct adapter *); |
236 | int __devinit t4vf_port_init(struct adapter *, int); | 236 | int __devinit t4vf_port_init(struct adapter *, int); |
237 | 237 | ||
238 | int t4vf_fw_reset(struct adapter *); | ||
238 | int t4vf_query_params(struct adapter *, unsigned int, const u32 *, u32 *); | 239 | int t4vf_query_params(struct adapter *, unsigned int, const u32 *, u32 *); |
239 | int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *); | 240 | int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *); |
240 | 241 | ||
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c index ea1c123f0cb4..e306c20dfaee 100644 --- a/drivers/net/cxgb4vf/t4vf_hw.c +++ b/drivers/net/cxgb4vf/t4vf_hw.c | |||
@@ -326,6 +326,25 @@ int __devinit t4vf_port_init(struct adapter *adapter, int pidx) | |||
326 | } | 326 | } |
327 | 327 | ||
328 | /** | 328 | /** |
329 | * t4vf_fw_reset - issue a reset to FW | ||
330 | * @adapter: the adapter | ||
331 | * | ||
332 | * Issues a reset command to FW. For a Physical Function this would | ||
333 | * result in the Firmware resetting all of its state. For a Virtual | ||
334 | * Function this just resets the state associated with the VF. | ||
335 | */ | ||
336 | int t4vf_fw_reset(struct adapter *adapter) | ||
337 | { | ||
338 | struct fw_reset_cmd cmd; | ||
339 | |||
340 | memset(&cmd, 0, sizeof(cmd)); | ||
341 | cmd.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RESET_CMD) | | ||
342 | FW_CMD_WRITE); | ||
343 | cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); | ||
344 | return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); | ||
345 | } | ||
346 | |||
347 | /** | ||
329 | * t4vf_query_params - query FW or device parameters | 348 | * t4vf_query_params - query FW or device parameters |
330 | * @adapter: the adapter | 349 | * @adapter: the adapter |
331 | * @nparams: the number of parameters | 350 | * @nparams: the number of parameters |
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c index 5c566ebc54b8..3bc8e276ba4d 100644 --- a/drivers/net/gianfar_ethtool.c +++ b/drivers/net/gianfar_ethtool.c | |||
@@ -635,9 +635,10 @@ static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | |||
635 | if (wol->wolopts & ~WAKE_MAGIC) | 635 | if (wol->wolopts & ~WAKE_MAGIC) |
636 | return -EINVAL; | 636 | return -EINVAL; |
637 | 637 | ||
638 | device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC); | ||
639 | |||
638 | spin_lock_irqsave(&priv->bflock, flags); | 640 | spin_lock_irqsave(&priv->bflock, flags); |
639 | priv->wol_en = wol->wolopts & WAKE_MAGIC ? 1 : 0; | 641 | priv->wol_en = !!device_may_wakeup(&dev->dev); |
640 | device_set_wakeup_enable(&dev->dev, priv->wol_en); | ||
641 | spin_unlock_irqrestore(&priv->bflock, flags); | 642 | spin_unlock_irqrestore(&priv->bflock, flags); |
642 | 643 | ||
643 | return 0; | 644 | return 0; |
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 2bd3eb4ee5a1..fbad4d819608 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -764,8 +764,9 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, | |||
764 | #ifdef IXGBE_FCOE | 764 | #ifdef IXGBE_FCOE |
765 | /* adjust for FCoE Sequence Offload */ | 765 | /* adjust for FCoE Sequence Offload */ |
766 | if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) | 766 | if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) |
767 | && (skb->protocol == htons(ETH_P_FCOE)) && | 767 | && skb_is_gso(skb) |
768 | skb_is_gso(skb)) { | 768 | && vlan_get_protocol(skb) == |
769 | htons(ETH_P_FCOE)) { | ||
769 | hlen = skb_transport_offset(skb) + | 770 | hlen = skb_transport_offset(skb) + |
770 | sizeof(struct fc_frame_header) + | 771 | sizeof(struct fc_frame_header) + |
771 | sizeof(struct fcoe_crc_eof); | 772 | sizeof(struct fcoe_crc_eof); |
@@ -5823,7 +5824,7 @@ static void ixgbe_watchdog_task(struct work_struct *work) | |||
5823 | 5824 | ||
5824 | static int ixgbe_tso(struct ixgbe_adapter *adapter, | 5825 | static int ixgbe_tso(struct ixgbe_adapter *adapter, |
5825 | struct ixgbe_ring *tx_ring, struct sk_buff *skb, | 5826 | struct ixgbe_ring *tx_ring, struct sk_buff *skb, |
5826 | u32 tx_flags, u8 *hdr_len) | 5827 | u32 tx_flags, u8 *hdr_len, __be16 protocol) |
5827 | { | 5828 | { |
5828 | struct ixgbe_adv_tx_context_desc *context_desc; | 5829 | struct ixgbe_adv_tx_context_desc *context_desc; |
5829 | unsigned int i; | 5830 | unsigned int i; |
@@ -5841,7 +5842,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter, | |||
5841 | l4len = tcp_hdrlen(skb); | 5842 | l4len = tcp_hdrlen(skb); |
5842 | *hdr_len += l4len; | 5843 | *hdr_len += l4len; |
5843 | 5844 | ||
5844 | if (skb->protocol == htons(ETH_P_IP)) { | 5845 | if (protocol == htons(ETH_P_IP)) { |
5845 | struct iphdr *iph = ip_hdr(skb); | 5846 | struct iphdr *iph = ip_hdr(skb); |
5846 | iph->tot_len = 0; | 5847 | iph->tot_len = 0; |
5847 | iph->check = 0; | 5848 | iph->check = 0; |
@@ -5880,7 +5881,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter, | |||
5880 | type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT | | 5881 | type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT | |
5881 | IXGBE_ADVTXD_DTYP_CTXT); | 5882 | IXGBE_ADVTXD_DTYP_CTXT); |
5882 | 5883 | ||
5883 | if (skb->protocol == htons(ETH_P_IP)) | 5884 | if (protocol == htons(ETH_P_IP)) |
5884 | type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; | 5885 | type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; |
5885 | type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; | 5886 | type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; |
5886 | context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); | 5887 | context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); |
@@ -5906,16 +5907,10 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter, | |||
5906 | return false; | 5907 | return false; |
5907 | } | 5908 | } |
5908 | 5909 | ||
5909 | static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb) | 5910 | static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb, |
5911 | __be16 protocol) | ||
5910 | { | 5912 | { |
5911 | u32 rtn = 0; | 5913 | u32 rtn = 0; |
5912 | __be16 protocol; | ||
5913 | |||
5914 | if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) | ||
5915 | protocol = ((const struct vlan_ethhdr *)skb->data)-> | ||
5916 | h_vlan_encapsulated_proto; | ||
5917 | else | ||
5918 | protocol = skb->protocol; | ||
5919 | 5914 | ||
5920 | switch (protocol) { | 5915 | switch (protocol) { |
5921 | case cpu_to_be16(ETH_P_IP): | 5916 | case cpu_to_be16(ETH_P_IP): |
@@ -5943,7 +5938,7 @@ static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb) | |||
5943 | default: | 5938 | default: |
5944 | if (unlikely(net_ratelimit())) | 5939 | if (unlikely(net_ratelimit())) |
5945 | e_warn(probe, "partial checksum but proto=%x!\n", | 5940 | e_warn(probe, "partial checksum but proto=%x!\n", |
5946 | skb->protocol); | 5941 | protocol); |
5947 | break; | 5942 | break; |
5948 | } | 5943 | } |
5949 | 5944 | ||
@@ -5952,7 +5947,8 @@ static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb) | |||
5952 | 5947 | ||
5953 | static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, | 5948 | static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, |
5954 | struct ixgbe_ring *tx_ring, | 5949 | struct ixgbe_ring *tx_ring, |
5955 | struct sk_buff *skb, u32 tx_flags) | 5950 | struct sk_buff *skb, u32 tx_flags, |
5951 | __be16 protocol) | ||
5956 | { | 5952 | { |
5957 | struct ixgbe_adv_tx_context_desc *context_desc; | 5953 | struct ixgbe_adv_tx_context_desc *context_desc; |
5958 | unsigned int i; | 5954 | unsigned int i; |
@@ -5981,7 +5977,7 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, | |||
5981 | IXGBE_ADVTXD_DTYP_CTXT); | 5977 | IXGBE_ADVTXD_DTYP_CTXT); |
5982 | 5978 | ||
5983 | if (skb->ip_summed == CHECKSUM_PARTIAL) | 5979 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
5984 | type_tucmd_mlhl |= ixgbe_psum(adapter, skb); | 5980 | type_tucmd_mlhl |= ixgbe_psum(adapter, skb, protocol); |
5985 | 5981 | ||
5986 | context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); | 5982 | context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); |
5987 | /* use index zero for tx checksum offload */ | 5983 | /* use index zero for tx checksum offload */ |
@@ -6179,7 +6175,7 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, | |||
6179 | } | 6175 | } |
6180 | 6176 | ||
6181 | static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb, | 6177 | static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb, |
6182 | int queue, u32 tx_flags) | 6178 | int queue, u32 tx_flags, __be16 protocol) |
6183 | { | 6179 | { |
6184 | struct ixgbe_atr_input atr_input; | 6180 | struct ixgbe_atr_input atr_input; |
6185 | struct tcphdr *th; | 6181 | struct tcphdr *th; |
@@ -6190,7 +6186,7 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb, | |||
6190 | u8 l4type = 0; | 6186 | u8 l4type = 0; |
6191 | 6187 | ||
6192 | /* Right now, we support IPv4 only */ | 6188 | /* Right now, we support IPv4 only */ |
6193 | if (skb->protocol != htons(ETH_P_IP)) | 6189 | if (protocol != htons(ETH_P_IP)) |
6194 | return; | 6190 | return; |
6195 | /* check if we're UDP or TCP */ | 6191 | /* check if we're UDP or TCP */ |
6196 | if (iph->protocol == IPPROTO_TCP) { | 6192 | if (iph->protocol == IPPROTO_TCP) { |
@@ -6257,10 +6253,13 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) | |||
6257 | { | 6253 | { |
6258 | struct ixgbe_adapter *adapter = netdev_priv(dev); | 6254 | struct ixgbe_adapter *adapter = netdev_priv(dev); |
6259 | int txq = smp_processor_id(); | 6255 | int txq = smp_processor_id(); |
6260 | |||
6261 | #ifdef IXGBE_FCOE | 6256 | #ifdef IXGBE_FCOE |
6262 | if ((skb->protocol == htons(ETH_P_FCOE)) || | 6257 | __be16 protocol; |
6263 | (skb->protocol == htons(ETH_P_FIP))) { | 6258 | |
6259 | protocol = vlan_get_protocol(skb); | ||
6260 | |||
6261 | if ((protocol == htons(ETH_P_FCOE)) || | ||
6262 | (protocol == htons(ETH_P_FIP))) { | ||
6264 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { | 6263 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { |
6265 | txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1); | 6264 | txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1); |
6266 | txq += adapter->ring_feature[RING_F_FCOE].mask; | 6265 | txq += adapter->ring_feature[RING_F_FCOE].mask; |
@@ -6303,6 +6302,9 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev | |||
6303 | int tso; | 6302 | int tso; |
6304 | int count = 0; | 6303 | int count = 0; |
6305 | unsigned int f; | 6304 | unsigned int f; |
6305 | __be16 protocol; | ||
6306 | |||
6307 | protocol = vlan_get_protocol(skb); | ||
6306 | 6308 | ||
6307 | if (vlan_tx_tag_present(skb)) { | 6309 | if (vlan_tx_tag_present(skb)) { |
6308 | tx_flags |= vlan_tx_tag_get(skb); | 6310 | tx_flags |= vlan_tx_tag_get(skb); |
@@ -6323,8 +6325,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev | |||
6323 | /* for FCoE with DCB, we force the priority to what | 6325 | /* for FCoE with DCB, we force the priority to what |
6324 | * was specified by the switch */ | 6326 | * was specified by the switch */ |
6325 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED && | 6327 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED && |
6326 | (skb->protocol == htons(ETH_P_FCOE) || | 6328 | (protocol == htons(ETH_P_FCOE) || |
6327 | skb->protocol == htons(ETH_P_FIP))) { | 6329 | protocol == htons(ETH_P_FIP))) { |
6328 | #ifdef CONFIG_IXGBE_DCB | 6330 | #ifdef CONFIG_IXGBE_DCB |
6329 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | 6331 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { |
6330 | tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK | 6332 | tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK |
@@ -6334,7 +6336,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev | |||
6334 | } | 6336 | } |
6335 | #endif | 6337 | #endif |
6336 | /* flag for FCoE offloads */ | 6338 | /* flag for FCoE offloads */ |
6337 | if (skb->protocol == htons(ETH_P_FCOE)) | 6339 | if (protocol == htons(ETH_P_FCOE)) |
6338 | tx_flags |= IXGBE_TX_FLAGS_FCOE; | 6340 | tx_flags |= IXGBE_TX_FLAGS_FCOE; |
6339 | } | 6341 | } |
6340 | #endif | 6342 | #endif |
@@ -6368,9 +6370,10 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev | |||
6368 | tx_flags |= IXGBE_TX_FLAGS_FSO; | 6370 | tx_flags |= IXGBE_TX_FLAGS_FSO; |
6369 | #endif /* IXGBE_FCOE */ | 6371 | #endif /* IXGBE_FCOE */ |
6370 | } else { | 6372 | } else { |
6371 | if (skb->protocol == htons(ETH_P_IP)) | 6373 | if (protocol == htons(ETH_P_IP)) |
6372 | tx_flags |= IXGBE_TX_FLAGS_IPV4; | 6374 | tx_flags |= IXGBE_TX_FLAGS_IPV4; |
6373 | tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len); | 6375 | tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len, |
6376 | protocol); | ||
6374 | if (tso < 0) { | 6377 | if (tso < 0) { |
6375 | dev_kfree_skb_any(skb); | 6378 | dev_kfree_skb_any(skb); |
6376 | return NETDEV_TX_OK; | 6379 | return NETDEV_TX_OK; |
@@ -6378,7 +6381,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev | |||
6378 | 6381 | ||
6379 | if (tso) | 6382 | if (tso) |
6380 | tx_flags |= IXGBE_TX_FLAGS_TSO; | 6383 | tx_flags |= IXGBE_TX_FLAGS_TSO; |
6381 | else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) && | 6384 | else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags, |
6385 | protocol) && | ||
6382 | (skb->ip_summed == CHECKSUM_PARTIAL)) | 6386 | (skb->ip_summed == CHECKSUM_PARTIAL)) |
6383 | tx_flags |= IXGBE_TX_FLAGS_CSUM; | 6387 | tx_flags |= IXGBE_TX_FLAGS_CSUM; |
6384 | } | 6388 | } |
@@ -6392,7 +6396,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev | |||
6392 | test_bit(__IXGBE_FDIR_INIT_DONE, | 6396 | test_bit(__IXGBE_FDIR_INIT_DONE, |
6393 | &tx_ring->reinit_state)) { | 6397 | &tx_ring->reinit_state)) { |
6394 | ixgbe_atr(adapter, skb, tx_ring->queue_index, | 6398 | ixgbe_atr(adapter, skb, tx_ring->queue_index, |
6395 | tx_flags); | 6399 | tx_flags, protocol); |
6396 | tx_ring->atr_count = 0; | 6400 | tx_ring->atr_count = 0; |
6397 | } | 6401 | } |
6398 | } | 6402 | } |
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c index d2e166e29dda..8a4d19e5de06 100644 --- a/drivers/net/pcmcia/axnet_cs.c +++ b/drivers/net/pcmcia/axnet_cs.c | |||
@@ -111,13 +111,14 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id); | |||
111 | 111 | ||
112 | typedef struct axnet_dev_t { | 112 | typedef struct axnet_dev_t { |
113 | struct pcmcia_device *p_dev; | 113 | struct pcmcia_device *p_dev; |
114 | caddr_t base; | 114 | caddr_t base; |
115 | struct timer_list watchdog; | 115 | struct timer_list watchdog; |
116 | int stale, fast_poll; | 116 | int stale, fast_poll; |
117 | u_short link_status; | 117 | u_short link_status; |
118 | u_char duplex_flag; | 118 | u_char duplex_flag; |
119 | int phy_id; | 119 | int phy_id; |
120 | int flags; | 120 | int flags; |
121 | int active_low; | ||
121 | } axnet_dev_t; | 122 | } axnet_dev_t; |
122 | 123 | ||
123 | static inline axnet_dev_t *PRIV(struct net_device *dev) | 124 | static inline axnet_dev_t *PRIV(struct net_device *dev) |
@@ -322,6 +323,8 @@ static int axnet_config(struct pcmcia_device *link) | |||
322 | if (info->flags & IS_AX88790) | 323 | if (info->flags & IS_AX88790) |
323 | outb(0x10, dev->base_addr + AXNET_GPIO); /* select Internal PHY */ | 324 | outb(0x10, dev->base_addr + AXNET_GPIO); /* select Internal PHY */ |
324 | 325 | ||
326 | info->active_low = 0; | ||
327 | |||
325 | for (i = 0; i < 32; i++) { | 328 | for (i = 0; i < 32; i++) { |
326 | j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1); | 329 | j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1); |
327 | j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2); | 330 | j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2); |
@@ -329,15 +332,18 @@ static int axnet_config(struct pcmcia_device *link) | |||
329 | if ((j != 0) && (j != 0xffff)) break; | 332 | if ((j != 0) && (j != 0xffff)) break; |
330 | } | 333 | } |
331 | 334 | ||
332 | /* Maybe PHY is in power down mode. (PPD_SET = 1) | ||
333 | Bit 2 of CCSR is active low. */ | ||
334 | if (i == 32) { | 335 | if (i == 32) { |
336 | /* Maybe PHY is in power down mode. (PPD_SET = 1) | ||
337 | Bit 2 of CCSR is active low. */ | ||
335 | pcmcia_write_config_byte(link, CISREG_CCSR, 0x04); | 338 | pcmcia_write_config_byte(link, CISREG_CCSR, 0x04); |
336 | for (i = 0; i < 32; i++) { | 339 | for (i = 0; i < 32; i++) { |
337 | j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1); | 340 | j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1); |
338 | j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2); | 341 | j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2); |
339 | if (j == j2) continue; | 342 | if (j == j2) continue; |
340 | if ((j != 0) && (j != 0xffff)) break; | 343 | if ((j != 0) && (j != 0xffff)) { |
344 | info->active_low = 1; | ||
345 | break; | ||
346 | } | ||
341 | } | 347 | } |
342 | } | 348 | } |
343 | 349 | ||
@@ -383,8 +389,12 @@ static int axnet_suspend(struct pcmcia_device *link) | |||
383 | static int axnet_resume(struct pcmcia_device *link) | 389 | static int axnet_resume(struct pcmcia_device *link) |
384 | { | 390 | { |
385 | struct net_device *dev = link->priv; | 391 | struct net_device *dev = link->priv; |
392 | axnet_dev_t *info = PRIV(dev); | ||
386 | 393 | ||
387 | if (link->open) { | 394 | if (link->open) { |
395 | if (info->active_low == 1) | ||
396 | pcmcia_write_config_byte(link, CISREG_CCSR, 0x04); | ||
397 | |||
388 | axnet_reset_8390(dev); | 398 | axnet_reset_8390(dev); |
389 | AX88190_init(dev, 1); | 399 | AX88190_init(dev, 1); |
390 | netif_device_attach(dev); | 400 | netif_device_attach(dev); |
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index d88ce9fb1cbd..4c4d16905efb 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -846,10 +846,10 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | |||
846 | else | 846 | else |
847 | tp->features &= ~RTL_FEATURE_WOL; | 847 | tp->features &= ~RTL_FEATURE_WOL; |
848 | __rtl8169_set_wol(tp, wol->wolopts); | 848 | __rtl8169_set_wol(tp, wol->wolopts); |
849 | device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts); | ||
850 | |||
851 | spin_unlock_irq(&tp->lock); | 849 | spin_unlock_irq(&tp->lock); |
852 | 850 | ||
851 | device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts); | ||
852 | |||
853 | return 0; | 853 | return 0; |
854 | } | 854 | } |
855 | 855 | ||
@@ -2931,7 +2931,7 @@ static const struct rtl_cfg_info { | |||
2931 | .hw_start = rtl_hw_start_8168, | 2931 | .hw_start = rtl_hw_start_8168, |
2932 | .region = 2, | 2932 | .region = 2, |
2933 | .align = 8, | 2933 | .align = 8, |
2934 | .intr_event = SYSErr | RxFIFOOver | LinkChg | RxOverflow | | 2934 | .intr_event = SYSErr | LinkChg | RxOverflow | |
2935 | TxErr | TxOK | RxOK | RxErr, | 2935 | TxErr | TxOK | RxOK | RxErr, |
2936 | .napi_event = TxErr | TxOK | RxOK | RxOverflow, | 2936 | .napi_event = TxErr | TxOK | RxOK | RxOverflow, |
2937 | .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI, | 2937 | .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI, |
@@ -4588,7 +4588,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) | |||
4588 | } | 4588 | } |
4589 | 4589 | ||
4590 | /* Work around for rx fifo overflow */ | 4590 | /* Work around for rx fifo overflow */ |
4591 | if (unlikely(status & RxFIFOOver)) { | 4591 | if (unlikely(status & RxFIFOOver) && |
4592 | (tp->mac_version == RTL_GIGA_MAC_VER_11)) { | ||
4592 | netif_stop_queue(dev); | 4593 | netif_stop_queue(dev); |
4593 | rtl8169_tx_timeout(dev); | 4594 | rtl8169_tx_timeout(dev); |
4594 | break; | 4595 | break; |
diff --git a/drivers/net/skge.c b/drivers/net/skge.c index bfec2e0f5275..220e0398f1d5 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c | |||
@@ -3858,7 +3858,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port, | |||
3858 | 3858 | ||
3859 | /* device is off until link detection */ | 3859 | /* device is off until link detection */ |
3860 | netif_carrier_off(dev); | 3860 | netif_carrier_off(dev); |
3861 | netif_stop_queue(dev); | ||
3862 | 3861 | ||
3863 | return dev; | 3862 | return dev; |
3864 | } | 3863 | } |
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index a4c3f5708246..acbdab3d66ca 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c | |||
@@ -2050,12 +2050,16 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth) | |||
2050 | 2050 | ||
2051 | ugeth_vdbg("%s: IN", __func__); | 2051 | ugeth_vdbg("%s: IN", __func__); |
2052 | 2052 | ||
2053 | /* | ||
2054 | * Tell the kernel the link is down. | ||
2055 | * Must be done before disabling the controller | ||
2056 | * or deadlock may happen. | ||
2057 | */ | ||
2058 | phy_stop(phydev); | ||
2059 | |||
2053 | /* Disable the controller */ | 2060 | /* Disable the controller */ |
2054 | ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); | 2061 | ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); |
2055 | 2062 | ||
2056 | /* Tell the kernel the link is down */ | ||
2057 | phy_stop(phydev); | ||
2058 | |||
2059 | /* Mask all interrupts */ | 2063 | /* Mask all interrupts */ |
2060 | out_be32(ugeth->uccf->p_uccm, 0x00000000); | 2064 | out_be32(ugeth->uccf->p_uccm, 0x00000000); |
2061 | 2065 | ||
@@ -2065,9 +2069,6 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth) | |||
2065 | /* Disable Rx and Tx */ | 2069 | /* Disable Rx and Tx */ |
2066 | clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX); | 2070 | clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX); |
2067 | 2071 | ||
2068 | phy_disconnect(ugeth->phydev); | ||
2069 | ugeth->phydev = NULL; | ||
2070 | |||
2071 | ucc_geth_memclean(ugeth); | 2072 | ucc_geth_memclean(ugeth); |
2072 | } | 2073 | } |
2073 | 2074 | ||
@@ -3550,7 +3551,10 @@ static int ucc_geth_close(struct net_device *dev) | |||
3550 | 3551 | ||
3551 | napi_disable(&ugeth->napi); | 3552 | napi_disable(&ugeth->napi); |
3552 | 3553 | ||
3554 | cancel_work_sync(&ugeth->timeout_work); | ||
3553 | ucc_geth_stop(ugeth); | 3555 | ucc_geth_stop(ugeth); |
3556 | phy_disconnect(ugeth->phydev); | ||
3557 | ugeth->phydev = NULL; | ||
3554 | 3558 | ||
3555 | free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev); | 3559 | free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev); |
3556 | 3560 | ||
@@ -3579,8 +3583,12 @@ static void ucc_geth_timeout_work(struct work_struct *work) | |||
3579 | * Must reset MAC *and* PHY. This is done by reopening | 3583 | * Must reset MAC *and* PHY. This is done by reopening |
3580 | * the device. | 3584 | * the device. |
3581 | */ | 3585 | */ |
3582 | ucc_geth_close(dev); | 3586 | netif_tx_stop_all_queues(dev); |
3583 | ucc_geth_open(dev); | 3587 | ucc_geth_stop(ugeth); |
3588 | ucc_geth_init_mac(ugeth); | ||
3589 | /* Must start PHY here */ | ||
3590 | phy_start(ugeth->phydev); | ||
3591 | netif_tx_start_all_queues(dev); | ||
3584 | } | 3592 | } |
3585 | 3593 | ||
3586 | netif_tx_schedule_all(dev); | 3594 | netif_tx_schedule_all(dev); |
@@ -3594,7 +3602,6 @@ static void ucc_geth_timeout(struct net_device *dev) | |||
3594 | { | 3602 | { |
3595 | struct ucc_geth_private *ugeth = netdev_priv(dev); | 3603 | struct ucc_geth_private *ugeth = netdev_priv(dev); |
3596 | 3604 | ||
3597 | netif_carrier_off(dev); | ||
3598 | schedule_work(&ugeth->timeout_work); | 3605 | schedule_work(&ugeth->timeout_work); |
3599 | } | 3606 | } |
3600 | 3607 | ||
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index bb6b67f6b0cc..b6d402806ae6 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -986,9 +986,15 @@ static int virtnet_probe(struct virtio_device *vdev) | |||
986 | goto unregister; | 986 | goto unregister; |
987 | } | 987 | } |
988 | 988 | ||
989 | vi->status = VIRTIO_NET_S_LINK_UP; | 989 | /* Assume link up if device can't report link status, |
990 | virtnet_update_status(vi); | 990 | otherwise get link status from config. */ |
991 | netif_carrier_on(dev); | 991 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { |
992 | netif_carrier_off(dev); | ||
993 | virtnet_update_status(vi); | ||
994 | } else { | ||
995 | vi->status = VIRTIO_NET_S_LINK_UP; | ||
996 | netif_carrier_on(dev); | ||
997 | } | ||
992 | 998 | ||
993 | pr_debug("virtnet: registered device %s\n", dev->name); | 999 | pr_debug("virtnet: registered device %s\n", dev->name); |
994 | return 0; | 1000 | return 0; |
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c index a0471f2e1c7a..48261b7252d0 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c | |||
@@ -410,6 +410,9 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah, | |||
410 | val &= ~(AR_WA_BIT6 | AR_WA_BIT7); | 410 | val &= ~(AR_WA_BIT6 | AR_WA_BIT7); |
411 | } | 411 | } |
412 | 412 | ||
413 | if (AR_SREV_9280(ah)) | ||
414 | val |= AR_WA_BIT22; | ||
415 | |||
413 | if (AR_SREV_9285E_20(ah)) | 416 | if (AR_SREV_9285E_20(ah)) |
414 | val |= AR_WA_BIT23; | 417 | val |= AR_WA_BIT23; |
415 | 418 | ||
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 9b8e7e3fcebd..170d44a35ccb 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h | |||
@@ -675,6 +675,7 @@ static inline void ath_read_cachesize(struct ath_common *common, int *csz) | |||
675 | } | 675 | } |
676 | 676 | ||
677 | extern struct ieee80211_ops ath9k_ops; | 677 | extern struct ieee80211_ops ath9k_ops; |
678 | extern struct pm_qos_request_list ath9k_pm_qos_req; | ||
678 | extern int modparam_nohwcrypt; | 679 | extern int modparam_nohwcrypt; |
679 | extern int led_blink; | 680 | extern int led_blink; |
680 | 681 | ||
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index 6576f683dba0..f7ec31b4ddd3 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c | |||
@@ -35,6 +35,7 @@ static struct usb_device_id ath9k_hif_usb_ids[] = { | |||
35 | { USB_DEVICE(0x07D1, 0x3A10) }, /* Dlink Wireless 150 */ | 35 | { USB_DEVICE(0x07D1, 0x3A10) }, /* Dlink Wireless 150 */ |
36 | { USB_DEVICE(0x13D3, 0x3327) }, /* Azurewave */ | 36 | { USB_DEVICE(0x13D3, 0x3327) }, /* Azurewave */ |
37 | { USB_DEVICE(0x13D3, 0x3328) }, /* Azurewave */ | 37 | { USB_DEVICE(0x13D3, 0x3328) }, /* Azurewave */ |
38 | { USB_DEVICE(0x13D3, 0x3346) }, /* IMC Networks */ | ||
38 | { USB_DEVICE(0x04CA, 0x4605) }, /* Liteon */ | 39 | { USB_DEVICE(0x04CA, 0x4605) }, /* Liteon */ |
39 | { USB_DEVICE(0x083A, 0xA704) }, /* SMC Networks */ | 40 | { USB_DEVICE(0x083A, 0xA704) }, /* SMC Networks */ |
40 | { }, | 41 | { }, |
@@ -540,11 +541,11 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb) | |||
540 | return; | 541 | return; |
541 | } | 542 | } |
542 | 543 | ||
543 | usb_fill_int_urb(urb, hif_dev->udev, | 544 | usb_fill_bulk_urb(urb, hif_dev->udev, |
544 | usb_rcvbulkpipe(hif_dev->udev, | 545 | usb_rcvbulkpipe(hif_dev->udev, |
545 | USB_REG_IN_PIPE), | 546 | USB_REG_IN_PIPE), |
546 | nskb->data, MAX_REG_IN_BUF_SIZE, | 547 | nskb->data, MAX_REG_IN_BUF_SIZE, |
547 | ath9k_hif_usb_reg_in_cb, nskb, 1); | 548 | ath9k_hif_usb_reg_in_cb, nskb); |
548 | 549 | ||
549 | ret = usb_submit_urb(urb, GFP_ATOMIC); | 550 | ret = usb_submit_urb(urb, GFP_ATOMIC); |
550 | if (ret) { | 551 | if (ret) { |
@@ -720,11 +721,11 @@ static int ath9k_hif_usb_alloc_reg_in_urb(struct hif_device_usb *hif_dev) | |||
720 | if (!skb) | 721 | if (!skb) |
721 | goto err; | 722 | goto err; |
722 | 723 | ||
723 | usb_fill_int_urb(hif_dev->reg_in_urb, hif_dev->udev, | 724 | usb_fill_bulk_urb(hif_dev->reg_in_urb, hif_dev->udev, |
724 | usb_rcvbulkpipe(hif_dev->udev, | 725 | usb_rcvbulkpipe(hif_dev->udev, |
725 | USB_REG_IN_PIPE), | 726 | USB_REG_IN_PIPE), |
726 | skb->data, MAX_REG_IN_BUF_SIZE, | 727 | skb->data, MAX_REG_IN_BUF_SIZE, |
727 | ath9k_hif_usb_reg_in_cb, skb, 1); | 728 | ath9k_hif_usb_reg_in_cb, skb); |
728 | 729 | ||
729 | if (usb_submit_urb(hif_dev->reg_in_urb, GFP_KERNEL) != 0) | 730 | if (usb_submit_urb(hif_dev->reg_in_urb, GFP_KERNEL) != 0) |
730 | goto err; | 731 | goto err; |
@@ -843,14 +844,6 @@ static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev) | |||
843 | goto err_fw_req; | 844 | goto err_fw_req; |
844 | } | 845 | } |
845 | 846 | ||
846 | /* Alloc URBs */ | ||
847 | ret = ath9k_hif_usb_alloc_urbs(hif_dev); | ||
848 | if (ret) { | ||
849 | dev_err(&hif_dev->udev->dev, | ||
850 | "ath9k_htc: Unable to allocate URBs\n"); | ||
851 | goto err_urb; | ||
852 | } | ||
853 | |||
854 | /* Download firmware */ | 847 | /* Download firmware */ |
855 | ret = ath9k_hif_usb_download_fw(hif_dev); | 848 | ret = ath9k_hif_usb_download_fw(hif_dev); |
856 | if (ret) { | 849 | if (ret) { |
@@ -866,16 +859,22 @@ static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev) | |||
866 | */ | 859 | */ |
867 | for (idx = 0; idx < alt->desc.bNumEndpoints; idx++) { | 860 | for (idx = 0; idx < alt->desc.bNumEndpoints; idx++) { |
868 | endp = &alt->endpoint[idx].desc; | 861 | endp = &alt->endpoint[idx].desc; |
869 | if (((endp->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) | 862 | if ((endp->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) |
870 | == 0x04) && | 863 | == USB_ENDPOINT_XFER_INT) { |
871 | ((endp->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) | ||
872 | == USB_ENDPOINT_XFER_INT)) { | ||
873 | endp->bmAttributes &= ~USB_ENDPOINT_XFERTYPE_MASK; | 864 | endp->bmAttributes &= ~USB_ENDPOINT_XFERTYPE_MASK; |
874 | endp->bmAttributes |= USB_ENDPOINT_XFER_BULK; | 865 | endp->bmAttributes |= USB_ENDPOINT_XFER_BULK; |
875 | endp->bInterval = 0; | 866 | endp->bInterval = 0; |
876 | } | 867 | } |
877 | } | 868 | } |
878 | 869 | ||
870 | /* Alloc URBs */ | ||
871 | ret = ath9k_hif_usb_alloc_urbs(hif_dev); | ||
872 | if (ret) { | ||
873 | dev_err(&hif_dev->udev->dev, | ||
874 | "ath9k_htc: Unable to allocate URBs\n"); | ||
875 | goto err_urb; | ||
876 | } | ||
877 | |||
879 | return 0; | 878 | return 0; |
880 | 879 | ||
881 | err_fw_download: | 880 | err_fw_download: |
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index cc13ee117823..6ebc68bca91f 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c | |||
@@ -484,6 +484,7 @@ static int ath9k_hw_post_init(struct ath_hw *ah) | |||
484 | ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL, | 484 | ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL, |
485 | "Failed allocating banks for " | 485 | "Failed allocating banks for " |
486 | "external radio\n"); | 486 | "external radio\n"); |
487 | ath9k_hw_rf_free_ext_banks(ah); | ||
487 | return ecode; | 488 | return ecode; |
488 | } | 489 | } |
489 | 490 | ||
@@ -952,9 +953,12 @@ static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode) | |||
952 | REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION); | 953 | REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION); |
953 | break; | 954 | break; |
954 | case NL80211_IFTYPE_STATION: | 955 | case NL80211_IFTYPE_STATION: |
955 | case NL80211_IFTYPE_MONITOR: | ||
956 | REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_KSRCH_MODE); | 956 | REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_KSRCH_MODE); |
957 | break; | 957 | break; |
958 | default: | ||
959 | if (ah->is_monitoring) | ||
960 | REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_KSRCH_MODE); | ||
961 | break; | ||
958 | } | 962 | } |
959 | } | 963 | } |
960 | 964 | ||
@@ -1634,7 +1638,6 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period) | |||
1634 | 1638 | ||
1635 | switch (ah->opmode) { | 1639 | switch (ah->opmode) { |
1636 | case NL80211_IFTYPE_STATION: | 1640 | case NL80211_IFTYPE_STATION: |
1637 | case NL80211_IFTYPE_MONITOR: | ||
1638 | REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(next_beacon)); | 1641 | REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(next_beacon)); |
1639 | REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, 0xffff); | 1642 | REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, 0xffff); |
1640 | REG_WRITE(ah, AR_NEXT_SWBA, 0x7ffff); | 1643 | REG_WRITE(ah, AR_NEXT_SWBA, 0x7ffff); |
@@ -1663,6 +1666,14 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period) | |||
1663 | AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN; | 1666 | AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN; |
1664 | break; | 1667 | break; |
1665 | default: | 1668 | default: |
1669 | if (ah->is_monitoring) { | ||
1670 | REG_WRITE(ah, AR_NEXT_TBTT_TIMER, | ||
1671 | TU_TO_USEC(next_beacon)); | ||
1672 | REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, 0xffff); | ||
1673 | REG_WRITE(ah, AR_NEXT_SWBA, 0x7ffff); | ||
1674 | flags |= AR_TBTT_TIMER_EN; | ||
1675 | break; | ||
1676 | } | ||
1666 | ath_print(ath9k_hw_common(ah), ATH_DBG_BEACON, | 1677 | ath_print(ath9k_hw_common(ah), ATH_DBG_BEACON, |
1667 | "%s: unsupported opmode: %d\n", | 1678 | "%s: unsupported opmode: %d\n", |
1668 | __func__, ah->opmode); | 1679 | __func__, ah->opmode); |
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index d032939768b0..d47d1b4b6002 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h | |||
@@ -622,6 +622,7 @@ struct ath_hw { | |||
622 | 622 | ||
623 | bool sw_mgmt_crypto; | 623 | bool sw_mgmt_crypto; |
624 | bool is_pciexpress; | 624 | bool is_pciexpress; |
625 | bool is_monitoring; | ||
625 | bool need_an_top2_fixup; | 626 | bool need_an_top2_fixup; |
626 | u16 tx_trig_level; | 627 | u16 tx_trig_level; |
627 | 628 | ||
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 95b41db0d86b..6a0d99eff404 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c | |||
@@ -15,6 +15,7 @@ | |||
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | #include <linux/pm_qos_params.h> | ||
18 | 19 | ||
19 | #include "ath9k.h" | 20 | #include "ath9k.h" |
20 | 21 | ||
@@ -179,6 +180,8 @@ static const struct ath_ops ath9k_common_ops = { | |||
179 | .write = ath9k_iowrite32, | 180 | .write = ath9k_iowrite32, |
180 | }; | 181 | }; |
181 | 182 | ||
183 | struct pm_qos_request_list ath9k_pm_qos_req; | ||
184 | |||
182 | /**************************/ | 185 | /**************************/ |
183 | /* Initialization */ | 186 | /* Initialization */ |
184 | /**************************/ | 187 | /**************************/ |
@@ -756,6 +759,9 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid, | |||
756 | ath_init_leds(sc); | 759 | ath_init_leds(sc); |
757 | ath_start_rfkill_poll(sc); | 760 | ath_start_rfkill_poll(sc); |
758 | 761 | ||
762 | pm_qos_add_request(&ath9k_pm_qos_req, PM_QOS_CPU_DMA_LATENCY, | ||
763 | PM_QOS_DEFAULT_VALUE); | ||
764 | |||
759 | return 0; | 765 | return 0; |
760 | 766 | ||
761 | error_world: | 767 | error_world: |
@@ -811,6 +817,8 @@ void ath9k_deinit_device(struct ath_softc *sc) | |||
811 | 817 | ||
812 | ath9k_ps_wakeup(sc); | 818 | ath9k_ps_wakeup(sc); |
813 | 819 | ||
820 | pm_qos_remove_request(&ath9k_pm_qos_req); | ||
821 | |||
814 | wiphy_rfkill_stop_polling(sc->hw->wiphy); | 822 | wiphy_rfkill_stop_polling(sc->hw->wiphy); |
815 | ath_deinit_leds(sc); | 823 | ath_deinit_leds(sc); |
816 | 824 | ||
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index b52f1cf8a603..25d3ef4c338e 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
@@ -15,6 +15,7 @@ | |||
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <linux/nl80211.h> | 17 | #include <linux/nl80211.h> |
18 | #include <linux/pm_qos_params.h> | ||
18 | #include "ath9k.h" | 19 | #include "ath9k.h" |
19 | #include "btcoex.h" | 20 | #include "btcoex.h" |
20 | 21 | ||
@@ -93,11 +94,13 @@ void ath9k_ps_wakeup(struct ath_softc *sc) | |||
93 | { | 94 | { |
94 | struct ath_common *common = ath9k_hw_common(sc->sc_ah); | 95 | struct ath_common *common = ath9k_hw_common(sc->sc_ah); |
95 | unsigned long flags; | 96 | unsigned long flags; |
97 | enum ath9k_power_mode power_mode; | ||
96 | 98 | ||
97 | spin_lock_irqsave(&sc->sc_pm_lock, flags); | 99 | spin_lock_irqsave(&sc->sc_pm_lock, flags); |
98 | if (++sc->ps_usecount != 1) | 100 | if (++sc->ps_usecount != 1) |
99 | goto unlock; | 101 | goto unlock; |
100 | 102 | ||
103 | power_mode = sc->sc_ah->power_mode; | ||
101 | ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE); | 104 | ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE); |
102 | 105 | ||
103 | /* | 106 | /* |
@@ -105,10 +108,12 @@ void ath9k_ps_wakeup(struct ath_softc *sc) | |||
105 | * useful data. Better clear them now so that they don't mess up | 108 | * useful data. Better clear them now so that they don't mess up |
106 | * survey data results. | 109 | * survey data results. |
107 | */ | 110 | */ |
108 | spin_lock(&common->cc_lock); | 111 | if (power_mode != ATH9K_PM_AWAKE) { |
109 | ath_hw_cycle_counters_update(common); | 112 | spin_lock(&common->cc_lock); |
110 | memset(&common->cc_survey, 0, sizeof(common->cc_survey)); | 113 | ath_hw_cycle_counters_update(common); |
111 | spin_unlock(&common->cc_lock); | 114 | memset(&common->cc_survey, 0, sizeof(common->cc_survey)); |
115 | spin_unlock(&common->cc_lock); | ||
116 | } | ||
112 | 117 | ||
113 | unlock: | 118 | unlock: |
114 | spin_unlock_irqrestore(&sc->sc_pm_lock, flags); | 119 | spin_unlock_irqrestore(&sc->sc_pm_lock, flags); |
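
The rewritten wakeup path makes two things explicit: wakeups are reference-counted through ps_usecount, and the hardware cycle counters are only discarded when the chip really transitions out of sleep (the saved power_mode was not AWAKE), since counters that accumulated sleep time would corrupt the survey statistics. A minimal userspace sketch of that pattern, with hypothetical names and states:

#include <stdio.h>

enum pm_state { PM_AWAKE, PM_NETWORK_SLEEP };

static enum pm_state power_mode = PM_NETWORK_SLEEP;
static int ps_usecount;
static int cc_counters = 42;             /* stale cycle counters from sleep time */

/* Hypothetical analog of ath9k_ps_wakeup(): only the first caller wakes the
 * chip, and stale counters are dropped only on a real sleep->awake edge. */
static void ps_wakeup(void)
{
	enum pm_state prev;

	if (++ps_usecount != 1)
		return;                  /* nested caller: chip is already awake */

	prev = power_mode;
	power_mode = PM_AWAKE;

	if (prev != PM_AWAKE)
		cc_counters = 0;         /* counters measured sleep time: discard */
}

int main(void)
{
	ps_wakeup();                     /* real wakeup: counters cleared */
	ps_wakeup();                     /* nested wakeup: nothing to do */
	printf("usecount=%d counters=%d\n", ps_usecount, cc_counters);
	return 0;
}
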
@@ -1217,6 +1222,7 @@ static int ath9k_start(struct ieee80211_hw *hw) | |||
1217 | ah->imask |= ATH9K_INT_CST; | 1222 | ah->imask |= ATH9K_INT_CST; |
1218 | 1223 | ||
1219 | sc->sc_flags &= ~SC_OP_INVALID; | 1224 | sc->sc_flags &= ~SC_OP_INVALID; |
1225 | sc->sc_ah->is_monitoring = false; | ||
1220 | 1226 | ||
1221 | /* Disable BMISS interrupt when we're not associated */ | 1227 | /* Disable BMISS interrupt when we're not associated */ |
1222 | ah->imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS); | 1228 | ah->imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS); |
@@ -1238,6 +1244,8 @@ static int ath9k_start(struct ieee80211_hw *hw) | |||
1238 | ath9k_btcoex_timer_resume(sc); | 1244 | ath9k_btcoex_timer_resume(sc); |
1239 | } | 1245 | } |
1240 | 1246 | ||
1247 | pm_qos_update_request(&ath9k_pm_qos_req, 55); | ||
1248 | |||
1241 | mutex_unlock: | 1249 | mutex_unlock: |
1242 | mutex_unlock(&sc->mutex); | 1250 | mutex_unlock(&sc->mutex); |
1243 | 1251 | ||
@@ -1415,6 +1423,8 @@ static void ath9k_stop(struct ieee80211_hw *hw) | |||
1415 | 1423 | ||
1416 | sc->sc_flags |= SC_OP_INVALID; | 1424 | sc->sc_flags |= SC_OP_INVALID; |
1417 | 1425 | ||
1426 | pm_qos_update_request(&ath9k_pm_qos_req, PM_QOS_DEFAULT_VALUE); | ||
1427 | |||
1418 | mutex_unlock(&sc->mutex); | 1428 | mutex_unlock(&sc->mutex); |
1419 | 1429 | ||
1420 | ath_print(common, ATH_DBG_CONFIG, "Driver halt\n"); | 1430 | ath_print(common, ATH_DBG_CONFIG, "Driver halt\n"); |
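
Taken together with the init.c hunk above, the driver now keeps a PM QoS request for CPU/DMA latency over its whole lifetime: registered with the default (unbounded) value at init, tightened to 55 microseconds while the interface is up, relaxed again in ath9k_stop(), and removed at deinit. A toy model of that add/update/remove lifecycle, in plain C with made-up helper names rather than the kernel pm_qos API:

#include <stdio.h>
#include <limits.h>

#define QOS_DEFAULT_VALUE INT_MAX        /* "no constraint", stand-in for PM_QOS_DEFAULT_VALUE */

struct qos_request { int value; int active; };

static void qos_add(struct qos_request *r, int v)    { r->value = v; r->active = 1; }
static void qos_update(struct qos_request *r, int v) { if (r->active) r->value = v; }
static void qos_remove(struct qos_request *r)        { r->active = 0; }

int main(void)
{
	struct qos_request req;

	qos_add(&req, QOS_DEFAULT_VALUE);    /* driver init: no constraint yet */
	qos_update(&req, 55);                /* interface start: cap DMA latency at 55 us */
	printf("running, latency bound = %d us\n", req.value);
	qos_update(&req, QOS_DEFAULT_VALUE); /* interface stop: relax the bound */
	qos_remove(&req);                    /* driver deinit */
	return 0;
}
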
@@ -1493,8 +1503,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw, | |||
1493 | ath9k_hw_set_interrupts(ah, ah->imask); | 1503 | ath9k_hw_set_interrupts(ah, ah->imask); |
1494 | 1504 | ||
1495 | if (vif->type == NL80211_IFTYPE_AP || | 1505 | if (vif->type == NL80211_IFTYPE_AP || |
1496 | vif->type == NL80211_IFTYPE_ADHOC || | 1506 | vif->type == NL80211_IFTYPE_ADHOC) { |
1497 | vif->type == NL80211_IFTYPE_MONITOR) { | ||
1498 | sc->sc_flags |= SC_OP_ANI_RUN; | 1507 | sc->sc_flags |= SC_OP_ANI_RUN; |
1499 | ath_start_ani(common); | 1508 | ath_start_ani(common); |
1500 | } | 1509 | } |
@@ -1644,8 +1653,12 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) | |||
1644 | if (changed & IEEE80211_CONF_CHANGE_MONITOR) { | 1653 | if (changed & IEEE80211_CONF_CHANGE_MONITOR) { |
1645 | if (conf->flags & IEEE80211_CONF_MONITOR) { | 1654 | if (conf->flags & IEEE80211_CONF_MONITOR) { |
1646 | ath_print(common, ATH_DBG_CONFIG, | 1655 | ath_print(common, ATH_DBG_CONFIG, |
1647 | "HW opmode set to Monitor mode\n"); | 1656 | "Monitor mode is enabled\n"); |
1648 | sc->sc_ah->opmode = NL80211_IFTYPE_MONITOR; | 1657 | sc->sc_ah->is_monitoring = true; |
1658 | } else { | ||
1659 | ath_print(common, ATH_DBG_CONFIG, | ||
1660 | "Monitor mode is disabled\n"); | ||
1661 | sc->sc_ah->is_monitoring = false; | ||
1649 | } | 1662 | } |
1650 | } | 1663 | } |
1651 | 1664 | ||
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index fddb0129bb57..c76ea53c20ce 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c | |||
@@ -441,7 +441,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc) | |||
441 | */ | 441 | */ |
442 | if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) && | 442 | if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) && |
443 | (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) || | 443 | (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) || |
444 | (sc->sc_ah->opmode == NL80211_IFTYPE_MONITOR)) | 444 | (sc->sc_ah->is_monitoring)) |
445 | rfilt |= ATH9K_RX_FILTER_PROM; | 445 | rfilt |= ATH9K_RX_FILTER_PROM; |
446 | 446 | ||
447 | if (sc->rx.rxfilter & FIF_CONTROL) | 447 | if (sc->rx.rxfilter & FIF_CONTROL) |
@@ -897,7 +897,7 @@ static bool ath9k_rx_accept(struct ath_common *common, | |||
897 | * decryption and MIC failures. For monitor mode, | 897 | * decryption and MIC failures. For monitor mode, |
898 | * we also ignore the CRC error. | 898 | * we also ignore the CRC error. |
899 | */ | 899 | */ |
900 | if (ah->opmode == NL80211_IFTYPE_MONITOR) { | 900 | if (ah->is_monitoring) { |
901 | if (rx_stats->rs_status & | 901 | if (rx_stats->rs_status & |
902 | ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC | | 902 | ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC | |
903 | ATH9K_RXERR_CRC)) | 903 | ATH9K_RXERR_CRC)) |
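
With NL80211_IFTYPE_MONITOR no longer used as a hardware opmode, monitor behaviour hangs off the new ah->is_monitoring flag: it forces the promiscuous RX filter bit and lets CRC-damaged frames through to the sniffer. A condensed standalone sketch of those two decisions (the bit values are made up for illustration):

#include <stdio.h>
#include <stdbool.h>

/* Illustrative bit values only; the real masks live in the ath9k headers. */
#define RX_FILTER_PROM  0x01
#define RXERR_DECRYPT   0x02
#define RXERR_MIC       0x04
#define RXERR_CRC       0x08

static bool is_monitoring;      /* set from IEEE80211_CONF_MONITOR, not from the opmode */

static unsigned int calc_rxfilter(void)
{
	unsigned int rfilt = 0;

	if (is_monitoring)
		rfilt |= RX_FILTER_PROM;          /* sniff everything while monitoring */
	return rfilt;
}

static bool rx_accept(unsigned int rs_status)
{
	unsigned int ignore = RXERR_DECRYPT | RXERR_MIC;

	if (is_monitoring)
		ignore |= RXERR_CRC;              /* also pass CRC-damaged frames to the sniffer */
	return !(rs_status & ~ignore);
}

int main(void)
{
	is_monitoring = true;
	printf("filter=%#x, CRC-error frame accepted: %d\n",
	       calc_rxfilter(), rx_accept(RXERR_CRC));
	return 0;
}
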
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h index 42976b0a01c1..fa05b711e5cd 100644 --- a/drivers/net/wireless/ath/ath9k/reg.h +++ b/drivers/net/wireless/ath/ath9k/reg.h | |||
@@ -703,6 +703,7 @@ | |||
703 | #define AR_WA_RESET_EN (1 << 18) /* Sw Control to enable PCI-Reset to POR (bit 15) */ | 703 | #define AR_WA_RESET_EN (1 << 18) /* Sw Control to enable PCI-Reset to POR (bit 15) */ |
704 | #define AR_WA_ANALOG_SHIFT (1 << 20) | 704 | #define AR_WA_ANALOG_SHIFT (1 << 20) |
705 | #define AR_WA_POR_SHORT (1 << 21) /* PCI-E Phy reset control */ | 705 | #define AR_WA_POR_SHORT (1 << 21) /* PCI-E Phy reset control */ |
706 | #define AR_WA_BIT22 (1 << 22) | ||
706 | #define AR9285_WA_DEFAULT 0x004a050b | 707 | #define AR9285_WA_DEFAULT 0x004a050b |
707 | #define AR9280_WA_DEFAULT 0x0040073b | 708 | #define AR9280_WA_DEFAULT 0x0040073b |
708 | #define AR_WA_DEFAULT 0x0000073f | 709 | #define AR_WA_DEFAULT 0x0000073f |
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c index d8607f4c144d..3317039cd28f 100644 --- a/drivers/net/wireless/ath/carl9170/usb.c +++ b/drivers/net/wireless/ath/carl9170/usb.c | |||
@@ -82,9 +82,11 @@ static struct usb_device_id carl9170_usb_ids[] = { | |||
82 | { USB_DEVICE(0x07d1, 0x3c10) }, | 82 | { USB_DEVICE(0x07d1, 0x3c10) }, |
83 | /* D-Link DWA 160 A2 */ | 83 | /* D-Link DWA 160 A2 */ |
84 | { USB_DEVICE(0x07d1, 0x3a09) }, | 84 | { USB_DEVICE(0x07d1, 0x3a09) }, |
85 | /* D-Link DWA 130 D */ | ||
86 | { USB_DEVICE(0x07d1, 0x3a0f) }, | ||
85 | /* Netgear WNA1000 */ | 87 | /* Netgear WNA1000 */ |
86 | { USB_DEVICE(0x0846, 0x9040) }, | 88 | { USB_DEVICE(0x0846, 0x9040) }, |
87 | /* Netgear WNDA3100 */ | 89 | /* Netgear WNDA3100 (v1) */ |
88 | { USB_DEVICE(0x0846, 0x9010) }, | 90 | { USB_DEVICE(0x0846, 0x9010) }, |
89 | /* Netgear WN111 v2 */ | 91 | /* Netgear WN111 v2 */ |
90 | { USB_DEVICE(0x0846, 0x9001), .driver_info = CARL9170_ONE_LED }, | 92 | { USB_DEVICE(0x0846, 0x9001), .driver_info = CARL9170_ONE_LED }, |
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index 8f8c4b73f8b9..7edf8c2fb8c7 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c | |||
@@ -4000,7 +4000,8 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e | |||
4000 | * "the hard way", rather than using device's scan. | 4000 | * "the hard way", rather than using device's scan. |
4001 | */ | 4001 | */ |
4002 | if (iwl3945_mod_params.disable_hw_scan) { | 4002 | if (iwl3945_mod_params.disable_hw_scan) { |
4003 | IWL_ERR(priv, "sw scan support is deprecated\n"); | 4003 | dev_printk(KERN_DEBUG, &(pdev->dev), |
4004 | "sw scan support is deprecated\n"); | ||
4004 | iwl3945_hw_ops.hw_scan = NULL; | 4005 | iwl3945_hw_ops.hw_scan = NULL; |
4005 | } | 4006 | } |
4006 | 4007 | ||
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c index 5046a0005034..373930afc26b 100644 --- a/drivers/net/wireless/libertas/cfg.c +++ b/drivers/net/wireless/libertas/cfg.c | |||
@@ -700,8 +700,9 @@ static void lbs_scan_worker(struct work_struct *work) | |||
700 | 700 | ||
701 | if (priv->scan_channel < priv->scan_req->n_channels) { | 701 | if (priv->scan_channel < priv->scan_req->n_channels) { |
702 | cancel_delayed_work(&priv->scan_work); | 702 | cancel_delayed_work(&priv->scan_work); |
703 | queue_delayed_work(priv->work_thread, &priv->scan_work, | 703 | if (!priv->stopping) |
704 | msecs_to_jiffies(300)); | 704 | queue_delayed_work(priv->work_thread, &priv->scan_work, |
705 | msecs_to_jiffies(300)); | ||
705 | } | 706 | } |
706 | 707 | ||
707 | /* This is the final data we are about to send */ | 708 | /* This is the final data we are about to send */ |
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h index f062ed583901..cb14c38caf3a 100644 --- a/drivers/net/wireless/libertas/dev.h +++ b/drivers/net/wireless/libertas/dev.h | |||
@@ -36,6 +36,7 @@ struct lbs_private { | |||
36 | /* CFG80211 */ | 36 | /* CFG80211 */ |
37 | struct wireless_dev *wdev; | 37 | struct wireless_dev *wdev; |
38 | bool wiphy_registered; | 38 | bool wiphy_registered; |
39 | bool stopping; | ||
39 | struct cfg80211_scan_request *scan_req; | 40 | struct cfg80211_scan_request *scan_req; |
40 | u8 assoc_bss[ETH_ALEN]; | 41 | u8 assoc_bss[ETH_ALEN]; |
41 | u8 disassoc_reason; | 42 | u8 disassoc_reason; |
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c index 47ce5a6ba120..46b88b118c99 100644 --- a/drivers/net/wireless/libertas/main.c +++ b/drivers/net/wireless/libertas/main.c | |||
@@ -104,6 +104,7 @@ static int lbs_dev_open(struct net_device *dev) | |||
104 | lbs_deb_enter(LBS_DEB_NET); | 104 | lbs_deb_enter(LBS_DEB_NET); |
105 | 105 | ||
106 | spin_lock_irq(&priv->driver_lock); | 106 | spin_lock_irq(&priv->driver_lock); |
107 | priv->stopping = false; | ||
107 | 108 | ||
108 | if (priv->connect_status == LBS_CONNECTED) | 109 | if (priv->connect_status == LBS_CONNECTED) |
109 | netif_carrier_on(dev); | 110 | netif_carrier_on(dev); |
@@ -131,10 +132,16 @@ static int lbs_eth_stop(struct net_device *dev) | |||
131 | lbs_deb_enter(LBS_DEB_NET); | 132 | lbs_deb_enter(LBS_DEB_NET); |
132 | 133 | ||
133 | spin_lock_irq(&priv->driver_lock); | 134 | spin_lock_irq(&priv->driver_lock); |
135 | priv->stopping = true; | ||
134 | netif_stop_queue(dev); | 136 | netif_stop_queue(dev); |
135 | spin_unlock_irq(&priv->driver_lock); | 137 | spin_unlock_irq(&priv->driver_lock); |
136 | 138 | ||
137 | schedule_work(&priv->mcast_work); | 139 | schedule_work(&priv->mcast_work); |
140 | cancel_delayed_work_sync(&priv->scan_work); | ||
141 | if (priv->scan_req) { | ||
142 | cfg80211_scan_done(priv->scan_req, false); | ||
143 | priv->scan_req = NULL; | ||
144 | } | ||
138 | 145 | ||
139 | lbs_deb_leave(LBS_DEB_NET); | 146 | lbs_deb_leave(LBS_DEB_NET); |
140 | return 0; | 147 | return 0; |
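
The worker now refuses to reschedule itself once priv->stopping is set, and lbs_eth_stop() sets the flag, cancels the delayed work synchronously, and completes any outstanding cfg80211 scan request so userspace is not left waiting on a scan that will never finish. The shutdown ordering can be modelled in a few lines of plain C (hypothetical names, no real workqueue):

#include <stdio.h>
#include <stdbool.h>

static bool stopping;
static bool work_queued;
static bool scan_req_pending = true;

/* Analog of lbs_scan_worker(): requeue only while the device is not stopping. */
static void scan_worker(void)
{
	work_queued = false;
	if (!stopping)
		work_queued = true;              /* queue_delayed_work(..., 300 ms) */
	else
		printf("worker: device stopping, not rescheduling\n");
}

/* Analog of lbs_eth_stop(): flag first, then cancel and complete the scan. */
static void eth_stop(void)
{
	stopping = true;
	work_queued = false;                     /* cancel_delayed_work_sync() */
	if (scan_req_pending) {
		scan_req_pending = false;        /* cfg80211_scan_done(req, false) */
		printf("stop: completed pending scan request\n");
	}
}

int main(void)
{
	scan_worker();                           /* normal pass: reschedules itself */
	eth_stop();
	scan_worker();                           /* a late run sees stopping and bails */
	printf("queued=%d pending=%d\n", work_queued, scan_req_pending);
	return 0;
}
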
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig index eea1ef2f502b..4396d4b9bfb9 100644 --- a/drivers/net/wireless/rt2x00/Kconfig +++ b/drivers/net/wireless/rt2x00/Kconfig | |||
@@ -221,9 +221,6 @@ config RT2X00_LIB_LEDS | |||
221 | boolean | 221 | boolean |
222 | default y if (RT2X00_LIB=y && LEDS_CLASS=y) || (RT2X00_LIB=m && LEDS_CLASS!=n) | 222 | default y if (RT2X00_LIB=y && LEDS_CLASS=y) || (RT2X00_LIB=m && LEDS_CLASS!=n) |
223 | 223 | ||
224 | comment "rt2x00 leds support disabled due to modularized LEDS_CLASS and built-in rt2x00" | ||
225 | depends on RT2X00_LIB=y && LEDS_CLASS=m | ||
226 | |||
227 | config RT2X00_LIB_DEBUGFS | 224 | config RT2X00_LIB_DEBUGFS |
228 | bool "Ralink debugfs support" | 225 | bool "Ralink debugfs support" |
229 | depends on RT2X00_LIB && MAC80211_DEBUGFS | 226 | depends on RT2X00_LIB && MAC80211_DEBUGFS |
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index 5624db8c9ad0..003170ea2e39 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c | |||
@@ -64,17 +64,57 @@ void pci_bus_remove_resources(struct pci_bus *bus) | |||
64 | } | 64 | } |
65 | } | 65 | } |
66 | 66 | ||
67 | static bool pci_bus_resource_better(struct resource *res1, bool pos1, | ||
68 | struct resource *res2, bool pos2) | ||
69 | { | ||
70 | /* If exactly one is positive decode, always prefer that one */ | ||
71 | if (pos1 != pos2) | ||
72 | return pos1 ? true : false; | ||
73 | |||
74 | /* Prefer the one that contains the highest address */ | ||
75 | if (res1->end != res2->end) | ||
76 | return (res1->end > res2->end) ? true : false; | ||
77 | |||
78 | /* Otherwise, prefer the one with highest "center of gravity" */ | ||
79 | if (res1->start != res2->start) | ||
80 | return (res1->start > res2->start) ? true : false; | ||
81 | |||
82 | /* Otherwise, choose one arbitrarily (but consistently) */ | ||
83 | return (res1 > res2) ? true : false; | ||
84 | } | ||
85 | |||
86 | static bool pci_bus_resource_positive(struct pci_bus *bus, struct resource *res) | ||
87 | { | ||
88 | struct pci_bus_resource *bus_res; | ||
89 | |||
90 | /* | ||
91 | * This relies on the fact that pci_bus.resource[] refers to P2P or | ||
92 | * CardBus bridge base/limit registers, which are always positively | ||
93 | * decoded. The pci_bus.resources list contains host bridge or | ||
94 | * subtractively decoded resources. | ||
95 | */ | ||
96 | list_for_each_entry(bus_res, &bus->resources, list) { | ||
97 | if (bus_res->res == res) | ||
98 | return (bus_res->flags & PCI_SUBTRACTIVE_DECODE) ? | ||
99 | false : true; | ||
100 | } | ||
101 | return true; | ||
102 | } | ||
103 | |||
67 | /* | 104 | /* |
68 | * Find the highest-address bus resource below the cursor "res". If the | 105 | * Find the next-best bus resource after the cursor "res". If the cursor is |
69 | * cursor is NULL, return the highest resource. | 106 | * NULL, return the best resource. "Best" means that we prefer positive |
107 | * decode regions over subtractive decode, then those at higher addresses. | ||
70 | */ | 108 | */ |
71 | static struct resource *pci_bus_find_resource_prev(struct pci_bus *bus, | 109 | static struct resource *pci_bus_find_resource_prev(struct pci_bus *bus, |
72 | unsigned int type, | 110 | unsigned int type, |
73 | struct resource *res) | 111 | struct resource *res) |
74 | { | 112 | { |
113 | bool res_pos, r_pos, prev_pos = false; | ||
75 | struct resource *r, *prev = NULL; | 114 | struct resource *r, *prev = NULL; |
76 | int i; | 115 | int i; |
77 | 116 | ||
117 | res_pos = pci_bus_resource_positive(bus, res); | ||
78 | pci_bus_for_each_resource(bus, r, i) { | 118 | pci_bus_for_each_resource(bus, r, i) { |
79 | if (!r) | 119 | if (!r) |
80 | continue; | 120 | continue; |
@@ -82,26 +122,14 @@ static struct resource *pci_bus_find_resource_prev(struct pci_bus *bus, | |||
82 | if ((r->flags & IORESOURCE_TYPE_BITS) != type) | 122 | if ((r->flags & IORESOURCE_TYPE_BITS) != type) |
83 | continue; | 123 | continue; |
84 | 124 | ||
85 | /* If this resource is at or past the cursor, skip it */ | 125 | r_pos = pci_bus_resource_positive(bus, r); |
86 | if (res) { | 126 | if (!res || pci_bus_resource_better(res, res_pos, r, r_pos)) { |
87 | if (r == res) | 127 | if (!prev || pci_bus_resource_better(r, r_pos, |
88 | continue; | 128 | prev, prev_pos)) { |
89 | if (r->end > res->end) | 129 | prev = r; |
90 | continue; | 130 | prev_pos = r_pos; |
91 | if (r->end == res->end && r->start > res->start) | 131 | } |
92 | continue; | ||
93 | } | 132 | } |
94 | |||
95 | if (!prev) | ||
96 | prev = r; | ||
97 | |||
98 | /* | ||
99 | * A small resource is higher than a large one that ends at | ||
100 | * the same address. | ||
101 | */ | ||
102 | if (r->end > prev->end || | ||
103 | (r->end == prev->end && r->start > prev->start)) | ||
104 | prev = r; | ||
105 | } | 133 | } |
106 | 134 | ||
107 | return prev; | 135 | return prev; |
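
pci_bus_resource_better() defines a strict preference order for allocation: positive decode beats subtractive decode, then the resource containing the higher end address wins, then the higher start, and finally the pointer value serves as an arbitrary but consistent tie-break. The same comparison can be exercised standalone (hypothetical resource struct, not the kernel's):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct res { unsigned long start, end; bool positive; };

/* Same ordering as pci_bus_resource_better(): true if r1 is preferred over r2. */
static bool better(const struct res *r1, const struct res *r2)
{
	if (r1->positive != r2->positive)
		return r1->positive;                  /* positive decode always wins */
	if (r1->end != r2->end)
		return r1->end > r2->end;             /* then the higher end address */
	if (r1->start != r2->start)
		return r1->start > r2->start;         /* then the higher start */
	return (uintptr_t)r1 > (uintptr_t)r2;         /* arbitrary but consistent */
}

int main(void)
{
	struct res host   = { 0x00000000UL, 0xffffffffUL, false }; /* subtractive decode */
	struct res window = { 0xe0000000UL, 0xefffffffUL, true };  /* bridge window */

	printf("prefer the %s\n",
	       better(&window, &host) ? "bridge window" : "host bridge range");
	return 0;
}
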
diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c index 5becbdee4027..2850e64dedae 100644 --- a/drivers/pci/hotplug/ibmphp_ebda.c +++ b/drivers/pci/hotplug/ibmphp_ebda.c | |||
@@ -276,6 +276,12 @@ int __init ibmphp_access_ebda (void) | |||
276 | 276 | ||
277 | for (;;) { | 277 | for (;;) { |
278 | offset = next_offset; | 278 | offset = next_offset; |
279 | |||
280 | /* Make sure what we read is still in the mapped section */ | ||
281 | if (WARN(offset > (ebda_sz * 1024 - 4), | ||
282 | "ibmphp_ebda: next read is beyond ebda_sz\n")) | ||
283 | break; | ||
284 | |||
279 | next_offset = readw (io_mem + offset); /* offset of next blk */ | 285 | next_offset = readw (io_mem + offset); /* offset of next blk */ |
280 | 286 | ||
281 | offset += 2; | 287 | offset += 2; |
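
The added WARN stops the EBDA block walk as soon as the next 2-byte offset read would land outside the ebda_sz * 1024 bytes that were actually mapped, instead of reading past the end of the mapping. A simplified sketch of the same check-before-follow loop over an ordinary buffer:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	uint8_t ebda[16];       /* stands in for the ioremapped EBDA area */
	size_t next = 0, off;

	memset(ebda, 0, sizeof(ebda));
	ebda[0]  = 14;          /* first block points at offset 14 */
	ebda[14] = 20;          /* ...which points outside the mapping */

	for (;;) {
		off = next;
		/* Refuse to read a 2-byte "next" field that would end past the mapping. */
		if (off > sizeof(ebda) - 2) {
			printf("next read at offset %zu is beyond the mapped area\n", off);
			break;
		}
		next = ebda[off];   /* simplified readw(): low byte only */
		printf("block at offset %zu, next block at %zu\n", off, next);
		if (next == 0)
			break;
	}
	return 0;
}
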
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index b5a7d9bfcb24..63d5042f2079 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c | |||
@@ -705,17 +705,21 @@ void pci_remove_legacy_files(struct pci_bus *b) | |||
705 | 705 | ||
706 | #ifdef HAVE_PCI_MMAP | 706 | #ifdef HAVE_PCI_MMAP |
707 | 707 | ||
708 | int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma) | 708 | int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma, |
709 | enum pci_mmap_api mmap_api) | ||
709 | { | 710 | { |
710 | unsigned long nr, start, size; | 711 | unsigned long nr, start, size, pci_start; |
711 | 712 | ||
713 | if (pci_resource_len(pdev, resno) == 0) | ||
714 | return 0; | ||
712 | nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; | 715 | nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; |
713 | start = vma->vm_pgoff; | 716 | start = vma->vm_pgoff; |
714 | size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1; | 717 | size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1; |
715 | if (start < size && size - start >= nr) | 718 | pci_start = (mmap_api == PCI_MMAP_PROCFS) ? |
719 | pci_resource_start(pdev, resno) >> PAGE_SHIFT : 0; | ||
720 | if (start >= pci_start && start < pci_start + size && | ||
721 | start + nr <= pci_start + size) | ||
716 | return 1; | 722 | return 1; |
717 | WARN(1, "process \"%s\" tried to map 0x%08lx-0x%08lx on %s BAR %d (size 0x%08lx)\n", | ||
718 | current->comm, start, start+nr, pci_name(pdev), resno, size); | ||
719 | return 0; | 723 | return 0; |
720 | } | 724 | } |
721 | 725 | ||
@@ -745,8 +749,15 @@ pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr, | |||
745 | if (i >= PCI_ROM_RESOURCE) | 749 | if (i >= PCI_ROM_RESOURCE) |
746 | return -ENODEV; | 750 | return -ENODEV; |
747 | 751 | ||
748 | if (!pci_mmap_fits(pdev, i, vma)) | 752 | if (!pci_mmap_fits(pdev, i, vma, PCI_MMAP_SYSFS)) { |
753 | WARN(1, "process \"%s\" tried to map 0x%08lx bytes " | ||
754 | "at page 0x%08lx on %s BAR %d (start 0x%16Lx, size 0x%16Lx)\n", | ||
755 | current->comm, vma->vm_end-vma->vm_start, vma->vm_pgoff, | ||
756 | pci_name(pdev), i, | ||
757 | (u64)pci_resource_start(pdev, i), | ||
758 | (u64)pci_resource_len(pdev, i)); | ||
749 | return -EINVAL; | 759 | return -EINVAL; |
760 | } | ||
750 | 761 | ||
751 | /* pci_mmap_page_range() expects the same kind of entry as coming | 762 | /* pci_mmap_page_range() expects the same kind of entry as coming |
752 | * from /proc/bus/pci/ which is a "user visible" value. If this is | 763 | * from /proc/bus/pci/ which is a "user visible" value. If this is |
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index e98c8104297b..710c8a29be0d 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -1007,6 +1007,18 @@ static int __pci_enable_device_flags(struct pci_dev *dev, | |||
1007 | int err; | 1007 | int err; |
1008 | int i, bars = 0; | 1008 | int i, bars = 0; |
1009 | 1009 | ||
1010 | /* | ||
1011 | * Power state could be unknown at this point, either due to a fresh | ||
1012 | * boot or a device removal call. So get the current power state | ||
1013 | * so that things like MSI message writing will behave as expected | ||
1014 | * (e.g. if the device really is in D0 at enable time). | ||
1015 | */ | ||
1016 | if (dev->pm_cap) { | ||
1017 | u16 pmcsr; | ||
1018 | pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); | ||
1019 | dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK); | ||
1020 | } | ||
1021 | |||
1010 | if (atomic_add_return(1, &dev->enable_cnt) > 1) | 1022 | if (atomic_add_return(1, &dev->enable_cnt) > 1) |
1011 | return 0; /* already enabled */ | 1023 | return 0; /* already enabled */ |
1012 | 1024 | ||
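
The new block re-reads the device's real power state before the enable count is bumped, because dev->current_state can be stale after a fresh boot or a remove/rescan, and things like MSI message writes depend on whether the device is actually in D0. The D-state lives in the two low bits of PMCSR (PCI_PM_CTRL_STATE_MASK is 0x0003); a small decode sketch with pretend register values:

#include <stdio.h>
#include <stdint.h>

#define PCI_PM_CTRL_STATE_MASK 0x0003   /* D-state field of the PMCSR register */

static const char *d_state(uint16_t pmcsr)
{
	switch (pmcsr & PCI_PM_CTRL_STATE_MASK) {
	case 0: return "D0";
	case 1: return "D1";
	case 2: return "D2";
	default: return "D3hot";
	}
}

int main(void)
{
	/* Pretend values as read via pci_read_config_word(dev, pm_cap + PCI_PM_CTRL, ...). */
	uint16_t left_by_firmware = 0x0003;   /* device parked in D3hot */
	uint16_t after_power_up   = 0x0000;   /* device really in D0 */

	printf("cached state: %s, then %s\n",
	       d_state(left_by_firmware), d_state(after_power_up));
	return 0;
}
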
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index f5c7c382765f..7d33f6673868 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h | |||
@@ -22,8 +22,13 @@ extern void pci_remove_firmware_label_files(struct pci_dev *pdev); | |||
22 | #endif | 22 | #endif |
23 | extern void pci_cleanup_rom(struct pci_dev *dev); | 23 | extern void pci_cleanup_rom(struct pci_dev *dev); |
24 | #ifdef HAVE_PCI_MMAP | 24 | #ifdef HAVE_PCI_MMAP |
25 | enum pci_mmap_api { | ||
26 | PCI_MMAP_SYSFS, /* mmap on /sys/bus/pci/devices/<BDF>/resource<N> */ | ||
27 | PCI_MMAP_PROCFS /* mmap on /proc/bus/pci/<BDF> */ | ||
28 | }; | ||
25 | extern int pci_mmap_fits(struct pci_dev *pdev, int resno, | 29 | extern int pci_mmap_fits(struct pci_dev *pdev, int resno, |
26 | struct vm_area_struct *vma); | 30 | struct vm_area_struct *vma, |
31 | enum pci_mmap_api mmap_api); | ||
27 | #endif | 32 | #endif |
28 | int pci_probe_reset_function(struct pci_dev *dev); | 33 | int pci_probe_reset_function(struct pci_dev *dev); |
29 | 34 | ||
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c index 297b72c880a1..ea00647f4732 100644 --- a/drivers/pci/proc.c +++ b/drivers/pci/proc.c | |||
@@ -257,7 +257,7 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma) | |||
257 | 257 | ||
258 | /* Make sure the caller is mapping a real resource for this device */ | 258 | /* Make sure the caller is mapping a real resource for this device */ |
259 | for (i = 0; i < PCI_ROM_RESOURCE; i++) { | 259 | for (i = 0; i < PCI_ROM_RESOURCE; i++) { |
260 | if (pci_mmap_fits(dev, i, vma)) | 260 | if (pci_mmap_fits(dev, i, vma, PCI_MMAP_PROCFS)) |
261 | break; | 261 | break; |
262 | } | 262 | } |
263 | 263 | ||
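
The reworked pci_mmap_fits() reflects the fact that the two mmap entry points use different offset conventions: through sysfs the vm_pgoff is relative to the start of the BAR, while through procfs it is an absolute page frame number, so the window being validated starts at the BAR's own page for PCI_MMAP_PROCFS and at zero for PCI_MMAP_SYSFS. A standalone sketch of the bounds test, using a hypothetical 64 KiB BAR:

#include <stdio.h>
#include <stdbool.h>

enum mmap_api { MMAP_SYSFS, MMAP_PROCFS };

/* All quantities in pages, mirroring the kernel's >> PAGE_SHIFT arithmetic. */
static bool mmap_fits(unsigned long vm_pgoff, unsigned long nr_pages,
		      unsigned long bar_start_pfn, unsigned long bar_pages,
		      enum mmap_api api)
{
	unsigned long pci_start = (api == MMAP_PROCFS) ? bar_start_pfn : 0;

	if (bar_pages == 0)
		return false;
	return vm_pgoff >= pci_start &&
	       vm_pgoff < pci_start + bar_pages &&
	       vm_pgoff + nr_pages <= pci_start + bar_pages;
}

int main(void)
{
	/* Hypothetical 64 KiB BAR at physical 0xf0000000 with 4 KiB pages. */
	unsigned long bar_pfn = 0xf0000000UL >> 12, bar_pages = 16;

	printf("sysfs,  offset 0, 16 pages: %d\n",
	       mmap_fits(0, 16, bar_pfn, bar_pages, MMAP_SYSFS));
	printf("procfs, offset 0, 16 pages: %d\n",
	       mmap_fits(0, 16, bar_pfn, bar_pages, MMAP_PROCFS));
	printf("procfs, BAR pfn,  16 pages: %d\n",
	       mmap_fits(bar_pfn, 16, bar_pfn, bar_pages, MMAP_PROCFS));
	return 0;
}
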
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c index a87c4985326e..3a5a6fcc0ead 100644 --- a/drivers/pci/xen-pcifront.c +++ b/drivers/pci/xen-pcifront.c | |||
@@ -13,7 +13,6 @@ | |||
13 | #include <linux/spinlock.h> | 13 | #include <linux/spinlock.h> |
14 | #include <linux/pci.h> | 14 | #include <linux/pci.h> |
15 | #include <linux/msi.h> | 15 | #include <linux/msi.h> |
16 | #include <xen/xenbus.h> | ||
17 | #include <xen/interface/io/pciif.h> | 16 | #include <xen/interface/io/pciif.h> |
18 | #include <asm/xen/pci.h> | 17 | #include <asm/xen/pci.h> |
19 | #include <linux/interrupt.h> | 18 | #include <linux/interrupt.h> |
@@ -576,8 +575,9 @@ static pci_ers_result_t pcifront_common_process(int cmd, | |||
576 | 575 | ||
577 | pcidev = pci_get_bus_and_slot(bus, devfn); | 576 | pcidev = pci_get_bus_and_slot(bus, devfn); |
578 | if (!pcidev || !pcidev->driver) { | 577 | if (!pcidev || !pcidev->driver) { |
579 | dev_err(&pcidev->dev, | 578 | dev_err(&pdev->xdev->dev, "device or AER driver is NULL\n"); |
580 | "device or driver is NULL\n"); | 579 | if (pcidev) |
580 | pci_dev_put(pcidev); | ||
581 | return result; | 581 | return result; |
582 | } | 582 | } |
583 | pdrv = pcidev->driver; | 583 | pdrv = pcidev->driver; |
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c index 8cbfa067171f..96c72e90b79c 100644 --- a/drivers/pcmcia/pd6729.c +++ b/drivers/pcmcia/pd6729.c | |||
@@ -725,17 +725,17 @@ static int __devinit pd6729_pci_probe(struct pci_dev *dev, | |||
725 | 725 | ||
726 | return 0; | 726 | return 0; |
727 | 727 | ||
728 | err_out_free_res2: | 728 | err_out_free_res2: |
729 | if (irq_mode == 1) | 729 | if (irq_mode == 1) |
730 | free_irq(dev->irq, socket); | 730 | free_irq(dev->irq, socket); |
731 | else | 731 | else |
732 | del_timer_sync(&socket->poll_timer); | 732 | del_timer_sync(&socket->poll_timer); |
733 | err_out_free_res: | 733 | err_out_free_res: |
734 | pci_release_regions(dev); | 734 | pci_release_regions(dev); |
735 | err_out_disable: | 735 | err_out_disable: |
736 | pci_disable_device(dev); | 736 | pci_disable_device(dev); |
737 | 737 | ||
738 | err_out_free_mem: | 738 | err_out_free_mem: |
739 | kfree(socket); | 739 | kfree(socket); |
740 | return ret; | 740 | return ret; |
741 | } | 741 | } |
diff --git a/drivers/pcmcia/pd6729.h b/drivers/pcmcia/pd6729.h index 41418d394c55..c8e84bdece38 100644 --- a/drivers/pcmcia/pd6729.h +++ b/drivers/pcmcia/pd6729.h | |||
@@ -15,7 +15,7 @@ | |||
15 | struct pd6729_socket { | 15 | struct pd6729_socket { |
16 | int number; | 16 | int number; |
17 | int card_irq; | 17 | int card_irq; |
18 | unsigned long io_base; /* base io address of the socket */ | 18 | unsigned long io_base; /* base io address of the socket */ |
19 | struct pcmcia_socket socket; | 19 | struct pcmcia_socket socket; |
20 | struct timer_list poll_timer; | 20 | struct timer_list poll_timer; |
21 | }; | 21 | }; |
diff --git a/drivers/pcmcia/pxa2xx_sharpsl.c b/drivers/pcmcia/pxa2xx_sharpsl.c index 0ea3b29440e6..81af2b3bcc00 100644 --- a/drivers/pcmcia/pxa2xx_sharpsl.c +++ b/drivers/pcmcia/pxa2xx_sharpsl.c | |||
@@ -237,7 +237,7 @@ static struct pcmcia_low_level sharpsl_pcmcia_ops __initdata = { | |||
237 | #ifdef CONFIG_SA1100_COLLIE | 237 | #ifdef CONFIG_SA1100_COLLIE |
238 | #include "sa11xx_base.h" | 238 | #include "sa11xx_base.h" |
239 | 239 | ||
240 | int __init pcmcia_collie_init(struct device *dev) | 240 | int __devinit pcmcia_collie_init(struct device *dev) |
241 | { | 241 | { |
242 | int ret = -ENODEV; | 242 | int ret = -ENODEV; |
243 | 243 | ||
diff --git a/drivers/pcmcia/sa1100_assabet.c b/drivers/pcmcia/sa1100_assabet.c index fd013a1ef47a..f1e882272ab0 100644 --- a/drivers/pcmcia/sa1100_assabet.c +++ b/drivers/pcmcia/sa1100_assabet.c | |||
@@ -130,7 +130,7 @@ static struct pcmcia_low_level assabet_pcmcia_ops = { | |||
130 | .socket_suspend = assabet_pcmcia_socket_suspend, | 130 | .socket_suspend = assabet_pcmcia_socket_suspend, |
131 | }; | 131 | }; |
132 | 132 | ||
133 | int pcmcia_assabet_init(struct device *dev) | 133 | int __devinit pcmcia_assabet_init(struct device *dev) |
134 | { | 134 | { |
135 | int ret = -ENODEV; | 135 | int ret = -ENODEV; |
136 | 136 | ||
diff --git a/drivers/pcmcia/sa1100_cerf.c b/drivers/pcmcia/sa1100_cerf.c index 9bf088b17275..30560df8c76b 100644 --- a/drivers/pcmcia/sa1100_cerf.c +++ b/drivers/pcmcia/sa1100_cerf.c | |||
@@ -97,7 +97,7 @@ static struct pcmcia_low_level cerf_pcmcia_ops = { | |||
97 | .socket_suspend = cerf_pcmcia_socket_suspend, | 97 | .socket_suspend = cerf_pcmcia_socket_suspend, |
98 | }; | 98 | }; |
99 | 99 | ||
100 | int __init pcmcia_cerf_init(struct device *dev) | 100 | int __devinit pcmcia_cerf_init(struct device *dev) |
101 | { | 101 | { |
102 | int ret = -ENODEV; | 102 | int ret = -ENODEV; |
103 | 103 | ||
diff --git a/drivers/pcmcia/sa1100_generic.c b/drivers/pcmcia/sa1100_generic.c index 945857f8c284..6b228590b3fd 100644 --- a/drivers/pcmcia/sa1100_generic.c +++ b/drivers/pcmcia/sa1100_generic.c | |||
@@ -64,7 +64,7 @@ static int (*sa11x0_pcmcia_hw_init[])(struct device *dev) = { | |||
64 | #endif | 64 | #endif |
65 | }; | 65 | }; |
66 | 66 | ||
67 | static int sa11x0_drv_pcmcia_probe(struct platform_device *dev) | 67 | static int __devinit sa11x0_drv_pcmcia_probe(struct platform_device *dev) |
68 | { | 68 | { |
69 | int i, ret = -ENODEV; | 69 | int i, ret = -ENODEV; |
70 | 70 | ||
diff --git a/drivers/pcmcia/sa1100_h3600.c b/drivers/pcmcia/sa1100_h3600.c index 56329ad575a9..edf8f0028898 100644 --- a/drivers/pcmcia/sa1100_h3600.c +++ b/drivers/pcmcia/sa1100_h3600.c | |||
@@ -219,7 +219,7 @@ struct pcmcia_low_level h3600_pcmcia_ops = { | |||
219 | .socket_suspend = h3600_pcmcia_socket_suspend, | 219 | .socket_suspend = h3600_pcmcia_socket_suspend, |
220 | }; | 220 | }; |
221 | 221 | ||
222 | int __init pcmcia_h3600_init(struct device *dev) | 222 | int __devinit pcmcia_h3600_init(struct device *dev) |
223 | { | 223 | { |
224 | int ret = -ENODEV; | 224 | int ret = -ENODEV; |
225 | 225 | ||
diff --git a/drivers/pcmcia/sa1100_shannon.c b/drivers/pcmcia/sa1100_shannon.c index c4d51867a050..7ff1b43540b8 100644 --- a/drivers/pcmcia/sa1100_shannon.c +++ b/drivers/pcmcia/sa1100_shannon.c | |||
@@ -113,7 +113,7 @@ static struct pcmcia_low_level shannon_pcmcia_ops = { | |||
113 | .socket_suspend = shannon_pcmcia_socket_suspend, | 113 | .socket_suspend = shannon_pcmcia_socket_suspend, |
114 | }; | 114 | }; |
115 | 115 | ||
116 | int __init pcmcia_shannon_init(struct device *dev) | 116 | int __devinit pcmcia_shannon_init(struct device *dev) |
117 | { | 117 | { |
118 | int ret = -ENODEV; | 118 | int ret = -ENODEV; |
119 | 119 | ||
diff --git a/drivers/pcmcia/sa1100_simpad.c b/drivers/pcmcia/sa1100_simpad.c index 05bd504e6f18..c998f7aaadbc 100644 --- a/drivers/pcmcia/sa1100_simpad.c +++ b/drivers/pcmcia/sa1100_simpad.c | |||
@@ -123,7 +123,7 @@ static struct pcmcia_low_level simpad_pcmcia_ops = { | |||
123 | .socket_suspend = simpad_pcmcia_socket_suspend, | 123 | .socket_suspend = simpad_pcmcia_socket_suspend, |
124 | }; | 124 | }; |
125 | 125 | ||
126 | int __init pcmcia_simpad_init(struct device *dev) | 126 | int __devinit pcmcia_simpad_init(struct device *dev) |
127 | { | 127 | { |
128 | int ret = -ENODEV; | 128 | int ret = -ENODEV; |
129 | 129 | ||
diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c index 689e3c02edb8..3753fd0722e7 100644 --- a/drivers/pcmcia/soc_common.c +++ b/drivers/pcmcia/soc_common.c | |||
@@ -57,11 +57,16 @@ module_param(pc_debug, int, 0644); | |||
57 | void soc_pcmcia_debug(struct soc_pcmcia_socket *skt, const char *func, | 57 | void soc_pcmcia_debug(struct soc_pcmcia_socket *skt, const char *func, |
58 | int lvl, const char *fmt, ...) | 58 | int lvl, const char *fmt, ...) |
59 | { | 59 | { |
60 | struct va_format vaf; | ||
60 | va_list args; | 61 | va_list args; |
61 | if (pc_debug > lvl) { | 62 | if (pc_debug > lvl) { |
62 | printk(KERN_DEBUG "skt%u: %s: ", skt->nr, func); | ||
63 | va_start(args, fmt); | 63 | va_start(args, fmt); |
64 | vprintk(fmt, args); | 64 | |
65 | vaf.fmt = fmt; | ||
66 | vaf.va = &args; | ||
67 | |||
68 | printk(KERN_DEBUG "skt%u: %s: %pV", skt->nr, func, &vaf); | ||
69 | |||
65 | va_end(args); | 70 | va_end(args); |
66 | } | 71 | } |
67 | } | 72 | } |
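
Passing a struct va_format through the %pV specifier lets the "skt%u: %s: " prefix and the caller's format string come out as one printk instead of two messages that output from other CPUs could interleave with. In ordinary userspace C the usual way to get the same single-write behaviour is to format into one buffer first; a small analogous sketch (not the kernel's %pV mechanism):

#include <stdio.h>
#include <stdarg.h>

/* Build the whole debug line in one buffer, then emit it with one write. */
static void skt_debug(unsigned int nr, const char *func, const char *fmt, ...)
{
	char buf[256];
	va_list args;
	int len;

	len = snprintf(buf, sizeof(buf), "skt%u: %s: ", nr, func);
	if (len < 0 || (size_t)len >= sizeof(buf))
		len = sizeof(buf) - 1;

	va_start(args, fmt);
	vsnprintf(buf + len, sizeof(buf) - len, fmt, args);
	va_end(args);

	fputs(buf, stderr);     /* one write, so prefix and message stay together */
}

int main(void)
{
	skt_debug(0, "soc_common_pcmcia_get_status", "status %04x\n", 0x00c0);
	return 0;
}
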
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c index 68cf0c99138a..7b5080c45569 100644 --- a/drivers/rapidio/rio.c +++ b/drivers/rapidio/rio.c | |||
@@ -1159,11 +1159,11 @@ int __devinit rio_init_mports(void) | |||
1159 | 1159 | ||
1160 | list_for_each_entry(port, &rio_mports, node) { | 1160 | list_for_each_entry(port, &rio_mports, node) { |
1161 | if (!request_mem_region(port->iores.start, | 1161 | if (!request_mem_region(port->iores.start, |
1162 | port->iores.end - port->iores.start, | 1162 | resource_size(&port->iores), |
1163 | port->name)) { | 1163 | port->name)) { |
1164 | printk(KERN_ERR | 1164 | printk(KERN_ERR |
1165 | "RIO: Error requesting master port region 0x%016llx-0x%016llx\n", | 1165 | "RIO: Error requesting master port region 0x%016llx-0x%016llx\n", |
1166 | (u64)port->iores.start, (u64)port->iores.end - 1); | 1166 | (u64)port->iores.start, (u64)port->iores.end); |
1167 | rc = -ENOMEM; | 1167 | rc = -ENOMEM; |
1168 | goto out; | 1168 | goto out; |
1169 | } | 1169 | } |
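
resource_size() is end - start + 1 because struct resource ranges are inclusive, so the old request_mem_region() call asked for one byte too little, and the corrected error message now prints the real inclusive end rather than end - 1. A two-line illustration with a made-up master-port range:

#include <stdio.h>

struct resource { unsigned long long start, end; };  /* inclusive range, as in the kernel */

static unsigned long long resource_size(const struct resource *r)
{
	return r->end - r->start + 1;
}

int main(void)
{
	struct resource iores = { 0xffe0c00000ULL, 0xffe0c0ffffULL };

	printf("size = 0x%llx (old code asked for 0x%llx)\n",
	       resource_size(&iores), iores.end - iores.start);
	return 0;
}
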
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c index 5efbd5990ff8..06e41ed93230 100644 --- a/drivers/rtc/rtc-sh.c +++ b/drivers/rtc/rtc-sh.c | |||
@@ -761,7 +761,7 @@ err_unmap: | |||
761 | clk_put(rtc->clk); | 761 | clk_put(rtc->clk); |
762 | iounmap(rtc->regbase); | 762 | iounmap(rtc->regbase); |
763 | err_badmap: | 763 | err_badmap: |
764 | release_resource(rtc->res); | 764 | release_mem_region(rtc->res->start, rtc->regsize); |
765 | err_badres: | 765 | err_badres: |
766 | kfree(rtc); | 766 | kfree(rtc); |
767 | 767 | ||
@@ -786,7 +786,7 @@ static int __exit sh_rtc_remove(struct platform_device *pdev) | |||
786 | } | 786 | } |
787 | 787 | ||
788 | iounmap(rtc->regbase); | 788 | iounmap(rtc->regbase); |
789 | release_resource(rtc->res); | 789 | release_mem_region(rtc->res->start, rtc->regsize); |
790 | 790 | ||
791 | clk_disable(rtc->clk); | 791 | clk_disable(rtc->clk); |
792 | clk_put(rtc->clk); | 792 | clk_put(rtc->clk); |
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c index 6c408670e08d..b3a3e8e8656e 100644 --- a/drivers/s390/char/tape_core.c +++ b/drivers/s390/char/tape_core.c | |||
@@ -209,29 +209,79 @@ tape_state_set(struct tape_device *device, enum tape_state newstate) | |||
209 | wake_up(&device->state_change_wq); | 209 | wake_up(&device->state_change_wq); |
210 | } | 210 | } |
211 | 211 | ||
212 | struct tape_med_state_work_data { | ||
213 | struct tape_device *device; | ||
214 | enum tape_medium_state state; | ||
215 | struct work_struct work; | ||
216 | }; | ||
217 | |||
218 | static void | ||
219 | tape_med_state_work_handler(struct work_struct *work) | ||
220 | { | ||
221 | static char env_state_loaded[] = "MEDIUM_STATE=LOADED"; | ||
222 | static char env_state_unloaded[] = "MEDIUM_STATE=UNLOADED"; | ||
223 | struct tape_med_state_work_data *p = | ||
224 | container_of(work, struct tape_med_state_work_data, work); | ||
225 | struct tape_device *device = p->device; | ||
226 | char *envp[] = { NULL, NULL }; | ||
227 | |||
228 | switch (p->state) { | ||
229 | case MS_UNLOADED: | ||
230 | pr_info("%s: The tape cartridge has been successfully " | ||
231 | "unloaded\n", dev_name(&device->cdev->dev)); | ||
232 | envp[0] = env_state_unloaded; | ||
233 | kobject_uevent_env(&device->cdev->dev.kobj, KOBJ_CHANGE, envp); | ||
234 | break; | ||
235 | case MS_LOADED: | ||
236 | pr_info("%s: A tape cartridge has been mounted\n", | ||
237 | dev_name(&device->cdev->dev)); | ||
238 | envp[0] = env_state_loaded; | ||
239 | kobject_uevent_env(&device->cdev->dev.kobj, KOBJ_CHANGE, envp); | ||
240 | break; | ||
241 | default: | ||
242 | break; | ||
243 | } | ||
244 | tape_put_device(device); | ||
245 | kfree(p); | ||
246 | } | ||
247 | |||
248 | static void | ||
249 | tape_med_state_work(struct tape_device *device, enum tape_medium_state state) | ||
250 | { | ||
251 | struct tape_med_state_work_data *p; | ||
252 | |||
253 | p = kzalloc(sizeof(*p), GFP_ATOMIC); | ||
254 | if (p) { | ||
255 | INIT_WORK(&p->work, tape_med_state_work_handler); | ||
256 | p->device = tape_get_device(device); | ||
257 | p->state = state; | ||
258 | schedule_work(&p->work); | ||
259 | } | ||
260 | } | ||
261 | |||
212 | void | 262 | void |
213 | tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate) | 263 | tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate) |
214 | { | 264 | { |
215 | if (device->medium_state == newstate) | 265 | enum tape_medium_state oldstate; |
266 | |||
267 | oldstate = device->medium_state; | ||
268 | if (oldstate == newstate) | ||
216 | return; | 269 | return; |
270 | device->medium_state = newstate; | ||
217 | switch(newstate){ | 271 | switch(newstate){ |
218 | case MS_UNLOADED: | 272 | case MS_UNLOADED: |
219 | device->tape_generic_status |= GMT_DR_OPEN(~0); | 273 | device->tape_generic_status |= GMT_DR_OPEN(~0); |
220 | if (device->medium_state == MS_LOADED) | 274 | if (oldstate == MS_LOADED) |
221 | pr_info("%s: The tape cartridge has been successfully " | 275 | tape_med_state_work(device, MS_UNLOADED); |
222 | "unloaded\n", dev_name(&device->cdev->dev)); | ||
223 | break; | 276 | break; |
224 | case MS_LOADED: | 277 | case MS_LOADED: |
225 | device->tape_generic_status &= ~GMT_DR_OPEN(~0); | 278 | device->tape_generic_status &= ~GMT_DR_OPEN(~0); |
226 | if (device->medium_state == MS_UNLOADED) | 279 | if (oldstate == MS_UNLOADED) |
227 | pr_info("%s: A tape cartridge has been mounted\n", | 280 | tape_med_state_work(device, MS_LOADED); |
228 | dev_name(&device->cdev->dev)); | ||
229 | break; | 281 | break; |
230 | default: | 282 | default: |
231 | // print nothing | ||
232 | break; | 283 | break; |
233 | } | 284 | } |
234 | device->medium_state = newstate; | ||
235 | wake_up(&device->state_change_wq); | 285 | wake_up(&device->state_change_wq); |
236 | } | 286 | } |
237 | 287 | ||
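
tape_med_state_set() now only records the state change and defers the user-visible side effects (the pr_info and the MEDIUM_STATE uevent) to a work item that takes its own reference on the device, so the notification runs in process context and the device cannot disappear while it is pending; the handler drops the reference and frees the item when done. A compact pthread-based sketch of that hand-off pattern (build with -pthread; hypothetical names, not the kernel workqueue API):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct device { int refs; const char *name; };

struct med_state_work {
	struct device *dev;
	const char *state;              /* "LOADED" or "UNLOADED" */
};

static struct device *get_device(struct device *d) { d->refs++; return d; }
static void put_device(struct device *d)           { d->refs--; }

/* Worker side: emit the notification, then drop the reference and free the item. */
static void *work_handler(void *arg)
{
	struct med_state_work *w = arg;

	printf("%s: MEDIUM_STATE=%s\n", w->dev->name, w->state);
	put_device(w->dev);
	free(w);
	return NULL;
}

/* Queueing side: allocate the item and pin the device before handing it off. */
static int med_state_work(struct device *dev, const char *state, pthread_t *t)
{
	struct med_state_work *w = calloc(1, sizeof(*w));

	if (!w)
		return -1;              /* as in the driver: silently skip on allocation failure */
	w->dev = get_device(dev);
	w->state = state;
	return pthread_create(t, NULL, work_handler, w);
}

int main(void)
{
	struct device tape = { 1, "0.0.0100" };
	pthread_t t;

	if (med_state_work(&tape, "UNLOADED", &t) == 0)
		pthread_join(t, NULL);
	printf("device refcount back to %d\n", tape.refs);
	return 0;
}
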
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c index 9f661426e4a1..1cc726b98ec8 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c | |||
@@ -249,27 +249,25 @@ static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr, | |||
249 | char cp_command[80]; | 249 | char cp_command[80]; |
250 | char cp_response[160]; | 250 | char cp_response[160]; |
251 | char *onoff, *qid_string; | 251 | char *onoff, *qid_string; |
252 | int rc; | ||
252 | 253 | ||
253 | memset(cp_command, 0x00, sizeof(cp_command)); | 254 | onoff = ((action == 1) ? "ON" : "OFF"); |
254 | memset(cp_response, 0x00, sizeof(cp_response)); | ||
255 | |||
256 | onoff = ((action == 1) ? "ON" : "OFF"); | ||
257 | qid_string = ((recording_class_AB == 1) ? " QID * " : ""); | 255 | qid_string = ((recording_class_AB == 1) ? " QID * " : ""); |
258 | 256 | ||
259 | /* | 257 | /* |
260 | * The recording command needs to be called with option QID | 258 | * The recording command needs to be called with option QID |
261 | * for guests that have privilege classes A or B. | 259 | * for guests that have privilege classes A or B. |
262 | * Purging has to be done as separate step, because recording | 260 | * Purging has to be done as separate step, because recording |
263 | * can't be switched on as long as records are on the queue. | 261 | * can't be switched on as long as records are on the queue. |
264 | * Doing both at the same time doesn't work. | 262 | * Doing both at the same time doesn't work. |
265 | */ | 263 | */ |
266 | 264 | if (purge && (action == 1)) { | |
267 | if (purge) { | 265 | memset(cp_command, 0x00, sizeof(cp_command)); |
266 | memset(cp_response, 0x00, sizeof(cp_response)); | ||
268 | snprintf(cp_command, sizeof(cp_command), | 267 | snprintf(cp_command, sizeof(cp_command), |
269 | "RECORDING %s PURGE %s", | 268 | "RECORDING %s PURGE %s", |
270 | logptr->recording_name, | 269 | logptr->recording_name, |
271 | qid_string); | 270 | qid_string); |
272 | |||
273 | cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); | 271 | cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); |
274 | } | 272 | } |
275 | 273 | ||
@@ -279,19 +277,33 @@ static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr, | |||
279 | logptr->recording_name, | 277 | logptr->recording_name, |
280 | onoff, | 278 | onoff, |
281 | qid_string); | 279 | qid_string); |
282 | |||
283 | cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); | 280 | cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); |
284 | /* The recording command will usually answer with 'Command complete' | 281 | /* The recording command will usually answer with 'Command complete' |
285 | * on success, but when the specific service was never connected | 282 | * on success, but when the specific service was never connected |
286 | * before, then there might be an additional informational message | 283 | * before, then there might be an additional informational message |
287 | * 'HCPCRC8072I Recording entry not found' before the | 284 | * 'HCPCRC8072I Recording entry not found' before the |
288 | * 'Command complete'. So I use strstr rather than strncmp. | 285 | * 'Command complete'. So I use strstr rather than strncmp. |
289 | */ | 286 | */ |
290 | if (strstr(cp_response,"Command complete")) | 287 | if (strstr(cp_response,"Command complete")) |
291 | return 0; | 288 | rc = 0; |
292 | else | 289 | else |
293 | return -EIO; | 290 | rc = -EIO; |
291 | /* | ||
292 | * If we turn recording off, we have to purge any remaining records | ||
293 | * afterwards, as a large number of queued records may impact z/VM | ||
294 | * performance. | ||
295 | */ | ||
296 | if (purge && (action == 0)) { | ||
297 | memset(cp_command, 0x00, sizeof(cp_command)); | ||
298 | memset(cp_response, 0x00, sizeof(cp_response)); | ||
299 | snprintf(cp_command, sizeof(cp_command), | ||
300 | "RECORDING %s PURGE %s", | ||
301 | logptr->recording_name, | ||
302 | qid_string); | ||
303 | cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); | ||
304 | } | ||
294 | 305 | ||
306 | return rc; | ||
295 | } | 307 | } |
296 | 308 | ||
297 | 309 | ||
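
The reordered function purges before switching recording on, because recording cannot be enabled while records are still queued, and purges again after switching it off, since a large backlog of queued records costs z/VM performance; the return code now reflects the RECORDING ON/OFF command itself. The command sequence it builds can be sketched by just formatting the CP command strings (illustration only; the driver sends them via cpcmd()):

#include <stdio.h>

/* Build the CP command sequence for RECORDING on/off, mirroring the new order:
 * purge first when enabling, purge last when disabling. */
static void recording(const char *name, int action, int purge, int class_ab)
{
	const char *onoff = action ? "ON" : "OFF";
	const char *qid = class_ab ? " QID * " : "";

	if (purge && action)
		printf("RECORDING %s PURGE %s\n", name, qid);

	printf("RECORDING %s %s %s\n", name, onoff, qid);

	if (purge && !action)
		printf("RECORDING %s PURGE %s\n", name, qid);
}

int main(void)
{
	recording("LOGREC", 1, 1, 1);    /* enable: purge, then ON   */
	recording("LOGREC", 0, 1, 1);    /* disable: OFF, then purge */
	return 0;
}
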
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 2ff8a22d4257..e8391b89eff4 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c | |||
@@ -1455,7 +1455,16 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process) | |||
1455 | break; | 1455 | break; |
1456 | case IO_SCH_UNREG_ATTACH: | 1456 | case IO_SCH_UNREG_ATTACH: |
1457 | case IO_SCH_UNREG: | 1457 | case IO_SCH_UNREG: |
1458 | if (cdev) | 1458 | if (!cdev) |
1459 | break; | ||
1460 | if (cdev->private->state == DEV_STATE_SENSE_ID) { | ||
1461 | /* | ||
1462 | * Note: delayed work triggered by this event | ||
1463 | * and repeated calls to sch_event are synchronized | ||
1464 | * by the above check for work_pending(cdev). | ||
1465 | */ | ||
1466 | dev_fsm_event(cdev, DEV_EVENT_NOTOPER); | ||
1467 | } else | ||
1459 | ccw_device_set_notoper(cdev); | 1468 | ccw_device_set_notoper(cdev); |
1460 | break; | 1469 | break; |
1461 | case IO_SCH_NOP: | 1470 | case IO_SCH_NOP: |
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 6be43eb126b4..f47a714538db 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h | |||
@@ -440,7 +440,6 @@ struct qeth_qdio_out_q { | |||
440 | * index of buffer to be filled by driver; state EMPTY or PACKING | 440 | * index of buffer to be filled by driver; state EMPTY or PACKING |
441 | */ | 441 | */ |
442 | int next_buf_to_fill; | 442 | int next_buf_to_fill; |
443 | int sync_iqdio_error; | ||
444 | /* | 443 | /* |
445 | * number of buffers that are currently filled (PRIMED) | 444 | * number of buffers that are currently filled (PRIMED) |
446 | * -> these buffers are hardware-owned | 445 | * -> these buffers are hardware-owned |
@@ -695,14 +694,6 @@ struct qeth_mc_mac { | |||
695 | int is_vmac; | 694 | int is_vmac; |
696 | }; | 695 | }; |
697 | 696 | ||
698 | struct qeth_skb_data { | ||
699 | __u32 magic; | ||
700 | int count; | ||
701 | }; | ||
702 | |||
703 | #define QETH_SKB_MAGIC 0x71657468 | ||
704 | #define QETH_SIGA_CC2_RETRIES 3 | ||
705 | |||
706 | struct qeth_rx { | 697 | struct qeth_rx { |
707 | int b_count; | 698 | int b_count; |
708 | int b_index; | 699 | int b_index; |
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 764267062601..e6b2df0e73f5 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c | |||
@@ -877,8 +877,8 @@ out: | |||
877 | return; | 877 | return; |
878 | } | 878 | } |
879 | 879 | ||
880 | static void __qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, | 880 | static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, |
881 | struct qeth_qdio_out_buffer *buf, unsigned int qeth_skip_skb) | 881 | struct qeth_qdio_out_buffer *buf) |
882 | { | 882 | { |
883 | int i; | 883 | int i; |
884 | struct sk_buff *skb; | 884 | struct sk_buff *skb; |
@@ -887,13 +887,11 @@ static void __qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, | |||
887 | if (buf->buffer->element[0].flags & 0x40) | 887 | if (buf->buffer->element[0].flags & 0x40) |
888 | atomic_dec(&queue->set_pci_flags_count); | 888 | atomic_dec(&queue->set_pci_flags_count); |
889 | 889 | ||
890 | if (!qeth_skip_skb) { | 890 | skb = skb_dequeue(&buf->skb_list); |
891 | while (skb) { | ||
892 | atomic_dec(&skb->users); | ||
893 | dev_kfree_skb_any(skb); | ||
891 | skb = skb_dequeue(&buf->skb_list); | 894 | skb = skb_dequeue(&buf->skb_list); |
892 | while (skb) { | ||
893 | atomic_dec(&skb->users); | ||
894 | dev_kfree_skb_any(skb); | ||
895 | skb = skb_dequeue(&buf->skb_list); | ||
896 | } | ||
897 | } | 895 | } |
898 | for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) { | 896 | for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) { |
899 | if (buf->buffer->element[i].addr && buf->is_header[i]) | 897 | if (buf->buffer->element[i].addr && buf->is_header[i]) |
@@ -909,12 +907,6 @@ static void __qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, | |||
909 | atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY); | 907 | atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY); |
910 | } | 908 | } |
911 | 909 | ||
912 | static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, | ||
913 | struct qeth_qdio_out_buffer *buf) | ||
914 | { | ||
915 | __qeth_clear_output_buffer(queue, buf, 0); | ||
916 | } | ||
917 | |||
918 | void qeth_clear_qdio_buffers(struct qeth_card *card) | 910 | void qeth_clear_qdio_buffers(struct qeth_card *card) |
919 | { | 911 | { |
920 | int i, j; | 912 | int i, j; |
@@ -2833,7 +2825,6 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, | |||
2833 | } | 2825 | } |
2834 | } | 2826 | } |
2835 | 2827 | ||
2836 | queue->sync_iqdio_error = 0; | ||
2837 | queue->card->dev->trans_start = jiffies; | 2828 | queue->card->dev->trans_start = jiffies; |
2838 | if (queue->card->options.performance_stats) { | 2829 | if (queue->card->options.performance_stats) { |
2839 | queue->card->perf_stats.outbound_do_qdio_cnt++; | 2830 | queue->card->perf_stats.outbound_do_qdio_cnt++; |
@@ -2849,10 +2840,6 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, | |||
2849 | queue->card->perf_stats.outbound_do_qdio_time += | 2840 | queue->card->perf_stats.outbound_do_qdio_time += |
2850 | qeth_get_micros() - | 2841 | qeth_get_micros() - |
2851 | queue->card->perf_stats.outbound_do_qdio_start_time; | 2842 | queue->card->perf_stats.outbound_do_qdio_start_time; |
2852 | if (rc > 0) { | ||
2853 | if (!(rc & QDIO_ERROR_SIGA_BUSY)) | ||
2854 | queue->sync_iqdio_error = rc & 3; | ||
2855 | } | ||
2856 | if (rc) { | 2843 | if (rc) { |
2857 | queue->card->stats.tx_errors += count; | 2844 | queue->card->stats.tx_errors += count; |
2858 | /* ignore temporary SIGA errors without busy condition */ | 2845 | /* ignore temporary SIGA errors without busy condition */ |
@@ -2916,7 +2903,7 @@ void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue, | |||
2916 | { | 2903 | { |
2917 | struct qeth_card *card = (struct qeth_card *)card_ptr; | 2904 | struct qeth_card *card = (struct qeth_card *)card_ptr; |
2918 | 2905 | ||
2919 | if (card->dev) | 2906 | if (card->dev && (card->dev->flags & IFF_UP)) |
2920 | napi_schedule(&card->napi); | 2907 | napi_schedule(&card->napi); |
2921 | } | 2908 | } |
2922 | EXPORT_SYMBOL_GPL(qeth_qdio_start_poll); | 2909 | EXPORT_SYMBOL_GPL(qeth_qdio_start_poll); |
@@ -2940,7 +2927,6 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev, | |||
2940 | struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue]; | 2927 | struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue]; |
2941 | struct qeth_qdio_out_buffer *buffer; | 2928 | struct qeth_qdio_out_buffer *buffer; |
2942 | int i; | 2929 | int i; |
2943 | unsigned qeth_send_err; | ||
2944 | 2930 | ||
2945 | QETH_CARD_TEXT(card, 6, "qdouhdl"); | 2931 | QETH_CARD_TEXT(card, 6, "qdouhdl"); |
2946 | if (qdio_error & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) { | 2932 | if (qdio_error & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) { |
@@ -2956,9 +2942,8 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev, | |||
2956 | } | 2942 | } |
2957 | for (i = first_element; i < (first_element + count); ++i) { | 2943 | for (i = first_element; i < (first_element + count); ++i) { |
2958 | buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]; | 2944 | buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]; |
2959 | qeth_send_err = qeth_handle_send_error(card, buffer, qdio_error); | 2945 | qeth_handle_send_error(card, buffer, qdio_error); |
2960 | __qeth_clear_output_buffer(queue, buffer, | 2946 | qeth_clear_output_buffer(queue, buffer); |
2961 | (qeth_send_err == QETH_SEND_ERROR_RETRY) ? 1 : 0); | ||
2962 | } | 2947 | } |
2963 | atomic_sub(count, &queue->used_buffers); | 2948 | atomic_sub(count, &queue->used_buffers); |
2964 | /* check if we need to do something on this outbound queue */ | 2949 | /* check if we need to do something on this outbound queue */ |
@@ -3183,10 +3168,7 @@ int qeth_do_send_packet_fast(struct qeth_card *card, | |||
3183 | int offset, int hd_len) | 3168 | int offset, int hd_len) |
3184 | { | 3169 | { |
3185 | struct qeth_qdio_out_buffer *buffer; | 3170 | struct qeth_qdio_out_buffer *buffer; |
3186 | struct sk_buff *skb1; | ||
3187 | struct qeth_skb_data *retry_ctrl; | ||
3188 | int index; | 3171 | int index; |
3189 | int rc; | ||
3190 | 3172 | ||
3191 | /* spin until we get the queue ... */ | 3173 | /* spin until we get the queue ... */ |
3192 | while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED, | 3174 | while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED, |
@@ -3205,25 +3187,6 @@ int qeth_do_send_packet_fast(struct qeth_card *card, | |||
3205 | atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); | 3187 | atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); |
3206 | qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len); | 3188 | qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len); |
3207 | qeth_flush_buffers(queue, index, 1); | 3189 | qeth_flush_buffers(queue, index, 1); |
3208 | if (queue->sync_iqdio_error == 2) { | ||
3209 | skb1 = skb_dequeue(&buffer->skb_list); | ||
3210 | while (skb1) { | ||
3211 | atomic_dec(&skb1->users); | ||
3212 | skb1 = skb_dequeue(&buffer->skb_list); | ||
3213 | } | ||
3214 | retry_ctrl = (struct qeth_skb_data *) &skb->cb[16]; | ||
3215 | if (retry_ctrl->magic != QETH_SKB_MAGIC) { | ||
3216 | retry_ctrl->magic = QETH_SKB_MAGIC; | ||
3217 | retry_ctrl->count = 0; | ||
3218 | } | ||
3219 | if (retry_ctrl->count < QETH_SIGA_CC2_RETRIES) { | ||
3220 | retry_ctrl->count++; | ||
3221 | rc = dev_queue_xmit(skb); | ||
3222 | } else { | ||
3223 | dev_kfree_skb_any(skb); | ||
3224 | QETH_CARD_TEXT(card, 2, "qrdrop"); | ||
3225 | } | ||
3226 | } | ||
3227 | return 0; | 3190 | return 0; |
3228 | out: | 3191 | out: |
3229 | atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); | 3192 | atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); |
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 50286d8707f3..6bd2dbc4c316 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c | |||
@@ -76,7 +76,7 @@ static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result) | |||
76 | scpnt->scsi_done(scpnt); | 76 | scpnt->scsi_done(scpnt); |
77 | } | 77 | } |
78 | 78 | ||
79 | static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, | 79 | static int zfcp_scsi_queuecommand_lck(struct scsi_cmnd *scpnt, |
80 | void (*done) (struct scsi_cmnd *)) | 80 | void (*done) (struct scsi_cmnd *)) |
81 | { | 81 | { |
82 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device); | 82 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device); |
@@ -127,6 +127,8 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, | |||
127 | return ret; | 127 | return ret; |
128 | } | 128 | } |
129 | 129 | ||
130 | static DEF_SCSI_QCMD(zfcp_scsi_queuecommand) | ||
131 | |||
130 | static int zfcp_scsi_slave_alloc(struct scsi_device *sdev) | 132 | static int zfcp_scsi_slave_alloc(struct scsi_device *sdev) |
131 | { | 133 | { |
132 | struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); | 134 | struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); |
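Note on the conversion pattern repeated in the hunks above and below: each driver keeps its old handler with the (struct scsi_cmnd *, void (*done)(struct scsi_cmnd *)) signature, renamed with a _lck suffix, and DEF_SCSI_QCMD() then emits a wrapper carrying the new (struct Scsi_Host *, struct scsi_cmnd *) prototype that the midlayer calls without the host lock held. The sketch below is a rough reconstruction of what that wrapper does, inferred from the call sites in this series; the authoritative macro body (including its serial-number bookkeeping, omitted here) lives in include/scsi/scsi_host.h, and the foo_queuecommand/foo_queuecommand_lck names are placeholders.

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

/* Old-style handler kept by the driver, renamed with a _lck suffix. */
static int foo_queuecommand_lck(struct scsi_cmnd *cmd,
				void (*done)(struct scsi_cmnd *));

/* Roughly what "static DEF_SCSI_QCMD(foo_queuecommand)" expands to; the
 * storage class comes from the prefix written at the expansion site. */
static int foo_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	unsigned long irq_flags;
	int rc;

	/* Re-create the locking the midlayer used to provide around
	 * queuecommand, so unconverted handlers keep their old semantics. */
	spin_lock_irqsave(shost->host_lock, irq_flags);
	rc = foo_queuecommand_lck(cmd, cmd->scsi_done);
	spin_unlock_irqrestore(shost->host_lock, irq_flags);

	return rc;
}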
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index fcf08b3f52c1..b7bd5b0cc7aa 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c | |||
@@ -1765,7 +1765,7 @@ out: | |||
1765 | } /* End twa_scsi_eh_reset() */ | 1765 | } /* End twa_scsi_eh_reset() */ |
1766 | 1766 | ||
1767 | /* This is the main scsi queue function to handle scsi opcodes */ | 1767 | /* This is the main scsi queue function to handle scsi opcodes */ |
1768 | static int twa_scsi_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | 1768 | static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) |
1769 | { | 1769 | { |
1770 | int request_id, retval; | 1770 | int request_id, retval; |
1771 | TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata; | 1771 | TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata; |
@@ -1812,6 +1812,8 @@ out: | |||
1812 | return retval; | 1812 | return retval; |
1813 | } /* End twa_scsi_queue() */ | 1813 | } /* End twa_scsi_queue() */ |
1814 | 1814 | ||
1815 | static DEF_SCSI_QCMD(twa_scsi_queue) | ||
1816 | |||
1815 | /* This function hands scsi cdb's to the firmware */ | 1817 | /* This function hands scsi cdb's to the firmware */ |
1816 | static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg) | 1818 | static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg) |
1817 | { | 1819 | { |
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c index 6a95d111d207..13e39e1fdfe2 100644 --- a/drivers/scsi/3w-sas.c +++ b/drivers/scsi/3w-sas.c | |||
@@ -1501,7 +1501,7 @@ out: | |||
1501 | } /* End twl_scsi_eh_reset() */ | 1501 | } /* End twl_scsi_eh_reset() */ |
1502 | 1502 | ||
1503 | /* This is the main scsi queue function to handle scsi opcodes */ | 1503 | /* This is the main scsi queue function to handle scsi opcodes */ |
1504 | static int twl_scsi_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | 1504 | static int twl_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) |
1505 | { | 1505 | { |
1506 | int request_id, retval; | 1506 | int request_id, retval; |
1507 | TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata; | 1507 | TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata; |
@@ -1536,6 +1536,8 @@ out: | |||
1536 | return retval; | 1536 | return retval; |
1537 | } /* End twl_scsi_queue() */ | 1537 | } /* End twl_scsi_queue() */ |
1538 | 1538 | ||
1539 | static DEF_SCSI_QCMD(twl_scsi_queue) | ||
1540 | |||
1539 | /* This function tells the controller to shut down */ | 1541 | /* This function tells the controller to shut down */ |
1540 | static void __twl_shutdown(TW_Device_Extension *tw_dev) | 1542 | static void __twl_shutdown(TW_Device_Extension *tw_dev) |
1541 | { | 1543 | { |
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c index b1125341f4c8..7fe96ff60c58 100644 --- a/drivers/scsi/3w-xxxx.c +++ b/drivers/scsi/3w-xxxx.c | |||
@@ -1947,7 +1947,7 @@ static int tw_scsiop_test_unit_ready_complete(TW_Device_Extension *tw_dev, int r | |||
1947 | } /* End tw_scsiop_test_unit_ready_complete() */ | 1947 | } /* End tw_scsiop_test_unit_ready_complete() */ |
1948 | 1948 | ||
1949 | /* This is the main scsi queue function to handle scsi opcodes */ | 1949 | /* This is the main scsi queue function to handle scsi opcodes */ |
1950 | static int tw_scsi_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | 1950 | static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) |
1951 | { | 1951 | { |
1952 | unsigned char *command = SCpnt->cmnd; | 1952 | unsigned char *command = SCpnt->cmnd; |
1953 | int request_id = 0; | 1953 | int request_id = 0; |
@@ -2023,6 +2023,8 @@ static int tw_scsi_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd | |||
2023 | return retval; | 2023 | return retval; |
2024 | } /* End tw_scsi_queue() */ | 2024 | } /* End tw_scsi_queue() */ |
2025 | 2025 | ||
2026 | static DEF_SCSI_QCMD(tw_scsi_queue) | ||
2027 | |||
2026 | /* This function is the interrupt service routine */ | 2028 | /* This function is the interrupt service routine */ |
2027 | static irqreturn_t tw_interrupt(int irq, void *dev_instance) | 2029 | static irqreturn_t tw_interrupt(int irq, void *dev_instance) |
2028 | { | 2030 | { |
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c index 89fc1c8af86b..f672491774eb 100644 --- a/drivers/scsi/53c700.c +++ b/drivers/scsi/53c700.c | |||
@@ -167,7 +167,7 @@ MODULE_LICENSE("GPL"); | |||
167 | #include "53c700_d.h" | 167 | #include "53c700_d.h" |
168 | 168 | ||
169 | 169 | ||
170 | STATIC int NCR_700_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *)); | 170 | STATIC int NCR_700_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *); |
171 | STATIC int NCR_700_abort(struct scsi_cmnd * SCpnt); | 171 | STATIC int NCR_700_abort(struct scsi_cmnd * SCpnt); |
172 | STATIC int NCR_700_bus_reset(struct scsi_cmnd * SCpnt); | 172 | STATIC int NCR_700_bus_reset(struct scsi_cmnd * SCpnt); |
173 | STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt); | 173 | STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt); |
@@ -1749,8 +1749,8 @@ NCR_700_intr(int irq, void *dev_id) | |||
1749 | return IRQ_RETVAL(handled); | 1749 | return IRQ_RETVAL(handled); |
1750 | } | 1750 | } |
1751 | 1751 | ||
1752 | STATIC int | 1752 | static int |
1753 | NCR_700_queuecommand(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)) | 1753 | NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)) |
1754 | { | 1754 | { |
1755 | struct NCR_700_Host_Parameters *hostdata = | 1755 | struct NCR_700_Host_Parameters *hostdata = |
1756 | (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0]; | 1756 | (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0]; |
@@ -1904,6 +1904,8 @@ NCR_700_queuecommand(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)) | |||
1904 | return 0; | 1904 | return 0; |
1905 | } | 1905 | } |
1906 | 1906 | ||
1907 | STATIC DEF_SCSI_QCMD(NCR_700_queuecommand) | ||
1908 | |||
1907 | STATIC int | 1909 | STATIC int |
1908 | NCR_700_abort(struct scsi_cmnd * SCp) | 1910 | NCR_700_abort(struct scsi_cmnd * SCp) |
1909 | { | 1911 | { |
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c index fc0b4b81d552..f66c33b9ab41 100644 --- a/drivers/scsi/BusLogic.c +++ b/drivers/scsi/BusLogic.c | |||
@@ -2807,7 +2807,7 @@ static int BusLogic_host_reset(struct scsi_cmnd * SCpnt) | |||
2807 | Outgoing Mailbox for execution by the associated Host Adapter. | 2807 | Outgoing Mailbox for execution by the associated Host Adapter. |
2808 | */ | 2808 | */ |
2809 | 2809 | ||
2810 | static int BusLogic_QueueCommand(struct scsi_cmnd *Command, void (*CompletionRoutine) (struct scsi_cmnd *)) | 2810 | static int BusLogic_QueueCommand_lck(struct scsi_cmnd *Command, void (*CompletionRoutine) (struct scsi_cmnd *)) |
2811 | { | 2811 | { |
2812 | struct BusLogic_HostAdapter *HostAdapter = (struct BusLogic_HostAdapter *) Command->device->host->hostdata; | 2812 | struct BusLogic_HostAdapter *HostAdapter = (struct BusLogic_HostAdapter *) Command->device->host->hostdata; |
2813 | struct BusLogic_TargetFlags *TargetFlags = &HostAdapter->TargetFlags[Command->device->id]; | 2813 | struct BusLogic_TargetFlags *TargetFlags = &HostAdapter->TargetFlags[Command->device->id]; |
@@ -2994,6 +2994,7 @@ static int BusLogic_QueueCommand(struct scsi_cmnd *Command, void (*CompletionRou | |||
2994 | return 0; | 2994 | return 0; |
2995 | } | 2995 | } |
2996 | 2996 | ||
2997 | static DEF_SCSI_QCMD(BusLogic_QueueCommand) | ||
2997 | 2998 | ||
2998 | #if 0 | 2999 | #if 0 |
2999 | /* | 3000 | /* |
diff --git a/drivers/scsi/BusLogic.h b/drivers/scsi/BusLogic.h index 73f237a1ed94..649fcb31f26d 100644 --- a/drivers/scsi/BusLogic.h +++ b/drivers/scsi/BusLogic.h | |||
@@ -1319,7 +1319,7 @@ static inline void BusLogic_IncrementSizeBucket(BusLogic_CommandSizeBuckets_T Co | |||
1319 | */ | 1319 | */ |
1320 | 1320 | ||
1321 | static const char *BusLogic_DriverInfo(struct Scsi_Host *); | 1321 | static const char *BusLogic_DriverInfo(struct Scsi_Host *); |
1322 | static int BusLogic_QueueCommand(struct scsi_cmnd *, void (*CompletionRoutine) (struct scsi_cmnd *)); | 1322 | static int BusLogic_QueueCommand(struct Scsi_Host *h, struct scsi_cmnd *); |
1323 | static int BusLogic_BIOSDiskParameters(struct scsi_device *, struct block_device *, sector_t, int *); | 1323 | static int BusLogic_BIOSDiskParameters(struct scsi_device *, struct block_device *, sector_t, int *); |
1324 | static int BusLogic_ProcDirectoryInfo(struct Scsi_Host *, char *, char **, off_t, int, int); | 1324 | static int BusLogic_ProcDirectoryInfo(struct Scsi_Host *, char *, char **, off_t, int, int); |
1325 | static int BusLogic_SlaveConfigure(struct scsi_device *); | 1325 | static int BusLogic_SlaveConfigure(struct scsi_device *); |
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c index 5d2f148889ad..9a5629f94f95 100644 --- a/drivers/scsi/NCR5380.c +++ b/drivers/scsi/NCR5380.c | |||
@@ -952,7 +952,7 @@ static void NCR5380_exit(struct Scsi_Host *instance) | |||
952 | * Locks: host lock taken by caller | 952 | * Locks: host lock taken by caller |
953 | */ | 953 | */ |
954 | 954 | ||
955 | static int NCR5380_queue_command(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)) | 955 | static int NCR5380_queue_command_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)) |
956 | { | 956 | { |
957 | struct Scsi_Host *instance = cmd->device->host; | 957 | struct Scsi_Host *instance = cmd->device->host; |
958 | struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata; | 958 | struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata; |
@@ -1021,6 +1021,7 @@ static int NCR5380_queue_command(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)) | |||
1021 | return 0; | 1021 | return 0; |
1022 | } | 1022 | } |
1023 | 1023 | ||
1024 | static DEF_SCSI_QCMD(NCR5380_queue_command) | ||
1024 | 1025 | ||
1025 | /** | 1026 | /** |
1026 | * NCR5380_main - NCR state machines | 1027 | * NCR5380_main - NCR state machines |
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h index bdc468c9e1d9..fd40a32b1f6f 100644 --- a/drivers/scsi/NCR5380.h +++ b/drivers/scsi/NCR5380.h | |||
@@ -313,7 +313,7 @@ static void NCR5380_print(struct Scsi_Host *instance); | |||
313 | #endif | 313 | #endif |
314 | static int NCR5380_abort(Scsi_Cmnd * cmd); | 314 | static int NCR5380_abort(Scsi_Cmnd * cmd); |
315 | static int NCR5380_bus_reset(Scsi_Cmnd * cmd); | 315 | static int NCR5380_bus_reset(Scsi_Cmnd * cmd); |
316 | static int NCR5380_queue_command(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)); | 316 | static int NCR5380_queue_command(struct Scsi_Host *, struct scsi_cmnd *); |
317 | static int __maybe_unused NCR5380_proc_info(struct Scsi_Host *instance, | 317 | static int __maybe_unused NCR5380_proc_info(struct Scsi_Host *instance, |
318 | char *buffer, char **start, off_t offset, int length, int inout); | 318 | char *buffer, char **start, off_t offset, int length, int inout); |
319 | 319 | ||
diff --git a/drivers/scsi/NCR53c406a.c b/drivers/scsi/NCR53c406a.c index 6961f78742ae..c91888a0a23c 100644 --- a/drivers/scsi/NCR53c406a.c +++ b/drivers/scsi/NCR53c406a.c | |||
@@ -693,7 +693,7 @@ static void wait_intr(void) | |||
693 | } | 693 | } |
694 | #endif | 694 | #endif |
695 | 695 | ||
696 | static int NCR53c406a_queue(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *)) | 696 | static int NCR53c406a_queue_lck(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *)) |
697 | { | 697 | { |
698 | int i; | 698 | int i; |
699 | 699 | ||
@@ -726,6 +726,8 @@ static int NCR53c406a_queue(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *)) | |||
726 | return 0; | 726 | return 0; |
727 | } | 727 | } |
728 | 728 | ||
729 | static DEF_SCSI_QCMD(NCR53c406a_queue) | ||
730 | |||
729 | static int NCR53c406a_host_reset(Scsi_Cmnd * SCpnt) | 731 | static int NCR53c406a_host_reset(Scsi_Cmnd * SCpnt) |
730 | { | 732 | { |
731 | DEB(printk("NCR53c406a_reset called\n")); | 733 | DEB(printk("NCR53c406a_reset called\n")); |
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c index dbbc601948e5..dc5ac6e528c4 100644 --- a/drivers/scsi/a100u2w.c +++ b/drivers/scsi/a100u2w.c | |||
@@ -911,7 +911,7 @@ static int inia100_build_scb(struct orc_host * host, struct orc_scb * scb, struc | |||
911 | * queue the command down to the controller | 911 | * queue the command down to the controller |
912 | */ | 912 | */ |
913 | 913 | ||
914 | static int inia100_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *)) | 914 | static int inia100_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *)) |
915 | { | 915 | { |
916 | struct orc_scb *scb; | 916 | struct orc_scb *scb; |
917 | struct orc_host *host; /* Point to Host adapter control block */ | 917 | struct orc_host *host; /* Point to Host adapter control block */ |
@@ -930,6 +930,8 @@ static int inia100_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd | |||
930 | return 0; | 930 | return 0; |
931 | } | 931 | } |
932 | 932 | ||
933 | static DEF_SCSI_QCMD(inia100_queue) | ||
934 | |||
933 | /***************************************************************************** | 935 | /***************************************************************************** |
934 | Function name : inia100_abort | 936 | Function name : inia100_abort |
935 | Description : Abort a queued command. | 937 | Description : Abort a queued command. |
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 29c0ed1cf507..2c93d9496d62 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c | |||
@@ -248,7 +248,7 @@ static struct aac_driver_ident aac_drivers[] = { | |||
248 | * TODO: unify with aac_scsi_cmd(). | 248 | * TODO: unify with aac_scsi_cmd(). |
249 | */ | 249 | */ |
250 | 250 | ||
251 | static int aac_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) | 251 | static int aac_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) |
252 | { | 252 | { |
253 | struct Scsi_Host *host = cmd->device->host; | 253 | struct Scsi_Host *host = cmd->device->host; |
254 | struct aac_dev *dev = (struct aac_dev *)host->hostdata; | 254 | struct aac_dev *dev = (struct aac_dev *)host->hostdata; |
@@ -267,6 +267,8 @@ static int aac_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd | |||
267 | return (aac_scsi_cmd(cmd) ? FAILED : 0); | 267 | return (aac_scsi_cmd(cmd) ? FAILED : 0); |
268 | } | 268 | } |
269 | 269 | ||
270 | static DEF_SCSI_QCMD(aac_queuecommand) | ||
271 | |||
270 | /** | 272 | /** |
271 | * aac_info - Returns the host adapter name | 273 | * aac_info - Returns the host adapter name |
272 | * @shost: Scsi host to report on | 274 | * @shost: Scsi host to report on |
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c index 0ec3da6f3e12..081c6de92bc5 100644 --- a/drivers/scsi/advansys.c +++ b/drivers/scsi/advansys.c | |||
@@ -9500,7 +9500,7 @@ static int asc_execute_scsi_cmnd(struct scsi_cmnd *scp) | |||
9500 | * in the 'scp' result field. | 9500 | * in the 'scp' result field. |
9501 | */ | 9501 | */ |
9502 | static int | 9502 | static int |
9503 | advansys_queuecommand(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *)) | 9503 | advansys_queuecommand_lck(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *)) |
9504 | { | 9504 | { |
9505 | struct Scsi_Host *shost = scp->device->host; | 9505 | struct Scsi_Host *shost = scp->device->host; |
9506 | int asc_res, result = 0; | 9506 | int asc_res, result = 0; |
@@ -9525,6 +9525,8 @@ advansys_queuecommand(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *)) | |||
9525 | return result; | 9525 | return result; |
9526 | } | 9526 | } |
9527 | 9527 | ||
9528 | static DEF_SCSI_QCMD(advansys_queuecommand) | ||
9529 | |||
9528 | static ushort __devinit AscGetEisaChipCfg(PortAddr iop_base) | 9530 | static ushort __devinit AscGetEisaChipCfg(PortAddr iop_base) |
9529 | { | 9531 | { |
9530 | PortAddr eisa_cfg_iop = (PortAddr) ASC_GET_EISA_SLOT(iop_base) | | 9532 | PortAddr eisa_cfg_iop = (PortAddr) ASC_GET_EISA_SLOT(iop_base) | |
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c index 8eab8587ff21..c5169f01c1cd 100644 --- a/drivers/scsi/aha152x.c +++ b/drivers/scsi/aha152x.c | |||
@@ -1056,7 +1056,7 @@ static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct completion *complete, | |||
1056 | * queue a command | 1056 | * queue a command |
1057 | * | 1057 | * |
1058 | */ | 1058 | */ |
1059 | static int aha152x_queue(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)) | 1059 | static int aha152x_queue_lck(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)) |
1060 | { | 1060 | { |
1061 | #if 0 | 1061 | #if 0 |
1062 | if(*SCpnt->cmnd == REQUEST_SENSE) { | 1062 | if(*SCpnt->cmnd == REQUEST_SENSE) { |
@@ -1070,6 +1070,8 @@ static int aha152x_queue(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)) | |||
1070 | return aha152x_internal_queue(SCpnt, NULL, 0, done); | 1070 | return aha152x_internal_queue(SCpnt, NULL, 0, done); |
1071 | } | 1071 | } |
1072 | 1072 | ||
1073 | static DEF_SCSI_QCMD(aha152x_queue) | ||
1074 | |||
1073 | 1075 | ||
1074 | /* | 1076 | /* |
1075 | * | 1077 | * |
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c index 4f785f254c1f..195823a51aab 100644 --- a/drivers/scsi/aha1542.c +++ b/drivers/scsi/aha1542.c | |||
@@ -558,7 +558,7 @@ static void aha1542_intr_handle(struct Scsi_Host *shost) | |||
558 | }; | 558 | }; |
559 | } | 559 | } |
560 | 560 | ||
561 | static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *)) | 561 | static int aha1542_queuecommand_lck(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *)) |
562 | { | 562 | { |
563 | unchar ahacmd = CMD_START_SCSI; | 563 | unchar ahacmd = CMD_START_SCSI; |
564 | unchar direction; | 564 | unchar direction; |
@@ -718,6 +718,8 @@ static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *)) | |||
718 | return 0; | 718 | return 0; |
719 | } | 719 | } |
720 | 720 | ||
721 | static DEF_SCSI_QCMD(aha1542_queuecommand) | ||
722 | |||
721 | /* Initialize mailboxes */ | 723 | /* Initialize mailboxes */ |
722 | static void setup_mailboxes(int bse, struct Scsi_Host *shpnt) | 724 | static void setup_mailboxes(int bse, struct Scsi_Host *shpnt) |
723 | { | 725 | { |
diff --git a/drivers/scsi/aha1542.h b/drivers/scsi/aha1542.h index 1db538552d56..b871d2b57f93 100644 --- a/drivers/scsi/aha1542.h +++ b/drivers/scsi/aha1542.h | |||
@@ -132,7 +132,7 @@ struct ccb { /* Command Control Block 5.3 */ | |||
132 | }; | 132 | }; |
133 | 133 | ||
134 | static int aha1542_detect(struct scsi_host_template *); | 134 | static int aha1542_detect(struct scsi_host_template *); |
135 | static int aha1542_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)); | 135 | static int aha1542_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); |
136 | static int aha1542_bus_reset(Scsi_Cmnd * SCpnt); | 136 | static int aha1542_bus_reset(Scsi_Cmnd * SCpnt); |
137 | static int aha1542_dev_reset(Scsi_Cmnd * SCpnt); | 137 | static int aha1542_dev_reset(Scsi_Cmnd * SCpnt); |
138 | static int aha1542_host_reset(Scsi_Cmnd * SCpnt); | 138 | static int aha1542_host_reset(Scsi_Cmnd * SCpnt); |
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c index 0107a4cc3331..d058f1ab82b5 100644 --- a/drivers/scsi/aha1740.c +++ b/drivers/scsi/aha1740.c | |||
@@ -331,7 +331,7 @@ static irqreturn_t aha1740_intr_handle(int irq, void *dev_id) | |||
331 | return IRQ_RETVAL(handled); | 331 | return IRQ_RETVAL(handled); |
332 | } | 332 | } |
333 | 333 | ||
334 | static int aha1740_queuecommand(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *)) | 334 | static int aha1740_queuecommand_lck(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *)) |
335 | { | 335 | { |
336 | unchar direction; | 336 | unchar direction; |
337 | unchar *cmd = (unchar *) SCpnt->cmnd; | 337 | unchar *cmd = (unchar *) SCpnt->cmnd; |
@@ -503,6 +503,8 @@ static int aha1740_queuecommand(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *)) | |||
503 | return 0; | 503 | return 0; |
504 | } | 504 | } |
505 | 505 | ||
506 | static DEF_SCSI_QCMD(aha1740_queuecommand) | ||
507 | |||
506 | /* Query the board for its irq_level and irq_type. Nothing else matters | 508 | /* Query the board for its irq_level and irq_type. Nothing else matters |
507 | in enhanced mode on an EISA bus. */ | 509 | in enhanced mode on an EISA bus. */ |
508 | 510 | ||
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c index 88ad8482ef59..25d066624476 100644 --- a/drivers/scsi/aic7xxx/aic79xx_osm.c +++ b/drivers/scsi/aic7xxx/aic79xx_osm.c | |||
@@ -573,7 +573,7 @@ ahd_linux_info(struct Scsi_Host *host) | |||
573 | * Queue an SCB to the controller. | 573 | * Queue an SCB to the controller. |
574 | */ | 574 | */ |
575 | static int | 575 | static int |
576 | ahd_linux_queue(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *)) | 576 | ahd_linux_queue_lck(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *)) |
577 | { | 577 | { |
578 | struct ahd_softc *ahd; | 578 | struct ahd_softc *ahd; |
579 | struct ahd_linux_device *dev = scsi_transport_device_data(cmd->device); | 579 | struct ahd_linux_device *dev = scsi_transport_device_data(cmd->device); |
@@ -588,6 +588,8 @@ ahd_linux_queue(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *)) | |||
588 | return rtn; | 588 | return rtn; |
589 | } | 589 | } |
590 | 590 | ||
591 | static DEF_SCSI_QCMD(ahd_linux_queue) | ||
592 | |||
591 | static struct scsi_target ** | 593 | static struct scsi_target ** |
592 | ahd_linux_target_in_softc(struct scsi_target *starget) | 594 | ahd_linux_target_in_softc(struct scsi_target *starget) |
593 | { | 595 | { |
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c index aeea7a61478e..4a359bb307c6 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_osm.c +++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c | |||
@@ -528,7 +528,7 @@ ahc_linux_info(struct Scsi_Host *host) | |||
528 | * Queue an SCB to the controller. | 528 | * Queue an SCB to the controller. |
529 | */ | 529 | */ |
530 | static int | 530 | static int |
531 | ahc_linux_queue(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *)) | 531 | ahc_linux_queue_lck(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *)) |
532 | { | 532 | { |
533 | struct ahc_softc *ahc; | 533 | struct ahc_softc *ahc; |
534 | struct ahc_linux_device *dev = scsi_transport_device_data(cmd->device); | 534 | struct ahc_linux_device *dev = scsi_transport_device_data(cmd->device); |
@@ -548,6 +548,8 @@ ahc_linux_queue(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *)) | |||
548 | return rtn; | 548 | return rtn; |
549 | } | 549 | } |
550 | 550 | ||
551 | static DEF_SCSI_QCMD(ahc_linux_queue) | ||
552 | |||
551 | static inline struct scsi_target ** | 553 | static inline struct scsi_target ** |
552 | ahc_linux_target_in_softc(struct scsi_target *starget) | 554 | ahc_linux_target_in_softc(struct scsi_target *starget) |
553 | { | 555 | { |
diff --git a/drivers/scsi/aic7xxx_old.c b/drivers/scsi/aic7xxx_old.c index aee73fafccc8..4ff60a08df0f 100644 --- a/drivers/scsi/aic7xxx_old.c +++ b/drivers/scsi/aic7xxx_old.c | |||
@@ -10234,7 +10234,7 @@ static void aic7xxx_buildscb(struct aic7xxx_host *p, struct scsi_cmnd *cmd, | |||
10234 | * Description: | 10234 | * Description: |
10235 | * Queue a SCB to the controller. | 10235 | * Queue a SCB to the controller. |
10236 | *-F*************************************************************************/ | 10236 | *-F*************************************************************************/ |
10237 | static int aic7xxx_queue(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *)) | 10237 | static int aic7xxx_queue_lck(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *)) |
10238 | { | 10238 | { |
10239 | struct aic7xxx_host *p; | 10239 | struct aic7xxx_host *p; |
10240 | struct aic7xxx_scb *scb; | 10240 | struct aic7xxx_scb *scb; |
@@ -10292,6 +10292,8 @@ static int aic7xxx_queue(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *)) | |||
10292 | return (0); | 10292 | return (0); |
10293 | } | 10293 | } |
10294 | 10294 | ||
10295 | static DEF_SCSI_QCMD(aic7xxx_queue) | ||
10296 | |||
10295 | /*+F************************************************************************* | 10297 | /*+F************************************************************************* |
10296 | * Function: | 10298 | * Function: |
10297 | * aic7xxx_bus_device_reset | 10299 | * aic7xxx_bus_device_reset |
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index 05a78e515a24..17e3df4f016f 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c | |||
@@ -85,8 +85,7 @@ static int arcmsr_abort(struct scsi_cmnd *); | |||
85 | static int arcmsr_bus_reset(struct scsi_cmnd *); | 85 | static int arcmsr_bus_reset(struct scsi_cmnd *); |
86 | static int arcmsr_bios_param(struct scsi_device *sdev, | 86 | static int arcmsr_bios_param(struct scsi_device *sdev, |
87 | struct block_device *bdev, sector_t capacity, int *info); | 87 | struct block_device *bdev, sector_t capacity, int *info); |
88 | static int arcmsr_queue_command(struct scsi_cmnd *cmd, | 88 | static int arcmsr_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd); |
89 | void (*done) (struct scsi_cmnd *)); | ||
90 | static int arcmsr_probe(struct pci_dev *pdev, | 89 | static int arcmsr_probe(struct pci_dev *pdev, |
91 | const struct pci_device_id *id); | 90 | const struct pci_device_id *id); |
92 | static void arcmsr_remove(struct pci_dev *pdev); | 91 | static void arcmsr_remove(struct pci_dev *pdev); |
@@ -2081,7 +2080,7 @@ static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb, | |||
2081 | } | 2080 | } |
2082 | } | 2081 | } |
2083 | 2082 | ||
2084 | static int arcmsr_queue_command(struct scsi_cmnd *cmd, | 2083 | static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd, |
2085 | void (* done)(struct scsi_cmnd *)) | 2084 | void (* done)(struct scsi_cmnd *)) |
2086 | { | 2085 | { |
2087 | struct Scsi_Host *host = cmd->device->host; | 2086 | struct Scsi_Host *host = cmd->device->host; |
@@ -2124,6 +2123,8 @@ static int arcmsr_queue_command(struct scsi_cmnd *cmd, | |||
2124 | return 0; | 2123 | return 0; |
2125 | } | 2124 | } |
2126 | 2125 | ||
2126 | static DEF_SCSI_QCMD(arcmsr_queue_command) | ||
2127 | |||
2127 | static bool arcmsr_get_hba_config(struct AdapterControlBlock *acb) | 2128 | static bool arcmsr_get_hba_config(struct AdapterControlBlock *acb) |
2128 | { | 2129 | { |
2129 | struct MessageUnit_A __iomem *reg = acb->pmuA; | 2130 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c index 918ccf818757..ec166726b314 100644 --- a/drivers/scsi/arm/acornscsi.c +++ b/drivers/scsi/arm/acornscsi.c | |||
@@ -2511,7 +2511,7 @@ acornscsi_intr(int irq, void *dev_id) | |||
2511 | * done - function called on completion, with pointer to command descriptor | 2511 | * done - function called on completion, with pointer to command descriptor |
2512 | * Returns : 0, or < 0 on error. | 2512 | * Returns : 0, or < 0 on error. |
2513 | */ | 2513 | */ |
2514 | int acornscsi_queuecmd(struct scsi_cmnd *SCpnt, | 2514 | static int acornscsi_queuecmd_lck(struct scsi_cmnd *SCpnt, |
2515 | void (*done)(struct scsi_cmnd *)) | 2515 | void (*done)(struct scsi_cmnd *)) |
2516 | { | 2516 | { |
2517 | AS_Host *host = (AS_Host *)SCpnt->device->host->hostdata; | 2517 | AS_Host *host = (AS_Host *)SCpnt->device->host->hostdata; |
@@ -2561,6 +2561,8 @@ int acornscsi_queuecmd(struct scsi_cmnd *SCpnt, | |||
2561 | return 0; | 2561 | return 0; |
2562 | } | 2562 | } |
2563 | 2563 | ||
2564 | DEF_SCSI_QCMD(acornscsi_queuecmd) | ||
2565 | |||
2564 | /* | 2566 | /* |
2565 | * Prototype: void acornscsi_reportstatus(struct scsi_cmnd **SCpntp1, struct scsi_cmnd **SCpntp2, int result) | 2567 | * Prototype: void acornscsi_reportstatus(struct scsi_cmnd **SCpntp1, struct scsi_cmnd **SCpntp2, int result) |
2566 | * Purpose : pass a result to *SCpntp1, and check if *SCpntp1 = *SCpntp2 | 2568 | * Purpose : pass a result to *SCpntp1, and check if *SCpntp1 = *SCpntp2 |
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c index 9e71ac611146..2b2ce21e227e 100644 --- a/drivers/scsi/arm/fas216.c +++ b/drivers/scsi/arm/fas216.c | |||
@@ -2198,7 +2198,7 @@ no_command: | |||
2198 | * Returns: 0 on success, else error. | 2198 | * Returns: 0 on success, else error. |
2199 | * Notes: io_request_lock is held, interrupts are disabled. | 2199 | * Notes: io_request_lock is held, interrupts are disabled. |
2200 | */ | 2200 | */ |
2201 | int fas216_queue_command(struct scsi_cmnd *SCpnt, | 2201 | static int fas216_queue_command_lck(struct scsi_cmnd *SCpnt, |
2202 | void (*done)(struct scsi_cmnd *)) | 2202 | void (*done)(struct scsi_cmnd *)) |
2203 | { | 2203 | { |
2204 | FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata; | 2204 | FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata; |
@@ -2240,6 +2240,8 @@ int fas216_queue_command(struct scsi_cmnd *SCpnt, | |||
2240 | return result; | 2240 | return result; |
2241 | } | 2241 | } |
2242 | 2242 | ||
2243 | DEF_SCSI_QCMD(fas216_queue_command) | ||
2244 | |||
2243 | /** | 2245 | /** |
2244 | * fas216_internal_done - trigger restart of a waiting thread in fas216_noqueue_command | 2246 | * fas216_internal_done - trigger restart of a waiting thread in fas216_noqueue_command |
2245 | * @SCpnt: Command to wake | 2247 | * @SCpnt: Command to wake |
@@ -2263,7 +2265,7 @@ static void fas216_internal_done(struct scsi_cmnd *SCpnt) | |||
2263 | * Returns: scsi result code. | 2265 | * Returns: scsi result code. |
2264 | * Notes: io_request_lock is held, interrupts are disabled. | 2266 | * Notes: io_request_lock is held, interrupts are disabled. |
2265 | */ | 2267 | */ |
2266 | int fas216_noqueue_command(struct scsi_cmnd *SCpnt, | 2268 | static int fas216_noqueue_command_lck(struct scsi_cmnd *SCpnt, |
2267 | void (*done)(struct scsi_cmnd *)) | 2269 | void (*done)(struct scsi_cmnd *)) |
2268 | { | 2270 | { |
2269 | FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata; | 2271 | FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata; |
@@ -2277,7 +2279,7 @@ int fas216_noqueue_command(struct scsi_cmnd *SCpnt, | |||
2277 | BUG_ON(info->scsi.irq != NO_IRQ); | 2279 | BUG_ON(info->scsi.irq != NO_IRQ); |
2278 | 2280 | ||
2279 | info->internal_done = 0; | 2281 | info->internal_done = 0; |
2280 | fas216_queue_command(SCpnt, fas216_internal_done); | 2282 | fas216_queue_command_lck(SCpnt, fas216_internal_done); |
2281 | 2283 | ||
2282 | /* | 2284 | /* |
2283 | * This wastes time, since we can't return until the command is | 2285 | * This wastes time, since we can't return until the command is |
@@ -2310,6 +2312,8 @@ int fas216_noqueue_command(struct scsi_cmnd *SCpnt, | |||
2310 | return 0; | 2312 | return 0; |
2311 | } | 2313 | } |
2312 | 2314 | ||
2315 | DEF_SCSI_QCMD(fas216_noqueue_command) | ||
2316 | |||
2313 | /* | 2317 | /* |
2314 | * Error handler timeout function. Indicate that we timed out, | 2318 | * Error handler timeout function. Indicate that we timed out, |
2315 | * and wake up any error handler process so it can continue. | 2319 | * and wake up any error handler process so it can continue. |
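Note on the fas216.c hunks above: fas216_noqueue_command_lck now calls fas216_queue_command_lck() directly rather than the public fas216_queue_command, because the public name becomes the DEF_SCSI_QCMD wrapper, which takes the host lock (already held on this path) and always passes cmd->scsi_done as the completion callback. The fragment below is a hypothetical illustration of that internal-caller pattern; the example_* names are placeholders, not symbols from this patch.

/* Private completion used when a command is driven to completion inline. */
static void example_internal_done(struct scsi_cmnd *cmd)
{
	/* e.g. set a flag that the caller's polling loop checks */
}

/* Called from a context that already holds the host lock. */
static void example_issue_polled(struct scsi_cmnd *cmd)
{
	/* Use the _lck variant: it accepts an explicit done() callback and
	 * does not try to re-take the host lock, unlike the generated
	 * example_queuecommand() wrapper. */
	example_queuecommand_lck(cmd, example_internal_done);
}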
diff --git a/drivers/scsi/arm/fas216.h b/drivers/scsi/arm/fas216.h index b65f4cf0eec9..377cfb72cc66 100644 --- a/drivers/scsi/arm/fas216.h +++ b/drivers/scsi/arm/fas216.h | |||
@@ -331,23 +331,21 @@ extern int fas216_init (struct Scsi_Host *instance); | |||
331 | */ | 331 | */ |
332 | extern int fas216_add (struct Scsi_Host *instance, struct device *dev); | 332 | extern int fas216_add (struct Scsi_Host *instance, struct device *dev); |
333 | 333 | ||
334 | /* Function: int fas216_queue_command(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | 334 | /* Function: int fas216_queue_command(struct Scsi_Host *h, struct scsi_cmnd *SCpnt) |
335 | * Purpose : queue a command for adapter to process. | 335 | * Purpose : queue a command for adapter to process. |
336 | * Params : SCpnt - Command to queue | 336 | * Params : h - host adapter |
337 | * done - done function to call once command is complete | 337 | * : SCpnt - Command to queue |
338 | * Returns : 0 - success, else error | 338 | * Returns : 0 - success, else error |
339 | */ | 339 | */ |
340 | extern int fas216_queue_command(struct scsi_cmnd *, | 340 | extern int fas216_queue_command(struct Scsi_Host *h, struct scsi_cmnd *SCpnt); |
341 | void (*done)(struct scsi_cmnd *)); | ||
342 | 341 | ||
343 | /* Function: int fas216_noqueue_command(istruct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | 342 | /* Function: int fas216_noqueue_command(struct Scsi_Host *h, struct scsi_cmnd *SCpnt) |
344 | * Purpose : queue a command for adapter to process, and process it to completion. | 343 | * Purpose : queue a command for adapter to process, and process it to completion. |
345 | * Params : SCpnt - Command to queue | 344 | * Params : h - host adapter |
346 | * done - done function to call once command is complete | 345 | * : SCpnt - Command to queue |
347 | * Returns : 0 - success, else error | 346 | * Returns : 0 - success, else error |
348 | */ | 347 | */ |
349 | extern int fas216_noqueue_command(struct scsi_cmnd *, | 348 | extern int fas216_noqueue_command(struct Scsi_Host *, struct scsi_cmnd *); |
350 | void (*done)(struct scsi_cmnd *)); | ||
351 | 349 | ||
352 | /* Function: irqreturn_t fas216_intr (FAS216_Info *info) | 350 | /* Function: irqreturn_t fas216_intr (FAS216_Info *info) |
353 | * Purpose : handle interrupts from the interface to progress a command | 351 | * Purpose : handle interrupts from the interface to progress a command |
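The fas216.h hunk shows the other half of the conversion: exported prototypes move to the two-argument form because host templates now point .queuecommand at the generated wrapper, while the _lck handler becomes an internal detail. A hypothetical template fragment, loosely modelled on the arm/ sub-drivers that consume fas216.h (field values are placeholders, not taken from this patch), might look like:

static struct scsi_host_template example_template = {
	.module		= THIS_MODULE,
	.name		= "example",
	.queuecommand	= fas216_queue_command,	/* DEF_SCSI_QCMD-generated wrapper */
	.can_queue	= 1,
	.this_id	= 7,
	.sg_tablesize	= SG_ALL,
};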
diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c index 158ebc3644d8..88b2928b4d3b 100644 --- a/drivers/scsi/atari_NCR5380.c +++ b/drivers/scsi/atari_NCR5380.c | |||
@@ -910,7 +910,7 @@ static int __init NCR5380_init(struct Scsi_Host *instance, int flags) | |||
910 | * | 910 | * |
911 | */ | 911 | */ |
912 | 912 | ||
913 | static int NCR5380_queue_command(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *)) | 913 | static int NCR5380_queue_command_lck(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *)) |
914 | { | 914 | { |
915 | SETUP_HOSTDATA(cmd->device->host); | 915 | SETUP_HOSTDATA(cmd->device->host); |
916 | Scsi_Cmnd *tmp; | 916 | Scsi_Cmnd *tmp; |
@@ -1022,6 +1022,8 @@ static int NCR5380_queue_command(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *)) | |||
1022 | return 0; | 1022 | return 0; |
1023 | } | 1023 | } |
1024 | 1024 | ||
1025 | static DEF_SCSI_QCMD(NCR5380_queue_command) | ||
1026 | |||
1025 | /* | 1027 | /* |
1026 | * Function : NCR5380_main (void) | 1028 | * Function : NCR5380_main (void) |
1027 | * | 1029 | * |
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c index ad7a23aef0ec..3e8658e2f154 100644 --- a/drivers/scsi/atari_scsi.c +++ b/drivers/scsi/atari_scsi.c | |||
@@ -572,23 +572,6 @@ static void falcon_get_lock(void) | |||
572 | } | 572 | } |
573 | 573 | ||
574 | 574 | ||
575 | /* This is the wrapper function for NCR5380_queue_command(). It just | ||
576 | * tries to get the lock on the ST-DMA (see above) and then calls the | ||
577 | * original function. | ||
578 | */ | ||
579 | |||
580 | #if 0 | ||
581 | int atari_queue_command(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *)) | ||
582 | { | ||
583 | /* falcon_get_lock(); | ||
584 | * ++guenther: moved to NCR5380_queue_command() to prevent | ||
585 | * race condition, see there for an explanation. | ||
586 | */ | ||
587 | return NCR5380_queue_command(cmd, done); | ||
588 | } | ||
589 | #endif | ||
590 | |||
591 | |||
592 | int __init atari_scsi_detect(struct scsi_host_template *host) | 575 | int __init atari_scsi_detect(struct scsi_host_template *host) |
593 | { | 576 | { |
594 | static int called = 0; | 577 | static int called = 0; |
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c index ab5bdda6903e..76029d570beb 100644 --- a/drivers/scsi/atp870u.c +++ b/drivers/scsi/atp870u.c | |||
@@ -605,7 +605,7 @@ handled: | |||
605 | * | 605 | * |
606 | * Queue a command to the ATP queue. Called with the host lock held. | 606 | * Queue a command to the ATP queue. Called with the host lock held. |
607 | */ | 607 | */ |
608 | static int atp870u_queuecommand(struct scsi_cmnd * req_p, | 608 | static int atp870u_queuecommand_lck(struct scsi_cmnd *req_p, |
609 | void (*done) (struct scsi_cmnd *)) | 609 | void (*done) (struct scsi_cmnd *)) |
610 | { | 610 | { |
611 | unsigned char c; | 611 | unsigned char c; |
@@ -694,6 +694,8 @@ static int atp870u_queuecommand(struct scsi_cmnd * req_p, | |||
694 | return 0; | 694 | return 0; |
695 | } | 695 | } |
696 | 696 | ||
697 | static DEF_SCSI_QCMD(atp870u_queuecommand) | ||
698 | |||
697 | /** | 699 | /** |
698 | * send_s870 - send a command to the controller | 700 | * send_s870 - send a command to the controller |
699 | * @host: host | 701 | * @host: host |
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c index 8daa716739d1..8ca967dee66d 100644 --- a/drivers/scsi/bfa/bfad_im.c +++ b/drivers/scsi/bfa/bfad_im.c | |||
@@ -30,8 +30,7 @@ DEFINE_IDR(bfad_im_port_index); | |||
30 | struct scsi_transport_template *bfad_im_scsi_transport_template; | 30 | struct scsi_transport_template *bfad_im_scsi_transport_template; |
31 | struct scsi_transport_template *bfad_im_scsi_vport_transport_template; | 31 | struct scsi_transport_template *bfad_im_scsi_vport_transport_template; |
32 | static void bfad_im_itnim_work_handler(struct work_struct *work); | 32 | static void bfad_im_itnim_work_handler(struct work_struct *work); |
33 | static int bfad_im_queuecommand(struct scsi_cmnd *cmnd, | 33 | static int bfad_im_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmnd); |
34 | void (*done)(struct scsi_cmnd *)); | ||
35 | static int bfad_im_slave_alloc(struct scsi_device *sdev); | 34 | static int bfad_im_slave_alloc(struct scsi_device *sdev); |
36 | static void bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, | 35 | static void bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, |
37 | struct bfad_itnim_s *itnim); | 36 | struct bfad_itnim_s *itnim); |
@@ -1120,7 +1119,7 @@ bfad_im_itnim_work_handler(struct work_struct *work) | |||
1120 | * Scsi_Host template entry, queue a SCSI command to the BFAD. | 1119 | * Scsi_Host template entry, queue a SCSI command to the BFAD. |
1121 | */ | 1120 | */ |
1122 | static int | 1121 | static int |
1123 | bfad_im_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) | 1122 | bfad_im_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) |
1124 | { | 1123 | { |
1125 | struct bfad_im_port_s *im_port = | 1124 | struct bfad_im_port_s *im_port = |
1126 | (struct bfad_im_port_s *) cmnd->device->host->hostdata[0]; | 1125 | (struct bfad_im_port_s *) cmnd->device->host->hostdata[0]; |
@@ -1187,6 +1186,8 @@ out_fail_cmd: | |||
1187 | return 0; | 1186 | return 0; |
1188 | } | 1187 | } |
1189 | 1188 | ||
1189 | static DEF_SCSI_QCMD(bfad_im_queuecommand) | ||
1190 | |||
1190 | void | 1191 | void |
1191 | bfad_os_rport_online_wait(struct bfad_s *bfad) | 1192 | bfad_os_rport_online_wait(struct bfad_s *bfad) |
1192 | { | 1193 | { |
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c index 54f50b07dac7..8f1b5c8bf903 100644 --- a/drivers/scsi/dc395x.c +++ b/drivers/scsi/dc395x.c | |||
@@ -1080,7 +1080,7 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb, | |||
1080 | * and is expected to be held on return. | 1080 | * and is expected to be held on return. |
1081 | * | 1081 | * |
1082 | **/ | 1082 | **/ |
1083 | static int dc395x_queue_command(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) | 1083 | static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) |
1084 | { | 1084 | { |
1085 | struct DeviceCtlBlk *dcb; | 1085 | struct DeviceCtlBlk *dcb; |
1086 | struct ScsiReqBlk *srb; | 1086 | struct ScsiReqBlk *srb; |
@@ -1154,6 +1154,7 @@ complete: | |||
1154 | return 0; | 1154 | return 0; |
1155 | } | 1155 | } |
1156 | 1156 | ||
1157 | static DEF_SCSI_QCMD(dc395x_queue_command) | ||
1157 | 1158 | ||
1158 | /* | 1159 | /* |
1159 | * Return the disk geometry for the given SCSI device. | 1160 | * Return the disk geometry for the given SCSI device. |
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c index 23dec0063385..cffcb108ac96 100644 --- a/drivers/scsi/dpt_i2o.c +++ b/drivers/scsi/dpt_i2o.c | |||
@@ -423,7 +423,7 @@ static int adpt_slave_configure(struct scsi_device * device) | |||
423 | return 0; | 423 | return 0; |
424 | } | 424 | } |
425 | 425 | ||
426 | static int adpt_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *)) | 426 | static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *)) |
427 | { | 427 | { |
428 | adpt_hba* pHba = NULL; | 428 | adpt_hba* pHba = NULL; |
429 | struct adpt_device* pDev = NULL; /* dpt per device information */ | 429 | struct adpt_device* pDev = NULL; /* dpt per device information */ |
@@ -491,6 +491,8 @@ static int adpt_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *)) | |||
491 | return adpt_scsi_to_i2o(pHba, cmd, pDev); | 491 | return adpt_scsi_to_i2o(pHba, cmd, pDev); |
492 | } | 492 | } |
493 | 493 | ||
494 | static DEF_SCSI_QCMD(adpt_queue) | ||
495 | |||
494 | static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev, | 496 | static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev, |
495 | sector_t capacity, int geom[]) | 497 | sector_t capacity, int geom[]) |
496 | { | 498 | { |
diff --git a/drivers/scsi/dpti.h b/drivers/scsi/dpti.h index 337746d46043..beded716f93f 100644 --- a/drivers/scsi/dpti.h +++ b/drivers/scsi/dpti.h | |||
@@ -29,7 +29,7 @@ | |||
29 | */ | 29 | */ |
30 | 30 | ||
31 | static int adpt_detect(struct scsi_host_template * sht); | 31 | static int adpt_detect(struct scsi_host_template * sht); |
32 | static int adpt_queue(struct scsi_cmnd * cmd, void (*cmdcomplete) (struct scsi_cmnd *)); | 32 | static int adpt_queue(struct Scsi_Host *h, struct scsi_cmnd * cmd); |
33 | static int adpt_abort(struct scsi_cmnd * cmd); | 33 | static int adpt_abort(struct scsi_cmnd * cmd); |
34 | static int adpt_reset(struct scsi_cmnd* cmd); | 34 | static int adpt_reset(struct scsi_cmnd* cmd); |
35 | static int adpt_release(struct Scsi_Host *host); | 35 | static int adpt_release(struct Scsi_Host *host); |
diff --git a/drivers/scsi/dtc.h b/drivers/scsi/dtc.h index 0b205f8c7326..cdc621204b66 100644 --- a/drivers/scsi/dtc.h +++ b/drivers/scsi/dtc.h | |||
@@ -36,7 +36,7 @@ static int dtc_abort(Scsi_Cmnd *); | |||
36 | static int dtc_biosparam(struct scsi_device *, struct block_device *, | 36 | static int dtc_biosparam(struct scsi_device *, struct block_device *, |
37 | sector_t, int*); | 37 | sector_t, int*); |
38 | static int dtc_detect(struct scsi_host_template *); | 38 | static int dtc_detect(struct scsi_host_template *); |
39 | static int dtc_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)); | 39 | static int dtc_queue_command(struct Scsi_Host *, struct scsi_cmnd *); |
40 | static int dtc_bus_reset(Scsi_Cmnd *); | 40 | static int dtc_bus_reset(Scsi_Cmnd *); |
41 | 41 | ||
42 | #ifndef CMD_PER_LUN | 42 | #ifndef CMD_PER_LUN |
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c index d1c31378f6da..53925ac178fd 100644 --- a/drivers/scsi/eata.c +++ b/drivers/scsi/eata.c | |||
@@ -505,8 +505,7 @@ | |||
505 | 505 | ||
506 | static int eata2x_detect(struct scsi_host_template *); | 506 | static int eata2x_detect(struct scsi_host_template *); |
507 | static int eata2x_release(struct Scsi_Host *); | 507 | static int eata2x_release(struct Scsi_Host *); |
508 | static int eata2x_queuecommand(struct scsi_cmnd *, | 508 | static int eata2x_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); |
509 | void (*done) (struct scsi_cmnd *)); | ||
510 | static int eata2x_eh_abort(struct scsi_cmnd *); | 509 | static int eata2x_eh_abort(struct scsi_cmnd *); |
511 | static int eata2x_eh_host_reset(struct scsi_cmnd *); | 510 | static int eata2x_eh_host_reset(struct scsi_cmnd *); |
512 | static int eata2x_bios_param(struct scsi_device *, struct block_device *, | 511 | static int eata2x_bios_param(struct scsi_device *, struct block_device *, |
@@ -1758,7 +1757,7 @@ static void scsi_to_dev_dir(unsigned int i, struct hostdata *ha) | |||
1758 | 1757 | ||
1759 | } | 1758 | } |
1760 | 1759 | ||
1761 | static int eata2x_queuecommand(struct scsi_cmnd *SCpnt, | 1760 | static int eata2x_queuecommand_lck(struct scsi_cmnd *SCpnt, |
1762 | void (*done) (struct scsi_cmnd *)) | 1761 | void (*done) (struct scsi_cmnd *)) |
1763 | { | 1762 | { |
1764 | struct Scsi_Host *shost = SCpnt->device->host; | 1763 | struct Scsi_Host *shost = SCpnt->device->host; |
@@ -1843,6 +1842,8 @@ static int eata2x_queuecommand(struct scsi_cmnd *SCpnt, | |||
1843 | return 0; | 1842 | return 0; |
1844 | } | 1843 | } |
1845 | 1844 | ||
1845 | static DEF_SCSI_QCMD(eata2x_queuecommand) | ||
1846 | |||
1846 | static int eata2x_eh_abort(struct scsi_cmnd *SCarg) | 1847 | static int eata2x_eh_abort(struct scsi_cmnd *SCarg) |
1847 | { | 1848 | { |
1848 | struct Scsi_Host *shost = SCarg->device->host; | 1849 | struct Scsi_Host *shost = SCarg->device->host; |
diff --git a/drivers/scsi/eata_pio.c b/drivers/scsi/eata_pio.c index 60886c19065e..4a9641e69f54 100644 --- a/drivers/scsi/eata_pio.c +++ b/drivers/scsi/eata_pio.c | |||
@@ -335,7 +335,7 @@ static inline unsigned int eata_pio_send_command(unsigned long base, unsigned ch | |||
335 | return 0; | 335 | return 0; |
336 | } | 336 | } |
337 | 337 | ||
338 | static int eata_pio_queue(struct scsi_cmnd *cmd, | 338 | static int eata_pio_queue_lck(struct scsi_cmnd *cmd, |
339 | void (*done)(struct scsi_cmnd *)) | 339 | void (*done)(struct scsi_cmnd *)) |
340 | { | 340 | { |
341 | unsigned int x, y; | 341 | unsigned int x, y; |
@@ -438,6 +438,8 @@ static int eata_pio_queue(struct scsi_cmnd *cmd, | |||
438 | return 0; | 438 | return 0; |
439 | } | 439 | } |
440 | 440 | ||
441 | static DEF_SCSI_QCMD(eata_pio_queue) | ||
442 | |||
441 | static int eata_pio_abort(struct scsi_cmnd *cmd) | 443 | static int eata_pio_abort(struct scsi_cmnd *cmd) |
442 | { | 444 | { |
443 | unsigned int loop = 100; | 445 | unsigned int loop = 100; |
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c index e2bc779f86c1..57558523c1b8 100644 --- a/drivers/scsi/esp_scsi.c +++ b/drivers/scsi/esp_scsi.c | |||
@@ -916,7 +916,7 @@ static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent) | |||
916 | scsi_track_queue_full(dev, lp->num_tagged - 1); | 916 | scsi_track_queue_full(dev, lp->num_tagged - 1); |
917 | } | 917 | } |
918 | 918 | ||
919 | static int esp_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) | 919 | static int esp_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) |
920 | { | 920 | { |
921 | struct scsi_device *dev = cmd->device; | 921 | struct scsi_device *dev = cmd->device; |
922 | struct esp *esp = shost_priv(dev->host); | 922 | struct esp *esp = shost_priv(dev->host); |
@@ -941,6 +941,8 @@ static int esp_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd | |||
941 | return 0; | 941 | return 0; |
942 | } | 942 | } |
943 | 943 | ||
944 | static DEF_SCSI_QCMD(esp_queuecommand) | ||
945 | |||
944 | static int esp_check_gross_error(struct esp *esp) | 946 | static int esp_check_gross_error(struct esp *esp) |
945 | { | 947 | { |
946 | if (esp->sreg & ESP_STAT_SPAM) { | 948 | if (esp->sreg & ESP_STAT_SPAM) { |
diff --git a/drivers/scsi/fd_mcs.c b/drivers/scsi/fd_mcs.c index 2ad95aa8f585..a2c6135d337e 100644 --- a/drivers/scsi/fd_mcs.c +++ b/drivers/scsi/fd_mcs.c | |||
@@ -1072,7 +1072,7 @@ static int fd_mcs_release(struct Scsi_Host *shpnt) | |||
1072 | return 0; | 1072 | return 0; |
1073 | } | 1073 | } |
1074 | 1074 | ||
1075 | static int fd_mcs_queue(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *)) | 1075 | static int fd_mcs_queue_lck(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *)) |
1076 | { | 1076 | { |
1077 | struct Scsi_Host *shpnt = SCpnt->device->host; | 1077 | struct Scsi_Host *shpnt = SCpnt->device->host; |
1078 | 1078 | ||
@@ -1122,6 +1122,8 @@ static int fd_mcs_queue(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *)) | |||
1122 | return 0; | 1122 | return 0; |
1123 | } | 1123 | } |
1124 | 1124 | ||
1125 | static DEF_SCSI_QCMD(fd_mcs_queue) | ||
1126 | |||
1125 | #if DEBUG_ABORT || DEBUG_RESET | 1127 | #if DEBUG_ABORT || DEBUG_RESET |
1126 | static void fd_mcs_print_info(Scsi_Cmnd * SCpnt) | 1128 | static void fd_mcs_print_info(Scsi_Cmnd * SCpnt) |
1127 | { | 1129 | { |
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c index e296bcc57d5c..69b7aa54f43f 100644 --- a/drivers/scsi/fdomain.c +++ b/drivers/scsi/fdomain.c | |||
@@ -1419,7 +1419,7 @@ static irqreturn_t do_fdomain_16x0_intr(int irq, void *dev_id) | |||
1419 | return IRQ_HANDLED; | 1419 | return IRQ_HANDLED; |
1420 | } | 1420 | } |
1421 | 1421 | ||
1422 | static int fdomain_16x0_queue(struct scsi_cmnd *SCpnt, | 1422 | static int fdomain_16x0_queue_lck(struct scsi_cmnd *SCpnt, |
1423 | void (*done)(struct scsi_cmnd *)) | 1423 | void (*done)(struct scsi_cmnd *)) |
1424 | { | 1424 | { |
1425 | if (in_command) { | 1425 | if (in_command) { |
@@ -1469,6 +1469,8 @@ static int fdomain_16x0_queue(struct scsi_cmnd *SCpnt, | |||
1469 | return 0; | 1469 | return 0; |
1470 | } | 1470 | } |
1471 | 1471 | ||
1472 | static DEF_SCSI_QCMD(fdomain_16x0_queue) | ||
1473 | |||
1472 | #if DEBUG_ABORT | 1474 | #if DEBUG_ABORT |
1473 | static void print_info(struct scsi_cmnd *SCpnt) | 1475 | static void print_info(struct scsi_cmnd *SCpnt) |
1474 | { | 1476 | { |
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index cbb20b13b228..92f185081e62 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h | |||
@@ -246,7 +246,7 @@ void fnic_set_port_id(struct fc_lport *, u32, struct fc_frame *); | |||
246 | void fnic_update_mac(struct fc_lport *, u8 *new); | 246 | void fnic_update_mac(struct fc_lport *, u8 *new); |
247 | void fnic_update_mac_locked(struct fnic *, u8 *new); | 247 | void fnic_update_mac_locked(struct fnic *, u8 *new); |
248 | 248 | ||
249 | int fnic_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *)); | 249 | int fnic_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); |
250 | int fnic_abort_cmd(struct scsi_cmnd *); | 250 | int fnic_abort_cmd(struct scsi_cmnd *); |
251 | int fnic_device_reset(struct scsi_cmnd *); | 251 | int fnic_device_reset(struct scsi_cmnd *); |
252 | int fnic_host_reset(struct scsi_cmnd *); | 252 | int fnic_host_reset(struct scsi_cmnd *); |
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index 198cbab3e894..22d02404d15f 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c | |||
@@ -349,7 +349,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic, | |||
349 | * Routine to send a scsi cdb | 349 | * Routine to send a scsi cdb |
350 | * Called with host_lock held and interrupts disabled. | 350 | * Called with host_lock held and interrupts disabled. |
351 | */ | 351 | */ |
352 | int fnic_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) | 352 | static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) |
353 | { | 353 | { |
354 | struct fc_lport *lp; | 354 | struct fc_lport *lp; |
355 | struct fc_rport *rport; | 355 | struct fc_rport *rport; |
@@ -457,6 +457,8 @@ out: | |||
457 | return ret; | 457 | return ret; |
458 | } | 458 | } |
459 | 459 | ||
460 | DEF_SCSI_QCMD(fnic_queuecommand) | ||
461 | |||
460 | /* | 462 | /* |
461 | * fnic_fcpio_fw_reset_cmpl_handler | 463 | * fnic_fcpio_fw_reset_cmpl_handler |
462 | * Routine to handle fw reset completion | 464 | * Routine to handle fw reset completion |
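Note on the fnic hunks: where the entry point is declared in a header and referenced from other files (fnic.h here, and similarly the acornscsi and fas216 hunks earlier), DEF_SCSI_QCMD() is used without a static prefix so the generated wrapper keeps external linkage and matches the two-argument extern declaration, while the old-style handler itself becomes static. A minimal hypothetical sketch of that split (example_* names are placeholders):

/* example.h */
int example_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd);

/* example.c */
static int example_queuecommand_lck(struct scsi_cmnd *cmd,
				    void (*done)(struct scsi_cmnd *))
{
	/* build the request and hand it to the hardware; done() is invoked
	 * later from the completion path */
	return 0;
}

DEF_SCSI_QCMD(example_queuecommand)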
diff --git a/drivers/scsi/g_NCR5380.h b/drivers/scsi/g_NCR5380.h index 921764c9ab24..1bcdb7beb77b 100644 --- a/drivers/scsi/g_NCR5380.h +++ b/drivers/scsi/g_NCR5380.h | |||
@@ -46,7 +46,7 @@ | |||
46 | static int generic_NCR5380_abort(Scsi_Cmnd *); | 46 | static int generic_NCR5380_abort(Scsi_Cmnd *); |
47 | static int generic_NCR5380_detect(struct scsi_host_template *); | 47 | static int generic_NCR5380_detect(struct scsi_host_template *); |
48 | static int generic_NCR5380_release_resources(struct Scsi_Host *); | 48 | static int generic_NCR5380_release_resources(struct Scsi_Host *); |
49 | static int generic_NCR5380_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)); | 49 | static int generic_NCR5380_queue_command(struct Scsi_Host *, struct scsi_cmnd *); |
50 | static int generic_NCR5380_bus_reset(Scsi_Cmnd *); | 50 | static int generic_NCR5380_bus_reset(Scsi_Cmnd *); |
51 | static const char* generic_NCR5380_info(struct Scsi_Host *); | 51 | static const char* generic_NCR5380_info(struct Scsi_Host *); |
52 | 52 | ||
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c index 841101846b88..76365700e2d5 100644 --- a/drivers/scsi/gdth.c +++ b/drivers/scsi/gdth.c | |||
@@ -185,7 +185,7 @@ static long gdth_unlocked_ioctl(struct file *filep, unsigned int cmd, | |||
185 | unsigned long arg); | 185 | unsigned long arg); |
186 | 186 | ||
187 | static void gdth_flush(gdth_ha_str *ha); | 187 | static void gdth_flush(gdth_ha_str *ha); |
188 | static int gdth_queuecommand(Scsi_Cmnd *scp,void (*done)(Scsi_Cmnd *)); | 188 | static int gdth_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd); |
189 | static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp, | 189 | static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp, |
190 | struct gdth_cmndinfo *cmndinfo); | 190 | struct gdth_cmndinfo *cmndinfo); |
191 | static void gdth_scsi_done(struct scsi_cmnd *scp); | 191 | static void gdth_scsi_done(struct scsi_cmnd *scp); |
@@ -4004,7 +4004,7 @@ static int gdth_bios_param(struct scsi_device *sdev,struct block_device *bdev,se | |||
4004 | } | 4004 | } |
4005 | 4005 | ||
4006 | 4006 | ||
4007 | static int gdth_queuecommand(struct scsi_cmnd *scp, | 4007 | static int gdth_queuecommand_lck(struct scsi_cmnd *scp, |
4008 | void (*done)(struct scsi_cmnd *)) | 4008 | void (*done)(struct scsi_cmnd *)) |
4009 | { | 4009 | { |
4010 | gdth_ha_str *ha = shost_priv(scp->device->host); | 4010 | gdth_ha_str *ha = shost_priv(scp->device->host); |
@@ -4022,6 +4022,8 @@ static int gdth_queuecommand(struct scsi_cmnd *scp, | |||
4022 | return __gdth_queuecommand(ha, scp, cmndinfo); | 4022 | return __gdth_queuecommand(ha, scp, cmndinfo); |
4023 | } | 4023 | } |
4024 | 4024 | ||
4025 | static DEF_SCSI_QCMD(gdth_queuecommand) | ||
4026 | |||
4025 | static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp, | 4027 | static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp, |
4026 | struct gdth_cmndinfo *cmndinfo) | 4028 | struct gdth_cmndinfo *cmndinfo) |
4027 | { | 4029 | { |
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index c5d0606ad097..3759d1199b0c 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c | |||
@@ -143,8 +143,7 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, | |||
143 | void *buff, size_t size, u8 page_code, unsigned char *scsi3addr, | 143 | void *buff, size_t size, u8 page_code, unsigned char *scsi3addr, |
144 | int cmd_type); | 144 | int cmd_type); |
145 | 145 | ||
146 | static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd, | 146 | static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd); |
147 | void (*done)(struct scsi_cmnd *)); | ||
148 | static void hpsa_scan_start(struct Scsi_Host *); | 147 | static void hpsa_scan_start(struct Scsi_Host *); |
149 | static int hpsa_scan_finished(struct Scsi_Host *sh, | 148 | static int hpsa_scan_finished(struct Scsi_Host *sh, |
150 | unsigned long elapsed_time); | 149 | unsigned long elapsed_time); |
@@ -1926,7 +1925,7 @@ sglist_finished: | |||
1926 | } | 1925 | } |
1927 | 1926 | ||
1928 | 1927 | ||
1929 | static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd, | 1928 | static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd, |
1930 | void (*done)(struct scsi_cmnd *)) | 1929 | void (*done)(struct scsi_cmnd *)) |
1931 | { | 1930 | { |
1932 | struct ctlr_info *h; | 1931 | struct ctlr_info *h; |
@@ -2020,6 +2019,8 @@ static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd, | |||
2020 | return 0; | 2019 | return 0; |
2021 | } | 2020 | } |
2022 | 2021 | ||
2022 | static DEF_SCSI_QCMD(hpsa_scsi_queue_command) | ||
2023 | |||
2023 | static void hpsa_scan_start(struct Scsi_Host *sh) | 2024 | static void hpsa_scan_start(struct Scsi_Host *sh) |
2024 | { | 2025 | { |
2025 | struct ctlr_info *h = shost_to_hba(sh); | 2026 | struct ctlr_info *h = shost_to_hba(sh); |
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c index 0729f150b33a..10b65556937b 100644 --- a/drivers/scsi/hptiop.c +++ b/drivers/scsi/hptiop.c | |||
@@ -751,7 +751,7 @@ static void hptiop_post_req_mv(struct hptiop_hba *hba, | |||
751 | MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bit, hba); | 751 | MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bit, hba); |
752 | } | 752 | } |
753 | 753 | ||
754 | static int hptiop_queuecommand(struct scsi_cmnd *scp, | 754 | static int hptiop_queuecommand_lck(struct scsi_cmnd *scp, |
755 | void (*done)(struct scsi_cmnd *)) | 755 | void (*done)(struct scsi_cmnd *)) |
756 | { | 756 | { |
757 | struct Scsi_Host *host = scp->device->host; | 757 | struct Scsi_Host *host = scp->device->host; |
@@ -819,6 +819,8 @@ cmd_done: | |||
819 | return 0; | 819 | return 0; |
820 | } | 820 | } |
821 | 821 | ||
822 | static DEF_SCSI_QCMD(hptiop_queuecommand) | ||
823 | |||
822 | static const char *hptiop_info(struct Scsi_Host *host) | 824 | static const char *hptiop_info(struct Scsi_Host *host) |
823 | { | 825 | { |
824 | return driver_name_long; | 826 | return driver_name_long; |
diff --git a/drivers/scsi/ibmmca.c b/drivers/scsi/ibmmca.c index 9a4b69d4f4eb..67fc8ffd52e6 100644 --- a/drivers/scsi/ibmmca.c +++ b/drivers/scsi/ibmmca.c | |||
@@ -39,7 +39,7 @@ | |||
39 | #include <scsi/scsi_host.h> | 39 | #include <scsi/scsi_host.h> |
40 | 40 | ||
41 | /* Common forward declarations for all Linux-versions: */ | 41 | /* Common forward declarations for all Linux-versions: */ |
42 | static int ibmmca_queuecommand (Scsi_Cmnd *, void (*done) (Scsi_Cmnd *)); | 42 | static int ibmmca_queuecommand (struct Scsi_Host *, struct scsi_cmnd *); |
43 | static int ibmmca_abort (Scsi_Cmnd *); | 43 | static int ibmmca_abort (Scsi_Cmnd *); |
44 | static int ibmmca_host_reset (Scsi_Cmnd *); | 44 | static int ibmmca_host_reset (Scsi_Cmnd *); |
45 | static int ibmmca_biosparam (struct scsi_device *, struct block_device *, sector_t, int *); | 45 | static int ibmmca_biosparam (struct scsi_device *, struct block_device *, sector_t, int *); |
@@ -1691,7 +1691,7 @@ static int __devexit ibmmca_remove(struct device *dev) | |||
1691 | } | 1691 | } |
1692 | 1692 | ||
1693 | /* The following routine is the SCSI command queue for the midlevel driver */ | 1693 | /* The following routine is the SCSI command queue for the midlevel driver */ |
1694 | static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)) | 1694 | static int ibmmca_queuecommand_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)) |
1695 | { | 1695 | { |
1696 | unsigned int ldn; | 1696 | unsigned int ldn; |
1697 | unsigned int scsi_cmd; | 1697 | unsigned int scsi_cmd; |
@@ -1996,6 +1996,8 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)) | |||
1996 | return 0; | 1996 | return 0; |
1997 | } | 1997 | } |
1998 | 1998 | ||
1999 | static DEF_SCSI_QCMD(ibmmca_queuecommand) | ||
2000 | |||
1999 | static int __ibmmca_abort(Scsi_Cmnd * cmd) | 2001 | static int __ibmmca_abort(Scsi_Cmnd * cmd) |
2000 | { | 2002 | { |
2001 | /* Abort does not work, as the adapter never generates an interrupt on | 2003 | /* Abort does not work, as the adapter never generates an interrupt on |
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index 00d08b25425f..57cad7e20caa 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c | |||
@@ -1606,7 +1606,7 @@ static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost) | |||
1606 | * Returns: | 1606 | * Returns: |
1607 | * 0 on success / other on failure | 1607 | * 0 on success / other on failure |
1608 | **/ | 1608 | **/ |
1609 | static int ibmvfc_queuecommand(struct scsi_cmnd *cmnd, | 1609 | static int ibmvfc_queuecommand_lck(struct scsi_cmnd *cmnd, |
1610 | void (*done) (struct scsi_cmnd *)) | 1610 | void (*done) (struct scsi_cmnd *)) |
1611 | { | 1611 | { |
1612 | struct ibmvfc_host *vhost = shost_priv(cmnd->device->host); | 1612 | struct ibmvfc_host *vhost = shost_priv(cmnd->device->host); |
@@ -1672,6 +1672,8 @@ static int ibmvfc_queuecommand(struct scsi_cmnd *cmnd, | |||
1672 | return 0; | 1672 | return 0; |
1673 | } | 1673 | } |
1674 | 1674 | ||
1675 | static DEF_SCSI_QCMD(ibmvfc_queuecommand) | ||
1676 | |||
1675 | /** | 1677 | /** |
1676 | * ibmvfc_sync_completion - Signal that a synchronous command has completed | 1678 | * ibmvfc_sync_completion - Signal that a synchronous command has completed |
1677 | * @evt: ibmvfc event struct | 1679 | * @evt: ibmvfc event struct |
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index 67f78a470f5f..041958453e2a 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c | |||
@@ -713,7 +713,7 @@ static inline u16 lun_from_dev(struct scsi_device *dev) | |||
713 | * @cmd: struct scsi_cmnd to be executed | 713 | * @cmd: struct scsi_cmnd to be executed |
714 | * @done: Callback function to be called when cmd is completed | 714 | * @done: Callback function to be called when cmd is completed |
715 | */ | 715 | */ |
716 | static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd, | 716 | static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd, |
717 | void (*done) (struct scsi_cmnd *)) | 717 | void (*done) (struct scsi_cmnd *)) |
718 | { | 718 | { |
719 | struct srp_cmd *srp_cmd; | 719 | struct srp_cmd *srp_cmd; |
@@ -766,6 +766,8 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd, | |||
766 | return ibmvscsi_send_srp_event(evt_struct, hostdata, 0); | 766 | return ibmvscsi_send_srp_event(evt_struct, hostdata, 0); |
767 | } | 767 | } |
768 | 768 | ||
769 | static DEF_SCSI_QCMD(ibmvscsi_queuecommand) | ||
770 | |||
769 | /* ------------------------------------------------------------ | 771 | /* ------------------------------------------------------------ |
770 | * Routines for driver initialization | 772 | * Routines for driver initialization |
771 | */ | 773 | */ |
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c index 4734ab0b3ff6..99aa0e5699bc 100644 --- a/drivers/scsi/imm.c +++ b/drivers/scsi/imm.c | |||
@@ -926,7 +926,7 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd) | |||
926 | return 0; | 926 | return 0; |
927 | } | 927 | } |
928 | 928 | ||
929 | static int imm_queuecommand(struct scsi_cmnd *cmd, | 929 | static int imm_queuecommand_lck(struct scsi_cmnd *cmd, |
930 | void (*done)(struct scsi_cmnd *)) | 930 | void (*done)(struct scsi_cmnd *)) |
931 | { | 931 | { |
932 | imm_struct *dev = imm_dev(cmd->device->host); | 932 | imm_struct *dev = imm_dev(cmd->device->host); |
@@ -949,6 +949,8 @@ static int imm_queuecommand(struct scsi_cmnd *cmd, | |||
949 | return 0; | 949 | return 0; |
950 | } | 950 | } |
951 | 951 | ||
952 | static DEF_SCSI_QCMD(imm_queuecommand) | ||
953 | |||
952 | /* | 954 | /* |
953 | * Apparently the disk->capacity attribute is off by 1 sector | 955 | * Apparently the disk->capacity attribute is off by 1 sector |
954 | * for all disk drives. We add the one here, but it should really | 956 | * for all disk drives. We add the one here, but it should really |
diff --git a/drivers/scsi/in2000.c b/drivers/scsi/in2000.c index 52bdc6df6b92..6568aab745a0 100644 --- a/drivers/scsi/in2000.c +++ b/drivers/scsi/in2000.c | |||
@@ -334,7 +334,7 @@ static uchar calc_sync_xfer(unsigned int period, unsigned int offset) | |||
334 | 334 | ||
335 | static void in2000_execute(struct Scsi_Host *instance); | 335 | static void in2000_execute(struct Scsi_Host *instance); |
336 | 336 | ||
337 | static int in2000_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)) | 337 | static int in2000_queuecommand_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)) |
338 | { | 338 | { |
339 | struct Scsi_Host *instance; | 339 | struct Scsi_Host *instance; |
340 | struct IN2000_hostdata *hostdata; | 340 | struct IN2000_hostdata *hostdata; |
@@ -431,6 +431,8 @@ static int in2000_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)) | |||
431 | return 0; | 431 | return 0; |
432 | } | 432 | } |
433 | 433 | ||
434 | static DEF_SCSI_QCMD(in2000_queuecommand) | ||
435 | |||
434 | 436 | ||
435 | 437 | ||
436 | /* | 438 | /* |
diff --git a/drivers/scsi/in2000.h b/drivers/scsi/in2000.h index 0fb8b06b8392..5821e1fbce08 100644 --- a/drivers/scsi/in2000.h +++ b/drivers/scsi/in2000.h | |||
@@ -396,7 +396,7 @@ struct IN2000_hostdata { | |||
396 | flags) | 396 | flags) |
397 | 397 | ||
398 | static int in2000_detect(struct scsi_host_template *) in2000__INIT; | 398 | static int in2000_detect(struct scsi_host_template *) in2000__INIT; |
399 | static int in2000_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)); | 399 | static int in2000_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); |
400 | static int in2000_abort(Scsi_Cmnd *); | 400 | static int in2000_abort(Scsi_Cmnd *); |
401 | static void in2000_setup(char *, int *) in2000__INIT; | 401 | static void in2000_setup(char *, int *) in2000__INIT; |
402 | static int in2000_biosparam(struct scsi_device *, struct block_device *, | 402 | static int in2000_biosparam(struct scsi_device *, struct block_device *, |
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c index 108797761b95..9627d062e16b 100644 --- a/drivers/scsi/initio.c +++ b/drivers/scsi/initio.c | |||
@@ -2639,7 +2639,7 @@ static void initio_build_scb(struct initio_host * host, struct scsi_ctrl_blk * c | |||
2639 | * will cause the mid layer to call us again later with the command) | 2639 | * will cause the mid layer to call us again later with the command) |
2640 | */ | 2640 | */ |
2641 | 2641 | ||
2642 | static int i91u_queuecommand(struct scsi_cmnd *cmd, | 2642 | static int i91u_queuecommand_lck(struct scsi_cmnd *cmd, |
2643 | void (*done)(struct scsi_cmnd *)) | 2643 | void (*done)(struct scsi_cmnd *)) |
2644 | { | 2644 | { |
2645 | struct initio_host *host = (struct initio_host *) cmd->device->host->hostdata; | 2645 | struct initio_host *host = (struct initio_host *) cmd->device->host->hostdata; |
@@ -2656,6 +2656,8 @@ static int i91u_queuecommand(struct scsi_cmnd *cmd, | |||
2656 | return 0; | 2656 | return 0; |
2657 | } | 2657 | } |
2658 | 2658 | ||
2659 | static DEF_SCSI_QCMD(i91u_queuecommand) | ||
2660 | |||
2659 | /** | 2661 | /** |
2660 | * i91u_bus_reset - reset the SCSI bus | 2662 | * i91u_bus_reset - reset the SCSI bus |
2661 | * @cmnd: Command block we want to trigger the reset for | 2663 | * @cmnd: Command block we want to trigger the reset for |
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index fa60d7df44be..5bbaee597e88 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c | |||
@@ -5709,7 +5709,7 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd) | |||
5709 | * SCSI_MLQUEUE_DEVICE_BUSY if device is busy | 5709 | * SCSI_MLQUEUE_DEVICE_BUSY if device is busy |
5710 | * SCSI_MLQUEUE_HOST_BUSY if host is busy | 5710 | * SCSI_MLQUEUE_HOST_BUSY if host is busy |
5711 | **/ | 5711 | **/ |
5712 | static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd, | 5712 | static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd, |
5713 | void (*done) (struct scsi_cmnd *)) | 5713 | void (*done) (struct scsi_cmnd *)) |
5714 | { | 5714 | { |
5715 | struct ipr_ioa_cfg *ioa_cfg; | 5715 | struct ipr_ioa_cfg *ioa_cfg; |
@@ -5792,6 +5792,8 @@ static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd, | |||
5792 | return 0; | 5792 | return 0; |
5793 | } | 5793 | } |
5794 | 5794 | ||
5795 | static DEF_SCSI_QCMD(ipr_queuecommand) | ||
5796 | |||
5795 | /** | 5797 | /** |
5796 | * ipr_ioctl - IOCTL handler | 5798 | * ipr_ioctl - IOCTL handler |
5797 | * @sdev: scsi device struct | 5799 | * @sdev: scsi device struct |
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c index f83a116955f2..b2511acd39bd 100644 --- a/drivers/scsi/ips.c +++ b/drivers/scsi/ips.c | |||
@@ -232,7 +232,7 @@ static int ips_detect(struct scsi_host_template *); | |||
232 | static int ips_release(struct Scsi_Host *); | 232 | static int ips_release(struct Scsi_Host *); |
233 | static int ips_eh_abort(struct scsi_cmnd *); | 233 | static int ips_eh_abort(struct scsi_cmnd *); |
234 | static int ips_eh_reset(struct scsi_cmnd *); | 234 | static int ips_eh_reset(struct scsi_cmnd *); |
235 | static int ips_queue(struct scsi_cmnd *, void (*)(struct scsi_cmnd *)); | 235 | static int ips_queue(struct Scsi_Host *, struct scsi_cmnd *); |
236 | static const char *ips_info(struct Scsi_Host *); | 236 | static const char *ips_info(struct Scsi_Host *); |
237 | static irqreturn_t do_ipsintr(int, void *); | 237 | static irqreturn_t do_ipsintr(int, void *); |
238 | static int ips_hainit(ips_ha_t *); | 238 | static int ips_hainit(ips_ha_t *); |
@@ -1046,7 +1046,7 @@ static int ips_eh_reset(struct scsi_cmnd *SC) | |||
1046 | /* Linux obtains io_request_lock before calling this function */ | 1046 | /* Linux obtains io_request_lock before calling this function */ |
1047 | /* */ | 1047 | /* */ |
1048 | /****************************************************************************/ | 1048 | /****************************************************************************/ |
1049 | static int ips_queue(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *)) | 1049 | static int ips_queue_lck(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *)) |
1050 | { | 1050 | { |
1051 | ips_ha_t *ha; | 1051 | ips_ha_t *ha; |
1052 | ips_passthru_t *pt; | 1052 | ips_passthru_t *pt; |
@@ -1137,6 +1137,8 @@ static int ips_queue(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *)) | |||
1137 | return (0); | 1137 | return (0); |
1138 | } | 1138 | } |
1139 | 1139 | ||
1140 | static DEF_SCSI_QCMD(ips_queue) | ||
1141 | |||
1140 | /****************************************************************************/ | 1142 | /****************************************************************************/ |
1141 | /* */ | 1143 | /* */ |
1142 | /* Routine Name: ips_biosparam */ | 1144 | /* Routine Name: ips_biosparam */ |
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index e340373b509b..2924363d142b 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c | |||
@@ -1753,7 +1753,7 @@ static inline int fc_fcp_lport_queue_ready(struct fc_lport *lport) | |||
1753 | * This is the i/o strategy routine, called by the SCSI layer. This routine | 1753 | * This is the i/o strategy routine, called by the SCSI layer. This routine |
1754 | * is called with the host_lock held. | 1754 | * is called with the host_lock held. |
1755 | */ | 1755 | */ |
1756 | int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) | 1756 | static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) |
1757 | { | 1757 | { |
1758 | struct fc_lport *lport; | 1758 | struct fc_lport *lport; |
1759 | struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); | 1759 | struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); |
@@ -1851,6 +1851,8 @@ out: | |||
1851 | spin_lock_irq(lport->host->host_lock); | 1851 | spin_lock_irq(lport->host->host_lock); |
1852 | return rc; | 1852 | return rc; |
1853 | } | 1853 | } |
1854 | |||
1855 | DEF_SCSI_QCMD(fc_queuecommand) | ||
1854 | EXPORT_SYMBOL(fc_queuecommand); | 1856 | EXPORT_SYMBOL(fc_queuecommand); |
1855 | 1857 | ||
1856 | /** | 1858 | /** |
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 633e09036357..c15fde808c33 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c | |||
@@ -1599,7 +1599,7 @@ enum { | |||
1599 | FAILURE_SESSION_NOT_READY, | 1599 | FAILURE_SESSION_NOT_READY, |
1600 | }; | 1600 | }; |
1601 | 1601 | ||
1602 | int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) | 1602 | static int iscsi_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) |
1603 | { | 1603 | { |
1604 | struct iscsi_cls_session *cls_session; | 1604 | struct iscsi_cls_session *cls_session; |
1605 | struct Scsi_Host *host; | 1605 | struct Scsi_Host *host; |
@@ -1736,6 +1736,8 @@ fault: | |||
1736 | spin_lock(host->host_lock); | 1736 | spin_lock(host->host_lock); |
1737 | return 0; | 1737 | return 0; |
1738 | } | 1738 | } |
1739 | |||
1740 | DEF_SCSI_QCMD(iscsi_queuecommand) | ||
1739 | EXPORT_SYMBOL_GPL(iscsi_queuecommand); | 1741 | EXPORT_SYMBOL_GPL(iscsi_queuecommand); |
1740 | 1742 | ||
1741 | int iscsi_change_queue_depth(struct scsi_device *sdev, int depth, int reason) | 1743 | int iscsi_change_queue_depth(struct scsi_device *sdev, int depth, int reason) |
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index 55f09e92ab59..29251fabecc6 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c | |||
@@ -189,7 +189,7 @@ int sas_queue_up(struct sas_task *task) | |||
189 | * Note: XXX: Remove the host unlock/lock pair when SCSI Core can | 189 | * Note: XXX: Remove the host unlock/lock pair when SCSI Core can |
190 | * call us without holding an IRQ spinlock... | 190 | * call us without holding an IRQ spinlock... |
191 | */ | 191 | */ |
192 | int sas_queuecommand(struct scsi_cmnd *cmd, | 192 | static int sas_queuecommand_lck(struct scsi_cmnd *cmd, |
193 | void (*scsi_done)(struct scsi_cmnd *)) | 193 | void (*scsi_done)(struct scsi_cmnd *)) |
194 | __releases(host->host_lock) | 194 | __releases(host->host_lock) |
195 | __acquires(dev->sata_dev.ap->lock) | 195 | __acquires(dev->sata_dev.ap->lock) |
@@ -254,6 +254,8 @@ out: | |||
254 | return res; | 254 | return res; |
255 | } | 255 | } |
256 | 256 | ||
257 | DEF_SCSI_QCMD(sas_queuecommand) | ||
258 | |||
257 | static void sas_eh_finish_cmd(struct scsi_cmnd *cmd) | 259 | static void sas_eh_finish_cmd(struct scsi_cmnd *cmd) |
258 | { | 260 | { |
259 | struct sas_task *task = TO_SAS_TASK(cmd); | 261 | struct sas_task *task = TO_SAS_TASK(cmd); |
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index f64b65a770b8..581837b3c71a 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c | |||
@@ -2899,7 +2899,7 @@ void lpfc_poll_timeout(unsigned long ptr) | |||
2899 | * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily. | 2899 | * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily. |
2900 | **/ | 2900 | **/ |
2901 | static int | 2901 | static int |
2902 | lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) | 2902 | lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) |
2903 | { | 2903 | { |
2904 | struct Scsi_Host *shost = cmnd->device->host; | 2904 | struct Scsi_Host *shost = cmnd->device->host; |
2905 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; | 2905 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; |
@@ -3060,6 +3060,8 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) | |||
3060 | return 0; | 3060 | return 0; |
3061 | } | 3061 | } |
3062 | 3062 | ||
3063 | static DEF_SCSI_QCMD(lpfc_queuecommand) | ||
3064 | |||
3063 | /** | 3065 | /** |
3064 | * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point | 3066 | * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point |
3065 | * @cmnd: Pointer to scsi_cmnd data structure. | 3067 | * @cmnd: Pointer to scsi_cmnd data structure. |
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c index 3ddb4dc62d5d..6c42dff0f4d3 100644 --- a/drivers/scsi/mac53c94.c +++ b/drivers/scsi/mac53c94.c | |||
@@ -66,7 +66,7 @@ static void cmd_done(struct fsc_state *, int result); | |||
66 | static void set_dma_cmds(struct fsc_state *, struct scsi_cmnd *); | 66 | static void set_dma_cmds(struct fsc_state *, struct scsi_cmnd *); |
67 | 67 | ||
68 | 68 | ||
69 | static int mac53c94_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) | 69 | static int mac53c94_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) |
70 | { | 70 | { |
71 | struct fsc_state *state; | 71 | struct fsc_state *state; |
72 | 72 | ||
@@ -99,6 +99,8 @@ static int mac53c94_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd * | |||
99 | return 0; | 99 | return 0; |
100 | } | 100 | } |
101 | 101 | ||
102 | static DEF_SCSI_QCMD(mac53c94_queue) | ||
103 | |||
102 | static int mac53c94_host_reset(struct scsi_cmnd *cmd) | 104 | static int mac53c94_host_reset(struct scsi_cmnd *cmd) |
103 | { | 105 | { |
104 | struct fsc_state *state = (struct fsc_state *) cmd->device->host->hostdata; | 106 | struct fsc_state *state = (struct fsc_state *) cmd->device->host->hostdata; |
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c index 7ceb5cf12c6b..9aa048525eb2 100644 --- a/drivers/scsi/megaraid.c +++ b/drivers/scsi/megaraid.c | |||
@@ -366,7 +366,7 @@ mega_runpendq(adapter_t *adapter) | |||
366 | * The command queuing entry point for the mid-layer. | 366 | * The command queuing entry point for the mid-layer. |
367 | */ | 367 | */ |
368 | static int | 368 | static int |
369 | megaraid_queue(Scsi_Cmnd *scmd, void (*done)(Scsi_Cmnd *)) | 369 | megaraid_queue_lck(Scsi_Cmnd *scmd, void (*done)(Scsi_Cmnd *)) |
370 | { | 370 | { |
371 | adapter_t *adapter; | 371 | adapter_t *adapter; |
372 | scb_t *scb; | 372 | scb_t *scb; |
@@ -409,6 +409,8 @@ megaraid_queue(Scsi_Cmnd *scmd, void (*done)(Scsi_Cmnd *)) | |||
409 | return busy; | 409 | return busy; |
410 | } | 410 | } |
411 | 411 | ||
412 | static DEF_SCSI_QCMD(megaraid_queue) | ||
413 | |||
412 | /** | 414 | /** |
413 | * mega_allocate_scb() | 415 | * mega_allocate_scb() |
414 | * @adapter - pointer to our soft state | 416 | * @adapter - pointer to our soft state |
@@ -4456,7 +4458,7 @@ mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru) | |||
4456 | 4458 | ||
4457 | scb->idx = CMDID_INT_CMDS; | 4459 | scb->idx = CMDID_INT_CMDS; |
4458 | 4460 | ||
4459 | megaraid_queue(scmd, mega_internal_done); | 4461 | megaraid_queue_lck(scmd, mega_internal_done); |
4460 | 4462 | ||
4461 | wait_for_completion(&adapter->int_waitq); | 4463 | wait_for_completion(&adapter->int_waitq); |
4462 | 4464 | ||
diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h index 2b4a048cadf1..f5644745e24e 100644 --- a/drivers/scsi/megaraid.h +++ b/drivers/scsi/megaraid.h | |||
@@ -987,7 +987,7 @@ static int mega_query_adapter(adapter_t *); | |||
987 | static int issue_scb(adapter_t *, scb_t *); | 987 | static int issue_scb(adapter_t *, scb_t *); |
988 | static int mega_setup_mailbox(adapter_t *); | 988 | static int mega_setup_mailbox(adapter_t *); |
989 | 989 | ||
990 | static int megaraid_queue (Scsi_Cmnd *, void (*)(Scsi_Cmnd *)); | 990 | static int megaraid_queue (struct Scsi_Host *, struct scsi_cmnd *); |
991 | static scb_t * mega_build_cmd(adapter_t *, Scsi_Cmnd *, int *); | 991 | static scb_t * mega_build_cmd(adapter_t *, Scsi_Cmnd *, int *); |
992 | static void __mega_runpendq(adapter_t *); | 992 | static void __mega_runpendq(adapter_t *); |
993 | static int issue_scb_block(adapter_t *, u_char *); | 993 | static int issue_scb_block(adapter_t *, u_char *); |
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c index a7810a106b37..5708cb27d078 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.c +++ b/drivers/scsi/megaraid/megaraid_mbox.c | |||
@@ -113,8 +113,7 @@ static int megaraid_mbox_fire_sync_cmd(adapter_t *); | |||
113 | static void megaraid_mbox_display_scb(adapter_t *, scb_t *); | 113 | static void megaraid_mbox_display_scb(adapter_t *, scb_t *); |
114 | static void megaraid_mbox_setup_device_map(adapter_t *); | 114 | static void megaraid_mbox_setup_device_map(adapter_t *); |
115 | 115 | ||
116 | static int megaraid_queue_command(struct scsi_cmnd *, | 116 | static int megaraid_queue_command(struct Scsi_Host *, struct scsi_cmnd *); |
117 | void (*)(struct scsi_cmnd *)); | ||
118 | static scb_t *megaraid_mbox_build_cmd(adapter_t *, struct scsi_cmnd *, int *); | 117 | static scb_t *megaraid_mbox_build_cmd(adapter_t *, struct scsi_cmnd *, int *); |
119 | static void megaraid_mbox_runpendq(adapter_t *, scb_t *); | 118 | static void megaraid_mbox_runpendq(adapter_t *, scb_t *); |
120 | static void megaraid_mbox_prepare_pthru(adapter_t *, scb_t *, | 119 | static void megaraid_mbox_prepare_pthru(adapter_t *, scb_t *, |
@@ -1484,7 +1483,7 @@ mbox_post_cmd(adapter_t *adapter, scb_t *scb) | |||
1484 | * Queue entry point for mailbox based controllers. | 1483 | * Queue entry point for mailbox based controllers. |
1485 | */ | 1484 | */ |
1486 | static int | 1485 | static int |
1487 | megaraid_queue_command(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *)) | 1486 | megaraid_queue_command_lck(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *)) |
1488 | { | 1487 | { |
1489 | adapter_t *adapter; | 1488 | adapter_t *adapter; |
1490 | scb_t *scb; | 1489 | scb_t *scb; |
@@ -1513,6 +1512,8 @@ megaraid_queue_command(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *)) | |||
1513 | return if_busy; | 1512 | return if_busy; |
1514 | } | 1513 | } |
1515 | 1514 | ||
1515 | static DEF_SCSI_QCMD(megaraid_queue_command) | ||
1516 | |||
1516 | /** | 1517 | /** |
1517 | * megaraid_mbox_build_cmd - transform the mid-layer scsi commands | 1518 | * megaraid_mbox_build_cmd - transform the mid-layer scsi commands |
1518 | * @adapter : controller's soft state | 1519 | * @adapter : controller's soft state |
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c index eb29d5085131..7451bc096a01 100644 --- a/drivers/scsi/megaraid/megaraid_sas.c +++ b/drivers/scsi/megaraid/megaraid_sas.c | |||
@@ -1334,7 +1334,7 @@ megasas_dump_pending_frames(struct megasas_instance *instance) | |||
1334 | * @done: Callback entry point | 1334 | * @done: Callback entry point |
1335 | */ | 1335 | */ |
1336 | static int | 1336 | static int |
1337 | megasas_queue_command(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *)) | 1337 | megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *)) |
1338 | { | 1338 | { |
1339 | u32 frame_count; | 1339 | u32 frame_count; |
1340 | struct megasas_cmd *cmd; | 1340 | struct megasas_cmd *cmd; |
@@ -1417,6 +1417,8 @@ megasas_queue_command(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *)) | |||
1417 | return 0; | 1417 | return 0; |
1418 | } | 1418 | } |
1419 | 1419 | ||
1420 | static DEF_SCSI_QCMD(megasas_queue_command) | ||
1421 | |||
1420 | static struct megasas_instance *megasas_lookup_instance(u16 host_no) | 1422 | static struct megasas_instance *megasas_lookup_instance(u16 host_no) |
1421 | { | 1423 | { |
1422 | int i; | 1424 | int i; |
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c index 1f784fde2510..197aa1b3f0f3 100644 --- a/drivers/scsi/mesh.c +++ b/drivers/scsi/mesh.c | |||
@@ -1627,7 +1627,7 @@ static void cmd_complete(struct mesh_state *ms) | |||
1627 | * Called by midlayer with host locked to queue a new | 1627 | * Called by midlayer with host locked to queue a new |
1628 | * request | 1628 | * request |
1629 | */ | 1629 | */ |
1630 | static int mesh_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) | 1630 | static int mesh_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) |
1631 | { | 1631 | { |
1632 | struct mesh_state *ms; | 1632 | struct mesh_state *ms; |
1633 | 1633 | ||
@@ -1648,6 +1648,8 @@ static int mesh_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) | |||
1648 | return 0; | 1648 | return 0; |
1649 | } | 1649 | } |
1650 | 1650 | ||
1651 | static DEF_SCSI_QCMD(mesh_queue) | ||
1652 | |||
1651 | /* | 1653 | /* |
1652 | * Called to handle interrupts, either call by the interrupt | 1654 | * Called to handle interrupts, either call by the interrupt |
1653 | * handler (do_mesh_interrupt) or by other functions in | 1655 | * handler (do_mesh_interrupt) or by other functions in |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index 16e99b686354..1a96a00418a4 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c | |||
@@ -3315,7 +3315,7 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status) | |||
3315 | * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full | 3315 | * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full |
3316 | */ | 3316 | */ |
3317 | static int | 3317 | static int |
3318 | _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) | 3318 | _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) |
3319 | { | 3319 | { |
3320 | struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); | 3320 | struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); |
3321 | struct MPT2SAS_DEVICE *sas_device_priv_data; | 3321 | struct MPT2SAS_DEVICE *sas_device_priv_data; |
@@ -3441,6 +3441,8 @@ _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) | |||
3441 | return SCSI_MLQUEUE_HOST_BUSY; | 3441 | return SCSI_MLQUEUE_HOST_BUSY; |
3442 | } | 3442 | } |
3443 | 3443 | ||
3444 | static DEF_SCSI_QCMD(_scsih_qcmd) | ||
3445 | |||
3444 | /** | 3446 | /** |
3445 | * _scsih_normalize_sense - normalize descriptor and fixed format sense data | 3447 | * _scsih_normalize_sense - normalize descriptor and fixed format sense data |
3446 | * @sense_buffer: sense data returned by target | 3448 | * @sense_buffer: sense data returned by target |
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c index d013a2aa2fd5..46cc3825638d 100644 --- a/drivers/scsi/ncr53c8xx.c +++ b/drivers/scsi/ncr53c8xx.c | |||
@@ -8029,7 +8029,7 @@ static int ncr53c8xx_slave_configure(struct scsi_device *device) | |||
8029 | return 0; | 8029 | return 0; |
8030 | } | 8030 | } |
8031 | 8031 | ||
8032 | static int ncr53c8xx_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *)) | 8032 | static int ncr53c8xx_queue_command_lck (struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) |
8033 | { | 8033 | { |
8034 | struct ncb *np = ((struct host_data *) cmd->device->host->hostdata)->ncb; | 8034 | struct ncb *np = ((struct host_data *) cmd->device->host->hostdata)->ncb; |
8035 | unsigned long flags; | 8035 | unsigned long flags; |
@@ -8068,6 +8068,8 @@ printk("ncr53c8xx : command successfully queued\n"); | |||
8068 | return sts; | 8068 | return sts; |
8069 | } | 8069 | } |
8070 | 8070 | ||
8071 | static DEF_SCSI_QCMD(ncr53c8xx_queue_command) | ||
8072 | |||
8071 | irqreturn_t ncr53c8xx_intr(int irq, void *dev_id) | 8073 | irqreturn_t ncr53c8xx_intr(int irq, void *dev_id) |
8072 | { | 8074 | { |
8073 | unsigned long flags; | 8075 | unsigned long flags; |
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c index 4c1e54545200..6b8b021400f8 100644 --- a/drivers/scsi/nsp32.c +++ b/drivers/scsi/nsp32.c | |||
@@ -196,8 +196,7 @@ static void __exit exit_nsp32 (void); | |||
196 | static int nsp32_proc_info (struct Scsi_Host *, char *, char **, off_t, int, int); | 196 | static int nsp32_proc_info (struct Scsi_Host *, char *, char **, off_t, int, int); |
197 | 197 | ||
198 | static int nsp32_detect (struct pci_dev *pdev); | 198 | static int nsp32_detect (struct pci_dev *pdev); |
199 | static int nsp32_queuecommand(struct scsi_cmnd *, | 199 | static int nsp32_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); |
200 | void (*done)(struct scsi_cmnd *)); | ||
201 | static const char *nsp32_info (struct Scsi_Host *); | 200 | static const char *nsp32_info (struct Scsi_Host *); |
202 | static int nsp32_release (struct Scsi_Host *); | 201 | static int nsp32_release (struct Scsi_Host *); |
203 | 202 | ||
@@ -909,7 +908,7 @@ static int nsp32_setup_sg_table(struct scsi_cmnd *SCpnt) | |||
909 | return TRUE; | 908 | return TRUE; |
910 | } | 909 | } |
911 | 910 | ||
912 | static int nsp32_queuecommand(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | 911 | static int nsp32_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) |
913 | { | 912 | { |
914 | nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; | 913 | nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; |
915 | nsp32_target *target; | 914 | nsp32_target *target; |
@@ -1050,6 +1049,8 @@ static int nsp32_queuecommand(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_ | |||
1050 | return 0; | 1049 | return 0; |
1051 | } | 1050 | } |
1052 | 1051 | ||
1052 | static DEF_SCSI_QCMD(nsp32_queuecommand) | ||
1053 | |||
1053 | /* initialize asic */ | 1054 | /* initialize asic */ |
1054 | static int nsp32hw_init(nsp32_hw_data *data) | 1055 | static int nsp32hw_init(nsp32_hw_data *data) |
1055 | { | 1056 | { |
diff --git a/drivers/scsi/pas16.h b/drivers/scsi/pas16.h index 8dc5b1a5f5da..a04281cace2e 100644 --- a/drivers/scsi/pas16.h +++ b/drivers/scsi/pas16.h | |||
@@ -118,7 +118,7 @@ static int pas16_abort(Scsi_Cmnd *); | |||
118 | static int pas16_biosparam(struct scsi_device *, struct block_device *, | 118 | static int pas16_biosparam(struct scsi_device *, struct block_device *, |
119 | sector_t, int*); | 119 | sector_t, int*); |
120 | static int pas16_detect(struct scsi_host_template *); | 120 | static int pas16_detect(struct scsi_host_template *); |
121 | static int pas16_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)); | 121 | static int pas16_queue_command(struct Scsi_Host *, struct scsi_cmnd *); |
122 | static int pas16_bus_reset(Scsi_Cmnd *); | 122 | static int pas16_bus_reset(Scsi_Cmnd *); |
123 | 123 | ||
124 | #ifndef CMD_PER_LUN | 124 | #ifndef CMD_PER_LUN |
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c index 9326c2c14880..be3f33d31a99 100644 --- a/drivers/scsi/pcmcia/nsp_cs.c +++ b/drivers/scsi/pcmcia/nsp_cs.c | |||
@@ -184,7 +184,7 @@ static void nsp_scsi_done(struct scsi_cmnd *SCpnt) | |||
184 | SCpnt->scsi_done(SCpnt); | 184 | SCpnt->scsi_done(SCpnt); |
185 | } | 185 | } |
186 | 186 | ||
187 | static int nsp_queuecommand(struct scsi_cmnd *SCpnt, | 187 | static int nsp_queuecommand_lck(struct scsi_cmnd *SCpnt, |
188 | void (*done)(struct scsi_cmnd *)) | 188 | void (*done)(struct scsi_cmnd *)) |
189 | { | 189 | { |
190 | #ifdef NSP_DEBUG | 190 | #ifdef NSP_DEBUG |
@@ -264,6 +264,8 @@ static int nsp_queuecommand(struct scsi_cmnd *SCpnt, | |||
264 | return 0; | 264 | return 0; |
265 | } | 265 | } |
266 | 266 | ||
267 | static DEF_SCSI_QCMD(nsp_queuecommand) | ||
268 | |||
267 | /* | 269 | /* |
268 | * setup PIO FIFO transfer mode and enable/disable to data out | 270 | * setup PIO FIFO transfer mode and enable/disable to data out |
269 | */ | 271 | */ |
diff --git a/drivers/scsi/pcmcia/nsp_cs.h b/drivers/scsi/pcmcia/nsp_cs.h index d68c9f267c5e..7fc9a9d0a448 100644 --- a/drivers/scsi/pcmcia/nsp_cs.h +++ b/drivers/scsi/pcmcia/nsp_cs.h | |||
@@ -299,8 +299,7 @@ static int nsp_proc_info ( | |||
299 | off_t offset, | 299 | off_t offset, |
300 | int length, | 300 | int length, |
301 | int inout); | 301 | int inout); |
302 | static int nsp_queuecommand(struct scsi_cmnd *SCpnt, | 302 | static int nsp_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *SCpnt); |
303 | void (* done)(struct scsi_cmnd *SCpnt)); | ||
304 | 303 | ||
305 | /* Error handler */ | 304 | /* Error handler */ |
306 | /*static int nsp_eh_abort (struct scsi_cmnd *SCpnt);*/ | 305 | /*static int nsp_eh_abort (struct scsi_cmnd *SCpnt);*/ |
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c index 0ae27cb5cd6f..8552296edaa1 100644 --- a/drivers/scsi/pcmcia/sym53c500_cs.c +++ b/drivers/scsi/pcmcia/sym53c500_cs.c | |||
@@ -547,7 +547,7 @@ SYM53C500_info(struct Scsi_Host *SChost) | |||
547 | } | 547 | } |
548 | 548 | ||
549 | static int | 549 | static int |
550 | SYM53C500_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | 550 | SYM53C500_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) |
551 | { | 551 | { |
552 | int i; | 552 | int i; |
553 | int port_base = SCpnt->device->host->io_port; | 553 | int port_base = SCpnt->device->host->io_port; |
@@ -583,6 +583,8 @@ SYM53C500_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | |||
583 | return 0; | 583 | return 0; |
584 | } | 584 | } |
585 | 585 | ||
586 | static DEF_SCSI_QCMD(SYM53C500_queue) | ||
587 | |||
586 | static int | 588 | static int |
587 | SYM53C500_host_reset(struct scsi_cmnd *SCpnt) | 589 | SYM53C500_host_reset(struct scsi_cmnd *SCpnt) |
588 | { | 590 | { |
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index cf89091e4c3d..5e76a624cb08 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c | |||
@@ -3478,7 +3478,7 @@ static int pmcraid_copy_sglist( | |||
3478 | * SCSI_MLQUEUE_DEVICE_BUSY if device is busy | 3478 | * SCSI_MLQUEUE_DEVICE_BUSY if device is busy |
3479 | * SCSI_MLQUEUE_HOST_BUSY if host is busy | 3479 | * SCSI_MLQUEUE_HOST_BUSY if host is busy |
3480 | */ | 3480 | */ |
3481 | static int pmcraid_queuecommand( | 3481 | static int pmcraid_queuecommand_lck( |
3482 | struct scsi_cmnd *scsi_cmd, | 3482 | struct scsi_cmnd *scsi_cmd, |
3483 | void (*done) (struct scsi_cmnd *) | 3483 | void (*done) (struct scsi_cmnd *) |
3484 | ) | 3484 | ) |
@@ -3584,6 +3584,8 @@ static int pmcraid_queuecommand( | |||
3584 | return rc; | 3584 | return rc; |
3585 | } | 3585 | } |
3586 | 3586 | ||
3587 | static DEF_SCSI_QCMD(pmcraid_queuecommand) | ||
3588 | |||
3587 | /** | 3589 | /** |
3588 | * pmcraid_open -char node "open" entry, allowed only users with admin access | 3590 | * pmcraid_open -char node "open" entry, allowed only users with admin access |
3589 | */ | 3591 | */ |
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c index 7bc2d796e403..d164c9639361 100644 --- a/drivers/scsi/ppa.c +++ b/drivers/scsi/ppa.c | |||
@@ -798,7 +798,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd) | |||
798 | return 0; | 798 | return 0; |
799 | } | 799 | } |
800 | 800 | ||
801 | static int ppa_queuecommand(struct scsi_cmnd *cmd, | 801 | static int ppa_queuecommand_lck(struct scsi_cmnd *cmd, |
802 | void (*done) (struct scsi_cmnd *)) | 802 | void (*done) (struct scsi_cmnd *)) |
803 | { | 803 | { |
804 | ppa_struct *dev = ppa_dev(cmd->device->host); | 804 | ppa_struct *dev = ppa_dev(cmd->device->host); |
@@ -821,6 +821,8 @@ static int ppa_queuecommand(struct scsi_cmnd *cmd, | |||
821 | return 0; | 821 | return 0; |
822 | } | 822 | } |
823 | 823 | ||
824 | static DEF_SCSI_QCMD(ppa_queuecommand) | ||
825 | |||
824 | /* | 826 | /* |
825 | * Apparently the disk->capacity attribute is off by 1 sector | 827 | * Apparently the disk->capacity attribute is off by 1 sector |
826 | * for all disk drives. We add the one here, but it should really | 828 | * for all disk drives. We add the one here, but it should really |
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c index 92ffbb510498..cd178b9e40cd 100644 --- a/drivers/scsi/ps3rom.c +++ b/drivers/scsi/ps3rom.c | |||
@@ -211,7 +211,7 @@ static int ps3rom_write_request(struct ps3_storage_device *dev, | |||
211 | return 0; | 211 | return 0; |
212 | } | 212 | } |
213 | 213 | ||
214 | static int ps3rom_queuecommand(struct scsi_cmnd *cmd, | 214 | static int ps3rom_queuecommand_lck(struct scsi_cmnd *cmd, |
215 | void (*done)(struct scsi_cmnd *)) | 215 | void (*done)(struct scsi_cmnd *)) |
216 | { | 216 | { |
217 | struct ps3rom_private *priv = shost_priv(cmd->device->host); | 217 | struct ps3rom_private *priv = shost_priv(cmd->device->host); |
@@ -260,6 +260,8 @@ static int ps3rom_queuecommand(struct scsi_cmnd *cmd, | |||
260 | return 0; | 260 | return 0; |
261 | } | 261 | } |
262 | 262 | ||
263 | static DEF_SCSI_QCMD(ps3rom_queuecommand) | ||
264 | |||
263 | static int decode_lv1_status(u64 status, unsigned char *sense_key, | 265 | static int decode_lv1_status(u64 status, unsigned char *sense_key, |
264 | unsigned char *asc, unsigned char *ascq) | 266 | unsigned char *asc, unsigned char *ascq) |
265 | { | 267 | { |
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c index b8166ecfd0e3..5dec684bf010 100644 --- a/drivers/scsi/qla1280.c +++ b/drivers/scsi/qla1280.c | |||
@@ -727,7 +727,7 @@ qla1280_info(struct Scsi_Host *host) | |||
727 | * context which is a big NO! NO!. | 727 | * context which is a big NO! NO!. |
728 | **************************************************************************/ | 728 | **************************************************************************/ |
729 | static int | 729 | static int |
730 | qla1280_queuecommand(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *)) | 730 | qla1280_queuecommand_lck(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *)) |
731 | { | 731 | { |
732 | struct Scsi_Host *host = cmd->device->host; | 732 | struct Scsi_Host *host = cmd->device->host; |
733 | struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata; | 733 | struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata; |
@@ -756,6 +756,8 @@ qla1280_queuecommand(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *)) | |||
756 | return status; | 756 | return status; |
757 | } | 757 | } |
758 | 758 | ||
759 | static DEF_SCSI_QCMD(qla1280_queuecommand) | ||
760 | |||
759 | enum action { | 761 | enum action { |
760 | ABORT_COMMAND, | 762 | ABORT_COMMAND, |
761 | DEVICE_RESET, | 763 | DEVICE_RESET, |
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 1830e6e97315..1644eabaafeb 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
@@ -179,8 +179,7 @@ static int qla2xxx_slave_alloc(struct scsi_device *); | |||
179 | static int qla2xxx_scan_finished(struct Scsi_Host *, unsigned long time); | 179 | static int qla2xxx_scan_finished(struct Scsi_Host *, unsigned long time); |
180 | static void qla2xxx_scan_start(struct Scsi_Host *); | 180 | static void qla2xxx_scan_start(struct Scsi_Host *); |
181 | static void qla2xxx_slave_destroy(struct scsi_device *); | 181 | static void qla2xxx_slave_destroy(struct scsi_device *); |
182 | static int qla2xxx_queuecommand(struct scsi_cmnd *cmd, | 182 | static int qla2xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd); |
183 | void (*fn)(struct scsi_cmnd *)); | ||
184 | static int qla2xxx_eh_abort(struct scsi_cmnd *); | 183 | static int qla2xxx_eh_abort(struct scsi_cmnd *); |
185 | static int qla2xxx_eh_device_reset(struct scsi_cmnd *); | 184 | static int qla2xxx_eh_device_reset(struct scsi_cmnd *); |
186 | static int qla2xxx_eh_target_reset(struct scsi_cmnd *); | 185 | static int qla2xxx_eh_target_reset(struct scsi_cmnd *); |
@@ -535,7 +534,7 @@ qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport, | |||
535 | } | 534 | } |
536 | 535 | ||
537 | static int | 536 | static int |
538 | qla2xxx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) | 537 | qla2xxx_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) |
539 | { | 538 | { |
540 | scsi_qla_host_t *vha = shost_priv(cmd->device->host); | 539 | scsi_qla_host_t *vha = shost_priv(cmd->device->host); |
541 | fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; | 540 | fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; |
@@ -609,6 +608,8 @@ qc24_fail_command: | |||
609 | return 0; | 608 | return 0; |
610 | } | 609 | } |
611 | 610 | ||
611 | static DEF_SCSI_QCMD(qla2xxx_queuecommand) | ||
612 | |||
612 | 613 | ||
613 | /* | 614 | /* |
614 | * qla2x00_eh_wait_on_command | 615 | * qla2x00_eh_wait_on_command |
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index f4cd846abf6d..0d48fb4d1044 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c | |||
@@ -79,8 +79,7 @@ static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc); | |||
79 | /* | 79 | /* |
80 | * SCSI host template entry points | 80 | * SCSI host template entry points |
81 | */ | 81 | */ |
82 | static int qla4xxx_queuecommand(struct scsi_cmnd *cmd, | 82 | static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd); |
83 | void (*done) (struct scsi_cmnd *)); | ||
84 | static int qla4xxx_eh_abort(struct scsi_cmnd *cmd); | 83 | static int qla4xxx_eh_abort(struct scsi_cmnd *cmd); |
85 | static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd); | 84 | static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd); |
86 | static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd); | 85 | static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd); |
@@ -464,7 +463,7 @@ void qla4xxx_srb_compl(struct kref *ref) | |||
464 | * completion handling). Unfortunely, it sometimes calls the scheduler | 463 | * completion handling). Unfortunely, it sometimes calls the scheduler |
465 | * in interrupt context which is a big NO! NO!. | 464 | * in interrupt context which is a big NO! NO!. |
466 | **/ | 465 | **/ |
467 | static int qla4xxx_queuecommand(struct scsi_cmnd *cmd, | 466 | static int qla4xxx_queuecommand_lck(struct scsi_cmnd *cmd, |
468 | void (*done)(struct scsi_cmnd *)) | 467 | void (*done)(struct scsi_cmnd *)) |
469 | { | 468 | { |
470 | struct scsi_qla_host *ha = to_qla_host(cmd->device->host); | 469 | struct scsi_qla_host *ha = to_qla_host(cmd->device->host); |
@@ -538,6 +537,8 @@ qc_fail_command: | |||
538 | return 0; | 537 | return 0; |
539 | } | 538 | } |
540 | 539 | ||
540 | static DEF_SCSI_QCMD(qla4xxx_queuecommand) | ||
541 | |||
541 | /** | 542 | /** |
542 | * qla4xxx_mem_free - frees memory allocated to adapter | 543 | * qla4xxx_mem_free - frees memory allocated to adapter |
543 | * @ha: Pointer to host adapter structure. | 544 | * @ha: Pointer to host adapter structure. |
diff --git a/drivers/scsi/qlogicfas408.c b/drivers/scsi/qlogicfas408.c index 1ad51552d6b1..c3a9151ca823 100644 --- a/drivers/scsi/qlogicfas408.c +++ b/drivers/scsi/qlogicfas408.c | |||
@@ -439,7 +439,7 @@ irqreturn_t qlogicfas408_ihandl(int irq, void *dev_id) | |||
439 | * Queued command | 439 | * Queued command |
440 | */ | 440 | */ |
441 | 441 | ||
442 | int qlogicfas408_queuecommand(struct scsi_cmnd *cmd, | 442 | static int qlogicfas408_queuecommand_lck(struct scsi_cmnd *cmd, |
443 | void (*done) (struct scsi_cmnd *)) | 443 | void (*done) (struct scsi_cmnd *)) |
444 | { | 444 | { |
445 | struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd); | 445 | struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd); |
@@ -459,6 +459,8 @@ int qlogicfas408_queuecommand(struct scsi_cmnd *cmd, | |||
459 | return 0; | 459 | return 0; |
460 | } | 460 | } |
461 | 461 | ||
462 | DEF_SCSI_QCMD(qlogicfas408_queuecommand) | ||
463 | |||
462 | /* | 464 | /* |
463 | * Return bios parameters | 465 | * Return bios parameters |
464 | */ | 466 | */ |
diff --git a/drivers/scsi/qlogicfas408.h b/drivers/scsi/qlogicfas408.h index 260626427a32..2f6c0a166200 100644 --- a/drivers/scsi/qlogicfas408.h +++ b/drivers/scsi/qlogicfas408.h | |||
@@ -103,8 +103,7 @@ struct qlogicfas408_priv { | |||
103 | #define get_priv_by_host(x) (struct qlogicfas408_priv *)&((x)->hostdata[0]) | 103 | #define get_priv_by_host(x) (struct qlogicfas408_priv *)&((x)->hostdata[0]) |
104 | 104 | ||
105 | irqreturn_t qlogicfas408_ihandl(int irq, void *dev_id); | 105 | irqreturn_t qlogicfas408_ihandl(int irq, void *dev_id); |
106 | int qlogicfas408_queuecommand(struct scsi_cmnd * cmd, | 106 | int qlogicfas408_queuecommand(struct Scsi_Host *h, struct scsi_cmnd * cmd); |
107 | void (*done) (struct scsi_cmnd *)); | ||
108 | int qlogicfas408_biosparam(struct scsi_device * disk, | 107 | int qlogicfas408_biosparam(struct scsi_device * disk, |
109 | struct block_device *dev, | 108 | struct block_device *dev, |
110 | sector_t capacity, int ip[]); | 109 | sector_t capacity, int ip[]); |
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c index f8c561cf751e..664c9572d0c9 100644 --- a/drivers/scsi/qlogicpti.c +++ b/drivers/scsi/qlogicpti.c | |||
@@ -1003,7 +1003,7 @@ static int qlogicpti_slave_configure(struct scsi_device *sdev) | |||
1003 | * | 1003 | * |
1004 | * "This code must fly." -davem | 1004 | * "This code must fly." -davem |
1005 | */ | 1005 | */ |
1006 | static int qlogicpti_queuecommand(struct scsi_cmnd *Cmnd, void (*done)(struct scsi_cmnd *)) | 1006 | static int qlogicpti_queuecommand_lck(struct scsi_cmnd *Cmnd, void (*done)(struct scsi_cmnd *)) |
1007 | { | 1007 | { |
1008 | struct Scsi_Host *host = Cmnd->device->host; | 1008 | struct Scsi_Host *host = Cmnd->device->host; |
1009 | struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata; | 1009 | struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata; |
@@ -1052,6 +1052,8 @@ toss_command: | |||
1052 | return 1; | 1052 | return 1; |
1053 | } | 1053 | } |
1054 | 1054 | ||
1055 | static DEF_SCSI_QCMD(qlogicpti_queuecommand) | ||
1056 | |||
1055 | static int qlogicpti_return_status(struct Status_Entry *sts, int id) | 1057 | static int qlogicpti_return_status(struct Status_Entry *sts, int id) |
1056 | { | 1058 | { |
1057 | int host_status = DID_ERROR; | 1059 | int host_status = DID_ERROR; |
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 348fba0a8976..2aeb2e9c4d3b 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c | |||
@@ -634,12 +634,13 @@ void scsi_log_completion(struct scsi_cmnd *cmd, int disposition) | |||
634 | * Description: a serial number identifies a request for error recovery | 634 | * Description: a serial number identifies a request for error recovery |
635 | * and debugging purposes. Protected by the Host_Lock of host. | 635 | * and debugging purposes. Protected by the Host_Lock of host. |
636 | */ | 636 | */ |
637 | static inline void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd) | 637 | void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd) |
638 | { | 638 | { |
639 | cmd->serial_number = host->cmd_serial_number++; | 639 | cmd->serial_number = host->cmd_serial_number++; |
640 | if (cmd->serial_number == 0) | 640 | if (cmd->serial_number == 0) |
641 | cmd->serial_number = host->cmd_serial_number++; | 641 | cmd->serial_number = host->cmd_serial_number++; |
642 | } | 642 | } |
643 | EXPORT_SYMBOL(scsi_cmd_get_serial); | ||
643 | 644 | ||
644 | /** | 645 | /** |
645 | * scsi_dispatch_command - Dispatch a command to the low-level driver. | 646 | * scsi_dispatch_command - Dispatch a command to the low-level driver. |
@@ -651,7 +652,6 @@ static inline void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd | |||
651 | int scsi_dispatch_cmd(struct scsi_cmnd *cmd) | 652 | int scsi_dispatch_cmd(struct scsi_cmnd *cmd) |
652 | { | 653 | { |
653 | struct Scsi_Host *host = cmd->device->host; | 654 | struct Scsi_Host *host = cmd->device->host; |
654 | unsigned long flags = 0; | ||
655 | unsigned long timeout; | 655 | unsigned long timeout; |
656 | int rtn = 0; | 656 | int rtn = 0; |
657 | 657 | ||
@@ -737,23 +737,15 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd) | |||
737 | goto out; | 737 | goto out; |
738 | } | 738 | } |
739 | 739 | ||
740 | spin_lock_irqsave(host->host_lock, flags); | ||
741 | /* | ||
742 | * AK: unlikely race here: for some reason the timer could | ||
743 | * expire before the serial number is set up below. | ||
744 | * | ||
745 | * TODO: kill serial or move to blk layer | ||
746 | */ | ||
747 | scsi_cmd_get_serial(host, cmd); | ||
748 | |||
749 | if (unlikely(host->shost_state == SHOST_DEL)) { | 740 | if (unlikely(host->shost_state == SHOST_DEL)) { |
750 | cmd->result = (DID_NO_CONNECT << 16); | 741 | cmd->result = (DID_NO_CONNECT << 16); |
751 | scsi_done(cmd); | 742 | scsi_done(cmd); |
752 | } else { | 743 | } else { |
753 | trace_scsi_dispatch_cmd_start(cmd); | 744 | trace_scsi_dispatch_cmd_start(cmd); |
754 | rtn = host->hostt->queuecommand(cmd, scsi_done); | 745 | cmd->scsi_done = scsi_done; |
746 | rtn = host->hostt->queuecommand(host, cmd); | ||
755 | } | 747 | } |
756 | spin_unlock_irqrestore(host->host_lock, flags); | 748 | |
757 | if (rtn) { | 749 | if (rtn) { |
758 | trace_scsi_dispatch_cmd_error(cmd, rtn); | 750 | trace_scsi_dispatch_cmd_error(cmd, rtn); |
759 | if (rtn != SCSI_MLQUEUE_DEVICE_BUSY && | 751 | if (rtn != SCSI_MLQUEUE_DEVICE_BUSY && |
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 2c36bae3bd4b..2f1f9b079b10 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c | |||
@@ -3538,7 +3538,7 @@ static void sdebug_remove_adapter(void) | |||
3538 | } | 3538 | } |
3539 | 3539 | ||
3540 | static | 3540 | static |
3541 | int scsi_debug_queuecommand(struct scsi_cmnd *SCpnt, done_funct_t done) | 3541 | int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done) |
3542 | { | 3542 | { |
3543 | unsigned char *cmd = (unsigned char *) SCpnt->cmnd; | 3543 | unsigned char *cmd = (unsigned char *) SCpnt->cmnd; |
3544 | int len, k; | 3544 | int len, k; |
@@ -3884,6 +3884,8 @@ write: | |||
3884 | (delay_override ? 0 : scsi_debug_delay)); | 3884 | (delay_override ? 0 : scsi_debug_delay)); |
3885 | } | 3885 | } |
3886 | 3886 | ||
3887 | static DEF_SCSI_QCMD(scsi_debug_queuecommand) | ||
3888 | |||
3887 | static struct scsi_host_template sdebug_driver_template = { | 3889 | static struct scsi_host_template sdebug_driver_template = { |
3888 | .proc_info = scsi_debug_proc_info, | 3890 | .proc_info = scsi_debug_proc_info, |
3889 | .proc_name = sdebug_proc_name, | 3891 | .proc_name = sdebug_proc_name, |
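Note that no .queuecommand initializer is touched in any hunk shown here: DEF_SCSI_QCMD(scsi_debug_queuecommand) emits a function with the original symbol name but the new (struct Scsi_Host *, struct scsi_cmnd *) signature, so existing scsi_host_template assignments keep working unmodified. A hypothetical template fragment, for illustration only (example_template is an invented name, not code from this patch):

static struct scsi_host_template example_template = {
	.proc_name	= "scsi_debug",
	/* still points at the original symbol name; that symbol is now the
	 * DEF_SCSI_QCMD-generated wrapper with the new prototype */
	.queuecommand	= scsi_debug_queuecommand,
};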
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 1de30eb83bb0..824b8fc03ce5 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c | |||
@@ -320,19 +320,11 @@ static int scsi_check_sense(struct scsi_cmnd *scmd) | |||
320 | "changed. The Linux SCSI layer does not " | 320 | "changed. The Linux SCSI layer does not " |
321 | "automatically adjust these parameters.\n"); | 321 | "automatically adjust these parameters.\n"); |
322 | 322 | ||
323 | if (scmd->request->cmd_flags & REQ_HARDBARRIER) | 323 | /* |
324 | /* | 324 | * Pass the UA upwards for a determination in the completion |
325 | * barrier requests should always retry on UA | 325 | * functions. |
326 | * otherwise block will get a spurious error | 326 | */ |
327 | */ | 327 | return SUCCESS; |
328 | return NEEDS_RETRY; | ||
329 | else | ||
330 | /* | ||
331 | * for normal (non barrier) commands, pass the | ||
332 | * UA upwards for a determination in the | ||
333 | * completion functions | ||
334 | */ | ||
335 | return SUCCESS; | ||
336 | 328 | ||
337 | /* these three are not supported */ | 329 | /* these three are not supported */ |
338 | case COPY_ABORTED: | 330 | case COPY_ABORTED: |
@@ -781,17 +773,15 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd, | |||
781 | struct Scsi_Host *shost = sdev->host; | 773 | struct Scsi_Host *shost = sdev->host; |
782 | DECLARE_COMPLETION_ONSTACK(done); | 774 | DECLARE_COMPLETION_ONSTACK(done); |
783 | unsigned long timeleft; | 775 | unsigned long timeleft; |
784 | unsigned long flags; | ||
785 | struct scsi_eh_save ses; | 776 | struct scsi_eh_save ses; |
786 | int rtn; | 777 | int rtn; |
787 | 778 | ||
788 | scsi_eh_prep_cmnd(scmd, &ses, cmnd, cmnd_size, sense_bytes); | 779 | scsi_eh_prep_cmnd(scmd, &ses, cmnd, cmnd_size, sense_bytes); |
789 | shost->eh_action = &done; | 780 | shost->eh_action = &done; |
790 | 781 | ||
791 | spin_lock_irqsave(shost->host_lock, flags); | ||
792 | scsi_log_send(scmd); | 782 | scsi_log_send(scmd); |
793 | shost->hostt->queuecommand(scmd, scsi_eh_done); | 783 | scmd->scsi_done = scsi_eh_done; |
794 | spin_unlock_irqrestore(shost->host_lock, flags); | 784 | shost->hostt->queuecommand(shost, scmd); |
795 | 785 | ||
796 | timeleft = wait_for_completion_timeout(&done, timeout); | 786 | timeleft = wait_for_completion_timeout(&done, timeout); |
797 | 787 | ||
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c index 9c73dbda3bbb..606215e54b88 100644 --- a/drivers/scsi/stex.c +++ b/drivers/scsi/stex.c | |||
@@ -572,7 +572,7 @@ stex_slave_destroy(struct scsi_device *sdev) | |||
572 | } | 572 | } |
573 | 573 | ||
574 | static int | 574 | static int |
575 | stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *)) | 575 | stex_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) |
576 | { | 576 | { |
577 | struct st_hba *hba; | 577 | struct st_hba *hba; |
578 | struct Scsi_Host *host; | 578 | struct Scsi_Host *host; |
@@ -698,6 +698,8 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *)) | |||
698 | return 0; | 698 | return 0; |
699 | } | 699 | } |
700 | 700 | ||
701 | static DEF_SCSI_QCMD(stex_queuecommand) | ||
702 | |||
701 | static void stex_scsi_done(struct st_ccb *ccb) | 703 | static void stex_scsi_done(struct st_ccb *ccb) |
702 | { | 704 | { |
703 | struct scsi_cmnd *cmd = ccb->cmd; | 705 | struct scsi_cmnd *cmd = ccb->cmd; |
diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c index 713620ed70d9..4f0e5485ffde 100644 --- a/drivers/scsi/sun3_NCR5380.c +++ b/drivers/scsi/sun3_NCR5380.c | |||
@@ -908,7 +908,7 @@ static int NCR5380_init (struct Scsi_Host *instance, int flags) | |||
908 | */ | 908 | */ |
909 | 909 | ||
910 | /* Only make static if a wrapper function is used */ | 910 | /* Only make static if a wrapper function is used */ |
911 | static int NCR5380_queue_command(struct scsi_cmnd *cmd, | 911 | static int NCR5380_queue_command_lck(struct scsi_cmnd *cmd, |
912 | void (*done)(struct scsi_cmnd *)) | 912 | void (*done)(struct scsi_cmnd *)) |
913 | { | 913 | { |
914 | SETUP_HOSTDATA(cmd->device->host); | 914 | SETUP_HOSTDATA(cmd->device->host); |
@@ -1019,6 +1019,8 @@ static int NCR5380_queue_command(struct scsi_cmnd *cmd, | |||
1019 | return 0; | 1019 | return 0; |
1020 | } | 1020 | } |
1021 | 1021 | ||
1022 | static DEF_SCSI_QCMD(NCR5380_queue_command) | ||
1023 | |||
1022 | /* | 1024 | /* |
1023 | * Function : NCR5380_main (void) | 1025 | * Function : NCR5380_main (void) |
1024 | * | 1026 | * |
diff --git a/drivers/scsi/sun3_scsi.h b/drivers/scsi/sun3_scsi.h index b29a9d661ca4..bcefd8458e65 100644 --- a/drivers/scsi/sun3_scsi.h +++ b/drivers/scsi/sun3_scsi.h | |||
@@ -51,8 +51,7 @@ static int sun3scsi_abort(struct scsi_cmnd *); | |||
51 | static int sun3scsi_detect (struct scsi_host_template *); | 51 | static int sun3scsi_detect (struct scsi_host_template *); |
52 | static const char *sun3scsi_info (struct Scsi_Host *); | 52 | static const char *sun3scsi_info (struct Scsi_Host *); |
53 | static int sun3scsi_bus_reset(struct scsi_cmnd *); | 53 | static int sun3scsi_bus_reset(struct scsi_cmnd *); |
54 | static int sun3scsi_queue_command(struct scsi_cmnd *, | 54 | static int sun3scsi_queue_command(struct Scsi_Host *, struct scsi_cmnd *); |
55 | void (*done)(struct scsi_cmnd *)); | ||
56 | static int sun3scsi_release (struct Scsi_Host *); | 55 | static int sun3scsi_release (struct Scsi_Host *); |
57 | 56 | ||
58 | #ifndef CMD_PER_LUN | 57 | #ifndef CMD_PER_LUN |
diff --git a/drivers/scsi/sym53c416.c b/drivers/scsi/sym53c416.c index e5c369bb568f..190107ae120b 100644 --- a/drivers/scsi/sym53c416.c +++ b/drivers/scsi/sym53c416.c | |||
@@ -734,7 +734,7 @@ const char *sym53c416_info(struct Scsi_Host *SChost) | |||
734 | return info; | 734 | return info; |
735 | } | 735 | } |
736 | 736 | ||
737 | int sym53c416_queuecommand(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)) | 737 | static int sym53c416_queuecommand_lck(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)) |
738 | { | 738 | { |
739 | int base; | 739 | int base; |
740 | unsigned long flags = 0; | 740 | unsigned long flags = 0; |
@@ -761,6 +761,8 @@ int sym53c416_queuecommand(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)) | |||
761 | return 0; | 761 | return 0; |
762 | } | 762 | } |
763 | 763 | ||
764 | DEF_SCSI_QCMD(sym53c416_queuecommand) | ||
765 | |||
764 | static int sym53c416_host_reset(Scsi_Cmnd *SCpnt) | 766 | static int sym53c416_host_reset(Scsi_Cmnd *SCpnt) |
765 | { | 767 | { |
766 | int base; | 768 | int base; |
diff --git a/drivers/scsi/sym53c416.h b/drivers/scsi/sym53c416.h index 77860d0748ff..387de5d80a70 100644 --- a/drivers/scsi/sym53c416.h +++ b/drivers/scsi/sym53c416.h | |||
@@ -25,7 +25,7 @@ | |||
25 | static int sym53c416_detect(struct scsi_host_template *); | 25 | static int sym53c416_detect(struct scsi_host_template *); |
26 | static const char *sym53c416_info(struct Scsi_Host *); | 26 | static const char *sym53c416_info(struct Scsi_Host *); |
27 | static int sym53c416_release(struct Scsi_Host *); | 27 | static int sym53c416_release(struct Scsi_Host *); |
28 | static int sym53c416_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)); | 28 | static int sym53c416_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); |
29 | static int sym53c416_host_reset(Scsi_Cmnd *); | 29 | static int sym53c416_host_reset(Scsi_Cmnd *); |
30 | static int sym53c416_bios_param(struct scsi_device *, struct block_device *, | 30 | static int sym53c416_bios_param(struct scsi_device *, struct block_device *, |
31 | sector_t, int *); | 31 | sector_t, int *); |
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c index 8b955b534a36..6b97ded9d45d 100644 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c | |||
@@ -505,7 +505,7 @@ void sym_log_bus_error(struct Scsi_Host *shost) | |||
505 | * queuecommand method. Entered with the host adapter lock held and | 505 | * queuecommand method. Entered with the host adapter lock held and |
506 | * interrupts disabled. | 506 | * interrupts disabled. |
507 | */ | 507 | */ |
508 | static int sym53c8xx_queue_command(struct scsi_cmnd *cmd, | 508 | static int sym53c8xx_queue_command_lck(struct scsi_cmnd *cmd, |
509 | void (*done)(struct scsi_cmnd *)) | 509 | void (*done)(struct scsi_cmnd *)) |
510 | { | 510 | { |
511 | struct sym_hcb *np = SYM_SOFTC_PTR(cmd); | 511 | struct sym_hcb *np = SYM_SOFTC_PTR(cmd); |
@@ -536,6 +536,8 @@ static int sym53c8xx_queue_command(struct scsi_cmnd *cmd, | |||
536 | return 0; | 536 | return 0; |
537 | } | 537 | } |
538 | 538 | ||
539 | static DEF_SCSI_QCMD(sym53c8xx_queue_command) | ||
540 | |||
539 | /* | 541 | /* |
540 | * Linux entry point of the interrupt handler. | 542 | * Linux entry point of the interrupt handler. |
541 | */ | 543 | */ |
diff --git a/drivers/scsi/t128.h b/drivers/scsi/t128.h index 76a069b7ac0b..ada1115079c9 100644 --- a/drivers/scsi/t128.h +++ b/drivers/scsi/t128.h | |||
@@ -96,8 +96,7 @@ static int t128_abort(struct scsi_cmnd *); | |||
96 | static int t128_biosparam(struct scsi_device *, struct block_device *, | 96 | static int t128_biosparam(struct scsi_device *, struct block_device *, |
97 | sector_t, int*); | 97 | sector_t, int*); |
98 | static int t128_detect(struct scsi_host_template *); | 98 | static int t128_detect(struct scsi_host_template *); |
99 | static int t128_queue_command(struct scsi_cmnd *, | 99 | static int t128_queue_command(struct Scsi_Host *, struct scsi_cmnd *); |
100 | void (*done)(struct scsi_cmnd *)); | ||
101 | static int t128_bus_reset(struct scsi_cmnd *); | 100 | static int t128_bus_reset(struct scsi_cmnd *); |
102 | 101 | ||
103 | #ifndef CMD_PER_LUN | 102 | #ifndef CMD_PER_LUN |
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c index 27866b0adfeb..a124a28f2ccb 100644 --- a/drivers/scsi/tmscsim.c +++ b/drivers/scsi/tmscsim.c | |||
@@ -1883,7 +1883,7 @@ dc390_ScsiRstDetect( struct dc390_acb* pACB ) | |||
1883 | return; | 1883 | return; |
1884 | } | 1884 | } |
1885 | 1885 | ||
1886 | static int DC390_queuecommand(struct scsi_cmnd *cmd, | 1886 | static int DC390_queuecommand_lck(struct scsi_cmnd *cmd, |
1887 | void (*done)(struct scsi_cmnd *)) | 1887 | void (*done)(struct scsi_cmnd *)) |
1888 | { | 1888 | { |
1889 | struct scsi_device *sdev = cmd->device; | 1889 | struct scsi_device *sdev = cmd->device; |
@@ -1944,6 +1944,8 @@ static int DC390_queuecommand(struct scsi_cmnd *cmd, | |||
1944 | return SCSI_MLQUEUE_DEVICE_BUSY; | 1944 | return SCSI_MLQUEUE_DEVICE_BUSY; |
1945 | } | 1945 | } |
1946 | 1946 | ||
1947 | static DEF_SCSI_QCMD(DC390_queuecommand) | ||
1948 | |||
1947 | static void dc390_dumpinfo (struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* pSRB) | 1949 | static void dc390_dumpinfo (struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* pSRB) |
1948 | { | 1950 | { |
1949 | struct pci_dev *pdev; | 1951 | struct pci_dev *pdev; |
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c index 5d9fdeeb2315..edfc5da8be4c 100644 --- a/drivers/scsi/u14-34f.c +++ b/drivers/scsi/u14-34f.c | |||
@@ -433,7 +433,7 @@ | |||
433 | 433 | ||
434 | static int u14_34f_detect(struct scsi_host_template *); | 434 | static int u14_34f_detect(struct scsi_host_template *); |
435 | static int u14_34f_release(struct Scsi_Host *); | 435 | static int u14_34f_release(struct Scsi_Host *); |
436 | static int u14_34f_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *)); | 436 | static int u14_34f_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); |
437 | static int u14_34f_eh_abort(struct scsi_cmnd *); | 437 | static int u14_34f_eh_abort(struct scsi_cmnd *); |
438 | static int u14_34f_eh_host_reset(struct scsi_cmnd *); | 438 | static int u14_34f_eh_host_reset(struct scsi_cmnd *); |
439 | static int u14_34f_bios_param(struct scsi_device *, struct block_device *, | 439 | static int u14_34f_bios_param(struct scsi_device *, struct block_device *, |
@@ -1248,7 +1248,7 @@ static void scsi_to_dev_dir(unsigned int i, unsigned int j) { | |||
1248 | 1248 | ||
1249 | } | 1249 | } |
1250 | 1250 | ||
1251 | static int u14_34f_queuecommand(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) { | 1251 | static int u14_34f_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) { |
1252 | unsigned int i, j, k; | 1252 | unsigned int i, j, k; |
1253 | struct mscp *cpp; | 1253 | struct mscp *cpp; |
1254 | 1254 | ||
@@ -1329,6 +1329,8 @@ static int u14_34f_queuecommand(struct scsi_cmnd *SCpnt, void (*done)(struct scs | |||
1329 | return 0; | 1329 | return 0; |
1330 | } | 1330 | } |
1331 | 1331 | ||
1332 | static DEF_SCSI_QCMD(u14_34f_queuecommand) | ||
1333 | |||
1332 | static int u14_34f_eh_abort(struct scsi_cmnd *SCarg) { | 1334 | static int u14_34f_eh_abort(struct scsi_cmnd *SCarg) { |
1333 | unsigned int i, j; | 1335 | unsigned int i, j; |
1334 | 1336 | ||
diff --git a/drivers/scsi/ultrastor.c b/drivers/scsi/ultrastor.c index 27aa40f3980e..0571ef9639cb 100644 --- a/drivers/scsi/ultrastor.c +++ b/drivers/scsi/ultrastor.c | |||
@@ -700,7 +700,7 @@ static inline void build_sg_list(struct mscp *mscp, struct scsi_cmnd *SCpnt) | |||
700 | mscp->transfer_data_length = transfer_length; | 700 | mscp->transfer_data_length = transfer_length; |
701 | } | 701 | } |
702 | 702 | ||
703 | static int ultrastor_queuecommand(struct scsi_cmnd *SCpnt, | 703 | static int ultrastor_queuecommand_lck(struct scsi_cmnd *SCpnt, |
704 | void (*done) (struct scsi_cmnd *)) | 704 | void (*done) (struct scsi_cmnd *)) |
705 | { | 705 | { |
706 | struct mscp *my_mscp; | 706 | struct mscp *my_mscp; |
@@ -825,6 +825,8 @@ retry: | |||
825 | return 0; | 825 | return 0; |
826 | } | 826 | } |
827 | 827 | ||
828 | static DEF_SCSI_QCMD(ultrastor_queuecommand) | ||
829 | |||
828 | /* This code must deal with 2 cases: | 830 | /* This code must deal with 2 cases: |
829 | 831 | ||
830 | 1. The command has not been written to the OGM. In this case, set | 832 | 1. The command has not been written to the OGM. In this case, set |
diff --git a/drivers/scsi/ultrastor.h b/drivers/scsi/ultrastor.h index a692905f95f7..165c18b5cf5f 100644 --- a/drivers/scsi/ultrastor.h +++ b/drivers/scsi/ultrastor.h | |||
@@ -15,8 +15,7 @@ | |||
15 | 15 | ||
16 | static int ultrastor_detect(struct scsi_host_template *); | 16 | static int ultrastor_detect(struct scsi_host_template *); |
17 | static const char *ultrastor_info(struct Scsi_Host *shpnt); | 17 | static const char *ultrastor_info(struct Scsi_Host *shpnt); |
18 | static int ultrastor_queuecommand(struct scsi_cmnd *, | 18 | static int ultrastor_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); |
19 | void (*done)(struct scsi_cmnd *)); | ||
20 | static int ultrastor_abort(struct scsi_cmnd *); | 19 | static int ultrastor_abort(struct scsi_cmnd *); |
21 | static int ultrastor_host_reset(struct scsi_cmnd *); | 20 | static int ultrastor_host_reset(struct scsi_cmnd *); |
22 | static int ultrastor_biosparam(struct scsi_device *, struct block_device *, | 21 | static int ultrastor_biosparam(struct scsi_device *, struct block_device *, |
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c index 26894459c37f..a18996d24466 100644 --- a/drivers/scsi/vmw_pvscsi.c +++ b/drivers/scsi/vmw_pvscsi.c | |||
@@ -690,7 +690,7 @@ static int pvscsi_queue_ring(struct pvscsi_adapter *adapter, | |||
690 | return 0; | 690 | return 0; |
691 | } | 691 | } |
692 | 692 | ||
693 | static int pvscsi_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) | 693 | static int pvscsi_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) |
694 | { | 694 | { |
695 | struct Scsi_Host *host = cmd->device->host; | 695 | struct Scsi_Host *host = cmd->device->host; |
696 | struct pvscsi_adapter *adapter = shost_priv(host); | 696 | struct pvscsi_adapter *adapter = shost_priv(host); |
@@ -719,6 +719,8 @@ static int pvscsi_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) | |||
719 | return 0; | 719 | return 0; |
720 | } | 720 | } |
721 | 721 | ||
722 | static DEF_SCSI_QCMD(pvscsi_queue) | ||
723 | |||
722 | static int pvscsi_abort(struct scsi_cmnd *cmd) | 724 | static int pvscsi_abort(struct scsi_cmnd *cmd) |
723 | { | 725 | { |
724 | struct pvscsi_adapter *adapter = shost_priv(cmd->device->host); | 726 | struct pvscsi_adapter *adapter = shost_priv(cmd->device->host); |
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c index b701bf2cc187..5f697e0bd009 100644 --- a/drivers/scsi/wd33c93.c +++ b/drivers/scsi/wd33c93.c | |||
@@ -371,8 +371,8 @@ calc_sync_msg(unsigned int period, unsigned int offset, unsigned int fast, | |||
371 | msg[1] = offset; | 371 | msg[1] = offset; |
372 | } | 372 | } |
373 | 373 | ||
374 | int | 374 | static int |
375 | wd33c93_queuecommand(struct scsi_cmnd *cmd, | 375 | wd33c93_queuecommand_lck(struct scsi_cmnd *cmd, |
376 | void (*done)(struct scsi_cmnd *)) | 376 | void (*done)(struct scsi_cmnd *)) |
377 | { | 377 | { |
378 | struct WD33C93_hostdata *hostdata; | 378 | struct WD33C93_hostdata *hostdata; |
@@ -468,6 +468,8 @@ wd33c93_queuecommand(struct scsi_cmnd *cmd, | |||
468 | return 0; | 468 | return 0; |
469 | } | 469 | } |
470 | 470 | ||
471 | DEF_SCSI_QCMD(wd33c93_queuecommand) | ||
472 | |||
471 | /* | 473 | /* |
472 | * This routine attempts to start a scsi command. If the host_card is | 474 | * This routine attempts to start a scsi command. If the host_card is |
473 | * already connected, we give up immediately. Otherwise, look through | 475 | * already connected, we give up immediately. Otherwise, look through |
diff --git a/drivers/scsi/wd33c93.h b/drivers/scsi/wd33c93.h index 1ed5f3bf388e..3b463d7304dc 100644 --- a/drivers/scsi/wd33c93.h +++ b/drivers/scsi/wd33c93.h | |||
@@ -343,8 +343,7 @@ struct WD33C93_hostdata { | |||
343 | void wd33c93_init (struct Scsi_Host *instance, const wd33c93_regs regs, | 343 | void wd33c93_init (struct Scsi_Host *instance, const wd33c93_regs regs, |
344 | dma_setup_t setup, dma_stop_t stop, int clock_freq); | 344 | dma_setup_t setup, dma_stop_t stop, int clock_freq); |
345 | int wd33c93_abort (struct scsi_cmnd *cmd); | 345 | int wd33c93_abort (struct scsi_cmnd *cmd); |
346 | int wd33c93_queuecommand (struct scsi_cmnd *cmd, | 346 | int wd33c93_queuecommand (struct Scsi_Host *h, struct scsi_cmnd *cmd); |
347 | void (*done)(struct scsi_cmnd *)); | ||
348 | void wd33c93_intr (struct Scsi_Host *instance); | 347 | void wd33c93_intr (struct Scsi_Host *instance); |
349 | int wd33c93_proc_info(struct Scsi_Host *, char *, char **, off_t, int, int); | 348 | int wd33c93_proc_info(struct Scsi_Host *, char *, char **, off_t, int, int); |
350 | int wd33c93_host_reset (struct scsi_cmnd *); | 349 | int wd33c93_host_reset (struct scsi_cmnd *); |
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c index 333580bf37c5..db451ae0a368 100644 --- a/drivers/scsi/wd7000.c +++ b/drivers/scsi/wd7000.c | |||
@@ -1082,7 +1082,7 @@ static irqreturn_t wd7000_intr(int irq, void *dev_id) | |||
1082 | return IRQ_HANDLED; | 1082 | return IRQ_HANDLED; |
1083 | } | 1083 | } |
1084 | 1084 | ||
1085 | static int wd7000_queuecommand(struct scsi_cmnd *SCpnt, | 1085 | static int wd7000_queuecommand_lck(struct scsi_cmnd *SCpnt, |
1086 | void (*done)(struct scsi_cmnd *)) | 1086 | void (*done)(struct scsi_cmnd *)) |
1087 | { | 1087 | { |
1088 | Scb *scb; | 1088 | Scb *scb; |
@@ -1139,6 +1139,8 @@ static int wd7000_queuecommand(struct scsi_cmnd *SCpnt, | |||
1139 | return 0; | 1139 | return 0; |
1140 | } | 1140 | } |
1141 | 1141 | ||
1142 | static DEF_SCSI_QCMD(wd7000_queuecommand) | ||
1143 | |||
1142 | static int wd7000_diagnostics(Adapter * host, int code) | 1144 | static int wd7000_diagnostics(Adapter * host, int code) |
1143 | { | 1145 | { |
1144 | static IcbDiag icb = { ICB_OP_DIAGNOSTICS }; | 1146 | static IcbDiag icb = { ICB_OP_DIAGNOSTICS }; |
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c index 53be4d35a0aa..842e3b2a02b1 100644 --- a/drivers/serial/8250_pci.c +++ b/drivers/serial/8250_pci.c | |||
@@ -2285,6 +2285,8 @@ static struct pciserial_board pci_boards[] __devinitdata = { | |||
2285 | 2285 | ||
2286 | static const struct pci_device_id softmodem_blacklist[] = { | 2286 | static const struct pci_device_id softmodem_blacklist[] = { |
2287 | { PCI_VDEVICE(AL, 0x5457), }, /* ALi Corporation M5457 AC'97 Modem */ | 2287 | { PCI_VDEVICE(AL, 0x5457), }, /* ALi Corporation M5457 AC'97 Modem */ |
2288 | { PCI_VDEVICE(MOTOROLA, 0x3052), }, /* Motorola Si3052-based modem */ | ||
2289 | { PCI_DEVICE(0x1543, 0x3052), }, /* Si3052-based modem, default IDs */ | ||
2288 | }; | 2290 | }; |
2289 | 2291 | ||
2290 | /* | 2292 | /* |
@@ -2863,6 +2865,9 @@ static struct pci_device_id serial_pci_tbl[] = { | |||
2863 | PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL, | 2865 | PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL, |
2864 | 0, 0, | 2866 | 0, 0, |
2865 | pbn_b0_4_1152000 }, | 2867 | pbn_b0_4_1152000 }, |
2868 | { PCI_VENDOR_ID_OXSEMI, 0x9505, | ||
2869 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2870 | pbn_b0_bt_2_921600 }, | ||
2866 | 2871 | ||
2867 | /* | 2872 | /* |
2868 | * The below card is a little controversial since it is the | 2873 | * The below card is a little controversial since it is the |
diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c index a9eff2b18eab..19cac9f610fd 100644 --- a/drivers/serial/bfin_5xx.c +++ b/drivers/serial/bfin_5xx.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/tty.h> | 23 | #include <linux/tty.h> |
24 | #include <linux/tty_flip.h> | 24 | #include <linux/tty_flip.h> |
25 | #include <linux/serial_core.h> | 25 | #include <linux/serial_core.h> |
26 | #include <linux/dma-mapping.h> | ||
26 | 27 | ||
27 | #if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \ | 28 | #if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \ |
28 | defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE) | 29 | defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE) |
@@ -33,12 +34,10 @@ | |||
33 | #include <asm/gpio.h> | 34 | #include <asm/gpio.h> |
34 | #include <mach/bfin_serial_5xx.h> | 35 | #include <mach/bfin_serial_5xx.h> |
35 | 36 | ||
36 | #ifdef CONFIG_SERIAL_BFIN_DMA | 37 | #include <asm/dma.h> |
37 | #include <linux/dma-mapping.h> | ||
38 | #include <asm/io.h> | 38 | #include <asm/io.h> |
39 | #include <asm/irq.h> | 39 | #include <asm/irq.h> |
40 | #include <asm/cacheflush.h> | 40 | #include <asm/cacheflush.h> |
41 | #endif | ||
42 | 41 | ||
43 | #ifdef CONFIG_SERIAL_BFIN_MODULE | 42 | #ifdef CONFIG_SERIAL_BFIN_MODULE |
44 | # undef CONFIG_EARLY_PRINTK | 43 | # undef CONFIG_EARLY_PRINTK |
@@ -360,7 +359,6 @@ static void bfin_serial_tx_chars(struct bfin_serial_port *uart) | |||
360 | UART_PUT_CHAR(uart, xmit->buf[xmit->tail]); | 359 | UART_PUT_CHAR(uart, xmit->buf[xmit->tail]); |
361 | xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); | 360 | xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); |
362 | uart->port.icount.tx++; | 361 | uart->port.icount.tx++; |
363 | SSYNC(); | ||
364 | } | 362 | } |
365 | 363 | ||
366 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) | 364 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) |
@@ -688,6 +686,13 @@ static int bfin_serial_startup(struct uart_port *port) | |||
688 | 686 | ||
689 | # ifdef CONFIG_BF54x | 687 | # ifdef CONFIG_BF54x |
690 | { | 688 | { |
689 | /* | ||
690 | * UART2 and UART3 on BF548 share interrupt PINs and DMA | ||
691 | * controllers with SPORT2 and SPORT3. UART rx and tx | ||
692 | * interrupts are generated in PIO mode only when their | ||
693 | * peripheral mapping registers are configured properly, which means | ||
694 | * the corresponding DMA channels must be requested in PIO mode as well. | ||
695 | */ | ||
691 | unsigned uart_dma_ch_rx, uart_dma_ch_tx; | 696 | unsigned uart_dma_ch_rx, uart_dma_ch_tx; |
692 | 697 | ||
693 | switch (uart->port.irq) { | 698 | switch (uart->port.irq) { |
@@ -734,8 +739,7 @@ static int bfin_serial_startup(struct uart_port *port) | |||
734 | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | | 739 | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | |
735 | IRQF_DISABLED, "BFIN_UART_CTS", uart)) { | 740 | IRQF_DISABLED, "BFIN_UART_CTS", uart)) { |
736 | uart->cts_pin = -1; | 741 | uart->cts_pin = -1; |
737 | pr_info("Unable to attach BlackFin UART CTS interrupt.\ | 742 | pr_info("Unable to attach BlackFin UART CTS interrupt. So, disable it.\n"); |
738 | So, disable it.\n"); | ||
739 | } | 743 | } |
740 | } | 744 | } |
741 | if (uart->rts_pin >= 0) { | 745 | if (uart->rts_pin >= 0) { |
@@ -747,8 +751,7 @@ static int bfin_serial_startup(struct uart_port *port) | |||
747 | if (request_irq(uart->status_irq, | 751 | if (request_irq(uart->status_irq, |
748 | bfin_serial_mctrl_cts_int, | 752 | bfin_serial_mctrl_cts_int, |
749 | IRQF_DISABLED, "BFIN_UART_MODEM_STATUS", uart)) { | 753 | IRQF_DISABLED, "BFIN_UART_MODEM_STATUS", uart)) { |
750 | pr_info("Unable to attach BlackFin UART Modem \ | 754 | pr_info("Unable to attach BlackFin UART Modem Status interrupt.\n"); |
751 | Status interrupt.\n"); | ||
752 | } | 755 | } |
753 | 756 | ||
754 | /* CTS RTS PINs are negative assertive. */ | 757 | /* CTS RTS PINs are negative assertive. */ |
@@ -846,6 +849,8 @@ bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios, | |||
846 | if (termios->c_cflag & CMSPAR) | 849 | if (termios->c_cflag & CMSPAR) |
847 | lcr |= STP; | 850 | lcr |= STP; |
848 | 851 | ||
852 | spin_lock_irqsave(&uart->port.lock, flags); | ||
853 | |||
849 | port->read_status_mask = OE; | 854 | port->read_status_mask = OE; |
850 | if (termios->c_iflag & INPCK) | 855 | if (termios->c_iflag & INPCK) |
851 | port->read_status_mask |= (FE | PE); | 856 | port->read_status_mask |= (FE | PE); |
@@ -875,8 +880,6 @@ bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios, | |||
875 | if (termios->c_line != N_IRDA) | 880 | if (termios->c_line != N_IRDA) |
876 | quot -= ANOMALY_05000230; | 881 | quot -= ANOMALY_05000230; |
877 | 882 | ||
878 | spin_lock_irqsave(&uart->port.lock, flags); | ||
879 | |||
880 | UART_SET_ANOMALY_THRESHOLD(uart, USEC_PER_SEC / baud * 15); | 883 | UART_SET_ANOMALY_THRESHOLD(uart, USEC_PER_SEC / baud * 15); |
881 | 884 | ||
882 | /* Disable UART */ | 885 | /* Disable UART */ |
@@ -1321,6 +1324,14 @@ struct console __init *bfin_earlyserial_init(unsigned int port, | |||
1321 | struct bfin_serial_port *uart; | 1324 | struct bfin_serial_port *uart; |
1322 | struct ktermios t; | 1325 | struct ktermios t; |
1323 | 1326 | ||
1327 | #ifdef CONFIG_SERIAL_BFIN_CONSOLE | ||
1328 | /* | ||
1329 | * If we are using early serial, don't let the normal console rewind | ||
1330 | * the log buffer, since that causes things to be printed multiple times. | ||
1331 | */ | ||
1332 | bfin_serial_console.flags &= ~CON_PRINTBUFFER; | ||
1333 | #endif | ||
1334 | |||
1324 | if (port == -1 || port >= nr_active_ports) | 1335 | if (port == -1 || port >= nr_active_ports) |
1325 | port = 0; | 1336 | port = 0; |
1326 | bfin_serial_init_ports(); | 1337 | bfin_serial_init_ports(); |
diff --git a/drivers/serial/kgdboc.c b/drivers/serial/kgdboc.c index d4b711c9a416..3374618300af 100644 --- a/drivers/serial/kgdboc.c +++ b/drivers/serial/kgdboc.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/tty.h> | 18 | #include <linux/tty.h> |
19 | #include <linux/console.h> | 19 | #include <linux/console.h> |
20 | #include <linux/vt_kern.h> | 20 | #include <linux/vt_kern.h> |
21 | #include <linux/input.h> | ||
21 | 22 | ||
22 | #define MAX_CONFIG_LEN 40 | 23 | #define MAX_CONFIG_LEN 40 |
23 | 24 | ||
@@ -37,6 +38,61 @@ static struct tty_driver *kgdb_tty_driver; | |||
37 | static int kgdb_tty_line; | 38 | static int kgdb_tty_line; |
38 | 39 | ||
39 | #ifdef CONFIG_KDB_KEYBOARD | 40 | #ifdef CONFIG_KDB_KEYBOARD |
41 | static int kgdboc_reset_connect(struct input_handler *handler, | ||
42 | struct input_dev *dev, | ||
43 | const struct input_device_id *id) | ||
44 | { | ||
45 | input_reset_device(dev); | ||
46 | |||
47 | /* Return an error - we do not want to bind, just to reset */ | ||
48 | return -ENODEV; | ||
49 | } | ||
50 | |||
51 | static void kgdboc_reset_disconnect(struct input_handle *handle) | ||
52 | { | ||
53 | /* We do not expect anyone to actually bind to us */ | ||
54 | BUG(); | ||
55 | } | ||
56 | |||
57 | static const struct input_device_id kgdboc_reset_ids[] = { | ||
58 | { | ||
59 | .flags = INPUT_DEVICE_ID_MATCH_EVBIT, | ||
60 | .evbit = { BIT_MASK(EV_KEY) }, | ||
61 | }, | ||
62 | { } | ||
63 | }; | ||
64 | |||
65 | static struct input_handler kgdboc_reset_handler = { | ||
66 | .connect = kgdboc_reset_connect, | ||
67 | .disconnect = kgdboc_reset_disconnect, | ||
68 | .name = "kgdboc_reset", | ||
69 | .id_table = kgdboc_reset_ids, | ||
70 | }; | ||
71 | |||
72 | static DEFINE_MUTEX(kgdboc_reset_mutex); | ||
73 | |||
74 | static void kgdboc_restore_input_helper(struct work_struct *dummy) | ||
75 | { | ||
76 | /* | ||
77 | * We need to take a mutex to prevent several instances of | ||
78 | * this work running on different CPUs so they don't try | ||
79 | * to register again already registered handler. | ||
80 | */ | ||
81 | mutex_lock(&kgdboc_reset_mutex); | ||
82 | |||
83 | if (input_register_handler(&kgdboc_reset_handler) == 0) | ||
84 | input_unregister_handler(&kgdboc_reset_handler); | ||
85 | |||
86 | mutex_unlock(&kgdboc_reset_mutex); | ||
87 | } | ||
88 | |||
89 | static DECLARE_WORK(kgdboc_restore_input_work, kgdboc_restore_input_helper); | ||
90 | |||
91 | static void kgdboc_restore_input(void) | ||
92 | { | ||
93 | schedule_work(&kgdboc_restore_input_work); | ||
94 | } | ||
95 | |||
40 | static int kgdboc_register_kbd(char **cptr) | 96 | static int kgdboc_register_kbd(char **cptr) |
41 | { | 97 | { |
42 | if (strncmp(*cptr, "kbd", 3) == 0) { | 98 | if (strncmp(*cptr, "kbd", 3) == 0) { |
@@ -64,10 +120,12 @@ static void kgdboc_unregister_kbd(void) | |||
64 | i--; | 120 | i--; |
65 | } | 121 | } |
66 | } | 122 | } |
123 | flush_work_sync(&kgdboc_restore_input_work); | ||
67 | } | 124 | } |
68 | #else /* ! CONFIG_KDB_KEYBOARD */ | 125 | #else /* ! CONFIG_KDB_KEYBOARD */ |
69 | #define kgdboc_register_kbd(x) 0 | 126 | #define kgdboc_register_kbd(x) 0 |
70 | #define kgdboc_unregister_kbd() | 127 | #define kgdboc_unregister_kbd() |
128 | #define kgdboc_restore_input() | ||
71 | #endif /* ! CONFIG_KDB_KEYBOARD */ | 129 | #endif /* ! CONFIG_KDB_KEYBOARD */ |
72 | 130 | ||
73 | static int kgdboc_option_setup(char *opt) | 131 | static int kgdboc_option_setup(char *opt) |
@@ -231,6 +289,7 @@ static void kgdboc_post_exp_handler(void) | |||
231 | dbg_restore_graphics = 0; | 289 | dbg_restore_graphics = 0; |
232 | con_debug_leave(); | 290 | con_debug_leave(); |
233 | } | 291 | } |
292 | kgdboc_restore_input(); | ||
234 | } | 293 | } |
235 | 294 | ||
236 | static struct kgdb_io kgdboc_io_ops = { | 295 | static struct kgdb_io kgdboc_io_ops = { |
diff --git a/drivers/sh/clk/core.c b/drivers/sh/clk/core.c index 09615b51d591..cb12a8e1466b 100644 --- a/drivers/sh/clk/core.c +++ b/drivers/sh/clk/core.c | |||
@@ -571,7 +571,7 @@ long clk_round_parent(struct clk *clk, unsigned long target, | |||
571 | *best_freq = freq_max; | 571 | *best_freq = freq_max; |
572 | } | 572 | } |
573 | 573 | ||
574 | pr_debug("too low freq %lu, error %lu\n", freq->frequency, | 574 | pr_debug("too low freq %u, error %lu\n", freq->frequency, |
575 | target - freq_max); | 575 | target - freq_max); |
576 | 576 | ||
577 | if (!error) | 577 | if (!error) |
@@ -591,7 +591,7 @@ long clk_round_parent(struct clk *clk, unsigned long target, | |||
591 | *best_freq = freq_min; | 591 | *best_freq = freq_min; |
592 | } | 592 | } |
593 | 593 | ||
594 | pr_debug("too high freq %lu, error %lu\n", freq->frequency, | 594 | pr_debug("too high freq %u, error %lu\n", freq->frequency, |
595 | freq_min - target); | 595 | freq_min - target); |
596 | 596 | ||
597 | if (!error) | 597 | if (!error) |
diff --git a/drivers/sh/intc/virq.c b/drivers/sh/intc/virq.c index e5bf5d3c698e..4e0ff7181164 100644 --- a/drivers/sh/intc/virq.c +++ b/drivers/sh/intc/virq.c | |||
@@ -215,7 +215,7 @@ restart: | |||
215 | entry = radix_tree_deref_slot((void **)entries[i]); | 215 | entry = radix_tree_deref_slot((void **)entries[i]); |
216 | if (unlikely(!entry)) | 216 | if (unlikely(!entry)) |
217 | continue; | 217 | continue; |
218 | if (unlikely(entry == RADIX_TREE_RETRY)) | 218 | if (radix_tree_deref_retry(entry)) |
219 | goto restart; | 219 | goto restart; |
220 | 220 | ||
221 | irq = create_irq(); | 221 | irq = create_irq(); |
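The replacement of the RADIX_TREE_RETRY comparison with radix_tree_deref_retry() uses the helper the radix-tree API provides for lockless lookups: when a slot dereferenced under RCU turns out to be a moved/indirect entry, the lookup must be restarted. A minimal usage sketch (illustrative only, not the driver's code):

    #include <linux/radix-tree.h>
    #include <linux/rcupdate.h>

    static void *lookup_with_retry(struct radix_tree_root *root, unsigned long index)
    {
            void **slot;
            void *item;

            rcu_read_lock();
    restart:
            slot = radix_tree_lookup_slot(root, index);
            item = slot ? radix_tree_deref_slot(slot) : NULL;
            if (radix_tree_deref_retry(item))
                    goto restart;   /* slot moved under us; redo the lookup */
            /* a real caller must take its own reference before unlocking */
            rcu_read_unlock();
            return item;
    }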
diff --git a/drivers/staging/ath6kl/Kconfig b/drivers/staging/ath6kl/Kconfig index ae2cdf48b74c..8a5caa30b85f 100644 --- a/drivers/staging/ath6kl/Kconfig +++ b/drivers/staging/ath6kl/Kconfig | |||
@@ -102,7 +102,7 @@ config AR600x_BT_RESET_PIN | |||
102 | 102 | ||
103 | config ATH6KL_CFG80211 | 103 | config ATH6KL_CFG80211 |
104 | bool "CFG80211 support" | 104 | bool "CFG80211 support" |
105 | depends on ATH6K_LEGACY | 105 | depends on ATH6K_LEGACY && CFG80211 |
106 | help | 106 | help |
107 | Enables support for CFG80211 APIs. The default option is to use WEXT. Even with this option enabled, WEXT is not explicitly disabled and the onus of not exercising WEXT lies on the application(s) running in the user space. | 107 | Enables support for CFG80211 APIs. The default option is to use WEXT. Even with this option enabled, WEXT is not explicitly disabled and the onus of not exercising WEXT lies on the application(s) running in the user space. |
108 | 108 | ||
diff --git a/drivers/staging/ath6kl/os/linux/ar6000_drv.c b/drivers/staging/ath6kl/os/linux/ar6000_drv.c index c5a6d6c16735..a659f7047373 100644 --- a/drivers/staging/ath6kl/os/linux/ar6000_drv.c +++ b/drivers/staging/ath6kl/os/linux/ar6000_drv.c | |||
@@ -1126,7 +1126,7 @@ ar6000_transfer_bin_file(AR_SOFTC_T *ar, AR6K_BIN_FILE file, A_UINT32 address, A | |||
1126 | if ((board_ext_address) && (fw_entry->size == (board_data_size + board_ext_data_size))) { | 1126 | if ((board_ext_address) && (fw_entry->size == (board_data_size + board_ext_data_size))) { |
1127 | A_UINT32 param; | 1127 | A_UINT32 param; |
1128 | 1128 | ||
1129 | status = BMIWriteMemory(ar->arHifDevice, board_ext_address, (A_UCHAR *)(((A_UINT32)fw_entry->data) + board_data_size), board_ext_data_size); | 1129 | status = BMIWriteMemory(ar->arHifDevice, board_ext_address, (A_UCHAR *)(fw_entry->data + board_data_size), board_ext_data_size); |
1130 | 1130 | ||
1131 | if (status != A_OK) { | 1131 | if (status != A_OK) { |
1132 | AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI operation failed: %d\n", __LINE__)); | 1132 | AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI operation failed: %d\n", __LINE__)); |
@@ -3030,7 +3030,8 @@ ar6000_data_tx(struct sk_buff *skb, struct net_device *dev) | |||
3030 | A_UINT8 csumDest=0; | 3030 | A_UINT8 csumDest=0; |
3031 | A_UINT8 csum=skb->ip_summed; | 3031 | A_UINT8 csum=skb->ip_summed; |
3032 | if(csumOffload && (csum==CHECKSUM_PARTIAL)){ | 3032 | if(csumOffload && (csum==CHECKSUM_PARTIAL)){ |
3033 | csumStart=skb->csum_start-(skb->network_header-skb->head)+sizeof(ATH_LLC_SNAP_HDR); | 3033 | csumStart = (skb->head + skb->csum_start - skb_network_header(skb) + |
3034 | sizeof(ATH_LLC_SNAP_HDR)); | ||
3034 | csumDest=skb->csum_offset+csumStart; | 3035 | csumDest=skb->csum_offset+csumStart; |
3035 | } | 3036 | } |
3036 | #endif | 3037 | #endif |
diff --git a/drivers/staging/ath6kl/os/linux/cfg80211.c b/drivers/staging/ath6kl/os/linux/cfg80211.c index c94ad29eeb4d..7269d0a1d618 100644 --- a/drivers/staging/ath6kl/os/linux/cfg80211.c +++ b/drivers/staging/ath6kl/os/linux/cfg80211.c | |||
@@ -808,7 +808,7 @@ ar6k_cfg80211_scanComplete_event(AR_SOFTC_T *ar, A_STATUS status) | |||
808 | 808 | ||
809 | static int | 809 | static int |
810 | ar6k_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, | 810 | ar6k_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, |
811 | A_UINT8 key_index, const A_UINT8 *mac_addr, | 811 | A_UINT8 key_index, bool pairwise, const A_UINT8 *mac_addr, |
812 | struct key_params *params) | 812 | struct key_params *params) |
813 | { | 813 | { |
814 | AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(ndev); | 814 | AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(ndev); |
@@ -901,7 +901,7 @@ ar6k_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, | |||
901 | 901 | ||
902 | static int | 902 | static int |
903 | ar6k_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev, | 903 | ar6k_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev, |
904 | A_UINT8 key_index, const A_UINT8 *mac_addr) | 904 | A_UINT8 key_index, bool pairwise, const A_UINT8 *mac_addr) |
905 | { | 905 | { |
906 | AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(ndev); | 906 | AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(ndev); |
907 | 907 | ||
@@ -936,7 +936,8 @@ ar6k_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev, | |||
936 | 936 | ||
937 | static int | 937 | static int |
938 | ar6k_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev, | 938 | ar6k_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev, |
939 | A_UINT8 key_index, const A_UINT8 *mac_addr, void *cookie, | 939 | A_UINT8 key_index, bool pairwise, const A_UINT8 *mac_addr, |
940 | void *cookie, | ||
940 | void (*callback)(void *cookie, struct key_params*)) | 941 | void (*callback)(void *cookie, struct key_params*)) |
941 | { | 942 | { |
942 | AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(ndev); | 943 | AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(ndev); |
diff --git a/drivers/staging/batman-adv/hard-interface.c b/drivers/staging/batman-adv/hard-interface.c index 80cfa8669585..b68a7e5173be 100644 --- a/drivers/staging/batman-adv/hard-interface.c +++ b/drivers/staging/batman-adv/hard-interface.c | |||
@@ -165,7 +165,7 @@ static void update_mac_addresses(struct batman_if *batman_if) | |||
165 | batman_if->net_dev->dev_addr, ETH_ALEN); | 165 | batman_if->net_dev->dev_addr, ETH_ALEN); |
166 | } | 166 | } |
167 | 167 | ||
168 | static void check_known_mac_addr(uint8_t *addr) | 168 | static void check_known_mac_addr(struct net_device *net_dev) |
169 | { | 169 | { |
170 | struct batman_if *batman_if; | 170 | struct batman_if *batman_if; |
171 | 171 | ||
@@ -175,11 +175,16 @@ static void check_known_mac_addr(uint8_t *addr) | |||
175 | (batman_if->if_status != IF_TO_BE_ACTIVATED)) | 175 | (batman_if->if_status != IF_TO_BE_ACTIVATED)) |
176 | continue; | 176 | continue; |
177 | 177 | ||
178 | if (!compare_orig(batman_if->net_dev->dev_addr, addr)) | 178 | if (batman_if->net_dev == net_dev) |
179 | continue; | ||
180 | |||
181 | if (!compare_orig(batman_if->net_dev->dev_addr, | ||
182 | net_dev->dev_addr)) | ||
179 | continue; | 183 | continue; |
180 | 184 | ||
181 | pr_warning("The newly added mac address (%pM) already exists " | 185 | pr_warning("The newly added mac address (%pM) already exists " |
182 | "on: %s\n", addr, batman_if->net_dev->name); | 186 | "on: %s\n", net_dev->dev_addr, |
187 | batman_if->net_dev->name); | ||
183 | pr_warning("It is strongly recommended to keep mac addresses " | 188 | pr_warning("It is strongly recommended to keep mac addresses " |
184 | "unique to avoid problems!\n"); | 189 | "unique to avoid problems!\n"); |
185 | } | 190 | } |
@@ -430,7 +435,7 @@ static struct batman_if *hardif_add_interface(struct net_device *net_dev) | |||
430 | atomic_set(&batman_if->refcnt, 0); | 435 | atomic_set(&batman_if->refcnt, 0); |
431 | hardif_hold(batman_if); | 436 | hardif_hold(batman_if); |
432 | 437 | ||
433 | check_known_mac_addr(batman_if->net_dev->dev_addr); | 438 | check_known_mac_addr(batman_if->net_dev); |
434 | 439 | ||
435 | spin_lock(&if_list_lock); | 440 | spin_lock(&if_list_lock); |
436 | list_add_tail_rcu(&batman_if->list, &if_list); | 441 | list_add_tail_rcu(&batman_if->list, &if_list); |
@@ -515,7 +520,7 @@ static int hard_if_event(struct notifier_block *this, | |||
515 | goto out; | 520 | goto out; |
516 | } | 521 | } |
517 | 522 | ||
518 | check_known_mac_addr(batman_if->net_dev->dev_addr); | 523 | check_known_mac_addr(batman_if->net_dev); |
519 | update_mac_addresses(batman_if); | 524 | update_mac_addresses(batman_if); |
520 | 525 | ||
521 | bat_priv = netdev_priv(batman_if->soft_iface); | 526 | bat_priv = netdev_priv(batman_if->soft_iface); |
diff --git a/drivers/staging/batman-adv/routing.c b/drivers/staging/batman-adv/routing.c index 90102631330b..657b69e6b957 100644 --- a/drivers/staging/batman-adv/routing.c +++ b/drivers/staging/batman-adv/routing.c | |||
@@ -1000,10 +1000,10 @@ int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if) | |||
1000 | 1000 | ||
1001 | /* find a suitable router for this originator, and use | 1001 | /* find a suitable router for this originator, and use |
1002 | * bonding if possible. */ | 1002 | * bonding if possible. */ |
1003 | struct neigh_node *find_router(struct orig_node *orig_node, | 1003 | struct neigh_node *find_router(struct bat_priv *bat_priv, |
1004 | struct orig_node *orig_node, | ||
1004 | struct batman_if *recv_if) | 1005 | struct batman_if *recv_if) |
1005 | { | 1006 | { |
1006 | struct bat_priv *bat_priv; | ||
1007 | struct orig_node *primary_orig_node; | 1007 | struct orig_node *primary_orig_node; |
1008 | struct orig_node *router_orig; | 1008 | struct orig_node *router_orig; |
1009 | struct neigh_node *router, *first_candidate, *best_router; | 1009 | struct neigh_node *router, *first_candidate, *best_router; |
@@ -1019,13 +1019,9 @@ struct neigh_node *find_router(struct orig_node *orig_node, | |||
1019 | /* without bonding, the first node should | 1019 | /* without bonding, the first node should |
1020 | * always choose the default router. */ | 1020 | * always choose the default router. */ |
1021 | 1021 | ||
1022 | if (!recv_if) | ||
1023 | return orig_node->router; | ||
1024 | |||
1025 | bat_priv = netdev_priv(recv_if->soft_iface); | ||
1026 | bonding_enabled = atomic_read(&bat_priv->bonding_enabled); | 1022 | bonding_enabled = atomic_read(&bat_priv->bonding_enabled); |
1027 | 1023 | ||
1028 | if (!bonding_enabled) | 1024 | if ((!recv_if) && (!bonding_enabled)) |
1029 | return orig_node->router; | 1025 | return orig_node->router; |
1030 | 1026 | ||
1031 | router_orig = orig_node->router->orig_node; | 1027 | router_orig = orig_node->router->orig_node; |
@@ -1154,7 +1150,7 @@ static int route_unicast_packet(struct sk_buff *skb, | |||
1154 | orig_node = ((struct orig_node *) | 1150 | orig_node = ((struct orig_node *) |
1155 | hash_find(bat_priv->orig_hash, unicast_packet->dest)); | 1151 | hash_find(bat_priv->orig_hash, unicast_packet->dest)); |
1156 | 1152 | ||
1157 | router = find_router(orig_node, recv_if); | 1153 | router = find_router(bat_priv, orig_node, recv_if); |
1158 | 1154 | ||
1159 | if (!router) { | 1155 | if (!router) { |
1160 | spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags); | 1156 | spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags); |
diff --git a/drivers/staging/batman-adv/routing.h b/drivers/staging/batman-adv/routing.h index 06ea99df3706..92674c8d9c03 100644 --- a/drivers/staging/batman-adv/routing.h +++ b/drivers/staging/batman-adv/routing.h | |||
@@ -38,8 +38,8 @@ int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if); | |||
38 | int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if); | 38 | int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if); |
39 | int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if); | 39 | int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if); |
40 | int recv_bat_packet(struct sk_buff *skb, struct batman_if *recv_if); | 40 | int recv_bat_packet(struct sk_buff *skb, struct batman_if *recv_if); |
41 | struct neigh_node *find_router(struct orig_node *orig_node, | 41 | struct neigh_node *find_router(struct bat_priv *bat_priv, |
42 | struct batman_if *recv_if); | 42 | struct orig_node *orig_node, struct batman_if *recv_if); |
43 | void update_bonding_candidates(struct bat_priv *bat_priv, | 43 | void update_bonding_candidates(struct bat_priv *bat_priv, |
44 | struct orig_node *orig_node); | 44 | struct orig_node *orig_node); |
45 | 45 | ||
diff --git a/drivers/staging/batman-adv/unicast.c b/drivers/staging/batman-adv/unicast.c index 0dac50d69c03..0459413ff67f 100644 --- a/drivers/staging/batman-adv/unicast.c +++ b/drivers/staging/batman-adv/unicast.c | |||
@@ -224,7 +224,7 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv) | |||
224 | if (!orig_node) | 224 | if (!orig_node) |
225 | orig_node = transtable_search(bat_priv, ethhdr->h_dest); | 225 | orig_node = transtable_search(bat_priv, ethhdr->h_dest); |
226 | 226 | ||
227 | router = find_router(orig_node, NULL); | 227 | router = find_router(bat_priv, orig_node, NULL); |
228 | 228 | ||
229 | if (!router) | 229 | if (!router) |
230 | goto unlock; | 230 | goto unlock; |
diff --git a/drivers/staging/bcm/Bcmchar.c b/drivers/staging/bcm/Bcmchar.c index 77fdfe24d999..fead9c56162e 100644 --- a/drivers/staging/bcm/Bcmchar.c +++ b/drivers/staging/bcm/Bcmchar.c | |||
@@ -1001,13 +1001,15 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg) | |||
1001 | } | 1001 | } |
1002 | #endif | 1002 | #endif |
1003 | case IOCTL_BE_BUCKET_SIZE: | 1003 | case IOCTL_BE_BUCKET_SIZE: |
1004 | Adapter->BEBucketSize = *(PULONG)arg; | 1004 | Status = 0; |
1005 | Status = STATUS_SUCCESS; | 1005 | if (get_user(Adapter->BEBucketSize, (unsigned long __user *)arg)) |
1006 | Status = -EFAULT; | ||
1006 | break; | 1007 | break; |
1007 | 1008 | ||
1008 | case IOCTL_RTPS_BUCKET_SIZE: | 1009 | case IOCTL_RTPS_BUCKET_SIZE: |
1009 | Adapter->rtPSBucketSize = *(PULONG)arg; | 1010 | Status = 0; |
1010 | Status = STATUS_SUCCESS; | 1011 | if (get_user(Adapter->rtPSBucketSize, (unsigned long __user *)arg)) |
1012 | Status = -EFAULT; | ||
1011 | break; | 1013 | break; |
1012 | case IOCTL_CHIP_RESET: | 1014 | case IOCTL_CHIP_RESET: |
1013 | { | 1015 | { |
@@ -1028,11 +1030,15 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg) | |||
1028 | case IOCTL_QOS_THRESHOLD: | 1030 | case IOCTL_QOS_THRESHOLD: |
1029 | { | 1031 | { |
1030 | USHORT uiLoopIndex; | 1032 | USHORT uiLoopIndex; |
1031 | for(uiLoopIndex = 0 ; uiLoopIndex < NO_OF_QUEUES ; uiLoopIndex++) | 1033 | |
1032 | { | 1034 | Status = 0; |
1033 | Adapter->PackInfo[uiLoopIndex].uiThreshold = *(PULONG)arg; | 1035 | for (uiLoopIndex = 0; uiLoopIndex < NO_OF_QUEUES; uiLoopIndex++) { |
1036 | if (get_user(Adapter->PackInfo[uiLoopIndex].uiThreshold, | ||
1037 | (unsigned long __user *)arg)) { | ||
1038 | Status = -EFAULT; | ||
1039 | break; | ||
1040 | } | ||
1034 | } | 1041 | } |
1035 | Status = STATUS_SUCCESS; | ||
1036 | break; | 1042 | break; |
1037 | } | 1043 | } |
1038 | 1044 | ||
@@ -1093,7 +1099,8 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg) | |||
1093 | } | 1099 | } |
1094 | case IOCTL_BCM_GET_CURRENT_STATUS: | 1100 | case IOCTL_BCM_GET_CURRENT_STATUS: |
1095 | { | 1101 | { |
1096 | LINK_STATE *plink_state = NULL; | 1102 | LINK_STATE plink_state; |
1103 | |||
1097 | /* Copy Ioctl Buffer structure */ | 1104 | /* Copy Ioctl Buffer structure */ |
1098 | if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) | 1105 | if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) |
1099 | { | 1106 | { |
@@ -1101,13 +1108,19 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg) | |||
1101 | Status = -EFAULT; | 1108 | Status = -EFAULT; |
1102 | break; | 1109 | break; |
1103 | } | 1110 | } |
1104 | plink_state = (LINK_STATE*)arg; | 1111 | if (IoBuffer.OutputLength != sizeof(plink_state)) { |
1105 | plink_state->bIdleMode = (UCHAR)Adapter->IdleMode; | 1112 | Status = -EINVAL; |
1106 | plink_state->bShutdownMode = Adapter->bShutStatus; | 1113 | break; |
1107 | plink_state->ucLinkStatus = (UCHAR)Adapter->LinkStatus; | 1114 | } |
1108 | if(copy_to_user(IoBuffer.OutputBuffer, | 1115 | |
1109 | (PUCHAR)plink_state, (UINT)IoBuffer.OutputLength)) | 1116 | if (copy_from_user(&plink_state, (void __user *)arg, sizeof(plink_state))) { |
1110 | { | 1117 | Status = -EFAULT; |
1118 | break; | ||
1119 | } | ||
1120 | plink_state.bIdleMode = (UCHAR)Adapter->IdleMode; | ||
1121 | plink_state.bShutdownMode = Adapter->bShutStatus; | ||
1122 | plink_state.ucLinkStatus = (UCHAR)Adapter->LinkStatus; | ||
1123 | if (copy_to_user(IoBuffer.OutputBuffer, &plink_state, IoBuffer.OutputLength)) { | ||
1111 | BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy_to_user Failed..\n"); | 1124 | BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy_to_user Failed..\n"); |
1112 | Status = -EFAULT; | 1125 | Status = -EFAULT; |
1113 | break; | 1126 | break; |
@@ -1331,7 +1344,9 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg) | |||
1331 | BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Copy From User space failed. status :%d", Status); | 1344 | BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Copy From User space failed. status :%d", Status); |
1332 | return -EFAULT; | 1345 | return -EFAULT; |
1333 | } | 1346 | } |
1334 | uiSectorSize = *((PUINT)(IoBuffer.InputBuffer)); /* FIXME: unchecked __user access */ | 1347 | if (get_user(uiSectorSize, (unsigned int __user *)IoBuffer.InputBuffer)) |
1348 | return -EFAULT; | ||
1349 | |||
1335 | if((uiSectorSize < MIN_SECTOR_SIZE) || (uiSectorSize > MAX_SECTOR_SIZE)) | 1350 | if((uiSectorSize < MIN_SECTOR_SIZE) || (uiSectorSize > MAX_SECTOR_SIZE)) |
1336 | { | 1351 | { |
1337 | 1352 | ||
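The Bcmchar ioctl fixes above stop dereferencing the user-supplied arg directly and go through get_user()/copy_from_user(), so an unreadable user address yields -EFAULT instead of an unchecked kernel access. A minimal sketch of the pattern (the Adapter field name is just a placeholder echoing the ioctl above):

    #include <linux/uaccess.h>

    /* Read one unsigned long from a user pointer passed as the ioctl arg;
     * only update the destination once the copy has succeeded.
     */
    static long read_user_ulong(unsigned long __user *argp, unsigned long *dst)
    {
            unsigned long val;

            if (get_user(val, argp))
                    return -EFAULT;

            *dst = val;             /* e.g. Adapter->BEBucketSize */
            return 0;
    }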
diff --git a/drivers/staging/brcm80211/README b/drivers/staging/brcm80211/README index c3ba9bb9b116..c8f1cf1b4409 100644 --- a/drivers/staging/brcm80211/README +++ b/drivers/staging/brcm80211/README | |||
@@ -90,5 +90,5 @@ Contact Info: | |||
90 | ============= | 90 | ============= |
91 | Brett Rudley brudley@broadcom.com | 91 | Brett Rudley brudley@broadcom.com |
92 | Henry Ptasinski henryp@broadcom.com | 92 | Henry Ptasinski henryp@broadcom.com |
93 | Nohee Ko noheek@broadcom.com | 93 | Dowan Kim dowan@broadcom.com |
94 | 94 | ||
diff --git a/drivers/staging/brcm80211/TODO b/drivers/staging/brcm80211/TODO index 8803d300b531..dbf904184899 100644 --- a/drivers/staging/brcm80211/TODO +++ b/drivers/staging/brcm80211/TODO | |||
@@ -45,5 +45,5 @@ Contact | |||
45 | ===== | 45 | ===== |
46 | Brett Rudley <brudley@broadcom.com> | 46 | Brett Rudley <brudley@broadcom.com> |
47 | Henry Ptasinski <henryp@broadcom.com> | 47 | Henry Ptasinski <henryp@broadcom.com> |
48 | Nohee Ko <noheek@broadcom.com> | 48 | Dowan Kim <dowan@broadcom.com> |
49 | 49 | ||
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_linux.c b/drivers/staging/brcm80211/brcmfmac/dhd_linux.c index bbbe7c5f7492..9335f02029aa 100644 --- a/drivers/staging/brcm80211/brcmfmac/dhd_linux.c +++ b/drivers/staging/brcm80211/brcmfmac/dhd_linux.c | |||
@@ -2222,8 +2222,6 @@ int dhd_net_attach(dhd_pub_t *dhdp, int ifidx) | |||
2222 | ASSERT(net); | 2222 | ASSERT(net); |
2223 | 2223 | ||
2224 | ASSERT(!net->netdev_ops); | 2224 | ASSERT(!net->netdev_ops); |
2225 | net->netdev_ops = &dhd_ops_virt; | ||
2226 | |||
2227 | net->netdev_ops = &dhd_ops_pri; | 2225 | net->netdev_ops = &dhd_ops_pri; |
2228 | 2226 | ||
2229 | /* | 2227 | /* |
diff --git a/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c index 3f29488d9c72..ea0825238d53 100644 --- a/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c +++ b/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c | |||
@@ -95,12 +95,12 @@ static s32 wl_cfg80211_config_default_key(struct wiphy *wiphy, | |||
95 | struct net_device *dev, | 95 | struct net_device *dev, |
96 | u8 key_idx); | 96 | u8 key_idx); |
97 | static s32 wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev, | 97 | static s32 wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev, |
98 | u8 key_idx, const u8 *mac_addr, | 98 | u8 key_idx, bool pairwise, const u8 *mac_addr, |
99 | struct key_params *params); | 99 | struct key_params *params); |
100 | static s32 wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev, | 100 | static s32 wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev, |
101 | u8 key_idx, const u8 *mac_addr); | 101 | u8 key_idx, bool pairwise, const u8 *mac_addr); |
102 | static s32 wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev, | 102 | static s32 wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev, |
103 | u8 key_idx, const u8 *mac_addr, | 103 | u8 key_idx, bool pairwise, const u8 *mac_addr, |
104 | void *cookie, void (*callback) (void *cookie, | 104 | void *cookie, void (*callback) (void *cookie, |
105 | struct | 105 | struct |
106 | key_params * | 106 | key_params * |
@@ -1615,7 +1615,7 @@ wl_add_keyext(struct wiphy *wiphy, struct net_device *dev, | |||
1615 | 1615 | ||
1616 | static s32 | 1616 | static s32 |
1617 | wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev, | 1617 | wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev, |
1618 | u8 key_idx, const u8 *mac_addr, | 1618 | u8 key_idx, bool pairwise, const u8 *mac_addr, |
1619 | struct key_params *params) | 1619 | struct key_params *params) |
1620 | { | 1620 | { |
1621 | struct wl_wsec_key key; | 1621 | struct wl_wsec_key key; |
@@ -1700,7 +1700,7 @@ wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev, | |||
1700 | 1700 | ||
1701 | static s32 | 1701 | static s32 |
1702 | wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev, | 1702 | wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev, |
1703 | u8 key_idx, const u8 *mac_addr) | 1703 | u8 key_idx, bool pairwise, const u8 *mac_addr) |
1704 | { | 1704 | { |
1705 | struct wl_wsec_key key; | 1705 | struct wl_wsec_key key; |
1706 | s32 err = 0; | 1706 | s32 err = 0; |
@@ -1756,7 +1756,7 @@ wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev, | |||
1756 | 1756 | ||
1757 | static s32 | 1757 | static s32 |
1758 | wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev, | 1758 | wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev, |
1759 | u8 key_idx, const u8 *mac_addr, void *cookie, | 1759 | u8 key_idx, bool pairwise, const u8 *mac_addr, void *cookie, |
1760 | void (*callback) (void *cookie, struct key_params * params)) | 1760 | void (*callback) (void *cookie, struct key_params * params)) |
1761 | { | 1761 | { |
1762 | struct key_params params; | 1762 | struct key_params params; |
diff --git a/drivers/staging/cpia/cpia.c b/drivers/staging/cpia/cpia.c index 933ae4c8cb9a..0e740b8dafc3 100644 --- a/drivers/staging/cpia/cpia.c +++ b/drivers/staging/cpia/cpia.c | |||
@@ -3184,13 +3184,9 @@ static int cpia_open(struct file *file) | |||
3184 | goto oops; | 3184 | goto oops; |
3185 | } | 3185 | } |
3186 | 3186 | ||
3187 | err = -EINTR; | ||
3188 | if(signal_pending(current)) | ||
3189 | goto oops; | ||
3190 | |||
3191 | /* Set ownership of /proc/cpia/videoX to current user */ | 3187 | /* Set ownership of /proc/cpia/videoX to current user */ |
3192 | if(cam->proc_entry) | 3188 | if(cam->proc_entry) |
3193 | cam->proc_entry->uid = current_uid(); | 3189 | cam->proc_entry->uid = current_euid(); |
3194 | 3190 | ||
3195 | /* set mark for loading first frame uncompressed */ | 3191 | /* set mark for loading first frame uncompressed */ |
3196 | cam->first_frame = 1; | 3192 | cam->first_frame = 1; |
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_chdev.c b/drivers/staging/ft1000/ft1000-usb/ft1000_chdev.c index 87a6487531c2..20d509836d9e 100644 --- a/drivers/staging/ft1000/ft1000-usb/ft1000_chdev.c +++ b/drivers/staging/ft1000/ft1000-usb/ft1000_chdev.c | |||
@@ -286,7 +286,6 @@ int ft1000_CreateDevice(struct ft1000_device *dev) | |||
286 | pid = kernel_thread (exec_mknod, (void *)info, 0); | 286 | pid = kernel_thread (exec_mknod, (void *)info, 0); |
287 | 287 | ||
288 | // initialize application information | 288 | // initialize application information |
289 | info->appcnt = 0; | ||
290 | 289 | ||
291 | // if (ft1000_flarion_cnt == 0) { | 290 | // if (ft1000_flarion_cnt == 0) { |
292 | // | 291 | // |
diff --git a/drivers/staging/hv/hv_utils.c b/drivers/staging/hv/hv_utils.c index 702a478d5542..a99e900ec4c9 100644 --- a/drivers/staging/hv/hv_utils.c +++ b/drivers/staging/hv/hv_utils.c | |||
@@ -212,9 +212,6 @@ static void heartbeat_onchannelcallback(void *context) | |||
212 | recvlen, requestid); | 212 | recvlen, requestid); |
213 | 213 | ||
214 | icmsghdrp = (struct icmsg_hdr *)&buf[ | 214 | icmsghdrp = (struct icmsg_hdr *)&buf[ |
215 | sizeof(struct vmbuspipe_hdr)]; | ||
216 | |||
217 | icmsghdrp = (struct icmsg_hdr *)&buf[ | ||
218 | sizeof(struct vmbuspipe_hdr)]; | 215 | sizeof(struct vmbuspipe_hdr)]; |
219 | 216 | ||
220 | if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { | 217 | if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { |
diff --git a/drivers/staging/hv/storvsc_drv.c b/drivers/staging/hv/storvsc_drv.c index 41d9acf4cd61..6f8d67d0d64f 100644 --- a/drivers/staging/hv/storvsc_drv.c +++ b/drivers/staging/hv/storvsc_drv.c | |||
@@ -72,8 +72,7 @@ struct storvsc_driver_context { | |||
72 | 72 | ||
73 | /* Static decl */ | 73 | /* Static decl */ |
74 | static int storvsc_probe(struct device *dev); | 74 | static int storvsc_probe(struct device *dev); |
75 | static int storvsc_queuecommand(struct scsi_cmnd *scmnd, | 75 | static int storvsc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd); |
76 | void (*done)(struct scsi_cmnd *)); | ||
77 | static int storvsc_device_alloc(struct scsi_device *); | 76 | static int storvsc_device_alloc(struct scsi_device *); |
78 | static int storvsc_device_configure(struct scsi_device *); | 77 | static int storvsc_device_configure(struct scsi_device *); |
79 | static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd); | 78 | static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd); |
@@ -595,7 +594,7 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl, | |||
595 | /* | 594 | /* |
596 | * storvsc_queuecommand - Initiate command processing | 595 | * storvsc_queuecommand - Initiate command processing |
597 | */ | 596 | */ |
598 | static int storvsc_queuecommand(struct scsi_cmnd *scmnd, | 597 | static int storvsc_queuecommand_lck(struct scsi_cmnd *scmnd, |
599 | void (*done)(struct scsi_cmnd *)) | 598 | void (*done)(struct scsi_cmnd *)) |
600 | { | 599 | { |
601 | int ret; | 600 | int ret; |
@@ -783,6 +782,8 @@ retry_request: | |||
783 | return ret; | 782 | return ret; |
784 | } | 783 | } |
785 | 784 | ||
785 | static DEF_SCSI_QCMD(storvsc_queuecommand) | ||
786 | |||
786 | static int storvsc_merge_bvec(struct request_queue *q, | 787 | static int storvsc_merge_bvec(struct request_queue *q, |
787 | struct bvec_merge_data *bmd, struct bio_vec *bvec) | 788 | struct bvec_merge_data *bmd, struct bio_vec *bvec) |
788 | { | 789 | { |
diff --git a/drivers/staging/intel_sst/intel_sst_app_interface.c b/drivers/staging/intel_sst/intel_sst_app_interface.c index 463e5cba8307..9618c7997461 100644 --- a/drivers/staging/intel_sst/intel_sst_app_interface.c +++ b/drivers/staging/intel_sst/intel_sst_app_interface.c | |||
@@ -244,12 +244,12 @@ static int intel_sst_mmap_play_capture(u32 str_id, | |||
244 | int retval, i; | 244 | int retval, i; |
245 | struct stream_info *stream; | 245 | struct stream_info *stream; |
246 | struct snd_sst_mmap_buff_entry *buf_entry; | 246 | struct snd_sst_mmap_buff_entry *buf_entry; |
247 | struct snd_sst_mmap_buff_entry *tmp_buf; | ||
247 | 248 | ||
248 | pr_debug("sst:called for str_id %d\n", str_id); | 249 | pr_debug("sst:called for str_id %d\n", str_id); |
249 | retval = sst_validate_strid(str_id); | 250 | retval = sst_validate_strid(str_id); |
250 | if (retval) | 251 | if (retval) |
251 | return -EINVAL; | 252 | return -EINVAL; |
252 | BUG_ON(!mmap_buf); | ||
253 | 253 | ||
254 | stream = &sst_drv_ctx->streams[str_id]; | 254 | stream = &sst_drv_ctx->streams[str_id]; |
255 | if (stream->mmapped != true) | 255 | if (stream->mmapped != true) |
@@ -262,14 +262,24 @@ static int intel_sst_mmap_play_capture(u32 str_id, | |||
262 | stream->curr_bytes = 0; | 262 | stream->curr_bytes = 0; |
263 | stream->cumm_bytes = 0; | 263 | stream->cumm_bytes = 0; |
264 | 264 | ||
265 | tmp_buf = kcalloc(mmap_buf->entries, sizeof(*tmp_buf), GFP_KERNEL); | ||
266 | if (!tmp_buf) | ||
267 | return -ENOMEM; | ||
268 | if (copy_from_user(tmp_buf, (void __user *)mmap_buf->buff, | ||
269 | mmap_buf->entries * sizeof(*tmp_buf))) { | ||
270 | retval = -EFAULT; | ||
271 | goto out_free; | ||
272 | } | ||
273 | |||
265 | pr_debug("sst:new buffers count %d status %d\n", | 274 | pr_debug("sst:new buffers count %d status %d\n", |
266 | mmap_buf->entries, stream->status); | 275 | mmap_buf->entries, stream->status); |
267 | buf_entry = mmap_buf->buff; | 276 | buf_entry = tmp_buf; |
268 | for (i = 0; i < mmap_buf->entries; i++) { | 277 | for (i = 0; i < mmap_buf->entries; i++) { |
269 | BUG_ON(!buf_entry); | ||
270 | bufs = kzalloc(sizeof(*bufs), GFP_KERNEL); | 278 | bufs = kzalloc(sizeof(*bufs), GFP_KERNEL); |
271 | if (!bufs) | 279 | if (!bufs) { |
272 | return -ENOMEM; | 280 | retval = -ENOMEM; |
281 | goto out_free; | ||
282 | } | ||
273 | bufs->size = buf_entry->size; | 283 | bufs->size = buf_entry->size; |
274 | bufs->offset = buf_entry->offset; | 284 | bufs->offset = buf_entry->offset; |
275 | bufs->addr = sst_drv_ctx->mmap_mem; | 285 | bufs->addr = sst_drv_ctx->mmap_mem; |
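
Note: the tmp_buf change above exists because mmap_buf->buff is a user-space pointer, so the entry array has to be pulled into kernel memory with copy_from_user() before it is walked; the BUG_ON checks on user-controlled pointers go away and every failure unwinds through out_free. A stand-alone sketch of the idiom, with made-up struct and function names:

    #include <linux/slab.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    struct entry {                          /* stand-in for the driver's entry type */
            __u32 offset;
            __u32 size;
    };

    /* Pull 'count' user-space entries into the kernel before walking them. */
    static int consume_user_entries(const struct entry __user *uentries,
                                    unsigned int count)
    {
            struct entry *tmp;
            unsigned int i;
            int ret = 0;

            /* a real driver would also sanity-cap 'count' here */
            tmp = kcalloc(count, sizeof(*tmp), GFP_KERNEL);
            if (!tmp)
                    return -ENOMEM;

            if (copy_from_user(tmp, uentries, count * sizeof(*tmp))) {
                    ret = -EFAULT;
                    goto out_free;
            }

            for (i = 0; i < count; i++) {
                    /* ... validate and use tmp[i].offset / tmp[i].size ... */
            }

    out_free:
            kfree(tmp);                     /* single exit path frees the bounce copy */
            return ret;
    }
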
@@ -293,13 +303,15 @@ static int intel_sst_mmap_play_capture(u32 str_id, | |||
293 | if (sst_play_frame(str_id) < 0) { | 303 | if (sst_play_frame(str_id) < 0) { |
294 | pr_warn("sst: play frames fail\n"); | 304 | pr_warn("sst: play frames fail\n"); |
295 | mutex_unlock(&stream->lock); | 305 | mutex_unlock(&stream->lock); |
296 | return -EIO; | 306 | retval = -EIO; |
307 | goto out_free; | ||
297 | } | 308 | } |
298 | } else if (stream->ops == STREAM_OPS_CAPTURE) { | 309 | } else if (stream->ops == STREAM_OPS_CAPTURE) { |
299 | if (sst_capture_frame(str_id) < 0) { | 310 | if (sst_capture_frame(str_id) < 0) { |
300 | pr_warn("sst: capture frame fail\n"); | 311 | pr_warn("sst: capture frame fail\n"); |
301 | mutex_unlock(&stream->lock); | 312 | mutex_unlock(&stream->lock); |
302 | return -EIO; | 313 | retval = -EIO; |
314 | goto out_free; | ||
303 | } | 315 | } |
304 | } | 316 | } |
305 | } | 317 | } |
@@ -314,6 +326,9 @@ static int intel_sst_mmap_play_capture(u32 str_id, | |||
314 | if (retval >= 0) | 326 | if (retval >= 0) |
315 | retval = stream->cumm_bytes; | 327 | retval = stream->cumm_bytes; |
316 | pr_debug("sst:end of play/rec ioctl bytes = %d!!\n", retval); | 328 | pr_debug("sst:end of play/rec ioctl bytes = %d!!\n", retval); |
329 | |||
330 | out_free: | ||
331 | kfree(tmp_buf); | ||
317 | return retval; | 332 | return retval; |
318 | } | 333 | } |
319 | 334 | ||
@@ -377,7 +392,7 @@ static int snd_sst_fill_kernel_list(struct stream_info *stream, | |||
377 | { | 392 | { |
378 | struct sst_stream_bufs *stream_bufs; | 393 | struct sst_stream_bufs *stream_bufs; |
379 | unsigned long index, mmap_len; | 394 | unsigned long index, mmap_len; |
380 | unsigned char *bufp; | 395 | unsigned char __user *bufp; |
381 | unsigned long size, copied_size; | 396 | unsigned long size, copied_size; |
382 | int retval = 0, add_to_list = 0; | 397 | int retval = 0, add_to_list = 0; |
383 | static int sent_offset; | 398 | static int sent_offset; |
@@ -512,9 +527,7 @@ static int snd_sst_copy_userbuf_capture(struct stream_info *stream, | |||
512 | /* copy to user */ | 527 | /* copy to user */ |
513 | list_for_each_entry_safe(entry, _entry, | 528 | list_for_each_entry_safe(entry, _entry, |
514 | copy_to_list, node) { | 529 | copy_to_list, node) { |
515 | if (copy_to_user((void *) | 530 | if (copy_to_user(iovec[entry->iov_index].iov_base + entry->iov_offset, |
516 | iovec[entry->iov_index].iov_base + | ||
517 | entry->iov_offset, | ||
518 | kbufs->addr + entry->offset, | 531 | kbufs->addr + entry->offset, |
519 | entry->size)) { | 532 | entry->size)) { |
520 | /* Clean up the list and return error */ | 533 | /* Clean up the list and return error */ |
@@ -590,7 +603,7 @@ static int intel_sst_read_write(unsigned int str_id, char __user *buf, | |||
590 | buf, (int) count, (int) stream->status); | 603 | buf, (int) count, (int) stream->status); |
591 | 604 | ||
592 | stream->buf_type = SST_BUF_USER_STATIC; | 605 | stream->buf_type = SST_BUF_USER_STATIC; |
593 | iovec.iov_base = (void *)buf; | 606 | iovec.iov_base = buf; |
594 | iovec.iov_len = count; | 607 | iovec.iov_len = count; |
595 | nr_segs = 1; | 608 | nr_segs = 1; |
596 | 609 | ||
@@ -838,7 +851,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg) | |||
838 | break; | 851 | break; |
839 | 852 | ||
840 | case _IOC_NR(SNDRV_SST_STREAM_SET_PARAMS): { | 853 | case _IOC_NR(SNDRV_SST_STREAM_SET_PARAMS): { |
841 | struct snd_sst_params *str_param = (struct snd_sst_params *)arg; | 854 | struct snd_sst_params str_param; |
842 | 855 | ||
843 | pr_debug("sst: IOCTL_SET_PARAMS recieved!\n"); | 856 | pr_debug("sst: IOCTL_SET_PARAMS recieved!\n"); |
844 | if (minor != STREAM_MODULE) { | 857 | if (minor != STREAM_MODULE) { |
@@ -846,17 +859,25 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg) | |||
846 | break; | 859 | break; |
847 | } | 860 | } |
848 | 861 | ||
862 | if (copy_from_user(&str_param, (void __user *)arg, | ||
863 | sizeof(str_param))) { | ||
864 | retval = -EFAULT; | ||
865 | break; | ||
866 | } | ||
867 | |||
849 | if (!str_id) { | 868 | if (!str_id) { |
850 | 869 | ||
851 | retval = sst_get_stream(str_param); | 870 | retval = sst_get_stream(&str_param); |
852 | if (retval > 0) { | 871 | if (retval > 0) { |
853 | struct stream_info *str_info; | 872 | struct stream_info *str_info; |
873 | char __user *dest; | ||
874 | |||
854 | sst_drv_ctx->stream_cnt++; | 875 | sst_drv_ctx->stream_cnt++; |
855 | data->str_id = retval; | 876 | data->str_id = retval; |
856 | str_info = &sst_drv_ctx->streams[retval]; | 877 | str_info = &sst_drv_ctx->streams[retval]; |
857 | str_info->src = SST_DRV; | 878 | str_info->src = SST_DRV; |
858 | retval = copy_to_user(&str_param->stream_id, | 879 | dest = (char __user *)arg + offsetof(struct snd_sst_params, stream_id); |
859 | &retval, sizeof(__u32)); | 880 | retval = copy_to_user(dest, &retval, sizeof(__u32)); |
860 | if (retval) | 881 | if (retval) |
861 | retval = -EFAULT; | 882 | retval = -EFAULT; |
862 | } else { | 883 | } else { |
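
Note: since str_param is now a kernel-side copy, the old copy_to_user(&str_param->stream_id, ...) target no longer points into user memory; the replacement rebuilds the user address from arg plus offsetof() and writes only the one field back. A small sketch of that write-back pattern, using a hypothetical struct:

    #include <linux/stddef.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    struct params {                         /* hypothetical ioctl argument layout */
            __u32 stream_id;
            __u32 rate;
    };

    /* Write one freshly assigned field back into the caller's struct. */
    static int report_stream_id(unsigned long arg, __u32 id)
    {
            char __user *dest = (char __user *)arg +
                                offsetof(struct params, stream_id);

            if (copy_to_user(dest, &id, sizeof(id)))
                    return -EFAULT;
            return 0;
    }

For a single scalar like this, put_user(id, (__u32 __user *)dest) would do the same job.
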
@@ -866,16 +887,14 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg) | |||
866 | } else { | 887 | } else { |
867 | pr_debug("sst: SET_STREAM_PARAMS recieved!\n"); | 888 | pr_debug("sst: SET_STREAM_PARAMS recieved!\n"); |
868 | /* allocated set params only */ | 889 | /* allocated set params only */ |
869 | retval = sst_set_stream_param(str_id, str_param); | 890 | retval = sst_set_stream_param(str_id, &str_param); |
870 | /* Block the call for reply */ | 891 | /* Block the call for reply */ |
871 | if (!retval) { | 892 | if (!retval) { |
872 | int sfreq = 0, word_size = 0, num_channel = 0; | 893 | int sfreq = 0, word_size = 0, num_channel = 0; |
873 | sfreq = str_param->sparams.uc.pcm_params.sfreq; | 894 | sfreq = str_param.sparams.uc.pcm_params.sfreq; |
874 | word_size = str_param->sparams. | 895 | word_size = str_param.sparams.uc.pcm_params.pcm_wd_sz; |
875 | uc.pcm_params.pcm_wd_sz; | 896 | num_channel = str_param.sparams.uc.pcm_params.num_chan; |
876 | num_channel = str_param-> | 897 | if (str_param.ops == STREAM_OPS_CAPTURE) { |
877 | sparams.uc.pcm_params.num_chan; | ||
878 | if (str_param->ops == STREAM_OPS_CAPTURE) { | ||
879 | sst_drv_ctx->scard_ops->\ | 898 | sst_drv_ctx->scard_ops->\ |
880 | set_pcm_audio_params(sfreq, | 899 | set_pcm_audio_params(sfreq, |
881 | word_size, num_channel); | 900 | word_size, num_channel); |
@@ -885,41 +904,39 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg) | |||
885 | break; | 904 | break; |
886 | } | 905 | } |
887 | case _IOC_NR(SNDRV_SST_SET_VOL): { | 906 | case _IOC_NR(SNDRV_SST_SET_VOL): { |
888 | struct snd_sst_vol *set_vol; | 907 | struct snd_sst_vol set_vol; |
889 | struct snd_sst_vol *rec_vol = (struct snd_sst_vol *)arg; | 908 | |
909 | if (copy_from_user(&set_vol, (void __user *)arg, | ||
910 | sizeof(set_vol))) { | ||
911 | pr_debug("sst: copy failed\n"); | ||
912 | retval = -EFAULT; | ||
913 | break; | ||
914 | } | ||
890 | pr_debug("sst: SET_VOLUME recieved for %d!\n", | 915 | pr_debug("sst: SET_VOLUME recieved for %d!\n", |
891 | rec_vol->stream_id); | 916 | set_vol.stream_id); |
892 | if (minor == STREAM_MODULE && rec_vol->stream_id == 0) { | 917 | if (minor == STREAM_MODULE && set_vol.stream_id == 0) { |
893 | pr_debug("sst: invalid operation!\n"); | 918 | pr_debug("sst: invalid operation!\n"); |
894 | retval = -EPERM; | 919 | retval = -EPERM; |
895 | break; | 920 | break; |
896 | } | 921 | } |
897 | set_vol = kzalloc(sizeof(*set_vol), GFP_ATOMIC); | 922 | retval = sst_set_vol(&set_vol); |
898 | if (!set_vol) { | ||
899 | pr_debug("sst: mem allocation failed\n"); | ||
900 | retval = -ENOMEM; | ||
901 | break; | ||
902 | } | ||
903 | if (copy_from_user(set_vol, rec_vol, sizeof(*set_vol))) { | ||
904 | pr_debug("sst: copy failed\n"); | ||
905 | retval = -EFAULT; | ||
906 | break; | ||
907 | } | ||
908 | retval = sst_set_vol(set_vol); | ||
909 | kfree(set_vol); | ||
910 | break; | 923 | break; |
911 | } | 924 | } |
912 | case _IOC_NR(SNDRV_SST_GET_VOL): { | 925 | case _IOC_NR(SNDRV_SST_GET_VOL): { |
913 | struct snd_sst_vol *rec_vol = (struct snd_sst_vol *)arg; | ||
914 | struct snd_sst_vol get_vol; | 926 | struct snd_sst_vol get_vol; |
927 | |||
928 | if (copy_from_user(&get_vol, (void __user *)arg, | ||
929 | sizeof(get_vol))) { | ||
930 | retval = -EFAULT; | ||
931 | break; | ||
932 | } | ||
915 | pr_debug("sst: IOCTL_GET_VOLUME recieved for stream = %d!\n", | 933 | pr_debug("sst: IOCTL_GET_VOLUME recieved for stream = %d!\n", |
916 | rec_vol->stream_id); | 934 | get_vol.stream_id); |
917 | if (minor == STREAM_MODULE && rec_vol->stream_id == 0) { | 935 | if (minor == STREAM_MODULE && get_vol.stream_id == 0) { |
918 | pr_debug("sst: invalid operation!\n"); | 936 | pr_debug("sst: invalid operation!\n"); |
919 | retval = -EPERM; | 937 | retval = -EPERM; |
920 | break; | 938 | break; |
921 | } | 939 | } |
922 | get_vol.stream_id = rec_vol->stream_id; | ||
923 | retval = sst_get_vol(&get_vol); | 940 | retval = sst_get_vol(&get_vol); |
924 | if (retval) { | 941 | if (retval) { |
925 | retval = -EIO; | 942 | retval = -EIO; |
@@ -928,7 +945,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg) | |||
928 | pr_debug("sst: id:%d\n, vol:%d, ramp_dur:%d, ramp_type:%d\n", | 945 | pr_debug("sst: id:%d\n, vol:%d, ramp_dur:%d, ramp_type:%d\n", |
929 | get_vol.stream_id, get_vol.volume, | 946 | get_vol.stream_id, get_vol.volume, |
930 | get_vol.ramp_duration, get_vol.ramp_type); | 947 | get_vol.ramp_duration, get_vol.ramp_type); |
931 | if (copy_to_user((struct snd_sst_vol *)arg, | 948 | if (copy_to_user((struct snd_sst_vol __user *)arg, |
932 | &get_vol, sizeof(get_vol))) { | 949 | &get_vol, sizeof(get_vol))) { |
933 | retval = -EFAULT; | 950 | retval = -EFAULT; |
934 | break; | 951 | break; |
@@ -938,25 +955,20 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg) | |||
938 | } | 955 | } |
939 | 956 | ||
940 | case _IOC_NR(SNDRV_SST_MUTE): { | 957 | case _IOC_NR(SNDRV_SST_MUTE): { |
941 | struct snd_sst_mute *set_mute; | 958 | struct snd_sst_mute set_mute; |
942 | struct snd_sst_vol *rec_mute = (struct snd_sst_vol *)arg; | 959 | |
943 | pr_debug("sst: SNDRV_SST_SET_VOLUME recieved for %d!\n", | 960 | if (copy_from_user(&set_mute, (void __user *)arg, |
944 | rec_mute->stream_id); | 961 | sizeof(set_mute))) { |
945 | if (minor == STREAM_MODULE && rec_mute->stream_id == 0) { | 962 | retval = -EFAULT; |
946 | retval = -EPERM; | ||
947 | break; | ||
948 | } | ||
949 | set_mute = kzalloc(sizeof(*set_mute), GFP_ATOMIC); | ||
950 | if (!set_mute) { | ||
951 | retval = -ENOMEM; | ||
952 | break; | 963 | break; |
953 | } | 964 | } |
954 | if (copy_from_user(set_mute, rec_mute, sizeof(*set_mute))) { | 965 | pr_debug("sst: SNDRV_SST_SET_VOLUME recieved for %d!\n", |
955 | retval = -EFAULT; | 966 | set_mute.stream_id); |
967 | if (minor == STREAM_MODULE && set_mute.stream_id == 0) { | ||
968 | retval = -EPERM; | ||
956 | break; | 969 | break; |
957 | } | 970 | } |
958 | retval = sst_set_mute(set_mute); | 971 | retval = sst_set_mute(&set_mute); |
959 | kfree(set_mute); | ||
960 | break; | 972 | break; |
961 | } | 973 | } |
962 | case _IOC_NR(SNDRV_SST_STREAM_GET_PARAMS): { | 974 | case _IOC_NR(SNDRV_SST_STREAM_GET_PARAMS): { |
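
Note: the SET_VOL, GET_VOL and MUTE cases all get the same treatment: a GFP_ATOMIC allocation plus copy is replaced by a small on-stack struct filled with copy_from_user(), which removes the allocation failure path and the leak the old SET_VOL code had when copy_from_user() failed before kfree(). A sketch of the simplified shape, assuming a small fixed-size argument:

    #include <linux/types.h>
    #include <linux/uaccess.h>

    struct vol {                            /* small, fixed-size: fine on the stack */
            __u32 stream_id;
            __s32 volume;
    };

    static long set_volume_ioctl(unsigned long arg)
    {
            struct vol v;

            if (copy_from_user(&v, (void __user *)arg, sizeof(v)))
                    return -EFAULT;         /* nothing to free on this path */
            if (v.stream_id == 0)
                    return -EPERM;
            /* ... apply v.volume to the hardware ... */
            return 0;
    }
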
@@ -973,7 +985,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg) | |||
973 | retval = -EIO; | 985 | retval = -EIO; |
974 | break; | 986 | break; |
975 | } | 987 | } |
976 | if (copy_to_user((struct snd_sst_get_stream_params *)arg, | 988 | if (copy_to_user((struct snd_sst_get_stream_params __user *)arg, |
977 | &get_params, sizeof(get_params))) { | 989 | &get_params, sizeof(get_params))) { |
978 | retval = -EFAULT; | 990 | retval = -EFAULT; |
979 | break; | 991 | break; |
@@ -983,16 +995,22 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg) | |||
983 | } | 995 | } |
984 | 996 | ||
985 | case _IOC_NR(SNDRV_SST_MMAP_PLAY): | 997 | case _IOC_NR(SNDRV_SST_MMAP_PLAY): |
986 | case _IOC_NR(SNDRV_SST_MMAP_CAPTURE): | 998 | case _IOC_NR(SNDRV_SST_MMAP_CAPTURE): { |
999 | struct snd_sst_mmap_buffs mmap_buf; | ||
1000 | |||
987 | pr_debug("sst: SNDRV_SST_MMAP_PLAY/CAPTURE recieved!\n"); | 1001 | pr_debug("sst: SNDRV_SST_MMAP_PLAY/CAPTURE recieved!\n"); |
988 | if (minor != STREAM_MODULE) { | 1002 | if (minor != STREAM_MODULE) { |
989 | retval = -EBADRQC; | 1003 | retval = -EBADRQC; |
990 | break; | 1004 | break; |
991 | } | 1005 | } |
992 | retval = intel_sst_mmap_play_capture(str_id, | 1006 | if (copy_from_user(&mmap_buf, (void __user *)arg, |
993 | (struct snd_sst_mmap_buffs *)arg); | 1007 | sizeof(mmap_buf))) { |
1008 | retval = -EFAULT; | ||
1009 | break; | ||
1010 | } | ||
1011 | retval = intel_sst_mmap_play_capture(str_id, &mmap_buf); | ||
994 | break; | 1012 | break; |
995 | 1013 | } | |
996 | case _IOC_NR(SNDRV_SST_STREAM_DROP): | 1014 | case _IOC_NR(SNDRV_SST_STREAM_DROP): |
997 | pr_debug("sst: SNDRV_SST_IOCTL_DROP recieved!\n"); | 1015 | pr_debug("sst: SNDRV_SST_IOCTL_DROP recieved!\n"); |
998 | if (minor != STREAM_MODULE) { | 1016 | if (minor != STREAM_MODULE) { |
@@ -1003,7 +1021,6 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg) | |||
1003 | break; | 1021 | break; |
1004 | 1022 | ||
1005 | case _IOC_NR(SNDRV_SST_STREAM_GET_TSTAMP): { | 1023 | case _IOC_NR(SNDRV_SST_STREAM_GET_TSTAMP): { |
1006 | unsigned long long *ms = (unsigned long long *)arg; | ||
1007 | struct snd_sst_tstamp tstamp = {0}; | 1024 | struct snd_sst_tstamp tstamp = {0}; |
1008 | unsigned long long time, freq, mod; | 1025 | unsigned long long time, freq, mod; |
1009 | 1026 | ||
@@ -1013,14 +1030,14 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg) | |||
1013 | break; | 1030 | break; |
1014 | } | 1031 | } |
1015 | memcpy_fromio(&tstamp, | 1032 | memcpy_fromio(&tstamp, |
1016 | ((void *)(sst_drv_ctx->mailbox + SST_TIME_STAMP) | 1033 | sst_drv_ctx->mailbox + SST_TIME_STAMP + str_id * sizeof(tstamp), |
1017 | +(str_id * sizeof(tstamp))), | ||
1018 | sizeof(tstamp)); | 1034 | sizeof(tstamp)); |
1019 | time = tstamp.samples_rendered; | 1035 | time = tstamp.samples_rendered; |
1020 | freq = (unsigned long long) tstamp.sampling_frequency; | 1036 | freq = (unsigned long long) tstamp.sampling_frequency; |
1021 | time = time * 1000; /* converting it to ms */ | 1037 | time = time * 1000; /* converting it to ms */ |
1022 | mod = do_div(time, freq); | 1038 | mod = do_div(time, freq); |
1023 | if (copy_to_user(ms, &time, sizeof(*ms))) | 1039 | if (copy_to_user((void __user *)arg, &time, |
1040 | sizeof(unsigned long long))) | ||
1024 | retval = -EFAULT; | 1041 | retval = -EFAULT; |
1025 | break; | 1042 | break; |
1026 | } | 1043 | } |
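
Note: the timestamp hunk folds the cast-heavy pointer arithmetic into one expression and copies the computed time out through the raw arg pointer. The key detail is that the mailbox is I/O memory, so the per-stream record must be fetched with memcpy_fromio() rather than a plain struct assignment. An illustrative sketch; the record layout and function names are assumptions, not taken from the driver:

    #include <linux/io.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>
    #include <asm/div64.h>

    struct tstamp {                         /* assumed per-stream mailbox record */
            u64 samples_rendered;
            u32 sampling_frequency;
    };

    static long read_position_ms(void __iomem *mailbox, unsigned int ts_off,
                                 int str_id, unsigned long arg)
    {
            struct tstamp ts = {0};
            u64 ms;

            /* the mailbox is I/O memory: fetch the slot with memcpy_fromio() */
            memcpy_fromio(&ts, mailbox + ts_off + str_id * sizeof(ts), sizeof(ts));

            if (!ts.sampling_frequency)
                    return -EIO;

            ms = ts.samples_rendered * 1000;
            do_div(ms, ts.sampling_frequency);      /* 64-by-32 divide, in place */

            if (copy_to_user((void __user *)arg, &ms, sizeof(ms)))
                    return -EFAULT;
            return 0;
    }
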
@@ -1065,92 +1082,118 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg) | |||
1065 | } | 1082 | } |
1066 | 1083 | ||
1067 | case _IOC_NR(SNDRV_SST_SET_TARGET_DEVICE): { | 1084 | case _IOC_NR(SNDRV_SST_SET_TARGET_DEVICE): { |
1068 | struct snd_sst_target_device *target_device; | 1085 | struct snd_sst_target_device target_device; |
1069 | 1086 | ||
1070 | pr_debug("sst: SET_TARGET_DEVICE recieved!\n"); | 1087 | pr_debug("sst: SET_TARGET_DEVICE recieved!\n"); |
1071 | target_device = (struct snd_sst_target_device *)arg; | 1088 | if (copy_from_user(&target_device, (void __user *)arg, |
1072 | BUG_ON(!target_device); | 1089 | sizeof(target_device))) { |
1090 | retval = -EFAULT; | ||
1091 | break; | ||
1092 | } | ||
1073 | if (minor != AM_MODULE) { | 1093 | if (minor != AM_MODULE) { |
1074 | retval = -EBADRQC; | 1094 | retval = -EBADRQC; |
1075 | break; | 1095 | break; |
1076 | } | 1096 | } |
1077 | retval = sst_target_device_select(target_device); | 1097 | retval = sst_target_device_select(&target_device); |
1078 | break; | 1098 | break; |
1079 | } | 1099 | } |
1080 | 1100 | ||
1081 | case _IOC_NR(SNDRV_SST_DRIVER_INFO): { | 1101 | case _IOC_NR(SNDRV_SST_DRIVER_INFO): { |
1082 | struct snd_sst_driver_info *info = | 1102 | struct snd_sst_driver_info info; |
1083 | (struct snd_sst_driver_info *)arg; | ||
1084 | 1103 | ||
1085 | pr_debug("sst: SNDRV_SST_DRIVER_INFO recived\n"); | 1104 | pr_debug("sst: SNDRV_SST_DRIVER_INFO recived\n"); |
1086 | info->version = SST_VERSION_NUM; | 1105 | info.version = SST_VERSION_NUM; |
1087 | /* hard coding, shud get sumhow later */ | 1106 | /* hard coding, shud get sumhow later */ |
1088 | info->active_pcm_streams = sst_drv_ctx->stream_cnt - | 1107 | info.active_pcm_streams = sst_drv_ctx->stream_cnt - |
1089 | sst_drv_ctx->encoded_cnt; | 1108 | sst_drv_ctx->encoded_cnt; |
1090 | info->active_enc_streams = sst_drv_ctx->encoded_cnt; | 1109 | info.active_enc_streams = sst_drv_ctx->encoded_cnt; |
1091 | info->max_pcm_streams = MAX_ACTIVE_STREAM - MAX_ENC_STREAM; | 1110 | info.max_pcm_streams = MAX_ACTIVE_STREAM - MAX_ENC_STREAM; |
1092 | info->max_enc_streams = MAX_ENC_STREAM; | 1111 | info.max_enc_streams = MAX_ENC_STREAM; |
1093 | info->buf_per_stream = sst_drv_ctx->mmap_len; | 1112 | info.buf_per_stream = sst_drv_ctx->mmap_len; |
1113 | if (copy_to_user((void __user *)arg, &info, | ||
1114 | sizeof(info))) | ||
1115 | retval = -EFAULT; | ||
1094 | break; | 1116 | break; |
1095 | } | 1117 | } |
1096 | 1118 | ||
1097 | case _IOC_NR(SNDRV_SST_STREAM_DECODE): { | 1119 | case _IOC_NR(SNDRV_SST_STREAM_DECODE): { |
1098 | struct snd_sst_dbufs *param = | 1120 | struct snd_sst_dbufs param; |
1099 | (struct snd_sst_dbufs *)arg, dbufs_local; | 1121 | struct snd_sst_dbufs dbufs_local; |
1100 | int i; | ||
1101 | struct snd_sst_buffs ibufs, obufs; | 1122 | struct snd_sst_buffs ibufs, obufs; |
1102 | struct snd_sst_buff_entry ibuf_temp[param->ibufs->entries], | 1123 | struct snd_sst_buff_entry *ibuf_tmp, *obuf_tmp; |
1103 | obuf_temp[param->obufs->entries]; | 1124 | char __user *dest; |
1104 | 1125 | ||
1105 | pr_debug("sst: SNDRV_SST_STREAM_DECODE recived\n"); | 1126 | pr_debug("sst: SNDRV_SST_STREAM_DECODE recived\n"); |
1106 | if (minor != STREAM_MODULE) { | 1127 | if (minor != STREAM_MODULE) { |
1107 | retval = -EBADRQC; | 1128 | retval = -EBADRQC; |
1108 | break; | 1129 | break; |
1109 | } | 1130 | } |
1110 | if (!param) { | 1131 | if (copy_from_user(¶m, (void __user *)arg, |
1111 | retval = -EINVAL; | 1132 | sizeof(param))) { |
1133 | retval = -EFAULT; | ||
1112 | break; | 1134 | break; |
1113 | } | 1135 | } |
1114 | 1136 | ||
1115 | dbufs_local.input_bytes_consumed = param->input_bytes_consumed; | 1137 | dbufs_local.input_bytes_consumed = param.input_bytes_consumed; |
1116 | dbufs_local.output_bytes_produced = | 1138 | dbufs_local.output_bytes_produced = |
1117 | param->output_bytes_produced; | 1139 | param.output_bytes_produced; |
1118 | dbufs_local.ibufs = &ibufs; | 1140 | |
1119 | dbufs_local.obufs = &obufs; | 1141 | if (copy_from_user(&ibufs, (void __user *)param.ibufs, sizeof(ibufs))) { |
1120 | dbufs_local.ibufs->entries = param->ibufs->entries; | 1142 | retval = -EFAULT; |
1121 | dbufs_local.ibufs->type = param->ibufs->type; | 1143 | break; |
1122 | dbufs_local.obufs->entries = param->obufs->entries; | 1144 | } |
1123 | dbufs_local.obufs->type = param->obufs->type; | 1145 | if (copy_from_user(&obufs, (void __user *)param.obufs, sizeof(obufs))) { |
1124 | 1146 | retval = -EFAULT; | |
1125 | dbufs_local.ibufs->buff_entry = ibuf_temp; | 1147 | break; |
1126 | for (i = 0; i < dbufs_local.ibufs->entries; i++) { | ||
1127 | ibuf_temp[i].buffer = | ||
1128 | param->ibufs->buff_entry[i].buffer; | ||
1129 | ibuf_temp[i].size = | ||
1130 | param->ibufs->buff_entry[i].size; | ||
1131 | } | 1148 | } |
1132 | dbufs_local.obufs->buff_entry = obuf_temp; | 1149 | |
1133 | for (i = 0; i < dbufs_local.obufs->entries; i++) { | 1150 | ibuf_tmp = kcalloc(ibufs.entries, sizeof(*ibuf_tmp), GFP_KERNEL); |
1134 | obuf_temp[i].buffer = | 1151 | obuf_tmp = kcalloc(obufs.entries, sizeof(*obuf_tmp), GFP_KERNEL); |
1135 | param->obufs->buff_entry[i].buffer; | 1152 | if (!ibuf_tmp || !obuf_tmp) { |
1136 | obuf_temp[i].size = | 1153 | retval = -ENOMEM; |
1137 | param->obufs->buff_entry[i].size; | 1154 | goto free_iobufs; |
1155 | } | ||
1156 | |||
1157 | if (copy_from_user(ibuf_tmp, (void __user *)ibufs.buff_entry, | ||
1158 | ibufs.entries * sizeof(*ibuf_tmp))) { | ||
1159 | retval = -EFAULT; | ||
1160 | goto free_iobufs; | ||
1138 | } | 1161 | } |
1162 | ibufs.buff_entry = ibuf_tmp; | ||
1163 | dbufs_local.ibufs = &ibufs; | ||
1164 | |||
1165 | if (copy_from_user(obuf_tmp, (void __user *)obufs.buff_entry, | ||
1166 | obufs.entries * sizeof(*obuf_tmp))) { | ||
1167 | retval = -EFAULT; | ||
1168 | goto free_iobufs; | ||
1169 | } | ||
1170 | obufs.buff_entry = obuf_tmp; | ||
1171 | dbufs_local.obufs = &obufs; | ||
1172 | |||
1139 | retval = sst_decode(str_id, &dbufs_local); | 1173 | retval = sst_decode(str_id, &dbufs_local); |
1140 | if (retval) | 1174 | if (retval) { |
1141 | retval = -EAGAIN; | 1175 | retval = -EAGAIN; |
1142 | if (copy_to_user(¶m->input_bytes_consumed, | 1176 | goto free_iobufs; |
1177 | } | ||
1178 | |||
1179 | dest = (char __user *)arg + offsetof(struct snd_sst_dbufs, input_bytes_consumed); | ||
1180 | if (copy_to_user(dest, | ||
1143 | &dbufs_local.input_bytes_consumed, | 1181 | &dbufs_local.input_bytes_consumed, |
1144 | sizeof(unsigned long long))) { | 1182 | sizeof(unsigned long long))) { |
1145 | retval = -EFAULT; | 1183 | retval = -EFAULT; |
1146 | break; | 1184 | goto free_iobufs; |
1147 | } | 1185 | } |
1148 | if (copy_to_user(¶m->output_bytes_produced, | 1186 | |
1187 | dest = (char __user *)arg + offsetof(struct snd_sst_dbufs, output_bytes_produced); | ||
1188 | if (copy_to_user(dest, | ||
1149 | &dbufs_local.output_bytes_produced, | 1189 | &dbufs_local.output_bytes_produced, |
1150 | sizeof(unsigned long long))) { | 1190 | sizeof(unsigned long long))) { |
1151 | retval = -EFAULT; | 1191 | retval = -EFAULT; |
1152 | break; | 1192 | goto free_iobufs; |
1153 | } | 1193 | } |
1194 | free_iobufs: | ||
1195 | kfree(ibuf_tmp); | ||
1196 | kfree(obuf_tmp); | ||
1154 | break; | 1197 | break; |
1155 | } | 1198 | } |
1156 | 1199 | ||
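
Note: the decode case is the largest conversion. The old code dereferenced param, param->ibufs and param->obufs straight from user space and sized two variable-length arrays on the kernel stack from user-controlled entry counts; the rewrite copies each level in explicitly, allocates the entry arrays with kcalloc(), and funnels every error through free_iobufs so both allocations are always released. A trimmed sketch of the nested copy-in, with hypothetical stand-in structures:

    #include <linux/slab.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    /* hypothetical stand-ins for the driver's decode structures */
    struct buf_entry { void __user *buffer; __u32 size; };
    struct bufs      { __u32 entries; struct buf_entry __user *buff_entry; };
    struct dbufs     { struct bufs __user *ibufs; struct bufs __user *obufs; };

    static long decode_ioctl(unsigned long arg)
    {
            struct dbufs param;
            struct bufs ibufs;
            struct buf_entry *ibuf_tmp;
            long ret = 0;

            if (copy_from_user(&param, (void __user *)arg, sizeof(param)))
                    return -EFAULT;
            if (copy_from_user(&ibufs, param.ibufs, sizeof(ibufs)))
                    return -EFAULT;

            /* user-controlled count: heap-allocate (and bound it), never a stack array */
            ibuf_tmp = kcalloc(ibufs.entries, sizeof(*ibuf_tmp), GFP_KERNEL);
            if (!ibuf_tmp)
                    return -ENOMEM;

            if (copy_from_user(ibuf_tmp, ibufs.buff_entry,
                               ibufs.entries * sizeof(*ibuf_tmp))) {
                    ret = -EFAULT;
                    goto free_bufs;
            }

            /* ... same dance for obufs, then decode from the kernel copies ... */

    free_bufs:
            kfree(ibuf_tmp);
            return ret;
    }
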
@@ -1164,7 +1207,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg) | |||
1164 | break; | 1207 | break; |
1165 | 1208 | ||
1166 | case _IOC_NR(SNDRV_SST_STREAM_BYTES_DECODED): { | 1209 | case _IOC_NR(SNDRV_SST_STREAM_BYTES_DECODED): { |
1167 | unsigned long long *bytes = (unsigned long long *)arg; | 1210 | unsigned long long __user *bytes = (unsigned long long __user *)arg; |
1168 | struct snd_sst_tstamp tstamp = {0}; | 1211 | struct snd_sst_tstamp tstamp = {0}; |
1169 | 1212 | ||
1170 | pr_debug("sst: STREAM_BYTES_DECODED recieved!\n"); | 1213 | pr_debug("sst: STREAM_BYTES_DECODED recieved!\n"); |
@@ -1173,8 +1216,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg) | |||
1173 | break; | 1216 | break; |
1174 | } | 1217 | } |
1175 | memcpy_fromio(&tstamp, | 1218 | memcpy_fromio(&tstamp, |
1176 | ((void *)(sst_drv_ctx->mailbox + SST_TIME_STAMP) | 1219 | sst_drv_ctx->mailbox + SST_TIME_STAMP + str_id * sizeof(tstamp), |
1177 | +(str_id * sizeof(tstamp))), | ||
1178 | sizeof(tstamp)); | 1220 | sizeof(tstamp)); |
1179 | if (copy_to_user(bytes, &tstamp.bytes_processed, | 1221 | if (copy_to_user(bytes, &tstamp.bytes_processed, |
1180 | sizeof(*bytes))) | 1222 | sizeof(*bytes))) |
@@ -1197,7 +1239,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg) | |||
1197 | kfree(fw_info); | 1239 | kfree(fw_info); |
1198 | break; | 1240 | break; |
1199 | } | 1241 | } |
1200 | if (copy_to_user((struct snd_sst_dbufs *)arg, | 1242 | if (copy_to_user((struct snd_sst_dbufs __user *)arg, |
1201 | fw_info, sizeof(*fw_info))) { | 1243 | fw_info, sizeof(*fw_info))) { |
1202 | kfree(fw_info); | 1244 | kfree(fw_info); |
1203 | retval = -EFAULT; | 1245 | retval = -EFAULT; |
diff --git a/drivers/staging/intel_sst/intel_sst_common.h b/drivers/staging/intel_sst/intel_sst_common.h index 73a98c851e4a..bf0ead78bfae 100644 --- a/drivers/staging/intel_sst/intel_sst_common.h +++ b/drivers/staging/intel_sst/intel_sst_common.h | |||
@@ -231,8 +231,8 @@ struct stream_info { | |||
231 | spinlock_t pcm_lock; | 231 | spinlock_t pcm_lock; |
232 | bool mmapped; | 232 | bool mmapped; |
233 | unsigned int sg_index; /* current buf Index */ | 233 | unsigned int sg_index; /* current buf Index */ |
234 | unsigned char *cur_ptr; /* Current static bufs */ | 234 | unsigned char __user *cur_ptr; /* Current static bufs */ |
235 | struct snd_sst_buf_entry *buf_entry; | 235 | struct snd_sst_buf_entry __user *buf_entry; |
236 | struct sst_block data_blk; /* stream ops block */ | 236 | struct sst_block data_blk; /* stream ops block */ |
237 | struct sst_block ctrl_blk; /* stream control cmd block */ | 237 | struct sst_block ctrl_blk; /* stream control cmd block */ |
238 | enum snd_sst_buf_type buf_type; | 238 | enum snd_sst_buf_type buf_type; |
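
Note: tagging cur_ptr and buf_entry with __user in the header is what lets sparse (make C=1) verify that these pointers are only ever reached through the uaccess helpers and never dereferenced directly. A toy example of the mismatch the annotation catches, assuming a sparse-checked build:

    #include <linux/uaccess.h>

    struct stream_state {
            unsigned char __user *cur_ptr;  /* points into the caller's buffer */
    };

    static int peek_byte(struct stream_state *s, unsigned char *out)
    {
            /* *out = *s->cur_ptr;   <- sparse would flag this direct dereference */
            if (get_user(*out, s->cur_ptr))
                    return -EFAULT;         /* correct: go through the uaccess API */
            return 0;
    }
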
diff --git a/drivers/staging/keucr/init.c b/drivers/staging/keucr/init.c index 1934805844f2..978bf87ff13d 100644 --- a/drivers/staging/keucr/init.c +++ b/drivers/staging/keucr/init.c | |||
@@ -22,7 +22,7 @@ int ENE_InitMedia(struct us_data *us) | |||
22 | int result; | 22 | int result; |
23 | BYTE MiscReg03 = 0; | 23 | BYTE MiscReg03 = 0; |
24 | 24 | ||
25 | printk("--- Initial Nedia ---\n"); | 25 | printk("--- Init Media ---\n"); |
26 | result = ENE_Read_BYTE(us, REG_CARD_STATUS, &MiscReg03); | 26 | result = ENE_Read_BYTE(us, REG_CARD_STATUS, &MiscReg03); |
27 | if (result != USB_STOR_XFER_GOOD) | 27 | if (result != USB_STOR_XFER_GOOD) |
28 | { | 28 | { |
@@ -64,7 +64,7 @@ int ENE_Read_BYTE(struct us_data *us, WORD index, void *buf) | |||
64 | struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; | 64 | struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; |
65 | int result; | 65 | int result; |
66 | 66 | ||
67 | memset(bcb, 0, sizeof(bcb)); | 67 | memset(bcb, 0, sizeof(struct bulk_cb_wrap)); |
68 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); | 68 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); |
69 | bcb->DataTransferLength = 0x01; | 69 | bcb->DataTransferLength = 0x01; |
70 | bcb->Flags = 0x80; | 70 | bcb->Flags = 0x80; |
@@ -92,7 +92,7 @@ int ENE_SDInit(struct us_data *us) | |||
92 | return USB_STOR_TRANSPORT_ERROR; | 92 | return USB_STOR_TRANSPORT_ERROR; |
93 | } | 93 | } |
94 | 94 | ||
95 | memset(bcb, 0, sizeof(bcb)); | 95 | memset(bcb, 0, sizeof(struct bulk_cb_wrap)); |
96 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); | 96 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); |
97 | bcb->Flags = 0x80; | 97 | bcb->Flags = 0x80; |
98 | bcb->CDB[0] = 0xF2; | 98 | bcb->CDB[0] = 0xF2; |
@@ -112,7 +112,7 @@ int ENE_SDInit(struct us_data *us) | |||
112 | return USB_STOR_TRANSPORT_ERROR; | 112 | return USB_STOR_TRANSPORT_ERROR; |
113 | } | 113 | } |
114 | 114 | ||
115 | memset(bcb, 0, sizeof(bcb)); | 115 | memset(bcb, 0, sizeof(struct bulk_cb_wrap)); |
116 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); | 116 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); |
117 | bcb->DataTransferLength = 0x200; | 117 | bcb->DataTransferLength = 0x200; |
118 | bcb->Flags = 0x80; | 118 | bcb->Flags = 0x80; |
@@ -161,7 +161,7 @@ int ENE_MSInit(struct us_data *us) | |||
161 | return USB_STOR_TRANSPORT_ERROR; | 161 | return USB_STOR_TRANSPORT_ERROR; |
162 | } | 162 | } |
163 | 163 | ||
164 | memset(bcb, 0, sizeof(bcb)); | 164 | memset(bcb, 0, sizeof(struct bulk_cb_wrap)); |
165 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); | 165 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); |
166 | bcb->DataTransferLength = 0x200; | 166 | bcb->DataTransferLength = 0x200; |
167 | bcb->Flags = 0x80; | 167 | bcb->Flags = 0x80; |
@@ -219,7 +219,7 @@ int ENE_SMInit(struct us_data *us) | |||
219 | return USB_STOR_TRANSPORT_ERROR; | 219 | return USB_STOR_TRANSPORT_ERROR; |
220 | } | 220 | } |
221 | 221 | ||
222 | memset(bcb, 0, sizeof(bcb)); | 222 | memset(bcb, 0, sizeof(struct bulk_cb_wrap)); |
223 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); | 223 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); |
224 | bcb->DataTransferLength = 0x200; | 224 | bcb->DataTransferLength = 0x200; |
225 | bcb->Flags = 0x80; | 225 | bcb->Flags = 0x80; |
@@ -341,7 +341,7 @@ int ENE_LoadBinCode(struct us_data *us, BYTE flag) | |||
341 | break; | 341 | break; |
342 | } | 342 | } |
343 | 343 | ||
344 | memset(bcb, 0, sizeof(bcb)); | 344 | memset(bcb, 0, sizeof(struct bulk_cb_wrap)); |
345 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); | 345 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); |
346 | bcb->DataTransferLength = 0x800; | 346 | bcb->DataTransferLength = 0x800; |
347 | bcb->Flags =0x00; | 347 | bcb->Flags =0x00; |
@@ -433,7 +433,7 @@ int ENE_Read_Data(struct us_data *us, void *buf, unsigned int length) | |||
433 | 433 | ||
434 | //printk("transport --- ENE_Read_Data\n"); | 434 | //printk("transport --- ENE_Read_Data\n"); |
435 | // set up the command wrapper | 435 | // set up the command wrapper |
436 | memset(bcb, 0, sizeof(bcb)); | 436 | memset(bcb, 0, sizeof(struct bulk_cb_wrap)); |
437 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); | 437 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); |
438 | bcb->DataTransferLength = length; | 438 | bcb->DataTransferLength = length; |
439 | bcb->Flags =0x80; | 439 | bcb->Flags =0x80; |
@@ -470,7 +470,7 @@ int ENE_Write_Data(struct us_data *us, void *buf, unsigned int length) | |||
470 | 470 | ||
471 | //printk("transport --- ENE_Write_Data\n"); | 471 | //printk("transport --- ENE_Write_Data\n"); |
472 | // set up the command wrapper | 472 | // set up the command wrapper |
473 | memset(bcb, 0, sizeof(bcb)); | 473 | memset(bcb, 0, sizeof(struct bulk_cb_wrap)); |
474 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); | 474 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); |
475 | bcb->DataTransferLength = length; | 475 | bcb->DataTransferLength = length; |
476 | bcb->Flags =0x00; | 476 | bcb->Flags =0x00; |
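
Note: every memset change in this driver (and in ms.c, msscsi.c, sdscsi.c and smilsub.c below) is the same class of bug: bcb is a pointer, so sizeof(bcb) is only 4 or 8 bytes and most of the command wrapper was left uninitialized; sizeof(struct bulk_cb_wrap), or equivalently sizeof(*bcb), clears the whole structure. A tiny user-space C program with a stand-in struct makes the difference visible:

    #include <stdio.h>
    #include <string.h>

    struct bulk_wrap {                      /* stand-in for struct bulk_cb_wrap */
            unsigned int  signature;
            unsigned int  xfer_len;
            unsigned char flags;
            unsigned char lun;
            unsigned char cdb[16];
    };

    int main(void)
    {
            struct bulk_wrap w;
            struct bulk_wrap *bcb = &w;

            printf("sizeof(bcb)=%zu  sizeof(*bcb)=%zu\n",
                   sizeof(bcb), sizeof(*bcb));      /* e.g. 8 vs 28 */

            memset(bcb, 0, sizeof(bcb));    /* wrong: clears only pointer-size bytes */
            memset(bcb, 0, sizeof(*bcb));   /* right: clears the whole struct */
            return 0;
    }
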
diff --git a/drivers/staging/keucr/ms.c b/drivers/staging/keucr/ms.c index d4340a9da87d..9a3fdb4e4fe4 100644 --- a/drivers/staging/keucr/ms.c +++ b/drivers/staging/keucr/ms.c | |||
@@ -15,7 +15,7 @@ int MS_ReaderCopyBlock(struct us_data *us, WORD oldphy, WORD newphy, WORD PhyBlo | |||
15 | if (result != USB_STOR_XFER_GOOD) | 15 | if (result != USB_STOR_XFER_GOOD) |
16 | return USB_STOR_TRANSPORT_ERROR; | 16 | return USB_STOR_TRANSPORT_ERROR; |
17 | 17 | ||
18 | memset(bcb, 0, sizeof(bcb)); | 18 | memset(bcb, 0, sizeof(struct bulk_cb_wrap)); |
19 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); | 19 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); |
20 | bcb->DataTransferLength = 0x200*len; | 20 | bcb->DataTransferLength = 0x200*len; |
21 | bcb->Flags = 0x00; | 21 | bcb->Flags = 0x00; |
@@ -53,7 +53,7 @@ int MS_ReaderReadPage(struct us_data *us, DWORD PhyBlockAddr, BYTE PageNum, PDWO | |||
53 | return USB_STOR_TRANSPORT_ERROR; | 53 | return USB_STOR_TRANSPORT_ERROR; |
54 | 54 | ||
55 | // Read Page Data | 55 | // Read Page Data |
56 | memset(bcb, 0, sizeof(bcb)); | 56 | memset(bcb, 0, sizeof(struct bulk_cb_wrap)); |
57 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); | 57 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); |
58 | bcb->DataTransferLength = 0x200; | 58 | bcb->DataTransferLength = 0x200; |
59 | bcb->Flags = 0x80; | 59 | bcb->Flags = 0x80; |
@@ -69,7 +69,7 @@ int MS_ReaderReadPage(struct us_data *us, DWORD PhyBlockAddr, BYTE PageNum, PDWO | |||
69 | return USB_STOR_TRANSPORT_ERROR; | 69 | return USB_STOR_TRANSPORT_ERROR; |
70 | 70 | ||
71 | // Read Extra Data | 71 | // Read Extra Data |
72 | memset(bcb, 0, sizeof(bcb)); | 72 | memset(bcb, 0, sizeof(struct bulk_cb_wrap)); |
73 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); | 73 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); |
74 | bcb->DataTransferLength = 0x4; | 74 | bcb->DataTransferLength = 0x4; |
75 | bcb->Flags = 0x80; | 75 | bcb->Flags = 0x80; |
@@ -108,7 +108,7 @@ int MS_ReaderEraseBlock(struct us_data *us, DWORD PhyBlockAddr) | |||
108 | if (result != USB_STOR_XFER_GOOD) | 108 | if (result != USB_STOR_XFER_GOOD) |
109 | return USB_STOR_TRANSPORT_ERROR; | 109 | return USB_STOR_TRANSPORT_ERROR; |
110 | 110 | ||
111 | memset(bcb, 0, sizeof(bcb)); | 111 | memset(bcb, 0, sizeof(struct bulk_cb_wrap)); |
112 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); | 112 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); |
113 | bcb->DataTransferLength = 0x200; | 113 | bcb->DataTransferLength = 0x200; |
114 | bcb->Flags = 0x80; | 114 | bcb->Flags = 0x80; |
@@ -673,7 +673,7 @@ int MS_LibReadExtraBlock(struct us_data *us, DWORD PhyBlock, BYTE PageNum, BYTE | |||
673 | //printk("MS_LibReadExtraBlock --- PhyBlock = %x, PageNum = %x, blen = %x\n", PhyBlock, PageNum, blen); | 673 | //printk("MS_LibReadExtraBlock --- PhyBlock = %x, PageNum = %x, blen = %x\n", PhyBlock, PageNum, blen); |
674 | 674 | ||
675 | // Read Extra Data | 675 | // Read Extra Data |
676 | memset(bcb, 0, sizeof(bcb)); | 676 | memset(bcb, 0, sizeof(struct bulk_cb_wrap)); |
677 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); | 677 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); |
678 | bcb->DataTransferLength = 0x4 * blen; | 678 | bcb->DataTransferLength = 0x4 * blen; |
679 | bcb->Flags = 0x80; | 679 | bcb->Flags = 0x80; |
@@ -700,7 +700,7 @@ int MS_LibReadExtra(struct us_data *us, DWORD PhyBlock, BYTE PageNum, MS_LibType | |||
700 | BYTE ExtBuf[4]; | 700 | BYTE ExtBuf[4]; |
701 | 701 | ||
702 | //printk("MS_LibReadExtra --- PhyBlock = %x, PageNum = %x\n", PhyBlock, PageNum); | 702 | //printk("MS_LibReadExtra --- PhyBlock = %x, PageNum = %x\n", PhyBlock, PageNum); |
703 | memset(bcb, 0, sizeof(bcb)); | 703 | memset(bcb, 0, sizeof(struct bulk_cb_wrap)); |
704 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); | 704 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); |
705 | bcb->DataTransferLength = 0x4; | 705 | bcb->DataTransferLength = 0x4; |
706 | bcb->Flags = 0x80; | 706 | bcb->Flags = 0x80; |
@@ -807,7 +807,7 @@ int MS_LibOverwriteExtra(struct us_data *us, DWORD PhyBlockAddr, BYTE PageNum, B | |||
807 | if (result != USB_STOR_XFER_GOOD) | 807 | if (result != USB_STOR_XFER_GOOD) |
808 | return USB_STOR_TRANSPORT_ERROR; | 808 | return USB_STOR_TRANSPORT_ERROR; |
809 | 809 | ||
810 | memset(bcb, 0, sizeof(bcb)); | 810 | memset(bcb, 0, sizeof(struct bulk_cb_wrap)); |
811 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); | 811 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); |
812 | bcb->DataTransferLength = 0x4; | 812 | bcb->DataTransferLength = 0x4; |
813 | bcb->Flags = 0x80; | 813 | bcb->Flags = 0x80; |
diff --git a/drivers/staging/keucr/msscsi.c b/drivers/staging/keucr/msscsi.c index ad0c5c629935..cb92d25acee0 100644 --- a/drivers/staging/keucr/msscsi.c +++ b/drivers/staging/keucr/msscsi.c | |||
@@ -145,7 +145,7 @@ int MS_SCSI_Read(struct us_data *us, struct scsi_cmnd *srb) | |||
145 | } | 145 | } |
146 | 146 | ||
147 | // set up the command wrapper | 147 | // set up the command wrapper |
148 | memset(bcb, 0, sizeof(bcb)); | 148 | memset(bcb, 0, sizeof(struct bulk_cb_wrap)); |
149 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); | 149 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); |
150 | bcb->DataTransferLength = blenByte; | 150 | bcb->DataTransferLength = blenByte; |
151 | bcb->Flags = 0x80; | 151 | bcb->Flags = 0x80; |
@@ -193,7 +193,7 @@ int MS_SCSI_Read(struct us_data *us, struct scsi_cmnd *srb) | |||
193 | blkno = phyblk * 0x20 + PageNum; | 193 | blkno = phyblk * 0x20 + PageNum; |
194 | 194 | ||
195 | // set up the command wrapper | 195 | // set up the command wrapper |
196 | memset(bcb, 0, sizeof(bcb)); | 196 | memset(bcb, 0, sizeof(struct bulk_cb_wrap)); |
197 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); | 197 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); |
198 | bcb->DataTransferLength = 0x200 * len; | 198 | bcb->DataTransferLength = 0x200 * len; |
199 | bcb->Flags = 0x80; | 199 | bcb->Flags = 0x80; |
@@ -250,7 +250,7 @@ int MS_SCSI_Write(struct us_data *us, struct scsi_cmnd *srb) | |||
250 | } | 250 | } |
251 | 251 | ||
252 | // set up the command wrapper | 252 | // set up the command wrapper |
253 | memset(bcb, 0, sizeof(bcb)); | 253 | memset(bcb, 0, sizeof(struct bulk_cb_wrap)); |
254 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); | 254 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); |
255 | bcb->DataTransferLength = blenByte; | 255 | bcb->DataTransferLength = blenByte; |
256 | bcb->Flags = 0x00; | 256 | bcb->Flags = 0x00; |
diff --git a/drivers/staging/keucr/scsiglue.c b/drivers/staging/keucr/scsiglue.c index a2671404f7ac..da4f42af3838 100644 --- a/drivers/staging/keucr/scsiglue.c +++ b/drivers/staging/keucr/scsiglue.c | |||
@@ -87,7 +87,7 @@ static int slave_configure(struct scsi_device *sdev) | |||
87 | 87 | ||
88 | /* This is always called with scsi_lock(host) held */ | 88 | /* This is always called with scsi_lock(host) held */ |
89 | //----- queuecommand() --------------------- | 89 | //----- queuecommand() --------------------- |
90 | static int queuecommand(struct scsi_cmnd *srb, void (*done)(struct scsi_cmnd *)) | 90 | static int queuecommand_lck(struct scsi_cmnd *srb, void (*done)(struct scsi_cmnd *)) |
91 | { | 91 | { |
92 | struct us_data *us = host_to_us(srb->device->host); | 92 | struct us_data *us = host_to_us(srb->device->host); |
93 | 93 | ||
@@ -117,6 +117,8 @@ static int queuecommand(struct scsi_cmnd *srb, void (*done)(struct scsi_cmnd *)) | |||
117 | return 0; | 117 | return 0; |
118 | } | 118 | } |
119 | 119 | ||
120 | static DEF_SCSI_QCMD(queuecommand) | ||
121 | |||
120 | /*********************************************************************** | 122 | /*********************************************************************** |
121 | * Error handling functions | 123 | * Error handling functions |
122 | ***********************************************************************/ | 124 | ***********************************************************************/ |
diff --git a/drivers/staging/keucr/sdscsi.c b/drivers/staging/keucr/sdscsi.c index 6c332f850ebe..d646507a3611 100644 --- a/drivers/staging/keucr/sdscsi.c +++ b/drivers/staging/keucr/sdscsi.c | |||
@@ -152,7 +152,7 @@ int SD_SCSI_Read(struct us_data *us, struct scsi_cmnd *srb) | |||
152 | bnByte = bn; | 152 | bnByte = bn; |
153 | 153 | ||
154 | // set up the command wrapper | 154 | // set up the command wrapper |
155 | memset(bcb, 0, sizeof(bcb)); | 155 | memset(bcb, 0, sizeof(struct bulk_cb_wrap)); |
156 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); | 156 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); |
157 | bcb->DataTransferLength = blenByte; | 157 | bcb->DataTransferLength = blenByte; |
158 | bcb->Flags = 0x80; | 158 | bcb->Flags = 0x80; |
@@ -192,7 +192,7 @@ int SD_SCSI_Write(struct us_data *us, struct scsi_cmnd *srb) | |||
192 | bnByte = bn; | 192 | bnByte = bn; |
193 | 193 | ||
194 | // set up the command wrapper | 194 | // set up the command wrapper |
195 | memset(bcb, 0, sizeof(bcb)); | 195 | memset(bcb, 0, sizeof(struct bulk_cb_wrap)); |
196 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); | 196 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); |
197 | bcb->DataTransferLength = blenByte; | 197 | bcb->DataTransferLength = blenByte; |
198 | bcb->Flags = 0x00; | 198 | bcb->Flags = 0x00; |
diff --git a/drivers/staging/keucr/smilsub.c b/drivers/staging/keucr/smilsub.c index 844b65988636..1b52535a388f 100644 --- a/drivers/staging/keucr/smilsub.c +++ b/drivers/staging/keucr/smilsub.c | |||
@@ -266,7 +266,7 @@ int Ssfdc_D_ReadSect(struct us_data *us, BYTE *buf,BYTE *redundant) | |||
266 | addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector; | 266 | addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector; |
267 | 267 | ||
268 | // Read sect data | 268 | // Read sect data |
269 | memset(bcb, 0, sizeof(bcb)); | 269 | memset(bcb, 0, sizeof(struct bulk_cb_wrap)); |
270 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); | 270 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); |
271 | bcb->DataTransferLength = 0x200; | 271 | bcb->DataTransferLength = 0x200; |
272 | bcb->Flags = 0x80; | 272 | bcb->Flags = 0x80; |
@@ -281,7 +281,7 @@ int Ssfdc_D_ReadSect(struct us_data *us, BYTE *buf,BYTE *redundant) | |||
281 | return USB_STOR_TRANSPORT_ERROR; | 281 | return USB_STOR_TRANSPORT_ERROR; |
282 | 282 | ||
283 | // Read redundant | 283 | // Read redundant |
284 | memset(bcb, 0, sizeof(bcb)); | 284 | memset(bcb, 0, sizeof(struct bulk_cb_wrap)); |
285 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); | 285 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); |
286 | bcb->DataTransferLength = 0x10; | 286 | bcb->DataTransferLength = 0x10; |
287 | bcb->Flags = 0x80; | 287 | bcb->Flags = 0x80; |
@@ -319,7 +319,7 @@ int Ssfdc_D_ReadBlock(struct us_data *us, WORD count, BYTE *buf,BYTE *redundant) | |||
319 | addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector; | 319 | addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector; |
320 | 320 | ||
321 | // Read sect data | 321 | // Read sect data |
322 | memset(bcb, 0, sizeof(bcb)); | 322 | memset(bcb, 0, sizeof(struct bulk_cb_wrap)); |
323 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); | 323 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); |
324 | bcb->DataTransferLength = 0x200*count; | 324 | bcb->DataTransferLength = 0x200*count; |
325 | bcb->Flags = 0x80; | 325 | bcb->Flags = 0x80; |
@@ -334,7 +334,7 @@ int Ssfdc_D_ReadBlock(struct us_data *us, WORD count, BYTE *buf,BYTE *redundant) | |||
334 | return USB_STOR_TRANSPORT_ERROR; | 334 | return USB_STOR_TRANSPORT_ERROR; |
335 | 335 | ||
336 | // Read redundant | 336 | // Read redundant |
337 | memset(bcb, 0, sizeof(bcb)); | 337 | memset(bcb, 0, sizeof(struct bulk_cb_wrap)); |
338 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); | 338 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); |
339 | bcb->DataTransferLength = 0x10; | 339 | bcb->DataTransferLength = 0x10; |
340 | bcb->Flags = 0x80; | 340 | bcb->Flags = 0x80; |
@@ -536,7 +536,7 @@ int Ssfdc_D_CopyBlock(struct us_data *us, WORD count, BYTE *buf,BYTE *redundant) | |||
536 | WriteAddr = WriteAddr*(WORD)Ssfdc.MaxSectors; | 536 | WriteAddr = WriteAddr*(WORD)Ssfdc.MaxSectors; |
537 | 537 | ||
538 | // Write sect data | 538 | // Write sect data |
539 | memset(bcb, 0, sizeof(bcb)); | 539 | memset(bcb, 0, sizeof(struct bulk_cb_wrap)); |
540 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); | 540 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); |
541 | bcb->DataTransferLength = 0x200*count; | 541 | bcb->DataTransferLength = 0x200*count; |
542 | bcb->Flags = 0x00; | 542 | bcb->Flags = 0x00; |
@@ -754,7 +754,7 @@ int Ssfdc_D_WriteSectForCopy(struct us_data *us, BYTE *buf, BYTE *redundant) | |||
754 | addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector; | 754 | addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector; |
755 | 755 | ||
756 | // Write sect data | 756 | // Write sect data |
757 | memset(bcb, 0, sizeof(bcb)); | 757 | memset(bcb, 0, sizeof(struct bulk_cb_wrap)); |
758 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); | 758 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); |
759 | bcb->DataTransferLength = 0x200; | 759 | bcb->DataTransferLength = 0x200; |
760 | bcb->Flags = 0x00; | 760 | bcb->Flags = 0x00; |
@@ -791,7 +791,7 @@ int Ssfdc_D_EraseBlock(struct us_data *us) | |||
791 | addr=(WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock; | 791 | addr=(WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock; |
792 | addr=addr*(WORD)Ssfdc.MaxSectors; | 792 | addr=addr*(WORD)Ssfdc.MaxSectors; |
793 | 793 | ||
794 | memset(bcb, 0, sizeof(bcb)); | 794 | memset(bcb, 0, sizeof(struct bulk_cb_wrap)); |
795 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); | 795 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); |
796 | bcb->DataTransferLength = 0x200; | 796 | bcb->DataTransferLength = 0x200; |
797 | bcb->Flags = 0x80; | 797 | bcb->Flags = 0x80; |
@@ -827,7 +827,7 @@ int Ssfdc_D_ReadRedtData(struct us_data *us, BYTE *redundant) | |||
827 | addr = (WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock; | 827 | addr = (WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock; |
828 | addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector; | 828 | addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector; |
829 | 829 | ||
830 | memset(bcb, 0, sizeof(bcb)); | 830 | memset(bcb, 0, sizeof(struct bulk_cb_wrap)); |
831 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); | 831 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); |
832 | bcb->DataTransferLength = 0x10; | 832 | bcb->DataTransferLength = 0x10; |
833 | bcb->Flags = 0x80; | 833 | bcb->Flags = 0x80; |
@@ -870,7 +870,7 @@ int Ssfdc_D_WriteRedtData(struct us_data *us, BYTE *redundant) | |||
870 | addr = (WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock; | 870 | addr = (WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock; |
871 | addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector; | 871 | addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector; |
872 | 872 | ||
873 | memset(bcb, 0, sizeof(bcb)); | 873 | memset(bcb, 0, sizeof(struct bulk_cb_wrap)); |
874 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); | 874 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); |
875 | bcb->DataTransferLength = 0x10; | 875 | bcb->DataTransferLength = 0x10; |
876 | bcb->Flags = 0x80; | 876 | bcb->Flags = 0x80; |
diff --git a/drivers/staging/keucr/transport.c b/drivers/staging/keucr/transport.c index fd98df643ab0..111160cce441 100644 --- a/drivers/staging/keucr/transport.c +++ b/drivers/staging/keucr/transport.c | |||
@@ -40,7 +40,7 @@ static int usb_stor_msg_common(struct us_data *us, int timeout) | |||
40 | us->current_urb->error_count = 0; | 40 | us->current_urb->error_count = 0; |
41 | us->current_urb->status = 0; | 41 | us->current_urb->status = 0; |
42 | 42 | ||
43 | // us->current_urb->transfer_flags = URB_NO_SETUP_DMA_MAP; | 43 | us->current_urb->transfer_flags = 0; |
44 | if (us->current_urb->transfer_buffer == us->iobuf) | 44 | if (us->current_urb->transfer_buffer == us->iobuf) |
45 | us->current_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; | 45 | us->current_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; |
46 | us->current_urb->transfer_dma = us->iobuf_dma; | 46 | us->current_urb->transfer_dma = us->iobuf_dma; |
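
Note: the transport change stops carrying stale flags between submissions: transfer_flags is reset each time and URB_NO_TRANSFER_DMA_MAP is set only when the transfer really uses the pre-mapped us->iobuf, so the USB core maps any other buffer itself. A small sketch of that setup, with assumed parameter names:

    #include <linux/usb.h>

    /* Prepare a URB, flagging DMA pre-mapping only for the pre-mapped buffer. */
    static void prepare_transfer(struct urb *urb, void *buf, dma_addr_t buf_dma,
                                 void *premapped_buf)
    {
            urb->transfer_flags = 0;        /* never inherit flags from the last use */

            if (buf == premapped_buf) {
                    urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
                    urb->transfer_dma = buf_dma;    /* core skips its own mapping */
            }
            urb->transfer_buffer = buf;
    }
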
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c index 75aa7a36307d..4ca45ec7fd84 100644 --- a/drivers/staging/olpc_dcon/olpc_dcon.c +++ b/drivers/staging/olpc_dcon/olpc_dcon.c | |||
@@ -17,7 +17,6 @@ | |||
17 | #include <linux/console.h> | 17 | #include <linux/console.h> |
18 | #include <linux/i2c.h> | 18 | #include <linux/i2c.h> |
19 | #include <linux/platform_device.h> | 19 | #include <linux/platform_device.h> |
20 | #include <linux/i2c-id.h> | ||
21 | #include <linux/pci.h> | 20 | #include <linux/pci.h> |
22 | #include <linux/pci_ids.h> | 21 | #include <linux/pci_ids.h> |
23 | #include <linux/interrupt.h> | 22 | #include <linux/interrupt.h> |
@@ -733,7 +732,6 @@ static int dcon_probe(struct i2c_client *client, const struct i2c_device_id *id) | |||
733 | edev: | 732 | edev: |
734 | platform_device_unregister(dcon_device); | 733 | platform_device_unregister(dcon_device); |
735 | dcon_device = NULL; | 734 | dcon_device = NULL; |
736 | i2c_set_clientdata(client, NULL); | ||
737 | eirq: | 735 | eirq: |
738 | free_irq(DCON_IRQ, &dcon_driver); | 736 | free_irq(DCON_IRQ, &dcon_driver); |
739 | einit: | 737 | einit: |
@@ -757,8 +755,6 @@ static int dcon_remove(struct i2c_client *client) | |||
757 | platform_device_unregister(dcon_device); | 755 | platform_device_unregister(dcon_device); |
758 | cancel_work_sync(&dcon_work); | 756 | cancel_work_sync(&dcon_work); |
759 | 757 | ||
760 | i2c_set_clientdata(client, NULL); | ||
761 | |||
762 | return 0; | 758 | return 0; |
763 | } | 759 | } |
764 | 760 | ||
diff --git a/drivers/staging/rt2860/common/cmm_aes.c b/drivers/staging/rt2860/common/cmm_aes.c index 1d159ff82fd2..a99879bada42 100644 --- a/drivers/staging/rt2860/common/cmm_aes.c +++ b/drivers/staging/rt2860/common/cmm_aes.c | |||
@@ -330,8 +330,6 @@ void construct_mic_iv(unsigned char *mic_iv, | |||
330 | for (i = 8; i < 14; i++) | 330 | for (i = 8; i < 14; i++) |
331 | mic_iv[i] = pn_vector[13 - i]; /* mic_iv[8:13] = PN[5:0] */ | 331 | mic_iv[i] = pn_vector[13 - i]; /* mic_iv[8:13] = PN[5:0] */ |
332 | #endif | 332 | #endif |
333 | i = (payload_length / 256); | ||
334 | i = (payload_length % 256); | ||
335 | mic_iv[14] = (unsigned char)(payload_length / 256); | 333 | mic_iv[14] = (unsigned char)(payload_length / 256); |
336 | mic_iv[15] = (unsigned char)(payload_length % 256); | 334 | mic_iv[15] = (unsigned char)(payload_length % 256); |
337 | 335 | ||
diff --git a/drivers/staging/rt2860/usb_main_dev.c b/drivers/staging/rt2860/usb_main_dev.c index ebf9074a9083..ddacfc6c4861 100644 --- a/drivers/staging/rt2860/usb_main_dev.c +++ b/drivers/staging/rt2860/usb_main_dev.c | |||
@@ -65,6 +65,7 @@ struct usb_device_id rtusb_usb_id[] = { | |||
65 | {USB_DEVICE(0x14B2, 0x3C07)}, /* AL */ | 65 | {USB_DEVICE(0x14B2, 0x3C07)}, /* AL */ |
66 | {USB_DEVICE(0x050D, 0x8053)}, /* Belkin */ | 66 | {USB_DEVICE(0x050D, 0x8053)}, /* Belkin */ |
67 | {USB_DEVICE(0x050D, 0x825B)}, /* Belkin */ | 67 | {USB_DEVICE(0x050D, 0x825B)}, /* Belkin */ |
68 | {USB_DEVICE(0x050D, 0x935A)}, /* Belkin F6D4050 v1 */ | ||
68 | {USB_DEVICE(0x050D, 0x935B)}, /* Belkin F6D4050 v2 */ | 69 | {USB_DEVICE(0x050D, 0x935B)}, /* Belkin F6D4050 v2 */ |
69 | {USB_DEVICE(0x14B2, 0x3C23)}, /* Airlink */ | 70 | {USB_DEVICE(0x14B2, 0x3C23)}, /* Airlink */ |
70 | {USB_DEVICE(0x14B2, 0x3C27)}, /* Airlink */ | 71 | {USB_DEVICE(0x14B2, 0x3C27)}, /* Airlink */ |
diff --git a/drivers/staging/rtl8192e/r8192E_core.c b/drivers/staging/rtl8192e/r8192E_core.c index a202194b5cbb..b1786dcac245 100644 --- a/drivers/staging/rtl8192e/r8192E_core.c +++ b/drivers/staging/rtl8192e/r8192E_core.c | |||
@@ -5829,6 +5829,9 @@ static void rtl8192_rx(struct net_device *dev) | |||
5829 | } | 5829 | } |
5830 | } | 5830 | } |
5831 | 5831 | ||
5832 | pci_unmap_single(priv->pdev, *((dma_addr_t *) skb->cb), | ||
5833 | priv->rxbuffersize, PCI_DMA_FROMDEVICE); | ||
5834 | |||
5832 | skb = new_skb; | 5835 | skb = new_skb; |
5833 | priv->rx_buf[priv->rx_idx] = skb; | 5836 | priv->rx_buf[priv->rx_idx] = skb; |
5834 | *((dma_addr_t *) skb->cb) = pci_map_single(priv->pdev, skb_tail_pointer(skb), priv->rxbuffersize, PCI_DMA_FROMDEVICE); | 5837 | *((dma_addr_t *) skb->cb) = pci_map_single(priv->pdev, skb_tail_pointer(skb), priv->rxbuffersize, PCI_DMA_FROMDEVICE); |
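
Note: the rtl8192e fix is about streaming-DMA discipline. The old refill loop mapped a replacement skb into the ring without ever unmapping the buffer it replaced, which leaks mapping resources and, on non-coherent hardware, can leave the CPU reading stale data; the added pci_unmap_single() releases the old mapping before the slot is reused. A hedged sketch of the refill pattern; the ring layout and field names are illustrative only:

    #include <linux/pci.h>
    #include <linux/skbuff.h>

    struct rx_ring {
            struct pci_dev *pdev;
            struct sk_buff *buf[64];
            unsigned int idx;
            unsigned int bufsize;
    };

    /* Replace the skb in the current ring slot with a fresh one. */
    static int refill_slot(struct rx_ring *r, struct sk_buff *new_skb)
    {
            struct sk_buff *old = r->buf[r->idx];

            /* give the old buffer back to the CPU before the slot is reused */
            pci_unmap_single(r->pdev, *((dma_addr_t *)old->cb),
                             r->bufsize, PCI_DMA_FROMDEVICE);

            r->buf[r->idx] = new_skb;
            *((dma_addr_t *)new_skb->cb) =
                    pci_map_single(r->pdev, skb_tail_pointer(new_skb),
                                   r->bufsize, PCI_DMA_FROMDEVICE);
            if (pci_dma_mapping_error(r->pdev, *((dma_addr_t *)new_skb->cb)))
                    return -ENOMEM;
            return 0;
    }
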
diff --git a/drivers/staging/stradis/stradis.c b/drivers/staging/stradis/stradis.c index a057824e7ebc..807dd7eb748f 100644 --- a/drivers/staging/stradis/stradis.c +++ b/drivers/staging/stradis/stradis.c | |||
@@ -1286,6 +1286,7 @@ static long saa_ioctl(struct file *file, | |||
1286 | case VIDIOCGCAP: | 1286 | case VIDIOCGCAP: |
1287 | { | 1287 | { |
1288 | struct video_capability b; | 1288 | struct video_capability b; |
1289 | memset(&b, 0, sizeof(b)); | ||
1289 | strcpy(b.name, saa->video_dev.name); | 1290 | strcpy(b.name, saa->video_dev.name); |
1290 | b.type = VID_TYPE_CAPTURE | VID_TYPE_OVERLAY | | 1291 | b.type = VID_TYPE_CAPTURE | VID_TYPE_OVERLAY | |
1291 | VID_TYPE_CLIPPING | VID_TYPE_FRAMERAM | | 1292 | VID_TYPE_CLIPPING | VID_TYPE_FRAMERAM | |
@@ -1416,6 +1417,7 @@ static long saa_ioctl(struct file *file, | |||
1416 | case VIDIOCGWIN: | 1417 | case VIDIOCGWIN: |
1417 | { | 1418 | { |
1418 | struct video_window vw; | 1419 | struct video_window vw; |
1420 | memset(&vw, 0, sizeof(vw)); | ||
1419 | vw.x = saa->win.x; | 1421 | vw.x = saa->win.x; |
1420 | vw.y = saa->win.y; | 1422 | vw.y = saa->win.y; |
1421 | vw.width = saa->win.width; | 1423 | vw.width = saa->win.width; |
@@ -1448,6 +1450,7 @@ static long saa_ioctl(struct file *file, | |||
1448 | case VIDIOCGFBUF: | 1450 | case VIDIOCGFBUF: |
1449 | { | 1451 | { |
1450 | struct video_buffer v; | 1452 | struct video_buffer v; |
1453 | memset(&v, 0, sizeof(v)); | ||
1451 | v.base = (void *)saa->win.vidadr; | 1454 | v.base = (void *)saa->win.vidadr; |
1452 | v.height = saa->win.sheight; | 1455 | v.height = saa->win.sheight; |
1453 | v.width = saa->win.swidth; | 1456 | v.width = saa->win.swidth; |
@@ -1492,6 +1495,7 @@ static long saa_ioctl(struct file *file, | |||
1492 | case VIDIOCGAUDIO: | 1495 | case VIDIOCGAUDIO: |
1493 | { | 1496 | { |
1494 | struct video_audio v; | 1497 | struct video_audio v; |
1498 | memset(&v, 0, sizeof(v)); | ||
1495 | v = saa->audio_dev; | 1499 | v = saa->audio_dev; |
1496 | v.flags &= ~(VIDEO_AUDIO_MUTE | VIDEO_AUDIO_MUTABLE); | 1500 | v.flags &= ~(VIDEO_AUDIO_MUTE | VIDEO_AUDIO_MUTABLE); |
1497 | v.flags |= VIDEO_AUDIO_MUTABLE | VIDEO_AUDIO_VOLUME; | 1501 | v.flags |= VIDEO_AUDIO_MUTABLE | VIDEO_AUDIO_VOLUME; |
@@ -1534,6 +1538,7 @@ static long saa_ioctl(struct file *file, | |||
1534 | case VIDIOCGUNIT: | 1538 | case VIDIOCGUNIT: |
1535 | { | 1539 | { |
1536 | struct video_unit vu; | 1540 | struct video_unit vu; |
1541 | memset(&vu, 0, sizeof(vu)); | ||
1537 | vu.video = saa->video_dev.minor; | 1542 | vu.video = saa->video_dev.minor; |
1538 | vu.vbi = VIDEO_NO_UNIT; | 1543 | vu.vbi = VIDEO_NO_UNIT; |
1539 | vu.radio = VIDEO_NO_UNIT; | 1544 | vu.radio = VIDEO_NO_UNIT; |
@@ -1888,6 +1893,7 @@ static int saa_open(struct file *file) | |||
1888 | 1893 | ||
1889 | saa->user++; | 1894 | saa->user++; |
1890 | if (saa->user > 1) { | 1895 | if (saa->user > 1) { |
1896 | saa->user--; | ||
1891 | unlock_kernel(); | 1897 | unlock_kernel(); |
1892 | return 0; /* device open already, don't reset */ | 1898 | return 0; /* device open already, don't reset */ |
1893 | } | 1899 | } |
@@ -2000,10 +2006,13 @@ static int __devinit configure_saa7146(struct pci_dev *pdev, int num) | |||
2000 | if (retval < 0) { | 2006 | if (retval < 0) { |
2001 | dev_err(&pdev->dev, "%d: error in registering video device!\n", | 2007 | dev_err(&pdev->dev, "%d: error in registering video device!\n", |
2002 | num); | 2008 | num); |
2003 | goto errio; | 2009 | goto errirq; |
2004 | } | 2010 | } |
2005 | 2011 | ||
2006 | return 0; | 2012 | return 0; |
2013 | |||
2014 | errirq: | ||
2015 | free_irq(saa->irq, saa); | ||
2007 | errio: | 2016 | errio: |
2008 | iounmap(saa->saa7146_mem); | 2017 | iounmap(saa->saa7146_mem); |
2009 | err: | 2018 | err: |
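
Note: the stradis hunks bundle three fixes: zeroing on-stack v4l structs before they are filled and copied to user space (otherwise padding and untouched fields leak kernel stack contents), dropping the user count again when open() bails out early, and freeing the requested IRQ when video device registration fails. The memset-before-copy idiom, sketched with a hypothetical ioctl reply:

    #include <linux/string.h>
    #include <linux/uaccess.h>

    struct caps_reply {
            char name[32];
            int  type;
            int  channels;
            /* any field the handler does not set would otherwise carry stack garbage */
    };

    static long get_caps(void __user *argp)
    {
            struct caps_reply b;

            memset(&b, 0, sizeof(b));       /* clear padding and unused fields first */
            strlcpy(b.name, "example", sizeof(b.name));
            b.type = 1;
            b.channels = 2;

            if (copy_to_user(argp, &b, sizeof(b)))
                    return -EFAULT;
            return 0;
    }
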
diff --git a/drivers/staging/tidspbridge/Kconfig b/drivers/staging/tidspbridge/Kconfig index ff64d464143c..93de4f2e8bf8 100644 --- a/drivers/staging/tidspbridge/Kconfig +++ b/drivers/staging/tidspbridge/Kconfig | |||
@@ -6,7 +6,6 @@ menuconfig TIDSPBRIDGE | |||
6 | tristate "DSP Bridge driver" | 6 | tristate "DSP Bridge driver" |
7 | depends on ARCH_OMAP3 | 7 | depends on ARCH_OMAP3 |
8 | select OMAP_MBOX_FWK | 8 | select OMAP_MBOX_FWK |
9 | select OMAP_IOMMU | ||
10 | help | 9 | help |
11 | DSP/BIOS Bridge is designed for platforms that contain a GPP and | 10 | DSP/BIOS Bridge is designed for platforms that contain a GPP and |
12 | one or more attached DSPs. The GPP is considered the master or | 11 | one or more attached DSPs. The GPP is considered the master or |
diff --git a/drivers/staging/tidspbridge/Makefile b/drivers/staging/tidspbridge/Makefile index 50decc2935c5..41c644c3318f 100644 --- a/drivers/staging/tidspbridge/Makefile +++ b/drivers/staging/tidspbridge/Makefile | |||
@@ -2,18 +2,19 @@ obj-$(CONFIG_TIDSPBRIDGE) += bridgedriver.o | |||
2 | 2 | ||
3 | libgen = gen/gb.o gen/gs.o gen/gh.o gen/uuidutil.o | 3 | libgen = gen/gb.o gen/gs.o gen/gh.o gen/uuidutil.o |
4 | libcore = core/chnl_sm.o core/msg_sm.o core/io_sm.o core/tiomap3430.o \ | 4 | libcore = core/chnl_sm.o core/msg_sm.o core/io_sm.o core/tiomap3430.o \ |
5 | core/tiomap3430_pwr.o core/tiomap_io.o core/dsp-mmu.o \ | 5 | core/tiomap3430_pwr.o core/tiomap_io.o \ |
6 | core/ue_deh.o core/wdt.o core/dsp-clock.o core/sync.o | 6 | core/ue_deh.o core/wdt.o core/dsp-clock.o core/sync.o |
7 | libpmgr = pmgr/chnl.o pmgr/io.o pmgr/msg.o pmgr/cod.o pmgr/dev.o pmgr/dspapi.o \ | 7 | libpmgr = pmgr/chnl.o pmgr/io.o pmgr/msg.o pmgr/cod.o pmgr/dev.o pmgr/dspapi.o \ |
8 | pmgr/cmm.o pmgr/dbll.o | 8 | pmgr/dmm.o pmgr/cmm.o pmgr/dbll.o |
9 | librmgr = rmgr/dbdcd.o rmgr/disp.o rmgr/drv.o rmgr/mgr.o rmgr/node.o \ | 9 | librmgr = rmgr/dbdcd.o rmgr/disp.o rmgr/drv.o rmgr/mgr.o rmgr/node.o \ |
10 | rmgr/proc.o rmgr/pwr.o rmgr/rmm.o rmgr/strm.o rmgr/dspdrv.o \ | 10 | rmgr/proc.o rmgr/pwr.o rmgr/rmm.o rmgr/strm.o rmgr/dspdrv.o \ |
11 | rmgr/nldr.o rmgr/drv_interface.o | 11 | rmgr/nldr.o rmgr/drv_interface.o |
12 | libdload = dynload/cload.o dynload/getsection.o dynload/reloc.o \ | 12 | libdload = dynload/cload.o dynload/getsection.o dynload/reloc.o \ |
13 | dynload/tramp.o | 13 | dynload/tramp.o |
14 | libhw = hw/hw_mmu.o | ||
14 | 15 | ||
15 | bridgedriver-y := $(libgen) $(libservices) $(libcore) $(libpmgr) $(librmgr) \ | 16 | bridgedriver-y := $(libgen) $(libservices) $(libcore) $(libpmgr) $(librmgr) \ |
16 | $(libdload) | 17 | $(libdload) $(libhw) |
17 | 18 | ||
18 | #Machine dependent | 19 | #Machine dependent |
19 | ccflags-y += -D_TI_ -D_DB_TIOMAP -DTMS32060 \ | 20 | ccflags-y += -D_TI_ -D_DB_TIOMAP -DTMS32060 \ |
diff --git a/drivers/staging/tidspbridge/core/_deh.h b/drivers/staging/tidspbridge/core/_deh.h index 8ae263387a87..16723cd34831 100644 --- a/drivers/staging/tidspbridge/core/_deh.h +++ b/drivers/staging/tidspbridge/core/_deh.h | |||
@@ -27,8 +27,9 @@ | |||
27 | struct deh_mgr { | 27 | struct deh_mgr { |
28 | struct bridge_dev_context *hbridge_context; /* Bridge context. */ | 28 | struct bridge_dev_context *hbridge_context; /* Bridge context. */ |
29 | struct ntfy_object *ntfy_obj; /* NTFY object */ | 29 | struct ntfy_object *ntfy_obj; /* NTFY object */ |
30 | }; | ||
31 | 30 | ||
32 | int mmu_fault_isr(struct iommu *mmu); | 31 | /* MMU Fault DPC */ |
32 | struct tasklet_struct dpc_tasklet; | ||
33 | }; | ||
33 | 34 | ||
34 | #endif /* _DEH_ */ | 35 | #endif /* _DEH_ */ |
diff --git a/drivers/staging/tidspbridge/core/_tiomap.h b/drivers/staging/tidspbridge/core/_tiomap.h index e0a801c1cb98..1c1f157e167a 100644 --- a/drivers/staging/tidspbridge/core/_tiomap.h +++ b/drivers/staging/tidspbridge/core/_tiomap.h | |||
@@ -23,8 +23,8 @@ | |||
23 | #include <plat/clockdomain.h> | 23 | #include <plat/clockdomain.h> |
24 | #include <mach-omap2/prm-regbits-34xx.h> | 24 | #include <mach-omap2/prm-regbits-34xx.h> |
25 | #include <mach-omap2/cm-regbits-34xx.h> | 25 | #include <mach-omap2/cm-regbits-34xx.h> |
26 | #include <dspbridge/dsp-mmu.h> | ||
27 | #include <dspbridge/devdefs.h> | 26 | #include <dspbridge/devdefs.h> |
27 | #include <hw_defs.h> | ||
28 | #include <dspbridge/dspioctl.h> /* for bridge_ioctl_extproc defn */ | 28 | #include <dspbridge/dspioctl.h> /* for bridge_ioctl_extproc defn */ |
29 | #include <dspbridge/sync.h> | 29 | #include <dspbridge/sync.h> |
30 | #include <dspbridge/clk.h> | 30 | #include <dspbridge/clk.h> |
@@ -306,18 +306,6 @@ static const struct bpwr_clk_t bpwr_clks[] = { | |||
306 | 306 | ||
307 | #define CLEAR_BIT_INDEX(reg, index) (reg &= ~(1 << (index))) | 307 | #define CLEAR_BIT_INDEX(reg, index) (reg &= ~(1 << (index))) |
308 | 308 | ||
309 | struct shm_segs { | ||
310 | u32 seg0_da; | ||
311 | u32 seg0_pa; | ||
312 | u32 seg0_va; | ||
313 | u32 seg0_size; | ||
314 | u32 seg1_da; | ||
315 | u32 seg1_pa; | ||
316 | u32 seg1_va; | ||
317 | u32 seg1_size; | ||
318 | }; | ||
319 | |||
320 | |||
321 | /* This Bridge driver's device context: */ | 309 | /* This Bridge driver's device context: */ |
322 | struct bridge_dev_context { | 310 | struct bridge_dev_context { |
323 | struct dev_object *hdev_obj; /* Handle to Bridge device object. */ | 311 | struct dev_object *hdev_obj; /* Handle to Bridge device object. */ |
@@ -328,6 +316,7 @@ struct bridge_dev_context { | |||
328 | */ | 316 | */ |
329 | u32 dw_dsp_ext_base_addr; /* See the comment above */ | 317 | u32 dw_dsp_ext_base_addr; /* See the comment above */ |
330 | u32 dw_api_reg_base; /* API mem map'd registers */ | 318 | u32 dw_api_reg_base; /* API mem map'd registers */ |
319 | void __iomem *dw_dsp_mmu_base; /* DSP MMU Mapped registers */ | ||
331 | u32 dw_api_clk_base; /* CLK Registers */ | 320 | u32 dw_api_clk_base; /* CLK Registers */ |
332 | u32 dw_dsp_clk_m2_base; /* DSP Clock Module m2 */ | 321 | u32 dw_dsp_clk_m2_base; /* DSP Clock Module m2 */ |
333 | u32 dw_public_rhea; /* Pub Rhea */ | 322 | u32 dw_public_rhea; /* Pub Rhea */ |
@@ -339,8 +328,7 @@ struct bridge_dev_context { | |||
339 | u32 dw_internal_size; /* Internal memory size */ | 328 | u32 dw_internal_size; /* Internal memory size */ |
340 | 329 | ||
341 | struct omap_mbox *mbox; /* Mail box handle */ | 330 | struct omap_mbox *mbox; /* Mail box handle */ |
342 | struct iommu *dsp_mmu; /* iommu for iva2 handler */ | 331 | |
343 | struct shm_segs sh_s; | ||
344 | struct cfg_hostres *resources; /* Host Resources */ | 332 | struct cfg_hostres *resources; /* Host Resources */ |
345 | 333 | ||
346 | /* | 334 | /* |
@@ -353,6 +341,7 @@ struct bridge_dev_context { | |||
353 | 341 | ||
354 | /* TC Settings */ | 342 | /* TC Settings */ |
355 | bool tc_word_swap_on; /* Traffic Controller Word Swap */ | 343 | bool tc_word_swap_on; /* Traffic Controller Word Swap */ |
344 | struct pg_table_attrs *pt_attrs; | ||
356 | u32 dsp_per_clks; | 345 | u32 dsp_per_clks; |
357 | }; | 346 | }; |
358 | 347 | ||
diff --git a/drivers/staging/tidspbridge/core/dsp-mmu.c b/drivers/staging/tidspbridge/core/dsp-mmu.c deleted file mode 100644 index 983c95adc8ff..000000000000 --- a/drivers/staging/tidspbridge/core/dsp-mmu.c +++ /dev/null | |||
@@ -1,317 +0,0 @@ | |||
1 | /* | ||
2 | * dsp-mmu.c | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * DSP iommu. | ||
7 | * | ||
8 | * Copyright (C) 2010 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #include <dspbridge/host_os.h> | ||
20 | #include <plat/dmtimer.h> | ||
21 | #include <dspbridge/dbdefs.h> | ||
22 | #include <dspbridge/dev.h> | ||
23 | #include <dspbridge/io_sm.h> | ||
24 | #include <dspbridge/dspdeh.h> | ||
25 | #include "_tiomap.h" | ||
26 | |||
27 | #include <dspbridge/dsp-mmu.h> | ||
28 | |||
29 | #define MMU_CNTL_TWL_EN (1 << 2) | ||
30 | |||
31 | static struct tasklet_struct mmu_tasklet; | ||
32 | |||
33 | #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE | ||
34 | static void mmu_fault_print_stack(struct bridge_dev_context *dev_context) | ||
35 | { | ||
36 | void *dummy_addr; | ||
37 | u32 fa, tmp; | ||
38 | struct iotlb_entry e; | ||
39 | struct iommu *mmu = dev_context->dsp_mmu; | ||
40 | dummy_addr = (void *)__get_free_page(GFP_ATOMIC); | ||
41 | |||
42 | /* | ||
43 | * Before acking the MMU fault, let's make sure MMU can only | ||
44 | * access entry #0. Then add a new entry so that the DSP OS | ||
45 | * can continue in order to dump the stack. | ||
46 | */ | ||
47 | tmp = iommu_read_reg(mmu, MMU_CNTL); | ||
48 | tmp &= ~MMU_CNTL_TWL_EN; | ||
49 | iommu_write_reg(mmu, tmp, MMU_CNTL); | ||
50 | fa = iommu_read_reg(mmu, MMU_FAULT_AD); | ||
51 | e.da = fa & PAGE_MASK; | ||
52 | e.pa = virt_to_phys(dummy_addr); | ||
53 | e.valid = 1; | ||
54 | e.prsvd = 1; | ||
55 | e.pgsz = IOVMF_PGSZ_4K & MMU_CAM_PGSZ_MASK; | ||
56 | e.endian = MMU_RAM_ENDIAN_LITTLE; | ||
57 | e.elsz = MMU_RAM_ELSZ_32; | ||
58 | e.mixed = 0; | ||
59 | |||
60 | load_iotlb_entry(mmu, &e); | ||
61 | |||
62 | dsp_clk_enable(DSP_CLK_GPT8); | ||
63 | |||
64 | dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe); | ||
65 | |||
66 | /* Clear MMU interrupt */ | ||
67 | tmp = iommu_read_reg(mmu, MMU_IRQSTATUS); | ||
68 | iommu_write_reg(mmu, tmp, MMU_IRQSTATUS); | ||
69 | |||
70 | dump_dsp_stack(dev_context); | ||
71 | dsp_clk_disable(DSP_CLK_GPT8); | ||
72 | |||
73 | iopgtable_clear_entry(mmu, fa); | ||
74 | free_page((unsigned long)dummy_addr); | ||
75 | } | ||
76 | #endif | ||
77 | |||
78 | |||
79 | static void fault_tasklet(unsigned long data) | ||
80 | { | ||
81 | struct iommu *mmu = (struct iommu *)data; | ||
82 | struct bridge_dev_context *dev_ctx; | ||
83 | struct deh_mgr *dm; | ||
84 | u32 fa; | ||
85 | dev_get_deh_mgr(dev_get_first(), &dm); | ||
86 | dev_get_bridge_context(dev_get_first(), &dev_ctx); | ||
87 | |||
88 | if (!dm || !dev_ctx) | ||
89 | return; | ||
90 | |||
91 | fa = iommu_read_reg(mmu, MMU_FAULT_AD); | ||
92 | |||
93 | #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE | ||
94 | print_dsp_trace_buffer(dev_ctx); | ||
95 | dump_dl_modules(dev_ctx); | ||
96 | mmu_fault_print_stack(dev_ctx); | ||
97 | #endif | ||
98 | |||
99 | bridge_deh_notify(dm, DSP_MMUFAULT, fa); | ||
100 | } | ||
101 | |||
102 | /* | ||
103 | * ======== mmu_fault_isr ======== | ||
104 | * ISR to be triggered by a DSP MMU fault interrupt. | ||
105 | */ | ||
106 | static int mmu_fault_callback(struct iommu *mmu) | ||
107 | { | ||
108 | if (!mmu) | ||
109 | return -EPERM; | ||
110 | |||
111 | iommu_write_reg(mmu, 0, MMU_IRQENABLE); | ||
112 | tasklet_schedule(&mmu_tasklet); | ||
113 | return 0; | ||
114 | } | ||
115 | |||
116 | /** | ||
117 | * dsp_mmu_init() - initialize dsp_mmu module and returns a handle | ||
118 | * | ||
119 | * This function initialize dsp mmu module and returns a struct iommu | ||
120 | * handle to use it for dsp maps. | ||
121 | * | ||
122 | */ | ||
123 | struct iommu *dsp_mmu_init() | ||
124 | { | ||
125 | struct iommu *mmu; | ||
126 | |||
127 | mmu = iommu_get("iva2"); | ||
128 | |||
129 | if (!IS_ERR(mmu)) { | ||
130 | tasklet_init(&mmu_tasklet, fault_tasklet, (unsigned long)mmu); | ||
131 | mmu->isr = mmu_fault_callback; | ||
132 | } | ||
133 | |||
134 | return mmu; | ||
135 | } | ||
136 | |||
137 | /** | ||
138 | * dsp_mmu_exit() - destroy dsp mmu module | ||
139 | * @mmu: Pointer to iommu handle. | ||
140 | * | ||
141 | * This function destroys dsp mmu module. | ||
142 | * | ||
143 | */ | ||
144 | void dsp_mmu_exit(struct iommu *mmu) | ||
145 | { | ||
146 | if (mmu) | ||
147 | iommu_put(mmu); | ||
148 | tasklet_kill(&mmu_tasklet); | ||
149 | } | ||
150 | |||
151 | /** | ||
152 | * user_va2_pa() - get physical address from userspace address. | ||
153 | * @mm: mm_struct Pointer of the process. | ||
154 | * @address: Virtual user space address. | ||
155 | * | ||
156 | */ | ||
157 | static u32 user_va2_pa(struct mm_struct *mm, u32 address) | ||
158 | { | ||
159 | pgd_t *pgd; | ||
160 | pmd_t *pmd; | ||
161 | pte_t *ptep, pte; | ||
162 | |||
163 | pgd = pgd_offset(mm, address); | ||
164 | if (!(pgd_none(*pgd) || pgd_bad(*pgd))) { | ||
165 | pmd = pmd_offset(pgd, address); | ||
166 | if (!(pmd_none(*pmd) || pmd_bad(*pmd))) { | ||
167 | ptep = pte_offset_map(pmd, address); | ||
168 | if (ptep) { | ||
169 | pte = *ptep; | ||
170 | if (pte_present(pte)) | ||
171 | return pte & PAGE_MASK; | ||
172 | } | ||
173 | } | ||
174 | } | ||
175 | |||
176 | return 0; | ||
177 | } | ||
178 | |||
179 | /** | ||
180 | * get_io_pages() - pin and get pages of io user's buffer. | ||
181 | * @mm: mm_struct Pointer of the process. | ||
182 | * @uva: Virtual user space address. | ||
183 | * @pages Pages to be pined. | ||
184 | * @usr_pgs struct page array pointer where the user pages will be stored | ||
185 | * | ||
186 | */ | ||
187 | static int get_io_pages(struct mm_struct *mm, u32 uva, unsigned pages, | ||
188 | struct page **usr_pgs) | ||
189 | { | ||
190 | u32 pa; | ||
191 | int i; | ||
192 | struct page *pg; | ||
193 | |||
194 | for (i = 0; i < pages; i++) { | ||
195 | pa = user_va2_pa(mm, uva); | ||
196 | |||
197 | if (!pfn_valid(__phys_to_pfn(pa))) | ||
198 | break; | ||
199 | |||
200 | pg = phys_to_page(pa); | ||
201 | usr_pgs[i] = pg; | ||
202 | get_page(pg); | ||
203 | } | ||
204 | return i; | ||
205 | } | ||
206 | |||
207 | /** | ||
208 | * user_to_dsp_map() - maps user to dsp virtual address | ||
209 | * @mmu: Pointer to iommu handle. | ||
210 | * @uva: Virtual user space address. | ||
211 | * @da DSP address | ||
212 | * @size Buffer size to map. | ||
213 | * @usr_pgs struct page array pointer where the user pages will be stored | ||
214 | * | ||
215 | * This function maps a user space buffer into DSP virtual address. | ||
216 | * | ||
217 | */ | ||
218 | u32 user_to_dsp_map(struct iommu *mmu, u32 uva, u32 da, u32 size, | ||
219 | struct page **usr_pgs) | ||
220 | { | ||
221 | int res, w; | ||
222 | unsigned pages; | ||
223 | int i; | ||
224 | struct vm_area_struct *vma; | ||
225 | struct mm_struct *mm = current->mm; | ||
226 | struct sg_table *sgt; | ||
227 | struct scatterlist *sg; | ||
228 | |||
229 | if (!size || !usr_pgs) | ||
230 | return -EINVAL; | ||
231 | |||
232 | pages = size / PG_SIZE4K; | ||
233 | |||
234 | down_read(&mm->mmap_sem); | ||
235 | vma = find_vma(mm, uva); | ||
236 | while (vma && (uva + size > vma->vm_end)) | ||
237 | vma = find_vma(mm, vma->vm_end + 1); | ||
238 | |||
239 | if (!vma) { | ||
240 | pr_err("%s: Failed to get VMA region for 0x%x (%d)\n", | ||
241 | __func__, uva, size); | ||
242 | up_read(&mm->mmap_sem); | ||
243 | return -EINVAL; | ||
244 | } | ||
245 | if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) | ||
246 | w = 1; | ||
247 | |||
248 | if (vma->vm_flags & VM_IO) | ||
249 | i = get_io_pages(mm, uva, pages, usr_pgs); | ||
250 | else | ||
251 | i = get_user_pages(current, mm, uva, pages, w, 1, | ||
252 | usr_pgs, NULL); | ||
253 | up_read(&mm->mmap_sem); | ||
254 | |||
255 | if (i < 0) | ||
256 | return i; | ||
257 | |||
258 | if (i < pages) { | ||
259 | res = -EFAULT; | ||
260 | goto err_pages; | ||
261 | } | ||
262 | |||
263 | sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); | ||
264 | if (!sgt) { | ||
265 | res = -ENOMEM; | ||
266 | goto err_pages; | ||
267 | } | ||
268 | |||
269 | res = sg_alloc_table(sgt, pages, GFP_KERNEL); | ||
270 | |||
271 | if (res < 0) | ||
272 | goto err_sg; | ||
273 | |||
274 | for_each_sg(sgt->sgl, sg, sgt->nents, i) | ||
275 | sg_set_page(sg, usr_pgs[i], PAGE_SIZE, 0); | ||
276 | |||
277 | da = iommu_vmap(mmu, da, sgt, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32); | ||
278 | |||
279 | if (!IS_ERR_VALUE(da)) | ||
280 | return da; | ||
281 | res = (int)da; | ||
282 | |||
283 | sg_free_table(sgt); | ||
284 | err_sg: | ||
285 | kfree(sgt); | ||
286 | i = pages; | ||
287 | err_pages: | ||
288 | while (i--) | ||
289 | put_page(usr_pgs[i]); | ||
290 | return res; | ||
291 | } | ||
292 | |||
293 | /** | ||
294 | * user_to_dsp_unmap() - unmaps DSP virtual buffer. | ||
295 | * @mmu: Pointer to iommu handle. | ||
296 | * @da DSP address | ||
297 | * | ||
298 | * This function unmaps a user space buffer into DSP virtual address. | ||
299 | * | ||
300 | */ | ||
301 | int user_to_dsp_unmap(struct iommu *mmu, u32 da) | ||
302 | { | ||
303 | unsigned i; | ||
304 | struct sg_table *sgt; | ||
305 | struct scatterlist *sg; | ||
306 | |||
307 | sgt = iommu_vunmap(mmu, da); | ||
308 | if (!sgt) | ||
309 | return -EFAULT; | ||
310 | |||
311 | for_each_sg(sgt->sgl, sg, sgt->nents, i) | ||
312 | put_page(sg_page(sg)); | ||
313 | sg_free_table(sgt); | ||
314 | kfree(sgt); | ||
315 | |||
316 | return 0; | ||
317 | } | ||
diff --git a/drivers/staging/tidspbridge/core/io_sm.c b/drivers/staging/tidspbridge/core/io_sm.c index 194badaba0ed..571864555ddd 100644 --- a/drivers/staging/tidspbridge/core/io_sm.c +++ b/drivers/staging/tidspbridge/core/io_sm.c | |||
@@ -39,6 +39,10 @@ | |||
39 | #include <dspbridge/ntfy.h> | 39 | #include <dspbridge/ntfy.h> |
40 | #include <dspbridge/sync.h> | 40 | #include <dspbridge/sync.h> |
41 | 41 | ||
42 | /* Hardware Abstraction Layer */ | ||
43 | #include <hw_defs.h> | ||
44 | #include <hw_mmu.h> | ||
45 | |||
42 | /* Bridge Driver */ | 46 | /* Bridge Driver */ |
43 | #include <dspbridge/dspdeh.h> | 47 | #include <dspbridge/dspdeh.h> |
44 | #include <dspbridge/dspio.h> | 48 | #include <dspbridge/dspio.h> |
@@ -287,7 +291,6 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr) | |||
287 | struct cod_manager *cod_man; | 291 | struct cod_manager *cod_man; |
288 | struct chnl_mgr *hchnl_mgr; | 292 | struct chnl_mgr *hchnl_mgr; |
289 | struct msg_mgr *hmsg_mgr; | 293 | struct msg_mgr *hmsg_mgr; |
290 | struct shm_segs *sm_sg; | ||
291 | u32 ul_shm_base; | 294 | u32 ul_shm_base; |
292 | u32 ul_shm_base_offset; | 295 | u32 ul_shm_base_offset; |
293 | u32 ul_shm_limit; | 296 | u32 ul_shm_limit; |
@@ -310,9 +313,18 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr) | |||
310 | struct bridge_ioctl_extproc ae_proc[BRDIOCTL_NUMOFMMUTLB]; | 313 | struct bridge_ioctl_extproc ae_proc[BRDIOCTL_NUMOFMMUTLB]; |
311 | struct cfg_hostres *host_res; | 314 | struct cfg_hostres *host_res; |
312 | struct bridge_dev_context *pbridge_context; | 315 | struct bridge_dev_context *pbridge_context; |
316 | u32 map_attrs; | ||
313 | u32 shm0_end; | 317 | u32 shm0_end; |
314 | u32 ul_dyn_ext_base; | 318 | u32 ul_dyn_ext_base; |
315 | u32 ul_seg1_size = 0; | 319 | u32 ul_seg1_size = 0; |
320 | u32 pa_curr = 0; | ||
321 | u32 va_curr = 0; | ||
322 | u32 gpp_va_curr = 0; | ||
323 | u32 num_bytes = 0; | ||
324 | u32 all_bits = 0; | ||
325 | u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB, | ||
326 | HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB | ||
327 | }; | ||
316 | 328 | ||
317 | status = dev_get_bridge_context(hio_mgr->hdev_obj, &pbridge_context); | 329 | status = dev_get_bridge_context(hio_mgr->hdev_obj, &pbridge_context); |
318 | if (!pbridge_context) { | 330 | if (!pbridge_context) { |
@@ -325,8 +337,6 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr) | |||
325 | status = -EFAULT; | 337 | status = -EFAULT; |
326 | goto func_end; | 338 | goto func_end; |
327 | } | 339 | } |
328 | sm_sg = &pbridge_context->sh_s; | ||
329 | |||
330 | status = dev_get_cod_mgr(hio_mgr->hdev_obj, &cod_man); | 340 | status = dev_get_cod_mgr(hio_mgr->hdev_obj, &cod_man); |
331 | if (!cod_man) { | 341 | if (!cod_man) { |
332 | status = -EFAULT; | 342 | status = -EFAULT; |
@@ -461,14 +471,129 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr) | |||
461 | if (status) | 471 | if (status) |
462 | goto func_end; | 472 | goto func_end; |
463 | 473 | ||
464 | sm_sg->seg1_pa = ul_gpp_pa; | 474 | pa_curr = ul_gpp_pa; |
465 | sm_sg->seg1_da = ul_dyn_ext_base; | 475 | va_curr = ul_dyn_ext_base * hio_mgr->word_size; |
466 | sm_sg->seg1_va = ul_gpp_va; | 476 | gpp_va_curr = ul_gpp_va; |
467 | sm_sg->seg1_size = ul_seg1_size; | 477 | num_bytes = ul_seg1_size; |
468 | sm_sg->seg0_pa = ul_gpp_pa + ul_pad_size + ul_seg1_size; | 478 | |
469 | sm_sg->seg0_da = ul_dsp_va; | 479 | /* |
470 | sm_sg->seg0_va = ul_gpp_va + ul_pad_size + ul_seg1_size; | 480 | * Try to fit into TLB entries. If not possible, push them to page |
471 | sm_sg->seg0_size = ul_seg_size; | 481 | * tables. It is quite possible that if sections are not on |
482 | * bigger page boundary, we may end up making several small pages. | ||
483 | * So, push them onto page tables, if that is the case. | ||
484 | */ | ||
485 | map_attrs = 0x00000000; | ||
486 | map_attrs = DSP_MAPLITTLEENDIAN; | ||
487 | map_attrs |= DSP_MAPPHYSICALADDR; | ||
488 | map_attrs |= DSP_MAPELEMSIZE32; | ||
489 | map_attrs |= DSP_MAPDONOTLOCK; | ||
490 | |||
491 | while (num_bytes) { | ||
492 | /* | ||
493 | * To find the max. page size with which both PA & VA are | ||
494 | * aligned. | ||
495 | */ | ||
496 | all_bits = pa_curr | va_curr; | ||
497 | dev_dbg(bridge, "all_bits %x, pa_curr %x, va_curr %x, " | ||
498 | "num_bytes %x\n", all_bits, pa_curr, va_curr, | ||
499 | num_bytes); | ||
500 | for (i = 0; i < 4; i++) { | ||
501 | if ((num_bytes >= page_size[i]) && ((all_bits & | ||
502 | (page_size[i] - | ||
503 | 1)) == 0)) { | ||
504 | status = | ||
505 | hio_mgr->intf_fxns-> | ||
506 | pfn_brd_mem_map(hio_mgr->hbridge_context, | ||
507 | pa_curr, va_curr, | ||
508 | page_size[i], map_attrs, | ||
509 | NULL); | ||
510 | if (status) | ||
511 | goto func_end; | ||
512 | pa_curr += page_size[i]; | ||
513 | va_curr += page_size[i]; | ||
514 | gpp_va_curr += page_size[i]; | ||
515 | num_bytes -= page_size[i]; | ||
516 | /* | ||
517 | * Don't try smaller sizes. Hopefully we have | ||
518 | * reached an address aligned to a bigger page | ||
519 | * size. | ||
520 | */ | ||
521 | break; | ||
522 | } | ||
523 | } | ||
524 | } | ||
525 | pa_curr += ul_pad_size; | ||
526 | va_curr += ul_pad_size; | ||
527 | gpp_va_curr += ul_pad_size; | ||
528 | |||
529 | /* Configure the TLB entries for the next cacheable segment */ | ||
530 | num_bytes = ul_seg_size; | ||
531 | va_curr = ul_dsp_va * hio_mgr->word_size; | ||
532 | while (num_bytes) { | ||
533 | /* | ||
534 | * To find the max. page size with which both PA & VA are | ||
535 | * aligned. | ||
536 | */ | ||
537 | all_bits = pa_curr | va_curr; | ||
538 | dev_dbg(bridge, "all_bits for Seg1 %x, pa_curr %x, " | ||
539 | "va_curr %x, num_bytes %x\n", all_bits, pa_curr, | ||
540 | va_curr, num_bytes); | ||
541 | for (i = 0; i < 4; i++) { | ||
542 | if (!(num_bytes >= page_size[i]) || | ||
543 | !((all_bits & (page_size[i] - 1)) == 0)) | ||
544 | continue; | ||
545 | if (ndx < MAX_LOCK_TLB_ENTRIES) { | ||
546 | /* | ||
547 | * This is the physical address written to | ||
548 | * DSP MMU. | ||
549 | */ | ||
550 | ae_proc[ndx].ul_gpp_pa = pa_curr; | ||
551 | /* | ||
552 | * This is the virtual uncached ioremapped | ||
553 | * address!!! | ||
554 | */ | ||
555 | ae_proc[ndx].ul_gpp_va = gpp_va_curr; | ||
556 | ae_proc[ndx].ul_dsp_va = | ||
557 | va_curr / hio_mgr->word_size; | ||
558 | ae_proc[ndx].ul_size = page_size[i]; | ||
559 | ae_proc[ndx].endianism = HW_LITTLE_ENDIAN; | ||
560 | ae_proc[ndx].elem_size = HW_ELEM_SIZE16BIT; | ||
561 | ae_proc[ndx].mixed_mode = HW_MMU_CPUES; | ||
562 | dev_dbg(bridge, "shm MMU TLB entry PA %x" | ||
563 | " VA %x DSP_VA %x Size %x\n", | ||
564 | ae_proc[ndx].ul_gpp_pa, | ||
565 | ae_proc[ndx].ul_gpp_va, | ||
566 | ae_proc[ndx].ul_dsp_va * | ||
567 | hio_mgr->word_size, page_size[i]); | ||
568 | ndx++; | ||
569 | } else { | ||
570 | status = | ||
571 | hio_mgr->intf_fxns-> | ||
572 | pfn_brd_mem_map(hio_mgr->hbridge_context, | ||
573 | pa_curr, va_curr, | ||
574 | page_size[i], map_attrs, | ||
575 | NULL); | ||
576 | dev_dbg(bridge, | ||
577 | "shm MMU PTE entry PA %x" | ||
578 | " VA %x DSP_VA %x Size %x\n", | ||
579 | ae_proc[ndx].ul_gpp_pa, | ||
580 | ae_proc[ndx].ul_gpp_va, | ||
581 | ae_proc[ndx].ul_dsp_va * | ||
582 | hio_mgr->word_size, page_size[i]); | ||
583 | if (status) | ||
584 | goto func_end; | ||
585 | } | ||
586 | pa_curr += page_size[i]; | ||
587 | va_curr += page_size[i]; | ||
588 | gpp_va_curr += page_size[i]; | ||
589 | num_bytes -= page_size[i]; | ||
590 | /* | ||
591 | * Don't try smaller sizes. Hopefully we have reached | ||
592 | * an address aligned to a bigger page size. | ||
593 | */ | ||
594 | break; | ||
595 | } | ||
596 | } | ||
472 | 597 | ||
473 | /* | 598 | /* |
474 | * Copy remaining entries from CDB. All entries are 1 MB and | 599 | * Copy remaining entries from CDB. All entries are 1 MB and |
@@ -509,12 +634,38 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr) | |||
509 | "DSP_VA 0x%x\n", ae_proc[ndx].ul_gpp_pa, | 634 | "DSP_VA 0x%x\n", ae_proc[ndx].ul_gpp_pa, |
510 | ae_proc[ndx].ul_dsp_va); | 635 | ae_proc[ndx].ul_dsp_va); |
511 | ndx++; | 636 | ndx++; |
637 | } else { | ||
638 | status = hio_mgr->intf_fxns->pfn_brd_mem_map | ||
639 | (hio_mgr->hbridge_context, | ||
640 | hio_mgr->ext_proc_info.ty_tlb[i]. | ||
641 | ul_gpp_phys, | ||
642 | hio_mgr->ext_proc_info.ty_tlb[i]. | ||
643 | ul_dsp_virt, 0x100000, map_attrs, | ||
644 | NULL); | ||
512 | } | 645 | } |
513 | } | 646 | } |
514 | if (status) | 647 | if (status) |
515 | goto func_end; | 648 | goto func_end; |
516 | } | 649 | } |
517 | 650 | ||
651 | map_attrs = 0x00000000; | ||
652 | map_attrs = DSP_MAPLITTLEENDIAN; | ||
653 | map_attrs |= DSP_MAPPHYSICALADDR; | ||
654 | map_attrs |= DSP_MAPELEMSIZE32; | ||
655 | map_attrs |= DSP_MAPDONOTLOCK; | ||
656 | |||
657 | /* Map the L4 peripherals */ | ||
658 | i = 0; | ||
659 | while (l4_peripheral_table[i].phys_addr) { | ||
660 | status = hio_mgr->intf_fxns->pfn_brd_mem_map | ||
661 | (hio_mgr->hbridge_context, l4_peripheral_table[i].phys_addr, | ||
662 | l4_peripheral_table[i].dsp_virt_addr, HW_PAGE_SIZE4KB, | ||
663 | map_attrs, NULL); | ||
664 | if (status) | ||
665 | goto func_end; | ||
666 | i++; | ||
667 | } | ||
668 | |||
518 | for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) { | 669 | for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) { |
519 | ae_proc[i].ul_dsp_va = 0; | 670 | ae_proc[i].ul_dsp_va = 0; |
520 | ae_proc[i].ul_gpp_pa = 0; | 671 | ae_proc[i].ul_gpp_pa = 0; |
@@ -537,12 +688,12 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr) | |||
537 | status = -EFAULT; | 688 | status = -EFAULT; |
538 | goto func_end; | 689 | goto func_end; |
539 | } else { | 690 | } else { |
540 | if (sm_sg->seg0_da > ul_shm_base) { | 691 | if (ae_proc[0].ul_dsp_va > ul_shm_base) { |
541 | status = -EPERM; | 692 | status = -EPERM; |
542 | goto func_end; | 693 | goto func_end; |
543 | } | 694 | } |
544 | /* ul_shm_base may not be at ul_dsp_va address */ | 695 | /* ul_shm_base may not be at ul_dsp_va address */ |
545 | ul_shm_base_offset = (ul_shm_base - sm_sg->seg0_da) * | 696 | ul_shm_base_offset = (ul_shm_base - ae_proc[0].ul_dsp_va) * |
546 | hio_mgr->word_size; | 697 | hio_mgr->word_size; |
547 | /* | 698 | /* |
548 | * bridge_dev_ctrl() will set dev context dsp-mmu info. In | 699 | * bridge_dev_ctrl() will set dev context dsp-mmu info. In |
@@ -566,7 +717,8 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr) | |||
566 | goto func_end; | 717 | goto func_end; |
567 | } | 718 | } |
568 | /* Register SM */ | 719 | /* Register SM */ |
569 | status = register_shm_segs(hio_mgr, cod_man, sm_sg->seg0_pa); | 720 | status = |
721 | register_shm_segs(hio_mgr, cod_man, ae_proc[0].ul_gpp_pa); | ||
570 | } | 722 | } |
571 | 723 | ||
572 | hio_mgr->shared_mem = (struct shm *)ul_shm_base; | 724 | hio_mgr->shared_mem = (struct shm *)ul_shm_base; |
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c index f22bc12bc0d3..1be081f917a7 100644 --- a/drivers/staging/tidspbridge/core/tiomap3430.c +++ b/drivers/staging/tidspbridge/core/tiomap3430.c | |||
@@ -23,7 +23,6 @@ | |||
23 | #include <dspbridge/host_os.h> | 23 | #include <dspbridge/host_os.h> |
24 | #include <linux/mm.h> | 24 | #include <linux/mm.h> |
25 | #include <linux/mmzone.h> | 25 | #include <linux/mmzone.h> |
26 | #include <plat/control.h> | ||
27 | 26 | ||
28 | /* ----------------------------------- DSP/BIOS Bridge */ | 27 | /* ----------------------------------- DSP/BIOS Bridge */ |
29 | #include <dspbridge/dbdefs.h> | 28 | #include <dspbridge/dbdefs.h> |
@@ -35,6 +34,10 @@ | |||
35 | #include <dspbridge/drv.h> | 34 | #include <dspbridge/drv.h> |
36 | #include <dspbridge/sync.h> | 35 | #include <dspbridge/sync.h> |
37 | 36 | ||
37 | /* ------------------------------------ Hardware Abstraction Layer */ | ||
38 | #include <hw_defs.h> | ||
39 | #include <hw_mmu.h> | ||
40 | |||
38 | /* ----------------------------------- Link Driver */ | 41 | /* ----------------------------------- Link Driver */ |
39 | #include <dspbridge/dspdefs.h> | 42 | #include <dspbridge/dspdefs.h> |
40 | #include <dspbridge/dspchnl.h> | 43 | #include <dspbridge/dspchnl.h> |
@@ -47,6 +50,7 @@ | |||
47 | /* ----------------------------------- Platform Manager */ | 50 | /* ----------------------------------- Platform Manager */ |
48 | #include <dspbridge/dev.h> | 51 | #include <dspbridge/dev.h> |
49 | #include <dspbridge/dspapi.h> | 52 | #include <dspbridge/dspapi.h> |
53 | #include <dspbridge/dmm.h> | ||
50 | #include <dspbridge/wdt.h> | 54 | #include <dspbridge/wdt.h> |
51 | 55 | ||
52 | /* ----------------------------------- Local */ | 56 | /* ----------------------------------- Local */ |
@@ -67,6 +71,20 @@ | |||
67 | #define MMU_SMALL_PAGE_MASK 0xFFFFF000 | 71 | #define MMU_SMALL_PAGE_MASK 0xFFFFF000 |
68 | #define OMAP3_IVA2_BOOTADDR_MASK 0xFFFFFC00 | 72 | #define OMAP3_IVA2_BOOTADDR_MASK 0xFFFFFC00 |
69 | #define PAGES_II_LVL_TABLE 512 | 73 | #define PAGES_II_LVL_TABLE 512 |
74 | #define PHYS_TO_PAGE(phys) pfn_to_page((phys) >> PAGE_SHIFT) | ||
75 | |||
76 | /* | ||
77 | * This is a totally ugly layer violation, but needed until | ||
78 | * omap_ctrl_set_dsp_boot*() are provided. | ||
79 | */ | ||
80 | #define OMAP3_IVA2_BOOTMOD_IDLE 1 | ||
81 | #define OMAP2_CONTROL_GENERAL 0x270 | ||
82 | #define OMAP343X_CONTROL_IVA2_BOOTADDR (OMAP2_CONTROL_GENERAL + 0x0190) | ||
83 | #define OMAP343X_CONTROL_IVA2_BOOTMOD (OMAP2_CONTROL_GENERAL + 0x0194) | ||
84 | |||
85 | #define OMAP343X_CTRL_REGADDR(reg) \ | ||
86 | OMAP2_L4_IO_ADDRESS(OMAP343X_CTRL_BASE + (reg)) | ||
87 | |||
70 | 88 | ||
71 | /* Forward Declarations: */ | 89 | /* Forward Declarations: */ |
72 | static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt); | 90 | static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt); |
@@ -91,6 +109,12 @@ static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt, | |||
91 | static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt, | 109 | static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt, |
92 | u8 *host_buff, u32 dsp_addr, | 110 | u8 *host_buff, u32 dsp_addr, |
93 | u32 ul_num_bytes, u32 mem_type); | 111 | u32 ul_num_bytes, u32 mem_type); |
112 | static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt, | ||
113 | u32 ul_mpu_addr, u32 virt_addr, | ||
114 | u32 ul_num_bytes, u32 ul_map_attr, | ||
115 | struct page **mapped_pages); | ||
116 | static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt, | ||
117 | u32 virt_addr, u32 ul_num_bytes); | ||
94 | static int bridge_dev_create(struct bridge_dev_context | 118 | static int bridge_dev_create(struct bridge_dev_context |
95 | **dev_cntxt, | 119 | **dev_cntxt, |
96 | struct dev_object *hdev_obj, | 120 | struct dev_object *hdev_obj, |
@@ -98,8 +122,57 @@ static int bridge_dev_create(struct bridge_dev_context | |||
98 | static int bridge_dev_ctrl(struct bridge_dev_context *dev_context, | 122 | static int bridge_dev_ctrl(struct bridge_dev_context *dev_context, |
99 | u32 dw_cmd, void *pargs); | 123 | u32 dw_cmd, void *pargs); |
100 | static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt); | 124 | static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt); |
125 | static u32 user_va2_pa(struct mm_struct *mm, u32 address); | ||
126 | static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa, | ||
127 | u32 va, u32 size, | ||
128 | struct hw_mmu_map_attrs_t *map_attrs); | ||
129 | static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va, | ||
130 | u32 size, struct hw_mmu_map_attrs_t *attrs); | ||
131 | static int mem_map_vmalloc(struct bridge_dev_context *dev_context, | ||
132 | u32 ul_mpu_addr, u32 virt_addr, | ||
133 | u32 ul_num_bytes, | ||
134 | struct hw_mmu_map_attrs_t *hw_attrs); | ||
135 | |||
101 | bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr); | 136 | bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr); |
102 | 137 | ||
138 | /* ----------------------------------- Globals */ | ||
139 | |||
140 | /* Attributes of L2 page tables for DSP MMU */ | ||
141 | struct page_info { | ||
142 | u32 num_entries; /* Number of valid PTEs in the L2 PT */ | ||
143 | }; | ||
144 | |||
145 | /* Attributes used to manage the DSP MMU page tables */ | ||
146 | struct pg_table_attrs { | ||
147 | spinlock_t pg_lock; /* Critical section object handle */ | ||
148 | |||
149 | u32 l1_base_pa; /* Physical address of the L1 PT */ | ||
150 | u32 l1_base_va; /* Virtual address of the L1 PT */ | ||
151 | u32 l1_size; /* Size of the L1 PT */ | ||
152 | u32 l1_tbl_alloc_pa; | ||
153 | /* Physical address of Allocated mem for L1 table. May not be aligned */ | ||
154 | u32 l1_tbl_alloc_va; | ||
155 | /* Virtual address of Allocated mem for L1 table. May not be aligned */ | ||
156 | u32 l1_tbl_alloc_sz; | ||
157 | /* Size of consistent memory allocated for L1 table. | ||
158 | * May not be aligned */ | ||
159 | |||
160 | u32 l2_base_pa; /* Physical address of the L2 PT */ | ||
161 | u32 l2_base_va; /* Virtual address of the L2 PT */ | ||
162 | u32 l2_size; /* Size of the L2 PT */ | ||
163 | u32 l2_tbl_alloc_pa; | ||
164 | /* Physical address of Allocated mem for L2 table. May not be aligned */ | ||
165 | u32 l2_tbl_alloc_va; | ||
166 | /* Virtual address of Allocated mem for L2 table. May not be aligned */ | ||
167 | u32 l2_tbl_alloc_sz; | ||
168 | /* Size of consistent memory allocated for L2 table. | ||
169 | * May not be aligned */ | ||
170 | |||
171 | u32 l2_num_pages; /* Number of allocated L2 PT */ | ||
172 | /* Array [l2_num_pages] of L2 PT info structs */ | ||
173 | struct page_info *pg_info; | ||
174 | }; | ||
175 | |||
103 | /* | 176 | /* |
104 | * This Bridge driver's function interface table. | 177 | * This Bridge driver's function interface table. |
105 | */ | 178 | */ |
@@ -119,6 +192,8 @@ static struct bridge_drv_interface drv_interface_fxns = { | |||
119 | bridge_brd_set_state, | 192 | bridge_brd_set_state, |
120 | bridge_brd_mem_copy, | 193 | bridge_brd_mem_copy, |
121 | bridge_brd_mem_write, | 194 | bridge_brd_mem_write, |
195 | bridge_brd_mem_map, | ||
196 | bridge_brd_mem_un_map, | ||
122 | /* The following CHNL functions are provided by chnl_io.lib: */ | 197 | /* The following CHNL functions are provided by chnl_io.lib: */ |
123 | bridge_chnl_create, | 198 | bridge_chnl_create, |
124 | bridge_chnl_destroy, | 199 | bridge_chnl_destroy, |
@@ -148,6 +223,27 @@ static struct bridge_drv_interface drv_interface_fxns = { | |||
148 | bridge_msg_set_queue_id, | 223 | bridge_msg_set_queue_id, |
149 | }; | 224 | }; |
150 | 225 | ||
226 | static inline void flush_all(struct bridge_dev_context *dev_context) | ||
227 | { | ||
228 | if (dev_context->dw_brd_state == BRD_DSP_HIBERNATION || | ||
229 | dev_context->dw_brd_state == BRD_HIBERNATION) | ||
230 | wake_dsp(dev_context, NULL); | ||
231 | |||
232 | hw_mmu_tlb_flush_all(dev_context->dw_dsp_mmu_base); | ||
233 | } | ||
234 | |||
235 | static void bad_page_dump(u32 pa, struct page *pg) | ||
236 | { | ||
237 | pr_emerg("DSPBRIDGE: MAP function: COUNT 0 FOR PA 0x%x\n", pa); | ||
238 | pr_emerg("Bad page state in process '%s'\n" | ||
239 | "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n" | ||
240 | "Backtrace:\n", | ||
241 | current->comm, pg, (int)(2 * sizeof(unsigned long)), | ||
242 | (unsigned long)pg->flags, pg->mapping, | ||
243 | page_mapcount(pg), page_count(pg)); | ||
244 | dump_stack(); | ||
245 | } | ||
246 | |||
151 | /* | 247 | /* |
152 | * ======== bridge_drv_entry ======== | 248 | * ======== bridge_drv_entry ======== |
153 | * purpose: | 249 | * purpose: |
@@ -203,7 +299,8 @@ static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt) | |||
203 | (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO, | 299 | (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO, |
204 | OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL); | 300 | OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL); |
205 | } | 301 | } |
206 | 302 | (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0, | |
303 | OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); | ||
207 | dsp_clk_enable(DSP_CLK_IVA2); | 304 | dsp_clk_enable(DSP_CLK_IVA2); |
208 | 305 | ||
209 | /* set the device state to IDLE */ | 306 | /* set the device state to IDLE */ |
@@ -274,17 +371,14 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt, | |||
274 | { | 371 | { |
275 | int status = 0; | 372 | int status = 0; |
276 | struct bridge_dev_context *dev_context = dev_ctxt; | 373 | struct bridge_dev_context *dev_context = dev_ctxt; |
277 | struct iommu *mmu = NULL; | ||
278 | struct shm_segs *sm_sg; | ||
279 | int l4_i = 0, tlb_i = 0; | ||
280 | u32 sg0_da = 0, sg1_da = 0; | ||
281 | struct bridge_ioctl_extproc *tlb = dev_context->atlb_entry; | ||
282 | u32 dw_sync_addr = 0; | 374 | u32 dw_sync_addr = 0; |
283 | u32 ul_shm_base; /* Gpp Phys SM base addr(byte) */ | 375 | u32 ul_shm_base; /* Gpp Phys SM base addr(byte) */ |
284 | u32 ul_shm_base_virt; /* Dsp Virt SM base addr */ | 376 | u32 ul_shm_base_virt; /* Dsp Virt SM base addr */ |
285 | u32 ul_tlb_base_virt; /* Base of MMU TLB entry */ | 377 | u32 ul_tlb_base_virt; /* Base of MMU TLB entry */ |
286 | /* Offset of shm_base_virt from tlb_base_virt */ | 378 | /* Offset of shm_base_virt from tlb_base_virt */ |
287 | u32 ul_shm_offset_virt; | 379 | u32 ul_shm_offset_virt; |
380 | s32 entry_ndx; | ||
381 | s32 itmp_entry_ndx = 0; /* DSP-MMU TLB entry base address */ | ||
288 | struct cfg_hostres *resources = NULL; | 382 | struct cfg_hostres *resources = NULL; |
289 | u32 temp; | 383 | u32 temp; |
290 | u32 ul_dsp_clk_rate; | 384 | u32 ul_dsp_clk_rate; |
@@ -305,12 +399,12 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt, | |||
305 | ul_shm_base_virt *= DSPWORDSIZE; | 399 | ul_shm_base_virt *= DSPWORDSIZE; |
306 | DBC_ASSERT(ul_shm_base_virt != 0); | 400 | DBC_ASSERT(ul_shm_base_virt != 0); |
307 | /* DSP Virtual address */ | 401 | /* DSP Virtual address */ |
308 | ul_tlb_base_virt = dev_context->sh_s.seg0_da; | 402 | ul_tlb_base_virt = dev_context->atlb_entry[0].ul_dsp_va; |
309 | DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt); | 403 | DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt); |
310 | ul_shm_offset_virt = | 404 | ul_shm_offset_virt = |
311 | ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE); | 405 | ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE); |
312 | /* Kernel logical address */ | 406 | /* Kernel logical address */ |
313 | ul_shm_base = dev_context->sh_s.seg0_va + ul_shm_offset_virt; | 407 | ul_shm_base = dev_context->atlb_entry[0].ul_gpp_va + ul_shm_offset_virt; |
314 | 408 | ||
315 | DBC_ASSERT(ul_shm_base != 0); | 409 | DBC_ASSERT(ul_shm_base != 0); |
316 | /* 2nd wd is used as sync field */ | 410 | /* 2nd wd is used as sync field */ |
@@ -345,83 +439,78 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt, | |||
345 | OMAP343X_CONTROL_IVA2_BOOTMOD)); | 439 | OMAP343X_CONTROL_IVA2_BOOTMOD)); |
346 | } | 440 | } |
347 | } | 441 | } |
348 | |||
349 | if (!status) { | 442 | if (!status) { |
443 | /* Reset and Unreset the RST2, so that BOOTADDR is copied to | ||
444 | * IVA2 SYSC register */ | ||
445 | (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, | ||
446 | OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); | ||
447 | udelay(100); | ||
350 | (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0, | 448 | (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0, |
351 | OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); | 449 | OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); |
352 | mmu = dev_context->dsp_mmu; | 450 | udelay(100); |
353 | if (mmu) | 451 | |
354 | dsp_mmu_exit(mmu); | 452 | /* Disable the DSP MMU */ |
355 | mmu = dsp_mmu_init(); | 453 | hw_mmu_disable(resources->dw_dmmu_base); |
356 | if (IS_ERR(mmu)) { | 454 | /* Disable TWL */ |
357 | dev_err(bridge, "dsp_mmu_init failed!\n"); | 455 | hw_mmu_twl_disable(resources->dw_dmmu_base); |
358 | dev_context->dsp_mmu = NULL; | 456 | |
359 | status = (int)mmu; | 457 | /* Only make TLB entry if both addresses are non-zero */ |
360 | } | 458 | for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB; |
361 | } | 459 | entry_ndx++) { |
362 | if (!status) { | 460 | struct bridge_ioctl_extproc *e = &dev_context->atlb_entry[entry_ndx]; |
363 | dev_context->dsp_mmu = mmu; | 461 | struct hw_mmu_map_attrs_t map_attrs = { |
364 | sm_sg = &dev_context->sh_s; | 462 | .endianism = e->endianism, |
365 | sg0_da = iommu_kmap(mmu, sm_sg->seg0_da, sm_sg->seg0_pa, | 463 | .element_size = e->elem_size, |
366 | sm_sg->seg0_size, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32); | 464 | .mixed_size = e->mixed_mode, |
367 | if (IS_ERR_VALUE(sg0_da)) { | 465 | }; |
368 | status = (int)sg0_da; | 466 | |
369 | sg0_da = 0; | 467 | if (!e->ul_gpp_pa || !e->ul_dsp_va) |
370 | } | ||
371 | } | ||
372 | if (!status) { | ||
373 | sg1_da = iommu_kmap(mmu, sm_sg->seg1_da, sm_sg->seg1_pa, | ||
374 | sm_sg->seg1_size, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32); | ||
375 | if (IS_ERR_VALUE(sg1_da)) { | ||
376 | status = (int)sg1_da; | ||
377 | sg1_da = 0; | ||
378 | } | ||
379 | } | ||
380 | if (!status) { | ||
381 | u32 da; | ||
382 | for (tlb_i = 0; tlb_i < BRDIOCTL_NUMOFMMUTLB; tlb_i++) { | ||
383 | if (!tlb[tlb_i].ul_gpp_pa) | ||
384 | continue; | 468 | continue; |
385 | 469 | ||
386 | dev_dbg(bridge, "IOMMU %d GppPa: 0x%x DspVa 0x%x Size" | 470 | dev_dbg(bridge, |
387 | " 0x%x\n", tlb_i, tlb[tlb_i].ul_gpp_pa, | 471 | "MMU %d, pa: 0x%x, va: 0x%x, size: 0x%x", |
388 | tlb[tlb_i].ul_dsp_va, tlb[tlb_i].ul_size); | 472 | itmp_entry_ndx, |
389 | 473 | e->ul_gpp_pa, | |
390 | da = iommu_kmap(mmu, tlb[tlb_i].ul_dsp_va, | 474 | e->ul_dsp_va, |
391 | tlb[tlb_i].ul_gpp_pa, PAGE_SIZE, | 475 | e->ul_size); |
392 | IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32); | 476 | |
393 | if (IS_ERR_VALUE(da)) { | 477 | hw_mmu_tlb_add(dev_context->dw_dsp_mmu_base, |
394 | status = (int)da; | 478 | e->ul_gpp_pa, |
395 | break; | 479 | e->ul_dsp_va, |
396 | } | 480 | e->ul_size, |
397 | } | 481 | itmp_entry_ndx, |
398 | } | 482 | &map_attrs, 1, 1); |
399 | if (!status) { | 483 | |
400 | u32 da; | 484 | itmp_entry_ndx++; |
401 | l4_i = 0; | ||
402 | while (l4_peripheral_table[l4_i].phys_addr) { | ||
403 | da = iommu_kmap(mmu, l4_peripheral_table[l4_i]. | ||
404 | dsp_virt_addr, l4_peripheral_table[l4_i]. | ||
405 | phys_addr, PAGE_SIZE, | ||
406 | IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32); | ||
407 | if (IS_ERR_VALUE(da)) { | ||
408 | status = (int)da; | ||
409 | break; | ||
410 | } | ||
411 | l4_i++; | ||
412 | } | 485 | } |
413 | } | 486 | } |
414 | 487 | ||
415 | /* Lock the above TLB entries and get the BIOS and load monitor timer | 488 | /* Lock the above TLB entries and get the BIOS and load monitor timer |
416 | * information */ | 489 | * information */ |
417 | if (!status) { | 490 | if (!status) { |
491 | hw_mmu_num_locked_set(resources->dw_dmmu_base, itmp_entry_ndx); | ||
492 | hw_mmu_victim_num_set(resources->dw_dmmu_base, itmp_entry_ndx); | ||
493 | hw_mmu_ttb_set(resources->dw_dmmu_base, | ||
494 | dev_context->pt_attrs->l1_base_pa); | ||
495 | hw_mmu_twl_enable(resources->dw_dmmu_base); | ||
496 | /* Enable the SmartIdle and AutoIdle bit for MMU_SYSCONFIG */ | ||
497 | |||
498 | temp = __raw_readl((resources->dw_dmmu_base) + 0x10); | ||
499 | temp = (temp & 0xFFFFFFEF) | 0x11; | ||
500 | __raw_writel(temp, (resources->dw_dmmu_base) + 0x10); | ||
501 | |||
502 | /* Let the DSP MMU run */ | ||
503 | hw_mmu_enable(resources->dw_dmmu_base); | ||
504 | |||
418 | /* Enable the BIOS clock */ | 505 | /* Enable the BIOS clock */ |
419 | (void)dev_get_symbol(dev_context->hdev_obj, | 506 | (void)dev_get_symbol(dev_context->hdev_obj, |
420 | BRIDGEINIT_BIOSGPTIMER, &ul_bios_gp_timer); | 507 | BRIDGEINIT_BIOSGPTIMER, &ul_bios_gp_timer); |
421 | (void)dev_get_symbol(dev_context->hdev_obj, | 508 | (void)dev_get_symbol(dev_context->hdev_obj, |
422 | BRIDGEINIT_LOADMON_GPTIMER, | 509 | BRIDGEINIT_LOADMON_GPTIMER, |
423 | &ul_load_monitor_timer); | 510 | &ul_load_monitor_timer); |
511 | } | ||
424 | 512 | ||
513 | if (!status) { | ||
425 | if (ul_load_monitor_timer != 0xFFFF) { | 514 | if (ul_load_monitor_timer != 0xFFFF) { |
426 | clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) | | 515 | clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) | |
427 | ul_load_monitor_timer; | 516 | ul_load_monitor_timer; |
@@ -430,7 +519,9 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt, | |||
430 | dev_dbg(bridge, "Not able to get the symbol for Load " | 519 | dev_dbg(bridge, "Not able to get the symbol for Load " |
431 | "Monitor Timer\n"); | 520 | "Monitor Timer\n"); |
432 | } | 521 | } |
522 | } | ||
433 | 523 | ||
524 | if (!status) { | ||
434 | if (ul_bios_gp_timer != 0xFFFF) { | 525 | if (ul_bios_gp_timer != 0xFFFF) { |
435 | clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) | | 526 | clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) | |
436 | ul_bios_gp_timer; | 527 | ul_bios_gp_timer; |
@@ -439,7 +530,9 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt, | |||
439 | dev_dbg(bridge, | 530 | dev_dbg(bridge, |
440 | "Not able to get the symbol for BIOS Timer\n"); | 531 | "Not able to get the symbol for BIOS Timer\n"); |
441 | } | 532 | } |
533 | } | ||
442 | 534 | ||
535 | if (!status) { | ||
443 | /* Set the DSP clock rate */ | 536 | /* Set the DSP clock rate */ |
444 | (void)dev_get_symbol(dev_context->hdev_obj, | 537 | (void)dev_get_symbol(dev_context->hdev_obj, |
445 | "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr); | 538 | "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr); |
@@ -492,6 +585,9 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt, | |||
492 | 585 | ||
493 | /* Let DSP go */ | 586 | /* Let DSP go */ |
494 | dev_dbg(bridge, "%s Unreset\n", __func__); | 587 | dev_dbg(bridge, "%s Unreset\n", __func__); |
588 | /* Enable DSP MMU Interrupts */ | ||
589 | hw_mmu_event_enable(resources->dw_dmmu_base, | ||
590 | HW_MMU_ALL_INTERRUPTS); | ||
495 | /* release the RST1, DSP starts executing now .. */ | 591 | /* release the RST1, DSP starts executing now .. */ |
496 | (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0, | 592 | (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0, |
497 | OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); | 593 | OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); |
@@ -521,23 +617,11 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt, | |||
521 | 617 | ||
522 | /* update board state */ | 618 | /* update board state */ |
523 | dev_context->dw_brd_state = BRD_RUNNING; | 619 | dev_context->dw_brd_state = BRD_RUNNING; |
524 | return 0; | 620 | /* (void)chnlsm_enable_interrupt(dev_context); */ |
525 | } else { | 621 | } else { |
526 | dev_context->dw_brd_state = BRD_UNKNOWN; | 622 | dev_context->dw_brd_state = BRD_UNKNOWN; |
527 | } | 623 | } |
528 | } | 624 | } |
529 | |||
530 | while (tlb_i--) { | ||
531 | if (!tlb[tlb_i].ul_gpp_pa) | ||
532 | continue; | ||
533 | iommu_kunmap(mmu, tlb[tlb_i].ul_gpp_va); | ||
534 | } | ||
535 | while (l4_i--) | ||
536 | iommu_kunmap(mmu, l4_peripheral_table[l4_i].dsp_virt_addr); | ||
537 | if (sg0_da) | ||
538 | iommu_kunmap(mmu, sg0_da); | ||
539 | if (sg1_da) | ||
540 | iommu_kunmap(mmu, sg1_da); | ||
541 | return status; | 625 | return status; |
542 | } | 626 | } |
543 | 627 | ||
@@ -553,9 +637,8 @@ static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt) | |||
553 | { | 637 | { |
554 | int status = 0; | 638 | int status = 0; |
555 | struct bridge_dev_context *dev_context = dev_ctxt; | 639 | struct bridge_dev_context *dev_context = dev_ctxt; |
640 | struct pg_table_attrs *pt_attrs; | ||
556 | u32 dsp_pwr_state; | 641 | u32 dsp_pwr_state; |
557 | int i; | ||
558 | struct bridge_ioctl_extproc *tlb = dev_context->atlb_entry; | ||
559 | struct omap_dsp_platform_data *pdata = | 642 | struct omap_dsp_platform_data *pdata = |
560 | omap_dspbridge_dev->dev.platform_data; | 643 | omap_dspbridge_dev->dev.platform_data; |
561 | 644 | ||
@@ -591,37 +674,23 @@ static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt) | |||
591 | 674 | ||
592 | dsp_wdt_enable(false); | 675 | dsp_wdt_enable(false); |
593 | 676 | ||
594 | /* Reset DSP */ | 677 | /* This is a good place to clear the MMU page tables as well */ |
595 | (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, | 678 | if (dev_context->pt_attrs) { |
596 | OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); | 679 | pt_attrs = dev_context->pt_attrs; |
597 | 680 | memset((u8 *) pt_attrs->l1_base_va, 0x00, pt_attrs->l1_size); | |
681 | memset((u8 *) pt_attrs->l2_base_va, 0x00, pt_attrs->l2_size); | ||
682 | memset((u8 *) pt_attrs->pg_info, 0x00, | ||
683 | (pt_attrs->l2_num_pages * sizeof(struct page_info))); | ||
684 | } | ||
598 | /* Disable the mailbox interrupts */ | 685 | /* Disable the mailbox interrupts */ |
599 | if (dev_context->mbox) { | 686 | if (dev_context->mbox) { |
600 | omap_mbox_disable_irq(dev_context->mbox, IRQ_RX); | 687 | omap_mbox_disable_irq(dev_context->mbox, IRQ_RX); |
601 | omap_mbox_put(dev_context->mbox); | 688 | omap_mbox_put(dev_context->mbox); |
602 | dev_context->mbox = NULL; | 689 | dev_context->mbox = NULL; |
603 | } | 690 | } |
604 | if (dev_context->dsp_mmu) { | 691 | /* Reset IVA2 clocks*/ |
605 | pr_err("Proc stop mmu if statement\n"); | 692 | (*pdata->dsp_prm_write)(OMAP3430_RST1_IVA2_MASK | OMAP3430_RST2_IVA2_MASK | |
606 | for (i = 0; i < BRDIOCTL_NUMOFMMUTLB; i++) { | 693 | OMAP3430_RST3_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); |
607 | if (!tlb[i].ul_gpp_pa) | ||
608 | continue; | ||
609 | iommu_kunmap(dev_context->dsp_mmu, tlb[i].ul_gpp_va); | ||
610 | } | ||
611 | i = 0; | ||
612 | while (l4_peripheral_table[i].phys_addr) { | ||
613 | iommu_kunmap(dev_context->dsp_mmu, | ||
614 | l4_peripheral_table[i].dsp_virt_addr); | ||
615 | i++; | ||
616 | } | ||
617 | iommu_kunmap(dev_context->dsp_mmu, dev_context->sh_s.seg0_da); | ||
618 | iommu_kunmap(dev_context->dsp_mmu, dev_context->sh_s.seg1_da); | ||
619 | dsp_mmu_exit(dev_context->dsp_mmu); | ||
620 | dev_context->dsp_mmu = NULL; | ||
621 | } | ||
622 | /* Reset IVA IOMMU*/ | ||
623 | (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, | ||
624 | OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); | ||
625 | 694 | ||
626 | dsp_clock_disable_all(dev_context->dsp_per_clks); | 695 | dsp_clock_disable_all(dev_context->dsp_per_clks); |
627 | dsp_clk_disable(DSP_CLK_IVA2); | 696 | dsp_clk_disable(DSP_CLK_IVA2); |
@@ -681,6 +750,10 @@ static int bridge_dev_create(struct bridge_dev_context | |||
681 | struct bridge_dev_context *dev_context = NULL; | 750 | struct bridge_dev_context *dev_context = NULL; |
682 | s32 entry_ndx; | 751 | s32 entry_ndx; |
683 | struct cfg_hostres *resources = config_param; | 752 | struct cfg_hostres *resources = config_param; |
753 | struct pg_table_attrs *pt_attrs; | ||
754 | u32 pg_tbl_pa; | ||
755 | u32 pg_tbl_va; | ||
756 | u32 align_size; | ||
684 | struct drv_data *drv_datap = dev_get_drvdata(bridge); | 757 | struct drv_data *drv_datap = dev_get_drvdata(bridge); |
685 | 758 | ||
686 | /* Allocate and initialize a data structure to contain the bridge driver | 759 | /* Allocate and initialize a data structure to contain the bridge driver |
@@ -711,8 +784,97 @@ static int bridge_dev_create(struct bridge_dev_context | |||
711 | if (!dev_context->dw_dsp_base_addr) | 784 | if (!dev_context->dw_dsp_base_addr) |
712 | status = -EPERM; | 785 | status = -EPERM; |
713 | 786 | ||
787 | pt_attrs = kzalloc(sizeof(struct pg_table_attrs), GFP_KERNEL); | ||
788 | if (pt_attrs != NULL) { | ||
789 | /* Assuming that we use only DSP's memory map | ||
790 | * until 0x4000:0000, we would need only 1024 | ||
791 | * L1 entries, i.e. L1 size = 4K */ | ||
792 | pt_attrs->l1_size = 0x1000; | ||
793 | align_size = pt_attrs->l1_size; | ||
794 | /* Align sizes are expected to be power of 2 */ | ||
795 | /* we like to get aligned on L1 table size */ | ||
796 | pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l1_size, | ||
797 | align_size, &pg_tbl_pa); | ||
798 | |||
799 | /* Check if the PA is aligned for us */ | ||
800 | if ((pg_tbl_pa) & (align_size - 1)) { | ||
801 | /* PA not aligned to page table size , | ||
802 | * try with more allocation and align */ | ||
803 | mem_free_phys_mem((void *)pg_tbl_va, pg_tbl_pa, | ||
804 | pt_attrs->l1_size); | ||
805 | /* we like to get aligned on L1 table size */ | ||
806 | pg_tbl_va = | ||
807 | (u32) mem_alloc_phys_mem((pt_attrs->l1_size) * 2, | ||
808 | align_size, &pg_tbl_pa); | ||
809 | /* We should be able to get aligned table now */ | ||
810 | pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa; | ||
811 | pt_attrs->l1_tbl_alloc_va = pg_tbl_va; | ||
812 | pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size * 2; | ||
813 | /* Align the PA to the next 'align' boundary */ | ||
814 | pt_attrs->l1_base_pa = | ||
815 | ((pg_tbl_pa) + | ||
816 | (align_size - 1)) & (~(align_size - 1)); | ||
817 | pt_attrs->l1_base_va = | ||
818 | pg_tbl_va + (pt_attrs->l1_base_pa - pg_tbl_pa); | ||
819 | } else { | ||
820 | /* We got aligned PA, cool */ | ||
821 | pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa; | ||
822 | pt_attrs->l1_tbl_alloc_va = pg_tbl_va; | ||
823 | pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size; | ||
824 | pt_attrs->l1_base_pa = pg_tbl_pa; | ||
825 | pt_attrs->l1_base_va = pg_tbl_va; | ||
826 | } | ||
827 | if (pt_attrs->l1_base_va) | ||
828 | memset((u8 *) pt_attrs->l1_base_va, 0x00, | ||
829 | pt_attrs->l1_size); | ||
830 | |||
831 | /* number of L2 page tables = DMM pool used + SHMMEM +EXTMEM + | ||
832 | * L4 pages */ | ||
833 | pt_attrs->l2_num_pages = ((DMMPOOLSIZE >> 20) + 6); | ||
834 | pt_attrs->l2_size = HW_MMU_COARSE_PAGE_SIZE * | ||
835 | pt_attrs->l2_num_pages; | ||
836 | align_size = 4; /* Make it u32 aligned */ | ||
837 | /* we like to get aligned on L1 table size */ | ||
838 | pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l2_size, | ||
839 | align_size, &pg_tbl_pa); | ||
840 | pt_attrs->l2_tbl_alloc_pa = pg_tbl_pa; | ||
841 | pt_attrs->l2_tbl_alloc_va = pg_tbl_va; | ||
842 | pt_attrs->l2_tbl_alloc_sz = pt_attrs->l2_size; | ||
843 | pt_attrs->l2_base_pa = pg_tbl_pa; | ||
844 | pt_attrs->l2_base_va = pg_tbl_va; | ||
845 | |||
846 | if (pt_attrs->l2_base_va) | ||
847 | memset((u8 *) pt_attrs->l2_base_va, 0x00, | ||
848 | pt_attrs->l2_size); | ||
849 | |||
850 | pt_attrs->pg_info = kzalloc(pt_attrs->l2_num_pages * | ||
851 | sizeof(struct page_info), GFP_KERNEL); | ||
852 | dev_dbg(bridge, | ||
853 | "L1 pa %x, va %x, size %x\n L2 pa %x, va " | ||
854 | "%x, size %x\n", pt_attrs->l1_base_pa, | ||
855 | pt_attrs->l1_base_va, pt_attrs->l1_size, | ||
856 | pt_attrs->l2_base_pa, pt_attrs->l2_base_va, | ||
857 | pt_attrs->l2_size); | ||
858 | dev_dbg(bridge, "pt_attrs %p L2 NumPages %x pg_info %p\n", | ||
859 | pt_attrs, pt_attrs->l2_num_pages, pt_attrs->pg_info); | ||
860 | } | ||
861 | if ((pt_attrs != NULL) && (pt_attrs->l1_base_va != 0) && | ||
862 | (pt_attrs->l2_base_va != 0) && (pt_attrs->pg_info != NULL)) | ||
863 | dev_context->pt_attrs = pt_attrs; | ||
864 | else | ||
865 | status = -ENOMEM; | ||
866 | |||
714 | if (!status) { | 867 | if (!status) { |
868 | spin_lock_init(&pt_attrs->pg_lock); | ||
715 | dev_context->tc_word_swap_on = drv_datap->tc_wordswapon; | 869 | dev_context->tc_word_swap_on = drv_datap->tc_wordswapon; |
870 | |||
871 | /* Set the Clock Divisor for the DSP module */ | ||
872 | udelay(5); | ||
873 | /* MMU address is obtained from the host | ||
874 | * resources struct */ | ||
875 | dev_context->dw_dsp_mmu_base = resources->dw_dmmu_base; | ||
876 | } | ||
877 | if (!status) { | ||
716 | dev_context->hdev_obj = hdev_obj; | 878 | dev_context->hdev_obj = hdev_obj; |
717 | /* Store current board state. */ | 879 | /* Store current board state. */ |
718 | dev_context->dw_brd_state = BRD_UNKNOWN; | 880 | dev_context->dw_brd_state = BRD_UNKNOWN; |
@@ -722,6 +884,23 @@ static int bridge_dev_create(struct bridge_dev_context | |||
722 | /* Return ptr to our device state to the DSP API for storage */ | 884 | /* Return ptr to our device state to the DSP API for storage */ |
723 | *dev_cntxt = dev_context; | 885 | *dev_cntxt = dev_context; |
724 | } else { | 886 | } else { |
887 | if (pt_attrs != NULL) { | ||
888 | kfree(pt_attrs->pg_info); | ||
889 | |||
890 | if (pt_attrs->l2_tbl_alloc_va) { | ||
891 | mem_free_phys_mem((void *) | ||
892 | pt_attrs->l2_tbl_alloc_va, | ||
893 | pt_attrs->l2_tbl_alloc_pa, | ||
894 | pt_attrs->l2_tbl_alloc_sz); | ||
895 | } | ||
896 | if (pt_attrs->l1_tbl_alloc_va) { | ||
897 | mem_free_phys_mem((void *) | ||
898 | pt_attrs->l1_tbl_alloc_va, | ||
899 | pt_attrs->l1_tbl_alloc_pa, | ||
900 | pt_attrs->l1_tbl_alloc_sz); | ||
901 | } | ||
902 | } | ||
903 | kfree(pt_attrs); | ||
725 | kfree(dev_context); | 904 | kfree(dev_context); |
726 | } | 905 | } |
727 | func_end: | 906 | func_end: |
@@ -789,6 +968,7 @@ static int bridge_dev_ctrl(struct bridge_dev_context *dev_context, | |||
789 | */ | 968 | */ |
790 | static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt) | 969 | static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt) |
791 | { | 970 | { |
971 | struct pg_table_attrs *pt_attrs; | ||
792 | int status = 0; | 972 | int status = 0; |
793 | struct bridge_dev_context *dev_context = (struct bridge_dev_context *) | 973 | struct bridge_dev_context *dev_context = (struct bridge_dev_context *) |
794 | dev_ctxt; | 974 | dev_ctxt; |
@@ -802,6 +982,23 @@ static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt) | |||
802 | 982 | ||
803 | /* first put the device to stop state */ | 983 | /* first put the device to stop state */ |
804 | bridge_brd_stop(dev_context); | 984 | bridge_brd_stop(dev_context); |
985 | if (dev_context->pt_attrs) { | ||
986 | pt_attrs = dev_context->pt_attrs; | ||
987 | kfree(pt_attrs->pg_info); | ||
988 | |||
989 | if (pt_attrs->l2_tbl_alloc_va) { | ||
990 | mem_free_phys_mem((void *)pt_attrs->l2_tbl_alloc_va, | ||
991 | pt_attrs->l2_tbl_alloc_pa, | ||
992 | pt_attrs->l2_tbl_alloc_sz); | ||
993 | } | ||
994 | if (pt_attrs->l1_tbl_alloc_va) { | ||
995 | mem_free_phys_mem((void *)pt_attrs->l1_tbl_alloc_va, | ||
996 | pt_attrs->l1_tbl_alloc_pa, | ||
997 | pt_attrs->l1_tbl_alloc_sz); | ||
998 | } | ||
999 | kfree(pt_attrs); | ||
1000 | |||
1001 | } | ||
805 | 1002 | ||
806 | if (dev_context->resources) { | 1003 | if (dev_context->resources) { |
807 | host_res = dev_context->resources; | 1004 | host_res = dev_context->resources; |
@@ -832,6 +1029,8 @@ static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt) | |||
832 | iounmap((void *)host_res->dw_mem_base[3]); | 1029 | iounmap((void *)host_res->dw_mem_base[3]); |
833 | if (host_res->dw_mem_base[4]) | 1030 | if (host_res->dw_mem_base[4]) |
834 | iounmap((void *)host_res->dw_mem_base[4]); | 1031 | iounmap((void *)host_res->dw_mem_base[4]); |
1032 | if (host_res->dw_dmmu_base) | ||
1033 | iounmap(host_res->dw_dmmu_base); | ||
835 | if (host_res->dw_per_base) | 1034 | if (host_res->dw_per_base) |
836 | iounmap(host_res->dw_per_base); | 1035 | iounmap(host_res->dw_per_base); |
837 | if (host_res->dw_per_pm_base) | 1036 | if (host_res->dw_per_pm_base) |
@@ -845,6 +1044,7 @@ static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt) | |||
845 | host_res->dw_mem_base[2] = (u32) NULL; | 1044 | host_res->dw_mem_base[2] = (u32) NULL; |
846 | host_res->dw_mem_base[3] = (u32) NULL; | 1045 | host_res->dw_mem_base[3] = (u32) NULL; |
847 | host_res->dw_mem_base[4] = (u32) NULL; | 1046 | host_res->dw_mem_base[4] = (u32) NULL; |
1047 | host_res->dw_dmmu_base = NULL; | ||
848 | host_res->dw_sys_ctrl_base = NULL; | 1048 | host_res->dw_sys_ctrl_base = NULL; |
849 | 1049 | ||
850 | kfree(host_res); | 1050 | kfree(host_res); |
@@ -928,6 +1128,673 @@ static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt, | |||
928 | } | 1128 | } |
929 | 1129 | ||
930 | /* | 1130 | /* |
1131 | * ======== bridge_brd_mem_map ======== | ||
1132 | * This function maps MPU buffer to the DSP address space. It performs | ||
1133 | * linear to physical address translation if required. It translates each | ||
1134 | * page since linear addresses can be physically non-contiguous | ||
1135 | * All address & size arguments are assumed to be page aligned (in proc.c) | ||
1136 | * | ||
1137 | * TODO: Disable MMU while updating the page tables (but that'll stall DSP) | ||
1138 | */ | ||
1139 | static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt, | ||
1140 | u32 ul_mpu_addr, u32 virt_addr, | ||
1141 | u32 ul_num_bytes, u32 ul_map_attr, | ||
1142 | struct page **mapped_pages) | ||
1143 | { | ||
1144 | u32 attrs; | ||
1145 | int status = 0; | ||
1146 | struct bridge_dev_context *dev_context = dev_ctxt; | ||
1147 | struct hw_mmu_map_attrs_t hw_attrs; | ||
1148 | struct vm_area_struct *vma; | ||
1149 | struct mm_struct *mm = current->mm; | ||
1150 | u32 write = 0; | ||
1151 | u32 num_usr_pgs = 0; | ||
1152 | struct page *mapped_page, *pg; | ||
1153 | s32 pg_num; | ||
1154 | u32 va = virt_addr; | ||
1155 | struct task_struct *curr_task = current; | ||
1156 | u32 pg_i = 0; | ||
1157 | u32 mpu_addr, pa; | ||
1158 | |||
1159 | dev_dbg(bridge, | ||
1160 | "%s hDevCtxt %p, pa %x, va %x, size %x, ul_map_attr %x\n", | ||
1161 | __func__, dev_ctxt, ul_mpu_addr, virt_addr, ul_num_bytes, | ||
1162 | ul_map_attr); | ||
1163 | if (ul_num_bytes == 0) | ||
1164 | return -EINVAL; | ||
1165 | |||
1166 | if (ul_map_attr & DSP_MAP_DIR_MASK) { | ||
1167 | attrs = ul_map_attr; | ||
1168 | } else { | ||
1169 | /* Assign default attributes */ | ||
1170 | attrs = ul_map_attr | (DSP_MAPVIRTUALADDR | DSP_MAPELEMSIZE16); | ||
1171 | } | ||
1172 | /* Take mapping properties */ | ||
1173 | if (attrs & DSP_MAPBIGENDIAN) | ||
1174 | hw_attrs.endianism = HW_BIG_ENDIAN; | ||
1175 | else | ||
1176 | hw_attrs.endianism = HW_LITTLE_ENDIAN; | ||
1177 | |||
1178 | hw_attrs.mixed_size = (enum hw_mmu_mixed_size_t) | ||
1179 | ((attrs & DSP_MAPMIXEDELEMSIZE) >> 2); | ||
1180 | /* Ignore element_size if mixed_size is enabled */ | ||
1181 | if (hw_attrs.mixed_size == 0) { | ||
1182 | if (attrs & DSP_MAPELEMSIZE8) { | ||
1183 | /* Size is 8 bit */ | ||
1184 | hw_attrs.element_size = HW_ELEM_SIZE8BIT; | ||
1185 | } else if (attrs & DSP_MAPELEMSIZE16) { | ||
1186 | /* Size is 16 bit */ | ||
1187 | hw_attrs.element_size = HW_ELEM_SIZE16BIT; | ||
1188 | } else if (attrs & DSP_MAPELEMSIZE32) { | ||
1189 | /* Size is 32 bit */ | ||
1190 | hw_attrs.element_size = HW_ELEM_SIZE32BIT; | ||
1191 | } else if (attrs & DSP_MAPELEMSIZE64) { | ||
1192 | /* Size is 64 bit */ | ||
1193 | hw_attrs.element_size = HW_ELEM_SIZE64BIT; | ||
1194 | } else { | ||
1195 | /* | ||
1196 | * Mixedsize isn't enabled, so size can't be | ||
1197 | * zero here | ||
1198 | */ | ||
1199 | return -EINVAL; | ||
1200 | } | ||
1201 | } | ||
1202 | if (attrs & DSP_MAPDONOTLOCK) | ||
1203 | hw_attrs.donotlockmpupage = 1; | ||
1204 | else | ||
1205 | hw_attrs.donotlockmpupage = 0; | ||
1206 | |||
1207 | if (attrs & DSP_MAPVMALLOCADDR) { | ||
1208 | return mem_map_vmalloc(dev_ctxt, ul_mpu_addr, virt_addr, | ||
1209 | ul_num_bytes, &hw_attrs); | ||
1210 | } | ||
1211 | /* | ||
1212 | * Do OS-specific user-va to pa translation. | ||
1213 | * Combine physically contiguous regions to reduce TLBs. | ||
1214 | * Pass the translated pa to pte_update. | ||
1215 | */ | ||
1216 | if ((attrs & DSP_MAPPHYSICALADDR)) { | ||
1217 | status = pte_update(dev_context, ul_mpu_addr, virt_addr, | ||
1218 | ul_num_bytes, &hw_attrs); | ||
1219 | goto func_cont; | ||
1220 | } | ||
1221 | |||
1222 | /* | ||
1223 | * Important Note: ul_mpu_addr is mapped from user application process | ||
1224 | * to current process - it must lie completely within the current | ||
1225 | * virtual memory address space in order to be of use to us here! | ||
1226 | */ | ||
1227 | down_read(&mm->mmap_sem); | ||
1228 | vma = find_vma(mm, ul_mpu_addr); | ||
1229 | if (vma) | ||
1230 | dev_dbg(bridge, | ||
1231 | "VMAfor UserBuf: ul_mpu_addr=%x, ul_num_bytes=%x, " | ||
1232 | "vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr, | ||
1233 | ul_num_bytes, vma->vm_start, vma->vm_end, | ||
1234 | vma->vm_flags); | ||
1235 | |||
1236 | /* | ||
1237 | * It is observed that under some circumstances, the user buffer is | ||
1238 | * spread across several VMAs. So loop through and check if the entire | ||
1239 | * user buffer is covered | ||
1240 | */ | ||
1241 | while ((vma) && (ul_mpu_addr + ul_num_bytes > vma->vm_end)) { | ||
1242 | /* jump to the next VMA region */ | ||
1243 | vma = find_vma(mm, vma->vm_end + 1); | ||
1244 | dev_dbg(bridge, | ||
1245 | "VMA for UserBuf ul_mpu_addr=%x ul_num_bytes=%x, " | ||
1246 | "vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr, | ||
1247 | ul_num_bytes, vma->vm_start, vma->vm_end, | ||
1248 | vma->vm_flags); | ||
1249 | } | ||
1250 | if (!vma) { | ||
1251 | pr_err("%s: Failed to get VMA region for 0x%x (%d)\n", | ||
1252 | __func__, ul_mpu_addr, ul_num_bytes); | ||
1253 | status = -EINVAL; | ||
1254 | up_read(&mm->mmap_sem); | ||
1255 | goto func_cont; | ||
1256 | } | ||
1257 | |||
1258 | if (vma->vm_flags & VM_IO) { | ||
1259 | num_usr_pgs = ul_num_bytes / PG_SIZE4K; | ||
1260 | mpu_addr = ul_mpu_addr; | ||
1261 | |||
1262 | /* Get the physical addresses for user buffer */ | ||
1263 | for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) { | ||
1264 | pa = user_va2_pa(mm, mpu_addr); | ||
1265 | if (!pa) { | ||
1266 | status = -EPERM; | ||
1267 | pr_err("DSPBRIDGE: VM_IO mapping physical " | ||
1268 | "address is invalid\n"); | ||
1269 | break; | ||
1270 | } | ||
1271 | if (pfn_valid(__phys_to_pfn(pa))) { | ||
1272 | pg = PHYS_TO_PAGE(pa); | ||
1273 | get_page(pg); | ||
1274 | if (page_count(pg) < 1) { | ||
1275 | pr_err("Bad page in VM_IO buffer\n"); | ||
1276 | bad_page_dump(pa, pg); | ||
1277 | } | ||
1278 | } | ||
1279 | status = pte_set(dev_context->pt_attrs, pa, | ||
1280 | va, HW_PAGE_SIZE4KB, &hw_attrs); | ||
1281 | if (status) | ||
1282 | break; | ||
1283 | |||
1284 | va += HW_PAGE_SIZE4KB; | ||
1285 | mpu_addr += HW_PAGE_SIZE4KB; | ||
1286 | pa += HW_PAGE_SIZE4KB; | ||
1287 | } | ||
1288 | } else { | ||
1289 | num_usr_pgs = ul_num_bytes / PG_SIZE4K; | ||
1290 | if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) | ||
1291 | write = 1; | ||
1292 | |||
1293 | for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) { | ||
1294 | pg_num = get_user_pages(curr_task, mm, ul_mpu_addr, 1, | ||
1295 | write, 1, &mapped_page, NULL); | ||
1296 | if (pg_num > 0) { | ||
1297 | if (page_count(mapped_page) < 1) { | ||
1298 | pr_err("Bad page count after doing " | ||
1299 | "get_user_pages on " | ||
1300 | "user buffer\n"); | ||
1301 | bad_page_dump(page_to_phys(mapped_page), | ||
1302 | mapped_page); | ||
1303 | } | ||
1304 | status = pte_set(dev_context->pt_attrs, | ||
1305 | page_to_phys(mapped_page), va, | ||
1306 | HW_PAGE_SIZE4KB, &hw_attrs); | ||
1307 | if (status) | ||
1308 | break; | ||
1309 | |||
1310 | if (mapped_pages) | ||
1311 | mapped_pages[pg_i] = mapped_page; | ||
1312 | |||
1313 | va += HW_PAGE_SIZE4KB; | ||
1314 | ul_mpu_addr += HW_PAGE_SIZE4KB; | ||
1315 | } else { | ||
1316 | pr_err("DSPBRIDGE: get_user_pages FAILED, " | ||
1317 | "MPU addr = 0x%x, " | ||
1318 | "vma->vm_flags = 0x%lx, " | ||
1319 | "get_user_pages Err " | ||
1320 | "Value = %d, Buffer " | ||
1321 | "size=0x%x\n", ul_mpu_addr, | ||
1322 | vma->vm_flags, pg_num, ul_num_bytes); | ||
1323 | status = -EPERM; | ||
1324 | break; | ||
1325 | } | ||
1326 | } | ||
1327 | } | ||
1328 | up_read(&mm->mmap_sem); | ||
1329 | func_cont: | ||
1330 | if (status) { | ||
1331 | /* | ||
1332 | * Roll back the mapped pages in case it failed in the middle of | ||
1333 | * mapping | ||
1334 | */ | ||
1335 | if (pg_i) { | ||
1336 | bridge_brd_mem_un_map(dev_context, virt_addr, | ||
1337 | (pg_i * PG_SIZE4K)); | ||
1338 | } | ||
1339 | status = -EPERM; | ||
1340 | } | ||
1341 | /* | ||
1342 | * In any case, flush the TLB | ||
1343 | * This is called from here instead of from pte_update to avoid unnecessary | ||
1344 | * repetition while mapping non-contiguous physical regions of a virtual | ||
1345 | * region | ||
1346 | */ | ||
1347 | flush_all(dev_context); | ||
1348 | dev_dbg(bridge, "%s status %x\n", __func__, status); | ||
1349 | return status; | ||
1350 | } | ||
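
A note on the attribute decoding at the top of bridge_brd_mem_map(): ul_map_attr is a plain OR of the DSP_MAP* flags tested above, and when none of the direction bits are present the function falls back to DSP_MAPVIRTUALADDR | DSP_MAPELEMSIZE16. A minimal illustrative fragment (the particular combination is hypothetical; only the flag names come from the code above):

/* Hypothetical attribute word: the source address is already physical,
 * big-endian view, 32-bit elements. With DSP_MAPPHYSICALADDR set,
 * bridge_brd_mem_map() skips the user-VMA walk and hands the region
 * straight to pte_update(). */
u32 map_attr = DSP_MAPPHYSICALADDR | DSP_MAPBIGENDIAN | DSP_MAPELEMSIZE32;
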
1351 | |||
1352 | /* | ||
1353 | * ======== bridge_brd_mem_un_map ======== | ||
1354 | * Invalidate the PTEs for the DSP VA block to be unmapped. | ||
1355 | * | ||
1356 | * PTEs of a mapped memory block are contiguous in any page table | ||
1357 | * So, instead of looking up the PTE address for every 4K block, | ||
1358 | * we clear consecutive PTEs until we unmap all the bytes | ||
1359 | */ | ||
1360 | static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt, | ||
1361 | u32 virt_addr, u32 ul_num_bytes) | ||
1362 | { | ||
1363 | u32 l1_base_va; | ||
1364 | u32 l2_base_va; | ||
1365 | u32 l2_base_pa; | ||
1366 | u32 l2_page_num; | ||
1367 | u32 pte_val; | ||
1368 | u32 pte_size; | ||
1369 | u32 pte_count; | ||
1370 | u32 pte_addr_l1; | ||
1371 | u32 pte_addr_l2 = 0; | ||
1372 | u32 rem_bytes; | ||
1373 | u32 rem_bytes_l2; | ||
1374 | u32 va_curr; | ||
1375 | struct page *pg = NULL; | ||
1376 | int status = 0; | ||
1377 | struct bridge_dev_context *dev_context = dev_ctxt; | ||
1378 | struct pg_table_attrs *pt = dev_context->pt_attrs; | ||
1379 | u32 temp; | ||
1380 | u32 paddr; | ||
1381 | u32 numof4k_pages = 0; | ||
1382 | |||
1383 | va_curr = virt_addr; | ||
1384 | rem_bytes = ul_num_bytes; | ||
1385 | rem_bytes_l2 = 0; | ||
1386 | l1_base_va = pt->l1_base_va; | ||
1387 | pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr); | ||
1388 | dev_dbg(bridge, "%s dev_ctxt %p, va %x, NumBytes %x l1_base_va %x, " | ||
1389 | "pte_addr_l1 %x\n", __func__, dev_ctxt, virt_addr, | ||
1390 | ul_num_bytes, l1_base_va, pte_addr_l1); | ||
1391 | |||
1392 | while (rem_bytes && !status) { | ||
1393 | u32 va_curr_orig = va_curr; | ||
1394 | /* Find whether the L1 PTE points to a valid L2 PT */ | ||
1395 | pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr); | ||
1396 | pte_val = *(u32 *) pte_addr_l1; | ||
1397 | pte_size = hw_mmu_pte_size_l1(pte_val); | ||
1398 | |||
1399 | if (pte_size != HW_MMU_COARSE_PAGE_SIZE) | ||
1400 | goto skip_coarse_page; | ||
1401 | |||
1402 | /* | ||
1403 | * Get the L2 PA from the L1 PTE, and find | ||
1404 | * corresponding L2 VA | ||
1405 | */ | ||
1406 | l2_base_pa = hw_mmu_pte_coarse_l1(pte_val); | ||
1407 | l2_base_va = l2_base_pa - pt->l2_base_pa + pt->l2_base_va; | ||
1408 | l2_page_num = | ||
1409 | (l2_base_pa - pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE; | ||
1410 | /* | ||
1411 | * Find the L2 PTE address from which we will start | ||
1412 | * clearing, the number of PTEs to be cleared on this | ||
1413 | * page, and the size of VA space that needs to be | ||
1414 | * cleared on this L2 page | ||
1415 | */ | ||
1416 | pte_addr_l2 = hw_mmu_pte_addr_l2(l2_base_va, va_curr); | ||
1417 | pte_count = pte_addr_l2 & (HW_MMU_COARSE_PAGE_SIZE - 1); | ||
1418 | pte_count = (HW_MMU_COARSE_PAGE_SIZE - pte_count) / sizeof(u32); | ||
1419 | if (rem_bytes < (pte_count * PG_SIZE4K)) | ||
1420 | pte_count = rem_bytes / PG_SIZE4K; | ||
1421 | rem_bytes_l2 = pte_count * PG_SIZE4K; | ||
1422 | |||
1423 | /* | ||
1424 | * Unmap the VA space on this L2 PT. A quicker way | ||
1425 | * would be to clear pte_count entries starting from | ||
1426 | * pte_addr_l2. However, below code checks that we don't | ||
1427 | * clear invalid entries or less than 64KB for a 64KB | ||
1428 | * entry. Similar checking is done for L1 PTEs too | ||
1429 | * below | ||
1430 | */ | ||
1431 | while (rem_bytes_l2 && !status) { | ||
1432 | pte_val = *(u32 *) pte_addr_l2; | ||
1433 | pte_size = hw_mmu_pte_size_l2(pte_val); | ||
1434 | /* va_curr aligned to pte_size? */ | ||
1435 | if (pte_size == 0 || rem_bytes_l2 < pte_size || | ||
1436 | va_curr & (pte_size - 1)) { | ||
1437 | status = -EPERM; | ||
1438 | break; | ||
1439 | } | ||
1440 | |||
1441 | /* Collect Physical addresses from VA */ | ||
1442 | paddr = (pte_val & ~(pte_size - 1)); | ||
1443 | if (pte_size == HW_PAGE_SIZE64KB) | ||
1444 | numof4k_pages = 16; | ||
1445 | else | ||
1446 | numof4k_pages = 1; | ||
1447 | temp = 0; | ||
1448 | while (temp++ < numof4k_pages) { | ||
1449 | if (!pfn_valid(__phys_to_pfn(paddr))) { | ||
1450 | paddr += HW_PAGE_SIZE4KB; | ||
1451 | continue; | ||
1452 | } | ||
1453 | pg = PHYS_TO_PAGE(paddr); | ||
1454 | if (page_count(pg) < 1) { | ||
1455 | pr_info("DSPBRIDGE: UNMAP function: " | ||
1456 | "COUNT 0 FOR PA 0x%x, size = " | ||
1457 | "0x%x\n", paddr, ul_num_bytes); | ||
1458 | bad_page_dump(paddr, pg); | ||
1459 | } else { | ||
1460 | set_page_dirty(pg); | ||
1461 | page_cache_release(pg); | ||
1462 | } | ||
1463 | paddr += HW_PAGE_SIZE4KB; | ||
1464 | } | ||
1465 | if (hw_mmu_pte_clear(pte_addr_l2, va_curr, pte_size)) { | ||
1466 | status = -EPERM; | ||
1467 | goto EXIT_LOOP; | ||
1468 | } | ||
1469 | |||
1470 | status = 0; | ||
1471 | rem_bytes_l2 -= pte_size; | ||
1472 | va_curr += pte_size; | ||
1473 | pte_addr_l2 += (pte_size >> 12) * sizeof(u32); | ||
1474 | } | ||
1475 | spin_lock(&pt->pg_lock); | ||
1476 | if (rem_bytes_l2 == 0) { | ||
1477 | pt->pg_info[l2_page_num].num_entries -= pte_count; | ||
1478 | if (pt->pg_info[l2_page_num].num_entries == 0) { | ||
1479 | /* | ||
1480 | * Clear the L1 PTE pointing to the L2 PT | ||
1481 | */ | ||
1482 | if (!hw_mmu_pte_clear(l1_base_va, va_curr_orig, | ||
1483 | HW_MMU_COARSE_PAGE_SIZE)) | ||
1484 | status = 0; | ||
1485 | else { | ||
1486 | status = -EPERM; | ||
1487 | spin_unlock(&pt->pg_lock); | ||
1488 | goto EXIT_LOOP; | ||
1489 | } | ||
1490 | } | ||
1491 | rem_bytes -= pte_count * PG_SIZE4K; | ||
1492 | } else | ||
1493 | status = -EPERM; | ||
1494 | |||
1495 | spin_unlock(&pt->pg_lock); | ||
1496 | continue; | ||
1497 | skip_coarse_page: | ||
1498 | /* va_curr aligned to pte_size? */ | ||
1499 | /* pte_size = 1 MB or 16 MB */ | ||
1500 | if (pte_size == 0 || rem_bytes < pte_size || | ||
1501 | va_curr & (pte_size - 1)) { | ||
1502 | status = -EPERM; | ||
1503 | break; | ||
1504 | } | ||
1505 | |||
1506 | if (pte_size == HW_PAGE_SIZE1MB) | ||
1507 | numof4k_pages = 256; | ||
1508 | else | ||
1509 | numof4k_pages = 4096; | ||
1510 | temp = 0; | ||
1511 | /* Collect Physical addresses from VA */ | ||
1512 | paddr = (pte_val & ~(pte_size - 1)); | ||
1513 | while (temp++ < numof4k_pages) { | ||
1514 | if (pfn_valid(__phys_to_pfn(paddr))) { | ||
1515 | pg = PHYS_TO_PAGE(paddr); | ||
1516 | if (page_count(pg) < 1) { | ||
1517 | pr_info("DSPBRIDGE: UNMAP function: " | ||
1518 | "COUNT 0 FOR PA 0x%x, size = " | ||
1519 | "0x%x\n", paddr, ul_num_bytes); | ||
1520 | bad_page_dump(paddr, pg); | ||
1521 | } else { | ||
1522 | set_page_dirty(pg); | ||
1523 | page_cache_release(pg); | ||
1524 | } | ||
1525 | } | ||
1526 | paddr += HW_PAGE_SIZE4KB; | ||
1527 | } | ||
1528 | if (!hw_mmu_pte_clear(l1_base_va, va_curr, pte_size)) { | ||
1529 | status = 0; | ||
1530 | rem_bytes -= pte_size; | ||
1531 | va_curr += pte_size; | ||
1532 | } else { | ||
1533 | status = -EPERM; | ||
1534 | goto EXIT_LOOP; | ||
1535 | } | ||
1536 | } | ||
1537 | /* | ||
1538 | * It is better to flush the TLB here, so that any stale old entries | ||
1539 | * get flushed | ||
1540 | */ | ||
1541 | EXIT_LOOP: | ||
1542 | flush_all(dev_context); | ||
1543 | dev_dbg(bridge, | ||
1544 | "%s: va_curr %x, pte_addr_l1 %x pte_addr_l2 %x rem_bytes %x," | ||
1545 | " rem_bytes_l2 %x status %x\n", __func__, va_curr, pte_addr_l1, | ||
1546 | pte_addr_l2, rem_bytes, rem_bytes_l2, status); | ||
1547 | return status; | ||
1548 | } | ||
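
The per-L2-table arithmetic above (pte_count, rem_bytes_l2) is easier to follow with concrete numbers. The standalone sketch below reproduces just that computation; it assumes HW_MMU_COARSE_PAGE_SIZE is 0x400 (a 256-entry, 4-bytes-per-entry L2 table, consistent with the masks used elsewhere in this patch), and the example values are hypothetical.

#include <stdio.h>

#define COARSE_PAGE_SIZE 0x400	/* one L2 table: 256 entries * 4 bytes (assumed) */
#define PG_SIZE4K 0x1000

int main(void)
{
	/* Hypothetical: the first L2 PTE to clear sits 0x30 bytes into its
	 * table, and 40 KB (0xa000) remain to be unmapped. */
	unsigned int pte_addr_l2 = 0xc0801030;
	unsigned int rem_bytes = 0xa000;
	unsigned int pte_count;

	pte_count = pte_addr_l2 & (COARSE_PAGE_SIZE - 1);		/* 0x30 */
	pte_count = (COARSE_PAGE_SIZE - pte_count) / sizeof(unsigned int); /* 244 */
	if (rem_bytes < pte_count * PG_SIZE4K)
		pte_count = rem_bytes / PG_SIZE4K;			/* 10 */

	printf("%u PTEs cleared on this L2 page\n", pte_count);
	return 0;
}
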
1549 | |||
1550 | /* | ||
1551 | * ======== user_va2_pa ======== | ||
1552 | * Purpose: | ||
1553 | * This function walks through the page tables to convert a userland | ||
1554 | * virtual address to physical address | ||
1555 | */ | ||
1556 | static u32 user_va2_pa(struct mm_struct *mm, u32 address) | ||
1557 | { | ||
1558 | pgd_t *pgd; | ||
1559 | pmd_t *pmd; | ||
1560 | pte_t *ptep, pte; | ||
1561 | |||
1562 | pgd = pgd_offset(mm, address); | ||
1563 | if (!(pgd_none(*pgd) || pgd_bad(*pgd))) { | ||
1564 | pmd = pmd_offset(pgd, address); | ||
1565 | if (!(pmd_none(*pmd) || pmd_bad(*pmd))) { | ||
1566 | ptep = pte_offset_map(pmd, address); | ||
1567 | if (ptep) { | ||
1568 | pte = *ptep; | ||
1569 | if (pte_present(pte)) | ||
1570 | return pte & PAGE_MASK; | ||
1571 | } | ||
1572 | } | ||
1573 | } | ||
1574 | |||
1575 | return 0; | ||
1576 | } | ||
1577 | |||
1578 | /* | ||
1579 | * ======== pte_update ======== | ||
1580 | * This function calculates the optimum page-aligned addresses and sizes | ||
1581 | * Caller must pass page-aligned values | ||
1582 | */ | ||
1583 | static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa, | ||
1584 | u32 va, u32 size, | ||
1585 | struct hw_mmu_map_attrs_t *map_attrs) | ||
1586 | { | ||
1587 | u32 i; | ||
1588 | u32 all_bits; | ||
1589 | u32 pa_curr = pa; | ||
1590 | u32 va_curr = va; | ||
1591 | u32 num_bytes = size; | ||
1592 | struct bridge_dev_context *dev_context = dev_ctxt; | ||
1593 | int status = 0; | ||
1594 | u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB, | ||
1595 | HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB | ||
1596 | }; | ||
1597 | |||
1598 | while (num_bytes && !status) { | ||
1599 | /* To find the max. page size with which both PA & VA are | ||
1600 | * aligned */ | ||
1601 | all_bits = pa_curr | va_curr; | ||
1602 | |||
1603 | for (i = 0; i < 4; i++) { | ||
1604 | if ((num_bytes >= page_size[i]) && ((all_bits & | ||
1605 | (page_size[i] - | ||
1606 | 1)) == 0)) { | ||
1607 | status = | ||
1608 | pte_set(dev_context->pt_attrs, pa_curr, | ||
1609 | va_curr, page_size[i], map_attrs); | ||
1610 | pa_curr += page_size[i]; | ||
1611 | va_curr += page_size[i]; | ||
1612 | num_bytes -= page_size[i]; | ||
1613 | /* Don't try smaller sizes. Hopefully we have | ||
1614 | * reached an address aligned to a bigger page | ||
1615 | * size */ | ||
1616 | break; | ||
1617 | } | ||
1618 | } | ||
1619 | } | ||
1620 | |||
1621 | return status; | ||
1622 | } | ||
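
pte_update() always tries the largest MMU page whose alignment both the physical and the DSP-virtual cursors satisfy, falling through 16 MB, 1 MB, 64 KB and finally 4 KB. A standalone sketch of that selection (page-size values are the HW_PAGE_SIZE* constants from hw_defs.h added later in this patch; the example addresses are hypothetical):

#include <stdio.h>

/* Page sizes from hw_defs.h, largest first, as in pte_update() above */
static const unsigned int page_size[] = {
	0x1000000, 0x100000, 0x10000, 0x1000
};

static unsigned int largest_fit(unsigned int pa, unsigned int va,
				unsigned int num_bytes)
{
	unsigned int all_bits = pa | va;
	int i;

	for (i = 0; i < 4; i++)
		if (num_bytes >= page_size[i] &&
		    !(all_bits & (page_size[i] - 1)))
			return page_size[i];
	return 0;
}

int main(void)
{
	/* Both cursors are 1 MB aligned and 2 MB remain: a 16 MB entry does
	 * not fit, so two 1 MB entries will be used. */
	printf("%#x\n", largest_fit(0x87100000, 0x20100000, 0x200000));
	return 0;
}
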
1623 | |||
1624 | /* | ||
1625 | * ======== pte_set ======== | ||
1626 | * This function calculates PTE address (MPU virtual) to be updated | ||
1627 | * It also manages the L2 page tables | ||
1628 | */ | ||
1629 | static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va, | ||
1630 | u32 size, struct hw_mmu_map_attrs_t *attrs) | ||
1631 | { | ||
1632 | u32 i; | ||
1633 | u32 pte_val; | ||
1634 | u32 pte_addr_l1; | ||
1635 | u32 pte_size; | ||
1636 | /* Base address of the PT that will be updated */ | ||
1637 | u32 pg_tbl_va; | ||
1638 | u32 l1_base_va; | ||
1639 | /* Compiler warns that the next three variables might be used | ||
1640 | * uninitialized in this function. Doesn't seem so. Working around, | ||
1641 | * anyways. */ | ||
1642 | u32 l2_base_va = 0; | ||
1643 | u32 l2_base_pa = 0; | ||
1644 | u32 l2_page_num = 0; | ||
1645 | int status = 0; | ||
1646 | |||
1647 | l1_base_va = pt->l1_base_va; | ||
1648 | pg_tbl_va = l1_base_va; | ||
1649 | if ((size == HW_PAGE_SIZE64KB) || (size == HW_PAGE_SIZE4KB)) { | ||
1650 | /* Find whether the L1 PTE points to a valid L2 PT */ | ||
1651 | pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va); | ||
1652 | if (pte_addr_l1 <= (pt->l1_base_va + pt->l1_size)) { | ||
1653 | pte_val = *(u32 *) pte_addr_l1; | ||
1654 | pte_size = hw_mmu_pte_size_l1(pte_val); | ||
1655 | } else { | ||
1656 | return -EPERM; | ||
1657 | } | ||
1658 | spin_lock(&pt->pg_lock); | ||
1659 | if (pte_size == HW_MMU_COARSE_PAGE_SIZE) { | ||
1660 | /* Get the L2 PA from the L1 PTE, and find | ||
1661 | * corresponding L2 VA */ | ||
1662 | l2_base_pa = hw_mmu_pte_coarse_l1(pte_val); | ||
1663 | l2_base_va = | ||
1664 | l2_base_pa - pt->l2_base_pa + pt->l2_base_va; | ||
1665 | l2_page_num = | ||
1666 | (l2_base_pa - | ||
1667 | pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE; | ||
1668 | } else if (pte_size == 0) { | ||
1669 | /* L1 PTE is invalid. Allocate a L2 PT and | ||
1670 | * point the L1 PTE to it */ | ||
1671 | /* Find a free L2 PT. */ | ||
1672 | for (i = 0; (i < pt->l2_num_pages) && | ||
1673 | (pt->pg_info[i].num_entries != 0); i++) | ||
1674 | ; | ||
1675 | if (i < pt->l2_num_pages) { | ||
1676 | l2_page_num = i; | ||
1677 | l2_base_pa = pt->l2_base_pa + (l2_page_num * | ||
1678 | HW_MMU_COARSE_PAGE_SIZE); | ||
1679 | l2_base_va = pt->l2_base_va + (l2_page_num * | ||
1680 | HW_MMU_COARSE_PAGE_SIZE); | ||
1681 | /* Endianness attributes are ignored for | ||
1682 | * HW_MMU_COARSE_PAGE_SIZE */ | ||
1683 | status = | ||
1684 | hw_mmu_pte_set(l1_base_va, l2_base_pa, va, | ||
1685 | HW_MMU_COARSE_PAGE_SIZE, | ||
1686 | attrs); | ||
1687 | } else { | ||
1688 | status = -ENOMEM; | ||
1689 | } | ||
1690 | } else { | ||
1691 | /* Found valid L1 PTE of another size. | ||
1692 | * Should not overwrite it. */ | ||
1693 | status = -EPERM; | ||
1694 | } | ||
1695 | if (!status) { | ||
1696 | pg_tbl_va = l2_base_va; | ||
1697 | if (size == HW_PAGE_SIZE64KB) | ||
1698 | pt->pg_info[l2_page_num].num_entries += 16; | ||
1699 | else | ||
1700 | pt->pg_info[l2_page_num].num_entries++; | ||
1701 | dev_dbg(bridge, "PTE: L2 BaseVa %x, BasePa %x, PageNum " | ||
1702 | "%x, num_entries %x\n", l2_base_va, | ||
1703 | l2_base_pa, l2_page_num, | ||
1704 | pt->pg_info[l2_page_num].num_entries); | ||
1705 | } | ||
1706 | spin_unlock(&pt->pg_lock); | ||
1707 | } | ||
1708 | if (!status) { | ||
1709 | dev_dbg(bridge, "PTE: pg_tbl_va %x, pa %x, va %x, size %x\n", | ||
1710 | pg_tbl_va, pa, va, size); | ||
1711 | dev_dbg(bridge, "PTE: endianism %x, element_size %x, " | ||
1712 | "mixed_size %x\n", attrs->endianism, | ||
1713 | attrs->element_size, attrs->mixed_size); | ||
1714 | status = hw_mmu_pte_set(pg_tbl_va, pa, va, size, attrs); | ||
1715 | } | ||
1716 | |||
1717 | return status; | ||
1718 | } | ||
1719 | |||
1720 | /* Memory map kernel VA -- memory allocated with vmalloc */ | ||
1721 | static int mem_map_vmalloc(struct bridge_dev_context *dev_context, | ||
1722 | u32 ul_mpu_addr, u32 virt_addr, | ||
1723 | u32 ul_num_bytes, | ||
1724 | struct hw_mmu_map_attrs_t *hw_attrs) | ||
1725 | { | ||
1726 | int status = 0; | ||
1727 | struct page *page[1]; | ||
1728 | u32 i; | ||
1729 | u32 pa_curr; | ||
1730 | u32 pa_next; | ||
1731 | u32 va_curr; | ||
1732 | u32 size_curr; | ||
1733 | u32 num_pages; | ||
1734 | u32 pa; | ||
1735 | u32 num_of4k_pages; | ||
1736 | u32 temp = 0; | ||
1737 | |||
1738 | /* | ||
1739 | * Do Kernel va to pa translation. | ||
1740 | * Combine physically contiguous regions to reduce TLBs. | ||
1741 | * Pass the translated pa to pte_update. | ||
1742 | */ | ||
1743 | num_pages = ul_num_bytes / PAGE_SIZE; /* PAGE_SIZE = OS page size */ | ||
1744 | i = 0; | ||
1745 | va_curr = ul_mpu_addr; | ||
1746 | page[0] = vmalloc_to_page((void *)va_curr); | ||
1747 | pa_next = page_to_phys(page[0]); | ||
1748 | while (!status && (i < num_pages)) { | ||
1749 | /* | ||
1750 | * Reuse pa_next from the previous iteration to avoid | ||
1751 | * an extra va2pa call | ||
1752 | */ | ||
1753 | pa_curr = pa_next; | ||
1754 | size_curr = PAGE_SIZE; | ||
1755 | /* | ||
1756 | * If the next page is physically contiguous, | ||
1757 | * map it with the current one by increasing | ||
1758 | * the size of the region to be mapped | ||
1759 | */ | ||
1760 | while (++i < num_pages) { | ||
1761 | page[0] = | ||
1762 | vmalloc_to_page((void *)(va_curr + size_curr)); | ||
1763 | pa_next = page_to_phys(page[0]); | ||
1764 | |||
1765 | if (pa_next == (pa_curr + size_curr)) | ||
1766 | size_curr += PAGE_SIZE; | ||
1767 | else | ||
1768 | break; | ||
1769 | |||
1770 | } | ||
1771 | if (pa_next == 0) { | ||
1772 | status = -ENOMEM; | ||
1773 | break; | ||
1774 | } | ||
1775 | pa = pa_curr; | ||
1776 | num_of4k_pages = size_curr / HW_PAGE_SIZE4KB; | ||
1777 | while (temp++ < num_of4k_pages) { | ||
1778 | get_page(PHYS_TO_PAGE(pa)); | ||
1779 | pa += HW_PAGE_SIZE4KB; | ||
1780 | } | ||
1781 | status = pte_update(dev_context, pa_curr, virt_addr + | ||
1782 | (va_curr - ul_mpu_addr), size_curr, | ||
1783 | hw_attrs); | ||
1784 | va_curr += size_curr; | ||
1785 | } | ||
1786 | /* | ||
1787 | * In any case, flush the TLB | ||
1788 | * This is called from here instead of from pte_update to avoid unnecessary | ||
1789 | * repetition while mapping non-contiguous physical regions of a virtual | ||
1790 | * region | ||
1791 | */ | ||
1792 | flush_all(dev_context); | ||
1793 | dev_dbg(bridge, "%s status %x\n", __func__, status); | ||
1794 | return status; | ||
1795 | } | ||
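
The inner loop of mem_map_vmalloc() merges physically contiguous vmalloc pages so that pte_update() sees fewer, larger regions (which in turn lets it pick bigger MMU pages). A condensed standalone sketch of the merge logic, with vmalloc_to_page()/page_to_phys() replaced by a hypothetical table of per-page physical addresses:

#include <stdio.h>

#define PAGE_SIZE 0x1000

/* Hypothetical physical addresses of four consecutive vmalloc pages:
 * the first three happen to be contiguous, the fourth is not. */
static const unsigned int phys[] = {
	0x88000000, 0x88001000, 0x88002000, 0x8a000000
};

int main(void)
{
	unsigned int i = 0, num_pages = 4;

	while (i < num_pages) {
		unsigned int pa_curr = phys[i];
		unsigned int size_curr = PAGE_SIZE;

		/* Grow the region while the next page is physically contiguous */
		while (++i < num_pages && phys[i] == pa_curr + size_curr)
			size_curr += PAGE_SIZE;

		printf("map pa %#x, size %#x\n", pa_curr, size_curr);
	}
	return 0;
}
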
1796 | |||
1797 | /* | ||
931 | * ======== wait_for_start ======== | 1798 | * ======== wait_for_start ======== |
932 | * Wait for the signal from DSP that it has started, or time out. | 1799 | * Wait for the signal from DSP that it has started, or time out. |
933 | */ | 1800 | */ |
diff --git a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c index b57a9fd5e757..fb9026e1403c 100644 --- a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c +++ b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c | |||
@@ -31,6 +31,10 @@ | |||
31 | #include <dspbridge/dev.h> | 31 | #include <dspbridge/dev.h> |
32 | #include <dspbridge/iodefs.h> | 32 | #include <dspbridge/iodefs.h> |
33 | 33 | ||
34 | /* ------------------------------------ Hardware Abstraction Layer */ | ||
35 | #include <hw_defs.h> | ||
36 | #include <hw_mmu.h> | ||
37 | |||
34 | #include <dspbridge/pwr_sh.h> | 38 | #include <dspbridge/pwr_sh.h> |
35 | 39 | ||
36 | /* ----------------------------------- Bridge Driver */ | 40 | /* ----------------------------------- Bridge Driver */ |
diff --git a/drivers/staging/tidspbridge/core/tiomap_io.c b/drivers/staging/tidspbridge/core/tiomap_io.c index 66dbf02549e4..ba2961049dad 100644 --- a/drivers/staging/tidspbridge/core/tiomap_io.c +++ b/drivers/staging/tidspbridge/core/tiomap_io.c | |||
@@ -134,16 +134,17 @@ int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt, | |||
134 | 134 | ||
135 | if (!status) { | 135 | if (!status) { |
136 | ul_tlb_base_virt = | 136 | ul_tlb_base_virt = |
137 | dev_context->sh_s.seg0_da * DSPWORDSIZE; | 137 | dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE; |
138 | DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt); | 138 | DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt); |
139 | dw_ext_prog_virt_mem = dev_context->sh_s.seg0_va; | 139 | dw_ext_prog_virt_mem = |
140 | dev_context->atlb_entry[0].ul_gpp_va; | ||
140 | 141 | ||
141 | if (!trace_read) { | 142 | if (!trace_read) { |
142 | ul_shm_offset_virt = | 143 | ul_shm_offset_virt = |
143 | ul_shm_base_virt - ul_tlb_base_virt; | 144 | ul_shm_base_virt - ul_tlb_base_virt; |
144 | ul_shm_offset_virt += | 145 | ul_shm_offset_virt += |
145 | PG_ALIGN_HIGH(ul_ext_end - ul_dyn_ext_base + | 146 | PG_ALIGN_HIGH(ul_ext_end - ul_dyn_ext_base + |
146 | 1, PAGE_SIZE * 16); | 147 | 1, HW_PAGE_SIZE64KB); |
147 | dw_ext_prog_virt_mem -= ul_shm_offset_virt; | 148 | dw_ext_prog_virt_mem -= ul_shm_offset_virt; |
148 | dw_ext_prog_virt_mem += | 149 | dw_ext_prog_virt_mem += |
149 | (ul_ext_base - ul_dyn_ext_base); | 150 | (ul_ext_base - ul_dyn_ext_base); |
@@ -317,9 +318,8 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context, | |||
317 | ret = -EPERM; | 318 | ret = -EPERM; |
318 | 319 | ||
319 | if (!ret) { | 320 | if (!ret) { |
320 | ul_tlb_base_virt = dev_context->sh_s.seg0_da * | 321 | ul_tlb_base_virt = |
321 | DSPWORDSIZE; | 322 | dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE; |
322 | |||
323 | DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt); | 323 | DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt); |
324 | 324 | ||
325 | if (symbols_reloaded) { | 325 | if (symbols_reloaded) { |
@@ -337,7 +337,7 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context, | |||
337 | ul_shm_base_virt - ul_tlb_base_virt; | 337 | ul_shm_base_virt - ul_tlb_base_virt; |
338 | if (trace_load) { | 338 | if (trace_load) { |
339 | dw_ext_prog_virt_mem = | 339 | dw_ext_prog_virt_mem = |
340 | dev_context->sh_s.seg0_va; | 340 | dev_context->atlb_entry[0].ul_gpp_va; |
341 | } else { | 341 | } else { |
342 | dw_ext_prog_virt_mem = host_res->dw_mem_base[1]; | 342 | dw_ext_prog_virt_mem = host_res->dw_mem_base[1]; |
343 | dw_ext_prog_virt_mem += | 343 | dw_ext_prog_virt_mem += |
@@ -393,6 +393,7 @@ int sm_interrupt_dsp(struct bridge_dev_context *dev_context, u16 mb_val) | |||
393 | omap_dspbridge_dev->dev.platform_data; | 393 | omap_dspbridge_dev->dev.platform_data; |
394 | struct cfg_hostres *resources = dev_context->resources; | 394 | struct cfg_hostres *resources = dev_context->resources; |
395 | int status = 0; | 395 | int status = 0; |
396 | u32 temp; | ||
396 | 397 | ||
397 | if (!dev_context->mbox) | 398 | if (!dev_context->mbox) |
398 | return 0; | 399 | return 0; |
@@ -436,7 +437,7 @@ int sm_interrupt_dsp(struct bridge_dev_context *dev_context, u16 mb_val) | |||
436 | omap_mbox_restore_ctx(dev_context->mbox); | 437 | omap_mbox_restore_ctx(dev_context->mbox); |
437 | 438 | ||
438 | /* Access MMU SYS CONFIG register to generate a short wakeup */ | 439 | /* Access MMU SYS CONFIG register to generate a short wakeup */ |
439 | iommu_read_reg(dev_context->dsp_mmu, MMU_SYSCONFIG); | 440 | temp = readl(resources->dw_dmmu_base + 0x10); |
440 | 441 | ||
441 | dev_context->dw_brd_state = BRD_RUNNING; | 442 | dev_context->dw_brd_state = BRD_RUNNING; |
442 | } else if (dev_context->dw_brd_state == BRD_RETENTION) { | 443 | } else if (dev_context->dw_brd_state == BRD_RETENTION) { |
diff --git a/drivers/staging/tidspbridge/core/ue_deh.c b/drivers/staging/tidspbridge/core/ue_deh.c index e24ea0c73914..3430418190da 100644 --- a/drivers/staging/tidspbridge/core/ue_deh.c +++ b/drivers/staging/tidspbridge/core/ue_deh.c | |||
@@ -31,6 +31,57 @@ | |||
31 | #include <dspbridge/drv.h> | 31 | #include <dspbridge/drv.h> |
32 | #include <dspbridge/wdt.h> | 32 | #include <dspbridge/wdt.h> |
33 | 33 | ||
34 | static u32 fault_addr; | ||
35 | |||
36 | static void mmu_fault_dpc(unsigned long data) | ||
37 | { | ||
38 | struct deh_mgr *deh = (void *)data; | ||
39 | |||
40 | if (!deh) | ||
41 | return; | ||
42 | |||
43 | bridge_deh_notify(deh, DSP_MMUFAULT, 0); | ||
44 | } | ||
45 | |||
46 | static irqreturn_t mmu_fault_isr(int irq, void *data) | ||
47 | { | ||
48 | struct deh_mgr *deh = data; | ||
49 | struct cfg_hostres *resources; | ||
50 | u32 event; | ||
51 | |||
52 | if (!deh) | ||
53 | return IRQ_HANDLED; | ||
54 | |||
55 | resources = deh->hbridge_context->resources; | ||
56 | if (!resources) { | ||
57 | dev_dbg(bridge, "%s: Failed to get Host Resources\n", | ||
58 | __func__); | ||
59 | return IRQ_HANDLED; | ||
60 | } | ||
61 | |||
62 | hw_mmu_event_status(resources->dw_dmmu_base, &event); | ||
63 | if (event == HW_MMU_TRANSLATION_FAULT) { | ||
64 | hw_mmu_fault_addr_read(resources->dw_dmmu_base, &fault_addr); | ||
65 | dev_dbg(bridge, "%s: event=0x%x, fault_addr=0x%x\n", __func__, | ||
66 | event, fault_addr); | ||
67 | /* | ||
68 | * Schedule a DPC directly. In the future, it may be | ||
69 | * necessary to check if DSP MMU fault is intended for | ||
70 | * Bridge. | ||
71 | */ | ||
72 | tasklet_schedule(&deh->dpc_tasklet); | ||
73 | |||
74 | /* Disable the MMU events, else once we clear it will | ||
75 | * start to raise INTs again */ | ||
76 | hw_mmu_event_disable(resources->dw_dmmu_base, | ||
77 | HW_MMU_TRANSLATION_FAULT); | ||
78 | } else { | ||
79 | hw_mmu_event_disable(resources->dw_dmmu_base, | ||
80 | HW_MMU_ALL_INTERRUPTS); | ||
81 | } | ||
82 | return IRQ_HANDLED; | ||
83 | } | ||
84 | |||
34 | int bridge_deh_create(struct deh_mgr **ret_deh, | 85 | int bridge_deh_create(struct deh_mgr **ret_deh, |
35 | struct dev_object *hdev_obj) | 86 | struct dev_object *hdev_obj) |
36 | { | 87 | { |
@@ -58,9 +109,18 @@ int bridge_deh_create(struct deh_mgr **ret_deh, | |||
58 | } | 109 | } |
59 | ntfy_init(deh->ntfy_obj); | 110 | ntfy_init(deh->ntfy_obj); |
60 | 111 | ||
112 | /* Create a MMUfault DPC */ | ||
113 | tasklet_init(&deh->dpc_tasklet, mmu_fault_dpc, (u32) deh); | ||
114 | |||
61 | /* Fill in context structure */ | 115 | /* Fill in context structure */ |
62 | deh->hbridge_context = hbridge_context; | 116 | deh->hbridge_context = hbridge_context; |
63 | 117 | ||
118 | /* Install ISR function for DSP MMU fault */ | ||
119 | status = request_irq(INT_DSP_MMU_IRQ, mmu_fault_isr, 0, | ||
120 | "DspBridge\tiommu fault", deh); | ||
121 | if (status < 0) | ||
122 | goto err; | ||
123 | |||
64 | *ret_deh = deh; | 124 | *ret_deh = deh; |
65 | return 0; | 125 | return 0; |
66 | 126 | ||
@@ -80,6 +140,11 @@ int bridge_deh_destroy(struct deh_mgr *deh) | |||
80 | ntfy_delete(deh->ntfy_obj); | 140 | ntfy_delete(deh->ntfy_obj); |
81 | kfree(deh->ntfy_obj); | 141 | kfree(deh->ntfy_obj); |
82 | } | 142 | } |
143 | /* Disable DSP MMU fault */ | ||
144 | free_irq(INT_DSP_MMU_IRQ, deh); | ||
145 | |||
146 | /* Free DPC object */ | ||
147 | tasklet_kill(&deh->dpc_tasklet); | ||
83 | 148 | ||
84 | /* Deallocate the DEH manager object */ | 149 | /* Deallocate the DEH manager object */ |
85 | kfree(deh); | 150 | kfree(deh); |
@@ -101,6 +166,48 @@ int bridge_deh_register_notify(struct deh_mgr *deh, u32 event_mask, | |||
101 | return ntfy_unregister(deh->ntfy_obj, hnotification); | 166 | return ntfy_unregister(deh->ntfy_obj, hnotification); |
102 | } | 167 | } |
103 | 168 | ||
169 | #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE | ||
170 | static void mmu_fault_print_stack(struct bridge_dev_context *dev_context) | ||
171 | { | ||
172 | struct cfg_hostres *resources; | ||
173 | struct hw_mmu_map_attrs_t map_attrs = { | ||
174 | .endianism = HW_LITTLE_ENDIAN, | ||
175 | .element_size = HW_ELEM_SIZE16BIT, | ||
176 | .mixed_size = HW_MMU_CPUES, | ||
177 | }; | ||
178 | void *dummy_va_addr; | ||
179 | |||
180 | resources = dev_context->resources; | ||
181 | dummy_va_addr = (void *)__get_free_page(GFP_ATOMIC); | ||
182 | |||
183 | /* | ||
184 | * Before acking the MMU fault, let's make sure MMU can only | ||
185 | * access entry #0. Then add a new entry so that the DSP OS | ||
186 | * can continue in order to dump the stack. | ||
187 | */ | ||
188 | hw_mmu_twl_disable(resources->dw_dmmu_base); | ||
189 | hw_mmu_tlb_flush_all(resources->dw_dmmu_base); | ||
190 | |||
191 | hw_mmu_tlb_add(resources->dw_dmmu_base, | ||
192 | virt_to_phys(dummy_va_addr), fault_addr, | ||
193 | HW_PAGE_SIZE4KB, 1, | ||
194 | &map_attrs, HW_SET, HW_SET); | ||
195 | |||
196 | dsp_clk_enable(DSP_CLK_GPT8); | ||
197 | |||
198 | dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe); | ||
199 | |||
200 | /* Clear MMU interrupt */ | ||
201 | hw_mmu_event_ack(resources->dw_dmmu_base, | ||
202 | HW_MMU_TRANSLATION_FAULT); | ||
203 | dump_dsp_stack(dev_context); | ||
204 | dsp_clk_disable(DSP_CLK_GPT8); | ||
205 | |||
206 | hw_mmu_disable(resources->dw_dmmu_base); | ||
207 | free_page((unsigned long)dummy_va_addr); | ||
208 | } | ||
209 | #endif | ||
210 | |||
104 | static inline const char *event_to_string(int event) | 211 | static inline const char *event_to_string(int event) |
105 | { | 212 | { |
106 | switch (event) { | 213 | switch (event) { |
@@ -133,7 +240,13 @@ void bridge_deh_notify(struct deh_mgr *deh, int event, int info) | |||
133 | #endif | 240 | #endif |
134 | break; | 241 | break; |
135 | case DSP_MMUFAULT: | 242 | case DSP_MMUFAULT: |
136 | dev_err(bridge, "%s: %s, addr=0x%x", __func__, str, info); | 243 | dev_err(bridge, "%s: %s, addr=0x%x", __func__, |
244 | str, fault_addr); | ||
245 | #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE | ||
246 | print_dsp_trace_buffer(dev_context); | ||
247 | dump_dl_modules(dev_context); | ||
248 | mmu_fault_print_stack(dev_context); | ||
249 | #endif | ||
137 | break; | 250 | break; |
138 | default: | 251 | default: |
139 | dev_err(bridge, "%s: %s", __func__, str); | 252 | dev_err(bridge, "%s: %s", __func__, str); |
diff --git a/drivers/staging/tidspbridge/hw/EasiGlobal.h b/drivers/staging/tidspbridge/hw/EasiGlobal.h new file mode 100644 index 000000000000..e48d7f67c60a --- /dev/null +++ b/drivers/staging/tidspbridge/hw/EasiGlobal.h | |||
@@ -0,0 +1,41 @@ | |||
1 | /* | ||
2 | * EasiGlobal.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Copyright (C) 2007 Texas Instruments, Inc. | ||
7 | * | ||
8 | * This package is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
13 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
14 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
15 | */ | ||
16 | |||
17 | #ifndef _EASIGLOBAL_H | ||
18 | #define _EASIGLOBAL_H | ||
19 | #include <linux/types.h> | ||
20 | |||
21 | /* | ||
22 | * DEFINE: READ_ONLY, WRITE_ONLY & READ_WRITE | ||
23 | * | ||
24 | * DESCRIPTION: Defines used to describe register types for EASI-checker tests. | ||
25 | */ | ||
26 | |||
27 | #define READ_ONLY 1 | ||
28 | #define WRITE_ONLY 2 | ||
29 | #define READ_WRITE 3 | ||
30 | |||
31 | /* | ||
32 | * MACRO: _DEBUG_LEVEL1_EASI | ||
33 | * | ||
34 | * DESCRIPTION: A MACRO which can be used to indicate that a particular beach | ||
35 | * register access function was called. | ||
36 | * | ||
37 | * NOTE: We currently don't use this functionality. | ||
38 | */ | ||
39 | #define _DEBUG_LEVEL1_EASI(easi_num) ((void)0) | ||
40 | |||
41 | #endif /* _EASIGLOBAL_H */ | ||
diff --git a/drivers/staging/tidspbridge/hw/MMUAccInt.h b/drivers/staging/tidspbridge/hw/MMUAccInt.h new file mode 100644 index 000000000000..1cefca321d71 --- /dev/null +++ b/drivers/staging/tidspbridge/hw/MMUAccInt.h | |||
@@ -0,0 +1,76 @@ | |||
1 | /* | ||
2 | * MMUAccInt.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Copyright (C) 2007 Texas Instruments, Inc. | ||
7 | * | ||
8 | * This package is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
13 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
14 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
15 | */ | ||
16 | |||
17 | #ifndef _MMU_ACC_INT_H | ||
18 | #define _MMU_ACC_INT_H | ||
19 | |||
20 | /* Mappings of level 1 EASI function numbers to function names */ | ||
21 | |||
22 | #define EASIL1_MMUMMU_SYSCONFIG_READ_REGISTER32 (MMU_BASE_EASIL1 + 3) | ||
23 | #define EASIL1_MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32 (MMU_BASE_EASIL1 + 17) | ||
24 | #define EASIL1_MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32 (MMU_BASE_EASIL1 + 39) | ||
25 | #define EASIL1_MMUMMU_IRQSTATUS_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 51) | ||
26 | #define EASIL1_MMUMMU_IRQENABLE_READ_REGISTER32 (MMU_BASE_EASIL1 + 102) | ||
27 | #define EASIL1_MMUMMU_IRQENABLE_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 103) | ||
28 | #define EASIL1_MMUMMU_WALKING_STTWL_RUNNING_READ32 (MMU_BASE_EASIL1 + 156) | ||
29 | #define EASIL1_MMUMMU_CNTLTWL_ENABLE_READ32 (MMU_BASE_EASIL1 + 174) | ||
30 | #define EASIL1_MMUMMU_CNTLTWL_ENABLE_WRITE32 (MMU_BASE_EASIL1 + 180) | ||
31 | #define EASIL1_MMUMMU_CNTLMMU_ENABLE_WRITE32 (MMU_BASE_EASIL1 + 190) | ||
32 | #define EASIL1_MMUMMU_FAULT_AD_READ_REGISTER32 (MMU_BASE_EASIL1 + 194) | ||
33 | #define EASIL1_MMUMMU_TTB_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 198) | ||
34 | #define EASIL1_MMUMMU_LOCK_READ_REGISTER32 (MMU_BASE_EASIL1 + 203) | ||
35 | #define EASIL1_MMUMMU_LOCK_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 204) | ||
36 | #define EASIL1_MMUMMU_LOCK_BASE_VALUE_READ32 (MMU_BASE_EASIL1 + 205) | ||
37 | #define EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_READ32 (MMU_BASE_EASIL1 + 209) | ||
38 | #define EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_WRITE32 (MMU_BASE_EASIL1 + 211) | ||
39 | #define EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_SET32 (MMU_BASE_EASIL1 + 212) | ||
40 | #define EASIL1_MMUMMU_LD_TLB_READ_REGISTER32 (MMU_BASE_EASIL1 + 213) | ||
41 | #define EASIL1_MMUMMU_LD_TLB_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 214) | ||
42 | #define EASIL1_MMUMMU_CAM_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 226) | ||
43 | #define EASIL1_MMUMMU_RAM_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 268) | ||
44 | #define EASIL1_MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 322) | ||
45 | |||
46 | /* Register offset address definitions */ | ||
47 | #define MMU_MMU_SYSCONFIG_OFFSET 0x10 | ||
48 | #define MMU_MMU_IRQSTATUS_OFFSET 0x18 | ||
49 | #define MMU_MMU_IRQENABLE_OFFSET 0x1c | ||
50 | #define MMU_MMU_WALKING_ST_OFFSET 0x40 | ||
51 | #define MMU_MMU_CNTL_OFFSET 0x44 | ||
52 | #define MMU_MMU_FAULT_AD_OFFSET 0x48 | ||
53 | #define MMU_MMU_TTB_OFFSET 0x4c | ||
54 | #define MMU_MMU_LOCK_OFFSET 0x50 | ||
55 | #define MMU_MMU_LD_TLB_OFFSET 0x54 | ||
56 | #define MMU_MMU_CAM_OFFSET 0x58 | ||
57 | #define MMU_MMU_RAM_OFFSET 0x5c | ||
58 | #define MMU_MMU_GFLUSH_OFFSET 0x60 | ||
59 | #define MMU_MMU_FLUSH_ENTRY_OFFSET 0x64 | ||
60 | /* Bitfield mask and offset declarations */ | ||
61 | #define MMU_MMU_SYSCONFIG_IDLE_MODE_MASK 0x18 | ||
62 | #define MMU_MMU_SYSCONFIG_IDLE_MODE_OFFSET 3 | ||
63 | #define MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK 0x1 | ||
64 | #define MMU_MMU_SYSCONFIG_AUTO_IDLE_OFFSET 0 | ||
65 | #define MMU_MMU_WALKING_ST_TWL_RUNNING_MASK 0x1 | ||
66 | #define MMU_MMU_WALKING_ST_TWL_RUNNING_OFFSET 0 | ||
67 | #define MMU_MMU_CNTL_TWL_ENABLE_MASK 0x4 | ||
68 | #define MMU_MMU_CNTL_TWL_ENABLE_OFFSET 2 | ||
69 | #define MMU_MMU_CNTL_MMU_ENABLE_MASK 0x2 | ||
70 | #define MMU_MMU_CNTL_MMU_ENABLE_OFFSET 1 | ||
71 | #define MMU_MMU_LOCK_BASE_VALUE_MASK 0xfc00 | ||
72 | #define MMU_MMU_LOCK_BASE_VALUE_OFFSET 10 | ||
73 | #define MMU_MMU_LOCK_CURRENT_VICTIM_MASK 0x3f0 | ||
74 | #define MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET 4 | ||
75 | |||
76 | #endif /* _MMU_ACC_INT_H */ | ||
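
The mask/offset pairs above describe how fields pack into the MMU registers; the MMU_LOCK register, for example, carries the locked-entry base value in bits 15:10 and the current victim entry in bits 9:4. A small numeric sketch of composing that register (the field values are hypothetical; the actual read-modify-write sequences are the macros in MMURegAcM.h below):

#include <stdio.h>

#define MMU_MMU_LOCK_BASE_VALUE_MASK		0xfc00
#define MMU_MMU_LOCK_BASE_VALUE_OFFSET		10
#define MMU_MMU_LOCK_CURRENT_VICTIM_MASK	0x3f0
#define MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET	4

int main(void)
{
	/* Hypothetical: 4 TLB entries locked, entry 7 is the next victim */
	unsigned int base = 4, victim = 7, reg = 0;

	reg |= (base << MMU_MMU_LOCK_BASE_VALUE_OFFSET) &
	       MMU_MMU_LOCK_BASE_VALUE_MASK;
	reg |= (victim << MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET) &
	       MMU_MMU_LOCK_CURRENT_VICTIM_MASK;

	printf("MMU_LOCK = %#x\n", reg);	/* 0x1070 */
	return 0;
}
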
diff --git a/drivers/staging/tidspbridge/hw/MMURegAcM.h b/drivers/staging/tidspbridge/hw/MMURegAcM.h new file mode 100644 index 000000000000..ab1a16da731c --- /dev/null +++ b/drivers/staging/tidspbridge/hw/MMURegAcM.h | |||
@@ -0,0 +1,225 @@ | |||
1 | /* | ||
2 | * MMURegAcM.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Copyright (C) 2007 Texas Instruments, Inc. | ||
7 | * | ||
8 | * This package is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
13 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
14 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
15 | */ | ||
16 | |||
17 | #ifndef _MMU_REG_ACM_H | ||
18 | #define _MMU_REG_ACM_H | ||
19 | |||
20 | #include <linux/io.h> | ||
21 | #include <EasiGlobal.h> | ||
22 | |||
23 | #include "MMUAccInt.h" | ||
24 | |||
25 | #if defined(USE_LEVEL_1_MACROS) | ||
26 | |||
27 | #define MMUMMU_SYSCONFIG_READ_REGISTER32(base_address)\ | ||
28 | (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_READ_REGISTER32),\ | ||
29 | __raw_readl((base_address)+MMU_MMU_SYSCONFIG_OFFSET)) | ||
30 | |||
31 | #define MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32(base_address, value)\ | ||
32 | {\ | ||
33 | const u32 offset = MMU_MMU_SYSCONFIG_OFFSET;\ | ||
34 | register u32 data = __raw_readl((base_address)+offset);\ | ||
35 | register u32 new_value = (value);\ | ||
36 | _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32);\ | ||
37 | data &= ~(MMU_MMU_SYSCONFIG_IDLE_MODE_MASK);\ | ||
38 | new_value <<= MMU_MMU_SYSCONFIG_IDLE_MODE_OFFSET;\ | ||
39 | new_value &= MMU_MMU_SYSCONFIG_IDLE_MODE_MASK;\ | ||
40 | new_value |= data;\ | ||
41 | __raw_writel(new_value, base_address+offset);\ | ||
42 | } | ||
43 | |||
44 | #define MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32(base_address, value)\ | ||
45 | {\ | ||
46 | const u32 offset = MMU_MMU_SYSCONFIG_OFFSET;\ | ||
47 | register u32 data = __raw_readl((base_address)+offset);\ | ||
48 | register u32 new_value = (value);\ | ||
49 | _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32);\ | ||
50 | data &= ~(MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK);\ | ||
51 | new_value <<= MMU_MMU_SYSCONFIG_AUTO_IDLE_OFFSET;\ | ||
52 | new_value &= MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK;\ | ||
53 | new_value |= data;\ | ||
54 | __raw_writel(new_value, base_address+offset);\ | ||
55 | } | ||
56 | |||
57 | #define MMUMMU_IRQSTATUS_READ_REGISTER32(base_address)\ | ||
58 | (_DEBUG_LEVEL1_EASI(easil1_mmummu_irqstatus_read_register32),\ | ||
59 | __raw_readl((base_address)+MMU_MMU_IRQSTATUS_OFFSET)) | ||
60 | |||
61 | #define MMUMMU_IRQSTATUS_WRITE_REGISTER32(base_address, value)\ | ||
62 | {\ | ||
63 | const u32 offset = MMU_MMU_IRQSTATUS_OFFSET;\ | ||
64 | register u32 new_value = (value);\ | ||
65 | _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQSTATUS_WRITE_REGISTER32);\ | ||
66 | __raw_writel(new_value, (base_address)+offset);\ | ||
67 | } | ||
68 | |||
69 | #define MMUMMU_IRQENABLE_READ_REGISTER32(base_address)\ | ||
70 | (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQENABLE_READ_REGISTER32),\ | ||
71 | __raw_readl((base_address)+MMU_MMU_IRQENABLE_OFFSET)) | ||
72 | |||
73 | #define MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, value)\ | ||
74 | {\ | ||
75 | const u32 offset = MMU_MMU_IRQENABLE_OFFSET;\ | ||
76 | register u32 new_value = (value);\ | ||
77 | _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQENABLE_WRITE_REGISTER32);\ | ||
78 | __raw_writel(new_value, (base_address)+offset);\ | ||
79 | } | ||
80 | |||
81 | #define MMUMMU_WALKING_STTWL_RUNNING_READ32(base_address)\ | ||
82 | (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_WALKING_STTWL_RUNNING_READ32),\ | ||
83 | (((__raw_readl(((base_address)+(MMU_MMU_WALKING_ST_OFFSET))))\ | ||
84 | & MMU_MMU_WALKING_ST_TWL_RUNNING_MASK) >>\ | ||
85 | MMU_MMU_WALKING_ST_TWL_RUNNING_OFFSET)) | ||
86 | |||
87 | #define MMUMMU_CNTLTWL_ENABLE_READ32(base_address)\ | ||
88 | (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLTWL_ENABLE_READ32),\ | ||
89 | (((__raw_readl(((base_address)+(MMU_MMU_CNTL_OFFSET)))) &\ | ||
90 | MMU_MMU_CNTL_TWL_ENABLE_MASK) >>\ | ||
91 | MMU_MMU_CNTL_TWL_ENABLE_OFFSET)) | ||
92 | |||
93 | #define MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, value)\ | ||
94 | {\ | ||
95 | const u32 offset = MMU_MMU_CNTL_OFFSET;\ | ||
96 | register u32 data = __raw_readl((base_address)+offset);\ | ||
97 | register u32 new_value = (value);\ | ||
98 | _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLTWL_ENABLE_WRITE32);\ | ||
99 | data &= ~(MMU_MMU_CNTL_TWL_ENABLE_MASK);\ | ||
100 | new_value <<= MMU_MMU_CNTL_TWL_ENABLE_OFFSET;\ | ||
101 | new_value &= MMU_MMU_CNTL_TWL_ENABLE_MASK;\ | ||
102 | new_value |= data;\ | ||
103 | __raw_writel(new_value, base_address+offset);\ | ||
104 | } | ||
105 | |||
106 | #define MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, value)\ | ||
107 | {\ | ||
108 | const u32 offset = MMU_MMU_CNTL_OFFSET;\ | ||
109 | register u32 data = __raw_readl((base_address)+offset);\ | ||
110 | register u32 new_value = (value);\ | ||
111 | _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLMMU_ENABLE_WRITE32);\ | ||
112 | data &= ~(MMU_MMU_CNTL_MMU_ENABLE_MASK);\ | ||
113 | new_value <<= MMU_MMU_CNTL_MMU_ENABLE_OFFSET;\ | ||
114 | new_value &= MMU_MMU_CNTL_MMU_ENABLE_MASK;\ | ||
115 | new_value |= data;\ | ||
116 | __raw_writel(new_value, base_address+offset);\ | ||
117 | } | ||
118 | |||
119 | #define MMUMMU_FAULT_AD_READ_REGISTER32(base_address)\ | ||
120 | (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_FAULT_AD_READ_REGISTER32),\ | ||
121 | __raw_readl((base_address)+MMU_MMU_FAULT_AD_OFFSET)) | ||
122 | |||
123 | #define MMUMMU_TTB_WRITE_REGISTER32(base_address, value)\ | ||
124 | {\ | ||
125 | const u32 offset = MMU_MMU_TTB_OFFSET;\ | ||
126 | register u32 new_value = (value);\ | ||
127 | _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_TTB_WRITE_REGISTER32);\ | ||
128 | __raw_writel(new_value, (base_address)+offset);\ | ||
129 | } | ||
130 | |||
131 | #define MMUMMU_LOCK_READ_REGISTER32(base_address)\ | ||
132 | (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_READ_REGISTER32),\ | ||
133 | __raw_readl((base_address)+MMU_MMU_LOCK_OFFSET)) | ||
134 | |||
135 | #define MMUMMU_LOCK_WRITE_REGISTER32(base_address, value)\ | ||
136 | {\ | ||
137 | const u32 offset = MMU_MMU_LOCK_OFFSET;\ | ||
138 | register u32 new_value = (value);\ | ||
139 | _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_WRITE_REGISTER32);\ | ||
140 | __raw_writel(new_value, (base_address)+offset);\ | ||
141 | } | ||
142 | |||
143 | #define MMUMMU_LOCK_BASE_VALUE_READ32(base_address)\ | ||
144 | (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_BASE_VALUE_READ32),\ | ||
145 | (((__raw_readl(((base_address)+(MMU_MMU_LOCK_OFFSET)))) &\ | ||
146 | MMU_MMU_LOCK_BASE_VALUE_MASK) >>\ | ||
147 | MMU_MMU_LOCK_BASE_VALUE_OFFSET)) | ||
148 | |||
149 | #define MMUMMU_LOCK_BASE_VALUE_WRITE32(base_address, value)\ | ||
150 | {\ | ||
151 | const u32 offset = MMU_MMU_LOCK_OFFSET;\ | ||
152 | register u32 data = __raw_readl((base_address)+offset);\ | ||
153 | register u32 new_value = (value);\ | ||
154 | _DEBUG_LEVEL1_EASI(easil1_mmummu_lock_base_value_write32);\ | ||
155 | data &= ~(MMU_MMU_LOCK_BASE_VALUE_MASK);\ | ||
156 | new_value <<= MMU_MMU_LOCK_BASE_VALUE_OFFSET;\ | ||
157 | new_value &= MMU_MMU_LOCK_BASE_VALUE_MASK;\ | ||
158 | new_value |= data;\ | ||
159 | __raw_writel(new_value, base_address+offset);\ | ||
160 | } | ||
161 | |||
162 | #define MMUMMU_LOCK_CURRENT_VICTIM_READ32(base_address)\ | ||
163 | (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_READ32),\ | ||
164 | (((__raw_readl(((base_address)+(MMU_MMU_LOCK_OFFSET)))) &\ | ||
165 | MMU_MMU_LOCK_CURRENT_VICTIM_MASK) >>\ | ||
166 | MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET)) | ||
167 | |||
168 | #define MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, value)\ | ||
169 | {\ | ||
170 | const u32 offset = MMU_MMU_LOCK_OFFSET;\ | ||
171 | register u32 data = __raw_readl((base_address)+offset);\ | ||
172 | register u32 new_value = (value);\ | ||
173 | _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_WRITE32);\ | ||
174 | data &= ~(MMU_MMU_LOCK_CURRENT_VICTIM_MASK);\ | ||
175 | new_value <<= MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET;\ | ||
176 | new_value &= MMU_MMU_LOCK_CURRENT_VICTIM_MASK;\ | ||
177 | new_value |= data;\ | ||
178 | __raw_writel(new_value, base_address+offset);\ | ||
179 | } | ||
180 | |||
181 | #define MMUMMU_LOCK_CURRENT_VICTIM_SET32(var, value)\ | ||
182 | (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_SET32),\ | ||
183 | (((var) & ~(MMU_MMU_LOCK_CURRENT_VICTIM_MASK)) |\ | ||
184 | (((value) << MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET) &\ | ||
185 | MMU_MMU_LOCK_CURRENT_VICTIM_MASK))) | ||
186 | |||
187 | #define MMUMMU_LD_TLB_READ_REGISTER32(base_address)\ | ||
188 | (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LD_TLB_READ_REGISTER32),\ | ||
189 | __raw_readl((base_address)+MMU_MMU_LD_TLB_OFFSET)) | ||
190 | |||
191 | #define MMUMMU_LD_TLB_WRITE_REGISTER32(base_address, value)\ | ||
192 | {\ | ||
193 | const u32 offset = MMU_MMU_LD_TLB_OFFSET;\ | ||
194 | register u32 new_value = (value);\ | ||
195 | _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LD_TLB_WRITE_REGISTER32);\ | ||
196 | __raw_writel(new_value, (base_address)+offset);\ | ||
197 | } | ||
198 | |||
199 | #define MMUMMU_CAM_WRITE_REGISTER32(base_address, value)\ | ||
200 | {\ | ||
201 | const u32 offset = MMU_MMU_CAM_OFFSET;\ | ||
202 | register u32 new_value = (value);\ | ||
203 | _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CAM_WRITE_REGISTER32);\ | ||
204 | __raw_writel(new_value, (base_address)+offset);\ | ||
205 | } | ||
206 | |||
207 | #define MMUMMU_RAM_WRITE_REGISTER32(base_address, value)\ | ||
208 | {\ | ||
209 | const u32 offset = MMU_MMU_RAM_OFFSET;\ | ||
210 | register u32 new_value = (value);\ | ||
211 | _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_RAM_WRITE_REGISTER32);\ | ||
212 | __raw_writel(new_value, (base_address)+offset);\ | ||
213 | } | ||
214 | |||
215 | #define MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32(base_address, value)\ | ||
216 | {\ | ||
217 | const u32 offset = MMU_MMU_FLUSH_ENTRY_OFFSET;\ | ||
218 | register u32 new_value = (value);\ | ||
219 | _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32);\ | ||
220 | __raw_writel(new_value, (base_address)+offset);\ | ||
221 | } | ||
222 | |||
223 | #endif /* USE_LEVEL_1_MACROS */ | ||
224 | |||
225 | #endif /* _MMU_REG_ACM_H */ | ||
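
These level-1 macros are the building blocks the hw_mmu.c helpers (added later in this patch) are written with. As a hedged illustration only, not code from the patch, a minimal enable sequence could compose them roughly as below; dmmu_base is assumed to be the ioremap()'d DSP MMU register base and ttb_pa a suitably aligned L1 translation table, and the real sequences, including TTB masking and TLB locking, live in hw_mmu.c:

#define USE_LEVEL_1_MACROS
#include "MMURegAcM.h"

/* Illustrative only: program the TTB, then enable the table walker and
 * the MMU itself. Assumes dmmu_base was obtained via ioremap(). */
static void example_dsp_mmu_enable(void __iomem *dmmu_base, u32 ttb_pa)
{
	MMUMMU_TTB_WRITE_REGISTER32(dmmu_base, ttb_pa);
	MMUMMU_CNTLTWL_ENABLE_WRITE32(dmmu_base, 1);
	MMUMMU_CNTLMMU_ENABLE_WRITE32(dmmu_base, 1);
}
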
diff --git a/drivers/staging/tidspbridge/hw/hw_defs.h b/drivers/staging/tidspbridge/hw/hw_defs.h new file mode 100644 index 000000000000..d5266d4c163f --- /dev/null +++ b/drivers/staging/tidspbridge/hw/hw_defs.h | |||
@@ -0,0 +1,58 @@ | |||
1 | /* | ||
2 | * hw_defs.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Global HW definitions | ||
7 | * | ||
8 | * Copyright (C) 2007 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #ifndef _HW_DEFS_H | ||
20 | #define _HW_DEFS_H | ||
21 | |||
22 | /* Page size */ | ||
23 | #define HW_PAGE_SIZE4KB 0x1000 | ||
24 | #define HW_PAGE_SIZE64KB 0x10000 | ||
25 | #define HW_PAGE_SIZE1MB 0x100000 | ||
26 | #define HW_PAGE_SIZE16MB 0x1000000 | ||
27 | |||
28 | /* hw_status: return type for HW API */ | ||
29 | typedef long hw_status; | ||
30 | |||
31 | /* Macro used to set and clear any bit */ | ||
32 | #define HW_CLEAR 0 | ||
33 | #define HW_SET 1 | ||
34 | |||
35 | /* hw_endianism_t: Enumerated Type used to specify the endianism | ||
36 | * Do NOT change these values. They are used as bit fields. */ | ||
37 | enum hw_endianism_t { | ||
38 | HW_LITTLE_ENDIAN, | ||
39 | HW_BIG_ENDIAN | ||
40 | }; | ||
41 | |||
42 | /* hw_element_size_t: Enumerated Type used to specify the element size | ||
43 | * Do NOT change these values. They are used as bit fields. */ | ||
44 | enum hw_element_size_t { | ||
45 | HW_ELEM_SIZE8BIT, | ||
46 | HW_ELEM_SIZE16BIT, | ||
47 | HW_ELEM_SIZE32BIT, | ||
48 | HW_ELEM_SIZE64BIT | ||
49 | }; | ||
50 | |||
51 | /* hw_idle_mode_t: Enumerated Type used to specify Idle modes */ | ||
52 | enum hw_idle_mode_t { | ||
53 | HW_FORCE_IDLE, | ||
54 | HW_NO_IDLE, | ||
55 | HW_SMART_IDLE | ||
56 | }; | ||
57 | |||
58 | #endif /* _HW_DEFS_H */ | ||
diff --git a/drivers/staging/tidspbridge/hw/hw_mmu.c b/drivers/staging/tidspbridge/hw/hw_mmu.c new file mode 100644 index 000000000000..014f5d5293ae --- /dev/null +++ b/drivers/staging/tidspbridge/hw/hw_mmu.c | |||
@@ -0,0 +1,562 @@ | |||
1 | /* | ||
2 | * hw_mmu.c | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * API definitions to setup MMU TLB and PTE | ||
7 | * | ||
8 | * Copyright (C) 2007 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #include <linux/io.h> | ||
20 | #include "MMURegAcM.h" | ||
21 | #include <hw_defs.h> | ||
22 | #include <hw_mmu.h> | ||
23 | #include <linux/types.h> | ||
24 | #include <linux/err.h> | ||
25 | |||
26 | #define MMU_BASE_VAL_MASK 0xFC00 | ||
27 | #define MMU_PAGE_MAX 3 | ||
28 | #define MMU_ELEMENTSIZE_MAX 3 | ||
29 | #define MMU_ADDR_MASK 0xFFFFF000 | ||
30 | #define MMU_TTB_MASK 0xFFFFC000 | ||
31 | #define MMU_SECTION_ADDR_MASK 0xFFF00000 | ||
32 | #define MMU_SSECTION_ADDR_MASK 0xFF000000 | ||
33 | #define MMU_PAGE_TABLE_MASK 0xFFFFFC00 | ||
34 | #define MMU_LARGE_PAGE_MASK 0xFFFF0000 | ||
35 | #define MMU_SMALL_PAGE_MASK 0xFFFFF000 | ||
36 | |||
37 | #define MMU_LOAD_TLB 0x00000001 | ||
38 | #define MMU_GFLUSH 0x60 | ||
39 | |||
40 | /* | ||
41 | * hw_mmu_page_size_t: Enumerated Type used to specify the MMU Page Size (SLSS) | ||
42 | */ | ||
43 | enum hw_mmu_page_size_t { | ||
44 | HW_MMU_SECTION, | ||
45 | HW_MMU_LARGE_PAGE, | ||
46 | HW_MMU_SMALL_PAGE, | ||
47 | HW_MMU_SUPERSECTION | ||
48 | }; | ||
49 | |||
50 | /* | ||
51 | * FUNCTION : mmu_flush_entry | ||
52 | * | ||
53 | * INPUTS: | ||
54 | * | ||
55 | * Identifier : base_address | ||
56 | * Type : const u32 | ||
57 | * Description : Base Address of instance of MMU module | ||
58 | * | ||
59 | * RETURNS: | ||
60 | * | ||
61 | * Type : hw_status | ||
62 | * Description : 0 -- No errors occurred | ||
63 | * RET_BAD_NULL_PARAM -- A Pointer | ||
64 | * Parameter was set to NULL | ||
65 | * | ||
66 | * PURPOSE: : Flush the TLB entry pointed by the | ||
67 | * lock counter register | ||
68 | * even if this entry is set protected | ||
69 | * | ||
70 | * METHOD: : Check the Input parameter and Flush a | ||
71 | * single entry in the TLB. | ||
72 | */ | ||
73 | static hw_status mmu_flush_entry(const void __iomem *base_address); | ||
74 | |||
75 | /* | ||
76 | * FUNCTION : mmu_set_cam_entry | ||
77 | * | ||
78 | * INPUTS: | ||
79 | * | ||
80 | * Identifier : base_address | ||
81 | * Type : const u32 | ||
82 | * Description : Base Address of instance of MMU module | ||
83 | * | ||
84 | * Identifier : page_sz | ||
85 | * Type : const u32 | ||
86 | * Description : It indicates the page size | ||
87 | * | ||
88 | * Identifier : preserved_bit | ||
89 | * Type : const u32 | ||
90 | * Description : Indicates whether the TLB entry is a | ||
91 | * preserved entry | ||
92 | * | ||
93 | * Identifier : valid_bit | ||
94 | * Type : const u32 | ||
95 | * Description : Indicates whether the TLB entry is valid | ||
96 | * | ||
97 | * | ||
98 | * Identifier : virtual_addr_tag | ||
99 | * Type : const u32 | ||
100 | * Description : virtual Address | ||
101 | * | ||
102 | * RETURNS: | ||
103 | * | ||
104 | * Type : hw_status | ||
105 | * Description : 0 -- No errors occurred | ||
106 | * RET_BAD_NULL_PARAM -- A Pointer Parameter | ||
107 | * was set to NULL | ||
108 | * RET_PARAM_OUT_OF_RANGE -- Input Parameter out | ||
109 | * of Range | ||
110 | * | ||
111 | * PURPOSE: : Set MMU_CAM reg | ||
112 | * | ||
113 | * METHOD: : Check the Input parameters and set the CAM entry. | ||
114 | */ | ||
115 | static hw_status mmu_set_cam_entry(const void __iomem *base_address, | ||
116 | const u32 page_sz, | ||
117 | const u32 preserved_bit, | ||
118 | const u32 valid_bit, | ||
119 | const u32 virtual_addr_tag); | ||
120 | |||
121 | /* | ||
122 | * FUNCTION : mmu_set_ram_entry | ||
123 | * | ||
124 | * INPUTS: | ||
125 | * | ||
126 | * Identifier : base_address | ||
127 | * Type : const u32 | ||
128 | * Description : Base Address of instance of MMU module | ||
129 | * | ||
130 | * Identifier : physical_addr | ||
131 | * Type : const u32 | ||
132 | * Description : Physical Address to which the corresponding | ||
133 | * virtual address should point | ||
134 | * | ||
135 | * Identifier : endianism | ||
136 | * Type : hw_endianism_t | ||
137 | * Description : endianism for the given page | ||
138 | * | ||
139 | * Identifier : element_size | ||
140 | * Type : hw_element_size_t | ||
141 | * Description : The element size (8, 16, 32 or 64 bit) | ||
142 | * | ||
143 | * Identifier : mixed_size | ||
144 | * Type : hw_mmu_mixed_size_t | ||
145 | * Description : Element Size to follow CPU or TLB | ||
146 | * | ||
147 | * RETURNS: | ||
148 | * | ||
149 | * Type : hw_status | ||
150 | * Description : 0 -- No errors occurred | ||
151 | * RET_BAD_NULL_PARAM -- A Pointer Parameter | ||
152 | * was set to NULL | ||
153 | * RET_PARAM_OUT_OF_RANGE -- Input Parameter | ||
154 | * out of Range | ||
155 | * | ||
156 | * PURPOSE: : Set MMU_RAM reg | ||
157 | * | ||
158 | * METHOD: : Check the Input parameters and set the RAM entry. | ||
159 | */ | ||
160 | static hw_status mmu_set_ram_entry(const void __iomem *base_address, | ||
161 | const u32 physical_addr, | ||
162 | enum hw_endianism_t endianism, | ||
163 | enum hw_element_size_t element_size, | ||
164 | enum hw_mmu_mixed_size_t mixed_size); | ||
165 | |||
166 | /* HW FUNCTIONS */ | ||
167 | |||
168 | hw_status hw_mmu_enable(const void __iomem *base_address) | ||
169 | { | ||
170 | hw_status status = 0; | ||
171 | |||
172 | MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, HW_SET); | ||
173 | |||
174 | return status; | ||
175 | } | ||
176 | |||
177 | hw_status hw_mmu_disable(const void __iomem *base_address) | ||
178 | { | ||
179 | hw_status status = 0; | ||
180 | |||
181 | MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, HW_CLEAR); | ||
182 | |||
183 | return status; | ||
184 | } | ||
185 | |||
186 | hw_status hw_mmu_num_locked_set(const void __iomem *base_address, | ||
187 | u32 num_locked_entries) | ||
188 | { | ||
189 | hw_status status = 0; | ||
190 | |||
191 | MMUMMU_LOCK_BASE_VALUE_WRITE32(base_address, num_locked_entries); | ||
192 | |||
193 | return status; | ||
194 | } | ||
195 | |||
196 | hw_status hw_mmu_victim_num_set(const void __iomem *base_address, | ||
197 | u32 victim_entry_num) | ||
198 | { | ||
199 | hw_status status = 0; | ||
200 | |||
201 | MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, victim_entry_num); | ||
202 | |||
203 | return status; | ||
204 | } | ||
205 | |||
206 | hw_status hw_mmu_event_ack(const void __iomem *base_address, u32 irq_mask) | ||
207 | { | ||
208 | hw_status status = 0; | ||
209 | |||
210 | MMUMMU_IRQSTATUS_WRITE_REGISTER32(base_address, irq_mask); | ||
211 | |||
212 | return status; | ||
213 | } | ||
214 | |||
215 | hw_status hw_mmu_event_disable(const void __iomem *base_address, u32 irq_mask) | ||
216 | { | ||
217 | hw_status status = 0; | ||
218 | u32 irq_reg; | ||
219 | |||
220 | irq_reg = MMUMMU_IRQENABLE_READ_REGISTER32(base_address); | ||
221 | |||
222 | MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, irq_reg & ~irq_mask); | ||
223 | |||
224 | return status; | ||
225 | } | ||
226 | |||
227 | hw_status hw_mmu_event_enable(const void __iomem *base_address, u32 irq_mask) | ||
228 | { | ||
229 | hw_status status = 0; | ||
230 | u32 irq_reg; | ||
231 | |||
232 | irq_reg = MMUMMU_IRQENABLE_READ_REGISTER32(base_address); | ||
233 | |||
234 | MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, irq_reg | irq_mask); | ||
235 | |||
236 | return status; | ||
237 | } | ||
238 | |||
239 | hw_status hw_mmu_event_status(const void __iomem *base_address, u32 *irq_mask) | ||
240 | { | ||
241 | hw_status status = 0; | ||
242 | |||
243 | *irq_mask = MMUMMU_IRQSTATUS_READ_REGISTER32(base_address); | ||
244 | |||
245 | return status; | ||
246 | } | ||
247 | |||
248 | hw_status hw_mmu_fault_addr_read(const void __iomem *base_address, u32 *addr) | ||
249 | { | ||
250 | hw_status status = 0; | ||
251 | |||
252 | /* read values from register */ | ||
253 | *addr = MMUMMU_FAULT_AD_READ_REGISTER32(base_address); | ||
254 | |||
255 | return status; | ||
256 | } | ||
257 | |||
258 | hw_status hw_mmu_ttb_set(const void __iomem *base_address, u32 ttb_phys_addr) | ||
259 | { | ||
260 | hw_status status = 0; | ||
261 | u32 load_ttb; | ||
262 | |||
263 | load_ttb = ttb_phys_addr & ~0x7FUL; | ||
264 | /* write values to register */ | ||
265 | MMUMMU_TTB_WRITE_REGISTER32(base_address, load_ttb); | ||
266 | |||
267 | return status; | ||
268 | } | ||
269 | |||
270 | hw_status hw_mmu_twl_enable(const void __iomem *base_address) | ||
271 | { | ||
272 | hw_status status = 0; | ||
273 | |||
274 | MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, HW_SET); | ||
275 | |||
276 | return status; | ||
277 | } | ||
278 | |||
279 | hw_status hw_mmu_twl_disable(const void __iomem *base_address) | ||
280 | { | ||
281 | hw_status status = 0; | ||
282 | |||
283 | MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, HW_CLEAR); | ||
284 | |||
285 | return status; | ||
286 | } | ||
287 | |||
288 | hw_status hw_mmu_tlb_flush(const void __iomem *base_address, u32 virtual_addr, | ||
289 | u32 page_sz) | ||
290 | { | ||
291 | hw_status status = 0; | ||
292 | u32 virtual_addr_tag; | ||
293 | enum hw_mmu_page_size_t pg_size_bits; | ||
294 | |||
295 | switch (page_sz) { | ||
296 | case HW_PAGE_SIZE4KB: | ||
297 | pg_size_bits = HW_MMU_SMALL_PAGE; | ||
298 | break; | ||
299 | |||
300 | case HW_PAGE_SIZE64KB: | ||
301 | pg_size_bits = HW_MMU_LARGE_PAGE; | ||
302 | break; | ||
303 | |||
304 | case HW_PAGE_SIZE1MB: | ||
305 | pg_size_bits = HW_MMU_SECTION; | ||
306 | break; | ||
307 | |||
308 | case HW_PAGE_SIZE16MB: | ||
309 | pg_size_bits = HW_MMU_SUPERSECTION; | ||
310 | break; | ||
311 | |||
312 | default: | ||
313 | return -EINVAL; | ||
314 | } | ||
315 | |||
316 | /* Generate the 20-bit tag from virtual address */ | ||
317 | virtual_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12); | ||
318 | |||
319 | mmu_set_cam_entry(base_address, pg_size_bits, 0, 0, virtual_addr_tag); | ||
320 | |||
321 | mmu_flush_entry(base_address); | ||
322 | |||
323 | return status; | ||
324 | } | ||
325 | |||
326 | hw_status hw_mmu_tlb_add(const void __iomem *base_address, | ||
327 | u32 physical_addr, | ||
328 | u32 virtual_addr, | ||
329 | u32 page_sz, | ||
330 | u32 entry_num, | ||
331 | struct hw_mmu_map_attrs_t *map_attrs, | ||
332 | s8 preserved_bit, s8 valid_bit) | ||
333 | { | ||
334 | hw_status status = 0; | ||
335 | u32 lock_reg; | ||
336 | u32 virtual_addr_tag; | ||
337 | enum hw_mmu_page_size_t mmu_pg_size; | ||
338 | |||
339 | /* Check the input parameters */ | ||
340 | switch (page_sz) { | ||
341 | case HW_PAGE_SIZE4KB: | ||
342 | mmu_pg_size = HW_MMU_SMALL_PAGE; | ||
343 | break; | ||
344 | |||
345 | case HW_PAGE_SIZE64KB: | ||
346 | mmu_pg_size = HW_MMU_LARGE_PAGE; | ||
347 | break; | ||
348 | |||
349 | case HW_PAGE_SIZE1MB: | ||
350 | mmu_pg_size = HW_MMU_SECTION; | ||
351 | break; | ||
352 | |||
353 | case HW_PAGE_SIZE16MB: | ||
354 | mmu_pg_size = HW_MMU_SUPERSECTION; | ||
355 | break; | ||
356 | |||
357 | default: | ||
358 | return -EINVAL; | ||
359 | } | ||
360 | |||
361 | lock_reg = MMUMMU_LOCK_READ_REGISTER32(base_address); | ||
362 | |||
363 | /* Generate the 20-bit tag from virtual address */ | ||
364 | virtual_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12); | ||
365 | |||
366 | /* Write the fields in the CAM Entry Register */ | ||
367 | mmu_set_cam_entry(base_address, mmu_pg_size, preserved_bit, valid_bit, | ||
368 | virtual_addr_tag); | ||
369 | |||
370 | /* Write the different fields of the RAM Entry Register */ | ||
371 | /* endianism of the page, element size of the page (8, 16, 32, 64 bit) */ | ||
372 | mmu_set_ram_entry(base_address, physical_addr, map_attrs->endianism, | ||
373 | map_attrs->element_size, map_attrs->mixed_size); | ||
374 | |||
375 | /* Update the MMU Lock Register */ | ||
376 | /* currentVictim between lockedBaseValue and (MMU_Entries_Number - 1) */ | ||
377 | MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, entry_num); | ||
378 | |||
379 | /* Enable loading of an entry in TLB by writing 1 | ||
380 | into LD_TLB_REG register */ | ||
381 | MMUMMU_LD_TLB_WRITE_REGISTER32(base_address, MMU_LOAD_TLB); | ||
382 | |||
383 | MMUMMU_LOCK_WRITE_REGISTER32(base_address, lock_reg); | ||
384 | |||
385 | return status; | ||
386 | } | ||
387 | |||
388 | hw_status hw_mmu_pte_set(const u32 pg_tbl_va, | ||
389 | u32 physical_addr, | ||
390 | u32 virtual_addr, | ||
391 | u32 page_sz, struct hw_mmu_map_attrs_t *map_attrs) | ||
392 | { | ||
393 | hw_status status = 0; | ||
394 | u32 pte_addr, pte_val; | ||
395 | s32 num_entries = 1; | ||
396 | |||
397 | switch (page_sz) { | ||
398 | case HW_PAGE_SIZE4KB: | ||
399 | pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va, | ||
400 | virtual_addr & | ||
401 | MMU_SMALL_PAGE_MASK); | ||
402 | pte_val = | ||
403 | ((physical_addr & MMU_SMALL_PAGE_MASK) | | ||
404 | (map_attrs->endianism << 9) | (map_attrs-> | ||
405 | element_size << 4) | | ||
406 | (map_attrs->mixed_size << 11) | 2); | ||
407 | break; | ||
408 | |||
409 | case HW_PAGE_SIZE64KB: | ||
410 | num_entries = 16; | ||
411 | pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va, | ||
412 | virtual_addr & | ||
413 | MMU_LARGE_PAGE_MASK); | ||
414 | pte_val = | ||
415 | ((physical_addr & MMU_LARGE_PAGE_MASK) | | ||
416 | (map_attrs->endianism << 9) | (map_attrs-> | ||
417 | element_size << 4) | | ||
418 | (map_attrs->mixed_size << 11) | 1); | ||
419 | break; | ||
420 | |||
421 | case HW_PAGE_SIZE1MB: | ||
422 | pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va, | ||
423 | virtual_addr & | ||
424 | MMU_SECTION_ADDR_MASK); | ||
425 | pte_val = | ||
426 | ((((physical_addr & MMU_SECTION_ADDR_MASK) | | ||
427 | (map_attrs->endianism << 15) | (map_attrs-> | ||
428 | element_size << 10) | | ||
429 | (map_attrs->mixed_size << 17)) & ~0x40000) | 0x2); | ||
430 | break; | ||
431 | |||
432 | case HW_PAGE_SIZE16MB: | ||
433 | num_entries = 16; | ||
434 | pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va, | ||
435 | virtual_addr & | ||
436 | MMU_SSECTION_ADDR_MASK); | ||
437 | pte_val = | ||
438 | (((physical_addr & MMU_SSECTION_ADDR_MASK) | | ||
439 | (map_attrs->endianism << 15) | (map_attrs-> | ||
440 | element_size << 10) | | ||
441 | (map_attrs->mixed_size << 17) | ||
442 | ) | 0x40000 | 0x2); | ||
443 | break; | ||
444 | |||
445 | case HW_MMU_COARSE_PAGE_SIZE: | ||
446 | pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va, | ||
447 | virtual_addr & | ||
448 | MMU_SECTION_ADDR_MASK); | ||
449 | pte_val = (physical_addr & MMU_PAGE_TABLE_MASK) | 1; | ||
450 | break; | ||
451 | |||
452 | default: | ||
453 | return -EINVAL; | ||
454 | } | ||
455 | |||
456 | while (--num_entries >= 0) | ||
457 | ((u32 *) pte_addr)[num_entries] = pte_val; | ||
458 | |||
459 | return status; | ||
460 | } | ||
461 | |||
462 | hw_status hw_mmu_pte_clear(const u32 pg_tbl_va, u32 virtual_addr, u32 page_size) | ||
463 | { | ||
464 | hw_status status = 0; | ||
465 | u32 pte_addr; | ||
466 | s32 num_entries = 1; | ||
467 | |||
468 | switch (page_size) { | ||
469 | case HW_PAGE_SIZE4KB: | ||
470 | pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va, | ||
471 | virtual_addr & | ||
472 | MMU_SMALL_PAGE_MASK); | ||
473 | break; | ||
474 | |||
475 | case HW_PAGE_SIZE64KB: | ||
476 | num_entries = 16; | ||
477 | pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va, | ||
478 | virtual_addr & | ||
479 | MMU_LARGE_PAGE_MASK); | ||
480 | break; | ||
481 | |||
482 | case HW_PAGE_SIZE1MB: | ||
483 | case HW_MMU_COARSE_PAGE_SIZE: | ||
484 | pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va, | ||
485 | virtual_addr & | ||
486 | MMU_SECTION_ADDR_MASK); | ||
487 | break; | ||
488 | |||
489 | case HW_PAGE_SIZE16MB: | ||
490 | num_entries = 16; | ||
491 | pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va, | ||
492 | virtual_addr & | ||
493 | MMU_SSECTION_ADDR_MASK); | ||
494 | break; | ||
495 | |||
496 | default: | ||
497 | return -EINVAL; | ||
498 | } | ||
499 | |||
500 | while (--num_entries >= 0) | ||
501 | ((u32 *) pte_addr)[num_entries] = 0; | ||
502 | |||
503 | return status; | ||
504 | } | ||
505 | |||
506 | /* mmu_flush_entry */ | ||
507 | static hw_status mmu_flush_entry(const void __iomem *base_address) | ||
508 | { | ||
509 | hw_status status = 0; | ||
510 | u32 flush_entry_data = 0x1; | ||
511 | |||
512 | /* write values to register */ | ||
513 | MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32(base_address, flush_entry_data); | ||
514 | |||
515 | return status; | ||
516 | } | ||
517 | |||
518 | /* mmu_set_cam_entry */ | ||
519 | static hw_status mmu_set_cam_entry(const void __iomem *base_address, | ||
520 | const u32 page_sz, | ||
521 | const u32 preserved_bit, | ||
522 | const u32 valid_bit, | ||
523 | const u32 virtual_addr_tag) | ||
524 | { | ||
525 | hw_status status = 0; | ||
526 | u32 mmu_cam_reg; | ||
527 | |||
528 | mmu_cam_reg = (virtual_addr_tag << 12); | ||
529 | mmu_cam_reg = (mmu_cam_reg) | (page_sz) | (valid_bit << 2) | | ||
530 | (preserved_bit << 3); | ||
531 | |||
532 | /* write values to register */ | ||
533 | MMUMMU_CAM_WRITE_REGISTER32(base_address, mmu_cam_reg); | ||
534 | |||
535 | return status; | ||
536 | } | ||
537 | |||
538 | /* mmu_set_ram_entry */ | ||
539 | static hw_status mmu_set_ram_entry(const void __iomem *base_address, | ||
540 | const u32 physical_addr, | ||
541 | enum hw_endianism_t endianism, | ||
542 | enum hw_element_size_t element_size, | ||
543 | enum hw_mmu_mixed_size_t mixed_size) | ||
544 | { | ||
545 | hw_status status = 0; | ||
546 | u32 mmu_ram_reg; | ||
547 | |||
548 | mmu_ram_reg = (physical_addr & MMU_ADDR_MASK); | ||
549 | mmu_ram_reg = (mmu_ram_reg) | ((endianism << 9) | (element_size << 7) | | ||
550 | (mixed_size << 6)); | ||
551 | |||
552 | /* write values to register */ | ||
553 | MMUMMU_RAM_WRITE_REGISTER32(base_address, mmu_ram_reg); | ||
554 | |||
555 | return status; | ||
556 | |||
557 | } | ||
558 | |||
559 | void hw_mmu_tlb_flush_all(const void __iomem *base) | ||
560 | { | ||
561 | __raw_writeb(1, base + MMU_GFLUSH); | ||
562 | } | ||
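The hw_mmu_tlb_add() path above shows how a CAM/RAM pair is programmed: pick the page-size bits, write the CAM tag, write the RAM attributes, then trigger the load through the lock/victim registers. As a rough usage sketch only (not part of this patch; the function name and the ioremap'd register base are assumptions), pinning a single 4 KB page into TLB entry 0 would look roughly like this:

static int example_pin_tlb_entry(void __iomem *dsp_mmu_base, u32 pa, u32 da)
{
	struct hw_mmu_map_attrs_t attrs = {
		.endianism = HW_LITTLE_ENDIAN,
		.element_size = HW_ELEM_SIZE32BIT,
		.mixed_size = HW_MMU_CPUES,
	};
	hw_status st;

	/* Load entry 0, marked preserved (1) and valid (1) */
	st = hw_mmu_tlb_add(dsp_mmu_base, pa, da, HW_PAGE_SIZE4KB,
			    0, &attrs, 1, 1);
	if (st)
		return -EPERM;

	/* Entries below this index are protected from victim rotation */
	hw_mmu_num_locked_set(dsp_mmu_base, 1);
	return 0;
}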
diff --git a/drivers/staging/tidspbridge/hw/hw_mmu.h b/drivers/staging/tidspbridge/hw/hw_mmu.h new file mode 100644 index 000000000000..1458a2c6027b --- /dev/null +++ b/drivers/staging/tidspbridge/hw/hw_mmu.h | |||
@@ -0,0 +1,163 @@ | |||
1 | /* | ||
2 | * hw_mmu.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * MMU types and API declarations | ||
7 | * | ||
8 | * Copyright (C) 2007 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #ifndef _HW_MMU_H | ||
20 | #define _HW_MMU_H | ||
21 | |||
22 | #include <linux/types.h> | ||
23 | |||
24 | /* Bitmasks for interrupt sources */ | ||
25 | #define HW_MMU_TRANSLATION_FAULT 0x2 | ||
26 | #define HW_MMU_ALL_INTERRUPTS 0x1F | ||
27 | |||
28 | #define HW_MMU_COARSE_PAGE_SIZE 0x400 | ||
29 | |||
30 | /* hw_mmu_mixed_size_t: Enumerated Type used to specify whether to follow | ||
31 | CPU/TLB Element size */ | ||
32 | enum hw_mmu_mixed_size_t { | ||
33 | HW_MMU_TLBES, | ||
34 | HW_MMU_CPUES | ||
35 | }; | ||
36 | |||
37 | /* hw_mmu_map_attrs_t: Struct containing MMU mapping attributes */ | ||
38 | struct hw_mmu_map_attrs_t { | ||
39 | enum hw_endianism_t endianism; | ||
40 | enum hw_element_size_t element_size; | ||
41 | enum hw_mmu_mixed_size_t mixed_size; | ||
42 | bool donotlockmpupage; | ||
43 | }; | ||
44 | |||
45 | extern hw_status hw_mmu_enable(const void __iomem *base_address); | ||
46 | |||
47 | extern hw_status hw_mmu_disable(const void __iomem *base_address); | ||
48 | |||
49 | extern hw_status hw_mmu_num_locked_set(const void __iomem *base_address, | ||
50 | u32 num_locked_entries); | ||
51 | |||
52 | extern hw_status hw_mmu_victim_num_set(const void __iomem *base_address, | ||
53 | u32 victim_entry_num); | ||
54 | |||
55 | /* For MMU faults */ | ||
56 | extern hw_status hw_mmu_event_ack(const void __iomem *base_address, | ||
57 | u32 irq_mask); | ||
58 | |||
59 | extern hw_status hw_mmu_event_disable(const void __iomem *base_address, | ||
60 | u32 irq_mask); | ||
61 | |||
62 | extern hw_status hw_mmu_event_enable(const void __iomem *base_address, | ||
63 | u32 irq_mask); | ||
64 | |||
65 | extern hw_status hw_mmu_event_status(const void __iomem *base_address, | ||
66 | u32 *irq_mask); | ||
67 | |||
68 | extern hw_status hw_mmu_fault_addr_read(const void __iomem *base_address, | ||
69 | u32 *addr); | ||
70 | |||
71 | /* Set the TT base address */ | ||
72 | extern hw_status hw_mmu_ttb_set(const void __iomem *base_address, | ||
73 | u32 ttb_phys_addr); | ||
74 | |||
75 | extern hw_status hw_mmu_twl_enable(const void __iomem *base_address); | ||
76 | |||
77 | extern hw_status hw_mmu_twl_disable(const void __iomem *base_address); | ||
78 | |||
79 | extern hw_status hw_mmu_tlb_flush(const void __iomem *base_address, | ||
80 | u32 virtual_addr, u32 page_sz); | ||
81 | |||
82 | extern hw_status hw_mmu_tlb_add(const void __iomem *base_address, | ||
83 | u32 physical_addr, | ||
84 | u32 virtual_addr, | ||
85 | u32 page_sz, | ||
86 | u32 entry_num, | ||
87 | struct hw_mmu_map_attrs_t *map_attrs, | ||
88 | s8 preserved_bit, s8 valid_bit); | ||
89 | |||
90 | /* For PTEs */ | ||
91 | extern hw_status hw_mmu_pte_set(const u32 pg_tbl_va, | ||
92 | u32 physical_addr, | ||
93 | u32 virtual_addr, | ||
94 | u32 page_sz, | ||
95 | struct hw_mmu_map_attrs_t *map_attrs); | ||
96 | |||
97 | extern hw_status hw_mmu_pte_clear(const u32 pg_tbl_va, | ||
98 | u32 virtual_addr, u32 page_size); | ||
99 | |||
100 | void hw_mmu_tlb_flush_all(const void __iomem *base); | ||
101 | |||
102 | static inline u32 hw_mmu_pte_addr_l1(u32 l1_base, u32 va) | ||
103 | { | ||
104 | u32 pte_addr; | ||
105 | u32 va31_to20; | ||
106 | |||
107 | va31_to20 = va >> (20 - 2); /* Fold in the left-shift by 2 */ | ||
108 | va31_to20 &= 0xFFFFFFFCUL; | ||
109 | pte_addr = l1_base + va31_to20; | ||
110 | |||
111 | return pte_addr; | ||
112 | } | ||
113 | |||
114 | static inline u32 hw_mmu_pte_addr_l2(u32 l2_base, u32 va) | ||
115 | { | ||
116 | u32 pte_addr; | ||
117 | |||
118 | pte_addr = (l2_base & 0xFFFFFC00) | ((va >> 10) & 0x3FC); | ||
119 | |||
120 | return pte_addr; | ||
121 | } | ||
122 | |||
123 | static inline u32 hw_mmu_pte_coarse_l1(u32 pte_val) | ||
124 | { | ||
125 | u32 pte_coarse; | ||
126 | |||
127 | pte_coarse = pte_val & 0xFFFFFC00; | ||
128 | |||
129 | return pte_coarse; | ||
130 | } | ||
131 | |||
132 | static inline u32 hw_mmu_pte_size_l1(u32 pte_val) | ||
133 | { | ||
134 | u32 pte_size = 0; | ||
135 | |||
136 | if ((pte_val & 0x3) == 0x1) { | ||
137 | /* Points to L2 PT */ | ||
138 | pte_size = HW_MMU_COARSE_PAGE_SIZE; | ||
139 | } | ||
140 | |||
141 | if ((pte_val & 0x3) == 0x2) { | ||
142 | if (pte_val & (1 << 18)) | ||
143 | pte_size = HW_PAGE_SIZE16MB; | ||
144 | else | ||
145 | pte_size = HW_PAGE_SIZE1MB; | ||
146 | } | ||
147 | |||
148 | return pte_size; | ||
149 | } | ||
150 | |||
151 | static inline u32 hw_mmu_pte_size_l2(u32 pte_val) | ||
152 | { | ||
153 | u32 pte_size = 0; | ||
154 | |||
155 | if (pte_val & 0x2) | ||
156 | pte_size = HW_PAGE_SIZE4KB; | ||
157 | else if (pte_val & 0x1) | ||
158 | pte_size = HW_PAGE_SIZE64KB; | ||
159 | |||
160 | return pte_size; | ||
161 | } | ||
162 | |||
163 | #endif /* _HW_MMU_H */ | ||
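The inline helpers above encode the two-level ARM-style table walk the driver uses for the DSP MMU: an L1 descriptor is either a section/supersection or a pointer to an L2 (coarse) table of small/large pages. A minimal sketch, not part of this patch (the function name and the assumption that l1_base/da are already known to the caller are mine), of looking up the mapped page size for a DSP virtual address:

static u32 example_lookup_page_size(u32 l1_base, u32 da)
{
	u32 l1_pte = *(u32 *)hw_mmu_pte_addr_l1(l1_base, da);
	u32 size = hw_mmu_pte_size_l1(l1_pte);

	if (size == HW_MMU_COARSE_PAGE_SIZE) {
		/* L1 descriptor points at an L2 table: descend one level */
		u32 l2_base = hw_mmu_pte_coarse_l1(l1_pte);
		u32 l2_pte = *(u32 *)hw_mmu_pte_addr_l2(l2_base, da);

		size = hw_mmu_pte_size_l2(l2_pte);
	}

	/* A return value of 0 means the address is not mapped */
	return size;
}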
diff --git a/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h b/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h index dfb55cca34c7..38122dbf877a 100644 --- a/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h +++ b/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h | |||
@@ -68,6 +68,7 @@ struct cfg_hostres { | |||
68 | void __iomem *dw_per_base; | 68 | void __iomem *dw_per_base; |
69 | u32 dw_per_pm_base; | 69 | u32 dw_per_pm_base; |
70 | u32 dw_core_pm_base; | 70 | u32 dw_core_pm_base; |
71 | void __iomem *dw_dmmu_base; | ||
71 | void __iomem *dw_sys_ctrl_base; | 72 | void __iomem *dw_sys_ctrl_base; |
72 | }; | 73 | }; |
73 | 74 | ||
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dev.h b/drivers/staging/tidspbridge/include/dspbridge/dev.h index 9bdd48f57429..357458fadd2a 100644 --- a/drivers/staging/tidspbridge/include/dspbridge/dev.h +++ b/drivers/staging/tidspbridge/include/dspbridge/dev.h | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <dspbridge/nodedefs.h> | 27 | #include <dspbridge/nodedefs.h> |
28 | #include <dspbridge/dispdefs.h> | 28 | #include <dspbridge/dispdefs.h> |
29 | #include <dspbridge/dspdefs.h> | 29 | #include <dspbridge/dspdefs.h> |
30 | #include <dspbridge/dmm.h> | ||
30 | #include <dspbridge/host_os.h> | 31 | #include <dspbridge/host_os.h> |
31 | 32 | ||
32 | /* ----------------------------------- This */ | 33 | /* ----------------------------------- This */ |
@@ -233,6 +234,29 @@ extern int dev_get_cmm_mgr(struct dev_object *hdev_obj, | |||
233 | struct cmm_object **mgr); | 234 | struct cmm_object **mgr); |
234 | 235 | ||
235 | /* | 236 | /* |
237 | * ======== dev_get_dmm_mgr ======== | ||
238 | * Purpose: | ||
239 | * Retrieve the handle to the dynamic memory manager created for this | ||
240 | * device. | ||
241 | * Parameters: | ||
242 | * hdev_obj: Handle to device object created with | ||
243 | * dev_create_device(). | ||
244 | * *mgr: Ptr to location to store handle. | ||
245 | * Returns: | ||
246 | * 0: Success. | ||
247 | * -EFAULT: Invalid hdev_obj. | ||
248 | * Requires: | ||
249 | * mgr != NULL. | ||
250 | * DEV Initialized. | ||
251 | * Ensures: | ||
252 | * 0: *mgr contains a handle to a dynamic memory | ||
253 | * manager object, or NULL. | ||
254 | * else: *mgr is NULL. | ||
255 | */ | ||
256 | extern int dev_get_dmm_mgr(struct dev_object *hdev_obj, | ||
257 | struct dmm_object **mgr); | ||
258 | |||
259 | /* | ||
236 | * ======== dev_get_cod_mgr ======== | 260 | * ======== dev_get_cod_mgr ======== |
237 | * Purpose: | 261 | * Purpose: |
238 | * Retrieve the COD manager create for this device. | 262 | * Retrieve the COD manager create for this device. |
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dmm.h b/drivers/staging/tidspbridge/include/dspbridge/dmm.h new file mode 100644 index 000000000000..6c58335c5f60 --- /dev/null +++ b/drivers/staging/tidspbridge/include/dspbridge/dmm.h | |||
@@ -0,0 +1,75 @@ | |||
1 | /* | ||
2 | * dmm.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * The Dynamic Memory Mapping (DMM) module manages the DSP Virtual address | ||
7 | * space that can be directly mapped to any MPU buffer or memory region. | ||
8 | * | ||
9 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
10 | * | ||
11 | * This package is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License version 2 as | ||
13 | * published by the Free Software Foundation. | ||
14 | * | ||
15 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
16 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
17 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
18 | */ | ||
19 | |||
20 | #ifndef DMM_ | ||
21 | #define DMM_ | ||
22 | |||
23 | #include <dspbridge/dbdefs.h> | ||
24 | |||
25 | struct dmm_object; | ||
26 | |||
27 | /* DMM attributes used in dmm_create() */ | ||
28 | struct dmm_mgrattrs { | ||
29 | u32 reserved; | ||
30 | }; | ||
31 | |||
32 | #define DMMPOOLSIZE 0x4000000 | ||
33 | |||
34 | /* | ||
35 | * ======== dmm_get_handle ======== | ||
36 | * Purpose: | ||
37 | * Return the dynamic memory manager object for this device. | ||
38 | * This is typically called from the client process. | ||
39 | */ | ||
40 | |||
41 | extern int dmm_get_handle(void *hprocessor, | ||
42 | struct dmm_object **dmm_manager); | ||
43 | |||
44 | extern int dmm_reserve_memory(struct dmm_object *dmm_mgr, | ||
45 | u32 size, u32 *prsv_addr); | ||
46 | |||
47 | extern int dmm_un_reserve_memory(struct dmm_object *dmm_mgr, | ||
48 | u32 rsv_addr); | ||
49 | |||
50 | extern int dmm_map_memory(struct dmm_object *dmm_mgr, u32 addr, | ||
51 | u32 size); | ||
52 | |||
53 | extern int dmm_un_map_memory(struct dmm_object *dmm_mgr, | ||
54 | u32 addr, u32 *psize); | ||
55 | |||
56 | extern int dmm_destroy(struct dmm_object *dmm_mgr); | ||
57 | |||
58 | extern int dmm_delete_tables(struct dmm_object *dmm_mgr); | ||
59 | |||
60 | extern int dmm_create(struct dmm_object **dmm_manager, | ||
61 | struct dev_object *hdev_obj, | ||
62 | const struct dmm_mgrattrs *mgr_attrts); | ||
63 | |||
64 | extern bool dmm_init(void); | ||
65 | |||
66 | extern void dmm_exit(void); | ||
67 | |||
68 | extern int dmm_create_tables(struct dmm_object *dmm_mgr, | ||
69 | u32 addr, u32 size); | ||
70 | |||
71 | #ifdef DSP_DMM_DEBUG | ||
72 | u32 dmm_mem_map_dump(struct dmm_object *dmm_mgr); | ||
73 | #endif | ||
74 | |||
75 | #endif /* DMM_ */ | ||
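The header above exposes the DMM lifecycle the proc layer relies on: reserve a chunk of DSP virtual address space, mark (part of) it mapped, then undo both. The following is a hedged sketch only, not code from this patch (the function name is hypothetical and hprocessor is assumed to be a valid processor handle; sizes should be multiples of 4 KB since DMM accounts in 4 KB pages):

static int example_dmm_round_trip(void *hprocessor, u32 size)
{
	struct dmm_object *dmm_mgr;
	u32 rsv_addr, unmap_size;
	int status;

	status = dmm_get_handle(hprocessor, &dmm_mgr);
	if (status)
		return status;

	/* Carve "size" bytes out of the DSP virtual pool */
	status = dmm_reserve_memory(dmm_mgr, size, &rsv_addr);
	if (status)
		return status;

	/* Record that the reservation is now backed by a mapping */
	status = dmm_map_memory(dmm_mgr, rsv_addr, size);
	if (!status)
		dmm_un_map_memory(dmm_mgr, rsv_addr, &unmap_size);

	dmm_un_reserve_memory(dmm_mgr, rsv_addr);
	return status;
}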
diff --git a/drivers/staging/tidspbridge/include/dspbridge/drv.h b/drivers/staging/tidspbridge/include/dspbridge/drv.h index 75a2c9b5c6f2..c1f363ec9afa 100644 --- a/drivers/staging/tidspbridge/include/dspbridge/drv.h +++ b/drivers/staging/tidspbridge/include/dspbridge/drv.h | |||
@@ -108,6 +108,12 @@ struct dmm_map_object { | |||
108 | struct bridge_dma_map_info dma_info; | 108 | struct bridge_dma_map_info dma_info; |
109 | }; | 109 | }; |
110 | 110 | ||
111 | /* Used for DMM reserved memory accounting */ | ||
112 | struct dmm_rsv_object { | ||
113 | struct list_head link; | ||
114 | u32 dsp_reserved_addr; | ||
115 | }; | ||
116 | |||
111 | /* New structure (member of process context) abstracts DMM resource info */ | 117 | /* New structure (member of process context) abstracts DMM resource info */ |
112 | struct dspheap_res_object { | 118 | struct dspheap_res_object { |
113 | s32 heap_allocated; /* DMM status */ | 119 | s32 heap_allocated; /* DMM status */ |
@@ -159,6 +165,10 @@ struct process_context { | |||
159 | struct list_head dmm_map_list; | 165 | struct list_head dmm_map_list; |
160 | spinlock_t dmm_map_lock; | 166 | spinlock_t dmm_map_lock; |
161 | 167 | ||
168 | /* DMM reserved memory resources */ | ||
169 | struct list_head dmm_rsv_list; | ||
170 | spinlock_t dmm_rsv_lock; | ||
171 | |||
162 | /* DSP Heap resources */ | 172 | /* DSP Heap resources */ |
163 | struct dspheap_res_object *pdspheap_list; | 173 | struct dspheap_res_object *pdspheap_list; |
164 | 174 | ||
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dsp-mmu.h b/drivers/staging/tidspbridge/include/dspbridge/dsp-mmu.h deleted file mode 100644 index cb38d4cc0734..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/dsp-mmu.h +++ /dev/null | |||
@@ -1,67 +0,0 @@ | |||
1 | /* | ||
2 | * dsp-mmu.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * DSP iommu. | ||
7 | * | ||
8 | * Copyright (C) 2005-2010 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #ifndef _DSP_MMU_ | ||
20 | #define _DSP_MMU_ | ||
21 | |||
22 | #include <plat/iommu.h> | ||
23 | #include <plat/iovmm.h> | ||
24 | |||
25 | /** | ||
26 | * dsp_mmu_init() - initialize dsp_mmu module and returns a handle | ||
27 | * | ||
28 | * This function initialize dsp mmu module and returns a struct iommu | ||
29 | * handle to use it for dsp maps. | ||
30 | * | ||
31 | */ | ||
32 | struct iommu *dsp_mmu_init(void); | ||
33 | |||
34 | /** | ||
35 | * dsp_mmu_exit() - destroy dsp mmu module | ||
36 | * @mmu: Pointer to iommu handle. | ||
37 | * | ||
38 | * This function destroys dsp mmu module. | ||
39 | * | ||
40 | */ | ||
41 | void dsp_mmu_exit(struct iommu *mmu); | ||
42 | |||
43 | /** | ||
44 | * user_to_dsp_map() - maps user to dsp virtual address | ||
45 | * @mmu: Pointer to iommu handle. | ||
46 | * @uva: Virtual user space address. | ||
47 | * @da DSP address | ||
48 | * @size Buffer size to map. | ||
49 | * @usr_pgs struct page array pointer where the user pages will be stored | ||
50 | * | ||
51 | * This function maps a user space buffer into DSP virtual address. | ||
52 | * | ||
53 | */ | ||
54 | u32 user_to_dsp_map(struct iommu *mmu, u32 uva, u32 da, u32 size, | ||
55 | struct page **usr_pgs); | ||
56 | |||
57 | /** | ||
58 | * user_to_dsp_unmap() - unmaps DSP virtual buffer. | ||
59 | * @mmu: Pointer to iommu handle. | ||
60 | * @da DSP address | ||
61 | * | ||
62 | * This function unmaps a user space buffer into DSP virtual address. | ||
63 | * | ||
64 | */ | ||
65 | int user_to_dsp_unmap(struct iommu *mmu, u32 da); | ||
66 | |||
67 | #endif | ||
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h b/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h index 615363474810..0ae7d1646a1b 100644 --- a/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h +++ b/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h | |||
@@ -162,6 +162,48 @@ typedef int(*fxn_brd_memwrite) (struct bridge_dev_context | |||
162 | u32 mem_type); | 162 | u32 mem_type); |
163 | 163 | ||
164 | /* | 164 | /* |
165 | * ======== bridge_brd_mem_map ======== | ||
166 | * Purpose: | ||
167 | * Map an MPU memory region to a DSP/IVA memory space | ||
168 | * Parameters: | ||
169 | * dev_ctxt: Handle to Bridge driver defined device info. | ||
170 | * ul_mpu_addr: MPU memory region start address. | ||
171 | * virt_addr: DSP/IVA memory region u8 address. | ||
172 | * ul_num_bytes: Number of bytes to map. | ||
173 | * map_attrs: Mapping attributes (e.g. endianness). | ||
174 | * Returns: | ||
175 | * 0: Success. | ||
176 | * -EPERM: Other, unspecified error. | ||
177 | * Requires: | ||
178 | * dev_ctxt != NULL; | ||
179 | * Ensures: | ||
180 | */ | ||
181 | typedef int(*fxn_brd_memmap) (struct bridge_dev_context | ||
182 | * dev_ctxt, u32 ul_mpu_addr, | ||
183 | u32 virt_addr, u32 ul_num_bytes, | ||
184 | u32 map_attr, | ||
185 | struct page **mapped_pages); | ||
186 | |||
187 | /* | ||
188 | * ======== bridge_brd_mem_un_map ======== | ||
189 | * Purpose: | ||
190 | * Unmap an MPU memory region from DSP/IVA memory space | ||
191 | * Parameters: | ||
192 | * dev_ctxt: Handle to Bridge driver defined device info. | ||
193 | * virt_addr: DSP/IVA memory region u8 address. | ||
194 | * ul_num_bytes: Number of bytes to unmap. | ||
195 | * Returns: | ||
196 | * 0: Success. | ||
197 | * -EPERM: Other, unspecified error. | ||
198 | * Requires: | ||
199 | * dev_ctxt != NULL; | ||
200 | * Ensures: | ||
201 | */ | ||
202 | typedef int(*fxn_brd_memunmap) (struct bridge_dev_context | ||
203 | * dev_ctxt, | ||
204 | u32 virt_addr, u32 ul_num_bytes); | ||
205 | |||
206 | /* | ||
165 | * ======== bridge_brd_stop ======== | 207 | * ======== bridge_brd_stop ======== |
166 | * Purpose: | 208 | * Purpose: |
167 | * Bring board to the BRD_STOPPED state. | 209 | * Bring board to the BRD_STOPPED state. |
@@ -951,6 +993,8 @@ struct bridge_drv_interface { | |||
951 | fxn_brd_setstate pfn_brd_set_state; /* Sets the Board State */ | 993 | fxn_brd_setstate pfn_brd_set_state; /* Sets the Board State */ |
952 | fxn_brd_memcopy pfn_brd_mem_copy; /* Copies DSP Memory */ | 994 | fxn_brd_memcopy pfn_brd_mem_copy; /* Copies DSP Memory */ |
953 | fxn_brd_memwrite pfn_brd_mem_write; /* Write DSP Memory w/o halt */ | 995 | fxn_brd_memwrite pfn_brd_mem_write; /* Write DSP Memory w/o halt */ |
996 | fxn_brd_memmap pfn_brd_mem_map; /* Maps MPU mem to DSP mem */ | ||
997 | fxn_brd_memunmap pfn_brd_mem_un_map; /* Unmaps MPU mem from DSP mem */ | ||
954 | fxn_chnl_create pfn_chnl_create; /* Create channel manager. */ | 998 | fxn_chnl_create pfn_chnl_create; /* Create channel manager. */ |
955 | fxn_chnl_destroy pfn_chnl_destroy; /* Destroy channel manager. */ | 999 | fxn_chnl_destroy pfn_chnl_destroy; /* Destroy channel manager. */ |
956 | fxn_chnl_open pfn_chnl_open; /* Create a new channel. */ | 1000 | fxn_chnl_open pfn_chnl_open; /* Create a new channel. */ |
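The two new function-pointer slots are reached the same way as the rest of the bridge_drv_interface table: upper layers never call the board code directly, they dispatch through the stored pointers. A hedged illustration only (not part of the patch; the wrapper name is hypothetical, and the 0 passed for map_attr stands in for whatever mapping-attribute flags the real caller supplies):

static int example_map_through_interface(struct bridge_drv_interface *intf_fxns,
					 struct bridge_dev_context *dev_ctxt,
					 u32 mpu_addr, u32 dsp_va, u32 num_bytes,
					 struct page **pages)
{
	int status;

	status = (*intf_fxns->pfn_brd_mem_map)(dev_ctxt, mpu_addr, dsp_va,
					       num_bytes, 0, pages);
	if (status)
		return status;

	/* Later, the same region is torn down symmetrically */
	return (*intf_fxns->pfn_brd_mem_un_map)(dev_ctxt, dsp_va, num_bytes);
}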
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h b/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h index bad180108ada..41e0594dff34 100644 --- a/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h +++ b/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h | |||
@@ -19,6 +19,10 @@ | |||
19 | #ifndef DSPIOCTL_ | 19 | #ifndef DSPIOCTL_ |
20 | #define DSPIOCTL_ | 20 | #define DSPIOCTL_ |
21 | 21 | ||
22 | /* ------------------------------------ Hardware Abstraction Layer */ | ||
23 | #include <hw_defs.h> | ||
24 | #include <hw_mmu.h> | ||
25 | |||
22 | /* | 26 | /* |
23 | * Any IOCTLS at or above this value are reserved for standard Bridge driver | 27 | * Any IOCTLS at or above this value are reserved for standard Bridge driver |
24 | * interfaces. | 28 | * interfaces. |
@@ -61,6 +65,9 @@ struct bridge_ioctl_extproc { | |||
61 | /* GPP virtual address. __va does not work for ioremapped addresses */ | 65 | /* GPP virtual address. __va does not work for ioremapped addresses */ |
62 | u32 ul_gpp_va; | 66 | u32 ul_gpp_va; |
63 | u32 ul_size; /* Size of the mapped memory in bytes */ | 67 | u32 ul_size; /* Size of the mapped memory in bytes */ |
68 | enum hw_endianism_t endianism; | ||
69 | enum hw_mmu_mixed_size_t mixed_mode; | ||
70 | enum hw_element_size_t elem_size; | ||
64 | }; | 71 | }; |
65 | 72 | ||
66 | #endif /* DSPIOCTL_ */ | 73 | #endif /* DSPIOCTL_ */ |
diff --git a/drivers/staging/tidspbridge/include/dspbridge/proc.h b/drivers/staging/tidspbridge/include/dspbridge/proc.h index 2d12aab6b5bf..5e09fd165d9d 100644 --- a/drivers/staging/tidspbridge/include/dspbridge/proc.h +++ b/drivers/staging/tidspbridge/include/dspbridge/proc.h | |||
@@ -551,6 +551,29 @@ extern int proc_map(void *hprocessor, | |||
551 | struct process_context *pr_ctxt); | 551 | struct process_context *pr_ctxt); |
552 | 552 | ||
553 | /* | 553 | /* |
554 | * ======== proc_reserve_memory ======== | ||
555 | * Purpose: | ||
556 | * Reserve a virtually contiguous region of DSP address space. | ||
557 | * Parameters: | ||
558 | * hprocessor : The processor handle. | ||
559 | * ul_size : Size of the address space to reserve. | ||
560 | * pp_rsv_addr : Ptr to DSP side reserved u8 address. | ||
561 | * Returns: | ||
562 | * 0 : Success. | ||
563 | * -EFAULT : Invalid processor handle. | ||
564 | * -EPERM : General failure. | ||
565 | * -ENOMEM : Cannot reserve chunk of this size. | ||
566 | * Requires: | ||
567 | * pp_rsv_addr is not NULL | ||
568 | * PROC Initialized. | ||
569 | * Ensures: | ||
570 | * Details: | ||
571 | */ | ||
572 | extern int proc_reserve_memory(void *hprocessor, | ||
573 | u32 ul_size, void **pp_rsv_addr, | ||
574 | struct process_context *pr_ctxt); | ||
575 | |||
576 | /* | ||
554 | * ======== proc_un_map ======== | 577 | * ======== proc_un_map ======== |
555 | * Purpose: | 578 | * Purpose: |
556 | * Removes a MPU buffer mapping from the DSP address space. | 579 | * Removes a MPU buffer mapping from the DSP address space. |
@@ -572,4 +595,27 @@ extern int proc_map(void *hprocessor, | |||
572 | extern int proc_un_map(void *hprocessor, void *map_addr, | 595 | extern int proc_un_map(void *hprocessor, void *map_addr, |
573 | struct process_context *pr_ctxt); | 596 | struct process_context *pr_ctxt); |
574 | 597 | ||
598 | /* | ||
599 | * ======== proc_un_reserve_memory ======== | ||
600 | * Purpose: | ||
601 | * Frees a previously reserved region of DSP address space. | ||
602 | * Parameters: | ||
603 | * hprocessor : The processor handle. | ||
604 | * prsv_addr : Ptr to DSP side reserved u8 address. | ||
605 | * Returns: | ||
606 | * 0 : Success. | ||
607 | * -EFAULT : Invalid processor handle. | ||
608 | * -EPERM : General failure. | ||
609 | * -ENOENT : Cannot find a reserved region starting with this | ||
610 | * : address. | ||
611 | * Requires: | ||
612 | * prsv_addr is not NULL | ||
613 | * PROC Initialized. | ||
614 | * Ensures: | ||
615 | * Details: | ||
616 | */ | ||
617 | extern int proc_un_reserve_memory(void *hprocessor, | ||
618 | void *prsv_addr, | ||
619 | struct process_context *pr_ctxt); | ||
620 | |||
575 | #endif /* PROC_ */ | 621 | #endif /* PROC_ */ |
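The new proc_reserve_memory()/proc_un_reserve_memory() pair is meant to bracket the existing map/un-map calls: reservations carve out DSP virtual space, mappings populate it. A minimal sketch under those assumptions (not part of the patch; the function name is hypothetical, error handling is trimmed, and the proc_map() step is only indicated in a comment):

static int example_reserve_and_map(void *hprocessor, void *buf, u32 size,
				   struct process_context *pr_ctxt)
{
	void *rsv_addr;
	int status;

	(void)buf;	/* would be handed to proc_map() in step 2 */

	/* 1. Reserve DSP virtual address space for the buffer */
	status = proc_reserve_memory(hprocessor, size, &rsv_addr, pr_ctxt);
	if (status)
		return status;

	/* 2. ... proc_map() would map "buf" into the reserved region ... */

	/* 3. When the client is done, release the reservation again */
	return proc_un_reserve_memory(hprocessor, rsv_addr, pr_ctxt);
}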
diff --git a/drivers/staging/tidspbridge/pmgr/dev.c b/drivers/staging/tidspbridge/pmgr/dev.c index 7b30267ef0e2..132e960967b9 100644 --- a/drivers/staging/tidspbridge/pmgr/dev.c +++ b/drivers/staging/tidspbridge/pmgr/dev.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <dspbridge/cod.h> | 34 | #include <dspbridge/cod.h> |
35 | #include <dspbridge/drv.h> | 35 | #include <dspbridge/drv.h> |
36 | #include <dspbridge/proc.h> | 36 | #include <dspbridge/proc.h> |
37 | #include <dspbridge/dmm.h> | ||
37 | 38 | ||
38 | /* ----------------------------------- Resource Manager */ | 39 | /* ----------------------------------- Resource Manager */ |
39 | #include <dspbridge/mgr.h> | 40 | #include <dspbridge/mgr.h> |
@@ -74,6 +75,7 @@ struct dev_object { | |||
74 | struct msg_mgr *hmsg_mgr; /* Message manager. */ | 75 | struct msg_mgr *hmsg_mgr; /* Message manager. */ |
75 | struct io_mgr *hio_mgr; /* IO manager (CHNL, msg_ctrl) */ | 76 | struct io_mgr *hio_mgr; /* IO manager (CHNL, msg_ctrl) */ |
76 | struct cmm_object *hcmm_mgr; /* SM memory manager. */ | 77 | struct cmm_object *hcmm_mgr; /* SM memory manager. */ |
78 | struct dmm_object *dmm_mgr; /* Dynamic memory manager. */ | ||
77 | struct ldr_module *module_obj; /* Bridge Module handle. */ | 79 | struct ldr_module *module_obj; /* Bridge Module handle. */ |
78 | u32 word_size; /* DSP word size: quick access. */ | 80 | u32 word_size; /* DSP word size: quick access. */ |
79 | struct drv_object *hdrv_obj; /* Driver Object */ | 81 | struct drv_object *hdrv_obj; /* Driver Object */ |
@@ -248,6 +250,9 @@ int dev_create_device(struct dev_object **device_obj, | |||
248 | /* Instantiate the DEH module */ | 250 | /* Instantiate the DEH module */ |
249 | status = bridge_deh_create(&dev_obj->hdeh_mgr, dev_obj); | 251 | status = bridge_deh_create(&dev_obj->hdeh_mgr, dev_obj); |
250 | } | 252 | } |
253 | /* Create DMM mgr. */ | ||
254 | status = dmm_create(&dev_obj->dmm_mgr, | ||
255 | (struct dev_object *)dev_obj, NULL); | ||
251 | } | 256 | } |
252 | /* Add the new DEV_Object to the global list: */ | 257 | /* Add the new DEV_Object to the global list: */ |
253 | if (!status) { | 258 | if (!status) { |
@@ -273,6 +278,8 @@ leave: | |||
273 | kfree(dev_obj->proc_list); | 278 | kfree(dev_obj->proc_list); |
274 | if (dev_obj->cod_mgr) | 279 | if (dev_obj->cod_mgr) |
275 | cod_delete(dev_obj->cod_mgr); | 280 | cod_delete(dev_obj->cod_mgr); |
281 | if (dev_obj->dmm_mgr) | ||
282 | dmm_destroy(dev_obj->dmm_mgr); | ||
276 | kfree(dev_obj); | 283 | kfree(dev_obj); |
277 | } | 284 | } |
278 | 285 | ||
@@ -382,6 +389,11 @@ int dev_destroy_device(struct dev_object *hdev_obj) | |||
382 | dev_obj->hcmm_mgr = NULL; | 389 | dev_obj->hcmm_mgr = NULL; |
383 | } | 390 | } |
384 | 391 | ||
392 | if (dev_obj->dmm_mgr) { | ||
393 | dmm_destroy(dev_obj->dmm_mgr); | ||
394 | dev_obj->dmm_mgr = NULL; | ||
395 | } | ||
396 | |||
385 | /* Call the driver's bridge_dev_destroy() function: */ | 397 | /* Call the driver's bridge_dev_destroy() function: */ |
386 | /* Require of DevDestroy */ | 398 | /* Require of DevDestroy */ |
387 | if (dev_obj->hbridge_context) { | 399 | if (dev_obj->hbridge_context) { |
@@ -462,6 +474,32 @@ int dev_get_cmm_mgr(struct dev_object *hdev_obj, | |||
462 | } | 474 | } |
463 | 475 | ||
464 | /* | 476 | /* |
477 | * ======== dev_get_dmm_mgr ======== | ||
478 | * Purpose: | ||
479 | * Retrieve the handle to the dynamic memory manager created for this | ||
480 | * device. | ||
481 | */ | ||
482 | int dev_get_dmm_mgr(struct dev_object *hdev_obj, | ||
483 | struct dmm_object **mgr) | ||
484 | { | ||
485 | int status = 0; | ||
486 | struct dev_object *dev_obj = hdev_obj; | ||
487 | |||
488 | DBC_REQUIRE(refs > 0); | ||
489 | DBC_REQUIRE(mgr != NULL); | ||
490 | |||
491 | if (hdev_obj) { | ||
492 | *mgr = dev_obj->dmm_mgr; | ||
493 | } else { | ||
494 | *mgr = NULL; | ||
495 | status = -EFAULT; | ||
496 | } | ||
497 | |||
498 | DBC_ENSURE(!status || (mgr != NULL && *mgr == NULL)); | ||
499 | return status; | ||
500 | } | ||
501 | |||
502 | /* | ||
465 | * ======== dev_get_cod_mgr ======== | 503 | * ======== dev_get_cod_mgr ======== |
466 | * Purpose: | 504 | * Purpose: |
467 | * Retrieve the COD manager create for this device. | 505 | * Retrieve the COD manager create for this device. |
@@ -713,8 +751,10 @@ void dev_exit(void) | |||
713 | 751 | ||
714 | refs--; | 752 | refs--; |
715 | 753 | ||
716 | if (refs == 0) | 754 | if (refs == 0) { |
717 | cmm_exit(); | 755 | cmm_exit(); |
756 | dmm_exit(); | ||
757 | } | ||
718 | 758 | ||
719 | DBC_ENSURE(refs >= 0); | 759 | DBC_ENSURE(refs >= 0); |
720 | } | 760 | } |
@@ -726,12 +766,25 @@ void dev_exit(void) | |||
726 | */ | 766 | */ |
727 | bool dev_init(void) | 767 | bool dev_init(void) |
728 | { | 768 | { |
729 | bool ret = true; | 769 | bool cmm_ret, dmm_ret, ret = true; |
730 | 770 | ||
731 | DBC_REQUIRE(refs >= 0); | 771 | DBC_REQUIRE(refs >= 0); |
732 | 772 | ||
733 | if (refs == 0) | 773 | if (refs == 0) { |
734 | ret = cmm_init(); | 774 | cmm_ret = cmm_init(); |
775 | dmm_ret = dmm_init(); | ||
776 | |||
777 | ret = cmm_ret && dmm_ret; | ||
778 | |||
779 | if (!ret) { | ||
780 | if (cmm_ret) | ||
781 | cmm_exit(); | ||
782 | |||
783 | if (dmm_ret) | ||
784 | dmm_exit(); | ||
785 | |||
786 | } | ||
787 | } | ||
735 | 788 | ||
736 | if (ret) | 789 | if (ret) |
737 | refs++; | 790 | refs++; |
@@ -1065,6 +1118,8 @@ static void store_interface_fxns(struct bridge_drv_interface *drv_fxns, | |||
1065 | STORE_FXN(fxn_brd_setstate, pfn_brd_set_state); | 1118 | STORE_FXN(fxn_brd_setstate, pfn_brd_set_state); |
1066 | STORE_FXN(fxn_brd_memcopy, pfn_brd_mem_copy); | 1119 | STORE_FXN(fxn_brd_memcopy, pfn_brd_mem_copy); |
1067 | STORE_FXN(fxn_brd_memwrite, pfn_brd_mem_write); | 1120 | STORE_FXN(fxn_brd_memwrite, pfn_brd_mem_write); |
1121 | STORE_FXN(fxn_brd_memmap, pfn_brd_mem_map); | ||
1122 | STORE_FXN(fxn_brd_memunmap, pfn_brd_mem_un_map); | ||
1068 | STORE_FXN(fxn_chnl_create, pfn_chnl_create); | 1123 | STORE_FXN(fxn_chnl_create, pfn_chnl_create); |
1069 | STORE_FXN(fxn_chnl_destroy, pfn_chnl_destroy); | 1124 | STORE_FXN(fxn_chnl_destroy, pfn_chnl_destroy); |
1070 | STORE_FXN(fxn_chnl_open, pfn_chnl_open); | 1125 | STORE_FXN(fxn_chnl_open, pfn_chnl_open); |
diff --git a/drivers/staging/tidspbridge/pmgr/dmm.c b/drivers/staging/tidspbridge/pmgr/dmm.c new file mode 100644 index 000000000000..8685233d7627 --- /dev/null +++ b/drivers/staging/tidspbridge/pmgr/dmm.c | |||
@@ -0,0 +1,533 @@ | |||
1 | /* | ||
2 | * dmm.c | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * The Dynamic Memory Manager (DMM) module manages the DSP Virtual address | ||
7 | * space that can be directly mapped to any MPU buffer or memory region | ||
8 | * | ||
9 | * Notes: | ||
10 | * Region: Generic memory entity having a start address and a size | ||
11 | * Chunk: Reserved region | ||
12 | * | ||
13 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
14 | * | ||
15 | * This package is free software; you can redistribute it and/or modify | ||
16 | * it under the terms of the GNU General Public License version 2 as | ||
17 | * published by the Free Software Foundation. | ||
18 | * | ||
19 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
20 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
21 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
22 | */ | ||
23 | #include <linux/types.h> | ||
24 | |||
25 | /* ----------------------------------- Host OS */ | ||
26 | #include <dspbridge/host_os.h> | ||
27 | |||
28 | /* ----------------------------------- DSP/BIOS Bridge */ | ||
29 | #include <dspbridge/dbdefs.h> | ||
30 | |||
31 | /* ----------------------------------- Trace & Debug */ | ||
32 | #include <dspbridge/dbc.h> | ||
33 | |||
34 | /* ----------------------------------- OS Adaptation Layer */ | ||
35 | #include <dspbridge/sync.h> | ||
36 | |||
37 | /* ----------------------------------- Platform Manager */ | ||
38 | #include <dspbridge/dev.h> | ||
39 | #include <dspbridge/proc.h> | ||
40 | |||
41 | /* ----------------------------------- This */ | ||
42 | #include <dspbridge/dmm.h> | ||
43 | |||
44 | /* ----------------------------------- Defines, Data Structures, Typedefs */ | ||
45 | #define DMM_ADDR_VIRTUAL(a) \ | ||
46 | (((struct map_page *)(a) - virtual_mapping_table) * PG_SIZE4K +\ | ||
47 | dyn_mem_map_beg) | ||
48 | #define DMM_ADDR_TO_INDEX(a) (((a) - dyn_mem_map_beg) / PG_SIZE4K) | ||
49 | |||
50 | /* DMM Mgr */ | ||
51 | struct dmm_object { | ||
52 | /* Dmm Lock is used to serialize access to the memory | ||
53 | * manager across multiple threads. */ | ||
54 | spinlock_t dmm_lock; /* Lock to access dmm mgr */ | ||
55 | }; | ||
56 | |||
57 | /* ----------------------------------- Globals */ | ||
58 | static u32 refs; /* module reference count */ | ||
59 | struct map_page { | ||
60 | u32 region_size:15; | ||
61 | u32 mapped_size:15; | ||
62 | u32 reserved:1; | ||
63 | u32 mapped:1; | ||
64 | }; | ||
65 | |||
66 | /* Create the free list */ | ||
67 | static struct map_page *virtual_mapping_table; | ||
68 | static u32 free_region; /* The index of free region */ | ||
69 | static u32 free_size; | ||
70 | static u32 dyn_mem_map_beg; /* The Beginning of dynamic memory mapping */ | ||
71 | static u32 table_size; /* The size of virt and phys pages tables */ | ||
72 | |||
73 | /* ----------------------------------- Function Prototypes */ | ||
74 | static struct map_page *get_region(u32 addr); | ||
75 | static struct map_page *get_free_region(u32 len); | ||
76 | static struct map_page *get_mapped_region(u32 addrs); | ||
77 | |||
78 | /* ======== dmm_create_tables ======== | ||
79 | * Purpose: | ||
80 | * Create the table that holds information about the physical | ||
81 | * addresses of the buffer pages passed in by the user, and the | ||
82 | * table that holds information about the virtual memory reserved | ||
83 | * for the DSP. | ||
84 | */ | ||
85 | int dmm_create_tables(struct dmm_object *dmm_mgr, u32 addr, u32 size) | ||
86 | { | ||
87 | struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr; | ||
88 | int status = 0; | ||
89 | |||
90 | status = dmm_delete_tables(dmm_obj); | ||
91 | if (!status) { | ||
92 | dyn_mem_map_beg = addr; | ||
93 | table_size = PG_ALIGN_HIGH(size, PG_SIZE4K) / PG_SIZE4K; | ||
94 | /* Create the free list */ | ||
95 | virtual_mapping_table = __vmalloc(table_size * | ||
96 | sizeof(struct map_page), GFP_KERNEL | | ||
97 | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL); | ||
98 | if (virtual_mapping_table == NULL) | ||
99 | status = -ENOMEM; | ||
100 | else { | ||
101 | /* On successful allocation, | ||
102 | * all entries are zero ('free') */ | ||
103 | free_region = 0; | ||
104 | free_size = table_size * PG_SIZE4K; | ||
105 | virtual_mapping_table[0].region_size = table_size; | ||
106 | } | ||
107 | } | ||
108 | |||
109 | if (status) | ||
110 | pr_err("%s: failure, status 0x%x\n", __func__, status); | ||
111 | |||
112 | return status; | ||
113 | } | ||
114 | |||
115 | /* | ||
116 | * ======== dmm_create ======== | ||
117 | * Purpose: | ||
118 | * Create a dynamic memory manager object. | ||
119 | */ | ||
120 | int dmm_create(struct dmm_object **dmm_manager, | ||
121 | struct dev_object *hdev_obj, | ||
122 | const struct dmm_mgrattrs *mgr_attrts) | ||
123 | { | ||
124 | struct dmm_object *dmm_obj = NULL; | ||
125 | int status = 0; | ||
126 | DBC_REQUIRE(refs > 0); | ||
127 | DBC_REQUIRE(dmm_manager != NULL); | ||
128 | |||
129 | *dmm_manager = NULL; | ||
130 | /* create, zero, and tag a dmm mgr object */ | ||
131 | dmm_obj = kzalloc(sizeof(struct dmm_object), GFP_KERNEL); | ||
132 | if (dmm_obj != NULL) { | ||
133 | spin_lock_init(&dmm_obj->dmm_lock); | ||
134 | *dmm_manager = dmm_obj; | ||
135 | } else { | ||
136 | status = -ENOMEM; | ||
137 | } | ||
138 | |||
139 | return status; | ||
140 | } | ||
141 | |||
142 | /* | ||
143 | * ======== dmm_destroy ======== | ||
144 | * Purpose: | ||
145 | * Release the dynamic memory manager resources. | ||
146 | */ | ||
147 | int dmm_destroy(struct dmm_object *dmm_mgr) | ||
148 | { | ||
149 | struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr; | ||
150 | int status = 0; | ||
151 | |||
152 | DBC_REQUIRE(refs > 0); | ||
153 | if (dmm_mgr) { | ||
154 | status = dmm_delete_tables(dmm_obj); | ||
155 | if (!status) | ||
156 | kfree(dmm_obj); | ||
157 | } else | ||
158 | status = -EFAULT; | ||
159 | |||
160 | return status; | ||
161 | } | ||
162 | |||
163 | /* | ||
164 | * ======== dmm_delete_tables ======== | ||
165 | * Purpose: | ||
166 | * Delete DMM Tables. | ||
167 | */ | ||
168 | int dmm_delete_tables(struct dmm_object *dmm_mgr) | ||
169 | { | ||
170 | int status = 0; | ||
171 | |||
172 | DBC_REQUIRE(refs > 0); | ||
173 | /* Delete all DMM tables */ | ||
174 | if (dmm_mgr) | ||
175 | vfree(virtual_mapping_table); | ||
176 | else | ||
177 | status = -EFAULT; | ||
178 | return status; | ||
179 | } | ||
180 | |||
181 | /* | ||
182 | * ======== dmm_exit ======== | ||
183 | * Purpose: | ||
184 | * Discontinue usage of module; free resources when reference count | ||
185 | * reaches 0. | ||
186 | */ | ||
187 | void dmm_exit(void) | ||
188 | { | ||
189 | DBC_REQUIRE(refs > 0); | ||
190 | |||
191 | refs--; | ||
192 | } | ||
193 | |||
194 | /* | ||
195 | * ======== dmm_get_handle ======== | ||
196 | * Purpose: | ||
197 | * Return the dynamic memory manager object for this device. | ||
198 | * This is typically called from the client process. | ||
199 | */ | ||
200 | int dmm_get_handle(void *hprocessor, struct dmm_object **dmm_manager) | ||
201 | { | ||
202 | int status = 0; | ||
203 | struct dev_object *hdev_obj; | ||
204 | |||
205 | DBC_REQUIRE(refs > 0); | ||
206 | DBC_REQUIRE(dmm_manager != NULL); | ||
207 | if (hprocessor != NULL) | ||
208 | status = proc_get_dev_object(hprocessor, &hdev_obj); | ||
209 | else | ||
210 | hdev_obj = dev_get_first(); /* default */ | ||
211 | |||
212 | if (!status) | ||
213 | status = dev_get_dmm_mgr(hdev_obj, dmm_manager); | ||
214 | |||
215 | return status; | ||
216 | } | ||
217 | |||
218 | /* | ||
219 | * ======== dmm_init ======== | ||
220 | * Purpose: | ||
221 | * Initializes private state of DMM module. | ||
222 | */ | ||
223 | bool dmm_init(void) | ||
224 | { | ||
225 | bool ret = true; | ||
226 | |||
227 | DBC_REQUIRE(refs >= 0); | ||
228 | |||
229 | if (ret) | ||
230 | refs++; | ||
231 | |||
232 | DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0))); | ||
233 | |||
234 | virtual_mapping_table = NULL; | ||
235 | table_size = 0; | ||
236 | |||
237 | return ret; | ||
238 | } | ||
239 | |||
240 | /* | ||
241 | * ======== dmm_map_memory ======== | ||
242 | * Purpose: | ||
243 | * Add a mapping block to the reserved chunk. DMM assumes that this block | ||
244 | * will be mapped in the DSP/IVA's address space. DMM returns an error if a | ||
245 | * mapping overlaps another one. This function stores the info that will be | ||
246 | * required later while unmapping the block. | ||
247 | */ | ||
248 | int dmm_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 size) | ||
249 | { | ||
250 | struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr; | ||
251 | struct map_page *chunk; | ||
252 | int status = 0; | ||
253 | |||
254 | spin_lock(&dmm_obj->dmm_lock); | ||
255 | /* Find the Reserved memory chunk containing the DSP block to | ||
256 | * be mapped */ | ||
257 | chunk = (struct map_page *)get_region(addr); | ||
258 | if (chunk != NULL) { | ||
259 | /* Mark the region 'mapped', leave the 'reserved' info as-is */ | ||
260 | chunk->mapped = true; | ||
261 | chunk->mapped_size = (size / PG_SIZE4K); | ||
262 | } else | ||
263 | status = -ENOENT; | ||
264 | spin_unlock(&dmm_obj->dmm_lock); | ||
265 | |||
266 | dev_dbg(bridge, "%s dmm_mgr %p, addr %x, size %x\n\tstatus %x, " | ||
267 | "chunk %p", __func__, dmm_mgr, addr, size, status, chunk); | ||
268 | |||
269 | return status; | ||
270 | } | ||
271 | |||
272 | /* | ||
273 | * ======== dmm_reserve_memory ======== | ||
274 | * Purpose: | ||
275 | * Reserve a chunk of virtually contiguous DSP/IVA address space. | ||
276 | */ | ||
277 | int dmm_reserve_memory(struct dmm_object *dmm_mgr, u32 size, | ||
278 | u32 *prsv_addr) | ||
279 | { | ||
280 | int status = 0; | ||
281 | struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr; | ||
282 | struct map_page *node; | ||
283 | u32 rsv_addr = 0; | ||
284 | u32 rsv_size = 0; | ||
285 | |||
286 | spin_lock(&dmm_obj->dmm_lock); | ||
287 | |||
288 | /* Try to get a DSP chunk from the free list */ | ||
289 | node = get_free_region(size); | ||
290 | if (node != NULL) { | ||
291 | /* DSP chunk of given size is available. */ | ||
292 | rsv_addr = DMM_ADDR_VIRTUAL(node); | ||
293 | /* Calculate the number of entries to use */ | ||
294 | rsv_size = size / PG_SIZE4K; | ||
295 | if (rsv_size < node->region_size) { | ||
296 | /* Mark remainder of free region */ | ||
297 | node[rsv_size].mapped = false; | ||
298 | node[rsv_size].reserved = false; | ||
299 | node[rsv_size].region_size = | ||
300 | node->region_size - rsv_size; | ||
301 | node[rsv_size].mapped_size = 0; | ||
302 | } | ||
303 | /* get_free_region() returns a first-fit chunk; we only use what | ||
304 | is requested. */ | ||
305 | node->mapped = false; | ||
306 | node->reserved = true; | ||
307 | node->region_size = rsv_size; | ||
308 | node->mapped_size = 0; | ||
309 | /* Return the chunk's starting address */ | ||
310 | *prsv_addr = rsv_addr; | ||
311 | } else | ||
312 | /* DSP chunk of the given size is not available */ | ||
313 | status = -ENOMEM; | ||
314 | |||
315 | spin_unlock(&dmm_obj->dmm_lock); | ||
316 | |||
317 | dev_dbg(bridge, "%s dmm_mgr %p, size %x, prsv_addr %p\n\tstatus %x, " | ||
318 | "rsv_addr %x, rsv_size %x\n", __func__, dmm_mgr, size, | ||
319 | prsv_addr, status, rsv_addr, rsv_size); | ||
320 | |||
321 | return status; | ||
322 | } | ||
323 | |||
324 | /* | ||
325 | * ======== dmm_un_map_memory ======== | ||
326 | * Purpose: | ||
327 | * Remove the mapped block from the reserved chunk. | ||
328 | */ | ||
329 | int dmm_un_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 *psize) | ||
330 | { | ||
331 | struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr; | ||
332 | struct map_page *chunk; | ||
333 | int status = 0; | ||
334 | |||
335 | spin_lock(&dmm_obj->dmm_lock); | ||
336 | chunk = get_mapped_region(addr); | ||
337 | if (chunk == NULL) | ||
338 | status = -ENOENT; | ||
339 | |||
340 | if (!status) { | ||
341 | /* Unmap the region */ | ||
342 | *psize = chunk->mapped_size * PG_SIZE4K; | ||
343 | chunk->mapped = false; | ||
344 | chunk->mapped_size = 0; | ||
345 | } | ||
346 | spin_unlock(&dmm_obj->dmm_lock); | ||
347 | |||
348 | dev_dbg(bridge, "%s: dmm_mgr %p, addr %x, psize %p\n\tstatus %x, " | ||
349 | "chunk %p\n", __func__, dmm_mgr, addr, psize, status, chunk); | ||
350 | |||
351 | return status; | ||
352 | } | ||
353 | |||
354 | /* | ||
355 | * ======== dmm_un_reserve_memory ======== | ||
356 | * Purpose: | ||
357 | * Free a chunk of reserved DSP/IVA address space. | ||
358 | */ | ||
359 | int dmm_un_reserve_memory(struct dmm_object *dmm_mgr, u32 rsv_addr) | ||
360 | { | ||
361 | struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr; | ||
362 | struct map_page *chunk; | ||
363 | u32 i; | ||
364 | int status = 0; | ||
365 | u32 chunk_size; | ||
366 | |||
367 | spin_lock(&dmm_obj->dmm_lock); | ||
368 | |||
369 | /* Find the chunk containing the reserved address */ | ||
370 | chunk = get_mapped_region(rsv_addr); | ||
371 | if (chunk == NULL) | ||
372 | status = -ENOENT; | ||
373 | |||
374 | if (!status) { | ||
375 | /* Free all the mapped pages for this reserved region */ | ||
376 | i = 0; | ||
377 | while (i < chunk->region_size) { | ||
378 | if (chunk[i].mapped) { | ||
379 | /* Remove mapping from the page tables. */ | ||
380 | chunk_size = chunk[i].mapped_size; | ||
381 | /* Clear the mapping flags */ | ||
382 | chunk[i].mapped = false; | ||
383 | chunk[i].mapped_size = 0; | ||
384 | i += chunk_size; | ||
385 | } else | ||
386 | i++; | ||
387 | } | ||
388 | /* Clear the flags (mark the region 'free') */ | ||
389 | chunk->reserved = false; | ||
390 | /* NOTE: We do NOT coalesce free regions here. | ||
391 | * Free regions are coalesced in get_free_region() as it traverses | ||
392 | * the whole mapping table. | ||
393 | */ | ||
394 | } | ||
395 | spin_unlock(&dmm_obj->dmm_lock); | ||
396 | |||
397 | dev_dbg(bridge, "%s: dmm_mgr %p, rsv_addr %x\n\tstatus %x chunk %p", | ||
398 | __func__, dmm_mgr, rsv_addr, status, chunk); | ||
399 | |||
400 | return status; | ||
401 | } | ||
402 | |||
403 | /* | ||
404 | * ======== get_region ======== | ||
405 | * Purpose: | ||
406 | * Returns the mapping table region containing the specified address | ||
407 | */ | ||
408 | static struct map_page *get_region(u32 addr) | ||
409 | { | ||
410 | struct map_page *curr_region = NULL; | ||
411 | u32 i = 0; | ||
412 | |||
413 | if (virtual_mapping_table != NULL) { | ||
414 | /* find page mapped by this address */ | ||
415 | i = DMM_ADDR_TO_INDEX(addr); | ||
416 | if (i < table_size) | ||
417 | curr_region = virtual_mapping_table + i; | ||
418 | } | ||
419 | |||
420 | dev_dbg(bridge, "%s: curr_region %p, free_region %d, free_size %d\n", | ||
421 | __func__, curr_region, free_region, free_size); | ||
422 | return curr_region; | ||
423 | } | ||
424 | |||
425 | /* | ||
426 | * ======== get_free_region ======== | ||
427 | * Purpose: | ||
428 | * Returns the requested free region | ||
429 | */ | ||
430 | static struct map_page *get_free_region(u32 len) | ||
431 | { | ||
432 | struct map_page *curr_region = NULL; | ||
433 | u32 i = 0; | ||
434 | u32 region_size = 0; | ||
435 | u32 next_i = 0; | ||
436 | |||
437 | if (virtual_mapping_table == NULL) | ||
438 | return curr_region; | ||
439 | if (len > free_size) { | ||
440 | /* Find the largest free region | ||
441 | * (coalesce during the traversal) */ | ||
442 | while (i < table_size) { | ||
443 | region_size = virtual_mapping_table[i].region_size; | ||
444 | next_i = i + region_size; | ||
445 | if (virtual_mapping_table[i].reserved == false) { | ||
446 | /* Coalesce, if possible */ | ||
447 | if (next_i < table_size && | ||
448 | virtual_mapping_table[next_i].reserved | ||
449 | == false) { | ||
450 | virtual_mapping_table[i].region_size += | ||
451 | virtual_mapping_table | ||
452 | [next_i].region_size; | ||
453 | continue; | ||
454 | } | ||
455 | region_size *= PG_SIZE4K; | ||
456 | if (region_size > free_size) { | ||
457 | free_region = i; | ||
458 | free_size = region_size; | ||
459 | } | ||
460 | } | ||
461 | i = next_i; | ||
462 | } | ||
463 | } | ||
464 | if (len <= free_size) { | ||
465 | curr_region = virtual_mapping_table + free_region; | ||
466 | free_region += (len / PG_SIZE4K); | ||
467 | free_size -= len; | ||
468 | } | ||
469 | return curr_region; | ||
470 | } | ||
471 | |||
472 | /* | ||
473 | * ======== get_mapped_region ======== | ||
474 | * Purpose: | ||
475 | * Returns the requested mapped region | ||
476 | */ | ||
477 | static struct map_page *get_mapped_region(u32 addrs) | ||
478 | { | ||
479 | u32 i = 0; | ||
480 | struct map_page *curr_region = NULL; | ||
481 | |||
482 | if (virtual_mapping_table == NULL) | ||
483 | return curr_region; | ||
484 | |||
485 | i = DMM_ADDR_TO_INDEX(addrs); | ||
486 | if (i < table_size && (virtual_mapping_table[i].mapped || | ||
487 | virtual_mapping_table[i].reserved)) | ||
488 | curr_region = virtual_mapping_table + i; | ||
489 | return curr_region; | ||
490 | } | ||
491 | |||
492 | #ifdef DSP_DMM_DEBUG | ||
493 | u32 dmm_mem_map_dump(struct dmm_object *dmm_mgr) | ||
494 | { | ||
495 | struct map_page *curr_node = NULL; | ||
496 | u32 i; | ||
497 | u32 freemem = 0; | ||
498 | u32 bigsize = 0; | ||
499 | |||
500 | spin_lock(&dmm_mgr->dmm_lock); | ||
501 | |||
502 | if (virtual_mapping_table != NULL) { | ||
503 | for (i = 0; i < table_size; i += | ||
504 | virtual_mapping_table[i].region_size) { | ||
505 | curr_node = virtual_mapping_table + i; | ||
506 | if (curr_node->reserved) { | ||
507 | /*printk("RESERVED size = 0x%x, " | ||
508 | "Map size = 0x%x\n", | ||
509 | (curr_node->region_size * PG_SIZE4K), | ||
510 | (curr_node->mapped == false) ? 0 : | ||
511 | (curr_node->mapped_size * PG_SIZE4K)); | ||
512 | */ | ||
513 | } else { | ||
514 | /* printk("UNRESERVED size = 0x%x\n", | ||
515 | (curr_node->region_size * PG_SIZE4K)); | ||
516 | */ | ||
517 | freemem += (curr_node->region_size * PG_SIZE4K); | ||
518 | if (curr_node->region_size > bigsize) | ||
519 | bigsize = curr_node->region_size; | ||
520 | } | ||
521 | } | ||
522 | } | ||
523 | spin_unlock(&dmm_mgr->dmm_lock); | ||
524 | printk(KERN_INFO "Total DSP VA FREE memory = %d Mbytes\n", | ||
525 | freemem / (1024 * 1024)); | ||
526 | printk(KERN_INFO "Total DSP VA USED memory= %d Mbytes \n", | ||
527 | (((table_size * PG_SIZE4K) - freemem)) / (1024 * 1024)); | ||
528 | printk(KERN_INFO "DSP VA - Biggest FREE block = %d Mbytes \n\n", | ||
529 | (bigsize * PG_SIZE4K / (1024 * 1024))); | ||
530 | |||
531 | return 0; | ||
532 | } | ||
533 | #endif | ||
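
The table bookkeeping in dmm.c above is easiest to follow with a small worked model. The sketch below is a stand-alone, assumption-laden approximation rather than the driver code: the pool base dyn_mem_map_beg, the simplified struct fields and the helper names are invented for illustration; only the idea of indexing a flat array of 4 KiB page descriptors, the way DMM_ADDR_TO_INDEX() and DMM_ADDR_VIRTUAL() are used in get_region() and dmm_reserve_memory(), is taken from the code above.

/* Minimal user-space model of the DMM table indexing (illustrative only). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PG_SIZE4K 4096u

struct map_page {
	uint32_t region_size;	/* length of this region, in 4 KiB pages */
	uint32_t mapped_size;	/* mapped part of the region, in pages   */
	bool reserved;
	bool mapped;
};

/* Hypothetical DSP virtual address where the DMM pool starts. */
static const uint32_t dyn_mem_map_beg = 0x20000000u;

static uint32_t addr_to_index(uint32_t addr)
{
	return (addr - dyn_mem_map_beg) / PG_SIZE4K;
}

static uint32_t index_to_addr(uint32_t idx)
{
	return dyn_mem_map_beg + idx * PG_SIZE4K;
}

int main(void)
{
	/* Any address inside page 5 of the pool resolves to table entry 5,
	 * and the entry maps back to the page-aligned start address. */
	uint32_t addr = dyn_mem_map_beg + 5 * PG_SIZE4K + 123;
	uint32_t idx = addr_to_index(addr);

	printf("addr 0x%08x -> index %u -> 0x%08x\n",
	       addr, idx, index_to_addr(idx));
	return 0;
}

In the same spirit, an entry that is reserved but not mapped corresponds to the state dmm_reserve_memory() leaves a chunk in before dmm_map_memory() marks it mapped.
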
diff --git a/drivers/staging/tidspbridge/pmgr/dspapi.c b/drivers/staging/tidspbridge/pmgr/dspapi.c index 981551ce4d78..86ca785f1913 100644 --- a/drivers/staging/tidspbridge/pmgr/dspapi.c +++ b/drivers/staging/tidspbridge/pmgr/dspapi.c | |||
@@ -993,10 +993,27 @@ u32 procwrap_register_notify(union trapped_args *args, void *pr_ctxt) | |||
993 | /* | 993 | /* |
994 | * ======== procwrap_reserve_memory ======== | 994 | * ======== procwrap_reserve_memory ======== |
995 | */ | 995 | */ |
996 | u32 __deprecated procwrap_reserve_memory(union trapped_args *args, | 996 | u32 procwrap_reserve_memory(union trapped_args *args, void *pr_ctxt) |
997 | void *pr_ctxt) | ||
998 | { | 997 | { |
999 | return 0; | 998 | int status; |
999 | void *prsv_addr; | ||
1000 | void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor; | ||
1001 | |||
1002 | if ((args->args_proc_rsvmem.ul_size <= 0) || | ||
1003 | (args->args_proc_rsvmem.ul_size & (PG_SIZE4K - 1)) != 0) | ||
1004 | return -EINVAL; | ||
1005 | |||
1006 | status = proc_reserve_memory(hprocessor, | ||
1007 | args->args_proc_rsvmem.ul_size, &prsv_addr, | ||
1008 | pr_ctxt); | ||
1009 | if (!status) { | ||
1010 | if (put_user(prsv_addr, args->args_proc_rsvmem.pp_rsv_addr)) { | ||
1011 | status = -EINVAL; | ||
1012 | proc_un_reserve_memory(args->args_proc_rsvmem. | ||
1013 | hprocessor, prsv_addr, pr_ctxt); | ||
1014 | } | ||
1015 | } | ||
1016 | return status; | ||
1000 | } | 1017 | } |
1001 | 1018 | ||
1002 | /* | 1019 | /* |
@@ -1025,10 +1042,15 @@ u32 procwrap_un_map(union trapped_args *args, void *pr_ctxt) | |||
1025 | /* | 1042 | /* |
1026 | * ======== procwrap_un_reserve_memory ======== | 1043 | * ======== procwrap_un_reserve_memory ======== |
1027 | */ | 1044 | */ |
1028 | u32 __deprecated procwrap_un_reserve_memory(union trapped_args *args, | 1045 | u32 procwrap_un_reserve_memory(union trapped_args *args, void *pr_ctxt) |
1029 | void *pr_ctxt) | ||
1030 | { | 1046 | { |
1031 | return 0; | 1047 | int status; |
1048 | void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor; | ||
1049 | |||
1050 | status = proc_un_reserve_memory(hprocessor, | ||
1051 | args->args_proc_unrsvmem.prsv_addr, | ||
1052 | pr_ctxt); | ||
1053 | return status; | ||
1032 | } | 1054 | } |
1033 | 1055 | ||
1034 | /* | 1056 | /* |
diff --git a/drivers/staging/tidspbridge/rmgr/drv.c b/drivers/staging/tidspbridge/rmgr/drv.c index 91cc168516e5..81b1b9013550 100644 --- a/drivers/staging/tidspbridge/rmgr/drv.c +++ b/drivers/staging/tidspbridge/rmgr/drv.c | |||
@@ -146,6 +146,7 @@ int drv_remove_all_dmm_res_elements(void *process_ctxt) | |||
146 | struct process_context *ctxt = (struct process_context *)process_ctxt; | 146 | struct process_context *ctxt = (struct process_context *)process_ctxt; |
147 | int status = 0; | 147 | int status = 0; |
148 | struct dmm_map_object *temp_map, *map_obj; | 148 | struct dmm_map_object *temp_map, *map_obj; |
149 | struct dmm_rsv_object *temp_rsv, *rsv_obj; | ||
149 | 150 | ||
150 | /* Free DMM mapped memory resources */ | 151 | /* Free DMM mapped memory resources */ |
151 | list_for_each_entry_safe(map_obj, temp_map, &ctxt->dmm_map_list, link) { | 152 | list_for_each_entry_safe(map_obj, temp_map, &ctxt->dmm_map_list, link) { |
@@ -155,6 +156,16 @@ int drv_remove_all_dmm_res_elements(void *process_ctxt) | |||
155 | pr_err("%s: proc_un_map failed!" | 156 | pr_err("%s: proc_un_map failed!" |
156 | " status = 0x%xn", __func__, status); | 157 | " status = 0x%xn", __func__, status); |
157 | } | 158 | } |
159 | |||
160 | /* Free DMM reserved memory resources */ | ||
161 | list_for_each_entry_safe(rsv_obj, temp_rsv, &ctxt->dmm_rsv_list, link) { | ||
162 | status = proc_un_reserve_memory(ctxt->hprocessor, (void *) | ||
163 | rsv_obj->dsp_reserved_addr, | ||
164 | ctxt); | ||
165 | if (status) | ||
166 | pr_err("%s: proc_un_reserve_memory failed!" | ||
167 | " status = 0x%xn", __func__, status); | ||
168 | } | ||
158 | return status; | 169 | return status; |
159 | } | 170 | } |
160 | 171 | ||
@@ -732,6 +743,7 @@ static int request_bridge_resources(struct cfg_hostres *res) | |||
732 | host_res->dw_sys_ctrl_base = ioremap(OMAP_SYSC_BASE, OMAP_SYSC_SIZE); | 743 | host_res->dw_sys_ctrl_base = ioremap(OMAP_SYSC_BASE, OMAP_SYSC_SIZE); |
733 | dev_dbg(bridge, "dw_mem_base[0] 0x%x\n", host_res->dw_mem_base[0]); | 744 | dev_dbg(bridge, "dw_mem_base[0] 0x%x\n", host_res->dw_mem_base[0]); |
734 | dev_dbg(bridge, "dw_mem_base[3] 0x%x\n", host_res->dw_mem_base[3]); | 745 | dev_dbg(bridge, "dw_mem_base[3] 0x%x\n", host_res->dw_mem_base[3]); |
746 | dev_dbg(bridge, "dw_dmmu_base %p\n", host_res->dw_dmmu_base); | ||
735 | 747 | ||
736 | /* for 24xx base port is not mapping the memory for DSP | 748 | /* for 24xx base port is not mapping the memory for DSP |
737 | * internal memory TODO Do a ioremap here */ | 749 | * internal memory TODO Do a ioremap here */ |
@@ -785,6 +797,8 @@ int drv_request_bridge_res_dsp(void **phost_resources) | |||
785 | OMAP_PER_PRM_SIZE); | 797 | OMAP_PER_PRM_SIZE); |
786 | host_res->dw_core_pm_base = (u32) ioremap(OMAP_CORE_PRM_BASE, | 798 | host_res->dw_core_pm_base = (u32) ioremap(OMAP_CORE_PRM_BASE, |
787 | OMAP_CORE_PRM_SIZE); | 799 | OMAP_CORE_PRM_SIZE); |
800 | host_res->dw_dmmu_base = ioremap(OMAP_DMMU_BASE, | ||
801 | OMAP_DMMU_SIZE); | ||
788 | 802 | ||
789 | dev_dbg(bridge, "dw_mem_base[0] 0x%x\n", | 803 | dev_dbg(bridge, "dw_mem_base[0] 0x%x\n", |
790 | host_res->dw_mem_base[0]); | 804 | host_res->dw_mem_base[0]); |
@@ -796,6 +810,7 @@ int drv_request_bridge_res_dsp(void **phost_resources) | |||
796 | host_res->dw_mem_base[3]); | 810 | host_res->dw_mem_base[3]); |
797 | dev_dbg(bridge, "dw_mem_base[4] 0x%x\n", | 811 | dev_dbg(bridge, "dw_mem_base[4] 0x%x\n", |
798 | host_res->dw_mem_base[4]); | 812 | host_res->dw_mem_base[4]); |
813 | dev_dbg(bridge, "dw_dmmu_base %p\n", host_res->dw_dmmu_base); | ||
799 | 814 | ||
800 | shm_size = drv_datap->shm_size; | 815 | shm_size = drv_datap->shm_size; |
801 | if (shm_size >= 0x10000) { | 816 | if (shm_size >= 0x10000) { |
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.c b/drivers/staging/tidspbridge/rmgr/drv_interface.c index 34be43fec044..324fcdffb3b3 100644 --- a/drivers/staging/tidspbridge/rmgr/drv_interface.c +++ b/drivers/staging/tidspbridge/rmgr/drv_interface.c | |||
@@ -509,6 +509,8 @@ static int bridge_open(struct inode *ip, struct file *filp) | |||
509 | pr_ctxt->res_state = PROC_RES_ALLOCATED; | 509 | pr_ctxt->res_state = PROC_RES_ALLOCATED; |
510 | spin_lock_init(&pr_ctxt->dmm_map_lock); | 510 | spin_lock_init(&pr_ctxt->dmm_map_lock); |
511 | INIT_LIST_HEAD(&pr_ctxt->dmm_map_list); | 511 | INIT_LIST_HEAD(&pr_ctxt->dmm_map_list); |
512 | spin_lock_init(&pr_ctxt->dmm_rsv_lock); | ||
513 | INIT_LIST_HEAD(&pr_ctxt->dmm_rsv_list); | ||
512 | 514 | ||
513 | pr_ctxt->node_id = kzalloc(sizeof(struct idr), GFP_KERNEL); | 515 | pr_ctxt->node_id = kzalloc(sizeof(struct idr), GFP_KERNEL); |
514 | if (pr_ctxt->node_id) { | 516 | if (pr_ctxt->node_id) { |
diff --git a/drivers/staging/tidspbridge/rmgr/node.c b/drivers/staging/tidspbridge/rmgr/node.c index a660247f527a..1562f3c1281c 100644 --- a/drivers/staging/tidspbridge/rmgr/node.c +++ b/drivers/staging/tidspbridge/rmgr/node.c | |||
@@ -56,6 +56,7 @@ | |||
56 | /* ----------------------------------- This */ | 56 | /* ----------------------------------- This */ |
57 | #include <dspbridge/nodepriv.h> | 57 | #include <dspbridge/nodepriv.h> |
58 | #include <dspbridge/node.h> | 58 | #include <dspbridge/node.h> |
59 | #include <dspbridge/dmm.h> | ||
59 | 60 | ||
60 | /* Static/Dynamic Loader includes */ | 61 | /* Static/Dynamic Loader includes */ |
61 | #include <dspbridge/dbll.h> | 62 | #include <dspbridge/dbll.h> |
@@ -316,6 +317,10 @@ int node_allocate(struct proc_object *hprocessor, | |||
316 | u32 mapped_addr = 0; | 317 | u32 mapped_addr = 0; |
317 | u32 map_attrs = 0x0; | 318 | u32 map_attrs = 0x0; |
318 | struct dsp_processorstate proc_state; | 319 | struct dsp_processorstate proc_state; |
320 | #ifdef DSP_DMM_DEBUG | ||
321 | struct dmm_object *dmm_mgr; | ||
322 | struct proc_object *p_proc_object = (struct proc_object *)hprocessor; | ||
323 | #endif | ||
319 | 324 | ||
320 | void *node_res; | 325 | void *node_res; |
321 | 326 | ||
@@ -425,12 +430,34 @@ int node_allocate(struct proc_object *hprocessor, | |||
425 | if (status) | 430 | if (status) |
426 | goto func_cont; | 431 | goto func_cont; |
427 | 432 | ||
433 | status = proc_reserve_memory(hprocessor, | ||
434 | pnode->create_args.asa.task_arg_obj. | ||
435 | heap_size + PAGE_SIZE, | ||
436 | (void **)&(pnode->create_args.asa. | ||
437 | task_arg_obj.udsp_heap_res_addr), | ||
438 | pr_ctxt); | ||
439 | if (status) { | ||
440 | pr_err("%s: Failed to reserve memory for heap: 0x%x\n", | ||
441 | __func__, status); | ||
442 | goto func_cont; | ||
443 | } | ||
444 | #ifdef DSP_DMM_DEBUG | ||
445 | status = dmm_get_handle(p_proc_object, &dmm_mgr); | ||
446 | if (!dmm_mgr) { | ||
447 | status = DSP_EHANDLE; | ||
448 | goto func_cont; | ||
449 | } | ||
450 | |||
451 | dmm_mem_map_dump(dmm_mgr); | ||
452 | #endif | ||
453 | |||
428 | map_attrs |= DSP_MAPLITTLEENDIAN; | 454 | map_attrs |= DSP_MAPLITTLEENDIAN; |
429 | map_attrs |= DSP_MAPELEMSIZE32; | 455 | map_attrs |= DSP_MAPELEMSIZE32; |
430 | map_attrs |= DSP_MAPVIRTUALADDR; | 456 | map_attrs |= DSP_MAPVIRTUALADDR; |
431 | status = proc_map(hprocessor, (void *)attr_in->pgpp_virt_addr, | 457 | status = proc_map(hprocessor, (void *)attr_in->pgpp_virt_addr, |
432 | pnode->create_args.asa.task_arg_obj.heap_size, | 458 | pnode->create_args.asa.task_arg_obj.heap_size, |
433 | NULL, (void **)&mapped_addr, map_attrs, | 459 | (void *)pnode->create_args.asa.task_arg_obj. |
460 | udsp_heap_res_addr, (void **)&mapped_addr, map_attrs, | ||
434 | pr_ctxt); | 461 | pr_ctxt); |
435 | if (status) | 462 | if (status) |
436 | pr_err("%s: Failed to map memory for Heap: 0x%x\n", | 463 | pr_err("%s: Failed to map memory for Heap: 0x%x\n", |
@@ -2484,7 +2511,11 @@ static void delete_node(struct node_object *hnode, | |||
2484 | struct stream_chnl stream; | 2511 | struct stream_chnl stream; |
2485 | struct node_msgargs node_msg_args; | 2512 | struct node_msgargs node_msg_args; |
2486 | struct node_taskargs task_arg_obj; | 2513 | struct node_taskargs task_arg_obj; |
2487 | 2514 | #ifdef DSP_DMM_DEBUG | |
2515 | struct dmm_object *dmm_mgr; | ||
2516 | struct proc_object *p_proc_object = | ||
2517 | (struct proc_object *)hnode->hprocessor; | ||
2518 | #endif | ||
2488 | int status; | 2519 | int status; |
2489 | if (!hnode) | 2520 | if (!hnode) |
2490 | goto func_end; | 2521 | goto func_end; |
@@ -2545,6 +2576,19 @@ static void delete_node(struct node_object *hnode, | |||
2545 | status = proc_un_map(hnode->hprocessor, (void *) | 2576 | status = proc_un_map(hnode->hprocessor, (void *) |
2546 | task_arg_obj.udsp_heap_addr, | 2577 | task_arg_obj.udsp_heap_addr, |
2547 | pr_ctxt); | 2578 | pr_ctxt); |
2579 | |||
2580 | status = proc_un_reserve_memory(hnode->hprocessor, | ||
2581 | (void *) | ||
2582 | task_arg_obj. | ||
2583 | udsp_heap_res_addr, | ||
2584 | pr_ctxt); | ||
2585 | #ifdef DSP_DMM_DEBUG | ||
2586 | status = dmm_get_handle(p_proc_object, &dmm_mgr); | ||
2587 | if (dmm_mgr) | ||
2588 | dmm_mem_map_dump(dmm_mgr); | ||
2589 | else | ||
2590 | status = DSP_EHANDLE; | ||
2591 | #endif | ||
2548 | } | 2592 | } |
2549 | } | 2593 | } |
2550 | if (node_type != NODE_MESSAGE) { | 2594 | if (node_type != NODE_MESSAGE) { |
diff --git a/drivers/staging/tidspbridge/rmgr/proc.c b/drivers/staging/tidspbridge/rmgr/proc.c index 7a15a02efedf..b47d7aa747b1 100644 --- a/drivers/staging/tidspbridge/rmgr/proc.c +++ b/drivers/staging/tidspbridge/rmgr/proc.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <dspbridge/cod.h> | 39 | #include <dspbridge/cod.h> |
40 | #include <dspbridge/dev.h> | 40 | #include <dspbridge/dev.h> |
41 | #include <dspbridge/procpriv.h> | 41 | #include <dspbridge/procpriv.h> |
42 | #include <dspbridge/dmm.h> | ||
42 | 43 | ||
43 | /* ----------------------------------- Resource Manager */ | 44 | /* ----------------------------------- Resource Manager */ |
44 | #include <dspbridge/mgr.h> | 45 | #include <dspbridge/mgr.h> |
@@ -51,7 +52,6 @@ | |||
51 | #include <dspbridge/msg.h> | 52 | #include <dspbridge/msg.h> |
52 | #include <dspbridge/dspioctl.h> | 53 | #include <dspbridge/dspioctl.h> |
53 | #include <dspbridge/drv.h> | 54 | #include <dspbridge/drv.h> |
54 | #include <_tiomap.h> | ||
55 | 55 | ||
56 | /* ----------------------------------- This */ | 56 | /* ----------------------------------- This */ |
57 | #include <dspbridge/proc.h> | 57 | #include <dspbridge/proc.h> |
@@ -151,21 +151,34 @@ static struct dmm_map_object *add_mapping_info(struct process_context *pr_ctxt, | |||
151 | return map_obj; | 151 | return map_obj; |
152 | } | 152 | } |
153 | 153 | ||
154 | static int match_exact_map_obj(struct dmm_map_object *map_obj, | ||
155 | u32 dsp_addr, u32 size) | ||
156 | { | ||
157 | if (map_obj->dsp_addr == dsp_addr && map_obj->size != size) | ||
158 | pr_err("%s: addr match (0x%x), size don't (0x%x != 0x%x)\n", | ||
159 | __func__, dsp_addr, map_obj->size, size); | ||
160 | |||
161 | return map_obj->dsp_addr == dsp_addr && | ||
162 | map_obj->size == size; | ||
163 | } | ||
164 | |||
154 | static void remove_mapping_information(struct process_context *pr_ctxt, | 165 | static void remove_mapping_information(struct process_context *pr_ctxt, |
155 | u32 dsp_addr) | 166 | u32 dsp_addr, u32 size) |
156 | { | 167 | { |
157 | struct dmm_map_object *map_obj; | 168 | struct dmm_map_object *map_obj; |
158 | 169 | ||
159 | pr_debug("%s: looking for virt 0x%x\n", __func__, dsp_addr); | 170 | pr_debug("%s: looking for virt 0x%x size 0x%x\n", __func__, |
171 | dsp_addr, size); | ||
160 | 172 | ||
161 | spin_lock(&pr_ctxt->dmm_map_lock); | 173 | spin_lock(&pr_ctxt->dmm_map_lock); |
162 | list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) { | 174 | list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) { |
163 | pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x\n", | 175 | pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n", |
164 | __func__, | 176 | __func__, |
165 | map_obj->mpu_addr, | 177 | map_obj->mpu_addr, |
166 | map_obj->dsp_addr); | 178 | map_obj->dsp_addr, |
179 | map_obj->size); | ||
167 | 180 | ||
168 | if (map_obj->dsp_addr == dsp_addr) { | 181 | if (match_exact_map_obj(map_obj, dsp_addr, size)) { |
169 | pr_debug("%s: match, deleting map info\n", __func__); | 182 | pr_debug("%s: match, deleting map info\n", __func__); |
170 | list_del(&map_obj->link); | 183 | list_del(&map_obj->link); |
171 | kfree(map_obj->dma_info.sg); | 184 | kfree(map_obj->dma_info.sg); |
@@ -1077,6 +1090,7 @@ int proc_load(void *hprocessor, const s32 argc_index, | |||
1077 | s32 cnew_envp; /* " " in new_envp[] */ | 1090 | s32 cnew_envp; /* " " in new_envp[] */ |
1078 | s32 nproc_id = 0; /* Anticipate MP version. */ | 1091 | s32 nproc_id = 0; /* Anticipate MP version. */ |
1079 | struct dcd_manager *hdcd_handle; | 1092 | struct dcd_manager *hdcd_handle; |
1093 | struct dmm_object *dmm_mgr; | ||
1080 | u32 dw_ext_end; | 1094 | u32 dw_ext_end; |
1081 | u32 proc_id; | 1095 | u32 proc_id; |
1082 | int brd_state; | 1096 | int brd_state; |
@@ -1267,6 +1281,25 @@ int proc_load(void *hprocessor, const s32 argc_index, | |||
1267 | if (!status) | 1281 | if (!status) |
1268 | status = cod_get_sym_value(cod_mgr, EXTEND, | 1282 | status = cod_get_sym_value(cod_mgr, EXTEND, |
1269 | &dw_ext_end); | 1283 | &dw_ext_end); |
1284 | |||
1285 | /* Reset DMM structs and add an initial free chunk */ | ||
1286 | if (!status) { | ||
1287 | status = | ||
1288 | dev_get_dmm_mgr(p_proc_object->hdev_obj, | ||
1289 | &dmm_mgr); | ||
1290 | if (dmm_mgr) { | ||
1291 | /* Set dw_ext_end to the DMM start byte | ||
1292 | * address */ | ||
1293 | dw_ext_end = | ||
1294 | (dw_ext_end + 1) * DSPWORDSIZE; | ||
1295 | /* DMM memory is from EXT_END */ | ||
1296 | status = dmm_create_tables(dmm_mgr, | ||
1297 | dw_ext_end, | ||
1298 | DMMPOOLSIZE); | ||
1299 | } else { | ||
1300 | status = -EFAULT; | ||
1301 | } | ||
1302 | } | ||
1270 | } | 1303 | } |
1271 | } | 1304 | } |
1272 | /* Restore the original argv[0] */ | 1305 | /* Restore the original argv[0] */ |
@@ -1319,10 +1352,12 @@ int proc_map(void *hprocessor, void *pmpu_addr, u32 ul_size, | |||
1319 | { | 1352 | { |
1320 | u32 va_align; | 1353 | u32 va_align; |
1321 | u32 pa_align; | 1354 | u32 pa_align; |
1355 | struct dmm_object *dmm_mgr; | ||
1322 | u32 size_align; | 1356 | u32 size_align; |
1323 | int status = 0; | 1357 | int status = 0; |
1324 | struct proc_object *p_proc_object = (struct proc_object *)hprocessor; | 1358 | struct proc_object *p_proc_object = (struct proc_object *)hprocessor; |
1325 | struct dmm_map_object *map_obj; | 1359 | struct dmm_map_object *map_obj; |
1360 | u32 tmp_addr = 0; | ||
1326 | 1361 | ||
1327 | #ifdef CONFIG_TIDSPBRIDGE_CACHE_LINE_CHECK | 1362 | #ifdef CONFIG_TIDSPBRIDGE_CACHE_LINE_CHECK |
1328 | if ((ul_map_attr & BUFMODE_MASK) != RBUF) { | 1363 | if ((ul_map_attr & BUFMODE_MASK) != RBUF) { |
@@ -1347,30 +1382,33 @@ int proc_map(void *hprocessor, void *pmpu_addr, u32 ul_size, | |||
1347 | } | 1382 | } |
1348 | /* Critical section */ | 1383 | /* Critical section */ |
1349 | mutex_lock(&proc_lock); | 1384 | mutex_lock(&proc_lock); |
1385 | dmm_get_handle(p_proc_object, &dmm_mgr); | ||
1386 | if (dmm_mgr) | ||
1387 | status = dmm_map_memory(dmm_mgr, va_align, size_align); | ||
1388 | else | ||
1389 | status = -EFAULT; | ||
1350 | 1390 | ||
1351 | /* Add mapping to the page tables. */ | 1391 | /* Add mapping to the page tables. */ |
1352 | if (!status) { | 1392 | if (!status) { |
1393 | |||
1394 | /* Mapped address = MSB of VA | LSB of PA */ | ||
1395 | tmp_addr = (va_align | ((u32) pmpu_addr & (PG_SIZE4K - 1))); | ||
1353 | /* mapped memory resource tracking */ | 1396 | /* mapped memory resource tracking */ |
1354 | map_obj = add_mapping_info(pr_ctxt, pa_align, va_align, | 1397 | map_obj = add_mapping_info(pr_ctxt, pa_align, tmp_addr, |
1355 | size_align); | 1398 | size_align); |
1356 | if (!map_obj) { | 1399 | if (!map_obj) |
1357 | status = -ENOMEM; | 1400 | status = -ENOMEM; |
1358 | } else { | 1401 | else |
1359 | va_align = user_to_dsp_map( | 1402 | status = (*p_proc_object->intf_fxns->pfn_brd_mem_map) |
1360 | p_proc_object->hbridge_context->dsp_mmu, | 1403 | (p_proc_object->hbridge_context, pa_align, va_align, |
1361 | pa_align, va_align, size_align, | 1404 | size_align, ul_map_attr, map_obj->pages); |
1362 | map_obj->pages); | ||
1363 | if (IS_ERR_VALUE(va_align)) | ||
1364 | status = (int)va_align; | ||
1365 | } | ||
1366 | } | 1405 | } |
1367 | if (!status) { | 1406 | if (!status) { |
1368 | /* Mapped address = MSB of VA | LSB of PA */ | 1407 | /* Mapped address = MSB of VA | LSB of PA */ |
1369 | map_obj->dsp_addr = (va_align | | 1408 | *pp_map_addr = (void *) tmp_addr; |
1370 | ((u32)pmpu_addr & (PG_SIZE4K - 1))); | ||
1371 | *pp_map_addr = (void *)map_obj->dsp_addr; | ||
1372 | } else { | 1409 | } else { |
1373 | remove_mapping_information(pr_ctxt, va_align); | 1410 | remove_mapping_information(pr_ctxt, tmp_addr, size_align); |
1411 | dmm_un_map_memory(dmm_mgr, va_align, &size_align); | ||
1374 | } | 1412 | } |
1375 | mutex_unlock(&proc_lock); | 1413 | mutex_unlock(&proc_lock); |
1376 | 1414 | ||
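
The tmp_addr computation in the proc_map() hunk above combines the page-aligned DSP virtual address with the sub-page offset of the user buffer ("MSB of VA | LSB of PA"). The following worked example only repeats that bit arithmetic with made-up addresses so the resulting layout is easy to see.

/* Worked example of the mapped-address combination used in proc_map().
 * Addresses are invented; only the arithmetic mirrors the hunk above. */
#include <stdint.h>
#include <stdio.h>

#define PG_SIZE4K 4096u

int main(void)
{
	uint32_t va_align = 0x11002000u;	/* 4 KiB-aligned DSP VA       */
	uint32_t pmpu_addr = 0x4802a10cu;	/* user buffer, offset 0x10c  */
	uint32_t tmp_addr;

	/* Take the aligned virtual page from the DSP side and the byte
	 * offset within the page from the MPU side. */
	tmp_addr = va_align | (pmpu_addr & (PG_SIZE4K - 1));

	printf("mapped DSP address: 0x%08x\n", tmp_addr);	/* 0x1100210c */
	return 0;
}
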
@@ -1463,6 +1501,55 @@ func_end: | |||
1463 | } | 1501 | } |
1464 | 1502 | ||
1465 | /* | 1503 | /* |
1504 | * ======== proc_reserve_memory ======== | ||
1505 | * Purpose: | ||
1506 | * Reserve a virtually contiguous region of DSP address space. | ||
1507 | */ | ||
1508 | int proc_reserve_memory(void *hprocessor, u32 ul_size, | ||
1509 | void **pp_rsv_addr, | ||
1510 | struct process_context *pr_ctxt) | ||
1511 | { | ||
1512 | struct dmm_object *dmm_mgr; | ||
1513 | int status = 0; | ||
1514 | struct proc_object *p_proc_object = (struct proc_object *)hprocessor; | ||
1515 | struct dmm_rsv_object *rsv_obj; | ||
1516 | |||
1517 | if (!p_proc_object) { | ||
1518 | status = -EFAULT; | ||
1519 | goto func_end; | ||
1520 | } | ||
1521 | |||
1522 | status = dmm_get_handle(p_proc_object, &dmm_mgr); | ||
1523 | if (!dmm_mgr) { | ||
1524 | status = -EFAULT; | ||
1525 | goto func_end; | ||
1526 | } | ||
1527 | |||
1528 | status = dmm_reserve_memory(dmm_mgr, ul_size, (u32 *) pp_rsv_addr); | ||
1529 | if (status != 0) | ||
1530 | goto func_end; | ||
1531 | |||
1532 | /* | ||
1533 | * A successful reserve should be followed by insertion of rsv_obj | ||
1534 | * into dmm_rsv_list, so that reserved memory resource tracking | ||
1535 | * remains uptodate | ||
1536 | */ | ||
1537 | rsv_obj = kmalloc(sizeof(struct dmm_rsv_object), GFP_KERNEL); | ||
1538 | if (rsv_obj) { | ||
1539 | rsv_obj->dsp_reserved_addr = (u32) *pp_rsv_addr; | ||
1540 | spin_lock(&pr_ctxt->dmm_rsv_lock); | ||
1541 | list_add(&rsv_obj->link, &pr_ctxt->dmm_rsv_list); | ||
1542 | spin_unlock(&pr_ctxt->dmm_rsv_lock); | ||
1543 | } | ||
1544 | |||
1545 | func_end: | ||
1546 | dev_dbg(bridge, "%s: hprocessor: 0x%p ul_size: 0x%x pp_rsv_addr: 0x%p " | ||
1547 | "status 0x%x\n", __func__, hprocessor, | ||
1548 | ul_size, pp_rsv_addr, status); | ||
1549 | return status; | ||
1550 | } | ||
1551 | |||
1552 | /* | ||
1466 | * ======== proc_start ======== | 1553 | * ======== proc_start ======== |
1467 | * Purpose: | 1554 | * Purpose: |
1468 | * Start a processor running. | 1555 | * Start a processor running. |
@@ -1610,7 +1697,9 @@ int proc_un_map(void *hprocessor, void *map_addr, | |||
1610 | { | 1697 | { |
1611 | int status = 0; | 1698 | int status = 0; |
1612 | struct proc_object *p_proc_object = (struct proc_object *)hprocessor; | 1699 | struct proc_object *p_proc_object = (struct proc_object *)hprocessor; |
1700 | struct dmm_object *dmm_mgr; | ||
1613 | u32 va_align; | 1701 | u32 va_align; |
1702 | u32 size_align; | ||
1614 | 1703 | ||
1615 | va_align = PG_ALIGN_LOW((u32) map_addr, PG_SIZE4K); | 1704 | va_align = PG_ALIGN_LOW((u32) map_addr, PG_SIZE4K); |
1616 | if (!p_proc_object) { | 1705 | if (!p_proc_object) { |
@@ -1618,11 +1707,24 @@ int proc_un_map(void *hprocessor, void *map_addr, | |||
1618 | goto func_end; | 1707 | goto func_end; |
1619 | } | 1708 | } |
1620 | 1709 | ||
1710 | status = dmm_get_handle(hprocessor, &dmm_mgr); | ||
1711 | if (!dmm_mgr) { | ||
1712 | status = -EFAULT; | ||
1713 | goto func_end; | ||
1714 | } | ||
1715 | |||
1621 | /* Critical section */ | 1716 | /* Critical section */ |
1622 | mutex_lock(&proc_lock); | 1717 | mutex_lock(&proc_lock); |
1718 | /* | ||
1719 | * Update DMM structures. Get the size to unmap. | ||
1720 | * This function returns error if the VA is not mapped | ||
1721 | */ | ||
1722 | status = dmm_un_map_memory(dmm_mgr, (u32) va_align, &size_align); | ||
1623 | /* Remove mapping from the page tables. */ | 1723 | /* Remove mapping from the page tables. */ |
1624 | status = user_to_dsp_unmap(p_proc_object->hbridge_context->dsp_mmu, | 1724 | if (!status) { |
1625 | va_align); | 1725 | status = (*p_proc_object->intf_fxns->pfn_brd_mem_un_map) |
1726 | (p_proc_object->hbridge_context, va_align, size_align); | ||
1727 | } | ||
1626 | 1728 | ||
1627 | mutex_unlock(&proc_lock); | 1729 | mutex_unlock(&proc_lock); |
1628 | if (status) | 1730 | if (status) |
@@ -1633,7 +1735,7 @@ int proc_un_map(void *hprocessor, void *map_addr, | |||
1633 | * from dmm_map_list, so that mapped memory resource tracking | 1735 | * from dmm_map_list, so that mapped memory resource tracking |
1634 | * remains uptodate | 1736 | * remains uptodate |
1635 | */ | 1737 | */ |
1636 | remove_mapping_information(pr_ctxt, (u32) map_addr); | 1738 | remove_mapping_information(pr_ctxt, (u32) map_addr, size_align); |
1637 | 1739 | ||
1638 | func_end: | 1740 | func_end: |
1639 | dev_dbg(bridge, "%s: hprocessor: 0x%p map_addr: 0x%p status: 0x%x\n", | 1741 | dev_dbg(bridge, "%s: hprocessor: 0x%p map_addr: 0x%p status: 0x%x\n", |
@@ -1642,6 +1744,55 @@ func_end: | |||
1642 | } | 1744 | } |
1643 | 1745 | ||
1644 | /* | 1746 | /* |
1747 | * ======== proc_un_reserve_memory ======== | ||
1748 | * Purpose: | ||
1749 | * Frees a previously reserved region of DSP address space. | ||
1750 | */ | ||
1751 | int proc_un_reserve_memory(void *hprocessor, void *prsv_addr, | ||
1752 | struct process_context *pr_ctxt) | ||
1753 | { | ||
1754 | struct dmm_object *dmm_mgr; | ||
1755 | int status = 0; | ||
1756 | struct proc_object *p_proc_object = (struct proc_object *)hprocessor; | ||
1757 | struct dmm_rsv_object *rsv_obj; | ||
1758 | |||
1759 | if (!p_proc_object) { | ||
1760 | status = -EFAULT; | ||
1761 | goto func_end; | ||
1762 | } | ||
1763 | |||
1764 | status = dmm_get_handle(p_proc_object, &dmm_mgr); | ||
1765 | if (!dmm_mgr) { | ||
1766 | status = -EFAULT; | ||
1767 | goto func_end; | ||
1768 | } | ||
1769 | |||
1770 | status = dmm_un_reserve_memory(dmm_mgr, (u32) prsv_addr); | ||
1771 | if (status != 0) | ||
1772 | goto func_end; | ||
1773 | |||
1774 | /* | ||
1775 | * A successful unreserve should be followed by removal of rsv_obj | ||
1776 | * from dmm_rsv_list, so that reserved memory resource tracking | ||
1777 | * remains uptodate | ||
1778 | */ | ||
1779 | spin_lock(&pr_ctxt->dmm_rsv_lock); | ||
1780 | list_for_each_entry(rsv_obj, &pr_ctxt->dmm_rsv_list, link) { | ||
1781 | if (rsv_obj->dsp_reserved_addr == (u32) prsv_addr) { | ||
1782 | list_del(&rsv_obj->link); | ||
1783 | kfree(rsv_obj); | ||
1784 | break; | ||
1785 | } | ||
1786 | } | ||
1787 | spin_unlock(&pr_ctxt->dmm_rsv_lock); | ||
1788 | |||
1789 | func_end: | ||
1790 | dev_dbg(bridge, "%s: hprocessor: 0x%p prsv_addr: 0x%p status: 0x%x\n", | ||
1791 | __func__, hprocessor, prsv_addr, status); | ||
1792 | return status; | ||
1793 | } | ||
1794 | |||
1795 | /* | ||
1645 | * ======== proc_monitor ======== | 1796 | * ======== proc_monitor ========
1646 | * Purpose: | 1797 | * Purpose: |
1647 | * Place the Processor in Monitor State. This is an internal | 1798 | * Place the Processor in Monitor State. This is an internal |
diff --git a/drivers/staging/tm6000/tm6000-video.c b/drivers/staging/tm6000/tm6000-video.c index 9ec82796634e..c5690b2a8924 100644 --- a/drivers/staging/tm6000/tm6000-video.c +++ b/drivers/staging/tm6000/tm6000-video.c | |||
@@ -1032,6 +1032,7 @@ static int vidioc_s_std (struct file *file, void *priv, v4l2_std_id *norm) | |||
1032 | struct tm6000_fh *fh=priv; | 1032 | struct tm6000_fh *fh=priv; |
1033 | struct tm6000_core *dev = fh->dev; | 1033 | struct tm6000_core *dev = fh->dev; |
1034 | 1034 | ||
1035 | dev->norm = *norm; | ||
1035 | rc = tm6000_init_analog_mode(dev); | 1036 | rc = tm6000_init_analog_mode(dev); |
1036 | 1037 | ||
1037 | fh->width = dev->width; | 1038 | fh->width = dev->width; |
diff --git a/drivers/staging/udlfb/udlfb.c b/drivers/staging/udlfb/udlfb.c index 5969e848d297..fed25105970a 100644 --- a/drivers/staging/udlfb/udlfb.c +++ b/drivers/staging/udlfb/udlfb.c | |||
@@ -887,7 +887,7 @@ static int dlfb_ops_open(struct fb_info *info, int user) | |||
887 | 887 | ||
888 | struct fb_deferred_io *fbdefio; | 888 | struct fb_deferred_io *fbdefio; |
889 | 889 | ||
890 | fbdefio = kmalloc(GFP_KERNEL, sizeof(struct fb_deferred_io)); | 890 | fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL); |
891 | 891 | ||
892 | if (fbdefio) { | 892 | if (fbdefio) { |
893 | fbdefio->delay = DL_DEFIO_WRITE_DELAY; | 893 | fbdefio->delay = DL_DEFIO_WRITE_DELAY; |
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c index e992d5d9e15b..7cc3d2407d1b 100644 --- a/drivers/staging/vt6656/main_usb.c +++ b/drivers/staging/vt6656/main_usb.c | |||
@@ -1675,13 +1675,14 @@ static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { | |||
1675 | 1675 | ||
1676 | { | 1676 | { |
1677 | char essid[IW_ESSID_MAX_SIZE+1]; | 1677 | char essid[IW_ESSID_MAX_SIZE+1]; |
1678 | if (wrq->u.essid.pointer) | 1678 | if (wrq->u.essid.pointer) { |
1679 | rc = iwctl_giwessid(dev, NULL, | 1679 | rc = iwctl_giwessid(dev, NULL, |
1680 | &(wrq->u.essid), essid); | 1680 | &(wrq->u.essid), essid); |
1681 | if (copy_to_user(wrq->u.essid.pointer, | 1681 | if (copy_to_user(wrq->u.essid.pointer, |
1682 | essid, | 1682 | essid, |
1683 | wrq->u.essid.length) ) | 1683 | wrq->u.essid.length) ) |
1684 | rc = -EFAULT; | 1684 | rc = -EFAULT; |
1685 | } | ||
1685 | } | 1686 | } |
1686 | break; | 1687 | break; |
1687 | 1688 | ||
diff --git a/drivers/staging/westbridge/astoria/api/src/cyasusb.c b/drivers/staging/westbridge/astoria/api/src/cyasusb.c index 5a2197012065..7777d9a60a52 100644 --- a/drivers/staging/westbridge/astoria/api/src/cyasusb.c +++ b/drivers/staging/westbridge/astoria/api/src/cyasusb.c | |||
@@ -1417,7 +1417,6 @@ cy_as_usb_set_enum_config(cy_as_device_handle handle, | |||
1417 | */ | 1417 | */ |
1418 | bus_mask = 0; | 1418 | bus_mask = 0; |
1419 | media_mask = 0; | 1419 | media_mask = 0; |
1420 | media_mask = 0; | ||
1421 | for (bus = 0; bus < CY_AS_MAX_BUSES; bus++) { | 1420 | for (bus = 0; bus < CY_AS_MAX_BUSES; bus++) { |
1422 | for (device = 0; device < CY_AS_MAX_STORAGE_DEVICES; device++) { | 1421 | for (device = 0; device < CY_AS_MAX_STORAGE_DEVICES; device++) { |
1423 | if (config_p->devices_to_enumerate[bus][device] == | 1422 | if (config_p->devices_to_enumerate[bus][device] == |
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c index 4af83d5318f2..6a71f52c59b1 100644 --- a/drivers/staging/wlan-ng/cfg80211.c +++ b/drivers/staging/wlan-ng/cfg80211.c | |||
@@ -139,7 +139,7 @@ exit: | |||
139 | } | 139 | } |
140 | 140 | ||
141 | int prism2_add_key(struct wiphy *wiphy, struct net_device *dev, | 141 | int prism2_add_key(struct wiphy *wiphy, struct net_device *dev, |
142 | u8 key_index, const u8 *mac_addr, | 142 | u8 key_index, bool pairwise, const u8 *mac_addr, |
143 | struct key_params *params) | 143 | struct key_params *params) |
144 | { | 144 | { |
145 | wlandevice_t *wlandev = dev->ml_priv; | 145 | wlandevice_t *wlandev = dev->ml_priv; |
@@ -198,7 +198,7 @@ exit: | |||
198 | } | 198 | } |
199 | 199 | ||
200 | int prism2_get_key(struct wiphy *wiphy, struct net_device *dev, | 200 | int prism2_get_key(struct wiphy *wiphy, struct net_device *dev, |
201 | u8 key_index, const u8 *mac_addr, void *cookie, | 201 | u8 key_index, bool pairwise, const u8 *mac_addr, void *cookie, |
202 | void (*callback)(void *cookie, struct key_params*)) | 202 | void (*callback)(void *cookie, struct key_params*)) |
203 | { | 203 | { |
204 | wlandevice_t *wlandev = dev->ml_priv; | 204 | wlandevice_t *wlandev = dev->ml_priv; |
@@ -227,7 +227,7 @@ int prism2_get_key(struct wiphy *wiphy, struct net_device *dev, | |||
227 | } | 227 | } |
228 | 228 | ||
229 | int prism2_del_key(struct wiphy *wiphy, struct net_device *dev, | 229 | int prism2_del_key(struct wiphy *wiphy, struct net_device *dev, |
230 | u8 key_index, const u8 *mac_addr) | 230 | u8 key_index, bool pairwise, const u8 *mac_addr) |
231 | { | 231 | { |
232 | wlandevice_t *wlandev = dev->ml_priv; | 232 | wlandevice_t *wlandev = dev->ml_priv; |
233 | u32 did; | 233 | u32 did; |
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c index aa1792c8429e..b7b4a733b467 100644 --- a/drivers/staging/wlan-ng/p80211netdev.c +++ b/drivers/staging/wlan-ng/p80211netdev.c | |||
@@ -522,8 +522,8 @@ static int p80211netdev_ethtool(wlandevice_t *wlandev, void __user *useraddr) | |||
522 | if (copy_to_user(useraddr, &edata, sizeof(edata))) | 522 | if (copy_to_user(useraddr, &edata, sizeof(edata))) |
523 | return -EFAULT; | 523 | return -EFAULT; |
524 | return 0; | 524 | return 0; |
525 | } | ||
526 | #endif | 525 | #endif |
526 | } | ||
527 | 527 | ||
528 | return -EOPNOTSUPP; | 528 | return -EOPNOTSUPP; |
529 | } | 529 | } |
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index 04ef3ef0a422..81b46585edf7 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c | |||
@@ -716,8 +716,8 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg) | |||
716 | if (msg->len < 128) | 716 | if (msg->len < 128) |
717 | *--dp = (msg->len << 1) | EA; | 717 | *--dp = (msg->len << 1) | EA; |
718 | else { | 718 | else { |
719 | *--dp = (msg->len >> 6) | EA; | 719 | *--dp = ((msg->len & 127) << 1) | EA; |
720 | *--dp = (msg->len & 127) << 1; | 720 | *--dp = (msg->len >> 6) & 0xfe; |
721 | } | 721 | } |
722 | } | 722 | } |
723 | 723 | ||
@@ -2375,6 +2375,7 @@ static int gsmld_config(struct tty_struct *tty, struct gsm_mux *gsm, | |||
2375 | gsm->mru = c->mru; | 2375 | gsm->mru = c->mru; |
2376 | gsm->encoding = c->encapsulation; | 2376 | gsm->encoding = c->encapsulation; |
2377 | gsm->adaption = c->adaption; | 2377 | gsm->adaption = c->adaption; |
2378 | gsm->n2 = c->n2; | ||
2378 | 2379 | ||
2379 | if (c->i == 1) | 2380 | if (c->i == 1) |
2380 | gsm->ftype = UIH; | 2381 | gsm->ftype = UIH; |
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c index cc1e9850d655..d8210ca00720 100644 --- a/drivers/tty/tty_buffer.c +++ b/drivers/tty/tty_buffer.c | |||
@@ -413,7 +413,8 @@ static void flush_to_ldisc(struct work_struct *work) | |||
413 | spin_lock_irqsave(&tty->buf.lock, flags); | 413 | spin_lock_irqsave(&tty->buf.lock, flags); |
414 | 414 | ||
415 | if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) { | 415 | if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) { |
416 | struct tty_buffer *head; | 416 | struct tty_buffer *head, *tail = tty->buf.tail; |
417 | int seen_tail = 0; | ||
417 | while ((head = tty->buf.head) != NULL) { | 418 | while ((head = tty->buf.head) != NULL) { |
418 | int count; | 419 | int count; |
419 | char *char_buf; | 420 | char *char_buf; |
@@ -423,6 +424,15 @@ static void flush_to_ldisc(struct work_struct *work) | |||
423 | if (!count) { | 424 | if (!count) { |
424 | if (head->next == NULL) | 425 | if (head->next == NULL) |
425 | break; | 426 | break; |
427 | /* | ||
428 | * There's a possibility that the tty might get a new buffer | ||
429 | * added during the unlock window below. We could end up | ||
430 | * spinning in here forever, hogging the CPU completely. | ||
431 | * To avoid this, let's have a rest each time we process | ||
432 | * the tail buffer. | ||
433 | */ | ||
434 | if (tail == head) | ||
435 | seen_tail = 1; | ||
426 | tty->buf.head = head->next; | 436 | tty->buf.head = head->next; |
427 | tty_buffer_free(tty, head); | 437 | tty_buffer_free(tty, head); |
428 | continue; | 438 | continue; |
@@ -432,7 +442,7 @@ static void flush_to_ldisc(struct work_struct *work) | |||
432 | line discipline as we want to empty the queue */ | 442 | line discipline as we want to empty the queue */ |
433 | if (test_bit(TTY_FLUSHPENDING, &tty->flags)) | 443 | if (test_bit(TTY_FLUSHPENDING, &tty->flags)) |
434 | break; | 444 | break; |
435 | if (!tty->receive_room) { | 445 | if (!tty->receive_room || seen_tail) { |
436 | schedule_delayed_work(&tty->buf.work, 1); | 446 | schedule_delayed_work(&tty->buf.work, 1); |
437 | break; | 447 | break; |
438 | } | 448 | } |
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c index 412f9775d19c..d8e96b005023 100644 --- a/drivers/tty/tty_ldisc.c +++ b/drivers/tty/tty_ldisc.c | |||
@@ -47,6 +47,7 @@ | |||
47 | 47 | ||
48 | static DEFINE_SPINLOCK(tty_ldisc_lock); | 48 | static DEFINE_SPINLOCK(tty_ldisc_lock); |
49 | static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_wait); | 49 | static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_wait); |
50 | static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_idle); | ||
50 | /* Line disc dispatch table */ | 51 | /* Line disc dispatch table */ |
51 | static struct tty_ldisc_ops *tty_ldiscs[NR_LDISCS]; | 52 | static struct tty_ldisc_ops *tty_ldiscs[NR_LDISCS]; |
52 | 53 | ||
@@ -83,6 +84,7 @@ static void put_ldisc(struct tty_ldisc *ld) | |||
83 | return; | 84 | return; |
84 | } | 85 | } |
85 | local_irq_restore(flags); | 86 | local_irq_restore(flags); |
87 | wake_up(&tty_ldisc_idle); | ||
86 | } | 88 | } |
87 | 89 | ||
88 | /** | 90 | /** |
@@ -531,6 +533,23 @@ static int tty_ldisc_halt(struct tty_struct *tty) | |||
531 | } | 533 | } |
532 | 534 | ||
533 | /** | 535 | /** |
536 | * tty_ldisc_wait_idle - wait for the ldisc to become idle | ||
537 | * @tty: tty to wait for | ||
538 | * | ||
539 | * Wait for the line discipline to become idle. The discipline must | ||
540 | * have been halted for this to guarantee it remains idle. | ||
541 | */ | ||
542 | static int tty_ldisc_wait_idle(struct tty_struct *tty) | ||
543 | { | ||
544 | int ret; | ||
545 | ret = wait_event_interruptible_timeout(tty_ldisc_idle, | ||
546 | atomic_read(&tty->ldisc->users) == 1, 5 * HZ); | ||
547 | if (ret < 0) | ||
548 | return ret; | ||
549 | return ret > 0 ? 0 : -EBUSY; | ||
550 | } | ||
551 | |||
552 | /** | ||
534 | * tty_set_ldisc - set line discipline | 553 | * tty_set_ldisc - set line discipline |
535 | * @tty: the terminal to set | 554 | * @tty: the terminal to set |
536 | * @ldisc: the line discipline | 555 | * @ldisc: the line discipline |
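
tty_ldisc_wait_idle(), added in the hunk above, folds the three possible results of wait_event_interruptible_timeout() into either success or an error code. The sketch below only restates that mapping; map_wait_result() is a hypothetical stand-in, and -EINTR is used merely as an example of a negative return.

/* Sketch of the return-value mapping used by tty_ldisc_wait_idle():
 *   ret < 0  -> interrupted, propagate the error
 *   ret == 0 -> timed out while the ldisc was still busy, report -EBUSY
 *   ret > 0  -> the wait condition became true, report success
 */
#include <errno.h>
#include <stdio.h>

static int map_wait_result(long ret)
{
	if (ret < 0)
		return (int)ret;
	return ret > 0 ? 0 : -EBUSY;
}

int main(void)
{
	printf("%d %d %d\n",
	       map_wait_result(-EINTR),	/* interrupted       */
	       map_wait_result(0),	/* timed out: -EBUSY */
	       map_wait_result(3));	/* became idle: 0    */
	return 0;
}
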
@@ -634,8 +653,17 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc) | |||
634 | 653 | ||
635 | flush_scheduled_work(); | 654 | flush_scheduled_work(); |
636 | 655 | ||
656 | retval = tty_ldisc_wait_idle(tty); | ||
657 | |||
637 | tty_lock(); | 658 | tty_lock(); |
638 | mutex_lock(&tty->ldisc_mutex); | 659 | mutex_lock(&tty->ldisc_mutex); |
660 | |||
661 | /* handle wait idle failure locked */ | ||
662 | if (retval) { | ||
663 | tty_ldisc_put(new_ldisc); | ||
664 | goto enable; | ||
665 | } | ||
666 | |||
639 | if (test_bit(TTY_HUPPED, &tty->flags)) { | 667 | if (test_bit(TTY_HUPPED, &tty->flags)) { |
640 | /* We were raced by the hangup method. It will have stomped | 668 | /* We were raced by the hangup method. It will have stomped |
641 | the ldisc data and closed the ldisc down */ | 669 | the ldisc data and closed the ldisc down */ |
@@ -669,6 +697,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc) | |||
669 | 697 | ||
670 | tty_ldisc_put(o_ldisc); | 698 | tty_ldisc_put(o_ldisc); |
671 | 699 | ||
700 | enable: | ||
672 | /* | 701 | /* |
673 | * Allow ldisc referencing to occur again | 702 | * Allow ldisc referencing to occur again |
674 | */ | 703 | */ |
@@ -714,9 +743,12 @@ static void tty_reset_termios(struct tty_struct *tty) | |||
714 | * state closed | 743 | * state closed |
715 | */ | 744 | */ |
716 | 745 | ||
717 | static void tty_ldisc_reinit(struct tty_struct *tty, int ldisc) | 746 | static int tty_ldisc_reinit(struct tty_struct *tty, int ldisc) |
718 | { | 747 | { |
719 | struct tty_ldisc *ld; | 748 | struct tty_ldisc *ld = tty_ldisc_get(ldisc); |
749 | |||
750 | if (IS_ERR(ld)) | ||
751 | return -1; | ||
720 | 752 | ||
721 | tty_ldisc_close(tty, tty->ldisc); | 753 | tty_ldisc_close(tty, tty->ldisc); |
722 | tty_ldisc_put(tty->ldisc); | 754 | tty_ldisc_put(tty->ldisc); |
@@ -724,10 +756,10 @@ static void tty_ldisc_reinit(struct tty_struct *tty, int ldisc) | |||
724 | /* | 756 | /* |
725 | * Switch the line discipline back | 757 | * Switch the line discipline back |
726 | */ | 758 | */ |
727 | ld = tty_ldisc_get(ldisc); | ||
728 | BUG_ON(IS_ERR(ld)); | ||
729 | tty_ldisc_assign(tty, ld); | 759 | tty_ldisc_assign(tty, ld); |
730 | tty_set_termios_ldisc(tty, ldisc); | 760 | tty_set_termios_ldisc(tty, ldisc); |
761 | |||
762 | return 0; | ||
731 | } | 763 | } |
732 | 764 | ||
733 | /** | 765 | /** |
@@ -802,13 +834,16 @@ void tty_ldisc_hangup(struct tty_struct *tty) | |||
802 | a FIXME */ | 834 | a FIXME */ |
803 | if (tty->ldisc) { /* Not yet closed */ | 835 | if (tty->ldisc) { /* Not yet closed */ |
804 | if (reset == 0) { | 836 | if (reset == 0) { |
805 | tty_ldisc_reinit(tty, tty->termios->c_line); | 837 | |
806 | err = tty_ldisc_open(tty, tty->ldisc); | 838 | if (!tty_ldisc_reinit(tty, tty->termios->c_line)) |
839 | err = tty_ldisc_open(tty, tty->ldisc); | ||
840 | else | ||
841 | err = 1; | ||
807 | } | 842 | } |
808 | /* If the re-open fails or we reset then go to N_TTY. The | 843 | /* If the re-open fails or we reset then go to N_TTY. The |
809 | N_TTY open cannot fail */ | 844 | N_TTY open cannot fail */ |
810 | if (reset || err) { | 845 | if (reset || err) { |
811 | tty_ldisc_reinit(tty, N_TTY); | 846 | BUG_ON(tty_ldisc_reinit(tty, N_TTY)); |
812 | WARN_ON(tty_ldisc_open(tty, tty->ldisc)); | 847 | WARN_ON(tty_ldisc_open(tty, tty->ldisc)); |
813 | } | 848 | } |
814 | tty_ldisc_enable(tty); | 849 | tty_ldisc_enable(tty); |
diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c index 273ab44cc91d..eab3a1ff99e4 100644 --- a/drivers/tty/vt/vc_screen.c +++ b/drivers/tty/vt/vc_screen.c | |||
@@ -553,12 +553,12 @@ static unsigned int | |||
553 | vcs_poll(struct file *file, poll_table *wait) | 553 | vcs_poll(struct file *file, poll_table *wait) |
554 | { | 554 | { |
555 | struct vcs_poll_data *poll = vcs_poll_data_get(file); | 555 | struct vcs_poll_data *poll = vcs_poll_data_get(file); |
556 | int ret = 0; | 556 | int ret = DEFAULT_POLLMASK|POLLERR|POLLPRI; |
557 | 557 | ||
558 | if (poll) { | 558 | if (poll) { |
559 | poll_wait(file, &poll->waitq, wait); | 559 | poll_wait(file, &poll->waitq, wait); |
560 | if (!poll->seen_last_update) | 560 | if (poll->seen_last_update) |
561 | ret = POLLIN | POLLRDNORM; | 561 | ret = DEFAULT_POLLMASK; |
562 | } | 562 | } |
563 | return ret; | 563 | return ret; |
564 | } | 564 | } |
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index f1aaff6202a5..045bb4b823e1 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c | |||
@@ -965,10 +965,11 @@ static int proc_getdriver(struct dev_state *ps, void __user *arg) | |||
965 | 965 | ||
966 | static int proc_connectinfo(struct dev_state *ps, void __user *arg) | 966 | static int proc_connectinfo(struct dev_state *ps, void __user *arg) |
967 | { | 967 | { |
968 | struct usbdevfs_connectinfo ci; | 968 | struct usbdevfs_connectinfo ci = { |
969 | .devnum = ps->dev->devnum, | ||
970 | .slow = ps->dev->speed == USB_SPEED_LOW | ||
971 | }; | ||
969 | 972 | ||
970 | ci.devnum = ps->dev->devnum; | ||
971 | ci.slow = ps->dev->speed == USB_SPEED_LOW; | ||
972 | if (copy_to_user(arg, &ci, sizeof(ci))) | 973 | if (copy_to_user(arg, &ci, sizeof(ci))) |
973 | return -EFAULT; | 974 | return -EFAULT; |
974 | return 0; | 975 | return 0; |
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index b739ca814651..607d0db4a988 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig | |||
@@ -158,7 +158,7 @@ config USB_GADGET_FSL_USB2 | |||
158 | boolean "Freescale Highspeed USB DR Peripheral Controller" | 158 | boolean "Freescale Highspeed USB DR Peripheral Controller" |
159 | depends on FSL_SOC || ARCH_MXC | 159 | depends on FSL_SOC || ARCH_MXC |
160 | select USB_GADGET_DUALSPEED | 160 | select USB_GADGET_DUALSPEED |
161 | select USB_FSL_MPH_DR_OF | 161 | select USB_FSL_MPH_DR_OF if OF |
162 | help | 162 | help |
163 | Some of Freescale PowerPC processors have a High Speed | 163 | Some of Freescale PowerPC processors have a High Speed |
164 | Dual-Role(DR) USB controller, which supports device mode. | 164 | Dual-Role(DR) USB controller, which supports device mode. |
diff --git a/drivers/usb/gadget/goku_udc.h b/drivers/usb/gadget/goku_udc.h index 566cb2319056..e7e0c69d3b1f 100644 --- a/drivers/usb/gadget/goku_udc.h +++ b/drivers/usb/gadget/goku_udc.h | |||
@@ -251,7 +251,8 @@ struct goku_udc { | |||
251 | got_region:1, | 251 | got_region:1, |
252 | req_config:1, | 252 | req_config:1, |
253 | configured:1, | 253 | configured:1, |
254 | enabled:1; | 254 | enabled:1, |
255 | registered:1; | ||
255 | 256 | ||
256 | /* pci state used to access those endpoints */ | 257 | /* pci state used to access those endpoints */ |
257 | struct pci_dev *pdev; | 258 | struct pci_dev *pdev; |
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c index 01e5354a4c20..40f7716b31fc 100644 --- a/drivers/usb/gadget/u_serial.c +++ b/drivers/usb/gadget/u_serial.c | |||
@@ -105,11 +105,15 @@ struct gs_port { | |||
105 | wait_queue_head_t close_wait; /* wait for last close */ | 105 | wait_queue_head_t close_wait; /* wait for last close */ |
106 | 106 | ||
107 | struct list_head read_pool; | 107 | struct list_head read_pool; |
108 | int read_started; | ||
109 | int read_allocated; | ||
108 | struct list_head read_queue; | 110 | struct list_head read_queue; |
109 | unsigned n_read; | 111 | unsigned n_read; |
110 | struct tasklet_struct push; | 112 | struct tasklet_struct push; |
111 | 113 | ||
112 | struct list_head write_pool; | 114 | struct list_head write_pool; |
115 | int write_started; | ||
116 | int write_allocated; | ||
113 | struct gs_buf port_write_buf; | 117 | struct gs_buf port_write_buf; |
114 | wait_queue_head_t drain_wait; /* wait while writes drain */ | 118 | wait_queue_head_t drain_wait; /* wait while writes drain */ |
115 | 119 | ||
@@ -363,6 +367,9 @@ __acquires(&port->port_lock) | |||
363 | struct usb_request *req; | 367 | struct usb_request *req; |
364 | int len; | 368 | int len; |
365 | 369 | ||
370 | if (port->write_started >= QUEUE_SIZE) | ||
371 | break; | ||
372 | |||
366 | req = list_entry(pool->next, struct usb_request, list); | 373 | req = list_entry(pool->next, struct usb_request, list); |
367 | len = gs_send_packet(port, req->buf, in->maxpacket); | 374 | len = gs_send_packet(port, req->buf, in->maxpacket); |
368 | if (len == 0) { | 375 | if (len == 0) { |
@@ -397,6 +404,8 @@ __acquires(&port->port_lock) | |||
397 | break; | 404 | break; |
398 | } | 405 | } |
399 | 406 | ||
407 | port->write_started++; | ||
408 | |||
400 | /* abort immediately after disconnect */ | 409 | /* abort immediately after disconnect */ |
401 | if (!port->port_usb) | 410 | if (!port->port_usb) |
402 | break; | 411 | break; |
@@ -418,7 +427,6 @@ __acquires(&port->port_lock) | |||
418 | { | 427 | { |
419 | struct list_head *pool = &port->read_pool; | 428 | struct list_head *pool = &port->read_pool; |
420 | struct usb_ep *out = port->port_usb->out; | 429 | struct usb_ep *out = port->port_usb->out; |
421 | unsigned started = 0; | ||
422 | 430 | ||
423 | while (!list_empty(pool)) { | 431 | while (!list_empty(pool)) { |
424 | struct usb_request *req; | 432 | struct usb_request *req; |
@@ -430,6 +438,9 @@ __acquires(&port->port_lock) | |||
430 | if (!tty) | 438 | if (!tty) |
431 | break; | 439 | break; |
432 | 440 | ||
441 | if (port->read_started >= QUEUE_SIZE) | ||
442 | break; | ||
443 | |||
433 | req = list_entry(pool->next, struct usb_request, list); | 444 | req = list_entry(pool->next, struct usb_request, list); |
434 | list_del(&req->list); | 445 | list_del(&req->list); |
435 | req->length = out->maxpacket; | 446 | req->length = out->maxpacket; |
@@ -447,13 +458,13 @@ __acquires(&port->port_lock) | |||
447 | list_add(&req->list, pool); | 458 | list_add(&req->list, pool); |
448 | break; | 459 | break; |
449 | } | 460 | } |
450 | started++; | 461 | port->read_started++; |
451 | 462 | ||
452 | /* abort immediately after disconnect */ | 463 | /* abort immediately after disconnect */ |
453 | if (!port->port_usb) | 464 | if (!port->port_usb) |
454 | break; | 465 | break; |
455 | } | 466 | } |
456 | return started; | 467 | return port->read_started; |
457 | } | 468 | } |
458 | 469 | ||
459 | /* | 470 | /* |
@@ -535,6 +546,7 @@ static void gs_rx_push(unsigned long _port) | |||
535 | } | 546 | } |
536 | recycle: | 547 | recycle: |
537 | list_move(&req->list, &port->read_pool); | 548 | list_move(&req->list, &port->read_pool); |
549 | port->read_started--; | ||
538 | } | 550 | } |
539 | 551 | ||
540 | /* Push from tty to ldisc; without low_latency set this is handled by | 552 | /* Push from tty to ldisc; without low_latency set this is handled by |
@@ -587,6 +599,7 @@ static void gs_write_complete(struct usb_ep *ep, struct usb_request *req) | |||
587 | 599 | ||
588 | spin_lock(&port->port_lock); | 600 | spin_lock(&port->port_lock); |
589 | list_add(&req->list, &port->write_pool); | 601 | list_add(&req->list, &port->write_pool); |
602 | port->write_started--; | ||
590 | 603 | ||
591 | switch (req->status) { | 604 | switch (req->status) { |
592 | default: | 605 | default: |
@@ -608,7 +621,8 @@ static void gs_write_complete(struct usb_ep *ep, struct usb_request *req) | |||
608 | spin_unlock(&port->port_lock); | 621 | spin_unlock(&port->port_lock); |
609 | } | 622 | } |
610 | 623 | ||
611 | static void gs_free_requests(struct usb_ep *ep, struct list_head *head) | 624 | static void gs_free_requests(struct usb_ep *ep, struct list_head *head, |
625 | int *allocated) | ||
612 | { | 626 | { |
613 | struct usb_request *req; | 627 | struct usb_request *req; |
614 | 628 | ||
@@ -616,25 +630,31 @@ static void gs_free_requests(struct usb_ep *ep, struct list_head *head) | |||
616 | req = list_entry(head->next, struct usb_request, list); | 630 | req = list_entry(head->next, struct usb_request, list); |
617 | list_del(&req->list); | 631 | list_del(&req->list); |
618 | gs_free_req(ep, req); | 632 | gs_free_req(ep, req); |
633 | if (allocated) | ||
634 | (*allocated)--; | ||
619 | } | 635 | } |
620 | } | 636 | } |
621 | 637 | ||
622 | static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head, | 638 | static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head, |
623 | void (*fn)(struct usb_ep *, struct usb_request *)) | 639 | void (*fn)(struct usb_ep *, struct usb_request *), |
640 | int *allocated) | ||
624 | { | 641 | { |
625 | int i; | 642 | int i; |
626 | struct usb_request *req; | 643 | struct usb_request *req; |
644 | int n = allocated ? QUEUE_SIZE - *allocated : QUEUE_SIZE; | ||
627 | 645 | ||
628 | /* Pre-allocate up to QUEUE_SIZE transfers, but if we can't | 646 | /* Pre-allocate up to QUEUE_SIZE transfers, but if we can't |
629 | * do quite that many this time, don't fail ... we just won't | 647 | * do quite that many this time, don't fail ... we just won't |
630 | * be as speedy as we might otherwise be. | 648 | * be as speedy as we might otherwise be. |
631 | */ | 649 | */ |
632 | for (i = 0; i < QUEUE_SIZE; i++) { | 650 | for (i = 0; i < n; i++) { |
633 | req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC); | 651 | req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC); |
634 | if (!req) | 652 | if (!req) |
635 | return list_empty(head) ? -ENOMEM : 0; | 653 | return list_empty(head) ? -ENOMEM : 0; |
636 | req->complete = fn; | 654 | req->complete = fn; |
637 | list_add_tail(&req->list, head); | 655 | list_add_tail(&req->list, head); |
656 | if (allocated) | ||
657 | (*allocated)++; | ||
638 | } | 658 | } |
639 | return 0; | 659 | return 0; |
640 | } | 660 | } |
@@ -661,14 +681,15 @@ static int gs_start_io(struct gs_port *port) | |||
661 | * configurations may use different endpoints with a given port; | 681 | * configurations may use different endpoints with a given port; |
662 | * and high speed vs full speed changes packet sizes too. | 682 | * and high speed vs full speed changes packet sizes too. |
663 | */ | 683 | */ |
664 | status = gs_alloc_requests(ep, head, gs_read_complete); | 684 | status = gs_alloc_requests(ep, head, gs_read_complete, |
685 | &port->read_allocated); | ||
665 | if (status) | 686 | if (status) |
666 | return status; | 687 | return status; |
667 | 688 | ||
668 | status = gs_alloc_requests(port->port_usb->in, &port->write_pool, | 689 | status = gs_alloc_requests(port->port_usb->in, &port->write_pool, |
669 | gs_write_complete); | 690 | gs_write_complete, &port->write_allocated); |
670 | if (status) { | 691 | if (status) { |
671 | gs_free_requests(ep, head); | 692 | gs_free_requests(ep, head, &port->read_allocated); |
672 | return status; | 693 | return status; |
673 | } | 694 | } |
674 | 695 | ||
@@ -680,8 +701,9 @@ static int gs_start_io(struct gs_port *port) | |||
680 | if (started) { | 701 | if (started) { |
681 | tty_wakeup(port->port_tty); | 702 | tty_wakeup(port->port_tty); |
682 | } else { | 703 | } else { |
683 | gs_free_requests(ep, head); | 704 | gs_free_requests(ep, head, &port->read_allocated); |
684 | gs_free_requests(port->port_usb->in, &port->write_pool); | 705 | gs_free_requests(port->port_usb->in, &port->write_pool, |
706 | &port->write_allocated); | ||
685 | status = -EIO; | 707 | status = -EIO; |
686 | } | 708 | } |
687 | 709 | ||
@@ -1315,8 +1337,12 @@ void gserial_disconnect(struct gserial *gser) | |||
1315 | spin_lock_irqsave(&port->port_lock, flags); | 1337 | spin_lock_irqsave(&port->port_lock, flags); |
1316 | if (port->open_count == 0 && !port->openclose) | 1338 | if (port->open_count == 0 && !port->openclose) |
1317 | gs_buf_free(&port->port_write_buf); | 1339 | gs_buf_free(&port->port_write_buf); |
1318 | gs_free_requests(gser->out, &port->read_pool); | 1340 | gs_free_requests(gser->out, &port->read_pool, NULL); |
1319 | gs_free_requests(gser->out, &port->read_queue); | 1341 | gs_free_requests(gser->out, &port->read_queue, NULL); |
1320 | gs_free_requests(gser->in, &port->write_pool); | 1342 | gs_free_requests(gser->in, &port->write_pool, NULL); |
1343 | |||
1344 | port->read_allocated = port->read_started = | ||
1345 | port->write_allocated = port->write_started = 0; | ||
1346 | |||
1321 | spin_unlock_irqrestore(&port->port_lock, flags); | 1347 | spin_unlock_irqrestore(&port->port_lock, flags); |
1322 | } | 1348 | } |
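The u_serial change above replaces the function-local "started" counter with per-port read_started/write_started and read_allocated/write_allocated fields, so each direction never has more than QUEUE_SIZE requests in flight and a later allocation only tops the pool back up to QUEUE_SIZE. A minimal sketch of that bookkeeping pattern follows; it is illustrative only (struct and function names here are invented, and the real list handling and USB calls are reduced to comments):

	/* Sketch of the bounded request-pool bookkeeping used above. */
	#define QUEUE_SIZE 8

	struct port_sketch {
		int started;    /* requests currently queued to the hardware */
		int allocated;  /* requests that exist at all (pool + in flight) */
	};

	/* Allocate only enough new requests to bring the total back to QUEUE_SIZE. */
	static void alloc_requests(struct port_sketch *p)
	{
		int i, n = QUEUE_SIZE - p->allocated;

		for (i = 0; i < n; i++)
			p->allocated++;      /* real code: gs_alloc_req() + list_add_tail() */
	}

	/* Submission stops once QUEUE_SIZE requests are already in flight. */
	static int start_one(struct port_sketch *p)
	{
		if (p->started >= QUEUE_SIZE)
			return -1;           /* throttle: do not queue more */
		p->started++;                /* real code: usb_ep_queue() */
		return 0;
	}

	/* Completion returns the request to the pool and drops the in-flight count. */
	static void complete_one(struct port_sketch *p)
	{
		p->started--;                /* real code: list_add(&req->list, pool) */
	}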
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index 2391c396ca32..6f4f8e6a40c7 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig | |||
@@ -122,7 +122,7 @@ config USB_EHCI_FSL | |||
122 | bool "Support for Freescale on-chip EHCI USB controller" | 122 | bool "Support for Freescale on-chip EHCI USB controller" |
123 | depends on USB_EHCI_HCD && FSL_SOC | 123 | depends on USB_EHCI_HCD && FSL_SOC |
124 | select USB_EHCI_ROOT_HUB_TT | 124 | select USB_EHCI_ROOT_HUB_TT |
125 | select USB_FSL_MPH_DR_OF | 125 | select USB_FSL_MPH_DR_OF if OF |
126 | ---help--- | 126 | ---help--- |
127 | Variation of ARC USB block used in some Freescale chips. | 127 | Variation of ARC USB block used in some Freescale chips. |
128 | 128 | ||
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c index ac9c4d7c44af..bce85055019a 100644 --- a/drivers/usb/host/ehci-mxc.c +++ b/drivers/usb/host/ehci-mxc.c | |||
@@ -36,6 +36,8 @@ struct ehci_mxc_priv { | |||
36 | static int ehci_mxc_setup(struct usb_hcd *hcd) | 36 | static int ehci_mxc_setup(struct usb_hcd *hcd) |
37 | { | 37 | { |
38 | struct ehci_hcd *ehci = hcd_to_ehci(hcd); | 38 | struct ehci_hcd *ehci = hcd_to_ehci(hcd); |
39 | struct device *dev = hcd->self.controller; | ||
40 | struct mxc_usbh_platform_data *pdata = dev_get_platdata(dev); | ||
39 | int retval; | 41 | int retval; |
40 | 42 | ||
41 | /* EHCI registers start at offset 0x100 */ | 43 | /* EHCI registers start at offset 0x100 */ |
@@ -63,6 +65,12 @@ static int ehci_mxc_setup(struct usb_hcd *hcd) | |||
63 | 65 | ||
64 | ehci_reset(ehci); | 66 | ehci_reset(ehci); |
65 | 67 | ||
68 | /* set up the PORTSCx register */ | ||
69 | ehci_writel(ehci, pdata->portsc, &ehci->regs->port_status[0]); | ||
70 | |||
71 | /* is this really needed? */ | ||
72 | msleep(10); | ||
73 | |||
66 | ehci_port_power(ehci, 0); | 74 | ehci_port_power(ehci, 0); |
67 | return 0; | 75 | return 0; |
68 | } | 76 | } |
@@ -114,7 +122,7 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev) | |||
114 | struct mxc_usbh_platform_data *pdata = pdev->dev.platform_data; | 122 | struct mxc_usbh_platform_data *pdata = pdev->dev.platform_data; |
115 | struct usb_hcd *hcd; | 123 | struct usb_hcd *hcd; |
116 | struct resource *res; | 124 | struct resource *res; |
117 | int irq, ret, temp; | 125 | int irq, ret; |
118 | struct ehci_mxc_priv *priv; | 126 | struct ehci_mxc_priv *priv; |
119 | struct device *dev = &pdev->dev; | 127 | struct device *dev = &pdev->dev; |
120 | 128 | ||
@@ -188,10 +196,6 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev) | |||
188 | clk_enable(priv->ahbclk); | 196 | clk_enable(priv->ahbclk); |
189 | } | 197 | } |
190 | 198 | ||
191 | /* set up the PORTSCx register */ | ||
192 | ehci_writel(ehci, pdata->portsc, &ehci->regs->port_status[0]); | ||
193 | mdelay(10); | ||
194 | |||
195 | /* setup specific usb hw */ | 199 | /* setup specific usb hw */ |
196 | ret = mxc_initialize_usb_hw(pdev->id, pdata->flags); | 200 | ret = mxc_initialize_usb_hw(pdev->id, pdata->flags); |
197 | if (ret < 0) | 201 | if (ret < 0) |
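The ehci-mxc hunks move the PORTSC programming out of the probe path and into the controller's setup() callback, after ehci_reset(); a plausible reading is that the value must be applied once the controller reset is done rather than before it. The probe function now also fetches platform data through dev_get_platdata(). A rough ordering sketch, as a fragment inside the driver (error handling and the rest of setup omitted, and the reasoning about the reset is an assumption, not stated in the patch):

	static int ehci_mxc_setup_sketch(struct usb_hcd *hcd)
	{
		struct ehci_hcd *ehci = hcd_to_ehci(hcd);
		struct mxc_usbh_platform_data *pdata =
			dev_get_platdata(hcd->self.controller);

		ehci_reset(ehci);                 /* controller reset first */

		/* only now apply the board-specific PORTSC value */
		ehci_writel(ehci, pdata->portsc, &ehci->regs->port_status[0]);
		msleep(10);                       /* settle delay kept from the old probe code */

		ehci_port_power(ehci, 0);
		return 0;
	}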
diff --git a/drivers/usb/host/ohci-jz4740.c b/drivers/usb/host/ohci-jz4740.c index 10e1872f3ab9..931d588c3fb5 100644 --- a/drivers/usb/host/ohci-jz4740.c +++ b/drivers/usb/host/ohci-jz4740.c | |||
@@ -273,4 +273,4 @@ static struct platform_driver ohci_hcd_jz4740_driver = { | |||
273 | }, | 273 | }, |
274 | }; | 274 | }; |
275 | 275 | ||
276 | MODULE_ALIAS("platfrom:jz4740-ohci"); | 276 | MODULE_ALIAS("platform:jz4740-ohci"); |
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c index 5a47805d9580..c90c89dc0003 100644 --- a/drivers/usb/image/microtek.c +++ b/drivers/usb/image/microtek.c | |||
@@ -364,7 +364,7 @@ static int mts_scsi_host_reset(struct scsi_cmnd *srb) | |||
364 | } | 364 | } |
365 | 365 | ||
366 | static int | 366 | static int |
367 | mts_scsi_queuecommand(struct scsi_cmnd *srb, mts_scsi_cmnd_callback callback); | 367 | mts_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *srb); |
368 | 368 | ||
369 | static void mts_transfer_cleanup( struct urb *transfer ); | 369 | static void mts_transfer_cleanup( struct urb *transfer ); |
370 | static void mts_do_sg(struct urb * transfer); | 370 | static void mts_do_sg(struct urb * transfer); |
@@ -573,7 +573,7 @@ mts_build_transfer_context(struct scsi_cmnd *srb, struct mts_desc* desc) | |||
573 | 573 | ||
574 | 574 | ||
575 | static int | 575 | static int |
576 | mts_scsi_queuecommand(struct scsi_cmnd *srb, mts_scsi_cmnd_callback callback) | 576 | mts_scsi_queuecommand_lck(struct scsi_cmnd *srb, mts_scsi_cmnd_callback callback) |
577 | { | 577 | { |
578 | struct mts_desc* desc = (struct mts_desc*)(srb->device->host->hostdata[0]); | 578 | struct mts_desc* desc = (struct mts_desc*)(srb->device->host->hostdata[0]); |
579 | int err = 0; | 579 | int err = 0; |
@@ -626,6 +626,8 @@ out: | |||
626 | return err; | 626 | return err; |
627 | } | 627 | } |
628 | 628 | ||
629 | static DEF_SCSI_QCMD(mts_scsi_queuecommand) | ||
630 | |||
629 | static struct scsi_host_template mts_scsi_host_template = { | 631 | static struct scsi_host_template mts_scsi_host_template = { |
630 | .module = THIS_MODULE, | 632 | .module = THIS_MODULE, |
631 | .name = "microtekX6", | 633 | .name = "microtekX6", |
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c index 375664198776..c9078e4e1f4d 100644 --- a/drivers/usb/misc/iowarrior.c +++ b/drivers/usb/misc/iowarrior.c | |||
@@ -553,6 +553,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd, | |||
553 | /* needed for power consumption */ | 553 | /* needed for power consumption */ |
554 | struct usb_config_descriptor *cfg_descriptor = &dev->udev->actconfig->desc; | 554 | struct usb_config_descriptor *cfg_descriptor = &dev->udev->actconfig->desc; |
555 | 555 | ||
556 | memset(&info, 0, sizeof(info)); | ||
556 | /* directly from the descriptor */ | 557 | /* directly from the descriptor */ |
557 | info.vendor = le16_to_cpu(dev->udev->descriptor.idVendor); | 558 | info.vendor = le16_to_cpu(dev->udev->descriptor.idVendor); |
558 | info.product = dev->product_id; | 559 | info.product = dev->product_id; |
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c index 70d00e99a4b4..dd573abd2d1e 100644 --- a/drivers/usb/misc/sisusbvga/sisusb.c +++ b/drivers/usb/misc/sisusbvga/sisusb.c | |||
@@ -3008,6 +3008,7 @@ sisusb_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
3008 | #else | 3008 | #else |
3009 | x.sisusb_conactive = 0; | 3009 | x.sisusb_conactive = 0; |
3010 | #endif | 3010 | #endif |
3011 | memset(x.sisusb_reserved, 0, sizeof(x.sisusb_reserved)); | ||
3011 | 3012 | ||
3012 | if (copy_to_user((void __user *)arg, &x, sizeof(x))) | 3013 | if (copy_to_user((void __user *)arg, &x, sizeof(x))) |
3013 | retval = -EFAULT; | 3014 | retval = -EFAULT; |
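The iowarrior and sisusbvga hunks above fix the same class of bug: a structure is assembled on the kernel stack and handed to copy_to_user(), but padding or reserved fields were never written, so stale kernel stack bytes could leak to userspace. The fix is to zero the object (or at least its reserved members) before filling it. A minimal illustration of the pattern, using an invented struct purely for demonstration:

	struct example_info {
		int  vendor;
		int  product;
		char reserved[16];
	};

	static long example_ioctl_copy(void __user *arg)
	{
		struct example_info info;

		memset(&info, 0, sizeof(info));   /* no stale stack bytes escape */
		info.vendor  = 0x1234;
		info.product = 0x5678;

		if (copy_to_user(arg, &info, sizeof(info)))
			return -EFAULT;
		return 0;
	}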
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c index 611a9d274363..fcb5206a65bd 100644 --- a/drivers/usb/musb/blackfin.c +++ b/drivers/usb/musb/blackfin.c | |||
@@ -171,8 +171,9 @@ static irqreturn_t blackfin_interrupt(int irq, void *__hci) | |||
171 | } | 171 | } |
172 | 172 | ||
173 | /* Start sampling ID pin, when plug is removed from MUSB */ | 173 | /* Start sampling ID pin, when plug is removed from MUSB */ |
174 | if (is_otg_enabled(musb) && (musb->xceiv->state == OTG_STATE_B_IDLE | 174 | if ((is_otg_enabled(musb) && (musb->xceiv->state == OTG_STATE_B_IDLE |
175 | || musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) { | 175 | || musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) || |
176 | (musb->int_usb & MUSB_INTR_DISCONNECT && is_host_active(musb))) { | ||
176 | mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY); | 177 | mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY); |
177 | musb->a_wait_bcon = TIMER_DELAY; | 178 | musb->a_wait_bcon = TIMER_DELAY; |
178 | } | 179 | } |
@@ -323,30 +324,8 @@ int musb_platform_set_mode(struct musb *musb, u8 musb_mode) | |||
323 | return -EIO; | 324 | return -EIO; |
324 | } | 325 | } |
325 | 326 | ||
326 | int __init musb_platform_init(struct musb *musb, void *board_data) | 327 | static void musb_platform_reg_init(struct musb *musb) |
327 | { | 328 | { |
328 | |||
329 | /* | ||
330 | * Rev 1.0 BF549 EZ-KITs require PE7 to be high for both DEVICE | ||
331 | * and OTG HOST modes, while rev 1.1 and greater require PE7 to | ||
332 | * be low for DEVICE mode and high for HOST mode. We set it high | ||
333 | * here because we are in host mode | ||
334 | */ | ||
335 | |||
336 | if (gpio_request(musb->config->gpio_vrsel, "USB_VRSEL")) { | ||
337 | printk(KERN_ERR "Failed ro request USB_VRSEL GPIO_%d \n", | ||
338 | musb->config->gpio_vrsel); | ||
339 | return -ENODEV; | ||
340 | } | ||
341 | gpio_direction_output(musb->config->gpio_vrsel, 0); | ||
342 | |||
343 | usb_nop_xceiv_register(); | ||
344 | musb->xceiv = otg_get_transceiver(); | ||
345 | if (!musb->xceiv) { | ||
346 | gpio_free(musb->config->gpio_vrsel); | ||
347 | return -ENODEV; | ||
348 | } | ||
349 | |||
350 | if (ANOMALY_05000346) { | 329 | if (ANOMALY_05000346) { |
351 | bfin_write_USB_APHY_CALIB(ANOMALY_05000346_value); | 330 | bfin_write_USB_APHY_CALIB(ANOMALY_05000346_value); |
352 | SSYNC(); | 331 | SSYNC(); |
@@ -358,7 +337,8 @@ int __init musb_platform_init(struct musb *musb, void *board_data) | |||
358 | } | 337 | } |
359 | 338 | ||
360 | /* Configure PLL oscillator register */ | 339 | /* Configure PLL oscillator register */ |
361 | bfin_write_USB_PLLOSC_CTRL(0x30a8); | 340 | bfin_write_USB_PLLOSC_CTRL(0x3080 | |
341 | ((480/musb->config->clkin) << 1)); | ||
362 | SSYNC(); | 342 | SSYNC(); |
363 | 343 | ||
364 | bfin_write_USB_SRP_CLKDIV((get_sclk()/1000) / 32 - 1); | 344 | bfin_write_USB_SRP_CLKDIV((get_sclk()/1000) / 32 - 1); |
@@ -380,6 +360,33 @@ int __init musb_platform_init(struct musb *musb, void *board_data) | |||
380 | EP2_RX_ENA | EP3_RX_ENA | EP4_RX_ENA | | 360 | EP2_RX_ENA | EP3_RX_ENA | EP4_RX_ENA | |
381 | EP5_RX_ENA | EP6_RX_ENA | EP7_RX_ENA); | 361 | EP5_RX_ENA | EP6_RX_ENA | EP7_RX_ENA); |
382 | SSYNC(); | 362 | SSYNC(); |
363 | } | ||
364 | |||
365 | int __init musb_platform_init(struct musb *musb, void *board_data) | ||
366 | { | ||
367 | |||
368 | /* | ||
369 | * Rev 1.0 BF549 EZ-KITs require PE7 to be high for both DEVICE | ||
370 | * and OTG HOST modes, while rev 1.1 and greater require PE7 to | ||
371 | * be low for DEVICE mode and high for HOST mode. We set it high | ||
372 | * here because we are in host mode | ||
373 | */ | ||
374 | |||
375 | if (gpio_request(musb->config->gpio_vrsel, "USB_VRSEL")) { | ||
376 | printk(KERN_ERR "Failed ro request USB_VRSEL GPIO_%d\n", | ||
377 | musb->config->gpio_vrsel); | ||
378 | return -ENODEV; | ||
379 | } | ||
380 | gpio_direction_output(musb->config->gpio_vrsel, 0); | ||
381 | |||
382 | usb_nop_xceiv_register(); | ||
383 | musb->xceiv = otg_get_transceiver(); | ||
384 | if (!musb->xceiv) { | ||
385 | gpio_free(musb->config->gpio_vrsel); | ||
386 | return -ENODEV; | ||
387 | } | ||
388 | |||
389 | musb_platform_reg_init(musb); | ||
383 | 390 | ||
384 | if (is_host_enabled(musb)) { | 391 | if (is_host_enabled(musb)) { |
385 | musb->board_set_vbus = bfin_set_vbus; | 392 | musb->board_set_vbus = bfin_set_vbus; |
@@ -394,6 +401,27 @@ int __init musb_platform_init(struct musb *musb, void *board_data) | |||
394 | return 0; | 401 | return 0; |
395 | } | 402 | } |
396 | 403 | ||
404 | #ifdef CONFIG_PM | ||
405 | void musb_platform_save_context(struct musb *musb, | ||
406 | struct musb_context_registers *musb_context) | ||
407 | { | ||
408 | if (is_host_active(musb)) | ||
409 | /* | ||
410 | * During hibernate gpio_vrsel will change from high to low | ||
411 | * low which will generate wakeup event resume the system | ||
412 | * immediately. Set it to 0 before hibernate to avoid this | ||
413 | * wakeup event. | ||
414 | */ | ||
415 | gpio_set_value(musb->config->gpio_vrsel, 0); | ||
416 | } | ||
417 | |||
418 | void musb_platform_restore_context(struct musb *musb, | ||
419 | struct musb_context_registers *musb_context) | ||
420 | { | ||
421 | musb_platform_reg_init(musb); | ||
422 | } | ||
423 | #endif | ||
424 | |||
397 | int musb_platform_exit(struct musb *musb) | 425 | int musb_platform_exit(struct musb *musb) |
398 | { | 426 | { |
399 | gpio_free(musb->config->gpio_vrsel); | 427 | gpio_free(musb->config->gpio_vrsel); |
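The blackfin.c rework is largely code motion: the register programming moves into musb_platform_reg_init() so the new PM restore hook can rerun it after hibernate, and the save hook drives gpio_vrsel low so the pin transition during hibernate does not raise a spurious wakeup. The PLL oscillator value is now derived from the board clock instead of being hard-coded. As a worked example of that formula (assuming clkin is the board clock in MHz, which is not stated explicitly in the hunk): for clkin = 24, 480 / 24 = 20, 20 << 1 = 0x28, and 0x3080 | 0x28 = 0x30a8, which reproduces the constant that was removed.

	/* Standalone restatement of the computation above. */
	static unsigned short pllosc_value(unsigned int clkin_mhz)
	{
		return 0x3080 | ((480 / clkin_mhz) << 1);
	}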
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index c9f9024c5515..e6669fc3b804 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c | |||
@@ -552,7 +552,8 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb, | |||
552 | if (int_usb & MUSB_INTR_SESSREQ) { | 552 | if (int_usb & MUSB_INTR_SESSREQ) { |
553 | void __iomem *mbase = musb->mregs; | 553 | void __iomem *mbase = musb->mregs; |
554 | 554 | ||
555 | if (devctl & MUSB_DEVCTL_BDEVICE) { | 555 | if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS |
556 | && (devctl & MUSB_DEVCTL_BDEVICE)) { | ||
556 | DBG(3, "SessReq while on B state\n"); | 557 | DBG(3, "SessReq while on B state\n"); |
557 | return IRQ_HANDLED; | 558 | return IRQ_HANDLED; |
558 | } | 559 | } |
@@ -1052,6 +1053,11 @@ static void musb_shutdown(struct platform_device *pdev) | |||
1052 | clk_put(musb->clock); | 1053 | clk_put(musb->clock); |
1053 | spin_unlock_irqrestore(&musb->lock, flags); | 1054 | spin_unlock_irqrestore(&musb->lock, flags); |
1054 | 1055 | ||
1056 | if (!is_otg_enabled(musb) && is_host_enabled(musb)) | ||
1057 | usb_remove_hcd(musb_to_hcd(musb)); | ||
1058 | musb_writeb(musb->mregs, MUSB_DEVCTL, 0); | ||
1059 | musb_platform_exit(musb); | ||
1060 | |||
1055 | /* FIXME power down */ | 1061 | /* FIXME power down */ |
1056 | } | 1062 | } |
1057 | 1063 | ||
@@ -2244,13 +2250,6 @@ static int __exit musb_remove(struct platform_device *pdev) | |||
2244 | */ | 2250 | */ |
2245 | musb_exit_debugfs(musb); | 2251 | musb_exit_debugfs(musb); |
2246 | musb_shutdown(pdev); | 2252 | musb_shutdown(pdev); |
2247 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
2248 | if (musb->board_mode == MUSB_HOST) | ||
2249 | usb_remove_hcd(musb_to_hcd(musb)); | ||
2250 | #endif | ||
2251 | musb_writeb(musb->mregs, MUSB_DEVCTL, 0); | ||
2252 | musb_platform_exit(musb); | ||
2253 | musb_writeb(musb->mregs, MUSB_DEVCTL, 0); | ||
2254 | 2253 | ||
2255 | musb_free(musb); | 2254 | musb_free(musb); |
2256 | iounmap(ctrl_base); | 2255 | iounmap(ctrl_base); |
@@ -2411,9 +2410,6 @@ static int musb_suspend(struct device *dev) | |||
2411 | unsigned long flags; | 2410 | unsigned long flags; |
2412 | struct musb *musb = dev_to_musb(&pdev->dev); | 2411 | struct musb *musb = dev_to_musb(&pdev->dev); |
2413 | 2412 | ||
2414 | if (!musb->clock) | ||
2415 | return 0; | ||
2416 | |||
2417 | spin_lock_irqsave(&musb->lock, flags); | 2413 | spin_lock_irqsave(&musb->lock, flags); |
2418 | 2414 | ||
2419 | if (is_peripheral_active(musb)) { | 2415 | if (is_peripheral_active(musb)) { |
@@ -2428,10 +2424,12 @@ static int musb_suspend(struct device *dev) | |||
2428 | 2424 | ||
2429 | musb_save_context(musb); | 2425 | musb_save_context(musb); |
2430 | 2426 | ||
2431 | if (musb->set_clock) | 2427 | if (musb->clock) { |
2432 | musb->set_clock(musb->clock, 0); | 2428 | if (musb->set_clock) |
2433 | else | 2429 | musb->set_clock(musb->clock, 0); |
2434 | clk_disable(musb->clock); | 2430 | else |
2431 | clk_disable(musb->clock); | ||
2432 | } | ||
2435 | spin_unlock_irqrestore(&musb->lock, flags); | 2433 | spin_unlock_irqrestore(&musb->lock, flags); |
2436 | return 0; | 2434 | return 0; |
2437 | } | 2435 | } |
@@ -2441,13 +2439,12 @@ static int musb_resume_noirq(struct device *dev) | |||
2441 | struct platform_device *pdev = to_platform_device(dev); | 2439 | struct platform_device *pdev = to_platform_device(dev); |
2442 | struct musb *musb = dev_to_musb(&pdev->dev); | 2440 | struct musb *musb = dev_to_musb(&pdev->dev); |
2443 | 2441 | ||
2444 | if (!musb->clock) | 2442 | if (musb->clock) { |
2445 | return 0; | 2443 | if (musb->set_clock) |
2446 | 2444 | musb->set_clock(musb->clock, 1); | |
2447 | if (musb->set_clock) | 2445 | else |
2448 | musb->set_clock(musb->clock, 1); | 2446 | clk_enable(musb->clock); |
2449 | else | 2447 | } |
2450 | clk_enable(musb->clock); | ||
2451 | 2448 | ||
2452 | musb_restore_context(musb); | 2449 | musb_restore_context(musb); |
2453 | 2450 | ||
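In musb_core.c the teardown ordering is consolidated into musb_shutdown() (remove the HCD, clear DEVCTL, call musb_platform_exit()), and the suspend/resume paths no longer bail out early when there is no clock handle: the clock operations become conditional, but context save and restore always run. A condensed sketch of the new suspend flow (helper name invented; the real function also quiesces active sessions):

	static int musb_suspend_sketch(struct musb *musb)
	{
		musb_save_context(musb);          /* always, clock or not */

		if (musb->clock) {                /* only platforms that have one */
			if (musb->set_clock)
				musb->set_clock(musb->clock, 0);
			else
				clk_disable(musb->clock);
		}
		return 0;
	}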
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h index 69797e5b46a7..febaabcc2b35 100644 --- a/drivers/usb/musb/musb_core.h +++ b/drivers/usb/musb/musb_core.h | |||
@@ -487,7 +487,7 @@ struct musb_context_registers { | |||
487 | }; | 487 | }; |
488 | 488 | ||
489 | #if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \ | 489 | #if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \ |
490 | defined(CONFIG_ARCH_OMAP4) | 490 | defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_BLACKFIN) |
491 | extern void musb_platform_save_context(struct musb *musb, | 491 | extern void musb_platform_save_context(struct musb *musb, |
492 | struct musb_context_registers *musb_context); | 492 | struct musb_context_registers *musb_context); |
493 | extern void musb_platform_restore_context(struct musb *musb, | 493 | extern void musb_platform_restore_context(struct musb *musb, |
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index 5d815049cbaa..36cfd060dbe5 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c | |||
@@ -644,10 +644,8 @@ static void rxstate(struct musb *musb, struct musb_request *req) | |||
644 | */ | 644 | */ |
645 | 645 | ||
646 | csr |= MUSB_RXCSR_DMAENAB; | 646 | csr |= MUSB_RXCSR_DMAENAB; |
647 | if (!musb_ep->hb_mult && | ||
648 | musb_ep->hw_ep->rx_double_buffered) | ||
649 | csr |= MUSB_RXCSR_AUTOCLEAR; | ||
650 | #ifdef USE_MODE1 | 647 | #ifdef USE_MODE1 |
648 | csr |= MUSB_RXCSR_AUTOCLEAR; | ||
651 | /* csr |= MUSB_RXCSR_DMAMODE; */ | 649 | /* csr |= MUSB_RXCSR_DMAMODE; */ |
652 | 650 | ||
653 | /* this special sequence (enabling and then | 651 | /* this special sequence (enabling and then |
@@ -656,6 +654,10 @@ static void rxstate(struct musb *musb, struct musb_request *req) | |||
656 | */ | 654 | */ |
657 | musb_writew(epio, MUSB_RXCSR, | 655 | musb_writew(epio, MUSB_RXCSR, |
658 | csr | MUSB_RXCSR_DMAMODE); | 656 | csr | MUSB_RXCSR_DMAMODE); |
657 | #else | ||
658 | if (!musb_ep->hb_mult && | ||
659 | musb_ep->hw_ep->rx_double_buffered) | ||
660 | csr |= MUSB_RXCSR_AUTOCLEAR; | ||
659 | #endif | 661 | #endif |
660 | musb_writew(epio, MUSB_RXCSR, csr); | 662 | musb_writew(epio, MUSB_RXCSR, csr); |
661 | 663 | ||
@@ -807,7 +809,7 @@ void musb_g_rx(struct musb *musb, u8 epnum) | |||
807 | 809 | ||
808 | #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) | 810 | #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) |
809 | /* Autoclear doesn't clear RxPktRdy for short packets */ | 811 | /* Autoclear doesn't clear RxPktRdy for short packets */ |
810 | if ((dma->desired_mode == 0) | 812 | if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered) |
811 | || (dma->actual_len | 813 | || (dma->actual_len |
812 | & (musb_ep->packet_sz - 1))) { | 814 | & (musb_ep->packet_sz - 1))) { |
813 | /* ack the read! */ | 815 | /* ack the read! */ |
@@ -818,8 +820,16 @@ void musb_g_rx(struct musb *musb, u8 epnum) | |||
818 | /* incomplete, and not short? wait for next IN packet */ | 820 | /* incomplete, and not short? wait for next IN packet */ |
819 | if ((request->actual < request->length) | 821 | if ((request->actual < request->length) |
820 | && (musb_ep->dma->actual_len | 822 | && (musb_ep->dma->actual_len |
821 | == musb_ep->packet_sz)) | 823 | == musb_ep->packet_sz)) { |
824 | /* In double buffer case, continue to unload fifo if | ||
825 | * there is Rx packet in FIFO. | ||
826 | **/ | ||
827 | csr = musb_readw(epio, MUSB_RXCSR); | ||
828 | if ((csr & MUSB_RXCSR_RXPKTRDY) && | ||
829 | hw_ep->rx_double_buffered) | ||
830 | goto exit; | ||
822 | return; | 831 | return; |
832 | } | ||
823 | #endif | 833 | #endif |
824 | musb_g_giveback(musb_ep, request, 0); | 834 | musb_g_giveback(musb_ep, request, 0); |
825 | 835 | ||
@@ -827,7 +837,7 @@ void musb_g_rx(struct musb *musb, u8 epnum) | |||
827 | if (!request) | 837 | if (!request) |
828 | return; | 838 | return; |
829 | } | 839 | } |
830 | 840 | exit: | |
831 | /* Analyze request */ | 841 | /* Analyze request */ |
832 | rxstate(musb, to_musb_request(request)); | 842 | rxstate(musb, to_musb_request(request)); |
833 | } | 843 | } |
@@ -916,13 +926,9 @@ static int musb_gadget_enable(struct usb_ep *ep, | |||
916 | * likewise high bandwidth periodic tx | 926 | * likewise high bandwidth periodic tx |
917 | */ | 927 | */ |
918 | /* Set TXMAXP with the FIFO size of the endpoint | 928 | /* Set TXMAXP with the FIFO size of the endpoint |
919 | * to disable double buffering mode. Currently, It seems that double | 929 | * to disable double buffering mode. |
920 | * buffering has problem if musb RTL revision number < 2.0. | ||
921 | */ | 930 | */ |
922 | if (musb->hwvers < MUSB_HWVERS_2000) | 931 | musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11)); |
923 | musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx); | ||
924 | else | ||
925 | musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11)); | ||
926 | 932 | ||
927 | csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG; | 933 | csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG; |
928 | if (musb_readw(regs, MUSB_TXCSR) | 934 | if (musb_readw(regs, MUSB_TXCSR) |
@@ -958,10 +964,7 @@ static int musb_gadget_enable(struct usb_ep *ep, | |||
958 | /* Set RXMAXP with the FIFO size of the endpoint | 964 | /* Set RXMAXP with the FIFO size of the endpoint |
959 | * to disable double buffering mode. | 965 | * to disable double buffering mode. |
960 | */ | 966 | */ |
961 | if (musb->hwvers < MUSB_HWVERS_2000) | 967 | musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11)); |
962 | musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_rx); | ||
963 | else | ||
964 | musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11)); | ||
965 | 968 | ||
966 | /* force shared fifo to OUT-only mode */ | 969 | /* force shared fifo to OUT-only mode */ |
967 | if (hw_ep->is_shared_fifo) { | 970 | if (hw_ep->is_shared_fifo) { |
@@ -1166,8 +1169,6 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, | |||
1166 | : DMA_FROM_DEVICE); | 1169 | : DMA_FROM_DEVICE); |
1167 | request->mapped = 0; | 1170 | request->mapped = 0; |
1168 | } | 1171 | } |
1169 | } else if (!req->buf) { | ||
1170 | return -ENODATA; | ||
1171 | } else | 1172 | } else |
1172 | request->mapped = 0; | 1173 | request->mapped = 0; |
1173 | 1174 | ||
@@ -1695,8 +1696,10 @@ int __init musb_gadget_setup(struct musb *musb) | |||
1695 | musb_platform_try_idle(musb, 0); | 1696 | musb_platform_try_idle(musb, 0); |
1696 | 1697 | ||
1697 | status = device_register(&musb->g.dev); | 1698 | status = device_register(&musb->g.dev); |
1698 | if (status != 0) | 1699 | if (status != 0) { |
1700 | put_device(&musb->g.dev); | ||
1699 | the_gadget = NULL; | 1701 | the_gadget = NULL; |
1702 | } | ||
1700 | return status; | 1703 | return status; |
1701 | } | 1704 | } |
1702 | 1705 | ||
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h index 244267527a60..5a727c5b8676 100644 --- a/drivers/usb/musb/musb_regs.h +++ b/drivers/usb/musb/musb_regs.h | |||
@@ -633,8 +633,9 @@ static inline u8 musb_read_txhubaddr(void __iomem *mbase, u8 epnum) | |||
633 | return 0; | 633 | return 0; |
634 | } | 634 | } |
635 | 635 | ||
636 | static inline void musb_read_txhubport(void __iomem *mbase, u8 epnum) | 636 | static inline u8 musb_read_txhubport(void __iomem *mbase, u8 epnum) |
637 | { | 637 | { |
638 | return 0; | ||
638 | } | 639 | } |
639 | 640 | ||
640 | #endif /* CONFIG_BLACKFIN */ | 641 | #endif /* CONFIG_BLACKFIN */ |
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c index 6f771af5cbdb..563114d613d6 100644 --- a/drivers/usb/musb/musbhsdma.c +++ b/drivers/usb/musb/musbhsdma.c | |||
@@ -158,6 +158,8 @@ static int dma_channel_program(struct dma_channel *channel, | |||
158 | dma_addr_t dma_addr, u32 len) | 158 | dma_addr_t dma_addr, u32 len) |
159 | { | 159 | { |
160 | struct musb_dma_channel *musb_channel = channel->private_data; | 160 | struct musb_dma_channel *musb_channel = channel->private_data; |
161 | struct musb_dma_controller *controller = musb_channel->controller; | ||
162 | struct musb *musb = controller->private_data; | ||
161 | 163 | ||
162 | DBG(2, "ep%d-%s pkt_sz %d, dma_addr 0x%x length %d, mode %d\n", | 164 | DBG(2, "ep%d-%s pkt_sz %d, dma_addr 0x%x length %d, mode %d\n", |
163 | musb_channel->epnum, | 165 | musb_channel->epnum, |
@@ -167,6 +169,18 @@ static int dma_channel_program(struct dma_channel *channel, | |||
167 | BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN || | 169 | BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN || |
168 | channel->status == MUSB_DMA_STATUS_BUSY); | 170 | channel->status == MUSB_DMA_STATUS_BUSY); |
169 | 171 | ||
172 | /* | ||
173 | * The DMA engine in RTL1.8 and above cannot handle | ||
174 | * DMA addresses that are not aligned to a 4 byte boundary. | ||
175 | * It ends up masking the last two bits of the address | ||
176 | * programmed in DMA_ADDR. | ||
177 | * | ||
178 | * Fail such DMA transfers, so that the backup PIO mode | ||
179 | * can carry out the transfer | ||
180 | */ | ||
181 | if ((musb->hwvers >= MUSB_HWVERS_1800) && (dma_addr % 4)) | ||
182 | return false; | ||
183 | |||
170 | channel->actual_len = 0; | 184 | channel->actual_len = 0; |
171 | musb_channel->start_addr = dma_addr; | 185 | musb_channel->start_addr = dma_addr; |
172 | musb_channel->len = len; | 186 | musb_channel->len = len; |
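The musbhsdma hunk refuses DMA for buffers whose address is not 4-byte aligned on controller RTL 1.8 and later, because the engine masks off the low two address bits; returning false makes the caller fall back to PIO for that transfer. The check reduces to this (helper name invented, logic identical to the hunk):

	static bool dma_addr_usable(unsigned short hwvers, dma_addr_t addr)
	{
		if (hwvers >= MUSB_HWVERS_1800 && (addr & 3))
			return false;    /* low 2 bits would be silently dropped */
		return true;
	}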
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 89a9a5847803..76f8b3556672 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
@@ -794,6 +794,8 @@ static struct usb_device_id id_table_combined [] = { | |||
794 | { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LOGBOOKML_PID) }, | 794 | { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LOGBOOKML_PID) }, |
795 | { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) }, | 795 | { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) }, |
796 | { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) }, | 796 | { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) }, |
797 | { USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID), | ||
798 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | ||
797 | { }, /* Optional parameter entry */ | 799 | { }, /* Optional parameter entry */ |
798 | { } /* Terminating entry */ | 800 | { } /* Terminating entry */ |
799 | }; | 801 | }; |
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 7dfe02f1fb6a..263f62551197 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h | |||
@@ -1100,3 +1100,10 @@ | |||
1100 | #define FTDI_SCIENCESCOPE_LOGBOOKML_PID 0xFF18 | 1100 | #define FTDI_SCIENCESCOPE_LOGBOOKML_PID 0xFF18 |
1101 | #define FTDI_SCIENCESCOPE_LS_LOGBOOK_PID 0xFF1C | 1101 | #define FTDI_SCIENCESCOPE_LS_LOGBOOK_PID 0xFF1C |
1102 | #define FTDI_SCIENCESCOPE_HS_LOGBOOK_PID 0xFF1D | 1102 | #define FTDI_SCIENCESCOPE_HS_LOGBOOK_PID 0xFF1D |
1103 | |||
1104 | /* | ||
1105 | * Milkymist One JTAG/Serial | ||
1106 | */ | ||
1107 | #define QIHARDWARE_VID 0x20B7 | ||
1108 | #define MILKYMISTONE_JTAGSERIAL_PID 0x0713 | ||
1109 | |||
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 2297fb1bcf65..ef2977d3a613 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
@@ -518,7 +518,7 @@ static const struct usb_device_id option_ids[] = { | |||
518 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff) }, | 518 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff) }, |
519 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) }, | 519 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) }, |
520 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) }, | 520 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) }, |
521 | { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC) }, | 521 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) }, |
522 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, | 522 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, |
523 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, | 523 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, |
524 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) }, | 524 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) }, |
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c index a688b1e686ea..689ee1fb702a 100644 --- a/drivers/usb/storage/scsiglue.c +++ b/drivers/usb/storage/scsiglue.c | |||
@@ -285,7 +285,7 @@ static int slave_configure(struct scsi_device *sdev) | |||
285 | 285 | ||
286 | /* queue a command */ | 286 | /* queue a command */ |
287 | /* This is always called with scsi_lock(host) held */ | 287 | /* This is always called with scsi_lock(host) held */ |
288 | static int queuecommand(struct scsi_cmnd *srb, | 288 | static int queuecommand_lck(struct scsi_cmnd *srb, |
289 | void (*done)(struct scsi_cmnd *)) | 289 | void (*done)(struct scsi_cmnd *)) |
290 | { | 290 | { |
291 | struct us_data *us = host_to_us(srb->device->host); | 291 | struct us_data *us = host_to_us(srb->device->host); |
@@ -315,6 +315,8 @@ static int queuecommand(struct scsi_cmnd *srb, | |||
315 | return 0; | 315 | return 0; |
316 | } | 316 | } |
317 | 317 | ||
318 | static DEF_SCSI_QCMD(queuecommand) | ||
319 | |||
318 | /*********************************************************************** | 320 | /*********************************************************************** |
319 | * Error handling functions | 321 | * Error handling functions |
320 | ***********************************************************************/ | 322 | ***********************************************************************/ |
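microtek.c, scsiglue.c and uas.c in this series all follow the same conversion: the old queuecommand(srb, done) is renamed to a *_lck() variant and DEF_SCSI_QCMD() generates the new entry point, which takes a Scsi_Host, acquires the host lock itself and passes the command's completion callback through. Roughly, the generated wrapper behaves like the sketch below; this is paraphrased from memory rather than copied from the macro, so consult DEF_SCSI_QCMD in include/scsi/scsi_host.h for the authoritative expansion:

	/* Approximate shape of what DEF_SCSI_QCMD(queuecommand) produces. */
	static int queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
	{
		unsigned long flags;
		int rc;

		spin_lock_irqsave(shost->host_lock, flags);
		rc = queuecommand_lck(cmd, cmd->scsi_done);
		spin_unlock_irqrestore(shost->host_lock, flags);
		return rc;
	}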
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index 2054b1e25a65..339fac3949df 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c | |||
@@ -331,10 +331,7 @@ static struct urb *uas_alloc_cmd_urb(struct uas_dev_info *devinfo, gfp_t gfp, | |||
331 | 331 | ||
332 | iu->iu_id = IU_ID_COMMAND; | 332 | iu->iu_id = IU_ID_COMMAND; |
333 | iu->tag = cpu_to_be16(stream_id); | 333 | iu->tag = cpu_to_be16(stream_id); |
334 | if (sdev->ordered_tags && (cmnd->request->cmd_flags & REQ_HARDBARRIER)) | 334 | iu->prio_attr = UAS_SIMPLE_TAG; |
335 | iu->prio_attr = UAS_ORDERED_TAG; | ||
336 | else | ||
337 | iu->prio_attr = UAS_SIMPLE_TAG; | ||
338 | iu->len = len; | 335 | iu->len = len; |
339 | int_to_scsilun(sdev->lun, &iu->lun); | 336 | int_to_scsilun(sdev->lun, &iu->lun); |
340 | memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len); | 337 | memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len); |
@@ -433,7 +430,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd, | |||
433 | return 0; | 430 | return 0; |
434 | } | 431 | } |
435 | 432 | ||
436 | static int uas_queuecommand(struct scsi_cmnd *cmnd, | 433 | static int uas_queuecommand_lck(struct scsi_cmnd *cmnd, |
437 | void (*done)(struct scsi_cmnd *)) | 434 | void (*done)(struct scsi_cmnd *)) |
438 | { | 435 | { |
439 | struct scsi_device *sdev = cmnd->device; | 436 | struct scsi_device *sdev = cmnd->device; |
@@ -491,6 +488,8 @@ static int uas_queuecommand(struct scsi_cmnd *cmnd, | |||
491 | return 0; | 488 | return 0; |
492 | } | 489 | } |
493 | 490 | ||
491 | static DEF_SCSI_QCMD(uas_queuecommand) | ||
492 | |||
494 | static int uas_eh_abort_handler(struct scsi_cmnd *cmnd) | 493 | static int uas_eh_abort_handler(struct scsi_cmnd *cmnd) |
495 | { | 494 | { |
496 | struct scsi_device *sdev = cmnd->device; | 495 | struct scsi_device *sdev = cmnd->device; |
diff --git a/drivers/uwb/allocator.c b/drivers/uwb/allocator.c index 436e4f7110cb..e45e673b8770 100644 --- a/drivers/uwb/allocator.c +++ b/drivers/uwb/allocator.c | |||
@@ -326,7 +326,8 @@ int uwb_rsv_find_best_allocation(struct uwb_rsv *rsv, struct uwb_mas_bm *availab | |||
326 | int bit_index; | 326 | int bit_index; |
327 | 327 | ||
328 | ai = kzalloc(sizeof(struct uwb_rsv_alloc_info), GFP_KERNEL); | 328 | ai = kzalloc(sizeof(struct uwb_rsv_alloc_info), GFP_KERNEL); |
329 | 329 | if (!ai) | |
330 | return UWB_RSV_ALLOC_NOT_FOUND; | ||
330 | ai->min_mas = rsv->min_mas; | 331 | ai->min_mas = rsv->min_mas; |
331 | ai->max_mas = rsv->max_mas; | 332 | ai->max_mas = rsv->max_mas; |
332 | ai->max_interval = rsv->max_interval; | 333 | ai->max_interval = rsv->max_interval; |
diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c index 3ec24609151e..734c650a47c4 100644 --- a/drivers/video/backlight/adp8860_bl.c +++ b/drivers/video/backlight/adp8860_bl.c | |||
@@ -502,8 +502,10 @@ static ssize_t adp8860_bl_l1_daylight_max_store(struct device *dev, | |||
502 | struct device_attribute *attr, const char *buf, size_t count) | 502 | struct device_attribute *attr, const char *buf, size_t count) |
503 | { | 503 | { |
504 | struct adp8860_bl *data = dev_get_drvdata(dev); | 504 | struct adp8860_bl *data = dev_get_drvdata(dev); |
505 | int ret = strict_strtoul(buf, 10, &data->cached_daylight_max); | ||
506 | if (ret) | ||
507 | return ret; | ||
505 | 508 | ||
506 | strict_strtoul(buf, 10, &data->cached_daylight_max); | ||
507 | return adp8860_store(dev, buf, count, ADP8860_BLMX1); | 509 | return adp8860_store(dev, buf, count, ADP8860_BLMX1); |
508 | } | 510 | } |
509 | static DEVICE_ATTR(l1_daylight_max, 0664, adp8860_bl_l1_daylight_max_show, | 511 | static DEVICE_ATTR(l1_daylight_max, 0664, adp8860_bl_l1_daylight_max_show, |
@@ -614,7 +616,7 @@ static ssize_t adp8860_bl_ambient_light_zone_store(struct device *dev, | |||
614 | if (val == 0) { | 616 | if (val == 0) { |
615 | /* Enable automatic ambient light sensing */ | 617 | /* Enable automatic ambient light sensing */ |
616 | adp8860_set_bits(data->client, ADP8860_MDCR, CMP_AUTOEN); | 618 | adp8860_set_bits(data->client, ADP8860_MDCR, CMP_AUTOEN); |
617 | } else if ((val > 0) && (val < 6)) { | 619 | } else if ((val > 0) && (val <= 3)) { |
618 | /* Disable automatic ambient light sensing */ | 620 | /* Disable automatic ambient light sensing */ |
619 | adp8860_clr_bits(data->client, ADP8860_MDCR, CMP_AUTOEN); | 621 | adp8860_clr_bits(data->client, ADP8860_MDCR, CMP_AUTOEN); |
620 | 622 | ||
@@ -622,7 +624,7 @@ static ssize_t adp8860_bl_ambient_light_zone_store(struct device *dev, | |||
622 | mutex_lock(&data->lock); | 624 | mutex_lock(&data->lock); |
623 | adp8860_read(data->client, ADP8860_CFGR, ®_val); | 625 | adp8860_read(data->client, ADP8860_CFGR, ®_val); |
624 | reg_val &= ~(CFGR_BLV_MASK << CFGR_BLV_SHIFT); | 626 | reg_val &= ~(CFGR_BLV_MASK << CFGR_BLV_SHIFT); |
625 | reg_val |= val << CFGR_BLV_SHIFT; | 627 | reg_val |= (val - 1) << CFGR_BLV_SHIFT; |
626 | adp8860_write(data->client, ADP8860_CFGR, reg_val); | 628 | adp8860_write(data->client, ADP8860_CFGR, reg_val); |
627 | mutex_unlock(&data->lock); | 629 | mutex_unlock(&data->lock); |
628 | } | 630 | } |
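The adp8860 hunks fix two separate problems: the sysfs store now checks the strict_strtoul() return value instead of silently accepting garbage, and the ambient light zone handling accepts only 1..3 and writes val - 1 into the register field, so user-visible zones map onto register values 0..2. A tiny sketch of the parse-then-act pattern (helper name invented):

	static ssize_t zone_store_sketch(const char *buf, size_t count,
					 unsigned long *cached)
	{
		unsigned long val;
		int ret = strict_strtoul(buf, 10, &val);

		if (ret)
			return ret;      /* propagate the parse error to userspace */

		*cached = val;           /* only touch state after a clean parse */
		return count;
	}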
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c index 9093ef0fa869..c67801e57aaf 100644 --- a/drivers/video/backlight/l4f00242t03.c +++ b/drivers/video/backlight/l4f00242t03.c | |||
@@ -78,7 +78,7 @@ static int l4f00242t03_lcd_power_set(struct lcd_device *ld, int power) | |||
78 | const u16 slpin = 0x10; | 78 | const u16 slpin = 0x10; |
79 | const u16 disoff = 0x28; | 79 | const u16 disoff = 0x28; |
80 | 80 | ||
81 | if (power) { | 81 | if (power <= FB_BLANK_NORMAL) { |
82 | if (priv->lcd_on) | 82 | if (priv->lcd_on) |
83 | return 0; | 83 | return 0; |
84 | 84 | ||
diff --git a/drivers/video/backlight/lms283gf05.c b/drivers/video/backlight/lms283gf05.c index abc43a0eb97d..5d3cf33953ac 100644 --- a/drivers/video/backlight/lms283gf05.c +++ b/drivers/video/backlight/lms283gf05.c | |||
@@ -129,7 +129,7 @@ static int lms283gf05_power_set(struct lcd_device *ld, int power) | |||
129 | struct spi_device *spi = st->spi; | 129 | struct spi_device *spi = st->spi; |
130 | struct lms283gf05_pdata *pdata = spi->dev.platform_data; | 130 | struct lms283gf05_pdata *pdata = spi->dev.platform_data; |
131 | 131 | ||
132 | if (power) { | 132 | if (power <= FB_BLANK_NORMAL) { |
133 | if (pdata) | 133 | if (pdata) |
134 | lms283gf05_reset(pdata->reset_gpio, | 134 | lms283gf05_reset(pdata->reset_gpio, |
135 | pdata->reset_inverted); | 135 | pdata->reset_inverted); |
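Both LCD drivers above change their power test from "if (power)" to "if (power <= FB_BLANK_NORMAL)". The argument is an FB_BLANK_* level rather than a boolean, and lower values mean "more on" (FB_BLANK_UNBLANK is 0, FB_BLANK_POWERDOWN is 4), so the old test effectively had the sense inverted. The convention can be expressed as a one-line helper (name invented):

	/* FB_BLANK_UNBLANK (0) and FB_BLANK_NORMAL (1) mean the panel should be
	 * driven; VSYNC/HSYNC suspend and POWERDOWN mean it should be off. */
	static bool lcd_should_be_on(int fb_blank_level)
	{
		return fb_blank_level <= FB_BLANK_NORMAL;
	}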
diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c index 9fb533f6373e..1485f7345f49 100644 --- a/drivers/video/backlight/mbp_nvidia_bl.c +++ b/drivers/video/backlight/mbp_nvidia_bl.c | |||
@@ -335,6 +335,24 @@ static const struct dmi_system_id __initdata mbp_device_table[] = { | |||
335 | }, | 335 | }, |
336 | .driver_data = (void *)&nvidia_chipset_data, | 336 | .driver_data = (void *)&nvidia_chipset_data, |
337 | }, | 337 | }, |
338 | { | ||
339 | .callback = mbp_dmi_match, | ||
340 | .ident = "MacBookAir 3,1", | ||
341 | .matches = { | ||
342 | DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), | ||
343 | DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir3,1"), | ||
344 | }, | ||
345 | .driver_data = (void *)&nvidia_chipset_data, | ||
346 | }, | ||
347 | { | ||
348 | .callback = mbp_dmi_match, | ||
349 | .ident = "MacBookAir 3,2", | ||
350 | .matches = { | ||
351 | DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), | ||
352 | DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir3,2"), | ||
353 | }, | ||
354 | .driver_data = (void *)&nvidia_chipset_data, | ||
355 | }, | ||
338 | { } | 356 | { } |
339 | }; | 357 | }; |
340 | 358 | ||
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c index 550443518891..21866ec69656 100644 --- a/drivers/video/backlight/pwm_bl.c +++ b/drivers/video/backlight/pwm_bl.c | |||
@@ -25,6 +25,7 @@ struct pwm_bl_data { | |||
25 | struct pwm_device *pwm; | 25 | struct pwm_device *pwm; |
26 | struct device *dev; | 26 | struct device *dev; |
27 | unsigned int period; | 27 | unsigned int period; |
28 | unsigned int lth_brightness; | ||
28 | int (*notify)(struct device *, | 29 | int (*notify)(struct device *, |
29 | int brightness); | 30 | int brightness); |
30 | }; | 31 | }; |
@@ -48,7 +49,9 @@ static int pwm_backlight_update_status(struct backlight_device *bl) | |||
48 | pwm_config(pb->pwm, 0, pb->period); | 49 | pwm_config(pb->pwm, 0, pb->period); |
49 | pwm_disable(pb->pwm); | 50 | pwm_disable(pb->pwm); |
50 | } else { | 51 | } else { |
51 | pwm_config(pb->pwm, brightness * pb->period / max, pb->period); | 52 | brightness = pb->lth_brightness + |
53 | (brightness * (pb->period - pb->lth_brightness) / max); | ||
54 | pwm_config(pb->pwm, brightness, pb->period); | ||
52 | pwm_enable(pb->pwm); | 55 | pwm_enable(pb->pwm); |
53 | } | 56 | } |
54 | return 0; | 57 | return 0; |
@@ -92,6 +95,8 @@ static int pwm_backlight_probe(struct platform_device *pdev) | |||
92 | 95 | ||
93 | pb->period = data->pwm_period_ns; | 96 | pb->period = data->pwm_period_ns; |
94 | pb->notify = data->notify; | 97 | pb->notify = data->notify; |
98 | pb->lth_brightness = data->lth_brightness * | ||
99 | (data->pwm_period_ns / data->max_brightness); | ||
95 | pb->dev = &pdev->dev; | 100 | pb->dev = &pdev->dev; |
96 | 101 | ||
97 | pb->pwm = pwm_request(data->pwm_id, "backlight"); | 102 | pb->pwm = pwm_request(data->pwm_id, "backlight"); |
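The pwm_bl change adds a lower threshold, lth_brightness, pre-scaled into PWM duty time at probe as lth = lth_brightness * (period_ns / max_brightness). At runtime the duty becomes lth + brightness * (period_ns - lth) / max_brightness, so the lowest non-zero brightness starts at a usable duty cycle instead of near zero, and maximum brightness still reaches the full period (brightness 0 is handled by the separate branch that disables the PWM). The arithmetic as a standalone function, with illustrative parameters only:

	static unsigned int duty_ns(unsigned int brightness, unsigned int max,
				    unsigned int period_ns, unsigned int lth)
	{
		return lth + brightness * (period_ns - lth) / max;
	}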
diff --git a/drivers/video/backlight/s6e63m0.c b/drivers/video/backlight/s6e63m0.c index a3128c9cb7ad..5927db0da999 100644 --- a/drivers/video/backlight/s6e63m0.c +++ b/drivers/video/backlight/s6e63m0.c | |||
@@ -729,10 +729,10 @@ static ssize_t s6e63m0_sysfs_show_gamma_table(struct device *dev, | |||
729 | 729 | ||
730 | return strlen(buf); | 730 | return strlen(buf); |
731 | } | 731 | } |
732 | static DEVICE_ATTR(gamma_table, 0644, | 732 | static DEVICE_ATTR(gamma_table, 0444, |
733 | s6e63m0_sysfs_show_gamma_table, NULL); | 733 | s6e63m0_sysfs_show_gamma_table, NULL); |
734 | 734 | ||
735 | static int __init s6e63m0_probe(struct spi_device *spi) | 735 | static int __devinit s6e63m0_probe(struct spi_device *spi) |
736 | { | 736 | { |
737 | int ret = 0; | 737 | int ret = 0; |
738 | struct s6e63m0 *lcd = NULL; | 738 | struct s6e63m0 *lcd = NULL; |
@@ -829,6 +829,9 @@ static int __devexit s6e63m0_remove(struct spi_device *spi) | |||
829 | struct s6e63m0 *lcd = dev_get_drvdata(&spi->dev); | 829 | struct s6e63m0 *lcd = dev_get_drvdata(&spi->dev); |
830 | 830 | ||
831 | s6e63m0_power(lcd, FB_BLANK_POWERDOWN); | 831 | s6e63m0_power(lcd, FB_BLANK_POWERDOWN); |
832 | device_remove_file(&spi->dev, &dev_attr_gamma_table); | ||
833 | device_remove_file(&spi->dev, &dev_attr_gamma_mode); | ||
834 | backlight_device_unregister(lcd->bd); | ||
832 | lcd_device_unregister(lcd->ld); | 835 | lcd_device_unregister(lcd->ld); |
833 | kfree(lcd); | 836 | kfree(lcd); |
834 | 837 | ||
diff --git a/drivers/video/omap2/vram.c b/drivers/video/omap2/vram.c index fed2a72bc6b6..2fd7e5271be9 100644 --- a/drivers/video/omap2/vram.c +++ b/drivers/video/omap2/vram.c | |||
@@ -554,9 +554,15 @@ void __init omap_vram_reserve_sdram_memblock(void) | |||
554 | size = PAGE_ALIGN(size); | 554 | size = PAGE_ALIGN(size); |
555 | 555 | ||
556 | if (paddr) { | 556 | if (paddr) { |
557 | if ((paddr & ~PAGE_MASK) || | 557 | if (paddr & ~PAGE_MASK) { |
558 | !memblock_is_region_memory(paddr, size)) { | 558 | pr_err("VRAM start address 0x%08x not page aligned\n", |
559 | pr_err("Illegal SDRAM region for VRAM\n"); | 559 | paddr); |
560 | return; | ||
561 | } | ||
562 | |||
563 | if (!memblock_is_region_memory(paddr, size)) { | ||
564 | pr_err("Illegal SDRAM region 0x%08x..0x%08x for VRAM\n", | ||
565 | paddr, paddr + size - 1); | ||
560 | return; | 566 | return; |
561 | } | 567 | } |
562 | 568 | ||
@@ -570,9 +576,12 @@ void __init omap_vram_reserve_sdram_memblock(void) | |||
570 | return; | 576 | return; |
571 | } | 577 | } |
572 | } else { | 578 | } else { |
573 | paddr = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_REAL_LIMIT); | 579 | paddr = memblock_alloc(size, PAGE_SIZE); |
574 | } | 580 | } |
575 | 581 | ||
582 | memblock_free(paddr, size); | ||
583 | memblock_remove(paddr, size); | ||
584 | |||
576 | omap_vram_add_region(paddr, size); | 585 | omap_vram_add_region(paddr, size); |
577 | 586 | ||
578 | pr_info("Reserving %u bytes SDRAM for VRAM\n", size); | 587 | pr_info("Reserving %u bytes SDRAM for VRAM\n", size); |
diff --git a/drivers/video/riva/rivafb-i2c.c b/drivers/video/riva/rivafb-i2c.c index a0e22ac483a3..167400e2a182 100644 --- a/drivers/video/riva/rivafb-i2c.c +++ b/drivers/video/riva/rivafb-i2c.c | |||
@@ -94,7 +94,6 @@ static int __devinit riva_setup_i2c_bus(struct riva_i2c_chan *chan, | |||
94 | 94 | ||
95 | strcpy(chan->adapter.name, name); | 95 | strcpy(chan->adapter.name, name); |
96 | chan->adapter.owner = THIS_MODULE; | 96 | chan->adapter.owner = THIS_MODULE; |
97 | chan->adapter.id = I2C_HW_B_RIVA; | ||
98 | chan->adapter.class = i2c_class; | 97 | chan->adapter.class = i2c_class; |
99 | chan->adapter.algo_data = &chan->algo; | 98 | chan->adapter.algo_data = &chan->algo; |
100 | chan->adapter.dev.parent = &chan->par->pdev->dev; | 99 | chan->adapter.dev.parent = &chan->par->pdev->dev; |
diff --git a/drivers/video/sh_mobile_hdmi.c b/drivers/video/sh_mobile_hdmi.c index 55b3077ff6ff..d7df10315d8d 100644 --- a/drivers/video/sh_mobile_hdmi.c +++ b/drivers/video/sh_mobile_hdmi.c | |||
@@ -1071,6 +1071,10 @@ static void sh_hdmi_edid_work_fn(struct work_struct *work) | |||
1071 | if (!hdmi->info) | 1071 | if (!hdmi->info) |
1072 | goto out; | 1072 | goto out; |
1073 | 1073 | ||
1074 | hdmi->monspec.modedb_len = 0; | ||
1075 | fb_destroy_modedb(hdmi->monspec.modedb); | ||
1076 | hdmi->monspec.modedb = NULL; | ||
1077 | |||
1074 | acquire_console_sem(); | 1078 | acquire_console_sem(); |
1075 | 1079 | ||
1076 | /* HDMI disconnect */ | 1080 | /* HDMI disconnect */ |
@@ -1078,7 +1082,6 @@ static void sh_hdmi_edid_work_fn(struct work_struct *work) | |||
1078 | 1082 | ||
1079 | release_console_sem(); | 1083 | release_console_sem(); |
1080 | pm_runtime_put(hdmi->dev); | 1084 | pm_runtime_put(hdmi->dev); |
1081 | fb_destroy_modedb(hdmi->monspec.modedb); | ||
1082 | } | 1085 | } |
1083 | 1086 | ||
1084 | out: | 1087 | out: |
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c index 50963739a409..9b1364723c65 100644 --- a/drivers/video/sh_mobile_lcdcfb.c +++ b/drivers/video/sh_mobile_lcdcfb.c | |||
@@ -115,15 +115,16 @@ static const struct fb_videomode default_720p = { | |||
115 | .xres = 1280, | 115 | .xres = 1280, |
116 | .yres = 720, | 116 | .yres = 720, |
117 | 117 | ||
118 | .left_margin = 200, | 118 | .left_margin = 220, |
119 | .right_margin = 88, | 119 | .right_margin = 110, |
120 | .hsync_len = 48, | 120 | .hsync_len = 40, |
121 | 121 | ||
122 | .upper_margin = 20, | 122 | .upper_margin = 20, |
123 | .lower_margin = 5, | 123 | .lower_margin = 5, |
124 | .vsync_len = 5, | 124 | .vsync_len = 5, |
125 | 125 | ||
126 | .pixclock = 13468, | 126 | .pixclock = 13468, |
127 | .refresh = 60, | ||
127 | .sync = FB_SYNC_VERT_HIGH_ACT | FB_SYNC_HOR_HIGH_ACT, | 128 | .sync = FB_SYNC_VERT_HIGH_ACT | FB_SYNC_HOR_HIGH_ACT, |
128 | }; | 129 | }; |
129 | 130 | ||
@@ -1197,6 +1198,7 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev) | |||
1197 | const struct fb_videomode *mode = cfg->lcd_cfg; | 1198 | const struct fb_videomode *mode = cfg->lcd_cfg; |
1198 | unsigned long max_size = 0; | 1199 | unsigned long max_size = 0; |
1199 | int k; | 1200 | int k; |
1201 | int num_cfg; | ||
1200 | 1202 | ||
1201 | ch->info = framebuffer_alloc(0, &pdev->dev); | 1203 | ch->info = framebuffer_alloc(0, &pdev->dev); |
1202 | if (!ch->info) { | 1204 | if (!ch->info) { |
@@ -1232,8 +1234,14 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev) | |||
1232 | info->fix = sh_mobile_lcdc_fix; | 1234 | info->fix = sh_mobile_lcdc_fix; |
1233 | info->fix.smem_len = max_size * (cfg->bpp / 8) * 2; | 1235 | info->fix.smem_len = max_size * (cfg->bpp / 8) * 2; |
1234 | 1236 | ||
1235 | if (!mode) | 1237 | if (!mode) { |
1236 | mode = &default_720p; | 1238 | mode = &default_720p; |
1239 | num_cfg = 1; | ||
1240 | } else { | ||
1241 | num_cfg = ch->cfg.num_cfg; | ||
1242 | } | ||
1243 | |||
1244 | fb_videomode_to_modelist(mode, num_cfg, &info->modelist); | ||
1237 | 1245 | ||
1238 | fb_videomode_to_var(var, mode); | 1246 | fb_videomode_to_var(var, mode); |
1239 | /* Default Y virtual resolution is 2x panel size */ | 1247 | /* Default Y virtual resolution is 2x panel size */ |
@@ -1281,10 +1289,6 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev) | |||
1281 | 1289 | ||
1282 | for (i = 0; i < j; i++) { | 1290 | for (i = 0; i < j; i++) { |
1283 | struct sh_mobile_lcdc_chan *ch = priv->ch + i; | 1291 | struct sh_mobile_lcdc_chan *ch = priv->ch + i; |
1284 | const struct fb_videomode *mode = ch->cfg.lcd_cfg; | ||
1285 | |||
1286 | if (!mode) | ||
1287 | mode = &default_720p; | ||
1288 | 1292 | ||
1289 | info = ch->info; | 1293 | info = ch->info; |
1290 | 1294 | ||
@@ -1297,7 +1301,6 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev) | |||
1297 | } | 1301 | } |
1298 | } | 1302 | } |
1299 | 1303 | ||
1300 | fb_videomode_to_modelist(mode, ch->cfg.num_cfg, &info->modelist); | ||
1301 | error = register_framebuffer(info); | 1304 | error = register_framebuffer(info); |
1302 | if (error < 0) | 1305 | if (error < 0) |
1303 | goto err1; | 1306 | goto err1; |
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c index b52f8e4ef1fd..3dde12b0ab06 100644 --- a/drivers/video/sis/sis_main.c +++ b/drivers/video/sis/sis_main.c | |||
@@ -4181,6 +4181,9 @@ static void __devinit | |||
4181 | sisfb_post_map_vram(struct sis_video_info *ivideo, unsigned int *mapsize, | 4181 | sisfb_post_map_vram(struct sis_video_info *ivideo, unsigned int *mapsize, |
4182 | unsigned int min) | 4182 | unsigned int min) |
4183 | { | 4183 | { |
4184 | if (*mapsize < (min << 20)) | ||
4185 | return; | ||
4186 | |||
4184 | ivideo->video_vbase = ioremap(ivideo->video_base, (*mapsize)); | 4187 | ivideo->video_vbase = ioremap(ivideo->video_base, (*mapsize)); |
4185 | 4188 | ||
4186 | if(!ivideo->video_vbase) { | 4189 | if(!ivideo->video_vbase) { |
@@ -4514,7 +4517,7 @@ sisfb_post_sis300(struct pci_dev *pdev) | |||
4514 | } else { | 4517 | } else { |
4515 | #endif | 4518 | #endif |
4516 | /* Need to map max FB size for finding out about RAM size */ | 4519 | /* Need to map max FB size for finding out about RAM size */ |
4517 | mapsize = 64 << 20; | 4520 | mapsize = ivideo->video_size; |
4518 | sisfb_post_map_vram(ivideo, &mapsize, 4); | 4521 | sisfb_post_map_vram(ivideo, &mapsize, 4); |
4519 | 4522 | ||
4520 | if(ivideo->video_vbase) { | 4523 | if(ivideo->video_vbase) { |
@@ -4680,7 +4683,7 @@ sisfb_post_xgi_ramsize(struct sis_video_info *ivideo) | |||
4680 | orSISIDXREG(SISSR, 0x20, (0x80 | 0x04)); | 4683 | orSISIDXREG(SISSR, 0x20, (0x80 | 0x04)); |
4681 | 4684 | ||
4682 | /* Need to map max FB size for finding out about RAM size */ | 4685 | /* Need to map max FB size for finding out about RAM size */ |
4683 | mapsize = 256 << 20; | 4686 | mapsize = ivideo->video_size; |
4684 | sisfb_post_map_vram(ivideo, &mapsize, 32); | 4687 | sisfb_post_map_vram(ivideo, &mapsize, 32); |
4685 | 4688 | ||
4686 | if(!ivideo->video_vbase) { | 4689 | if(!ivideo->video_vbase) { |
@@ -5936,6 +5939,7 @@ sisfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
5936 | } | 5939 | } |
5937 | 5940 | ||
5938 | ivideo->video_base = pci_resource_start(pdev, 0); | 5941 | ivideo->video_base = pci_resource_start(pdev, 0); |
5942 | ivideo->video_size = pci_resource_len(pdev, 0); | ||
5939 | ivideo->mmio_base = pci_resource_start(pdev, 1); | 5943 | ivideo->mmio_base = pci_resource_start(pdev, 1); |
5940 | ivideo->mmio_size = pci_resource_len(pdev, 1); | 5944 | ivideo->mmio_size = pci_resource_len(pdev, 1); |
5941 | ivideo->SiS_Pr.RelIO = pci_resource_start(pdev, 2) + 0x30; | 5945 | ivideo->SiS_Pr.RelIO = pci_resource_start(pdev, 2) + 0x30; |
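The sisfb hunks stop mapping hard-coded 64 MB / 256 MB windows when probing RAM size and instead map at most the BAR length recorded from pci_resource_len(); sisfb_post_map_vram() now also bails out if that window is smaller than the minimum the probe needs. The guard, as a small sketch (helper name invented):

	static void __iomem *map_probe_window(unsigned long bar_start,
					      unsigned int bar_len,
					      unsigned int min_mb)
	{
		if (bar_len < (min_mb << 20))
			return NULL;              /* BAR too small to probe safely */
		return ioremap(bar_start, bar_len);
	}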
diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 97612f548a8e..321a0c8346e5 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c | |||
@@ -1299,9 +1299,6 @@ static void restore_cpu_virqs(unsigned int cpu) | |||
1299 | evtchn_to_irq[evtchn] = irq; | 1299 | evtchn_to_irq[evtchn] = irq; |
1300 | irq_info[irq] = mk_virq_info(evtchn, virq); | 1300 | irq_info[irq] = mk_virq_info(evtchn, virq); |
1301 | bind_evtchn_to_cpu(evtchn, cpu); | 1301 | bind_evtchn_to_cpu(evtchn, cpu); |
1302 | |||
1303 | /* Ready for use. */ | ||
1304 | unmask_evtchn(evtchn); | ||
1305 | } | 1302 | } |
1306 | } | 1303 | } |
1307 | 1304 | ||
@@ -1327,10 +1324,6 @@ static void restore_cpu_ipis(unsigned int cpu) | |||
1327 | evtchn_to_irq[evtchn] = irq; | 1324 | evtchn_to_irq[evtchn] = irq; |
1328 | irq_info[irq] = mk_ipi_info(evtchn, ipi); | 1325 | irq_info[irq] = mk_ipi_info(evtchn, ipi); |
1329 | bind_evtchn_to_cpu(evtchn, cpu); | 1326 | bind_evtchn_to_cpu(evtchn, cpu); |
1330 | |||
1331 | /* Ready for use. */ | ||
1332 | unmask_evtchn(evtchn); | ||
1333 | |||
1334 | } | 1327 | } |
1335 | } | 1328 | } |
1336 | 1329 | ||
@@ -1390,6 +1383,7 @@ void xen_poll_irq(int irq) | |||
1390 | void xen_irq_resume(void) | 1383 | void xen_irq_resume(void) |
1391 | { | 1384 | { |
1392 | unsigned int cpu, irq, evtchn; | 1385 | unsigned int cpu, irq, evtchn; |
1386 | struct irq_desc *desc; | ||
1393 | 1387 | ||
1394 | init_evtchn_cpu_bindings(); | 1388 | init_evtchn_cpu_bindings(); |
1395 | 1389 | ||
@@ -1408,6 +1402,23 @@ void xen_irq_resume(void) | |||
1408 | restore_cpu_virqs(cpu); | 1402 | restore_cpu_virqs(cpu); |
1409 | restore_cpu_ipis(cpu); | 1403 | restore_cpu_ipis(cpu); |
1410 | } | 1404 | } |
1405 | |||
1406 | /* | ||
1407 | * Unmask any IRQF_NO_SUSPEND IRQs which are enabled. These | ||
1408 | * are not handled by the IRQ core. | ||
1409 | */ | ||
1410 | for_each_irq_desc(irq, desc) { | ||
1411 | if (!desc->action || !(desc->action->flags & IRQF_NO_SUSPEND)) | ||
1412 | continue; | ||
1413 | if (desc->status & IRQ_DISABLED) | ||
1414 | continue; | ||
1415 | |||
1416 | evtchn = evtchn_from_irq(irq); | ||
1417 | if (evtchn == -1) | ||
1418 | continue; | ||
1419 | |||
1420 | unmask_evtchn(evtchn); | ||
1421 | } | ||
1411 | } | 1422 | } |
1412 | 1423 | ||
1413 | static struct irq_chip xen_dynamic_chip __read_mostly = { | 1424 | static struct irq_chip xen_dynamic_chip __read_mostly = { |
@@ -370,6 +370,9 @@ struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs) | |||
370 | { | 370 | { |
371 | struct bio *bio; | 371 | struct bio *bio; |
372 | 372 | ||
373 | if (nr_iovecs > UIO_MAXIOV) | ||
374 | return NULL; | ||
375 | |||
373 | bio = kmalloc(sizeof(struct bio) + nr_iovecs * sizeof(struct bio_vec), | 376 | bio = kmalloc(sizeof(struct bio) + nr_iovecs * sizeof(struct bio_vec), |
374 | gfp_mask); | 377 | gfp_mask); |
375 | if (unlikely(!bio)) | 378 | if (unlikely(!bio)) |
@@ -697,8 +700,12 @@ static void bio_free_map_data(struct bio_map_data *bmd) | |||
697 | static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count, | 700 | static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count, |
698 | gfp_t gfp_mask) | 701 | gfp_t gfp_mask) |
699 | { | 702 | { |
700 | struct bio_map_data *bmd = kmalloc(sizeof(*bmd), gfp_mask); | 703 | struct bio_map_data *bmd; |
701 | 704 | ||
705 | if (iov_count > UIO_MAXIOV) | ||
706 | return NULL; | ||
707 | |||
708 | bmd = kmalloc(sizeof(*bmd), gfp_mask); | ||
702 | if (!bmd) | 709 | if (!bmd) |
703 | return NULL; | 710 | return NULL; |
704 | 711 | ||
@@ -827,6 +834,12 @@ struct bio *bio_copy_user_iov(struct request_queue *q, | |||
827 | end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT; | 834 | end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT; |
828 | start = uaddr >> PAGE_SHIFT; | 835 | start = uaddr >> PAGE_SHIFT; |
829 | 836 | ||
837 | /* | ||
838 | * Overflow, abort | ||
839 | */ | ||
840 | if (end < start) | ||
841 | return ERR_PTR(-EINVAL); | ||
842 | |||
830 | nr_pages += end - start; | 843 | nr_pages += end - start; |
831 | len += iov[i].iov_len; | 844 | len += iov[i].iov_len; |
832 | } | 845 | } |
@@ -955,6 +968,12 @@ static struct bio *__bio_map_user_iov(struct request_queue *q, | |||
955 | unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; | 968 | unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; |
956 | unsigned long start = uaddr >> PAGE_SHIFT; | 969 | unsigned long start = uaddr >> PAGE_SHIFT; |
957 | 970 | ||
971 | /* | ||
972 | * Overflow, abort | ||
973 | */ | ||
974 | if (end < start) | ||
975 | return ERR_PTR(-EINVAL); | ||
976 | |||
958 | nr_pages += end - start; | 977 | nr_pages += end - start; |
959 | /* | 978 | /* |
960 | * buffer must be aligned to at least hardsector size for now | 979 | * buffer must be aligned to at least hardsector size for now |
@@ -982,7 +1001,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q, | |||
982 | unsigned long start = uaddr >> PAGE_SHIFT; | 1001 | unsigned long start = uaddr >> PAGE_SHIFT; |
983 | const int local_nr_pages = end - start; | 1002 | const int local_nr_pages = end - start; |
984 | const int page_limit = cur_page + local_nr_pages; | 1003 | const int page_limit = cur_page + local_nr_pages; |
985 | 1004 | ||
986 | ret = get_user_pages_fast(uaddr, local_nr_pages, | 1005 | ret = get_user_pages_fast(uaddr, local_nr_pages, |
987 | write_to_vm, &pages[cur_page]); | 1006 | write_to_vm, &pages[cur_page]); |
988 | if (ret < local_nr_pages) { | 1007 | if (ret < local_nr_pages) { |
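
The two "end < start" checks above guard against a wrapped user address range: nr_pages is derived from a rounded-up end page and a rounded-down start page, and if uaddr + iov_len overflows the address space the subtraction produces a huge bogus count instead of failing. A small userspace illustration of the same arithmetic (page_span() is a hypothetical helper, not kernel code):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Number of pages spanned by [uaddr, uaddr + len), or -1 on wrap-around. */
static long page_span(unsigned long uaddr, unsigned long len)
{
	unsigned long end   = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = uaddr >> PAGE_SHIFT;

	if (end < start)		/* uaddr + len wrapped past the top */
		return -1;
	return end - start;
}

int main(void)
{
	printf("%ld\n", page_span(0x1000, 8192));	/* 2 pages */
	printf("%ld\n", page_span(~0UL - 100, 4096));	/* -1: wrapped */
	return 0;
}
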
diff --git a/fs/gfs2/export.c b/fs/gfs2/export.c index 06d582732d34..5ab3839dfcb9 100644 --- a/fs/gfs2/export.c +++ b/fs/gfs2/export.c | |||
@@ -138,10 +138,8 @@ static struct dentry *gfs2_get_dentry(struct super_block *sb, | |||
138 | struct gfs2_inum_host *inum) | 138 | struct gfs2_inum_host *inum) |
139 | { | 139 | { |
140 | struct gfs2_sbd *sdp = sb->s_fs_info; | 140 | struct gfs2_sbd *sdp = sb->s_fs_info; |
141 | struct gfs2_holder i_gh; | ||
142 | struct inode *inode; | 141 | struct inode *inode; |
143 | struct dentry *dentry; | 142 | struct dentry *dentry; |
144 | int error; | ||
145 | 143 | ||
146 | inode = gfs2_ilookup(sb, inum->no_addr); | 144 | inode = gfs2_ilookup(sb, inum->no_addr); |
147 | if (inode) { | 145 | if (inode) { |
@@ -152,52 +150,16 @@ static struct dentry *gfs2_get_dentry(struct super_block *sb, | |||
152 | goto out_inode; | 150 | goto out_inode; |
153 | } | 151 | } |
154 | 152 | ||
155 | error = gfs2_glock_nq_num(sdp, inum->no_addr, &gfs2_inode_glops, | 153 | inode = gfs2_lookup_by_inum(sdp, inum->no_addr, &inum->no_formal_ino, |
156 | LM_ST_SHARED, LM_FLAG_ANY, &i_gh); | 154 | GFS2_BLKST_DINODE); |
157 | if (error) | 155 | if (IS_ERR(inode)) |
158 | return ERR_PTR(error); | 156 | return ERR_CAST(inode); |
159 | |||
160 | error = gfs2_check_blk_type(sdp, inum->no_addr, GFS2_BLKST_DINODE); | ||
161 | if (error) | ||
162 | goto fail; | ||
163 | |||
164 | inode = gfs2_inode_lookup(sb, DT_UNKNOWN, inum->no_addr, 0); | ||
165 | if (IS_ERR(inode)) { | ||
166 | error = PTR_ERR(inode); | ||
167 | goto fail; | ||
168 | } | ||
169 | |||
170 | error = gfs2_inode_refresh(GFS2_I(inode)); | ||
171 | if (error) { | ||
172 | iput(inode); | ||
173 | goto fail; | ||
174 | } | ||
175 | |||
176 | /* Pick up the works we bypass in gfs2_inode_lookup */ | ||
177 | if (inode->i_state & I_NEW) | ||
178 | gfs2_set_iop(inode); | ||
179 | |||
180 | if (GFS2_I(inode)->i_no_formal_ino != inum->no_formal_ino) { | ||
181 | iput(inode); | ||
182 | goto fail; | ||
183 | } | ||
184 | |||
185 | error = -EIO; | ||
186 | if (GFS2_I(inode)->i_diskflags & GFS2_DIF_SYSTEM) { | ||
187 | iput(inode); | ||
188 | goto fail; | ||
189 | } | ||
190 | |||
191 | gfs2_glock_dq_uninit(&i_gh); | ||
192 | 157 | ||
193 | out_inode: | 158 | out_inode: |
194 | dentry = d_obtain_alias(inode); | 159 | dentry = d_obtain_alias(inode); |
195 | if (!IS_ERR(dentry)) | 160 | if (!IS_ERR(dentry)) |
196 | dentry->d_op = &gfs2_dops; | 161 | dentry->d_op = &gfs2_dops; |
197 | return dentry; | 162 | return dentry; |
198 | fail: | ||
199 | gfs2_glock_dq_uninit(&i_gh); | ||
200 | return ERR_PTR(error); | ||
201 | } | 163 | } |
202 | 164 | ||
203 | static struct dentry *gfs2_fh_to_dentry(struct super_block *sb, struct fid *fid, | 165 | static struct dentry *gfs2_fh_to_dentry(struct super_block *sb, struct fid *fid, |
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 87778857f099..f92c17704169 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c | |||
@@ -686,21 +686,20 @@ static void delete_work_func(struct work_struct *work) | |||
686 | { | 686 | { |
687 | struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete); | 687 | struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete); |
688 | struct gfs2_sbd *sdp = gl->gl_sbd; | 688 | struct gfs2_sbd *sdp = gl->gl_sbd; |
689 | struct gfs2_inode *ip = NULL; | 689 | struct gfs2_inode *ip; |
690 | struct inode *inode; | 690 | struct inode *inode; |
691 | u64 no_addr = 0; | 691 | u64 no_addr = gl->gl_name.ln_number; |
692 | |||
693 | ip = gl->gl_object; | ||
694 | /* Note: Unsafe to dereference ip as we don't hold right refs/locks */ | ||
692 | 695 | ||
693 | spin_lock(&gl->gl_spin); | ||
694 | ip = (struct gfs2_inode *)gl->gl_object; | ||
695 | if (ip) | 696 | if (ip) |
696 | no_addr = ip->i_no_addr; | ||
697 | spin_unlock(&gl->gl_spin); | ||
698 | if (ip) { | ||
699 | inode = gfs2_ilookup(sdp->sd_vfs, no_addr); | 697 | inode = gfs2_ilookup(sdp->sd_vfs, no_addr); |
700 | if (inode) { | 698 | else |
701 | d_prune_aliases(inode); | 699 | inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED); |
702 | iput(inode); | 700 | if (inode && !IS_ERR(inode)) { |
703 | } | 701 | d_prune_aliases(inode); |
702 | iput(inode); | ||
704 | } | 703 | } |
705 | gfs2_glock_put(gl); | 704 | gfs2_glock_put(gl); |
706 | } | 705 | } |
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index 06370f8bd8cf..e1213f7f9217 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c | |||
@@ -73,49 +73,6 @@ static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr) | |||
73 | return iget5_locked(sb, hash, iget_test, iget_set, &no_addr); | 73 | return iget5_locked(sb, hash, iget_test, iget_set, &no_addr); |
74 | } | 74 | } |
75 | 75 | ||
76 | struct gfs2_skip_data { | ||
77 | u64 no_addr; | ||
78 | int skipped; | ||
79 | }; | ||
80 | |||
81 | static int iget_skip_test(struct inode *inode, void *opaque) | ||
82 | { | ||
83 | struct gfs2_inode *ip = GFS2_I(inode); | ||
84 | struct gfs2_skip_data *data = opaque; | ||
85 | |||
86 | if (ip->i_no_addr == data->no_addr) { | ||
87 | if (inode->i_state & (I_FREEING|I_WILL_FREE)){ | ||
88 | data->skipped = 1; | ||
89 | return 0; | ||
90 | } | ||
91 | return 1; | ||
92 | } | ||
93 | return 0; | ||
94 | } | ||
95 | |||
96 | static int iget_skip_set(struct inode *inode, void *opaque) | ||
97 | { | ||
98 | struct gfs2_inode *ip = GFS2_I(inode); | ||
99 | struct gfs2_skip_data *data = opaque; | ||
100 | |||
101 | if (data->skipped) | ||
102 | return 1; | ||
103 | inode->i_ino = (unsigned long)(data->no_addr); | ||
104 | ip->i_no_addr = data->no_addr; | ||
105 | return 0; | ||
106 | } | ||
107 | |||
108 | static struct inode *gfs2_iget_skip(struct super_block *sb, | ||
109 | u64 no_addr) | ||
110 | { | ||
111 | struct gfs2_skip_data data; | ||
112 | unsigned long hash = (unsigned long)no_addr; | ||
113 | |||
114 | data.no_addr = no_addr; | ||
115 | data.skipped = 0; | ||
116 | return iget5_locked(sb, hash, iget_skip_test, iget_skip_set, &data); | ||
117 | } | ||
118 | |||
119 | /** | 76 | /** |
120 | * GFS2 lookup code fills in vfs inode contents based on info obtained | 77 | * GFS2 lookup code fills in vfs inode contents based on info obtained |
121 | * from directory entry inside gfs2_inode_lookup(). This has caused issues | 78 | * from directory entry inside gfs2_inode_lookup(). This has caused issues |
@@ -243,93 +200,54 @@ fail: | |||
243 | return ERR_PTR(error); | 200 | return ERR_PTR(error); |
244 | } | 201 | } |
245 | 202 | ||
246 | /** | 203 | struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr, |
247 | * gfs2_process_unlinked_inode - Lookup an unlinked inode for reclamation | 204 | u64 *no_formal_ino, unsigned int blktype) |
248 | * and try to reclaim it by doing iput. | ||
249 | * | ||
250 | * This function assumes no rgrp locks are currently held. | ||
251 | * | ||
252 | * @sb: The super block | ||
253 | * no_addr: The inode number | ||
254 | * | ||
255 | */ | ||
256 | |||
257 | void gfs2_process_unlinked_inode(struct super_block *sb, u64 no_addr) | ||
258 | { | 205 | { |
259 | struct gfs2_sbd *sdp; | 206 | struct super_block *sb = sdp->sd_vfs; |
260 | struct gfs2_inode *ip; | 207 | struct gfs2_holder i_gh; |
261 | struct gfs2_glock *io_gl = NULL; | ||
262 | int error; | ||
263 | struct gfs2_holder gh; | ||
264 | struct inode *inode; | 208 | struct inode *inode; |
209 | int error; | ||
265 | 210 | ||
266 | inode = gfs2_iget_skip(sb, no_addr); | 211 | error = gfs2_glock_nq_num(sdp, no_addr, &gfs2_inode_glops, |
267 | 212 | LM_ST_SHARED, LM_FLAG_ANY, &i_gh); | |
268 | if (!inode) | 213 | if (error) |
269 | return; | 214 | return ERR_PTR(error); |
270 | |||
271 | /* If it's not a new inode, someone's using it, so leave it alone. */ | ||
272 | if (!(inode->i_state & I_NEW)) { | ||
273 | iput(inode); | ||
274 | return; | ||
275 | } | ||
276 | |||
277 | ip = GFS2_I(inode); | ||
278 | sdp = GFS2_SB(inode); | ||
279 | ip->i_no_formal_ino = -1; | ||
280 | 215 | ||
281 | error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl); | 216 | error = gfs2_check_blk_type(sdp, no_addr, blktype); |
282 | if (unlikely(error)) | 217 | if (error) |
283 | goto fail; | 218 | goto fail; |
284 | ip->i_gl->gl_object = ip; | ||
285 | 219 | ||
286 | error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl); | 220 | inode = gfs2_inode_lookup(sb, DT_UNKNOWN, no_addr, 0); |
287 | if (unlikely(error)) | 221 | if (IS_ERR(inode)) |
288 | goto fail_put; | 222 | goto fail; |
289 | |||
290 | set_bit(GIF_INVALID, &ip->i_flags); | ||
291 | error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, LM_FLAG_TRY | GL_EXACT, | ||
292 | &ip->i_iopen_gh); | ||
293 | if (unlikely(error)) | ||
294 | goto fail_iopen; | ||
295 | 223 | ||
296 | ip->i_iopen_gh.gh_gl->gl_object = ip; | 224 | error = gfs2_inode_refresh(GFS2_I(inode)); |
297 | gfs2_glock_put(io_gl); | 225 | if (error) |
298 | io_gl = NULL; | 226 | goto fail_iput; |
299 | 227 | ||
300 | inode->i_mode = DT2IF(DT_UNKNOWN); | 228 | /* Pick up the works we bypass in gfs2_inode_lookup */ |
229 | if (inode->i_state & I_NEW) | ||
230 | gfs2_set_iop(inode); | ||
301 | 231 | ||
302 | /* | 232 | /* Two extra checks for NFS only */ |
303 | * We must read the inode in order to work out its type in | 233 | if (no_formal_ino) { |
304 | * this case. Note that this doesn't happen often as we normally | 234 | error = -ESTALE; |
305 | * know the type beforehand. This code path only occurs during | 235 | if (GFS2_I(inode)->i_no_formal_ino != *no_formal_ino) |
306 | * unlinked inode recovery (where it is safe to do this glock, | 236 | goto fail_iput; |
307 | * which is not true in the general case). | ||
308 | */ | ||
309 | error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, LM_FLAG_TRY, | ||
310 | &gh); | ||
311 | if (unlikely(error)) | ||
312 | goto fail_glock; | ||
313 | 237 | ||
314 | /* Inode is now uptodate */ | 238 | error = -EIO; |
315 | gfs2_glock_dq_uninit(&gh); | 239 | if (GFS2_I(inode)->i_diskflags & GFS2_DIF_SYSTEM) |
316 | gfs2_set_iop(inode); | 240 | goto fail_iput; |
317 | 241 | ||
318 | /* The iput will cause it to be deleted. */ | 242 | error = 0; |
319 | iput(inode); | 243 | } |
320 | return; | ||
321 | 244 | ||
322 | fail_glock: | ||
323 | gfs2_glock_dq(&ip->i_iopen_gh); | ||
324 | fail_iopen: | ||
325 | if (io_gl) | ||
326 | gfs2_glock_put(io_gl); | ||
327 | fail_put: | ||
328 | ip->i_gl->gl_object = NULL; | ||
329 | gfs2_glock_put(ip->i_gl); | ||
330 | fail: | 245 | fail: |
331 | iget_failed(inode); | 246 | gfs2_glock_dq_uninit(&i_gh); |
332 | return; | 247 | return error ? ERR_PTR(error) : inode; |
248 | fail_iput: | ||
249 | iput(inode); | ||
250 | goto fail; | ||
333 | } | 251 | } |
334 | 252 | ||
335 | static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf) | 253 | static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf) |
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h index 6720d7d5fbc6..d8499fadcc53 100644 --- a/fs/gfs2/inode.h +++ b/fs/gfs2/inode.h | |||
@@ -99,7 +99,9 @@ err: | |||
99 | extern void gfs2_set_iop(struct inode *inode); | 99 | extern void gfs2_set_iop(struct inode *inode); |
100 | extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type, | 100 | extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type, |
101 | u64 no_addr, u64 no_formal_ino); | 101 | u64 no_addr, u64 no_formal_ino); |
102 | extern void gfs2_process_unlinked_inode(struct super_block *sb, u64 no_addr); | 102 | extern struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr, |
103 | u64 *no_formal_ino, | ||
104 | unsigned int blktype); | ||
103 | extern struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr); | 105 | extern struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr); |
104 | 106 | ||
105 | extern int gfs2_inode_refresh(struct gfs2_inode *ip); | 107 | extern int gfs2_inode_refresh(struct gfs2_inode *ip); |
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index bef3ab6cf5c1..33c8407b876f 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c | |||
@@ -963,17 +963,18 @@ static int try_rgrp_fit(struct gfs2_rgrpd *rgd, struct gfs2_alloc *al) | |||
963 | * The inode, if one has been found, in inode. | 963 | * The inode, if one has been found, in inode. |
964 | */ | 964 | */ |
965 | 965 | ||
966 | static u64 try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, | 966 | static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip) |
967 | u64 skip) | ||
968 | { | 967 | { |
969 | u32 goal = 0, block; | 968 | u32 goal = 0, block; |
970 | u64 no_addr; | 969 | u64 no_addr; |
971 | struct gfs2_sbd *sdp = rgd->rd_sbd; | 970 | struct gfs2_sbd *sdp = rgd->rd_sbd; |
972 | unsigned int n; | 971 | unsigned int n; |
972 | struct gfs2_glock *gl; | ||
973 | struct gfs2_inode *ip; | ||
974 | int error; | ||
975 | int found = 0; | ||
973 | 976 | ||
974 | for(;;) { | 977 | while (goal < rgd->rd_data) { |
975 | if (goal >= rgd->rd_data) | ||
976 | break; | ||
977 | down_write(&sdp->sd_log_flush_lock); | 978 | down_write(&sdp->sd_log_flush_lock); |
978 | n = 1; | 979 | n = 1; |
979 | block = rgblk_search(rgd, goal, GFS2_BLKST_UNLINKED, | 980 | block = rgblk_search(rgd, goal, GFS2_BLKST_UNLINKED, |
@@ -990,11 +991,32 @@ static u64 try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, | |||
990 | if (no_addr == skip) | 991 | if (no_addr == skip) |
991 | continue; | 992 | continue; |
992 | *last_unlinked = no_addr; | 993 | *last_unlinked = no_addr; |
993 | return no_addr; | 994 | |
995 | error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &gl); | ||
996 | if (error) | ||
997 | continue; | ||
998 | |||
999 | /* If the inode is already in cache, we can ignore it here | ||
1000 | * because the existing inode disposal code will deal with | ||
1001 | * it when all refs have gone away. Accessing gl_object like | ||
1002 | * this is not safe in general. Here it is ok because we do | ||
1003 | * not dereference the pointer, and we only need an approx | ||
1004 | * answer to whether it is NULL or not. | ||
1005 | */ | ||
1006 | ip = gl->gl_object; | ||
1007 | |||
1008 | if (ip || queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0) | ||
1009 | gfs2_glock_put(gl); | ||
1010 | else | ||
1011 | found++; | ||
1012 | |||
1013 | /* Limit reclaim to sensible number of tasks */ | ||
1014 | if (found > 2*NR_CPUS) | ||
1015 | return; | ||
994 | } | 1016 | } |
995 | 1017 | ||
996 | rgd->rd_flags &= ~GFS2_RDF_CHECK; | 1018 | rgd->rd_flags &= ~GFS2_RDF_CHECK; |
997 | return 0; | 1019 | return; |
998 | } | 1020 | } |
999 | 1021 | ||
1000 | /** | 1022 | /** |
@@ -1075,11 +1097,9 @@ static void forward_rgrp_set(struct gfs2_sbd *sdp, struct gfs2_rgrpd *rgd) | |||
1075 | * Try to acquire rgrp in way which avoids contending with others. | 1097 | * Try to acquire rgrp in way which avoids contending with others. |
1076 | * | 1098 | * |
1077 | * Returns: errno | 1099 | * Returns: errno |
1078 | * unlinked: the block address of an unlinked block to be reclaimed | ||
1079 | */ | 1100 | */ |
1080 | 1101 | ||
1081 | static int get_local_rgrp(struct gfs2_inode *ip, u64 *unlinked, | 1102 | static int get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked) |
1082 | u64 *last_unlinked) | ||
1083 | { | 1103 | { |
1084 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); | 1104 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); |
1085 | struct gfs2_rgrpd *rgd, *begin = NULL; | 1105 | struct gfs2_rgrpd *rgd, *begin = NULL; |
@@ -1089,7 +1109,6 @@ static int get_local_rgrp(struct gfs2_inode *ip, u64 *unlinked, | |||
1089 | int loops = 0; | 1109 | int loops = 0; |
1090 | int error, rg_locked; | 1110 | int error, rg_locked; |
1091 | 1111 | ||
1092 | *unlinked = 0; | ||
1093 | rgd = gfs2_blk2rgrpd(sdp, ip->i_goal); | 1112 | rgd = gfs2_blk2rgrpd(sdp, ip->i_goal); |
1094 | 1113 | ||
1095 | while (rgd) { | 1114 | while (rgd) { |
@@ -1106,17 +1125,10 @@ static int get_local_rgrp(struct gfs2_inode *ip, u64 *unlinked, | |||
1106 | case 0: | 1125 | case 0: |
1107 | if (try_rgrp_fit(rgd, al)) | 1126 | if (try_rgrp_fit(rgd, al)) |
1108 | goto out; | 1127 | goto out; |
1109 | /* If the rg came in already locked, there's no | 1128 | if (rgd->rd_flags & GFS2_RDF_CHECK) |
1110 | way we can recover from a failed try_rgrp_unlink | 1129 | try_rgrp_unlink(rgd, last_unlinked, ip->i_no_addr); |
1111 | because that would require an iput which can only | ||
1112 | happen after the rgrp is unlocked. */ | ||
1113 | if (!rg_locked && rgd->rd_flags & GFS2_RDF_CHECK) | ||
1114 | *unlinked = try_rgrp_unlink(rgd, last_unlinked, | ||
1115 | ip->i_no_addr); | ||
1116 | if (!rg_locked) | 1130 | if (!rg_locked) |
1117 | gfs2_glock_dq_uninit(&al->al_rgd_gh); | 1131 | gfs2_glock_dq_uninit(&al->al_rgd_gh); |
1118 | if (*unlinked) | ||
1119 | return -EAGAIN; | ||
1120 | /* fall through */ | 1132 | /* fall through */ |
1121 | case GLR_TRYFAILED: | 1133 | case GLR_TRYFAILED: |
1122 | rgd = recent_rgrp_next(rgd); | 1134 | rgd = recent_rgrp_next(rgd); |
@@ -1145,13 +1157,10 @@ static int get_local_rgrp(struct gfs2_inode *ip, u64 *unlinked, | |||
1145 | case 0: | 1157 | case 0: |
1146 | if (try_rgrp_fit(rgd, al)) | 1158 | if (try_rgrp_fit(rgd, al)) |
1147 | goto out; | 1159 | goto out; |
1148 | if (!rg_locked && rgd->rd_flags & GFS2_RDF_CHECK) | 1160 | if (rgd->rd_flags & GFS2_RDF_CHECK) |
1149 | *unlinked = try_rgrp_unlink(rgd, last_unlinked, | 1161 | try_rgrp_unlink(rgd, last_unlinked, ip->i_no_addr); |
1150 | ip->i_no_addr); | ||
1151 | if (!rg_locked) | 1162 | if (!rg_locked) |
1152 | gfs2_glock_dq_uninit(&al->al_rgd_gh); | 1163 | gfs2_glock_dq_uninit(&al->al_rgd_gh); |
1153 | if (*unlinked) | ||
1154 | return -EAGAIN; | ||
1155 | break; | 1164 | break; |
1156 | 1165 | ||
1157 | case GLR_TRYFAILED: | 1166 | case GLR_TRYFAILED: |
@@ -1204,12 +1213,12 @@ int gfs2_inplace_reserve_i(struct gfs2_inode *ip, int hold_rindex, | |||
1204 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); | 1213 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); |
1205 | struct gfs2_alloc *al = ip->i_alloc; | 1214 | struct gfs2_alloc *al = ip->i_alloc; |
1206 | int error = 0; | 1215 | int error = 0; |
1207 | u64 last_unlinked = NO_BLOCK, unlinked; | 1216 | u64 last_unlinked = NO_BLOCK; |
1217 | int tries = 0; | ||
1208 | 1218 | ||
1209 | if (gfs2_assert_warn(sdp, al->al_requested)) | 1219 | if (gfs2_assert_warn(sdp, al->al_requested)) |
1210 | return -EINVAL; | 1220 | return -EINVAL; |
1211 | 1221 | ||
1212 | try_again: | ||
1213 | if (hold_rindex) { | 1222 | if (hold_rindex) { |
1214 | /* We need to hold the rindex unless the inode we're using is | 1223 | /* We need to hold the rindex unless the inode we're using is |
1215 | the rindex itself, in which case it's already held. */ | 1224 | the rindex itself, in which case it's already held. */ |
@@ -1218,31 +1227,23 @@ try_again: | |||
1218 | else if (!sdp->sd_rgrps) /* We may not have the rindex read | 1227 | else if (!sdp->sd_rgrps) /* We may not have the rindex read |
1219 | in, so: */ | 1228 | in, so: */ |
1220 | error = gfs2_ri_update_special(ip); | 1229 | error = gfs2_ri_update_special(ip); |
1230 | if (error) | ||
1231 | return error; | ||
1221 | } | 1232 | } |
1222 | 1233 | ||
1223 | if (error) | 1234 | do { |
1224 | return error; | 1235 | error = get_local_rgrp(ip, &last_unlinked); |
1236 | /* If there is no space, flushing the log may release some */ | ||
1237 | if (error) | ||
1238 | gfs2_log_flush(sdp, NULL); | ||
1239 | } while (error && tries++ < 3); | ||
1225 | 1240 | ||
1226 | /* Find an rgrp suitable for allocation. If it encounters any unlinked | ||
1227 | dinodes along the way, error will equal -EAGAIN and unlinked will | ||
1228 | contains it block address. We then need to look up that inode and | ||
1229 | try to free it, and try the allocation again. */ | ||
1230 | error = get_local_rgrp(ip, &unlinked, &last_unlinked); | ||
1231 | if (error) { | 1241 | if (error) { |
1232 | if (hold_rindex && ip != GFS2_I(sdp->sd_rindex)) | 1242 | if (hold_rindex && ip != GFS2_I(sdp->sd_rindex)) |
1233 | gfs2_glock_dq_uninit(&al->al_ri_gh); | 1243 | gfs2_glock_dq_uninit(&al->al_ri_gh); |
1234 | if (error != -EAGAIN) | 1244 | return error; |
1235 | return error; | ||
1236 | |||
1237 | gfs2_process_unlinked_inode(ip->i_inode.i_sb, unlinked); | ||
1238 | /* regardless of whether or not gfs2_process_unlinked_inode | ||
1239 | was successful, we don't want to repeat it again. */ | ||
1240 | last_unlinked = unlinked; | ||
1241 | gfs2_log_flush(sdp, NULL); | ||
1242 | error = 0; | ||
1243 | |||
1244 | goto try_again; | ||
1245 | } | 1245 | } |
1246 | |||
1246 | /* no error, so we have the rgrp set in the inode's allocation. */ | 1247 | /* no error, so we have the rgrp set in the inode's allocation. */ |
1247 | al->al_file = file; | 1248 | al->al_file = file; |
1248 | al->al_line = line; | 1249 | al->al_line = line; |
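
Taken together, the rgrp.c hunks replace the old -EAGAIN/goto dance with two simpler mechanisms: try_rgrp_unlink() now queues unlinked inodes to gfs2_delete_workqueue (capped at 2*NR_CPUS per pass) rather than handing a block back for the caller to reclaim, and gfs2_inplace_reserve_i() simply retries the allocation a few times with a log flush in between. A condensed sketch of that retry shape, reusing the names from the patch (reserve_with_retries is a stand-in for the body of gfs2_inplace_reserve_i, not a function added by the patch):

static int reserve_with_retries(struct gfs2_inode *ip, u64 *last_unlinked)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	int tries = 0;
	int error;

	do {
		error = get_local_rgrp(ip, last_unlinked);
		/* If there is no space, flushing the log may release some. */
		if (error)
			gfs2_log_flush(sdp, NULL);
	} while (error && tries++ < 3);

	return error;
}
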
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index d6cfac1f0a40..a5fe68189eed 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c | |||
@@ -932,8 +932,7 @@ struct file *hugetlb_file_setup(const char *name, size_t size, int acctflag, | |||
932 | if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) { | 932 | if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) { |
933 | *user = current_user(); | 933 | *user = current_user(); |
934 | if (user_shm_lock(size, *user)) { | 934 | if (user_shm_lock(size, *user)) { |
935 | WARN_ONCE(1, | 935 | printk_once(KERN_WARNING "Using mlock ulimits for SHM_HUGETLB is deprecated\n"); |
936 | "Using mlock ulimits for SHM_HUGETLB deprecated\n"); | ||
937 | } else { | 936 | } else { |
938 | *user = NULL; | 937 | *user = NULL; |
939 | return ERR_PTR(-EPERM); | 938 | return ERR_PTR(-EPERM); |
diff --git a/fs/ioprio.c b/fs/ioprio.c index 748cfb92dcc6..2f7d05c89922 100644 --- a/fs/ioprio.c +++ b/fs/ioprio.c | |||
@@ -111,12 +111,14 @@ SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio) | |||
111 | read_lock(&tasklist_lock); | 111 | read_lock(&tasklist_lock); |
112 | switch (which) { | 112 | switch (which) { |
113 | case IOPRIO_WHO_PROCESS: | 113 | case IOPRIO_WHO_PROCESS: |
114 | rcu_read_lock(); | ||
114 | if (!who) | 115 | if (!who) |
115 | p = current; | 116 | p = current; |
116 | else | 117 | else |
117 | p = find_task_by_vpid(who); | 118 | p = find_task_by_vpid(who); |
118 | if (p) | 119 | if (p) |
119 | ret = set_task_ioprio(p, ioprio); | 120 | ret = set_task_ioprio(p, ioprio); |
121 | rcu_read_unlock(); | ||
120 | break; | 122 | break; |
121 | case IOPRIO_WHO_PGRP: | 123 | case IOPRIO_WHO_PGRP: |
122 | if (!who) | 124 | if (!who) |
@@ -139,7 +141,12 @@ SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio) | |||
139 | break; | 141 | break; |
140 | 142 | ||
141 | do_each_thread(g, p) { | 143 | do_each_thread(g, p) { |
142 | if (__task_cred(p)->uid != who) | 144 | int match; |
145 | |||
146 | rcu_read_lock(); | ||
147 | match = __task_cred(p)->uid == who; | ||
148 | rcu_read_unlock(); | ||
149 | if (!match) | ||
143 | continue; | 150 | continue; |
144 | ret = set_task_ioprio(p, ioprio); | 151 | ret = set_task_ioprio(p, ioprio); |
145 | if (ret) | 152 | if (ret) |
@@ -200,12 +207,14 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who) | |||
200 | read_lock(&tasklist_lock); | 207 | read_lock(&tasklist_lock); |
201 | switch (which) { | 208 | switch (which) { |
202 | case IOPRIO_WHO_PROCESS: | 209 | case IOPRIO_WHO_PROCESS: |
210 | rcu_read_lock(); | ||
203 | if (!who) | 211 | if (!who) |
204 | p = current; | 212 | p = current; |
205 | else | 213 | else |
206 | p = find_task_by_vpid(who); | 214 | p = find_task_by_vpid(who); |
207 | if (p) | 215 | if (p) |
208 | ret = get_task_ioprio(p); | 216 | ret = get_task_ioprio(p); |
217 | rcu_read_unlock(); | ||
209 | break; | 218 | break; |
210 | case IOPRIO_WHO_PGRP: | 219 | case IOPRIO_WHO_PGRP: |
211 | if (!who) | 220 | if (!who) |
@@ -232,7 +241,12 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who) | |||
232 | break; | 241 | break; |
233 | 242 | ||
234 | do_each_thread(g, p) { | 243 | do_each_thread(g, p) { |
235 | if (__task_cred(p)->uid != user->uid) | 244 | int match; |
245 | |||
246 | rcu_read_lock(); | ||
247 | match = __task_cred(p)->uid == user->uid; | ||
248 | rcu_read_unlock(); | ||
249 | if (!match) | ||
236 | continue; | 250 | continue; |
237 | tmpio = get_task_ioprio(p); | 251 | tmpio = get_task_ioprio(p); |
238 | if (tmpio < 0) | 252 | if (tmpio < 0) |
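
All four ioprio hunks apply the same rule: __task_cred() returns an RCU-protected pointer, so it may only be dereferenced inside rcu_read_lock()/rcu_read_unlock(), and only the comparison result escapes the critical section. A minimal sketch of that pattern (task_uid_matches is a hypothetical helper, not part of the patch):

#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/rcupdate.h>

static bool task_uid_matches(struct task_struct *p, uid_t who)
{
	bool match;

	rcu_read_lock();
	match = __task_cred(p)->uid == who;	/* cred only valid under RCU */
	rcu_read_unlock();

	return match;
}
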
diff --git a/fs/locks.c b/fs/locks.c index 65765cb6afed..0e62dd35d088 100644 --- a/fs/locks.c +++ b/fs/locks.c | |||
@@ -1504,9 +1504,8 @@ static int do_fcntl_delete_lease(struct file *filp) | |||
1504 | 1504 | ||
1505 | static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg) | 1505 | static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg) |
1506 | { | 1506 | { |
1507 | struct file_lock *fl; | 1507 | struct file_lock *fl, *ret; |
1508 | struct fasync_struct *new; | 1508 | struct fasync_struct *new; |
1509 | struct inode *inode = filp->f_path.dentry->d_inode; | ||
1510 | int error; | 1509 | int error; |
1511 | 1510 | ||
1512 | fl = lease_alloc(filp, arg); | 1511 | fl = lease_alloc(filp, arg); |
@@ -1518,13 +1517,16 @@ static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg) | |||
1518 | locks_free_lock(fl); | 1517 | locks_free_lock(fl); |
1519 | return -ENOMEM; | 1518 | return -ENOMEM; |
1520 | } | 1519 | } |
1520 | ret = fl; | ||
1521 | lock_flocks(); | 1521 | lock_flocks(); |
1522 | error = __vfs_setlease(filp, arg, &fl); | 1522 | error = __vfs_setlease(filp, arg, &ret); |
1523 | if (error) { | 1523 | if (error) { |
1524 | unlock_flocks(); | 1524 | unlock_flocks(); |
1525 | locks_free_lock(fl); | 1525 | locks_free_lock(fl); |
1526 | goto out_free_fasync; | 1526 | goto out_free_fasync; |
1527 | } | 1527 | } |
1528 | if (ret != fl) | ||
1529 | locks_free_lock(fl); | ||
1528 | 1530 | ||
1529 | /* | 1531 | /* |
1530 | * fasync_insert_entry() returns the old entry if any. | 1532 | * fasync_insert_entry() returns the old entry if any. |
@@ -1532,17 +1534,10 @@ static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg) | |||
1532 | * inserted it into the fasync list. Clear new so that | 1534 | * inserted it into the fasync list. Clear new so that |
1533 | * we don't release it here. | 1535 | * we don't release it here. |
1534 | */ | 1536 | */ |
1535 | if (!fasync_insert_entry(fd, filp, &fl->fl_fasync, new)) | 1537 | if (!fasync_insert_entry(fd, filp, &ret->fl_fasync, new)) |
1536 | new = NULL; | 1538 | new = NULL; |
1537 | 1539 | ||
1538 | if (error < 0) { | 1540 | error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0); |
1539 | /* remove lease just inserted by setlease */ | ||
1540 | fl->fl_type = F_UNLCK | F_INPROGRESS; | ||
1541 | fl->fl_break_time = jiffies - 10; | ||
1542 | time_out_leases(inode); | ||
1543 | } else { | ||
1544 | error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0); | ||
1545 | } | ||
1546 | unlock_flocks(); | 1541 | unlock_flocks(); |
1547 | 1542 | ||
1548 | out_free_fasync: | 1543 | out_free_fasync: |
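
The locks.c fix turns lease setup into a may-substitute interface: __vfs_setlease() is handed &ret and can either install the caller's freshly allocated lock or hand back an existing one, in which case the caller's own copy must be freed. The ownership rule in isolation, as a small userspace sketch with generic names (install_lease stands in for __vfs_setlease):

#include <stdlib.h>

struct lease { int type; };

/* May keep the caller's lease, or swap in an existing one via *out. */
static int install_lease(struct lease **out, struct lease *existing)
{
	if (existing)
		*out = existing;	/* caller's allocation was not consumed */
	return 0;
}

static int add_lease(struct lease *existing)
{
	struct lease *fl = malloc(sizeof(*fl));
	struct lease *ret = fl;
	int error;

	if (!fl)
		return -1;

	error = install_lease(&ret, existing);
	if (error || ret != fl)
		free(fl);		/* our copy was not the one installed */

	return error;
}
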
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index f1e5ec6b5105..ad2bfa68d534 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c | |||
@@ -673,16 +673,17 @@ static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses) | |||
673 | spin_unlock(&clp->cl_lock); | 673 | spin_unlock(&clp->cl_lock); |
674 | } | 674 | } |
675 | 675 | ||
676 | static void nfsd4_register_conn(struct nfsd4_conn *conn) | 676 | static int nfsd4_register_conn(struct nfsd4_conn *conn) |
677 | { | 677 | { |
678 | conn->cn_xpt_user.callback = nfsd4_conn_lost; | 678 | conn->cn_xpt_user.callback = nfsd4_conn_lost; |
679 | register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user); | 679 | return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user); |
680 | } | 680 | } |
681 | 681 | ||
682 | static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses) | 682 | static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses) |
683 | { | 683 | { |
684 | struct nfsd4_conn *conn; | 684 | struct nfsd4_conn *conn; |
685 | u32 flags = NFS4_CDFC4_FORE; | 685 | u32 flags = NFS4_CDFC4_FORE; |
686 | int ret; | ||
686 | 687 | ||
687 | if (ses->se_flags & SESSION4_BACK_CHAN) | 688 | if (ses->se_flags & SESSION4_BACK_CHAN) |
688 | flags |= NFS4_CDFC4_BACK; | 689 | flags |= NFS4_CDFC4_BACK; |
@@ -690,7 +691,10 @@ static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses) | |||
690 | if (!conn) | 691 | if (!conn) |
691 | return nfserr_jukebox; | 692 | return nfserr_jukebox; |
692 | nfsd4_hash_conn(conn, ses); | 693 | nfsd4_hash_conn(conn, ses); |
693 | nfsd4_register_conn(conn); | 694 | ret = nfsd4_register_conn(conn); |
695 | if (ret) | ||
696 | /* oops; xprt is already down: */ | ||
697 | nfsd4_conn_lost(&conn->cn_xpt_user); | ||
694 | return nfs_ok; | 698 | return nfs_ok; |
695 | } | 699 | } |
696 | 700 | ||
@@ -1644,6 +1648,7 @@ static void nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_sessi | |||
1644 | { | 1648 | { |
1645 | struct nfs4_client *clp = ses->se_client; | 1649 | struct nfs4_client *clp = ses->se_client; |
1646 | struct nfsd4_conn *c; | 1650 | struct nfsd4_conn *c; |
1651 | int ret; | ||
1647 | 1652 | ||
1648 | spin_lock(&clp->cl_lock); | 1653 | spin_lock(&clp->cl_lock); |
1649 | c = __nfsd4_find_conn(new->cn_xprt, ses); | 1654 | c = __nfsd4_find_conn(new->cn_xprt, ses); |
@@ -1654,7 +1659,10 @@ static void nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_sessi | |||
1654 | } | 1659 | } |
1655 | __nfsd4_hash_conn(new, ses); | 1660 | __nfsd4_hash_conn(new, ses); |
1656 | spin_unlock(&clp->cl_lock); | 1661 | spin_unlock(&clp->cl_lock); |
1657 | nfsd4_register_conn(new); | 1662 | ret = nfsd4_register_conn(new); |
1663 | if (ret) | ||
1664 | /* oops; xprt is already down: */ | ||
1665 | nfsd4_conn_lost(&new->cn_xpt_user); | ||
1658 | return; | 1666 | return; |
1659 | } | 1667 | } |
1660 | 1668 | ||
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index d8408217e3bd..1efea3615589 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h | |||
@@ -159,7 +159,9 @@ struct ocfs2_lock_res { | |||
159 | char l_name[OCFS2_LOCK_ID_MAX_LEN]; | 159 | char l_name[OCFS2_LOCK_ID_MAX_LEN]; |
160 | unsigned int l_ro_holders; | 160 | unsigned int l_ro_holders; |
161 | unsigned int l_ex_holders; | 161 | unsigned int l_ex_holders; |
162 | unsigned char l_level; | 162 | char l_level; |
163 | char l_requested; | ||
164 | char l_blocking; | ||
163 | 165 | ||
164 | /* Data packed - type enum ocfs2_lock_type */ | 166 | /* Data packed - type enum ocfs2_lock_type */ |
165 | unsigned char l_type; | 167 | unsigned char l_type; |
@@ -169,8 +171,6 @@ struct ocfs2_lock_res { | |||
169 | unsigned char l_action; | 171 | unsigned char l_action; |
170 | /* Data packed - enum type ocfs2_unlock_action */ | 172 | /* Data packed - enum type ocfs2_unlock_action */ |
171 | unsigned char l_unlock_action; | 173 | unsigned char l_unlock_action; |
172 | unsigned char l_requested; | ||
173 | unsigned char l_blocking; | ||
174 | unsigned int l_pending_gen; | 174 | unsigned int l_pending_gen; |
175 | 175 | ||
176 | spinlock_t l_lock; | 176 | spinlock_t l_lock; |
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index c9af48fffcd7..7d287afccde5 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c | |||
@@ -1111,11 +1111,12 @@ xfs_vm_writepage( | |||
1111 | uptodate = 0; | 1111 | uptodate = 0; |
1112 | 1112 | ||
1113 | /* | 1113 | /* |
1114 | * A hole may still be marked uptodate because discard_buffer | 1114 | * set_page_dirty dirties all buffers in a page, independent |
1115 | * leaves the flag set. | 1115 | * of their state. The dirty state however is entirely |
1116 | * meaningless for holes (!mapped && uptodate), so skip | ||
1117 | * buffers covering holes here. | ||
1116 | */ | 1118 | */ |
1117 | if (!buffer_mapped(bh) && buffer_uptodate(bh)) { | 1119 | if (!buffer_mapped(bh) && buffer_uptodate(bh)) { |
1118 | ASSERT(!buffer_dirty(bh)); | ||
1119 | imap_valid = 0; | 1120 | imap_valid = 0; |
1120 | continue; | 1121 | continue; |
1121 | } | 1122 | } |
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index 63fd2c07cb57..aa1d353def29 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c | |||
@@ -1781,7 +1781,6 @@ xfs_buf_delwri_split( | |||
1781 | INIT_LIST_HEAD(list); | 1781 | INIT_LIST_HEAD(list); |
1782 | spin_lock(dwlk); | 1782 | spin_lock(dwlk); |
1783 | list_for_each_entry_safe(bp, n, dwq, b_list) { | 1783 | list_for_each_entry_safe(bp, n, dwq, b_list) { |
1784 | trace_xfs_buf_delwri_split(bp, _RET_IP_); | ||
1785 | ASSERT(bp->b_flags & XBF_DELWRI); | 1784 | ASSERT(bp->b_flags & XBF_DELWRI); |
1786 | 1785 | ||
1787 | if (!XFS_BUF_ISPINNED(bp) && !xfs_buf_cond_lock(bp)) { | 1786 | if (!XFS_BUF_ISPINNED(bp) && !xfs_buf_cond_lock(bp)) { |
@@ -1795,6 +1794,7 @@ xfs_buf_delwri_split( | |||
1795 | _XBF_RUN_QUEUES); | 1794 | _XBF_RUN_QUEUES); |
1796 | bp->b_flags |= XBF_WRITE; | 1795 | bp->b_flags |= XBF_WRITE; |
1797 | list_move_tail(&bp->b_list, list); | 1796 | list_move_tail(&bp->b_list, list); |
1797 | trace_xfs_buf_delwri_split(bp, _RET_IP_); | ||
1798 | } else | 1798 | } else |
1799 | skipped++; | 1799 | skipped++; |
1800 | } | 1800 | } |
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c index 2ea238f6d38e..ad442d9e392e 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.c +++ b/fs/xfs/linux-2.6/xfs_ioctl.c | |||
@@ -416,7 +416,7 @@ xfs_attrlist_by_handle( | |||
416 | if (IS_ERR(dentry)) | 416 | if (IS_ERR(dentry)) |
417 | return PTR_ERR(dentry); | 417 | return PTR_ERR(dentry); |
418 | 418 | ||
419 | kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL); | 419 | kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL); |
420 | if (!kbuf) | 420 | if (!kbuf) |
421 | goto out_dput; | 421 | goto out_dput; |
422 | 422 | ||
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c index 96107efc0c61..94d5fd6a2973 100644 --- a/fs/xfs/linux-2.6/xfs_iops.c +++ b/fs/xfs/linux-2.6/xfs_iops.c | |||
@@ -762,7 +762,8 @@ xfs_setup_inode( | |||
762 | inode->i_state = I_NEW; | 762 | inode->i_state = I_NEW; |
763 | 763 | ||
764 | inode_sb_list_add(inode); | 764 | inode_sb_list_add(inode); |
765 | insert_inode_hash(inode); | 765 | /* make the inode look hashed for the writeback code */ |
766 | hlist_add_fake(&inode->i_hash); | ||
766 | 767 | ||
767 | inode->i_mode = ip->i_d.di_mode; | 768 | inode->i_mode = ip->i_d.di_mode; |
768 | inode->i_nlink = ip->i_d.di_nlink; | 769 | inode->i_nlink = ip->i_d.di_nlink; |
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 9f3a78fe6ae4..064f964d4f3c 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c | |||
@@ -353,9 +353,6 @@ xfs_parseargs( | |||
353 | mp->m_qflags &= ~XFS_OQUOTA_ENFD; | 353 | mp->m_qflags &= ~XFS_OQUOTA_ENFD; |
354 | } else if (!strcmp(this_char, MNTOPT_DELAYLOG)) { | 354 | } else if (!strcmp(this_char, MNTOPT_DELAYLOG)) { |
355 | mp->m_flags |= XFS_MOUNT_DELAYLOG; | 355 | mp->m_flags |= XFS_MOUNT_DELAYLOG; |
356 | cmn_err(CE_WARN, | ||
357 | "Enabling EXPERIMENTAL delayed logging feature " | ||
358 | "- use at your own risk.\n"); | ||
359 | } else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) { | 356 | } else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) { |
360 | mp->m_flags &= ~XFS_MOUNT_DELAYLOG; | 357 | mp->m_flags &= ~XFS_MOUNT_DELAYLOG; |
361 | } else if (!strcmp(this_char, "ihashsize")) { | 358 | } else if (!strcmp(this_char, "ihashsize")) { |
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index 37d33254981d..afb0d7cfad1c 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c | |||
@@ -853,6 +853,7 @@ restart: | |||
853 | if (trylock) { | 853 | if (trylock) { |
854 | if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) { | 854 | if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) { |
855 | skipped++; | 855 | skipped++; |
856 | xfs_perag_put(pag); | ||
856 | continue; | 857 | continue; |
857 | } | 858 | } |
858 | first_index = pag->pag_ici_reclaim_cursor; | 859 | first_index = pag->pag_ici_reclaim_cursor; |
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c index 9b715dce5699..9124425b7f2f 100644 --- a/fs/xfs/xfs_filestream.c +++ b/fs/xfs/xfs_filestream.c | |||
@@ -744,9 +744,15 @@ xfs_filestream_new_ag( | |||
744 | * If the file's parent directory is known, take its iolock in exclusive | 744 | * If the file's parent directory is known, take its iolock in exclusive |
745 | * mode to prevent two sibling files from racing each other to migrate | 745 | * mode to prevent two sibling files from racing each other to migrate |
746 | * themselves and their parent to different AGs. | 746 | * themselves and their parent to different AGs. |
747 | * | ||
748 | * Note that we lock the parent directory iolock inside the child | ||
749 | * iolock here. That's fine as we never hold both parent and child | ||
750 | * iolock in any other place. This is different from the ilock, | ||
751 | * which requires locking of the child after the parent for namespace | ||
752 | * operations. | ||
747 | */ | 753 | */ |
748 | if (pip) | 754 | if (pip) |
749 | xfs_ilock(pip, XFS_IOLOCK_EXCL); | 755 | xfs_ilock(pip, XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT); |
750 | 756 | ||
751 | /* | 757 | /* |
752 | * A new AG needs to be found for the file. If the file's parent | 758 | * A new AG needs to be found for the file. If the file's parent |
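
The new comment spells out a deliberate lock-ordering exception: the parent directory's iolock is taken while the child's iolock is already held, and the XFS_IOLOCK_PARENT flag annotates that nesting for lockdep. Roughly, and only as an illustration of the ordering rather than the real call path (the function name is made up):

static void lock_child_then_parent(struct xfs_inode *child,
				   struct xfs_inode *parent)
{
	xfs_ilock(child, XFS_IOLOCK_EXCL);
	xfs_ilock(parent, XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT);

	/* ... pick a new AG for both inodes ... */

	xfs_iunlock(parent, XFS_IOLOCK_EXCL);
	xfs_iunlock(child, XFS_IOLOCK_EXCL);
}
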
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index b1498ab5a399..19e9dfa1c254 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c | |||
@@ -275,6 +275,7 @@ xfs_free_perag( | |||
275 | pag = radix_tree_delete(&mp->m_perag_tree, agno); | 275 | pag = radix_tree_delete(&mp->m_perag_tree, agno); |
276 | spin_unlock(&mp->m_perag_lock); | 276 | spin_unlock(&mp->m_perag_lock); |
277 | ASSERT(pag); | 277 | ASSERT(pag); |
278 | ASSERT(atomic_read(&pag->pag_ref) == 0); | ||
278 | call_rcu(&pag->rcu_head, __xfs_free_perag); | 279 | call_rcu(&pag->rcu_head, __xfs_free_perag); |
279 | } | 280 | } |
280 | } | 281 | } |
diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h index e0e64b113bd6..9bb6eda4cd21 100644 --- a/fs/xfs/xfs_quota.h +++ b/fs/xfs/xfs_quota.h | |||
@@ -346,8 +346,17 @@ xfs_qm_vop_dqalloc(struct xfs_inode *ip, uid_t uid, gid_t gid, prid_t prid, | |||
346 | #define xfs_trans_mod_dquot_byino(tp, ip, fields, delta) | 346 | #define xfs_trans_mod_dquot_byino(tp, ip, fields, delta) |
347 | #define xfs_trans_apply_dquot_deltas(tp) | 347 | #define xfs_trans_apply_dquot_deltas(tp) |
348 | #define xfs_trans_unreserve_and_mod_dquots(tp) | 348 | #define xfs_trans_unreserve_and_mod_dquots(tp) |
349 | #define xfs_trans_reserve_quota_nblks(tp, ip, nblks, ninos, flags) (0) | 349 | static inline int xfs_trans_reserve_quota_nblks(struct xfs_trans *tp, |
350 | #define xfs_trans_reserve_quota_bydquots(tp, mp, u, g, nb, ni, fl) (0) | 350 | struct xfs_inode *ip, long nblks, long ninos, uint flags) |
351 | { | ||
352 | return 0; | ||
353 | } | ||
354 | static inline int xfs_trans_reserve_quota_bydquots(struct xfs_trans *tp, | ||
355 | struct xfs_mount *mp, struct xfs_dquot *udqp, | ||
356 | struct xfs_dquot *gdqp, long nblks, long nions, uint flags) | ||
357 | { | ||
358 | return 0; | ||
359 | } | ||
351 | #define xfs_qm_vop_create_dqattach(tp, ip, u, g) | 360 | #define xfs_qm_vop_create_dqattach(tp, ip, u, g) |
352 | #define xfs_qm_vop_rename_dqattach(it) (0) | 361 | #define xfs_qm_vop_rename_dqattach(it) (0) |
353 | #define xfs_qm_vop_chown(tp, ip, old, new) (NULL) | 362 | #define xfs_qm_vop_chown(tp, ip, old, new) (NULL) |
@@ -357,11 +366,14 @@ xfs_qm_vop_dqalloc(struct xfs_inode *ip, uid_t uid, gid_t gid, prid_t prid, | |||
357 | #define xfs_qm_dqdetach(ip) | 366 | #define xfs_qm_dqdetach(ip) |
358 | #define xfs_qm_dqrele(d) | 367 | #define xfs_qm_dqrele(d) |
359 | #define xfs_qm_statvfs(ip, s) | 368 | #define xfs_qm_statvfs(ip, s) |
360 | #define xfs_qm_sync(mp, fl) (0) | 369 | static inline int xfs_qm_sync(struct xfs_mount *mp, int flags) |
370 | { | ||
371 | return 0; | ||
372 | } | ||
361 | #define xfs_qm_newmount(mp, a, b) (0) | 373 | #define xfs_qm_newmount(mp, a, b) (0) |
362 | #define xfs_qm_mount_quotas(mp) | 374 | #define xfs_qm_mount_quotas(mp) |
363 | #define xfs_qm_unmount(mp) | 375 | #define xfs_qm_unmount(mp) |
364 | #define xfs_qm_unmount_quotas(mp) (0) | 376 | #define xfs_qm_unmount_quotas(mp) |
365 | #endif /* CONFIG_XFS_QUOTA */ | 377 | #endif /* CONFIG_XFS_QUOTA */ |
366 | 378 | ||
367 | #define xfs_trans_unreserve_quota_nblks(tp, ip, nblks, ninos, flags) \ | 379 | #define xfs_trans_unreserve_quota_nblks(tp, ip, nblks, ninos, flags) \ |
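
The quota stubs move from function-like macros that expand to (0) to static inline functions for the !CONFIG_XFS_QUOTA build; the inlines keep the arguments type-checked and "used", so callers do not grow set-but-not-used warnings, yet still compile down to nothing. The general shape, with made-up names (CONFIG_EXAMPLE_FEATURE, example_reserve):

struct example_ctx;

#ifdef CONFIG_EXAMPLE_FEATURE
int example_reserve(struct example_ctx *ctx, long nblks);
#else
static inline int example_reserve(struct example_ctx *ctx, long nblks)
{
	return 0;	/* feature compiled out: reservation always "succeeds" */
}
#endif
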
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 5afa5b52063e..beafc156a535 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h | |||
@@ -432,6 +432,10 @@ extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo); | |||
432 | * together with the @destroy function, | 432 | * together with the @destroy function, |
433 | * enables driver-specific objects derived from a ttm_buffer_object. | 433 | * enables driver-specific objects derived from a ttm_buffer_object. |
434 | * On successful return, the object kref and list_kref are set to 1. | 434 | * On successful return, the object kref and list_kref are set to 1. |
435 | * If a failure occurs, the function will call the @destroy function, or | ||
436 | * kfree() if @destroy is NULL. Thus, after a failure, dereferencing @bo is | ||
437 | * illegal and will likely cause memory corruption. | ||
438 | * | ||
435 | * Returns | 439 | * Returns |
436 | * -ENOMEM: Out of memory. | 440 | * -ENOMEM: Out of memory. |
437 | * -EINVAL: Invalid placement flags. | 441 | * -EINVAL: Invalid placement flags. |
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index d01b4ddbdc56..8e0c848326b6 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h | |||
@@ -206,14 +206,84 @@ struct ttm_tt { | |||
206 | struct ttm_mem_type_manager; | 206 | struct ttm_mem_type_manager; |
207 | 207 | ||
208 | struct ttm_mem_type_manager_func { | 208 | struct ttm_mem_type_manager_func { |
209 | /** | ||
210 | * struct ttm_mem_type_manager member init | ||
211 | * | ||
212 | * @man: Pointer to a memory type manager. | ||
213 | * @p_size: Implementation dependent, but typically the size of the | ||
214 | * range to be managed in pages. | ||
215 | * | ||
216 | * Called to initialize a private range manager. The function is | ||
217 | * expected to initialize the man::priv member. | ||
218 | * Returns 0 on success, negative error code on failure. | ||
219 | */ | ||
209 | int (*init)(struct ttm_mem_type_manager *man, unsigned long p_size); | 220 | int (*init)(struct ttm_mem_type_manager *man, unsigned long p_size); |
221 | |||
222 | /** | ||
223 | * struct ttm_mem_type_manager member takedown | ||
224 | * | ||
225 | * @man: Pointer to a memory type manager. | ||
226 | * | ||
227 | * Called to undo the setup done in init. All allocated resources | ||
228 | * should be freed. | ||
229 | */ | ||
210 | int (*takedown)(struct ttm_mem_type_manager *man); | 230 | int (*takedown)(struct ttm_mem_type_manager *man); |
231 | |||
232 | /** | ||
233 | * struct ttm_mem_type_manager member get_node | ||
234 | * | ||
235 | * @man: Pointer to a memory type manager. | ||
236 | * @bo: Pointer to the buffer object we're allocating space for. | ||
237 | * @placement: Placement details. | ||
238 | * @mem: Pointer to a struct ttm_mem_reg to be filled in. | ||
239 | * | ||
240 | * This function should allocate space in the memory type managed | ||
241 | * by @man. Placement details if | ||
242 | * applicable are given by @placement. If successful, | ||
243 | * @mem::mm_node should be set to a non-null value, and | ||
244 | * @mem::start should be set to a value identifying the beginning | ||
245 | * of the range allocated, and the function should return zero. | ||
246 | * If the memory region cannot accommodate the buffer object, @mem::mm_node | ||
247 | * should be set to NULL, and the function should return 0. | ||
248 | * If a system error occurred, preventing the request from being fulfilled, | ||
249 | * the function should return a negative error code. | ||
250 | * | ||
251 | * Note that @mem::mm_node will only be dereferenced by | ||
252 | * struct ttm_mem_type_manager functions and optionally by the driver, | ||
253 | * which has knowledge of the underlying type. | ||
254 | * | ||
255 | * This function may not be called from within atomic context, so | ||
256 | * an implementation can and must use either a mutex or a spinlock to | ||
257 | * protect any data structures managing the space. | ||
258 | */ | ||
211 | int (*get_node)(struct ttm_mem_type_manager *man, | 259 | int (*get_node)(struct ttm_mem_type_manager *man, |
212 | struct ttm_buffer_object *bo, | 260 | struct ttm_buffer_object *bo, |
213 | struct ttm_placement *placement, | 261 | struct ttm_placement *placement, |
214 | struct ttm_mem_reg *mem); | 262 | struct ttm_mem_reg *mem); |
263 | |||
264 | /** | ||
265 | * struct ttm_mem_type_manager member put_node | ||
266 | * | ||
267 | * @man: Pointer to a memory type manager. | ||
268 | * @mem: Pointer to the struct ttm_mem_reg whose resources are to be freed. | ||
269 | * | ||
270 | * This function frees memory type resources previously allocated, | ||
271 | * identified by @mem::mm_node and @mem::start. May not | ||
272 | * be called from within atomic context. | ||
273 | */ | ||
215 | void (*put_node)(struct ttm_mem_type_manager *man, | 274 | void (*put_node)(struct ttm_mem_type_manager *man, |
216 | struct ttm_mem_reg *mem); | 275 | struct ttm_mem_reg *mem); |
276 | |||
277 | /** | ||
278 | * struct ttm_mem_type_manager member debug | ||
279 | * | ||
280 | * @man: Pointer to a memory type manager. | ||
281 | * @prefix: Prefix to be used in printout to identify the caller. | ||
282 | * | ||
283 | * This function is called to print out the state of the memory | ||
284 | * type manager to aid debugging of out-of-memory conditions. | ||
285 | * It may not be called from within atomic context. | ||
286 | */ | ||
217 | void (*debug)(struct ttm_mem_type_manager *man, const char *prefix); | 287 | void (*debug)(struct ttm_mem_type_manager *man, const char *prefix); |
218 | }; | 288 | }; |
219 | 289 | ||
@@ -231,14 +301,13 @@ struct ttm_mem_type_manager { | |||
231 | uint64_t size; | 301 | uint64_t size; |
232 | uint32_t available_caching; | 302 | uint32_t available_caching; |
233 | uint32_t default_caching; | 303 | uint32_t default_caching; |
304 | const struct ttm_mem_type_manager_func *func; | ||
305 | void *priv; | ||
234 | 306 | ||
235 | /* | 307 | /* |
236 | * Protected by the bdev->lru_lock. | 308 | * Protected by the global->lru_lock. |
237 | * TODO: Consider one lru_lock per ttm_mem_type_manager. | ||
238 | * Plays ill with list removal, though. | ||
239 | */ | 309 | */ |
240 | const struct ttm_mem_type_manager_func *func; | 310 | |
241 | void *priv; | ||
242 | struct list_head lru; | 311 | struct list_head lru; |
243 | }; | 312 | }; |
244 | 313 | ||
diff --git a/include/linux/atomic.h b/include/linux/atomic.h new file mode 100644 index 000000000000..96c038e43d66 --- /dev/null +++ b/include/linux/atomic.h | |||
@@ -0,0 +1,37 @@ | |||
1 | #ifndef _LINUX_ATOMIC_H | ||
2 | #define _LINUX_ATOMIC_H | ||
3 | #include <asm/atomic.h> | ||
4 | |||
5 | /** | ||
6 | * atomic_inc_not_zero_hint - increment if not null | ||
7 | * @v: pointer of type atomic_t | ||
8 | * @hint: probable value of the atomic before the increment | ||
9 | * | ||
10 | * This version of atomic_inc_not_zero() gives a hint of the probable | ||
11 | * value of the atomic. This helps the processor avoid reading the memory | ||
12 | * before doing the atomic read/modify/write cycle, lowering the | ||
13 | * number of bus transactions on some arches. | ||
14 | * | ||
15 | * Returns: 0 if increment was not done, 1 otherwise. | ||
16 | */ | ||
17 | #ifndef atomic_inc_not_zero_hint | ||
18 | static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint) | ||
19 | { | ||
20 | int val, c = hint; | ||
21 | |||
22 | /* sanity test, should be removed by compiler if hint is a constant */ | ||
23 | if (!hint) | ||
24 | return atomic_inc_not_zero(v); | ||
25 | |||
26 | do { | ||
27 | val = atomic_cmpxchg(v, c, c + 1); | ||
28 | if (val == c) | ||
29 | return 1; | ||
30 | c = val; | ||
31 | } while (c); | ||
32 | |||
33 | return 0; | ||
34 | } | ||
35 | #endif | ||
36 | |||
37 | #endif /* _LINUX_ATOMIC_H */ | ||
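
A usage sketch for the new helper (struct obj and obj_tryget are hypothetical): when most objects handed to a lookup hold exactly one reference, passing 1 as the hint lets the first cmpxchg try 1 -> 2 directly instead of reading the counter first.

#include <linux/atomic.h>

struct obj {
	atomic_t refcnt;
};

/* Returns NULL if the refcount has already dropped to zero. */
static struct obj *obj_tryget(struct obj *o)
{
	if (!atomic_inc_not_zero_hint(&o->refcnt, 1))
		return NULL;
	return o;
}
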
diff --git a/include/linux/bio.h b/include/linux/bio.h index ba679992d39b..35dcdb3589bc 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h | |||
@@ -66,10 +66,6 @@ | |||
66 | #define bio_offset(bio) bio_iovec((bio))->bv_offset | 66 | #define bio_offset(bio) bio_iovec((bio))->bv_offset |
67 | #define bio_segments(bio) ((bio)->bi_vcnt - (bio)->bi_idx) | 67 | #define bio_segments(bio) ((bio)->bi_vcnt - (bio)->bi_idx) |
68 | #define bio_sectors(bio) ((bio)->bi_size >> 9) | 68 | #define bio_sectors(bio) ((bio)->bi_size >> 9) |
69 | #define bio_empty_barrier(bio) \ | ||
70 | ((bio->bi_rw & REQ_HARDBARRIER) && \ | ||
71 | !bio_has_data(bio) && \ | ||
72 | !(bio->bi_rw & REQ_DISCARD)) | ||
73 | 69 | ||
74 | static inline unsigned int bio_cur_bytes(struct bio *bio) | 70 | static inline unsigned int bio_cur_bytes(struct bio *bio) |
75 | { | 71 | { |
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 0437ab6bb54c..46ad5197537a 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h | |||
@@ -122,7 +122,6 @@ enum rq_flag_bits { | |||
122 | __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */ | 122 | __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */ |
123 | __REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */ | 123 | __REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */ |
124 | 124 | ||
125 | __REQ_HARDBARRIER, /* may not be passed by drive either */ | ||
126 | __REQ_SYNC, /* request is sync (sync write or read) */ | 125 | __REQ_SYNC, /* request is sync (sync write or read) */ |
127 | __REQ_META, /* metadata io request */ | 126 | __REQ_META, /* metadata io request */ |
128 | __REQ_DISCARD, /* request to discard sectors */ | 127 | __REQ_DISCARD, /* request to discard sectors */ |
@@ -159,7 +158,6 @@ enum rq_flag_bits { | |||
159 | #define REQ_FAILFAST_DEV (1 << __REQ_FAILFAST_DEV) | 158 | #define REQ_FAILFAST_DEV (1 << __REQ_FAILFAST_DEV) |
160 | #define REQ_FAILFAST_TRANSPORT (1 << __REQ_FAILFAST_TRANSPORT) | 159 | #define REQ_FAILFAST_TRANSPORT (1 << __REQ_FAILFAST_TRANSPORT) |
161 | #define REQ_FAILFAST_DRIVER (1 << __REQ_FAILFAST_DRIVER) | 160 | #define REQ_FAILFAST_DRIVER (1 << __REQ_FAILFAST_DRIVER) |
162 | #define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER) | ||
163 | #define REQ_SYNC (1 << __REQ_SYNC) | 161 | #define REQ_SYNC (1 << __REQ_SYNC) |
164 | #define REQ_META (1 << __REQ_META) | 162 | #define REQ_META (1 << __REQ_META) |
165 | #define REQ_DISCARD (1 << __REQ_DISCARD) | 163 | #define REQ_DISCARD (1 << __REQ_DISCARD) |
@@ -168,8 +166,8 @@ enum rq_flag_bits { | |||
168 | #define REQ_FAILFAST_MASK \ | 166 | #define REQ_FAILFAST_MASK \ |
169 | (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) | 167 | (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) |
170 | #define REQ_COMMON_MASK \ | 168 | #define REQ_COMMON_MASK \ |
171 | (REQ_WRITE | REQ_FAILFAST_MASK | REQ_HARDBARRIER | REQ_SYNC | \ | 169 | (REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_DISCARD | \ |
172 | REQ_META | REQ_DISCARD | REQ_NOIDLE | REQ_FLUSH | REQ_FUA) | 170 | REQ_NOIDLE | REQ_FLUSH | REQ_FUA) |
173 | #define REQ_CLONE_MASK REQ_COMMON_MASK | 171 | #define REQ_CLONE_MASK REQ_COMMON_MASK |
174 | 172 | ||
175 | #define REQ_UNPLUG (1 << __REQ_UNPLUG) | 173 | #define REQ_UNPLUG (1 << __REQ_UNPLUG) |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 5027a599077d..aae86fd10c4f 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -552,8 +552,7 @@ static inline void blk_clear_queue_full(struct request_queue *q, int sync) | |||
552 | * it already be started by driver. | 552 | * it already be started by driver. |
553 | */ | 553 | */ |
554 | #define RQ_NOMERGE_FLAGS \ | 554 | #define RQ_NOMERGE_FLAGS \ |
555 | (REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER | \ | 555 | (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA) |
556 | REQ_FLUSH | REQ_FUA) | ||
557 | #define rq_mergeable(rq) \ | 556 | #define rq_mergeable(rq) \ |
558 | (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \ | 557 | (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \ |
559 | (((rq)->cmd_flags & REQ_DISCARD) || \ | 558 | (((rq)->cmd_flags & REQ_DISCARD) || \ |
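With REQ_HARDBARRIER and bio_empty_barrier() removed above, ordering is now requested with REQ_FLUSH and REQ_FUA on ordinary writes. A minimal sketch of that replacement pattern, assuming the submit_bio(rw, bio) interface of this kernel series; the helper name and the omitted completion handling are illustrative, not part of these patches:

#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/blk_types.h>

/* Sketch: a write that previously carried REQ_HARDBARRIER now asks for a
 * cache flush before it and forced unit access for the write itself. */
static void example_submit_ordered_write(struct bio *bio)
{
	submit_bio(WRITE | REQ_FLUSH | REQ_FUA, bio);
}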
diff --git a/include/linux/drbd.h b/include/linux/drbd.h index 9b2a0158f399..ef44c7a0638c 100644 --- a/include/linux/drbd.h +++ b/include/linux/drbd.h | |||
@@ -53,7 +53,7 @@ | |||
53 | 53 | ||
54 | 54 | ||
55 | extern const char *drbd_buildtag(void); | 55 | extern const char *drbd_buildtag(void); |
56 | #define REL_VERSION "8.3.9rc2" | 56 | #define REL_VERSION "8.3.9" |
57 | #define API_VERSION 88 | 57 | #define API_VERSION 88 |
58 | #define PRO_VERSION_MIN 86 | 58 | #define PRO_VERSION_MIN 86 |
59 | #define PRO_VERSION_MAX 95 | 59 | #define PRO_VERSION_MAX 95 |
diff --git a/include/linux/fsl-diu-fb.h b/include/linux/fsl-diu-fb.h index fc295d7ea463..781d4671415f 100644 --- a/include/linux/fsl-diu-fb.h +++ b/include/linux/fsl-diu-fb.h | |||
@@ -54,7 +54,6 @@ struct aoi_display_offset { | |||
54 | }; | 54 | }; |
55 | 55 | ||
56 | #define MFB_SET_CHROMA_KEY _IOW('M', 1, struct mfb_chroma_key) | 56 | #define MFB_SET_CHROMA_KEY _IOW('M', 1, struct mfb_chroma_key) |
57 | #define MFB_WAIT_FOR_VSYNC _IOW('F', 0x20, u_int32_t) | ||
58 | #define MFB_SET_BRIGHTNESS _IOW('M', 3, __u8) | 57 | #define MFB_SET_BRIGHTNESS _IOW('M', 3, __u8) |
59 | 58 | ||
60 | #define MFB_SET_ALPHA 0x80014d00 | 59 | #define MFB_SET_ALPHA 0x80014d00 |
diff --git a/include/linux/highmem.h b/include/linux/highmem.h index e9138198e823..b676c585574e 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h | |||
@@ -5,6 +5,7 @@ | |||
5 | #include <linux/kernel.h> | 5 | #include <linux/kernel.h> |
6 | #include <linux/mm.h> | 6 | #include <linux/mm.h> |
7 | #include <linux/uaccess.h> | 7 | #include <linux/uaccess.h> |
8 | #include <linux/hardirq.h> | ||
8 | 9 | ||
9 | #include <asm/cacheflush.h> | 10 | #include <asm/cacheflush.h> |
10 | 11 | ||
diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h index e844a0b18695..4bef5c557160 100644 --- a/include/linux/i2c-id.h +++ b/include/linux/i2c-id.h | |||
@@ -32,28 +32,6 @@ | |||
32 | */ | 32 | */ |
33 | 33 | ||
34 | /* --- Bit algorithm adapters */ | 34 | /* --- Bit algorithm adapters */ |
35 | #define I2C_HW_B_BT848 0x010005 /* BT848 video boards */ | ||
36 | #define I2C_HW_B_RIVA 0x010010 /* Riva based graphics cards */ | ||
37 | #define I2C_HW_B_ZR36067 0x010019 /* Zoran-36057/36067 based boards */ | ||
38 | #define I2C_HW_B_CX2388x 0x01001b /* connexant 2388x based tv cards */ | 35 | #define I2C_HW_B_CX2388x 0x01001b /* connexant 2388x based tv cards */ |
39 | #define I2C_HW_B_EM28XX 0x01001f /* em28xx video capture cards */ | ||
40 | #define I2C_HW_B_CX2341X 0x010020 /* Conexant CX2341X MPEG encoder cards */ | ||
41 | #define I2C_HW_B_CX23885 0x010022 /* conexant 23885 based tv cards (bus1) */ | ||
42 | #define I2C_HW_B_AU0828 0x010023 /* auvitek au0828 usb bridge */ | ||
43 | #define I2C_HW_B_CX231XX 0x010024 /* Conexant CX231XX USB based cards */ | ||
44 | #define I2C_HW_B_HDPVR 0x010025 /* Hauppauge HD PVR */ | ||
45 | |||
46 | /* --- SGI adapters */ | ||
47 | #define I2C_HW_SGI_VINO 0x160000 | ||
48 | |||
49 | /* --- SMBus only adapters */ | ||
50 | #define I2C_HW_SMBUS_W9968CF 0x04000d | ||
51 | #define I2C_HW_SMBUS_OV511 0x04000e /* OV511(+) USB 1.1 webcam ICs */ | ||
52 | #define I2C_HW_SMBUS_OV518 0x04000f /* OV518(+) USB 1.1 webcam ICs */ | ||
53 | #define I2C_HW_SMBUS_CAFE 0x040012 /* Marvell 88ALP01 "CAFE" cam */ | ||
54 | |||
55 | /* --- Miscellaneous adapters */ | ||
56 | #define I2C_HW_SAA7146 0x060000 /* SAA7146 video decoder bus */ | ||
57 | #define I2C_HW_SAA7134 0x090000 /* SAA7134 video decoder bus */ | ||
58 | 36 | ||
59 | #endif /* LINUX_I2C_ID_H */ | 37 | #endif /* LINUX_I2C_ID_H */ |
diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 889b35abaeda..56cfe23ffb39 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h | |||
@@ -353,7 +353,7 @@ struct i2c_algorithm { | |||
353 | */ | 353 | */ |
354 | struct i2c_adapter { | 354 | struct i2c_adapter { |
355 | struct module *owner; | 355 | struct module *owner; |
356 | unsigned int id; | 356 | unsigned int id __deprecated; |
357 | unsigned int class; /* classes to allow probing for */ | 357 | unsigned int class; /* classes to allow probing for */ |
358 | const struct i2c_algorithm *algo; /* the algorithm to access the bus */ | 358 | const struct i2c_algorithm *algo; /* the algorithm to access the bus */ |
359 | void *algo_data; | 359 | void *algo_data; |
diff --git a/include/linux/i2c/adp5588.h b/include/linux/i2c/adp5588.h index 3c5d6b6e765c..cec17cf6cac2 100644 --- a/include/linux/i2c/adp5588.h +++ b/include/linux/i2c/adp5588.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Analog Devices ADP5588 I/O Expander and QWERTY Keypad Controller | 2 | * Analog Devices ADP5588 I/O Expander and QWERTY Keypad Controller |
3 | * | 3 | * |
4 | * Copyright 2009 Analog Devices Inc. | 4 | * Copyright 2009-2010 Analog Devices Inc. |
5 | * | 5 | * |
6 | * Licensed under the GPL-2 or later. | 6 | * Licensed under the GPL-2 or later. |
7 | */ | 7 | */ |
@@ -77,13 +77,26 @@ | |||
77 | /* Configuration Register1 */ | 77 | /* Configuration Register1 */ |
78 | #define ADP5588_AUTO_INC (1 << 7) | 78 | #define ADP5588_AUTO_INC (1 << 7) |
79 | #define ADP5588_GPIEM_CFG (1 << 6) | 79 | #define ADP5588_GPIEM_CFG (1 << 6) |
80 | #define ADP5588_OVR_FLOW_M (1 << 5) | ||
80 | #define ADP5588_INT_CFG (1 << 4) | 81 | #define ADP5588_INT_CFG (1 << 4) |
82 | #define ADP5588_OVR_FLOW_IEN (1 << 3) | ||
83 | #define ADP5588_K_LCK_IM (1 << 2) | ||
81 | #define ADP5588_GPI_IEN (1 << 1) | 84 | #define ADP5588_GPI_IEN (1 << 1) |
85 | #define ADP5588_KE_IEN (1 << 0) | ||
82 | 86 | ||
83 | /* Interrupt Status Register */ | 87 | /* Interrupt Status Register */ |
88 | #define ADP5588_CMP2_INT (1 << 5) | ||
89 | #define ADP5588_CMP1_INT (1 << 4) | ||
90 | #define ADP5588_OVR_FLOW_INT (1 << 3) | ||
91 | #define ADP5588_K_LCK_INT (1 << 2) | ||
84 | #define ADP5588_GPI_INT (1 << 1) | 92 | #define ADP5588_GPI_INT (1 << 1) |
85 | #define ADP5588_KE_INT (1 << 0) | 93 | #define ADP5588_KE_INT (1 << 0) |
86 | 94 | ||
95 | /* Key Lock and Event Counter Register */ | ||
96 | #define ADP5588_K_LCK_EN (1 << 6) | ||
97 | #define ADP5588_LCK21 0x30 | ||
98 | #define ADP5588_KEC 0xF | ||
99 | |||
87 | #define ADP5588_MAXGPIO 18 | 100 | #define ADP5588_MAXGPIO 18 |
88 | #define ADP5588_BANK(offs) ((offs) >> 3) | 101 | #define ADP5588_BANK(offs) ((offs) >> 3) |
89 | #define ADP5588_BIT(offs) (1u << ((offs) & 0x7)) | 102 | #define ADP5588_BIT(offs) (1u << ((offs) & 0x7)) |
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index c2f3a72712ce..635e1faec412 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h | |||
@@ -339,6 +339,31 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) | |||
339 | } | 339 | } |
340 | } | 340 | } |
341 | 341 | ||
342 | /** | ||
343 | * vlan_get_protocol - get protocol EtherType. | ||
344 | * @skb: skbuff to query | ||
345 | * | ||
346 | * Returns the EtherType of the packet, regardless of whether it is | ||
347 | * vlan encapsulated (normal or hardware accelerated) or not. | ||
348 | */ | ||
349 | static inline __be16 vlan_get_protocol(const struct sk_buff *skb) | ||
350 | { | ||
351 | __be16 protocol = 0; | ||
352 | |||
353 | if (vlan_tx_tag_present(skb) || | ||
354 | skb->protocol != cpu_to_be16(ETH_P_8021Q)) | ||
355 | protocol = skb->protocol; | ||
356 | else { | ||
357 | __be16 proto, *protop; | ||
358 | protop = skb_header_pointer(skb, offsetof(struct vlan_ethhdr, | ||
359 | h_vlan_encapsulated_proto), | ||
360 | sizeof(proto), &proto); | ||
361 | if (likely(protop)) | ||
362 | protocol = *protop; | ||
363 | } | ||
364 | |||
365 | return protocol; | ||
366 | } | ||
342 | #endif /* __KERNEL__ */ | 367 | #endif /* __KERNEL__ */ |
343 | 368 | ||
344 | /* VLAN IOCTLs are found in sockios.h */ | 369 | /* VLAN IOCTLs are found in sockios.h */ |
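The new vlan_get_protocol() helper above hides the 802.1Q encapsulation when a driver needs the real EtherType. A short usage sketch, assuming an IPv4-only offload path; the function name and branch are illustrative:

#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#include <linux/ip.h>

/* Sketch: pick protocol-specific TX handling even for VLAN-tagged frames. */
static void example_setup_tx_offload(struct sk_buff *skb)
{
	__be16 proto = vlan_get_protocol(skb);

	if (proto == htons(ETH_P_IP)) {
		const struct iphdr *iph = ip_hdr(skb);

		/* e.g. program hardware checksum offload from iph->protocol */
		(void)iph;
	}
}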
diff --git a/include/linux/input.h b/include/linux/input.h index 51af441f3a21..6ef44465db8d 100644 --- a/include/linux/input.h +++ b/include/linux/input.h | |||
@@ -1406,6 +1406,8 @@ static inline void input_set_drvdata(struct input_dev *dev, void *data) | |||
1406 | int __must_check input_register_device(struct input_dev *); | 1406 | int __must_check input_register_device(struct input_dev *); |
1407 | void input_unregister_device(struct input_dev *); | 1407 | void input_unregister_device(struct input_dev *); |
1408 | 1408 | ||
1409 | void input_reset_device(struct input_dev *); | ||
1410 | |||
1409 | int __must_check input_register_handler(struct input_handler *); | 1411 | int __must_check input_register_handler(struct input_handler *); |
1410 | void input_unregister_handler(struct input_handler *); | 1412 | void input_unregister_handler(struct input_handler *); |
1411 | 1413 | ||
@@ -1421,7 +1423,7 @@ void input_release_device(struct input_handle *); | |||
1421 | int input_open_device(struct input_handle *); | 1423 | int input_open_device(struct input_handle *); |
1422 | void input_close_device(struct input_handle *); | 1424 | void input_close_device(struct input_handle *); |
1423 | 1425 | ||
1424 | int input_flush_device(struct input_handle* handle, struct file* file); | 1426 | int input_flush_device(struct input_handle *handle, struct file *file); |
1425 | 1427 | ||
1426 | void input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value); | 1428 | void input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value); |
1427 | void input_inject_event(struct input_handle *handle, unsigned int type, unsigned int code, int value); | 1429 | void input_inject_event(struct input_handle *handle, unsigned int type, unsigned int code, int value); |
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h index 3e70b21884a9..b2eee896dcbc 100644 --- a/include/linux/iocontext.h +++ b/include/linux/iocontext.h | |||
@@ -76,7 +76,6 @@ int put_io_context(struct io_context *ioc); | |||
76 | void exit_io_context(struct task_struct *task); | 76 | void exit_io_context(struct task_struct *task); |
77 | struct io_context *get_io_context(gfp_t gfp_flags, int node); | 77 | struct io_context *get_io_context(gfp_t gfp_flags, int node); |
78 | struct io_context *alloc_io_context(gfp_t gfp_flags, int node); | 78 | struct io_context *alloc_io_context(gfp_t gfp_flags, int node); |
79 | void copy_io_context(struct io_context **pdst, struct io_context **psrc); | ||
80 | #else | 79 | #else |
81 | static inline void exit_io_context(struct task_struct *task) | 80 | static inline void exit_io_context(struct task_struct *task) |
82 | { | 81 | { |
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index b526947bdf48..b6de9a6f7018 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
@@ -17,13 +17,11 @@ | |||
17 | #include <linux/bitops.h> | 17 | #include <linux/bitops.h> |
18 | #include <linux/log2.h> | 18 | #include <linux/log2.h> |
19 | #include <linux/typecheck.h> | 19 | #include <linux/typecheck.h> |
20 | #include <linux/printk.h> | ||
20 | #include <linux/dynamic_debug.h> | 21 | #include <linux/dynamic_debug.h> |
21 | #include <asm/byteorder.h> | 22 | #include <asm/byteorder.h> |
22 | #include <asm/bug.h> | 23 | #include <asm/bug.h> |
23 | 24 | ||
24 | extern const char linux_banner[]; | ||
25 | extern const char linux_proc_banner[]; | ||
26 | |||
27 | #define USHRT_MAX ((u16)(~0U)) | 25 | #define USHRT_MAX ((u16)(~0U)) |
28 | #define SHRT_MAX ((s16)(USHRT_MAX>>1)) | 26 | #define SHRT_MAX ((s16)(USHRT_MAX>>1)) |
29 | #define SHRT_MIN ((s16)(-SHRT_MAX - 1)) | 27 | #define SHRT_MIN ((s16)(-SHRT_MAX - 1)) |
@@ -110,31 +108,6 @@ extern const char linux_proc_banner[]; | |||
110 | */ | 108 | */ |
111 | #define lower_32_bits(n) ((u32)(n)) | 109 | #define lower_32_bits(n) ((u32)(n)) |
112 | 110 | ||
113 | #define KERN_EMERG "<0>" /* system is unusable */ | ||
114 | #define KERN_ALERT "<1>" /* action must be taken immediately */ | ||
115 | #define KERN_CRIT "<2>" /* critical conditions */ | ||
116 | #define KERN_ERR "<3>" /* error conditions */ | ||
117 | #define KERN_WARNING "<4>" /* warning conditions */ | ||
118 | #define KERN_NOTICE "<5>" /* normal but significant condition */ | ||
119 | #define KERN_INFO "<6>" /* informational */ | ||
120 | #define KERN_DEBUG "<7>" /* debug-level messages */ | ||
121 | |||
122 | /* Use the default kernel loglevel */ | ||
123 | #define KERN_DEFAULT "<d>" | ||
124 | /* | ||
125 | * Annotation for a "continued" line of log printout (only done after a | ||
126 | * line that had no enclosing \n). Only to be used by core/arch code | ||
127 | * during early bootup (a continued line is not SMP-safe otherwise). | ||
128 | */ | ||
129 | #define KERN_CONT "<c>" | ||
130 | |||
131 | extern int console_printk[]; | ||
132 | |||
133 | #define console_loglevel (console_printk[0]) | ||
134 | #define default_message_loglevel (console_printk[1]) | ||
135 | #define minimum_console_loglevel (console_printk[2]) | ||
136 | #define default_console_loglevel (console_printk[3]) | ||
137 | |||
138 | struct completion; | 111 | struct completion; |
139 | struct pt_regs; | 112 | struct pt_regs; |
140 | struct user; | 113 | struct user; |
@@ -187,11 +160,6 @@ static inline void might_fault(void) | |||
187 | } | 160 | } |
188 | #endif | 161 | #endif |
189 | 162 | ||
190 | struct va_format { | ||
191 | const char *fmt; | ||
192 | va_list *va; | ||
193 | }; | ||
194 | |||
195 | extern struct atomic_notifier_head panic_notifier_list; | 163 | extern struct atomic_notifier_head panic_notifier_list; |
196 | extern long (*panic_blink)(int state); | 164 | extern long (*panic_blink)(int state); |
197 | NORET_TYPE void panic(const char * fmt, ...) | 165 | NORET_TYPE void panic(const char * fmt, ...) |
@@ -245,114 +213,8 @@ extern int func_ptr_is_kernel_text(void *ptr); | |||
245 | struct pid; | 213 | struct pid; |
246 | extern struct pid *session_of_pgrp(struct pid *pgrp); | 214 | extern struct pid *session_of_pgrp(struct pid *pgrp); |
247 | 215 | ||
248 | /* | ||
249 | * FW_BUG | ||
250 | * Add this to a message where you are sure the firmware is buggy or behaves | ||
251 | * really stupid or out of spec. Be aware that the responsible BIOS developer | ||
252 | * should be able to fix this issue or at least get a concrete idea of the | ||
253 | * problem by reading your message without the need of looking at the kernel | ||
254 | * code. | ||
255 | * | ||
256 | * Use it for definite and high priority BIOS bugs. | ||
257 | * | ||
258 | * FW_WARN | ||
259 | * Use it for not that clear (e.g. could the kernel messed up things already?) | ||
260 | * and medium priority BIOS bugs. | ||
261 | * | ||
262 | * FW_INFO | ||
263 | * Use this one if you want to tell the user or vendor about something | ||
264 | * suspicious, but generally harmless related to the firmware. | ||
265 | * | ||
266 | * Use it for information or very low priority BIOS bugs. | ||
267 | */ | ||
268 | #define FW_BUG "[Firmware Bug]: " | ||
269 | #define FW_WARN "[Firmware Warn]: " | ||
270 | #define FW_INFO "[Firmware Info]: " | ||
271 | |||
272 | /* | ||
273 | * HW_ERR | ||
274 | * Add this to a message for hardware errors, so that user can report | ||
275 | * it to hardware vendor instead of LKML or software vendor. | ||
276 | */ | ||
277 | #define HW_ERR "[Hardware Error]: " | ||
278 | |||
279 | #ifdef CONFIG_PRINTK | ||
280 | asmlinkage int vprintk(const char *fmt, va_list args) | ||
281 | __attribute__ ((format (printf, 1, 0))); | ||
282 | asmlinkage int printk(const char * fmt, ...) | ||
283 | __attribute__ ((format (printf, 1, 2))) __cold; | ||
284 | |||
285 | /* | ||
286 | * Please don't use printk_ratelimit(), because it shares ratelimiting state | ||
287 | * with all other unrelated printk_ratelimit() callsites. Instead use | ||
288 | * printk_ratelimited() or plain old __ratelimit(). | ||
289 | */ | ||
290 | extern int __printk_ratelimit(const char *func); | ||
291 | #define printk_ratelimit() __printk_ratelimit(__func__) | ||
292 | extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, | ||
293 | unsigned int interval_msec); | ||
294 | |||
295 | extern int printk_delay_msec; | ||
296 | |||
297 | /* | ||
298 | * Print a one-time message (analogous to WARN_ONCE() et al): | ||
299 | */ | ||
300 | #define printk_once(x...) ({ \ | ||
301 | static bool __print_once; \ | ||
302 | \ | ||
303 | if (!__print_once) { \ | ||
304 | __print_once = true; \ | ||
305 | printk(x); \ | ||
306 | } \ | ||
307 | }) | ||
308 | |||
309 | void log_buf_kexec_setup(void); | ||
310 | #else | ||
311 | static inline int vprintk(const char *s, va_list args) | ||
312 | __attribute__ ((format (printf, 1, 0))); | ||
313 | static inline int vprintk(const char *s, va_list args) { return 0; } | ||
314 | static inline int printk(const char *s, ...) | ||
315 | __attribute__ ((format (printf, 1, 2))); | ||
316 | static inline int __cold printk(const char *s, ...) { return 0; } | ||
317 | static inline int printk_ratelimit(void) { return 0; } | ||
318 | static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, \ | ||
319 | unsigned int interval_msec) \ | ||
320 | { return false; } | ||
321 | |||
322 | /* No effect, but we still get type checking even in the !PRINTK case: */ | ||
323 | #define printk_once(x...) printk(x) | ||
324 | |||
325 | static inline void log_buf_kexec_setup(void) | ||
326 | { | ||
327 | } | ||
328 | #endif | ||
329 | |||
330 | /* | ||
331 | * Dummy printk for disabled debugging statements to use whilst maintaining | ||
332 | * gcc's format and side-effect checking. | ||
333 | */ | ||
334 | static inline __attribute__ ((format (printf, 1, 2))) | ||
335 | int no_printk(const char *s, ...) { return 0; } | ||
336 | |||
337 | extern int printk_needs_cpu(int cpu); | ||
338 | extern void printk_tick(void); | ||
339 | |||
340 | extern void asmlinkage __attribute__((format(printf, 1, 2))) | ||
341 | early_printk(const char *fmt, ...); | ||
342 | |||
343 | unsigned long int_sqrt(unsigned long); | 216 | unsigned long int_sqrt(unsigned long); |
344 | 217 | ||
345 | static inline void console_silent(void) | ||
346 | { | ||
347 | console_loglevel = 0; | ||
348 | } | ||
349 | |||
350 | static inline void console_verbose(void) | ||
351 | { | ||
352 | if (console_loglevel) | ||
353 | console_loglevel = 15; | ||
354 | } | ||
355 | |||
356 | extern void bust_spinlocks(int yes); | 218 | extern void bust_spinlocks(int yes); |
357 | extern void wake_up_klogd(void); | 219 | extern void wake_up_klogd(void); |
358 | extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */ | 220 | extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */ |
@@ -389,22 +251,6 @@ extern enum system_states { | |||
389 | #define TAINT_CRAP 10 | 251 | #define TAINT_CRAP 10 |
390 | #define TAINT_FIRMWARE_WORKAROUND 11 | 252 | #define TAINT_FIRMWARE_WORKAROUND 11 |
391 | 253 | ||
392 | extern void dump_stack(void) __cold; | ||
393 | |||
394 | enum { | ||
395 | DUMP_PREFIX_NONE, | ||
396 | DUMP_PREFIX_ADDRESS, | ||
397 | DUMP_PREFIX_OFFSET | ||
398 | }; | ||
399 | extern void hex_dump_to_buffer(const void *buf, size_t len, | ||
400 | int rowsize, int groupsize, | ||
401 | char *linebuf, size_t linebuflen, bool ascii); | ||
402 | extern void print_hex_dump(const char *level, const char *prefix_str, | ||
403 | int prefix_type, int rowsize, int groupsize, | ||
404 | const void *buf, size_t len, bool ascii); | ||
405 | extern void print_hex_dump_bytes(const char *prefix_str, int prefix_type, | ||
406 | const void *buf, size_t len); | ||
407 | |||
408 | extern const char hex_asc[]; | 254 | extern const char hex_asc[]; |
409 | #define hex_asc_lo(x) hex_asc[((x) & 0x0f)] | 255 | #define hex_asc_lo(x) hex_asc[((x) & 0x0f)] |
410 | #define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4] | 256 | #define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4] |
@@ -418,94 +264,6 @@ static inline char *pack_hex_byte(char *buf, u8 byte) | |||
418 | 264 | ||
419 | extern int hex_to_bin(char ch); | 265 | extern int hex_to_bin(char ch); |
420 | 266 | ||
421 | #ifndef pr_fmt | ||
422 | #define pr_fmt(fmt) fmt | ||
423 | #endif | ||
424 | |||
425 | #define pr_emerg(fmt, ...) \ | ||
426 | printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) | ||
427 | #define pr_alert(fmt, ...) \ | ||
428 | printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) | ||
429 | #define pr_crit(fmt, ...) \ | ||
430 | printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) | ||
431 | #define pr_err(fmt, ...) \ | ||
432 | printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) | ||
433 | #define pr_warning(fmt, ...) \ | ||
434 | printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) | ||
435 | #define pr_warn pr_warning | ||
436 | #define pr_notice(fmt, ...) \ | ||
437 | printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) | ||
438 | #define pr_info(fmt, ...) \ | ||
439 | printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) | ||
440 | #define pr_cont(fmt, ...) \ | ||
441 | printk(KERN_CONT fmt, ##__VA_ARGS__) | ||
442 | |||
443 | /* pr_devel() should produce zero code unless DEBUG is defined */ | ||
444 | #ifdef DEBUG | ||
445 | #define pr_devel(fmt, ...) \ | ||
446 | printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) | ||
447 | #else | ||
448 | #define pr_devel(fmt, ...) \ | ||
449 | ({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; }) | ||
450 | #endif | ||
451 | |||
452 | /* If you are writing a driver, please use dev_dbg instead */ | ||
453 | #if defined(DEBUG) | ||
454 | #define pr_debug(fmt, ...) \ | ||
455 | printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) | ||
456 | #elif defined(CONFIG_DYNAMIC_DEBUG) | ||
457 | /* dynamic_pr_debug() uses pr_fmt() internally so we don't need it here */ | ||
458 | #define pr_debug(fmt, ...) \ | ||
459 | dynamic_pr_debug(fmt, ##__VA_ARGS__) | ||
460 | #else | ||
461 | #define pr_debug(fmt, ...) \ | ||
462 | ({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; }) | ||
463 | #endif | ||
464 | |||
465 | /* | ||
466 | * ratelimited messages with local ratelimit_state, | ||
467 | * no local ratelimit_state used in the !PRINTK case | ||
468 | */ | ||
469 | #ifdef CONFIG_PRINTK | ||
470 | #define printk_ratelimited(fmt, ...) ({ \ | ||
471 | static DEFINE_RATELIMIT_STATE(_rs, \ | ||
472 | DEFAULT_RATELIMIT_INTERVAL, \ | ||
473 | DEFAULT_RATELIMIT_BURST); \ | ||
474 | \ | ||
475 | if (__ratelimit(&_rs)) \ | ||
476 | printk(fmt, ##__VA_ARGS__); \ | ||
477 | }) | ||
478 | #else | ||
479 | /* No effect, but we still get type checking even in the !PRINTK case: */ | ||
480 | #define printk_ratelimited printk | ||
481 | #endif | ||
482 | |||
483 | #define pr_emerg_ratelimited(fmt, ...) \ | ||
484 | printk_ratelimited(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) | ||
485 | #define pr_alert_ratelimited(fmt, ...) \ | ||
486 | printk_ratelimited(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) | ||
487 | #define pr_crit_ratelimited(fmt, ...) \ | ||
488 | printk_ratelimited(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) | ||
489 | #define pr_err_ratelimited(fmt, ...) \ | ||
490 | printk_ratelimited(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) | ||
491 | #define pr_warning_ratelimited(fmt, ...) \ | ||
492 | printk_ratelimited(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) | ||
493 | #define pr_warn_ratelimited pr_warning_ratelimited | ||
494 | #define pr_notice_ratelimited(fmt, ...) \ | ||
495 | printk_ratelimited(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) | ||
496 | #define pr_info_ratelimited(fmt, ...) \ | ||
497 | printk_ratelimited(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) | ||
498 | /* no pr_cont_ratelimited, don't do that... */ | ||
499 | /* If you are writing a driver, please use dev_dbg instead */ | ||
500 | #if defined(DEBUG) | ||
501 | #define pr_debug_ratelimited(fmt, ...) \ | ||
502 | printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) | ||
503 | #else | ||
504 | #define pr_debug_ratelimited(fmt, ...) \ | ||
505 | ({ if (0) printk_ratelimited(KERN_DEBUG pr_fmt(fmt), \ | ||
506 | ##__VA_ARGS__); 0; }) | ||
507 | #endif | ||
508 | |||
509 | /* | 267 | /* |
510 | * General tracing related utility functions - trace_printk(), | 268 | * General tracing related utility functions - trace_printk(), |
511 | * tracing_on/tracing_off and tracing_start()/tracing_stop | 269 | * tracing_on/tracing_off and tracing_start()/tracing_stop |
diff --git a/include/linux/leds-lp5521.h b/include/linux/leds-lp5521.h new file mode 100644 index 000000000000..38368d785f08 --- /dev/null +++ b/include/linux/leds-lp5521.h | |||
@@ -0,0 +1,47 @@ | |||
1 | /* | ||
2 | * LP5521 LED chip driver. | ||
3 | * | ||
4 | * Copyright (C) 2010 Nokia Corporation | ||
5 | * | ||
6 | * Contact: Samu Onkalo <samu.p.onkalo@nokia.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License | ||
10 | * version 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but | ||
13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
15 | * General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
20 | * 02110-1301 USA | ||
21 | */ | ||
22 | |||
23 | #ifndef __LINUX_LP5521_H | ||
24 | #define __LINUX_LP5521_H | ||
25 | |||
26 | /* See Documentation/leds/leds-lp5521.txt */ | ||
27 | |||
28 | struct lp5521_led_config { | ||
29 | u8 chan_nr; | ||
30 | u8 led_current; /* mA x10, 0 if led is not connected */ | ||
31 | u8 max_current; | ||
32 | }; | ||
33 | |||
34 | #define LP5521_CLOCK_AUTO 0 | ||
35 | #define LP5521_CLOCK_INT 1 | ||
36 | #define LP5521_CLOCK_EXT 2 | ||
37 | |||
38 | struct lp5521_platform_data { | ||
39 | struct lp5521_led_config *led_config; | ||
40 | u8 num_channels; | ||
41 | u8 clock_mode; | ||
42 | int (*setup_resources)(void); | ||
43 | void (*release_resources)(void); | ||
44 | void (*enable)(bool state); | ||
45 | }; | ||
46 | |||
47 | #endif /* __LINUX_LP5521_H */ | ||
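Board code describes its LED channels to the driver through lp5521_platform_data. A minimal sketch; the channel numbers and current limits are illustrative assumptions (led_current is in units of 0.1 mA, per the comment above):

#include <linux/kernel.h>
#include <linux/leds-lp5521.h>

static struct lp5521_led_config example_lp5521_leds[] = {
	{ .chan_nr = 0, .led_current = 50, .max_current = 130 },	/* 5.0 mA, cap 13.0 mA */
	{ .chan_nr = 1, .led_current = 0,  .max_current = 130 },	/* not connected */
};

static struct lp5521_platform_data example_lp5521_pdata = {
	.led_config	= example_lp5521_leds,
	.num_channels	= ARRAY_SIZE(example_lp5521_leds),
	.clock_mode	= LP5521_CLOCK_AUTO,
};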
diff --git a/include/linux/leds-lp5523.h b/include/linux/leds-lp5523.h new file mode 100644 index 000000000000..796747637b80 --- /dev/null +++ b/include/linux/leds-lp5523.h | |||
@@ -0,0 +1,47 @@ | |||
1 | /* | ||
2 | * LP5523 LED Driver | ||
3 | * | ||
4 | * Copyright (C) 2010 Nokia Corporation | ||
5 | * | ||
6 | * Contact: Samu Onkalo <samu.p.onkalo@nokia.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License | ||
10 | * version 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but | ||
13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
15 | * General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
20 | * 02110-1301 USA | ||
21 | */ | ||
22 | |||
23 | #ifndef __LINUX_LP5523_H | ||
24 | #define __LINUX_LP5523_H | ||
25 | |||
26 | /* See Documentation/leds/leds-lp5523.txt */ | ||
27 | |||
28 | struct lp5523_led_config { | ||
29 | u8 chan_nr; | ||
30 | u8 led_current; /* mA x10, 0 if led is not connected */ | ||
31 | u8 max_current; | ||
32 | }; | ||
33 | |||
34 | #define LP5523_CLOCK_AUTO 0 | ||
35 | #define LP5523_CLOCK_INT 1 | ||
36 | #define LP5523_CLOCK_EXT 2 | ||
37 | |||
38 | struct lp5523_platform_data { | ||
39 | struct lp5523_led_config *led_config; | ||
40 | u8 num_channels; | ||
41 | u8 clock_mode; | ||
42 | int (*setup_resources)(void); | ||
43 | void (*release_resources)(void); | ||
44 | void (*enable)(bool state); | ||
45 | }; | ||
46 | |||
47 | #endif /* __LINUX_LP5523_H */ | ||
diff --git a/include/linux/leds.h b/include/linux/leds.h index ba6986a11663..0f19df9e37b0 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/list.h> | 15 | #include <linux/list.h> |
16 | #include <linux/spinlock.h> | 16 | #include <linux/spinlock.h> |
17 | #include <linux/rwsem.h> | 17 | #include <linux/rwsem.h> |
18 | #include <linux/timer.h> | ||
18 | 19 | ||
19 | struct device; | 20 | struct device; |
20 | /* | 21 | /* |
@@ -45,10 +46,14 @@ struct led_classdev { | |||
45 | /* Get LED brightness level */ | 46 | /* Get LED brightness level */ |
46 | enum led_brightness (*brightness_get)(struct led_classdev *led_cdev); | 47 | enum led_brightness (*brightness_get)(struct led_classdev *led_cdev); |
47 | 48 | ||
48 | /* Activate hardware accelerated blink, delays are in | 49 | /* |
49 | * miliseconds and if none is provided then a sensible default | 50 | * Activate hardware accelerated blink, delays are in milliseconds |
50 | * should be chosen. The call can adjust the timings if it can't | 51 | * and if both are zero then a sensible default should be chosen. |
51 | * match the values specified exactly. */ | 52 | * The call should adjust the timings in that case and if it can't |
53 | * match the values specified exactly. | ||
54 | * Deactivate blinking again when the brightness is set to a fixed | ||
55 | * value via the brightness_set() callback. | ||
56 | */ | ||
52 | int (*blink_set)(struct led_classdev *led_cdev, | 57 | int (*blink_set)(struct led_classdev *led_cdev, |
53 | unsigned long *delay_on, | 58 | unsigned long *delay_on, |
54 | unsigned long *delay_off); | 59 | unsigned long *delay_off); |
@@ -57,6 +62,10 @@ struct led_classdev { | |||
57 | struct list_head node; /* LED Device list */ | 62 | struct list_head node; /* LED Device list */ |
58 | const char *default_trigger; /* Trigger to use */ | 63 | const char *default_trigger; /* Trigger to use */ |
59 | 64 | ||
65 | unsigned long blink_delay_on, blink_delay_off; | ||
66 | struct timer_list blink_timer; | ||
67 | int blink_brightness; | ||
68 | |||
60 | #ifdef CONFIG_LEDS_TRIGGERS | 69 | #ifdef CONFIG_LEDS_TRIGGERS |
61 | /* Protects the trigger data below */ | 70 | /* Protects the trigger data below */ |
62 | struct rw_semaphore trigger_lock; | 71 | struct rw_semaphore trigger_lock; |
@@ -73,6 +82,36 @@ extern void led_classdev_unregister(struct led_classdev *led_cdev); | |||
73 | extern void led_classdev_suspend(struct led_classdev *led_cdev); | 82 | extern void led_classdev_suspend(struct led_classdev *led_cdev); |
74 | extern void led_classdev_resume(struct led_classdev *led_cdev); | 83 | extern void led_classdev_resume(struct led_classdev *led_cdev); |
75 | 84 | ||
85 | /** | ||
86 | * led_blink_set - set blinking with software fallback | ||
87 | * @led_cdev: the LED to start blinking | ||
88 | * @delay_on: the time it should be on (in ms) | ||
89 | * @delay_off: the time it should be off (in ms) | ||
90 | * | ||
91 | * This function makes the LED blink, attempting to use the | ||
92 | * hardware acceleration if possible, but falling back to | ||
93 | * software blinking if there is no hardware blinking or if | ||
94 | * the LED refuses the passed values. | ||
95 | * | ||
96 | * Note that if software blinking is active, simply calling | ||
97 | * led_cdev->brightness_set() will not stop the blinking, | ||
98 | * use led_brightness_set() instead. | ||
99 | */ | ||
100 | extern void led_blink_set(struct led_classdev *led_cdev, | ||
101 | unsigned long *delay_on, | ||
102 | unsigned long *delay_off); | ||
103 | /** | ||
104 | * led_brightness_set - set LED brightness | ||
105 | * @led_cdev: the LED to set | ||
106 | * @brightness: the brightness to set it to | ||
107 | * | ||
108 | * Set an LED's brightness, and, if necessary, cancel the | ||
109 | * software blink timer that implements blinking when the | ||
110 | * hardware doesn't. | ||
111 | */ | ||
112 | extern void led_brightness_set(struct led_classdev *led_cdev, | ||
113 | enum led_brightness brightness); | ||
114 | |||
76 | /* | 115 | /* |
77 | * LED Triggers | 116 | * LED Triggers |
78 | */ | 117 | */ |
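The new led_blink_set()/led_brightness_set() pair gives triggers hardware blinking with a software-timer fallback. A usage sketch; the 500 ms delays are illustrative:

#include <linux/leds.h>

static void example_start_blink(struct led_classdev *led)
{
	unsigned long on = 500, off = 500;	/* ms; 0/0 would request a default rate */

	led_blink_set(led, &on, &off);		/* may adjust on/off to what hw supports */
}

static void example_stop_blink(struct led_classdev *led)
{
	led_brightness_set(led, LED_FULL);	/* also cancels a software blink timer */
}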
diff --git a/include/linux/libata.h b/include/linux/libata.h index 15b77b8dc7e1..d947b1231662 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
@@ -986,7 +986,7 @@ extern void ata_host_init(struct ata_host *, struct device *, | |||
986 | unsigned long, struct ata_port_operations *); | 986 | unsigned long, struct ata_port_operations *); |
987 | extern int ata_scsi_detect(struct scsi_host_template *sht); | 987 | extern int ata_scsi_detect(struct scsi_host_template *sht); |
988 | extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg); | 988 | extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg); |
989 | extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)); | 989 | extern int ata_scsi_queuecmd(struct Scsi_Host *h, struct scsi_cmnd *cmd); |
990 | extern int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *dev, | 990 | extern int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *dev, |
991 | int cmd, void __user *arg); | 991 | int cmd, void __user *arg); |
992 | extern void ata_sas_port_destroy(struct ata_port *); | 992 | extern void ata_sas_port_destroy(struct ata_port *); |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 072652d94d9f..d8fd2c23a1b9 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -1554,6 +1554,11 @@ static inline void netif_tx_wake_all_queues(struct net_device *dev) | |||
1554 | 1554 | ||
1555 | static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue) | 1555 | static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue) |
1556 | { | 1556 | { |
1557 | if (WARN_ON(!dev_queue)) { | ||
1558 | printk(KERN_INFO "netif_stop_queue() cannot be called before " | ||
1559 | "register_netdev()"); | ||
1560 | return; | ||
1561 | } | ||
1557 | set_bit(__QUEUE_STATE_XOFF, &dev_queue->state); | 1562 | set_bit(__QUEUE_STATE_XOFF, &dev_queue->state); |
1558 | } | 1563 | } |
1559 | 1564 | ||
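The WARN_ON() added above catches drivers that stop a TX queue before register_netdev() has set the queues up. A sketch of the ordering it expects in a probe path; the driver shape is an illustrative assumption:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct net_device *ndev = alloc_etherdev(0);
	int err;

	if (!ndev)
		return -ENOMEM;

	err = register_netdev(ndev);	/* TX queues exist only after this */
	if (err) {
		free_netdev(ndev);
		return err;
	}

	netif_stop_queue(ndev);		/* now legal; earlier it triggers the new warning */
	return 0;
}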
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 89341c32631a..03317c8d4077 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h | |||
@@ -215,7 +215,7 @@ NF_HOOK_COND(uint8_t pf, unsigned int hook, struct sk_buff *skb, | |||
215 | int ret; | 215 | int ret; |
216 | 216 | ||
217 | if (!cond || | 217 | if (!cond || |
218 | (ret = nf_hook_thresh(pf, hook, skb, in, out, okfn, INT_MIN) == 1)) | 218 | ((ret = nf_hook_thresh(pf, hook, skb, in, out, okfn, INT_MIN)) == 1)) |
219 | ret = okfn(skb); | 219 | ret = okfn(skb); |
220 | return ret; | 220 | return ret; |
221 | } | 221 | } |
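The extra parentheses in NF_HOOK_COND matter because == binds tighter than =: the old form stored the result of the comparison in ret instead of the verdict from nf_hook_thresh(). A standalone C illustration of the difference:

#include <stdio.h>

static int hook(void) { return 2; }	/* stands in for nf_hook_thresh() */

int main(void)
{
	int ret;

	if ((ret = hook() == 1))	/* old form: ret = (hook() == 1) */
		;
	printf("old form: ret = %d\n", ret);	/* prints 0, verdict lost */

	if ((ret = hook()) == 1)	/* fixed form: ret keeps the verdict */
		;
	printf("new form: ret = %d\n", ret);	/* prints 2 */
	return 0;
}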
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 057bf22a8323..40150f345982 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h | |||
@@ -747,6 +747,16 @@ struct perf_event { | |||
747 | u64 tstamp_running; | 747 | u64 tstamp_running; |
748 | u64 tstamp_stopped; | 748 | u64 tstamp_stopped; |
749 | 749 | ||
750 | /* | ||
751 | * timestamp shadows the actual context timing but it can | ||
752 | * be safely used in NMI interrupt context. It reflects the | ||
753 | * context time as it was when the event was last scheduled in. | ||
754 | * | ||
755 | * ctx_time already accounts for ctx->timestamp. Therefore to | ||
756 | * compute ctx_time for a sample, simply add perf_clock(). | ||
757 | */ | ||
758 | u64 shadow_ctx_time; | ||
759 | |||
750 | struct perf_event_attr attr; | 760 | struct perf_event_attr attr; |
751 | struct hw_perf_event hw; | 761 | struct hw_perf_event hw; |
752 | 762 | ||
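The comment above defines shadow_ctx_time so that NMI-context code can reconstruct the context time of a sample. As a hedged illustration of that relationship (perf_clock() is internal to kernel/perf_event.c, so this only makes sense there):

	/* context time at sample time, safe to compute from NMI context */
	u64 ctx_time = event->shadow_ctx_time + perf_clock();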
diff --git a/include/linux/printk.h b/include/linux/printk.h new file mode 100644 index 000000000000..b772ca5fbdf0 --- /dev/null +++ b/include/linux/printk.h | |||
@@ -0,0 +1,248 @@ | |||
1 | #ifndef __KERNEL_PRINTK__ | ||
2 | #define __KERNEL_PRINTK__ | ||
3 | |||
4 | extern const char linux_banner[]; | ||
5 | extern const char linux_proc_banner[]; | ||
6 | |||
7 | #define KERN_EMERG "<0>" /* system is unusable */ | ||
8 | #define KERN_ALERT "<1>" /* action must be taken immediately */ | ||
9 | #define KERN_CRIT "<2>" /* critical conditions */ | ||
10 | #define KERN_ERR "<3>" /* error conditions */ | ||
11 | #define KERN_WARNING "<4>" /* warning conditions */ | ||
12 | #define KERN_NOTICE "<5>" /* normal but significant condition */ | ||
13 | #define KERN_INFO "<6>" /* informational */ | ||
14 | #define KERN_DEBUG "<7>" /* debug-level messages */ | ||
15 | |||
16 | /* Use the default kernel loglevel */ | ||
17 | #define KERN_DEFAULT "<d>" | ||
18 | /* | ||
19 | * Annotation for a "continued" line of log printout (only done after a | ||
20 | * line that had no enclosing \n). Only to be used by core/arch code | ||
21 | * during early bootup (a continued line is not SMP-safe otherwise). | ||
22 | */ | ||
23 | #define KERN_CONT "<c>" | ||
24 | |||
25 | extern int console_printk[]; | ||
26 | |||
27 | #define console_loglevel (console_printk[0]) | ||
28 | #define default_message_loglevel (console_printk[1]) | ||
29 | #define minimum_console_loglevel (console_printk[2]) | ||
30 | #define default_console_loglevel (console_printk[3]) | ||
31 | |||
32 | struct va_format { | ||
33 | const char *fmt; | ||
34 | va_list *va; | ||
35 | }; | ||
36 | |||
37 | /* | ||
38 | * FW_BUG | ||
39 | * Add this to a message where you are sure the firmware is buggy or behaves | ||
40 | * really stupid or out of spec. Be aware that the responsible BIOS developer | ||
41 | * should be able to fix this issue or at least get a concrete idea of the | ||
42 | * problem by reading your message without the need of looking at the kernel | ||
43 | * code. | ||
44 | * | ||
45 | * Use it for definite and high priority BIOS bugs. | ||
46 | * | ||
47 | * FW_WARN | ||
48 | * Use it for not that clear (e.g. could the kernel messed up things already?) | ||
49 | * and medium priority BIOS bugs. | ||
50 | * | ||
51 | * FW_INFO | ||
52 | * Use this one if you want to tell the user or vendor about something | ||
53 | * suspicious, but generally harmless related to the firmware. | ||
54 | * | ||
55 | * Use it for information or very low priority BIOS bugs. | ||
56 | */ | ||
57 | #define FW_BUG "[Firmware Bug]: " | ||
58 | #define FW_WARN "[Firmware Warn]: " | ||
59 | #define FW_INFO "[Firmware Info]: " | ||
60 | |||
61 | /* | ||
62 | * HW_ERR | ||
63 | * Add this to a message for hardware errors, so that user can report | ||
64 | * it to hardware vendor instead of LKML or software vendor. | ||
65 | */ | ||
66 | #define HW_ERR "[Hardware Error]: " | ||
67 | |||
68 | #ifdef CONFIG_PRINTK | ||
69 | asmlinkage int vprintk(const char *fmt, va_list args) | ||
70 | __attribute__ ((format (printf, 1, 0))); | ||
71 | asmlinkage int printk(const char * fmt, ...) | ||
72 | __attribute__ ((format (printf, 1, 2))) __cold; | ||
73 | |||
74 | /* | ||
75 | * Please don't use printk_ratelimit(), because it shares ratelimiting state | ||
76 | * with all other unrelated printk_ratelimit() callsites. Instead use | ||
77 | * printk_ratelimited() or plain old __ratelimit(). | ||
78 | */ | ||
79 | extern int __printk_ratelimit(const char *func); | ||
80 | #define printk_ratelimit() __printk_ratelimit(__func__) | ||
81 | extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, | ||
82 | unsigned int interval_msec); | ||
83 | |||
84 | extern int printk_delay_msec; | ||
85 | extern int dmesg_restrict; | ||
86 | |||
87 | /* | ||
88 | * Print a one-time message (analogous to WARN_ONCE() et al): | ||
89 | */ | ||
90 | #define printk_once(x...) ({ \ | ||
91 | static bool __print_once; \ | ||
92 | \ | ||
93 | if (!__print_once) { \ | ||
94 | __print_once = true; \ | ||
95 | printk(x); \ | ||
96 | } \ | ||
97 | }) | ||
98 | |||
99 | void log_buf_kexec_setup(void); | ||
100 | #else | ||
101 | static inline int vprintk(const char *s, va_list args) | ||
102 | __attribute__ ((format (printf, 1, 0))); | ||
103 | static inline int vprintk(const char *s, va_list args) { return 0; } | ||
104 | static inline int printk(const char *s, ...) | ||
105 | __attribute__ ((format (printf, 1, 2))); | ||
106 | static inline int __cold printk(const char *s, ...) { return 0; } | ||
107 | static inline int printk_ratelimit(void) { return 0; } | ||
108 | static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, \ | ||
109 | unsigned int interval_msec) \ | ||
110 | { return false; } | ||
111 | |||
112 | /* No effect, but we still get type checking even in the !PRINTK case: */ | ||
113 | #define printk_once(x...) printk(x) | ||
114 | |||
115 | static inline void log_buf_kexec_setup(void) | ||
116 | { | ||
117 | } | ||
118 | #endif | ||
119 | |||
120 | /* | ||
121 | * Dummy printk for disabled debugging statements to use whilst maintaining | ||
122 | * gcc's format and side-effect checking. | ||
123 | */ | ||
124 | static inline __attribute__ ((format (printf, 1, 2))) | ||
125 | int no_printk(const char *s, ...) { return 0; } | ||
126 | |||
127 | extern int printk_needs_cpu(int cpu); | ||
128 | extern void printk_tick(void); | ||
129 | |||
130 | extern void asmlinkage __attribute__((format(printf, 1, 2))) | ||
131 | early_printk(const char *fmt, ...); | ||
132 | |||
133 | static inline void console_silent(void) | ||
134 | { | ||
135 | console_loglevel = 0; | ||
136 | } | ||
137 | |||
138 | static inline void console_verbose(void) | ||
139 | { | ||
140 | if (console_loglevel) | ||
141 | console_loglevel = 15; | ||
142 | } | ||
143 | |||
144 | extern void dump_stack(void) __cold; | ||
145 | |||
146 | enum { | ||
147 | DUMP_PREFIX_NONE, | ||
148 | DUMP_PREFIX_ADDRESS, | ||
149 | DUMP_PREFIX_OFFSET | ||
150 | }; | ||
151 | extern void hex_dump_to_buffer(const void *buf, size_t len, | ||
152 | int rowsize, int groupsize, | ||
153 | char *linebuf, size_t linebuflen, bool ascii); | ||
154 | extern void print_hex_dump(const char *level, const char *prefix_str, | ||
155 | int prefix_type, int rowsize, int groupsize, | ||
156 | const void *buf, size_t len, bool ascii); | ||
157 | extern void print_hex_dump_bytes(const char *prefix_str, int prefix_type, | ||
158 | const void *buf, size_t len); | ||
159 | |||
160 | #ifndef pr_fmt | ||
161 | #define pr_fmt(fmt) fmt | ||
162 | #endif | ||
163 | |||
164 | #define pr_emerg(fmt, ...) \ | ||
165 | printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) | ||
166 | #define pr_alert(fmt, ...) \ | ||
167 | printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) | ||
168 | #define pr_crit(fmt, ...) \ | ||
169 | printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) | ||
170 | #define pr_err(fmt, ...) \ | ||
171 | printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) | ||
172 | #define pr_warning(fmt, ...) \ | ||
173 | printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) | ||
174 | #define pr_warn pr_warning | ||
175 | #define pr_notice(fmt, ...) \ | ||
176 | printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) | ||
177 | #define pr_info(fmt, ...) \ | ||
178 | printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) | ||
179 | #define pr_cont(fmt, ...) \ | ||
180 | printk(KERN_CONT fmt, ##__VA_ARGS__) | ||
181 | |||
182 | /* pr_devel() should produce zero code unless DEBUG is defined */ | ||
183 | #ifdef DEBUG | ||
184 | #define pr_devel(fmt, ...) \ | ||
185 | printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) | ||
186 | #else | ||
187 | #define pr_devel(fmt, ...) \ | ||
188 | ({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; }) | ||
189 | #endif | ||
190 | |||
191 | /* If you are writing a driver, please use dev_dbg instead */ | ||
192 | #if defined(DEBUG) | ||
193 | #define pr_debug(fmt, ...) \ | ||
194 | printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) | ||
195 | #elif defined(CONFIG_DYNAMIC_DEBUG) | ||
196 | /* dynamic_pr_debug() uses pr_fmt() internally so we don't need it here */ | ||
197 | #define pr_debug(fmt, ...) \ | ||
198 | dynamic_pr_debug(fmt, ##__VA_ARGS__) | ||
199 | #else | ||
200 | #define pr_debug(fmt, ...) \ | ||
201 | ({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; }) | ||
202 | #endif | ||
203 | |||
204 | /* | ||
205 | * ratelimited messages with local ratelimit_state, | ||
206 | * no local ratelimit_state used in the !PRINTK case | ||
207 | */ | ||
208 | #ifdef CONFIG_PRINTK | ||
209 | #define printk_ratelimited(fmt, ...) ({ \ | ||
210 | static DEFINE_RATELIMIT_STATE(_rs, \ | ||
211 | DEFAULT_RATELIMIT_INTERVAL, \ | ||
212 | DEFAULT_RATELIMIT_BURST); \ | ||
213 | \ | ||
214 | if (__ratelimit(&_rs)) \ | ||
215 | printk(fmt, ##__VA_ARGS__); \ | ||
216 | }) | ||
217 | #else | ||
218 | /* No effect, but we still get type checking even in the !PRINTK case: */ | ||
219 | #define printk_ratelimited printk | ||
220 | #endif | ||
221 | |||
222 | #define pr_emerg_ratelimited(fmt, ...) \ | ||
223 | printk_ratelimited(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) | ||
224 | #define pr_alert_ratelimited(fmt, ...) \ | ||
225 | printk_ratelimited(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) | ||
226 | #define pr_crit_ratelimited(fmt, ...) \ | ||
227 | printk_ratelimited(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) | ||
228 | #define pr_err_ratelimited(fmt, ...) \ | ||
229 | printk_ratelimited(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) | ||
230 | #define pr_warning_ratelimited(fmt, ...) \ | ||
231 | printk_ratelimited(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) | ||
232 | #define pr_warn_ratelimited pr_warning_ratelimited | ||
233 | #define pr_notice_ratelimited(fmt, ...) \ | ||
234 | printk_ratelimited(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) | ||
235 | #define pr_info_ratelimited(fmt, ...) \ | ||
236 | printk_ratelimited(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) | ||
237 | /* no pr_cont_ratelimited, don't do that... */ | ||
238 | /* If you are writing a driver, please use dev_dbg instead */ | ||
239 | #if defined(DEBUG) | ||
240 | #define pr_debug_ratelimited(fmt, ...) \ | ||
241 | printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) | ||
242 | #else | ||
243 | #define pr_debug_ratelimited(fmt, ...) \ | ||
244 | ({ if (0) printk_ratelimited(KERN_DEBUG pr_fmt(fmt), \ | ||
245 | ##__VA_ARGS__); 0; }) | ||
246 | #endif | ||
247 | |||
248 | #endif | ||
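A typical consumer of the new header defines pr_fmt() before the include so every pr_*() call gets a driver prefix, and uses the _once/_ratelimited variants to bound log volume. A sketch; the prefix and messages are illustrative:

#define pr_fmt(fmt) "mydrv: " fmt
#include <linux/printk.h>

static void example_report(int err)
{
	pr_info("initialised\n");		/* logged as "mydrv: initialised" at KERN_INFO */
	if (err)
		pr_err_ratelimited("I/O error %d\n", err);
	printk_once(KERN_DEBUG pr_fmt("printed on the first call only\n"));
}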
diff --git a/include/linux/pwm_backlight.h b/include/linux/pwm_backlight.h index 01b3d759f1fc..e031e1a486d9 100644 --- a/include/linux/pwm_backlight.h +++ b/include/linux/pwm_backlight.h | |||
@@ -8,6 +8,7 @@ struct platform_pwm_backlight_data { | |||
8 | int pwm_id; | 8 | int pwm_id; |
9 | unsigned int max_brightness; | 9 | unsigned int max_brightness; |
10 | unsigned int dft_brightness; | 10 | unsigned int dft_brightness; |
11 | unsigned int lth_brightness; | ||
11 | unsigned int pwm_period_ns; | 12 | unsigned int pwm_period_ns; |
12 | int (*init)(struct device *dev); | 13 | int (*init)(struct device *dev); |
13 | int (*notify)(struct device *dev, int brightness); | 14 | int (*notify)(struct device *dev, int brightness); |
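The new lth_brightness field gives the PWM duty cycle a lower bound, so nonzero brightness values are scaled between that threshold and the full period rather than down to zero. A board-data sketch; all numbers are illustrative assumptions:

#include <linux/pwm_backlight.h>

static struct platform_pwm_backlight_data example_backlight = {
	.pwm_id		= 0,
	.max_brightness	= 255,
	.dft_brightness	= 128,
	.lth_brightness	= 20,		/* duty-cycle floor for brightness > 0 */
	.pwm_period_ns	= 78770,
};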
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index a39cbed9ee17..ab2baa5c4884 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h | |||
@@ -34,19 +34,13 @@ | |||
34 | * needed for RCU lookups (because root->height is unreliable). The only | 34 | * needed for RCU lookups (because root->height is unreliable). The only |
35 | * time callers need worry about this is when doing a lookup_slot under | 35 | * time callers need worry about this is when doing a lookup_slot under |
36 | * RCU. | 36 | * RCU. |
37 | * | ||
38 | * Indirect pointer in fact is also used to tag the last pointer of a node | ||
39 | * when it is shrunk, before we rcu free the node. See shrink code for | ||
40 | * details. | ||
37 | */ | 41 | */ |
38 | #define RADIX_TREE_INDIRECT_PTR 1 | 42 | #define RADIX_TREE_INDIRECT_PTR 1 |
39 | #define RADIX_TREE_RETRY ((void *)-1UL) | ||
40 | |||
41 | static inline void *radix_tree_ptr_to_indirect(void *ptr) | ||
42 | { | ||
43 | return (void *)((unsigned long)ptr | RADIX_TREE_INDIRECT_PTR); | ||
44 | } | ||
45 | 43 | ||
46 | static inline void *radix_tree_indirect_to_ptr(void *ptr) | ||
47 | { | ||
48 | return (void *)((unsigned long)ptr & ~RADIX_TREE_INDIRECT_PTR); | ||
49 | } | ||
50 | #define radix_tree_indirect_to_ptr(ptr) \ | 44 | #define radix_tree_indirect_to_ptr(ptr) \ |
51 | radix_tree_indirect_to_ptr((void __force *)(ptr)) | 45 | radix_tree_indirect_to_ptr((void __force *)(ptr)) |
52 | 46 | ||
@@ -140,16 +134,29 @@ do { \ | |||
140 | * removed. | 134 | * removed. |
141 | * | 135 | * |
142 | * For use with radix_tree_lookup_slot(). Caller must hold tree at least read | 136 | * For use with radix_tree_lookup_slot(). Caller must hold tree at least read |
143 | * locked across slot lookup and dereference. More likely, will be used with | 137 | * locked across slot lookup and dereference. Not required if write lock is |
144 | * radix_tree_replace_slot(), as well, so caller will hold tree write locked. | 138 | * held (ie. items cannot be concurrently inserted). |
139 | * | ||
140 | * radix_tree_deref_retry must be used to confirm validity of the pointer if | ||
141 | * only the read lock is held. | ||
145 | */ | 142 | */ |
146 | static inline void *radix_tree_deref_slot(void **pslot) | 143 | static inline void *radix_tree_deref_slot(void **pslot) |
147 | { | 144 | { |
148 | void *ret = rcu_dereference(*pslot); | 145 | return rcu_dereference(*pslot); |
149 | if (unlikely(radix_tree_is_indirect_ptr(ret))) | ||
150 | ret = RADIX_TREE_RETRY; | ||
151 | return ret; | ||
152 | } | 146 | } |
147 | |||
148 | /** | ||
149 | * radix_tree_deref_retry - check radix_tree_deref_slot | ||
150 | * @arg: pointer returned by radix_tree_deref_slot | ||
151 | * Returns: 0 if retry is not required, otherwise retry is required | ||
152 | * | ||
153 | * radix_tree_deref_retry must be used with radix_tree_deref_slot. | ||
154 | */ | ||
155 | static inline int radix_tree_deref_retry(void *arg) | ||
156 | { | ||
157 | return unlikely((unsigned long)arg & RADIX_TREE_INDIRECT_PTR); | ||
158 | } | ||
159 | |||
153 | /** | 160 | /** |
154 | * radix_tree_replace_slot - replace item in a slot | 161 | * radix_tree_replace_slot - replace item in a slot |
155 | * @pslot: pointer to slot, returned by radix_tree_lookup_slot | 162 | * @pslot: pointer to slot, returned by radix_tree_lookup_slot |
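With RADIX_TREE_RETRY gone, lockless lookups now pair radix_tree_deref_slot() with radix_tree_deref_retry() and restart when they race with a tree shrink. A lookup-loop sketch under the RCU read lock, conceptually like the page-cache lookup path; the function name is illustrative:

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

static void *example_rcu_lookup(struct radix_tree_root *root, unsigned long index)
{
	void **slot;
	void *item;

	rcu_read_lock();
repeat:
	item = NULL;
	slot = radix_tree_lookup_slot(root, index);
	if (slot) {
		item = radix_tree_deref_slot(slot);
		if (radix_tree_deref_retry(item))
			goto repeat;	/* raced with a shrink; look the slot up again */
	}
	rcu_read_unlock();
	return item;
}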
diff --git a/include/linux/resource.h b/include/linux/resource.h index 88d36f9145ba..d01c96c1966e 100644 --- a/include/linux/resource.h +++ b/include/linux/resource.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #define _LINUX_RESOURCE_H | 2 | #define _LINUX_RESOURCE_H |
3 | 3 | ||
4 | #include <linux/time.h> | 4 | #include <linux/time.h> |
5 | #include <linux/types.h> | ||
5 | 6 | ||
6 | /* | 7 | /* |
7 | * Resource control/accounting header file for linux | 8 | * Resource control/accounting header file for linux |
diff --git a/include/linux/security.h b/include/linux/security.h index b8246a8df7d2..fd4d55fb8845 100644 --- a/include/linux/security.h +++ b/include/linux/security.h | |||
@@ -77,7 +77,6 @@ extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3, | |||
77 | extern int cap_task_setscheduler(struct task_struct *p); | 77 | extern int cap_task_setscheduler(struct task_struct *p); |
78 | extern int cap_task_setioprio(struct task_struct *p, int ioprio); | 78 | extern int cap_task_setioprio(struct task_struct *p, int ioprio); |
79 | extern int cap_task_setnice(struct task_struct *p, int nice); | 79 | extern int cap_task_setnice(struct task_struct *p, int nice); |
80 | extern int cap_syslog(int type, bool from_file); | ||
81 | extern int cap_vm_enough_memory(struct mm_struct *mm, long pages); | 80 | extern int cap_vm_enough_memory(struct mm_struct *mm, long pages); |
82 | 81 | ||
83 | struct msghdr; | 82 | struct msghdr; |
@@ -1388,7 +1387,7 @@ struct security_operations { | |||
1388 | int (*sysctl) (struct ctl_table *table, int op); | 1387 | int (*sysctl) (struct ctl_table *table, int op); |
1389 | int (*quotactl) (int cmds, int type, int id, struct super_block *sb); | 1388 | int (*quotactl) (int cmds, int type, int id, struct super_block *sb); |
1390 | int (*quota_on) (struct dentry *dentry); | 1389 | int (*quota_on) (struct dentry *dentry); |
1391 | int (*syslog) (int type, bool from_file); | 1390 | int (*syslog) (int type); |
1392 | int (*settime) (struct timespec *ts, struct timezone *tz); | 1391 | int (*settime) (struct timespec *ts, struct timezone *tz); |
1393 | int (*vm_enough_memory) (struct mm_struct *mm, long pages); | 1392 | int (*vm_enough_memory) (struct mm_struct *mm, long pages); |
1394 | 1393 | ||
@@ -1671,7 +1670,7 @@ int security_real_capable_noaudit(struct task_struct *tsk, int cap); | |||
1671 | int security_sysctl(struct ctl_table *table, int op); | 1670 | int security_sysctl(struct ctl_table *table, int op); |
1672 | int security_quotactl(int cmds, int type, int id, struct super_block *sb); | 1671 | int security_quotactl(int cmds, int type, int id, struct super_block *sb); |
1673 | int security_quota_on(struct dentry *dentry); | 1672 | int security_quota_on(struct dentry *dentry); |
1674 | int security_syslog(int type, bool from_file); | 1673 | int security_syslog(int type); |
1675 | int security_settime(struct timespec *ts, struct timezone *tz); | 1674 | int security_settime(struct timespec *ts, struct timezone *tz); |
1676 | int security_vm_enough_memory(long pages); | 1675 | int security_vm_enough_memory(long pages); |
1677 | int security_vm_enough_memory_mm(struct mm_struct *mm, long pages); | 1676 | int security_vm_enough_memory_mm(struct mm_struct *mm, long pages); |
@@ -1901,9 +1900,9 @@ static inline int security_quota_on(struct dentry *dentry) | |||
1901 | return 0; | 1900 | return 0; |
1902 | } | 1901 | } |
1903 | 1902 | ||
1904 | static inline int security_syslog(int type, bool from_file) | 1903 | static inline int security_syslog(int type) |
1905 | { | 1904 | { |
1906 | return cap_syslog(type, from_file); | 1905 | return 0; |
1907 | } | 1906 | } |
1908 | 1907 | ||
1909 | static inline int security_settime(struct timespec *ts, struct timezone *tz) | 1908 | static inline int security_settime(struct timespec *ts, struct timezone *tz) |
diff --git a/include/linux/sh_intc.h b/include/linux/sh_intc.h index f656d1a43dc0..5812fefbcedf 100644 --- a/include/linux/sh_intc.h +++ b/include/linux/sh_intc.h | |||
@@ -79,7 +79,7 @@ struct intc_hw_desc { | |||
79 | unsigned int nr_subgroups; | 79 | unsigned int nr_subgroups; |
80 | }; | 80 | }; |
81 | 81 | ||
82 | #define _INTC_ARRAY(a) a, a == NULL ? 0 : sizeof(a)/sizeof(*a) | 82 | #define _INTC_ARRAY(a) a, __same_type(a, NULL) ? 0 : sizeof(a)/sizeof(*a) |
83 | 83 | ||
84 | #define INTC_HW_DESC(vectors, groups, mask_regs, \ | 84 | #define INTC_HW_DESC(vectors, groups, mask_regs, \ |
85 | prio_regs, sense_regs, ack_regs) \ | 85 | prio_regs, sense_regs, ack_regs) \ |
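The _INTC_ARRAY() change avoids comparing an array's address with NULL, which gcc flags with -Waddress; __same_type() only matches when the argument really is the NULL literal. A generic illustration of the test, not SH-specific code:

#include <linux/compiler.h>

static int example_table[4];

/* old: "example_table == NULL" is always false and triggers -Waddress;
 * the __same_type() form evaluates to the element count without warning. */
static inline int example_nr_entries(void)
{
	return __same_type(example_table, NULL) ?
		0 : sizeof(example_table)/sizeof(*example_table);	/* 4 */
}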
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index bbdb680ffbe9..aea0d438e3c7 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h | |||
@@ -82,18 +82,28 @@ struct svc_xprt { | |||
82 | struct net *xpt_net; | 82 | struct net *xpt_net; |
83 | }; | 83 | }; |
84 | 84 | ||
85 | static inline void register_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u) | 85 | static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u) |
86 | { | 86 | { |
87 | spin_lock(&xpt->xpt_lock); | 87 | spin_lock(&xpt->xpt_lock); |
88 | list_add(&u->list, &xpt->xpt_users); | 88 | list_del_init(&u->list); |
89 | spin_unlock(&xpt->xpt_lock); | 89 | spin_unlock(&xpt->xpt_lock); |
90 | } | 90 | } |
91 | 91 | ||
92 | static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u) | 92 | static inline int register_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u) |
93 | { | 93 | { |
94 | spin_lock(&xpt->xpt_lock); | 94 | spin_lock(&xpt->xpt_lock); |
95 | list_del_init(&u->list); | 95 | if (test_bit(XPT_CLOSE, &xpt->xpt_flags)) { |
96 | /* | ||
97 | * The connection is about to be deleted soon (or, | ||
98 | * worse, may already be deleted--in which case we've | ||
99 | * already notified the xpt_users). | ||
100 | */ | ||
101 | spin_unlock(&xpt->xpt_lock); | ||
102 | return -ENOTCONN; | ||
103 | } | ||
104 | list_add(&u->list, &xpt->xpt_users); | ||
96 | spin_unlock(&xpt->xpt_lock); | 105 | spin_unlock(&xpt->xpt_lock); |
106 | return 0; | ||
97 | } | 107 | } |
98 | 108 | ||
99 | int svc_reg_xprt_class(struct svc_xprt_class *); | 109 | int svc_reg_xprt_class(struct svc_xprt_class *); |
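Since register_xpt_user() can now fail when the transport is already closing, callers have to check its return value rather than assume the close callback will eventually fire. A hedged caller sketch (it assumes svc_xpt_user carries a callback function pointer; the other names are invented):

#include <linux/sunrpc/svc_xprt.h>

static void my_conn_closed(struct svc_xpt_user *u)
{
	/* drop whatever state was tied to this connection */
}

static int my_track_connection(struct svc_xprt *xprt, struct svc_xpt_user *u)
{
	u->callback = my_conn_closed;

	/* returns -ENOTCONN if XPT_CLOSE is already set, in which case
	 * the callback will never run and the caller must clean up now */
	return register_xpt_user(xprt, u);
}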
diff --git a/include/linux/tty.h b/include/linux/tty.h index 2a754748dd5f..c7ea9bc8897c 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h | |||
@@ -50,7 +50,7 @@ | |||
50 | #define N_V253 19 /* Codec control over voice modem */ | 50 | #define N_V253 19 /* Codec control over voice modem */ |
51 | #define N_CAIF 20 /* CAIF protocol for talking to modems */ | 51 | #define N_CAIF 20 /* CAIF protocol for talking to modems */ |
52 | #define N_GSM0710 21 /* GSM 0710 Mux */ | 52 | #define N_GSM0710 21 /* GSM 0710 Mux */ |
53 | #define N_TI_WL 22 /* for TI's WL BT, FM, GPS combo chips */ | 53 | #define N_TI_WL 22 /* for TI's WL BT, FM, GPS combo chips */ |
54 | 54 | ||
55 | /* | 55 | /* |
56 | * This character is the same as _POSIX_VDISABLE: it cannot be used as | 56 | * This character is the same as _POSIX_VDISABLE: it cannot be used as |
diff --git a/include/linux/usb.h b/include/linux/usb.h index 35fe6ab222bb..24300d8a1bc1 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h | |||
@@ -797,7 +797,7 @@ struct usbdrv_wrap { | |||
797 | * @disconnect: Called when the interface is no longer accessible, usually | 797 | * @disconnect: Called when the interface is no longer accessible, usually |
798 | * because its device has been (or is being) disconnected or the | 798 | * because its device has been (or is being) disconnected or the |
799 | * driver module is being unloaded. | 799 | * driver module is being unloaded. |
800 | * @ioctl: Used for drivers that want to talk to userspace through | 800 | * @unlocked_ioctl: Used for drivers that want to talk to userspace through |
801 | * the "usbfs" filesystem. This lets devices provide ways to | 801 | * the "usbfs" filesystem. This lets devices provide ways to |
802 | * expose information to user space regardless of where they | 802 | * expose information to user space regardless of where they |
803 | * do (or don't) show up otherwise in the filesystem. | 803 | * do (or don't) show up otherwise in the filesystem. |
diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h index ee2dd1d506ed..2387f9fc8138 100644 --- a/include/linux/usb/musb.h +++ b/include/linux/usb/musb.h | |||
@@ -89,6 +89,8 @@ struct musb_hdrc_config { | |||
89 | /* A GPIO controlling VRSEL in Blackfin */ | 89 | /* A GPIO controlling VRSEL in Blackfin */ |
90 | unsigned int gpio_vrsel; | 90 | unsigned int gpio_vrsel; |
91 | unsigned int gpio_vrsel_active; | 91 | unsigned int gpio_vrsel_active; |
92 | /* musb CLKIN in Blackfin in MHZ */ | ||
93 | unsigned char clkin; | ||
92 | #endif | 94 | #endif |
93 | 95 | ||
94 | }; | 96 | }; |
diff --git a/include/net/dn.h b/include/net/dn.h index e5469f7b67a3..a514a3cf4573 100644 --- a/include/net/dn.h +++ b/include/net/dn.h | |||
@@ -225,7 +225,7 @@ extern int decnet_di_count; | |||
225 | extern int decnet_dr_count; | 225 | extern int decnet_dr_count; |
226 | extern int decnet_no_fc_max_cwnd; | 226 | extern int decnet_no_fc_max_cwnd; |
227 | 227 | ||
228 | extern int sysctl_decnet_mem[3]; | 228 | extern long sysctl_decnet_mem[3]; |
229 | extern int sysctl_decnet_wmem[3]; | 229 | extern int sysctl_decnet_wmem[3]; |
230 | extern int sysctl_decnet_rmem[3]; | 230 | extern int sysctl_decnet_rmem[3]; |
231 | 231 | ||
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h index 1fa5306e3e23..51665b3461b8 100644 --- a/include/net/dst_ops.h +++ b/include/net/dst_ops.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #define _NET_DST_OPS_H | 2 | #define _NET_DST_OPS_H |
3 | #include <linux/types.h> | 3 | #include <linux/types.h> |
4 | #include <linux/percpu_counter.h> | 4 | #include <linux/percpu_counter.h> |
5 | #include <linux/cache.h> | ||
5 | 6 | ||
6 | struct dst_entry; | 7 | struct dst_entry; |
7 | struct kmem_cachep; | 8 | struct kmem_cachep; |
diff --git a/include/net/sock.h b/include/net/sock.h index c7a736228ca2..a6338d039857 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
@@ -762,7 +762,7 @@ struct proto { | |||
762 | 762 | ||
763 | /* Memory pressure */ | 763 | /* Memory pressure */ |
764 | void (*enter_memory_pressure)(struct sock *sk); | 764 | void (*enter_memory_pressure)(struct sock *sk); |
765 | atomic_t *memory_allocated; /* Current allocated memory. */ | 765 | atomic_long_t *memory_allocated; /* Current allocated memory. */ |
766 | struct percpu_counter *sockets_allocated; /* Current number of sockets. */ | 766 | struct percpu_counter *sockets_allocated; /* Current number of sockets. */ |
767 | /* | 767 | /* |
768 | * Pressure flag: try to collapse. | 768 | * Pressure flag: try to collapse. |
@@ -771,7 +771,7 @@ struct proto { | |||
771 | * is strict, actions are advisory and have some latency. | 771 | * is strict, actions are advisory and have some latency. |
772 | */ | 772 | */ |
773 | int *memory_pressure; | 773 | int *memory_pressure; |
774 | int *sysctl_mem; | 774 | long *sysctl_mem; |
775 | int *sysctl_wmem; | 775 | int *sysctl_wmem; |
776 | int *sysctl_rmem; | 776 | int *sysctl_rmem; |
777 | int max_header; | 777 | int max_header; |
diff --git a/include/net/tcp.h b/include/net/tcp.h index 4fee0424af7e..e36c874c7fb1 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h | |||
@@ -224,7 +224,7 @@ extern int sysctl_tcp_fack; | |||
224 | extern int sysctl_tcp_reordering; | 224 | extern int sysctl_tcp_reordering; |
225 | extern int sysctl_tcp_ecn; | 225 | extern int sysctl_tcp_ecn; |
226 | extern int sysctl_tcp_dsack; | 226 | extern int sysctl_tcp_dsack; |
227 | extern int sysctl_tcp_mem[3]; | 227 | extern long sysctl_tcp_mem[3]; |
228 | extern int sysctl_tcp_wmem[3]; | 228 | extern int sysctl_tcp_wmem[3]; |
229 | extern int sysctl_tcp_rmem[3]; | 229 | extern int sysctl_tcp_rmem[3]; |
230 | extern int sysctl_tcp_app_win; | 230 | extern int sysctl_tcp_app_win; |
@@ -247,7 +247,7 @@ extern int sysctl_tcp_cookie_size; | |||
247 | extern int sysctl_tcp_thin_linear_timeouts; | 247 | extern int sysctl_tcp_thin_linear_timeouts; |
248 | extern int sysctl_tcp_thin_dupack; | 248 | extern int sysctl_tcp_thin_dupack; |
249 | 249 | ||
250 | extern atomic_t tcp_memory_allocated; | 250 | extern atomic_long_t tcp_memory_allocated; |
251 | extern struct percpu_counter tcp_sockets_allocated; | 251 | extern struct percpu_counter tcp_sockets_allocated; |
252 | extern int tcp_memory_pressure; | 252 | extern int tcp_memory_pressure; |
253 | 253 | ||
@@ -280,7 +280,7 @@ static inline bool tcp_too_many_orphans(struct sock *sk, int shift) | |||
280 | } | 280 | } |
281 | 281 | ||
282 | if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF && | 282 | if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF && |
283 | atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]) | 283 | atomic_long_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]) |
284 | return true; | 284 | return true; |
285 | return false; | 285 | return false; |
286 | } | 286 | } |
diff --git a/include/net/udp.h b/include/net/udp.h index 200b82848c9a..bb967dd59bf7 100644 --- a/include/net/udp.h +++ b/include/net/udp.h | |||
@@ -105,10 +105,10 @@ static inline struct udp_hslot *udp_hashslot2(struct udp_table *table, | |||
105 | 105 | ||
106 | extern struct proto udp_prot; | 106 | extern struct proto udp_prot; |
107 | 107 | ||
108 | extern atomic_t udp_memory_allocated; | 108 | extern atomic_long_t udp_memory_allocated; |
109 | 109 | ||
110 | /* sysctl variables for udp */ | 110 | /* sysctl variables for udp */ |
111 | extern int sysctl_udp_mem[3]; | 111 | extern long sysctl_udp_mem[3]; |
112 | extern int sysctl_udp_rmem_min; | 112 | extern int sysctl_udp_rmem_min; |
113 | extern int sysctl_udp_wmem_min; | 113 | extern int sysctl_udp_wmem_min; |
114 | 114 | ||
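The int-to-long switch for sysctl_tcp_mem/sysctl_udp_mem, proto->sysctl_mem and the *_memory_allocated counters exists because these limits count pages, and a large-memory machine overflows a 32-bit value. A back-of-the-envelope check (illustrative only):

#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned long long ram_bytes = 16ULL << 40;	/* a 16 TiB machine */
	unsigned long long pages = ram_bytes >> 12;	/* 4 KiB pages */

	/* 4294967296 pages vs INT_MAX 2147483647: the page count no
	 * longer fits in an int, hence long sysctls and atomic_long_t
	 * allocation counters */
	printf("%llu pages, INT_MAX = %d\n", pages, INT_MAX);
	return 0;
}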
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index f986ab7ffe6f..5c4c1678f7be 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h | |||
@@ -1006,8 +1006,7 @@ void fc_fcp_destroy(struct fc_lport *); | |||
1006 | /* | 1006 | /* |
1007 | * SCSI INTERACTION LAYER | 1007 | * SCSI INTERACTION LAYER |
1008 | *****************************/ | 1008 | *****************************/ |
1009 | int fc_queuecommand(struct scsi_cmnd *, | 1009 | int fc_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); |
1010 | void (*done)(struct scsi_cmnd *)); | ||
1011 | int fc_eh_abort(struct scsi_cmnd *); | 1010 | int fc_eh_abort(struct scsi_cmnd *); |
1012 | int fc_eh_device_reset(struct scsi_cmnd *); | 1011 | int fc_eh_device_reset(struct scsi_cmnd *); |
1013 | int fc_eh_host_reset(struct scsi_cmnd *); | 1012 | int fc_eh_host_reset(struct scsi_cmnd *); |
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h index ae5196aae1a5..b81d969ddc67 100644 --- a/include/scsi/libiscsi.h +++ b/include/scsi/libiscsi.h | |||
@@ -341,8 +341,7 @@ extern int iscsi_eh_abort(struct scsi_cmnd *sc); | |||
341 | extern int iscsi_eh_recover_target(struct scsi_cmnd *sc); | 341 | extern int iscsi_eh_recover_target(struct scsi_cmnd *sc); |
342 | extern int iscsi_eh_session_reset(struct scsi_cmnd *sc); | 342 | extern int iscsi_eh_session_reset(struct scsi_cmnd *sc); |
343 | extern int iscsi_eh_device_reset(struct scsi_cmnd *sc); | 343 | extern int iscsi_eh_device_reset(struct scsi_cmnd *sc); |
344 | extern int iscsi_queuecommand(struct scsi_cmnd *sc, | 344 | extern int iscsi_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *sc); |
345 | void (*done)(struct scsi_cmnd *)); | ||
346 | 345 | ||
347 | /* | 346 | /* |
348 | * iSCSI host helpers. | 347 | * iSCSI host helpers. |
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h index 3dec1949f69c..90ce527ecf3d 100644 --- a/include/scsi/libsas.h +++ b/include/scsi/libsas.h | |||
@@ -621,8 +621,7 @@ int sas_set_phy_speed(struct sas_phy *phy, | |||
621 | int sas_phy_enable(struct sas_phy *phy, int enabled); | 621 | int sas_phy_enable(struct sas_phy *phy, int enabled); |
622 | int sas_phy_reset(struct sas_phy *phy, int hard_reset); | 622 | int sas_phy_reset(struct sas_phy *phy, int hard_reset); |
623 | int sas_queue_up(struct sas_task *task); | 623 | int sas_queue_up(struct sas_task *task); |
624 | extern int sas_queuecommand(struct scsi_cmnd *, | 624 | extern int sas_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
625 | void (*scsi_done)(struct scsi_cmnd *)); | ||
626 | extern int sas_target_alloc(struct scsi_target *); | 625 | extern int sas_target_alloc(struct scsi_target *); |
627 | extern int sas_slave_alloc(struct scsi_device *); | 626 | extern int sas_slave_alloc(struct scsi_device *); |
628 | extern int sas_slave_configure(struct scsi_device *); | 627 | extern int sas_slave_configure(struct scsi_device *); |
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index d0a6a845f204..e7e385842a38 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h | |||
@@ -127,8 +127,7 @@ struct scsi_host_template { | |||
127 | * | 127 | * |
128 | * STATUS: REQUIRED | 128 | * STATUS: REQUIRED |
129 | */ | 129 | */ |
130 | int (* queuecommand)(struct scsi_cmnd *, | 130 | int (* queuecommand)(struct Scsi_Host *, struct scsi_cmnd *); |
131 | void (*done)(struct scsi_cmnd *)); | ||
132 | 131 | ||
133 | /* | 132 | /* |
134 | * The transfer functions are used to queue a scsi command to | 133 | * The transfer functions are used to queue a scsi command to |
@@ -505,6 +504,25 @@ struct scsi_host_template { | |||
505 | }; | 504 | }; |
506 | 505 | ||
507 | /* | 506 | /* |
507 | * Temporary #define for host lock push down. Can be removed when all | ||
508 | * drivers have been updated to take advantage of unlocked | ||
509 | * queuecommand. | ||
510 | * | ||
511 | */ | ||
512 | #define DEF_SCSI_QCMD(func_name) \ | ||
513 | int func_name(struct Scsi_Host *shost, struct scsi_cmnd *cmd) \ | ||
514 | { \ | ||
515 | unsigned long irq_flags; \ | ||
516 | int rc; \ | ||
517 | spin_lock_irqsave(shost->host_lock, irq_flags); \ | ||
518 | scsi_cmd_get_serial(shost, cmd); \ | ||
519 | rc = func_name##_lck (cmd, cmd->scsi_done); \ | ||
520 | spin_unlock_irqrestore(shost->host_lock, irq_flags); \ | ||
521 | return rc; \ | ||
522 | } | ||
523 | |||
524 | |||
525 | /* | ||
508 | * shost state: If you alter this, you also need to alter scsi_sysfs.c | 526 | * shost state: If you alter this, you also need to alter scsi_sysfs.c |
509 | * (for the ascii descriptions) and the state model enforcer: | 527 | * (for the ascii descriptions) and the state model enforcer: |
510 | * scsi_host_set_state() | 528 | * scsi_host_set_state() |
@@ -752,6 +770,7 @@ extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *); | |||
752 | extern void scsi_host_put(struct Scsi_Host *t); | 770 | extern void scsi_host_put(struct Scsi_Host *t); |
753 | extern struct Scsi_Host *scsi_host_lookup(unsigned short); | 771 | extern struct Scsi_Host *scsi_host_lookup(unsigned short); |
754 | extern const char *scsi_host_state_name(enum scsi_host_state); | 772 | extern const char *scsi_host_state_name(enum scsi_host_state); |
773 | extern void scsi_cmd_get_serial(struct Scsi_Host *, struct scsi_cmnd *); | ||
755 | 774 | ||
756 | extern u64 scsi_calculate_bounce_limit(struct Scsi_Host *); | 775 | extern u64 scsi_calculate_bounce_limit(struct Scsi_Host *); |
757 | 776 | ||
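A sketch of how an unconverted driver adopts the host-lock push down with this macro (driver names here are invented): the existing handler keeps its old (cmd, done) signature under an _lck suffix, and DEF_SCSI_QCMD() generates the new-style entry point that takes the host lock and assigns the serial number before calling it.

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

static int mydrv_queuecommand_lck(struct scsi_cmnd *cmd,
				  void (*done)(struct scsi_cmnd *))
{
	/* redundant under the wrapper (done is already cmd->scsi_done),
	 * but harmless and what existing drivers already do */
	cmd->scsi_done = done;
	/* ... hand the command to the hardware ... */
	return 0;
}

/* expands to: int mydrv_queuecommand(struct Scsi_Host *, struct scsi_cmnd *) */
static DEF_SCSI_QCMD(mydrv_queuecommand)

static struct scsi_host_template mydrv_template = {
	.name		= "mydrv",
	.queuecommand	= mydrv_queuecommand,
};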
diff --git a/kernel/futex.c b/kernel/futex.c index 6c683b37f2ce..40a8777a27d0 100644 --- a/kernel/futex.c +++ b/kernel/futex.c | |||
@@ -2489,7 +2489,8 @@ void exit_robust_list(struct task_struct *curr) | |||
2489 | { | 2489 | { |
2490 | struct robust_list_head __user *head = curr->robust_list; | 2490 | struct robust_list_head __user *head = curr->robust_list; |
2491 | struct robust_list __user *entry, *next_entry, *pending; | 2491 | struct robust_list __user *entry, *next_entry, *pending; |
2492 | unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip; | 2492 | unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; |
2493 | unsigned int uninitialized_var(next_pi); | ||
2493 | unsigned long futex_offset; | 2494 | unsigned long futex_offset; |
2494 | int rc; | 2495 | int rc; |
2495 | 2496 | ||
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c index 06da4dfc339b..a7934ac75e5b 100644 --- a/kernel/futex_compat.c +++ b/kernel/futex_compat.c | |||
@@ -49,7 +49,8 @@ void compat_exit_robust_list(struct task_struct *curr) | |||
49 | { | 49 | { |
50 | struct compat_robust_list_head __user *head = curr->compat_robust_list; | 50 | struct compat_robust_list_head __user *head = curr->compat_robust_list; |
51 | struct robust_list __user *entry, *next_entry, *pending; | 51 | struct robust_list __user *entry, *next_entry, *pending; |
52 | unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip; | 52 | unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; |
53 | unsigned int uninitialized_var(next_pi); | ||
53 | compat_uptr_t uentry, next_uentry, upending; | 54 | compat_uptr_t uentry, next_uentry, upending; |
54 | compat_long_t futex_offset; | 55 | compat_long_t futex_offset; |
55 | int rc; | 56 | int rc; |
diff --git a/kernel/latencytop.c b/kernel/latencytop.c index 877fb306d415..17110a4a4fc2 100644 --- a/kernel/latencytop.c +++ b/kernel/latencytop.c | |||
@@ -194,14 +194,7 @@ __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter) | |||
194 | 194 | ||
195 | account_global_scheduler_latency(tsk, &lat); | 195 | account_global_scheduler_latency(tsk, &lat); |
196 | 196 | ||
197 | /* | 197 | for (i = 0; i < tsk->latency_record_count; i++) { |
198 | * short term hack; if we're > 32 we stop; future we recycle: | ||
199 | */ | ||
200 | tsk->latency_record_count++; | ||
201 | if (tsk->latency_record_count >= LT_SAVECOUNT) | ||
202 | goto out_unlock; | ||
203 | |||
204 | for (i = 0; i < LT_SAVECOUNT; i++) { | ||
205 | struct latency_record *mylat; | 198 | struct latency_record *mylat; |
206 | int same = 1; | 199 | int same = 1; |
207 | 200 | ||
@@ -227,8 +220,14 @@ __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter) | |||
227 | } | 220 | } |
228 | } | 221 | } |
229 | 222 | ||
223 | /* | ||
224 | * short term hack; if we're > 32 we stop; future we recycle: | ||
225 | */ | ||
226 | if (tsk->latency_record_count >= LT_SAVECOUNT) | ||
227 | goto out_unlock; | ||
228 | |||
230 | /* Allocated a new one: */ | 229 | /* Allocated a new one: */ |
231 | i = tsk->latency_record_count; | 230 | i = tsk->latency_record_count++; |
232 | memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record)); | 231 | memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record)); |
233 | 232 | ||
234 | out_unlock: | 233 | out_unlock: |
diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 517d827f4982..cb6c0d2af68f 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c | |||
@@ -674,6 +674,8 @@ event_sched_in(struct perf_event *event, | |||
674 | 674 | ||
675 | event->tstamp_running += ctx->time - event->tstamp_stopped; | 675 | event->tstamp_running += ctx->time - event->tstamp_stopped; |
676 | 676 | ||
677 | event->shadow_ctx_time = ctx->time - ctx->timestamp; | ||
678 | |||
677 | if (!is_software_event(event)) | 679 | if (!is_software_event(event)) |
678 | cpuctx->active_oncpu++; | 680 | cpuctx->active_oncpu++; |
679 | ctx->nr_active++; | 681 | ctx->nr_active++; |
@@ -3396,7 +3398,8 @@ static u32 perf_event_tid(struct perf_event *event, struct task_struct *p) | |||
3396 | } | 3398 | } |
3397 | 3399 | ||
3398 | static void perf_output_read_one(struct perf_output_handle *handle, | 3400 | static void perf_output_read_one(struct perf_output_handle *handle, |
3399 | struct perf_event *event) | 3401 | struct perf_event *event, |
3402 | u64 enabled, u64 running) | ||
3400 | { | 3403 | { |
3401 | u64 read_format = event->attr.read_format; | 3404 | u64 read_format = event->attr.read_format; |
3402 | u64 values[4]; | 3405 | u64 values[4]; |
@@ -3404,11 +3407,11 @@ static void perf_output_read_one(struct perf_output_handle *handle, | |||
3404 | 3407 | ||
3405 | values[n++] = perf_event_count(event); | 3408 | values[n++] = perf_event_count(event); |
3406 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { | 3409 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { |
3407 | values[n++] = event->total_time_enabled + | 3410 | values[n++] = enabled + |
3408 | atomic64_read(&event->child_total_time_enabled); | 3411 | atomic64_read(&event->child_total_time_enabled); |
3409 | } | 3412 | } |
3410 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { | 3413 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { |
3411 | values[n++] = event->total_time_running + | 3414 | values[n++] = running + |
3412 | atomic64_read(&event->child_total_time_running); | 3415 | atomic64_read(&event->child_total_time_running); |
3413 | } | 3416 | } |
3414 | if (read_format & PERF_FORMAT_ID) | 3417 | if (read_format & PERF_FORMAT_ID) |
@@ -3421,7 +3424,8 @@ static void perf_output_read_one(struct perf_output_handle *handle, | |||
3421 | * XXX PERF_FORMAT_GROUP vs inherited events seems difficult. | 3424 | * XXX PERF_FORMAT_GROUP vs inherited events seems difficult. |
3422 | */ | 3425 | */ |
3423 | static void perf_output_read_group(struct perf_output_handle *handle, | 3426 | static void perf_output_read_group(struct perf_output_handle *handle, |
3424 | struct perf_event *event) | 3427 | struct perf_event *event, |
3428 | u64 enabled, u64 running) | ||
3425 | { | 3429 | { |
3426 | struct perf_event *leader = event->group_leader, *sub; | 3430 | struct perf_event *leader = event->group_leader, *sub; |
3427 | u64 read_format = event->attr.read_format; | 3431 | u64 read_format = event->attr.read_format; |
@@ -3431,10 +3435,10 @@ static void perf_output_read_group(struct perf_output_handle *handle, | |||
3431 | values[n++] = 1 + leader->nr_siblings; | 3435 | values[n++] = 1 + leader->nr_siblings; |
3432 | 3436 | ||
3433 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) | 3437 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) |
3434 | values[n++] = leader->total_time_enabled; | 3438 | values[n++] = enabled; |
3435 | 3439 | ||
3436 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) | 3440 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) |
3437 | values[n++] = leader->total_time_running; | 3441 | values[n++] = running; |
3438 | 3442 | ||
3439 | if (leader != event) | 3443 | if (leader != event) |
3440 | leader->pmu->read(leader); | 3444 | leader->pmu->read(leader); |
@@ -3459,13 +3463,35 @@ static void perf_output_read_group(struct perf_output_handle *handle, | |||
3459 | } | 3463 | } |
3460 | } | 3464 | } |
3461 | 3465 | ||
3466 | #define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\ | ||
3467 | PERF_FORMAT_TOTAL_TIME_RUNNING) | ||
3468 | |||
3462 | static void perf_output_read(struct perf_output_handle *handle, | 3469 | static void perf_output_read(struct perf_output_handle *handle, |
3463 | struct perf_event *event) | 3470 | struct perf_event *event) |
3464 | { | 3471 | { |
3472 | u64 enabled = 0, running = 0, now, ctx_time; | ||
3473 | u64 read_format = event->attr.read_format; | ||
3474 | |||
3475 | /* | ||
3476 | * compute total_time_enabled, total_time_running | ||
3477 | * based on snapshot values taken when the event | ||
3478 | * was last scheduled in. | ||
3479 | * | ||
3480 | * we cannot simply call update_context_time() | ||
3481 | * because of locking issues, as we are called in | ||
3482 | * NMI context | ||
3483 | */ | ||
3484 | if (read_format & PERF_FORMAT_TOTAL_TIMES) { | ||
3485 | now = perf_clock(); | ||
3486 | ctx_time = event->shadow_ctx_time + now; | ||
3487 | enabled = ctx_time - event->tstamp_enabled; | ||
3488 | running = ctx_time - event->tstamp_running; | ||
3489 | } | ||
3490 | |||
3465 | if (event->attr.read_format & PERF_FORMAT_GROUP) | 3491 | if (event->attr.read_format & PERF_FORMAT_GROUP) |
3466 | perf_output_read_group(handle, event); | 3492 | perf_output_read_group(handle, event, enabled, running); |
3467 | else | 3493 | else |
3468 | perf_output_read_one(handle, event); | 3494 | perf_output_read_one(handle, event, enabled, running); |
3469 | } | 3495 | } |
3470 | 3496 | ||
3471 | void perf_output_sample(struct perf_output_handle *handle, | 3497 | void perf_output_sample(struct perf_output_handle *handle, |
diff --git a/kernel/printk.c b/kernel/printk.c index b2ebaee8c377..9a2264fc42ca 100644 --- a/kernel/printk.c +++ b/kernel/printk.c | |||
@@ -261,6 +261,12 @@ static inline void boot_delay_msec(void) | |||
261 | } | 261 | } |
262 | #endif | 262 | #endif |
263 | 263 | ||
264 | #ifdef CONFIG_SECURITY_DMESG_RESTRICT | ||
265 | int dmesg_restrict = 1; | ||
266 | #else | ||
267 | int dmesg_restrict; | ||
268 | #endif | ||
269 | |||
264 | int do_syslog(int type, char __user *buf, int len, bool from_file) | 270 | int do_syslog(int type, char __user *buf, int len, bool from_file) |
265 | { | 271 | { |
266 | unsigned i, j, limit, count; | 272 | unsigned i, j, limit, count; |
@@ -268,7 +274,20 @@ int do_syslog(int type, char __user *buf, int len, bool from_file) | |||
268 | char c; | 274 | char c; |
269 | int error = 0; | 275 | int error = 0; |
270 | 276 | ||
271 | error = security_syslog(type, from_file); | 277 | /* |
278 | * If this is from /proc/kmsg we only do the capabilities checks | ||
279 | * at open time. | ||
280 | */ | ||
281 | if (type == SYSLOG_ACTION_OPEN || !from_file) { | ||
282 | if (dmesg_restrict && !capable(CAP_SYS_ADMIN)) | ||
283 | return -EPERM; | ||
284 | if ((type != SYSLOG_ACTION_READ_ALL && | ||
285 | type != SYSLOG_ACTION_SIZE_BUFFER) && | ||
286 | !capable(CAP_SYS_ADMIN)) | ||
287 | return -EPERM; | ||
288 | } | ||
289 | |||
290 | error = security_syslog(type); | ||
272 | if (error) | 291 | if (error) |
273 | return error; | 292 | return error; |
274 | 293 | ||
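The effect visible from userspace (illustrative): with CONFIG_SECURITY_DMESG_RESTRICT, or kernel.dmesg_restrict set to 1 via the new sysctl added below, syslog(2) reads from a process without CAP_SYS_ADMIN now fail instead of exposing the kernel log.

#include <errno.h>
#include <stdio.h>
#include <sys/klog.h>

int main(void)
{
	char buf[4096];
	int n = klogctl(3 /* SYSLOG_ACTION_READ_ALL */, buf, sizeof(buf));

	if (n < 0 && errno == EPERM)
		printf("kernel log restricted (dmesg_restrict=1, no CAP_SYS_ADMIN)\n");
	else if (n >= 0)
		printf("read %d bytes of kernel log\n", n);
	return 0;
}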
diff --git a/kernel/range.c b/kernel/range.c index 471b66acabb5..37fa9b99ad58 100644 --- a/kernel/range.c +++ b/kernel/range.c | |||
@@ -119,7 +119,7 @@ static int cmp_range(const void *x1, const void *x2) | |||
119 | 119 | ||
120 | int clean_sort_range(struct range *range, int az) | 120 | int clean_sort_range(struct range *range, int az) |
121 | { | 121 | { |
122 | int i, j, k = az - 1, nr_range = 0; | 122 | int i, j, k = az - 1, nr_range = az; |
123 | 123 | ||
124 | for (i = 0; i < k; i++) { | 124 | for (i = 0; i < k; i++) { |
125 | if (range[i].end) | 125 | if (range[i].end) |
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index c33a1edb799f..5abfa1518554 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
@@ -702,6 +702,15 @@ static struct ctl_table kern_table[] = { | |||
702 | .extra1 = &zero, | 702 | .extra1 = &zero, |
703 | .extra2 = &ten_thousand, | 703 | .extra2 = &ten_thousand, |
704 | }, | 704 | }, |
705 | { | ||
706 | .procname = "dmesg_restrict", | ||
707 | .data = &dmesg_restrict, | ||
708 | .maxlen = sizeof(int), | ||
709 | .mode = 0644, | ||
710 | .proc_handler = proc_dointvec_minmax, | ||
711 | .extra1 = &zero, | ||
712 | .extra2 = &one, | ||
713 | }, | ||
705 | #endif | 714 | #endif |
706 | { | 715 | { |
707 | .procname = "ngroups_max", | 716 | .procname = "ngroups_max", |
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index e04b8bcdef88..ea37e2ff4164 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig | |||
@@ -126,7 +126,7 @@ if FTRACE | |||
126 | config FUNCTION_TRACER | 126 | config FUNCTION_TRACER |
127 | bool "Kernel Function Tracer" | 127 | bool "Kernel Function Tracer" |
128 | depends on HAVE_FUNCTION_TRACER | 128 | depends on HAVE_FUNCTION_TRACER |
129 | select FRAME_POINTER if (!ARM_UNWIND) | 129 | select FRAME_POINTER if !ARM_UNWIND && !S390 |
130 | select KALLSYMS | 130 | select KALLSYMS |
131 | select GENERIC_TRACER | 131 | select GENERIC_TRACER |
132 | select CONTEXT_SWITCH_TRACER | 132 | select CONTEXT_SWITCH_TRACER |
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index bc251ed66724..7b8ec0281548 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c | |||
@@ -168,7 +168,6 @@ static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector, | |||
168 | static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ), | 168 | static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ), |
169 | BLK_TC_ACT(BLK_TC_WRITE) }; | 169 | BLK_TC_ACT(BLK_TC_WRITE) }; |
170 | 170 | ||
171 | #define BLK_TC_HARDBARRIER BLK_TC_BARRIER | ||
172 | #define BLK_TC_RAHEAD BLK_TC_AHEAD | 171 | #define BLK_TC_RAHEAD BLK_TC_AHEAD |
173 | 172 | ||
174 | /* The ilog2() calls fall out because they're constant */ | 173 | /* The ilog2() calls fall out because they're constant */ |
@@ -196,7 +195,6 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes, | |||
196 | return; | 195 | return; |
197 | 196 | ||
198 | what |= ddir_act[rw & WRITE]; | 197 | what |= ddir_act[rw & WRITE]; |
199 | what |= MASK_TC_BIT(rw, HARDBARRIER); | ||
200 | what |= MASK_TC_BIT(rw, SYNC); | 198 | what |= MASK_TC_BIT(rw, SYNC); |
201 | what |= MASK_TC_BIT(rw, RAHEAD); | 199 | what |= MASK_TC_BIT(rw, RAHEAD); |
202 | what |= MASK_TC_BIT(rw, META); | 200 | what |= MASK_TC_BIT(rw, META); |
@@ -1807,8 +1805,6 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes) | |||
1807 | 1805 | ||
1808 | if (rw & REQ_RAHEAD) | 1806 | if (rw & REQ_RAHEAD) |
1809 | rwbs[i++] = 'A'; | 1807 | rwbs[i++] = 'A'; |
1810 | if (rw & REQ_HARDBARRIER) | ||
1811 | rwbs[i++] = 'B'; | ||
1812 | if (rw & REQ_SYNC) | 1808 | if (rw & REQ_SYNC) |
1813 | rwbs[i++] = 'S'; | 1809 | rwbs[i++] = 'S'; |
1814 | if (rw & REQ_META) | 1810 | if (rw & REQ_META) |
diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 6f412ab4c24f..5086bb962b4d 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c | |||
@@ -82,6 +82,16 @@ struct radix_tree_preload { | |||
82 | }; | 82 | }; |
83 | static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, }; | 83 | static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, }; |
84 | 84 | ||
85 | static inline void *ptr_to_indirect(void *ptr) | ||
86 | { | ||
87 | return (void *)((unsigned long)ptr | RADIX_TREE_INDIRECT_PTR); | ||
88 | } | ||
89 | |||
90 | static inline void *indirect_to_ptr(void *ptr) | ||
91 | { | ||
92 | return (void *)((unsigned long)ptr & ~RADIX_TREE_INDIRECT_PTR); | ||
93 | } | ||
94 | |||
85 | static inline gfp_t root_gfp_mask(struct radix_tree_root *root) | 95 | static inline gfp_t root_gfp_mask(struct radix_tree_root *root) |
86 | { | 96 | { |
87 | return root->gfp_mask & __GFP_BITS_MASK; | 97 | return root->gfp_mask & __GFP_BITS_MASK; |
@@ -265,7 +275,7 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index) | |||
265 | return -ENOMEM; | 275 | return -ENOMEM; |
266 | 276 | ||
267 | /* Increase the height. */ | 277 | /* Increase the height. */ |
268 | node->slots[0] = radix_tree_indirect_to_ptr(root->rnode); | 278 | node->slots[0] = indirect_to_ptr(root->rnode); |
269 | 279 | ||
270 | /* Propagate the aggregated tag info into the new root */ | 280 | /* Propagate the aggregated tag info into the new root */ |
271 | for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) { | 281 | for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) { |
@@ -276,7 +286,7 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index) | |||
276 | newheight = root->height+1; | 286 | newheight = root->height+1; |
277 | node->height = newheight; | 287 | node->height = newheight; |
278 | node->count = 1; | 288 | node->count = 1; |
279 | node = radix_tree_ptr_to_indirect(node); | 289 | node = ptr_to_indirect(node); |
280 | rcu_assign_pointer(root->rnode, node); | 290 | rcu_assign_pointer(root->rnode, node); |
281 | root->height = newheight; | 291 | root->height = newheight; |
282 | } while (height > root->height); | 292 | } while (height > root->height); |
@@ -309,7 +319,7 @@ int radix_tree_insert(struct radix_tree_root *root, | |||
309 | return error; | 319 | return error; |
310 | } | 320 | } |
311 | 321 | ||
312 | slot = radix_tree_indirect_to_ptr(root->rnode); | 322 | slot = indirect_to_ptr(root->rnode); |
313 | 323 | ||
314 | height = root->height; | 324 | height = root->height; |
315 | shift = (height-1) * RADIX_TREE_MAP_SHIFT; | 325 | shift = (height-1) * RADIX_TREE_MAP_SHIFT; |
@@ -325,8 +335,7 @@ int radix_tree_insert(struct radix_tree_root *root, | |||
325 | rcu_assign_pointer(node->slots[offset], slot); | 335 | rcu_assign_pointer(node->slots[offset], slot); |
326 | node->count++; | 336 | node->count++; |
327 | } else | 337 | } else |
328 | rcu_assign_pointer(root->rnode, | 338 | rcu_assign_pointer(root->rnode, ptr_to_indirect(slot)); |
329 | radix_tree_ptr_to_indirect(slot)); | ||
330 | } | 339 | } |
331 | 340 | ||
332 | /* Go a level down */ | 341 | /* Go a level down */ |
@@ -374,7 +383,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root, | |||
374 | return NULL; | 383 | return NULL; |
375 | return is_slot ? (void *)&root->rnode : node; | 384 | return is_slot ? (void *)&root->rnode : node; |
376 | } | 385 | } |
377 | node = radix_tree_indirect_to_ptr(node); | 386 | node = indirect_to_ptr(node); |
378 | 387 | ||
379 | height = node->height; | 388 | height = node->height; |
380 | if (index > radix_tree_maxindex(height)) | 389 | if (index > radix_tree_maxindex(height)) |
@@ -393,7 +402,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root, | |||
393 | height--; | 402 | height--; |
394 | } while (height > 0); | 403 | } while (height > 0); |
395 | 404 | ||
396 | return is_slot ? (void *)slot:node; | 405 | return is_slot ? (void *)slot : indirect_to_ptr(node); |
397 | } | 406 | } |
398 | 407 | ||
399 | /** | 408 | /** |
@@ -455,7 +464,7 @@ void *radix_tree_tag_set(struct radix_tree_root *root, | |||
455 | height = root->height; | 464 | height = root->height; |
456 | BUG_ON(index > radix_tree_maxindex(height)); | 465 | BUG_ON(index > radix_tree_maxindex(height)); |
457 | 466 | ||
458 | slot = radix_tree_indirect_to_ptr(root->rnode); | 467 | slot = indirect_to_ptr(root->rnode); |
459 | shift = (height - 1) * RADIX_TREE_MAP_SHIFT; | 468 | shift = (height - 1) * RADIX_TREE_MAP_SHIFT; |
460 | 469 | ||
461 | while (height > 0) { | 470 | while (height > 0) { |
@@ -509,7 +518,7 @@ void *radix_tree_tag_clear(struct radix_tree_root *root, | |||
509 | 518 | ||
510 | shift = (height - 1) * RADIX_TREE_MAP_SHIFT; | 519 | shift = (height - 1) * RADIX_TREE_MAP_SHIFT; |
511 | pathp->node = NULL; | 520 | pathp->node = NULL; |
512 | slot = radix_tree_indirect_to_ptr(root->rnode); | 521 | slot = indirect_to_ptr(root->rnode); |
513 | 522 | ||
514 | while (height > 0) { | 523 | while (height > 0) { |
515 | int offset; | 524 | int offset; |
@@ -579,7 +588,7 @@ int radix_tree_tag_get(struct radix_tree_root *root, | |||
579 | 588 | ||
580 | if (!radix_tree_is_indirect_ptr(node)) | 589 | if (!radix_tree_is_indirect_ptr(node)) |
581 | return (index == 0); | 590 | return (index == 0); |
582 | node = radix_tree_indirect_to_ptr(node); | 591 | node = indirect_to_ptr(node); |
583 | 592 | ||
584 | height = node->height; | 593 | height = node->height; |
585 | if (index > radix_tree_maxindex(height)) | 594 | if (index > radix_tree_maxindex(height)) |
@@ -666,7 +675,7 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root, | |||
666 | } | 675 | } |
667 | 676 | ||
668 | shift = (height - 1) * RADIX_TREE_MAP_SHIFT; | 677 | shift = (height - 1) * RADIX_TREE_MAP_SHIFT; |
669 | slot = radix_tree_indirect_to_ptr(root->rnode); | 678 | slot = indirect_to_ptr(root->rnode); |
670 | 679 | ||
671 | /* | 680 | /* |
672 | * we fill the path from (root->height - 2) to 0, leaving the index at | 681 | * we fill the path from (root->height - 2) to 0, leaving the index at |
@@ -897,7 +906,7 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results, | |||
897 | results[0] = node; | 906 | results[0] = node; |
898 | return 1; | 907 | return 1; |
899 | } | 908 | } |
900 | node = radix_tree_indirect_to_ptr(node); | 909 | node = indirect_to_ptr(node); |
901 | 910 | ||
902 | max_index = radix_tree_maxindex(node->height); | 911 | max_index = radix_tree_maxindex(node->height); |
903 | 912 | ||
@@ -916,7 +925,8 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results, | |||
916 | slot = *(((void ***)results)[ret + i]); | 925 | slot = *(((void ***)results)[ret + i]); |
917 | if (!slot) | 926 | if (!slot) |
918 | continue; | 927 | continue; |
919 | results[ret + nr_found] = rcu_dereference_raw(slot); | 928 | results[ret + nr_found] = |
929 | indirect_to_ptr(rcu_dereference_raw(slot)); | ||
920 | nr_found++; | 930 | nr_found++; |
921 | } | 931 | } |
922 | ret += nr_found; | 932 | ret += nr_found; |
@@ -965,7 +975,7 @@ radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results, | |||
965 | results[0] = (void **)&root->rnode; | 975 | results[0] = (void **)&root->rnode; |
966 | return 1; | 976 | return 1; |
967 | } | 977 | } |
968 | node = radix_tree_indirect_to_ptr(node); | 978 | node = indirect_to_ptr(node); |
969 | 979 | ||
970 | max_index = radix_tree_maxindex(node->height); | 980 | max_index = radix_tree_maxindex(node->height); |
971 | 981 | ||
@@ -1090,7 +1100,7 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, | |||
1090 | results[0] = node; | 1100 | results[0] = node; |
1091 | return 1; | 1101 | return 1; |
1092 | } | 1102 | } |
1093 | node = radix_tree_indirect_to_ptr(node); | 1103 | node = indirect_to_ptr(node); |
1094 | 1104 | ||
1095 | max_index = radix_tree_maxindex(node->height); | 1105 | max_index = radix_tree_maxindex(node->height); |
1096 | 1106 | ||
@@ -1109,7 +1119,8 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, | |||
1109 | slot = *(((void ***)results)[ret + i]); | 1119 | slot = *(((void ***)results)[ret + i]); |
1110 | if (!slot) | 1120 | if (!slot) |
1111 | continue; | 1121 | continue; |
1112 | results[ret + nr_found] = rcu_dereference_raw(slot); | 1122 | results[ret + nr_found] = |
1123 | indirect_to_ptr(rcu_dereference_raw(slot)); | ||
1113 | nr_found++; | 1124 | nr_found++; |
1114 | } | 1125 | } |
1115 | ret += nr_found; | 1126 | ret += nr_found; |
@@ -1159,7 +1170,7 @@ radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, | |||
1159 | results[0] = (void **)&root->rnode; | 1170 | results[0] = (void **)&root->rnode; |
1160 | return 1; | 1171 | return 1; |
1161 | } | 1172 | } |
1162 | node = radix_tree_indirect_to_ptr(node); | 1173 | node = indirect_to_ptr(node); |
1163 | 1174 | ||
1164 | max_index = radix_tree_maxindex(node->height); | 1175 | max_index = radix_tree_maxindex(node->height); |
1165 | 1176 | ||
@@ -1195,7 +1206,7 @@ static inline void radix_tree_shrink(struct radix_tree_root *root) | |||
1195 | void *newptr; | 1206 | void *newptr; |
1196 | 1207 | ||
1197 | BUG_ON(!radix_tree_is_indirect_ptr(to_free)); | 1208 | BUG_ON(!radix_tree_is_indirect_ptr(to_free)); |
1198 | to_free = radix_tree_indirect_to_ptr(to_free); | 1209 | to_free = indirect_to_ptr(to_free); |
1199 | 1210 | ||
1200 | /* | 1211 | /* |
1201 | * The candidate node has more than one child, or its child | 1212 | * The candidate node has more than one child, or its child |
@@ -1208,16 +1219,39 @@ static inline void radix_tree_shrink(struct radix_tree_root *root) | |||
1208 | 1219 | ||
1209 | /* | 1220 | /* |
1210 | * We don't need rcu_assign_pointer(), since we are simply | 1221 | * We don't need rcu_assign_pointer(), since we are simply |
1211 | * moving the node from one part of the tree to another. If | 1222 | * moving the node from one part of the tree to another: if it |
1212 | * it was safe to dereference the old pointer to it | 1223 | * was safe to dereference the old pointer to it |
1213 | * (to_free->slots[0]), it will be safe to dereference the new | 1224 | * (to_free->slots[0]), it will be safe to dereference the new |
1214 | * one (root->rnode). | 1225 | * one (root->rnode) as far as dependent read barriers go. |
1215 | */ | 1226 | */ |
1216 | newptr = to_free->slots[0]; | 1227 | newptr = to_free->slots[0]; |
1217 | if (root->height > 1) | 1228 | if (root->height > 1) |
1218 | newptr = radix_tree_ptr_to_indirect(newptr); | 1229 | newptr = ptr_to_indirect(newptr); |
1219 | root->rnode = newptr; | 1230 | root->rnode = newptr; |
1220 | root->height--; | 1231 | root->height--; |
1232 | |||
1233 | /* | ||
1234 | * We have a dilemma here. The node's slot[0] must not be | ||
1235 | * NULLed in case there are concurrent lookups expecting to | ||
1236 | * find the item. However if this was a bottom-level node, | ||
1237 | * then it may be subject to the slot pointer being visible | ||
1238 | * to callers dereferencing it. If item corresponding to | ||
1239 | * slot[0] is subsequently deleted, these callers would expect | ||
1240 | * their slot to become empty sooner or later. | ||
1241 | * | ||
1242 | * For example, lockless pagecache will look up a slot, deref | ||
1243 | * the page pointer, and if the page is 0 refcount it means it | ||
1244 | * was concurrently deleted from pagecache so try the deref | ||
1245 | * again. Fortunately there is already a requirement for logic | ||
1246 | * to retry the entire slot lookup -- the indirect pointer | ||
1247 | * problem (replacing direct root node with an indirect pointer | ||
1248 | * also results in a stale slot). So tag the slot as indirect | ||
1249 | * to force callers to retry. | ||
1250 | */ | ||
1251 | if (root->height == 0) | ||
1252 | *((unsigned long *)&to_free->slots[0]) |= | ||
1253 | RADIX_TREE_INDIRECT_PTR; | ||
1254 | |||
1221 | radix_tree_node_free(to_free); | 1255 | radix_tree_node_free(to_free); |
1222 | } | 1256 | } |
1223 | } | 1257 | } |
@@ -1254,7 +1288,7 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) | |||
1254 | root->rnode = NULL; | 1288 | root->rnode = NULL; |
1255 | goto out; | 1289 | goto out; |
1256 | } | 1290 | } |
1257 | slot = radix_tree_indirect_to_ptr(slot); | 1291 | slot = indirect_to_ptr(slot); |
1258 | 1292 | ||
1259 | shift = (height - 1) * RADIX_TREE_MAP_SHIFT; | 1293 | shift = (height - 1) * RADIX_TREE_MAP_SHIFT; |
1260 | pathp->node = NULL; | 1294 | pathp->node = NULL; |
@@ -1296,8 +1330,7 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) | |||
1296 | radix_tree_node_free(to_free); | 1330 | radix_tree_node_free(to_free); |
1297 | 1331 | ||
1298 | if (pathp->node->count) { | 1332 | if (pathp->node->count) { |
1299 | if (pathp->node == | 1333 | if (pathp->node == indirect_to_ptr(root->rnode)) |
1300 | radix_tree_indirect_to_ptr(root->rnode)) | ||
1301 | radix_tree_shrink(root); | 1334 | radix_tree_shrink(root); |
1302 | goto out; | 1335 | goto out; |
1303 | } | 1336 | } |
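The new ptr_to_indirect()/indirect_to_ptr() helpers, and the RADIX_TREE_INDIRECT_PTR bit written into the stale slot on shrink, all rest on node pointers having their low bit free. A small userspace sketch of that tagging trick (names are illustrative, not the kernel's):

#include <stdio.h>

#define INDIRECT_BIT 1UL

static inline void *to_indirect(void *ptr)
{
	/* pointers are at least word aligned, so bit 0 is free to mean
	 * "internal node / stale entry, retry the lookup" */
	return (void *)((unsigned long)ptr | INDIRECT_BIT);
}

static inline void *to_direct(void *ptr)
{
	return (void *)((unsigned long)ptr & ~INDIRECT_BIT);
}

static inline int is_indirect(const void *ptr)
{
	return (unsigned long)ptr & INDIRECT_BIT;
}

int main(void)
{
	int node = 42;
	void *tagged = to_indirect(&node);

	printf("indirect=%d value=%d\n",
	       is_indirect(tagged), *(int *)to_direct(tagged));
	return 0;
}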
diff --git a/mm/filemap.c b/mm/filemap.c index 61ba5e405791..ea89840fc65f 100644 --- a/mm/filemap.c +++ b/mm/filemap.c | |||
@@ -644,7 +644,9 @@ repeat: | |||
644 | pagep = radix_tree_lookup_slot(&mapping->page_tree, offset); | 644 | pagep = radix_tree_lookup_slot(&mapping->page_tree, offset); |
645 | if (pagep) { | 645 | if (pagep) { |
646 | page = radix_tree_deref_slot(pagep); | 646 | page = radix_tree_deref_slot(pagep); |
647 | if (unlikely(!page || page == RADIX_TREE_RETRY)) | 647 | if (unlikely(!page)) |
648 | goto out; | ||
649 | if (radix_tree_deref_retry(page)) | ||
648 | goto repeat; | 650 | goto repeat; |
649 | 651 | ||
650 | if (!page_cache_get_speculative(page)) | 652 | if (!page_cache_get_speculative(page)) |
@@ -660,6 +662,7 @@ repeat: | |||
660 | goto repeat; | 662 | goto repeat; |
661 | } | 663 | } |
662 | } | 664 | } |
665 | out: | ||
663 | rcu_read_unlock(); | 666 | rcu_read_unlock(); |
664 | 667 | ||
665 | return page; | 668 | return page; |
@@ -777,12 +780,11 @@ repeat: | |||
777 | page = radix_tree_deref_slot((void **)pages[i]); | 780 | page = radix_tree_deref_slot((void **)pages[i]); |
778 | if (unlikely(!page)) | 781 | if (unlikely(!page)) |
779 | continue; | 782 | continue; |
780 | /* | 783 | if (radix_tree_deref_retry(page)) { |
781 | * this can only trigger if nr_found == 1, making livelock | 784 | if (ret) |
782 | * a non issue. | 785 | start = pages[ret-1]->index; |
783 | */ | ||
784 | if (unlikely(page == RADIX_TREE_RETRY)) | ||
785 | goto restart; | 786 | goto restart; |
787 | } | ||
786 | 788 | ||
787 | if (!page_cache_get_speculative(page)) | 789 | if (!page_cache_get_speculative(page)) |
788 | goto repeat; | 790 | goto repeat; |
@@ -830,11 +832,7 @@ repeat: | |||
830 | page = radix_tree_deref_slot((void **)pages[i]); | 832 | page = radix_tree_deref_slot((void **)pages[i]); |
831 | if (unlikely(!page)) | 833 | if (unlikely(!page)) |
832 | continue; | 834 | continue; |
833 | /* | 835 | if (radix_tree_deref_retry(page)) |
834 | * this can only trigger if nr_found == 1, making livelock | ||
835 | * a non issue. | ||
836 | */ | ||
837 | if (unlikely(page == RADIX_TREE_RETRY)) | ||
838 | goto restart; | 836 | goto restart; |
839 | 837 | ||
840 | if (page->mapping == NULL || page->index != index) | 838 | if (page->mapping == NULL || page->index != index) |
@@ -887,11 +885,7 @@ repeat: | |||
887 | page = radix_tree_deref_slot((void **)pages[i]); | 885 | page = radix_tree_deref_slot((void **)pages[i]); |
888 | if (unlikely(!page)) | 886 | if (unlikely(!page)) |
889 | continue; | 887 | continue; |
890 | /* | 888 | if (radix_tree_deref_retry(page)) |
891 | * this can only trigger if nr_found == 1, making livelock | ||
892 | * a non issue. | ||
893 | */ | ||
894 | if (unlikely(page == RADIX_TREE_RETRY)) | ||
895 | goto restart; | 889 | goto restart; |
896 | 890 | ||
897 | if (!page_cache_get_speculative(page)) | 891 | if (!page_cache_get_speculative(page)) |
@@ -1029,6 +1023,9 @@ find_page: | |||
1029 | goto page_not_up_to_date; | 1023 | goto page_not_up_to_date; |
1030 | if (!trylock_page(page)) | 1024 | if (!trylock_page(page)) |
1031 | goto page_not_up_to_date; | 1025 | goto page_not_up_to_date; |
1026 | /* Did it get truncated before we got the lock? */ | ||
1027 | if (!page->mapping) | ||
1028 | goto page_not_up_to_date_locked; | ||
1032 | if (!mapping->a_ops->is_partially_uptodate(page, | 1029 | if (!mapping->a_ops->is_partially_uptodate(page, |
1033 | desc, offset)) | 1030 | desc, offset)) |
1034 | goto page_not_up_to_date_locked; | 1031 | goto page_not_up_to_date_locked; |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 9a99cfaf0a19..2efa8ea07ff7 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -4208,15 +4208,17 @@ static struct mem_cgroup *mem_cgroup_alloc(void) | |||
4208 | 4208 | ||
4209 | memset(mem, 0, size); | 4209 | memset(mem, 0, size); |
4210 | mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu); | 4210 | mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu); |
4211 | if (!mem->stat) { | 4211 | if (!mem->stat) |
4212 | if (size < PAGE_SIZE) | 4212 | goto out_free; |
4213 | kfree(mem); | ||
4214 | else | ||
4215 | vfree(mem); | ||
4216 | mem = NULL; | ||
4217 | } | ||
4218 | spin_lock_init(&mem->pcp_counter_lock); | 4213 | spin_lock_init(&mem->pcp_counter_lock); |
4219 | return mem; | 4214 | return mem; |
4215 | |||
4216 | out_free: | ||
4217 | if (size < PAGE_SIZE) | ||
4218 | kfree(mem); | ||
4219 | else | ||
4220 | vfree(mem); | ||
4221 | return NULL; | ||
4220 | } | 4222 | } |
4221 | 4223 | ||
4222 | /* | 4224 | /* |
diff --git a/mm/slub.c b/mm/slub.c --- a/mm/slub.c +++ b/mm/slub.c | |||
@@ -3273,9 +3273,9 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, | |||
3273 | kfree(n); | 3273 | kfree(n); |
3274 | kfree(s); | 3274 | kfree(s); |
3275 | } | 3275 | } |
3276 | err: | ||
3276 | up_write(&slub_lock); | 3277 | up_write(&slub_lock); |
3277 | 3278 | ||
3278 | err: | ||
3279 | if (flags & SLAB_PANIC) | 3279 | if (flags & SLAB_PANIC) |
3280 | panic("Cannot create slabcache %s\n", name); | 3280 | panic("Cannot create slabcache %s\n", name); |
3281 | else | 3281 | else |
@@ -3862,6 +3862,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s, | |||
3862 | x += sprintf(buf + x, " N%d=%lu", | 3862 | x += sprintf(buf + x, " N%d=%lu", |
3863 | node, nodes[node]); | 3863 | node, nodes[node]); |
3864 | #endif | 3864 | #endif |
3865 | up_read(&slub_lock); | ||
3865 | kfree(nodes); | 3866 | kfree(nodes); |
3866 | return x + sprintf(buf + x, "\n"); | 3867 | return x + sprintf(buf + x, "\n"); |
3867 | } | 3868 | } |
diff --git a/mm/vmscan.c b/mm/vmscan.c index b8a6fdc21312..d31d7ce52c0e 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -913,7 +913,7 @@ keep_lumpy: | |||
913 | * back off and wait for congestion to clear because further reclaim | 913 | * back off and wait for congestion to clear because further reclaim |
914 | * will encounter the same problem | 914 | * will encounter the same problem |
915 | */ | 915 | */ |
916 | if (nr_dirty == nr_congested) | 916 | if (nr_dirty == nr_congested && nr_dirty != 0) |
917 | zone_set_flag(zone, ZONE_CONGESTED); | 917 | zone_set_flag(zone, ZONE_CONGESTED); |
918 | 918 | ||
919 | free_page_list(&free_pages); | 919 | free_page_list(&free_pages); |
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index 26eaebf4aaa9..bb86d2932394 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c | |||
@@ -1392,6 +1392,7 @@ static int ax25_getname(struct socket *sock, struct sockaddr *uaddr, | |||
1392 | ax25_cb *ax25; | 1392 | ax25_cb *ax25; |
1393 | int err = 0; | 1393 | int err = 0; |
1394 | 1394 | ||
1395 | memset(fsa, 0, sizeof(*fsa)); | ||
1395 | lock_sock(sk); | 1396 | lock_sock(sk); |
1396 | ax25 = ax25_sk(sk); | 1397 | ax25 = ax25_sk(sk); |
1397 | 1398 | ||
@@ -1403,7 +1404,6 @@ static int ax25_getname(struct socket *sock, struct sockaddr *uaddr, | |||
1403 | 1404 | ||
1404 | fsa->fsa_ax25.sax25_family = AF_AX25; | 1405 | fsa->fsa_ax25.sax25_family = AF_AX25; |
1405 | fsa->fsa_ax25.sax25_call = ax25->dest_addr; | 1406 | fsa->fsa_ax25.sax25_call = ax25->dest_addr; |
1406 | fsa->fsa_ax25.sax25_ndigis = 0; | ||
1407 | 1407 | ||
1408 | if (ax25->digipeat != NULL) { | 1408 | if (ax25->digipeat != NULL) { |
1409 | ndigi = ax25->digipeat->ndigi; | 1409 | ndigi = ax25->digipeat->ndigi; |
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index bfef5bae0b3a..84093b0000b9 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c | |||
@@ -1175,6 +1175,12 @@ static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff | |||
1175 | hci_send_cmd(hdev, | 1175 | hci_send_cmd(hdev, |
1176 | HCI_OP_READ_REMOTE_EXT_FEATURES, | 1176 | HCI_OP_READ_REMOTE_EXT_FEATURES, |
1177 | sizeof(cp), &cp); | 1177 | sizeof(cp), &cp); |
1178 | } else if (!ev->status && conn->out && | ||
1179 | conn->sec_level == BT_SECURITY_HIGH) { | ||
1180 | struct hci_cp_auth_requested cp; | ||
1181 | cp.handle = ev->handle; | ||
1182 | hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, | ||
1183 | sizeof(cp), &cp); | ||
1178 | } else { | 1184 | } else { |
1179 | conn->state = BT_CONNECTED; | 1185 | conn->state = BT_CONNECTED; |
1180 | hci_proto_connect_cfm(conn, ev->status); | 1186 | hci_proto_connect_cfm(conn, ev->status); |
diff --git a/net/bluetooth/hidp/Kconfig b/net/bluetooth/hidp/Kconfig index 98fdfa1fbddd..86a91543172a 100644 --- a/net/bluetooth/hidp/Kconfig +++ b/net/bluetooth/hidp/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | config BT_HIDP | 1 | config BT_HIDP |
2 | tristate "HIDP protocol support" | 2 | tristate "HIDP protocol support" |
3 | depends on BT && BT_L2CAP && INPUT | 3 | depends on BT && BT_L2CAP && INPUT && HID_SUPPORT |
4 | select HID | 4 | select HID |
5 | help | 5 | help |
6 | HIDP (Human Interface Device Protocol) is a transport layer | 6 | HIDP (Human Interface Device Protocol) is a transport layer |
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c index daa7a988d9a6..cd8f6ea03841 100644 --- a/net/bluetooth/l2cap.c +++ b/net/bluetooth/l2cap.c | |||
@@ -2421,11 +2421,11 @@ static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned | |||
2421 | break; | 2421 | break; |
2422 | 2422 | ||
2423 | case 2: | 2423 | case 2: |
2424 | *val = __le16_to_cpu(*((__le16 *) opt->val)); | 2424 | *val = get_unaligned_le16(opt->val); |
2425 | break; | 2425 | break; |
2426 | 2426 | ||
2427 | case 4: | 2427 | case 4: |
2428 | *val = __le32_to_cpu(*((__le32 *) opt->val)); | 2428 | *val = get_unaligned_le32(opt->val); |
2429 | break; | 2429 | break; |
2430 | 2430 | ||
2431 | default: | 2431 | default: |
@@ -2452,11 +2452,11 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val) | |||
2452 | break; | 2452 | break; |
2453 | 2453 | ||
2454 | case 2: | 2454 | case 2: |
2455 | *((__le16 *) opt->val) = cpu_to_le16(val); | 2455 | put_unaligned_le16(val, opt->val); |
2456 | break; | 2456 | break; |
2457 | 2457 | ||
2458 | case 4: | 2458 | case 4: |
2459 | *((__le32 *) opt->val) = cpu_to_le32(val); | 2459 | put_unaligned_le32(val, opt->val); |
2460 | break; | 2460 | break; |
2461 | 2461 | ||
2462 | default: | 2462 | default: |
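get_unaligned_le16()/put_unaligned_le16() matter here because L2CAP configuration options sit at arbitrary byte offsets inside the packet, so the old cast-and-dereference was an unaligned access that can trap on strict-alignment CPUs. A userspace sketch of the safe byte-wise read the accessors boil down to (it assumes a little-endian host, which the kernel helpers do not):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint16_t read_le16(const uint8_t *p)
{
	uint16_t v;

	memcpy(&v, p, sizeof(v));	/* no alignment requirement */
	return v;			/* little-endian host assumed in this sketch */
}

int main(void)
{
	/* an FCS option (type 0x05, len 1) followed by an MTU option
	 * (type 0x01, len 2, value 0x02a0 = 672); the MTU value lands
	 * at an odd offset, as it easily can in a real packet */
	uint8_t opts[] = { 0x05, 0x01, 0x00, 0x01, 0x02, 0xa0, 0x02 };

	printf("mtu = %u\n", read_le16(&opts[5]));
	return 0;
}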
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index 39a5d87e33b4..fa642aa652bd 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c | |||
@@ -79,7 +79,10 @@ static void rfcomm_make_uih(struct sk_buff *skb, u8 addr); | |||
79 | 79 | ||
80 | static void rfcomm_process_connect(struct rfcomm_session *s); | 80 | static void rfcomm_process_connect(struct rfcomm_session *s); |
81 | 81 | ||
82 | static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, bdaddr_t *dst, int *err); | 82 | static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, |
83 | bdaddr_t *dst, | ||
84 | u8 sec_level, | ||
85 | int *err); | ||
83 | static struct rfcomm_session *rfcomm_session_get(bdaddr_t *src, bdaddr_t *dst); | 86 | static struct rfcomm_session *rfcomm_session_get(bdaddr_t *src, bdaddr_t *dst); |
84 | static void rfcomm_session_del(struct rfcomm_session *s); | 87 | static void rfcomm_session_del(struct rfcomm_session *s); |
85 | 88 | ||
@@ -401,7 +404,7 @@ static int __rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst, | |||
401 | 404 | ||
402 | s = rfcomm_session_get(src, dst); | 405 | s = rfcomm_session_get(src, dst); |
403 | if (!s) { | 406 | if (!s) { |
404 | s = rfcomm_session_create(src, dst, &err); | 407 | s = rfcomm_session_create(src, dst, d->sec_level, &err); |
405 | if (!s) | 408 | if (!s) |
406 | return err; | 409 | return err; |
407 | } | 410 | } |
@@ -679,7 +682,10 @@ static void rfcomm_session_close(struct rfcomm_session *s, int err) | |||
679 | rfcomm_session_put(s); | 682 | rfcomm_session_put(s); |
680 | } | 683 | } |
681 | 684 | ||
682 | static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, bdaddr_t *dst, int *err) | 685 | static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, |
686 | bdaddr_t *dst, | ||
687 | u8 sec_level, | ||
688 | int *err) | ||
683 | { | 689 | { |
684 | struct rfcomm_session *s = NULL; | 690 | struct rfcomm_session *s = NULL; |
685 | struct sockaddr_l2 addr; | 691 | struct sockaddr_l2 addr; |
@@ -704,6 +710,7 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, bdaddr_t *dst | |||
704 | sk = sock->sk; | 710 | sk = sock->sk; |
705 | lock_sock(sk); | 711 | lock_sock(sk); |
706 | l2cap_pi(sk)->imtu = l2cap_mtu; | 712 | l2cap_pi(sk)->imtu = l2cap_mtu; |
713 | l2cap_pi(sk)->sec_level = sec_level; | ||
707 | if (l2cap_ertm) | 714 | if (l2cap_ertm) |
708 | l2cap_pi(sk)->mode = L2CAP_MODE_ERTM; | 715 | l2cap_pi(sk)->mode = L2CAP_MODE_ERTM; |
709 | release_sock(sk); | 716 | release_sock(sk); |
diff --git a/net/can/bcm.c b/net/can/bcm.c index 08ffe9e4be20..6faa8256e10c 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c | |||
@@ -125,7 +125,7 @@ struct bcm_sock { | |||
125 | struct list_head tx_ops; | 125 | struct list_head tx_ops; |
126 | unsigned long dropped_usr_msgs; | 126 | unsigned long dropped_usr_msgs; |
127 | struct proc_dir_entry *bcm_proc_read; | 127 | struct proc_dir_entry *bcm_proc_read; |
128 | char procname [9]; /* pointer printed in ASCII with \0 */ | 128 | char procname [20]; /* pointer printed in ASCII with \0 */ |
129 | }; | 129 | }; |
130 | 130 | ||
131 | static inline struct bcm_sock *bcm_sk(const struct sock *sk) | 131 | static inline struct bcm_sock *bcm_sk(const struct sock *sk) |
diff --git a/net/core/dst.c b/net/core/dst.c index 8abe628b79f1..b99c7c7ffce2 100644 --- a/net/core/dst.c +++ b/net/core/dst.c | |||
@@ -370,6 +370,7 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event, | |||
370 | 370 | ||
371 | static struct notifier_block dst_dev_notifier = { | 371 | static struct notifier_block dst_dev_notifier = { |
372 | .notifier_call = dst_dev_event, | 372 | .notifier_call = dst_dev_event, |
373 | .priority = -10, /* must be called after other network notifiers */ | ||
373 | }; | 374 | }; |
374 | 375 | ||
375 | void __init dst_init(void) | 376 | void __init dst_init(void) |
diff --git a/net/core/filter.c b/net/core/filter.c index 7beaec36b541..23e9b2a6b4c8 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
@@ -112,39 +112,41 @@ EXPORT_SYMBOL(sk_filter); | |||
112 | */ | 112 | */ |
113 | unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen) | 113 | unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen) |
114 | { | 114 | { |
115 | struct sock_filter *fentry; /* We walk down these */ | ||
116 | void *ptr; | 115 | void *ptr; |
117 | u32 A = 0; /* Accumulator */ | 116 | u32 A = 0; /* Accumulator */ |
118 | u32 X = 0; /* Index Register */ | 117 | u32 X = 0; /* Index Register */ |
119 | u32 mem[BPF_MEMWORDS]; /* Scratch Memory Store */ | 118 | u32 mem[BPF_MEMWORDS]; /* Scratch Memory Store */ |
119 | unsigned long memvalid = 0; | ||
120 | u32 tmp; | 120 | u32 tmp; |
121 | int k; | 121 | int k; |
122 | int pc; | 122 | int pc; |
123 | 123 | ||
124 | BUILD_BUG_ON(BPF_MEMWORDS > BITS_PER_LONG); | ||
124 | /* | 125 | /* |
125 | * Process array of filter instructions. | 126 | * Process array of filter instructions. |
126 | */ | 127 | */ |
127 | for (pc = 0; pc < flen; pc++) { | 128 | for (pc = 0; pc < flen; pc++) { |
128 | fentry = &filter[pc]; | 129 | const struct sock_filter *fentry = &filter[pc]; |
130 | u32 f_k = fentry->k; | ||
129 | 131 | ||
130 | switch (fentry->code) { | 132 | switch (fentry->code) { |
131 | case BPF_S_ALU_ADD_X: | 133 | case BPF_S_ALU_ADD_X: |
132 | A += X; | 134 | A += X; |
133 | continue; | 135 | continue; |
134 | case BPF_S_ALU_ADD_K: | 136 | case BPF_S_ALU_ADD_K: |
135 | A += fentry->k; | 137 | A += f_k; |
136 | continue; | 138 | continue; |
137 | case BPF_S_ALU_SUB_X: | 139 | case BPF_S_ALU_SUB_X: |
138 | A -= X; | 140 | A -= X; |
139 | continue; | 141 | continue; |
140 | case BPF_S_ALU_SUB_K: | 142 | case BPF_S_ALU_SUB_K: |
141 | A -= fentry->k; | 143 | A -= f_k; |
142 | continue; | 144 | continue; |
143 | case BPF_S_ALU_MUL_X: | 145 | case BPF_S_ALU_MUL_X: |
144 | A *= X; | 146 | A *= X; |
145 | continue; | 147 | continue; |
146 | case BPF_S_ALU_MUL_K: | 148 | case BPF_S_ALU_MUL_K: |
147 | A *= fentry->k; | 149 | A *= f_k; |
148 | continue; | 150 | continue; |
149 | case BPF_S_ALU_DIV_X: | 151 | case BPF_S_ALU_DIV_X: |
150 | if (X == 0) | 152 | if (X == 0) |
@@ -152,49 +154,49 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int | |||
152 | A /= X; | 154 | A /= X; |
153 | continue; | 155 | continue; |
154 | case BPF_S_ALU_DIV_K: | 156 | case BPF_S_ALU_DIV_K: |
155 | A /= fentry->k; | 157 | A /= f_k; |
156 | continue; | 158 | continue; |
157 | case BPF_S_ALU_AND_X: | 159 | case BPF_S_ALU_AND_X: |
158 | A &= X; | 160 | A &= X; |
159 | continue; | 161 | continue; |
160 | case BPF_S_ALU_AND_K: | 162 | case BPF_S_ALU_AND_K: |
161 | A &= fentry->k; | 163 | A &= f_k; |
162 | continue; | 164 | continue; |
163 | case BPF_S_ALU_OR_X: | 165 | case BPF_S_ALU_OR_X: |
164 | A |= X; | 166 | A |= X; |
165 | continue; | 167 | continue; |
166 | case BPF_S_ALU_OR_K: | 168 | case BPF_S_ALU_OR_K: |
167 | A |= fentry->k; | 169 | A |= f_k; |
168 | continue; | 170 | continue; |
169 | case BPF_S_ALU_LSH_X: | 171 | case BPF_S_ALU_LSH_X: |
170 | A <<= X; | 172 | A <<= X; |
171 | continue; | 173 | continue; |
172 | case BPF_S_ALU_LSH_K: | 174 | case BPF_S_ALU_LSH_K: |
173 | A <<= fentry->k; | 175 | A <<= f_k; |
174 | continue; | 176 | continue; |
175 | case BPF_S_ALU_RSH_X: | 177 | case BPF_S_ALU_RSH_X: |
176 | A >>= X; | 178 | A >>= X; |
177 | continue; | 179 | continue; |
178 | case BPF_S_ALU_RSH_K: | 180 | case BPF_S_ALU_RSH_K: |
179 | A >>= fentry->k; | 181 | A >>= f_k; |
180 | continue; | 182 | continue; |
181 | case BPF_S_ALU_NEG: | 183 | case BPF_S_ALU_NEG: |
182 | A = -A; | 184 | A = -A; |
183 | continue; | 185 | continue; |
184 | case BPF_S_JMP_JA: | 186 | case BPF_S_JMP_JA: |
185 | pc += fentry->k; | 187 | pc += f_k; |
186 | continue; | 188 | continue; |
187 | case BPF_S_JMP_JGT_K: | 189 | case BPF_S_JMP_JGT_K: |
188 | pc += (A > fentry->k) ? fentry->jt : fentry->jf; | 190 | pc += (A > f_k) ? fentry->jt : fentry->jf; |
189 | continue; | 191 | continue; |
190 | case BPF_S_JMP_JGE_K: | 192 | case BPF_S_JMP_JGE_K: |
191 | pc += (A >= fentry->k) ? fentry->jt : fentry->jf; | 193 | pc += (A >= f_k) ? fentry->jt : fentry->jf; |
192 | continue; | 194 | continue; |
193 | case BPF_S_JMP_JEQ_K: | 195 | case BPF_S_JMP_JEQ_K: |
194 | pc += (A == fentry->k) ? fentry->jt : fentry->jf; | 196 | pc += (A == f_k) ? fentry->jt : fentry->jf; |
195 | continue; | 197 | continue; |
196 | case BPF_S_JMP_JSET_K: | 198 | case BPF_S_JMP_JSET_K: |
197 | pc += (A & fentry->k) ? fentry->jt : fentry->jf; | 199 | pc += (A & f_k) ? fentry->jt : fentry->jf; |
198 | continue; | 200 | continue; |
199 | case BPF_S_JMP_JGT_X: | 201 | case BPF_S_JMP_JGT_X: |
200 | pc += (A > X) ? fentry->jt : fentry->jf; | 202 | pc += (A > X) ? fentry->jt : fentry->jf; |
@@ -209,7 +211,7 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int | |||
209 | pc += (A & X) ? fentry->jt : fentry->jf; | 211 | pc += (A & X) ? fentry->jt : fentry->jf; |
210 | continue; | 212 | continue; |
211 | case BPF_S_LD_W_ABS: | 213 | case BPF_S_LD_W_ABS: |
212 | k = fentry->k; | 214 | k = f_k; |
213 | load_w: | 215 | load_w: |
214 | ptr = load_pointer(skb, k, 4, &tmp); | 216 | ptr = load_pointer(skb, k, 4, &tmp); |
215 | if (ptr != NULL) { | 217 | if (ptr != NULL) { |
@@ -218,7 +220,7 @@ load_w: | |||
218 | } | 220 | } |
219 | break; | 221 | break; |
220 | case BPF_S_LD_H_ABS: | 222 | case BPF_S_LD_H_ABS: |
221 | k = fentry->k; | 223 | k = f_k; |
222 | load_h: | 224 | load_h: |
223 | ptr = load_pointer(skb, k, 2, &tmp); | 225 | ptr = load_pointer(skb, k, 2, &tmp); |
224 | if (ptr != NULL) { | 226 | if (ptr != NULL) { |
@@ -227,7 +229,7 @@ load_h: | |||
227 | } | 229 | } |
228 | break; | 230 | break; |
229 | case BPF_S_LD_B_ABS: | 231 | case BPF_S_LD_B_ABS: |
230 | k = fentry->k; | 232 | k = f_k; |
231 | load_b: | 233 | load_b: |
232 | ptr = load_pointer(skb, k, 1, &tmp); | 234 | ptr = load_pointer(skb, k, 1, &tmp); |
233 | if (ptr != NULL) { | 235 | if (ptr != NULL) { |
@@ -242,32 +244,34 @@ load_b: | |||
242 | X = skb->len; | 244 | X = skb->len; |
243 | continue; | 245 | continue; |
244 | case BPF_S_LD_W_IND: | 246 | case BPF_S_LD_W_IND: |
245 | k = X + fentry->k; | 247 | k = X + f_k; |
246 | goto load_w; | 248 | goto load_w; |
247 | case BPF_S_LD_H_IND: | 249 | case BPF_S_LD_H_IND: |
248 | k = X + fentry->k; | 250 | k = X + f_k; |
249 | goto load_h; | 251 | goto load_h; |
250 | case BPF_S_LD_B_IND: | 252 | case BPF_S_LD_B_IND: |
251 | k = X + fentry->k; | 253 | k = X + f_k; |
252 | goto load_b; | 254 | goto load_b; |
253 | case BPF_S_LDX_B_MSH: | 255 | case BPF_S_LDX_B_MSH: |
254 | ptr = load_pointer(skb, fentry->k, 1, &tmp); | 256 | ptr = load_pointer(skb, f_k, 1, &tmp); |
255 | if (ptr != NULL) { | 257 | if (ptr != NULL) { |
256 | X = (*(u8 *)ptr & 0xf) << 2; | 258 | X = (*(u8 *)ptr & 0xf) << 2; |
257 | continue; | 259 | continue; |
258 | } | 260 | } |
259 | return 0; | 261 | return 0; |
260 | case BPF_S_LD_IMM: | 262 | case BPF_S_LD_IMM: |
261 | A = fentry->k; | 263 | A = f_k; |
262 | continue; | 264 | continue; |
263 | case BPF_S_LDX_IMM: | 265 | case BPF_S_LDX_IMM: |
264 | X = fentry->k; | 266 | X = f_k; |
265 | continue; | 267 | continue; |
266 | case BPF_S_LD_MEM: | 268 | case BPF_S_LD_MEM: |
267 | A = mem[fentry->k]; | 269 | A = (memvalid & (1UL << f_k)) ? |
270 | mem[f_k] : 0; | ||
268 | continue; | 271 | continue; |
269 | case BPF_S_LDX_MEM: | 272 | case BPF_S_LDX_MEM: |
270 | X = mem[fentry->k]; | 273 | X = (memvalid & (1UL << f_k)) ? |
274 | mem[f_k] : 0; | ||
271 | continue; | 275 | continue; |
272 | case BPF_S_MISC_TAX: | 276 | case BPF_S_MISC_TAX: |
273 | X = A; | 277 | X = A; |
@@ -276,14 +280,16 @@ load_b: | |||
276 | A = X; | 280 | A = X; |
277 | continue; | 281 | continue; |
278 | case BPF_S_RET_K: | 282 | case BPF_S_RET_K: |
279 | return fentry->k; | 283 | return f_k; |
280 | case BPF_S_RET_A: | 284 | case BPF_S_RET_A: |
281 | return A; | 285 | return A; |
282 | case BPF_S_ST: | 286 | case BPF_S_ST: |
283 | mem[fentry->k] = A; | 287 | memvalid |= 1UL << f_k; |
288 | mem[f_k] = A; | ||
284 | continue; | 289 | continue; |
285 | case BPF_S_STX: | 290 | case BPF_S_STX: |
286 | mem[fentry->k] = X; | 291 | memvalid |= 1UL << f_k; |
292 | mem[f_k] = X; | ||
287 | continue; | 293 | continue; |
288 | default: | 294 | default: |
289 | WARN_ON(1); | 295 | WARN_ON(1); |
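The filter.c change does two things: it caches fentry->k in a local (a plain micro-optimisation), and, more importantly, it stops BPF_S_LD_MEM/BPF_S_LDX_MEM from reading scratch words that no BPF_S_ST/BPF_S_STX ever wrote, since the scratch array lives uninitialised on the kernel stack; the BUILD_BUG_ON guarantees one validity bit per word fits in a long. A minimal sketch of the validity-bitmap idea:

#include <stdio.h>

#define MEMWORDS 16

static unsigned int scratch[MEMWORDS];  /* imagine this is uninitialised stack */
static unsigned long valid;             /* one bit per scratch word */

/* Store marks the slot valid before writing it. */
static void st(unsigned int k, unsigned int v)
{
	valid |= 1UL << k;
	scratch[k] = v;
}

/* Load returns 0 for slots never stored, instead of stale stack contents. */
static unsigned int ld(unsigned int k)
{
	return (valid & (1UL << k)) ? scratch[k] : 0;
}

int main(void)
{
	st(3, 42);
	printf("slot 3 = %u, slot 5 = %u\n", ld(3), ld(5));  /* 42, 0 */
	return 0;
}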
diff --git a/net/core/pktgen.c b/net/core/pktgen.c index fbce4b05a53e..33bc3823ac6f 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c | |||
@@ -887,7 +887,7 @@ static ssize_t pktgen_if_write(struct file *file, | |||
887 | i += len; | 887 | i += len; |
888 | 888 | ||
889 | if (debug) { | 889 | if (debug) { |
890 | size_t copy = min(count, 1023); | 890 | size_t copy = min_t(size_t, count, 1023); |
891 | char tb[copy + 1]; | 891 | char tb[copy + 1]; |
892 | if (copy_from_user(tb, user_buffer, copy)) | 892 | if (copy_from_user(tb, user_buffer, copy)) |
893 | return -EFAULT; | 893 | return -EFAULT; |
@@ -2612,8 +2612,8 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, | |||
2612 | /* Update any of the values, used when we're incrementing various | 2612 | /* Update any of the values, used when we're incrementing various |
2613 | * fields. | 2613 | * fields. |
2614 | */ | 2614 | */ |
2615 | queue_map = pkt_dev->cur_queue_map; | ||
2616 | mod_cur_headers(pkt_dev); | 2615 | mod_cur_headers(pkt_dev); |
2616 | queue_map = pkt_dev->cur_queue_map; | ||
2617 | 2617 | ||
2618 | datalen = (odev->hard_header_len + 16) & ~0xf; | 2618 | datalen = (odev->hard_header_len + 16) & ~0xf; |
2619 | 2619 | ||
@@ -2976,8 +2976,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, | |||
2976 | /* Update any of the values, used when we're incrementing various | 2976 | /* Update any of the values, used when we're incrementing various |
2977 | * fields. | 2977 | * fields. |
2978 | */ | 2978 | */ |
2979 | queue_map = pkt_dev->cur_queue_map; | ||
2980 | mod_cur_headers(pkt_dev); | 2979 | mod_cur_headers(pkt_dev); |
2980 | queue_map = pkt_dev->cur_queue_map; | ||
2981 | 2981 | ||
2982 | skb = __netdev_alloc_skb(odev, | 2982 | skb = __netdev_alloc_skb(odev, |
2983 | pkt_dev->cur_pkt_size + 64 | 2983 | pkt_dev->cur_pkt_size + 64 |
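The two pktgen hunks are a pure ordering fix: the queue_map snapshot was taken before mod_cur_headers() had recomputed cur_queue_map, so a packet could be queued using the previous packet's value. A compressed sketch of the corrected order (field names reused for orientation, the surrounding packet-building logic elided):

#include <stdio.h>

struct pkt_state { unsigned int cur_queue_map; };

/* Stand-in for mod_cur_headers(): it recomputes cur_queue_map along with the
 * other per-packet header fields. */
static void mod_cur_headers(struct pkt_state *s)
{
	s->cur_queue_map = (s->cur_queue_map + 1) % 4;
}

static unsigned int pick_queue(struct pkt_state *s)
{
	mod_cur_headers(s);          /* update first ...                   */
	return s->cur_queue_map;     /* ... then snapshot the fresh value  */
}

int main(void)
{
	struct pkt_state s = { 0 };
	printf("tx queue for this packet: %u\n", pick_queue(&s));
	return 0;
}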
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 8121268ddbdd..841c287ef40a 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -347,16 +347,17 @@ static size_t rtnl_link_get_size(const struct net_device *dev) | |||
347 | if (!ops) | 347 | if (!ops) |
348 | return 0; | 348 | return 0; |
349 | 349 | ||
350 | size = nlmsg_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */ | 350 | size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */ |
351 | nlmsg_total_size(strlen(ops->kind) + 1); /* IFLA_INFO_KIND */ | 351 | nla_total_size(strlen(ops->kind) + 1); /* IFLA_INFO_KIND */ |
352 | 352 | ||
353 | if (ops->get_size) | 353 | if (ops->get_size) |
354 | /* IFLA_INFO_DATA + nested data */ | 354 | /* IFLA_INFO_DATA + nested data */ |
355 | size += nlmsg_total_size(sizeof(struct nlattr)) + | 355 | size += nla_total_size(sizeof(struct nlattr)) + |
356 | ops->get_size(dev); | 356 | ops->get_size(dev); |
357 | 357 | ||
358 | if (ops->get_xstats_size) | 358 | if (ops->get_xstats_size) |
359 | size += ops->get_xstats_size(dev); /* IFLA_INFO_XSTATS */ | 359 | /* IFLA_INFO_XSTATS */ |
360 | size += nla_total_size(ops->get_xstats_size(dev)); | ||
360 | 361 | ||
361 | return size; | 362 | return size; |
362 | } | 363 | } |
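The rtnetlink.c hunk fixes the size estimate used when reserving space for IFLA_LINKINFO: attribute payloads must be accounted with nla_total_size() (attribute header plus padded payload), and the driver's xstats payload needs the same wrapping. A hedged restatement of the helper's arithmetic:

#include <stdio.h>

#define NLA_HDRLEN 4                         /* sizeof(struct nlattr), aligned */
#define NLA_ALIGN(len) (((len) + 3) & ~3)    /* netlink pads to 4 bytes */

/* nla_total_size(): header plus payload, rounded up to the alignment unit,
 * which is what nla_put() will actually consume in the skb. */
static int nla_total_size(int payload)
{
	return NLA_ALIGN(NLA_HDRLEN + payload);
}

int main(void)
{
	printf("IFLA_INFO_KIND for \"vlan\": %d bytes\n", nla_total_size(5));
	printf("xstats blob of 37 bytes:    %d bytes\n", nla_total_size(37));
	return 0;
}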
diff --git a/net/core/sock.c b/net/core/sock.c index 3eed5424e659..fb6080111461 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -1653,10 +1653,10 @@ int __sk_mem_schedule(struct sock *sk, int size, int kind) | |||
1653 | { | 1653 | { |
1654 | struct proto *prot = sk->sk_prot; | 1654 | struct proto *prot = sk->sk_prot; |
1655 | int amt = sk_mem_pages(size); | 1655 | int amt = sk_mem_pages(size); |
1656 | int allocated; | 1656 | long allocated; |
1657 | 1657 | ||
1658 | sk->sk_forward_alloc += amt * SK_MEM_QUANTUM; | 1658 | sk->sk_forward_alloc += amt * SK_MEM_QUANTUM; |
1659 | allocated = atomic_add_return(amt, prot->memory_allocated); | 1659 | allocated = atomic_long_add_return(amt, prot->memory_allocated); |
1660 | 1660 | ||
1661 | /* Under limit. */ | 1661 | /* Under limit. */ |
1662 | if (allocated <= prot->sysctl_mem[0]) { | 1662 | if (allocated <= prot->sysctl_mem[0]) { |
@@ -1714,7 +1714,7 @@ suppress_allocation: | |||
1714 | 1714 | ||
1715 | /* Alas. Undo changes. */ | 1715 | /* Alas. Undo changes. */ |
1716 | sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM; | 1716 | sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM; |
1717 | atomic_sub(amt, prot->memory_allocated); | 1717 | atomic_long_sub(amt, prot->memory_allocated); |
1718 | return 0; | 1718 | return 0; |
1719 | } | 1719 | } |
1720 | EXPORT_SYMBOL(__sk_mem_schedule); | 1720 | EXPORT_SYMBOL(__sk_mem_schedule); |
@@ -1727,12 +1727,12 @@ void __sk_mem_reclaim(struct sock *sk) | |||
1727 | { | 1727 | { |
1728 | struct proto *prot = sk->sk_prot; | 1728 | struct proto *prot = sk->sk_prot; |
1729 | 1729 | ||
1730 | atomic_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT, | 1730 | atomic_long_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT, |
1731 | prot->memory_allocated); | 1731 | prot->memory_allocated); |
1732 | sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1; | 1732 | sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1; |
1733 | 1733 | ||
1734 | if (prot->memory_pressure && *prot->memory_pressure && | 1734 | if (prot->memory_pressure && *prot->memory_pressure && |
1735 | (atomic_read(prot->memory_allocated) < prot->sysctl_mem[0])) | 1735 | (atomic_long_read(prot->memory_allocated) < prot->sysctl_mem[0])) |
1736 | *prot->memory_pressure = 0; | 1736 | *prot->memory_pressure = 0; |
1737 | } | 1737 | } |
1738 | EXPORT_SYMBOL(__sk_mem_reclaim); | 1738 | EXPORT_SYMBOL(__sk_mem_reclaim); |
@@ -2452,12 +2452,12 @@ static char proto_method_implemented(const void *method) | |||
2452 | 2452 | ||
2453 | static void proto_seq_printf(struct seq_file *seq, struct proto *proto) | 2453 | static void proto_seq_printf(struct seq_file *seq, struct proto *proto) |
2454 | { | 2454 | { |
2455 | seq_printf(seq, "%-9s %4u %6d %6d %-3s %6u %-3s %-10s " | 2455 | seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s " |
2456 | "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n", | 2456 | "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n", |
2457 | proto->name, | 2457 | proto->name, |
2458 | proto->obj_size, | 2458 | proto->obj_size, |
2459 | sock_prot_inuse_get(seq_file_net(seq), proto), | 2459 | sock_prot_inuse_get(seq_file_net(seq), proto), |
2460 | proto->memory_allocated != NULL ? atomic_read(proto->memory_allocated) : -1, | 2460 | proto->memory_allocated != NULL ? atomic_long_read(proto->memory_allocated) : -1L, |
2461 | proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI", | 2461 | proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI", |
2462 | proto->max_header, | 2462 | proto->max_header, |
2463 | proto->slab == NULL ? "no" : "yes", | 2463 | proto->slab == NULL ? "no" : "yes", |
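The sock.c hunk is part of a wider switch (continued in the decnet, ipv4 and sctp hunks below) from int/atomic_t to long/atomic_long_t for per-protocol memory accounting: the counters hold page counts, and on large-memory 64-bit hosts the totals and the sysctl limits derived from them can exceed INT_MAX. A small illustration of the headroom problem:

#include <stdio.h>
#include <limits.h>

int main(void)
{
	/* 16 TB of memory expressed as 4 KiB pages, the unit these counters use. */
	long pages = (16L << 40) / 4096;

	printf("pages needed: %ld (INT_MAX = %d)\n", pages, INT_MAX);
	printf("fits in int?  %s\n", pages <= INT_MAX ? "yes" : "no");
	return 0;
}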
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index d6b93d19790f..a76b78de679f 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c | |||
@@ -155,7 +155,7 @@ static const struct proto_ops dn_proto_ops; | |||
155 | static DEFINE_RWLOCK(dn_hash_lock); | 155 | static DEFINE_RWLOCK(dn_hash_lock); |
156 | static struct hlist_head dn_sk_hash[DN_SK_HASH_SIZE]; | 156 | static struct hlist_head dn_sk_hash[DN_SK_HASH_SIZE]; |
157 | static struct hlist_head dn_wild_sk; | 157 | static struct hlist_head dn_wild_sk; |
158 | static atomic_t decnet_memory_allocated; | 158 | static atomic_long_t decnet_memory_allocated; |
159 | 159 | ||
160 | static int __dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen, int flags); | 160 | static int __dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen, int flags); |
161 | static int __dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen, int flags); | 161 | static int __dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen, int flags); |
diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c index be3eb8e23288..28f8b5e5f73b 100644 --- a/net/decnet/sysctl_net_decnet.c +++ b/net/decnet/sysctl_net_decnet.c | |||
@@ -38,7 +38,7 @@ int decnet_log_martians = 1; | |||
38 | int decnet_no_fc_max_cwnd = NSP_MIN_WINDOW; | 38 | int decnet_no_fc_max_cwnd = NSP_MIN_WINDOW; |
39 | 39 | ||
40 | /* Reasonable defaults, I hope, based on tcp's defaults */ | 40 | /* Reasonable defaults, I hope, based on tcp's defaults */ |
41 | int sysctl_decnet_mem[3] = { 768 << 3, 1024 << 3, 1536 << 3 }; | 41 | long sysctl_decnet_mem[3] = { 768 << 3, 1024 << 3, 1536 << 3 }; |
42 | int sysctl_decnet_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 }; | 42 | int sysctl_decnet_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 }; |
43 | int sysctl_decnet_rmem[3] = { 4 * 1024, 87380, 87380 * 2 }; | 43 | int sysctl_decnet_rmem[3] = { 4 * 1024, 87380, 87380 * 2 }; |
44 | 44 | ||
@@ -324,7 +324,7 @@ static ctl_table dn_table[] = { | |||
324 | .data = &sysctl_decnet_mem, | 324 | .data = &sysctl_decnet_mem, |
325 | .maxlen = sizeof(sysctl_decnet_mem), | 325 | .maxlen = sizeof(sysctl_decnet_mem), |
326 | .mode = 0644, | 326 | .mode = 0644, |
327 | .proc_handler = proc_dointvec, | 327 | .proc_handler = proc_doulongvec_minmax |
328 | }, | 328 | }, |
329 | { | 329 | { |
330 | .procname = "decnet_rmem", | 330 | .procname = "decnet_rmem", |
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index c8877c6c7216..3c53c2d89e3b 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c | |||
@@ -2306,10 +2306,8 @@ void ip_mc_drop_socket(struct sock *sk) | |||
2306 | 2306 | ||
2307 | in_dev = inetdev_by_index(net, iml->multi.imr_ifindex); | 2307 | in_dev = inetdev_by_index(net, iml->multi.imr_ifindex); |
2308 | (void) ip_mc_leave_src(sk, iml, in_dev); | 2308 | (void) ip_mc_leave_src(sk, iml, in_dev); |
2309 | if (in_dev != NULL) { | 2309 | if (in_dev != NULL) |
2310 | ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr); | 2310 | ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr); |
2311 | in_dev_put(in_dev); | ||
2312 | } | ||
2313 | /* decrease mem now to avoid the memleak warning */ | 2311 | /* decrease mem now to avoid the memleak warning */ |
2314 | atomic_sub(sizeof(*iml), &sk->sk_omem_alloc); | 2312 | atomic_sub(sizeof(*iml), &sk->sk_omem_alloc); |
2315 | call_rcu(&iml->rcu, ip_mc_socklist_reclaim); | 2313 | call_rcu(&iml->rcu, ip_mc_socklist_reclaim); |
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 4ae1f203f7cb..1b48eb1ed453 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c | |||
@@ -59,13 +59,13 @@ static int sockstat_seq_show(struct seq_file *seq, void *v) | |||
59 | local_bh_enable(); | 59 | local_bh_enable(); |
60 | 60 | ||
61 | socket_seq_show(seq); | 61 | socket_seq_show(seq); |
62 | seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %d\n", | 62 | seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %ld\n", |
63 | sock_prot_inuse_get(net, &tcp_prot), orphans, | 63 | sock_prot_inuse_get(net, &tcp_prot), orphans, |
64 | tcp_death_row.tw_count, sockets, | 64 | tcp_death_row.tw_count, sockets, |
65 | atomic_read(&tcp_memory_allocated)); | 65 | atomic_long_read(&tcp_memory_allocated)); |
66 | seq_printf(seq, "UDP: inuse %d mem %d\n", | 66 | seq_printf(seq, "UDP: inuse %d mem %ld\n", |
67 | sock_prot_inuse_get(net, &udp_prot), | 67 | sock_prot_inuse_get(net, &udp_prot), |
68 | atomic_read(&udp_memory_allocated)); | 68 | atomic_long_read(&udp_memory_allocated)); |
69 | seq_printf(seq, "UDPLITE: inuse %d\n", | 69 | seq_printf(seq, "UDPLITE: inuse %d\n", |
70 | sock_prot_inuse_get(net, &udplite_prot)); | 70 | sock_prot_inuse_get(net, &udplite_prot)); |
71 | seq_printf(seq, "RAW: inuse %d\n", | 71 | seq_printf(seq, "RAW: inuse %d\n", |
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index d96c1da4b17c..e91911d7aae2 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c | |||
@@ -398,7 +398,7 @@ static struct ctl_table ipv4_table[] = { | |||
398 | .data = &sysctl_tcp_mem, | 398 | .data = &sysctl_tcp_mem, |
399 | .maxlen = sizeof(sysctl_tcp_mem), | 399 | .maxlen = sizeof(sysctl_tcp_mem), |
400 | .mode = 0644, | 400 | .mode = 0644, |
401 | .proc_handler = proc_dointvec | 401 | .proc_handler = proc_doulongvec_minmax |
402 | }, | 402 | }, |
403 | { | 403 | { |
404 | .procname = "tcp_wmem", | 404 | .procname = "tcp_wmem", |
@@ -602,8 +602,7 @@ static struct ctl_table ipv4_table[] = { | |||
602 | .data = &sysctl_udp_mem, | 602 | .data = &sysctl_udp_mem, |
603 | .maxlen = sizeof(sysctl_udp_mem), | 603 | .maxlen = sizeof(sysctl_udp_mem), |
604 | .mode = 0644, | 604 | .mode = 0644, |
605 | .proc_handler = proc_dointvec_minmax, | 605 | .proc_handler = proc_doulongvec_minmax, |
606 | .extra1 = &zero | ||
607 | }, | 606 | }, |
608 | { | 607 | { |
609 | .procname = "udp_rmem_min", | 608 | .procname = "udp_rmem_min", |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 1664a0590bb8..081419969485 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -282,7 +282,7 @@ int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT; | |||
282 | struct percpu_counter tcp_orphan_count; | 282 | struct percpu_counter tcp_orphan_count; |
283 | EXPORT_SYMBOL_GPL(tcp_orphan_count); | 283 | EXPORT_SYMBOL_GPL(tcp_orphan_count); |
284 | 284 | ||
285 | int sysctl_tcp_mem[3] __read_mostly; | 285 | long sysctl_tcp_mem[3] __read_mostly; |
286 | int sysctl_tcp_wmem[3] __read_mostly; | 286 | int sysctl_tcp_wmem[3] __read_mostly; |
287 | int sysctl_tcp_rmem[3] __read_mostly; | 287 | int sysctl_tcp_rmem[3] __read_mostly; |
288 | 288 | ||
@@ -290,7 +290,7 @@ EXPORT_SYMBOL(sysctl_tcp_mem); | |||
290 | EXPORT_SYMBOL(sysctl_tcp_rmem); | 290 | EXPORT_SYMBOL(sysctl_tcp_rmem); |
291 | EXPORT_SYMBOL(sysctl_tcp_wmem); | 291 | EXPORT_SYMBOL(sysctl_tcp_wmem); |
292 | 292 | ||
293 | atomic_t tcp_memory_allocated; /* Current allocated memory. */ | 293 | atomic_long_t tcp_memory_allocated; /* Current allocated memory. */ |
294 | EXPORT_SYMBOL(tcp_memory_allocated); | 294 | EXPORT_SYMBOL(tcp_memory_allocated); |
295 | 295 | ||
296 | /* | 296 | /* |
@@ -2246,7 +2246,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, | |||
2246 | /* Values greater than interface MTU won't take effect. However | 2246 | /* Values greater than interface MTU won't take effect. However |
2247 | * at the point when this call is done we typically don't yet | 2247 | * at the point when this call is done we typically don't yet |
2248 | * know which interface is going to be used */ | 2248 | * know which interface is going to be used */ |
2249 | if (val < 8 || val > MAX_TCP_WINDOW) { | 2249 | if (val < 64 || val > MAX_TCP_WINDOW) { |
2250 | err = -EINVAL; | 2250 | err = -EINVAL; |
2251 | break; | 2251 | break; |
2252 | } | 2252 | } |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 3357f69e353d..6d8ab1c4efc3 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -259,8 +259,11 @@ static void tcp_fixup_sndbuf(struct sock *sk) | |||
259 | int sndmem = tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER + 16 + | 259 | int sndmem = tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER + 16 + |
260 | sizeof(struct sk_buff); | 260 | sizeof(struct sk_buff); |
261 | 261 | ||
262 | if (sk->sk_sndbuf < 3 * sndmem) | 262 | if (sk->sk_sndbuf < 3 * sndmem) { |
263 | sk->sk_sndbuf = min(3 * sndmem, sysctl_tcp_wmem[2]); | 263 | sk->sk_sndbuf = 3 * sndmem; |
264 | if (sk->sk_sndbuf > sysctl_tcp_wmem[2]) | ||
265 | sk->sk_sndbuf = sysctl_tcp_wmem[2]; | ||
266 | } | ||
264 | } | 267 | } |
265 | 268 | ||
266 | /* 2. Tuning advertised window (window_clamp, rcv_ssthresh) | 269 | /* 2. Tuning advertised window (window_clamp, rcv_ssthresh) |
@@ -396,7 +399,7 @@ static void tcp_clamp_window(struct sock *sk) | |||
396 | if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] && | 399 | if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] && |
397 | !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) && | 400 | !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) && |
398 | !tcp_memory_pressure && | 401 | !tcp_memory_pressure && |
399 | atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) { | 402 | atomic_long_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) { |
400 | sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc), | 403 | sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc), |
401 | sysctl_tcp_rmem[2]); | 404 | sysctl_tcp_rmem[2]); |
402 | } | 405 | } |
@@ -4861,7 +4864,7 @@ static int tcp_should_expand_sndbuf(struct sock *sk) | |||
4861 | return 0; | 4864 | return 0; |
4862 | 4865 | ||
4863 | /* If we are under soft global TCP memory pressure, do not expand. */ | 4866 | /* If we are under soft global TCP memory pressure, do not expand. */ |
4864 | if (atomic_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0]) | 4867 | if (atomic_long_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0]) |
4865 | return 0; | 4868 | return 0; |
4866 | 4869 | ||
4867 | /* If we filled the congestion window, do not expand. */ | 4870 | /* If we filled the congestion window, do not expand. */ |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 8f8527d41682..69ccbc1dde9c 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -415,6 +415,9 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) | |||
415 | !icsk->icsk_backoff) | 415 | !icsk->icsk_backoff) |
416 | break; | 416 | break; |
417 | 417 | ||
418 | if (sock_owned_by_user(sk)) | ||
419 | break; | ||
420 | |||
418 | icsk->icsk_backoff--; | 421 | icsk->icsk_backoff--; |
419 | inet_csk(sk)->icsk_rto = __tcp_set_rto(tp) << | 422 | inet_csk(sk)->icsk_rto = __tcp_set_rto(tp) << |
420 | icsk->icsk_backoff; | 423 | icsk->icsk_backoff; |
@@ -429,11 +432,6 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) | |||
429 | if (remaining) { | 432 | if (remaining) { |
430 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, | 433 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, |
431 | remaining, TCP_RTO_MAX); | 434 | remaining, TCP_RTO_MAX); |
432 | } else if (sock_owned_by_user(sk)) { | ||
433 | /* RTO revert clocked out retransmission, | ||
434 | * but socket is locked. Will defer. */ | ||
435 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, | ||
436 | HZ/20, TCP_RTO_MAX); | ||
437 | } else { | 435 | } else { |
438 | /* RTO revert clocked out retransmission. | 436 | /* RTO revert clocked out retransmission. |
439 | * Will retransmit now */ | 437 | * Will retransmit now */ |
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 28cb2d733a3c..5e0a3a582a59 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -110,7 +110,7 @@ | |||
110 | struct udp_table udp_table __read_mostly; | 110 | struct udp_table udp_table __read_mostly; |
111 | EXPORT_SYMBOL(udp_table); | 111 | EXPORT_SYMBOL(udp_table); |
112 | 112 | ||
113 | int sysctl_udp_mem[3] __read_mostly; | 113 | long sysctl_udp_mem[3] __read_mostly; |
114 | EXPORT_SYMBOL(sysctl_udp_mem); | 114 | EXPORT_SYMBOL(sysctl_udp_mem); |
115 | 115 | ||
116 | int sysctl_udp_rmem_min __read_mostly; | 116 | int sysctl_udp_rmem_min __read_mostly; |
@@ -119,7 +119,7 @@ EXPORT_SYMBOL(sysctl_udp_rmem_min); | |||
119 | int sysctl_udp_wmem_min __read_mostly; | 119 | int sysctl_udp_wmem_min __read_mostly; |
120 | EXPORT_SYMBOL(sysctl_udp_wmem_min); | 120 | EXPORT_SYMBOL(sysctl_udp_wmem_min); |
121 | 121 | ||
122 | atomic_t udp_memory_allocated; | 122 | atomic_long_t udp_memory_allocated; |
123 | EXPORT_SYMBOL(udp_memory_allocated); | 123 | EXPORT_SYMBOL(udp_memory_allocated); |
124 | 124 | ||
125 | #define MAX_UDP_PORTS 65536 | 125 | #define MAX_UDP_PORTS 65536 |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index e048ec62d109..b41ce0f0d514 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -2740,10 +2740,6 @@ static int addrconf_ifdown(struct net_device *dev, int how) | |||
2740 | /* Flag it for later restoration when link comes up */ | 2740 | /* Flag it for later restoration when link comes up */ |
2741 | ifa->flags |= IFA_F_TENTATIVE; | 2741 | ifa->flags |= IFA_F_TENTATIVE; |
2742 | ifa->state = INET6_IFADDR_STATE_DAD; | 2742 | ifa->state = INET6_IFADDR_STATE_DAD; |
2743 | |||
2744 | write_unlock_bh(&idev->lock); | ||
2745 | |||
2746 | in6_ifa_hold(ifa); | ||
2747 | } else { | 2743 | } else { |
2748 | list_del(&ifa->if_list); | 2744 | list_del(&ifa->if_list); |
2749 | 2745 | ||
@@ -2758,19 +2754,15 @@ static int addrconf_ifdown(struct net_device *dev, int how) | |||
2758 | ifa->state = INET6_IFADDR_STATE_DEAD; | 2754 | ifa->state = INET6_IFADDR_STATE_DEAD; |
2759 | spin_unlock_bh(&ifa->state_lock); | 2755 | spin_unlock_bh(&ifa->state_lock); |
2760 | 2756 | ||
2761 | if (state == INET6_IFADDR_STATE_DEAD) | 2757 | if (state == INET6_IFADDR_STATE_DEAD) { |
2762 | goto put_ifa; | 2758 | in6_ifa_put(ifa); |
2759 | } else { | ||
2760 | __ipv6_ifa_notify(RTM_DELADDR, ifa); | ||
2761 | atomic_notifier_call_chain(&inet6addr_chain, | ||
2762 | NETDEV_DOWN, ifa); | ||
2763 | } | ||
2764 | write_lock_bh(&idev->lock); | ||
2763 | } | 2765 | } |
2764 | |||
2765 | __ipv6_ifa_notify(RTM_DELADDR, ifa); | ||
2766 | if (ifa->state == INET6_IFADDR_STATE_DEAD) | ||
2767 | atomic_notifier_call_chain(&inet6addr_chain, | ||
2768 | NETDEV_DOWN, ifa); | ||
2769 | |||
2770 | put_ifa: | ||
2771 | in6_ifa_put(ifa); | ||
2772 | |||
2773 | write_lock_bh(&idev->lock); | ||
2774 | } | 2766 | } |
2775 | 2767 | ||
2776 | list_splice(&keep_list, &idev->addr_list); | 2768 | list_splice(&keep_list, &idev->addr_list); |
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 3a3f129a44cb..79d43aa8fa8d 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c | |||
@@ -286,7 +286,7 @@ found: | |||
286 | 286 | ||
287 | /* Check for overlap with preceding fragment. */ | 287 | /* Check for overlap with preceding fragment. */ |
288 | if (prev && | 288 | if (prev && |
289 | (NFCT_FRAG6_CB(prev)->offset + prev->len) - offset > 0) | 289 | (NFCT_FRAG6_CB(prev)->offset + prev->len) > offset) |
290 | goto discard_fq; | 290 | goto discard_fq; |
291 | 291 | ||
292 | /* Look for overlap with succeeding segment. */ | 292 | /* Look for overlap with succeeding segment. */ |
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index c7ba3149633f..0f2766453759 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c | |||
@@ -349,7 +349,7 @@ found: | |||
349 | 349 | ||
350 | /* Check for overlap with preceding fragment. */ | 350 | /* Check for overlap with preceding fragment. */ |
351 | if (prev && | 351 | if (prev && |
352 | (FRAG6_CB(prev)->offset + prev->len) - offset > 0) | 352 | (FRAG6_CB(prev)->offset + prev->len) > offset) |
353 | goto discard_fq; | 353 | goto discard_fq; |
354 | 354 | ||
355 | /* Look for overlap with succeeding segment. */ | 355 | /* Look for overlap with succeeding segment. */ |
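Both reassembly hunks replace a subtract-and-compare with a direct comparison. The point is integer promotion: prev->len is unsigned, so the difference is computed in unsigned arithmetic and can never be negative, making "> 0" true for every non-equal pair and rejecting perfectly valid fragments. A minimal demonstration of the trap:

#include <stdio.h>

int main(void)
{
	unsigned int prev_end = 100;   /* prev->offset + prev->len, len is unsigned */
	int offset = 200;              /* start of the new fragment */

	/* Old form: offset is promoted to unsigned, 100 - 200 wraps to a huge
	 * positive value and the overlap check misfires. */
	if ((prev_end - offset) > 0)
		printf("subtraction form: claims overlap (wrong)\n");

	/* New form: compare the positions directly, no wraparound involved. */
	if (prev_end > (unsigned int)offset)
		printf("direct form: claims overlap\n");
	else
		printf("direct form: no overlap (right)\n");
	return 0;
}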
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index fc328339be99..96455ffb76fb 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -1945,8 +1945,12 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, | |||
1945 | struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops); | 1945 | struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops); |
1946 | struct neighbour *neigh; | 1946 | struct neighbour *neigh; |
1947 | 1947 | ||
1948 | if (rt == NULL) | 1948 | if (rt == NULL) { |
1949 | if (net_ratelimit()) | ||
1950 | pr_warning("IPv6: Maximum number of routes reached," | ||
1951 | " consider increasing route/max_size.\n"); | ||
1949 | return ERR_PTR(-ENOMEM); | 1952 | return ERR_PTR(-ENOMEM); |
1953 | } | ||
1950 | 1954 | ||
1951 | dev_hold(net->loopback_dev); | 1955 | dev_hold(net->loopback_dev); |
1952 | in6_dev_hold(idev); | 1956 | in6_dev_hold(idev); |
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index f9163b12c7f1..7aa85591dbe7 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
@@ -391,6 +391,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, | |||
391 | u32 hw_reconf_flags = 0; | 391 | u32 hw_reconf_flags = 0; |
392 | int i; | 392 | int i; |
393 | 393 | ||
394 | if (local->scan_sdata == sdata) | ||
395 | ieee80211_scan_cancel(local); | ||
396 | |||
394 | clear_bit(SDATA_STATE_RUNNING, &sdata->state); | 397 | clear_bit(SDATA_STATE_RUNNING, &sdata->state); |
395 | 398 | ||
396 | /* | 399 | /* |
@@ -523,9 +526,6 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, | |||
523 | synchronize_rcu(); | 526 | synchronize_rcu(); |
524 | skb_queue_purge(&sdata->skb_queue); | 527 | skb_queue_purge(&sdata->skb_queue); |
525 | 528 | ||
526 | if (local->scan_sdata == sdata) | ||
527 | ieee80211_scan_cancel(local); | ||
528 | |||
529 | /* | 529 | /* |
530 | * Disable beaconing here for mesh only, AP and IBSS | 530 | * Disable beaconing here for mesh only, AP and IBSS |
531 | * are already taken care of. | 531 | * are already taken care of. |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 3616f27b9d46..8298e676f5a0 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -1610,9 +1610,11 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1610 | 1610 | ||
1611 | err = -EINVAL; | 1611 | err = -EINVAL; |
1612 | vnet_hdr_len = sizeof(vnet_hdr); | 1612 | vnet_hdr_len = sizeof(vnet_hdr); |
1613 | if ((len -= vnet_hdr_len) < 0) | 1613 | if (len < vnet_hdr_len) |
1614 | goto out_free; | 1614 | goto out_free; |
1615 | 1615 | ||
1616 | len -= vnet_hdr_len; | ||
1617 | |||
1616 | if (skb_is_gso(skb)) { | 1618 | if (skb_is_gso(skb)) { |
1617 | struct skb_shared_info *sinfo = skb_shinfo(skb); | 1619 | struct skb_shared_info *sinfo = skb_shinfo(skb); |
1618 | 1620 | ||
@@ -1719,7 +1721,7 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr, | |||
1719 | rcu_read_lock(); | 1721 | rcu_read_lock(); |
1720 | dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex); | 1722 | dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex); |
1721 | if (dev) | 1723 | if (dev) |
1722 | strlcpy(uaddr->sa_data, dev->name, 15); | 1724 | strncpy(uaddr->sa_data, dev->name, 14); |
1723 | else | 1725 | else |
1724 | memset(uaddr->sa_data, 0, 14); | 1726 | memset(uaddr->sa_data, 0, 14); |
1725 | rcu_read_unlock(); | 1727 | rcu_read_unlock(); |
@@ -1742,6 +1744,7 @@ static int packet_getname(struct socket *sock, struct sockaddr *uaddr, | |||
1742 | sll->sll_family = AF_PACKET; | 1744 | sll->sll_family = AF_PACKET; |
1743 | sll->sll_ifindex = po->ifindex; | 1745 | sll->sll_ifindex = po->ifindex; |
1744 | sll->sll_protocol = po->num; | 1746 | sll->sll_protocol = po->num; |
1747 | sll->sll_pkttype = 0; | ||
1745 | rcu_read_lock(); | 1748 | rcu_read_lock(); |
1746 | dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex); | 1749 | dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex); |
1747 | if (dev) { | 1750 | if (dev) { |
diff --git a/net/rds/message.c b/net/rds/message.c index 848cff45183b..1fd3d29023d7 100644 --- a/net/rds/message.c +++ b/net/rds/message.c | |||
@@ -249,8 +249,10 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in | |||
249 | rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len); | 249 | rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len); |
250 | rm->data.op_nents = ceil(total_len, PAGE_SIZE); | 250 | rm->data.op_nents = ceil(total_len, PAGE_SIZE); |
251 | rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs); | 251 | rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs); |
252 | if (!rm->data.op_sg) | 252 | if (!rm->data.op_sg) { |
253 | rds_message_put(rm); | ||
253 | return ERR_PTR(-ENOMEM); | 254 | return ERR_PTR(-ENOMEM); |
255 | } | ||
254 | 256 | ||
255 | for (i = 0; i < rm->data.op_nents; ++i) { | 257 | for (i = 0; i < rm->data.op_nents; ++i) { |
256 | sg_set_page(&rm->data.op_sg[i], | 258 | sg_set_page(&rm->data.op_sg[i], |
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c index efd4f95fd050..f23d9155b1ef 100644 --- a/net/sched/cls_basic.c +++ b/net/sched/cls_basic.c | |||
@@ -268,6 +268,10 @@ static int basic_dump(struct tcf_proto *tp, unsigned long fh, | |||
268 | goto nla_put_failure; | 268 | goto nla_put_failure; |
269 | 269 | ||
270 | nla_nest_end(skb, nest); | 270 | nla_nest_end(skb, nest); |
271 | |||
272 | if (tcf_exts_dump_stats(skb, &f->exts, &basic_ext_map) < 0) | ||
273 | goto nla_put_failure; | ||
274 | |||
271 | return skb->len; | 275 | return skb->len; |
272 | 276 | ||
273 | nla_put_failure: | 277 | nla_put_failure: |
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 1ef29c74d85e..e58f9476f29c 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c | |||
@@ -92,7 +92,7 @@ static struct sctp_af *sctp_af_v6_specific; | |||
92 | struct kmem_cache *sctp_chunk_cachep __read_mostly; | 92 | struct kmem_cache *sctp_chunk_cachep __read_mostly; |
93 | struct kmem_cache *sctp_bucket_cachep __read_mostly; | 93 | struct kmem_cache *sctp_bucket_cachep __read_mostly; |
94 | 94 | ||
95 | int sysctl_sctp_mem[3]; | 95 | long sysctl_sctp_mem[3]; |
96 | int sysctl_sctp_rmem[3]; | 96 | int sysctl_sctp_rmem[3]; |
97 | int sysctl_sctp_wmem[3]; | 97 | int sysctl_sctp_wmem[3]; |
98 | 98 | ||
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index e34ca9cc1167..6bd554323a34 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -111,12 +111,12 @@ static void sctp_sock_migrate(struct sock *, struct sock *, | |||
111 | static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG; | 111 | static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG; |
112 | 112 | ||
113 | extern struct kmem_cache *sctp_bucket_cachep; | 113 | extern struct kmem_cache *sctp_bucket_cachep; |
114 | extern int sysctl_sctp_mem[3]; | 114 | extern long sysctl_sctp_mem[3]; |
115 | extern int sysctl_sctp_rmem[3]; | 115 | extern int sysctl_sctp_rmem[3]; |
116 | extern int sysctl_sctp_wmem[3]; | 116 | extern int sysctl_sctp_wmem[3]; |
117 | 117 | ||
118 | static int sctp_memory_pressure; | 118 | static int sctp_memory_pressure; |
119 | static atomic_t sctp_memory_allocated; | 119 | static atomic_long_t sctp_memory_allocated; |
120 | struct percpu_counter sctp_sockets_allocated; | 120 | struct percpu_counter sctp_sockets_allocated; |
121 | 121 | ||
122 | static void sctp_enter_memory_pressure(struct sock *sk) | 122 | static void sctp_enter_memory_pressure(struct sock *sk) |
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c index 832590bbe0c0..50cb57f0919e 100644 --- a/net/sctp/sysctl.c +++ b/net/sctp/sysctl.c | |||
@@ -54,7 +54,7 @@ static int sack_timer_max = 500; | |||
54 | static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */ | 54 | static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */ |
55 | static int rwnd_scale_max = 16; | 55 | static int rwnd_scale_max = 16; |
56 | 56 | ||
57 | extern int sysctl_sctp_mem[3]; | 57 | extern long sysctl_sctp_mem[3]; |
58 | extern int sysctl_sctp_rmem[3]; | 58 | extern int sysctl_sctp_rmem[3]; |
59 | extern int sysctl_sctp_wmem[3]; | 59 | extern int sysctl_sctp_wmem[3]; |
60 | 60 | ||
@@ -203,7 +203,7 @@ static ctl_table sctp_table[] = { | |||
203 | .data = &sysctl_sctp_mem, | 203 | .data = &sysctl_sctp_mem, |
204 | .maxlen = sizeof(sysctl_sctp_mem), | 204 | .maxlen = sizeof(sysctl_sctp_mem), |
205 | .mode = 0644, | 205 | .mode = 0644, |
206 | .proc_handler = proc_dointvec, | 206 | .proc_handler = proc_doulongvec_minmax |
207 | }, | 207 | }, |
208 | { | 208 | { |
209 | .procname = "sctp_rmem", | 209 | .procname = "sctp_rmem", |
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 33217fc3d697..e9f0d5004483 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -396,6 +396,7 @@ static int get_name(struct socket *sock, struct sockaddr *uaddr, | |||
396 | struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; | 396 | struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; |
397 | struct tipc_sock *tsock = tipc_sk(sock->sk); | 397 | struct tipc_sock *tsock = tipc_sk(sock->sk); |
398 | 398 | ||
399 | memset(addr, 0, sizeof(*addr)); | ||
399 | if (peer) { | 400 | if (peer) { |
400 | if ((sock->state != SS_CONNECTED) && | 401 | if ((sock->state != SS_CONNECTED) && |
401 | ((peer != 2) || (sock->state != SS_DISCONNECTING))) | 402 | ((peer != 2) || (sock->state != SS_DISCONNECTING))) |
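The tipc hunk zeroes the sockaddr before filling it in, because not every member is written on every path and the structure is copied back to user space, so stale kernel stack bytes could leak. A hedged sketch of the pattern (the struct below is a simplified stand-in, not the real sockaddr_tipc layout):

#include <stdio.h>
#include <string.h>

struct fake_sockaddr {
	unsigned short family;
	unsigned char  scope;      /* not set on every path */
	unsigned int   ref, node;
};

static void get_name(struct fake_sockaddr *addr, int peer)
{
	memset(addr, 0, sizeof(*addr));   /* clear padding and unset members */
	addr->family = 30;                /* AF_TIPC, for illustration */
	if (peer)
		addr->scope = 2;
}

int main(void)
{
	struct fake_sockaddr a;

	get_name(&a, 0);
	printf("scope reads back as %u, not stack garbage\n", a.scope);
	return 0;
}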
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index c506241f8637..4e78e3f26798 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -224,8 +224,8 @@ static int nl80211_prepare_netdev_dump(struct sk_buff *skb, | |||
224 | } | 224 | } |
225 | 225 | ||
226 | *rdev = cfg80211_get_dev_from_ifindex(sock_net(skb->sk), ifidx); | 226 | *rdev = cfg80211_get_dev_from_ifindex(sock_net(skb->sk), ifidx); |
227 | if (IS_ERR(dev)) { | 227 | if (IS_ERR(*rdev)) { |
228 | err = PTR_ERR(dev); | 228 | err = PTR_ERR(*rdev); |
229 | goto out_rtnl; | 229 | goto out_rtnl; |
230 | } | 230 | } |
231 | 231 | ||
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c index 3a8c4c419cd4..55187c8f6420 100644 --- a/net/x25/x25_facilities.c +++ b/net/x25/x25_facilities.c | |||
@@ -61,6 +61,8 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, | |||
61 | while (len > 0) { | 61 | while (len > 0) { |
62 | switch (*p & X25_FAC_CLASS_MASK) { | 62 | switch (*p & X25_FAC_CLASS_MASK) { |
63 | case X25_FAC_CLASS_A: | 63 | case X25_FAC_CLASS_A: |
64 | if (len < 2) | ||
65 | return 0; | ||
64 | switch (*p) { | 66 | switch (*p) { |
65 | case X25_FAC_REVERSE: | 67 | case X25_FAC_REVERSE: |
66 | if((p[1] & 0x81) == 0x81) { | 68 | if((p[1] & 0x81) == 0x81) { |
@@ -104,6 +106,8 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, | |||
104 | len -= 2; | 106 | len -= 2; |
105 | break; | 107 | break; |
106 | case X25_FAC_CLASS_B: | 108 | case X25_FAC_CLASS_B: |
109 | if (len < 3) | ||
110 | return 0; | ||
107 | switch (*p) { | 111 | switch (*p) { |
108 | case X25_FAC_PACKET_SIZE: | 112 | case X25_FAC_PACKET_SIZE: |
109 | facilities->pacsize_in = p[1]; | 113 | facilities->pacsize_in = p[1]; |
@@ -125,6 +129,8 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, | |||
125 | len -= 3; | 129 | len -= 3; |
126 | break; | 130 | break; |
127 | case X25_FAC_CLASS_C: | 131 | case X25_FAC_CLASS_C: |
132 | if (len < 4) | ||
133 | return 0; | ||
128 | printk(KERN_DEBUG "X.25: unknown facility %02X, " | 134 | printk(KERN_DEBUG "X.25: unknown facility %02X, " |
129 | "values %02X, %02X, %02X\n", | 135 | "values %02X, %02X, %02X\n", |
130 | p[0], p[1], p[2], p[3]); | 136 | p[0], p[1], p[2], p[3]); |
@@ -132,6 +138,8 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, | |||
132 | len -= 4; | 138 | len -= 4; |
133 | break; | 139 | break; |
134 | case X25_FAC_CLASS_D: | 140 | case X25_FAC_CLASS_D: |
141 | if (len < p[1] + 2) | ||
142 | return 0; | ||
135 | switch (*p) { | 143 | switch (*p) { |
136 | case X25_FAC_CALLING_AE: | 144 | case X25_FAC_CALLING_AE: |
137 | if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1) | 145 | if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1) |
@@ -149,9 +157,7 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, | |||
149 | break; | 157 | break; |
150 | default: | 158 | default: |
151 | printk(KERN_DEBUG "X.25: unknown facility %02X," | 159 | printk(KERN_DEBUG "X.25: unknown facility %02X," |
152 | "length %d, values %02X, %02X, " | 160 | "length %d\n", p[0], p[1]); |
153 | "%02X, %02X\n", | ||
154 | p[0], p[1], p[2], p[3], p[4], p[5]); | ||
155 | break; | 161 | break; |
156 | } | 162 | } |
157 | len -= p[1] + 2; | 163 | len -= p[1] + 2; |
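The x25_facilities.c hunk adds the missing bounds checks to the facilities parser: each facility class consumes a known number of bytes (class A two, B three, C four, D a length-prefixed amount), and those bytes must be verified against the remaining length before they are read. A compact sketch of a bounds-checked walk over the same layout:

#include <stdio.h>

static int parse(const unsigned char *p, int len)
{
	while (len > 0) {
		int need;

		switch (p[0] & 0xC0) {          /* facility class in the top two bits */
		case 0x00: need = 2; break;     /* class A: code + 1 value byte  */
		case 0x40: need = 3; break;     /* class B: code + 2 value bytes */
		case 0x80: need = 4; break;     /* class C: code + 3 value bytes */
		default:                        /* class D: code + length + data */
			need = (len >= 2) ? 2 + p[1] : len + 1;
			break;
		}
		if (len < need)
			return 0;               /* truncated facility: reject */
		printf("facility %02X, %d bytes\n", p[0], need);
		p += need;
		len -= need;
	}
	return 1;
}

int main(void)
{
	unsigned char ok[]  = { 0x01, 0x00, 0x42, 0x07, 0x07 };
	unsigned char bad[] = { 0x42, 0x07 };   /* class B entry with only 2 bytes */

	printf("ok: %d, bad: %d\n", parse(ok, sizeof(ok)), parse(bad, sizeof(bad)));
	return 0;
}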
diff --git a/security/Kconfig b/security/Kconfig index bd72ae623494..e80da955e687 100644 --- a/security/Kconfig +++ b/security/Kconfig | |||
@@ -39,6 +39,18 @@ config KEYS_DEBUG_PROC_KEYS | |||
39 | 39 | ||
40 | If you are unsure as to whether this is required, answer N. | 40 | If you are unsure as to whether this is required, answer N. |
41 | 41 | ||
42 | config SECURITY_DMESG_RESTRICT | ||
43 | bool "Restrict unprivileged access to the kernel syslog" | ||
44 | default n | ||
45 | help | ||
46 | This enforces restrictions on unprivileged users reading the kernel | ||
47 | syslog via dmesg(8). | ||
48 | |||
49 | If this option is not selected, no restrictions will be enforced | ||
50 | unless the dmesg_restrict sysctl is explicitly set to (1). | ||
51 | |||
52 | If you are unsure how to answer this question, answer N. | ||
53 | |||
42 | config SECURITY | 54 | config SECURITY |
43 | bool "Enable different security models" | 55 | bool "Enable different security models" |
44 | depends on SYSFS | 56 | depends on SYSFS |
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c index cf1de4462ccd..b7106f192b75 100644 --- a/security/apparmor/lsm.c +++ b/security/apparmor/lsm.c | |||
@@ -922,7 +922,7 @@ static int __init apparmor_init(void) | |||
922 | error = register_security(&apparmor_ops); | 922 | error = register_security(&apparmor_ops); |
923 | if (error) { | 923 | if (error) { |
924 | AA_ERROR("Unable to register AppArmor\n"); | 924 | AA_ERROR("Unable to register AppArmor\n"); |
925 | goto register_security_out; | 925 | goto set_init_cxt_out; |
926 | } | 926 | } |
927 | 927 | ||
928 | /* Report that AppArmor successfully initialized */ | 928 | /* Report that AppArmor successfully initialized */ |
@@ -936,6 +936,9 @@ static int __init apparmor_init(void) | |||
936 | 936 | ||
937 | return error; | 937 | return error; |
938 | 938 | ||
939 | set_init_cxt_out: | ||
940 | aa_free_task_context(current->real_cred->security); | ||
941 | |||
939 | register_security_out: | 942 | register_security_out: |
940 | aa_free_root_ns(); | 943 | aa_free_root_ns(); |
941 | 944 | ||
@@ -944,7 +947,6 @@ alloc_out: | |||
944 | 947 | ||
945 | apparmor_enabled = 0; | 948 | apparmor_enabled = 0; |
946 | return error; | 949 | return error; |
947 | |||
948 | } | 950 | } |
949 | 951 | ||
950 | security_initcall(apparmor_init); | 952 | security_initcall(apparmor_init); |
diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c index 52cc865f1464..4f0eadee78b8 100644 --- a/security/apparmor/policy.c +++ b/security/apparmor/policy.c | |||
@@ -306,7 +306,7 @@ static struct aa_namespace *alloc_namespace(const char *prefix, | |||
306 | return ns; | 306 | return ns; |
307 | 307 | ||
308 | fail_unconfined: | 308 | fail_unconfined: |
309 | kzfree(ns->base.name); | 309 | kzfree(ns->base.hname); |
310 | fail_ns: | 310 | fail_ns: |
311 | kzfree(ns); | 311 | kzfree(ns); |
312 | return NULL; | 312 | return NULL; |
diff --git a/security/capability.c b/security/capability.c index 30ae00fbecd5..c773635ca3a0 100644 --- a/security/capability.c +++ b/security/capability.c | |||
@@ -17,6 +17,11 @@ static int cap_sysctl(ctl_table *table, int op) | |||
17 | return 0; | 17 | return 0; |
18 | } | 18 | } |
19 | 19 | ||
20 | static int cap_syslog(int type) | ||
21 | { | ||
22 | return 0; | ||
23 | } | ||
24 | |||
20 | static int cap_quotactl(int cmds, int type, int id, struct super_block *sb) | 25 | static int cap_quotactl(int cmds, int type, int id, struct super_block *sb) |
21 | { | 26 | { |
22 | return 0; | 27 | return 0; |
diff --git a/security/commoncap.c b/security/commoncap.c index 5e632b4857e4..64c2ed9c9015 100644 --- a/security/commoncap.c +++ b/security/commoncap.c | |||
@@ -27,7 +27,6 @@ | |||
27 | #include <linux/sched.h> | 27 | #include <linux/sched.h> |
28 | #include <linux/prctl.h> | 28 | #include <linux/prctl.h> |
29 | #include <linux/securebits.h> | 29 | #include <linux/securebits.h> |
30 | #include <linux/syslog.h> | ||
31 | 30 | ||
32 | /* | 31 | /* |
33 | * If a non-root user executes a setuid-root binary in | 32 | * If a non-root user executes a setuid-root binary in |
@@ -884,24 +883,6 @@ error: | |||
884 | } | 883 | } |
885 | 884 | ||
886 | /** | 885 | /** |
887 | * cap_syslog - Determine whether syslog function is permitted | ||
888 | * @type: Function requested | ||
889 | * @from_file: Whether this request came from an open file (i.e. /proc) | ||
890 | * | ||
891 | * Determine whether the current process is permitted to use a particular | ||
892 | * syslog function, returning 0 if permission is granted, -ve if not. | ||
893 | */ | ||
894 | int cap_syslog(int type, bool from_file) | ||
895 | { | ||
896 | if (type != SYSLOG_ACTION_OPEN && from_file) | ||
897 | return 0; | ||
898 | if ((type != SYSLOG_ACTION_READ_ALL && | ||
899 | type != SYSLOG_ACTION_SIZE_BUFFER) && !capable(CAP_SYS_ADMIN)) | ||
900 | return -EPERM; | ||
901 | return 0; | ||
902 | } | ||
903 | |||
904 | /** | ||
905 | * cap_vm_enough_memory - Determine whether a new virtual mapping is permitted | 886 | * cap_vm_enough_memory - Determine whether a new virtual mapping is permitted |
906 | * @mm: The VM space in which the new mapping is to be made | 887 | * @mm: The VM space in which the new mapping is to be made |
907 | * @pages: The size of the mapping | 888 | * @pages: The size of the mapping |
diff --git a/security/security.c b/security/security.c index 3ef5e2a7a741..1b798d3df710 100644 --- a/security/security.c +++ b/security/security.c | |||
@@ -197,9 +197,9 @@ int security_quota_on(struct dentry *dentry) | |||
197 | return security_ops->quota_on(dentry); | 197 | return security_ops->quota_on(dentry); |
198 | } | 198 | } |
199 | 199 | ||
200 | int security_syslog(int type, bool from_file) | 200 | int security_syslog(int type) |
201 | { | 201 | { |
202 | return security_ops->syslog(type, from_file); | 202 | return security_ops->syslog(type); |
203 | } | 203 | } |
204 | 204 | ||
205 | int security_settime(struct timespec *ts, struct timezone *tz) | 205 | int security_settime(struct timespec *ts, struct timezone *tz) |
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index d9154cf90ae1..65fa8bf596f5 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c | |||
@@ -1973,14 +1973,10 @@ static int selinux_quota_on(struct dentry *dentry) | |||
1973 | return dentry_has_perm(cred, NULL, dentry, FILE__QUOTAON); | 1973 | return dentry_has_perm(cred, NULL, dentry, FILE__QUOTAON); |
1974 | } | 1974 | } |
1975 | 1975 | ||
1976 | static int selinux_syslog(int type, bool from_file) | 1976 | static int selinux_syslog(int type) |
1977 | { | 1977 | { |
1978 | int rc; | 1978 | int rc; |
1979 | 1979 | ||
1980 | rc = cap_syslog(type, from_file); | ||
1981 | if (rc) | ||
1982 | return rc; | ||
1983 | |||
1984 | switch (type) { | 1980 | switch (type) { |
1985 | case SYSLOG_ACTION_READ_ALL: /* Read last kernel messages */ | 1981 | case SYSLOG_ACTION_READ_ALL: /* Read last kernel messages */ |
1986 | case SYSLOG_ACTION_SIZE_BUFFER: /* Return size of the log buffer */ | 1982 | case SYSLOG_ACTION_SIZE_BUFFER: /* Return size of the log buffer */ |
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index bc39f4067af6..489a85afa477 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c | |||
@@ -157,15 +157,11 @@ static int smack_ptrace_traceme(struct task_struct *ptp) | |||
157 | * | 157 | * |
158 | * Returns 0 on success, error code otherwise. | 158 | * Returns 0 on success, error code otherwise. |
159 | */ | 159 | */ |
160 | static int smack_syslog(int type, bool from_file) | 160 | static int smack_syslog(int typefrom_file) |
161 | { | 161 | { |
162 | int rc; | 162 | int rc = 0; |
163 | char *sp = current_security(); | 163 | char *sp = current_security(); |
164 | 164 | ||
165 | rc = cap_syslog(type, from_file); | ||
166 | if (rc != 0) | ||
167 | return rc; | ||
168 | |||
169 | if (capable(CAP_MAC_OVERRIDE)) | 165 | if (capable(CAP_MAC_OVERRIDE)) |
170 | return 0; | 166 | return 0; |
171 | 167 | ||
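The security hunks above (capability.c, commoncap.c, security.c, selinux, smack) belong together with the new SECURITY_DMESG_RESTRICT option: the old two-argument cap_syslog(type, from_file) policy is removed from the individual LSMs, which now only see the action type, and the common entry point just forwards to the registered module. A much-simplified sketch of the reshaped dispatch (the dmesg_restrict decision itself happens elsewhere and is omitted):

#include <stdio.h>

struct security_operations {
	int (*syslog)(int type);
};

/* Capability default: no extra restriction beyond what the generic code does. */
static int cap_syslog(int type)
{
	(void)type;
	return 0;
}

static struct security_operations security_ops = { .syslog = cap_syslog };

static int security_syslog(int type)
{
	return security_ops.syslog(type);   /* a negative return denies the action */
}

int main(void)
{
	printf("SYSLOG_ACTION_READ_ALL -> %d\n", security_syslog(3));
	return 0;
}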
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt index 122ec9dc4853..26aff6bf9e50 100644 --- a/tools/perf/Documentation/perf-trace.txt +++ b/tools/perf/Documentation/perf-trace.txt | |||
@@ -8,7 +8,11 @@ perf-trace - Read perf.data (created by perf record) and display trace output | |||
8 | SYNOPSIS | 8 | SYNOPSIS |
9 | -------- | 9 | -------- |
10 | [verse] | 10 | [verse] |
11 | 'perf trace' {record <script> | report <script> [args] } | 11 | 'perf trace' [<options>] |
12 | 'perf trace' [<options>] record <script> [<record-options>] <command> | ||
13 | 'perf trace' [<options>] report <script> [script-args] | ||
14 | 'perf trace' [<options>] <script> <required-script-args> [<record-options>] <command> | ||
15 | 'perf trace' [<options>] <top-script> [script-args] | ||
12 | 16 | ||
13 | DESCRIPTION | 17 | DESCRIPTION |
14 | ----------- | 18 | ----------- |
@@ -24,23 +28,53 @@ There are several variants of perf trace: | |||
24 | available via 'perf trace -l'). The following variants allow you to | 28 | available via 'perf trace -l'). The following variants allow you to |
25 | record and run those scripts: | 29 | record and run those scripts: |
26 | 30 | ||
27 | 'perf trace record <script>' to record the events required for 'perf | 31 | 'perf trace record <script> <command>' to record the events required |
28 | trace report'. <script> is the name displayed in the output of | 32 | for 'perf trace report'. <script> is the name displayed in the |
29 | 'perf trace --list' i.e. the actual script name minus any language | 33 | output of 'perf trace --list' i.e. the actual script name minus any |
30 | extension. | 34 | language extension. If <command> is not specified, the events are |
35 | recorded using the -a (system-wide) 'perf record' option. | ||
31 | 36 | ||
32 | 'perf trace report <script>' to run and display the results of | 37 | 'perf trace report <script> [args]' to run and display the results |
33 | <script>. <script> is the name displayed in the output of 'perf | 38 | of <script>. <script> is the name displayed in the output of 'perf |
34 | trace --list' i.e. the actual script name minus any language | 39 | trace --list' i.e. the actual script name minus any language |
35 | extension. The perf.data output from a previous run of 'perf trace | 40 | extension. The perf.data output from a previous run of 'perf trace |
36 | record <script>' is used and should be present for this command to | 41 | record <script>' is used and should be present for this command to |
37 | succeed. | 42 | succeed. [args] refers to the (mainly optional) args expected by |
43 | the script. | ||
44 | |||
45 | 'perf trace <script> <required-script-args> <command>' to both | ||
46 | record the events required for <script> and to run the <script> | ||
47 | using 'live-mode' i.e. without writing anything to disk. <script> | ||
48 | is the name displayed in the output of 'perf trace --list' i.e. the | ||
49 | actual script name minus any language extension. If <command> is | ||
50 | not specified, the events are recorded using the -a (system-wide) | ||
51 | 'perf record' option. If <script> has any required args, they | ||
52 | should be specified before <command>. This mode doesn't allow for | ||
53 | optional script args to be specified; if optional script args are | ||
54 | desired, they can be specified using separate 'perf trace record' | ||
55 | and 'perf trace report' commands, with the stdout of the record step | ||
56 | piped to the stdin of the report script, using the '-o -' and '-i -' | ||
57 | options of the corresponding commands. | ||
58 | |||
59 | 'perf trace <top-script>' to both record the events required for | ||
60 | <top-script> and to run the <top-script> using 'live-mode' | ||
61 | i.e. without writing anything to disk. <top-script> is the name | ||
62 | displayed in the output of 'perf trace --list' i.e. the actual | ||
63 | script name minus any language extension; a <top-script> is defined | ||
64 | as any script name ending with the string 'top'. | ||
65 | |||
66 | [<record-options>] can be passed to the record steps of 'perf trace | ||
67 | record' and 'live-mode' variants; this isn't possible however for | ||
68 | <top-script> 'live-mode' or 'perf trace report' variants. | ||
38 | 69 | ||
39 | See the 'SEE ALSO' section for links to language-specific | 70 | See the 'SEE ALSO' section for links to language-specific |
40 | information on how to write and run your own trace scripts. | 71 | information on how to write and run your own trace scripts. |
41 | 72 | ||
42 | OPTIONS | 73 | OPTIONS |
43 | ------- | 74 | ------- |
75 | <command>...:: | ||
76 | Any command you can specify in a shell. | ||
77 | |||
44 | -D:: | 78 | -D:: |
45 | --dump-raw-trace=:: | 79 | --dump-raw-trace=:: |
46 | Display verbose dump of the trace data. | 80 | Display verbose dump of the trace data. |
@@ -64,6 +98,13 @@ OPTIONS | |||
64 | Generate perf-trace.[ext] starter script for given language, | 98 | Generate perf-trace.[ext] starter script for given language, |
65 | using current perf.data. | 99 | using current perf.data. |
66 | 100 | ||
101 | -a:: | ||
102 | Force system-wide collection. Scripts run without a <command> | ||
103 | normally use -a by default, while scripts run with a <command> | ||
104 | normally don't - this option allows the latter to be run in | ||
105 | system-wide mode. | ||
106 | |||
107 | |||
67 | SEE ALSO | 108 | SEE ALSO |
68 | -------- | 109 | -------- |
69 | linkperf:perf-record[1], linkperf:perf-trace-perl[1], | 110 | linkperf:perf-record[1], linkperf:perf-trace-perl[1], |
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 4e75583ddd6d..93bd2ff001fb 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c | |||
@@ -790,7 +790,7 @@ static const char * const record_usage[] = { | |||
790 | 790 | ||
791 | static bool force, append_file; | 791 | static bool force, append_file; |
792 | 792 | ||
793 | static const struct option options[] = { | 793 | const struct option record_options[] = { |
794 | OPT_CALLBACK('e', "event", NULL, "event", | 794 | OPT_CALLBACK('e', "event", NULL, "event", |
795 | "event selector. use 'perf list' to list available events", | 795 | "event selector. use 'perf list' to list available events", |
796 | parse_events), | 796 | parse_events), |
@@ -839,16 +839,16 @@ int cmd_record(int argc, const char **argv, const char *prefix __used) | |||
839 | { | 839 | { |
840 | int i, j, err = -ENOMEM; | 840 | int i, j, err = -ENOMEM; |
841 | 841 | ||
842 | argc = parse_options(argc, argv, options, record_usage, | 842 | argc = parse_options(argc, argv, record_options, record_usage, |
843 | PARSE_OPT_STOP_AT_NON_OPTION); | 843 | PARSE_OPT_STOP_AT_NON_OPTION); |
844 | if (!argc && target_pid == -1 && target_tid == -1 && | 844 | if (!argc && target_pid == -1 && target_tid == -1 && |
845 | !system_wide && !cpu_list) | 845 | !system_wide && !cpu_list) |
846 | usage_with_options(record_usage, options); | 846 | usage_with_options(record_usage, record_options); |
847 | 847 | ||
848 | if (force && append_file) { | 848 | if (force && append_file) { |
849 | fprintf(stderr, "Can't overwrite and append at the same time." | 849 | fprintf(stderr, "Can't overwrite and append at the same time." |
850 | " You need to choose between -f and -A"); | 850 | " You need to choose between -f and -A"); |
851 | usage_with_options(record_usage, options); | 851 | usage_with_options(record_usage, record_options); |
852 | } else if (append_file) { | 852 | } else if (append_file) { |
853 | write_mode = WRITE_APPEND; | 853 | write_mode = WRITE_APPEND; |
854 | } else { | 854 | } else { |
@@ -871,7 +871,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __used) | |||
871 | if (thread_num <= 0) { | 871 | if (thread_num <= 0) { |
872 | fprintf(stderr, "Can't find all threads of pid %d\n", | 872 | fprintf(stderr, "Can't find all threads of pid %d\n", |
873 | target_pid); | 873 | target_pid); |
874 | usage_with_options(record_usage, options); | 874 | usage_with_options(record_usage, record_options); |
875 | } | 875 | } |
876 | } else { | 876 | } else { |
877 | all_tids=malloc(sizeof(pid_t)); | 877 | all_tids=malloc(sizeof(pid_t)); |
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index b513e40974f4..dd625808c2a5 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c | |||
@@ -69,7 +69,6 @@ static int target_tid = -1; | |||
69 | static pid_t *all_tids = NULL; | 69 | static pid_t *all_tids = NULL; |
70 | static int thread_num = 0; | 70 | static int thread_num = 0; |
71 | static bool inherit = false; | 71 | static bool inherit = false; |
72 | static int profile_cpu = -1; | ||
73 | static int nr_cpus = 0; | 72 | static int nr_cpus = 0; |
74 | static int realtime_prio = 0; | 73 | static int realtime_prio = 0; |
75 | static bool group = false; | 74 | static bool group = false; |
@@ -558,13 +557,13 @@ static void print_sym_table(void) | |||
558 | else | 557 | else |
559 | printf(" (all"); | 558 | printf(" (all"); |
560 | 559 | ||
561 | if (profile_cpu != -1) | 560 | if (cpu_list) |
562 | printf(", cpu: %d)\n", profile_cpu); | 561 | printf(", CPU%s: %s)\n", nr_cpus > 1 ? "s" : "", cpu_list); |
563 | else { | 562 | else { |
564 | if (target_tid != -1) | 563 | if (target_tid != -1) |
565 | printf(")\n"); | 564 | printf(")\n"); |
566 | else | 565 | else |
567 | printf(", %d CPUs)\n", nr_cpus); | 566 | printf(", %d CPU%s)\n", nr_cpus, nr_cpus > 1 ? "s" : ""); |
568 | } | 567 | } |
569 | 568 | ||
570 | printf("%-*.*s\n", win_width, win_width, graph_dotted_line); | 569 | printf("%-*.*s\n", win_width, win_width, graph_dotted_line); |
@@ -1187,11 +1186,10 @@ int group_fd; | |||
1187 | static void start_counter(int i, int counter) | 1186 | static void start_counter(int i, int counter) |
1188 | { | 1187 | { |
1189 | struct perf_event_attr *attr; | 1188 | struct perf_event_attr *attr; |
1190 | int cpu; | 1189 | int cpu = -1; |
1191 | int thread_index; | 1190 | int thread_index; |
1192 | 1191 | ||
1193 | cpu = profile_cpu; | 1192 | if (target_tid == -1) |
1194 | if (target_tid == -1 && profile_cpu == -1) | ||
1195 | cpu = cpumap[i]; | 1193 | cpu = cpumap[i]; |
1196 | 1194 | ||
1197 | attr = attrs + counter; | 1195 | attr = attrs + counter; |
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index 2f8df45c4dcb..86cfe3800e6b 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include "util/symbol.h" | 10 | #include "util/symbol.h" |
11 | #include "util/thread.h" | 11 | #include "util/thread.h" |
12 | #include "util/trace-event.h" | 12 | #include "util/trace-event.h" |
13 | #include "util/parse-options.h" | ||
13 | #include "util/util.h" | 14 | #include "util/util.h" |
14 | 15 | ||
15 | static char const *script_name; | 16 | static char const *script_name; |
@@ -17,6 +18,7 @@ static char const *generate_script_lang; | |||
17 | static bool debug_mode; | 18 | static bool debug_mode; |
18 | static u64 last_timestamp; | 19 | static u64 last_timestamp; |
19 | static u64 nr_unordered; | 20 | static u64 nr_unordered; |
21 | extern const struct option record_options[]; | ||
20 | 22 | ||
21 | static int default_start_script(const char *script __unused, | 23 | static int default_start_script(const char *script __unused, |
22 | int argc __unused, | 24 | int argc __unused, |
@@ -328,7 +330,7 @@ static struct script_desc *script_desc__new(const char *name) | |||
328 | { | 330 | { |
329 | struct script_desc *s = zalloc(sizeof(*s)); | 331 | struct script_desc *s = zalloc(sizeof(*s)); |
330 | 332 | ||
331 | if (s != NULL) | 333 | if (s != NULL && name) |
332 | s->name = strdup(name); | 334 | s->name = strdup(name); |
333 | 335 | ||
334 | return s; | 336 | return s; |
@@ -337,6 +339,8 @@ static struct script_desc *script_desc__new(const char *name) | |||
337 | static void script_desc__delete(struct script_desc *s) | 339 | static void script_desc__delete(struct script_desc *s) |
338 | { | 340 | { |
339 | free(s->name); | 341 | free(s->name); |
342 | free(s->half_liner); | ||
343 | free(s->args); | ||
340 | free(s); | 344 | free(s); |
341 | } | 345 | } |
342 | 346 | ||
@@ -537,8 +541,40 @@ static char *get_script_path(const char *script_root, const char *suffix) | |||
537 | return path; | 541 | return path; |
538 | } | 542 | } |
539 | 543 | ||
544 | static bool is_top_script(const char *script_path) | ||
545 | { | ||
546 | return ends_with((char *)script_path, "top") == NULL ? false : true; | ||
547 | } | ||
548 | |||
549 | static int has_required_arg(char *script_path) | ||
550 | { | ||
551 | struct script_desc *desc; | ||
552 | int n_args = 0; | ||
553 | char *p; | ||
554 | |||
555 | desc = script_desc__new(NULL); | ||
556 | |||
557 | if (read_script_info(desc, script_path)) | ||
558 | goto out; | ||
559 | |||
560 | if (!desc->args) | ||
561 | goto out; | ||
562 | |||
563 | for (p = desc->args; *p; p++) | ||
564 | if (*p == '<') | ||
565 | n_args++; | ||
566 | out: | ||
567 | script_desc__delete(desc); | ||
568 | |||
569 | return n_args; | ||
570 | } | ||
571 | |||
540 | static const char * const trace_usage[] = { | 572 | static const char * const trace_usage[] = { |
541 | "perf trace [<options>] <command>", | 573 | "perf trace [<options>]", |
574 | "perf trace [<options>] record <script> [<record-options>] <command>", | ||
575 | "perf trace [<options>] report <script> [script-args]", | ||
576 | "perf trace [<options>] <script> [<record-options>] <command>", | ||
577 | "perf trace [<options>] <top-script> [script-args]", | ||
542 | NULL | 578 | NULL |
543 | }; | 579 | }; |
544 | 580 | ||
@@ -564,50 +600,81 @@ static const struct option options[] = { | |||
564 | OPT_END() | 600 | OPT_END() |
565 | }; | 601 | }; |
566 | 602 | ||
603 | static bool have_cmd(int argc, const char **argv) | ||
604 | { | ||
605 | char **__argv = malloc(sizeof(const char *) * argc); | ||
606 | |||
607 | if (!__argv) | ||
608 | die("malloc"); | ||
609 | memcpy(__argv, argv, sizeof(const char *) * argc); | ||
610 | argc = parse_options(argc, (const char **)__argv, record_options, | ||
611 | NULL, PARSE_OPT_STOP_AT_NON_OPTION); | ||
612 | free(__argv); | ||
613 | |||
614 | return argc != 0; | ||
615 | } | ||
616 | |||
567 | int cmd_trace(int argc, const char **argv, const char *prefix __used) | 617 | int cmd_trace(int argc, const char **argv, const char *prefix __used) |
568 | { | 618 | { |
619 | char *rec_script_path = NULL; | ||
620 | char *rep_script_path = NULL; | ||
569 | struct perf_session *session; | 621 | struct perf_session *session; |
570 | const char *suffix = NULL; | 622 | char *script_path = NULL; |
571 | const char **__argv; | 623 | const char **__argv; |
572 | char *script_path; | 624 | bool system_wide; |
573 | int i, err; | 625 | int i, j, err; |
574 | 626 | ||
575 | if (argc >= 2 && strncmp(argv[1], "rec", strlen("rec")) == 0) { | 627 | setup_scripting(); |
576 | if (argc < 3) { | 628 | |
577 | fprintf(stderr, | 629 | argc = parse_options(argc, argv, options, trace_usage, |
578 | "Please specify a record script\n"); | 630 | PARSE_OPT_STOP_AT_NON_OPTION); |
579 | return -1; | 631 | |
580 | } | 632 | if (argc > 1 && !strncmp(argv[0], "rec", strlen("rec"))) { |
581 | suffix = RECORD_SUFFIX; | 633 | rec_script_path = get_script_path(argv[1], RECORD_SUFFIX); |
634 | if (!rec_script_path) | ||
635 | return cmd_record(argc, argv, NULL); | ||
582 | } | 636 | } |
583 | 637 | ||
584 | if (argc >= 2 && strncmp(argv[1], "rep", strlen("rep")) == 0) { | 638 | if (argc > 1 && !strncmp(argv[0], "rep", strlen("rep"))) { |
585 | if (argc < 3) { | 639 | rep_script_path = get_script_path(argv[1], REPORT_SUFFIX); |
640 | if (!rep_script_path) { | ||
586 | fprintf(stderr, | 641 | fprintf(stderr, |
587 | "Please specify a report script\n"); | 642 | "Please specify a valid report script " |
643 | "(see 'perf trace -l' for listing)\n"); | ||
588 | return -1; | 644 | return -1; |
589 | } | 645 | } |
590 | suffix = REPORT_SUFFIX; | ||
591 | } | 646 | } |
592 | 647 | ||
593 | /* make sure PERF_EXEC_PATH is set for scripts */ | 648 | /* make sure PERF_EXEC_PATH is set for scripts */ |
594 | perf_set_argv_exec_path(perf_exec_path()); | 649 | perf_set_argv_exec_path(perf_exec_path()); |
595 | 650 | ||
596 | if (!suffix && argc >= 2 && strncmp(argv[1], "-", strlen("-")) != 0) { | 651 | if (argc && !script_name && !rec_script_path && !rep_script_path) { |
597 | char *record_script_path, *report_script_path; | ||
598 | int live_pipe[2]; | 652 | int live_pipe[2]; |
653 | int rep_args; | ||
599 | pid_t pid; | 654 | pid_t pid; |
600 | 655 | ||
601 | record_script_path = get_script_path(argv[1], RECORD_SUFFIX); | 656 | rec_script_path = get_script_path(argv[0], RECORD_SUFFIX); |
602 | if (!record_script_path) { | 657 | rep_script_path = get_script_path(argv[0], REPORT_SUFFIX); |
603 | fprintf(stderr, "record script not found\n"); | 658 | |
604 | return -1; | 659 | if (!rec_script_path && !rep_script_path) { |
660 | fprintf(stderr, " Couldn't find script %s\n\n See perf" | ||
661 | " trace -l for available scripts.\n", argv[0]); | ||
662 | usage_with_options(trace_usage, options); | ||
605 | } | 663 | } |
606 | 664 | ||
607 | report_script_path = get_script_path(argv[1], REPORT_SUFFIX); | 665 | if (is_top_script(argv[0])) { |
608 | if (!report_script_path) { | 666 | rep_args = argc - 1; |
609 | fprintf(stderr, "report script not found\n"); | 667 | } else { |
610 | return -1; | 668 | int rec_args; |
669 | |||
670 | rep_args = has_required_arg(rep_script_path); | ||
671 | rec_args = (argc - 1) - rep_args; | ||
672 | if (rec_args < 0) { | ||
673 | fprintf(stderr, " %s script requires options." | ||
674 | "\n\n See perf trace -l for available " | ||
675 | "scripts and options.\n", argv[0]); | ||
676 | usage_with_options(trace_usage, options); | ||
677 | } | ||
611 | } | 678 | } |
612 | 679 | ||
613 | if (pipe(live_pipe) < 0) { | 680 | if (pipe(live_pipe) < 0) { |
@@ -622,60 +689,84 @@ int cmd_trace(int argc, const char **argv, const char *prefix __used) | |||
622 | } | 689 | } |
623 | 690 | ||
624 | if (!pid) { | 691 | if (!pid) { |
692 | system_wide = true; | ||
693 | j = 0; | ||
694 | |||
625 | dup2(live_pipe[1], 1); | 695 | dup2(live_pipe[1], 1); |
626 | close(live_pipe[0]); | 696 | close(live_pipe[0]); |
627 | 697 | ||
628 | __argv = malloc(6 * sizeof(const char *)); | 698 | if (!is_top_script(argv[0])) |
629 | __argv[0] = "/bin/sh"; | 699 | system_wide = !have_cmd(argc - rep_args, |
630 | __argv[1] = record_script_path; | 700 | &argv[rep_args]); |
631 | __argv[2] = "-q"; | 701 | |
632 | __argv[3] = "-o"; | 702 | __argv = malloc((argc + 6) * sizeof(const char *)); |
633 | __argv[4] = "-"; | 703 | if (!__argv) |
634 | __argv[5] = NULL; | 704 | die("malloc"); |
705 | |||
706 | __argv[j++] = "/bin/sh"; | ||
707 | __argv[j++] = rec_script_path; | ||
708 | if (system_wide) | ||
709 | __argv[j++] = "-a"; | ||
710 | __argv[j++] = "-q"; | ||
711 | __argv[j++] = "-o"; | ||
712 | __argv[j++] = "-"; | ||
713 | for (i = rep_args + 1; i < argc; i++) | ||
714 | __argv[j++] = argv[i]; | ||
715 | __argv[j++] = NULL; | ||
635 | 716 | ||
636 | execvp("/bin/sh", (char **)__argv); | 717 | execvp("/bin/sh", (char **)__argv); |
718 | free(__argv); | ||
637 | exit(-1); | 719 | exit(-1); |
638 | } | 720 | } |
639 | 721 | ||
640 | dup2(live_pipe[0], 0); | 722 | dup2(live_pipe[0], 0); |
641 | close(live_pipe[1]); | 723 | close(live_pipe[1]); |
642 | 724 | ||
643 | __argv = malloc((argc + 3) * sizeof(const char *)); | 725 | __argv = malloc((argc + 4) * sizeof(const char *)); |
644 | __argv[0] = "/bin/sh"; | 726 | if (!__argv) |
645 | __argv[1] = report_script_path; | 727 | die("malloc"); |
646 | for (i = 2; i < argc; i++) | 728 | j = 0; |
647 | __argv[i] = argv[i]; | 729 | __argv[j++] = "/bin/sh"; |
648 | __argv[i++] = "-i"; | 730 | __argv[j++] = rep_script_path; |
649 | __argv[i++] = "-"; | 731 | for (i = 1; i < rep_args + 1; i++) |
650 | __argv[i++] = NULL; | 732 | __argv[j++] = argv[i]; |
733 | __argv[j++] = "-i"; | ||
734 | __argv[j++] = "-"; | ||
735 | __argv[j++] = NULL; | ||
651 | 736 | ||
652 | execvp("/bin/sh", (char **)__argv); | 737 | execvp("/bin/sh", (char **)__argv); |
738 | free(__argv); | ||
653 | exit(-1); | 739 | exit(-1); |
654 | } | 740 | } |
655 | 741 | ||
656 | if (suffix) { | 742 | if (rec_script_path) |
657 | script_path = get_script_path(argv[2], suffix); | 743 | script_path = rec_script_path; |
658 | if (!script_path) { | 744 | if (rep_script_path) |
659 | fprintf(stderr, "script not found\n"); | 745 | script_path = rep_script_path; |
660 | return -1; | 746 | |
661 | } | 747 | if (script_path) { |
662 | 748 | system_wide = false; | |
663 | __argv = malloc((argc + 1) * sizeof(const char *)); | 749 | j = 0; |
664 | __argv[0] = "/bin/sh"; | 750 | |
665 | __argv[1] = script_path; | 751 | if (rec_script_path) |
666 | for (i = 3; i < argc; i++) | 752 | system_wide = !have_cmd(argc - 1, &argv[1]); |
667 | __argv[i - 1] = argv[i]; | 753 | |
668 | __argv[argc - 1] = NULL; | 754 | __argv = malloc((argc + 2) * sizeof(const char *)); |
755 | if (!__argv) | ||
756 | die("malloc"); | ||
757 | __argv[j++] = "/bin/sh"; | ||
758 | __argv[j++] = script_path; | ||
759 | if (system_wide) | ||
760 | __argv[j++] = "-a"; | ||
761 | for (i = 2; i < argc; i++) | ||
762 | __argv[j++] = argv[i]; | ||
763 | __argv[j++] = NULL; | ||
669 | 764 | ||
670 | execvp("/bin/sh", (char **)__argv); | 765 | execvp("/bin/sh", (char **)__argv); |
766 | free(__argv); | ||
671 | exit(-1); | 767 | exit(-1); |
672 | } | 768 | } |
673 | 769 | ||
674 | setup_scripting(); | ||
675 | |||
676 | argc = parse_options(argc, argv, options, trace_usage, | ||
677 | PARSE_OPT_STOP_AT_NON_OPTION); | ||
678 | |||
679 | if (symbol__init() < 0) | 770 | if (symbol__init() < 0) |
680 | return -1; | 771 | return -1; |
681 | if (!script_name) | 772 | if (!script_name) |
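The argument splitting above routes trailing arguments to the report or record step based on the report script's declared required arguments. A hedged sketch, assuming (as in the perf tree) that rw-by-file's report step declares a required <comm> argument:

    # no <command> follows, so recording is system-wide (-a is added),
    # while 'firefox' is handed to the report side as its <comm> argument
    perf trace rw-by-file firefox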
diff --git a/tools/perf/scripts/perl/bin/failed-syscalls-record b/tools/perf/scripts/perl/bin/failed-syscalls-record index eb5846bcb565..8104895a7b67 100644 --- a/tools/perf/scripts/perl/bin/failed-syscalls-record +++ b/tools/perf/scripts/perl/bin/failed-syscalls-record | |||
@@ -1,2 +1,2 @@ | |||
1 | #!/bin/bash | 1 | #!/bin/bash |
2 | perf record -a -e raw_syscalls:sys_exit $@ | 2 | perf record -e raw_syscalls:sys_exit $@ |
diff --git a/tools/perf/scripts/perl/bin/rw-by-file-record b/tools/perf/scripts/perl/bin/rw-by-file-record index 5bfaae5a6cba..33efc8673aae 100644 --- a/tools/perf/scripts/perl/bin/rw-by-file-record +++ b/tools/perf/scripts/perl/bin/rw-by-file-record | |||
@@ -1,3 +1,3 @@ | |||
1 | #!/bin/bash | 1 | #!/bin/bash |
2 | perf record -a -e syscalls:sys_enter_read -e syscalls:sys_enter_write $@ | 2 | perf record -e syscalls:sys_enter_read -e syscalls:sys_enter_write $@ |
3 | 3 | ||
diff --git a/tools/perf/scripts/perl/bin/rw-by-pid-record b/tools/perf/scripts/perl/bin/rw-by-pid-record index 6e0b2f7755ac..7cb9db230448 100644 --- a/tools/perf/scripts/perl/bin/rw-by-pid-record +++ b/tools/perf/scripts/perl/bin/rw-by-pid-record | |||
@@ -1,2 +1,2 @@ | |||
1 | #!/bin/bash | 1 | #!/bin/bash |
2 | perf record -a -e syscalls:sys_enter_read -e syscalls:sys_exit_read -e syscalls:sys_enter_write -e syscalls:sys_exit_write $@ | 2 | perf record -e syscalls:sys_enter_read -e syscalls:sys_exit_read -e syscalls:sys_enter_write -e syscalls:sys_exit_write $@ |
diff --git a/tools/perf/scripts/perl/bin/rwtop-record b/tools/perf/scripts/perl/bin/rwtop-record index 6e0b2f7755ac..7cb9db230448 100644 --- a/tools/perf/scripts/perl/bin/rwtop-record +++ b/tools/perf/scripts/perl/bin/rwtop-record | |||
@@ -1,2 +1,2 @@ | |||
1 | #!/bin/bash | 1 | #!/bin/bash |
2 | perf record -a -e syscalls:sys_enter_read -e syscalls:sys_exit_read -e syscalls:sys_enter_write -e syscalls:sys_exit_write $@ | 2 | perf record -e syscalls:sys_enter_read -e syscalls:sys_exit_read -e syscalls:sys_enter_write -e syscalls:sys_exit_write $@ |
diff --git a/tools/perf/scripts/perl/bin/wakeup-latency-record b/tools/perf/scripts/perl/bin/wakeup-latency-record index 9f2acaaae9f0..464251a1bd7e 100644 --- a/tools/perf/scripts/perl/bin/wakeup-latency-record +++ b/tools/perf/scripts/perl/bin/wakeup-latency-record | |||
@@ -1,5 +1,5 @@ | |||
1 | #!/bin/bash | 1 | #!/bin/bash |
2 | perf record -a -e sched:sched_switch -e sched:sched_wakeup $@ | 2 | perf record -e sched:sched_switch -e sched:sched_wakeup $@ |
3 | 3 | ||
4 | 4 | ||
5 | 5 | ||
diff --git a/tools/perf/scripts/perl/bin/workqueue-stats-record b/tools/perf/scripts/perl/bin/workqueue-stats-record index 85301f2471ff..8edda9078d5d 100644 --- a/tools/perf/scripts/perl/bin/workqueue-stats-record +++ b/tools/perf/scripts/perl/bin/workqueue-stats-record | |||
@@ -1,2 +1,2 @@ | |||
1 | #!/bin/bash | 1 | #!/bin/bash |
2 | perf record -a -e workqueue:workqueue_creation -e workqueue:workqueue_destruction -e workqueue:workqueue_execution -e workqueue:workqueue_insertion $@ | 2 | perf record -e workqueue:workqueue_creation -e workqueue:workqueue_destruction -e workqueue:workqueue_execution -e workqueue:workqueue_insertion $@ |
diff --git a/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record index eb5846bcb565..8104895a7b67 100644 --- a/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record +++ b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record | |||
@@ -1,2 +1,2 @@ | |||
1 | #!/bin/bash | 1 | #!/bin/bash |
2 | perf record -a -e raw_syscalls:sys_exit $@ | 2 | perf record -e raw_syscalls:sys_exit $@ |
diff --git a/tools/perf/scripts/python/bin/futex-contention-record b/tools/perf/scripts/python/bin/futex-contention-record index 5ecbb433caf4..b1495c9a9b20 100644 --- a/tools/perf/scripts/python/bin/futex-contention-record +++ b/tools/perf/scripts/python/bin/futex-contention-record | |||
@@ -1,2 +1,2 @@ | |||
1 | #!/bin/bash | 1 | #!/bin/bash |
2 | perf record -a -e syscalls:sys_enter_futex -e syscalls:sys_exit_futex $@ | 2 | perf record -e syscalls:sys_enter_futex -e syscalls:sys_exit_futex $@ |
diff --git a/tools/perf/scripts/python/bin/netdev-times-record b/tools/perf/scripts/python/bin/netdev-times-record index d931a828126b..558754b840a9 100644 --- a/tools/perf/scripts/python/bin/netdev-times-record +++ b/tools/perf/scripts/python/bin/netdev-times-record | |||
@@ -1,5 +1,5 @@ | |||
1 | #!/bin/bash | 1 | #!/bin/bash |
2 | perf record -a -e net:net_dev_xmit -e net:net_dev_queue \ | 2 | perf record -e net:net_dev_xmit -e net:net_dev_queue \ |
3 | -e net:netif_receive_skb -e net:netif_rx \ | 3 | -e net:netif_receive_skb -e net:netif_rx \ |
4 | -e skb:consume_skb -e skb:kfree_skb \ | 4 | -e skb:consume_skb -e skb:kfree_skb \ |
5 | -e skb:skb_copy_datagram_iovec -e napi:napi_poll \ | 5 | -e skb:skb_copy_datagram_iovec -e napi:napi_poll \ |
diff --git a/tools/perf/scripts/python/bin/sched-migration-record b/tools/perf/scripts/python/bin/sched-migration-record index 17a3e9bd9e8f..7493fddbe995 100644 --- a/tools/perf/scripts/python/bin/sched-migration-record +++ b/tools/perf/scripts/python/bin/sched-migration-record | |||
@@ -1,2 +1,2 @@ | |||
1 | #!/bin/bash | 1 | #!/bin/bash |
2 | perf record -m 16384 -a -e sched:sched_wakeup -e sched:sched_wakeup_new -e sched:sched_switch -e sched:sched_migrate_task $@ | 2 | perf record -m 16384 -e sched:sched_wakeup -e sched:sched_wakeup_new -e sched:sched_switch -e sched:sched_migrate_task $@ |
diff --git a/tools/perf/scripts/python/bin/sctop-record b/tools/perf/scripts/python/bin/sctop-record index 1fc5998b721d..4efbfaa7f6a5 100644 --- a/tools/perf/scripts/python/bin/sctop-record +++ b/tools/perf/scripts/python/bin/sctop-record | |||
@@ -1,2 +1,2 @@ | |||
1 | #!/bin/bash | 1 | #!/bin/bash |
2 | perf record -a -e raw_syscalls:sys_enter $@ | 2 | perf record -e raw_syscalls:sys_enter $@ |
diff --git a/tools/perf/scripts/python/bin/syscall-counts-by-pid-record b/tools/perf/scripts/python/bin/syscall-counts-by-pid-record index 1fc5998b721d..4efbfaa7f6a5 100644 --- a/tools/perf/scripts/python/bin/syscall-counts-by-pid-record +++ b/tools/perf/scripts/python/bin/syscall-counts-by-pid-record | |||
@@ -1,2 +1,2 @@ | |||
1 | #!/bin/bash | 1 | #!/bin/bash |
2 | perf record -a -e raw_syscalls:sys_enter $@ | 2 | perf record -e raw_syscalls:sys_enter $@ |
diff --git a/tools/perf/scripts/python/bin/syscall-counts-record b/tools/perf/scripts/python/bin/syscall-counts-record index 1fc5998b721d..4efbfaa7f6a5 100644 --- a/tools/perf/scripts/python/bin/syscall-counts-record +++ b/tools/perf/scripts/python/bin/syscall-counts-record | |||
@@ -1,2 +1,2 @@ | |||
1 | #!/bin/bash | 1 | #!/bin/bash |
2 | perf record -a -e raw_syscalls:sys_enter $@ | 2 | perf record -e raw_syscalls:sys_enter $@ |
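The '-a' dropped from each *-record script above is now supplied by 'perf trace' itself whenever no <command> is given, so the effective invocations stay equivalent; a sketch of the two cases with syscall-counts:

    # no <command>: perf trace appends -a, i.e. the script still ends up
    # running 'perf record -a -e raw_syscalls:sys_enter'
    perf trace record syscall-counts
    # with a <command>: -a is not added and only 'make' is traced
    perf trace record syscall-counts make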
diff --git a/tools/perf/util/ui/util.c b/tools/perf/util/ui/util.c index 9706d9d40279..056c69521a38 100644 --- a/tools/perf/util/ui/util.c +++ b/tools/perf/util/ui/util.c | |||
@@ -104,9 +104,10 @@ out_destroy_form: | |||
104 | return rc; | 104 | return rc; |
105 | } | 105 | } |
106 | 106 | ||
107 | static const char yes[] = "Yes", no[] = "No"; | ||
108 | |||
107 | bool ui__dialog_yesno(const char *msg) | 109 | bool ui__dialog_yesno(const char *msg) |
108 | { | 110 | { |
109 | /* newtWinChoice should really be accepting const char pointers... */ | 111 | /* newtWinChoice should really be accepting const char pointers... */ |
110 | char yes[] = "Yes", no[] = "No"; | 112 | return newtWinChoice(NULL, (char *)yes, (char *)no, (char *)msg) == 1; |
111 | return newtWinChoice(NULL, yes, no, (char *)msg) == 1; | ||
112 | } | 113 | } |