39 files changed, 582 insertions(+), 348 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-devices-cache_disable b/Documentation/ABI/testing/sysfs-devices-cache_disable
deleted file mode 100644
index 175bb4f70512..000000000000
--- a/Documentation/ABI/testing/sysfs-devices-cache_disable
+++ /dev/null
@@ -1,18 +0,0 @@
-What:           /sys/devices/system/cpu/cpu*/cache/index*/cache_disable_X
-Date:           August 2008
-KernelVersion:  2.6.27
-Contact:        mark.langsdorf@amd.com
-Description:    These files exist in every cpu's cache index directories.
-                There are currently 2 cache_disable_# files in each
-                directory. Reading from these files on a supported
-                processor will return that cache disable index value
-                for that processor and node. Writing to one of these
-                files will cause the specificed cache index to be disabled.
-
-                Currently, only AMD Family 10h Processors support cache index
-                disable, and only for their L3 caches. See the BIOS and
-                Kernel Developer's Guide at
-                http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/31116-Public-GH-BKDG_3.20_2-4-09.pdf
-                for formatting information and other details on the
-                cache index disable.
-Users:          joachim.deguara@amd.com
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
new file mode 100644
index 000000000000..a703b9e9aeb9
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -0,0 +1,156 @@
+What:           /sys/devices/system/cpu/
+Date:           pre-git history
+Contact:        Linux kernel mailing list <linux-kernel@vger.kernel.org>
+Description:
+                A collection of both global and individual CPU attributes
+
+                Individual CPU attributes are contained in subdirectories
+                named by the kernel's logical CPU number, e.g.:
+
+                /sys/devices/system/cpu/cpu#/
+
+What:           /sys/devices/system/cpu/sched_mc_power_savings
+                /sys/devices/system/cpu/sched_smt_power_savings
+Date:           June 2006
+Contact:        Linux kernel mailing list <linux-kernel@vger.kernel.org>
+Description:    Discover and adjust the kernel's multi-core scheduler support.
+
+                Possible values are:
+
+                0 - No power saving load balance (default value)
+                1 - Fill one thread/core/package first for long running threads
+                2 - Also bias task wakeups to semi-idle cpu package for power
+                    savings
+
+                sched_mc_power_savings is dependent upon SCHED_MC, which is
+                itself architecture dependent.
+
+                sched_smt_power_savings is dependent upon SCHED_SMT, which
+                is itself architecture dependent.
+
+                The two files are independent of each other. It is possible
+                that one file may be present without the other.
+
+                Introduced by git commit 5c45bf27.
+
+
+What:           /sys/devices/system/cpu/kernel_max
+                /sys/devices/system/cpu/offline
+                /sys/devices/system/cpu/online
+                /sys/devices/system/cpu/possible
+                /sys/devices/system/cpu/present
+Date:           December 2008
+Contact:        Linux kernel mailing list <linux-kernel@vger.kernel.org>
+Description:    CPU topology files that describe kernel limits related to
+                hotplug. Briefly:
+
+                kernel_max: the maximum cpu index allowed by the kernel
+                configuration.
+
+                offline: cpus that are not online because they have been
+                HOTPLUGGED off or exceed the limit of cpus allowed by the
+                kernel configuration (kernel_max above).
+
+                online: cpus that are online and being scheduled.
+
+                possible: cpus that have been allocated resources and can be
+                brought online if they are present.
+
+                present: cpus that have been identified as being present in
+                the system.
+
+                See Documentation/cputopology.txt for more information.
+
+
+
+What:           /sys/devices/system/cpu/cpu#/node
+Date:           October 2009
+Contact:        Linux memory management mailing list <linux-mm@kvack.org>
+Description:    Discover NUMA node a CPU belongs to
+
+                When CONFIG_NUMA is enabled, a symbolic link that points
+                to the corresponding NUMA node directory.
+
+                For example, the following symlink is created for cpu42
+                in NUMA node 2:
+
+                /sys/devices/system/cpu/cpu42/node2 -> ../../node/node2
+
+
+What:           /sys/devices/system/cpu/cpu#/topology/core_id
+                /sys/devices/system/cpu/cpu#/topology/core_siblings
+                /sys/devices/system/cpu/cpu#/topology/core_siblings_list
+                /sys/devices/system/cpu/cpu#/topology/physical_package_id
+                /sys/devices/system/cpu/cpu#/topology/thread_siblings
+                /sys/devices/system/cpu/cpu#/topology/thread_siblings_list
+Date:           December 2008
+Contact:        Linux kernel mailing list <linux-kernel@vger.kernel.org>
+Description:    CPU topology files that describe a logical CPU's relationship
+                to other cores and threads in the same physical package.
+
+                One cpu# directory is created per logical CPU in the system,
+                e.g. /sys/devices/system/cpu/cpu42/.
+
+                Briefly, the files above are:
+
+                core_id: the CPU core ID of cpu#. Typically it is the
+                hardware platform's identifier (rather than the kernel's).
+                The actual value is architecture and platform dependent.
+
+                core_siblings: internal kernel map of cpu#'s hardware threads
+                within the same physical_package_id.
+
+                core_siblings_list: human-readable list of the logical CPU
+                numbers within the same physical_package_id as cpu#.
+
+                physical_package_id: physical package id of cpu#. Typically
+                corresponds to a physical socket number, but the actual value
+                is architecture and platform dependent.
+
+                thread_siblings: internal kernel map of cpu#'s hardware
+                threads within the same core as cpu#.
+
+                thread_siblings_list: human-readable list of cpu#'s hardware
+                threads within the same core as cpu#.
+
+                See Documentation/cputopology.txt for more information.
+
+
+What:           /sys/devices/system/cpu/cpuidle/current_driver
+                /sys/devices/system/cpu/cpuidle/current_governor_ro
+Date:           September 2007
+Contact:        Linux kernel mailing list <linux-kernel@vger.kernel.org>
+Description:    Discover cpuidle policy and mechanism
+
+                Various CPUs today support multiple idle levels that are
+                differentiated by varying exit latencies and power
+                consumption during idle.
+
+                Idle policy (governor) is differentiated from idle mechanism
+                (driver).
+
+                current_driver: displays current idle mechanism
+
+                current_governor_ro: displays current idle policy
+
+                See files in Documentation/cpuidle/ for more information.
+
+
+What:           /sys/devices/system/cpu/cpu*/cache/index*/cache_disable_X
+Date:           August 2008
+KernelVersion:  2.6.27
+Contact:        mark.langsdorf@amd.com
+Description:    These files exist in every cpu's cache index directories.
+                There are currently 2 cache_disable_# files in each
+                directory. Reading from these files on a supported
+                processor will return that cache disable index value
+                for that processor and node. Writing to one of these
+                files will cause the specified cache index to be disabled.
+
+                Currently, only AMD Family 10h Processors support cache index
+                disable, and only for their L3 caches. See the BIOS and
+                Kernel Developer's Guide at
+                http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/31116-Public-GH-BKDG_3.20_2-4-09.pdf
+                for formatting information and other details on the
+                cache index disable.
+Users:          joachim.deguara@amd.com
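
The topology attributes documented above are plain text files, so they are easy to inspect programmatically. As a point of reference, here is a minimal userspace sketch (illustrative only, not part of this patch; error handling is reduced to skipping absent attributes) that prints the topology of logical CPU 0:

/* Illustrative only: dump the topology attributes documented above
 * for one logical CPU.  Attributes that an architecture does not
 * provide are silently skipped. */
#include <stdio.h>

static void print_attr(int cpu, const char *name)
{
        char path[128], buf[128];
        FILE *f;

        snprintf(path, sizeof(path),
                 "/sys/devices/system/cpu/cpu%d/topology/%s", cpu, name);
        f = fopen(path, "r");
        if (!f)
                return; /* attribute may not exist on this platform */
        if (fgets(buf, sizeof(buf), f))
                printf("%-22s %s", name, buf);
        fclose(f);
}

int main(void)
{
        print_attr(0, "physical_package_id");
        print_attr(0, "core_id");
        print_attr(0, "thread_siblings_list");
        print_attr(0, "core_siblings_list");
        return 0;
}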
diff --git a/Documentation/cputopology.txt b/Documentation/cputopology.txt
index b41f3e58aefa..f1c5c4bccd3e 100644
--- a/Documentation/cputopology.txt
+++ b/Documentation/cputopology.txt
@@ -1,15 +1,28 @@
 
-Export cpu topology info via sysfs. Items (attributes) are similar
+Export CPU topology info via sysfs. Items (attributes) are similar
 to /proc/cpuinfo.
 
 1) /sys/devices/system/cpu/cpuX/topology/physical_package_id:
-represent the physical package id of cpu X;
+
+physical package id of cpuX. Typically corresponds to a physical
+socket number, but the actual value is architecture and platform
+dependent.
+
 2) /sys/devices/system/cpu/cpuX/topology/core_id:
-represent the cpu core id to cpu X;
+
+the CPU core ID of cpuX. Typically it is the hardware platform's
+identifier (rather than the kernel's). The actual value is
+architecture and platform dependent.
+
 3) /sys/devices/system/cpu/cpuX/topology/thread_siblings:
-represent the thread siblings to cpu X in the same core;
+
+internal kernel map of cpuX's hardware threads within the same
+core as cpuX
+
 4) /sys/devices/system/cpu/cpuX/topology/core_siblings:
-represent the thread siblings to cpu X in the same physical package;
+
+internal kernel map of cpuX's hardware threads within the same
+physical_package_id.
 
 To implement it in an architecture-neutral way, a new source file,
 drivers/base/topology.c, is to export the 4 attributes.
@@ -32,32 +45,32 @@ not defined by include/asm-XXX/topology.h:
 3) thread_siblings: just the given CPU
 4) core_siblings: just the given CPU
 
-Additionally, cpu topology information is provided under
+Additionally, CPU topology information is provided under
 /sys/devices/system/cpu and includes these files.  The internal
 source for the output is in brackets ("[]").
 
-kernel_max: the maximum cpu index allowed by the kernel configuration.
+kernel_max: the maximum CPU index allowed by the kernel configuration.
             [NR_CPUS-1]
 
-offline:    cpus that are not online because they have been
+offline:    CPUs that are not online because they have been
             HOTPLUGGED off (see cpu-hotplug.txt) or exceed the limit
-            of cpus allowed by the kernel configuration (kernel_max
+            of CPUs allowed by the kernel configuration (kernel_max
             above). [~cpu_online_mask + cpus >= NR_CPUS]
 
-online:     cpus that are online and being scheduled [cpu_online_mask]
+online:     CPUs that are online and being scheduled [cpu_online_mask]
 
-possible:   cpus that have been allocated resources and can be
+possible:   CPUs that have been allocated resources and can be
             brought online if they are present. [cpu_possible_mask]
 
-present:    cpus that have been identified as being present in the
+present:    CPUs that have been identified as being present in the
             system. [cpu_present_mask]
 
 The format for the above output is compatible with cpulist_parse()
 [see <linux/cpumask.h>].  Some examples follow.
 
-In this example, there are 64 cpus in the system but cpus 32-63 exceed
+In this example, there are 64 CPUs in the system but cpus 32-63 exceed
 the kernel max which is limited to 0..31 by the NR_CPUS config option
-being 32.  Note also that cpus 2 and 4-31 are not online but could be
+being 32.  Note also that CPUs 2 and 4-31 are not online but could be
 brought online as they are both present and possible.
 
 kernel_max: 31
@@ -67,8 +80,8 @@ brought online as they are both present and possible.
 present: 0-31
 
 In this example, the NR_CPUS config option is 128, but the kernel was
-started with possible_cpus=144.  There are 4 cpus in the system and cpu2
-was manually taken offline (and is the only cpu that can be brought
+started with possible_cpus=144.  There are 4 CPUs in the system and cpu2
+was manually taken offline (and is the only CPU that can be brought
 online.)
 
 kernel_max: 127
@@ -78,4 +91,4 @@ online.)
 present: 0-3
 
 See cpu-hotplug.txt for the possible_cpus=NUM kernel start parameter
-as well as more information on the various cpumask's.
+as well as more information on the various cpumasks.
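
The cpulist format shown in these examples ("0-31", "0-1,3", ...) is what cpulist_parse() accepts on the kernel side. A hedged userspace sketch of the same grammar (a hypothetical helper, not a kernel interface) shows how little is involved:

/* Hypothetical userspace counterpart of cpulist_parse(): expand a
 * list such as "0-1,3" and print each CPU number it contains. */
#include <stdio.h>
#include <stdlib.h>

static int parse_cpulist(const char *s)
{
        while (*s) {
                char *end;
                long a, b;

                if (*s == ',' || *s == ' ' || *s == '\n') {
                        s++;
                        continue;
                }
                a = strtol(s, &end, 10);
                if (end == s)
                        return -1;      /* malformed list */
                b = a;
                if (*end == '-') {
                        b = strtol(end + 1, &end, 10);
                        if (b < a)
                                return -1;
                }
                for (; a <= b; a++)
                        printf("cpu%ld\n", a);
                s = end;
        }
        return 0;
}

int main(void)
{
        return parse_cpulist("0-1,3") ? 1 : 0;  /* prints cpu0 cpu1 cpu3 */
}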
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 29dd8489ffec..ecdc19a299b2 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -561,7 +561,7 @@ config HPAPCI
 
 config MVME147_SCC
         bool "SCC support for MVME147 serial ports"
-        depends on MVME147
+        depends on MVME147 && BROKEN
         help
           This is the driver for the serial ports on the Motorola MVME147
           boards. Everyone using one of these boards should say Y here.
@@ -576,14 +576,14 @@ config SERIAL167
 
 config MVME162_SCC
         bool "SCC support for MVME162 serial ports"
-        depends on MVME16x
+        depends on MVME16x && BROKEN
         help
           This is the driver for the serial ports on the Motorola MVME162 and
           172 boards. Everyone using one of these boards should say Y here.
 
 config BVME6000_SCC
         bool "SCC support for BVME6000 serial ports"
-        depends on BVME6000
+        depends on BVME6000 && BROKEN
         help
           This is the driver for the serial ports on the BVME4000 and BVME6000
           boards from BVM Ltd. Everyone using one of these boards should say
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index 24b1244aadb9..f23961ada7fb 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -78,7 +78,7 @@ cputime64_to_jiffies64(cputime64_t cputime)
 static inline unsigned int
 cputime_to_msecs(const cputime_t cputime)
 {
-        return __div(cputime, 4096000);
+        return cputime_div(cputime, 4096000);
 }
 
 static inline cputime_t
@@ -160,7 +160,7 @@ cputime_to_timeval(const cputime_t cputime, struct timeval *value)
 static inline clock_t
 cputime_to_clock_t(cputime_t cputime)
 {
-        return __div(cputime, 4096000000ULL / USER_HZ);
+        return cputime_div(cputime, 4096000000ULL / USER_HZ);
 }
 
 static inline cputime_t
@@ -175,7 +175,7 @@ clock_t_to_cputime(unsigned long x)
 static inline clock_t
 cputime64_to_clock_t(cputime64_t cputime)
 {
-        return __div(cputime, 4096000000ULL / USER_HZ);
+        return cputime_div(cputime, 4096000000ULL / USER_HZ);
 }
 
 struct s390_idle_data {
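
The divisors in this file encode the s390 cputime unit: 4096000000 cputime units per second, hence /4096000 for milliseconds and /(4096000000/USER_HZ) for clock_t. A small standalone check of that arithmetic (illustrative only; cputime_div() itself is the arch helper being adopted above, and USER_HZ=100 is an assumption for the example):

/* Standalone check of the unit arithmetic implied above: with
 * 4096000000 cputime units per second, dividing by 4096000 yields
 * milliseconds, and dividing by 4096000000/USER_HZ yields clock_t. */
#include <stdio.h>

#define USER_HZ 100ULL  /* assumption for the example */

int main(void)
{
        unsigned long long per_sec = 4096000000ULL;
        unsigned long long cputime = 2 * per_sec;       /* two CPU-seconds */

        printf("msecs  : %llu\n", cputime / 4096000);             /* 2000 */
        printf("clock_t: %llu\n", cputime / (per_sec / USER_HZ)); /* 200 */
        return 0;
}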
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index ee57a42e6e93..4890ac6d7faa 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -1595,10 +1595,9 @@ static void stop_run(struct shutdown_trigger *trigger)
 {
         if (strcmp(trigger->name, ON_PANIC_STR) == 0)
                 disabled_wait((unsigned long) __builtin_return_address(0));
-        else {
-                signal_processor(smp_processor_id(), sigp_stop);
+        while (signal_processor(smp_processor_id(), sigp_stop) == sigp_busy)
+                cpu_relax();
         for (;;);
-        }
 }
 
 static struct shutdown_action stop_action = {SHUTDOWN_ACTION_STOP_STR,
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index c932caa5e850..93e52039321b 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -76,7 +76,6 @@ static int cpu_stopped(int cpu)
         __u32 status;
 
         switch (signal_processor_ps(&status, 0, cpu, sigp_sense)) {
-        case sigp_order_code_accepted:
         case sigp_status_stored:
                 /* Check for stopped and check stop state */
                 if (status & 0x50)
@@ -638,6 +637,8 @@ void __cpu_die(unsigned int cpu)
         /* Wait until target cpu is down */
         while (!cpu_stopped(cpu))
                 cpu_relax();
+        while (signal_processor_p(0, cpu, sigp_set_prefix) == sigp_busy)
+                udelay(10);
         smp_free_lowcore(cpu);
         pr_info("Processor %d stopped\n", cpu);
 }
@@ -645,8 +646,8 @@ void __cpu_die(unsigned int cpu)
 void cpu_die(void)
 {
         idle_task_exit();
-        signal_processor(smp_processor_id(), sigp_stop);
-        BUG();
+        while (signal_processor(smp_processor_id(), sigp_stop) == sigp_busy)
+                cpu_relax();
         for (;;);
 }
 
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S
index 7c8653e27db6..0c26cc1898ec 100644
--- a/arch/s390/kernel/swsusp_asm64.S
+++ b/arch/s390/kernel/swsusp_asm64.S
@@ -199,6 +199,7 @@ pgm_check_entry:
         brc     2,4b    /* busy, try again */
 5:
         sigp    %r9,%r2,__SIGP_STOP     /* stop resume (current) CPU */
+        brc     2,5b    /* busy, try again */
 6:      j       6b
 
 restart_suspend:
@@ -206,6 +207,7 @@ restart_suspend:
         llgh    %r2,0(%r1)
 7:
         sigp    %r9,%r2,__SIGP_SENSE    /* Wait for resume CPU */
+        brc     8,7b    /* accepted, status 0, still running */
         brc     2,7b    /* busy, try again */
         tmll    %r9,0x40        /* Test if resume CPU is stopped */
         jz      7b
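
All three s390 changes above share one shape: a sigp order that used to be issued once is now reissued for as long as the addressed CPU answers busy (condition code 2 in the assembly hunks). Stripped of the architecture specifics, the pattern looks like the sketch below; try_signal() and the busy counter are stand-ins, not kernel interfaces.

/* Illustrative shape of the retry-while-busy fix, outside the kernel.
 * try_signal() simulates a CPU that reports busy a few times before
 * accepting an order; the kernel loops call cpu_relax()/udelay(). */
#include <stdio.h>

enum sig_rc { SIG_ACCEPTED, SIG_BUSY };

static enum sig_rc try_signal(void)
{
        static int busy_replies = 3;    /* pretend: busy, busy, busy, ok */
        return busy_replies-- > 0 ? SIG_BUSY : SIG_ACCEPTED;
}

int main(void)
{
        int tries = 0;

        while (try_signal() == SIG_BUSY)
                tries++;        /* the kernel relaxes or delays here */
        printf("order accepted after %d busy replies\n", tries);
        return 0;
}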
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index b88db6d1dc65..6b5cc4fba59f 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -26,14 +26,10 @@
 #include <linux/async_tx.h>
 
 /**
- * scribble - space to hold throwaway P buffer for synchronous gen_syndrome
+ * pq_scribble_page - space to hold throwaway P or Q buffer for
+ * synchronous gen_syndrome
  */
-static struct page *scribble;
-
-static bool is_raid6_zero_block(struct page *p)
-{
-        return p == (void *) raid6_empty_zero_page;
-}
+static struct page *pq_scribble_page;
 
 /* the struct page *blocks[] parameter passed to async_gen_syndrome()
  * and async_syndrome_val() contains the 'P' destination address at
@@ -83,7 +79,7 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
          * sources and update the coefficients accordingly
          */
         for (i = 0, idx = 0; i < src_cnt; i++) {
-                if (is_raid6_zero_block(blocks[i]))
+                if (blocks[i] == NULL)
                         continue;
                 dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len,
                                             DMA_TO_DEVICE);
@@ -160,9 +156,9 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
         srcs = (void **) blocks;
 
         for (i = 0; i < disks; i++) {
-                if (is_raid6_zero_block(blocks[i])) {
+                if (blocks[i] == NULL) {
                         BUG_ON(i > disks - 3); /* P or Q can't be zero */
-                        srcs[i] = blocks[i];
+                        srcs[i] = (void*)raid6_empty_zero_page;
                 } else
                         srcs[i] = page_address(blocks[i]) + offset;
         }
@@ -186,10 +182,14 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
  * blocks[disks-1] to NULL.  When P or Q is omitted 'len' must be <=
  * PAGE_SIZE as a temporary buffer of this size is used in the
  * synchronous path.  'disks' always accounts for both destination
- * buffers.
+ * buffers.  If any source buffers (blocks[i] where i < disks - 2) are
+ * set to NULL those buffers will be replaced with the raid6_zero_page
+ * in the synchronous path and omitted in the hardware-asynchronous
+ * path.
  *
  * 'blocks' note: if submit->scribble is NULL then the contents of
- * 'blocks' may be overridden
+ * 'blocks' may be overwritten to perform address conversions
+ * (dma_map_page() or page_address()).
  */
 struct dma_async_tx_descriptor *
 async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
@@ -227,11 +227,11 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
         async_tx_quiesce(&submit->depend_tx);
 
         if (!P(blocks, disks)) {
-                P(blocks, disks) = scribble;
+                P(blocks, disks) = pq_scribble_page;
                 BUG_ON(len + offset > PAGE_SIZE);
         }
         if (!Q(blocks, disks)) {
-                Q(blocks, disks) = scribble;
+                Q(blocks, disks) = pq_scribble_page;
                 BUG_ON(len + offset > PAGE_SIZE);
         }
         do_sync_gen_syndrome(blocks, offset, disks, len, submit);
@@ -265,8 +265,10 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
                                                       len);
         struct dma_device *device = chan ? chan->device : NULL;
         struct dma_async_tx_descriptor *tx;
+        unsigned char coefs[disks-2];
         enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
         dma_addr_t *dma_src = NULL;
+        int src_cnt = 0;
 
         BUG_ON(disks < 4);
 
@@ -285,22 +287,32 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
                          __func__, disks, len);
                 if (!P(blocks, disks))
                         dma_flags |= DMA_PREP_PQ_DISABLE_P;
+                else
+                        pq[0] = dma_map_page(dev, P(blocks, disks),
+                                             offset, len,
+                                             DMA_TO_DEVICE);
                 if (!Q(blocks, disks))
                         dma_flags |= DMA_PREP_PQ_DISABLE_Q;
+                else
+                        pq[1] = dma_map_page(dev, Q(blocks, disks),
+                                             offset, len,
+                                             DMA_TO_DEVICE);
+
                 if (submit->flags & ASYNC_TX_FENCE)
                         dma_flags |= DMA_PREP_FENCE;
-                for (i = 0; i < disks; i++)
+                for (i = 0; i < disks-2; i++)
                         if (likely(blocks[i])) {
-                                BUG_ON(is_raid6_zero_block(blocks[i]));
-                                dma_src[i] = dma_map_page(dev, blocks[i],
-                                                          offset, len,
-                                                          DMA_TO_DEVICE);
+                                dma_src[src_cnt] = dma_map_page(dev, blocks[i],
+                                                                offset, len,
+                                                                DMA_TO_DEVICE);
+                                coefs[src_cnt] = raid6_gfexp[i];
+                                src_cnt++;
                         }
 
                 for (;;) {
                         tx = device->device_prep_dma_pq_val(chan, pq, dma_src,
-                                                            disks - 2,
-                                                            raid6_gfexp,
+                                                            src_cnt,
+                                                            coefs,
                                                             len, pqres,
                                                             dma_flags);
                         if (likely(tx))
@@ -373,9 +385,9 @@ EXPORT_SYMBOL_GPL(async_syndrome_val);
 
 static int __init async_pq_init(void)
 {
-        scribble = alloc_page(GFP_KERNEL);
+        pq_scribble_page = alloc_page(GFP_KERNEL);
 
-        if (scribble)
+        if (pq_scribble_page)
                 return 0;
 
         pr_err("%s: failed to allocate required spare page\n", __func__);
@@ -385,7 +397,7 @@ static int __init async_pq_init(void)
 
 static void __exit async_pq_exit(void)
 {
-        put_page(scribble);
+        put_page(pq_scribble_page);
 }
 
 module_init(async_pq_init);
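
The convention this file now follows — sources live in blocks[0..disks-3], a NULL entry is a missing (all-zero) source, P sits at blocks[disks-2] and Q at blocks[disks-1] — is visible with the XOR (P) half of the syndrome alone: a zero page contributes nothing to an XOR, so skipping a NULL source is equivalent to substituting the zero page. The following is an illustrative model of that convention, not the kernel code:

/* Illustrative model of the blocks[] convention used above:
 * indices 0..disks-3 are sources (NULL = missing, treated as zero),
 * blocks[disks-2] is P.  Only the XOR (P) half is modeled here. */
#include <stdio.h>
#include <string.h>

#define DISKS 6
#define LEN   4

int main(void)
{
        unsigned char d0[LEN] = {1, 2, 3, 4};
        unsigned char d2[LEN] = {8, 8, 8, 8};
        unsigned char p[LEN];
        /* sources 1 and 3 are "holes" (NULL), Q is omitted entirely */
        unsigned char *blocks[DISKS] = {d0, NULL, d2, NULL, p, NULL};
        int i, j;

        memset(p, 0, LEN);
        for (i = 0; i < DISKS - 2; i++) {
                if (!blocks[i])
                        continue;       /* a zero page contributes nothing */
                for (j = 0; j < LEN; j++)
                        p[j] ^= blocks[i][j];
        }
        for (j = 0; j < LEN; j++)
                printf("%02x ", p[j]);
        printf("\n");   /* 09 0a 0b 0c = d0 ^ d2 */
        return 0;
}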
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
index 6d73dde4786d..943f2abac9b4 100644
--- a/crypto/async_tx/async_raid6_recov.c
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -131,8 +131,8 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
 }
 
 static struct dma_async_tx_descriptor *
-__2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks,
-                struct async_submit_ctl *submit)
+__2data_recov_4(int disks, size_t bytes, int faila, int failb,
+                struct page **blocks, struct async_submit_ctl *submit)
 {
         struct dma_async_tx_descriptor *tx = NULL;
         struct page *p, *q, *a, *b;
@@ -143,8 +143,8 @@ __2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks,
         void *cb_param = submit->cb_param;
         void *scribble = submit->scribble;
 
-        p = blocks[4-2];
-        q = blocks[4-1];
+        p = blocks[disks-2];
+        q = blocks[disks-1];
 
         a = blocks[faila];
         b = blocks[failb];
@@ -170,8 +170,8 @@ __2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks,
 }
 
 static struct dma_async_tx_descriptor *
-__2data_recov_5(size_t bytes, int faila, int failb, struct page **blocks,
-                struct async_submit_ctl *submit)
+__2data_recov_5(int disks, size_t bytes, int faila, int failb,
+                struct page **blocks, struct async_submit_ctl *submit)
 {
         struct dma_async_tx_descriptor *tx = NULL;
         struct page *p, *q, *g, *dp, *dq;
@@ -181,21 +181,22 @@ __2data_recov_5(size_t bytes, int faila, int failb, struct page **blocks,
         dma_async_tx_callback cb_fn = submit->cb_fn;
         void *cb_param = submit->cb_param;
         void *scribble = submit->scribble;
-        int uninitialized_var(good);
-        int i;
+        int good_srcs, good, i;
 
-        for (i = 0; i < 3; i++) {
+        good_srcs = 0;
+        good = -1;
+        for (i = 0; i < disks-2; i++) {
+                if (blocks[i] == NULL)
+                        continue;
                 if (i == faila || i == failb)
                         continue;
-                else {
-                        good = i;
-                        break;
-                }
+                good = i;
+                good_srcs++;
         }
-        BUG_ON(i >= 3);
+        BUG_ON(good_srcs > 1);
 
-        p = blocks[5-2];
-        q = blocks[5-1];
+        p = blocks[disks-2];
+        q = blocks[disks-1];
         g = blocks[good];
 
         /* Compute syndrome with zero for the missing data pages
@@ -263,10 +264,10 @@ __2data_recov_n(int disks, size_t bytes, int faila, int failb,
          * delta p and delta q
          */
         dp = blocks[faila];
-        blocks[faila] = (void *)raid6_empty_zero_page;
+        blocks[faila] = NULL;
         blocks[disks-2] = dp;
         dq = blocks[failb];
-        blocks[failb] = (void *)raid6_empty_zero_page;
+        blocks[failb] = NULL;
         blocks[disks-1] = dq;
 
         init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
@@ -323,6 +324,8 @@ struct dma_async_tx_descriptor *
 async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
                         struct page **blocks, struct async_submit_ctl *submit)
 {
+        int non_zero_srcs, i;
+
         BUG_ON(faila == failb);
         if (failb < faila)
                 swap(faila, failb);
@@ -334,11 +337,13 @@ async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
          */
         if (!submit->scribble) {
                 void **ptrs = (void **) blocks;
-                int i;
 
                 async_tx_quiesce(&submit->depend_tx);
                 for (i = 0; i < disks; i++)
-                        ptrs[i] = page_address(blocks[i]);
+                        if (blocks[i] == NULL)
+                                ptrs[i] = (void *) raid6_empty_zero_page;
+                        else
+                                ptrs[i] = page_address(blocks[i]);
 
                 raid6_2data_recov(disks, bytes, faila, failb, ptrs);
 
@@ -347,19 +352,30 @@ async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
                 return NULL;
         }
 
-        switch (disks) {
-        case 4:
+        non_zero_srcs = 0;
+        for (i = 0; i < disks-2 && non_zero_srcs < 4; i++)
+                if (blocks[i])
+                        non_zero_srcs++;
+        switch (non_zero_srcs) {
+        case 0:
+        case 1:
+                /* There must be at least 2 sources - the failed devices. */
+                BUG();
+
+        case 2:
                 /* dma devices do not uniformly understand a zero source pq
                  * operation (in contrast to the synchronous case), so
-                 * explicitly handle the 4 disk special case
+                 * explicitly handle the special case of a 4 disk array with
+                 * both data disks missing.
                  */
-                return __2data_recov_4(bytes, faila, failb, blocks, submit);
-        case 5:
+                return __2data_recov_4(disks, bytes, faila, failb, blocks, submit);
+        case 3:
                 /* dma devices do not uniformly understand a single
                  * source pq operation (in contrast to the synchronous
-                 * case), so explicitly handle the 5 disk special case
+                 * case), so explicitly handle the special case of a 5 disk
+                 * array with 2 of 3 data disks missing.
                  */
-                return __2data_recov_5(bytes, faila, failb, blocks, submit);
+                return __2data_recov_5(disks, bytes, faila, failb, blocks, submit);
         default:
                 return __2data_recov_n(disks, bytes, faila, failb, blocks, submit);
         }
@@ -385,6 +401,7 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
         dma_async_tx_callback cb_fn = submit->cb_fn;
         void *cb_param = submit->cb_param;
         void *scribble = submit->scribble;
+        int good_srcs, good, i;
         struct page *srcs[2];
 
         pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);
@@ -394,11 +411,13 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
          */
         if (!scribble) {
                 void **ptrs = (void **) blocks;
-                int i;
 
                 async_tx_quiesce(&submit->depend_tx);
                 for (i = 0; i < disks; i++)
-                        ptrs[i] = page_address(blocks[i]);
+                        if (blocks[i] == NULL)
+                                ptrs[i] = (void*)raid6_empty_zero_page;
+                        else
+                                ptrs[i] = page_address(blocks[i]);
 
                 raid6_datap_recov(disks, bytes, faila, ptrs);
 
@@ -407,6 +426,20 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
                 return NULL;
         }
 
+        good_srcs = 0;
+        good = -1;
+        for (i = 0; i < disks-2; i++) {
+                if (i == faila)
+                        continue;
+                if (blocks[i]) {
+                        good = i;
+                        good_srcs++;
+                        if (good_srcs > 1)
+                                break;
+                }
+        }
+        BUG_ON(good_srcs == 0);
+
         p = blocks[disks-2];
         q = blocks[disks-1];
 
@@ -414,14 +447,13 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
          * Use the dead data page as temporary storage for delta q
          */
         dq = blocks[faila];
-        blocks[faila] = (void *)raid6_empty_zero_page;
+        blocks[faila] = NULL;
         blocks[disks-1] = dq;
 
-        /* in the 4 disk case we only need to perform a single source
-         * multiplication
+        /* in the 4-disk case we only need to perform a single source
+         * multiplication with the one good data block.
          */
-        if (disks == 4) {
-                int good = faila == 0 ? 1 : 0;
+        if (good_srcs == 1) {
                 struct page *g = blocks[good];
 
                 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
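
The 2-data recovery entry point above now dispatches on the number of populated sources rather than on the raw disk count, so degenerate arrays whose other members are NULL (known zero) still hit the small special cases. A standalone sketch of just that selection rule (names illustrative, not the kernel code):

/* Standalone sketch of the dispatch rule introduced above: count
 * non-NULL sources (capped at 4, since four or more always take the
 * general path) and pick a recovery strategy. */
#include <stdio.h>

static const char *pick_recovery(void *blocks[], int disks)
{
        int non_zero_srcs = 0, i;

        for (i = 0; i < disks - 2 && non_zero_srcs < 4; i++)
                if (blocks[i])
                        non_zero_srcs++;

        switch (non_zero_srcs) {
        case 0:
        case 1:
                return "BUG: fewer than the two failed sources";
        case 2:
                return "__2data_recov_4 (both data disks missing)";
        case 3:
                return "__2data_recov_5 (2 of 3 data disks missing)";
        default:
                return "__2data_recov_n (general case)";
        }
}

int main(void)
{
        int x;
        /* six slots, but only two populated sources: the failed pair */
        void *blocks[6] = {&x, NULL, &x, NULL, &x, &x};

        printf("%s\n", pick_recovery(blocks, 6));
        return 0;
}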
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index b459a9034aac..79182dcb91b7 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -44,20 +44,23 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
         void *cb_param_orig = submit->cb_param;
         enum async_tx_flags flags_orig = submit->flags;
         enum dma_ctrl_flags dma_flags;
-        int xor_src_cnt;
+        int xor_src_cnt = 0;
         dma_addr_t dma_dest;
 
         /* map the dest bidrectional in case it is re-used as a source */
         dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_BIDIRECTIONAL);
         for (i = 0; i < src_cnt; i++) {
                 /* only map the dest once */
+                if (!src_list[i])
+                        continue;
                 if (unlikely(src_list[i] == dest)) {
-                        dma_src[i] = dma_dest;
+                        dma_src[xor_src_cnt++] = dma_dest;
                         continue;
                 }
-                dma_src[i] = dma_map_page(dma->dev, src_list[i], offset,
-                                          len, DMA_TO_DEVICE);
+                dma_src[xor_src_cnt++] = dma_map_page(dma->dev, src_list[i], offset,
+                                                      len, DMA_TO_DEVICE);
         }
+        src_cnt = xor_src_cnt;
 
         while (src_cnt) {
                 submit->flags = flags_orig;
@@ -123,7 +126,7 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
             int src_cnt, size_t len, struct async_submit_ctl *submit)
 {
         int i;
-        int xor_src_cnt;
+        int xor_src_cnt = 0;
         int src_off = 0;
         void *dest_buf;
         void **srcs;
@@ -135,8 +138,9 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
 
         /* convert to buffer pointers */
         for (i = 0; i < src_cnt; i++)
-                srcs[i] = page_address(src_list[i]) + offset;
-
+                if (src_list[i])
+                        srcs[xor_src_cnt++] = page_address(src_list[i]) + offset;
+        src_cnt = xor_src_cnt;
         /* set destination address */
         dest_buf = page_address(dest) + offset;
 
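
Both xor paths now compact their source lists in place, so NULL entries never reach the DMA engine or the synchronous loop. The compaction shape, in isolation (illustrative, not kernel code):

/* The compaction shape used in both xor paths above: copy non-NULL
 * entries down and shrink the count. */
#include <stdio.h>

int main(void)
{
        int a = 1, b = 2, c = 3;
        void *src[5] = {&a, NULL, &b, NULL, &c};
        int src_cnt = 5, xor_src_cnt = 0, i;

        for (i = 0; i < src_cnt; i++)
                if (src[i])
                        src[xor_src_cnt++] = src[i];
        src_cnt = xor_src_cnt;

        printf("%d dense sources\n", src_cnt); /* 3 */
        return 0;
}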
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 973bf2ad4e0d..63c143e54a57 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -689,15 +689,19 @@ int bus_add_driver(struct device_driver *drv)
                 printk(KERN_ERR "%s: driver_add_attrs(%s) failed\n",
                         __func__, drv->name);
         }
-        error = add_bind_files(drv);
-        if (error) {
-                /* Ditto */
-                printk(KERN_ERR "%s: add_bind_files(%s) failed\n",
-                        __func__, drv->name);
+
+        if (!drv->suppress_bind_attrs) {
+                error = add_bind_files(drv);
+                if (error) {
+                        /* Ditto */
+                        printk(KERN_ERR "%s: add_bind_files(%s) failed\n",
+                                __func__, drv->name);
+                }
         }
 
         kobject_uevent(&priv->kobj, KOBJ_ADD);
         return 0;
+
 out_unregister:
         kfree(drv->p);
         drv->p = NULL;
@@ -720,7 +724,8 @@ void bus_remove_driver(struct device_driver *drv)
         if (!drv->bus)
                 return;
 
-        remove_bind_files(drv);
+        if (!drv->suppress_bind_attrs)
+                remove_bind_files(drv);
         driver_remove_attrs(drv->bus, drv);
         driver_remove_file(drv, &driver_attr_uevent);
         klist_remove(&drv->p->knode_bus);
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index ed2ebd3c287d..f367885a7646 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -236,7 +236,7 @@ int driver_register(struct device_driver *drv)
                 put_driver(other);
                 printk(KERN_ERR "Error: Driver '%s' is already registered, "
                         "aborting...\n", drv->name);
-                return -EEXIST;
+                return -EBUSY;
         }
 
         ret = bus_add_driver(drv);
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index ed156a13aa40..4fa954b07ac4 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -521,11 +521,15 @@ int __init_or_module platform_driver_probe(struct platform_driver *drv,
 {
         int retval, code;
 
+        /* make sure driver won't have bind/unbind attributes */
+        drv->driver.suppress_bind_attrs = true;
+
         /* temporary section violation during probe() */
         drv->probe = probe;
         retval = code = platform_driver_register(drv);
 
-        /* Fixup that section violation, being paranoid about code scanning
+        /*
+         * Fixup that section violation, being paranoid about code scanning
          * the list of drivers in order to probe new devices.  Check to see
          * if the probe was successful, and make sure any forced probes of
          * new devices fail.
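
The practical effect of the platform change is that any driver registered through platform_driver_probe() loses its sysfs bind/unbind files automatically: re-binding later could invoke a probe routine that was discarded with .init.text. A hedged sketch of a caller (driver name and empty probe body are hypothetical):

/* Hypothetical platform driver built around platform_driver_probe().
 * After this patch it gets suppress_bind_attrs set for free, so no
 * bind/unbind attributes appear under its sysfs directory. */
#include <linux/module.h>
#include <linux/platform_device.h>

static int __init demo_probe(struct platform_device *pdev)
{
        return 0;       /* claim the device; real setup goes here */
}

static struct platform_driver demo_driver = {
        .driver = {
                .name  = "demo",
                .owner = THIS_MODULE,
        },
};

static int __init demo_init(void)
{
        /* probe() is passed separately and may live in .init.text */
        return platform_driver_probe(&demo_driver, demo_probe);
}

static void __exit demo_exit(void)
{
        platform_driver_unregister(&demo_driver);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");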
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 08a6f50ae791..6aad99ec4e0f 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -323,7 +323,7 @@ config SPECIALIX
 
 config SX
         tristate "Specialix SX (and SI) card support"
-        depends on SERIAL_NONSTANDARD && (PCI || EISA || ISA)
+        depends on SERIAL_NONSTANDARD && (PCI || EISA || ISA) && BROKEN
         help
           This is a driver for the SX and SI multiport serial cards.
           Please read the file <file:Documentation/serial/sx.txt> for details.
@@ -334,7 +334,7 @@ config SX
 
 config RIO
         tristate "Specialix RIO system support"
-        depends on SERIAL_NONSTANDARD
+        depends on SERIAL_NONSTANDARD && BROKEN
         help
           This is a driver for the Specialix RIO, a smart serial card which
           drives an outboard box that can support up to 128 ports.  Product
@@ -395,7 +395,7 @@ config NOZOMI
 
 config A2232
         tristate "Commodore A2232 serial support (EXPERIMENTAL)"
-        depends on EXPERIMENTAL && ZORRO && BROKEN_ON_SMP
+        depends on EXPERIMENTAL && ZORRO && BROKEN
         ---help---
           This option supports the 2232 7-port serial card shipped with the
           Amiga 2000 and other Zorro-bus machines, dating from 1989.  At
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 1dc4185bd781..e355e7f6a536 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -46,7 +46,7 @@ obj-$(CONFIG_DM_LOG_USERSPACE) += dm-log-userspace.o
 obj-$(CONFIG_DM_ZERO)           += dm-zero.o
 
 quiet_cmd_unroll = UNROLL  $@
-      cmd_unroll = $(PERL) $(srctree)/$(src)/unroll.pl $(UNROLL) \
+      cmd_unroll = $(AWK) -f$(srctree)/$(src)/unroll.awk -vN=$(UNROLL) \
                    < $< > $@ || ( rm -f $@ && exit 1 )
 
 ifeq ($(CONFIG_ALTIVEC),y)
@@ -59,56 +59,56 @@ endif
 
 targets += raid6int1.c
 $(obj)/raid6int1.c:   UNROLL := 1
-$(obj)/raid6int1.c:   $(src)/raid6int.uc $(src)/unroll.pl FORCE
+$(obj)/raid6int1.c:   $(src)/raid6int.uc $(src)/unroll.awk FORCE
         $(call if_changed,unroll)
 
 targets += raid6int2.c
 $(obj)/raid6int2.c:   UNROLL := 2
-$(obj)/raid6int2.c:   $(src)/raid6int.uc $(src)/unroll.pl FORCE
+$(obj)/raid6int2.c:   $(src)/raid6int.uc $(src)/unroll.awk FORCE
         $(call if_changed,unroll)
 
 targets += raid6int4.c
 $(obj)/raid6int4.c:   UNROLL := 4
-$(obj)/raid6int4.c:   $(src)/raid6int.uc $(src)/unroll.pl FORCE
+$(obj)/raid6int4.c:   $(src)/raid6int.uc $(src)/unroll.awk FORCE
         $(call if_changed,unroll)
 
 targets += raid6int8.c
 $(obj)/raid6int8.c:   UNROLL := 8
-$(obj)/raid6int8.c:   $(src)/raid6int.uc $(src)/unroll.pl FORCE
+$(obj)/raid6int8.c:   $(src)/raid6int.uc $(src)/unroll.awk FORCE
         $(call if_changed,unroll)
 
 targets += raid6int16.c
 $(obj)/raid6int16.c:  UNROLL := 16
-$(obj)/raid6int16.c:  $(src)/raid6int.uc $(src)/unroll.pl FORCE
+$(obj)/raid6int16.c:  $(src)/raid6int.uc $(src)/unroll.awk FORCE
         $(call if_changed,unroll)
 
 targets += raid6int32.c
 $(obj)/raid6int32.c:  UNROLL := 32
-$(obj)/raid6int32.c:  $(src)/raid6int.uc $(src)/unroll.pl FORCE
+$(obj)/raid6int32.c:  $(src)/raid6int.uc $(src)/unroll.awk FORCE
         $(call if_changed,unroll)
 
 CFLAGS_raid6altivec1.o += $(altivec_flags)
 targets += raid6altivec1.c
 $(obj)/raid6altivec1.c:   UNROLL := 1
-$(obj)/raid6altivec1.c:   $(src)/raid6altivec.uc $(src)/unroll.pl FORCE
+$(obj)/raid6altivec1.c:   $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
         $(call if_changed,unroll)
 
 CFLAGS_raid6altivec2.o += $(altivec_flags)
 targets += raid6altivec2.c
 $(obj)/raid6altivec2.c:   UNROLL := 2
-$(obj)/raid6altivec2.c:   $(src)/raid6altivec.uc $(src)/unroll.pl FORCE
+$(obj)/raid6altivec2.c:   $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
         $(call if_changed,unroll)
 
 CFLAGS_raid6altivec4.o += $(altivec_flags)
 targets += raid6altivec4.c
 $(obj)/raid6altivec4.c:   UNROLL := 4
-$(obj)/raid6altivec4.c:   $(src)/raid6altivec.uc $(src)/unroll.pl FORCE
+$(obj)/raid6altivec4.c:   $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
         $(call if_changed,unroll)
 
 CFLAGS_raid6altivec8.o += $(altivec_flags)
 targets += raid6altivec8.c
 $(obj)/raid6altivec8.c:   UNROLL := 8
-$(obj)/raid6altivec8.c:   $(src)/raid6altivec.uc $(src)/unroll.pl FORCE
+$(obj)/raid6altivec8.c:   $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
         $(call if_changed,unroll)
 
 quiet_cmd_mktable = TABLE   $@
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 6986b0059d23..60e2b322db11 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1624,10 +1624,11 @@ int bitmap_create(mddev_t *mddev)
         bitmap->offset = mddev->bitmap_offset;
         if (file) {
                 get_file(file);
-                do_sync_mapping_range(file->f_mapping, 0, LLONG_MAX,
-                                      SYNC_FILE_RANGE_WAIT_BEFORE |
-                                      SYNC_FILE_RANGE_WRITE |
-                                      SYNC_FILE_RANGE_WAIT_AFTER);
+                /* As future accesses to this file will use bmap,
+                 * and bypass the page cache, we must sync the file
+                 * first.
+                 */
+                vfs_fsync(file, file->f_dentry, 1);
         }
         /* read superblock from bitmap file (this sets bitmap->chunksize) */
         err = bitmap_read_sb(bitmap);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 26ba42a79129..10eb1fce975e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2631,7 +2631,7 @@ static void analyze_sbs(mddev_t * mddev)
                         rdev->desc_nr = i++;
                         rdev->raid_disk = rdev->desc_nr;
                         set_bit(In_sync, &rdev->flags);
-                } else if (rdev->raid_disk >= mddev->raid_disks) {
+                } else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) {
                         rdev->raid_disk = -1;
                         clear_bit(In_sync, &rdev->flags);
                 }
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index d1b9bd5fd4f6..a053423785c9 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -64,7 +64,7 @@ static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data) | |||
64 | 64 | ||
65 | /* allocate a r1bio with room for raid_disks entries in the bios array */ | 65 | /* allocate a r1bio with room for raid_disks entries in the bios array */ |
66 | r1_bio = kzalloc(size, gfp_flags); | 66 | r1_bio = kzalloc(size, gfp_flags); |
67 | if (!r1_bio) | 67 | if (!r1_bio && pi->mddev) |
68 | unplug_slaves(pi->mddev); | 68 | unplug_slaves(pi->mddev); |
69 | 69 | ||
70 | return r1_bio; | 70 | return r1_bio; |
@@ -1683,6 +1683,7 @@ static void raid1d(mddev_t *mddev) | |||
1683 | generic_make_request(bio); | 1683 | generic_make_request(bio); |
1684 | } | 1684 | } |
1685 | } | 1685 | } |
1686 | cond_resched(); | ||
1686 | } | 1687 | } |
1687 | if (unplug) | 1688 | if (unplug) |
1688 | unplug_slaves(mddev); | 1689 | unplug_slaves(mddev); |
@@ -1978,13 +1979,14 @@ static int run(mddev_t *mddev) | |||
1978 | conf->poolinfo = kmalloc(sizeof(*conf->poolinfo), GFP_KERNEL); | 1979 | conf->poolinfo = kmalloc(sizeof(*conf->poolinfo), GFP_KERNEL); |
1979 | if (!conf->poolinfo) | 1980 | if (!conf->poolinfo) |
1980 | goto out_no_mem; | 1981 | goto out_no_mem; |
1981 | conf->poolinfo->mddev = mddev; | 1982 | conf->poolinfo->mddev = NULL; |
1982 | conf->poolinfo->raid_disks = mddev->raid_disks; | 1983 | conf->poolinfo->raid_disks = mddev->raid_disks; |
1983 | conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc, | 1984 | conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc, |
1984 | r1bio_pool_free, | 1985 | r1bio_pool_free, |
1985 | conf->poolinfo); | 1986 | conf->poolinfo); |
1986 | if (!conf->r1bio_pool) | 1987 | if (!conf->r1bio_pool) |
1987 | goto out_no_mem; | 1988 | goto out_no_mem; |
1989 | conf->poolinfo->mddev = mddev; | ||
1988 | 1990 | ||
1989 | spin_lock_init(&conf->device_lock); | 1991 | spin_lock_init(&conf->device_lock); |
1990 | mddev->queue->queue_lock = &conf->device_lock; | 1992 | mddev->queue->queue_lock = &conf->device_lock; |
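The ordering above is the point of the fix: poolinfo->mddev is left NULL while mempool_create() pre-allocates its reserve through r1bio_pool_alloc(), so the new guard in that allocator (if (!r1_bio && pi->mddev)) cannot call unplug_slaves() against a conf that is only half built. Assembled from the hunk, with orientation comments added here:

        conf->poolinfo->mddev = NULL;   /* not yet safe for unplug_slaves() */
        conf->poolinfo->raid_disks = mddev->raid_disks;
        conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
                                          r1bio_pool_free,
                                          conf->poolinfo);
        if (!conf->r1bio_pool)
                goto out_no_mem;
        conf->poolinfo->mddev = mddev;  /* pool exists; unplugging is safe now */

The raid10 hunks below apply the same pattern to conf->mddev.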
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 51c4c5c4d87a..c2cb7b87b440 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -68,7 +68,7 @@ static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data) | |||
68 | 68 | ||
69 | /* allocate a r10bio with room for raid_disks entries in the bios array */ | 69 | /* allocate a r10bio with room for raid_disks entries in the bios array */ |
70 | r10_bio = kzalloc(size, gfp_flags); | 70 | r10_bio = kzalloc(size, gfp_flags); |
71 | if (!r10_bio) | 71 | if (!r10_bio && conf->mddev) |
72 | unplug_slaves(conf->mddev); | 72 | unplug_slaves(conf->mddev); |
73 | 73 | ||
74 | return r10_bio; | 74 | return r10_bio; |
@@ -1632,6 +1632,7 @@ static void raid10d(mddev_t *mddev) | |||
1632 | generic_make_request(bio); | 1632 | generic_make_request(bio); |
1633 | } | 1633 | } |
1634 | } | 1634 | } |
1635 | cond_resched(); | ||
1635 | } | 1636 | } |
1636 | if (unplug) | 1637 | if (unplug) |
1637 | unplug_slaves(mddev); | 1638 | unplug_slaves(mddev); |
@@ -2095,7 +2096,6 @@ static int run(mddev_t *mddev) | |||
2095 | if (!conf->tmppage) | 2096 | if (!conf->tmppage) |
2096 | goto out_free_conf; | 2097 | goto out_free_conf; |
2097 | 2098 | ||
2098 | conf->mddev = mddev; | ||
2099 | conf->raid_disks = mddev->raid_disks; | 2099 | conf->raid_disks = mddev->raid_disks; |
2100 | conf->near_copies = nc; | 2100 | conf->near_copies = nc; |
2101 | conf->far_copies = fc; | 2101 | conf->far_copies = fc; |
@@ -2132,6 +2132,7 @@ static int run(mddev_t *mddev) | |||
2132 | goto out_free_conf; | 2132 | goto out_free_conf; |
2133 | } | 2133 | } |
2134 | 2134 | ||
2135 | conf->mddev = mddev; | ||
2135 | spin_lock_init(&conf->device_lock); | 2136 | spin_lock_init(&conf->device_lock); |
2136 | mddev->queue->queue_lock = &conf->device_lock; | 2137 | mddev->queue->queue_lock = &conf->device_lock; |
2137 | 2138 | ||
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 94829804ab7f..81abefc172d9 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -156,13 +156,16 @@ static inline int raid6_next_disk(int disk, int raid_disks) | |||
156 | static int raid6_idx_to_slot(int idx, struct stripe_head *sh, | 156 | static int raid6_idx_to_slot(int idx, struct stripe_head *sh, |
157 | int *count, int syndrome_disks) | 157 | int *count, int syndrome_disks) |
158 | { | 158 | { |
159 | int slot; | 159 | int slot = *count; |
160 | 160 | ||
161 | if (sh->ddf_layout) | ||
162 | (*count)++; | ||
161 | if (idx == sh->pd_idx) | 163 | if (idx == sh->pd_idx) |
162 | return syndrome_disks; | 164 | return syndrome_disks; |
163 | if (idx == sh->qd_idx) | 165 | if (idx == sh->qd_idx) |
164 | return syndrome_disks + 1; | 166 | return syndrome_disks + 1; |
165 | slot = (*count)++; | 167 | if (!sh->ddf_layout) |
168 | (*count)++; | ||
166 | return slot; | 169 | return slot; |
167 | } | 170 | } |
168 | 171 | ||
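Interleaved with the removed lines the new control flow is hard to follow; the post-patch function reads:

        static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
                                     int *count, int syndrome_disks)
        {
                int slot = *count;

                if (sh->ddf_layout)
                        (*count)++;
                if (idx == sh->pd_idx)
                        return syndrome_disks;
                if (idx == sh->qd_idx)
                        return syndrome_disks + 1;
                if (!sh->ddf_layout)
                        (*count)++;
                return slot;
        }

With ddf_layout the counter now advances on every disk, parity included, so data blocks keep their raw on-disk slot numbers and the slots where P and Q physically sit are simply never filled — which is why set_syndrome_sources() below switches from raid6_empty_zero_page to srcs[i] = NULL. Without ddf_layout only data disks advance the counter, as before.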
@@ -717,7 +720,7 @@ static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh) | |||
717 | int i; | 720 | int i; |
718 | 721 | ||
719 | for (i = 0; i < disks; i++) | 722 | for (i = 0; i < disks; i++) |
720 | srcs[i] = (void *)raid6_empty_zero_page; | 723 | srcs[i] = NULL; |
721 | 724 | ||
722 | count = 0; | 725 | count = 0; |
723 | i = d0_idx; | 726 | i = d0_idx; |
@@ -727,9 +730,8 @@ static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh) | |||
727 | srcs[slot] = sh->dev[i].page; | 730 | srcs[slot] = sh->dev[i].page; |
728 | i = raid6_next_disk(i, disks); | 731 | i = raid6_next_disk(i, disks); |
729 | } while (i != d0_idx); | 732 | } while (i != d0_idx); |
730 | BUG_ON(count != syndrome_disks); | ||
731 | 733 | ||
732 | return count; | 734 | return syndrome_disks; |
733 | } | 735 | } |
734 | 736 | ||
735 | static struct dma_async_tx_descriptor * | 737 | static struct dma_async_tx_descriptor * |
@@ -814,7 +816,7 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu) | |||
814 | * slot number conversion for 'faila' and 'failb' | 816 | * slot number conversion for 'faila' and 'failb' |
815 | */ | 817 | */ |
816 | for (i = 0; i < disks ; i++) | 818 | for (i = 0; i < disks ; i++) |
817 | blocks[i] = (void *)raid6_empty_zero_page; | 819 | blocks[i] = NULL; |
818 | count = 0; | 820 | count = 0; |
819 | i = d0_idx; | 821 | i = d0_idx; |
820 | do { | 822 | do { |
@@ -828,7 +830,6 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu) | |||
828 | failb = slot; | 830 | failb = slot; |
829 | i = raid6_next_disk(i, disks); | 831 | i = raid6_next_disk(i, disks); |
830 | } while (i != d0_idx); | 832 | } while (i != d0_idx); |
831 | BUG_ON(count != syndrome_disks); | ||
832 | 833 | ||
833 | BUG_ON(faila == failb); | 834 | BUG_ON(faila == failb); |
834 | if (failb < faila) | 835 | if (failb < faila) |
@@ -845,7 +846,7 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu) | |||
845 | init_async_submit(&submit, ASYNC_TX_FENCE, NULL, | 846 | init_async_submit(&submit, ASYNC_TX_FENCE, NULL, |
846 | ops_complete_compute, sh, | 847 | ops_complete_compute, sh, |
847 | to_addr_conv(sh, percpu)); | 848 | to_addr_conv(sh, percpu)); |
848 | return async_gen_syndrome(blocks, 0, count+2, | 849 | return async_gen_syndrome(blocks, 0, syndrome_disks+2, |
849 | STRIPE_SIZE, &submit); | 850 | STRIPE_SIZE, &submit); |
850 | } else { | 851 | } else { |
851 | struct page *dest; | 852 | struct page *dest; |
@@ -1139,7 +1140,7 @@ static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu | |||
1139 | &sh->ops.zero_sum_result, percpu->spare_page, &submit); | 1140 | &sh->ops.zero_sum_result, percpu->spare_page, &submit); |
1140 | } | 1141 | } |
1141 | 1142 | ||
1142 | static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) | 1143 | static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request) |
1143 | { | 1144 | { |
1144 | int overlap_clear = 0, i, disks = sh->disks; | 1145 | int overlap_clear = 0, i, disks = sh->disks; |
1145 | struct dma_async_tx_descriptor *tx = NULL; | 1146 | struct dma_async_tx_descriptor *tx = NULL; |
@@ -1204,22 +1205,55 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) | |||
1204 | put_cpu(); | 1205 | put_cpu(); |
1205 | } | 1206 | } |
1206 | 1207 | ||
1208 | #ifdef CONFIG_MULTICORE_RAID456 | ||
1209 | static void async_run_ops(void *param, async_cookie_t cookie) | ||
1210 | { | ||
1211 | struct stripe_head *sh = param; | ||
1212 | unsigned long ops_request = sh->ops.request; | ||
1213 | |||
1214 | clear_bit_unlock(STRIPE_OPS_REQ_PENDING, &sh->state); | ||
1215 | wake_up(&sh->ops.wait_for_ops); | ||
1216 | |||
1217 | __raid_run_ops(sh, ops_request); | ||
1218 | release_stripe(sh); | ||
1219 | } | ||
1220 | |||
1221 | static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) | ||
1222 | { | ||
1223 | /* since handle_stripe can be called outside of raid5d context | ||
1224 | * we need to ensure sh->ops.request is de-staged before another | ||
1225 | * request arrives | ||
1226 | */ | ||
1227 | wait_event(sh->ops.wait_for_ops, | ||
1228 | !test_and_set_bit_lock(STRIPE_OPS_REQ_PENDING, &sh->state)); | ||
1229 | sh->ops.request = ops_request; | ||
1230 | |||
1231 | atomic_inc(&sh->count); | ||
1232 | async_schedule(async_run_ops, sh); | ||
1233 | } | ||
1234 | #else | ||
1235 | #define raid_run_ops __raid_run_ops | ||
1236 | #endif | ||
1237 | |||
1207 | static int grow_one_stripe(raid5_conf_t *conf) | 1238 | static int grow_one_stripe(raid5_conf_t *conf) |
1208 | { | 1239 | { |
1209 | struct stripe_head *sh; | 1240 | struct stripe_head *sh; |
1241 | int disks = max(conf->raid_disks, conf->previous_raid_disks); | ||
1210 | sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL); | 1242 | sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL); |
1211 | if (!sh) | 1243 | if (!sh) |
1212 | return 0; | 1244 | return 0; |
1213 | memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev)); | 1245 | memset(sh, 0, sizeof(*sh) + (disks-1)*sizeof(struct r5dev)); |
1214 | sh->raid_conf = conf; | 1246 | sh->raid_conf = conf; |
1215 | spin_lock_init(&sh->lock); | 1247 | spin_lock_init(&sh->lock); |
1248 | #ifdef CONFIG_MULTICORE_RAID456 | ||
1249 | init_waitqueue_head(&sh->ops.wait_for_ops); | ||
1250 | #endif | ||
1216 | 1251 | ||
1217 | if (grow_buffers(sh, conf->raid_disks)) { | 1252 | if (grow_buffers(sh, disks)) { |
1218 | shrink_buffers(sh, conf->raid_disks); | 1253 | shrink_buffers(sh, disks); |
1219 | kmem_cache_free(conf->slab_cache, sh); | 1254 | kmem_cache_free(conf->slab_cache, sh); |
1220 | return 0; | 1255 | return 0; |
1221 | } | 1256 | } |
1222 | sh->disks = conf->raid_disks; | ||
1223 | /* we just created an active stripe so... */ | 1257 | /* we just created an active stripe so... */ |
1224 | atomic_set(&sh->count, 1); | 1258 | atomic_set(&sh->count, 1); |
1225 | atomic_inc(&conf->active_stripes); | 1259 | atomic_inc(&conf->active_stripes); |
@@ -1231,7 +1265,7 @@ static int grow_one_stripe(raid5_conf_t *conf) | |||
1231 | static int grow_stripes(raid5_conf_t *conf, int num) | 1265 | static int grow_stripes(raid5_conf_t *conf, int num) |
1232 | { | 1266 | { |
1233 | struct kmem_cache *sc; | 1267 | struct kmem_cache *sc; |
1234 | int devs = conf->raid_disks; | 1268 | int devs = max(conf->raid_disks, conf->previous_raid_disks); |
1235 | 1269 | ||
1236 | sprintf(conf->cache_name[0], | 1270 | sprintf(conf->cache_name[0], |
1237 | "raid%d-%s", conf->level, mdname(conf->mddev)); | 1271 | "raid%d-%s", conf->level, mdname(conf->mddev)); |
@@ -1329,6 +1363,9 @@ static int resize_stripes(raid5_conf_t *conf, int newsize) | |||
1329 | 1363 | ||
1330 | nsh->raid_conf = conf; | 1364 | nsh->raid_conf = conf; |
1331 | spin_lock_init(&nsh->lock); | 1365 | spin_lock_init(&nsh->lock); |
1366 | #ifdef CONFIG_MULTICORE_RAID456 | ||
1367 | init_waitqueue_head(&nsh->ops.wait_for_ops); | ||
1368 | #endif | ||
1332 | 1369 | ||
1333 | list_add(&nsh->lru, &newstripes); | 1370 | list_add(&nsh->lru, &newstripes); |
1334 | } | 1371 | } |
@@ -1899,10 +1936,15 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous) | |||
1899 | case ALGORITHM_PARITY_N: | 1936 | case ALGORITHM_PARITY_N: |
1900 | break; | 1937 | break; |
1901 | case ALGORITHM_ROTATING_N_CONTINUE: | 1938 | case ALGORITHM_ROTATING_N_CONTINUE: |
1939 | /* Like left_symmetric, but P is before Q */ | ||
1902 | if (sh->pd_idx == 0) | 1940 | if (sh->pd_idx == 0) |
1903 | i--; /* P D D D Q */ | 1941 | i--; /* P D D D Q */ |
1904 | else if (i > sh->pd_idx) | 1942 | else { |
1905 | i -= 2; /* D D Q P D */ | 1943 | /* D D Q P D */ |
1944 | if (i < sh->pd_idx) | ||
1945 | i += raid_disks; | ||
1946 | i -= (sh->pd_idx + 1); | ||
1947 | } | ||
1906 | break; | 1948 | break; |
1907 | case ALGORITHM_LEFT_ASYMMETRIC_6: | 1949 | case ALGORITHM_LEFT_ASYMMETRIC_6: |
1908 | case ALGORITHM_RIGHT_ASYMMETRIC_6: | 1950 | case ALGORITHM_RIGHT_ASYMMETRIC_6: |
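Assembled, the corrected case is:

        case ALGORITHM_ROTATING_N_CONTINUE:
                /* Like left_symmetric, but P is before Q */
                if (sh->pd_idx == 0)
                        i--;    /* P D D D Q */
                else {
                        /* D D Q P D */
                        if (i < sh->pd_idx)
                                i += raid_disks;
                        i -= (sh->pd_idx + 1);
                }
                break;

The old else if (i > sh->pd_idx) i -= 2 only adjusted disks sitting after P, leaving data disks ahead of Q mis-numbered; the new branch numbers data disks starting from the slot after P, wrapping past the end of the array when needed, which matches the left-symmetric-style rotation the comment describes.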
@@ -2896,7 +2938,7 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh, | |||
2896 | * | 2938 | * |
2897 | */ | 2939 | */ |
2898 | 2940 | ||
2899 | static bool handle_stripe5(struct stripe_head *sh) | 2941 | static void handle_stripe5(struct stripe_head *sh) |
2900 | { | 2942 | { |
2901 | raid5_conf_t *conf = sh->raid_conf; | 2943 | raid5_conf_t *conf = sh->raid_conf; |
2902 | int disks = sh->disks, i; | 2944 | int disks = sh->disks, i; |
@@ -3167,11 +3209,9 @@ static bool handle_stripe5(struct stripe_head *sh) | |||
3167 | ops_run_io(sh, &s); | 3209 | ops_run_io(sh, &s); |
3168 | 3210 | ||
3169 | return_io(return_bi); | 3211 | return_io(return_bi); |
3170 | |||
3171 | return blocked_rdev == NULL; | ||
3172 | } | 3212 | } |
3173 | 3213 | ||
3174 | static bool handle_stripe6(struct stripe_head *sh) | 3214 | static void handle_stripe6(struct stripe_head *sh) |
3175 | { | 3215 | { |
3176 | raid5_conf_t *conf = sh->raid_conf; | 3216 | raid5_conf_t *conf = sh->raid_conf; |
3177 | int disks = sh->disks; | 3217 | int disks = sh->disks; |
@@ -3455,17 +3495,14 @@ static bool handle_stripe6(struct stripe_head *sh) | |||
3455 | ops_run_io(sh, &s); | 3495 | ops_run_io(sh, &s); |
3456 | 3496 | ||
3457 | return_io(return_bi); | 3497 | return_io(return_bi); |
3458 | |||
3459 | return blocked_rdev == NULL; | ||
3460 | } | 3498 | } |
3461 | 3499 | ||
3462 | /* returns true if the stripe was handled */ | 3500 | static void handle_stripe(struct stripe_head *sh) |
3463 | static bool handle_stripe(struct stripe_head *sh) | ||
3464 | { | 3501 | { |
3465 | if (sh->raid_conf->level == 6) | 3502 | if (sh->raid_conf->level == 6) |
3466 | return handle_stripe6(sh); | 3503 | handle_stripe6(sh); |
3467 | else | 3504 | else |
3468 | return handle_stripe5(sh); | 3505 | handle_stripe5(sh); |
3469 | } | 3506 | } |
3470 | 3507 | ||
3471 | static void raid5_activate_delayed(raid5_conf_t *conf) | 3508 | static void raid5_activate_delayed(raid5_conf_t *conf) |
@@ -3503,9 +3540,10 @@ static void unplug_slaves(mddev_t *mddev) | |||
3503 | { | 3540 | { |
3504 | raid5_conf_t *conf = mddev->private; | 3541 | raid5_conf_t *conf = mddev->private; |
3505 | int i; | 3542 | int i; |
3543 | int devs = max(conf->raid_disks, conf->previous_raid_disks); | ||
3506 | 3544 | ||
3507 | rcu_read_lock(); | 3545 | rcu_read_lock(); |
3508 | for (i = 0; i < conf->raid_disks; i++) { | 3546 | for (i = 0; i < devs; i++) { |
3509 | mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); | 3547 | mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); |
3510 | if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { | 3548 | if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { |
3511 | struct request_queue *r_queue = bdev_get_queue(rdev->bdev); | 3549 | struct request_queue *r_queue = bdev_get_queue(rdev->bdev); |
@@ -4277,9 +4315,7 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski | |||
4277 | clear_bit(STRIPE_INSYNC, &sh->state); | 4315 | clear_bit(STRIPE_INSYNC, &sh->state); |
4278 | spin_unlock(&sh->lock); | 4316 | spin_unlock(&sh->lock); |
4279 | 4317 | ||
4280 | /* wait for any blocked device to be handled */ | 4318 | handle_stripe(sh); |
4281 | while (unlikely(!handle_stripe(sh))) | ||
4282 | ; | ||
4283 | release_stripe(sh); | 4319 | release_stripe(sh); |
4284 | 4320 | ||
4285 | return STRIPE_SECTORS; | 4321 | return STRIPE_SECTORS; |
@@ -4349,37 +4385,6 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio) | |||
4349 | return handled; | 4385 | return handled; |
4350 | } | 4386 | } |
4351 | 4387 | ||
4352 | #ifdef CONFIG_MULTICORE_RAID456 | ||
4353 | static void __process_stripe(void *param, async_cookie_t cookie) | ||
4354 | { | ||
4355 | struct stripe_head *sh = param; | ||
4356 | |||
4357 | handle_stripe(sh); | ||
4358 | release_stripe(sh); | ||
4359 | } | ||
4360 | |||
4361 | static void process_stripe(struct stripe_head *sh, struct list_head *domain) | ||
4362 | { | ||
4363 | async_schedule_domain(__process_stripe, sh, domain); | ||
4364 | } | ||
4365 | |||
4366 | static void synchronize_stripe_processing(struct list_head *domain) | ||
4367 | { | ||
4368 | async_synchronize_full_domain(domain); | ||
4369 | } | ||
4370 | #else | ||
4371 | static void process_stripe(struct stripe_head *sh, struct list_head *domain) | ||
4372 | { | ||
4373 | handle_stripe(sh); | ||
4374 | release_stripe(sh); | ||
4375 | cond_resched(); | ||
4376 | } | ||
4377 | |||
4378 | static void synchronize_stripe_processing(struct list_head *domain) | ||
4379 | { | ||
4380 | } | ||
4381 | #endif | ||
4382 | |||
4383 | 4388 | ||
4384 | /* | 4389 | /* |
4385 | * This is our raid5 kernel thread. | 4390 | * This is our raid5 kernel thread. |
@@ -4393,7 +4398,6 @@ static void raid5d(mddev_t *mddev) | |||
4393 | struct stripe_head *sh; | 4398 | struct stripe_head *sh; |
4394 | raid5_conf_t *conf = mddev->private; | 4399 | raid5_conf_t *conf = mddev->private; |
4395 | int handled; | 4400 | int handled; |
4396 | LIST_HEAD(raid_domain); | ||
4397 | 4401 | ||
4398 | pr_debug("+++ raid5d active\n"); | 4402 | pr_debug("+++ raid5d active\n"); |
4399 | 4403 | ||
@@ -4430,7 +4434,9 @@ static void raid5d(mddev_t *mddev) | |||
4430 | spin_unlock_irq(&conf->device_lock); | 4434 | spin_unlock_irq(&conf->device_lock); |
4431 | 4435 | ||
4432 | handled++; | 4436 | handled++; |
4433 | process_stripe(sh, &raid_domain); | 4437 | handle_stripe(sh); |
4438 | release_stripe(sh); | ||
4439 | cond_resched(); | ||
4434 | 4440 | ||
4435 | spin_lock_irq(&conf->device_lock); | 4441 | spin_lock_irq(&conf->device_lock); |
4436 | } | 4442 | } |
@@ -4438,7 +4444,6 @@ static void raid5d(mddev_t *mddev) | |||
4438 | 4444 | ||
4439 | spin_unlock_irq(&conf->device_lock); | 4445 | spin_unlock_irq(&conf->device_lock); |
4440 | 4446 | ||
4441 | synchronize_stripe_processing(&raid_domain); | ||
4442 | async_tx_issue_pending_all(); | 4447 | async_tx_issue_pending_all(); |
4443 | unplug_slaves(mddev); | 4448 | unplug_slaves(mddev); |
4444 | 4449 | ||
@@ -4558,13 +4563,9 @@ raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks) | |||
4558 | 4563 | ||
4559 | if (!sectors) | 4564 | if (!sectors) |
4560 | sectors = mddev->dev_sectors; | 4565 | sectors = mddev->dev_sectors; |
4561 | if (!raid_disks) { | 4566 | if (!raid_disks) |
4562 | /* size is defined by the smallest of previous and new size */ | 4567 | /* size is defined by the smallest of previous and new size */ |
4563 | if (conf->raid_disks < conf->previous_raid_disks) | 4568 | raid_disks = min(conf->raid_disks, conf->previous_raid_disks); |
4564 | raid_disks = conf->raid_disks; | ||
4565 | else | ||
4566 | raid_disks = conf->previous_raid_disks; | ||
4567 | } | ||
4568 | 4569 | ||
4569 | sectors &= ~((sector_t)mddev->chunk_sectors - 1); | 4570 | sectors &= ~((sector_t)mddev->chunk_sectors - 1); |
4570 | sectors &= ~((sector_t)mddev->new_chunk_sectors - 1); | 4571 | sectors &= ~((sector_t)mddev->new_chunk_sectors - 1); |
@@ -4665,7 +4666,7 @@ static int raid5_alloc_percpu(raid5_conf_t *conf) | |||
4665 | } | 4666 | } |
4666 | per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page; | 4667 | per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page; |
4667 | } | 4668 | } |
4668 | scribble = kmalloc(scribble_len(conf->raid_disks), GFP_KERNEL); | 4669 | scribble = kmalloc(conf->scribble_len, GFP_KERNEL); |
4669 | if (!scribble) { | 4670 | if (!scribble) { |
4670 | err = -ENOMEM; | 4671 | err = -ENOMEM; |
4671 | break; | 4672 | break; |
@@ -4686,7 +4687,7 @@ static int raid5_alloc_percpu(raid5_conf_t *conf) | |||
4686 | static raid5_conf_t *setup_conf(mddev_t *mddev) | 4687 | static raid5_conf_t *setup_conf(mddev_t *mddev) |
4687 | { | 4688 | { |
4688 | raid5_conf_t *conf; | 4689 | raid5_conf_t *conf; |
4689 | int raid_disk, memory; | 4690 | int raid_disk, memory, max_disks; |
4690 | mdk_rdev_t *rdev; | 4691 | mdk_rdev_t *rdev; |
4691 | struct disk_info *disk; | 4692 | struct disk_info *disk; |
4692 | 4693 | ||
@@ -4722,15 +4723,28 @@ static raid5_conf_t *setup_conf(mddev_t *mddev) | |||
4722 | conf = kzalloc(sizeof(raid5_conf_t), GFP_KERNEL); | 4723 | conf = kzalloc(sizeof(raid5_conf_t), GFP_KERNEL); |
4723 | if (conf == NULL) | 4724 | if (conf == NULL) |
4724 | goto abort; | 4725 | goto abort; |
4726 | spin_lock_init(&conf->device_lock); | ||
4727 | init_waitqueue_head(&conf->wait_for_stripe); | ||
4728 | init_waitqueue_head(&conf->wait_for_overlap); | ||
4729 | INIT_LIST_HEAD(&conf->handle_list); | ||
4730 | INIT_LIST_HEAD(&conf->hold_list); | ||
4731 | INIT_LIST_HEAD(&conf->delayed_list); | ||
4732 | INIT_LIST_HEAD(&conf->bitmap_list); | ||
4733 | INIT_LIST_HEAD(&conf->inactive_list); | ||
4734 | atomic_set(&conf->active_stripes, 0); | ||
4735 | atomic_set(&conf->preread_active_stripes, 0); | ||
4736 | atomic_set(&conf->active_aligned_reads, 0); | ||
4737 | conf->bypass_threshold = BYPASS_THRESHOLD; | ||
4725 | 4738 | ||
4726 | conf->raid_disks = mddev->raid_disks; | 4739 | conf->raid_disks = mddev->raid_disks; |
4727 | conf->scribble_len = scribble_len(conf->raid_disks); | ||
4728 | if (mddev->reshape_position == MaxSector) | 4740 | if (mddev->reshape_position == MaxSector) |
4729 | conf->previous_raid_disks = mddev->raid_disks; | 4741 | conf->previous_raid_disks = mddev->raid_disks; |
4730 | else | 4742 | else |
4731 | conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; | 4743 | conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; |
4744 | max_disks = max(conf->raid_disks, conf->previous_raid_disks); | ||
4745 | conf->scribble_len = scribble_len(max_disks); | ||
4732 | 4746 | ||
4733 | conf->disks = kzalloc(conf->raid_disks * sizeof(struct disk_info), | 4747 | conf->disks = kzalloc(max_disks * sizeof(struct disk_info), |
4734 | GFP_KERNEL); | 4748 | GFP_KERNEL); |
4735 | if (!conf->disks) | 4749 | if (!conf->disks) |
4736 | goto abort; | 4750 | goto abort; |
@@ -4744,24 +4758,11 @@ static raid5_conf_t *setup_conf(mddev_t *mddev) | |||
4744 | if (raid5_alloc_percpu(conf) != 0) | 4758 | if (raid5_alloc_percpu(conf) != 0) |
4745 | goto abort; | 4759 | goto abort; |
4746 | 4760 | ||
4747 | spin_lock_init(&conf->device_lock); | ||
4748 | init_waitqueue_head(&conf->wait_for_stripe); | ||
4749 | init_waitqueue_head(&conf->wait_for_overlap); | ||
4750 | INIT_LIST_HEAD(&conf->handle_list); | ||
4751 | INIT_LIST_HEAD(&conf->hold_list); | ||
4752 | INIT_LIST_HEAD(&conf->delayed_list); | ||
4753 | INIT_LIST_HEAD(&conf->bitmap_list); | ||
4754 | INIT_LIST_HEAD(&conf->inactive_list); | ||
4755 | atomic_set(&conf->active_stripes, 0); | ||
4756 | atomic_set(&conf->preread_active_stripes, 0); | ||
4757 | atomic_set(&conf->active_aligned_reads, 0); | ||
4758 | conf->bypass_threshold = BYPASS_THRESHOLD; | ||
4759 | |||
4760 | pr_debug("raid5: run(%s) called.\n", mdname(mddev)); | 4761 | pr_debug("raid5: run(%s) called.\n", mdname(mddev)); |
4761 | 4762 | ||
4762 | list_for_each_entry(rdev, &mddev->disks, same_set) { | 4763 | list_for_each_entry(rdev, &mddev->disks, same_set) { |
4763 | raid_disk = rdev->raid_disk; | 4764 | raid_disk = rdev->raid_disk; |
4764 | if (raid_disk >= conf->raid_disks | 4765 | if (raid_disk >= max_disks |
4765 | || raid_disk < 0) | 4766 | || raid_disk < 0) |
4766 | continue; | 4767 | continue; |
4767 | disk = conf->disks + raid_disk; | 4768 | disk = conf->disks + raid_disk; |
@@ -4793,7 +4794,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev) | |||
4793 | } | 4794 | } |
4794 | 4795 | ||
4795 | memory = conf->max_nr_stripes * (sizeof(struct stripe_head) + | 4796 | memory = conf->max_nr_stripes * (sizeof(struct stripe_head) + |
4796 | conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; | 4797 | max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; |
4797 | if (grow_stripes(conf, conf->max_nr_stripes)) { | 4798 | if (grow_stripes(conf, conf->max_nr_stripes)) { |
4798 | printk(KERN_ERR | 4799 | printk(KERN_ERR |
4799 | "raid5: couldn't allocate %dkB for buffers\n", memory); | 4800 | "raid5: couldn't allocate %dkB for buffers\n", memory); |
@@ -4918,7 +4919,8 @@ static int run(mddev_t *mddev) | |||
4918 | test_bit(In_sync, &rdev->flags)) | 4919 | test_bit(In_sync, &rdev->flags)) |
4919 | working_disks++; | 4920 | working_disks++; |
4920 | 4921 | ||
4921 | mddev->degraded = conf->raid_disks - working_disks; | 4922 | mddev->degraded = (max(conf->raid_disks, conf->previous_raid_disks) |
4923 | - working_disks); | ||
4922 | 4924 | ||
4923 | if (mddev->degraded > conf->max_degraded) { | 4925 | if (mddev->degraded > conf->max_degraded) { |
4924 | printk(KERN_ERR "raid5: not enough operational devices for %s" | 4926 | printk(KERN_ERR "raid5: not enough operational devices for %s" |
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 2390e0e83daf..dd708359b451 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h | |||
@@ -214,12 +214,20 @@ struct stripe_head { | |||
214 | int disks; /* disks in stripe */ | 214 | int disks; /* disks in stripe */ |
215 | enum check_states check_state; | 215 | enum check_states check_state; |
216 | enum reconstruct_states reconstruct_state; | 216 | enum reconstruct_states reconstruct_state; |
217 | /* stripe_operations | 217 | /** |
218 | * struct stripe_operations | ||
218 | * @target - STRIPE_OP_COMPUTE_BLK target | 219 | * @target - STRIPE_OP_COMPUTE_BLK target |
220 | * @target2 - 2nd compute target in the raid6 case | ||
221 | * @zero_sum_result - P and Q verification flags | ||
222 | * @request - async service request flags for raid_run_ops | ||
219 | */ | 223 | */ |
220 | struct stripe_operations { | 224 | struct stripe_operations { |
221 | int target, target2; | 225 | int target, target2; |
222 | enum sum_check_flags zero_sum_result; | 226 | enum sum_check_flags zero_sum_result; |
227 | #ifdef CONFIG_MULTICORE_RAID456 | ||
228 | unsigned long request; | ||
229 | wait_queue_head_t wait_for_ops; | ||
230 | #endif | ||
223 | } ops; | 231 | } ops; |
224 | struct r5dev { | 232 | struct r5dev { |
225 | struct bio req; | 233 | struct bio req; |
@@ -294,6 +302,8 @@ struct r6_state { | |||
294 | #define STRIPE_FULL_WRITE 13 /* all blocks are set to be overwritten */ | 302 | #define STRIPE_FULL_WRITE 13 /* all blocks are set to be overwritten */ |
295 | #define STRIPE_BIOFILL_RUN 14 | 303 | #define STRIPE_BIOFILL_RUN 14 |
296 | #define STRIPE_COMPUTE_RUN 15 | 304 | #define STRIPE_COMPUTE_RUN 15 |
305 | #define STRIPE_OPS_REQ_PENDING 16 | ||
306 | |||
297 | /* | 307 | /* |
298 | * Operation request flags | 308 | * Operation request flags |
299 | */ | 309 | */ |
@@ -478,7 +488,7 @@ static inline int algorithm_valid_raid6(int layout) | |||
478 | { | 488 | { |
479 | return (layout >= 0 && layout <= 5) | 489 | return (layout >= 0 && layout <= 5) |
480 | || | 490 | || |
481 | (layout == 8 || layout == 10) | 491 | (layout >= 8 && layout <= 10) |
482 | || | 492 | || |
483 | (layout >= 16 && layout <= 20); | 493 | (layout >= 16 && layout <= 20); |
484 | } | 494 | } |
diff --git a/drivers/md/raid6altivec.uc b/drivers/md/raid6altivec.uc index 699dfeee4944..2654d5c854be 100644 --- a/drivers/md/raid6altivec.uc +++ b/drivers/md/raid6altivec.uc | |||
@@ -15,7 +15,7 @@ | |||
15 | * | 15 | * |
16 | * $#-way unrolled portable integer math RAID-6 instruction set | 16 | * $#-way unrolled portable integer math RAID-6 instruction set |
17 | * | 17 | * |
18 | * This file is postprocessed using unroll.pl | 18 | * This file is postprocessed using unroll.awk |
19 | * | 19 | * |
20 | * <benh> hpa: in process, | 20 | * <benh> hpa: in process, |
21 | * you can just "steal" the vec unit with enable_kernel_altivec() (but | 21 | * you can just "steal" the vec unit with enable_kernel_altivec() (but |
diff --git a/drivers/md/raid6int.uc b/drivers/md/raid6int.uc index f9bf9cba357f..d1e276a14fab 100644 --- a/drivers/md/raid6int.uc +++ b/drivers/md/raid6int.uc | |||
@@ -15,7 +15,7 @@ | |||
15 | * | 15 | * |
16 | * $#-way unrolled portable integer math RAID-6 instruction set | 16 | * $#-way unrolled portable integer math RAID-6 instruction set |
17 | * | 17 | * |
18 | * This file is postprocessed using unroll.pl | 18 | * This file is postprocessed using unroll.awk |
19 | */ | 19 | */ |
20 | 20 | ||
21 | #include <linux/raid/pq.h> | 21 | #include <linux/raid/pq.h> |
diff --git a/drivers/md/raid6test/Makefile b/drivers/md/raid6test/Makefile index 58ffdf4f5161..2874cbef529d 100644 --- a/drivers/md/raid6test/Makefile +++ b/drivers/md/raid6test/Makefile | |||
@@ -7,7 +7,7 @@ CC = gcc | |||
7 | OPTFLAGS = -O2 # Adjust as desired | 7 | OPTFLAGS = -O2 # Adjust as desired |
8 | CFLAGS = -I.. -I ../../../include -g $(OPTFLAGS) | 8 | CFLAGS = -I.. -I ../../../include -g $(OPTFLAGS) |
9 | LD = ld | 9 | LD = ld |
10 | PERL = perl | 10 | AWK = awk |
11 | AR = ar | 11 | AR = ar |
12 | RANLIB = ranlib | 12 | RANLIB = ranlib |
13 | 13 | ||
@@ -35,35 +35,35 @@ raid6.a: raid6int1.o raid6int2.o raid6int4.o raid6int8.o raid6int16.o \ | |||
35 | raid6test: test.c raid6.a | 35 | raid6test: test.c raid6.a |
36 | $(CC) $(CFLAGS) -o raid6test $^ | 36 | $(CC) $(CFLAGS) -o raid6test $^ |
37 | 37 | ||
38 | raid6altivec1.c: raid6altivec.uc ../unroll.pl | 38 | raid6altivec1.c: raid6altivec.uc ../unroll.awk |
39 | $(PERL) ../unroll.pl 1 < raid6altivec.uc > $@ | 39 | $(AWK) ../unroll.awk -vN=1 < raid6altivec.uc > $@ |
40 | 40 | ||
41 | raid6altivec2.c: raid6altivec.uc ../unroll.pl | 41 | raid6altivec2.c: raid6altivec.uc ../unroll.awk |
42 | $(PERL) ../unroll.pl 2 < raid6altivec.uc > $@ | 42 | $(AWK) ../unroll.awk -vN=2 < raid6altivec.uc > $@ |
43 | 43 | ||
44 | raid6altivec4.c: raid6altivec.uc ../unroll.pl | 44 | raid6altivec4.c: raid6altivec.uc ../unroll.awk |
45 | $(PERL) ../unroll.pl 4 < raid6altivec.uc > $@ | 45 | $(AWK) ../unroll.awk -vN=4 < raid6altivec.uc > $@ |
46 | 46 | ||
47 | raid6altivec8.c: raid6altivec.uc ../unroll.pl | 47 | raid6altivec8.c: raid6altivec.uc ../unroll.awk |
48 | $(PERL) ../unroll.pl 8 < raid6altivec.uc > $@ | 48 | $(AWK) ../unroll.awk -vN=8 < raid6altivec.uc > $@ |
49 | 49 | ||
50 | raid6int1.c: raid6int.uc ../unroll.pl | 50 | raid6int1.c: raid6int.uc ../unroll.awk |
51 | $(PERL) ../unroll.pl 1 < raid6int.uc > $@ | 51 | $(AWK) ../unroll.awk -vN=1 < raid6int.uc > $@ |
52 | 52 | ||
53 | raid6int2.c: raid6int.uc ../unroll.pl | 53 | raid6int2.c: raid6int.uc ../unroll.awk |
54 | $(PERL) ../unroll.pl 2 < raid6int.uc > $@ | 54 | $(AWK) ../unroll.awk -vN=2 < raid6int.uc > $@ |
55 | 55 | ||
56 | raid6int4.c: raid6int.uc ../unroll.pl | 56 | raid6int4.c: raid6int.uc ../unroll.awk |
57 | $(PERL) ../unroll.pl 4 < raid6int.uc > $@ | 57 | $(AWK) ../unroll.awk -vN=4 < raid6int.uc > $@ |
58 | 58 | ||
59 | raid6int8.c: raid6int.uc ../unroll.pl | 59 | raid6int8.c: raid6int.uc ../unroll.awk |
60 | $(PERL) ../unroll.pl 8 < raid6int.uc > $@ | 60 | $(AWK) ../unroll.awk -vN=8 < raid6int.uc > $@ |
61 | 61 | ||
62 | raid6int16.c: raid6int.uc ../unroll.pl | 62 | raid6int16.c: raid6int.uc ../unroll.awk |
63 | $(PERL) ../unroll.pl 16 < raid6int.uc > $@ | 63 | $(AWK) ../unroll.awk -vN=16 < raid6int.uc > $@ |
64 | 64 | ||
65 | raid6int32.c: raid6int.uc ../unroll.pl | 65 | raid6int32.c: raid6int.uc ../unroll.awk |
66 | $(PERL) ../unroll.pl 32 < raid6int.uc > $@ | 66 | $(AWK) ../unroll.awk -vN=32 < raid6int.uc > $@ |
67 | 67 | ||
68 | raid6tables.c: mktables | 68 | raid6tables.c: mktables |
69 | ./mktables > raid6tables.c | 69 | ./mktables > raid6tables.c |
diff --git a/drivers/md/unroll.awk b/drivers/md/unroll.awk new file mode 100644 index 000000000000..c6aa03631df8 --- /dev/null +++ b/drivers/md/unroll.awk | |||
@@ -0,0 +1,20 @@ | |||
1 | |||
2 | # This filter requires one command line option of form -vN=n | ||
3 | # where n must be a decimal number. | ||
4 | # | ||
5 | # Repeat each input line containing $$ n times, replacing $$ with 0...n-1. | ||
6 | # Replace each $# with n, and each $* with a single $. | ||
7 | |||
8 | BEGIN { | ||
9 | n = N + 0 | ||
10 | } | ||
11 | { | ||
12 | if (/\$\$/) { rep = n } else { rep = 1 } | ||
13 | for (i = 0; i < rep; ++i) { | ||
14 | tmp = $0 | ||
15 | gsub(/\$\$/, i, tmp) | ||
16 | gsub(/\$\#/, n, tmp) | ||
17 | gsub(/\$\*/, "$", tmp) | ||
18 | print tmp | ||
19 | } | ||
20 | } | ||
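Outside the build system the filter can be exercised by hand the same way the Makefile rules above drive it — a sketch, using -f to name the program file and any of the .uc sources:

        awk -f unroll.awk -vN=4 < raid6int.uc > raid6int4.c

Each input line containing $$ comes out four times with $$ replaced by 0..3, $# becomes 4, and $* collapses to a single $.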
diff --git a/drivers/md/unroll.pl b/drivers/md/unroll.pl deleted file mode 100644 index 3acc710a20ea..000000000000 --- a/drivers/md/unroll.pl +++ /dev/null | |||
@@ -1,24 +0,0 @@ | |||
1 | #!/usr/bin/perl | ||
2 | # | ||
3 | # Take a piece of C code and for each line which contains the sequence $$ | ||
4 | # repeat n times with $ replaced by 0...n-1; the sequence $# is replaced | ||
5 | # by the unrolling factor, and $* with a single $ | ||
6 | # | ||
7 | |||
8 | ($n) = @ARGV; | ||
9 | $n += 0; | ||
10 | |||
11 | while ( defined($line = <STDIN>) ) { | ||
12 | if ( $line =~ /\$\$/ ) { | ||
13 | $rep = $n; | ||
14 | } else { | ||
15 | $rep = 1; | ||
16 | } | ||
17 | for ( $i = 0 ; $i < $rep ; $i++ ) { | ||
18 | $tmp = $line; | ||
19 | $tmp =~ s/\$\$/$i/g; | ||
20 | $tmp =~ s/\$\#/$n/g; | ||
21 | $tmp =~ s/\$\*/\$/g; | ||
22 | print $tmp; | ||
23 | } | ||
24 | } | ||
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index 0caa8008c51c..f56dec6119c3 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c | |||
@@ -362,12 +362,12 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags) | |||
362 | retval = -EINVAL; | 362 | retval = -EINVAL; |
363 | goto halt_fail_and_release; | 363 | goto halt_fail_and_release; |
364 | } | 364 | } |
365 | dev->hard_mtu = tmp; | ||
366 | net->mtu = dev->hard_mtu - net->hard_header_len; | ||
367 | dev_warn(&intf->dev, | 365 | dev_warn(&intf->dev, |
368 | "dev can't take %u byte packets (max %u), " | 366 | "dev can't take %u byte packets (max %u), " |
369 | "adjusting MTU to %u\n", | 367 | "adjusting MTU to %u\n", |
370 | dev->hard_mtu, tmp, net->mtu); | 368 | dev->hard_mtu, tmp, tmp - net->hard_header_len); |
369 | dev->hard_mtu = tmp; | ||
370 | net->mtu = dev->hard_mtu - net->hard_header_len; | ||
371 | } | 371 | } |
372 | 372 | ||
373 | /* REVISIT: peripheral "alignment" request is ignored ... */ | 373 | /* REVISIT: peripheral "alignment" request is ignored ... */ |
diff --git a/drivers/s390/char/sclp_async.c b/drivers/s390/char/sclp_async.c index a4f68e5b9c96..b44462a6c6d3 100644 --- a/drivers/s390/char/sclp_async.c +++ b/drivers/s390/char/sclp_async.c | |||
@@ -26,7 +26,6 @@ static struct sclp_async_sccb *sccb; | |||
26 | static int sclp_async_send_wait(char *message); | 26 | static int sclp_async_send_wait(char *message); |
27 | static struct ctl_table_header *callhome_sysctl_header; | 27 | static struct ctl_table_header *callhome_sysctl_header; |
28 | static DEFINE_SPINLOCK(sclp_async_lock); | 28 | static DEFINE_SPINLOCK(sclp_async_lock); |
29 | static char nodename[64]; | ||
30 | #define SCLP_NORMAL_WRITE 0x00 | 29 | #define SCLP_NORMAL_WRITE 0x00 |
31 | 30 | ||
32 | struct async_evbuf { | 31 | struct async_evbuf { |
@@ -52,9 +51,10 @@ static struct sclp_register sclp_async_register = { | |||
52 | static int call_home_on_panic(struct notifier_block *self, | 51 | static int call_home_on_panic(struct notifier_block *self, |
53 | unsigned long event, void *data) | 52 | unsigned long event, void *data) |
54 | { | 53 | { |
55 | strncat(data, nodename, strlen(nodename)); | 54 | strncat(data, init_utsname()->nodename, |
56 | sclp_async_send_wait(data); | 55 | sizeof(init_utsname()->nodename)); |
57 | return NOTIFY_DONE; | 56 | sclp_async_send_wait(data); |
57 | return NOTIFY_DONE; | ||
58 | } | 58 | } |
59 | 59 | ||
60 | static struct notifier_block call_home_panic_nb = { | 60 | static struct notifier_block call_home_panic_nb = { |
@@ -68,15 +68,14 @@ static int proc_handler_callhome(struct ctl_table *ctl, int write, | |||
68 | { | 68 | { |
69 | unsigned long val; | 69 | unsigned long val; |
70 | int len, rc; | 70 | int len, rc; |
71 | char buf[2]; | 71 | char buf[3]; |
72 | 72 | ||
73 | if (!*count | (*ppos && !write)) { | 73 | if (!*count || (*ppos && !write)) { |
74 | *count = 0; | 74 | *count = 0; |
75 | return 0; | 75 | return 0; |
76 | } | 76 | } |
77 | if (!write) { | 77 | if (!write) { |
78 | len = sprintf(buf, "%d\n", callhome_enabled); | 78 | len = snprintf(buf, sizeof(buf), "%d\n", callhome_enabled); |
79 | buf[len] = '\0'; | ||
80 | rc = copy_to_user(buffer, buf, sizeof(buf)); | 79 | rc = copy_to_user(buffer, buf, sizeof(buf)); |
81 | if (rc != 0) | 80 | if (rc != 0) |
82 | return -EFAULT; | 81 | return -EFAULT; |
@@ -171,39 +170,29 @@ static int __init sclp_async_init(void) | |||
171 | rc = sclp_register(&sclp_async_register); | 170 | rc = sclp_register(&sclp_async_register); |
172 | if (rc) | 171 | if (rc) |
173 | return rc; | 172 | return rc; |
174 | callhome_sysctl_header = register_sysctl_table(kern_dir_table); | 173 | rc = -EOPNOTSUPP; |
175 | if (!callhome_sysctl_header) { | 174 | if (!(sclp_async_register.sclp_receive_mask & EVTYP_ASYNC_MASK)) |
176 | rc = -ENOMEM; | ||
177 | goto out_sclp; | ||
178 | } | ||
179 | if (!(sclp_async_register.sclp_receive_mask & EVTYP_ASYNC_MASK)) { | ||
180 | rc = -EOPNOTSUPP; | ||
181 | goto out_sclp; | 175 | goto out_sclp; |
182 | } | ||
183 | rc = -ENOMEM; | 176 | rc = -ENOMEM; |
177 | callhome_sysctl_header = register_sysctl_table(kern_dir_table); | ||
178 | if (!callhome_sysctl_header) | ||
179 | goto out_sclp; | ||
184 | request = kzalloc(sizeof(struct sclp_req), GFP_KERNEL); | 180 | request = kzalloc(sizeof(struct sclp_req), GFP_KERNEL); |
185 | if (!request) | ||
186 | goto out_sys; | ||
187 | sccb = (struct sclp_async_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA); | 181 | sccb = (struct sclp_async_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA); |
188 | if (!sccb) | 182 | if (!request || !sccb) |
189 | goto out_mem; | 183 | goto out_mem; |
190 | rc = atomic_notifier_chain_register(&panic_notifier_list, | 184 | rc = atomic_notifier_chain_register(&panic_notifier_list, |
191 | &call_home_panic_nb); | 185 | &call_home_panic_nb); |
192 | if (rc) | 186 | if (!rc) |
193 | goto out_mem; | 187 | goto out; |
194 | |||
195 | strncpy(nodename, init_utsname()->nodename, 64); | ||
196 | return 0; | ||
197 | |||
198 | out_mem: | 188 | out_mem: |
199 | kfree(request); | 189 | kfree(request); |
200 | free_page((unsigned long) sccb); | 190 | free_page((unsigned long) sccb); |
201 | out_sys: | ||
202 | unregister_sysctl_table(callhome_sysctl_header); | 191 | unregister_sysctl_table(callhome_sysctl_header); |
203 | out_sclp: | 192 | out_sclp: |
204 | sclp_unregister(&sclp_async_register); | 193 | sclp_unregister(&sclp_async_register); |
194 | out: | ||
205 | return rc; | 195 | return rc; |
206 | |||
207 | } | 196 | } |
208 | module_init(sclp_async_init); | 197 | module_init(sclp_async_init); |
209 | 198 | ||
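The reordered init is easier to audit assembled; each failure now unwinds strictly in reverse order of setup:

        rc = sclp_register(&sclp_async_register);
        if (rc)
                return rc;
        rc = -EOPNOTSUPP;
        if (!(sclp_async_register.sclp_receive_mask & EVTYP_ASYNC_MASK))
                goto out_sclp;
        rc = -ENOMEM;
        callhome_sysctl_header = register_sysctl_table(kern_dir_table);
        if (!callhome_sysctl_header)
                goto out_sclp;
        request = kzalloc(sizeof(struct sclp_req), GFP_KERNEL);
        sccb = (struct sclp_async_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!request || !sccb)
                goto out_mem;
        rc = atomic_notifier_chain_register(&panic_notifier_list,
                                            &call_home_panic_nb);
        if (!rc)
                goto out;
out_mem:
        kfree(request);
        free_page((unsigned long) sccb);
        unregister_sysctl_table(callhome_sysctl_header);
out_sclp:
        sclp_unregister(&sclp_async_register);
out:
        return rc;

Folding the two allocations into one !request || !sccb check works because kfree(NULL) and free_page(0) are both no-ops.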
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c index 102000d1af6f..3012355f8304 100644 --- a/drivers/s390/net/smsgiucv.c +++ b/drivers/s390/net/smsgiucv.c | |||
@@ -158,7 +158,12 @@ static int smsg_pm_restore_thaw(struct device *dev) | |||
158 | smsg_path->flags = 0; | 158 | smsg_path->flags = 0; |
159 | rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG ", | 159 | rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG ", |
160 | NULL, NULL, NULL); | 160 | NULL, NULL, NULL); |
161 | printk(KERN_ERR "iucv_path_connect returned with rc %i\n", rc); | 161 | #ifdef CONFIG_PM_DEBUG |
162 | if (rc) | ||
163 | printk(KERN_ERR | ||
164 | "iucv_path_connect returned with rc %i\n", rc); | ||
165 | #endif | ||
166 | cpcmd("SET SMSG IUCV", NULL, 0, NULL); | ||
162 | } | 167 | } |
163 | return 0; | 168 | return 0; |
164 | } | 169 | } |
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c index 7d1aac31ec8d..496764349c41 100644 --- a/drivers/scsi/dpt_i2o.c +++ b/drivers/scsi/dpt_i2o.c | |||
@@ -1919,7 +1919,7 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg) | |||
1919 | size = size>>16; | 1919 | size = size>>16; |
1920 | size *= 4; | 1920 | size *= 4; |
1921 | if (size > MAX_MESSAGE_SIZE) { | 1921 | if (size > MAX_MESSAGE_SIZE) { |
1922 | rcode = EINVAL; | 1922 | rcode = -EINVAL; |
1923 | goto cleanup; | 1923 | goto cleanup; |
1924 | } | 1924 | } |
1925 | /* Copy in the user's I2O command */ | 1925 | /* Copy in the user's I2O command */ |
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c index 42a74b8a0bb8..fa3d142ba64d 100644 --- a/drivers/usb/gadget/fsl_udc_core.c +++ b/drivers/usb/gadget/fsl_udc_core.c | |||
@@ -2139,7 +2139,7 @@ static int fsl_proc_read(char *page, char **start, off_t off, int count, | |||
2139 | static void fsl_udc_release(struct device *dev) | 2139 | static void fsl_udc_release(struct device *dev) |
2140 | { | 2140 | { |
2141 | complete(udc_controller->done); | 2141 | complete(udc_controller->done); |
2142 | dma_free_coherent(dev, udc_controller->ep_qh_size, | 2142 | dma_free_coherent(dev->parent, udc_controller->ep_qh_size, |
2143 | udc_controller->ep_qh, udc_controller->ep_qh_dma); | 2143 | udc_controller->ep_qh, udc_controller->ep_qh_dma); |
2144 | kfree(udc_controller); | 2144 | kfree(udc_controller); |
2145 | } | 2145 | } |
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c index 749b53742828..e33d36256350 100644 --- a/drivers/usb/host/r8a66597-hcd.c +++ b/drivers/usb/host/r8a66597-hcd.c | |||
@@ -1003,19 +1003,20 @@ static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port, | |||
1003 | if (syssts == SE0) { | 1003 | if (syssts == SE0) { |
1004 | r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port)); | 1004 | r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port)); |
1005 | r8a66597_bset(r8a66597, ATTCHE, get_intenb_reg(port)); | 1005 | r8a66597_bset(r8a66597, ATTCHE, get_intenb_reg(port)); |
1006 | return; | 1006 | } else { |
1007 | } | 1007 | if (syssts == FS_JSTS) |
1008 | r8a66597_bset(r8a66597, HSE, get_syscfg_reg(port)); | ||
1009 | else if (syssts == LS_JSTS) | ||
1010 | r8a66597_bclr(r8a66597, HSE, get_syscfg_reg(port)); | ||
1008 | 1011 | ||
1009 | if (syssts == FS_JSTS) | 1012 | r8a66597_write(r8a66597, ~DTCH, get_intsts_reg(port)); |
1010 | r8a66597_bset(r8a66597, HSE, get_syscfg_reg(port)); | 1013 | r8a66597_bset(r8a66597, DTCHE, get_intenb_reg(port)); |
1011 | else if (syssts == LS_JSTS) | ||
1012 | r8a66597_bclr(r8a66597, HSE, get_syscfg_reg(port)); | ||
1013 | 1014 | ||
1014 | r8a66597_write(r8a66597, ~DTCH, get_intsts_reg(port)); | 1015 | if (r8a66597->bus_suspended) |
1015 | r8a66597_bset(r8a66597, DTCHE, get_intenb_reg(port)); | 1016 | usb_hcd_resume_root_hub(r8a66597_to_hcd(r8a66597)); |
1017 | } | ||
1016 | 1018 | ||
1017 | if (r8a66597->bus_suspended) | 1019 | usb_hcd_poll_rh_status(r8a66597_to_hcd(r8a66597)); |
1018 | usb_hcd_resume_root_hub(r8a66597_to_hcd(r8a66597)); | ||
1019 | } | 1020 | } |
1020 | 1021 | ||
1021 | /* this function must be called with interrupt disabled */ | 1022 | /* this function must be called with interrupt disabled */ |
@@ -1024,6 +1025,8 @@ static void r8a66597_usb_connect(struct r8a66597 *r8a66597, int port) | |||
1024 | u16 speed = get_rh_usb_speed(r8a66597, port); | 1025 | u16 speed = get_rh_usb_speed(r8a66597, port); |
1025 | struct r8a66597_root_hub *rh = &r8a66597->root_hub[port]; | 1026 | struct r8a66597_root_hub *rh = &r8a66597->root_hub[port]; |
1026 | 1027 | ||
1028 | rh->port &= ~((1 << USB_PORT_FEAT_HIGHSPEED) | | ||
1029 | (1 << USB_PORT_FEAT_LOWSPEED)); | ||
1027 | if (speed == HSMODE) | 1030 | if (speed == HSMODE) |
1028 | rh->port |= (1 << USB_PORT_FEAT_HIGHSPEED); | 1031 | rh->port |= (1 << USB_PORT_FEAT_HIGHSPEED); |
1029 | else if (speed == LSMODE) | 1032 | else if (speed == LSMODE) |
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 65d96b214f95..cd44c68954df 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
@@ -315,6 +315,9 @@ static int option_resume(struct usb_serial *serial); | |||
315 | #define QISDA_PRODUCT_H20_4515 0x4515 | 315 | #define QISDA_PRODUCT_H20_4515 0x4515 |
316 | #define QISDA_PRODUCT_H20_4519 0x4519 | 316 | #define QISDA_PRODUCT_H20_4519 0x4519 |
317 | 317 | ||
318 | /* TLAYTECH PRODUCTS */ | ||
319 | #define TLAYTECH_VENDOR_ID 0x20B9 | ||
320 | #define TLAYTECH_PRODUCT_TEU800 0x1682 | ||
318 | 321 | ||
319 | /* TOSHIBA PRODUCTS */ | 322 | /* TOSHIBA PRODUCTS */ |
320 | #define TOSHIBA_VENDOR_ID 0x0930 | 323 | #define TOSHIBA_VENDOR_ID 0x0930 |
@@ -593,6 +596,7 @@ static struct usb_device_id option_ids[] = { | |||
593 | { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) }, | 596 | { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) }, |
594 | { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) }, | 597 | { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) }, |
595 | { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) }, | 598 | { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) }, |
599 | { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) }, | ||
596 | { } /* Terminating entry */ | 600 | { } /* Terminating entry */ |
597 | }; | 601 | }; |
598 | MODULE_DEVICE_TABLE(usb, option_ids); | 602 | MODULE_DEVICE_TABLE(usb, option_ids); |
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index 45883988a005..5019325ba25d 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c | |||
@@ -296,7 +296,6 @@ struct sierra_port_private { | |||
296 | int dsr_state; | 296 | int dsr_state; |
297 | int dcd_state; | 297 | int dcd_state; |
298 | int ri_state; | 298 | int ri_state; |
299 | |||
300 | unsigned int opened:1; | 299 | unsigned int opened:1; |
301 | }; | 300 | }; |
302 | 301 | ||
@@ -306,6 +305,8 @@ static int sierra_send_setup(struct usb_serial_port *port) | |||
306 | struct sierra_port_private *portdata; | 305 | struct sierra_port_private *portdata; |
307 | __u16 interface = 0; | 306 | __u16 interface = 0; |
308 | int val = 0; | 307 | int val = 0; |
308 | int do_send = 0; | ||
309 | int retval; | ||
309 | 310 | ||
310 | dev_dbg(&port->dev, "%s\n", __func__); | 311 | dev_dbg(&port->dev, "%s\n", __func__); |
311 | 312 | ||
@@ -324,10 +325,7 @@ static int sierra_send_setup(struct usb_serial_port *port) | |||
324 | */ | 325 | */ |
325 | if (port->interrupt_in_urb) { | 326 | if (port->interrupt_in_urb) { |
326 | /* send control message */ | 327 | /* send control message */ |
327 | return usb_control_msg(serial->dev, | 328 | do_send = 1; |
328 | usb_rcvctrlpipe(serial->dev, 0), | ||
329 | 0x22, 0x21, val, interface, | ||
330 | NULL, 0, USB_CTRL_SET_TIMEOUT); | ||
331 | } | 329 | } |
332 | } | 330 | } |
333 | 331 | ||
@@ -339,12 +337,18 @@ static int sierra_send_setup(struct usb_serial_port *port) | |||
339 | interface = 1; | 337 | interface = 1; |
340 | else if (port->bulk_out_endpointAddress == 5) | 338 | else if (port->bulk_out_endpointAddress == 5) |
341 | interface = 2; | 339 | interface = 2; |
342 | return usb_control_msg(serial->dev, | 340 | |
343 | usb_rcvctrlpipe(serial->dev, 0), | 341 | do_send = 1; |
344 | 0x22, 0x21, val, interface, | ||
345 | NULL, 0, USB_CTRL_SET_TIMEOUT); | ||
346 | } | 342 | } |
347 | return 0; | 343 | if (!do_send) |
344 | return 0; | ||
345 | |||
346 | usb_autopm_get_interface(serial->interface); | ||
347 | retval = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), | ||
348 | 0x22, 0x21, val, interface, NULL, 0, USB_CTRL_SET_TIMEOUT); | ||
349 | usb_autopm_put_interface(serial->interface); | ||
350 | |||
351 | return retval; | ||
348 | } | 352 | } |
349 | 353 | ||
350 | static void sierra_set_termios(struct tty_struct *tty, | 354 | static void sierra_set_termios(struct tty_struct *tty, |
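Both early returns in sierra_send_setup() are folded into a do_send flag so the control message is issued from a single tail, now bracketed by an autopm get/put pair to keep the interface resumed while the message is in flight; assembled from the hunk:

        if (!do_send)
                return 0;

        usb_autopm_get_interface(serial->interface);
        retval = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
                0x22, 0x21, val, interface, NULL, 0, USB_CTRL_SET_TIMEOUT);
        usb_autopm_put_interface(serial->interface);

        return retval;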
@@ -773,8 +777,11 @@ static void sierra_close(struct usb_serial_port *port) | |||
773 | 777 | ||
774 | if (serial->dev) { | 778 | if (serial->dev) { |
775 | mutex_lock(&serial->disc_mutex); | 779 | mutex_lock(&serial->disc_mutex); |
776 | if (!serial->disconnected) | 780 | if (!serial->disconnected) { |
781 | serial->interface->needs_remote_wakeup = 0; | ||
782 | usb_autopm_get_interface(serial->interface); | ||
777 | sierra_send_setup(port); | 783 | sierra_send_setup(port); |
784 | } | ||
778 | mutex_unlock(&serial->disc_mutex); | 785 | mutex_unlock(&serial->disc_mutex); |
779 | spin_lock_irq(&intfdata->susp_lock); | 786 | spin_lock_irq(&intfdata->susp_lock); |
780 | portdata->opened = 0; | 787 | portdata->opened = 0; |
@@ -788,8 +795,6 @@ static void sierra_close(struct usb_serial_port *port) | |||
788 | sierra_release_urb(portdata->in_urbs[i]); | 795 | sierra_release_urb(portdata->in_urbs[i]); |
789 | portdata->in_urbs[i] = NULL; | 796 | portdata->in_urbs[i] = NULL; |
790 | } | 797 | } |
791 | usb_autopm_get_interface(serial->interface); | ||
792 | serial->interface->needs_remote_wakeup = 0; | ||
793 | } | 798 | } |
794 | } | 799 | } |
795 | 800 | ||
@@ -827,6 +832,8 @@ static int sierra_open(struct tty_struct *tty, struct usb_serial_port *port) | |||
827 | if (err) { | 832 | if (err) { |
828 | /* get rid of everything as in close */ | 833 | /* get rid of everything as in close */ |
829 | sierra_close(port); | 834 | sierra_close(port); |
835 | /* restore balance for autopm */ | ||
836 | usb_autopm_put_interface(serial->interface); | ||
830 | return err; | 837 | return err; |
831 | } | 838 | } |
832 | sierra_send_setup(port); | 839 | sierra_send_setup(port); |
@@ -915,7 +922,7 @@ static void sierra_release(struct usb_serial *serial) | |||
915 | #ifdef CONFIG_PM | 922 | #ifdef CONFIG_PM |
916 | static void stop_read_write_urbs(struct usb_serial *serial) | 923 | static void stop_read_write_urbs(struct usb_serial *serial) |
917 | { | 924 | { |
918 | int i, j; | 925 | int i; |
919 | struct usb_serial_port *port; | 926 | struct usb_serial_port *port; |
920 | struct sierra_port_private *portdata; | 927 | struct sierra_port_private *portdata; |
921 | 928 | ||
@@ -923,8 +930,7 @@ static void stop_read_write_urbs(struct usb_serial *serial) | |||
923 | for (i = 0; i < serial->num_ports; ++i) { | 930 | for (i = 0; i < serial->num_ports; ++i) { |
924 | port = serial->port[i]; | 931 | port = serial->port[i]; |
925 | portdata = usb_get_serial_port_data(port); | 932 | portdata = usb_get_serial_port_data(port); |
926 | for (j = 0; j < N_IN_URB; j++) | 933 | sierra_stop_rx_urbs(port); |
927 | usb_kill_urb(portdata->in_urbs[j]); | ||
928 | usb_kill_anchored_urbs(&portdata->active); | 934 | usb_kill_anchored_urbs(&portdata->active); |
929 | } | 935 | } |
930 | } | 936 | } |
diff --git a/fs/xfs/linux-2.6/xfs_quotaops.c b/fs/xfs/linux-2.6/xfs_quotaops.c index 9e41f91aa269..3d4a0c84d634 100644 --- a/fs/xfs/linux-2.6/xfs_quotaops.c +++ b/fs/xfs/linux-2.6/xfs_quotaops.c | |||
@@ -80,7 +80,7 @@ xfs_fs_set_xstate( | |||
80 | 80 | ||
81 | if (sb->s_flags & MS_RDONLY) | 81 | if (sb->s_flags & MS_RDONLY) |
82 | return -EROFS; | 82 | return -EROFS; |
83 | if (!XFS_IS_QUOTA_RUNNING(mp)) | 83 | if (op != Q_XQUOTARM && !XFS_IS_QUOTA_RUNNING(mp)) |
84 | return -ENOSYS; | 84 | return -ENOSYS; |
85 | if (!capable(CAP_SYS_ADMIN)) | 85 | if (!capable(CAP_SYS_ADMIN)) |
86 | return -EPERM; | 86 | return -EPERM; |
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c index ab64f3efb43b..0785797db828 100644 --- a/fs/xfs/xfs_ialloc.c +++ b/fs/xfs/xfs_ialloc.c | |||
@@ -880,6 +880,7 @@ nextag: | |||
880 | * Not in range - save last search | 880 | * Not in range - save last search |
881 | * location and allocate a new inode | 881 | * location and allocate a new inode |
882 | */ | 882 | */ |
883 | xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); | ||
883 | pag->pagl_leftrec = trec.ir_startino; | 884 | pag->pagl_leftrec = trec.ir_startino; |
884 | pag->pagl_rightrec = rec.ir_startino; | 885 | pag->pagl_rightrec = rec.ir_startino; |
885 | pag->pagl_pagino = pagino; | 886 | pag->pagl_pagino = pagino; |
diff --git a/include/linux/device.h b/include/linux/device.h index aca31bf7d8ed..2ea3e4921812 100644 --- a/include/linux/device.h +++ b/include/linux/device.h | |||
@@ -124,7 +124,9 @@ struct device_driver { | |||
124 | struct bus_type *bus; | 124 | struct bus_type *bus; |
125 | 125 | ||
126 | struct module *owner; | 126 | struct module *owner; |
127 | const char *mod_name; /* used for built-in modules */ | 127 | const char *mod_name; /* used for built-in modules */ |
128 | |||
129 | bool suppress_bind_attrs; /* disables bind/unbind via sysfs */ | ||
128 | 130 | ||
129 | int (*probe) (struct device *dev); | 131 | int (*probe) (struct device *dev); |
130 | int (*remove) (struct device *dev); | 132 | int (*remove) (struct device *dev); |
diff --git a/mm/nommu.c b/mm/nommu.c index 5189b5aed8c0..9876fa0c3ad3 100644 --- a/mm/nommu.c +++ b/mm/nommu.c | |||
@@ -1362,9 +1362,11 @@ share: | |||
1362 | error_just_free: | 1362 | error_just_free: |
1363 | up_write(&nommu_region_sem); | 1363 | up_write(&nommu_region_sem); |
1364 | error: | 1364 | error: |
1365 | fput(region->vm_file); | 1365 | if (region->vm_file) |
1366 | fput(region->vm_file); | ||
1366 | kmem_cache_free(vm_region_jar, region); | 1367 | kmem_cache_free(vm_region_jar, region); |
1367 | fput(vma->vm_file); | 1368 | if (vma->vm_file) |
1369 | fput(vma->vm_file); | ||
1368 | if (vma->vm_flags & VM_EXECUTABLE) | 1370 | if (vma->vm_flags & VM_EXECUTABLE) |
1369 | removed_exe_file_vma(vma->vm_mm); | 1371 | removed_exe_file_vma(vma->vm_mm); |
1370 | kmem_cache_free(vm_area_cachep, vma); | 1372 | kmem_cache_free(vm_area_cachep, vma); |
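fput() dereferences its argument, so an anonymous mapping (no backing vm_file) would oops in this error path; assembled with the added guards:

        error_just_free:
                up_write(&nommu_region_sem);
        error:
                if (region->vm_file)
                        fput(region->vm_file);
                kmem_cache_free(vm_region_jar, region);
                if (vma->vm_file)
                        fput(vma->vm_file);
                if (vma->vm_flags & VM_EXECUTABLE)
                        removed_exe_file_vma(vma->vm_mm);
                kmem_cache_free(vm_area_cachep, vma);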