-rw-r--r--  arch/s390/include/asm/cputime.h     |   6
-rw-r--r--  arch/s390/kernel/ipl.c              |   7
-rw-r--r--  arch/s390/kernel/smp.c              |   7
-rw-r--r--  arch/s390/kernel/swsusp_asm64.S     |   2
-rw-r--r--  crypto/async_tx/async_pq.c          |  60
-rw-r--r--  crypto/async_tx/async_raid6_recov.c | 100
-rw-r--r--  crypto/async_tx/async_xor.c         |  18
-rw-r--r--  drivers/md/Makefile                 |  22
-rw-r--r--  drivers/md/bitmap.c                 |   9
-rw-r--r--  drivers/md/md.c                     |   2
-rw-r--r--  drivers/md/raid1.c                  |   6
-rw-r--r--  drivers/md/raid10.c                 |   5
-rw-r--r--  drivers/md/raid5.c                  | 182
-rw-r--r--  drivers/md/raid5.h                  |  14
-rw-r--r--  drivers/md/raid6altivec.uc          |   2
-rw-r--r--  drivers/md/raid6int.uc              |   2
-rw-r--r--  drivers/md/raid6test/Makefile       |  42
-rw-r--r--  drivers/md/unroll.awk               |  20
-rw-r--r--  drivers/md/unroll.pl                |  24
-rw-r--r--  drivers/s390/char/sclp_async.c      |  47
-rw-r--r--  drivers/s390/net/smsgiucv.c         |   7
-rw-r--r--  drivers/scsi/dpt_i2o.c              |   2
-rw-r--r--  fs/xfs/linux-2.6/xfs_quotaops.c     |   2
-rw-r--r--  fs/xfs/xfs_ialloc.c                 |   1
-rw-r--r--  mm/nommu.c                          |   6
25 files changed, 327 insertions, 268 deletions
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index 24b1244aadb9..f23961ada7fb 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -78,7 +78,7 @@ cputime64_to_jiffies64(cputime64_t cputime)
 static inline unsigned int
 cputime_to_msecs(const cputime_t cputime)
 {
-	return __div(cputime, 4096000);
+	return cputime_div(cputime, 4096000);
 }
 
 static inline cputime_t
@@ -160,7 +160,7 @@ cputime_to_timeval(const cputime_t cputime, struct timeval *value)
 static inline clock_t
 cputime_to_clock_t(cputime_t cputime)
 {
-	return __div(cputime, 4096000000ULL / USER_HZ);
+	return cputime_div(cputime, 4096000000ULL / USER_HZ);
 }
 
 static inline cputime_t
@@ -175,7 +175,7 @@ clock_t_to_cputime(unsigned long x)
 static inline clock_t
 cputime64_to_clock_t(cputime64_t cputime)
 {
-	return __div(cputime, 4096000000ULL / USER_HZ);
+	return cputime_div(cputime, 4096000000ULL / USER_HZ);
 }
 
 struct s390_idle_data {
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index ee57a42e6e93..4890ac6d7faa 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -1595,10 +1595,9 @@ static void stop_run(struct shutdown_trigger *trigger)
 {
 	if (strcmp(trigger->name, ON_PANIC_STR) == 0)
 		disabled_wait((unsigned long) __builtin_return_address(0));
-	else {
-		signal_processor(smp_processor_id(), sigp_stop);
+	while (signal_processor(smp_processor_id(), sigp_stop) == sigp_busy)
+		cpu_relax();
 	for (;;);
-	}
 }
 
 static struct shutdown_action stop_action = {SHUTDOWN_ACTION_STOP_STR,
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index c932caa5e850..93e52039321b 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -76,7 +76,6 @@ static int cpu_stopped(int cpu)
 	__u32 status;
 
 	switch (signal_processor_ps(&status, 0, cpu, sigp_sense)) {
-	case sigp_order_code_accepted:
 	case sigp_status_stored:
 		/* Check for stopped and check stop state */
 		if (status & 0x50)
@@ -638,6 +637,8 @@ void __cpu_die(unsigned int cpu)
 	/* Wait until target cpu is down */
 	while (!cpu_stopped(cpu))
 		cpu_relax();
+	while (signal_processor_p(0, cpu, sigp_set_prefix) == sigp_busy)
+		udelay(10);
 	smp_free_lowcore(cpu);
 	pr_info("Processor %d stopped\n", cpu);
 }
@@ -645,8 +646,8 @@ void __cpu_die(unsigned int cpu)
 void cpu_die(void)
 {
 	idle_task_exit();
-	signal_processor(smp_processor_id(), sigp_stop);
-	BUG();
+	while (signal_processor(smp_processor_id(), sigp_stop) == sigp_busy)
+		cpu_relax();
 	for (;;);
 }
 
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S
index 7c8653e27db6..0c26cc1898ec 100644
--- a/arch/s390/kernel/swsusp_asm64.S
+++ b/arch/s390/kernel/swsusp_asm64.S
@@ -199,6 +199,7 @@ pgm_check_entry:
 	brc	2,4b			/* busy, try again */
 5:
 	sigp	%r9,%r2,__SIGP_STOP	/* stop resume (current) CPU */
+	brc	2,5b			/* busy, try again */
 6:	j	6b
 
 restart_suspend:
@@ -206,6 +207,7 @@ restart_suspend:
 	llgh	%r2,0(%r1)
 7:
 	sigp	%r9,%r2,__SIGP_SENSE	/* Wait for resume CPU */
+	brc	8,7b			/* accepted, status 0, still running */
 	brc	2,7b			/* busy, try again */
 	tmll	%r9,0x40		/* Test if resume CPU is stopped */
 	jz	7b
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index b88db6d1dc65..6b5cc4fba59f 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -26,14 +26,10 @@
26#include <linux/async_tx.h> 26#include <linux/async_tx.h>
27 27
28/** 28/**
29 * scribble - space to hold throwaway P buffer for synchronous gen_syndrome 29 * pq_scribble_page - space to hold throwaway P or Q buffer for
30 * synchronous gen_syndrome
30 */ 31 */
31static struct page *scribble; 32static struct page *pq_scribble_page;
32
33static bool is_raid6_zero_block(struct page *p)
34{
35 return p == (void *) raid6_empty_zero_page;
36}
37 33
38/* the struct page *blocks[] parameter passed to async_gen_syndrome() 34/* the struct page *blocks[] parameter passed to async_gen_syndrome()
39 * and async_syndrome_val() contains the 'P' destination address at 35 * and async_syndrome_val() contains the 'P' destination address at
@@ -83,7 +79,7 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
83 * sources and update the coefficients accordingly 79 * sources and update the coefficients accordingly
84 */ 80 */
85 for (i = 0, idx = 0; i < src_cnt; i++) { 81 for (i = 0, idx = 0; i < src_cnt; i++) {
86 if (is_raid6_zero_block(blocks[i])) 82 if (blocks[i] == NULL)
87 continue; 83 continue;
88 dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len, 84 dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len,
89 DMA_TO_DEVICE); 85 DMA_TO_DEVICE);
@@ -160,9 +156,9 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
160 srcs = (void **) blocks; 156 srcs = (void **) blocks;
161 157
162 for (i = 0; i < disks; i++) { 158 for (i = 0; i < disks; i++) {
163 if (is_raid6_zero_block(blocks[i])) { 159 if (blocks[i] == NULL) {
164 BUG_ON(i > disks - 3); /* P or Q can't be zero */ 160 BUG_ON(i > disks - 3); /* P or Q can't be zero */
165 srcs[i] = blocks[i]; 161 srcs[i] = (void*)raid6_empty_zero_page;
166 } else 162 } else
167 srcs[i] = page_address(blocks[i]) + offset; 163 srcs[i] = page_address(blocks[i]) + offset;
168 } 164 }
@@ -186,10 +182,14 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
  * blocks[disks-1] to NULL. When P or Q is omitted 'len' must be <=
  * PAGE_SIZE as a temporary buffer of this size is used in the
  * synchronous path. 'disks' always accounts for both destination
- * buffers.
+ * buffers. If any source buffers (blocks[i] where i < disks - 2) are
+ * set to NULL those buffers will be replaced with the raid6_zero_page
+ * in the synchronous path and omitted in the hardware-asynchronous
+ * path.
  *
  * 'blocks' note: if submit->scribble is NULL then the contents of
- * 'blocks' may be overridden
+ * 'blocks' may be overwritten to perform address conversions
+ * (dma_map_page() or page_address()).
  */
 struct dma_async_tx_descriptor *
 async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
@@ -227,11 +227,11 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
227 async_tx_quiesce(&submit->depend_tx); 227 async_tx_quiesce(&submit->depend_tx);
228 228
229 if (!P(blocks, disks)) { 229 if (!P(blocks, disks)) {
230 P(blocks, disks) = scribble; 230 P(blocks, disks) = pq_scribble_page;
231 BUG_ON(len + offset > PAGE_SIZE); 231 BUG_ON(len + offset > PAGE_SIZE);
232 } 232 }
233 if (!Q(blocks, disks)) { 233 if (!Q(blocks, disks)) {
234 Q(blocks, disks) = scribble; 234 Q(blocks, disks) = pq_scribble_page;
235 BUG_ON(len + offset > PAGE_SIZE); 235 BUG_ON(len + offset > PAGE_SIZE);
236 } 236 }
237 do_sync_gen_syndrome(blocks, offset, disks, len, submit); 237 do_sync_gen_syndrome(blocks, offset, disks, len, submit);
@@ -265,8 +265,10 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
265 len); 265 len);
266 struct dma_device *device = chan ? chan->device : NULL; 266 struct dma_device *device = chan ? chan->device : NULL;
267 struct dma_async_tx_descriptor *tx; 267 struct dma_async_tx_descriptor *tx;
268 unsigned char coefs[disks-2];
268 enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0; 269 enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
269 dma_addr_t *dma_src = NULL; 270 dma_addr_t *dma_src = NULL;
271 int src_cnt = 0;
270 272
271 BUG_ON(disks < 4); 273 BUG_ON(disks < 4);
272 274
@@ -285,22 +287,32 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
285 __func__, disks, len); 287 __func__, disks, len);
286 if (!P(blocks, disks)) 288 if (!P(blocks, disks))
287 dma_flags |= DMA_PREP_PQ_DISABLE_P; 289 dma_flags |= DMA_PREP_PQ_DISABLE_P;
290 else
291 pq[0] = dma_map_page(dev, P(blocks, disks),
292 offset, len,
293 DMA_TO_DEVICE);
288 if (!Q(blocks, disks)) 294 if (!Q(blocks, disks))
289 dma_flags |= DMA_PREP_PQ_DISABLE_Q; 295 dma_flags |= DMA_PREP_PQ_DISABLE_Q;
296 else
297 pq[1] = dma_map_page(dev, Q(blocks, disks),
298 offset, len,
299 DMA_TO_DEVICE);
300
290 if (submit->flags & ASYNC_TX_FENCE) 301 if (submit->flags & ASYNC_TX_FENCE)
291 dma_flags |= DMA_PREP_FENCE; 302 dma_flags |= DMA_PREP_FENCE;
292 for (i = 0; i < disks; i++) 303 for (i = 0; i < disks-2; i++)
293 if (likely(blocks[i])) { 304 if (likely(blocks[i])) {
294 BUG_ON(is_raid6_zero_block(blocks[i])); 305 dma_src[src_cnt] = dma_map_page(dev, blocks[i],
295 dma_src[i] = dma_map_page(dev, blocks[i], 306 offset, len,
296 offset, len, 307 DMA_TO_DEVICE);
297 DMA_TO_DEVICE); 308 coefs[src_cnt] = raid6_gfexp[i];
309 src_cnt++;
298 } 310 }
299 311
300 for (;;) { 312 for (;;) {
301 tx = device->device_prep_dma_pq_val(chan, pq, dma_src, 313 tx = device->device_prep_dma_pq_val(chan, pq, dma_src,
302 disks - 2, 314 src_cnt,
303 raid6_gfexp, 315 coefs,
304 len, pqres, 316 len, pqres,
305 dma_flags); 317 dma_flags);
306 if (likely(tx)) 318 if (likely(tx))
@@ -373,9 +385,9 @@ EXPORT_SYMBOL_GPL(async_syndrome_val);
373 385
374static int __init async_pq_init(void) 386static int __init async_pq_init(void)
375{ 387{
376 scribble = alloc_page(GFP_KERNEL); 388 pq_scribble_page = alloc_page(GFP_KERNEL);
377 389
378 if (scribble) 390 if (pq_scribble_page)
379 return 0; 391 return 0;
380 392
381 pr_err("%s: failed to allocate required spare page\n", __func__); 393 pr_err("%s: failed to allocate required spare page\n", __func__);
@@ -385,7 +397,7 @@ static int __init async_pq_init(void)
385 397
386static void __exit async_pq_exit(void) 398static void __exit async_pq_exit(void)
387{ 399{
388 put_page(scribble); 400 put_page(pq_scribble_page);
389} 401}
390 402
391module_init(async_pq_init); 403module_init(async_pq_init);
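
For illustration only (not part of the diff above): a minimal sketch of a caller using the NULL-source convention that the async_pq.c changes document for async_gen_syndrome(). The helper example_gen_pq() and its arguments are hypothetical.

#include <linux/async_tx.h>

/* Hypothetical caller: generate P/Q for a 6-device stripe (4 data + P + Q)
 * in which data block 2 is absent.  With this change the missing source is
 * passed as NULL; the synchronous path substitutes raid6_empty_zero_page
 * and the dma path simply omits that source.
 */
static struct dma_async_tx_descriptor *
example_gen_pq(struct page **data, struct page *p, struct page *q,
	       addr_conv_t *addr_conv)
{
	struct page *blocks[6];
	struct async_submit_ctl submit;

	blocks[0] = data[0];
	blocks[1] = data[1];
	blocks[2] = NULL;	/* absent data block */
	blocks[3] = data[3];
	blocks[4] = p;		/* P lives at blocks[disks-2] */
	blocks[5] = q;		/* Q lives at blocks[disks-1] */

	init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
	return async_gen_syndrome(blocks, 0, 6, PAGE_SIZE, &submit);
}
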
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
index 6d73dde4786d..943f2abac9b4 100644
--- a/crypto/async_tx/async_raid6_recov.c
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -131,8 +131,8 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
131} 131}
132 132
133static struct dma_async_tx_descriptor * 133static struct dma_async_tx_descriptor *
134__2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks, 134__2data_recov_4(int disks, size_t bytes, int faila, int failb,
135 struct async_submit_ctl *submit) 135 struct page **blocks, struct async_submit_ctl *submit)
136{ 136{
137 struct dma_async_tx_descriptor *tx = NULL; 137 struct dma_async_tx_descriptor *tx = NULL;
138 struct page *p, *q, *a, *b; 138 struct page *p, *q, *a, *b;
@@ -143,8 +143,8 @@ __2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks,
143 void *cb_param = submit->cb_param; 143 void *cb_param = submit->cb_param;
144 void *scribble = submit->scribble; 144 void *scribble = submit->scribble;
145 145
146 p = blocks[4-2]; 146 p = blocks[disks-2];
147 q = blocks[4-1]; 147 q = blocks[disks-1];
148 148
149 a = blocks[faila]; 149 a = blocks[faila];
150 b = blocks[failb]; 150 b = blocks[failb];
@@ -170,8 +170,8 @@ __2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks,
170} 170}
171 171
172static struct dma_async_tx_descriptor * 172static struct dma_async_tx_descriptor *
173__2data_recov_5(size_t bytes, int faila, int failb, struct page **blocks, 173__2data_recov_5(int disks, size_t bytes, int faila, int failb,
174 struct async_submit_ctl *submit) 174 struct page **blocks, struct async_submit_ctl *submit)
175{ 175{
176 struct dma_async_tx_descriptor *tx = NULL; 176 struct dma_async_tx_descriptor *tx = NULL;
177 struct page *p, *q, *g, *dp, *dq; 177 struct page *p, *q, *g, *dp, *dq;
@@ -181,21 +181,22 @@ __2data_recov_5(size_t bytes, int faila, int failb, struct page **blocks,
181 dma_async_tx_callback cb_fn = submit->cb_fn; 181 dma_async_tx_callback cb_fn = submit->cb_fn;
182 void *cb_param = submit->cb_param; 182 void *cb_param = submit->cb_param;
183 void *scribble = submit->scribble; 183 void *scribble = submit->scribble;
184 int uninitialized_var(good); 184 int good_srcs, good, i;
185 int i;
186 185
187 for (i = 0; i < 3; i++) { 186 good_srcs = 0;
187 good = -1;
188 for (i = 0; i < disks-2; i++) {
189 if (blocks[i] == NULL)
190 continue;
188 if (i == faila || i == failb) 191 if (i == faila || i == failb)
189 continue; 192 continue;
190 else { 193 good = i;
191 good = i; 194 good_srcs++;
192 break;
193 }
194 } 195 }
195 BUG_ON(i >= 3); 196 BUG_ON(good_srcs > 1);
196 197
197 p = blocks[5-2]; 198 p = blocks[disks-2];
198 q = blocks[5-1]; 199 q = blocks[disks-1];
199 g = blocks[good]; 200 g = blocks[good];
200 201
201 /* Compute syndrome with zero for the missing data pages 202 /* Compute syndrome with zero for the missing data pages
@@ -263,10 +264,10 @@ __2data_recov_n(int disks, size_t bytes, int faila, int failb,
263 * delta p and delta q 264 * delta p and delta q
264 */ 265 */
265 dp = blocks[faila]; 266 dp = blocks[faila];
266 blocks[faila] = (void *)raid6_empty_zero_page; 267 blocks[faila] = NULL;
267 blocks[disks-2] = dp; 268 blocks[disks-2] = dp;
268 dq = blocks[failb]; 269 dq = blocks[failb];
269 blocks[failb] = (void *)raid6_empty_zero_page; 270 blocks[failb] = NULL;
270 blocks[disks-1] = dq; 271 blocks[disks-1] = dq;
271 272
272 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); 273 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
@@ -323,6 +324,8 @@ struct dma_async_tx_descriptor *
323async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb, 324async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
324 struct page **blocks, struct async_submit_ctl *submit) 325 struct page **blocks, struct async_submit_ctl *submit)
325{ 326{
327 int non_zero_srcs, i;
328
326 BUG_ON(faila == failb); 329 BUG_ON(faila == failb);
327 if (failb < faila) 330 if (failb < faila)
328 swap(faila, failb); 331 swap(faila, failb);
@@ -334,11 +337,13 @@ async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
334 */ 337 */
335 if (!submit->scribble) { 338 if (!submit->scribble) {
336 void **ptrs = (void **) blocks; 339 void **ptrs = (void **) blocks;
337 int i;
338 340
339 async_tx_quiesce(&submit->depend_tx); 341 async_tx_quiesce(&submit->depend_tx);
340 for (i = 0; i < disks; i++) 342 for (i = 0; i < disks; i++)
341 ptrs[i] = page_address(blocks[i]); 343 if (blocks[i] == NULL)
344 ptrs[i] = (void *) raid6_empty_zero_page;
345 else
346 ptrs[i] = page_address(blocks[i]);
342 347
343 raid6_2data_recov(disks, bytes, faila, failb, ptrs); 348 raid6_2data_recov(disks, bytes, faila, failb, ptrs);
344 349
@@ -347,19 +352,30 @@ async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
347 return NULL; 352 return NULL;
348 } 353 }
349 354
350 switch (disks) { 355 non_zero_srcs = 0;
351 case 4: 356 for (i = 0; i < disks-2 && non_zero_srcs < 4; i++)
357 if (blocks[i])
358 non_zero_srcs++;
359 switch (non_zero_srcs) {
360 case 0:
361 case 1:
362 /* There must be at least 2 sources - the failed devices. */
363 BUG();
364
365 case 2:
352 /* dma devices do not uniformly understand a zero source pq 366 /* dma devices do not uniformly understand a zero source pq
353 * operation (in contrast to the synchronous case), so 367 * operation (in contrast to the synchronous case), so
354 * explicitly handle the 4 disk special case 368 * explicitly handle the special case of a 4 disk array with
369 * both data disks missing.
355 */ 370 */
356 return __2data_recov_4(bytes, faila, failb, blocks, submit); 371 return __2data_recov_4(disks, bytes, faila, failb, blocks, submit);
357 case 5: 372 case 3:
358 /* dma devices do not uniformly understand a single 373 /* dma devices do not uniformly understand a single
359 * source pq operation (in contrast to the synchronous 374 * source pq operation (in contrast to the synchronous
360 * case), so explicitly handle the 5 disk special case 375 * case), so explicitly handle the special case of a 5 disk
376 * array with 2 of 3 data disks missing.
361 */ 377 */
362 return __2data_recov_5(bytes, faila, failb, blocks, submit); 378 return __2data_recov_5(disks, bytes, faila, failb, blocks, submit);
363 default: 379 default:
364 return __2data_recov_n(disks, bytes, faila, failb, blocks, submit); 380 return __2data_recov_n(disks, bytes, faila, failb, blocks, submit);
365 } 381 }
@@ -385,6 +401,7 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
385 dma_async_tx_callback cb_fn = submit->cb_fn; 401 dma_async_tx_callback cb_fn = submit->cb_fn;
386 void *cb_param = submit->cb_param; 402 void *cb_param = submit->cb_param;
387 void *scribble = submit->scribble; 403 void *scribble = submit->scribble;
404 int good_srcs, good, i;
388 struct page *srcs[2]; 405 struct page *srcs[2];
389 406
390 pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes); 407 pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);
@@ -394,11 +411,13 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
394 */ 411 */
395 if (!scribble) { 412 if (!scribble) {
396 void **ptrs = (void **) blocks; 413 void **ptrs = (void **) blocks;
397 int i;
398 414
399 async_tx_quiesce(&submit->depend_tx); 415 async_tx_quiesce(&submit->depend_tx);
400 for (i = 0; i < disks; i++) 416 for (i = 0; i < disks; i++)
401 ptrs[i] = page_address(blocks[i]); 417 if (blocks[i] == NULL)
418 ptrs[i] = (void*)raid6_empty_zero_page;
419 else
420 ptrs[i] = page_address(blocks[i]);
402 421
403 raid6_datap_recov(disks, bytes, faila, ptrs); 422 raid6_datap_recov(disks, bytes, faila, ptrs);
404 423
@@ -407,6 +426,20 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
407 return NULL; 426 return NULL;
408 } 427 }
409 428
429 good_srcs = 0;
430 good = -1;
431 for (i = 0; i < disks-2; i++) {
432 if (i == faila)
433 continue;
434 if (blocks[i]) {
435 good = i;
436 good_srcs++;
437 if (good_srcs > 1)
438 break;
439 }
440 }
441 BUG_ON(good_srcs == 0);
442
410 p = blocks[disks-2]; 443 p = blocks[disks-2];
411 q = blocks[disks-1]; 444 q = blocks[disks-1];
412 445
@@ -414,14 +447,13 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
414 * Use the dead data page as temporary storage for delta q 447 * Use the dead data page as temporary storage for delta q
415 */ 448 */
416 dq = blocks[faila]; 449 dq = blocks[faila];
417 blocks[faila] = (void *)raid6_empty_zero_page; 450 blocks[faila] = NULL;
418 blocks[disks-1] = dq; 451 blocks[disks-1] = dq;
419 452
420 /* in the 4 disk case we only need to perform a single source 453 /* in the 4-disk case we only need to perform a single source
421 * multiplication 454 * multiplication with the one good data block.
422 */ 455 */
423 if (disks == 4) { 456 if (good_srcs == 1) {
424 int good = faila == 0 ? 1 : 0;
425 struct page *g = blocks[good]; 457 struct page *g = blocks[good];
426 458
427 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, 459 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index b459a9034aac..79182dcb91b7 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -44,20 +44,23 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
 	void *cb_param_orig = submit->cb_param;
 	enum async_tx_flags flags_orig = submit->flags;
 	enum dma_ctrl_flags dma_flags;
-	int xor_src_cnt;
+	int xor_src_cnt = 0;
 	dma_addr_t dma_dest;
 
 	/* map the dest bidrectional in case it is re-used as a source */
 	dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_BIDIRECTIONAL);
 	for (i = 0; i < src_cnt; i++) {
 		/* only map the dest once */
+		if (!src_list[i])
+			continue;
 		if (unlikely(src_list[i] == dest)) {
-			dma_src[i] = dma_dest;
+			dma_src[xor_src_cnt++] = dma_dest;
 			continue;
 		}
-		dma_src[i] = dma_map_page(dma->dev, src_list[i], offset,
+		dma_src[xor_src_cnt++] = dma_map_page(dma->dev, src_list[i], offset,
 					  len, DMA_TO_DEVICE);
 	}
+	src_cnt = xor_src_cnt;
 
 	while (src_cnt) {
 		submit->flags = flags_orig;
@@ -123,7 +126,7 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
 		  int src_cnt, size_t len, struct async_submit_ctl *submit)
 {
 	int i;
-	int xor_src_cnt;
+	int xor_src_cnt = 0;
 	int src_off = 0;
 	void *dest_buf;
 	void **srcs;
@@ -135,8 +138,9 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
 
 	/* convert to buffer pointers */
 	for (i = 0; i < src_cnt; i++)
-		srcs[i] = page_address(src_list[i]) + offset;
-
+		if (src_list[i])
+			srcs[xor_src_cnt++] = page_address(src_list[i]) + offset;
+	src_cnt = xor_src_cnt;
 	/* set destination address */
 	dest_buf = page_address(dest) + offset;
 
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 1dc4185bd781..e355e7f6a536 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -46,7 +46,7 @@ obj-$(CONFIG_DM_LOG_USERSPACE) += dm-log-userspace.o
 obj-$(CONFIG_DM_ZERO) += dm-zero.o
 
 quiet_cmd_unroll = UNROLL $@
-      cmd_unroll = $(PERL) $(srctree)/$(src)/unroll.pl $(UNROLL) \
+      cmd_unroll = $(AWK) -f$(srctree)/$(src)/unroll.awk -vN=$(UNROLL) \
                    < $< > $@ || ( rm -f $@ && exit 1 )
 
 ifeq ($(CONFIG_ALTIVEC),y)
@@ -59,56 +59,56 @@ endif
 
 targets += raid6int1.c
 $(obj)/raid6int1.c: UNROLL := 1
-$(obj)/raid6int1.c: $(src)/raid6int.uc $(src)/unroll.pl FORCE
+$(obj)/raid6int1.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
 	$(call if_changed,unroll)
 
 targets += raid6int2.c
 $(obj)/raid6int2.c: UNROLL := 2
-$(obj)/raid6int2.c: $(src)/raid6int.uc $(src)/unroll.pl FORCE
+$(obj)/raid6int2.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
 	$(call if_changed,unroll)
 
 targets += raid6int4.c
 $(obj)/raid6int4.c: UNROLL := 4
-$(obj)/raid6int4.c: $(src)/raid6int.uc $(src)/unroll.pl FORCE
+$(obj)/raid6int4.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
 	$(call if_changed,unroll)
 
 targets += raid6int8.c
 $(obj)/raid6int8.c: UNROLL := 8
-$(obj)/raid6int8.c: $(src)/raid6int.uc $(src)/unroll.pl FORCE
+$(obj)/raid6int8.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
 	$(call if_changed,unroll)
 
 targets += raid6int16.c
 $(obj)/raid6int16.c: UNROLL := 16
-$(obj)/raid6int16.c: $(src)/raid6int.uc $(src)/unroll.pl FORCE
+$(obj)/raid6int16.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
 	$(call if_changed,unroll)
 
 targets += raid6int32.c
 $(obj)/raid6int32.c: UNROLL := 32
-$(obj)/raid6int32.c: $(src)/raid6int.uc $(src)/unroll.pl FORCE
+$(obj)/raid6int32.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
 	$(call if_changed,unroll)
 
 CFLAGS_raid6altivec1.o += $(altivec_flags)
 targets += raid6altivec1.c
 $(obj)/raid6altivec1.c: UNROLL := 1
-$(obj)/raid6altivec1.c: $(src)/raid6altivec.uc $(src)/unroll.pl FORCE
+$(obj)/raid6altivec1.c: $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
 	$(call if_changed,unroll)
 
 CFLAGS_raid6altivec2.o += $(altivec_flags)
 targets += raid6altivec2.c
 $(obj)/raid6altivec2.c: UNROLL := 2
-$(obj)/raid6altivec2.c: $(src)/raid6altivec.uc $(src)/unroll.pl FORCE
+$(obj)/raid6altivec2.c: $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
 	$(call if_changed,unroll)
 
 CFLAGS_raid6altivec4.o += $(altivec_flags)
 targets += raid6altivec4.c
 $(obj)/raid6altivec4.c: UNROLL := 4
-$(obj)/raid6altivec4.c: $(src)/raid6altivec.uc $(src)/unroll.pl FORCE
+$(obj)/raid6altivec4.c: $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
 	$(call if_changed,unroll)
 
 CFLAGS_raid6altivec8.o += $(altivec_flags)
 targets += raid6altivec8.c
 $(obj)/raid6altivec8.c: UNROLL := 8
-$(obj)/raid6altivec8.c: $(src)/raid6altivec.uc $(src)/unroll.pl FORCE
+$(obj)/raid6altivec8.c: $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
 	$(call if_changed,unroll)
 
 quiet_cmd_mktable = TABLE $@
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 6986b0059d23..60e2b322db11 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1624,10 +1624,11 @@ int bitmap_create(mddev_t *mddev)
 	bitmap->offset = mddev->bitmap_offset;
 	if (file) {
 		get_file(file);
-		do_sync_mapping_range(file->f_mapping, 0, LLONG_MAX,
-				      SYNC_FILE_RANGE_WAIT_BEFORE |
-				      SYNC_FILE_RANGE_WRITE |
-				      SYNC_FILE_RANGE_WAIT_AFTER);
+		/* As future accesses to this file will use bmap,
+		 * and bypass the page cache, we must sync the file
+		 * first.
+		 */
+		vfs_fsync(file, file->f_dentry, 1);
 	}
 	/* read superblock from bitmap file (this sets bitmap->chunksize) */
 	err = bitmap_read_sb(bitmap);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 26ba42a79129..10eb1fce975e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2631,7 +2631,7 @@ static void analyze_sbs(mddev_t * mddev)
 			rdev->desc_nr = i++;
 			rdev->raid_disk = rdev->desc_nr;
 			set_bit(In_sync, &rdev->flags);
-		} else if (rdev->raid_disk >= mddev->raid_disks) {
+		} else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) {
 			rdev->raid_disk = -1;
 			clear_bit(In_sync, &rdev->flags);
 		}
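
For illustration only (not part of the diff above): the effect of the new bound, with hypothetical numbers.

/*
 * Hypothetical example: an array being reshaped from 6 devices down to 4
 * has mddev->raid_disks = 4 and mddev->delta_disks = -2, so the limit is
 *	4 - min(0, -2) = 4 - (-2) = 6
 * and a device whose superblock records raid_disk 4 or 5 (a slot in the
 * old, larger geometry) is still accepted.  When growing or not reshaping,
 * min(0, delta_disks) is 0 and the limit is simply raid_disks.
 */
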
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index d1b9bd5fd4f6..a053423785c9 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -64,7 +64,7 @@ static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
 
 	/* allocate a r1bio with room for raid_disks entries in the bios array */
 	r1_bio = kzalloc(size, gfp_flags);
-	if (!r1_bio)
+	if (!r1_bio && pi->mddev)
 		unplug_slaves(pi->mddev);
 
 	return r1_bio;
@@ -1683,6 +1683,7 @@ static void raid1d(mddev_t *mddev)
 				generic_make_request(bio);
 			}
 		}
+		cond_resched();
 	}
 	if (unplug)
 		unplug_slaves(mddev);
@@ -1978,13 +1979,14 @@ static int run(mddev_t *mddev)
 	conf->poolinfo = kmalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
 	if (!conf->poolinfo)
 		goto out_no_mem;
-	conf->poolinfo->mddev = mddev;
+	conf->poolinfo->mddev = NULL;
 	conf->poolinfo->raid_disks = mddev->raid_disks;
 	conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
 					  r1bio_pool_free,
 					  conf->poolinfo);
 	if (!conf->r1bio_pool)
 		goto out_no_mem;
+	conf->poolinfo->mddev = mddev;
 
 	spin_lock_init(&conf->device_lock);
 	mddev->queue->queue_lock = &conf->device_lock;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 51c4c5c4d87a..c2cb7b87b440 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -68,7 +68,7 @@ static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
 
 	/* allocate a r10bio with room for raid_disks entries in the bios array */
 	r10_bio = kzalloc(size, gfp_flags);
-	if (!r10_bio)
+	if (!r10_bio && conf->mddev)
 		unplug_slaves(conf->mddev);
 
 	return r10_bio;
@@ -1632,6 +1632,7 @@ static void raid10d(mddev_t *mddev)
 				generic_make_request(bio);
 			}
 		}
+		cond_resched();
 	}
 	if (unplug)
 		unplug_slaves(mddev);
@@ -2095,7 +2096,6 @@ static int run(mddev_t *mddev)
 	if (!conf->tmppage)
 		goto out_free_conf;
 
-	conf->mddev = mddev;
 	conf->raid_disks = mddev->raid_disks;
 	conf->near_copies = nc;
 	conf->far_copies = fc;
@@ -2132,6 +2132,7 @@ static int run(mddev_t *mddev)
 		goto out_free_conf;
 	}
 
+	conf->mddev = mddev;
 	spin_lock_init(&conf->device_lock);
 	mddev->queue->queue_lock = &conf->device_lock;
 
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 94829804ab7f..81abefc172d9 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -156,13 +156,16 @@ static inline int raid6_next_disk(int disk, int raid_disks)
156static int raid6_idx_to_slot(int idx, struct stripe_head *sh, 156static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
157 int *count, int syndrome_disks) 157 int *count, int syndrome_disks)
158{ 158{
159 int slot; 159 int slot = *count;
160 160
161 if (sh->ddf_layout)
162 (*count)++;
161 if (idx == sh->pd_idx) 163 if (idx == sh->pd_idx)
162 return syndrome_disks; 164 return syndrome_disks;
163 if (idx == sh->qd_idx) 165 if (idx == sh->qd_idx)
164 return syndrome_disks + 1; 166 return syndrome_disks + 1;
165 slot = (*count)++; 167 if (!sh->ddf_layout)
168 (*count)++;
166 return slot; 169 return slot;
167} 170}
168 171
@@ -717,7 +720,7 @@ static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
717 int i; 720 int i;
718 721
719 for (i = 0; i < disks; i++) 722 for (i = 0; i < disks; i++)
720 srcs[i] = (void *)raid6_empty_zero_page; 723 srcs[i] = NULL;
721 724
722 count = 0; 725 count = 0;
723 i = d0_idx; 726 i = d0_idx;
@@ -727,9 +730,8 @@ static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
727 srcs[slot] = sh->dev[i].page; 730 srcs[slot] = sh->dev[i].page;
728 i = raid6_next_disk(i, disks); 731 i = raid6_next_disk(i, disks);
729 } while (i != d0_idx); 732 } while (i != d0_idx);
730 BUG_ON(count != syndrome_disks);
731 733
732 return count; 734 return syndrome_disks;
733} 735}
734 736
735static struct dma_async_tx_descriptor * 737static struct dma_async_tx_descriptor *
@@ -814,7 +816,7 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
814 * slot number conversion for 'faila' and 'failb' 816 * slot number conversion for 'faila' and 'failb'
815 */ 817 */
816 for (i = 0; i < disks ; i++) 818 for (i = 0; i < disks ; i++)
817 blocks[i] = (void *)raid6_empty_zero_page; 819 blocks[i] = NULL;
818 count = 0; 820 count = 0;
819 i = d0_idx; 821 i = d0_idx;
820 do { 822 do {
@@ -828,7 +830,6 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
828 failb = slot; 830 failb = slot;
829 i = raid6_next_disk(i, disks); 831 i = raid6_next_disk(i, disks);
830 } while (i != d0_idx); 832 } while (i != d0_idx);
831 BUG_ON(count != syndrome_disks);
832 833
833 BUG_ON(faila == failb); 834 BUG_ON(faila == failb);
834 if (failb < faila) 835 if (failb < faila)
@@ -845,7 +846,7 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
845 init_async_submit(&submit, ASYNC_TX_FENCE, NULL, 846 init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
846 ops_complete_compute, sh, 847 ops_complete_compute, sh,
847 to_addr_conv(sh, percpu)); 848 to_addr_conv(sh, percpu));
848 return async_gen_syndrome(blocks, 0, count+2, 849 return async_gen_syndrome(blocks, 0, syndrome_disks+2,
849 STRIPE_SIZE, &submit); 850 STRIPE_SIZE, &submit);
850 } else { 851 } else {
851 struct page *dest; 852 struct page *dest;
@@ -1139,7 +1140,7 @@ static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu
1139 &sh->ops.zero_sum_result, percpu->spare_page, &submit); 1140 &sh->ops.zero_sum_result, percpu->spare_page, &submit);
1140} 1141}
1141 1142
1142static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) 1143static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
1143{ 1144{
1144 int overlap_clear = 0, i, disks = sh->disks; 1145 int overlap_clear = 0, i, disks = sh->disks;
1145 struct dma_async_tx_descriptor *tx = NULL; 1146 struct dma_async_tx_descriptor *tx = NULL;
@@ -1204,22 +1205,55 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
1204 put_cpu(); 1205 put_cpu();
1205} 1206}
1206 1207
1208#ifdef CONFIG_MULTICORE_RAID456
1209static void async_run_ops(void *param, async_cookie_t cookie)
1210{
1211 struct stripe_head *sh = param;
1212 unsigned long ops_request = sh->ops.request;
1213
1214 clear_bit_unlock(STRIPE_OPS_REQ_PENDING, &sh->state);
1215 wake_up(&sh->ops.wait_for_ops);
1216
1217 __raid_run_ops(sh, ops_request);
1218 release_stripe(sh);
1219}
1220
1221static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
1222{
1223 /* since handle_stripe can be called outside of raid5d context
1224 * we need to ensure sh->ops.request is de-staged before another
1225 * request arrives
1226 */
1227 wait_event(sh->ops.wait_for_ops,
1228 !test_and_set_bit_lock(STRIPE_OPS_REQ_PENDING, &sh->state));
1229 sh->ops.request = ops_request;
1230
1231 atomic_inc(&sh->count);
1232 async_schedule(async_run_ops, sh);
1233}
1234#else
1235#define raid_run_ops __raid_run_ops
1236#endif
1237
1207static int grow_one_stripe(raid5_conf_t *conf) 1238static int grow_one_stripe(raid5_conf_t *conf)
1208{ 1239{
1209 struct stripe_head *sh; 1240 struct stripe_head *sh;
1241 int disks = max(conf->raid_disks, conf->previous_raid_disks);
1210 sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL); 1242 sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
1211 if (!sh) 1243 if (!sh)
1212 return 0; 1244 return 0;
1213 memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev)); 1245 memset(sh, 0, sizeof(*sh) + (disks-1)*sizeof(struct r5dev));
1214 sh->raid_conf = conf; 1246 sh->raid_conf = conf;
1215 spin_lock_init(&sh->lock); 1247 spin_lock_init(&sh->lock);
1248 #ifdef CONFIG_MULTICORE_RAID456
1249 init_waitqueue_head(&sh->ops.wait_for_ops);
1250 #endif
1216 1251
1217 if (grow_buffers(sh, conf->raid_disks)) { 1252 if (grow_buffers(sh, disks)) {
1218 shrink_buffers(sh, conf->raid_disks); 1253 shrink_buffers(sh, disks);
1219 kmem_cache_free(conf->slab_cache, sh); 1254 kmem_cache_free(conf->slab_cache, sh);
1220 return 0; 1255 return 0;
1221 } 1256 }
1222 sh->disks = conf->raid_disks;
1223 /* we just created an active stripe so... */ 1257 /* we just created an active stripe so... */
1224 atomic_set(&sh->count, 1); 1258 atomic_set(&sh->count, 1);
1225 atomic_inc(&conf->active_stripes); 1259 atomic_inc(&conf->active_stripes);
@@ -1231,7 +1265,7 @@ static int grow_one_stripe(raid5_conf_t *conf)
1231static int grow_stripes(raid5_conf_t *conf, int num) 1265static int grow_stripes(raid5_conf_t *conf, int num)
1232{ 1266{
1233 struct kmem_cache *sc; 1267 struct kmem_cache *sc;
1234 int devs = conf->raid_disks; 1268 int devs = max(conf->raid_disks, conf->previous_raid_disks);
1235 1269
1236 sprintf(conf->cache_name[0], 1270 sprintf(conf->cache_name[0],
1237 "raid%d-%s", conf->level, mdname(conf->mddev)); 1271 "raid%d-%s", conf->level, mdname(conf->mddev));
@@ -1329,6 +1363,9 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
1329 1363
1330 nsh->raid_conf = conf; 1364 nsh->raid_conf = conf;
1331 spin_lock_init(&nsh->lock); 1365 spin_lock_init(&nsh->lock);
1366 #ifdef CONFIG_MULTICORE_RAID456
1367 init_waitqueue_head(&nsh->ops.wait_for_ops);
1368 #endif
1332 1369
1333 list_add(&nsh->lru, &newstripes); 1370 list_add(&nsh->lru, &newstripes);
1334 } 1371 }
@@ -1899,10 +1936,15 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
1899 case ALGORITHM_PARITY_N: 1936 case ALGORITHM_PARITY_N:
1900 break; 1937 break;
1901 case ALGORITHM_ROTATING_N_CONTINUE: 1938 case ALGORITHM_ROTATING_N_CONTINUE:
1939 /* Like left_symmetric, but P is before Q */
1902 if (sh->pd_idx == 0) 1940 if (sh->pd_idx == 0)
1903 i--; /* P D D D Q */ 1941 i--; /* P D D D Q */
1904 else if (i > sh->pd_idx) 1942 else {
1905 i -= 2; /* D D Q P D */ 1943 /* D D Q P D */
1944 if (i < sh->pd_idx)
1945 i += raid_disks;
1946 i -= (sh->pd_idx + 1);
1947 }
1906 break; 1948 break;
1907 case ALGORITHM_LEFT_ASYMMETRIC_6: 1949 case ALGORITHM_LEFT_ASYMMETRIC_6:
1908 case ALGORITHM_RIGHT_ASYMMETRIC_6: 1950 case ALGORITHM_RIGHT_ASYMMETRIC_6:
@@ -2896,7 +2938,7 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
2896 * 2938 *
2897 */ 2939 */
2898 2940
2899static bool handle_stripe5(struct stripe_head *sh) 2941static void handle_stripe5(struct stripe_head *sh)
2900{ 2942{
2901 raid5_conf_t *conf = sh->raid_conf; 2943 raid5_conf_t *conf = sh->raid_conf;
2902 int disks = sh->disks, i; 2944 int disks = sh->disks, i;
@@ -3167,11 +3209,9 @@ static bool handle_stripe5(struct stripe_head *sh)
3167 ops_run_io(sh, &s); 3209 ops_run_io(sh, &s);
3168 3210
3169 return_io(return_bi); 3211 return_io(return_bi);
3170
3171 return blocked_rdev == NULL;
3172} 3212}
3173 3213
3174static bool handle_stripe6(struct stripe_head *sh) 3214static void handle_stripe6(struct stripe_head *sh)
3175{ 3215{
3176 raid5_conf_t *conf = sh->raid_conf; 3216 raid5_conf_t *conf = sh->raid_conf;
3177 int disks = sh->disks; 3217 int disks = sh->disks;
@@ -3455,17 +3495,14 @@ static bool handle_stripe6(struct stripe_head *sh)
3455 ops_run_io(sh, &s); 3495 ops_run_io(sh, &s);
3456 3496
3457 return_io(return_bi); 3497 return_io(return_bi);
3458
3459 return blocked_rdev == NULL;
3460} 3498}
3461 3499
3462/* returns true if the stripe was handled */ 3500static void handle_stripe(struct stripe_head *sh)
3463static bool handle_stripe(struct stripe_head *sh)
3464{ 3501{
3465 if (sh->raid_conf->level == 6) 3502 if (sh->raid_conf->level == 6)
3466 return handle_stripe6(sh); 3503 handle_stripe6(sh);
3467 else 3504 else
3468 return handle_stripe5(sh); 3505 handle_stripe5(sh);
3469} 3506}
3470 3507
3471static void raid5_activate_delayed(raid5_conf_t *conf) 3508static void raid5_activate_delayed(raid5_conf_t *conf)
@@ -3503,9 +3540,10 @@ static void unplug_slaves(mddev_t *mddev)
3503{ 3540{
3504 raid5_conf_t *conf = mddev->private; 3541 raid5_conf_t *conf = mddev->private;
3505 int i; 3542 int i;
3543 int devs = max(conf->raid_disks, conf->previous_raid_disks);
3506 3544
3507 rcu_read_lock(); 3545 rcu_read_lock();
3508 for (i = 0; i < conf->raid_disks; i++) { 3546 for (i = 0; i < devs; i++) {
3509 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); 3547 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
3510 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { 3548 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
3511 struct request_queue *r_queue = bdev_get_queue(rdev->bdev); 3549 struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
@@ -4277,9 +4315,7 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
4277 clear_bit(STRIPE_INSYNC, &sh->state); 4315 clear_bit(STRIPE_INSYNC, &sh->state);
4278 spin_unlock(&sh->lock); 4316 spin_unlock(&sh->lock);
4279 4317
4280 /* wait for any blocked device to be handled */ 4318 handle_stripe(sh);
4281 while (unlikely(!handle_stripe(sh)))
4282 ;
4283 release_stripe(sh); 4319 release_stripe(sh);
4284 4320
4285 return STRIPE_SECTORS; 4321 return STRIPE_SECTORS;
@@ -4349,37 +4385,6 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
4349 return handled; 4385 return handled;
4350} 4386}
4351 4387
4352#ifdef CONFIG_MULTICORE_RAID456
4353static void __process_stripe(void *param, async_cookie_t cookie)
4354{
4355 struct stripe_head *sh = param;
4356
4357 handle_stripe(sh);
4358 release_stripe(sh);
4359}
4360
4361static void process_stripe(struct stripe_head *sh, struct list_head *domain)
4362{
4363 async_schedule_domain(__process_stripe, sh, domain);
4364}
4365
4366static void synchronize_stripe_processing(struct list_head *domain)
4367{
4368 async_synchronize_full_domain(domain);
4369}
4370#else
4371static void process_stripe(struct stripe_head *sh, struct list_head *domain)
4372{
4373 handle_stripe(sh);
4374 release_stripe(sh);
4375 cond_resched();
4376}
4377
4378static void synchronize_stripe_processing(struct list_head *domain)
4379{
4380}
4381#endif
4382
4383 4388
4384/* 4389/*
4385 * This is our raid5 kernel thread. 4390 * This is our raid5 kernel thread.
@@ -4393,7 +4398,6 @@ static void raid5d(mddev_t *mddev)
4393 struct stripe_head *sh; 4398 struct stripe_head *sh;
4394 raid5_conf_t *conf = mddev->private; 4399 raid5_conf_t *conf = mddev->private;
4395 int handled; 4400 int handled;
4396 LIST_HEAD(raid_domain);
4397 4401
4398 pr_debug("+++ raid5d active\n"); 4402 pr_debug("+++ raid5d active\n");
4399 4403
@@ -4430,7 +4434,9 @@ static void raid5d(mddev_t *mddev)
4430 spin_unlock_irq(&conf->device_lock); 4434 spin_unlock_irq(&conf->device_lock);
4431 4435
4432 handled++; 4436 handled++;
4433 process_stripe(sh, &raid_domain); 4437 handle_stripe(sh);
4438 release_stripe(sh);
4439 cond_resched();
4434 4440
4435 spin_lock_irq(&conf->device_lock); 4441 spin_lock_irq(&conf->device_lock);
4436 } 4442 }
@@ -4438,7 +4444,6 @@ static void raid5d(mddev_t *mddev)
4438 4444
4439 spin_unlock_irq(&conf->device_lock); 4445 spin_unlock_irq(&conf->device_lock);
4440 4446
4441 synchronize_stripe_processing(&raid_domain);
4442 async_tx_issue_pending_all(); 4447 async_tx_issue_pending_all();
4443 unplug_slaves(mddev); 4448 unplug_slaves(mddev);
4444 4449
@@ -4558,13 +4563,9 @@ raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks)
4558 4563
4559 if (!sectors) 4564 if (!sectors)
4560 sectors = mddev->dev_sectors; 4565 sectors = mddev->dev_sectors;
4561 if (!raid_disks) { 4566 if (!raid_disks)
4562 /* size is defined by the smallest of previous and new size */ 4567 /* size is defined by the smallest of previous and new size */
4563 if (conf->raid_disks < conf->previous_raid_disks) 4568 raid_disks = min(conf->raid_disks, conf->previous_raid_disks);
4564 raid_disks = conf->raid_disks;
4565 else
4566 raid_disks = conf->previous_raid_disks;
4567 }
4568 4569
4569 sectors &= ~((sector_t)mddev->chunk_sectors - 1); 4570 sectors &= ~((sector_t)mddev->chunk_sectors - 1);
4570 sectors &= ~((sector_t)mddev->new_chunk_sectors - 1); 4571 sectors &= ~((sector_t)mddev->new_chunk_sectors - 1);
@@ -4665,7 +4666,7 @@ static int raid5_alloc_percpu(raid5_conf_t *conf)
4665 } 4666 }
4666 per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page; 4667 per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
4667 } 4668 }
4668 scribble = kmalloc(scribble_len(conf->raid_disks), GFP_KERNEL); 4669 scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
4669 if (!scribble) { 4670 if (!scribble) {
4670 err = -ENOMEM; 4671 err = -ENOMEM;
4671 break; 4672 break;
@@ -4686,7 +4687,7 @@ static int raid5_alloc_percpu(raid5_conf_t *conf)
4686static raid5_conf_t *setup_conf(mddev_t *mddev) 4687static raid5_conf_t *setup_conf(mddev_t *mddev)
4687{ 4688{
4688 raid5_conf_t *conf; 4689 raid5_conf_t *conf;
4689 int raid_disk, memory; 4690 int raid_disk, memory, max_disks;
4690 mdk_rdev_t *rdev; 4691 mdk_rdev_t *rdev;
4691 struct disk_info *disk; 4692 struct disk_info *disk;
4692 4693
@@ -4722,15 +4723,28 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
4722 conf = kzalloc(sizeof(raid5_conf_t), GFP_KERNEL); 4723 conf = kzalloc(sizeof(raid5_conf_t), GFP_KERNEL);
4723 if (conf == NULL) 4724 if (conf == NULL)
4724 goto abort; 4725 goto abort;
4726 spin_lock_init(&conf->device_lock);
4727 init_waitqueue_head(&conf->wait_for_stripe);
4728 init_waitqueue_head(&conf->wait_for_overlap);
4729 INIT_LIST_HEAD(&conf->handle_list);
4730 INIT_LIST_HEAD(&conf->hold_list);
4731 INIT_LIST_HEAD(&conf->delayed_list);
4732 INIT_LIST_HEAD(&conf->bitmap_list);
4733 INIT_LIST_HEAD(&conf->inactive_list);
4734 atomic_set(&conf->active_stripes, 0);
4735 atomic_set(&conf->preread_active_stripes, 0);
4736 atomic_set(&conf->active_aligned_reads, 0);
4737 conf->bypass_threshold = BYPASS_THRESHOLD;
4725 4738
4726 conf->raid_disks = mddev->raid_disks; 4739 conf->raid_disks = mddev->raid_disks;
4727 conf->scribble_len = scribble_len(conf->raid_disks);
4728 if (mddev->reshape_position == MaxSector) 4740 if (mddev->reshape_position == MaxSector)
4729 conf->previous_raid_disks = mddev->raid_disks; 4741 conf->previous_raid_disks = mddev->raid_disks;
4730 else 4742 else
4731 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; 4743 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
4744 max_disks = max(conf->raid_disks, conf->previous_raid_disks);
4745 conf->scribble_len = scribble_len(max_disks);
4732 4746
4733 conf->disks = kzalloc(conf->raid_disks * sizeof(struct disk_info), 4747 conf->disks = kzalloc(max_disks * sizeof(struct disk_info),
4734 GFP_KERNEL); 4748 GFP_KERNEL);
4735 if (!conf->disks) 4749 if (!conf->disks)
4736 goto abort; 4750 goto abort;
@@ -4744,24 +4758,11 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
4744 if (raid5_alloc_percpu(conf) != 0) 4758 if (raid5_alloc_percpu(conf) != 0)
4745 goto abort; 4759 goto abort;
4746 4760
4747 spin_lock_init(&conf->device_lock);
4748 init_waitqueue_head(&conf->wait_for_stripe);
4749 init_waitqueue_head(&conf->wait_for_overlap);
4750 INIT_LIST_HEAD(&conf->handle_list);
4751 INIT_LIST_HEAD(&conf->hold_list);
4752 INIT_LIST_HEAD(&conf->delayed_list);
4753 INIT_LIST_HEAD(&conf->bitmap_list);
4754 INIT_LIST_HEAD(&conf->inactive_list);
4755 atomic_set(&conf->active_stripes, 0);
4756 atomic_set(&conf->preread_active_stripes, 0);
4757 atomic_set(&conf->active_aligned_reads, 0);
4758 conf->bypass_threshold = BYPASS_THRESHOLD;
4759
4760 pr_debug("raid5: run(%s) called.\n", mdname(mddev)); 4761 pr_debug("raid5: run(%s) called.\n", mdname(mddev));
4761 4762
4762 list_for_each_entry(rdev, &mddev->disks, same_set) { 4763 list_for_each_entry(rdev, &mddev->disks, same_set) {
4763 raid_disk = rdev->raid_disk; 4764 raid_disk = rdev->raid_disk;
4764 if (raid_disk >= conf->raid_disks 4765 if (raid_disk >= max_disks
4765 || raid_disk < 0) 4766 || raid_disk < 0)
4766 continue; 4767 continue;
4767 disk = conf->disks + raid_disk; 4768 disk = conf->disks + raid_disk;
@@ -4793,7 +4794,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
4793 } 4794 }
4794 4795
4795 memory = conf->max_nr_stripes * (sizeof(struct stripe_head) + 4796 memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
4796 conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; 4797 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
4797 if (grow_stripes(conf, conf->max_nr_stripes)) { 4798 if (grow_stripes(conf, conf->max_nr_stripes)) {
4798 printk(KERN_ERR 4799 printk(KERN_ERR
4799 "raid5: couldn't allocate %dkB for buffers\n", memory); 4800 "raid5: couldn't allocate %dkB for buffers\n", memory);
@@ -4918,7 +4919,8 @@ static int run(mddev_t *mddev)
4918 test_bit(In_sync, &rdev->flags)) 4919 test_bit(In_sync, &rdev->flags))
4919 working_disks++; 4920 working_disks++;
4920 4921
4921 mddev->degraded = conf->raid_disks - working_disks; 4922 mddev->degraded = (max(conf->raid_disks, conf->previous_raid_disks)
4923 - working_disks);
4922 4924
4923 if (mddev->degraded > conf->max_degraded) { 4925 if (mddev->degraded > conf->max_degraded) {
4924 printk(KERN_ERR "raid5: not enough operational devices for %s" 4926 printk(KERN_ERR "raid5: not enough operational devices for %s"
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 2390e0e83daf..dd708359b451 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -214,12 +214,20 @@ struct stripe_head {
 	int disks; /* disks in stripe */
 	enum check_states check_state;
 	enum reconstruct_states reconstruct_state;
-	/* stripe_operations
+	/**
+	 * struct stripe_operations
 	 * @target - STRIPE_OP_COMPUTE_BLK target
+	 * @target2 - 2nd compute target in the raid6 case
+	 * @zero_sum_result - P and Q verification flags
+	 * @request - async service request flags for raid_run_ops
 	 */
 	struct stripe_operations {
 		int target, target2;
 		enum sum_check_flags zero_sum_result;
+#ifdef CONFIG_MULTICORE_RAID456
+		unsigned long request;
+		wait_queue_head_t wait_for_ops;
+#endif
 	} ops;
 	struct r5dev {
 		struct bio req;
@@ -294,6 +302,8 @@ struct r6_state {
 #define STRIPE_FULL_WRITE	13 /* all blocks are set to be overwritten */
 #define STRIPE_BIOFILL_RUN	14
 #define STRIPE_COMPUTE_RUN	15
+#define STRIPE_OPS_REQ_PENDING	16
+
 /*
  * Operation request flags
  */
@@ -478,7 +488,7 @@ static inline int algorithm_valid_raid6(int layout)
 {
 	return (layout >= 0 && layout <= 5)
 		||
-		(layout == 8 || layout == 10)
+		(layout >= 8 && layout <= 10)
 		||
 		(layout >= 16 && layout <= 20);
 }
diff --git a/drivers/md/raid6altivec.uc b/drivers/md/raid6altivec.uc
index 699dfeee4944..2654d5c854be 100644
--- a/drivers/md/raid6altivec.uc
+++ b/drivers/md/raid6altivec.uc
@@ -15,7 +15,7 @@
  *
  * $#-way unrolled portable integer math RAID-6 instruction set
  *
- * This file is postprocessed using unroll.pl
+ * This file is postprocessed using unroll.awk
  *
  * <benh> hpa: in process,
  * you can just "steal" the vec unit with enable_kernel_altivec() (but
diff --git a/drivers/md/raid6int.uc b/drivers/md/raid6int.uc
index f9bf9cba357f..d1e276a14fab 100644
--- a/drivers/md/raid6int.uc
+++ b/drivers/md/raid6int.uc
@@ -15,7 +15,7 @@
  *
  * $#-way unrolled portable integer math RAID-6 instruction set
  *
- * This file is postprocessed using unroll.pl
+ * This file is postprocessed using unroll.awk
  */
 
 #include <linux/raid/pq.h>
diff --git a/drivers/md/raid6test/Makefile b/drivers/md/raid6test/Makefile
index 58ffdf4f5161..2874cbef529d 100644
--- a/drivers/md/raid6test/Makefile
+++ b/drivers/md/raid6test/Makefile
@@ -7,7 +7,7 @@ CC = gcc
 OPTFLAGS = -O2		# Adjust as desired
 CFLAGS	 = -I.. -I ../../../include -g $(OPTFLAGS)
 LD	 = ld
-PERL	 = perl
+AWK	 = awk
 AR	 = ar
 RANLIB	 = ranlib
 
@@ -35,35 +35,35 @@ raid6.a: raid6int1.o raid6int2.o raid6int4.o raid6int8.o raid6int16.o \
 raid6test: test.c raid6.a
 	$(CC) $(CFLAGS) -o raid6test $^
 
-raid6altivec1.c: raid6altivec.uc ../unroll.pl
-	$(PERL) ../unroll.pl 1 < raid6altivec.uc > $@
+raid6altivec1.c: raid6altivec.uc ../unroll.awk
+	$(AWK) ../unroll.awk -vN=1 < raid6altivec.uc > $@
 
-raid6altivec2.c: raid6altivec.uc ../unroll.pl
-	$(PERL) ../unroll.pl 2 < raid6altivec.uc > $@
+raid6altivec2.c: raid6altivec.uc ../unroll.awk
+	$(AWK) ../unroll.awk -vN=2 < raid6altivec.uc > $@
 
-raid6altivec4.c: raid6altivec.uc ../unroll.pl
-	$(PERL) ../unroll.pl 4 < raid6altivec.uc > $@
+raid6altivec4.c: raid6altivec.uc ../unroll.awk
+	$(AWK) ../unroll.awk -vN=4 < raid6altivec.uc > $@
 
-raid6altivec8.c: raid6altivec.uc ../unroll.pl
-	$(PERL) ../unroll.pl 8 < raid6altivec.uc > $@
+raid6altivec8.c: raid6altivec.uc ../unroll.awk
+	$(AWK) ../unroll.awk -vN=8 < raid6altivec.uc > $@
 
-raid6int1.c: raid6int.uc ../unroll.pl
-	$(PERL) ../unroll.pl 1 < raid6int.uc > $@
+raid6int1.c: raid6int.uc ../unroll.awk
+	$(AWK) ../unroll.awk -vN=1 < raid6int.uc > $@
 
-raid6int2.c: raid6int.uc ../unroll.pl
-	$(PERL) ../unroll.pl 2 < raid6int.uc > $@
+raid6int2.c: raid6int.uc ../unroll.awk
+	$(AWK) ../unroll.awk -vN=2 < raid6int.uc > $@
 
-raid6int4.c: raid6int.uc ../unroll.pl
-	$(PERL) ../unroll.pl 4 < raid6int.uc > $@
+raid6int4.c: raid6int.uc ../unroll.awk
+	$(AWK) ../unroll.awk -vN=4 < raid6int.uc > $@
 
-raid6int8.c: raid6int.uc ../unroll.pl
-	$(PERL) ../unroll.pl 8 < raid6int.uc > $@
+raid6int8.c: raid6int.uc ../unroll.awk
+	$(AWK) ../unroll.awk -vN=8 < raid6int.uc > $@
 
-raid6int16.c: raid6int.uc ../unroll.pl
-	$(PERL) ../unroll.pl 16 < raid6int.uc > $@
+raid6int16.c: raid6int.uc ../unroll.awk
+	$(AWK) ../unroll.awk -vN=16 < raid6int.uc > $@
 
-raid6int32.c: raid6int.uc ../unroll.pl
-	$(PERL) ../unroll.pl 32 < raid6int.uc > $@
+raid6int32.c: raid6int.uc ../unroll.awk
+	$(AWK) ../unroll.awk -vN=32 < raid6int.uc > $@
 
 raid6tables.c: mktables
 	./mktables > raid6tables.c
diff --git a/drivers/md/unroll.awk b/drivers/md/unroll.awk
new file mode 100644
index 000000000000..c6aa03631df8
--- /dev/null
+++ b/drivers/md/unroll.awk
@@ -0,0 +1,20 @@
1
2# This filter requires one command line option of form -vN=n
3# where n must be a decimal number.
4#
5# Repeat each input line containing $$ n times, replacing $$ with 0...n-1.
6# Replace each $# with n, and each $* with a single $.
7
8BEGIN {
9 n = N + 0
10}
11{
12 if (/\$\$/) { rep = n } else { rep = 1 }
13 for (i = 0; i < rep; ++i) {
14 tmp = $0
15 gsub(/\$\$/, i, tmp)
16 gsub(/\$\#/, n, tmp)
17 gsub(/\$\*/, "$", tmp)
18 print tmp
19 }
20}
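
For reference, a quick sketch of what the new filter does (the input fragment below is illustrative only, not quoted from the .uc sources): an input line such as

        p[$$] ^= dptr[z0][$$];

run through the filter with, say, "awk -f unroll.awk -vN=2" would be emitted twice, once per unroll step:

        p[0] ^= dptr[z0][0];
        p[1] ^= dptr[z0][1];

Any $# in the input would become 2 and $* a literal $, the same substitutions the old unroll.pl performed; the visible difference is that the unroll factor is now passed as -vN=n rather than as a positional argument.
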
diff --git a/drivers/md/unroll.pl b/drivers/md/unroll.pl
deleted file mode 100644
index 3acc710a20ea..000000000000
--- a/drivers/md/unroll.pl
+++ /dev/null
@@ -1,24 +0,0 @@
1#!/usr/bin/perl
2#
3# Take a piece of C code and for each line which contains the sequence $$
4# repeat n times with $ replaced by 0...n-1; the sequence $# is replaced
5# by the unrolling factor, and $* with a single $
6#
7
8($n) = @ARGV;
9$n += 0;
10
11while ( defined($line = <STDIN>) ) {
12 if ( $line =~ /\$\$/ ) {
13 $rep = $n;
14 } else {
15 $rep = 1;
16 }
17 for ( $i = 0 ; $i < $rep ; $i++ ) {
18 $tmp = $line;
19 $tmp =~ s/\$\$/$i/g;
20 $tmp =~ s/\$\#/$n/g;
21 $tmp =~ s/\$\*/\$/g;
22 print $tmp;
23 }
24}
diff --git a/drivers/s390/char/sclp_async.c b/drivers/s390/char/sclp_async.c
index a4f68e5b9c96..b44462a6c6d3 100644
--- a/drivers/s390/char/sclp_async.c
+++ b/drivers/s390/char/sclp_async.c
@@ -26,7 +26,6 @@ static struct sclp_async_sccb *sccb;
26static int sclp_async_send_wait(char *message); 26static int sclp_async_send_wait(char *message);
27static struct ctl_table_header *callhome_sysctl_header; 27static struct ctl_table_header *callhome_sysctl_header;
28static DEFINE_SPINLOCK(sclp_async_lock); 28static DEFINE_SPINLOCK(sclp_async_lock);
29static char nodename[64];
30#define SCLP_NORMAL_WRITE 0x00 29#define SCLP_NORMAL_WRITE 0x00
31 30
32struct async_evbuf { 31struct async_evbuf {
@@ -52,9 +51,10 @@ static struct sclp_register sclp_async_register = {
52static int call_home_on_panic(struct notifier_block *self, 51static int call_home_on_panic(struct notifier_block *self,
53 unsigned long event, void *data) 52 unsigned long event, void *data)
54{ 53{
55 strncat(data, nodename, strlen(nodename)); 54 strncat(data, init_utsname()->nodename,
56 sclp_async_send_wait(data); 55 sizeof(init_utsname()->nodename));
57 return NOTIFY_DONE; 56 sclp_async_send_wait(data);
57 return NOTIFY_DONE;
58} 58}
59 59
60static struct notifier_block call_home_panic_nb = { 60static struct notifier_block call_home_panic_nb = {
@@ -68,15 +68,14 @@ static int proc_handler_callhome(struct ctl_table *ctl, int write,
68{ 68{
69 unsigned long val; 69 unsigned long val;
70 int len, rc; 70 int len, rc;
71 char buf[2]; 71 char buf[3];
72 72
73 if (!*count | (*ppos && !write)) { 73 if (!*count || (*ppos && !write)) {
74 *count = 0; 74 *count = 0;
75 return 0; 75 return 0;
76 } 76 }
77 if (!write) { 77 if (!write) {
78 len = sprintf(buf, "%d\n", callhome_enabled); 78 len = snprintf(buf, sizeof(buf), "%d\n", callhome_enabled);
79 buf[len] = '\0';
80 rc = copy_to_user(buffer, buf, sizeof(buf)); 79 rc = copy_to_user(buffer, buf, sizeof(buf));
81 if (rc != 0) 80 if (rc != 0)
82 return -EFAULT; 81 return -EFAULT;
@@ -171,39 +170,29 @@ static int __init sclp_async_init(void)
171 rc = sclp_register(&sclp_async_register); 170 rc = sclp_register(&sclp_async_register);
172 if (rc) 171 if (rc)
173 return rc; 172 return rc;
174 callhome_sysctl_header = register_sysctl_table(kern_dir_table); 173 rc = -EOPNOTSUPP;
175 if (!callhome_sysctl_header) { 174 if (!(sclp_async_register.sclp_receive_mask & EVTYP_ASYNC_MASK))
176 rc = -ENOMEM;
177 goto out_sclp;
178 }
179 if (!(sclp_async_register.sclp_receive_mask & EVTYP_ASYNC_MASK)) {
180 rc = -EOPNOTSUPP;
181 goto out_sclp; 175 goto out_sclp;
182 }
183 rc = -ENOMEM; 176 rc = -ENOMEM;
177 callhome_sysctl_header = register_sysctl_table(kern_dir_table);
178 if (!callhome_sysctl_header)
179 goto out_sclp;
184 request = kzalloc(sizeof(struct sclp_req), GFP_KERNEL); 180 request = kzalloc(sizeof(struct sclp_req), GFP_KERNEL);
185 if (!request)
186 goto out_sys;
187 sccb = (struct sclp_async_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA); 181 sccb = (struct sclp_async_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
188 if (!sccb) 182 if (!request || !sccb)
189 goto out_mem; 183 goto out_mem;
190 rc = atomic_notifier_chain_register(&panic_notifier_list, 184 rc = atomic_notifier_chain_register(&panic_notifier_list,
191 &call_home_panic_nb); 185 &call_home_panic_nb);
192 if (rc) 186 if (!rc)
193 goto out_mem; 187 goto out;
194
195 strncpy(nodename, init_utsname()->nodename, 64);
196 return 0;
197
198out_mem: 188out_mem:
199 kfree(request); 189 kfree(request);
200 free_page((unsigned long) sccb); 190 free_page((unsigned long) sccb);
201out_sys:
202 unregister_sysctl_table(callhome_sysctl_header); 191 unregister_sysctl_table(callhome_sysctl_header);
203out_sclp: 192out_sclp:
204 sclp_unregister(&sclp_async_register); 193 sclp_unregister(&sclp_async_register);
194out:
205 return rc; 195 return rc;
206
207} 196}
208module_init(sclp_async_init); 197module_init(sclp_async_init);
209 198
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index 102000d1af6f..3012355f8304 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -158,7 +158,12 @@ static int smsg_pm_restore_thaw(struct device *dev)
158 smsg_path->flags = 0; 158 smsg_path->flags = 0;
159 rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG ", 159 rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG ",
160 NULL, NULL, NULL); 160 NULL, NULL, NULL);
161 printk(KERN_ERR "iucv_path_connect returned with rc %i\n", rc); 161#ifdef CONFIG_PM_DEBUG
162 if (rc)
163 printk(KERN_ERR
164 "iucv_path_connect returned with rc %i\n", rc);
165#endif
166 cpcmd("SET SMSG IUCV", NULL, 0, NULL);
162 } 167 }
163 return 0; 168 return 0;
164} 169}
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 7d1aac31ec8d..496764349c41 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -1919,7 +1919,7 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
1919 size = size>>16; 1919 size = size>>16;
1920 size *= 4; 1920 size *= 4;
1921 if (size > MAX_MESSAGE_SIZE) { 1921 if (size > MAX_MESSAGE_SIZE) {
1922 rcode = EINVAL; 1922 rcode = -EINVAL;
1923 goto cleanup; 1923 goto cleanup;
1924 } 1924 }
1925 /* Copy in the user's I2O command */ 1925 /* Copy in the user's I2O command */
diff --git a/fs/xfs/linux-2.6/xfs_quotaops.c b/fs/xfs/linux-2.6/xfs_quotaops.c
index 9e41f91aa269..3d4a0c84d634 100644
--- a/fs/xfs/linux-2.6/xfs_quotaops.c
+++ b/fs/xfs/linux-2.6/xfs_quotaops.c
@@ -80,7 +80,7 @@ xfs_fs_set_xstate(
80 80
81 if (sb->s_flags & MS_RDONLY) 81 if (sb->s_flags & MS_RDONLY)
82 return -EROFS; 82 return -EROFS;
83 if (!XFS_IS_QUOTA_RUNNING(mp)) 83 if (op != Q_XQUOTARM && !XFS_IS_QUOTA_RUNNING(mp))
84 return -ENOSYS; 84 return -ENOSYS;
85 if (!capable(CAP_SYS_ADMIN)) 85 if (!capable(CAP_SYS_ADMIN))
86 return -EPERM; 86 return -EPERM;
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index ab64f3efb43b..0785797db828 100644
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -880,6 +880,7 @@ nextag:
880 * Not in range - save last search 880 * Not in range - save last search
881 * location and allocate a new inode 881 * location and allocate a new inode
882 */ 882 */
883 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
883 pag->pagl_leftrec = trec.ir_startino; 884 pag->pagl_leftrec = trec.ir_startino;
884 pag->pagl_rightrec = rec.ir_startino; 885 pag->pagl_rightrec = rec.ir_startino;
885 pag->pagl_pagino = pagino; 886 pag->pagl_pagino = pagino;
diff --git a/mm/nommu.c b/mm/nommu.c
index 5189b5aed8c0..9876fa0c3ad3 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1362,9 +1362,11 @@ share:
1362error_just_free: 1362error_just_free:
1363 up_write(&nommu_region_sem); 1363 up_write(&nommu_region_sem);
1364error: 1364error:
1365 fput(region->vm_file); 1365 if (region->vm_file)
1366 fput(region->vm_file);
1366 kmem_cache_free(vm_region_jar, region); 1367 kmem_cache_free(vm_region_jar, region);
1367 fput(vma->vm_file); 1368 if (vma->vm_file)
1369 fput(vma->vm_file);
1368 if (vma->vm_flags & VM_EXECUTABLE) 1370 if (vma->vm_flags & VM_EXECUTABLE)
1369 removed_exe_file_vma(vma->vm_mm); 1371 removed_exe_file_vma(vma->vm_mm);
1370 kmem_cache_free(vm_area_cachep, vma); 1372 kmem_cache_free(vm_area_cachep, vma);