-rw-r--r--  arch/s390/kernel/early.c               9
-rw-r--r--  arch/s390/kernel/entry.S               8
-rw-r--r--  arch/s390/kernel/entry64.S             8
-rw-r--r--  drivers/bluetooth/btusb.c              4
-rw-r--r--  drivers/input/ff-core.c               20
-rw-r--r--  drivers/input/ff-memless.c            26
-rw-r--r--  drivers/input/input.c                 29
-rw-r--r--  drivers/input/keyboard/atkbd.c        13
-rw-r--r--  drivers/input/mouse/psmouse-base.c     2
-rw-r--r--  drivers/md/md.c                       41
-rw-r--r--  drivers/md/raid5.c                    85
-rw-r--r--  drivers/s390/char/monreader.c          1
-rw-r--r--  drivers/s390/char/sclp_quiesce.c      48
-rw-r--r--  fs/nfsd/nfs3xdr.c                      2
-rw-r--r--  include/linux/input.h                  4
-rw-r--r--  mm/percpu.c                          121
16 files changed, 311 insertions, 110 deletions
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index bf8b4ae7ff2d..e49e9e0c69fd 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -55,6 +55,7 @@ static void __init reset_tod_clock(void)
         disabled_wait(0);
 
         sched_clock_base_cc = TOD_UNIX_EPOCH;
+        S390_lowcore.last_update_clock = sched_clock_base_cc;
 }
 
 #ifdef CONFIG_SHARED_KERNEL
@@ -167,6 +168,14 @@ static noinline __init void create_kernel_nss(void)
                 return;
         }
 
+        /* re-initialize cputime accounting. */
+        sched_clock_base_cc = get_clock();
+        S390_lowcore.last_update_clock = sched_clock_base_cc;
+        S390_lowcore.last_update_timer = 0x7fffffffffffffffULL;
+        S390_lowcore.user_timer = 0;
+        S390_lowcore.system_timer = 0;
+        asm volatile("SPT 0(%0)" : : "a" (&S390_lowcore.last_update_timer));
+
         /* re-setup boot command line with new ipl vm parms */
         ipl_update_parameters();
         setup_boot_command_line();
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index f43d2ee54464..48215d15762b 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -565,10 +565,10 @@ pgm_svcper:
         lh      %r7,0x8a                # get svc number from lowcore
         l       %r9,__LC_THREAD_INFO    # load pointer to thread_info struct
         TRACE_IRQS_OFF
-        l       %r1,__TI_task(%r9)
-        mvc     __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
-        mvc     __THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
-        mvc     __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
+        l       %r8,__TI_task(%r9)
+        mvc     __THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID
+        mvc     __THREAD_per+__PER_address(4,%r8),__LC_PER_ADDRESS
+        mvc     __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID
         oi      __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
         TRACE_IRQS_ON
         stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index a6f7b20df616..9aff1d449b6e 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -543,10 +543,10 @@ pgm_svcper:
         mvc     __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
         llgh    %r7,__LC_SVC_INT_CODE   # get svc number from lowcore
         lg      %r9,__LC_THREAD_INFO    # load pointer to thread_info struct
-        lg      %r1,__TI_task(%r9)
-        mvc     __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
-        mvc     __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
-        mvc     __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
+        lg      %r8,__TI_task(%r9)
+        mvc     __THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID
+        mvc     __THREAD_per+__PER_address(8,%r8),__LC_PER_ADDRESS
+        mvc     __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID
         oi      __TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
         TRACE_IRQS_ON
         stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 2fb38027f3bb..44bc8bbabf54 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -600,11 +600,13 @@ static int btusb_close(struct hci_dev *hdev)
         btusb_stop_traffic(data);
         err = usb_autopm_get_interface(data->intf);
         if (err < 0)
-                return 0;
+                goto failed;
 
         data->intf->needs_remote_wakeup = 0;
         usb_autopm_put_interface(data->intf);
 
+failed:
+        usb_scuttle_anchored_urbs(&data->deferred);
         return 0;
 }
 
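The btusb hunk routes the early-failure path through a new failed: label so
URBs parked on the deferred anchor are always scuttled at close, instead of
leaking when usb_autopm_get_interface() fails. A minimal user-space sketch of
that cleanup-label idiom (all names invented for illustration, standing in for
the USB calls):

#include <stdio.h>
#include <stdlib.h>

static int do_optional_step(void)
{
        return -1;                      /* pretend the optional step failed */
}

static int device_close(char **deferred)
{
        if (do_optional_step() < 0)
                goto failed;            /* early exit still runs the cleanup */

        printf("optional step succeeded\n");

failed:
        free(*deferred);                /* stands in for usb_scuttle_anchored_urbs() */
        *deferred = NULL;
        return 0;
}

int main(void)
{
        char *deferred = malloc(16);

        return device_close(&deferred);
}
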
diff --git a/drivers/input/ff-core.c b/drivers/input/ff-core.c
index 72c63e5dd630..38df81fcdc3a 100644
--- a/drivers/input/ff-core.c
+++ b/drivers/input/ff-core.c
@@ -337,16 +337,16 @@ int input_ff_create(struct input_dev *dev, int max_effects)
         dev->ff = ff;
         dev->flush = flush_effects;
         dev->event = input_ff_event;
-        set_bit(EV_FF, dev->evbit);
+        __set_bit(EV_FF, dev->evbit);
 
         /* Copy "true" bits into ff device bitmap */
         for (i = 0; i <= FF_MAX; i++)
                 if (test_bit(i, dev->ffbit))
-                        set_bit(i, ff->ffbit);
+                        __set_bit(i, ff->ffbit);
 
         /* we can emulate RUMBLE with periodic effects */
         if (test_bit(FF_PERIODIC, ff->ffbit))
-                set_bit(FF_RUMBLE, dev->ffbit);
+                __set_bit(FF_RUMBLE, dev->ffbit);
 
         return 0;
 }
@@ -362,12 +362,14 @@ EXPORT_SYMBOL_GPL(input_ff_create);
  */
 void input_ff_destroy(struct input_dev *dev)
 {
-        clear_bit(EV_FF, dev->evbit);
-        if (dev->ff) {
-                if (dev->ff->destroy)
-                        dev->ff->destroy(dev->ff);
-                kfree(dev->ff->private);
-                kfree(dev->ff);
+        struct ff_device *ff = dev->ff;
+
+        __clear_bit(EV_FF, dev->evbit);
+        if (ff) {
+                if (ff->destroy)
+                        ff->destroy(ff);
+                kfree(ff->private);
+                kfree(ff);
                 dev->ff = NULL;
         }
 }
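These ff-core hunks swap the atomic set_bit()/clear_bit() helpers for their
non-atomic __set_bit()/__clear_bit() twins, which is safe because the bitmaps
are either not yet published (the device is not registered) or protected by
dev->event_lock. A rough user-space sketch of the distinction (hypothetical
helpers, not the kernel implementations):

#include <stdatomic.h>
#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* rough analogue of the kernel's non-atomic __set_bit() */
static void plain_set_bit(unsigned int nr, unsigned long *addr)
{
        addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

/* rough analogue of the atomic set_bit() */
static void atomic_set_bit(unsigned int nr, _Atomic unsigned long *addr)
{
        atomic_fetch_or(&addr[nr / BITS_PER_LONG], 1UL << (nr % BITS_PER_LONG));
}

int main(void)
{
        unsigned long evbit[1] = { 0 };
        _Atomic unsigned long shared[1] = { 0 };

        plain_set_bit(3, evbit);        /* fine: nobody else sees evbit yet */
        atomic_set_bit(3, shared);      /* needed only under concurrency */
        printf("%lx %lx\n", evbit[0], (unsigned long)shared[0]);
        return 0;
}
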
diff --git a/drivers/input/ff-memless.c b/drivers/input/ff-memless.c
index 2d1415e16834..b483b2995fa9 100644
--- a/drivers/input/ff-memless.c
+++ b/drivers/input/ff-memless.c
@@ -61,7 +61,6 @@ struct ml_device {
         struct ml_effect_state states[FF_MEMLESS_EFFECTS];
         int gain;
         struct timer_list timer;
-        spinlock_t timer_lock;
         struct input_dev *dev;
 
         int (*play_effect)(struct input_dev *dev, void *data,
@@ -368,38 +367,38 @@ static void ml_effect_timer(unsigned long timer_data)
 {
         struct input_dev *dev = (struct input_dev *)timer_data;
         struct ml_device *ml = dev->ff->private;
+        unsigned long flags;
 
         debug("timer: updating effects");
 
-        spin_lock(&ml->timer_lock);
+        spin_lock_irqsave(&dev->event_lock, flags);
         ml_play_effects(ml);
-        spin_unlock(&ml->timer_lock);
+        spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
+/*
+ * Sets requested gain for FF effects. Called with dev->event_lock held.
+ */
 static void ml_ff_set_gain(struct input_dev *dev, u16 gain)
 {
         struct ml_device *ml = dev->ff->private;
         int i;
 
-        spin_lock_bh(&ml->timer_lock);
-
         ml->gain = gain;
 
         for (i = 0; i < FF_MEMLESS_EFFECTS; i++)
                 __clear_bit(FF_EFFECT_PLAYING, &ml->states[i].flags);
 
         ml_play_effects(ml);
-
-        spin_unlock_bh(&ml->timer_lock);
 }
 
+/*
+ * Start/stop specified FF effect. Called with dev->event_lock held.
+ */
 static int ml_ff_playback(struct input_dev *dev, int effect_id, int value)
 {
         struct ml_device *ml = dev->ff->private;
         struct ml_effect_state *state = &ml->states[effect_id];
-        unsigned long flags;
-
-        spin_lock_irqsave(&ml->timer_lock, flags);
 
         if (value > 0) {
                 debug("initiated play");
@@ -425,8 +424,6 @@ static int ml_ff_playback(struct input_dev *dev, int effect_id, int value)
                 ml_play_effects(ml);
         }
 
-        spin_unlock_irqrestore(&ml->timer_lock, flags);
-
         return 0;
 }
 
@@ -436,7 +433,7 @@ static int ml_ff_upload(struct input_dev *dev,
         struct ml_device *ml = dev->ff->private;
         struct ml_effect_state *state = &ml->states[effect->id];
 
-        spin_lock_bh(&ml->timer_lock);
+        spin_lock_irq(&dev->event_lock);
 
         if (test_bit(FF_EFFECT_STARTED, &state->flags)) {
                 __clear_bit(FF_EFFECT_PLAYING, &state->flags);
@@ -448,7 +445,7 @@ static int ml_ff_upload(struct input_dev *dev,
                 ml_schedule_timer(ml);
         }
 
-        spin_unlock_bh(&ml->timer_lock);
+        spin_unlock_irq(&dev->event_lock);
 
         return 0;
 }
@@ -482,7 +479,6 @@ int input_ff_create_memless(struct input_dev *dev, void *data,
         ml->private = data;
         ml->play_effect = play_effect;
         ml->gain = 0xffff;
-        spin_lock_init(&ml->timer_lock);
         setup_timer(&ml->timer, ml_effect_timer, (unsigned long)dev);
 
         set_bit(FF_GAIN, dev->ffbit);
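The ff-memless rework deletes the driver-private timer_lock and serializes
everything on dev->event_lock: the request handlers (ml_ff_set_gain(),
ml_ff_playback()) now rely on the input core already holding that lock, while
the timer callback and ml_ff_upload(), which run without it, take it
themselves. A user-space analogy of this single-lock scheme (names invented):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t event_lock = PTHREAD_MUTEX_INITIALIZER; /* ~dev->event_lock */
static int gain;

static void play_effects(void)          /* caller must hold event_lock */
{
        printf("playing at gain %d\n", gain);
}

static void ff_set_gain(int g)          /* core calls this with the lock held */
{
        gain = g;
        play_effects();
}

static void effect_timer(void)          /* async path: takes the lock itself */
{
        pthread_mutex_lock(&event_lock);
        play_effects();
        pthread_mutex_unlock(&event_lock);
}

int main(void)
{
        pthread_mutex_lock(&event_lock);        /* the "input core" */
        ff_set_gain(0xffff);
        pthread_mutex_unlock(&event_lock);

        effect_timer();
        return 0;
}
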
diff --git a/drivers/input/input.c b/drivers/input/input.c
index cc763c96fada..2266ecbfbc01 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1292,17 +1292,24 @@ static int input_dev_uevent(struct device *device, struct kobj_uevent_env *env)
         return 0;
 }
 
 #define INPUT_DO_TOGGLE(dev, type, bits, on)                        \
         do {                                                        \
                 int i;                                              \
-                if (!test_bit(EV_##type, dev->evbit))               \
-                        break;                                      \
-                for (i = 0; i < type##_MAX; i++) {                  \
-                        if (!test_bit(i, dev->bits##bit) ||         \
-                            !test_bit(i, dev->bits))                \
-                                continue;                           \
-                        dev->event(dev, EV_##type, i, on);          \
-                }                                                   \
+                bool active;                                        \
+                                                                    \
+                if (!test_bit(EV_##type, dev->evbit))               \
+                        break;                                      \
+                                                                    \
+                for (i = 0; i < type##_MAX; i++) {                  \
+                        if (!test_bit(i, dev->bits##bit))           \
+                                continue;                           \
+                                                                    \
+                        active = test_bit(i, dev->bits);            \
+                        if (!active && !on)                         \
+                                continue;                           \
+                                                                    \
+                        dev->event(dev, EV_##type, i, on ? active : 0); \
+                }                                                   \
         } while (0)
 
 #ifdef CONFIG_PM
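The reworked INPUT_DO_TOGGLE no longer skips bits that are merely supported:
on resume (on non-zero) it replays each supported LED/sound at its recorded
state, and on suspend it emits an off event only for the ones currently
active. A plain-function paraphrase of the per-bit logic (hypothetical, for
reading only; the real macro operates on the kernel bitmaps):

#include <stdbool.h>
#include <stdio.h>

static void emit_event(int code, int value)
{
        printf("event code=%d value=%d\n", code, value);
}

static void do_toggle(const bool *supported, const bool *state, int max, bool on)
{
        for (int i = 0; i < max; i++) {
                if (!supported[i])
                        continue;               /* device lacks this bit */

                bool active = state[i];
                if (!active && !on)
                        continue;               /* already off, nothing to send */

                /* resume: restore recorded state; suspend: force off */
                emit_event(i, on ? active : 0);
        }
}

int main(void)
{
        bool supported[3] = { true, true, false };
        bool state[3]     = { true, false, false };

        do_toggle(supported, state, 3, true);   /* resume  */
        do_toggle(supported, state, 3, false);  /* suspend */
        return 0;
}
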
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 4452eabbee6d..28e6110d1ff8 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -1174,6 +1174,18 @@ static int atkbd_reconnect(struct serio *serio)
                         return -1;
 
                 atkbd_activate(atkbd);
+
+                /*
+                 * Restore LED state and repeat rate. While input core
+                 * will do this for us at resume time reconnect may happen
+                 * because user requested it via sysfs or simply because
+                 * keyboard was unplugged and plugged in again so we need
+                 * to do it ourselves here.
+                 */
+                atkbd_set_leds(atkbd);
+                if (!atkbd->softrepeat)
+                        atkbd_set_repeat_rate(atkbd);
+
         }
 
         atkbd_enable(atkbd);
@@ -1422,6 +1434,7 @@ static ssize_t atkbd_set_set(struct atkbd *atkbd, const char *buf, size_t count)
 
         atkbd->dev = new_dev;
         atkbd->set = atkbd_select_set(atkbd, value, atkbd->extra);
+        atkbd_reset_state(atkbd);
         atkbd_activate(atkbd);
         atkbd_set_keycode_table(atkbd);
         atkbd_set_device_attrs(atkbd);
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 690aed905436..5bd64841bf1c 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -1673,7 +1673,7 @@ static int psmouse_get_maxproto(char *buffer, struct kernel_param *kp)
 {
         int type = *((unsigned int *)kp->arg);
 
-        return sprintf(buffer, "%s\n", psmouse_protocol_by_type(type)->name);
+        return sprintf(buffer, "%s", psmouse_protocol_by_type(type)->name);
 }
 
 static int __init psmouse_init(void)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index e64c971038d1..b182f86a19dd 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -944,6 +944,14 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
                     desc->raid_disk < mddev->raid_disks */) {
                         set_bit(In_sync, &rdev->flags);
                         rdev->raid_disk = desc->raid_disk;
+                } else if (desc->state & (1<<MD_DISK_ACTIVE)) {
+                        /* active but not in sync implies recovery up to
+                         * reshape position. We don't know exactly where
+                         * that is, so set to zero for now */
+                        if (mddev->minor_version >= 91) {
+                                rdev->recovery_offset = 0;
+                                rdev->raid_disk = desc->raid_disk;
+                        }
                 }
                 if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
                         set_bit(WriteMostly, &rdev->flags);
@@ -1032,8 +1040,19 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
         list_for_each_entry(rdev2, &mddev->disks, same_set) {
                 mdp_disk_t *d;
                 int desc_nr;
-                if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
-                    && !test_bit(Faulty, &rdev2->flags))
+                int is_active = test_bit(In_sync, &rdev2->flags);
+
+                if (rdev2->raid_disk >= 0 &&
+                    sb->minor_version >= 91)
+                        /* we have nowhere to store the recovery_offset,
+                         * but if it is not below the reshape_position,
+                         * we can piggy-back on that.
+                         */
+                        is_active = 1;
+                if (rdev2->raid_disk < 0 ||
+                    test_bit(Faulty, &rdev2->flags))
+                        is_active = 0;
+                if (is_active)
                         desc_nr = rdev2->raid_disk;
                 else
                         desc_nr = next_spare++;
@@ -1043,16 +1062,16 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
                 d->number = rdev2->desc_nr;
                 d->major = MAJOR(rdev2->bdev->bd_dev);
                 d->minor = MINOR(rdev2->bdev->bd_dev);
-                if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
-                    && !test_bit(Faulty, &rdev2->flags))
+                if (is_active)
                         d->raid_disk = rdev2->raid_disk;
                 else
                         d->raid_disk = rdev2->desc_nr; /* compatibility */
                 if (test_bit(Faulty, &rdev2->flags))
                         d->state = (1<<MD_DISK_FAULTY);
-                else if (test_bit(In_sync, &rdev2->flags)) {
+                else if (is_active) {
                         d->state = (1<<MD_DISK_ACTIVE);
-                        d->state |= (1<<MD_DISK_SYNC);
+                        if (test_bit(In_sync, &rdev2->flags))
+                                d->state |= (1<<MD_DISK_SYNC);
                         active++;
                         working++;
                 } else {
@@ -1382,8 +1401,6 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
 
         if (rdev->raid_disk >= 0 &&
             !test_bit(In_sync, &rdev->flags)) {
-                if (mddev->curr_resync_completed > rdev->recovery_offset)
-                        rdev->recovery_offset = mddev->curr_resync_completed;
                 if (rdev->recovery_offset > 0) {
                         sb->feature_map |=
                                 cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
@@ -1917,6 +1934,14 @@ static void sync_sbs(mddev_t * mddev, int nospares)
          */
         mdk_rdev_t *rdev;
 
+        /* First make sure individual recovery_offsets are correct */
+        list_for_each_entry(rdev, &mddev->disks, same_set) {
+                if (rdev->raid_disk >= 0 &&
+                    !test_bit(In_sync, &rdev->flags) &&
+                    mddev->curr_resync_completed > rdev->recovery_offset)
+                        rdev->recovery_offset = mddev->curr_resync_completed;
+
+        }
         list_for_each_entry(rdev, &mddev->disks, same_set) {
                 if (rdev->sb_events == mddev->events ||
                     (nospares &&
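Taken together, the md hunks define how a partially recovered device is
round-tripped through the v0.90/0.91 superblock, which has no per-device
recovery_offset field: super_90_sync() writes it as MD_DISK_ACTIVE without
MD_DISK_SYNC, and super_90_validate() maps that combination back to a recovery
restarted from offset zero. A small harness showing the flag encoding (the
MD_DISK_* bit numbers below match md_p.h as I read it; treat them as an
assumption):

#include <stdio.h>

/* bit numbers as in md_p.h (assumption: FAULTY=0, ACTIVE=1, SYNC=2) */
#define MD_DISK_ACTIVE  1
#define MD_DISK_SYNC    2

/* what super_90_sync() writes for an is_active device */
static unsigned int encode_state(int is_active, int in_sync)
{
        unsigned int state = 0;

        if (is_active) {
                state |= 1 << MD_DISK_ACTIVE;
                if (in_sync)
                        state |= 1 << MD_DISK_SYNC;
        }
        return state;
}

int main(void)
{
        unsigned int state = encode_state(1, 0);        /* recovering device */

        /* what super_90_validate() checks on the way back in */
        if ((state & (1 << MD_DISK_ACTIVE)) && !(state & (1 << MD_DISK_SYNC)))
                printf("active but not in sync: restart recovery at offset 0\n");
        return 0;
}
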
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index dcce204b6c73..d29215d966da 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4823,11 +4823,40 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
         return ERR_PTR(-ENOMEM);
 }
 
+
+static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded)
+{
+        switch (algo) {
+        case ALGORITHM_PARITY_0:
+                if (raid_disk < max_degraded)
+                        return 1;
+                break;
+        case ALGORITHM_PARITY_N:
+                if (raid_disk >= raid_disks - max_degraded)
+                        return 1;
+                break;
+        case ALGORITHM_PARITY_0_6:
+                if (raid_disk == 0 ||
+                    raid_disk == raid_disks - 1)
+                        return 1;
+                break;
+        case ALGORITHM_LEFT_ASYMMETRIC_6:
+        case ALGORITHM_RIGHT_ASYMMETRIC_6:
+        case ALGORITHM_LEFT_SYMMETRIC_6:
+        case ALGORITHM_RIGHT_SYMMETRIC_6:
+                if (raid_disk == raid_disks - 1)
+                        return 1;
+        }
+        return 0;
+}
+
 static int run(mddev_t *mddev)
 {
         raid5_conf_t *conf;
         int working_disks = 0, chunk_size;
+        int dirty_parity_disks = 0;
         mdk_rdev_t *rdev;
+        sector_t reshape_offset = 0;
 
         if (mddev->recovery_cp != MaxSector)
                 printk(KERN_NOTICE "raid5: %s is not clean"
@@ -4861,6 +4890,7 @@ static int run(mddev_t *mddev)
4861 "on a stripe boundary\n"); 4890 "on a stripe boundary\n");
4862 return -EINVAL; 4891 return -EINVAL;
4863 } 4892 }
4893 reshape_offset = here_new * mddev->new_chunk_sectors;
4864 /* here_new is the stripe we will write to */ 4894 /* here_new is the stripe we will write to */
4865 here_old = mddev->reshape_position; 4895 here_old = mddev->reshape_position;
4866 sector_div(here_old, mddev->chunk_sectors * 4896 sector_div(here_old, mddev->chunk_sectors *
@@ -4916,10 +4946,51 @@ static int run(mddev_t *mddev)
         /*
          * 0 for a fully functional array, 1 or 2 for a degraded array.
          */
-        list_for_each_entry(rdev, &mddev->disks, same_set)
-                if (rdev->raid_disk >= 0 &&
-                    test_bit(In_sync, &rdev->flags))
+        list_for_each_entry(rdev, &mddev->disks, same_set) {
+                if (rdev->raid_disk < 0)
+                        continue;
+                if (test_bit(In_sync, &rdev->flags))
                         working_disks++;
+                /* This disc is not fully in-sync. However if it
+                 * just stored parity (beyond the recovery_offset),
+                 * when we don't need to be concerned about the
+                 * array being dirty.
+                 * When reshape goes 'backwards', we never have
+                 * partially completed devices, so we only need
+                 * to worry about reshape going forwards.
+                 */
+                /* Hack because v0.91 doesn't store recovery_offset properly. */
+                if (mddev->major_version == 0 &&
+                    mddev->minor_version > 90)
+                        rdev->recovery_offset = reshape_offset;
+
+                printk("%d: w=%d pa=%d pr=%d m=%d a=%d r=%d op1=%d op2=%d\n",
+                       rdev->raid_disk, working_disks, conf->prev_algo,
+                       conf->previous_raid_disks, conf->max_degraded,
+                       conf->algorithm, conf->raid_disks,
+                       only_parity(rdev->raid_disk,
+                                   conf->prev_algo,
+                                   conf->previous_raid_disks,
+                                   conf->max_degraded),
+                       only_parity(rdev->raid_disk,
+                                   conf->algorithm,
+                                   conf->raid_disks,
+                                   conf->max_degraded));
+                if (rdev->recovery_offset < reshape_offset) {
+                        /* We need to check old and new layout */
+                        if (!only_parity(rdev->raid_disk,
+                                         conf->algorithm,
+                                         conf->raid_disks,
+                                         conf->max_degraded))
+                                continue;
+                }
+                if (!only_parity(rdev->raid_disk,
+                                 conf->prev_algo,
+                                 conf->previous_raid_disks,
+                                 conf->max_degraded))
+                        continue;
+                dirty_parity_disks++;
+        }
 
         mddev->degraded = (max(conf->raid_disks, conf->previous_raid_disks)
                            - working_disks);
4924 mddev->degraded = (max(conf->raid_disks, conf->previous_raid_disks) 4995 mddev->degraded = (max(conf->raid_disks, conf->previous_raid_disks)
4925 - working_disks); 4996 - working_disks);
@@ -4935,7 +5006,7 @@ static int run(mddev_t *mddev)
         mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
         mddev->resync_max_sectors = mddev->dev_sectors;
 
-        if (mddev->degraded > 0 &&
+        if (mddev->degraded > dirty_parity_disks &&
             mddev->recovery_cp != MaxSector) {
                 if (mddev->ok_start_degraded)
                         printk(KERN_WARNING
@@ -5361,9 +5432,11 @@ static int raid5_start_reshape(mddev_t *mddev)
                     !test_bit(Faulty, &rdev->flags)) {
                         if (raid5_add_disk(mddev, rdev) == 0) {
                                 char nm[20];
-                                set_bit(In_sync, &rdev->flags);
+                                if (rdev->raid_disk >= conf->previous_raid_disks)
+                                        set_bit(In_sync, &rdev->flags);
+                                else
+                                        rdev->recovery_offset = 0;
                                 added_devices++;
-                                rdev->recovery_offset = 0;
                                 sprintf(nm, "rd%d", rdev->raid_disk);
                                 if (sysfs_create_link(&mddev->kobj,
                                                       &rdev->kobj, nm))
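The new only_parity() is the heart of this raid5 change: a device that is
dirty but stores only parity for a given layout cannot contribute stale data,
so such devices are counted in dirty_parity_disks and excused from the
degraded-and-dirty startup refusal. The function compiles standalone; the
ALGORITHM_* values below are what raid5.h defined at the time, but verify them
before reusing (assumption):

#include <stdio.h>

/* layout constants as defined in raid5.h at the time (assumption) */
#define ALGORITHM_PARITY_0              4
#define ALGORITHM_PARITY_N              5
#define ALGORITHM_LEFT_ASYMMETRIC_6     16
#define ALGORITHM_RIGHT_ASYMMETRIC_6    17
#define ALGORITHM_LEFT_SYMMETRIC_6      18
#define ALGORITHM_RIGHT_SYMMETRIC_6     19
#define ALGORITHM_PARITY_0_6            20

/* body copied from the hunk above */
static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded)
{
        switch (algo) {
        case ALGORITHM_PARITY_0:
                if (raid_disk < max_degraded)
                        return 1;
                break;
        case ALGORITHM_PARITY_N:
                if (raid_disk >= raid_disks - max_degraded)
                        return 1;
                break;
        case ALGORITHM_PARITY_0_6:
                if (raid_disk == 0 || raid_disk == raid_disks - 1)
                        return 1;
                break;
        case ALGORITHM_LEFT_ASYMMETRIC_6:
        case ALGORITHM_RIGHT_ASYMMETRIC_6:
        case ALGORITHM_LEFT_SYMMETRIC_6:
        case ALGORITHM_RIGHT_SYMMETRIC_6:
                if (raid_disk == raid_disks - 1)
                        return 1;
        }
        return 0;
}

int main(void)
{
        /* RAID4-style PARITY_N layout, 4 disks, 1 parity: only disk 3 qualifies */
        for (int d = 0; d < 4; d++)
                printf("disk %d: only_parity=%d\n", d,
                       only_parity(d, ALGORITHM_PARITY_N, 4, 1));
        return 0;
}
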
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 89ece1c235aa..66e21dd23154 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -357,6 +357,7 @@ static int mon_close(struct inode *inode, struct file *filp)
         atomic_set(&monpriv->msglim_count, 0);
         monpriv->write_index = 0;
         monpriv->read_index = 0;
+        dev_set_drvdata(monreader_device, NULL);
 
         for (i = 0; i < MON_MSGLIM; i++)
                 kfree(monpriv->msg_array[i]);
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c
index 84c191c1cd62..05909a7df8b3 100644
--- a/drivers/s390/char/sclp_quiesce.c
+++ b/drivers/s390/char/sclp_quiesce.c
@@ -20,9 +20,12 @@
 
 #include "sclp.h"
 
+static void (*old_machine_restart)(char *);
+static void (*old_machine_halt)(void);
+static void (*old_machine_power_off)(void);
+
 /* Shutdown handler. Signal completion of shutdown by loading special PSW. */
-static void
-do_machine_quiesce(void)
+static void do_machine_quiesce(void)
 {
         psw_t quiesce_psw;
 
@@ -33,23 +36,48 @@ do_machine_quiesce(void)
 }
 
 /* Handler for quiesce event. Start shutdown procedure. */
-static void
-sclp_quiesce_handler(struct evbuf_header *evbuf)
+static void sclp_quiesce_handler(struct evbuf_header *evbuf)
 {
-        _machine_restart = (void *) do_machine_quiesce;
-        _machine_halt = do_machine_quiesce;
-        _machine_power_off = do_machine_quiesce;
+        if (_machine_restart != (void *) do_machine_quiesce) {
+                old_machine_restart = _machine_restart;
+                old_machine_halt = _machine_halt;
+                old_machine_power_off = _machine_power_off;
+                _machine_restart = (void *) do_machine_quiesce;
+                _machine_halt = do_machine_quiesce;
+                _machine_power_off = do_machine_quiesce;
+        }
         ctrl_alt_del();
 }
 
+/* Undo machine restart/halt/power_off modification on resume */
+static void sclp_quiesce_pm_event(struct sclp_register *reg,
+                                  enum sclp_pm_event sclp_pm_event)
+{
+        switch (sclp_pm_event) {
+        case SCLP_PM_EVENT_RESTORE:
+                if (old_machine_restart) {
+                        _machine_restart = old_machine_restart;
+                        _machine_halt = old_machine_halt;
+                        _machine_power_off = old_machine_power_off;
+                        old_machine_restart = NULL;
+                        old_machine_halt = NULL;
+                        old_machine_power_off = NULL;
+                }
+                break;
+        case SCLP_PM_EVENT_FREEZE:
+        case SCLP_PM_EVENT_THAW:
+                break;
+        }
+}
+
 static struct sclp_register sclp_quiesce_event = {
         .receive_mask = EVTYP_SIGQUIESCE_MASK,
-        .receiver_fn = sclp_quiesce_handler
+        .receiver_fn = sclp_quiesce_handler,
+        .pm_event_fn = sclp_quiesce_pm_event
 };
 
 /* Initialize quiesce driver. */
-static int __init
-sclp_quiesce_init(void)
+static int __init sclp_quiesce_init(void)
 {
         return sclp_register(&sclp_quiesce_event);
 }
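The quiesce handler now saves the original _machine_* pointers before
redirecting them, and only on the first event, so the new pm_event_fn can
restore them on SCLP_PM_EVENT_RESTORE; without this, a suspend/resume cycle
would leave the machine permanently wired to do_machine_quiesce(). A
user-space sketch of the save-once/restore pattern (all names invented):

#include <stdio.h>

static void normal_halt(void)  { printf("normal halt\n"); }
static void quiesce_halt(void) { printf("quiesce halt\n"); }

static void (*machine_halt)(void) = normal_halt;        /* ~_machine_halt */
static void (*old_machine_halt)(void);

static void quiesce_handler(void)
{
        if (machine_halt != quiesce_halt) {     /* save the original once */
                old_machine_halt = machine_halt;
                machine_halt = quiesce_halt;
        }
}

static void pm_restore(void)                    /* ~SCLP_PM_EVENT_RESTORE */
{
        if (old_machine_halt) {
                machine_halt = old_machine_halt;
                old_machine_halt = NULL;
        }
}

int main(void)
{
        quiesce_handler();
        quiesce_handler();      /* second event must not clobber the saved pointer */
        machine_halt();         /* quiesce halt */

        pm_restore();
        machine_halt();         /* back to normal halt */
        return 0;
}
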
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index edf926e1062f..d0a2ce1b4324 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -958,7 +958,7 @@ encode_entry(struct readdir_cd *ccd, const char *name, int namlen,
         p1 = encode_entry_baggage(cd, p1, name, namlen, ino);
 
         if (plus)
-                p = encode_entryplus_baggage(cd, p1, name, namlen);
+                p1 = encode_entryplus_baggage(cd, p1, name, namlen);
 
         /* determine entry word length and lengths to go in pages */
         num_entry_words = p1 - tmp;
diff --git a/include/linux/input.h b/include/linux/input.h
index 0ccfc30cd40f..c2b1a7d244d9 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -1377,6 +1377,10 @@ extern struct class input_class;
  * methods; erase() is optional. set_gain() and set_autocenter() need
  * only be implemented if driver sets up FF_GAIN and FF_AUTOCENTER
  * bits.
+ *
+ * Note that playback(), set_gain() and set_autocenter() are called with
+ * dev->event_lock spinlock held and interrupts off and thus may not
+ * sleep.
  */
 struct ff_device {
         int (*upload)(struct input_dev *dev, struct ff_effect *effect,
diff --git a/mm/percpu.c b/mm/percpu.c
index d90797160c2a..5adfc268b408 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -355,62 +355,86 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
 }
 
 /**
- * pcpu_extend_area_map - extend area map for allocation
- * @chunk: target chunk
+ * pcpu_need_to_extend - determine whether chunk area map needs to be extended
+ * @chunk: chunk of interest
  *
- * Extend area map of @chunk so that it can accomodate an allocation.
- * A single allocation can split an area into three areas, so this
- * function makes sure that @chunk->map has at least two extra slots.
+ * Determine whether area map of @chunk needs to be extended to
+ * accomodate a new allocation.
  *
  * CONTEXT:
- * pcpu_alloc_mutex, pcpu_lock.  pcpu_lock is released and reacquired
- * if area map is extended.
+ * pcpu_lock.
  *
  * RETURNS:
- * 0 if noop, 1 if successfully extended, -errno on failure.
+ * New target map allocation length if extension is necessary, 0
+ * otherwise.
  */
-static int pcpu_extend_area_map(struct pcpu_chunk *chunk, unsigned long *flags)
+static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
 {
         int new_alloc;
-        int *new;
-        size_t size;
 
-        /* has enough? */
         if (chunk->map_alloc >= chunk->map_used + 2)
                 return 0;
 
-        spin_unlock_irqrestore(&pcpu_lock, *flags);
-
         new_alloc = PCPU_DFL_MAP_ALLOC;
         while (new_alloc < chunk->map_used + 2)
                 new_alloc *= 2;
 
-        new = pcpu_mem_alloc(new_alloc * sizeof(new[0]));
-        if (!new) {
-                spin_lock_irqsave(&pcpu_lock, *flags);
+        return new_alloc;
+}
+
+/**
+ * pcpu_extend_area_map - extend area map of a chunk
+ * @chunk: chunk of interest
+ * @new_alloc: new target allocation length of the area map
+ *
+ * Extend area map of @chunk to have @new_alloc entries.
+ *
+ * CONTEXT:
+ * Does GFP_KERNEL allocation.  Grabs and releases pcpu_lock.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
+{
+        int *old = NULL, *new = NULL;
+        size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
+        unsigned long flags;
+
+        new = pcpu_mem_alloc(new_size);
+        if (!new)
                 return -ENOMEM;
-        }
 
-        /*
-         * Acquire pcpu_lock and switch to new area map.  Only free
-         * could have happened inbetween, so map_used couldn't have
-         * grown.
-         */
-        spin_lock_irqsave(&pcpu_lock, *flags);
-        BUG_ON(new_alloc < chunk->map_used + 2);
+        /* acquire pcpu_lock and switch to new area map */
+        spin_lock_irqsave(&pcpu_lock, flags);
+
+        if (new_alloc <= chunk->map_alloc)
+                goto out_unlock;
 
-        size = chunk->map_alloc * sizeof(chunk->map[0]);
-        memcpy(new, chunk->map, size);
+        old_size = chunk->map_alloc * sizeof(chunk->map[0]);
+        memcpy(new, chunk->map, old_size);
 
         /*
          * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
          * one of the first chunks and still using static map.
          */
         if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
-                pcpu_mem_free(chunk->map, size);
+                old = chunk->map;
 
         chunk->map_alloc = new_alloc;
         chunk->map = new;
+        new = NULL;
+
+out_unlock:
+        spin_unlock_irqrestore(&pcpu_lock, flags);
+
+        /*
+         * pcpu_mem_free() might end up calling vfree() which uses
+         * IRQ-unsafe lock and thus can't be called under pcpu_lock.
+         */
+        pcpu_mem_free(old, old_size);
+        pcpu_mem_free(new, new_size);
+
         return 0;
 }
 
@@ -1049,7 +1073,7 @@ static void *pcpu_alloc(size_t size, size_t align, bool reserved)
         static int warn_limit = 10;
         struct pcpu_chunk *chunk;
         const char *err;
-        int slot, off;
+        int slot, off, new_alloc;
         unsigned long flags;
 
         if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
@@ -1064,14 +1088,25 @@ static void *pcpu_alloc(size_t size, size_t align, bool reserved)
         /* serve reserved allocations from the reserved chunk if available */
         if (reserved && pcpu_reserved_chunk) {
                 chunk = pcpu_reserved_chunk;
-                if (size > chunk->contig_hint ||
-                    pcpu_extend_area_map(chunk, &flags) < 0) {
-                        err = "failed to extend area map of reserved chunk";
+
+                if (size > chunk->contig_hint) {
+                        err = "alloc from reserved chunk failed";
                         goto fail_unlock;
                 }
+
+                while ((new_alloc = pcpu_need_to_extend(chunk))) {
+                        spin_unlock_irqrestore(&pcpu_lock, flags);
+                        if (pcpu_extend_area_map(chunk, new_alloc) < 0) {
+                                err = "failed to extend area map of reserved chunk";
+                                goto fail_unlock_mutex;
+                        }
+                        spin_lock_irqsave(&pcpu_lock, flags);
+                }
+
                 off = pcpu_alloc_area(chunk, size, align);
                 if (off >= 0)
                         goto area_found;
+
                 err = "alloc from reserved chunk failed";
                 goto fail_unlock;
         }
@@ -1083,14 +1118,20 @@ restart:
                 if (size > chunk->contig_hint)
                         continue;
 
-                switch (pcpu_extend_area_map(chunk, &flags)) {
-                case 0:
-                        break;
-                case 1:
-                        goto restart;   /* pcpu_lock dropped, restart */
-                default:
-                        err = "failed to extend area map";
-                        goto fail_unlock;
+                new_alloc = pcpu_need_to_extend(chunk);
+                if (new_alloc) {
+                        spin_unlock_irqrestore(&pcpu_lock, flags);
+                        if (pcpu_extend_area_map(chunk,
+                                                 new_alloc) < 0) {
+                                err = "failed to extend area map";
+                                goto fail_unlock_mutex;
+                        }
+                        spin_lock_irqsave(&pcpu_lock, flags);
+                        /*
+                         * pcpu_lock has been dropped, need to
+                         * restart cpu_slot list walking.
+                         */
+                        goto restart;
                 }
 
                 off = pcpu_alloc_area(chunk, size, align);
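
The percpu rework splits the old drop-lock-and-reallocate helper into
pcpu_need_to_extend() (a cheap check under pcpu_lock) and
pcpu_extend_area_map() (GFP_KERNEL allocation outside the lock, then re-take,
re-check, and swap), with the old map freed only after the lock is dropped
because vfree() takes an IRQ-unsafe lock. Both callers loop until no extension
is needed, since the map can grow while the lock is released. A user-space
sketch of this check/alloc/recheck pattern (invented names, a pthread mutex
standing in for the IRQ-safe pcpu_lock):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int *map;
static size_t map_alloc, map_used;

static size_t need_to_extend(void)             /* call with lock held */
{
        size_t new_alloc = 16;

        if (map_alloc >= map_used + 2)
                return 0;
        while (new_alloc < map_used + 2)
                new_alloc *= 2;
        return new_alloc;
}

static int extend_map(size_t new_alloc)        /* call without lock held */
{
        int *old = NULL;
        int *new = malloc(new_alloc * sizeof(*new));   /* may sleep here */

        if (!new)
                return -1;

        pthread_mutex_lock(&lock);
        if (new_alloc <= map_alloc) {          /* raced: already extended */
                pthread_mutex_unlock(&lock);
                free(new);
                return 0;
        }
        if (map)
                memcpy(new, map, map_used * sizeof(*new));
        old = map;
        map = new;
        map_alloc = new_alloc;
        pthread_mutex_unlock(&lock);

        free(old);                             /* free outside the lock */
        return 0;
}

int main(void)
{
        size_t new_alloc;

        map_alloc = 16;
        map_used = 15;                         /* needs 17 slots: must extend */
        map = calloc(map_alloc, sizeof(*map));

        pthread_mutex_lock(&lock);
        while ((new_alloc = need_to_extend())) {
                pthread_mutex_unlock(&lock);
                if (extend_map(new_alloc) < 0)
                        return 1;
                pthread_mutex_lock(&lock);
        }
        pthread_mutex_unlock(&lock);
        free(map);
        return 0;
}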