Diffstat (limited to 'drivers/md/md.c')
-rw-r--r--	drivers/md/md.c | 164
1 file changed, 102 insertions(+), 62 deletions(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 0ff1bbf6c90e..41c050b59ec4 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -69,7 +69,7 @@
 
 #include <trace/events/block.h>
 #include "md.h"
-#include "bitmap.h"
+#include "md-bitmap.h"
 #include "md-cluster.h"
 
 #ifndef MODULE
@@ -266,16 +266,31 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
  * call has finished, the bio has been linked into some internal structure
  * and so is visible to ->quiesce(), so we don't need the refcount any more.
  */
+static bool is_suspended(struct mddev *mddev, struct bio *bio)
+{
+	if (mddev->suspended)
+		return true;
+	if (bio_data_dir(bio) != WRITE)
+		return false;
+	if (mddev->suspend_lo >= mddev->suspend_hi)
+		return false;
+	if (bio->bi_iter.bi_sector >= mddev->suspend_hi)
+		return false;
+	if (bio_end_sector(bio) < mddev->suspend_lo)
+		return false;
+	return true;
+}
+
 void md_handle_request(struct mddev *mddev, struct bio *bio)
 {
 check_suspended:
 	rcu_read_lock();
-	if (mddev->suspended) {
+	if (is_suspended(mddev, bio)) {
 		DEFINE_WAIT(__wait);
 		for (;;) {
 			prepare_to_wait(&mddev->sb_wait, &__wait,
 					TASK_UNINTERRUPTIBLE);
-			if (!mddev->suspended)
+			if (!is_suspended(mddev, bio))
 				break;
 			rcu_read_unlock();
 			schedule();
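
The new is_suspended() helper only blocks writes that overlap the suspended
sector range; reads and non-overlapping writes pass straight through. A worked
example with made-up sector numbers (illustrative only, not from the patch):

	/* suspend_lo = 100, suspend_hi = 200 (sectors)                       */
	/* WRITE covering [150, 160) -> blocked: overlaps the range           */
	/* WRITE covering [250, 260) -> allowed: bi_sector >= suspend_hi      */
	/* READ anywhere             -> allowed: bio_data_dir(bio) != WRITE   */
	/* any bio while ->suspended -> blocked, as before                    */
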
@@ -344,12 +359,17 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
 void mddev_suspend(struct mddev *mddev)
 {
 	WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk);
+	lockdep_assert_held(&mddev->reconfig_mutex);
 	if (mddev->suspended++)
 		return;
 	synchronize_rcu();
 	wake_up(&mddev->sb_wait);
+	set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags);
+	smp_mb__after_atomic();
 	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
 	mddev->pers->quiesce(mddev, 1);
+	clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags);
+	wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags));
 
 	del_timer_sync(&mddev->safemode_timer);
 }
@@ -357,6 +377,7 @@ EXPORT_SYMBOL_GPL(mddev_suspend);
 
 void mddev_resume(struct mddev *mddev)
 {
+	lockdep_assert_held(&mddev->reconfig_mutex);
 	if (--mddev->suspended)
 		return;
 	wake_up(&mddev->sb_wait);
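
Both mddev_suspend() and mddev_resume() now assert that reconfig_mutex is
held, so callers are expected to follow the same pattern the sysfs handlers
later in this patch switch to. A minimal sketch of that calling convention
(the helper name is hypothetical, not part of this patch):

	static int update_foo_while_quiesced(struct mddev *mddev)
	{
		int err = mddev_lock(mddev);	/* takes mddev->reconfig_mutex */

		if (err)
			return err;
		mddev_suspend(mddev);		/* satisfies lockdep_assert_held() */
		/* ... change state that must not race with in-flight I/O ... */
		mddev_resume(mddev);
		mddev_unlock(mddev);
		return 0;
	}
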
@@ -520,7 +541,7 @@ static void mddev_put(struct mddev *mddev)
 		bioset_free(sync_bs);
 }
 
-static void md_safemode_timeout(unsigned long data);
+static void md_safemode_timeout(struct timer_list *t);
 
 void mddev_init(struct mddev *mddev)
 {
@@ -529,8 +550,7 @@ void mddev_init(struct mddev *mddev)
 	mutex_init(&mddev->bitmap_info.mutex);
 	INIT_LIST_HEAD(&mddev->disks);
 	INIT_LIST_HEAD(&mddev->all_mddevs);
-	setup_timer(&mddev->safemode_timer, md_safemode_timeout,
-		    (unsigned long) mddev);
+	timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0);
 	atomic_set(&mddev->active, 1);
 	atomic_set(&mddev->openers, 0);
 	atomic_set(&mddev->active_io, 0);
@@ -663,6 +683,7 @@ void mddev_unlock(struct mddev *mddev)
 	 */
 	spin_lock(&pers_lock);
 	md_wakeup_thread(mddev->thread);
+	wake_up(&mddev->sb_wait);
 	spin_unlock(&pers_lock);
 }
 EXPORT_SYMBOL_GPL(mddev_unlock);
@@ -2313,7 +2334,7 @@ static void export_array(struct mddev *mddev)
 
 static bool set_in_sync(struct mddev *mddev)
 {
-	WARN_ON_ONCE(NR_CPUS != 1 && !spin_is_locked(&mddev->lock));
+	lockdep_assert_held(&mddev->lock);
 	if (!mddev->in_sync) {
 		mddev->sync_checkers++;
 		spin_unlock(&mddev->lock);
@@ -2432,10 +2453,18 @@ repeat:
 		}
 	}
 
-	/* First make sure individual recovery_offsets are correct */
+	/*
+	 * First make sure individual recovery_offsets are correct
+	 * curr_resync_completed can only be used during recovery.
+	 * During reshape/resync it might use array-addresses rather
+	 * that device addresses.
+	 */
 	rdev_for_each(rdev, mddev) {
 		if (rdev->raid_disk >= 0 &&
 		    mddev->delta_disks >= 0 &&
+		    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
+		    test_bit(MD_RECOVERY_RECOVER, &mddev->recovery) &&
+		    !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
 		    !test_bit(Journal, &rdev->flags) &&
 		    !test_bit(In_sync, &rdev->flags) &&
 		    mddev->curr_resync_completed > rdev->recovery_offset)
@@ -2651,7 +2680,7 @@ state_show(struct md_rdev *rdev, char *page)
 {
 	char *sep = ",";
 	size_t len = 0;
-	unsigned long flags = ACCESS_ONCE(rdev->flags);
+	unsigned long flags = READ_ONCE(rdev->flags);
 
 	if (test_bit(Faulty, &flags) ||
 	    (!test_bit(ExternalBbl, &flags) &&
@@ -4824,7 +4853,7 @@ suspend_lo_show(struct mddev *mddev, char *page)
 static ssize_t
 suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
 {
-	unsigned long long old, new;
+	unsigned long long new;
 	int err;
 
 	err = kstrtoull(buf, 10, &new);
@@ -4840,16 +4869,10 @@ suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
 	if (mddev->pers == NULL ||
 	    mddev->pers->quiesce == NULL)
 		goto unlock;
-	old = mddev->suspend_lo;
+	mddev_suspend(mddev);
 	mddev->suspend_lo = new;
-	if (new >= old)
-		/* Shrinking suspended region */
-		mddev->pers->quiesce(mddev, 2);
-	else {
-		/* Expanding suspended region - need to wait */
-		mddev->pers->quiesce(mddev, 1);
-		mddev->pers->quiesce(mddev, 0);
-	}
+	mddev_resume(mddev);
+
 	err = 0;
 unlock:
 	mddev_unlock(mddev);
@@ -4867,7 +4890,7 @@ suspend_hi_show(struct mddev *mddev, char *page)
 static ssize_t
 suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
 {
-	unsigned long long old, new;
+	unsigned long long new;
 	int err;
 
 	err = kstrtoull(buf, 10, &new);
@@ -4880,19 +4903,13 @@ suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
 	if (err)
 		return err;
 	err = -EINVAL;
-	if (mddev->pers == NULL ||
-	    mddev->pers->quiesce == NULL)
+	if (mddev->pers == NULL)
 		goto unlock;
-	old = mddev->suspend_hi;
+
+	mddev_suspend(mddev);
 	mddev->suspend_hi = new;
-	if (new <= old)
-		/* Shrinking suspended region */
-		mddev->pers->quiesce(mddev, 2);
-	else {
-		/* Expanding suspended region - need to wait */
-		mddev->pers->quiesce(mddev, 1);
-		mddev->pers->quiesce(mddev, 0);
-	}
+	mddev_resume(mddev);
+
 	err = 0;
 unlock:
 	mddev_unlock(mddev);
@@ -5357,7 +5374,7 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data)
 	return NULL;
 }
 
-static int add_named_array(const char *val, struct kernel_param *kp)
+static int add_named_array(const char *val, const struct kernel_param *kp)
 {
 	/*
 	 * val must be "md_*" or "mdNNN".
@@ -5386,9 +5403,9 @@ static int add_named_array(const char *val, struct kernel_param *kp)
 	return -EINVAL;
 }
 
-static void md_safemode_timeout(unsigned long data)
+static void md_safemode_timeout(struct timer_list *t)
 {
-	struct mddev *mddev = (struct mddev *) data;
+	struct mddev *mddev = from_timer(mddev, t, safemode_timer);
 
 	mddev->safemode = 1;
 	if (mddev->external)
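
This and the mddev_init() hunk above follow the tree-wide timer API
conversion: callbacks now receive the struct timer_list pointer and recover
their containing object with from_timer() instead of casting an unsigned long
cookie. A minimal, self-contained sketch of the pattern with a hypothetical
structure (not md code):

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	struct foo {
		struct timer_list timer;
		int hits;
	};

	static void foo_timeout(struct timer_list *t)
	{
		/* from_timer() is container_of() specialised for timers. */
		struct foo *f = from_timer(f, t, timer);

		f->hits++;
	}

	static void foo_init(struct foo *f)
	{
		f->hits = 0;
		/* The third argument is timer flags; there is no data cookie. */
		timer_setup(&f->timer, foo_timeout, 0);
		mod_timer(&f->timer, jiffies + HZ);
	}
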
@@ -5834,8 +5851,14 @@ void md_stop(struct mddev *mddev)
 	 * This is called from dm-raid
 	 */
 	__md_stop(mddev);
-	if (mddev->bio_set)
+	if (mddev->bio_set) {
 		bioset_free(mddev->bio_set);
+		mddev->bio_set = NULL;
+	}
+	if (mddev->sync_set) {
+		bioset_free(mddev->sync_set);
+		mddev->sync_set = NULL;
+	}
 }
 
 EXPORT_SYMBOL_GPL(md_stop);
@@ -6362,7 +6385,7 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
 				break;
 			}
 		}
-		if (has_journal) {
+		if (has_journal || mddev->bitmap) {
 			export_rdev(rdev);
 			return -EBUSY;
 		}
@@ -6618,22 +6641,26 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
 		return -ENOENT; /* cannot remove what isn't there */
 	err = 0;
 	if (mddev->pers) {
-		mddev->pers->quiesce(mddev, 1);
 		if (fd >= 0) {
 			struct bitmap *bitmap;
 
 			bitmap = bitmap_create(mddev, -1);
+			mddev_suspend(mddev);
 			if (!IS_ERR(bitmap)) {
 				mddev->bitmap = bitmap;
 				err = bitmap_load(mddev);
 			} else
 				err = PTR_ERR(bitmap);
-		}
-		if (fd < 0 || err) {
+			if (err) {
+				bitmap_destroy(mddev);
+				fd = -1;
+			}
+			mddev_resume(mddev);
+		} else if (fd < 0) {
+			mddev_suspend(mddev);
 			bitmap_destroy(mddev);
-			fd = -1; /* make sure to put the file */
+			mddev_resume(mddev);
 		}
-		mddev->pers->quiesce(mddev, 0);
 	}
 	if (fd < 0) {
 		struct file *f = mddev->bitmap_info.file;
@@ -6735,7 +6762,7 @@ static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
 
 void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
 {
-	WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__);
+	lockdep_assert_held(&mddev->reconfig_mutex);
 
 	if (mddev->external_size)
 		return;
@@ -6917,8 +6944,8 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
 				mddev->bitmap_info.default_offset;
 			mddev->bitmap_info.space =
 				mddev->bitmap_info.default_space;
-			mddev->pers->quiesce(mddev, 1);
 			bitmap = bitmap_create(mddev, -1);
+			mddev_suspend(mddev);
 			if (!IS_ERR(bitmap)) {
 				mddev->bitmap = bitmap;
 				rv = bitmap_load(mddev);
@@ -6926,7 +6953,7 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
 				rv = PTR_ERR(bitmap);
 			if (rv)
 				bitmap_destroy(mddev);
-			mddev->pers->quiesce(mddev, 0);
+			mddev_resume(mddev);
 		} else {
 			/* remove the bitmap */
 			if (!mddev->bitmap) {
@@ -6949,9 +6976,9 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
 				mddev->bitmap_info.nodes = 0;
 				md_cluster_ops->leave(mddev);
 			}
-			mddev->pers->quiesce(mddev, 1);
+			mddev_suspend(mddev);
 			bitmap_destroy(mddev);
-			mddev->pers->quiesce(mddev, 0);
+			mddev_resume(mddev);
 			mddev->bitmap_info.offset = 0;
 		}
 	}
@@ -7468,8 +7495,8 @@ void md_wakeup_thread(struct md_thread *thread)
 {
 	if (thread) {
 		pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm);
-		if (!test_and_set_bit(THREAD_WAKEUP, &thread->flags))
-			wake_up(&thread->wqueue);
+		set_bit(THREAD_WAKEUP, &thread->flags);
+		wake_up(&thread->wqueue);
 	}
 }
 EXPORT_SYMBOL(md_wakeup_thread);
@@ -8039,7 +8066,8 @@ bool md_write_start(struct mddev *mddev, struct bio *bi)
 	if (did_change)
 		sysfs_notify_dirent_safe(mddev->sysfs_state);
 	wait_event(mddev->sb_wait,
-		   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) && !mddev->suspended);
+		   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) ||
+		   mddev->suspended);
 	if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
 		percpu_ref_put(&mddev->writes_pending);
 		return false;
@@ -8110,7 +8138,6 @@ void md_allow_write(struct mddev *mddev)
 		sysfs_notify_dirent_safe(mddev->sysfs_state);
 		/* wait for the dirty state to be recorded in the metadata */
 		wait_event(mddev->sb_wait,
-			   !test_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags) &&
 			   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
 	} else
 		spin_unlock(&mddev->lock);
@@ -8477,16 +8504,19 @@ void md_do_sync(struct md_thread *thread)
 		} else {
 			if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
 				mddev->curr_resync = MaxSector;
-			rcu_read_lock();
-			rdev_for_each_rcu(rdev, mddev)
-				if (rdev->raid_disk >= 0 &&
-				    mddev->delta_disks >= 0 &&
-				    !test_bit(Journal, &rdev->flags) &&
-				    !test_bit(Faulty, &rdev->flags) &&
-				    !test_bit(In_sync, &rdev->flags) &&
-				    rdev->recovery_offset < mddev->curr_resync)
-					rdev->recovery_offset = mddev->curr_resync;
-			rcu_read_unlock();
+			if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
+			    test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) {
+				rcu_read_lock();
+				rdev_for_each_rcu(rdev, mddev)
+					if (rdev->raid_disk >= 0 &&
+					    mddev->delta_disks >= 0 &&
+					    !test_bit(Journal, &rdev->flags) &&
+					    !test_bit(Faulty, &rdev->flags) &&
+					    !test_bit(In_sync, &rdev->flags) &&
+					    rdev->recovery_offset < mddev->curr_resync)
+						rdev->recovery_offset = mddev->curr_resync;
+				rcu_read_unlock();
+			}
 		}
 	}
  skip:
@@ -8813,6 +8843,16 @@ void md_check_recovery(struct mddev *mddev)
 	unlock:
 		wake_up(&mddev->sb_wait);
 		mddev_unlock(mddev);
+	} else if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) {
+		/* Write superblock - thread that called mddev_suspend()
+		 * holds reconfig_mutex for us.
+		 */
+		set_bit(MD_UPDATING_SB, &mddev->flags);
+		smp_mb__after_atomic();
+		if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags))
+			md_update_sb(mddev, 0);
+		clear_bit_unlock(MD_UPDATING_SB, &mddev->flags);
+		wake_up(&mddev->sb_wait);
 	}
 }
 EXPORT_SYMBOL(md_check_recovery);
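
Together with the mddev_suspend() hunk earlier, this forms a small flag
handshake that lets the md thread write the superblock on behalf of a task
that is suspending the array while holding reconfig_mutex. A rough restatement
of each side's sequence, taken from the diff rather than added code:

	/*
	 * mddev_suspend() (holds reconfig_mutex)   md_check_recovery() (md thread)
	 *
	 * set_bit(MD_ALLOW_SB_UPDATE)              set_bit(MD_UPDATING_SB)
	 * smp_mb__after_atomic()                   smp_mb__after_atomic()
	 * wait for active_io == 0                  if MD_ALLOW_SB_UPDATE still set:
	 * ->quiesce(mddev, 1)                          md_update_sb(mddev, 0)
	 * clear_bit_unlock(MD_ALLOW_SB_UPDATE)     clear_bit_unlock(MD_UPDATING_SB)
	 * wait until MD_UPDATING_SB clears         wake_up(&mddev->sb_wait)
	 */
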
@@ -9274,11 +9314,11 @@ static __exit void md_exit(void)
 subsys_initcall(md_init);
 module_exit(md_exit)
 
-static int get_ro(char *buffer, struct kernel_param *kp)
+static int get_ro(char *buffer, const struct kernel_param *kp)
 {
 	return sprintf(buffer, "%d", start_readonly);
 }
-static int set_ro(const char *val, struct kernel_param *kp)
+static int set_ro(const char *val, const struct kernel_param *kp)
 {
 	return kstrtouint(val, 10, (unsigned int *)&start_readonly);
 }
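
The kernel_param changes here and in add_named_array() match the tree-wide
const-ification of module parameter callbacks: handlers registered through
module_param_call() now take a const struct kernel_param *. A minimal sketch
of the expected wiring under that assumption (the module_param_call() line is
paraphrased, not quoted from this file):

	#include <linux/kernel.h>
	#include <linux/moduleparam.h>
	#include <linux/stat.h>

	static int start_readonly;

	/* Both callbacks must use the const-qualified prototypes, as in the
	 * hunk above; a non-const struct kernel_param * no longer matches. */
	static int get_ro(char *buffer, const struct kernel_param *kp)
	{
		return sprintf(buffer, "%d", start_readonly);
	}

	static int set_ro(const char *val, const struct kernel_param *kp)
	{
		return kstrtouint(val, 10, (unsigned int *)&start_readonly);
	}

	module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
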