Diffstat (limited to 'drivers')
-rw-r--r--  drivers/md/dm-uevent.c | 22 +++++++++++-----------
-rw-r--r--  drivers/md/raid5.c     | 28 ++++++++++++++--------------
2 files changed, 25 insertions(+), 25 deletions(-)
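Every hunk below makes the same mechanical substitution: the GCC-specific __FUNCTION__ identifier becomes __func__, the predefined identifier standardized by C99, which yields the same function-name string. A minimal stand-alone sketch of the behavior (demo() is a hypothetical function for illustration, not taken from this patch):

	#include <stdio.h>

	static void demo(void)
	{
		/* __func__ (C99) holds the enclosing function's name,
		 * "demo", just as GCC's older __FUNCTION__ extension did. */
		printf("%s: hello\n", __func__);
	}

	int main(void)
	{
		demo();
		return 0;
	}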
diff --git a/drivers/md/dm-uevent.c b/drivers/md/dm-uevent.c
index 50377e5dc2a3..6f65883aef12 100644
--- a/drivers/md/dm-uevent.c
+++ b/drivers/md/dm-uevent.c
@@ -78,7 +78,7 @@ static struct dm_uevent *dm_build_path_uevent(struct mapped_device *md,
 
 	event = dm_uevent_alloc(md);
 	if (!event) {
-		DMERR("%s: dm_uevent_alloc() failed", __FUNCTION__);
+		DMERR("%s: dm_uevent_alloc() failed", __func__);
 		goto err_nomem;
 	}
 
@@ -86,32 +86,32 @@ static struct dm_uevent *dm_build_path_uevent(struct mapped_device *md,
 
 	if (add_uevent_var(&event->ku_env, "DM_TARGET=%s", ti->type->name)) {
 		DMERR("%s: add_uevent_var() for DM_TARGET failed",
-		      __FUNCTION__);
+		      __func__);
 		goto err_add;
 	}
 
 	if (add_uevent_var(&event->ku_env, "DM_ACTION=%s", dm_action)) {
 		DMERR("%s: add_uevent_var() for DM_ACTION failed",
-		      __FUNCTION__);
+		      __func__);
 		goto err_add;
 	}
 
 	if (add_uevent_var(&event->ku_env, "DM_SEQNUM=%u",
 			   dm_next_uevent_seq(md))) {
 		DMERR("%s: add_uevent_var() for DM_SEQNUM failed",
-		      __FUNCTION__);
+		      __func__);
 		goto err_add;
 	}
 
 	if (add_uevent_var(&event->ku_env, "DM_PATH=%s", path)) {
-		DMERR("%s: add_uevent_var() for DM_PATH failed", __FUNCTION__);
+		DMERR("%s: add_uevent_var() for DM_PATH failed", __func__);
 		goto err_add;
 	}
 
 	if (add_uevent_var(&event->ku_env, "DM_NR_VALID_PATHS=%d",
 			   nr_valid_paths)) {
 		DMERR("%s: add_uevent_var() for DM_NR_VALID_PATHS failed",
-		      __FUNCTION__);
+		      __func__);
 		goto err_add;
 	}
 
@@ -146,25 +146,25 @@ void dm_send_uevents(struct list_head *events, struct kobject *kobj)
 		if (dm_copy_name_and_uuid(event->md, event->name,
 					  event->uuid)) {
 			DMERR("%s: dm_copy_name_and_uuid() failed",
-			      __FUNCTION__);
+			      __func__);
 			goto uevent_free;
 		}
 
 		if (add_uevent_var(&event->ku_env, "DM_NAME=%s", event->name)) {
 			DMERR("%s: add_uevent_var() for DM_NAME failed",
-			      __FUNCTION__);
+			      __func__);
 			goto uevent_free;
 		}
 
 		if (add_uevent_var(&event->ku_env, "DM_UUID=%s", event->uuid)) {
 			DMERR("%s: add_uevent_var() for DM_UUID failed",
-			      __FUNCTION__);
+			      __func__);
 			goto uevent_free;
 		}
 
 		r = kobject_uevent_env(kobj, event->action, event->ku_env.envp);
 		if (r)
-			DMERR("%s: kobject_uevent_env failed", __FUNCTION__);
+			DMERR("%s: kobject_uevent_env failed", __func__);
 uevent_free:
 		dm_uevent_free(event);
 	}
@@ -187,7 +187,7 @@ void dm_path_uevent(enum dm_uevent_type event_type, struct dm_target *ti,
 	struct dm_uevent *event;
 
 	if (event_type >= ARRAY_SIZE(_dm_uevent_type_names)) {
-		DMERR("%s: Invalid event_type %d", __FUNCTION__, event_type);
+		DMERR("%s: Invalid event_type %d", __func__, event_type);
 		goto out;
 	}
 
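Each dm-uevent.c hunk above sits in the same error-handling shape: attempt a step, report failure through DMERR() with the function name, then jump to a cleanup label. A rough user-space sketch of that shape (DMERR and add_var below are simplified stand-ins, not the kernel's device-mapper APIs):

	#include <stdio.h>

	/* Simplified stand-in for the kernel's DMERR() logging macro. */
	#define DMERR(fmt, ...) \
		fprintf(stderr, "device-mapper: " fmt "\n", ##__VA_ARGS__)

	/* Stand-in for add_uevent_var(): 0 on success, nonzero on failure. */
	static int add_var(const char *key, const char *val)
	{
		return printf("%s=%s\n", key, val) < 0;
	}

	static int build_event(const char *path)
	{
		if (add_var("DM_PATH", path)) {
			/* __func__ supplies the function name portably. */
			DMERR("%s: add_var() for DM_PATH failed", __func__);
			goto err_add;
		}
		return 0;
	err_add:
		return -1;
	}

	int main(void)
	{
		return build_event("/dev/mapper/example") ? 1 : 0;
	}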
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index b162b839a662..4efec467e2f1 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -433,7 +433,7 @@ static void ops_run_io(struct stripe_head *sh)
 
 			bi->bi_bdev = rdev->bdev;
 			pr_debug("%s: for %llu schedule op %ld on disc %d\n",
-				__FUNCTION__, (unsigned long long)sh->sector,
+				__func__, (unsigned long long)sh->sector,
 				bi->bi_rw, i);
 			atomic_inc(&sh->count);
 			bi->bi_sector = sh->sector + rdev->data_offset;
@@ -520,7 +520,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
 	raid5_conf_t *conf = sh->raid_conf;
 	int i;
 
-	pr_debug("%s: stripe %llu\n", __FUNCTION__,
+	pr_debug("%s: stripe %llu\n", __func__,
 		(unsigned long long)sh->sector);
 
 	/* clear completed biofills */
@@ -569,7 +569,7 @@ static void ops_run_biofill(struct stripe_head *sh)
 	raid5_conf_t *conf = sh->raid_conf;
 	int i;
 
-	pr_debug("%s: stripe %llu\n", __FUNCTION__,
+	pr_debug("%s: stripe %llu\n", __func__,
 		(unsigned long long)sh->sector);
 
 	for (i = sh->disks; i--; ) {
@@ -600,7 +600,7 @@ static void ops_complete_compute5(void *stripe_head_ref)
 	int target = sh->ops.target;
 	struct r5dev *tgt = &sh->dev[target];
 
-	pr_debug("%s: stripe %llu\n", __FUNCTION__,
+	pr_debug("%s: stripe %llu\n", __func__,
 		(unsigned long long)sh->sector);
 
 	set_bit(R5_UPTODATE, &tgt->flags);
@@ -625,7 +625,7 @@ ops_run_compute5(struct stripe_head *sh, unsigned long pending)
 	int i;
 
 	pr_debug("%s: stripe %llu block: %d\n",
-		__FUNCTION__, (unsigned long long)sh->sector, target);
+		__func__, (unsigned long long)sh->sector, target);
 	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
 
 	for (i = disks; i--; )
@@ -653,7 +653,7 @@ static void ops_complete_prexor(void *stripe_head_ref)
 {
 	struct stripe_head *sh = stripe_head_ref;
 
-	pr_debug("%s: stripe %llu\n", __FUNCTION__,
+	pr_debug("%s: stripe %llu\n", __func__,
 		(unsigned long long)sh->sector);
 
 	set_bit(STRIPE_OP_PREXOR, &sh->ops.complete);
@@ -670,7 +670,7 @@ ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
 	/* existing parity data subtracted */
 	struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
 
-	pr_debug("%s: stripe %llu\n", __FUNCTION__,
+	pr_debug("%s: stripe %llu\n", __func__,
 		(unsigned long long)sh->sector);
 
 	for (i = disks; i--; ) {
@@ -699,7 +699,7 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
 	 */
 	int prexor = test_bit(STRIPE_OP_PREXOR, &pending);
 
-	pr_debug("%s: stripe %llu\n", __FUNCTION__,
+	pr_debug("%s: stripe %llu\n", __func__,
 		(unsigned long long)sh->sector);
 
 	for (i = disks; i--; ) {
@@ -744,7 +744,7 @@ static void ops_complete_postxor(void *stripe_head_ref)
 {
 	struct stripe_head *sh = stripe_head_ref;
 
-	pr_debug("%s: stripe %llu\n", __FUNCTION__,
+	pr_debug("%s: stripe %llu\n", __func__,
 		(unsigned long long)sh->sector);
 
 	set_bit(STRIPE_OP_POSTXOR, &sh->ops.complete);
@@ -757,7 +757,7 @@ static void ops_complete_write(void *stripe_head_ref)
 	struct stripe_head *sh = stripe_head_ref;
 	int disks = sh->disks, i, pd_idx = sh->pd_idx;
 
-	pr_debug("%s: stripe %llu\n", __FUNCTION__,
+	pr_debug("%s: stripe %llu\n", __func__,
 		(unsigned long long)sh->sector);
 
 	for (i = disks; i--; ) {
@@ -787,7 +787,7 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
 	unsigned long flags;
 	dma_async_tx_callback callback;
 
-	pr_debug("%s: stripe %llu\n", __FUNCTION__,
+	pr_debug("%s: stripe %llu\n", __func__,
 		(unsigned long long)sh->sector);
 
 	/* check if prexor is active which means only process blocks
@@ -837,7 +837,7 @@ static void ops_complete_check(void *stripe_head_ref)
 	struct stripe_head *sh = stripe_head_ref;
 	int pd_idx = sh->pd_idx;
 
-	pr_debug("%s: stripe %llu\n", __FUNCTION__,
+	pr_debug("%s: stripe %llu\n", __func__,
 		(unsigned long long)sh->sector);
 
 	if (test_and_clear_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending) &&
@@ -859,7 +859,7 @@ static void ops_run_check(struct stripe_head *sh)
 	int count = 0, pd_idx = sh->pd_idx, i;
 	struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
 
-	pr_debug("%s: stripe %llu\n", __FUNCTION__,
+	pr_debug("%s: stripe %llu\n", __func__,
 		(unsigned long long)sh->sector);
 
 	for (i = disks; i--; ) {
@@ -1759,7 +1759,7 @@ handle_write_operations5(struct stripe_head *sh, int rcw, int expand)
 		locked++;
 
 	pr_debug("%s: stripe %llu locked: %d pending: %lx\n",
-		__FUNCTION__, (unsigned long long)sh->sector,
+		__func__, (unsigned long long)sh->sector,
 		locked, sh->ops.pending);
 
 	return locked;
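All of the raid5.c hunks touch pr_debug() call sites, which in kernels of this vintage compile to no-ops unless DEBUG is defined, so the renamed identifier costs nothing in production builds. A rough user-space approximation of that behavior (the fprintf fallback body is an assumption for illustration, not the kernel's actual definition):

	#include <stdio.h>

	#ifdef DEBUG
	#define pr_debug(fmt, ...) fprintf(stderr, fmt, ##__VA_ARGS__)
	#else
	#define pr_debug(fmt, ...) do { } while (0)
	#endif

	static void ops_example(unsigned long long sector)
	{
		/* Same pattern as the raid5.c call sites: __func__ plus
		 * the stripe's sector number. */
		pr_debug("%s: stripe %llu\n", __func__, sector);
	}

	int main(void)
	{
		ops_example(8ULL);	/* prints only when built with -DDEBUG */
		return 0;
	}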