author		Takashi Iwai <tiwai@suse.de>	2014-02-20 01:50:32 -0500
committer	Takashi Iwai <tiwai@suse.de>	2014-02-20 01:50:32 -0500
commit		f31f40be8f82d5eeb4ca084f9ac0f11ca265876b (patch)
tree		6fce9ac78045249084d641945e094dcaea72d265 /drivers/md
parent		13c12dbe3a2ce17227f7ddef652b6a53c78fa51f (diff)
parent		895be5b31e5175bef575008aadb4f0a27b850daa (diff)
Merge tag 'asoc-v3.14-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-linus
ASoC: Fixes for v3.14

A few fixes, all driver-specific. The DaVinci ones aren't as clear as they should be from the subject lines on the commits, but they fix issues that would prevent correct operation in some use cases, and since they only affect that particular driver they are reasonably safe.
Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/bcache/bcache.h	4
-rw-r--r--	drivers/md/bcache/bset.c	7
-rw-r--r--	drivers/md/bcache/btree.c	4
-rw-r--r--	drivers/md/bcache/extents.c	2
-rw-r--r--	drivers/md/bcache/request.c	6
-rw-r--r--	drivers/md/bcache/sysfs.c	2
-rw-r--r--	drivers/md/raid1.c	13
-rw-r--r--	drivers/md/raid5.c	90

8 files changed, 69 insertions(+), 59 deletions(-)
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 0c707e4f4eaf..a4c7306ff43d 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -210,7 +210,9 @@ BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
 #define GC_MARK_RECLAIMABLE	0
 #define GC_MARK_DIRTY		1
 #define GC_MARK_METADATA	2
-BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 13);
+#define GC_SECTORS_USED_SIZE	13
+#define MAX_GC_SECTORS_USED	(~(~0ULL << GC_SECTORS_USED_SIZE))
+BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE);
 BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
 
 #include "journal.h"
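Annotation (not part of the commit): the matching cap in btree.c below used to be (1 << 14) - 1, one bit wider than this 13-bit field, so SET_GC_SECTORS_USED() could silently truncate and trip the BUG_ON that follows it. A minimal standalone sketch of the mask arithmetic behind the new MAX_GC_SECTORS_USED:

/* Illustrative sketch only: what MAX_GC_SECTORS_USED evaluates to and
 * why the old (1 << 14) - 1 cap overflowed a 13-bit field. */
#include <stdio.h>

#define GC_SECTORS_USED_SIZE	13
#define MAX_GC_SECTORS_USED	(~(~0ULL << GC_SECTORS_USED_SIZE))

int main(void)
{
	/* ~0ULL << 13 sets every bit above bit 12; inverting keeps the
	 * low 13 bits: 2^13 - 1 = 8191, the field's true maximum. */
	printf("MAX_GC_SECTORS_USED = %llu\n", MAX_GC_SECTORS_USED);
	/* The old cap was 16383, which needs 14 bits to store. */
	printf("old cap = %d\n", (1 << 14) - 1);
	return 0;
}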
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 4f6b5940e609..3f74b4b0747b 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -23,7 +23,7 @@ void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set)
 	for (k = i->start; k < bset_bkey_last(i); k = next) {
 		next = bkey_next(k);
 
-		printk(KERN_ERR "block %u key %zi/%u: ", set,
+		printk(KERN_ERR "block %u key %li/%u: ", set,
 		       (uint64_t *) k - i->d, i->keys);
 
 		if (b->ops->key_dump)
@@ -1185,9 +1185,12 @@ static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
 	struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOIO,
 						     order);
 	if (!out) {
+		struct page *outp;
+
 		BUG_ON(order > state->page_order);
 
-		out = page_address(mempool_alloc(state->pool, GFP_NOIO));
+		outp = mempool_alloc(state->pool, GFP_NOIO);
+		out = page_address(outp);
 		used_mempool = true;
 		order = state->page_order;
 	}
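Annotation (not part of the commit): the surrounding logic is an opportunistic-allocation pattern: try __get_free_pages() first, and if that fails fall back to a mempool whose objects are guaranteed to be state->page_order pages. A rough userspace analogue of that shape (hypothetical names, single-threaded, malloc standing in for both kernel allocators):

#include <stdlib.h>
#include <stdbool.h>

/* Preallocated reserve, standing in for the mempool: fixed size, so a
 * caller falling back to it must clamp its request accordingly. */
static char reserve[1 << 16];

static void *alloc_sort_space(size_t want, size_t *got, bool *used_reserve)
{
	void *buf = malloc(want);	/* the opportunistic attempt */

	if (buf) {
		*got = want;
		*used_reserve = false;
		return buf;
	}
	/* Fall back to the reserve, clamping the size the way the kernel
	 * code clamps order to state->page_order. The caller must not
	 * free() this pointer; *used_reserve tells it so. */
	*got = sizeof(reserve);
	*used_reserve = true;
	return reserve;
}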
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 98cc0a810a36..5f9c2a665ca5 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1167,7 +1167,7 @@ uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k)
 		/* guard against overflow */
 		SET_GC_SECTORS_USED(g, min_t(unsigned,
 					     GC_SECTORS_USED(g) + KEY_SIZE(k),
-					     (1 << 14) - 1));
+					     MAX_GC_SECTORS_USED));
 
 		BUG_ON(!GC_SECTORS_USED(g));
 	}
@@ -1805,7 +1805,7 @@ static bool btree_insert_key(struct btree *b, struct bkey *k,
 
 static size_t insert_u64s_remaining(struct btree *b)
 {
-	ssize_t ret = bch_btree_keys_u64s_remaining(&b->keys);
+	long ret = bch_btree_keys_u64s_remaining(&b->keys);
 
 	/*
 	 * Might land in the middle of an existing extent and have to split it
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
index c3ead586dc27..416d1a3e028e 100644
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -194,7 +194,7 @@ err:
 	mutex_unlock(&b->c->bucket_lock);
 	bch_extent_to_text(buf, sizeof(buf), k);
 	btree_bug(b,
-"inconsistent btree pointer %s: bucket %li pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
+"inconsistent btree pointer %s: bucket %zi pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
 		  buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
 		  g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
 	return true;
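Annotation (not part of the commit): both format-string tweaks in this series (here and in bset.c above) align the specifier with the operand's actual type: judging by the switch to %zi, PTR_BUCKET_NR() yields a size_t, while a pointer difference is a ptrdiff_t, which happens to match long on Linux targets. A tiny userspace sketch of the distinction (hypothetical values):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t bucket = 1234;			/* e.g. a bucket number */
	int buf[8];
	ptrdiff_t off = &buf[5] - &buf[0];	/* a pointer difference */

	printf("bucket %zu\n", bucket);	/* z: size_t / ssize_t */
	printf("off %td\n", off);	/* t: ptrdiff_t; %li also works
					 * where long is the same width */
	return 0;
}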
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 72cd213f213f..5d5d031cf381 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -353,14 +353,14 @@ static void bch_data_insert_start(struct closure *cl)
 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 	struct bio *bio = op->bio, *n;
 
-	if (op->bypass)
-		return bch_data_invalidate(cl);
-
 	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
 		set_gc_sectors(op->c);
 		wake_up_gc(op->c);
 	}
 
+	if (op->bypass)
+		return bch_data_invalidate(cl);
+
 	/*
 	 * Journal writes are marked REQ_FLUSH; if the original write was a
 	 * flush, it'll wait on the journal write.
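Annotation (not part of the commit): the reorder matters for the GC heuristic. Previously a bypassed write returned before the sectors_to_gc debit, so a workload dominated by bypass writes could fill the cache without ever waking the garbage collector. A minimal sketch of the corrected ordering (userspace C11 analogue, names hypothetical):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_long sectors_to_gc = 1 << 14;	/* stands in for set_gc_sectors() */

static void wake_up_gc(void)
{
	/* elided: signal the GC thread */
}

static void data_insert_start(long sectors, bool bypass)
{
	/* Debit the GC budget first, for bypassed and cached writes alike;
	 * atomic_fetch_sub returns the old value, so subtract again to get
	 * the new one, mirroring the kernel's atomic_sub_return(). */
	if (atomic_fetch_sub(&sectors_to_gc, sectors) - sectors < 0)
		wake_up_gc();

	if (bypass)
		return;		/* invalidate path, elided */

	/* ... cached-write path continues here ... */
}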
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index c6ab69333a6d..d8458d477a12 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -416,7 +416,7 @@ static int btree_bset_stats(struct btree_op *b_op, struct btree *b)
 	return MAP_CONTINUE;
 }
 
-int bch_bset_print_stats(struct cache_set *c, char *buf)
+static int bch_bset_print_stats(struct cache_set *c, char *buf)
 {
 	struct bset_stats_op op;
 	int ret;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index fd3a2a14b587..4a6ca1cb2e78 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1953,11 +1953,15 @@ static int process_checks(struct r1bio *r1_bio)
 	for (i = 0; i < conf->raid_disks * 2; i++) {
 		int j;
 		int size;
+		int uptodate;
 		struct bio *b = r1_bio->bios[i];
 		if (b->bi_end_io != end_sync_read)
 			continue;
-		/* fixup the bio for reuse */
+		/* fixup the bio for reuse, but preserve BIO_UPTODATE */
+		uptodate = test_bit(BIO_UPTODATE, &b->bi_flags);
 		bio_reset(b);
+		if (!uptodate)
+			clear_bit(BIO_UPTODATE, &b->bi_flags);
 		b->bi_vcnt = vcnt;
 		b->bi_iter.bi_size = r1_bio->sectors << 9;
 		b->bi_iter.bi_sector = r1_bio->sector +
@@ -1990,11 +1994,14 @@ static int process_checks(struct r1bio *r1_bio)
 		int j;
 		struct bio *pbio = r1_bio->bios[primary];
 		struct bio *sbio = r1_bio->bios[i];
+		int uptodate = test_bit(BIO_UPTODATE, &sbio->bi_flags);
 
 		if (sbio->bi_end_io != end_sync_read)
 			continue;
+		/* Now we can 'fixup' the BIO_UPTODATE flag */
+		set_bit(BIO_UPTODATE, &sbio->bi_flags);
 
-		if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) {
+		if (uptodate) {
 			for (j = vcnt; j-- ; ) {
 				struct page *p, *s;
 				p = pbio->bi_io_vec[j].bv_page;
@@ -2009,7 +2016,7 @@ static int process_checks(struct r1bio *r1_bio)
 		if (j >= 0)
 			atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
 		if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
-			      && test_bit(BIO_UPTODATE, &sbio->bi_flags))) {
+			      && uptodate)) {
 			/* No need to write to this device. */
 			sbio->bi_end_io = NULL;
 			rdev_dec_pending(conf->mirrors[i].rdev, mddev);
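Annotation (not part of the commit): all three raid1 hunks serve one fix. bio_reset() re-marks a bio up-to-date, so a failed sync read looked successful and process_checks() could act on bad data. The pattern is to snapshot the status bit before the reset and reinstate the truth afterwards. A compact sketch (hypothetical struct and bit, memset standing in for bio_reset):

#include <stdbool.h>
#include <string.h>

#define UPTODATE_BIT	0UL		/* stands in for BIO_UPTODATE */

struct io_req {
	unsigned long flags;
	/* ... many other fields that reuse must reinitialize ... */
};

static void reset_for_reuse(struct io_req *r)
{
	/* Snapshot the real completion status before the reset wipes it. */
	bool uptodate = r->flags & (1UL << UPTODATE_BIT);

	memset(r, 0, sizeof(*r));		/* stands in for bio_reset() */
	r->flags |= 1UL << UPTODATE_BIT;	/* reset defaults to "ok" */

	/* Reinstate the truth: clear the bit if the read actually failed. */
	if (!uptodate)
		r->flags &= ~(1UL << UPTODATE_BIT);
}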
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f1feadeb7bb2..16f5c21963db 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5514,23 +5514,43 @@ raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
 	return sectors * (raid_disks - conf->max_degraded);
 }
 
+static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
+{
+	safe_put_page(percpu->spare_page);
+	kfree(percpu->scribble);
+	percpu->spare_page = NULL;
+	percpu->scribble = NULL;
+}
+
+static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
+{
+	if (conf->level == 6 && !percpu->spare_page)
+		percpu->spare_page = alloc_page(GFP_KERNEL);
+	if (!percpu->scribble)
+		percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
+
+	if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) {
+		free_scratch_buffer(conf, percpu);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
 static void raid5_free_percpu(struct r5conf *conf)
 {
-	struct raid5_percpu *percpu;
 	unsigned long cpu;
 
 	if (!conf->percpu)
 		return;
 
-	get_online_cpus();
-	for_each_possible_cpu(cpu) {
-		percpu = per_cpu_ptr(conf->percpu, cpu);
-		safe_put_page(percpu->spare_page);
-		kfree(percpu->scribble);
-	}
 #ifdef CONFIG_HOTPLUG_CPU
 	unregister_cpu_notifier(&conf->cpu_notify);
 #endif
+
+	get_online_cpus();
+	for_each_possible_cpu(cpu)
+		free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
 	put_online_cpus();
 
 	free_percpu(conf->percpu);
@@ -5557,15 +5577,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
 	switch (action) {
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
-		if (conf->level == 6 && !percpu->spare_page)
-			percpu->spare_page = alloc_page(GFP_KERNEL);
-		if (!percpu->scribble)
-			percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
-
-		if (!percpu->scribble ||
-		    (conf->level == 6 && !percpu->spare_page)) {
-			safe_put_page(percpu->spare_page);
-			kfree(percpu->scribble);
+		if (alloc_scratch_buffer(conf, percpu)) {
 			pr_err("%s: failed memory allocation for cpu%ld\n",
 			       __func__, cpu);
 			return notifier_from_errno(-ENOMEM);
@@ -5573,10 +5585,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
 		break;
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
-		safe_put_page(percpu->spare_page);
-		kfree(percpu->scribble);
-		percpu->spare_page = NULL;
-		percpu->scribble = NULL;
+		free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
 		break;
 	default:
 		break;
@@ -5588,40 +5597,29 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
 static int raid5_alloc_percpu(struct r5conf *conf)
 {
 	unsigned long cpu;
-	struct page *spare_page;
-	struct raid5_percpu __percpu *allcpus;
-	void *scribble;
-	int err;
+	int err = 0;
 
-	allcpus = alloc_percpu(struct raid5_percpu);
-	if (!allcpus)
+	conf->percpu = alloc_percpu(struct raid5_percpu);
+	if (!conf->percpu)
 		return -ENOMEM;
-	conf->percpu = allcpus;
+
+#ifdef CONFIG_HOTPLUG_CPU
+	conf->cpu_notify.notifier_call = raid456_cpu_notify;
+	conf->cpu_notify.priority = 0;
+	err = register_cpu_notifier(&conf->cpu_notify);
+	if (err)
+		return err;
+#endif
 
 	get_online_cpus();
-	err = 0;
 	for_each_present_cpu(cpu) {
-		if (conf->level == 6) {
-			spare_page = alloc_page(GFP_KERNEL);
-			if (!spare_page) {
-				err = -ENOMEM;
-				break;
-			}
-			per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
-		}
-		scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
-		if (!scribble) {
-			err = -ENOMEM;
+		err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
+		if (err) {
+			pr_err("%s: failed memory allocation for cpu%ld\n",
+			       __func__, cpu);
 			break;
 		}
-		per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
 	}
-#ifdef CONFIG_HOTPLUG_CPU
-	conf->cpu_notify.notifier_call = raid456_cpu_notify;
-	conf->cpu_notify.priority = 0;
-	if (err == 0)
-		err = register_cpu_notifier(&conf->cpu_notify);
-#endif
 	put_online_cpus();
 
 	return err;
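Annotation (not part of the commit): the raid5 refactor consolidates four near-identical alloc/free sequences into one helper pair, and registers the hotplug notifier before populating per-CPU state so every path, including a CPU coming online mid-setup, allocates and unwinds the same way. The shape of the pattern, reduced to standard C (hypothetical simplified types, malloc/free standing in for the kernel allocators):

#include <stddef.h>
#include <stdlib.h>

struct percpu_scratch {
	void *spare_page;	/* only RAID6 (level == 6) needs this */
	void *scribble;
};

static void free_scratch(struct percpu_scratch *p)
{
	free(p->spare_page);	/* free(NULL) is a no-op, like safe_put_page */
	free(p->scribble);
	p->spare_page = NULL;
	p->scribble = NULL;
}

static int alloc_scratch(struct percpu_scratch *p, int level, size_t scribble_len)
{
	if (level == 6 && !p->spare_page)
		p->spare_page = malloc(4096);
	if (!p->scribble)
		p->scribble = malloc(scribble_len);

	if (!p->scribble || (level == 6 && !p->spare_page)) {
		free_scratch(p);	/* one unwind path for every caller */
		return -1;		/* -ENOMEM in the kernel */
	}
	return 0;
}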