39 files changed, 420 insertions, 145 deletions
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 211ca3f7fd16..4ea15ca89b2b 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -88,6 +88,7 @@ extern int acpi_disabled;
 extern int acpi_pci_disabled;
 extern int acpi_skip_timer_override;
 extern int acpi_use_timer_override;
+extern int acpi_fix_pin2_polarity;
 
 extern u8 acpi_sci_flags;
 extern int acpi_sci_override_gsi;
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index b3a71137983a..3e6e2d68f761 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -72,6 +72,7 @@ u8 acpi_sci_flags __initdata;
 int acpi_sci_override_gsi __initdata;
 int acpi_skip_timer_override __initdata;
 int acpi_use_timer_override __initdata;
+int acpi_fix_pin2_polarity __initdata;
 
 #ifdef CONFIG_X86_LOCAL_APIC
 static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
@@ -415,10 +416,15 @@ acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
                 return 0;
         }
 
-        if (acpi_skip_timer_override &&
-            intsrc->source_irq == 0 && intsrc->global_irq == 2) {
-                printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
-                return 0;
+        if (intsrc->source_irq == 0 && intsrc->global_irq == 2) {
+                if (acpi_skip_timer_override) {
+                        printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
+                        return 0;
+                }
+                if (acpi_fix_pin2_polarity && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
+                        intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK;
+                        printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n");
+                }
         }
 
         mp_override_legacy_irq(intsrc->source_irq,
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index 51ef31a89be9..51d4e1663066 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -284,7 +284,7 @@ static int __init apbt_clockevent_register(void)
         memcpy(&adev->evt, &apbt_clockevent, sizeof(struct clock_event_device));
 
         if (mrst_timer_options == MRST_TIMER_LAPIC_APBT) {
-                apbt_clockevent.rating = APBT_CLOCKEVENT_RATING - 100;
+                adev->evt.rating = APBT_CLOCKEVENT_RATING - 100;
                 global_clock_event = &adev->evt;
                 printk(KERN_DEBUG "%s clockevent registered as global\n",
                        global_clock_event->name);
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 76b8cd953dee..9efbdcc56425 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -143,15 +143,10 @@ static void __init ati_bugs(int num, int slot, int func)
 
 static u32 __init ati_sbx00_rev(int num, int slot, int func)
 {
-        u32 old, d;
+        u32 d;
 
-        d = read_pci_config(num, slot, func, 0x70);
-        old = d;
-        d &= ~(1<<8);
-        write_pci_config(num, slot, func, 0x70, d);
         d = read_pci_config(num, slot, func, 0x8);
         d &= 0xff;
-        write_pci_config(num, slot, func, 0x70, old);
 
         return d;
 }
@@ -160,13 +155,16 @@ static void __init ati_bugs_contd(int num, int slot, int func)
 {
         u32 d, rev;
 
-        if (acpi_use_timer_override)
-                return;
-
         rev = ati_sbx00_rev(num, slot, func);
+        if (rev >= 0x40)
+                acpi_fix_pin2_polarity = 1;
+
         if (rev > 0x13)
                 return;
 
+        if (acpi_use_timer_override)
+                return;
+
         /* check for IRQ0 interrupt swap */
         d = read_pci_config(num, slot, func, 0x64);
         if (!(d & (1<<14)))
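Read together with the boot.c hunk above: on SBx00 parts with revision 0x40 or newer the IRQ0-to-pin-2 override is no longer dropped, only its MADT polarity bits are cleared so the timer interrupt falls back to the bus default (high active). A standalone sketch of that flag manipulation follows; the ACPI_MADT_POLARITY_* values are restated from the ACPI MADT INTI flags definition, and the inti_flags value is hypothetical, not taken from the patch.

#include <stdio.h>

/* MADT interrupt-source-override INTI flags, bits 0-1 = polarity */
#define ACPI_MADT_POLARITY_MASK        0x3
#define ACPI_MADT_POLARITY_CONFORMS    0x0   /* bus default: high active for ISA */
#define ACPI_MADT_POLARITY_ACTIVE_LOW  0x3

int main(void)
{
        unsigned int inti_flags = ACPI_MADT_POLARITY_ACTIVE_LOW; /* hypothetical BIOS value */
        int acpi_fix_pin2_polarity = 1;      /* what ati_bugs_contd() sets for rev >= 0x40 */

        if (acpi_fix_pin2_polarity && (inti_flags & ACPI_MADT_POLARITY_MASK))
                inti_flags &= ~ACPI_MADT_POLARITY_MASK;

        printf("polarity field now %#x (conforms, i.e. high active)\n",
               inti_flags & ACPI_MADT_POLARITY_MASK);
        return 0;
}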
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index fc7aae1e2bc7..715037caeb43 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -285,6 +285,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
                         DMI_MATCH(DMI_BOARD_NAME, "P4S800"),
                 },
         },
+        {       /* Handle problems with rebooting on VersaLogic Menlow boards */
+                .callback = set_bios_reboot,
+                .ident = "VersaLogic Menlow based board",
+                .matches = {
+                        DMI_MATCH(DMI_BOARD_VENDOR, "VersaLogic Corporation"),
+                        DMI_MATCH(DMI_BOARD_NAME, "VersaLogic Menlow board"),
+                },
+        },
         { }
 };
 
diff --git a/block/genhd.c b/block/genhd.c
index 6a5b772aa201..cbf1112a885c 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1355,7 +1355,7 @@ int invalidate_partition(struct gendisk *disk, int partno)
         struct block_device *bdev = bdget_disk(disk, partno);
         if (bdev) {
                 fsync_bdev(bdev);
-                res = __invalidate_device(bdev);
+                res = __invalidate_device(bdev, true);
                 bdput(bdev);
         }
         return res;
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index b9ba04fc2b34..77fc76f8aea9 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3281,7 +3281,7 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g,
                         struct block_device *bdev = opened_bdev[cnt];
                         if (!bdev || ITYPE(drive_state[cnt].fd_device) != type)
                                 continue;
-                        __invalidate_device(bdev);
+                        __invalidate_device(bdev, true);
                 }
                 mutex_unlock(&open_lock);
         } else {
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 8a2f767f26d8..0ed7f6bc2a7f 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -216,7 +216,6 @@ static int linear_run (mddev_t *mddev)
 
         if (md_check_no_bitmap(mddev))
                 return -EINVAL;
-        mddev->queue->queue_lock = &mddev->queue->__queue_lock;
         conf = linear_conf(mddev, mddev->raid_disks);
 
         if (!conf)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 0cc30ecda4c1..818313e277e7 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -553,6 +553,9 @@ static mddev_t * mddev_find(dev_t unit)
 {
         mddev_t *mddev, *new = NULL;
 
+        if (unit && MAJOR(unit) != MD_MAJOR)
+                unit &= ~((1<<MdpMinorShift)-1);
+
  retry:
         spin_lock(&all_mddevs_lock);
 
@@ -4138,10 +4141,10 @@ array_size_store(mddev_t *mddev, const char *buf, size_t len)
         }
 
         mddev->array_sectors = sectors;
-        set_capacity(mddev->gendisk, mddev->array_sectors);
-        if (mddev->pers)
+        if (mddev->pers) {
+                set_capacity(mddev->gendisk, mddev->array_sectors);
                 revalidate_disk(mddev->gendisk);
-
+        }
         return len;
 }
 
@@ -4624,6 +4627,7 @@ static int do_md_run(mddev_t *mddev)
         }
         set_capacity(mddev->gendisk, mddev->array_sectors);
         revalidate_disk(mddev->gendisk);
+        mddev->changed = 1;
         kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
 out:
         return err;
@@ -4712,6 +4716,7 @@ static void md_clean(mddev_t *mddev)
         mddev->sync_speed_min = mddev->sync_speed_max = 0;
         mddev->recovery = 0;
         mddev->in_sync = 0;
+        mddev->changed = 0;
         mddev->degraded = 0;
         mddev->safemode = 0;
         mddev->bitmap_info.offset = 0;
@@ -4827,6 +4832,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
 
                 set_capacity(disk, 0);
                 mutex_unlock(&mddev->open_mutex);
+                mddev->changed = 1;
                 revalidate_disk(disk);
 
                 if (mddev->ro)
@@ -6011,7 +6017,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
         atomic_inc(&mddev->openers);
         mutex_unlock(&mddev->open_mutex);
 
-        check_disk_size_change(mddev->gendisk, bdev);
+        check_disk_change(bdev);
  out:
         return err;
 }
@@ -6026,6 +6032,21 @@ static int md_release(struct gendisk *disk, fmode_t mode)
 
         return 0;
 }
+
+static int md_media_changed(struct gendisk *disk)
+{
+        mddev_t *mddev = disk->private_data;
+
+        return mddev->changed;
+}
+
+static int md_revalidate(struct gendisk *disk)
+{
+        mddev_t *mddev = disk->private_data;
+
+        mddev->changed = 0;
+        return 0;
+}
 static const struct block_device_operations md_fops =
 {
         .owner          = THIS_MODULE,
@@ -6036,6 +6057,8 @@ static const struct block_device_operations md_fops =
         .compat_ioctl   = md_compat_ioctl,
 #endif
         .getgeo         = md_getgeo,
+        .media_changed  = md_media_changed,
+        .revalidate_disk= md_revalidate,
 };
 
 static int md_thread(void * arg)
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 7e90b8593b2a..12215d437fcc 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -274,6 +274,8 @@ struct mddev_s
         atomic_t                active;         /* general refcount */
         atomic_t                openers;        /* number of active opens */
 
+        int                     changed;        /* True if we might need to
+                                                 * reread partition info */
         int                     degraded;       /* whether md should consider
                                                  * adding a spare
                                                  */
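The md.c and md.h hunks above work as a pair with the fs/block_dev.c changes further down: md_open() now calls check_disk_change(), which asks the driver through ->media_changed whether the array geometry might have changed, and ->revalidate_disk clears the flag once partitions have been reread. A simplified, self-contained sketch of that handshake (stand-in types and helper names, not the real block-layer code):

#include <stdio.h>

/* Stand-in for mddev_t; only the 'changed' handshake is modelled here. */
struct sketch_mddev { int changed; };

static int md_media_changed_sketch(struct sketch_mddev *mddev)
{
        return mddev->changed;          /* set by do_md_run()/do_md_stop() */
}

static int md_revalidate_sketch(struct sketch_mddev *mddev)
{
        mddev->changed = 0;             /* cleared once partitions are reread */
        return 0;
}

static void check_disk_change_sketch(struct sketch_mddev *mddev)
{
        if (!md_media_changed_sketch(mddev))
                return;                 /* nothing to do on this open */
        /* flush_disk(bdev, true) and a partition rescan would happen here */
        md_revalidate_sketch(mddev);
}

int main(void)
{
        struct sketch_mddev mddev = { .changed = 1 };   /* array was just started */

        check_disk_change_sketch(&mddev);
        printf("changed after open: %d\n", mddev.changed);
        return 0;
}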
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 6d7ddf32ef2e..3a62d440e27b 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -435,7 +435,6 @@ static int multipath_run (mddev_t *mddev)
          * bookkeeping area. [whatever we allocate in multipath_run(),
          * should be freed in multipath_stop()]
          */
-        mddev->queue->queue_lock = &mddev->queue->__queue_lock;
 
         conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL);
         mddev->private = conf;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 637a96855edb..c0ac457f1218 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -361,7 +361,6 @@ static int raid0_run(mddev_t *mddev)
         if (md_check_no_bitmap(mddev))
                 return -EINVAL;
         blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
-        mddev->queue->queue_lock = &mddev->queue->__queue_lock;
 
         /* if private is not null, we are here after takeover */
         if (mddev->private == NULL) {
@@ -670,6 +669,7 @@ static void *raid0_takeover_raid1(mddev_t *mddev)
         mddev->new_layout = 0;
         mddev->new_chunk_sectors = 128; /* by default set chunk size to 64k */
         mddev->delta_disks = 1 - mddev->raid_disks;
+        mddev->raid_disks = 1;
         /* make sure it will be not marked as dirty */
         mddev->recovery_cp = MaxSector;
 
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a23ffa397ba9..06cd712807d0 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -593,7 +593,10 @@ static int flush_pending_writes(conf_t *conf)
         if (conf->pending_bio_list.head) {
                 struct bio *bio;
                 bio = bio_list_get(&conf->pending_bio_list);
+                /* Only take the spinlock to quiet a warning */
+                spin_lock(conf->mddev->queue->queue_lock);
                 blk_remove_plug(conf->mddev->queue);
+                spin_unlock(conf->mddev->queue->queue_lock);
                 spin_unlock_irq(&conf->device_lock);
                 /* flush any pending bitmap writes to
                  * disk before proceeding w/ I/O */
@@ -959,7 +962,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
                 atomic_inc(&r1_bio->remaining);
                 spin_lock_irqsave(&conf->device_lock, flags);
                 bio_list_add(&conf->pending_bio_list, mbio);
-                blk_plug_device(mddev->queue);
+                blk_plug_device_unlocked(mddev->queue);
                 spin_unlock_irqrestore(&conf->device_lock, flags);
         }
         r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL);
@@ -2021,7 +2024,6 @@ static int run(mddev_t *mddev)
         if (IS_ERR(conf))
                 return PTR_ERR(conf);
 
-        mddev->queue->queue_lock = &conf->device_lock;
         list_for_each_entry(rdev, &mddev->disks, same_set) {
                 disk_stack_limits(mddev->gendisk, rdev->bdev,
                                   rdev->data_offset << 9);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 3b607b28741b..747d061d8e05 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -662,7 +662,10 @@ static int flush_pending_writes(conf_t *conf)
         if (conf->pending_bio_list.head) {
                 struct bio *bio;
                 bio = bio_list_get(&conf->pending_bio_list);
+                /* Spinlock only taken to quiet a warning */
+                spin_lock(conf->mddev->queue->queue_lock);
                 blk_remove_plug(conf->mddev->queue);
+                spin_unlock(conf->mddev->queue->queue_lock);
                 spin_unlock_irq(&conf->device_lock);
                 /* flush any pending bitmap writes to disk
                  * before proceeding w/ I/O */
@@ -971,7 +974,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
                 atomic_inc(&r10_bio->remaining);
                 spin_lock_irqsave(&conf->device_lock, flags);
                 bio_list_add(&conf->pending_bio_list, mbio);
-                blk_plug_device(mddev->queue);
+                blk_plug_device_unlocked(mddev->queue);
                 spin_unlock_irqrestore(&conf->device_lock, flags);
         }
 
@@ -2304,8 +2307,6 @@ static int run(mddev_t *mddev)
         if (!conf)
                 goto out;
 
-        mddev->queue->queue_lock = &conf->device_lock;
-
         mddev->thread = conf->thread;
         conf->thread = NULL;
 
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 702812824195..78536fdbd87f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5204,7 +5204,6 @@ static int run(mddev_t *mddev)
 
                 mddev->queue->backing_dev_info.congested_data = mddev;
                 mddev->queue->backing_dev_info.congested_fn = raid5_congested;
-                mddev->queue->queue_lock = &conf->device_lock;
                 mddev->queue->unplug_fn = raid5_unplug_queue;
 
                 chunk_size = mddev->chunk_sectors << 9;
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index c36749e4c926..5469c52cba3d 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -309,7 +309,7 @@ static const struct rtc_class_ops at91_rtc_ops = {
         .read_alarm     = at91_rtc_readalarm,
         .set_alarm      = at91_rtc_setalarm,
         .proc           = at91_rtc_proc,
-        .alarm_irq_enabled = at91_rtc_alarm_irq_enable,
+        .alarm_irq_enable = at91_rtc_alarm_irq_enable,
 };
 
 /*
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index d041c6826e43..0f299b7aad60 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2681,17 +2681,13 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
 
         mutex_lock(&usb_address0_mutex);
 
-        if (!udev->config && oldspeed == USB_SPEED_SUPER) {
-                /* Don't reset USB 3.0 devices during an initial setup */
-                usb_set_device_state(udev, USB_STATE_DEFAULT);
-        } else {
-                /* Reset the device; full speed may morph to high speed */
-                /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
-                retval = hub_port_reset(hub, port1, udev, delay);
-                if (retval < 0)         /* error or disconnect */
-                        goto fail;
-                /* success, speed is known */
-        }
+        /* Reset the device; full speed may morph to high speed */
+        /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
+        retval = hub_port_reset(hub, port1, udev, delay);
+        if (retval < 0)         /* error or disconnect */
+                goto fail;
+        /* success, speed is known */
+
         retval = -ENODEV;
 
         if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed) {
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index fcbf4abbf381..0231814a97a5 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -169,9 +169,10 @@ static void xhci_print_ports(struct xhci_hcd *xhci)
         }
 }
 
-void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num)
+void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num)
 {
-        void *addr;
+        struct xhci_intr_reg __iomem *ir_set = &xhci->run_regs->ir_set[set_num];
+        void __iomem *addr;
         u32 temp;
         u64 temp_64;
 
@@ -449,7 +450,7 @@ char *xhci_get_slot_state(struct xhci_hcd *xhci,
         }
 }
 
-void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
+static void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
 {
         /* Fields are 32 bits wide, DMA addresses are in bytes */
         int field_size = 32 / 8;
@@ -488,7 +489,7 @@ void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
                 dbg_rsvd64(xhci, (u64 *)slot_ctx, dma);
 }
 
-void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
+static void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
                 struct xhci_container_ctx *ctx,
                 unsigned int last_ep)
 {
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 1d0f45f0e7a6..a9534396e85b 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -307,7 +307,7 @@ struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
 
 /***************** Streams structures manipulation *************************/
 
-void xhci_free_stream_ctx(struct xhci_hcd *xhci,
+static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
                 unsigned int num_stream_ctxs,
                 struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
 {
@@ -335,7 +335,7 @@ void xhci_free_stream_ctx(struct xhci_hcd *xhci,
  * The stream context array must be a power of 2, and can be as small as
  * 64 bytes or as large as 1MB.
  */
-struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
+static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
                 unsigned int num_stream_ctxs, dma_addr_t *dma,
                 gfp_t mem_flags)
 {
@@ -1900,11 +1900,11 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
         val &= DBOFF_MASK;
         xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
                         " from cap regs base addr\n", val);
-        xhci->dba = (void *) xhci->cap_regs + val;
+        xhci->dba = (void __iomem *) xhci->cap_regs + val;
         xhci_dbg_regs(xhci);
         xhci_print_run_regs(xhci);
         /* Set ir_set to interrupt register set 0 */
-        xhci->ir_set = (void *) xhci->run_regs->ir_set;
+        xhci->ir_set = &xhci->run_regs->ir_set[0];
 
         /*
          * Event ring setup: Allocate a normal ring, but also setup
@@ -1961,7 +1961,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
         /* Set the event ring dequeue address */
         xhci_set_hc_event_deq(xhci);
         xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
-        xhci_print_ir_set(xhci, xhci->ir_set, 0);
+        xhci_print_ir_set(xhci, 0);
 
         /*
          * XXX: Might need to set the Interrupter Moderation Register to
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 3e8211c1ce5a..3289bf4832c9 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -474,8 +474,11 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
         state->new_deq_seg = find_trb_seg(cur_td->start_seg,
                         dev->eps[ep_index].stopped_trb,
                         &state->new_cycle_state);
-        if (!state->new_deq_seg)
-                BUG();
+        if (!state->new_deq_seg) {
+                WARN_ON(1);
+                return;
+        }
+
         /* Dig out the cycle state saved by the xHC during the stop ep cmd */
         xhci_dbg(xhci, "Finding endpoint context\n");
         ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
@@ -486,8 +489,10 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
         state->new_deq_seg = find_trb_seg(state->new_deq_seg,
                         state->new_deq_ptr,
                         &state->new_cycle_state);
-        if (!state->new_deq_seg)
-                BUG();
+        if (!state->new_deq_seg) {
+                WARN_ON(1);
+                return;
+        }
 
         trb = &state->new_deq_ptr->generic;
         if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
@@ -2363,12 +2368,13 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
 
                 /* Scatter gather list entries may cross 64KB boundaries */
                 running_total = TRB_MAX_BUFF_SIZE -
-                        (sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+                        (sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
+                running_total &= TRB_MAX_BUFF_SIZE - 1;
                 if (running_total != 0)
                         num_trbs++;
 
                 /* How many more 64KB chunks to transfer, how many more TRBs? */
-                while (running_total < sg_dma_len(sg)) {
+                while (running_total < sg_dma_len(sg) && running_total < temp) {
                         num_trbs++;
                         running_total += TRB_MAX_BUFF_SIZE;
                 }
@@ -2394,11 +2400,11 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
 static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
 {
         if (num_trbs != 0)
-                dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
+                dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
                                 "TRBs, %d left\n", __func__,
                                 urb->ep->desc.bEndpointAddress, num_trbs);
         if (running_total != urb->transfer_buffer_length)
-                dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
+                dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
                                 "queued %#x (%d), asked for %#x (%d)\n",
                                 __func__,
                                 urb->ep->desc.bEndpointAddress,
@@ -2533,8 +2539,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
         sg = urb->sg;
         addr = (u64) sg_dma_address(sg);
         this_sg_len = sg_dma_len(sg);
-        trb_buff_len = TRB_MAX_BUFF_SIZE -
-                (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+        trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
         trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
         if (trb_buff_len > urb->transfer_buffer_length)
                 trb_buff_len = urb->transfer_buffer_length;
@@ -2572,7 +2577,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                                 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
                                 (unsigned int) addr + trb_buff_len);
                 if (TRB_MAX_BUFF_SIZE -
-                                (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) {
+                                (addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
                         xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
                         xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
                                 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
@@ -2616,7 +2621,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                 }
 
                 trb_buff_len = TRB_MAX_BUFF_SIZE -
-                        (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+                        (addr & (TRB_MAX_BUFF_SIZE - 1));
                 trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
                 if (running_total + trb_buff_len > urb->transfer_buffer_length)
                         trb_buff_len =
@@ -2656,7 +2661,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
         num_trbs = 0;
         /* How much data is (potentially) left before the 64KB boundary? */
         running_total = TRB_MAX_BUFF_SIZE -
-                (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+                (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
+        running_total &= TRB_MAX_BUFF_SIZE - 1;
 
         /* If there's some data on this 64KB chunk, or we have to send a
          * zero-length transfer, we need at least one TRB
@@ -2700,8 +2706,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
         /* How much data is in the first TRB? */
         addr = (u64) urb->transfer_dma;
         trb_buff_len = TRB_MAX_BUFF_SIZE -
-                (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
-        if (urb->transfer_buffer_length < trb_buff_len)
+                (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
+        if (trb_buff_len > urb->transfer_buffer_length)
                 trb_buff_len = urb->transfer_buffer_length;
 
         first_trb = true;
@@ -2879,8 +2885,8 @@ static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
                 addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
                 td_len = urb->iso_frame_desc[i].length;
 
-                running_total = TRB_MAX_BUFF_SIZE -
-                        (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+                running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
+                running_total &= TRB_MAX_BUFF_SIZE - 1;
                 if (running_total != 0)
                         num_trbs++;
 
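The repeated running_total change above is easiest to check with concrete numbers: when a DMA address is already 64KB-aligned, TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1)) evaluates to the full 64KB rather than to 0, so the old code counted a TRB for a first partial chunk that does not exist; the added mask folds that case back to zero. A standalone arithmetic check (the address value is made up, not from the patch):

#include <stdio.h>
#include <stdint.h>

#define TRB_MAX_BUFF_SIZE (1 << 16)     /* 64KB addressed by one TRB segment */

int main(void)
{
        uint64_t addr = 0x20000;        /* hypothetical, already 64KB-aligned */
        unsigned int running_total;

        running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
        printf("before extra mask: %#x -> spurious first TRB counted\n",
               running_total);          /* 0x10000, non-zero */

        running_total &= TRB_MAX_BUFF_SIZE - 1;
        printf("after extra mask:  %#x -> no partial first chunk\n",
               running_total);          /* 0x0 */
        return 0;
}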
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 34cf4e165877..2083fc2179b2 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -109,7 +109,7 @@ int xhci_halt(struct xhci_hcd *xhci)
 /*
  * Set the run bit and wait for the host to be running.
  */
-int xhci_start(struct xhci_hcd *xhci)
+static int xhci_start(struct xhci_hcd *xhci)
 {
         u32 temp;
         int ret;
@@ -329,7 +329,7 @@ int xhci_init(struct usb_hcd *hcd)
 
 
 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
-void xhci_event_ring_work(unsigned long arg)
+static void xhci_event_ring_work(unsigned long arg)
 {
         unsigned long flags;
         int temp;
@@ -473,7 +473,7 @@ int xhci_run(struct usb_hcd *hcd)
                         xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
         xhci_writel(xhci, ER_IRQ_ENABLE(temp),
                         &xhci->ir_set->irq_pending);
-        xhci_print_ir_set(xhci, xhci->ir_set, 0);
+        xhci_print_ir_set(xhci, 0);
 
         if (NUM_TEST_NOOPS > 0)
                 doorbell = xhci_setup_one_noop(xhci);
@@ -528,7 +528,7 @@ void xhci_stop(struct usb_hcd *hcd)
         temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
         xhci_writel(xhci, ER_IRQ_DISABLE(temp),
                         &xhci->ir_set->irq_pending);
-        xhci_print_ir_set(xhci, xhci->ir_set, 0);
+        xhci_print_ir_set(xhci, 0);
 
         xhci_dbg(xhci, "cleaning up memory\n");
         xhci_mem_cleanup(xhci);
@@ -755,7 +755,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
         temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
         xhci_writel(xhci, ER_IRQ_DISABLE(temp),
                         &xhci->ir_set->irq_pending);
-        xhci_print_ir_set(xhci, xhci->ir_set, 0);
+        xhci_print_ir_set(xhci, 0);
 
         xhci_dbg(xhci, "cleaning up memory\n");
         xhci_mem_cleanup(xhci);
@@ -857,7 +857,7 @@ unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
 /* Returns 1 if the arguments are OK;
  * returns 0 this is a root hub; returns -EINVAL for NULL pointers.
  */
-int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
+static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
                 struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
                 const char *func) {
         struct xhci_hcd *xhci;
@@ -1693,7 +1693,7 @@ static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
         xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
 }
 
-void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
+static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
                 unsigned int slot_id, unsigned int ep_index,
                 struct xhci_dequeue_state *deq_state)
 {
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 7f236fd22015..7f127df6dd55 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1348,7 +1348,7 @@ static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci)
 }
 
 /* xHCI debugging */
-void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num);
+void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num);
 void xhci_print_registers(struct xhci_hcd *xhci);
 void xhci_dbg_regs(struct xhci_hcd *xhci);
 void xhci_print_run_regs(struct xhci_hcd *xhci);
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 54a8bd1047d6..c292d5c499e7 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1864,6 +1864,7 @@ allocate_instance(struct device *dev,
         INIT_LIST_HEAD(&musb->out_bulk);
 
         hcd->uses_new_polling = 1;
+        hcd->has_tt = 1;
 
         musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
         musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 15690bb1d3b5..789b3afb3423 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -140,6 +140,7 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
         candidate->first = candidate->last = index;
         candidate->offset_first = from;
         candidate->to_last = to;
+        INIT_LIST_HEAD(&candidate->link);
         candidate->usage = 1;
         candidate->state = AFS_WBACK_PENDING;
         init_waitqueue_head(&candidate->waitq);
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 94d41db62004..f05bf16cd979 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -933,9 +933,9 @@ EXPORT_SYMBOL_GPL(bd_unlink_disk_holder);
  * when a disk has been changed -- either by a media change or online
  * resize.
  */
-static void flush_disk(struct block_device *bdev)
+static void flush_disk(struct block_device *bdev, bool kill_dirty)
 {
-        if (__invalidate_device(bdev)) {
+        if (__invalidate_device(bdev, kill_dirty)) {
                 char name[BDEVNAME_SIZE] = "";
 
                 if (bdev->bd_disk)
@@ -972,7 +972,7 @@ void check_disk_size_change(struct gendisk *disk, struct block_device *bdev)
                        "%s: detected capacity change from %lld to %lld\n",
                        name, bdev_size, disk_size);
                 i_size_write(bdev->bd_inode, disk_size);
-                flush_disk(bdev);
+                flush_disk(bdev, false);
         }
 }
 EXPORT_SYMBOL(check_disk_size_change);
@@ -1025,7 +1025,7 @@ int check_disk_change(struct block_device *bdev)
         if (!(events & DISK_EVENT_MEDIA_CHANGE))
                 return 0;
 
-        flush_disk(bdev);
+        flush_disk(bdev, true);
         if (bdops->revalidate_disk)
                 bdops->revalidate_disk(bdev->bd_disk);
         return 1;
@@ -1606,7 +1606,7 @@ fail:
 }
 EXPORT_SYMBOL(lookup_bdev);
 
-int __invalidate_device(struct block_device *bdev)
+int __invalidate_device(struct block_device *bdev, bool kill_dirty)
 {
         struct super_block *sb = get_super(bdev);
         int res = 0;
@@ -1619,7 +1619,7 @@ int __invalidate_device(struct block_device *bdev)
                  * hold).
                  */
                 shrink_dcache_sb(sb);
-                res = invalidate_inodes(sb);
+                res = invalidate_inodes(sb, kill_dirty);
                 drop_super(sb);
         }
         invalidate_bdev(bdev);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 2c98b3af6052..6f820fa23df4 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1254,6 +1254,7 @@ struct btrfs_root {
 #define BTRFS_MOUNT_SPACE_CACHE         (1 << 12)
 #define BTRFS_MOUNT_CLEAR_CACHE         (1 << 13)
 #define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14)
+#define BTRFS_MOUNT_ENOSPC_DEBUG        (1 << 15)
 
 #define btrfs_clear_opt(o, opt)         ((o) &= ~BTRFS_MOUNT_##opt)
 #define btrfs_set_opt(o, opt)           ((o) |= BTRFS_MOUNT_##opt)
@@ -2218,6 +2219,8 @@ int btrfs_error_unpin_extent_range(struct btrfs_root *root,
                                    u64 start, u64 end);
 int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
                                u64 num_bytes);
+int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
+                            struct btrfs_root *root, u64 type);
 
 /* ctree.c */
 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index f3c96fc01439..588ff9849873 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -5376,7 +5376,7 @@ again:
                                      num_bytes, data, 1);
                 goto again;
         }
-        if (ret == -ENOSPC) {
+        if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) {
                 struct btrfs_space_info *sinfo;
 
                 sinfo = __find_space_info(root->fs_info, data);
@@ -8065,6 +8065,13 @@ out:
         return ret;
 }
 
+int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
+                            struct btrfs_root *root, u64 type)
+{
+        u64 alloc_flags = get_alloc_profile(root, type);
+        return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
+}
+
 /*
  * helper to account the unused space of all the readonly block group in the
  * list. takes mirrors into account.
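The new BTRFS_MOUNT_ENOSPC_DEBUG bit from the ctree.h hunk is what gates the verbose space-info dump above, so the dump only fires when the corresponding debug mount option is set. A toy illustration of the bit test, using simplified local stand-ins for the btrfs mount-option macros (only the bit value is taken from the diff):

#include <stdio.h>

#define BTRFS_MOUNT_ENOSPC_DEBUG (1 << 15)      /* value from the ctree.h hunk */

static unsigned long mount_opt;                 /* stand-in for fs_info->mount_opt */

#define sketch_set_opt(opt)   (mount_opt |= BTRFS_MOUNT_##opt)
#define sketch_test_opt(opt)  (mount_opt & BTRFS_MOUNT_##opt)

int main(void)
{
        int ret = -28;                          /* -ENOSPC */

        if (ret == -28 && sketch_test_opt(ENOSPC_DEBUG))
                printf("space-info dump (default: suppressed)\n");

        sketch_set_opt(ENOSPC_DEBUG);           /* what the debug mount option would set */
        if (ret == -28 && sketch_test_opt(ENOSPC_DEBUG))
                printf("space-info dump (enabled)\n");
        return 0;
}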
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 92ac5192c518..fd3f172e94e6 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c | |||
@@ -1433,12 +1433,13 @@ int extent_clear_unlock_delalloc(struct inode *inode, | |||
1433 | */ | 1433 | */ |
1434 | u64 count_range_bits(struct extent_io_tree *tree, | 1434 | u64 count_range_bits(struct extent_io_tree *tree, |
1435 | u64 *start, u64 search_end, u64 max_bytes, | 1435 | u64 *start, u64 search_end, u64 max_bytes, |
1436 | unsigned long bits) | 1436 | unsigned long bits, int contig) |
1437 | { | 1437 | { |
1438 | struct rb_node *node; | 1438 | struct rb_node *node; |
1439 | struct extent_state *state; | 1439 | struct extent_state *state; |
1440 | u64 cur_start = *start; | 1440 | u64 cur_start = *start; |
1441 | u64 total_bytes = 0; | 1441 | u64 total_bytes = 0; |
1442 | u64 last = 0; | ||
1442 | int found = 0; | 1443 | int found = 0; |
1443 | 1444 | ||
1444 | if (search_end <= cur_start) { | 1445 | if (search_end <= cur_start) { |
@@ -1463,7 +1464,9 @@ u64 count_range_bits(struct extent_io_tree *tree, | |||
1463 | state = rb_entry(node, struct extent_state, rb_node); | 1464 | state = rb_entry(node, struct extent_state, rb_node); |
1464 | if (state->start > search_end) | 1465 | if (state->start > search_end) |
1465 | break; | 1466 | break; |
1466 | if (state->end >= cur_start && (state->state & bits)) { | 1467 | if (contig && found && state->start > last + 1) |
1468 | break; | ||
1469 | if (state->end >= cur_start && (state->state & bits) == bits) { | ||
1467 | total_bytes += min(search_end, state->end) + 1 - | 1470 | total_bytes += min(search_end, state->end) + 1 - |
1468 | max(cur_start, state->start); | 1471 | max(cur_start, state->start); |
1469 | if (total_bytes >= max_bytes) | 1472 | if (total_bytes >= max_bytes) |
@@ -1472,6 +1475,9 @@ u64 count_range_bits(struct extent_io_tree *tree, | |||
1472 | *start = state->start; | 1475 | *start = state->start; |
1473 | found = 1; | 1476 | found = 1; |
1474 | } | 1477 | } |
1478 | last = state->end; | ||
1479 | } else if (contig && found) { | ||
1480 | break; | ||
1475 | } | 1481 | } |
1476 | node = rb_next(node); | 1482 | node = rb_next(node); |
1477 | if (!node) | 1483 | if (!node) |
@@ -2912,6 +2918,46 @@ out: | |||
2912 | return sector; | 2918 | return sector; |
2913 | } | 2919 | } |
2914 | 2920 | ||
2921 | /* | ||
2922 | * helper function for fiemap, which doesn't want to see any holes. | ||
2923 | * This maps until we find something past 'last' | ||
2924 | */ | ||
2925 | static struct extent_map *get_extent_skip_holes(struct inode *inode, | ||
2926 | u64 offset, | ||
2927 | u64 last, | ||
2928 | get_extent_t *get_extent) | ||
2929 | { | ||
2930 | u64 sectorsize = BTRFS_I(inode)->root->sectorsize; | ||
2931 | struct extent_map *em; | ||
2932 | u64 len; | ||
2933 | |||
2934 | if (offset >= last) | ||
2935 | return NULL; | ||
2936 | |||
2937 | while(1) { | ||
2938 | len = last - offset; | ||
2939 | if (len == 0) | ||
2940 | break; | ||
2941 | len = (len + sectorsize - 1) & ~(sectorsize - 1); | ||
2942 | em = get_extent(inode, NULL, 0, offset, len, 0); | ||
2943 | if (!em || IS_ERR(em)) | ||
2944 | return em; | ||
2945 | |||
2946 | /* if this isn't a hole return it */ | ||
2947 | if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) && | ||
2948 | em->block_start != EXTENT_MAP_HOLE) { | ||
2949 | return em; | ||
2950 | } | ||
2951 | |||
2952 | /* this is a hole, advance to the next extent */ | ||
2953 | offset = extent_map_end(em); | ||
2954 | free_extent_map(em); | ||
2955 | if (offset >= last) | ||
2956 | break; | ||
2957 | } | ||
2958 | return NULL; | ||
2959 | } | ||
2960 | |||
2915 | int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | 2961 | int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, |
2916 | __u64 start, __u64 len, get_extent_t *get_extent) | 2962 | __u64 start, __u64 len, get_extent_t *get_extent) |
2917 | { | 2963 | { |
@@ -2921,16 +2967,19 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | |||
2921 | u32 flags = 0; | 2967 | u32 flags = 0; |
2922 | u32 found_type; | 2968 | u32 found_type; |
2923 | u64 last; | 2969 | u64 last; |
2970 | u64 last_for_get_extent = 0; | ||
2924 | u64 disko = 0; | 2971 | u64 disko = 0; |
2972 | u64 isize = i_size_read(inode); | ||
2925 | struct btrfs_key found_key; | 2973 | struct btrfs_key found_key; |
2926 | struct extent_map *em = NULL; | 2974 | struct extent_map *em = NULL; |
2927 | struct extent_state *cached_state = NULL; | 2975 | struct extent_state *cached_state = NULL; |
2928 | struct btrfs_path *path; | 2976 | struct btrfs_path *path; |
2929 | struct btrfs_file_extent_item *item; | 2977 | struct btrfs_file_extent_item *item; |
2930 | int end = 0; | 2978 | int end = 0; |
2931 | u64 em_start = 0, em_len = 0; | 2979 | u64 em_start = 0; |
2980 | u64 em_len = 0; | ||
2981 | u64 em_end = 0; | ||
2932 | unsigned long emflags; | 2982 | unsigned long emflags; |
2933 | int hole = 0; | ||
2934 | 2983 | ||
2935 | if (len == 0) | 2984 | if (len == 0) |
2936 | return -EINVAL; | 2985 | return -EINVAL; |
@@ -2940,6 +2989,10 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | |||
2940 | return -ENOMEM; | 2989 | return -ENOMEM; |
2941 | path->leave_spinning = 1; | 2990 | path->leave_spinning = 1; |
2942 | 2991 | ||
2992 | /* | ||
2993 | * lookup the last file extent. We're not using i_size here | ||
2994 | * because there might be preallocation past i_size | ||
2995 | */ | ||
2943 | ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root, | 2996 | ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root, |
2944 | path, inode->i_ino, -1, 0); | 2997 | path, inode->i_ino, -1, 0); |
2945 | if (ret < 0) { | 2998 | if (ret < 0) { |
@@ -2953,18 +3006,38 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | |||
2953 | btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]); | 3006 | btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]); |
2954 | found_type = btrfs_key_type(&found_key); | 3007 | found_type = btrfs_key_type(&found_key); |
2955 | 3008 | ||
2956 | /* No extents, just return */ | 3009 | /* No extents, but there might be delalloc bits */ |
2957 | if (found_key.objectid != inode->i_ino || | 3010 | if (found_key.objectid != inode->i_ino || |
2958 | found_type != BTRFS_EXTENT_DATA_KEY) { | 3011 | found_type != BTRFS_EXTENT_DATA_KEY) { |
2959 | btrfs_free_path(path); | 3012 | /* have to trust i_size as the end */ |
2960 | return 0; | 3013 | last = (u64)-1; |
3014 | last_for_get_extent = isize; | ||
3015 | } else { | ||
3016 | /* | ||
3017 | * remember the start of the last extent. There are a | ||
3018 | * bunch of different factors that go into the length of the | ||
3019 | * extent, so its much less complex to remember where it started | ||
3020 | */ | ||
3021 | last = found_key.offset; | ||
3022 | last_for_get_extent = last + 1; | ||
2961 | } | 3023 | } |
2962 | last = found_key.offset; | ||
2963 | btrfs_free_path(path); | 3024 | btrfs_free_path(path); |
2964 | 3025 | ||
3026 | /* | ||
3027 | * we might have some extents allocated but more delalloc past those | ||
3028 | * extents. so, we trust isize unless the start of the last extent is | ||
3029 | * beyond isize | ||
3030 | */ | ||
3031 | if (last < isize) { | ||
3032 | last = (u64)-1; | ||
3033 | last_for_get_extent = isize; | ||
3034 | } | ||
3035 | |||
2965 | lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0, | 3036 | lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0, |
2966 | &cached_state, GFP_NOFS); | 3037 | &cached_state, GFP_NOFS); |
2967 | em = get_extent(inode, NULL, 0, off, max - off, 0); | 3038 | |
3039 | em = get_extent_skip_holes(inode, off, last_for_get_extent, | ||
3040 | get_extent); | ||
2968 | if (!em) | 3041 | if (!em) |
2969 | goto out; | 3042 | goto out; |
2970 | if (IS_ERR(em)) { | 3043 | if (IS_ERR(em)) { |
@@ -2973,19 +3046,14 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | |||
2973 | } | 3046 | } |
2974 | 3047 | ||
2975 | while (!end) { | 3048 | while (!end) { |
2976 | hole = 0; | 3049 | off = extent_map_end(em); |
2977 | off = em->start + em->len; | ||
2978 | if (off >= max) | 3050 | if (off >= max) |
2979 | end = 1; | 3051 | end = 1; |
2980 | 3052 | ||
2981 | if (em->block_start == EXTENT_MAP_HOLE) { | ||
2982 | hole = 1; | ||
2983 | goto next; | ||
2984 | } | ||
2985 | |||
2986 | em_start = em->start; | 3053 | em_start = em->start; |
2987 | em_len = em->len; | 3054 | em_len = em->len; |
2988 | 3055 | em_end = extent_map_end(em); | |
3056 | emflags = em->flags; | ||
2989 | disko = 0; | 3057 | disko = 0; |
2990 | flags = 0; | 3058 | flags = 0; |
2991 | 3059 | ||
@@ -3004,37 +3072,29 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | |||
3004 | if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) | 3072 | if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) |
3005 | flags |= FIEMAP_EXTENT_ENCODED; | 3073 | flags |= FIEMAP_EXTENT_ENCODED; |
3006 | 3074 | ||
3007 | next: | ||
3008 | emflags = em->flags; | ||
3009 | free_extent_map(em); | 3075 | free_extent_map(em); |
3010 | em = NULL; | 3076 | em = NULL; |
3011 | if (!end) { | 3077 | if ((em_start >= last) || em_len == (u64)-1 || |
3012 | em = get_extent(inode, NULL, 0, off, max - off, 0); | 3078 | (last == (u64)-1 && isize <= em_end)) { |
3013 | if (!em) | ||
3014 | goto out; | ||
3015 | if (IS_ERR(em)) { | ||
3016 | ret = PTR_ERR(em); | ||
3017 | goto out; | ||
3018 | } | ||
3019 | emflags = em->flags; | ||
3020 | } | ||
3021 | |||
3022 | if (test_bit(EXTENT_FLAG_VACANCY, &emflags)) { | ||
3023 | flags |= FIEMAP_EXTENT_LAST; | 3079 | flags |= FIEMAP_EXTENT_LAST; |
3024 | end = 1; | 3080 | end = 1; |
3025 | } | 3081 | } |
3026 | 3082 | ||
3027 | if (em_start == last) { | 3083 | /* now scan forward to see if this is really the last extent. */ |
3084 | em = get_extent_skip_holes(inode, off, last_for_get_extent, | ||
3085 | get_extent); | ||
3086 | if (IS_ERR(em)) { | ||
3087 | ret = PTR_ERR(em); | ||
3088 | goto out; | ||
3089 | } | ||
3090 | if (!em) { | ||
3028 | flags |= FIEMAP_EXTENT_LAST; | 3091 | flags |= FIEMAP_EXTENT_LAST; |
3029 | end = 1; | 3092 | end = 1; |
3030 | } | 3093 | } |
3031 | 3094 | ret = fiemap_fill_next_extent(fieinfo, em_start, disko, | |
3032 | if (!hole) { | 3095 | em_len, flags); |
3033 | ret = fiemap_fill_next_extent(fieinfo, em_start, disko, | 3096 | if (ret) |
3034 | em_len, flags); | 3097 | goto out_free; |
3035 | if (ret) | ||
3036 | goto out_free; | ||
3037 | } | ||
3038 | } | 3098 | } |
3039 | out_free: | 3099 | out_free: |
3040 | free_extent_map(em); | 3100 | free_extent_map(em); |
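
The old inline hole handling (the hole flag and the next: label) is gone because lookups now go through get_extent_skip_holes(), a helper introduced in an earlier hunk of this patch, and FIEMAP_EXTENT_LAST is decided by looking ahead: if the next non-hole lookup comes back empty, the extent about to be reported is the last one. A minimal sketch of the hole-skipping idea, inferred from the call sites above rather than quoted from the patch:

	static struct extent_map *get_extent_skip_holes(struct inode *inode,
							u64 offset, u64 last,
							get_extent_t *get_extent)
	{
		struct extent_map *em;

		while (offset < last) {
			em = get_extent(inode, NULL, 0, offset, last - offset, 0);
			if (!em || IS_ERR(em))
				return em;

			/* anything that is not a pure hole is worth reporting */
			if (em->block_start != EXTENT_MAP_HOLE)
				return em;

			/* a hole: step past it and keep looking */
			offset = extent_map_end(em);
			free_extent_map(em);
		}

		/* nothing but holes up to 'last' */
		return NULL;
	}
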
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 7083cfafd061..9318dfefd59c 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h | |||
@@ -191,7 +191,7 @@ void extent_io_exit(void); | |||
191 | 191 | ||
192 | u64 count_range_bits(struct extent_io_tree *tree, | 192 | u64 count_range_bits(struct extent_io_tree *tree, |
193 | u64 *start, u64 search_end, | 193 | u64 *start, u64 search_end, |
194 | u64 max_bytes, unsigned long bits); | 194 | u64 max_bytes, unsigned long bits, int contig); |
195 | 195 | ||
196 | void free_extent_state(struct extent_state *state); | 196 | void free_extent_state(struct extent_state *state); |
197 | int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, | 197 | int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index fb9bd7832b6d..0efdb65953c5 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -1913,7 +1913,7 @@ static int btrfs_clean_io_failures(struct inode *inode, u64 start) | |||
1913 | 1913 | ||
1914 | private = 0; | 1914 | private = 0; |
1915 | if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private, | 1915 | if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private, |
1916 | (u64)-1, 1, EXTENT_DIRTY)) { | 1916 | (u64)-1, 1, EXTENT_DIRTY, 0)) { |
1917 | ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, | 1917 | ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, |
1918 | start, &private_failure); | 1918 | start, &private_failure); |
1919 | if (ret == 0) { | 1919 | if (ret == 0) { |
@@ -5280,6 +5280,128 @@ out: | |||
5280 | return em; | 5280 | return em; |
5281 | } | 5281 | } |
5282 | 5282 | ||
5283 | struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page, | ||
5284 | size_t pg_offset, u64 start, u64 len, | ||
5285 | int create) | ||
5286 | { | ||
5287 | struct extent_map *em; | ||
5288 | struct extent_map *hole_em = NULL; | ||
5289 | u64 range_start = start; | ||
5290 | u64 end; | ||
5291 | u64 found; | ||
5292 | u64 found_end; | ||
5293 | int err = 0; | ||
5294 | |||
5295 | em = btrfs_get_extent(inode, page, pg_offset, start, len, create); | ||
5296 | if (IS_ERR(em)) | ||
5297 | return em; | ||
5298 | if (em) { | ||
5299 | /* | ||
5300 | * if our em maps to a hole, there might | ||
5301 | * actually be delalloc bytes behind it | ||
5302 | */ | ||
5303 | if (em->block_start != EXTENT_MAP_HOLE) | ||
5304 | return em; | ||
5305 | else | ||
5306 | hole_em = em; | ||
5307 | } | ||
5308 | |||
5309 | /* check to see if we've wrapped (len == -1 or similar) */ | ||
5310 | end = start + len; | ||
5311 | if (end < start) | ||
5312 | end = (u64)-1; | ||
5313 | else | ||
5314 | end -= 1; | ||
5315 | |||
5316 | em = NULL; | ||
5317 | |||
5318 | /* ok, we didn't find anything, let's look for delalloc */ | ||
5319 | found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start, | ||
5320 | end, len, EXTENT_DELALLOC, 1); | ||
5321 | found_end = range_start + found; | ||
5322 | if (found_end < range_start) | ||
5323 | found_end = (u64)-1; | ||
5324 | |||
5325 | /* | ||
5326 | * we didn't find anything useful, return | ||
5327 | * the original results from get_extent() | ||
5328 | */ | ||
5329 | if (range_start > end || found_end <= start) { | ||
5330 | em = hole_em; | ||
5331 | hole_em = NULL; | ||
5332 | goto out; | ||
5333 | } | ||
5334 | |||
5335 | /* adjust the range_start to make sure it doesn't | ||
5336 | * go backwards from the start they passed in | ||
5337 | */ | ||
5338 | range_start = max(start, range_start); | ||
5339 | found = found_end - range_start; | ||
5340 | |||
5341 | if (found > 0) { | ||
5342 | u64 hole_start = start; | ||
5343 | u64 hole_len = len; | ||
5344 | |||
5345 | em = alloc_extent_map(GFP_NOFS); | ||
5346 | if (!em) { | ||
5347 | err = -ENOMEM; | ||
5348 | goto out; | ||
5349 | } | ||
5350 | /* | ||
5351 | * when btrfs_get_extent can't find anything it | ||
5352 | * returns one huge hole | ||
5353 | * | ||
5354 | * make sure what it found really fits our range, and | ||
5355 | * adjust to make sure it is based on the start from | ||
5356 | * the caller | ||
5357 | */ | ||
5358 | if (hole_em) { | ||
5359 | u64 calc_end = extent_map_end(hole_em); | ||
5360 | |||
5361 | if (calc_end <= start || (hole_em->start > end)) { | ||
5362 | free_extent_map(hole_em); | ||
5363 | hole_em = NULL; | ||
5364 | } else { | ||
5365 | hole_start = max(hole_em->start, start); | ||
5366 | hole_len = calc_end - hole_start; | ||
5367 | } | ||
5368 | } | ||
5369 | em->bdev = NULL; | ||
5370 | if (hole_em && range_start > hole_start) { | ||
5371 | /* our hole starts before our delalloc, so we | ||
5372 | * have to return just the parts of the hole | ||
5373 | * that go until the delalloc starts | ||
5374 | */ | ||
5375 | em->len = min(hole_len, | ||
5376 | range_start - hole_start); | ||
5377 | em->start = hole_start; | ||
5378 | em->orig_start = hole_start; | ||
5379 | /* | ||
5380 | * don't adjust block start at all, | ||
5381 | * it is fixed at EXTENT_MAP_HOLE | ||
5382 | */ | ||
5383 | em->block_start = hole_em->block_start; | ||
5384 | em->block_len = hole_len; | ||
5385 | } else { | ||
5386 | em->start = range_start; | ||
5387 | em->len = found; | ||
5388 | em->orig_start = range_start; | ||
5389 | em->block_start = EXTENT_MAP_DELALLOC; | ||
5390 | em->block_len = found; | ||
5391 | } | ||
5392 | } else if (hole_em) { | ||
5393 | return hole_em; | ||
5394 | } | ||
5395 | out: | ||
5396 | |||
5397 | free_extent_map(hole_em); | ||
5398 | if (err) { | ||
5399 | free_extent_map(em); | ||
5400 | return ERR_PTR(err); | ||
5401 | } | ||
5402 | return em; | ||
5403 | } | ||
5404 | |||
5283 | static struct extent_map *btrfs_new_extent_direct(struct inode *inode, | 5405 | static struct extent_map *btrfs_new_extent_direct(struct inode *inode, |
5284 | u64 start, u64 len) | 5406 | u64 start, u64 len) |
5285 | { | 5407 | { |
@@ -6102,7 +6224,7 @@ out: | |||
6102 | static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | 6224 | static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, |
6103 | __u64 start, __u64 len) | 6225 | __u64 start, __u64 len) |
6104 | { | 6226 | { |
6105 | return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent); | 6227 | return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap); |
6106 | } | 6228 | } |
6107 | 6229 | ||
6108 | int btrfs_readpage(struct file *file, struct page *page) | 6230 | int btrfs_readpage(struct file *file, struct page *page) |
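
With btrfs_get_extent_fiemap() wired in as the fiemap callback, a FIEMAP query issued without FIEMAP_FLAG_SYNC can now report buffered-but-unwritten (delalloc) ranges instead of presenting them as holes. A small userspace sketch of such a query; it is illustrative only, and the expectation that delalloc ranges come back with FIEMAP_EXTENT_DELALLOC set relies on flag-mapping code in extent_fiemap() that lies outside the hunks shown here:

	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>
	#include <linux/fiemap.h>

	int main(int argc, char **argv)
	{
		struct fiemap *fm;
		size_t sz = sizeof(*fm) + 32 * sizeof(struct fiemap_extent);
		unsigned int i;
		int fd;

		if (argc < 2)
			return 1;
		fd = open(argv[1], O_RDONLY);
		if (fd < 0)
			return 1;

		fm = calloc(1, sz);
		if (!fm)
			return 1;
		fm->fm_length = ~0ULL;		/* whole file */
		fm->fm_flags = 0;		/* no FIEMAP_FLAG_SYNC: delalloc stays unflushed */
		fm->fm_extent_count = 32;	/* 32 extents assumed to be enough here */

		if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0)
			return 1;

		for (i = 0; i < fm->fm_mapped_extents; i++)
			printf("logical %llu len %llu flags 0x%x\n",
			       (unsigned long long)fm->fm_extents[i].fe_logical,
			       (unsigned long long)fm->fm_extents[i].fe_length,
			       fm->fm_extents[i].fe_flags);
		return 0;
	}
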
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index be2d4f6aaa5e..5fdb2abc4fa7 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c | |||
@@ -1071,12 +1071,15 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file, | |||
1071 | if (copy_from_user(&flags, arg, sizeof(flags))) | 1071 | if (copy_from_user(&flags, arg, sizeof(flags))) |
1072 | return -EFAULT; | 1072 | return -EFAULT; |
1073 | 1073 | ||
1074 | if (flags & ~BTRFS_SUBVOL_CREATE_ASYNC) | 1074 | if (flags & BTRFS_SUBVOL_CREATE_ASYNC) |
1075 | return -EINVAL; | 1075 | return -EINVAL; |
1076 | 1076 | ||
1077 | if (flags & ~BTRFS_SUBVOL_RDONLY) | 1077 | if (flags & ~BTRFS_SUBVOL_RDONLY) |
1078 | return -EOPNOTSUPP; | 1078 | return -EOPNOTSUPP; |
1079 | 1079 | ||
1080 | if (!is_owner_or_cap(inode)) | ||
1081 | return -EACCES; | ||
1082 | |||
1080 | down_write(&root->fs_info->subvol_sem); | 1083 | down_write(&root->fs_info->subvol_sem); |
1081 | 1084 | ||
1082 | /* nothing to do */ | 1085 | /* nothing to do */ |
@@ -1097,7 +1100,7 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file, | |||
1097 | goto out_reset; | 1100 | goto out_reset; |
1098 | } | 1101 | } |
1099 | 1102 | ||
1100 | ret = btrfs_update_root(trans, root, | 1103 | ret = btrfs_update_root(trans, root->fs_info->tree_root, |
1101 | &root->root_key, &root->root_item); | 1104 | &root->root_key, &root->root_item); |
1102 | 1105 | ||
1103 | btrfs_commit_transaction(trans, root); | 1106 | btrfs_commit_transaction(trans, root); |
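
Three fixes land in this file: the ioctl now demands ownership or CAP_FOWNER before changing subvolume flags, the root item update is issued against root->fs_info->tree_root (root items live in the tree of tree roots, not in the subvolume's own tree), and the flag validation had its mask inverted. A worked example of the mask fix, with the flag values assumed from the btrfs ioctl ABI:

	/*
	 * Assumed values: BTRFS_SUBVOL_CREATE_ASYNC = 1 << 0,
	 *                 BTRFS_SUBVOL_RDONLY       = 1 << 1.
	 *
	 *   flags == BTRFS_SUBVOL_RDONLY            (0x2)
	 *     old: 0x2 & ~0x1 = 0x2 -> -EINVAL, so the one flag this ioctl is
	 *          meant to toggle could never get past the first check
	 *     new: 0x2 &  0x1 = 0   -> falls through to the read-only handling
	 *
	 *   flags == BTRFS_SUBVOL_CREATE_ASYNC      (0x1)
	 *     old: only caught by the second check, as -EOPNOTSUPP
	 *     new: rejected up front with -EINVAL, since async creation has
	 *          no meaning for a setflags call
	 */
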
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c index cc9b450399df..a178f5ebea78 100644 --- a/fs/btrfs/lzo.c +++ b/fs/btrfs/lzo.c | |||
@@ -280,6 +280,7 @@ static int lzo_decompress_biovec(struct list_head *ws, | |||
280 | unsigned long tot_out; | 280 | unsigned long tot_out; |
281 | unsigned long tot_len; | 281 | unsigned long tot_len; |
282 | char *buf; | 282 | char *buf; |
283 | bool may_late_unmap, need_unmap; | ||
283 | 284 | ||
284 | data_in = kmap(pages_in[0]); | 285 | data_in = kmap(pages_in[0]); |
285 | tot_len = read_compress_length(data_in); | 286 | tot_len = read_compress_length(data_in); |
@@ -300,11 +301,13 @@ static int lzo_decompress_biovec(struct list_head *ws, | |||
300 | 301 | ||
301 | tot_in += in_len; | 302 | tot_in += in_len; |
302 | working_bytes = in_len; | 303 | working_bytes = in_len; |
304 | may_late_unmap = need_unmap = false; | ||
303 | 305 | ||
304 | /* fast path: avoid using the working buffer */ | 306 | /* fast path: avoid using the working buffer */ |
305 | if (in_page_bytes_left >= in_len) { | 307 | if (in_page_bytes_left >= in_len) { |
306 | buf = data_in + in_offset; | 308 | buf = data_in + in_offset; |
307 | bytes = in_len; | 309 | bytes = in_len; |
310 | may_late_unmap = true; | ||
308 | goto cont; | 311 | goto cont; |
309 | } | 312 | } |
310 | 313 | ||
@@ -329,14 +332,17 @@ cont: | |||
329 | if (working_bytes == 0 && tot_in >= tot_len) | 332 | if (working_bytes == 0 && tot_in >= tot_len) |
330 | break; | 333 | break; |
331 | 334 | ||
332 | kunmap(pages_in[page_in_index]); | 335 | if (page_in_index + 1 >= total_pages_in) { |
333 | page_in_index++; | ||
334 | if (page_in_index >= total_pages_in) { | ||
335 | ret = -1; | 336 | ret = -1; |
336 | data_in = NULL; | ||
337 | goto done; | 337 | goto done; |
338 | } | 338 | } |
339 | data_in = kmap(pages_in[page_in_index]); | 339 | |
340 | if (may_late_unmap) | ||
341 | need_unmap = true; | ||
342 | else | ||
343 | kunmap(pages_in[page_in_index]); | ||
344 | |||
345 | data_in = kmap(pages_in[++page_in_index]); | ||
340 | 346 | ||
341 | in_page_bytes_left = PAGE_CACHE_SIZE; | 347 | in_page_bytes_left = PAGE_CACHE_SIZE; |
342 | in_offset = 0; | 348 | in_offset = 0; |
@@ -346,6 +352,8 @@ cont: | |||
346 | out_len = lzo1x_worst_compress(PAGE_CACHE_SIZE); | 352 | out_len = lzo1x_worst_compress(PAGE_CACHE_SIZE); |
347 | ret = lzo1x_decompress_safe(buf, in_len, workspace->buf, | 353 | ret = lzo1x_decompress_safe(buf, in_len, workspace->buf, |
348 | &out_len); | 354 | &out_len); |
355 | if (need_unmap) | ||
356 | kunmap(pages_in[page_in_index - 1]); | ||
349 | if (ret != LZO_E_OK) { | 357 | if (ret != LZO_E_OK) { |
350 | printk(KERN_WARNING "btrfs decompress failed\n"); | 358 | printk(KERN_WARNING "btrfs decompress failed\n"); |
351 | ret = -1; | 359 | ret = -1; |
@@ -363,8 +371,7 @@ cont: | |||
363 | break; | 371 | break; |
364 | } | 372 | } |
365 | done: | 373 | done: |
366 | if (data_in) | 374 | kunmap(pages_in[page_in_index]); |
367 | kunmap(pages_in[page_in_index]); | ||
368 | return ret; | 375 | return ret; |
369 | } | 376 | } |
370 | 377 | ||
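
The kmap shuffling above fixes a use-after-unmap: on the fast path buf aliases the currently mapped input page (buf = data_in + in_offset), and the old code unmapped that page when it advanced to the next input page, before lzo1x_decompress_safe() had read from buf. A sketch of the bad ordering and how the two flags avoid it:

	/*
	 * Old ordering that could go wrong (sketch):
	 *
	 *   buf = data_in + in_offset;         fast path: buf aliases page N
	 *   ...loop decides it needs the next input page...
	 *   kunmap(pages_in[N]);               page N unmapped here
	 *   data_in = kmap(pages_in[N + 1]);
	 *   lzo1x_decompress_safe(buf, ...);   buf still points into page N
	 *
	 * With the patch, the fast path sets may_late_unmap; the page switch
	 * then only marks need_unmap instead of unmapping, and
	 * pages_in[page_in_index - 1] is released only after the decompress
	 * call has consumed buf.
	 */
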
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 0825e4ed9447..31ade5802ae8 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c | |||
@@ -3654,6 +3654,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc) | |||
3654 | u32 item_size; | 3654 | u32 item_size; |
3655 | int ret; | 3655 | int ret; |
3656 | int err = 0; | 3656 | int err = 0; |
3657 | int progress = 0; | ||
3657 | 3658 | ||
3658 | path = btrfs_alloc_path(); | 3659 | path = btrfs_alloc_path(); |
3659 | if (!path) | 3660 | if (!path) |
@@ -3666,9 +3667,10 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc) | |||
3666 | } | 3667 | } |
3667 | 3668 | ||
3668 | while (1) { | 3669 | while (1) { |
3670 | progress++; | ||
3669 | trans = btrfs_start_transaction(rc->extent_root, 0); | 3671 | trans = btrfs_start_transaction(rc->extent_root, 0); |
3670 | BUG_ON(IS_ERR(trans)); | 3672 | BUG_ON(IS_ERR(trans)); |
3671 | 3673 | restart: | |
3672 | if (update_backref_cache(trans, &rc->backref_cache)) { | 3674 | if (update_backref_cache(trans, &rc->backref_cache)) { |
3673 | btrfs_end_transaction(trans, rc->extent_root); | 3675 | btrfs_end_transaction(trans, rc->extent_root); |
3674 | continue; | 3676 | continue; |
@@ -3781,6 +3783,15 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc) | |||
3781 | } | 3783 | } |
3782 | } | 3784 | } |
3783 | } | 3785 | } |
3786 | if (trans && progress && err == -ENOSPC) { | ||
3787 | ret = btrfs_force_chunk_alloc(trans, rc->extent_root, | ||
3788 | rc->block_group->flags); | ||
3789 | if (ret == 0) { | ||
3790 | err = 0; | ||
3791 | progress = 0; | ||
3792 | goto restart; | ||
3793 | } | ||
3794 | } | ||
3784 | 3795 | ||
3785 | btrfs_release_path(rc->extent_root, path); | 3796 | btrfs_release_path(rc->extent_root, path); |
3786 | clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY, | 3797 | clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY, |
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index a004008f7d28..d39a9895d932 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c | |||
@@ -155,7 +155,8 @@ enum { | |||
155 | Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl, Opt_compress, | 155 | Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl, Opt_compress, |
156 | Opt_compress_type, Opt_compress_force, Opt_compress_force_type, | 156 | Opt_compress_type, Opt_compress_force, Opt_compress_force_type, |
157 | Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard, | 157 | Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard, |
158 | Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, Opt_err, | 158 | Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, |
159 | Opt_enospc_debug, Opt_err, | ||
159 | }; | 160 | }; |
160 | 161 | ||
161 | static match_table_t tokens = { | 162 | static match_table_t tokens = { |
@@ -184,6 +185,7 @@ static match_table_t tokens = { | |||
184 | {Opt_space_cache, "space_cache"}, | 185 | {Opt_space_cache, "space_cache"}, |
185 | {Opt_clear_cache, "clear_cache"}, | 186 | {Opt_clear_cache, "clear_cache"}, |
186 | {Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"}, | 187 | {Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"}, |
188 | {Opt_enospc_debug, "enospc_debug"}, | ||
187 | {Opt_err, NULL}, | 189 | {Opt_err, NULL}, |
188 | }; | 190 | }; |
189 | 191 | ||
@@ -358,6 +360,9 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) | |||
358 | case Opt_user_subvol_rm_allowed: | 360 | case Opt_user_subvol_rm_allowed: |
359 | btrfs_set_opt(info->mount_opt, USER_SUBVOL_RM_ALLOWED); | 361 | btrfs_set_opt(info->mount_opt, USER_SUBVOL_RM_ALLOWED); |
360 | break; | 362 | break; |
363 | case Opt_enospc_debug: | ||
364 | btrfs_set_opt(info->mount_opt, ENOSPC_DEBUG); | ||
365 | break; | ||
361 | case Opt_err: | 366 | case Opt_err: |
362 | printk(KERN_INFO "btrfs: unrecognized mount option " | 367 | printk(KERN_INFO "btrfs: unrecognized mount option " |
363 | "'%s'\n", p); | 368 | "'%s'\n", p); |
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index af7dbca15276..dd13eb81ee40 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
@@ -1338,11 +1338,11 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) | |||
1338 | 1338 | ||
1339 | ret = btrfs_shrink_device(device, 0); | 1339 | ret = btrfs_shrink_device(device, 0); |
1340 | if (ret) | 1340 | if (ret) |
1341 | goto error_brelse; | 1341 | goto error_undo; |
1342 | 1342 | ||
1343 | ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device); | 1343 | ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device); |
1344 | if (ret) | 1344 | if (ret) |
1345 | goto error_brelse; | 1345 | goto error_undo; |
1346 | 1346 | ||
1347 | device->in_fs_metadata = 0; | 1347 | device->in_fs_metadata = 0; |
1348 | 1348 | ||
@@ -1416,6 +1416,13 @@ out: | |||
1416 | mutex_unlock(&root->fs_info->volume_mutex); | 1416 | mutex_unlock(&root->fs_info->volume_mutex); |
1417 | mutex_unlock(&uuid_mutex); | 1417 | mutex_unlock(&uuid_mutex); |
1418 | return ret; | 1418 | return ret; |
1419 | error_undo: | ||
1420 | if (device->writeable) { | ||
1421 | list_add(&device->dev_alloc_list, | ||
1422 | &root->fs_info->fs_devices->alloc_list); | ||
1423 | root->fs_info->fs_devices->rw_devices++; | ||
1424 | } | ||
1425 | goto error_brelse; | ||
1419 | } | 1426 | } |
1420 | 1427 | ||
1421 | /* | 1428 | /* |
@@ -1633,7 +1640,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) | |||
1633 | device->dev_root = root->fs_info->dev_root; | 1640 | device->dev_root = root->fs_info->dev_root; |
1634 | device->bdev = bdev; | 1641 | device->bdev = bdev; |
1635 | device->in_fs_metadata = 1; | 1642 | device->in_fs_metadata = 1; |
1636 | device->mode = 0; | 1643 | device->mode = FMODE_EXCL; |
1637 | set_blocksize(device->bdev, 4096); | 1644 | set_blocksize(device->bdev, 4096); |
1638 | 1645 | ||
1639 | if (seeding_dev) { | 1646 | if (seeding_dev) { |
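
The new error_undo label matters because btrfs_rm_device takes a writeable device out of circulation before it starts the work that can fail; that earlier step is outside the hunks shown here, so the first lines of the sketch below are an assumption about the surrounding code. Jumping straight to error_brelse after a failed shrink or dev-item removal would leave that bookkeeping half undone. The FMODE_EXCL assignment in the second hunk records the mode the device was opened with, presumably so the eventual blkdev_put() drops the matching exclusive claim.

	/*
	 * Implied ordering inside btrfs_rm_device (sketch, first step assumed):
	 *
	 *   if (device->writeable) {
	 *           list_del_init(&device->dev_alloc_list);
	 *           root->fs_info->fs_devices->rw_devices--;
	 *   }
	 *   ret = btrfs_shrink_device(device, 0);         on failure -> error_undo
	 *   ret = btrfs_rm_dev_item(chunk_root, device);  on failure -> error_undo
	 *
	 * error_undo puts the device back on alloc_list and restores
	 * rw_devices before falling through to error_brelse, so a failed
	 * removal leaves the allocator state as it found it.
	 */
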
diff --git a/fs/inode.c b/fs/inode.c index 9c2b795ccc93..0647d80accf6 100644 --- a/fs/inode.c +++ b/fs/inode.c | |||
@@ -548,11 +548,14 @@ void evict_inodes(struct super_block *sb) | |||
548 | /** | 548 | /** |
549 | * invalidate_inodes - attempt to free all inodes on a superblock | 549 | * invalidate_inodes - attempt to free all inodes on a superblock |
550 | * @sb: superblock to operate on | 550 | * @sb: superblock to operate on |
551 | * @kill_dirty: discard dirty inodes instead of treating them as busy | ||
551 | * | 552 | * |
552 | * Attempts to free all inodes for a given superblock. If there were any | 553 | * Attempts to free all inodes for a given superblock. If there were any |
553 | * busy inodes return a non-zero value, else zero. | 554 | * busy inodes return a non-zero value, else zero. |
555 | * If @kill_dirty is set, discard dirty inodes too, otherwise treat | ||
556 | * them as busy. | ||
554 | */ | 557 | */ |
555 | int invalidate_inodes(struct super_block *sb) | 558 | int invalidate_inodes(struct super_block *sb, bool kill_dirty) |
556 | { | 559 | { |
557 | int busy = 0; | 560 | int busy = 0; |
558 | struct inode *inode, *next; | 561 | struct inode *inode, *next; |
@@ -564,6 +567,10 @@ int invalidate_inodes(struct super_block *sb) | |||
564 | list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) { | 567 | list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) { |
565 | if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) | 568 | if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) |
566 | continue; | 569 | continue; |
570 | if (inode->i_state & I_DIRTY && !kill_dirty) { | ||
571 | busy = 1; | ||
572 | continue; | ||
573 | } | ||
567 | if (atomic_read(&inode->i_count)) { | 574 | if (atomic_read(&inode->i_count)) { |
568 | busy = 1; | 575 | busy = 1; |
569 | continue; | 576 | continue; |
diff --git a/fs/internal.h b/fs/internal.h index 0663568b1247..9b976b57d7fe 100644 --- a/fs/internal.h +++ b/fs/internal.h | |||
@@ -112,4 +112,4 @@ extern void release_open_intent(struct nameidata *); | |||
112 | */ | 112 | */ |
113 | extern int get_nr_dirty_inodes(void); | 113 | extern int get_nr_dirty_inodes(void); |
114 | extern void evict_inodes(struct super_block *); | 114 | extern void evict_inodes(struct super_block *); |
115 | extern int invalidate_inodes(struct super_block *); | 115 | extern int invalidate_inodes(struct super_block *, bool); |
diff --git a/include/linux/fs.h b/include/linux/fs.h index 97d08d8a7de8..e38b50a4b9d2 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -2140,7 +2140,7 @@ extern void check_disk_size_change(struct gendisk *disk, | |||
2140 | struct block_device *bdev); | 2140 | struct block_device *bdev); |
2141 | extern int revalidate_disk(struct gendisk *); | 2141 | extern int revalidate_disk(struct gendisk *); |
2142 | extern int check_disk_change(struct block_device *); | 2142 | extern int check_disk_change(struct block_device *); |
2143 | extern int __invalidate_device(struct block_device *); | 2143 | extern int __invalidate_device(struct block_device *, bool); |
2144 | extern int invalidate_partition(struct gendisk *, int); | 2144 | extern int invalidate_partition(struct gendisk *, int); |
2145 | #endif | 2145 | #endif |
2146 | unsigned long invalidate_mapping_pages(struct address_space *mapping, | 2146 | unsigned long invalidate_mapping_pages(struct address_space *mapping, |
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c index 5ee16f0353fe..d763793d39de 100644 --- a/net/rxrpc/ar-key.c +++ b/net/rxrpc/ar-key.c | |||
@@ -89,11 +89,11 @@ static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr, | |||
89 | return ret; | 89 | return ret; |
90 | 90 | ||
91 | plen -= sizeof(*token); | 91 | plen -= sizeof(*token); |
92 | token = kmalloc(sizeof(*token), GFP_KERNEL); | 92 | token = kzalloc(sizeof(*token), GFP_KERNEL); |
93 | if (!token) | 93 | if (!token) |
94 | return -ENOMEM; | 94 | return -ENOMEM; |
95 | 95 | ||
96 | token->kad = kmalloc(plen, GFP_KERNEL); | 96 | token->kad = kzalloc(plen, GFP_KERNEL); |
97 | if (!token->kad) { | 97 | if (!token->kad) { |
98 | kfree(token); | 98 | kfree(token); |
99 | return -ENOMEM; | 99 | return -ENOMEM; |
@@ -731,10 +731,10 @@ static int rxrpc_instantiate(struct key *key, const void *data, size_t datalen) | |||
731 | goto error; | 731 | goto error; |
732 | 732 | ||
733 | ret = -ENOMEM; | 733 | ret = -ENOMEM; |
734 | token = kmalloc(sizeof(*token), GFP_KERNEL); | 734 | token = kzalloc(sizeof(*token), GFP_KERNEL); |
735 | if (!token) | 735 | if (!token) |
736 | goto error; | 736 | goto error; |
737 | token->kad = kmalloc(plen, GFP_KERNEL); | 737 | token->kad = kzalloc(plen, GFP_KERNEL); |
738 | if (!token->kad) | 738 | if (!token->kad) |
739 | goto error_free; | 739 | goto error_free; |
740 | 740 | ||
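
Switching these allocations to kzalloc() means any token field the XDR parser leaves untouched reads back as zero rather than stale heap contents. A minimal sketch of the pattern; the struct layout is invented for illustration and is not the real rxrpc token:

	#include <linux/slab.h>
	#include <linux/types.h>

	/* hypothetical layout, for illustration only */
	struct example_token {
		u32 vice_id;
		u32 start;		/* optional: some responses omit it */
		u8  session_key[8];
	};

	static struct example_token *example_token_alloc(void)
	{
		/* zeroed up front, so an omitted ->start is 0, not garbage */
		return kzalloc(sizeof(struct example_token), GFP_KERNEL);
	}
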