author     Linus Torvalds <torvalds@linux-foundation.org>   2011-04-05 18:29:01 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2011-04-05 18:29:01 -0400
commit     44148a667d3715f3a1c37eeff7e954c946cc1efe (patch)
tree       fd8685dd17dcef45993b733f4ec4df97e6808304
parent     d0de4dc584ec6aa3b26fffea320a8457827768fc (diff)
parent     782b86e2656762382ae1c2686d8d5c91f7d5eacf (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-2.6-block
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-2.6-block:
ide: always ensure that blk_delay_queue() is called if we have pending IO
block: fix request sorting at unplug
dm: improve block integrity support
fs: export empty_aops
ide: ide_requeue_and_plug() reinstate "always plug" behaviour
blk-throttle: don't call xchg on bool
ufs: remove unnecessary blk_flush_plug
block: make the flush insertion use the tail of the dispatch list
block: get rid of elv_insert() interface
block: dump request state on seeing a corrupted request completion
-rw-r--r--   block/blk-core.c           |   4
-rw-r--r--   block/blk-flush.c          |   6
-rw-r--r--   block/blk-integrity.c      |  12
-rw-r--r--   block/blk-throttle.c       |   4
-rw-r--r--   block/elevator.c           |  35
-rw-r--r--   drivers/ide/ide-io.c       |  43
-rw-r--r--   drivers/md/dm-table.c      | 114
-rw-r--r--   fs/inode.c                 |   9
-rw-r--r--   fs/nilfs2/page.c           |   2
-rw-r--r--   fs/ubifs/xattr.c           |   4
-rw-r--r--   fs/ufs/truncate.c          |   1
-rw-r--r--   include/linux/blkdev.h     |   2
-rw-r--r--   include/linux/elevator.h   |   1
-rw-r--r--   include/linux/fs.h         |   2
14 files changed, 148 insertions, 91 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index e0a062363937..725091d5496d 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2163,7 +2163,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 	 * size, something has gone terribly wrong.
 	 */
 	if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
-		printk(KERN_ERR "blk: request botched\n");
+		blk_dump_rq_flags(req, "request botched");
 		req->__data_len = blk_rq_cur_bytes(req);
 	}

@@ -2665,7 +2665,7 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
 	struct request *rqa = container_of(a, struct request, queuelist);
 	struct request *rqb = container_of(b, struct request, queuelist);

-	return !(rqa->q == rqb->q);
+	return !(rqa->q <= rqb->q);
 }

 static void flush_plug_list(struct blk_plug *plug)
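The plug_rq_cmp() change above ("block: fix request sorting at unplug") is easy to misread: list_sort() treats a positive return as "a sorts after b", and the old !(rqa->q == rqb->q) reports every pair of requests from different queues as out of order, which is not a consistent ordering, so the sort could not actually group requests by queue. Ordering on the queue pointer value itself does. Below is a standalone user-space sketch of the idea, not kernel code: a hypothetical struct req and plain qsort() stand in for struct request and list_sort(), and qsort() wants a three-way result where the kernel comparator can get away with 0/1.

/*
 * Sketch only: group "requests" by queue so each queue's requests are
 * dispatched together, which is what the unplug path wants.
 */
#include <stdio.h>
#include <stdlib.h>

struct req {
        int q;          /* stand-in for the request_queue pointer */
        int id;         /* just to tell the requests apart */
};

static int plug_rq_cmp(const void *a, const void *b)
{
        const struct req *rqa = a, *rqb = b;

        /* a consistent ordering on the queue, unlike !(rqa->q == rqb->q) */
        return (rqa->q > rqb->q) - (rqa->q < rqb->q);
}

int main(void)
{
        struct req reqs[] = {
                { 2, 0 }, { 1, 1 }, { 2, 2 }, { 1, 3 }, { 3, 4 }, { 1, 5 },
        };
        size_t i, n = sizeof(reqs) / sizeof(reqs[0]);

        qsort(reqs, n, sizeof(reqs[0]), plug_rq_cmp);   /* groups by queue */
        for (i = 0; i < n; i++)
                printf("q=%d id=%d\n", reqs[i].q, reqs[i].id);
        return 0;
}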
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 93d5fd8e51eb..eba4a2790c6c 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -261,7 +261,7 @@ static bool blk_kick_flush(struct request_queue *q)
 	q->flush_rq.end_io = flush_end_io;

 	q->flush_pending_idx ^= 1;
-	elv_insert(q, &q->flush_rq, ELEVATOR_INSERT_REQUEUE);
+	list_add_tail(&q->flush_rq.queuelist, &q->queue_head);
 	return true;
 }

@@ -281,7 +281,7 @@ static void flush_data_end_io(struct request *rq, int error)
 * blk_insert_flush - insert a new FLUSH/FUA request
 * @rq: request to insert
 *
- * To be called from elv_insert() for %ELEVATOR_INSERT_FLUSH insertions.
+ * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions.
 * @rq is being submitted. Analyze what needs to be done and put it on the
 * right queue.
 *
@@ -312,7 +312,7 @@ void blk_insert_flush(struct request *rq)
 	 */
 	if ((policy & REQ_FSEQ_DATA) &&
 	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
-		list_add(&rq->queuelist, &q->queue_head);
+		list_add_tail(&rq->queuelist, &q->queue_head);
 		return;
 	}

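Both insertion changes in blk-flush.c ("block: make the flush insertion use the tail of the dispatch list") swap a front insertion on q->queue_head for a tail insertion, so a flush-sequence request queues up behind work already sitting on the dispatch list instead of jumping ahead of it. A minimal user-space rendering of the two primitives involved follows; it is a trimmed-down copy of the include/linux/list.h circular-list scheme, with a made-up fake_rq type purely for illustration.

#include <stdio.h>
#include <stddef.h>

struct list_head {
        struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name)    { &(name), &(name) }

#define list_entry(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static void __list_add(struct list_head *new,
                       struct list_head *prev, struct list_head *next)
{
        next->prev = new;
        new->next = next;
        new->prev = prev;
        prev->next = new;
}

/* list_add(): insert right after the head -- taken off the queue first */
static void list_add(struct list_head *new, struct list_head *head)
{
        __list_add(new, head, head->next);
}

/* list_add_tail(): insert just before the head -- taken off the queue last */
static void list_add_tail(struct list_head *new, struct list_head *head)
{
        __list_add(new, head->prev, head);
}

struct fake_rq {
        const char *name;
        struct list_head queuelist;
};

int main(void)
{
        struct list_head queue_head = LIST_HEAD_INIT(queue_head);
        struct fake_rq a = { .name = "rq-A" }, b = { .name = "rq-B" };
        struct fake_rq flush = { .name = "flush" };
        struct list_head *pos;

        list_add_tail(&a.queuelist, &queue_head);
        list_add_tail(&b.queuelist, &queue_head);
        list_add_tail(&flush.queuelist, &queue_head);   /* behind pending work */

        for (pos = queue_head.next; pos != &queue_head; pos = pos->next)
                printf("%s\n", list_entry(pos, struct fake_rq, queuelist)->name);
        return 0;
}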
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index 54bcba6c02a7..129b9e209a3b 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -30,6 +30,8 @@

 static struct kmem_cache *integrity_cachep;

+static const char *bi_unsupported_name = "unsupported";
+
 /**
 * blk_rq_count_integrity_sg - Count number of integrity scatterlist elements
 * @q: request queue
@@ -358,6 +360,14 @@ static struct kobj_type integrity_ktype = {
 	.release = blk_integrity_release,
 };

+bool blk_integrity_is_initialized(struct gendisk *disk)
+{
+	struct blk_integrity *bi = blk_get_integrity(disk);
+
+	return (bi && bi->name && strcmp(bi->name, bi_unsupported_name) != 0);
+}
+EXPORT_SYMBOL(blk_integrity_is_initialized);
+
 /**
 * blk_integrity_register - Register a gendisk as being integrity-capable
 * @disk: struct gendisk pointer to make integrity-aware
@@ -407,7 +417,7 @@ int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
 		bi->get_tag_fn = template->get_tag_fn;
 		bi->tag_size = template->tag_size;
 	} else
-		bi->name = "unsupported";
+		bi->name = bi_unsupported_name;

 	return 0;
 }
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 5352bdafbcf0..6c98cfeeedf0 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -77,7 +77,7 @@ struct throtl_grp {
 	unsigned long slice_end[2];

 	/* Some throttle limits got updated for the group */
-	bool limits_changed;
+	int limits_changed;
 };

 struct throtl_data
@@ -102,7 +102,7 @@ struct throtl_data
 	/* Work for dispatching throttled bios */
 	struct delayed_work throtl_work;

-	bool limits_changed;
+	int limits_changed;
 };

 enum tg_state_flags {
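The blk-throttle.c hunks ("blk-throttle: don't call xchg on bool") widen limits_changed from bool to int because the flag is consumed with xchg(), and an atomic exchange on a quantity smaller than 32 bits is not something every architecture the kernel supports can do. The call sites are not part of this diff; the sketch below is only a user-space analogue of the read-and-clear pattern, with GCC atomic builtins in place of the kernel's xchg() and function names loosely modeled on blk-throttle.c.

#include <stdio.h>

/* int rather than bool: a word-sized field keeps the atomic exchange portable */
static int limits_changed;

static void throtl_update_limit(void)
{
        __atomic_store_n(&limits_changed, 1, __ATOMIC_RELEASE);
}

static void throtl_process_limit_change(void)
{
        /* atomically fetch the flag and clear it, like xchg(&flag, false) */
        if (__atomic_exchange_n(&limits_changed, 0, __ATOMIC_ACQ_REL))
                printf("limits changed, recomputing throttle budget\n");
        else
                printf("nothing to do\n");
}

int main(void)
{
        throtl_update_limit();
        throtl_process_limit_change();  /* sees the flag and clears it */
        throtl_process_limit_change();  /* flag already cleared */
        return 0;
}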
diff --git a/block/elevator.c b/block/elevator.c
index c387d3168734..0cdb4e7ebab4 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -610,7 +610,7 @@ void elv_requeue_request(struct request_queue *q, struct request *rq)

 	rq->cmd_flags &= ~REQ_STARTED;

-	elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
+	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
 }

 void elv_drain_elevator(struct request_queue *q)
@@ -655,12 +655,25 @@ void elv_quiesce_end(struct request_queue *q)
 	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
 }

-void elv_insert(struct request_queue *q, struct request *rq, int where)
+void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 {
 	trace_block_rq_insert(q, rq);

 	rq->q = q;

+	BUG_ON(rq->cmd_flags & REQ_ON_PLUG);
+
+	if (rq->cmd_flags & REQ_SOFTBARRIER) {
+		/* barriers are scheduling boundary, update end_sector */
+		if (rq->cmd_type == REQ_TYPE_FS ||
+		    (rq->cmd_flags & REQ_DISCARD)) {
+			q->end_sector = rq_end_sector(rq);
+			q->boundary_rq = rq;
+		}
+	} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
+		    where == ELEVATOR_INSERT_SORT)
+		where = ELEVATOR_INSERT_BACK;
+
 	switch (where) {
 	case ELEVATOR_INSERT_REQUEUE:
 	case ELEVATOR_INSERT_FRONT:
@@ -722,24 +735,6 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 		BUG();
 	}
 }
-
-void __elv_add_request(struct request_queue *q, struct request *rq, int where)
-{
-	BUG_ON(rq->cmd_flags & REQ_ON_PLUG);
-
-	if (rq->cmd_flags & REQ_SOFTBARRIER) {
-		/* barriers are scheduling boundary, update end_sector */
-		if (rq->cmd_type == REQ_TYPE_FS ||
-		    (rq->cmd_flags & REQ_DISCARD)) {
-			q->end_sector = rq_end_sector(rq);
-			q->boundary_rq = rq;
-		}
-	} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
-		    where == ELEVATOR_INSERT_SORT)
-		where = ELEVATOR_INSERT_BACK;
-
-	elv_insert(q, rq, where);
-}
 EXPORT_SYMBOL(__elv_add_request);

 void elv_add_request(struct request_queue *q, struct request *rq, int where)
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 0e406d73b2c8..177db6d5b2f5 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -430,6 +430,26 @@ static inline void ide_unlock_host(struct ide_host *host)
 	}
 }

+static void __ide_requeue_and_plug(struct request_queue *q, struct request *rq)
+{
+	if (rq)
+		blk_requeue_request(q, rq);
+	if (rq || blk_peek_request(q)) {
+		/* Use 3ms as that was the old plug delay */
+		blk_delay_queue(q, 3);
+	}
+}
+
+void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
+{
+	struct request_queue *q = drive->queue;
+	unsigned long flags;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	__ide_requeue_and_plug(q, rq);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
 /*
 * Issue a new request to a device.
 */
@@ -550,28 +570,7 @@ plug_device:
 	ide_unlock_host(host);
 plug_device_2:
 	spin_lock_irq(q->queue_lock);
-
-	if (rq) {
-		blk_requeue_request(q, rq);
-		blk_delay_queue(q, queue_run_ms);
-	}
-}
-
-void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
-{
-	struct request_queue *q = drive->queue;
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-
-	if (rq)
-		blk_requeue_request(q, rq);
-
-	spin_unlock_irqrestore(q->queue_lock, flags);
-
-	/* Use 3ms as that was the old plug delay */
-	if (rq)
-		blk_delay_queue(q, 3);
+	__ide_requeue_and_plug(q, rq);
 }

 static int drive_is_ready(ide_drive_t *drive)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 416d4e258df6..cb8380c9767f 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -927,20 +927,80 @@ static int dm_table_build_index(struct dm_table *t)
 }

 /*
+ * Get a disk whose integrity profile reflects the table's profile.
+ * If %match_all is true, all devices' profiles must match.
+ * If %match_all is false, all devices must at least have an
+ * allocated integrity profile; but uninitialized is ok.
+ * Returns NULL if integrity support was inconsistent or unavailable.
+ */
+static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t,
+						    bool match_all)
+{
+	struct list_head *devices = dm_table_get_devices(t);
+	struct dm_dev_internal *dd = NULL;
+	struct gendisk *prev_disk = NULL, *template_disk = NULL;
+
+	list_for_each_entry(dd, devices, list) {
+		template_disk = dd->dm_dev.bdev->bd_disk;
+		if (!blk_get_integrity(template_disk))
+			goto no_integrity;
+		if (!match_all && !blk_integrity_is_initialized(template_disk))
+			continue; /* skip uninitialized profiles */
+		else if (prev_disk &&
+			 blk_integrity_compare(prev_disk, template_disk) < 0)
+			goto no_integrity;
+		prev_disk = template_disk;
+	}
+
+	return template_disk;
+
+no_integrity:
+	if (prev_disk)
+		DMWARN("%s: integrity not set: %s and %s profile mismatch",
+		       dm_device_name(t->md),
+		       prev_disk->disk_name,
+		       template_disk->disk_name);
+	return NULL;
+}
+
+/*
 * Register the mapped device for blk_integrity support if
- * the underlying devices support it.
+ * the underlying devices have an integrity profile. But all devices
+ * may not have matching profiles (checking all devices isn't reliable
+ * during table load because this table may use other DM device(s) which
+ * must be resumed before they will have an initialized integrity profile).
+ * Stacked DM devices force a 2 stage integrity profile validation:
+ * 1 - during load, validate all initialized integrity profiles match
+ * 2 - during resume, validate all integrity profiles match
 */
 static int dm_table_prealloc_integrity(struct dm_table *t, struct mapped_device *md)
 {
-	struct list_head *devices = dm_table_get_devices(t);
-	struct dm_dev_internal *dd;
+	struct gendisk *template_disk = NULL;

-	list_for_each_entry(dd, devices, list)
-		if (bdev_get_integrity(dd->dm_dev.bdev)) {
-			t->integrity_supported = 1;
-			return blk_integrity_register(dm_disk(md), NULL);
-		}
+	template_disk = dm_table_get_integrity_disk(t, false);
+	if (!template_disk)
+		return 0;

+	if (!blk_integrity_is_initialized(dm_disk(md))) {
+		t->integrity_supported = 1;
+		return blk_integrity_register(dm_disk(md), NULL);
+	}
+
+	/*
+	 * If DM device already has an initialized integrity
+	 * profile the new profile should not conflict.
+	 */
+	if (blk_integrity_is_initialized(template_disk) &&
+	    blk_integrity_compare(dm_disk(md), template_disk) < 0) {
+		DMWARN("%s: conflict with existing integrity profile: "
+		       "%s profile mismatch",
+		       dm_device_name(t->md),
+		       template_disk->disk_name);
+		return 1;
+	}
+
+	/* Preserve existing initialized integrity profile */
+	t->integrity_supported = 1;
 	return 0;
 }

@@ -1094,41 +1154,27 @@ combine_limits:

 /*
 * Set the integrity profile for this device if all devices used have
- * matching profiles.
+ * matching profiles. We're quite deep in the resume path but still
+ * don't know if all devices (particularly DM devices this device
+ * may be stacked on) have matching profiles. Even if the profiles
+ * don't match we have no way to fail (to resume) at this point.
 */
 static void dm_table_set_integrity(struct dm_table *t)
 {
-	struct list_head *devices = dm_table_get_devices(t);
-	struct dm_dev_internal *prev = NULL, *dd = NULL;
+	struct gendisk *template_disk = NULL;

 	if (!blk_get_integrity(dm_disk(t->md)))
 		return;

-	list_for_each_entry(dd, devices, list) {
-		if (prev &&
-		    blk_integrity_compare(prev->dm_dev.bdev->bd_disk,
-					  dd->dm_dev.bdev->bd_disk) < 0) {
-			DMWARN("%s: integrity not set: %s and %s mismatch",
-			       dm_device_name(t->md),
-			       prev->dm_dev.bdev->bd_disk->disk_name,
-			       dd->dm_dev.bdev->bd_disk->disk_name);
-			goto no_integrity;
-		}
-		prev = dd;
+	template_disk = dm_table_get_integrity_disk(t, true);
+	if (!template_disk &&
+	    blk_integrity_is_initialized(dm_disk(t->md))) {
+		DMWARN("%s: device no longer has a valid integrity profile",
+		       dm_device_name(t->md));
+		return;
 	}
-
-	if (!prev || !bdev_get_integrity(prev->dm_dev.bdev))
-		goto no_integrity;
-
 	blk_integrity_register(dm_disk(t->md),
-			       bdev_get_integrity(prev->dm_dev.bdev));
-
-	return;
-
-no_integrity:
-	blk_integrity_register(dm_disk(t->md), NULL);
-
-	return;
+			       blk_get_integrity(template_disk));
 }

 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
diff --git a/fs/inode.c b/fs/inode.c
index 5f4e11aaeb5c..33c963d08ab4 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -125,6 +125,14 @@ __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_wb_list_lock);
 static DECLARE_RWSEM(iprune_sem);

 /*
+ * Empty aops. Can be used for the cases where the user does not
+ * define any of the address_space operations.
+ */
+const struct address_space_operations empty_aops = {
+};
+EXPORT_SYMBOL(empty_aops);
+
+/*
 * Statistics gathering..
 */
 struct inodes_stat_t inodes_stat;
@@ -176,7 +184,6 @@ int proc_nr_inodes(ctl_table *table, int write,
 */
 int inode_init_always(struct super_block *sb, struct inode *inode)
 {
-	static const struct address_space_operations empty_aops;
 	static const struct inode_operations empty_iops;
 	static const struct file_operations empty_fops;
 	struct address_space *const mapping = &inode->i_data;
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 9d2dc6b4348e..1168059c7efd 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -495,8 +495,6 @@ unsigned nilfs_page_count_clean_buffers(struct page *page,
 void nilfs_mapping_init(struct address_space *mapping,
 			struct backing_dev_info *bdi)
 {
-	static const struct address_space_operations empty_aops;
-
 	mapping->host = NULL;
 	mapping->flags = 0;
 	mapping_set_gfp_mask(mapping, GFP_NOFS);
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index c74400f88fe0..3299f469e712 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -56,6 +56,7 @@
 */

 #include "ubifs.h"
+#include <linux/fs.h>
 #include <linux/slab.h>
 #include <linux/xattr.h>
 #include <linux/posix_acl_xattr.h>
@@ -80,7 +81,6 @@ enum {
 };

 static const struct inode_operations none_inode_operations;
-static const struct address_space_operations none_address_operations;
 static const struct file_operations none_file_operations;

 /**
@@ -130,7 +130,7 @@ static int create_xattr(struct ubifs_info *c, struct inode *host,
 	}

 	/* Re-define all operations to be "nothing" */
-	inode->i_mapping->a_ops = &none_address_operations;
+	inode->i_mapping->a_ops = &empty_aops;
 	inode->i_op = &none_inode_operations;
 	inode->i_fop = &none_file_operations;

diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c
index 11014302c9ca..5f821dbc0579 100644
--- a/fs/ufs/truncate.c
+++ b/fs/ufs/truncate.c
@@ -479,7 +479,6 @@ int ufs_truncate(struct inode *inode, loff_t old_i_size)
 			break;
 		if (IS_SYNC(inode) && (inode->i_state & I_DIRTY))
 			ufs_sync_inode (inode);
-		blk_flush_plug(current);
 		yield();
 	}

diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 16a902f099ac..32176cc8e715 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1206,6 +1206,7 @@ struct blk_integrity {
 	struct kobject		kobj;
 };

+extern bool blk_integrity_is_initialized(struct gendisk *);
 extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
 extern void blk_integrity_unregister(struct gendisk *);
 extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
@@ -1262,6 +1263,7 @@ queue_max_integrity_segments(struct request_queue *q)
 #define queue_max_integrity_segments(a)	(0)
 #define blk_integrity_merge_rq(a, b, c)	(0)
 #define blk_integrity_merge_bio(a, b, c)	(0)
+#define blk_integrity_is_initialized(a)	(0)

 #endif /* CONFIG_BLK_DEV_INTEGRITY */

diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index d93efcc44570..21a8ebf2dc3a 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -101,7 +101,6 @@ extern void elv_dispatch_sort(struct request_queue *, struct request *);
 extern void elv_dispatch_add_tail(struct request_queue *, struct request *);
 extern void elv_add_request(struct request_queue *, struct request *, int);
 extern void __elv_add_request(struct request_queue *, struct request *, int);
-extern void elv_insert(struct request_queue *, struct request *, int);
 extern int elv_merge(struct request_queue *, struct request **, struct bio *);
 extern int elv_try_merge(struct request *, struct bio *);
 extern void elv_merge_requests(struct request_queue *, struct request *,
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 52f283c1edb2..1b95af37e3b3 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -613,6 +613,8 @@ struct address_space_operations {
 	int (*error_remove_page)(struct address_space *, struct page *);
 };

+extern const struct address_space_operations empty_aops;
+
 /*
 * pagecache_write_begin/pagecache_write_end must be used by general code
 * to write into the pagecache.