Diffstat (limited to 'include/linux/blkdev.h')

-rw-r--r--   include/linux/blkdev.h | 142
1 file changed, 27 insertions(+), 115 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 09a840264d6f..89c855c5655c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -60,7 +60,6 @@ enum rq_cmd_type_bits {
 	REQ_TYPE_PM_RESUME,		/* resume request */
 	REQ_TYPE_PM_SHUTDOWN,		/* shutdown request */
 	REQ_TYPE_SPECIAL,		/* driver defined type */
-	REQ_TYPE_LINUX_BLOCK,		/* generic block layer message */
 	/*
 	 * for ATA/ATAPI devices. this really doesn't belong here, ide should
 	 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
@@ -70,84 +69,6 @@ enum rq_cmd_type_bits {
 	REQ_TYPE_ATA_PC,
 };
 
-/*
- * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
- * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
- * SCSI cdb.
- *
- * 0x00 -> 0x3f are driver private, to be used for whatever purpose they need,
- * typically to differentiate REQ_TYPE_SPECIAL requests.
- *
- */
-enum {
-	REQ_LB_OP_EJECT	= 0x40,		/* eject request */
-	REQ_LB_OP_FLUSH	= 0x41,		/* flush request */
-};
-
-/*
- * request type modified bits. first four bits match BIO_RW* bits, important
- */
-enum rq_flag_bits {
-	__REQ_RW,		/* not set, read. set, write */
-	__REQ_FAILFAST_DEV,	/* no driver retries of device errors */
-	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
-	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
-	/* above flags must match BIO_RW_* */
-	__REQ_DISCARD,		/* request to discard sectors */
-	__REQ_SORTED,		/* elevator knows about this request */
-	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
-	__REQ_HARDBARRIER,	/* may not be passed by drive either */
-	__REQ_FUA,		/* forced unit access */
-	__REQ_NOMERGE,		/* don't touch this for merging */
-	__REQ_STARTED,		/* drive already may have started this one */
-	__REQ_DONTPREP,		/* don't call prep for this one */
-	__REQ_QUEUED,		/* uses queueing */
-	__REQ_ELVPRIV,		/* elevator private data attached */
-	__REQ_FAILED,		/* set if the request failed */
-	__REQ_QUIET,		/* don't worry about errors */
-	__REQ_PREEMPT,		/* set for "ide_preempt" requests */
-	__REQ_ORDERED_COLOR,	/* is before or after barrier */
-	__REQ_RW_SYNC,		/* request is sync (sync write or read) */
-	__REQ_ALLOCED,		/* request came from our alloc pool */
-	__REQ_RW_META,		/* metadata io request */
-	__REQ_COPY_USER,	/* contains copies of user pages */
-	__REQ_INTEGRITY,	/* integrity metadata has been remapped */
-	__REQ_NOIDLE,		/* Don't anticipate more IO after this one */
-	__REQ_IO_STAT,		/* account I/O stat */
-	__REQ_MIXED_MERGE,	/* merge of different types, fail separately */
-	__REQ_NR_BITS,		/* stops here */
-};
-
-#define REQ_RW			(1 << __REQ_RW)
-#define REQ_FAILFAST_DEV	(1 << __REQ_FAILFAST_DEV)
-#define REQ_FAILFAST_TRANSPORT	(1 << __REQ_FAILFAST_TRANSPORT)
-#define REQ_FAILFAST_DRIVER	(1 << __REQ_FAILFAST_DRIVER)
-#define REQ_DISCARD		(1 << __REQ_DISCARD)
-#define REQ_SORTED		(1 << __REQ_SORTED)
-#define REQ_SOFTBARRIER		(1 << __REQ_SOFTBARRIER)
-#define REQ_HARDBARRIER		(1 << __REQ_HARDBARRIER)
-#define REQ_FUA			(1 << __REQ_FUA)
-#define REQ_NOMERGE		(1 << __REQ_NOMERGE)
-#define REQ_STARTED		(1 << __REQ_STARTED)
-#define REQ_DONTPREP		(1 << __REQ_DONTPREP)
-#define REQ_QUEUED		(1 << __REQ_QUEUED)
-#define REQ_ELVPRIV		(1 << __REQ_ELVPRIV)
-#define REQ_FAILED		(1 << __REQ_FAILED)
-#define REQ_QUIET		(1 << __REQ_QUIET)
-#define REQ_PREEMPT		(1 << __REQ_PREEMPT)
-#define REQ_ORDERED_COLOR	(1 << __REQ_ORDERED_COLOR)
-#define REQ_RW_SYNC		(1 << __REQ_RW_SYNC)
-#define REQ_ALLOCED		(1 << __REQ_ALLOCED)
-#define REQ_RW_META		(1 << __REQ_RW_META)
-#define REQ_COPY_USER		(1 << __REQ_COPY_USER)
-#define REQ_INTEGRITY		(1 << __REQ_INTEGRITY)
-#define REQ_NOIDLE		(1 << __REQ_NOIDLE)
-#define REQ_IO_STAT		(1 << __REQ_IO_STAT)
-#define REQ_MIXED_MERGE		(1 << __REQ_MIXED_MERGE)
-
-#define REQ_FAILFAST_MASK	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | \
-				 REQ_FAILFAST_DRIVER)
-
 #define BLK_MAX_CDB	16
 
 /*
@@ -264,6 +185,7 @@ struct request_pm_state
 typedef void (request_fn_proc) (struct request_queue *q);
 typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
 typedef int (prep_rq_fn) (struct request_queue *, struct request *);
+typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
 typedef void (unplug_fn) (struct request_queue *);
 
 struct bio_vec;
@@ -275,7 +197,6 @@ struct bvec_merge_data {
 };
 typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
			     struct bio_vec *);
-typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
 typedef void (softirq_done_fn)(struct request *);
 typedef int (dma_drain_needed_fn)(struct request *);
 typedef int (lld_busy_fn) (struct request_queue *q);
@@ -346,9 +267,9 @@ struct request_queue
 	request_fn_proc		*request_fn;
 	make_request_fn		*make_request_fn;
 	prep_rq_fn		*prep_rq_fn;
+	unprep_rq_fn		*unprep_rq_fn;
 	unplug_fn		*unplug_fn;
 	merge_bvec_fn		*merge_bvec_fn;
-	prepare_flush_fn	*prepare_flush_fn;
 	softirq_done_fn		*softirq_done_fn;
 	rq_timed_out_fn		*rq_timed_out_fn;
 	dma_drain_needed_fn	*dma_drain_needed;
@@ -467,11 +388,13 @@ struct request_queue
 #define QUEUE_FLAG_IO_STAT     15	/* do IO stats */
 #define QUEUE_FLAG_DISCARD     16	/* supports DISCARD */
 #define QUEUE_FLAG_NOXMERGES   17	/* No extended merges */
+#define QUEUE_FLAG_ADD_RANDOM  18	/* Contributes to random pool */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_CLUSTER) |		\
				 (1 << QUEUE_FLAG_STACKABLE)	|	\
-				 (1 << QUEUE_FLAG_SAME_COMP))
+				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
+				 (1 << QUEUE_FLAG_ADD_RANDOM))
 
 static inline int queue_is_locked(struct request_queue *q)
 {
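The new QUEUE_FLAG_ADD_RANDOM bit (on by default via QUEUE_FLAG_DEFAULT) lets a queue opt out of contributing its completion timings to the entropy pool. A minimal sketch of how a driver might clear it for a device whose timings are too predictable; the function name is hypothetical, only queue_flag_set_unlocked()/queue_flag_clear_unlocked() and the flags themselves come from this header:

/* Sketch: stop feeding the entropy pool from a non-rotational device
 * whose completion timings carry little randomness. */
static void my_mark_nonrot(struct request_queue *q)
{
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
}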
@@ -596,38 +519,26 @@ enum {
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
 #define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
 #define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
+#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
 #define blk_queue_flushing(q)	((q)->ordseq)
 #define blk_queue_stackable(q)	\
	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
 #define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
 
-#define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
-#define blk_pc_request(rq)	((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
-#define blk_special_request(rq)	((rq)->cmd_type == REQ_TYPE_SPECIAL)
-#define blk_sense_request(rq)	((rq)->cmd_type == REQ_TYPE_SENSE)
-
-#define blk_failfast_dev(rq)	((rq)->cmd_flags & REQ_FAILFAST_DEV)
-#define blk_failfast_transport(rq) ((rq)->cmd_flags & REQ_FAILFAST_TRANSPORT)
-#define blk_failfast_driver(rq)	((rq)->cmd_flags & REQ_FAILFAST_DRIVER)
-#define blk_noretry_request(rq)	(blk_failfast_dev(rq) ||	\
-				 blk_failfast_transport(rq) ||	\
-				 blk_failfast_driver(rq))
-#define blk_rq_started(rq)	((rq)->cmd_flags & REQ_STARTED)
-#define blk_rq_io_stat(rq)	((rq)->cmd_flags & REQ_IO_STAT)
-#define blk_rq_quiet(rq)	((rq)->cmd_flags & REQ_QUIET)
-
-#define blk_account_rq(rq)	(blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq)))
-
-#define blk_pm_suspend_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND)
-#define blk_pm_resume_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_RESUME)
+#define blk_noretry_request(rq) \
+	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
+			     REQ_FAILFAST_DRIVER))
+
+#define blk_account_rq(rq) \
+	(((rq)->cmd_flags & REQ_STARTED) && \
+	 ((rq)->cmd_type == REQ_TYPE_FS || \
+	  ((rq)->cmd_flags & REQ_DISCARD)))
+
 #define blk_pm_request(rq)	\
-	(blk_pm_suspend_request(rq) || blk_pm_resume_request(rq))
+	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \
+	 (rq)->cmd_type == REQ_TYPE_PM_RESUME)
 
 #define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
-#define blk_sorted_rq(rq)	((rq)->cmd_flags & REQ_SORTED)
-#define blk_barrier_rq(rq)	((rq)->cmd_flags & REQ_HARDBARRIER)
-#define blk_fua_rq(rq)		((rq)->cmd_flags & REQ_FUA)
-#define blk_discard_rq(rq)	((rq)->cmd_flags & REQ_DISCARD)
 #define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
 /* rq->queuelist of dequeued request must be list_empty() */
 #define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))
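With blk_fs_request(), blk_discard_rq(), blk_barrier_rq() and the other one-line wrappers gone, callers are expected to test rq->cmd_type and rq->cmd_flags directly. A rough before/after sketch of the pattern in driver code; the function is illustrative and not part of this patch:

/* Illustrative only: open-coding the checks the removed wrappers used
 * to provide. */
static bool my_driver_handles_rq(struct request *rq)
{
	/* old style:
	 *	return blk_fs_request(rq) || blk_discard_rq(rq);
	 */
	return rq->cmd_type == REQ_TYPE_FS ||
	       (rq->cmd_flags & REQ_DISCARD);
}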
@@ -641,7 +552,7 @@ enum {
  */
 static inline bool rw_is_sync(unsigned int rw_flags)
 {
-	return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC);
+	return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
 }
 
 static inline bool rq_is_sync(struct request *rq)
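The logic of rw_is_sync() is unchanged; the old __REQ_RW/__REQ_RW_SYNC bits are simply renamed to REQ_WRITE and REQ_SYNC as they move out of this header. A small sketch of the same test at a call site, assuming a caller that only cares about writes:

/* Sketch: reads are always treated as sync, writes only with REQ_SYNC. */
static void account_write(struct request *rq, unsigned long *sync,
			  unsigned long *async)
{
	if (!(rq->cmd_flags & REQ_WRITE))
		return;
	if (rq->cmd_flags & REQ_SYNC)
		(*sync)++;
	else
		(*async)++;
}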
@@ -649,9 +560,6 @@ static inline bool rq_is_sync(struct request *rq)
 	return rw_is_sync(rq->cmd_flags);
 }
 
-#define rq_is_meta(rq)		((rq)->cmd_flags & REQ_RW_META)
-#define rq_noidle(rq)		((rq)->cmd_flags & REQ_NOIDLE)
-
 static inline int blk_queue_full(struct request_queue *q, int sync)
 {
	if (sync)
@@ -684,7 +592,8 @@ static inline void blk_clear_queue_full(struct request_queue *q, int sync)
	(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
 #define rq_mergeable(rq)	\
	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
-	 (blk_discard_rq(rq) || blk_fs_request((rq))))
+	 (((rq)->cmd_flags & REQ_DISCARD) || \
+	  (rq)->cmd_type == REQ_TYPE_FS))
 
 /*
  * q->prep_rq_fn return values
@@ -709,7 +618,7 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
 #define BLK_BOUNCE_HIGH		-1ULL
 #endif
 #define BLK_BOUNCE_ANY		(-1ULL)
-#define BLK_BOUNCE_ISA		(ISA_DMA_THRESHOLD)
+#define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))
 
 /*
  * default timeout for SG_IO if none specified
@@ -781,6 +690,8 @@ extern struct request *blk_make_request(struct request_queue *, struct bio *,
					gfp_t);
 extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
 extern void blk_requeue_request(struct request_queue *, struct request *);
+extern void blk_add_request_payload(struct request *rq, struct page *page,
+		unsigned int len);
 extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
 extern int blk_lld_busy(struct request_queue *q);
 extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
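blk_add_request_payload() is new here: it lets a driver attach a single page of payload to an otherwise data-less request, so a discard-style command can be sent as ordinary data-out. A hypothetical sketch; the prep function, the driver-owned page, and the PAGE_SIZE length are assumptions, only the blk_add_request_payload() prototype comes from this patch:

/* 'my_zero_page' is assumed to have been allocated at probe time. */
static struct page *my_zero_page;

static int my_prep_discard(struct request_queue *q, struct request *rq)
{
	if (rq->cmd_flags & REQ_DISCARD)
		blk_add_request_payload(rq, my_zero_page, PAGE_SIZE);
	return BLKPREP_OK;
}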
@@ -915,6 +826,7 @@ extern void blk_complete_request(struct request *);
 extern void __blk_complete_request(struct request *);
 extern void blk_abort_request(struct request *);
 extern void blk_abort_queue(struct request_queue *);
+extern void blk_unprep_request(struct request *);
 
 /*
  * Access functions for manipulating queue properties
@@ -959,6 +871,7 @@ extern int blk_queue_dma_drain(struct request_queue *q,
 extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
 extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
+extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
 extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
 extern void blk_queue_dma_alignment(struct request_queue *, int);
 extern void blk_queue_update_dma_alignment(struct request_queue *, int);
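blk_queue_unprep_rq() registers the new unprep_rq_fn counterpart to prep_rq_fn, giving a driver a hook to undo whatever its prep routine set up once the request is finished. A sketch of the pairing under assumed names (the handlers and the use of rq->special for a small per-request buffer are illustrative, not from the patch):

/* Illustrative prep/unprep pair. */
static int my_prep_fn(struct request_queue *q, struct request *rq)
{
	rq->special = kzalloc(64, GFP_ATOMIC);	/* e.g. a sense buffer */
	if (!rq->special)
		return BLKPREP_DEFER;
	return BLKPREP_OK;
}

static void my_unprep_fn(struct request_queue *q, struct request *rq)
{
	kfree(rq->special);			/* undo what prep did */
	rq->special = NULL;
}

static void my_queue_setup(struct request_queue *q)
{
	blk_queue_prep_rq(q, my_prep_fn);
	blk_queue_unprep_rq(q, my_unprep_fn);
}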
@@ -966,7 +879,7 @@ extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
 extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
-extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
+extern int blk_queue_ordered(struct request_queue *, unsigned);
 extern bool blk_do_ordered(struct request_queue *, struct request **);
 extern unsigned blk_ordered_cur_seq(struct request_queue *);
 extern unsigned blk_ordered_req_seq(struct request *);
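blk_queue_ordered() loses its prepare_flush_fn argument along with the typedef removed above, so drivers now only declare which ordered mode they support and no longer fill in the flush request themselves. A hedged sketch; QUEUE_ORDERED_DRAIN_FLUSH is assumed to be the mode a write-back-cache device would pick at this point in the tree:

static void my_setup_cache_flush(struct request_queue *q)
{
	/* previously: blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH,
	 *				 my_prepare_flush_fn); */
	blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH);
}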
@@ -1020,7 +933,7 @@ static inline int sb_issue_discard(struct super_block *sb,
 {
	block <<= (sb->s_blocksize_bits - 9);
	nr_blocks <<= (sb->s_blocksize_bits - 9);
-	return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL,
+	return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_NOFS,
				    BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
 }
 
@@ -1333,7 +1246,6 @@ static inline int blk_integrity_rq(struct request *rq)
 struct block_device_operations {
	int (*open) (struct block_device *, fmode_t);
	int (*release) (struct gendisk *, fmode_t);
-	int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*direct_access) (struct block_device *, sector_t,