author    | Linus Torvalds <torvalds@linux-foundation.org> | 2009-04-15 12:03:47 -0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-04-15 12:03:47 -0400
commit    | 23da64b4714812b66ecf010e7dfb3ed1bf2eda69 (patch)
tree      | e2736bebc916cb540b0da83296d62b342612ecbd /include
parent    | a23c218bd36e11120daf18e00a91d5dc20e288e6 (diff)
parent    | a36e71f996e25d6213f57951f7ae1874086ec57e (diff)
Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block
* 'for-linus' of git://git.kernel.dk/linux-2.6-block: (28 commits)
cfq-iosched: add close cooperator code
cfq-iosched: log responsible 'cfqq' in idle timer arm
cfq-iosched: tweak kick logic a bit more
cfq-iosched: no need to save interrupts in cfq_kick_queue()
brd: fix cacheflushing
brd: support barriers
swap: Remove code handling bio_alloc failure with __GFP_WAIT
gfs2: Remove code handling bio_alloc failure with __GFP_WAIT
ext4: Remove code handling bio_alloc failure with __GFP_WAIT
dio: Remove code handling bio_alloc failure with __GFP_WAIT
block: Remove code handling bio_alloc failure with __GFP_WAIT
bio: add documentation to bio_alloc()
splice: add helpers for locking pipe inode
splice: remove generic_file_splice_write_nolock()
ocfs2: fix i_mutex locking in ocfs2_splice_to_file()
splice: fix i_mutex locking in generic_splice_write()
splice: remove i_mutex locking in splice_from_pipe()
splice: split up __splice_from_pipe()
block: fix SG_IO to return a proper error value
cfq-iosched: don't delay queue kick for a merged request
...
Diffstat (limited to 'include')
-rw-r--r-- | include/linux/bio.h       | 109
-rw-r--r-- | include/linux/fs.h        |  64
-rw-r--r-- | include/linux/pipe_fs_i.h |   5
-rw-r--r-- | include/linux/splice.h    |  12
4 files changed, 185 insertions, 5 deletions
diff --git a/include/linux/bio.h b/include/linux/bio.h
index b900d2c67d29..b89cf2d82898 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -504,6 +504,115 @@ static inline int bio_has_data(struct bio *bio)
 	return bio && bio->bi_io_vec != NULL;
 }
 
+/*
+ * BIO list management for use by remapping drivers (e.g. DM or MD).
+ *
+ * A bio_list anchors a singly-linked list of bios chained through the bi_next
+ * member of the bio. The bio_list also caches the last list member to allow
+ * fast access to the tail.
+ */
+struct bio_list {
+	struct bio *head;
+	struct bio *tail;
+};
+
+static inline int bio_list_empty(const struct bio_list *bl)
+{
+	return bl->head == NULL;
+}
+
+static inline void bio_list_init(struct bio_list *bl)
+{
+	bl->head = bl->tail = NULL;
+}
+
+#define bio_list_for_each(bio, bl) \
+	for (bio = (bl)->head; bio; bio = bio->bi_next)
+
+static inline unsigned bio_list_size(const struct bio_list *bl)
+{
+	unsigned sz = 0;
+	struct bio *bio;
+
+	bio_list_for_each(bio, bl)
+		sz++;
+
+	return sz;
+}
+
+static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
+{
+	bio->bi_next = NULL;
+
+	if (bl->tail)
+		bl->tail->bi_next = bio;
+	else
+		bl->head = bio;
+
+	bl->tail = bio;
+}
+
+static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
+{
+	bio->bi_next = bl->head;
+
+	bl->head = bio;
+
+	if (!bl->tail)
+		bl->tail = bio;
+}
+
+static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
+{
+	if (!bl2->head)
+		return;
+
+	if (bl->tail)
+		bl->tail->bi_next = bl2->head;
+	else
+		bl->head = bl2->head;
+
+	bl->tail = bl2->tail;
+}
+
+static inline void bio_list_merge_head(struct bio_list *bl,
+				       struct bio_list *bl2)
+{
+	if (!bl2->head)
+		return;
+
+	if (bl->head)
+		bl2->tail->bi_next = bl->head;
+	else
+		bl->tail = bl2->tail;
+
+	bl->head = bl2->head;
+}
+
+static inline struct bio *bio_list_pop(struct bio_list *bl)
+{
+	struct bio *bio = bl->head;
+
+	if (bio) {
+		bl->head = bl->head->bi_next;
+		if (!bl->head)
+			bl->tail = NULL;
+
+		bio->bi_next = NULL;
+	}
+
+	return bio;
+}
+
+static inline struct bio *bio_list_get(struct bio_list *bl)
+{
+	struct bio *bio = bl->head;
+
+	bl->head = bl->tail = NULL;
+
+	return bio;
+}
+
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 
 #define bip_vec_idx(bip, idx)	(&(bip->bip_vec[(idx)]))
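
For context, a minimal sketch (not part of the patch) of how a remapping driver might use the bio_list helpers above to queue bios and replay them later. The names defer_bio(), drain_deferred() and the deferred list are hypothetical; only the bio_list API and generic_make_request() are existing kernel interfaces. Locking is omitted for brevity.

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical example: collect incoming bios and resubmit them later. */
static struct bio_list deferred;

static void setup_deferred(void)
{
	bio_list_init(&deferred);		/* head = tail = NULL */
}

static void defer_bio(struct bio *bio)
{
	bio_list_add(&deferred, bio);		/* O(1) append via the cached tail */
}

static void drain_deferred(void)
{
	struct bio *bio;

	/* Pop bios in FIFO order; bio_list_pop() clears bi_next for us. */
	while ((bio = bio_list_pop(&deferred)))
		generic_make_request(bio);
}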
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 562d2855cf30..e766be0d4329 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -87,6 +87,60 @@ struct inodes_stat_t {
  */
 #define FMODE_NOCMTIME		((__force fmode_t)2048)
 
+/*
+ * The below are the various read and write types that we support. Some of
+ * them include behavioral modifiers that send information down to the
+ * block layer and IO scheduler. Terminology:
+ *
+ *	The block layer uses device plugging to defer IO a little bit, in
+ *	the hope that we will see more IO very shortly. This increases
+ *	coalescing of adjacent IO and thus reduces the number of IOs we
+ *	have to send to the device. It also allows for better queuing,
+ *	if the IO isn't mergeable. If the caller is going to be waiting
+ *	for the IO, then he must ensure that the device is unplugged so
+ *	that the IO is dispatched to the driver.
+ *
+ *	All IO is handled async in Linux. This is fine for background
+ *	writes, but for reads or writes that someone waits for completion
+ *	on, we want to notify the block layer and IO scheduler so that they
+ *	know about it. That allows them to make better scheduling
+ *	decisions. So when the below references 'sync' and 'async', it
+ *	is referencing this priority hint.
+ *
+ * With that in mind, the available types are:
+ *
+ * READ			A normal read operation. Device will be plugged.
+ * READ_SYNC		A synchronous read. Device is not plugged, caller can
+ *			immediately wait on this read without caring about
+ *			unplugging.
+ * READA		Used for read-ahead operations. Lower priority, and the
+ *			block layer could (in theory) choose to ignore this
+ *			request if it runs into resource problems.
+ * WRITE		A normal async write. Device will be plugged.
+ * SWRITE		Like WRITE, but a special case for ll_rw_block() that
+ *			tells it to lock the buffer first. Normally a buffer
+ *			must be locked before doing IO.
+ * WRITE_SYNC_PLUG	Synchronous write. Identical to WRITE, but passes down
+ *			the hint that someone will be waiting on this IO
+ *			shortly. The device must still be unplugged explicitly,
+ *			WRITE_SYNC_PLUG does not do this as we could be
+ *			submitting more writes before we actually wait on any
+ *			of them.
+ * WRITE_SYNC		Like WRITE_SYNC_PLUG, but also unplugs the device
+ *			immediately after submission. The write equivalent
+ *			of READ_SYNC.
+ * WRITE_ODIRECT	Special case write for O_DIRECT only.
+ * SWRITE_SYNC
+ * SWRITE_SYNC_PLUG	Like WRITE_SYNC/WRITE_SYNC_PLUG, but locks the buffer.
+ *			See SWRITE.
+ * WRITE_BARRIER	Like WRITE, but tells the block layer that all
+ *			previously submitted writes must be safely on storage
+ *			before this one is started. Also guarantees that when
+ *			this write is complete, it itself is also safely on
+ *			storage. Prevents reordering of writes on both sides
+ *			of this IO.
+ *
+ */
 #define RW_MASK		1
 #define RWA_MASK	2
 #define READ 0
@@ -102,6 +156,11 @@ struct inodes_stat_t {
 	(SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE))
 #define SWRITE_SYNC	(SWRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG))
 #define WRITE_BARRIER	(WRITE | (1 << BIO_RW_BARRIER))
+
+/*
+ * These aren't really reads or writes, they pass down information about
+ * parts of device that are now unused by the file system.
+ */
 #define DISCARD_NOBARRIER (1 << BIO_RW_DISCARD)
 #define DISCARD_BARRIER ((1 << BIO_RW_DISCARD) | (1 << BIO_RW_BARRIER))
 
@@ -738,9 +797,6 @@ enum inode_i_mutex_lock_class
 	I_MUTEX_QUOTA
 };
 
-extern void inode_double_lock(struct inode *inode1, struct inode *inode2);
-extern void inode_double_unlock(struct inode *inode1, struct inode *inode2);
-
 /*
  * NOTE: in a 32bit arch with a preemptable kernel and
  * an UP compile the i_size_read/write must be atomic
@@ -2150,8 +2206,6 @@ extern ssize_t generic_file_splice_read(struct file *, loff_t *,
 		struct pipe_inode_info *, size_t, unsigned int);
 extern ssize_t generic_file_splice_write(struct pipe_inode_info *,
 		struct file *, loff_t *, size_t, unsigned int);
-extern ssize_t generic_file_splice_write_nolock(struct pipe_inode_info *,
-		struct file *, loff_t *, size_t, unsigned int);
 extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
 		struct file *out, loff_t *, size_t len, unsigned int flags);
 extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
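
To make the sync/plug hints documented in the new fs.h comment concrete, a hedged sketch follows. The two wrapper functions are hypothetical; submit_bio() took an rw-flags argument at this point in the tree, and WRITE/WRITE_SYNC are the flags described above.

#include <linux/fs.h>
#include <linux/bio.h>

/* Background writeback: plain WRITE leaves the device plugged so the
 * block layer can keep merging adjacent IO before dispatch. */
static void submit_background_write(struct bio *bio)
{
	submit_bio(WRITE, bio);
}

/* A write the caller is about to wait on: WRITE_SYNC carries the sync
 * priority hint and also unplugs the device right after submission. */
static void submit_waited_write(struct bio *bio)
{
	submit_bio(WRITE_SYNC, bio);
}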
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 8e4120285f72..c8f038554e80 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -134,6 +134,11 @@ struct pipe_buf_operations {
    memory allocation, whereas PIPE_BUF makes atomicity guarantees. */
 #define PIPE_SIZE		PAGE_SIZE
 
+/* Pipe lock and unlock operations */
+void pipe_lock(struct pipe_inode_info *);
+void pipe_unlock(struct pipe_inode_info *);
+void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *);
+
 /* Drop the inode semaphore and wait for a pipe event, atomically */
 void pipe_wait(struct pipe_inode_info *pipe);
 
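
A short illustrative sketch of the new pipe locking helpers declared above; both functions below are hypothetical, and only pipe_lock(), pipe_unlock() and pipe_double_lock() come from the patch (they replace the inode_double_lock()/inode_double_unlock() pair removed from fs.h).

#include <linux/pipe_fs_i.h>

/* Hypothetical helper: serialize access to one pipe's buffer ring. */
static void inspect_pipe(struct pipe_inode_info *pipe)
{
	pipe_lock(pipe);
	/* ... safely look at pipe->nrbufs, pipe->curbuf, pipe->bufs[] ... */
	pipe_unlock(pipe);
}

/* Hypothetical pipe-to-pipe path: take both locks in a stable order. */
static void lock_both(struct pipe_inode_info *in, struct pipe_inode_info *out)
{
	pipe_double_lock(in, out);	/* avoids ABBA deadlock between the two */
	/* ... move buffers between the pipes ... */
	pipe_unlock(in);
	pipe_unlock(out);
}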
diff --git a/include/linux/splice.h b/include/linux/splice.h
index 528dcb93c2f2..5f3faa9d15ae 100644
--- a/include/linux/splice.h
+++ b/include/linux/splice.h
@@ -36,6 +36,8 @@ struct splice_desc {
 		void *data;		/* cookie */
 	} u;
 	loff_t pos;			/* file position */
+	size_t num_spliced;		/* number of bytes already spliced */
+	bool need_wakeup;		/* need to wake up writer */
 };
 
 struct partial_page {
@@ -66,6 +68,16 @@ extern ssize_t splice_from_pipe(struct pipe_inode_info *, struct file *,
 				splice_actor *);
 extern ssize_t __splice_from_pipe(struct pipe_inode_info *,
 				  struct splice_desc *, splice_actor *);
+extern int splice_from_pipe_feed(struct pipe_inode_info *, struct splice_desc *,
+				 splice_actor *);
+extern int splice_from_pipe_next(struct pipe_inode_info *,
+				 struct splice_desc *);
+extern void splice_from_pipe_begin(struct splice_desc *);
+extern void splice_from_pipe_end(struct pipe_inode_info *,
+				 struct splice_desc *);
+extern int pipe_to_file(struct pipe_inode_info *, struct pipe_buffer *,
+			struct splice_desc *);
+
 extern ssize_t splice_to_pipe(struct pipe_inode_info *,
 			      struct splice_pipe_desc *);
 extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *,
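
Finally, a sketch of how the split-out splice_from_pipe pieces are presumably meant to compose, based on the declarations above and the "splice: split up __splice_from_pipe()" commit in the shortlog. The function below is illustrative, with simplified error handling; it is not code from the patch.

#include <linux/splice.h>

/* Hypothetical splice-write loop built from the new helpers. */
static ssize_t splice_write_sketch(struct pipe_inode_info *pipe,
				   struct splice_desc *sd,
				   splice_actor *actor)
{
	int ret;

	splice_from_pipe_begin(sd);		/* reset num_spliced/need_wakeup */
	do {
		/* Wait until the pipe has buffers (or no writers remain). */
		ret = splice_from_pipe_next(pipe, sd);
		if (ret > 0)
			/* Hand each available pipe buffer to the actor. */
			ret = splice_from_pipe_feed(pipe, sd, actor);
	} while (ret > 0);
	splice_from_pipe_end(pipe, sd);		/* wake pipe writers if needed */

	return sd->num_spliced ? sd->num_spliced : ret;
}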