Diffstat:
-rw-r--r--  Documentation/block/data-integrity.txt  |   4
-rw-r--r--  block/Makefile                           |   2
-rw-r--r--  block/blk-core.c                         |  14
-rw-r--r--  block/bsg.c                              |   2
-rw-r--r--  block/cfq-iosched.c                      | 176
-rw-r--r--  block/cmd-filter.c                       | 233
-rw-r--r--  block/scsi_ioctl.c                       |  43
-rw-r--r--  drivers/md/dm.c                          |   4
-rw-r--r--  drivers/scsi/sg.c                        |   4
-rw-r--r--  fs/bio-integrity.c                       | 170
-rw-r--r--  fs/bio.c                                 |  11
-rw-r--r--  include/linux/bio.h                      |  22
-rw-r--r--  include/linux/blkdev.h                   |  15
13 files changed, 294 insertions(+), 406 deletions(-)
diff --git a/Documentation/block/data-integrity.txt b/Documentation/block/data-integrity.txt
index e8ca040ba2cf..2d735b0ae383 100644
--- a/Documentation/block/data-integrity.txt
+++ b/Documentation/block/data-integrity.txt
@@ -50,7 +50,7 @@ encouraged them to allow separation of the data and integrity metadata
 scatter-gather lists.
 
 The controller will interleave the buffers on write and split them on
-read. This means that the Linux can DMA the data buffers to and from
+read. This means that Linux can DMA the data buffers to and from
 host memory without changes to the page cache.
 
 Also, the 16-bit CRC checksum mandated by both the SCSI and SATA specs
@@ -66,7 +66,7 @@ software RAID5).
 
 The IP checksum is weaker than the CRC in terms of detecting bit
 errors. However, the strength is really in the separation of the data
-buffers and the integrity metadata. These two distinct buffers much
+buffers and the integrity metadata. These two distinct buffers must
 match up for an I/O to complete.
 
 The separation of the data and integrity metadata buffers as well as
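
For context on the trade-off the hunk above refers to: the "IP checksum" is a
16-bit ones'-complement sum. A minimal userspace sketch, assuming an
illustrative function name that is not part of this patch or the kernel API:

#include <stdint.h>
#include <stddef.h>

/* Ones'-complement 16-bit checksum, the family of checksum used by IP.
 * Cheaper to compute than the T10 CRC, but weaker at detecting bit errors. */
static uint16_t ip_style_csum(const uint8_t *buf, size_t len)
{
        uint32_t sum = 0;

        while (len > 1) {
                sum += (uint32_t)buf[0] << 8 | buf[1];
                buf += 2;
                len -= 2;
        }
        if (len)                        /* odd trailing byte */
                sum += (uint32_t)buf[0] << 8;

        while (sum >> 16)               /* fold carries back into low 16 bits */
                sum = (sum & 0xffff) + (sum >> 16);

        return (uint16_t)~sum;
}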
diff --git a/block/Makefile b/block/Makefile
index e9fa4dd690f2..6c54ed0ff755 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -5,7 +5,7 @@
 obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \
                        blk-barrier.o blk-settings.o blk-ioc.o blk-map.o \
                        blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
-                       ioctl.o genhd.o scsi_ioctl.o cmd-filter.o
+                       ioctl.o genhd.o scsi_ioctl.o
 
 obj-$(CONFIG_BLK_DEV_BSG)      += bsg.o
 obj-$(CONFIG_IOSCHED_NOOP)     += noop-iosched.o
diff --git a/block/blk-core.c b/block/blk-core.c
index b06cf5c2a829..4b45435c6eaf 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -595,8 +595,6 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 
         q->sg_reserved_size = INT_MAX;
 
-        blk_set_cmd_filter_defaults(&q->cmd_filter);
-
         /*
          * all done
          */
@@ -1172,6 +1170,11 @@ static int __make_request(struct request_queue *q, struct bio *bio)
         const int unplug = bio_unplug(bio);
         int rw_flags;
 
+        if (bio_barrier(bio) && bio_has_data(bio) &&
+            (q->next_ordered == QUEUE_ORDERED_NONE)) {
+                bio_endio(bio, -EOPNOTSUPP);
+                return 0;
+        }
         /*
          * low level driver can indicate that it wants pages above a
          * certain limit bounced to low memory (ie for highmem, or even
@@ -1472,11 +1475,6 @@ static inline void __generic_make_request(struct bio *bio)
                         err = -EOPNOTSUPP;
                         goto end_io;
                 }
-                if (bio_barrier(bio) && bio_has_data(bio) &&
-                    (q->next_ordered == QUEUE_ORDERED_NONE)) {
-                        err = -EOPNOTSUPP;
-                        goto end_io;
-                }
 
                 ret = q->make_request_fn(q, bio);
         } while (ret);
@@ -2365,7 +2363,7 @@ int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
                 __bio_clone(bio, bio_src);
 
                 if (bio_integrity(bio_src) &&
-                    bio_integrity_clone(bio, bio_src, gfp_mask))
+                    bio_integrity_clone(bio, bio_src, gfp_mask, bs))
                         goto free_and_out;
 
                 if (bio_ctr && bio_ctr(bio, bio_src, data))
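
A note on the blk-core change above: moving the barrier check into
__make_request() means an unsupported barrier bio now completes with
-EOPNOTSUPP through bio_endio() rather than erroring out in
__generic_make_request(). A submitter that wants to keep working on such a
queue typically retries without the barrier flag; a minimal sketch against
the 2.6.31-era bio API, with a hypothetical requeue helper:

/* Completion callback sketch. On -EOPNOTSUPP for a barrier bio, clear the
 * barrier flag and retry; resubmit_bio() is a hypothetical helper standing
 * in for the caller's own requeue logic. */
static void my_end_io(struct bio *bio, int error)
{
        if (error == -EOPNOTSUPP && bio_barrier(bio)) {
                bio->bi_rw &= ~(1 << BIO_RW_BARRIER);
                resubmit_bio(bio);      /* hypothetical requeue helper */
                return;
        }
        bio_put(bio);                   /* normal completion */
}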
diff --git a/block/bsg.c b/block/bsg.c
index e7d475254248..5f184bb3ff9e 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -186,7 +186,7 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
                 return -EFAULT;
 
         if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
-                if (blk_verify_command(&q->cmd_filter, rq->cmd, has_write_perm))
+                if (blk_verify_command(rq->cmd, has_write_perm))
                         return -EPERM;
         } else if (!capable(CAP_SYS_RAWIO))
                 return -EPERM;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 833ec18eaa63..87276eb83f7f 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -71,6 +71,51 @@ struct cfq_rb_root {
 #define CFQ_RB_ROOT     (struct cfq_rb_root) { RB_ROOT, NULL, }
 
 /*
+ * Per process-grouping structure
+ */
+struct cfq_queue {
+        /* reference count */
+        atomic_t ref;
+        /* various state flags, see below */
+        unsigned int flags;
+        /* parent cfq_data */
+        struct cfq_data *cfqd;
+        /* service_tree member */
+        struct rb_node rb_node;
+        /* service_tree key */
+        unsigned long rb_key;
+        /* prio tree member */
+        struct rb_node p_node;
+        /* prio tree root we belong to, if any */
+        struct rb_root *p_root;
+        /* sorted list of pending requests */
+        struct rb_root sort_list;
+        /* if fifo isn't expired, next request to serve */
+        struct request *next_rq;
+        /* requests queued in sort_list */
+        int queued[2];
+        /* currently allocated requests */
+        int allocated[2];
+        /* fifo list of requests in sort_list */
+        struct list_head fifo;
+
+        unsigned long slice_end;
+        long slice_resid;
+        unsigned int slice_dispatch;
+
+        /* pending metadata requests */
+        int meta_pending;
+        /* number of requests that are on the dispatch list or inside driver */
+        int dispatched;
+
+        /* io prio of this group */
+        unsigned short ioprio, org_ioprio;
+        unsigned short ioprio_class, org_ioprio_class;
+
+        pid_t pid;
+};
+
+/*
  * Per block device queue structure
  */
 struct cfq_data {
@@ -135,51 +180,11 @@ struct cfq_data {
         unsigned int cfq_slice_idle;
 
         struct list_head cic_list;
-};
-
-/*
- * Per process-grouping structure
- */
-struct cfq_queue {
-        /* reference count */
-        atomic_t ref;
-        /* various state flags, see below */
-        unsigned int flags;
-        /* parent cfq_data */
-        struct cfq_data *cfqd;
-        /* service_tree member */
-        struct rb_node rb_node;
-        /* service_tree key */
-        unsigned long rb_key;
-        /* prio tree member */
-        struct rb_node p_node;
-        /* prio tree root we belong to, if any */
-        struct rb_root *p_root;
-        /* sorted list of pending requests */
-        struct rb_root sort_list;
-        /* if fifo isn't expired, next request to serve */
-        struct request *next_rq;
-        /* requests queued in sort_list */
-        int queued[2];
-        /* currently allocated requests */
-        int allocated[2];
-        /* fifo list of requests in sort_list */
-        struct list_head fifo;
 
-        unsigned long slice_end;
-        long slice_resid;
-        unsigned int slice_dispatch;
-
-        /* pending metadata requests */
-        int meta_pending;
-        /* number of requests that are on the dispatch list or inside driver */
-        int dispatched;
-
-        /* io prio of this group */
-        unsigned short ioprio, org_ioprio;
-        unsigned short ioprio_class, org_ioprio_class;
-
-        pid_t pid;
+        /*
+         * Fallback dummy cfqq for extreme OOM conditions
+         */
+        struct cfq_queue oom_cfqq;
 };
 
 enum cfqq_state_flags {
@@ -1641,6 +1646,26 @@ static void cfq_ioc_set_ioprio(struct io_context *ioc)
         ioc->ioprio_changed = 0;
 }
 
+static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+                          pid_t pid, int is_sync)
+{
+        RB_CLEAR_NODE(&cfqq->rb_node);
+        RB_CLEAR_NODE(&cfqq->p_node);
+        INIT_LIST_HEAD(&cfqq->fifo);
+
+        atomic_set(&cfqq->ref, 0);
+        cfqq->cfqd = cfqd;
+
+        cfq_mark_cfqq_prio_changed(cfqq);
+
+        if (is_sync) {
+                if (!cfq_class_idle(cfqq))
+                        cfq_mark_cfqq_idle_window(cfqq);
+                cfq_mark_cfqq_sync(cfqq);
+        }
+        cfqq->pid = pid;
+}
+
 static struct cfq_queue *
 cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
                      struct io_context *ioc, gfp_t gfp_mask)
@@ -1653,56 +1678,40 @@ retry:
         /* cic always exists here */
         cfqq = cic_to_cfqq(cic, is_sync);
 
-        if (!cfqq) {
+        /*
+         * Always try a new alloc if we fell back to the OOM cfqq
+         * originally, since it should just be a temporary situation.
+         */
+        if (!cfqq || cfqq == &cfqd->oom_cfqq) {
+                cfqq = NULL;
                 if (new_cfqq) {
                         cfqq = new_cfqq;
                         new_cfqq = NULL;
                 } else if (gfp_mask & __GFP_WAIT) {
-                        /*
-                         * Inform the allocator of the fact that we will
-                         * just repeat this allocation if it fails, to allow
-                         * the allocator to do whatever it needs to attempt to
-                         * free memory.
-                         */
                         spin_unlock_irq(cfqd->queue->queue_lock);
                         new_cfqq = kmem_cache_alloc_node(cfq_pool,
-                                        gfp_mask | __GFP_NOFAIL | __GFP_ZERO,
+                                        gfp_mask | __GFP_ZERO,
                                         cfqd->queue->node);
                         spin_lock_irq(cfqd->queue->queue_lock);
-                        goto retry;
+                        if (new_cfqq)
+                                goto retry;
                 } else {
                         cfqq = kmem_cache_alloc_node(cfq_pool,
                                         gfp_mask | __GFP_ZERO,
                                         cfqd->queue->node);
-                        if (!cfqq)
-                                goto out;
                 }
 
-                RB_CLEAR_NODE(&cfqq->rb_node);
-                RB_CLEAR_NODE(&cfqq->p_node);
-                INIT_LIST_HEAD(&cfqq->fifo);
-
-                atomic_set(&cfqq->ref, 0);
-                cfqq->cfqd = cfqd;
-
-                cfq_mark_cfqq_prio_changed(cfqq);
-
-                cfq_init_prio_data(cfqq, ioc);
-
-                if (is_sync) {
-                        if (!cfq_class_idle(cfqq))
-                                cfq_mark_cfqq_idle_window(cfqq);
-                        cfq_mark_cfqq_sync(cfqq);
-                }
-                cfqq->pid = current->pid;
-                cfq_log_cfqq(cfqd, cfqq, "alloced");
+                if (cfqq) {
+                        cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
+                        cfq_init_prio_data(cfqq, ioc);
+                        cfq_log_cfqq(cfqd, cfqq, "alloced");
+                } else
+                        cfqq = &cfqd->oom_cfqq;
         }
 
         if (new_cfqq)
                 kmem_cache_free(cfq_pool, new_cfqq);
 
-out:
-        WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
         return cfqq;
 }
1708 1717
@@ -1735,11 +1744,8 @@ cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct io_context *ioc,
                 cfqq = *async_cfqq;
         }
 
-        if (!cfqq) {
+        if (!cfqq)
                 cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
-                if (!cfqq)
-                        return NULL;
-        }
 
         /*
          * pin the queue now that it's allocated, scheduler exit will prune it
@@ -2307,10 +2313,6 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
         cfqq = cic_to_cfqq(cic, is_sync);
         if (!cfqq) {
                 cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
-
-                if (!cfqq)
-                        goto queue_fail;
-
                 cic_set_cfqq(cic, cfqq, is_sync);
         }
 
@@ -2465,6 +2467,14 @@ static void *cfq_init_queue(struct request_queue *q)
         for (i = 0; i < CFQ_PRIO_LISTS; i++)
                 cfqd->prio_trees[i] = RB_ROOT;
 
+        /*
+         * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
+         * Grab a permanent reference to it, so that the normal code flow
+         * will not attempt to free it.
+         */
+        cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
+        atomic_inc(&cfqd->oom_cfqq.ref);
+
         INIT_LIST_HEAD(&cfqd->cic_list);
 
         cfqd->queue = q;
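
The oom_cfqq hunks above replace error propagation with a fallback-object
pattern: one statically embedded queue holding a permanent reference, which
allocation failure can always fall back to, so cfq_get_queue() never returns
NULL. A standalone sketch of the pattern, with invented names (struct obj,
get_obj) purely for illustration:

#include <stdlib.h>

struct obj {
        int refcount;
        /* ... payload ... */
};

/* Embedded fallback: lives as long as its container, never freed. */
static struct obj oom_fallback = { .refcount = 1 };     /* permanent ref */

/* Never returns NULL: on allocation failure, hand out the shared
 * fallback instead of making every caller handle a NULL return. */
static struct obj *get_obj(void)
{
        struct obj *o = calloc(1, sizeof(*o));

        if (!o)
                return &oom_fallback;
        o->refcount = 1;
        return o;
}

static void put_obj(struct obj *o)
{
        if (--o->refcount == 0)         /* fallback's permanent ref holds */
                free(o);
}

This is why the NULL checks in cfq_get_queue() and cfq_set_request() could
simply be deleted in the hunks above.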
diff --git a/block/cmd-filter.c b/block/cmd-filter.c
deleted file mode 100644
index 572bbc2f900d..000000000000
--- a/block/cmd-filter.c
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
- * Copyright 2004 Peter M. Jones <pjones@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- *
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public Licens
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
- *
- */
-
-#include <linux/list.h>
-#include <linux/genhd.h>
-#include <linux/spinlock.h>
-#include <linux/capability.h>
-#include <linux/bitops.h>
-#include <linux/blkdev.h>
-
-#include <scsi/scsi.h>
-#include <linux/cdrom.h>
-
-int blk_verify_command(struct blk_cmd_filter *filter,
-                       unsigned char *cmd, fmode_t has_write_perm)
-{
-        /* root can do any command. */
-        if (capable(CAP_SYS_RAWIO))
-                return 0;
-
-        /* if there's no filter set, assume we're filtering everything out */
-        if (!filter)
-                return -EPERM;
-
-        /* Anybody who can open the device can do a read-safe command */
-        if (test_bit(cmd[0], filter->read_ok))
-                return 0;
-
-        /* Write-safe commands require a writable open */
-        if (test_bit(cmd[0], filter->write_ok) && has_write_perm)
-                return 0;
-
-        return -EPERM;
-}
-EXPORT_SYMBOL(blk_verify_command);
-
-#if 0
-/* and now, the sysfs stuff */
-static ssize_t rcf_cmds_show(struct blk_cmd_filter *filter, char *page,
-                             int rw)
-{
-        char *npage = page;
-        unsigned long *okbits;
-        int i;
-
-        if (rw == READ)
-                okbits = filter->read_ok;
-        else
-                okbits = filter->write_ok;
-
-        for (i = 0; i < BLK_SCSI_MAX_CMDS; i++) {
-                if (test_bit(i, okbits)) {
-                        npage += sprintf(npage, "0x%02x", i);
-                        if (i < BLK_SCSI_MAX_CMDS - 1)
-                                sprintf(npage++, " ");
-                }
-        }
-
-        if (npage != page)
-                npage += sprintf(npage, "\n");
-
-        return npage - page;
-}
-
-static ssize_t rcf_readcmds_show(struct blk_cmd_filter *filter, char *page)
-{
-        return rcf_cmds_show(filter, page, READ);
-}
-
-static ssize_t rcf_writecmds_show(struct blk_cmd_filter *filter,
-                                  char *page)
-{
-        return rcf_cmds_show(filter, page, WRITE);
-}
-
-static ssize_t rcf_cmds_store(struct blk_cmd_filter *filter,
-                              const char *page, size_t count, int rw)
-{
-        unsigned long okbits[BLK_SCSI_CMD_PER_LONG], *target_okbits;
-        int cmd, set;
-        char *p, *status;
-
-        if (rw == READ) {
-                memcpy(&okbits, filter->read_ok, sizeof(okbits));
-                target_okbits = filter->read_ok;
-        } else {
-                memcpy(&okbits, filter->write_ok, sizeof(okbits));
-                target_okbits = filter->write_ok;
-        }
-
-        while ((p = strsep((char **)&page, " ")) != NULL) {
-                set = 1;
-
-                if (p[0] == '+') {
-                        p++;
-                } else if (p[0] == '-') {
-                        set = 0;
-                        p++;
-                }
-
-                cmd = simple_strtol(p, &status, 16);
-
-                /* either of these cases means invalid input, so do nothing. */
-                if ((status == p) || cmd >= BLK_SCSI_MAX_CMDS)
-                        return -EINVAL;
-
-                if (set)
-                        __set_bit(cmd, okbits);
-                else
-                        __clear_bit(cmd, okbits);
-        }
-
-        memcpy(target_okbits, okbits, sizeof(okbits));
-        return count;
-}
-
-static ssize_t rcf_readcmds_store(struct blk_cmd_filter *filter,
-                                  const char *page, size_t count)
-{
-        return rcf_cmds_store(filter, page, count, READ);
-}
-
-static ssize_t rcf_writecmds_store(struct blk_cmd_filter *filter,
-                                   const char *page, size_t count)
-{
-        return rcf_cmds_store(filter, page, count, WRITE);
-}
-
-struct rcf_sysfs_entry {
-        struct attribute attr;
-        ssize_t (*show)(struct blk_cmd_filter *, char *);
-        ssize_t (*store)(struct blk_cmd_filter *, const char *, size_t);
-};
-
-static struct rcf_sysfs_entry rcf_readcmds_entry = {
-        .attr = { .name = "read_table", .mode = S_IRUGO | S_IWUSR },
-        .show = rcf_readcmds_show,
-        .store = rcf_readcmds_store,
-};
-
-static struct rcf_sysfs_entry rcf_writecmds_entry = {
-        .attr = {.name = "write_table", .mode = S_IRUGO | S_IWUSR },
-        .show = rcf_writecmds_show,
-        .store = rcf_writecmds_store,
-};
-
-static struct attribute *default_attrs[] = {
-        &rcf_readcmds_entry.attr,
-        &rcf_writecmds_entry.attr,
-        NULL,
-};
-
-#define to_rcf(atr) container_of((atr), struct rcf_sysfs_entry, attr)
-
-static ssize_t
-rcf_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
-{
-        struct rcf_sysfs_entry *entry = to_rcf(attr);
-        struct blk_cmd_filter *filter;
-
-        filter = container_of(kobj, struct blk_cmd_filter, kobj);
-        if (entry->show)
-                return entry->show(filter, page);
-
-        return 0;
-}
-
-static ssize_t
-rcf_attr_store(struct kobject *kobj, struct attribute *attr,
-               const char *page, size_t length)
-{
-        struct rcf_sysfs_entry *entry = to_rcf(attr);
-        struct blk_cmd_filter *filter;
-
-        if (!capable(CAP_SYS_RAWIO))
-                return -EPERM;
-
-        if (!entry->store)
-                return -EINVAL;
-
-        filter = container_of(kobj, struct blk_cmd_filter, kobj);
-        return entry->store(filter, page, length);
-}
-
-static struct sysfs_ops rcf_sysfs_ops = {
-        .show = rcf_attr_show,
-        .store = rcf_attr_store,
-};
-
-static struct kobj_type rcf_ktype = {
-        .sysfs_ops = &rcf_sysfs_ops,
-        .default_attrs = default_attrs,
-};
-
-int blk_register_filter(struct gendisk *disk)
-{
-        int ret;
-        struct blk_cmd_filter *filter = &disk->queue->cmd_filter;
-
-        ret = kobject_init_and_add(&filter->kobj, &rcf_ktype,
-                                   &disk_to_dev(disk)->kobj,
-                                   "%s", "cmd_filter");
-        if (ret < 0)
-                return ret;
-
-        return 0;
-}
-EXPORT_SYMBOL(blk_register_filter);
-
-void blk_unregister_filter(struct gendisk *disk)
-{
-        struct blk_cmd_filter *filter = &disk->queue->cmd_filter;
-
-        kobject_put(&filter->kobj);
-}
-EXPORT_SYMBOL(blk_unregister_filter);
-#endif
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 5f8e798ede4e..f0e0ce0a607d 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -32,6 +32,11 @@
 #include <scsi/scsi_ioctl.h>
 #include <scsi/scsi_cmnd.h>
 
+struct blk_cmd_filter {
+        unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
+        unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
+} blk_default_cmd_filter;
+
 /* Command group 3 is reserved and should never be used. */
 const unsigned char scsi_command_size_tbl[8] =
 {
@@ -105,7 +110,7 @@ static int sg_emulated_host(struct request_queue *q, int __user *p)
         return put_user(1, p);
 }
 
-void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
+static void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
 {
         /* Basic read-only commands */
         __set_bit(TEST_UNIT_READY, filter->read_ok);
@@ -187,14 +192,37 @@ void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
         __set_bit(GPCMD_SET_STREAMING, filter->write_ok);
         __set_bit(GPCMD_SET_READ_AHEAD, filter->write_ok);
 }
-EXPORT_SYMBOL_GPL(blk_set_cmd_filter_defaults);
+
+int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm)
+{
+        struct blk_cmd_filter *filter = &blk_default_cmd_filter;
+
+        /* root can do any command. */
+        if (capable(CAP_SYS_RAWIO))
+                return 0;
+
+        /* if there's no filter set, assume we're filtering everything out */
+        if (!filter)
+                return -EPERM;
+
+        /* Anybody who can open the device can do a read-safe command */
+        if (test_bit(cmd[0], filter->read_ok))
+                return 0;
+
+        /* Write-safe commands require a writable open */
+        if (test_bit(cmd[0], filter->write_ok) && has_write_perm)
+                return 0;
+
+        return -EPERM;
+}
+EXPORT_SYMBOL(blk_verify_command);
 
 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
                              struct sg_io_hdr *hdr, fmode_t mode)
 {
         if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
                 return -EFAULT;
-        if (blk_verify_command(&q->cmd_filter, rq->cmd, mode & FMODE_WRITE))
+        if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
                 return -EPERM;
 
         /*
@@ -427,7 +455,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
         if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
                 goto error;
 
-        err = blk_verify_command(&q->cmd_filter, rq->cmd, mode & FMODE_WRITE);
+        err = blk_verify_command(rq->cmd, mode & FMODE_WRITE);
         if (err)
                 goto error;
 
@@ -645,5 +673,10 @@ int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mod
         blk_put_queue(q);
         return err;
 }
-
 EXPORT_SYMBOL(scsi_cmd_ioctl);
+
+int __init blk_scsi_ioctl_init(void)
+{
+        blk_set_cmd_filter_defaults(&blk_default_cmd_filter);
+        return 0;
+}
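
With the per-queue filter gone, every unprivileged SG_IO submission is
checked against the single built-in table set up above. A userspace sketch
of a command the default filter admits -- TEST UNIT READY (opcode 0x00) is
in read_ok, so any process that can open the node may issue it; the device
path is illustrative:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

int main(void)
{
        unsigned char cdb[6] = { 0x00 };        /* TEST UNIT READY */
        unsigned char sense[32];
        struct sg_io_hdr io;
        int fd = open("/dev/sda", O_RDONLY);    /* illustrative path */

        if (fd < 0)
                return 1;

        memset(&io, 0, sizeof(io));
        io.interface_id = 'S';
        io.cmd_len = sizeof(cdb);
        io.cmdp = cdb;
        io.dxfer_direction = SG_DXFER_NONE;
        io.sbp = sense;
        io.mx_sb_len = sizeof(sense);
        io.timeout = 5000;                      /* milliseconds */

        /* A write-safe or unlisted opcode on an O_RDONLY fd gets -EPERM. */
        if (ioctl(fd, SG_IO, &io) < 0)
                perror("SG_IO");
        return 0;
}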
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 3c6d4ee8921d..9acd54a5cffb 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1017,7 +1017,7 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
         clone->bi_flags |= 1 << BIO_CLONED;
 
         if (bio_integrity(bio)) {
-                bio_integrity_clone(clone, bio, GFP_NOIO);
+                bio_integrity_clone(clone, bio, GFP_NOIO, bs);
                 bio_integrity_trim(clone,
                                    bio_sector_offset(bio, idx, offset), len);
         }
@@ -1045,7 +1045,7 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
         clone->bi_flags &= ~(1 << BIO_SEG_VALID);
 
         if (bio_integrity(bio)) {
-                bio_integrity_clone(clone, bio, GFP_NOIO);
+                bio_integrity_clone(clone, bio, GFP_NOIO, bs);
 
                 if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
                         bio_integrity_trim(clone,
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 8201387b4daa..ef142fd47a83 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -210,13 +210,11 @@ static void sg_put_dev(Sg_device *sdp);
 static int sg_allow_access(struct file *filp, unsigned char *cmd)
 {
         struct sg_fd *sfp = (struct sg_fd *)filp->private_data;
-        struct request_queue *q = sfp->parentdp->device->request_queue;
 
         if (sfp->parentdp->device->type == TYPE_SCANNER)
                 return 0;
 
-        return blk_verify_command(&q->cmd_filter,
-                                  cmd, filp->f_mode & FMODE_WRITE);
+        return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE);
 }
 
 static int
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index 31c46a241bac..49a34e7f7306 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -1,7 +1,7 @@
 /*
  * bio-integrity.c - bio data integrity extensions
  *
- * Copyright (C) 2007, 2008 Oracle Corporation
+ * Copyright (C) 2007, 2008, 2009 Oracle Corporation
  * Written by: Martin K. Petersen <martin.petersen@oracle.com>
  *
  * This program is free software; you can redistribute it and/or
@@ -25,63 +25,121 @@
 #include <linux/bio.h>
 #include <linux/workqueue.h>
 
-static struct kmem_cache *bio_integrity_slab __read_mostly;
-static mempool_t *bio_integrity_pool;
-static struct bio_set *integrity_bio_set;
+struct integrity_slab {
+        struct kmem_cache *slab;
+        unsigned short nr_vecs;
+        char name[8];
+};
+
+#define IS(x) { .nr_vecs = x, .name = "bip-"__stringify(x) }
+struct integrity_slab bip_slab[BIOVEC_NR_POOLS] __read_mostly = {
+        IS(1), IS(4), IS(16), IS(64), IS(128), IS(BIO_MAX_PAGES),
+};
+#undef IS
+
 static struct workqueue_struct *kintegrityd_wq;
 
+static inline unsigned int vecs_to_idx(unsigned int nr)
+{
+        switch (nr) {
+        case 1:
+                return 0;
+        case 2 ... 4:
+                return 1;
+        case 5 ... 16:
+                return 2;
+        case 17 ... 64:
+                return 3;
+        case 65 ... 128:
+                return 4;
+        case 129 ... BIO_MAX_PAGES:
+                return 5;
+        default:
+                BUG();
+        }
+}
+
+static inline int use_bip_pool(unsigned int idx)
+{
+        if (idx == BIOVEC_NR_POOLS)
+                return 1;
+
+        return 0;
+}
+
 /**
- * bio_integrity_alloc - Allocate integrity payload and attach it to bio
+ * bio_integrity_alloc_bioset - Allocate integrity payload and attach it to bio
  * @bio:       bio to attach integrity metadata to
  * @gfp_mask:  Memory allocation mask
  * @nr_vecs:   Number of integrity metadata scatter-gather elements
+ * @bs:        bio_set to allocate from
  *
  * Description: This function prepares a bio for attaching integrity
  * metadata. nr_vecs specifies the maximum number of pages containing
  * integrity metadata that can be attached.
  */
-struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
-                                                  gfp_t gfp_mask,
-                                                  unsigned int nr_vecs)
+struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *bio,
+                                                         gfp_t gfp_mask,
+                                                         unsigned int nr_vecs,
+                                                         struct bio_set *bs)
 {
         struct bio_integrity_payload *bip;
-        struct bio_vec *iv;
-        unsigned long idx;
+        unsigned int idx = vecs_to_idx(nr_vecs);
 
         BUG_ON(bio == NULL);
+        bip = NULL;
 
-        bip = mempool_alloc(bio_integrity_pool, gfp_mask);
-        if (unlikely(bip == NULL)) {
-                printk(KERN_ERR "%s: could not alloc bip\n", __func__);
-                return NULL;
-        }
+        /* Lower order allocations come straight from slab */
+        if (!use_bip_pool(idx))
+                bip = kmem_cache_alloc(bip_slab[idx].slab, gfp_mask);
 
-        memset(bip, 0, sizeof(*bip));
+        /* Use mempool if lower order alloc failed or max vecs were requested */
+        if (bip == NULL) {
+                bip = mempool_alloc(bs->bio_integrity_pool, gfp_mask);
 
-        iv = bvec_alloc_bs(gfp_mask, nr_vecs, &idx, integrity_bio_set);
-        if (unlikely(iv == NULL)) {
-                printk(KERN_ERR "%s: could not alloc bip_vec\n", __func__);
-                mempool_free(bip, bio_integrity_pool);
-                return NULL;
+                if (unlikely(bip == NULL)) {
+                        printk(KERN_ERR "%s: could not alloc bip\n", __func__);
+                        return NULL;
+                }
         }
 
-        bip->bip_pool = idx;
-        bip->bip_vec = iv;
+        memset(bip, 0, sizeof(*bip));
+
+        bip->bip_slab = idx;
         bip->bip_bio = bio;
         bio->bi_integrity = bip;
 
         return bip;
 }
+EXPORT_SYMBOL(bio_integrity_alloc_bioset);
+
+/**
+ * bio_integrity_alloc - Allocate integrity payload and attach it to bio
+ * @bio:       bio to attach integrity metadata to
+ * @gfp_mask:  Memory allocation mask
+ * @nr_vecs:   Number of integrity metadata scatter-gather elements
+ *
+ * Description: This function prepares a bio for attaching integrity
+ * metadata. nr_vecs specifies the maximum number of pages containing
+ * integrity metadata that can be attached.
+ */
+struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
+                                                  gfp_t gfp_mask,
+                                                  unsigned int nr_vecs)
+{
+        return bio_integrity_alloc_bioset(bio, gfp_mask, nr_vecs, fs_bio_set);
+}
 EXPORT_SYMBOL(bio_integrity_alloc);
 
 /**
  * bio_integrity_free - Free bio integrity payload
  * @bio:       bio containing bip to be freed
+ * @bs:        bio_set this bio was allocated from
  *
  * Description: Used to free the integrity portion of a bio. Usually
  * called from bio_free().
  */
-void bio_integrity_free(struct bio *bio)
+void bio_integrity_free(struct bio *bio, struct bio_set *bs)
 {
         struct bio_integrity_payload *bip = bio->bi_integrity;
 
@@ -92,8 +150,10 @@ void bio_integrity_free(struct bio *bio)
             && bip->bip_buf != NULL)
                 kfree(bip->bip_buf);
 
-        bvec_free_bs(integrity_bio_set, bip->bip_vec, bip->bip_pool);
-        mempool_free(bip, bio_integrity_pool);
+        if (use_bip_pool(bip->bip_slab))
+                mempool_free(bip, bs->bio_integrity_pool);
+        else
+                kmem_cache_free(bip_slab[bip->bip_slab].slab, bip);
 
         bio->bi_integrity = NULL;
 }
@@ -114,7 +174,7 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,
         struct bio_integrity_payload *bip = bio->bi_integrity;
         struct bio_vec *iv;
 
-        if (bip->bip_vcnt >= bvec_nr_vecs(bip->bip_pool)) {
+        if (bip->bip_vcnt >= bvec_nr_vecs(bip->bip_slab)) {
                 printk(KERN_ERR "%s: bip_vec full\n", __func__);
                 return 0;
         }
@@ -647,8 +707,8 @@ void bio_integrity_split(struct bio *bio, struct bio_pair *bp, int sectors)
         bp->iv1 = bip->bip_vec[0];
         bp->iv2 = bip->bip_vec[0];
 
-        bp->bip1.bip_vec = &bp->iv1;
-        bp->bip2.bip_vec = &bp->iv2;
+        bp->bip1.bip_vec[0] = bp->iv1;
+        bp->bip2.bip_vec[0] = bp->iv2;
 
         bp->iv1.bv_len = sectors * bi->tuple_size;
         bp->iv2.bv_offset += sectors * bi->tuple_size;
@@ -667,17 +727,19 @@ EXPORT_SYMBOL(bio_integrity_split);
  * @bio:       New bio
  * @bio_src:   Original bio
  * @gfp_mask:  Memory allocation mask
+ * @bs:        bio_set to allocate bip from
  *
  * Description: Called to allocate a bip when cloning a bio
  */
-int bio_integrity_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp_mask)
+int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
+                        gfp_t gfp_mask, struct bio_set *bs)
 {
         struct bio_integrity_payload *bip_src = bio_src->bi_integrity;
         struct bio_integrity_payload *bip;
 
         BUG_ON(bip_src == NULL);
 
-        bip = bio_integrity_alloc(bio, gfp_mask, bip_src->bip_vcnt);
+        bip = bio_integrity_alloc_bioset(bio, gfp_mask, bip_src->bip_vcnt, bs);
 
         if (bip == NULL)
                 return -EIO;
@@ -693,25 +755,43 @@ int bio_integrity_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp_mask)
 }
 EXPORT_SYMBOL(bio_integrity_clone);
 
-static int __init bio_integrity_init(void)
+int bioset_integrity_create(struct bio_set *bs, int pool_size)
 {
-        kintegrityd_wq = create_workqueue("kintegrityd");
+        unsigned int max_slab = vecs_to_idx(BIO_MAX_PAGES);
+
+        bs->bio_integrity_pool =
+                mempool_create_slab_pool(pool_size, bip_slab[max_slab].slab);
 
+        if (!bs->bio_integrity_pool)
+                return -1;
+
+        return 0;
+}
+EXPORT_SYMBOL(bioset_integrity_create);
+
+void bioset_integrity_free(struct bio_set *bs)
+{
+        if (bs->bio_integrity_pool)
+                mempool_destroy(bs->bio_integrity_pool);
+}
+EXPORT_SYMBOL(bioset_integrity_free);
+
+void __init bio_integrity_init(void)
+{
+        unsigned int i;
+
+        kintegrityd_wq = create_workqueue("kintegrityd");
         if (!kintegrityd_wq)
                 panic("Failed to create kintegrityd\n");
 
-        bio_integrity_slab = KMEM_CACHE(bio_integrity_payload,
-                                        SLAB_HWCACHE_ALIGN|SLAB_PANIC);
+        for (i = 0 ; i < BIOVEC_NR_POOLS ; i++) {
+                unsigned int size;
 
-        bio_integrity_pool = mempool_create_slab_pool(BIO_POOL_SIZE,
-                                                      bio_integrity_slab);
-        if (!bio_integrity_pool)
-                panic("bio_integrity: can't allocate bip pool\n");
+                size = sizeof(struct bio_integrity_payload)
+                        + bip_slab[i].nr_vecs * sizeof(struct bio_vec);
 
-        integrity_bio_set = bioset_create(BIO_POOL_SIZE, 0);
-        if (!integrity_bio_set)
-                panic("bio_integrity: can't allocate bio_set\n");
-
-        return 0;
+                bip_slab[i].slab =
+                        kmem_cache_create(bip_slab[i].name, size, 0,
+                                          SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+        }
 }
-subsys_initcall(bio_integrity_init);
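
The net effect of the bio-integrity rework: a bip is now allocated from, and
freed back to, the bio_set its bio came from instead of one global pool, with
small vector counts served by per-size slabs. A kernel-style sketch of how a
stacking driver would clone under the new signatures (mirroring the
drivers/md/dm.c hunks above; error handling trimmed, helper name invented):

/* Sketch: clone a bio plus its integrity payload out of a private
 * bio_set under the new API. Note that bioset_create() now also sets
 * up bs->bio_integrity_pool, per the fs/bio.c hunk below. */
static struct bio *clone_with_integrity(struct bio *src, struct bio_set *bs)
{
        struct bio *clone = bio_alloc_bioset(GFP_NOIO, src->bi_max_vecs, bs);

        if (!clone)
                return NULL;

        __bio_clone(clone, src);

        if (bio_integrity(src) &&
            bio_integrity_clone(clone, src, GFP_NOIO, bs)) {
                bio_put(clone);         /* frees the bip back to bs too */
                return NULL;
        }
        return clone;
}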
diff --git a/fs/bio.c b/fs/bio.c
index 24c914043532..1486b19fc431 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -238,7 +238,7 @@ void bio_free(struct bio *bio, struct bio_set *bs)
                 bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio));
 
         if (bio_integrity(bio))
-                bio_integrity_free(bio);
+                bio_integrity_free(bio, bs);
 
         /*
          * If we have front padding, adjust the bio pointer before freeing
@@ -341,7 +341,7 @@ struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs)
 static void bio_kmalloc_destructor(struct bio *bio)
 {
         if (bio_integrity(bio))
-                bio_integrity_free(bio);
+                bio_integrity_free(bio, fs_bio_set);
         kfree(bio);
 }
 
@@ -472,7 +472,7 @@ struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
         if (bio_integrity(bio)) {
                 int ret;
 
-                ret = bio_integrity_clone(b, bio, gfp_mask);
+                ret = bio_integrity_clone(b, bio, gfp_mask, fs_bio_set);
 
                 if (ret < 0) {
                         bio_put(b);
@@ -1539,6 +1539,7 @@ void bioset_free(struct bio_set *bs)
         if (bs->bio_pool)
                 mempool_destroy(bs->bio_pool);
 
+        bioset_integrity_free(bs);
         biovec_free_pools(bs);
         bio_put_slab(bs);
 
@@ -1579,6 +1580,9 @@ struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
         if (!bs->bio_pool)
                 goto bad;
 
+        if (bioset_integrity_create(bs, pool_size))
+                goto bad;
+
         if (!biovec_create_pools(bs, pool_size))
                 return bs;
 
@@ -1616,6 +1620,7 @@ static int __init init_bio(void)
         if (!bio_slabs)
                 panic("bio: can't allocate bios\n");
 
+        bio_integrity_init();
         biovec_init_slabs();
 
         fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 2a04eb54c0dd..2892b710771c 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -319,7 +319,6 @@ static inline int bio_has_allocated_vec(struct bio *bio)
  */
 struct bio_integrity_payload {
         struct bio              *bip_bio;       /* parent bio */
-        struct bio_vec          *bip_vec;       /* integrity data vector */
 
         sector_t                bip_sector;     /* virtual start sector */
 
@@ -328,11 +327,12 @@ struct bio_integrity_payload {
 
         unsigned int            bip_size;
 
-        unsigned short          bip_pool;       /* pool the ivec came from */
+        unsigned short          bip_slab;       /* slab the bip came from */
         unsigned short          bip_vcnt;       /* # of integrity bio_vecs */
         unsigned short          bip_idx;        /* current bip_vec index */
 
         struct work_struct      bip_work;       /* I/O completion */
+        struct bio_vec          bip_vec[0];     /* embedded bvec array */
 };
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
@@ -430,6 +430,9 @@ struct bio_set {
         unsigned int front_pad;
 
         mempool_t *bio_pool;
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+        mempool_t *bio_integrity_pool;
+#endif
         mempool_t *bvec_pool;
 };
 
@@ -634,8 +637,9 @@ static inline struct bio *bio_list_get(struct bio_list *bl)
 
 #define bio_integrity(bio) (bio->bi_integrity != NULL)
 
+extern struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *, gfp_t, unsigned int, struct bio_set *);
 extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
-extern void bio_integrity_free(struct bio *);
+extern void bio_integrity_free(struct bio *, struct bio_set *);
 extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
 extern int bio_integrity_enabled(struct bio *bio);
 extern int bio_integrity_set_tag(struct bio *, void *, unsigned int);
@@ -645,21 +649,27 @@ extern void bio_integrity_endio(struct bio *, int);
645extern void bio_integrity_advance(struct bio *, unsigned int); 649extern void bio_integrity_advance(struct bio *, unsigned int);
646extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int); 650extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
647extern void bio_integrity_split(struct bio *, struct bio_pair *, int); 651extern void bio_integrity_split(struct bio *, struct bio_pair *, int);
648extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t); 652extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t, struct bio_set *);
653extern int bioset_integrity_create(struct bio_set *, int);
654extern void bioset_integrity_free(struct bio_set *);
655extern void bio_integrity_init(void);
649 656
650#else /* CONFIG_BLK_DEV_INTEGRITY */ 657#else /* CONFIG_BLK_DEV_INTEGRITY */
651 658
652#define bio_integrity(a) (0) 659#define bio_integrity(a) (0)
660#define bioset_integrity_create(a, b) (0)
653#define bio_integrity_prep(a) (0) 661#define bio_integrity_prep(a) (0)
654#define bio_integrity_enabled(a) (0) 662#define bio_integrity_enabled(a) (0)
655#define bio_integrity_clone(a, b, c) (0) 663#define bio_integrity_clone(a, b, c, d) (0)
656#define bio_integrity_free(a) do { } while (0) 664#define bioset_integrity_free(a) do { } while (0)
665#define bio_integrity_free(a, b) do { } while (0)
657#define bio_integrity_endio(a, b) do { } while (0) 666#define bio_integrity_endio(a, b) do { } while (0)
658#define bio_integrity_advance(a, b) do { } while (0) 667#define bio_integrity_advance(a, b) do { } while (0)
659#define bio_integrity_trim(a, b, c) do { } while (0) 668#define bio_integrity_trim(a, b, c) do { } while (0)
660#define bio_integrity_split(a, b, c) do { } while (0) 669#define bio_integrity_split(a, b, c) do { } while (0)
661#define bio_integrity_set_tag(a, b, c) do { } while (0) 670#define bio_integrity_set_tag(a, b, c) do { } while (0)
662#define bio_integrity_get_tag(a, b, c) do { } while (0) 671#define bio_integrity_get_tag(a, b, c) do { } while (0)
672#define bio_integrity_init(a) do { } while (0)
663 673
664#endif /* CONFIG_BLK_DEV_INTEGRITY */ 674#endif /* CONFIG_BLK_DEV_INTEGRITY */
665 675
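
The bip_vec[0] change above turns the integrity vector into a trailing
flexible array, so one slab object holds both the payload header and its
vectors; the per-slab sizes are exactly what the bio_integrity_init() loop
computes. A standalone sketch of that sizing arithmetic, with illustrative
names:

#include <stdlib.h>

struct vec { void *page; unsigned int len, offset; };

struct payload {
        unsigned short vcnt;
        struct vec v[];                 /* trailing flexible array */
};

/* One allocation covers the header plus nr_vecs vector slots, mirroring
 * sizeof(struct bio_integrity_payload) + nr_vecs * sizeof(struct bio_vec)
 * in the init loop above. */
static struct payload *payload_alloc(unsigned int nr_vecs)
{
        return calloc(1, sizeof(struct payload)
                         + nr_vecs * sizeof(struct vec));
}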
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 8963d9149b5f..49ae07951d55 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -301,12 +301,6 @@ struct blk_queue_tag {
 #define BLK_SCSI_MAX_CMDS       (256)
 #define BLK_SCSI_CMD_PER_LONG   (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
 
-struct blk_cmd_filter {
-        unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
-        unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
-        struct kobject kobj;
-};
-
 struct queue_limits {
         unsigned long bounce_pfn;
         unsigned long seg_boundary_mask;
@@ -445,7 +439,6 @@ struct request_queue
 #if defined(CONFIG_BLK_DEV_BSG)
         struct bsg_class_device bsg_dev;
 #endif
-        struct blk_cmd_filter cmd_filter;
 };
 
 #define QUEUE_FLAG_CLUSTER      0       /* cluster several segments into 1 */
@@ -998,13 +991,7 @@ static inline int sb_issue_discard(struct super_block *sb,
         return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL);
 }
 
-/*
-* command filter functions
-*/
-extern int blk_verify_command(struct blk_cmd_filter *filter,
-                              unsigned char *cmd, fmode_t has_write_perm);
-extern void blk_unregister_filter(struct gendisk *disk);
-extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);
+extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
 
 #define MAX_PHYS_SEGMENTS       128
 #define MAX_HW_SEGMENTS         128