author		Jens Axboe <axboe@kernel.dk>	2013-11-08 11:08:12 -0500
committer	Jens Axboe <axboe@kernel.dk>	2013-11-08 11:08:12 -0500
commit		e37459b8e2c7db6735e39e019e448b76e5e77647 (patch)
tree		a3f0944db87a8ae0d41e5acbbbabc1e7ef534d1b /drivers
parent		c7d1ba417c7cb7297d14dd47a390ec90ce548d5c (diff)
parent		e7e245000110a7794de8f925b9edc06a9c852f80 (diff)
Merge branch 'blk-mq/core' into for-3.13/core
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Conflicts:
block/blk-timeout.c
Diffstat (limited to 'drivers')
 -rw-r--r--  drivers/block/Kconfig    |   3
 -rw-r--r--  drivers/block/Makefile   |   1
 -rw-r--r--  drivers/block/floppy.c   |   4
 -rw-r--r--  drivers/block/null_blk.c | 635
 -rw-r--r--  drivers/scsi/sd.c        |   2
 5 files changed, 642 insertions, 3 deletions
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index e07a5fd58ad7..4682546c5da7 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -15,6 +15,9 @@ menuconfig BLK_DEV
 
 if BLK_DEV
 
+config BLK_DEV_NULL_BLK
+	tristate "Null test block driver"
+
 config BLK_DEV_FD
 	tristate "Normal floppy disk support"
 	depends on ARCH_MAY_HAVE_PC_FDC
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index ca07399a8d99..03b3b4a2bd8a 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_BLK_DEV_RBD) += rbd.o
 obj-$(CONFIG_BLK_DEV_PCIESSD_MTIP32XX) += mtip32xx/
 
 obj-$(CONFIG_BLK_DEV_RSXX) += rsxx/
+obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk.o
 
 nvme-y := nvme-core.o nvme-scsi.o
 swim_mod-y := swim.o swim_asm.o
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 04ceb7e2fadd..000abe2f105c 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -2886,9 +2886,9 @@ static void do_fd_request(struct request_queue *q)
 		return;
 
 	if (WARN(atomic_read(&usage_count) == 0,
-		 "warning: usage count=0, current_req=%p sect=%ld type=%x flags=%x\n",
+		 "warning: usage count=0, current_req=%p sect=%ld type=%x flags=%llx\n",
 		 current_req, (long)blk_rq_pos(current_req), current_req->cmd_type,
-		 current_req->cmd_flags))
+		 (unsigned long long) current_req->cmd_flags))
 		return;
 
 	if (test_and_set_bit(0, &fdc_busy)) {
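
The floppy.c hunk above (and the sd.c hunk at the end of this patch) exists because the blk-mq side of this merge widens rq->cmd_flags to a 64-bit type, so the old %x specifier no longer matches the argument. A minimal standalone sketch of the portable-printing rule these hunks follow (not part of the patch; the variable here is illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* high bits like these are silently lost if printed with %x */
	uint64_t cmd_flags = 0x700000001ULL;

	/*
	 * uint64_t/u64 may be unsigned long on one architecture and
	 * unsigned long long on another, so a fixed %llx specifier
	 * needs an explicit cast -- exactly the pattern the patch adds.
	 */
	printf("flags=%llx\n", (unsigned long long) cmd_flags);
	return 0;
}
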
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
new file mode 100644
index 000000000000..b5d842370cc9
--- /dev/null
+++ b/drivers/block/null_blk.c
@@ -0,0 +1,635 @@
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/blk-mq.h>
+#include <linux/hrtimer.h>
+
+struct nullb_cmd {
+	struct list_head list;
+	struct llist_node ll_list;
+	struct call_single_data csd;
+	struct request *rq;
+	struct bio *bio;
+	unsigned int tag;
+	struct nullb_queue *nq;
+};
+
+struct nullb_queue {
+	unsigned long *tag_map;
+	wait_queue_head_t wait;
+	unsigned int queue_depth;
+
+	struct nullb_cmd *cmds;
+};
+
+struct nullb {
+	struct list_head list;
+	unsigned int index;
+	struct request_queue *q;
+	struct gendisk *disk;
+	struct hrtimer timer;
+	unsigned int queue_depth;
+	spinlock_t lock;
+
+	struct nullb_queue *queues;
+	unsigned int nr_queues;
+};
+
+static LIST_HEAD(nullb_list);
+static struct mutex lock;
+static int null_major;
+static int nullb_indexes;
+
+struct completion_queue {
+	struct llist_head list;
+	struct hrtimer timer;
+};
+
+/*
+ * These are per-cpu for now, they will need to be configured by the
+ * complete_queues parameter and appropriately mapped.
+ */
+static DEFINE_PER_CPU(struct completion_queue, completion_queues);
+
+enum {
+	NULL_IRQ_NONE		= 0,
+	NULL_IRQ_SOFTIRQ	= 1,
+	NULL_IRQ_TIMER		= 2,
+
+	NULL_Q_BIO		= 0,
+	NULL_Q_RQ		= 1,
+	NULL_Q_MQ		= 2,
+};
+
+static int submit_queues = 1;
+module_param(submit_queues, int, S_IRUGO);
+MODULE_PARM_DESC(submit_queues, "Number of submission queues");
+
+static int home_node = NUMA_NO_NODE;
+module_param(home_node, int, S_IRUGO);
+MODULE_PARM_DESC(home_node, "Home node for the device");
+
+static int queue_mode = NULL_Q_MQ;
+module_param(queue_mode, int, S_IRUGO);
+MODULE_PARM_DESC(queue_mode, "Use blk-mq interface (0=bio,1=rq,2=multiqueue)");
+
+static int gb = 250;
+module_param(gb, int, S_IRUGO);
+MODULE_PARM_DESC(gb, "Size in GB");
+
+static int bs = 512;
+module_param(bs, int, S_IRUGO);
+MODULE_PARM_DESC(bs, "Block size (in bytes)");
+
+static int nr_devices = 2;
+module_param(nr_devices, int, S_IRUGO);
+MODULE_PARM_DESC(nr_devices, "Number of devices to register");
+
+static int irqmode = NULL_IRQ_SOFTIRQ;
+module_param(irqmode, int, S_IRUGO);
+MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");
+
+static int completion_nsec = 10000;
+module_param(completion_nsec, int, S_IRUGO);
+MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");
+
+static int hw_queue_depth = 64;
+module_param(hw_queue_depth, int, S_IRUGO);
+MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");
+
+static bool use_per_node_hctx = true;
+module_param(use_per_node_hctx, bool, S_IRUGO);
+MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: true");
+
+static void put_tag(struct nullb_queue *nq, unsigned int tag)
+{
+	clear_bit_unlock(tag, nq->tag_map);
+
+	if (waitqueue_active(&nq->wait))
+		wake_up(&nq->wait);
+}
+
+static unsigned int get_tag(struct nullb_queue *nq)
+{
+	unsigned int tag;
+
+	do {
+		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
+		if (tag >= nq->queue_depth)
+			return -1U;
+	} while (test_and_set_bit_lock(tag, nq->tag_map));
+
+	return tag;
+}
+
+static void free_cmd(struct nullb_cmd *cmd)
+{
+	put_tag(cmd->nq, cmd->tag);
+}
+
+static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
+{
+	struct nullb_cmd *cmd;
+	unsigned int tag;
+
+	tag = get_tag(nq);
+	if (tag != -1U) {
+		cmd = &nq->cmds[tag];
+		cmd->tag = tag;
+		cmd->nq = nq;
+		return cmd;
+	}
+
+	return NULL;
+}
+
+static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
+{
+	struct nullb_cmd *cmd;
+	DEFINE_WAIT(wait);
+
+	cmd = __alloc_cmd(nq);
+	if (cmd || !can_wait)
+		return cmd;
+
+	do {
+		prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
+		cmd = __alloc_cmd(nq);
+		if (cmd)
+			break;
+
+		io_schedule();
+	} while (1);
+
+	finish_wait(&nq->wait, &wait);
+	return cmd;
+}
+
+static void end_cmd(struct nullb_cmd *cmd)
+{
+	if (cmd->rq) {
+		if (queue_mode == NULL_Q_MQ)
+			blk_mq_end_io(cmd->rq, 0);
+		else {
+			INIT_LIST_HEAD(&cmd->rq->queuelist);
+			blk_end_request_all(cmd->rq, 0);
+		}
+	} else if (cmd->bio)
+		bio_endio(cmd->bio, 0);
+
+	if (queue_mode != NULL_Q_MQ)
+		free_cmd(cmd);
+}
+
+static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
+{
+	struct completion_queue *cq;
+	struct llist_node *entry;
+	struct nullb_cmd *cmd;
+
+	cq = &per_cpu(completion_queues, smp_processor_id());
+
+	while ((entry = llist_del_all(&cq->list)) != NULL) {
+		do {
+			cmd = container_of(entry, struct nullb_cmd, ll_list);
+			end_cmd(cmd);
+			entry = entry->next;
+		} while (entry);
+	}
+
+	return HRTIMER_NORESTART;
+}
+
+static void null_cmd_end_timer(struct nullb_cmd *cmd)
+{
+	struct completion_queue *cq = &per_cpu(completion_queues, get_cpu());
+
+	cmd->ll_list.next = NULL;
+	if (llist_add(&cmd->ll_list, &cq->list)) {
+		ktime_t kt = ktime_set(0, completion_nsec);
+
+		hrtimer_start(&cq->timer, kt, HRTIMER_MODE_REL);
+	}
+
+	put_cpu();
+}
+
+static void null_softirq_done_fn(struct request *rq)
+{
+	blk_end_request_all(rq, 0);
+}
+
+#if defined(CONFIG_SMP) && defined(CONFIG_USE_GENERIC_SMP_HELPERS)
+
+static void null_ipi_cmd_end_io(void *data)
+{
+	struct completion_queue *cq;
+	struct llist_node *entry, *next;
+	struct nullb_cmd *cmd;
+
+	cq = &per_cpu(completion_queues, smp_processor_id());
+
+	entry = llist_del_all(&cq->list);
+
+	while (entry) {
+		next = entry->next;
+		cmd = llist_entry(entry, struct nullb_cmd, ll_list);
+		end_cmd(cmd);
+		entry = next;
+	}
+}
+
+static void null_cmd_end_ipi(struct nullb_cmd *cmd)
+{
+	struct call_single_data *data = &cmd->csd;
+	int cpu = get_cpu();
+	struct completion_queue *cq = &per_cpu(completion_queues, cpu);
+
+	cmd->ll_list.next = NULL;
+
+	if (llist_add(&cmd->ll_list, &cq->list)) {
+		data->func = null_ipi_cmd_end_io;
+		data->flags = 0;
+		__smp_call_function_single(cpu, data, 0);
+	}
+
+	put_cpu();
+}
+
+#endif /* CONFIG_SMP && CONFIG_USE_GENERIC_SMP_HELPERS */
+
+static inline void null_handle_cmd(struct nullb_cmd *cmd)
+{
+	/* Complete IO by inline, softirq or timer */
+	switch (irqmode) {
+	case NULL_IRQ_NONE:
+		end_cmd(cmd);
+		break;
+	case NULL_IRQ_SOFTIRQ:
+#if defined(CONFIG_SMP) && defined(CONFIG_USE_GENERIC_SMP_HELPERS)
+		null_cmd_end_ipi(cmd);
+#else
+		end_cmd(cmd);
+#endif
+		break;
+	case NULL_IRQ_TIMER:
+		null_cmd_end_timer(cmd);
+		break;
+	}
+}
+
+static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
+{
+	int index = 0;
+
+	if (nullb->nr_queues != 1)
+		index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);
+
+	return &nullb->queues[index];
+}
+
+static void null_queue_bio(struct request_queue *q, struct bio *bio)
+{
+	struct nullb *nullb = q->queuedata;
+	struct nullb_queue *nq = nullb_to_queue(nullb);
+	struct nullb_cmd *cmd;
+
+	cmd = alloc_cmd(nq, 1);
+	cmd->bio = bio;
+
+	null_handle_cmd(cmd);
+}
+
+static int null_rq_prep_fn(struct request_queue *q, struct request *req)
+{
+	struct nullb *nullb = q->queuedata;
+	struct nullb_queue *nq = nullb_to_queue(nullb);
+	struct nullb_cmd *cmd;
+
+	cmd = alloc_cmd(nq, 0);
+	if (cmd) {
+		cmd->rq = req;
+		req->special = cmd;
+		return BLKPREP_OK;
+	}
+
+	return BLKPREP_DEFER;
+}
+
+static void null_request_fn(struct request_queue *q)
+{
+	struct request *rq;
+
+	while ((rq = blk_fetch_request(q)) != NULL) {
+		struct nullb_cmd *cmd = rq->special;
+
+		spin_unlock_irq(q->queue_lock);
+		null_handle_cmd(cmd);
+		spin_lock_irq(q->queue_lock);
+	}
+}
+
+static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
+{
+	struct nullb_cmd *cmd = rq->special;
+
+	cmd->rq = rq;
+	cmd->nq = hctx->driver_data;
+
+	null_handle_cmd(cmd);
+	return BLK_MQ_RQ_QUEUE_OK;
+}
+
+static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg, unsigned int hctx_index)
+{
+	return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL,
+				hctx_index);
+}
+
+static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
+{
+	kfree(hctx);
+}
+
+static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+			  unsigned int index)
+{
+	struct nullb *nullb = data;
+	struct nullb_queue *nq = &nullb->queues[index];
+
+	init_waitqueue_head(&nq->wait);
+	nq->queue_depth = nullb->queue_depth;
+	nullb->nr_queues++;
+	hctx->driver_data = nq;
+
+	return 0;
+}
+
+static struct blk_mq_ops null_mq_ops = {
+	.queue_rq = null_queue_rq,
+	.map_queue = blk_mq_map_queue,
+	.init_hctx = null_init_hctx,
+};
+
+static struct blk_mq_reg null_mq_reg = {
+	.ops = &null_mq_ops,
+	.queue_depth = 64,
+	.cmd_size = sizeof(struct nullb_cmd),
+	.flags = BLK_MQ_F_SHOULD_MERGE,
+};
+
+static void null_del_dev(struct nullb *nullb)
+{
+	list_del_init(&nullb->list);
+
+	del_gendisk(nullb->disk);
+	if (queue_mode == NULL_Q_MQ)
+		blk_mq_free_queue(nullb->q);
+	else
+		blk_cleanup_queue(nullb->q);
+	put_disk(nullb->disk);
+	kfree(nullb);
+}
+
+static int null_open(struct block_device *bdev, fmode_t mode)
+{
+	return 0;
+}
+
+static void null_release(struct gendisk *disk, fmode_t mode)
+{
+}
+
+static const struct block_device_operations null_fops = {
+	.owner = THIS_MODULE,
+	.open = null_open,
+	.release = null_release,
+};
+
+static int setup_commands(struct nullb_queue *nq)
+{
+	struct nullb_cmd *cmd;
+	int i, tag_size;
+
+	nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
+	if (!nq->cmds)
+		return 1;
+
+	tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
+	nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
+	if (!nq->tag_map) {
+		kfree(nq->cmds);
+		return 1;
+	}
+
+	for (i = 0; i < nq->queue_depth; i++) {
+		cmd = &nq->cmds[i];
+		INIT_LIST_HEAD(&cmd->list);
+		cmd->ll_list.next = NULL;
+		cmd->tag = -1U;
+	}
+
+	return 0;
+}
+
+static void cleanup_queue(struct nullb_queue *nq)
+{
+	kfree(nq->tag_map);
+	kfree(nq->cmds);
+}
+
+static void cleanup_queues(struct nullb *nullb)
+{
+	int i;
+
+	for (i = 0; i < nullb->nr_queues; i++)
+		cleanup_queue(&nullb->queues[i]);
+
+	kfree(nullb->queues);
+}
+
+static int setup_queues(struct nullb *nullb)
+{
+	struct nullb_queue *nq;
+	int i;
+
+	nullb->queues = kzalloc(submit_queues * sizeof(*nq), GFP_KERNEL);
+	if (!nullb->queues)
+		return 1;
+
+	nullb->nr_queues = 0;
+	nullb->queue_depth = hw_queue_depth;
+
+	if (queue_mode == NULL_Q_MQ)
+		return 0;
+
+	for (i = 0; i < submit_queues; i++) {
+		nq = &nullb->queues[i];
+		init_waitqueue_head(&nq->wait);
+		nq->queue_depth = hw_queue_depth;
+		if (setup_commands(nq))
+			break;
+		nullb->nr_queues++;
+	}
+
+	if (i == submit_queues)
+		return 0;
+
+	cleanup_queues(nullb);
+	return 1;
+}
+
+static int null_add_dev(void)
+{
+	struct gendisk *disk;
+	struct nullb *nullb;
+	sector_t size;
+
+	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
+	if (!nullb)
+		return -ENOMEM;
+
+	spin_lock_init(&nullb->lock);
+
+	if (setup_queues(nullb))
+		goto err;
+
+	if (queue_mode == NULL_Q_MQ) {
+		null_mq_reg.numa_node = home_node;
+		null_mq_reg.queue_depth = hw_queue_depth;
+
+		if (use_per_node_hctx) {
+			null_mq_reg.ops->alloc_hctx = null_alloc_hctx;
+			null_mq_reg.ops->free_hctx = null_free_hctx;
+
+			null_mq_reg.nr_hw_queues = nr_online_nodes;
+		} else {
+			null_mq_reg.ops->alloc_hctx = blk_mq_alloc_single_hw_queue;
+			null_mq_reg.ops->free_hctx = blk_mq_free_single_hw_queue;
+
+			null_mq_reg.nr_hw_queues = submit_queues;
+		}
+
+		nullb->q = blk_mq_init_queue(&null_mq_reg, nullb);
+	} else if (queue_mode == NULL_Q_BIO) {
+		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
+		blk_queue_make_request(nullb->q, null_queue_bio);
+	} else {
+		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
+		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
+		if (nullb->q)
+			blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
+	}
+
+	if (!nullb->q)
+		goto queue_fail;
+
+	nullb->q->queuedata = nullb;
+	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
+
+	disk = nullb->disk = alloc_disk_node(1, home_node);
+	if (!disk) {
+queue_fail:
+		if (queue_mode == NULL_Q_MQ)
+			blk_mq_free_queue(nullb->q);
+		else
+			blk_cleanup_queue(nullb->q);
+		cleanup_queues(nullb);
+err:
+		kfree(nullb);
+		return -ENOMEM;
+	}
+
+	mutex_lock(&lock);
+	list_add_tail(&nullb->list, &nullb_list);
+	nullb->index = nullb_indexes++;
+	mutex_unlock(&lock);
+
+	blk_queue_logical_block_size(nullb->q, bs);
+	blk_queue_physical_block_size(nullb->q, bs);
+
+	size = gb * 1024 * 1024 * 1024ULL;
+	sector_div(size, bs);
+	set_capacity(disk, size);
+
+	disk->flags |= GENHD_FL_EXT_DEVT;
+	disk->major = null_major;
+	disk->first_minor = nullb->index;
+	disk->fops = &null_fops;
+	disk->private_data = nullb;
+	disk->queue = nullb->q;
+	sprintf(disk->disk_name, "nullb%d", nullb->index);
+	add_disk(disk);
+	return 0;
+}
+
+static int __init null_init(void)
+{
+	unsigned int i;
+
+#if !defined(CONFIG_SMP) || !defined(CONFIG_USE_GENERIC_SMP_HELPERS)
+	if (irqmode == NULL_IRQ_SOFTIRQ) {
+		pr_warn("null_blk: softirq completions not available.\n");
+		pr_warn("null_blk: using direct completions.\n");
+		irqmode = NULL_IRQ_NONE;
+	}
+#endif
+
+	if (submit_queues > nr_cpu_ids)
+		submit_queues = nr_cpu_ids;
+	else if (!submit_queues)
+		submit_queues = 1;
+
+	mutex_init(&lock);
+
+	/* Initialize a separate list for each CPU for issuing softirqs */
+	for_each_possible_cpu(i) {
+		struct completion_queue *cq = &per_cpu(completion_queues, i);
+
+		init_llist_head(&cq->list);
+
+		if (irqmode != NULL_IRQ_TIMER)
+			continue;
+
+		hrtimer_init(&cq->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+		cq->timer.function = null_cmd_timer_expired;
+	}
+
+	null_major = register_blkdev(0, "nullb");
+	if (null_major < 0)
+		return null_major;
+
+	for (i = 0; i < nr_devices; i++) {
+		if (null_add_dev()) {
+			unregister_blkdev(null_major, "nullb");
+			return -EINVAL;
+		}
+	}
+
+	pr_info("null: module loaded\n");
+	return 0;
+}
+
+static void __exit null_exit(void)
+{
+	struct nullb *nullb;
+
+	unregister_blkdev(null_major, "nullb");
+
+	mutex_lock(&lock);
+	while (!list_empty(&nullb_list)) {
+		nullb = list_entry(nullb_list.next, struct nullb, list);
+		null_del_dev(nullb);
+	}
+	mutex_unlock(&lock);
+}
+
+module_init(null_init);
+module_exit(null_exit);
+
+MODULE_AUTHOR("Jens Axboe <jaxboe@fusionio.com>");
+MODULE_LICENSE("GPL");
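
A note on the completion path added above: null_cmd_end_timer() and null_cmd_end_ipi() both rely on llist_add() returning true only when the per-CPU list was previously empty, so a single producer arms the hrtimer or IPI and one handler later drains the whole batch via llist_del_all(). A rough userspace analogue of that first-producer idiom, sketched with C11 atomics (the names here are illustrative, not from the driver):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct node {
	struct node *next;
};

static _Atomic(struct node *) head;

/* push a node; returns true if the list was empty beforehand,
 * mirroring llist_add() -- only that caller schedules the drain */
static bool lockless_add(struct node *n)
{
	struct node *first = atomic_load(&head);

	do {
		n->next = first;
	} while (!atomic_compare_exchange_weak(&head, &first, n));

	return first == NULL;
}

/* detach the entire batch in one step, mirroring llist_del_all() */
static struct node *lockless_del_all(void)
{
	return atomic_exchange(&head, NULL);
}

In the driver, null_ipi_cmd_end_io() and null_cmd_timer_expired() are the consumer side of this pairing: each grabs the whole pending list in one atomic exchange and then completes every command on it.
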
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 5693f6d7eddb..7fe4faaa149b 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1002,7 +1002,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
 		SCpnt->cmnd[0] = READ_6;
 		SCpnt->sc_data_direction = DMA_FROM_DEVICE;
 	} else {
-		scmd_printk(KERN_ERR, SCpnt, "Unknown command %x\n", rq->cmd_flags);
+		scmd_printk(KERN_ERR, SCpnt, "Unknown command %llx\n", (unsigned long long) rq->cmd_flags);
 		goto out;
 	}
 
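
Once this merge lands, loading the module creates /dev/nullb0, /dev/nullb1, ... (nr_devices defaults to 2), which accept I/O and discard it immediately, making them handy for exercising the block layers above the driver. A minimal userspace sketch for poking at one of them; the device path, 4096-byte buffer, and O_DIRECT usage are assumptions for illustration, not part of the patch:

#define _GNU_SOURCE		/* for O_DIRECT */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	void *buf;
	int fd;

	/* O_DIRECT buffers must be aligned to the logical block size */
	if (posix_memalign(&buf, 4096, 4096))
		return 1;
	memset(buf, 0xab, 4096);

	fd = open("/dev/nullb0", O_WRONLY | O_DIRECT);
	if (fd < 0) {
		perror("open /dev/nullb0");
		return 1;
	}

	/* the driver completes this without touching any media */
	if (pwrite(fd, buf, 4096, 0) != 4096)
		perror("pwrite");

	close(fd);
	free(buf);
	return 0;
}
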