author    Linus Torvalds <torvalds@linux-foundation.org>  2015-04-16 22:05:27 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-04-16 22:05:27 -0400
commit    a39ef1a7c6093bbd4e0a8197350b99cd635e5446
tree      5a19d1d1f289c52be72710f1d7cd954c830964f6
parent    d82312c80860b8b83cd4473ac6eafd244e712061
parent    976f2ab4985a1dbe3f3f0777ce01d4fdb404c62a
Merge branch 'for-4.1/drivers' of git://git.kernel.dk/linux-block
Pull block driver updates from Jens Axboe:
 "This is the block driver pull request for 4.1. As with the core bits,
  this is a relatively slow round. This pull request contains:

   - Various fixes and cleanups for NVMe, from Alexey Khoroshilov,
     Chong Yuan, myself, Keith Busch, and Murali Iyer.

   - Documentation and code cleanups for nbd from Markus Pargmann.

   - Change of brd maintainer to me, from Ross Zwisler. At least the
     email doesn't bounce anymore then.

   - Two xen-blkback fixes from Tao Chen"

* 'for-4.1/drivers' of git://git.kernel.dk/linux-block: (23 commits)
  NVMe: Meta data handling through submit io ioctl
  NVMe: Add translation for block limits
  NVMe: Remove check for null
  NVMe: Fix error handling of class_create("nvme")
  xen-blkback: define pr_fmt macro to avoid the duplication of DRV_PFX
  xen-blkback: enlarge the array size of blkback name
  nbd: Return error pointer directly
  nbd: Return error code directly
  nbd: Remove fixme that was already fixed
  nbd: Restructure debugging prints
  nbd: Fix device bytesize type
  nbd: Replace kthread_create with kthread_run
  nbd: Remove kernel internal header
  Documentation: nbd: Add list of module parameters
  Documentation: nbd: Reformat to allow more documentation
  NVMe: increase depth of admin queue
  nvme: Fix PRP list calculation for non-4k system page size
  NVMe: Fix blk-mq hot cpu notification
  NVMe: embedded iod mask cleanup
  NVMe: Freeze admin queue on device failure
  ...
-rw-r--r--  Documentation/blockdev/nbd.txt       |  48
-rw-r--r--  MAINTAINERS                          |   2
-rw-r--r--  drivers/block/drbd/drbd_main.c       |   7
-rw-r--r--  drivers/block/drbd/drbd_req.c        |   3
-rw-r--r--  drivers/block/nbd.c                  | 140
-rw-r--r--  drivers/block/nvme-core.c            | 159
-rw-r--r--  drivers/block/nvme-scsi.c            |  28
-rw-r--r--  drivers/block/xen-blkback/blkback.c  |  62
-rw-r--r--  drivers/block/xen-blkback/common.h   |   6
-rw-r--r--  drivers/block/xen-blkback/xenbus.c   |  38
-rw-r--r--  include/linux/nbd.h                  |  46
-rw-r--r--  include/linux/nvme.h                 |   5
12 files changed, 242 insertions, 302 deletions
diff --git a/Documentation/blockdev/nbd.txt b/Documentation/blockdev/nbd.txt
index 271e607304da..db242ea2bce8 100644
--- a/Documentation/blockdev/nbd.txt
+++ b/Documentation/blockdev/nbd.txt
@@ -1,17 +1,31 @@
-Network Block Device (TCP version)
-
-What is it: With this compiled in the kernel (or as a module), Linux
-can use a remote server as one of its block devices. So every time
-the client computer wants to read, e.g., /dev/nb0, it sends a
-request over TCP to the server, which will reply with the data read.
-This can be used for stations with low disk space (or even diskless)
-to borrow disk space from another computer.
-Unlike NFS, it is possible to put any filesystem on it, etc.
-
-For more information, or to download the nbd-client and nbd-server
-tools, go to http://nbd.sf.net/.
-
-The nbd kernel module need only be installed on the client
-system, as the nbd-server is completely in userspace. In fact,
-the nbd-server has been successfully ported to other operating
-systems, including Windows.
+Network Block Device (TCP version)
+==================================
+
+1) Overview
+-----------
+
+What is it: With this compiled in the kernel (or as a module), Linux
+can use a remote server as one of its block devices. So every time
+the client computer wants to read, e.g., /dev/nb0, it sends a
+request over TCP to the server, which will reply with the data read.
+This can be used for stations with low disk space (or even diskless)
+to borrow disk space from another computer.
+Unlike NFS, it is possible to put any filesystem on it, etc.
+
+For more information, or to download the nbd-client and nbd-server
+tools, go to http://nbd.sf.net/.
+
+The nbd kernel module need only be installed on the client
+system, as the nbd-server is completely in userspace. In fact,
+the nbd-server has been successfully ported to other operating
+systems, including Windows.
+
+A) NBD parameters
+-----------------
+
+max_part
+        Number of partitions per device (default: 0).
+
+nbds_max
+        Number of block devices that should be initialized (default: 16).
+
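The two parameters documented above are plain module parameters. A minimal sketch of how they are exposed, mirroring the module_param()/MODULE_PARM_DESC() declarations that remain at the bottom of drivers/block/nbd.c later in this series (the 0444 permissions make them read-only in sysfs, so they are effectively set at module load time):

    static unsigned int nbds_max = 16;  /* how many /dev/nbdN devices to create */
    static int max_part;                /* partitions per device, 0 = none */

    module_param(nbds_max, int, 0444);
    MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
    module_param(max_part, int, 0444);
    MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");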
diff --git a/MAINTAINERS b/MAINTAINERS
index f40881b5ad20..bf990f53eec3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8107,7 +8107,7 @@ S: Maintained
 F:	drivers/net/wireless/rt2x00/
 
 RAMDISK RAM BLOCK DEVICE DRIVER
-M:	Nick Piggin <npiggin@kernel.dk>
+M:	Jens Axboe <axboe@kernel.dk>
 S:	Maintained
 F:	Documentation/blockdev/ramdisk.txt
 F:	drivers/block/brd.c
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 1fc83427199c..81fde9ef7f8e 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2107,13 +2107,12 @@ static int drbd_create_mempools(void)
 	if (drbd_md_io_page_pool == NULL)
 		goto Enomem;
 
-	drbd_request_mempool = mempool_create(number,
-		mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
+	drbd_request_mempool = mempool_create_slab_pool(number,
+		drbd_request_cache);
 	if (drbd_request_mempool == NULL)
 		goto Enomem;
 
-	drbd_ee_mempool = mempool_create(number,
-		mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
+	drbd_ee_mempool = mempool_create_slab_pool(number, drbd_ee_cache);
 	if (drbd_ee_mempool == NULL)
 		goto Enomem;
 
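The drbd hunk above is a pure cleanup: mempool_create_slab_pool() is a convenience wrapper around mempool_create() with the standard slab callbacks plugged in, so the removed and added forms allocate identically. A sketch of the equivalence, assuming the usual definitions from include/linux/mempool.h:

    /* open-coded form removed by the patch */
    pool = mempool_create(number, mempool_alloc_slab, mempool_free_slab,
                          drbd_request_cache);

    /* shorthand used instead; same behaviour, less boilerplate */
    pool = mempool_create_slab_pool(number, drbd_request_cache);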
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 34f2f0ba409b..3907202fb9d9 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -52,9 +52,10 @@ static struct drbd_request *drbd_req_new(struct drbd_device *device,
 {
 	struct drbd_request *req;
 
-	req = mempool_alloc(drbd_request_mempool, GFP_NOIO | __GFP_ZERO);
+	req = mempool_alloc(drbd_request_mempool, GFP_NOIO);
 	if (!req)
 		return NULL;
+	memset(req, 0, sizeof(*req));
 
 	drbd_req_make_private_bio(req, bio_src);
 	req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0;
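The drbd_req_new() change drops __GFP_ZERO from the allocation and zeroes the request explicitly instead: an element handed back from a mempool's pre-allocated reserve is recycled as-is, so zeroing cannot be delegated to the gfp mask. The resulting pattern, as a brief sketch:

    req = mempool_alloc(drbd_request_mempool, GFP_NOIO);
    if (!req)
            return NULL;
    /* the pool may hand back a previously used element, so clear it by hand */
    memset(req, 0, sizeof(*req));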
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index a98c41f72c63..39e5f7fae3ef 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -32,28 +32,36 @@
 #include <net/sock.h>
 #include <linux/net.h>
 #include <linux/kthread.h>
+#include <linux/types.h>
 
 #include <asm/uaccess.h>
 #include <asm/types.h>
 
 #include <linux/nbd.h>
 
-#define NBD_MAGIC 0x68797548
+struct nbd_device {
+	int flags;
+	int harderror;		/* Code of hard error */
+	struct socket * sock;	/* If == NULL, device is not ready, yet	*/
+	int magic;
+
+	spinlock_t queue_lock;
+	struct list_head queue_head;	/* Requests waiting result */
+	struct request *active_req;
+	wait_queue_head_t active_wq;
+	struct list_head waiting_queue;	/* Requests to be sent */
+	wait_queue_head_t waiting_wq;
+
+	struct mutex tx_lock;
+	struct gendisk *disk;
+	int blksize;
+	loff_t bytesize;
+	pid_t pid; /* pid of nbd-client, if attached */
+	int xmit_timeout;
+	int disconnect; /* a disconnect has been requested by user */
+};
 
-#ifdef NDEBUG
-#define dprintk(flags, fmt...)
-#else /* NDEBUG */
-#define dprintk(flags, fmt...) do { \
-	if (debugflags & (flags)) printk(KERN_DEBUG fmt); \
-} while (0)
-#define DBG_IOCTL       0x0004
-#define DBG_INIT        0x0010
-#define DBG_EXIT        0x0020
-#define DBG_BLKDEV      0x0100
-#define DBG_RX          0x0200
-#define DBG_TX          0x0400
-static unsigned int debugflags;
-#endif /* NDEBUG */
+#define NBD_MAGIC 0x68797548
 
 static unsigned int nbds_max = 16;
 static struct nbd_device *nbd_dev;
@@ -71,25 +79,9 @@ static int max_part
  */
 static DEFINE_SPINLOCK(nbd_lock);
 
-#ifndef NDEBUG
-static const char *ioctl_cmd_to_ascii(int cmd)
+static inline struct device *nbd_to_dev(struct nbd_device *nbd)
 {
-	switch (cmd) {
-	case NBD_SET_SOCK: return "set-sock";
-	case NBD_SET_BLKSIZE: return "set-blksize";
-	case NBD_SET_SIZE: return "set-size";
-	case NBD_SET_TIMEOUT: return "set-timeout";
-	case NBD_SET_FLAGS: return "set-flags";
-	case NBD_DO_IT: return "do-it";
-	case NBD_CLEAR_SOCK: return "clear-sock";
-	case NBD_CLEAR_QUE: return "clear-que";
-	case NBD_PRINT_DEBUG: return "print-debug";
-	case NBD_SET_SIZE_BLOCKS: return "set-size-blocks";
-	case NBD_DISCONNECT: return "disconnect";
-	case BLKROSET: return "set-read-only";
-	case BLKFLSBUF: return "flush-buffer-cache";
-	}
-	return "unknown";
+	return disk_to_dev(nbd->disk);
 }
 
 static const char *nbdcmd_to_ascii(int cmd)
@@ -103,30 +95,26 @@ static const char *nbdcmd_to_ascii(int cmd)
 	}
 	return "invalid";
 }
-#endif /* NDEBUG */
 
-static void nbd_end_request(struct request *req)
+static void nbd_end_request(struct nbd_device *nbd, struct request *req)
 {
 	int error = req->errors ? -EIO : 0;
 	struct request_queue *q = req->q;
 	unsigned long flags;
 
-	dprintk(DBG_BLKDEV, "%s: request %p: %s\n", req->rq_disk->disk_name,
-			req, error ? "failed" : "done");
+	dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", req,
+		error ? "failed" : "done");
 
 	spin_lock_irqsave(q->queue_lock, flags);
 	__blk_end_request_all(req, error);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
+/*
+ * Forcibly shutdown the socket causing all listeners to error
+ */
 static void sock_shutdown(struct nbd_device *nbd, int lock)
 {
-	/* Forcibly shutdown the socket causing all listeners
-	 * to error
-	 *
-	 * FIXME: This code is duplicated from sys_shutdown, but
-	 * there should be a more generic interface rather than
-	 * calling socket ops directly here */
 	if (lock)
 		mutex_lock(&nbd->tx_lock);
 	if (nbd->sock) {
@@ -253,17 +241,15 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req)
 	}
 	memcpy(request.handle, &req, sizeof(req));
 
-	dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%uB)\n",
-			nbd->disk->disk_name, req,
-			nbdcmd_to_ascii(nbd_cmd(req)),
-			(unsigned long long)blk_rq_pos(req) << 9,
-			blk_rq_bytes(req));
+	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
+		req, nbdcmd_to_ascii(nbd_cmd(req)),
+		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
 	result = sock_xmit(nbd, 1, &request, sizeof(request),
 			(nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0);
 	if (result <= 0) {
 		dev_err(disk_to_dev(nbd->disk),
 			"Send control failed (result %d)\n", result);
-		goto error_out;
+		return -EIO;
 	}
 
 	if (nbd_cmd(req) == NBD_CMD_WRITE) {
@@ -277,21 +263,18 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req)
 				flags = 0;
 			if (!rq_iter_last(bvec, iter))
 				flags = MSG_MORE;
-			dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
-					nbd->disk->disk_name, req, bvec.bv_len);
+			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
+				req, bvec.bv_len);
 			result = sock_send_bvec(nbd, &bvec, flags);
 			if (result <= 0) {
 				dev_err(disk_to_dev(nbd->disk),
 					"Send data failed (result %d)\n",
 					result);
-				goto error_out;
+				return -EIO;
 			}
 		}
 	}
 	return 0;
-
-error_out:
-	return -EIO;
 }
 
 static struct request *nbd_find_request(struct nbd_device *nbd,
@@ -302,7 +285,7 @@ static struct request *nbd_find_request(struct nbd_device *nbd,
 
 	err = wait_event_interruptible(nbd->active_wq, nbd->active_req != xreq);
 	if (unlikely(err))
-		goto out;
+		return ERR_PTR(err);
 
 	spin_lock(&nbd->queue_lock);
 	list_for_each_entry_safe(req, tmp, &nbd->queue_head, queuelist) {
@@ -314,10 +297,7 @@ static struct request *nbd_find_request(struct nbd_device *nbd,
 	}
 	spin_unlock(&nbd->queue_lock);
 
-	err = -ENOENT;
-
-out:
-	return ERR_PTR(err);
+	return ERR_PTR(-ENOENT);
 }
 
 static inline int sock_recv_bvec(struct nbd_device *nbd, struct bio_vec *bvec)
@@ -371,8 +351,7 @@ static struct request *nbd_read_stat(struct nbd_device *nbd)
 		return req;
 	}
 
-	dprintk(DBG_RX, "%s: request %p: got reply\n",
-			nbd->disk->disk_name, req);
+	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
 	if (nbd_cmd(req) == NBD_CMD_READ) {
 		struct req_iterator iter;
 		struct bio_vec bvec;
@@ -385,8 +364,8 @@ static struct request *nbd_read_stat(struct nbd_device *nbd)
 				req->errors++;
 				return req;
 			}
-			dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
-				nbd->disk->disk_name, req, bvec.bv_len);
+			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
+				req, bvec.bv_len);
 		}
 	}
 	return req;
@@ -426,7 +405,7 @@ static int nbd_do_it(struct nbd_device *nbd)
 	}
 
 	while ((req = nbd_read_stat(nbd)) != NULL)
-		nbd_end_request(req);
+		nbd_end_request(nbd, req);
 
 	device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
 	nbd->pid = 0;
@@ -455,7 +434,7 @@ static void nbd_clear_que(struct nbd_device *nbd)
 				 queuelist);
 		list_del_init(&req->queuelist);
 		req->errors++;
-		nbd_end_request(req);
+		nbd_end_request(nbd, req);
 	}
 
 	while (!list_empty(&nbd->waiting_queue)) {
@@ -463,7 +442,7 @@ static void nbd_clear_que(struct nbd_device *nbd)
 				 queuelist);
 		list_del_init(&req->queuelist);
 		req->errors++;
-		nbd_end_request(req);
+		nbd_end_request(nbd, req);
 	}
 }
 
@@ -507,7 +486,7 @@ static void nbd_handle_req(struct nbd_device *nbd, struct request *req)
 	if (nbd_send_req(nbd, req) != 0) {
 		dev_err(disk_to_dev(nbd->disk), "Request send failed\n");
 		req->errors++;
-		nbd_end_request(req);
+		nbd_end_request(nbd, req);
 	} else {
 		spin_lock(&nbd->queue_lock);
 		list_add_tail(&req->queuelist, &nbd->queue_head);
@@ -522,7 +501,7 @@ static void nbd_handle_req(struct nbd_device *nbd, struct request *req)
 
 error_out:
 	req->errors++;
-	nbd_end_request(req);
+	nbd_end_request(nbd, req);
 }
 
 static int nbd_thread(void *data)
@@ -570,18 +549,18 @@ static void do_nbd_request(struct request_queue *q)
 
 		spin_unlock_irq(q->queue_lock);
 
-		dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n",
-				req->rq_disk->disk_name, req, req->cmd_type);
-
 		nbd = req->rq_disk->private_data;
 
 		BUG_ON(nbd->magic != NBD_MAGIC);
 
+		dev_dbg(nbd_to_dev(nbd), "request %p: dequeued (flags=%x)\n",
+			req, req->cmd_type);
+
 		if (unlikely(!nbd->sock)) {
 			dev_err(disk_to_dev(nbd->disk),
 				"Attempted send on closed socket\n");
 			req->errors++;
-			nbd_end_request(req);
+			nbd_end_request(nbd, req);
 			spin_lock_irq(q->queue_lock);
 			continue;
 		}
@@ -706,13 +685,13 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
 		else
 			blk_queue_flush(nbd->disk->queue, 0);
 
-		thread = kthread_create(nbd_thread, nbd, "%s",
+		thread = kthread_run(nbd_thread, nbd, "%s",
 					nbd->disk->disk_name);
 		if (IS_ERR(thread)) {
 			mutex_lock(&nbd->tx_lock);
 			return PTR_ERR(thread);
 		}
-		wake_up_process(thread);
+
 		error = nbd_do_it(nbd);
 		kthread_stop(thread);
 
@@ -768,10 +747,6 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
 
 	BUG_ON(nbd->magic != NBD_MAGIC);
 
-	/* Anyone capable of this syscall can do *real bad* things */
-	dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n",
-		nbd->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg);
-
 	mutex_lock(&nbd->tx_lock);
 	error = __nbd_ioctl(bdev, nbd, cmd, arg);
 	mutex_unlock(&nbd->tx_lock);
@@ -861,7 +836,6 @@ static int __init nbd_init(void)
 	}
 
 	printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);
-	dprintk(DBG_INIT, "nbd: debugflags=0x%x\n", debugflags);
 
 	for (i = 0; i < nbds_max; i++) {
 		struct gendisk *disk = nbd_dev[i].disk;
@@ -920,7 +894,3 @@ module_param(nbds_max, int, 0444);
 MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
 module_param(max_part, int, 0444);
 MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");
-#ifndef NDEBUG
-module_param(debugflags, int, 0644);
-MODULE_PARM_DESC(debugflags, "flags for controlling debug output");
-#endif
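Two of the nbd changes above deserve a note. kthread_run() is kthread_create() plus an immediate wake-up, which is why the separate wake_up_process() call disappears from __nbd_ioctl(); and the private dprintk()/debugflags machinery is replaced by dev_dbg(), whose output is controlled by the kernel's dynamic debug facility rather than a driver-specific module parameter. A rough sketch of the kthread_run() equivalence (see include/linux/kthread.h):

    /* before: create the thread, then wake it explicitly */
    thread = kthread_create(nbd_thread, nbd, "%s", nbd->disk->disk_name);
    if (IS_ERR(thread))
            return PTR_ERR(thread);
    wake_up_process(thread);

    /* after: kthread_run() performs both steps in one call */
    thread = kthread_run(nbd_thread, nbd, "%s", nbd->disk->disk_name);
    if (IS_ERR(thread))
            return PTR_ERR(thread);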
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index e23be20a3417..85b8036deaa3 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -44,7 +44,7 @@
 
 #define NVME_MINORS		(1U << MINORBITS)
 #define NVME_Q_DEPTH		1024
-#define NVME_AQ_DEPTH		64
+#define NVME_AQ_DEPTH		256
 #define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
 #define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))
 #define ADMIN_TIMEOUT		(admin_timeout * HZ)
@@ -152,6 +152,7 @@ struct nvme_cmd_info {
  */
 #define NVME_INT_PAGES		2
 #define NVME_INT_BYTES(dev)	(NVME_INT_PAGES * (dev)->page_size)
+#define NVME_INT_MASK		0x01
 
 /*
  * Will slightly overestimate the number of pages needed. This is OK
@@ -257,7 +258,7 @@ static void *iod_get_private(struct nvme_iod *iod)
  */
 static bool iod_should_kfree(struct nvme_iod *iod)
 {
-	return (iod->private & 0x01) == 0;
+	return (iod->private & NVME_INT_MASK) == 0;
 }
 
 /* Special values must be less than 0x1000 */
@@ -301,8 +302,6 @@ static void *cancel_cmd_info(struct nvme_cmd_info *cmd, nvme_completion_fn *fn)
 static void async_req_completion(struct nvme_queue *nvmeq, void *ctx,
 		struct nvme_completion *cqe)
 {
-	struct request *req = ctx;
-
 	u32 result = le32_to_cpup(&cqe->result);
 	u16 status = le16_to_cpup(&cqe->status) >> 1;
 
@@ -311,8 +310,6 @@ static void async_req_completion(struct nvme_queue *nvmeq, void *ctx,
 	if (status == NVME_SC_SUCCESS)
 		dev_warn(nvmeq->q_dmadev,
 			"async event result %08x\n", result);
-
-	blk_mq_free_hctx_request(nvmeq->hctx, req);
 }
 
 static void abort_completion(struct nvme_queue *nvmeq, void *ctx,
@@ -432,7 +429,6 @@ static struct nvme_iod *nvme_alloc_iod(struct request *rq, struct nvme_dev *dev,
 {
 	unsigned size = !(rq->cmd_flags & REQ_DISCARD) ? blk_rq_bytes(rq) :
 						sizeof(struct nvme_dsm_range);
-	unsigned long mask = 0;
 	struct nvme_iod *iod;
 
 	if (rq->nr_phys_segments <= NVME_INT_PAGES &&
@@ -440,9 +436,8 @@ static struct nvme_iod *nvme_alloc_iod(struct request *rq, struct nvme_dev *dev,
 		struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(rq);
 
 		iod = cmd->iod;
-		mask = 0x01;
 		iod_init(iod, size, rq->nr_phys_segments,
-				(unsigned long) rq | 0x01);
+				(unsigned long) rq | NVME_INT_MASK);
 		return iod;
 	}
 
@@ -522,8 +517,6 @@ static void nvme_dif_remap(struct request *req,
 		return;
 
 	pmap = kmap_atomic(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset;
-	if (!pmap)
-		return;
 
 	p = pmap;
 	virt = bip_get_seed(bip);
@@ -645,12 +638,12 @@ int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod, int total_len,
 	struct scatterlist *sg = iod->sg;
 	int dma_len = sg_dma_len(sg);
 	u64 dma_addr = sg_dma_address(sg);
-	int offset = offset_in_page(dma_addr);
+	u32 page_size = dev->page_size;
+	int offset = dma_addr & (page_size - 1);
 	__le64 *prp_list;
 	__le64 **list = iod_list(iod);
 	dma_addr_t prp_dma;
 	int nprps, i;
-	u32 page_size = dev->page_size;
 
 	length -= (page_size - offset);
 	if (length <= 0)
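The nvme_setup_prps() hunk above is the "PRP list calculation for non-4k system page size" fix from the pull request. PRP entries are expressed in units of the controller's page size (dev->page_size, typically 4 KiB), while offset_in_page() masks with the CPU's PAGE_SIZE, so the two disagree on architectures running with larger kernel pages. A worked example, assuming a 4 KiB device page and 64 KiB kernel pages:

    u64 dma_addr = 0x12345600;

    /* old code: offset within the 64 KiB CPU page */
    int cpu_offset = dma_addr & (65536 - 1);        /* 0x5600 */

    /* new code: offset within the 4 KiB NVMe device page */
    u32 page_size  = 4096;                          /* dev->page_size */
    int prp_offset = dma_addr & (page_size - 1);    /* 0x600 */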
@@ -1028,18 +1021,19 @@ static int nvme_submit_async_admin_req(struct nvme_dev *dev)
 	struct nvme_cmd_info *cmd_info;
 	struct request *req;
 
-	req = blk_mq_alloc_request(dev->admin_q, WRITE, GFP_ATOMIC, false);
+	req = blk_mq_alloc_request(dev->admin_q, WRITE, GFP_ATOMIC, true);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
 	req->cmd_flags |= REQ_NO_TIMEOUT;
 	cmd_info = blk_mq_rq_to_pdu(req);
-	nvme_set_info(cmd_info, req, async_req_completion);
+	nvme_set_info(cmd_info, NULL, async_req_completion);
 
 	memset(&c, 0, sizeof(c));
 	c.common.opcode = nvme_admin_async_event;
 	c.common.command_id = req->tag;
 
+	blk_mq_free_hctx_request(nvmeq->hctx, req);
 	return __nvme_submit_cmd(nvmeq, &c);
 }
 
@@ -1347,6 +1341,9 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
 	nvmeq->cq_vector = -1;
 	spin_unlock_irq(&nvmeq->q_lock);
 
+	if (!nvmeq->qid && nvmeq->dev->admin_q)
+		blk_mq_freeze_queue_start(nvmeq->dev->admin_q);
+
 	irq_set_affinity_hint(vector, NULL);
 	free_irq(vector, nvmeq);
 
@@ -1378,8 +1375,6 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid)
 		adapter_delete_sq(dev, qid);
 		adapter_delete_cq(dev, qid);
 	}
-	if (!qid && dev->admin_q)
-		blk_mq_freeze_queue_start(dev->admin_q);
 
 	spin_lock_irq(&nvmeq->q_lock);
 	nvme_process_cq(nvmeq);
@@ -1583,6 +1578,7 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
 	dev->admin_tagset.ops = &nvme_mq_admin_ops;
 	dev->admin_tagset.nr_hw_queues = 1;
 	dev->admin_tagset.queue_depth = NVME_AQ_DEPTH - 1;
+	dev->admin_tagset.reserved_tags = 1;
 	dev->admin_tagset.timeout = ADMIN_TIMEOUT;
 	dev->admin_tagset.numa_node = dev_to_node(&dev->pci_dev->dev);
 	dev->admin_tagset.cmd_size = nvme_cmd_size(dev);
@@ -1749,25 +1745,31 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 	struct nvme_dev *dev = ns->dev;
 	struct nvme_user_io io;
 	struct nvme_command c;
-	unsigned length, meta_len;
-	int status, i;
-	struct nvme_iod *iod, *meta_iod = NULL;
-	dma_addr_t meta_dma_addr;
-	void *meta, *uninitialized_var(meta_mem);
+	unsigned length, meta_len, prp_len;
+	int status, write;
+	struct nvme_iod *iod;
+	dma_addr_t meta_dma = 0;
+	void *meta = NULL;
 
 	if (copy_from_user(&io, uio, sizeof(io)))
 		return -EFAULT;
 	length = (io.nblocks + 1) << ns->lba_shift;
 	meta_len = (io.nblocks + 1) * ns->ms;
 
-	if (meta_len && ((io.metadata & 3) || !io.metadata))
+	if (meta_len && ((io.metadata & 3) || !io.metadata) && !ns->ext)
 		return -EINVAL;
+	else if (meta_len && ns->ext) {
+		length += meta_len;
+		meta_len = 0;
+	}
+
+	write = io.opcode & 1;
 
 	switch (io.opcode) {
 	case nvme_cmd_write:
 	case nvme_cmd_read:
 	case nvme_cmd_compare:
-		iod = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length);
+		iod = nvme_map_user_pages(dev, write, io.addr, length);
 		break;
 	default:
 		return -EINVAL;
@@ -1776,6 +1778,27 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 	if (IS_ERR(iod))
 		return PTR_ERR(iod);
 
+	prp_len = nvme_setup_prps(dev, iod, length, GFP_KERNEL);
+	if (length != prp_len) {
+		status = -ENOMEM;
+		goto unmap;
+	}
+	if (meta_len) {
+		meta = dma_alloc_coherent(&dev->pci_dev->dev, meta_len,
+						&meta_dma, GFP_KERNEL);
+		if (!meta) {
+			status = -ENOMEM;
+			goto unmap;
+		}
+		if (write) {
+			if (copy_from_user(meta, (void __user *)io.metadata,
+								meta_len)) {
+				status = -EFAULT;
+				goto unmap;
+			}
+		}
+	}
+
 	memset(&c, 0, sizeof(c));
 	c.rw.opcode = io.opcode;
 	c.rw.flags = io.flags;
@@ -1787,75 +1810,21 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 	c.rw.reftag = cpu_to_le32(io.reftag);
 	c.rw.apptag = cpu_to_le16(io.apptag);
 	c.rw.appmask = cpu_to_le16(io.appmask);
-
-	if (meta_len) {
-		meta_iod = nvme_map_user_pages(dev, io.opcode & 1, io.metadata,
-								meta_len);
-		if (IS_ERR(meta_iod)) {
-			status = PTR_ERR(meta_iod);
-			meta_iod = NULL;
-			goto unmap;
-		}
-
-		meta_mem = dma_alloc_coherent(&dev->pci_dev->dev, meta_len,
-						&meta_dma_addr, GFP_KERNEL);
-		if (!meta_mem) {
-			status = -ENOMEM;
-			goto unmap;
-		}
-
-		if (io.opcode & 1) {
-			int meta_offset = 0;
-
-			for (i = 0; i < meta_iod->nents; i++) {
-				meta = kmap_atomic(sg_page(&meta_iod->sg[i])) +
-						meta_iod->sg[i].offset;
-				memcpy(meta_mem + meta_offset, meta,
-						meta_iod->sg[i].length);
-				kunmap_atomic(meta);
-				meta_offset += meta_iod->sg[i].length;
-			}
-		}
-
-		c.rw.metadata = cpu_to_le64(meta_dma_addr);
-	}
-
-	length = nvme_setup_prps(dev, iod, length, GFP_KERNEL);
 	c.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
 	c.rw.prp2 = cpu_to_le64(iod->first_dma);
-
-	if (length != (io.nblocks + 1) << ns->lba_shift)
-		status = -ENOMEM;
-	else
-		status = nvme_submit_io_cmd(dev, ns, &c, NULL);
-
-	if (meta_len) {
-		if (status == NVME_SC_SUCCESS && !(io.opcode & 1)) {
-			int meta_offset = 0;
-
-			for (i = 0; i < meta_iod->nents; i++) {
-				meta = kmap_atomic(sg_page(&meta_iod->sg[i])) +
-						meta_iod->sg[i].offset;
-				memcpy(meta, meta_mem + meta_offset,
-						meta_iod->sg[i].length);
-				kunmap_atomic(meta);
-				meta_offset += meta_iod->sg[i].length;
-			}
-		}
-
-		dma_free_coherent(&dev->pci_dev->dev, meta_len, meta_mem,
-								meta_dma_addr);
-	}
-
+	c.rw.metadata = cpu_to_le64(meta_dma);
+	status = nvme_submit_io_cmd(dev, ns, &c, NULL);
 unmap:
-	nvme_unmap_user_pages(dev, io.opcode & 1, iod);
+	nvme_unmap_user_pages(dev, write, iod);
 	nvme_free_iod(dev, iod);
-
-	if (meta_iod) {
-		nvme_unmap_user_pages(dev, io.opcode & 1, meta_iod);
-		nvme_free_iod(dev, meta_iod);
+	if (meta) {
+		if (status == NVME_SC_SUCCESS && !write) {
+			if (copy_to_user((void __user *)io.metadata, meta,
+								meta_len))
+				status = -EFAULT;
+		}
+		dma_free_coherent(&dev->pci_dev->dev, meta_len, meta, meta_dma);
 	}
-
 	return status;
 }
 
@@ -2018,7 +1987,8 @@ static int nvme_revalidate_disk(struct gendisk *disk)
 	struct nvme_dev *dev = ns->dev;
 	struct nvme_id_ns *id;
 	dma_addr_t dma_addr;
-	int lbaf, pi_type, old_ms;
+	u8 lbaf, pi_type;
+	u16 old_ms;
 	unsigned short bs;
 
 	id = dma_alloc_coherent(&dev->pci_dev->dev, 4096, &dma_addr,
@@ -2039,6 +2009,7 @@ static int nvme_revalidate_disk(struct gendisk *disk)
 	lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
 	ns->lba_shift = id->lbaf[lbaf].ds;
 	ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
+	ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
 
 	/*
 	 * If identify namespace failed, use default 512 byte block size so
@@ -2055,14 +2026,14 @@ static int nvme_revalidate_disk(struct gendisk *disk)
 	if (blk_get_integrity(disk) && (ns->pi_type != pi_type ||
 				ns->ms != old_ms ||
 				bs != queue_logical_block_size(disk->queue) ||
-				(ns->ms && id->flbas & NVME_NS_FLBAS_META_EXT)))
+				(ns->ms && ns->ext)))
 		blk_integrity_unregister(disk);
 
 	ns->pi_type = pi_type;
 	blk_queue_logical_block_size(ns->queue, bs);
 
 	if (ns->ms && !blk_get_integrity(disk) && (disk->flags & GENHD_FL_UP) &&
-						!(id->flbas & NVME_NS_FLBAS_META_EXT))
+						!ns->ext)
 		nvme_init_integrity(ns);
 
 	if (id->ncap == 0 || (ns->ms && !blk_get_integrity(disk)))
@@ -2334,7 +2305,6 @@ static int nvme_dev_add(struct nvme_dev *dev)
 	dev->oncs = le16_to_cpup(&ctrl->oncs);
 	dev->abort_limit = ctrl->acl + 1;
 	dev->vwc = ctrl->vwc;
-	dev->event_limit = min(ctrl->aerl + 1, 8);
 	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
 	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
 	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
@@ -2881,6 +2851,7 @@ static int nvme_dev_start(struct nvme_dev *dev)
 
 	nvme_set_irq_hints(dev);
 
+	dev->event_limit = 1;
 	return result;
 
 free_tags:
@@ -3166,8 +3137,10 @@ static int __init nvme_init(void)
 	nvme_char_major = result;
 
 	nvme_class = class_create(THIS_MODULE, "nvme");
-	if (!nvme_class)
+	if (IS_ERR(nvme_class)) {
+		result = PTR_ERR(nvme_class);
 		goto unregister_chrdev;
+	}
 
 	result = pci_register_driver(&nvme_driver);
 	if (result)
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
index e10196e0182d..6b736b00f63e 100644
--- a/drivers/block/nvme-scsi.c
+++ b/drivers/block/nvme-scsi.c
@@ -55,6 +55,7 @@ static int sg_version_num = 30534; /* 2 digits for each component */
 #define VPD_SERIAL_NUMBER			0x80
 #define VPD_DEVICE_IDENTIFIERS			0x83
 #define VPD_EXTENDED_INQUIRY			0x86
+#define VPD_BLOCK_LIMITS			0xB0
 #define VPD_BLOCK_DEV_CHARACTERISTICS		0xB1
 
 /* CDB offsets */
@@ -132,9 +133,10 @@ static int sg_version_num = 30534; /* 2 digits for each component */
 #define INQ_UNIT_SERIAL_NUMBER_PAGE		0x80
 #define INQ_DEVICE_IDENTIFICATION_PAGE		0x83
 #define INQ_EXTENDED_INQUIRY_DATA_PAGE		0x86
+#define INQ_BDEV_LIMITS_PAGE			0xB0
 #define INQ_BDEV_CHARACTERISTICS_PAGE		0xB1
 #define INQ_SERIAL_NUMBER_LENGTH		0x14
-#define INQ_NUM_SUPPORTED_VPD_PAGES		5
+#define INQ_NUM_SUPPORTED_VPD_PAGES		6
 #define VERSION_SPC_4				0x06
 #define ACA_UNSUPPORTED				0
 #define STANDARD_INQUIRY_LENGTH			36
@@ -747,6 +749,7 @@ static int nvme_trans_supported_vpd_pages(struct nvme_ns *ns,
 	inq_response[6] = INQ_DEVICE_IDENTIFICATION_PAGE;
 	inq_response[7] = INQ_EXTENDED_INQUIRY_DATA_PAGE;
 	inq_response[8] = INQ_BDEV_CHARACTERISTICS_PAGE;
+	inq_response[9] = INQ_BDEV_LIMITS_PAGE;
 
 	xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
 	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
@@ -938,6 +941,25 @@ static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	return res;
 }
 
+static int nvme_trans_bdev_limits_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+					u8 *inq_response, int alloc_len)
+{
+	__be32 max_sectors = cpu_to_be32(queue_max_hw_sectors(ns->queue));
+	__be32 max_discard = cpu_to_be32(ns->queue->limits.max_discard_sectors);
+	__be32 discard_desc_count = cpu_to_be32(0x100);
+
+	memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
+	inq_response[1] = VPD_BLOCK_LIMITS;
+	inq_response[3] = 0x3c; /* Page Length */
+	memcpy(&inq_response[8], &max_sectors, sizeof(u32));
+	memcpy(&inq_response[20], &max_discard, sizeof(u32));
+
+	if (max_discard)
+		memcpy(&inq_response[24], &discard_desc_count, sizeof(u32));
+
+	return nvme_trans_copy_to_user(hdr, inq_response, 0x3c);
+}
+
 static int nvme_trans_bdev_char_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 					int alloc_len)
 {
@@ -2268,6 +2290,10 @@ static int nvme_trans_inquiry(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	case VPD_EXTENDED_INQUIRY:
 		res = nvme_trans_ext_inq_page(ns, hdr, alloc_len);
 		break;
+	case VPD_BLOCK_LIMITS:
+		res = nvme_trans_bdev_limits_page(ns, hdr, inq_response,
+				alloc_len);
+		break;
 	case VPD_BLOCK_DEV_CHARACTERISTICS:
 		res = nvme_trans_bdev_char_page(ns, hdr, alloc_len);
 		break;
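The byte offsets used by the new nvme_trans_bdev_limits_page() come from the SCSI Block Limits VPD page (0xB0) layout; the summary below is a hedged reading of the SBC-3 definition rather than anything stated in the patch itself:

    /* Block Limits VPD page (0xB0), 0x3c bytes of payload:
     *   bytes  8..11  MAXIMUM TRANSFER LENGTH               <- queue_max_hw_sectors()
     *   bytes 20..23  MAXIMUM UNMAP LBA COUNT               <- limits.max_discard_sectors
     *   bytes 24..27  MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT  <- 0x100 when discard is supported
     * All three fields are big-endian 32-bit values, hence the cpu_to_be32() conversions.
     */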
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 2a04d341e598..bd2b3bbbb22c 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -34,6 +34,8 @@
  * IN THE SOFTWARE.
  */
 
+#define pr_fmt(fmt) "xen-blkback: " fmt
+
 #include <linux/spinlock.h>
 #include <linux/kthread.h>
 #include <linux/list.h>
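Defining pr_fmt() before the #includes is what allows every DRV_PFX occurrence in this file to be dropped: the pr_*() helpers paste pr_fmt() in front of the format string at compile time. A minimal sketch of the mechanism, assuming the stock definition in include/linux/printk.h:

    #define pr_fmt(fmt) "xen-blkback: " fmt

    /* pr_warn(fmt, ...) expands to printk(KERN_WARNING pr_fmt(fmt), ...), so: */
    pr_warn("Error starting transaction\n");
    /* prints "xen-blkback: Error starting transaction" without an explicit prefix. */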
@@ -211,7 +213,7 @@ static int add_persistent_gnt(struct xen_blkif *blkif,
 		else if (persistent_gnt->gnt > this->gnt)
 			new = &((*new)->rb_right);
 		else {
-			pr_alert_ratelimited(DRV_PFX " trying to add a gref that's already in the tree\n");
+			pr_alert_ratelimited("trying to add a gref that's already in the tree\n");
 			return -EINVAL;
 		}
 	}
@@ -242,7 +244,7 @@ static struct persistent_gnt *get_persistent_gnt(struct xen_blkif *blkif,
 			node = node->rb_right;
 		else {
 			if(test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) {
-				pr_alert_ratelimited(DRV_PFX " requesting a grant already in use\n");
+				pr_alert_ratelimited("requesting a grant already in use\n");
 				return NULL;
 			}
 			set_bit(PERSISTENT_GNT_ACTIVE, data->flags);
@@ -257,7 +259,7 @@ static void put_persistent_gnt(struct xen_blkif *blkif,
 			       struct persistent_gnt *persistent_gnt)
 {
 	if(!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
-		pr_alert_ratelimited(DRV_PFX " freeing a grant already unused");
+		pr_alert_ratelimited("freeing a grant already unused\n");
 	set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
 	clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
 	atomic_dec(&blkif->persistent_gnt_in_use);
@@ -374,7 +376,7 @@ static void purge_persistent_gnt(struct xen_blkif *blkif)
 	}
 
 	if (work_pending(&blkif->persistent_purge_work)) {
-		pr_alert_ratelimited(DRV_PFX "Scheduled work from previous purge is still pending, cannot purge list\n");
+		pr_alert_ratelimited("Scheduled work from previous purge is still pending, cannot purge list\n");
 		return;
 	}
 
@@ -396,7 +398,7 @@
 
 	total = num_clean;
 
-	pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean);
+	pr_debug("Going to purge %u persistent grants\n", num_clean);
 
 	BUG_ON(!list_empty(&blkif->persistent_purge_list));
 	root = &blkif->persistent_gnts;
@@ -428,13 +430,13 @@ purge_list:
 	 * with the requested num
 	 */
 	if (!scan_used && !clean_used) {
-		pr_debug(DRV_PFX "Still missing %u purged frames\n", num_clean);
+		pr_debug("Still missing %u purged frames\n", num_clean);
 		scan_used = true;
 		goto purge_list;
 	}
finished:
 	if (!clean_used) {
-		pr_debug(DRV_PFX "Finished scanning for grants to clean, removing used flag\n");
+		pr_debug("Finished scanning for grants to clean, removing used flag\n");
 		clean_used = true;
 		goto purge_list;
 	}
@@ -444,7 +446,7 @@ finished:
 
 	/* We can defer this work */
 	schedule_work(&blkif->persistent_purge_work);
-	pr_debug(DRV_PFX "Purged %u/%u\n", (total - num_clean), total);
+	pr_debug("Purged %u/%u\n", (total - num_clean), total);
 	return;
 }
 
@@ -520,20 +522,20 @@ static void xen_vbd_resize(struct xen_blkif *blkif)
 	struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
 	unsigned long long new_size = vbd_sz(vbd);
 
-	pr_info(DRV_PFX "VBD Resize: Domid: %d, Device: (%d, %d)\n",
+	pr_info("VBD Resize: Domid: %d, Device: (%d, %d)\n",
 		blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
-	pr_info(DRV_PFX "VBD Resize: new size %llu\n", new_size);
+	pr_info("VBD Resize: new size %llu\n", new_size);
 	vbd->size = new_size;
again:
 	err = xenbus_transaction_start(&xbt);
 	if (err) {
-		pr_warn(DRV_PFX "Error starting transaction");
+		pr_warn("Error starting transaction\n");
 		return;
 	}
 	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
 			    (unsigned long long)vbd_sz(vbd));
 	if (err) {
-		pr_warn(DRV_PFX "Error writing new size");
+		pr_warn("Error writing new size\n");
 		goto abort;
 	}
 	/*
@@ -543,7 +545,7 @@ again:
 	 */
 	err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
 	if (err) {
-		pr_warn(DRV_PFX "Error writing the state");
+		pr_warn("Error writing the state\n");
 		goto abort;
 	}
 
@@ -551,7 +553,7 @@ again:
 	if (err == -EAGAIN)
 		goto again;
 	if (err)
-		pr_warn(DRV_PFX "Error ending transaction");
+		pr_warn("Error ending transaction\n");
 	return;
abort:
 	xenbus_transaction_end(xbt, 1);
@@ -578,7 +580,7 @@ irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
 
 static void print_stats(struct xen_blkif *blkif)
 {
-	pr_info("xen-blkback (%s): oo %3llu | rd %4llu | wr %4llu | f %4llu"
+	pr_info("(%s): oo %3llu | rd %4llu | wr %4llu | f %4llu"
 		 " | ds %4llu | pg: %4u/%4d\n",
 		 current->comm, blkif->st_oo_req,
 		 blkif->st_rd_req, blkif->st_wr_req,
@@ -855,7 +857,7 @@ again:
 			/* This is a newly mapped grant */
 			BUG_ON(new_map_idx >= segs_to_map);
 			if (unlikely(map[new_map_idx].status != 0)) {
-				pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
+				pr_debug("invalid buffer -- could not remap it\n");
 				put_free_pages(blkif, &pages[seg_idx]->page, 1);
 				pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
 				ret |= 1;
@@ -891,14 +893,14 @@ again:
 				goto next;
 			}
 			pages[seg_idx]->persistent_gnt = persistent_gnt;
-			pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n",
+			pr_debug("grant %u added to the tree of persistent grants, using %u/%u\n",
 				 persistent_gnt->gnt, blkif->persistent_gnt_c,
 				 xen_blkif_max_pgrants);
 			goto next;
 		}
 		if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
 			blkif->vbd.overflow_max_grants = 1;
-			pr_debug(DRV_PFX " domain %u, device %#x is using maximum number of persistent grants\n",
+			pr_debug("domain %u, device %#x is using maximum number of persistent grants\n",
 				 blkif->domid, blkif->vbd.handle);
 		}
 		/*
@@ -916,7 +918,7 @@ next:
 	return ret;
 
out_of_memory:
-	pr_alert(DRV_PFX "%s: out of memory\n", __func__);
+	pr_alert("%s: out of memory\n", __func__);
 	put_free_pages(blkif, pages_to_gnt, segs_to_map);
 	return -ENOMEM;
 }
@@ -996,7 +998,7 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
 
 	err = xen_vbd_translate(&preq, blkif, WRITE);
 	if (err) {
-		pr_warn(DRV_PFX "access denied: DISCARD [%llu->%llu] on dev=%04x\n",
+		pr_warn("access denied: DISCARD [%llu->%llu] on dev=%04x\n",
 			preq.sector_number,
 			preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
 		goto fail_response;
@@ -1012,7 +1014,7 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
 				   GFP_KERNEL, secure);
fail_response:
 	if (err == -EOPNOTSUPP) {
-		pr_debug(DRV_PFX "discard op failed, not supported\n");
+		pr_debug("discard op failed, not supported\n");
 		status = BLKIF_RSP_EOPNOTSUPP;
 	} else if (err)
 		status = BLKIF_RSP_ERROR;
@@ -1056,16 +1058,16 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
 	/* An error fails the entire request. */
 	if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
 	    (error == -EOPNOTSUPP)) {
-		pr_debug(DRV_PFX "flush diskcache op failed, not supported\n");
+		pr_debug("flush diskcache op failed, not supported\n");
 		xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
 		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
 	} else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
 		    (error == -EOPNOTSUPP)) {
-		pr_debug(DRV_PFX "write barrier op failed, not supported\n");
+		pr_debug("write barrier op failed, not supported\n");
 		xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0);
 		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
 	} else if (error) {
-		pr_debug(DRV_PFX "Buffer not up-to-date at end of operation,"
+		pr_debug("Buffer not up-to-date at end of operation,"
 			 " error=%d\n", error);
 		pending_req->status = BLKIF_RSP_ERROR;
 	}
@@ -1110,7 +1112,7 @@ __do_block_io_op(struct xen_blkif *blkif)
 
 	if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
 		rc = blk_rings->common.rsp_prod_pvt;
-		pr_warn(DRV_PFX "Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
+		pr_warn("Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
 			rp, rc, rp - rc, blkif->vbd.pdevice);
 		return -EACCES;
 	}
@@ -1217,8 +1219,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 	if ((req->operation == BLKIF_OP_INDIRECT) &&
 	    (req_operation != BLKIF_OP_READ) &&
 	    (req_operation != BLKIF_OP_WRITE)) {
-		pr_debug(DRV_PFX "Invalid indirect operation (%u)\n",
-			 req_operation);
+		pr_debug("Invalid indirect operation (%u)\n", req_operation);
 		goto fail_response;
 	}
 
@@ -1252,8 +1253,7 @@
 		     (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
 	    unlikely((req->operation == BLKIF_OP_INDIRECT) &&
		     (nseg > MAX_INDIRECT_SEGMENTS))) {
-		pr_debug(DRV_PFX "Bad number of segments in request (%d)\n",
-			 nseg);
+		pr_debug("Bad number of segments in request (%d)\n", nseg);
 		/* Haven't submitted any bio's yet. */
 		goto fail_response;
 	}
@@ -1288,7 +1288,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 	}
 
 	if (xen_vbd_translate(&preq, blkif, operation) != 0) {
-		pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n",
+		pr_debug("access denied: %s of [%llu,%llu] on dev=%04x\n",
 			 operation == READ ? "read" : "write",
 			 preq.sector_number,
 			 preq.sector_number + preq.nr_sects,
@@ -1303,7 +1303,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 	for (i = 0; i < nseg; i++) {
 		if (((int)preq.sector_number|(int)seg[i].nsec) &
 		    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
-			pr_debug(DRV_PFX "Misaligned I/O request from domain %d",
+			pr_debug("Misaligned I/O request from domain %d\n",
 				 blkif->domid);
 			goto fail_response;
 		}
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 375d28851860..f620b5d3f77c 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -44,12 +44,6 @@
 #include <xen/interface/io/blkif.h>
 #include <xen/interface/io/protocols.h>
 
-#define DRV_PFX "xen-blkback:"
-#define DPRINTK(fmt, args...)				\
-	pr_debug(DRV_PFX "(%s:%d) " fmt ".\n",		\
-		 __func__, __LINE__, ##args)
-
-
 /*
  * This is the maximum number of segments that would be allowed in indirect
  * requests. This value will also be passed to the frontend.
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index ff3025922c14..6ab69ad61ee1 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -14,6 +14,8 @@
14 14
15*/ 15*/
16 16
17#define pr_fmt(fmt) "xen-blkback: " fmt
18
17#include <stdarg.h> 19#include <stdarg.h>
18#include <linux/module.h> 20#include <linux/module.h>
19#include <linux/kthread.h> 21#include <linux/kthread.h>
@@ -21,6 +23,9 @@
21#include <xen/grant_table.h> 23#include <xen/grant_table.h>
22#include "common.h" 24#include "common.h"
23 25
26/* Enlarge the array size in order to fully show blkback name. */
27#define BLKBACK_NAME_LEN (20)
28
24struct backend_info { 29struct backend_info {
25 struct xenbus_device *dev; 30 struct xenbus_device *dev;
26 struct xen_blkif *blkif; 31 struct xen_blkif *blkif;
@@ -70,7 +75,7 @@ static int blkback_name(struct xen_blkif *blkif, char *buf)
70 else 75 else
71 devname = devpath; 76 devname = devpath;
72 77
73 snprintf(buf, TASK_COMM_LEN, "blkback.%d.%s", blkif->domid, devname); 78 snprintf(buf, BLKBACK_NAME_LEN, "blkback.%d.%s", blkif->domid, devname);
74 kfree(devpath); 79 kfree(devpath);
75 80
76 return 0; 81 return 0;
@@ -79,7 +84,7 @@ static int blkback_name(struct xen_blkif *blkif, char *buf)
79static void xen_update_blkif_status(struct xen_blkif *blkif) 84static void xen_update_blkif_status(struct xen_blkif *blkif)
80{ 85{
81 int err; 86 int err;
82 char name[TASK_COMM_LEN]; 87 char name[BLKBACK_NAME_LEN];
83 88
84 /* Not ready to connect? */ 89 /* Not ready to connect? */
85 if (!blkif->irq || !blkif->vbd.bdev) 90 if (!blkif->irq || !blkif->vbd.bdev)
@@ -424,14 +429,14 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
424 FMODE_READ : FMODE_WRITE, NULL); 429 FMODE_READ : FMODE_WRITE, NULL);
425 430
426 if (IS_ERR(bdev)) { 431 if (IS_ERR(bdev)) {
427 DPRINTK("xen_vbd_create: device %08x could not be opened.\n", 432 pr_warn("xen_vbd_create: device %08x could not be opened\n",
428 vbd->pdevice); 433 vbd->pdevice);
429 return -ENOENT; 434 return -ENOENT;
430 } 435 }
431 436
432 vbd->bdev = bdev; 437 vbd->bdev = bdev;
433 if (vbd->bdev->bd_disk == NULL) { 438 if (vbd->bdev->bd_disk == NULL) {
434 DPRINTK("xen_vbd_create: device %08x doesn't exist.\n", 439 pr_warn("xen_vbd_create: device %08x doesn't exist\n",
435 vbd->pdevice); 440 vbd->pdevice);
436 xen_vbd_free(vbd); 441 xen_vbd_free(vbd);
437 return -ENOENT; 442 return -ENOENT;
@@ -450,7 +455,7 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
450 if (q && blk_queue_secdiscard(q)) 455 if (q && blk_queue_secdiscard(q))
451 vbd->discard_secure = true; 456 vbd->discard_secure = true;
452 457
453 DPRINTK("Successful creation of handle=%04x (dom=%u)\n", 458 pr_debug("Successful creation of handle=%04x (dom=%u)\n",
454 handle, blkif->domid); 459 handle, blkif->domid);
455 return 0; 460 return 0;
456} 461}
@@ -458,7 +463,7 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
458{ 463{
459 struct backend_info *be = dev_get_drvdata(&dev->dev); 464 struct backend_info *be = dev_get_drvdata(&dev->dev);
460 465
461 DPRINTK(""); 466 pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);
462 467
463 if (be->major || be->minor) 468 if (be->major || be->minor)
464 xenvbd_sysfs_delif(dev); 469 xenvbd_sysfs_delif(dev);
@@ -564,6 +569,10 @@ static int xen_blkbk_probe(struct xenbus_device *dev,
564 int err; 569 int err;
565 struct backend_info *be = kzalloc(sizeof(struct backend_info), 570 struct backend_info *be = kzalloc(sizeof(struct backend_info),
566 GFP_KERNEL); 571 GFP_KERNEL);
572
573 /* match the pr_debug in xen_blkbk_remove */
574 pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);
575
567 if (!be) { 576 if (!be) {
568 xenbus_dev_fatal(dev, -ENOMEM, 577 xenbus_dev_fatal(dev, -ENOMEM,
569 "allocating backend structure"); 578 "allocating backend structure");
@@ -595,7 +604,7 @@ static int xen_blkbk_probe(struct xenbus_device *dev,
595 return 0; 604 return 0;
596 605
597fail: 606fail:
598 DPRINTK("failed"); 607 pr_warn("%s failed\n", __func__);
599 xen_blkbk_remove(dev); 608 xen_blkbk_remove(dev);
600 return err; 609 return err;
601} 610}
@@ -619,7 +628,7 @@ static void backend_changed(struct xenbus_watch *watch,
619 unsigned long handle; 628 unsigned long handle;
620 char *device_type; 629 char *device_type;
621 630
622 DPRINTK(""); 631 pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);
623 632
624 err = xenbus_scanf(XBT_NIL, dev->nodename, "physical-device", "%x:%x", 633 err = xenbus_scanf(XBT_NIL, dev->nodename, "physical-device", "%x:%x",
625 &major, &minor); 634 &major, &minor);
@@ -638,7 +647,7 @@ static void backend_changed(struct xenbus_watch *watch,
638 647
639 if (be->major | be->minor) { 648 if (be->major | be->minor) {
640 if (be->major != major || be->minor != minor) 649 if (be->major != major || be->minor != minor)
641 pr_warn(DRV_PFX "changing physical device (from %x:%x to %x:%x) not supported.\n", 650 pr_warn("changing physical device (from %x:%x to %x:%x) not supported.\n",
642 be->major, be->minor, major, minor); 651 be->major, be->minor, major, minor);
643 return; 652 return;
644 } 653 }
@@ -699,13 +708,12 @@ static void frontend_changed(struct xenbus_device *dev,
699 struct backend_info *be = dev_get_drvdata(&dev->dev); 708 struct backend_info *be = dev_get_drvdata(&dev->dev);
700 int err; 709 int err;
701 710
702 DPRINTK("%s", xenbus_strstate(frontend_state)); 711 pr_debug("%s %p %s\n", __func__, dev, xenbus_strstate(frontend_state));
703 712
704 switch (frontend_state) { 713 switch (frontend_state) {
705 case XenbusStateInitialising: 714 case XenbusStateInitialising:
706 if (dev->state == XenbusStateClosed) { 715 if (dev->state == XenbusStateClosed) {
707 pr_info(DRV_PFX "%s: prepare for reconnect\n", 716 pr_info("%s: prepare for reconnect\n", dev->nodename);
708 dev->nodename);
709 xenbus_switch_state(dev, XenbusStateInitWait); 717 xenbus_switch_state(dev, XenbusStateInitWait);
710 } 718 }
711 break; 719 break;
@@ -772,7 +780,7 @@ static void connect(struct backend_info *be)
772 int err; 780 int err;
773 struct xenbus_device *dev = be->dev; 781 struct xenbus_device *dev = be->dev;
774 782
775 DPRINTK("%s", dev->otherend); 783 pr_debug("%s %s\n", __func__, dev->otherend);
776 784
777 /* Supply the information about the device the frontend needs */ 785 /* Supply the information about the device the frontend needs */
778again: 786again:
@@ -858,7 +866,7 @@ static int connect_ring(struct backend_info *be)
858 char protocol[64] = ""; 866 char protocol[64] = "";
859 int err; 867 int err;
860 868
861 DPRINTK("%s", dev->otherend); 869 pr_debug("%s %s\n", __func__, dev->otherend);
862 870
863 err = xenbus_gather(XBT_NIL, dev->otherend, "ring-ref", "%lu", 871 err = xenbus_gather(XBT_NIL, dev->otherend, "ring-ref", "%lu",
864 &ring_ref, "event-channel", "%u", &evtchn, NULL); 872 &ring_ref, "event-channel", "%u", &evtchn, NULL);
@@ -893,7 +901,7 @@ static int connect_ring(struct backend_info *be)
893 be->blkif->vbd.feature_gnt_persistent = pers_grants; 901 be->blkif->vbd.feature_gnt_persistent = pers_grants;
894 be->blkif->vbd.overflow_max_grants = 0; 902 be->blkif->vbd.overflow_max_grants = 0;
895 903
896 pr_info(DRV_PFX "ring-ref %ld, event-channel %d, protocol %d (%s) %s\n", 904 pr_info("ring-ref %ld, event-channel %d, protocol %d (%s) %s\n",
897 ring_ref, evtchn, be->blkif->blk_protocol, protocol, 905 ring_ref, evtchn, be->blkif->blk_protocol, protocol,
898 pers_grants ? "persistent grants" : ""); 906 pers_grants ? "persistent grants" : "");
899 907
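
The BLKBACK_NAME_LEN change earlier in this file is about truncation: the name buffer used to be sized TASK_COMM_LEN (16 bytes in these kernels), which cuts "blkback.<domid>.<dev>" short for all but the smallest names, as the comment in that hunk notes. A userspace illustration under that assumption:

#include <stdio.h>

int main(void)
{
	char comm[16];	/* old size: TASK_COMM_LEN */
	char name[20];	/* new size: BLKBACK_NAME_LEN */

	/* snprintf always NUL-terminates, so only 15 characters survive
	 * in the 16-byte buffer.
	 */
	snprintf(comm, sizeof(comm), "blkback.%d.%s", 123, "xvda1");
	snprintf(name, sizeof(name), "blkback.%d.%s", 123, "xvda1");

	printf("%s\n%s\n", comm, name);	/* "blkback.123.xvd" vs. "blkback.123.xvda1" */
	return 0;
}
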
diff --git a/include/linux/nbd.h b/include/linux/nbd.h
deleted file mode 100644
index f62f78aef4ac..000000000000
--- a/include/linux/nbd.h
+++ /dev/null
@@ -1,46 +0,0 @@
1/*
2 * 1999 Copyright (C) Pavel Machek, pavel@ucw.cz. This code is GPL.
3 * 1999/11/04 Copyright (C) 1999 VMware, Inc. (Regis "HPReg" Duchesne)
4 * Made nbd_end_request() use the io_request_lock
5 * 2001 Copyright (C) Steven Whitehouse
6 * New nbd_end_request() for compatibility with new linux block
7 * layer code.
8 * 2003/06/24 Louis D. Langholtz <ldl@aros.net>
9 * Removed unneeded blksize_bits field from nbd_device struct.
10 * Cleanup PARANOIA usage & code.
11 * 2004/02/19 Paul Clements
12 * Removed PARANOIA, plus various cleanup and comments
13 */
14#ifndef LINUX_NBD_H
15#define LINUX_NBD_H
16
17
18#include <linux/wait.h>
19#include <linux/mutex.h>
20#include <uapi/linux/nbd.h>
21
22struct request;
23
24struct nbd_device {
25 int flags;
26 int harderror; /* Code of hard error */
27 struct socket * sock; /* If == NULL, device is not ready, yet */
28 int magic;
29
30 spinlock_t queue_lock;
31 struct list_head queue_head; /* Requests waiting result */
32 struct request *active_req;
33 wait_queue_head_t active_wq;
34 struct list_head waiting_queue; /* Requests to be sent */
35 wait_queue_head_t waiting_wq;
36
37 struct mutex tx_lock;
38 struct gendisk *disk;
39 int blksize;
40 u64 bytesize;
41 pid_t pid; /* pid of nbd-client, if attached */
42 int xmit_timeout;
43 int disconnect; /* a disconnect has been requested by user */
44};
45
46#endif
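
The private header is deleted outright; presumably drivers/block/nbd.c was its only in-kernel consumer, and struct nbd_device now lives in that file next to the code that uses it. A rough sketch of the shape of that move (hypothetical placement, field list abridged, and other patches in this pull adjust individual fields):

/* drivers/block/nbd.c (sketch only, not the committed code) */
#include <linux/wait.h>
#include <linux/mutex.h>
#include <uapi/linux/nbd.h>	/* the ioctl ABI stays in the UAPI header */

struct nbd_device {
	int flags;
	struct socket *sock;	/* NULL until nbd-client hands one over */
	struct mutex tx_lock;
	struct gendisk *disk;
	/* ...remaining fields carried over from the deleted header... */
};
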
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 0adad4a5419b..8dbd05e70f09 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -117,8 +117,9 @@ struct nvme_ns {
117 117
118 unsigned ns_id; 118 unsigned ns_id;
119 int lba_shift; 119 int lba_shift;
120 int ms; 120 u16 ms;
121 int pi_type; 121 bool ext;
122 u8 pi_type;
122 u64 mode_select_num_blocks; 123 u64 mode_select_num_blocks;
123 u32 mode_select_block_len; 124 u32 mode_select_block_len;
124}; 125};
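
The reshaped nvme_ns fields track the namespace's on-media format: ms is the per-block metadata size, ext records whether that metadata is carried inline with the data (an extended LBA format) rather than in a separate buffer, and pi_type is the end-to-end protection type. Under those readings, the bytes moved per logical block work out as in this illustrative helper (not a function from the driver):

#include <linux/types.h>

/* Stand-in for the fields shown above, for illustration only. */
struct ns_fmt {
	int lba_shift;	/* log2 of the logical block size */
	u16 ms;		/* metadata bytes per block */
	bool ext;	/* metadata interleaved with data? */
};

static inline unsigned int ns_block_bytes(const struct ns_fmt *f)
{
	unsigned int data = 1u << f->lba_shift;

	/* Extended LBA formats ship metadata inline, so each block on the
	 * wire is data + metadata; otherwise the metadata travels in a
	 * separate buffer and the data size stands alone.
	 */
	return f->ext ? data + f->ms : data;
}
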