Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/cpqarray.c              |   4
-rw-r--r--  drivers/block/drbd/drbd_main.c        |   7
-rw-r--r--  drivers/block/drbd/drbd_req.c         |   3
-rw-r--r--  drivers/block/loop.c                  | 298
-rw-r--r--  drivers/block/nbd.c                   | 140
-rw-r--r--  drivers/block/nvme-core.c             | 159
-rw-r--r--  drivers/block/nvme-scsi.c             |  28
-rw-r--r--  drivers/block/paride/pg.c             |   4
-rw-r--r--  drivers/block/swim3.c                 |  12
-rw-r--r--  drivers/block/virtio_blk.c            |   9
-rw-r--r--  drivers/block/xen-blkback/blkback.c   |  62
-rw-r--r--  drivers/block/xen-blkback/common.h    |   6
-rw-r--r--  drivers/block/xen-blkback/xenbus.c    |  43
-rw-r--r--  drivers/block/xen-blkfront.c          |   5
-rw-r--r--  drivers/block/zram/zram_drv.c         |  73
-rw-r--r--  drivers/block/zram/zram_drv.h         |   1
16 files changed, 425 insertions(+), 429 deletions(-)
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 2b9440384536..f749df9e15cd 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -405,8 +405,8 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
 goto Enomem4;
 }
 hba[i]->access.set_intr_mask(hba[i], 0);
-if (request_irq(hba[i]->intr, do_ida_intr,
-IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
+if (request_irq(hba[i]->intr, do_ida_intr, IRQF_SHARED,
+hba[i]->devname, hba[i]))
 {
 printk(KERN_ERR "cpqarray: Unable to get irq %d for %s\n",
 hba[i]->intr, hba[i]->devname);
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 1fc83427199c..81fde9ef7f8e 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2107,13 +2107,12 @@ static int drbd_create_mempools(void)
 if (drbd_md_io_page_pool == NULL)
 goto Enomem;
 
-drbd_request_mempool = mempool_create(number,
-mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
+drbd_request_mempool = mempool_create_slab_pool(number,
+drbd_request_cache);
 if (drbd_request_mempool == NULL)
 goto Enomem;
 
-drbd_ee_mempool = mempool_create(number,
-mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
+drbd_ee_mempool = mempool_create_slab_pool(number, drbd_ee_cache);
 if (drbd_ee_mempool == NULL)
 goto Enomem;
 
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 34f2f0ba409b..3907202fb9d9 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -52,9 +52,10 @@ static struct drbd_request *drbd_req_new(struct drbd_device *device,
 {
 struct drbd_request *req;
 
-req = mempool_alloc(drbd_request_mempool, GFP_NOIO | __GFP_ZERO);
+req = mempool_alloc(drbd_request_mempool, GFP_NOIO);
 if (!req)
 return NULL;
+memset(req, 0, sizeof(*req));
 
 drbd_req_make_private_bio(req, bio_src);
 req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index d1f168b73634..ae3fcb4199e9 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -75,6 +75,7 @@
 #include <linux/sysfs.h>
 #include <linux/miscdevice.h>
 #include <linux/falloc.h>
+#include <linux/uio.h>
 #include "loop.h"
 
 #include <asm/uaccess.h>
@@ -87,28 +88,6 @@ static int part_shift;
 
 static struct workqueue_struct *loop_wq;
 
-/*
- * Transfer functions
- */
-static int transfer_none(struct loop_device *lo, int cmd,
-struct page *raw_page, unsigned raw_off,
-struct page *loop_page, unsigned loop_off,
-int size, sector_t real_block)
-{
-char *raw_buf = kmap_atomic(raw_page) + raw_off;
-char *loop_buf = kmap_atomic(loop_page) + loop_off;
-
-if (cmd == READ)
-memcpy(loop_buf, raw_buf, size);
-else
-memcpy(raw_buf, loop_buf, size);
-
-kunmap_atomic(loop_buf);
-kunmap_atomic(raw_buf);
-cond_resched();
-return 0;
-}
-
 static int transfer_xor(struct loop_device *lo, int cmd,
 struct page *raw_page, unsigned raw_off,
 struct page *loop_page, unsigned loop_off,
@@ -147,14 +126,13 @@ static int xor_init(struct loop_device *lo, const struct loop_info64 *info)
 
 static struct loop_func_table none_funcs = {
 .number = LO_CRYPT_NONE,
-.transfer = transfer_none,
 };
 
 static struct loop_func_table xor_funcs = {
 .number = LO_CRYPT_XOR,
 .transfer = transfer_xor,
 .init = xor_init
 };
 
 /* xfer_funcs[0] is special - its release function is never called */
 static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
@@ -214,206 +192,169 @@ lo_do_transfer(struct loop_device *lo, int cmd,
214 struct page *lpage, unsigned loffs, 192 struct page *lpage, unsigned loffs,
215 int size, sector_t rblock) 193 int size, sector_t rblock)
216{ 194{
217 if (unlikely(!lo->transfer)) 195 int ret;
196
197 ret = lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock);
198 if (likely(!ret))
218 return 0; 199 return 0;
219 200
220 return lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock); 201 printk_ratelimited(KERN_ERR
202 "loop: Transfer error at byte offset %llu, length %i.\n",
203 (unsigned long long)rblock << 9, size);
204 return ret;
221} 205}
222 206
223/** 207static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
224 * __do_lo_send_write - helper for writing data to a loop device
225 *
226 * This helper just factors out common code between do_lo_send_direct_write()
227 * and do_lo_send_write().
228 */
229static int __do_lo_send_write(struct file *file,
230 u8 *buf, const int len, loff_t pos)
231{ 208{
209 struct iov_iter i;
232 ssize_t bw; 210 ssize_t bw;
233 mm_segment_t old_fs = get_fs(); 211
212 iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len);
234 213
235 file_start_write(file); 214 file_start_write(file);
236 set_fs(get_ds()); 215 bw = vfs_iter_write(file, &i, ppos);
237 bw = file->f_op->write(file, buf, len, &pos);
238 set_fs(old_fs);
239 file_end_write(file); 216 file_end_write(file);
240 if (likely(bw == len)) 217
218 if (likely(bw == bvec->bv_len))
241 return 0; 219 return 0;
242 printk_ratelimited(KERN_ERR "loop: Write error at byte offset %llu, length %i.\n", 220
243 (unsigned long long)pos, len); 221 printk_ratelimited(KERN_ERR
222 "loop: Write error at byte offset %llu, length %i.\n",
223 (unsigned long long)*ppos, bvec->bv_len);
244 if (bw >= 0) 224 if (bw >= 0)
245 bw = -EIO; 225 bw = -EIO;
246 return bw; 226 return bw;
247} 227}
248 228
249/** 229static int lo_write_simple(struct loop_device *lo, struct request *rq,
250 * do_lo_send_direct_write - helper for writing data to a loop device 230 loff_t pos)
251 *
252 * This is the fast, non-transforming version that does not need double
253 * buffering.
254 */
255static int do_lo_send_direct_write(struct loop_device *lo,
256 struct bio_vec *bvec, loff_t pos, struct page *page)
257{ 231{
258 ssize_t bw = __do_lo_send_write(lo->lo_backing_file, 232 struct bio_vec bvec;
259 kmap(bvec->bv_page) + bvec->bv_offset, 233 struct req_iterator iter;
260 bvec->bv_len, pos); 234 int ret = 0;
261 kunmap(bvec->bv_page); 235
262 cond_resched(); 236 rq_for_each_segment(bvec, rq, iter) {
263 return bw; 237 ret = lo_write_bvec(lo->lo_backing_file, &bvec, &pos);
238 if (ret < 0)
239 break;
240 cond_resched();
241 }
242
243 return ret;
264} 244}
265 245
266/** 246/*
267 * do_lo_send_write - helper for writing data to a loop device
268 *
269 * This is the slow, transforming version that needs to double buffer the 247 * This is the slow, transforming version that needs to double buffer the
270 * data as it cannot do the transformations in place without having direct 248 * data as it cannot do the transformations in place without having direct
271 * access to the destination pages of the backing file. 249 * access to the destination pages of the backing file.
272 */ 250 */
273static int do_lo_send_write(struct loop_device *lo, struct bio_vec *bvec, 251static int lo_write_transfer(struct loop_device *lo, struct request *rq,
274 loff_t pos, struct page *page) 252 loff_t pos)
275{
276 int ret = lo_do_transfer(lo, WRITE, page, 0, bvec->bv_page,
277 bvec->bv_offset, bvec->bv_len, pos >> 9);
278 if (likely(!ret))
279 return __do_lo_send_write(lo->lo_backing_file,
280 page_address(page), bvec->bv_len,
281 pos);
282 printk_ratelimited(KERN_ERR "loop: Transfer error at byte offset %llu, "
283 "length %i.\n", (unsigned long long)pos, bvec->bv_len);
284 if (ret > 0)
285 ret = -EIO;
286 return ret;
287}
288
289static int lo_send(struct loop_device *lo, struct request *rq, loff_t pos)
290{ 253{
291 int (*do_lo_send)(struct loop_device *, struct bio_vec *, loff_t, 254 struct bio_vec bvec, b;
292 struct page *page);
293 struct bio_vec bvec;
294 struct req_iterator iter; 255 struct req_iterator iter;
295 struct page *page = NULL; 256 struct page *page;
296 int ret = 0; 257 int ret = 0;
297 258
298 if (lo->transfer != transfer_none) { 259 page = alloc_page(GFP_NOIO);
299 page = alloc_page(GFP_NOIO | __GFP_HIGHMEM); 260 if (unlikely(!page))
300 if (unlikely(!page)) 261 return -ENOMEM;
301 goto fail;
302 kmap(page);
303 do_lo_send = do_lo_send_write;
304 } else {
305 do_lo_send = do_lo_send_direct_write;
306 }
307 262
308 rq_for_each_segment(bvec, rq, iter) { 263 rq_for_each_segment(bvec, rq, iter) {
309 ret = do_lo_send(lo, &bvec, pos, page); 264 ret = lo_do_transfer(lo, WRITE, page, 0, bvec.bv_page,
265 bvec.bv_offset, bvec.bv_len, pos >> 9);
266 if (unlikely(ret))
267 break;
268
269 b.bv_page = page;
270 b.bv_offset = 0;
271 b.bv_len = bvec.bv_len;
272 ret = lo_write_bvec(lo->lo_backing_file, &b, &pos);
310 if (ret < 0) 273 if (ret < 0)
311 break; 274 break;
312 pos += bvec.bv_len;
313 } 275 }
314 if (page) { 276
315 kunmap(page); 277 __free_page(page);
316 __free_page(page);
317 }
318out:
319 return ret; 278 return ret;
320fail:
321 printk_ratelimited(KERN_ERR "loop: Failed to allocate temporary page for write.\n");
322 ret = -ENOMEM;
323 goto out;
324} 279}
325 280
326struct lo_read_data { 281static int lo_read_simple(struct loop_device *lo, struct request *rq,
327 struct loop_device *lo; 282 loff_t pos)
328 struct page *page; 283{
329 unsigned offset; 284 struct bio_vec bvec;
330 int bsize; 285 struct req_iterator iter;
331}; 286 struct iov_iter i;
287 ssize_t len;
332 288
333static int 289 rq_for_each_segment(bvec, rq, iter) {
334lo_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf, 290 iov_iter_bvec(&i, ITER_BVEC, &bvec, 1, bvec.bv_len);
335 struct splice_desc *sd) 291 len = vfs_iter_read(lo->lo_backing_file, &i, &pos);
336{ 292 if (len < 0)
337 struct lo_read_data *p = sd->u.data; 293 return len;
338 struct loop_device *lo = p->lo;
339 struct page *page = buf->page;
340 sector_t IV;
341 int size;
342
343 IV = ((sector_t) page->index << (PAGE_CACHE_SHIFT - 9)) +
344 (buf->offset >> 9);
345 size = sd->len;
346 if (size > p->bsize)
347 size = p->bsize;
348
349 if (lo_do_transfer(lo, READ, page, buf->offset, p->page, p->offset, size, IV)) {
350 printk_ratelimited(KERN_ERR "loop: transfer error block %ld\n",
351 page->index);
352 size = -EINVAL;
353 }
354 294
355 flush_dcache_page(p->page); 295 flush_dcache_page(bvec.bv_page);
356 296
357 if (size > 0) 297 if (len != bvec.bv_len) {
358 p->offset += size; 298 struct bio *bio;
359 299
360 return size; 300 __rq_for_each_bio(bio, rq)
361} 301 zero_fill_bio(bio);
302 break;
303 }
304 cond_resched();
305 }
362 306
363static int 307 return 0;
364lo_direct_splice_actor(struct pipe_inode_info *pipe, struct splice_desc *sd)
365{
366 return __splice_from_pipe(pipe, sd, lo_splice_actor);
367} 308}
368 309
369static ssize_t 310static int lo_read_transfer(struct loop_device *lo, struct request *rq,
370do_lo_receive(struct loop_device *lo, 311 loff_t pos)
371 struct bio_vec *bvec, int bsize, loff_t pos)
372{ 312{
373 struct lo_read_data cookie; 313 struct bio_vec bvec, b;
374 struct splice_desc sd; 314 struct req_iterator iter;
375 struct file *file; 315 struct iov_iter i;
376 ssize_t retval; 316 struct page *page;
317 ssize_t len;
318 int ret = 0;
377 319
378 cookie.lo = lo; 320 page = alloc_page(GFP_NOIO);
379 cookie.page = bvec->bv_page; 321 if (unlikely(!page))
380 cookie.offset = bvec->bv_offset; 322 return -ENOMEM;
381 cookie.bsize = bsize;
382 323
383 sd.len = 0; 324 rq_for_each_segment(bvec, rq, iter) {
384 sd.total_len = bvec->bv_len; 325 loff_t offset = pos;
385 sd.flags = 0;
386 sd.pos = pos;
387 sd.u.data = &cookie;
388 326
389 file = lo->lo_backing_file; 327 b.bv_page = page;
390 retval = splice_direct_to_actor(file, &sd, lo_direct_splice_actor); 328 b.bv_offset = 0;
329 b.bv_len = bvec.bv_len;
391 330
392 return retval; 331 iov_iter_bvec(&i, ITER_BVEC, &b, 1, b.bv_len);
393} 332 len = vfs_iter_read(lo->lo_backing_file, &i, &pos);
333 if (len < 0) {
334 ret = len;
335 goto out_free_page;
336 }
394 337
395static int 338 ret = lo_do_transfer(lo, READ, page, 0, bvec.bv_page,
396lo_receive(struct loop_device *lo, struct request *rq, int bsize, loff_t pos) 339 bvec.bv_offset, len, offset >> 9);
397{ 340 if (ret)
398 struct bio_vec bvec; 341 goto out_free_page;
399 struct req_iterator iter;
400 ssize_t s;
401 342
402 rq_for_each_segment(bvec, rq, iter) { 343 flush_dcache_page(bvec.bv_page);
403 s = do_lo_receive(lo, &bvec, bsize, pos);
404 if (s < 0)
405 return s;
406 344
407 if (s != bvec.bv_len) { 345 if (len != bvec.bv_len) {
408 struct bio *bio; 346 struct bio *bio;
409 347
410 __rq_for_each_bio(bio, rq) 348 __rq_for_each_bio(bio, rq)
411 zero_fill_bio(bio); 349 zero_fill_bio(bio);
412 break; 350 break;
413 } 351 }
414 pos += bvec.bv_len;
415 } 352 }
416 return 0; 353
354 ret = 0;
355out_free_page:
356 __free_page(page);
357 return ret;
417} 358}
418 359
419static int lo_discard(struct loop_device *lo, struct request *rq, loff_t pos) 360static int lo_discard(struct loop_device *lo, struct request *rq, loff_t pos)
@@ -462,10 +403,17 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
 ret = lo_req_flush(lo, rq);
 else if (rq->cmd_flags & REQ_DISCARD)
 ret = lo_discard(lo, rq, pos);
+else if (lo->transfer)
+ret = lo_write_transfer(lo, rq, pos);
 else
-ret = lo_send(lo, rq, pos);
-} else
-ret = lo_receive(lo, rq, lo->lo_blocksize, pos);
+ret = lo_write_simple(lo, rq, pos);
+
+} else {
+if (lo->transfer)
+ret = lo_read_transfer(lo, rq, pos);
+else
+ret = lo_read_simple(lo, rq, pos);
+}
 
 return ret;
 }
@@ -767,7 +715,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 goto out_putf;
 
 if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) ||
-!file->f_op->write)
+!file->f_op->write_iter)
 lo_flags |= LO_FLAGS_READ_ONLY;
 
 lo_blocksize = S_ISBLK(inode->i_mode) ?
@@ -786,7 +734,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 lo->lo_device = bdev;
 lo->lo_flags = lo_flags;
 lo->lo_backing_file = file;
-lo->transfer = transfer_none;
+lo->transfer = NULL;
 lo->ioctl = NULL;
 lo->lo_sizelimit = 0;
 lo->old_gfp_mask = mapping_gfp_mask(mapping);
@@ -1005,7 +953,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
1005 memcpy(lo->lo_encrypt_key, info->lo_encrypt_key, 953 memcpy(lo->lo_encrypt_key, info->lo_encrypt_key,
1006 info->lo_encrypt_key_size); 954 info->lo_encrypt_key_size);
1007 lo->lo_key_owner = uid; 955 lo->lo_key_owner = uid;
1008 } 956 }
1009 957
1010 return 0; 958 return 0;
1011} 959}
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index a98c41f72c63..39e5f7fae3ef 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -32,28 +32,36 @@
32#include <net/sock.h> 32#include <net/sock.h>
33#include <linux/net.h> 33#include <linux/net.h>
34#include <linux/kthread.h> 34#include <linux/kthread.h>
35#include <linux/types.h>
35 36
36#include <asm/uaccess.h> 37#include <asm/uaccess.h>
37#include <asm/types.h> 38#include <asm/types.h>
38 39
39#include <linux/nbd.h> 40#include <linux/nbd.h>
40 41
41#define NBD_MAGIC 0x68797548 42struct nbd_device {
43 int flags;
44 int harderror; /* Code of hard error */
45 struct socket * sock; /* If == NULL, device is not ready, yet */
46 int magic;
47
48 spinlock_t queue_lock;
49 struct list_head queue_head; /* Requests waiting result */
50 struct request *active_req;
51 wait_queue_head_t active_wq;
52 struct list_head waiting_queue; /* Requests to be sent */
53 wait_queue_head_t waiting_wq;
54
55 struct mutex tx_lock;
56 struct gendisk *disk;
57 int blksize;
58 loff_t bytesize;
59 pid_t pid; /* pid of nbd-client, if attached */
60 int xmit_timeout;
61 int disconnect; /* a disconnect has been requested by user */
62};
42 63
43#ifdef NDEBUG 64#define NBD_MAGIC 0x68797548
44#define dprintk(flags, fmt...)
45#else /* NDEBUG */
46#define dprintk(flags, fmt...) do { \
47 if (debugflags & (flags)) printk(KERN_DEBUG fmt); \
48} while (0)
49#define DBG_IOCTL 0x0004
50#define DBG_INIT 0x0010
51#define DBG_EXIT 0x0020
52#define DBG_BLKDEV 0x0100
53#define DBG_RX 0x0200
54#define DBG_TX 0x0400
55static unsigned int debugflags;
56#endif /* NDEBUG */
57 65
58static unsigned int nbds_max = 16; 66static unsigned int nbds_max = 16;
59static struct nbd_device *nbd_dev; 67static struct nbd_device *nbd_dev;
@@ -71,25 +79,9 @@ static int max_part;
71 */ 79 */
72static DEFINE_SPINLOCK(nbd_lock); 80static DEFINE_SPINLOCK(nbd_lock);
73 81
74#ifndef NDEBUG 82static inline struct device *nbd_to_dev(struct nbd_device *nbd)
75static const char *ioctl_cmd_to_ascii(int cmd)
76{ 83{
77 switch (cmd) { 84 return disk_to_dev(nbd->disk);
78 case NBD_SET_SOCK: return "set-sock";
79 case NBD_SET_BLKSIZE: return "set-blksize";
80 case NBD_SET_SIZE: return "set-size";
81 case NBD_SET_TIMEOUT: return "set-timeout";
82 case NBD_SET_FLAGS: return "set-flags";
83 case NBD_DO_IT: return "do-it";
84 case NBD_CLEAR_SOCK: return "clear-sock";
85 case NBD_CLEAR_QUE: return "clear-que";
86 case NBD_PRINT_DEBUG: return "print-debug";
87 case NBD_SET_SIZE_BLOCKS: return "set-size-blocks";
88 case NBD_DISCONNECT: return "disconnect";
89 case BLKROSET: return "set-read-only";
90 case BLKFLSBUF: return "flush-buffer-cache";
91 }
92 return "unknown";
93} 85}
94 86
95static const char *nbdcmd_to_ascii(int cmd) 87static const char *nbdcmd_to_ascii(int cmd)
@@ -103,30 +95,26 @@ static const char *nbdcmd_to_ascii(int cmd)
103 } 95 }
104 return "invalid"; 96 return "invalid";
105} 97}
106#endif /* NDEBUG */
107 98
108static void nbd_end_request(struct request *req) 99static void nbd_end_request(struct nbd_device *nbd, struct request *req)
109{ 100{
110 int error = req->errors ? -EIO : 0; 101 int error = req->errors ? -EIO : 0;
111 struct request_queue *q = req->q; 102 struct request_queue *q = req->q;
112 unsigned long flags; 103 unsigned long flags;
113 104
114 dprintk(DBG_BLKDEV, "%s: request %p: %s\n", req->rq_disk->disk_name, 105 dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", req,
115 req, error ? "failed" : "done"); 106 error ? "failed" : "done");
116 107
117 spin_lock_irqsave(q->queue_lock, flags); 108 spin_lock_irqsave(q->queue_lock, flags);
118 __blk_end_request_all(req, error); 109 __blk_end_request_all(req, error);
119 spin_unlock_irqrestore(q->queue_lock, flags); 110 spin_unlock_irqrestore(q->queue_lock, flags);
120} 111}
121 112
113/*
114 * Forcibly shutdown the socket causing all listeners to error
115 */
122static void sock_shutdown(struct nbd_device *nbd, int lock) 116static void sock_shutdown(struct nbd_device *nbd, int lock)
123{ 117{
124 /* Forcibly shutdown the socket causing all listeners
125 * to error
126 *
127 * FIXME: This code is duplicated from sys_shutdown, but
128 * there should be a more generic interface rather than
129 * calling socket ops directly here */
130 if (lock) 118 if (lock)
131 mutex_lock(&nbd->tx_lock); 119 mutex_lock(&nbd->tx_lock);
132 if (nbd->sock) { 120 if (nbd->sock) {
@@ -253,17 +241,15 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req)
253 } 241 }
254 memcpy(request.handle, &req, sizeof(req)); 242 memcpy(request.handle, &req, sizeof(req));
255 243
256 dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%uB)\n", 244 dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
257 nbd->disk->disk_name, req, 245 req, nbdcmd_to_ascii(nbd_cmd(req)),
258 nbdcmd_to_ascii(nbd_cmd(req)), 246 (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
259 (unsigned long long)blk_rq_pos(req) << 9,
260 blk_rq_bytes(req));
261 result = sock_xmit(nbd, 1, &request, sizeof(request), 247 result = sock_xmit(nbd, 1, &request, sizeof(request),
262 (nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0); 248 (nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0);
263 if (result <= 0) { 249 if (result <= 0) {
264 dev_err(disk_to_dev(nbd->disk), 250 dev_err(disk_to_dev(nbd->disk),
265 "Send control failed (result %d)\n", result); 251 "Send control failed (result %d)\n", result);
266 goto error_out; 252 return -EIO;
267 } 253 }
268 254
269 if (nbd_cmd(req) == NBD_CMD_WRITE) { 255 if (nbd_cmd(req) == NBD_CMD_WRITE) {
@@ -277,21 +263,18 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req)
277 flags = 0; 263 flags = 0;
278 if (!rq_iter_last(bvec, iter)) 264 if (!rq_iter_last(bvec, iter))
279 flags = MSG_MORE; 265 flags = MSG_MORE;
280 dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n", 266 dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
281 nbd->disk->disk_name, req, bvec.bv_len); 267 req, bvec.bv_len);
282 result = sock_send_bvec(nbd, &bvec, flags); 268 result = sock_send_bvec(nbd, &bvec, flags);
283 if (result <= 0) { 269 if (result <= 0) {
284 dev_err(disk_to_dev(nbd->disk), 270 dev_err(disk_to_dev(nbd->disk),
285 "Send data failed (result %d)\n", 271 "Send data failed (result %d)\n",
286 result); 272 result);
287 goto error_out; 273 return -EIO;
288 } 274 }
289 } 275 }
290 } 276 }
291 return 0; 277 return 0;
292
293error_out:
294 return -EIO;
295} 278}
296 279
297static struct request *nbd_find_request(struct nbd_device *nbd, 280static struct request *nbd_find_request(struct nbd_device *nbd,
@@ -302,7 +285,7 @@ static struct request *nbd_find_request(struct nbd_device *nbd,
 
 err = wait_event_interruptible(nbd->active_wq, nbd->active_req != xreq);
 if (unlikely(err))
-goto out;
+return ERR_PTR(err);
 
 spin_lock(&nbd->queue_lock);
 list_for_each_entry_safe(req, tmp, &nbd->queue_head, queuelist) {
@@ -314,10 +297,7 @@ static struct request *nbd_find_request(struct nbd_device *nbd,
 }
 spin_unlock(&nbd->queue_lock);
 
-err = -ENOENT;
-
-out:
-return ERR_PTR(err);
+return ERR_PTR(-ENOENT);
 }
 
 static inline int sock_recv_bvec(struct nbd_device *nbd, struct bio_vec *bvec)
@@ -371,8 +351,7 @@ static struct request *nbd_read_stat(struct nbd_device *nbd)
 return req;
 }
 
-dprintk(DBG_RX, "%s: request %p: got reply\n",
-nbd->disk->disk_name, req);
+dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
 if (nbd_cmd(req) == NBD_CMD_READ) {
 struct req_iterator iter;
 struct bio_vec bvec;
@@ -385,8 +364,8 @@ static struct request *nbd_read_stat(struct nbd_device *nbd)
 req->errors++;
 return req;
 }
-dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
-nbd->disk->disk_name, req, bvec.bv_len);
+dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
+req, bvec.bv_len);
 }
 }
 return req;
@@ -426,7 +405,7 @@ static int nbd_do_it(struct nbd_device *nbd)
 }
 
 while ((req = nbd_read_stat(nbd)) != NULL)
-nbd_end_request(req);
+nbd_end_request(nbd, req);
 
 device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
 nbd->pid = 0;
@@ -455,7 +434,7 @@ static void nbd_clear_que(struct nbd_device *nbd)
 queuelist);
 list_del_init(&req->queuelist);
 req->errors++;
-nbd_end_request(req);
+nbd_end_request(nbd, req);
 }
 
 while (!list_empty(&nbd->waiting_queue)) {
@@ -463,7 +442,7 @@ static void nbd_clear_que(struct nbd_device *nbd)
 queuelist);
 list_del_init(&req->queuelist);
 req->errors++;
-nbd_end_request(req);
+nbd_end_request(nbd, req);
 }
 }
 
@@ -507,7 +486,7 @@ static void nbd_handle_req(struct nbd_device *nbd, struct request *req)
 if (nbd_send_req(nbd, req) != 0) {
 dev_err(disk_to_dev(nbd->disk), "Request send failed\n");
 req->errors++;
-nbd_end_request(req);
+nbd_end_request(nbd, req);
 } else {
 spin_lock(&nbd->queue_lock);
 list_add_tail(&req->queuelist, &nbd->queue_head);
@@ -522,7 +501,7 @@ static void nbd_handle_req(struct nbd_device *nbd, struct request *req)
 
 error_out:
 req->errors++;
-nbd_end_request(req);
+nbd_end_request(nbd, req);
 }
 
 static int nbd_thread(void *data)
@@ -570,18 +549,18 @@ static void do_nbd_request(struct request_queue *q)
570 549
571 spin_unlock_irq(q->queue_lock); 550 spin_unlock_irq(q->queue_lock);
572 551
573 dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n",
574 req->rq_disk->disk_name, req, req->cmd_type);
575
576 nbd = req->rq_disk->private_data; 552 nbd = req->rq_disk->private_data;
577 553
578 BUG_ON(nbd->magic != NBD_MAGIC); 554 BUG_ON(nbd->magic != NBD_MAGIC);
579 555
556 dev_dbg(nbd_to_dev(nbd), "request %p: dequeued (flags=%x)\n",
557 req, req->cmd_type);
558
580 if (unlikely(!nbd->sock)) { 559 if (unlikely(!nbd->sock)) {
581 dev_err(disk_to_dev(nbd->disk), 560 dev_err(disk_to_dev(nbd->disk),
582 "Attempted send on closed socket\n"); 561 "Attempted send on closed socket\n");
583 req->errors++; 562 req->errors++;
584 nbd_end_request(req); 563 nbd_end_request(nbd, req);
585 spin_lock_irq(q->queue_lock); 564 spin_lock_irq(q->queue_lock);
586 continue; 565 continue;
587 } 566 }
@@ -706,13 +685,13 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
706 else 685 else
707 blk_queue_flush(nbd->disk->queue, 0); 686 blk_queue_flush(nbd->disk->queue, 0);
708 687
709 thread = kthread_create(nbd_thread, nbd, "%s", 688 thread = kthread_run(nbd_thread, nbd, "%s",
710 nbd->disk->disk_name); 689 nbd->disk->disk_name);
711 if (IS_ERR(thread)) { 690 if (IS_ERR(thread)) {
712 mutex_lock(&nbd->tx_lock); 691 mutex_lock(&nbd->tx_lock);
713 return PTR_ERR(thread); 692 return PTR_ERR(thread);
714 } 693 }
715 wake_up_process(thread); 694
716 error = nbd_do_it(nbd); 695 error = nbd_do_it(nbd);
717 kthread_stop(thread); 696 kthread_stop(thread);
718 697
@@ -768,10 +747,6 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
768 747
769 BUG_ON(nbd->magic != NBD_MAGIC); 748 BUG_ON(nbd->magic != NBD_MAGIC);
770 749
771 /* Anyone capable of this syscall can do *real bad* things */
772 dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n",
773 nbd->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg);
774
775 mutex_lock(&nbd->tx_lock); 750 mutex_lock(&nbd->tx_lock);
776 error = __nbd_ioctl(bdev, nbd, cmd, arg); 751 error = __nbd_ioctl(bdev, nbd, cmd, arg);
777 mutex_unlock(&nbd->tx_lock); 752 mutex_unlock(&nbd->tx_lock);
@@ -861,7 +836,6 @@ static int __init nbd_init(void)
 }
 
 printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);
-dprintk(DBG_INIT, "nbd: debugflags=0x%x\n", debugflags);
 
 for (i = 0; i < nbds_max; i++) {
 struct gendisk *disk = nbd_dev[i].disk;
@@ -920,7 +894,3 @@ module_param(nbds_max, int, 0444);
 MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
 module_param(max_part, int, 0444);
 MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");
-#ifndef NDEBUG
-module_param(debugflags, int, 0644);
-MODULE_PARM_DESC(debugflags, "flags for controlling debug output");
-#endif
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index e23be20a3417..85b8036deaa3 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -44,7 +44,7 @@
 
 #define NVME_MINORS (1U << MINORBITS)
 #define NVME_Q_DEPTH 1024
-#define NVME_AQ_DEPTH 64
+#define NVME_AQ_DEPTH 256
 #define SQ_SIZE(depth) (depth * sizeof(struct nvme_command))
 #define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion))
 #define ADMIN_TIMEOUT (admin_timeout * HZ)
@@ -152,6 +152,7 @@ struct nvme_cmd_info {
  */
 #define NVME_INT_PAGES 2
 #define NVME_INT_BYTES(dev) (NVME_INT_PAGES * (dev)->page_size)
+#define NVME_INT_MASK 0x01
 
 /*
  * Will slightly overestimate the number of pages needed. This is OK
@@ -257,7 +258,7 @@ static void *iod_get_private(struct nvme_iod *iod)
  */
 static bool iod_should_kfree(struct nvme_iod *iod)
 {
-return (iod->private & 0x01) == 0;
+return (iod->private & NVME_INT_MASK) == 0;
 }
 
 /* Special values must be less than 0x1000 */
@@ -301,8 +302,6 @@ static void *cancel_cmd_info(struct nvme_cmd_info *cmd, nvme_completion_fn *fn)
301static void async_req_completion(struct nvme_queue *nvmeq, void *ctx, 302static void async_req_completion(struct nvme_queue *nvmeq, void *ctx,
302 struct nvme_completion *cqe) 303 struct nvme_completion *cqe)
303{ 304{
304 struct request *req = ctx;
305
306 u32 result = le32_to_cpup(&cqe->result); 305 u32 result = le32_to_cpup(&cqe->result);
307 u16 status = le16_to_cpup(&cqe->status) >> 1; 306 u16 status = le16_to_cpup(&cqe->status) >> 1;
308 307
@@ -311,8 +310,6 @@ static void async_req_completion(struct nvme_queue *nvmeq, void *ctx,
311 if (status == NVME_SC_SUCCESS) 310 if (status == NVME_SC_SUCCESS)
312 dev_warn(nvmeq->q_dmadev, 311 dev_warn(nvmeq->q_dmadev,
313 "async event result %08x\n", result); 312 "async event result %08x\n", result);
314
315 blk_mq_free_hctx_request(nvmeq->hctx, req);
316} 313}
317 314
318static void abort_completion(struct nvme_queue *nvmeq, void *ctx, 315static void abort_completion(struct nvme_queue *nvmeq, void *ctx,
@@ -432,7 +429,6 @@ static struct nvme_iod *nvme_alloc_iod(struct request *rq, struct nvme_dev *dev,
432{ 429{
433 unsigned size = !(rq->cmd_flags & REQ_DISCARD) ? blk_rq_bytes(rq) : 430 unsigned size = !(rq->cmd_flags & REQ_DISCARD) ? blk_rq_bytes(rq) :
434 sizeof(struct nvme_dsm_range); 431 sizeof(struct nvme_dsm_range);
435 unsigned long mask = 0;
436 struct nvme_iod *iod; 432 struct nvme_iod *iod;
437 433
438 if (rq->nr_phys_segments <= NVME_INT_PAGES && 434 if (rq->nr_phys_segments <= NVME_INT_PAGES &&
@@ -440,9 +436,8 @@ static struct nvme_iod *nvme_alloc_iod(struct request *rq, struct nvme_dev *dev,
440 struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(rq); 436 struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(rq);
441 437
442 iod = cmd->iod; 438 iod = cmd->iod;
443 mask = 0x01;
444 iod_init(iod, size, rq->nr_phys_segments, 439 iod_init(iod, size, rq->nr_phys_segments,
445 (unsigned long) rq | 0x01); 440 (unsigned long) rq | NVME_INT_MASK);
446 return iod; 441 return iod;
447 } 442 }
448 443
@@ -522,8 +517,6 @@ static void nvme_dif_remap(struct request *req,
522 return; 517 return;
523 518
524 pmap = kmap_atomic(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset; 519 pmap = kmap_atomic(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset;
525 if (!pmap)
526 return;
527 520
528 p = pmap; 521 p = pmap;
529 virt = bip_get_seed(bip); 522 virt = bip_get_seed(bip);
@@ -645,12 +638,12 @@ int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod, int total_len,
645 struct scatterlist *sg = iod->sg; 638 struct scatterlist *sg = iod->sg;
646 int dma_len = sg_dma_len(sg); 639 int dma_len = sg_dma_len(sg);
647 u64 dma_addr = sg_dma_address(sg); 640 u64 dma_addr = sg_dma_address(sg);
648 int offset = offset_in_page(dma_addr); 641 u32 page_size = dev->page_size;
642 int offset = dma_addr & (page_size - 1);
649 __le64 *prp_list; 643 __le64 *prp_list;
650 __le64 **list = iod_list(iod); 644 __le64 **list = iod_list(iod);
651 dma_addr_t prp_dma; 645 dma_addr_t prp_dma;
652 int nprps, i; 646 int nprps, i;
653 u32 page_size = dev->page_size;
654 647
655 length -= (page_size - offset); 648 length -= (page_size - offset);
656 if (length <= 0) 649 if (length <= 0)
@@ -1028,18 +1021,19 @@ static int nvme_submit_async_admin_req(struct nvme_dev *dev)
1028 struct nvme_cmd_info *cmd_info; 1021 struct nvme_cmd_info *cmd_info;
1029 struct request *req; 1022 struct request *req;
1030 1023
1031 req = blk_mq_alloc_request(dev->admin_q, WRITE, GFP_ATOMIC, false); 1024 req = blk_mq_alloc_request(dev->admin_q, WRITE, GFP_ATOMIC, true);
1032 if (IS_ERR(req)) 1025 if (IS_ERR(req))
1033 return PTR_ERR(req); 1026 return PTR_ERR(req);
1034 1027
1035 req->cmd_flags |= REQ_NO_TIMEOUT; 1028 req->cmd_flags |= REQ_NO_TIMEOUT;
1036 cmd_info = blk_mq_rq_to_pdu(req); 1029 cmd_info = blk_mq_rq_to_pdu(req);
1037 nvme_set_info(cmd_info, req, async_req_completion); 1030 nvme_set_info(cmd_info, NULL, async_req_completion);
1038 1031
1039 memset(&c, 0, sizeof(c)); 1032 memset(&c, 0, sizeof(c));
1040 c.common.opcode = nvme_admin_async_event; 1033 c.common.opcode = nvme_admin_async_event;
1041 c.common.command_id = req->tag; 1034 c.common.command_id = req->tag;
1042 1035
1036 blk_mq_free_hctx_request(nvmeq->hctx, req);
1043 return __nvme_submit_cmd(nvmeq, &c); 1037 return __nvme_submit_cmd(nvmeq, &c);
1044} 1038}
1045 1039
@@ -1347,6 +1341,9 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
1347 nvmeq->cq_vector = -1; 1341 nvmeq->cq_vector = -1;
1348 spin_unlock_irq(&nvmeq->q_lock); 1342 spin_unlock_irq(&nvmeq->q_lock);
1349 1343
1344 if (!nvmeq->qid && nvmeq->dev->admin_q)
1345 blk_mq_freeze_queue_start(nvmeq->dev->admin_q);
1346
1350 irq_set_affinity_hint(vector, NULL); 1347 irq_set_affinity_hint(vector, NULL);
1351 free_irq(vector, nvmeq); 1348 free_irq(vector, nvmeq);
1352 1349
@@ -1378,8 +1375,6 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid)
1378 adapter_delete_sq(dev, qid); 1375 adapter_delete_sq(dev, qid);
1379 adapter_delete_cq(dev, qid); 1376 adapter_delete_cq(dev, qid);
1380 } 1377 }
1381 if (!qid && dev->admin_q)
1382 blk_mq_freeze_queue_start(dev->admin_q);
1383 1378
1384 spin_lock_irq(&nvmeq->q_lock); 1379 spin_lock_irq(&nvmeq->q_lock);
1385 nvme_process_cq(nvmeq); 1380 nvme_process_cq(nvmeq);
@@ -1583,6 +1578,7 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
1583 dev->admin_tagset.ops = &nvme_mq_admin_ops; 1578 dev->admin_tagset.ops = &nvme_mq_admin_ops;
1584 dev->admin_tagset.nr_hw_queues = 1; 1579 dev->admin_tagset.nr_hw_queues = 1;
1585 dev->admin_tagset.queue_depth = NVME_AQ_DEPTH - 1; 1580 dev->admin_tagset.queue_depth = NVME_AQ_DEPTH - 1;
1581 dev->admin_tagset.reserved_tags = 1;
1586 dev->admin_tagset.timeout = ADMIN_TIMEOUT; 1582 dev->admin_tagset.timeout = ADMIN_TIMEOUT;
1587 dev->admin_tagset.numa_node = dev_to_node(&dev->pci_dev->dev); 1583 dev->admin_tagset.numa_node = dev_to_node(&dev->pci_dev->dev);
1588 dev->admin_tagset.cmd_size = nvme_cmd_size(dev); 1584 dev->admin_tagset.cmd_size = nvme_cmd_size(dev);
@@ -1749,25 +1745,31 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
1749 struct nvme_dev *dev = ns->dev; 1745 struct nvme_dev *dev = ns->dev;
1750 struct nvme_user_io io; 1746 struct nvme_user_io io;
1751 struct nvme_command c; 1747 struct nvme_command c;
1752 unsigned length, meta_len; 1748 unsigned length, meta_len, prp_len;
1753 int status, i; 1749 int status, write;
1754 struct nvme_iod *iod, *meta_iod = NULL; 1750 struct nvme_iod *iod;
1755 dma_addr_t meta_dma_addr; 1751 dma_addr_t meta_dma = 0;
1756 void *meta, *uninitialized_var(meta_mem); 1752 void *meta = NULL;
1757 1753
1758 if (copy_from_user(&io, uio, sizeof(io))) 1754 if (copy_from_user(&io, uio, sizeof(io)))
1759 return -EFAULT; 1755 return -EFAULT;
1760 length = (io.nblocks + 1) << ns->lba_shift; 1756 length = (io.nblocks + 1) << ns->lba_shift;
1761 meta_len = (io.nblocks + 1) * ns->ms; 1757 meta_len = (io.nblocks + 1) * ns->ms;
1762 1758
1763 if (meta_len && ((io.metadata & 3) || !io.metadata)) 1759 if (meta_len && ((io.metadata & 3) || !io.metadata) && !ns->ext)
1764 return -EINVAL; 1760 return -EINVAL;
1761 else if (meta_len && ns->ext) {
1762 length += meta_len;
1763 meta_len = 0;
1764 }
1765
1766 write = io.opcode & 1;
1765 1767
1766 switch (io.opcode) { 1768 switch (io.opcode) {
1767 case nvme_cmd_write: 1769 case nvme_cmd_write:
1768 case nvme_cmd_read: 1770 case nvme_cmd_read:
1769 case nvme_cmd_compare: 1771 case nvme_cmd_compare:
1770 iod = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length); 1772 iod = nvme_map_user_pages(dev, write, io.addr, length);
1771 break; 1773 break;
1772 default: 1774 default:
1773 return -EINVAL; 1775 return -EINVAL;
@@ -1776,6 +1778,27 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
1776 if (IS_ERR(iod)) 1778 if (IS_ERR(iod))
1777 return PTR_ERR(iod); 1779 return PTR_ERR(iod);
1778 1780
1781 prp_len = nvme_setup_prps(dev, iod, length, GFP_KERNEL);
1782 if (length != prp_len) {
1783 status = -ENOMEM;
1784 goto unmap;
1785 }
1786 if (meta_len) {
1787 meta = dma_alloc_coherent(&dev->pci_dev->dev, meta_len,
1788 &meta_dma, GFP_KERNEL);
1789 if (!meta) {
1790 status = -ENOMEM;
1791 goto unmap;
1792 }
1793 if (write) {
1794 if (copy_from_user(meta, (void __user *)io.metadata,
1795 meta_len)) {
1796 status = -EFAULT;
1797 goto unmap;
1798 }
1799 }
1800 }
1801
1779 memset(&c, 0, sizeof(c)); 1802 memset(&c, 0, sizeof(c));
1780 c.rw.opcode = io.opcode; 1803 c.rw.opcode = io.opcode;
1781 c.rw.flags = io.flags; 1804 c.rw.flags = io.flags;
@@ -1787,75 +1810,21 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
1787 c.rw.reftag = cpu_to_le32(io.reftag); 1810 c.rw.reftag = cpu_to_le32(io.reftag);
1788 c.rw.apptag = cpu_to_le16(io.apptag); 1811 c.rw.apptag = cpu_to_le16(io.apptag);
1789 c.rw.appmask = cpu_to_le16(io.appmask); 1812 c.rw.appmask = cpu_to_le16(io.appmask);
1790
1791 if (meta_len) {
1792 meta_iod = nvme_map_user_pages(dev, io.opcode & 1, io.metadata,
1793 meta_len);
1794 if (IS_ERR(meta_iod)) {
1795 status = PTR_ERR(meta_iod);
1796 meta_iod = NULL;
1797 goto unmap;
1798 }
1799
1800 meta_mem = dma_alloc_coherent(&dev->pci_dev->dev, meta_len,
1801 &meta_dma_addr, GFP_KERNEL);
1802 if (!meta_mem) {
1803 status = -ENOMEM;
1804 goto unmap;
1805 }
1806
1807 if (io.opcode & 1) {
1808 int meta_offset = 0;
1809
1810 for (i = 0; i < meta_iod->nents; i++) {
1811 meta = kmap_atomic(sg_page(&meta_iod->sg[i])) +
1812 meta_iod->sg[i].offset;
1813 memcpy(meta_mem + meta_offset, meta,
1814 meta_iod->sg[i].length);
1815 kunmap_atomic(meta);
1816 meta_offset += meta_iod->sg[i].length;
1817 }
1818 }
1819
1820 c.rw.metadata = cpu_to_le64(meta_dma_addr);
1821 }
1822
1823 length = nvme_setup_prps(dev, iod, length, GFP_KERNEL);
1824 c.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg)); 1813 c.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
1825 c.rw.prp2 = cpu_to_le64(iod->first_dma); 1814 c.rw.prp2 = cpu_to_le64(iod->first_dma);
1826 1815 c.rw.metadata = cpu_to_le64(meta_dma);
1827 if (length != (io.nblocks + 1) << ns->lba_shift) 1816 status = nvme_submit_io_cmd(dev, ns, &c, NULL);
1828 status = -ENOMEM;
1829 else
1830 status = nvme_submit_io_cmd(dev, ns, &c, NULL);
1831
1832 if (meta_len) {
1833 if (status == NVME_SC_SUCCESS && !(io.opcode & 1)) {
1834 int meta_offset = 0;
1835
1836 for (i = 0; i < meta_iod->nents; i++) {
1837 meta = kmap_atomic(sg_page(&meta_iod->sg[i])) +
1838 meta_iod->sg[i].offset;
1839 memcpy(meta, meta_mem + meta_offset,
1840 meta_iod->sg[i].length);
1841 kunmap_atomic(meta);
1842 meta_offset += meta_iod->sg[i].length;
1843 }
1844 }
1845
1846 dma_free_coherent(&dev->pci_dev->dev, meta_len, meta_mem,
1847 meta_dma_addr);
1848 }
1849
1850 unmap: 1817 unmap:
1851 nvme_unmap_user_pages(dev, io.opcode & 1, iod); 1818 nvme_unmap_user_pages(dev, write, iod);
1852 nvme_free_iod(dev, iod); 1819 nvme_free_iod(dev, iod);
1853 1820 if (meta) {
1854 if (meta_iod) { 1821 if (status == NVME_SC_SUCCESS && !write) {
1855 nvme_unmap_user_pages(dev, io.opcode & 1, meta_iod); 1822 if (copy_to_user((void __user *)io.metadata, meta,
1856 nvme_free_iod(dev, meta_iod); 1823 meta_len))
1824 status = -EFAULT;
1825 }
1826 dma_free_coherent(&dev->pci_dev->dev, meta_len, meta, meta_dma);
1857 } 1827 }
1858
1859 return status; 1828 return status;
1860} 1829}
1861 1830
@@ -2018,7 +1987,8 @@ static int nvme_revalidate_disk(struct gendisk *disk)
2018 struct nvme_dev *dev = ns->dev; 1987 struct nvme_dev *dev = ns->dev;
2019 struct nvme_id_ns *id; 1988 struct nvme_id_ns *id;
2020 dma_addr_t dma_addr; 1989 dma_addr_t dma_addr;
2021 int lbaf, pi_type, old_ms; 1990 u8 lbaf, pi_type;
1991 u16 old_ms;
2022 unsigned short bs; 1992 unsigned short bs;
2023 1993
2024 id = dma_alloc_coherent(&dev->pci_dev->dev, 4096, &dma_addr, 1994 id = dma_alloc_coherent(&dev->pci_dev->dev, 4096, &dma_addr,
@@ -2039,6 +2009,7 @@ static int nvme_revalidate_disk(struct gendisk *disk)
2039 lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK; 2009 lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
2040 ns->lba_shift = id->lbaf[lbaf].ds; 2010 ns->lba_shift = id->lbaf[lbaf].ds;
2041 ns->ms = le16_to_cpu(id->lbaf[lbaf].ms); 2011 ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
2012 ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
2042 2013
2043 /* 2014 /*
2044 * If identify namespace failed, use default 512 byte block size so 2015 * If identify namespace failed, use default 512 byte block size so
@@ -2055,14 +2026,14 @@ static int nvme_revalidate_disk(struct gendisk *disk)
2055 if (blk_get_integrity(disk) && (ns->pi_type != pi_type || 2026 if (blk_get_integrity(disk) && (ns->pi_type != pi_type ||
2056 ns->ms != old_ms || 2027 ns->ms != old_ms ||
2057 bs != queue_logical_block_size(disk->queue) || 2028 bs != queue_logical_block_size(disk->queue) ||
2058 (ns->ms && id->flbas & NVME_NS_FLBAS_META_EXT))) 2029 (ns->ms && ns->ext)))
2059 blk_integrity_unregister(disk); 2030 blk_integrity_unregister(disk);
2060 2031
2061 ns->pi_type = pi_type; 2032 ns->pi_type = pi_type;
2062 blk_queue_logical_block_size(ns->queue, bs); 2033 blk_queue_logical_block_size(ns->queue, bs);
2063 2034
2064 if (ns->ms && !blk_get_integrity(disk) && (disk->flags & GENHD_FL_UP) && 2035 if (ns->ms && !blk_get_integrity(disk) && (disk->flags & GENHD_FL_UP) &&
2065 !(id->flbas & NVME_NS_FLBAS_META_EXT)) 2036 !ns->ext)
2066 nvme_init_integrity(ns); 2037 nvme_init_integrity(ns);
2067 2038
2068 if (id->ncap == 0 || (ns->ms && !blk_get_integrity(disk))) 2039 if (id->ncap == 0 || (ns->ms && !blk_get_integrity(disk)))
@@ -2334,7 +2305,6 @@ static int nvme_dev_add(struct nvme_dev *dev)
2334 dev->oncs = le16_to_cpup(&ctrl->oncs); 2305 dev->oncs = le16_to_cpup(&ctrl->oncs);
2335 dev->abort_limit = ctrl->acl + 1; 2306 dev->abort_limit = ctrl->acl + 1;
2336 dev->vwc = ctrl->vwc; 2307 dev->vwc = ctrl->vwc;
2337 dev->event_limit = min(ctrl->aerl + 1, 8);
2338 memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn)); 2308 memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
2339 memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn)); 2309 memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
2340 memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr)); 2310 memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
@@ -2881,6 +2851,7 @@ static int nvme_dev_start(struct nvme_dev *dev)
2881 2851
2882 nvme_set_irq_hints(dev); 2852 nvme_set_irq_hints(dev);
2883 2853
2854 dev->event_limit = 1;
2884 return result; 2855 return result;
2885 2856
2886 free_tags: 2857 free_tags:
@@ -3166,8 +3137,10 @@ static int __init nvme_init(void)
 nvme_char_major = result;
 
 nvme_class = class_create(THIS_MODULE, "nvme");
-if (!nvme_class)
+if (IS_ERR(nvme_class)) {
+result = PTR_ERR(nvme_class);
 goto unregister_chrdev;
+}
 
 result = pci_register_driver(&nvme_driver);
 if (result)
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
index e10196e0182d..6b736b00f63e 100644
--- a/drivers/block/nvme-scsi.c
+++ b/drivers/block/nvme-scsi.c
@@ -55,6 +55,7 @@ static int sg_version_num = 30534; /* 2 digits for each component */
 #define VPD_SERIAL_NUMBER 0x80
 #define VPD_DEVICE_IDENTIFIERS 0x83
 #define VPD_EXTENDED_INQUIRY 0x86
+#define VPD_BLOCK_LIMITS 0xB0
 #define VPD_BLOCK_DEV_CHARACTERISTICS 0xB1
 
 /* CDB offsets */
@@ -132,9 +133,10 @@ static int sg_version_num = 30534; /* 2 digits for each component */
 #define INQ_UNIT_SERIAL_NUMBER_PAGE 0x80
 #define INQ_DEVICE_IDENTIFICATION_PAGE 0x83
 #define INQ_EXTENDED_INQUIRY_DATA_PAGE 0x86
+#define INQ_BDEV_LIMITS_PAGE 0xB0
 #define INQ_BDEV_CHARACTERISTICS_PAGE 0xB1
 #define INQ_SERIAL_NUMBER_LENGTH 0x14
-#define INQ_NUM_SUPPORTED_VPD_PAGES 5
+#define INQ_NUM_SUPPORTED_VPD_PAGES 6
 #define VERSION_SPC_4 0x06
 #define ACA_UNSUPPORTED 0
 #define STANDARD_INQUIRY_LENGTH 36
@@ -747,6 +749,7 @@ static int nvme_trans_supported_vpd_pages(struct nvme_ns *ns,
 inq_response[6] = INQ_DEVICE_IDENTIFICATION_PAGE;
 inq_response[7] = INQ_EXTENDED_INQUIRY_DATA_PAGE;
 inq_response[8] = INQ_BDEV_CHARACTERISTICS_PAGE;
+inq_response[9] = INQ_BDEV_LIMITS_PAGE;
 
 xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
 res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
@@ -938,6 +941,25 @@ static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 return res;
 }
 
+static int nvme_trans_bdev_limits_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+u8 *inq_response, int alloc_len)
+{
+__be32 max_sectors = cpu_to_be32(queue_max_hw_sectors(ns->queue));
+__be32 max_discard = cpu_to_be32(ns->queue->limits.max_discard_sectors);
+__be32 discard_desc_count = cpu_to_be32(0x100);
+
+memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
+inq_response[1] = VPD_BLOCK_LIMITS;
+inq_response[3] = 0x3c; /* Page Length */
+memcpy(&inq_response[8], &max_sectors, sizeof(u32));
+memcpy(&inq_response[20], &max_discard, sizeof(u32));
+
+if (max_discard)
+memcpy(&inq_response[24], &discard_desc_count, sizeof(u32));
+
+return nvme_trans_copy_to_user(hdr, inq_response, 0x3c);
+}
+
 static int nvme_trans_bdev_char_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 int alloc_len)
 {
@@ -2268,6 +2290,10 @@ static int nvme_trans_inquiry(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 case VPD_EXTENDED_INQUIRY:
 res = nvme_trans_ext_inq_page(ns, hdr, alloc_len);
 break;
+case VPD_BLOCK_LIMITS:
+res = nvme_trans_bdev_limits_page(ns, hdr, inq_response,
+alloc_len);
+break;
 case VPD_BLOCK_DEV_CHARACTERISTICS:
 res = nvme_trans_bdev_char_page(ns, hdr, alloc_len);
 break;
diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c
index 2ce3dfd7e6b9..876d0c3eaf58 100644
--- a/drivers/block/paride/pg.c
+++ b/drivers/block/paride/pg.c
@@ -137,7 +137,7 @@
 
 */
 
-static bool verbose = 0;
+static int verbose;
 static int major = PG_MAJOR;
 static char *name = PG_NAME;
 static int disable = 0;
@@ -168,7 +168,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_DLY};
 
 #include <asm/uaccess.h>
 
-module_param(verbose, bool, 0644);
+module_param(verbose, int, 0644);
 module_param(major, int, 0);
 module_param(name, charp, 0);
 module_param_array(drive0, int, NULL, 0);
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 523ee8fd4c15..c264f2d284a7 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -440,9 +440,9 @@ static inline void seek_track(struct floppy_state *fs, int n)
 static inline void init_dma(struct dbdma_cmd *cp, int cmd,
 void *buf, int count)
 {
-st_le16(&cp->req_count, count);
-st_le16(&cp->command, cmd);
-st_le32(&cp->phy_addr, virt_to_bus(buf));
+cp->req_count = cpu_to_le16(count);
+cp->command = cpu_to_le16(cmd);
+cp->phy_addr = cpu_to_le32(virt_to_bus(buf));
 cp->xfer_status = 0;
 }
 
@@ -771,8 +771,8 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
 }
 /* turn off DMA */
 out_le32(&dr->control, (RUN | PAUSE) << 16);
-stat = ld_le16(&cp->xfer_status);
-resid = ld_le16(&cp->res_count);
+stat = le16_to_cpu(cp->xfer_status);
+resid = le16_to_cpu(cp->res_count);
 if (intr & ERROR_INTR) {
 n = fs->scount - 1 - resid / 512;
 if (n > 0) {
@@ -1170,7 +1170,7 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
 
 fs->dma_cmd = (struct dbdma_cmd *) DBDMA_ALIGN(fs->dbdma_cmd_space);
 memset(fs->dma_cmd, 0, 2 * sizeof(struct dbdma_cmd));
-st_le16(&fs->dma_cmd[1].command, DBDMA_STOP);
+fs->dma_cmd[1].command = cpu_to_le16(DBDMA_STOP);
 
 if (mdev->media_bay == NULL || check_media_bay(mdev->media_bay) == MB_FD)
 swim3_mb_event(mdev, MB_FD);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 655e570b9b31..5ea2f0bbbc7c 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -342,7 +342,7 @@ static void virtblk_config_changed_work(struct work_struct *work)
 struct request_queue *q = vblk->disk->queue;
 char cap_str_2[10], cap_str_10[10];
 char *envp[] = { "RESIZE=1", NULL };
-u64 capacity, size;
+u64 capacity;
 
 /* Host must always specify the capacity. */
 virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);
@@ -354,9 +354,10 @@ static void virtblk_config_changed_work(struct work_struct *work)
 capacity = (sector_t)-1;
 }
 
-size = capacity * queue_logical_block_size(q);
-string_get_size(size, STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
-string_get_size(size, STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
+string_get_size(capacity, queue_logical_block_size(q),
+STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
+string_get_size(capacity, queue_logical_block_size(q),
+STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
 
 dev_notice(&vdev->dev,
 "new size: %llu %d-byte logical blocks (%s/%s)\n",
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 2a04d341e598..bd2b3bbbb22c 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -34,6 +34,8 @@
  * IN THE SOFTWARE.
  */
 
+#define pr_fmt(fmt) "xen-blkback: " fmt
+
 #include <linux/spinlock.h>
 #include <linux/kthread.h>
 #include <linux/list.h>
@@ -211,7 +213,7 @@ static int add_persistent_gnt(struct xen_blkif *blkif,
 else if (persistent_gnt->gnt > this->gnt)
 new = &((*new)->rb_right);
 else {
-pr_alert_ratelimited(DRV_PFX " trying to add a gref that's already in the tree\n");
+pr_alert_ratelimited("trying to add a gref that's already in the tree\n");
 return -EINVAL;
 }
 }
@@ -242,7 +244,7 @@ static struct persistent_gnt *get_persistent_gnt(struct xen_blkif *blkif,
242 node = node->rb_right; 244 node = node->rb_right;
243 else { 245 else {
244 if(test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) { 246 if(test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) {
245 pr_alert_ratelimited(DRV_PFX " requesting a grant already in use\n"); 247 pr_alert_ratelimited("requesting a grant already in use\n");
246 return NULL; 248 return NULL;
247 } 249 }
248 set_bit(PERSISTENT_GNT_ACTIVE, data->flags); 250 set_bit(PERSISTENT_GNT_ACTIVE, data->flags);
@@ -257,7 +259,7 @@ static void put_persistent_gnt(struct xen_blkif *blkif,
257 struct persistent_gnt *persistent_gnt) 259 struct persistent_gnt *persistent_gnt)
258{ 260{
259 if(!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags)) 261 if(!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
260 pr_alert_ratelimited(DRV_PFX " freeing a grant already unused"); 262 pr_alert_ratelimited("freeing a grant already unused\n");
261 set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags); 263 set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
262 clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags); 264 clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
263 atomic_dec(&blkif->persistent_gnt_in_use); 265 atomic_dec(&blkif->persistent_gnt_in_use);
@@ -374,7 +376,7 @@ static void purge_persistent_gnt(struct xen_blkif *blkif)
374 } 376 }
375 377
376 if (work_pending(&blkif->persistent_purge_work)) { 378 if (work_pending(&blkif->persistent_purge_work)) {
377 pr_alert_ratelimited(DRV_PFX "Scheduled work from previous purge is still pending, cannot purge list\n"); 379 pr_alert_ratelimited("Scheduled work from previous purge is still pending, cannot purge list\n");
378 return; 380 return;
379 } 381 }
380 382
@@ -396,7 +398,7 @@ static void purge_persistent_gnt(struct xen_blkif *blkif)
396 398
397 total = num_clean; 399 total = num_clean;
398 400
399 pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean); 401 pr_debug("Going to purge %u persistent grants\n", num_clean);
400 402
401 BUG_ON(!list_empty(&blkif->persistent_purge_list)); 403 BUG_ON(!list_empty(&blkif->persistent_purge_list));
402 root = &blkif->persistent_gnts; 404 root = &blkif->persistent_gnts;
@@ -428,13 +430,13 @@ purge_list:
428 * with the requested num 430 * with the requested num
429 */ 431 */
430 if (!scan_used && !clean_used) { 432 if (!scan_used && !clean_used) {
431 pr_debug(DRV_PFX "Still missing %u purged frames\n", num_clean); 433 pr_debug("Still missing %u purged frames\n", num_clean);
432 scan_used = true; 434 scan_used = true;
433 goto purge_list; 435 goto purge_list;
434 } 436 }
435finished: 437finished:
436 if (!clean_used) { 438 if (!clean_used) {
437 pr_debug(DRV_PFX "Finished scanning for grants to clean, removing used flag\n"); 439 pr_debug("Finished scanning for grants to clean, removing used flag\n");
438 clean_used = true; 440 clean_used = true;
439 goto purge_list; 441 goto purge_list;
440 } 442 }
@@ -444,7 +446,7 @@ finished:
444 446
445 /* We can defer this work */ 447 /* We can defer this work */
446 schedule_work(&blkif->persistent_purge_work); 448 schedule_work(&blkif->persistent_purge_work);
447 pr_debug(DRV_PFX "Purged %u/%u\n", (total - num_clean), total); 449 pr_debug("Purged %u/%u\n", (total - num_clean), total);
448 return; 450 return;
449} 451}
450 452
@@ -520,20 +522,20 @@ static void xen_vbd_resize(struct xen_blkif *blkif)
520 struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be); 522 struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
521 unsigned long long new_size = vbd_sz(vbd); 523 unsigned long long new_size = vbd_sz(vbd);
522 524
523 pr_info(DRV_PFX "VBD Resize: Domid: %d, Device: (%d, %d)\n", 525 pr_info("VBD Resize: Domid: %d, Device: (%d, %d)\n",
524 blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice)); 526 blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
525 pr_info(DRV_PFX "VBD Resize: new size %llu\n", new_size); 527 pr_info("VBD Resize: new size %llu\n", new_size);
526 vbd->size = new_size; 528 vbd->size = new_size;
527again: 529again:
528 err = xenbus_transaction_start(&xbt); 530 err = xenbus_transaction_start(&xbt);
529 if (err) { 531 if (err) {
530 pr_warn(DRV_PFX "Error starting transaction"); 532 pr_warn("Error starting transaction\n");
531 return; 533 return;
532 } 534 }
533 err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu", 535 err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
534 (unsigned long long)vbd_sz(vbd)); 536 (unsigned long long)vbd_sz(vbd));
535 if (err) { 537 if (err) {
536 pr_warn(DRV_PFX "Error writing new size"); 538 pr_warn("Error writing new size\n");
537 goto abort; 539 goto abort;
538 } 540 }
539 /* 541 /*
@@ -543,7 +545,7 @@ again:
543 */ 545 */
544 err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state); 546 err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
545 if (err) { 547 if (err) {
546 pr_warn(DRV_PFX "Error writing the state"); 548 pr_warn("Error writing the state\n");
547 goto abort; 549 goto abort;
548 } 550 }
549 551
@@ -551,7 +553,7 @@ again:
551 if (err == -EAGAIN) 553 if (err == -EAGAIN)
552 goto again; 554 goto again;
553 if (err) 555 if (err)
554 pr_warn(DRV_PFX "Error ending transaction"); 556 pr_warn("Error ending transaction\n");
555 return; 557 return;
556abort: 558abort:
557 xenbus_transaction_end(xbt, 1); 559 xenbus_transaction_end(xbt, 1);
@@ -578,7 +580,7 @@ irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
578 580
579static void print_stats(struct xen_blkif *blkif) 581static void print_stats(struct xen_blkif *blkif)
580{ 582{
581 pr_info("xen-blkback (%s): oo %3llu | rd %4llu | wr %4llu | f %4llu" 583 pr_info("(%s): oo %3llu | rd %4llu | wr %4llu | f %4llu"
582 " | ds %4llu | pg: %4u/%4d\n", 584 " | ds %4llu | pg: %4u/%4d\n",
583 current->comm, blkif->st_oo_req, 585 current->comm, blkif->st_oo_req,
584 blkif->st_rd_req, blkif->st_wr_req, 586 blkif->st_rd_req, blkif->st_wr_req,
@@ -855,7 +857,7 @@ again:
855 /* This is a newly mapped grant */ 857 /* This is a newly mapped grant */
856 BUG_ON(new_map_idx >= segs_to_map); 858 BUG_ON(new_map_idx >= segs_to_map);
857 if (unlikely(map[new_map_idx].status != 0)) { 859 if (unlikely(map[new_map_idx].status != 0)) {
858 pr_debug(DRV_PFX "invalid buffer -- could not remap it\n"); 860 pr_debug("invalid buffer -- could not remap it\n");
859 put_free_pages(blkif, &pages[seg_idx]->page, 1); 861 put_free_pages(blkif, &pages[seg_idx]->page, 1);
860 pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE; 862 pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
861 ret |= 1; 863 ret |= 1;
@@ -891,14 +893,14 @@ again:
891 goto next; 893 goto next;
892 } 894 }
893 pages[seg_idx]->persistent_gnt = persistent_gnt; 895 pages[seg_idx]->persistent_gnt = persistent_gnt;
894 pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n", 896 pr_debug("grant %u added to the tree of persistent grants, using %u/%u\n",
895 persistent_gnt->gnt, blkif->persistent_gnt_c, 897 persistent_gnt->gnt, blkif->persistent_gnt_c,
896 xen_blkif_max_pgrants); 898 xen_blkif_max_pgrants);
897 goto next; 899 goto next;
898 } 900 }
899 if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) { 901 if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
900 blkif->vbd.overflow_max_grants = 1; 902 blkif->vbd.overflow_max_grants = 1;
901 pr_debug(DRV_PFX " domain %u, device %#x is using maximum number of persistent grants\n", 903 pr_debug("domain %u, device %#x is using maximum number of persistent grants\n",
902 blkif->domid, blkif->vbd.handle); 904 blkif->domid, blkif->vbd.handle);
903 } 905 }
904 /* 906 /*
@@ -916,7 +918,7 @@ next:
916 return ret; 918 return ret;
917 919
918out_of_memory: 920out_of_memory:
919 pr_alert(DRV_PFX "%s: out of memory\n", __func__); 921 pr_alert("%s: out of memory\n", __func__);
920 put_free_pages(blkif, pages_to_gnt, segs_to_map); 922 put_free_pages(blkif, pages_to_gnt, segs_to_map);
921 return -ENOMEM; 923 return -ENOMEM;
922} 924}
@@ -996,7 +998,7 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
996 998
997 err = xen_vbd_translate(&preq, blkif, WRITE); 999 err = xen_vbd_translate(&preq, blkif, WRITE);
998 if (err) { 1000 if (err) {
999 pr_warn(DRV_PFX "access denied: DISCARD [%llu->%llu] on dev=%04x\n", 1001 pr_warn("access denied: DISCARD [%llu->%llu] on dev=%04x\n",
1000 preq.sector_number, 1002 preq.sector_number,
1001 preq.sector_number + preq.nr_sects, blkif->vbd.pdevice); 1003 preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
1002 goto fail_response; 1004 goto fail_response;
@@ -1012,7 +1014,7 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
1012 GFP_KERNEL, secure); 1014 GFP_KERNEL, secure);
1013fail_response: 1015fail_response:
1014 if (err == -EOPNOTSUPP) { 1016 if (err == -EOPNOTSUPP) {
1015 pr_debug(DRV_PFX "discard op failed, not supported\n"); 1017 pr_debug("discard op failed, not supported\n");
1016 status = BLKIF_RSP_EOPNOTSUPP; 1018 status = BLKIF_RSP_EOPNOTSUPP;
1017 } else if (err) 1019 } else if (err)
1018 status = BLKIF_RSP_ERROR; 1020 status = BLKIF_RSP_ERROR;
@@ -1056,16 +1058,16 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
1056 /* An error fails the entire request. */ 1058 /* An error fails the entire request. */
1057 if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) && 1059 if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
1058 (error == -EOPNOTSUPP)) { 1060 (error == -EOPNOTSUPP)) {
1059 pr_debug(DRV_PFX "flush diskcache op failed, not supported\n"); 1061 pr_debug("flush diskcache op failed, not supported\n");
1060 xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0); 1062 xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
1061 pending_req->status = BLKIF_RSP_EOPNOTSUPP; 1063 pending_req->status = BLKIF_RSP_EOPNOTSUPP;
1062 } else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) && 1064 } else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
1063 (error == -EOPNOTSUPP)) { 1065 (error == -EOPNOTSUPP)) {
1064 pr_debug(DRV_PFX "write barrier op failed, not supported\n"); 1066 pr_debug("write barrier op failed, not supported\n");
1065 xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0); 1067 xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0);
1066 pending_req->status = BLKIF_RSP_EOPNOTSUPP; 1068 pending_req->status = BLKIF_RSP_EOPNOTSUPP;
1067 } else if (error) { 1069 } else if (error) {
1068 pr_debug(DRV_PFX "Buffer not up-to-date at end of operation," 1070 pr_debug("Buffer not up-to-date at end of operation,"
1069 " error=%d\n", error); 1071 " error=%d\n", error);
1070 pending_req->status = BLKIF_RSP_ERROR; 1072 pending_req->status = BLKIF_RSP_ERROR;
1071 } 1073 }
@@ -1110,7 +1112,7 @@ __do_block_io_op(struct xen_blkif *blkif)
1110 1112
1111 if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) { 1113 if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
1112 rc = blk_rings->common.rsp_prod_pvt; 1114 rc = blk_rings->common.rsp_prod_pvt;
1113 pr_warn(DRV_PFX "Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n", 1115 pr_warn("Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
1114 rp, rc, rp - rc, blkif->vbd.pdevice); 1116 rp, rc, rp - rc, blkif->vbd.pdevice);
1115 return -EACCES; 1117 return -EACCES;
1116 } 1118 }
@@ -1217,8 +1219,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
1217 if ((req->operation == BLKIF_OP_INDIRECT) && 1219 if ((req->operation == BLKIF_OP_INDIRECT) &&
1218 (req_operation != BLKIF_OP_READ) && 1220 (req_operation != BLKIF_OP_READ) &&
1219 (req_operation != BLKIF_OP_WRITE)) { 1221 (req_operation != BLKIF_OP_WRITE)) {
1220 pr_debug(DRV_PFX "Invalid indirect operation (%u)\n", 1222 pr_debug("Invalid indirect operation (%u)\n", req_operation);
1221 req_operation);
1222 goto fail_response; 1223 goto fail_response;
1223 } 1224 }
1224 1225
@@ -1252,8 +1253,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
1252 (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) || 1253 (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
1253 unlikely((req->operation == BLKIF_OP_INDIRECT) && 1254 unlikely((req->operation == BLKIF_OP_INDIRECT) &&
1254 (nseg > MAX_INDIRECT_SEGMENTS))) { 1255 (nseg > MAX_INDIRECT_SEGMENTS))) {
1255 pr_debug(DRV_PFX "Bad number of segments in request (%d)\n", 1256 pr_debug("Bad number of segments in request (%d)\n", nseg);
1256 nseg);
1257 /* Haven't submitted any bio's yet. */ 1257 /* Haven't submitted any bio's yet. */
1258 goto fail_response; 1258 goto fail_response;
1259 } 1259 }
@@ -1288,7 +1288,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
1288 } 1288 }
1289 1289
1290 if (xen_vbd_translate(&preq, blkif, operation) != 0) { 1290 if (xen_vbd_translate(&preq, blkif, operation) != 0) {
1291 pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n", 1291 pr_debug("access denied: %s of [%llu,%llu] on dev=%04x\n",
1292 operation == READ ? "read" : "write", 1292 operation == READ ? "read" : "write",
1293 preq.sector_number, 1293 preq.sector_number,
1294 preq.sector_number + preq.nr_sects, 1294 preq.sector_number + preq.nr_sects,
@@ -1303,7 +1303,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
1303 for (i = 0; i < nseg; i++) { 1303 for (i = 0; i < nseg; i++) {
1304 if (((int)preq.sector_number|(int)seg[i].nsec) & 1304 if (((int)preq.sector_number|(int)seg[i].nsec) &
1305 ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) { 1305 ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
1306 pr_debug(DRV_PFX "Misaligned I/O request from domain %d", 1306 pr_debug("Misaligned I/O request from domain %d\n",
1307 blkif->domid); 1307 blkif->domid);
1308 goto fail_response; 1308 goto fail_response;
1309 } 1309 }
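The DRV_PFX removals above rely on the pr_fmt() convention: when a file defines pr_fmt() before pulling in the printk helpers, every pr_*() call in that file gets the prefix prepended automatically (pr_warn(fmt, ...) expands to roughly printk(KERN_WARNING pr_fmt(fmt), ...)). A simplified sketch, not the driver code:

#define pr_fmt(fmt) "xen-blkback: " fmt

#include <linux/printk.h>

static void demo_log(void)
{
	/* Logged as: "xen-blkback: Error writing new size" */
	pr_warn("Error writing new size\n");
}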
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 375d28851860..f620b5d3f77c 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -44,12 +44,6 @@
44#include <xen/interface/io/blkif.h> 44#include <xen/interface/io/blkif.h>
45#include <xen/interface/io/protocols.h> 45#include <xen/interface/io/protocols.h>
46 46
47#define DRV_PFX "xen-blkback:"
48#define DPRINTK(fmt, args...) \
49 pr_debug(DRV_PFX "(%s:%d) " fmt ".\n", \
50 __func__, __LINE__, ##args)
51
52
53/* 47/*
54 * This is the maximum number of segments that would be allowed in indirect 48 * This is the maximum number of segments that would be allowed in indirect
55 * requests. This value will also be passed to the frontend. 49 * requests. This value will also be passed to the frontend.
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index e3afe97280b1..6ab69ad61ee1 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -14,6 +14,8 @@
14 14
15*/ 15*/
16 16
17#define pr_fmt(fmt) "xen-blkback: " fmt
18
17#include <stdarg.h> 19#include <stdarg.h>
18#include <linux/module.h> 20#include <linux/module.h>
19#include <linux/kthread.h> 21#include <linux/kthread.h>
@@ -21,6 +23,9 @@
21#include <xen/grant_table.h> 23#include <xen/grant_table.h>
22#include "common.h" 24#include "common.h"
23 25
26/* Enlarge the array size in order to fully show blkback name. */
27#define BLKBACK_NAME_LEN (20)
28
24struct backend_info { 29struct backend_info {
25 struct xenbus_device *dev; 30 struct xenbus_device *dev;
26 struct xen_blkif *blkif; 31 struct xen_blkif *blkif;
@@ -70,7 +75,7 @@ static int blkback_name(struct xen_blkif *blkif, char *buf)
70 else 75 else
71 devname = devpath; 76 devname = devpath;
72 77
73 snprintf(buf, TASK_COMM_LEN, "blkback.%d.%s", blkif->domid, devname); 78 snprintf(buf, BLKBACK_NAME_LEN, "blkback.%d.%s", blkif->domid, devname);
74 kfree(devpath); 79 kfree(devpath);
75 80
76 return 0; 81 return 0;
@@ -79,7 +84,7 @@ static int blkback_name(struct xen_blkif *blkif, char *buf)
79static void xen_update_blkif_status(struct xen_blkif *blkif) 84static void xen_update_blkif_status(struct xen_blkif *blkif)
80{ 85{
81 int err; 86 int err;
82 char name[TASK_COMM_LEN]; 87 char name[BLKBACK_NAME_LEN];
83 88
84 /* Not ready to connect? */ 89 /* Not ready to connect? */
85 if (!blkif->irq || !blkif->vbd.bdev) 90 if (!blkif->irq || !blkif->vbd.bdev)
@@ -193,7 +198,7 @@ fail:
193 return ERR_PTR(-ENOMEM); 198 return ERR_PTR(-ENOMEM);
194} 199}
195 200
196static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page, 201static int xen_blkif_map(struct xen_blkif *blkif, grant_ref_t gref,
197 unsigned int evtchn) 202 unsigned int evtchn)
198{ 203{
199 int err; 204 int err;
@@ -202,7 +207,8 @@ static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page,
202 if (blkif->irq) 207 if (blkif->irq)
203 return 0; 208 return 0;
204 209
205 err = xenbus_map_ring_valloc(blkif->be->dev, shared_page, &blkif->blk_ring); 210 err = xenbus_map_ring_valloc(blkif->be->dev, &gref, 1,
211 &blkif->blk_ring);
206 if (err < 0) 212 if (err < 0)
207 return err; 213 return err;
208 214
@@ -423,14 +429,14 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
423 FMODE_READ : FMODE_WRITE, NULL); 429 FMODE_READ : FMODE_WRITE, NULL);
424 430
425 if (IS_ERR(bdev)) { 431 if (IS_ERR(bdev)) {
426 DPRINTK("xen_vbd_create: device %08x could not be opened.\n", 432 pr_warn("xen_vbd_create: device %08x could not be opened\n",
427 vbd->pdevice); 433 vbd->pdevice);
428 return -ENOENT; 434 return -ENOENT;
429 } 435 }
430 436
431 vbd->bdev = bdev; 437 vbd->bdev = bdev;
432 if (vbd->bdev->bd_disk == NULL) { 438 if (vbd->bdev->bd_disk == NULL) {
433 DPRINTK("xen_vbd_create: device %08x doesn't exist.\n", 439 pr_warn("xen_vbd_create: device %08x doesn't exist\n",
434 vbd->pdevice); 440 vbd->pdevice);
435 xen_vbd_free(vbd); 441 xen_vbd_free(vbd);
436 return -ENOENT; 442 return -ENOENT;
@@ -449,7 +455,7 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
449 if (q && blk_queue_secdiscard(q)) 455 if (q && blk_queue_secdiscard(q))
450 vbd->discard_secure = true; 456 vbd->discard_secure = true;
451 457
452 DPRINTK("Successful creation of handle=%04x (dom=%u)\n", 458 pr_debug("Successful creation of handle=%04x (dom=%u)\n",
453 handle, blkif->domid); 459 handle, blkif->domid);
454 return 0; 460 return 0;
455} 461}
@@ -457,7 +463,7 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
457{ 463{
458 struct backend_info *be = dev_get_drvdata(&dev->dev); 464 struct backend_info *be = dev_get_drvdata(&dev->dev);
459 465
460 DPRINTK(""); 466 pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);
461 467
462 if (be->major || be->minor) 468 if (be->major || be->minor)
463 xenvbd_sysfs_delif(dev); 469 xenvbd_sysfs_delif(dev);
@@ -563,6 +569,10 @@ static int xen_blkbk_probe(struct xenbus_device *dev,
563 int err; 569 int err;
564 struct backend_info *be = kzalloc(sizeof(struct backend_info), 570 struct backend_info *be = kzalloc(sizeof(struct backend_info),
565 GFP_KERNEL); 571 GFP_KERNEL);
572
573 /* match the pr_debug in xen_blkbk_remove */
574 pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);
575
566 if (!be) { 576 if (!be) {
567 xenbus_dev_fatal(dev, -ENOMEM, 577 xenbus_dev_fatal(dev, -ENOMEM,
568 "allocating backend structure"); 578 "allocating backend structure");
@@ -594,7 +604,7 @@ static int xen_blkbk_probe(struct xenbus_device *dev,
594 return 0; 604 return 0;
595 605
596fail: 606fail:
597 DPRINTK("failed"); 607 pr_warn("%s failed\n", __func__);
598 xen_blkbk_remove(dev); 608 xen_blkbk_remove(dev);
599 return err; 609 return err;
600} 610}
@@ -618,7 +628,7 @@ static void backend_changed(struct xenbus_watch *watch,
618 unsigned long handle; 628 unsigned long handle;
619 char *device_type; 629 char *device_type;
620 630
621 DPRINTK(""); 631 pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);
622 632
623 err = xenbus_scanf(XBT_NIL, dev->nodename, "physical-device", "%x:%x", 633 err = xenbus_scanf(XBT_NIL, dev->nodename, "physical-device", "%x:%x",
624 &major, &minor); 634 &major, &minor);
@@ -637,7 +647,7 @@ static void backend_changed(struct xenbus_watch *watch,
637 647
638 if (be->major | be->minor) { 648 if (be->major | be->minor) {
639 if (be->major != major || be->minor != minor) 649 if (be->major != major || be->minor != minor)
640 pr_warn(DRV_PFX "changing physical device (from %x:%x to %x:%x) not supported.\n", 650 pr_warn("changing physical device (from %x:%x to %x:%x) not supported.\n",
641 be->major, be->minor, major, minor); 651 be->major, be->minor, major, minor);
642 return; 652 return;
643 } 653 }
@@ -698,13 +708,12 @@ static void frontend_changed(struct xenbus_device *dev,
698 struct backend_info *be = dev_get_drvdata(&dev->dev); 708 struct backend_info *be = dev_get_drvdata(&dev->dev);
699 int err; 709 int err;
700 710
701 DPRINTK("%s", xenbus_strstate(frontend_state)); 711 pr_debug("%s %p %s\n", __func__, dev, xenbus_strstate(frontend_state));
702 712
703 switch (frontend_state) { 713 switch (frontend_state) {
704 case XenbusStateInitialising: 714 case XenbusStateInitialising:
705 if (dev->state == XenbusStateClosed) { 715 if (dev->state == XenbusStateClosed) {
706 pr_info(DRV_PFX "%s: prepare for reconnect\n", 716 pr_info("%s: prepare for reconnect\n", dev->nodename);
707 dev->nodename);
708 xenbus_switch_state(dev, XenbusStateInitWait); 717 xenbus_switch_state(dev, XenbusStateInitWait);
709 } 718 }
710 break; 719 break;
@@ -771,7 +780,7 @@ static void connect(struct backend_info *be)
771 int err; 780 int err;
772 struct xenbus_device *dev = be->dev; 781 struct xenbus_device *dev = be->dev;
773 782
774 DPRINTK("%s", dev->otherend); 783 pr_debug("%s %s\n", __func__, dev->otherend);
775 784
776 /* Supply the information about the device the frontend needs */ 785 /* Supply the information about the device the frontend needs */
777again: 786again:
@@ -857,7 +866,7 @@ static int connect_ring(struct backend_info *be)
857 char protocol[64] = ""; 866 char protocol[64] = "";
858 int err; 867 int err;
859 868
860 DPRINTK("%s", dev->otherend); 869 pr_debug("%s %s\n", __func__, dev->otherend);
861 870
862 err = xenbus_gather(XBT_NIL, dev->otherend, "ring-ref", "%lu", 871 err = xenbus_gather(XBT_NIL, dev->otherend, "ring-ref", "%lu",
863 &ring_ref, "event-channel", "%u", &evtchn, NULL); 872 &ring_ref, "event-channel", "%u", &evtchn, NULL);
@@ -892,7 +901,7 @@ static int connect_ring(struct backend_info *be)
892 be->blkif->vbd.feature_gnt_persistent = pers_grants; 901 be->blkif->vbd.feature_gnt_persistent = pers_grants;
893 be->blkif->vbd.overflow_max_grants = 0; 902 be->blkif->vbd.overflow_max_grants = 0;
894 903
895 pr_info(DRV_PFX "ring-ref %ld, event-channel %d, protocol %d (%s) %s\n", 904 pr_info("ring-ref %ld, event-channel %d, protocol %d (%s) %s\n",
896 ring_ref, evtchn, be->blkif->blk_protocol, protocol, 905 ring_ref, evtchn, be->blkif->blk_protocol, protocol,
897 pers_grants ? "persistent grants" : ""); 906 pers_grants ? "persistent grants" : "");
898 907
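The xen_blkif_map() change above matches a xenbus_map_ring_valloc() that takes an array of grant references plus a count (presumably groundwork for multi-page rings); for the single-page ring the backend passes a one-element array. A minimal sketch under that assumption:

#include <xen/xenbus.h>

static int demo_map_ring(struct xenbus_device *dev, grant_ref_t gref,
			 void **ring_addr)
{
	/* &gref, 1: one grant reference, i.e. one shared ring page. */
	return xenbus_map_ring_valloc(dev, &gref, 1, ring_addr);
}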
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 37779e4c4585..2c61cf8c6f61 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1245,6 +1245,7 @@ static int setup_blkring(struct xenbus_device *dev,
1245 struct blkfront_info *info) 1245 struct blkfront_info *info)
1246{ 1246{
1247 struct blkif_sring *sring; 1247 struct blkif_sring *sring;
1248 grant_ref_t gref;
1248 int err; 1249 int err;
1249 1250
1250 info->ring_ref = GRANT_INVALID_REF; 1251 info->ring_ref = GRANT_INVALID_REF;
@@ -1257,13 +1258,13 @@ static int setup_blkring(struct xenbus_device *dev,
1257 SHARED_RING_INIT(sring); 1258 SHARED_RING_INIT(sring);
1258 FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE); 1259 FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
1259 1260
1260 err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring)); 1261 err = xenbus_grant_ring(dev, info->ring.sring, 1, &gref);
1261 if (err < 0) { 1262 if (err < 0) {
1262 free_page((unsigned long)sring); 1263 free_page((unsigned long)sring);
1263 info->ring.sring = NULL; 1264 info->ring.sring = NULL;
1264 goto fail; 1265 goto fail;
1265 } 1266 }
1266 info->ring_ref = err; 1267 info->ring_ref = gref;
1267 1268
1268 err = xenbus_alloc_evtchn(dev, &info->evtchn); 1269 err = xenbus_alloc_evtchn(dev, &info->evtchn);
1269 if (err) 1270 if (err)
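On the frontend side, the setup_blkring() hunk above uses a xenbus_grant_ring() that reports the grant reference through an output array rather than as its return value, so a negative return now only ever signals an error. A hedged sketch of that calling convention:

#include <xen/xenbus.h>

static int demo_grant_ring(struct xenbus_device *dev, void *ring_page,
			   grant_ref_t *ring_ref)
{
	grant_ref_t gref;
	int err;

	err = xenbus_grant_ring(dev, ring_page, 1, &gref);
	if (err < 0)
		return err;

	*ring_ref = gref;	/* store the reference, not the return code */
	return 0;
}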
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 871bd3550cb0..c94386aa563d 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -43,11 +43,22 @@ static const char *default_compressor = "lzo";
43/* Module params (documentation at end) */ 43/* Module params (documentation at end) */
44static unsigned int num_devices = 1; 44static unsigned int num_devices = 1;
45 45
46static inline void deprecated_attr_warn(const char *name)
47{
48 pr_warn_once("%d (%s) Attribute %s (and others) will be removed. %s\n",
49 task_pid_nr(current),
50 current->comm,
51 name,
52 "See zram documentation.");
53}
54
46#define ZRAM_ATTR_RO(name) \ 55#define ZRAM_ATTR_RO(name) \
47static ssize_t name##_show(struct device *d, \ 56static ssize_t name##_show(struct device *d, \
48 struct device_attribute *attr, char *b) \ 57 struct device_attribute *attr, char *b) \
49{ \ 58{ \
50 struct zram *zram = dev_to_zram(d); \ 59 struct zram *zram = dev_to_zram(d); \
60 \
61 deprecated_attr_warn(__stringify(name)); \
51 return scnprintf(b, PAGE_SIZE, "%llu\n", \ 62 return scnprintf(b, PAGE_SIZE, "%llu\n", \
52 (u64)atomic64_read(&zram->stats.name)); \ 63 (u64)atomic64_read(&zram->stats.name)); \
53} \ 64} \
@@ -89,6 +100,7 @@ static ssize_t orig_data_size_show(struct device *dev,
89{ 100{
90 struct zram *zram = dev_to_zram(dev); 101 struct zram *zram = dev_to_zram(dev);
91 102
103 deprecated_attr_warn("orig_data_size");
92 return scnprintf(buf, PAGE_SIZE, "%llu\n", 104 return scnprintf(buf, PAGE_SIZE, "%llu\n",
93 (u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT); 105 (u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
94} 106}
@@ -99,6 +111,7 @@ static ssize_t mem_used_total_show(struct device *dev,
99 u64 val = 0; 111 u64 val = 0;
100 struct zram *zram = dev_to_zram(dev); 112 struct zram *zram = dev_to_zram(dev);
101 113
114 deprecated_attr_warn("mem_used_total");
102 down_read(&zram->init_lock); 115 down_read(&zram->init_lock);
103 if (init_done(zram)) { 116 if (init_done(zram)) {
104 struct zram_meta *meta = zram->meta; 117 struct zram_meta *meta = zram->meta;
@@ -128,6 +141,7 @@ static ssize_t mem_limit_show(struct device *dev,
128 u64 val; 141 u64 val;
129 struct zram *zram = dev_to_zram(dev); 142 struct zram *zram = dev_to_zram(dev);
130 143
144 deprecated_attr_warn("mem_limit");
131 down_read(&zram->init_lock); 145 down_read(&zram->init_lock);
132 val = zram->limit_pages; 146 val = zram->limit_pages;
133 up_read(&zram->init_lock); 147 up_read(&zram->init_lock);
@@ -159,6 +173,7 @@ static ssize_t mem_used_max_show(struct device *dev,
159 u64 val = 0; 173 u64 val = 0;
160 struct zram *zram = dev_to_zram(dev); 174 struct zram *zram = dev_to_zram(dev);
161 175
176 deprecated_attr_warn("mem_used_max");
162 down_read(&zram->init_lock); 177 down_read(&zram->init_lock);
163 if (init_done(zram)) 178 if (init_done(zram))
164 val = atomic_long_read(&zram->stats.max_used_pages); 179 val = atomic_long_read(&zram->stats.max_used_pages);
@@ -670,8 +685,12 @@ out:
670static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index, 685static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
671 int offset, int rw) 686 int offset, int rw)
672{ 687{
688 unsigned long start_time = jiffies;
673 int ret; 689 int ret;
674 690
691 generic_start_io_acct(rw, bvec->bv_len >> SECTOR_SHIFT,
692 &zram->disk->part0);
693
675 if (rw == READ) { 694 if (rw == READ) {
676 atomic64_inc(&zram->stats.num_reads); 695 atomic64_inc(&zram->stats.num_reads);
677 ret = zram_bvec_read(zram, bvec, index, offset); 696 ret = zram_bvec_read(zram, bvec, index, offset);
@@ -680,6 +699,8 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
680 ret = zram_bvec_write(zram, bvec, index, offset); 699 ret = zram_bvec_write(zram, bvec, index, offset);
681 } 700 }
682 701
702 generic_end_io_acct(rw, &zram->disk->part0, start_time);
703
683 if (unlikely(ret)) { 704 if (unlikely(ret)) {
684 if (rw == READ) 705 if (rw == READ)
685 atomic64_inc(&zram->stats.failed_reads); 706 atomic64_inc(&zram->stats.failed_reads);
@@ -1027,6 +1048,55 @@ static DEVICE_ATTR_RW(mem_used_max);
1027static DEVICE_ATTR_RW(max_comp_streams); 1048static DEVICE_ATTR_RW(max_comp_streams);
1028static DEVICE_ATTR_RW(comp_algorithm); 1049static DEVICE_ATTR_RW(comp_algorithm);
1029 1050
1051static ssize_t io_stat_show(struct device *dev,
1052 struct device_attribute *attr, char *buf)
1053{
1054 struct zram *zram = dev_to_zram(dev);
1055 ssize_t ret;
1056
1057 down_read(&zram->init_lock);
1058 ret = scnprintf(buf, PAGE_SIZE,
1059 "%8llu %8llu %8llu %8llu\n",
1060 (u64)atomic64_read(&zram->stats.failed_reads),
1061 (u64)atomic64_read(&zram->stats.failed_writes),
1062 (u64)atomic64_read(&zram->stats.invalid_io),
1063 (u64)atomic64_read(&zram->stats.notify_free));
1064 up_read(&zram->init_lock);
1065
1066 return ret;
1067}
1068
1069static ssize_t mm_stat_show(struct device *dev,
1070 struct device_attribute *attr, char *buf)
1071{
1072 struct zram *zram = dev_to_zram(dev);
1073 u64 orig_size, mem_used = 0;
1074 long max_used;
1075 ssize_t ret;
1076
1077 down_read(&zram->init_lock);
1078 if (init_done(zram))
1079 mem_used = zs_get_total_pages(zram->meta->mem_pool);
1080
1081 orig_size = atomic64_read(&zram->stats.pages_stored);
1082 max_used = atomic_long_read(&zram->stats.max_used_pages);
1083
1084 ret = scnprintf(buf, PAGE_SIZE,
1085 "%8llu %8llu %8llu %8lu %8ld %8llu %8llu\n",
1086 orig_size << PAGE_SHIFT,
1087 (u64)atomic64_read(&zram->stats.compr_data_size),
1088 mem_used << PAGE_SHIFT,
1089 zram->limit_pages << PAGE_SHIFT,
1090 max_used << PAGE_SHIFT,
1091 (u64)atomic64_read(&zram->stats.zero_pages),
1092 (u64)atomic64_read(&zram->stats.num_migrated));
1093 up_read(&zram->init_lock);
1094
1095 return ret;
1096}
1097
1098static DEVICE_ATTR_RO(io_stat);
1099static DEVICE_ATTR_RO(mm_stat);
1030ZRAM_ATTR_RO(num_reads); 1100ZRAM_ATTR_RO(num_reads);
1031ZRAM_ATTR_RO(num_writes); 1101ZRAM_ATTR_RO(num_writes);
1032ZRAM_ATTR_RO(failed_reads); 1102ZRAM_ATTR_RO(failed_reads);
@@ -1054,6 +1124,8 @@ static struct attribute *zram_disk_attrs[] = {
1054 &dev_attr_mem_used_max.attr, 1124 &dev_attr_mem_used_max.attr,
1055 &dev_attr_max_comp_streams.attr, 1125 &dev_attr_max_comp_streams.attr,
1056 &dev_attr_comp_algorithm.attr, 1126 &dev_attr_comp_algorithm.attr,
1127 &dev_attr_io_stat.attr,
1128 &dev_attr_mm_stat.attr,
1057 NULL, 1129 NULL,
1058}; 1130};
1059 1131
@@ -1082,6 +1154,7 @@ static int create_device(struct zram *zram, int device_id)
1082 if (!zram->disk) { 1154 if (!zram->disk) {
1083 pr_warn("Error allocating disk structure for device %d\n", 1155 pr_warn("Error allocating disk structure for device %d\n",
1084 device_id); 1156 device_id);
1157 ret = -ENOMEM;
1085 goto out_free_queue; 1158 goto out_free_queue;
1086 } 1159 }
1087 1160
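The zram_bvec_rw() hunk above brackets each per-bvec read or write with the generic block-layer accounting helpers, which feed the per-device counters behind /sys/block/<dev>/stat and /proc/diskstats. A sketch of the pattern with an invented device wrapper and helper (demo_dev and demo_do_rw are hypothetical):

#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/jiffies.h>

struct demo_dev {
	struct gendisk *disk;
};

int demo_do_rw(struct demo_dev *dev, struct bio_vec *bvec, int rw);	/* hypothetical */

static int demo_rw(struct demo_dev *dev, struct bio_vec *bvec, int rw)
{
	unsigned long start_time = jiffies;
	int ret;

	/* Open the accounting window before doing the work... */
	generic_start_io_acct(rw, bvec->bv_len >> 9 /* bytes -> 512B sectors */,
			      &dev->disk->part0);
	ret = demo_do_rw(dev, bvec, rw);
	/* ...and close it so the in-flight and ticks statistics stay correct. */
	generic_end_io_acct(rw, &dev->disk->part0, start_time);

	return ret;
}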
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 17056e589146..570c598f4ce9 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -84,6 +84,7 @@ struct zram_stats {
84 atomic64_t compr_data_size; /* compressed size of pages stored */ 84 atomic64_t compr_data_size; /* compressed size of pages stored */
85 atomic64_t num_reads; /* failed + successful */ 85 atomic64_t num_reads; /* failed + successful */
86 atomic64_t num_writes; /* --do-- */ 86 atomic64_t num_writes; /* --do-- */
87 atomic64_t num_migrated; /* no. of migrated object */
87 atomic64_t failed_reads; /* can happen when memory is too low */ 88 atomic64_t failed_reads; /* can happen when memory is too low */
88 atomic64_t failed_writes; /* can happen when memory is too low */ 89 atomic64_t failed_writes; /* can happen when memory is too low */
89 atomic64_t invalid_io; /* non-page-aligned I/O requests */ 90 atomic64_t invalid_io; /* non-page-aligned I/O requests */