-rw-r--r--  block/blk-core.c           |   1
-rw-r--r--  drivers/block/nvme-core.c  | 203
-rw-r--r--  drivers/block/nvme-scsi.c  |  36
-rw-r--r--  include/linux/nvme.h       |  14
-rw-r--r--  include/uapi/linux/nvme.h  |  50
5 files changed, 194 insertions, 110 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 9aca8c71e70b..f6f6b9af3e3f 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -43,6 +43,7 @@
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
+EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
 
 DEFINE_IDA(blk_queue_ida);
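
The export matters because the nvme driver below is typically built as a module: without EXPORT_TRACEPOINT_SYMBOL_GPL(block_split), the trace_block_split() call added later in this series would fail to link. A minimal sketch of the modular call site this enables (the demo_ wrapper name is hypothetical):

#include <linux/blkdev.h>
#include <trace/events/block.h>

/* Fire the newly exported tracepoint from module code, after cloning
 * the front half of an oversized bio into "split". */
static void demo_trace_split(struct bio *bio, struct bio *split)
{
	trace_block_split(bdev_get_queue(bio->bi_bdev), bio,
			  split->bi_iter.bi_sector);
}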
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index a842c71dcc21..02351e217165 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -10,10 +10,6 @@
  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  */
 
 #include <linux/nvme.h>
@@ -46,16 +42,26 @@
 #include <scsi/sg.h>
 #include <asm-generic/io-64-nonatomic-lo-hi.h>
 
+#include <trace/events/block.h>
+
 #define NVME_Q_DEPTH 1024
 #define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
 #define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))
-#define ADMIN_TIMEOUT	(60 * HZ)
-#define IOD_TIMEOUT	(4 * NVME_IO_TIMEOUT)
+#define ADMIN_TIMEOUT	(admin_timeout * HZ)
+#define IOD_TIMEOUT	(retry_time * HZ)
+
+static unsigned char admin_timeout = 60;
+module_param(admin_timeout, byte, 0644);
+MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
 
-unsigned char io_timeout = 30;
-module_param(io_timeout, byte, 0644);
+unsigned char nvme_io_timeout = 30;
+module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
 MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
 
+static unsigned char retry_time = 30;
+module_param(retry_time, byte, 0644);
+MODULE_PARM_DESC(retry_time, "time in seconds to retry failed I/O");
+
 static int nvme_major;
 module_param(nvme_major, int, 0);
 
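
All three knobs are registered with mode 0644, so they are readable and writable at runtime under /sys/module/nvme/parameters/ (admin_timeout, io_timeout, retry_time). A minimal sketch of the module_param_named() pattern used for io_timeout, which decouples the now-prefixed C identifier from the user-visible parameter name; the demo_ identifier is hypothetical:

#include <linux/module.h>

/* The variable carries a driver prefix for the global namespace; the
 * sysfs/modprobe parameter keeps the short historical name. */
static unsigned char demo_io_timeout = 30;
module_param_named(io_timeout, demo_io_timeout, byte, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");

MODULE_LICENSE("GPL");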
@@ -67,6 +73,7 @@ static LIST_HEAD(dev_list);
 static struct task_struct *nvme_thread;
 static struct workqueue_struct *nvme_workq;
 static wait_queue_head_t nvme_kthread_wait;
+static struct notifier_block nvme_nb;
 
 static void nvme_reset_failed_dev(struct work_struct *ws);
 
@@ -199,16 +206,13 @@ static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
 #define CMD_CTX_CANCELLED	(0x30C + CMD_CTX_BASE)
 #define CMD_CTX_COMPLETED	(0x310 + CMD_CTX_BASE)
 #define CMD_CTX_INVALID		(0x314 + CMD_CTX_BASE)
-#define CMD_CTX_FLUSH		(0x318 + CMD_CTX_BASE)
-#define CMD_CTX_ABORT		(0x31C + CMD_CTX_BASE)
+#define CMD_CTX_ABORT		(0x318 + CMD_CTX_BASE)
 
 static void special_completion(struct nvme_queue *nvmeq, void *ctx,
 						struct nvme_completion *cqe)
 {
 	if (ctx == CMD_CTX_CANCELLED)
 		return;
-	if (ctx == CMD_CTX_FLUSH)
-		return;
 	if (ctx == CMD_CTX_ABORT) {
 		++nvmeq->dev->abort_limit;
 		return;
@@ -247,8 +251,9 @@ static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid,
 	void *ctx;
 	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
 
-	if (cmdid >= nvmeq->q_depth) {
-		*fn = special_completion;
+	if (cmdid >= nvmeq->q_depth || !info[cmdid].fn) {
+		if (fn)
+			*fn = special_completion;
 		return CMD_CTX_INVALID;
 	}
 	if (fn)
@@ -281,9 +286,17 @@ static struct nvme_queue *raw_nvmeq(struct nvme_dev *dev, int qid)
 
 static struct nvme_queue *get_nvmeq(struct nvme_dev *dev) __acquires(RCU)
 {
+	struct nvme_queue *nvmeq;
 	unsigned queue_id = get_cpu_var(*dev->io_queue);
+
 	rcu_read_lock();
-	return rcu_dereference(dev->queues[queue_id]);
+	nvmeq = rcu_dereference(dev->queues[queue_id]);
+	if (nvmeq)
+		return nvmeq;
+
+	rcu_read_unlock();
+	put_cpu_var(*dev->io_queue);
+	return NULL;
 }
 
 static void put_nvmeq(struct nvme_queue *nvmeq) __releases(RCU)
@@ -295,8 +308,15 @@ static void put_nvmeq(struct nvme_queue *nvmeq) __releases(RCU)
 static struct nvme_queue *lock_nvmeq(struct nvme_dev *dev, int q_idx)
 							__acquires(RCU)
 {
+	struct nvme_queue *nvmeq;
+
 	rcu_read_lock();
-	return rcu_dereference(dev->queues[q_idx]);
+	nvmeq = rcu_dereference(dev->queues[q_idx]);
+	if (nvmeq)
+		return nvmeq;
+
+	rcu_read_unlock();
+	return NULL;
 }
 
 static void unlock_nvmeq(struct nvme_queue *nvmeq) __releases(RCU)
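
Both helpers now follow the same RCU discipline: return with the read lock held only when a queue was found, and drop the lock (plus, for get_nvmeq, the per-CPU reference) before returning NULL, so callers never unlock a queue they never obtained. A sketch of the shape, with stand-in types:

#include <linux/rcupdate.h>

struct demo_queue;				/* stand-in types */
struct demo_dev {
	struct demo_queue __rcu **queues;
};

static struct demo_queue *demo_lock_queue(struct demo_dev *dev, int idx)
{
	struct demo_queue *q;

	rcu_read_lock();
	q = rcu_dereference(dev->queues[idx]);
	if (q)
		return q;	/* caller must rcu_read_unlock() later */

	rcu_read_unlock();	/* failed lookup leaves nothing held */
	return NULL;
}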
@@ -387,25 +407,30 @@ void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
 static void nvme_start_io_acct(struct bio *bio)
 {
 	struct gendisk *disk = bio->bi_bdev->bd_disk;
-	const int rw = bio_data_dir(bio);
-	int cpu = part_stat_lock();
-	part_round_stats(cpu, &disk->part0);
-	part_stat_inc(cpu, &disk->part0, ios[rw]);
-	part_stat_add(cpu, &disk->part0, sectors[rw], bio_sectors(bio));
-	part_inc_in_flight(&disk->part0, rw);
-	part_stat_unlock();
+	if (blk_queue_io_stat(disk->queue)) {
+		const int rw = bio_data_dir(bio);
+		int cpu = part_stat_lock();
+		part_round_stats(cpu, &disk->part0);
+		part_stat_inc(cpu, &disk->part0, ios[rw]);
+		part_stat_add(cpu, &disk->part0, sectors[rw],
+							bio_sectors(bio));
+		part_inc_in_flight(&disk->part0, rw);
+		part_stat_unlock();
+	}
 }
 
 static void nvme_end_io_acct(struct bio *bio, unsigned long start_time)
 {
 	struct gendisk *disk = bio->bi_bdev->bd_disk;
-	const int rw = bio_data_dir(bio);
-	unsigned long duration = jiffies - start_time;
-	int cpu = part_stat_lock();
-	part_stat_add(cpu, &disk->part0, ticks[rw], duration);
-	part_round_stats(cpu, &disk->part0);
-	part_dec_in_flight(&disk->part0, rw);
-	part_stat_unlock();
+	if (blk_queue_io_stat(disk->queue)) {
+		const int rw = bio_data_dir(bio);
+		unsigned long duration = jiffies - start_time;
+		int cpu = part_stat_lock();
+		part_stat_add(cpu, &disk->part0, ticks[rw], duration);
+		part_round_stats(cpu, &disk->part0);
+		part_dec_in_flight(&disk->part0, rw);
+		part_stat_unlock();
+	}
 }
 
 static void bio_completion(struct nvme_queue *nvmeq, void *ctx,
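
The accounting paths are now gated on the queue's iostats flag, which user space can toggle per disk via /sys/block/<disk>/queue/iostats; with the flag clear, the per-partition counters are skipped entirely. The test reduces to (hypothetical helper name):

#include <linux/blkdev.h>
#include <linux/genhd.h>

/* True only when QUEUE_FLAG_IO_STAT is set on the disk's queue. */
static bool demo_should_account(struct bio *bio)
{
	return blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
}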
@@ -414,6 +439,7 @@ static void bio_completion(struct nvme_queue *nvmeq, void *ctx,
 	struct nvme_iod *iod = ctx;
 	struct bio *bio = iod->private;
 	u16 status = le16_to_cpup(&cqe->status) >> 1;
+	int error = 0;
 
 	if (unlikely(status)) {
 		if (!(status & NVME_SC_DNR ||
@@ -426,6 +452,7 @@ static void bio_completion(struct nvme_queue *nvmeq, void *ctx,
 			wake_up(&nvmeq->sq_full);
 			return;
 		}
+		error = -EIO;
 	}
 	if (iod->nents) {
 		dma_unmap_sg(nvmeq->q_dmadev, iod->sg, iod->nents,
@@ -433,10 +460,9 @@ static void bio_completion(struct nvme_queue *nvmeq, void *ctx,
 		nvme_end_io_acct(bio, iod->start_time);
 	}
 	nvme_free_iod(nvmeq->dev, iod);
-	if (status)
-		bio_endio(bio, -EIO);
-	else
-		bio_endio(bio, 0);
+
+	trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio, error);
+	bio_endio(bio, error);
 }
 
 /* length is in bytes.  gfp flags indicates whether we may sleep. */
@@ -525,6 +551,8 @@ static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
 	if (!split)
 		return -ENOMEM;
 
+	trace_block_split(bdev_get_queue(bio->bi_bdev), bio,
+						split->bi_iter.bi_sector);
 	bio_chain(split, bio);
 
 	if (!waitqueue_active(&nvmeq->sq_full))
@@ -627,16 +655,6 @@ static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 	return 0;
 }
 
-int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
-{
-	int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH,
-					special_completion, NVME_IO_TIMEOUT);
-	if (unlikely(cmdid < 0))
-		return cmdid;
-
-	return nvme_submit_flush(nvmeq, ns, cmdid);
-}
-
 static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod)
 {
 	struct bio *bio = iod->private;
@@ -652,7 +670,7 @@ static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod)
 
 	if (bio->bi_rw & REQ_DISCARD)
 		return nvme_submit_discard(nvmeq, ns, bio, iod, cmdid);
-	if ((bio->bi_rw & REQ_FLUSH) && !iod->nents)
+	if (bio->bi_rw & REQ_FLUSH)
 		return nvme_submit_flush(nvmeq, ns, cmdid);
 
 	control = 0;
@@ -686,6 +704,26 @@ static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod)
 	return 0;
 }
 
+static int nvme_split_flush_data(struct nvme_queue *nvmeq, struct bio *bio)
+{
+	struct bio *split = bio_clone(bio, GFP_ATOMIC);
+	if (!split)
+		return -ENOMEM;
+
+	split->bi_iter.bi_size = 0;
+	split->bi_phys_segments = 0;
+	bio->bi_rw &= ~REQ_FLUSH;
+	bio_chain(split, bio);
+
+	if (!waitqueue_active(&nvmeq->sq_full))
+		add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
+	bio_list_add(&nvmeq->sq_cong, split);
+	bio_list_add(&nvmeq->sq_cong, bio);
+	wake_up_process(nvme_thread);
+
+	return 0;
+}
+
 /*
  * Called with local interrupts disabled and the q_lock held.  May not sleep.
  */
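
The new helper replaces the old two-command flush scheme: a zero-length clone carries REQ_FLUSH, the original bio is stripped of the flag, and bio_chain() ensures the data bio cannot complete before the flush has. A condensed sketch, with demo_requeue() as a hypothetical stand-in for the driver's sq_cong requeue path:

#include <linux/bio.h>

static void demo_requeue(struct bio *bio);	/* hypothetical helper */

static int demo_split_flush(struct bio *bio)
{
	struct bio *flush = bio_clone(bio, GFP_ATOMIC);

	if (!flush)
		return -ENOMEM;

	flush->bi_iter.bi_size = 0;	/* clone carries only the flush */
	flush->bi_phys_segments = 0;
	bio->bi_rw &= ~REQ_FLUSH;	/* data bio no longer flushes */
	bio_chain(flush, bio);		/* bio completes after flush */

	demo_requeue(flush);		/* flush must be submitted first */
	demo_requeue(bio);
	return 0;
}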
@@ -696,11 +734,8 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 	int psegs = bio_phys_segments(ns->queue, bio);
 	int result;
 
-	if ((bio->bi_rw & REQ_FLUSH) && psegs) {
-		result = nvme_submit_flush_data(nvmeq, ns);
-		if (result)
-			return result;
-	}
+	if ((bio->bi_rw & REQ_FLUSH) && psegs)
+		return nvme_split_flush_data(nvmeq, bio);
 
 	iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC);
 	if (!iod)
@@ -795,7 +830,6 @@ static void nvme_make_request(struct request_queue *q, struct bio *bio)
 	int result = -EBUSY;
 
 	if (!nvmeq) {
-		put_nvmeq(NULL);
 		bio_endio(bio, -EIO);
 		return;
 	}
@@ -870,10 +904,8 @@ static int nvme_submit_sync_cmd(struct nvme_dev *dev, int q_idx,
 	struct nvme_queue *nvmeq;
 
 	nvmeq = lock_nvmeq(dev, q_idx);
-	if (!nvmeq) {
-		unlock_nvmeq(nvmeq);
+	if (!nvmeq)
 		return -ENODEV;
-	}
 
 	cmdinfo.task = current;
 	cmdinfo.status = -EINTR;
@@ -898,9 +930,10 @@ static int nvme_submit_sync_cmd(struct nvme_dev *dev, int q_idx,
 
 	if (cmdinfo.status == -EINTR) {
 		nvmeq = lock_nvmeq(dev, q_idx);
-		if (nvmeq)
+		if (nvmeq) {
 			nvme_abort_command(nvmeq, cmdid);
-		unlock_nvmeq(nvmeq);
+			unlock_nvmeq(nvmeq);
+		}
 		return -EINTR;
 	}
 
@@ -1358,7 +1391,8 @@ static int nvme_wait_ready(struct nvme_dev *dev, u64 cap, bool enabled)
 			return -EINTR;
 		if (time_after(jiffies, timeout)) {
 			dev_err(&dev->pci_dev->dev,
-				"Device not ready; aborting initialisation\n");
+				"Device not ready; aborting %s\n", enabled ?
+						"initialisation" : "reset");
 			return -ENODEV;
 		}
 	}
@@ -1481,7 +1515,11 @@ struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
 		goto put_pages;
 	}
 
+	err = -ENOMEM;
 	iod = nvme_alloc_iod(count, length, GFP_KERNEL);
+	if (!iod)
+		goto put_pages;
+
 	sg = iod->sg;
 	sg_init_table(sg, count);
 	for (i = 0; i < count; i++) {
@@ -1494,7 +1532,6 @@ struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
 	sg_mark_end(&sg[i - 1]);
 	iod->nents = count;
 
-	err = -ENOMEM;
 	nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
 				write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 	if (!nents)
@@ -1894,6 +1931,8 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
 	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
 	if (dev->max_hw_sectors)
 		blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
+	if (dev->vwc & NVME_CTRL_VWC_PRESENT)
+		blk_queue_flush(ns->queue, REQ_FLUSH | REQ_FUA);
 
 	disk->major = nvme_major;
 	disk->first_minor = 0;
@@ -2062,8 +2101,13 @@ static int set_queue_count(struct nvme_dev *dev, int count)
 
 	status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0,
 								&result);
-	if (status)
-		return status < 0 ? -EIO : -EBUSY;
+	if (status < 0)
+		return status;
+	if (status > 0) {
+		dev_err(&dev->pci_dev->dev, "Could not set queue count (%d)\n",
+									status);
+		return -EBUSY;
+	}
 	return min(result & 0xffff, result >> 16) + 1;
 }
 
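
The surviving min(result & 0xffff, result >> 16) + 1 decodes the Set Features (Number of Queues) completion dword: the controller reports allocated submission queues in bits 15:0 and completion queues in bits 31:16, both zero-based. A worked decode with a hypothetical controller reply:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t result = 0x000e000e;		/* hypothetical reply */
	uint32_t nsqa = (result & 0xffff) + 1;	/* 15 submission queues */
	uint32_t ncqa = (result >> 16) + 1;	/* 15 completion queues */

	/* Only matched SQ/CQ pairs are usable. */
	printf("usable I/O queues: %u\n", nsqa < ncqa ? nsqa : ncqa);
	return 0;
}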
@@ -2072,14 +2116,25 @@ static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
 	return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride);
 }
 
+static void nvme_cpu_workfn(struct work_struct *work)
+{
+	struct nvme_dev *dev = container_of(work, struct nvme_dev, cpu_work);
+	if (dev->initialized)
+		nvme_assign_io_queues(dev);
+}
+
 static int nvme_cpu_notify(struct notifier_block *self,
 				unsigned long action, void *hcpu)
 {
-	struct nvme_dev *dev = container_of(self, struct nvme_dev, nb);
+	struct nvme_dev *dev;
+
 	switch (action) {
 	case CPU_ONLINE:
 	case CPU_DEAD:
-		nvme_assign_io_queues(dev);
+		spin_lock(&dev_list_lock);
+		list_for_each_entry(dev, &dev_list, node)
+			schedule_work(&dev->cpu_work);
+		spin_unlock(&dev_list_lock);
 		break;
 	}
 	return NOTIFY_OK;
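
With the notifier now global, the hotplug callback no longer touches any device directly: it walks dev_list under the spinlock and schedules each device's cpu_work, deferring the queue reassignment to process context. The lifecycle glue lands in the probe/remove hunks below; in outline:

#include <linux/workqueue.h>

struct demo_dev {
	struct work_struct cpu_work;	/* scheduled by the notifier */
};

static void demo_cpu_workfn(struct work_struct *work)
{
	struct demo_dev *dev = container_of(work, struct demo_dev, cpu_work);

	/* Rebalance per-CPU queue assignments here, in process context. */
	(void)dev;
}

/* probe:    INIT_WORK(&dev->cpu_work, demo_cpu_workfn);
 * notifier: schedule_work(&dev->cpu_work);
 * remove:   flush_work(&dev->cpu_work);  -- nothing left in flight */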
@@ -2148,11 +2203,6 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	nvme_free_queues(dev, nr_io_queues + 1);
 	nvme_assign_io_queues(dev);
 
-	dev->nb.notifier_call = &nvme_cpu_notify;
-	result = register_hotcpu_notifier(&dev->nb);
-	if (result)
-		goto free_queues;
-
 	return 0;
 
  free_queues:
@@ -2184,6 +2234,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
 
 	res = nvme_identify(dev, 0, 1, dma_addr);
 	if (res) {
+		dev_err(&pdev->dev, "Identify Controller failed (%d)\n", res);
 		res = -EIO;
 		goto out;
 	}
@@ -2192,6 +2243,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
 	nn = le32_to_cpup(&ctrl->nn);
 	dev->oncs = le16_to_cpup(&ctrl->oncs);
 	dev->abort_limit = ctrl->acl + 1;
+	dev->vwc = ctrl->vwc;
 	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
 	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
 	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
@@ -2450,8 +2502,6 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
 	int i;
 
 	dev->initialized = 0;
-	unregister_hotcpu_notifier(&dev->nb);
-
 	nvme_dev_list_remove(dev);
 
 	if (!dev->bar || (dev->bar && readl(&dev->bar->csts) == -1)) {
@@ -2722,6 +2772,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	INIT_LIST_HEAD(&dev->namespaces);
 	dev->reset_workfn = nvme_reset_failed_dev;
 	INIT_WORK(&dev->reset_work, nvme_reset_workfn);
+	INIT_WORK(&dev->cpu_work, nvme_cpu_workfn);
 	dev->pci_dev = pdev;
 	pci_set_drvdata(pdev, dev);
 	result = nvme_set_instance(dev);
@@ -2801,6 +2852,7 @@ static void nvme_remove(struct pci_dev *pdev)
 
 	pci_set_drvdata(pdev, NULL);
 	flush_work(&dev->reset_work);
+	flush_work(&dev->cpu_work);
 	misc_deregister(&dev->miscdev);
 	nvme_dev_remove(dev);
 	nvme_dev_shutdown(dev);
@@ -2889,11 +2941,18 @@ static int __init nvme_init(void)
 	else if (result > 0)
 		nvme_major = result;
 
-	result = pci_register_driver(&nvme_driver);
+	nvme_nb.notifier_call = &nvme_cpu_notify;
+	result = register_hotcpu_notifier(&nvme_nb);
 	if (result)
 		goto unregister_blkdev;
+
+	result = pci_register_driver(&nvme_driver);
+	if (result)
+		goto unregister_hotcpu;
 	return 0;
 
+ unregister_hotcpu:
+	unregister_hotcpu_notifier(&nvme_nb);
  unregister_blkdev:
 	unregister_blkdev(nvme_major, "nvme");
  kill_workq:
@@ -2904,9 +2963,11 @@ static int __init nvme_init(void)
 static void __exit nvme_exit(void)
 {
 	pci_unregister_driver(&nvme_driver);
+	unregister_hotcpu_notifier(&nvme_nb);
 	unregister_blkdev(nvme_major, "nvme");
 	destroy_workqueue(nvme_workq);
 	BUG_ON(nvme_thread && !IS_ERR(nvme_thread));
+	_nvme_check_size();
 }
 
 MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
index 2c3f5be06da1..a4cd6d691c63 100644
--- a/drivers/block/nvme-scsi.c
+++ b/drivers/block/nvme-scsi.c
@@ -1,6 +1,6 @@
 /*
  * NVM Express device driver
- * Copyright (c) 2011, Intel Corporation.
+ * Copyright (c) 2011-2014, Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -10,10 +10,6 @@
  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  */
 
 /*
@@ -243,8 +239,6 @@ static int sg_version_num = 30534; /* 2 digits for each component */
 #define READ_CAP_16_RESP_SIZE				32
 
 /* NVMe Namespace and Command Defines */
-#define NVME_GET_SMART_LOG_PAGE			0x02
-#define NVME_GET_FEAT_TEMP_THRESH		0x04
 #define BYTES_TO_DWORDS					4
 #define NVME_MAX_FIRMWARE_SLOT				7
 
@@ -686,6 +680,7 @@ static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns,
 	u8 resp_data_format = 0x02;
 	u8 protect;
 	u8 cmdque = 0x01 << 1;
+	u8 fw_offset = sizeof(dev->firmware_rev);
 
 	mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
 				&dma_addr, GFP_KERNEL);
@@ -721,7 +716,11 @@ static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns,
 	inq_response[7] = cmdque;	/* wbus16=0 | sync=0 | vs=0 */
 	strncpy(&inq_response[8], "NVMe    ", 8);
 	strncpy(&inq_response[16], dev->model, 16);
-	strncpy(&inq_response[32], dev->firmware_rev, 4);
+
+	while (dev->firmware_rev[fw_offset - 1] == ' ' && fw_offset > 4)
+		fw_offset--;
+	fw_offset -= 4;
+	strncpy(&inq_response[32], dev->firmware_rev + fw_offset, 4);
 
 	xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
 	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
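
The loop trims trailing spaces so a short firmware revision reports its significant bytes rather than padding. For a hypothetical 8-byte revision "1.0" padded with five spaces, fw_offset walks back from 8 to its floor of 4, and fw_offset -= 4 then yields 0, so bytes 0-3 ("1.0 ") land in the INQUIRY response instead of four blanks. The same arithmetic in isolation:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char firmware_rev[8] = "1.0     ";	/* "1.0" + five spaces */
	size_t fw_offset = sizeof(firmware_rev);
	char out[5] = "";

	while (firmware_rev[fw_offset - 1] == ' ' && fw_offset > 4)
		fw_offset--;
	fw_offset -= 4;				/* 0 for this input */
	strncpy(out, firmware_rev + fw_offset, 4);
	printf("\"%s\"\n", out);		/* prints "1.0 " */
	return 0;
}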
@@ -1018,8 +1017,8 @@ static int nvme_trans_log_info_exceptions(struct nvme_ns *ns,
 	c.common.opcode = nvme_admin_get_log_page;
 	c.common.nsid = cpu_to_le32(0xFFFFFFFF);
 	c.common.prp1 = cpu_to_le64(dma_addr);
-	c.common.cdw10[0] = cpu_to_le32(((sizeof(struct nvme_smart_log) /
-			BYTES_TO_DWORDS) << 16) | NVME_GET_SMART_LOG_PAGE);
+	c.common.cdw10[0] = cpu_to_le32((((sizeof(struct nvme_smart_log) /
+			BYTES_TO_DWORDS) - 1) << 16) | NVME_LOG_SMART);
 	res = nvme_submit_admin_cmd(dev, &c, NULL);
 	if (res != NVME_SC_SUCCESS) {
 		temp_c = LOG_TEMP_UNKNOWN;
@@ -1086,8 +1085,8 @@ static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	c.common.opcode = nvme_admin_get_log_page;
 	c.common.nsid = cpu_to_le32(0xFFFFFFFF);
 	c.common.prp1 = cpu_to_le64(dma_addr);
-	c.common.cdw10[0] = cpu_to_le32(((sizeof(struct nvme_smart_log) /
-			BYTES_TO_DWORDS) << 16) | NVME_GET_SMART_LOG_PAGE);
+	c.common.cdw10[0] = cpu_to_le32((((sizeof(struct nvme_smart_log) /
+			BYTES_TO_DWORDS) - 1) << 16) | NVME_LOG_SMART);
 	res = nvme_submit_admin_cmd(dev, &c, NULL);
 	if (res != NVME_SC_SUCCESS) {
 		temp_c_cur = LOG_TEMP_UNKNOWN;
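
Both call sites get the same fix: the Get Log Page dword-count field placed at bit 16 of CDW10 is zero-based (number of dwords minus one), so the old encoding requested one dword too many. Worked through for the 512-byte SMART log page:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t log_bytes = 512;		/* sizeof(struct nvme_smart_log) */
	uint32_t numd = log_bytes / 4 - 1;	/* 127, zero-based dword count */
	uint32_t cdw10 = (numd << 16) | 0x02;	/* 0x02 == NVME_LOG_SMART */

	printf("cdw10 = 0x%08x\n", cdw10);	/* prints 0x007f0002 */
	return 0;
}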
@@ -1477,7 +1476,7 @@ static int nvme_trans_power_state(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 		goto out_dma;
 	}
 	id_ctrl = mem;
-	lowest_pow_st = id_ctrl->npss - 1;
+	lowest_pow_st = max(POWER_STATE_0, (int)(id_ctrl->npss - 1));
 
 	switch (pc) {
 	case NVME_POWER_STATE_START_VALID:
@@ -1494,20 +1493,19 @@ static int nvme_trans_power_state(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 		break;
 	case NVME_POWER_STATE_IDLE:
 		/* Action unspecified if POWER CONDITION MODIFIER != [0,1,2] */
-		/* min of desired state and (lps-1) because lps is STOP */
 		if (pcmod == 0x0)
-			ps_desired = min(POWER_STATE_1, (lowest_pow_st - 1));
+			ps_desired = POWER_STATE_1;
 		else if (pcmod == 0x1)
-			ps_desired = min(POWER_STATE_2, (lowest_pow_st - 1));
+			ps_desired = POWER_STATE_2;
 		else if (pcmod == 0x2)
-			ps_desired = min(POWER_STATE_3, (lowest_pow_st - 1));
+			ps_desired = POWER_STATE_3;
 		break;
 	case NVME_POWER_STATE_STANDBY:
 		/* Action unspecified if POWER CONDITION MODIFIER != [0,1] */
 		if (pcmod == 0x0)
-			ps_desired = max(0, (lowest_pow_st - 2));
+			ps_desired = max(POWER_STATE_0, (lowest_pow_st - 2));
 		else if (pcmod == 0x1)
-			ps_desired = max(0, (lowest_pow_st - 1));
+			ps_desired = max(POWER_STATE_0, (lowest_pow_st - 1));
 		break;
 	case NVME_POWER_STATE_LU_CONTROL:
 	default:
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index a50173ca1d72..2bf403195c09 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -1,6 +1,6 @@
 /*
  * Definitions for the NVM Express interface
- * Copyright (c) 2011-2013, Intel Corporation.
+ * Copyright (c) 2011-2014, Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -10,10 +10,6 @@
  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  */
 
 #ifndef _LINUX_NVME_H
@@ -66,8 +62,8 @@ enum {
 
 #define NVME_VS(major, minor)	(major << 16 | minor)
 
-extern unsigned char io_timeout;
-#define NVME_IO_TIMEOUT	(io_timeout * HZ)
+extern unsigned char nvme_io_timeout;
+#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)
 
 /*
  * Represents an NVM Express device. Each nvme_dev is a PCI function.
@@ -94,7 +90,7 @@ struct nvme_dev {
 	struct miscdevice miscdev;
 	work_func_t reset_workfn;
 	struct work_struct reset_work;
-	struct notifier_block nb;
+	struct work_struct cpu_work;
 	char name[12];
 	char serial[20];
 	char model[40];
@@ -103,6 +99,7 @@ struct nvme_dev {
 	u32 stripe_size;
 	u16 oncs;
 	u16 abort_limit;
+	u8 vwc;
 	u8 initialized;
 };
 
@@ -159,7 +156,6 @@ struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
 void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
 			struct nvme_iod *iod);
 int nvme_submit_io_cmd(struct nvme_dev *, struct nvme_command *, u32 *);
-int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns);
 int nvme_submit_admin_cmd(struct nvme_dev *, struct nvme_command *,
 							u32 *result);
 int nvme_identify(struct nvme_dev *, unsigned nsid, unsigned cns,
diff --git a/include/uapi/linux/nvme.h b/include/uapi/linux/nvme.h
index 096fe1c6f83d..29a7d8619d8d 100644
--- a/include/uapi/linux/nvme.h
+++ b/include/uapi/linux/nvme.h
@@ -1,6 +1,6 @@
 /*
  * Definitions for the NVM Express interface
- * Copyright (c) 2011-2013, Intel Corporation.
+ * Copyright (c) 2011-2014, Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -10,10 +10,6 @@
  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  */
 
 #ifndef _UAPI_LINUX_NVME_H
@@ -31,7 +27,12 @@ struct nvme_id_power_state {
 	__u8			read_lat;
 	__u8			write_tput;
 	__u8			write_lat;
-	__u8			rsvd16[16];
+	__le16			idle_power;
+	__u8			idle_scale;
+	__u8			rsvd19;
+	__le16			active_power;
+	__u8			active_work_scale;
+	__u8			rsvd23[9];
 };
 
 enum {
@@ -49,7 +50,9 @@ struct nvme_id_ctrl {
 	__u8			ieee[3];
 	__u8			mic;
 	__u8			mdts;
-	__u8			rsvd78[178];
+	__u16			cntlid;
+	__u32			ver;
+	__u8			rsvd84[172];
 	__le16			oacs;
 	__u8			acl;
 	__u8			aerl;
@@ -57,7 +60,11 @@ struct nvme_id_ctrl {
 	__u8			lpa;
 	__u8			elpe;
 	__u8			npss;
-	__u8			rsvd264[248];
+	__u8			avscc;
+	__u8			apsta;
+	__le16			wctemp;
+	__le16			cctemp;
+	__u8			rsvd270[242];
 	__u8			sqes;
 	__u8			cqes;
 	__u8			rsvd514[2];
@@ -68,7 +75,12 @@ struct nvme_id_ctrl {
 	__u8			vwc;
 	__le16			awun;
 	__le16			awupf;
-	__u8			rsvd530[1518];
+	__u8			nvscc;
+	__u8			rsvd531;
+	__le16			acwu;
+	__u8			rsvd534[2];
+	__le32			sgls;
+	__u8			rsvd540[1508];
 	struct nvme_id_power_state	psd[32];
 	__u8			vs[1024];
 };
@@ -77,6 +89,7 @@ enum {
 	NVME_CTRL_ONCS_COMPARE			= 1 << 0,
 	NVME_CTRL_ONCS_WRITE_UNCORRECTABLE	= 1 << 1,
 	NVME_CTRL_ONCS_DSM			= 1 << 2,
+	NVME_CTRL_VWC_PRESENT			= 1 << 0,
 };
 
 struct nvme_lbaf {
@@ -95,7 +108,15 @@ struct nvme_id_ns {
 	__u8			mc;
 	__u8			dpc;
 	__u8			dps;
-	__u8			rsvd30[98];
+	__u8			nmic;
+	__u8			rescap;
+	__u8			fpi;
+	__u8			rsvd33;
+	__le16			nawun;
+	__le16			nawupf;
+	__le16			nacwu;
+	__u8			rsvd40[80];
+	__u8			eui64[8];
 	struct nvme_lbaf	lbaf[16];
 	__u8			rsvd192[192];
 	__u8			vs[3712];
@@ -126,7 +147,10 @@ struct nvme_smart_log {
 	__u8			unsafe_shutdowns[16];
 	__u8			media_errors[16];
 	__u8			num_err_log_entries[16];
-	__u8			rsvd192[320];
+	__le32			warning_temp_time;
+	__le32			critical_comp_time;
+	__le16			temp_sensor[8];
+	__u8			rsvd216[296];
 };
 
 enum {
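
These are byte-exact wire layouts, so every reshuffled reserved field must leave the struct sizes unchanged; the driver's _nvme_check_size() is a set of BUILD_BUG_ON assertions over exactly these sizes (the call added to nvme_exit() above forces the compiler to evaluate them). A compile-time check in the same style, with sizes as given by the NVMe spec:

#include <linux/bug.h>
#include <linux/nvme.h>

static inline void demo_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
}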
132enum { 156enum {
@@ -282,6 +306,10 @@ enum {
282 NVME_FEAT_WRITE_ATOMIC = 0x0a, 306 NVME_FEAT_WRITE_ATOMIC = 0x0a,
283 NVME_FEAT_ASYNC_EVENT = 0x0b, 307 NVME_FEAT_ASYNC_EVENT = 0x0b,
284 NVME_FEAT_SW_PROGRESS = 0x0c, 308 NVME_FEAT_SW_PROGRESS = 0x0c,
309 NVME_LOG_ERROR = 0x01,
310 NVME_LOG_SMART = 0x02,
311 NVME_LOG_FW_SLOT = 0x03,
312 NVME_LOG_RESERVATION = 0x80,
285 NVME_FWACT_REPL = (0 << 3), 313 NVME_FWACT_REPL = (0 << 3),
286 NVME_FWACT_REPL_ACTV = (1 << 3), 314 NVME_FWACT_REPL_ACTV = (1 << 3),
287 NVME_FWACT_ACTV = (2 << 3), 315 NVME_FWACT_ACTV = (2 << 3),