author		Keith Busch <keith.busch@intel.com>	2014-06-18 15:58:57 -0400
committer	Jens Axboe <axboe@fb.com>	2014-11-04 15:17:07 -0500
commit		6fccf9383b280d463a7dfe1e0d048aff8df8a25e (patch)
tree		59636e92f11c04a91c755f747728d6243e316c16
parent		4d51abf9bcca01efa3afbe94d50cc2cda8095da8 (diff)
NVMe: Async event request
Submits NVMe asynchronous event requests, up to the controller maximum or the number of possible different event types (8), whichever is smaller. Events successfully returned by the controller are logged.

Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
-rw-r--r--	drivers/block/nvme-core.c	42
-rw-r--r--	include/linux/nvme.h	1
2 files changed, 43 insertions(+), 0 deletions(-)
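As a rough illustration of the budgeting described in the commit message: the NVMe AERL field is zero-based, so a controller accepts aerl + 1 outstanding Asynchronous Event Request commands, and the driver further caps the budget at the 8 distinct event types. The standalone C sketch below only models that computation and is not part of the patch; event_budget and NVME_NR_ASYNC_EVENT_TYPES are made-up names for illustration, while aerl mirrors the ctrl->aerl field used in the diff.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative sketch, not part of the patch: the async event request
 * budget is the smaller of (AERL + 1) and the number of distinct
 * asynchronous event types (8).  AERL is a zero-based value reported
 * by the controller in its Identify Controller data.
 */
#define NVME_NR_ASYNC_EVENT_TYPES	8

static unsigned int event_budget(uint8_t aerl)
{
	unsigned int limit = (unsigned int)aerl + 1;

	return limit < NVME_NR_ASYNC_EVENT_TYPES ?
			limit : NVME_NR_ASYNC_EVENT_TYPES;
}

int main(void)
{
	/* AERL == 3 yields min(3 + 1, 8) == 4; AERL == 15 is capped at 8. */
	printf("AERL 3  -> event_limit %u\n", event_budget(3));
	printf("AERL 15 -> event_limit %u\n", event_budget(15));
	return 0;
}

In the patch itself this reduces to the single assignment dev->event_limit = min(ctrl->aerl + 1, 8) in nvme_dev_add(); the kernel thread then submits requests until event_limit reaches zero, and each successful or aborted completion hands a slot back.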
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index baee59530d6e..42a62bbf4a11 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -207,6 +207,7 @@ static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
 #define CMD_CTX_COMPLETED	(0x310 + CMD_CTX_BASE)
 #define CMD_CTX_INVALID		(0x314 + CMD_CTX_BASE)
 #define CMD_CTX_ABORT		(0x318 + CMD_CTX_BASE)
+#define CMD_CTX_ASYNC		(0x31C + CMD_CTX_BASE)
 
 static void special_completion(struct nvme_queue *nvmeq, void *ctx,
 						struct nvme_completion *cqe)
@@ -229,6 +230,17 @@ static void special_completion(struct nvme_queue *nvmeq, void *ctx,
 			cqe->command_id, le16_to_cpup(&cqe->sq_id));
 		return;
 	}
+	if (ctx == CMD_CTX_ASYNC) {
+		u32 result = le32_to_cpup(&cqe->result);
+		u16 status = le16_to_cpup(&cqe->status) >> 1;
+
+		if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ)
+			++nvmeq->dev->event_limit;
+		if (status == NVME_SC_SUCCESS)
+			dev_warn(nvmeq->q_dmadev,
+				"async event result %08x\n", result);
+		return;
+	}
 
 	dev_warn(nvmeq->q_dmadev, "Unknown special completion %p\n", ctx);
 }
@@ -1161,6 +1173,8 @@ static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
 			continue;
 		if (info[cmdid].ctx == CMD_CTX_CANCELLED)
 			continue;
+		if (timeout && info[cmdid].ctx == CMD_CTX_ASYNC)
+			continue;
 		if (timeout && nvmeq->dev->initialized) {
 			nvme_abort_cmd(cmdid, nvmeq);
 			continue;
@@ -1842,6 +1856,27 @@ static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
 	}
 }
 
+static int nvme_submit_async_req(struct nvme_queue *nvmeq)
+{
+	struct nvme_command *c;
+	int cmdid;
+
+	cmdid = alloc_cmdid(nvmeq, CMD_CTX_ASYNC, special_completion, 0);
+	if (cmdid < 0)
+		return cmdid;
+
+	c = &nvmeq->sq_cmds[nvmeq->sq_tail];
+	memset(c, 0, sizeof(*c));
+	c->common.opcode = nvme_admin_async_event;
+	c->common.command_id = cmdid;
+
+	if (++nvmeq->sq_tail == nvmeq->q_depth)
+		nvmeq->sq_tail = 0;
+	writel(nvmeq->sq_tail, nvmeq->q_db);
+
+	return 0;
+}
+
 static int nvme_kthread(void *data)
 {
 	struct nvme_dev *dev, *next;
@@ -1875,6 +1910,12 @@ static int nvme_kthread(void *data)
 			nvme_cancel_ios(nvmeq, true);
 			nvme_resubmit_bios(nvmeq);
 			nvme_resubmit_iods(nvmeq);
+
+			while ((i == 0) && (dev->event_limit > 0)) {
+				if (nvme_submit_async_req(nvmeq))
+					break;
+				dev->event_limit--;
+			}
  unlock:
 			spin_unlock_irq(&nvmeq->q_lock);
 		}
@@ -2244,6 +2285,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
 	dev->oncs = le16_to_cpup(&ctrl->oncs);
 	dev->abort_limit = ctrl->acl + 1;
 	dev->vwc = ctrl->vwc;
+	dev->event_limit = min(ctrl->aerl + 1, 8);
 	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
 	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
 	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 2bf403195c09..974efd04a4b1 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -99,6 +99,7 @@ struct nvme_dev {
 	u32 stripe_size;
 	u16 oncs;
 	u16 abort_limit;
+	u8 event_limit;
 	u8 vwc;
 	u8 initialized;
 };