author		Matthew Wilcox <matthew.r.wilcox@intel.com>	2011-03-16 16:29:00 -0400
committer	Matthew Wilcox <matthew.r.wilcox@intel.com>	2011-11-04 15:53:00 -0400
commit		fa92282149842645931580225647238428374758 (patch)
tree		a4a606beb428fb53d0660b90c09c48fde3e7dc03 /drivers/block/nvme.c
parent		714a7a22884b74862540bc84955274d86b2f6040 (diff)
NVMe: Fix comment formatting
Reported-by: Randy Dunlap <rdunlap@xenotime.net>
Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
Diffstat (limited to 'drivers/block/nvme.c')
-rw-r--r--	drivers/block/nvme.c	|  6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
index e392919e0eac..740a9c1b81aa 100644
--- a/drivers/block/nvme.c
+++ b/drivers/block/nvme.c
@@ -182,7 +182,8 @@ static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
 	return (cmdid < 0) ? -EINTR : cmdid;
 }
 
-/* If you need more than four handlers, you'll need to change how
+/*
+ * If you need more than four handlers, you'll need to change how
  * alloc_cmdid and nvme_process_cq work. Consider using a special
  * CMD_CTX value instead, if that works for your situation.
  */
@@ -1066,7 +1067,8 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 	prps = nvme_setup_prps(dev, &c.common, sg, length);
 
 	nvmeq = get_nvmeq(ns);
-	/* Since nvme_submit_sync_cmd sleeps, we can't keep preemption
+	/*
+	 * Since nvme_submit_sync_cmd sleeps, we can't keep preemption
 	 * disabled. We may be preempted at any point, and be rescheduled
 	 * to a different CPU. That will cause cacheline bouncing, but no
 	 * additional races since q_lock already protects against other CPUs.
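For context on what the patch changes: both hunks reshape existing block comments so that the opening and closing delimiters stand on their own lines, presumably to match the kernel's usual multi-line comment style (the commit message itself only says "Fix comment formatting"). A minimal sketch of that comment shape is below; the function name and comment text are placeholders for illustration, not code from nvme.c.

```c
/*
 * Kernel-style multi-line comment: the opening delimiter sits alone
 * on the first line, each continuation line begins with an asterisk
 * aligned under the first, and the closing delimiter sits alone on
 * the last line.
 */
static int example_placeholder(void)
{
	/*
	 * The same shape applies to comments inside functions, which is
	 * what the second hunk of this patch converts to.
	 */
	return 0;
}
```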