author		Vishal Verma <vishal.l.verma@intel.com>		2016-09-30 19:19:30 -0400
committer	Dan Williams <dan.j.williams@intel.com>		2016-09-30 20:03:45 -0400
commit		bd697a80c329072b991475fa6608bb0e665b3d90 (patch)
tree		9fbdafc9dce374644ef31c24dc87d85216249cbe
parent		9ffd6350a103cb9e73e3abb4573c900cfead2f9b (diff)
pmem: reduce kmap_atomic sections to the memcpys only
pmem_do_bvec used to kmap_atomic at the beginning, and only unmap at the end. Things like nvdimm_clear_poison may want to do nvdimm subsystem bookkeeping operations that may involve taking locks or doing memory allocations, and we can't do that from atomic context. Reduce the atomic context to just what needs it - the memcpy to/from pmem.

Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Vishal Verma <vishal.l.verma@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
-rw-r--r--	drivers/nvdimm/pmem.c	28
1 file changed, 23 insertions(+), 5 deletions(-)
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 571a6c7ee2fc..42b3a8217073 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -66,13 +66,32 @@ static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
 	invalidate_pmem(pmem->virt_addr + offset, len);
 }
 
+static void write_pmem(void *pmem_addr, struct page *page,
+		unsigned int off, unsigned int len)
+{
+	void *mem = kmap_atomic(page);
+
+	memcpy_to_pmem(pmem_addr, mem + off, len);
+	kunmap_atomic(mem);
+}
+
+static int read_pmem(struct page *page, unsigned int off,
+		void *pmem_addr, unsigned int len)
+{
+	int rc;
+	void *mem = kmap_atomic(page);
+
+	rc = memcpy_from_pmem(mem + off, pmem_addr, len);
+	kunmap_atomic(mem);
+	return rc;
+}
+
 static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
 			unsigned int len, unsigned int off, bool is_write,
 			sector_t sector)
 {
 	int rc = 0;
 	bool bad_pmem = false;
-	void *mem = kmap_atomic(page);
 	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
 	void *pmem_addr = pmem->virt_addr + pmem_off;
 
@@ -83,7 +102,7 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
 		if (unlikely(bad_pmem))
 			rc = -EIO;
 		else {
-			rc = memcpy_from_pmem(mem + off, pmem_addr, len);
+			rc = read_pmem(page, off, pmem_addr, len);
 			flush_dcache_page(page);
 		}
 	} else {
@@ -102,14 +121,13 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
 		 * after clear poison.
 		 */
 		flush_dcache_page(page);
-		memcpy_to_pmem(pmem_addr, mem + off, len);
+		write_pmem(pmem_addr, page, off, len);
 		if (unlikely(bad_pmem)) {
 			pmem_clear_poison(pmem, pmem_off, len);
-			memcpy_to_pmem(pmem_addr, mem + off, len);
+			write_pmem(pmem_addr, page, off, len);
 		}
 	}
 
-	kunmap_atomic(mem);
 	return rc;
 }
 
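
For context on the commit message's point about atomic context: kmap_atomic() disables preemption (and page faults), so anything that might sleep, such as taking a mutex, allocating memory, or nvdimm bookkeeping like clearing poison, must run outside the map/unmap window. The sketch below is illustrative only and is not code from this patch; copy_from_page(), write_and_account() and bookkeeping_lock are placeholder names standing in for the real pmem helpers and nvdimm locking.

/*
 * Illustrative sketch, not kernel code from this patch; names are
 * placeholders for the real pmem helpers and nvdimm locking.
 */
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/string.h>

static DEFINE_MUTEX(bookkeeping_lock);	/* stand-in for nvdimm subsystem locking */

/*
 * Keep the atomic section as small as possible: only the copy runs
 * between kmap_atomic() and kunmap_atomic().
 */
static void copy_from_page(void *dst, struct page *page,
		unsigned int off, unsigned int len)
{
	void *mem = kmap_atomic(page);	/* enters atomic context */

	memcpy(dst, mem + off, len);
	kunmap_atomic(mem);		/* leaves atomic context */
}

/*
 * Caller side: sleeping work (locks, allocations) is legal here
 * because no atomic kmap is held across it.
 */
static void write_and_account(void *dst, struct page *page,
		unsigned int off, unsigned int len)
{
	copy_from_page(dst, page, off, len);

	mutex_lock(&bookkeeping_lock);	/* may sleep: must not sit inside a kmap_atomic window */
	/* ... bookkeeping that may take locks or allocate ... */
	mutex_unlock(&bookkeeping_lock);
}

This is the shape the patch gives pmem_do_bvec(): the kmap_atomic() window lives entirely inside read_pmem()/write_pmem(), so pmem_clear_poison() can be called between the two copies from a sleepable context.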