author    Linus Torvalds <torvalds@linux-foundation.org>  2017-05-02 02:54:56 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-05-02 02:54:56 -0400
commit    d3b5d35290d729a2518af00feca867385a1b08fa (patch)
tree      7b56c0863d59bc57f7c7dcf5d5665c56b05f1d1b /drivers/nvdimm/pmem.c
parent    aa2a4b6569d5b10491b606a86e574dff3852597a (diff)
parent    71389703839ebe9cb426c72d5f0bd549592e583c (diff)
Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 mm updates from Ingo Molnar:
 "The main x86 MM changes in this cycle were:

   - continued native kernel PCID support preparation patches to the
     TLB flushing code (Andy Lutomirski)

   - various fixes related to 32-bit compat syscall returning address
     over 4Gb in applications, launched from 64-bit binaries - motivated
     by C/R frameworks such as Virtuozzo. (Dmitry Safonov)

   - continued Intel 5-level paging enablement: in particular the
     conversion of x86 GUP to the generic GUP code. (Kirill A. Shutemov)

   - x86/mpx ABI corner case fixes/enhancements (Joerg Roedel)

   - ... plus misc updates, fixes and cleanups"

* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (62 commits)
  mm, zone_device: Replace {get, put}_zone_device_page() with a single reference to fix pmem crash
  x86/mm: Fix flush_tlb_page() on Xen
  x86/mm: Make flush_tlb_mm_range() more predictable
  x86/mm: Remove flush_tlb() and flush_tlb_current_task()
  x86/vm86/32: Switch to flush_tlb_mm_range() in mark_screen_rdonly()
  x86/mm/64: Fix crash in remove_pagetable()
  Revert "x86/mm/gup: Switch GUP to the generic get_user_page_fast() implementation"
  x86/boot/e820: Remove a redundant self assignment
  x86/mm: Fix dump pagetables for 4 levels of page tables
  x86/mpx, selftests: Only check bounds-vs-shadow when we keep shadow
  x86/mpx: Correctly report do_mpx_bt_fault() failures to user-space
  Revert "x86/mm/numa: Remove numa_nodemask_from_meminfo()"
  x86/espfix: Add support for 5-level paging
  x86/kasan: Extend KASAN to support 5-level paging
  x86/mm: Add basic defines/helpers for CONFIG_X86_5LEVEL=y
  x86/paravirt: Add 5-level support to the paravirt code
  x86/mm: Define virtual memory map for 5-level paging
  x86/asm: Remove __VIRTUAL_MASK_SHIFT==47 assert
  x86/boot: Detect 5-level paging support
  x86/mm/numa: Remove numa_nodemask_from_meminfo()
  ...
Diffstat (limited to 'drivers/nvdimm/pmem.c')
 drivers/nvdimm/pmem.c | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 5b536be5a12e..fbc640bf06b0 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -25,6 +25,7 @@
 #include <linux/badblocks.h>
 #include <linux/memremap.h>
 #include <linux/vmalloc.h>
+#include <linux/blk-mq.h>
 #include <linux/pfn_t.h>
 #include <linux/slab.h>
 #include <linux/pmem.h>
@@ -231,6 +232,11 @@ static void pmem_release_queue(void *q)
 	blk_cleanup_queue(q);
 }
 
+static void pmem_freeze_queue(void *q)
+{
+	blk_freeze_queue_start(q);
+}
+
 static void pmem_release_disk(void *disk)
 {
 	del_gendisk(disk);
@@ -284,6 +290,9 @@ static int pmem_attach_disk(struct device *dev,
 	if (!q)
 		return -ENOMEM;
 
+	if (devm_add_action_or_reset(dev, pmem_release_queue, q))
+		return -ENOMEM;
+
 	pmem->pfn_flags = PFN_DEV;
 	if (is_nd_pfn(dev)) {
 		addr = devm_memremap_pages(dev, &pfn_res, &q->q_usage_counter,
@@ -303,10 +312,10 @@ static int pmem_attach_disk(struct device *dev,
 			pmem->size, ARCH_MEMREMAP_PMEM);
 
 	/*
-	 * At release time the queue must be dead before
+	 * At release time the queue must be frozen before
 	 * devm_memremap_pages is unwound
 	 */
-	if (devm_add_action_or_reset(dev, pmem_release_queue, q))
+	if (devm_add_action_or_reset(dev, pmem_freeze_queue, q))
 		return -ENOMEM;
 
 	if (IS_ERR(addr))
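
The updated comment in the last hunk is the heart of the patch: devres actions run in reverse registration order at device release, so registering pmem_release_queue() first, then mapping pages with devm_memremap_pages(), then registering pmem_freeze_queue() last makes teardown proceed as freeze queue, unmap pages, clean up queue, leaving no window where I/O can race with the unmapping. The following is a minimal userspace C sketch of that LIFO unwind, not kernel code; every identifier in it (add_action(), release_all(), the printf bodies) is an illustrative stand-in for the devres machinery, not the real API:

/*
 * Minimal sketch of the devres LIFO unwind that the patch relies on.
 * All names are hypothetical; only the ordering behavior is the point.
 */
#include <stdio.h>

#define MAX_ACTIONS 8

typedef void (*action_fn)(void *);

static action_fn actions[MAX_ACTIONS];
static void *action_arg[MAX_ACTIONS];
static int n_actions;

/* stand-in for devm_add_action_or_reset(): remember a release action */
static int add_action(action_fn fn, void *arg)
{
	if (n_actions == MAX_ACTIONS)
		return -1;
	actions[n_actions] = fn;
	action_arg[n_actions] = arg;
	n_actions++;
	return 0;
}

/* stand-in for device release: run actions in reverse (LIFO) order */
static void release_all(void)
{
	while (n_actions > 0) {
		n_actions--;
		actions[n_actions](action_arg[n_actions]);
	}
}

static void freeze_queue(void *q)  { printf("1. freeze  %s\n", (char *)q); }
static void unmap_pages(void *r)   { printf("2. unmap   %s\n", (char *)r); }
static void release_queue(void *q) { printf("3. cleanup %s\n", (char *)q); }

int main(void)
{
	/* same registration order as the patched pmem_attach_disk() */
	add_action(release_queue, "q");    /* registered first, runs last  */
	add_action(unmap_pages, "pfn_res");
	add_action(freeze_queue, "q");     /* registered last, runs first  */

	release_all();
	return 0;
}

Compiled and run, the sketch prints the freeze/unmap/cleanup sequence, which is exactly the ordering the revised comment ("the queue must be frozen before devm_memremap_pages is unwound") demands.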