Diffstat (limited to 'arch')
-rw-r--r--	arch/s390/Kconfig		 1
-rw-r--r--	arch/s390/include/asm/pgalloc.h	 3
-rw-r--r--	arch/s390/include/asm/tlb.h	22
-rw-r--r--	arch/s390/mm/pgtable.c		63
4 files changed, 61 insertions(+), 28 deletions(-)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 2b7c0fbe578..9015060919a 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -90,7 +90,6 @@ config S390
 	select HAVE_KERNEL_XZ
 	select HAVE_ARCH_MUTEX_CPU_RELAX
 	select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
-	select HAVE_RCU_TABLE_FREE if SMP
 	select ARCH_SAVE_PAGE_KEYS if HIBERNATION
 	select HAVE_MEMBLOCK
 	select HAVE_MEMBLOCK_NODE_MAP
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 8eef9b5b3cf..78e3041919d 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -22,10 +22,7 @@ void crst_table_free(struct mm_struct *, unsigned long *);
 
 unsigned long *page_table_alloc(struct mm_struct *, unsigned long);
 void page_table_free(struct mm_struct *, unsigned long *);
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
 void page_table_free_rcu(struct mmu_gather *, unsigned long *);
-void __tlb_remove_table(void *_table);
-#endif
 
 static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
 {
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index c687a2c8346..775a5eea8f9 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -30,14 +30,10 @@
 
 struct mmu_gather {
 	struct mm_struct *mm;
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	struct mmu_table_batch *batch;
-#endif
 	unsigned int fullmm;
-	unsigned int need_flush;
 };
 
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
 struct mmu_table_batch {
 	struct rcu_head rcu;
 	unsigned int nr;
@@ -49,7 +45,6 @@ struct mmu_table_batch {
49 45
50extern void tlb_table_flush(struct mmu_gather *tlb); 46extern void tlb_table_flush(struct mmu_gather *tlb);
51extern void tlb_remove_table(struct mmu_gather *tlb, void *table); 47extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
52#endif
53 48
54static inline void tlb_gather_mmu(struct mmu_gather *tlb, 49static inline void tlb_gather_mmu(struct mmu_gather *tlb,
55 struct mm_struct *mm, 50 struct mm_struct *mm,
@@ -57,29 +52,20 @@ static inline void tlb_gather_mmu(struct mmu_gather *tlb,
 {
 	tlb->mm = mm;
 	tlb->fullmm = full_mm_flush;
-	tlb->need_flush = 0;
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	tlb->batch = NULL;
-#endif
 	if (tlb->fullmm)
 		__tlb_flush_mm(mm);
 }
 
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
-	if (!tlb->need_flush)
-		return;
-	tlb->need_flush = 0;
-	__tlb_flush_mm(tlb->mm);
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	tlb_table_flush(tlb);
-#endif
 }
 
 static inline void tlb_finish_mmu(struct mmu_gather *tlb,
 				  unsigned long start, unsigned long end)
 {
-	tlb_flush_mmu(tlb);
+	tlb_table_flush(tlb);
 }
 
 /*
@@ -105,10 +91,8 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 				unsigned long address)
 {
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	if (!tlb->fullmm)
 		return page_table_free_rcu(tlb, (unsigned long *) pte);
-#endif
 	page_table_free(tlb->mm, (unsigned long *) pte);
 }
 
@@ -125,10 +109,8 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
 #ifdef __s390x__
 	if (tlb->mm->context.asce_limit <= (1UL << 31))
 		return;
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	if (!tlb->fullmm)
 		return tlb_remove_table(tlb, pmd);
-#endif
 	crst_table_free(tlb->mm, (unsigned long *) pmd);
 #endif
 }
@@ -146,10 +128,8 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
 #ifdef __s390x__
 	if (tlb->mm->context.asce_limit <= (1UL << 42))
 		return;
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	if (!tlb->fullmm)
 		return tlb_remove_table(tlb, pud);
-#endif
 	crst_table_free(tlb->mm, (unsigned long *) pud);
 #endif
 }
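
With the CONFIG_HAVE_RCU_TABLE_FREE conditionals gone, the batch pointer and the table-batch helpers are always part of s390's mmu_gather, and pte_free_tlb()/pmd_free_tlb()/pud_free_tlb() choose between deferred and immediate freeing based only on tlb->fullmm. The stand-alone C sketch below models just that decision; mmu_gather_model, defer_free() and free_now() are invented names for illustration and are not part of the kernel API.

```c
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for struct mmu_gather: only the field the
 * decision in pte_free_tlb() actually looks at. */
struct mmu_gather_model {
	bool fullmm;	/* the whole address space is being torn down */
};

/* Hypothetical helpers standing in for page_table_free_rcu()/tlb_remove_table()
 * and for page_table_free()/crst_table_free(). */
static void defer_free(void *table) { printf("deferred (batched, RCU-style): %p\n", table); }
static void free_now(void *table)   { printf("freed immediately: %p\n", table); }

/* Mirrors the shape of pte_free_tlb() after the patch: a partial unmap
 * defers the page table, a full-mm teardown frees it right away. */
static void pte_free_tlb_model(struct mmu_gather_model *tlb, void *pte)
{
	if (!tlb->fullmm) {
		defer_free(pte);
		return;
	}
	free_now(pte);
}

int main(void)
{
	long table = 0;
	struct mmu_gather_model partial = { .fullmm = false };
	struct mmu_gather_model full    = { .fullmm = true };

	pte_free_tlb_model(&partial, &table);	/* other CPUs may still walk it */
	pte_free_tlb_model(&full, &table);	/* nobody else can see the mm */
	return 0;
}
```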
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 373adf69b01..6e765bf0067 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -678,8 +678,6 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
 	}
 }
 
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-
 static void __page_table_free_rcu(void *table, unsigned bit)
 {
 	struct page *page;
@@ -733,7 +731,66 @@ void __tlb_remove_table(void *_table)
 	free_pages((unsigned long) table, ALLOC_ORDER);
 }
 
-#endif
+static void tlb_remove_table_smp_sync(void *arg)
+{
+	/* Simply deliver the interrupt */
+}
+
+static void tlb_remove_table_one(void *table)
+{
+	/*
+	 * This isn't an RCU grace period and hence the page-tables cannot be
+	 * assumed to be actually RCU-freed.
+	 *
+	 * It is however sufficient for software page-table walkers that rely
+	 * on IRQ disabling. See the comment near struct mmu_table_batch.
+	 */
+	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
+	__tlb_remove_table(table);
+}
+
+static void tlb_remove_table_rcu(struct rcu_head *head)
+{
+	struct mmu_table_batch *batch;
+	int i;
+
+	batch = container_of(head, struct mmu_table_batch, rcu);
+
+	for (i = 0; i < batch->nr; i++)
+		__tlb_remove_table(batch->tables[i]);
+
+	free_page((unsigned long)batch);
+}
+
+void tlb_table_flush(struct mmu_gather *tlb)
+{
+	struct mmu_table_batch **batch = &tlb->batch;
+
+	if (*batch) {
+		__tlb_flush_mm(tlb->mm);
+		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
+		*batch = NULL;
+	}
+}
+
+void tlb_remove_table(struct mmu_gather *tlb, void *table)
+{
+	struct mmu_table_batch **batch = &tlb->batch;
+
+	if (*batch == NULL) {
+		*batch = (struct mmu_table_batch *)
+			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
+		if (*batch == NULL) {
+			__tlb_flush_mm(tlb->mm);
+			tlb_remove_table_one(table);
+			return;
+		}
+		(*batch)->nr = 0;
+	}
+	(*batch)->tables[(*batch)->nr++] = table;
+	if ((*batch)->nr == MAX_TABLE_BATCH)
+		tlb_table_flush(tlb);
+}
 
 /*
  * switch on pgstes for its userspace process (for kvm)
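
The new tlb_remove_table()/tlb_table_flush() pair in pgtable.c batches freed page tables into a page-sized array and hands the full batch to call_rcu_sched(), falling back to an IPI-synchronized immediate free when no batch page can be allocated. The following stand-alone C sketch models that batch-and-fallback flow in user space; the *_model names and MODEL_MAX_BATCH are invented for illustration (the real MAX_TABLE_BATCH is sized so the batch fills one page), and the RCU grace period and smp_call_function() IPI are replaced with prints.

```c
#include <stdio.h>
#include <stdlib.h>

#define MODEL_MAX_BATCH 4	/* arbitrary; the kernel derives this from PAGE_SIZE */

/* Simplified stand-ins for struct mmu_table_batch and struct mmu_gather. */
struct table_batch {
	int nr;
	void *tables[MODEL_MAX_BATCH];
};

struct gather_model {
	struct table_batch *batch;
};

static void table_free(void *table)
{
	printf("  freeing table %p\n", table);
}

/* Models tlb_table_flush(): the kernel flushes the TLB and defers the
 * batch to call_rcu_sched(); here the "grace period" is just a print. */
static void table_flush_model(struct gather_model *tlb)
{
	struct table_batch *batch = tlb->batch;
	int i;

	if (!batch)
		return;
	printf("flush TLB, wait a grace period, then release batch of %d:\n", batch->nr);
	for (i = 0; i < batch->nr; i++)
		table_free(batch->tables[i]);
	free(batch);
	tlb->batch = NULL;
}

/* Models tlb_remove_table(): batch the pointer; if no batch page can be
 * allocated, synchronize (an IPI in the kernel) and free immediately. */
static void remove_table_model(struct gather_model *tlb, void *table)
{
	if (!tlb->batch) {
		tlb->batch = calloc(1, sizeof(*tlb->batch));
		if (!tlb->batch) {
			printf("no batch page: sync with other CPUs, free now\n");
			table_free(table);
			return;
		}
	}
	tlb->batch->tables[tlb->batch->nr++] = table;
	if (tlb->batch->nr == MODEL_MAX_BATCH)
		table_flush_model(tlb);
}

int main(void)
{
	struct gather_model tlb = { .batch = NULL };
	long tables[6];

	for (int i = 0; i < 6; i++)
		remove_table_model(&tlb, &tables[i]);
	table_flush_model(&tlb);	/* the final flush a caller would trigger */
	return 0;
}
```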