author		Linus Torvalds <torvalds@linux-foundation.org>	2015-02-11 21:23:28 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-02-11 21:23:28 -0500
commit		59d53737a8640482995fea13c6e2c0fd016115d6 (patch)
tree		3423eb92315865d76cb8d488513bfef6ab9251d0 /arch/s390
parent		d3f180ea1a44aecba1b0dab2a253428e77f906bf (diff)
parent		8138a67a5557ffea3a21dfd6f037842d4e748513 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge second set of updates from Andrew Morton:
 "More of MM"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (83 commits)
  mm/nommu.c: fix arithmetic overflow in __vm_enough_memory()
  mm/mmap.c: fix arithmetic overflow in __vm_enough_memory()
  vmstat: Reduce time interval to stat update on idle cpu
  mm/page_owner.c: remove unnecessary stack_trace field
  Documentation/filesystems/proc.txt: describe /proc/<pid>/map_files
  mm: incorporate read-only pages into transparent huge pages
  vmstat: do not use deferrable delayed work for vmstat_update
  mm: more aggressive page stealing for UNMOVABLE allocations
  mm: always steal split buddies in fallback allocations
  mm: when stealing freepages, also take pages created by splitting buddy page
  mincore: apply page table walker on do_mincore()
  mm: /proc/pid/clear_refs: avoid split_huge_page()
  mm: pagewalk: fix misbehavior of walk_page_range for vma(VM_PFNMAP)
  mempolicy: apply page table walker on queue_pages_range()
  arch/powerpc/mm/subpage-prot.c: use walk->vma and walk_page_vma()
  memcg: cleanup preparation for page table walk
  numa_maps: remove numa_maps->vma
  numa_maps: fix typo in gather_hugetbl_stats
  pagemap: use walk->vma instead of calling find_vma()
  clear_refs: remove clear_refs_private->vma and introduce clear_refs_test_walk()
  ...
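The top two entries in the list fix the same arithmetic-overflow bug in the MMU and no-MMU builds of __vm_enough_memory(). A toy illustration of the general failure mode; all names and numbers here are invented, and the actual kernel fix differs in detail:

#include <stdio.h>

/* Toy model of an "enough memory?" accounting check. The point is the
 * failure mode: subtracting a reserve from an unsigned count wraps
 * around instead of going negative, so the check wrongly succeeds. */
int main(void)
{
	unsigned long free_pages = 100;	/* pages actually available */
	unsigned long reserved   = 200;	/* pages held back for root  */

	unsigned long avail = free_pages - reserved;	/* wraps toward 2^64 */
	printf("buggy: avail = %lu\n", avail);

	/* Compare before subtracting so the result cannot wrap. */
	unsigned long fixed = free_pages > reserved ? free_pages - reserved : 0;
	printf("fixed: avail = %lu\n", fixed);
	return 0;
}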
Diffstat (limited to 'arch/s390')
-rw-r--r--	arch/s390/include/asm/pgtable.h	2
-rw-r--r--	arch/s390/mm/gup.c	6
-rw-r--r--	arch/s390/mm/hugetlbpage.c	20
3 files changed, 3 insertions(+), 25 deletions(-)
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 0441ec24ae87..fbb5ee3ae57c 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -99,7 +99,7 @@ extern unsigned long zero_page_mask;
 #endif /* CONFIG_64BIT */
 #define PTRS_PER_PGD	2048
 
-#define FIRST_USER_ADDRESS	0
+#define FIRST_USER_ADDRESS	0UL
 
 #define pte_ERROR(e) \
 	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
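The pgtable.h hunk only widens a literal, from int 0 to unsigned long 0UL. One plausible motivation, which the hunk itself does not state, is that addresses are unsigned long throughout the kernel, and type-checked helpers in the style of the kernel's min()/max() warn whenever their operands' types differ. A userspace sketch of that effect; checked_min() is invented here to mimic the kernel's pointer-comparison trick:

#include <stdio.h>

#define FIRST_USER_ADDRESS_OLD	0	/* int literal, pre-patch    */
#define FIRST_USER_ADDRESS_NEW	0UL	/* unsigned long, post-patch */

/* Mimics the kernel's type-checked min(): comparing pointers to the two
 * temporaries makes gcc warn ("comparison of distinct pointer types")
 * whenever the operand types differ. */
#define checked_min(x, y) ({		\
	__typeof__(x) _x = (x);		\
	__typeof__(y) _y = (y);		\
	(void) (&_x == &_y);		\
	_x < _y ? _x : _y; })

int main(void)
{
	unsigned long addr = 0x1000;

	/* int vs unsigned long: warns at every call site like this one */
	unsigned long a = checked_min(addr, FIRST_USER_ADDRESS_OLD);

	/* unsigned long vs unsigned long: silent */
	unsigned long b = checked_min(addr, FIRST_USER_ADDRESS_NEW);

	printf("%lu %lu\n", a, b);
	return 0;
}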
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 639fce464008..5c586c78ca8d 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -235,10 +235,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 	/* Try to get the remaining pages with get_user_pages */
 	start += nr << PAGE_SHIFT;
 	pages += nr;
-	down_read(&mm->mmap_sem);
-	ret = get_user_pages(current, mm, start,
-			     nr_pages - nr, write, 0, pages, NULL);
-	up_read(&mm->mmap_sem);
+	ret = get_user_pages_unlocked(current, mm, start,
+				      nr_pages - nr, write, 0, pages);
 	/* Have to be a bit careful with return values */
 	if (nr > 0)
 		ret = (ret < 0) ? nr : ret + nr;
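The gup.c hunk folds an open-coded lock/call/unlock sequence into one call to get_user_pages_unlocked(). A kernel-context sketch of the contract the helper takes over, written against the 3.19-era signatures visible in the diff; the function name is invented here, and the real helper is smarter in that it may drop mmap_sem while sleeping in fault handling:

#include <linux/mm.h>
#include <linux/sched.h>

/* Sketch of what the caller in gup.c used to spell out by hand. This
 * shows only the basic take/call/release contract that the hunk
 * deletes from the caller; it is not the kernel's implementation. */
static long gup_unlocked_sketch(struct task_struct *tsk, struct mm_struct *mm,
				unsigned long start, unsigned long nr_pages,
				int write, int force, struct page **pages)
{
	long ret;

	down_read(&mm->mmap_sem);	/* taken on the caller's behalf */
	ret = get_user_pages(tsk, mm, start, nr_pages,
			     write, force, pages, NULL);
	up_read(&mm->mmap_sem);		/* released before returning */
	return ret;
}

Centralizing the locking also removes a class of caller bugs: a path that returns early between down_read() and up_read() can no longer leak the semaphore.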
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 3c80d2e38f03..210ffede0153 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -192,12 +192,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
 	return 0;
 }
 
-struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
-			      int write)
-{
-	return ERR_PTR(-EINVAL);
-}
-
 int pmd_huge(pmd_t pmd)
 {
 	if (!MACHINE_HAS_HPAGE)
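The deleted s390 follow_huge_addr() simply returned ERR_PTR(-EINVAL): an errno packed into a pointer. A freestanding rerun of that encoding, mirroring the helpers in include/linux/err.h; the names are lowercased here to mark them as userspace stand-ins:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Same scheme as include/linux/err.h: small negative errnos occupy the
 * top MAX_ERRNO values of the address space, which no valid kernel
 * pointer uses, so one return value can carry a page or an error. */
static void *err_ptr(long error)     { return (void *)error; }
static long  ptr_err(const void *p)  { return (long)p; }
static int   is_err(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *page = err_ptr(-EINVAL);	/* what follow_huge_addr() returned */

	if (is_err(page))
		printf("error pointer, errno = %ld\n", -ptr_err(page));
	return 0;
}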
@@ -210,17 +204,3 @@ int pud_huge(pud_t pud)
 {
 	return 0;
 }
-
-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
-			     pmd_t *pmdp, int write)
-{
-	struct page *page;
-
-	if (!MACHINE_HAS_HPAGE)
-		return NULL;
-
-	page = pmd_page(*pmdp);
-	if (page)
-		page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
-	return page;
-}
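The removed follow_huge_pmd() located the base page inside a huge page by pointer arithmetic on struct page: page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT). The same index computation redone with plain integers; the shift values are this sketch's assumption, taken from s390's 4 KiB base pages and 1 MiB huge pages:

#include <stdio.h>

#define PAGE_SHIFT	12	/* 4 KiB base page  */
#define HPAGE_SHIFT	20	/* 1 MiB huge page  */
#define HPAGE_MASK	(~((1UL << HPAGE_SHIFT) - 1))

int main(void)
{
	unsigned long address = 0x12345678UL;

	/* byte offset of the address within its 1 MiB huge page */
	unsigned long off = address & ~HPAGE_MASK;	/* 0x45678 */

	/* index of the 4 KiB base page holding that offset; the removed
	 * code added this index to the head struct page pointer */
	unsigned long idx = off >> PAGE_SHIFT;		/* 0x45 */

	printf("offset %#lx -> subpage index %#lx\n", off, idx);
	return 0;
}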