author | Joachim Fenkes <fenkes@de.ibm.com> | 2007-09-13 12:15:28 -0400
---|---|---
committer | Roland Dreier <rolandd@cisco.com> | 2007-10-09 22:59:13 -0400
commit | c8d8beea0383e47c9d65d45f0ca95626ec435fcd |
tree | 00f409205683f3489344eb0cf1b86f8091354fc7 /drivers/infiniband/core |
parent | 247e020ee5e2a7bf46f2d7a3d4490a670a712a40 |
IB/umem: Add hugetlb flag to struct ib_umem
During ib_umem_get(), determine whether all pages from the memory
region are hugetlb pages and report this in the "hugetlb" member.
Low-level drivers can use this information if they need it.
Signed-off-by: Joachim Fenkes <fenkes@de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
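As an editorial illustration (not part of the commit), here is a minimal sketch of how a low-level driver might consume the new flag once ib_umem_get() has pinned a region. my_reg_user_mr() is a hypothetical driver hook; struct ib_umem and the ib_umem_get() signature match the 2.6.23-era API visible in the diff below, and the hugetlb member is the one this patch adds.

/*
 * Hypothetical driver-side use of umem->hugetlb: after pinning the
 * user memory, choose a larger hardware page size only when every
 * pinned page came from hugetlb.  my_reg_user_mr() is illustrative,
 * not a real in-tree function.
 */
#include <linux/err.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>

static int my_reg_user_mr(struct ib_ucontext *context,
			  unsigned long addr, size_t size, int access)
{
	struct ib_umem *umem;

	umem = ib_umem_get(context, addr, size, access);
	if (IS_ERR(umem))
		return PTR_ERR(umem);

	if (umem->hugetlb) {
		/* every page is hugetlb-backed: program large HW pages */
	} else {
		/* mixed or ordinary pages: fall back to PAGE_SIZE */
	}

	return 0;
}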
Diffstat (limited to 'drivers/infiniband/core')
-rw-r--r-- | drivers/infiniband/core/umem.c | 20
1 file changed, 19 insertions(+), 1 deletion(-)
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 664d2faa9e74..2f54e29dc7a6 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -37,6 +37,7 @@
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
 #include <linux/sched.h>
+#include <linux/hugetlb.h>
 
 #include "uverbs.h"
 
@@ -75,6 +76,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 {
 	struct ib_umem *umem;
 	struct page **page_list;
+	struct vm_area_struct **vma_list;
 	struct ib_umem_chunk *chunk;
 	unsigned long locked;
 	unsigned long lock_limit;
@@ -104,6 +106,9 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 	 */
 	umem->writable = !!(access & ~IB_ACCESS_REMOTE_READ);
 
+	/* We assume the memory is from hugetlb until proved otherwise */
+	umem->hugetlb = 1;
+
 	INIT_LIST_HEAD(&umem->chunk_list);
 
 	page_list = (struct page **) __get_free_page(GFP_KERNEL);
@@ -112,6 +117,14 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 		return ERR_PTR(-ENOMEM);
 	}
 
+	/*
+	 * if we can't alloc the vma_list, it's not so bad;
+	 * just assume the memory is not hugetlb memory
+	 */
+	vma_list = (struct vm_area_struct **) __get_free_page(GFP_KERNEL);
+	if (!vma_list)
+		umem->hugetlb = 0;
+
 	npages = PAGE_ALIGN(size + umem->offset) >> PAGE_SHIFT;
 
 	down_write(&current->mm->mmap_sem);
@@ -131,7 +144,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 		ret = get_user_pages(current, current->mm, cur_base,
 				     min_t(int, npages,
 					   PAGE_SIZE / sizeof (struct page *)),
-				     1, !umem->writable, page_list, NULL);
+				     1, !umem->writable, page_list, vma_list);
 
 		if (ret < 0)
 			goto out;
@@ -152,6 +165,9 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 
 		chunk->nents = min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK);
 		for (i = 0; i < chunk->nents; ++i) {
+			if (vma_list &&
+			    !is_vm_hugetlb_page(vma_list[i + off]))
+				umem->hugetlb = 0;
 			chunk->page_list[i].page   = page_list[i + off];
 			chunk->page_list[i].offset = 0;
 			chunk->page_list[i].length = PAGE_SIZE;
@@ -186,6 +202,8 @@ out:
 	current->mm->locked_vm = locked;
 
 	up_write(&current->mm->mmap_sem);
+	if (vma_list)
+		free_page((unsigned long) vma_list);
 	free_page((unsigned long) page_list);
 
 	return ret < 0 ? ERR_PTR(ret) : umem;
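Two design points in the patch are worth noting. First, get_user_pages() fills vma_list in lockstep with page_list, so vma_list[i + off] is the VMA backing page_list[i + off], and one is_vm_hugetlb_page() test per pinned page is enough to decide the flag. Second, the vma_list page is strictly optional: if its allocation fails, the code simply reports hugetlb = 0 and carries on, degrading the hint gracefully rather than failing the whole memory registration.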