path: root/drivers/infiniband/core/umem.c
author	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-11 22:43:13 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-11 22:43:13 -0400
commit	ce9d3c9a6a9aef61525be07fe6ba27d937236aa2 (patch)
tree	1b29bcb8f60fc6b59fa0d7b833cc733b8ebe17c9 /drivers/infiniband/core/umem.c
parent	038a5008b2f395c85e6e71d6ddf3c684e7c405b0 (diff)
parent	3d73c2884f45f9a297cbc956cea101405a9703f2 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (87 commits)
  mlx4_core: Fix section mismatches
  IPoIB: Allow setting policy to ignore multicast groups
  IB/mthca: Mark error paths as unlikely() in post_srq_recv functions
  IB/ipath: Minor fix to ordering of freeing and zeroing of tid pages.
  IB/ipath: Remove redundant link state checks
  IB/ipath: Fix IB_EVENT_PORT_ERR event
  IB/ipath: Better handling of unexpected GPIO interrupts
  IB/ipath: Maintain active time on all chips
  IB/ipath: Fix QHT7040 serial number check
  IB/ipath: Indicate a couple of chip bugs to userspace
  IB/ipath: iba6110 rev4 no longer needs recv header overrun workaround
  IB/ipath: Use counters in ipath_poll and cleanup interrupts in ipath_close
  IB/ipath: Remove duplicate copy of LMC
  IB/ipath: Add ability to set the LMC via the sysfs debugging interface
  IB/ipath: Optimize completion queue entry insertion and polling
  IB/ipath: Implement IB_EVENT_QP_LAST_WQE_REACHED
  IB/ipath: Generate flush CQE when QP is in error state
  IB/ipath: Remove redundant code
  IB/ipath: Future proof eeprom checksum code (contents reading)
  IB/ipath: UC RDMA WRITE with IMMEDIATE doesn't send the immediate
  ...
Diffstat (limited to 'drivers/infiniband/core/umem.c')
-rw-r--r--	drivers/infiniband/core/umem.c	20
1 file changed, 19 insertions(+), 1 deletion(-)
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 664d2faa9e7..2f54e29dc7a 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -37,6 +37,7 @@
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
 #include <linux/sched.h>
+#include <linux/hugetlb.h>
 
 #include "uverbs.h"
 
@@ -75,6 +76,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 {
 	struct ib_umem *umem;
 	struct page **page_list;
+	struct vm_area_struct **vma_list;
 	struct ib_umem_chunk *chunk;
 	unsigned long locked;
 	unsigned long lock_limit;
@@ -104,6 +106,9 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 	 */
 	umem->writable  = !!(access & ~IB_ACCESS_REMOTE_READ);
 
+	/* We assume the memory is from hugetlb until proved otherwise */
+	umem->hugetlb   = 1;
+
 	INIT_LIST_HEAD(&umem->chunk_list);
 
 	page_list = (struct page **) __get_free_page(GFP_KERNEL);
@@ -112,6 +117,14 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 		return ERR_PTR(-ENOMEM);
 	}
 
+	/*
+	 * if we can't alloc the vma_list, it's not so bad;
+	 * just assume the memory is not hugetlb memory
+	 */
+	vma_list = (struct vm_area_struct **) __get_free_page(GFP_KERNEL);
+	if (!vma_list)
+		umem->hugetlb = 0;
+
 	npages = PAGE_ALIGN(size + umem->offset) >> PAGE_SHIFT;
 
 	down_write(&current->mm->mmap_sem);
@@ -131,7 +144,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 		ret = get_user_pages(current, current->mm, cur_base,
 				     min_t(int, npages,
 					   PAGE_SIZE / sizeof (struct page *)),
-				     1, !umem->writable, page_list, NULL);
+				     1, !umem->writable, page_list, vma_list);
 
 		if (ret < 0)
 			goto out;
@@ -152,6 +165,9 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 
 		chunk->nents = min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK);
 		for (i = 0; i < chunk->nents; ++i) {
+			if (vma_list &&
+			    !is_vm_hugetlb_page(vma_list[i + off]))
+				umem->hugetlb = 0;
 			chunk->page_list[i].page   = page_list[i + off];
 			chunk->page_list[i].offset = 0;
 			chunk->page_list[i].length = PAGE_SIZE;
@@ -186,6 +202,8 @@ out:
 		current->mm->locked_vm = locked;
 
 	up_write(&current->mm->mmap_sem);
+	if (vma_list)
+		free_page((unsigned long) vma_list);
 	free_page((unsigned long) page_list);
 
 	return ret < 0 ? ERR_PTR(ret) : umem;
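
The change above boils down to one reusable pattern: hand get_user_pages() a scratch array so it also returns the VMA backing each pinned page, then clear the hugetlb flag as soon as any VMA fails is_vm_hugetlb_page(). Below is a minimal sketch of that pattern in isolation, assuming the 2.6.23-era eight-argument get_user_pages() visible in the hunk above; pin_and_check_hugetlb() is a hypothetical name for illustration, not a function added by this patch.

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>

/*
 * Hypothetical helper (not part of the patch) showing the detection
 * pattern in isolation: pin user pages starting at 'addr' and report
 * via *hugetlb whether every pinned page sat in a hugetlb VMA.
 * Returns the number of pages pinned, or a negative errno.
 */
static int pin_and_check_hugetlb(unsigned long addr, int npages,
				 struct page **page_list, int *hugetlb)
{
	struct vm_area_struct **vma_list;
	int ret, i;

	/* Assume hugetlb until a non-hugetlb VMA proves otherwise */
	*hugetlb = 1;

	/*
	 * As in the patch, a failed scratch allocation is not fatal;
	 * just pessimize the flag and skip the per-VMA check.
	 */
	vma_list = (struct vm_area_struct **) __get_free_page(GFP_KERNEL);
	if (!vma_list)
		*hugetlb = 0;

	/* One scratch page holds only so many VMA pointers per call */
	npages = min_t(int, npages,
		       PAGE_SIZE / sizeof (struct vm_area_struct *));

	down_write(&current->mm->mmap_sem);
	/* 2.6.23-era signature: tsk, mm, start, len, write, force, pages, vmas */
	ret = get_user_pages(current, current->mm, addr, npages,
			     1, 0, page_list, vma_list);
	up_write(&current->mm->mmap_sem);

	if (ret > 0 && vma_list)
		for (i = 0; i < ret; ++i)
			if (!is_vm_hugetlb_page(vma_list[i]))
				*hugetlb = 0;

	if (vma_list)
		free_page((unsigned long) vma_list);
	return ret;
}

The design choice worth noting is that umem->hugetlb is only a hint: every failure path (no scratch page for vma_list, or a single non-hugetlb VMA) degrades the flag to 0 rather than failing the registration, so a driver consulting it can at worst miss an optimization, never see a false positive.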