Diffstat (limited to 'arch/sparc64/kernel/iommu.c')
-rw-r--r-- | arch/sparc64/kernel/iommu.c | 33
1 file changed, 21 insertions, 12 deletions
diff --git a/arch/sparc64/kernel/iommu.c b/arch/sparc64/kernel/iommu.c
index 756fa24eeefa..2a37a6ca2a16 100644
--- a/arch/sparc64/kernel/iommu.c
+++ b/arch/sparc64/kernel/iommu.c
@@ -173,9 +173,11 @@ void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long np
 }
 
 int iommu_table_init(struct iommu *iommu, int tsbsize,
-		     u32 dma_offset, u32 dma_addr_mask)
+		     u32 dma_offset, u32 dma_addr_mask,
+		     int numa_node)
 {
-	unsigned long i, tsbbase, order, sz, num_tsb_entries;
+	unsigned long i, order, sz, num_tsb_entries;
+	struct page *page;
 
 	num_tsb_entries = tsbsize / sizeof(iopte_t);
 
@@ -188,11 +190,12 @@ int iommu_table_init(struct iommu *iommu, int tsbsize,
 	/* Allocate and initialize the free area map.  */
 	sz = num_tsb_entries / 8;
 	sz = (sz + 7UL) & ~7UL;
-	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
+	iommu->arena.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
 	if (!iommu->arena.map) {
 		printk(KERN_ERR "IOMMU: Error, kmalloc(arena.map) failed.\n");
 		return -ENOMEM;
 	}
+	memset(iommu->arena.map, 0, sz);
 	iommu->arena.limit = num_tsb_entries;
 
 	if (tlb_type != hypervisor)
@@ -201,21 +204,23 @@ int iommu_table_init(struct iommu *iommu, int tsbsize,
 	/* Allocate and initialize the dummy page which we
 	 * set inactive IO PTEs to point to.
 	 */
-	iommu->dummy_page = get_zeroed_page(GFP_KERNEL);
-	if (!iommu->dummy_page) {
+	page = alloc_pages_node(numa_node, GFP_KERNEL, 0);
+	if (!page) {
 		printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
 		goto out_free_map;
 	}
+	iommu->dummy_page = (unsigned long) page_address(page);
+	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
 	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
 
 	/* Now allocate and setup the IOMMU page table itself.  */
 	order = get_order(tsbsize);
-	tsbbase = __get_free_pages(GFP_KERNEL, order);
-	if (!tsbbase) {
+	page = alloc_pages_node(numa_node, GFP_KERNEL, order);
+	if (!page) {
 		printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
 		goto out_free_dummy_page;
 	}
-	iommu->page_table = (iopte_t *)tsbbase;
+	iommu->page_table = (iopte_t *)page_address(page);
 
 	for (i = 0; i < num_tsb_entries; i++)
 		iopte_make_dummy(iommu, &iommu->page_table[i]);
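The hunks above change iommu_table_init() to take a numa_node argument and allocate the arena bitmap, the dummy page, and the TSB on that node: kzalloc() becomes kmalloc_node(), get_zeroed_page() and __get_free_pages() become alloc_pages_node() plus page_address(), and the zeroing the old helpers did implicitly is now an explicit memset(). A minimal sketch of that pattern follows; "example_table" and "example_table_init" are hypothetical names for illustration, only the allocator calls themselves are real kernel interfaces.

/*
 * Sketch of node-local allocation followed by explicit zeroing, as in
 * the hunks above; not the patch itself.
 */
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>

struct example_table {
	unsigned long	*map;		/* small bitmap, kmalloc'd     */
	void		*entries;	/* larger table, whole pages   */
	int		entries_order;
};

static int example_table_init(struct example_table *t, size_t map_bytes,
			      size_t table_bytes, int numa_node)
{
	struct page *page;

	/* Small object: node-preferred kmalloc, then explicit zeroing,
	 * since kmalloc_node() does not clear the memory. */
	t->map = kmalloc_node(map_bytes, GFP_KERNEL, numa_node);
	if (!t->map)
		return -ENOMEM;
	memset(t->map, 0, map_bytes);

	/* Page-sized object: alloc_pages_node() + page_address(),
	 * again followed by an explicit memset(). */
	t->entries_order = get_order(table_bytes);
	page = alloc_pages_node(numa_node, GFP_KERNEL, t->entries_order);
	if (!page) {
		kfree(t->map);
		return -ENOMEM;
	}
	t->entries = page_address(page);
	memset(t->entries, 0, PAGE_SIZE << t->entries_order);

	return 0;
}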
@@ -276,20 +281,24 @@ static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
 static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
 				   dma_addr_t *dma_addrp, gfp_t gfp)
 {
+	unsigned long flags, order, first_page;
 	struct iommu *iommu;
+	struct page *page;
+	int npages, nid;
 	iopte_t *iopte;
-	unsigned long flags, order, first_page;
 	void *ret;
-	int npages;
 
 	size = IO_PAGE_ALIGN(size);
 	order = get_order(size);
 	if (order >= 10)
 		return NULL;
 
-	first_page = __get_free_pages(gfp, order);
-	if (first_page == 0UL)
+	nid = dev->archdata.numa_node;
+	page = alloc_pages_node(nid, gfp, order);
+	if (unlikely(!page))
 		return NULL;
+
+	first_page = (unsigned long) page_address(page);
 	memset((char *)first_page, 0, PAGE_SIZE << order);
 
 	iommu = dev->archdata.iommu;
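The last hunk applies the same idea to dma_4u_alloc_coherent(): the backing pages for a coherent DMA buffer are requested from the device's own node, read from dev->archdata.numa_node, via alloc_pages_node(), with page_address() recovering the kernel virtual address the rest of the function already expects in first_page. A reduced sketch of just that front end; "example_alloc_on_node" is a made-up helper, and the IOMMU mapping the real routine goes on to do is omitted.

/*
 * Sketch of the NUMA-aware allocation front end shown above.
 */
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/string.h>

static void *example_alloc_on_node(int nid, size_t size, gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page;
	void *virt;

	/* Prefer pages on the given node; the allocator may still fall
	 * back to other nodes when that node has no free memory. */
	page = alloc_pages_node(nid, gfp, order);
	if (!page)
		return NULL;

	virt = page_address(page);
	memset(virt, 0, PAGE_SIZE << order);
	return virt;
}

The matching free side needs no node awareness: __free_pages() hands a page back to the zone it was allocated from, so freeing works the same regardless of which node ended up backing the buffer.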