author     Olof Johansson <olof@lixom.net>  2012-09-13 01:00:07 -0400
committer  Olof Johansson <olof@lixom.net>  2012-09-13 01:00:07 -0400
commit     2bc733e8b443e77db5bb18e1bb8f1c535a92eb8c (patch)
tree       a6985ec4a761efbf2bdbaf7e75110669bf52bc15 /arch
parent     f5a60d4efc7a3928d6e7d37202a8560ce76cb838 (diff)
parent     48540058612786d365602f3324ed97f9071092de (diff)
Merge tag 'imx-fixes' of git://git.pengutronix.de/git/imx/linux-2.6 into fixes

ARM: i.MX: Fix SSI clock associations for i.MX25/i.MX35

* tag 'imx-fixes' of git://git.pengutronix.de/git/imx/linux-2.6:
  ARM: clk-imx35: Fix SSI clock registration
  ARM: clk-imx25: Fix SSI clock registration
  + Linux 3.6-rc5
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/Kconfig                    |   2
-rw-r--r--  arch/arm/include/asm/dma-mapping.h  |   7
-rw-r--r--  arch/arm/mach-imx/clk-imx25.c       |   6
-rw-r--r--  arch/arm/mach-imx/clk-imx35.c       |   6
-rw-r--r--  arch/arm/mach-kirkwood/common.c     |   7
-rw-r--r--  arch/arm/mm/dma-mapping.c           | 114
-rw-r--r--  arch/x86/xen/mmu.c                  |   2
-rw-r--r--  arch/x86/xen/p2m.c                  |   2
8 files changed, 125 insertions, 21 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index c5f9ae5dbd1a..2f88d8d97701 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -6,7 +6,7 @@ config ARM
 	select HAVE_DMA_API_DEBUG
 	select HAVE_IDE if PCI || ISA || PCMCIA
 	select HAVE_DMA_ATTRS
-	select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7)
+	select HAVE_DMA_CONTIGUOUS if MMU
 	select HAVE_MEMBLOCK
 	select RTC_LIB
 	select SYS_SUPPORTS_APM_EMULATION
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 2ae842df4551..5c44dcb0987b 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -203,6 +203,13 @@ static inline void dma_free_writecombine(struct device *dev, size_t size,
 }
 
 /*
+ * This can be called during early boot to increase the size of the atomic
+ * coherent DMA pool above the default value of 256KiB. It must be called
+ * before postcore_initcall.
+ */
+extern void __init init_dma_coherent_pool_size(unsigned long size);
+
+/*
  * This can be called during boot to increase the size of the consistent
  * DMA region above it's default value of 2MB. It must be called before the
  * memory allocator is initialised, i.e. before any core_initcall.
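
The new hook is intended to be called from a platform's early init code, before the pool is actually allocated at postcore_initcall time. A minimal sketch of a caller, with an invented board hook name (the Kirkwood hunk later in this merge does exactly this from kirkwood_init_early()):

    /* Hypothetical board file: must run before postcore_initcall, or the
     * BUG_ON() in init_dma_coherent_pool_size() fires. */
    #include <linux/dma-mapping.h>
    #include <linux/sizes.h>

    static void __init my_board_init_early(void)	/* invented name */
    {
    	init_dma_coherent_pool_size(SZ_1M);
    }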
diff --git a/arch/arm/mach-imx/clk-imx25.c b/arch/arm/mach-imx/clk-imx25.c
index fdd8cc87c9fe..4431a62fff5b 100644
--- a/arch/arm/mach-imx/clk-imx25.c
+++ b/arch/arm/mach-imx/clk-imx25.c
@@ -222,10 +222,8 @@ int __init mx25_clocks_init(void)
 	clk_register_clkdev(clk[lcdc_ipg], "ipg", "imx-fb.0");
 	clk_register_clkdev(clk[lcdc_ahb], "ahb", "imx-fb.0");
 	clk_register_clkdev(clk[wdt_ipg], NULL, "imx2-wdt.0");
-	clk_register_clkdev(clk[ssi1_ipg_per], "per", "imx-ssi.0");
-	clk_register_clkdev(clk[ssi1_ipg], "ipg", "imx-ssi.0");
-	clk_register_clkdev(clk[ssi2_ipg_per], "per", "imx-ssi.1");
-	clk_register_clkdev(clk[ssi2_ipg], "ipg", "imx-ssi.1");
+	clk_register_clkdev(clk[ssi1_ipg], NULL, "imx-ssi.0");
+	clk_register_clkdev(clk[ssi2_ipg], NULL, "imx-ssi.1");
 	clk_register_clkdev(clk[esdhc1_ipg_per], "per", "sdhci-esdhc-imx25.0");
 	clk_register_clkdev(clk[esdhc1_ipg], "ipg", "sdhci-esdhc-imx25.0");
 	clk_register_clkdev(clk[esdhc1_ahb], "ahb", "sdhci-esdhc-imx25.0");
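
Why the con_id switch to NULL matters: clkdev matches a clk_get() request against the (dev_id, con_id) pair each clock was registered under, and only a NULL con_id entry satisfies a NULL con_id lookup. Assuming the imx-ssi driver requests its clock with no connection id (a reading of this fix, not verified against the driver source), the old "per"/"ipg" entries could never match. A hedged sketch of the driver-side lookup:

    /* Sketch only: imx_ssi_probe_sketch is an invented name; the real
     * driver's probe is assumed to do an equivalent clk_get(). */
    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/platform_device.h>

    static int imx_ssi_probe_sketch(struct platform_device *pdev)
    {
    	struct clk *clk;

    	clk = clk_get(&pdev->dev, NULL);	/* matches con_id == NULL */
    	if (IS_ERR(clk))
    		return PTR_ERR(clk);
    	return clk_prepare_enable(clk);		/* ungate the SSI clock */
    }

The i.MX35 hunk below makes the same change, swapping the "ipg"/"per" pairs for a single gate clock registered with a NULL con_id.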
diff --git a/arch/arm/mach-imx/clk-imx35.c b/arch/arm/mach-imx/clk-imx35.c
index c6422fb10bae..65fb8bcd86cb 100644
--- a/arch/arm/mach-imx/clk-imx35.c
+++ b/arch/arm/mach-imx/clk-imx35.c
@@ -230,10 +230,8 @@ int __init mx35_clocks_init()
 	clk_register_clkdev(clk[ipu_gate], NULL, "mx3_sdc_fb");
 	clk_register_clkdev(clk[owire_gate], NULL, "mxc_w1");
 	clk_register_clkdev(clk[sdma_gate], NULL, "imx35-sdma");
-	clk_register_clkdev(clk[ipg], "ipg", "imx-ssi.0");
-	clk_register_clkdev(clk[ssi1_div_post], "per", "imx-ssi.0");
-	clk_register_clkdev(clk[ipg], "ipg", "imx-ssi.1");
-	clk_register_clkdev(clk[ssi2_div_post], "per", "imx-ssi.1");
+	clk_register_clkdev(clk[ssi1_gate], NULL, "imx-ssi.0");
+	clk_register_clkdev(clk[ssi2_gate], NULL, "imx-ssi.1");
 	/* i.mx35 has the i.mx21 type uart */
 	clk_register_clkdev(clk[uart1_gate], "per", "imx21-uart.0");
 	clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.0");
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
index 3226077735b1..1201191d7f1b 100644
--- a/arch/arm/mach-kirkwood/common.c
+++ b/arch/arm/mach-kirkwood/common.c
@@ -517,6 +517,13 @@ void __init kirkwood_wdt_init(void)
 void __init kirkwood_init_early(void)
 {
 	orion_time_set_base(TIMER_VIRT_BASE);
+
+	/*
+	 * Some Kirkwood devices allocate their coherent buffers from atomic
+	 * context. Increase size of atomic coherent pool to make sure such
+	 * the allocations won't fail.
+	 */
+	init_dma_coherent_pool_size(SZ_1M);
 }
 
 int kirkwood_tclk;
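
For context on why Kirkwood enlarges the pool: a coherent allocation made with GFP_ATOMIC cannot sleep or remap memory on demand, so it is served from this preallocated pool and fails outright once the pool is exhausted. A hypothetical driver-side fragment showing the allocation shape this protects (function name and size invented):

    #include <linux/dma-mapping.h>
    #include <linux/sizes.h>

    static int setup_ring_atomic(struct device *dev, dma_addr_t *handle)
    {
    	void *buf;

    	/* No sleeping allowed here, so this draws on the atomic pool. */
    	buf = dma_alloc_coherent(dev, SZ_4K, handle, GFP_ATOMIC);
    	if (!buf)
    		return -ENOMEM;
    	return 0;
    }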
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 4e7d1182e8a3..051204fc4617 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -267,17 +267,19 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
 	vunmap(cpu_addr);
 }
 
+#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
+
 struct dma_pool {
 	size_t size;
 	spinlock_t lock;
 	unsigned long *bitmap;
 	unsigned long nr_pages;
 	void *vaddr;
-	struct page *page;
+	struct page **pages;
 };
 
 static struct dma_pool atomic_pool = {
-	.size = SZ_256K,
+	.size = DEFAULT_DMA_COHERENT_POOL_SIZE,
 };
 
 static int __init early_coherent_pool(char *p)
@@ -287,6 +289,21 @@ static int __init early_coherent_pool(char *p)
 }
 early_param("coherent_pool", early_coherent_pool);
 
+void __init init_dma_coherent_pool_size(unsigned long size)
+{
+	/*
+	 * Catch any attempt to set the pool size too late.
+	 */
+	BUG_ON(atomic_pool.vaddr);
+
+	/*
+	 * Set architecture specific coherent pool size only if
+	 * it has not been changed by kernel command line parameter.
+	 */
+	if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE)
+		atomic_pool.size = size;
+}
+
 /*
  * Initialise the coherent pool for atomic allocations.
  */
@@ -297,6 +314,7 @@ static int __init atomic_pool_init(void)
 	unsigned long nr_pages = pool->size >> PAGE_SHIFT;
 	unsigned long *bitmap;
 	struct page *page;
+	struct page **pages;
 	void *ptr;
 	int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);
 
@@ -304,21 +322,31 @@ static int __init atomic_pool_init(void)
 	if (!bitmap)
 		goto no_bitmap;
 
+	pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
+	if (!pages)
+		goto no_pages;
+
 	if (IS_ENABLED(CONFIG_CMA))
 		ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
 	else
 		ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
 					   &page, NULL);
 	if (ptr) {
+		int i;
+
+		for (i = 0; i < nr_pages; i++)
+			pages[i] = page + i;
+
 		spin_lock_init(&pool->lock);
 		pool->vaddr = ptr;
-		pool->page = page;
+		pool->pages = pages;
 		pool->bitmap = bitmap;
 		pool->nr_pages = nr_pages;
 		pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
 			(unsigned)pool->size / 1024);
 		return 0;
 	}
+no_pages:
 	kfree(bitmap);
 no_bitmap:
 	pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
@@ -443,27 +471,45 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
 	if (pageno < pool->nr_pages) {
 		bitmap_set(pool->bitmap, pageno, count);
 		ptr = pool->vaddr + PAGE_SIZE * pageno;
-		*ret_page = pool->page + pageno;
+		*ret_page = pool->pages[pageno];
+	} else {
+		pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
+			    "Please increase it with coherent_pool= kernel parameter!\n",
+			    (unsigned)pool->size / 1024);
 	}
 	spin_unlock_irqrestore(&pool->lock, flags);
 
 	return ptr;
 }
 
+static bool __in_atomic_pool(void *start, size_t size)
+{
+	struct dma_pool *pool = &atomic_pool;
+	void *end = start + size;
+	void *pool_start = pool->vaddr;
+	void *pool_end = pool->vaddr + pool->size;
+
+	if (start < pool_start || start > pool_end)
+		return false;
+
+	if (end <= pool_end)
+		return true;
+
+	WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n",
+	     start, end - 1, pool_start, pool_end - 1);
+
+	return false;
+}
+
 static int __free_from_pool(void *start, size_t size)
 {
 	struct dma_pool *pool = &atomic_pool;
 	unsigned long pageno, count;
 	unsigned long flags;
 
-	if (start < pool->vaddr || start > pool->vaddr + pool->size)
+	if (!__in_atomic_pool(start, size))
 		return 0;
 
-	if (start + size > pool->vaddr + pool->size) {
-		WARN(1, "freeing wrong coherent size from pool\n");
-		return 0;
-	}
-
 	pageno = (start - pool->vaddr) >> PAGE_SHIFT;
 	count = size >> PAGE_SHIFT;
 
@@ -1090,10 +1136,22 @@ static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
 	return 0;
 }
 
+static struct page **__atomic_get_pages(void *addr)
+{
+	struct dma_pool *pool = &atomic_pool;
+	struct page **pages = pool->pages;
+	int offs = (addr - pool->vaddr) >> PAGE_SHIFT;
+
+	return pages + offs;
+}
+
 static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
 {
 	struct vm_struct *area;
 
+	if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
+		return __atomic_get_pages(cpu_addr);
+
 	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
 		return cpu_addr;
 
@@ -1103,6 +1161,34 @@ static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
 	return NULL;
 }
 
+static void *__iommu_alloc_atomic(struct device *dev, size_t size,
+				  dma_addr_t *handle)
+{
+	struct page *page;
+	void *addr;
+
+	addr = __alloc_from_pool(size, &page);
+	if (!addr)
+		return NULL;
+
+	*handle = __iommu_create_mapping(dev, &page, size);
+	if (*handle == DMA_ERROR_CODE)
+		goto err_mapping;
+
+	return addr;
+
+err_mapping:
+	__free_from_pool(addr, size);
+	return NULL;
+}
+
+static void __iommu_free_atomic(struct device *dev, struct page **pages,
+				dma_addr_t handle, size_t size)
+{
+	__iommu_remove_mapping(dev, handle, size);
+	__free_from_pool(page_address(pages[0]), size);
+}
+
 static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
 	    dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
 {
@@ -1113,6 +1199,9 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
 	*handle = DMA_ERROR_CODE;
 	size = PAGE_ALIGN(size);
 
+	if (gfp & GFP_ATOMIC)
+		return __iommu_alloc_atomic(dev, size, handle);
+
 	pages = __iommu_alloc_buffer(dev, size, gfp);
 	if (!pages)
 		return NULL;
@@ -1179,6 +1268,11 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 		return;
 	}
 
+	if (__in_atomic_pool(cpu_addr, size)) {
+		__iommu_free_atomic(dev, pages, handle, size);
+		return;
+	}
+
 	if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
 		unmap_kernel_range((unsigned long)cpu_addr, size);
 		vunmap(cpu_addr);
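
Taken together, the dma-mapping.c changes turn the atomic pool into a page-granular bitmap allocator whose backing pages are tracked individually in pool->pages, which is what lets the IOMMU path hand out page arrays. The allocation scheme, sketched around the __alloc_from_pool() hunk above under the assumption that the unshown search uses bitmap_find_next_zero_area() (the helper such pool allocators normally use):

    /* Sketch of __alloc_from_pool()'s bookkeeping; only the lines shown
     * in the hunk above are confirmed, the search call is assumed. */
    count = PAGE_ALIGN(size) >> PAGE_SHIFT;
    spin_lock_irqsave(&pool->lock, flags);
    pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages,
    				    0, count, 0);
    if (pageno < pool->nr_pages) {
    	bitmap_set(pool->bitmap, pageno, count);	/* claim the run */
    	ptr = pool->vaddr + PAGE_SIZE * pageno;		/* kernel vaddr  */
    	*ret_page = pool->pages[pageno];		/* first page    */
    }
    spin_unlock_irqrestore(&pool->lock, flags);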
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index b65a76133f4f..5141d808e751 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1283,7 +1283,7 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
 	cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
 
 	args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
-	if (start != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
+	if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
 		args->op.cmd = MMUEXT_INVLPG_MULTI;
 		args->op.arg1.linear_addr = start;
 	}
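
A hedged reading of this one-liner, since the callers are not shown here: TLB_FLUSH_ALL is the x86 sentinel value (-1UL) requesting a full flush, and it is carried in end, so testing start could not reliably detect a full-flush request before downgrading the hypercall to a single-page invalidation:

    /* Fixed condition, annotated (reading of the patch, not the callers):
     * downgrade to MMUEXT_INVLPG_MULTI only when `end` is a real address,
     * i.e. not the TLB_FLUSH_ALL sentinel, and the range is one page. */
    if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
    	args->op.cmd = MMUEXT_INVLPG_MULTI;	/* invalidate one page */
    	args->op.arg1.linear_addr = start;
    }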
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index d4b255463253..76ba0e97e530 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -599,7 +599,7 @@ bool __init early_can_reuse_p2m_middle(unsigned long set_pfn, unsigned long set_mfn)
 	if (p2m_index(set_pfn))
 		return false;
 
-	for (pfn = 0; pfn <= MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) {
+	for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) {
 		topidx = p2m_top_index(pfn);
 
 		if (!p2m_top[topidx])
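
The p2m change is a plain off-by-one fix: assuming p2m_top[] is sized for pfns strictly below MAX_DOMAIN_PAGES, the old <= bound let the final iteration compute p2m_top_index(MAX_DOMAIN_PAGES) and index one slot past the end of the array:

    /* General pattern: an array covering indices [0, N) is walked
     * with `<`; `<=` touches element N, one past the end. */
    for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE)
    	topidx = p2m_top_index(pfn);	/* always in range now */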