```
author    Linus Torvalds <torvalds@linux-foundation.org>  2008-10-28 12:45:31 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-10-28 12:45:31 -0400
commit    f8245e91a5121acc435e509aa56cd04d445a74c7 (patch)
tree      2228dd5b184174ee2091be77f0b5b71640f74e5c /arch
parent    b30fc14c5c94728baa42b7c17d83ea17185b5c40 (diff)
parent    60817c9b31ef7897d60bca2f384cbc316a3fdd8b (diff)
```
Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
x86, memory hotplug: remove wrong -1 in calling init_memory_mapping()
x86: keep the /proc/meminfo page count correct
x86/uv: memory allocation at initialization
xen: fix Xen domU boot with batched mprotect
Diffstat (limited to 'arch')

```
 arch/x86/kernel/genx2apic_uv_x.c |  7
 arch/x86/mm/init_64.c            | 14
 arch/x86/xen/mmu.c               | 18
 3 files changed, 27 insertions(+), 12 deletions(-)
```
```diff
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c
index 680a06557c5e..2c7dbdb98278 100644
--- a/arch/x86/kernel/genx2apic_uv_x.c
+++ b/arch/x86/kernel/genx2apic_uv_x.c
@@ -15,7 +15,6 @@
 #include <linux/ctype.h>
 #include <linux/init.h>
 #include <linux/sched.h>
-#include <linux/bootmem.h>
 #include <linux/module.h>
 #include <linux/hardirq.h>
 #include <asm/smp.h>
@@ -398,16 +397,16 @@ void __init uv_system_init(void)
 	printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());
 
 	bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
-	uv_blade_info = alloc_bootmem_pages(bytes);
+	uv_blade_info = kmalloc(bytes, GFP_KERNEL);
 
 	get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);
 
 	bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes();
-	uv_node_to_blade = alloc_bootmem_pages(bytes);
+	uv_node_to_blade = kmalloc(bytes, GFP_KERNEL);
 	memset(uv_node_to_blade, 255, bytes);
 
 	bytes = sizeof(uv_cpu_to_blade[0]) * num_possible_cpus();
-	uv_cpu_to_blade = alloc_bootmem_pages(bytes);
+	uv_cpu_to_blade = kmalloc(bytes, GFP_KERNEL);
 	memset(uv_cpu_to_blade, 255, bytes);
 
 	blade = 0;
```
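The UV hunks swap boot-time allocations for slab allocations: by the time uv_system_init() runs, the bootmem allocator has already been retired, so alloc_bootmem_pages() is no longer valid there. A minimal sketch of the resulting pattern follows; uv_alloc_blade_table() is a hypothetical stand-in for the three allocations in the patch, and the NULL check is my addition (the patch itself relies on these early allocations succeeding).

```c
#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical helper sketching the post-patch allocation pattern. */
static short *uv_alloc_blade_table(size_t entries)
{
	size_t bytes = entries * sizeof(short);
	/* GFP_KERNEL is safe here: we are past early boot and slab is live */
	short *table = kmalloc(bytes, GFP_KERNEL);

	if (!table)
		return NULL;
	memset(table, 255, bytes);	/* 255 marks "no blade assigned", as in the diff */
	return table;
}
```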
```diff
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index b8e461d49412..f79a02f64d10 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -350,8 +350,10 @@ phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
 		 * pagetable pages as RO. So assume someone who pre-setup
 		 * these mappings are more intelligent.
 		 */
-		if (pte_val(*pte))
+		if (pte_val(*pte)) {
+			pages++;
 			continue;
+		}
 
 		if (0)
 			printk(" pte=%p addr=%lx pte=%016lx\n",
@@ -418,8 +420,10 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 			 * not differ with respect to page frame and
 			 * attributes.
 			 */
-			if (page_size_mask & (1 << PG_LEVEL_2M))
+			if (page_size_mask & (1 << PG_LEVEL_2M)) {
+				pages++;
 				continue;
+			}
 			new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
 		}
 
@@ -499,8 +503,10 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 			 * not differ with respect to page frame and
 			 * attributes.
 			 */
-			if (page_size_mask & (1 << PG_LEVEL_1G))
+			if (page_size_mask & (1 << PG_LEVEL_1G)) {
+				pages++;
 				continue;
+			}
 			prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
 		}
 
@@ -831,7 +837,7 @@ int arch_add_memory(int nid, u64 start, u64 size)
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 	int ret;
 
-	last_mapped_pfn = init_memory_mapping(start, start + size-1);
+	last_mapped_pfn = init_memory_mapping(start, start + size);
 	if (last_mapped_pfn > max_pfn_mapped)
 		max_pfn_mapped = last_mapped_pfn;
 
```
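Two distinct fixes land in init_64.c. The pages++ hunks keep the direct-map statistics honest: when an entry is already populated, or an existing huge mapping is reused as-is, the loop skips the mapping work but must still count the range toward the total that eventually reaches /proc/meminfo via update_page_count(). The arch_add_memory() hunk removes a wrong -1: init_memory_mapping() treats its end argument as exclusive, so start + size is the correct bound and start + size - 1 silently dropped the last page of hot-plugged memory. Below is a condensed sketch of the counting idiom; map_range() is a hypothetical stand-in for the phys_*_init() helpers, not code from the patch.

```c
/* Hypothetical, condensed version of the phys_*_init() counting fix. */
static unsigned long map_range(pte_t *pte, unsigned long addr,
			       unsigned long end)
{
	unsigned long pages = 0;

	for (; addr < end; addr += PAGE_SIZE, pte++) {
		if (pte_val(*pte)) {
			pages++;	/* already mapped: still counts */
			continue;	/* pre-patch code skipped the count here */
		}
		set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL));
		pages++;
	}
	return pages;	/* the real helpers feed this to update_page_count() */
}
```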
```diff
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index d4d52f5a1cf7..aba77b2b7d18 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -246,11 +246,21 @@ xmaddr_t arbitrary_virt_to_machine(void *vaddr)
 {
 	unsigned long address = (unsigned long)vaddr;
 	unsigned int level;
-	pte_t *pte = lookup_address(address, &level);
-	unsigned offset = address & ~PAGE_MASK;
+	pte_t *pte;
+	unsigned offset;
 
-	BUG_ON(pte == NULL);
+	/*
+	 * if the PFN is in the linear mapped vaddr range, we can just use
+	 * the (quick) virt_to_machine() p2m lookup
+	 */
+	if (virt_addr_valid(vaddr))
+		return virt_to_machine(vaddr);
 
+	/* otherwise we have to do a (slower) full page-table walk */
+
+	pte = lookup_address(address, &level);
+	BUG_ON(pte == NULL);
+	offset = address & ~PAGE_MASK;
 	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
 }
 
@@ -410,7 +420,7 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
 
 	xen_mc_batch();
 
-	u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
+	u.ptr = arbitrary_virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
 	u.val = pte_val_ma(pte);
 	xen_extend_mmu_update(&u);
 
```
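The Xen fix addresses domU boot failures with batched mprotect: the PTE pointer handed to xen_ptep_modify_prot_commit() can live in a kmap_atomic() or vmalloc mapping rather than the kernel's linear map, and virt_to_machine(), which translates via __pa(), is only valid for linearly mapped addresses. arbitrary_virt_to_machine() therefore gains a fast-path/slow-path split. The sketch below is a condensed restatement of the patched function above (with the original comments shortened), not new code; it shows the two translation paths side by side.

```c
/* Condensed restatement of the patched arbitrary_virt_to_machine(). */
xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte;

	if (virt_addr_valid(vaddr))		/* linear map: cheap p2m lookup */
		return virt_to_machine(vaddr);

	pte = lookup_address(address, &level);	/* anything else: walk the page tables */
	BUG_ON(pte == NULL);
	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) +
		      (address & ~PAGE_MASK));
}
```

With that in place, the one-line change in xen_ptep_modify_prot_commit() makes the batched MMU update safe regardless of how the PTE page happens to be mapped.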
