author     Len Brown <len.brown@intel.com>    2011-03-23 02:33:54 -0400
committer  Len Brown <len.brown@intel.com>    2011-03-23 02:33:54 -0400
commit     5c129a8600100a5d0f5fdbc1014c5dba1d307bc4 (patch)
tree       9877a14b49cff43d0ba10c12f407ec551c77daa5 /arch/x86
parent     797b10a07069e153d41aedb4ae8e76660279e2ee (diff)
parent     521cb40b0c44418a4fd36dc633f575813d59a43d (diff)

Merge commit 'v2.6.38' into release
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/boot/compressed/mkpiggy.c        |  7
-rw-r--r--  arch/x86/include/asm/ce4100.h             |  6
-rw-r--r--  arch/x86/include/asm/msr-index.h          |  5
-rw-r--r--  arch/x86/include/asm/uv/uv_bau.h          |  2
-rw-r--r--  arch/x86/kernel/check.c                   |  8
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/p4-clockmod.c |  6
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c |  2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 13
-rw-r--r--  arch/x86/mm/fault.c                       | 14
-rw-r--r--  arch/x86/mm/init_64.c                     |  6
-rw-r--r--  arch/x86/mm/numa_64.c                     |  6
-rw-r--r--  arch/x86/mm/pageattr.c                    | 18
-rw-r--r--  arch/x86/mm/pgtable.c                     | 11
-rw-r--r--  arch/x86/pci/ce4100.c                     |  7
-rw-r--r--  arch/x86/platform/ce4100/ce4100.c         |  2
-rw-r--r--  arch/x86/platform/olpc/olpc_dt.c          |  3
-rw-r--r--  arch/x86/platform/uv/tlb_uv.c             |  4
-rw-r--r--  arch/x86/xen/mmu.c                        | 10
18 files changed, 75 insertions(+), 55 deletions(-)
diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c
index 646aa78ba5fd..46a823882437 100644
--- a/arch/x86/boot/compressed/mkpiggy.c
+++ b/arch/x86/boot/compressed/mkpiggy.c
@@ -62,7 +62,12 @@ int main(int argc, char *argv[])
 	if (fseek(f, -4L, SEEK_END)) {
 		perror(argv[1]);
 	}
-	fread(&olen, sizeof olen, 1, f);
+
+	if (fread(&olen, sizeof(olen), 1, f) != 1) {
+		perror(argv[1]);
+		return 1;
+	}
+
 	ilen = ftell(f);
 	olen = getle32(&olen);
 	fclose(f);
diff --git a/arch/x86/include/asm/ce4100.h b/arch/x86/include/asm/ce4100.h
new file mode 100644
index 000000000000..e656ad8c0a2e
--- /dev/null
+++ b/arch/x86/include/asm/ce4100.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_CE4100_H_
+#define _ASM_CE4100_H_
+
+int ce4100_pci_init(void);
+
+#endif
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 4d0dfa0d998e..43a18c77676d 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -36,6 +36,11 @@
 #define MSR_IA32_PERFCTR1	0x000000c2
 #define MSR_FSB_FREQ		0x000000cd
 
+#define MSR_NHM_SNB_PKG_CST_CFG_CTL	0x000000e2
+#define NHM_C3_AUTO_DEMOTE	(1UL << 25)
+#define NHM_C1_AUTO_DEMOTE	(1UL << 26)
+#define ATM_LNC_C6_AUTO_DEMOTE	(1UL << 25)
+
 #define MSR_MTRRcap		0x000000fe
 #define MSR_IA32_BBL_CR_CTL	0x00000119
 
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index ce1d54c8a433..3e094af443c3 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -176,7 +176,7 @@ struct bau_msg_payload {
 struct bau_msg_header {
 	unsigned int dest_subnodeid:6;	/* must be 0x10, for the LB */
 	/* bits 5:0 */
-	unsigned int base_dest_nodeid:15; /* nasid (pnode<<1) of */
+	unsigned int base_dest_nodeid:15; /* nasid of the */
 	/* bits 20:6 */			  /* first bit in uvhub map */
 	unsigned int command:8;	/* message type */
 	/* bits 28:21 */
diff --git a/arch/x86/kernel/check.c b/arch/x86/kernel/check.c
index 13a389179514..452932d34730 100644
--- a/arch/x86/kernel/check.c
+++ b/arch/x86/kernel/check.c
@@ -106,8 +106,8 @@ void __init setup_bios_corruption_check(void)
 		addr += size;
 	}
 
-	printk(KERN_INFO "Scanning %d areas for low memory corruption\n",
-		num_scan_areas);
+	if (num_scan_areas)
+		printk(KERN_INFO "Scanning %d areas for low memory corruption\n", num_scan_areas);
 }
 
 
@@ -143,12 +143,12 @@ static void check_corruption(struct work_struct *dummy)
 {
 	check_for_bios_corruption();
 	schedule_delayed_work(&bios_check_work,
 		round_jiffies_relative(corruption_check_period*HZ));
 }
 
 static int start_periodic_check_for_corruption(void)
 {
-	if (!memory_corruption_check || corruption_check_period == 0)
+	if (!num_scan_areas || !memory_corruption_check || corruption_check_period == 0)
 		return 0;
 
 	printk(KERN_INFO "Scanning for low memory corruption every %d seconds\n",
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index bd1cac747f67..52c93648e492 100644
--- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
@@ -158,9 +158,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
 {
 	if (c->x86 == 0x06) {
 		if (cpu_has(c, X86_FEATURE_EST))
-			printk(KERN_WARNING PFX "Warning: EST-capable CPU "
-				"detected. The acpi-cpufreq module offers "
-				"voltage scaling in addition of frequency "
+			printk_once(KERN_WARNING PFX "Warning: EST-capable "
+				"CPU detected. The acpi-cpufreq module offers "
+				"voltage scaling in addition to frequency "
 				"scaling. You should use that instead of "
 				"p4-clockmod, if possible.\n");
 		switch (c->x86_model) {
diff --git a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
index 4f6f679f2799..4a5a42b842ad 100644
--- a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
@@ -195,7 +195,7 @@ static unsigned int pcc_get_freq(unsigned int cpu)
 cmd_incomplete:
 	iowrite16(0, &pcch_hdr->status);
 	spin_unlock(&pcc_lock);
-	return -EINVAL;
+	return 0;
 }
 
 static int pcc_cpufreq_target(struct cpufreq_policy *policy,
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 35c7e65e59be..c567dec854f6 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1537,6 +1537,7 @@ static struct notifier_block cpb_nb = {
 static int __cpuinit powernowk8_init(void)
 {
 	unsigned int i, supported_cpus = 0, cpu;
+	int rv;
 
 	for_each_online_cpu(i) {
 		int rc;
@@ -1555,14 +1556,14 @@ static int __cpuinit powernowk8_init(void)
 
 	cpb_capable = true;
 
-	register_cpu_notifier(&cpb_nb);
-
 	msrs = msrs_alloc();
 	if (!msrs) {
 		printk(KERN_ERR "%s: Error allocating msrs!\n", __func__);
 		return -ENOMEM;
 	}
 
+	register_cpu_notifier(&cpb_nb);
+
 	rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
 
 	for_each_cpu(cpu, cpu_online_mask) {
@@ -1574,7 +1575,13 @@ static int __cpuinit powernowk8_init(void)
 			(cpb_enabled ? "on" : "off"));
 	}
 
-	return cpufreq_register_driver(&cpufreq_amd64_driver);
+	rv = cpufreq_register_driver(&cpufreq_amd64_driver);
+	if (rv < 0 && boot_cpu_has(X86_FEATURE_CPB)) {
+		unregister_cpu_notifier(&cpb_nb);
+		msrs_free(msrs);
+		msrs = NULL;
+	}
+	return rv;
 }
 
 /* driver entry point for term */
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 7d90ceb882a4..20e3f8702d1e 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -229,15 +229,14 @@ void vmalloc_sync_all(void)
 	for (address = VMALLOC_START & PMD_MASK;
 	     address >= TASK_SIZE && address < FIXADDR_TOP;
 	     address += PMD_SIZE) {
-
-		unsigned long flags;
 		struct page *page;
 
-		spin_lock_irqsave(&pgd_lock, flags);
+		spin_lock(&pgd_lock);
 		list_for_each_entry(page, &pgd_list, lru) {
 			spinlock_t *pgt_lock;
 			pmd_t *ret;
 
+			/* the pgt_lock only for Xen */
 			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
 
 			spin_lock(pgt_lock);
@@ -247,7 +246,7 @@ void vmalloc_sync_all(void)
 			if (!ret)
 				break;
 		}
-		spin_unlock_irqrestore(&pgd_lock, flags);
+		spin_unlock(&pgd_lock);
 	}
 }
 
@@ -828,6 +827,13 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
 	       unsigned long address, unsigned int fault)
 {
 	if (fault & VM_FAULT_OOM) {
+		/* Kernel mode? Handle exceptions or die: */
+		if (!(error_code & PF_USER)) {
+			up_read(&current->mm->mmap_sem);
+			no_context(regs, error_code, address);
+			return;
+		}
+
 		out_of_memory(regs, error_code, address);
 	} else {
 		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 71a59296af80..c14a5422e152 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -105,18 +105,18 @@ void sync_global_pgds(unsigned long start, unsigned long end)
 
 	for (address = start; address <= end; address += PGDIR_SIZE) {
 		const pgd_t *pgd_ref = pgd_offset_k(address);
-		unsigned long flags;
 		struct page *page;
 
 		if (pgd_none(*pgd_ref))
 			continue;
 
-		spin_lock_irqsave(&pgd_lock, flags);
+		spin_lock(&pgd_lock);
 		list_for_each_entry(page, &pgd_list, lru) {
 			pgd_t *pgd;
 			spinlock_t *pgt_lock;
 
 			pgd = (pgd_t *)page_address(page) + pgd_index(address);
+			/* the pgt_lock only for Xen */
 			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
 			spin_lock(pgt_lock);
 
@@ -128,7 +128,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
 
 			spin_unlock(pgt_lock);
 		}
-		spin_unlock_irqrestore(&pgd_lock, flags);
+		spin_unlock(&pgd_lock);
 	}
 }
 
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 95ea1551eebc..1337c51b07d7 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -780,11 +780,7 @@ void __cpuinit numa_add_cpu(int cpu)
 	int physnid;
 	int nid = NUMA_NO_NODE;
 
-	apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
-	if (apicid != BAD_APICID)
-		nid = apicid_to_node[apicid];
-	if (nid == NUMA_NO_NODE)
-		nid = early_cpu_to_node(cpu);
+	nid = early_cpu_to_node(cpu);
 	BUG_ON(nid == NUMA_NO_NODE || !node_online(nid));
 
 	/*
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index d343b3c81f3c..90825f2eb0f4 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -57,12 +57,10 @@ static unsigned long direct_pages_count[PG_LEVEL_NUM];
 
 void update_page_count(int level, unsigned long pages)
 {
-	unsigned long flags;
-
 	/* Protect against CPA */
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock(&pgd_lock);
 	direct_pages_count[level] += pages;
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock(&pgd_lock);
 }
 
 static void split_page_count(int level)
@@ -394,7 +392,7 @@ static int
 try_preserve_large_page(pte_t *kpte, unsigned long address,
 			struct cpa_data *cpa)
 {
-	unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn;
+	unsigned long nextpage_addr, numpages, pmask, psize, addr, pfn;
 	pte_t new_pte, old_pte, *tmp;
 	pgprot_t old_prot, new_prot, req_prot;
 	int i, do_split = 1;
@@ -403,7 +401,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 	if (cpa->force_split)
 		return 1;
 
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock(&pgd_lock);
 	/*
 	 * Check for races, another CPU might have split this page
 	 * up already:
@@ -498,14 +496,14 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 	}
 
 out_unlock:
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock(&pgd_lock);
 
 	return do_split;
 }
 
 static int split_large_page(pte_t *kpte, unsigned long address)
 {
-	unsigned long flags, pfn, pfninc = 1;
+	unsigned long pfn, pfninc = 1;
 	unsigned int i, level;
 	pte_t *pbase, *tmp;
 	pgprot_t ref_prot;
@@ -519,7 +517,7 @@ static int split_large_page(pte_t *kpte, unsigned long address)
 	if (!base)
 		return -ENOMEM;
 
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock(&pgd_lock);
 	/*
 	 * Check for races, another CPU might have split this page
 	 * up for us already:
@@ -591,7 +589,7 @@ out_unlock:
 	 */
 	if (base)
 		__free_page(base);
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock(&pgd_lock);
 
 	return 0;
 }
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 500242d3c96d..0113d19c8aa6 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -121,14 +121,12 @@ static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
 
 static void pgd_dtor(pgd_t *pgd)
 {
-	unsigned long flags; /* can be called from interrupt context */
-
 	if (SHARED_KERNEL_PMD)
 		return;
 
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock(&pgd_lock);
 	pgd_list_del(pgd);
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock(&pgd_lock);
 }
 
 /*
@@ -260,7 +258,6 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	pgd_t *pgd;
 	pmd_t *pmds[PREALLOCATED_PMDS];
-	unsigned long flags;
 
 	pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
 
@@ -280,12 +277,12 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 	 * respect to anything walking the pgd_list, so that they
 	 * never see a partially populated pgd.
 	 */
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock(&pgd_lock);
 
 	pgd_ctor(mm, pgd);
 	pgd_prepopulate_pmd(mm, pgd, pmds);
 
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock(&pgd_lock);
 
 	return pgd;
 
diff --git a/arch/x86/pci/ce4100.c b/arch/x86/pci/ce4100.c
index 85b68ef5e809..9260b3eb18d4 100644
--- a/arch/x86/pci/ce4100.c
+++ b/arch/x86/pci/ce4100.c
@@ -34,6 +34,7 @@
 #include <linux/pci.h>
 #include <linux/init.h>
 
+#include <asm/ce4100.h>
 #include <asm/pci_x86.h>
 
 struct sim_reg {
@@ -306,10 +307,10 @@ struct pci_raw_ops ce4100_pci_conf = {
 	.write = ce4100_conf_write,
 };
 
-static int __init ce4100_pci_init(void)
+int __init ce4100_pci_init(void)
 {
 	init_sim_regs();
 	raw_pci_ops = &ce4100_pci_conf;
-	return 0;
+	/* Indicate caller that it should invoke pci_legacy_init() */
+	return 1;
 }
-subsys_initcall(ce4100_pci_init);
diff --git a/arch/x86/platform/ce4100/ce4100.c b/arch/x86/platform/ce4100/ce4100.c
index d2c0d51a7178..cd6f184c3b3f 100644
--- a/arch/x86/platform/ce4100/ce4100.c
+++ b/arch/x86/platform/ce4100/ce4100.c
@@ -15,6 +15,7 @@
 #include <linux/serial_reg.h>
 #include <linux/serial_8250.h>
 
+#include <asm/ce4100.h>
 #include <asm/setup.h>
 #include <asm/io.h>
 
@@ -129,4 +130,5 @@ void __init x86_ce4100_early_setup(void)
 	x86_init.resources.probe_roms = x86_init_noop;
 	x86_init.mpparse.get_smp_config = x86_init_uint_noop;
 	x86_init.mpparse.find_smp_config = sdv_find_smp_config;
+	x86_init.pci.init = ce4100_pci_init;
 }
diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
index dab874647530..044bda5b3174 100644
--- a/arch/x86/platform/olpc/olpc_dt.c
+++ b/arch/x86/platform/olpc/olpc_dt.c
@@ -140,8 +140,7 @@ void * __init prom_early_alloc(unsigned long size)
 		 * wasted bootmem) and hand off chunks of it to callers.
 		 */
 		res = alloc_bootmem(chunk_size);
-		if (!res)
-			return NULL;
+		BUG_ON(!res);
 		prom_early_allocated += chunk_size;
 		memset(res, 0, chunk_size);
 		free_mem = chunk_size;
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index df58e9cad96a..a7b38d35c29a 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1364,11 +1364,11 @@ uv_activation_descriptor_init(int node, int pnode)
 		memset(bd2, 0, sizeof(struct bau_desc));
 		bd2->header.sw_ack_flag = 1;
 		/*
-		 * base_dest_nodeid is the nasid (pnode<<1) of the first uvhub
+		 * base_dest_nodeid is the nasid of the first uvhub
 		 * in the partition. The bit map will indicate uvhub numbers,
 		 * which are 0-N in a partition. Pnodes are unique system-wide.
 		 */
-		bd2->header.base_dest_nodeid = uv_partition_base_pnode << 1;
+		bd2->header.base_dest_nodeid = UV_PNODE_TO_NASID(uv_partition_base_pnode);
 		bd2->header.dest_subnodeid = 0x10; /* the LB */
 		bd2->header.command = UV_NET_ENDPOINT_INTD;
 		bd2->header.int_both = 1;
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 5e92b61ad574..f6089421147a 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -986,10 +986,9 @@ static void xen_pgd_pin(struct mm_struct *mm)
  */
 void xen_mm_pin_all(void)
 {
-	unsigned long flags;
 	struct page *page;
 
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock(&pgd_lock);
 
 	list_for_each_entry(page, &pgd_list, lru) {
 		if (!PagePinned(page)) {
@@ -998,7 +997,7 @@ void xen_mm_pin_all(void)
 		}
 	}
 
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock(&pgd_lock);
 }
 
 /*
@@ -1099,10 +1098,9 @@ static void xen_pgd_unpin(struct mm_struct *mm)
  */
 void xen_mm_unpin_all(void)
 {
-	unsigned long flags;
 	struct page *page;
 
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock(&pgd_lock);
 
 	list_for_each_entry(page, &pgd_list, lru) {
 		if (PageSavePinned(page)) {
@@ -1112,7 +1110,7 @@ void xen_mm_unpin_all(void)
 		}
 	}
 
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock(&pgd_lock);
 }
 
 void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)