Diffstat (limited to 'arch/x86/kernel')
 65 files changed, 838 insertions(+), 598 deletions(-)
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 67e929b89875..fb1035cd9a6a 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -624,6 +624,7 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table)
 	}
 
 	hpet_address = hpet_tbl->address.address;
+	hpet_blockid = hpet_tbl->sequence;
 
 	/*
 	 * Some broken BIOSes advertise HPET at 0x0. We really do not
@@ -1122,7 +1123,7 @@ static int __init acpi_parse_madt_ioapic_entries(void)
 	if (!acpi_sci_override_gsi)
 		acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0);
 
-	/* Fill in identity legacy mapings where no override */
+	/* Fill in identity legacy mappings where no override */
 	mp_config_acpi_legacy_irqs();
 
 	count =
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index ca93638ba430..82e508677b91 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -78,12 +78,9 @@ int acpi_save_state_mem(void)
 #ifndef CONFIG_64BIT
 	store_gdt((struct desc_ptr *)&header->pmode_gdt);
 
-	header->pmode_efer_low = nx_enabled;
-	if (header->pmode_efer_low & 1) {
-		/* This is strange, why not save efer, always? */
-		rdmsr(MSR_EFER, header->pmode_efer_low,
-		      header->pmode_efer_high);
-	}
+	if (rdmsr_safe(MSR_EFER, &header->pmode_efer_low,
+		       &header->pmode_efer_high))
+		header->pmode_efer_low = header->pmode_efer_high = 0;
 #endif /* !CONFIG_64BIT */
 
 	header->pmode_cr0 = read_cr0();
@@ -119,29 +116,32 @@ void acpi_restore_state_mem(void)
 
 
 /**
- * acpi_reserve_bootmem - do _very_ early ACPI initialisation
+ * acpi_reserve_wakeup_memory - do _very_ early ACPI initialisation
  *
  * We allocate a page from the first 1MB of memory for the wakeup
  * routine for when we come back from a sleep state. The
  * runtime allocator allows specification of <16MB pages, but not
  * <1MB pages.
  */
-void __init acpi_reserve_bootmem(void)
+void __init acpi_reserve_wakeup_memory(void)
 {
+	unsigned long mem;
+
 	if ((&wakeup_code_end - &wakeup_code_start) > WAKEUP_SIZE) {
 		printk(KERN_ERR
 		       "ACPI: Wakeup code way too big, S3 disabled.\n");
 		return;
 	}
 
-	acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
+	mem = find_e820_area(0, 1<<20, WAKEUP_SIZE, PAGE_SIZE);
 
-	if (!acpi_realmode) {
+	if (mem == -1L) {
 		printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
 		return;
 	}
-
-	acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
+	acpi_realmode = (unsigned long) phys_to_virt(mem);
+	acpi_wakeup_address = mem;
+	reserve_early(mem, mem + WAKEUP_SIZE, "ACPI WAKEUP");
 }
 
 
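Note: the sleep.c hunk swaps alloc_bootmem_low() for a direct e820 carve-out, which is usable much earlier in boot; find_e820_area() only locates a free range, so the claim has to be recorded separately with reserve_early(). A minimal sketch of that allocate-then-reserve idiom, with BUF_SIZE as a placeholder:

	/* find a free, page-aligned block below 1MB in the e820 map */
	u64 mem = find_e820_area(0, 1 << 20, BUF_SIZE, PAGE_SIZE);
	if (mem == -1L)
		return;			/* nothing free below 1MB */
	/* find_e820_area() does not reserve; record the claim so that
	 * later early allocators keep off this range */
	reserve_early(mem, mem + BUF_SIZE, "MY BUFFER");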
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 32fb09102a13..b990b5cc9541 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -166,6 +166,43 @@ static void iommu_uninit_device(struct device *dev)
 {
 	kfree(dev->archdata.iommu);
 }
+
+void __init amd_iommu_uninit_devices(void)
+{
+	struct pci_dev *pdev = NULL;
+
+	for_each_pci_dev(pdev) {
+
+		if (!check_device(&pdev->dev))
+			continue;
+
+		iommu_uninit_device(&pdev->dev);
+	}
+}
+
+int __init amd_iommu_init_devices(void)
+{
+	struct pci_dev *pdev = NULL;
+	int ret = 0;
+
+	for_each_pci_dev(pdev) {
+
+		if (!check_device(&pdev->dev))
+			continue;
+
+		ret = iommu_init_device(&pdev->dev);
+		if (ret)
+			goto out_free;
+	}
+
+	return 0;
+
+out_free:
+
+	amd_iommu_uninit_devices();
+
+	return ret;
+}
 #ifdef CONFIG_AMD_IOMMU_STATS
 
 /*
@@ -1587,6 +1624,11 @@ static struct notifier_block device_nb = {
 	.notifier_call = device_change_notifier,
 };
 
+void amd_iommu_init_notifier(void)
+{
+	bus_register_notifier(&pci_bus_type, &device_nb);
+}
+
 /*****************************************************************************
  *
  * The next functions belong to the dma_ops mapping/unmapping code.
@@ -1783,7 +1825,7 @@ retry:
 		goto out;
 
 	/*
-	 * aperture was sucessfully enlarged by 128 MB, try
+	 * aperture was successfully enlarged by 128 MB, try
 	 * allocation again
 	 */
 	goto retry;
@@ -2145,8 +2187,6 @@ static void prealloc_protection_domains(void)
 		if (!check_device(&dev->dev))
 			continue;
 
-		iommu_init_device(&dev->dev);
-
 		/* Is there already any domain for it? */
 		if (domain_for_device(&dev->dev))
 			continue;
@@ -2215,8 +2255,6 @@ int __init amd_iommu_init_dma_ops(void)
 
 	register_iommu(&amd_iommu_ops);
 
-	bus_register_notifier(&pci_bus_type, &device_nb);
-
 	amd_iommu_stats_init();
 
 	return 0;
@@ -2490,7 +2528,7 @@ int __init amd_iommu_init_passthrough(void)
 	struct pci_dev *dev = NULL;
 	u16 devid;
 
-	/* allocate passthroug domain */
+	/* allocate passthrough domain */
 	pt_domain = protection_domain_alloc();
 	if (!pt_domain)
 		return -ENOMEM;
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 7ffc39965233..1dca9c34eaeb 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -1274,6 +1274,10 @@ static int __init amd_iommu_init(void)
 	if (ret)
 		goto free;
 
+	ret = amd_iommu_init_devices();
+	if (ret)
+		goto free;
+
 	if (iommu_pass_through)
 		ret = amd_iommu_init_passthrough();
 	else
@@ -1281,6 +1285,8 @@ static int __init amd_iommu_init(void)
 	if (ret)
 		goto free;
 
+	amd_iommu_init_notifier();
+
 	enable_iommus();
 
 	if (iommu_pass_through)
@@ -1296,6 +1302,9 @@ out:
 	return ret;
 
 free:
+
+	amd_iommu_uninit_devices();
+
 	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
 		   get_order(MAX_DOMAIN_ID/8));
 
@@ -1336,6 +1345,9 @@ void __init amd_iommu_detect(void)
 		iommu_detected = 1;
 		amd_iommu_detected = 1;
 		x86_init.iommu.iommu_init = amd_iommu_init;
+
+		/* Make sure ACS will be enabled */
+		pci_request_acs();
 	}
 }
 
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index ad8c75b9e453..efb2b9cd132c 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -647,7 +647,7 @@ static int __init calibrate_APIC_clock(void)
 	calibration_result = (delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS;
 
 	apic_printk(APIC_VERBOSE, "..... delta %ld\n", delta);
-	apic_printk(APIC_VERBOSE, "..... mult: %ld\n", lapic_clockevent.mult);
+	apic_printk(APIC_VERBOSE, "..... mult: %u\n", lapic_clockevent.mult);
 	apic_printk(APIC_VERBOSE, "..... calibration result: %u\n",
 		    calibration_result);
 
diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
index d9acc3bee0f4..e31b9ffe25f5 100644
--- a/arch/x86/kernel/apic/apic_noop.c
+++ b/arch/x86/kernel/apic/apic_noop.c
@@ -127,7 +127,7 @@ static u32 noop_apic_read(u32 reg)
 
 static void noop_apic_write(u32 reg, u32 v)
 {
-	WARN_ON_ONCE((cpu_has_apic || !disable_apic));
+	WARN_ON_ONCE(cpu_has_apic && !disable_apic);
 }
 
 struct apic apic_noop = {
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
index e85f8fb7f8e7..dd2b5f264643 100644
--- a/arch/x86/kernel/apic/es7000_32.c
+++ b/arch/x86/kernel/apic/es7000_32.c
@@ -27,6 +27,9 @@
  *
  * http://www.unisys.com
  */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/notifier.h>
 #include <linux/spinlock.h>
 #include <linux/cpumask.h>
@@ -223,9 +226,9 @@ static int parse_unisys_oem(char *oemptr)
 		mip_addr = val;
 		mip = (struct mip_reg *)val;
 		mip_reg = __va(mip);
-		pr_debug("es7000_mipcfg: host_reg = 0x%lx \n",
+		pr_debug("host_reg = 0x%lx\n",
 			(unsigned long)host_reg);
-		pr_debug("es7000_mipcfg: mip_reg = 0x%lx \n",
+		pr_debug("mip_reg = 0x%lx\n",
 			(unsigned long)mip_reg);
 		success++;
 		break;
@@ -401,7 +404,7 @@ static void es7000_enable_apic_mode(void)
 	if (!es7000_plat)
 		return;
 
-	printk(KERN_INFO "ES7000: Enabling APIC mode.\n");
+	pr_info("Enabling APIC mode.\n");
 	memset(&es7000_mip_reg, 0, sizeof(struct mip_reg));
 	es7000_mip_reg.off_0x00 = MIP_SW_APIC;
 	es7000_mip_reg.off_0x38 = MIP_VALID;
@@ -514,8 +517,7 @@ static void es7000_setup_apic_routing(void)
 {
 	int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
 
-	printk(KERN_INFO
-	"Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
+	pr_info("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
 		(apic_version[apic] == 0x14) ?
 		"Physical Cluster" : "Logical Cluster",
 		nr_ioapics, cpumask_bits(es7000_target_cpus())[0]);
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index c0b4468683f9..d5d498fbee4b 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -3267,7 +3267,8 @@ void destroy_irq(unsigned int irq)
  * MSI message composition
  */
 #ifdef CONFIG_PCI_MSI
-static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
+static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
+			   struct msi_msg *msg, u8 hpet_id)
 {
 	struct irq_cfg *cfg;
 	int err;
@@ -3301,7 +3302,10 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
 		irte.dest_id = IRTE_DEST(dest);
 
 		/* Set source-id of interrupt request */
-		set_msi_sid(&irte, pdev);
+		if (pdev)
+			set_msi_sid(&irte, pdev);
+		else
+			set_hpet_sid(&irte, hpet_id);
 
 		modify_irte(irq, &irte);
 
@@ -3466,7 +3470,7 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
 	int ret;
 	struct msi_msg msg;
 
-	ret = msi_compose_msg(dev, irq, &msg);
+	ret = msi_compose_msg(dev, irq, &msg, -1);
 	if (ret < 0)
 		return ret;
 
@@ -3599,7 +3603,7 @@ int arch_setup_dmar_msi(unsigned int irq)
 	int ret;
 	struct msi_msg msg;
 
-	ret = msi_compose_msg(NULL, irq, &msg);
+	ret = msi_compose_msg(NULL, irq, &msg, -1);
 	if (ret < 0)
 		return ret;
 	dmar_msi_write(irq, &msg);
@@ -3639,6 +3643,19 @@ static int hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 
 #endif /* CONFIG_SMP */
 
+static struct irq_chip ir_hpet_msi_type = {
+	.name = "IR-HPET_MSI",
+	.unmask = hpet_msi_unmask,
+	.mask = hpet_msi_mask,
+#ifdef CONFIG_INTR_REMAP
+	.ack = ir_ack_apic_edge,
+#ifdef CONFIG_SMP
+	.set_affinity = ir_set_msi_irq_affinity,
+#endif
+#endif
+	.retrigger = ioapic_retrigger_irq,
+};
+
 static struct irq_chip hpet_msi_type = {
 	.name = "HPET_MSI",
 	.unmask = hpet_msi_unmask,
@@ -3650,20 +3667,36 @@ static struct irq_chip hpet_msi_type = {
 	.retrigger = ioapic_retrigger_irq,
 };
 
-int arch_setup_hpet_msi(unsigned int irq)
+int arch_setup_hpet_msi(unsigned int irq, unsigned int id)
 {
 	int ret;
 	struct msi_msg msg;
 	struct irq_desc *desc = irq_to_desc(irq);
 
-	ret = msi_compose_msg(NULL, irq, &msg);
+	if (intr_remapping_enabled) {
+		struct intel_iommu *iommu = map_hpet_to_ir(id);
+		int index;
+
+		if (!iommu)
+			return -1;
+
+		index = alloc_irte(iommu, irq, 1);
+		if (index < 0)
+			return -1;
+	}
+
+	ret = msi_compose_msg(NULL, irq, &msg, id);
 	if (ret < 0)
 		return ret;
 
 	hpet_msi_write(irq, &msg);
 	desc->status |= IRQ_MOVE_PCNTXT;
-	set_irq_chip_and_handler_name(irq, &hpet_msi_type, handle_edge_irq,
-				      "edge");
+	if (irq_remapped(irq))
+		set_irq_chip_and_handler_name(irq, &ir_hpet_msi_type,
+					      handle_edge_irq, "edge");
+	else
+		set_irq_chip_and_handler_name(irq, &hpet_msi_type,
+					      handle_edge_irq, "edge");
 
 	return 0;
 }
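Note: the id threaded into msi_compose_msg() and arch_setup_hpet_msi() above is the HPET block id that the acpi/boot.c hunk now saves from the ACPI HPET table (hpet_blockid); with interrupt remapping enabled it selects the IOMMU that owns the HPET's requester id. A hypothetical caller sketch (irq assumed already allocated):

	/* hpet_blockid comes from acpi_parse_hpet() above */
	if (arch_setup_hpet_msi(irq, hpet_blockid) < 0)
		printk(KERN_WARNING "hpet: MSI setup failed for irq %u\n", irq);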
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index 6389432a9dbf..0159a69396cb 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -361,7 +361,7 @@ void stop_apic_nmi_watchdog(void *unused)
  */
 
 static DEFINE_PER_CPU(unsigned, last_irq_sum);
-static DEFINE_PER_CPU(local_t, alert_counter);
+static DEFINE_PER_CPU(long, alert_counter);
 static DEFINE_PER_CPU(int, nmi_touch);
 
 void touch_nmi_watchdog(void)
@@ -438,8 +438,8 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
 		 * Ayiee, looks like this CPU is stuck ...
 		 * wait a few IRQs (5 seconds) before doing the oops ...
 		 */
-		local_inc(&__get_cpu_var(alert_counter));
-		if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz)
+		__this_cpu_inc(per_cpu_var(alert_counter));
+		if (__this_cpu_read(per_cpu_var(alert_counter)) == 5 * nmi_hz)
 			/*
 			 * die_nmi will return ONLY if NOTIFY_STOP happens..
 			 */
@@ -447,7 +447,7 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
 				regs, panic_on_timeout);
 	} else {
 		__get_cpu_var(last_irq_sum) = sum;
-		local_set(&__get_cpu_var(alert_counter), 0);
+		__this_cpu_write(per_cpu_var(alert_counter), 0);
 	}
 
 	/* see if the nmi watchdog went off */
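Note: the nmi.c hunks convert the per-CPU counter from local_t to a plain long driven by the this_cpu operations, which x86 compiles to single %gs-prefixed instructions with no explicit address generation. A minimal sketch of the accessor style used above, with LIMIT as a placeholder:

	static DEFINE_PER_CPU(long, counter);

	/* old style: local_inc(&__get_cpu_var(counter)); */
	__this_cpu_inc(per_cpu_var(counter));
	if (__this_cpu_read(per_cpu_var(counter)) == LIMIT)
		__this_cpu_write(per_cpu_var(counter), 0);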
diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
index 07cdbdcd7a92..98c4665f251c 100644
--- a/arch/x86/kernel/apic/numaq_32.c
+++ b/arch/x86/kernel/apic/numaq_32.c
@@ -264,11 +264,6 @@ static void __init smp_read_mpc_oem(struct mpc_table *mpc)
 static __init void early_check_numaq(void)
 {
 	/*
-	 * Find possible boot-time SMP configuration:
-	 */
-	early_find_smp_config();
-
-	/*
 	 * get boot-time SMP configuration:
 	 */
 	if (smp_found_config)
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 130c4b934877..b684bb303cbf 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -30,10 +30,22 @@
 #include <asm/apic.h>
 #include <asm/ipi.h>
 #include <asm/smp.h>
+#include <asm/x86_init.h>
 
 DEFINE_PER_CPU(int, x2apic_extra_bits);
 
 static enum uv_system_type uv_system_type;
+static u64 gru_start_paddr, gru_end_paddr;
+
+static inline bool is_GRU_range(u64 start, u64 end)
+{
+	return start >= gru_start_paddr && end <= gru_end_paddr;
+}
+
+static bool uv_is_untracked_pat_range(u64 start, u64 end)
+{
+	return is_ISA_range(start, end) || is_GRU_range(start, end);
+}
 
 static int early_get_nodeid(void)
 {
@@ -49,6 +61,7 @@ static int early_get_nodeid(void)
 static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 {
 	if (!strcmp(oem_id, "SGI")) {
+		x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
 		if (!strcmp(oem_table_id, "UVL"))
 			uv_system_type = UV_LEGACY_APIC;
 		else if (!strcmp(oem_table_id, "UVX"))
@@ -385,8 +398,12 @@ static __init void map_gru_high(int max_pnode)
 	int shift = UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT;
 
 	gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR);
-	if (gru.s.enable)
+	if (gru.s.enable) {
 		map_high("GRU", gru.s.base, shift, max_pnode, map_wb);
+		gru_start_paddr = ((u64)gru.s.base << shift);
+		gru_end_paddr = gru_start_paddr + (1UL << shift) * (max_pnode + 1);
+
+	}
 }
 
 static __init void map_mmr_high(int max_pnode)
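Note: x86_platform.is_untracked_pat_range is a hook that lets a platform exempt physical ranges from PAT memtype tracking; the stock implementation covers the ISA hole, and UV extends it with the GRU window mapped above. A sketch of how another platform could override it (my_start/my_end are placeholders):

	static bool my_is_untracked_pat_range(u64 start, u64 end)
	{
		/* keep the ISA exemption, add one extra MMIO window */
		return is_ISA_range(start, end) ||
		       (start >= my_start && end <= my_end);
	}

	/* from that platform's early setup code: */
	x86_platform.is_untracked_pat_range = my_is_untracked_pat_range;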
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index a4ec8b647544..20399b7b0c3f 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1093,7 +1093,7 @@ static void clear_all_debug_regs(void)
 
 void __cpuinit cpu_init(void)
 {
-	struct orig_ist *orig_ist;
+	struct orig_ist *oist;
 	struct task_struct *me;
 	struct tss_struct *t;
 	unsigned long v;
@@ -1102,7 +1102,7 @@ void __cpuinit cpu_init(void)
 
 	cpu = stack_smp_processor_id();
 	t = &per_cpu(init_tss, cpu);
-	orig_ist = &per_cpu(orig_ist, cpu);
+	oist = &per_cpu(orig_ist, cpu);
 
 #ifdef CONFIG_NUMA
 	if (cpu != 0 && percpu_read(node_number) == 0 &&
@@ -1136,19 +1136,19 @@ void __cpuinit cpu_init(void)
 	wrmsrl(MSR_KERNEL_GS_BASE, 0);
 	barrier();
 
-	check_efer();
+	x86_configure_nx();
 	if (cpu != 0)
 		enable_x2apic();
 
 	/*
 	 * set up and load the per-CPU TSS
 	 */
-	if (!orig_ist->ist[0]) {
+	if (!oist->ist[0]) {
 		char *estacks = per_cpu(exception_stacks, cpu);
 
 		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
 			estacks += exception_stack_sizes[v];
-			orig_ist->ist[v] = t->x86_tss.ist[v] =
+			oist->ist[v] = t->x86_tss.ist[v] =
 				(unsigned long)estacks;
 		}
 	}
diff --git a/arch/x86/kernel/cpu/cpu_debug.c b/arch/x86/kernel/cpu/cpu_debug.c
index dca325c03999..b368cd862997 100644
--- a/arch/x86/kernel/cpu/cpu_debug.c
+++ b/arch/x86/kernel/cpu/cpu_debug.c
@@ -30,9 +30,9 @@
 #include <asm/apic.h>
 #include <asm/desc.h>
 
-static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpu_arr);
-static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], priv_arr);
-static DEFINE_PER_CPU(int, cpu_priv_count);
+static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpud_arr);
+static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], cpud_priv_arr);
+static DEFINE_PER_CPU(int, cpud_priv_count);
 
 static DEFINE_MUTEX(cpu_debug_lock);
 
@@ -531,7 +531,7 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
 
 	/* Already intialized */
 	if (file == CPU_INDEX_BIT)
-		if (per_cpu(cpu_arr[type].init, cpu))
+		if (per_cpu(cpud_arr[type].init, cpu))
 			return 0;
 
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -543,8 +543,8 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
 	priv->reg = reg;
 	priv->file = file;
 	mutex_lock(&cpu_debug_lock);
-	per_cpu(priv_arr[type], cpu) = priv;
-	per_cpu(cpu_priv_count, cpu)++;
+	per_cpu(cpud_priv_arr[type], cpu) = priv;
+	per_cpu(cpud_priv_count, cpu)++;
 	mutex_unlock(&cpu_debug_lock);
 
 	if (file)
@@ -552,10 +552,10 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
 			dentry, (void *)priv, &cpu_fops);
 	else {
 		debugfs_create_file(cpu_base[type].name, S_IRUGO,
-				per_cpu(cpu_arr[type].dentry, cpu),
+				per_cpu(cpud_arr[type].dentry, cpu),
 				(void *)priv, &cpu_fops);
 		mutex_lock(&cpu_debug_lock);
-		per_cpu(cpu_arr[type].init, cpu) = 1;
+		per_cpu(cpud_arr[type].init, cpu) = 1;
 		mutex_unlock(&cpu_debug_lock);
 	}
 
@@ -615,7 +615,7 @@ static int cpu_init_allreg(unsigned cpu, struct dentry *dentry)
 		if (!is_typeflag_valid(cpu, cpu_base[type].flag))
 			continue;
 		cpu_dentry = debugfs_create_dir(cpu_base[type].name, dentry);
-		per_cpu(cpu_arr[type].dentry, cpu) = cpu_dentry;
+		per_cpu(cpud_arr[type].dentry, cpu) = cpu_dentry;
 
 		if (type < CPU_TSS_BIT)
 			err = cpu_init_msr(cpu, type, cpu_dentry);
@@ -647,11 +647,11 @@ static int cpu_init_cpu(void)
 		err = cpu_init_allreg(cpu, cpu_dentry);
 
 		pr_info("cpu%d(%d) debug files %d\n",
-			cpu, nr_cpu_ids, per_cpu(cpu_priv_count, cpu));
-		if (per_cpu(cpu_priv_count, cpu) > MAX_CPU_FILES) {
+			cpu, nr_cpu_ids, per_cpu(cpud_priv_count, cpu));
+		if (per_cpu(cpud_priv_count, cpu) > MAX_CPU_FILES) {
 			pr_err("Register files count %d exceeds limit %d\n",
-				per_cpu(cpu_priv_count, cpu), MAX_CPU_FILES);
-			per_cpu(cpu_priv_count, cpu) = MAX_CPU_FILES;
+				per_cpu(cpud_priv_count, cpu), MAX_CPU_FILES);
+			per_cpu(cpud_priv_count, cpu) = MAX_CPU_FILES;
 			err = -ENFILE;
 		}
 		if (err)
@@ -676,8 +676,8 @@ static void __exit cpu_debug_exit(void)
 		debugfs_remove_recursive(cpu_debugfs_dir);
 
 	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
-		for (i = 0; i < per_cpu(cpu_priv_count, cpu); i++)
-			kfree(per_cpu(priv_arr[i], cpu));
+		for (i = 0; i < per_cpu(cpud_priv_count, cpu); i++)
+			kfree(per_cpu(cpud_priv_arr[i], cpu));
 }
 
 module_init(cpu_debug_init);
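Note: the cpud_/ici_/acfreq_ prefixes in this and the following files are not cosmetic. The per_cpu__ symbol prefix was being removed, so a per-CPU variable now lives in the same namespace as ordinary identifiers and must not collide with globals or locals of the same name. A hypothetical clash the renames avoid:

	static DEFINE_PER_CPU(int, count);	/* now emits the symbol "count"... */
	static int count;			/* ...which this would redefine */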
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 8b581d3905cb..f28decf8dde3 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -68,9 +68,9 @@ struct acpi_cpufreq_data {
 	unsigned int cpu_feature;
 };
 
-static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data);
+static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);
 
-static DEFINE_PER_CPU(struct aperfmperf, old_perf);
+static DEFINE_PER_CPU(struct aperfmperf, acfreq_old_perf);
 
 /* acpi_perf_data is a pointer to percpu data. */
 static struct acpi_processor_performance *acpi_perf_data;
@@ -214,14 +214,14 @@ static u32 get_cur_val(const struct cpumask *mask)
 	if (unlikely(cpumask_empty(mask)))
 		return 0;
 
-	switch (per_cpu(drv_data, cpumask_first(mask))->cpu_feature) {
+	switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
 	case SYSTEM_INTEL_MSR_CAPABLE:
 		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
 		cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
 		break;
 	case SYSTEM_IO_CAPABLE:
 		cmd.type = SYSTEM_IO_CAPABLE;
-		perf = per_cpu(drv_data, cpumask_first(mask))->acpi_data;
+		perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
 		cmd.addr.io.port = perf->control_register.address;
 		cmd.addr.io.bit_width = perf->control_register.bit_width;
 		break;
@@ -268,8 +268,8 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy,
 	if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1))
 		return 0;
 
-	ratio = calc_aperfmperf_ratio(&per_cpu(old_perf, cpu), &perf);
-	per_cpu(old_perf, cpu) = perf;
+	ratio = calc_aperfmperf_ratio(&per_cpu(acfreq_old_perf, cpu), &perf);
+	per_cpu(acfreq_old_perf, cpu) = perf;
 
 	retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT;
 
@@ -278,7 +278,7 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy,
 
 static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
 {
-	struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu);
+	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu);
 	unsigned int freq;
 	unsigned int cached_freq;
 
@@ -322,7 +322,7 @@ static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
 static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 			       unsigned int target_freq, unsigned int relation)
 {
-	struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
 	struct acpi_processor_performance *perf;
 	struct cpufreq_freqs freqs;
 	struct drv_cmd cmd;
@@ -416,7 +416,7 @@ out:
 
 static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
 {
-	struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
 
 	dprintk("acpi_cpufreq_verify\n");
 
@@ -574,7 +574,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 		return -ENOMEM;
 
 	data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
-	per_cpu(drv_data, cpu) = data;
+	per_cpu(acfreq_data, cpu) = data;
 
 	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
 		acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
@@ -725,20 +725,20 @@ err_unreg:
 	acpi_processor_unregister_performance(perf, cpu);
 err_free:
 	kfree(data);
-	per_cpu(drv_data, cpu) = NULL;
+	per_cpu(acfreq_data, cpu) = NULL;
 
 	return result;
 }
 
 static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 {
-	struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
 
 	dprintk("acpi_cpufreq_cpu_exit\n");
 
 	if (data) {
 		cpufreq_frequency_table_put_attr(policy->cpu);
-		per_cpu(drv_data, policy->cpu) = NULL;
+		per_cpu(acfreq_data, policy->cpu) = NULL;
 		acpi_processor_unregister_performance(data->acpi_data,
 						      policy->cpu);
 		kfree(data);
@@ -749,7 +749,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 
 static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
 {
-	struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
 
 	dprintk("acpi_cpufreq_resume\n");
 
@@ -764,14 +764,15 @@ static struct freq_attr *acpi_cpufreq_attr[] = {
 };
 
 static struct cpufreq_driver acpi_cpufreq_driver = {
-	.verify = acpi_cpufreq_verify,
-	.target = acpi_cpufreq_target,
-	.init = acpi_cpufreq_cpu_init,
-	.exit = acpi_cpufreq_cpu_exit,
-	.resume = acpi_cpufreq_resume,
-	.name = "acpi-cpufreq",
-	.owner = THIS_MODULE,
-	.attr = acpi_cpufreq_attr,
+	.verify		= acpi_cpufreq_verify,
+	.target		= acpi_cpufreq_target,
+	.bios_limit	= acpi_processor_get_bios_limit,
+	.init		= acpi_cpufreq_cpu_init,
+	.exit		= acpi_cpufreq_cpu_exit,
+	.resume		= acpi_cpufreq_resume,
+	.name		= "acpi-cpufreq",
+	.owner		= THIS_MODULE,
+	.attr		= acpi_cpufreq_attr,
 };
 
 static int __init acpi_cpufreq_init(void)
diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.c b/arch/x86/kernel/cpu/cpufreq/longhaul.c
index cabd2fa3fc93..7e7eea4f8261 100644
--- a/arch/x86/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/x86/kernel/cpu/cpufreq/longhaul.c
@@ -885,7 +885,7 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
 
 	/* Find ACPI data for processor */
 	acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
-				ACPI_UINT32_MAX, &longhaul_walk_callback,
+				ACPI_UINT32_MAX, &longhaul_walk_callback, NULL,
 				NULL, (void *)&pr);
 
 	/* Check ACPI support for C3 state */
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k6.c b/arch/x86/kernel/cpu/cpufreq/powernow-k6.c
index f10dea409f40..cb01dac267d3 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k6.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k6.c
@@ -164,7 +164,7 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
 	}
 
 	/* cpuinfo and default policy values */
-	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+	policy->cpuinfo.transition_latency = 200000;
 	policy->cur = busfreq * max_multiplier;
 
 	result = cpufreq_frequency_table_cpuinfo(policy, clock_ratio);
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
index d47c775eb0ab..9a97116f89e5 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
@@ -714,14 +714,17 @@ static struct freq_attr *powernow_table_attr[] = {
 };
 
 static struct cpufreq_driver powernow_driver = {
-	.verify = powernow_verify,
-	.target = powernow_target,
-	.get = powernow_get,
-	.init = powernow_cpu_init,
-	.exit = powernow_cpu_exit,
-	.name = "powernow-k7",
-	.owner = THIS_MODULE,
-	.attr = powernow_table_attr,
+	.verify		= powernow_verify,
+	.target		= powernow_target,
+	.get		= powernow_get,
+#ifdef CONFIG_X86_POWERNOW_K7_ACPI
+	.bios_limit	= acpi_processor_get_bios_limit,
+#endif
+	.init		= powernow_cpu_init,
+	.exit		= powernow_cpu_exit,
+	.name		= "powernow-k7",
+	.owner		= THIS_MODULE,
+	.attr		= powernow_table_attr,
 };
 
 static int __init powernow_init(void)
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 3f12dabeab52..a9df9441a9a2 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1118,7 +1118,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data,
 static int powernowk8_target(struct cpufreq_policy *pol,
 		unsigned targfreq, unsigned relation)
 {
-	cpumask_t oldmask;
+	cpumask_var_t oldmask;
 	struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
 	u32 checkfid;
 	u32 checkvid;
@@ -1131,9 +1131,13 @@ static int powernowk8_target(struct cpufreq_policy *pol,
 	checkfid = data->currfid;
 	checkvid = data->currvid;
 
-	/* only run on specific CPU from here on */
-	oldmask = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
+	/* only run on specific CPU from here on. */
+	/* This is poor form: use a workqueue or smp_call_function_single */
+	if (!alloc_cpumask_var(&oldmask, GFP_KERNEL))
+		return -ENOMEM;
+
+	cpumask_copy(oldmask, tsk_cpumask(current));
+	set_cpus_allowed_ptr(current, cpumask_of(pol->cpu));
 
 	if (smp_processor_id() != pol->cpu) {
 		printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
@@ -1193,7 +1197,8 @@ static int powernowk8_target(struct cpufreq_policy *pol,
 	ret = 0;
 
 err_out:
-	set_cpus_allowed_ptr(current, &oldmask);
+	set_cpus_allowed_ptr(current, oldmask);
+	free_cpumask_var(oldmask);
 	return ret;
 }
 
@@ -1393,14 +1398,15 @@ static struct freq_attr *powernow_k8_attr[] = {
 };
 
 static struct cpufreq_driver cpufreq_amd64_driver = {
-	.verify = powernowk8_verify,
-	.target = powernowk8_target,
-	.init = powernowk8_cpu_init,
-	.exit = __devexit_p(powernowk8_cpu_exit),
-	.get = powernowk8_get,
-	.name = "powernow-k8",
-	.owner = THIS_MODULE,
-	.attr = powernow_k8_attr,
+	.verify		= powernowk8_verify,
+	.target		= powernowk8_target,
+	.bios_limit	= acpi_processor_get_bios_limit,
+	.init		= powernowk8_cpu_init,
+	.exit		= __devexit_p(powernowk8_cpu_exit),
+	.get		= powernowk8_get,
+	.name		= "powernow-k8",
+	.owner		= THIS_MODULE,
+	.attr		= powernow_k8_attr,
 };
 
 /* driver entry point for init */
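Note: a cpumask_t on the stack is NR_CPUS bits (512 bytes at NR_CPUS=4096), so the hunk above moves powernowk8_target() to cpumask_var_t, which is heap-backed when CONFIG_CPUMASK_OFFSTACK=y. The allocate/copy/restore pattern, as used above (target_cpu is a placeholder):

	cpumask_var_t oldmask;

	if (!alloc_cpumask_var(&oldmask, GFP_KERNEL))
		return -ENOMEM;		/* can only fail when off-stack */
	cpumask_copy(oldmask, tsk_cpumask(current));
	set_cpus_allowed_ptr(current, cpumask_of(target_cpu));
	/* ... work that must run on target_cpu ... */
	set_cpus_allowed_ptr(current, oldmask);
	free_cpumask_var(oldmask);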
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
index 3ae5a7a3a500..2ce8e0b5cc54 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
@@ -39,7 +39,7 @@ static struct pci_dev *speedstep_chipset_dev;
 
 /* speedstep_processor
  */
-static unsigned int speedstep_processor;
+static enum speedstep_processor speedstep_processor;
 
 static u32 pmbase;
 
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c
index f4c290b8482f..ad0083abfa23 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c
@@ -34,7 +34,7 @@ static int relaxed_check;
  * GET PROCESSOR CORE SPEED IN KHZ                                   *
  *********************************************************************/
 
-static unsigned int pentium3_get_frequency(unsigned int processor)
+static unsigned int pentium3_get_frequency(enum speedstep_processor processor)
 {
 	/* See table 14 of p3_ds.pdf and table 22 of 29834003.pdf */
 	struct {
@@ -227,7 +227,7 @@ static unsigned int pentium4_get_frequency(void)
 
 
 /* Warning: may get called from smp_call_function_single. */
-unsigned int speedstep_get_frequency(unsigned int processor)
+unsigned int speedstep_get_frequency(enum speedstep_processor processor)
 {
 	switch (processor) {
 	case SPEEDSTEP_CPU_PCORE:
@@ -380,7 +380,7 @@ EXPORT_SYMBOL_GPL(speedstep_detect_processor);
 * DETECT SPEEDSTEP SPEEDS                                           *
 *********************************************************************/
 
-unsigned int speedstep_get_freqs(unsigned int processor,
+unsigned int speedstep_get_freqs(enum speedstep_processor processor,
 				  unsigned int *low_speed,
 				  unsigned int *high_speed,
 				  unsigned int *transition_latency,
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.h b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.h
index 2b6c04e5a304..70d9cea1219d 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.h
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.h
@@ -11,18 +11,18 @@
 
 
 /* processors */
-
-#define SPEEDSTEP_CPU_PIII_C_EARLY	0x00000001 /* Coppermine core */
-#define SPEEDSTEP_CPU_PIII_C		0x00000002 /* Coppermine core */
-#define SPEEDSTEP_CPU_PIII_T		0x00000003 /* Tualatin core */
-#define SPEEDSTEP_CPU_P4M		0x00000004 /* P4-M */
-
+enum speedstep_processor {
+	SPEEDSTEP_CPU_PIII_C_EARLY = 0x00000001, /* Coppermine core */
+	SPEEDSTEP_CPU_PIII_C = 0x00000002, /* Coppermine core */
+	SPEEDSTEP_CPU_PIII_T = 0x00000003, /* Tualatin core */
+	SPEEDSTEP_CPU_P4M = 0x00000004, /* P4-M */
 /* the following processors are not speedstep-capable and are not auto-detected
  * in speedstep_detect_processor(). However, their speed can be detected using
  * the speedstep_get_frequency() call. */
-#define SPEEDSTEP_CPU_PM		0xFFFFFF03 /* Pentium M */
-#define SPEEDSTEP_CPU_P4D		0xFFFFFF04 /* desktop P4 */
-#define SPEEDSTEP_CPU_PCORE		0xFFFFFF05 /* Core */
+	SPEEDSTEP_CPU_PM = 0xFFFFFF03, /* Pentium M */
+	SPEEDSTEP_CPU_P4D = 0xFFFFFF04, /* desktop P4 */
+	SPEEDSTEP_CPU_PCORE = 0xFFFFFF05, /* Core */
+};
 
 /* speedstep states -- only two of them */
 
@@ -31,10 +31,10 @@
 
 
 /* detect a speedstep-capable processor */
-extern unsigned int speedstep_detect_processor (void);
+extern enum speedstep_processor speedstep_detect_processor(void);
 
 /* detect the current speed (in khz) of the processor */
-extern unsigned int speedstep_get_frequency(unsigned int processor);
+extern unsigned int speedstep_get_frequency(enum speedstep_processor processor);
 
 
@@ -42,7 +42,7 @@ extern unsigned int speedstep_get_frequency(enum speedstep_processor processor);
  * SPEEDSTEP_LOW; the second argument is zero so that no
  * cpufreq_notify_transition calls are initiated.
  */
-extern unsigned int speedstep_get_freqs(unsigned int processor,
+extern unsigned int speedstep_get_freqs(enum speedstep_processor processor,
 				unsigned int *low_speed,
 				unsigned int *high_speed,
 				unsigned int *transition_latency,
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c b/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c
index befea088e4f5..04d73c114e49 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c
@@ -35,7 +35,7 @@ static int smi_cmd;
 static unsigned int smi_sig;
 
 /* info about the processor */
-static unsigned int speedstep_processor;
+static enum speedstep_processor speedstep_processor;
 
 /*
  * There are only two frequency states for each processor. Values
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 40e1835b35e8..c900b73f9224 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -263,8 +263,12 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
 	/* Don't do the funky fallback heuristics the AMD version employs
 	   for now. */
 	node = apicid_to_node[apicid];
-	if (node == NUMA_NO_NODE || !node_online(node))
+	if (node == NUMA_NO_NODE)
 		node = first_node(node_online_map);
+	else if (!node_online(node)) {
+		/* reuse the value from init_cpu_to_node() */
+		node = cpu_to_node(cpu);
+	}
 	numa_set_node(cpu, node);
 
 	printk(KERN_INFO "CPU %d/0x%x -> Node %d\n", cpu, apicid, node);
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 0df4c2b7107f..0c06bca2a1dc 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -94,7 +94,7 @@ static const struct _cache_table __cpuinitconst cache_table[] =
 	{ 0xd1, LVL_3, 1024 },	/* 4-way set assoc, 64 byte line size */
 	{ 0xd2, LVL_3, 2048 },	/* 4-way set assoc, 64 byte line size */
 	{ 0xd6, LVL_3, 1024 },	/* 8-way set assoc, 64 byte line size */
-	{ 0xd7, LVL_3, 2038 },	/* 8-way set assoc, 64 byte line size */
+	{ 0xd7, LVL_3, 2048 },	/* 8-way set assoc, 64 byte line size */
 	{ 0xd8, LVL_3, 4096 },	/* 12-way set assoc, 64 byte line size */
 	{ 0xdc, LVL_3, 2048 },	/* 12-way set assoc, 64 byte line size */
 	{ 0xdd, LVL_3, 4096 },	/* 12-way set assoc, 64 byte line size */
@@ -102,6 +102,9 @@ static const struct _cache_table __cpuinitconst cache_table[] =
 	{ 0xe2, LVL_3, 2048 },	/* 16-way set assoc, 64 byte line size */
 	{ 0xe3, LVL_3, 4096 },	/* 16-way set assoc, 64 byte line size */
 	{ 0xe4, LVL_3, 8192 },	/* 16-way set assoc, 64 byte line size */
+	{ 0xea, LVL_3, 12288 },	/* 24-way set assoc, 64 byte line size */
+	{ 0xeb, LVL_3, 18432 },	/* 24-way set assoc, 64 byte line size */
+	{ 0xec, LVL_3, 24576 },	/* 24-way set assoc, 64 byte line size */
 	{ 0x00, 0, 0}
 };
 
@@ -496,8 +499,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 #ifdef CONFIG_SYSFS
 
 /* pointer to _cpuid4_info array (for each cache leaf) */
-static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
-#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(cpuid4_info, x))[y]))
+static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
+#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(ici_cpuid4_info, x))[y]))
 
 #ifdef CONFIG_SMP
 static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
@@ -510,7 +513,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 	if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
 		struct cpuinfo_x86 *d;
 		for_each_online_cpu(i) {
-			if (!per_cpu(cpuid4_info, i))
+			if (!per_cpu(ici_cpuid4_info, i))
 				continue;
 			d = &cpu_data(i);
 			this_leaf = CPUID4_INFO_IDX(i, index);
@@ -532,7 +535,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 		    c->apicid >> index_msb) {
 			cpumask_set_cpu(i,
 				to_cpumask(this_leaf->shared_cpu_map));
-			if (i != cpu && per_cpu(cpuid4_info, i)) {
+			if (i != cpu && per_cpu(ici_cpuid4_info, i)) {
 				sibling_leaf =
 					CPUID4_INFO_IDX(i, index);
 				cpumask_set_cpu(cpu, to_cpumask(
@@ -571,8 +574,8 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
 	for (i = 0; i < num_cache_leaves; i++)
 		cache_remove_shared_cpu_map(cpu, i);
 
-	kfree(per_cpu(cpuid4_info, cpu));
-	per_cpu(cpuid4_info, cpu) = NULL;
+	kfree(per_cpu(ici_cpuid4_info, cpu));
+	per_cpu(ici_cpuid4_info, cpu) = NULL;
 }
 
 static int
@@ -611,15 +614,15 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
 	if (num_cache_leaves == 0)
 		return -ENOENT;
 
-	per_cpu(cpuid4_info, cpu) = kzalloc(
+	per_cpu(ici_cpuid4_info, cpu) = kzalloc(
 	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
-	if (per_cpu(cpuid4_info, cpu) == NULL)
+	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
 		return -ENOMEM;
 
 	smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
 	if (retval) {
-		kfree(per_cpu(cpuid4_info, cpu));
-		per_cpu(cpuid4_info, cpu) = NULL;
+		kfree(per_cpu(ici_cpuid4_info, cpu));
+		per_cpu(ici_cpuid4_info, cpu) = NULL;
 	}
 
 	return retval;
@@ -631,7 +634,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
 extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */
 
 /* pointer to kobject for cpuX/cache */
-static DEFINE_PER_CPU(struct kobject *, cache_kobject);
+static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);
 
 struct _index_kobject {
 	struct kobject kobj;
@@ -640,8 +643,8 @@ struct _index_kobject {
 };
 
 /* pointer to array of kobjects for cpuX/cache/indexY */
-static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
-#define INDEX_KOBJECT_PTR(x, y)	(&((per_cpu(index_kobject, x))[y]))
+static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
+#define INDEX_KOBJECT_PTR(x, y)	(&((per_cpu(ici_index_kobject, x))[y]))
 
 #define show_one_plus(file_name, object, val)				\
 static ssize_t show_##file_name						\
@@ -860,10 +863,10 @@ static struct kobj_type ktype_percpu_entry = {
860 | 863 | ||
861 | static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu) | 864 | static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu) |
862 | { | 865 | { |
863 | kfree(per_cpu(cache_kobject, cpu)); | 866 | kfree(per_cpu(ici_cache_kobject, cpu)); |
864 | kfree(per_cpu(index_kobject, cpu)); | 867 | kfree(per_cpu(ici_index_kobject, cpu)); |
865 | per_cpu(cache_kobject, cpu) = NULL; | 868 | per_cpu(ici_cache_kobject, cpu) = NULL; |
866 | per_cpu(index_kobject, cpu) = NULL; | 869 | per_cpu(ici_index_kobject, cpu) = NULL; |
867 | free_cache_attributes(cpu); | 870 | free_cache_attributes(cpu); |
868 | } | 871 | } |
869 | 872 | ||
@@ -879,14 +882,14 @@ static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu) | |||
879 | return err; | 882 | return err; |
880 | 883 | ||
881 | /* Allocate all required memory */ | 884 | /* Allocate all required memory */ |
882 | per_cpu(cache_kobject, cpu) = | 885 | per_cpu(ici_cache_kobject, cpu) = |
883 | kzalloc(sizeof(struct kobject), GFP_KERNEL); | 886 | kzalloc(sizeof(struct kobject), GFP_KERNEL); |
884 | if (unlikely(per_cpu(cache_kobject, cpu) == NULL)) | 887 | if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL)) |
885 | goto err_out; | 888 | goto err_out; |
886 | 889 | ||
887 | per_cpu(index_kobject, cpu) = kzalloc( | 890 | per_cpu(ici_index_kobject, cpu) = kzalloc( |
888 | sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL); | 891 | sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL); |
889 | if (unlikely(per_cpu(index_kobject, cpu) == NULL)) | 892 | if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL)) |
890 | goto err_out; | 893 | goto err_out; |
891 | 894 | ||
892 | return 0; | 895 | return 0; |
@@ -910,7 +913,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev) | |||
910 | if (unlikely(retval < 0)) | 913 | if (unlikely(retval < 0)) |
911 | return retval; | 914 | return retval; |
912 | 915 | ||
913 | retval = kobject_init_and_add(per_cpu(cache_kobject, cpu), | 916 | retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu), |
914 | &ktype_percpu_entry, | 917 | &ktype_percpu_entry, |
915 | &sys_dev->kobj, "%s", "cache"); | 918 | &sys_dev->kobj, "%s", "cache"); |
916 | if (retval < 0) { | 919 | if (retval < 0) { |
@@ -924,12 +927,12 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev) | |||
924 | this_object->index = i; | 927 | this_object->index = i; |
925 | retval = kobject_init_and_add(&(this_object->kobj), | 928 | retval = kobject_init_and_add(&(this_object->kobj), |
926 | &ktype_cache, | 929 | &ktype_cache, |
927 | per_cpu(cache_kobject, cpu), | 930 | per_cpu(ici_cache_kobject, cpu), |
928 | "index%1lu", i); | 931 | "index%1lu", i); |
929 | if (unlikely(retval)) { | 932 | if (unlikely(retval)) { |
930 | for (j = 0; j < i; j++) | 933 | for (j = 0; j < i; j++) |
931 | kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj)); | 934 | kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj)); |
932 | kobject_put(per_cpu(cache_kobject, cpu)); | 935 | kobject_put(per_cpu(ici_cache_kobject, cpu)); |
933 | cpuid4_cache_sysfs_exit(cpu); | 936 | cpuid4_cache_sysfs_exit(cpu); |
934 | return retval; | 937 | return retval; |
935 | } | 938 | } |
@@ -937,7 +940,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev) | |||
937 | } | 940 | } |
938 | cpumask_set_cpu(cpu, to_cpumask(cache_dev_map)); | 941 | cpumask_set_cpu(cpu, to_cpumask(cache_dev_map)); |
939 | 942 | ||
940 | kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD); | 943 | kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD); |
941 | return 0; | 944 | return 0; |
942 | } | 945 | } |
943 | 946 | ||
@@ -946,7 +949,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev) | |||
946 | unsigned int cpu = sys_dev->id; | 949 | unsigned int cpu = sys_dev->id; |
947 | unsigned long i; | 950 | unsigned long i; |
948 | 951 | ||
949 | if (per_cpu(cpuid4_info, cpu) == NULL) | 952 | if (per_cpu(ici_cpuid4_info, cpu) == NULL) |
950 | return; | 953 | return; |
951 | if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map))) | 954 | if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map))) |
952 | return; | 955 | return; |
@@ -954,7 +957,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev) | |||
954 | 957 | ||
955 | for (i = 0; i < num_cache_leaves; i++) | 958 | for (i = 0; i < num_cache_leaves; i++) |
956 | kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj)); | 959 | kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj)); |
957 | kobject_put(per_cpu(cache_kobject, cpu)); | 960 | kobject_put(per_cpu(ici_cache_kobject, cpu)); |
958 | cpuid4_cache_sysfs_exit(cpu); | 961 | cpuid4_cache_sysfs_exit(cpu); |
959 | } | 962 | } |
960 | 963 | ||
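The ici_ prefix threaded through the intel_cacheinfo.c hunks above gives each per-CPU symbol a subsystem-unique name. Per-CPU variables declared with DEFINE_PER_CPU share one kernel-wide namespace, so a generic name such as cpuid4_info can collide with other identifiers once the percpu accessors reference the symbol directly. A minimal sketch of the declaration/access pattern, using a hypothetical stand-in type cache_leaf rather than the file's private struct _cpuid4_info:

#include <linux/percpu.h>

struct cache_leaf { unsigned int level, size_kb; };     /* stand-in type */

/* the subsystem prefix keeps the symbol unique kernel-wide */
static DEFINE_PER_CPU(struct cache_leaf *, ici_cache_leaves);

static struct cache_leaf *leaf_for(unsigned int cpu, int index)
{
        struct cache_leaf *leaves = per_cpu(ici_cache_leaves, cpu);

        return leaves ? &leaves[index] : NULL;  /* NULL until allocated */
}

The NULL check mirrors the per_cpu(ici_cpuid4_info, i) guards above: the array only exists once detect_cache_attributes() has run for that CPU.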
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 0bcaa3875863..a8aacd4b513c 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c | |||
@@ -1388,13 +1388,14 @@ static void __mcheck_cpu_init_timer(void) | |||
1388 | struct timer_list *t = &__get_cpu_var(mce_timer); | 1388 | struct timer_list *t = &__get_cpu_var(mce_timer); |
1389 | int *n = &__get_cpu_var(mce_next_interval); | 1389 | int *n = &__get_cpu_var(mce_next_interval); |
1390 | 1390 | ||
1391 | setup_timer(t, mce_start_timer, smp_processor_id()); | ||
1392 | |||
1391 | if (mce_ignore_ce) | 1393 | if (mce_ignore_ce) |
1392 | return; | 1394 | return; |
1393 | 1395 | ||
1394 | *n = check_interval * HZ; | 1396 | *n = check_interval * HZ; |
1395 | if (!*n) | 1397 | if (!*n) |
1396 | return; | 1398 | return; |
1397 | setup_timer(t, mce_start_timer, smp_processor_id()); | ||
1398 | t->expires = round_jiffies(jiffies + *n); | 1399 | t->expires = round_jiffies(jiffies + *n); |
1399 | add_timer_on(t, smp_processor_id()); | 1400 | add_timer_on(t, smp_processor_id()); |
1400 | } | 1401 | } |
@@ -1928,7 +1929,7 @@ error2: | |||
1928 | sysdev_remove_file(&per_cpu(mce_dev, cpu), &mce_banks[j].attr); | 1929 | sysdev_remove_file(&per_cpu(mce_dev, cpu), &mce_banks[j].attr); |
1929 | error: | 1930 | error: |
1930 | while (--i >= 0) | 1931 | while (--i >= 0) |
1931 | sysdev_remove_file(&per_cpu(mce_dev, cpu), &mce_banks[i].attr); | 1932 | sysdev_remove_file(&per_cpu(mce_dev, cpu), mce_attrs[i]); |
1932 | 1933 | ||
1933 | sysdev_unregister(&per_cpu(mce_dev, cpu)); | 1934 | sysdev_unregister(&per_cpu(mce_dev, cpu)); |
1934 | 1935 | ||
@@ -2016,9 +2017,11 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
2016 | break; | 2017 | break; |
2017 | case CPU_DOWN_FAILED: | 2018 | case CPU_DOWN_FAILED: |
2018 | case CPU_DOWN_FAILED_FROZEN: | 2019 | case CPU_DOWN_FAILED_FROZEN: |
2019 | t->expires = round_jiffies(jiffies + | 2020 | if (!mce_ignore_ce && check_interval) { |
2021 | t->expires = round_jiffies(jiffies + | ||
2020 | __get_cpu_var(mce_next_interval)); | 2022 | __get_cpu_var(mce_next_interval)); |
2021 | add_timer_on(t, cpu); | 2023 | add_timer_on(t, cpu); |
2024 | } | ||
2022 | smp_call_function_single(cpu, mce_reenable_cpu, &action, 1); | 2025 | smp_call_function_single(cpu, mce_reenable_cpu, &action, 1); |
2023 | break; | 2026 | break; |
2024 | case CPU_POST_DEAD: | 2027 | case CPU_POST_DEAD: |
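Reordering __mcheck_cpu_init_timer() so setup_timer() runs before the early returns means the per-CPU timer object is initialised even when CE polling is disabled; presumably this keeps later del_timer_sync()/add_timer_on() calls in the hotplug path safe, and the CPU_DOWN_FAILED leg above now re-arms the timer only when polling is actually in use. A minimal module-style sketch of the same "always initialise, conditionally arm" rule (poll_timer and poll_fn are hypothetical names):

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list poll_timer;

static void poll_fn(unsigned long data)
{
        /* periodic work would go here */
}

static void poll_init(int enabled)
{
        setup_timer(&poll_timer, poll_fn, 0);   /* always initialise */
        if (!enabled)
                return;                         /* never armed */
        mod_timer(&poll_timer, round_jiffies(jiffies + HZ));
}

static void poll_exit(void)
{
        del_timer_sync(&poll_timer);    /* safe even if never armed */
}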
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c index 73c86db5acbe..09b1698e0466 100644 --- a/arch/x86/kernel/cpu/mtrr/cleanup.c +++ b/arch/x86/kernel/cpu/mtrr/cleanup.c | |||
@@ -170,6 +170,41 @@ static int __init cmp_range(const void *x1, const void *x2) | |||
170 | return start1 - start2; | 170 | return start1 - start2; |
171 | } | 171 | } |
172 | 172 | ||
173 | static int __init clean_sort_range(struct res_range *range, int az) | ||
174 | { | ||
175 | int i, j, k = az - 1, nr_range = 0; | ||
176 | |||
177 | for (i = 0; i < k; i++) { | ||
178 | if (range[i].end) | ||
179 | continue; | ||
180 | for (j = k; j > i; j--) { | ||
181 | if (range[j].end) { | ||
182 | k = j; | ||
183 | break; | ||
184 | } | ||
185 | } | ||
186 | if (j == i) | ||
187 | break; | ||
188 | range[i].start = range[k].start; | ||
189 | range[i].end = range[k].end; | ||
190 | range[k].start = 0; | ||
191 | range[k].end = 0; | ||
192 | k--; | ||
193 | } | ||
194 | /* count it */ | ||
195 | for (i = 0; i < az; i++) { | ||
196 | if (!range[i].end) { | ||
197 | nr_range = i; | ||
198 | break; | ||
199 | } | ||
200 | } | ||
201 | |||
202 | /* sort them */ | ||
203 | sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL); | ||
204 | |||
205 | return nr_range; | ||
206 | } | ||
207 | |||
173 | #define BIOS_BUG_MSG KERN_WARNING \ | 208 | #define BIOS_BUG_MSG KERN_WARNING \ |
174 | "WARNING: BIOS bug: VAR MTRR %d contains strange UC entry under 1M, check with your system vendor!\n" | 209 | "WARNING: BIOS bug: VAR MTRR %d contains strange UC entry under 1M, check with your system vendor!\n" |
175 | 210 | ||
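clean_sort_range() compacts the array before sorting: empty slots (end == 0) are back-filled from the tail, the populated prefix is counted, and only that prefix is passed to sort(). A user-space rendering of the same algorithm, runnable as-is; unlike the kernel copy it also handles a fully populated array by defaulting nr_range to az:

#include <stdlib.h>

struct res_range { unsigned long start, end; };

static int cmp_range(const void *a, const void *b)
{
        const struct res_range *r1 = a, *r2 = b;

        return (r1->start > r2->start) - (r1->start < r2->start);
}

static int clean_sort_range(struct res_range *range, int az)
{
        int i, j, k = az - 1, nr_range = az;

        for (i = 0; i < k; i++) {
                if (range[i].end)
                        continue;
                for (j = k; j > i; j--)         /* last non-empty slot */
                        if (range[j].end)
                                break;
                if (j == i)                     /* nothing left to pull in */
                        break;
                range[i] = range[j];            /* back-fill the hole */
                range[j].start = range[j].end = 0;
                k = j - 1;
        }
        for (i = 0; i < az; i++)                /* populated prefix length */
                if (!range[i].end) {
                        nr_range = i;
                        break;
                }
        qsort(range, nr_range, sizeof(*range), cmp_range);
        return nr_range;
}

The comparator above also sidesteps the truncation that the kernel's "start1 - start2" could hit for very large values.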
@@ -223,22 +258,18 @@ x86_get_mtrr_mem_range(struct res_range *range, int nr_range, | |||
223 | subtract_range(range, extra_remove_base, | 258 | subtract_range(range, extra_remove_base, |
224 | extra_remove_base + extra_remove_size - 1); | 259 | extra_remove_base + extra_remove_size - 1); |
225 | 260 | ||
226 | /* get new range num */ | ||
227 | nr_range = 0; | ||
228 | for (i = 0; i < RANGE_NUM; i++) { | ||
229 | if (!range[i].end) | ||
230 | continue; | ||
231 | nr_range++; | ||
232 | } | ||
233 | if (debug_print) { | 261 | if (debug_print) { |
234 | printk(KERN_DEBUG "After UC checking\n"); | 262 | printk(KERN_DEBUG "After UC checking\n"); |
235 | for (i = 0; i < nr_range; i++) | 263 | for (i = 0; i < RANGE_NUM; i++) { |
264 | if (!range[i].end) | ||
265 | continue; | ||
236 | printk(KERN_DEBUG "MTRR MAP PFN: %016lx - %016lx\n", | 266 | printk(KERN_DEBUG "MTRR MAP PFN: %016lx - %016lx\n", |
237 | range[i].start, range[i].end + 1); | 267 | range[i].start, range[i].end + 1); |
268 | } | ||
238 | } | 269 | } |
239 | 270 | ||
240 | /* sort the ranges */ | 271 | /* sort the ranges */ |
241 | sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL); | 272 | nr_range = clean_sort_range(range, RANGE_NUM); |
242 | if (debug_print) { | 273 | if (debug_print) { |
243 | printk(KERN_DEBUG "After sorting\n"); | 274 | printk(KERN_DEBUG "After sorting\n"); |
244 | for (i = 0; i < nr_range; i++) | 275 | for (i = 0; i < nr_range; i++) |
@@ -689,8 +720,6 @@ static int __init mtrr_need_cleanup(void) | |||
689 | continue; | 720 | continue; |
690 | if (!size) | 721 | if (!size) |
691 | type = MTRR_NUM_TYPES; | 722 | type = MTRR_NUM_TYPES; |
692 | if (type == MTRR_TYPE_WRPROT) | ||
693 | type = MTRR_TYPE_UNCACHABLE; | ||
694 | num[type]++; | 723 | num[type]++; |
695 | } | 724 | } |
696 | 725 | ||
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index c1bbed1021d9..45506d5dd8df 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c | |||
@@ -1286,7 +1286,7 @@ x86_perf_event_set_period(struct perf_event *event, | |||
1286 | return 0; | 1286 | return 0; |
1287 | 1287 | ||
1288 | /* | 1288 | /* |
1289 | * If we are way outside a reasoable range then just skip forward: | 1289 | * If we are way outside a reasonable range then just skip forward: |
1290 | */ | 1290 | */ |
1291 | if (unlikely(left <= -period)) { | 1291 | if (unlikely(left <= -period)) { |
1292 | left = period; | 1292 | left = period; |
@@ -1632,6 +1632,7 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc) | |||
1632 | 1632 | ||
1633 | data.period = event->hw.last_period; | 1633 | data.period = event->hw.last_period; |
1634 | data.addr = 0; | 1634 | data.addr = 0; |
1635 | data.raw = NULL; | ||
1635 | regs.ip = 0; | 1636 | regs.ip = 0; |
1636 | 1637 | ||
1637 | /* | 1638 | /* |
@@ -1749,6 +1750,7 @@ static int p6_pmu_handle_irq(struct pt_regs *regs) | |||
1749 | u64 val; | 1750 | u64 val; |
1750 | 1751 | ||
1751 | data.addr = 0; | 1752 | data.addr = 0; |
1753 | data.raw = NULL; | ||
1752 | 1754 | ||
1753 | cpuc = &__get_cpu_var(cpu_hw_events); | 1755 | cpuc = &__get_cpu_var(cpu_hw_events); |
1754 | 1756 | ||
@@ -1794,6 +1796,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) | |||
1794 | u64 ack, status; | 1796 | u64 ack, status; |
1795 | 1797 | ||
1796 | data.addr = 0; | 1798 | data.addr = 0; |
1799 | data.raw = NULL; | ||
1797 | 1800 | ||
1798 | cpuc = &__get_cpu_var(cpu_hw_events); | 1801 | cpuc = &__get_cpu_var(cpu_hw_events); |
1799 | 1802 | ||
@@ -1857,6 +1860,7 @@ static int amd_pmu_handle_irq(struct pt_regs *regs) | |||
1857 | u64 val; | 1860 | u64 val; |
1858 | 1861 | ||
1859 | data.addr = 0; | 1862 | data.addr = 0; |
1863 | data.raw = NULL; | ||
1860 | 1864 | ||
1861 | cpuc = &__get_cpu_var(cpu_hw_events); | 1865 | cpuc = &__get_cpu_var(cpu_hw_events); |
1862 | 1866 | ||
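The repeated data.raw = NULL lines matter because struct perf_sample_data is a stack object: if a consumer requested PERF_SAMPLE_RAW while .raw still held stack garbage, the sample-output path would chase a wild pointer. A hypothetical helper showing the fields these interrupt handlers must initialise by hand (init_sample is not in the patch; assuming <linux/perf_event.h> for the struct):

static void init_sample(struct perf_sample_data *data, u64 period)
{
        data->addr = 0;         /* no data address for these PMUs */
        data->raw  = NULL;      /* PERF_SAMPLE_RAW output checks this */
        data->period = period;
}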
@@ -2062,12 +2066,6 @@ static __init int p6_pmu_init(void) | |||
2062 | 2066 | ||
2063 | x86_pmu = p6_pmu; | 2067 | x86_pmu = p6_pmu; |
2064 | 2068 | ||
2065 | if (!cpu_has_apic) { | ||
2066 | pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n"); | ||
2067 | pr_info("no hardware sampling interrupt available.\n"); | ||
2068 | x86_pmu.apic = 0; | ||
2069 | } | ||
2070 | |||
2071 | return 0; | 2069 | return 0; |
2072 | } | 2070 | } |
2073 | 2071 | ||
@@ -2159,6 +2157,16 @@ static __init int amd_pmu_init(void) | |||
2159 | return 0; | 2157 | return 0; |
2160 | } | 2158 | } |
2161 | 2159 | ||
2160 | static void __init pmu_check_apic(void) | ||
2161 | { | ||
2162 | if (cpu_has_apic) | ||
2163 | return; | ||
2164 | |||
2165 | x86_pmu.apic = 0; | ||
2166 | pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n"); | ||
2167 | pr_info("no hardware sampling interrupt available.\n"); | ||
2168 | } | ||
2169 | |||
2162 | void __init init_hw_perf_events(void) | 2170 | void __init init_hw_perf_events(void) |
2163 | { | 2171 | { |
2164 | int err; | 2172 | int err; |
@@ -2180,6 +2188,8 @@ void __init init_hw_perf_events(void) | |||
2180 | return; | 2188 | return; |
2181 | } | 2189 | } |
2182 | 2190 | ||
2191 | pmu_check_apic(); | ||
2192 | |||
2183 | pr_cont("%s PMU driver.\n", x86_pmu.name); | 2193 | pr_cont("%s PMU driver.\n", x86_pmu.name); |
2184 | 2194 | ||
2185 | if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) { | 2195 | if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) { |
@@ -2287,7 +2297,7 @@ void callchain_store(struct perf_callchain_entry *entry, u64 ip) | |||
2287 | 2297 | ||
2288 | static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry); | 2298 | static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry); |
2289 | static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry); | 2299 | static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry); |
2290 | static DEFINE_PER_CPU(int, in_nmi_frame); | 2300 | static DEFINE_PER_CPU(int, in_ignored_frame); |
2291 | 2301 | ||
2292 | 2302 | ||
2293 | static void | 2303 | static void |
@@ -2303,8 +2313,9 @@ static void backtrace_warning(void *data, char *msg) | |||
2303 | 2313 | ||
2304 | static int backtrace_stack(void *data, char *name) | 2314 | static int backtrace_stack(void *data, char *name) |
2305 | { | 2315 | { |
2306 | per_cpu(in_nmi_frame, smp_processor_id()) = | 2316 | per_cpu(in_ignored_frame, smp_processor_id()) = |
2307 | x86_is_stack_id(NMI_STACK, name); | 2317 | x86_is_stack_id(NMI_STACK, name) || |
2318 | x86_is_stack_id(DEBUG_STACK, name); | ||
2308 | 2319 | ||
2309 | return 0; | 2320 | return 0; |
2310 | } | 2321 | } |
@@ -2313,7 +2324,7 @@ static void backtrace_address(void *data, unsigned long addr, int reliable) | |||
2313 | { | 2324 | { |
2314 | struct perf_callchain_entry *entry = data; | 2325 | struct perf_callchain_entry *entry = data; |
2315 | 2326 | ||
2316 | if (per_cpu(in_nmi_frame, smp_processor_id())) | 2327 | if (per_cpu(in_ignored_frame, smp_processor_id())) |
2317 | return; | 2328 | return; |
2318 | 2329 | ||
2319 | if (reliable) | 2330 | if (reliable) |
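Read together, the callchain hunks rename in_nmi_frame to in_ignored_frame and widen the filter: frames on the DEBUG exception stack are now skipped alongside NMI-stack frames, so a sample taken from a debug trap no longer records the exception stack itself. A paraphrase of the resulting pair of callbacks (using __get_cpu_var where the patch spells per_cpu(..., smp_processor_id())):

static DEFINE_PER_CPU(int, in_ignored_frame);

static int backtrace_stack(void *data, char *name)
{
        /* remember whether the walker just entered a stack whose
         * frames should not appear in the callchain */
        __get_cpu_var(in_ignored_frame) =
                x86_is_stack_id(NMI_STACK, name) ||
                x86_is_stack_id(DEBUG_STACK, name);

        return 0;       /* keep walking */
}

static void backtrace_address(void *data, unsigned long addr, int reliable)
{
        struct perf_callchain_entry *entry = data;

        if (__get_cpu_var(in_ignored_frame))
                return;                 /* drop ignored-stack frames */

        if (reliable)
                callchain_store(entry, addr);
}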
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c index ef42a038f1a6..1c47390dd0e5 100644 --- a/arch/x86/kernel/ds.c +++ b/arch/x86/kernel/ds.c | |||
@@ -265,13 +265,13 @@ struct ds_context { | |||
265 | int cpu; | 265 | int cpu; |
266 | }; | 266 | }; |
267 | 267 | ||
268 | static DEFINE_PER_CPU(struct ds_context *, cpu_context); | 268 | static DEFINE_PER_CPU(struct ds_context *, cpu_ds_context); |
269 | 269 | ||
270 | 270 | ||
271 | static struct ds_context *ds_get_context(struct task_struct *task, int cpu) | 271 | static struct ds_context *ds_get_context(struct task_struct *task, int cpu) |
272 | { | 272 | { |
273 | struct ds_context **p_context = | 273 | struct ds_context **p_context = |
274 | (task ? &task->thread.ds_ctx : &per_cpu(cpu_context, cpu)); | 274 | (task ? &task->thread.ds_ctx : &per_cpu(cpu_ds_context, cpu)); |
275 | struct ds_context *context = NULL; | 275 | struct ds_context *context = NULL; |
276 | struct ds_context *new_context = NULL; | 276 | struct ds_context *new_context = NULL; |
277 | 277 | ||
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c index 8e740934bd1f..b13af53883aa 100644 --- a/arch/x86/kernel/dumpstack_64.c +++ b/arch/x86/kernel/dumpstack_64.c | |||
@@ -103,6 +103,35 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack, | |||
103 | return NULL; | 103 | return NULL; |
104 | } | 104 | } |
105 | 105 | ||
106 | static inline int | ||
107 | in_irq_stack(unsigned long *stack, unsigned long *irq_stack, | ||
108 | unsigned long *irq_stack_end) | ||
109 | { | ||
110 | return (stack >= irq_stack && stack < irq_stack_end); | ||
111 | } | ||
112 | |||
113 | /* | ||
114 | * We are returning from the irq stack and going back to the previous one. | ||
115 | * If the previous stack is also in the irq stack, then bp in the first | ||
116 | * frame of the irq stack points to the previous, interrupted one. | ||
117 | * Otherwise we have another level of indirection: We first save | ||
118 | * the bp of the previous stack, then we switch the stack to the irq one | ||
119 | * and save a new bp that links to the previous one. | ||
120 | * (See save_args()) | ||
121 | */ | ||
122 | static inline unsigned long | ||
123 | fixup_bp_irq_link(unsigned long bp, unsigned long *stack, | ||
124 | unsigned long *irq_stack, unsigned long *irq_stack_end) | ||
125 | { | ||
126 | #ifdef CONFIG_FRAME_POINTER | ||
127 | struct stack_frame *frame = (struct stack_frame *)bp; | ||
128 | |||
129 | if (!in_irq_stack(stack, irq_stack, irq_stack_end)) | ||
130 | return (unsigned long)frame->next_frame; | ||
131 | #endif | ||
132 | return bp; | ||
133 | } | ||
134 | |||
106 | /* | 135 | /* |
107 | * x86-64 can have up to three kernel stacks: | 136 | * x86-64 can have up to three kernel stacks: |
108 | * process stack | 137 | * process stack |
@@ -175,7 +204,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, | |||
175 | irq_stack = irq_stack_end - | 204 | irq_stack = irq_stack_end - |
176 | (IRQ_STACK_SIZE - 64) / sizeof(*irq_stack); | 205 | (IRQ_STACK_SIZE - 64) / sizeof(*irq_stack); |
177 | 206 | ||
178 | if (stack >= irq_stack && stack < irq_stack_end) { | 207 | if (in_irq_stack(stack, irq_stack, irq_stack_end)) { |
179 | if (ops->stack(data, "IRQ") < 0) | 208 | if (ops->stack(data, "IRQ") < 0) |
180 | break; | 209 | break; |
181 | bp = print_context_stack(tinfo, stack, bp, | 210 | bp = print_context_stack(tinfo, stack, bp, |
@@ -186,6 +215,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, | |||
186 | * pointer (index -1 to end) in the IRQ stack: | 215 | * pointer (index -1 to end) in the IRQ stack: |
187 | */ | 216 | */ |
188 | stack = (unsigned long *) (irq_stack_end[-1]); | 217 | stack = (unsigned long *) (irq_stack_end[-1]); |
218 | bp = fixup_bp_irq_link(bp, stack, irq_stack, | ||
219 | irq_stack_end); | ||
189 | irq_stack_end = NULL; | 220 | irq_stack_end = NULL; |
190 | ops->stack(data, "EOI"); | 221 | ops->stack(data, "EOI"); |
191 | continue; | 222 | continue; |
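fixup_bp_irq_link() handles the frame-pointer discontinuity at the IRQ-stack boundary: when the stack being returned to lies outside the IRQ stack, bp still points at the link frame that save_args() built on the IRQ stack, so one extra dereference is needed to reach the interrupted frame. Under CONFIG_FRAME_POINTER the chain is just a linked list on the stack, which is all that dereference relies on (deref_frame is a hypothetical illustration, not kernel code):

struct stack_frame {
        struct stack_frame *next_frame; /* saved %rbp of the caller */
        unsigned long return_address;
};

static unsigned long deref_frame(unsigned long bp)
{
        struct stack_frame *frame = (struct stack_frame *)bp;

        /* follow the link frame to the interrupted frame's bp */
        return (unsigned long)frame->next_frame;
}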
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 4deb8fc849dd..673f693fb451 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S | |||
@@ -977,8 +977,8 @@ apicinterrupt UV_BAU_MESSAGE \ | |||
977 | #endif | 977 | #endif |
978 | apicinterrupt LOCAL_TIMER_VECTOR \ | 978 | apicinterrupt LOCAL_TIMER_VECTOR \ |
979 | apic_timer_interrupt smp_apic_timer_interrupt | 979 | apic_timer_interrupt smp_apic_timer_interrupt |
980 | apicinterrupt GENERIC_INTERRUPT_VECTOR \ | 980 | apicinterrupt X86_PLATFORM_IPI_VECTOR \ |
981 | generic_interrupt smp_generic_interrupt | 981 | x86_platform_ipi smp_x86_platform_ipi |
982 | 982 | ||
983 | #ifdef CONFIG_SMP | 983 | #ifdef CONFIG_SMP |
984 | apicinterrupt INVALIDATE_TLB_VECTOR_START+0 \ | 984 | apicinterrupt INVALIDATE_TLB_VECTOR_START+0 \ |
@@ -1076,10 +1076,10 @@ ENTRY(\sym) | |||
1076 | TRACE_IRQS_OFF | 1076 | TRACE_IRQS_OFF |
1077 | movq %rsp,%rdi /* pt_regs pointer */ | 1077 | movq %rsp,%rdi /* pt_regs pointer */ |
1078 | xorl %esi,%esi /* no error code */ | 1078 | xorl %esi,%esi /* no error code */ |
1079 | PER_CPU(init_tss, %rbp) | 1079 | PER_CPU(init_tss, %r12) |
1080 | subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp) | 1080 | subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%r12) |
1081 | call \do_sym | 1081 | call \do_sym |
1082 | addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp) | 1082 | addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%r12) |
1083 | jmp paranoid_exit /* %ebx: no swapgs flag */ | 1083 | jmp paranoid_exit /* %ebx: no swapgs flag */ |
1084 | CFI_ENDPROC | 1084 | CFI_ENDPROC |
1085 | END(\sym) | 1085 | END(\sym) |
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 5a1b9758fd62..309689245431 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c | |||
@@ -189,9 +189,26 @@ static void wait_for_nmi(void) | |||
189 | nmi_wait_count++; | 189 | nmi_wait_count++; |
190 | } | 190 | } |
191 | 191 | ||
192 | static inline int | ||
193 | within(unsigned long addr, unsigned long start, unsigned long end) | ||
194 | { | ||
195 | return addr >= start && addr < end; | ||
196 | } | ||
197 | |||
192 | static int | 198 | static int |
193 | do_ftrace_mod_code(unsigned long ip, void *new_code) | 199 | do_ftrace_mod_code(unsigned long ip, void *new_code) |
194 | { | 200 | { |
201 | /* | ||
202 | * On x86_64, kernel text mappings are mapped read-only with | ||
203 | * CONFIG_DEBUG_RODATA. So we use the kernel identity mapping instead | ||
204 | * of the kernel text mapping to modify the kernel text. | ||
205 | * | ||
206 | * For 32-bit kernels, these mappings are the same and we can use | ||
207 | * kernel identity mapping to modify code. | ||
208 | */ | ||
209 | if (within(ip, (unsigned long)_text, (unsigned long)_etext)) | ||
210 | ip = (unsigned long)__va(__pa(ip)); | ||
211 | |||
195 | mod_code_ip = (void *)ip; | 212 | mod_code_ip = (void *)ip; |
196 | mod_code_newcode = new_code; | 213 | mod_code_newcode = new_code; |
197 | 214 | ||
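The within()/__va(__pa(ip)) dance exploits the fact that the same physical bytes are mapped twice on x86_64: once through the kernel text mapping (read-only under CONFIG_DEBUG_RODATA) and once through the writable identity mapping. Folding a text address down to physical and lifting it back up yields a writable alias of the instruction to be patched. A hedged sketch of that translation as a standalone helper (writable_alias is hypothetical):

#include <asm/page.h>           /* __pa(), __va() */
#include <asm/sections.h>       /* _text, _etext */

static void *writable_alias(unsigned long ip)
{
        /* only core-kernel text needs the detour; module text is
         * reached through its own writable mapping */
        if (ip >= (unsigned long)_text && ip < (unsigned long)_etext)
                ip = (unsigned long)__va(__pa(ip));

        return (void *)ip;
}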
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index 050c278481b1..7fd318bac59c 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S | |||
@@ -18,6 +18,8 @@ | |||
18 | #include <asm/asm-offsets.h> | 18 | #include <asm/asm-offsets.h> |
19 | #include <asm/setup.h> | 19 | #include <asm/setup.h> |
20 | #include <asm/processor-flags.h> | 20 | #include <asm/processor-flags.h> |
21 | #include <asm/msr-index.h> | ||
22 | #include <asm/cpufeature.h> | ||
21 | #include <asm/percpu.h> | 23 | #include <asm/percpu.h> |
22 | 24 | ||
23 | /* Physical address */ | 25 | /* Physical address */ |
@@ -297,25 +299,27 @@ ENTRY(startup_32_smp) | |||
297 | orl %edx,%eax | 299 | orl %edx,%eax |
298 | movl %eax,%cr4 | 300 | movl %eax,%cr4 |
299 | 301 | ||
300 | btl $5, %eax # check if PAE is enabled | 302 | testb $X86_CR4_PAE, %al # check if PAE is enabled |
301 | jnc 6f | 303 | jz 6f |
302 | 304 | ||
303 | /* Check if extended functions are implemented */ | 305 | /* Check if extended functions are implemented */ |
304 | movl $0x80000000, %eax | 306 | movl $0x80000000, %eax |
305 | cpuid | 307 | cpuid |
306 | cmpl $0x80000000, %eax | 308 | /* Value must be in the range 0x80000001 to 0x8000ffff */ |
307 | jbe 6f | 309 | subl $0x80000001, %eax |
310 | cmpl $(0x8000ffff-0x80000001), %eax | ||
311 | ja 6f | ||
308 | mov $0x80000001, %eax | 312 | mov $0x80000001, %eax |
309 | cpuid | 313 | cpuid |
310 | /* Execute Disable bit supported? */ | 314 | /* Execute Disable bit supported? */ |
311 | btl $20, %edx | 315 | btl $(X86_FEATURE_NX & 31), %edx |
312 | jnc 6f | 316 | jnc 6f |
313 | 317 | ||
314 | /* Setup EFER (Extended Feature Enable Register) */ | 318 | /* Setup EFER (Extended Feature Enable Register) */ |
315 | movl $0xc0000080, %ecx | 319 | movl $MSR_EFER, %ecx |
316 | rdmsr | 320 | rdmsr |
317 | 321 | ||
318 | btsl $11, %eax | 322 | btsl $_EFER_NX, %eax |
319 | /* Make changes effective */ | 323 | /* Make changes effective */ |
320 | wrmsr | 324 | wrmsr |
321 | 325 | ||
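The head_32.S hunk is largely readability: magic numbers become the named constants from msr-index.h and cpufeature.h, and the reworked range check verifies that CPUID leaf 0x80000001 actually exists before it is queried for NX. In C, with the <asm/msr.h> helpers available, the same EFER update would read roughly as below; the assembly must spell it out by hand because it runs before any C environment exists, and a real caller must first confirm NX support via CPUID as the code above does:

#include <asm/msr.h>            /* rdmsrl(), wrmsrl() */
#include <asm/msr-index.h>      /* MSR_EFER, EFER_NX */

static void enable_nx(void)
{
        u64 efer;

        rdmsrl(MSR_EFER, efer);
        efer |= EFER_NX;        /* i.e. 1 << _EFER_NX */
        wrmsrl(MSR_EFER, efer);
}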
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 22db86a37643..2d8b5035371c 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S | |||
@@ -262,11 +262,11 @@ ENTRY(secondary_startup_64) | |||
262 | .quad x86_64_start_kernel | 262 | .quad x86_64_start_kernel |
263 | ENTRY(initial_gs) | 263 | ENTRY(initial_gs) |
264 | .quad INIT_PER_CPU_VAR(irq_stack_union) | 264 | .quad INIT_PER_CPU_VAR(irq_stack_union) |
265 | __FINITDATA | ||
266 | 265 | ||
267 | ENTRY(stack_start) | 266 | ENTRY(stack_start) |
268 | .quad init_thread_union+THREAD_SIZE-8 | 267 | .quad init_thread_union+THREAD_SIZE-8 |
269 | .word 0 | 268 | .word 0 |
269 | __FINITDATA | ||
270 | 270 | ||
271 | bad_address: | 271 | bad_address: |
272 | jmp bad_address | 272 | jmp bad_address |
@@ -340,6 +340,7 @@ ENTRY(name) | |||
340 | i = i + 1 ; \ | 340 | i = i + 1 ; \ |
341 | .endr | 341 | .endr |
342 | 342 | ||
343 | .data | ||
343 | /* | 344 | /* |
344 | * This default setting generates an ident mapping at address 0x100000 | 345 | * This default setting generates an ident mapping at address 0x100000 |
345 | * and a mapping for the kernel that precisely maps virtual address | 346 | * and a mapping for the kernel that precisely maps virtual address |
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index dedc2bddf7a5..ba6e65884603 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c | |||
@@ -33,6 +33,7 @@ | |||
33 | * HPET address is set in acpi/boot.c, when an ACPI entry exists | 33 | * HPET address is set in acpi/boot.c, when an ACPI entry exists |
34 | */ | 34 | */ |
35 | unsigned long hpet_address; | 35 | unsigned long hpet_address; |
36 | u8 hpet_blockid; /* OS timer block num */ | ||
36 | #ifdef CONFIG_PCI_MSI | 37 | #ifdef CONFIG_PCI_MSI |
37 | static unsigned long hpet_num_timers; | 38 | static unsigned long hpet_num_timers; |
38 | #endif | 39 | #endif |
@@ -47,12 +48,12 @@ struct hpet_dev { | |||
47 | char name[10]; | 48 | char name[10]; |
48 | }; | 49 | }; |
49 | 50 | ||
50 | unsigned long hpet_readl(unsigned long a) | 51 | inline unsigned int hpet_readl(unsigned int a) |
51 | { | 52 | { |
52 | return readl(hpet_virt_address + a); | 53 | return readl(hpet_virt_address + a); |
53 | } | 54 | } |
54 | 55 | ||
55 | static inline void hpet_writel(unsigned long d, unsigned long a) | 56 | static inline void hpet_writel(unsigned int d, unsigned int a) |
56 | { | 57 | { |
57 | writel(d, hpet_virt_address + a); | 58 | writel(d, hpet_virt_address + a); |
58 | } | 59 | } |
@@ -167,7 +168,7 @@ do { \ | |||
167 | 168 | ||
168 | static void hpet_reserve_msi_timers(struct hpet_data *hd); | 169 | static void hpet_reserve_msi_timers(struct hpet_data *hd); |
169 | 170 | ||
170 | static void hpet_reserve_platform_timers(unsigned long id) | 171 | static void hpet_reserve_platform_timers(unsigned int id) |
171 | { | 172 | { |
172 | struct hpet __iomem *hpet = hpet_virt_address; | 173 | struct hpet __iomem *hpet = hpet_virt_address; |
173 | struct hpet_timer __iomem *timer = &hpet->hpet_timers[2]; | 174 | struct hpet_timer __iomem *timer = &hpet->hpet_timers[2]; |
@@ -205,7 +206,7 @@ static void hpet_reserve_platform_timers(unsigned long id) | |||
205 | 206 | ||
206 | } | 207 | } |
207 | #else | 208 | #else |
208 | static void hpet_reserve_platform_timers(unsigned long id) { } | 209 | static void hpet_reserve_platform_timers(unsigned int id) { } |
209 | #endif | 210 | #endif |
210 | 211 | ||
211 | /* | 212 | /* |
@@ -246,7 +247,7 @@ static void hpet_reset_counter(void) | |||
246 | 247 | ||
247 | static void hpet_start_counter(void) | 248 | static void hpet_start_counter(void) |
248 | { | 249 | { |
249 | unsigned long cfg = hpet_readl(HPET_CFG); | 250 | unsigned int cfg = hpet_readl(HPET_CFG); |
250 | cfg |= HPET_CFG_ENABLE; | 251 | cfg |= HPET_CFG_ENABLE; |
251 | hpet_writel(cfg, HPET_CFG); | 252 | hpet_writel(cfg, HPET_CFG); |
252 | } | 253 | } |
@@ -271,7 +272,7 @@ static void hpet_resume_counter(void) | |||
271 | 272 | ||
272 | static void hpet_enable_legacy_int(void) | 273 | static void hpet_enable_legacy_int(void) |
273 | { | 274 | { |
274 | unsigned long cfg = hpet_readl(HPET_CFG); | 275 | unsigned int cfg = hpet_readl(HPET_CFG); |
275 | 276 | ||
276 | cfg |= HPET_CFG_LEGACY; | 277 | cfg |= HPET_CFG_LEGACY; |
277 | hpet_writel(cfg, HPET_CFG); | 278 | hpet_writel(cfg, HPET_CFG); |
@@ -314,7 +315,7 @@ static int hpet_setup_msi_irq(unsigned int irq); | |||
314 | static void hpet_set_mode(enum clock_event_mode mode, | 315 | static void hpet_set_mode(enum clock_event_mode mode, |
315 | struct clock_event_device *evt, int timer) | 316 | struct clock_event_device *evt, int timer) |
316 | { | 317 | { |
317 | unsigned long cfg, cmp, now; | 318 | unsigned int cfg, cmp, now; |
318 | uint64_t delta; | 319 | uint64_t delta; |
319 | 320 | ||
320 | switch (mode) { | 321 | switch (mode) { |
@@ -323,7 +324,7 @@ static void hpet_set_mode(enum clock_event_mode mode, | |||
323 | delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * evt->mult; | 324 | delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * evt->mult; |
324 | delta >>= evt->shift; | 325 | delta >>= evt->shift; |
325 | now = hpet_readl(HPET_COUNTER); | 326 | now = hpet_readl(HPET_COUNTER); |
326 | cmp = now + (unsigned long) delta; | 327 | cmp = now + (unsigned int) delta; |
327 | cfg = hpet_readl(HPET_Tn_CFG(timer)); | 328 | cfg = hpet_readl(HPET_Tn_CFG(timer)); |
328 | /* Make sure we use edge triggered interrupts */ | 329 | /* Make sure we use edge triggered interrupts */ |
329 | cfg &= ~HPET_TN_LEVEL; | 330 | cfg &= ~HPET_TN_LEVEL; |
@@ -339,7 +340,7 @@ static void hpet_set_mode(enum clock_event_mode mode, | |||
339 | * (See AMD-8111 HyperTransport I/O Hub Data Sheet, | 340 | * (See AMD-8111 HyperTransport I/O Hub Data Sheet, |
340 | * Publication # 24674) | 341 | * Publication # 24674) |
341 | */ | 342 | */ |
342 | hpet_writel((unsigned long) delta, HPET_Tn_CMP(timer)); | 343 | hpet_writel((unsigned int) delta, HPET_Tn_CMP(timer)); |
343 | hpet_start_counter(); | 344 | hpet_start_counter(); |
344 | hpet_print_config(); | 345 | hpet_print_config(); |
345 | break; | 346 | break; |
@@ -383,13 +384,24 @@ static int hpet_next_event(unsigned long delta, | |||
383 | hpet_writel(cnt, HPET_Tn_CMP(timer)); | 384 | hpet_writel(cnt, HPET_Tn_CMP(timer)); |
384 | 385 | ||
385 | /* | 386 | /* |
386 | * We need to read back the CMP register to make sure that | 387 | * We need to read back the CMP register on certain HPET |
387 | * what we wrote hit the chip before we compare it to the | 388 | * implementations (ATI chipsets) which seem to delay the |
388 | * counter. | 389 | * transfer of the compare register into the internal compare |
390 | * logic. With small deltas this might actually be too late as | ||
391 | * the counter could already be higher than the compare value | ||
392 | * at that point and we would wait for the next hpet interrupt | ||
393 | * forever. We found out that reading the CMP register back | ||
394 | * forces the transfer so we can rely on the comparison with | ||
395 | * the counter register below. If the read back from the | ||
396 | * compare register does not match the value we programmed | ||
397 | * then we might have a real hardware problem. We cannot do | ||
398 | * much about it here, but at least alert the user/admin with | ||
399 | * a prominent warning. | ||
389 | */ | 400 | */ |
390 | WARN_ON_ONCE((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt); | 401 | WARN_ONCE(hpet_readl(HPET_Tn_CMP(timer)) != cnt, |
402 | KERN_WARNING "hpet: compare register read back failed.\n"); | ||
391 | 403 | ||
392 | return (s32)((u32)hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0; | 404 | return (s32)(hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0; |
393 | } | 405 | } |
394 | 406 | ||
395 | static void hpet_legacy_set_mode(enum clock_event_mode mode, | 407 | static void hpet_legacy_set_mode(enum clock_event_mode mode, |
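hpet_next_event() now documents why the compare register is read back: on some chipsets the write is latched lazily, and with a small delta the counter could pass the compare value before the transfer completes, losing the interrupt forever. The read back forces the transfer, doubles as a sanity check, and the final signed comparison catches a deadline that has already elapsed. The pattern, as a hypothetical helper:

static int program_deadline(int timer, u32 cnt)
{
        hpet_writel(cnt, HPET_Tn_CMP(timer));

        /* the read back forces the transfer into the compare logic
         * and flags genuinely broken hardware */
        WARN_ONCE(hpet_readl(HPET_Tn_CMP(timer)) != cnt,
                  "hpet: compare register read back failed.\n");

        /* signed difference is wrap-safe: >= 0 means the counter has
         * already passed cnt and no interrupt will fire */
        return (s32)(hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
}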
@@ -415,7 +427,7 @@ static struct hpet_dev *hpet_devs; | |||
415 | void hpet_msi_unmask(unsigned int irq) | 427 | void hpet_msi_unmask(unsigned int irq) |
416 | { | 428 | { |
417 | struct hpet_dev *hdev = get_irq_data(irq); | 429 | struct hpet_dev *hdev = get_irq_data(irq); |
418 | unsigned long cfg; | 430 | unsigned int cfg; |
419 | 431 | ||
420 | /* unmask it */ | 432 | /* unmask it */ |
421 | cfg = hpet_readl(HPET_Tn_CFG(hdev->num)); | 433 | cfg = hpet_readl(HPET_Tn_CFG(hdev->num)); |
@@ -425,7 +437,7 @@ void hpet_msi_unmask(unsigned int irq) | |||
425 | 437 | ||
426 | void hpet_msi_mask(unsigned int irq) | 438 | void hpet_msi_mask(unsigned int irq) |
427 | { | 439 | { |
428 | unsigned long cfg; | 440 | unsigned int cfg; |
429 | struct hpet_dev *hdev = get_irq_data(irq); | 441 | struct hpet_dev *hdev = get_irq_data(irq); |
430 | 442 | ||
431 | /* mask it */ | 443 | /* mask it */ |
@@ -467,7 +479,7 @@ static int hpet_msi_next_event(unsigned long delta, | |||
467 | 479 | ||
468 | static int hpet_setup_msi_irq(unsigned int irq) | 480 | static int hpet_setup_msi_irq(unsigned int irq) |
469 | { | 481 | { |
470 | if (arch_setup_hpet_msi(irq)) { | 482 | if (arch_setup_hpet_msi(irq, hpet_blockid)) { |
471 | destroy_irq(irq); | 483 | destroy_irq(irq); |
472 | return -EINVAL; | 484 | return -EINVAL; |
473 | } | 485 | } |
@@ -584,6 +596,8 @@ static void hpet_msi_capability_lookup(unsigned int start_timer) | |||
584 | unsigned int num_timers_used = 0; | 596 | unsigned int num_timers_used = 0; |
585 | int i; | 597 | int i; |
586 | 598 | ||
599 | if (boot_cpu_has(X86_FEATURE_ARAT)) | ||
600 | return; | ||
587 | id = hpet_readl(HPET_ID); | 601 | id = hpet_readl(HPET_ID); |
588 | 602 | ||
589 | num_timers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT); | 603 | num_timers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT); |
@@ -598,7 +612,7 @@ static void hpet_msi_capability_lookup(unsigned int start_timer) | |||
598 | 612 | ||
599 | for (i = start_timer; i < num_timers - RESERVE_TIMERS; i++) { | 613 | for (i = start_timer; i < num_timers - RESERVE_TIMERS; i++) { |
600 | struct hpet_dev *hdev = &hpet_devs[num_timers_used]; | 614 | struct hpet_dev *hdev = &hpet_devs[num_timers_used]; |
601 | unsigned long cfg = hpet_readl(HPET_Tn_CFG(i)); | 615 | unsigned int cfg = hpet_readl(HPET_Tn_CFG(i)); |
602 | 616 | ||
603 | /* Only consider HPET timer with MSI support */ | 617 | /* Only consider HPET timer with MSI support */ |
604 | if (!(cfg & HPET_TN_FSB_CAP)) | 618 | if (!(cfg & HPET_TN_FSB_CAP)) |
@@ -813,7 +827,7 @@ static int hpet_clocksource_register(void) | |||
813 | */ | 827 | */ |
814 | int __init hpet_enable(void) | 828 | int __init hpet_enable(void) |
815 | { | 829 | { |
816 | unsigned long id; | 830 | unsigned int id; |
817 | int i; | 831 | int i; |
818 | 832 | ||
819 | if (!is_hpet_capable()) | 833 | if (!is_hpet_capable()) |
@@ -872,10 +886,8 @@ int __init hpet_enable(void) | |||
872 | 886 | ||
873 | if (id & HPET_ID_LEGSUP) { | 887 | if (id & HPET_ID_LEGSUP) { |
874 | hpet_legacy_clockevent_register(); | 888 | hpet_legacy_clockevent_register(); |
875 | hpet_msi_capability_lookup(2); | ||
876 | return 1; | 889 | return 1; |
877 | } | 890 | } |
878 | hpet_msi_capability_lookup(0); | ||
879 | return 0; | 891 | return 0; |
880 | 892 | ||
881 | out_nohpet: | 893 | out_nohpet: |
@@ -908,9 +920,17 @@ static __init int hpet_late_init(void) | |||
908 | if (!hpet_virt_address) | 920 | if (!hpet_virt_address) |
909 | return -ENODEV; | 921 | return -ENODEV; |
910 | 922 | ||
923 | if (hpet_readl(HPET_ID) & HPET_ID_LEGSUP) | ||
924 | hpet_msi_capability_lookup(2); | ||
925 | else | ||
926 | hpet_msi_capability_lookup(0); | ||
927 | |||
911 | hpet_reserve_platform_timers(hpet_readl(HPET_ID)); | 928 | hpet_reserve_platform_timers(hpet_readl(HPET_ID)); |
912 | hpet_print_config(); | 929 | hpet_print_config(); |
913 | 930 | ||
931 | if (boot_cpu_has(X86_FEATURE_ARAT)) | ||
932 | return 0; | ||
933 | |||
914 | for_each_online_cpu(cpu) { | 934 | for_each_online_cpu(cpu) { |
915 | hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu); | 935 | hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu); |
916 | } | 936 | } |
@@ -925,7 +945,7 @@ fs_initcall(hpet_late_init); | |||
925 | void hpet_disable(void) | 945 | void hpet_disable(void) |
926 | { | 946 | { |
927 | if (is_hpet_capable()) { | 947 | if (is_hpet_capable()) { |
928 | unsigned long cfg = hpet_readl(HPET_CFG); | 948 | unsigned int cfg = hpet_readl(HPET_CFG); |
929 | 949 | ||
930 | if (hpet_legacy_int_enabled) { | 950 | if (hpet_legacy_int_enabled) { |
931 | cfg &= ~HPET_CFG_LEGACY; | 951 | cfg &= ~HPET_CFG_LEGACY; |
@@ -965,8 +985,8 @@ static int hpet_prev_update_sec; | |||
965 | static struct rtc_time hpet_alarm_time; | 985 | static struct rtc_time hpet_alarm_time; |
966 | static unsigned long hpet_pie_count; | 986 | static unsigned long hpet_pie_count; |
967 | static u32 hpet_t1_cmp; | 987 | static u32 hpet_t1_cmp; |
968 | static unsigned long hpet_default_delta; | 988 | static u32 hpet_default_delta; |
969 | static unsigned long hpet_pie_delta; | 989 | static u32 hpet_pie_delta; |
970 | static unsigned long hpet_pie_limit; | 990 | static unsigned long hpet_pie_limit; |
971 | 991 | ||
972 | static rtc_irq_handler irq_handler; | 992 | static rtc_irq_handler irq_handler; |
@@ -1017,7 +1037,8 @@ EXPORT_SYMBOL_GPL(hpet_unregister_irq_handler); | |||
1017 | */ | 1037 | */ |
1018 | int hpet_rtc_timer_init(void) | 1038 | int hpet_rtc_timer_init(void) |
1019 | { | 1039 | { |
1020 | unsigned long cfg, cnt, delta, flags; | 1040 | unsigned int cfg, cnt, delta; |
1041 | unsigned long flags; | ||
1021 | 1042 | ||
1022 | if (!is_hpet_enabled()) | 1043 | if (!is_hpet_enabled()) |
1023 | return 0; | 1044 | return 0; |
@@ -1027,7 +1048,7 @@ int hpet_rtc_timer_init(void) | |||
1027 | 1048 | ||
1028 | clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC; | 1049 | clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC; |
1029 | clc >>= hpet_clockevent.shift + DEFAULT_RTC_SHIFT; | 1050 | clc >>= hpet_clockevent.shift + DEFAULT_RTC_SHIFT; |
1030 | hpet_default_delta = (unsigned long) clc; | 1051 | hpet_default_delta = clc; |
1031 | } | 1052 | } |
1032 | 1053 | ||
1033 | if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit) | 1054 | if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit) |
@@ -1113,7 +1134,7 @@ int hpet_set_periodic_freq(unsigned long freq) | |||
1113 | clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC; | 1134 | clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC; |
1114 | do_div(clc, freq); | 1135 | do_div(clc, freq); |
1115 | clc >>= hpet_clockevent.shift; | 1136 | clc >>= hpet_clockevent.shift; |
1116 | hpet_pie_delta = (unsigned long) clc; | 1137 | hpet_pie_delta = clc; |
1117 | } | 1138 | } |
1118 | return 1; | 1139 | return 1; |
1119 | } | 1140 | } |
@@ -1127,7 +1148,7 @@ EXPORT_SYMBOL_GPL(hpet_rtc_dropped_irq); | |||
1127 | 1148 | ||
1128 | static void hpet_rtc_timer_reinit(void) | 1149 | static void hpet_rtc_timer_reinit(void) |
1129 | { | 1150 | { |
1130 | unsigned long cfg, delta; | 1151 | unsigned int cfg, delta; |
1131 | int lost_ints = -1; | 1152 | int lost_ints = -1; |
1132 | 1153 | ||
1133 | if (unlikely(!hpet_rtc_flags)) { | 1154 | if (unlikely(!hpet_rtc_flags)) { |
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index d42f65ac4927..05d5fec64a94 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c | |||
@@ -362,8 +362,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp, | |||
362 | return ret; | 362 | return ret; |
363 | } | 363 | } |
364 | 364 | ||
365 | if (bp->callback) | 365 | ret = arch_store_info(bp); |
366 | ret = arch_store_info(bp); | ||
367 | 366 | ||
368 | if (ret < 0) | 367 | if (ret < 0) |
369 | return ret; | 368 | return ret; |
@@ -519,7 +518,7 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args) | |||
519 | break; | 518 | break; |
520 | } | 519 | } |
521 | 520 | ||
522 | (bp->callback)(bp, args->regs); | 521 | perf_bp_event(bp, args->regs); |
523 | 522 | ||
524 | rcu_read_unlock(); | 523 | rcu_read_unlock(); |
525 | } | 524 | } |
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index fee6cc2b2079..664bcb7384ac 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c | |||
@@ -18,7 +18,7 @@ | |||
18 | atomic_t irq_err_count; | 18 | atomic_t irq_err_count; |
19 | 19 | ||
20 | /* Function pointer for generic interrupt vector handling */ | 20 | /* Function pointer for generic interrupt vector handling */ |
21 | void (*generic_interrupt_extension)(void) = NULL; | 21 | void (*x86_platform_ipi_callback)(void) = NULL; |
22 | 22 | ||
23 | /* | 23 | /* |
24 | * 'what should we do if we get a hw irq event on an illegal vector'. | 24 | * 'what should we do if we get a hw irq event on an illegal vector'. |
@@ -72,10 +72,10 @@ static int show_other_interrupts(struct seq_file *p, int prec) | |||
72 | seq_printf(p, "%10u ", irq_stats(j)->apic_pending_irqs); | 72 | seq_printf(p, "%10u ", irq_stats(j)->apic_pending_irqs); |
73 | seq_printf(p, " Performance pending work\n"); | 73 | seq_printf(p, " Performance pending work\n"); |
74 | #endif | 74 | #endif |
75 | if (generic_interrupt_extension) { | 75 | if (x86_platform_ipi_callback) { |
76 | seq_printf(p, "%*s: ", prec, "PLT"); | 76 | seq_printf(p, "%*s: ", prec, "PLT"); |
77 | for_each_online_cpu(j) | 77 | for_each_online_cpu(j) |
78 | seq_printf(p, "%10u ", irq_stats(j)->generic_irqs); | 78 | seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis); |
79 | seq_printf(p, " Platform interrupts\n"); | 79 | seq_printf(p, " Platform interrupts\n"); |
80 | } | 80 | } |
81 | #ifdef CONFIG_SMP | 81 | #ifdef CONFIG_SMP |
@@ -187,8 +187,8 @@ u64 arch_irq_stat_cpu(unsigned int cpu) | |||
187 | sum += irq_stats(cpu)->apic_perf_irqs; | 187 | sum += irq_stats(cpu)->apic_perf_irqs; |
188 | sum += irq_stats(cpu)->apic_pending_irqs; | 188 | sum += irq_stats(cpu)->apic_pending_irqs; |
189 | #endif | 189 | #endif |
190 | if (generic_interrupt_extension) | 190 | if (x86_platform_ipi_callback) |
191 | sum += irq_stats(cpu)->generic_irqs; | 191 | sum += irq_stats(cpu)->x86_platform_ipis; |
192 | #ifdef CONFIG_SMP | 192 | #ifdef CONFIG_SMP |
193 | sum += irq_stats(cpu)->irq_resched_count; | 193 | sum += irq_stats(cpu)->irq_resched_count; |
194 | sum += irq_stats(cpu)->irq_call_count; | 194 | sum += irq_stats(cpu)->irq_call_count; |
@@ -251,9 +251,9 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs) | |||
251 | } | 251 | } |
252 | 252 | ||
253 | /* | 253 | /* |
254 | * Handler for GENERIC_INTERRUPT_VECTOR. | 254 | * Handler for X86_PLATFORM_IPI_VECTOR. |
255 | */ | 255 | */ |
256 | void smp_generic_interrupt(struct pt_regs *regs) | 256 | void smp_x86_platform_ipi(struct pt_regs *regs) |
257 | { | 257 | { |
258 | struct pt_regs *old_regs = set_irq_regs(regs); | 258 | struct pt_regs *old_regs = set_irq_regs(regs); |
259 | 259 | ||
@@ -263,10 +263,10 @@ void smp_generic_interrupt(struct pt_regs *regs) | |||
263 | 263 | ||
264 | irq_enter(); | 264 | irq_enter(); |
265 | 265 | ||
266 | inc_irq_stat(generic_irqs); | 266 | inc_irq_stat(x86_platform_ipis); |
267 | 267 | ||
268 | if (generic_interrupt_extension) | 268 | if (x86_platform_ipi_callback) |
269 | generic_interrupt_extension(); | 269 | x86_platform_ipi_callback(); |
270 | 270 | ||
271 | irq_exit(); | 271 | irq_exit(); |
272 | 272 | ||
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c index 40f30773fb29..d5932226614f 100644 --- a/arch/x86/kernel/irqinit.c +++ b/arch/x86/kernel/irqinit.c | |||
@@ -200,8 +200,8 @@ static void __init apic_intr_init(void) | |||
200 | /* self generated IPI for local APIC timer */ | 200 | /* self generated IPI for local APIC timer */ |
201 | alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt); | 201 | alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt); |
202 | 202 | ||
203 | /* generic IPI for platform specific use */ | 203 | /* IPI for X86 platform specific use */ |
204 | alloc_intr_gate(GENERIC_INTERRUPT_VECTOR, generic_interrupt); | 204 | alloc_intr_gate(X86_PLATFORM_IPI_VECTOR, x86_platform_ipi); |
205 | 205 | ||
206 | /* IPI vectors for APIC spurious and error interrupts */ | 206 | /* IPI vectors for APIC spurious and error interrupts */ |
207 | alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt); | 207 | alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt); |
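The generic_interrupt → x86_platform_ipi rename in irq.c and irqinit.c is mechanical, but it clarifies the contract: there is exactly one kernel-wide callback slot, invoked in hard-irq context whenever X86_PLATFORM_IPI_VECTOR fires. A hedged usage sketch (my_platform_work and the -EBUSY guard are illustrative, not part of the patch):

static void my_platform_work(void)
{
        /* acknowledge and handle the platform event */
}

static int __init my_platform_init(void)
{
        if (x86_platform_ipi_callback)
                return -EBUSY;          /* single-slot hook */

        x86_platform_ipi_callback = my_platform_work;
        return 0;
}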
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index 20a5b3689463..dd74fe7273b1 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c | |||
@@ -86,9 +86,15 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) | |||
86 | gdb_regs[GDB_DS] = regs->ds; | 86 | gdb_regs[GDB_DS] = regs->ds; |
87 | gdb_regs[GDB_ES] = regs->es; | 87 | gdb_regs[GDB_ES] = regs->es; |
88 | gdb_regs[GDB_CS] = regs->cs; | 88 | gdb_regs[GDB_CS] = regs->cs; |
89 | gdb_regs[GDB_SS] = __KERNEL_DS; | ||
90 | gdb_regs[GDB_FS] = 0xFFFF; | 89 | gdb_regs[GDB_FS] = 0xFFFF; |
91 | gdb_regs[GDB_GS] = 0xFFFF; | 90 | gdb_regs[GDB_GS] = 0xFFFF; |
91 | if (user_mode_vm(regs)) { | ||
92 | gdb_regs[GDB_SS] = regs->ss; | ||
93 | gdb_regs[GDB_SP] = regs->sp; | ||
94 | } else { | ||
95 | gdb_regs[GDB_SS] = __KERNEL_DS; | ||
96 | gdb_regs[GDB_SP] = kernel_stack_pointer(regs); | ||
97 | } | ||
92 | #else | 98 | #else |
93 | gdb_regs[GDB_R8] = regs->r8; | 99 | gdb_regs[GDB_R8] = regs->r8; |
94 | gdb_regs[GDB_R9] = regs->r9; | 100 | gdb_regs[GDB_R9] = regs->r9; |
@@ -101,8 +107,8 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) | |||
101 | gdb_regs32[GDB_PS] = regs->flags; | 107 | gdb_regs32[GDB_PS] = regs->flags; |
102 | gdb_regs32[GDB_CS] = regs->cs; | 108 | gdb_regs32[GDB_CS] = regs->cs; |
103 | gdb_regs32[GDB_SS] = regs->ss; | 109 | gdb_regs32[GDB_SS] = regs->ss; |
104 | #endif | ||
105 | gdb_regs[GDB_SP] = kernel_stack_pointer(regs); | 110 | gdb_regs[GDB_SP] = kernel_stack_pointer(regs); |
111 | #endif | ||
106 | } | 112 | } |
107 | 113 | ||
108 | /** | 114 | /** |
@@ -220,8 +226,7 @@ static void kgdb_correct_hw_break(void) | |||
220 | dr7 |= ((breakinfo[breakno].len << 2) | | 226 | dr7 |= ((breakinfo[breakno].len << 2) | |
221 | breakinfo[breakno].type) << | 227 | breakinfo[breakno].type) << |
222 | ((breakno << 2) + 16); | 228 | ((breakno << 2) + 16); |
223 | if (breakno >= 0 && breakno <= 3) | 229 | set_debugreg(breakinfo[breakno].addr, breakno); |
224 | set_debugreg(breakinfo[breakno].addr, breakno); | ||
225 | 230 | ||
226 | } else { | 231 | } else { |
227 | if ((dr7 & breakbit) && !breakinfo[breakno].enabled) { | 232 | if ((dr7 & breakbit) && !breakinfo[breakno].enabled) { |
@@ -395,7 +400,6 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code, | |||
395 | /* set the trace bit if we're stepping */ | 400 | /* set the trace bit if we're stepping */ |
396 | if (remcomInBuffer[0] == 's') { | 401 | if (remcomInBuffer[0] == 's') { |
397 | linux_regs->flags |= X86_EFLAGS_TF; | 402 | linux_regs->flags |= X86_EFLAGS_TF; |
398 | kgdb_single_step = 1; | ||
399 | atomic_set(&kgdb_cpu_doing_single_step, | 403 | atomic_set(&kgdb_cpu_doing_single_step, |
400 | raw_smp_processor_id()); | 404 | raw_smp_processor_id()); |
401 | } | 405 | } |
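The pt_regs_to_gdb_regs() change encodes a 32-bit hardware rule: the CPU pushes ss/sp onto the trap frame only on a privilege-level change, so for a kernel-mode trap those pt_regs slots are invalid and the debugger must synthesise them. The decision, isolated into a hypothetical helper:

static void read_stack_regs(struct pt_regs *regs,
                            unsigned long *ss, unsigned long *sp)
{
        if (user_mode_vm(regs)) {
                *ss = regs->ss;         /* pushed by hardware */
                *sp = regs->sp;
        } else {
                *ss = __KERNEL_DS;      /* synthesised kernel values */
                *sp = kernel_stack_pointer(regs);
        }
}

On 64-bit the hardware always pushes ss/sp, which is why the #else branch keeps using pt_regs unconditionally.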
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index 1f3186ce213c..5b8c7505b3bc 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c | |||
@@ -481,7 +481,7 @@ static int __kprobes reenter_kprobe(struct kprobe *p, struct pt_regs *regs, | |||
481 | 481 | ||
482 | /* | 482 | /* |
483 | * Interrupts are disabled on entry as trap3 is an interrupt gate and they | 483 | * Interrupts are disabled on entry as trap3 is an interrupt gate and they |
484 | * remain disabled thorough out this function. | 484 | * remain disabled throughout this function. |
485 | */ | 485 | */ |
486 | static int __kprobes kprobe_handler(struct pt_regs *regs) | 486 | static int __kprobes kprobe_handler(struct pt_regs *regs) |
487 | { | 487 | { |
@@ -818,7 +818,7 @@ no_change: | |||
818 | 818 | ||
819 | /* | 819 | /* |
820 | * Interrupts are disabled on entry as trap1 is an interrupt gate and they | 820 | * Interrupts are disabled on entry as trap1 is an interrupt gate and they |
821 | * remain disabled thoroughout this function. | 821 | * remain disabled throughout this function. |
822 | */ | 822 | */ |
823 | static int __kprobes post_kprobe_handler(struct pt_regs *regs) | 823 | static int __kprobes post_kprobe_handler(struct pt_regs *regs) |
824 | { | 824 | { |
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c index c843f8406da2..a3fa43ba5d3b 100644 --- a/arch/x86/kernel/machine_kexec_32.c +++ b/arch/x86/kernel/machine_kexec_32.c | |||
@@ -158,8 +158,7 @@ int machine_kexec_prepare(struct kimage *image) | |||
158 | { | 158 | { |
159 | int error; | 159 | int error; |
160 | 160 | ||
161 | if (nx_enabled) | 161 | set_pages_x(image->control_code_page, 1); |
162 | set_pages_x(image->control_code_page, 1); | ||
163 | error = machine_kexec_alloc_page_tables(image); | 162 | error = machine_kexec_alloc_page_tables(image); |
164 | if (error) | 163 | if (error) |
165 | return error; | 164 | return error; |
@@ -173,8 +172,7 @@ int machine_kexec_prepare(struct kimage *image) | |||
173 | */ | 172 | */ |
174 | void machine_kexec_cleanup(struct kimage *image) | 173 | void machine_kexec_cleanup(struct kimage *image) |
175 | { | 174 | { |
176 | if (nx_enabled) | 175 | set_pages_nx(image->control_code_page, 1); |
177 | set_pages_nx(image->control_code_page, 1); | ||
178 | machine_kexec_free_page_tables(image); | 176 | machine_kexec_free_page_tables(image); |
179 | } | 177 | } |
180 | 178 | ||
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c index f4c538b681ca..37542b67c57e 100644 --- a/arch/x86/kernel/microcode_amd.c +++ b/arch/x86/kernel/microcode_amd.c | |||
@@ -13,6 +13,9 @@ | |||
13 | * Licensed under the terms of the GNU General Public | 13 | * Licensed under the terms of the GNU General Public |
14 | * License version 2. See file COPYING for details. | 14 | * License version 2. See file COPYING for details. |
15 | */ | 15 | */ |
16 | |||
17 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
18 | |||
16 | #include <linux/firmware.h> | 19 | #include <linux/firmware.h> |
17 | #include <linux/pci_ids.h> | 20 | #include <linux/pci_ids.h> |
18 | #include <linux/uaccess.h> | 21 | #include <linux/uaccess.h> |
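Defining pr_fmt() before the first include is what lets the later hunks drop the hand-written "microcode: " prefixes: each pr_err()/pr_info() call expands pr_fmt(fmt) at its call site, so every message in this file automatically gains the KBUILD_MODNAME prefix. A minimal sketch, assuming the object builds with KBUILD_MODNAME set to "microcode":

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt     /* must precede includes */

#include <linux/kernel.h>

static void report(int cpu, u32 rev)
{
        pr_info("CPU%d: patch_level=0x%x\n", cpu, rev);
        /* prints "microcode: CPU0: patch_level=0x..." */
}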
@@ -33,6 +36,9 @@ MODULE_LICENSE("GPL v2"); | |||
33 | #define UCODE_EQUIV_CPU_TABLE_TYPE 0x00000000 | 36 | #define UCODE_EQUIV_CPU_TABLE_TYPE 0x00000000 |
34 | #define UCODE_UCODE_TYPE 0x00000001 | 37 | #define UCODE_UCODE_TYPE 0x00000001 |
35 | 38 | ||
39 | const struct firmware *firmware; | ||
40 | static int supported_cpu; | ||
41 | |||
36 | struct equiv_cpu_entry { | 42 | struct equiv_cpu_entry { |
37 | u32 installed_cpu; | 43 | u32 installed_cpu; |
38 | u32 fixed_errata_mask; | 44 | u32 fixed_errata_mask; |
@@ -71,17 +77,14 @@ static struct equiv_cpu_entry *equiv_cpu_table; | |||
71 | 77 | ||
72 | static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig) | 78 | static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig) |
73 | { | 79 | { |
74 | struct cpuinfo_x86 *c = &cpu_data(cpu); | ||
75 | u32 dummy; | 80 | u32 dummy; |
76 | 81 | ||
77 | memset(csig, 0, sizeof(*csig)); | 82 | if (!supported_cpu) |
78 | if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) { | ||
79 | printk(KERN_WARNING "microcode: CPU%d: AMD CPU family 0x%x not " | ||
80 | "supported\n", cpu, c->x86); | ||
81 | return -1; | 83 | return -1; |
82 | } | 84 | |
85 | memset(csig, 0, sizeof(*csig)); | ||
83 | rdmsr(MSR_AMD64_PATCH_LEVEL, csig->rev, dummy); | 86 | rdmsr(MSR_AMD64_PATCH_LEVEL, csig->rev, dummy); |
84 | printk(KERN_INFO "microcode: CPU%d: patch_level=0x%x\n", cpu, csig->rev); | 87 | pr_info("CPU%d: patch_level=0x%x\n", cpu, csig->rev); |
85 | return 0; | 88 | return 0; |
86 | } | 89 | } |
87 | 90 | ||
@@ -103,23 +106,16 @@ static int get_matching_microcode(int cpu, void *mc, int rev) | |||
103 | i++; | 106 | i++; |
104 | } | 107 | } |
105 | 108 | ||
106 | if (!equiv_cpu_id) { | 109 | if (!equiv_cpu_id) |
107 | printk(KERN_WARNING "microcode: CPU%d: cpu revision " | ||
108 | "not listed in equivalent cpu table\n", cpu); | ||
109 | return 0; | 110 | return 0; |
110 | } | ||
111 | 111 | ||
112 | if (mc_header->processor_rev_id != equiv_cpu_id) { | 112 | if (mc_header->processor_rev_id != equiv_cpu_id) |
113 | printk(KERN_ERR "microcode: CPU%d: patch mismatch " | ||
114 | "(processor_rev_id: %x, equiv_cpu_id: %x)\n", | ||
115 | cpu, mc_header->processor_rev_id, equiv_cpu_id); | ||
116 | return 0; | 113 | return 0; |
117 | } | ||
118 | 114 | ||
119 | /* ucode might be chipset specific -- currently we don't support this */ | 115 | /* ucode might be chipset specific -- currently we don't support this */ |
120 | if (mc_header->nb_dev_id || mc_header->sb_dev_id) { | 116 | if (mc_header->nb_dev_id || mc_header->sb_dev_id) { |
121 | printk(KERN_ERR "microcode: CPU%d: loading of chipset " | 117 | pr_err("CPU%d: loading of chipset specific code not yet supported\n", |
122 | "specific code not yet supported\n", cpu); | 118 | cpu); |
123 | return 0; | 119 | return 0; |
124 | } | 120 | } |
125 | 121 | ||
@@ -148,14 +144,12 @@ static int apply_microcode_amd(int cpu) | |||
148 | 144 | ||
149 | /* check current patch id and patch's id for match */ | 145 | /* check current patch id and patch's id for match */ |
150 | if (rev != mc_amd->hdr.patch_id) { | 146 | if (rev != mc_amd->hdr.patch_id) { |
151 | printk(KERN_ERR "microcode: CPU%d: update failed " | 147 | pr_err("CPU%d: update failed (for patch_level=0x%x)\n", |
152 | "(for patch_level=0x%x)\n", cpu, mc_amd->hdr.patch_id); | 148 | cpu, mc_amd->hdr.patch_id); |
153 | return -1; | 149 | return -1; |
154 | } | 150 | } |
155 | 151 | ||
156 | printk(KERN_INFO "microcode: CPU%d: updated (new patch_level=0x%x)\n", | 152 | pr_info("CPU%d: updated (new patch_level=0x%x)\n", cpu, rev); |
157 | cpu, rev); | ||
158 | |||
159 | uci->cpu_sig.rev = rev; | 153 | uci->cpu_sig.rev = rev; |
160 | 154 | ||
161 | return 0; | 155 | return 0; |
@@ -178,18 +172,14 @@ get_next_ucode(const u8 *buf, unsigned int size, unsigned int *mc_size) | |||
178 | return NULL; | 172 | return NULL; |
179 | 173 | ||
180 | if (section_hdr[0] != UCODE_UCODE_TYPE) { | 174 | if (section_hdr[0] != UCODE_UCODE_TYPE) { |
181 | printk(KERN_ERR "microcode: error: invalid type field in " | 175 | pr_err("error: invalid type field in container file section header\n"); |
182 | "container file section header\n"); | ||
183 | return NULL; | 176 | return NULL; |
184 | } | 177 | } |
185 | 178 | ||
186 | total_size = (unsigned long) (section_hdr[4] + (section_hdr[5] << 8)); | 179 | total_size = (unsigned long) (section_hdr[4] + (section_hdr[5] << 8)); |
187 | 180 | ||
188 | printk(KERN_DEBUG "microcode: size %u, total_size %u\n", | ||
189 | size, total_size); | ||
190 | |||
191 | if (total_size > size || total_size > UCODE_MAX_SIZE) { | 181 | if (total_size > size || total_size > UCODE_MAX_SIZE) { |
192 | printk(KERN_ERR "microcode: error: size mismatch\n"); | 182 | pr_err("error: size mismatch\n"); |
193 | return NULL; | 183 | return NULL; |
194 | } | 184 | } |
195 | 185 | ||
@@ -218,15 +208,13 @@ static int install_equiv_cpu_table(const u8 *buf) | |||
218 | size = buf_pos[2]; | 208 | size = buf_pos[2]; |
219 | 209 | ||
220 | if (buf_pos[1] != UCODE_EQUIV_CPU_TABLE_TYPE || !size) { | 210 | if (buf_pos[1] != UCODE_EQUIV_CPU_TABLE_TYPE || !size) { |
221 | printk(KERN_ERR "microcode: error: invalid type field in " | 211 | pr_err("error: invalid type field in container file section header\n"); |
222 | "container file section header\n"); | ||
223 | return 0; | 212 | return 0; |
224 | } | 213 | } |
225 | 214 | ||
226 | equiv_cpu_table = (struct equiv_cpu_entry *) vmalloc(size); | 215 | equiv_cpu_table = (struct equiv_cpu_entry *) vmalloc(size); |
227 | if (!equiv_cpu_table) { | 216 | if (!equiv_cpu_table) { |
228 | printk(KERN_ERR "microcode: failed to allocate " | 217 | pr_err("failed to allocate equivalent CPU table\n"); |
229 | "equivalent CPU table\n"); | ||
230 | return 0; | 218 | return 0; |
231 | } | 219 | } |
232 | 220 | ||
@@ -259,8 +247,7 @@ generic_load_microcode(int cpu, const u8 *data, size_t size) | |||
259 | 247 | ||
260 | offset = install_equiv_cpu_table(ucode_ptr); | 248 | offset = install_equiv_cpu_table(ucode_ptr); |
261 | if (!offset) { | 249 | if (!offset) { |
262 | printk(KERN_ERR "microcode: failed to create " | 250 | pr_err("failed to create equivalent cpu table\n"); |
263 | "equivalent cpu table\n"); | ||
264 | return UCODE_ERROR; | 251 | return UCODE_ERROR; |
265 | } | 252 | } |
266 | 253 | ||
@@ -291,8 +278,7 @@ generic_load_microcode(int cpu, const u8 *data, size_t size) | |||
291 | if (!leftover) { | 278 | if (!leftover) { |
292 | vfree(uci->mc); | 279 | vfree(uci->mc); |
293 | uci->mc = new_mc; | 280 | uci->mc = new_mc; |
294 | pr_debug("microcode: CPU%d found a matching microcode " | 281 | pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n", |
295 | "update with version 0x%x (current=0x%x)\n", | ||
296 | cpu, new_rev, uci->cpu_sig.rev); | 282 | cpu, new_rev, uci->cpu_sig.rev); |
297 | } else { | 283 | } else { |
298 | vfree(new_mc); | 284 | vfree(new_mc); |
@@ -308,33 +294,26 @@ generic_load_microcode(int cpu, const u8 *data, size_t size) | |||
308 | 294 | ||
309 | static enum ucode_state request_microcode_fw(int cpu, struct device *device) | 295 | static enum ucode_state request_microcode_fw(int cpu, struct device *device) |
310 | { | 296 | { |
311 | const char *fw_name = "amd-ucode/microcode_amd.bin"; | ||
312 | const struct firmware *firmware; | ||
313 | enum ucode_state ret; | 297 | enum ucode_state ret; |
314 | 298 | ||
315 | if (request_firmware(&firmware, fw_name, device)) { | 299 | if (firmware == NULL) |
316 | printk(KERN_ERR "microcode: failed to load file %s\n", fw_name); | ||
317 | return UCODE_NFOUND; | 300 | return UCODE_NFOUND; |
318 | } | ||
319 | 301 | ||
320 | if (*(u32 *)firmware->data != UCODE_MAGIC) { | 302 | if (*(u32 *)firmware->data != UCODE_MAGIC) { |
321 | printk(KERN_ERR "microcode: invalid UCODE_MAGIC (0x%08x)\n", | 303 | pr_err("invalid UCODE_MAGIC (0x%08x)\n", |
322 | *(u32 *)firmware->data); | 304 | *(u32 *)firmware->data); |
323 | return UCODE_ERROR; | 305 | return UCODE_ERROR; |
324 | } | 306 | } |
325 | 307 | ||
326 | ret = generic_load_microcode(cpu, firmware->data, firmware->size); | 308 | ret = generic_load_microcode(cpu, firmware->data, firmware->size); |
327 | 309 | ||
328 | release_firmware(firmware); | ||
329 | |||
330 | return ret; | 310 | return ret; |
331 | } | 311 | } |
332 | 312 | ||
333 | static enum ucode_state | 313 | static enum ucode_state |
334 | request_microcode_user(int cpu, const void __user *buf, size_t size) | 314 | request_microcode_user(int cpu, const void __user *buf, size_t size) |
335 | { | 315 | { |
336 | printk(KERN_INFO "microcode: AMD microcode update via " | 316 | pr_info("AMD microcode update via /dev/cpu/microcode not supported\n"); |
337 | "/dev/cpu/microcode not supported\n"); | ||
338 | return UCODE_ERROR; | 317 | return UCODE_ERROR; |
339 | } | 318 | } |
340 | 319 | ||
@@ -346,7 +325,31 @@ static void microcode_fini_cpu_amd(int cpu) | |||
346 | uci->mc = NULL; | 325 | uci->mc = NULL; |
347 | } | 326 | } |
348 | 327 | ||
328 | void init_microcode_amd(struct device *device) | ||
329 | { | ||
330 | const char *fw_name = "amd-ucode/microcode_amd.bin"; | ||
331 | struct cpuinfo_x86 *c = &boot_cpu_data; | ||
332 | |||
333 | WARN_ON(c->x86_vendor != X86_VENDOR_AMD); | ||
334 | |||
335 | if (c->x86 < 0x10) { | ||
336 | pr_warning("AMD CPU family 0x%x not supported\n", c->x86); | ||
337 | return; | ||
338 | } | ||
339 | supported_cpu = 1; | ||
340 | |||
341 | if (request_firmware(&firmware, fw_name, device)) | ||
342 | pr_err("failed to load file %s\n", fw_name); | ||
343 | } | ||
344 | |||
345 | void fini_microcode_amd(void) | ||
346 | { | ||
347 | release_firmware(firmware); | ||
348 | } | ||
349 | |||
349 | static struct microcode_ops microcode_amd_ops = { | 350 | static struct microcode_ops microcode_amd_ops = { |
351 | .init = init_microcode_amd, | ||
352 | .fini = fini_microcode_amd, | ||
350 | .request_microcode_user = request_microcode_user, | 353 | .request_microcode_user = request_microcode_user, |
351 | .request_microcode_fw = request_microcode_fw, | 354 | .request_microcode_fw = request_microcode_fw, |
352 | .collect_cpu_info = collect_cpu_info_amd, | 355 | .collect_cpu_info = collect_cpu_info_amd, |
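Taken together, the AMD hunks switch the driver to a load-once firmware model: the container file is requested a single time from the new init hook and cached for the driver's lifetime, and the per-CPU path only parses the cached image. A minimal sketch of that pattern, with illustrative example_* names rather than the exact driver code:

static const struct firmware *cached_fw;	/* held until fini */

static void example_init(struct device *dev)
{
	/* one request at driver init; failure leaves cached_fw NULL */
	if (request_firmware(&cached_fw, "amd-ucode/microcode_amd.bin", dev))
		pr_err("failed to load file amd-ucode/microcode_amd.bin\n");
}

static enum ucode_state example_request(int cpu)
{
	if (cached_fw == NULL)			/* init failed or never ran */
		return UCODE_NFOUND;
	return generic_load_microcode(cpu, cached_fw->data, cached_fw->size);
}

static void example_fini(void)
{
	release_firmware(cached_fw);		/* accepts NULL */
}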
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c index 2bcad3926edb..844c02c65fcb 100644 --- a/arch/x86/kernel/microcode_core.c +++ b/arch/x86/kernel/microcode_core.c | |||
@@ -70,6 +70,9 @@ | |||
70 | * Fix sigmatch() macro to handle old CPUs with pf == 0. | 70 | * Fix sigmatch() macro to handle old CPUs with pf == 0. |
71 | * Thanks to Stuart Swales for pointing out this bug. | 71 | * Thanks to Stuart Swales for pointing out this bug. |
72 | */ | 72 | */ |
73 | |||
74 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
75 | |||
73 | #include <linux/platform_device.h> | 76 | #include <linux/platform_device.h> |
74 | #include <linux/miscdevice.h> | 77 | #include <linux/miscdevice.h> |
75 | #include <linux/capability.h> | 78 | #include <linux/capability.h> |
@@ -209,7 +212,7 @@ static ssize_t microcode_write(struct file *file, const char __user *buf, | |||
209 | ssize_t ret = -EINVAL; | 212 | ssize_t ret = -EINVAL; |
210 | 213 | ||
211 | if ((len >> PAGE_SHIFT) > totalram_pages) { | 214 | if ((len >> PAGE_SHIFT) > totalram_pages) { |
212 | pr_err("microcode: too much data (max %ld pages)\n", totalram_pages); | 215 | pr_err("too much data (max %ld pages)\n", totalram_pages); |
213 | return ret; | 216 | return ret; |
214 | } | 217 | } |
215 | 218 | ||
@@ -244,7 +247,7 @@ static int __init microcode_dev_init(void) | |||
244 | 247 | ||
245 | error = misc_register(µcode_dev); | 248 | error = misc_register(µcode_dev); |
246 | if (error) { | 249 | if (error) { |
247 | pr_err("microcode: can't misc_register on minor=%d\n", MICROCODE_MINOR); | 250 | pr_err("can't misc_register on minor=%d\n", MICROCODE_MINOR); |
248 | return error; | 251 | return error; |
249 | } | 252 | } |
250 | 253 | ||
@@ -359,7 +362,7 @@ static enum ucode_state microcode_resume_cpu(int cpu) | |||
359 | if (!uci->mc) | 362 | if (!uci->mc) |
360 | return UCODE_NFOUND; | 363 | return UCODE_NFOUND; |
361 | 364 | ||
362 | pr_debug("microcode: CPU%d updated upon resume\n", cpu); | 365 | pr_debug("CPU%d updated upon resume\n", cpu); |
363 | apply_microcode_on_target(cpu); | 366 | apply_microcode_on_target(cpu); |
364 | 367 | ||
365 | return UCODE_OK; | 368 | return UCODE_OK; |
@@ -379,7 +382,7 @@ static enum ucode_state microcode_init_cpu(int cpu) | |||
379 | ustate = microcode_ops->request_microcode_fw(cpu, µcode_pdev->dev); | 382 | ustate = microcode_ops->request_microcode_fw(cpu, µcode_pdev->dev); |
380 | 383 | ||
381 | if (ustate == UCODE_OK) { | 384 | if (ustate == UCODE_OK) { |
382 | pr_debug("microcode: CPU%d updated upon init\n", cpu); | 385 | pr_debug("CPU%d updated upon init\n", cpu); |
383 | apply_microcode_on_target(cpu); | 386 | apply_microcode_on_target(cpu); |
384 | } | 387 | } |
385 | 388 | ||
@@ -391,7 +394,7 @@ static enum ucode_state microcode_update_cpu(int cpu) | |||
391 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; | 394 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; |
392 | enum ucode_state ustate; | 395 | enum ucode_state ustate; |
393 | 396 | ||
394 | if (uci->valid) | 397 | if (uci->valid && uci->mc) |
395 | ustate = microcode_resume_cpu(cpu); | 398 | ustate = microcode_resume_cpu(cpu); |
396 | else | 399 | else |
397 | ustate = microcode_init_cpu(cpu); | 400 | ustate = microcode_init_cpu(cpu); |
@@ -406,7 +409,7 @@ static int mc_sysdev_add(struct sys_device *sys_dev) | |||
406 | if (!cpu_online(cpu)) | 409 | if (!cpu_online(cpu)) |
407 | return 0; | 410 | return 0; |
408 | 411 | ||
409 | pr_debug("microcode: CPU%d added\n", cpu); | 412 | pr_debug("CPU%d added\n", cpu); |
410 | 413 | ||
411 | err = sysfs_create_group(&sys_dev->kobj, &mc_attr_group); | 414 | err = sysfs_create_group(&sys_dev->kobj, &mc_attr_group); |
412 | if (err) | 415 | if (err) |
@@ -425,7 +428,7 @@ static int mc_sysdev_remove(struct sys_device *sys_dev) | |||
425 | if (!cpu_online(cpu)) | 428 | if (!cpu_online(cpu)) |
426 | return 0; | 429 | return 0; |
427 | 430 | ||
428 | pr_debug("microcode: CPU%d removed\n", cpu); | 431 | pr_debug("CPU%d removed\n", cpu); |
429 | microcode_fini_cpu(cpu); | 432 | microcode_fini_cpu(cpu); |
430 | sysfs_remove_group(&sys_dev->kobj, &mc_attr_group); | 433 | sysfs_remove_group(&sys_dev->kobj, &mc_attr_group); |
431 | return 0; | 434 | return 0; |
@@ -473,15 +476,15 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) | |||
473 | microcode_update_cpu(cpu); | 476 | microcode_update_cpu(cpu); |
474 | case CPU_DOWN_FAILED: | 477 | case CPU_DOWN_FAILED: |
475 | case CPU_DOWN_FAILED_FROZEN: | 478 | case CPU_DOWN_FAILED_FROZEN: |
476 | pr_debug("microcode: CPU%d added\n", cpu); | 479 | pr_debug("CPU%d added\n", cpu); |
477 | if (sysfs_create_group(&sys_dev->kobj, &mc_attr_group)) | 480 | if (sysfs_create_group(&sys_dev->kobj, &mc_attr_group)) |
478 | pr_err("microcode: Failed to create group for CPU%d\n", cpu); | 481 | pr_err("Failed to create group for CPU%d\n", cpu); |
479 | break; | 482 | break; |
480 | case CPU_DOWN_PREPARE: | 483 | case CPU_DOWN_PREPARE: |
481 | case CPU_DOWN_PREPARE_FROZEN: | 484 | case CPU_DOWN_PREPARE_FROZEN: |
482 | /* Suspend is in progress, only remove the interface */ | 485 | /* Suspend is in progress, only remove the interface */ |
483 | sysfs_remove_group(&sys_dev->kobj, &mc_attr_group); | 486 | sysfs_remove_group(&sys_dev->kobj, &mc_attr_group); |
484 | pr_debug("microcode: CPU%d removed\n", cpu); | 487 | pr_debug("CPU%d removed\n", cpu); |
485 | break; | 488 | break; |
486 | case CPU_DEAD: | 489 | case CPU_DEAD: |
487 | case CPU_UP_CANCELED_FROZEN: | 490 | case CPU_UP_CANCELED_FROZEN: |
@@ -507,7 +510,7 @@ static int __init microcode_init(void) | |||
507 | microcode_ops = init_amd_microcode(); | 510 | microcode_ops = init_amd_microcode(); |
508 | 511 | ||
509 | if (!microcode_ops) { | 512 | if (!microcode_ops) { |
510 | pr_err("microcode: no support for this CPU vendor\n"); | 513 | pr_err("no support for this CPU vendor\n"); |
511 | return -ENODEV; | 514 | return -ENODEV; |
512 | } | 515 | } |
513 | 516 | ||
@@ -518,6 +521,9 @@ static int __init microcode_init(void) | |||
518 | return PTR_ERR(microcode_pdev); | 521 | return PTR_ERR(microcode_pdev); |
519 | } | 522 | } |
520 | 523 | ||
524 | if (microcode_ops->init) | ||
525 | microcode_ops->init(µcode_pdev->dev); | ||
526 | |||
521 | get_online_cpus(); | 527 | get_online_cpus(); |
522 | mutex_lock(µcode_mutex); | 528 | mutex_lock(µcode_mutex); |
523 | 529 | ||
@@ -538,8 +544,7 @@ static int __init microcode_init(void) | |||
538 | register_hotcpu_notifier(&mc_cpu_notifier); | 544 | register_hotcpu_notifier(&mc_cpu_notifier); |
539 | 545 | ||
540 | pr_info("Microcode Update Driver: v" MICROCODE_VERSION | 546 | pr_info("Microcode Update Driver: v" MICROCODE_VERSION |
541 | " <tigran@aivazian.fsnet.co.uk>," | 547 | " <tigran@aivazian.fsnet.co.uk>, Peter Oruba\n"); |
542 | " Peter Oruba\n"); | ||
543 | 548 | ||
544 | return 0; | 549 | return 0; |
545 | } | 550 | } |
@@ -561,6 +566,9 @@ static void __exit microcode_exit(void) | |||
561 | 566 | ||
562 | platform_device_unregister(microcode_pdev); | 567 | platform_device_unregister(microcode_pdev); |
563 | 568 | ||
569 | if (microcode_ops->fini) | ||
570 | microcode_ops->fini(); | ||
571 | |||
564 | microcode_ops = NULL; | 572 | microcode_ops = NULL; |
565 | 573 | ||
566 | pr_info("Microcode Update Driver: v" MICROCODE_VERSION " removed.\n"); | 574 | pr_info("Microcode Update Driver: v" MICROCODE_VERSION " removed.\n"); |
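Most of the message churn in these files follows from the pr_fmt() definition added at the top of each: when pr_fmt is defined before the printk declarations are included, every pr_err()/pr_info()/pr_debug() call picks up the KBUILD_MODNAME prefix automatically, which is why the literal "microcode: " prefixes could be dropped from the format strings. A sketch of the mechanism (illustrative, not from the patch):

/* must come before the first include that declares the pr_*() macros */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>

static void example(int cpu)
{
	/* expands to printk(KERN_ERR pr_fmt("CPU%d removed\n"), cpu),
	 * so the log line still reads "microcode: CPU0 removed" */
	pr_err("CPU%d removed\n", cpu);
}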
diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c index 0d334ddd0a96..ebd193e476ca 100644 --- a/arch/x86/kernel/microcode_intel.c +++ b/arch/x86/kernel/microcode_intel.c | |||
@@ -70,6 +70,9 @@ | |||
70 | * Fix sigmatch() macro to handle old CPUs with pf == 0. | 70 | * Fix sigmatch() macro to handle old CPUs with pf == 0. |
71 | * Thanks to Stuart Swales for pointing out this bug. | 71 | * Thanks to Stuart Swales for pointing out this bug. |
72 | */ | 72 | */ |
73 | |||
74 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
75 | |||
73 | #include <linux/firmware.h> | 76 | #include <linux/firmware.h> |
74 | #include <linux/uaccess.h> | 77 | #include <linux/uaccess.h> |
75 | #include <linux/kernel.h> | 78 | #include <linux/kernel.h> |
@@ -146,8 +149,7 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig) | |||
146 | 149 | ||
147 | if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 || | 150 | if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 || |
148 | cpu_has(c, X86_FEATURE_IA64)) { | 151 | cpu_has(c, X86_FEATURE_IA64)) { |
149 | printk(KERN_ERR "microcode: CPU%d not a capable Intel " | 152 | pr_err("CPU%d not a capable Intel processor\n", cpu_num); |
150 | "processor\n", cpu_num); | ||
151 | return -1; | 153 | return -1; |
152 | } | 154 | } |
153 | 155 | ||
@@ -165,8 +167,8 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig) | |||
165 | /* get the current revision from MSR 0x8B */ | 167 | /* get the current revision from MSR 0x8B */ |
166 | rdmsr(MSR_IA32_UCODE_REV, val[0], csig->rev); | 168 | rdmsr(MSR_IA32_UCODE_REV, val[0], csig->rev); |
167 | 169 | ||
168 | printk(KERN_INFO "microcode: CPU%d sig=0x%x, pf=0x%x, revision=0x%x\n", | 170 | pr_info("CPU%d sig=0x%x, pf=0x%x, revision=0x%x\n", |
169 | cpu_num, csig->sig, csig->pf, csig->rev); | 171 | cpu_num, csig->sig, csig->pf, csig->rev); |
170 | 172 | ||
171 | return 0; | 173 | return 0; |
172 | } | 174 | } |
@@ -194,28 +196,24 @@ static int microcode_sanity_check(void *mc) | |||
194 | data_size = get_datasize(mc_header); | 196 | data_size = get_datasize(mc_header); |
195 | 197 | ||
196 | if (data_size + MC_HEADER_SIZE > total_size) { | 198 | if (data_size + MC_HEADER_SIZE > total_size) { |
197 | printk(KERN_ERR "microcode: error! " | 199 | pr_err("error! Bad data size in microcode data file\n"); |
198 | "Bad data size in microcode data file\n"); | ||
199 | return -EINVAL; | 200 | return -EINVAL; |
200 | } | 201 | } |
201 | 202 | ||
202 | if (mc_header->ldrver != 1 || mc_header->hdrver != 1) { | 203 | if (mc_header->ldrver != 1 || mc_header->hdrver != 1) { |
203 | printk(KERN_ERR "microcode: error! " | 204 | pr_err("error! Unknown microcode update format\n"); |
204 | "Unknown microcode update format\n"); | ||
205 | return -EINVAL; | 205 | return -EINVAL; |
206 | } | 206 | } |
207 | ext_table_size = total_size - (MC_HEADER_SIZE + data_size); | 207 | ext_table_size = total_size - (MC_HEADER_SIZE + data_size); |
208 | if (ext_table_size) { | 208 | if (ext_table_size) { |
209 | if ((ext_table_size < EXT_HEADER_SIZE) | 209 | if ((ext_table_size < EXT_HEADER_SIZE) |
210 | || ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE)) { | 210 | || ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE)) { |
211 | printk(KERN_ERR "microcode: error! " | 211 | pr_err("error! Small exttable size in microcode data file\n"); |
212 | "Small exttable size in microcode data file\n"); | ||
213 | return -EINVAL; | 212 | return -EINVAL; |
214 | } | 213 | } |
215 | ext_header = mc + MC_HEADER_SIZE + data_size; | 214 | ext_header = mc + MC_HEADER_SIZE + data_size; |
216 | if (ext_table_size != exttable_size(ext_header)) { | 215 | if (ext_table_size != exttable_size(ext_header)) { |
217 | printk(KERN_ERR "microcode: error! " | 216 | pr_err("error! Bad exttable size in microcode data file\n"); |
218 | "Bad exttable size in microcode data file\n"); | ||
219 | return -EFAULT; | 217 | return -EFAULT; |
220 | } | 218 | } |
221 | ext_sigcount = ext_header->count; | 219 | ext_sigcount = ext_header->count; |
@@ -230,8 +228,7 @@ static int microcode_sanity_check(void *mc) | |||
230 | while (i--) | 228 | while (i--) |
231 | ext_table_sum += ext_tablep[i]; | 229 | ext_table_sum += ext_tablep[i]; |
232 | if (ext_table_sum) { | 230 | if (ext_table_sum) { |
233 | printk(KERN_WARNING "microcode: aborting, " | 231 | pr_warning("aborting, bad extended signature table checksum\n"); |
234 | "bad extended signature table checksum\n"); | ||
235 | return -EINVAL; | 232 | return -EINVAL; |
236 | } | 233 | } |
237 | } | 234 | } |
@@ -242,7 +239,7 @@ static int microcode_sanity_check(void *mc) | |||
242 | while (i--) | 239 | while (i--) |
243 | orig_sum += ((int *)mc)[i]; | 240 | orig_sum += ((int *)mc)[i]; |
244 | if (orig_sum) { | 241 | if (orig_sum) { |
245 | printk(KERN_ERR "microcode: aborting, bad checksum\n"); | 242 | pr_err("aborting, bad checksum\n"); |
246 | return -EINVAL; | 243 | return -EINVAL; |
247 | } | 244 | } |
248 | if (!ext_table_size) | 245 | if (!ext_table_size) |
@@ -255,7 +252,7 @@ static int microcode_sanity_check(void *mc) | |||
255 | - (mc_header->sig + mc_header->pf + mc_header->cksum) | 252 | - (mc_header->sig + mc_header->pf + mc_header->cksum) |
256 | + (ext_sig->sig + ext_sig->pf + ext_sig->cksum); | 253 | + (ext_sig->sig + ext_sig->pf + ext_sig->cksum); |
257 | if (sum) { | 254 | if (sum) { |
258 | printk(KERN_ERR "microcode: aborting, bad checksum\n"); | 255 | pr_err("aborting, bad checksum\n"); |
259 | return -EINVAL; | 256 | return -EINVAL; |
260 | } | 257 | } |
261 | } | 258 | } |
@@ -327,13 +324,11 @@ static int apply_microcode(int cpu) | |||
327 | rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]); | 324 | rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]); |
328 | 325 | ||
329 | if (val[1] != mc_intel->hdr.rev) { | 326 | if (val[1] != mc_intel->hdr.rev) { |
330 | printk(KERN_ERR "microcode: CPU%d update " | 327 | pr_err("CPU%d update to revision 0x%x failed\n", |
331 | "to revision 0x%x failed\n", | 328 | cpu_num, mc_intel->hdr.rev); |
332 | cpu_num, mc_intel->hdr.rev); | ||
333 | return -1; | 329 | return -1; |
334 | } | 330 | } |
335 | printk(KERN_INFO "microcode: CPU%d updated to revision " | 331 | pr_info("CPU%d updated to revision 0x%x, date = %04x-%02x-%02x \n", |
336 | "0x%x, date = %04x-%02x-%02x \n", | ||
337 | cpu_num, val[1], | 332 | cpu_num, val[1], |
338 | mc_intel->hdr.date & 0xffff, | 333 | mc_intel->hdr.date & 0xffff, |
339 | mc_intel->hdr.date >> 24, | 334 | mc_intel->hdr.date >> 24, |
@@ -362,8 +357,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size, | |||
362 | 357 | ||
363 | mc_size = get_totalsize(&mc_header); | 358 | mc_size = get_totalsize(&mc_header); |
364 | if (!mc_size || mc_size > leftover) { | 359 | if (!mc_size || mc_size > leftover) { |
365 | printk(KERN_ERR "microcode: error!" | 360 | pr_err("error! Bad data in microcode data file\n"); |
366 | "Bad data in microcode data file\n"); | ||
367 | break; | 361 | break; |
368 | } | 362 | } |
369 | 363 | ||
@@ -405,9 +399,8 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size, | |||
405 | vfree(uci->mc); | 399 | vfree(uci->mc); |
406 | uci->mc = (struct microcode_intel *)new_mc; | 400 | uci->mc = (struct microcode_intel *)new_mc; |
407 | 401 | ||
408 | pr_debug("microcode: CPU%d found a matching microcode update with" | 402 | pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n", |
409 | " version 0x%x (current=0x%x)\n", | 403 | cpu, new_rev, uci->cpu_sig.rev); |
410 | cpu, new_rev, uci->cpu_sig.rev); | ||
411 | out: | 404 | out: |
412 | return state; | 405 | return state; |
413 | } | 406 | } |
@@ -429,7 +422,7 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device) | |||
429 | c->x86, c->x86_model, c->x86_mask); | 422 | c->x86, c->x86_model, c->x86_mask); |
430 | 423 | ||
431 | if (request_firmware(&firmware, name, device)) { | 424 | if (request_firmware(&firmware, name, device)) { |
432 | pr_debug("microcode: data file %s load failed\n", name); | 425 | pr_debug("data file %s load failed\n", name); |
433 | return UCODE_NFOUND; | 426 | return UCODE_NFOUND; |
434 | } | 427 | } |
435 | 428 | ||
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index 5be95ef4ffec..35a57c963df9 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c | |||
@@ -667,36 +667,18 @@ void __init default_get_smp_config(unsigned int early) | |||
667 | */ | 667 | */ |
668 | } | 668 | } |
669 | 669 | ||
670 | static void __init smp_reserve_bootmem(struct mpf_intel *mpf) | 670 | static void __init smp_reserve_memory(struct mpf_intel *mpf) |
671 | { | 671 | { |
672 | unsigned long size = get_mpc_size(mpf->physptr); | 672 | unsigned long size = get_mpc_size(mpf->physptr); |
673 | #ifdef CONFIG_X86_32 | ||
674 | /* | ||
675 | * We cannot access to MPC table to compute table size yet, | ||
676 | * as only few megabytes from the bottom is mapped now. | ||
677 | * PC-9800's MPC table places on the very last of physical | ||
678 | * memory; so that simply reserving PAGE_SIZE from mpf->physptr | ||
679 | * yields BUG() in reserve_bootmem. | ||
680 | * also need to make sure physptr is below than max_low_pfn | ||
681 | * we don't need reserve the area above max_low_pfn | ||
682 | */ | ||
683 | unsigned long end = max_low_pfn * PAGE_SIZE; | ||
684 | 673 | ||
685 | if (mpf->physptr < end) { | 674 | reserve_early(mpf->physptr, mpf->physptr+size, "MP-table mpc"); |
686 | if (mpf->physptr + size > end) | ||
687 | size = end - mpf->physptr; | ||
688 | reserve_bootmem_generic(mpf->physptr, size, BOOTMEM_DEFAULT); | ||
689 | } | ||
690 | #else | ||
691 | reserve_bootmem_generic(mpf->physptr, size, BOOTMEM_DEFAULT); | ||
692 | #endif | ||
693 | } | 675 | } |
694 | 676 | ||
695 | static int __init smp_scan_config(unsigned long base, unsigned long length, | 677 | static int __init smp_scan_config(unsigned long base, unsigned long length) |
696 | unsigned reserve) | ||
697 | { | 678 | { |
698 | unsigned int *bp = phys_to_virt(base); | 679 | unsigned int *bp = phys_to_virt(base); |
699 | struct mpf_intel *mpf; | 680 | struct mpf_intel *mpf; |
681 | unsigned long mem; | ||
700 | 682 | ||
701 | apic_printk(APIC_VERBOSE, "Scan SMP from %p for %ld bytes.\n", | 683 | apic_printk(APIC_VERBOSE, "Scan SMP from %p for %ld bytes.\n", |
702 | bp, length); | 684 | bp, length); |
@@ -717,12 +699,10 @@ static int __init smp_scan_config(unsigned long base, unsigned long length, | |||
717 | printk(KERN_INFO "found SMP MP-table at [%p] %llx\n", | 699 | printk(KERN_INFO "found SMP MP-table at [%p] %llx\n", |
718 | mpf, (u64)virt_to_phys(mpf)); | 700 | mpf, (u64)virt_to_phys(mpf)); |
719 | 701 | ||
720 | if (!reserve) | 702 | mem = virt_to_phys(mpf); |
721 | return 1; | 703 | reserve_early(mem, mem + sizeof(*mpf), "MP-table mpf"); |
722 | reserve_bootmem_generic(virt_to_phys(mpf), sizeof(*mpf), | ||
723 | BOOTMEM_DEFAULT); | ||
724 | if (mpf->physptr) | 704 | if (mpf->physptr) |
725 | smp_reserve_bootmem(mpf); | 705 | smp_reserve_memory(mpf); |
726 | 706 | ||
727 | return 1; | 707 | return 1; |
728 | } | 708 | } |
@@ -732,7 +712,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length, | |||
732 | return 0; | 712 | return 0; |
733 | } | 713 | } |
734 | 714 | ||
735 | void __init default_find_smp_config(unsigned int reserve) | 715 | void __init default_find_smp_config(void) |
736 | { | 716 | { |
737 | unsigned int address; | 717 | unsigned int address; |
738 | 718 | ||
@@ -744,9 +724,9 @@ void __init default_find_smp_config(unsigned int reserve) | |||
744 | * 2) Scan the top 1K of base RAM | 724 | * 2) Scan the top 1K of base RAM |
745 | * 3) Scan the 64K of bios | 725 | * 3) Scan the 64K of bios |
746 | */ | 726 | */ |
747 | if (smp_scan_config(0x0, 0x400, reserve) || | 727 | if (smp_scan_config(0x0, 0x400) || |
748 | smp_scan_config(639 * 0x400, 0x400, reserve) || | 728 | smp_scan_config(639 * 0x400, 0x400) || |
749 | smp_scan_config(0xF0000, 0x10000, reserve)) | 729 | smp_scan_config(0xF0000, 0x10000)) |
750 | return; | 730 | return; |
751 | /* | 731 | /* |
752 | * If it is an SMP machine we should know now, unless the | 732 | * If it is an SMP machine we should know now, unless the |
@@ -767,7 +747,7 @@ void __init default_find_smp_config(unsigned int reserve) | |||
767 | 747 | ||
768 | address = get_bios_ebda(); | 748 | address = get_bios_ebda(); |
769 | if (address) | 749 | if (address) |
770 | smp_scan_config(address, 0x400, reserve); | 750 | smp_scan_config(address, 0x400); |
771 | } | 751 | } |
772 | 752 | ||
773 | #ifdef CONFIG_X86_IO_APIC | 753 | #ifdef CONFIG_X86_IO_APIC |
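The mpparse conversion can drop the reserve flag that used to thread through these functions because reserve_early() works before bootmem exists: it records a physical range in the boot-time reservation table, so scanning and reserving happen in one pass and the PC-9800 workaround above becomes unnecessary. Roughly, as a sketch built from the patch's own calls:

static void __init example_reserve_mp(struct mpf_intel *mpf)
{
	unsigned long mem = virt_to_phys(mpf);

	/* the floating pointer structure itself */
	reserve_early(mem, mem + sizeof(*mpf), "MP-table mpf");

	/* and, when present, the configuration table it points to */
	if (mpf->physptr)
		reserve_early(mpf->physptr,
			      mpf->physptr + get_mpc_size(mpf->physptr),
			      "MP-table mpc");
}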
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 744508e7cfdd..7a7bd4e3ec49 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
@@ -9,6 +9,9 @@ | |||
9 | #include <linux/pm.h> | 9 | #include <linux/pm.h> |
10 | #include <linux/clockchips.h> | 10 | #include <linux/clockchips.h> |
11 | #include <linux/random.h> | 11 | #include <linux/random.h> |
12 | #include <linux/user-return-notifier.h> | ||
13 | #include <linux/dmi.h> | ||
14 | #include <linux/utsname.h> | ||
12 | #include <trace/events/power.h> | 15 | #include <trace/events/power.h> |
13 | #include <linux/hw_breakpoint.h> | 16 | #include <linux/hw_breakpoint.h> |
14 | #include <asm/system.h> | 17 | #include <asm/system.h> |
@@ -89,6 +92,25 @@ void exit_thread(void) | |||
89 | } | 92 | } |
90 | } | 93 | } |
91 | 94 | ||
95 | void show_regs_common(void) | ||
96 | { | ||
97 | const char *board, *product; | ||
98 | |||
99 | board = dmi_get_system_info(DMI_BOARD_NAME); | ||
100 | if (!board) | ||
101 | board = ""; | ||
102 | product = dmi_get_system_info(DMI_PRODUCT_NAME); | ||
103 | if (!product) | ||
104 | product = ""; | ||
105 | |||
106 | printk("\n"); | ||
107 | printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s/%s\n", | ||
108 | current->pid, current->comm, print_tainted(), | ||
109 | init_utsname()->release, | ||
110 | (int)strcspn(init_utsname()->version, " "), | ||
111 | init_utsname()->version, board, product); | ||
112 | } | ||
113 | |||
92 | void flush_thread(void) | 114 | void flush_thread(void) |
93 | { | 115 | { |
94 | struct task_struct *tsk = current; | 116 | struct task_struct *tsk = current; |
@@ -209,6 +231,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, | |||
209 | */ | 231 | */ |
210 | memset(tss->io_bitmap, 0xff, prev->io_bitmap_max); | 232 | memset(tss->io_bitmap, 0xff, prev->io_bitmap_max); |
211 | } | 233 | } |
234 | propagate_user_return_notify(prev_p, next_p); | ||
212 | } | 235 | } |
213 | 236 | ||
214 | int sys_fork(struct pt_regs *regs) | 237 | int sys_fork(struct pt_regs *regs) |
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 075580b35682..120b88797a75 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c | |||
@@ -23,7 +23,6 @@ | |||
23 | #include <linux/vmalloc.h> | 23 | #include <linux/vmalloc.h> |
24 | #include <linux/user.h> | 24 | #include <linux/user.h> |
25 | #include <linux/interrupt.h> | 25 | #include <linux/interrupt.h> |
26 | #include <linux/utsname.h> | ||
27 | #include <linux/delay.h> | 26 | #include <linux/delay.h> |
28 | #include <linux/reboot.h> | 27 | #include <linux/reboot.h> |
29 | #include <linux/init.h> | 28 | #include <linux/init.h> |
@@ -35,7 +34,6 @@ | |||
35 | #include <linux/tick.h> | 34 | #include <linux/tick.h> |
36 | #include <linux/percpu.h> | 35 | #include <linux/percpu.h> |
37 | #include <linux/prctl.h> | 36 | #include <linux/prctl.h> |
38 | #include <linux/dmi.h> | ||
39 | #include <linux/ftrace.h> | 37 | #include <linux/ftrace.h> |
40 | #include <linux/uaccess.h> | 38 | #include <linux/uaccess.h> |
41 | #include <linux/io.h> | 39 | #include <linux/io.h> |
@@ -128,7 +126,6 @@ void __show_regs(struct pt_regs *regs, int all) | |||
128 | unsigned long d0, d1, d2, d3, d6, d7; | 126 | unsigned long d0, d1, d2, d3, d6, d7; |
129 | unsigned long sp; | 127 | unsigned long sp; |
130 | unsigned short ss, gs; | 128 | unsigned short ss, gs; |
131 | const char *board; | ||
132 | 129 | ||
133 | if (user_mode_vm(regs)) { | 130 | if (user_mode_vm(regs)) { |
134 | sp = regs->sp; | 131 | sp = regs->sp; |
@@ -140,16 +137,7 @@ void __show_regs(struct pt_regs *regs, int all) | |||
140 | savesegment(gs, gs); | 137 | savesegment(gs, gs); |
141 | } | 138 | } |
142 | 139 | ||
143 | printk("\n"); | 140 | show_regs_common(); |
144 | |||
145 | board = dmi_get_system_info(DMI_PRODUCT_NAME); | ||
146 | if (!board) | ||
147 | board = ""; | ||
148 | printk("Pid: %d, comm: %s %s (%s %.*s) %s\n", | ||
149 | task_pid_nr(current), current->comm, | ||
150 | print_tainted(), init_utsname()->release, | ||
151 | (int)strcspn(init_utsname()->version, " "), | ||
152 | init_utsname()->version, board); | ||
153 | 141 | ||
154 | printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n", | 142 | printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n", |
155 | (u16)regs->cs, regs->ip, regs->flags, | 143 | (u16)regs->cs, regs->ip, regs->flags, |
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index a98fe88fab64..e5ab0cd0ef36 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c | |||
@@ -26,7 +26,6 @@ | |||
26 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
27 | #include <linux/user.h> | 27 | #include <linux/user.h> |
28 | #include <linux/interrupt.h> | 28 | #include <linux/interrupt.h> |
29 | #include <linux/utsname.h> | ||
30 | #include <linux/delay.h> | 29 | #include <linux/delay.h> |
31 | #include <linux/module.h> | 30 | #include <linux/module.h> |
32 | #include <linux/ptrace.h> | 31 | #include <linux/ptrace.h> |
@@ -38,7 +37,6 @@ | |||
38 | #include <linux/uaccess.h> | 37 | #include <linux/uaccess.h> |
39 | #include <linux/io.h> | 38 | #include <linux/io.h> |
40 | #include <linux/ftrace.h> | 39 | #include <linux/ftrace.h> |
41 | #include <linux/dmi.h> | ||
42 | 40 | ||
43 | #include <asm/pgtable.h> | 41 | #include <asm/pgtable.h> |
44 | #include <asm/system.h> | 42 | #include <asm/system.h> |
@@ -163,18 +161,8 @@ void __show_regs(struct pt_regs *regs, int all) | |||
163 | unsigned long d0, d1, d2, d3, d6, d7; | 161 | unsigned long d0, d1, d2, d3, d6, d7; |
164 | unsigned int fsindex, gsindex; | 162 | unsigned int fsindex, gsindex; |
165 | unsigned int ds, cs, es; | 163 | unsigned int ds, cs, es; |
166 | const char *board; | 164 | |
167 | 165 | show_regs_common(); | |
168 | printk("\n"); | ||
169 | print_modules(); | ||
170 | board = dmi_get_system_info(DMI_PRODUCT_NAME); | ||
171 | if (!board) | ||
172 | board = ""; | ||
173 | printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n", | ||
174 | current->pid, current->comm, print_tainted(), | ||
175 | init_utsname()->release, | ||
176 | (int)strcspn(init_utsname()->version, " "), | ||
177 | init_utsname()->version, board); | ||
178 | printk(KERN_INFO "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip); | 166 | printk(KERN_INFO "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip); |
179 | printk_address(regs->ip, 1); | 167 | printk_address(regs->ip, 1); |
180 | printk(KERN_INFO "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, | 168 | printk(KERN_INFO "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, |
@@ -349,26 +337,42 @@ out: | |||
349 | return err; | 337 | return err; |
350 | } | 338 | } |
351 | 339 | ||
352 | void | 340 | static void |
353 | start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) | 341 | start_thread_common(struct pt_regs *regs, unsigned long new_ip, |
342 | unsigned long new_sp, | ||
343 | unsigned int _cs, unsigned int _ss, unsigned int _ds) | ||
354 | { | 344 | { |
355 | loadsegment(fs, 0); | 345 | loadsegment(fs, 0); |
356 | loadsegment(es, 0); | 346 | loadsegment(es, _ds); |
357 | loadsegment(ds, 0); | 347 | loadsegment(ds, _ds); |
358 | load_gs_index(0); | 348 | load_gs_index(0); |
359 | regs->ip = new_ip; | 349 | regs->ip = new_ip; |
360 | regs->sp = new_sp; | 350 | regs->sp = new_sp; |
361 | percpu_write(old_rsp, new_sp); | 351 | percpu_write(old_rsp, new_sp); |
362 | regs->cs = __USER_CS; | 352 | regs->cs = _cs; |
363 | regs->ss = __USER_DS; | 353 | regs->ss = _ss; |
364 | regs->flags = 0x200; | 354 | regs->flags = X86_EFLAGS_IF; |
365 | set_fs(USER_DS); | 355 | set_fs(USER_DS); |
366 | /* | 356 | /* |
367 | * Free the old FP and other extended state | 357 | * Free the old FP and other extended state |
368 | */ | 358 | */ |
369 | free_thread_xstate(current); | 359 | free_thread_xstate(current); |
370 | } | 360 | } |
371 | EXPORT_SYMBOL_GPL(start_thread); | 361 | |
362 | void | ||
363 | start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) | ||
364 | { | ||
365 | start_thread_common(regs, new_ip, new_sp, | ||
366 | __USER_CS, __USER_DS, 0); | ||
367 | } | ||
368 | |||
369 | #ifdef CONFIG_IA32_EMULATION | ||
370 | void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp) | ||
371 | { | ||
372 | start_thread_common(regs, new_ip, new_sp, | ||
373 | __USER32_CS, __USER32_DS, __USER32_DS); | ||
374 | } | ||
375 | #endif | ||
372 | 376 | ||
373 | /* | 377 | /* |
374 | * switch_to(x,y) should switch tasks from x to y. | 378 | * switch_to(x,y) should switch tasks from x to y. |
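Two details in the start_thread_common() hunk are easy to miss: the old regs->flags = 0x200 and the new X86_EFLAGS_IF are the same value (the interrupt-enable flag is bit 9 of EFLAGS, and 1 << 9 = 0x200), and the helper leaves both entry points as thin wrappers that differ only in segment selectors, as this condensed restatement shows:

void start_thread(struct pt_regs *regs, unsigned long ip, unsigned long sp)
{
	start_thread_common(regs, ip, sp, __USER_CS, __USER_DS, 0);
}

void start_thread_ia32(struct pt_regs *regs, u32 ip, u32 sp)	/* compat */
{
	start_thread_common(regs, ip, sp, __USER32_CS, __USER32_DS, __USER32_DS);
}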
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 04d182a7cfdb..7079ddaf0731 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c | |||
@@ -555,7 +555,9 @@ static int genregs_set(struct task_struct *target, | |||
555 | return ret; | 555 | return ret; |
556 | } | 556 | } |
557 | 557 | ||
558 | static void ptrace_triggered(struct perf_event *bp, void *data) | 558 | static void ptrace_triggered(struct perf_event *bp, int nmi, |
559 | struct perf_sample_data *data, | ||
560 | struct pt_regs *regs) | ||
559 | { | 561 | { |
560 | int i; | 562 | int i; |
561 | struct thread_struct *thread = &(current->thread); | 563 | struct thread_struct *thread = &(current->thread); |
@@ -593,13 +595,13 @@ static unsigned long ptrace_get_dr7(struct perf_event *bp[]) | |||
593 | return dr7; | 595 | return dr7; |
594 | } | 596 | } |
595 | 597 | ||
596 | static struct perf_event * | 598 | static int |
597 | ptrace_modify_breakpoint(struct perf_event *bp, int len, int type, | 599 | ptrace_modify_breakpoint(struct perf_event *bp, int len, int type, |
598 | struct task_struct *tsk, int disabled) | 600 | struct task_struct *tsk, int disabled) |
599 | { | 601 | { |
600 | int err; | 602 | int err; |
601 | int gen_len, gen_type; | 603 | int gen_len, gen_type; |
602 | DEFINE_BREAKPOINT_ATTR(attr); | 604 | struct perf_event_attr attr; |
603 | 605 | ||
604 | /* | 606 | /* |
605 | * We should have at least an inactive breakpoint at this | 607 | * We should have at least an inactive breakpoint at this |
@@ -607,18 +609,18 @@ ptrace_modify_breakpoint(struct perf_event *bp, int len, int type, | |||
607 | * written the address register first | 609 | * written the address register first |
608 | */ | 610 | */ |
609 | if (!bp) | 611 | if (!bp) |
610 | return ERR_PTR(-EINVAL); | 612 | return -EINVAL; |
611 | 613 | ||
612 | err = arch_bp_generic_fields(len, type, &gen_len, &gen_type); | 614 | err = arch_bp_generic_fields(len, type, &gen_len, &gen_type); |
613 | if (err) | 615 | if (err) |
614 | return ERR_PTR(err); | 616 | return err; |
615 | 617 | ||
616 | attr = bp->attr; | 618 | attr = bp->attr; |
617 | attr.bp_len = gen_len; | 619 | attr.bp_len = gen_len; |
618 | attr.bp_type = gen_type; | 620 | attr.bp_type = gen_type; |
619 | attr.disabled = disabled; | 621 | attr.disabled = disabled; |
620 | 622 | ||
621 | return modify_user_hw_breakpoint(bp, &attr, bp->callback, tsk); | 623 | return modify_user_hw_breakpoint(bp, &attr); |
622 | } | 624 | } |
623 | 625 | ||
624 | /* | 626 | /* |
@@ -656,28 +658,17 @@ restore: | |||
656 | if (!second_pass) | 658 | if (!second_pass) |
657 | continue; | 659 | continue; |
658 | 660 | ||
659 | thread->ptrace_bps[i] = NULL; | 661 | rc = ptrace_modify_breakpoint(bp, len, type, |
660 | bp = ptrace_modify_breakpoint(bp, len, type, | ||
661 | tsk, 1); | 662 | tsk, 1); |
662 | if (IS_ERR(bp)) { | 663 | if (rc) |
663 | rc = PTR_ERR(bp); | ||
664 | thread->ptrace_bps[i] = NULL; | ||
665 | break; | 664 | break; |
666 | } | ||
667 | thread->ptrace_bps[i] = bp; | ||
668 | } | 665 | } |
669 | continue; | 666 | continue; |
670 | } | 667 | } |
671 | 668 | ||
672 | bp = ptrace_modify_breakpoint(bp, len, type, tsk, 0); | 669 | rc = ptrace_modify_breakpoint(bp, len, type, tsk, 0); |
673 | 670 | if (rc) | |
674 | /* Incorrect bp, or we have a bug in bp API */ | ||
675 | if (IS_ERR(bp)) { | ||
676 | rc = PTR_ERR(bp); | ||
677 | thread->ptrace_bps[i] = NULL; | ||
678 | break; | 671 | break; |
679 | } | ||
680 | thread->ptrace_bps[i] = bp; | ||
681 | } | 672 | } |
682 | /* | 673 | /* |
683 | * Make a second pass to free the remaining unused breakpoints | 674 | * Make a second pass to free the remaining unused breakpoints |
@@ -721,9 +712,10 @@ static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr, | |||
721 | { | 712 | { |
722 | struct perf_event *bp; | 713 | struct perf_event *bp; |
723 | struct thread_struct *t = &tsk->thread; | 714 | struct thread_struct *t = &tsk->thread; |
724 | DEFINE_BREAKPOINT_ATTR(attr); | 715 | struct perf_event_attr attr; |
725 | 716 | ||
726 | if (!t->ptrace_bps[nr]) { | 717 | if (!t->ptrace_bps[nr]) { |
718 | hw_breakpoint_init(&attr); | ||
727 | /* | 719 | /* |
728 | * Put stub len and type to register (reserve) an inactive but | 720 | * Put stub len and type to register (reserve) an inactive but |
729 | * correct bp | 721 | * correct bp |
@@ -734,26 +726,32 @@ static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr, | |||
734 | attr.disabled = 1; | 726 | attr.disabled = 1; |
735 | 727 | ||
736 | bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk); | 728 | bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk); |
729 | |||
730 | /* | ||
731 | * CHECKME: the previous code returned -EIO if the addr wasn't | ||
732 | * a valid task virtual addr. The new one will return -EINVAL in | ||
733 | * this case. | ||
734 | * -EINVAL may be what we want for in-kernel breakpoint users, ||
735 | * but -EIO looks better for ptrace, since we refuse a register ||
736 | * write for the user. And anyway this is the previous ||
737 | * behaviour. | ||
738 | */ | ||
739 | if (IS_ERR(bp)) | ||
740 | return PTR_ERR(bp); | ||
741 | |||
742 | t->ptrace_bps[nr] = bp; | ||
737 | } else { | 743 | } else { |
744 | int err; | ||
745 | |||
738 | bp = t->ptrace_bps[nr]; | 746 | bp = t->ptrace_bps[nr]; |
739 | t->ptrace_bps[nr] = NULL; | ||
740 | 747 | ||
741 | attr = bp->attr; | 748 | attr = bp->attr; |
742 | attr.bp_addr = addr; | 749 | attr.bp_addr = addr; |
743 | bp = modify_user_hw_breakpoint(bp, &attr, bp->callback, tsk); | 750 | err = modify_user_hw_breakpoint(bp, &attr); |
751 | if (err) | ||
752 | return err; | ||
744 | } | 753 | } |
745 | /* | ||
746 | * CHECKME: the previous code returned -EIO if the addr wasn't a | ||
747 | * valid task virtual addr. The new one will return -EINVAL in this | ||
748 | * case. | ||
749 | * -EINVAL may be what we want for in-kernel breakpoints users, but | ||
750 | * -EIO looks better for ptrace, since we refuse a register writing | ||
751 | * for the user. And anyway this is the previous behaviour. | ||
752 | */ | ||
753 | if (IS_ERR(bp)) | ||
754 | return PTR_ERR(bp); | ||
755 | 754 | ||
756 | t->ptrace_bps[nr] = bp; | ||
757 | 755 | ||
758 | return 0; | 756 | return 0; |
759 | } | 757 | } |
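All of the ptrace churn above follows from one API change: modify_user_hw_breakpoint() now updates the event in place and returns 0 or a negative errno, instead of handing back a (possibly different) perf_event pointer that callers had to store back into thread->ptrace_bps[]. A hedged usage sketch, using the signatures as they appear in this patch plus the generic HW_BREAKPOINT_* attr constants:

static int example_set_bp(struct task_struct *tsk, unsigned long addr)
{
	struct perf_event_attr attr;
	struct perf_event *bp;

	hw_breakpoint_init(&attr);
	attr.bp_addr  = addr;
	attr.bp_len   = HW_BREAKPOINT_LEN_1;
	attr.bp_type  = HW_BREAKPOINT_W;
	attr.disabled = 1;			/* reserve an inactive slot */

	/* registration still returns a pointer (or ERR_PTR) ... */
	bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk);
	if (IS_ERR(bp))
		return PTR_ERR(bp);

	/* ... but later changes mutate bp in place and return an errno */
	attr = bp->attr;
	attr.disabled = 0;
	return modify_user_hw_breakpoint(bp, &attr);
}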
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c index 6c3b2c6fd772..18093d7498f0 100644 --- a/arch/x86/kernel/quirks.c +++ b/arch/x86/kernel/quirks.c | |||
@@ -499,6 +499,7 @@ static void __init quirk_amd_nb_node(struct pci_dev *dev) | |||
499 | { | 499 | { |
500 | struct pci_dev *nb_ht; | 500 | struct pci_dev *nb_ht; |
501 | unsigned int devfn; | 501 | unsigned int devfn; |
502 | u32 node; | ||
502 | u32 val; | 503 | u32 val; |
503 | 504 | ||
504 | devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0); | 505 | devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0); |
@@ -507,7 +508,13 @@ static void __init quirk_amd_nb_node(struct pci_dev *dev) | |||
507 | return; | 508 | return; |
508 | 509 | ||
509 | pci_read_config_dword(nb_ht, 0x60, &val); | 510 | pci_read_config_dword(nb_ht, 0x60, &val); |
510 | set_dev_node(&dev->dev, val & 7); | 511 | node = val & 7; |
512 | /* | ||
513 | * Some hardware may return an invalid node ID, | ||
514 | * so check it first: | ||
515 | */ | ||
516 | if (node_online(node)) | ||
517 | set_dev_node(&dev->dev, node); | ||
511 | pci_dev_put(nb_ht); | 518 | pci_dev_put(nb_ht); |
512 | } | 519 | } |
513 | 520 | ||
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 2b97fc5b124e..1545bc0c9845 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c | |||
@@ -259,6 +259,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { | |||
259 | DMI_MATCH(DMI_PRODUCT_NAME, "SBC-FITPC2"), | 259 | DMI_MATCH(DMI_PRODUCT_NAME, "SBC-FITPC2"), |
260 | }, | 260 | }, |
261 | }, | 261 | }, |
262 | { /* Handle problems with rebooting on ASUS P4S800 */ | ||
263 | .callback = set_bios_reboot, | ||
264 | .ident = "ASUS P4S800", | ||
265 | .matches = { | ||
266 | DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), | ||
267 | DMI_MATCH(DMI_BOARD_NAME, "P4S800"), | ||
268 | }, | ||
269 | }, | ||
262 | { } | 270 | { } |
263 | }; | 271 | }; |
264 | 272 | ||
diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c index 61a837743fe5..201eab63b05f 100644 --- a/arch/x86/kernel/reboot_fixups_32.c +++ b/arch/x86/kernel/reboot_fixups_32.c | |||
@@ -80,6 +80,7 @@ void mach_reboot_fixups(void) | |||
80 | continue; | 80 | continue; |
81 | 81 | ||
82 | cur->reboot_fixup(dev); | 82 | cur->reboot_fixup(dev); |
83 | pci_dev_put(dev); | ||
83 | } | 84 | } |
84 | } | 85 | } |
85 | 86 | ||
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 82e88cdda9bc..946a311a25c9 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -106,6 +106,7 @@ | |||
106 | #include <asm/percpu.h> | 106 | #include <asm/percpu.h> |
107 | #include <asm/topology.h> | 107 | #include <asm/topology.h> |
108 | #include <asm/apicdef.h> | 108 | #include <asm/apicdef.h> |
109 | #include <asm/k8.h> | ||
109 | #ifdef CONFIG_X86_64 | 110 | #ifdef CONFIG_X86_64 |
110 | #include <asm/numa_64.h> | 111 | #include <asm/numa_64.h> |
111 | #endif | 112 | #endif |
@@ -487,42 +488,11 @@ static void __init reserve_early_setup_data(void) | |||
487 | 488 | ||
488 | #ifdef CONFIG_KEXEC | 489 | #ifdef CONFIG_KEXEC |
489 | 490 | ||
490 | /** | ||
491 | * Reserve @size bytes of crashkernel memory at any suitable offset. | ||
492 | * | ||
493 | * @size: Size of the crashkernel memory to reserve. | ||
494 | * Returns the base address on success, and -1ULL on failure. | ||
495 | */ | ||
496 | static | ||
497 | unsigned long long __init find_and_reserve_crashkernel(unsigned long long size) | ||
498 | { | ||
499 | const unsigned long long alignment = 16<<20; /* 16M */ | ||
500 | unsigned long long start = 0LL; | ||
501 | |||
502 | while (1) { | ||
503 | int ret; | ||
504 | |||
505 | start = find_e820_area(start, ULONG_MAX, size, alignment); | ||
506 | if (start == -1ULL) | ||
507 | return start; | ||
508 | |||
509 | /* try to reserve it */ | ||
510 | ret = reserve_bootmem_generic(start, size, BOOTMEM_EXCLUSIVE); | ||
511 | if (ret >= 0) | ||
512 | return start; | ||
513 | |||
514 | start += alignment; | ||
515 | } | ||
516 | } | ||
517 | |||
518 | static inline unsigned long long get_total_mem(void) | 491 | static inline unsigned long long get_total_mem(void) |
519 | { | 492 | { |
520 | unsigned long long total; | 493 | unsigned long long total; |
521 | 494 | ||
522 | total = max_low_pfn - min_low_pfn; | 495 | total = max_pfn - min_low_pfn; |
523 | #ifdef CONFIG_HIGHMEM | ||
524 | total += highend_pfn - highstart_pfn; | ||
525 | #endif | ||
526 | 496 | ||
527 | return total << PAGE_SHIFT; | 497 | return total << PAGE_SHIFT; |
528 | } | 498 | } |
@@ -542,21 +512,25 @@ static void __init reserve_crashkernel(void) | |||
542 | 512 | ||
543 | /* 0 means: find the address automatically */ | 513 | /* 0 means: find the address automatically */ |
544 | if (crash_base <= 0) { | 514 | if (crash_base <= 0) { |
545 | crash_base = find_and_reserve_crashkernel(crash_size); | 515 | const unsigned long long alignment = 16<<20; /* 16M */ |
516 | |||
517 | crash_base = find_e820_area(alignment, ULONG_MAX, crash_size, | ||
518 | alignment); | ||
546 | if (crash_base == -1ULL) { | 519 | if (crash_base == -1ULL) { |
547 | pr_info("crashkernel reservation failed. " | 520 | pr_info("crashkernel reservation failed - No suitable area found.\n"); |
548 | "No suitable area found.\n"); | ||
549 | return; | 521 | return; |
550 | } | 522 | } |
551 | } else { | 523 | } else { |
552 | ret = reserve_bootmem_generic(crash_base, crash_size, | 524 | unsigned long long start; |
553 | BOOTMEM_EXCLUSIVE); | 525 | |
554 | if (ret < 0) { | 526 | start = find_e820_area(crash_base, ULONG_MAX, crash_size, |
555 | pr_info("crashkernel reservation failed - " | 527 | 1<<20); |
556 | "memory is in use\n"); | 528 | if (start != crash_base) { |
529 | pr_info("crashkernel reservation failed - memory is in use.\n"); | ||
557 | return; | 530 | return; |
558 | } | 531 | } |
559 | } | 532 | } |
533 | reserve_early(crash_base, crash_base + crash_size, "CRASH KERNEL"); | ||
560 | 534 | ||
561 | printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " | 535 | printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " |
562 | "for crashkernel (System RAM: %ldMB)\n", | 536 | "for crashkernel (System RAM: %ldMB)\n", |
@@ -699,6 +673,9 @@ static struct dmi_system_id __initdata bad_bios_dmi_table[] = { | |||
699 | 673 | ||
700 | void __init setup_arch(char **cmdline_p) | 674 | void __init setup_arch(char **cmdline_p) |
701 | { | 675 | { |
676 | int acpi = 0; | ||
677 | int k8 = 0; | ||
678 | |||
702 | #ifdef CONFIG_X86_32 | 679 | #ifdef CONFIG_X86_32 |
703 | memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data)); | 680 | memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data)); |
704 | visws_early_detect(); | 681 | visws_early_detect(); |
@@ -791,21 +768,18 @@ void __init setup_arch(char **cmdline_p) | |||
791 | strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE); | 768 | strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE); |
792 | *cmdline_p = command_line; | 769 | *cmdline_p = command_line; |
793 | 770 | ||
794 | #ifdef CONFIG_X86_64 | ||
795 | /* | 771 | /* |
796 | * Must call this twice: Once just to detect whether hardware doesn't | 772 | * x86_configure_nx() is called before parse_early_param() to detect |
797 | * support NX (so that the early EHCI debug console setup can safely | 773 | * whether hardware doesn't support NX (so that the early EHCI debug |
798 | * call set_fixmap(), and then again after parsing early parameters to | 774 | * console setup can safely call set_fixmap()). It may then be called |
799 | * honor the respective command line option. | 775 | * again from within noexec_setup() during parsing early parameters |
776 | * to honor the respective command line option. | ||
800 | */ | 777 | */ |
801 | check_efer(); | 778 | x86_configure_nx(); |
802 | #endif | ||
803 | 779 | ||
804 | parse_early_param(); | 780 | parse_early_param(); |
805 | 781 | ||
806 | #ifdef CONFIG_X86_64 | 782 | x86_report_nx(); |
807 | check_efer(); | ||
808 | #endif | ||
809 | 783 | ||
810 | /* Must be before kernel pagetables are setup */ | 784 | /* Must be before kernel pagetables are setup */ |
811 | vmi_activate(); | 785 | vmi_activate(); |
@@ -901,6 +875,13 @@ void __init setup_arch(char **cmdline_p) | |||
901 | 875 | ||
902 | reserve_brk(); | 876 | reserve_brk(); |
903 | 877 | ||
878 | #ifdef CONFIG_ACPI_SLEEP | ||
879 | /* | ||
880 | * Reserve low memory region for sleep support, ||
881 | * even before init_memory_mapping(). ||
882 | */ | ||
883 | acpi_reserve_wakeup_memory(); | ||
884 | #endif | ||
904 | init_gbpages(); | 885 | init_gbpages(); |
905 | 886 | ||
906 | /* max_pfn_mapped is updated here */ | 887 | /* max_pfn_mapped is updated here */ |
@@ -927,6 +908,8 @@ void __init setup_arch(char **cmdline_p) | |||
927 | 908 | ||
928 | reserve_initrd(); | 909 | reserve_initrd(); |
929 | 910 | ||
911 | reserve_crashkernel(); | ||
912 | |||
930 | vsmp_init(); | 913 | vsmp_init(); |
931 | 914 | ||
932 | io_delay_init(); | 915 | io_delay_init(); |
@@ -938,27 +921,24 @@ void __init setup_arch(char **cmdline_p) | |||
938 | 921 | ||
939 | early_acpi_boot_init(); | 922 | early_acpi_boot_init(); |
940 | 923 | ||
924 | /* | ||
925 | * Find and reserve possible boot-time SMP configuration: | ||
926 | */ | ||
927 | find_smp_config(); | ||
928 | |||
941 | #ifdef CONFIG_ACPI_NUMA | 929 | #ifdef CONFIG_ACPI_NUMA |
942 | /* | 930 | /* |
943 | * Parse SRAT to discover nodes. | 931 | * Parse SRAT to discover nodes. |
944 | */ | 932 | */ |
945 | acpi_numa_init(); | 933 | acpi = acpi_numa_init(); |
946 | #endif | 934 | #endif |
947 | 935 | ||
948 | initmem_init(0, max_pfn); | 936 | #ifdef CONFIG_K8_NUMA |
949 | 937 | if (!acpi) | |
950 | #ifdef CONFIG_ACPI_SLEEP | 938 | k8 = !k8_numa_init(0, max_pfn); |
951 | /* | ||
952 | * Reserve low memory region for sleep support. | ||
953 | */ | ||
954 | acpi_reserve_bootmem(); | ||
955 | #endif | 939 | #endif |
956 | /* | ||
957 | * Find and reserve possible boot-time SMP configuration: | ||
958 | */ | ||
959 | find_smp_config(); | ||
960 | 940 | ||
961 | reserve_crashkernel(); | 941 | initmem_init(0, max_pfn, acpi, k8); |
962 | 942 | ||
963 | #ifdef CONFIG_X86_64 | 943 | #ifdef CONFIG_X86_64 |
964 | /* | 944 | /* |
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index d559af913e1f..35abcb8b00e9 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c | |||
@@ -1,3 +1,5 @@ | |||
1 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
2 | |||
1 | #include <linux/kernel.h> | 3 | #include <linux/kernel.h> |
2 | #include <linux/module.h> | 4 | #include <linux/module.h> |
3 | #include <linux/init.h> | 5 | #include <linux/init.h> |
@@ -20,9 +22,9 @@ | |||
20 | #include <asm/stackprotector.h> | 22 | #include <asm/stackprotector.h> |
21 | 23 | ||
22 | #ifdef CONFIG_DEBUG_PER_CPU_MAPS | 24 | #ifdef CONFIG_DEBUG_PER_CPU_MAPS |
23 | # define DBG(x...) printk(KERN_DEBUG x) | 25 | # define DBG(fmt, ...) pr_debug(fmt, ##__VA_ARGS__) |
24 | #else | 26 | #else |
25 | # define DBG(x...) | 27 | # define DBG(fmt, ...) do { if (0) pr_debug(fmt, ##__VA_ARGS__); } while (0) |
26 | #endif | 28 | #endif |
27 | 29 | ||
28 | DEFINE_PER_CPU(int, cpu_number); | 30 | DEFINE_PER_CPU(int, cpu_number); |
@@ -116,8 +118,8 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size, | |||
116 | } else { | 118 | } else { |
117 | ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node), | 119 | ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node), |
118 | size, align, goal); | 120 | size, align, goal); |
119 | pr_debug("per cpu data for cpu%d %lu bytes on node%d at " | 121 | pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n", |
120 | "%016lx\n", cpu, size, node, __pa(ptr)); | 122 | cpu, size, node, __pa(ptr)); |
121 | } | 123 | } |
122 | return ptr; | 124 | return ptr; |
123 | #else | 125 | #else |
@@ -198,8 +200,7 @@ void __init setup_per_cpu_areas(void) | |||
198 | pcpu_cpu_distance, | 200 | pcpu_cpu_distance, |
199 | pcpu_fc_alloc, pcpu_fc_free); | 201 | pcpu_fc_alloc, pcpu_fc_free); |
200 | if (rc < 0) | 202 | if (rc < 0) |
201 | pr_warning("PERCPU: %s allocator failed (%d), " | 203 | pr_warning("%s allocator failed (%d), falling back to page size\n", |
202 | "falling back to page size\n", | ||
203 | pcpu_fc_names[pcpu_chosen_fc], rc); | 204 | pcpu_fc_names[pcpu_chosen_fc], rc); |
204 | } | 205 | } |
205 | if (rc < 0) | 206 | if (rc < 0) |
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index fbf3b07c8567..74fe6d86dc5d 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/stddef.h> | 19 | #include <linux/stddef.h> |
20 | #include <linux/personality.h> | 20 | #include <linux/personality.h> |
21 | #include <linux/uaccess.h> | 21 | #include <linux/uaccess.h> |
22 | #include <linux/user-return-notifier.h> | ||
22 | 23 | ||
23 | #include <asm/processor.h> | 24 | #include <asm/processor.h> |
24 | #include <asm/ucontext.h> | 25 | #include <asm/ucontext.h> |
@@ -863,6 +864,8 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) | |||
863 | if (current->replacement_session_keyring) | 864 | if (current->replacement_session_keyring) |
864 | key_replace_session_keyring(); | 865 | key_replace_session_keyring(); |
865 | } | 866 | } |
867 | if (thread_info_flags & _TIF_USER_RETURN_NOTIFY) | ||
868 | fire_user_return_notifiers(); | ||
866 | 869 | ||
867 | #ifdef CONFIG_X86_32 | 870 | #ifdef CONFIG_X86_32 |
868 | clear_thread_flag(TIF_IRET); | 871 | clear_thread_flag(TIF_IRET); |
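This hook is the firing half of the user-return-notifier machinery (the propagate_user_return_notify() call added in process.c above keeps the TIF flag coherent across context switches): a client registers a per-CPU notifier, registration sets TIF_USER_RETURN_NOTIFY, and do_notify_resume() invokes the callback just before the switch back to user mode. A sketch of the client side, assuming the interface this series introduces (KVM's MSR switching is the intended first user):

#include <linux/user-return-notifier.h>

static void example_on_user_return(struct user_return_notifier *urn)
{
	/* runs on this CPU immediately before returning to user mode */
}

static struct user_return_notifier example_urn = {
	.on_user_return = example_on_user_return,
};

static void example_arm(void)
{
	/* per-CPU registration; sets TIF_USER_RETURN_NOTIFY on current */
	user_return_notifier_register(&example_urn);
}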
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 324f2a44c221..29e6744f51e3 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -687,7 +687,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu) | |||
687 | .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), | 687 | .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), |
688 | }; | 688 | }; |
689 | 689 | ||
690 | INIT_WORK(&c_idle.work, do_fork_idle); | 690 | INIT_WORK_ON_STACK(&c_idle.work, do_fork_idle); |
691 | 691 | ||
692 | alternatives_smp_switch(1); | 692 | alternatives_smp_switch(1); |
693 | 693 | ||
@@ -713,6 +713,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu) | |||
713 | 713 | ||
714 | if (IS_ERR(c_idle.idle)) { | 714 | if (IS_ERR(c_idle.idle)) { |
715 | printk("failed fork for CPU %d\n", cpu); | 715 | printk("failed fork for CPU %d\n", cpu); |
716 | destroy_work_on_stack(&c_idle.work); | ||
716 | return PTR_ERR(c_idle.idle); | 717 | return PTR_ERR(c_idle.idle); |
717 | } | 718 | } |
718 | 719 | ||
@@ -831,6 +832,7 @@ do_rest: | |||
831 | smpboot_restore_warm_reset_vector(); | 832 | smpboot_restore_warm_reset_vector(); |
832 | } | 833 | } |
833 | 834 | ||
835 | destroy_work_on_stack(&c_idle.work); | ||
834 | return boot_error; | 836 | return boot_error; |
835 | } | 837 | } |
836 | 838 | ||
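
The smpboot.c change is a debug-objects fix: a work_struct that lives on the stack has to be announced with INIT_WORK_ON_STACK() and retired with destroy_work_on_stack() on every exit path (note the new call on the early-error return as well as at the end of do_boot_cpu()). The general shape of the pattern, as a sketch:

    #include <linux/workqueue.h>

    static void my_work_fn(struct work_struct *work)
    {
            /* ... runs in workqueue context ... */
    }

    static void run_work_from_stack(void)
    {
            struct work_struct work;

            INIT_WORK_ON_STACK(&work, my_work_fn); /* mark as on-stack object */
            schedule_work(&work);
            flush_work(&work);              /* must finish before leaving scope */
            destroy_work_on_stack(&work);   /* tell debugobjects it is gone */
    }
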
diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c index 1884a8d12bfa..dee1ff7cba58 100644 --- a/arch/x86/kernel/sys_i386_32.c +++ b/arch/x86/kernel/sys_i386_32.c | |||
@@ -24,31 +24,6 @@ | |||
24 | 24 | ||
25 | #include <asm/syscalls.h> | 25 | #include <asm/syscalls.h> |
26 | 26 | ||
27 | asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, | ||
28 | unsigned long prot, unsigned long flags, | ||
29 | unsigned long fd, unsigned long pgoff) | ||
30 | { | ||
31 | int error = -EBADF; | ||
32 | struct file *file = NULL; | ||
33 | struct mm_struct *mm = current->mm; | ||
34 | |||
35 | flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
36 | if (!(flags & MAP_ANONYMOUS)) { | ||
37 | file = fget(fd); | ||
38 | if (!file) | ||
39 | goto out; | ||
40 | } | ||
41 | |||
42 | down_write(&mm->mmap_sem); | ||
43 | error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); | ||
44 | up_write(&mm->mmap_sem); | ||
45 | |||
46 | if (file) | ||
47 | fput(file); | ||
48 | out: | ||
49 | return error; | ||
50 | } | ||
51 | |||
52 | /* | 27 | /* |
53 | * Perform the select(nd, in, out, ex, tv) and mmap() system | 28 | * Perform the select(nd, in, out, ex, tv) and mmap() system |
54 | * calls. Linux/i386 didn't use to be able to handle more than | 29 | * calls. Linux/i386 didn't use to be able to handle more than |
@@ -77,7 +52,7 @@ asmlinkage int old_mmap(struct mmap_arg_struct __user *arg) | |||
77 | if (a.offset & ~PAGE_MASK) | 52 | if (a.offset & ~PAGE_MASK) |
78 | goto out; | 53 | goto out; |
79 | 54 | ||
80 | err = sys_mmap2(a.addr, a.len, a.prot, a.flags, | 55 | err = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, |
81 | a.fd, a.offset >> PAGE_SHIFT); | 56 | a.fd, a.offset >> PAGE_SHIFT); |
82 | out: | 57 | out: |
83 | return err; | 58 | return err; |
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c index 45e00eb09c3a..8aa2057efd12 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c | |||
@@ -23,26 +23,11 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, | |||
23 | unsigned long, fd, unsigned long, off) | 23 | unsigned long, fd, unsigned long, off) |
24 | { | 24 | { |
25 | long error; | 25 | long error; |
26 | struct file *file; | ||
27 | |||
28 | error = -EINVAL; | 26 | error = -EINVAL; |
29 | if (off & ~PAGE_MASK) | 27 | if (off & ~PAGE_MASK) |
30 | goto out; | 28 | goto out; |
31 | 29 | ||
32 | error = -EBADF; | 30 | error = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); |
33 | file = NULL; | ||
34 | flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
35 | if (!(flags & MAP_ANONYMOUS)) { | ||
36 | file = fget(fd); | ||
37 | if (!file) | ||
38 | goto out; | ||
39 | } | ||
40 | down_write(¤t->mm->mmap_sem); | ||
41 | error = do_mmap_pgoff(file, addr, len, prot, flags, off >> PAGE_SHIFT); | ||
42 | up_write(¤t->mm->mmap_sem); | ||
43 | |||
44 | if (file) | ||
45 | fput(file); | ||
46 | out: | 31 | out: |
47 | return error; | 32 | return error; |
48 | } | 33 | } |
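
Both mmap paths above shed their open-coded fget()/do_mmap_pgoff()/fput() sequences in favour of a common sys_mmap_pgoff() syscall, keeping only the per-arch quirks (the offset alignment check on x86-64, the legacy argument struct on i386). The shared helper this series adds in generic mm code does essentially what the deleted lines did; a sketch under that assumption:

    SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
                    unsigned long, prot, unsigned long, flags,
                    unsigned long, fd, unsigned long, pgoff)
    {
            struct file *file = NULL;
            unsigned long retval = -EBADF;

            /* These flags only make sense for file-backed mappings. */
            flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
            if (!(flags & MAP_ANONYMOUS)) {
                    file = fget(fd);
                    if (!file)
                            goto out;
            }

            down_write(&current->mm->mmap_sem);
            retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
            up_write(&current->mm->mmap_sem);

            if (file)
                    fput(file);
    out:
            return retval;
    }
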
diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S index 0157cd26d7cc..15228b5d3eb7 100644 --- a/arch/x86/kernel/syscall_table_32.S +++ b/arch/x86/kernel/syscall_table_32.S | |||
@@ -191,7 +191,7 @@ ENTRY(sys_call_table) | |||
191 | .long sys_ni_syscall /* reserved for streams2 */ | 191 | .long sys_ni_syscall /* reserved for streams2 */ |
192 | .long ptregs_vfork /* 190 */ | 192 | .long ptregs_vfork /* 190 */ |
193 | .long sys_getrlimit | 193 | .long sys_getrlimit |
194 | .long sys_mmap2 | 194 | .long sys_mmap_pgoff |
195 | .long sys_truncate64 | 195 | .long sys_truncate64 |
196 | .long sys_ftruncate64 | 196 | .long sys_ftruncate64 |
197 | .long sys_stat64 /* 195 */ | 197 | .long sys_stat64 /* 195 */ |
@@ -336,3 +336,4 @@ ENTRY(sys_call_table) | |||
336 | .long sys_pwritev | 336 | .long sys_pwritev |
337 | .long sys_rt_tgsigqueueinfo /* 335 */ | 337 | .long sys_rt_tgsigqueueinfo /* 335 */ |
338 | .long sys_perf_event_open | 338 | .long sys_perf_event_open |
339 | .long sys_recvmmsg | ||
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c index 1740c85e24bb..364d015efebc 100644 --- a/arch/x86/kernel/tlb_uv.c +++ b/arch/x86/kernel/tlb_uv.c | |||
@@ -817,10 +817,8 @@ static int __init uv_init_blade(int blade) | |||
817 | */ | 817 | */ |
818 | apicid = blade_to_first_apicid(blade); | 818 | apicid = blade_to_first_apicid(blade); |
819 | pa = uv_read_global_mmr64(pnode, UVH_BAU_DATA_CONFIG); | 819 | pa = uv_read_global_mmr64(pnode, UVH_BAU_DATA_CONFIG); |
820 | if ((pa & 0xff) != UV_BAU_MESSAGE) { | 820 | uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, |
821 | uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, | ||
822 | ((apicid << 32) | UV_BAU_MESSAGE)); | 821 | ((apicid << 32) | UV_BAU_MESSAGE)); |
823 | } | ||
824 | return 0; | 822 | return 0; |
825 | } | 823 | } |
826 | 824 | ||
diff --git a/arch/x86/kernel/uv_time.c b/arch/x86/kernel/uv_time.c index 583f11d5c480..3c84aa001c11 100644 --- a/arch/x86/kernel/uv_time.c +++ b/arch/x86/kernel/uv_time.c | |||
@@ -74,7 +74,7 @@ struct uv_rtc_timer_head { | |||
74 | */ | 74 | */ |
75 | static struct uv_rtc_timer_head **blade_info __read_mostly; | 75 | static struct uv_rtc_timer_head **blade_info __read_mostly; |
76 | 76 | ||
77 | static int uv_rtc_enable; | 77 | static int uv_rtc_evt_enable; |
78 | 78 | ||
79 | /* | 79 | /* |
80 | * Hardware interface routines | 80 | * Hardware interface routines |
@@ -90,7 +90,7 @@ static void uv_rtc_send_IPI(int cpu) | |||
90 | pnode = uv_apicid_to_pnode(apicid); | 90 | pnode = uv_apicid_to_pnode(apicid); |
91 | val = (1UL << UVH_IPI_INT_SEND_SHFT) | | 91 | val = (1UL << UVH_IPI_INT_SEND_SHFT) | |
92 | (apicid << UVH_IPI_INT_APIC_ID_SHFT) | | 92 | (apicid << UVH_IPI_INT_APIC_ID_SHFT) | |
93 | (GENERIC_INTERRUPT_VECTOR << UVH_IPI_INT_VECTOR_SHFT); | 93 | (X86_PLATFORM_IPI_VECTOR << UVH_IPI_INT_VECTOR_SHFT); |
94 | 94 | ||
95 | uv_write_global_mmr64(pnode, UVH_IPI_INT, val); | 95 | uv_write_global_mmr64(pnode, UVH_IPI_INT, val); |
96 | } | 96 | } |
@@ -115,7 +115,7 @@ static int uv_setup_intr(int cpu, u64 expires) | |||
115 | uv_write_global_mmr64(pnode, UVH_EVENT_OCCURRED0_ALIAS, | 115 | uv_write_global_mmr64(pnode, UVH_EVENT_OCCURRED0_ALIAS, |
116 | UVH_EVENT_OCCURRED0_RTC1_MASK); | 116 | UVH_EVENT_OCCURRED0_RTC1_MASK); |
117 | 117 | ||
118 | val = (GENERIC_INTERRUPT_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) | | 118 | val = (X86_PLATFORM_IPI_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) | |
119 | ((u64)cpu_physical_id(cpu) << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT); | 119 | ((u64)cpu_physical_id(cpu) << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT); |
120 | 120 | ||
121 | /* Set configuration */ | 121 | /* Set configuration */ |
@@ -123,7 +123,10 @@ static int uv_setup_intr(int cpu, u64 expires) | |||
123 | /* Initialize comparator value */ | 123 | /* Initialize comparator value */ |
124 | uv_write_global_mmr64(pnode, UVH_INT_CMPB, expires); | 124 | uv_write_global_mmr64(pnode, UVH_INT_CMPB, expires); |
125 | 125 | ||
126 | return (expires < uv_read_rtc(NULL) && !uv_intr_pending(pnode)); | 126 | if (uv_read_rtc(NULL) <= expires) |
127 | return 0; | ||
128 | |||
129 | return !uv_intr_pending(pnode); | ||
127 | } | 130 | } |
128 | 131 | ||
129 | /* | 132 | /* |
@@ -223,6 +226,7 @@ static int uv_rtc_set_timer(int cpu, u64 expires) | |||
223 | 226 | ||
224 | next_cpu = head->next_cpu; | 227 | next_cpu = head->next_cpu; |
225 | *t = expires; | 228 | *t = expires; |
229 | |||
226 | /* Will this one be next to go off? */ | 230 | /* Will this one be next to go off? */ |
227 | if (next_cpu < 0 || bcpu == next_cpu || | 231 | if (next_cpu < 0 || bcpu == next_cpu || |
228 | expires < head->cpu[next_cpu].expires) { | 232 | expires < head->cpu[next_cpu].expires) { |
@@ -231,7 +235,7 @@ static int uv_rtc_set_timer(int cpu, u64 expires) | |||
231 | *t = ULLONG_MAX; | 235 | *t = ULLONG_MAX; |
232 | uv_rtc_find_next_timer(head, pnode); | 236 | uv_rtc_find_next_timer(head, pnode); |
233 | spin_unlock_irqrestore(&head->lock, flags); | 237 | spin_unlock_irqrestore(&head->lock, flags); |
234 | return 1; | 238 | return -ETIME; |
235 | } | 239 | } |
236 | } | 240 | } |
237 | 241 | ||
@@ -244,7 +248,7 @@ static int uv_rtc_set_timer(int cpu, u64 expires) | |||
244 | * | 248 | * |
245 | * Returns 1 if this timer was pending. | 249 | * Returns 1 if this timer was pending. |
246 | */ | 250 | */ |
247 | static int uv_rtc_unset_timer(int cpu) | 251 | static int uv_rtc_unset_timer(int cpu, int force) |
248 | { | 252 | { |
249 | int pnode = uv_cpu_to_pnode(cpu); | 253 | int pnode = uv_cpu_to_pnode(cpu); |
250 | int bid = uv_cpu_to_blade_id(cpu); | 254 | int bid = uv_cpu_to_blade_id(cpu); |
@@ -256,14 +260,15 @@ static int uv_rtc_unset_timer(int cpu) | |||
256 | 260 | ||
257 | spin_lock_irqsave(&head->lock, flags); | 261 | spin_lock_irqsave(&head->lock, flags); |
258 | 262 | ||
259 | if (head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) | 263 | if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force) |
260 | rc = 1; | 264 | rc = 1; |
261 | 265 | ||
262 | *t = ULLONG_MAX; | 266 | if (rc) { |
263 | 267 | *t = ULLONG_MAX; | |
264 | /* Was the hardware setup for this timer? */ | 268 | /* Was the hardware setup for this timer? */ |
265 | if (head->next_cpu == bcpu) | 269 | if (head->next_cpu == bcpu) |
266 | uv_rtc_find_next_timer(head, pnode); | 270 | uv_rtc_find_next_timer(head, pnode); |
271 | } | ||
267 | 272 | ||
268 | spin_unlock_irqrestore(&head->lock, flags); | 273 | spin_unlock_irqrestore(&head->lock, flags); |
269 | 274 | ||
@@ -310,32 +315,32 @@ static void uv_rtc_timer_setup(enum clock_event_mode mode, | |||
310 | break; | 315 | break; |
311 | case CLOCK_EVT_MODE_UNUSED: | 316 | case CLOCK_EVT_MODE_UNUSED: |
312 | case CLOCK_EVT_MODE_SHUTDOWN: | 317 | case CLOCK_EVT_MODE_SHUTDOWN: |
313 | uv_rtc_unset_timer(ced_cpu); | 318 | uv_rtc_unset_timer(ced_cpu, 1); |
314 | break; | 319 | break; |
315 | } | 320 | } |
316 | } | 321 | } |
317 | 322 | ||
318 | static void uv_rtc_interrupt(void) | 323 | static void uv_rtc_interrupt(void) |
319 | { | 324 | { |
320 | struct clock_event_device *ced = &__get_cpu_var(cpu_ced); | ||
321 | int cpu = smp_processor_id(); | 325 | int cpu = smp_processor_id(); |
326 | struct clock_event_device *ced = &per_cpu(cpu_ced, cpu); | ||
322 | 327 | ||
323 | if (!ced || !ced->event_handler) | 328 | if (!ced || !ced->event_handler) |
324 | return; | 329 | return; |
325 | 330 | ||
326 | if (uv_rtc_unset_timer(cpu) != 1) | 331 | if (uv_rtc_unset_timer(cpu, 0) != 1) |
327 | return; | 332 | return; |
328 | 333 | ||
329 | ced->event_handler(ced); | 334 | ced->event_handler(ced); |
330 | } | 335 | } |
331 | 336 | ||
332 | static int __init uv_enable_rtc(char *str) | 337 | static int __init uv_enable_evt_rtc(char *str) |
333 | { | 338 | { |
334 | uv_rtc_enable = 1; | 339 | uv_rtc_evt_enable = 1; |
335 | 340 | ||
336 | return 1; | 341 | return 1; |
337 | } | 342 | } |
338 | __setup("uvrtc", uv_enable_rtc); | 343 | __setup("uvrtcevt", uv_enable_evt_rtc); |
339 | 344 | ||
340 | static __init void uv_rtc_register_clockevents(struct work_struct *dummy) | 345 | static __init void uv_rtc_register_clockevents(struct work_struct *dummy) |
341 | { | 346 | { |
@@ -350,27 +355,32 @@ static __init int uv_rtc_setup_clock(void) | |||
350 | { | 355 | { |
351 | int rc; | 356 | int rc; |
352 | 357 | ||
353 | if (!uv_rtc_enable || !is_uv_system() || generic_interrupt_extension) | 358 | if (!is_uv_system()) |
354 | return -ENODEV; | 359 | return -ENODEV; |
355 | 360 | ||
356 | generic_interrupt_extension = uv_rtc_interrupt; | ||
357 | |||
358 | clocksource_uv.mult = clocksource_hz2mult(sn_rtc_cycles_per_second, | 361 | clocksource_uv.mult = clocksource_hz2mult(sn_rtc_cycles_per_second, |
359 | clocksource_uv.shift); | 362 | clocksource_uv.shift); |
360 | 363 | ||
364 | /* If single blade, prefer tsc */ | ||
365 | if (uv_num_possible_blades() == 1) | ||
366 | clocksource_uv.rating = 250; | ||
367 | |||
361 | rc = clocksource_register(&clocksource_uv); | 368 | rc = clocksource_register(&clocksource_uv); |
362 | if (rc) { | 369 | if (rc) |
363 | generic_interrupt_extension = NULL; | 370 | printk(KERN_INFO "UV RTC clocksource failed rc %d\n", rc); |
371 | else | ||
372 | printk(KERN_INFO "UV RTC clocksource registered freq %lu MHz\n", | ||
373 | sn_rtc_cycles_per_second/(unsigned long)1E6); | ||
374 | |||
375 | if (rc || !uv_rtc_evt_enable || x86_platform_ipi_callback) | ||
364 | return rc; | 376 | return rc; |
365 | } | ||
366 | 377 | ||
367 | /* Setup and register clockevents */ | 378 | /* Setup and register clockevents */ |
368 | rc = uv_rtc_allocate_timers(); | 379 | rc = uv_rtc_allocate_timers(); |
369 | if (rc) { | 380 | if (rc) |
370 | clocksource_unregister(&clocksource_uv); | 381 | goto error; |
371 | generic_interrupt_extension = NULL; | 382 | |
372 | return rc; | 383 | x86_platform_ipi_callback = uv_rtc_interrupt; |
373 | } | ||
374 | 384 | ||
375 | clock_event_device_uv.mult = div_sc(sn_rtc_cycles_per_second, | 385 | clock_event_device_uv.mult = div_sc(sn_rtc_cycles_per_second, |
376 | NSEC_PER_SEC, clock_event_device_uv.shift); | 386 | NSEC_PER_SEC, clock_event_device_uv.shift); |
@@ -383,11 +393,19 @@ static __init int uv_rtc_setup_clock(void) | |||
383 | 393 | ||
384 | rc = schedule_on_each_cpu(uv_rtc_register_clockevents); | 394 | rc = schedule_on_each_cpu(uv_rtc_register_clockevents); |
385 | if (rc) { | 395 | if (rc) { |
386 | clocksource_unregister(&clocksource_uv); | 396 | x86_platform_ipi_callback = NULL; |
387 | generic_interrupt_extension = NULL; | ||
388 | uv_rtc_deallocate_timers(); | 397 | uv_rtc_deallocate_timers(); |
398 | goto error; | ||
389 | } | 399 | } |
390 | 400 | ||
401 | printk(KERN_INFO "UV RTC clockevents registered\n"); | ||
402 | |||
403 | return 0; | ||
404 | |||
405 | error: | ||
406 | clocksource_unregister(&clocksource_uv); | ||
407 | printk(KERN_INFO "UV RTC clockevents failed rc %d\n", rc); | ||
408 | |||
391 | return rc; | 409 | return rc; |
392 | } | 410 | } |
393 | arch_initcall(uv_rtc_setup_clock); | 411 | arch_initcall(uv_rtc_setup_clock); |
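
Several of the uv_time.c changes align the UV RTC clockevent with core conventions: uv_rtc_set_timer() now reports an already-expired deadline as -ETIME (the clockevents core treats a negative return from set_next_event as "too late, re-program me"), and the unset path gains a force flag so shutdown can always clear a timer while the interrupt path only consumes genuinely expired ones. The set_next_event glue then reduces to roughly:

    static int uv_rtc_next_event(unsigned long delta,
                                 struct clock_event_device *ced)
    {
            int ced_cpu = cpumask_first(ced->cpumask);

            /* -ETIME from uv_rtc_set_timer() tells the clockevents core
             * the expiry has already passed and it should try again. */
            return uv_rtc_set_timer(ced_cpu, uv_read_rtc(NULL) + delta);
    }
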
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c index abda6f53e71e..34a279a7471d 100644 --- a/arch/x86/kernel/visws_quirks.c +++ b/arch/x86/kernel/visws_quirks.c | |||
@@ -197,7 +197,7 @@ static void __init MP_processor_info(struct mpc_cpu *m) | |||
197 | apic_version[m->apicid] = ver; | 197 | apic_version[m->apicid] = ver; |
198 | } | 198 | } |
199 | 199 | ||
200 | static void __init visws_find_smp_config(unsigned int reserve) | 200 | static void __init visws_find_smp_config(void) |
201 | { | 201 | { |
202 | struct mpc_cpu *mp = phys_to_virt(CO_CPU_TAB_PHYS); | 202 | struct mpc_cpu *mp = phys_to_virt(CO_CPU_TAB_PHYS); |
203 | unsigned short ncpus = readw(phys_to_virt(CO_CPU_NUM_PHYS)); | 203 | unsigned short ncpus = readw(phys_to_virt(CO_CPU_NUM_PHYS)); |
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c index 611b9e2360d3..74c92bb194df 100644 --- a/arch/x86/kernel/vmiclock_32.c +++ b/arch/x86/kernel/vmiclock_32.c | |||
@@ -226,7 +226,7 @@ static void __devinit vmi_time_init_clockevent(void) | |||
226 | evt->min_delta_ns = clockevent_delta2ns(1, evt); | 226 | evt->min_delta_ns = clockevent_delta2ns(1, evt); |
227 | evt->cpumask = cpumask_of(cpu); | 227 | evt->cpumask = cpumask_of(cpu); |
228 | 228 | ||
229 | printk(KERN_WARNING "vmi: registering clock event %s. mult=%lu shift=%u\n", | 229 | printk(KERN_WARNING "vmi: registering clock event %s. mult=%u shift=%u\n", |
230 | evt->name, evt->mult, evt->shift); | 230 | evt->name, evt->mult, evt->shift); |
231 | clockevents_register_device(evt); | 231 | clockevents_register_device(evt); |
232 | } | 232 | } |
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 3c68fe2d46cf..f3f2104408d9 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S | |||
@@ -41,6 +41,32 @@ ENTRY(phys_startup_64) | |||
41 | jiffies_64 = jiffies; | 41 | jiffies_64 = jiffies; |
42 | #endif | 42 | #endif |
43 | 43 | ||
44 | #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA) | ||
45 | /* | ||
46 | * On 64-bit, align RODATA to 2MB so that even with CONFIG_DEBUG_RODATA | ||
47 | * we retain large page mappings for boundaries spanning kernel text, rodata | ||
48 | * and data sections. | ||
49 | * | ||
50 | * However, the kernel identity mappings will have different RWX permissions | ||
51 | * for the pages mapping the text section and for the padding pages around it | ||
52 | * (which are freed). Hence the kernel identity mappings will be broken into | ||
53 | * smaller pages. For 64-bit, kernel text and identity mappings are separate, | ||
54 | * so we can enable protection checks that come with CONFIG_DEBUG_RODATA, | ||
55 | * as well as retain 2MB large page mappings for kernel text. | ||
56 | */ | ||
57 | #define X64_ALIGN_DEBUG_RODATA_BEGIN . = ALIGN(HPAGE_SIZE); | ||
58 | |||
59 | #define X64_ALIGN_DEBUG_RODATA_END \ | ||
60 | . = ALIGN(HPAGE_SIZE); \ | ||
61 | __end_rodata_hpage_align = .; | ||
62 | |||
63 | #else | ||
64 | |||
65 | #define X64_ALIGN_DEBUG_RODATA_BEGIN | ||
66 | #define X64_ALIGN_DEBUG_RODATA_END | ||
67 | |||
68 | #endif | ||
69 | |||
44 | PHDRS { | 70 | PHDRS { |
45 | text PT_LOAD FLAGS(5); /* R_E */ | 71 | text PT_LOAD FLAGS(5); /* R_E */ |
46 | data PT_LOAD FLAGS(7); /* RWE */ | 72 | data PT_LOAD FLAGS(7); /* RWE */ |
@@ -90,7 +116,9 @@ SECTIONS | |||
90 | 116 | ||
91 | EXCEPTION_TABLE(16) :text = 0x9090 | 117 | EXCEPTION_TABLE(16) :text = 0x9090 |
92 | 118 | ||
119 | X64_ALIGN_DEBUG_RODATA_BEGIN | ||
93 | RO_DATA(PAGE_SIZE) | 120 | RO_DATA(PAGE_SIZE) |
121 | X64_ALIGN_DEBUG_RODATA_END | ||
94 | 122 | ||
95 | /* Data */ | 123 | /* Data */ |
96 | .data : AT(ADDR(.data) - LOAD_OFFSET) { | 124 | .data : AT(ADDR(.data) - LOAD_OFFSET) { |
@@ -107,13 +135,13 @@ SECTIONS | |||
107 | 135 | ||
108 | PAGE_ALIGNED_DATA(PAGE_SIZE) | 136 | PAGE_ALIGNED_DATA(PAGE_SIZE) |
109 | 137 | ||
110 | CACHELINE_ALIGNED_DATA(CONFIG_X86_L1_CACHE_BYTES) | 138 | CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES) |
111 | 139 | ||
112 | DATA_DATA | 140 | DATA_DATA |
113 | CONSTRUCTORS | 141 | CONSTRUCTORS |
114 | 142 | ||
115 | /* rarely changed data like cpu maps */ | 143 | /* rarely changed data like cpu maps */ |
116 | READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES) | 144 | READ_MOSTLY_DATA(INTERNODE_CACHE_BYTES) |
117 | 145 | ||
118 | /* End of data section */ | 146 | /* End of data section */ |
119 | _edata = .; | 147 | _edata = .; |
@@ -137,12 +165,12 @@ SECTIONS | |||
137 | *(.vsyscall_0) | 165 | *(.vsyscall_0) |
138 | } :user | 166 | } :user |
139 | 167 | ||
140 | . = ALIGN(CONFIG_X86_L1_CACHE_BYTES); | 168 | . = ALIGN(L1_CACHE_BYTES); |
141 | .vsyscall_fn : AT(VLOAD(.vsyscall_fn)) { | 169 | .vsyscall_fn : AT(VLOAD(.vsyscall_fn)) { |
142 | *(.vsyscall_fn) | 170 | *(.vsyscall_fn) |
143 | } | 171 | } |
144 | 172 | ||
145 | . = ALIGN(CONFIG_X86_L1_CACHE_BYTES); | 173 | . = ALIGN(L1_CACHE_BYTES); |
146 | .vsyscall_gtod_data : AT(VLOAD(.vsyscall_gtod_data)) { | 174 | .vsyscall_gtod_data : AT(VLOAD(.vsyscall_gtod_data)) { |
147 | *(.vsyscall_gtod_data) | 175 | *(.vsyscall_gtod_data) |
148 | } | 176 | } |
@@ -166,7 +194,7 @@ SECTIONS | |||
166 | } | 194 | } |
167 | vgetcpu_mode = VVIRT(.vgetcpu_mode); | 195 | vgetcpu_mode = VVIRT(.vgetcpu_mode); |
168 | 196 | ||
169 | . = ALIGN(CONFIG_X86_L1_CACHE_BYTES); | 197 | . = ALIGN(L1_CACHE_BYTES); |
170 | .jiffies : AT(VLOAD(.jiffies)) { | 198 | .jiffies : AT(VLOAD(.jiffies)) { |
171 | *(.jiffies) | 199 | *(.jiffies) |
172 | } | 200 | } |
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c index 8cb4974ff599..9055e5872ff0 100644 --- a/arch/x86/kernel/vsyscall_64.c +++ b/arch/x86/kernel/vsyscall_64.c | |||
@@ -73,7 +73,8 @@ void update_vsyscall_tz(void) | |||
73 | write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); | 73 | write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); |
74 | } | 74 | } |
75 | 75 | ||
76 | void update_vsyscall(struct timespec *wall_time, struct clocksource *clock) | 76 | void update_vsyscall(struct timespec *wall_time, struct clocksource *clock, |
77 | u32 mult) | ||
77 | { | 78 | { |
78 | unsigned long flags; | 79 | unsigned long flags; |
79 | 80 | ||
@@ -82,7 +83,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock) | |||
82 | vsyscall_gtod_data.clock.vread = clock->vread; | 83 | vsyscall_gtod_data.clock.vread = clock->vread; |
83 | vsyscall_gtod_data.clock.cycle_last = clock->cycle_last; | 84 | vsyscall_gtod_data.clock.cycle_last = clock->cycle_last; |
84 | vsyscall_gtod_data.clock.mask = clock->mask; | 85 | vsyscall_gtod_data.clock.mask = clock->mask; |
85 | vsyscall_gtod_data.clock.mult = clock->mult; | 86 | vsyscall_gtod_data.clock.mult = mult; |
86 | vsyscall_gtod_data.clock.shift = clock->shift; | 87 | vsyscall_gtod_data.clock.shift = clock->shift; |
87 | vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec; | 88 | vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec; |
88 | vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec; | 89 | vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec; |
@@ -237,7 +238,7 @@ static ctl_table kernel_table2[] = { | |||
237 | }; | 238 | }; |
238 | 239 | ||
239 | static ctl_table kernel_root_table2[] = { | 240 | static ctl_table kernel_root_table2[] = { |
240 | { .ctl_name = CTL_KERN, .procname = "kernel", .mode = 0555, | 241 | { .procname = "kernel", .mode = 0555, |
241 | .child = kernel_table2 }, | 242 | .child = kernel_table2 }, |
242 | {} | 243 | {} |
243 | }; | 244 | }; |
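
The extra mult argument lets the timekeeping core pass update_vsyscall() the NTP-adjusted multiplier it is actually accumulating time with, rather than the clocksource's raw clock->mult, so the userspace gettimeofday fast path scales cycles exactly as the kernel does. Both sides depend on the same fixed-point conversion; a sketch with illustrative names:

    /* Sketch of the cycle-to-nanosecond step behind the vsyscall fast path;
     * the snapshot fields come from vsyscall_gtod_data filled in above. */
    static inline u64 cycles_to_ns(u64 now, u64 cycle_last, u32 mult, u32 shift)
    {
            return ((now - cycle_last) * mult) >> shift;
    }
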
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c index d11c5ff7c65e..ccd179dec36e 100644 --- a/arch/x86/kernel/x86_init.c +++ b/arch/x86/kernel/x86_init.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <asm/e820.h> | 13 | #include <asm/e820.h> |
14 | #include <asm/time.h> | 14 | #include <asm/time.h> |
15 | #include <asm/irq.h> | 15 | #include <asm/irq.h> |
16 | #include <asm/pat.h> | ||
16 | #include <asm/tsc.h> | 17 | #include <asm/tsc.h> |
17 | #include <asm/iommu.h> | 18 | #include <asm/iommu.h> |
18 | 19 | ||
@@ -80,4 +81,5 @@ struct x86_platform_ops x86_platform = { | |||
80 | .get_wallclock = mach_get_cmos_time, | 81 | .get_wallclock = mach_get_cmos_time, |
81 | .set_wallclock = mach_set_rtc_mmss, | 82 | .set_wallclock = mach_set_rtc_mmss, |
82 | .iommu_shutdown = iommu_shutdown_noop, | 83 | .iommu_shutdown = iommu_shutdown_noop, |
84 | .is_untracked_pat_range = is_ISA_range, | ||
83 | }; | 85 | }; |
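
The new is_untracked_pat_range hook lets a platform exempt an address window from PAT memtype tracking; the default, is_ISA_range(), covers the legacy ISA hole. To the best of my recollection of asm/e820.h it is a plain bounds check; treat the constants below as assumptions rather than quoted kernel source:

    #define ISA_START_ADDRESS   0xa0000     /* assumed: start of the ISA hole */
    #define ISA_END_ADDRESS     0x100000    /* assumed: 1MB */

    static inline int is_ISA_range(u64 start, u64 end)
    {
            return start >= ISA_START_ADDRESS && end <= ISA_END_ADDRESS;
    }
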