author    Andi Kleen <andi@basil.nowhere.org>    2006-11-21 04:22:09 -0500
committer Andi Kleen <andi@basil.nowhere.org>    2006-11-21 04:22:09 -0500
commit    1b7f6a626f0ff511c3840678466cbfe1d62c0b29 (patch)
tree      415e8c838c0067bff384afb8a2c91e5f7c6d11d3 /arch/x86_64/kernel
parent    b3edc9cec07ade41aaf1804f7c9e876afa90c862 (diff)
parent    3f5a6ca31c334011fd929501a078424c0d3f71be (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'arch/x86_64/kernel')
-rw-r--r--  arch/x86_64/kernel/e820.c          |   4
-rw-r--r--  arch/x86_64/kernel/early-quirks.c  |   8
-rw-r--r--  arch/x86_64/kernel/io_apic.c       | 169
-rw-r--r--  arch/x86_64/kernel/process.c       |   7
-rw-r--r--  arch/x86_64/kernel/smp.c           |   3
-rw-r--r--  arch/x86_64/kernel/smpboot.c       |   7
-rw-r--r--  arch/x86_64/kernel/time.c          |  11
-rw-r--r--  arch/x86_64/kernel/traps.c         |   6
-rw-r--r--  arch/x86_64/kernel/vmlinux.lds.S   |   8
-rw-r--r--  arch/x86_64/kernel/vsyscall.c      |  47
10 files changed, 187 insertions(+), 83 deletions(-)
diff --git a/arch/x86_64/kernel/e820.c b/arch/x86_64/kernel/e820.c
index a75c829c2b02..6fe191c58084 100644
--- a/arch/x86_64/kernel/e820.c
+++ b/arch/x86_64/kernel/e820.c
@@ -278,7 +278,7 @@ e820_register_active_regions(int nid, unsigned long start_pfn,
 						>> PAGE_SHIFT;
 
 		/* Skip map entries smaller than a page */
-		if (ei_startpfn > ei_endpfn)
+		if (ei_startpfn >= ei_endpfn)
 			continue;
 
 		/* Check if end_pfn_map should be updated */
@@ -594,7 +594,9 @@ static int __init parse_memmap_opt(char *p)
 		 * size before original memory map is
 		 * reset.
 		 */
+		e820_register_active_regions(0, 0, -1UL);
 		saved_max_pfn = e820_end_of_ram();
+		remove_all_active_ranges();
 #endif
 		end_pfn_map = 0;
 		e820.nr_map = 0;
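
Note: the two changes in this file work together for kdump. With memmap=exactmap on a crash kernel, saved_max_pfn must be computed from the full BIOS map before the map is thrown away, hence the temporary register/remove pair around e820_end_of_ram(); and the '>=' comparison keeps sub-page map entries, whose page-rounded range is empty, from being registered at all. A minimal userspace model of that rounding (the PAGE_SHIFT value and entry layout mirror the kernel's; everything else is illustrative):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

struct e820entry {
	uint64_t addr;	/* start of memory segment */
	uint64_t size;	/* size of memory segment */
};

int main(void)
{
	/* a sub-page BIOS entry: rounds to an empty pfn range */
	struct e820entry ei = { .addr = 0x1000, .size = 0x800 };

	uint64_t ei_startpfn = (ei.addr + PAGE_SIZE - 1) >> PAGE_SHIFT;
	uint64_t ei_endpfn = (ei.addr + ei.size) >> PAGE_SHIFT;

	/* the old '>' test let start == end through, registering an
	 * empty region; '>=' skips it */
	if (ei_startpfn >= ei_endpfn)
		printf("skipped empty pfn range [%llu, %llu)\n",
		       (unsigned long long)ei_startpfn,
		       (unsigned long long)ei_endpfn);
	return 0;
}
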
diff --git a/arch/x86_64/kernel/early-quirks.c b/arch/x86_64/kernel/early-quirks.c
index 2b1245d86258..68273bff58cc 100644
--- a/arch/x86_64/kernel/early-quirks.c
+++ b/arch/x86_64/kernel/early-quirks.c
@@ -45,7 +45,13 @@ static void nvidia_bugs(void)
 	/*
 	 * All timer overrides on Nvidia are
 	 * wrong unless HPET is enabled.
+	 * Unfortunately that's not true on many Asus boards.
+	 * We don't know yet how to detect this automatically, but
+	 * at least allow a command line override.
 	 */
+	if (acpi_use_timer_override)
+		return;
+
 	nvidia_hpet_detected = 0;
 	acpi_table_parse(ACPI_HPET, nvidia_hpet_check);
 	if (nvidia_hpet_detected == 0) {
@@ -53,6 +59,8 @@ static void nvidia_bugs(void)
 		printk(KERN_INFO "Nvidia board "
 		       "detected. Ignoring ACPI "
 		       "timer override.\n");
+		printk(KERN_INFO "If you got timer trouble "
+		       "try acpi_use_timer_override\n");
 	}
 #endif
 	/* RED-PEN skip them on mptables too? */
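
Note: the quirk now honors a boot parameter before discarding the ACPI timer override. The flag itself is defined outside this diff, in the ACPI boot code; the usual wiring for such an early flag is an early_param() hook, sketched here purely for illustration (placement and parser name are assumptions, not the actual definition site):

/* Sketch only: shows the conventional early_param() pattern for a
 * boolean boot flag such as acpi_use_timer_override. */
static int acpi_use_timer_override __initdata;

static int __init parse_acpi_use_timer_override(char *arg)
{
	acpi_use_timer_override = 1;	/* trust the ACPI timer override */
	return 0;
}
early_param("acpi_use_timer_override", parse_acpi_use_timer_override);
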
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index b000017e4b5d..14654e682411 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -63,7 +63,7 @@ int timer_over_8254 __initdata = 1;
 static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
 
 static DEFINE_SPINLOCK(ioapic_lock);
-static DEFINE_SPINLOCK(vector_lock);
+DEFINE_SPINLOCK(vector_lock);
 
 /*
  * # of IRQ routing registers
@@ -88,6 +88,52 @@ static struct irq_pin_list {
 	short apic, pin, next;
 } irq_2_pin[PIN_MAP_SIZE];
 
+struct io_apic {
+	unsigned int index;
+	unsigned int unused[3];
+	unsigned int data;
+};
+
+static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
+{
+	return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
+		+ (mp_ioapics[idx].mpc_apicaddr & ~PAGE_MASK);
+}
+
+static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
+{
+	struct io_apic __iomem *io_apic = io_apic_base(apic);
+	writel(reg, &io_apic->index);
+	return readl(&io_apic->data);
+}
+
+static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
+{
+	struct io_apic __iomem *io_apic = io_apic_base(apic);
+	writel(reg, &io_apic->index);
+	writel(value, &io_apic->data);
+}
+
+/*
+ * Re-write a value: to be used for read-modify-write
+ * cycles where the read already set up the index register.
+ */
+static inline void io_apic_modify(unsigned int apic, unsigned int value)
+{
+	struct io_apic __iomem *io_apic = io_apic_base(apic);
+	writel(value, &io_apic->data);
+}
+
+/*
+ * Synchronize the IO-APIC and the CPU by doing
+ * a dummy read from the IO-APIC
+ */
+static inline void io_apic_sync(unsigned int apic)
+{
+	struct io_apic __iomem *io_apic = io_apic_base(apic);
+	readl(&io_apic->data);
+}
+
 #define __DO_ACTION(R, ACTION, FINAL)					\
 									\
 {									\
@@ -126,12 +172,34 @@ static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
 	return eu.entry;
 }
 
+/*
+ * When we write a new IO APIC routing entry, we need to write the high
+ * word first! If the mask bit in the low word is clear, we will enable
+ * the interrupt, and we need to make sure the entry is fully populated
+ * before that happens.
+ */
 static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
 {
 	unsigned long flags;
 	union entry_union eu;
 	eu.entry = e;
 	spin_lock_irqsave(&ioapic_lock, flags);
+	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
+	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
+	spin_unlock_irqrestore(&ioapic_lock, flags);
+}
+
+/*
+ * When we mask an IO APIC routing entry, we need to write the low
+ * word first, in order to set the mask bit before we change the
+ * high bits!
+ */
+static void ioapic_mask_entry(int apic, int pin)
+{
+	unsigned long flags;
+	union entry_union eu = { .entry.mask = 1 };
+
+	spin_lock_irqsave(&ioapic_lock, flags);
 	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
 	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
 	spin_unlock_irqrestore(&ioapic_lock, flags);
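
Note: the ordering spelled out in the two comments is the substance of this hunk. A routing entry spans two 32-bit registers, and the mask bit sits in the low word (bit 16), so installing an entry must land the high word first, while masking must land the low word first. The same discipline in a standalone model (entry layout simplified to the mask bit alone; illustrative only):

#include <stdint.h>
#include <stdio.h>

#define RTE_MASKED (1u << 16)	/* mask bit lives in the low word */

static uint32_t rte_lo, rte_hi;	/* stand-in for one routing entry */

/* Install a complete entry: high word first, so the entry is fully
 * populated by the time the (possibly unmasked) low word lands. */
static void write_entry(uint32_t lo, uint32_t hi)
{
	rte_hi = hi;
	rte_lo = lo;
}

/* Mask an entry: low word first, so the mask bit is set before the
 * high bits are touched. */
static void mask_entry(void)
{
	rte_lo = RTE_MASKED;
	rte_hi = 0;
}

int main(void)
{
	write_entry(0x30 /* vector */, 0x01000000 /* dest */);
	mask_entry();
	printf("lo=%#x hi=%#x\n", (unsigned)rte_lo, (unsigned)rte_hi);
	return 0;
}
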
@@ -256,9 +324,7 @@ static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
 	/*
 	 * Disable it in the IO-APIC irq-routing table:
 	 */
-	memset(&entry, 0, sizeof(entry));
-	entry.mask = 1;
-	ioapic_write_entry(apic, pin, entry);
+	ioapic_mask_entry(apic, pin);
 }
 
 static void clear_IO_APIC (void)
@@ -612,15 +678,15 @@ static int __assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result)
 	 * Also, we've got to be careful not to trash gate
 	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
 	 */
-	static struct {
-		int vector;
-		int offset;
-	} pos[NR_CPUS] = { [ 0 ... NR_CPUS - 1] = {FIRST_DEVICE_VECTOR, 0} };
+	static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
 	int old_vector = -1;
 	int cpu;
 
 	BUG_ON((unsigned)irq >= NR_IRQ_VECTORS);
 
+	/* Only try and allocate irqs on cpus that are present */
+	cpus_and(mask, mask, cpu_online_map);
+
 	if (irq_vector[irq] > 0)
 		old_vector = irq_vector[irq];
 	if (old_vector > 0) {
@@ -630,15 +696,15 @@ static int __assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result)
 	}
 
 	for_each_cpu_mask(cpu, mask) {
-		cpumask_t domain;
-		int first, new_cpu;
+		cpumask_t domain, new_mask;
+		int new_cpu;
 		int vector, offset;
 
 		domain = vector_allocation_domain(cpu);
-		first = first_cpu(domain);
+		cpus_and(new_mask, domain, cpu_online_map);
 
-		vector = pos[first].vector;
-		offset = pos[first].offset;
+		vector = current_vector;
+		offset = current_offset;
 next:
 		vector += 8;
 		if (vector >= FIRST_SYSTEM_VECTOR) {
@@ -646,24 +712,24 @@ next:
 			offset = (offset + 1) % 8;
 			vector = FIRST_DEVICE_VECTOR + offset;
 		}
-		if (unlikely(pos[first].vector == vector))
+		if (unlikely(current_vector == vector))
 			continue;
 		if (vector == IA32_SYSCALL_VECTOR)
 			goto next;
-		for_each_cpu_mask(new_cpu, domain)
+		for_each_cpu_mask(new_cpu, new_mask)
 			if (per_cpu(vector_irq, new_cpu)[vector] != -1)
 				goto next;
 		/* Found one! */
-		for_each_cpu_mask(new_cpu, domain) {
-			pos[new_cpu].vector = vector;
-			pos[new_cpu].offset = offset;
-		}
+		current_vector = vector;
+		current_offset = offset;
 		if (old_vector >= 0) {
+			cpumask_t old_mask;
 			int old_cpu;
-			for_each_cpu_mask(old_cpu, irq_domain[irq])
+			cpus_and(old_mask, irq_domain[irq], cpu_online_map);
+			for_each_cpu_mask(old_cpu, old_mask)
 				per_cpu(vector_irq, old_cpu)[old_vector] = -1;
 		}
-		for_each_cpu_mask(new_cpu, domain)
+		for_each_cpu_mask(new_cpu, new_mask)
 			per_cpu(vector_irq, new_cpu)[vector] = irq;
 		irq_vector[irq] = vector;
 		irq_domain[irq] = domain;
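
Note: replacing the per-cpu pos[] array with a single current_vector/current_offset pair makes the search state global, and online CPUs are now filtered up front. Candidate vectors advance in steps of 8 (one per IDT priority level), wrapping through the 8 possible offsets within a level. The search order in isolation, as a standalone model (the vector constants mirror the x86-64 values of this era; the busy test is faked):

#include <stdio.h>

#define FIRST_DEVICE_VECTOR  0x31
#define FIRST_SYSTEM_VECTOR  0xef
#define IA32_SYSCALL_VECTOR  0x80

static int vector_busy(int v)
{
	return v == 0x39;	/* pretend one vector is already taken */
}

int main(void)
{
	static int current_vector = FIRST_DEVICE_VECTOR, current_offset;
	int vector = current_vector, offset = current_offset;

next:
	vector += 8;
	if (vector >= FIRST_SYSTEM_VECTOR) {
		/* wrap: try the next offset within the priority level */
		offset = (offset + 1) % 8;
		vector = FIRST_DEVICE_VECTOR + offset;
	}
	if (current_vector == vector)
		return 1;	/* walked the whole table: nothing free */
	if (vector == IA32_SYSCALL_VECTOR || vector_busy(vector))
		goto next;

	current_vector = vector;
	current_offset = offset;
	printf("allocated vector %#x\n", (unsigned)vector);
	return 0;
}
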
@@ -684,6 +750,32 @@ static int assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result)
 	return vector;
 }
 
+void __setup_vector_irq(int cpu)
+{
+	/* Initialize vector_irq on a new cpu */
+	/* This function must be called with vector_lock held */
+	unsigned long flags;
+	int irq, vector;
+
+
+	/* Mark the inuse vectors */
+	for (irq = 0; irq < NR_IRQ_VECTORS; ++irq) {
+		if (!cpu_isset(cpu, irq_domain[irq]))
+			continue;
+		vector = irq_vector[irq];
+		per_cpu(vector_irq, cpu)[vector] = irq;
+	}
+	/* Mark the free vectors */
+	for (vector = 0; vector < NR_VECTORS; ++vector) {
+		irq = per_cpu(vector_irq, cpu)[vector];
+		if (irq < 0)
+			continue;
+		if (!cpu_isset(cpu, irq_domain[irq]))
+			per_cpu(vector_irq, cpu)[vector] = -1;
+	}
+}
+
+
 extern void (*interrupt[NR_IRQS])(void);
 
 static struct irq_chip ioapic_chip;
@@ -698,9 +790,11 @@ static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
 	    trigger == IOAPIC_LEVEL)
 		set_irq_chip_and_handler_name(irq, &ioapic_chip,
 					      handle_fasteoi_irq, "fasteoi");
-	else
+	else {
+		irq_desc[irq].status |= IRQ_DELAYED_DISABLE;
 		set_irq_chip_and_handler_name(irq, &ioapic_chip,
 					      handle_edge_irq, "edge");
+	}
 }
 
 static void __init setup_IO_APIC_irqs(void)
@@ -1863,18 +1957,16 @@ void arch_teardown_msi_irq(unsigned int irq)
 
 static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
 {
-	u32 low, high;
-	low = read_ht_irq_low(irq);
-	high = read_ht_irq_high(irq);
+	struct ht_irq_msg msg;
+	fetch_ht_irq_msg(irq, &msg);
 
-	low &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
-	high &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
+	msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
+	msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
 
-	low |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
-	high |= HT_IRQ_HIGH_DEST_ID(dest);
+	msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
+	msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
 
-	write_ht_irq_low(irq, low);
-	write_ht_irq_high(irq, high);
+	write_ht_irq_msg(irq, &msg);
 }
 
 static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
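
Note: switching to struct ht_irq_msg folds the low/high register pair into one fetch and one write. The field update itself is the standard clear-then-set sequence on a masked field; in isolation (the mask and shift here are illustrative, not the kernel's actual HT_IRQ_LOW_* definitions):

#include <stdint.h>
#include <stdio.h>

#define VECTOR_MASK  0x00ff0000u	/* illustrative field position */
#define VECTOR(v)    (((uint32_t)(v) << 16) & VECTOR_MASK)

int main(void)
{
	uint32_t address_lo = 0xdeadbeef;

	address_lo &= ~VECTOR_MASK;	/* clear the old field... */
	address_lo |= VECTOR(0x41);	/* ...then install the new value */
	printf("address_lo = %#x\n", (unsigned)address_lo);
	return 0;
}
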
@@ -1895,7 +1987,7 @@ static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
 
 	dest = cpu_mask_to_apicid(tmp);
 
-	target_ht_irq(irq, dest, vector & 0xff);
+	target_ht_irq(irq, dest, vector);
 	set_native_irq_info(irq, mask);
 }
 #endif
@@ -1918,14 +2010,15 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
 
 	vector = assign_irq_vector(irq, TARGET_CPUS, &tmp);
 	if (vector >= 0) {
-		u32 low, high;
+		struct ht_irq_msg msg;
 		unsigned dest;
 
 		dest = cpu_mask_to_apicid(tmp);
 
-		high = HT_IRQ_HIGH_DEST_ID(dest);
+		msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
 
-		low =	HT_IRQ_LOW_BASE |
+		msg.address_lo =
+			HT_IRQ_LOW_BASE |
 			HT_IRQ_LOW_DEST_ID(dest) |
 			HT_IRQ_LOW_VECTOR(vector) |
 			((INT_DEST_MODE == 0) ?
@@ -1934,10 +2027,10 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
 			HT_IRQ_LOW_RQEOI_EDGE |
 			((INT_DELIVERY_MODE != dest_LowestPrio) ?
 				HT_IRQ_LOW_MT_FIXED :
-				HT_IRQ_LOW_MT_ARBITRATED);
+				HT_IRQ_LOW_MT_ARBITRATED) |
+			HT_IRQ_LOW_IRQ_MASKED;
 
-		write_ht_irq_low(irq, low);
-		write_ht_irq_high(irq, high);
+		write_ht_irq_msg(irq, &msg);
 
 		set_irq_chip_and_handler_name(irq, &ht_irq_chip,
 					      handle_edge_irq, "edge");
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 49f7fac6229e..7451a4c43c16 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -88,9 +88,8 @@ void enter_idle(void)
 
 static void __exit_idle(void)
 {
-	if (read_pda(isidle) == 0)
+	if (test_and_clear_bit_pda(0, isidle) == 0)
 		return;
-	write_pda(isidle, 0);
 	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
 }
 
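
Note: folding the read and the write into one atomic test-and-clear closes a race: two paths leaving idle could both observe isidle == 1 and both run the IDLE_END notifier. The same idiom in portable C11 (the kernel's test_and_clear_bit_pda operates on the per-cpu PDA instead):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int isidle;

static void __exit_idle(void)
{
	/* atomically fetch the old value and clear it: only the caller
	 * that actually flips 1 -> 0 runs the notifier */
	if (atomic_exchange(&isidle, 0) == 0)
		return;
	printf("IDLE_END notifier runs exactly once\n");
}

int main(void)
{
	atomic_store(&isidle, 1);
	__exit_idle();	/* runs the notifier */
	__exit_idle();	/* no-op: bit already clear */
	return 0;
}
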
@@ -145,7 +144,7 @@ static void poll_idle (void)
 void cpu_idle_wait(void)
 {
 	unsigned int cpu, this_cpu = get_cpu();
-	cpumask_t map;
+	cpumask_t map, tmp = current->cpus_allowed;
 
 	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
 	put_cpu();
@@ -168,6 +167,8 @@ void cpu_idle_wait(void)
 		}
 		cpus_and(map, map, cpu_online_map);
 	} while (!cpus_empty(map));
+
+	set_cpus_allowed(current, tmp);
 }
 EXPORT_SYMBOL_GPL(cpu_idle_wait);
 
diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
index 4f67697f5036..9f74c883568c 100644
--- a/arch/x86_64/kernel/smp.c
+++ b/arch/x86_64/kernel/smp.c
@@ -376,9 +376,8 @@ int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
 	/* prevent preemption and reschedule on another processor */
 	int me = get_cpu();
 	if (cpu == me) {
-		WARN_ON(1);
 		put_cpu();
-		return -EBUSY;
+		return 0;
 	}
 	spin_lock_bh(&call_lock);
 	__smp_call_function_single(cpu, func, info, nonatomic, wait);
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index 7b7a6870288a..62c2e747af58 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -581,12 +581,16 @@ void __cpuinit start_secondary(void)
 	 * smp_call_function().
 	 */
 	lock_ipi_call_lock();
+	spin_lock(&vector_lock);
 
+	/* Setup the per cpu irq handling data structures */
+	__setup_vector_irq(smp_processor_id());
 	/*
 	 * Allow the master to continue.
 	 */
 	cpu_set(smp_processor_id(), cpu_online_map);
 	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
+	spin_unlock(&vector_lock);
 	unlock_ipi_call_lock();
 
 	cpu_idle();
@@ -799,7 +803,6 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
 			cpu, node);
 	}
 
-
 	alternatives_smp_switch(1);
 
 	c_idle.idle = get_idle_for_cpu(cpu);
@@ -1246,8 +1249,10 @@ int __cpu_disable(void)
 	local_irq_disable();
 	remove_siblinginfo(cpu);
 
+	spin_lock(&vector_lock);
 	/* It's now safe to remove this processor from the online map */
 	cpu_clear(cpu, cpu_online_map);
+	spin_unlock(&vector_lock);
 	remove_cpu_from_maps();
 	fixup_irqs(cpu_online_map);
 	return 0;
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index 88722f11ca13..e3ef544d2cfb 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -876,15 +876,6 @@ static struct irqaction irq0 = {
 	timer_interrupt, IRQF_DISABLED, CPU_MASK_NONE, "timer", NULL, NULL
 };
 
-static int __cpuinit
-time_cpu_notifier(struct notifier_block *nb, unsigned long action, void *hcpu)
-{
-	unsigned cpu = (unsigned long) hcpu;
-	if (action == CPU_ONLINE)
-		vsyscall_set_cpu(cpu);
-	return NOTIFY_DONE;
-}
-
 void __init time_init(void)
 {
 	if (nohpet)
@@ -925,8 +916,6 @@ void __init time_init(void)
 	vxtime.last_tsc = get_cycles_sync();
 	set_cyc2ns_scale(cpu_khz);
 	setup_irq(0, &irq0);
-	hotcpu_notifier(time_cpu_notifier, 0);
-	time_cpu_notifier(NULL, CPU_ONLINE, (void *)(long)smp_processor_id());
 
 #ifndef CONFIG_SMP
 	time_init_gtod();
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index 7819022a8db5..a153d0a01b72 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -290,6 +290,12 @@ void dump_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * s
 		if (tsk && tsk != current)
 			stack = (unsigned long *)tsk->thread.rsp;
 	}
+	/*
+	 * Align the stack pointer on word boundary, later loops
+	 * rely on that (and corruption / debug info bugs can cause
+	 * unaligned values here):
+	 */
+	stack = (unsigned long *)((unsigned long)stack & ~(sizeof(long)-1));
 
 	/*
 	 * Print function call entries within a stack. 'cond' is the
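
Note: the mask rounds the stack pointer down to a sizeof(long) boundary because the unwinding loops that follow walk the stack in unsigned long steps; an odd value arriving from corrupted debug info would otherwise misparse or fault. The arithmetic in isolation:

#include <stdio.h>

int main(void)
{
	unsigned long sp = 0xffff810012345679UL;	/* misaligned */

	/* ~(sizeof(long)-1) == ~7UL on 64-bit: clears the low 3 bits */
	sp &= ~(sizeof(long) - 1);
	printf("aligned: %#lx\n", sp);	/* 0xffff810012345678 */
	return 0;
}
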
diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S
index 1283614c9b24..edb24aa714b4 100644
--- a/arch/x86_64/kernel/vmlinux.lds.S
+++ b/arch/x86_64/kernel/vmlinux.lds.S
@@ -175,13 +175,7 @@ SECTIONS
   __setup_end = .;
   __initcall_start = .;
   .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
-	*(.initcall1.init)
-	*(.initcall2.init)
-	*(.initcall3.init)
-	*(.initcall4.init)
-	*(.initcall5.init)
-	*(.initcall6.init)
-	*(.initcall7.init)
+	INITCALLS
   }
   __initcall_end = .;
   __con_initcall_start = .;
diff --git a/arch/x86_64/kernel/vsyscall.c b/arch/x86_64/kernel/vsyscall.c
index a98b460af6a1..92546c1526f1 100644
--- a/arch/x86_64/kernel/vsyscall.c
+++ b/arch/x86_64/kernel/vsyscall.c
@@ -27,6 +27,9 @@
 #include <linux/jiffies.h>
 #include <linux/sysctl.h>
 #include <linux/getcpu.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#include <linux/notifier.h>
 
 #include <asm/vsyscall.h>
 #include <asm/pgtable.h>
@@ -243,32 +246,17 @@ static ctl_table kernel_root_table2[] = {
 
 #endif
 
-static void __cpuinit write_rdtscp_cb(void *info)
-{
-	write_rdtscp_aux((unsigned long)info);
-}
-
-void __cpuinit vsyscall_set_cpu(int cpu)
+/* Assume __initcall executes before all user space. Hopefully kmod
+   doesn't violate that. We'll find out if it does. */
+static void __cpuinit vsyscall_set_cpu(int cpu)
 {
 	unsigned long *d;
 	unsigned long node = 0;
 #ifdef CONFIG_NUMA
 	node = cpu_to_node[cpu];
 #endif
-	if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP)) {
-		void *info = (void *)((node << 12) | cpu);
-		/* Can happen on preemptive kernel */
-		if (get_cpu() == cpu)
-			write_rdtscp_cb(info);
-#ifdef CONFIG_SMP
-		else {
-			/* the notifier is unfortunately not executed on the
-			   target CPU */
-			smp_call_function_single(cpu,write_rdtscp_cb,info,0,1);
-		}
-#endif
-		put_cpu();
-	}
+	if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP))
+		write_rdtscp_aux((node << 12) | cpu);
 
 	/* Store cpu number in limit so that it can be loaded quickly
 	   in user space in vgetcpu.
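
Note: the value written to the RDTSCP auxiliary register packs the NUMA node above bit 12 and the CPU number in the low 12 bits; user-space vgetcpu() reverses the split. The encoding in isolation:

#include <stdio.h>

int main(void)
{
	unsigned long cpu = 5, node = 2;

	/* what vsyscall_set_cpu() writes into the aux register */
	unsigned long aux = (node << 12) | cpu;

	/* what a vgetcpu-style reader recovers */
	printf("cpu=%lu node=%lu\n", aux & 0xfff, aux >> 12);
	return 0;
}
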
@@ -280,6 +268,23 @@ void __cpuinit vsyscall_set_cpu(int cpu)
 	*d |= (node >> 4) << 48;
 }
 
+static void __cpuinit cpu_vsyscall_init(void *arg)
+{
+	/* preemption should be already off */
+	vsyscall_set_cpu(raw_smp_processor_id());
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int __cpuinit
+cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
+{
+	long cpu = (long)arg;
+	if (action == CPU_ONLINE)
+		smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1);
+	return NOTIFY_DONE;
+}
+#endif
+
 static void __init map_vsyscall(void)
 {
 	extern char __vsyscall_0;
@@ -299,6 +304,8 @@ static int __init vsyscall_init(void)
 #ifdef CONFIG_SYSCTL
 	register_sysctl_table(kernel_root_table2, 0);
 #endif
+	on_each_cpu(cpu_vsyscall_init, NULL, 0, 1);
+	hotcpu_notifier(cpu_vsyscall_notifier, 0);
 	return 0;
 }
 