author    Ingo Molnar <mingo@elte.hu>  2009-04-22 06:44:16 -0400
committer Ingo Molnar <mingo@elte.hu>  2009-04-22 06:44:20 -0400
commit    3568b71d46bea87da1936902b6fbb2a3b1154b3d (patch)
tree      621ba4ccef15b87ce16f0fc2cc1e9b2a3f565ffc /arch/x86
parent    2a3313f494c2f3f74a27d66f0f14b38558b7dba2 (diff)
parent    091069740304c979f957ceacec39c461d0192158 (diff)
Merge commit 'v2.6.30-rc3' into x86/urgent
Merge reason: hpet.c changed upstream, make sure we test against that

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/ia32/sys_ia32.c          | 19
-rw-r--r--  arch/x86/include/asm/desc.h       |  2
-rw-r--r--  arch/x86/include/asm/hardirq.h    |  2
-rw-r--r--  arch/x86/include/asm/processor.h  |  6
-rw-r--r--  arch/x86/include/asm/tlbflush.h   |  2
-rw-r--r--  arch/x86/kernel/hpet.c            |  6
-rw-r--r--  arch/x86/kernel/i8253.c           |  2
-rw-r--r--  arch/x86/kernel/kvmclock.c        |  7
-rw-r--r--  arch/x86/kernel/tsc.c             |  2
-rw-r--r--  arch/x86/kernel/vmiclock_32.c     |  2
-rw-r--r--  arch/x86/lguest/boot.c            |  2
-rw-r--r--  arch/x86/xen/time.c               |  7
12 files changed, 30 insertions, 29 deletions
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index efac92fd1efb..085a8c35f149 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -129,21 +129,12 @@ asmlinkage long sys32_fstatat(unsigned int dfd, char __user *filename,
                               struct stat64 __user *statbuf, int flag)
 {
         struct kstat stat;
-        int error = -EINVAL;
+        int error;
 
-        if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0)
-                goto out;
-
-        if (flag & AT_SYMLINK_NOFOLLOW)
-                error = vfs_lstat_fd(dfd, filename, &stat);
-        else
-                error = vfs_stat_fd(dfd, filename, &stat);
-
-        if (!error)
-                error = cp_stat64(statbuf, &stat);
-
-out:
-        return error;
+        error = vfs_fstatat(dfd, filename, &stat, flag);
+        if (error)
+                return error;
+        return cp_stat64(statbuf, &stat);
 }
 
 /*
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index 5623c50d67b2..c45f415ce315 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -37,7 +37,7 @@ extern gate_desc idt_table[];
 struct gdt_page {
         struct desc_struct gdt[GDT_ENTRIES];
 } __attribute__((aligned(PAGE_SIZE)));
-DECLARE_PER_CPU(struct gdt_page, gdt_page);
+DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
 
 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
 {
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 039db6aa8e02..37555e52f980 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -26,7 +26,7 @@ typedef struct {
 #endif
 } ____cacheline_aligned irq_cpustat_t;
 
-DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
+DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 
 /* We can have at most NR_VECTORS irqs routed to a cpu at a time */
 #define MAX_HARDIRQS_PER_CPU NR_VECTORS
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index fcf4d92e7e04..c2cceae709c8 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -138,7 +138,7 @@ extern struct tss_struct doublefault_tss;
 extern __u32 cleared_cpu_caps[NCAPINTS];
 
 #ifdef CONFIG_SMP
-DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
+DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
 #define cpu_data(cpu)           per_cpu(cpu_info, cpu)
 #define current_cpu_data        __get_cpu_var(cpu_info)
 #else
@@ -270,7 +270,7 @@ struct tss_struct {
 
 } ____cacheline_aligned;
 
-DECLARE_PER_CPU(struct tss_struct, init_tss);
+DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
 
 /*
  * Save the original ist values for checking stack pointers during debugging
@@ -393,7 +393,7 @@ union irq_stack_union {
         };
 };
 
-DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
+DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union);
 DECLARE_INIT_PER_CPU(irq_stack_union);
 
 DECLARE_PER_CPU(char *, irq_stack_ptr);
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index d3539f998f88..16a5c84b0329 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -152,7 +152,7 @@ struct tlb_state {
         struct mm_struct *active_mm;
         int state;
 };
-DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
+DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
 
 static inline void reset_lazy_tlbstate(void)
 {
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 648b3a2a3a44..3f0019e0a229 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -722,7 +722,7 @@ static int hpet_cpuhp_notify(struct notifier_block *n,
 /*
  * Clock source related code
  */
-static cycle_t read_hpet(void)
+static cycle_t read_hpet(struct clocksource *cs)
 {
         return (cycle_t)hpet_readl(HPET_COUNTER);
 }
@@ -756,7 +756,7 @@ static int hpet_clocksource_register(void)
         hpet_restart_counter();
 
         /* Verify whether hpet counter works */
-        t1 = read_hpet();
+        t1 = hpet_readl(HPET_COUNTER);
         rdtscll(start);
 
         /*
@@ -770,7 +770,7 @@ static int hpet_clocksource_register(void)
                 rdtscll(now);
         } while ((now - start) < 200000UL);
 
-        if (t1 == read_hpet()) {
+        if (t1 == hpet_readl(HPET_COUNTER)) {
                 printk(KERN_WARNING
                         "HPET counter not counting. HPET disabled\n");
                 return -ENODEV;
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
index 3475440baa54..c2e0bb0890d4 100644
--- a/arch/x86/kernel/i8253.c
+++ b/arch/x86/kernel/i8253.c
@@ -129,7 +129,7 @@ void __init setup_pit_timer(void)
  * to just read by itself. So use jiffies to emulate a free
  * running counter:
  */
-static cycle_t pit_read(void)
+static cycle_t pit_read(struct clocksource *cs)
 {
         static int old_count;
         static u32 old_jifs;
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 137f2e8132df..223af43f1526 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -77,6 +77,11 @@ static cycle_t kvm_clock_read(void)
         return ret;
 }
 
+static cycle_t kvm_clock_get_cycles(struct clocksource *cs)
+{
+        return kvm_clock_read();
+}
+
 /*
  * If we don't do that, there is the possibility that the guest
  * will calibrate under heavy load - thus, getting a lower lpj -
@@ -107,7 +112,7 @@ static void kvm_get_preset_lpj(void)
 
 static struct clocksource kvm_clock = {
         .name = "kvm-clock",
-        .read = kvm_clock_read,
+        .read = kvm_clock_get_cycles,
         .rating = 400,
         .mask = CLOCKSOURCE_MASK(64),
         .mult = 1 << KVM_SCALE,
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 7a567ebe6361..d57de05dc430 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -699,7 +699,7 @@ static struct clocksource clocksource_tsc;
  * code, which is necessary to support wrapping clocksources like pm
  * timer.
  */
-static cycle_t read_tsc(void)
+static cycle_t read_tsc(struct clocksource *cs)
 {
         cycle_t ret = (cycle_t)get_cycles();
 
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
index d303369a7bad..2b3eb82efeeb 100644
--- a/arch/x86/kernel/vmiclock_32.c
+++ b/arch/x86/kernel/vmiclock_32.c
@@ -283,7 +283,7 @@ void __devinit vmi_time_ap_init(void)
 /** vmi clocksource */
 static struct clocksource clocksource_vmi;
 
-static cycle_t read_real_cycles(void)
+static cycle_t read_real_cycles(struct clocksource *cs)
 {
         cycle_t ret = (cycle_t)vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL);
         return max(ret, clocksource_vmi.cycle_last);
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index a2085368a3dc..ca7ec44bafc3 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -663,7 +663,7 @@ static unsigned long lguest_tsc_khz(void)
 
 /* If we can't use the TSC, the kernel falls back to our lower-priority
  * "lguest_clock", where we read the time value given to us by the Host. */
-static cycle_t lguest_clock_read(void)
+static cycle_t lguest_clock_read(struct clocksource *cs)
 {
         unsigned long sec, nsec;
 
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 14f240623497..0a5aa44299a5 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -213,6 +213,11 @@ cycle_t xen_clocksource_read(void)
         return ret;
 }
 
+static cycle_t xen_clocksource_get_cycles(struct clocksource *cs)
+{
+        return xen_clocksource_read();
+}
+
 static void xen_read_wallclock(struct timespec *ts)
 {
         struct shared_info *s = HYPERVISOR_shared_info;
@@ -241,7 +246,7 @@ int xen_set_wallclock(unsigned long now)
 static struct clocksource xen_clocksource __read_mostly = {
         .name = "xen",
         .rating = 400,
-        .read = xen_clocksource_read,
+        .read = xen_clocksource_get_cycles,
         .mask = ~0,
         .mult = 1<<XEN_SHIFT,           /* time directly in nanoseconds */
         .shift = XEN_SHIFT,