author	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-04 17:42:58 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-04 17:42:58 -0400
commit	3a143125ddc4e2e0ca1e67fb4bedd45c36e59cc7 (patch)
tree	0e5bc6fcc66b4cbfef909646b8a6b8f7f9d08956
parent	a1aa758d0019f2ac4ea558b3987a07c12fa19f61 (diff)
parent	5761d64b277c287a7520b868c32d656ef03374b4 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86:
  x86: revert assign IRQs to hpet timer
  x86: tsc prevent time going backwards
  xen: Clear PG_pinned in release_{pt,pd}()
  xen: Do not pin/unpin PMD pages
  xen: refactor xen_{alloc,release}_{pt,pd}()
  x86, agpgart: scary messages are fortunately obsolete
  xen: fix grant table bug
  x86: fix breakage of vSMP irq operations
  x86: print message if nmi_watchdog=2 cannot be enabled
  x86: fix nmi_watchdog=2 on Pentium-D CPUs

-rw-r--r--  arch/x86/kernel/cpu/perfctr-watchdog.c |  7
-rw-r--r--  arch/x86/kernel/hpet.c                 |  9
-rw-r--r--  arch/x86/kernel/pci-gart_64.c          | 10
-rw-r--r--  arch/x86/kernel/tsc_32.c               | 15
-rw-r--r--  arch/x86/kernel/tsc_64.c               | 23
-rw-r--r--  arch/x86/xen/enlighten.c               | 29
-rw-r--r--  arch/x86/xen/mmu.c                     |  7
-rw-r--r--  arch/x86/xen/mmu.h                     |  7
-rw-r--r--  drivers/char/hpet.c                    | 51
-rw-r--r--  drivers/xen/grant-table.c              | 16
-rw-r--r--  include/asm-x86/irqflags.h             | 29
-rw-r--r--  include/linux/hpet.h                   |  2
12 files changed, 123 insertions(+), 82 deletions(-)
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 9b838324b818..b943e10ad814 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -652,9 +652,6 @@ static void probe_nmi_watchdog(void)
                         wd_ops = &p6_wd_ops;
                         break;
                 case 15:
-                        if (boot_cpu_data.x86_model > 0x4)
-                                return;
-
                         wd_ops = &p4_wd_ops;
                         break;
                 default:
@@ -670,8 +667,10 @@ int lapic_watchdog_init(unsigned nmi_hz)
 {
         if (!wd_ops) {
                 probe_nmi_watchdog();
-                if (!wd_ops)
+                if (!wd_ops) {
+                        printk(KERN_INFO "NMI watchdog: CPU not supported\n");
                         return -1;
+                }
 
                 if (!wd_ops->reserve()) {
                         printk(KERN_ERR
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 235fd6c77504..36652ea1a265 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -133,13 +133,16 @@ static void hpet_reserve_platform_timers(unsigned long id)
 #ifdef CONFIG_HPET_EMULATE_RTC
         hpet_reserve_timer(&hd, 1);
 #endif
+
         hd.hd_irq[0] = HPET_LEGACY_8254;
         hd.hd_irq[1] = HPET_LEGACY_RTC;
 
         for (i = 2; i < nrtimers; timer++, i++)
                 hd.hd_irq[i] = (timer->hpet_config & Tn_INT_ROUTE_CNF_MASK) >>
                         Tn_INT_ROUTE_CNF_SHIFT;
+
         hpet_alloc(&hd);
+
 }
 #else
 static void hpet_reserve_platform_timers(unsigned long id) { }
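
The loop above derives hd_irq[i] for each spare comparator from the Tn_INT_ROUTE_CNF field of its configuration register. Purely as an illustration of that bitfield arithmetic (a standalone sketch, not part of the patch; the mask and shift values are assumed to match include/linux/hpet.h, i.e. bits 13:9 of the Tn configuration word):

#include <stdint.h>
#include <stdio.h>

/* assumed to mirror include/linux/hpet.h: INT_ROUTE_CNF is bits 13:9 */
#define Tn_INT_ROUTE_CNF_MASK  (0x3e00UL)
#define Tn_INT_ROUTE_CNF_SHIFT (9)

int main(void)
{
        /* hypothetical comparator configuration word with IRQ route 20 set */
        uint64_t hpet_config = (uint64_t)20 << Tn_INT_ROUTE_CNF_SHIFT;

        /* the same extraction hpet_reserve_platform_timers() performs */
        unsigned long irq = (hpet_config & Tn_INT_ROUTE_CNF_MASK)
                                >> Tn_INT_ROUTE_CNF_SHIFT;

        printf("routed IRQ: %lu\n", irq);        /* prints 20 */
        return 0;
}
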
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index faf3229f8fb3..700e4647dd30 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -615,8 +615,8 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
 
  nommu:
         /* Should not happen anymore */
-        printk(KERN_ERR "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
-               KERN_ERR "PCI-DMA: 32bit PCI IO may malfunction.\n");
+        printk(KERN_WARNING "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
+               KERN_WARNING "falling back to iommu=soft.\n");
         return -1;
 }
 
@@ -692,9 +692,9 @@ void __init gart_iommu_init(void)
             !gart_iommu_aperture ||
             (no_agp && init_k8_gatt(&info) < 0)) {
                 if (end_pfn > MAX_DMA32_PFN) {
-                        printk(KERN_ERR "WARNING more than 4GB of memory "
+                        printk(KERN_WARNING "More than 4GB of memory "
                                "but GART IOMMU not available.\n"
-                               KERN_ERR "WARNING 32bit PCI may malfunction.\n");
+                               KERN_WARNING "falling back to iommu=soft.\n");
                 }
                 return;
         }
diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c
index f14cfd9d1f94..d7498b34c8e9 100644
--- a/arch/x86/kernel/tsc_32.c
+++ b/arch/x86/kernel/tsc_32.c
@@ -287,14 +287,27 @@ core_initcall(cpufreq_tsc);
 /* clock source code */
 
 static unsigned long current_tsc_khz = 0;
+static struct clocksource clocksource_tsc;
 
+/*
+ * We compare the TSC to the cycle_last value in the clocksource
+ * structure to avoid a nasty time-warp issue. This can be observed in
+ * a very small window right after one CPU updated cycle_last under
+ * xtime lock and the other CPU reads a TSC value which is smaller
+ * than the cycle_last reference value due to a TSC which is slighty
+ * behind. This delta is nowhere else observable, but in that case it
+ * results in a forward time jump in the range of hours due to the
+ * unsigned delta calculation of the time keeping core code, which is
+ * necessary to support wrapping clocksources like pm timer.
+ */
 static cycle_t read_tsc(void)
 {
         cycle_t ret;
 
         rdtscll(ret);
 
-        return ret;
+        return ret >= clocksource_tsc.cycle_last ?
+                ret : clocksource_tsc.cycle_last;
 }
 
 static struct clocksource clocksource_tsc = {
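
The time-warp comment added above boils down to a one-line monotonicity clamp against clocksource_tsc.cycle_last. A minimal user-space sketch of the same idea, using a hypothetical last_value variable in place of the clocksource field:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t cycle_t;

/* stand-in for clocksource_tsc.cycle_last, which the timekeeping core
 * updates under xtime lock */
static cycle_t last_value = 1000;

/* clamp a freshly read counter so time can never appear to step back */
static cycle_t read_clamped(cycle_t now)
{
        return now >= last_value ? now : last_value;
}

int main(void)
{
        /* a CPU whose TSC is slightly behind reads 998: without the clamp
         * the unsigned delta (998 - 1000) wraps to a huge value and time
         * jumps forward by hours */
        printf("%llu\n", (unsigned long long)read_clamped(998));  /* 1000 */
        printf("%llu\n", (unsigned long long)read_clamped(1005)); /* 1005 */
        return 0;
}
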
diff --git a/arch/x86/kernel/tsc_64.c b/arch/x86/kernel/tsc_64.c
index 947554ddabb6..01fc9f0c39e2 100644
--- a/arch/x86/kernel/tsc_64.c
+++ b/arch/x86/kernel/tsc_64.c
@@ -11,6 +11,7 @@
 #include <asm/hpet.h>
 #include <asm/timex.h>
 #include <asm/timer.h>
+#include <asm/vgtod.h>
 
 static int notsc __initdata = 0;
 
@@ -290,18 +291,34 @@ int __init notsc_setup(char *s)
 
 __setup("notsc", notsc_setup);
 
+static struct clocksource clocksource_tsc;
 
-/* clock source code: */
+/*
+ * We compare the TSC to the cycle_last value in the clocksource
+ * structure to avoid a nasty time-warp. This can be observed in a
+ * very small window right after one CPU updated cycle_last under
+ * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
+ * is smaller than the cycle_last reference value due to a TSC which
+ * is slighty behind. This delta is nowhere else observable, but in
+ * that case it results in a forward time jump in the range of hours
+ * due to the unsigned delta calculation of the time keeping core
+ * code, which is necessary to support wrapping clocksources like pm
+ * timer.
+ */
 static cycle_t read_tsc(void)
 {
         cycle_t ret = (cycle_t)get_cycles();
-        return ret;
+
+        return ret >= clocksource_tsc.cycle_last ?
+                ret : clocksource_tsc.cycle_last;
 }
 
 static cycle_t __vsyscall_fn vread_tsc(void)
 {
         cycle_t ret = (cycle_t)vget_cycles();
-        return ret;
+
+        return ret >= __vsyscall_gtod_data.clock.cycle_last ?
+                ret : __vsyscall_gtod_data.clock.cycle_last;
 }
 
 static struct clocksource clocksource_tsc = {
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index de4e6f05840b..27ee26aedf94 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -667,10 +667,10 @@ static void xen_release_pt_init(u32 pfn)
         make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
 }
 
-static void pin_pagetable_pfn(unsigned level, unsigned long pfn)
+static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
 {
         struct mmuext_op op;
-        op.cmd = level;
+        op.cmd = cmd;
         op.arg1.mfn = pfn_to_mfn(pfn);
         if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
                 BUG();
@@ -687,7 +687,8 @@ static void xen_alloc_ptpage(struct mm_struct *mm, u32 pfn, unsigned level)
 
         if (!PageHighMem(page)) {
                 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
-                pin_pagetable_pfn(level, pfn);
+                if (level == PT_PTE)
+                        pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
         } else
                 /* make sure there are no stray mappings of
                    this page */
@@ -697,27 +698,39 @@ static void xen_alloc_ptpage(struct mm_struct *mm, u32 pfn, unsigned level)
 
 static void xen_alloc_pt(struct mm_struct *mm, u32 pfn)
 {
-        xen_alloc_ptpage(mm, pfn, MMUEXT_PIN_L1_TABLE);
+        xen_alloc_ptpage(mm, pfn, PT_PTE);
 }
 
 static void xen_alloc_pd(struct mm_struct *mm, u32 pfn)
 {
-        xen_alloc_ptpage(mm, pfn, MMUEXT_PIN_L2_TABLE);
+        xen_alloc_ptpage(mm, pfn, PT_PMD);
 }
 
 /* This should never happen until we're OK to use struct page */
-static void xen_release_pt(u32 pfn)
+static void xen_release_ptpage(u32 pfn, unsigned level)
 {
         struct page *page = pfn_to_page(pfn);
 
         if (PagePinned(page)) {
                 if (!PageHighMem(page)) {
-                        pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
+                        if (level == PT_PTE)
+                                pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
                         make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
                 }
+                ClearPagePinned(page);
         }
 }
 
+static void xen_release_pt(u32 pfn)
+{
+        xen_release_ptpage(pfn, PT_PTE);
+}
+
+static void xen_release_pd(u32 pfn)
+{
+        xen_release_ptpage(pfn, PT_PMD);
+}
+
 #ifdef CONFIG_HIGHPTE
 static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
 {
@@ -838,7 +851,7 @@ static __init void xen_pagetable_setup_done(pgd_t *base)
         pv_mmu_ops.alloc_pt = xen_alloc_pt;
         pv_mmu_ops.alloc_pd = xen_alloc_pd;
         pv_mmu_ops.release_pt = xen_release_pt;
-        pv_mmu_ops.release_pd = xen_release_pt;
+        pv_mmu_ops.release_pd = xen_release_pd;
         pv_mmu_ops.set_pte = xen_set_pte;
 
         setup_shared_info();
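
The refactor above funnels the four pv_mmu_ops hooks through two level-aware helpers so that only L1 (PTE) pages are pinned and unpinned. A condensed sketch of that dispatch shape, with the hypercalls replaced by hypothetical print stubs (illustrative only, not the real Xen interface):

#include <stdio.h>

enum pt_level { PT_PGD, PT_PUD, PT_PMD, PT_PTE };

/* stand-ins for the pin/unpin hypercalls issued via pin_pagetable_pfn() */
static void pin_l1(unsigned long pfn)  { printf("pin   L1 pfn %lu\n", pfn); }
static void unpin(unsigned long pfn)   { printf("unpin    pfn %lu\n", pfn); }

/* shared helpers: only PTE-level pages are pinned/unpinned */
static void alloc_ptpage(unsigned long pfn, enum pt_level level)
{
        if (level == PT_PTE)
                pin_l1(pfn);
}

static void release_ptpage(unsigned long pfn, enum pt_level level)
{
        if (level == PT_PTE)
                unpin(pfn);
}

/* thin wrappers, mirroring xen_{alloc,release}_{pt,pd}() */
static void alloc_pt(unsigned long pfn)   { alloc_ptpage(pfn, PT_PTE); }
static void alloc_pd(unsigned long pfn)   { alloc_ptpage(pfn, PT_PMD); }
static void release_pt(unsigned long pfn) { release_ptpage(pfn, PT_PTE); }
static void release_pd(unsigned long pfn) { release_ptpage(pfn, PT_PMD); }

int main(void)
{
        alloc_pt(42);    /* pinned */
        alloc_pd(43);    /* PMD pages are no longer pinned */
        release_pt(42);  /* unpinned */
        release_pd(43);  /* nothing to unpin */
        return 0;
}
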
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 0144395448ae..2a054ef2a3da 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -310,13 +310,6 @@ pgd_t xen_make_pgd(unsigned long pgd)
 }
 #endif /* CONFIG_X86_PAE */
 
-enum pt_level {
-        PT_PGD,
-        PT_PUD,
-        PT_PMD,
-        PT_PTE
-};
-
 /*
   (Yet another) pagetable walker.  This one is intended for pinning a
   pagetable.  This means that it walks a pagetable and calls the
diff --git a/arch/x86/xen/mmu.h b/arch/x86/xen/mmu.h
index c9ff27f3ac3a..b5e189b1519d 100644
--- a/arch/x86/xen/mmu.h
+++ b/arch/x86/xen/mmu.h
@@ -3,6 +3,13 @@
 #include <linux/linkage.h>
 #include <asm/page.h>
 
+enum pt_level {
+        PT_PGD,
+        PT_PUD,
+        PT_PMD,
+        PT_PTE
+};
+
 /*
  * Page-directory addresses above 4GB do not fit into architectural %cr3.
  * When accessing %cr3, or equivalent field in vcpu_guest_context, guests
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 465ad35ed38f..1399971be689 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -731,14 +731,14 @@ static unsigned long hpet_calibrate(struct hpets *hpetp)
 
 int hpet_alloc(struct hpet_data *hdp)
 {
-        u64 cap, mcfg, hpet_config;
+        u64 cap, mcfg;
         struct hpet_dev *devp;
-        u32 i, ntimer, irq;
+        u32 i, ntimer;
         struct hpets *hpetp;
         size_t siz;
         struct hpet __iomem *hpet;
         static struct hpets *last = NULL;
-        unsigned long period, irq_bitmap;
+        unsigned long period;
         unsigned long long temp;
 
         /*
@@ -765,47 +765,11 @@ int hpet_alloc(struct hpet_data *hdp)
         hpetp->hp_hpet_phys = hdp->hd_phys_address;
 
         hpetp->hp_ntimer = hdp->hd_nirqs;
-        hpet = hpetp->hp_hpet;
-
-        /* Assign IRQs statically for legacy devices */
-        hpetp->hp_dev[0].hd_hdwirq = hdp->hd_irq[0];
-        hpetp->hp_dev[1].hd_hdwirq = hdp->hd_irq[1];
-
-        /* Assign IRQs dynamically for the others */
-        for (i = 2, devp = &hpetp->hp_dev[2]; i < hdp->hd_nirqs; i++, devp++) {
-                struct hpet_timer __iomem *timer;
 
-                timer = &hpet->hpet_timers[devp - hpetp->hp_dev];
+        for (i = 0; i < hdp->hd_nirqs; i++)
+                hpetp->hp_dev[i].hd_hdwirq = hdp->hd_irq[i];
 
-                /* Check if there's already an IRQ assigned to the timer */
-                if (hdp->hd_irq[i]) {
-                        hpetp->hp_dev[i].hd_hdwirq = hdp->hd_irq[i];
-                        continue;
-                }
-
-                hpet_config = readq(&timer->hpet_config);
-                irq_bitmap = (hpet_config & Tn_INT_ROUTE_CAP_MASK)
-                        >> Tn_INT_ROUTE_CAP_SHIFT;
-                if (!irq_bitmap)
-                        irq = 0;        /* No valid IRQ Assignable */
-                else {
-                        irq = find_first_bit(&irq_bitmap, 32);
-                        do {
-                                hpet_config |= irq << Tn_INT_ROUTE_CNF_SHIFT;
-                                writeq(hpet_config, &timer->hpet_config);
-
-                                /*
-                                 * Verify whether we have written a valid
-                                 * IRQ number by reading it back again
-                                 */
-                                hpet_config = readq(&timer->hpet_config);
-                                if (irq == (hpet_config & Tn_INT_ROUTE_CNF_MASK)
-                                        >> Tn_INT_ROUTE_CNF_SHIFT)
-                                        break;  /* Success */
-                        } while ((irq = (find_next_bit(&irq_bitmap, 32, irq))));
-                }
-                hpetp->hp_dev[i].hd_hdwirq = irq;
-        }
+        hpet = hpetp->hp_hpet;
 
         cap = readq(&hpet->hpet_cap);
 
@@ -836,8 +800,7 @@ int hpet_alloc(struct hpet_data *hdp)
                 hpetp->hp_which, hdp->hd_phys_address,
                 hpetp->hp_ntimer > 1 ? "s" : "");
         for (i = 0; i < hpetp->hp_ntimer; i++)
-                printk("%s %d", i > 0 ? "," : "",
-                        hpetp->hp_dev[i].hd_hdwirq);
+                printk("%s %d", i > 0 ? "," : "", hdp->hd_irq[i]);
         printk("\n");
 
         printk(KERN_INFO "hpet%u: %u %d-bit timers, %Lu Hz\n",
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index ea94dbabf9a9..d85dc6d41c2a 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -381,11 +381,15 @@ EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
 static int grow_gnttab_list(unsigned int more_frames)
 {
         unsigned int new_nr_grant_frames, extra_entries, i;
+        unsigned int nr_glist_frames, new_nr_glist_frames;
 
         new_nr_grant_frames = nr_grant_frames + more_frames;
         extra_entries = more_frames * GREFS_PER_GRANT_FRAME;
 
-        for (i = nr_grant_frames; i < new_nr_grant_frames; i++) {
+        nr_glist_frames = (nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP;
+        new_nr_glist_frames =
+                (new_nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP;
+        for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
                 gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
                 if (!gnttab_list[i])
                         goto grow_nomem;
@@ -407,7 +411,7 @@ static int grow_gnttab_list(unsigned int more_frames)
         return 0;
 
 grow_nomem:
-        for ( ; i >= nr_grant_frames; i--)
+        for ( ; i >= nr_glist_frames; i--)
                 free_page((unsigned long) gnttab_list[i]);
         return -ENOMEM;
 }
@@ -530,7 +534,7 @@ static int gnttab_expand(unsigned int req_entries)
 static int __devinit gnttab_init(void)
 {
         int i;
-        unsigned int max_nr_glist_frames;
+        unsigned int max_nr_glist_frames, nr_glist_frames;
         unsigned int nr_init_grefs;
 
         if (!is_running_on_xen())
@@ -543,15 +547,15 @@ static int __devinit gnttab_init(void)
          * grant reference free list on the current hypervisor.
          */
         max_nr_glist_frames = (boot_max_nr_grant_frames *
-                               GREFS_PER_GRANT_FRAME /
-                               (PAGE_SIZE / sizeof(grant_ref_t)));
+                               GREFS_PER_GRANT_FRAME / RPP);
 
         gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
                               GFP_KERNEL);
         if (gnttab_list == NULL)
                 return -ENOMEM;
 
-        for (i = 0; i < nr_grant_frames; i++) {
+        nr_glist_frames = (nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP;
+        for (i = 0; i < nr_glist_frames; i++) {
                 gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
                 if (gnttab_list[i] == NULL)
                         goto ini_nomem;
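
The bug fixed above is a sizing mismatch: gnttab_list is an array of pages that each hold RPP = PAGE_SIZE / sizeof(grant_ref_t) references, so the allocation loops must be bounded by the number of list pages, not the number of grant-table frames. A standalone sketch of that ceiling division, using illustrative constants (4 KiB pages, 512 grant entries per grant frame, 32-bit grant references):

#include <stdio.h>

/* illustrative values; the real ones come from the Xen grant-table headers */
#define PAGE_SIZE             4096
#define GREFS_PER_GRANT_FRAME 512
#define RPP                   (PAGE_SIZE / sizeof(unsigned int)) /* refs per list page */

/* the same ceiling division gnttab_init() and grow_gnttab_list() now use */
static unsigned int nr_glist_frames(unsigned int nr_grant_frames)
{
        return (nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP;
}

int main(void)
{
        /* with these constants each list page covers two grant frames */
        printf("4 grant frames -> %u gnttab_list pages\n", nr_glist_frames(4)); /* 2 */
        printf("5 grant frames -> %u gnttab_list pages\n", nr_glist_frames(5)); /* 3 */
        return 0;
}
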
diff --git a/include/asm-x86/irqflags.h b/include/asm-x86/irqflags.h
index 92021c1ffa3a..0e2292483b35 100644
--- a/include/asm-x86/irqflags.h
+++ b/include/asm-x86/irqflags.h
@@ -70,6 +70,26 @@ static inline void raw_local_irq_restore(unsigned long flags)
         native_restore_fl(flags);
 }
 
+#ifdef CONFIG_X86_VSMP
+
+/*
+ * Interrupt control for the VSMP architecture:
+ */
+
+static inline void raw_local_irq_disable(void)
+{
+        unsigned long flags = __raw_local_save_flags();
+        raw_local_irq_restore((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
+}
+
+static inline void raw_local_irq_enable(void)
+{
+        unsigned long flags = __raw_local_save_flags();
+        raw_local_irq_restore((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
+}
+
+#else
+
 static inline void raw_local_irq_disable(void)
 {
         native_irq_disable();
@@ -80,6 +100,8 @@ static inline void raw_local_irq_enable(void)
         native_irq_enable();
 }
 
+#endif
+
 /*
  * Used in the idle loop; sti takes one instruction cycle
  * to complete:
@@ -137,10 +159,17 @@ static inline unsigned long __raw_local_irq_save(void)
 #define raw_local_irq_save(flags) \
         do { (flags) = __raw_local_irq_save(); } while (0)
 
+#ifdef CONFIG_X86_VSMP
+static inline int raw_irqs_disabled_flags(unsigned long flags)
+{
+        return !(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC);
+}
+#else
 static inline int raw_irqs_disabled_flags(unsigned long flags)
 {
         return !(flags & X86_EFLAGS_IF);
 }
+#endif
 
 static inline int raw_irqs_disabled(void)
 {
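
On vSMP the interrupt state added above is encoded in two EFLAGS bits at once: IF is paired with AC, and interrupts count as disabled whenever IF is clear or AC is set. A small user-space sketch of just the flag arithmetic (the EFLAGS bit values are written out explicitly; nothing here touches real hardware state):

#include <stdio.h>

#define X86_EFLAGS_IF 0x00000200UL /* interrupt enable flag */
#define X86_EFLAGS_AC 0x00040000UL /* alignment check flag, reused by vSMP */

/* vSMP predicate: disabled if IF is clear or AC is set */
static int vsmp_irqs_disabled(unsigned long flags)
{
        return !(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC);
}

/* the flag transformations performed by the vSMP disable/enable pair */
static unsigned long vsmp_disable(unsigned long flags)
{
        return (flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC;
}

static unsigned long vsmp_enable(unsigned long flags)
{
        return (flags | X86_EFLAGS_IF) & ~X86_EFLAGS_AC;
}

int main(void)
{
        unsigned long flags = X86_EFLAGS_IF;    /* interrupts enabled */

        printf("enabled:  disabled=%d\n", vsmp_irqs_disabled(flags));  /* 0 */
        flags = vsmp_disable(flags);
        printf("disabled: disabled=%d\n", vsmp_irqs_disabled(flags));  /* 1 */
        flags = vsmp_enable(flags);
        printf("enabled:  disabled=%d\n", vsmp_irqs_disabled(flags));  /* 0 */
        return 0;
}
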
diff --git a/include/linux/hpet.h b/include/linux/hpet.h
index 9cd94bfd07e5..2dc29ce6c8e4 100644
--- a/include/linux/hpet.h
+++ b/include/linux/hpet.h
@@ -64,7 +64,7 @@ struct hpet {
  */
 
 #define Tn_INT_ROUTE_CAP_MASK		(0xffffffff00000000ULL)
-#define Tn_INT_ROUTE_CAP_SHIFT		(32UL)
+#define Tn_INI_ROUTE_CAP_SHIFT		(32UL)
 #define Tn_FSB_INT_DELCAP_MASK		(0x8000UL)
 #define Tn_FSB_INT_DELCAP_SHIFT		(15)
 #define Tn_FSB_EN_CNF_MASK		(0x4000UL)