Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/mach-omap2/clock34xx.c                |  35
-rw-r--r--  arch/arm/mach-omap2/pm-debug.c                 |   4
-rw-r--r--  arch/arm/mach-omap2/pm34xx.c                   | 187
-rw-r--r--  arch/arm/mach-omap2/powerdomain.c              |  39
-rw-r--r--  arch/arm/plat-omap/include/mach/cpu.h          |  37
-rw-r--r--  arch/arm/plat-omap/include/mach/powerdomain.h  |   2
-rw-r--r--  arch/arm/plat-omap/iovmm.c                     |   9
-rw-r--r--  arch/arm/plat-omap/sram.c                      |   3
-rw-r--r--  arch/m68knommu/kernel/asm-offsets.c            |  28
-rw-r--r--  arch/m68knommu/kernel/entry.S                  |   6
-rw-r--r--  arch/m68knommu/mm/init.c                       |   2
-rw-r--r--  arch/m68knommu/platform/5206e/config.c         |   1
-rw-r--r--  arch/m68knommu/platform/68328/entry.S          |  32
-rw-r--r--  arch/m68knommu/platform/68360/entry.S          |  16
-rw-r--r--  arch/m68knommu/platform/coldfire/entry.S       |  20
-rw-r--r--  arch/microblaze/kernel/entry.S                 |   2
-rw-r--r--  arch/microblaze/kernel/hw_exception_handler.S  |   2
-rw-r--r--  arch/microblaze/kernel/process.c               |   2
-rw-r--r--  arch/parisc/Kconfig                            |   1
-rw-r--r--  arch/parisc/include/asm/fixmap.h               |   4
-rw-r--r--  arch/parisc/include/asm/hardirq.h              |  20
-rw-r--r--  arch/parisc/include/asm/ptrace.h               |   5
-rw-r--r--  arch/parisc/include/asm/syscall.h              |  40
-rw-r--r--  arch/parisc/include/asm/thread_info.h          |  14
-rw-r--r--  arch/parisc/kernel/asm-offsets.c               |   4
-rw-r--r--  arch/parisc/kernel/entry.S                     |  21
-rw-r--r--  arch/parisc/kernel/irq.c                       |   5
-rw-r--r--  arch/parisc/kernel/module.c                    |   2
-rw-r--r--  arch/parisc/kernel/ptrace.c                    |  42
-rw-r--r--  arch/parisc/kernel/signal.c                    |   5
-rw-r--r--  arch/parisc/kernel/syscall.S                   |  22
-rw-r--r--  arch/parisc/kernel/vmlinux.lds.S               |  10
-rw-r--r--  arch/parisc/mm/init.c                          |  11
-rw-r--r--  arch/s390/kvm/kvm-s390.h                       |   2
-rw-r--r--  arch/sparc/Kconfig                             |   2
-rw-r--r--  arch/sparc/include/asm/hardirq_32.h            |  12
-rw-r--r--  arch/sparc/include/asm/irq_32.h                |   4
-rw-r--r--  arch/sparc/include/asm/pgtable_64.h            |   4
-rw-r--r--  arch/sparc/kernel/ktlb.S                       |   8
-rw-r--r--  arch/sparc/kernel/perf_event.c                 | 577
-rw-r--r--  arch/sparc/oprofile/init.c                     |   1
-rw-r--r--  arch/x86/Kconfig                               |   4
-rw-r--r--  arch/x86/Kconfig.cpu                           |   3
-rw-r--r--  arch/x86/ia32/ia32entry.S                      |  36
-rw-r--r--  arch/x86/include/asm/kvm_host.h                |   1
-rw-r--r--  arch/x86/include/asm/mce.h                     |   2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c               |  58
-rw-r--r--  arch/x86/kernel/cpu/mtrr/if.c                  |  17
-rw-r--r--  arch/x86/kernel/early_printk.c                 |   5
-rw-r--r--  arch/x86/kernel/i386_ksyms_32.c                |   2
-rw-r--r--  arch/x86/kernel/irq.c                          |   2
-rw-r--r--  arch/x86/kernel/pci-dma.c                      |   2
-rw-r--r--  arch/x86/kernel/smp.c                          |   1
-rw-r--r--  arch/x86/kvm/lapic.c                           |   2
-rw-r--r--  arch/x86/kvm/mmu.c                             |  84
-rw-r--r--  arch/x86/kvm/paging_tmpl.h                     |  18
-rw-r--r--  arch/x86/kvm/svm.c                             |  25
-rw-r--r--  arch/x86/kvm/vmx.c                             |   2
-rw-r--r--  arch/x86/kvm/x86.c                             |   2
-rw-r--r--  arch/x86/lib/Makefile                          |   6
60 files changed, 1120 insertions(+), 395 deletions(-)
diff --git a/arch/arm/mach-omap2/clock34xx.c b/arch/arm/mach-omap2/clock34xx.c
index fafcd32e690..489556eecbd 100644
--- a/arch/arm/mach-omap2/clock34xx.c
+++ b/arch/arm/mach-omap2/clock34xx.c
@@ -338,6 +338,13 @@ static struct omap_clk omap34xx_clks[] = {
  */
 #define SDRC_MPURATE_LOOPS		96
 
+/*
+ * DPLL5_FREQ_FOR_USBHOST: USBHOST and USBTLL are the only clocks
+ * that are sourced by DPLL5, and both of these require this clock
+ * to be at 120 MHz for proper operation.
+ */
+#define DPLL5_FREQ_FOR_USBHOST		120000000
+
 /**
  * omap3430es2_clk_ssi_find_idlest - return CM_IDLEST info for SSI
  * @clk: struct clk * being enabled
@@ -1056,6 +1063,28 @@ void omap2_clk_prepare_for_reboot(void)
 #endif
 }
 
+static void omap3_clk_lock_dpll5(void)
+{
+	struct clk *dpll5_clk;
+	struct clk *dpll5_m2_clk;
+
+	dpll5_clk = clk_get(NULL, "dpll5_ck");
+	clk_set_rate(dpll5_clk, DPLL5_FREQ_FOR_USBHOST);
+	clk_enable(dpll5_clk);
+
+	/* Enable autoidle to allow it to enter low power bypass */
+	omap3_dpll_allow_idle(dpll5_clk);
+
+	/* Program dpll5_m2_clk divider for no division */
+	dpll5_m2_clk = clk_get(NULL, "dpll5_m2_ck");
+	clk_enable(dpll5_m2_clk);
+	clk_set_rate(dpll5_m2_clk, DPLL5_FREQ_FOR_USBHOST);
+
+	clk_disable(dpll5_m2_clk);
+	clk_disable(dpll5_clk);
+	return;
+}
+
 /* REVISIT: Move this init stuff out into clock.c */
 
 /*
@@ -1148,6 +1177,12 @@ int __init omap2_clk_init(void)
 	 */
 	clk_enable_init_clocks();
 
+	/*
+	 * Lock DPLL5 and put it in autoidle.
+	 */
+	if (omap_rev() >= OMAP3430_REV_ES2_0)
+		omap3_clk_lock_dpll5();
+
 	/* Avoid sleeping during omap2_clk_prepare_for_reboot() */
 	/* REVISIT: not yet ready for 343x */
 #if 0
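
A hedged aside on the consumer side of the change above: omap3_clk_lock_dpll5() runs in early init and skips clk_get() error checking, so a driver that later depends on the locked rate would typically verify it through the same clk API. A minimal sketch; the clock name "usbhost_120m_fck" and the dev pointer are assumptions for illustration, not taken from this patch:

	struct clk *fck = clk_get(dev, "usbhost_120m_fck");	/* assumed name */
	if (IS_ERR(fck))
		return PTR_ERR(fck);
	clk_enable(fck);
	/* DPLL5 was locked at init, so this should already read 120 MHz */
	WARN_ON(clk_get_rate(fck) != DPLL5_FREQ_FOR_USBHOST);
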
diff --git a/arch/arm/mach-omap2/pm-debug.c b/arch/arm/mach-omap2/pm-debug.c
index 1b4c1600f8d..2fc4d6abbd0 100644
--- a/arch/arm/mach-omap2/pm-debug.c
+++ b/arch/arm/mach-omap2/pm-debug.c
@@ -541,7 +541,7 @@ static int __init pm_dbg_init(void)
 		printk(KERN_ERR "%s: only OMAP3 supported\n", __func__);
 		return -ENODEV;
 	}
-	
+
 	d = debugfs_create_dir("pm_debug", NULL);
 	if (IS_ERR(d))
 		return PTR_ERR(d);
@@ -551,7 +551,7 @@ static int __init pm_dbg_init(void)
 	(void) debugfs_create_file("time", S_IRUGO,
 		d, (void *)DEBUG_FILE_TIMERS, &debug_fops);
 
-	pwrdm_for_each(pwrdms_setup, (void *)d);
+	pwrdm_for_each_nolock(pwrdms_setup, (void *)d);
 
 	pm_dbg_dir = debugfs_create_dir("registers", d);
 	if (IS_ERR(pm_dbg_dir))
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 0ff5a6c53aa..378c2f61835 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -51,97 +51,112 @@ static void (*_omap_sram_idle)(u32 *addr, int save_state);
 
 static struct powerdomain *mpu_pwrdm;
 
-/* PRCM Interrupt Handler for wakeups */
-static irqreturn_t prcm_interrupt_handler (int irq, void *dev_id)
+/*
+ * PRCM Interrupt Handler Helper Function
+ *
+ * The purpose of this function is to clear any wake-up events latched
+ * in the PRCM PM_WKST_x registers. It is possible that a wake-up event
+ * may occur whilst attempting to clear a PM_WKST_x register and thus
+ * set another bit in this register. A while loop is used to ensure
+ * that any peripheral wake-up events occurring while attempting to
+ * clear the PM_WKST_x are detected and cleared.
+ */
+static int prcm_clear_mod_irqs(s16 module, u8 regs)
 {
-	u32 wkst, irqstatus_mpu;
-	u32 fclk, iclk;
-
-	/* WKUP */
-	wkst = prm_read_mod_reg(WKUP_MOD, PM_WKST);
+	u32 wkst, fclk, iclk, clken;
+	u16 wkst_off = (regs == 3) ? OMAP3430ES2_PM_WKST3 : PM_WKST1;
+	u16 fclk_off = (regs == 3) ? OMAP3430ES2_CM_FCLKEN3 : CM_FCLKEN1;
+	u16 iclk_off = (regs == 3) ? CM_ICLKEN3 : CM_ICLKEN1;
+	u16 grpsel_off = (regs == 3) ?
+		OMAP3430ES2_PM_MPUGRPSEL3 : OMAP3430_PM_MPUGRPSEL;
+	int c = 0;
+
+	wkst = prm_read_mod_reg(module, wkst_off);
+	wkst &= prm_read_mod_reg(module, grpsel_off);
 	if (wkst) {
-		iclk = cm_read_mod_reg(WKUP_MOD, CM_ICLKEN);
-		fclk = cm_read_mod_reg(WKUP_MOD, CM_FCLKEN);
-		cm_set_mod_reg_bits(wkst, WKUP_MOD, CM_ICLKEN);
-		cm_set_mod_reg_bits(wkst, WKUP_MOD, CM_FCLKEN);
-		prm_write_mod_reg(wkst, WKUP_MOD, PM_WKST);
-		while (prm_read_mod_reg(WKUP_MOD, PM_WKST))
-			cpu_relax();
-		cm_write_mod_reg(iclk, WKUP_MOD, CM_ICLKEN);
-		cm_write_mod_reg(fclk, WKUP_MOD, CM_FCLKEN);
+		iclk = cm_read_mod_reg(module, iclk_off);
+		fclk = cm_read_mod_reg(module, fclk_off);
+		while (wkst) {
+			clken = wkst;
+			cm_set_mod_reg_bits(clken, module, iclk_off);
+			/*
+			 * For USBHOST, we don't know whether HOST1 or
+			 * HOST2 woke us up, so enable both f-clocks
+			 */
+			if (module == OMAP3430ES2_USBHOST_MOD)
+				clken |= 1 << OMAP3430ES2_EN_USBHOST2_SHIFT;
+			cm_set_mod_reg_bits(clken, module, fclk_off);
+			prm_write_mod_reg(wkst, module, wkst_off);
+			wkst = prm_read_mod_reg(module, wkst_off);
+			c++;
+		}
+		cm_write_mod_reg(iclk, module, iclk_off);
+		cm_write_mod_reg(fclk, module, fclk_off);
 	}
 
-	/* CORE */
-	wkst = prm_read_mod_reg(CORE_MOD, PM_WKST1);
-	if (wkst) {
-		iclk = cm_read_mod_reg(CORE_MOD, CM_ICLKEN1);
-		fclk = cm_read_mod_reg(CORE_MOD, CM_FCLKEN1);
-		cm_set_mod_reg_bits(wkst, CORE_MOD, CM_ICLKEN1);
-		cm_set_mod_reg_bits(wkst, CORE_MOD, CM_FCLKEN1);
-		prm_write_mod_reg(wkst, CORE_MOD, PM_WKST1);
-		while (prm_read_mod_reg(CORE_MOD, PM_WKST1))
-			cpu_relax();
-		cm_write_mod_reg(iclk, CORE_MOD, CM_ICLKEN1);
-		cm_write_mod_reg(fclk, CORE_MOD, CM_FCLKEN1);
-	}
-	wkst = prm_read_mod_reg(CORE_MOD, OMAP3430ES2_PM_WKST3);
-	if (wkst) {
-		iclk = cm_read_mod_reg(CORE_MOD, CM_ICLKEN3);
-		fclk = cm_read_mod_reg(CORE_MOD, OMAP3430ES2_CM_FCLKEN3);
-		cm_set_mod_reg_bits(wkst, CORE_MOD, CM_ICLKEN3);
-		cm_set_mod_reg_bits(wkst, CORE_MOD, OMAP3430ES2_CM_FCLKEN3);
-		prm_write_mod_reg(wkst, CORE_MOD, OMAP3430ES2_PM_WKST3);
-		while (prm_read_mod_reg(CORE_MOD, OMAP3430ES2_PM_WKST3))
-			cpu_relax();
-		cm_write_mod_reg(iclk, CORE_MOD, CM_ICLKEN3);
-		cm_write_mod_reg(fclk, CORE_MOD, OMAP3430ES2_CM_FCLKEN3);
-	}
+	return c;
+}
 
-	/* PER */
-	wkst = prm_read_mod_reg(OMAP3430_PER_MOD, PM_WKST);
-	if (wkst) {
-		iclk = cm_read_mod_reg(OMAP3430_PER_MOD, CM_ICLKEN);
-		fclk = cm_read_mod_reg(OMAP3430_PER_MOD, CM_FCLKEN);
-		cm_set_mod_reg_bits(wkst, OMAP3430_PER_MOD, CM_ICLKEN);
-		cm_set_mod_reg_bits(wkst, OMAP3430_PER_MOD, CM_FCLKEN);
-		prm_write_mod_reg(wkst, OMAP3430_PER_MOD, PM_WKST);
-		while (prm_read_mod_reg(OMAP3430_PER_MOD, PM_WKST))
-			cpu_relax();
-		cm_write_mod_reg(iclk, OMAP3430_PER_MOD, CM_ICLKEN);
-		cm_write_mod_reg(fclk, OMAP3430_PER_MOD, CM_FCLKEN);
-	}
+static int _prcm_int_handle_wakeup(void)
+{
+	int c;
 
+	c = prcm_clear_mod_irqs(WKUP_MOD, 1);
+	c += prcm_clear_mod_irqs(CORE_MOD, 1);
+	c += prcm_clear_mod_irqs(OMAP3430_PER_MOD, 1);
 	if (omap_rev() > OMAP3430_REV_ES1_0) {
-		/* USBHOST */
-		wkst = prm_read_mod_reg(OMAP3430ES2_USBHOST_MOD, PM_WKST);
-		if (wkst) {
-			iclk = cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD,
-					       CM_ICLKEN);
-			fclk = cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD,
-					       CM_FCLKEN);
-			cm_set_mod_reg_bits(wkst, OMAP3430ES2_USBHOST_MOD,
-					    CM_ICLKEN);
-			cm_set_mod_reg_bits(wkst, OMAP3430ES2_USBHOST_MOD,
-					    CM_FCLKEN);
-			prm_write_mod_reg(wkst, OMAP3430ES2_USBHOST_MOD,
-					  PM_WKST);
-			while (prm_read_mod_reg(OMAP3430ES2_USBHOST_MOD,
-						PM_WKST))
-				cpu_relax();
-			cm_write_mod_reg(iclk, OMAP3430ES2_USBHOST_MOD,
-					 CM_ICLKEN);
-			cm_write_mod_reg(fclk, OMAP3430ES2_USBHOST_MOD,
-					 CM_FCLKEN);
-		}
+		c += prcm_clear_mod_irqs(CORE_MOD, 3);
+		c += prcm_clear_mod_irqs(OMAP3430ES2_USBHOST_MOD, 1);
 	}
 
-	irqstatus_mpu = prm_read_mod_reg(OCP_MOD,
-					 OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
-	prm_write_mod_reg(irqstatus_mpu, OCP_MOD,
-			  OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
+	return c;
+}
+
+/*
+ * PRCM Interrupt Handler
+ *
+ * The PRM_IRQSTATUS_MPU register indicates if there are any pending
+ * interrupts from the PRCM for the MPU. These bits must be cleared in
+ * order to clear the PRCM interrupt. The PRCM interrupt handler is
+ * implemented to simply clear the PRM_IRQSTATUS_MPU in order to clear
+ * the PRCM interrupt. Please note that bit 0 of the PRM_IRQSTATUS_MPU
+ * register indicates that a wake-up event is pending for the MPU and
+ * this bit can only be cleared if the all the wake-up events latched
+ * in the various PM_WKST_x registers have been cleared. The interrupt
+ * handler is implemented using a do-while loop so that if a wake-up
+ * event occurred during the processing of the prcm interrupt handler
+ * (setting a bit in the corresponding PM_WKST_x register and thus
+ * preventing us from clearing bit 0 of the PRM_IRQSTATUS_MPU register)
+ * this would be handled.
+ */
+static irqreturn_t prcm_interrupt_handler (int irq, void *dev_id)
+{
+	u32 irqstatus_mpu;
+	int c = 0;
+
+	do {
+		irqstatus_mpu = prm_read_mod_reg(OCP_MOD,
+					OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
+
+		if (irqstatus_mpu & (OMAP3430_WKUP_ST | OMAP3430_IO_ST)) {
+			c = _prcm_int_handle_wakeup();
+
+			/*
+			 * Is the MPU PRCM interrupt handler racing with the
+			 * IVA2 PRCM interrupt handler ?
+			 */
+			WARN(c == 0, "prcm: WARNING: PRCM indicated MPU wakeup "
+			     "but no wakeup sources are marked\n");
+		} else {
+			/* XXX we need to expand our PRCM interrupt handler */
+			WARN(1, "prcm: WARNING: PRCM interrupt received, but "
+			     "no code to handle it (%08x)\n", irqstatus_mpu);
+		}
+
+		prm_write_mod_reg(irqstatus_mpu, OCP_MOD,
+					OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
 
-	while (prm_read_mod_reg(OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET))
-		cpu_relax();
+	} while (prm_read_mod_reg(OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET));
 
 	return IRQ_HANDLED;
 }
@@ -624,6 +639,16 @@ static void __init prcm_setup_regs(void)
 	prm_write_mod_reg(OMAP3430_IO_EN | OMAP3430_WKUP_EN,
 			  OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET);
 
+	/* Enable GPIO wakeups in PER */
+	prm_write_mod_reg(OMAP3430_EN_GPIO2 | OMAP3430_EN_GPIO3 |
+			  OMAP3430_EN_GPIO4 | OMAP3430_EN_GPIO5 |
+			  OMAP3430_EN_GPIO6, OMAP3430_PER_MOD, PM_WKEN);
+	/* and allow them to wake up MPU */
+	prm_write_mod_reg(OMAP3430_GRPSEL_GPIO2 | OMAP3430_EN_GPIO3 |
+			  OMAP3430_GRPSEL_GPIO4 | OMAP3430_EN_GPIO5 |
+			  OMAP3430_GRPSEL_GPIO6,
+			  OMAP3430_PER_MOD, OMAP3430_PM_MPUGRPSEL);
+
 	/* Don't attach IVA interrupts */
 	prm_write_mod_reg(0, WKUP_MOD, OMAP3430_PM_IVAGRPSEL);
 	prm_write_mod_reg(0, CORE_MOD, OMAP3430_PM_IVAGRPSEL1);
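
The while loop in prcm_clear_mod_irqs() above is the classic drain pattern for write-one-to-clear status registers that can re-latch new events mid-clear. A minimal sketch of just that idea, detached from the OMAP PRM/CM helpers (the readl/writel-based register access here is a generic stand-in, not the code this patch adds):

	/* Sketch: drain a W1C status register that may re-latch while
	 * being cleared; returns how many passes were needed. */
	static int drain_w1c_status(void __iomem *reg)
	{
		u32 pending = readl(reg);
		int passes = 0;

		while (pending) {
			writel(pending, reg);	/* write 1s back to clear */
			pending = readl(reg);	/* anything newly latched? */
			passes++;
		}
		return passes;
	}
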
diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c
index 2594cbff394..f00289abd30 100644
--- a/arch/arm/mach-omap2/powerdomain.c
+++ b/arch/arm/mach-omap2/powerdomain.c
@@ -273,35 +273,50 @@ struct powerdomain *pwrdm_lookup(const char *name)
 }
 
 /**
- * pwrdm_for_each - call function on each registered clockdomain
+ * pwrdm_for_each_nolock - call function on each registered clockdomain
  * @fn: callback function *
  *
  * Call the supplied function for each registered powerdomain. The
  * callback function can return anything but 0 to bail out early from
- * the iterator. The callback function is called with the pwrdm_rwlock
- * held for reading, so no powerdomain structure manipulation
- * functions should be called from the callback, although hardware
- * powerdomain control functions are fine. Returns the last return
- * value of the callback function, which should be 0 for success or
- * anything else to indicate failure; or -EINVAL if the function
- * pointer is null.
+ * the iterator. Returns the last return value of the callback function, which
+ * should be 0 for success or anything else to indicate failure; or -EINVAL if
+ * the function pointer is null.
  */
-int pwrdm_for_each(int (*fn)(struct powerdomain *pwrdm, void *user),
+int pwrdm_for_each_nolock(int (*fn)(struct powerdomain *pwrdm, void *user),
 			void *user)
 {
 	struct powerdomain *temp_pwrdm;
-	unsigned long flags;
 	int ret = 0;
 
 	if (!fn)
 		return -EINVAL;
 
-	read_lock_irqsave(&pwrdm_rwlock, flags);
 	list_for_each_entry(temp_pwrdm, &pwrdm_list, node) {
 		ret = (*fn)(temp_pwrdm, user);
 		if (ret)
 			break;
 	}
+
+	return ret;
+}
+
+/**
+ * pwrdm_for_each - call function on each registered clockdomain
+ * @fn: callback function *
+ *
+ * This function is the same as 'pwrdm_for_each_nolock()', but keeps the
+ * &pwrdm_rwlock locked for reading, so no powerdomain structure manipulation
+ * functions should be called from the callback, although hardware powerdomain
+ * control functions are fine.
+ */
+int pwrdm_for_each(int (*fn)(struct powerdomain *pwrdm, void *user),
+			void *user)
+{
+	unsigned long flags;
+	int ret;
+
+	read_lock_irqsave(&pwrdm_rwlock, flags);
+	ret = pwrdm_for_each_nolock(fn, user);
 	read_unlock_irqrestore(&pwrdm_rwlock, flags);
 
 	return ret;
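
This is the usual locked-wrapper/lockless-worker split: the pm-debug.c caller earlier in this patch already runs in a context where re-taking pwrdm_rwlock would deadlock (presumably because its callback ends up back in powerdomain code that takes the lock), so the iteration body is factored out and the locked variant merely brackets it. A generic sketch of the pattern, with names invented for illustration:

	/* Sketch: caller-provides-locking variant plus a locked wrapper */
	int foo_for_each_nolock(int (*fn)(struct foo *, void *), void *user);

	int foo_for_each(int (*fn)(struct foo *, void *), void *user)
	{
		unsigned long flags;
		int ret;

		read_lock_irqsave(&foo_rwlock, flags);
		ret = foo_for_each_nolock(fn, user);
		read_unlock_irqrestore(&foo_rwlock, flags);
		return ret;
	}
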
diff --git a/arch/arm/plat-omap/include/mach/cpu.h b/arch/arm/plat-omap/include/mach/cpu.h
index 11e73d9e892..f129efb3075 100644
--- a/arch/arm/plat-omap/include/mach/cpu.h
+++ b/arch/arm/plat-omap/include/mach/cpu.h
@@ -303,32 +303,21 @@ IS_OMAP_TYPE(3430, 0x3430)
 #define cpu_is_omap2430()		0
 #define cpu_is_omap3430()		0
 
-#if defined(MULTI_OMAP1)
-# if defined(CONFIG_ARCH_OMAP730)
-#  undef  cpu_is_omap730
-#  define cpu_is_omap730()		is_omap730()
-# endif
-# if defined(CONFIG_ARCH_OMAP850)
-#  undef  cpu_is_omap850
-#  define cpu_is_omap850()		is_omap850()
-# endif
-#else
-# if defined(CONFIG_ARCH_OMAP730)
-#  undef  cpu_is_omap730
-#  define cpu_is_omap730()		1
-# endif
-#endif
-#else
-# if defined(CONFIG_ARCH_OMAP850)
-#  undef  cpu_is_omap850
-#  define cpu_is_omap850()		1
-# endif
-#endif
-
 /*
  * Whether we have MULTI_OMAP1 or not, we still need to distinguish
- * between 330 vs. 1510 and 1611B/5912 vs. 1710.
+ * between 730 vs 850, 330 vs. 1510 and 1611B/5912 vs. 1710.
  */
+
+#if defined(CONFIG_ARCH_OMAP730)
+# undef  cpu_is_omap730
+# define cpu_is_omap730()		is_omap730()
+#endif
+
+#if defined(CONFIG_ARCH_OMAP850)
+# undef  cpu_is_omap850
+# define cpu_is_omap850()		is_omap850()
+#endif
+
 #if defined(CONFIG_ARCH_OMAP15XX)
 # undef  cpu_is_omap310
 # undef  cpu_is_omap1510
@@ -433,3 +422,5 @@ IS_OMAP_TYPE(3430, 0x3430)
 
 int omap_chip_is(struct omap_chip_id oci);
 void omap2_check_revision(void);
+
+#endif
diff --git a/arch/arm/plat-omap/include/mach/powerdomain.h b/arch/arm/plat-omap/include/mach/powerdomain.h
index 6271d8556a4..fa6461423bd 100644
--- a/arch/arm/plat-omap/include/mach/powerdomain.h
+++ b/arch/arm/plat-omap/include/mach/powerdomain.h
@@ -135,6 +135,8 @@ struct powerdomain *pwrdm_lookup(const char *name);
 
 int pwrdm_for_each(int (*fn)(struct powerdomain *pwrdm, void *user),
 			void *user);
+int pwrdm_for_each_nolock(int (*fn)(struct powerdomain *pwrdm, void *user),
+			void *user);
 
 int pwrdm_add_clkdm(struct powerdomain *pwrdm, struct clockdomain *clkdm);
 int pwrdm_del_clkdm(struct powerdomain *pwrdm, struct clockdomain *clkdm);
diff --git a/arch/arm/plat-omap/iovmm.c b/arch/arm/plat-omap/iovmm.c
index 57f7122a091..dc3fac3dd0e 100644
--- a/arch/arm/plat-omap/iovmm.c
+++ b/arch/arm/plat-omap/iovmm.c
@@ -47,7 +47,7 @@
  * 'va':	mpu virtual address
  *
  * 'c':	contiguous memory area
- * 'd':	dicontiguous memory area
+ * 'd':	discontiguous memory area
  * 'a':	anonymous memory allocation
  * '()':	optional feature
  *
@@ -363,8 +363,9 @@ void *da_to_va(struct iommu *obj, u32 da)
 		goto out;
 	}
 	va = area->va;
-	mutex_unlock(&obj->mmap_lock);
 out:
+	mutex_unlock(&obj->mmap_lock);
+
 	return va;
 }
 EXPORT_SYMBOL_GPL(da_to_va);
@@ -398,7 +399,7 @@ static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
 {
 	/*
 	 * Actually this is not necessary at all, just exists for
-	 * consistency of the code readibility.
+	 * consistency of the code readability.
 	 */
 	BUG_ON(!sgt);
 }
@@ -434,7 +435,7 @@ static inline void sgtable_drain_kmalloc(struct sg_table *sgt)
 {
 	/*
	 * Actually this is not necessary at all, just exists for
-	 * consistency of the code readibility
+	 * consistency of the code readability
 	 */
 	BUG_ON(!sgt);
 }
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index 925f64711c3..75d1f26e5b1 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -270,7 +270,8 @@ void * omap_sram_push(void * start, unsigned long size)
 	omap_sram_ceil -= size;
 	omap_sram_ceil = ROUND_DOWN(omap_sram_ceil, sizeof(void *));
 	memcpy((void *)omap_sram_ceil, start, size);
-	flush_icache_range((unsigned long)start, (unsigned long)(start + size));
+	flush_icache_range((unsigned long)omap_sram_ceil,
+		(unsigned long)(omap_sram_ceil + size));
 
 	return (void *)omap_sram_ceil;
 }
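
The fix above matters because the instruction cache must be made coherent for the copy that will actually be executed, not for the source buffer. A hedged sketch of the general relocate-and-run pattern this function implements:

	/* Sketch: copy a routine into SRAM, then flush the *destination*
	 * range so stale I-cache lines are never fetched when it runs. */
	void *dst = (void *)omap_sram_ceil;

	memcpy(dst, func_start, func_size);	/* func_* are placeholders */
	flush_icache_range((unsigned long)dst,
			   (unsigned long)dst + func_size);
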
diff --git a/arch/m68knommu/kernel/asm-offsets.c b/arch/m68knommu/kernel/asm-offsets.c
index 594ee0e657f..9a8876f715d 100644
--- a/arch/m68knommu/kernel/asm-offsets.c
+++ b/arch/m68knommu/kernel/asm-offsets.c
@@ -45,25 +45,25 @@ int main(void)
 	DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fpstate));
 
 	/* offsets into the pt_regs */
-	DEFINE(PT_D0, offsetof(struct pt_regs, d0));
-	DEFINE(PT_ORIG_D0, offsetof(struct pt_regs, orig_d0));
-	DEFINE(PT_D1, offsetof(struct pt_regs, d1));
-	DEFINE(PT_D2, offsetof(struct pt_regs, d2));
-	DEFINE(PT_D3, offsetof(struct pt_regs, d3));
-	DEFINE(PT_D4, offsetof(struct pt_regs, d4));
-	DEFINE(PT_D5, offsetof(struct pt_regs, d5));
-	DEFINE(PT_A0, offsetof(struct pt_regs, a0));
-	DEFINE(PT_A1, offsetof(struct pt_regs, a1));
-	DEFINE(PT_A2, offsetof(struct pt_regs, a2));
-	DEFINE(PT_PC, offsetof(struct pt_regs, pc));
-	DEFINE(PT_SR, offsetof(struct pt_regs, sr));
+	DEFINE(PT_OFF_D0, offsetof(struct pt_regs, d0));
+	DEFINE(PT_OFF_ORIG_D0, offsetof(struct pt_regs, orig_d0));
+	DEFINE(PT_OFF_D1, offsetof(struct pt_regs, d1));
+	DEFINE(PT_OFF_D2, offsetof(struct pt_regs, d2));
+	DEFINE(PT_OFF_D3, offsetof(struct pt_regs, d3));
+	DEFINE(PT_OFF_D4, offsetof(struct pt_regs, d4));
+	DEFINE(PT_OFF_D5, offsetof(struct pt_regs, d5));
+	DEFINE(PT_OFF_A0, offsetof(struct pt_regs, a0));
+	DEFINE(PT_OFF_A1, offsetof(struct pt_regs, a1));
+	DEFINE(PT_OFF_A2, offsetof(struct pt_regs, a2));
+	DEFINE(PT_OFF_PC, offsetof(struct pt_regs, pc));
+	DEFINE(PT_OFF_SR, offsetof(struct pt_regs, sr));
 
 #ifdef CONFIG_COLDFIRE
 	/* bitfields are a bit difficult */
-	DEFINE(PT_FORMATVEC, offsetof(struct pt_regs, sr) - 2);
+	DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, sr) - 2);
 #else
 	/* bitfields are a bit difficult */
-	DEFINE(PT_VECTOR, offsetof(struct pt_regs, pc) + 4);
+	DEFINE(PT_OFF_VECTOR, offsetof(struct pt_regs, pc) + 4);
 #endif
 
 	/* signal defines */
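
An aside on how these constants reach the assembly files changed below: asm-offsets.c is never linked into the kernel. The build compiles it to assembly and scrapes marker lines out of the result into the generated asm-offsets.h. The DEFINE() macro used above comes from include/linux/kbuild.h:

	#define DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " #val : : "i" (val))

	/* The "->PT_OFF_D0 $<n> ..." marker in the generated .s file becomes
	 * "#define PT_OFF_D0 <n>" (value depends on the struct layout), so
	 * entry.S can write %sp@(PT_OFF_D0) without knowing struct pt_regs. */

The PT_ to PT_OFF_ rename exists, presumably, to keep these stack-frame offsets from being confused with the PT_* register-index names that m68k's asm/ptrace.h exposes for PTRACE_PEEKUSR.
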
diff --git a/arch/m68knommu/kernel/entry.S b/arch/m68knommu/kernel/entry.S
index f56faa5c9cd..56043ade394 100644
--- a/arch/m68knommu/kernel/entry.S
+++ b/arch/m68knommu/kernel/entry.S
@@ -46,7 +46,7 @@
 ENTRY(buserr)
 	SAVE_ALL
 	moveq	#-1,%d0
-	movel	%d0,%sp@(PT_ORIG_D0)
+	movel	%d0,%sp@(PT_OFF_ORIG_D0)
 	movel	%sp,%sp@- 		/* stack frame pointer argument */
 	jsr	buserr_c
 	addql	#4,%sp
@@ -55,7 +55,7 @@ ENTRY(buserr)
 ENTRY(trap)
 	SAVE_ALL
 	moveq	#-1,%d0
-	movel	%d0,%sp@(PT_ORIG_D0)
+	movel	%d0,%sp@(PT_OFF_ORIG_D0)
 	movel	%sp,%sp@- 		/* stack frame pointer argument */
 	jsr	trap_c
 	addql	#4,%sp
@@ -67,7 +67,7 @@ ENTRY(trap)
 ENTRY(dbginterrupt)
 	SAVE_ALL
 	moveq	#-1,%d0
-	movel	%d0,%sp@(PT_ORIG_D0)
+	movel	%d0,%sp@(PT_OFF_ORIG_D0)
 	movel	%sp,%sp@- 		/* stack frame pointer argument */
 	jsr	dbginterrupt_c
 	addql	#4,%sp
diff --git a/arch/m68knommu/mm/init.c b/arch/m68knommu/mm/init.c
index b1703c67a4f..f3236d0b522 100644
--- a/arch/m68knommu/mm/init.c
+++ b/arch/m68knommu/mm/init.c
@@ -162,7 +162,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 		totalram_pages++;
 		pages++;
 	}
-	printk (KERN_NOTICE "Freeing initrd memory: %dk freed\n", pages);
+	printk (KERN_NOTICE "Freeing initrd memory: %dk freed\n", pages * (PAGE_SIZE / 1024));
 }
 #endif
 
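
For scale: with the common 4 KiB PAGE_SIZE the multiplier is 4, so an initrd spanning, say, 300 pages is now reported as "1200k freed" where the old code printed a misleading "300k" (it reported the page count as if it were kilobytes).
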
diff --git a/arch/m68knommu/platform/5206e/config.c b/arch/m68knommu/platform/5206e/config.c
index 0f41ba82a3b..942397984c6 100644
--- a/arch/m68knommu/platform/5206e/config.c
+++ b/arch/m68knommu/platform/5206e/config.c
@@ -17,7 +17,6 @@
 #include <asm/mcfsim.h>
 #include <asm/mcfuart.h>
 #include <asm/mcfdma.h>
-#include <asm/mcfuart.h>
 
 /***************************************************************************/
 
diff --git a/arch/m68knommu/platform/68328/entry.S b/arch/m68knommu/platform/68328/entry.S
index b1aef72f3ba..9d80d2c4286 100644
--- a/arch/m68knommu/platform/68328/entry.S
+++ b/arch/m68knommu/platform/68328/entry.S
@@ -39,17 +39,17 @@
 .globl inthandler7
 
 badsys:
-	movel	#-ENOSYS,%sp@(PT_D0)
+	movel	#-ENOSYS,%sp@(PT_OFF_D0)
 	jra	ret_from_exception
 
 do_trace:
-	movel	#-ENOSYS,%sp@(PT_D0)	/* needed for strace*/
+	movel	#-ENOSYS,%sp@(PT_OFF_D0) /* needed for strace*/
 	subql	#4,%sp
 	SAVE_SWITCH_STACK
 	jbsr	syscall_trace
 	RESTORE_SWITCH_STACK
 	addql	#4,%sp
-	movel	%sp@(PT_ORIG_D0),%d1
+	movel	%sp@(PT_OFF_ORIG_D0),%d1
 	movel	#-ENOSYS,%d0
 	cmpl	#NR_syscalls,%d1
 	jcc	1f
@@ -57,7 +57,7 @@ do_trace:
 	lea	sys_call_table, %a0
 	jbsr	%a0@(%d1)
 
-1:	movel	%d0,%sp@(PT_D0)		/* save the return value */
+1:	movel	%d0,%sp@(PT_OFF_D0)	/* save the return value */
 	subql	#4,%sp			/* dummy return address */
 	SAVE_SWITCH_STACK
 	jbsr	syscall_trace
@@ -75,7 +75,7 @@ ENTRY(system_call)
 	jbsr	set_esp0
 	addql	#4,%sp
 
-	movel	%sp@(PT_ORIG_D0),%d0
+	movel	%sp@(PT_OFF_ORIG_D0),%d0
 
 	movel	%sp,%d1			/* get thread_info pointer */
 	andl	#-THREAD_SIZE,%d1
@@ -88,10 +88,10 @@ ENTRY(system_call)
 	lea	sys_call_table,%a0
 	movel	%a0@(%d0), %a0
 	jbsr	%a0@
-	movel	%d0,%sp@(PT_D0)		/* save the return value*/
+	movel	%d0,%sp@(PT_OFF_D0)	/* save the return value*/
 
 ret_from_exception:
-	btst	#5,%sp@(PT_SR)		/* check if returning to kernel*/
+	btst	#5,%sp@(PT_OFF_SR)	/* check if returning to kernel*/
 	jeq	Luser_return		/* if so, skip resched, signals*/
 
 Lkernel_return:
@@ -133,7 +133,7 @@ Lreturn:
  */
 inthandler1:
 	SAVE_ALL
-	movew	%sp@(PT_VECTOR), %d0
+	movew	%sp@(PT_OFF_VECTOR), %d0
 	and	#0x3ff, %d0
 
 	movel	%sp,%sp@-
@@ -144,7 +144,7 @@ inthandler1:
 
 inthandler2:
 	SAVE_ALL
-	movew	%sp@(PT_VECTOR), %d0
+	movew	%sp@(PT_OFF_VECTOR), %d0
 	and	#0x3ff, %d0
 
 	movel	%sp,%sp@-
@@ -155,7 +155,7 @@ inthandler2:
 
 inthandler3:
 	SAVE_ALL
-	movew	%sp@(PT_VECTOR), %d0
+	movew	%sp@(PT_OFF_VECTOR), %d0
 	and	#0x3ff, %d0
 
 	movel	%sp,%sp@-
@@ -166,7 +166,7 @@ inthandler3:
 
 inthandler4:
 	SAVE_ALL
-	movew	%sp@(PT_VECTOR), %d0
+	movew	%sp@(PT_OFF_VECTOR), %d0
 	and	#0x3ff, %d0
 
 	movel	%sp,%sp@-
@@ -177,7 +177,7 @@ inthandler4:
 
 inthandler5:
 	SAVE_ALL
-	movew	%sp@(PT_VECTOR), %d0
+	movew	%sp@(PT_OFF_VECTOR), %d0
 	and	#0x3ff, %d0
 
 	movel	%sp,%sp@-
@@ -188,7 +188,7 @@ inthandler5:
 
 inthandler6:
 	SAVE_ALL
-	movew	%sp@(PT_VECTOR), %d0
+	movew	%sp@(PT_OFF_VECTOR), %d0
 	and	#0x3ff, %d0
 
 	movel	%sp,%sp@-
@@ -199,7 +199,7 @@ inthandler6:
 
 inthandler7:
 	SAVE_ALL
-	movew	%sp@(PT_VECTOR), %d0
+	movew	%sp@(PT_OFF_VECTOR), %d0
 	and	#0x3ff, %d0
 
 	movel	%sp,%sp@-
@@ -210,7 +210,7 @@ inthandler7:
 
 inthandler:
 	SAVE_ALL
-	movew	%sp@(PT_VECTOR), %d0
+	movew	%sp@(PT_OFF_VECTOR), %d0
 	and	#0x3ff, %d0
 
 	movel	%sp,%sp@-
@@ -224,7 +224,7 @@ ret_from_interrupt:
 2:
 	RESTORE_ALL
 1:
-	moveb	%sp@(PT_SR), %d0
+	moveb	%sp@(PT_OFF_SR), %d0
 	and	#7, %d0
 	jhi	2b
 
diff --git a/arch/m68knommu/platform/68360/entry.S b/arch/m68knommu/platform/68360/entry.S
index 55dfefe3864..6d3460a39ca 100644
--- a/arch/m68knommu/platform/68360/entry.S
+++ b/arch/m68knommu/platform/68360/entry.S
@@ -35,17 +35,17 @@
 .globl inthandler
 
 badsys:
-	movel	#-ENOSYS,%sp@(PT_D0)
+	movel	#-ENOSYS,%sp@(PT_OFF_D0)
 	jra	ret_from_exception
 
 do_trace:
-	movel	#-ENOSYS,%sp@(PT_D0)	/* needed for strace*/
+	movel	#-ENOSYS,%sp@(PT_OFF_D0) /* needed for strace*/
 	subql	#4,%sp
 	SAVE_SWITCH_STACK
 	jbsr	syscall_trace
 	RESTORE_SWITCH_STACK
 	addql	#4,%sp
-	movel	%sp@(PT_ORIG_D0),%d1
+	movel	%sp@(PT_OFF_ORIG_D0),%d1
 	movel	#-ENOSYS,%d0
 	cmpl	#NR_syscalls,%d1
 	jcc	1f
@@ -53,7 +53,7 @@ do_trace:
 	lea	sys_call_table, %a0
 	jbsr	%a0@(%d1)
 
-1:	movel	%d0,%sp@(PT_D0)		/* save the return value */
+1:	movel	%d0,%sp@(PT_OFF_D0)	/* save the return value */
 	subql	#4,%sp			/* dummy return address */
 	SAVE_SWITCH_STACK
 	jbsr	syscall_trace
@@ -79,10 +79,10 @@ ENTRY(system_call)
 	lea	sys_call_table,%a0
 	movel	%a0@(%d0), %a0
 	jbsr	%a0@
-	movel	%d0,%sp@(PT_D0)		/* save the return value*/
+	movel	%d0,%sp@(PT_OFF_D0)	/* save the return value*/
 
 ret_from_exception:
-	btst	#5,%sp@(PT_SR)		/* check if returning to kernel*/
+	btst	#5,%sp@(PT_OFF_SR)	/* check if returning to kernel*/
 	jeq	Luser_return		/* if so, skip resched, signals*/
 
 Lkernel_return:
@@ -124,7 +124,7 @@ Lreturn:
  */
 inthandler:
 	SAVE_ALL
-	movew	%sp@(PT_VECTOR), %d0
+	movew	%sp@(PT_OFF_VECTOR), %d0
 	and.l	#0x3ff, %d0
 	lsr.l	#0x02, %d0
 
@@ -139,7 +139,7 @@ ret_from_interrupt:
 2:
 	RESTORE_ALL
 1:
-	moveb	%sp@(PT_SR), %d0
+	moveb	%sp@(PT_OFF_SR), %d0
 	and	#7, %d0
 	jhi	2b
 	/* check if we need to do software interrupts */
diff --git a/arch/m68knommu/platform/coldfire/entry.S b/arch/m68knommu/platform/coldfire/entry.S
index 3b471c0da24..dd7d591f70e 100644
--- a/arch/m68knommu/platform/coldfire/entry.S
+++ b/arch/m68knommu/platform/coldfire/entry.S
@@ -81,11 +81,11 @@ ENTRY(system_call)
 
 	movel	%d3,%a0
 	jbsr	%a0@
-	movel	%d0,%sp@(PT_D0)		/* save the return value */
+	movel	%d0,%sp@(PT_OFF_D0)	/* save the return value */
 	jra	ret_from_exception
 1:
-	movel	#-ENOSYS,%d2		/* strace needs -ENOSYS in PT_D0 */
-	movel	%d2,PT_D0(%sp)		/* on syscall entry */
+	movel	#-ENOSYS,%d2		/* strace needs -ENOSYS in PT_OFF_D0 */
+	movel	%d2,PT_OFF_D0(%sp)	/* on syscall entry */
 	subql	#4,%sp
 	SAVE_SWITCH_STACK
 	jbsr	syscall_trace
@@ -93,7 +93,7 @@ ENTRY(system_call)
 	addql	#4,%sp
 	movel	%d3,%a0
 	jbsr	%a0@
-	movel	%d0,%sp@(PT_D0)		/* save the return value */
+	movel	%d0,%sp@(PT_OFF_D0)	/* save the return value */
 	subql	#4,%sp			/* dummy return address */
 	SAVE_SWITCH_STACK
 	jbsr	syscall_trace
@@ -104,7 +104,7 @@ ret_from_signal:
 
 ret_from_exception:
 	move	#0x2700,%sr		/* disable intrs */
-	btst	#5,%sp@(PT_SR)		/* check if returning to kernel */
+	btst	#5,%sp@(PT_OFF_SR)	/* check if returning to kernel */
 	jeq	Luser_return		/* if so, skip resched, signals */
 
 #ifdef CONFIG_PREEMPT
@@ -142,8 +142,8 @@ Luser_return:
 Lreturn:
 	move	#0x2700,%sr		/* disable intrs */
 	movel	sw_usp,%a0		/* get usp */
-	movel	%sp@(PT_PC),%a0@-	/* copy exception program counter */
-	movel	%sp@(PT_FORMATVEC),%a0@- /* copy exception format/vector/sr */
+	movel	%sp@(PT_OFF_PC),%a0@-	/* copy exception program counter */
+	movel	%sp@(PT_OFF_FORMATVEC),%a0@- /* copy exception format/vector/sr */
 	moveml	%sp@,%d1-%d5/%a0-%a2
 	lea	%sp@(32),%sp		/* space for 8 regs */
 	movel	%sp@+,%d0
@@ -181,9 +181,9 @@ Lsignal_return:
 ENTRY(inthandler)
 	SAVE_ALL
 	moveq	#-1,%d0
-	movel	%d0,%sp@(PT_ORIG_D0)
+	movel	%d0,%sp@(PT_OFF_ORIG_D0)
 
-	movew	%sp@(PT_FORMATVEC),%d0	/* put exception # in d0 */
+	movew	%sp@(PT_OFF_FORMATVEC),%d0 /* put exception # in d0 */
 	andl	#0x03fc,%d0		/* mask out vector only */
 
 	movel	%sp,%sp@-		/* push regs arg */
@@ -203,7 +203,7 @@ ENTRY(inthandler)
 ENTRY(fasthandler)
 	SAVE_LOCAL
 
-	movew	%sp@(PT_FORMATVEC),%d0
+	movew	%sp@(PT_OFF_FORMATVEC),%d0
 	andl	#0x03fc,%d0		/* mask out vector only */
 
 	movel	%sp,%sp@-		/* push regs arg */
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S
index acc1f05d1e2..e3ecb36dd55 100644
--- a/arch/microblaze/kernel/entry.S
+++ b/arch/microblaze/kernel/entry.S
@@ -592,6 +592,8 @@ C_ENTRY(full_exception_trap):
 	nop
 	mfs	r7, rfsr;		/* save FSR */
 	nop
+	mts	rfsr, r0;	/* Clear sticky fsr */
+	nop
 	la	r12, r0, full_exception
 	set_vms;
 	rtbd	r12, 0;
diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S
index 6b0288ebccd..2b86c03aa84 100644
--- a/arch/microblaze/kernel/hw_exception_handler.S
+++ b/arch/microblaze/kernel/hw_exception_handler.S
@@ -384,7 +384,7 @@ handle_other_ex: /* Handle Other exceptions here */
 	addk	r8, r17, r0;	 /* Load exception address */
 	bralid	r15, full_exception; /* Branch to the handler */
 	nop;
-	mts	r0, rfsr;	/* Clear sticky fsr */
+	mts	rfsr, r0;	/* Clear sticky fsr */
 	nop
 
 	/*
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index 4201c743cc9..c592d475b3d 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -235,7 +235,9 @@ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp)
 	regs->pc = pc;
 	regs->r1 = usp;
 	regs->pt_mode = 0;
+#ifdef CONFIG_MMU
 	regs->msr |= MSR_UMS;
+#endif
 }
 
 #ifdef CONFIG_MMU
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index f388dc68f60..524d9352f17 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -18,6 +18,7 @@ config PARISC
 	select BUG
 	select HAVE_PERF_EVENTS
 	select GENERIC_ATOMIC64 if !64BIT
+	select HAVE_ARCH_TRACEHOOK
 	help
 	  The PA-RISC microprocessor is designed by Hewlett-Packard and used
 	  in many of their workstations & servers (HP9000 700 and 800 series,
diff --git a/arch/parisc/include/asm/fixmap.h b/arch/parisc/include/asm/fixmap.h
index de3fe3a1822..6fec4d4a1a1 100644
--- a/arch/parisc/include/asm/fixmap.h
+++ b/arch/parisc/include/asm/fixmap.h
@@ -21,9 +21,9 @@
 #define KERNEL_MAP_END		(TMPALIAS_MAP_START)
 
 #ifndef __ASSEMBLY__
-extern void *vmalloc_start;
+extern void *parisc_vmalloc_start;
 #define PCXL_DMA_MAP_SIZE	(8*1024*1024)
-#define VMALLOC_START		((unsigned long)vmalloc_start)
+#define VMALLOC_START		((unsigned long)parisc_vmalloc_start)
 #define VMALLOC_END		(KERNEL_MAP_END)
 #endif /*__ASSEMBLY__*/
 
diff --git a/arch/parisc/include/asm/hardirq.h b/arch/parisc/include/asm/hardirq.h
index ce93133d511..0d68184a76c 100644
--- a/arch/parisc/include/asm/hardirq.h
+++ b/arch/parisc/include/asm/hardirq.h
@@ -1,29 +1,11 @@
 /* hardirq.h: PA-RISC hard IRQ support.
  *
  * Copyright (C) 2001 Matthew Wilcox <matthew@wil.cx>
- *
- * The locking is really quite interesting.  There's a cpu-local
- * count of how many interrupts are being handled, and a global
- * lock.  An interrupt can only be serviced if the global lock
- * is free.  You can't be sure no more interrupts are being
- * serviced until you've acquired the lock and then checked
- * all the per-cpu interrupt counts are all zero.  It's a specialised
- * br_lock, and that's exactly how Sparc does it.  We don't because
- * it's more locking for us.  This way is lock-free in the interrupt path.
  */
 
 #ifndef _PARISC_HARDIRQ_H
 #define _PARISC_HARDIRQ_H
 
-#include <linux/threads.h>
-#include <linux/irq.h>
-
-typedef struct {
-	unsigned long __softirq_pending; /* set_bit is used on this */
-} ____cacheline_aligned irq_cpustat_t;
-
-#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
-
-void ack_bad_irq(unsigned int irq);
+#include <asm-generic/hardirq.h>
 
 #endif /* _PARISC_HARDIRQ_H */
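
For reference, the generic header pulled in here supplies roughly what the deleted lines open-coded (paraphrased from asm-generic/hardirq.h of this period; a sketch, consult the real header):

	typedef struct {
		unsigned int __softirq_pending;
	} ____cacheline_aligned irq_cpustat_t;

	#include <linux/irq_cpustat.h>	/* standard irq_cpustat_t mappings */

	/* plus a default ack_bad_irq() that just printks the bad vector */

That default ack_bad_irq() is also why the parisc-private copy in kernel/irq.c is deleted later in this patch.
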
diff --git a/arch/parisc/include/asm/ptrace.h b/arch/parisc/include/asm/ptrace.h
index 302f68dc889..aead40b16dd 100644
--- a/arch/parisc/include/asm/ptrace.h
+++ b/arch/parisc/include/asm/ptrace.h
@@ -59,8 +59,11 @@ void user_enable_block_step(struct task_struct *task);
 #define user_mode(regs)			(((regs)->iaoq[0] & 3) ? 1 : 0)
 #define user_space(regs)		(((regs)->iasq[1] != 0) ? 1 : 0)
 #define instruction_pointer(regs)	((regs)->iaoq[0] & ~3)
+#define user_stack_pointer(regs)	((regs)->gr[30])
 unsigned long profile_pc(struct pt_regs *);
 extern void show_regs(struct pt_regs *);
-#endif
+
+
+#endif /* __KERNEL__ */
 
 #endif
diff --git a/arch/parisc/include/asm/syscall.h b/arch/parisc/include/asm/syscall.h
new file mode 100644
index 00000000000..8bdfd2c8c39
--- /dev/null
+++ b/arch/parisc/include/asm/syscall.h
@@ -0,0 +1,40 @@
+/* syscall.h */
+
+#ifndef _ASM_PARISC_SYSCALL_H_
+#define _ASM_PARISC_SYSCALL_H_
+
+#include <linux/err.h>
+#include <asm/ptrace.h>
+
+static inline long syscall_get_nr(struct task_struct *tsk,
+				  struct pt_regs *regs)
+{
+	return regs->gr[20];
+}
+
+static inline void syscall_get_arguments(struct task_struct *tsk,
+					 struct pt_regs *regs, unsigned int i,
+					 unsigned int n, unsigned long *args)
+{
+	BUG_ON(i);
+
+	switch (n) {
+	case 6:
+		args[5] = regs->gr[21];
+	case 5:
+		args[4] = regs->gr[22];
+	case 4:
+		args[3] = regs->gr[23];
+	case 3:
+		args[2] = regs->gr[24];
+	case 2:
+		args[1] = regs->gr[25];
+	case 1:
+		args[0] = regs->gr[26];
+		break;
+	default:
+		BUG();
+	}
+}
+
+#endif /*_ASM_PARISC_SYSCALL_H_*/
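
A usage note on the new file: the case labels fall through deliberately, so a request for n arguments starting at i == 0 copies gr[26] down through gr[27 - n]. A hedged sketch of the tracer side (the names child/regs are illustrative):

	/* sketch: fetch all six syscall arguments of a traced task */
	unsigned long args[6];

	syscall_get_arguments(child, regs, 0, 6, args);
	/* args[0] came from gr[26] (arg0) ... args[5] from gr[21] (arg5) */
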
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
index ac775a76bff..7ecc1039cfe 100644
--- a/arch/parisc/include/asm/thread_info.h
+++ b/arch/parisc/include/asm/thread_info.h
@@ -32,6 +32,11 @@ struct thread_info {
 #define init_thread_info        (init_thread_union.thread_info)
 #define init_stack              (init_thread_union.stack)
 
+/* how to get the thread information struct from C */
+#define current_thread_info()	((struct thread_info *)mfctl(30))
+
+#endif /* !__ASSEMBLY */
+
 /* thread information allocation */
 
 #define THREAD_SIZE_ORDER            2
@@ -40,11 +45,6 @@ struct thread_info {
 #define THREAD_SIZE             (PAGE_SIZE << THREAD_SIZE_ORDER)
 #define THREAD_SHIFT            (PAGE_SHIFT + THREAD_SIZE_ORDER)
 
-/* how to get the thread information struct from C */
-#define current_thread_info()	((struct thread_info *)mfctl(30))
-
-#endif /* !__ASSEMBLY */
-
 #define PREEMPT_ACTIVE_BIT	28
 #define PREEMPT_ACTIVE		(1 << PREEMPT_ACTIVE_BIT)
 
@@ -60,6 +60,8 @@ struct thread_info {
 #define TIF_RESTORE_SIGMASK	6	/* restore saved signal mask */
 #define TIF_FREEZE		7	/* is freezing for suspend */
 #define TIF_NOTIFY_RESUME	8	/* callback before returning to user */
+#define TIF_SINGLESTEP		9	/* single stepping? */
+#define TIF_BLOCKSTEP		10	/* branch stepping? */
 
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
@@ -69,6 +71,8 @@ struct thread_info {
 #define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
 #define _TIF_FREEZE		(1 << TIF_FREEZE)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
+#define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
+#define _TIF_BLOCKSTEP		(1 << TIF_BLOCKSTEP)
 
 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \
                              _TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK)
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index 699cf8ef211..fcd3c707bf1 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -270,8 +270,8 @@ int main(void)
 	DEFINE(DTLB_OFF_COUNT, offsetof(struct pdc_cache_info, dt_off_count));
 	DEFINE(DTLB_LOOP, offsetof(struct pdc_cache_info, dt_loop));
 	BLANK();
-	DEFINE(PA_BLOCKSTEP_BIT, 31-PT_BLOCKSTEP_BIT);
-	DEFINE(PA_SINGLESTEP_BIT, 31-PT_SINGLESTEP_BIT);
+	DEFINE(TIF_BLOCKSTEP_PA_BIT, 31-TIF_BLOCKSTEP);
+	DEFINE(TIF_SINGLESTEP_PA_BIT, 31-TIF_SINGLESTEP);
 	BLANK();
 	DEFINE(ASM_PMD_SHIFT, PMD_SHIFT);
 	DEFINE(ASM_PGDIR_SHIFT, PGDIR_SHIFT);
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 8c4712b74dc..3a44f7f704f 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -2047,12 +2047,13 @@ syscall_do_signal:
 	b,n	syscall_check_sig
 
 syscall_restore:
-	/* Are we being ptraced? */
 	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
 
-	ldw	TASK_PTRACE(%r1), %r19
-	bb,<	%r19,31,syscall_restore_rfi
-	nop
+	/* Are we being ptraced? */
+	ldw	TASK_FLAGS(%r1),%r19
+	ldi	(_TIF_SINGLESTEP|_TIF_BLOCKSTEP),%r2
+	and,COND(=)	%r19,%r2,%r0
+	b,n	syscall_restore_rfi
 
 	ldo	TASK_PT_FR31(%r1),%r19		   /* reload fpregs */
 	rest_fp	%r19
@@ -2113,16 +2114,16 @@ syscall_restore_rfi:
 	ldi	0x0b,%r20			   /* Create new PSW */
 	depi	-1,13,1,%r20			   /* C, Q, D, and I bits */
 
-	/* The values of PA_SINGLESTEP_BIT and PA_BLOCKSTEP_BIT are
-	 * set in include/linux/ptrace.h and converted to PA bitmap
+	/* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
+	 * set in thread_info.h and converted to PA bitmap
 	 * numbers in asm-offsets.c */
 
-	/* if ((%r19.PA_SINGLESTEP_BIT)) { %r20.27=1} */
-	extru,=	%r19,PA_SINGLESTEP_BIT,1,%r0
+	/* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
+	extru,=	%r19,TIF_SINGLESTEP_PA_BIT,1,%r0
 	depi	-1,27,1,%r20			   /* R bit */
 
-	/* if ((%r19.PA_BLOCKSTEP_BIT)) { %r20.7=1} */
-	extru,=	%r19,PA_BLOCKSTEP_BIT,1,%r0
+	/* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
+	extru,=	%r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
 	depi	-1,7,1,%r20			   /* T bit */
 
 	STREG	%r20,TASK_PT_PSW(%r1)
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 330f536a932..2e7610cb33d 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -423,8 +423,3 @@ void __init init_IRQ(void)
 	set_eiem(cpu_eiem);	/* EIEM : enable all external intr */
 
 }
-
-void ack_bad_irq(unsigned int irq)
-{
-	printk(KERN_WARNING "unexpected IRQ %d\n", irq);
-}
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index 61ee0eec4e6..212074653df 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -893,7 +893,7 @@ int module_finalize(const Elf_Ehdr *hdr,
 	 * ourselves */
 	for (i = 1; i < hdr->e_shnum; i++) {
 		if(sechdrs[i].sh_type == SHT_SYMTAB
-		   && (sechdrs[i].sh_type & SHF_ALLOC)) {
+		   && (sechdrs[i].sh_flags & SHF_ALLOC)) {
 			int strindex = sechdrs[i].sh_link;
 			/* FIXME: AWFUL HACK
 			 * The cast is to drop the const from
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index 927db3668b6..c4f49e45129 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -13,6 +13,7 @@
 #include <linux/smp.h>
 #include <linux/errno.h>
 #include <linux/ptrace.h>
+#include <linux/tracehook.h>
 #include <linux/user.h>
 #include <linux/personality.h>
 #include <linux/security.h>
@@ -35,7 +36,8 @@
  */
 void ptrace_disable(struct task_struct *task)
 {
-	task->ptrace &= ~(PT_SINGLESTEP|PT_BLOCKSTEP);
+	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
+	clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
 
 	/* make sure the trap bits are not set */
 	pa_psw(task)->r = 0;
@@ -55,8 +57,8 @@ void user_disable_single_step(struct task_struct *task)
 
 void user_enable_single_step(struct task_struct *task)
 {
-	task->ptrace &= ~PT_BLOCKSTEP;
-	task->ptrace |= PT_SINGLESTEP;
+	clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
+	set_tsk_thread_flag(task, TIF_SINGLESTEP);
 
 	if (pa_psw(task)->n) {
 		struct siginfo si;
@@ -98,8 +100,8 @@ void user_enable_single_step(struct task_struct *task)
 
 void user_enable_block_step(struct task_struct *task)
 {
-	task->ptrace &= ~PT_SINGLESTEP;
-	task->ptrace |= PT_BLOCKSTEP;
+	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
+	set_tsk_thread_flag(task, TIF_BLOCKSTEP);
 
 	/* Enable taken branch trap. */
 	pa_psw(task)->r = 0;
@@ -263,22 +265,20 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 }
 #endif
 
+long do_syscall_trace_enter(struct pt_regs *regs)
+{
+	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
+	    tracehook_report_syscall_entry(regs))
+		return -1L;
+
+	return regs->gr[20];
+}
 
-void syscall_trace(void)
+void do_syscall_trace_exit(struct pt_regs *regs)
 {
-	if (!test_thread_flag(TIF_SYSCALL_TRACE))
-		return;
-	if (!(current->ptrace & PT_PTRACED))
-		return;
-	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
-				 ? 0x80 : 0));
-	/*
-	 * this isn't the same as continuing with a signal, but it will do
-	 * for normal use.  strace only continues with a signal if the
-	 * stopping signal is not SIGTRAP.  -brl
-	 */
-	if (current->exit_code) {
-		send_sig(current->exit_code, current, 1);
-		current->exit_code = 0;
-	}
+	int stepping = test_thread_flag(TIF_SINGLESTEP) ||
+		test_thread_flag(TIF_BLOCKSTEP);
+
+	if (stepping || test_thread_flag(TIF_SYSCALL_TRACE))
+		tracehook_report_syscall_exit(regs, stepping);
 }
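
A note on the tracehook contract being adopted here, as defined by linux/tracehook.h of this era: tracehook_report_syscall_entry() returns nonzero when the tracer asks for the syscall to be aborted, hence the -1L; tracehook_report_syscall_exit(regs, stepping) reports the exit and, with stepping set, arranges the SIGTRAP that single/block stepping expects. The assembly caller in syscall.S (further down in this patch) consumes the entry return value roughly like this C sketch (a paraphrase, not the actual code path):

	/* sketch of what tracesys/tracesys_next do around the call */
	long nr = do_syscall_trace_enter(regs);	/* may be -1L */
	if (nr < 0 || nr >= NR_syscalls)
		regs->gr[28] = -ENOSYS;		/* traced-away or bogus */
	else
		regs->gr[28] = call_syscall(nr, regs);	/* via sys_call_table */
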
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index 8eb3c63c407..e8467e4aa8d 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -21,6 +21,7 @@
 #include <linux/errno.h>
 #include <linux/wait.h>
 #include <linux/ptrace.h>
+#include <linux/tracehook.h>
 #include <linux/unistd.h>
 #include <linux/stddef.h>
 #include <linux/compat.h>
@@ -34,7 +35,6 @@
 #include <asm/asm-offsets.h>
 
 #ifdef CONFIG_COMPAT
-#include <linux/compat.h>
 #include "signal32.h"
 #endif
 
@@ -468,6 +468,9 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
 	sigaddset(&current->blocked,sig);
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
+
+	tracehook_signal_handler(sig, info, ka, regs, 0);
+
 	return 1;
 }
 
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 59fc1a43ec3..f5f96021caa 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -288,18 +288,23 @@ tracesys:
288 STREG %r18,PT_GR18(%r2) 288 STREG %r18,PT_GR18(%r2)
289 /* Finished saving things for the debugger */ 289 /* Finished saving things for the debugger */
290 290
291 ldil L%syscall_trace,%r1 291 copy %r2,%r26
292 ldil L%do_syscall_trace_enter,%r1
292 ldil L%tracesys_next,%r2 293 ldil L%tracesys_next,%r2
293 be R%syscall_trace(%sr7,%r1) 294 be R%do_syscall_trace_enter(%sr7,%r1)
294 ldo R%tracesys_next(%r2),%r2 295 ldo R%tracesys_next(%r2),%r2
295 296
296tracesys_next: 297tracesys_next:
298 /* do_syscall_trace_enter either returned the syscallno, or -1L,
299 * so we skip restoring the PT_GR20 below, since we pulled it from
300 * task->thread.regs.gr[20] above.
301 */
302 copy %ret0,%r20
297 ldil L%sys_call_table,%r1 303 ldil L%sys_call_table,%r1
298 ldo R%sys_call_table(%r1), %r19 304 ldo R%sys_call_table(%r1), %r19
299 305
300 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ 306 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
301 LDREG TI_TASK(%r1), %r1 307 LDREG TI_TASK(%r1), %r1
302 LDREG TASK_PT_GR20(%r1), %r20
303 LDREG TASK_PT_GR26(%r1), %r26 /* Restore the users args */ 308 LDREG TASK_PT_GR26(%r1), %r26 /* Restore the users args */
304 LDREG TASK_PT_GR25(%r1), %r25 309 LDREG TASK_PT_GR25(%r1), %r25
305 LDREG TASK_PT_GR24(%r1), %r24 310 LDREG TASK_PT_GR24(%r1), %r24
@@ -336,7 +341,8 @@ tracesys_exit:
336#ifdef CONFIG_64BIT 341#ifdef CONFIG_64BIT
337 ldo -16(%r30),%r29 /* Reference param save area */ 342 ldo -16(%r30),%r29 /* Reference param save area */
338#endif 343#endif
339 bl syscall_trace, %r2 344 ldo TASK_REGS(%r1),%r26
345 bl do_syscall_trace_exit,%r2
340 STREG %r28,TASK_PT_GR28(%r1) /* save return value now */ 346 STREG %r28,TASK_PT_GR28(%r1) /* save return value now */
341 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ 347 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
342 LDREG TI_TASK(%r1), %r1 348 LDREG TI_TASK(%r1), %r1
@@ -353,12 +359,12 @@ tracesys_exit:
353 359
354tracesys_sigexit: 360tracesys_sigexit:
355 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ 361 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
356 LDREG 0(%r1), %r1 362 LDREG TI_TASK(%r1), %r1
357#ifdef CONFIG_64BIT 363#ifdef CONFIG_64BIT
358 ldo -16(%r30),%r29 /* Reference param save area */ 364 ldo -16(%r30),%r29 /* Reference param save area */
359#endif 365#endif
360 bl syscall_trace, %r2 366 bl do_syscall_trace_exit,%r2
361 nop 367 ldo TASK_REGS(%r1),%r26
362 368
363 ldil L%syscall_exit_rfi,%r1 369 ldil L%syscall_exit_rfi,%r1
364 be,n R%syscall_exit_rfi(%sr7,%r1) 370 be,n R%syscall_exit_rfi(%sr7,%r1)
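The assembly now branches to do_syscall_trace_enter and copies its return value into %r20, the syscall-number register. A sketch of the C-side contract the stub relies on (hypothetical shape; the real helper lives in arch/parisc/kernel/ptrace.c):

    /* Returns the (possibly tracer-rewritten) syscall number, or -1L
     * when the tracer wants the syscall skipped.  Sketch only. */
    long do_syscall_trace_enter(struct pt_regs *regs)
    {
            long ret = 0;

            if (test_thread_flag(TIF_SYSCALL_TRACE) &&
                tracehook_report_syscall_entry(regs))
                    ret = -1L;

            return ret ?: regs->gr[20];
    }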
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index 775be2791bc..fda4baa059b 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -28,6 +28,7 @@
28#include <asm/cache.h> 28#include <asm/cache.h>
29#include <asm/page.h> 29#include <asm/page.h>
30#include <asm/asm-offsets.h> 30#include <asm/asm-offsets.h>
31#include <asm/thread_info.h>
31 32
32/* ld script to make hppa Linux kernel */ 33/* ld script to make hppa Linux kernel */
33#ifndef CONFIG_64BIT 34#ifndef CONFIG_64BIT
@@ -134,6 +135,15 @@ SECTIONS
134 __init_begin = .; 135 __init_begin = .;
135 INIT_TEXT_SECTION(16384) 136 INIT_TEXT_SECTION(16384)
136 INIT_DATA_SECTION(16) 137 INIT_DATA_SECTION(16)
138 /* we have to discard exit text and such at runtime, not link time */
139 .exit.text :
140 {
141 EXIT_TEXT
142 }
143 .exit.data :
144 {
145 EXIT_DATA
146 }
137 147
138 PERCPU(PAGE_SIZE) 148 PERCPU(PAGE_SIZE)
139 . = ALIGN(PAGE_SIZE); 149 . = ALIGN(PAGE_SIZE);
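Keeping EXIT_TEXT/EXIT_DATA in the image and freeing them at boot avoids link failures: built-in drivers still emit references to their __exit routines. An illustrative driver (not from this patch) shows where such a reference comes from:

    #include <linux/init.h>
    #include <linux/module.h>

    static int __init mydrv_init(void)
    {
            return 0;
    }

    static void __exit mydrv_exit(void)
    {
    }

    /* Even in a built-in (=y) configuration, module_exit() records a
     * pointer to mydrv_exit(), so .exit.text cannot simply be
     * /DISCARD/ed by the linker; it has to be dropped at runtime. */
    module_init(mydrv_init);
    module_exit(mydrv_exit);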
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index d5aca31fddb..13b6e3e59b9 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -434,8 +434,8 @@ void mark_rodata_ro(void)
434#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \ 434#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
435 & ~(VM_MAP_OFFSET-1))) 435 & ~(VM_MAP_OFFSET-1)))
436 436
437void *vmalloc_start __read_mostly; 437void *parisc_vmalloc_start __read_mostly;
438EXPORT_SYMBOL(vmalloc_start); 438EXPORT_SYMBOL(parisc_vmalloc_start);
439 439
440#ifdef CONFIG_PA11 440#ifdef CONFIG_PA11
441unsigned long pcxl_dma_start __read_mostly; 441unsigned long pcxl_dma_start __read_mostly;
@@ -496,13 +496,14 @@ void __init mem_init(void)
496#ifdef CONFIG_PA11 496#ifdef CONFIG_PA11
497 if (hppa_dma_ops == &pcxl_dma_ops) { 497 if (hppa_dma_ops == &pcxl_dma_ops) {
498 pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START); 498 pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
499 vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start + PCXL_DMA_MAP_SIZE); 499 parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start
500 + PCXL_DMA_MAP_SIZE);
500 } else { 501 } else {
501 pcxl_dma_start = 0; 502 pcxl_dma_start = 0;
502 vmalloc_start = SET_MAP_OFFSET(MAP_START); 503 parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
503 } 504 }
504#else 505#else
505 vmalloc_start = SET_MAP_OFFSET(MAP_START); 506 parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
506#endif 507#endif
507 508
508 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n", 509 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n",
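SET_MAP_OFFSET() always advances to the next VM_MAP_OFFSET boundary, leaving a guard gap between the PCXL DMA map and the start of the vmalloc area. A worked example, assuming a purely illustrative VM_MAP_OFFSET of 0x2000:

    /* ((x + 0x2000) & ~0x1fff) rounds past the next 8 KiB boundary:
     *
     *   x = 0x40001234  ->  0x40003234 & ~0x1fff  =  0x40002000
     *   x = 0x40002000  ->  0x40004000 & ~0x1fff  =  0x40004000
     *
     * Note that an already-aligned address still advances a full step,
     * so the two regions never touch. */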
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index ec5eee7c25d..06cce8285ba 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -58,7 +58,7 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
58int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code); 58int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
59int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action); 59int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action);
60 60
61static inline int kvm_s390_vcpu_get_memsize(struct kvm_vcpu *vcpu) 61static inline long kvm_s390_vcpu_get_memsize(struct kvm_vcpu *vcpu)
62{ 62{
63 return vcpu->arch.sie_block->gmslm 63 return vcpu->arch.sie_block->gmslm
64 - vcpu->arch.sie_block->gmsor 64 - vcpu->arch.sie_block->gmsor
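gmslm and gmsor are 64-bit guest storage bounds, so once a guest is configured with 2 GiB or more the difference no longer fits in an int. A minimal illustration of the truncation the return-type change avoids:

    unsigned long gmslm = 0x100000000UL;  /* 4 GiB guest limit     */
    unsigned long gmsor = 0;              /* guest storage origin  */

    int  bad  = gmslm - gmsor;  /* 0x100000000 truncated/overflowed */
    long good = gmslm - gmsor;  /* 4 GiB, as intended (64-bit s390) */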
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index ac45aab741a..05ef5380a68 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -26,6 +26,7 @@ config SPARC
26 select RTC_CLASS 26 select RTC_CLASS
27 select RTC_DRV_M48T59 27 select RTC_DRV_M48T59
28 select HAVE_PERF_EVENTS 28 select HAVE_PERF_EVENTS
29 select PERF_USE_VMALLOC
29 select HAVE_DMA_ATTRS 30 select HAVE_DMA_ATTRS
30 select HAVE_DMA_API_DEBUG 31 select HAVE_DMA_API_DEBUG
31 32
@@ -48,6 +49,7 @@ config SPARC64
48 select RTC_DRV_SUN4V 49 select RTC_DRV_SUN4V
49 select RTC_DRV_STARFIRE 50 select RTC_DRV_STARFIRE
50 select HAVE_PERF_EVENTS 51 select HAVE_PERF_EVENTS
52 select PERF_USE_VMALLOC
51 53
52config ARCH_DEFCONFIG 54config ARCH_DEFCONFIG
53 string 55 string
diff --git a/arch/sparc/include/asm/hardirq_32.h b/arch/sparc/include/asm/hardirq_32.h
index 4f63ed8df55..162007643cd 100644
--- a/arch/sparc/include/asm/hardirq_32.h
+++ b/arch/sparc/include/asm/hardirq_32.h
@@ -7,17 +7,7 @@
7#ifndef __SPARC_HARDIRQ_H 7#ifndef __SPARC_HARDIRQ_H
8#define __SPARC_HARDIRQ_H 8#define __SPARC_HARDIRQ_H
9 9
10#include <linux/threads.h>
11#include <linux/spinlock.h>
12#include <linux/cache.h>
13
14/* entry.S is sensitive to the offsets of these fields */ /* XXX P3 Is it? */
15typedef struct {
16 unsigned int __softirq_pending;
17} ____cacheline_aligned irq_cpustat_t;
18
19#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
20
21#define HARDIRQ_BITS 8 10#define HARDIRQ_BITS 8
11#include <asm-generic/hardirq.h>
22 12
23#endif /* __SPARC_HARDIRQ_H */ 13#endif /* __SPARC_HARDIRQ_H */
diff --git a/arch/sparc/include/asm/irq_32.h b/arch/sparc/include/asm/irq_32.h
index ea43057d476..cbf4801deaa 100644
--- a/arch/sparc/include/asm/irq_32.h
+++ b/arch/sparc/include/asm/irq_32.h
@@ -6,10 +6,10 @@
6#ifndef _SPARC_IRQ_H 6#ifndef _SPARC_IRQ_H
7#define _SPARC_IRQ_H 7#define _SPARC_IRQ_H
8 8
9#include <linux/interrupt.h>
10
11#define NR_IRQS 16 9#define NR_IRQS 16
12 10
11#include <linux/interrupt.h>
12
13#define irq_canonicalize(irq) (irq) 13#define irq_canonicalize(irq) (irq)
14 14
15extern void __init init_IRQ(void); 15extern void __init init_IRQ(void);
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 0ff92fa2206..f3cb790fa2a 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -41,8 +41,8 @@
41#define LOW_OBP_ADDRESS _AC(0x00000000f0000000,UL) 41#define LOW_OBP_ADDRESS _AC(0x00000000f0000000,UL)
42#define HI_OBP_ADDRESS _AC(0x0000000100000000,UL) 42#define HI_OBP_ADDRESS _AC(0x0000000100000000,UL)
43#define VMALLOC_START _AC(0x0000000100000000,UL) 43#define VMALLOC_START _AC(0x0000000100000000,UL)
44#define VMALLOC_END _AC(0x0000000200000000,UL) 44#define VMALLOC_END _AC(0x0000010000000000,UL)
45#define VMEMMAP_BASE _AC(0x0000000200000000,UL) 45#define VMEMMAP_BASE _AC(0x0000010000000000,UL)
46 46
47#define vmemmap ((struct page *)VMEMMAP_BASE) 47#define vmemmap ((struct page *)VMEMMAP_BASE)
48 48
diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S
index 3ea6e8cde8c..1d361477d7d 100644
--- a/arch/sparc/kernel/ktlb.S
+++ b/arch/sparc/kernel/ktlb.S
@@ -280,8 +280,8 @@ kvmap_dtlb_nonlinear:
280 280
281#ifdef CONFIG_SPARSEMEM_VMEMMAP 281#ifdef CONFIG_SPARSEMEM_VMEMMAP
282 /* Do not use the TSB for vmemmap. */ 282 /* Do not use the TSB for vmemmap. */
283 mov (VMEMMAP_BASE >> 24), %g5 283 mov (VMEMMAP_BASE >> 40), %g5
284 sllx %g5, 24, %g5 284 sllx %g5, 40, %g5
285 cmp %g4,%g5 285 cmp %g4,%g5
286 bgeu,pn %xcc, kvmap_vmemmap 286 bgeu,pn %xcc, kvmap_vmemmap
287 nop 287 nop
@@ -293,8 +293,8 @@ kvmap_dtlb_tsbmiss:
293 sethi %hi(MODULES_VADDR), %g5 293 sethi %hi(MODULES_VADDR), %g5
294 cmp %g4, %g5 294 cmp %g4, %g5
295 blu,pn %xcc, kvmap_dtlb_longpath 295 blu,pn %xcc, kvmap_dtlb_longpath
296 mov (VMALLOC_END >> 24), %g5 296 mov (VMALLOC_END >> 40), %g5
297 sllx %g5, 24, %g5 297 sllx %g5, 40, %g5
298 cmp %g4, %g5 298 cmp %g4, %g5
299 bgeu,pn %xcc, kvmap_dtlb_longpath 299 bgeu,pn %xcc, kvmap_dtlb_longpath
300 nop 300 nop
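The mov/sllx pair materializes a large constant as a small immediate shifted left; SPARC's mov synthetic instruction only takes a 13-bit signed immediate, so the shift has to grow along with the constant:

    /* The assembly computes (C >> S), then shifts back: (C >> S) << S.
     * The immediate (C >> S) must fit in 13 signed bits (|x| < 4096):
     *
     *   old VMEMMAP_BASE 0x0000000200000000 >> 24 = 0x200   (fits)
     *   new VMEMMAP_BASE 0x0000010000000000 >> 40 = 0x1     (fits)
     *   new VMEMMAP_BASE 0x0000010000000000 >> 24 = 0x10000 (too big)
     */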
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 2d6a1b10c81..04db9274389 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -56,7 +56,8 @@ struct cpu_hw_events {
56 struct perf_event *events[MAX_HWEVENTS]; 56 struct perf_event *events[MAX_HWEVENTS];
57 unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; 57 unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
58 unsigned long active_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; 58 unsigned long active_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
59 int enabled; 59 u64 pcr;
60 int enabled;
60}; 61};
61DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, }; 62DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
62 63
@@ -68,8 +69,30 @@ struct perf_event_map {
68#define PIC_LOWER 0x02 69#define PIC_LOWER 0x02
69}; 70};
70 71
72static unsigned long perf_event_encode(const struct perf_event_map *pmap)
73{
74 return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask;
75}
76
77static void perf_event_decode(unsigned long val, u16 *enc, u8 *msk)
78{
79 *msk = val & 0xff;
80 *enc = val >> 16;
81}
82
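perf_event_encode()/perf_event_decode() pack an event's PCR encoding and its allowed-counter mask into a single unsigned long, so the group-scheduling code added below can carry events around as scalars. Round trip, using the ultra3 cache-reference event as an example:

    struct perf_event_map ref = { 0x0009, PIC_LOWER };
    unsigned long packed = perf_event_encode(&ref);  /* 0x00090002 */

    u16 enc;
    u8 msk;
    perf_event_decode(packed, &enc, &msk);  /* enc = 0x0009, msk = 0x02 */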
83#define C(x) PERF_COUNT_HW_CACHE_##x
84
85#define CACHE_OP_UNSUPPORTED 0xfffe
86#define CACHE_OP_NONSENSE 0xffff
87
88typedef struct perf_event_map cache_map_t
89 [PERF_COUNT_HW_CACHE_MAX]
90 [PERF_COUNT_HW_CACHE_OP_MAX]
91 [PERF_COUNT_HW_CACHE_RESULT_MAX];
92
71struct sparc_pmu { 93struct sparc_pmu {
72 const struct perf_event_map *(*event_map)(int); 94 const struct perf_event_map *(*event_map)(int);
95 const cache_map_t *cache_map;
73 int max_events; 96 int max_events;
74 int upper_shift; 97 int upper_shift;
75 int lower_shift; 98 int lower_shift;
@@ -80,21 +103,109 @@ struct sparc_pmu {
80 int lower_nop; 103 int lower_nop;
81}; 104};
82 105
83static const struct perf_event_map ultra3i_perfmon_event_map[] = { 106static const struct perf_event_map ultra3_perfmon_event_map[] = {
84 [PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER }, 107 [PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER },
85 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER }, 108 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER },
86 [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER }, 109 [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER },
87 [PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER }, 110 [PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
88}; 111};
89 112
90static const struct perf_event_map *ultra3i_event_map(int event_id) 113static const struct perf_event_map *ultra3_event_map(int event_id)
91{ 114{
92 return &ultra3i_perfmon_event_map[event_id]; 115 return &ultra3_perfmon_event_map[event_id];
93} 116}
94 117
95static const struct sparc_pmu ultra3i_pmu = { 118static const cache_map_t ultra3_cache_map = {
96 .event_map = ultra3i_event_map, 119[C(L1D)] = {
97 .max_events = ARRAY_SIZE(ultra3i_perfmon_event_map), 120 [C(OP_READ)] = {
121 [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
122 [C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
123 },
124 [C(OP_WRITE)] = {
125 [C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER },
126 [C(RESULT_MISS)] = { 0x0a, PIC_UPPER },
127 },
128 [C(OP_PREFETCH)] = {
129 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
130 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
131 },
132},
133[C(L1I)] = {
134 [C(OP_READ)] = {
135 [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
136 [C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
137 },
138 [ C(OP_WRITE) ] = {
139 [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
140 [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
141 },
142 [ C(OP_PREFETCH) ] = {
143 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
144 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
145 },
146},
147[C(LL)] = {
148 [C(OP_READ)] = {
149 [C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER, },
150 [C(RESULT_MISS)] = { 0x0c, PIC_UPPER, },
151 },
152 [C(OP_WRITE)] = {
153 [C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER },
154 [C(RESULT_MISS)] = { 0x0c, PIC_UPPER },
155 },
156 [C(OP_PREFETCH)] = {
157 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
158 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
159 },
160},
161[C(DTLB)] = {
162 [C(OP_READ)] = {
163 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
164 [C(RESULT_MISS)] = { 0x12, PIC_UPPER, },
165 },
166 [ C(OP_WRITE) ] = {
167 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
168 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
169 },
170 [ C(OP_PREFETCH) ] = {
171 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
172 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
173 },
174},
175[C(ITLB)] = {
176 [C(OP_READ)] = {
177 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
178 [C(RESULT_MISS)] = { 0x11, PIC_UPPER, },
179 },
180 [ C(OP_WRITE) ] = {
181 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
182 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
183 },
184 [ C(OP_PREFETCH) ] = {
185 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
186 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
187 },
188},
189[C(BPU)] = {
190 [C(OP_READ)] = {
191 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
192 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
193 },
194 [ C(OP_WRITE) ] = {
195 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
196 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
197 },
198 [ C(OP_PREFETCH) ] = {
199 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
200 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
201 },
202},
203};
204
205static const struct sparc_pmu ultra3_pmu = {
206 .event_map = ultra3_event_map,
207 .cache_map = &ultra3_cache_map,
208 .max_events = ARRAY_SIZE(ultra3_perfmon_event_map),
98 .upper_shift = 11, 209 .upper_shift = 11,
99 .lower_shift = 4, 210 .lower_shift = 4,
100 .event_mask = 0x3f, 211 .event_mask = 0x3f,
@@ -102,6 +213,121 @@ static const struct sparc_pmu ultra3i_pmu = {
102 .lower_nop = 0x14, 213 .lower_nop = 0x14,
103}; 214};
104 215
216/* Niagara1 is very limited. The upper PIC is hard-locked to count
217 * only instructions, so it is free running which creates all kinds of
218 * problems. Some hardware designs make one wonder if the creator
219 * even looked at how this stuff gets used by software.
220 */
221static const struct perf_event_map niagara1_perfmon_event_map[] = {
222 [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, PIC_UPPER },
223 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, PIC_UPPER },
224 [PERF_COUNT_HW_CACHE_REFERENCES] = { 0, PIC_NONE },
225 [PERF_COUNT_HW_CACHE_MISSES] = { 0x03, PIC_LOWER },
226};
227
228static const struct perf_event_map *niagara1_event_map(int event_id)
229{
230 return &niagara1_perfmon_event_map[event_id];
231}
232
233static const cache_map_t niagara1_cache_map = {
234[C(L1D)] = {
235 [C(OP_READ)] = {
236 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
237 [C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
238 },
239 [C(OP_WRITE)] = {
240 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
241 [C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
242 },
243 [C(OP_PREFETCH)] = {
244 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
245 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
246 },
247},
248[C(L1I)] = {
249 [C(OP_READ)] = {
250 [C(RESULT_ACCESS)] = { 0x00, PIC_UPPER },
251 [C(RESULT_MISS)] = { 0x02, PIC_LOWER, },
252 },
253 [ C(OP_WRITE) ] = {
254 [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
255 [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
256 },
257 [ C(OP_PREFETCH) ] = {
258 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
259 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
260 },
261},
262[C(LL)] = {
263 [C(OP_READ)] = {
264 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
265 [C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
266 },
267 [C(OP_WRITE)] = {
268 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
269 [C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
270 },
271 [C(OP_PREFETCH)] = {
272 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
273 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
274 },
275},
276[C(DTLB)] = {
277 [C(OP_READ)] = {
278 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
279 [C(RESULT_MISS)] = { 0x05, PIC_LOWER, },
280 },
281 [ C(OP_WRITE) ] = {
282 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
283 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
284 },
285 [ C(OP_PREFETCH) ] = {
286 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
287 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
288 },
289},
290[C(ITLB)] = {
291 [C(OP_READ)] = {
292 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
293 [C(RESULT_MISS)] = { 0x04, PIC_LOWER, },
294 },
295 [ C(OP_WRITE) ] = {
296 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
297 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
298 },
299 [ C(OP_PREFETCH) ] = {
300 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
301 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
302 },
303},
304[C(BPU)] = {
305 [C(OP_READ)] = {
306 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
307 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
308 },
309 [ C(OP_WRITE) ] = {
310 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
311 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
312 },
313 [ C(OP_PREFETCH) ] = {
314 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
315 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
316 },
317},
318};
319
320static const struct sparc_pmu niagara1_pmu = {
321 .event_map = niagara1_event_map,
322 .cache_map = &niagara1_cache_map,
323 .max_events = ARRAY_SIZE(niagara1_perfmon_event_map),
324 .upper_shift = 0,
325 .lower_shift = 4,
326 .event_mask = 0x7,
327 .upper_nop = 0x0,
328 .lower_nop = 0x0,
329};
330
105static const struct perf_event_map niagara2_perfmon_event_map[] = { 331static const struct perf_event_map niagara2_perfmon_event_map[] = {
106 [PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER }, 332 [PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER },
107 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER }, 333 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER },
@@ -116,8 +342,96 @@ static const struct perf_event_map *niagara2_event_map(int event_id)
116 return &niagara2_perfmon_event_map[event_id]; 342 return &niagara2_perfmon_event_map[event_id];
117} 343}
118 344
345static const cache_map_t niagara2_cache_map = {
346[C(L1D)] = {
347 [C(OP_READ)] = {
348 [C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
349 [C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
350 },
351 [C(OP_WRITE)] = {
352 [C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
353 [C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
354 },
355 [C(OP_PREFETCH)] = {
356 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
357 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
358 },
359},
360[C(L1I)] = {
361 [C(OP_READ)] = {
362 [C(RESULT_ACCESS)] = { 0x02ff, PIC_UPPER | PIC_LOWER, },
363 [C(RESULT_MISS)] = { 0x0301, PIC_UPPER | PIC_LOWER, },
364 },
365 [ C(OP_WRITE) ] = {
366 [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
367 [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
368 },
369 [ C(OP_PREFETCH) ] = {
370 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
371 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
372 },
373},
374[C(LL)] = {
375 [C(OP_READ)] = {
376 [C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
377 [C(RESULT_MISS)] = { 0x0330, PIC_UPPER | PIC_LOWER, },
378 },
379 [C(OP_WRITE)] = {
380 [C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
381 [C(RESULT_MISS)] = { 0x0320, PIC_UPPER | PIC_LOWER, },
382 },
383 [C(OP_PREFETCH)] = {
384 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
385 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
386 },
387},
388[C(DTLB)] = {
389 [C(OP_READ)] = {
390 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
391 [C(RESULT_MISS)] = { 0x0b08, PIC_UPPER | PIC_LOWER, },
392 },
393 [ C(OP_WRITE) ] = {
394 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
395 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
396 },
397 [ C(OP_PREFETCH) ] = {
398 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
399 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
400 },
401},
402[C(ITLB)] = {
403 [C(OP_READ)] = {
404 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
405 [C(RESULT_MISS)] = { 0xb04, PIC_UPPER | PIC_LOWER, },
406 },
407 [ C(OP_WRITE) ] = {
408 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
409 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
410 },
411 [ C(OP_PREFETCH) ] = {
412 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
413 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
414 },
415},
416[C(BPU)] = {
417 [C(OP_READ)] = {
418 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
419 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
420 },
421 [ C(OP_WRITE) ] = {
422 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
423 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
424 },
425 [ C(OP_PREFETCH) ] = {
426 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
427 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
428 },
429},
430};
431
119static const struct sparc_pmu niagara2_pmu = { 432static const struct sparc_pmu niagara2_pmu = {
120 .event_map = niagara2_event_map, 433 .event_map = niagara2_event_map,
434 .cache_map = &niagara2_cache_map,
121 .max_events = ARRAY_SIZE(niagara2_perfmon_event_map), 435 .max_events = ARRAY_SIZE(niagara2_perfmon_event_map),
122 .upper_shift = 19, 436 .upper_shift = 19,
123 .lower_shift = 6, 437 .lower_shift = 6,
@@ -151,23 +465,30 @@ static u64 nop_for_index(int idx)
151 sparc_pmu->lower_nop, idx); 465 sparc_pmu->lower_nop, idx);
152} 466}
153 467
154static inline void sparc_pmu_enable_event(struct hw_perf_event *hwc, 468static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
155 int idx)
156{ 469{
157 u64 val, mask = mask_for_index(idx); 470 u64 val, mask = mask_for_index(idx);
158 471
159 val = pcr_ops->read(); 472 val = cpuc->pcr;
160 pcr_ops->write((val & ~mask) | hwc->config); 473 val &= ~mask;
474 val |= hwc->config;
475 cpuc->pcr = val;
476
477 pcr_ops->write(cpuc->pcr);
161} 478}
162 479
163static inline void sparc_pmu_disable_event(struct hw_perf_event *hwc, 480static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
164 int idx)
165{ 481{
166 u64 mask = mask_for_index(idx); 482 u64 mask = mask_for_index(idx);
167 u64 nop = nop_for_index(idx); 483 u64 nop = nop_for_index(idx);
168 u64 val = pcr_ops->read(); 484 u64 val;
169 485
170 pcr_ops->write((val & ~mask) | nop); 486 val = cpuc->pcr;
487 val &= ~mask;
488 val |= nop;
489 cpuc->pcr = val;
490
491 pcr_ops->write(cpuc->pcr);
171} 492}
172 493
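Both helpers now edit a per-cpu software shadow (cpuc->pcr) and treat the hardware register as write-only, instead of reading %pcr back on every update. The pattern, sketched generically:

    /* Shadowed-register pattern adopted by the patch (sketch only): */
    static void update_pcr(struct cpu_hw_events *cpuc, u64 clear, u64 set)
    {
            u64 val = cpuc->pcr;    /* the shadow is the source of truth */

            val &= ~clear;
            val |= set;
            cpuc->pcr = val;

            pcr_ops->write(cpuc->pcr);  /* hardware is only ever written */
    }

The shadow is seeded from the hardware exactly once, in perf_stop_nmi_watchdog() further down, after the NMI watchdog has released the counter.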
173void hw_perf_enable(void) 494void hw_perf_enable(void)
@@ -182,7 +503,7 @@ void hw_perf_enable(void)
182 cpuc->enabled = 1; 503 cpuc->enabled = 1;
183 barrier(); 504 barrier();
184 505
185 val = pcr_ops->read(); 506 val = cpuc->pcr;
186 507
187 for (i = 0; i < MAX_HWEVENTS; i++) { 508 for (i = 0; i < MAX_HWEVENTS; i++) {
188 struct perf_event *cp = cpuc->events[i]; 509 struct perf_event *cp = cpuc->events[i];
@@ -194,7 +515,9 @@ void hw_perf_enable(void)
194 val |= hwc->config_base; 515 val |= hwc->config_base;
195 } 516 }
196 517
197 pcr_ops->write(val); 518 cpuc->pcr = val;
519
520 pcr_ops->write(cpuc->pcr);
198} 521}
199 522
200void hw_perf_disable(void) 523void hw_perf_disable(void)
@@ -207,10 +530,12 @@ void hw_perf_disable(void)
207 530
208 cpuc->enabled = 0; 531 cpuc->enabled = 0;
209 532
210 val = pcr_ops->read(); 533 val = cpuc->pcr;
211 val &= ~(PCR_UTRACE | PCR_STRACE | 534 val &= ~(PCR_UTRACE | PCR_STRACE |
212 sparc_pmu->hv_bit | sparc_pmu->irq_bit); 535 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
213 pcr_ops->write(val); 536 cpuc->pcr = val;
537
538 pcr_ops->write(cpuc->pcr);
214} 539}
215 540
216static u32 read_pmc(int idx) 541static u32 read_pmc(int idx)
@@ -242,7 +567,7 @@ static void write_pmc(int idx, u64 val)
242} 567}
243 568
244static int sparc_perf_event_set_period(struct perf_event *event, 569static int sparc_perf_event_set_period(struct perf_event *event,
245 struct hw_perf_event *hwc, int idx) 570 struct hw_perf_event *hwc, int idx)
246{ 571{
247 s64 left = atomic64_read(&hwc->period_left); 572 s64 left = atomic64_read(&hwc->period_left);
248 s64 period = hwc->sample_period; 573 s64 period = hwc->sample_period;
@@ -282,19 +607,19 @@ static int sparc_pmu_enable(struct perf_event *event)
282 if (test_and_set_bit(idx, cpuc->used_mask)) 607 if (test_and_set_bit(idx, cpuc->used_mask))
283 return -EAGAIN; 608 return -EAGAIN;
284 609
285 sparc_pmu_disable_event(hwc, idx); 610 sparc_pmu_disable_event(cpuc, hwc, idx);
286 611
287 cpuc->events[idx] = event; 612 cpuc->events[idx] = event;
288 set_bit(idx, cpuc->active_mask); 613 set_bit(idx, cpuc->active_mask);
289 614
290 sparc_perf_event_set_period(event, hwc, idx); 615 sparc_perf_event_set_period(event, hwc, idx);
291 sparc_pmu_enable_event(hwc, idx); 616 sparc_pmu_enable_event(cpuc, hwc, idx);
292 perf_event_update_userpage(event); 617 perf_event_update_userpage(event);
293 return 0; 618 return 0;
294} 619}
295 620
296static u64 sparc_perf_event_update(struct perf_event *event, 621static u64 sparc_perf_event_update(struct perf_event *event,
297 struct hw_perf_event *hwc, int idx) 622 struct hw_perf_event *hwc, int idx)
298{ 623{
299 int shift = 64 - 32; 624 int shift = 64 - 32;
300 u64 prev_raw_count, new_raw_count; 625 u64 prev_raw_count, new_raw_count;
@@ -324,7 +649,7 @@ static void sparc_pmu_disable(struct perf_event *event)
324 int idx = hwc->idx; 649 int idx = hwc->idx;
325 650
326 clear_bit(idx, cpuc->active_mask); 651 clear_bit(idx, cpuc->active_mask);
327 sparc_pmu_disable_event(hwc, idx); 652 sparc_pmu_disable_event(cpuc, hwc, idx);
328 653
329 barrier(); 654 barrier();
330 655
@@ -338,18 +663,29 @@ static void sparc_pmu_disable(struct perf_event *event)
338static void sparc_pmu_read(struct perf_event *event) 663static void sparc_pmu_read(struct perf_event *event)
339{ 664{
340 struct hw_perf_event *hwc = &event->hw; 665 struct hw_perf_event *hwc = &event->hw;
666
341 sparc_perf_event_update(event, hwc, hwc->idx); 667 sparc_perf_event_update(event, hwc, hwc->idx);
342} 668}
343 669
344static void sparc_pmu_unthrottle(struct perf_event *event) 670static void sparc_pmu_unthrottle(struct perf_event *event)
345{ 671{
672 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
346 struct hw_perf_event *hwc = &event->hw; 673 struct hw_perf_event *hwc = &event->hw;
347 sparc_pmu_enable_event(hwc, hwc->idx); 674
675 sparc_pmu_enable_event(cpuc, hwc, hwc->idx);
348} 676}
349 677
350static atomic_t active_events = ATOMIC_INIT(0); 678static atomic_t active_events = ATOMIC_INIT(0);
351static DEFINE_MUTEX(pmc_grab_mutex); 679static DEFINE_MUTEX(pmc_grab_mutex);
352 680
681static void perf_stop_nmi_watchdog(void *unused)
682{
683 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
684
685 stop_nmi_watchdog(NULL);
686 cpuc->pcr = pcr_ops->read();
687}
688
353void perf_event_grab_pmc(void) 689void perf_event_grab_pmc(void)
354{ 690{
355 if (atomic_inc_not_zero(&active_events)) 691 if (atomic_inc_not_zero(&active_events))
@@ -358,7 +694,7 @@ void perf_event_grab_pmc(void)
358 mutex_lock(&pmc_grab_mutex); 694 mutex_lock(&pmc_grab_mutex);
359 if (atomic_read(&active_events) == 0) { 695 if (atomic_read(&active_events) == 0) {
360 if (atomic_read(&nmi_active) > 0) { 696 if (atomic_read(&nmi_active) > 0) {
361 on_each_cpu(stop_nmi_watchdog, NULL, 1); 697 on_each_cpu(perf_stop_nmi_watchdog, NULL, 1);
362 BUG_ON(atomic_read(&nmi_active) != 0); 698 BUG_ON(atomic_read(&nmi_active) != 0);
363 } 699 }
364 atomic_inc(&active_events); 700 atomic_inc(&active_events);
@@ -375,30 +711,160 @@ void perf_event_release_pmc(void)
375 } 711 }
376} 712}
377 713
714static const struct perf_event_map *sparc_map_cache_event(u64 config)
715{
716 unsigned int cache_type, cache_op, cache_result;
717 const struct perf_event_map *pmap;
718
719 if (!sparc_pmu->cache_map)
720 return ERR_PTR(-ENOENT);
721
722 cache_type = (config >> 0) & 0xff;
723 if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
724 return ERR_PTR(-EINVAL);
725
726 cache_op = (config >> 8) & 0xff;
727 if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
728 return ERR_PTR(-EINVAL);
729
730 cache_result = (config >> 16) & 0xff;
731 if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
732 return ERR_PTR(-EINVAL);
733
734 pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]);
735
736 if (pmap->encoding == CACHE_OP_UNSUPPORTED)
737 return ERR_PTR(-ENOENT);
738
739 if (pmap->encoding == CACHE_OP_NONSENSE)
740 return ERR_PTR(-EINVAL);
741
742 return pmap;
743}
744
378static void hw_perf_event_destroy(struct perf_event *event) 745static void hw_perf_event_destroy(struct perf_event *event)
379{ 746{
380 perf_event_release_pmc(); 747 perf_event_release_pmc();
381} 748}
382 749
750/* Make sure all events can be scheduled into the hardware at
751 * the same time. This is simplified by the fact that we only
752 * need to support 2 simultaneous HW events.
753 */
754static int sparc_check_constraints(unsigned long *events, int n_ev)
755{
756 if (n_ev <= perf_max_events) {
757 u8 msk1, msk2;
758 u16 dummy;
759
760 if (n_ev == 1)
761 return 0;
762 BUG_ON(n_ev != 2);
763 perf_event_decode(events[0], &dummy, &msk1);
764 perf_event_decode(events[1], &dummy, &msk2);
765
766 /* If both events can go on any counter, OK. */
767 if (msk1 == (PIC_UPPER | PIC_LOWER) &&
768 msk2 == (PIC_UPPER | PIC_LOWER))
769 return 0;
770
771 /* If one event is limited to a specific counter,
772 * and the other can go on both, OK.
773 */
774 if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) &&
775 msk2 == (PIC_UPPER | PIC_LOWER))
776 return 0;
777 if ((msk2 == PIC_UPPER || msk2 == PIC_LOWER) &&
778 msk1 == (PIC_UPPER | PIC_LOWER))
779 return 0;
780
781 /* If the events are fixed to different counters, OK. */
782 if ((msk1 == PIC_UPPER && msk2 == PIC_LOWER) ||
783 (msk1 == PIC_LOWER && msk2 == PIC_UPPER))
784 return 0;
785
786 /* Otherwise, there is a conflict. */
787 }
788
789 return -1;
790}
791
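With only two counters, constraint checking reduces to comparing the two PIC masks. The possible outcomes, enumerated:

    /* What sparc_check_constraints() decides for a pair of events:
     *
     *   msk1 = UPPER|LOWER, msk2 = UPPER|LOWER  ->  0  (any placement)
     *   msk1 = UPPER,       msk2 = UPPER|LOWER  ->  0  (flexible one moves)
     *   msk1 = UPPER,       msk2 = LOWER        ->  0  (disjoint counters)
     *   msk1 = UPPER,       msk2 = UPPER        -> -1  (same fixed counter)
     */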
792static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
793{
794 int eu = 0, ek = 0, eh = 0;
795 struct perf_event *event;
796 int i, n, first;
797
798 n = n_prev + n_new;
799 if (n <= 1)
800 return 0;
801
802 first = 1;
803 for (i = 0; i < n; i++) {
804 event = evts[i];
805 if (first) {
806 eu = event->attr.exclude_user;
807 ek = event->attr.exclude_kernel;
808 eh = event->attr.exclude_hv;
809 first = 0;
810 } else if (event->attr.exclude_user != eu ||
811 event->attr.exclude_kernel != ek ||
812 event->attr.exclude_hv != eh) {
813 return -EAGAIN;
814 }
815 }
816
817 return 0;
818}
819
820static int collect_events(struct perf_event *group, int max_count,
821 struct perf_event *evts[], unsigned long *events)
822{
823 struct perf_event *event;
824 int n = 0;
825
826 if (!is_software_event(group)) {
827 if (n >= max_count)
828 return -1;
829 evts[n] = group;
830 events[n++] = group->hw.event_base;
831 }
832 list_for_each_entry(event, &group->sibling_list, group_entry) {
833 if (!is_software_event(event) &&
834 event->state != PERF_EVENT_STATE_OFF) {
835 if (n >= max_count)
836 return -1;
837 evts[n] = event;
838 events[n++] = event->hw.event_base;
839 }
840 }
841 return n;
842}
843
383static int __hw_perf_event_init(struct perf_event *event) 844static int __hw_perf_event_init(struct perf_event *event)
384{ 845{
385 struct perf_event_attr *attr = &event->attr; 846 struct perf_event_attr *attr = &event->attr;
847 struct perf_event *evts[MAX_HWEVENTS];
386 struct hw_perf_event *hwc = &event->hw; 848 struct hw_perf_event *hwc = &event->hw;
849 unsigned long events[MAX_HWEVENTS];
387 const struct perf_event_map *pmap; 850 const struct perf_event_map *pmap;
388 u64 enc; 851 u64 enc;
852 int n;
389 853
390 if (atomic_read(&nmi_active) < 0) 854 if (atomic_read(&nmi_active) < 0)
391 return -ENODEV; 855 return -ENODEV;
392 856
393 if (attr->type != PERF_TYPE_HARDWARE) 857 if (attr->type == PERF_TYPE_HARDWARE) {
858 if (attr->config >= sparc_pmu->max_events)
859 return -EINVAL;
860 pmap = sparc_pmu->event_map(attr->config);
861 } else if (attr->type == PERF_TYPE_HW_CACHE) {
862 pmap = sparc_map_cache_event(attr->config);
863 if (IS_ERR(pmap))
864 return PTR_ERR(pmap);
865 } else
394 return -EOPNOTSUPP; 866 return -EOPNOTSUPP;
395 867
396 if (attr->config >= sparc_pmu->max_events)
397 return -EINVAL;
398
399 perf_event_grab_pmc();
400 event->destroy = hw_perf_event_destroy;
401
402 /* We save the enable bits in the config_base. So to 868 /* We save the enable bits in the config_base. So to
403 * turn off sampling just write 'config', and to enable 869 * turn off sampling just write 'config', and to enable
404 * things write 'config | config_base'. 870 * things write 'config | config_base'.
@@ -411,15 +877,39 @@ static int __hw_perf_event_init(struct perf_event *event)
411 if (!attr->exclude_hv) 877 if (!attr->exclude_hv)
412 hwc->config_base |= sparc_pmu->hv_bit; 878 hwc->config_base |= sparc_pmu->hv_bit;
413 879
880 hwc->event_base = perf_event_encode(pmap);
881
882 enc = pmap->encoding;
883
884 n = 0;
885 if (event->group_leader != event) {
886 n = collect_events(event->group_leader,
887 perf_max_events - 1,
888 evts, events);
889 if (n < 0)
890 return -EINVAL;
891 }
892 events[n] = hwc->event_base;
893 evts[n] = event;
894
895 if (check_excludes(evts, n, 1))
896 return -EINVAL;
897
898 if (sparc_check_constraints(events, n + 1))
899 return -EINVAL;
900
901 /* Try to do all error checking before this point, as unwinding
902 * state after grabbing the PMC is difficult.
903 */
904 perf_event_grab_pmc();
905 event->destroy = hw_perf_event_destroy;
906
414 if (!hwc->sample_period) { 907 if (!hwc->sample_period) {
415 hwc->sample_period = MAX_PERIOD; 908 hwc->sample_period = MAX_PERIOD;
416 hwc->last_period = hwc->sample_period; 909 hwc->last_period = hwc->sample_period;
417 atomic64_set(&hwc->period_left, hwc->sample_period); 910 atomic64_set(&hwc->period_left, hwc->sample_period);
418 } 911 }
419 912
420 pmap = sparc_pmu->event_map(attr->config);
421
422 enc = pmap->encoding;
423 if (pmap->pic_mask & PIC_UPPER) { 913 if (pmap->pic_mask & PIC_UPPER) {
424 hwc->idx = PIC_UPPER_INDEX; 914 hwc->idx = PIC_UPPER_INDEX;
425 enc <<= sparc_pmu->upper_shift; 915 enc <<= sparc_pmu->upper_shift;
@@ -472,7 +962,7 @@ void perf_event_print_debug(void)
472} 962}
473 963
474static int __kprobes perf_event_nmi_handler(struct notifier_block *self, 964static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
475 unsigned long cmd, void *__args) 965 unsigned long cmd, void *__args)
476{ 966{
477 struct die_args *args = __args; 967 struct die_args *args = __args;
478 struct perf_sample_data data; 968 struct perf_sample_data data;
@@ -513,7 +1003,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
513 continue; 1003 continue;
514 1004
515 if (perf_event_overflow(event, 1, &data, regs)) 1005 if (perf_event_overflow(event, 1, &data, regs))
516 sparc_pmu_disable_event(hwc, idx); 1006 sparc_pmu_disable_event(cpuc, hwc, idx);
517 } 1007 }
518 1008
519 return NOTIFY_STOP; 1009 return NOTIFY_STOP;
@@ -525,8 +1015,15 @@ static __read_mostly struct notifier_block perf_event_nmi_notifier = {
525 1015
526static bool __init supported_pmu(void) 1016static bool __init supported_pmu(void)
527{ 1017{
528 if (!strcmp(sparc_pmu_type, "ultra3i")) { 1018 if (!strcmp(sparc_pmu_type, "ultra3") ||
529 sparc_pmu = &ultra3i_pmu; 1019 !strcmp(sparc_pmu_type, "ultra3+") ||
1020 !strcmp(sparc_pmu_type, "ultra3i") ||
1021 !strcmp(sparc_pmu_type, "ultra4+")) {
1022 sparc_pmu = &ultra3_pmu;
1023 return true;
1024 }
1025 if (!strcmp(sparc_pmu_type, "niagara")) {
1026 sparc_pmu = &niagara1_pmu;
530 return true; 1027 return true;
531 } 1028 }
532 if (!strcmp(sparc_pmu_type, "niagara2")) { 1029 if (!strcmp(sparc_pmu_type, "niagara2")) {
diff --git a/arch/sparc/oprofile/init.c b/arch/sparc/oprofile/init.c
index f97cb8b6ee5..f9024bccff1 100644
--- a/arch/sparc/oprofile/init.c
+++ b/arch/sparc/oprofile/init.c
@@ -11,6 +11,7 @@
11#include <linux/oprofile.h> 11#include <linux/oprofile.h>
12#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/param.h> /* for HZ */
14 15
15#ifdef CONFIG_SPARC64 16#ifdef CONFIG_SPARC64
16#include <linux/notifier.h> 17#include <linux/notifier.h>
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 8da93745c08..c876bace8fd 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -86,10 +86,6 @@ config STACKTRACE_SUPPORT
86config HAVE_LATENCYTOP_SUPPORT 86config HAVE_LATENCYTOP_SUPPORT
87 def_bool y 87 def_bool y
88 88
89config FAST_CMPXCHG_LOCAL
90 bool
91 default y
92
93config MMU 89config MMU
94 def_bool y 90 def_bool y
95 91
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 527519b8a9f..f2824fb8c79 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -400,7 +400,7 @@ config X86_TSC
400 400
401config X86_CMPXCHG64 401config X86_CMPXCHG64
402 def_bool y 402 def_bool y
403 depends on X86_PAE || X86_64 403 depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM
404 404
405# this should be set for all -march=.. options where the compiler 405# this should be set for all -march=.. options where the compiler
406# generates cmov. 406# generates cmov.
@@ -412,6 +412,7 @@ config X86_MINIMUM_CPU_FAMILY
412 int 412 int
413 default "64" if X86_64 413 default "64" if X86_64
414 default "6" if X86_32 && X86_P6_NOP 414 default "6" if X86_32 && X86_P6_NOP
415 default "5" if X86_32 && X86_CMPXCHG64
415 default "4" if X86_32 && (X86_XADD || X86_CMPXCHG || X86_BSWAP || X86_WP_WORKS_OK) 416 default "4" if X86_32 && (X86_XADD || X86_CMPXCHG || X86_BSWAP || X86_WP_WORKS_OK)
416 default "3" 417 default "3"
417 418
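Marking these CPU types as cmpxchg8b-capable lets generic code use the instruction directly (and, as the i386_ksyms_32.c hunk below shows, compiles out the cmpxchg8b_emu fallback). A usage sketch of the API the option gates; the include path is from memory and may differ for this kernel vintage:

    #include <linux/types.h>
    #include <asm/system.h>         /* cmpxchg64() on 2.6.3x, roughly */

    static u64 counter;

    static void bump64(void)
    {
            u64 old, new;

            do {
                    old = counter;
                    new = old + 1;
            } while (cmpxchg64(&counter, old, new) != old);
    }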
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 74619c4f9fd..1733f9f65e8 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -21,8 +21,8 @@
21#define __AUDIT_ARCH_LE 0x40000000 21#define __AUDIT_ARCH_LE 0x40000000
22 22
23#ifndef CONFIG_AUDITSYSCALL 23#ifndef CONFIG_AUDITSYSCALL
24#define sysexit_audit int_ret_from_sys_call 24#define sysexit_audit ia32_ret_from_sys_call
25#define sysretl_audit int_ret_from_sys_call 25#define sysretl_audit ia32_ret_from_sys_call
26#endif 26#endif
27 27
28#define IA32_NR_syscalls ((ia32_syscall_end - ia32_sys_call_table)/8) 28#define IA32_NR_syscalls ((ia32_syscall_end - ia32_sys_call_table)/8)
@@ -39,12 +39,12 @@
39 .endm 39 .endm
40 40
41 /* clobbers %eax */ 41 /* clobbers %eax */
42 .macro CLEAR_RREGS _r9=rax 42 .macro CLEAR_RREGS offset=0, _r9=rax
43 xorl %eax,%eax 43 xorl %eax,%eax
44 movq %rax,R11(%rsp) 44 movq %rax,\offset+R11(%rsp)
45 movq %rax,R10(%rsp) 45 movq %rax,\offset+R10(%rsp)
46 movq %\_r9,R9(%rsp) 46 movq %\_r9,\offset+R9(%rsp)
47 movq %rax,R8(%rsp) 47 movq %rax,\offset+R8(%rsp)
48 .endm 48 .endm
49 49
50 /* 50 /*
@@ -172,6 +172,10 @@ sysexit_from_sys_call:
172 movl RIP-R11(%rsp),%edx /* User %eip */ 172 movl RIP-R11(%rsp),%edx /* User %eip */
173 CFI_REGISTER rip,rdx 173 CFI_REGISTER rip,rdx
174 RESTORE_ARGS 1,24,1,1,1,1 174 RESTORE_ARGS 1,24,1,1,1,1
175 xorq %r8,%r8
176 xorq %r9,%r9
177 xorq %r10,%r10
178 xorq %r11,%r11
175 popfq 179 popfq
176 CFI_ADJUST_CFA_OFFSET -8 180 CFI_ADJUST_CFA_OFFSET -8
177 /*CFI_RESTORE rflags*/ 181 /*CFI_RESTORE rflags*/
@@ -202,7 +206,7 @@ sysexit_from_sys_call:
202 206
203 .macro auditsys_exit exit,ebpsave=RBP 207 .macro auditsys_exit exit,ebpsave=RBP
204 testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10) 208 testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
205 jnz int_ret_from_sys_call 209 jnz ia32_ret_from_sys_call
206 TRACE_IRQS_ON 210 TRACE_IRQS_ON
207 sti 211 sti
208 movl %eax,%esi /* second arg, syscall return value */ 212 movl %eax,%esi /* second arg, syscall return value */
@@ -218,8 +222,9 @@ sysexit_from_sys_call:
218 cli 222 cli
219 TRACE_IRQS_OFF 223 TRACE_IRQS_OFF
220 testl %edi,TI_flags(%r10) 224 testl %edi,TI_flags(%r10)
221 jnz int_with_check 225 jz \exit
222 jmp \exit 226 CLEAR_RREGS -ARGOFFSET
227 jmp int_with_check
223 .endm 228 .endm
224 229
225sysenter_auditsys: 230sysenter_auditsys:
@@ -329,6 +334,9 @@ sysretl_from_sys_call:
329 CFI_REGISTER rip,rcx 334 CFI_REGISTER rip,rcx
330 movl EFLAGS-ARGOFFSET(%rsp),%r11d 335 movl EFLAGS-ARGOFFSET(%rsp),%r11d
331 /*CFI_REGISTER rflags,r11*/ 336 /*CFI_REGISTER rflags,r11*/
337 xorq %r10,%r10
338 xorq %r9,%r9
339 xorq %r8,%r8
332 TRACE_IRQS_ON 340 TRACE_IRQS_ON
333 movl RSP-ARGOFFSET(%rsp),%esp 341 movl RSP-ARGOFFSET(%rsp),%esp
334 CFI_RESTORE rsp 342 CFI_RESTORE rsp
@@ -353,7 +361,7 @@ cstar_tracesys:
353#endif 361#endif
354 xchgl %r9d,%ebp 362 xchgl %r9d,%ebp
355 SAVE_REST 363 SAVE_REST
356 CLEAR_RREGS r9 364 CLEAR_RREGS 0, r9
357 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */ 365 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
358 movq %rsp,%rdi /* &pt_regs -> arg1 */ 366 movq %rsp,%rdi /* &pt_regs -> arg1 */
359 call syscall_trace_enter 367 call syscall_trace_enter
@@ -425,6 +433,8 @@ ia32_do_call:
425 call *ia32_sys_call_table(,%rax,8) # xxx: rip relative 433 call *ia32_sys_call_table(,%rax,8) # xxx: rip relative
426ia32_sysret: 434ia32_sysret:
427 movq %rax,RAX-ARGOFFSET(%rsp) 435 movq %rax,RAX-ARGOFFSET(%rsp)
436ia32_ret_from_sys_call:
437 CLEAR_RREGS -ARGOFFSET
428 jmp int_ret_from_sys_call 438 jmp int_ret_from_sys_call
429 439
430ia32_tracesys: 440ia32_tracesys:
@@ -442,8 +452,8 @@ END(ia32_syscall)
442 452
443ia32_badsys: 453ia32_badsys:
444 movq $0,ORIG_RAX-ARGOFFSET(%rsp) 454 movq $0,ORIG_RAX-ARGOFFSET(%rsp)
445 movq $-ENOSYS,RAX-ARGOFFSET(%rsp) 455 movq $-ENOSYS,%rax
446 jmp int_ret_from_sys_call 456 jmp ia32_sysret
447 457
448quiet_ni_syscall: 458quiet_ni_syscall:
449 movq $-ENOSYS,%rax 459 movq $-ENOSYS,%rax
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 3be000435fa..d83892226f7 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -796,6 +796,7 @@ asmlinkage void kvm_handle_fault_on_reboot(void);
796#define KVM_ARCH_WANT_MMU_NOTIFIER 796#define KVM_ARCH_WANT_MMU_NOTIFIER
797int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); 797int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
798int kvm_age_hva(struct kvm *kvm, unsigned long hva); 798int kvm_age_hva(struct kvm *kvm, unsigned long hva);
799void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
799int cpuid_maxphyaddr(struct kvm_vcpu *vcpu); 800int cpuid_maxphyaddr(struct kvm_vcpu *vcpu);
800int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu); 801int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
801int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu); 802int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index b608a64c581..f1363b72364 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -133,6 +133,8 @@ static inline void winchip_mcheck_init(struct cpuinfo_x86 *c) {}
133static inline void enable_p5_mce(void) {} 133static inline void enable_p5_mce(void) {}
134#endif 134#endif
135 135
136extern void (*x86_mce_decode_callback)(struct mce *m);
137
136void mce_setup(struct mce *m); 138void mce_setup(struct mce *m);
137void mce_log(struct mce *m); 139void mce_log(struct mce *m);
138DECLARE_PER_CPU(struct sys_device, mce_dev); 140DECLARE_PER_CPU(struct sys_device, mce_dev);
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 183c3457d2f..b1598a9436d 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -85,6 +85,18 @@ static DECLARE_WAIT_QUEUE_HEAD(mce_wait);
85static DEFINE_PER_CPU(struct mce, mces_seen); 85static DEFINE_PER_CPU(struct mce, mces_seen);
86static int cpu_missing; 86static int cpu_missing;
87 87
88static void default_decode_mce(struct mce *m)
89{
90 pr_emerg("No human readable MCE decoding support on this CPU type.\n");
91 pr_emerg("Run the message through 'mcelog --ascii' to decode.\n");
92}
93
94/*
95 * CPU/chipset specific EDAC code can register a callback here to print
96 * MCE errors in a human-readable form:
97 */
98void (*x86_mce_decode_callback)(struct mce *m) = default_decode_mce;
99EXPORT_SYMBOL(x86_mce_decode_callback);
88 100
89/* MCA banks polled by the period polling timer for corrected events */ 101/* MCA banks polled by the period polling timer for corrected events */
90DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = { 102DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
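The default stub just points users at mcelog; CPU- or chipset-specific EDAC code replaces it at runtime. A hedged sketch of such a decoder module (names and internals are illustrative, not from this patch):

    #include <linux/kernel.h>
    #include <linux/module.h>
    #include <asm/mce.h>

    static void (*orig_decode)(struct mce *m);

    static void my_decode_mce(struct mce *m)
    {
            pr_emerg("bank %d status 0x%016llx: <chipset-specific text>\n",
                     m->bank, (unsigned long long)m->status);
    }

    static int __init my_edac_init(void)
    {
            orig_decode = x86_mce_decode_callback;
            x86_mce_decode_callback = my_decode_mce;
            return 0;
    }

    static void __exit my_edac_exit(void)
    {
            x86_mce_decode_callback = orig_decode;
    }

    module_init(my_edac_init);
    module_exit(my_edac_exit);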
@@ -165,46 +177,46 @@ void mce_log(struct mce *mce)
165 set_bit(0, &mce_need_notify); 177 set_bit(0, &mce_need_notify);
166} 178}
167 179
168void __weak decode_mce(struct mce *m)
169{
170 return;
171}
172
173static void print_mce(struct mce *m) 180static void print_mce(struct mce *m)
174{ 181{
175 printk(KERN_EMERG 182 pr_emerg("CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n",
176 "CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n",
177 m->extcpu, m->mcgstatus, m->bank, m->status); 183 m->extcpu, m->mcgstatus, m->bank, m->status);
184
178 if (m->ip) { 185 if (m->ip) {
179 printk(KERN_EMERG "RIP%s %02x:<%016Lx> ", 186 pr_emerg("RIP%s %02x:<%016Lx> ",
180 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "", 187 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
181 m->cs, m->ip); 188 m->cs, m->ip);
189
182 if (m->cs == __KERNEL_CS) 190 if (m->cs == __KERNEL_CS)
183 print_symbol("{%s}", m->ip); 191 print_symbol("{%s}", m->ip);
184 printk(KERN_CONT "\n"); 192 pr_cont("\n");
185 } 193 }
186 printk(KERN_EMERG "TSC %llx ", m->tsc); 194
195 pr_emerg("TSC %llx ", m->tsc);
187 if (m->addr) 196 if (m->addr)
188 printk(KERN_CONT "ADDR %llx ", m->addr); 197 pr_cont("ADDR %llx ", m->addr);
189 if (m->misc) 198 if (m->misc)
190 printk(KERN_CONT "MISC %llx ", m->misc); 199 pr_cont("MISC %llx ", m->misc);
191 printk(KERN_CONT "\n"); 200
192 printk(KERN_EMERG "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n", 201 pr_cont("\n");
193 m->cpuvendor, m->cpuid, m->time, m->socketid, 202 pr_emerg("PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
194 m->apicid); 203 m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid);
195 204
196 decode_mce(m); 205 /*
206 * Print out human-readable details about the MCE error,
207 * (if the CPU has an implementation for that):
208 */
209 x86_mce_decode_callback(m);
197} 210}
198 211
199static void print_mce_head(void) 212static void print_mce_head(void)
200{ 213{
201 printk(KERN_EMERG "\nHARDWARE ERROR\n"); 214 pr_emerg("\nHARDWARE ERROR\n");
202} 215}
203 216
204static void print_mce_tail(void) 217static void print_mce_tail(void)
205{ 218{
206 printk(KERN_EMERG "This is not a software problem!\n" 219 pr_emerg("This is not a software problem!\n");
207 "Run through mcelog --ascii to decode and contact your hardware vendor\n");
208} 220}
209 221
210#define PANIC_TIMEOUT 5 /* 5 seconds */ 222#define PANIC_TIMEOUT 5 /* 5 seconds */
@@ -218,6 +230,7 @@ static atomic_t mce_fake_paniced;
218static void wait_for_panic(void) 230static void wait_for_panic(void)
219{ 231{
220 long timeout = PANIC_TIMEOUT*USEC_PER_SEC; 232 long timeout = PANIC_TIMEOUT*USEC_PER_SEC;
233
221 preempt_disable(); 234 preempt_disable();
222 local_irq_enable(); 235 local_irq_enable();
223 while (timeout-- > 0) 236 while (timeout-- > 0)
@@ -285,6 +298,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
285static int msr_to_offset(u32 msr) 298static int msr_to_offset(u32 msr)
286{ 299{
287 unsigned bank = __get_cpu_var(injectm.bank); 300 unsigned bank = __get_cpu_var(injectm.bank);
301
288 if (msr == rip_msr) 302 if (msr == rip_msr)
289 return offsetof(struct mce, ip); 303 return offsetof(struct mce, ip);
290 if (msr == MSR_IA32_MCx_STATUS(bank)) 304 if (msr == MSR_IA32_MCx_STATUS(bank))
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
index f04e7252760..3c1b12d461d 100644
--- a/arch/x86/kernel/cpu/mtrr/if.c
+++ b/arch/x86/kernel/cpu/mtrr/if.c
@@ -96,17 +96,24 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
96 unsigned long long base, size; 96 unsigned long long base, size;
97 char *ptr; 97 char *ptr;
98 char line[LINE_SIZE]; 98 char line[LINE_SIZE];
99 int length;
99 size_t linelen; 100 size_t linelen;
100 101
101 if (!capable(CAP_SYS_ADMIN)) 102 if (!capable(CAP_SYS_ADMIN))
102 return -EPERM; 103 return -EPERM;
103 if (!len)
104 return -EINVAL;
105 104
106 memset(line, 0, LINE_SIZE); 105 memset(line, 0, LINE_SIZE);
107 if (len > LINE_SIZE) 106
108 len = LINE_SIZE; 107 length = len;
109 if (copy_from_user(line, buf, len - 1)) 108 length--;
109
110 if (length > LINE_SIZE - 1)
111 length = LINE_SIZE - 1;
112
113 if (length < 0)
114 return -EINVAL;
115
116 if (copy_from_user(line, buf, length))
110 return -EFAULT; 117 return -EFAULT;
111 118
112 linelen = strlen(line); 119 linelen = strlen(line);
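Switching to a signed length folds the empty-write case into the same bounds check and keeps the copy strictly inside the NUL-terminated buffer:

    /* Behaviour of the new length logic for a userspace write of len
     * bytes into char line[LINE_SIZE] (pre-zeroed by memset):
     *
     *   len == 0          ->  length = -1            -> -EINVAL
     *   len == 1          ->  length = 0             -> copies nothing
     *   len == LINE_SIZE  ->  length = LINE_SIZE - 1 -> NUL preserved
     *   len >  LINE_SIZE  ->  clamped the same way   -> no overflow
     */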
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index 41fd965c80c..b9c830c12b4 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -206,8 +206,11 @@ static int __init setup_early_printk(char *buf)
206 206
207 while (*buf != '\0') { 207 while (*buf != '\0') {
208 if (!strncmp(buf, "serial", 6)) { 208 if (!strncmp(buf, "serial", 6)) {
209 early_serial_init(buf + 6); 209 buf += 6;
210 early_serial_init(buf);
210 early_console_register(&early_serial_console, keep); 211 early_console_register(&early_serial_console, keep);
212 if (!strncmp(buf, ",ttyS", 5))
213 buf += 5;
211 } 214 }
212 if (!strncmp(buf, "ttyS", 4)) { 215 if (!strncmp(buf, "ttyS", 4)) {
213 early_serial_init(buf + 4); 216 early_serial_init(buf + 4);
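With the fix, the serial branch consumes its ",ttyS" suffix so the rest of the option string is parsed from the right place, and both documented spellings of the serial console (earlyprintk=serial[,ttySn[,baudrate]]) select the same device:

    earlyprintk=serial,ttyS0,115200
    earlyprintk=ttyS0,115200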
diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
index 1736c5a725a..9c3bd4a2050 100644
--- a/arch/x86/kernel/i386_ksyms_32.c
+++ b/arch/x86/kernel/i386_ksyms_32.c
@@ -15,8 +15,10 @@ EXPORT_SYMBOL(mcount);
15 * the export, but dont use it from C code, it is used 15 * the export, but dont use it from C code, it is used
16 * by assembly code and is not using C calling convention! 16 * by assembly code and is not using C calling convention!
17 */ 17 */
18#ifndef CONFIG_X86_CMPXCHG64
18extern void cmpxchg8b_emu(void); 19extern void cmpxchg8b_emu(void);
19EXPORT_SYMBOL(cmpxchg8b_emu); 20EXPORT_SYMBOL(cmpxchg8b_emu);
21#endif
20 22
21/* Networking helper routines. */ 23/* Networking helper routines. */
22EXPORT_SYMBOL(csum_partial_copy_generic); 24EXPORT_SYMBOL(csum_partial_copy_generic);
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 74656d1d4e3..39120619951 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -244,6 +244,7 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
244 __func__, smp_processor_id(), vector, irq); 244 __func__, smp_processor_id(), vector, irq);
245 } 245 }
246 246
247 run_local_timers();
247 irq_exit(); 248 irq_exit();
248 249
249 set_irq_regs(old_regs); 250 set_irq_regs(old_regs);
@@ -268,6 +269,7 @@ void smp_generic_interrupt(struct pt_regs *regs)
268 if (generic_interrupt_extension) 269 if (generic_interrupt_extension)
269 generic_interrupt_extension(); 270 generic_interrupt_extension();
270 271
272 run_local_timers();
271 irq_exit(); 273 irq_exit();
272 274
273 set_irq_regs(old_regs); 275 set_irq_regs(old_regs);
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 64b838eac18..d20009b4e6e 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -35,7 +35,7 @@ int iommu_detected __read_mostly = 0;
35 35
36/* 36/*
37 * This variable becomes 1 if iommu=pt is passed on the kernel command line. 37 * This variable becomes 1 if iommu=pt is passed on the kernel command line.
38 * If this variable is 1, IOMMU implementations do no DMA ranslation for 38 * If this variable is 1, IOMMU implementations do no DMA translation for
39 * devices and allow every device to access to whole physical memory. This is 39 * devices and allow every device to access to whole physical memory. This is
40 * useful if a user want to use an IOMMU only for KVM device assignment to 40 * useful if a user want to use an IOMMU only for KVM device assignment to
41 * guests and not for driver dma translation. 41 * guests and not for driver dma translation.
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index ec1de97600e..d915d956e66 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -198,6 +198,7 @@ void smp_reschedule_interrupt(struct pt_regs *regs)
198{ 198{
199 ack_APIC_irq(); 199 ack_APIC_irq();
200 inc_irq_stat(irq_resched_count); 200 inc_irq_stat(irq_resched_count);
201 run_local_timers();
201 /* 202 /*
202 * KVM uses this interrupt to force a cpu out of guest mode 203 * KVM uses this interrupt to force a cpu out of guest mode
203 */ 204 */
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 1ae5ceba7eb..7024224f0fc 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -664,7 +664,7 @@ static void start_apic_timer(struct kvm_lapic *apic)
664{ 664{
665 ktime_t now = apic->lapic_timer.timer.base->get_time(); 665 ktime_t now = apic->lapic_timer.timer.base->get_time();
666 666
667 apic->lapic_timer.period = apic_get_reg(apic, APIC_TMICT) * 667 apic->lapic_timer.period = (u64)apic_get_reg(apic, APIC_TMICT) *
668 APIC_BUS_CYCLE_NS * apic->divide_count; 668 APIC_BUS_CYCLE_NS * apic->divide_count;
669 atomic_set(&apic->lapic_timer.pending, 0); 669 atomic_set(&apic->lapic_timer.pending, 0);
670 670
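apic_get_reg() returns a 32-bit register value, so the three-way product used to be computed in 32-bit arithmetic and wrapped for large initial counts. A minimal illustration:

    u32 tmict        = 0x80000000;  /* large APIC_TMICT initial count */
    u32 bus_cycle_ns = 1;           /* APIC_BUS_CYCLE_NS              */
    u32 divide_count = 16;

    u64 wrong = tmict * bus_cycle_ns * divide_count;       /* wraps to 0 */
    u64 right = (u64)tmict * bus_cycle_ns * divide_count;  /* 0x800000000 */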
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index eca41ae9f45..685a4ffac8e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -156,6 +156,8 @@ module_param(oos_shadow, bool, 0644);
156#define CREATE_TRACE_POINTS 156#define CREATE_TRACE_POINTS
157#include "mmutrace.h" 157#include "mmutrace.h"
158 158
159#define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
160
159#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level) 161#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
160 162
161struct kvm_rmap_desc { 163struct kvm_rmap_desc {
@@ -634,9 +636,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
634 if (*spte & shadow_accessed_mask) 636 if (*spte & shadow_accessed_mask)
635 kvm_set_pfn_accessed(pfn); 637 kvm_set_pfn_accessed(pfn);
636 if (is_writeble_pte(*spte)) 638 if (is_writeble_pte(*spte))
637 kvm_release_pfn_dirty(pfn); 639 kvm_set_pfn_dirty(pfn);
638 else
639 kvm_release_pfn_clean(pfn);
640 rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], sp->role.level); 640 rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], sp->role.level);
641 if (!*rmapp) { 641 if (!*rmapp) {
642 printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte); 642 printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
@@ -748,7 +748,7 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
748 return write_protected; 748 return write_protected;
749} 749}
750 750
751static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp) 751static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 data)
752{ 752{
753 u64 *spte; 753 u64 *spte;
754 int need_tlb_flush = 0; 754 int need_tlb_flush = 0;
@@ -763,8 +763,45 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp)
 	return need_tlb_flush;
 }
 
-static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
-			  int (*handler)(struct kvm *kvm, unsigned long *rmapp))
+static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 data)
+{
+	int need_flush = 0;
+	u64 *spte, new_spte;
+	pte_t *ptep = (pte_t *)data;
+	pfn_t new_pfn;
+
+	WARN_ON(pte_huge(*ptep));
+	new_pfn = pte_pfn(*ptep);
+	spte = rmap_next(kvm, rmapp, NULL);
+	while (spte) {
+		BUG_ON(!is_shadow_present_pte(*spte));
+		rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte, *spte);
+		need_flush = 1;
+		if (pte_write(*ptep)) {
+			rmap_remove(kvm, spte);
+			__set_spte(spte, shadow_trap_nonpresent_pte);
+			spte = rmap_next(kvm, rmapp, NULL);
+		} else {
+			new_spte = *spte &~ (PT64_BASE_ADDR_MASK);
+			new_spte |= (u64)new_pfn << PAGE_SHIFT;
+
+			new_spte &= ~PT_WRITABLE_MASK;
+			new_spte &= ~SPTE_HOST_WRITEABLE;
+			if (is_writeble_pte(*spte))
+				kvm_set_pfn_dirty(spte_to_pfn(*spte));
+			__set_spte(spte, new_spte);
+			spte = rmap_next(kvm, rmapp, spte);
+		}
+	}
+	if (need_flush)
+		kvm_flush_remote_tlbs(kvm);
+
+	return 0;
+}
+
+static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, u64 data,
+			  int (*handler)(struct kvm *kvm, unsigned long *rmapp,
+					 u64 data))
 {
 	int i, j;
 	int retval = 0;
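The hunk above rewrites kvm_handle_hva() to thread an opaque u64 data argument into every rmap handler, so a single walker serves kvm_unmap_rmapp (which ignores the payload) and the new kvm_set_pte_rmapp (which smuggles a pte_t pointer through it). A reduced, self-contained sketch of that callback-with-payload pattern, using hypothetical names:

    #include <stdio.h>
    #include <stdint.h>

    /* handlers share one signature; 'data' is an opaque payload */
    typedef int (*rmap_handler_t)(unsigned long *rmapp, uint64_t data);

    static int unmap_handler(unsigned long *rmapp, uint64_t data)
    {
            (void)data;               /* unused, like kvm_unmap_rmapp */
            *rmapp = 0;
            return 1;                 /* pretend a TLB flush is needed */
    }

    static int set_pte_handler(unsigned long *rmapp, uint64_t data)
    {
            /* the payload carries a pointer, like the pte_t * in the patch */
            unsigned long *new_val = (unsigned long *)(uintptr_t)data;
            *rmapp = *new_val;
            return 0;
    }

    static int walk(unsigned long *slots, int n, uint64_t data,
                    rmap_handler_t handler)
    {
            int retval = 0;
            for (int i = 0; i < n; i++)
                    retval |= handler(&slots[i], data);
            return retval;
    }

    int main(void)
    {
            unsigned long slots[4] = { 1, 2, 3, 4 };
            unsigned long replacement = 42;

            walk(slots, 4, 0, unmap_handler);
            walk(slots, 4, (uint64_t)(uintptr_t)&replacement, set_pte_handler);
            printf("slots[0]=%lu\n", slots[0]);
            return 0;
    }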
@@ -786,13 +823,15 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
 		if (hva >= start && hva < end) {
 			gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
 
-			retval |= handler(kvm, &memslot->rmap[gfn_offset]);
+			retval |= handler(kvm, &memslot->rmap[gfn_offset],
+					  data);
 
 			for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) {
 				int idx = gfn_offset;
 				idx /= KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL + j);
 				retval |= handler(kvm,
-					&memslot->lpage_info[j][idx].rmap_pde);
+					&memslot->lpage_info[j][idx].rmap_pde,
+					data);
 			}
 		}
 	}
@@ -802,10 +841,15 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
 
 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
 {
-	return kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
+	return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp);
 }
 
-static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp)
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+{
+	kvm_handle_hva(kvm, hva, (u64)&pte, kvm_set_pte_rmapp);
+}
+
+static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 data)
 {
 	u64 *spte;
 	int young = 0;
@@ -841,13 +885,13 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 	gfn = unalias_gfn(vcpu->kvm, gfn);
 	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
 
-	kvm_unmap_rmapp(vcpu->kvm, rmapp);
+	kvm_unmap_rmapp(vcpu->kvm, rmapp, 0);
 	kvm_flush_remote_tlbs(vcpu->kvm);
 }
 
 int kvm_age_hva(struct kvm *kvm, unsigned long hva)
 {
-	return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
+	return kvm_handle_hva(kvm, hva, 0, kvm_age_rmapp);
 }
 
 #ifdef MMU_DEBUG
@@ -1756,7 +1800,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		    unsigned pte_access, int user_fault,
 		    int write_fault, int dirty, int level,
 		    gfn_t gfn, pfn_t pfn, bool speculative,
-		    bool can_unsync)
+		    bool can_unsync, bool reset_host_protection)
 {
 	u64 spte;
 	int ret = 0;
@@ -1783,6 +1827,9 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
 			kvm_is_mmio_pfn(pfn));
 
+	if (reset_host_protection)
+		spte |= SPTE_HOST_WRITEABLE;
+
 	spte |= (u64)pfn << PAGE_SHIFT;
 
 	if ((pte_access & ACC_WRITE_MASK)
@@ -1828,7 +1875,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 			 unsigned pt_access, unsigned pte_access,
 			 int user_fault, int write_fault, int dirty,
 			 int *ptwrite, int level, gfn_t gfn,
-			 pfn_t pfn, bool speculative)
+			 pfn_t pfn, bool speculative,
+			 bool reset_host_protection)
 {
 	int was_rmapped = 0;
 	int was_writeble = is_writeble_pte(*sptep);
@@ -1860,7 +1908,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	}
 
 	if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault,
-		      dirty, level, gfn, pfn, speculative, true)) {
+		      dirty, level, gfn, pfn, speculative, true,
+		      reset_host_protection)) {
 		if (write_fault)
 			*ptwrite = 1;
 		kvm_x86_ops->tlb_flush(vcpu);
@@ -1877,8 +1926,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	page_header_update_slot(vcpu->kvm, sptep, gfn);
 	if (!was_rmapped) {
 		rmap_count = rmap_add(vcpu, sptep, gfn);
-		if (!is_rmap_spte(*sptep))
-			kvm_release_pfn_clean(pfn);
+		kvm_release_pfn_clean(pfn);
 		if (rmap_count > RMAP_RECYCLE_THRESHOLD)
 			rmap_recycle(vcpu, sptep, gfn);
 	} else {
@@ -1909,7 +1957,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
 		if (iterator.level == level) {
 			mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
 				     0, write, 1, &pt_write,
-				     level, gfn, pfn, false);
+				     level, gfn, pfn, false, true);
 			++vcpu->stat.pf_fixed;
 			break;
 		}
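Taken together, the mmu.c hunks make each shadow PTE record whether the host mapping was writable when the SPTE was created: set_spte() stamps SPTE_HOST_WRITEABLE when reset_host_protection is true, and kvm_set_pte_rmapp() clears it together with PT_WRITABLE_MASK when the host downgrades a page. A sketch of that bit bookkeeping with stand-in mask values (the kernel derives the real masks from the PTE layout):

    #include <stdio.h>
    #include <stdint.h>

    /* stand-in masks, not the kernel's real constants */
    #define DEMO_WRITABLE       (1ULL << 1)
    #define DEMO_HOST_WRITEABLE (1ULL << 52)
    #define DEMO_ADDR_MASK      0x000ffffffffff000ULL

    /* mirrors what kvm_set_pte_rmapp does for a read-only host PTE */
    static uint64_t downgrade_spte(uint64_t spte, uint64_t new_pfn)
    {
            uint64_t new_spte = spte & ~DEMO_ADDR_MASK;

            new_spte |= new_pfn << 12;        /* point at the new page */
            new_spte &= ~DEMO_WRITABLE;       /* guest loses write access */
            new_spte &= ~DEMO_HOST_WRITEABLE; /* remember host said read-only */
            return new_spte;
    }

    int main(void)
    {
            uint64_t spte = 0xabc000ULL | DEMO_WRITABLE | DEMO_HOST_WRITEABLE;
            uint64_t downgraded = downgrade_spte(spte, 0xdef);

            printf("before=%#llx after=%#llx\n",
                   (unsigned long long)spte, (unsigned long long)downgraded);
            return 0;
    }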
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index d2fec9c12d2..72558f8ff3f 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -273,9 +273,13 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
 	if (mmu_notifier_retry(vcpu, vcpu->arch.update_pte.mmu_seq))
 		return;
 	kvm_get_pfn(pfn);
+	/*
+	 * we call mmu_set_spte() with reset_host_protection = true because
+	 * vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1).
+	 */
 	mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
 		     gpte & PT_DIRTY_MASK, NULL, PT_PAGE_TABLE_LEVEL,
-		     gpte_to_gfn(gpte), pfn, true);
+		     gpte_to_gfn(gpte), pfn, true, true);
 }
 
 /*
@@ -308,7 +312,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 				     user_fault, write_fault,
 				     gw->ptes[gw->level-1] & PT_DIRTY_MASK,
 				     ptwrite, level,
-				     gw->gfn, pfn, false);
+				     gw->gfn, pfn, false, true);
 			break;
 		}
 
@@ -558,6 +562,7 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
 static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
 	int i, offset, nr_present;
+	bool reset_host_protection;
 
 	offset = nr_present = 0;
 
@@ -595,9 +600,16 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 
 		nr_present++;
 		pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
+		if (!(sp->spt[i] & SPTE_HOST_WRITEABLE)) {
+			pte_access &= ~ACC_WRITE_MASK;
+			reset_host_protection = 0;
+		} else {
+			reset_host_protection = 1;
+		}
 		set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
 			 is_dirty_gpte(gpte), PT_PAGE_TABLE_LEVEL, gfn,
 			 spte_to_pfn(sp->spt[i]), true, false,
+			 reset_host_protection);
 	}
 
 	return !nr_present;
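sync_page() is where the saved bit pays off: if SPTE_HOST_WRITEABLE has been cleared, the resync must not hand write access back to the guest, so ACC_WRITE_MASK is masked out and reset_host_protection stays false. The decision, reduced to a compilable sketch with stand-in constants:

    #include <stdio.h>
    #include <stdint.h>

    #define DEMO_HOST_WRITEABLE (1ULL << 52) /* stand-in mask */
    #define DEMO_ACC_WRITE      (1 << 1)     /* stand-in access bit */

    /* hypothetical mirror of the sync_page() access decision */
    static int compute_access(uint64_t spte, int pte_access,
                              int *reset_host_protection)
    {
            if (!(spte & DEMO_HOST_WRITEABLE)) {
                    pte_access &= ~DEMO_ACC_WRITE;  /* host revoked writes */
                    *reset_host_protection = 0;
            } else {
                    *reset_host_protection = 1;     /* safe to re-arm */
            }
            return pte_access;
    }

    int main(void)
    {
            int reset;
            int access = compute_access(0, DEMO_ACC_WRITE, &reset);

            printf("access=%d reset=%d\n", access, reset);
            return 0;
    }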
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 944cc9c04b3..c17404add91 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -767,6 +767,8 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		rdtscll(tsc_this);
 		delta = vcpu->arch.host_tsc - tsc_this;
 		svm->vmcb->control.tsc_offset += delta;
+		if (is_nested(svm))
+			svm->nested.hsave->control.tsc_offset += delta;
 		vcpu->cpu = cpu;
 		kvm_migrate_timers(vcpu);
 		svm->asid_generation = 0;
@@ -2057,10 +2059,14 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
 
 	switch (ecx) {
 	case MSR_IA32_TSC: {
-		u64 tsc;
+		u64 tsc_offset;
+
+		if (is_nested(svm))
+			tsc_offset = svm->nested.hsave->control.tsc_offset;
+		else
+			tsc_offset = svm->vmcb->control.tsc_offset;
 
-		rdtscll(tsc);
-		*data = svm->vmcb->control.tsc_offset + tsc;
+		*data = tsc_offset + native_read_tsc();
 		break;
 	}
 	case MSR_K6_STAR:
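On the read side, the guest-visible TSC is now derived as tsc_offset plus the host TSC, and while a nested guest is active the offset is taken from hsave, which preserves L1's view, rather than from the vmcb, which carries the offset of the currently running L2. The arithmetic as a plain sketch with hypothetical values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t host_tsc  = 1000000;             /* hypothetical rdtsc */
            uint64_t l1_offset = (uint64_t)-500000;   /* L1's offset */
            uint64_t l2_offset = (uint64_t)-700000;   /* L2's offset */
            int      is_nested = 1;

            /* mirrors svm_get_msr(): choose whose offset applies */
            uint64_t tsc_offset = is_nested ? l1_offset : l2_offset;
            uint64_t guest_tsc  = tsc_offset + host_tsc;

            printf("guest tsc = %llu\n", (unsigned long long)guest_tsc);
            return 0;
    }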
@@ -2146,10 +2152,17 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
 
 	switch (ecx) {
 	case MSR_IA32_TSC: {
-		u64 tsc;
+		u64 tsc_offset = data - native_read_tsc();
+		u64 g_tsc_offset = 0;
+
+		if (is_nested(svm)) {
+			g_tsc_offset = svm->vmcb->control.tsc_offset -
+				       svm->nested.hsave->control.tsc_offset;
+			svm->nested.hsave->control.tsc_offset = tsc_offset;
+		}
+
+		svm->vmcb->control.tsc_offset = tsc_offset + g_tsc_offset;
 
-		rdtscll(tsc);
-		svm->vmcb->control.tsc_offset = data - tsc;
 		break;
 	}
 	case MSR_K6_STAR:
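The write side must keep the L2-versus-L1 delta intact: g_tsc_offset captures how far the active vmcb offset diverges from L1's, the new L1 offset is stored in hsave, and the vmcb receives the new offset plus the old delta, so a TSC write by L1 does not disturb what L2 observes. Worked through with hypothetical numbers:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* hypothetical state while a nested (L2) guest is active */
            int64_t vmcb_offset  = -700; /* L2's effective offset */
            int64_t hsave_offset = -500; /* L1's preserved offset */
            int64_t new_offset   = -100; /* offset implied by the MSR write */

            /* mirrors svm_set_msr(): keep L2's delta constant */
            int64_t g_tsc_offset = vmcb_offset - hsave_offset;   /* -200 */
            hsave_offset = new_offset;
            vmcb_offset  = new_offset + g_tsc_offset;            /* -300 */

            /* the L2-vs-L1 delta is unchanged after the write */
            printf("delta after write = %lld\n",
                   (long long)(vmcb_offset - hsave_offset));
            return 0;
    }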
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index f3812014bd0..ed53b42caba 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -709,7 +709,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	if (vcpu->cpu != cpu) {
 		vcpu_clear(vmx);
 		kvm_migrate_timers(vcpu);
-		vpid_sync_vcpu_all(vmx);
+		set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
 		local_irq_disable();
 		list_add(&vmx->local_vcpus_link,
 			 &per_cpu(vcpus_on_cpu, cpu));
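Instead of synchronously syncing the VPID TLB context during migration, the vcpu now latches KVM_REQ_TLB_FLUSH in its request word, and the flush happens when the request is consumed later, before the next guest entry. The kernel uses atomic set_bit/test_and_clear_bit for this; the single-threaded sketch below only illustrates the deferred-request idiom, with hypothetical names:

    #include <stdio.h>

    #define REQ_TLB_FLUSH 0

    /* latch a request bit; cheap at the point the need is discovered */
    static void make_request(unsigned long *requests, int req)
    {
            *requests |= 1UL << req;
    }

    /* consume it at a single well-defined point (before guest entry) */
    static int check_request(unsigned long *requests, int req)
    {
            if (*requests & (1UL << req)) {
                    *requests &= ~(1UL << req);
                    return 1;
            }
            return 0;
    }

    int main(void)
    {
            unsigned long requests = 0;

            make_request(&requests, REQ_TLB_FLUSH);   /* e.g. on migration */
            if (check_request(&requests, REQ_TLB_FLUSH))
                    printf("flush TLB before entering guest\n");
            return 0;
    }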
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index be451ee4424..9b9695322f5 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1591,6 +1591,8 @@ static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
 
 	if (cpuid->nent < 1)
 		goto out;
+	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
+		cpuid->nent = KVM_MAX_CPUID_ENTRIES;
 	r = -ENOMEM;
 	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
 	if (!cpuid_entries)
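Clamping nent before the vmalloc() closes an obvious abuse: the count comes straight from userspace, and without an upper bound it sizes an arbitrarily large allocation. The defensive pattern, reduced to a userspace sketch with stand-in names and limits:

    #include <stdio.h>
    #include <stdlib.h>

    #define DEMO_MAX_ENTRIES 80   /* stand-in for KVM_MAX_CPUID_ENTRIES */

    struct demo_entry { int data[10]; };

    static struct demo_entry *alloc_entries(unsigned int nent)
    {
            if (nent < 1)
                    return NULL;
            if (nent > DEMO_MAX_ENTRIES)  /* clamp the untrusted count */
                    nent = DEMO_MAX_ENTRIES;
            return malloc(sizeof(struct demo_entry) * nent);
    }

    int main(void)
    {
            struct demo_entry *e = alloc_entries(1u << 30); /* hostile nent */

            printf("allocated %s\n", e ? "clamped buffer" : "nothing");
            free(e);
            return 0;
    }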
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 3e549b8ec8c..85f5db95c60 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -15,8 +15,10 @@ ifeq ($(CONFIG_X86_32),y)
         obj-y += atomic64_32.o
         lib-y += checksum_32.o
         lib-y += strstr_32.o
-        lib-y += semaphore_32.o string_32.o cmpxchg8b_emu.o
-
+        lib-y += semaphore_32.o string_32.o
+ifneq ($(CONFIG_X86_CMPXCHG64),y)
+        lib-y += cmpxchg8b_emu.o
+endif
         lib-$(CONFIG_X86_USE_3DNOW) += mmx_32.o
 else
         obj-y += io_64.o iomap_copy_64.o
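This kbuild change links the cmpxchg8b software emulation only when the configured CPU may lack the instruction, that is, when CONFIG_X86_CMPXCHG64 is not set. The operation being emulated is a 64-bit compare-and-swap on 32-bit x86; an illustrative equivalent using a GCC builtin rather than the kernel's primitive:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t val = 5;
            uint64_t expected = 5;

            /* on i586+ this can compile to cmpxchg8b even in 32-bit code;
               on CPUs without it, something must emulate the operation */
            int swapped = __atomic_compare_exchange_n(&val, &expected, 9ULL,
                                                      0, __ATOMIC_SEQ_CST,
                                                      __ATOMIC_SEQ_CST);

            printf("swapped=%d val=%llu\n", swapped, (unsigned long long)val);
            return 0;
    }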