Diffstat (limited to 'arch')
272 files changed, 6673 insertions, 4905 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
index 99193b160232..beea3ccebb5e 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -30,6 +30,18 @@ config OPROFILE_IBS
 
 	  If unsure, say N.
 
+config OPROFILE_EVENT_MULTIPLEX
+	bool "OProfile multiplexing support (EXPERIMENTAL)"
+	default n
+	depends on OPROFILE && X86
+	help
+	  The number of hardware counters is limited. The multiplexing
+	  feature enables OProfile to gather more events than counters
+	  are provided by the hardware. This is realized by switching
+	  between events at an user specified time interval.
+
+	  If unsure, say N.
+
 config HAVE_OPROFILE
 	bool
 
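The help text above describes time-division multiplexing of a small set of hardware counters across a larger set of requested events. As a rough illustration of the idea only — the oprofile driver changes themselves live outside this arch/ diffstat, and every identifier below is invented:

/* Sketch: rotate N requested events over K < N hardware counters. */
struct mux_state {
	int n_events;	/* events the user configured */
	int n_counters;	/* physical counters available */
	int offset;	/* index of the first event currently loaded */
};

/* Called from a timer at the user-specified interval. */
static void mux_rotate(struct mux_state *s)
{
	s->offset = (s->offset + s->n_counters) % s->n_events;
	/* reprogram counter i with event (s->offset + i) % s->n_events */
}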
diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h
index 60c83abfde70..5076a8860b18 100644
--- a/arch/alpha/include/asm/thread_info.h
+++ b/arch/alpha/include/asm/thread_info.h
@@ -75,6 +75,7 @@ register struct thread_info *__current_thread_info __asm__("$8");
 #define TIF_UAC_SIGBUS		7
 #define TIF_MEMDIE		8
 #define TIF_RESTORE_SIGMASK	9	/* restore signal mask in do_signal */
+#define TIF_NOTIFY_RESUME	10	/* callback before returning to user */
 #define TIF_FREEZE		16	/* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
@@ -82,10 +83,12 @@ register struct thread_info *__current_thread_info __asm__("$8");
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
+#define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
 #define _TIF_FREEZE		(1<<TIF_FREEZE)
 
 /* Work to do on interrupt/exception return.  */
-#define _TIF_WORK_MASK		(_TIF_SIGPENDING | _TIF_NEED_RESCHED)
+#define _TIF_WORK_MASK		(_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
+				 _TIF_NOTIFY_RESUME)
 
 /* Work to do on any return to userspace. */
 #define _TIF_ALLWORK_MASK	(_TIF_WORK_MASK		\
diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c
index df65eaa84c4c..0932dbb1ef8e 100644
--- a/arch/alpha/kernel/signal.c
+++ b/arch/alpha/kernel/signal.c
@@ -20,6 +20,7 @@
 #include <linux/binfmts.h>
 #include <linux/bitops.h>
 #include <linux/syscalls.h>
+#include <linux/tracehook.h>
 
 #include <asm/uaccess.h>
 #include <asm/sigcontext.h>
@@ -683,4 +684,11 @@ do_notify_resume(struct pt_regs *regs, struct switch_stack *sw,
 {
 	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
 		do_signal(regs, sw, r0, r19);
+
+	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+		clear_thread_flag(TIF_NOTIFY_RESUME);
+		tracehook_notify_resume(regs);
+		if (current->replacement_session_keyring)
+			key_replace_session_keyring();
+	}
 }
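The two hunks above are the template for this whole series: each architecture gains the same flag, the same work-mask update, and the same check in its do_notify_resume() path. The shared shape, quoted once for reference while skimming the per-arch hunks below:

/* Added to each arch's return-to-userspace path in this series: */
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
	clear_thread_flag(TIF_NOTIFY_RESUME);
	tracehook_notify_resume(regs);
	if (current->replacement_session_keyring)
		key_replace_session_keyring();
}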
diff --git a/arch/arm/configs/kirkwood_defconfig b/arch/arm/configs/kirkwood_defconfig
index 0a1abb978d7e..af74cc2de8b6 100644
--- a/arch/arm/configs/kirkwood_defconfig
+++ b/arch/arm/configs/kirkwood_defconfig
@@ -629,7 +629,7 @@ CONFIG_SCSI_LOWLEVEL=y
 CONFIG_ATA=y
 # CONFIG_ATA_NONSTANDARD is not set
 CONFIG_SATA_PMP=y
-# CONFIG_SATA_AHCI is not set
+CONFIG_SATA_AHCI=y
 # CONFIG_SATA_SIL24 is not set
 CONFIG_ATA_SFF=y
 # CONFIG_SATA_SVW is not set
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 73394e50cbca..d3a39b1e6c0f 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -130,11 +130,13 @@ extern void vfp_sync_state(struct thread_info *thread);
  *  TIF_SYSCALL_TRACE	- syscall trace active
  *  TIF_SIGPENDING	- signal pending
  *  TIF_NEED_RESCHED	- rescheduling necessary
+ *  TIF_NOTIFY_RESUME	- callback before returning to user
  *  TIF_USEDFPU		- FPU was used by this task this quantum (SMP)
  *  TIF_POLLING_NRFLAG	- true if poll_idle() is polling TIF_NEED_RESCHED
  */
 #define TIF_SIGPENDING		0
 #define TIF_NEED_RESCHED	1
+#define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
 #define TIF_SYSCALL_TRACE	8
 #define TIF_POLLING_NRFLAG	16
 #define TIF_USING_IWMMXT	17
@@ -143,6 +145,7 @@ extern void vfp_sync_state(struct thread_info *thread);
 
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
+#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
 #define _TIF_USING_IWMMXT	(1 << TIF_USING_IWMMXT)
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 8c3de1a350b5..7813ab782fda 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -51,7 +51,7 @@ fast_work_pending:
 work_pending:
 	tst	r1, #_TIF_NEED_RESCHED
 	bne	work_resched
-	tst	r1, #_TIF_SIGPENDING
+	tst	r1, #_TIF_SIGPENDING|_TIF_NOTIFY_RESUME
 	beq	no_work_pending
 	mov	r0, sp				@ 'regs'
 	mov	r2, why				@ 'syscall'
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index f6bc5d442782..b76fe06d92e7 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -12,6 +12,7 @@
 #include <linux/personality.h>
 #include <linux/freezer.h>
 #include <linux/uaccess.h>
+#include <linux/tracehook.h>
 
 #include <asm/elf.h>
 #include <asm/cacheflush.h>
@@ -707,4 +708,11 @@ do_notify_resume(struct pt_regs *regs, unsigned int thread_flags, int syscall)
 {
 	if (thread_flags & _TIF_SIGPENDING)
 		do_signal(&current->blocked, regs, syscall);
+
+	if (thread_flags & _TIF_NOTIFY_RESUME) {
+		clear_thread_flag(TIF_NOTIFY_RESUME);
+		tracehook_notify_resume(regs);
+		if (current->replacement_session_keyring)
+			key_replace_session_keyring();
+	}
 }
diff --git a/arch/arm/mach-kirkwood/ts219-setup.c b/arch/arm/mach-kirkwood/ts219-setup.c
index 01aa213c0a6f..ec1a64f263d2 100644
--- a/arch/arm/mach-kirkwood/ts219-setup.c
+++ b/arch/arm/mach-kirkwood/ts219-setup.c
@@ -206,6 +206,15 @@ static void __init qnap_ts219_init(void)
 
 }
 
+static int __init ts219_pci_init(void)
+{
+	if (machine_is_ts219())
+		kirkwood_pcie_init();
+
+	return 0;
+}
+subsys_initcall(ts219_pci_init);
+
 MACHINE_START(TS219, "QNAP TS-119/TS-219")
 	/* Maintainer: Martin Michlmayr <tbm@cyrius.com> */
 	.phys_io	= KIRKWOOD_REGS_PHYS_BASE,
diff --git a/arch/arm/mach-omap2/mcbsp.c b/arch/arm/mach-omap2/mcbsp.c
index 99b6e1546311..0447d26d454b 100644
--- a/arch/arm/mach-omap2/mcbsp.c
+++ b/arch/arm/mach-omap2/mcbsp.c
@@ -128,6 +128,7 @@ static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = {
 		.rx_irq		= INT_24XX_MCBSP1_IRQ_RX,
 		.tx_irq		= INT_24XX_MCBSP1_IRQ_TX,
 		.ops		= &omap2_mcbsp_ops,
+		.buffer_size	= 0x6F,
 	},
 	{
 		.phys_base	= OMAP34XX_MCBSP2_BASE,
@@ -136,6 +137,7 @@ static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = {
 		.rx_irq		= INT_24XX_MCBSP2_IRQ_RX,
 		.tx_irq		= INT_24XX_MCBSP2_IRQ_TX,
 		.ops		= &omap2_mcbsp_ops,
+		.buffer_size	= 0x3FF,
 	},
 	{
 		.phys_base	= OMAP34XX_MCBSP3_BASE,
@@ -144,6 +146,7 @@ static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = {
 		.rx_irq		= INT_24XX_MCBSP3_IRQ_RX,
 		.tx_irq		= INT_24XX_MCBSP3_IRQ_TX,
 		.ops		= &omap2_mcbsp_ops,
+		.buffer_size	= 0x6F,
 	},
 	{
 		.phys_base	= OMAP34XX_MCBSP4_BASE,
@@ -152,6 +155,7 @@ static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = {
 		.rx_irq		= INT_24XX_MCBSP4_IRQ_RX,
 		.tx_irq		= INT_24XX_MCBSP4_IRQ_TX,
 		.ops		= &omap2_mcbsp_ops,
+		.buffer_size	= 0x6F,
 	},
 	{
 		.phys_base	= OMAP34XX_MCBSP5_BASE,
@@ -160,6 +164,7 @@ static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = {
 		.rx_irq		= INT_24XX_MCBSP5_IRQ_RX,
 		.tx_irq		= INT_24XX_MCBSP5_IRQ_TX,
 		.ops		= &omap2_mcbsp_ops,
+		.buffer_size	= 0x6F,
 	},
 };
 #define OMAP34XX_MCBSP_PDATA_SZ	ARRAY_SIZE(omap34xx_mcbsp_pdata)
diff --git a/arch/arm/mach-pxa/include/mach/audio.h b/arch/arm/mach-pxa/include/mach/audio.h
index 16eb02552d5d..a3449e35a6f5 100644
--- a/arch/arm/mach-pxa/include/mach/audio.h
+++ b/arch/arm/mach-pxa/include/mach/audio.h
@@ -3,10 +3,12 @@
 
 #include <sound/core.h>
 #include <sound/pcm.h>
+#include <sound/ac97_codec.h>
 
 /*
  * @reset_gpio: AC97 reset gpio (normally gpio113 or gpio95)
  *              a -1 value means no gpio will be used for reset
+ * @codec_pdata: AC97 codec platform_data
 
  * reset_gpio should only be specified for pxa27x CPUs where a silicon
  * bug prevents correct operation of the reset line. If not specified,
@@ -20,6 +22,7 @@ typedef struct {
 	void (*resume)(void *);
 	void *priv;
 	int reset_gpio;
+	void *codec_pdata[AC97_BUS_MAX_DEVICES];
 } pxa2xx_audio_ops_t;
 
 extern void pxa_set_ac97_info(pxa2xx_audio_ops_t *ops);
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index e3ac94f09006..9b00f4cbc903 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -1127,6 +1127,11 @@ int omap_dma_running(void)
 void omap_dma_link_lch(int lch_head, int lch_queue)
 {
 	if (omap_dma_in_1510_mode()) {
+		if (lch_head == lch_queue) {
+			dma_write(dma_read(CCR(lch_head)) | (3 << 8),
+								CCR(lch_head));
+			return;
+		}
 		printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
 		BUG();
 		return;
@@ -1149,6 +1154,11 @@ EXPORT_SYMBOL(omap_dma_link_lch);
 void omap_dma_unlink_lch(int lch_head, int lch_queue)
 {
 	if (omap_dma_in_1510_mode()) {
+		if (lch_head == lch_queue) {
+			dma_write(dma_read(CCR(lch_head)) & ~(3 << 8),
+								CCR(lch_head));
+			return;
+		}
 		printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
 		BUG();
 		return;
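The early returns above legalize exactly one case on 1510: linking a logical channel to itself by setting CCR bits 8..9; any other head/queue pair still hits the BUG(). A hedged usage sketch — the device id, name, and callback below are invented for illustration:

/* Hypothetical caller: a repeating transfer on OMAP1510 by self-chaining
 * one logical channel. Only the link/unlink calls come from this patch. */
static void my_dma_cb(int lch, u16 ch_status, void *data)
{
	/* per-block completion handling */
}

static int my_setup(void)
{
	int lch, ret;

	ret = omap_request_dma(OMAP_DMA_NO_DEVICE, "cyclic", my_dma_cb,
			       NULL, &lch);
	if (ret)
		return ret;

	omap_dma_link_lch(lch, lch);	/* sets CCR bits 8..9 on 1510 */
	/* ... program and start the transfer ... */
	omap_dma_unlink_lch(lch, lch);	/* clears them again */
	omap_free_dma(lch);
	return 0;
}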
diff --git a/arch/arm/plat-omap/include/mach/mcbsp.h b/arch/arm/plat-omap/include/mach/mcbsp.h
index bb154ea76769..63a3f254af7b 100644
--- a/arch/arm/plat-omap/include/mach/mcbsp.h
+++ b/arch/arm/plat-omap/include/mach/mcbsp.h
@@ -134,6 +134,11 @@
 #define OMAP_MCBSP_REG_XCERG	0x74
 #define OMAP_MCBSP_REG_XCERH	0x78
 #define OMAP_MCBSP_REG_SYSCON	0x8C
+#define OMAP_MCBSP_REG_THRSH2	0x90
+#define OMAP_MCBSP_REG_THRSH1	0x94
+#define OMAP_MCBSP_REG_IRQST	0xA0
+#define OMAP_MCBSP_REG_IRQEN	0xA4
+#define OMAP_MCBSP_REG_WAKEUPEN	0xA8
 #define OMAP_MCBSP_REG_XCCR	0xAC
 #define OMAP_MCBSP_REG_RCCR	0xB0
 
@@ -249,8 +254,27 @@
 #define RDISABLE		0x0001
 
 /********************** McBSP SYSCONFIG bit definitions ********************/
+#define CLOCKACTIVITY(value)	((value)<<8)
+#define SIDLEMODE(value)	((value)<<3)
+#define ENAWAKEUP		0x0004
 #define SOFTRST			0x0002
 
+/********************** McBSP DMA operating modes **************************/
+#define MCBSP_DMA_MODE_ELEMENT		0
+#define MCBSP_DMA_MODE_THRESHOLD	1
+#define MCBSP_DMA_MODE_FRAME		2
+
+/********************** McBSP WAKEUPEN bit definitions *********************/
+#define XEMPTYEOFEN		0x4000
+#define XRDYEN			0x0400
+#define XEOFEN			0x0200
+#define XFSXEN			0x0100
+#define XSYNCERREN		0x0080
+#define RRDYEN			0x0008
+#define REOFEN			0x0004
+#define RFSREN			0x0002
+#define RSYNCERREN		0x0001
+
 /* we don't do multichannel for now */
 struct omap_mcbsp_reg_cfg {
 	u16 spcr2;
@@ -344,6 +368,9 @@ struct omap_mcbsp_platform_data {
 	u8 dma_rx_sync, dma_tx_sync;
 	u16 rx_irq, tx_irq;
 	struct omap_mcbsp_ops *ops;
+#ifdef CONFIG_ARCH_OMAP34XX
+	u16 buffer_size;
+#endif
 };
 
 struct omap_mcbsp {
@@ -377,6 +404,11 @@ struct omap_mcbsp {
 	struct omap_mcbsp_platform_data *pdata;
 	struct clk *iclk;
 	struct clk *fclk;
+#ifdef CONFIG_ARCH_OMAP34XX
+	int dma_op_mode;
+	u16 max_tx_thres;
+	u16 max_rx_thres;
+#endif
 };
 extern struct omap_mcbsp **mcbsp_ptr;
 extern int omap_mcbsp_count;
@@ -385,10 +417,25 @@ int omap_mcbsp_init(void);
 void omap_mcbsp_register_board_cfg(struct omap_mcbsp_platform_data *config,
 					int size);
 void omap_mcbsp_config(unsigned int id, const struct omap_mcbsp_reg_cfg * config);
+#ifdef CONFIG_ARCH_OMAP34XX
+void omap_mcbsp_set_tx_threshold(unsigned int id, u16 threshold);
+void omap_mcbsp_set_rx_threshold(unsigned int id, u16 threshold);
+u16 omap_mcbsp_get_max_tx_threshold(unsigned int id);
+u16 omap_mcbsp_get_max_rx_threshold(unsigned int id);
+int omap_mcbsp_get_dma_op_mode(unsigned int id);
+#else
+static inline void omap_mcbsp_set_tx_threshold(unsigned int id, u16 threshold)
+{ }
+static inline void omap_mcbsp_set_rx_threshold(unsigned int id, u16 threshold)
+{ }
+static inline u16 omap_mcbsp_get_max_tx_threshold(unsigned int id) { return 0; }
+static inline u16 omap_mcbsp_get_max_rx_threshold(unsigned int id) { return 0; }
+static inline int omap_mcbsp_get_dma_op_mode(unsigned int id) { return 0; }
+#endif
 int omap_mcbsp_request(unsigned int id);
 void omap_mcbsp_free(unsigned int id);
-void omap_mcbsp_start(unsigned int id);
-void omap_mcbsp_stop(unsigned int id);
+void omap_mcbsp_start(unsigned int id, int tx, int rx);
+void omap_mcbsp_stop(unsigned int id, int tx, int rx);
 void omap_mcbsp_xmit_word(unsigned int id, u32 word);
 u32 omap_mcbsp_recv_word(unsigned int id);
 
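Note the API break at the bottom of this hunk: omap_mcbsp_start()/omap_mcbsp_stop() now take per-direction tx/rx flags, so every caller must be updated. A sketch of a hypothetical call site (only the signatures are taken from this patch):

/* Hypothetical call site updated for the new signature; the flags are
 * 0 or 1, and the sample-rate generator is only started/stopped when
 * the port goes from fully idle to active or back. */
omap_mcbsp_start(id, 1, 1);	/* was: omap_mcbsp_start(id); */
/* ... full-duplex streaming ... */
omap_mcbsp_stop(id, 1, 0);	/* stop TX, leave RX (and the SRG) running */
omap_mcbsp_stop(id, 0, 1);	/* last side stopped: SRG is reset too */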
diff --git a/arch/arm/plat-omap/mcbsp.c b/arch/arm/plat-omap/mcbsp.c
index efa0e0111f38..8dc7927906f1 100644
--- a/arch/arm/plat-omap/mcbsp.c
+++ b/arch/arm/plat-omap/mcbsp.c
@@ -198,6 +198,170 @@ void omap_mcbsp_config(unsigned int id, const struct omap_mcbsp_reg_cfg *config)
 }
 EXPORT_SYMBOL(omap_mcbsp_config);
 
+#ifdef CONFIG_ARCH_OMAP34XX
+/*
+ * omap_mcbsp_set_tx_threshold configures how to deal
+ * with transmit threshold. the threshold value and handler can be
+ * configure in here.
+ */
+void omap_mcbsp_set_tx_threshold(unsigned int id, u16 threshold)
+{
+	struct omap_mcbsp *mcbsp;
+	void __iomem *io_base;
+
+	if (!cpu_is_omap34xx())
+		return;
+
+	if (!omap_mcbsp_check_valid_id(id)) {
+		printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
+		return;
+	}
+	mcbsp = id_to_mcbsp_ptr(id);
+	io_base = mcbsp->io_base;
+
+	OMAP_MCBSP_WRITE(io_base, THRSH2, threshold);
+}
+EXPORT_SYMBOL(omap_mcbsp_set_tx_threshold);
+
+/*
+ * omap_mcbsp_set_rx_threshold configures how to deal
+ * with receive threshold. the threshold value and handler can be
+ * configure in here.
+ */
+void omap_mcbsp_set_rx_threshold(unsigned int id, u16 threshold)
+{
+	struct omap_mcbsp *mcbsp;
+	void __iomem *io_base;
+
+	if (!cpu_is_omap34xx())
+		return;
+
+	if (!omap_mcbsp_check_valid_id(id)) {
+		printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
+		return;
+	}
+	mcbsp = id_to_mcbsp_ptr(id);
+	io_base = mcbsp->io_base;
+
+	OMAP_MCBSP_WRITE(io_base, THRSH1, threshold);
+}
+EXPORT_SYMBOL(omap_mcbsp_set_rx_threshold);
+
+/*
+ * omap_mcbsp_get_max_tx_thres just return the current configured
+ * maximum threshold for transmission
+ */
+u16 omap_mcbsp_get_max_tx_threshold(unsigned int id)
+{
+	struct omap_mcbsp *mcbsp;
+
+	if (!omap_mcbsp_check_valid_id(id)) {
+		printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
+		return -ENODEV;
+	}
+	mcbsp = id_to_mcbsp_ptr(id);
+
+	return mcbsp->max_tx_thres;
+}
+EXPORT_SYMBOL(omap_mcbsp_get_max_tx_threshold);
+
+/*
+ * omap_mcbsp_get_max_rx_thres just return the current configured
+ * maximum threshold for reception
+ */
+u16 omap_mcbsp_get_max_rx_threshold(unsigned int id)
+{
+	struct omap_mcbsp *mcbsp;
+
+	if (!omap_mcbsp_check_valid_id(id)) {
+		printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
+		return -ENODEV;
+	}
+	mcbsp = id_to_mcbsp_ptr(id);
+
+	return mcbsp->max_rx_thres;
+}
+EXPORT_SYMBOL(omap_mcbsp_get_max_rx_threshold);
+
+/*
+ * omap_mcbsp_get_dma_op_mode just return the current configured
+ * operating mode for the mcbsp channel
+ */
+int omap_mcbsp_get_dma_op_mode(unsigned int id)
+{
+	struct omap_mcbsp *mcbsp;
+	int dma_op_mode;
+
+	if (!omap_mcbsp_check_valid_id(id)) {
+		printk(KERN_ERR "%s: Invalid id (%u)\n", __func__, id + 1);
+		return -ENODEV;
+	}
+	mcbsp = id_to_mcbsp_ptr(id);
+
+	spin_lock_irq(&mcbsp->lock);
+	dma_op_mode = mcbsp->dma_op_mode;
+	spin_unlock_irq(&mcbsp->lock);
+
+	return dma_op_mode;
+}
+EXPORT_SYMBOL(omap_mcbsp_get_dma_op_mode);
+
+static inline void omap34xx_mcbsp_request(struct omap_mcbsp *mcbsp)
+{
+	/*
+	 * Enable wakup behavior, smart idle and all wakeups
+	 * REVISIT: some wakeups may be unnecessary
+	 */
+	if (cpu_is_omap34xx()) {
+		u16 syscon;
+
+		syscon = OMAP_MCBSP_READ(mcbsp->io_base, SYSCON);
+		syscon &= ~(ENAWAKEUP | SIDLEMODE(0x03) | CLOCKACTIVITY(0x03));
+
+		spin_lock_irq(&mcbsp->lock);
+		if (mcbsp->dma_op_mode == MCBSP_DMA_MODE_THRESHOLD) {
+			syscon |= (ENAWAKEUP | SIDLEMODE(0x02) |
+					CLOCKACTIVITY(0x02));
+			OMAP_MCBSP_WRITE(mcbsp->io_base, WAKEUPEN,
+					XRDYEN | RRDYEN);
+		} else {
+			syscon |= SIDLEMODE(0x01);
+		}
+		spin_unlock_irq(&mcbsp->lock);
+
+		OMAP_MCBSP_WRITE(mcbsp->io_base, SYSCON, syscon);
+	}
+}
+
+static inline void omap34xx_mcbsp_free(struct omap_mcbsp *mcbsp)
+{
+	/*
+	 * Disable wakup behavior, smart idle and all wakeups
+	 */
+	if (cpu_is_omap34xx()) {
+		u16 syscon;
+
+		syscon = OMAP_MCBSP_READ(mcbsp->io_base, SYSCON);
+		syscon &= ~(ENAWAKEUP | SIDLEMODE(0x03) | CLOCKACTIVITY(0x03));
+		/*
+		 * HW bug workaround - If no_idle mode is taken, we need to
+		 * go to smart_idle before going to always_idle, or the
+		 * device will not hit retention anymore.
+		 */
+		syscon |= SIDLEMODE(0x02);
+		OMAP_MCBSP_WRITE(mcbsp->io_base, SYSCON, syscon);
+
+		syscon &= ~(SIDLEMODE(0x03));
+		OMAP_MCBSP_WRITE(mcbsp->io_base, SYSCON, syscon);
+
+		OMAP_MCBSP_WRITE(mcbsp->io_base, WAKEUPEN, 0);
+	}
+}
+#else
+static inline void omap34xx_mcbsp_request(struct omap_mcbsp *mcbsp) {}
+static inline void omap34xx_mcbsp_free(struct omap_mcbsp *mcbsp) {}
+#endif
+
 /*
  * We can choose between IRQ based or polled IO.
  * This needs to be called before omap_mcbsp_request().
@@ -257,6 +421,9 @@ int omap_mcbsp_request(unsigned int id)
 	clk_enable(mcbsp->iclk);
 	clk_enable(mcbsp->fclk);
 
+	/* Do procedure specific to omap34xx arch, if applicable */
+	omap34xx_mcbsp_request(mcbsp);
+
 	/*
 	 * Make sure that transmitter, receiver and sample-rate generator are
 	 * not running before activating IRQs.
@@ -305,6 +472,9 @@ void omap_mcbsp_free(unsigned int id)
 	if (mcbsp->pdata && mcbsp->pdata->ops && mcbsp->pdata->ops->free)
 		mcbsp->pdata->ops->free(id);
 
+	/* Do procedure specific to omap34xx arch, if applicable */
+	omap34xx_mcbsp_free(mcbsp);
+
 	clk_disable(mcbsp->fclk);
 	clk_disable(mcbsp->iclk);
 
@@ -328,14 +498,15 @@ void omap_mcbsp_free(unsigned int id)
 EXPORT_SYMBOL(omap_mcbsp_free);
 
 /*
- * Here we start the McBSP, by enabling the sample
- * generator, both transmitter and receivers,
- * and the frame sync.
+ * Here we start the McBSP, by enabling transmitter, receiver or both.
+ * If no transmitter or receiver is active prior calling, then sample-rate
+ * generator and frame sync are started.
  */
-void omap_mcbsp_start(unsigned int id)
+void omap_mcbsp_start(unsigned int id, int tx, int rx)
 {
 	struct omap_mcbsp *mcbsp;
 	void __iomem *io_base;
+	int idle;
 	u16 w;
 
 	if (!omap_mcbsp_check_valid_id(id)) {
@@ -348,32 +519,58 @@ void omap_mcbsp_start(unsigned int id)
 	mcbsp->rx_word_length = (OMAP_MCBSP_READ(io_base, RCR1) >> 5) & 0x7;
 	mcbsp->tx_word_length = (OMAP_MCBSP_READ(io_base, XCR1) >> 5) & 0x7;
 
-	/* Start the sample generator */
-	w = OMAP_MCBSP_READ(io_base, SPCR2);
-	OMAP_MCBSP_WRITE(io_base, SPCR2, w | (1 << 6));
+	idle = !((OMAP_MCBSP_READ(io_base, SPCR2) |
+			OMAP_MCBSP_READ(io_base, SPCR1)) & 1);
+
+	if (idle) {
+		/* Start the sample generator */
+		w = OMAP_MCBSP_READ(io_base, SPCR2);
+		OMAP_MCBSP_WRITE(io_base, SPCR2, w | (1 << 6));
+	}
 
 	/* Enable transmitter and receiver */
+	tx &= 1;
 	w = OMAP_MCBSP_READ(io_base, SPCR2);
-	OMAP_MCBSP_WRITE(io_base, SPCR2, w | 1);
+	OMAP_MCBSP_WRITE(io_base, SPCR2, w | tx);
 
+	rx &= 1;
 	w = OMAP_MCBSP_READ(io_base, SPCR1);
-	OMAP_MCBSP_WRITE(io_base, SPCR1, w | 1);
+	OMAP_MCBSP_WRITE(io_base, SPCR1, w | rx);
 
-	udelay(100);
+	/*
+	 * Worst case: CLKSRG*2 = 8000khz: (1/8000) * 2 * 2 usec
+	 * REVISIT: 100us may give enough time for two CLKSRG, however
+	 * due to some unknown PM related, clock gating etc. reason it
+	 * is now at 500us.
+	 */
+	udelay(500);
 
-	/* Start frame sync */
-	w = OMAP_MCBSP_READ(io_base, SPCR2);
-	OMAP_MCBSP_WRITE(io_base, SPCR2, w | (1 << 7));
+	if (idle) {
+		/* Start frame sync */
+		w = OMAP_MCBSP_READ(io_base, SPCR2);
+		OMAP_MCBSP_WRITE(io_base, SPCR2, w | (1 << 7));
+	}
+
+	if (cpu_is_omap2430() || cpu_is_omap34xx()) {
+		/* Release the transmitter and receiver */
+		w = OMAP_MCBSP_READ(io_base, XCCR);
+		w &= ~(tx ? XDISABLE : 0);
+		OMAP_MCBSP_WRITE(io_base, XCCR, w);
+		w = OMAP_MCBSP_READ(io_base, RCCR);
+		w &= ~(rx ? RDISABLE : 0);
+		OMAP_MCBSP_WRITE(io_base, RCCR, w);
+	}
 
 	/* Dump McBSP Regs */
 	omap_mcbsp_dump_reg(id);
 }
 EXPORT_SYMBOL(omap_mcbsp_start);
 
-void omap_mcbsp_stop(unsigned int id)
+void omap_mcbsp_stop(unsigned int id, int tx, int rx)
 {
 	struct omap_mcbsp *mcbsp;
 	void __iomem *io_base;
+	int idle;
 	u16 w;
 
 	if (!omap_mcbsp_check_valid_id(id)) {
@@ -385,16 +582,33 @@ void omap_mcbsp_stop(unsigned int id)
 	io_base = mcbsp->io_base;
 
 	/* Reset transmitter */
+	tx &= 1;
+	if (cpu_is_omap2430() || cpu_is_omap34xx()) {
+		w = OMAP_MCBSP_READ(io_base, XCCR);
+		w |= (tx ? XDISABLE : 0);
+		OMAP_MCBSP_WRITE(io_base, XCCR, w);
+	}
 	w = OMAP_MCBSP_READ(io_base, SPCR2);
-	OMAP_MCBSP_WRITE(io_base, SPCR2, w & ~(1));
+	OMAP_MCBSP_WRITE(io_base, SPCR2, w & ~tx);
 
 	/* Reset receiver */
+	rx &= 1;
+	if (cpu_is_omap2430() || cpu_is_omap34xx()) {
+		w = OMAP_MCBSP_READ(io_base, RCCR);
+		w |= (tx ? RDISABLE : 0);
+		OMAP_MCBSP_WRITE(io_base, RCCR, w);
+	}
 	w = OMAP_MCBSP_READ(io_base, SPCR1);
-	OMAP_MCBSP_WRITE(io_base, SPCR1, w & ~(1));
+	OMAP_MCBSP_WRITE(io_base, SPCR1, w & ~rx);
 
-	/* Reset the sample rate generator */
-	w = OMAP_MCBSP_READ(io_base, SPCR2);
-	OMAP_MCBSP_WRITE(io_base, SPCR2, w & ~(1 << 6));
+	idle = !((OMAP_MCBSP_READ(io_base, SPCR2) |
+			OMAP_MCBSP_READ(io_base, SPCR1)) & 1);
+
+	if (idle) {
+		/* Reset the sample rate generator */
+		w = OMAP_MCBSP_READ(io_base, SPCR2);
+		OMAP_MCBSP_WRITE(io_base, SPCR2, w & ~(1 << 6));
+	}
 }
 EXPORT_SYMBOL(omap_mcbsp_stop);
 
@@ -883,6 +1097,149 @@ void omap_mcbsp_set_spi_mode(unsigned int id,
 }
 EXPORT_SYMBOL(omap_mcbsp_set_spi_mode);
 
+#ifdef CONFIG_ARCH_OMAP34XX
+#define max_thres(m)			(mcbsp->pdata->buffer_size)
+#define valid_threshold(m, val)		((val) <= max_thres(m))
+#define THRESHOLD_PROP_BUILDER(prop)					\
+static ssize_t prop##_show(struct device *dev,				\
+			struct device_attribute *attr, char *buf)	\
+{									\
+	struct omap_mcbsp *mcbsp = dev_get_drvdata(dev);		\
+									\
+	return sprintf(buf, "%u\n", mcbsp->prop);			\
+}									\
+									\
+static ssize_t prop##_store(struct device *dev,				\
+				struct device_attribute *attr,		\
+				const char *buf, size_t size)		\
+{									\
+	struct omap_mcbsp *mcbsp = dev_get_drvdata(dev);		\
+	unsigned long val;						\
+	int status;							\
+									\
+	status = strict_strtoul(buf, 0, &val);				\
+	if (status)							\
+		return status;						\
+									\
+	if (!valid_threshold(mcbsp, val))				\
+		return -EDOM;						\
+									\
+	mcbsp->prop = val;						\
+	return size;							\
+}									\
+									\
+static DEVICE_ATTR(prop, 0644, prop##_show, prop##_store);
+
+THRESHOLD_PROP_BUILDER(max_tx_thres);
+THRESHOLD_PROP_BUILDER(max_rx_thres);
+
+static const char *dma_op_modes[] = {
+	"element", "threshold", "frame",
+};
+
+static ssize_t dma_op_mode_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct omap_mcbsp *mcbsp = dev_get_drvdata(dev);
+	int dma_op_mode, i = 0;
+	ssize_t len = 0;
+	const char * const *s;
+
+	spin_lock_irq(&mcbsp->lock);
+	dma_op_mode = mcbsp->dma_op_mode;
+	spin_unlock_irq(&mcbsp->lock);
+
+	for (s = &dma_op_modes[i]; i < ARRAY_SIZE(dma_op_modes); s++, i++) {
+		if (dma_op_mode == i)
+			len += sprintf(buf + len, "[%s] ", *s);
+		else
+			len += sprintf(buf + len, "%s ", *s);
+	}
+	len += sprintf(buf + len, "\n");
+
+	return len;
+}
+
+static ssize_t dma_op_mode_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t size)
+{
+	struct omap_mcbsp *mcbsp = dev_get_drvdata(dev);
+	const char * const *s;
+	int i = 0;
+
+	for (s = &dma_op_modes[i]; i < ARRAY_SIZE(dma_op_modes); s++, i++)
+		if (sysfs_streq(buf, *s))
+			break;
+
+	if (i == ARRAY_SIZE(dma_op_modes))
+		return -EINVAL;
+
+	spin_lock_irq(&mcbsp->lock);
+	if (!mcbsp->free) {
+		size = -EBUSY;
+		goto unlock;
+	}
+	mcbsp->dma_op_mode = i;
+
+unlock:
+	spin_unlock_irq(&mcbsp->lock);
+
+	return size;
+}
+
+static DEVICE_ATTR(dma_op_mode, 0644, dma_op_mode_show, dma_op_mode_store);
+
+static const struct attribute *additional_attrs[] = {
+	&dev_attr_max_tx_thres.attr,
+	&dev_attr_max_rx_thres.attr,
+	&dev_attr_dma_op_mode.attr,
+	NULL,
+};
+
+static const struct attribute_group additional_attr_group = {
+	.attrs = (struct attribute **)additional_attrs,
+};
+
+static inline int __devinit omap_additional_add(struct device *dev)
+{
+	return sysfs_create_group(&dev->kobj, &additional_attr_group);
+}
+
+static inline void __devexit omap_additional_remove(struct device *dev)
+{
+	sysfs_remove_group(&dev->kobj, &additional_attr_group);
+}
+
+static inline void __devinit omap34xx_device_init(struct omap_mcbsp *mcbsp)
+{
+	mcbsp->dma_op_mode = MCBSP_DMA_MODE_ELEMENT;
+	if (cpu_is_omap34xx()) {
+		mcbsp->max_tx_thres = max_thres(mcbsp);
+		mcbsp->max_rx_thres = max_thres(mcbsp);
+		/*
+		 * REVISIT: Set dmap_op_mode to THRESHOLD as default
+		 * for mcbsp2 instances.
+		 */
+		if (omap_additional_add(mcbsp->dev))
+			dev_warn(mcbsp->dev,
+				"Unable to create additional controls\n");
+	} else {
+		mcbsp->max_tx_thres = -EINVAL;
+		mcbsp->max_rx_thres = -EINVAL;
+	}
+}
+
+static inline void __devexit omap34xx_device_exit(struct omap_mcbsp *mcbsp)
+{
+	if (cpu_is_omap34xx())
+		omap_additional_remove(mcbsp->dev);
+}
+#else
+static inline void __devinit omap34xx_device_init(struct omap_mcbsp *mcbsp) {}
+static inline void __devexit omap34xx_device_exit(struct omap_mcbsp *mcbsp) {}
+#endif /* CONFIG_ARCH_OMAP34XX */
+
 /*
  * McBSP1 and McBSP3 are directly mapped on 1610 and 1510.
  * 730 has only 2 McBSP, and both of them are MPU peripherals.
@@ -953,6 +1310,10 @@ static int __devinit omap_mcbsp_probe(struct platform_device *pdev)
 	mcbsp->dev = &pdev->dev;
 	mcbsp_ptr[id] = mcbsp;
 	platform_set_drvdata(pdev, mcbsp);
+
+	/* Initialize mcbsp properties for OMAP34XX if needed / applicable */
+	omap34xx_device_init(mcbsp);
+
 	return 0;
 
 err_fclk:
@@ -976,6 +1337,8 @@ static int __devexit omap_mcbsp_remove(struct platform_device *pdev)
 				mcbsp->pdata->ops->free)
 			mcbsp->pdata->ops->free(mcbsp->id);
 
+		omap34xx_device_exit(mcbsp);
+
 		clk_disable(mcbsp->fclk);
 		clk_disable(mcbsp->iclk);
 		clk_put(mcbsp->fclk);
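For readers untangling THRESHOLD_PROP_BUILDER above: each invocation stamps out a sysfs show/store pair plus a DEVICE_ATTR. Roughly, THRESHOLD_PROP_BUILDER(max_tx_thres) expands to the following (an approximate expansion; the real text is generated by the macro in the hunk):

static ssize_t max_tx_thres_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct omap_mcbsp *mcbsp = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n", mcbsp->max_tx_thres);
}

static ssize_t max_tx_thres_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	struct omap_mcbsp *mcbsp = dev_get_drvdata(dev);
	unsigned long val;
	int status;

	status = strict_strtoul(buf, 0, &val);
	if (status)
		return status;
	if (!valid_threshold(mcbsp, val))
		return -EDOM;
	mcbsp->max_tx_thres = val;
	return size;
}

static DEVICE_ATTR(max_tx_thres, 0644, max_tx_thres_show, max_tx_thres_store);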
diff --git a/arch/arm/plat-orion/include/plat/gpio.h b/arch/arm/plat-orion/include/plat/gpio.h
index 9646a94ed3d0..07c430fdc9ef 100644
--- a/arch/arm/plat-orion/include/plat/gpio.h
+++ b/arch/arm/plat-orion/include/plat/gpio.h
@@ -11,6 +11,8 @@
 #ifndef __PLAT_GPIO_H
 #define __PLAT_GPIO_H
 
+#include <linux/init.h>
+
 /*
  * GENERIC_GPIO primitives.
  */
diff --git a/arch/arm/plat-s3c/include/plat/audio-simtec.h b/arch/arm/plat-s3c/include/plat/audio-simtec.h
new file mode 100644
index 000000000000..0f440b9168db
--- /dev/null
+++ b/arch/arm/plat-s3c/include/plat/audio-simtec.h
@@ -0,0 +1,37 @@
+/* arch/arm/plat-s3c/include/plat/audio-simtec.h
+ *
+ * Copyright 2008 Simtec Electronics
+ *	http://armlinux.simtec.co.uk/
+ *	Ben Dooks <ben@simtec.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Simtec Audio support.
+ */
+
+/**
+ * struct s3c24xx_audio_simtec_pdata - platform data for simtec audio
+ * @use_mpllin: Select codec clock from MPLLin
+ * @output_cdclk: Need to output CDCLK to the codec
+ * @have_mic: Set if we have a MIC socket
+ * @have_lout: Set if we have a LineOut socket
+ * @amp_gpio: GPIO pin to enable the AMP
+ * @amp_gain: Option GPIO to control AMP gain
+ */
+struct s3c24xx_audio_simtec_pdata {
+	unsigned int	use_mpllin:1;
+	unsigned int	output_cdclk:1;
+
+	unsigned int	have_mic:1;
+	unsigned int	have_lout:1;
+
+	int		amp_gpio;
+	int		amp_gain[2];
+
+	void	(*startup)(void);
+};
+
+extern int simtec_audio_add(const char *codec_name,
+			    struct s3c24xx_audio_simtec_pdata *pdata);
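A hedged example of how a board file might consume this new header — the codec name, field values, and function name below are invented for illustration; only the struct and simtec_audio_add() come from the header itself:

/* Hypothetical board code using the new platform data. */
static struct s3c24xx_audio_simtec_pdata my_audio_pdata = {
	.have_mic	= 1,
	.have_lout	= 1,
	.amp_gpio	= -1,		/* no amplifier-enable GPIO */
};

static int __init my_board_audio_init(void)
{
	return simtec_audio_add("tlv320aic23", &my_audio_pdata);
}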
diff --git a/arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h b/arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h
index 0fad7571030e..07659dad1748 100644
--- a/arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h
+++ b/arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h
@@ -33,6 +33,11 @@
 #define S3C2412_IISCON_RXDMA_ACTIVE	(1 << 1)
 #define S3C2412_IISCON_IIS_ACTIVE	(1 << 0)
 
+#define S3C64XX_IISMOD_BLC_16BIT	(0 << 13)
+#define S3C64XX_IISMOD_BLC_8BIT		(1 << 13)
+#define S3C64XX_IISMOD_BLC_24BIT	(2 << 13)
+#define S3C64XX_IISMOD_BLC_MASK		(3 << 13)
+
 #define S3C64XX_IISMOD_IMS_PCLK		(0 << 10)
 #define S3C64XX_IISMOD_IMS_SYSMUX	(1 << 10)
 
diff --git a/arch/avr32/boards/favr-32/setup.c b/arch/avr32/boards/favr-32/setup.c
index 46c9b0a224cf..75f19f47fb2f 100644
--- a/arch/avr32/boards/favr-32/setup.c
+++ b/arch/avr32/boards/favr-32/setup.c
@@ -72,6 +72,10 @@ static struct ads7846_platform_data ads7843_data = {
	.debounce_max		= 20,
	.debounce_rep		= 4,
	.debounce_tol		= 5,
+
+	.keep_vref_on		= true,
+	.settle_delay_usecs	= 500,
+	.penirq_recheck_delay_usecs = 100,
 };
 
 static struct spi_board_info __initdata spi1_board_info[] = {
diff --git a/arch/avr32/include/asm/thread_info.h b/arch/avr32/include/asm/thread_info.h
index fc42de5ca209..fd0c5d7e9337 100644
--- a/arch/avr32/include/asm/thread_info.h
+++ b/arch/avr32/include/asm/thread_info.h
@@ -84,6 +84,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_MEMDIE		6
 #define TIF_RESTORE_SIGMASK	7	/* restore signal mask in do_signal */
 #define TIF_CPU_GOING_TO_SLEEP	8	/* CPU is entering sleep 0 mode */
+#define TIF_NOTIFY_RESUME	9	/* callback before returning to user */
 #define TIF_FREEZE		29
 #define TIF_DEBUG		30	/* debugging enabled */
 #define TIF_USERSPACE		31	/* true if FS sets userspace */
@@ -96,6 +97,7 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_MEMDIE		(1 << TIF_MEMDIE)
 #define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
 #define _TIF_CPU_GOING_TO_SLEEP (1 << TIF_CPU_GOING_TO_SLEEP)
+#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
 #define _TIF_FREEZE		(1 << TIF_FREEZE)
 
 /* Note: The masks below must never span more than 16 bits! */
@@ -103,13 +105,15 @@ static inline struct thread_info *current_thread_info(void)
 /* work to do on interrupt/exception return */
 #define _TIF_WORK_MASK				\
	((1 << TIF_SIGPENDING)			\
+	 | _TIF_NOTIFY_RESUME			\
	 | (1 << TIF_NEED_RESCHED)		\
	 | (1 << TIF_POLLING_NRFLAG)		\
	 | (1 << TIF_BREAKPOINT)		\
	 | (1 << TIF_RESTORE_SIGMASK))
 
 /* work to do on any return to userspace */
-#define _TIF_ALLWORK_MASK	(_TIF_WORK_MASK | (1 << TIF_SYSCALL_TRACE))
+#define _TIF_ALLWORK_MASK	(_TIF_WORK_MASK | (1 << TIF_SYSCALL_TRACE) | \
+				 _TIF_NOTIFY_RESUME)
 /* work to do on return from debug mode */
 #define _TIF_DBGWORK_MASK	(_TIF_WORK_MASK & ~(1 << TIF_BREAKPOINT))
 
diff --git a/arch/avr32/kernel/entry-avr32b.S b/arch/avr32/kernel/entry-avr32b.S
index 009a80155d67..169268c40ae2 100644
--- a/arch/avr32/kernel/entry-avr32b.S
+++ b/arch/avr32/kernel/entry-avr32b.S
@@ -281,7 +281,7 @@ syscall_exit_work:
	ld.w	r1, r0[TI_flags]
	rjmp	1b
 
-2:	mov	r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK
+2:	mov	r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NOTIFY_RESUME
	tst	r1, r2
	breq	3f
	unmask_interrupts
diff --git a/arch/avr32/kernel/signal.c b/arch/avr32/kernel/signal.c
index 27227561bad6..64f886fac2ef 100644
--- a/arch/avr32/kernel/signal.c
+++ b/arch/avr32/kernel/signal.c
@@ -16,6 +16,7 @@
 #include <linux/ptrace.h>
 #include <linux/unistd.h>
 #include <linux/freezer.h>
+#include <linux/tracehook.h>
 
 #include <asm/uaccess.h>
 #include <asm/ucontext.h>
@@ -322,4 +323,11 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, struct thread_info *ti)
 
 	if (ti->flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
 		do_signal(regs, &current->blocked, syscall);
+
+	if (ti->flags & _TIF_NOTIFY_RESUME) {
+		clear_thread_flag(TIF_NOTIFY_RESUME);
+		tracehook_notify_resume(regs);
+		if (current->replacement_session_keyring)
+			key_replace_session_keyring();
+	}
 }
diff --git a/arch/avr32/lib/memcpy.S b/arch/avr32/lib/memcpy.S
index 0abb26142b64..c2ca49d705af 100644
--- a/arch/avr32/lib/memcpy.S
+++ b/arch/avr32/lib/memcpy.S
@@ -24,8 +24,8 @@ memcpy:
	brne	1f
 
	/* At this point, "from" is word-aligned */
-2:	sub	r10, 4
-	mov	r9, r12
+2:	mov	r9, r12
+5:	sub	r10, 4
	brlt	4f
 
3:	ld.w	r8, r11++
@@ -49,6 +49,7 @@ memcpy:
 
	/* Handle unaligned "from" pointer */
1:	sub	r10, 4
+	movlt	r9, r12
	brlt	4b
	add	r10, r9
	lsl	r9, 2
@@ -59,4 +60,13 @@ memcpy:
	st.b	r12++, r8
	ld.ub	r8, r11++
	st.b	r12++, r8
-	rjmp	2b
+	mov	r8, r12
+	add	pc, pc, r9
+	sub	r8, 1
+	nop
+	sub	r8, 1
+	nop
+	sub	r8, 1
+	nop
+	mov	r9, r8
+	rjmp	5b
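As a behavioral reference for the assembly fix above — not a register-for-register mapping — the routine is the usual align-the-source/copy-words/copy-tail memcpy, and the bug was that the saved return pointer (kept in r9) was not set up on the short-unaligned path. In C terms, assuming unaligned word stores are permitted as they are on AVR32:

/* Reference semantics only; the asm keeps the original dst for the
 * return value in r9, which the movlt fix now also covers when the
 * source is unaligned and len < 4. */
void *memcpy_ref(void *dst, const void *src, unsigned long len)
{
	unsigned char *d = dst;
	const unsigned char *s = src;

	while (len && ((unsigned long)s & 3)) {	/* align "from" */
		*d++ = *s++;
		len--;
	}
	while (len >= 4) {			/* word copy */
		*(unsigned int *)d = *(const unsigned int *)s;
		d += 4;
		s += 4;
		len -= 4;
	}
	while (len--)				/* tail bytes */
		*d++ = *s++;

	return dst;
}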
diff --git a/arch/cris/kernel/ptrace.c b/arch/cris/kernel/ptrace.c
index b326023baab2..48b0f3912632 100644
--- a/arch/cris/kernel/ptrace.c
+++ b/arch/cris/kernel/ptrace.c
@@ -16,6 +16,7 @@
 #include <linux/errno.h>
 #include <linux/ptrace.h>
 #include <linux/user.h>
+#include <linux/tracehook.h>
 
 #include <asm/uaccess.h>
 #include <asm/page.h>
@@ -36,4 +37,11 @@ void do_notify_resume(int canrestart, struct pt_regs *regs,
	/* deal with pending signal delivery */
	if (thread_info_flags & _TIF_SIGPENDING)
		do_signal(canrestart,regs);
+
+	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+		clear_thread_flag(TIF_NOTIFY_RESUME);
+		tracehook_notify_resume(regs);
+		if (current->replacement_session_keyring)
+			key_replace_session_keyring();
+	}
 }
diff --git a/arch/frv/kernel/signal.c b/arch/frv/kernel/signal.c
index 4a7a62c6e783..6b0a2b6fed6a 100644
--- a/arch/frv/kernel/signal.c
+++ b/arch/frv/kernel/signal.c
@@ -572,6 +572,8 @@ asmlinkage void do_notify_resume(__u32 thread_info_flags)
	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(__frame);
+		if (current->replacement_session_keyring)
+			key_replace_session_keyring();
	}
 
 } /* end do_notify_resume() */
diff --git a/arch/h8300/include/asm/thread_info.h b/arch/h8300/include/asm/thread_info.h
index 8bbc8b0ee45d..70e67e47d020 100644
--- a/arch/h8300/include/asm/thread_info.h
+++ b/arch/h8300/include/asm/thread_info.h
@@ -89,6 +89,7 @@ static inline struct thread_info *current_thread_info(void)
					   TIF_NEED_RESCHED */
 #define TIF_MEMDIE		4
 #define TIF_RESTORE_SIGMASK	5	/* restore signal mask in do_signal() */
+#define TIF_NOTIFY_RESUME	6	/* callback before returning to user */
 #define TIF_FREEZE		16	/* is freezing for suspend */
 
 /* as above, but as bit values */
@@ -97,6 +98,7 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
+#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
 #define _TIF_FREEZE		(1<<TIF_FREEZE)
 
 #define _TIF_WORK_MASK		0x0000FFFE	/* work to do on interrupt/exception return */
diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c
index cf3472f7389b..af842c369d24 100644
--- a/arch/h8300/kernel/signal.c
+++ b/arch/h8300/kernel/signal.c
@@ -39,6 +39,7 @@
 #include <linux/tty.h>
 #include <linux/binfmts.h>
 #include <linux/freezer.h>
+#include <linux/tracehook.h>
 
 #include <asm/setup.h>
 #include <asm/uaccess.h>
@@ -552,4 +553,11 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags)
 {
	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
		do_signal(regs, NULL);
+
+	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+		clear_thread_flag(TIF_NOTIFY_RESUME);
+		tracehook_notify_resume(regs);
+		if (current->replacement_session_keyring)
+			key_replace_session_keyring();
+	}
 }
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 5a61b5c2e18f..8d3c79cd81e7 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -44,7 +44,6 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 
 #define get_dma_ops(dev) platform_dma_get_ops(dev)
-#define flush_write_buffers()
 
 #include <asm-generic/dma-mapping-common.h>
 
@@ -69,6 +68,24 @@ dma_set_mask (struct device *dev, u64 mask)
	return 0;
 }
 
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+	if (!dev->dma_mask)
+		return 0;
+
+	return addr + size <= *dev->dma_mask;
+}
+
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+	return paddr;
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+	return daddr;
+}
+
 extern int dma_get_cache_alignment(void);
 
 static inline void
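These three inlines mirror the generic helpers that swiotlb-style mapping code expects from an architecture. A sketch of the typical consumer — the bounce branch is an assumption about the caller, not part of this header:

/* Illustrative consumer of the new helpers. */
static dma_addr_t map_sketch(struct device *dev, phys_addr_t paddr,
			     size_t size)
{
	dma_addr_t dev_addr = phys_to_dma(dev, paddr);

	if (!dma_capable(dev, dev_addr, size))
		return 0;	/* a real caller would bounce via swiotlb here */

	return dev_addr;
}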
diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c index 39a3cd0a4173..f2c1600da097 100644 --- a/arch/ia64/kernel/dma-mapping.c +++ b/arch/ia64/kernel/dma-mapping.c | |||
@@ -10,7 +10,9 @@ EXPORT_SYMBOL(dma_ops); | |||
10 | 10 | ||
11 | static int __init dma_init(void) | 11 | static int __init dma_init(void) |
12 | { | 12 | { |
13 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); | 13 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); |
14 | |||
15 | return 0; | ||
14 | } | 16 | } |
15 | fs_initcall(dma_init); | 17 | fs_initcall(dma_init); |
16 | 18 | ||
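The dma_init() change above is a correctness fix rather than a cleanup: initcalls return int, and the old body fell off the end of a non-void function, which is undefined behavior once the caller reads the value (and an initcall's return value is always read). In isolation, with do_setup() as a hypothetical stand-in:

    static int broken_init(void)
    {
            do_setup();     /* no return statement: caller reads garbage */
    }

    static int fixed_init(void)
    {
            do_setup();
            return 0;       /* explicit success, as the patch adds */
    }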
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index 5d7c0e5b9e76..89969e950045 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c | |||
@@ -192,6 +192,8 @@ do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall) | |||
192 | if (test_thread_flag(TIF_NOTIFY_RESUME)) { | 192 | if (test_thread_flag(TIF_NOTIFY_RESUME)) { |
193 | clear_thread_flag(TIF_NOTIFY_RESUME); | 193 | clear_thread_flag(TIF_NOTIFY_RESUME); |
194 | tracehook_notify_resume(&scr->pt); | 194 | tracehook_notify_resume(&scr->pt); |
195 | if (current->replacement_session_keyring) | ||
196 | key_replace_session_keyring(); | ||
195 | } | 197 | } |
196 | 198 | ||
197 | /* copy user rbs to kernel rbs */ | 199 | /* copy user rbs to kernel rbs */ |
diff --git a/arch/ia64/lib/ip_fast_csum.S b/arch/ia64/lib/ip_fast_csum.S index 1f86aeb2c948..620d9dc5220f 100644 --- a/arch/ia64/lib/ip_fast_csum.S +++ b/arch/ia64/lib/ip_fast_csum.S | |||
@@ -96,20 +96,22 @@ END(ip_fast_csum) | |||
96 | GLOBAL_ENTRY(csum_ipv6_magic) | 96 | GLOBAL_ENTRY(csum_ipv6_magic) |
97 | ld4 r20=[in0],4 | 97 | ld4 r20=[in0],4 |
98 | ld4 r21=[in1],4 | 98 | ld4 r21=[in1],4 |
99 | dep r15=in3,in2,32,16 | 99 | zxt4 in2=in2 |
100 | ;; | 100 | ;; |
101 | ld4 r22=[in0],4 | 101 | ld4 r22=[in0],4 |
102 | ld4 r23=[in1],4 | 102 | ld4 r23=[in1],4 |
103 | mux1 r15=r15,@rev | 103 | dep r15=in3,in2,32,16 |
104 | ;; | 104 | ;; |
105 | ld4 r24=[in0],4 | 105 | ld4 r24=[in0],4 |
106 | ld4 r25=[in1],4 | 106 | ld4 r25=[in1],4 |
107 | shr.u r15=r15,16 | 107 | mux1 r15=r15,@rev |
108 | add r16=r20,r21 | 108 | add r16=r20,r21 |
109 | add r17=r22,r23 | 109 | add r17=r22,r23 |
110 | zxt4 in4=in4 | ||
110 | ;; | 111 | ;; |
111 | ld4 r26=[in0],4 | 112 | ld4 r26=[in0],4 |
112 | ld4 r27=[in1],4 | 113 | ld4 r27=[in1],4 |
114 | shr.u r15=r15,16 | ||
113 | add r18=r24,r25 | 115 | add r18=r24,r25 |
114 | add r8=r16,r17 | 116 | add r8=r16,r17 |
115 | ;; | 117 | ;; |
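The csum_ipv6_magic change fixes a calling-convention hazard: the length (in2) and the initial checksum (in4) arrive as 32-bit values in 64-bit registers, and the ia64 ABI leaves their upper 32 bits undefined, so each must be explicitly zero-extended (zxt4) before feeding the 64-bit adds. C performs this widening implicitly, which is why the bug only exists at the assembly level; a sketch of the equivalent C:

    #include <stdint.h>

    static uint64_t fold_in(uint64_t sum, uint32_t len)
    {
            return sum + (uint64_t)len;   /* the zero-extension the language
                                             guarantees; zxt4 spelled in C */
    }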
diff --git a/arch/ia64/xen/time.c b/arch/ia64/xen/time.c index fb8332690179..dbeadb9c8e20 100644 --- a/arch/ia64/xen/time.c +++ b/arch/ia64/xen/time.c | |||
@@ -133,8 +133,7 @@ consider_steal_time(unsigned long new_itm) | |||
133 | account_idle_ticks(blocked); | 133 | account_idle_ticks(blocked); |
134 | run_local_timers(); | 134 | run_local_timers(); |
135 | 135 | ||
136 | if (rcu_pending(cpu)) | 136 | rcu_check_callbacks(cpu, user_mode(get_irq_regs())); |
137 | rcu_check_callbacks(cpu, user_mode(get_irq_regs())); | ||
138 | 137 | ||
139 | scheduler_tick(); | 138 | scheduler_tick(); |
140 | run_posix_cpu_timers(p); | 139 | run_posix_cpu_timers(p); |
diff --git a/arch/m32r/include/asm/thread_info.h b/arch/m32r/include/asm/thread_info.h index 07bb5bd00e2a..71578151a403 100644 --- a/arch/m32r/include/asm/thread_info.h +++ b/arch/m32r/include/asm/thread_info.h | |||
@@ -149,6 +149,7 @@ static inline unsigned int get_thread_fault_code(void) | |||
149 | #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ | 149 | #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ |
150 | #define TIF_SINGLESTEP 3 /* restore singlestep on return to user mode */ | 150 | #define TIF_SINGLESTEP 3 /* restore singlestep on return to user mode */ |
151 | #define TIF_IRET 4 /* return with iret */ | 151 | #define TIF_IRET 4 /* return with iret */ |
152 | #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */ | ||
152 | #define TIF_RESTORE_SIGMASK 8 /* restore signal mask in do_signal() */ | 153 | #define TIF_RESTORE_SIGMASK 8 /* restore signal mask in do_signal() */ |
153 | #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ | 154 | #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ |
154 | #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ | 155 | #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ |
@@ -160,6 +161,7 @@ static inline unsigned int get_thread_fault_code(void) | |||
160 | #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) | 161 | #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) |
161 | #define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP) | 162 | #define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP) |
162 | #define _TIF_IRET (1<<TIF_IRET) | 163 | #define _TIF_IRET (1<<TIF_IRET) |
164 | #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) | ||
163 | #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) | 165 | #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) |
164 | #define _TIF_USEDFPU (1<<TIF_USEDFPU) | 166 | #define _TIF_USEDFPU (1<<TIF_USEDFPU) |
165 | #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) | 167 | #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) |
diff --git a/arch/m32r/kernel/signal.c b/arch/m32r/kernel/signal.c index 18124542a6eb..144b0f124fc7 100644 --- a/arch/m32r/kernel/signal.c +++ b/arch/m32r/kernel/signal.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/stddef.h> | 21 | #include <linux/stddef.h> |
22 | #include <linux/personality.h> | 22 | #include <linux/personality.h> |
23 | #include <linux/freezer.h> | 23 | #include <linux/freezer.h> |
24 | #include <linux/tracehook.h> | ||
24 | #include <asm/cacheflush.h> | 25 | #include <asm/cacheflush.h> |
25 | #include <asm/ucontext.h> | 26 | #include <asm/ucontext.h> |
26 | #include <asm/uaccess.h> | 27 | #include <asm/uaccess.h> |
@@ -408,5 +409,12 @@ void do_notify_resume(struct pt_regs *regs, sigset_t *oldset, | |||
408 | if (thread_info_flags & _TIF_SIGPENDING) | 409 | if (thread_info_flags & _TIF_SIGPENDING) |
409 | do_signal(regs,oldset); | 410 | do_signal(regs,oldset); |
410 | 411 | ||
412 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { | ||
413 | clear_thread_flag(TIF_NOTIFY_RESUME); | ||
414 | tracehook_notify_resume(regs); | ||
415 | if (current->replacement_session_keyring) | ||
416 | key_replace_session_keyring(); | ||
417 | } | ||
418 | |||
411 | clear_thread_flag(TIF_IRET); | 419 | clear_thread_flag(TIF_IRET); |
412 | } | 420 | } |
diff --git a/arch/m68k/amiga/config.c b/arch/m68k/amiga/config.c index 6e562751ad51..6c74751c7b82 100644 --- a/arch/m68k/amiga/config.c +++ b/arch/m68k/amiga/config.c | |||
@@ -574,10 +574,11 @@ static int a2000_hwclk(int op, struct rtc_time *t) | |||
574 | 574 | ||
575 | tod_2000.cntrl1 = TOD2000_CNTRL1_HOLD; | 575 | tod_2000.cntrl1 = TOD2000_CNTRL1_HOLD; |
576 | 576 | ||
577 | while ((tod_2000.cntrl1 & TOD2000_CNTRL1_BUSY) && cnt--) { | 577 | while ((tod_2000.cntrl1 & TOD2000_CNTRL1_BUSY) && cnt) { |
578 | tod_2000.cntrl1 &= ~TOD2000_CNTRL1_HOLD; | 578 | tod_2000.cntrl1 &= ~TOD2000_CNTRL1_HOLD; |
579 | udelay(70); | 579 | udelay(70); |
580 | tod_2000.cntrl1 |= TOD2000_CNTRL1_HOLD; | 580 | tod_2000.cntrl1 |= TOD2000_CNTRL1_HOLD; |
581 | --cnt; | ||
581 | } | 582 | } |
582 | 583 | ||
583 | if (!cnt) | 584 | if (!cnt) |
@@ -649,10 +650,11 @@ static int amiga_set_clock_mmss(unsigned long nowtime) | |||
649 | 650 | ||
650 | tod_2000.cntrl1 |= TOD2000_CNTRL1_HOLD; | 651 | tod_2000.cntrl1 |= TOD2000_CNTRL1_HOLD; |
651 | 652 | ||
652 | while ((tod_2000.cntrl1 & TOD2000_CNTRL1_BUSY) && cnt--) { | 653 | while ((tod_2000.cntrl1 & TOD2000_CNTRL1_BUSY) && cnt) { |
653 | tod_2000.cntrl1 &= ~TOD2000_CNTRL1_HOLD; | 654 | tod_2000.cntrl1 &= ~TOD2000_CNTRL1_HOLD; |
654 | udelay(70); | 655 | udelay(70); |
655 | tod_2000.cntrl1 |= TOD2000_CNTRL1_HOLD; | 656 | tod_2000.cntrl1 |= TOD2000_CNTRL1_HOLD; |
657 | --cnt; | ||
656 | } | 658 | } |
657 | 659 | ||
658 | if (!cnt) | 660 | if (!cnt) |
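Both hwclk loops above share the same off-by-one: with while (busy && cnt--), cnt underflows to -1 when the retries are exhausted, so the later if (!cnt) timeout check never fires. Decrementing inside the body leaves cnt == 0 on exhaustion. A minimal standalone illustration, with busy standing in for the hardware status bit:

    #include <stdio.h>

    int main(void)
    {
            int busy = 1, cnt = 3;

            while (busy && cnt) {   /* fixed form */
                    /* poll the hardware here */
                    --cnt;          /* decrement in the body, as the patch does */
            }
            if (!cnt)
                    printf("timed out\n");  /* now reachable */
            return 0;
    }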
diff --git a/arch/m68k/include/asm/entry_mm.h b/arch/m68k/include/asm/entry_mm.h index 5202f5a5b420..474125886218 100644 --- a/arch/m68k/include/asm/entry_mm.h +++ b/arch/m68k/include/asm/entry_mm.h | |||
@@ -46,7 +46,6 @@ | |||
46 | #define curptr a2 | 46 | #define curptr a2 |
47 | 47 | ||
48 | LFLUSH_I_AND_D = 0x00000808 | 48 | LFLUSH_I_AND_D = 0x00000808 |
49 | LSIGTRAP = 5 | ||
50 | 49 | ||
51 | /* process bits for task_struct.ptrace */ | 50 | /* process bits for task_struct.ptrace */ |
52 | PT_TRACESYS_OFF = 3 | 51 | PT_TRACESYS_OFF = 3 |
@@ -118,9 +117,6 @@ PT_DTRACE_BIT = 2 | |||
118 | #define STR(X) STR1(X) | 117 | #define STR(X) STR1(X) |
119 | #define STR1(X) #X | 118 | #define STR1(X) #X |
120 | 119 | ||
121 | #define PT_OFF_ORIG_D0 0x24 | ||
122 | #define PT_OFF_FORMATVEC 0x32 | ||
123 | #define PT_OFF_SR 0x2C | ||
124 | #define SAVE_ALL_INT \ | 120 | #define SAVE_ALL_INT \ |
125 | "clrl %%sp@-;" /* stk_adj */ \ | 121 | "clrl %%sp@-;" /* stk_adj */ \ |
126 | "pea -1:w;" /* orig d0 = -1 */ \ | 122 | "pea -1:w;" /* orig d0 = -1 */ \ |
diff --git a/arch/m68k/include/asm/entry_no.h b/arch/m68k/include/asm/entry_no.h index c2553d26273d..907ed03d792f 100644 --- a/arch/m68k/include/asm/entry_no.h +++ b/arch/m68k/include/asm/entry_no.h | |||
@@ -72,8 +72,8 @@ LENOSYS = 38 | |||
72 | lea %sp@(-32),%sp /* space for 8 regs */ | 72 | lea %sp@(-32),%sp /* space for 8 regs */ |
73 | moveml %d1-%d5/%a0-%a2,%sp@ | 73 | moveml %d1-%d5/%a0-%a2,%sp@ |
74 | movel sw_usp,%a0 /* get usp */ | 74 | movel sw_usp,%a0 /* get usp */ |
75 | movel %a0@-,%sp@(PT_PC) /* copy exception program counter */ | 75 | movel %a0@-,%sp@(PT_OFF_PC) /* copy exception program counter */ |
76 | movel %a0@-,%sp@(PT_FORMATVEC)/* copy exception format/vector/sr */ | 76 | movel %a0@-,%sp@(PT_OFF_FORMATVEC)/*copy exception format/vector/sr */ |
77 | bra 7f | 77 | bra 7f |
78 | 6: | 78 | 6: |
79 | clrl %sp@- /* stkadj */ | 79 | clrl %sp@- /* stkadj */ |
@@ -89,8 +89,8 @@ LENOSYS = 38 | |||
89 | bnes 8f /* no, skip */ | 89 | bnes 8f /* no, skip */ |
90 | move #0x2700,%sr /* disable intrs */ | 90 | move #0x2700,%sr /* disable intrs */ |
91 | movel sw_usp,%a0 /* get usp */ | 91 | movel sw_usp,%a0 /* get usp */ |
92 | movel %sp@(PT_PC),%a0@- /* copy exception program counter */ | 92 | movel %sp@(PT_OFF_PC),%a0@- /* copy exception program counter */ |
93 | movel %sp@(PT_FORMATVEC),%a0@-/* copy exception format/vector/sr */ | 93 | movel %sp@(PT_OFF_FORMATVEC),%a0@-/*copy exception format/vector/sr */ |
94 | moveml %sp@,%d1-%d5/%a0-%a2 | 94 | moveml %sp@,%d1-%d5/%a0-%a2 |
95 | lea %sp@(32),%sp /* space for 8 regs */ | 95 | lea %sp@(32),%sp /* space for 8 regs */ |
96 | movel %sp@+,%d0 | 96 | movel %sp@+,%d0 |
diff --git a/arch/m68k/include/asm/math-emu.h b/arch/m68k/include/asm/math-emu.h index ddfab96403cb..5e9249b0014c 100644 --- a/arch/m68k/include/asm/math-emu.h +++ b/arch/m68k/include/asm/math-emu.h | |||
@@ -145,16 +145,16 @@ extern unsigned int fp_debugprint; | |||
145 | * these are only used during instruction decoding | 145 | * these are only used during instruction decoding |
146 | * where we always know how deep we're on the stack. | 146 | * where we always know how deep we're on the stack. |
147 | */ | 147 | */ |
148 | #define FPS_DO (PT_D0) | 148 | #define FPS_DO (PT_OFF_D0) |
149 | #define FPS_D1 (PT_D1) | 149 | #define FPS_D1 (PT_OFF_D1) |
150 | #define FPS_D2 (PT_D2) | 150 | #define FPS_D2 (PT_OFF_D2) |
151 | #define FPS_A0 (PT_A0) | 151 | #define FPS_A0 (PT_OFF_A0) |
152 | #define FPS_A1 (PT_A1) | 152 | #define FPS_A1 (PT_OFF_A1) |
153 | #define FPS_A2 (PT_A2) | 153 | #define FPS_A2 (PT_OFF_A2) |
154 | #define FPS_SR (PT_SR) | 154 | #define FPS_SR (PT_OFF_SR) |
155 | #define FPS_PC (PT_PC) | 155 | #define FPS_PC (PT_OFF_PC) |
156 | #define FPS_EA (PT_PC+6) | 156 | #define FPS_EA (PT_OFF_PC+6) |
157 | #define FPS_PC2 (PT_PC+10) | 157 | #define FPS_PC2 (PT_OFF_PC+10) |
158 | 158 | ||
159 | .macro fp_get_fp_reg | 159 | .macro fp_get_fp_reg |
160 | lea (FPD_FPREG,FPDATA,%d0.w*4),%a0 | 160 | lea (FPD_FPREG,FPDATA,%d0.w*4),%a0 |
diff --git a/arch/m68k/include/asm/motorola_pgalloc.h b/arch/m68k/include/asm/motorola_pgalloc.h index 15ee4c74a9f0..2f02f264e694 100644 --- a/arch/m68k/include/asm/motorola_pgalloc.h +++ b/arch/m68k/include/asm/motorola_pgalloc.h | |||
@@ -36,12 +36,10 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addres | |||
36 | return NULL; | 36 | return NULL; |
37 | 37 | ||
38 | pte = kmap(page); | 38 | pte = kmap(page); |
39 | if (pte) { | 39 | __flush_page_to_ram(pte); |
40 | __flush_page_to_ram(pte); | 40 | flush_tlb_kernel_page(pte); |
41 | flush_tlb_kernel_page(pte); | 41 | nocache_page(pte); |
42 | nocache_page(pte); | 42 | kunmap(page); |
43 | } | ||
44 | kunmap(pte); | ||
45 | pgtable_page_ctor(page); | 43 | pgtable_page_ctor(page); |
46 | return page; | 44 | return page; |
47 | } | 45 | } |
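Two things happen in pte_alloc_one() above: the dead NULL check goes away (kmap() on a valid page cannot fail), and with it a real bug: the old code called kunmap(pte), but kunmap() takes the struct page that was mapped, not the virtual address kmap() returned. The corrected pairing, in isolation (kernel context assumed):

    void *pte = kmap(page);      /* map the highmem page, get a kernel VA */
    __flush_page_to_ram(pte);    /* operate on the VA ... */
    kunmap(page);                /* ... but unmap by struct page, not by VA */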
diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h index 0b604f0f192d..fe60e1abaee8 100644 --- a/arch/m68k/include/asm/pgtable_mm.h +++ b/arch/m68k/include/asm/pgtable_mm.h | |||
@@ -135,8 +135,6 @@ static inline void update_mmu_cache(struct vm_area_struct *vma, | |||
135 | #endif | 135 | #endif |
136 | 136 | ||
137 | #ifndef __ASSEMBLY__ | 137 | #ifndef __ASSEMBLY__ |
138 | #include <asm-generic/pgtable.h> | ||
139 | |||
140 | /* | 138 | /* |
141 | * Macro to mark a page protection value as "uncacheable". | 139 | * Macro to mark a page protection value as "uncacheable". |
142 | */ | 140 | */ |
@@ -154,6 +152,7 @@ static inline void update_mmu_cache(struct vm_area_struct *vma, | |||
154 | ? (__pgprot((pgprot_val(prot) & _CACHEMASK040) | _PAGE_NOCACHE_S)) \ | 152 | ? (__pgprot((pgprot_val(prot) & _CACHEMASK040) | _PAGE_NOCACHE_S)) \ |
155 | : (prot))) | 153 | : (prot))) |
156 | 154 | ||
155 | #include <asm-generic/pgtable.h> | ||
157 | #endif /* !__ASSEMBLY__ */ | 156 | #endif /* !__ASSEMBLY__ */ |
158 | 157 | ||
159 | /* | 158 | /* |
diff --git a/arch/m68k/include/asm/thread_info_mm.h b/arch/m68k/include/asm/thread_info_mm.h index 6ea5c33b3c56..b6da3882be9b 100644 --- a/arch/m68k/include/asm/thread_info_mm.h +++ b/arch/m68k/include/asm/thread_info_mm.h | |||
@@ -1,6 +1,10 @@ | |||
1 | #ifndef _ASM_M68K_THREAD_INFO_H | 1 | #ifndef _ASM_M68K_THREAD_INFO_H |
2 | #define _ASM_M68K_THREAD_INFO_H | 2 | #define _ASM_M68K_THREAD_INFO_H |
3 | 3 | ||
4 | #ifndef ASM_OFFSETS_C | ||
5 | #include <asm/asm-offsets.h> | ||
6 | #endif | ||
7 | #include <asm/current.h> | ||
4 | #include <asm/types.h> | 8 | #include <asm/types.h> |
5 | #include <asm/page.h> | 9 | #include <asm/page.h> |
6 | 10 | ||
@@ -31,7 +35,12 @@ struct thread_info { | |||
31 | #define init_thread_info (init_task.thread.info) | 35 | #define init_thread_info (init_task.thread.info) |
32 | #define init_stack (init_thread_union.stack) | 36 | #define init_stack (init_thread_union.stack) |
33 | 37 | ||
34 | #define task_thread_info(tsk) (&(tsk)->thread.info) | 38 | #ifdef ASM_OFFSETS_C |
39 | #define task_thread_info(tsk) ((struct thread_info *) NULL) | ||
40 | #else | ||
41 | #define task_thread_info(tsk) ((struct thread_info *)((char *)tsk+TASK_TINFO)) | ||
42 | #endif | ||
43 | |||
35 | #define task_stack_page(tsk) ((tsk)->stack) | 44 | #define task_stack_page(tsk) ((tsk)->stack) |
36 | #define current_thread_info() task_thread_info(current) | 45 | #define current_thread_info() task_thread_info(current) |
37 | 46 | ||
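The ASM_OFFSETS_C guard above breaks a chicken-and-egg cycle: task_thread_info() now expands to an offset, TASK_TINFO, that only exists in the generated asm-offsets.h, yet asm-offsets.c itself pulls in thread_info.h while generating that header. During the generation pass the macro is stubbed out (nothing evaluates it there); afterwards it is pure pointer arithmetic. An illustrative wrapper, kernel context assumed:

    static inline struct thread_info *task_ti(struct task_struct *tsk)
    {
            /* TASK_TINFO == offsetof(struct task_struct, thread.info),
               recorded by asm-offsets.c below */
            return (struct thread_info *)((char *)tsk + TASK_TINFO);
    }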
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h index aa29a8640f74..946d8691f2b0 100644 --- a/arch/m68k/include/asm/unistd.h +++ b/arch/m68k/include/asm/unistd.h | |||
@@ -334,10 +334,12 @@ | |||
334 | #define __NR_inotify_init1 328 | 334 | #define __NR_inotify_init1 328 |
335 | #define __NR_preadv 329 | 335 | #define __NR_preadv 329 |
336 | #define __NR_pwritev 330 | 336 | #define __NR_pwritev 330 |
337 | #define __NR_rt_tgsigqueueinfo 331 | ||
338 | #define __NR_perf_counter_open 332 | ||
337 | 339 | ||
338 | #ifdef __KERNEL__ | 340 | #ifdef __KERNEL__ |
339 | 341 | ||
340 | #define NR_syscalls 331 | 342 | #define NR_syscalls 333 |
341 | 343 | ||
342 | #define __ARCH_WANT_IPC_PARSE_VERSION | 344 | #define __ARCH_WANT_IPC_PARSE_VERSION |
343 | #define __ARCH_WANT_OLD_READDIR | 345 | #define __ARCH_WANT_OLD_READDIR |
diff --git a/arch/m68k/kernel/asm-offsets.c b/arch/m68k/kernel/asm-offsets.c index b1f012f6c493..73e5e581245b 100644 --- a/arch/m68k/kernel/asm-offsets.c +++ b/arch/m68k/kernel/asm-offsets.c | |||
@@ -8,6 +8,8 @@ | |||
8 | * #defines from the assembly-language output. | 8 | * #defines from the assembly-language output. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define ASM_OFFSETS_C | ||
12 | |||
11 | #include <linux/stddef.h> | 13 | #include <linux/stddef.h> |
12 | #include <linux/sched.h> | 14 | #include <linux/sched.h> |
13 | #include <linux/kernel_stat.h> | 15 | #include <linux/kernel_stat.h> |
@@ -27,6 +29,9 @@ int main(void) | |||
27 | DEFINE(TASK_INFO, offsetof(struct task_struct, thread.info)); | 29 | DEFINE(TASK_INFO, offsetof(struct task_struct, thread.info)); |
28 | DEFINE(TASK_MM, offsetof(struct task_struct, mm)); | 30 | DEFINE(TASK_MM, offsetof(struct task_struct, mm)); |
29 | DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm)); | 31 | DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm)); |
32 | #ifdef CONFIG_MMU | ||
33 | DEFINE(TASK_TINFO, offsetof(struct task_struct, thread.info)); | ||
34 | #endif | ||
30 | 35 | ||
31 | /* offsets into the thread struct */ | 36 | /* offsets into the thread struct */ |
32 | DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp)); | 37 | DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp)); |
@@ -44,20 +49,20 @@ int main(void) | |||
44 | DEFINE(TINFO_FLAGS, offsetof(struct thread_info, flags)); | 49 | DEFINE(TINFO_FLAGS, offsetof(struct thread_info, flags)); |
45 | 50 | ||
46 | /* offsets into the pt_regs */ | 51 | /* offsets into the pt_regs */ |
47 | DEFINE(PT_D0, offsetof(struct pt_regs, d0)); | 52 | DEFINE(PT_OFF_D0, offsetof(struct pt_regs, d0)); |
48 | DEFINE(PT_ORIG_D0, offsetof(struct pt_regs, orig_d0)); | 53 | DEFINE(PT_OFF_ORIG_D0, offsetof(struct pt_regs, orig_d0)); |
49 | DEFINE(PT_D1, offsetof(struct pt_regs, d1)); | 54 | DEFINE(PT_OFF_D1, offsetof(struct pt_regs, d1)); |
50 | DEFINE(PT_D2, offsetof(struct pt_regs, d2)); | 55 | DEFINE(PT_OFF_D2, offsetof(struct pt_regs, d2)); |
51 | DEFINE(PT_D3, offsetof(struct pt_regs, d3)); | 56 | DEFINE(PT_OFF_D3, offsetof(struct pt_regs, d3)); |
52 | DEFINE(PT_D4, offsetof(struct pt_regs, d4)); | 57 | DEFINE(PT_OFF_D4, offsetof(struct pt_regs, d4)); |
53 | DEFINE(PT_D5, offsetof(struct pt_regs, d5)); | 58 | DEFINE(PT_OFF_D5, offsetof(struct pt_regs, d5)); |
54 | DEFINE(PT_A0, offsetof(struct pt_regs, a0)); | 59 | DEFINE(PT_OFF_A0, offsetof(struct pt_regs, a0)); |
55 | DEFINE(PT_A1, offsetof(struct pt_regs, a1)); | 60 | DEFINE(PT_OFF_A1, offsetof(struct pt_regs, a1)); |
56 | DEFINE(PT_A2, offsetof(struct pt_regs, a2)); | 61 | DEFINE(PT_OFF_A2, offsetof(struct pt_regs, a2)); |
57 | DEFINE(PT_PC, offsetof(struct pt_regs, pc)); | 62 | DEFINE(PT_OFF_PC, offsetof(struct pt_regs, pc)); |
58 | DEFINE(PT_SR, offsetof(struct pt_regs, sr)); | 63 | DEFINE(PT_OFF_SR, offsetof(struct pt_regs, sr)); |
59 | /* bitfields are a bit difficult */ | 64 | /* bitfields are a bit difficult */ |
60 | DEFINE(PT_VECTOR, offsetof(struct pt_regs, pc) + 4); | 65 | DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, pc) + 4); |
61 | 66 | ||
62 | /* offsets into the irq_handler struct */ | 67 | /* offsets into the irq_handler struct */ |
63 | DEFINE(IRQ_HANDLER, offsetof(struct irq_node, handler)); | 68 | DEFINE(IRQ_HANDLER, offsetof(struct irq_node, handler)); |
@@ -84,10 +89,10 @@ int main(void) | |||
84 | DEFINE(FONT_DESC_PREF, offsetof(struct font_desc, pref)); | 89 | DEFINE(FONT_DESC_PREF, offsetof(struct font_desc, pref)); |
85 | 90 | ||
86 | /* signal defines */ | 91 | /* signal defines */ |
87 | DEFINE(SIGSEGV, SIGSEGV); | 92 | DEFINE(LSIGSEGV, SIGSEGV); |
88 | DEFINE(SEGV_MAPERR, SEGV_MAPERR); | 93 | DEFINE(LSEGV_MAPERR, SEGV_MAPERR); |
89 | DEFINE(SIGTRAP, SIGTRAP); | 94 | DEFINE(LSIGTRAP, SIGTRAP); |
90 | DEFINE(TRAP_TRACE, TRAP_TRACE); | 95 | DEFINE(LTRAP_TRACE, TRAP_TRACE); |
91 | 96 | ||
92 | /* offsets into the custom struct */ | 97 | /* offsets into the custom struct */ |
93 | DEFINE(CUSTOMBASE, &amiga_custom); | 98 | DEFINE(CUSTOMBASE, &amiga_custom); |
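The wholesale PT_* -> PT_OFF_* and SIGSEGV -> LSIGSEGV renames above exist because these generated assembler constants would otherwise collide with identically named C macros (the ptrace PT_* register indices and the signal numbers) once asm-offsets.h is included from thread_info.h. For reference, DEFINE() emits a marker that the build post-processes into asm-offsets.h; roughly, give or take the kernel era:

    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))

    DEFINE(PT_OFF_D0, offsetof(struct pt_regs, d0));
    /* emits a "->PT_OFF_D0 $32 ..." marker in the compiler output, which a
       sed pass turns into:  #define PT_OFF_D0 32  (d0 sits after d1-d5 and
       a0-a2, i.e. 8 longs in, on m68k) */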
diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S index 8744f60c07a9..922f52e7ed1a 100644 --- a/arch/m68k/kernel/entry.S +++ b/arch/m68k/kernel/entry.S | |||
@@ -77,17 +77,17 @@ ENTRY(ret_from_fork) | |||
77 | jra .Lret_from_exception | 77 | jra .Lret_from_exception |
78 | 78 | ||
79 | do_trace_entry: | 79 | do_trace_entry: |
80 | movel #-ENOSYS,%sp@(PT_D0) | needed for strace | 80 | movel #-ENOSYS,%sp@(PT_OFF_D0)| needed for strace |
81 | subql #4,%sp | 81 | subql #4,%sp |
82 | SAVE_SWITCH_STACK | 82 | SAVE_SWITCH_STACK |
83 | jbsr syscall_trace | 83 | jbsr syscall_trace |
84 | RESTORE_SWITCH_STACK | 84 | RESTORE_SWITCH_STACK |
85 | addql #4,%sp | 85 | addql #4,%sp |
86 | movel %sp@(PT_ORIG_D0),%d0 | 86 | movel %sp@(PT_OFF_ORIG_D0),%d0 |
87 | cmpl #NR_syscalls,%d0 | 87 | cmpl #NR_syscalls,%d0 |
88 | jcs syscall | 88 | jcs syscall |
89 | badsys: | 89 | badsys: |
90 | movel #-ENOSYS,%sp@(PT_D0) | 90 | movel #-ENOSYS,%sp@(PT_OFF_D0) |
91 | jra ret_from_syscall | 91 | jra ret_from_syscall |
92 | 92 | ||
93 | do_trace_exit: | 93 | do_trace_exit: |
@@ -103,7 +103,7 @@ ENTRY(ret_from_signal) | |||
103 | addql #4,%sp | 103 | addql #4,%sp |
104 | /* on 68040 complete pending writebacks if any */ | 104 | /* on 68040 complete pending writebacks if any */ |
105 | #ifdef CONFIG_M68040 | 105 | #ifdef CONFIG_M68040 |
106 | bfextu %sp@(PT_VECTOR){#0,#4},%d0 | 106 | bfextu %sp@(PT_OFF_FORMATVEC){#0,#4},%d0 |
107 | subql #7,%d0 | bus error frame ? | 107 | subql #7,%d0 | bus error frame ? |
108 | jbne 1f | 108 | jbne 1f |
109 | movel %sp,%sp@- | 109 | movel %sp,%sp@- |
@@ -127,7 +127,7 @@ ENTRY(system_call) | |||
127 | jcc badsys | 127 | jcc badsys |
128 | syscall: | 128 | syscall: |
129 | jbsr @(sys_call_table,%d0:l:4)@(0) | 129 | jbsr @(sys_call_table,%d0:l:4)@(0) |
130 | movel %d0,%sp@(PT_D0) | save the return value | 130 | movel %d0,%sp@(PT_OFF_D0) | save the return value |
131 | ret_from_syscall: | 131 | ret_from_syscall: |
132 | |oriw #0x0700,%sr | 132 | |oriw #0x0700,%sr |
133 | movew %curptr@(TASK_INFO+TINFO_FLAGS+2),%d0 | 133 | movew %curptr@(TASK_INFO+TINFO_FLAGS+2),%d0 |
@@ -135,7 +135,7 @@ ret_from_syscall: | |||
135 | 1: RESTORE_ALL | 135 | 1: RESTORE_ALL |
136 | 136 | ||
137 | syscall_exit_work: | 137 | syscall_exit_work: |
138 | btst #5,%sp@(PT_SR) | check if returning to kernel | 138 | btst #5,%sp@(PT_OFF_SR) | check if returning to kernel |
139 | bnes 1b | if so, skip resched, signals | 139 | bnes 1b | if so, skip resched, signals |
140 | lslw #1,%d0 | 140 | lslw #1,%d0 |
141 | jcs do_trace_exit | 141 | jcs do_trace_exit |
@@ -148,7 +148,7 @@ syscall_exit_work: | |||
148 | 148 | ||
149 | ENTRY(ret_from_exception) | 149 | ENTRY(ret_from_exception) |
150 | .Lret_from_exception: | 150 | .Lret_from_exception: |
151 | btst #5,%sp@(PT_SR) | check if returning to kernel | 151 | btst #5,%sp@(PT_OFF_SR) | check if returning to kernel |
152 | bnes 1f | if so, skip resched, signals | 152 | bnes 1f | if so, skip resched, signals |
153 | | only allow interrupts when we are really the last one on the | 153 | | only allow interrupts when we are really the last one on the |
154 | | kernel stack, otherwise stack overflow can occur during | 154 | | kernel stack, otherwise stack overflow can occur during |
@@ -182,7 +182,7 @@ do_signal_return: | |||
182 | jbra resume_userspace | 182 | jbra resume_userspace |
183 | 183 | ||
184 | do_delayed_trace: | 184 | do_delayed_trace: |
185 | bclr #7,%sp@(PT_SR) | clear trace bit in SR | 185 | bclr #7,%sp@(PT_OFF_SR) | clear trace bit in SR |
186 | pea 1 | send SIGTRAP | 186 | pea 1 | send SIGTRAP |
187 | movel %curptr,%sp@- | 187 | movel %curptr,%sp@- |
188 | pea LSIGTRAP | 188 | pea LSIGTRAP |
@@ -199,7 +199,7 @@ ENTRY(auto_inthandler) | |||
199 | GET_CURRENT(%d0) | 199 | GET_CURRENT(%d0) |
200 | addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1) | 200 | addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1) |
201 | | put exception # in d0 | 201 | | put exception # in d0 |
202 | bfextu %sp@(PT_VECTOR){#4,#10},%d0 | 202 | bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0 |
203 | subw #VEC_SPUR,%d0 | 203 | subw #VEC_SPUR,%d0 |
204 | 204 | ||
205 | movel %sp,%sp@- | 205 | movel %sp,%sp@- |
@@ -216,7 +216,7 @@ ret_from_interrupt: | |||
216 | ALIGN | 216 | ALIGN |
217 | ret_from_last_interrupt: | 217 | ret_from_last_interrupt: |
218 | moveq #(~ALLOWINT>>8)&0xff,%d0 | 218 | moveq #(~ALLOWINT>>8)&0xff,%d0 |
219 | andb %sp@(PT_SR),%d0 | 219 | andb %sp@(PT_OFF_SR),%d0 |
220 | jne 2b | 220 | jne 2b |
221 | 221 | ||
222 | /* check if we need to do software interrupts */ | 222 | /* check if we need to do software interrupts */ |
@@ -232,7 +232,7 @@ ENTRY(user_inthandler) | |||
232 | GET_CURRENT(%d0) | 232 | GET_CURRENT(%d0) |
233 | addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1) | 233 | addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1) |
234 | | put exception # in d0 | 234 | | put exception # in d0 |
235 | bfextu %sp@(PT_VECTOR){#4,#10},%d0 | 235 | bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0 |
236 | user_irqvec_fixup = . + 2 | 236 | user_irqvec_fixup = . + 2 |
237 | subw #VEC_USER,%d0 | 237 | subw #VEC_USER,%d0 |
238 | 238 | ||
@@ -755,4 +755,6 @@ sys_call_table: | |||
755 | .long sys_inotify_init1 | 755 | .long sys_inotify_init1 |
756 | .long sys_preadv | 756 | .long sys_preadv |
757 | .long sys_pwritev /* 330 */ | 757 | .long sys_pwritev /* 330 */ |
758 | .long sys_rt_tgsigqueueinfo | ||
759 | .long sys_perf_counter_open | ||
758 | 760 | ||
diff --git a/arch/m68k/math-emu/fp_entry.S b/arch/m68k/math-emu/fp_entry.S index 954b4f304a7d..a3fe1f348dfe 100644 --- a/arch/m68k/math-emu/fp_entry.S +++ b/arch/m68k/math-emu/fp_entry.S | |||
@@ -85,8 +85,8 @@ fp_err_ua2: | |||
85 | fp_err_ua1: | 85 | fp_err_ua1: |
86 | addq.l #4,%sp | 86 | addq.l #4,%sp |
87 | move.l %a0,-(%sp) | 87 | move.l %a0,-(%sp) |
88 | pea SEGV_MAPERR | 88 | pea LSEGV_MAPERR |
89 | pea SIGSEGV | 89 | pea LSIGSEGV |
90 | jsr fpemu_signal | 90 | jsr fpemu_signal |
91 | add.w #12,%sp | 91 | add.w #12,%sp |
92 | jra ret_from_exception | 92 | jra ret_from_exception |
@@ -96,8 +96,8 @@ fp_err_ua1: | |||
96 | | it does not really belong here, but... | 96 | | it does not really belong here, but... |
97 | fp_sendtrace060: | 97 | fp_sendtrace060: |
98 | move.l (FPS_PC,%sp),-(%sp) | 98 | move.l (FPS_PC,%sp),-(%sp) |
99 | pea TRAP_TRACE | 99 | pea LTRAP_TRACE |
100 | pea SIGTRAP | 100 | pea LSIGTRAP |
101 | jsr fpemu_signal | 101 | jsr fpemu_signal |
102 | add.w #12,%sp | 102 | add.w #12,%sp |
103 | jra ret_from_exception | 103 | jra ret_from_exception |
@@ -122,17 +122,17 @@ fp_get_data_reg: | |||
122 | .long fp_get_d6, fp_get_d7 | 122 | .long fp_get_d6, fp_get_d7 |
123 | 123 | ||
124 | fp_get_d0: | 124 | fp_get_d0: |
125 | move.l (PT_D0+8,%sp),%d0 | 125 | move.l (PT_OFF_D0+8,%sp),%d0 |
126 | printf PREGISTER,"{d0->%08x}",1,%d0 | 126 | printf PREGISTER,"{d0->%08x}",1,%d0 |
127 | rts | 127 | rts |
128 | 128 | ||
129 | fp_get_d1: | 129 | fp_get_d1: |
130 | move.l (PT_D1+8,%sp),%d0 | 130 | move.l (PT_OFF_D1+8,%sp),%d0 |
131 | printf PREGISTER,"{d1->%08x}",1,%d0 | 131 | printf PREGISTER,"{d1->%08x}",1,%d0 |
132 | rts | 132 | rts |
133 | 133 | ||
134 | fp_get_d2: | 134 | fp_get_d2: |
135 | move.l (PT_D2+8,%sp),%d0 | 135 | move.l (PT_OFF_D2+8,%sp),%d0 |
136 | printf PREGISTER,"{d2->%08x}",1,%d0 | 136 | printf PREGISTER,"{d2->%08x}",1,%d0 |
137 | rts | 137 | rts |
138 | 138 | ||
@@ -173,35 +173,35 @@ fp_put_data_reg: | |||
173 | 173 | ||
174 | fp_put_d0: | 174 | fp_put_d0: |
175 | printf PREGISTER,"{d0<-%08x}",1,%d0 | 175 | printf PREGISTER,"{d0<-%08x}",1,%d0 |
176 | move.l %d0,(PT_D0+8,%sp) | 176 | move.l %d0,(PT_OFF_D0+8,%sp) |
177 | rts | 177 | rts |
178 | 178 | ||
179 | fp_put_d1: | 179 | fp_put_d1: |
180 | printf PREGISTER,"{d1<-%08x}",1,%d0 | 180 | printf PREGISTER,"{d1<-%08x}",1,%d0 |
181 | move.l %d0,(PT_D1+8,%sp) | 181 | move.l %d0,(PT_OFF_D1+8,%sp) |
182 | rts | 182 | rts |
183 | 183 | ||
184 | fp_put_d2: | 184 | fp_put_d2: |
185 | printf PREGISTER,"{d2<-%08x}",1,%d0 | 185 | printf PREGISTER,"{d2<-%08x}",1,%d0 |
186 | move.l %d0,(PT_D2+8,%sp) | 186 | move.l %d0,(PT_OFF_D2+8,%sp) |
187 | rts | 187 | rts |
188 | 188 | ||
189 | fp_put_d3: | 189 | fp_put_d3: |
190 | printf PREGISTER,"{d3<-%08x}",1,%d0 | 190 | printf PREGISTER,"{d3<-%08x}",1,%d0 |
191 | | move.l %d0,%d3 | 191 | | move.l %d0,%d3 |
192 | move.l %d0,(PT_D3+8,%sp) | 192 | move.l %d0,(PT_OFF_D3+8,%sp) |
193 | rts | 193 | rts |
194 | 194 | ||
195 | fp_put_d4: | 195 | fp_put_d4: |
196 | printf PREGISTER,"{d4<-%08x}",1,%d0 | 196 | printf PREGISTER,"{d4<-%08x}",1,%d0 |
197 | | move.l %d0,%d4 | 197 | | move.l %d0,%d4 |
198 | move.l %d0,(PT_D4+8,%sp) | 198 | move.l %d0,(PT_OFF_D4+8,%sp) |
199 | rts | 199 | rts |
200 | 200 | ||
201 | fp_put_d5: | 201 | fp_put_d5: |
202 | printf PREGISTER,"{d5<-%08x}",1,%d0 | 202 | printf PREGISTER,"{d5<-%08x}",1,%d0 |
203 | | move.l %d0,%d5 | 203 | | move.l %d0,%d5 |
204 | move.l %d0,(PT_D5+8,%sp) | 204 | move.l %d0,(PT_OFF_D5+8,%sp) |
205 | rts | 205 | rts |
206 | 206 | ||
207 | fp_put_d6: | 207 | fp_put_d6: |
@@ -225,17 +225,17 @@ fp_get_addr_reg: | |||
225 | .long fp_get_a6, fp_get_a7 | 225 | .long fp_get_a6, fp_get_a7 |
226 | 226 | ||
227 | fp_get_a0: | 227 | fp_get_a0: |
228 | move.l (PT_A0+8,%sp),%a0 | 228 | move.l (PT_OFF_A0+8,%sp),%a0 |
229 | printf PREGISTER,"{a0->%08x}",1,%a0 | 229 | printf PREGISTER,"{a0->%08x}",1,%a0 |
230 | rts | 230 | rts |
231 | 231 | ||
232 | fp_get_a1: | 232 | fp_get_a1: |
233 | move.l (PT_A1+8,%sp),%a0 | 233 | move.l (PT_OFF_A1+8,%sp),%a0 |
234 | printf PREGISTER,"{a1->%08x}",1,%a0 | 234 | printf PREGISTER,"{a1->%08x}",1,%a0 |
235 | rts | 235 | rts |
236 | 236 | ||
237 | fp_get_a2: | 237 | fp_get_a2: |
238 | move.l (PT_A2+8,%sp),%a0 | 238 | move.l (PT_OFF_A2+8,%sp),%a0 |
239 | printf PREGISTER,"{a2->%08x}",1,%a0 | 239 | printf PREGISTER,"{a2->%08x}",1,%a0 |
240 | rts | 240 | rts |
241 | 241 | ||
@@ -276,17 +276,17 @@ fp_put_addr_reg: | |||
276 | 276 | ||
277 | fp_put_a0: | 277 | fp_put_a0: |
278 | printf PREGISTER,"{a0<-%08x}",1,%a0 | 278 | printf PREGISTER,"{a0<-%08x}",1,%a0 |
279 | move.l %a0,(PT_A0+8,%sp) | 279 | move.l %a0,(PT_OFF_A0+8,%sp) |
280 | rts | 280 | rts |
281 | 281 | ||
282 | fp_put_a1: | 282 | fp_put_a1: |
283 | printf PREGISTER,"{a1<-%08x}",1,%a0 | 283 | printf PREGISTER,"{a1<-%08x}",1,%a0 |
284 | move.l %a0,(PT_A1+8,%sp) | 284 | move.l %a0,(PT_OFF_A1+8,%sp) |
285 | rts | 285 | rts |
286 | 286 | ||
287 | fp_put_a2: | 287 | fp_put_a2: |
288 | printf PREGISTER,"{a2<-%08x}",1,%a0 | 288 | printf PREGISTER,"{a2<-%08x}",1,%a0 |
289 | move.l %a0,(PT_A2+8,%sp) | 289 | move.l %a0,(PT_OFF_A2+8,%sp) |
290 | rts | 290 | rts |
291 | 291 | ||
292 | fp_put_a3: | 292 | fp_put_a3: |
diff --git a/arch/m68knommu/kernel/syscalltable.S b/arch/m68knommu/kernel/syscalltable.S index c0b8782832fd..0ae123e08985 100644 --- a/arch/m68knommu/kernel/syscalltable.S +++ b/arch/m68knommu/kernel/syscalltable.S | |||
@@ -349,6 +349,8 @@ ENTRY(sys_call_table) | |||
349 | .long sys_inotify_init1 | 349 | .long sys_inotify_init1 |
350 | .long sys_preadv | 350 | .long sys_preadv |
351 | .long sys_pwritev /* 330 */ | 351 | .long sys_pwritev /* 330 */ |
352 | .long sys_rt_tgsigqueueinfo | ||
353 | .long sys_perf_counter_open | ||
352 | 354 | ||
353 | .rept NR_syscalls-(.-sys_call_table)/4 | 355 | .rept NR_syscalls-(.-sys_call_table)/4 |
354 | .long sys_ni_syscall | 356 | .long sys_ni_syscall |
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h index f9df720d2e40..01cc1630b66c 100644 --- a/arch/mips/include/asm/thread_info.h +++ b/arch/mips/include/asm/thread_info.h | |||
@@ -115,6 +115,7 @@ register struct thread_info *__current_thread_info __asm__("$28"); | |||
115 | #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ | 115 | #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ |
116 | #define TIF_SYSCALL_AUDIT 3 /* syscall auditing active */ | 116 | #define TIF_SYSCALL_AUDIT 3 /* syscall auditing active */ |
117 | #define TIF_SECCOMP 4 /* secure computing */ | 117 | #define TIF_SECCOMP 4 /* secure computing */ |
118 | #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */ | ||
118 | #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */ | 119 | #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */ |
119 | #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ | 120 | #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ |
120 | #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ | 121 | #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ |
@@ -139,6 +140,7 @@ register struct thread_info *__current_thread_info __asm__("$28"); | |||
139 | #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) | 140 | #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) |
140 | #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) | 141 | #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) |
141 | #define _TIF_SECCOMP (1<<TIF_SECCOMP) | 142 | #define _TIF_SECCOMP (1<<TIF_SECCOMP) |
143 | #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) | ||
142 | #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) | 144 | #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) |
143 | #define _TIF_USEDFPU (1<<TIF_USEDFPU) | 145 | #define _TIF_USEDFPU (1<<TIF_USEDFPU) |
144 | #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) | 146 | #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) |
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index 830c5ef9932b..6254041b942f 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/compiler.h> | 21 | #include <linux/compiler.h> |
22 | #include <linux/syscalls.h> | 22 | #include <linux/syscalls.h> |
23 | #include <linux/uaccess.h> | 23 | #include <linux/uaccess.h> |
24 | #include <linux/tracehook.h> | ||
24 | 25 | ||
25 | #include <asm/abi.h> | 26 | #include <asm/abi.h> |
26 | #include <asm/asm.h> | 27 | #include <asm/asm.h> |
@@ -700,4 +701,11 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused, | |||
700 | /* deal with pending signal delivery */ | 701 | /* deal with pending signal delivery */ |
701 | if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) | 702 | if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) |
702 | do_signal(regs); | 703 | do_signal(regs); |
704 | |||
705 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { | ||
706 | clear_thread_flag(TIF_NOTIFY_RESUME); | ||
707 | tracehook_notify_resume(regs); | ||
708 | if (current->replacement_session_keyring) | ||
709 | key_replace_session_keyring(); | ||
710 | } | ||
703 | } | 711 | } |
diff --git a/arch/mn10300/kernel/signal.c b/arch/mn10300/kernel/signal.c index feb2f2e810db..a21f43bc68e2 100644 --- a/arch/mn10300/kernel/signal.c +++ b/arch/mn10300/kernel/signal.c | |||
@@ -568,5 +568,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags) | |||
568 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { | 568 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { |
569 | clear_thread_flag(TIF_NOTIFY_RESUME); | 569 | clear_thread_flag(TIF_NOTIFY_RESUME); |
570 | tracehook_notify_resume(__frame); | 570 | tracehook_notify_resume(__frame); |
571 | if (current->replacement_session_keyring) | ||
572 | key_replace_session_keyring(); | ||
571 | } | 573 | } |
572 | } | 574 | } |
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h index 4ce0edfbe969..ac775a76bff7 100644 --- a/arch/parisc/include/asm/thread_info.h +++ b/arch/parisc/include/asm/thread_info.h | |||
@@ -59,6 +59,7 @@ struct thread_info { | |||
59 | #define TIF_MEMDIE 5 | 59 | #define TIF_MEMDIE 5 |
60 | #define TIF_RESTORE_SIGMASK 6 /* restore saved signal mask */ | 60 | #define TIF_RESTORE_SIGMASK 6 /* restore saved signal mask */ |
61 | #define TIF_FREEZE 7 /* is freezing for suspend */ | 61 | #define TIF_FREEZE 7 /* is freezing for suspend */ |
62 | #define TIF_NOTIFY_RESUME 8 /* callback before returning to user */ | ||
62 | 63 | ||
63 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) | 64 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) |
64 | #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) | 65 | #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) |
@@ -67,8 +68,9 @@ struct thread_info { | |||
67 | #define _TIF_32BIT (1 << TIF_32BIT) | 68 | #define _TIF_32BIT (1 << TIF_32BIT) |
68 | #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) | 69 | #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) |
69 | #define _TIF_FREEZE (1 << TIF_FREEZE) | 70 | #define _TIF_FREEZE (1 << TIF_FREEZE) |
71 | #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) | ||
70 | 72 | ||
71 | #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | \ | 73 | #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \ |
72 | _TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK) | 74 | _TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK) |
73 | 75 | ||
74 | #endif /* __KERNEL__ */ | 76 | #endif /* __KERNEL__ */ |
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index e552e547cb93..8c4712b74dc1 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S | |||
@@ -948,7 +948,7 @@ intr_check_sig: | |||
948 | /* As above */ | 948 | /* As above */ |
949 | mfctl %cr30,%r1 | 949 | mfctl %cr30,%r1 |
950 | LDREG TI_FLAGS(%r1),%r19 | 950 | LDREG TI_FLAGS(%r1),%r19 |
951 | ldi (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %r20 | 951 | ldi (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK|_TIF_NOTIFY_RESUME), %r20 |
952 | and,COND(<>) %r19, %r20, %r0 | 952 | and,COND(<>) %r19, %r20, %r0 |
953 | b,n intr_restore /* skip past if we've nothing to do */ | 953 | b,n intr_restore /* skip past if we've nothing to do */ |
954 | 954 | ||
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c index f82544225e8e..8eb3c63c407a 100644 --- a/arch/parisc/kernel/signal.c +++ b/arch/parisc/kernel/signal.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/stddef.h> | 25 | #include <linux/stddef.h> |
26 | #include <linux/compat.h> | 26 | #include <linux/compat.h> |
27 | #include <linux/elf.h> | 27 | #include <linux/elf.h> |
28 | #include <linux/tracehook.h> | ||
28 | #include <asm/ucontext.h> | 29 | #include <asm/ucontext.h> |
29 | #include <asm/rt_sigframe.h> | 30 | #include <asm/rt_sigframe.h> |
30 | #include <asm/uaccess.h> | 31 | #include <asm/uaccess.h> |
@@ -645,4 +646,11 @@ void do_notify_resume(struct pt_regs *regs, long in_syscall) | |||
645 | if (test_thread_flag(TIF_SIGPENDING) || | 646 | if (test_thread_flag(TIF_SIGPENDING) || |
646 | test_thread_flag(TIF_RESTORE_SIGMASK)) | 647 | test_thread_flag(TIF_RESTORE_SIGMASK)) |
647 | do_signal(regs, in_syscall); | 648 | do_signal(regs, in_syscall); |
649 | |||
650 | if (test_thread_flag(TIF_NOTIFY_RESUME)) { | ||
651 | clear_thread_flag(TIF_NOTIFY_RESUME); | ||
652 | tracehook_notify_resume(regs); | ||
653 | if (current->replacement_session_keyring) | ||
654 | key_replace_session_keyring(); | ||
655 | } | ||
648 | } | 656 | } |
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index 528f0ff9b273..8b58bf0b7d5a 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c | |||
@@ -532,7 +532,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs) | |||
532 | /* Kill the user process later */ | 532 | /* Kill the user process later */ |
533 | regs->iaoq[0] = 0 | 3; | 533 | regs->iaoq[0] = 0 | 3; |
534 | regs->iaoq[1] = regs->iaoq[0] + 4; | 534 | regs->iaoq[1] = regs->iaoq[0] + 4; |
535 | regs->iasq[0] = regs->iasq[0] = regs->sr[7]; | 535 | regs->iasq[0] = regs->iasq[1] = regs->sr[7]; |
536 | regs->gr[0] &= ~PSW_B; | 536 | regs->gr[0] &= ~PSW_B; |
537 | return; | 537 | return; |
538 | } | 538 | } |
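The traps.c change above is a one-character fix for a classic copy-paste slip: the old statement assigned iasq[0] twice, so iasq[1] was never written and one space-register slot stayed stale. Compilers accept the double assignment without complaint. In isolation:

    int iasq[2] = { -1, -1 };

    iasq[0] = iasq[0] = 7;   /* buggy: both writes hit slot 0, iasq[1] stays -1 */
    iasq[0] = iasq[1] = 7;   /* intended: both slots end up 7 */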
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig index e28e65e7a0e1..7de127e4ceef 100644 --- a/arch/powerpc/configs/ps3_defconfig +++ b/arch/powerpc/configs/ps3_defconfig | |||
@@ -1,13 +1,14 @@ | |||
1 | # | 1 | # |
2 | # Automatically generated make config: don't edit | 2 | # Automatically generated make config: don't edit |
3 | # Linux kernel version: 2.6.30-rc5 | 3 | # Linux kernel version: 2.6.31-rc7 |
4 | # Fri May 15 10:37:00 2009 | 4 | # Mon Aug 24 17:38:50 2009 |
5 | # | 5 | # |
6 | CONFIG_PPC64=y | 6 | CONFIG_PPC64=y |
7 | 7 | ||
8 | # | 8 | # |
9 | # Processor support | 9 | # Processor support |
10 | # | 10 | # |
11 | CONFIG_PPC_BOOK3S_64=y | ||
11 | CONFIG_PPC_BOOK3S=y | 12 | CONFIG_PPC_BOOK3S=y |
12 | # CONFIG_POWER4_ONLY is not set | 13 | # CONFIG_POWER4_ONLY is not set |
13 | CONFIG_POWER3=y | 14 | CONFIG_POWER3=y |
@@ -20,6 +21,7 @@ CONFIG_PPC_STD_MMU=y | |||
20 | CONFIG_PPC_STD_MMU_64=y | 21 | CONFIG_PPC_STD_MMU_64=y |
21 | CONFIG_PPC_MM_SLICES=y | 22 | CONFIG_PPC_MM_SLICES=y |
22 | CONFIG_VIRT_CPU_ACCOUNTING=y | 23 | CONFIG_VIRT_CPU_ACCOUNTING=y |
24 | CONFIG_PPC_HAVE_PMU_SUPPORT=y | ||
23 | CONFIG_SMP=y | 25 | CONFIG_SMP=y |
24 | CONFIG_NR_CPUS=2 | 26 | CONFIG_NR_CPUS=2 |
25 | CONFIG_64BIT=y | 27 | CONFIG_64BIT=y |
@@ -31,6 +33,7 @@ CONFIG_GENERIC_TIME=y | |||
31 | CONFIG_GENERIC_TIME_VSYSCALL=y | 33 | CONFIG_GENERIC_TIME_VSYSCALL=y |
32 | CONFIG_GENERIC_CLOCKEVENTS=y | 34 | CONFIG_GENERIC_CLOCKEVENTS=y |
33 | CONFIG_GENERIC_HARDIRQS=y | 35 | CONFIG_GENERIC_HARDIRQS=y |
36 | CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y | ||
34 | CONFIG_HAVE_SETUP_PER_CPU_AREA=y | 37 | CONFIG_HAVE_SETUP_PER_CPU_AREA=y |
35 | CONFIG_IRQ_PER_CPU=y | 38 | CONFIG_IRQ_PER_CPU=y |
36 | CONFIG_STACKTRACE_SUPPORT=y | 39 | CONFIG_STACKTRACE_SUPPORT=y |
@@ -41,7 +44,6 @@ CONFIG_RWSEM_XCHGADD_ALGORITHM=y | |||
41 | CONFIG_ARCH_HAS_ILOG2_U32=y | 44 | CONFIG_ARCH_HAS_ILOG2_U32=y |
42 | CONFIG_ARCH_HAS_ILOG2_U64=y | 45 | CONFIG_ARCH_HAS_ILOG2_U64=y |
43 | CONFIG_GENERIC_HWEIGHT=y | 46 | CONFIG_GENERIC_HWEIGHT=y |
44 | CONFIG_GENERIC_CALIBRATE_DELAY=y | ||
45 | CONFIG_GENERIC_FIND_NEXT_BIT=y | 47 | CONFIG_GENERIC_FIND_NEXT_BIT=y |
46 | CONFIG_ARCH_NO_VIRT_TO_BUS=y | 48 | CONFIG_ARCH_NO_VIRT_TO_BUS=y |
47 | CONFIG_PPC=y | 49 | CONFIG_PPC=y |
@@ -62,6 +64,7 @@ CONFIG_DTC=y | |||
62 | # CONFIG_PPC_DCR_MMIO is not set | 64 | # CONFIG_PPC_DCR_MMIO is not set |
63 | CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y | 65 | CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y |
64 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" | 66 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" |
67 | CONFIG_CONSTRUCTORS=y | ||
65 | 68 | ||
66 | # | 69 | # |
67 | # General setup | 70 | # General setup |
@@ -113,7 +116,6 @@ CONFIG_SYSCTL_SYSCALL=y | |||
113 | CONFIG_KALLSYMS=y | 116 | CONFIG_KALLSYMS=y |
114 | CONFIG_KALLSYMS_ALL=y | 117 | CONFIG_KALLSYMS_ALL=y |
115 | CONFIG_KALLSYMS_EXTRA_PASS=y | 118 | CONFIG_KALLSYMS_EXTRA_PASS=y |
116 | # CONFIG_STRIP_ASM_SYMS is not set | ||
117 | CONFIG_HOTPLUG=y | 119 | CONFIG_HOTPLUG=y |
118 | CONFIG_PRINTK=y | 120 | CONFIG_PRINTK=y |
119 | CONFIG_BUG=y | 121 | CONFIG_BUG=y |
@@ -126,7 +128,14 @@ CONFIG_TIMERFD=y | |||
126 | CONFIG_EVENTFD=y | 128 | CONFIG_EVENTFD=y |
127 | CONFIG_SHMEM=y | 129 | CONFIG_SHMEM=y |
128 | CONFIG_AIO=y | 130 | CONFIG_AIO=y |
131 | CONFIG_HAVE_PERF_COUNTERS=y | ||
132 | |||
133 | # | ||
134 | # Performance Counters | ||
135 | # | ||
136 | # CONFIG_PERF_COUNTERS is not set | ||
129 | CONFIG_VM_EVENT_COUNTERS=y | 137 | CONFIG_VM_EVENT_COUNTERS=y |
138 | # CONFIG_STRIP_ASM_SYMS is not set | ||
130 | # CONFIG_COMPAT_BRK is not set | 139 | # CONFIG_COMPAT_BRK is not set |
131 | CONFIG_SLAB=y | 140 | CONFIG_SLAB=y |
132 | # CONFIG_SLUB is not set | 141 | # CONFIG_SLUB is not set |
@@ -145,6 +154,11 @@ CONFIG_HAVE_KRETPROBES=y | |||
145 | CONFIG_HAVE_ARCH_TRACEHOOK=y | 154 | CONFIG_HAVE_ARCH_TRACEHOOK=y |
146 | CONFIG_HAVE_DMA_ATTRS=y | 155 | CONFIG_HAVE_DMA_ATTRS=y |
147 | CONFIG_USE_GENERIC_SMP_HELPERS=y | 156 | CONFIG_USE_GENERIC_SMP_HELPERS=y |
157 | |||
158 | # | ||
159 | # GCOV-based kernel profiling | ||
160 | # | ||
161 | # CONFIG_GCOV_KERNEL is not set | ||
148 | # CONFIG_SLOW_WORK is not set | 162 | # CONFIG_SLOW_WORK is not set |
149 | # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set | 163 | # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set |
150 | CONFIG_SLABINFO=y | 164 | CONFIG_SLABINFO=y |
@@ -210,7 +224,7 @@ CONFIG_PPC_CELL=y | |||
210 | # | 224 | # |
211 | # Cell Broadband Engine options | 225 | # Cell Broadband Engine options |
212 | # | 226 | # |
213 | CONFIG_SPU_FS=y | 227 | CONFIG_SPU_FS=m |
214 | CONFIG_SPU_FS_64K_LS=y | 228 | CONFIG_SPU_FS_64K_LS=y |
215 | # CONFIG_SPU_TRACE is not set | 229 | # CONFIG_SPU_TRACE is not set |
216 | CONFIG_SPU_BASE=y | 230 | CONFIG_SPU_BASE=y |
@@ -255,6 +269,7 @@ CONFIG_BINFMT_MISC=y | |||
255 | CONFIG_HUGETLB_PAGE_SIZE_VARIABLE=y | 269 | CONFIG_HUGETLB_PAGE_SIZE_VARIABLE=y |
256 | # CONFIG_IOMMU_VMERGE is not set | 270 | # CONFIG_IOMMU_VMERGE is not set |
257 | CONFIG_IOMMU_HELPER=y | 271 | CONFIG_IOMMU_HELPER=y |
272 | # CONFIG_SWIOTLB is not set | ||
258 | CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y | 273 | CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y |
259 | CONFIG_ARCH_HAS_WALK_MEMORY=y | 274 | CONFIG_ARCH_HAS_WALK_MEMORY=y |
260 | CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y | 275 | CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y |
@@ -285,9 +300,9 @@ CONFIG_MIGRATION=y | |||
285 | CONFIG_PHYS_ADDR_T_64BIT=y | 300 | CONFIG_PHYS_ADDR_T_64BIT=y |
286 | CONFIG_ZONE_DMA_FLAG=1 | 301 | CONFIG_ZONE_DMA_FLAG=1 |
287 | CONFIG_BOUNCE=y | 302 | CONFIG_BOUNCE=y |
288 | CONFIG_UNEVICTABLE_LRU=y | ||
289 | CONFIG_HAVE_MLOCK=y | 303 | CONFIG_HAVE_MLOCK=y |
290 | CONFIG_HAVE_MLOCKED_PAGE_BIT=y | 304 | CONFIG_HAVE_MLOCKED_PAGE_BIT=y |
305 | CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 | ||
291 | CONFIG_ARCH_MEMORY_PROBE=y | 306 | CONFIG_ARCH_MEMORY_PROBE=y |
292 | CONFIG_PPC_HAS_HASH_64K=y | 307 | CONFIG_PPC_HAS_HASH_64K=y |
293 | CONFIG_PPC_4K_PAGES=y | 308 | CONFIG_PPC_4K_PAGES=y |
@@ -399,6 +414,7 @@ CONFIG_IPV6_NDISC_NODETYPE=y | |||
399 | # CONFIG_ECONET is not set | 414 | # CONFIG_ECONET is not set |
400 | # CONFIG_WAN_ROUTER is not set | 415 | # CONFIG_WAN_ROUTER is not set |
401 | # CONFIG_PHONET is not set | 416 | # CONFIG_PHONET is not set |
417 | # CONFIG_IEEE802154 is not set | ||
402 | # CONFIG_NET_SCHED is not set | 418 | # CONFIG_NET_SCHED is not set |
403 | # CONFIG_DCB is not set | 419 | # CONFIG_DCB is not set |
404 | 420 | ||
@@ -433,11 +449,14 @@ CONFIG_BT_HCIBTUSB=m | |||
433 | CONFIG_WIRELESS=y | 449 | CONFIG_WIRELESS=y |
434 | CONFIG_CFG80211=m | 450 | CONFIG_CFG80211=m |
435 | # CONFIG_CFG80211_REG_DEBUG is not set | 451 | # CONFIG_CFG80211_REG_DEBUG is not set |
452 | # CONFIG_CFG80211_DEBUGFS is not set | ||
436 | # CONFIG_WIRELESS_OLD_REGULATORY is not set | 453 | # CONFIG_WIRELESS_OLD_REGULATORY is not set |
437 | CONFIG_WIRELESS_EXT=y | 454 | CONFIG_WIRELESS_EXT=y |
438 | # CONFIG_WIRELESS_EXT_SYSFS is not set | 455 | # CONFIG_WIRELESS_EXT_SYSFS is not set |
439 | # CONFIG_LIB80211 is not set | 456 | # CONFIG_LIB80211 is not set |
440 | CONFIG_MAC80211=m | 457 | CONFIG_MAC80211=m |
458 | CONFIG_MAC80211_DEFAULT_PS=y | ||
459 | CONFIG_MAC80211_DEFAULT_PS_VALUE=1 | ||
441 | 460 | ||
442 | # | 461 | # |
443 | # Rate control algorithm selection | 462 | # Rate control algorithm selection |
@@ -447,7 +466,6 @@ CONFIG_MAC80211_RC_PID=y | |||
447 | CONFIG_MAC80211_RC_DEFAULT_PID=y | 466 | CONFIG_MAC80211_RC_DEFAULT_PID=y |
448 | # CONFIG_MAC80211_RC_DEFAULT_MINSTREL is not set | 467 | # CONFIG_MAC80211_RC_DEFAULT_MINSTREL is not set |
449 | CONFIG_MAC80211_RC_DEFAULT="pid" | 468 | CONFIG_MAC80211_RC_DEFAULT="pid" |
450 | # CONFIG_MAC80211_MESH is not set | ||
451 | # CONFIG_MAC80211_LEDS is not set | 469 | # CONFIG_MAC80211_LEDS is not set |
452 | # CONFIG_MAC80211_DEBUGFS is not set | 470 | # CONFIG_MAC80211_DEBUGFS is not set |
453 | # CONFIG_MAC80211_DEBUG_MENU is not set | 471 | # CONFIG_MAC80211_DEBUG_MENU is not set |
@@ -472,77 +490,7 @@ CONFIG_EXTRA_FIRMWARE="" | |||
472 | # CONFIG_DEBUG_DEVRES is not set | 490 | # CONFIG_DEBUG_DEVRES is not set |
473 | # CONFIG_SYS_HYPERVISOR is not set | 491 | # CONFIG_SYS_HYPERVISOR is not set |
474 | # CONFIG_CONNECTOR is not set | 492 | # CONFIG_CONNECTOR is not set |
475 | CONFIG_MTD=y | 493 | # CONFIG_MTD is not set |
476 | CONFIG_MTD_DEBUG=y | ||
477 | CONFIG_MTD_DEBUG_VERBOSE=0 | ||
478 | # CONFIG_MTD_CONCAT is not set | ||
479 | # CONFIG_MTD_PARTITIONS is not set | ||
480 | # CONFIG_MTD_TESTS is not set | ||
481 | |||
482 | # | ||
483 | # User Modules And Translation Layers | ||
484 | # | ||
485 | # CONFIG_MTD_CHAR is not set | ||
486 | CONFIG_MTD_BLKDEVS=y | ||
487 | CONFIG_MTD_BLOCK=y | ||
488 | # CONFIG_FTL is not set | ||
489 | # CONFIG_NFTL is not set | ||
490 | # CONFIG_INFTL is not set | ||
491 | # CONFIG_RFD_FTL is not set | ||
492 | # CONFIG_SSFDC is not set | ||
493 | # CONFIG_MTD_OOPS is not set | ||
494 | |||
495 | # | ||
496 | # RAM/ROM/Flash chip drivers | ||
497 | # | ||
498 | # CONFIG_MTD_CFI is not set | ||
499 | # CONFIG_MTD_JEDECPROBE is not set | ||
500 | CONFIG_MTD_MAP_BANK_WIDTH_1=y | ||
501 | CONFIG_MTD_MAP_BANK_WIDTH_2=y | ||
502 | CONFIG_MTD_MAP_BANK_WIDTH_4=y | ||
503 | # CONFIG_MTD_MAP_BANK_WIDTH_8 is not set | ||
504 | # CONFIG_MTD_MAP_BANK_WIDTH_16 is not set | ||
505 | # CONFIG_MTD_MAP_BANK_WIDTH_32 is not set | ||
506 | CONFIG_MTD_CFI_I1=y | ||
507 | CONFIG_MTD_CFI_I2=y | ||
508 | # CONFIG_MTD_CFI_I4 is not set | ||
509 | # CONFIG_MTD_CFI_I8 is not set | ||
510 | # CONFIG_MTD_RAM is not set | ||
511 | # CONFIG_MTD_ROM is not set | ||
512 | # CONFIG_MTD_ABSENT is not set | ||
513 | |||
514 | # | ||
515 | # Mapping drivers for chip access | ||
516 | # | ||
517 | # CONFIG_MTD_COMPLEX_MAPPINGS is not set | ||
518 | # CONFIG_MTD_PLATRAM is not set | ||
519 | |||
520 | # | ||
521 | # Self-contained MTD device drivers | ||
522 | # | ||
523 | # CONFIG_MTD_SLRAM is not set | ||
524 | # CONFIG_MTD_PHRAM is not set | ||
525 | # CONFIG_MTD_MTDRAM is not set | ||
526 | # CONFIG_MTD_BLOCK2MTD is not set | ||
527 | |||
528 | # | ||
529 | # Disk-On-Chip Device Drivers | ||
530 | # | ||
531 | # CONFIG_MTD_DOC2000 is not set | ||
532 | # CONFIG_MTD_DOC2001 is not set | ||
533 | # CONFIG_MTD_DOC2001PLUS is not set | ||
534 | # CONFIG_MTD_NAND is not set | ||
535 | # CONFIG_MTD_ONENAND is not set | ||
536 | |||
537 | # | ||
538 | # LPDDR flash memory drivers | ||
539 | # | ||
540 | # CONFIG_MTD_LPDDR is not set | ||
541 | |||
542 | # | ||
543 | # UBI - Unsorted block images | ||
544 | # | ||
545 | # CONFIG_MTD_UBI is not set | ||
546 | CONFIG_OF_DEVICE=y | 494 | CONFIG_OF_DEVICE=y |
547 | # CONFIG_PARPORT is not set | 495 | # CONFIG_PARPORT is not set |
548 | CONFIG_BLK_DEV=y | 496 | CONFIG_BLK_DEV=y |
@@ -590,10 +538,6 @@ CONFIG_BLK_DEV_SR=y | |||
590 | # CONFIG_BLK_DEV_SR_VENDOR is not set | 538 | # CONFIG_BLK_DEV_SR_VENDOR is not set |
591 | CONFIG_CHR_DEV_SG=m | 539 | CONFIG_CHR_DEV_SG=m |
592 | # CONFIG_CHR_DEV_SCH is not set | 540 | # CONFIG_CHR_DEV_SCH is not set |
593 | |||
594 | # | ||
595 | # Some SCSI devices (e.g. CD jukebox) support multiple LUNs | ||
596 | # | ||
597 | CONFIG_SCSI_MULTI_LUN=y | 541 | CONFIG_SCSI_MULTI_LUN=y |
598 | # CONFIG_SCSI_CONSTANTS is not set | 542 | # CONFIG_SCSI_CONSTANTS is not set |
599 | # CONFIG_SCSI_LOGGING is not set | 543 | # CONFIG_SCSI_LOGGING is not set |
@@ -626,7 +570,6 @@ CONFIG_BLK_DEV_DM=m | |||
626 | # CONFIG_DM_UEVENT is not set | 570 | # CONFIG_DM_UEVENT is not set |
627 | # CONFIG_MACINTOSH_DRIVERS is not set | 571 | # CONFIG_MACINTOSH_DRIVERS is not set |
628 | CONFIG_NETDEVICES=y | 572 | CONFIG_NETDEVICES=y |
629 | CONFIG_COMPAT_NET_DEV_OPS=y | ||
630 | # CONFIG_DUMMY is not set | 573 | # CONFIG_DUMMY is not set |
631 | # CONFIG_BONDING is not set | 574 | # CONFIG_BONDING is not set |
632 | # CONFIG_MACVLAN is not set | 575 | # CONFIG_MACVLAN is not set |
@@ -646,10 +589,11 @@ CONFIG_MII=m | |||
646 | # CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set | 589 | # CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set |
647 | # CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set | 590 | # CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set |
648 | # CONFIG_B44 is not set | 591 | # CONFIG_B44 is not set |
592 | # CONFIG_KS8842 is not set | ||
649 | CONFIG_NETDEV_1000=y | 593 | CONFIG_NETDEV_1000=y |
650 | CONFIG_GELIC_NET=y | 594 | CONFIG_GELIC_NET=y |
651 | CONFIG_GELIC_WIRELESS=y | 595 | CONFIG_GELIC_WIRELESS=y |
652 | CONFIG_GELIC_WIRELESS_OLD_PSK_INTERFACE=y | 596 | # CONFIG_GELIC_WIRELESS_OLD_PSK_INTERFACE is not set |
653 | # CONFIG_NETDEV_10000 is not set | 597 | # CONFIG_NETDEV_10000 is not set |
654 | 598 | ||
655 | # | 599 | # |
@@ -669,8 +613,7 @@ CONFIG_WLAN_80211=y | |||
669 | # CONFIG_HOSTAP is not set | 613 | # CONFIG_HOSTAP is not set |
670 | # CONFIG_B43 is not set | 614 | # CONFIG_B43 is not set |
671 | # CONFIG_B43LEGACY is not set | 615 | # CONFIG_B43LEGACY is not set |
672 | CONFIG_ZD1211RW=m | 616 | # CONFIG_ZD1211RW is not set |
673 | # CONFIG_ZD1211RW_DEBUG is not set | ||
674 | # CONFIG_RT2X00 is not set | 617 | # CONFIG_RT2X00 is not set |
675 | 618 | ||
676 | # | 619 | # |
@@ -682,7 +625,7 @@ CONFIG_ZD1211RW=m | |||
682 | # | 625 | # |
683 | # CONFIG_USB_CATC is not set | 626 | # CONFIG_USB_CATC is not set |
684 | # CONFIG_USB_KAWETH is not set | 627 | # CONFIG_USB_KAWETH is not set |
685 | CONFIG_USB_PEGASUS=m | 628 | # CONFIG_USB_PEGASUS is not set |
686 | # CONFIG_USB_RTL8150 is not set | 629 | # CONFIG_USB_RTL8150 is not set |
687 | CONFIG_USB_USBNET=m | 630 | CONFIG_USB_USBNET=m |
688 | CONFIG_USB_NET_AX8817X=m | 631 | CONFIG_USB_NET_AX8817X=m |
@@ -693,10 +636,11 @@ CONFIG_USB_NET_AX8817X=m | |||
693 | # CONFIG_USB_NET_GL620A is not set | 636 | # CONFIG_USB_NET_GL620A is not set |
694 | # CONFIG_USB_NET_NET1080 is not set | 637 | # CONFIG_USB_NET_NET1080 is not set |
695 | # CONFIG_USB_NET_PLUSB is not set | 638 | # CONFIG_USB_NET_PLUSB is not set |
696 | CONFIG_USB_NET_MCS7830=m | 639 | # CONFIG_USB_NET_MCS7830 is not set |
697 | # CONFIG_USB_NET_RNDIS_HOST is not set | 640 | # CONFIG_USB_NET_RNDIS_HOST is not set |
698 | # CONFIG_USB_NET_CDC_SUBSET is not set | 641 | # CONFIG_USB_NET_CDC_SUBSET is not set |
699 | # CONFIG_USB_NET_ZAURUS is not set | 642 | # CONFIG_USB_NET_ZAURUS is not set |
643 | # CONFIG_USB_NET_INT51X1 is not set | ||
700 | # CONFIG_WAN is not set | 644 | # CONFIG_WAN is not set |
701 | CONFIG_PPP=m | 645 | CONFIG_PPP=m |
702 | CONFIG_PPP_MULTILINK=y | 646 | CONFIG_PPP_MULTILINK=y |
@@ -771,8 +715,7 @@ CONFIG_DEVKMEM=y | |||
771 | # | 715 | # |
772 | CONFIG_UNIX98_PTYS=y | 716 | CONFIG_UNIX98_PTYS=y |
773 | # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set | 717 | # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set |
774 | CONFIG_LEGACY_PTYS=y | 718 | # CONFIG_LEGACY_PTYS is not set |
775 | CONFIG_LEGACY_PTY_COUNT=16 | ||
776 | # CONFIG_HVC_UDBG is not set | 719 | # CONFIG_HVC_UDBG is not set |
777 | # CONFIG_IPMI_HANDLER is not set | 720 | # CONFIG_IPMI_HANDLER is not set |
778 | # CONFIG_HW_RANDOM is not set | 721 | # CONFIG_HW_RANDOM is not set |
@@ -782,6 +725,11 @@ CONFIG_LEGACY_PTY_COUNT=16 | |||
782 | # CONFIG_TCG_TPM is not set | 725 | # CONFIG_TCG_TPM is not set |
783 | # CONFIG_I2C is not set | 726 | # CONFIG_I2C is not set |
784 | # CONFIG_SPI is not set | 727 | # CONFIG_SPI is not set |
728 | |||
729 | # | ||
730 | # PPS support | ||
731 | # | ||
732 | # CONFIG_PPS is not set | ||
785 | CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y | 733 | CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y |
786 | # CONFIG_GPIOLIB is not set | 734 | # CONFIG_GPIOLIB is not set |
787 | # CONFIG_W1 is not set | 735 | # CONFIG_W1 is not set |
@@ -805,22 +753,7 @@ CONFIG_SSB_POSSIBLE=y | |||
805 | # CONFIG_HTC_PASIC3 is not set | 753 | # CONFIG_HTC_PASIC3 is not set |
806 | # CONFIG_MFD_TMIO is not set | 754 | # CONFIG_MFD_TMIO is not set |
807 | # CONFIG_REGULATOR is not set | 755 | # CONFIG_REGULATOR is not set |
808 | 756 | # CONFIG_MEDIA_SUPPORT is not set | |
809 | # | ||
810 | # Multimedia devices | ||
811 | # | ||
812 | |||
813 | # | ||
814 | # Multimedia core support | ||
815 | # | ||
816 | # CONFIG_VIDEO_DEV is not set | ||
817 | # CONFIG_DVB_CORE is not set | ||
818 | # CONFIG_VIDEO_MEDIA is not set | ||
819 | |||
820 | # | ||
821 | # Multimedia drivers | ||
822 | # | ||
823 | # CONFIG_DAB is not set | ||
824 | 757 | ||
825 | # | 758 | # |
826 | # Graphics support | 759 | # Graphics support |
@@ -898,6 +831,11 @@ CONFIG_SND_SUPPORT_OLD_API=y | |||
898 | CONFIG_SND_VERBOSE_PROCFS=y | 831 | CONFIG_SND_VERBOSE_PROCFS=y |
899 | # CONFIG_SND_VERBOSE_PRINTK is not set | 832 | # CONFIG_SND_VERBOSE_PRINTK is not set |
900 | # CONFIG_SND_DEBUG is not set | 833 | # CONFIG_SND_DEBUG is not set |
834 | # CONFIG_SND_RAWMIDI_SEQ is not set | ||
835 | # CONFIG_SND_OPL3_LIB_SEQ is not set | ||
836 | # CONFIG_SND_OPL4_LIB_SEQ is not set | ||
837 | # CONFIG_SND_SBAWE_SEQ is not set | ||
838 | # CONFIG_SND_EMU10K1_SEQ is not set | ||
901 | # CONFIG_SND_DRIVERS is not set | 839 | # CONFIG_SND_DRIVERS is not set |
902 | CONFIG_SND_PPC=y | 840 | CONFIG_SND_PPC=y |
903 | CONFIG_SND_PS3=m | 841 | CONFIG_SND_PS3=m |
@@ -930,29 +868,34 @@ CONFIG_USB_HIDDEV=y | |||
930 | # Special HID drivers | 868 | # Special HID drivers |
931 | # | 869 | # |
932 | # CONFIG_HID_A4TECH is not set | 870 | # CONFIG_HID_A4TECH is not set |
933 | # CONFIG_HID_APPLE is not set | 871 | CONFIG_HID_APPLE=m |
934 | # CONFIG_HID_BELKIN is not set | 872 | CONFIG_HID_BELKIN=m |
935 | # CONFIG_HID_CHERRY is not set | 873 | CONFIG_HID_CHERRY=m |
936 | # CONFIG_HID_CHICONY is not set | 874 | # CONFIG_HID_CHICONY is not set |
937 | # CONFIG_HID_CYPRESS is not set | 875 | # CONFIG_HID_CYPRESS is not set |
938 | # CONFIG_DRAGONRISE_FF is not set | 876 | # CONFIG_HID_DRAGONRISE is not set |
939 | # CONFIG_HID_EZKEY is not set | 877 | CONFIG_HID_EZKEY=m |
940 | # CONFIG_HID_KYE is not set | 878 | # CONFIG_HID_KYE is not set |
941 | # CONFIG_HID_GYRATION is not set | 879 | # CONFIG_HID_GYRATION is not set |
942 | # CONFIG_HID_KENSINGTON is not set | 880 | # CONFIG_HID_KENSINGTON is not set |
943 | # CONFIG_HID_LOGITECH is not set | 881 | CONFIG_HID_LOGITECH=m |
944 | # CONFIG_HID_MICROSOFT is not set | 882 | # CONFIG_LOGITECH_FF is not set |
883 | # CONFIG_LOGIRUMBLEPAD2_FF is not set | ||
884 | CONFIG_HID_MICROSOFT=m | ||
945 | # CONFIG_HID_MONTEREY is not set | 885 | # CONFIG_HID_MONTEREY is not set |
946 | # CONFIG_HID_NTRIG is not set | 886 | # CONFIG_HID_NTRIG is not set |
947 | # CONFIG_HID_PANTHERLORD is not set | 887 | # CONFIG_HID_PANTHERLORD is not set |
948 | # CONFIG_HID_PETALYNX is not set | 888 | # CONFIG_HID_PETALYNX is not set |
949 | # CONFIG_HID_SAMSUNG is not set | 889 | # CONFIG_HID_SAMSUNG is not set |
950 | CONFIG_HID_SONY=m | 890 | CONFIG_HID_SONY=m |
951 | # CONFIG_HID_SUNPLUS is not set | 891 | CONFIG_HID_SUNPLUS=m |
952 | # CONFIG_GREENASIA_FF is not set | 892 | # CONFIG_HID_GREENASIA is not set |
893 | CONFIG_HID_SMARTJOYPLUS=m | ||
894 | # CONFIG_SMARTJOYPLUS_FF is not set | ||
953 | # CONFIG_HID_TOPSEED is not set | 895 | # CONFIG_HID_TOPSEED is not set |
954 | # CONFIG_THRUSTMASTER_FF is not set | 896 | # CONFIG_HID_THRUSTMASTER is not set |
955 | # CONFIG_ZEROPLUS_FF is not set | 897 | # CONFIG_HID_WACOM is not set |
898 | # CONFIG_HID_ZEROPLUS is not set | ||
956 | CONFIG_USB_SUPPORT=y | 899 | CONFIG_USB_SUPPORT=y |
957 | CONFIG_USB_ARCH_HAS_HCD=y | 900 | CONFIG_USB_ARCH_HAS_HCD=y |
958 | CONFIG_USB_ARCH_HAS_OHCI=y | 901 | CONFIG_USB_ARCH_HAS_OHCI=y |
@@ -988,6 +931,8 @@ CONFIG_USB_EHCI_BIG_ENDIAN_MMIO=y | |||
988 | # CONFIG_USB_ISP116X_HCD is not set | 931 | # CONFIG_USB_ISP116X_HCD is not set |
989 | # CONFIG_USB_ISP1760_HCD is not set | 932 | # CONFIG_USB_ISP1760_HCD is not set |
990 | CONFIG_USB_OHCI_HCD=m | 933 | CONFIG_USB_OHCI_HCD=m |
934 | # CONFIG_USB_OHCI_HCD_PPC_OF_BE is not set | ||
935 | # CONFIG_USB_OHCI_HCD_PPC_OF_LE is not set | ||
991 | # CONFIG_USB_OHCI_HCD_PPC_OF is not set | 936 | # CONFIG_USB_OHCI_HCD_PPC_OF is not set |
992 | # CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set | 937 | # CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set |
993 | CONFIG_USB_OHCI_BIG_ENDIAN_MMIO=y | 938 | CONFIG_USB_OHCI_BIG_ENDIAN_MMIO=y |
@@ -1115,6 +1060,10 @@ CONFIG_RTC_DRV_PS3=m | |||
1115 | # CONFIG_DMADEVICES is not set | 1060 | # CONFIG_DMADEVICES is not set |
1116 | # CONFIG_AUXDISPLAY is not set | 1061 | # CONFIG_AUXDISPLAY is not set |
1117 | # CONFIG_UIO is not set | 1062 | # CONFIG_UIO is not set |
1063 | |||
1064 | # | ||
1065 | # TI VLYNQ | ||
1066 | # | ||
1118 | # CONFIG_STAGING is not set | 1067 | # CONFIG_STAGING is not set |
1119 | 1068 | ||
1120 | # | 1069 | # |
@@ -1141,11 +1090,12 @@ CONFIG_FS_MBCACHE=y | |||
1141 | # CONFIG_REISERFS_FS is not set | 1090 | # CONFIG_REISERFS_FS is not set |
1142 | # CONFIG_JFS_FS is not set | 1091 | # CONFIG_JFS_FS is not set |
1143 | # CONFIG_FS_POSIX_ACL is not set | 1092 | # CONFIG_FS_POSIX_ACL is not set |
1144 | CONFIG_FILE_LOCKING=y | ||
1145 | # CONFIG_XFS_FS is not set | 1093 | # CONFIG_XFS_FS is not set |
1146 | # CONFIG_GFS2_FS is not set | 1094 | # CONFIG_GFS2_FS is not set |
1147 | # CONFIG_OCFS2_FS is not set | 1095 | # CONFIG_OCFS2_FS is not set |
1148 | # CONFIG_BTRFS_FS is not set | 1096 | # CONFIG_BTRFS_FS is not set |
1097 | CONFIG_FILE_LOCKING=y | ||
1098 | CONFIG_FSNOTIFY=y | ||
1149 | CONFIG_DNOTIFY=y | 1099 | CONFIG_DNOTIFY=y |
1150 | CONFIG_INOTIFY=y | 1100 | CONFIG_INOTIFY=y |
1151 | CONFIG_INOTIFY_USER=y | 1101 | CONFIG_INOTIFY_USER=y |
@@ -1205,7 +1155,6 @@ CONFIG_MISC_FILESYSTEMS=y | |||
1205 | # CONFIG_BEFS_FS is not set | 1155 | # CONFIG_BEFS_FS is not set |
1206 | # CONFIG_BFS_FS is not set | 1156 | # CONFIG_BFS_FS is not set |
1207 | # CONFIG_EFS_FS is not set | 1157 | # CONFIG_EFS_FS is not set |
1208 | # CONFIG_JFFS2_FS is not set | ||
1209 | # CONFIG_CRAMFS is not set | 1158 | # CONFIG_CRAMFS is not set |
1210 | # CONFIG_SQUASHFS is not set | 1159 | # CONFIG_SQUASHFS is not set |
1211 | # CONFIG_VXFS_FS is not set | 1160 | # CONFIG_VXFS_FS is not set |
@@ -1222,6 +1171,7 @@ CONFIG_NFS_FS=y | |||
1222 | CONFIG_NFS_V3=y | 1171 | CONFIG_NFS_V3=y |
1223 | # CONFIG_NFS_V3_ACL is not set | 1172 | # CONFIG_NFS_V3_ACL is not set |
1224 | CONFIG_NFS_V4=y | 1173 | CONFIG_NFS_V4=y |
1174 | # CONFIG_NFS_V4_1 is not set | ||
1225 | CONFIG_ROOT_NFS=y | 1175 | CONFIG_ROOT_NFS=y |
1226 | # CONFIG_NFSD is not set | 1176 | # CONFIG_NFSD is not set |
1227 | CONFIG_LOCKD=y | 1177 | CONFIG_LOCKD=y |
@@ -1359,7 +1309,6 @@ CONFIG_DEBUG_MEMORY_INIT=y | |||
1359 | CONFIG_DEBUG_LIST=y | 1309 | CONFIG_DEBUG_LIST=y |
1360 | # CONFIG_DEBUG_SG is not set | 1310 | # CONFIG_DEBUG_SG is not set |
1361 | # CONFIG_DEBUG_NOTIFIERS is not set | 1311 | # CONFIG_DEBUG_NOTIFIERS is not set |
1362 | # CONFIG_BOOT_PRINTK_DELAY is not set | ||
1363 | # CONFIG_RCU_TORTURE_TEST is not set | 1312 | # CONFIG_RCU_TORTURE_TEST is not set |
1364 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set | 1313 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set |
1365 | # CONFIG_BACKTRACE_SELF_TEST is not set | 1314 | # CONFIG_BACKTRACE_SELF_TEST is not set |
@@ -1374,31 +1323,21 @@ CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y | |||
1374 | CONFIG_HAVE_DYNAMIC_FTRACE=y | 1323 | CONFIG_HAVE_DYNAMIC_FTRACE=y |
1375 | CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y | 1324 | CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y |
1376 | CONFIG_RING_BUFFER=y | 1325 | CONFIG_RING_BUFFER=y |
1326 | CONFIG_EVENT_TRACING=y | ||
1327 | CONFIG_CONTEXT_SWITCH_TRACER=y | ||
1377 | CONFIG_TRACING=y | 1328 | CONFIG_TRACING=y |
1378 | CONFIG_TRACING_SUPPORT=y | 1329 | CONFIG_TRACING_SUPPORT=y |
1379 | 1330 | # CONFIG_FTRACE is not set | |
1380 | # | ||
1381 | # Tracers | ||
1382 | # | ||
1383 | # CONFIG_FUNCTION_TRACER is not set | ||
1384 | # CONFIG_IRQSOFF_TRACER is not set | ||
1385 | # CONFIG_SCHED_TRACER is not set | ||
1386 | # CONFIG_CONTEXT_SWITCH_TRACER is not set | ||
1387 | # CONFIG_EVENT_TRACER is not set | ||
1388 | # CONFIG_BOOT_TRACER is not set | ||
1389 | # CONFIG_TRACE_BRANCH_PROFILING is not set | ||
1390 | # CONFIG_STACK_TRACER is not set | ||
1391 | # CONFIG_KMEMTRACE is not set | ||
1392 | # CONFIG_WORKQUEUE_TRACER is not set | ||
1393 | # CONFIG_BLK_DEV_IO_TRACE is not set | ||
1394 | # CONFIG_FTRACE_STARTUP_TEST is not set | ||
1395 | # CONFIG_DYNAMIC_DEBUG is not set | 1331 | # CONFIG_DYNAMIC_DEBUG is not set |
1396 | # CONFIG_SAMPLES is not set | 1332 | # CONFIG_SAMPLES is not set |
1397 | CONFIG_HAVE_ARCH_KGDB=y | 1333 | CONFIG_HAVE_ARCH_KGDB=y |
1398 | # CONFIG_KGDB is not set | 1334 | # CONFIG_KGDB is not set |
1335 | # CONFIG_PPC_DISABLE_WERROR is not set | ||
1336 | CONFIG_PPC_WERROR=y | ||
1399 | CONFIG_PRINT_STACK_DEPTH=64 | 1337 | CONFIG_PRINT_STACK_DEPTH=64 |
1400 | CONFIG_DEBUG_STACKOVERFLOW=y | 1338 | CONFIG_DEBUG_STACKOVERFLOW=y |
1401 | # CONFIG_DEBUG_STACK_USAGE is not set | 1339 | # CONFIG_DEBUG_STACK_USAGE is not set |
1340 | # CONFIG_PPC_EMULATED_STATS is not set | ||
1402 | # CONFIG_CODE_PATCHING_SELFTEST is not set | 1341 | # CONFIG_CODE_PATCHING_SELFTEST is not set |
1403 | # CONFIG_FTR_FIXUP_SELFTEST is not set | 1342 | # CONFIG_FTR_FIXUP_SELFTEST is not set |
1404 | # CONFIG_MSI_BITMAP_SELFTEST is not set | 1343 | # CONFIG_MSI_BITMAP_SELFTEST is not set |
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index b44aaabdd1a6..0c34371ec49c 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h | |||
@@ -424,6 +424,29 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | |||
424 | #endif | 424 | #endif |
425 | } | 425 | } |
426 | 426 | ||
427 | static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) | ||
428 | { | ||
429 | struct dma_mapping_ops *ops = get_dma_ops(dev); | ||
430 | |||
431 | if (ops->addr_needs_map && ops->addr_needs_map(dev, addr, size)) | ||
432 | return 0; | ||
433 | |||
434 | if (!dev->dma_mask) | ||
435 | return 0; | ||
436 | |||
437 | return addr + size <= *dev->dma_mask; | ||
438 | } | ||
439 | |||
440 | static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) | ||
441 | { | ||
442 | return paddr + get_dma_direct_offset(dev); | ||
443 | } | ||
444 | |||
445 | static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) | ||
446 | { | ||
447 | return daddr - get_dma_direct_offset(dev); | ||
448 | } | ||
449 | |||
427 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) | 450 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) |
428 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) | 451 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) |
429 | #ifdef CONFIG_NOT_COHERENT_CACHE | 452 | #ifdef CONFIG_NOT_COHERENT_CACHE |
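The dma_capable(), phys_to_dma() and dma_to_phys() helpers added above centralize the "can this device reach this bus address" test that the swiotlb code previously had to hook per architecture. A minimal sketch of how a bounce-buffer decision can be built on them — swiotlb_bounce_map() is a hypothetical stand-in for the real swiotlb mapping path, not part of this patch:

    /* Sketch only: map directly when the device can reach the address,
     * otherwise fall back to a bounce buffer. */
    static dma_addr_t map_or_bounce(struct device *dev, phys_addr_t paddr,
                                    size_t size)
    {
            dma_addr_t dev_addr = phys_to_dma(dev, paddr);

            if (dma_capable(dev, dev_addr, size))
                    return dev_addr;                /* reachable directly */

            return swiotlb_bounce_map(dev, paddr, size);    /* hypothetical */
    }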
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index eb17da781128..2a5da069714e 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h | |||
@@ -104,8 +104,8 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, | |||
104 | else | 104 | else |
105 | pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte)); | 105 | pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte)); |
106 | 106 | ||
107 | #elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP) | 107 | #elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) |
108 | /* Second case is 32-bit with 64-bit PTE in SMP mode. In this case, we | 108 | /* Second case is 32-bit with 64-bit PTE. In this case, we |
109 | * can just store as long as we do the two halves in the right order | 109 | * can just store as long as we do the two halves in the right order |
110 | * with a barrier in between. This is possible because we take care, | 110 | * with a barrier in between. This is possible because we take care, |
111 | * in the hash code, to pre-invalidate if the PTE was already hashed, | 111 | * in the hash code, to pre-invalidate if the PTE was already hashed, |
@@ -140,7 +140,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, | |||
140 | 140 | ||
141 | #else | 141 | #else |
142 | /* Anything else just stores the PTE normally. That covers all 64-bit | 142 | /* Anything else just stores the PTE normally. That covers all 64-bit |
143 | * cases, and 32-bit non-hash with 64-bit PTEs in UP mode | 143 | * cases, and 32-bit non-hash with 32-bit PTEs. |
144 | */ | 144 | */ |
145 | *ptep = pte; | 145 | *ptep = pte; |
146 | #endif | 146 | #endif |
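The 32-bit/64-bit-PTE case above depends on storing the two 32-bit halves of the PTE in a fixed order with a barrier in between, so a concurrent walker never pairs a fresh valid bit with a stale other half. A C-level sketch of that ordering — which half actually carries the valid bit depends on the PTE layout, and the kernel does this inline in __set_pte_at() rather than through a helper like this:

    /* Sketch, assuming half_with_valid is the word holding _PAGE_PRESENT:
     * write the other half first, order the stores, then make the PTE
     * valid with the final store. */
    static inline void set_pte_two_halves(volatile u32 *half_no_valid,
                                          volatile u32 *half_with_valid,
                                          u32 v_no_valid, u32 v_with_valid)
    {
            *half_no_valid = v_no_valid;     /* PTE still looks invalid here */
            wmb();                           /* order the two stores */
            *half_with_valid = v_with_valid; /* now the PTE becomes visible */
    }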
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index c3b193121f81..198266cf9e2d 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h | |||
@@ -54,7 +54,7 @@ | |||
54 | * This returns the old value in the lock, so we succeeded | 54 | * This returns the old value in the lock, so we succeeded |
55 | * in getting the lock if the return value is 0. | 55 | * in getting the lock if the return value is 0. |
56 | */ | 56 | */ |
57 | static inline unsigned long __spin_trylock(raw_spinlock_t *lock) | 57 | static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock) |
58 | { | 58 | { |
59 | unsigned long tmp, token; | 59 | unsigned long tmp, token; |
60 | 60 | ||
@@ -76,7 +76,7 @@ static inline unsigned long __spin_trylock(raw_spinlock_t *lock) | |||
76 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) | 76 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) |
77 | { | 77 | { |
78 | CLEAR_IO_SYNC; | 78 | CLEAR_IO_SYNC; |
79 | return __spin_trylock(lock) == 0; | 79 | return arch_spin_trylock(lock) == 0; |
80 | } | 80 | } |
81 | 81 | ||
82 | /* | 82 | /* |
@@ -108,7 +108,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) | |||
108 | { | 108 | { |
109 | CLEAR_IO_SYNC; | 109 | CLEAR_IO_SYNC; |
110 | while (1) { | 110 | while (1) { |
111 | if (likely(__spin_trylock(lock) == 0)) | 111 | if (likely(arch_spin_trylock(lock) == 0)) |
112 | break; | 112 | break; |
113 | do { | 113 | do { |
114 | HMT_low(); | 114 | HMT_low(); |
@@ -126,7 +126,7 @@ void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) | |||
126 | 126 | ||
127 | CLEAR_IO_SYNC; | 127 | CLEAR_IO_SYNC; |
128 | while (1) { | 128 | while (1) { |
129 | if (likely(__spin_trylock(lock) == 0)) | 129 | if (likely(arch_spin_trylock(lock) == 0)) |
130 | break; | 130 | break; |
131 | local_save_flags(flags_dis); | 131 | local_save_flags(flags_dis); |
132 | local_irq_restore(flags); | 132 | local_irq_restore(flags); |
@@ -181,7 +181,7 @@ extern void __raw_spin_unlock_wait(raw_spinlock_t *lock); | |||
181 | * This returns the old value in the lock + 1, | 181 | * This returns the old value in the lock + 1, |
182 | * so we got a read lock if the return value is > 0. | 182 | * so we got a read lock if the return value is > 0. |
183 | */ | 183 | */ |
184 | static inline long __read_trylock(raw_rwlock_t *rw) | 184 | static inline long arch_read_trylock(raw_rwlock_t *rw) |
185 | { | 185 | { |
186 | long tmp; | 186 | long tmp; |
187 | 187 | ||
@@ -205,7 +205,7 @@ static inline long __read_trylock(raw_rwlock_t *rw) | |||
205 | * This returns the old value in the lock, | 205 | * This returns the old value in the lock, |
206 | * so we got the write lock if the return value is 0. | 206 | * so we got the write lock if the return value is 0. |
207 | */ | 207 | */ |
208 | static inline long __write_trylock(raw_rwlock_t *rw) | 208 | static inline long arch_write_trylock(raw_rwlock_t *rw) |
209 | { | 209 | { |
210 | long tmp, token; | 210 | long tmp, token; |
211 | 211 | ||
@@ -228,7 +228,7 @@ static inline long __write_trylock(raw_rwlock_t *rw) | |||
228 | static inline void __raw_read_lock(raw_rwlock_t *rw) | 228 | static inline void __raw_read_lock(raw_rwlock_t *rw) |
229 | { | 229 | { |
230 | while (1) { | 230 | while (1) { |
231 | if (likely(__read_trylock(rw) > 0)) | 231 | if (likely(arch_read_trylock(rw) > 0)) |
232 | break; | 232 | break; |
233 | do { | 233 | do { |
234 | HMT_low(); | 234 | HMT_low(); |
@@ -242,7 +242,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) | |||
242 | static inline void __raw_write_lock(raw_rwlock_t *rw) | 242 | static inline void __raw_write_lock(raw_rwlock_t *rw) |
243 | { | 243 | { |
244 | while (1) { | 244 | while (1) { |
245 | if (likely(__write_trylock(rw) == 0)) | 245 | if (likely(arch_write_trylock(rw) == 0)) |
246 | break; | 246 | break; |
247 | do { | 247 | do { |
248 | HMT_low(); | 248 | HMT_low(); |
@@ -255,12 +255,12 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) | |||
255 | 255 | ||
256 | static inline int __raw_read_trylock(raw_rwlock_t *rw) | 256 | static inline int __raw_read_trylock(raw_rwlock_t *rw) |
257 | { | 257 | { |
258 | return __read_trylock(rw) > 0; | 258 | return arch_read_trylock(rw) > 0; |
259 | } | 259 | } |
260 | 260 | ||
261 | static inline int __raw_write_trylock(raw_rwlock_t *rw) | 261 | static inline int __raw_write_trylock(raw_rwlock_t *rw) |
262 | { | 262 | { |
263 | return __write_trylock(rw) == 0; | 263 | return arch_write_trylock(rw) == 0; |
264 | } | 264 | } |
265 | 265 | ||
266 | static inline void __raw_read_unlock(raw_rwlock_t *rw) | 266 | static inline void __raw_read_unlock(raw_rwlock_t *rw) |
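The renames above (arch_spin_trylock() and friends) don't change the locking structure: try once, and on failure spin at low SMT priority until the lock word looks free, then retry. A condensed sketch of that loop, mirroring __raw_spin_lock() above without the shared-processor yield:

    /* Sketch of the acquire loop built on arch_spin_trylock(); HMT_low()
     * and HMT_medium() lower and restore SMT thread priority while we
     * busy-wait on lock->slock. */
    static inline void spin_lock_sketch(raw_spinlock_t *lock)
    {
            while (1) {
                    if (likely(arch_spin_trylock(lock) == 0))
                            break;                  /* old value 0: we own it */
                    do {
                            HMT_low();              /* yield pipeline resources */
                    } while (lock->slock);          /* wait until it looks free */
                    HMT_medium();
            }
    }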
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index b73396b93905..9619285f64e8 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile | |||
@@ -97,7 +97,7 @@ obj64-$(CONFIG_AUDIT) += compat_audit.o | |||
97 | 97 | ||
98 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o | 98 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o |
99 | obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o | 99 | obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o |
100 | obj-$(CONFIG_PPC_PERF_CTRS) += perf_counter.o | 100 | obj-$(CONFIG_PPC_PERF_CTRS) += perf_counter.o perf_callchain.o |
101 | obj64-$(CONFIG_PPC_PERF_CTRS) += power4-pmu.o ppc970-pmu.o power5-pmu.o \ | 101 | obj64-$(CONFIG_PPC_PERF_CTRS) += power4-pmu.o ppc970-pmu.o power5-pmu.o \ |
102 | power5+-pmu.o power6-pmu.o power7-pmu.o | 102 | power5+-pmu.o power6-pmu.o power7-pmu.o |
103 | obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o | 103 | obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o |
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 561b64652311..197b15646eeb 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c | |||
@@ -67,6 +67,8 @@ int main(void) | |||
67 | DEFINE(MMCONTEXTID, offsetof(struct mm_struct, context.id)); | 67 | DEFINE(MMCONTEXTID, offsetof(struct mm_struct, context.id)); |
68 | #ifdef CONFIG_PPC64 | 68 | #ifdef CONFIG_PPC64 |
69 | DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context)); | 69 | DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context)); |
70 | DEFINE(SIGSEGV, SIGSEGV); | ||
71 | DEFINE(NMI_MASK, NMI_MASK); | ||
70 | #else | 72 | #else |
71 | DEFINE(THREAD_INFO, offsetof(struct task_struct, stack)); | 73 | DEFINE(THREAD_INFO, offsetof(struct task_struct, stack)); |
72 | #endif /* CONFIG_PPC64 */ | 74 | #endif /* CONFIG_PPC64 */ |
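The DEFINE()s added above work because asm-offsets.c is compiled to assembly, the expression values are emitted as immediates, and the build scrapes them into a generated asm-offsets.h of plain #defines that entry code (such as the NMI_MASK test in exceptions-64s.S below) can use. The macro itself lives in include/linux/kbuild.h and is essentially:

    /* Emit "->SYM value" markers into the compiled assembly; a sed
     * script in the build turns each marker into "#define SYM value". */
    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))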
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c index 68ccf11e4f19..e8a57de85bcf 100644 --- a/arch/powerpc/kernel/dma-swiotlb.c +++ b/arch/powerpc/kernel/dma-swiotlb.c | |||
@@ -24,50 +24,12 @@ | |||
24 | int swiotlb __read_mostly; | 24 | int swiotlb __read_mostly; |
25 | unsigned int ppc_swiotlb_enable; | 25 | unsigned int ppc_swiotlb_enable; |
26 | 26 | ||
27 | void *swiotlb_bus_to_virt(struct device *hwdev, dma_addr_t addr) | ||
28 | { | ||
29 | unsigned long pfn = PFN_DOWN(swiotlb_bus_to_phys(hwdev, addr)); | ||
30 | void *pageaddr = page_address(pfn_to_page(pfn)); | ||
31 | |||
32 | if (pageaddr != NULL) | ||
33 | return pageaddr + (addr % PAGE_SIZE); | ||
34 | return NULL; | ||
35 | } | ||
36 | |||
37 | dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr) | ||
38 | { | ||
39 | return paddr + get_dma_direct_offset(hwdev); | ||
40 | } | ||
41 | |||
42 | phys_addr_t swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr) | ||
43 | |||
44 | { | ||
45 | return baddr - get_dma_direct_offset(hwdev); | ||
46 | } | ||
47 | |||
48 | /* | ||
49 | * Determine if an address needs bounce buffering via swiotlb. | ||
50 | * Going forward I expect the swiotlb code to generalize on using | ||
51 | * a dma_ops->addr_needs_map, and this function will move from here to the | ||
52 | * generic swiotlb code. | ||
53 | */ | ||
54 | int | ||
55 | swiotlb_arch_address_needs_mapping(struct device *hwdev, dma_addr_t addr, | ||
56 | size_t size) | ||
57 | { | ||
58 | struct dma_mapping_ops *dma_ops = get_dma_ops(hwdev); | ||
59 | |||
60 | BUG_ON(!dma_ops); | ||
61 | return dma_ops->addr_needs_map(hwdev, addr, size); | ||
62 | } | ||
63 | |||
64 | /* | 27 | /* |
65 | * Determine if an address is reachable by a pci device, or if we must bounce. | 28 | * Determine if an address is reachable by a pci device, or if we must bounce. |
66 | */ | 29 | */ |
67 | static int | 30 | static int |
68 | swiotlb_pci_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size) | 31 | swiotlb_pci_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size) |
69 | { | 32 | { |
70 | u64 mask = dma_get_mask(hwdev); | ||
71 | dma_addr_t max; | 33 | dma_addr_t max; |
72 | struct pci_controller *hose; | 34 | struct pci_controller *hose; |
73 | struct pci_dev *pdev = to_pci_dev(hwdev); | 35 | struct pci_dev *pdev = to_pci_dev(hwdev); |
@@ -79,16 +41,9 @@ swiotlb_pci_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size) | |||
79 | if ((addr + size > max) | (addr < hose->dma_window_base_cur)) | 41 | if ((addr + size > max) | (addr < hose->dma_window_base_cur)) |
80 | return 1; | 42 | return 1; |
81 | 43 | ||
82 | return !is_buffer_dma_capable(mask, addr, size); | 44 | return 0; |
83 | } | ||
84 | |||
85 | static int | ||
86 | swiotlb_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size) | ||
87 | { | ||
88 | return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size); | ||
89 | } | 45 | } |
90 | 46 | ||
91 | |||
92 | /* | 47 | /* |
93 | * At the moment, all platforms that use this code only require | 48 | * At the moment, all platforms that use this code only require |
94 | * swiotlb to be used if we're operating on HIGHMEM. Since | 49 | * swiotlb to be used if we're operating on HIGHMEM. Since |
@@ -104,7 +59,6 @@ struct dma_mapping_ops swiotlb_dma_ops = { | |||
104 | .dma_supported = swiotlb_dma_supported, | 59 | .dma_supported = swiotlb_dma_supported, |
105 | .map_page = swiotlb_map_page, | 60 | .map_page = swiotlb_map_page, |
106 | .unmap_page = swiotlb_unmap_page, | 61 | .unmap_page = swiotlb_unmap_page, |
107 | .addr_needs_map = swiotlb_addr_needs_map, | ||
108 | .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu, | 62 | .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu, |
109 | .sync_single_range_for_device = swiotlb_sync_single_range_for_device, | 63 | .sync_single_range_for_device = swiotlb_sync_single_range_for_device, |
110 | .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, | 64 | .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, |
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index eb898112e577..8ac85e08ffae 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S | |||
@@ -729,6 +729,11 @@ BEGIN_FTR_SECTION | |||
729 | bne- do_ste_alloc /* If so handle it */ | 729 | bne- do_ste_alloc /* If so handle it */ |
730 | END_FTR_SECTION_IFCLR(CPU_FTR_SLB) | 730 | END_FTR_SECTION_IFCLR(CPU_FTR_SLB) |
731 | 731 | ||
732 | clrrdi r11,r1,THREAD_SHIFT | ||
733 | lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */ | ||
734 | andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */ | ||
735 | bne 77f /* then don't call hash_page now */ | ||
736 | |||
732 | /* | 737 | /* |
733 | * On iSeries, we soft-disable interrupts here, then | 738 | * On iSeries, we soft-disable interrupts here, then |
734 | * hard-enable interrupts so that the hash_page code can spin on | 739 | * hard-enable interrupts so that the hash_page code can spin on |
@@ -833,6 +838,20 @@ handle_page_fault: | |||
833 | bl .low_hash_fault | 838 | bl .low_hash_fault |
834 | b .ret_from_except | 839 | b .ret_from_except |
835 | 840 | ||
841 | /* | ||
842 | * We come here as a result of a DSI at a point where we don't want | ||
843 | * to call hash_page, such as when we are accessing memory (possibly | ||
844 | * user memory) inside a PMU interrupt that occurred while interrupts | ||
845 | * were soft-disabled. We want to invoke the exception handler for | ||
846 | * the access, or panic if there isn't a handler. | ||
847 | */ | ||
848 | 77: bl .save_nvgprs | ||
849 | mr r4,r3 | ||
850 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
851 | li r5,SIGSEGV | ||
852 | bl .bad_page_fault | ||
853 | b .ret_from_except | ||
854 | |||
836 | /* here we have a segment miss */ | 855 | /* here we have a segment miss */ |
837 | do_ste_alloc: | 856 | do_ste_alloc: |
838 | bl .ste_allocate /* try to insert stab entry */ | 857 | bl .ste_allocate /* try to insert stab entry */ |
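The new instructions test the NMI_MASK bits of the thread_info preempt count because a PMU interrupt taken while interrupts are soft-disabled runs as an "NMI", where calling hash_page() is unsafe. Roughly the C-level equivalent of the guard and the 77: fixup path (a sketch, with dar standing for the faulting address the assembly passes in r4):

    /* Sketch: if the DSI happened inside an NMI-context PMU interrupt,
     * skip hash_page() and report the fault directly. */
    static void handle_dsi_sketch(struct pt_regs *regs, unsigned long dar)
    {
            if (preempt_count() & NMI_MASK) {
                    /* PMU interrupt while soft-disabled: no hash_page() */
                    bad_page_fault(regs, dar, SIGSEGV);
                    return;
            }
            /* ... normal path: try hash_page(), then do_page_fault() ... */
    }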
diff --git a/arch/powerpc/kernel/perf_callchain.c b/arch/powerpc/kernel/perf_callchain.c new file mode 100644 index 000000000000..f74b62c67511 --- /dev/null +++ b/arch/powerpc/kernel/perf_callchain.c | |||
@@ -0,0 +1,527 @@ | |||
1 | /* | ||
2 | * Performance counter callchain support - powerpc architecture code | ||
3 | * | ||
4 | * Copyright © 2009 Paul Mackerras, IBM Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/sched.h> | ||
13 | #include <linux/perf_counter.h> | ||
14 | #include <linux/percpu.h> | ||
15 | #include <linux/uaccess.h> | ||
16 | #include <linux/mm.h> | ||
17 | #include <asm/ptrace.h> | ||
18 | #include <asm/pgtable.h> | ||
19 | #include <asm/sigcontext.h> | ||
20 | #include <asm/ucontext.h> | ||
21 | #include <asm/vdso.h> | ||
22 | #ifdef CONFIG_PPC64 | ||
23 | #include "ppc32.h" | ||
24 | #endif | ||
25 | |||
26 | /* | ||
27 | * Store another value in a callchain_entry. | ||
28 | */ | ||
29 | static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip) | ||
30 | { | ||
31 | unsigned int nr = entry->nr; | ||
32 | |||
33 | if (nr < PERF_MAX_STACK_DEPTH) { | ||
34 | entry->ip[nr] = ip; | ||
35 | entry->nr = nr + 1; | ||
36 | } | ||
37 | } | ||
38 | |||
39 | /* | ||
40 | * Is sp valid as the address of the next kernel stack frame after prev_sp? | ||
41 | * The next frame may be in a different stack area but should not go | ||
42 | * back down in the same stack area. | ||
43 | */ | ||
44 | static int valid_next_sp(unsigned long sp, unsigned long prev_sp) | ||
45 | { | ||
46 | if (sp & 0xf) | ||
47 | return 0; /* must be 16-byte aligned */ | ||
48 | if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD)) | ||
49 | return 0; | ||
50 | if (sp >= prev_sp + STACK_FRAME_OVERHEAD) | ||
51 | return 1; | ||
52 | /* | ||
53 | * sp could decrease when we jump off an interrupt stack | ||
54 | * back to the regular process stack. | ||
55 | */ | ||
56 | if ((sp & ~(THREAD_SIZE - 1)) != (prev_sp & ~(THREAD_SIZE - 1))) | ||
57 | return 1; | ||
58 | return 0; | ||
59 | } | ||
60 | |||
61 | static void perf_callchain_kernel(struct pt_regs *regs, | ||
62 | struct perf_callchain_entry *entry) | ||
63 | { | ||
64 | unsigned long sp, next_sp; | ||
65 | unsigned long next_ip; | ||
66 | unsigned long lr; | ||
67 | long level = 0; | ||
68 | unsigned long *fp; | ||
69 | |||
70 | lr = regs->link; | ||
71 | sp = regs->gpr[1]; | ||
72 | callchain_store(entry, PERF_CONTEXT_KERNEL); | ||
73 | callchain_store(entry, regs->nip); | ||
74 | |||
75 | if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD)) | ||
76 | return; | ||
77 | |||
78 | for (;;) { | ||
79 | fp = (unsigned long *) sp; | ||
80 | next_sp = fp[0]; | ||
81 | |||
82 | if (next_sp == sp + STACK_INT_FRAME_SIZE && | ||
83 | fp[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) { | ||
84 | /* | ||
85 | * This looks like an interrupt frame for an | ||
86 | * interrupt that occurred in the kernel | ||
87 | */ | ||
88 | regs = (struct pt_regs *)(sp + STACK_FRAME_OVERHEAD); | ||
89 | next_ip = regs->nip; | ||
90 | lr = regs->link; | ||
91 | level = 0; | ||
92 | callchain_store(entry, PERF_CONTEXT_KERNEL); | ||
93 | |||
94 | } else { | ||
95 | if (level == 0) | ||
96 | next_ip = lr; | ||
97 | else | ||
98 | next_ip = fp[STACK_FRAME_LR_SAVE]; | ||
99 | |||
100 | /* | ||
101 | * We can't tell which of the first two addresses | ||
102 | * we get are valid, but we can filter out the | ||
103 | * obviously bogus ones here. We replace them | ||
104 | * with 0 rather than removing them entirely so | ||
105 | * that userspace can tell which is which. | ||
106 | */ | ||
107 | if ((level == 1 && next_ip == lr) || | ||
108 | (level <= 1 && !kernel_text_address(next_ip))) | ||
109 | next_ip = 0; | ||
110 | |||
111 | ++level; | ||
112 | } | ||
113 | |||
114 | callchain_store(entry, next_ip); | ||
115 | if (!valid_next_sp(next_sp, sp)) | ||
116 | return; | ||
117 | sp = next_sp; | ||
118 | } | ||
119 | } | ||
120 | |||
121 | #ifdef CONFIG_PPC64 | ||
122 | |||
123 | #ifdef CONFIG_HUGETLB_PAGE | ||
124 | #define is_huge_psize(pagesize) (HPAGE_SHIFT && mmu_huge_psizes[pagesize]) | ||
125 | #else | ||
126 | #define is_huge_psize(pagesize) 0 | ||
127 | #endif | ||
128 | |||
129 | /* | ||
130 | * On 64-bit we don't want to invoke hash_page on user addresses from | ||
131 | * interrupt context, so if the access faults, we read the page tables | ||
132 | * to find which page (if any) is mapped and access it directly. | ||
133 | */ | ||
134 | static int read_user_stack_slow(void __user *ptr, void *ret, int nb) | ||
135 | { | ||
136 | pgd_t *pgdir; | ||
137 | pte_t *ptep, pte; | ||
138 | int pagesize; | ||
139 | unsigned long addr = (unsigned long) ptr; | ||
140 | unsigned long offset; | ||
141 | unsigned long pfn; | ||
142 | void *kaddr; | ||
143 | |||
144 | pgdir = current->mm->pgd; | ||
145 | if (!pgdir) | ||
146 | return -EFAULT; | ||
147 | |||
148 | pagesize = get_slice_psize(current->mm, addr); | ||
149 | |||
150 | /* align address to page boundary */ | ||
151 | offset = addr & ((1ul << mmu_psize_defs[pagesize].shift) - 1); | ||
152 | addr -= offset; | ||
153 | |||
154 | if (is_huge_psize(pagesize)) | ||
155 | ptep = huge_pte_offset(current->mm, addr); | ||
156 | else | ||
157 | ptep = find_linux_pte(pgdir, addr); | ||
158 | |||
159 | if (ptep == NULL) | ||
160 | return -EFAULT; | ||
161 | pte = *ptep; | ||
162 | if (!pte_present(pte) || !(pte_val(pte) & _PAGE_USER)) | ||
163 | return -EFAULT; | ||
164 | pfn = pte_pfn(pte); | ||
165 | if (!page_is_ram(pfn)) | ||
166 | return -EFAULT; | ||
167 | |||
168 | /* no highmem to worry about here */ | ||
169 | kaddr = pfn_to_kaddr(pfn); | ||
170 | memcpy(ret, kaddr + offset, nb); | ||
171 | return 0; | ||
172 | } | ||
173 | |||
174 | static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret) | ||
175 | { | ||
176 | if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned long) || | ||
177 | ((unsigned long)ptr & 7)) | ||
178 | return -EFAULT; | ||
179 | |||
180 | if (!__get_user_inatomic(*ret, ptr)) | ||
181 | return 0; | ||
182 | |||
183 | return read_user_stack_slow(ptr, ret, 8); | ||
184 | } | ||
185 | |||
186 | static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret) | ||
187 | { | ||
188 | if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) || | ||
189 | ((unsigned long)ptr & 3)) | ||
190 | return -EFAULT; | ||
191 | |||
192 | if (!__get_user_inatomic(*ret, ptr)) | ||
193 | return 0; | ||
194 | |||
195 | return read_user_stack_slow(ptr, ret, 4); | ||
196 | } | ||
197 | |||
198 | static inline int valid_user_sp(unsigned long sp, int is_64) | ||
199 | { | ||
200 | if (!sp || (sp & 7) || sp > (is_64 ? TASK_SIZE : 0x100000000UL) - 32) | ||
201 | return 0; | ||
202 | return 1; | ||
203 | } | ||
204 | |||
205 | /* | ||
206 | * 64-bit user processes use the same stack frame for RT and non-RT signals. | ||
207 | */ | ||
208 | struct signal_frame_64 { | ||
209 | char dummy[__SIGNAL_FRAMESIZE]; | ||
210 | struct ucontext uc; | ||
211 | unsigned long unused[2]; | ||
212 | unsigned int tramp[6]; | ||
213 | struct siginfo *pinfo; | ||
214 | void *puc; | ||
215 | struct siginfo info; | ||
216 | char abigap[288]; | ||
217 | }; | ||
218 | |||
219 | static int is_sigreturn_64_address(unsigned long nip, unsigned long fp) | ||
220 | { | ||
221 | if (nip == fp + offsetof(struct signal_frame_64, tramp)) | ||
222 | return 1; | ||
223 | if (vdso64_rt_sigtramp && current->mm->context.vdso_base && | ||
224 | nip == current->mm->context.vdso_base + vdso64_rt_sigtramp) | ||
225 | return 1; | ||
226 | return 0; | ||
227 | } | ||
228 | |||
229 | /* | ||
230 | * Do some sanity checking on the signal frame pointed to by sp. | ||
231 | * We check the pinfo and puc pointers in the frame. | ||
232 | */ | ||
233 | static int sane_signal_64_frame(unsigned long sp) | ||
234 | { | ||
235 | struct signal_frame_64 __user *sf; | ||
236 | unsigned long pinfo, puc; | ||
237 | |||
238 | sf = (struct signal_frame_64 __user *) sp; | ||
239 | if (read_user_stack_64((unsigned long __user *) &sf->pinfo, &pinfo) || | ||
240 | read_user_stack_64((unsigned long __user *) &sf->puc, &puc)) | ||
241 | return 0; | ||
242 | return pinfo == (unsigned long) &sf->info && | ||
243 | puc == (unsigned long) &sf->uc; | ||
244 | } | ||
245 | |||
246 | static void perf_callchain_user_64(struct pt_regs *regs, | ||
247 | struct perf_callchain_entry *entry) | ||
248 | { | ||
249 | unsigned long sp, next_sp; | ||
250 | unsigned long next_ip; | ||
251 | unsigned long lr; | ||
252 | long level = 0; | ||
253 | struct signal_frame_64 __user *sigframe; | ||
254 | unsigned long __user *fp, *uregs; | ||
255 | |||
256 | next_ip = regs->nip; | ||
257 | lr = regs->link; | ||
258 | sp = regs->gpr[1]; | ||
259 | callchain_store(entry, PERF_CONTEXT_USER); | ||
260 | callchain_store(entry, next_ip); | ||
261 | |||
262 | for (;;) { | ||
263 | fp = (unsigned long __user *) sp; | ||
264 | if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp)) | ||
265 | return; | ||
266 | if (level > 0 && read_user_stack_64(&fp[2], &next_ip)) | ||
267 | return; | ||
268 | |||
269 | /* | ||
270 | * Note: the next_sp - sp >= signal frame size check | ||
271 | * is true when next_sp < sp, which can happen when | ||
272 | * transitioning from an alternate signal stack to the | ||
273 | * normal stack. | ||
274 | */ | ||
275 | if (next_sp - sp >= sizeof(struct signal_frame_64) && | ||
276 | (is_sigreturn_64_address(next_ip, sp) || | ||
277 | (level <= 1 && is_sigreturn_64_address(lr, sp))) && | ||
278 | sane_signal_64_frame(sp)) { | ||
279 | /* | ||
280 | * This looks like a signal frame | ||

281 | */ | ||
282 | sigframe = (struct signal_frame_64 __user *) sp; | ||
283 | uregs = sigframe->uc.uc_mcontext.gp_regs; | ||
284 | if (read_user_stack_64(&uregs[PT_NIP], &next_ip) || | ||
285 | read_user_stack_64(&uregs[PT_LNK], &lr) || | ||
286 | read_user_stack_64(&uregs[PT_R1], &sp)) | ||
287 | return; | ||
288 | level = 0; | ||
289 | callchain_store(entry, PERF_CONTEXT_USER); | ||
290 | callchain_store(entry, next_ip); | ||
291 | continue; | ||
292 | } | ||
293 | |||
294 | if (level == 0) | ||
295 | next_ip = lr; | ||
296 | callchain_store(entry, next_ip); | ||
297 | ++level; | ||
298 | sp = next_sp; | ||
299 | } | ||
300 | } | ||
301 | |||
302 | static inline int current_is_64bit(void) | ||
303 | { | ||
304 | /* | ||
305 | * We can't use test_thread_flag() here because we may be on an | ||
306 | * interrupt stack, and the thread flags don't get copied over | ||
307 | * from the thread_info on the main stack to the interrupt stack. | ||
308 | */ | ||
309 | return !test_ti_thread_flag(task_thread_info(current), TIF_32BIT); | ||
310 | } | ||
311 | |||
312 | #else /* CONFIG_PPC64 */ | ||
313 | /* | ||
314 | * On 32-bit we just access the address and let hash_page create a | ||
315 | * HPTE if necessary, so there is no need to fall back to reading | ||
316 | * the page tables. Since this is called at interrupt level, | ||
317 | * do_page_fault() won't treat a DSI as a page fault. | ||
318 | */ | ||
319 | static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret) | ||
320 | { | ||
321 | if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) || | ||
322 | ((unsigned long)ptr & 3)) | ||
323 | return -EFAULT; | ||
324 | |||
325 | return __get_user_inatomic(*ret, ptr); | ||
326 | } | ||
327 | |||
328 | static inline void perf_callchain_user_64(struct pt_regs *regs, | ||
329 | struct perf_callchain_entry *entry) | ||
330 | { | ||
331 | } | ||
332 | |||
333 | static inline int current_is_64bit(void) | ||
334 | { | ||
335 | return 0; | ||
336 | } | ||
337 | |||
338 | static inline int valid_user_sp(unsigned long sp, int is_64) | ||
339 | { | ||
340 | if (!sp || (sp & 7) || sp > TASK_SIZE - 32) | ||
341 | return 0; | ||
342 | return 1; | ||
343 | } | ||
344 | |||
345 | #define __SIGNAL_FRAMESIZE32 __SIGNAL_FRAMESIZE | ||
346 | #define sigcontext32 sigcontext | ||
347 | #define mcontext32 mcontext | ||
348 | #define ucontext32 ucontext | ||
349 | #define compat_siginfo_t struct siginfo | ||
350 | |||
351 | #endif /* CONFIG_PPC64 */ | ||
352 | |||
353 | /* | ||
354 | * Layout for non-RT signal frames | ||
355 | */ | ||
356 | struct signal_frame_32 { | ||
357 | char dummy[__SIGNAL_FRAMESIZE32]; | ||
358 | struct sigcontext32 sctx; | ||
359 | struct mcontext32 mctx; | ||
360 | int abigap[56]; | ||
361 | }; | ||
362 | |||
363 | /* | ||
364 | * Layout for RT signal frames | ||
365 | */ | ||
366 | struct rt_signal_frame_32 { | ||
367 | char dummy[__SIGNAL_FRAMESIZE32 + 16]; | ||
368 | compat_siginfo_t info; | ||
369 | struct ucontext32 uc; | ||
370 | int abigap[56]; | ||
371 | }; | ||
372 | |||
373 | static int is_sigreturn_32_address(unsigned int nip, unsigned int fp) | ||
374 | { | ||
375 | if (nip == fp + offsetof(struct signal_frame_32, mctx.mc_pad)) | ||
376 | return 1; | ||
377 | if (vdso32_sigtramp && current->mm->context.vdso_base && | ||
378 | nip == current->mm->context.vdso_base + vdso32_sigtramp) | ||
379 | return 1; | ||
380 | return 0; | ||
381 | } | ||
382 | |||
383 | static int is_rt_sigreturn_32_address(unsigned int nip, unsigned int fp) | ||
384 | { | ||
385 | if (nip == fp + offsetof(struct rt_signal_frame_32, | ||
386 | uc.uc_mcontext.mc_pad)) | ||
387 | return 1; | ||
388 | if (vdso32_rt_sigtramp && current->mm->context.vdso_base && | ||
389 | nip == current->mm->context.vdso_base + vdso32_rt_sigtramp) | ||
390 | return 1; | ||
391 | return 0; | ||
392 | } | ||
393 | |||
394 | static int sane_signal_32_frame(unsigned int sp) | ||
395 | { | ||
396 | struct signal_frame_32 __user *sf; | ||
397 | unsigned int regs; | ||
398 | |||
399 | sf = (struct signal_frame_32 __user *) (unsigned long) sp; | ||
400 | if (read_user_stack_32((unsigned int __user *) &sf->sctx.regs, ®s)) | ||
401 | return 0; | ||
402 | return regs == (unsigned long) &sf->mctx; | ||
403 | } | ||
404 | |||
405 | static int sane_rt_signal_32_frame(unsigned int sp) | ||
406 | { | ||
407 | struct rt_signal_frame_32 __user *sf; | ||
408 | unsigned int regs; | ||
409 | |||
410 | sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp; | ||
411 | if (read_user_stack_32((unsigned int __user *) &sf->uc.uc_regs, ®s)) | ||
412 | return 0; | ||
413 | return regs == (unsigned long) &sf->uc.uc_mcontext; | ||
414 | } | ||
415 | |||
416 | static unsigned int __user *signal_frame_32_regs(unsigned int sp, | ||
417 | unsigned int next_sp, unsigned int next_ip) | ||
418 | { | ||
419 | struct mcontext32 __user *mctx = NULL; | ||
420 | struct signal_frame_32 __user *sf; | ||
421 | struct rt_signal_frame_32 __user *rt_sf; | ||
422 | |||
423 | /* | ||
424 | * Note: the next_sp - sp >= signal frame size check | ||
425 | * is true when next_sp < sp, for example, when | ||
426 | * transitioning from an alternate signal stack to the | ||
427 | * normal stack. | ||
428 | */ | ||
429 | if (next_sp - sp >= sizeof(struct signal_frame_32) && | ||
430 | is_sigreturn_32_address(next_ip, sp) && | ||
431 | sane_signal_32_frame(sp)) { | ||
432 | sf = (struct signal_frame_32 __user *) (unsigned long) sp; | ||
433 | mctx = &sf->mctx; | ||
434 | } | ||
435 | |||
436 | if (!mctx && next_sp - sp >= sizeof(struct rt_signal_frame_32) && | ||
437 | is_rt_sigreturn_32_address(next_ip, sp) && | ||
438 | sane_rt_signal_32_frame(sp)) { | ||
439 | rt_sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp; | ||
440 | mctx = &rt_sf->uc.uc_mcontext; | ||
441 | } | ||
442 | |||
443 | if (!mctx) | ||
444 | return NULL; | ||
445 | return mctx->mc_gregs; | ||
446 | } | ||
447 | |||
448 | static void perf_callchain_user_32(struct pt_regs *regs, | ||
449 | struct perf_callchain_entry *entry) | ||
450 | { | ||
451 | unsigned int sp, next_sp; | ||
452 | unsigned int next_ip; | ||
453 | unsigned int lr; | ||
454 | long level = 0; | ||
455 | unsigned int __user *fp, *uregs; | ||
456 | |||
457 | next_ip = regs->nip; | ||
458 | lr = regs->link; | ||
459 | sp = regs->gpr[1]; | ||
460 | callchain_store(entry, PERF_CONTEXT_USER); | ||
461 | callchain_store(entry, next_ip); | ||
462 | |||
463 | while (entry->nr < PERF_MAX_STACK_DEPTH) { | ||
464 | fp = (unsigned int __user *) (unsigned long) sp; | ||
465 | if (!valid_user_sp(sp, 0) || read_user_stack_32(fp, &next_sp)) | ||
466 | return; | ||
467 | if (level > 0 && read_user_stack_32(&fp[1], &next_ip)) | ||
468 | return; | ||
469 | |||
470 | uregs = signal_frame_32_regs(sp, next_sp, next_ip); | ||
471 | if (!uregs && level <= 1) | ||
472 | uregs = signal_frame_32_regs(sp, next_sp, lr); | ||
473 | if (uregs) { | ||
474 | /* | ||
475 | * This looks like a signal frame, so restart | ||
476 | * the stack trace with the values in it. | ||
477 | */ | ||
478 | if (read_user_stack_32(&uregs[PT_NIP], &next_ip) || | ||
479 | read_user_stack_32(&uregs[PT_LNK], &lr) || | ||
480 | read_user_stack_32(&uregs[PT_R1], &sp)) | ||
481 | return; | ||
482 | level = 0; | ||
483 | callchain_store(entry, PERF_CONTEXT_USER); | ||
484 | callchain_store(entry, next_ip); | ||
485 | continue; | ||
486 | } | ||
487 | |||
488 | if (level == 0) | ||
489 | next_ip = lr; | ||
490 | callchain_store(entry, next_ip); | ||
491 | ++level; | ||
492 | sp = next_sp; | ||
493 | } | ||
494 | } | ||
495 | |||
496 | /* | ||
497 | * Since we can't get PMU interrupts inside a PMU interrupt handler, | ||
498 | * we don't need separate irq and nmi entries here. | ||
499 | */ | ||
500 | static DEFINE_PER_CPU(struct perf_callchain_entry, callchain); | ||
501 | |||
502 | struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) | ||
503 | { | ||
504 | struct perf_callchain_entry *entry = &__get_cpu_var(callchain); | ||
505 | |||
506 | entry->nr = 0; | ||
507 | |||
508 | if (current->pid == 0) /* idle task? */ | ||
509 | return entry; | ||
510 | |||
511 | if (!user_mode(regs)) { | ||
512 | perf_callchain_kernel(regs, entry); | ||
513 | if (current->mm) | ||
514 | regs = task_pt_regs(current); | ||
515 | else | ||
516 | regs = NULL; | ||
517 | } | ||
518 | |||
519 | if (regs) { | ||
520 | if (current_is_64bit()) | ||
521 | perf_callchain_user_64(regs, entry); | ||
522 | else | ||
523 | perf_callchain_user_32(regs, entry); | ||
524 | } | ||
525 | |||
526 | return entry; | ||
527 | } | ||
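perf_callchain() returns an entry array that interleaves PERF_CONTEXT_KERNEL/PERF_CONTEXT_USER markers with instruction pointers (a 0 IP means "present but could not be validated", per the comments in perf_callchain_kernel()). A hedged sketch of walking such an entry:

    /* Sketch of a consumer: context markers switch the current region,
     * everything else is a frame IP. */
    static void print_callchain(const struct perf_callchain_entry *entry)
    {
            const char *region = "?";
            unsigned int i;

            for (i = 0; i < entry->nr; i++) {
                    u64 ip = entry->ip[i];

                    if (ip == PERF_CONTEXT_KERNEL)
                            region = "kernel";
                    else if (ip == PERF_CONTEXT_USER)
                            region = "user";
                    else
                            printk(KERN_DEBUG "%s: %016llx\n", region,
                                   (unsigned long long)ip);
            }
    }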
diff --git a/arch/powerpc/kernel/power7-pmu.c b/arch/powerpc/kernel/power7-pmu.c index 388cf57ad827..018d094d92f9 100644 --- a/arch/powerpc/kernel/power7-pmu.c +++ b/arch/powerpc/kernel/power7-pmu.c | |||
@@ -317,7 +317,7 @@ static int power7_generic_events[] = { | |||
317 | */ | 317 | */ |
318 | static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { | 318 | static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { |
319 | [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */ | 319 | [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */ |
320 | [C(OP_READ)] = { 0x400f0, 0xc880 }, | 320 | [C(OP_READ)] = { 0xc880, 0x400f0 }, |
321 | [C(OP_WRITE)] = { 0, 0x300f0 }, | 321 | [C(OP_WRITE)] = { 0, 0x300f0 }, |
322 | [C(OP_PREFETCH)] = { 0xd8b8, 0 }, | 322 | [C(OP_PREFETCH)] = { 0xd8b8, 0 }, |
323 | }, | 323 | }, |
@@ -327,8 +327,8 @@ static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { | |||
327 | [C(OP_PREFETCH)] = { 0x408a, 0 }, | 327 | [C(OP_PREFETCH)] = { 0x408a, 0 }, |
328 | }, | 328 | }, |
329 | [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */ | 329 | [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */ |
330 | [C(OP_READ)] = { 0x6080, 0x6084 }, | 330 | [C(OP_READ)] = { 0x16080, 0x26080 }, |
331 | [C(OP_WRITE)] = { 0x6082, 0x6086 }, | 331 | [C(OP_WRITE)] = { 0x16082, 0x26082 }, |
332 | [C(OP_PREFETCH)] = { 0, 0 }, | 332 | [C(OP_PREFETCH)] = { 0, 0 }, |
333 | }, | 333 | }, |
334 | [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */ | 334 | [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */ |
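The two hunks above fix event codes that had been transposed between columns; the table is indexed as [cache][operation][result], with RESULT_ACCESS first, so after the fix:

    /* Illustration of the [cache][op][result] indexing, using the
     * table's own C() macro: */
    int l1d_read_access = power7_cache_events[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]; /* 0xc880  */
    int l1d_read_miss   = power7_cache_events[C(L1D)][C(OP_READ)][C(RESULT_MISS)];   /* 0x400f0 */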
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c index 5b7038f248b6..a685652effeb 100644 --- a/arch/powerpc/mm/slb.c +++ b/arch/powerpc/mm/slb.c | |||
@@ -92,15 +92,13 @@ static inline void create_shadowed_slbe(unsigned long ea, int ssize, | |||
92 | : "memory" ); | 92 | : "memory" ); |
93 | } | 93 | } |
94 | 94 | ||
95 | void slb_flush_and_rebolt(void) | 95 | static void __slb_flush_and_rebolt(void) |
96 | { | 96 | { |
97 | /* If you change this make sure you change SLB_NUM_BOLTED | 97 | /* If you change this make sure you change SLB_NUM_BOLTED |
98 | * appropriately too. */ | 98 | * appropriately too. */ |
99 | unsigned long linear_llp, vmalloc_llp, lflags, vflags; | 99 | unsigned long linear_llp, vmalloc_llp, lflags, vflags; |
100 | unsigned long ksp_esid_data, ksp_vsid_data; | 100 | unsigned long ksp_esid_data, ksp_vsid_data; |
101 | 101 | ||
102 | WARN_ON(!irqs_disabled()); | ||
103 | |||
104 | linear_llp = mmu_psize_defs[mmu_linear_psize].sllp; | 102 | linear_llp = mmu_psize_defs[mmu_linear_psize].sllp; |
105 | vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp; | 103 | vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp; |
106 | lflags = SLB_VSID_KERNEL | linear_llp; | 104 | lflags = SLB_VSID_KERNEL | linear_llp; |
@@ -117,12 +115,6 @@ void slb_flush_and_rebolt(void) | |||
117 | ksp_vsid_data = get_slb_shadow()->save_area[2].vsid; | 115 | ksp_vsid_data = get_slb_shadow()->save_area[2].vsid; |
118 | } | 116 | } |
119 | 117 | ||
120 | /* | ||
121 | * We can't take a PMU exception in the following code, so hard | ||
122 | * disable interrupts. | ||
123 | */ | ||
124 | hard_irq_disable(); | ||
125 | |||
126 | /* We need to do this all in asm, so we're sure we don't touch | 118 | /* We need to do this all in asm, so we're sure we don't touch |
127 | * the stack between the slbia and rebolting it. */ | 119 | * the stack between the slbia and rebolting it. */ |
128 | asm volatile("isync\n" | 120 | asm volatile("isync\n" |
@@ -139,6 +131,21 @@ void slb_flush_and_rebolt(void) | |||
139 | : "memory"); | 131 | : "memory"); |
140 | } | 132 | } |
141 | 133 | ||
134 | void slb_flush_and_rebolt(void) | ||
135 | { | ||
136 | |||
137 | WARN_ON(!irqs_disabled()); | ||
138 | |||
139 | /* | ||
140 | * We can't take a PMU exception in the following code, so hard | ||
141 | * disable interrupts. | ||
142 | */ | ||
143 | hard_irq_disable(); | ||
144 | |||
145 | __slb_flush_and_rebolt(); | ||
146 | get_paca()->slb_cache_ptr = 0; | ||
147 | } | ||
148 | |||
142 | void slb_vmalloc_update(void) | 149 | void slb_vmalloc_update(void) |
143 | { | 150 | { |
144 | unsigned long vflags; | 151 | unsigned long vflags; |
@@ -180,12 +187,20 @@ static inline int esids_match(unsigned long addr1, unsigned long addr2) | |||
180 | /* Flush all user entries from the segment table of the current processor. */ | 187 | /* Flush all user entries from the segment table of the current processor. */ |
181 | void switch_slb(struct task_struct *tsk, struct mm_struct *mm) | 188 | void switch_slb(struct task_struct *tsk, struct mm_struct *mm) |
182 | { | 189 | { |
183 | unsigned long offset = get_paca()->slb_cache_ptr; | 190 | unsigned long offset; |
184 | unsigned long slbie_data = 0; | 191 | unsigned long slbie_data = 0; |
185 | unsigned long pc = KSTK_EIP(tsk); | 192 | unsigned long pc = KSTK_EIP(tsk); |
186 | unsigned long stack = KSTK_ESP(tsk); | 193 | unsigned long stack = KSTK_ESP(tsk); |
187 | unsigned long unmapped_base; | 194 | unsigned long unmapped_base; |
188 | 195 | ||
196 | /* | ||
197 | * We need interrupts hard-disabled here, not just soft-disabled, | ||
198 | * so that a PMU interrupt can't occur, which might try to access | ||
199 | * user memory (to get a stack trace) and possibly cause an SLB miss | ||
200 | * which would update the slb_cache/slb_cache_ptr fields in the PACA. | ||
201 | */ | ||
202 | hard_irq_disable(); | ||
203 | offset = get_paca()->slb_cache_ptr; | ||
189 | if (!cpu_has_feature(CPU_FTR_NO_SLBIE_B) && | 204 | if (!cpu_has_feature(CPU_FTR_NO_SLBIE_B) && |
190 | offset <= SLB_CACHE_ENTRIES) { | 205 | offset <= SLB_CACHE_ENTRIES) { |
191 | int i; | 206 | int i; |
@@ -200,7 +215,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm) | |||
200 | } | 215 | } |
201 | asm volatile("isync" : : : "memory"); | 216 | asm volatile("isync" : : : "memory"); |
202 | } else { | 217 | } else { |
203 | slb_flush_and_rebolt(); | 218 | __slb_flush_and_rebolt(); |
204 | } | 219 | } |
205 | 220 | ||
206 | /* Workaround POWER5 < DD2.1 issue */ | 221 | /* Workaround POWER5 < DD2.1 issue */ |
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c index 98cd1dc2ae75..ab5fb48b3e90 100644 --- a/arch/powerpc/mm/stab.c +++ b/arch/powerpc/mm/stab.c | |||
@@ -164,7 +164,7 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm) | |||
164 | { | 164 | { |
165 | struct stab_entry *stab = (struct stab_entry *) get_paca()->stab_addr; | 165 | struct stab_entry *stab = (struct stab_entry *) get_paca()->stab_addr; |
166 | struct stab_entry *ste; | 166 | struct stab_entry *ste; |
167 | unsigned long offset = __get_cpu_var(stab_cache_ptr); | 167 | unsigned long offset; |
168 | unsigned long pc = KSTK_EIP(tsk); | 168 | unsigned long pc = KSTK_EIP(tsk); |
169 | unsigned long stack = KSTK_ESP(tsk); | 169 | unsigned long stack = KSTK_ESP(tsk); |
170 | unsigned long unmapped_base; | 170 | unsigned long unmapped_base; |
@@ -172,6 +172,15 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm) | |||
172 | /* Force previous translations to complete. DRENG */ | 172 | /* Force previous translations to complete. DRENG */ |
173 | asm volatile("isync" : : : "memory"); | 173 | asm volatile("isync" : : : "memory"); |
174 | 174 | ||
175 | /* | ||
176 | * We need interrupts hard-disabled here, not just soft-disabled, | ||
177 | * so that a PMU interrupt can't occur, which might try to access | ||
178 | * user memory (to get a stack trace) and possibly cause an STAB miss | ||
179 | * which would update the stab_cache/stab_cache_ptr per-cpu variables. | ||
180 | */ | ||
181 | hard_irq_disable(); | ||
182 | |||
183 | offset = __get_cpu_var(stab_cache_ptr); | ||
175 | if (offset <= NR_STAB_CACHE_ENTRIES) { | 184 | if (offset <= NR_STAB_CACHE_ENTRIES) { |
176 | int i; | 185 | int i; |
177 | 186 | ||
diff --git a/arch/powerpc/platforms/ps3/time.c b/arch/powerpc/platforms/ps3/time.c index b178a1e66c91..40b5cb433005 100644 --- a/arch/powerpc/platforms/ps3/time.c +++ b/arch/powerpc/platforms/ps3/time.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
22 | #include <linux/platform_device.h> | 22 | #include <linux/platform_device.h> |
23 | 23 | ||
24 | #include <asm/firmware.h> | ||
24 | #include <asm/rtc.h> | 25 | #include <asm/rtc.h> |
25 | #include <asm/lv1call.h> | 26 | #include <asm/lv1call.h> |
26 | #include <asm/ps3.h> | 27 | #include <asm/ps3.h> |
@@ -84,6 +85,9 @@ static int __init ps3_rtc_init(void) | |||
84 | { | 85 | { |
85 | struct platform_device *pdev; | 86 | struct platform_device *pdev; |
86 | 87 | ||
88 | if (!firmware_has_feature(FW_FEATURE_PS3_LV1)) | ||
89 | return -ENODEV; | ||
90 | |||
87 | pdev = platform_device_register_simple("rtc-ps3", -1, NULL, 0); | 91 | pdev = platform_device_register_simple("rtc-ps3", -1, NULL, 0); |
88 | if (IS_ERR(pdev)) | 92 | if (IS_ERR(pdev)) |
89 | return PTR_ERR(pdev); | 93 | return PTR_ERR(pdev); |
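The firmware_has_feature() test keeps the rtc-ps3 platform device from being registered when a PS3-capable kernel boots on other hardware; without it, the initcall runs on every PowerPC machine the kernel supports. The same guard pattern works for any platform-specific initcall, sketched here with a hypothetical init function:

    /* Sketch: bail out of a platform-specific initcall early on
     * foreign hardware instead of registering devices that can't work. */
    static int __init my_ps3_only_init(void)
    {
            if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
                    return -ENODEV;         /* not PS3 firmware: do nothing */
            /* ... register platform devices here ... */
            return 0;
    }
    module_init(my_ps3_only_init);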
diff --git a/arch/powerpc/sysdev/xilinx_intc.c b/arch/powerpc/sysdev/xilinx_intc.c index 3ee1fd37bbfc..40edad520770 100644 --- a/arch/powerpc/sysdev/xilinx_intc.c +++ b/arch/powerpc/sysdev/xilinx_intc.c | |||
@@ -234,7 +234,6 @@ static void xilinx_i8259_cascade(unsigned int irq, struct irq_desc *desc) | |||
234 | generic_handle_irq(cascade_irq); | 234 | generic_handle_irq(cascade_irq); |
235 | 235 | ||
236 | /* Let xilinx_intc end the interrupt */ | 236 | /* Let xilinx_intc end the interrupt */ |
237 | desc->chip->ack(irq); | ||
238 | desc->chip->unmask(irq); | 237 | desc->chip->unmask(irq); |
239 | } | 238 | } |
240 | 239 | ||
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 2ae5d72f47ed..1c866efd217d 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -84,7 +84,7 @@ config S390 | |||
84 | select HAVE_FUNCTION_TRACER | 84 | select HAVE_FUNCTION_TRACER |
85 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST | 85 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST |
86 | select HAVE_FTRACE_MCOUNT_RECORD | 86 | select HAVE_FTRACE_MCOUNT_RECORD |
87 | select HAVE_FTRACE_SYSCALLS | 87 | select HAVE_SYSCALL_TRACEPOINTS |
88 | select HAVE_DYNAMIC_FTRACE | 88 | select HAVE_DYNAMIC_FTRACE |
89 | select HAVE_FUNCTION_GRAPH_TRACER | 89 | select HAVE_FUNCTION_GRAPH_TRACER |
90 | select HAVE_DEFAULT_NO_SPIN_MUTEXES | 90 | select HAVE_DEFAULT_NO_SPIN_MUTEXES |
@@ -95,7 +95,6 @@ config S390 | |||
95 | select HAVE_ARCH_TRACEHOOK | 95 | select HAVE_ARCH_TRACEHOOK |
96 | select INIT_ALL_POSSIBLE | 96 | select INIT_ALL_POSSIBLE |
97 | select HAVE_PERF_COUNTERS | 97 | select HAVE_PERF_COUNTERS |
98 | select GENERIC_ATOMIC64 if !64BIT | ||
99 | 98 | ||
100 | config SCHED_OMIT_FRAME_POINTER | 99 | config SCHED_OMIT_FRAME_POINTER |
101 | bool | 100 | bool |
@@ -481,13 +480,6 @@ config CMM_IUCV | |||
481 | Select this option to enable the special message interface to | 480 | Select this option to enable the special message interface to |
482 | the cooperative memory management. | 481 | the cooperative memory management. |
483 | 482 | ||
484 | config PAGE_STATES | ||
485 | bool "Unused page notification" | ||
486 | help | ||
487 | This enables the notification of unused pages to the | ||
488 | hypervisor. The ESSA instruction is used to do the states | ||
489 | changes between a page that has content and the unused state. | ||
490 | |||
491 | config APPLDATA_BASE | 483 | config APPLDATA_BASE |
492 | bool "Linux - VM Monitor Stream, base infrastructure" | 484 | bool "Linux - VM Monitor Stream, base infrastructure" |
493 | depends on PROC_FS | 485 | depends on PROC_FS |
diff --git a/arch/s390/Makefile b/arch/s390/Makefile index 0ff387cebf88..fc8fb20e7fc0 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile | |||
@@ -88,8 +88,7 @@ LDFLAGS_vmlinux := -e start | |||
88 | head-y := arch/s390/kernel/head.o arch/s390/kernel/init_task.o | 88 | head-y := arch/s390/kernel/head.o arch/s390/kernel/init_task.o |
89 | 89 | ||
90 | core-y += arch/s390/mm/ arch/s390/kernel/ arch/s390/crypto/ \ | 90 | core-y += arch/s390/mm/ arch/s390/kernel/ arch/s390/crypto/ \ |
91 | arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/ \ | 91 | arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/ |
92 | arch/s390/power/ | ||
93 | 92 | ||
94 | libs-y += arch/s390/lib/ | 93 | libs-y += arch/s390/lib/ |
95 | drivers-y += drivers/s390/ | 94 | drivers-y += drivers/s390/ |
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c index 4aba83b31596..2bc479ab3a66 100644 --- a/arch/s390/crypto/des_s390.c +++ b/arch/s390/crypto/des_s390.c | |||
@@ -250,8 +250,9 @@ static int des3_128_setkey(struct crypto_tfm *tfm, const u8 *key, | |||
250 | const u8 *temp_key = key; | 250 | const u8 *temp_key = key; |
251 | u32 *flags = &tfm->crt_flags; | 251 | u32 *flags = &tfm->crt_flags; |
252 | 252 | ||
253 | if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE))) { | 253 | if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE)) && |
254 | *flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED; | 254 | (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { |
255 | *flags |= CRYPTO_TFM_RES_WEAK_KEY; | ||
255 | return -EINVAL; | 256 | return -EINVAL; |
256 | } | 257 | } |
257 | for (i = 0; i < 2; i++, temp_key += DES_KEY_SIZE) { | 258 | for (i = 0; i < 2; i++, temp_key += DES_KEY_SIZE) { |
@@ -411,9 +412,9 @@ static int des3_192_setkey(struct crypto_tfm *tfm, const u8 *key, | |||
411 | 412 | ||
412 | if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) && | 413 | if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) && |
413 | memcmp(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2], | 414 | memcmp(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2], |
414 | DES_KEY_SIZE))) { | 415 | DES_KEY_SIZE)) && |
415 | 416 | (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { | |
416 | *flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED; | 417 | *flags |= CRYPTO_TFM_RES_WEAK_KEY; |
417 | return -EINVAL; | 418 | return -EINVAL; |
418 | } | 419 | } |
419 | for (i = 0; i < 3; i++, temp_key += DES_KEY_SIZE) { | 420 | for (i = 0; i < 3; i++, temp_key += DES_KEY_SIZE) { |
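The setkey changes above turn the "all subkeys identical" case into a weak-key rejection that fires only when the user opted in, and report it as CRYPTO_TFM_RES_WEAK_KEY rather than the misleading BAD_KEY_SCHED. A hedged usage sketch (algorithm name and error handling illustrative, not taken from this patch):

    /* Sketch: opt in to weak-key checking, after which a degenerate
     * 3DES key (identical subkeys) is rejected with -EINVAL. */
    struct crypto_blkcipher *tfm =
            crypto_alloc_blkcipher("cbc(des3_ede)", 0, 0);

    if (!IS_ERR(tfm)) {
            crypto_blkcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
            if (crypto_blkcipher_setkey(tfm, key, 3 * DES_KEY_SIZE))
                    pr_err("weak 3DES key rejected\n");
    }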
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c index e85ba348722a..f6de7826c979 100644 --- a/arch/s390/crypto/sha1_s390.c +++ b/arch/s390/crypto/sha1_s390.c | |||
@@ -46,12 +46,38 @@ static int sha1_init(struct shash_desc *desc) | |||
46 | return 0; | 46 | return 0; |
47 | } | 47 | } |
48 | 48 | ||
49 | static int sha1_export(struct shash_desc *desc, void *out) | ||
50 | { | ||
51 | struct s390_sha_ctx *sctx = shash_desc_ctx(desc); | ||
52 | struct sha1_state *octx = out; | ||
53 | |||
54 | octx->count = sctx->count; | ||
55 | memcpy(octx->state, sctx->state, sizeof(octx->state)); | ||
56 | memcpy(octx->buffer, sctx->buf, sizeof(octx->buffer)); | ||
57 | return 0; | ||
58 | } | ||
59 | |||
60 | static int sha1_import(struct shash_desc *desc, const void *in) | ||
61 | { | ||
62 | struct s390_sha_ctx *sctx = shash_desc_ctx(desc); | ||
63 | const struct sha1_state *ictx = in; | ||
64 | |||
65 | sctx->count = ictx->count; | ||
66 | memcpy(sctx->state, ictx->state, sizeof(ictx->state)); | ||
67 | memcpy(sctx->buf, ictx->buffer, sizeof(ictx->buffer)); | ||
68 | sctx->func = KIMD_SHA_1; | ||
69 | return 0; | ||
70 | } | ||
71 | |||
49 | static struct shash_alg alg = { | 72 | static struct shash_alg alg = { |
50 | .digestsize = SHA1_DIGEST_SIZE, | 73 | .digestsize = SHA1_DIGEST_SIZE, |
51 | .init = sha1_init, | 74 | .init = sha1_init, |
52 | .update = s390_sha_update, | 75 | .update = s390_sha_update, |
53 | .final = s390_sha_final, | 76 | .final = s390_sha_final, |
77 | .export = sha1_export, | ||
78 | .import = sha1_import, | ||
54 | .descsize = sizeof(struct s390_sha_ctx), | 79 | .descsize = sizeof(struct s390_sha_ctx), |
80 | .statesize = sizeof(struct sha1_state), | ||
55 | .base = { | 81 | .base = { |
56 | .cra_name = "sha1", | 82 | .cra_name = "sha1", |
57 | .cra_driver_name= "sha1-s390", | 83 | .cra_driver_name= "sha1-s390", |
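
The new export/import hooks let callers checkpoint a partial SHA-1 computation into the generic struct sha1_state and resume it later; the new statesize field advertises how large that buffer must be. A hedged usage sketch against the shash API, with error handling trimmed:

#include <crypto/hash.h>
#include <crypto/sha.h>
#include <linux/err.h>
#include <linux/slab.h>

static int sha1_checkpoint_demo(const u8 *p1, unsigned int l1,
				const u8 *p2, unsigned int l2, u8 *digest)
{
	struct crypto_shash *tfm = crypto_alloc_shash("sha1", 0, 0);
	struct shash_desc *desc;
	struct sha1_state state;		/* portable partial state */

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	desc->tfm = tfm;

	crypto_shash_init(desc);
	crypto_shash_update(desc, p1, l1);
	crypto_shash_export(desc, &state);	/* checkpoint */

	crypto_shash_import(desc, &state);	/* resume, possibly much later */
	crypto_shash_update(desc, p2, l2);
	crypto_shash_final(desc, digest);

	kfree(desc);
	crypto_free_shash(tfm);
	return 0;
}
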
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c index f9fefc569632..61a7db372121 100644 --- a/arch/s390/crypto/sha256_s390.c +++ b/arch/s390/crypto/sha256_s390.c | |||
@@ -42,12 +42,38 @@ static int sha256_init(struct shash_desc *desc) | |||
42 | return 0; | 42 | return 0; |
43 | } | 43 | } |
44 | 44 | ||
45 | static int sha256_export(struct shash_desc *desc, void *out) | ||
46 | { | ||
47 | struct s390_sha_ctx *sctx = shash_desc_ctx(desc); | ||
48 | struct sha256_state *octx = out; | ||
49 | |||
50 | octx->count = sctx->count; | ||
51 | memcpy(octx->state, sctx->state, sizeof(octx->state)); | ||
52 | memcpy(octx->buf, sctx->buf, sizeof(octx->buf)); | ||
53 | return 0; | ||
54 | } | ||
55 | |||
56 | static int sha256_import(struct shash_desc *desc, const void *in) | ||
57 | { | ||
58 | struct s390_sha_ctx *sctx = shash_desc_ctx(desc); | ||
59 | const struct sha256_state *ictx = in; | ||
60 | |||
61 | sctx->count = ictx->count; | ||
62 | memcpy(sctx->state, ictx->state, sizeof(ictx->state)); | ||
63 | memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf)); | ||
64 | sctx->func = KIMD_SHA_256; | ||
65 | return 0; | ||
66 | } | ||
67 | |||
45 | static struct shash_alg alg = { | 68 | static struct shash_alg alg = { |
46 | .digestsize = SHA256_DIGEST_SIZE, | 69 | .digestsize = SHA256_DIGEST_SIZE, |
47 | .init = sha256_init, | 70 | .init = sha256_init, |
48 | .update = s390_sha_update, | 71 | .update = s390_sha_update, |
49 | .final = s390_sha_final, | 72 | .final = s390_sha_final, |
73 | .export = sha256_export, | ||
74 | .import = sha256_import, | ||
50 | .descsize = sizeof(struct s390_sha_ctx), | 75 | .descsize = sizeof(struct s390_sha_ctx), |
76 | .statesize = sizeof(struct sha256_state), | ||
51 | .base = { | 77 | .base = { |
52 | .cra_name = "sha256", | 78 | .cra_name = "sha256", |
53 | .cra_driver_name= "sha256-s390", | 79 | .cra_driver_name= "sha256-s390", |
diff --git a/arch/s390/crypto/sha512_s390.c b/arch/s390/crypto/sha512_s390.c index 83192bfc8048..4bf73d0dc525 100644 --- a/arch/s390/crypto/sha512_s390.c +++ b/arch/s390/crypto/sha512_s390.c | |||
@@ -13,7 +13,10 @@ | |||
13 | * | 13 | * |
14 | */ | 14 | */ |
15 | #include <crypto/internal/hash.h> | 15 | #include <crypto/internal/hash.h> |
16 | #include <crypto/sha.h> | ||
17 | #include <linux/errno.h> | ||
16 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/kernel.h> | ||
17 | #include <linux/module.h> | 20 | #include <linux/module.h> |
18 | 21 | ||
19 | #include "sha.h" | 22 | #include "sha.h" |
@@ -37,12 +40,42 @@ static int sha512_init(struct shash_desc *desc) | |||
37 | return 0; | 40 | return 0; |
38 | } | 41 | } |
39 | 42 | ||
43 | static int sha512_export(struct shash_desc *desc, void *out) | ||
44 | { | ||
45 | struct s390_sha_ctx *sctx = shash_desc_ctx(desc); | ||
46 | struct sha512_state *octx = out; | ||
47 | |||
48 | octx->count[0] = sctx->count; | ||
49 | octx->count[1] = 0; | ||
50 | memcpy(octx->state, sctx->state, sizeof(octx->state)); | ||
51 | memcpy(octx->buf, sctx->buf, sizeof(octx->buf)); | ||
52 | return 0; | ||
53 | } | ||
54 | |||
55 | static int sha512_import(struct shash_desc *desc, const void *in) | ||
56 | { | ||
57 | struct s390_sha_ctx *sctx = shash_desc_ctx(desc); | ||
58 | const struct sha512_state *ictx = in; | ||
59 | |||
60 | if (unlikely(ictx->count[1])) | ||
61 | return -ERANGE; | ||
62 | sctx->count = ictx->count[0]; | ||
63 | |||
64 | memcpy(sctx->state, ictx->state, sizeof(ictx->state)); | ||
65 | memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf)); | ||
66 | sctx->func = KIMD_SHA_512; | ||
67 | return 0; | ||
68 | } | ||
69 | |||
40 | static struct shash_alg sha512_alg = { | 70 | static struct shash_alg sha512_alg = { |
41 | .digestsize = SHA512_DIGEST_SIZE, | 71 | .digestsize = SHA512_DIGEST_SIZE, |
42 | .init = sha512_init, | 72 | .init = sha512_init, |
43 | .update = s390_sha_update, | 73 | .update = s390_sha_update, |
44 | .final = s390_sha_final, | 74 | .final = s390_sha_final, |
75 | .export = sha512_export, | ||
76 | .import = sha512_import, | ||
45 | .descsize = sizeof(struct s390_sha_ctx), | 77 | .descsize = sizeof(struct s390_sha_ctx), |
78 | .statesize = sizeof(struct sha512_state), | ||
46 | .base = { | 79 | .base = { |
47 | .cra_name = "sha512", | 80 | .cra_name = "sha512", |
48 | .cra_driver_name= "sha512-s390", | 81 | .cra_driver_name= "sha512-s390", |
@@ -78,7 +111,10 @@ static struct shash_alg sha384_alg = { | |||
78 | .init = sha384_init, | 111 | .init = sha384_init, |
79 | .update = s390_sha_update, | 112 | .update = s390_sha_update, |
80 | .final = s390_sha_final, | 113 | .final = s390_sha_final, |
114 | .export = sha512_export, | ||
115 | .import = sha512_import, | ||
81 | .descsize = sizeof(struct s390_sha_ctx), | 116 | .descsize = sizeof(struct s390_sha_ctx), |
117 | .statesize = sizeof(struct sha512_state), | ||
82 | .base = { | 118 | .base = { |
83 | .cra_name = "sha384", | 119 | .cra_name = "sha384", |
84 | .cra_driver_name= "sha384-s390", | 120 | .cra_driver_name= "sha384-s390", |
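
sha512_import() refuses states whose upper count word is set because the s390 context tracks only a 64-bit byte count while the generic struct sha512_state carries a 128-bit one; sha384 reuses the same hooks since it is SHA-512 with a different IV and digest length. A one-function sketch of that invariant (hypothetical helper name):

#include <crypto/sha.h>
#include <linux/errno.h>

static int s390_can_hold_sha512_state(const struct sha512_state *ictx)
{
	/* count[1] != 0 means more than 2^64 - 1 bytes were hashed,
	 * which the single 64-bit hardware count cannot represent */
	if (ictx->count[1])
		return -ERANGE;
	return 0;
}
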
diff --git a/arch/s390/defconfig b/arch/s390/defconfig index fcba206529f3..4e91a2573cc4 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig | |||
@@ -900,7 +900,7 @@ CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y | |||
900 | CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y | 900 | CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y |
901 | CONFIG_HAVE_DYNAMIC_FTRACE=y | 901 | CONFIG_HAVE_DYNAMIC_FTRACE=y |
902 | CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y | 902 | CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y |
903 | CONFIG_HAVE_FTRACE_SYSCALLS=y | 903 | CONFIG_HAVE_SYSCALL_TRACEPOINTS=y |
904 | CONFIG_TRACING_SUPPORT=y | 904 | CONFIG_TRACING_SUPPORT=y |
905 | CONFIG_FTRACE=y | 905 | CONFIG_FTRACE=y |
906 | # CONFIG_FUNCTION_TRACER is not set | 906 | # CONFIG_FUNCTION_TRACER is not set |
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c index 5a805df216bb..bd9914b89488 100644 --- a/arch/s390/hypfs/inode.c +++ b/arch/s390/hypfs/inode.c | |||
@@ -355,11 +355,7 @@ static struct dentry *hypfs_create_file(struct super_block *sb, | |||
355 | { | 355 | { |
356 | struct dentry *dentry; | 356 | struct dentry *dentry; |
357 | struct inode *inode; | 357 | struct inode *inode; |
358 | struct qstr qname; | ||
359 | 358 | ||
360 | qname.name = name; | ||
361 | qname.len = strlen(name); | ||
362 | qname.hash = full_name_hash(name, qname.len); | ||
363 | mutex_lock(&parent->d_inode->i_mutex); | 359 | mutex_lock(&parent->d_inode->i_mutex); |
364 | dentry = lookup_one_len(name, parent, strlen(name)); | 360 | dentry = lookup_one_len(name, parent, strlen(name)); |
365 | if (IS_ERR(dentry)) { | 361 | if (IS_ERR(dentry)) { |
@@ -426,7 +422,7 @@ struct dentry *hypfs_create_u64(struct super_block *sb, struct dentry *dir, | |||
426 | char tmp[TMP_SIZE]; | 422 | char tmp[TMP_SIZE]; |
427 | struct dentry *dentry; | 423 | struct dentry *dentry; |
428 | 424 | ||
429 | snprintf(tmp, TMP_SIZE, "%lld\n", (unsigned long long int)value); | 425 | snprintf(tmp, TMP_SIZE, "%llu\n", (unsigned long long int)value); |
430 | buffer = kstrdup(tmp, GFP_KERNEL); | 426 | buffer = kstrdup(tmp, GFP_KERNEL); |
431 | if (!buffer) | 427 | if (!buffer) |
432 | return ERR_PTR(-ENOMEM); | 428 | return ERR_PTR(-ENOMEM); |
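
The "%lld" to "%llu" change matters once the u64 value has its top bit set. A hedged illustration of the two interpretations:

#include <linux/kernel.h>

static void fmt_demo(void)
{
	u64 value = 0x8000000000000001ULL;
	char tmp[32];

	/* signed interpretation: "-9223372036854775807" */
	snprintf(tmp, sizeof(tmp), "%lld\n", (unsigned long long) value);
	/* unsigned interpretation: "9223372036854775809" */
	snprintf(tmp, sizeof(tmp), "%llu\n", (unsigned long long) value);
}
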
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h index c7d0abfb0f00..ae7c8f9f94a5 100644 --- a/arch/s390/include/asm/atomic.h +++ b/arch/s390/include/asm/atomic.h | |||
@@ -1,33 +1,23 @@ | |||
1 | #ifndef __ARCH_S390_ATOMIC__ | 1 | #ifndef __ARCH_S390_ATOMIC__ |
2 | #define __ARCH_S390_ATOMIC__ | 2 | #define __ARCH_S390_ATOMIC__ |
3 | 3 | ||
4 | #include <linux/compiler.h> | ||
5 | #include <linux/types.h> | ||
6 | |||
7 | /* | 4 | /* |
8 | * include/asm-s390/atomic.h | 5 | * Copyright 1999,2009 IBM Corp. |
6 | * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>, | ||
7 | * Denis Joseph Barrow, | ||
8 | * Arnd Bergmann <arndb@de.ibm.com>, | ||
9 | * | 9 | * |
10 | * S390 version | 10 | * Atomic operations that C can't guarantee us. |
11 | * Copyright (C) 1999-2005 IBM Deutschland Entwicklung GmbH, IBM Corporation | 11 | * Useful for resource counting etc. |
12 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), | 12 | * s390 uses 'Compare And Swap' for atomicity in SMP environment |
13 | * Denis Joseph Barrow, | ||
14 | * Arnd Bergmann (arndb@de.ibm.com) | ||
15 | * | ||
16 | * Derived from "include/asm-i386/bitops.h" | ||
17 | * Copyright (C) 1992, Linus Torvalds | ||
18 | * | 13 | * |
19 | */ | 14 | */ |
20 | 15 | ||
21 | /* | 16 | #include <linux/compiler.h> |
22 | * Atomic operations that C can't guarantee us. Useful for | 17 | #include <linux/types.h> |
23 | * resource counting etc. | ||
24 | * S390 uses 'Compare And Swap' for atomicity in SMP environment | ||
25 | */ | ||
26 | 18 | ||
27 | #define ATOMIC_INIT(i) { (i) } | 19 | #define ATOMIC_INIT(i) { (i) } |
28 | 20 | ||
29 | #ifdef __KERNEL__ | ||
30 | |||
31 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) | 21 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) |
32 | 22 | ||
33 | #define __CS_LOOP(ptr, op_val, op_string) ({ \ | 23 | #define __CS_LOOP(ptr, op_val, op_string) ({ \ |
@@ -77,7 +67,7 @@ static inline void atomic_set(atomic_t *v, int i) | |||
77 | barrier(); | 67 | barrier(); |
78 | } | 68 | } |
79 | 69 | ||
80 | static __inline__ int atomic_add_return(int i, atomic_t * v) | 70 | static inline int atomic_add_return(int i, atomic_t *v) |
81 | { | 71 | { |
82 | return __CS_LOOP(v, i, "ar"); | 72 | return __CS_LOOP(v, i, "ar"); |
83 | } | 73 | } |
@@ -87,7 +77,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v) | |||
87 | #define atomic_inc_return(_v) atomic_add_return(1, _v) | 77 | #define atomic_inc_return(_v) atomic_add_return(1, _v) |
88 | #define atomic_inc_and_test(_v) (atomic_add_return(1, _v) == 0) | 78 | #define atomic_inc_and_test(_v) (atomic_add_return(1, _v) == 0) |
89 | 79 | ||
90 | static __inline__ int atomic_sub_return(int i, atomic_t * v) | 80 | static inline int atomic_sub_return(int i, atomic_t *v) |
91 | { | 81 | { |
92 | return __CS_LOOP(v, i, "sr"); | 82 | return __CS_LOOP(v, i, "sr"); |
93 | } | 83 | } |
@@ -97,19 +87,19 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v) | |||
97 | #define atomic_dec_return(_v) atomic_sub_return(1, _v) | 87 | #define atomic_dec_return(_v) atomic_sub_return(1, _v) |
98 | #define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0) | 88 | #define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0) |
99 | 89 | ||
100 | static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t * v) | 90 | static inline void atomic_clear_mask(unsigned long mask, atomic_t *v) |
101 | { | 91 | { |
102 | __CS_LOOP(v, ~mask, "nr"); | 92 | __CS_LOOP(v, ~mask, "nr"); |
103 | } | 93 | } |
104 | 94 | ||
105 | static __inline__ void atomic_set_mask(unsigned long mask, atomic_t * v) | 95 | static inline void atomic_set_mask(unsigned long mask, atomic_t *v) |
106 | { | 96 | { |
107 | __CS_LOOP(v, mask, "or"); | 97 | __CS_LOOP(v, mask, "or"); |
108 | } | 98 | } |
109 | 99 | ||
110 | #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) | 100 | #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) |
111 | 101 | ||
112 | static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new) | 102 | static inline int atomic_cmpxchg(atomic_t *v, int old, int new) |
113 | { | 103 | { |
114 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) | 104 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) |
115 | asm volatile( | 105 | asm volatile( |
@@ -127,7 +117,7 @@ static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new) | |||
127 | return old; | 117 | return old; |
128 | } | 118 | } |
129 | 119 | ||
130 | static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) | 120 | static inline int atomic_add_unless(atomic_t *v, int a, int u) |
131 | { | 121 | { |
132 | int c, old; | 122 | int c, old; |
133 | c = atomic_read(v); | 123 | c = atomic_read(v); |
@@ -146,9 +136,10 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) | |||
146 | 136 | ||
147 | #undef __CS_LOOP | 137 | #undef __CS_LOOP |
148 | 138 | ||
149 | #ifdef __s390x__ | ||
150 | #define ATOMIC64_INIT(i) { (i) } | 139 | #define ATOMIC64_INIT(i) { (i) } |
151 | 140 | ||
141 | #ifdef CONFIG_64BIT | ||
142 | |||
152 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) | 143 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) |
153 | 144 | ||
154 | #define __CSG_LOOP(ptr, op_val, op_string) ({ \ | 145 | #define __CSG_LOOP(ptr, op_val, op_string) ({ \ |
@@ -162,7 +153,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) | |||
162 | : "=&d" (old_val), "=&d" (new_val), \ | 153 | : "=&d" (old_val), "=&d" (new_val), \ |
163 | "=Q" (((atomic_t *)(ptr))->counter) \ | 154 | "=Q" (((atomic_t *)(ptr))->counter) \ |
164 | : "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \ | 155 | : "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \ |
165 | : "cc", "memory" ); \ | 156 | : "cc", "memory"); \ |
166 | new_val; \ | 157 | new_val; \ |
167 | }) | 158 | }) |
168 | 159 | ||
@@ -180,7 +171,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) | |||
180 | "=m" (((atomic_t *)(ptr))->counter) \ | 171 | "=m" (((atomic_t *)(ptr))->counter) \ |
181 | : "a" (ptr), "d" (op_val), \ | 172 | : "a" (ptr), "d" (op_val), \ |
182 | "m" (((atomic_t *)(ptr))->counter) \ | 173 | "m" (((atomic_t *)(ptr))->counter) \ |
183 | : "cc", "memory" ); \ | 174 | : "cc", "memory"); \ |
184 | new_val; \ | 175 | new_val; \ |
185 | }) | 176 | }) |
186 | 177 | ||
@@ -198,39 +189,29 @@ static inline void atomic64_set(atomic64_t *v, long long i) | |||
198 | barrier(); | 189 | barrier(); |
199 | } | 190 | } |
200 | 191 | ||
201 | static __inline__ long long atomic64_add_return(long long i, atomic64_t * v) | 192 | static inline long long atomic64_add_return(long long i, atomic64_t *v) |
202 | { | 193 | { |
203 | return __CSG_LOOP(v, i, "agr"); | 194 | return __CSG_LOOP(v, i, "agr"); |
204 | } | 195 | } |
205 | #define atomic64_add(_i, _v) atomic64_add_return(_i, _v) | ||
206 | #define atomic64_add_negative(_i, _v) (atomic64_add_return(_i, _v) < 0) | ||
207 | #define atomic64_inc(_v) atomic64_add_return(1, _v) | ||
208 | #define atomic64_inc_return(_v) atomic64_add_return(1, _v) | ||
209 | #define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0) | ||
210 | 196 | ||
211 | static __inline__ long long atomic64_sub_return(long long i, atomic64_t * v) | 197 | static inline long long atomic64_sub_return(long long i, atomic64_t *v) |
212 | { | 198 | { |
213 | return __CSG_LOOP(v, i, "sgr"); | 199 | return __CSG_LOOP(v, i, "sgr"); |
214 | } | 200 | } |
215 | #define atomic64_sub(_i, _v) atomic64_sub_return(_i, _v) | ||
216 | #define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0) | ||
217 | #define atomic64_dec(_v) atomic64_sub_return(1, _v) | ||
218 | #define atomic64_dec_return(_v) atomic64_sub_return(1, _v) | ||
219 | #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0) | ||
220 | 201 | ||
221 | static __inline__ void atomic64_clear_mask(unsigned long mask, atomic64_t * v) | 202 | static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v) |
222 | { | 203 | { |
223 | __CSG_LOOP(v, ~mask, "ngr"); | 204 | __CSG_LOOP(v, ~mask, "ngr"); |
224 | } | 205 | } |
225 | 206 | ||
226 | static __inline__ void atomic64_set_mask(unsigned long mask, atomic64_t * v) | 207 | static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v) |
227 | { | 208 | { |
228 | __CSG_LOOP(v, mask, "ogr"); | 209 | __CSG_LOOP(v, mask, "ogr"); |
229 | } | 210 | } |
230 | 211 | ||
231 | #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) | 212 | #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) |
232 | 213 | ||
233 | static __inline__ long long atomic64_cmpxchg(atomic64_t *v, | 214 | static inline long long atomic64_cmpxchg(atomic64_t *v, |
234 | long long old, long long new) | 215 | long long old, long long new) |
235 | { | 216 | { |
236 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) | 217 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) |
@@ -249,8 +230,112 @@ static __inline__ long long atomic64_cmpxchg(atomic64_t *v, | |||
249 | return old; | 230 | return old; |
250 | } | 231 | } |
251 | 232 | ||
252 | static __inline__ int atomic64_add_unless(atomic64_t *v, | 233 | #undef __CSG_LOOP |
253 | long long a, long long u) | 234 | |
235 | #else /* CONFIG_64BIT */ | ||
236 | |||
237 | typedef struct { | ||
238 | long long counter; | ||
239 | } atomic64_t; | ||
240 | |||
241 | static inline long long atomic64_read(const atomic64_t *v) | ||
242 | { | ||
243 | register_pair rp; | ||
244 | |||
245 | asm volatile( | ||
246 | " lm %0,%N0,0(%1)" | ||
247 | : "=&d" (rp) | ||
248 | : "a" (&v->counter), "m" (v->counter) | ||
249 | ); | ||
250 | return rp.pair; | ||
251 | } | ||
252 | |||
253 | static inline void atomic64_set(atomic64_t *v, long long i) | ||
254 | { | ||
255 | register_pair rp = {.pair = i}; | ||
256 | |||
257 | asm volatile( | ||
258 | " stm %1,%N1,0(%2)" | ||
259 | : "=m" (v->counter) | ||
260 | : "d" (rp), "a" (&v->counter) | ||
261 | ); | ||
262 | } | ||
263 | |||
264 | static inline long long atomic64_xchg(atomic64_t *v, long long new) | ||
265 | { | ||
266 | register_pair rp_new = {.pair = new}; | ||
267 | register_pair rp_old; | ||
268 | |||
269 | asm volatile( | ||
270 | " lm %0,%N0,0(%2)\n" | ||
271 | "0: cds %0,%3,0(%2)\n" | ||
272 | " jl 0b\n" | ||
273 | : "=&d" (rp_old), "+m" (v->counter) | ||
274 | : "a" (&v->counter), "d" (rp_new) | ||
275 | : "cc"); | ||
276 | return rp_old.pair; | ||
277 | } | ||
278 | |||
279 | static inline long long atomic64_cmpxchg(atomic64_t *v, | ||
280 | long long old, long long new) | ||
281 | { | ||
282 | register_pair rp_old = {.pair = old}; | ||
283 | register_pair rp_new = {.pair = new}; | ||
284 | |||
285 | asm volatile( | ||
286 | " cds %0,%3,0(%2)" | ||
287 | : "+&d" (rp_old), "+m" (v->counter) | ||
288 | : "a" (&v->counter), "d" (rp_new) | ||
289 | : "cc"); | ||
290 | return rp_old.pair; | ||
291 | } | ||
292 | |||
293 | |||
294 | static inline long long atomic64_add_return(long long i, atomic64_t *v) | ||
295 | { | ||
296 | long long old, new; | ||
297 | |||
298 | do { | ||
299 | old = atomic64_read(v); | ||
300 | new = old + i; | ||
301 | } while (atomic64_cmpxchg(v, old, new) != old); | ||
302 | return new; | ||
303 | } | ||
304 | |||
305 | static inline long long atomic64_sub_return(long long i, atomic64_t *v) | ||
306 | { | ||
307 | long long old, new; | ||
308 | |||
309 | do { | ||
310 | old = atomic64_read(v); | ||
311 | new = old - i; | ||
312 | } while (atomic64_cmpxchg(v, old, new) != old); | ||
313 | return new; | ||
314 | } | ||
315 | |||
316 | static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v) | ||
317 | { | ||
318 | long long old, new; | ||
319 | |||
320 | do { | ||
321 | old = atomic64_read(v); | ||
322 | new = old | mask; | ||
323 | } while (atomic64_cmpxchg(v, old, new) != old); | ||
324 | } | ||
325 | |||
326 | static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v) | ||
327 | { | ||
328 | long long old, new; | ||
329 | |||
330 | do { | ||
331 | old = atomic64_read(v); | ||
332 | new = old & mask; | ||
333 | } while (atomic64_cmpxchg(v, old, new) != old); | ||
334 | } | ||
335 | |||
336 | #endif /* CONFIG_64BIT */ | ||
337 | |||
338 | static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u) | ||
254 | { | 339 | { |
255 | long long c, old; | 340 | long long c, old; |
256 | c = atomic64_read(v); | 341 | c = atomic64_read(v); |
@@ -265,15 +350,17 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, | |||
265 | return c != u; | 350 | return c != u; |
266 | } | 351 | } |
267 | 352 | ||
268 | #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) | 353 | #define atomic64_add(_i, _v) atomic64_add_return(_i, _v) |
269 | 354 | #define atomic64_add_negative(_i, _v) (atomic64_add_return(_i, _v) < 0) | |
270 | #undef __CSG_LOOP | 355 | #define atomic64_inc(_v) atomic64_add_return(1, _v) |
271 | 356 | #define atomic64_inc_return(_v) atomic64_add_return(1, _v) | |
272 | #else /* __s390x__ */ | 357 | #define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0) |
273 | 358 | #define atomic64_sub(_i, _v) atomic64_sub_return(_i, _v) | |
274 | #include <asm-generic/atomic64.h> | 359 | #define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0) |
275 | 360 | #define atomic64_dec(_v) atomic64_sub_return(1, _v) | |
276 | #endif /* __s390x__ */ | 361 | #define atomic64_dec_return(_v) atomic64_sub_return(1, _v) |
362 | #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0) | ||
363 | #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) | ||
277 | 364 | ||
278 | #define smp_mb__before_atomic_dec() smp_mb() | 365 | #define smp_mb__before_atomic_dec() smp_mb() |
279 | #define smp_mb__after_atomic_dec() smp_mb() | 366 | #define smp_mb__after_atomic_dec() smp_mb() |
@@ -281,5 +368,5 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, | |||
281 | #define smp_mb__after_atomic_inc() smp_mb() | 368 | #define smp_mb__after_atomic_inc() smp_mb() |
282 | 369 | ||
283 | #include <asm-generic/atomic-long.h> | 370 | #include <asm-generic/atomic-long.h> |
284 | #endif /* __KERNEL__ */ | 371 | |
285 | #endif /* __ARCH_S390_ATOMIC__ */ | 372 | #endif /* __ARCH_S390_ATOMIC__ */ |
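
With this rewrite, 31-bit kernels get a native atomic64_t built on the CDS (compare double and swap) instruction; every derived 31-bit operation is a cmpxchg retry loop, and the macro block at the end is now shared by both variants. A hedged usage sketch that compiles identically on both builds:

#include <asm/atomic.h>

static atomic64_t bytes_xferred = ATOMIC64_INIT(0);

static void account(unsigned int len)
{
	/* CSG on 64-bit kernels, a CDS retry loop on 31-bit kernels */
	atomic64_add(len, &bytes_xferred);
}

static long long snapshot_and_reset(void)
{
	return atomic64_xchg(&bytes_xferred, 0);
}
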
diff --git a/arch/s390/include/asm/checksum.h b/arch/s390/include/asm/checksum.h index d5a8e7c1477c..6c00f6800a34 100644 --- a/arch/s390/include/asm/checksum.h +++ b/arch/s390/include/asm/checksum.h | |||
@@ -78,28 +78,11 @@ csum_partial_copy_nocheck (const void *src, void *dst, int len, __wsum sum) | |||
78 | */ | 78 | */ |
79 | static inline __sum16 csum_fold(__wsum sum) | 79 | static inline __sum16 csum_fold(__wsum sum) |
80 | { | 80 | { |
81 | #ifndef __s390x__ | 81 | u32 csum = (__force u32) sum; |
82 | register_pair rp; | ||
83 | 82 | ||
84 | asm volatile( | 83 | csum += (csum >> 16) + (csum << 16); |
85 | " slr %N1,%N1\n" /* %0 = H L */ | 84 | csum >>= 16; |
86 | " lr %1,%0\n" /* %0 = H L, %1 = H L 0 0 */ | 85 | return (__force __sum16) ~csum; |
87 | " srdl %1,16\n" /* %0 = H L, %1 = 0 H L 0 */ | ||
88 | " alr %1,%N1\n" /* %0 = H L, %1 = L H L 0 */ | ||
89 | " alr %0,%1\n" /* %0 = H+L+C L+H */ | ||
90 | " srl %0,16\n" /* %0 = H+L+C */ | ||
91 | : "+&d" (sum), "=d" (rp) : : "cc"); | ||
92 | #else /* __s390x__ */ | ||
93 | asm volatile( | ||
94 | " sr 3,3\n" /* %0 = H*65536 + L */ | ||
95 | " lr 2,%0\n" /* %0 = H L, 2/3 = H L / 0 0 */ | ||
96 | " srdl 2,16\n" /* %0 = H L, 2/3 = 0 H / L 0 */ | ||
97 | " alr 2,3\n" /* %0 = H L, 2/3 = L H / L 0 */ | ||
98 | " alr %0,2\n" /* %0 = H+L+C L+H */ | ||
99 | " srl %0,16\n" /* %0 = H+L+C */ | ||
100 | : "+&d" (sum) : : "cc", "2", "3"); | ||
101 | #endif /* __s390x__ */ | ||
102 | return (__force __sum16) ~sum; | ||
103 | } | 86 | } |
104 | 87 | ||
105 | /* | 88 | /* |
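
The new csum_fold is plain C: adding (csum >> 16) and (csum << 16) to csum places high-half plus low-half, including the end-around carry, into the top 16 bits, which the final shift extracts. A worked instance of the same arithmetic:

#include <linux/types.h>

static inline u16 fold_demo(u32 csum)
{
	csum += (csum >> 16) + (csum << 16);
	csum >>= 16;
	return (u16) ~csum;
}

/* fold_demo(0x0003fffd): 0x0003fffd + 0x00000003 + 0xfffd0000 wraps to
 * 0x00010000; shifting gives 0x0001 (= 0x0003 +' 0xfffd in ones-complement
 * addition), and the complement yields 0xfffe. */
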
diff --git a/arch/s390/include/asm/chsc.h b/arch/s390/include/asm/chsc.h index 807997f7414b..4943654ed7fd 100644 --- a/arch/s390/include/asm/chsc.h +++ b/arch/s390/include/asm/chsc.h | |||
@@ -125,4 +125,32 @@ struct chsc_cpd_info { | |||
125 | #define CHSC_INFO_CPD _IOWR(CHSC_IOCTL_MAGIC, 0x87, struct chsc_cpd_info) | 125 | #define CHSC_INFO_CPD _IOWR(CHSC_IOCTL_MAGIC, 0x87, struct chsc_cpd_info) |
126 | #define CHSC_INFO_DCAL _IOWR(CHSC_IOCTL_MAGIC, 0x88, struct chsc_dcal) | 126 | #define CHSC_INFO_DCAL _IOWR(CHSC_IOCTL_MAGIC, 0x88, struct chsc_dcal) |
127 | 127 | ||
128 | #ifdef __KERNEL__ | ||
129 | |||
130 | struct css_general_char { | ||
131 | u64 : 12; | ||
132 | u32 dynio : 1; /* bit 12 */ | ||
133 | u32 : 28; | ||
134 | u32 aif : 1; /* bit 41 */ | ||
135 | u32 : 3; | ||
136 | u32 mcss : 1; /* bit 45 */ | ||
137 | u32 fcs : 1; /* bit 46 */ | ||
138 | u32 : 1; | ||
139 | u32 ext_mb : 1; /* bit 48 */ | ||
140 | u32 : 7; | ||
141 | u32 aif_tdd : 1; /* bit 56 */ | ||
142 | u32 : 1; | ||
143 | u32 qebsm : 1; /* bit 58 */ | ||
144 | u32 : 8; | ||
145 | u32 aif_osa : 1; /* bit 67 */ | ||
146 | u32 : 14; | ||
147 | u32 cib : 1; /* bit 82 */ | ||
148 | u32 : 5; | ||
149 | u32 fcx : 1; /* bit 88 */ | ||
150 | u32 : 7; | ||
151 | } __attribute__ ((packed)); | ||
152 | |||
153 | extern struct css_general_char css_general_characteristics; | ||
154 | |||
155 | #endif /* __KERNEL__ */ | ||
128 | #endif | 156 | #endif |
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h index 619bf94b11f1..e85679af54dd 100644 --- a/arch/s390/include/asm/cio.h +++ b/arch/s390/include/asm/cio.h | |||
@@ -15,228 +15,7 @@ | |||
15 | #define LPM_ANYPATH 0xff | 15 | #define LPM_ANYPATH 0xff |
16 | #define __MAX_CSSID 0 | 16 | #define __MAX_CSSID 0 |
17 | 17 | ||
18 | /** | 18 | #include <asm/scsw.h> |
19 | * struct cmd_scsw - command-mode subchannel status word | ||
20 | * @key: subchannel key | ||
21 | * @sctl: suspend control | ||
22 | * @eswf: esw format | ||
23 | * @cc: deferred condition code | ||
24 | * @fmt: format | ||
25 | * @pfch: prefetch | ||
26 | * @isic: initial-status interruption control | ||
27 | * @alcc: address-limit checking control | ||
28 | * @ssi: suppress-suspended interruption | ||
29 | * @zcc: zero condition code | ||
30 | * @ectl: extended control | ||
31 | * @pno: path not operational | ||
32 | * @res: reserved | ||
33 | * @fctl: function control | ||
34 | * @actl: activity control | ||
35 | * @stctl: status control | ||
36 | * @cpa: channel program address | ||
37 | * @dstat: device status | ||
38 | * @cstat: subchannel status | ||
39 | * @count: residual count | ||
40 | */ | ||
41 | struct cmd_scsw { | ||
42 | __u32 key : 4; | ||
43 | __u32 sctl : 1; | ||
44 | __u32 eswf : 1; | ||
45 | __u32 cc : 2; | ||
46 | __u32 fmt : 1; | ||
47 | __u32 pfch : 1; | ||
48 | __u32 isic : 1; | ||
49 | __u32 alcc : 1; | ||
50 | __u32 ssi : 1; | ||
51 | __u32 zcc : 1; | ||
52 | __u32 ectl : 1; | ||
53 | __u32 pno : 1; | ||
54 | __u32 res : 1; | ||
55 | __u32 fctl : 3; | ||
56 | __u32 actl : 7; | ||
57 | __u32 stctl : 5; | ||
58 | __u32 cpa; | ||
59 | __u32 dstat : 8; | ||
60 | __u32 cstat : 8; | ||
61 | __u32 count : 16; | ||
62 | } __attribute__ ((packed)); | ||
63 | |||
64 | /** | ||
65 | * struct tm_scsw - transport-mode subchannel status word | ||
66 | * @key: subchannel key | ||
67 | * @eswf: esw format | ||
68 | * @cc: deferred condition code | ||
69 | * @fmt: format | ||
70 | * @x: IRB-format control | ||
71 | * @q: interrogate-complete | ||
72 | * @ectl: extended control | ||
73 | * @pno: path not operational | ||
74 | * @fctl: function control | ||
75 | * @actl: activity control | ||
76 | * @stctl: status control | ||
77 | * @tcw: TCW address | ||
78 | * @dstat: device status | ||
79 | * @cstat: subchannel status | ||
80 | * @fcxs: FCX status | ||
81 | * @schxs: subchannel-extended status | ||
82 | */ | ||
83 | struct tm_scsw { | ||
84 | u32 key:4; | ||
85 | u32 :1; | ||
86 | u32 eswf:1; | ||
87 | u32 cc:2; | ||
88 | u32 fmt:3; | ||
89 | u32 x:1; | ||
90 | u32 q:1; | ||
91 | u32 :1; | ||
92 | u32 ectl:1; | ||
93 | u32 pno:1; | ||
94 | u32 :1; | ||
95 | u32 fctl:3; | ||
96 | u32 actl:7; | ||
97 | u32 stctl:5; | ||
98 | u32 tcw; | ||
99 | u32 dstat:8; | ||
100 | u32 cstat:8; | ||
101 | u32 fcxs:8; | ||
102 | u32 schxs:8; | ||
103 | } __attribute__ ((packed)); | ||
104 | |||
105 | /** | ||
106 | * union scsw - subchannel status word | ||
107 | * @cmd: command-mode SCSW | ||
108 | * @tm: transport-mode SCSW | ||
109 | */ | ||
110 | union scsw { | ||
111 | struct cmd_scsw cmd; | ||
112 | struct tm_scsw tm; | ||
113 | } __attribute__ ((packed)); | ||
114 | |||
115 | int scsw_is_tm(union scsw *scsw); | ||
116 | u32 scsw_key(union scsw *scsw); | ||
117 | u32 scsw_eswf(union scsw *scsw); | ||
118 | u32 scsw_cc(union scsw *scsw); | ||
119 | u32 scsw_ectl(union scsw *scsw); | ||
120 | u32 scsw_pno(union scsw *scsw); | ||
121 | u32 scsw_fctl(union scsw *scsw); | ||
122 | u32 scsw_actl(union scsw *scsw); | ||
123 | u32 scsw_stctl(union scsw *scsw); | ||
124 | u32 scsw_dstat(union scsw *scsw); | ||
125 | u32 scsw_cstat(union scsw *scsw); | ||
126 | int scsw_is_solicited(union scsw *scsw); | ||
127 | int scsw_is_valid_key(union scsw *scsw); | ||
128 | int scsw_is_valid_eswf(union scsw *scsw); | ||
129 | int scsw_is_valid_cc(union scsw *scsw); | ||
130 | int scsw_is_valid_ectl(union scsw *scsw); | ||
131 | int scsw_is_valid_pno(union scsw *scsw); | ||
132 | int scsw_is_valid_fctl(union scsw *scsw); | ||
133 | int scsw_is_valid_actl(union scsw *scsw); | ||
134 | int scsw_is_valid_stctl(union scsw *scsw); | ||
135 | int scsw_is_valid_dstat(union scsw *scsw); | ||
136 | int scsw_is_valid_cstat(union scsw *scsw); | ||
137 | int scsw_cmd_is_valid_key(union scsw *scsw); | ||
138 | int scsw_cmd_is_valid_sctl(union scsw *scsw); | ||
139 | int scsw_cmd_is_valid_eswf(union scsw *scsw); | ||
140 | int scsw_cmd_is_valid_cc(union scsw *scsw); | ||
141 | int scsw_cmd_is_valid_fmt(union scsw *scsw); | ||
142 | int scsw_cmd_is_valid_pfch(union scsw *scsw); | ||
143 | int scsw_cmd_is_valid_isic(union scsw *scsw); | ||
144 | int scsw_cmd_is_valid_alcc(union scsw *scsw); | ||
145 | int scsw_cmd_is_valid_ssi(union scsw *scsw); | ||
146 | int scsw_cmd_is_valid_zcc(union scsw *scsw); | ||
147 | int scsw_cmd_is_valid_ectl(union scsw *scsw); | ||
148 | int scsw_cmd_is_valid_pno(union scsw *scsw); | ||
149 | int scsw_cmd_is_valid_fctl(union scsw *scsw); | ||
150 | int scsw_cmd_is_valid_actl(union scsw *scsw); | ||
151 | int scsw_cmd_is_valid_stctl(union scsw *scsw); | ||
152 | int scsw_cmd_is_valid_dstat(union scsw *scsw); | ||
153 | int scsw_cmd_is_valid_cstat(union scsw *scsw); | ||
154 | int scsw_cmd_is_solicited(union scsw *scsw); | ||
155 | int scsw_tm_is_valid_key(union scsw *scsw); | ||
156 | int scsw_tm_is_valid_eswf(union scsw *scsw); | ||
157 | int scsw_tm_is_valid_cc(union scsw *scsw); | ||
158 | int scsw_tm_is_valid_fmt(union scsw *scsw); | ||
159 | int scsw_tm_is_valid_x(union scsw *scsw); | ||
160 | int scsw_tm_is_valid_q(union scsw *scsw); | ||
161 | int scsw_tm_is_valid_ectl(union scsw *scsw); | ||
162 | int scsw_tm_is_valid_pno(union scsw *scsw); | ||
163 | int scsw_tm_is_valid_fctl(union scsw *scsw); | ||
164 | int scsw_tm_is_valid_actl(union scsw *scsw); | ||
165 | int scsw_tm_is_valid_stctl(union scsw *scsw); | ||
166 | int scsw_tm_is_valid_dstat(union scsw *scsw); | ||
167 | int scsw_tm_is_valid_cstat(union scsw *scsw); | ||
168 | int scsw_tm_is_valid_fcxs(union scsw *scsw); | ||
169 | int scsw_tm_is_valid_schxs(union scsw *scsw); | ||
170 | int scsw_tm_is_solicited(union scsw *scsw); | ||
171 | |||
172 | #define SCSW_FCTL_CLEAR_FUNC 0x1 | ||
173 | #define SCSW_FCTL_HALT_FUNC 0x2 | ||
174 | #define SCSW_FCTL_START_FUNC 0x4 | ||
175 | |||
176 | #define SCSW_ACTL_SUSPENDED 0x1 | ||
177 | #define SCSW_ACTL_DEVACT 0x2 | ||
178 | #define SCSW_ACTL_SCHACT 0x4 | ||
179 | #define SCSW_ACTL_CLEAR_PEND 0x8 | ||
180 | #define SCSW_ACTL_HALT_PEND 0x10 | ||
181 | #define SCSW_ACTL_START_PEND 0x20 | ||
182 | #define SCSW_ACTL_RESUME_PEND 0x40 | ||
183 | |||
184 | #define SCSW_STCTL_STATUS_PEND 0x1 | ||
185 | #define SCSW_STCTL_SEC_STATUS 0x2 | ||
186 | #define SCSW_STCTL_PRIM_STATUS 0x4 | ||
187 | #define SCSW_STCTL_INTER_STATUS 0x8 | ||
188 | #define SCSW_STCTL_ALERT_STATUS 0x10 | ||
189 | |||
190 | #define DEV_STAT_ATTENTION 0x80 | ||
191 | #define DEV_STAT_STAT_MOD 0x40 | ||
192 | #define DEV_STAT_CU_END 0x20 | ||
193 | #define DEV_STAT_BUSY 0x10 | ||
194 | #define DEV_STAT_CHN_END 0x08 | ||
195 | #define DEV_STAT_DEV_END 0x04 | ||
196 | #define DEV_STAT_UNIT_CHECK 0x02 | ||
197 | #define DEV_STAT_UNIT_EXCEP 0x01 | ||
198 | |||
199 | #define SCHN_STAT_PCI 0x80 | ||
200 | #define SCHN_STAT_INCORR_LEN 0x40 | ||
201 | #define SCHN_STAT_PROG_CHECK 0x20 | ||
202 | #define SCHN_STAT_PROT_CHECK 0x10 | ||
203 | #define SCHN_STAT_CHN_DATA_CHK 0x08 | ||
204 | #define SCHN_STAT_CHN_CTRL_CHK 0x04 | ||
205 | #define SCHN_STAT_INTF_CTRL_CHK 0x02 | ||
206 | #define SCHN_STAT_CHAIN_CHECK 0x01 | ||
207 | |||
208 | /* | ||
209 | * architectured values for first sense byte | ||
210 | */ | ||
211 | #define SNS0_CMD_REJECT 0x80 | ||
212 | #define SNS_CMD_REJECT SNS0_CMD_REJECT | ||
213 | #define SNS0_INTERVENTION_REQ 0x40 | ||
214 | #define SNS0_BUS_OUT_CHECK 0x20 | ||
215 | #define SNS0_EQUIPMENT_CHECK 0x10 | ||
216 | #define SNS0_DATA_CHECK 0x08 | ||
217 | #define SNS0_OVERRUN 0x04 | ||
218 | #define SNS0_INCOMPL_DOMAIN 0x01 | ||
219 | |||
220 | /* | ||
221 | * architectured values for second sense byte | ||
222 | */ | ||
223 | #define SNS1_PERM_ERR 0x80 | ||
224 | #define SNS1_INV_TRACK_FORMAT 0x40 | ||
225 | #define SNS1_EOC 0x20 | ||
226 | #define SNS1_MESSAGE_TO_OPER 0x10 | ||
227 | #define SNS1_NO_REC_FOUND 0x08 | ||
228 | #define SNS1_FILE_PROTECTED 0x04 | ||
229 | #define SNS1_WRITE_INHIBITED 0x02 | ||
230 | #define SNS1_INPRECISE_END 0x01 | ||
231 | |||
232 | /* | ||
233 | * architectured values for third sense byte | ||
234 | */ | ||
235 | #define SNS2_REQ_INH_WRITE 0x80 | ||
236 | #define SNS2_CORRECTABLE 0x40 | ||
237 | #define SNS2_FIRST_LOG_ERR 0x20 | ||
238 | #define SNS2_ENV_DATA_PRESENT 0x10 | ||
239 | #define SNS2_INPRECISE_END 0x04 | ||
240 | 19 | ||
241 | /** | 20 | /** |
242 | * struct ccw1 - channel command word | 21 | * struct ccw1 - channel command word |
diff --git a/arch/s390/include/asm/cpu.h b/arch/s390/include/asm/cpu.h new file mode 100644 index 000000000000..471234b90574 --- /dev/null +++ b/arch/s390/include/asm/cpu.h | |||
@@ -0,0 +1,26 @@ | |||
1 | /* | ||
2 | * Copyright IBM Corp. 2000,2009 | ||
3 | * Author(s): Hartmut Penner <hp@de.ibm.com>, | ||
4 | * Martin Schwidefsky <schwidefsky@de.ibm.com>, | ||
5 | * Christian Ehrhardt <ehrhardt@de.ibm.com>, | ||
6 | */ | ||
7 | |||
8 | #ifndef _ASM_S390_CPU_H | ||
9 | #define _ASM_S390_CPU_H | ||
10 | |||
11 | #define MAX_CPU_ADDRESS 255 | ||
12 | |||
13 | #ifndef __ASSEMBLY__ | ||
14 | |||
15 | #include <linux/types.h> | ||
16 | |||
17 | struct cpuid | ||
18 | { | ||
19 | unsigned int version : 8; | ||
20 | unsigned int ident : 24; | ||
21 | unsigned int machine : 16; | ||
22 | unsigned int unused : 16; | ||
23 | } __packed; | ||
24 | |||
25 | #endif /* __ASSEMBLY__ */ | ||
26 | #endif /* _ASM_S390_CPU_H */ | ||
diff --git a/arch/s390/include/asm/cpuid.h b/arch/s390/include/asm/cpuid.h deleted file mode 100644 index 07836a2e5222..000000000000 --- a/arch/s390/include/asm/cpuid.h +++ /dev/null | |||
@@ -1,25 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright IBM Corp. 2000,2009 | ||
3 | * Author(s): Hartmut Penner <hp@de.ibm.com>, | ||
4 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | ||
5 | * Christian Ehrhardt <ehrhardt@de.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #ifndef _ASM_S390_CPUID_H_ | ||
9 | #define _ASM_S390_CPUID_H_ | ||
10 | |||
11 | /* | ||
12 | * CPU type and hardware bug flags. Kept separately for each CPU. | ||
13 | * Members of this structure are referenced in head.S, so think twice | ||
14 | * before touching them. [mj] | ||
15 | */ | ||
16 | |||
17 | typedef struct | ||
18 | { | ||
19 | unsigned int version : 8; | ||
20 | unsigned int ident : 24; | ||
21 | unsigned int machine : 16; | ||
22 | unsigned int unused : 16; | ||
23 | } __attribute__ ((packed)) cpuid_t; | ||
24 | |||
25 | #endif /* _ASM_S390_CPUID_H_ */ | ||
diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h index 31ed5686a968..18124b75a7ab 100644 --- a/arch/s390/include/asm/debug.h +++ b/arch/s390/include/asm/debug.h | |||
@@ -167,6 +167,10 @@ debug_text_event(debug_info_t* id, int level, const char* txt) | |||
167 | return debug_event_common(id,level,txt,strlen(txt)); | 167 | return debug_event_common(id,level,txt,strlen(txt)); |
168 | } | 168 | } |
169 | 169 | ||
170 | /* | ||
171 | * IMPORTANT: Use "%s" in sprintf format strings with care! Only pointers are | ||
172 | * stored in the s390dbf. See Documentation/s390/s390dbf.txt for more details! | ||
173 | */ | ||
170 | extern debug_entry_t * | 174 | extern debug_entry_t * |
171 | debug_sprintf_event(debug_info_t* id,int level,char *string,...) | 175 | debug_sprintf_event(debug_info_t* id,int level,char *string,...) |
172 | __attribute__ ((format(printf, 3, 4))); | 176 | __attribute__ ((format(printf, 3, 4))); |
@@ -206,7 +210,10 @@ debug_text_exception(debug_info_t* id, int level, const char* txt) | |||
206 | return debug_exception_common(id,level,txt,strlen(txt)); | 210 | return debug_exception_common(id,level,txt,strlen(txt)); |
207 | } | 211 | } |
208 | 212 | ||
209 | 213 | /* | |
214 | * IMPORTANT: Use "%s" in sprintf format strings with care! Only pointers are | ||
215 | * stored in the s390dbf. See Documentation/s390/s390dbf.txt for more details! | ||
216 | */ | ||
210 | extern debug_entry_t * | 217 | extern debug_entry_t * |
211 | debug_sprintf_exception(debug_info_t* id,int level,char *string,...) | 218 | debug_sprintf_exception(debug_info_t* id,int level,char *string,...) |
212 | __attribute__ ((format(printf, 3, 4))); | 219 | __attribute__ ((format(printf, 3, 4))); |
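
The comments added here codify a real s390dbf pitfall: for "%s" only the pointer is recorded, so the referenced string must still exist when the debug buffer is read. A hedged sketch of the safe and unsafe variants ('dbf' is assumed to have been registered elsewhere via debug_register()):

#include <linux/kernel.h>
#include <asm/debug.h>

static void log_state(debug_info_t *dbf, int state)
{
	char tmp[24];

	/* OK: no "%s", all arguments are stored by value */
	debug_sprintf_event(dbf, 1, "state changed to %d\n", state);

	/* NOT OK: only the pointer to the stack buffer would be stored
	 * snprintf(tmp, sizeof(tmp), "state %d", state);
	 * debug_sprintf_event(dbf, 1, "%s\n", tmp);
	 */

	/* safe for transient strings: debug_text_event copies the text */
	snprintf(tmp, sizeof(tmp), "state %d", state);
	debug_text_event(dbf, 1, tmp);
}
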
diff --git a/arch/s390/include/asm/hardirq.h b/arch/s390/include/asm/hardirq.h index 89ec7056da28..498bc3892385 100644 --- a/arch/s390/include/asm/hardirq.h +++ b/arch/s390/include/asm/hardirq.h | |||
@@ -18,13 +18,6 @@ | |||
18 | #include <linux/interrupt.h> | 18 | #include <linux/interrupt.h> |
19 | #include <asm/lowcore.h> | 19 | #include <asm/lowcore.h> |
20 | 20 | ||
21 | /* irq_cpustat_t is unused currently, but could be converted | ||
22 | * into a percpu variable instead of storing softirq_pending | ||
23 | * on the lowcore */ | ||
24 | typedef struct { | ||
25 | unsigned int __softirq_pending; | ||
26 | } irq_cpustat_t; | ||
27 | |||
28 | #define local_softirq_pending() (S390_lowcore.softirq_pending) | 21 | #define local_softirq_pending() (S390_lowcore.softirq_pending) |
29 | 22 | ||
30 | #define __ARCH_IRQ_STAT | 23 | #define __ARCH_IRQ_STAT |
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h index 1171e6d144a3..5e95d95450b3 100644 --- a/arch/s390/include/asm/ipl.h +++ b/arch/s390/include/asm/ipl.h | |||
@@ -57,6 +57,8 @@ struct ipl_block_fcp { | |||
57 | } __attribute__((packed)); | 57 | } __attribute__((packed)); |
58 | 58 | ||
59 | #define DIAG308_VMPARM_SIZE 64 | 59 | #define DIAG308_VMPARM_SIZE 64 |
60 | #define DIAG308_SCPDATA_SIZE (PAGE_SIZE - (sizeof(struct ipl_list_hdr) + \ | ||
61 | offsetof(struct ipl_block_fcp, scp_data))) | ||
60 | 62 | ||
61 | struct ipl_block_ccw { | 63 | struct ipl_block_ccw { |
62 | u8 load_parm[8]; | 64 | u8 load_parm[8]; |
@@ -91,7 +93,8 @@ extern void do_halt(void); | |||
91 | extern void do_poff(void); | 93 | extern void do_poff(void); |
92 | extern void ipl_save_parameters(void); | 94 | extern void ipl_save_parameters(void); |
93 | extern void ipl_update_parameters(void); | 95 | extern void ipl_update_parameters(void); |
94 | extern void get_ipl_vmparm(char *); | 96 | extern size_t append_ipl_vmparm(char *, size_t); |
97 | extern size_t append_ipl_scpdata(char *, size_t); | ||
95 | 98 | ||
96 | enum { | 99 | enum { |
97 | IPL_DEVNO_VALID = 1, | 100 | IPL_DEVNO_VALID = 1, |
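
DIAG308_SCPDATA_SIZE sizes the SCP data area as whatever remains of one page after the IPL list header and the fixed part of the FCP block, and the old get_ipl_vmparm() interface becomes a pair of append helpers. Assuming each helper returns the number of bytes it appended (consistent with the size_t signatures above, but an assumption here), chaining them could look like:

#include <linux/types.h>
#include <asm/ipl.h>

static void build_boot_params(char *buf, size_t size)
{
	size_t off = 0;

	/* assumption: each helper returns the number of bytes appended */
	off += append_ipl_vmparm(buf + off, size - off);
	off += append_ipl_scpdata(buf + off, size - off);
}
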
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 1cd02f6073a0..698988f69403 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h | |||
@@ -17,7 +17,7 @@ | |||
17 | #include <linux/interrupt.h> | 17 | #include <linux/interrupt.h> |
18 | #include <linux/kvm_host.h> | 18 | #include <linux/kvm_host.h> |
19 | #include <asm/debug.h> | 19 | #include <asm/debug.h> |
20 | #include <asm/cpuid.h> | 20 | #include <asm/cpu.h> |
21 | 21 | ||
22 | #define KVM_MAX_VCPUS 64 | 22 | #define KVM_MAX_VCPUS 64 |
23 | #define KVM_MEMORY_SLOTS 32 | 23 | #define KVM_MEMORY_SLOTS 32 |
@@ -217,8 +217,8 @@ struct kvm_vcpu_arch { | |||
217 | struct hrtimer ckc_timer; | 217 | struct hrtimer ckc_timer; |
218 | struct tasklet_struct tasklet; | 218 | struct tasklet_struct tasklet; |
219 | union { | 219 | union { |
220 | cpuid_t cpu_id; | 220 | struct cpuid cpu_id; |
221 | u64 stidp_data; | 221 | u64 stidp_data; |
222 | }; | 222 | }; |
223 | }; | 223 | }; |
224 | 224 | ||
diff --git a/arch/s390/include/asm/kvm_virtio.h b/arch/s390/include/asm/kvm_virtio.h index 0503936f101f..acdfdff26611 100644 --- a/arch/s390/include/asm/kvm_virtio.h +++ b/arch/s390/include/asm/kvm_virtio.h | |||
@@ -54,14 +54,4 @@ struct kvm_vqconfig { | |||
54 | * This is pagesize for historical reasons. */ | 54 | * This is pagesize for historical reasons. */ |
55 | #define KVM_S390_VIRTIO_RING_ALIGN 4096 | 55 | #define KVM_S390_VIRTIO_RING_ALIGN 4096 |
56 | 56 | ||
57 | #ifdef __KERNEL__ | ||
58 | /* early virtio console setup */ | ||
59 | #ifdef CONFIG_S390_GUEST | ||
60 | extern void s390_virtio_console_init(void); | ||
61 | #else | ||
62 | static inline void s390_virtio_console_init(void) | ||
63 | { | ||
64 | } | ||
65 | #endif /* CONFIG_S390_GUEST */ | ||
66 | #endif /* __KERNEL__ */ | ||
67 | #endif | 57 | #endif |
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h index 5046ad6b7a63..6bc9426a6fbf 100644 --- a/arch/s390/include/asm/lowcore.h +++ b/arch/s390/include/asm/lowcore.h | |||
@@ -132,7 +132,7 @@ | |||
132 | 132 | ||
133 | #ifndef __ASSEMBLY__ | 133 | #ifndef __ASSEMBLY__ |
134 | 134 | ||
135 | #include <asm/cpuid.h> | 135 | #include <asm/cpu.h> |
136 | #include <asm/ptrace.h> | 136 | #include <asm/ptrace.h> |
137 | #include <linux/types.h> | 137 | #include <linux/types.h> |
138 | 138 | ||
@@ -275,7 +275,7 @@ struct _lowcore | |||
275 | __u32 user_exec_asce; /* 0x02ac */ | 275 | __u32 user_exec_asce; /* 0x02ac */ |
276 | 276 | ||
277 | /* SMP info area */ | 277 | /* SMP info area */ |
278 | cpuid_t cpu_id; /* 0x02b0 */ | 278 | struct cpuid cpu_id; /* 0x02b0 */ |
279 | __u32 cpu_nr; /* 0x02b8 */ | 279 | __u32 cpu_nr; /* 0x02b8 */ |
280 | __u32 softirq_pending; /* 0x02bc */ | 280 | __u32 softirq_pending; /* 0x02bc */ |
281 | __u32 percpu_offset; /* 0x02c0 */ | 281 | __u32 percpu_offset; /* 0x02c0 */ |
@@ -380,7 +380,7 @@ struct _lowcore | |||
380 | __u64 user_exec_asce; /* 0x0318 */ | 380 | __u64 user_exec_asce; /* 0x0318 */ |
381 | 381 | ||
382 | /* SMP info area */ | 382 | /* SMP info area */ |
383 | cpuid_t cpu_id; /* 0x0320 */ | 383 | struct cpuid cpu_id; /* 0x0320 */ |
384 | __u32 cpu_nr; /* 0x0328 */ | 384 | __u32 cpu_nr; /* 0x0328 */ |
385 | __u32 softirq_pending; /* 0x032c */ | 385 | __u32 softirq_pending; /* 0x032c */ |
386 | __u64 percpu_offset; /* 0x0330 */ | 386 | __u64 percpu_offset; /* 0x0330 */ |
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h index 3b59216e6284..03be99919d62 100644 --- a/arch/s390/include/asm/mmu.h +++ b/arch/s390/include/asm/mmu.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #define __MMU_H | 2 | #define __MMU_H |
3 | 3 | ||
4 | typedef struct { | 4 | typedef struct { |
5 | spinlock_t list_lock; | ||
5 | struct list_head crst_list; | 6 | struct list_head crst_list; |
6 | struct list_head pgtable_list; | 7 | struct list_head pgtable_list; |
7 | unsigned long asce_bits; | 8 | unsigned long asce_bits; |
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h index 3e3594d01f83..5e9daf5d7f22 100644 --- a/arch/s390/include/asm/page.h +++ b/arch/s390/include/asm/page.h | |||
@@ -125,8 +125,6 @@ page_get_storage_key(unsigned long addr) | |||
125 | return skey; | 125 | return skey; |
126 | } | 126 | } |
127 | 127 | ||
128 | #ifdef CONFIG_PAGE_STATES | ||
129 | |||
130 | struct page; | 128 | struct page; |
131 | void arch_free_page(struct page *page, int order); | 129 | void arch_free_page(struct page *page, int order); |
132 | void arch_alloc_page(struct page *page, int order); | 130 | void arch_alloc_page(struct page *page, int order); |
@@ -134,8 +132,6 @@ void arch_alloc_page(struct page *page, int order); | |||
134 | #define HAVE_ARCH_FREE_PAGE | 132 | #define HAVE_ARCH_FREE_PAGE |
135 | #define HAVE_ARCH_ALLOC_PAGE | 133 | #define HAVE_ARCH_ALLOC_PAGE |
136 | 134 | ||
137 | #endif | ||
138 | |||
139 | #endif /* !__ASSEMBLY__ */ | 135 | #endif /* !__ASSEMBLY__ */ |
140 | 136 | ||
141 | #define __PAGE_OFFSET 0x0UL | 137 | #define __PAGE_OFFSET 0x0UL |
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h index b2658b9220fe..ddad5903341c 100644 --- a/arch/s390/include/asm/pgalloc.h +++ b/arch/s390/include/asm/pgalloc.h | |||
@@ -140,6 +140,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) | |||
140 | 140 | ||
141 | static inline pgd_t *pgd_alloc(struct mm_struct *mm) | 141 | static inline pgd_t *pgd_alloc(struct mm_struct *mm) |
142 | { | 142 | { |
143 | spin_lock_init(&mm->context.list_lock); | ||
143 | INIT_LIST_HEAD(&mm->context.crst_list); | 144 | INIT_LIST_HEAD(&mm->context.crst_list); |
144 | INIT_LIST_HEAD(&mm->context.pgtable_list); | 145 | INIT_LIST_HEAD(&mm->context.pgtable_list); |
145 | return (pgd_t *) crst_table_alloc(mm, s390_noexec); | 146 | return (pgd_t *) crst_table_alloc(mm, s390_noexec); |
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index c139fa7b8e89..cf8eed3fa779 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h | |||
@@ -14,7 +14,7 @@ | |||
14 | #define __ASM_S390_PROCESSOR_H | 14 | #define __ASM_S390_PROCESSOR_H |
15 | 15 | ||
16 | #include <linux/linkage.h> | 16 | #include <linux/linkage.h> |
17 | #include <asm/cpuid.h> | 17 | #include <asm/cpu.h> |
18 | #include <asm/page.h> | 18 | #include <asm/page.h> |
19 | #include <asm/ptrace.h> | 19 | #include <asm/ptrace.h> |
20 | #include <asm/setup.h> | 20 | #include <asm/setup.h> |
@@ -26,7 +26,7 @@ | |||
26 | */ | 26 | */ |
27 | #define current_text_addr() ({ void *pc; asm("basr %0,0" : "=a" (pc)); pc; }) | 27 | #define current_text_addr() ({ void *pc; asm("basr %0,0" : "=a" (pc)); pc; }) |
28 | 28 | ||
29 | static inline void get_cpu_id(cpuid_t *ptr) | 29 | static inline void get_cpu_id(struct cpuid *ptr) |
30 | { | 30 | { |
31 | asm volatile("stidp 0(%1)" : "=m" (*ptr) : "a" (ptr)); | 31 | asm volatile("stidp 0(%1)" : "=m" (*ptr) : "a" (ptr)); |
32 | } | 32 | } |
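
get_cpu_id() now fills the renamed struct cpuid; STIDP stores the version, CPU identification number and machine type in a single 8-byte block. A hedged sketch of reading it:

#include <linux/kernel.h>
#include <asm/cpu.h>
#include <asm/processor.h>

static void show_cpu_id(void)
{
	struct cpuid id;

	get_cpu_id(&id);
	pr_info("CPU version %02x, ident %06x, machine %04x\n",
		id.version, id.ident, id.machine);
}
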
diff --git a/arch/s390/include/asm/scatterlist.h b/arch/s390/include/asm/scatterlist.h index 29ec8e28c8df..35d786fe93ae 100644 --- a/arch/s390/include/asm/scatterlist.h +++ b/arch/s390/include/asm/scatterlist.h | |||
@@ -1,19 +1 @@ | |||
1 | #ifndef _ASMS390_SCATTERLIST_H | #include <asm-generic/scatterlist.h> | |
2 | #define _ASMS390_SCATTERLIST_H | ||
3 | |||
4 | struct scatterlist { | ||
5 | #ifdef CONFIG_DEBUG_SG | ||
6 | unsigned long sg_magic; | ||
7 | #endif | ||
8 | unsigned long page_link; | ||
9 | unsigned int offset; | ||
10 | unsigned int length; | ||
11 | }; | ||
12 | |||
13 | #ifdef __s390x__ | ||
14 | #define ISA_DMA_THRESHOLD (0xffffffffffffffffUL) | ||
15 | #else | ||
16 | #define ISA_DMA_THRESHOLD (0xffffffffUL) | ||
17 | #endif | ||
18 | |||
19 | #endif /* _ASMS390_SCATTERLIST_H */ | ||
diff --git a/arch/s390/include/asm/scsw.h b/arch/s390/include/asm/scsw.h new file mode 100644 index 000000000000..de389cb54d28 --- /dev/null +++ b/arch/s390/include/asm/scsw.h | |||
@@ -0,0 +1,956 @@ | |||
1 | /* | ||
2 | * Helper functions for scsw access. | ||
3 | * | ||
4 | * Copyright IBM Corp. 2008,2009 | ||
5 | * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #ifndef _ASM_S390_SCSW_H_ | ||
9 | #define _ASM_S390_SCSW_H_ | ||
10 | |||
11 | #include <linux/types.h> | ||
12 | #include <asm/chsc.h> | ||
13 | #include <asm/cio.h> | ||
14 | |||
15 | /** | ||
16 | * struct cmd_scsw - command-mode subchannel status word | ||
17 | * @key: subchannel key | ||
18 | * @sctl: suspend control | ||
19 | * @eswf: esw format | ||
20 | * @cc: deferred condition code | ||
21 | * @fmt: format | ||
22 | * @pfch: prefetch | ||
23 | * @isic: initial-status interruption control | ||
24 | * @alcc: address-limit checking control | ||
25 | * @ssi: suppress-suspended interruption | ||
26 | * @zcc: zero condition code | ||
27 | * @ectl: extended control | ||
28 | * @pno: path not operational | ||
29 | * @res: reserved | ||
30 | * @fctl: function control | ||
31 | * @actl: activity control | ||
32 | * @stctl: status control | ||
33 | * @cpa: channel program address | ||
34 | * @dstat: device status | ||
35 | * @cstat: subchannel status | ||
36 | * @count: residual count | ||
37 | */ | ||
38 | struct cmd_scsw { | ||
39 | __u32 key : 4; | ||
40 | __u32 sctl : 1; | ||
41 | __u32 eswf : 1; | ||
42 | __u32 cc : 2; | ||
43 | __u32 fmt : 1; | ||
44 | __u32 pfch : 1; | ||
45 | __u32 isic : 1; | ||
46 | __u32 alcc : 1; | ||
47 | __u32 ssi : 1; | ||
48 | __u32 zcc : 1; | ||
49 | __u32 ectl : 1; | ||
50 | __u32 pno : 1; | ||
51 | __u32 res : 1; | ||
52 | __u32 fctl : 3; | ||
53 | __u32 actl : 7; | ||
54 | __u32 stctl : 5; | ||
55 | __u32 cpa; | ||
56 | __u32 dstat : 8; | ||
57 | __u32 cstat : 8; | ||
58 | __u32 count : 16; | ||
59 | } __attribute__ ((packed)); | ||
60 | |||
61 | /** | ||
62 | * struct tm_scsw - transport-mode subchannel status word | ||
63 | * @key: subchannel key | ||
64 | * @eswf: esw format | ||
65 | * @cc: deferred condition code | ||
66 | * @fmt: format | ||
67 | * @x: IRB-format control | ||
68 | * @q: interrogate-complete | ||
69 | * @ectl: extended control | ||
70 | * @pno: path not operational | ||
71 | * @fctl: function control | ||
72 | * @actl: activity control | ||
73 | * @stctl: status control | ||
74 | * @tcw: TCW address | ||
75 | * @dstat: device status | ||
76 | * @cstat: subchannel status | ||
77 | * @fcxs: FCX status | ||
78 | * @schxs: subchannel-extended status | ||
79 | */ | ||
80 | struct tm_scsw { | ||
81 | u32 key:4; | ||
82 | u32 :1; | ||
83 | u32 eswf:1; | ||
84 | u32 cc:2; | ||
85 | u32 fmt:3; | ||
86 | u32 x:1; | ||
87 | u32 q:1; | ||
88 | u32 :1; | ||
89 | u32 ectl:1; | ||
90 | u32 pno:1; | ||
91 | u32 :1; | ||
92 | u32 fctl:3; | ||
93 | u32 actl:7; | ||
94 | u32 stctl:5; | ||
95 | u32 tcw; | ||
96 | u32 dstat:8; | ||
97 | u32 cstat:8; | ||
98 | u32 fcxs:8; | ||
99 | u32 schxs:8; | ||
100 | } __attribute__ ((packed)); | ||
101 | |||
102 | /** | ||
103 | * union scsw - subchannel status word | ||
104 | * @cmd: command-mode SCSW | ||
105 | * @tm: transport-mode SCSW | ||
106 | */ | ||
107 | union scsw { | ||
108 | struct cmd_scsw cmd; | ||
109 | struct tm_scsw tm; | ||
110 | } __attribute__ ((packed)); | ||
111 | |||
112 | #define SCSW_FCTL_CLEAR_FUNC 0x1 | ||
113 | #define SCSW_FCTL_HALT_FUNC 0x2 | ||
114 | #define SCSW_FCTL_START_FUNC 0x4 | ||
115 | |||
116 | #define SCSW_ACTL_SUSPENDED 0x1 | ||
117 | #define SCSW_ACTL_DEVACT 0x2 | ||
118 | #define SCSW_ACTL_SCHACT 0x4 | ||
119 | #define SCSW_ACTL_CLEAR_PEND 0x8 | ||
120 | #define SCSW_ACTL_HALT_PEND 0x10 | ||
121 | #define SCSW_ACTL_START_PEND 0x20 | ||
122 | #define SCSW_ACTL_RESUME_PEND 0x40 | ||
123 | |||
124 | #define SCSW_STCTL_STATUS_PEND 0x1 | ||
125 | #define SCSW_STCTL_SEC_STATUS 0x2 | ||
126 | #define SCSW_STCTL_PRIM_STATUS 0x4 | ||
127 | #define SCSW_STCTL_INTER_STATUS 0x8 | ||
128 | #define SCSW_STCTL_ALERT_STATUS 0x10 | ||
129 | |||
130 | #define DEV_STAT_ATTENTION 0x80 | ||
131 | #define DEV_STAT_STAT_MOD 0x40 | ||
132 | #define DEV_STAT_CU_END 0x20 | ||
133 | #define DEV_STAT_BUSY 0x10 | ||
134 | #define DEV_STAT_CHN_END 0x08 | ||
135 | #define DEV_STAT_DEV_END 0x04 | ||
136 | #define DEV_STAT_UNIT_CHECK 0x02 | ||
137 | #define DEV_STAT_UNIT_EXCEP 0x01 | ||
138 | |||
139 | #define SCHN_STAT_PCI 0x80 | ||
140 | #define SCHN_STAT_INCORR_LEN 0x40 | ||
141 | #define SCHN_STAT_PROG_CHECK 0x20 | ||
142 | #define SCHN_STAT_PROT_CHECK 0x10 | ||
143 | #define SCHN_STAT_CHN_DATA_CHK 0x08 | ||
144 | #define SCHN_STAT_CHN_CTRL_CHK 0x04 | ||
145 | #define SCHN_STAT_INTF_CTRL_CHK 0x02 | ||
146 | #define SCHN_STAT_CHAIN_CHECK 0x01 | ||
147 | |||
148 | /* | ||
149 | * architectured values for first sense byte | ||
150 | */ | ||
151 | #define SNS0_CMD_REJECT 0x80 | ||
152 | #define SNS_CMD_REJECT SNS0_CMD_REJECT | ||
153 | #define SNS0_INTERVENTION_REQ 0x40 | ||
154 | #define SNS0_BUS_OUT_CHECK 0x20 | ||
155 | #define SNS0_EQUIPMENT_CHECK 0x10 | ||
156 | #define SNS0_DATA_CHECK 0x08 | ||
157 | #define SNS0_OVERRUN 0x04 | ||
158 | #define SNS0_INCOMPL_DOMAIN 0x01 | ||
159 | |||
160 | /* | ||
161 | * architectured values for second sense byte | ||
162 | */ | ||
163 | #define SNS1_PERM_ERR 0x80 | ||
164 | #define SNS1_INV_TRACK_FORMAT 0x40 | ||
165 | #define SNS1_EOC 0x20 | ||
166 | #define SNS1_MESSAGE_TO_OPER 0x10 | ||
167 | #define SNS1_NO_REC_FOUND 0x08 | ||
168 | #define SNS1_FILE_PROTECTED 0x04 | ||
169 | #define SNS1_WRITE_INHIBITED 0x02 | ||
170 | #define SNS1_INPRECISE_END 0x01 | ||
171 | |||
172 | /* | ||
173 | * architectured values for third sense byte | ||
174 | */ | ||
175 | #define SNS2_REQ_INH_WRITE 0x80 | ||
176 | #define SNS2_CORRECTABLE 0x40 | ||
177 | #define SNS2_FIRST_LOG_ERR 0x20 | ||
178 | #define SNS2_ENV_DATA_PRESENT 0x10 | ||
179 | #define SNS2_INPRECISE_END 0x04 | ||
180 | |||
181 | /** | ||
182 | * scsw_is_tm - check for transport mode scsw | ||
183 | * @scsw: pointer to scsw | ||
184 | * | ||
185 | * Return non-zero if the specified scsw is a transport mode scsw, zero | ||
186 | * otherwise. | ||
187 | */ | ||
188 | static inline int scsw_is_tm(union scsw *scsw) | ||
189 | { | ||
190 | return css_general_characteristics.fcx && (scsw->tm.x == 1); | ||
191 | } | ||
192 | |||
193 | /** | ||
194 | * scsw_key - return scsw key field | ||
195 | * @scsw: pointer to scsw | ||
196 | * | ||
197 | * Return the value of the key field of the specified scsw, regardless of | ||
198 | * whether it is a transport mode or command mode scsw. | ||
199 | */ | ||
200 | static inline u32 scsw_key(union scsw *scsw) | ||
201 | { | ||
202 | if (scsw_is_tm(scsw)) | ||
203 | return scsw->tm.key; | ||
204 | else | ||
205 | return scsw->cmd.key; | ||
206 | } | ||
207 | |||
208 | /** | ||
209 | * scsw_eswf - return scsw eswf field | ||
210 | * @scsw: pointer to scsw | ||
211 | * | ||
212 | * Return the value of the eswf field of the specified scsw, regardless of | ||
213 | * whether it is a transport mode or command mode scsw. | ||
214 | */ | ||
215 | static inline u32 scsw_eswf(union scsw *scsw) | ||
216 | { | ||
217 | if (scsw_is_tm(scsw)) | ||
218 | return scsw->tm.eswf; | ||
219 | else | ||
220 | return scsw->cmd.eswf; | ||
221 | } | ||
222 | |||
223 | /** | ||
224 | * scsw_cc - return scsw cc field | ||
225 | * @scsw: pointer to scsw | ||
226 | * | ||
227 | * Return the value of the cc field of the specified scsw, regardless of | ||
228 | * whether it is a transport mode or command mode scsw. | ||
229 | */ | ||
230 | static inline u32 scsw_cc(union scsw *scsw) | ||
231 | { | ||
232 | if (scsw_is_tm(scsw)) | ||
233 | return scsw->tm.cc; | ||
234 | else | ||
235 | return scsw->cmd.cc; | ||
236 | } | ||
237 | |||
238 | /** | ||
239 | * scsw_ectl - return scsw ectl field | ||
240 | * @scsw: pointer to scsw | ||
241 | * | ||
242 | * Return the value of the ectl field of the specified scsw, regardless of | ||
243 | * whether it is a transport mode or command mode scsw. | ||
244 | */ | ||
245 | static inline u32 scsw_ectl(union scsw *scsw) | ||
246 | { | ||
247 | if (scsw_is_tm(scsw)) | ||
248 | return scsw->tm.ectl; | ||
249 | else | ||
250 | return scsw->cmd.ectl; | ||
251 | } | ||
252 | |||
253 | /** | ||
254 | * scsw_pno - return scsw pno field | ||
255 | * @scsw: pointer to scsw | ||
256 | * | ||
257 | * Return the value of the pno field of the specified scsw, regardless of | ||
258 | * whether it is a transport mode or command mode scsw. | ||
259 | */ | ||
260 | static inline u32 scsw_pno(union scsw *scsw) | ||
261 | { | ||
262 | if (scsw_is_tm(scsw)) | ||
263 | return scsw->tm.pno; | ||
264 | else | ||
265 | return scsw->cmd.pno; | ||
266 | } | ||
267 | |||
268 | /** | ||
269 | * scsw_fctl - return scsw fctl field | ||
270 | * @scsw: pointer to scsw | ||
271 | * | ||
272 | * Return the value of the fctl field of the specified scsw, regardless of | ||
273 | * whether it is a transport mode or command mode scsw. | ||
274 | */ | ||
275 | static inline u32 scsw_fctl(union scsw *scsw) | ||
276 | { | ||
277 | if (scsw_is_tm(scsw)) | ||
278 | return scsw->tm.fctl; | ||
279 | else | ||
280 | return scsw->cmd.fctl; | ||
281 | } | ||
282 | |||
283 | /** | ||
284 | * scsw_actl - return scsw actl field | ||
285 | * @scsw: pointer to scsw | ||
286 | * | ||
287 | * Return the value of the actl field of the specified scsw, regardless of | ||
288 | * whether it is a transport mode or command mode scsw. | ||
289 | */ | ||
290 | static inline u32 scsw_actl(union scsw *scsw) | ||
291 | { | ||
292 | if (scsw_is_tm(scsw)) | ||
293 | return scsw->tm.actl; | ||
294 | else | ||
295 | return scsw->cmd.actl; | ||
296 | } | ||
297 | |||
298 | /** | ||
299 | * scsw_stctl - return scsw stctl field | ||
300 | * @scsw: pointer to scsw | ||
301 | * | ||
302 | * Return the value of the stctl field of the specified scsw, regardless of | ||
303 | * whether it is a transport mode or command mode scsw. | ||
304 | */ | ||
305 | static inline u32 scsw_stctl(union scsw *scsw) | ||
306 | { | ||
307 | if (scsw_is_tm(scsw)) | ||
308 | return scsw->tm.stctl; | ||
309 | else | ||
310 | return scsw->cmd.stctl; | ||
311 | } | ||
312 | |||
313 | /** | ||
314 | * scsw_dstat - return scsw dstat field | ||
315 | * @scsw: pointer to scsw | ||
316 | * | ||
317 | * Return the value of the dstat field of the specified scsw, regardless of | ||
318 | * whether it is a transport mode or command mode scsw. | ||
319 | */ | ||
320 | static inline u32 scsw_dstat(union scsw *scsw) | ||
321 | { | ||
322 | if (scsw_is_tm(scsw)) | ||
323 | return scsw->tm.dstat; | ||
324 | else | ||
325 | return scsw->cmd.dstat; | ||
326 | } | ||
327 | |||
328 | /** | ||
329 | * scsw_cstat - return scsw cstat field | ||
330 | * @scsw: pointer to scsw | ||
331 | * | ||
332 | * Return the value of the cstat field of the specified scsw, regardless of | ||
333 | * whether it is a transport mode or command mode scsw. | ||
334 | */ | ||
335 | static inline u32 scsw_cstat(union scsw *scsw) | ||
336 | { | ||
337 | if (scsw_is_tm(scsw)) | ||
338 | return scsw->tm.cstat; | ||
339 | else | ||
340 | return scsw->cmd.cstat; | ||
341 | } | ||
342 | |||
343 | /** | ||
344 | * scsw_cmd_is_valid_key - check key field validity | ||
345 | * @scsw: pointer to scsw | ||
346 | * | ||
347 | * Return non-zero if the key field of the specified command mode scsw is | ||
348 | * valid, zero otherwise. | ||
349 | */ | ||
350 | static inline int scsw_cmd_is_valid_key(union scsw *scsw) | ||
351 | { | ||
352 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); | ||
353 | } | ||
354 | |||
355 | /** | ||
356 | * scsw_cmd_is_valid_sctl - check sctl field validity | ||
357 | * @scsw: pointer to scsw | ||
358 | * | ||
359 | * Return non-zero if the sctl field of the specified command mode scsw is | ||
360 | * valid, zero otherwise. | ||
361 | */ | ||
362 | static inline int scsw_cmd_is_valid_sctl(union scsw *scsw) | ||
363 | { | ||
364 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); | ||
365 | } | ||
366 | |||
367 | /** | ||
368 | * scsw_cmd_is_valid_eswf - check eswf field validity | ||
369 | * @scsw: pointer to scsw | ||
370 | * | ||
371 | * Return non-zero if the eswf field of the specified command mode scsw is | ||
372 | * valid, zero otherwise. | ||
373 | */ | ||
374 | static inline int scsw_cmd_is_valid_eswf(union scsw *scsw) | ||
375 | { | ||
376 | return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND); | ||
377 | } | ||
378 | |||
379 | /** | ||
380 | * scsw_cmd_is_valid_cc - check cc field validity | ||
381 | * @scsw: pointer to scsw | ||
382 | * | ||
383 | * Return non-zero if the cc field of the specified command mode scsw is | ||
384 | * valid, zero otherwise. | ||
385 | */ | ||
386 | static inline int scsw_cmd_is_valid_cc(union scsw *scsw) | ||
387 | { | ||
388 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) && | ||
389 | (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND); | ||
390 | } | ||
391 | |||
392 | /** | ||
393 | * scsw_cmd_is_valid_fmt - check fmt field validity | ||
394 | * @scsw: pointer to scsw | ||
395 | * | ||
396 | * Return non-zero if the fmt field of the specified command mode scsw is | ||
397 | * valid, zero otherwise. | ||
398 | */ | ||
399 | static inline int scsw_cmd_is_valid_fmt(union scsw *scsw) | ||
400 | { | ||
401 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); | ||
402 | } | ||
403 | |||
404 | /** | ||
405 | * scsw_cmd_is_valid_pfch - check pfch field validity | ||
406 | * @scsw: pointer to scsw | ||
407 | * | ||
408 | * Return non-zero if the pfch field of the specified command mode scsw is | ||
409 | * valid, zero otherwise. | ||
410 | */ | ||
411 | static inline int scsw_cmd_is_valid_pfch(union scsw *scsw) | ||
412 | { | ||
413 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); | ||
414 | } | ||
415 | |||
416 | /** | ||
417 | * scsw_cmd_is_valid_isic - check isic field validity | ||
418 | * @scsw: pointer to scsw | ||
419 | * | ||
420 | * Return non-zero if the isic field of the specified command mode scsw is | ||
421 | * valid, zero otherwise. | ||
422 | */ | ||
423 | static inline int scsw_cmd_is_valid_isic(union scsw *scsw) | ||
424 | { | ||
425 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); | ||
426 | } | ||
427 | |||
428 | /** | ||
429 | * scsw_cmd_is_valid_alcc - check alcc field validity | ||
430 | * @scsw: pointer to scsw | ||
431 | * | ||
432 | * Return non-zero if the alcc field of the specified command mode scsw is | ||
433 | * valid, zero otherwise. | ||
434 | */ | ||
435 | static inline int scsw_cmd_is_valid_alcc(union scsw *scsw) | ||
436 | { | ||
437 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); | ||
438 | } | ||
439 | |||
440 | /** | ||
441 | * scsw_cmd_is_valid_ssi - check ssi field validity | ||
442 | * @scsw: pointer to scsw | ||
443 | * | ||
444 | * Return non-zero if the ssi field of the specified command mode scsw is | ||
445 | * valid, zero otherwise. | ||
446 | */ | ||
447 | static inline int scsw_cmd_is_valid_ssi(union scsw *scsw) | ||
448 | { | ||
449 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); | ||
450 | } | ||
451 | |||
452 | /** | ||
453 | * scsw_cmd_is_valid_zcc - check zcc field validity | ||
454 | * @scsw: pointer to scsw | ||
455 | * | ||
456 | * Return non-zero if the zcc field of the specified command mode scsw is | ||
457 | * valid, zero otherwise. | ||
458 | */ | ||
459 | static inline int scsw_cmd_is_valid_zcc(union scsw *scsw) | ||
460 | { | ||
461 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) && | ||
462 | (scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS); | ||
463 | } | ||
464 | |||
465 | /** | ||
466 | * scsw_cmd_is_valid_ectl - check ectl field validity | ||
467 | * @scsw: pointer to scsw | ||
468 | * | ||
469 | * Return non-zero if the ectl field of the specified command mode scsw is | ||
470 | * valid, zero otherwise. | ||
471 | */ | ||
472 | static inline int scsw_cmd_is_valid_ectl(union scsw *scsw) | ||
473 | { | ||
474 | return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && | ||
475 | !(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) && | ||
476 | (scsw->cmd.stctl & SCSW_STCTL_ALERT_STATUS); | ||
477 | } | ||
478 | |||
479 | /** | ||
480 | * scsw_cmd_is_valid_pno - check pno field validity | ||
481 | * @scsw: pointer to scsw | ||
482 | * | ||
483 | * Return non-zero if the pno field of the specified command mode scsw is | ||
484 | * valid, zero otherwise. | ||
485 | */ | ||
486 | static inline int scsw_cmd_is_valid_pno(union scsw *scsw) | ||
487 | { | ||
488 | return (scsw->cmd.fctl != 0) && | ||
489 | (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && | ||
490 | (!(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) || | ||
491 | ((scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) && | ||
492 | (scsw->cmd.actl & SCSW_ACTL_SUSPENDED))); | ||
493 | } | ||
494 | |||
495 | /** | ||
496 | * scsw_cmd_is_valid_fctl - check fctl field validity | ||
497 | * @scsw: pointer to scsw | ||
498 | * | ||
499 | * Return non-zero if the fctl field of the specified command mode scsw is | ||
500 | * valid, zero otherwise. | ||
501 | */ | ||
502 | static inline int scsw_cmd_is_valid_fctl(union scsw *scsw) | ||
503 | { | ||
504 | /* Only valid if pmcw.dnv == 1 */ | ||
505 | return 1; | ||
506 | } | ||
507 | |||
508 | /** | ||
509 | * scsw_cmd_is_valid_actl - check actl field validity | ||
510 | * @scsw: pointer to scsw | ||
511 | * | ||
512 | * Return non-zero if the actl field of the specified command mode scsw is | ||
513 | * valid, zero otherwise. | ||
514 | */ | ||
515 | static inline int scsw_cmd_is_valid_actl(union scsw *scsw) | ||
516 | { | ||
517 | /* Only valid if pmcw.dnv == 1 */ | ||
518 | return 1; | ||
519 | } | ||
520 | |||
521 | /** | ||
522 | * scsw_cmd_is_valid_stctl - check stctl field validity | ||
523 | * @scsw: pointer to scsw | ||
524 | * | ||
525 | * Return non-zero if the stctl field of the specified command mode scsw is | ||
526 | * valid, zero otherwise. | ||
527 | */ | ||
528 | static inline int scsw_cmd_is_valid_stctl(union scsw *scsw) | ||
529 | { | ||
530 | /* Only valid if pmcw.dnv == 1 */ | ||
531 | return 1; | ||
532 | } | ||
533 | |||
534 | /** | ||
535 | * scsw_cmd_is_valid_dstat - check dstat field validity | ||
536 | * @scsw: pointer to scsw | ||
537 | * | ||
538 | * Return non-zero if the dstat field of the specified command mode scsw is | ||
539 | * valid, zero otherwise. | ||
540 | */ | ||
541 | static inline int scsw_cmd_is_valid_dstat(union scsw *scsw) | ||
542 | { | ||
543 | return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && | ||
544 | (scsw->cmd.cc != 3); | ||
545 | } | ||
546 | |||
547 | /** | ||
548 | * scsw_cmd_is_valid_cstat - check cstat field validity | ||
549 | * @scsw: pointer to scsw | ||
550 | * | ||
551 | * Return non-zero if the cstat field of the specified command mode scsw is | ||
552 | * valid, zero otherwise. | ||
553 | */ | ||
554 | static inline int scsw_cmd_is_valid_cstat(union scsw *scsw) | ||
555 | { | ||
556 | return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && | ||
557 | (scsw->cmd.cc != 3); | ||
558 | } | ||
559 | |||
560 | /** | ||
561 | * scsw_tm_is_valid_key - check key field validity | ||
562 | * @scsw: pointer to scsw | ||
563 | * | ||
564 | * Return non-zero if the key field of the specified transport mode scsw is | ||
565 | * valid, zero otherwise. | ||
566 | */ | ||
567 | static inline int scsw_tm_is_valid_key(union scsw *scsw) | ||
568 | { | ||
569 | return (scsw->tm.fctl & SCSW_FCTL_START_FUNC); | ||
570 | } | ||
571 | |||
572 | /** | ||
573 | * scsw_tm_is_valid_eswf - check eswf field validity | ||
574 | * @scsw: pointer to scsw | ||
575 | * | ||
576 | * Return non-zero if the eswf field of the specified transport mode scsw is | ||
577 | * valid, zero otherwise. | ||
578 | */ | ||
579 | static inline int scsw_tm_is_valid_eswf(union scsw *scsw) | ||
580 | { | ||
581 | return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND); | ||
582 | } | ||
583 | |||
584 | /** | ||
585 | * scsw_tm_is_valid_cc - check cc field validity | ||
586 | * @scsw: pointer to scsw | ||
587 | * | ||
588 | * Return non-zero if the cc field of the specified transport mode scsw is | ||
589 | * valid, zero otherwise. | ||
590 | */ | ||
591 | static inline int scsw_tm_is_valid_cc(union scsw *scsw) | ||
592 | { | ||
593 | return (scsw->tm.fctl & SCSW_FCTL_START_FUNC) && | ||
594 | (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND); | ||
595 | } | ||
596 | |||
597 | /** | ||
598 | * scsw_tm_is_valid_fmt - check fmt field validity | ||
599 | * @scsw: pointer to scsw | ||
600 | * | ||
601 | * Return non-zero if the fmt field of the specified transport mode scsw is | ||
602 | * valid, zero otherwise. | ||
603 | */ | ||
604 | static inline int scsw_tm_is_valid_fmt(union scsw *scsw) | ||
605 | { | ||
606 | return 1; | ||
607 | } | ||
608 | |||
609 | /** | ||
610 | * scsw_tm_is_valid_x - check x field validity | ||
611 | * @scsw: pointer to scsw | ||
612 | * | ||
613 | * Return non-zero if the x field of the specified transport mode scsw is | ||
614 | * valid, zero otherwise. | ||
615 | */ | ||
616 | static inline int scsw_tm_is_valid_x(union scsw *scsw) | ||
617 | { | ||
618 | return 1; | ||
619 | } | ||
620 | |||
621 | /** | ||
622 | * scsw_tm_is_valid_q - check q field validity | ||
623 | * @scsw: pointer to scsw | ||
624 | * | ||
625 | * Return non-zero if the q field of the specified transport mode scsw is | ||
626 | * valid, zero otherwise. | ||
627 | */ | ||
628 | static inline int scsw_tm_is_valid_q(union scsw *scsw) | ||
629 | { | ||
630 | return 1; | ||
631 | } | ||
632 | |||
633 | /** | ||
634 | * scsw_tm_is_valid_ectl - check ectl field validity | ||
635 | * @scsw: pointer to scsw | ||
636 | * | ||
637 | * Return non-zero if the ectl field of the specified transport mode scsw is | ||
638 | * valid, zero otherwise. | ||
639 | */ | ||
640 | static inline int scsw_tm_is_valid_ectl(union scsw *scsw) | ||
641 | { | ||
642 | return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && | ||
643 | !(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) && | ||
644 | (scsw->tm.stctl & SCSW_STCTL_ALERT_STATUS); | ||
645 | } | ||
646 | |||
647 | /** | ||
648 | * scsw_tm_is_valid_pno - check pno field validity | ||
649 | * @scsw: pointer to scsw | ||
650 | * | ||
651 | * Return non-zero if the pno field of the specified transport mode scsw is | ||
652 | * valid, zero otherwise. | ||
653 | */ | ||
654 | static inline int scsw_tm_is_valid_pno(union scsw *scsw) | ||
655 | { | ||
656 | return (scsw->tm.fctl != 0) && | ||
657 | (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && | ||
658 | (!(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) || | ||
659 | ((scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) && | ||
660 | (scsw->tm.actl & SCSW_ACTL_SUSPENDED))); | ||
661 | } | ||
662 | |||
663 | /** | ||
664 | * scsw_tm_is_valid_fctl - check fctl field validity | ||
665 | * @scsw: pointer to scsw | ||
666 | * | ||
667 | * Return non-zero if the fctl field of the specified transport mode scsw is | ||
668 | * valid, zero otherwise. | ||
669 | */ | ||
670 | static inline int scsw_tm_is_valid_fctl(union scsw *scsw) | ||
671 | { | ||
672 | /* Only valid if pmcw.dnv == 1 */ | ||
673 | return 1; | ||
674 | } | ||
675 | |||
676 | /** | ||
677 | * scsw_tm_is_valid_actl - check actl field validity | ||
678 | * @scsw: pointer to scsw | ||
679 | * | ||
680 | * Return non-zero if the actl field of the specified transport mode scsw is | ||
681 | * valid, zero otherwise. | ||
682 | */ | ||
683 | static inline int scsw_tm_is_valid_actl(union scsw *scsw) | ||
684 | { | ||
685 | /* Only valid if pmcw.dnv == 1 */ | ||
686 | return 1; | ||
687 | } | ||
688 | |||
689 | /** | ||
690 | * scsw_tm_is_valid_stctl - check stctl field validity | ||
691 | * @scsw: pointer to scsw | ||
692 | * | ||
693 | * Return non-zero if the stctl field of the specified transport mode scsw is | ||
694 | * valid, zero otherwise. | ||
695 | */ | ||
696 | static inline int scsw_tm_is_valid_stctl(union scsw *scsw) | ||
697 | { | ||
698 | /* Only valid if pmcw.dnv == 1 */ | ||
699 | return 1; | ||
700 | } | ||
701 | |||
702 | /** | ||
703 | * scsw_tm_is_valid_dstat - check dstat field validity | ||
704 | * @scsw: pointer to scsw | ||
705 | * | ||
706 | * Return non-zero if the dstat field of the specified transport mode scsw is | ||
707 | * valid, zero otherwise. | ||
708 | */ | ||
709 | static inline int scsw_tm_is_valid_dstat(union scsw *scsw) | ||
710 | { | ||
711 | return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && | ||
712 | (scsw->tm.cc != 3); | ||
713 | } | ||
714 | |||
715 | /** | ||
716 | * scsw_tm_is_valid_cstat - check cstat field validity | ||
717 | * @scsw: pointer to scsw | ||
718 | * | ||
719 | * Return non-zero if the cstat field of the specified transport mode scsw is | ||
720 | * valid, zero otherwise. | ||
721 | */ | ||
722 | static inline int scsw_tm_is_valid_cstat(union scsw *scsw) | ||
723 | { | ||
724 | return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && | ||
725 | (scsw->tm.cc != 3); | ||
726 | } | ||
727 | |||
728 | /** | ||
729 | * scsw_tm_is_valid_fcxs - check fcxs field validity | ||
730 | * @scsw: pointer to scsw | ||
731 | * | ||
732 | * Return non-zero if the fcxs field of the specified transport mode scsw is | ||
733 | * valid, zero otherwise. | ||
734 | */ | ||
735 | static inline int scsw_tm_is_valid_fcxs(union scsw *scsw) | ||
736 | { | ||
737 | return 1; | ||
738 | } | ||
739 | |||
740 | /** | ||
741 | * scsw_tm_is_valid_schxs - check schxs field validity | ||
742 | * @scsw: pointer to scsw | ||
743 | * | ||
744 | * Return non-zero if the schxs field of the specified transport mode scsw is | ||
745 | * valid, zero otherwise. | ||
746 | */ | ||
747 | static inline int scsw_tm_is_valid_schxs(union scsw *scsw) | ||
748 | { | ||
749 | return (scsw->tm.cstat & (SCHN_STAT_PROG_CHECK | | ||
750 | SCHN_STAT_INTF_CTRL_CHK | | ||
751 | SCHN_STAT_PROT_CHECK | | ||
752 | SCHN_STAT_CHN_DATA_CHK)); | ||
753 | } | ||
754 | |||
755 | /** | ||
756 | * scsw_is_valid_actl - check actl field validity | ||
757 | * @scsw: pointer to scsw | ||
758 | * | ||
759 | * Return non-zero if the actl field of the specified scsw is valid, | ||
760 | * regardless of whether it is a transport mode or command mode scsw. | ||
761 | * Return zero if the field does not contain a valid value. | ||
762 | */ | ||
763 | static inline int scsw_is_valid_actl(union scsw *scsw) | ||
764 | { | ||
765 | if (scsw_is_tm(scsw)) | ||
766 | return scsw_tm_is_valid_actl(scsw); | ||
767 | else | ||
768 | return scsw_cmd_is_valid_actl(scsw); | ||
769 | } | ||
770 | |||
771 | /** | ||
772 | * scsw_is_valid_cc - check cc field validity | ||
773 | * @scsw: pointer to scsw | ||
774 | * | ||
775 | * Return non-zero if the cc field of the specified scsw is valid, | ||
776 | * regardless of whether it is a transport mode or command mode scsw. | ||
777 | * Return zero if the field does not contain a valid value. | ||
778 | */ | ||
779 | static inline int scsw_is_valid_cc(union scsw *scsw) | ||
780 | { | ||
781 | if (scsw_is_tm(scsw)) | ||
782 | return scsw_tm_is_valid_cc(scsw); | ||
783 | else | ||
784 | return scsw_cmd_is_valid_cc(scsw); | ||
785 | } | ||
786 | |||
787 | /** | ||
788 | * scsw_is_valid_cstat - check cstat field validity | ||
789 | * @scsw: pointer to scsw | ||
790 | * | ||
791 | * Return non-zero if the cstat field of the specified scsw is valid, | ||
792 | * regardless of whether it is a transport mode or command mode scsw. | ||
793 | * Return zero if the field does not contain a valid value. | ||
794 | */ | ||
795 | static inline int scsw_is_valid_cstat(union scsw *scsw) | ||
796 | { | ||
797 | if (scsw_is_tm(scsw)) | ||
798 | return scsw_tm_is_valid_cstat(scsw); | ||
799 | else | ||
800 | return scsw_cmd_is_valid_cstat(scsw); | ||
801 | } | ||
802 | |||
803 | /** | ||
804 | * scsw_is_valid_dstat - check dstat field validity | ||
805 | * @scsw: pointer to scsw | ||
806 | * | ||
807 | * Return non-zero if the dstat field of the specified scsw is valid, | ||
808 | * regardless of whether it is a transport mode or command mode scsw. | ||
809 | * Return zero if the field does not contain a valid value. | ||
810 | */ | ||
811 | static inline int scsw_is_valid_dstat(union scsw *scsw) | ||
812 | { | ||
813 | if (scsw_is_tm(scsw)) | ||
814 | return scsw_tm_is_valid_dstat(scsw); | ||
815 | else | ||
816 | return scsw_cmd_is_valid_dstat(scsw); | ||
817 | } | ||
818 | |||
819 | /** | ||
820 | * scsw_is_valid_ectl - check ectl field validity | ||
821 | * @scsw: pointer to scsw | ||
822 | * | ||
823 | * Return non-zero if the ectl field of the specified scsw is valid, | ||
824 | * regardless of whether it is a transport mode or command mode scsw. | ||
825 | * Return zero if the field does not contain a valid value. | ||
826 | */ | ||
827 | static inline int scsw_is_valid_ectl(union scsw *scsw) | ||
828 | { | ||
829 | if (scsw_is_tm(scsw)) | ||
830 | return scsw_tm_is_valid_ectl(scsw); | ||
831 | else | ||
832 | return scsw_cmd_is_valid_ectl(scsw); | ||
833 | } | ||
834 | |||
835 | /** | ||
836 | * scsw_is_valid_eswf - check eswf field validity | ||
837 | * @scsw: pointer to scsw | ||
838 | * | ||
839 | * Return non-zero if the eswf field of the specified scsw is valid, | ||
840 | * regardless of whether it is a transport mode or command mode scsw. | ||
841 | * Return zero if the field does not contain a valid value. | ||
842 | */ | ||
843 | static inline int scsw_is_valid_eswf(union scsw *scsw) | ||
844 | { | ||
845 | if (scsw_is_tm(scsw)) | ||
846 | return scsw_tm_is_valid_eswf(scsw); | ||
847 | else | ||
848 | return scsw_cmd_is_valid_eswf(scsw); | ||
849 | } | ||
850 | |||
851 | /** | ||
852 | * scsw_is_valid_fctl - check fctl field validity | ||
853 | * @scsw: pointer to scsw | ||
854 | * | ||
855 | * Return non-zero if the fctl field of the specified scsw is valid, | ||
856 | * regardless of whether it is a transport mode or command mode scsw. | ||
857 | * Return zero if the field does not contain a valid value. | ||
858 | */ | ||
859 | static inline int scsw_is_valid_fctl(union scsw *scsw) | ||
860 | { | ||
861 | if (scsw_is_tm(scsw)) | ||
862 | return scsw_tm_is_valid_fctl(scsw); | ||
863 | else | ||
864 | return scsw_cmd_is_valid_fctl(scsw); | ||
865 | } | ||
866 | |||
867 | /** | ||
868 | * scsw_is_valid_key - check key field validity | ||
869 | * @scsw: pointer to scsw | ||
870 | * | ||
871 | * Return non-zero if the key field of the specified scsw is valid, | ||
872 | * regardless of whether it is a transport mode or command mode scsw. | ||
873 | * Return zero if the field does not contain a valid value. | ||
874 | */ | ||
875 | static inline int scsw_is_valid_key(union scsw *scsw) | ||
876 | { | ||
877 | if (scsw_is_tm(scsw)) | ||
878 | return scsw_tm_is_valid_key(scsw); | ||
879 | else | ||
880 | return scsw_cmd_is_valid_key(scsw); | ||
881 | } | ||
882 | |||
883 | /** | ||
884 | * scsw_is_valid_pno - check pno field validity | ||
885 | * @scsw: pointer to scsw | ||
886 | * | ||
887 | * Return non-zero if the pno field of the specified scsw is valid, | ||
888 | * regardless of whether it is a transport mode or command mode scsw. | ||
889 | * Return zero if the field does not contain a valid value. | ||
890 | */ | ||
891 | static inline int scsw_is_valid_pno(union scsw *scsw) | ||
892 | { | ||
893 | if (scsw_is_tm(scsw)) | ||
894 | return scsw_tm_is_valid_pno(scsw); | ||
895 | else | ||
896 | return scsw_cmd_is_valid_pno(scsw); | ||
897 | } | ||
898 | |||
899 | /** | ||
900 | * scsw_is_valid_stctl - check stctl field validity | ||
901 | * @scsw: pointer to scsw | ||
902 | * | ||
903 | * Return non-zero if the stctl field of the specified scsw is valid, | ||
904 | * regardless of whether it is a transport mode or command mode scsw. | ||
905 | * Return zero if the field does not contain a valid value. | ||
906 | */ | ||
907 | static inline int scsw_is_valid_stctl(union scsw *scsw) | ||
908 | { | ||
909 | if (scsw_is_tm(scsw)) | ||
910 | return scsw_tm_is_valid_stctl(scsw); | ||
911 | else | ||
912 | return scsw_cmd_is_valid_stctl(scsw); | ||
913 | } | ||
914 | |||
915 | /** | ||
916 | * scsw_cmd_is_solicited - check for solicited scsw | ||
917 | * @scsw: pointer to scsw | ||
918 | * | ||
919 | * Return non-zero if the command mode scsw indicates that the associated | ||
920 | * status condition is solicited, zero if it is unsolicited. | ||
921 | */ | ||
922 | static inline int scsw_cmd_is_solicited(union scsw *scsw) | ||
923 | { | ||
924 | return (scsw->cmd.cc != 0) || (scsw->cmd.stctl != | ||
925 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)); | ||
926 | } | ||
927 | |||
928 | /** | ||
929 | * scsw_tm_is_solicited - check for solicited scsw | ||
930 | * @scsw: pointer to scsw | ||
931 | * | ||
932 | * Return non-zero if the transport mode scsw indicates that the associated | ||
933 | * status condition is solicited, zero if it is unsolicited. | ||
934 | */ | ||
935 | static inline int scsw_tm_is_solicited(union scsw *scsw) | ||
936 | { | ||
937 | return (scsw->tm.cc != 0) || (scsw->tm.stctl != | ||
938 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)); | ||
939 | } | ||
940 | |||
941 | /** | ||
942 | * scsw_is_solicited - check for solicited scsw | ||
943 | * @scsw: pointer to scsw | ||
944 | * | ||
945 | * Return non-zero if the transport or command mode scsw indicates that the | ||
946 | * associated status condition is solicited, zero if it is unsolicited. | ||
947 | */ | ||
948 | static inline int scsw_is_solicited(union scsw *scsw) | ||
949 | { | ||
950 | if (scsw_is_tm(scsw)) | ||
951 | return scsw_tm_is_solicited(scsw); | ||
952 | else | ||
953 | return scsw_cmd_is_solicited(scsw); | ||
954 | } | ||
955 | |||
956 | #endif /* _ASM_S390_SCSW_H_ */ | ||
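Taken together, the helpers above split into plain accessors (scsw_key(), scsw_dstat(), ...) and validity predicates (scsw_is_valid_dstat(), ...), with the transport/command dispatch hidden inside each one. A hedged usage sketch, assuming a status handler that receives a union scsw; DEV_STAT_UNIT_CHECK is taken from <asm/cio.h>, and the two handle_*() calls are placeholders:

    /* Sketch only: consume status bits only when the predicates
     * say the fields actually carry valid information. */
    static void process_status(union scsw *scsw)
    {
            if (scsw_is_valid_dstat(scsw) &&
                (scsw_dstat(scsw) & DEV_STAT_UNIT_CHECK))
                    handle_unit_check();            /* placeholder */
            if (scsw_is_valid_cstat(scsw) &&
                (scsw_cstat(scsw) & SCHN_STAT_CHN_DATA_CHK))
                    handle_channel_check();         /* placeholder */
    }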
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h index 38b0fc221ed7..e37478e87286 100644 --- a/arch/s390/include/asm/setup.h +++ b/arch/s390/include/asm/setup.h | |||
@@ -8,7 +8,7 @@ | |||
8 | #ifndef _ASM_S390_SETUP_H | 8 | #ifndef _ASM_S390_SETUP_H |
9 | #define _ASM_S390_SETUP_H | 9 | #define _ASM_S390_SETUP_H |
10 | 10 | ||
11 | #define COMMAND_LINE_SIZE 1024 | 11 | #define COMMAND_LINE_SIZE 4096 |
12 | 12 | ||
13 | #define ARCH_COMMAND_LINE_SIZE 896 | 13 | #define ARCH_COMMAND_LINE_SIZE 896 |
14 | 14 | ||
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h index 72137bc907ac..c991fe6473c9 100644 --- a/arch/s390/include/asm/smp.h +++ b/arch/s390/include/asm/smp.h | |||
@@ -51,32 +51,7 @@ extern void machine_power_off_smp(void); | |||
51 | #define PROC_CHANGE_PENALTY 20 /* Schedule penalty */ | 51 | #define PROC_CHANGE_PENALTY 20 /* Schedule penalty */ |
52 | 52 | ||
53 | #define raw_smp_processor_id() (S390_lowcore.cpu_nr) | 53 | #define raw_smp_processor_id() (S390_lowcore.cpu_nr) |
54 | 54 | #define cpu_logical_map(cpu) (cpu) | |
55 | /* | ||
56 | * returns 1 if cpu is in stopped/check stopped state or not operational | ||
57 | * returns 0 otherwise | ||
58 | */ | ||
59 | static inline int | ||
60 | smp_cpu_not_running(int cpu) | ||
61 | { | ||
62 | __u32 status; | ||
63 | |||
64 | switch (signal_processor_ps(&status, 0, cpu, sigp_sense)) { | ||
65 | case sigp_order_code_accepted: | ||
66 | case sigp_status_stored: | ||
67 | /* Check for stopped and check stop state */ | ||
68 | if (status & 0x50) | ||
69 | return 1; | ||
70 | break; | ||
71 | case sigp_not_operational: | ||
72 | return 1; | ||
73 | default: | ||
74 | break; | ||
75 | } | ||
76 | return 0; | ||
77 | } | ||
78 | |||
79 | #define cpu_logical_map(cpu) (cpu) | ||
80 | 55 | ||
81 | extern int __cpu_disable (void); | 56 | extern int __cpu_disable (void); |
82 | extern void __cpu_die (unsigned int cpu); | 57 | extern void __cpu_die (unsigned int cpu); |
@@ -91,11 +66,6 @@ extern void arch_send_call_function_ipi(cpumask_t mask); | |||
91 | 66 | ||
92 | #endif | 67 | #endif |
93 | 68 | ||
94 | #ifndef CONFIG_SMP | ||
95 | #define hard_smp_processor_id() 0 | ||
96 | #define smp_cpu_not_running(cpu) 1 | ||
97 | #endif | ||
98 | |||
99 | #ifdef CONFIG_HOTPLUG_CPU | 69 | #ifdef CONFIG_HOTPLUG_CPU |
100 | extern int smp_rescan_cpus(void); | 70 | extern int smp_rescan_cpus(void); |
101 | #else | 71 | #else |
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h index c9af0d19c7ab..41ce6861174e 100644 --- a/arch/s390/include/asm/spinlock.h +++ b/arch/s390/include/asm/spinlock.h | |||
@@ -191,4 +191,33 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) | |||
191 | #define _raw_read_relax(lock) cpu_relax() | 191 | #define _raw_read_relax(lock) cpu_relax() |
192 | #define _raw_write_relax(lock) cpu_relax() | 192 | #define _raw_write_relax(lock) cpu_relax() |
193 | 193 | ||
194 | #define __always_inline__spin_lock | ||
195 | #define __always_inline__read_lock | ||
196 | #define __always_inline__write_lock | ||
197 | #define __always_inline__spin_lock_bh | ||
198 | #define __always_inline__read_lock_bh | ||
199 | #define __always_inline__write_lock_bh | ||
200 | #define __always_inline__spin_lock_irq | ||
201 | #define __always_inline__read_lock_irq | ||
202 | #define __always_inline__write_lock_irq | ||
203 | #define __always_inline__spin_lock_irqsave | ||
204 | #define __always_inline__read_lock_irqsave | ||
205 | #define __always_inline__write_lock_irqsave | ||
206 | #define __always_inline__spin_trylock | ||
207 | #define __always_inline__read_trylock | ||
208 | #define __always_inline__write_trylock | ||
209 | #define __always_inline__spin_trylock_bh | ||
210 | #define __always_inline__spin_unlock | ||
211 | #define __always_inline__read_unlock | ||
212 | #define __always_inline__write_unlock | ||
213 | #define __always_inline__spin_unlock_bh | ||
214 | #define __always_inline__read_unlock_bh | ||
215 | #define __always_inline__write_unlock_bh | ||
216 | #define __always_inline__spin_unlock_irq | ||
217 | #define __always_inline__read_unlock_irq | ||
218 | #define __always_inline__write_unlock_irq | ||
219 | #define __always_inline__spin_unlock_irqrestore | ||
220 | #define __always_inline__read_unlock_irqrestore | ||
221 | #define __always_inline__write_unlock_irqrestore | ||
222 | |||
194 | #endif /* __ASM_SPINLOCK_H */ | 223 | #endif /* __ASM_SPINLOCK_H */ |
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h index 4fb83c1cdb77..379661d2f81a 100644 --- a/arch/s390/include/asm/system.h +++ b/arch/s390/include/asm/system.h | |||
@@ -109,11 +109,7 @@ extern void pfault_fini(void); | |||
109 | #define pfault_fini() do { } while (0) | 109 | #define pfault_fini() do { } while (0) |
110 | #endif /* CONFIG_PFAULT */ | 110 | #endif /* CONFIG_PFAULT */ |
111 | 111 | ||
112 | #ifdef CONFIG_PAGE_STATES | ||
113 | extern void cmma_init(void); | 112 | extern void cmma_init(void); |
114 | #else | ||
115 | static inline void cmma_init(void) { } | ||
116 | #endif | ||
117 | 113 | ||
118 | #define finish_arch_switch(prev) do { \ | 114 | #define finish_arch_switch(prev) do { \ |
119 | set_fs(current->thread.mm_segment); \ | 115 | set_fs(current->thread.mm_segment); \ |
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h index ba1cab9fc1f9..07eb61b2fb3a 100644 --- a/arch/s390/include/asm/thread_info.h +++ b/arch/s390/include/asm/thread_info.h | |||
@@ -92,7 +92,7 @@ static inline struct thread_info *current_thread_info(void) | |||
92 | #define TIF_SYSCALL_TRACE 8 /* syscall trace active */ | 92 | #define TIF_SYSCALL_TRACE 8 /* syscall trace active */ |
93 | #define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */ | 93 | #define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */ |
94 | #define TIF_SECCOMP 10 /* secure computing */ | 94 | #define TIF_SECCOMP 10 /* secure computing */ |
95 | #define TIF_SYSCALL_FTRACE 11 /* ftrace syscall instrumentation */ | 95 | #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */ |
96 | #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ | 96 | #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ |
97 | #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling | 97 | #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling |
98 | TIF_NEED_RESCHED */ | 98 | TIF_NEED_RESCHED */ |
@@ -111,7 +111,7 @@ static inline struct thread_info *current_thread_info(void) | |||
111 | #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) | 111 | #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) |
112 | #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) | 112 | #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) |
113 | #define _TIF_SECCOMP (1<<TIF_SECCOMP) | 113 | #define _TIF_SECCOMP (1<<TIF_SECCOMP) |
114 | #define _TIF_SYSCALL_FTRACE (1<<TIF_SYSCALL_FTRACE) | 114 | #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) |
115 | #define _TIF_USEDFPU (1<<TIF_USEDFPU) | 115 | #define _TIF_USEDFPU (1<<TIF_USEDFPU) |
116 | #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) | 116 | #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) |
117 | #define _TIF_31BIT (1<<TIF_31BIT) | 117 | #define _TIF_31BIT (1<<TIF_31BIT) |
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h index cc21e3e20fd7..24aa1cda20ad 100644 --- a/arch/s390/include/asm/timex.h +++ b/arch/s390/include/asm/timex.h | |||
@@ -90,4 +90,18 @@ unsigned long long monotonic_clock(void); | |||
90 | 90 | ||
91 | extern u64 sched_clock_base_cc; | 91 | extern u64 sched_clock_base_cc; |
92 | 92 | ||
93 | /** | ||
94 | * get_clock_monotonic - returns current time in clock rate units | ||
95 | * | ||
96 | * The TOD clock and sched_clock_base are both changed via | ||
97 | * stop_machine. The caller must therefore ensure that | ||
98 | * preemption is disabled while calling this function; | ||
99 | * otherwise the returned value is not guaranteed to be | ||
100 | * monotonic. | ||
101 | */ | ||
102 | static inline unsigned long long get_clock_monotonic(void) | ||
103 | { | ||
104 | return get_clock_xt() - sched_clock_base_cc; | ||
105 | } | ||
106 | |||
93 | #endif | 107 | #endif |
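A hedged sketch of how get_clock_monotonic() is meant to be called, honoring the preemption requirement spelled out in the comment (do_work() is a hypothetical workload; the delta is in TOD clock units, not nanoseconds):

    unsigned long long t0, t1;

    preempt_disable();
    t0 = get_clock_monotonic();
    do_work();                          /* placeholder */
    t1 = get_clock_monotonic();
    preempt_enable();
    /* t1 - t0: elapsed time in TOD clock units */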
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index c75ed43b1a18..c7be8e10b87e 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile | |||
@@ -32,7 +32,7 @@ extra-y += head.o init_task.o vmlinux.lds | |||
32 | 32 | ||
33 | obj-$(CONFIG_MODULES) += s390_ksyms.o module.o | 33 | obj-$(CONFIG_MODULES) += s390_ksyms.o module.o |
34 | obj-$(CONFIG_SMP) += smp.o topology.o | 34 | obj-$(CONFIG_SMP) += smp.o topology.o |
35 | 35 | obj-$(CONFIG_HIBERNATION) += suspend.o swsusp_asm64.o | |
36 | obj-$(CONFIG_AUDIT) += audit.o | 36 | obj-$(CONFIG_AUDIT) += audit.o |
37 | compat-obj-$(CONFIG_AUDIT) += compat_audit.o | 37 | compat-obj-$(CONFIG_AUDIT) += compat_audit.o |
38 | obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \ | 38 | obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \ |
@@ -41,7 +41,7 @@ obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \ | |||
41 | 41 | ||
42 | obj-$(CONFIG_STACKTRACE) += stacktrace.o | 42 | obj-$(CONFIG_STACKTRACE) += stacktrace.o |
43 | obj-$(CONFIG_KPROBES) += kprobes.o | 43 | obj-$(CONFIG_KPROBES) += kprobes.o |
44 | obj-$(CONFIG_FUNCTION_TRACER) += mcount.o | 44 | obj-$(CONFIG_FUNCTION_TRACER) += $(if $(CONFIG_64BIT),mcount64.o,mcount.o) |
45 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o | 45 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o |
46 | obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o | 46 | obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o |
47 | 47 | ||
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index cae14c499511..bf8b4ae7ff2d 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c | |||
@@ -6,6 +6,9 @@ | |||
6 | * Heiko Carstens <heiko.carstens@de.ibm.com> | 6 | * Heiko Carstens <heiko.carstens@de.ibm.com> |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define KMSG_COMPONENT "setup" | ||
10 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
11 | |||
9 | #include <linux/compiler.h> | 12 | #include <linux/compiler.h> |
10 | #include <linux/init.h> | 13 | #include <linux/init.h> |
11 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
@@ -16,6 +19,7 @@ | |||
16 | #include <linux/module.h> | 19 | #include <linux/module.h> |
17 | #include <linux/pfn.h> | 20 | #include <linux/pfn.h> |
18 | #include <linux/uaccess.h> | 21 | #include <linux/uaccess.h> |
22 | #include <linux/kernel.h> | ||
19 | #include <asm/ebcdic.h> | 23 | #include <asm/ebcdic.h> |
20 | #include <asm/ipl.h> | 24 | #include <asm/ipl.h> |
21 | #include <asm/lowcore.h> | 25 | #include <asm/lowcore.h> |
@@ -35,8 +39,6 @@ | |||
35 | 39 | ||
36 | char kernel_nss_name[NSS_NAME_SIZE + 1]; | 40 | char kernel_nss_name[NSS_NAME_SIZE + 1]; |
37 | 41 | ||
38 | static unsigned long machine_flags; | ||
39 | |||
40 | static void __init setup_boot_command_line(void); | 42 | static void __init setup_boot_command_line(void); |
41 | 43 | ||
42 | /* | 44 | /* |
@@ -81,6 +83,8 @@ asm( | |||
81 | " br 14\n" | 83 | " br 14\n" |
82 | " .size savesys_ipl_nss, .-savesys_ipl_nss\n"); | 84 | " .size savesys_ipl_nss, .-savesys_ipl_nss\n"); |
83 | 85 | ||
86 | static __initdata char upper_command_line[COMMAND_LINE_SIZE]; | ||
87 | |||
84 | static noinline __init void create_kernel_nss(void) | 88 | static noinline __init void create_kernel_nss(void) |
85 | { | 89 | { |
86 | unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size; | 90 | unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size; |
@@ -90,7 +94,6 @@ static noinline __init void create_kernel_nss(void) | |||
90 | int response; | 94 | int response; |
91 | size_t len; | 95 | size_t len; |
92 | char *savesys_ptr; | 96 | char *savesys_ptr; |
93 | char upper_command_line[COMMAND_LINE_SIZE]; | ||
94 | char defsys_cmd[DEFSYS_CMD_SIZE]; | 97 | char defsys_cmd[DEFSYS_CMD_SIZE]; |
95 | char savesys_cmd[SAVESYS_CMD_SIZE]; | 98 | char savesys_cmd[SAVESYS_CMD_SIZE]; |
96 | 99 | ||
@@ -141,6 +144,8 @@ static noinline __init void create_kernel_nss(void) | |||
141 | __cpcmd(defsys_cmd, NULL, 0, &response); | 144 | __cpcmd(defsys_cmd, NULL, 0, &response); |
142 | 145 | ||
143 | if (response != 0) { | 146 | if (response != 0) { |
147 | pr_err("Defining the Linux kernel NSS failed with rc=%d\n", | ||
148 | response); | ||
144 | kernel_nss_name[0] = '\0'; | 149 | kernel_nss_name[0] = '\0'; |
145 | return; | 150 | return; |
146 | } | 151 | } |
@@ -153,8 +158,11 @@ static noinline __init void create_kernel_nss(void) | |||
153 | * max SAVESYS_CMD_SIZE | 158 | * max SAVESYS_CMD_SIZE |
154 | * On error: response contains the numeric portion of cp error message. | 159 | * On error: response contains the numeric portion of cp error message. |
155 | * for SAVESYS it will be >= 263 | 160 | * for SAVESYS it will be >= 263 |
161 | * for missing privilege class, it will be 1 | ||
156 | */ | 162 | */ |
157 | if (response > SAVESYS_CMD_SIZE) { | 163 | if (response > SAVESYS_CMD_SIZE || response == 1) { |
164 | pr_err("Saving the Linux kernel NSS failed with rc=%d\n", | ||
165 | response); | ||
158 | kernel_nss_name[0] = '\0'; | 166 | kernel_nss_name[0] = '\0'; |
159 | return; | 167 | return; |
160 | } | 168 | } |
@@ -205,12 +213,9 @@ static noinline __init void detect_machine_type(void) | |||
205 | 213 | ||
206 | /* Running under KVM? If not we assume z/VM */ | 214 | /* Running under KVM? If not we assume z/VM */ |
207 | if (!memcmp(vmms.vm[0].cpi, "\xd2\xe5\xd4", 3)) | 215 | if (!memcmp(vmms.vm[0].cpi, "\xd2\xe5\xd4", 3)) |
208 | machine_flags |= MACHINE_FLAG_KVM; | 216 | S390_lowcore.machine_flags |= MACHINE_FLAG_KVM; |
209 | else | 217 | else |
210 | machine_flags |= MACHINE_FLAG_VM; | 218 | S390_lowcore.machine_flags |= MACHINE_FLAG_VM; |
211 | |||
212 | /* Store machine flags for setting up lowcore early */ | ||
213 | S390_lowcore.machine_flags = machine_flags; | ||
214 | } | 219 | } |
215 | 220 | ||
216 | static __init void early_pgm_check_handler(void) | 221 | static __init void early_pgm_check_handler(void) |
@@ -245,7 +250,7 @@ static noinline __init void setup_hpage(void) | |||
245 | facilities = stfl(); | 250 | facilities = stfl(); |
246 | if (!(facilities & (1UL << 23)) || !(facilities & (1UL << 29))) | 251 | if (!(facilities & (1UL << 23)) || !(facilities & (1UL << 29))) |
247 | return; | 252 | return; |
248 | machine_flags |= MACHINE_FLAG_HPAGE; | 253 | S390_lowcore.machine_flags |= MACHINE_FLAG_HPAGE; |
249 | __ctl_set_bit(0, 23); | 254 | __ctl_set_bit(0, 23); |
250 | #endif | 255 | #endif |
251 | } | 256 | } |
@@ -263,7 +268,7 @@ static __init void detect_mvpg(void) | |||
263 | EX_TABLE(0b,1b) | 268 | EX_TABLE(0b,1b) |
264 | : "=d" (rc) : "0" (-EOPNOTSUPP), "a" (0) : "memory", "cc", "0"); | 269 | : "=d" (rc) : "0" (-EOPNOTSUPP), "a" (0) : "memory", "cc", "0"); |
265 | if (!rc) | 270 | if (!rc) |
266 | machine_flags |= MACHINE_FLAG_MVPG; | 271 | S390_lowcore.machine_flags |= MACHINE_FLAG_MVPG; |
267 | #endif | 272 | #endif |
268 | } | 273 | } |
269 | 274 | ||
@@ -279,7 +284,7 @@ static __init void detect_ieee(void) | |||
279 | EX_TABLE(0b,1b) | 284 | EX_TABLE(0b,1b) |
280 | : "=d" (rc), "=d" (tmp): "0" (-EOPNOTSUPP) : "cc"); | 285 | : "=d" (rc), "=d" (tmp): "0" (-EOPNOTSUPP) : "cc"); |
281 | if (!rc) | 286 | if (!rc) |
282 | machine_flags |= MACHINE_FLAG_IEEE; | 287 | S390_lowcore.machine_flags |= MACHINE_FLAG_IEEE; |
283 | #endif | 288 | #endif |
284 | } | 289 | } |
285 | 290 | ||
@@ -298,7 +303,7 @@ static __init void detect_csp(void) | |||
298 | EX_TABLE(0b,1b) | 303 | EX_TABLE(0b,1b) |
299 | : "=d" (rc) : "0" (-EOPNOTSUPP) : "cc", "0", "1", "2"); | 304 | : "=d" (rc) : "0" (-EOPNOTSUPP) : "cc", "0", "1", "2"); |
300 | if (!rc) | 305 | if (!rc) |
301 | machine_flags |= MACHINE_FLAG_CSP; | 306 | S390_lowcore.machine_flags |= MACHINE_FLAG_CSP; |
302 | #endif | 307 | #endif |
303 | } | 308 | } |
304 | 309 | ||
@@ -315,7 +320,7 @@ static __init void detect_diag9c(void) | |||
315 | EX_TABLE(0b,1b) | 320 | EX_TABLE(0b,1b) |
316 | : "=d" (rc) : "0" (-EOPNOTSUPP), "d" (cpu_address) : "cc"); | 321 | : "=d" (rc) : "0" (-EOPNOTSUPP), "d" (cpu_address) : "cc"); |
317 | if (!rc) | 322 | if (!rc) |
318 | machine_flags |= MACHINE_FLAG_DIAG9C; | 323 | S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG9C; |
319 | } | 324 | } |
320 | 325 | ||
321 | static __init void detect_diag44(void) | 326 | static __init void detect_diag44(void) |
@@ -330,7 +335,7 @@ static __init void detect_diag44(void) | |||
330 | EX_TABLE(0b,1b) | 335 | EX_TABLE(0b,1b) |
331 | : "=d" (rc) : "0" (-EOPNOTSUPP) : "cc"); | 336 | : "=d" (rc) : "0" (-EOPNOTSUPP) : "cc"); |
332 | if (!rc) | 337 | if (!rc) |
333 | machine_flags |= MACHINE_FLAG_DIAG44; | 338 | S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG44; |
334 | #endif | 339 | #endif |
335 | } | 340 | } |
336 | 341 | ||
@@ -341,11 +346,11 @@ static __init void detect_machine_facilities(void) | |||
341 | 346 | ||
342 | facilities = stfl(); | 347 | facilities = stfl(); |
343 | if (facilities & (1 << 28)) | 348 | if (facilities & (1 << 28)) |
344 | machine_flags |= MACHINE_FLAG_IDTE; | 349 | S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE; |
345 | if (facilities & (1 << 23)) | 350 | if (facilities & (1 << 23)) |
346 | machine_flags |= MACHINE_FLAG_PFMF; | 351 | S390_lowcore.machine_flags |= MACHINE_FLAG_PFMF; |
347 | if (facilities & (1 << 4)) | 352 | if (facilities & (1 << 4)) |
348 | machine_flags |= MACHINE_FLAG_MVCOS; | 353 | S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS; |
349 | #endif | 354 | #endif |
350 | } | 355 | } |
351 | 356 | ||
@@ -367,21 +372,35 @@ static __init void rescue_initrd(void) | |||
367 | } | 372 | } |
368 | 373 | ||
369 | /* Set up boot command line */ | 374 | /* Set up boot command line */ |
370 | static void __init setup_boot_command_line(void) | 375 | static void __init append_to_cmdline(size_t (*ipl_data)(char *, size_t)) |
371 | { | 376 | { |
372 | char *parm = NULL; | 377 | char *parm, *delim; |
378 | size_t rc, len; | ||
379 | |||
380 | len = strlen(boot_command_line); | ||
381 | |||
382 | delim = boot_command_line + len; /* '\0' character position */ | ||
383 | parm = boot_command_line + len + 1; /* append right after '\0' */ | ||
373 | 384 | ||
385 | rc = ipl_data(parm, COMMAND_LINE_SIZE - len - 1); | ||
386 | if (rc) { | ||
387 | if (*parm == '=') | ||
388 | memmove(boot_command_line, parm + 1, rc); | ||
389 | else | ||
390 | *delim = ' '; /* replace '\0' with space */ | ||
391 | } | ||
392 | } | ||
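The '=' convention is easiest to see with concrete values. A hedged, self-contained userspace re-creation of the splice/replace logic (hypothetical command lines; the kernel version works in place on boot_command_line and takes its data from an ipl_data callback):

    #include <stdio.h>
    #include <string.h>

    static char cmdline[4096] = "dasd=0150 root=/dev/dasda1";

    static void append(const char *ipl_parm)
    {
            size_t len = strlen(cmdline);
            char *parm = cmdline + len + 1;      /* right after '\0' */

            strcpy(parm, ipl_parm);
            if (*parm == '=')                    /* replace whole line */
                    memmove(cmdline, parm + 1, strlen(parm + 1) + 1);
            else
                    cmdline[len] = ' ';          /* '\0' becomes ' ' */
    }

    int main(void)
    {
            append("novx");                  /* appended with a space */
            puts(cmdline);                   /* "... dasda1 novx"     */
            append("=root=/dev/dasdb1");     /* '=' replaces the line */
            puts(cmdline);                   /* "root=/dev/dasdb1"    */
            return 0;
    }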
393 | |||
394 | static void __init setup_boot_command_line(void) | ||
395 | { | ||
374 | /* copy arch command line */ | 396 | /* copy arch command line */ |
375 | strlcpy(boot_command_line, COMMAND_LINE, ARCH_COMMAND_LINE_SIZE); | 397 | strlcpy(boot_command_line, COMMAND_LINE, ARCH_COMMAND_LINE_SIZE); |
376 | 398 | ||
377 | /* append IPL PARM data to the boot command line */ | 399 | /* append IPL PARM data to the boot command line */ |
378 | if (MACHINE_IS_VM) { | 400 | if (MACHINE_IS_VM) |
379 | parm = boot_command_line + strlen(boot_command_line); | 401 | append_to_cmdline(append_ipl_vmparm); |
380 | *parm++ = ' '; | 402 | |
381 | get_ipl_vmparm(parm); | 403 | append_to_cmdline(append_ipl_scpdata); |
382 | if (parm[0] == '=') | ||
383 | memmove(boot_command_line, parm + 1, strlen(parm)); | ||
384 | } | ||
385 | } | 404 | } |
386 | 405 | ||
387 | 406 | ||
@@ -413,7 +432,6 @@ void __init startup_init(void) | |||
413 | setup_hpage(); | 432 | setup_hpage(); |
414 | sclp_facilities_detect(); | 433 | sclp_facilities_detect(); |
415 | detect_memory_layout(memory_chunk); | 434 | detect_memory_layout(memory_chunk); |
416 | S390_lowcore.machine_flags = machine_flags; | ||
417 | #ifdef CONFIG_DYNAMIC_FTRACE | 435 | #ifdef CONFIG_DYNAMIC_FTRACE |
418 | S390_lowcore.ftrace_func = (unsigned long)ftrace_caller; | 436 | S390_lowcore.ftrace_func = (unsigned long)ftrace_caller; |
419 | #endif | 437 | #endif |
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index c4c80a22bc1f..f43d2ee54464 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S | |||
@@ -54,7 +54,7 @@ _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ | |||
54 | _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ | 54 | _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ |
55 | _TIF_MCCK_PENDING) | 55 | _TIF_MCCK_PENDING) |
56 | _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ | 56 | _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ |
57 | _TIF_SECCOMP>>8 | _TIF_SYSCALL_FTRACE>>8) | 57 | _TIF_SECCOMP>>8 | _TIF_SYSCALL_TRACEPOINT>>8) |
58 | 58 | ||
59 | STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER | 59 | STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER |
60 | STACK_SIZE = 1 << STACK_SHIFT | 60 | STACK_SIZE = 1 << STACK_SHIFT |
@@ -278,7 +278,8 @@ sysc_return: | |||
278 | bnz BASED(sysc_work) # there is work to do (signals etc.) | 278 | bnz BASED(sysc_work) # there is work to do (signals etc.) |
279 | sysc_restore: | 279 | sysc_restore: |
280 | #ifdef CONFIG_TRACE_IRQFLAGS | 280 | #ifdef CONFIG_TRACE_IRQFLAGS |
281 | la %r1,BASED(sysc_restore_trace_psw) | 281 | la %r1,BASED(sysc_restore_trace_psw_addr) |
282 | l %r1,0(%r1) | ||
282 | lpsw 0(%r1) | 283 | lpsw 0(%r1) |
283 | sysc_restore_trace: | 284 | sysc_restore_trace: |
284 | TRACE_IRQS_CHECK | 285 | TRACE_IRQS_CHECK |
@@ -289,10 +290,15 @@ sysc_leave: | |||
289 | sysc_done: | 290 | sysc_done: |
290 | 291 | ||
291 | #ifdef CONFIG_TRACE_IRQFLAGS | 292 | #ifdef CONFIG_TRACE_IRQFLAGS |
293 | sysc_restore_trace_psw_addr: | ||
294 | .long sysc_restore_trace_psw | ||
295 | |||
296 | .section .data,"aw",@progbits | ||
292 | .align 8 | 297 | .align 8 |
293 | .globl sysc_restore_trace_psw | 298 | .globl sysc_restore_trace_psw |
294 | sysc_restore_trace_psw: | 299 | sysc_restore_trace_psw: |
295 | .long 0, sysc_restore_trace + 0x80000000 | 300 | .long 0, sysc_restore_trace + 0x80000000 |
301 | .previous | ||
296 | #endif | 302 | #endif |
297 | 303 | ||
298 | # | 304 | # |
@@ -606,7 +612,8 @@ io_return: | |||
606 | bnz BASED(io_work) # there is work to do (signals etc.) | 612 | bnz BASED(io_work) # there is work to do (signals etc.) |
607 | io_restore: | 613 | io_restore: |
608 | #ifdef CONFIG_TRACE_IRQFLAGS | 614 | #ifdef CONFIG_TRACE_IRQFLAGS |
609 | la %r1,BASED(io_restore_trace_psw) | 615 | la %r1,BASED(io_restore_trace_psw_addr) |
616 | l %r1,0(%r1) | ||
610 | lpsw 0(%r1) | 617 | lpsw 0(%r1) |
611 | io_restore_trace: | 618 | io_restore_trace: |
612 | TRACE_IRQS_CHECK | 619 | TRACE_IRQS_CHECK |
@@ -617,10 +624,15 @@ io_leave: | |||
617 | io_done: | 624 | io_done: |
618 | 625 | ||
619 | #ifdef CONFIG_TRACE_IRQFLAGS | 626 | #ifdef CONFIG_TRACE_IRQFLAGS |
627 | io_restore_trace_psw_addr: | ||
628 | .long io_restore_trace_psw | ||
629 | |||
630 | .section .data,"aw",@progbits | ||
620 | .align 8 | 631 | .align 8 |
621 | .globl io_restore_trace_psw | 632 | .globl io_restore_trace_psw |
622 | io_restore_trace_psw: | 633 | io_restore_trace_psw: |
623 | .long 0, io_restore_trace + 0x80000000 | 634 | .long 0, io_restore_trace + 0x80000000 |
635 | .previous | ||
624 | #endif | 636 | #endif |
625 | 637 | ||
626 | # | 638 | # |
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index f6618e9e15ef..a6f7b20df616 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S | |||
@@ -57,7 +57,7 @@ _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ | |||
57 | _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ | 57 | _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ |
58 | _TIF_MCCK_PENDING) | 58 | _TIF_MCCK_PENDING) |
59 | _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ | 59 | _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ |
60 | _TIF_SECCOMP>>8 | _TIF_SYSCALL_FTRACE>>8) | 60 | _TIF_SECCOMP>>8 | _TIF_SYSCALL_TRACEPOINT>>8) |
61 | 61 | ||
62 | #define BASED(name) name-system_call(%r13) | 62 | #define BASED(name) name-system_call(%r13) |
63 | 63 | ||
@@ -284,10 +284,12 @@ sysc_leave: | |||
284 | sysc_done: | 284 | sysc_done: |
285 | 285 | ||
286 | #ifdef CONFIG_TRACE_IRQFLAGS | 286 | #ifdef CONFIG_TRACE_IRQFLAGS |
287 | .section .data,"aw",@progbits | ||
287 | .align 8 | 288 | .align 8 |
288 | .globl sysc_restore_trace_psw | 289 | .globl sysc_restore_trace_psw |
289 | sysc_restore_trace_psw: | 290 | sysc_restore_trace_psw: |
290 | .quad 0, sysc_restore_trace | 291 | .quad 0, sysc_restore_trace |
292 | .previous | ||
291 | #endif | 293 | #endif |
292 | 294 | ||
293 | # | 295 | # |
@@ -595,10 +597,12 @@ io_leave: | |||
595 | io_done: | 597 | io_done: |
596 | 598 | ||
597 | #ifdef CONFIG_TRACE_IRQFLAGS | 599 | #ifdef CONFIG_TRACE_IRQFLAGS |
600 | .section .data,"aw",@progbits | ||
598 | .align 8 | 601 | .align 8 |
599 | .globl io_restore_trace_psw | 602 | .globl io_restore_trace_psw |
600 | io_restore_trace_psw: | 603 | io_restore_trace_psw: |
601 | .quad 0, io_restore_trace | 604 | .quad 0, io_restore_trace |
605 | .previous | ||
602 | #endif | 606 | #endif |
603 | 607 | ||
604 | # | 608 | # |
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index 3e298e64f0db..57bdcb1e3cdf 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c | |||
@@ -220,6 +220,29 @@ struct syscall_metadata *syscall_nr_to_meta(int nr) | |||
220 | return syscalls_metadata[nr]; | 220 | return syscalls_metadata[nr]; |
221 | } | 221 | } |
222 | 222 | ||
223 | int syscall_name_to_nr(char *name) | ||
224 | { | ||
225 | int i; | ||
226 | |||
227 | if (!syscalls_metadata) | ||
228 | return -1; | ||
229 | for (i = 0; i < NR_syscalls; i++) | ||
230 | if (syscalls_metadata[i]) | ||
231 | if (!strcmp(syscalls_metadata[i]->name, name)) | ||
232 | return i; | ||
233 | return -1; | ||
234 | } | ||
235 | |||
236 | void set_syscall_enter_id(int num, int id) | ||
237 | { | ||
238 | syscalls_metadata[num]->enter_id = id; | ||
239 | } | ||
240 | |||
241 | void set_syscall_exit_id(int num, int id) | ||
242 | { | ||
243 | syscalls_metadata[num]->exit_id = id; | ||
244 | } | ||
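A hedged sketch of how the generic syscall-tracing code might wire these three helpers together; the name "sys_open" and the enter_id/exit_id values are illustrative placeholders assigned by the tracer core:

    int nr;

    nr = syscall_name_to_nr("sys_open");      /* -1 if unknown */
    if (nr >= 0) {
            set_syscall_enter_id(nr, enter_id);    /* placeholder ids */
            set_syscall_exit_id(nr, exit_id);
    }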
245 | |||
223 | static struct syscall_metadata *find_syscall_meta(unsigned long syscall) | 246 | static struct syscall_metadata *find_syscall_meta(unsigned long syscall) |
224 | { | 247 | { |
225 | struct syscall_metadata *start; | 248 | struct syscall_metadata *start; |
@@ -237,24 +260,19 @@ static struct syscall_metadata *find_syscall_meta(unsigned long syscall) | |||
237 | return NULL; | 260 | return NULL; |
238 | } | 261 | } |
239 | 262 | ||
240 | void arch_init_ftrace_syscalls(void) | 263 | static int __init arch_init_ftrace_syscalls(void) |
241 | { | 264 | { |
242 | struct syscall_metadata *meta; | 265 | struct syscall_metadata *meta; |
243 | int i; | 266 | int i; |
244 | static atomic_t refs; | ||
245 | |||
246 | if (atomic_inc_return(&refs) != 1) | ||
247 | goto out; | ||
248 | syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * NR_syscalls, | 267 | syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * NR_syscalls, |
249 | GFP_KERNEL); | 268 | GFP_KERNEL); |
250 | if (!syscalls_metadata) | 269 | if (!syscalls_metadata) |
251 | goto out; | 270 | return -ENOMEM; |
252 | for (i = 0; i < NR_syscalls; i++) { | 271 | for (i = 0; i < NR_syscalls; i++) { |
253 | meta = find_syscall_meta((unsigned long)sys_call_table[i]); | 272 | meta = find_syscall_meta((unsigned long)sys_call_table[i]); |
254 | syscalls_metadata[i] = meta; | 273 | syscalls_metadata[i] = meta; |
255 | } | 274 | } |
256 | return; | 275 | return 0; |
257 | out: | ||
258 | atomic_dec(&refs); | ||
259 | } | 276 | } |
277 | arch_initcall(arch_init_ftrace_syscalls); | ||
260 | #endif | 278 | #endif |
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index ec6882348520..c52b4f7742fa 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <asm/asm-offsets.h> | 27 | #include <asm/asm-offsets.h> |
28 | #include <asm/thread_info.h> | 28 | #include <asm/thread_info.h> |
29 | #include <asm/page.h> | 29 | #include <asm/page.h> |
30 | #include <asm/cpu.h> | ||
30 | 31 | ||
31 | #ifdef CONFIG_64BIT | 32 | #ifdef CONFIG_64BIT |
32 | #define ARCH_OFFSET 4 | 33 | #define ARCH_OFFSET 4 |
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S index 2ced846065b7..602b508cd4c4 100644 --- a/arch/s390/kernel/head31.S +++ b/arch/s390/kernel/head31.S | |||
@@ -24,6 +24,7 @@ startup_continue: | |||
24 | # Setup stack | 24 | # Setup stack |
25 | # | 25 | # |
26 | l %r15,.Linittu-.LPG1(%r13) | 26 | l %r15,.Linittu-.LPG1(%r13) |
27 | st %r15,__LC_THREAD_INFO # cache thread info in lowcore | ||
27 | mvc __LC_CURRENT(4),__TI_task(%r15) | 28 | mvc __LC_CURRENT(4),__TI_task(%r15) |
28 | ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE | 29 | ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE |
29 | st %r15,__LC_KERNEL_STACK # set end of kernel stack | 30 | st %r15,__LC_KERNEL_STACK # set end of kernel stack |
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index 65667b2e65ce..6a250808092b 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S | |||
@@ -62,9 +62,9 @@ startup_continue: | |||
62 | clr %r11,%r12 | 62 | clr %r11,%r12 |
63 | je 5f # no more space in prefix array | 63 | je 5f # no more space in prefix array |
64 | 4: | 64 | 4: |
65 | ahi %r8,1 # next cpu (r8 += 1) | 65 | ahi %r8,1 # next cpu (r8 += 1) |
66 | cl %r8,.Llast_cpu-.LPG1(%r13) # is last possible cpu ? | 66 | chi %r8,MAX_CPU_ADDRESS # is last possible cpu ? |
67 | jl 1b # jump if not last cpu | 67 | jle 1b # jump if not last cpu |
68 | 5: | 68 | 5: |
69 | lhi %r1,2 # mode 2 = esame (dump) | 69 | lhi %r1,2 # mode 2 = esame (dump) |
70 | j 6f | 70 | j 6f |
@@ -92,6 +92,7 @@ startup_continue: | |||
92 | # Setup stack | 92 | # Setup stack |
93 | # | 93 | # |
94 | larl %r15,init_thread_union | 94 | larl %r15,init_thread_union |
95 | stg %r15,__LC_THREAD_INFO # cache thread info in lowcore | ||
95 | lg %r14,__TI_task(%r15) # cache current in lowcore | 96 | lg %r14,__TI_task(%r15) # cache current in lowcore |
96 | stg %r14,__LC_CURRENT | 97 | stg %r14,__LC_CURRENT |
97 | aghi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE | 98 | aghi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE |
@@ -129,8 +130,6 @@ startup_continue: | |||
129 | #ifdef CONFIG_ZFCPDUMP | 130 | #ifdef CONFIG_ZFCPDUMP |
130 | .Lcurrent_cpu: | 131 | .Lcurrent_cpu: |
131 | .long 0x0 | 132 | .long 0x0 |
132 | .Llast_cpu: | ||
133 | .long 0x0000ffff | ||
134 | .Lpref_arr_ptr: | 133 | .Lpref_arr_ptr: |
135 | .long zfcpdump_prefix_array | 134 | .long zfcpdump_prefix_array |
136 | #endif /* CONFIG_ZFCPDUMP */ | 135 | #endif /* CONFIG_ZFCPDUMP */ |
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 371a2d88f4ac..ee57a42e6e93 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c | |||
@@ -272,17 +272,18 @@ static ssize_t ipl_type_show(struct kobject *kobj, struct kobj_attribute *attr, | |||
272 | static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type); | 272 | static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type); |
273 | 273 | ||
274 | /* VM IPL PARM routines */ | 274 | /* VM IPL PARM routines */ |
275 | static void reipl_get_ascii_vmparm(char *dest, | 275 | size_t reipl_get_ascii_vmparm(char *dest, size_t size, |
276 | const struct ipl_parameter_block *ipb) | 276 | const struct ipl_parameter_block *ipb) |
277 | { | 277 | { |
278 | int i; | 278 | int i; |
279 | int len = 0; | 279 | size_t len; |
280 | char has_lowercase = 0; | 280 | char has_lowercase = 0; |
281 | 281 | ||
282 | len = 0; | ||
282 | if ((ipb->ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID) && | 283 | if ((ipb->ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID) && |
283 | (ipb->ipl_info.ccw.vm_parm_len > 0)) { | 284 | (ipb->ipl_info.ccw.vm_parm_len > 0)) { |
284 | 285 | ||
285 | len = ipb->ipl_info.ccw.vm_parm_len; | 286 | len = min_t(size_t, size - 1, ipb->ipl_info.ccw.vm_parm_len); |
286 | memcpy(dest, ipb->ipl_info.ccw.vm_parm, len); | 287 | memcpy(dest, ipb->ipl_info.ccw.vm_parm, len); |
287 | /* If at least one character is lowercase, we assume mixed | 288 | /* If at least one character is lowercase, we assume mixed |
288 | * case; otherwise we convert everything to lowercase. | 289 | * case; otherwise we convert everything to lowercase. |
@@ -299,14 +300,20 @@ static void reipl_get_ascii_vmparm(char *dest, | |||
299 | EBCASC(dest, len); | 300 | EBCASC(dest, len); |
300 | } | 301 | } |
301 | dest[len] = 0; | 302 | dest[len] = 0; |
303 | |||
304 | return len; | ||
302 | } | 305 | } |
303 | 306 | ||
304 | void get_ipl_vmparm(char *dest) | 307 | size_t append_ipl_vmparm(char *dest, size_t size) |
305 | { | 308 | { |
309 | size_t rc; | ||
310 | |||
311 | rc = 0; | ||
306 | if (diag308_set_works && (ipl_block.hdr.pbt == DIAG308_IPL_TYPE_CCW)) | 312 | if (diag308_set_works && (ipl_block.hdr.pbt == DIAG308_IPL_TYPE_CCW)) |
307 | reipl_get_ascii_vmparm(dest, &ipl_block); | 313 | rc = reipl_get_ascii_vmparm(dest, size, &ipl_block); |
308 | else | 314 | else |
309 | dest[0] = 0; | 315 | dest[0] = 0; |
316 | return rc; | ||
310 | } | 317 | } |
311 | 318 | ||
312 | static ssize_t ipl_vm_parm_show(struct kobject *kobj, | 319 | static ssize_t ipl_vm_parm_show(struct kobject *kobj, |
@@ -314,10 +321,65 @@ static ssize_t ipl_vm_parm_show(struct kobject *kobj, | |||
314 | { | 321 | { |
315 | char parm[DIAG308_VMPARM_SIZE + 1] = {}; | 322 | char parm[DIAG308_VMPARM_SIZE + 1] = {}; |
316 | 323 | ||
317 | get_ipl_vmparm(parm); | 324 | append_ipl_vmparm(parm, sizeof(parm)); |
318 | return sprintf(page, "%s\n", parm); | 325 | return sprintf(page, "%s\n", parm); |
319 | } | 326 | } |
320 | 327 | ||
328 | static size_t scpdata_length(const char *buf, size_t count) | ||
329 | { | ||
330 | while (count) { | ||
331 | if (buf[count - 1] != '\0' && buf[count - 1] != ' ') | ||
332 | break; | ||
333 | count--; | ||
334 | } | ||
335 | return count; | ||
336 | } | ||
337 | |||
338 | size_t reipl_append_ascii_scpdata(char *dest, size_t size, | ||
339 | const struct ipl_parameter_block *ipb) | ||
340 | { | ||
341 | size_t count; | ||
342 | size_t i; | ||
343 | int has_lowercase; | ||
344 | |||
345 | count = min(size - 1, scpdata_length(ipb->ipl_info.fcp.scp_data, | ||
346 | ipb->ipl_info.fcp.scp_data_len)); | ||
347 | if (!count) | ||
348 | goto out; | ||
349 | |||
350 | has_lowercase = 0; | ||
351 | for (i = 0; i < count; i++) { | ||
352 | if (!isascii(ipb->ipl_info.fcp.scp_data[i])) { | ||
353 | count = 0; | ||
354 | goto out; | ||
355 | } | ||
356 | if (!has_lowercase && islower(ipb->ipl_info.fcp.scp_data[i])) | ||
357 | has_lowercase = 1; | ||
358 | } | ||
359 | |||
360 | if (has_lowercase) | ||
361 | memcpy(dest, ipb->ipl_info.fcp.scp_data, count); | ||
362 | else | ||
363 | for (i = 0; i < count; i++) | ||
364 | dest[i] = tolower(ipb->ipl_info.fcp.scp_data[i]); | ||
365 | out: | ||
366 | dest[count] = '\0'; | ||
367 | return count; | ||
368 | } | ||
369 | |||
370 | size_t append_ipl_scpdata(char *dest, size_t len) | ||
371 | { | ||
372 | size_t rc; | ||
373 | |||
374 | rc = 0; | ||
375 | if (ipl_block.hdr.pbt == DIAG308_IPL_TYPE_FCP) | ||
376 | rc = reipl_append_ascii_scpdata(dest, len, &ipl_block); | ||
377 | else | ||
378 | dest[0] = 0; | ||
379 | return rc; | ||
380 | } | ||
381 | |||
382 | |||
321 | static struct kobj_attribute sys_ipl_vm_parm_attr = | 383 | static struct kobj_attribute sys_ipl_vm_parm_attr = |
322 | __ATTR(parm, S_IRUGO, ipl_vm_parm_show, NULL); | 384 | __ATTR(parm, S_IRUGO, ipl_vm_parm_show, NULL); |
323 | 385 | ||
@@ -553,7 +615,7 @@ static ssize_t reipl_generic_vmparm_show(struct ipl_parameter_block *ipb, | |||
553 | { | 615 | { |
554 | char vmparm[DIAG308_VMPARM_SIZE + 1] = {}; | 616 | char vmparm[DIAG308_VMPARM_SIZE + 1] = {}; |
555 | 617 | ||
556 | reipl_get_ascii_vmparm(vmparm, ipb); | 618 | reipl_get_ascii_vmparm(vmparm, sizeof(vmparm), ipb); |
557 | return sprintf(page, "%s\n", vmparm); | 619 | return sprintf(page, "%s\n", vmparm); |
558 | } | 620 | } |
559 | 621 | ||
@@ -626,6 +688,59 @@ static struct kobj_attribute sys_reipl_ccw_vmparm_attr = | |||
626 | 688 | ||
627 | /* FCP reipl device attributes */ | 689 | /* FCP reipl device attributes */ |
628 | 690 | ||
691 | static ssize_t reipl_fcp_scpdata_read(struct kobject *kobj, | ||
692 | struct bin_attribute *attr, | ||
693 | char *buf, loff_t off, size_t count) | ||
694 | { | ||
695 | size_t size = reipl_block_fcp->ipl_info.fcp.scp_data_len; | ||
696 | void *scp_data = reipl_block_fcp->ipl_info.fcp.scp_data; | ||
697 | |||
698 | return memory_read_from_buffer(buf, count, &off, scp_data, size); | ||
699 | } | ||
700 | |||
701 | static ssize_t reipl_fcp_scpdata_write(struct kobject *kobj, | ||
702 | struct bin_attribute *attr, | ||
703 | char *buf, loff_t off, size_t count) | ||
704 | { | ||
705 | size_t padding; | ||
706 | size_t scpdata_len; | ||
707 | |||
708 | if (off < 0) | ||
709 | return -EINVAL; | ||
710 | |||
711 | if (off >= DIAG308_SCPDATA_SIZE) | ||
712 | return -ENOSPC; | ||
713 | |||
714 | if (count > DIAG308_SCPDATA_SIZE - off) | ||
715 | count = DIAG308_SCPDATA_SIZE - off; | ||
716 | |||
717 | memcpy(reipl_block_fcp->ipl_info.fcp.scp_data, buf + off, count); | ||
718 | scpdata_len = off + count; | ||
719 | |||
720 | if (scpdata_len % 8) { | ||
721 | padding = 8 - (scpdata_len % 8); | ||
722 | memset(reipl_block_fcp->ipl_info.fcp.scp_data + scpdata_len, | ||
723 | 0, padding); | ||
724 | scpdata_len += padding; | ||
725 | } | ||
726 | |||
727 | reipl_block_fcp->ipl_info.fcp.scp_data_len = scpdata_len; | ||
728 | reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN + scpdata_len; | ||
729 | reipl_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN + scpdata_len; | ||
730 | |||
731 | return count; | ||
732 | } | ||
733 | |||
734 | static struct bin_attribute sys_reipl_fcp_scp_data_attr = { | ||
735 | .attr = { | ||
736 | .name = "scp_data", | ||
737 | .mode = S_IRUGO | S_IWUSR, | ||
738 | }, | ||
739 | .size = PAGE_SIZE, | ||
740 | .read = reipl_fcp_scpdata_read, | ||
741 | .write = reipl_fcp_scpdata_write, | ||
742 | }; | ||
743 | |||
629 | DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%016llx\n", | 744 | DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%016llx\n", |
630 | reipl_block_fcp->ipl_info.fcp.wwpn); | 745 | reipl_block_fcp->ipl_info.fcp.wwpn); |
631 | DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%016llx\n", | 746 | DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%016llx\n", |
@@ -647,7 +762,6 @@ static struct attribute *reipl_fcp_attrs[] = { | |||
647 | }; | 762 | }; |
648 | 763 | ||
649 | static struct attribute_group reipl_fcp_attr_group = { | 764 | static struct attribute_group reipl_fcp_attr_group = { |
650 | .name = IPL_FCP_STR, | ||
651 | .attrs = reipl_fcp_attrs, | 765 | .attrs = reipl_fcp_attrs, |
652 | }; | 766 | }; |
653 | 767 | ||
@@ -895,6 +1009,7 @@ static struct kobj_attribute reipl_type_attr = | |||
895 | __ATTR(reipl_type, 0644, reipl_type_show, reipl_type_store); | 1009 | __ATTR(reipl_type, 0644, reipl_type_show, reipl_type_store); |
896 | 1010 | ||
897 | static struct kset *reipl_kset; | 1011 | static struct kset *reipl_kset; |
1012 | static struct kset *reipl_fcp_kset; | ||
898 | 1013 | ||
899 | static void get_ipl_string(char *dst, struct ipl_parameter_block *ipb, | 1014 | static void get_ipl_string(char *dst, struct ipl_parameter_block *ipb, |
900 | const enum ipl_method m) | 1015 | const enum ipl_method m) |
@@ -906,7 +1021,7 @@ static void get_ipl_string(char *dst, struct ipl_parameter_block *ipb, | |||
906 | 1021 | ||
907 | reipl_get_ascii_loadparm(loadparm, ipb); | 1022 | reipl_get_ascii_loadparm(loadparm, ipb); |
908 | reipl_get_ascii_nss_name(nss_name, ipb); | 1023 | reipl_get_ascii_nss_name(nss_name, ipb); |
909 | reipl_get_ascii_vmparm(vmparm, ipb); | 1024 | reipl_get_ascii_vmparm(vmparm, sizeof(vmparm), ipb); |
910 | 1025 | ||
911 | switch (m) { | 1026 | switch (m) { |
912 | case REIPL_METHOD_CCW_VM: | 1027 | case REIPL_METHOD_CCW_VM: |
@@ -1076,23 +1191,44 @@ static int __init reipl_fcp_init(void) | |||
1076 | int rc; | 1191 | int rc; |
1077 | 1192 | ||
1078 | if (!diag308_set_works) { | 1193 | if (!diag308_set_works) { |
1079 | if (ipl_info.type == IPL_TYPE_FCP) | 1194 | if (ipl_info.type == IPL_TYPE_FCP) { |
1080 | make_attrs_ro(reipl_fcp_attrs); | 1195 | make_attrs_ro(reipl_fcp_attrs); |
1081 | else | 1196 | sys_reipl_fcp_scp_data_attr.attr.mode = S_IRUGO; |
1197 | } else | ||
1082 | return 0; | 1198 | return 0; |
1083 | } | 1199 | } |
1084 | 1200 | ||
1085 | reipl_block_fcp = (void *) get_zeroed_page(GFP_KERNEL); | 1201 | reipl_block_fcp = (void *) get_zeroed_page(GFP_KERNEL); |
1086 | if (!reipl_block_fcp) | 1202 | if (!reipl_block_fcp) |
1087 | return -ENOMEM; | 1203 | return -ENOMEM; |
1088 | rc = sysfs_create_group(&reipl_kset->kobj, &reipl_fcp_attr_group); | 1204 | |
1205 | /* sysfs: create fcp kset for mixing attr group and bin attrs */ | ||
1206 | reipl_fcp_kset = kset_create_and_add(IPL_FCP_STR, NULL, | ||
1207 | &reipl_kset->kobj); | ||
1208 | if (!reipl_fcp_kset) { | ||
1209 | free_page((unsigned long) reipl_block_fcp); | ||
1210 | return -ENOMEM; | ||
1211 | } | ||
1212 | |||
1213 | rc = sysfs_create_group(&reipl_fcp_kset->kobj, &reipl_fcp_attr_group); | ||
1214 | if (rc) { | ||
1215 | kset_unregister(reipl_fcp_kset); | ||
1216 | free_page((unsigned long) reipl_block_fcp); | ||
1217 | return rc; | ||
1218 | } | ||
1219 | |||
1220 | rc = sysfs_create_bin_file(&reipl_fcp_kset->kobj, | ||
1221 | &sys_reipl_fcp_scp_data_attr); | ||
1089 | if (rc) { | 1222 | if (rc) { |
1090 | free_page((unsigned long)reipl_block_fcp); | 1223 | sysfs_remove_group(&reipl_fcp_kset->kobj, &reipl_fcp_attr_group); |
1224 | kset_unregister(reipl_fcp_kset); | ||
1225 | free_page((unsigned long) reipl_block_fcp); | ||
1091 | return rc; | 1226 | return rc; |
1092 | } | 1227 | } |
1093 | if (ipl_info.type == IPL_TYPE_FCP) { | 1228 | |
1229 | if (ipl_info.type == IPL_TYPE_FCP) | ||
1094 | memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE); | 1230 | memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE); |
1095 | } else { | 1231 | else { |
1096 | reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN; | 1232 | reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN; |
1097 | reipl_block_fcp->hdr.version = IPL_PARM_BLOCK_VERSION; | 1233 | reipl_block_fcp->hdr.version = IPL_PARM_BLOCK_VERSION; |
1098 | reipl_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN; | 1234 | reipl_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN; |
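
The new scp_data handlers above do two bits of arithmetic worth spelling out: scpdata_length() trims trailing NUL and blank bytes before the data is exposed, and the write path pads the stored length up to a multiple of 8 with zero bytes. A minimal user-space sketch of both calculations (standalone C; the buffer and sizes are made up, this is not the kernel code itself):

#include <assert.h>
#include <stddef.h>
#include <string.h>

/* trim trailing '\0' and ' ' bytes, as scpdata_length() does */
static size_t scpdata_length(const char *buf, size_t count)
{
	while (count && (buf[count - 1] == '\0' || buf[count - 1] == ' '))
		count--;
	return count;
}

/* round len up to a multiple of 8, zero-filling the gap, mirroring
 * the padding step in reipl_fcp_scpdata_write() */
static size_t pad_to_8(char *data, size_t len)
{
	size_t padding = (8 - len % 8) % 8;

	memset(data + len, 0, padding);
	return len + padding;
}

int main(void)
{
	char buf[16] = "abc  ";

	assert(scpdata_length(buf, 5) == 3);	/* trailing blanks dropped */
	assert(pad_to_8(buf, 5) == 8);		/* 5 -> 8, gap zeroed */
	assert(pad_to_8(buf, 8) == 8);		/* already aligned */
	return 0;
}

The padding keeps hdr.len and blk0_len covering whole 8-byte chunks of SCP data, presumably because the DIAG308 parameter block wants the length 8-byte aligned.
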
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S index 2a0a5e97ba8c..dfe015d7398c 100644 --- a/arch/s390/kernel/mcount.S +++ b/arch/s390/kernel/mcount.S | |||
@@ -11,111 +11,27 @@ | |||
11 | ftrace_stub: | 11 | ftrace_stub: |
12 | br %r14 | 12 | br %r14 |
13 | 13 | ||
14 | #ifdef CONFIG_64BIT | ||
15 | |||
16 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
17 | |||
18 | .globl _mcount | 14 | .globl _mcount |
19 | _mcount: | 15 | _mcount: |
20 | br %r14 | 16 | #ifdef CONFIG_DYNAMIC_FTRACE |
21 | |||
22 | .globl ftrace_caller | ||
23 | ftrace_caller: | ||
24 | larl %r1,function_trace_stop | ||
25 | icm %r1,0xf,0(%r1) | ||
26 | bnzr %r14 | ||
27 | stmg %r2,%r5,32(%r15) | ||
28 | stg %r14,112(%r15) | ||
29 | lgr %r1,%r15 | ||
30 | aghi %r15,-160 | ||
31 | stg %r1,__SF_BACKCHAIN(%r15) | ||
32 | lgr %r2,%r14 | ||
33 | lg %r3,168(%r15) | ||
34 | larl %r14,ftrace_dyn_func | ||
35 | lg %r14,0(%r14) | ||
36 | basr %r14,%r14 | ||
37 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
38 | .globl ftrace_graph_caller | ||
39 | ftrace_graph_caller: | ||
40 | # This unconditional branch gets runtime patched. Change only if | ||
41 | # you know what you are doing. See ftrace_enable_graph_caller(). | ||
42 | j 0f | ||
43 | lg %r2,272(%r15) | ||
44 | lg %r3,168(%r15) | ||
45 | brasl %r14,prepare_ftrace_return | ||
46 | stg %r2,168(%r15) | ||
47 | 0: | ||
48 | #endif | ||
49 | aghi %r15,160 | ||
50 | lmg %r2,%r5,32(%r15) | ||
51 | lg %r14,112(%r15) | ||
52 | br %r14 | 17 | br %r14 |
53 | 18 | ||
54 | .data | 19 | .data |
55 | .globl ftrace_dyn_func | 20 | .globl ftrace_dyn_func |
56 | ftrace_dyn_func: | 21 | ftrace_dyn_func: |
57 | .quad ftrace_stub | 22 | .long ftrace_stub |
58 | .previous | 23 | .previous |
59 | 24 | ||
60 | #else /* CONFIG_DYNAMIC_FTRACE */ | ||
61 | |||
62 | .globl _mcount | ||
63 | _mcount: | ||
64 | larl %r1,function_trace_stop | ||
65 | icm %r1,0xf,0(%r1) | ||
66 | bnzr %r14 | ||
67 | stmg %r2,%r5,32(%r15) | ||
68 | stg %r14,112(%r15) | ||
69 | lgr %r1,%r15 | ||
70 | aghi %r15,-160 | ||
71 | stg %r1,__SF_BACKCHAIN(%r15) | ||
72 | lgr %r2,%r14 | ||
73 | lg %r3,168(%r15) | ||
74 | larl %r14,ftrace_trace_function | ||
75 | lg %r14,0(%r14) | ||
76 | basr %r14,%r14 | ||
77 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
78 | lg %r2,272(%r15) | ||
79 | lg %r3,168(%r15) | ||
80 | brasl %r14,prepare_ftrace_return | ||
81 | stg %r2,168(%r15) | ||
82 | #endif | ||
83 | aghi %r15,160 | ||
84 | lmg %r2,%r5,32(%r15) | ||
85 | lg %r14,112(%r15) | ||
86 | br %r14 | ||
87 | |||
88 | #endif /* CONFIG_DYNAMIC_FTRACE */ | ||
89 | |||
90 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
91 | |||
92 | .globl return_to_handler | ||
93 | return_to_handler: | ||
94 | stmg %r2,%r5,32(%r15) | ||
95 | lgr %r1,%r15 | ||
96 | aghi %r15,-160 | ||
97 | stg %r1,__SF_BACKCHAIN(%r15) | ||
98 | brasl %r14,ftrace_return_to_handler | ||
99 | aghi %r15,160 | ||
100 | lgr %r14,%r2 | ||
101 | lmg %r2,%r5,32(%r15) | ||
102 | br %r14 | ||
103 | |||
104 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | ||
105 | |||
106 | #else /* CONFIG_64BIT */ | ||
107 | |||
108 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
109 | |||
110 | .globl _mcount | ||
111 | _mcount: | ||
112 | br %r14 | ||
113 | |||
114 | .globl ftrace_caller | 25 | .globl ftrace_caller |
115 | ftrace_caller: | 26 | ftrace_caller: |
27 | #endif | ||
116 | stm %r2,%r5,16(%r15) | 28 | stm %r2,%r5,16(%r15) |
117 | bras %r1,2f | 29 | bras %r1,2f |
30 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
31 | 0: .long ftrace_dyn_func | ||
32 | #else | ||
118 | 0: .long ftrace_trace_function | 33 | 0: .long ftrace_trace_function |
34 | #endif | ||
119 | 1: .long function_trace_stop | 35 | 1: .long function_trace_stop |
120 | 2: l %r2,1b-0b(%r1) | 36 | 2: l %r2,1b-0b(%r1) |
121 | icm %r2,0xf,0(%r2) | 37 | icm %r2,0xf,0(%r2) |
@@ -131,53 +47,13 @@ ftrace_caller: | |||
131 | l %r14,0(%r14) | 47 | l %r14,0(%r14) |
132 | basr %r14,%r14 | 48 | basr %r14,%r14 |
133 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 49 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
50 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
134 | .globl ftrace_graph_caller | 51 | .globl ftrace_graph_caller |
135 | ftrace_graph_caller: | 52 | ftrace_graph_caller: |
136 | # This unconditional branch gets runtime patched. Change only if | 53 | # This unconditional branch gets runtime patched. Change only if |
137 | # you know what you are doing. See ftrace_enable_graph_caller(). | 54 | # you know what you are doing. See ftrace_enable_graph_caller(). |
138 | j 1f | 55 | j 1f |
139 | bras %r1,0f | ||
140 | .long prepare_ftrace_return | ||
141 | 0: l %r2,152(%r15) | ||
142 | l %r4,0(%r1) | ||
143 | l %r3,100(%r15) | ||
144 | basr %r14,%r4 | ||
145 | st %r2,100(%r15) | ||
146 | 1: | ||
147 | #endif | 56 | #endif |
148 | ahi %r15,96 | ||
149 | l %r14,56(%r15) | ||
150 | 3: lm %r2,%r5,16(%r15) | ||
151 | br %r14 | ||
152 | |||
153 | .data | ||
154 | .globl ftrace_dyn_func | ||
155 | ftrace_dyn_func: | ||
156 | .long ftrace_stub | ||
157 | .previous | ||
158 | |||
159 | #else /* CONFIG_DYNAMIC_FTRACE */ | ||
160 | |||
161 | .globl _mcount | ||
162 | _mcount: | ||
163 | stm %r2,%r5,16(%r15) | ||
164 | bras %r1,2f | ||
165 | 0: .long ftrace_trace_function | ||
166 | 1: .long function_trace_stop | ||
167 | 2: l %r2,1b-0b(%r1) | ||
168 | icm %r2,0xf,0(%r2) | ||
169 | jnz 3f | ||
170 | st %r14,56(%r15) | ||
171 | lr %r0,%r15 | ||
172 | ahi %r15,-96 | ||
173 | l %r3,100(%r15) | ||
174 | la %r2,0(%r14) | ||
175 | st %r0,__SF_BACKCHAIN(%r15) | ||
176 | la %r3,0(%r3) | ||
177 | l %r14,0b-0b(%r1) | ||
178 | l %r14,0(%r14) | ||
179 | basr %r14,%r14 | ||
180 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
181 | bras %r1,0f | 57 | bras %r1,0f |
182 | .long prepare_ftrace_return | 58 | .long prepare_ftrace_return |
183 | 0: l %r2,152(%r15) | 59 | 0: l %r2,152(%r15) |
@@ -185,14 +61,13 @@ _mcount: | |||
185 | l %r3,100(%r15) | 61 | l %r3,100(%r15) |
186 | basr %r14,%r4 | 62 | basr %r14,%r4 |
187 | st %r2,100(%r15) | 63 | st %r2,100(%r15) |
64 | 1: | ||
188 | #endif | 65 | #endif |
189 | ahi %r15,96 | 66 | ahi %r15,96 |
190 | l %r14,56(%r15) | 67 | l %r14,56(%r15) |
191 | 3: lm %r2,%r5,16(%r15) | 68 | 3: lm %r2,%r5,16(%r15) |
192 | br %r14 | 69 | br %r14 |
193 | 70 | ||
194 | #endif /* CONFIG_DYNAMIC_FTRACE */ | ||
195 | |||
196 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 71 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
197 | 72 | ||
198 | .globl return_to_handler | 73 | .globl return_to_handler |
@@ -211,6 +86,4 @@ return_to_handler: | |||
211 | lm %r2,%r5,16(%r15) | 86 | lm %r2,%r5,16(%r15) |
212 | br %r14 | 87 | br %r14 |
213 | 88 | ||
214 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 89 | #endif |
215 | |||
216 | #endif /* CONFIG_64BIT */ | ||
diff --git a/arch/s390/kernel/mcount64.S b/arch/s390/kernel/mcount64.S new file mode 100644 index 000000000000..c37211c6092b --- /dev/null +++ b/arch/s390/kernel/mcount64.S | |||
@@ -0,0 +1,78 @@ | |||
1 | /* | ||
2 | * Copyright IBM Corp. 2008,2009 | ||
3 | * | ||
4 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, | ||
5 | * | ||
6 | */ | ||
7 | |||
8 | #include <asm/asm-offsets.h> | ||
9 | |||
10 | .globl ftrace_stub | ||
11 | ftrace_stub: | ||
12 | br %r14 | ||
13 | |||
14 | .globl _mcount | ||
15 | _mcount: | ||
16 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
17 | br %r14 | ||
18 | |||
19 | .data | ||
20 | .globl ftrace_dyn_func | ||
21 | ftrace_dyn_func: | ||
22 | .quad ftrace_stub | ||
23 | .previous | ||
24 | |||
25 | .globl ftrace_caller | ||
26 | ftrace_caller: | ||
27 | #endif | ||
28 | larl %r1,function_trace_stop | ||
29 | icm %r1,0xf,0(%r1) | ||
30 | bnzr %r14 | ||
31 | stmg %r2,%r5,32(%r15) | ||
32 | stg %r14,112(%r15) | ||
33 | lgr %r1,%r15 | ||
34 | aghi %r15,-160 | ||
35 | stg %r1,__SF_BACKCHAIN(%r15) | ||
36 | lgr %r2,%r14 | ||
37 | lg %r3,168(%r15) | ||
38 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
39 | larl %r14,ftrace_dyn_func | ||
40 | #else | ||
41 | larl %r14,ftrace_trace_function | ||
42 | #endif | ||
43 | lg %r14,0(%r14) | ||
44 | basr %r14,%r14 | ||
45 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
46 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
47 | .globl ftrace_graph_caller | ||
48 | ftrace_graph_caller: | ||
49 | # This unconditional branch gets runtime patched. Change only if | ||
50 | # you know what you are doing. See ftrace_enable_graph_caller(). | ||
51 | j 0f | ||
52 | #endif | ||
53 | lg %r2,272(%r15) | ||
54 | lg %r3,168(%r15) | ||
55 | brasl %r14,prepare_ftrace_return | ||
56 | stg %r2,168(%r15) | ||
57 | 0: | ||
58 | #endif | ||
59 | aghi %r15,160 | ||
60 | lmg %r2,%r5,32(%r15) | ||
61 | lg %r14,112(%r15) | ||
62 | br %r14 | ||
63 | |||
64 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
65 | |||
66 | .globl return_to_handler | ||
67 | return_to_handler: | ||
68 | stmg %r2,%r5,32(%r15) | ||
69 | lgr %r1,%r15 | ||
70 | aghi %r15,-160 | ||
71 | stg %r1,__SF_BACKCHAIN(%r15) | ||
72 | brasl %r14,ftrace_return_to_handler | ||
73 | aghi %r15,160 | ||
74 | lgr %r14,%r2 | ||
75 | lmg %r2,%r5,32(%r15) | ||
76 | br %r14 | ||
77 | |||
78 | #endif | ||
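
The 64-bit mcount code now lives in its own file, with the DYNAMIC_FTRACE and non-DYNAMIC variants folded together: _mcount degenerates to br %r14 when dynamic ftrace is on, and ftrace_caller fetches its target from the patchable ftrace_dyn_func slot. Roughly the same indirection in C terms (a sketch, not the actual runtime-patching mechanism):

#include <stdio.h>

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);

/* default target: does nothing, like ftrace_stub */
static void ftrace_stub(unsigned long ip, unsigned long parent_ip)
{
	(void)ip;
	(void)parent_ip;
}

static void my_tracer(unsigned long ip, unsigned long parent_ip)
{
	printf("traced call: ip=%#lx parent=%#lx\n", ip, parent_ip);
}

/* the patchable slot; the real ftrace_dyn_func lives in .data and is
 * rewritten at runtime when tracing is switched on */
static ftrace_func_t ftrace_dyn_func = ftrace_stub;

int main(void)
{
	ftrace_dyn_func(0x1000, 0x2000);	/* tracing off: hits the stub */
	ftrace_dyn_func = my_tracer;		/* "patch" the slot */
	ftrace_dyn_func(0x1000, 0x2000);	/* tracing on */
	return 0;
}
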
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 43acd73105b7..f3ddd7ac06c5 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c | |||
@@ -51,6 +51,9 @@ | |||
51 | #include "compat_ptrace.h" | 51 | #include "compat_ptrace.h" |
52 | #endif | 52 | #endif |
53 | 53 | ||
54 | #define CREATE_TRACE_POINTS | ||
55 | #include <trace/events/syscalls.h> | ||
56 | |||
54 | enum s390_regset { | 57 | enum s390_regset { |
55 | REGSET_GENERAL, | 58 | REGSET_GENERAL, |
56 | REGSET_FP, | 59 | REGSET_FP, |
@@ -661,8 +664,8 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) | |||
661 | ret = -1; | 664 | ret = -1; |
662 | } | 665 | } |
663 | 666 | ||
664 | if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE))) | 667 | if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) |
665 | ftrace_syscall_enter(regs); | 668 | trace_sys_enter(regs, regs->gprs[2]); |
666 | 669 | ||
667 | if (unlikely(current->audit_context)) | 670 | if (unlikely(current->audit_context)) |
668 | audit_syscall_entry(is_compat_task() ? | 671 | audit_syscall_entry(is_compat_task() ? |
@@ -679,8 +682,8 @@ asmlinkage void do_syscall_trace_exit(struct pt_regs *regs) | |||
679 | audit_syscall_exit(AUDITSC_RESULT(regs->gprs[2]), | 682 | audit_syscall_exit(AUDITSC_RESULT(regs->gprs[2]), |
680 | regs->gprs[2]); | 683 | regs->gprs[2]); |
681 | 684 | ||
682 | if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE))) | 685 | if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) |
683 | ftrace_syscall_exit(regs); | 686 | trace_sys_exit(regs, regs->gprs[2]); |
684 | 687 | ||
685 | if (test_thread_flag(TIF_SYSCALL_TRACE)) | 688 | if (test_thread_flag(TIF_SYSCALL_TRACE)) |
686 | tracehook_report_syscall_exit(regs, 0); | 689 | tracehook_report_syscall_exit(regs, 0); |
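
do_syscall_trace_enter()/exit() now key the generic syscall tracepoints off TIF_SYSCALL_TRACEPOINT instead of the old TIF_SYSCALL_FTRACE, so an untraced syscall pays only a single flag test. The guard pattern in isolation (standalone C; the bit number here is hypothetical):

#include <stdio.h>

#define TIF_SYSCALL_TRACEPOINT	10	/* hypothetical bit number */

static unsigned long thread_flags;

static int test_thread_flag(int flag)
{
	return (thread_flags >> flag) & 1UL;
}

static void trace_sys_enter(long syscall_nr)
{
	printf("sys_enter: %ld\n", syscall_nr);
}

static void syscall_trace_enter(long syscall_nr)
{
	/* cheap test; the tracepoint body runs only when armed */
	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(syscall_nr);
}

int main(void)
{
	syscall_trace_enter(1);			/* flag clear: silent */
	thread_flags |= 1UL << TIF_SYSCALL_TRACEPOINT;
	syscall_trace_enter(1);			/* flag set: traced */
	return 0;
}
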
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 9717717c6fea..9ed13a1ed376 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c | |||
@@ -154,6 +154,16 @@ static int __init condev_setup(char *str) | |||
154 | 154 | ||
155 | __setup("condev=", condev_setup); | 155 | __setup("condev=", condev_setup); |
156 | 156 | ||
157 | static void __init set_preferred_console(void) | ||
158 | { | ||
159 | if (MACHINE_IS_KVM) | ||
160 | add_preferred_console("hvc", 0, NULL); | ||
161 | else if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP) | ||
162 | add_preferred_console("ttyS", 0, NULL); | ||
163 | else if (CONSOLE_IS_3270) | ||
164 | add_preferred_console("tty3270", 0, NULL); | ||
165 | } | ||
166 | |||
157 | static int __init conmode_setup(char *str) | 167 | static int __init conmode_setup(char *str) |
158 | { | 168 | { |
159 | #if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE) | 169 | #if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE) |
@@ -168,6 +178,7 @@ static int __init conmode_setup(char *str) | |||
168 | if (strncmp(str, "3270", 5) == 0) | 178 | if (strncmp(str, "3270", 5) == 0) |
169 | SET_CONSOLE_3270; | 179 | SET_CONSOLE_3270; |
170 | #endif | 180 | #endif |
181 | set_preferred_console(); | ||
171 | return 1; | 182 | return 1; |
172 | } | 183 | } |
173 | 184 | ||
@@ -780,9 +791,6 @@ static void __init setup_hwcaps(void) | |||
780 | void __init | 791 | void __init |
781 | setup_arch(char **cmdline_p) | 792 | setup_arch(char **cmdline_p) |
782 | { | 793 | { |
783 | /* set up preferred console */ | ||
784 | add_preferred_console("ttyS", 0, NULL); | ||
785 | |||
786 | /* | 794 | /* |
787 | * print what head.S has found out about the machine | 795 | * print what head.S has found out about the machine |
788 | */ | 796 | */ |
@@ -802,11 +810,9 @@ setup_arch(char **cmdline_p) | |||
802 | if (MACHINE_IS_VM) | 810 | if (MACHINE_IS_VM) |
803 | pr_info("Linux is running as a z/VM " | 811 | pr_info("Linux is running as a z/VM " |
804 | "guest operating system in 64-bit mode\n"); | 812 | "guest operating system in 64-bit mode\n"); |
805 | else if (MACHINE_IS_KVM) { | 813 | else if (MACHINE_IS_KVM) |
806 | pr_info("Linux is running under KVM in 64-bit mode\n"); | 814 | pr_info("Linux is running under KVM in 64-bit mode\n"); |
807 | add_preferred_console("hvc", 0, NULL); | 815 | else |
808 | s390_virtio_console_init(); | ||
809 | } else | ||
810 | pr_info("Linux is running natively in 64-bit mode\n"); | 816 | pr_info("Linux is running natively in 64-bit mode\n"); |
811 | #endif /* CONFIG_64BIT */ | 817 | #endif /* CONFIG_64BIT */ |
812 | 818 | ||
@@ -851,6 +857,7 @@ setup_arch(char **cmdline_p) | |||
851 | 857 | ||
852 | /* Setup default console */ | 858 | /* Setup default console */ |
853 | conmode_default(); | 859 | conmode_default(); |
860 | set_preferred_console(); | ||
854 | 861 | ||
855 | /* Setup zfcpdump support */ | 862 | /* Setup zfcpdump support */ |
856 | setup_zfcpdump(console_devno); | 863 | setup_zfcpdump(console_devno); |
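
Console selection moves out of setup_arch() into set_preferred_console(), called once after conmode_default() and again from conmode_setup() when a conmode= parameter overrides the detection. The dispatch, reduced to standalone C with the machine and console predicates stubbed out:

#include <stdio.h>

enum console_mode { CON_3215, CON_SCLP, CON_3270 };

/* stand-ins for MACHINE_IS_KVM and the CONSOLE_IS_* tests */
static int machine_is_kvm;
static enum console_mode console_mode = CON_SCLP;

static void add_preferred_console(const char *name, int idx)
{
	printf("preferred console: %s%d\n", name, idx);
}

static void set_preferred_console(void)
{
	if (machine_is_kvm)
		add_preferred_console("hvc", 0);
	else if (console_mode == CON_3215 || console_mode == CON_SCLP)
		add_preferred_console("ttyS", 0);
	else if (console_mode == CON_3270)
		add_preferred_console("tty3270", 0);
}

int main(void)
{
	set_preferred_console();	/* -> ttyS0 on a 3215/SCLP console */
	return 0;
}
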
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index 062bd64e65fa..6b4fef877f9d 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c | |||
@@ -536,4 +536,6 @@ void do_notify_resume(struct pt_regs *regs) | |||
536 | { | 536 | { |
537 | clear_thread_flag(TIF_NOTIFY_RESUME); | 537 | clear_thread_flag(TIF_NOTIFY_RESUME); |
538 | tracehook_notify_resume(regs); | 538 | tracehook_notify_resume(regs); |
539 | if (current->replacement_session_keyring) | ||
540 | key_replace_session_keyring(); | ||
539 | } | 541 | } |
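
do_notify_resume() now finishes a staged session-keyring replacement on the way back to user space; the identical two lines are added to the sh signal code further down. The check-then-install pattern in outline (a loose sketch, not the real keyring API):

#include <stdio.h>

struct task {
	void *session_keyring;
	void *replacement_session_keyring;
};

/* on return to user space, install a replacement keyring that was
 * staged earlier, then clear the staging slot */
static void key_replace_session_keyring(struct task *t)
{
	t->session_keyring = t->replacement_session_keyring;
	t->replacement_session_keyring = NULL;
}

static void do_notify_resume(struct task *t)
{
	if (t->replacement_session_keyring)
		key_replace_session_keyring(t);
}

int main(void)
{
	struct task t = { .replacement_session_keyring = (void *)0x1 };

	do_notify_resume(&t);
	printf("session keyring now %p\n", t.session_keyring);
	return 0;
}
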
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index be2cae083406..56c16876b919 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
@@ -49,6 +49,7 @@ | |||
49 | #include <asm/sclp.h> | 49 | #include <asm/sclp.h> |
50 | #include <asm/cputime.h> | 50 | #include <asm/cputime.h> |
51 | #include <asm/vdso.h> | 51 | #include <asm/vdso.h> |
52 | #include <asm/cpu.h> | ||
52 | #include "entry.h" | 53 | #include "entry.h" |
53 | 54 | ||
54 | static struct task_struct *current_set[NR_CPUS]; | 55 | static struct task_struct *current_set[NR_CPUS]; |
@@ -70,6 +71,23 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices); | |||
70 | 71 | ||
71 | static void smp_ext_bitcall(int, ec_bit_sig); | 72 | static void smp_ext_bitcall(int, ec_bit_sig); |
72 | 73 | ||
74 | static int cpu_stopped(int cpu) | ||
75 | { | ||
76 | __u32 status; | ||
77 | |||
78 | switch (signal_processor_ps(&status, 0, cpu, sigp_sense)) { | ||
79 | case sigp_order_code_accepted: | ||
80 | case sigp_status_stored: | ||
81 | /* Check for stopped and check-stop state */ | ||
82 | if (status & 0x50) | ||
83 | return 1; | ||
84 | break; | ||
85 | default: | ||
86 | break; | ||
87 | } | ||
88 | return 0; | ||
89 | } | ||
90 | |||
73 | void smp_send_stop(void) | 91 | void smp_send_stop(void) |
74 | { | 92 | { |
75 | int cpu, rc; | 93 | int cpu, rc; |
@@ -86,7 +104,7 @@ void smp_send_stop(void) | |||
86 | rc = signal_processor(cpu, sigp_stop); | 104 | rc = signal_processor(cpu, sigp_stop); |
87 | } while (rc == sigp_busy); | 105 | } while (rc == sigp_busy); |
88 | 106 | ||
89 | while (!smp_cpu_not_running(cpu)) | 107 | while (!cpu_stopped(cpu)) |
90 | cpu_relax(); | 108 | cpu_relax(); |
91 | } | 109 | } |
92 | } | 110 | } |
@@ -269,19 +287,6 @@ static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { } | |||
269 | 287 | ||
270 | #endif /* CONFIG_ZFCPDUMP */ | 288 | #endif /* CONFIG_ZFCPDUMP */ |
271 | 289 | ||
272 | static int cpu_stopped(int cpu) | ||
273 | { | ||
274 | __u32 status; | ||
275 | |||
276 | /* Check for stopped state */ | ||
277 | if (signal_processor_ps(&status, 0, cpu, sigp_sense) == | ||
278 | sigp_status_stored) { | ||
279 | if (status & 0x40) | ||
280 | return 1; | ||
281 | } | ||
282 | return 0; | ||
283 | } | ||
284 | |||
285 | static int cpu_known(int cpu_id) | 290 | static int cpu_known(int cpu_id) |
286 | { | 291 | { |
287 | int cpu; | 292 | int cpu; |
@@ -300,7 +305,7 @@ static int smp_rescan_cpus_sigp(cpumask_t avail) | |||
300 | logical_cpu = cpumask_first(&avail); | 305 | logical_cpu = cpumask_first(&avail); |
301 | if (logical_cpu >= nr_cpu_ids) | 306 | if (logical_cpu >= nr_cpu_ids) |
302 | return 0; | 307 | return 0; |
303 | for (cpu_id = 0; cpu_id <= 65535; cpu_id++) { | 308 | for (cpu_id = 0; cpu_id <= MAX_CPU_ADDRESS; cpu_id++) { |
304 | if (cpu_known(cpu_id)) | 309 | if (cpu_known(cpu_id)) |
305 | continue; | 310 | continue; |
306 | __cpu_logical_map[logical_cpu] = cpu_id; | 311 | __cpu_logical_map[logical_cpu] = cpu_id; |
@@ -379,7 +384,7 @@ static void __init smp_detect_cpus(void) | |||
379 | /* Use sigp detection algorithm if sclp doesn't work. */ | 384 | /* Use sigp detection algorithm if sclp doesn't work. */ |
380 | if (sclp_get_cpu_info(info)) { | 385 | if (sclp_get_cpu_info(info)) { |
381 | smp_use_sigp_detection = 1; | 386 | smp_use_sigp_detection = 1; |
382 | for (cpu = 0; cpu <= 65535; cpu++) { | 387 | for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) { |
383 | if (cpu == boot_cpu_addr) | 388 | if (cpu == boot_cpu_addr) |
384 | continue; | 389 | continue; |
385 | __cpu_logical_map[CPU_INIT_NO] = cpu; | 390 | __cpu_logical_map[CPU_INIT_NO] = cpu; |
@@ -635,7 +640,7 @@ int __cpu_disable(void) | |||
635 | void __cpu_die(unsigned int cpu) | 640 | void __cpu_die(unsigned int cpu) |
636 | { | 641 | { |
637 | /* Wait until target cpu is down */ | 642 | /* Wait until target cpu is down */ |
638 | while (!smp_cpu_not_running(cpu)) | 643 | while (!cpu_stopped(cpu)) |
639 | cpu_relax(); | 644 | cpu_relax(); |
640 | smp_free_lowcore(cpu); | 645 | smp_free_lowcore(cpu); |
641 | pr_info("Processor %d stopped\n", cpu); | 646 | pr_info("Processor %d stopped\n", cpu); |
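
cpu_stopped() moves up, accepts sigp_order_code_accepted as well as sigp_status_stored, and widens the status test from 0x40 to 0x50: it now treats both the stopped bit (0x40, per the old comment) and the check-stop bit (0x10, by elimination) as "down". The mask test on its own:

#include <assert.h>

/* SIGP sense status bits behind the 0x50 mask above
 * (the symbolic names here are descriptive, not the kernel's) */
#define STATUS_CHECK_STOP	0x10
#define STATUS_STOPPED		0x40

static int cpu_is_down(unsigned int status)
{
	return (status & (STATUS_STOPPED | STATUS_CHECK_STOP)) != 0;
}

int main(void)
{
	assert(cpu_is_down(0x40));	/* stopped */
	assert(cpu_is_down(0x10));	/* check stop */
	assert(!cpu_is_down(0x00));	/* operating */
	return 0;
}
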
diff --git a/arch/s390/power/swsusp.c b/arch/s390/kernel/suspend.c index bd1f5c6b0b8c..086bee970cae 100644 --- a/arch/s390/power/swsusp.c +++ b/arch/s390/kernel/suspend.c | |||
@@ -1,13 +1,44 @@ | |||
1 | /* | 1 | /* |
2 | * Support for suspend and resume on s390 | 2 | * Suspend support specific for s390. |
3 | * | 3 | * |
4 | * Copyright IBM Corp. 2009 | 4 | * Copyright IBM Corp. 2009 |
5 | * | 5 | * |
6 | * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com> | 6 | * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com> |
7 | * | ||
8 | */ | 7 | */ |
9 | 8 | ||
9 | #include <linux/suspend.h> | ||
10 | #include <linux/reboot.h> | ||
11 | #include <linux/pfn.h> | ||
12 | #include <linux/mm.h> | ||
13 | #include <asm/sections.h> | ||
10 | #include <asm/system.h> | 14 | #include <asm/system.h> |
15 | #include <asm/ipl.h> | ||
16 | |||
17 | /* | ||
18 | * References to section boundaries | ||
19 | */ | ||
20 | extern const void __nosave_begin, __nosave_end; | ||
21 | |||
22 | /* | ||
23 | * check if given pfn is in the 'nosave' or in the read-only NSS section | ||
24 | */ | ||
25 | int pfn_is_nosave(unsigned long pfn) | ||
26 | { | ||
27 | unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT; | ||
28 | unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) | ||
29 | >> PAGE_SHIFT; | ||
30 | unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1; | ||
31 | unsigned long stext_pfn = PFN_DOWN(__pa(&_stext)); | ||
32 | |||
33 | if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn) | ||
34 | return 1; | ||
35 | if (pfn >= stext_pfn && pfn <= eshared_pfn) { | ||
36 | if (ipl_info.type == IPL_TYPE_NSS) | ||
37 | return 1; | ||
38 | } else if (tprot(pfn * PAGE_SIZE) && pfn > 0) | ||
39 | return 1; | ||
40 | return 0; | ||
41 | } | ||
11 | 42 | ||
12 | void save_processor_state(void) | 43 | void save_processor_state(void) |
13 | { | 44 | { |
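
pfn_is_nosave(), moved here from arch/s390/power/suspend.c, declares a frame unsaveable when it lies in the nosave section, in the shared read-only text of an NSS IPL, or when tprot() reports it inaccessible. The range logic with made-up bounds (the tprot() leg is omitted, since it needs the real machine):

#include <assert.h>

/* hypothetical frame numbers standing in for the linker symbols */
#define NOSAVE_BEGIN_PFN	100UL
#define NOSAVE_END_PFN		110UL
#define STEXT_PFN		10UL
#define ESHARED_PFN		50UL

static int ipl_is_nss;		/* stand-in for the ipl_info.type check */

static int pfn_is_nosave(unsigned long pfn)
{
	if (pfn >= NOSAVE_BEGIN_PFN && pfn < NOSAVE_END_PFN)
		return 1;
	if (pfn >= STEXT_PFN && pfn <= ESHARED_PFN)
		return ipl_is_nss;	/* shared read-only NSS text */
	return 0;			/* tprot() test omitted here */
}

int main(void)
{
	assert(pfn_is_nosave(105));	/* inside nosave range */
	assert(!pfn_is_nosave(20));	/* shared text, non-NSS IPL */
	ipl_is_nss = 1;
	assert(pfn_is_nosave(20));	/* shared text, NSS IPL */
	return 0;
}
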
diff --git a/arch/s390/power/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S index b26df5c5933e..7cd6b096f0d1 100644 --- a/arch/s390/power/swsusp_asm64.S +++ b/arch/s390/kernel/swsusp_asm64.S | |||
@@ -21,7 +21,7 @@ | |||
21 | * This function runs with disabled interrupts. | 21 | * This function runs with disabled interrupts. |
22 | */ | 22 | */ |
23 | .section .text | 23 | .section .text |
24 | .align 2 | 24 | .align 4 |
25 | .globl swsusp_arch_suspend | 25 | .globl swsusp_arch_suspend |
26 | swsusp_arch_suspend: | 26 | swsusp_arch_suspend: |
27 | stmg %r6,%r15,__SF_GPRS(%r15) | 27 | stmg %r6,%r15,__SF_GPRS(%r15) |
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index d4c8e9c47c81..54e327e9af04 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c | |||
@@ -60,6 +60,7 @@ | |||
60 | #define TICK_SIZE tick | 60 | #define TICK_SIZE tick |
61 | 61 | ||
62 | u64 sched_clock_base_cc = -1; /* Force to data section. */ | 62 | u64 sched_clock_base_cc = -1; /* Force to data section. */ |
63 | EXPORT_SYMBOL_GPL(sched_clock_base_cc); | ||
63 | 64 | ||
64 | static DEFINE_PER_CPU(struct clock_event_device, comparators); | 65 | static DEFINE_PER_CPU(struct clock_event_device, comparators); |
65 | 66 | ||
@@ -68,7 +69,7 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators); | |||
68 | */ | 69 | */ |
69 | unsigned long long notrace sched_clock(void) | 70 | unsigned long long notrace sched_clock(void) |
70 | { | 71 | { |
71 | return ((get_clock_xt() - sched_clock_base_cc) * 125) >> 9; | 72 | return (get_clock_monotonic() * 125) >> 9; |
72 | } | 73 | } |
73 | 74 | ||
74 | /* | 75 | /* |
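
sched_clock() now goes through get_clock_monotonic(), but the unit conversion is unchanged: (tod * 125) >> 9 is exactly tod * 1000 / 4096, which says one microsecond is 4096 TOD units (bit 51 of the TOD clock carries once per microsecond). A quick arithmetic check:

#include <assert.h>

/* ns = tod * 125 / 512 == tod * 1000 / 4096 */
static unsigned long long tod_to_ns(unsigned long long tod)
{
	return (tod * 125) >> 9;
}

int main(void)
{
	assert(tod_to_ns(4096) == 1000);			/* 1 us */
	assert(tod_to_ns(4096ULL * 1000000) == 1000000000ULL);	/* 1 s */
	return 0;
}
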
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index a53db23ee092..7315f9e67e1d 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S | |||
@@ -52,55 +52,18 @@ SECTIONS | |||
52 | . = ALIGN(PAGE_SIZE); | 52 | . = ALIGN(PAGE_SIZE); |
53 | _eshared = .; /* End of shareable data */ | 53 | _eshared = .; /* End of shareable data */ |
54 | 54 | ||
55 | . = ALIGN(16); /* Exception table */ | 55 | EXCEPTION_TABLE(16) :data |
56 | __ex_table : { | ||
57 | __start___ex_table = .; | ||
58 | *(__ex_table) | ||
59 | __stop___ex_table = .; | ||
60 | } :data | ||
61 | |||
62 | .data : { /* Data */ | ||
63 | DATA_DATA | ||
64 | CONSTRUCTORS | ||
65 | } | ||
66 | |||
67 | . = ALIGN(PAGE_SIZE); | ||
68 | .data_nosave : { | ||
69 | __nosave_begin = .; | ||
70 | *(.data.nosave) | ||
71 | } | ||
72 | . = ALIGN(PAGE_SIZE); | ||
73 | __nosave_end = .; | ||
74 | |||
75 | . = ALIGN(PAGE_SIZE); | ||
76 | .data.page_aligned : { | ||
77 | *(.data.idt) | ||
78 | } | ||
79 | 56 | ||
80 | . = ALIGN(0x100); | 57 | RW_DATA_SECTION(0x100, PAGE_SIZE, THREAD_SIZE) |
81 | .data.cacheline_aligned : { | ||
82 | *(.data.cacheline_aligned) | ||
83 | } | ||
84 | 58 | ||
85 | . = ALIGN(0x100); | ||
86 | .data.read_mostly : { | ||
87 | *(.data.read_mostly) | ||
88 | } | ||
89 | _edata = .; /* End of data section */ | 59 | _edata = .; /* End of data section */ |
90 | 60 | ||
91 | . = ALIGN(THREAD_SIZE); /* init_task */ | ||
92 | .data.init_task : { | ||
93 | *(.data.init_task) | ||
94 | } | ||
95 | |||
96 | /* will be freed after init */ | 61 | /* will be freed after init */ |
97 | . = ALIGN(PAGE_SIZE); /* Init code and data */ | 62 | . = ALIGN(PAGE_SIZE); /* Init code and data */ |
98 | __init_begin = .; | 63 | __init_begin = .; |
99 | .init.text : { | 64 | |
100 | _sinittext = .; | 65 | INIT_TEXT_SECTION(PAGE_SIZE) |
101 | INIT_TEXT | 66 | |
102 | _einittext = .; | ||
103 | } | ||
104 | /* | 67 | /* |
105 | * .exit.text is discarded at runtime, not link time, | 68 | * .exit.text is discarded at runtime, not link time, |
106 | * to deal with references from __bug_table | 69 | * to deal with references from __bug_table |
@@ -111,49 +74,13 @@ SECTIONS | |||
111 | 74 | ||
112 | /* early.c uses stsi, which requires page aligned data. */ | 75 | /* early.c uses stsi, which requires page aligned data. */ |
113 | . = ALIGN(PAGE_SIZE); | 76 | . = ALIGN(PAGE_SIZE); |
114 | .init.data : { | 77 | INIT_DATA_SECTION(0x100) |
115 | INIT_DATA | ||
116 | } | ||
117 | . = ALIGN(0x100); | ||
118 | .init.setup : { | ||
119 | __setup_start = .; | ||
120 | *(.init.setup) | ||
121 | __setup_end = .; | ||
122 | } | ||
123 | .initcall.init : { | ||
124 | __initcall_start = .; | ||
125 | INITCALLS | ||
126 | __initcall_end = .; | ||
127 | } | ||
128 | |||
129 | .con_initcall.init : { | ||
130 | __con_initcall_start = .; | ||
131 | *(.con_initcall.init) | ||
132 | __con_initcall_end = .; | ||
133 | } | ||
134 | SECURITY_INIT | ||
135 | |||
136 | #ifdef CONFIG_BLK_DEV_INITRD | ||
137 | . = ALIGN(0x100); | ||
138 | .init.ramfs : { | ||
139 | __initramfs_start = .; | ||
140 | *(.init.ramfs) | ||
141 | . = ALIGN(2); | ||
142 | __initramfs_end = .; | ||
143 | } | ||
144 | #endif | ||
145 | 78 | ||
146 | PERCPU(PAGE_SIZE) | 79 | PERCPU(PAGE_SIZE) |
147 | . = ALIGN(PAGE_SIZE); | 80 | . = ALIGN(PAGE_SIZE); |
148 | __init_end = .; /* freed after init ends here */ | 81 | __init_end = .; /* freed after init ends here */ |
149 | 82 | ||
150 | /* BSS */ | 83 | BSS_SECTION(0, 2, 0) |
151 | .bss : { | ||
152 | __bss_start = .; | ||
153 | *(.bss) | ||
154 | . = ALIGN(2); | ||
155 | __bss_stop = .; | ||
156 | } | ||
157 | 84 | ||
158 | _end = . ; | 85 | _end = . ; |
159 | 86 | ||
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile index db05661ac895..eec054484419 100644 --- a/arch/s390/mm/Makefile +++ b/arch/s390/mm/Makefile | |||
@@ -2,7 +2,7 @@ | |||
2 | # Makefile for the linux s390-specific parts of the memory manager. | 2 | # Makefile for the linux s390-specific parts of the memory manager. |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o | 5 | obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o \ |
6 | page-states.o | ||
6 | obj-$(CONFIG_CMM) += cmm.o | 7 | obj-$(CONFIG_CMM) += cmm.o |
7 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o | 8 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o |
8 | obj-$(CONFIG_PAGE_STATES) += page-states.o | ||
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index e5e119fe03b2..1abbadd497e1 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c | |||
@@ -10,6 +10,7 @@ | |||
10 | * Copyright (C) 1995 Linus Torvalds | 10 | * Copyright (C) 1995 Linus Torvalds |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/perf_counter.h> | ||
13 | #include <linux/signal.h> | 14 | #include <linux/signal.h> |
14 | #include <linux/sched.h> | 15 | #include <linux/sched.h> |
15 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
@@ -305,7 +306,7 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int write) | |||
305 | * interrupts again and then search the VMAs | 306 | * interrupts again and then search the VMAs |
306 | */ | 307 | */ |
307 | local_irq_enable(); | 308 | local_irq_enable(); |
308 | 309 | perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); | |
309 | down_read(&mm->mmap_sem); | 310 | down_read(&mm->mmap_sem); |
310 | 311 | ||
311 | si_code = SEGV_MAPERR; | 312 | si_code = SEGV_MAPERR; |
@@ -363,11 +364,15 @@ good_area: | |||
363 | } | 364 | } |
364 | BUG(); | 365 | BUG(); |
365 | } | 366 | } |
366 | if (fault & VM_FAULT_MAJOR) | 367 | if (fault & VM_FAULT_MAJOR) { |
367 | tsk->maj_flt++; | 368 | tsk->maj_flt++; |
368 | else | 369 | perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, |
370 | regs, address); | ||
371 | } else { | ||
369 | tsk->min_flt++; | 372 | tsk->min_flt++; |
370 | 373 | perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, | |
374 | regs, address); | ||
375 | } | ||
371 | up_read(&mm->mmap_sem); | 376 | up_read(&mm->mmap_sem); |
372 | /* | 377 | /* |
373 | * The instruction that caused the program check will | 378 | * The instruction that caused the program check will |
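
The fault handler now reports to the perf software counters: every fault bumps PERF_COUNT_SW_PAGE_FAULTS, and the major/minor split follows VM_FAULT_MAJOR. The same split has long been visible from user space through getrusage(), which makes for an easy sanity check:

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rusage ru;

	if (getrusage(RUSAGE_SELF, &ru))
		return 1;
	/* ru_minflt/ru_majflt mirror the minor/major split the
	 * fault handler now feeds to perf */
	printf("minor faults: %ld, major faults: %ld\n",
	       ru.ru_minflt, ru.ru_majflt);
	return 0;
}
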
diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c index fc0ad73ffd90..f92ec203ad92 100644 --- a/arch/s390/mm/page-states.c +++ b/arch/s390/mm/page-states.c | |||
@@ -1,6 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * arch/s390/mm/page-states.c | ||
3 | * | ||
4 | * Copyright IBM Corp. 2008 | 2 | * Copyright IBM Corp. 2008 |
5 | * | 3 | * |
6 | * Guest page hinting for unused pages. | 4 | * Guest page hinting for unused pages. |
@@ -17,11 +15,12 @@ | |||
17 | #define ESSA_SET_STABLE 1 | 15 | #define ESSA_SET_STABLE 1 |
18 | #define ESSA_SET_UNUSED 2 | 16 | #define ESSA_SET_UNUSED 2 |
19 | 17 | ||
20 | static int cmma_flag; | 18 | static int cmma_flag = 1; |
21 | 19 | ||
22 | static int __init cmma(char *str) | 20 | static int __init cmma(char *str) |
23 | { | 21 | { |
24 | char *parm; | 22 | char *parm; |
23 | |||
25 | parm = strstrip(str); | 24 | parm = strstrip(str); |
26 | if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) { | 25 | if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) { |
27 | cmma_flag = 1; | 26 | cmma_flag = 1; |
@@ -32,7 +31,6 @@ static int __init cmma(char *str) | |||
32 | return 1; | 31 | return 1; |
33 | return 0; | 32 | return 0; |
34 | } | 33 | } |
35 | |||
36 | __setup("cmma=", cmma); | 34 | __setup("cmma=", cmma); |
37 | 35 | ||
38 | void __init cmma_init(void) | 36 | void __init cmma_init(void) |
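
cmma_flag now defaults to 1 and page-states.o is built unconditionally (see the mm/Makefile hunk above), so guest page hinting is on unless cmma=no or cmma=off is passed. The parameter parse as a standalone function (the no/off branch is reconstructed from context, since the hunk does not show it):

#include <assert.h>
#include <string.h>

static int cmma_flag = 1;	/* new default: enabled */

/* parse "yes"/"on" and "no"/"off" as the cmma= handler does
 * (strstrip() of the raw argument is skipped here);
 * returns 1 when the value was recognized */
static int cmma(const char *parm)
{
	if (!strcmp(parm, "yes") || !strcmp(parm, "on")) {
		cmma_flag = 1;
		return 1;
	}
	cmma_flag = 0;
	if (!strcmp(parm, "no") || !strcmp(parm, "off"))
		return 1;
	return 0;
}

int main(void)
{
	assert(cmma("off") && cmma_flag == 0);
	assert(cmma("yes") && cmma_flag == 1);
	assert(!cmma("bogus") && cmma_flag == 0);	/* unknown: disabled */
	return 0;
}
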
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 565667207985..c70215247071 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c | |||
@@ -78,9 +78,9 @@ unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec) | |||
78 | } | 78 | } |
79 | page->index = page_to_phys(shadow); | 79 | page->index = page_to_phys(shadow); |
80 | } | 80 | } |
81 | spin_lock(&mm->page_table_lock); | 81 | spin_lock(&mm->context.list_lock); |
82 | list_add(&page->lru, &mm->context.crst_list); | 82 | list_add(&page->lru, &mm->context.crst_list); |
83 | spin_unlock(&mm->page_table_lock); | 83 | spin_unlock(&mm->context.list_lock); |
84 | return (unsigned long *) page_to_phys(page); | 84 | return (unsigned long *) page_to_phys(page); |
85 | } | 85 | } |
86 | 86 | ||
@@ -89,9 +89,9 @@ void crst_table_free(struct mm_struct *mm, unsigned long *table) | |||
89 | unsigned long *shadow = get_shadow_table(table); | 89 | unsigned long *shadow = get_shadow_table(table); |
90 | struct page *page = virt_to_page(table); | 90 | struct page *page = virt_to_page(table); |
91 | 91 | ||
92 | spin_lock(&mm->page_table_lock); | 92 | spin_lock(&mm->context.list_lock); |
93 | list_del(&page->lru); | 93 | list_del(&page->lru); |
94 | spin_unlock(&mm->page_table_lock); | 94 | spin_unlock(&mm->context.list_lock); |
95 | if (shadow) | 95 | if (shadow) |
96 | free_pages((unsigned long) shadow, ALLOC_ORDER); | 96 | free_pages((unsigned long) shadow, ALLOC_ORDER); |
97 | free_pages((unsigned long) table, ALLOC_ORDER); | 97 | free_pages((unsigned long) table, ALLOC_ORDER); |
@@ -182,7 +182,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm) | |||
182 | unsigned long bits; | 182 | unsigned long bits; |
183 | 183 | ||
184 | bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL; | 184 | bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL; |
185 | spin_lock(&mm->page_table_lock); | 185 | spin_lock(&mm->context.list_lock); |
186 | page = NULL; | 186 | page = NULL; |
187 | if (!list_empty(&mm->context.pgtable_list)) { | 187 | if (!list_empty(&mm->context.pgtable_list)) { |
188 | page = list_first_entry(&mm->context.pgtable_list, | 188 | page = list_first_entry(&mm->context.pgtable_list, |
@@ -191,7 +191,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm) | |||
191 | page = NULL; | 191 | page = NULL; |
192 | } | 192 | } |
193 | if (!page) { | 193 | if (!page) { |
194 | spin_unlock(&mm->page_table_lock); | 194 | spin_unlock(&mm->context.list_lock); |
195 | page = alloc_page(GFP_KERNEL|__GFP_REPEAT); | 195 | page = alloc_page(GFP_KERNEL|__GFP_REPEAT); |
196 | if (!page) | 196 | if (!page) |
197 | return NULL; | 197 | return NULL; |
@@ -202,7 +202,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm) | |||
202 | clear_table_pgstes(table); | 202 | clear_table_pgstes(table); |
203 | else | 203 | else |
204 | clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE); | 204 | clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE); |
205 | spin_lock(&mm->page_table_lock); | 205 | spin_lock(&mm->context.list_lock); |
206 | list_add(&page->lru, &mm->context.pgtable_list); | 206 | list_add(&page->lru, &mm->context.pgtable_list); |
207 | } | 207 | } |
208 | table = (unsigned long *) page_to_phys(page); | 208 | table = (unsigned long *) page_to_phys(page); |
@@ -213,7 +213,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm) | |||
213 | page->flags |= bits; | 213 | page->flags |= bits; |
214 | if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1)) | 214 | if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1)) |
215 | list_move_tail(&page->lru, &mm->context.pgtable_list); | 215 | list_move_tail(&page->lru, &mm->context.pgtable_list); |
216 | spin_unlock(&mm->page_table_lock); | 216 | spin_unlock(&mm->context.list_lock); |
217 | return table; | 217 | return table; |
218 | } | 218 | } |
219 | 219 | ||
@@ -225,7 +225,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table) | |||
225 | bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL; | 225 | bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL; |
226 | bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long); | 226 | bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long); |
227 | page = pfn_to_page(__pa(table) >> PAGE_SHIFT); | 227 | page = pfn_to_page(__pa(table) >> PAGE_SHIFT); |
228 | spin_lock(&mm->page_table_lock); | 228 | spin_lock(&mm->context.list_lock); |
229 | page->flags ^= bits; | 229 | page->flags ^= bits; |
230 | if (page->flags & FRAG_MASK) { | 230 | if (page->flags & FRAG_MASK) { |
231 | /* Page now has some free pgtable fragments. */ | 231 | /* Page now has some free pgtable fragments. */ |
@@ -234,7 +234,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table) | |||
234 | } else | 234 | } else |
235 | /* All fragments of the 4K page have been freed. */ | 235 | /* All fragments of the 4K page have been freed. */ |
236 | list_del(&page->lru); | 236 | list_del(&page->lru); |
237 | spin_unlock(&mm->page_table_lock); | 237 | spin_unlock(&mm->context.list_lock); |
238 | if (page) { | 238 | if (page) { |
239 | pgtable_page_dtor(page); | 239 | pgtable_page_dtor(page); |
240 | __free_page(page); | 240 | __free_page(page); |
@@ -245,7 +245,7 @@ void disable_noexec(struct mm_struct *mm, struct task_struct *tsk) | |||
245 | { | 245 | { |
246 | struct page *page; | 246 | struct page *page; |
247 | 247 | ||
248 | spin_lock(&mm->page_table_lock); | 248 | spin_lock(&mm->context.list_lock); |
249 | /* Free shadow region and segment tables. */ | 249 | /* Free shadow region and segment tables. */ |
250 | list_for_each_entry(page, &mm->context.crst_list, lru) | 250 | list_for_each_entry(page, &mm->context.crst_list, lru) |
251 | if (page->index) { | 251 | if (page->index) { |
@@ -255,7 +255,7 @@ void disable_noexec(struct mm_struct *mm, struct task_struct *tsk) | |||
255 | /* "Free" second halves of page tables. */ | 255 | /* "Free" second halves of page tables. */ |
256 | list_for_each_entry(page, &mm->context.pgtable_list, lru) | 256 | list_for_each_entry(page, &mm->context.pgtable_list, lru) |
257 | page->flags &= ~SECOND_HALVES; | 257 | page->flags &= ~SECOND_HALVES; |
258 | spin_unlock(&mm->page_table_lock); | 258 | spin_unlock(&mm->context.list_lock); |
259 | mm->context.noexec = 0; | 259 | mm->context.noexec = 0; |
260 | update_mm(mm, tsk); | 260 | update_mm(mm, tsk); |
261 | } | 261 | } |
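
The crst/pgtable list operations switch from mm->page_table_lock to a new, dedicated mm->context.list_lock (initialized for init_mm in the vmem.c hunk below), so list maintenance no longer rides on the lock the generic fault path uses. The lock-splitting pattern, reduced to pthreads (a sketch of the idea, not the kernel locking):

#include <pthread.h>
#include <stdio.h>

/* before: one lock covered both the allocator lists and unrelated
 * page-table state; after: the lists get their own lock */
struct mm_context {
	pthread_mutex_t list_lock;	/* guards only the lists below */
	int crst_list_len;
	int pgtable_list_len;
};

static void crst_list_add(struct mm_context *ctx)
{
	pthread_mutex_lock(&ctx->list_lock);
	ctx->crst_list_len++;
	pthread_mutex_unlock(&ctx->list_lock);
}

int main(void)
{
	struct mm_context ctx = {
		.list_lock = PTHREAD_MUTEX_INITIALIZER,
	};

	crst_list_add(&ctx);
	printf("crst list length: %d\n", ctx.crst_list_len);
	return 0;
}
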
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c index e4868bfc672f..5f91a38d7592 100644 --- a/arch/s390/mm/vmem.c +++ b/arch/s390/mm/vmem.c | |||
@@ -331,6 +331,7 @@ void __init vmem_map_init(void) | |||
331 | unsigned long start, end; | 331 | unsigned long start, end; |
332 | int i; | 332 | int i; |
333 | 333 | ||
334 | spin_lock_init(&init_mm.context.list_lock); | ||
334 | INIT_LIST_HEAD(&init_mm.context.crst_list); | 335 | INIT_LIST_HEAD(&init_mm.context.crst_list); |
335 | INIT_LIST_HEAD(&init_mm.context.pgtable_list); | 336 | INIT_LIST_HEAD(&init_mm.context.pgtable_list); |
336 | init_mm.context.noexec = 0; | 337 | init_mm.context.noexec = 0; |
diff --git a/arch/s390/power/Makefile b/arch/s390/power/Makefile deleted file mode 100644 index 973bb45a8fec..000000000000 --- a/arch/s390/power/Makefile +++ /dev/null | |||
@@ -1,8 +0,0 @@ | |||
1 | # | ||
2 | # Makefile for s390 PM support | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_HIBERNATION) += suspend.o | ||
6 | obj-$(CONFIG_HIBERNATION) += swsusp.o | ||
7 | obj-$(CONFIG_HIBERNATION) += swsusp_64.o | ||
8 | obj-$(CONFIG_HIBERNATION) += swsusp_asm64.o | ||
diff --git a/arch/s390/power/suspend.c b/arch/s390/power/suspend.c deleted file mode 100644 index b3351eceebbe..000000000000 --- a/arch/s390/power/suspend.c +++ /dev/null | |||
@@ -1,40 +0,0 @@ | |||
1 | /* | ||
2 | * Suspend support specific for s390. | ||
3 | * | ||
4 | * Copyright IBM Corp. 2009 | ||
5 | * | ||
6 | * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com> | ||
7 | */ | ||
8 | |||
9 | #include <linux/mm.h> | ||
10 | #include <linux/suspend.h> | ||
11 | #include <linux/reboot.h> | ||
12 | #include <linux/pfn.h> | ||
13 | #include <asm/sections.h> | ||
14 | #include <asm/ipl.h> | ||
15 | |||
16 | /* | ||
17 | * References to section boundaries | ||
18 | */ | ||
19 | extern const void __nosave_begin, __nosave_end; | ||
20 | |||
21 | /* | ||
22 | * check if given pfn is in the 'nosave' or in the read only NSS section | ||
23 | */ | ||
24 | int pfn_is_nosave(unsigned long pfn) | ||
25 | { | ||
26 | unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT; | ||
27 | unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) | ||
28 | >> PAGE_SHIFT; | ||
29 | unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1; | ||
30 | unsigned long stext_pfn = PFN_DOWN(__pa(&_stext)); | ||
31 | |||
32 | if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn) | ||
33 | return 1; | ||
34 | if (pfn >= stext_pfn && pfn <= eshared_pfn) { | ||
35 | if (ipl_info.type == IPL_TYPE_NSS) | ||
36 | return 1; | ||
37 | } else if ((tprot(pfn * PAGE_SIZE) && pfn > 0)) | ||
38 | return 1; | ||
39 | return 0; | ||
40 | } | ||
diff --git a/arch/s390/power/swsusp_64.c b/arch/s390/power/swsusp_64.c deleted file mode 100644 index 9516a517d72f..000000000000 --- a/arch/s390/power/swsusp_64.c +++ /dev/null | |||
@@ -1,17 +0,0 @@ | |||
1 | /* | ||
2 | * Support for suspend and resume on s390 | ||
3 | * | ||
4 | * Copyright IBM Corp. 2009 | ||
5 | * | ||
6 | * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com> | ||
7 | * | ||
8 | */ | ||
9 | |||
10 | #include <asm/system.h> | ||
11 | #include <linux/interrupt.h> | ||
12 | |||
13 | void do_after_copyback(void) | ||
14 | { | ||
15 | mb(); | ||
16 | } | ||
17 | |||
diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c index b5afbec1db59..04a21883f327 100644 --- a/arch/sh/kernel/signal_32.c +++ b/arch/sh/kernel/signal_32.c | |||
@@ -640,5 +640,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int save_r0, | |||
640 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { | 640 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { |
641 | clear_thread_flag(TIF_NOTIFY_RESUME); | 641 | clear_thread_flag(TIF_NOTIFY_RESUME); |
642 | tracehook_notify_resume(regs); | 642 | tracehook_notify_resume(regs); |
643 | if (current->replacement_session_keyring) | ||
644 | key_replace_session_keyring(); | ||
643 | } | 645 | } |
644 | } | 646 | } |
diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c index 0663a0ee6021..9e5c9b1d7e98 100644 --- a/arch/sh/kernel/signal_64.c +++ b/arch/sh/kernel/signal_64.c | |||
@@ -772,5 +772,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned long thread_info | |||
772 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { | 772 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { |
773 | clear_thread_flag(TIF_NOTIFY_RESUME); | 773 | clear_thread_flag(TIF_NOTIFY_RESUME); |
774 | tracehook_notify_resume(regs); | 774 | tracehook_notify_resume(regs); |
775 | if (current->replacement_session_keyring) | ||
776 | key_replace_session_keyring(); | ||
775 | } | 777 | } |
776 | } | 778 | } |
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 3f8b6a92eabd..233cff53a623 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig | |||
@@ -25,6 +25,8 @@ config SPARC | |||
25 | select ARCH_WANT_OPTIONAL_GPIOLIB | 25 | select ARCH_WANT_OPTIONAL_GPIOLIB |
26 | select RTC_CLASS | 26 | select RTC_CLASS |
27 | select RTC_DRV_M48T59 | 27 | select RTC_DRV_M48T59 |
28 | select HAVE_DMA_ATTRS | ||
29 | select HAVE_DMA_API_DEBUG | ||
28 | 30 | ||
29 | config SPARC32 | 31 | config SPARC32 |
30 | def_bool !64BIT | 32 | def_bool !64BIT |
diff --git a/arch/sparc/configs/sparc32_defconfig b/arch/sparc/configs/sparc32_defconfig index 8bcd27af724b..a0f62a808edb 100644 --- a/arch/sparc/configs/sparc32_defconfig +++ b/arch/sparc/configs/sparc32_defconfig | |||
@@ -1,7 +1,7 @@ | |||
1 | # | 1 | # |
2 | # Automatically generated make config: don't edit | 2 | # Automatically generated make config: don't edit |
3 | # Linux kernel version: 2.6.30-rc2 | 3 | # Linux kernel version: 2.6.31-rc1 |
4 | # Fri Apr 17 04:04:46 2009 | 4 | # Tue Aug 18 23:45:52 2009 |
5 | # | 5 | # |
6 | # CONFIG_64BIT is not set | 6 | # CONFIG_64BIT is not set |
7 | CONFIG_SPARC=y | 7 | CONFIG_SPARC=y |
@@ -17,6 +17,7 @@ CONFIG_GENERIC_ISA_DMA=y | |||
17 | CONFIG_ARCH_NO_VIRT_TO_BUS=y | 17 | CONFIG_ARCH_NO_VIRT_TO_BUS=y |
18 | CONFIG_OF=y | 18 | CONFIG_OF=y |
19 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" | 19 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" |
20 | CONFIG_CONSTRUCTORS=y | ||
20 | 21 | ||
21 | # | 22 | # |
22 | # General setup | 23 | # General setup |
@@ -74,7 +75,6 @@ CONFIG_SYSCTL_SYSCALL=y | |||
74 | CONFIG_KALLSYMS=y | 75 | CONFIG_KALLSYMS=y |
75 | # CONFIG_KALLSYMS_ALL is not set | 76 | # CONFIG_KALLSYMS_ALL is not set |
76 | # CONFIG_KALLSYMS_EXTRA_PASS is not set | 77 | # CONFIG_KALLSYMS_EXTRA_PASS is not set |
77 | # CONFIG_STRIP_ASM_SYMS is not set | ||
78 | CONFIG_HOTPLUG=y | 78 | CONFIG_HOTPLUG=y |
79 | CONFIG_PRINTK=y | 79 | CONFIG_PRINTK=y |
80 | CONFIG_BUG=y | 80 | CONFIG_BUG=y |
@@ -87,8 +87,13 @@ CONFIG_TIMERFD=y | |||
87 | CONFIG_EVENTFD=y | 87 | CONFIG_EVENTFD=y |
88 | CONFIG_SHMEM=y | 88 | CONFIG_SHMEM=y |
89 | CONFIG_AIO=y | 89 | CONFIG_AIO=y |
90 | |||
91 | # | ||
92 | # Performance Counters | ||
93 | # | ||
90 | CONFIG_VM_EVENT_COUNTERS=y | 94 | CONFIG_VM_EVENT_COUNTERS=y |
91 | CONFIG_PCI_QUIRKS=y | 95 | CONFIG_PCI_QUIRKS=y |
96 | # CONFIG_STRIP_ASM_SYMS is not set | ||
92 | CONFIG_COMPAT_BRK=y | 97 | CONFIG_COMPAT_BRK=y |
93 | CONFIG_SLAB=y | 98 | CONFIG_SLAB=y |
94 | # CONFIG_SLUB is not set | 99 | # CONFIG_SLUB is not set |
@@ -97,6 +102,10 @@ CONFIG_SLAB=y | |||
97 | # CONFIG_MARKERS is not set | 102 | # CONFIG_MARKERS is not set |
98 | CONFIG_HAVE_OPROFILE=y | 103 | CONFIG_HAVE_OPROFILE=y |
99 | CONFIG_HAVE_ARCH_TRACEHOOK=y | 104 | CONFIG_HAVE_ARCH_TRACEHOOK=y |
105 | |||
106 | # | ||
107 | # GCOV-based kernel profiling | ||
108 | # | ||
100 | # CONFIG_SLOW_WORK is not set | 109 | # CONFIG_SLOW_WORK is not set |
101 | # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set | 110 | # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set |
102 | CONFIG_SLABINFO=y | 111 | CONFIG_SLABINFO=y |
@@ -109,7 +118,7 @@ CONFIG_MODULE_UNLOAD=y | |||
109 | # CONFIG_MODVERSIONS is not set | 118 | # CONFIG_MODVERSIONS is not set |
110 | # CONFIG_MODULE_SRCVERSION_ALL is not set | 119 | # CONFIG_MODULE_SRCVERSION_ALL is not set |
111 | CONFIG_BLOCK=y | 120 | CONFIG_BLOCK=y |
112 | # CONFIG_LBD is not set | 121 | CONFIG_LBDAF=y |
113 | # CONFIG_BLK_DEV_BSG is not set | 122 | # CONFIG_BLK_DEV_BSG is not set |
114 | # CONFIG_BLK_DEV_INTEGRITY is not set | 123 | # CONFIG_BLK_DEV_INTEGRITY is not set |
115 | 124 | ||
@@ -154,9 +163,9 @@ CONFIG_SPLIT_PTLOCK_CPUS=4 | |||
154 | # CONFIG_PHYS_ADDR_T_64BIT is not set | 163 | # CONFIG_PHYS_ADDR_T_64BIT is not set |
155 | CONFIG_ZONE_DMA_FLAG=1 | 164 | CONFIG_ZONE_DMA_FLAG=1 |
156 | CONFIG_BOUNCE=y | 165 | CONFIG_BOUNCE=y |
157 | CONFIG_UNEVICTABLE_LRU=y | ||
158 | CONFIG_HAVE_MLOCK=y | 166 | CONFIG_HAVE_MLOCK=y |
159 | CONFIG_HAVE_MLOCKED_PAGE_BIT=y | 167 | CONFIG_HAVE_MLOCKED_PAGE_BIT=y |
168 | CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 | ||
160 | CONFIG_SUN_PM=y | 169 | CONFIG_SUN_PM=y |
161 | # CONFIG_SPARC_LED is not set | 170 | # CONFIG_SPARC_LED is not set |
162 | CONFIG_SERIAL_CONSOLE=y | 171 | CONFIG_SERIAL_CONSOLE=y |
@@ -264,6 +273,7 @@ CONFIG_IPV6_TUNNEL=m | |||
264 | # CONFIG_ECONET is not set | 273 | # CONFIG_ECONET is not set |
265 | # CONFIG_WAN_ROUTER is not set | 274 | # CONFIG_WAN_ROUTER is not set |
266 | # CONFIG_PHONET is not set | 275 | # CONFIG_PHONET is not set |
276 | # CONFIG_IEEE802154 is not set | ||
267 | # CONFIG_NET_SCHED is not set | 277 | # CONFIG_NET_SCHED is not set |
268 | # CONFIG_DCB is not set | 278 | # CONFIG_DCB is not set |
269 | 279 | ||
@@ -281,7 +291,11 @@ CONFIG_WIRELESS=y | |||
281 | CONFIG_WIRELESS_OLD_REGULATORY=y | 291 | CONFIG_WIRELESS_OLD_REGULATORY=y |
282 | # CONFIG_WIRELESS_EXT is not set | 292 | # CONFIG_WIRELESS_EXT is not set |
283 | # CONFIG_LIB80211 is not set | 293 | # CONFIG_LIB80211 is not set |
284 | # CONFIG_MAC80211 is not set | 294 | |
295 | # | ||
296 | # CFG80211 needs to be enabled for MAC80211 | ||
297 | # | ||
298 | CONFIG_MAC80211_DEFAULT_PS_VALUE=0 | ||
285 | # CONFIG_WIMAX is not set | 299 | # CONFIG_WIMAX is not set |
286 | # CONFIG_RFKILL is not set | 300 | # CONFIG_RFKILL is not set |
287 | # CONFIG_NET_9P is not set | 301 | # CONFIG_NET_9P is not set |
@@ -335,6 +349,7 @@ CONFIG_MISC_DEVICES=y | |||
335 | # EEPROM support | 349 | # EEPROM support |
336 | # | 350 | # |
337 | # CONFIG_EEPROM_93CX6 is not set | 351 | # CONFIG_EEPROM_93CX6 is not set |
352 | # CONFIG_CB710_CORE is not set | ||
338 | CONFIG_HAVE_IDE=y | 353 | CONFIG_HAVE_IDE=y |
339 | # CONFIG_IDE is not set | 354 | # CONFIG_IDE is not set |
340 | 355 | ||
@@ -358,10 +373,6 @@ CONFIG_BLK_DEV_SR=m | |||
358 | # CONFIG_BLK_DEV_SR_VENDOR is not set | 373 | # CONFIG_BLK_DEV_SR_VENDOR is not set |
359 | CONFIG_CHR_DEV_SG=m | 374 | CONFIG_CHR_DEV_SG=m |
360 | # CONFIG_CHR_DEV_SCH is not set | 375 | # CONFIG_CHR_DEV_SCH is not set |
361 | |||
362 | # | ||
363 | # Some SCSI devices (e.g. CD jukebox) support multiple LUNs | ||
364 | # | ||
365 | # CONFIG_SCSI_MULTI_LUN is not set | 376 | # CONFIG_SCSI_MULTI_LUN is not set |
366 | # CONFIG_SCSI_CONSTANTS is not set | 377 | # CONFIG_SCSI_CONSTANTS is not set |
367 | # CONFIG_SCSI_LOGGING is not set | 378 | # CONFIG_SCSI_LOGGING is not set |
@@ -379,6 +390,7 @@ CONFIG_SCSI_SPI_ATTRS=y | |||
379 | CONFIG_SCSI_LOWLEVEL=y | 390 | CONFIG_SCSI_LOWLEVEL=y |
380 | # CONFIG_ISCSI_TCP is not set | 391 | # CONFIG_ISCSI_TCP is not set |
381 | # CONFIG_SCSI_CXGB3_ISCSI is not set | 392 | # CONFIG_SCSI_CXGB3_ISCSI is not set |
393 | # CONFIG_SCSI_BNX2_ISCSI is not set | ||
382 | # CONFIG_BLK_DEV_3W_XXXX_RAID is not set | 394 | # CONFIG_BLK_DEV_3W_XXXX_RAID is not set |
383 | # CONFIG_SCSI_3W_9XXX is not set | 395 | # CONFIG_SCSI_3W_9XXX is not set |
384 | # CONFIG_SCSI_ACARD is not set | 396 | # CONFIG_SCSI_ACARD is not set |
@@ -387,6 +399,7 @@ CONFIG_SCSI_LOWLEVEL=y | |||
387 | # CONFIG_SCSI_AIC7XXX_OLD is not set | 399 | # CONFIG_SCSI_AIC7XXX_OLD is not set |
388 | # CONFIG_SCSI_AIC79XX is not set | 400 | # CONFIG_SCSI_AIC79XX is not set |
389 | # CONFIG_SCSI_AIC94XX is not set | 401 | # CONFIG_SCSI_AIC94XX is not set |
402 | # CONFIG_SCSI_MVSAS is not set | ||
390 | # CONFIG_SCSI_ARCMSR is not set | 403 | # CONFIG_SCSI_ARCMSR is not set |
391 | # CONFIG_MEGARAID_NEWGEN is not set | 404 | # CONFIG_MEGARAID_NEWGEN is not set |
392 | # CONFIG_MEGARAID_LEGACY is not set | 405 | # CONFIG_MEGARAID_LEGACY is not set |
@@ -401,7 +414,6 @@ CONFIG_SCSI_LOWLEVEL=y | |||
401 | # CONFIG_SCSI_IPS is not set | 414 | # CONFIG_SCSI_IPS is not set |
402 | # CONFIG_SCSI_INITIO is not set | 415 | # CONFIG_SCSI_INITIO is not set |
403 | # CONFIG_SCSI_INIA100 is not set | 416 | # CONFIG_SCSI_INIA100 is not set |
404 | # CONFIG_SCSI_MVSAS is not set | ||
405 | # CONFIG_SCSI_STEX is not set | 417 | # CONFIG_SCSI_STEX is not set |
406 | # CONFIG_SCSI_SYM53C8XX_2 is not set | 418 | # CONFIG_SCSI_SYM53C8XX_2 is not set |
407 | # CONFIG_SCSI_QLOGIC_1280 is not set | 419 | # CONFIG_SCSI_QLOGIC_1280 is not set |
@@ -426,13 +438,16 @@ CONFIG_SCSI_SUNESP=y | |||
426 | # | 438 | # |
427 | 439 | ||
428 | # | 440 | # |
429 | # Enable only one of the two stacks, unless you know what you are doing | 441 | # You can enable one or both FireWire driver stacks. |
442 | # | ||
443 | |||
444 | # | ||
445 | # See the help texts for more information. | ||
430 | # | 446 | # |
431 | # CONFIG_FIREWIRE is not set | 447 | # CONFIG_FIREWIRE is not set |
432 | # CONFIG_IEEE1394 is not set | 448 | # CONFIG_IEEE1394 is not set |
433 | # CONFIG_I2O is not set | 449 | # CONFIG_I2O is not set |
434 | CONFIG_NETDEVICES=y | 450 | CONFIG_NETDEVICES=y |
435 | CONFIG_COMPAT_NET_DEV_OPS=y | ||
436 | CONFIG_DUMMY=m | 451 | CONFIG_DUMMY=m |
437 | # CONFIG_BONDING is not set | 452 | # CONFIG_BONDING is not set |
438 | # CONFIG_MACVLAN is not set | 453 | # CONFIG_MACVLAN is not set |
@@ -463,6 +478,7 @@ CONFIG_SUNQE=m | |||
463 | # CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set | 478 | # CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set |
464 | # CONFIG_NET_PCI is not set | 479 | # CONFIG_NET_PCI is not set |
465 | # CONFIG_B44 is not set | 480 | # CONFIG_B44 is not set |
481 | # CONFIG_KS8842 is not set | ||
466 | # CONFIG_ATL2 is not set | 482 | # CONFIG_ATL2 is not set |
467 | CONFIG_NETDEV_1000=y | 483 | CONFIG_NETDEV_1000=y |
468 | # CONFIG_ACENIC is not set | 484 | # CONFIG_ACENIC is not set |
@@ -482,6 +498,7 @@ CONFIG_NETDEV_1000=y | |||
482 | # CONFIG_VIA_VELOCITY is not set | 498 | # CONFIG_VIA_VELOCITY is not set |
483 | # CONFIG_TIGON3 is not set | 499 | # CONFIG_TIGON3 is not set |
484 | # CONFIG_BNX2 is not set | 500 | # CONFIG_BNX2 is not set |
501 | # CONFIG_CNIC is not set | ||
485 | # CONFIG_QLA3XXX is not set | 502 | # CONFIG_QLA3XXX is not set |
486 | # CONFIG_ATL1 is not set | 503 | # CONFIG_ATL1 is not set |
487 | # CONFIG_ATL1E is not set | 504 | # CONFIG_ATL1E is not set |
@@ -629,6 +646,11 @@ CONFIG_HW_RANDOM=m | |||
629 | CONFIG_DEVPORT=y | 646 | CONFIG_DEVPORT=y |
630 | # CONFIG_I2C is not set | 647 | # CONFIG_I2C is not set |
631 | # CONFIG_SPI is not set | 648 | # CONFIG_SPI is not set |
649 | |||
650 | # | ||
651 | # PPS support | ||
652 | # | ||
653 | # CONFIG_PPS is not set | ||
632 | CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y | 654 | CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y |
633 | # CONFIG_GPIOLIB is not set | 655 | # CONFIG_GPIOLIB is not set |
634 | # CONFIG_W1 is not set | 656 | # CONFIG_W1 is not set |
@@ -668,22 +690,7 @@ CONFIG_SSB_POSSIBLE=y | |||
668 | # CONFIG_HTC_PASIC3 is not set | 690 | # CONFIG_HTC_PASIC3 is not set |
669 | # CONFIG_MFD_TMIO is not set | 691 | # CONFIG_MFD_TMIO is not set |
670 | # CONFIG_REGULATOR is not set | 692 | # CONFIG_REGULATOR is not set |
671 | 693 | # CONFIG_MEDIA_SUPPORT is not set | |
672 | # | ||
673 | # Multimedia devices | ||
674 | # | ||
675 | |||
676 | # | ||
677 | # Multimedia core support | ||
678 | # | ||
679 | # CONFIG_VIDEO_DEV is not set | ||
680 | # CONFIG_DVB_CORE is not set | ||
681 | # CONFIG_VIDEO_MEDIA is not set | ||
682 | |||
683 | # | ||
684 | # Multimedia drivers | ||
685 | # | ||
686 | # CONFIG_DAB is not set | ||
687 | 694 | ||
688 | # | 695 | # |
689 | # Graphics support | 696 | # Graphics support |
@@ -776,6 +783,10 @@ CONFIG_RTC_DRV_M48T59=y | |||
776 | # CONFIG_DMADEVICES is not set | 783 | # CONFIG_DMADEVICES is not set |
777 | # CONFIG_AUXDISPLAY is not set | 784 | # CONFIG_AUXDISPLAY is not set |
778 | # CONFIG_UIO is not set | 785 | # CONFIG_UIO is not set |
786 | |||
787 | # | ||
788 | # TI VLYNQ | ||
789 | # | ||
779 | # CONFIG_STAGING is not set | 790 | # CONFIG_STAGING is not set |
780 | 791 | ||
781 | # | 792 | # |
@@ -799,10 +810,12 @@ CONFIG_FS_MBCACHE=y | |||
799 | # CONFIG_REISERFS_FS is not set | 810 | # CONFIG_REISERFS_FS is not set |
800 | # CONFIG_JFS_FS is not set | 811 | # CONFIG_JFS_FS is not set |
801 | CONFIG_FS_POSIX_ACL=y | 812 | CONFIG_FS_POSIX_ACL=y |
802 | CONFIG_FILE_LOCKING=y | ||
803 | # CONFIG_XFS_FS is not set | 813 | # CONFIG_XFS_FS is not set |
814 | # CONFIG_GFS2_FS is not set | ||
804 | # CONFIG_OCFS2_FS is not set | 815 | # CONFIG_OCFS2_FS is not set |
805 | # CONFIG_BTRFS_FS is not set | 816 | # CONFIG_BTRFS_FS is not set |
817 | CONFIG_FILE_LOCKING=y | ||
818 | CONFIG_FSNOTIFY=y | ||
806 | CONFIG_DNOTIFY=y | 819 | CONFIG_DNOTIFY=y |
807 | CONFIG_INOTIFY=y | 820 | CONFIG_INOTIFY=y |
808 | CONFIG_INOTIFY_USER=y | 821 | CONFIG_INOTIFY_USER=y |
@@ -985,6 +998,7 @@ CONFIG_KGDB=y | |||
985 | CONFIG_KGDB_SERIAL_CONSOLE=y | 998 | CONFIG_KGDB_SERIAL_CONSOLE=y |
986 | CONFIG_KGDB_TESTS=y | 999 | CONFIG_KGDB_TESTS=y |
987 | # CONFIG_KGDB_TESTS_ON_BOOT is not set | 1000 | # CONFIG_KGDB_TESTS_ON_BOOT is not set |
1001 | # CONFIG_KMEMCHECK is not set | ||
988 | # CONFIG_DEBUG_STACK_USAGE is not set | 1002 | # CONFIG_DEBUG_STACK_USAGE is not set |
989 | # CONFIG_STACK_DEBUG is not set | 1003 | # CONFIG_STACK_DEBUG is not set |
990 | 1004 | ||
diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig index 0123a4c596ce..fdddf7a6f725 100644 --- a/arch/sparc/configs/sparc64_defconfig +++ b/arch/sparc/configs/sparc64_defconfig | |||
@@ -1,7 +1,7 @@ | |||
1 | # | 1 | # |
2 | # Automatically generated make config: don't edit | 2 | # Automatically generated make config: don't edit |
3 | # Linux kernel version: 2.6.30 | 3 | # Linux kernel version: 2.6.31-rc1 |
4 | # Tue Jun 16 04:59:36 2009 | 4 | # Tue Aug 18 23:56:02 2009 |
5 | # | 5 | # |
6 | CONFIG_64BIT=y | 6 | CONFIG_64BIT=y |
7 | CONFIG_SPARC=y | 7 | CONFIG_SPARC=y |
@@ -26,6 +26,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y | |||
26 | CONFIG_OF=y | 26 | CONFIG_OF=y |
27 | CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y | 27 | CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y |
28 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" | 28 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" |
29 | CONFIG_CONSTRUCTORS=y | ||
29 | 30 | ||
30 | # | 31 | # |
31 | # General setup | 32 | # General setup |
@@ -119,6 +120,11 @@ CONFIG_HAVE_KPROBES=y | |||
119 | CONFIG_HAVE_KRETPROBES=y | 120 | CONFIG_HAVE_KRETPROBES=y |
120 | CONFIG_HAVE_ARCH_TRACEHOOK=y | 121 | CONFIG_HAVE_ARCH_TRACEHOOK=y |
121 | CONFIG_USE_GENERIC_SMP_HELPERS=y | 122 | CONFIG_USE_GENERIC_SMP_HELPERS=y |
123 | |||
124 | # | ||
125 | # GCOV-based kernel profiling | ||
126 | # | ||
127 | # CONFIG_GCOV_KERNEL is not set | ||
122 | # CONFIG_SLOW_WORK is not set | 128 | # CONFIG_SLOW_WORK is not set |
123 | # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set | 129 | # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set |
124 | CONFIG_SLABINFO=y | 130 | CONFIG_SLABINFO=y |
@@ -204,7 +210,6 @@ CONFIG_MIGRATION=y | |||
204 | CONFIG_PHYS_ADDR_T_64BIT=y | 210 | CONFIG_PHYS_ADDR_T_64BIT=y |
205 | CONFIG_ZONE_DMA_FLAG=0 | 211 | CONFIG_ZONE_DMA_FLAG=0 |
206 | CONFIG_NR_QUICK=1 | 212 | CONFIG_NR_QUICK=1 |
207 | CONFIG_UNEVICTABLE_LRU=y | ||
208 | CONFIG_HAVE_MLOCK=y | 213 | CONFIG_HAVE_MLOCK=y |
209 | CONFIG_HAVE_MLOCKED_PAGE_BIT=y | 214 | CONFIG_HAVE_MLOCKED_PAGE_BIT=y |
210 | CONFIG_DEFAULT_MMAP_MIN_ADDR=8192 | 215 | CONFIG_DEFAULT_MMAP_MIN_ADDR=8192 |
@@ -410,6 +415,7 @@ CONFIG_MISC_DEVICES=y | |||
410 | # | 415 | # |
411 | # CONFIG_EEPROM_AT24 is not set | 416 | # CONFIG_EEPROM_AT24 is not set |
412 | # CONFIG_EEPROM_LEGACY is not set | 417 | # CONFIG_EEPROM_LEGACY is not set |
418 | # CONFIG_EEPROM_MAX6875 is not set | ||
413 | # CONFIG_EEPROM_93CX6 is not set | 419 | # CONFIG_EEPROM_93CX6 is not set |
414 | # CONFIG_CB710_CORE is not set | 420 | # CONFIG_CB710_CORE is not set |
415 | CONFIG_HAVE_IDE=y | 421 | CONFIG_HAVE_IDE=y |
@@ -562,6 +568,7 @@ CONFIG_BLK_DEV_DM=m | |||
562 | CONFIG_DM_CRYPT=m | 568 | CONFIG_DM_CRYPT=m |
563 | CONFIG_DM_SNAPSHOT=m | 569 | CONFIG_DM_SNAPSHOT=m |
564 | CONFIG_DM_MIRROR=m | 570 | CONFIG_DM_MIRROR=m |
571 | # CONFIG_DM_LOG_USERSPACE is not set | ||
565 | CONFIG_DM_ZERO=m | 572 | CONFIG_DM_ZERO=m |
566 | # CONFIG_DM_MULTIPATH is not set | 573 | # CONFIG_DM_MULTIPATH is not set |
567 | # CONFIG_DM_DELAY is not set | 574 | # CONFIG_DM_DELAY is not set |
@@ -573,7 +580,11 @@ CONFIG_DM_ZERO=m | |||
573 | # | 580 | # |
574 | 581 | ||
575 | # | 582 | # |
576 | # Enable only one of the two stacks, unless you know what you are doing | 583 | # You can enable one or both FireWire driver stacks. |
584 | # | ||
585 | |||
586 | # | ||
587 | # See the help texts for more information. | ||
577 | # | 588 | # |
578 | # CONFIG_FIREWIRE is not set | 589 | # CONFIG_FIREWIRE is not set |
579 | # CONFIG_IEEE1394 is not set | 590 | # CONFIG_IEEE1394 is not set |
@@ -667,6 +678,7 @@ CONFIG_E1000E=m | |||
667 | # CONFIG_VIA_VELOCITY is not set | 678 | # CONFIG_VIA_VELOCITY is not set |
668 | CONFIG_TIGON3=m | 679 | CONFIG_TIGON3=m |
669 | CONFIG_BNX2=m | 680 | CONFIG_BNX2=m |
681 | # CONFIG_CNIC is not set | ||
670 | # CONFIG_QLA3XXX is not set | 682 | # CONFIG_QLA3XXX is not set |
671 | # CONFIG_ATL1 is not set | 683 | # CONFIG_ATL1 is not set |
672 | # CONFIG_ATL1E is not set | 684 | # CONFIG_ATL1E is not set |
@@ -773,6 +785,7 @@ CONFIG_MOUSE_SERIAL=y | |||
773 | # CONFIG_MOUSE_APPLETOUCH is not set | 785 | # CONFIG_MOUSE_APPLETOUCH is not set |
774 | # CONFIG_MOUSE_BCM5974 is not set | 786 | # CONFIG_MOUSE_BCM5974 is not set |
775 | # CONFIG_MOUSE_VSXXXAA is not set | 787 | # CONFIG_MOUSE_VSXXXAA is not set |
788 | # CONFIG_MOUSE_SYNAPTICS_I2C is not set | ||
776 | # CONFIG_INPUT_JOYSTICK is not set | 789 | # CONFIG_INPUT_JOYSTICK is not set |
777 | # CONFIG_INPUT_TABLET is not set | 790 | # CONFIG_INPUT_TABLET is not set |
778 | # CONFIG_INPUT_TOUCHSCREEN is not set | 791 | # CONFIG_INPUT_TOUCHSCREEN is not set |
@@ -870,6 +883,7 @@ CONFIG_I2C_ALGOBIT=y | |||
870 | # | 883 | # |
871 | # I2C system bus drivers (mostly embedded / system-on-chip) | 884 | # I2C system bus drivers (mostly embedded / system-on-chip) |
872 | # | 885 | # |
886 | # CONFIG_I2C_DESIGNWARE is not set | ||
873 | # CONFIG_I2C_OCORES is not set | 887 | # CONFIG_I2C_OCORES is not set |
874 | # CONFIG_I2C_SIMTEC is not set | 888 | # CONFIG_I2C_SIMTEC is not set |
875 | 889 | ||
@@ -898,13 +912,17 @@ CONFIG_I2C_ALGOBIT=y | |||
898 | # CONFIG_SENSORS_PCF8574 is not set | 912 | # CONFIG_SENSORS_PCF8574 is not set |
899 | # CONFIG_PCF8575 is not set | 913 | # CONFIG_PCF8575 is not set |
900 | # CONFIG_SENSORS_PCA9539 is not set | 914 | # CONFIG_SENSORS_PCA9539 is not set |
901 | # CONFIG_SENSORS_MAX6875 is not set | ||
902 | # CONFIG_SENSORS_TSL2550 is not set | 915 | # CONFIG_SENSORS_TSL2550 is not set |
903 | # CONFIG_I2C_DEBUG_CORE is not set | 916 | # CONFIG_I2C_DEBUG_CORE is not set |
904 | # CONFIG_I2C_DEBUG_ALGO is not set | 917 | # CONFIG_I2C_DEBUG_ALGO is not set |
905 | # CONFIG_I2C_DEBUG_BUS is not set | 918 | # CONFIG_I2C_DEBUG_BUS is not set |
906 | # CONFIG_I2C_DEBUG_CHIP is not set | 919 | # CONFIG_I2C_DEBUG_CHIP is not set |
907 | # CONFIG_SPI is not set | 920 | # CONFIG_SPI is not set |
921 | |||
922 | # | ||
923 | # PPS support | ||
924 | # | ||
925 | # CONFIG_PPS is not set | ||
908 | CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y | 926 | CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y |
909 | # CONFIG_GPIOLIB is not set | 927 | # CONFIG_GPIOLIB is not set |
910 | # CONFIG_W1 is not set | 928 | # CONFIG_W1 is not set |
@@ -959,6 +977,7 @@ CONFIG_HWMON=y | |||
959 | # CONFIG_SENSORS_SMSC47B397 is not set | 977 | # CONFIG_SENSORS_SMSC47B397 is not set |
960 | # CONFIG_SENSORS_ADS7828 is not set | 978 | # CONFIG_SENSORS_ADS7828 is not set |
961 | # CONFIG_SENSORS_THMC50 is not set | 979 | # CONFIG_SENSORS_THMC50 is not set |
980 | # CONFIG_SENSORS_TMP401 is not set | ||
962 | # CONFIG_SENSORS_VIA686A is not set | 981 | # CONFIG_SENSORS_VIA686A is not set |
963 | # CONFIG_SENSORS_VT1211 is not set | 982 | # CONFIG_SENSORS_VT1211 is not set |
964 | # CONFIG_SENSORS_VT8231 is not set | 983 | # CONFIG_SENSORS_VT8231 is not set |
@@ -994,23 +1013,9 @@ CONFIG_SSB_POSSIBLE=y | |||
994 | # CONFIG_MFD_WM8400 is not set | 1013 | # CONFIG_MFD_WM8400 is not set |
995 | # CONFIG_MFD_WM8350_I2C is not set | 1014 | # CONFIG_MFD_WM8350_I2C is not set |
996 | # CONFIG_MFD_PCF50633 is not set | 1015 | # CONFIG_MFD_PCF50633 is not set |
1016 | # CONFIG_AB3100_CORE is not set | ||
997 | # CONFIG_REGULATOR is not set | 1017 | # CONFIG_REGULATOR is not set |
998 | 1018 | # CONFIG_MEDIA_SUPPORT is not set | |
999 | # | ||
1000 | # Multimedia devices | ||
1001 | # | ||
1002 | |||
1003 | # | ||
1004 | # Multimedia core support | ||
1005 | # | ||
1006 | # CONFIG_VIDEO_DEV is not set | ||
1007 | # CONFIG_DVB_CORE is not set | ||
1008 | # CONFIG_VIDEO_MEDIA is not set | ||
1009 | |||
1010 | # | ||
1011 | # Multimedia drivers | ||
1012 | # | ||
1013 | # CONFIG_DAB is not set | ||
1014 | 1019 | ||
1015 | # | 1020 | # |
1016 | # Graphics support | 1021 | # Graphics support |
@@ -1284,7 +1289,6 @@ CONFIG_USB=y | |||
1284 | # | 1289 | # |
1285 | # Miscellaneous USB options | 1290 | # Miscellaneous USB options |
1286 | # | 1291 | # |
1287 | CONFIG_USB_DEVICEFS=y | ||
1288 | # CONFIG_USB_DEVICE_CLASS is not set | 1292 | # CONFIG_USB_DEVICE_CLASS is not set |
1289 | # CONFIG_USB_DYNAMIC_MINORS is not set | 1293 | # CONFIG_USB_DYNAMIC_MINORS is not set |
1290 | # CONFIG_USB_OTG is not set | 1294 | # CONFIG_USB_OTG is not set |
@@ -1296,6 +1300,7 @@ CONFIG_USB_DEVICEFS=y | |||
1296 | # USB Host Controller Drivers | 1300 | # USB Host Controller Drivers |
1297 | # | 1301 | # |
1298 | # CONFIG_USB_C67X00_HCD is not set | 1302 | # CONFIG_USB_C67X00_HCD is not set |
1303 | # CONFIG_USB_XHCI_HCD is not set | ||
1299 | CONFIG_USB_EHCI_HCD=m | 1304 | CONFIG_USB_EHCI_HCD=m |
1300 | # CONFIG_USB_EHCI_ROOT_HUB_TT is not set | 1305 | # CONFIG_USB_EHCI_ROOT_HUB_TT is not set |
1301 | # CONFIG_USB_EHCI_TT_NEWSCHED is not set | 1306 | # CONFIG_USB_EHCI_TT_NEWSCHED is not set |
@@ -1374,7 +1379,6 @@ CONFIG_USB_STORAGE=m | |||
1374 | # CONFIG_USB_LD is not set | 1379 | # CONFIG_USB_LD is not set |
1375 | # CONFIG_USB_TRANCEVIBRATOR is not set | 1380 | # CONFIG_USB_TRANCEVIBRATOR is not set |
1376 | # CONFIG_USB_IOWARRIOR is not set | 1381 | # CONFIG_USB_IOWARRIOR is not set |
1377 | # CONFIG_USB_TEST is not set | ||
1378 | # CONFIG_USB_ISIGHTFW is not set | 1382 | # CONFIG_USB_ISIGHTFW is not set |
1379 | # CONFIG_USB_VST is not set | 1383 | # CONFIG_USB_VST is not set |
1380 | # CONFIG_USB_GADGET is not set | 1384 | # CONFIG_USB_GADGET is not set |
@@ -1420,6 +1424,7 @@ CONFIG_RTC_INTF_DEV=y | |||
1420 | # CONFIG_RTC_DRV_S35390A is not set | 1424 | # CONFIG_RTC_DRV_S35390A is not set |
1421 | # CONFIG_RTC_DRV_FM3130 is not set | 1425 | # CONFIG_RTC_DRV_FM3130 is not set |
1422 | # CONFIG_RTC_DRV_RX8581 is not set | 1426 | # CONFIG_RTC_DRV_RX8581 is not set |
1427 | # CONFIG_RTC_DRV_RX8025 is not set | ||
1423 | 1428 | ||
1424 | # | 1429 | # |
1425 | # SPI RTC drivers | 1430 | # SPI RTC drivers |
@@ -1448,6 +1453,10 @@ CONFIG_RTC_DRV_STARFIRE=y | |||
1448 | # CONFIG_DMADEVICES is not set | 1453 | # CONFIG_DMADEVICES is not set |
1449 | # CONFIG_AUXDISPLAY is not set | 1454 | # CONFIG_AUXDISPLAY is not set |
1450 | # CONFIG_UIO is not set | 1455 | # CONFIG_UIO is not set |
1456 | |||
1457 | # | ||
1458 | # TI VLYNQ | ||
1459 | # | ||
1451 | # CONFIG_STAGING is not set | 1460 | # CONFIG_STAGING is not set |
1452 | 1461 | ||
1453 | # | 1462 | # |
@@ -1480,11 +1489,11 @@ CONFIG_FS_MBCACHE=y | |||
1480 | # CONFIG_REISERFS_FS is not set | 1489 | # CONFIG_REISERFS_FS is not set |
1481 | # CONFIG_JFS_FS is not set | 1490 | # CONFIG_JFS_FS is not set |
1482 | CONFIG_FS_POSIX_ACL=y | 1491 | CONFIG_FS_POSIX_ACL=y |
1483 | CONFIG_FILE_LOCKING=y | ||
1484 | # CONFIG_XFS_FS is not set | 1492 | # CONFIG_XFS_FS is not set |
1485 | # CONFIG_GFS2_FS is not set | 1493 | # CONFIG_GFS2_FS is not set |
1486 | # CONFIG_OCFS2_FS is not set | 1494 | # CONFIG_OCFS2_FS is not set |
1487 | # CONFIG_BTRFS_FS is not set | 1495 | # CONFIG_BTRFS_FS is not set |
1496 | CONFIG_FILE_LOCKING=y | ||
1488 | CONFIG_FSNOTIFY=y | 1497 | CONFIG_FSNOTIFY=y |
1489 | CONFIG_DNOTIFY=y | 1498 | CONFIG_DNOTIFY=y |
1490 | CONFIG_INOTIFY=y | 1499 | CONFIG_INOTIFY=y |
@@ -1560,7 +1569,7 @@ CONFIG_NETWORK_FILESYSTEMS=y | |||
1560 | # CONFIG_PARTITION_ADVANCED is not set | 1569 | # CONFIG_PARTITION_ADVANCED is not set |
1561 | CONFIG_MSDOS_PARTITION=y | 1570 | CONFIG_MSDOS_PARTITION=y |
1562 | CONFIG_SUN_PARTITION=y | 1571 | CONFIG_SUN_PARTITION=y |
1563 | CONFIG_NLS=m | 1572 | CONFIG_NLS=y |
1564 | CONFIG_NLS_DEFAULT="iso8859-1" | 1573 | CONFIG_NLS_DEFAULT="iso8859-1" |
1565 | # CONFIG_NLS_CODEPAGE_437 is not set | 1574 | # CONFIG_NLS_CODEPAGE_437 is not set |
1566 | # CONFIG_NLS_CODEPAGE_737 is not set | 1575 | # CONFIG_NLS_CODEPAGE_737 is not set |
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h index 204e4bf64438..5a8c308e2b5c 100644 --- a/arch/sparc/include/asm/dma-mapping.h +++ b/arch/sparc/include/asm/dma-mapping.h | |||
@@ -3,6 +3,7 @@ | |||
3 | 3 | ||
4 | #include <linux/scatterlist.h> | 4 | #include <linux/scatterlist.h> |
5 | #include <linux/mm.h> | 5 | #include <linux/mm.h> |
6 | #include <linux/dma-debug.h> | ||
6 | 7 | ||
7 | #define DMA_ERROR_CODE (~(dma_addr_t)0x0) | 8 | #define DMA_ERROR_CODE (~(dma_addr_t)0x0) |
8 | 9 | ||
@@ -13,142 +14,40 @@ extern int dma_set_mask(struct device *dev, u64 dma_mask); | |||
13 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) | 14 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) |
14 | #define dma_is_consistent(d, h) (1) | 15 | #define dma_is_consistent(d, h) (1) |
15 | 16 | ||
16 | struct dma_ops { | 17 | extern struct dma_map_ops *dma_ops, pci32_dma_ops; |
17 | void *(*alloc_coherent)(struct device *dev, size_t size, | 18 | extern struct bus_type pci_bus_type; |
18 | dma_addr_t *dma_handle, gfp_t flag); | ||
19 | void (*free_coherent)(struct device *dev, size_t size, | ||
20 | void *cpu_addr, dma_addr_t dma_handle); | ||
21 | dma_addr_t (*map_page)(struct device *dev, struct page *page, | ||
22 | unsigned long offset, size_t size, | ||
23 | enum dma_data_direction direction); | ||
24 | void (*unmap_page)(struct device *dev, dma_addr_t dma_addr, | ||
25 | size_t size, | ||
26 | enum dma_data_direction direction); | ||
27 | int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents, | ||
28 | enum dma_data_direction direction); | ||
29 | void (*unmap_sg)(struct device *dev, struct scatterlist *sg, | ||
30 | int nhwentries, | ||
31 | enum dma_data_direction direction); | ||
32 | void (*sync_single_for_cpu)(struct device *dev, | ||
33 | dma_addr_t dma_handle, size_t size, | ||
34 | enum dma_data_direction direction); | ||
35 | void (*sync_single_for_device)(struct device *dev, | ||
36 | dma_addr_t dma_handle, size_t size, | ||
37 | enum dma_data_direction direction); | ||
38 | void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg, | ||
39 | int nelems, | ||
40 | enum dma_data_direction direction); | ||
41 | void (*sync_sg_for_device)(struct device *dev, | ||
42 | struct scatterlist *sg, int nents, | ||
43 | enum dma_data_direction dir); | ||
44 | }; | ||
45 | extern const struct dma_ops *dma_ops; | ||
46 | 19 | ||
47 | static inline void *dma_alloc_coherent(struct device *dev, size_t size, | 20 | static inline struct dma_map_ops *get_dma_ops(struct device *dev) |
48 | dma_addr_t *dma_handle, gfp_t flag) | ||
49 | { | ||
50 | return dma_ops->alloc_coherent(dev, size, dma_handle, flag); | ||
51 | } | ||
52 | |||
53 | static inline void dma_free_coherent(struct device *dev, size_t size, | ||
54 | void *cpu_addr, dma_addr_t dma_handle) | ||
55 | { | ||
56 | dma_ops->free_coherent(dev, size, cpu_addr, dma_handle); | ||
57 | } | ||
58 | |||
59 | static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, | ||
60 | size_t size, | ||
61 | enum dma_data_direction direction) | ||
62 | { | ||
63 | return dma_ops->map_page(dev, virt_to_page(cpu_addr), | ||
64 | (unsigned long)cpu_addr & ~PAGE_MASK, size, | ||
65 | direction); | ||
66 | } | ||
67 | |||
68 | static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, | ||
69 | size_t size, | ||
70 | enum dma_data_direction direction) | ||
71 | { | ||
72 | dma_ops->unmap_page(dev, dma_addr, size, direction); | ||
73 | } | ||
74 | |||
75 | static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, | ||
76 | unsigned long offset, size_t size, | ||
77 | enum dma_data_direction direction) | ||
78 | { | ||
79 | return dma_ops->map_page(dev, page, offset, size, direction); | ||
80 | } | ||
81 | |||
82 | static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address, | ||
83 | size_t size, | ||
84 | enum dma_data_direction direction) | ||
85 | { | ||
86 | dma_ops->unmap_page(dev, dma_address, size, direction); | ||
87 | } | ||
88 | |||
89 | static inline int dma_map_sg(struct device *dev, struct scatterlist *sg, | ||
90 | int nents, enum dma_data_direction direction) | ||
91 | { | ||
92 | return dma_ops->map_sg(dev, sg, nents, direction); | ||
93 | } | ||
94 | |||
95 | static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg, | ||
96 | int nents, enum dma_data_direction direction) | ||
97 | { | 21 | { |
98 | dma_ops->unmap_sg(dev, sg, nents, direction); | 22 | #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI) |
99 | } | 23 | if (dev->bus == &pci_bus_type) |
100 | 24 | return &pci32_dma_ops; | |
101 | static inline void dma_sync_single_for_cpu(struct device *dev, | 25 | #endif |
102 | dma_addr_t dma_handle, size_t size, | 26 | return dma_ops; |
103 | enum dma_data_direction direction) | ||
104 | { | ||
105 | dma_ops->sync_single_for_cpu(dev, dma_handle, size, direction); | ||
106 | } | 27 | } |
107 | 28 | ||
108 | static inline void dma_sync_single_for_device(struct device *dev, | 29 | #include <asm-generic/dma-mapping-common.h> |
109 | dma_addr_t dma_handle, | ||
110 | size_t size, | ||
111 | enum dma_data_direction direction) | ||
112 | { | ||
113 | if (dma_ops->sync_single_for_device) | ||
114 | dma_ops->sync_single_for_device(dev, dma_handle, size, | ||
115 | direction); | ||
116 | } | ||
117 | 30 | ||
118 | static inline void dma_sync_sg_for_cpu(struct device *dev, | 31 | static inline void *dma_alloc_coherent(struct device *dev, size_t size, |
119 | struct scatterlist *sg, int nelems, | 32 | dma_addr_t *dma_handle, gfp_t flag) |
120 | enum dma_data_direction direction) | ||
121 | { | 33 | { |
122 | dma_ops->sync_sg_for_cpu(dev, sg, nelems, direction); | 34 | struct dma_map_ops *ops = get_dma_ops(dev); |
123 | } | 35 | void *cpu_addr; |
124 | 36 | ||
125 | static inline void dma_sync_sg_for_device(struct device *dev, | 37 | cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag); |
126 | struct scatterlist *sg, int nelems, | 38 | debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr); |
127 | enum dma_data_direction direction) | 39 | return cpu_addr; |
128 | { | ||
129 | if (dma_ops->sync_sg_for_device) | ||
130 | dma_ops->sync_sg_for_device(dev, sg, nelems, direction); | ||
131 | } | 40 | } |
132 | 41 | ||
133 | static inline void dma_sync_single_range_for_cpu(struct device *dev, | 42 | static inline void dma_free_coherent(struct device *dev, size_t size, |
134 | dma_addr_t dma_handle, | 43 | void *cpu_addr, dma_addr_t dma_handle) |
135 | unsigned long offset, | ||
136 | size_t size, | ||
137 | enum dma_data_direction dir) | ||
138 | { | 44 | { |
139 | dma_sync_single_for_cpu(dev, dma_handle+offset, size, dir); | 45 | struct dma_map_ops *ops = get_dma_ops(dev); |
140 | } | ||
141 | 46 | ||
142 | static inline void dma_sync_single_range_for_device(struct device *dev, | 47 | debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); |
143 | dma_addr_t dma_handle, | 48 | ops->free_coherent(dev, size, cpu_addr, dma_handle); |
144 | unsigned long offset, | ||
145 | size_t size, | ||
146 | enum dma_data_direction dir) | ||
147 | { | ||
148 | dma_sync_single_for_device(dev, dma_handle+offset, size, dir); | ||
149 | } | 49 | } |
150 | 50 | ||
151 | |||
152 | static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | 51 | static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
153 | { | 52 | { |
154 | return (dma_addr == DMA_ERROR_CODE); | 53 | return (dma_addr == DMA_ERROR_CODE); |
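The hunk above replaces sparc's private struct dma_ops with the generic struct dma_map_ops and routes every mapping call through get_dma_ops(). As a hedged illustration (the driver fragment and names below are hypothetical, not part of this patch), a driver now reaches the right backend without knowing whether it sits on PCI or SBUS:

/* Hypothetical driver fragment: dma_map_single() is now supplied by
 * asm-generic/dma-mapping-common.h and dispatches via get_dma_ops(),
 * so a sparc32 PCI device lands in pci32_dma_ops while everything
 * else uses the global dma_ops.
 */
static int example_map_rx_buffer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... program the hardware with handle, and on completion: */
	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
	return 0;
}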
diff --git a/arch/sparc/include/asm/irq_64.h b/arch/sparc/include/asm/irq_64.h index 1934f2cbf513..a0b443cb3c1f 100644 --- a/arch/sparc/include/asm/irq_64.h +++ b/arch/sparc/include/asm/irq_64.h | |||
@@ -89,8 +89,8 @@ static inline unsigned long get_softint(void) | |||
89 | return retval; | 89 | return retval; |
90 | } | 90 | } |
91 | 91 | ||
92 | void __trigger_all_cpu_backtrace(void); | 92 | void arch_trigger_all_cpu_backtrace(void); |
93 | #define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace() | 93 | #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace |
94 | 94 | ||
95 | extern void *hardirq_stack[NR_CPUS]; | 95 | extern void *hardirq_stack[NR_CPUS]; |
96 | extern void *softirq_stack[NR_CPUS]; | 96 | extern void *softirq_stack[NR_CPUS]; |
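Renaming __trigger_all_cpu_backtrace() to arch_trigger_all_cpu_backtrace(), and defining the macro to itself, follows the usual arch-override idiom so generic code can detect the hook. A sketch of the consumer side, assuming the conventional fallback in the generic headers (not shown in this patch):

/* Sketch of the override idiom this rename targets: if the architecture
 * defined the macro, use its implementation; otherwise fall back to a no-op.
 */
#ifdef arch_trigger_all_cpu_backtrace
#define trigger_all_cpu_backtrace() arch_trigger_all_cpu_backtrace()
#else
static inline void trigger_all_cpu_backtrace(void)
{
}
#endif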
diff --git a/arch/sparc/include/asm/pci.h b/arch/sparc/include/asm/pci.h index 6e14fd179335..d9c031f9910f 100644 --- a/arch/sparc/include/asm/pci.h +++ b/arch/sparc/include/asm/pci.h | |||
@@ -5,4 +5,7 @@ | |||
5 | #else | 5 | #else |
6 | #include <asm/pci_32.h> | 6 | #include <asm/pci_32.h> |
7 | #endif | 7 | #endif |
8 | |||
9 | #include <asm-generic/pci-dma-compat.h> | ||
10 | |||
8 | #endif | 11 | #endif |
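Pulling in asm-generic/pci-dma-compat.h is what lets the hand-rolled pci_* DMA wrappers be deleted from pci_32.h and pci_64.h below: the compat header regenerates them as thin shims over the dma_* API. One representative shim, abridged, so treat the exact body as an assumption:

/* Roughly what asm-generic/pci-dma-compat.h supplies in place of the
 * deleted sparc-specific prototypes:
 */
static inline dma_addr_t
pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
{
	return dma_map_single(hwdev == NULL ? NULL : &hwdev->dev, ptr, size,
			      (enum dma_data_direction)direction);
}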
diff --git a/arch/sparc/include/asm/pci_32.h b/arch/sparc/include/asm/pci_32.h index b41c4c198159..ac0e8369fd97 100644 --- a/arch/sparc/include/asm/pci_32.h +++ b/arch/sparc/include/asm/pci_32.h | |||
@@ -31,42 +31,8 @@ static inline void pcibios_penalize_isa_irq(int irq, int active) | |||
31 | */ | 31 | */ |
32 | #define PCI_DMA_BUS_IS_PHYS (0) | 32 | #define PCI_DMA_BUS_IS_PHYS (0) |
33 | 33 | ||
34 | #include <asm/scatterlist.h> | ||
35 | |||
36 | struct pci_dev; | 34 | struct pci_dev; |
37 | 35 | ||
38 | /* Allocate and map kernel buffer using consistent mode DMA for a device. | ||
39 | * hwdev should be valid struct pci_dev pointer for PCI devices. | ||
40 | */ | ||
41 | extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle); | ||
42 | |||
43 | /* Free and unmap a consistent DMA buffer. | ||
44 | * cpu_addr is what was returned from pci_alloc_consistent, | ||
45 | * size must be the same as what as passed into pci_alloc_consistent, | ||
46 | * and likewise dma_addr must be the same as what *dma_addrp was set to. | ||
47 | * | ||
48 | * References to the memory and mappings associated with cpu_addr/dma_addr | ||
49 | * past this call are illegal. | ||
50 | */ | ||
51 | extern void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle); | ||
52 | |||
53 | /* Map a single buffer of the indicated size for DMA in streaming mode. | ||
54 | * The 32-bit bus address to use is returned. | ||
55 | * | ||
56 | * Once the device is given the dma address, the device owns this memory | ||
57 | * until either pci_unmap_single or pci_dma_sync_single_for_cpu is performed. | ||
58 | */ | ||
59 | extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction); | ||
60 | |||
61 | /* Unmap a single streaming mode DMA translation. The dma_addr and size | ||
62 | * must match what was provided for in a previous pci_map_single call. All | ||
63 | * other usages are undefined. | ||
64 | * | ||
65 | * After this call, reads by the cpu to the buffer are guaranteed to see | ||
66 | * whatever the device wrote there. | ||
67 | */ | ||
68 | extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction); | ||
69 | |||
70 | /* pci_unmap_{single,page} is not a nop, thus... */ | 36 | /* pci_unmap_{single,page} is not a nop, thus... */ |
71 | #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ | 37 | #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ |
72 | dma_addr_t ADDR_NAME; | 38 | dma_addr_t ADDR_NAME; |
@@ -81,69 +47,6 @@ extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t | |||
81 | #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ | 47 | #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ |
82 | (((PTR)->LEN_NAME) = (VAL)) | 48 | (((PTR)->LEN_NAME) = (VAL)) |
83 | 49 | ||
84 | /* | ||
85 | * Same as above, only with pages instead of mapped addresses. | ||
86 | */ | ||
87 | extern dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page, | ||
88 | unsigned long offset, size_t size, int direction); | ||
89 | extern void pci_unmap_page(struct pci_dev *hwdev, | ||
90 | dma_addr_t dma_address, size_t size, int direction); | ||
91 | |||
92 | /* Map a set of buffers described by scatterlist in streaming | ||
93 | * mode for DMA. This is the scatter-gather version of the | ||
94 | * above pci_map_single interface. Here the scatter gather list | ||
95 | * elements are each tagged with the appropriate dma address | ||
96 | * and length. They are obtained via sg_dma_{address,length}(SG). | ||
97 | * | ||
98 | * NOTE: An implementation may be able to use a smaller number of | ||
99 | * DMA address/length pairs than there are SG table elements. | ||
100 | * (for example via virtual mapping capabilities) | ||
101 | * The routine returns the number of addr/length pairs actually | ||
102 | * used, at most nents. | ||
103 | * | ||
104 | * Device ownership issues as mentioned above for pci_map_single are | ||
105 | * the same here. | ||
106 | */ | ||
107 | extern int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction); | ||
108 | |||
109 | /* Unmap a set of streaming mode DMA translations. | ||
110 | * Again, cpu read rules concerning calls here are the same as for | ||
111 | * pci_unmap_single() above. | ||
112 | */ | ||
113 | extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nhwents, int direction); | ||
114 | |||
115 | /* Make physical memory consistent for a single | ||
116 | * streaming mode DMA translation after a transfer. | ||
117 | * | ||
118 | * If you perform a pci_map_single() but wish to interrogate the | ||
119 | * buffer using the cpu, yet do not wish to tear down the PCI dma | ||
120 | * mapping, you must call this function before doing so. At the | ||
121 | * next point you give the PCI dma address back to the card, you | ||
122 | * must first perform a pci_dma_sync_for_device, and then the device | ||
123 | * again owns the buffer. | ||
124 | */ | ||
125 | extern void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction); | ||
126 | extern void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction); | ||
127 | |||
128 | /* Make physical memory consistent for a set of streaming | ||
129 | * mode DMA translations after a transfer. | ||
130 | * | ||
131 | * The same as pci_dma_sync_single_* but for a scatter-gather list, | ||
132 | * same rules and usage. | ||
133 | */ | ||
134 | extern void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction); | ||
135 | extern void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction); | ||
136 | |||
137 | /* Return whether the given PCI device DMA address mask can | ||
138 | * be supported properly. For example, if your device can | ||
139 | * only drive the low 24-bits during PCI bus mastering, then | ||
140 | * you would pass 0x00ffffff as the mask to this function. | ||
141 | */ | ||
142 | static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask) | ||
143 | { | ||
144 | return 1; | ||
145 | } | ||
146 | |||
147 | #ifdef CONFIG_PCI | 50 | #ifdef CONFIG_PCI |
148 | static inline void pci_dma_burst_advice(struct pci_dev *pdev, | 51 | static inline void pci_dma_burst_advice(struct pci_dev *pdev, |
149 | enum pci_dma_burst_strategy *strat, | 52 | enum pci_dma_burst_strategy *strat, |
@@ -154,14 +57,6 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev, | |||
154 | } | 57 | } |
155 | #endif | 58 | #endif |
156 | 59 | ||
157 | #define PCI_DMA_ERROR_CODE (~(dma_addr_t)0x0) | ||
158 | |||
159 | static inline int pci_dma_mapping_error(struct pci_dev *pdev, | ||
160 | dma_addr_t dma_addr) | ||
161 | { | ||
162 | return (dma_addr == PCI_DMA_ERROR_CODE); | ||
163 | } | ||
164 | |||
165 | struct device_node; | 60 | struct device_node; |
166 | extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev); | 61 | extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev); |
167 | 62 | ||
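The unmap-state macros survive this purge because pci_unmap_{single,page} really does work on sparc, so drivers must remember what to hand back. Driver-side usage for context (the descriptor struct and field names are hypothetical):

/* The macros kept above expand to real fields here, so a descriptor
 * can stash the mapping for the eventual pci_unmap_single() call.
 */
struct example_tx_desc {
	struct sk_buff *skb;
	DECLARE_PCI_UNMAP_ADDR(mapping)
	DECLARE_PCI_UNMAP_LEN(maplen)
};

static void example_stash_mapping(struct example_tx_desc *d,
				  dma_addr_t dma, size_t len)
{
	pci_unmap_addr_set(d, mapping, dma);
	pci_unmap_len_set(d, maplen, len);
}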
diff --git a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h index 7a1e3566e59c..5cc9f6aa5494 100644 --- a/arch/sparc/include/asm/pci_64.h +++ b/arch/sparc/include/asm/pci_64.h | |||
@@ -35,37 +35,6 @@ static inline void pcibios_penalize_isa_irq(int irq, int active) | |||
35 | */ | 35 | */ |
36 | #define PCI_DMA_BUS_IS_PHYS (0) | 36 | #define PCI_DMA_BUS_IS_PHYS (0) |
37 | 37 | ||
38 | static inline void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, | ||
39 | dma_addr_t *dma_handle) | ||
40 | { | ||
41 | return dma_alloc_coherent(&pdev->dev, size, dma_handle, GFP_ATOMIC); | ||
42 | } | ||
43 | |||
44 | static inline void pci_free_consistent(struct pci_dev *pdev, size_t size, | ||
45 | void *vaddr, dma_addr_t dma_handle) | ||
46 | { | ||
47 | return dma_free_coherent(&pdev->dev, size, vaddr, dma_handle); | ||
48 | } | ||
49 | |||
50 | static inline dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, | ||
51 | size_t size, int direction) | ||
52 | { | ||
53 | return dma_map_single(&pdev->dev, ptr, size, | ||
54 | (enum dma_data_direction) direction); | ||
55 | } | ||
56 | |||
57 | static inline void pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, | ||
58 | size_t size, int direction) | ||
59 | { | ||
60 | dma_unmap_single(&pdev->dev, dma_addr, size, | ||
61 | (enum dma_data_direction) direction); | ||
62 | } | ||
63 | |||
64 | #define pci_map_page(dev, page, off, size, dir) \ | ||
65 | pci_map_single(dev, (page_address(page) + (off)), size, dir) | ||
66 | #define pci_unmap_page(dev,addr,sz,dir) \ | ||
67 | pci_unmap_single(dev,addr,sz,dir) | ||
68 | |||
69 | /* pci_unmap_{single,page} is not a nop, thus... */ | 38 | /* pci_unmap_{single,page} is not a nop, thus... */ |
70 | #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ | 39 | #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ |
71 | dma_addr_t ADDR_NAME; | 40 | dma_addr_t ADDR_NAME; |
@@ -80,57 +49,6 @@ static inline void pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, | |||
80 | #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ | 49 | #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ |
81 | (((PTR)->LEN_NAME) = (VAL)) | 50 | (((PTR)->LEN_NAME) = (VAL)) |
82 | 51 | ||
83 | static inline int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, | ||
84 | int nents, int direction) | ||
85 | { | ||
86 | return dma_map_sg(&pdev->dev, sg, nents, | ||
87 | (enum dma_data_direction) direction); | ||
88 | } | ||
89 | |||
90 | static inline void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, | ||
91 | int nents, int direction) | ||
92 | { | ||
93 | dma_unmap_sg(&pdev->dev, sg, nents, | ||
94 | (enum dma_data_direction) direction); | ||
95 | } | ||
96 | |||
97 | static inline void pci_dma_sync_single_for_cpu(struct pci_dev *pdev, | ||
98 | dma_addr_t dma_handle, | ||
99 | size_t size, int direction) | ||
100 | { | ||
101 | dma_sync_single_for_cpu(&pdev->dev, dma_handle, size, | ||
102 | (enum dma_data_direction) direction); | ||
103 | } | ||
104 | |||
105 | static inline void pci_dma_sync_single_for_device(struct pci_dev *pdev, | ||
106 | dma_addr_t dma_handle, | ||
107 | size_t size, int direction) | ||
108 | { | ||
109 | /* No flushing needed to sync cpu writes to the device. */ | ||
110 | } | ||
111 | |||
112 | static inline void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, | ||
113 | struct scatterlist *sg, | ||
114 | int nents, int direction) | ||
115 | { | ||
116 | dma_sync_sg_for_cpu(&pdev->dev, sg, nents, | ||
117 | (enum dma_data_direction) direction); | ||
118 | } | ||
119 | |||
120 | static inline void pci_dma_sync_sg_for_device(struct pci_dev *pdev, | ||
121 | struct scatterlist *sg, | ||
122 | int nelems, int direction) | ||
123 | { | ||
124 | /* No flushing needed to sync cpu writes to the device. */ | ||
125 | } | ||
126 | |||
127 | /* Return whether the given PCI device DMA address mask can | ||
128 | * be supported properly. For example, if your device can | ||
129 | * only drive the low 24-bits during PCI bus mastering, then | ||
130 | * you would pass 0x00ffffff as the mask to this function. | ||
131 | */ | ||
132 | extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask); | ||
133 | |||
134 | /* PCI IOMMU mapping bypass support. */ | 52 | /* PCI IOMMU mapping bypass support. */ |
135 | 53 | ||
136 | /* PCI 64-bit addressing works for all slots on all controller | 54 | /* PCI 64-bit addressing works for all slots on all controller |
@@ -140,12 +58,6 @@ extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask); | |||
140 | #define PCI64_REQUIRED_MASK (~(dma64_addr_t)0) | 58 | #define PCI64_REQUIRED_MASK (~(dma64_addr_t)0) |
141 | #define PCI64_ADDR_BASE 0xfffc000000000000UL | 59 | #define PCI64_ADDR_BASE 0xfffc000000000000UL |
142 | 60 | ||
143 | static inline int pci_dma_mapping_error(struct pci_dev *pdev, | ||
144 | dma_addr_t dma_addr) | ||
145 | { | ||
146 | return dma_mapping_error(&pdev->dev, dma_addr); | ||
147 | } | ||
148 | |||
149 | #ifdef CONFIG_PCI | 61 | #ifdef CONFIG_PCI |
150 | static inline void pci_dma_burst_advice(struct pci_dev *pdev, | 62 | static inline void pci_dma_burst_advice(struct pci_dev *pdev, |
151 | enum pci_dma_burst_strategy *strat, | 63 | enum pci_dma_burst_strategy *strat, |
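The PCI64_ADDR_BASE / PCI64_REQUIRED_MASK pair kept above describes sparc64's IOMMU-bypass window for devices that can drive all 64 address bits. As an illustration of the scheme only (the formula is assumed from the constants' definitions; this patch does not touch it):

/* Hypothetical bypass translation: the bus address is the physical
 * address offset into the bypass window.
 */
static dma_addr_t example_bypass_addr(void *cpu_addr)
{
	return PCI64_ADDR_BASE + __pa(cpu_addr);
}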
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index b049abf9902f..0ff92fa22064 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h | |||
@@ -726,11 +726,17 @@ extern unsigned long pte_file(pte_t); | |||
726 | extern pte_t pgoff_to_pte(unsigned long); | 726 | extern pte_t pgoff_to_pte(unsigned long); |
727 | #define PTE_FILE_MAX_BITS (64UL - PAGE_SHIFT - 1UL) | 727 | #define PTE_FILE_MAX_BITS (64UL - PAGE_SHIFT - 1UL) |
728 | 728 | ||
729 | extern unsigned long *sparc64_valid_addr_bitmap; | 729 | extern unsigned long sparc64_valid_addr_bitmap[]; |
730 | 730 | ||
731 | /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */ | 731 | /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */ |
732 | #define kern_addr_valid(addr) \ | 732 | static inline bool kern_addr_valid(unsigned long addr) |
733 | (test_bit(__pa((unsigned long)(addr))>>22, sparc64_valid_addr_bitmap)) | 733 | { |
734 | unsigned long paddr = __pa(addr); | ||
735 | |||
736 | if ((paddr >> 41UL) != 0UL) | ||
737 | return false; | ||
738 | return test_bit(paddr >> 22, sparc64_valid_addr_bitmap); | ||
739 | } | ||
734 | 740 | ||
735 | extern int page_in_phys_avail(unsigned long paddr); | 741 | extern int page_in_phys_avail(unsigned long paddr); |
736 | 742 | ||
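Rewriting kern_addr_valid() as an inline adds a range check the old macro lacked: each bit of sparc64_valid_addr_bitmap covers a 4 MB chunk (1UL << 22 bytes), and physical addresses at or above 1UL << 41 now fail fast instead of indexing past the end of the bitmap. A short worked example of the arithmetic (the value is hypothetical):

/* paddr = 32 MB  ->  bit index 32 MB / 4 MB = 8 in the bitmap;
 * paddr >= 1UL << 41  ->  rejected before test_bit() ever runs.
 */
unsigned long paddr = 0x2000000UL;	/* 32 MB, hypothetical */
bool valid = ((paddr >> 41UL) == 0UL) &&
	     test_bit(paddr >> 22, sparc64_valid_addr_bitmap);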
diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h index 46f91ab66a50..857630cff636 100644 --- a/arch/sparc/include/asm/spinlock_32.h +++ b/arch/sparc/include/asm/spinlock_32.h | |||
@@ -76,7 +76,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) | |||
76 | * | 76 | * |
77 | * Unfortunately this scheme limits us to ~16,000,000 cpus. | 77 | * Unfortunately this scheme limits us to ~16,000,000 cpus. |
78 | */ | 78 | */ |
79 | static inline void __read_lock(raw_rwlock_t *rw) | 79 | static inline void arch_read_lock(raw_rwlock_t *rw) |
80 | { | 80 | { |
81 | register raw_rwlock_t *lp asm("g1"); | 81 | register raw_rwlock_t *lp asm("g1"); |
82 | lp = rw; | 82 | lp = rw; |
@@ -92,11 +92,11 @@ static inline void __read_lock(raw_rwlock_t *rw) | |||
92 | #define __raw_read_lock(lock) \ | 92 | #define __raw_read_lock(lock) \ |
93 | do { unsigned long flags; \ | 93 | do { unsigned long flags; \ |
94 | local_irq_save(flags); \ | 94 | local_irq_save(flags); \ |
95 | __read_lock(lock); \ | 95 | arch_read_lock(lock); \ |
96 | local_irq_restore(flags); \ | 96 | local_irq_restore(flags); \ |
97 | } while(0) | 97 | } while(0) |
98 | 98 | ||
99 | static inline void __read_unlock(raw_rwlock_t *rw) | 99 | static inline void arch_read_unlock(raw_rwlock_t *rw) |
100 | { | 100 | { |
101 | register raw_rwlock_t *lp asm("g1"); | 101 | register raw_rwlock_t *lp asm("g1"); |
102 | lp = rw; | 102 | lp = rw; |
@@ -112,7 +112,7 @@ static inline void __read_unlock(raw_rwlock_t *rw) | |||
112 | #define __raw_read_unlock(lock) \ | 112 | #define __raw_read_unlock(lock) \ |
113 | do { unsigned long flags; \ | 113 | do { unsigned long flags; \ |
114 | local_irq_save(flags); \ | 114 | local_irq_save(flags); \ |
115 | __read_unlock(lock); \ | 115 | arch_read_unlock(lock); \ |
116 | local_irq_restore(flags); \ | 116 | local_irq_restore(flags); \ |
117 | } while(0) | 117 | } while(0) |
118 | 118 | ||
@@ -150,7 +150,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) | |||
150 | return (val == 0); | 150 | return (val == 0); |
151 | } | 151 | } |
152 | 152 | ||
153 | static inline int __read_trylock(raw_rwlock_t *rw) | 153 | static inline int arch_read_trylock(raw_rwlock_t *rw) |
154 | { | 154 | { |
155 | register raw_rwlock_t *lp asm("g1"); | 155 | register raw_rwlock_t *lp asm("g1"); |
156 | register int res asm("o0"); | 156 | register int res asm("o0"); |
@@ -169,7 +169,7 @@ static inline int __read_trylock(raw_rwlock_t *rw) | |||
169 | ({ unsigned long flags; \ | 169 | ({ unsigned long flags; \ |
170 | int res; \ | 170 | int res; \ |
171 | local_irq_save(flags); \ | 171 | local_irq_save(flags); \ |
172 | res = __read_trylock(lock); \ | 172 | res = arch_read_trylock(lock); \ |
173 | local_irq_restore(flags); \ | 173 | local_irq_restore(flags); \ |
174 | res; \ | 174 | res; \ |
175 | }) | 175 | }) |
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h index f6b2b92ad8d2..43e514783582 100644 --- a/arch/sparc/include/asm/spinlock_64.h +++ b/arch/sparc/include/asm/spinlock_64.h | |||
@@ -92,7 +92,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla | |||
92 | 92 | ||
93 | /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */ | 93 | /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */ |
94 | 94 | ||
95 | static void inline __read_lock(raw_rwlock_t *lock) | 95 | static void inline arch_read_lock(raw_rwlock_t *lock) |
96 | { | 96 | { |
97 | unsigned long tmp1, tmp2; | 97 | unsigned long tmp1, tmp2; |
98 | 98 | ||
@@ -115,7 +115,7 @@ static void inline __read_lock(raw_rwlock_t *lock) | |||
115 | : "memory"); | 115 | : "memory"); |
116 | } | 116 | } |
117 | 117 | ||
118 | static int inline __read_trylock(raw_rwlock_t *lock) | 118 | static int inline arch_read_trylock(raw_rwlock_t *lock) |
119 | { | 119 | { |
120 | int tmp1, tmp2; | 120 | int tmp1, tmp2; |
121 | 121 | ||
@@ -136,7 +136,7 @@ static int inline __read_trylock(raw_rwlock_t *lock) | |||
136 | return tmp1; | 136 | return tmp1; |
137 | } | 137 | } |
138 | 138 | ||
139 | static void inline __read_unlock(raw_rwlock_t *lock) | 139 | static void inline arch_read_unlock(raw_rwlock_t *lock) |
140 | { | 140 | { |
141 | unsigned long tmp1, tmp2; | 141 | unsigned long tmp1, tmp2; |
142 | 142 | ||
@@ -152,7 +152,7 @@ static void inline __read_unlock(raw_rwlock_t *lock) | |||
152 | : "memory"); | 152 | : "memory"); |
153 | } | 153 | } |
154 | 154 | ||
155 | static void inline __write_lock(raw_rwlock_t *lock) | 155 | static void inline arch_write_lock(raw_rwlock_t *lock) |
156 | { | 156 | { |
157 | unsigned long mask, tmp1, tmp2; | 157 | unsigned long mask, tmp1, tmp2; |
158 | 158 | ||
@@ -177,7 +177,7 @@ static void inline __write_lock(raw_rwlock_t *lock) | |||
177 | : "memory"); | 177 | : "memory"); |
178 | } | 178 | } |
179 | 179 | ||
180 | static void inline __write_unlock(raw_rwlock_t *lock) | 180 | static void inline arch_write_unlock(raw_rwlock_t *lock) |
181 | { | 181 | { |
182 | __asm__ __volatile__( | 182 | __asm__ __volatile__( |
183 | " stw %%g0, [%0]" | 183 | " stw %%g0, [%0]" |
@@ -186,7 +186,7 @@ static void inline __write_unlock(raw_rwlock_t *lock) | |||
186 | : "memory"); | 186 | : "memory"); |
187 | } | 187 | } |
188 | 188 | ||
189 | static int inline __write_trylock(raw_rwlock_t *lock) | 189 | static int inline arch_write_trylock(raw_rwlock_t *lock) |
190 | { | 190 | { |
191 | unsigned long mask, tmp1, tmp2, result; | 191 | unsigned long mask, tmp1, tmp2, result; |
192 | 192 | ||
@@ -210,14 +210,14 @@ static int inline __write_trylock(raw_rwlock_t *lock) | |||
210 | return result; | 210 | return result; |
211 | } | 211 | } |
212 | 212 | ||
213 | #define __raw_read_lock(p) __read_lock(p) | 213 | #define __raw_read_lock(p) arch_read_lock(p) |
214 | #define __raw_read_lock_flags(p, f) __read_lock(p) | 214 | #define __raw_read_lock_flags(p, f) arch_read_lock(p) |
215 | #define __raw_read_trylock(p) __read_trylock(p) | 215 | #define __raw_read_trylock(p) arch_read_trylock(p) |
216 | #define __raw_read_unlock(p) __read_unlock(p) | 216 | #define __raw_read_unlock(p) arch_read_unlock(p) |
217 | #define __raw_write_lock(p) __write_lock(p) | 217 | #define __raw_write_lock(p) arch_write_lock(p) |
218 | #define __raw_write_lock_flags(p, f) __write_lock(p) | 218 | #define __raw_write_lock_flags(p, f) arch_write_lock(p) |
219 | #define __raw_write_unlock(p) __write_unlock(p) | 219 | #define __raw_write_unlock(p) arch_write_unlock(p) |
220 | #define __raw_write_trylock(p) __write_trylock(p) | 220 | #define __raw_write_trylock(p) arch_write_trylock(p) |
221 | 221 | ||
222 | #define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) | 222 | #define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) |
223 | #define __raw_write_can_lock(rw) (!(rw)->lock) | 223 | #define __raw_write_can_lock(rw) (!(rw)->lock) |
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile index 475ce4696acd..29b88a580661 100644 --- a/arch/sparc/kernel/Makefile +++ b/arch/sparc/kernel/Makefile | |||
@@ -61,7 +61,7 @@ obj-$(CONFIG_SPARC64_SMP) += cpumap.o | |||
61 | obj-$(CONFIG_SPARC32) += devres.o | 61 | obj-$(CONFIG_SPARC32) += devres.o |
62 | devres-y := ../../../kernel/irq/devres.o | 62 | devres-y := ../../../kernel/irq/devres.o |
63 | 63 | ||
64 | obj-$(CONFIG_SPARC32) += dma.o | 64 | obj-y += dma.o |
65 | 65 | ||
66 | obj-$(CONFIG_SPARC32_PCI) += pcic.o | 66 | obj-$(CONFIG_SPARC32_PCI) += pcic.o |
67 | 67 | ||
diff --git a/arch/sparc/kernel/dma.c b/arch/sparc/kernel/dma.c index 524c32f97c55..e1ba8ee21b9a 100644 --- a/arch/sparc/kernel/dma.c +++ b/arch/sparc/kernel/dma.c | |||
@@ -1,178 +1,13 @@ | |||
1 | /* dma.c: PCI and SBUS DMA accessors for 32-bit sparc. | ||
2 | * | ||
3 | * Copyright (C) 2008 David S. Miller <davem@davemloft.net> | ||
4 | */ | ||
5 | |||
6 | #include <linux/kernel.h> | 1 | #include <linux/kernel.h> |
7 | #include <linux/module.h> | 2 | #include <linux/module.h> |
8 | #include <linux/dma-mapping.h> | 3 | #include <linux/dma-mapping.h> |
9 | #include <linux/scatterlist.h> | 4 | #include <linux/dma-debug.h> |
10 | #include <linux/mm.h> | ||
11 | |||
12 | #ifdef CONFIG_PCI | ||
13 | #include <linux/pci.h> | ||
14 | #endif | ||
15 | 5 | ||
16 | #include "dma.h" | 6 | #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 15) |
17 | 7 | ||
18 | int dma_supported(struct device *dev, u64 mask) | 8 | static int __init dma_init(void) |
19 | { | 9 | { |
20 | #ifdef CONFIG_PCI | 10 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); |
21 | if (dev->bus == &pci_bus_type) | ||
22 | return pci_dma_supported(to_pci_dev(dev), mask); | ||
23 | #endif | ||
24 | return 0; | 11 | return 0; |
25 | } | 12 | } |
26 | EXPORT_SYMBOL(dma_supported); | 13 | fs_initcall(dma_init); |
27 | |||
28 | int dma_set_mask(struct device *dev, u64 dma_mask) | ||
29 | { | ||
30 | #ifdef CONFIG_PCI | ||
31 | if (dev->bus == &pci_bus_type) | ||
32 | return pci_set_dma_mask(to_pci_dev(dev), dma_mask); | ||
33 | #endif | ||
34 | return -EOPNOTSUPP; | ||
35 | } | ||
36 | EXPORT_SYMBOL(dma_set_mask); | ||
37 | |||
38 | static void *dma32_alloc_coherent(struct device *dev, size_t size, | ||
39 | dma_addr_t *dma_handle, gfp_t flag) | ||
40 | { | ||
41 | #ifdef CONFIG_PCI | ||
42 | if (dev->bus == &pci_bus_type) | ||
43 | return pci_alloc_consistent(to_pci_dev(dev), size, dma_handle); | ||
44 | #endif | ||
45 | return sbus_alloc_consistent(dev, size, dma_handle); | ||
46 | } | ||
47 | |||
48 | static void dma32_free_coherent(struct device *dev, size_t size, | ||
49 | void *cpu_addr, dma_addr_t dma_handle) | ||
50 | { | ||
51 | #ifdef CONFIG_PCI | ||
52 | if (dev->bus == &pci_bus_type) { | ||
53 | pci_free_consistent(to_pci_dev(dev), size, | ||
54 | cpu_addr, dma_handle); | ||
55 | return; | ||
56 | } | ||
57 | #endif | ||
58 | sbus_free_consistent(dev, size, cpu_addr, dma_handle); | ||
59 | } | ||
60 | |||
61 | static dma_addr_t dma32_map_page(struct device *dev, struct page *page, | ||
62 | unsigned long offset, size_t size, | ||
63 | enum dma_data_direction direction) | ||
64 | { | ||
65 | #ifdef CONFIG_PCI | ||
66 | if (dev->bus == &pci_bus_type) | ||
67 | return pci_map_page(to_pci_dev(dev), page, offset, | ||
68 | size, (int)direction); | ||
69 | #endif | ||
70 | return sbus_map_single(dev, page_address(page) + offset, | ||
71 | size, (int)direction); | ||
72 | } | ||
73 | |||
74 | static void dma32_unmap_page(struct device *dev, dma_addr_t dma_address, | ||
75 | size_t size, enum dma_data_direction direction) | ||
76 | { | ||
77 | #ifdef CONFIG_PCI | ||
78 | if (dev->bus == &pci_bus_type) { | ||
79 | pci_unmap_page(to_pci_dev(dev), dma_address, | ||
80 | size, (int)direction); | ||
81 | return; | ||
82 | } | ||
83 | #endif | ||
84 | sbus_unmap_single(dev, dma_address, size, (int)direction); | ||
85 | } | ||
86 | |||
87 | static int dma32_map_sg(struct device *dev, struct scatterlist *sg, | ||
88 | int nents, enum dma_data_direction direction) | ||
89 | { | ||
90 | #ifdef CONFIG_PCI | ||
91 | if (dev->bus == &pci_bus_type) | ||
92 | return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction); | ||
93 | #endif | ||
94 | return sbus_map_sg(dev, sg, nents, direction); | ||
95 | } | ||
96 | |||
97 | void dma32_unmap_sg(struct device *dev, struct scatterlist *sg, | ||
98 | int nents, enum dma_data_direction direction) | ||
99 | { | ||
100 | #ifdef CONFIG_PCI | ||
101 | if (dev->bus == &pci_bus_type) { | ||
102 | pci_unmap_sg(to_pci_dev(dev), sg, nents, (int)direction); | ||
103 | return; | ||
104 | } | ||
105 | #endif | ||
106 | sbus_unmap_sg(dev, sg, nents, (int)direction); | ||
107 | } | ||
108 | |||
109 | static void dma32_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, | ||
110 | size_t size, | ||
111 | enum dma_data_direction direction) | ||
112 | { | ||
113 | #ifdef CONFIG_PCI | ||
114 | if (dev->bus == &pci_bus_type) { | ||
115 | pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle, | ||
116 | size, (int)direction); | ||
117 | return; | ||
118 | } | ||
119 | #endif | ||
120 | sbus_dma_sync_single_for_cpu(dev, dma_handle, size, (int) direction); | ||
121 | } | ||
122 | |||
123 | static void dma32_sync_single_for_device(struct device *dev, | ||
124 | dma_addr_t dma_handle, size_t size, | ||
125 | enum dma_data_direction direction) | ||
126 | { | ||
127 | #ifdef CONFIG_PCI | ||
128 | if (dev->bus == &pci_bus_type) { | ||
129 | pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle, | ||
130 | size, (int)direction); | ||
131 | return; | ||
132 | } | ||
133 | #endif | ||
134 | sbus_dma_sync_single_for_device(dev, dma_handle, size, (int) direction); | ||
135 | } | ||
136 | |||
137 | static void dma32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, | ||
138 | int nelems, enum dma_data_direction direction) | ||
139 | { | ||
140 | #ifdef CONFIG_PCI | ||
141 | if (dev->bus == &pci_bus_type) { | ||
142 | pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg, | ||
143 | nelems, (int)direction); | ||
144 | return; | ||
145 | } | ||
146 | #endif | ||
147 | BUG(); | ||
148 | } | ||
149 | |||
150 | static void dma32_sync_sg_for_device(struct device *dev, | ||
151 | struct scatterlist *sg, int nelems, | ||
152 | enum dma_data_direction direction) | ||
153 | { | ||
154 | #ifdef CONFIG_PCI | ||
155 | if (dev->bus == &pci_bus_type) { | ||
156 | pci_dma_sync_sg_for_device(to_pci_dev(dev), sg, | ||
157 | nelems, (int)direction); | ||
158 | return; | ||
159 | } | ||
160 | #endif | ||
161 | BUG(); | ||
162 | } | ||
163 | |||
164 | static const struct dma_ops dma32_dma_ops = { | ||
165 | .alloc_coherent = dma32_alloc_coherent, | ||
166 | .free_coherent = dma32_free_coherent, | ||
167 | .map_page = dma32_map_page, | ||
168 | .unmap_page = dma32_unmap_page, | ||
169 | .map_sg = dma32_map_sg, | ||
170 | .unmap_sg = dma32_unmap_sg, | ||
171 | .sync_single_for_cpu = dma32_sync_single_for_cpu, | ||
172 | .sync_single_for_device = dma32_sync_single_for_device, | ||
173 | .sync_sg_for_cpu = dma32_sync_sg_for_cpu, | ||
174 | .sync_sg_for_device = dma32_sync_sg_for_device, | ||
175 | }; | ||
176 | |||
177 | const struct dma_ops *dma_ops = &dma32_dma_ops; | ||
178 | EXPORT_SYMBOL(dma_ops); | ||
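The rewritten dma.c shrinks to a dma-debug bootstrap: it preallocates 2^15 tracking entries and registers via fs_initcall, so every later map/unmap is checked when CONFIG_DMA_API_DEBUG is enabled. A hypothetical driver bug of the kind this infrastructure catches:

/* With the debug_dma_* hooks now wired into the sparc DMA paths, a
 * mismatched unmap like this (hypothetical) triggers a dma-debug
 * warning instead of passing silently:
 */
static void example_buggy_teardown(struct device *dev, void *buf)
{
	dma_addr_t h = dma_map_single(dev, buf, 64, DMA_TO_DEVICE);

	/* size disagrees with the map above: dma-debug will complain */
	dma_unmap_single(dev, h, 128, DMA_TO_DEVICE);
}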
diff --git a/arch/sparc/kernel/dma.h b/arch/sparc/kernel/dma.h deleted file mode 100644 index f8d8951adb53..000000000000 --- a/arch/sparc/kernel/dma.h +++ /dev/null | |||
@@ -1,14 +0,0 @@ | |||
1 | void *sbus_alloc_consistent(struct device *dev, long len, u32 *dma_addrp); | ||
2 | void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba); | ||
3 | dma_addr_t sbus_map_single(struct device *dev, void *va, | ||
4 | size_t len, int direction); | ||
5 | void sbus_unmap_single(struct device *dev, dma_addr_t ba, | ||
6 | size_t n, int direction); | ||
7 | int sbus_map_sg(struct device *dev, struct scatterlist *sg, | ||
8 | int n, int direction); | ||
9 | void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, | ||
10 | int n, int direction); | ||
11 | void sbus_dma_sync_single_for_cpu(struct device *dev, dma_addr_t ba, | ||
12 | size_t size, int direction); | ||
13 | void sbus_dma_sync_single_for_device(struct device *dev, dma_addr_t ba, | ||
14 | size_t size, int direction); | ||
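
With this header gone, the sbus_* helpers stop being a cross-file API and become static methods reachable only through an ops table. The hunks below convert to the generic method table from linux/dma-mapping.h; trimmed to the methods actually assigned in this diff, its 2.6.31-era shape is approximately the following (consult the real header for the authoritative definition):

struct dma_map_ops {    /* sketch, trimmed; see linux/dma-mapping.h */
        void *(*alloc_coherent)(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t gfp);
        void (*free_coherent)(struct device *dev, size_t size,
                              void *vaddr, dma_addr_t dma_handle);
        dma_addr_t (*map_page)(struct device *dev, struct page *page,
                               unsigned long offset, size_t size,
                               enum dma_data_direction dir,
                               struct dma_attrs *attrs);
        void (*unmap_page)(struct device *dev, dma_addr_t handle, size_t size,
                           enum dma_data_direction dir, struct dma_attrs *attrs);
        int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
                      enum dma_data_direction dir, struct dma_attrs *attrs);
        void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nents,
                         enum dma_data_direction dir, struct dma_attrs *attrs);
        void (*sync_single_for_cpu)(struct device *dev, dma_addr_t addr,
                                    size_t size, enum dma_data_direction dir);
        void (*sync_single_for_device)(struct device *dev, dma_addr_t addr,
                                       size_t size, enum dma_data_direction dir);
        void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
                                int nents, enum dma_data_direction dir);
        void (*sync_sg_for_device)(struct device *dev, struct scatterlist *sg,
                                   int nents, enum dma_data_direction dir);
};

Note the struct dma_attrs * parameter on the map/unmap methods — that is exactly what every converted function below grows.
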
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index 0aeaefe696b9..7690cc219ecc 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c | |||
@@ -353,7 +353,8 @@ static void dma_4u_free_coherent(struct device *dev, size_t size, | |||
353 | 353 | ||
354 | static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page, | 354 | static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page, |
355 | unsigned long offset, size_t sz, | 355 | unsigned long offset, size_t sz, |
356 | enum dma_data_direction direction) | 356 | enum dma_data_direction direction, |
357 | struct dma_attrs *attrs) | ||
357 | { | 358 | { |
358 | struct iommu *iommu; | 359 | struct iommu *iommu; |
359 | struct strbuf *strbuf; | 360 | struct strbuf *strbuf; |
@@ -474,7 +475,8 @@ do_flush_sync: | |||
474 | } | 475 | } |
475 | 476 | ||
476 | static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr, | 477 | static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr, |
477 | size_t sz, enum dma_data_direction direction) | 478 | size_t sz, enum dma_data_direction direction, |
479 | struct dma_attrs *attrs) | ||
478 | { | 480 | { |
479 | struct iommu *iommu; | 481 | struct iommu *iommu; |
480 | struct strbuf *strbuf; | 482 | struct strbuf *strbuf; |
@@ -520,7 +522,8 @@ static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr, | |||
520 | } | 522 | } |
521 | 523 | ||
522 | static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist, | 524 | static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist, |
523 | int nelems, enum dma_data_direction direction) | 525 | int nelems, enum dma_data_direction direction, |
526 | struct dma_attrs *attrs) | ||
524 | { | 527 | { |
525 | struct scatterlist *s, *outs, *segstart; | 528 | struct scatterlist *s, *outs, *segstart; |
526 | unsigned long flags, handle, prot, ctx; | 529 | unsigned long flags, handle, prot, ctx; |
@@ -691,7 +694,8 @@ static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg) | |||
691 | } | 694 | } |
692 | 695 | ||
693 | static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist, | 696 | static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist, |
694 | int nelems, enum dma_data_direction direction) | 697 | int nelems, enum dma_data_direction direction, |
698 | struct dma_attrs *attrs) | ||
695 | { | 699 | { |
696 | unsigned long flags, ctx; | 700 | unsigned long flags, ctx; |
697 | struct scatterlist *sg; | 701 | struct scatterlist *sg; |
@@ -822,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev, | |||
822 | spin_unlock_irqrestore(&iommu->lock, flags); | 826 | spin_unlock_irqrestore(&iommu->lock, flags); |
823 | } | 827 | } |
824 | 828 | ||
825 | static const struct dma_ops sun4u_dma_ops = { | 829 | static struct dma_map_ops sun4u_dma_ops = { |
826 | .alloc_coherent = dma_4u_alloc_coherent, | 830 | .alloc_coherent = dma_4u_alloc_coherent, |
827 | .free_coherent = dma_4u_free_coherent, | 831 | .free_coherent = dma_4u_free_coherent, |
828 | .map_page = dma_4u_map_page, | 832 | .map_page = dma_4u_map_page, |
@@ -833,9 +837,11 @@ static const struct dma_ops sun4u_dma_ops = { | |||
833 | .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu, | 837 | .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu, |
834 | }; | 838 | }; |
835 | 839 | ||
836 | const struct dma_ops *dma_ops = &sun4u_dma_ops; | 840 | struct dma_map_ops *dma_ops = &sun4u_dma_ops; |
837 | EXPORT_SYMBOL(dma_ops); | 841 | EXPORT_SYMBOL(dma_ops); |
838 | 842 | ||
843 | extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask); | ||
844 | |||
839 | int dma_supported(struct device *dev, u64 device_mask) | 845 | int dma_supported(struct device *dev, u64 device_mask) |
840 | { | 846 | { |
841 | struct iommu *iommu = dev->archdata.iommu; | 847 | struct iommu *iommu = dev->archdata.iommu; |
@@ -849,7 +855,7 @@ int dma_supported(struct device *dev, u64 device_mask) | |||
849 | 855 | ||
850 | #ifdef CONFIG_PCI | 856 | #ifdef CONFIG_PCI |
851 | if (dev->bus == &pci_bus_type) | 857 | if (dev->bus == &pci_bus_type) |
852 | return pci_dma_supported(to_pci_dev(dev), device_mask); | 858 | return pci64_dma_supported(to_pci_dev(dev), device_mask); |
853 | #endif | 859 | #endif |
854 | 860 | ||
855 | return 0; | 861 | return 0; |
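
Driver-visible behaviour is unchanged: mask negotiation still funnels into dma_supported(), which checks the IOMMU's dma_addr_mask first and only falls back to the (renamed) pci64_dma_supported() bridge check for PCI devices. A typical caller, as a hedged sketch (example_probe and the 32-bit mask choice are hypothetical):

/* Sketch: a driver probing whether 32-bit streaming DMA will work. */
static int example_probe(struct pci_dev *pdev)
{
        if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
                dev_err(&pdev->dev, "no usable 32-bit DMA mask\n");
                return -ENODEV;
        }
        return 0;
}
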
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c index 87ea0d03d975..edbea232c617 100644 --- a/arch/sparc/kernel/ioport.c +++ b/arch/sparc/kernel/ioport.c | |||
@@ -48,8 +48,6 @@ | |||
48 | #include <asm/iommu.h> | 48 | #include <asm/iommu.h> |
49 | #include <asm/io-unit.h> | 49 | #include <asm/io-unit.h> |
50 | 50 | ||
51 | #include "dma.h" | ||
52 | |||
53 | #define mmu_inval_dma_area(p, l) /* Anton pulled it out for 2.4.0-xx */ | 51 | #define mmu_inval_dma_area(p, l) /* Anton pulled it out for 2.4.0-xx */ |
54 | 52 | ||
55 | static struct resource *_sparc_find_resource(struct resource *r, | 53 | static struct resource *_sparc_find_resource(struct resource *r, |
@@ -246,7 +244,8 @@ EXPORT_SYMBOL(sbus_set_sbus64); | |||
246 | * Typically devices use them for control blocks. | 244 | * Typically devices use them for control blocks. |
247 | * CPU may access them without any explicit flushing. | 245 | * CPU may access them without any explicit flushing. |
248 | */ | 246 | */ |
249 | void *sbus_alloc_consistent(struct device *dev, long len, u32 *dma_addrp) | 247 | static void *sbus_alloc_coherent(struct device *dev, size_t len, |
248 | dma_addr_t *dma_addrp, gfp_t gfp) | ||
250 | { | 249 | { |
251 | struct of_device *op = to_of_device(dev); | 250 | struct of_device *op = to_of_device(dev); |
252 | unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK; | 251 | unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK; |
@@ -299,7 +298,8 @@ err_nopages: | |||
299 | return NULL; | 298 | return NULL; |
300 | } | 299 | } |
301 | 300 | ||
302 | void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba) | 301 | static void sbus_free_coherent(struct device *dev, size_t n, void *p, |
302 | dma_addr_t ba) | ||
303 | { | 303 | { |
304 | struct resource *res; | 304 | struct resource *res; |
305 | struct page *pgv; | 305 | struct page *pgv; |
@@ -317,7 +317,7 @@ void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba) | |||
317 | 317 | ||
318 | n = (n + PAGE_SIZE-1) & PAGE_MASK; | 318 | n = (n + PAGE_SIZE-1) & PAGE_MASK; |
319 | if ((res->end-res->start)+1 != n) { | 319 | if ((res->end-res->start)+1 != n) { |
320 | printk("sbus_free_consistent: region 0x%lx asked 0x%lx\n", | 320 | printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n", |
321 | (long)((res->end-res->start)+1), n); | 321 | (long)((res->end-res->start)+1), n); |
322 | return; | 322 | return; |
323 | } | 323 | } |
@@ -337,8 +337,13 @@ void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba) | |||
337 | * CPU view of this memory may be inconsistent with | 337 | * CPU view of this memory may be inconsistent with |
338 | * a device view and explicit flushing is necessary. | 338 | * a device view and explicit flushing is necessary. |
339 | */ | 339 | */ |
340 | dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int direction) | 340 | static dma_addr_t sbus_map_page(struct device *dev, struct page *page, |
341 | unsigned long offset, size_t len, | ||
342 | enum dma_data_direction dir, | ||
343 | struct dma_attrs *attrs) | ||
341 | { | 344 | { |
345 | void *va = page_address(page) + offset; | ||
346 | |||
342 | /* XXX why are some lengths signed, others unsigned? */ | 347 | /* XXX why are some lengths signed, others unsigned? */ |
343 | if (len <= 0) { | 348 | if (len <= 0) { |
344 | return 0; | 349 | return 0; |
@@ -350,12 +355,14 @@ dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int directi | |||
350 | return mmu_get_scsi_one(dev, va, len); | 355 | return mmu_get_scsi_one(dev, va, len); |
351 | } | 356 | } |
352 | 357 | ||
353 | void sbus_unmap_single(struct device *dev, dma_addr_t ba, size_t n, int direction) | 358 | static void sbus_unmap_page(struct device *dev, dma_addr_t ba, size_t n, |
359 | enum dma_data_direction dir, struct dma_attrs *attrs) | ||
354 | { | 360 | { |
355 | mmu_release_scsi_one(dev, ba, n); | 361 | mmu_release_scsi_one(dev, ba, n); |
356 | } | 362 | } |
357 | 363 | ||
358 | int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction) | 364 | static int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, |
365 | enum dma_data_direction dir, struct dma_attrs *attrs) | ||
359 | { | 366 | { |
360 | mmu_get_scsi_sgl(dev, sg, n); | 367 | mmu_get_scsi_sgl(dev, sg, n); |
361 | 368 | ||
@@ -366,19 +373,38 @@ int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction | |||
366 | return n; | 373 | return n; |
367 | } | 374 | } |
368 | 375 | ||
369 | void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n, int direction) | 376 | static void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n, |
377 | enum dma_data_direction dir, struct dma_attrs *attrs) | ||
370 | { | 378 | { |
371 | mmu_release_scsi_sgl(dev, sg, n); | 379 | mmu_release_scsi_sgl(dev, sg, n); |
372 | } | 380 | } |
373 | 381 | ||
374 | void sbus_dma_sync_single_for_cpu(struct device *dev, dma_addr_t ba, size_t size, int direction) | 382 | static void sbus_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, |
383 | int n, enum dma_data_direction dir) | ||
375 | { | 384 | { |
385 | BUG(); | ||
376 | } | 386 | } |
377 | 387 | ||
378 | void sbus_dma_sync_single_for_device(struct device *dev, dma_addr_t ba, size_t size, int direction) | 388 | static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg, |
389 | int n, enum dma_data_direction dir) | ||
379 | { | 390 | { |
391 | BUG(); | ||
380 | } | 392 | } |
381 | 393 | ||
394 | struct dma_map_ops sbus_dma_ops = { | ||
395 | .alloc_coherent = sbus_alloc_coherent, | ||
396 | .free_coherent = sbus_free_coherent, | ||
397 | .map_page = sbus_map_page, | ||
398 | .unmap_page = sbus_unmap_page, | ||
399 | .map_sg = sbus_map_sg, | ||
400 | .unmap_sg = sbus_unmap_sg, | ||
401 | .sync_sg_for_cpu = sbus_sync_sg_for_cpu, | ||
402 | .sync_sg_for_device = sbus_sync_sg_for_device, | ||
403 | }; | ||
404 | |||
405 | struct dma_map_ops *dma_ops = &sbus_dma_ops; | ||
406 | EXPORT_SYMBOL(dma_ops); | ||
407 | |||
382 | static int __init sparc_register_ioport(void) | 408 | static int __init sparc_register_ioport(void) |
383 | { | 409 | { |
384 | register_proc_sparc_ioport(); | 410 | register_proc_sparc_ioport(); |
@@ -395,7 +421,8 @@ arch_initcall(sparc_register_ioport); | |||
395 | /* Allocate and map kernel buffer using consistent mode DMA for a device. | 421 | /* Allocate and map kernel buffer using consistent mode DMA for a device. |
396 | * hwdev should be a valid struct pci_dev pointer for PCI devices. | 422 | * hwdev should be a valid struct pci_dev pointer for PCI devices.
397 | */ | 423 | */ |
398 | void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba) | 424 | static void *pci32_alloc_coherent(struct device *dev, size_t len, |
425 | dma_addr_t *pba, gfp_t gfp) | ||
399 | { | 426 | { |
400 | unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK; | 427 | unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK; |
401 | unsigned long va; | 428 | unsigned long va; |
@@ -439,7 +466,6 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba) | |||
439 | *pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */ | 466 | *pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */ |
440 | return (void *) res->start; | 467 | return (void *) res->start; |
441 | } | 468 | } |
442 | EXPORT_SYMBOL(pci_alloc_consistent); | ||
443 | 469 | ||
444 | /* Free and unmap a consistent DMA buffer. | 470 | /* Free and unmap a consistent DMA buffer. |
445 | * cpu_addr is what was returned from pci_alloc_consistent, | 471 | * cpu_addr is what was returned from pci_alloc_consistent, |
@@ -449,7 +475,8 @@ EXPORT_SYMBOL(pci_alloc_consistent); | |||
449 | * References to the memory and mappings associated with cpu_addr/dma_addr | 475 | * References to the memory and mappings associated with cpu_addr/dma_addr |
450 | * past this call are illegal. | 476 | * past this call are illegal. |
451 | */ | 477 | */ |
452 | void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba) | 478 | static void pci32_free_coherent(struct device *dev, size_t n, void *p, |
479 | dma_addr_t ba) | ||
453 | { | 480 | { |
454 | struct resource *res; | 481 | struct resource *res; |
455 | unsigned long pgp; | 482 | unsigned long pgp; |
@@ -481,60 +508,18 @@ void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba) | |||
481 | 508 | ||
482 | free_pages(pgp, get_order(n)); | 509 | free_pages(pgp, get_order(n)); |
483 | } | 510 | } |
484 | EXPORT_SYMBOL(pci_free_consistent); | ||
485 | |||
486 | /* Map a single buffer of the indicated size for DMA in streaming mode. | ||
487 | * The 32-bit bus address to use is returned. | ||
488 | * | ||
489 | * Once the device is given the dma address, the device owns this memory | ||
490 | * until either pci_unmap_single or pci_dma_sync_single_* is performed. | ||
491 | */ | ||
492 | dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, | ||
493 | int direction) | ||
494 | { | ||
495 | BUG_ON(direction == PCI_DMA_NONE); | ||
496 | /* IIep is write-through, not flushing. */ | ||
497 | return virt_to_phys(ptr); | ||
498 | } | ||
499 | EXPORT_SYMBOL(pci_map_single); | ||
500 | |||
501 | /* Unmap a single streaming mode DMA translation. The dma_addr and size | ||
502 | * must match what was provided for in a previous pci_map_single call. All | ||
503 | * other usages are undefined. | ||
504 | * | ||
505 | * After this call, reads by the cpu to the buffer are guaranteed to see | ||
506 | * whatever the device wrote there. | ||
507 | */ | ||
508 | void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size, | ||
509 | int direction) | ||
510 | { | ||
511 | BUG_ON(direction == PCI_DMA_NONE); | ||
512 | if (direction != PCI_DMA_TODEVICE) { | ||
513 | mmu_inval_dma_area((unsigned long)phys_to_virt(ba), | ||
514 | (size + PAGE_SIZE-1) & PAGE_MASK); | ||
515 | } | ||
516 | } | ||
517 | EXPORT_SYMBOL(pci_unmap_single); | ||
518 | 511 | ||
519 | /* | 512 | /* |
520 | * Same as pci_map_single, but with pages. | 513 | * Same as pci_map_single, but with pages. |
521 | */ | 514 | */ |
522 | dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page, | 515 | static dma_addr_t pci32_map_page(struct device *dev, struct page *page, |
523 | unsigned long offset, size_t size, int direction) | 516 | unsigned long offset, size_t size, |
517 | enum dma_data_direction dir, | ||
518 | struct dma_attrs *attrs) | ||
524 | { | 519 | { |
525 | BUG_ON(direction == PCI_DMA_NONE); | ||
526 | /* IIep is write-through, not flushing. */ | 520 | /* IIep is write-through, not flushing. */ |
527 | return page_to_phys(page) + offset; | 521 | return page_to_phys(page) + offset; |
528 | } | 522 | } |
529 | EXPORT_SYMBOL(pci_map_page); | ||
530 | |||
531 | void pci_unmap_page(struct pci_dev *hwdev, | ||
532 | dma_addr_t dma_address, size_t size, int direction) | ||
533 | { | ||
534 | BUG_ON(direction == PCI_DMA_NONE); | ||
535 | /* mmu_inval_dma_area XXX */ | ||
536 | } | ||
537 | EXPORT_SYMBOL(pci_unmap_page); | ||
538 | 523 | ||
539 | /* Map a set of buffers described by scatterlist in streaming | 524 | /* Map a set of buffers described by scatterlist in streaming |
540 | * mode for DMA. This is the scatter-gather version of the | 525 | * mode for DMA. This is the scatter-gather version of the
@@ -551,13 +536,13 @@ EXPORT_SYMBOL(pci_unmap_page); | |||
551 | * Device ownership issues as mentioned above for pci_map_single are | 536 | * Device ownership issues as mentioned above for pci_map_single are |
552 | * the same here. | 537 | * the same here. |
553 | */ | 538 | */ |
554 | int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, | 539 | static int pci32_map_sg(struct device *device, struct scatterlist *sgl, |
555 | int direction) | 540 | int nents, enum dma_data_direction dir, |
541 | struct dma_attrs *attrs) | ||
556 | { | 542 | { |
557 | struct scatterlist *sg; | 543 | struct scatterlist *sg; |
558 | int n; | 544 | int n; |
559 | 545 | ||
560 | BUG_ON(direction == PCI_DMA_NONE); | ||
561 | /* IIep is write-through, not flushing. */ | 546 | /* IIep is write-through, not flushing. */ |
562 | for_each_sg(sgl, sg, nents, n) { | 547 | for_each_sg(sgl, sg, nents, n) { |
563 | BUG_ON(page_address(sg_page(sg)) == NULL); | 548 | BUG_ON(page_address(sg_page(sg)) == NULL); |
@@ -566,20 +551,19 @@ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, | |||
566 | } | 551 | } |
567 | return nents; | 552 | return nents; |
568 | } | 553 | } |
569 | EXPORT_SYMBOL(pci_map_sg); | ||
570 | 554 | ||
571 | /* Unmap a set of streaming mode DMA translations. | 555 | /* Unmap a set of streaming mode DMA translations. |
572 | * Again, cpu read rules concerning calls here are the same as for | 556 | * Again, cpu read rules concerning calls here are the same as for |
573 | * pci_unmap_single() above. | 557 | * pci_unmap_single() above. |
574 | */ | 558 | */ |
575 | void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, | 559 | static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl, |
576 | int direction) | 560 | int nents, enum dma_data_direction dir, |
561 | struct dma_attrs *attrs) | ||
577 | { | 562 | { |
578 | struct scatterlist *sg; | 563 | struct scatterlist *sg; |
579 | int n; | 564 | int n; |
580 | 565 | ||
581 | BUG_ON(direction == PCI_DMA_NONE); | 566 | if (dir != PCI_DMA_TODEVICE) { |
582 | if (direction != PCI_DMA_TODEVICE) { | ||
583 | for_each_sg(sgl, sg, nents, n) { | 567 | for_each_sg(sgl, sg, nents, n) { |
584 | BUG_ON(page_address(sg_page(sg)) == NULL); | 568 | BUG_ON(page_address(sg_page(sg)) == NULL); |
585 | mmu_inval_dma_area( | 569 | mmu_inval_dma_area( |
@@ -588,7 +572,6 @@ void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, | |||
588 | } | 572 | } |
589 | } | 573 | } |
590 | } | 574 | } |
591 | EXPORT_SYMBOL(pci_unmap_sg); | ||
592 | 575 | ||
593 | /* Make physical memory consistent for a single | 576 | /* Make physical memory consistent for a single |
594 | * streaming mode DMA translation before or after a transfer. | 577 | * streaming mode DMA translation before or after a transfer. |
@@ -600,25 +583,23 @@ EXPORT_SYMBOL(pci_unmap_sg); | |||
600 | * must first perform a pci_dma_sync_for_device, and then the | 583 | * must first perform a pci_dma_sync_for_device, and then the |
601 | * device again owns the buffer. | 584 | * device again owns the buffer. |
602 | */ | 585 | */ |
603 | void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction) | 586 | static void pci32_sync_single_for_cpu(struct device *dev, dma_addr_t ba, |
587 | size_t size, enum dma_data_direction dir) | ||
604 | { | 588 | { |
605 | BUG_ON(direction == PCI_DMA_NONE); | 589 | if (dir != PCI_DMA_TODEVICE) { |
606 | if (direction != PCI_DMA_TODEVICE) { | ||
607 | mmu_inval_dma_area((unsigned long)phys_to_virt(ba), | 590 | mmu_inval_dma_area((unsigned long)phys_to_virt(ba), |
608 | (size + PAGE_SIZE-1) & PAGE_MASK); | 591 | (size + PAGE_SIZE-1) & PAGE_MASK); |
609 | } | 592 | } |
610 | } | 593 | } |
611 | EXPORT_SYMBOL(pci_dma_sync_single_for_cpu); | ||
612 | 594 | ||
613 | void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction) | 595 | static void pci32_sync_single_for_device(struct device *dev, dma_addr_t ba, |
596 | size_t size, enum dma_data_direction dir) | ||
614 | { | 597 | { |
615 | BUG_ON(direction == PCI_DMA_NONE); | 598 | if (dir != PCI_DMA_TODEVICE) { |
616 | if (direction != PCI_DMA_TODEVICE) { | ||
617 | mmu_inval_dma_area((unsigned long)phys_to_virt(ba), | 599 | mmu_inval_dma_area((unsigned long)phys_to_virt(ba), |
618 | (size + PAGE_SIZE-1) & PAGE_MASK); | 600 | (size + PAGE_SIZE-1) & PAGE_MASK); |
619 | } | 601 | } |
620 | } | 602 | } |
621 | EXPORT_SYMBOL(pci_dma_sync_single_for_device); | ||
622 | 603 | ||
623 | /* Make physical memory consistent for a set of streaming | 604 | /* Make physical memory consistent for a set of streaming |
624 | * mode DMA translations after a transfer. | 605 | * mode DMA translations after a transfer. |
@@ -626,13 +607,13 @@ EXPORT_SYMBOL(pci_dma_sync_single_for_device); | |||
626 | * The same as pci_dma_sync_single_* but for a scatter-gather list, | 607 | * The same as pci_dma_sync_single_* but for a scatter-gather list, |
627 | * same rules and usage. | 608 | * same rules and usage. |
628 | */ | 609 | */ |
629 | void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction) | 610 | static void pci32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl, |
611 | int nents, enum dma_data_direction dir) | ||
630 | { | 612 | { |
631 | struct scatterlist *sg; | 613 | struct scatterlist *sg; |
632 | int n; | 614 | int n; |
633 | 615 | ||
634 | BUG_ON(direction == PCI_DMA_NONE); | 616 | if (dir != PCI_DMA_TODEVICE) { |
635 | if (direction != PCI_DMA_TODEVICE) { | ||
636 | for_each_sg(sgl, sg, nents, n) { | 617 | for_each_sg(sgl, sg, nents, n) { |
637 | BUG_ON(page_address(sg_page(sg)) == NULL); | 618 | BUG_ON(page_address(sg_page(sg)) == NULL); |
638 | mmu_inval_dma_area( | 619 | mmu_inval_dma_area( |
@@ -641,15 +622,14 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int | |||
641 | } | 622 | } |
642 | } | 623 | } |
643 | } | 624 | } |
644 | EXPORT_SYMBOL(pci_dma_sync_sg_for_cpu); | ||
645 | 625 | ||
646 | void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction) | 626 | static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *sgl, |
627 | int nents, enum dma_data_direction dir) | ||
647 | { | 628 | { |
648 | struct scatterlist *sg; | 629 | struct scatterlist *sg; |
649 | int n; | 630 | int n; |
650 | 631 | ||
651 | BUG_ON(direction == PCI_DMA_NONE); | 632 | if (dir != PCI_DMA_TODEVICE) { |
652 | if (direction != PCI_DMA_TODEVICE) { | ||
653 | for_each_sg(sgl, sg, nents, n) { | 633 | for_each_sg(sgl, sg, nents, n) { |
654 | BUG_ON(page_address(sg_page(sg)) == NULL); | 634 | BUG_ON(page_address(sg_page(sg)) == NULL); |
655 | mmu_inval_dma_area( | 635 | mmu_inval_dma_area( |
@@ -658,9 +638,49 @@ void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, | |||
658 | } | 638 | } |
659 | } | 639 | } |
660 | } | 640 | } |
661 | EXPORT_SYMBOL(pci_dma_sync_sg_for_device); | 641 | |
642 | struct dma_map_ops pci32_dma_ops = { | ||
643 | .alloc_coherent = pci32_alloc_coherent, | ||
644 | .free_coherent = pci32_free_coherent, | ||
645 | .map_page = pci32_map_page, | ||
646 | .map_sg = pci32_map_sg, | ||
647 | .unmap_sg = pci32_unmap_sg, | ||
648 | .sync_single_for_cpu = pci32_sync_single_for_cpu, | ||
649 | .sync_single_for_device = pci32_sync_single_for_device, | ||
650 | .sync_sg_for_cpu = pci32_sync_sg_for_cpu, | ||
651 | .sync_sg_for_device = pci32_sync_sg_for_device, | ||
652 | }; | ||
653 | EXPORT_SYMBOL(pci32_dma_ops); | ||
654 | |||
662 | #endif /* CONFIG_PCI */ | 655 | #endif /* CONFIG_PCI */ |
663 | 656 | ||
657 | /* | ||
658 | * Return whether the given PCI device DMA address mask can be | ||
659 | * supported properly. For example, if your device can only drive the | ||
660 | * low 24-bits during PCI bus mastering, then you would pass | ||
661 | * 0x00ffffff as the mask to this function. | ||
662 | */ | ||
663 | int dma_supported(struct device *dev, u64 mask) | ||
664 | { | ||
665 | #ifdef CONFIG_PCI | ||
666 | if (dev->bus == &pci_bus_type) | ||
667 | return 1; | ||
668 | #endif | ||
669 | return 0; | ||
670 | } | ||
671 | EXPORT_SYMBOL(dma_supported); | ||
672 | |||
673 | int dma_set_mask(struct device *dev, u64 dma_mask) | ||
674 | { | ||
675 | #ifdef CONFIG_PCI | ||
676 | if (dev->bus == &pci_bus_type) | ||
677 | return pci_set_dma_mask(to_pci_dev(dev), dma_mask); | ||
678 | #endif | ||
679 | return -EOPNOTSUPP; | ||
680 | } | ||
681 | EXPORT_SYMBOL(dma_set_mask); | ||
682 | |||
683 | |||
664 | #ifdef CONFIG_PROC_FS | 684 | #ifdef CONFIG_PROC_FS |
665 | 685 | ||
666 | static int | 686 | static int |
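
Taken together, the pci32 ops encode the IIep write-through cache model: map_page is just page_to_phys(), and only transfers the device writes (anything other than PCI_DMA_TODEVICE) need the invalidate in the sync hooks. The ownership protocol a driver follows against these ops is the standard streaming-DMA one — a hedged sketch (buffer and function names hypothetical):

/* Sketch: receive path using the streaming API backed by pci32_dma_ops. */
static void example_rx(struct device *dev, void *buf, size_t len)
{
        dma_addr_t ba = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

        /* ... hand 'ba' to the device and wait for it to DMA into buf ... */

        dma_sync_single_for_cpu(dev, ba, len, DMA_FROM_DEVICE);
        /* the CPU may read buf here; stale cache lines were invalidated */
        dma_unmap_single(dev, ba, len, DMA_FROM_DEVICE);
}
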
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c index f0ee79055409..8daab33fc17d 100644 --- a/arch/sparc/kernel/irq_64.c +++ b/arch/sparc/kernel/irq_64.c | |||
@@ -886,7 +886,7 @@ void notrace init_irqwork_curcpu(void) | |||
886 | * Therefore you cannot make any OBP calls, not even prom_printf, | 886 | * Therefore you cannot make any OBP calls, not even prom_printf, |
887 | * from these two routines. | 887 | * from these two routines. |
888 | */ | 888 | */ |
889 | static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask) | 889 | static void __cpuinit notrace register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask) |
890 | { | 890 | { |
891 | unsigned long num_entries = (qmask + 1) / 64; | 891 | unsigned long num_entries = (qmask + 1) / 64; |
892 | unsigned long status; | 892 | unsigned long status; |
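
The notrace added here (and to prom_halt(), prom_write() and prom_printf() further down) keeps gcc's mcount instrumentation out of functions that run in contexts where a function-tracer callback would be unsafe — the comment above this hunk already forbids even prom_printf. Per the 2.6.31-era include/linux/compiler.h, the annotation is simply:

/* From linux/compiler.h (paraphrased): suppress mcount instrumentation. */
#define notrace __attribute__((no_instrument_function))

With CONFIG_FUNCTION_TRACER enabled, every non-notrace function gets an mcount call in its prologue, which is exactly the kind of out-of-line call these early-bringup and PROM paths cannot tolerate.
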
diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S index cef8defcd7a9..3ea6e8cde8c5 100644 --- a/arch/sparc/kernel/ktlb.S +++ b/arch/sparc/kernel/ktlb.S | |||
@@ -151,12 +151,46 @@ kvmap_dtlb_4v: | |||
151 | * Must preserve %g1 and %g6 (TAG). | 151 | * Must preserve %g1 and %g6 (TAG). |
152 | */ | 152 | */ |
153 | kvmap_dtlb_tsb4m_miss: | 153 | kvmap_dtlb_tsb4m_miss: |
154 | sethi %hi(kpte_linear_bitmap), %g2 | 154 | /* Clear the PAGE_OFFSET top virtual bits, shift |
155 | or %g2, %lo(kpte_linear_bitmap), %g2 | 155 | * down to get PFN, and make sure PFN is in range. |
156 | */ | ||
157 | sllx %g4, 21, %g5 | ||
156 | 158 | ||
157 | /* Clear the PAGE_OFFSET top virtual bits, then shift | 159 | /* Check to see if we know about valid memory at the 4MB |
158 | * down to get a 256MB physical address index. | 160 | * chunk this physical address will reside within. |
159 | */ | 161 | */ |
162 | srlx %g5, 21 + 41, %g2 | ||
163 | brnz,pn %g2, kvmap_dtlb_longpath | ||
164 | nop | ||
165 | |||
166 | /* This unconditional branch and delay-slot nop gets patched | ||
167 | * by the sethi sequence once the bitmap is properly setup. | ||
168 | */ | ||
169 | .globl valid_addr_bitmap_insn | ||
170 | valid_addr_bitmap_insn: | ||
171 | ba,pt %xcc, 2f | ||
172 | nop | ||
173 | .subsection 2 | ||
174 | .globl valid_addr_bitmap_patch | ||
175 | valid_addr_bitmap_patch: | ||
176 | sethi %hi(sparc64_valid_addr_bitmap), %g7 | ||
177 | or %g7, %lo(sparc64_valid_addr_bitmap), %g7 | ||
178 | .previous | ||
179 | |||
180 | srlx %g5, 21 + 22, %g2 | ||
181 | srlx %g2, 6, %g5 | ||
182 | and %g2, 63, %g2 | ||
183 | sllx %g5, 3, %g5 | ||
184 | ldx [%g7 + %g5], %g5 | ||
185 | mov 1, %g7 | ||
186 | sllx %g7, %g2, %g7 | ||
187 | andcc %g5, %g7, %g0 | ||
188 | be,pn %xcc, kvmap_dtlb_longpath | ||
189 | |||
190 | 2: sethi %hi(kpte_linear_bitmap), %g2 | ||
191 | or %g2, %lo(kpte_linear_bitmap), %g2 | ||
192 | |||
193 | /* Get the 256MB physical address index. */ | ||
160 | sllx %g4, 21, %g5 | 194 | sllx %g4, 21, %g5 |
161 | mov 1, %g7 | 195 | mov 1, %g7 |
162 | srlx %g5, 21 + 28, %g5 | 196 | srlx %g5, 21 + 28, %g5 |
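
The inserted assembler is a plain bitmap probe: the brnz guard first rejects any physical address at or above 2^41 (MAX_PHYS_ADDRESS after the init_64.h change below), then bit (paddr >> 22) is looked up in a bitmap of 4MB chunks using the usual word-index/bit-index split. The same test in C, as a sketch:

/* Sketch: C rendering of the valid_addr bitmap probe above. */
static int sketch_paddr_is_valid(unsigned long paddr,
                                 const unsigned long *bitmap)
{
        unsigned long bit = paddr >> 22;                /* 4MB chunk index */

        return (bitmap[bit >> 6] >> (bit & 63)) & 1UL;  /* 64 bits per word */
}

Until mem_init() patches valid_addr_bitmap_insn, the ba/nop pair skips the probe entirely, so early boot still takes the old fast path.
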
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c index 2c0cc72d295b..b75bf502cd42 100644 --- a/arch/sparc/kernel/nmi.c +++ b/arch/sparc/kernel/nmi.c | |||
@@ -103,7 +103,7 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs) | |||
103 | } | 103 | } |
104 | if (!touched && __get_cpu_var(last_irq_sum) == sum) { | 104 | if (!touched && __get_cpu_var(last_irq_sum) == sum) { |
105 | local_inc(&__get_cpu_var(alert_counter)); | 105 | local_inc(&__get_cpu_var(alert_counter)); |
106 | if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz) | 106 | if (local_read(&__get_cpu_var(alert_counter)) == 30 * nmi_hz) |
107 | die_nmi("BUG: NMI Watchdog detected LOCKUP", | 107 | die_nmi("BUG: NMI Watchdog detected LOCKUP", |
108 | regs, panic_on_timeout); | 108 | regs, panic_on_timeout); |
109 | } else { | 109 | } else { |
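
Arithmetically, the perf-counter NMI fires nmi_hz times per second and the alert counter advances once per NMI while the local interrupt counts stay flat, so this hunk stretches the lockup verdict from roughly 5 seconds of apparent stall to roughly 30. The surrounding logic, condensed into a sketch (the state parameters stand in for the per-cpu variables in nmi.c, and panic() stands in for die_nmi()):

/* Sketch: per-NMI lockup accounting behind the 30 * nmi_hz threshold. */
static void sketch_nmi_tick(unsigned int sum, int touched, int nmi_hz,
                            unsigned int *last_sum, unsigned int *alert)
{
        if (!touched && *last_sum == sum) {
                if (++(*alert) == 30u * nmi_hz)         /* ~30s, was ~5s */
                        panic("NMI watchdog: cpu appears locked up");
        } else {
                *last_sum = sum;
                *alert = 0;
        }
}
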
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c index 57859ad23547..c68648662802 100644 --- a/arch/sparc/kernel/pci.c +++ b/arch/sparc/kernel/pci.c | |||
@@ -1039,7 +1039,7 @@ static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit) | |||
1039 | pci_dev_put(ali_isa_bridge); | 1039 | pci_dev_put(ali_isa_bridge); |
1040 | } | 1040 | } |
1041 | 1041 | ||
1042 | int pci_dma_supported(struct pci_dev *pdev, u64 device_mask) | 1042 | int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask) |
1043 | { | 1043 | { |
1044 | u64 dma_addr_mask; | 1044 | u64 dma_addr_mask; |
1045 | 1045 | ||
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c index 2485eaa23101..23c33ff9c31e 100644 --- a/arch/sparc/kernel/pci_sun4v.c +++ b/arch/sparc/kernel/pci_sun4v.c | |||
@@ -232,7 +232,8 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu, | |||
232 | 232 | ||
233 | static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page, | 233 | static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page, |
234 | unsigned long offset, size_t sz, | 234 | unsigned long offset, size_t sz, |
235 | enum dma_data_direction direction) | 235 | enum dma_data_direction direction, |
236 | struct dma_attrs *attrs) | ||
236 | { | 237 | { |
237 | struct iommu *iommu; | 238 | struct iommu *iommu; |
238 | unsigned long flags, npages, oaddr; | 239 | unsigned long flags, npages, oaddr; |
@@ -296,7 +297,8 @@ iommu_map_fail: | |||
296 | } | 297 | } |
297 | 298 | ||
298 | static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr, | 299 | static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr, |
299 | size_t sz, enum dma_data_direction direction) | 300 | size_t sz, enum dma_data_direction direction, |
301 | struct dma_attrs *attrs) | ||
300 | { | 302 | { |
301 | struct pci_pbm_info *pbm; | 303 | struct pci_pbm_info *pbm; |
302 | struct iommu *iommu; | 304 | struct iommu *iommu; |
@@ -336,7 +338,8 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr, | |||
336 | } | 338 | } |
337 | 339 | ||
338 | static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, | 340 | static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, |
339 | int nelems, enum dma_data_direction direction) | 341 | int nelems, enum dma_data_direction direction, |
342 | struct dma_attrs *attrs) | ||
340 | { | 343 | { |
341 | struct scatterlist *s, *outs, *segstart; | 344 | struct scatterlist *s, *outs, *segstart; |
342 | unsigned long flags, handle, prot; | 345 | unsigned long flags, handle, prot; |
@@ -478,7 +481,8 @@ iommu_map_failed: | |||
478 | } | 481 | } |
479 | 482 | ||
480 | static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, | 483 | static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, |
481 | int nelems, enum dma_data_direction direction) | 484 | int nelems, enum dma_data_direction direction, |
485 | struct dma_attrs *attrs) | ||
482 | { | 486 | { |
483 | struct pci_pbm_info *pbm; | 487 | struct pci_pbm_info *pbm; |
484 | struct scatterlist *sg; | 488 | struct scatterlist *sg; |
@@ -521,29 +525,13 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, | |||
521 | spin_unlock_irqrestore(&iommu->lock, flags); | 525 | spin_unlock_irqrestore(&iommu->lock, flags); |
522 | } | 526 | } |
523 | 527 | ||
524 | static void dma_4v_sync_single_for_cpu(struct device *dev, | 528 | static struct dma_map_ops sun4v_dma_ops = { |
525 | dma_addr_t bus_addr, size_t sz, | ||
526 | enum dma_data_direction direction) | ||
527 | { | ||
528 | /* Nothing to do... */ | ||
529 | } | ||
530 | |||
531 | static void dma_4v_sync_sg_for_cpu(struct device *dev, | ||
532 | struct scatterlist *sglist, int nelems, | ||
533 | enum dma_data_direction direction) | ||
534 | { | ||
535 | /* Nothing to do... */ | ||
536 | } | ||
537 | |||
538 | static const struct dma_ops sun4v_dma_ops = { | ||
539 | .alloc_coherent = dma_4v_alloc_coherent, | 529 | .alloc_coherent = dma_4v_alloc_coherent, |
540 | .free_coherent = dma_4v_free_coherent, | 530 | .free_coherent = dma_4v_free_coherent, |
541 | .map_page = dma_4v_map_page, | 531 | .map_page = dma_4v_map_page, |
542 | .unmap_page = dma_4v_unmap_page, | 532 | .unmap_page = dma_4v_unmap_page, |
543 | .map_sg = dma_4v_map_sg, | 533 | .map_sg = dma_4v_map_sg, |
544 | .unmap_sg = dma_4v_unmap_sg, | 534 | .unmap_sg = dma_4v_unmap_sg, |
545 | .sync_single_for_cpu = dma_4v_sync_single_for_cpu, | ||
546 | .sync_sg_for_cpu = dma_4v_sync_sg_for_cpu, | ||
547 | }; | 535 | }; |
548 | 536 | ||
549 | static void __devinit pci_sun4v_scan_bus(struct pci_pbm_info *pbm, | 537 | static void __devinit pci_sun4v_scan_bus(struct pci_pbm_info *pbm, |
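
The two empty sun4v sync callbacks can be deleted outright because the generic dispatch layer treats a NULL method as a no-op; "nothing to do" is now expressed by leaving the hook unset. The wrapper shape that makes this safe, per the asm-generic/dma-mapping-common.h of this era (a sketch, not verbatim):

/* Sketch: generic sync wrapper skipping a NULL method. */
static inline void sketch_sync_single_for_cpu(struct device *dev,
                                              dma_addr_t addr, size_t size,
                                              enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        if (ops->sync_single_for_cpu)
                ops->sync_single_for_cpu(dev, addr, size, dir);
}
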
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index 4041f94e7724..18d67854a1b8 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c | |||
@@ -251,7 +251,7 @@ static void __global_reg_poll(struct global_reg_snapshot *gp) | |||
251 | } | 251 | } |
252 | } | 252 | } |
253 | 253 | ||
254 | void __trigger_all_cpu_backtrace(void) | 254 | void arch_trigger_all_cpu_backtrace(void) |
255 | { | 255 | { |
256 | struct thread_info *tp = current_thread_info(); | 256 | struct thread_info *tp = current_thread_info(); |
257 | struct pt_regs *regs = get_irq_regs(); | 257 | struct pt_regs *regs = get_irq_regs(); |
@@ -304,7 +304,7 @@ void __trigger_all_cpu_backtrace(void) | |||
304 | 304 | ||
305 | static void sysrq_handle_globreg(int key, struct tty_struct *tty) | 305 | static void sysrq_handle_globreg(int key, struct tty_struct *tty) |
306 | { | 306 | { |
307 | __trigger_all_cpu_backtrace(); | 307 | arch_trigger_all_cpu_backtrace(); |
308 | } | 308 | } |
309 | 309 | ||
310 | static struct sysrq_key_op sparc_globalreg_op = { | 310 | static struct sysrq_key_op sparc_globalreg_op = { |
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c index 181d069a2d44..7ce1a1005b1d 100644 --- a/arch/sparc/kernel/signal_32.c +++ b/arch/sparc/kernel/signal_32.c | |||
@@ -590,6 +590,8 @@ void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, | |||
590 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { | 590 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { |
591 | clear_thread_flag(TIF_NOTIFY_RESUME); | 591 | clear_thread_flag(TIF_NOTIFY_RESUME); |
592 | tracehook_notify_resume(regs); | 592 | tracehook_notify_resume(regs); |
593 | if (current->replacement_session_keyring) | ||
594 | key_replace_session_keyring(); | ||
593 | } | 595 | } |
594 | } | 596 | } |
595 | 597 | ||
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c index ec82d76dc6f2..647afbda7ae1 100644 --- a/arch/sparc/kernel/signal_64.c +++ b/arch/sparc/kernel/signal_64.c | |||
@@ -613,5 +613,8 @@ void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long | |||
613 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { | 613 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { |
614 | clear_thread_flag(TIF_NOTIFY_RESUME); | 614 | clear_thread_flag(TIF_NOTIFY_RESUME); |
615 | tracehook_notify_resume(regs); | 615 | tracehook_notify_resume(regs); |
616 | if (current->replacement_session_keyring) | ||
617 | key_replace_session_keyring(); | ||
616 | } | 618 | } |
617 | } | 619 | } |
620 | |||
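
Both the 32- and 64-bit resume paths gain the same tail. The trigger is keyctl(KEYCTL_SESSION_TO_PARENT): it stashes the new keyring in the parent's replacement_session_keyring and raises TIF_NOTIFY_RESUME there, so the swap happens on the parent's own return to userspace rather than by writing another task's credentials. Consolidated for reference (a sketch; the tree keeps this code per-arch rather than in a shared helper):

/* Sketch: the TIF_NOTIFY_RESUME tail both signal_*.c files now carry. */
static void sketch_notify_resume_tail(struct pt_regs *regs,
                                      unsigned long ti_flags)
{
        if (ti_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
                if (current->replacement_session_keyring)
                        key_replace_session_keyring();
        }
}
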
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c index 54fb02468f0d..68791cad7b74 100644 --- a/arch/sparc/kernel/sun4d_smp.c +++ b/arch/sparc/kernel/sun4d_smp.c | |||
@@ -162,9 +162,6 @@ extern void cpu_panic(void); | |||
162 | */ | 162 | */ |
163 | 163 | ||
164 | extern struct linux_prom_registers smp_penguin_ctable; | 164 | extern struct linux_prom_registers smp_penguin_ctable; |
165 | extern unsigned long trapbase_cpu1[]; | ||
166 | extern unsigned long trapbase_cpu2[]; | ||
167 | extern unsigned long trapbase_cpu3[]; | ||
168 | 165 | ||
169 | void __init smp4d_boot_cpus(void) | 166 | void __init smp4d_boot_cpus(void) |
170 | { | 167 | { |
@@ -235,25 +232,6 @@ void __init smp4d_smp_done(void) | |||
235 | *prev = first; | 232 | *prev = first; |
236 | local_flush_cache_all(); | 233 | local_flush_cache_all(); |
237 | 234 | ||
238 | /* Free unneeded trap tables */ | ||
239 | ClearPageReserved(virt_to_page(trapbase_cpu1)); | ||
240 | init_page_count(virt_to_page(trapbase_cpu1)); | ||
241 | free_page((unsigned long)trapbase_cpu1); | ||
242 | totalram_pages++; | ||
243 | num_physpages++; | ||
244 | |||
245 | ClearPageReserved(virt_to_page(trapbase_cpu2)); | ||
246 | init_page_count(virt_to_page(trapbase_cpu2)); | ||
247 | free_page((unsigned long)trapbase_cpu2); | ||
248 | totalram_pages++; | ||
249 | num_physpages++; | ||
250 | |||
251 | ClearPageReserved(virt_to_page(trapbase_cpu3)); | ||
252 | init_page_count(virt_to_page(trapbase_cpu3)); | ||
253 | free_page((unsigned long)trapbase_cpu3); | ||
254 | totalram_pages++; | ||
255 | num_physpages++; | ||
256 | |||
257 | /* Ok, they are spinning and ready to go. */ | 235 | /* Ok, they are spinning and ready to go. */ |
258 | smp_processors_ready = 1; | 236 | smp_processors_ready = 1; |
259 | sun4d_distribute_irqs(); | 237 | sun4d_distribute_irqs(); |
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c index 960b113d0006..762d6eedd944 100644 --- a/arch/sparc/kernel/sun4m_smp.c +++ b/arch/sparc/kernel/sun4m_smp.c | |||
@@ -121,9 +121,6 @@ void __cpuinit smp4m_callin(void) | |||
121 | */ | 121 | */ |
122 | 122 | ||
123 | extern struct linux_prom_registers smp_penguin_ctable; | 123 | extern struct linux_prom_registers smp_penguin_ctable; |
124 | extern unsigned long trapbase_cpu1[]; | ||
125 | extern unsigned long trapbase_cpu2[]; | ||
126 | extern unsigned long trapbase_cpu3[]; | ||
127 | 124 | ||
128 | void __init smp4m_boot_cpus(void) | 125 | void __init smp4m_boot_cpus(void) |
129 | { | 126 | { |
@@ -193,29 +190,6 @@ void __init smp4m_smp_done(void) | |||
193 | *prev = first; | 190 | *prev = first; |
194 | local_flush_cache_all(); | 191 | local_flush_cache_all(); |
195 | 192 | ||
196 | /* Free unneeded trap tables */ | ||
197 | if (!cpu_isset(1, cpu_present_map)) { | ||
198 | ClearPageReserved(virt_to_page(trapbase_cpu1)); | ||
199 | init_page_count(virt_to_page(trapbase_cpu1)); | ||
200 | free_page((unsigned long)trapbase_cpu1); | ||
201 | totalram_pages++; | ||
202 | num_physpages++; | ||
203 | } | ||
204 | if (!cpu_isset(2, cpu_present_map)) { | ||
205 | ClearPageReserved(virt_to_page(trapbase_cpu2)); | ||
206 | init_page_count(virt_to_page(trapbase_cpu2)); | ||
207 | free_page((unsigned long)trapbase_cpu2); | ||
208 | totalram_pages++; | ||
209 | num_physpages++; | ||
210 | } | ||
211 | if (!cpu_isset(3, cpu_present_map)) { | ||
212 | ClearPageReserved(virt_to_page(trapbase_cpu3)); | ||
213 | init_page_count(virt_to_page(trapbase_cpu3)); | ||
214 | free_page((unsigned long)trapbase_cpu3); | ||
215 | totalram_pages++; | ||
216 | num_physpages++; | ||
217 | } | ||
218 | |||
219 | /* Ok, they are spinning and ready to go. */ | 193 | /* Ok, they are spinning and ready to go. */ |
220 | } | 194 | } |
221 | 195 | ||
diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S index f061c4dda9ef..aed94869ad6a 100644 --- a/arch/sparc/kernel/sys32.S +++ b/arch/sparc/kernel/sys32.S | |||
@@ -134,10 +134,12 @@ SIGN1(sys32_getpeername, sys_getpeername, %o0) | |||
134 | SIGN1(sys32_getsockname, sys_getsockname, %o0) | 134 | SIGN1(sys32_getsockname, sys_getsockname, %o0) |
135 | SIGN2(sys32_ioprio_get, sys_ioprio_get, %o0, %o1) | 135 | SIGN2(sys32_ioprio_get, sys_ioprio_get, %o0, %o1) |
136 | SIGN3(sys32_ioprio_set, sys_ioprio_set, %o0, %o1, %o2) | 136 | SIGN3(sys32_ioprio_set, sys_ioprio_set, %o0, %o1, %o2) |
137 | SIGN2(sys32_splice, sys_splice, %o0, %o1) | 137 | SIGN2(sys32_splice, sys_splice, %o0, %o2) |
138 | SIGN2(sys32_sync_file_range, compat_sync_file_range, %o0, %o5) | 138 | SIGN2(sys32_sync_file_range, compat_sync_file_range, %o0, %o5) |
139 | SIGN2(sys32_tee, sys_tee, %o0, %o1) | 139 | SIGN2(sys32_tee, sys_tee, %o0, %o1) |
140 | SIGN1(sys32_vmsplice, compat_sys_vmsplice, %o0) | 140 | SIGN1(sys32_vmsplice, compat_sys_vmsplice, %o0) |
141 | SIGN1(sys32_truncate, sys_truncate, %o1) | ||
142 | SIGN1(sys32_ftruncate, sys_ftruncate, %o1) | ||
141 | 143 | ||
142 | .globl sys32_mmap2 | 144 | .globl sys32_mmap2 |
143 | sys32_mmap2: | 145 | sys32_mmap2: |
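
The sys32_splice fix is about register positions: splice(2) is (fd_in, off_in, fd_out, off_out, len, flags), so %o0 and %o2 hold the two int fds that need sign extension from 32-bit userspace, while %o1 is the off_in user pointer and must be left untouched — the old %o0/%o1 pair both corrupted that pointer and left fd_out unextended. In C terms the stub's job is roughly the following (a sketch; the real stub is the SIGN2 assembler macro, and sys_splice is declared in linux/syscalls.h):

/* Sketch: C-level effect of SIGN2(sys32_splice, sys_splice, %o0, %o2). */
asmlinkage long sketch_sys32_splice(u32 fd_in, loff_t __user *off_in,
                                    u32 fd_out, loff_t __user *off_out,
                                    u32 len, u32 flags)
{
        return sys_splice((int)(s32)fd_in, off_in,
                          (int)(s32)fd_out, off_out, len, flags);
}

The new sys32_truncate/sys32_ftruncate stubs apply the same treatment to the 32-bit length in %o1, and systbls_64.S below switches the compat table over to them.
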
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S index 6b3ee88e253c..2ee7250ba7ae 100644 --- a/arch/sparc/kernel/systbls_64.S +++ b/arch/sparc/kernel/systbls_64.S | |||
@@ -43,8 +43,8 @@ sys_call_table32: | |||
43 | /*110*/ .word sys_setresgid, sys_getresgid, sys_setregid, sys_nis_syscall, sys_nis_syscall | 43 | /*110*/ .word sys_setresgid, sys_getresgid, sys_setregid, sys_nis_syscall, sys_nis_syscall |
44 | .word sys32_getgroups, compat_sys_gettimeofday, sys32_getrusage, sys_nis_syscall, sys_getcwd | 44 | .word sys32_getgroups, compat_sys_gettimeofday, sys32_getrusage, sys_nis_syscall, sys_getcwd |
45 | /*120*/ .word compat_sys_readv, compat_sys_writev, compat_sys_settimeofday, sys_fchown16, sys_fchmod | 45 | /*120*/ .word compat_sys_readv, compat_sys_writev, compat_sys_settimeofday, sys_fchown16, sys_fchmod |
46 | .word sys_nis_syscall, sys_setreuid16, sys_setregid16, sys_rename, sys_truncate | 46 | .word sys_nis_syscall, sys_setreuid16, sys_setregid16, sys_rename, sys32_truncate |
47 | /*130*/ .word sys_ftruncate, sys_flock, compat_sys_lstat64, sys_nis_syscall, sys_nis_syscall | 47 | /*130*/ .word sys32_ftruncate, sys_flock, compat_sys_lstat64, sys_nis_syscall, sys_nis_syscall |
48 | .word sys_nis_syscall, sys32_mkdir, sys_rmdir, compat_sys_utimes, compat_sys_stat64 | 48 | .word sys_nis_syscall, sys32_mkdir, sys_rmdir, compat_sys_utimes, compat_sys_stat64 |
49 | /*140*/ .word sys32_sendfile64, sys_nis_syscall, sys32_futex, sys_gettid, compat_sys_getrlimit | 49 | /*140*/ .word sys32_sendfile64, sys_nis_syscall, sys32_futex, sys_gettid, compat_sys_getrlimit |
50 | .word compat_sys_setrlimit, sys_pivot_root, sys32_prctl, sys_pciconfig_read, sys_pciconfig_write | 50 | .word compat_sys_setrlimit, sys_pivot_root, sys32_prctl, sys_pciconfig_read, sys_pciconfig_write |
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c index a5e30c642ee3..b99f81c4906f 100644 --- a/arch/sparc/mm/fault_32.c +++ b/arch/sparc/mm/fault_32.c | |||
@@ -319,9 +319,10 @@ no_context: | |||
319 | */ | 319 | */ |
320 | out_of_memory: | 320 | out_of_memory: |
321 | up_read(&mm->mmap_sem); | 321 | up_read(&mm->mmap_sem); |
322 | printk("VM: killing process %s\n", tsk->comm); | 322 | if (from_user) { |
323 | if (from_user) | 323 | pagefault_out_of_memory(); |
324 | do_group_exit(SIGKILL); | 324 | return; |
325 | } | ||
325 | goto no_context; | 326 | goto no_context; |
326 | 327 | ||
327 | do_sigbus: | 328 | do_sigbus: |
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c index e5620b27c8bf..43b0da96a4fb 100644 --- a/arch/sparc/mm/fault_64.c +++ b/arch/sparc/mm/fault_64.c | |||
@@ -447,9 +447,10 @@ handle_kernel_fault: | |||
447 | out_of_memory: | 447 | out_of_memory: |
448 | insn = get_fault_insn(regs, insn); | 448 | insn = get_fault_insn(regs, insn); |
449 | up_read(&mm->mmap_sem); | 449 | up_read(&mm->mmap_sem); |
450 | printk("VM: killing process %s\n", current->comm); | 450 | if (!(regs->tstate & TSTATE_PRIV)) { |
451 | if (!(regs->tstate & TSTATE_PRIV)) | 451 | pagefault_out_of_memory(); |
452 | do_group_exit(SIGKILL); | 452 | return; |
453 | } | ||
453 | goto handle_kernel_fault; | 454 | goto handle_kernel_fault; |
454 | 455 | ||
455 | intr_or_no_mm: | 456 | intr_or_no_mm: |
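
Both sparc fault handlers stop killing the faulting task on OOM (and drop the noisy printk): pagefault_out_of_memory() defers to the core OOM killer, which may pick a better victim, and the trap simply returns so the access retries once memory frees up. The shared shape, as a sketch (from_user stands for the predicate each handler already computes):

/* Sketch: the common out-of-memory tail after this change. */
static int sketch_fault_oom(struct mm_struct *mm, int from_user)
{
        up_read(&mm->mmap_sem);
        if (from_user) {
                pagefault_out_of_memory();      /* core OOM policy decides */
                return 0;                       /* retry the faulting access */
        }
        return -1;                              /* kernel fault: no_context path */
}
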
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index ed6be6ba2f4e..a70a5e1904d9 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c | |||
@@ -145,7 +145,8 @@ static void __init read_obp_memory(const char *property, | |||
145 | cmp_p64, NULL); | 145 | cmp_p64, NULL); |
146 | } | 146 | } |
147 | 147 | ||
148 | unsigned long *sparc64_valid_addr_bitmap __read_mostly; | 148 | unsigned long sparc64_valid_addr_bitmap[VALID_ADDR_BITMAP_BYTES / |
149 | sizeof(unsigned long)]; | ||
149 | EXPORT_SYMBOL(sparc64_valid_addr_bitmap); | 150 | EXPORT_SYMBOL(sparc64_valid_addr_bitmap); |
150 | 151 | ||
151 | /* Kernel physical address base and size in bytes. */ | 152 | /* Kernel physical address base and size in bytes. */ |
@@ -1874,7 +1875,7 @@ static int pavail_rescan_ents __initdata; | |||
1874 | * memory list again, and make sure it provides at least as much | 1875 | * memory list again, and make sure it provides at least as much |
1875 | * memory as 'pavail' does. | 1876 | * memory as 'pavail' does. |
1876 | */ | 1877 | */ |
1877 | static void __init setup_valid_addr_bitmap_from_pavail(void) | 1878 | static void __init setup_valid_addr_bitmap_from_pavail(unsigned long *bitmap) |
1878 | { | 1879 | { |
1879 | int i; | 1880 | int i; |
1880 | 1881 | ||
@@ -1897,8 +1898,7 @@ static void __init setup_valid_addr_bitmap_from_pavail(void) | |||
1897 | 1898 | ||
1898 | if (new_start <= old_start && | 1899 | if (new_start <= old_start && |
1899 | new_end >= (old_start + PAGE_SIZE)) { | 1900 | new_end >= (old_start + PAGE_SIZE)) { |
1900 | set_bit(old_start >> 22, | 1901 | set_bit(old_start >> 22, bitmap); |
1901 | sparc64_valid_addr_bitmap); | ||
1902 | goto do_next_page; | 1902 | goto do_next_page; |
1903 | } | 1903 | } |
1904 | } | 1904 | } |
@@ -1919,20 +1919,21 @@ static void __init setup_valid_addr_bitmap_from_pavail(void) | |||
1919 | } | 1919 | } |
1920 | } | 1920 | } |
1921 | 1921 | ||
1922 | static void __init patch_tlb_miss_handler_bitmap(void) | ||
1923 | { | ||
1924 | extern unsigned int valid_addr_bitmap_insn[]; | ||
1925 | extern unsigned int valid_addr_bitmap_patch[]; | ||
1926 | |||
1927 | valid_addr_bitmap_insn[1] = valid_addr_bitmap_patch[1]; | ||
1928 | mb(); | ||
1929 | valid_addr_bitmap_insn[0] = valid_addr_bitmap_patch[0]; | ||
1930 | flushi(&valid_addr_bitmap_insn[0]); | ||
1931 | } | ||
1932 | |||
1922 | void __init mem_init(void) | 1933 | void __init mem_init(void) |
1923 | { | 1934 | { |
1924 | unsigned long codepages, datapages, initpages; | 1935 | unsigned long codepages, datapages, initpages; |
1925 | unsigned long addr, last; | 1936 | unsigned long addr, last; |
1926 | int i; | ||
1927 | |||
1928 | i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6); | ||
1929 | i += 1; | ||
1930 | sparc64_valid_addr_bitmap = (unsigned long *) alloc_bootmem(i << 3); | ||
1931 | if (sparc64_valid_addr_bitmap == NULL) { | ||
1932 | prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n"); | ||
1933 | prom_halt(); | ||
1934 | } | ||
1935 | memset(sparc64_valid_addr_bitmap, 0, i << 3); | ||
1936 | 1937 | ||
1937 | addr = PAGE_OFFSET + kern_base; | 1938 | addr = PAGE_OFFSET + kern_base; |
1938 | last = PAGE_ALIGN(kern_size) + addr; | 1939 | last = PAGE_ALIGN(kern_size) + addr; |
@@ -1941,15 +1942,19 @@ void __init mem_init(void) | |||
1941 | addr += PAGE_SIZE; | 1942 | addr += PAGE_SIZE; |
1942 | } | 1943 | } |
1943 | 1944 | ||
1944 | setup_valid_addr_bitmap_from_pavail(); | 1945 | setup_valid_addr_bitmap_from_pavail(sparc64_valid_addr_bitmap); |
1946 | patch_tlb_miss_handler_bitmap(); | ||
1945 | 1947 | ||
1946 | high_memory = __va(last_valid_pfn << PAGE_SHIFT); | 1948 | high_memory = __va(last_valid_pfn << PAGE_SHIFT); |
1947 | 1949 | ||
1948 | #ifdef CONFIG_NEED_MULTIPLE_NODES | 1950 | #ifdef CONFIG_NEED_MULTIPLE_NODES |
1949 | for_each_online_node(i) { | 1951 | { |
1950 | if (NODE_DATA(i)->node_spanned_pages != 0) { | 1952 | int i; |
1951 | totalram_pages += | 1953 | for_each_online_node(i) { |
1952 | free_all_bootmem_node(NODE_DATA(i)); | 1954 | if (NODE_DATA(i)->node_spanned_pages != 0) { |
1955 | totalram_pages += | ||
1956 | free_all_bootmem_node(NODE_DATA(i)); | ||
1957 | } | ||
1953 | } | 1958 | } |
1954 | } | 1959 | } |
1955 | #else | 1960 | #else |
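
patch_tlb_miss_handler_bitmap() rewrites a live two-instruction sequence, and the store order appears deliberate: the delay-slot word goes first because executing the new or under the old ba is harmless (%g7 is dead until the later mov 1, %g7), and only after a barrier is the branch word itself replaced and the I-cache flushed. The discipline in outline:

/* Sketch: patching a live branch/delay-slot pair, second word first.
 * insn/patch are the u32 pairs exported from ktlb.S above. */
static void sketch_patch_pair(unsigned int *insn, const unsigned int *patch)
{
        insn[1] = patch[1];     /* new delay slot; benign under the old branch */
        mb();
        insn[0] = patch[0];     /* now replace the branch itself */
        flushi(&insn[0]);       /* make the I-cache see the new pair */
}
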
diff --git a/arch/sparc/mm/init_64.h b/arch/sparc/mm/init_64.h index 16063870a489..c2f772dbd556 100644 --- a/arch/sparc/mm/init_64.h +++ b/arch/sparc/mm/init_64.h | |||
@@ -5,10 +5,13 @@ | |||
5 | * marked non-static so that assembler code can get at them. | 5 | * marked non-static so that assembler code can get at them. |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #define MAX_PHYS_ADDRESS (1UL << 42UL) | 8 | #define MAX_PHYS_ADDRESS (1UL << 41UL) |
9 | #define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL) | 9 | #define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL) |
10 | #define KPTE_BITMAP_BYTES \ | 10 | #define KPTE_BITMAP_BYTES \ |
11 | ((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 8) | 11 | ((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 8) |
12 | #define VALID_ADDR_BITMAP_CHUNK_SZ (4UL * 1024UL * 1024UL) | ||
13 | #define VALID_ADDR_BITMAP_BYTES \ | ||
14 | ((MAX_PHYS_ADDRESS / VALID_ADDR_BITMAP_CHUNK_SZ) / 8) | ||
12 | 15 | ||
13 | extern unsigned long kern_linear_pte_xor[2]; | 16 | extern unsigned long kern_linear_pte_xor[2]; |
14 | extern unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)]; | 17 | extern unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)]; |
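
The static sizing this enables is cheap: 2^41 bytes of physical address space in 4MB (2^22) chunks is 2^19 bits, i.e. 64KB of bitmap, small enough to live in BSS instead of being carved out of bootmem in mem_init(). The arithmetic as a compile-time check (the SK_ names are scratch, not kernel symbols):

/* Sketch: VALID_ADDR_BITMAP_BYTES works out to 64KB. */
#define SK_MAX_PHYS_ADDRESS     (1UL << 41)
#define SK_CHUNK_SZ             (4UL * 1024 * 1024)
#define SK_BITMAP_BYTES         ((SK_MAX_PHYS_ADDRESS / SK_CHUNK_SZ) / 8)

typedef char sk_bitmap_is_64k[(SK_BITMAP_BYTES == 65536) ? 1 : -1];
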
diff --git a/arch/sparc/prom/misc_64.c b/arch/sparc/prom/misc_64.c index eedffb4fec2d..39fc6af21b7c 100644 --- a/arch/sparc/prom/misc_64.c +++ b/arch/sparc/prom/misc_64.c | |||
@@ -88,7 +88,7 @@ void prom_cmdline(void) | |||
88 | /* Drop into the prom, but completely terminate the program. | 88 | /* Drop into the prom, but completely terminate the program. |
89 | * No chance of continuing. | 89 | * No chance of continuing. |
90 | */ | 90 | */ |
91 | void prom_halt(void) | 91 | void notrace prom_halt(void) |
92 | { | 92 | { |
93 | #ifdef CONFIG_SUN_LDOMS | 93 | #ifdef CONFIG_SUN_LDOMS |
94 | if (ldom_domaining_enabled) | 94 | if (ldom_domaining_enabled) |
diff --git a/arch/sparc/prom/printf.c b/arch/sparc/prom/printf.c index 660943ee4c2a..ca869266b9f3 100644 --- a/arch/sparc/prom/printf.c +++ b/arch/sparc/prom/printf.c | |||
@@ -14,14 +14,14 @@ | |||
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
17 | #include <linux/compiler.h> | ||
17 | 18 | ||
18 | #include <asm/openprom.h> | 19 | #include <asm/openprom.h> |
19 | #include <asm/oplib.h> | 20 | #include <asm/oplib.h> |
20 | 21 | ||
21 | static char ppbuf[1024]; | 22 | static char ppbuf[1024]; |
22 | 23 | ||
23 | void | 24 | void notrace prom_write(const char *buf, unsigned int n) |
24 | prom_write(const char *buf, unsigned int n) | ||
25 | { | 25 | { |
26 | char ch; | 26 | char ch; |
27 | 27 | ||
@@ -33,8 +33,7 @@ prom_write(const char *buf, unsigned int n) | |||
33 | } | 33 | } |
34 | } | 34 | } |
35 | 35 | ||
36 | void | 36 | void notrace prom_printf(const char *fmt, ...) |
37 | prom_printf(const char *fmt, ...) | ||
38 | { | 37 | { |
39 | va_list args; | 38 | va_list args; |
40 | int i; | 39 | int i; |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 13ffa5df37d7..fc20fdc0f7f2 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -38,7 +38,7 @@ config X86 | |||
38 | select HAVE_FUNCTION_GRAPH_FP_TEST | 38 | select HAVE_FUNCTION_GRAPH_FP_TEST |
39 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST | 39 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST |
40 | select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE | 40 | select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE |
41 | select HAVE_FTRACE_SYSCALLS | 41 | select HAVE_SYSCALL_TRACEPOINTS |
42 | select HAVE_KVM | 42 | select HAVE_KVM |
43 | select HAVE_ARCH_KGDB | 43 | select HAVE_ARCH_KGDB |
44 | select HAVE_ARCH_TRACEHOOK | 44 | select HAVE_ARCH_TRACEHOOK |
@@ -586,7 +586,6 @@ config GART_IOMMU | |||
586 | bool "GART IOMMU support" if EMBEDDED | 586 | bool "GART IOMMU support" if EMBEDDED |
587 | default y | 587 | default y |
588 | select SWIOTLB | 588 | select SWIOTLB |
589 | select AGP | ||
590 | depends on X86_64 && PCI | 589 | depends on X86_64 && PCI |
591 | ---help--- | 590 | ---help--- |
592 | Support for full DMA access of devices with 32bit memory access only | 591 | Support for full DMA access of devices with 32bit memory access only |
diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 8a4c24c96d08..5128b178529f 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile | |||
@@ -74,7 +74,7 @@ endif | |||
74 | 74 | ||
75 | ifdef CONFIG_CC_STACKPROTECTOR | 75 | ifdef CONFIG_CC_STACKPROTECTOR |
76 | cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh | 76 | cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh |
77 | ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC)),y) | 77 | ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(biarch)),y) |
78 | stackp-y := -fstack-protector | 78 | stackp-y := -fstack-protector |
79 | stackp-$(CONFIG_CC_STACKPROTECTOR_ALL) += -fstack-protector-all | 79 | stackp-$(CONFIG_CC_STACKPROTECTOR_ALL) += -fstack-protector-all |
80 | KBUILD_CFLAGS += $(stackp-y) | 80 | KBUILD_CFLAGS += $(stackp-y) |
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index e2ff504b4ddc..f8ed0658404c 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile | |||
@@ -4,7 +4,7 @@ | |||
4 | # create a compressed vmlinux image from the original vmlinux | 4 | # create a compressed vmlinux image from the original vmlinux |
5 | # | 5 | # |
6 | 6 | ||
7 | targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma head_$(BITS).o misc.o piggy.o | 7 | targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma head_$(BITS).o misc.o piggy.o |
8 | 8 | ||
9 | KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 | 9 | KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 |
10 | KBUILD_CFLAGS += -fno-strict-aliasing -fPIC | 10 | KBUILD_CFLAGS += -fno-strict-aliasing -fPIC |
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig index edb992ebef92..d28fad19654a 100644 --- a/arch/x86/configs/i386_defconfig +++ b/arch/x86/configs/i386_defconfig | |||
@@ -2355,7 +2355,7 @@ CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y | |||
2355 | CONFIG_HAVE_DYNAMIC_FTRACE=y | 2355 | CONFIG_HAVE_DYNAMIC_FTRACE=y |
2356 | CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y | 2356 | CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y |
2357 | CONFIG_HAVE_HW_BRANCH_TRACER=y | 2357 | CONFIG_HAVE_HW_BRANCH_TRACER=y |
2358 | CONFIG_HAVE_FTRACE_SYSCALLS=y | 2358 | CONFIG_HAVE_SYSCALL_TRACEPOINTS=y |
2359 | CONFIG_RING_BUFFER=y | 2359 | CONFIG_RING_BUFFER=y |
2360 | CONFIG_TRACING=y | 2360 | CONFIG_TRACING=y |
2361 | CONFIG_TRACING_SUPPORT=y | 2361 | CONFIG_TRACING_SUPPORT=y |
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig index cee1dd2e69b2..6c86acd847a4 100644 --- a/arch/x86/configs/x86_64_defconfig +++ b/arch/x86/configs/x86_64_defconfig | |||
@@ -2329,7 +2329,7 @@ CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y | |||
2329 | CONFIG_HAVE_DYNAMIC_FTRACE=y | 2329 | CONFIG_HAVE_DYNAMIC_FTRACE=y |
2330 | CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y | 2330 | CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y |
2331 | CONFIG_HAVE_HW_BRANCH_TRACER=y | 2331 | CONFIG_HAVE_HW_BRANCH_TRACER=y |
2332 | CONFIG_HAVE_FTRACE_SYSCALLS=y | 2332 | CONFIG_HAVE_SYSCALL_TRACEPOINTS=y |
2333 | CONFIG_RING_BUFFER=y | 2333 | CONFIG_RING_BUFFER=y |
2334 | CONFIG_TRACING=y | 2334 | CONFIG_TRACING=y |
2335 | CONFIG_TRACING_SUPPORT=y | 2335 | CONFIG_TRACING_SUPPORT=y |
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index 7667235ade19..585edebe12cf 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c | |||
@@ -629,7 +629,7 @@ static int __init aesni_init(void) | |||
629 | int err; | 629 | int err; |
630 | 630 | ||
631 | if (!cpu_has_aes) { | 631 | if (!cpu_has_aes) { |
632 | printk(KERN_ERR "Intel AES-NI instructions are not detected.\n"); | 632 | printk(KERN_INFO "Intel AES-NI instructions are not detected.\n"); |
633 | return -ENODEV; | 633 | return -ENODEV; |
634 | } | 634 | } |
635 | if ((err = crypto_register_alg(&aesni_alg))) | 635 | if ((err = crypto_register_alg(&aesni_alg))) |
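
Note on the severity change above: a CPU that lacks AES-NI is an expected probe outcome, not an error, so the message drops from KERN_ERR to KERN_INFO. A minimal sketch of the same gate-on-feature init pattern (hypothetical module; have_feature() stands in for the real cpu_has_aes test):

    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/module.h>

    static bool have_feature(void)          /* stand-in for cpu_has_aes */
    {
            return false;                   /* assume the feature is absent */
    }

    static int __init demo_init(void)
    {
            if (!have_feature()) {
                    /* informational only: the hardware lacks the feature */
                    printk(KERN_INFO "demo: feature not detected\n");
                    return -ENODEV;
            }
            return 0;
    }
    module_init(demo_init);
    MODULE_LICENSE("GPL");
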
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index e590261ba059..ba331bfd1112 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S | |||
@@ -537,7 +537,7 @@ ia32_sys_call_table: | |||
537 | .quad sys_mkdir | 537 | .quad sys_mkdir |
538 | .quad sys_rmdir /* 40 */ | 538 | .quad sys_rmdir /* 40 */ |
539 | .quad sys_dup | 539 | .quad sys_dup |
540 | .quad sys32_pipe | 540 | .quad sys_pipe |
541 | .quad compat_sys_times | 541 | .quad compat_sys_times |
542 | .quad quiet_ni_syscall /* old prof syscall holder */ | 542 | .quad quiet_ni_syscall /* old prof syscall holder */ |
543 | .quad sys_brk /* 45 */ | 543 | .quad sys_brk /* 45 */ |
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c index 085a8c35f149..9f5527198825 100644 --- a/arch/x86/ia32/sys_ia32.c +++ b/arch/x86/ia32/sys_ia32.c | |||
@@ -189,20 +189,6 @@ asmlinkage long sys32_mprotect(unsigned long start, size_t len, | |||
189 | return sys_mprotect(start, len, prot); | 189 | return sys_mprotect(start, len, prot); |
190 | } | 190 | } |
191 | 191 | ||
192 | asmlinkage long sys32_pipe(int __user *fd) | ||
193 | { | ||
194 | int retval; | ||
195 | int fds[2]; | ||
196 | |||
197 | retval = do_pipe_flags(fds, 0); | ||
198 | if (retval) | ||
199 | goto out; | ||
200 | if (copy_to_user(fd, fds, sizeof(fds))) | ||
201 | retval = -EFAULT; | ||
202 | out: | ||
203 | return retval; | ||
204 | } | ||
205 | |||
206 | asmlinkage long sys32_rt_sigaction(int sig, struct sigaction32 __user *act, | 192 | asmlinkage long sys32_rt_sigaction(int sig, struct sigaction32 __user *act, |
207 | struct sigaction32 __user *oact, | 193 | struct sigaction32 __user *oact, |
208 | unsigned int sigsetsize) | 194 | unsigned int sigsetsize) |
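
The deleted sys32_pipe above duplicated the generic syscall: int has the same size and layout for 32-bit and 64-bit callers, so the compat table can point straight at sys_pipe. An outline of the generic path it now routes to (per fs/pipe.c of this era; error-path details such as closing the fds when copy_to_user() fails are elided):

    #include <linux/fs.h>
    #include <asm/uaccess.h>

    /* shape of the generic sys_pipe the ia32 entry now shares */
    static long pipe_outline(int __user *fildes)
    {
            int fd[2];
            int error;

            error = do_pipe_flags(fd, 0);
            if (!error && copy_to_user(fildes, fd, sizeof(fd)))
                    error = -EFAULT;
            return error;
    }
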
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index 1a37bcdc8606..c240efc74e00 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h | |||
@@ -73,8 +73,6 @@ static inline void alternatives_smp_module_del(struct module *mod) {} | |||
73 | static inline void alternatives_smp_switch(int smp) {} | 73 | static inline void alternatives_smp_switch(int smp) {} |
74 | #endif /* CONFIG_SMP */ | 74 | #endif /* CONFIG_SMP */ |
75 | 75 | ||
76 | const unsigned char *const *find_nop_table(void); | ||
77 | |||
78 | /* alternative assembly primitive: */ | 76 | /* alternative assembly primitive: */ |
79 | #define ALTERNATIVE(oldinstr, newinstr, feature) \ | 77 | #define ALTERNATIVE(oldinstr, newinstr, feature) \ |
80 | \ | 78 | \ |
@@ -144,8 +142,6 @@ static inline void apply_paravirt(struct paravirt_patch_site *start, | |||
144 | #define __parainstructions_end NULL | 142 | #define __parainstructions_end NULL |
145 | #endif | 143 | #endif |
146 | 144 | ||
147 | extern void add_nops(void *insns, unsigned int len); | ||
148 | |||
149 | /* | 145 | /* |
150 | * Clear and restore the kernel write-protection flag on the local CPU. | 146 | * Clear and restore the kernel write-protection flag on the local CPU. |
151 | * Allows the kernel to edit read-only pages. | 147 | * Allows the kernel to edit read-only pages. |
@@ -161,10 +157,7 @@ extern void add_nops(void *insns, unsigned int len); | |||
161 | * Intel's errata. | 157 | * Intel's errata. |
162 | * On the local CPU you need to be protected against NMI or MCE handlers seeing an | 158 | * On the local CPU you need to be protected against NMI or MCE handlers seeing an |
163 | * inconsistent instruction while you patch. | 159 | * inconsistent instruction while you patch. |
164 | * The _early version expects the memory to already be RW. | ||
165 | */ | 160 | */ |
166 | |||
167 | extern void *text_poke(void *addr, const void *opcode, size_t len); | 161 | extern void *text_poke(void *addr, const void *opcode, size_t len); |
168 | extern void *text_poke_early(void *addr, const void *opcode, size_t len); | ||
169 | 162 | ||
170 | #endif /* _ASM_X86_ALTERNATIVE_H */ | 163 | #endif /* _ASM_X86_ALTERNATIVE_H */ |
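
With find_nop_table(), add_nops() and text_poke_early() gone from the public header, text_poke() is the single exported entry point for runtime code patching. A hedged usage sketch (holding text_mutex and the 5-byte NOPL encoding are assumptions for illustration, not requirements stated by this header):

    #include <linux/memory.h>           /* text_mutex (assumed location) */
    #include <linux/mutex.h>
    #include <asm/alternative.h>        /* text_poke() */

    /* overwrite a 5-byte instruction with a P6-style NOPL */
    static void patch_to_nop5(void *addr)
    {
            static const unsigned char nop5[5] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };

            mutex_lock(&text_mutex);
            text_poke(addr, nop5, sizeof(nop5));
            mutex_unlock(&text_mutex);
    }
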
diff --git a/arch/x86/include/asm/amd_iommu.h b/arch/x86/include/asm/amd_iommu.h index bdf96f119f06..ac95995b7bad 100644 --- a/arch/x86/include/asm/amd_iommu.h +++ b/arch/x86/include/asm/amd_iommu.h | |||
@@ -25,6 +25,7 @@ | |||
25 | #ifdef CONFIG_AMD_IOMMU | 25 | #ifdef CONFIG_AMD_IOMMU |
26 | extern int amd_iommu_init(void); | 26 | extern int amd_iommu_init(void); |
27 | extern int amd_iommu_init_dma_ops(void); | 27 | extern int amd_iommu_init_dma_ops(void); |
28 | extern int amd_iommu_init_passthrough(void); | ||
28 | extern void amd_iommu_detect(void); | 29 | extern void amd_iommu_detect(void); |
29 | extern irqreturn_t amd_iommu_int_handler(int irq, void *data); | 30 | extern irqreturn_t amd_iommu_int_handler(int irq, void *data); |
30 | extern void amd_iommu_flush_all_domains(void); | 31 | extern void amd_iommu_flush_all_domains(void); |
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h index 0c878caaa0a2..2a2cc7a78a81 100644 --- a/arch/x86/include/asm/amd_iommu_types.h +++ b/arch/x86/include/asm/amd_iommu_types.h | |||
@@ -143,22 +143,29 @@ | |||
143 | #define EVT_BUFFER_SIZE 8192 /* 512 entries */ | 143 | #define EVT_BUFFER_SIZE 8192 /* 512 entries */ |
144 | #define EVT_LEN_MASK (0x9ULL << 56) | 144 | #define EVT_LEN_MASK (0x9ULL << 56) |
145 | 145 | ||
146 | #define PAGE_MODE_NONE 0x00 | ||
146 | #define PAGE_MODE_1_LEVEL 0x01 | 147 | #define PAGE_MODE_1_LEVEL 0x01 |
147 | #define PAGE_MODE_2_LEVEL 0x02 | 148 | #define PAGE_MODE_2_LEVEL 0x02 |
148 | #define PAGE_MODE_3_LEVEL 0x03 | 149 | #define PAGE_MODE_3_LEVEL 0x03 |
149 | 150 | #define PAGE_MODE_4_LEVEL 0x04 | |
150 | #define IOMMU_PDE_NL_0 0x000ULL | 151 | #define PAGE_MODE_5_LEVEL 0x05 |
151 | #define IOMMU_PDE_NL_1 0x200ULL | 152 | #define PAGE_MODE_6_LEVEL 0x06 |
152 | #define IOMMU_PDE_NL_2 0x400ULL | 153 | |
153 | #define IOMMU_PDE_NL_3 0x600ULL | 154 | #define PM_LEVEL_SHIFT(x) (12 + ((x) * 9)) |
154 | 155 | #define PM_LEVEL_SIZE(x) (((x) < 6) ? \ | |
155 | #define IOMMU_PTE_L2_INDEX(address) (((address) >> 30) & 0x1ffULL) | 156 | ((1ULL << PM_LEVEL_SHIFT((x))) - 1): \ |
156 | #define IOMMU_PTE_L1_INDEX(address) (((address) >> 21) & 0x1ffULL) | 157 | (0xffffffffffffffffULL)) |
157 | #define IOMMU_PTE_L0_INDEX(address) (((address) >> 12) & 0x1ffULL) | 158 | #define PM_LEVEL_INDEX(x, a) (((a) >> PM_LEVEL_SHIFT((x))) & 0x1ffULL) |
158 | 159 | #define PM_LEVEL_ENC(x) (((x) << 9) & 0xe00ULL) | |
159 | #define IOMMU_MAP_SIZE_L1 (1ULL << 21) | 160 | #define PM_LEVEL_PDE(x, a) ((a) | PM_LEVEL_ENC((x)) | \ |
160 | #define IOMMU_MAP_SIZE_L2 (1ULL << 30) | 161 | IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW) |
161 | #define IOMMU_MAP_SIZE_L3 (1ULL << 39) | 162 | #define PM_PTE_LEVEL(pte) (((pte) >> 9) & 0x7ULL) |
163 | |||
164 | #define PM_MAP_4k 0 | ||
165 | #define PM_ADDR_MASK 0x000ffffffffff000ULL | ||
166 | #define PM_MAP_MASK(lvl) (PM_ADDR_MASK & \ | ||
167 | (~((1ULL << (12 + ((lvl) * 9))) - 1))) | ||
168 | #define PM_ALIGNED(lvl, addr) ((PM_MAP_MASK(lvl) & (addr)) == (addr)) | ||
162 | 169 | ||
163 | #define IOMMU_PTE_P (1ULL << 0) | 170 | #define IOMMU_PTE_P (1ULL << 0) |
164 | #define IOMMU_PTE_TV (1ULL << 1) | 171 | #define IOMMU_PTE_TV (1ULL << 1) |
@@ -167,11 +174,6 @@ | |||
167 | #define IOMMU_PTE_IR (1ULL << 61) | 174 | #define IOMMU_PTE_IR (1ULL << 61) |
168 | #define IOMMU_PTE_IW (1ULL << 62) | 175 | #define IOMMU_PTE_IW (1ULL << 62) |
169 | 176 | ||
170 | #define IOMMU_L1_PDE(address) \ | ||
171 | ((address) | IOMMU_PDE_NL_1 | IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW) | ||
172 | #define IOMMU_L2_PDE(address) \ | ||
173 | ((address) | IOMMU_PDE_NL_2 | IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW) | ||
174 | |||
175 | #define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL) | 177 | #define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL) |
176 | #define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P) | 178 | #define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P) |
177 | #define IOMMU_PTE_PAGE(pte) (phys_to_virt((pte) & IOMMU_PAGE_MASK)) | 179 | #define IOMMU_PTE_PAGE(pte) (phys_to_virt((pte) & IOMMU_PAGE_MASK)) |
@@ -194,11 +196,14 @@ | |||
194 | #define PD_DMA_OPS_MASK (1UL << 0) /* domain used for dma_ops */ | 196 | #define PD_DMA_OPS_MASK (1UL << 0) /* domain used for dma_ops */ |
195 | #define PD_DEFAULT_MASK (1UL << 1) /* domain is a default dma_ops | 197 | #define PD_DEFAULT_MASK (1UL << 1) /* domain is a default dma_ops |
196 | domain for an IOMMU */ | 198 | domain for an IOMMU */ |
199 | #define PD_PASSTHROUGH_MASK (1UL << 2) /* domain has no page | ||
200 | translation */ | ||
201 | |||
197 | extern bool amd_iommu_dump; | 202 | extern bool amd_iommu_dump; |
198 | #define DUMP_printk(format, arg...) \ | 203 | #define DUMP_printk(format, arg...) \ |
199 | do { \ | 204 | do { \ |
200 | if (amd_iommu_dump) \ | 205 | if (amd_iommu_dump) \ |
201 | printk(KERN_INFO "AMD IOMMU: " format, ## arg); \ | 206 | printk(KERN_INFO "AMD-Vi: " format, ## arg); \ |
202 | } while(0); | 207 | } while(0); |
203 | 208 | ||
204 | /* | 209 | /* |
@@ -226,6 +231,7 @@ struct protection_domain { | |||
226 | int mode; /* paging mode (0-6 levels) */ | 231 | int mode; /* paging mode (0-6 levels) */ |
227 | u64 *pt_root; /* page table root pointer */ | 232 | u64 *pt_root; /* page table root pointer */ |
228 | unsigned long flags; /* flags to find out type of domain */ | 233 | unsigned long flags; /* flags to find out type of domain */ |
234 | bool updated; /* complete domain flush required */ | ||
229 | unsigned dev_cnt; /* devices assigned to this domain */ | 235 | unsigned dev_cnt; /* devices assigned to this domain */ |
230 | void *priv; /* private data */ | 236 | void *priv; /* private data */ |
231 | }; | 237 | }; |
@@ -337,6 +343,9 @@ struct amd_iommu { | |||
337 | /* if one, we need to send a completion wait command */ | 343 | /* if one, we need to send a completion wait command */ |
338 | bool need_sync; | 344 | bool need_sync; |
339 | 345 | ||
346 | /* becomes true if a command buffer reset is running */ | ||
347 | bool reset_in_progress; | ||
348 | |||
340 | /* default dma_ops domain for that IOMMU */ | 349 | /* default dma_ops domain for that IOMMU */ |
341 | struct dma_ops_domain *default_dom; | 350 | struct dma_ops_domain *default_dom; |
342 | }; | 351 | }; |
@@ -457,4 +466,7 @@ static inline void amd_iommu_stats_init(void) { } | |||
457 | 466 | ||
458 | #endif /* CONFIG_AMD_IOMMU_STATS */ | 467 | #endif /* CONFIG_AMD_IOMMU_STATS */ |
459 | 468 | ||
469 | /* some function prototypes */ | ||
470 | extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu); | ||
471 | |||
460 | #endif /* _ASM_X86_AMD_IOMMU_TYPES_H */ | 472 | #endif /* _ASM_X86_AMD_IOMMU_TYPES_H */ |
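
The level-parameterized PM_LEVEL_* macros above replace the fixed three-level IOMMU_PTE_L{0,1,2}_INDEX ones: each level covers 9 address bits starting at bit 12, so one set of macros serves page tables from 1 to 6 levels. A user-space sketch exercising the arithmetic (macros copied from the hunk):

    #include <stdio.h>

    #define PM_LEVEL_SHIFT(x)       (12 + ((x) * 9))
    #define PM_LEVEL_INDEX(x, a)    (((a) >> PM_LEVEL_SHIFT((x))) & 0x1ffULL)

    int main(void)
    {
            unsigned long long addr = 0x0000123456789000ULL;
            int lvl;

            /* print the 9-bit table index selected at each level of a
             * 4-level walk */
            for (lvl = 3; lvl >= 0; lvl--)
                    printf("level %d: shift %d, index %llu\n",
                           lvl, PM_LEVEL_SHIFT(lvl), PM_LEVEL_INDEX(lvl, addr));
            return 0;
    }
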
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index bb7d47925847..586b7adb8e53 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h | |||
@@ -183,6 +183,10 @@ static inline int x2apic_enabled(void) | |||
183 | } | 183 | } |
184 | 184 | ||
185 | #define x2apic_supported() (cpu_has_x2apic) | 185 | #define x2apic_supported() (cpu_has_x2apic) |
186 | static inline void x2apic_force_phys(void) | ||
187 | { | ||
188 | x2apic_phys = 1; | ||
189 | } | ||
186 | #else | 190 | #else |
187 | static inline void check_x2apic(void) | 191 | static inline void check_x2apic(void) |
188 | { | 192 | { |
@@ -194,6 +198,9 @@ static inline int x2apic_enabled(void) | |||
194 | { | 198 | { |
195 | return 0; | 199 | return 0; |
196 | } | 200 | } |
201 | static inline void x2apic_force_phys(void) | ||
202 | { | ||
203 | } | ||
197 | 204 | ||
198 | #define x2apic_preenabled 0 | 205 | #define x2apic_preenabled 0 |
199 | #define x2apic_supported() 0 | 206 | #define x2apic_supported() 0 |
diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h index 7ddb36ab933b..7386bfa4f4bc 100644 --- a/arch/x86/include/asm/apicdef.h +++ b/arch/x86/include/asm/apicdef.h | |||
@@ -8,7 +8,8 @@ | |||
8 | * Ingo Molnar <mingo@redhat.com>, 1999, 2000 | 8 | * Ingo Molnar <mingo@redhat.com>, 1999, 2000 |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define APIC_DEFAULT_PHYS_BASE 0xfee00000 | 11 | #define IO_APIC_DEFAULT_PHYS_BASE 0xfec00000 |
12 | #define APIC_DEFAULT_PHYS_BASE 0xfee00000 | ||
12 | 13 | ||
13 | #define APIC_ID 0x20 | 14 | #define APIC_ID 0x20 |
14 | 15 | ||
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h index c993e9e0fed4..e8de2f6f5ca5 100644 --- a/arch/x86/include/asm/desc.h +++ b/arch/x86/include/asm/desc.h | |||
@@ -291,11 +291,24 @@ static inline unsigned long get_desc_base(const struct desc_struct *desc) | |||
291 | return desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24); | 291 | return desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24); |
292 | } | 292 | } |
293 | 293 | ||
294 | static inline void set_desc_base(struct desc_struct *desc, unsigned long base) | ||
295 | { | ||
296 | desc->base0 = base & 0xffff; | ||
297 | desc->base1 = (base >> 16) & 0xff; | ||
298 | desc->base2 = (base >> 24) & 0xff; | ||
299 | } | ||
300 | |||
294 | static inline unsigned long get_desc_limit(const struct desc_struct *desc) | 301 | static inline unsigned long get_desc_limit(const struct desc_struct *desc) |
295 | { | 302 | { |
296 | return desc->limit0 | (desc->limit << 16); | 303 | return desc->limit0 | (desc->limit << 16); |
297 | } | 304 | } |
298 | 305 | ||
306 | static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit) | ||
307 | { | ||
308 | desc->limit0 = limit & 0xffff; | ||
309 | desc->limit = (limit >> 16) & 0xf; | ||
310 | } | ||
311 | |||
299 | static inline void _set_gate(int gate, unsigned type, void *addr, | 312 | static inline void _set_gate(int gate, unsigned type, void *addr, |
300 | unsigned dpl, unsigned ist, unsigned seg) | 313 | unsigned dpl, unsigned ist, unsigned seg) |
301 | { | 314 | { |
diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h index a6adefa28b94..9d6684849fd9 100644 --- a/arch/x86/include/asm/desc_defs.h +++ b/arch/x86/include/asm/desc_defs.h | |||
@@ -34,6 +34,12 @@ struct desc_struct { | |||
34 | }; | 34 | }; |
35 | } __attribute__((packed)); | 35 | } __attribute__((packed)); |
36 | 36 | ||
37 | #define GDT_ENTRY_INIT(flags, base, limit) { { { \ | ||
38 | .a = ((limit) & 0xffff) | (((base) & 0xffff) << 16), \ | ||
39 | .b = (((base) & 0xff0000) >> 16) | (((flags) & 0xf0ff) << 8) | \ | ||
40 | ((limit) & 0xf0000) | ((base) & 0xff000000), \ | ||
41 | } } } | ||
42 | |||
37 | enum { | 43 | enum { |
38 | GATE_INTERRUPT = 0xE, | 44 | GATE_INTERRUPT = 0xE, |
39 | GATE_TRAP = 0xF, | 45 | GATE_TRAP = 0xF, |
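
GDT_ENTRY_INIT packs (flags, base, limit) into the two 32-bit words of a descriptor, replacing the hand-encoded hex literals that the lguest.h and stackprotector.h hunks below convert. Worked check: GDT_ENTRY_INIT(0xc09b, 0, 0xfffff) reproduces the flat 4 GiB code segment previously written as {0x0000ffff, 0x00cf9b00}. The same packing on plain integers:

    #include <assert.h>
    #include <stdint.h>

    static void gdt_pack(uint16_t flags, uint32_t base, uint32_t limit,
                         uint32_t *a, uint32_t *b)
    {
            *a = (limit & 0xffff) | ((base & 0xffff) << 16);
            *b = ((base & 0xff0000) >> 16) | ((flags & 0xf0ff) << 8) |
                 (limit & 0xf0000) | (base & 0xff000000);
    }

    int main(void)
    {
            uint32_t a, b;

            gdt_pack(0xc09b, 0, 0xfffff, &a, &b); /* flat 4 GiB code segment */
            assert(a == 0x0000ffff && b == 0x00cf9b00);
            return 0;
    }
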
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index 1c3f9435f1c9..0ee770d23d0e 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h | |||
@@ -55,6 +55,24 @@ extern int dma_set_mask(struct device *dev, u64 mask); | |||
55 | extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, | 55 | extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, |
56 | dma_addr_t *dma_addr, gfp_t flag); | 56 | dma_addr_t *dma_addr, gfp_t flag); |
57 | 57 | ||
58 | static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) | ||
59 | { | ||
60 | if (!dev->dma_mask) | ||
61 | return 0; | ||
62 | |||
63 | return addr + size <= *dev->dma_mask; | ||
64 | } | ||
65 | |||
66 | static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) | ||
67 | { | ||
68 | return paddr; | ||
69 | } | ||
70 | |||
71 | static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) | ||
72 | { | ||
73 | return daddr; | ||
74 | } | ||
75 | |||
58 | static inline void | 76 | static inline void |
59 | dma_cache_sync(struct device *dev, void *vaddr, size_t size, | 77 | dma_cache_sync(struct device *dev, void *vaddr, size_t size, |
60 | enum dma_data_direction dir) | 78 | enum dma_data_direction dir) |
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h index bd2c6511c887..db24c2278be0 100644 --- a/arch/x86/include/asm/ftrace.h +++ b/arch/x86/include/asm/ftrace.h | |||
@@ -28,13 +28,6 @@ | |||
28 | 28 | ||
29 | #endif | 29 | #endif |
30 | 30 | ||
31 | /* FIXME: I don't want to stay hardcoded */ | ||
32 | #ifdef CONFIG_X86_64 | ||
33 | # define FTRACE_SYSCALL_MAX 296 | ||
34 | #else | ||
35 | # define FTRACE_SYSCALL_MAX 333 | ||
36 | #endif | ||
37 | |||
38 | #ifdef CONFIG_FUNCTION_TRACER | 31 | #ifdef CONFIG_FUNCTION_TRACER |
39 | #define MCOUNT_ADDR ((long)(mcount)) | 32 | #define MCOUNT_ADDR ((long)(mcount)) |
40 | #define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */ | 33 | #define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */ |
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h index 330ee807f89e..85232d32fcb8 100644 --- a/arch/x86/include/asm/io_apic.h +++ b/arch/x86/include/asm/io_apic.h | |||
@@ -150,11 +150,10 @@ extern int timer_through_8259; | |||
150 | #define io_apic_assign_pci_irqs \ | 150 | #define io_apic_assign_pci_irqs \ |
151 | (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs) | 151 | (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs) |
152 | 152 | ||
153 | #ifdef CONFIG_ACPI | 153 | extern u8 io_apic_unique_id(u8 id); |
154 | extern int io_apic_get_unique_id(int ioapic, int apic_id); | 154 | extern int io_apic_get_unique_id(int ioapic, int apic_id); |
155 | extern int io_apic_get_version(int ioapic); | 155 | extern int io_apic_get_version(int ioapic); |
156 | extern int io_apic_get_redir_entries(int ioapic); | 156 | extern int io_apic_get_redir_entries(int ioapic); |
157 | #endif /* CONFIG_ACPI */ | ||
158 | 157 | ||
159 | struct io_apic_irq_attr; | 158 | struct io_apic_irq_attr; |
160 | extern int io_apic_set_pci_routing(struct device *dev, int irq, | 159 | extern int io_apic_set_pci_routing(struct device *dev, int irq, |
@@ -177,6 +176,16 @@ extern int setup_ioapic_entry(int apic, int irq, | |||
177 | int polarity, int vector, int pin); | 176 | int polarity, int vector, int pin); |
178 | extern void ioapic_write_entry(int apic, int pin, | 177 | extern void ioapic_write_entry(int apic, int pin, |
179 | struct IO_APIC_route_entry e); | 178 | struct IO_APIC_route_entry e); |
179 | |||
180 | struct mp_ioapic_gsi { | ||
181 | int gsi_base; | ||
182 | int gsi_end; | ||
183 | }; | ||
184 | extern struct mp_ioapic_gsi mp_gsi_routing[]; | ||
185 | int mp_find_ioapic(int gsi); | ||
186 | int mp_find_ioapic_pin(int ioapic, int gsi); | ||
187 | void __init mp_register_ioapic(int id, u32 address, u32 gsi_base); | ||
188 | |||
180 | #else /* !CONFIG_X86_IO_APIC */ | 189 | #else /* !CONFIG_X86_IO_APIC */ |
181 | #define io_apic_assign_pci_irqs 0 | 190 | #define io_apic_assign_pci_irqs 0 |
182 | static const int timer_through_8259 = 0; | 191 | static const int timer_through_8259 = 0; |
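
The new mp_gsi_routing[] table records each I/O APIC's global system interrupt window, so a GSI can be resolved back to an (apic, pin) pair. A sketch of the lookup these helpers enable (the negative failure return of mp_find_ioapic() is an assumption here; error handling is illustrative):

    #include <linux/errno.h>
    #include <asm/io_apic.h>

    /* map a GSI to the I/O APIC that owns it and the pin within that APIC */
    static int gsi_to_apic_pin(int gsi, int *apic, int *pin)
    {
            int ioapic = mp_find_ioapic(gsi);

            if (ioapic < 0)
                    return -EINVAL;

            *apic = ioapic;
            *pin  = mp_find_ioapic_pin(ioapic, gsi);
            return 0;
    }
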
diff --git a/arch/x86/include/asm/ioctls.h b/arch/x86/include/asm/ioctls.h index 0d5b23b7b06e..ec34c760665e 100644 --- a/arch/x86/include/asm/ioctls.h +++ b/arch/x86/include/asm/ioctls.h | |||
@@ -1,94 +1 @@ | |||
1 | #ifndef _ASM_X86_IOCTLS_H | #include <asm-generic/ioctls.h> | |
2 | #define _ASM_X86_IOCTLS_H | ||
3 | |||
4 | #include <asm/ioctl.h> | ||
5 | |||
6 | /* 0x54 is just a magic number to make these relatively unique ('T') */ | ||
7 | |||
8 | #define TCGETS 0x5401 | ||
9 | #define TCSETS 0x5402 /* Clashes with SNDCTL_TMR_START sound ioctl */ | ||
10 | #define TCSETSW 0x5403 | ||
11 | #define TCSETSF 0x5404 | ||
12 | #define TCGETA 0x5405 | ||
13 | #define TCSETA 0x5406 | ||
14 | #define TCSETAW 0x5407 | ||
15 | #define TCSETAF 0x5408 | ||
16 | #define TCSBRK 0x5409 | ||
17 | #define TCXONC 0x540A | ||
18 | #define TCFLSH 0x540B | ||
19 | #define TIOCEXCL 0x540C | ||
20 | #define TIOCNXCL 0x540D | ||
21 | #define TIOCSCTTY 0x540E | ||
22 | #define TIOCGPGRP 0x540F | ||
23 | #define TIOCSPGRP 0x5410 | ||
24 | #define TIOCOUTQ 0x5411 | ||
25 | #define TIOCSTI 0x5412 | ||
26 | #define TIOCGWINSZ 0x5413 | ||
27 | #define TIOCSWINSZ 0x5414 | ||
28 | #define TIOCMGET 0x5415 | ||
29 | #define TIOCMBIS 0x5416 | ||
30 | #define TIOCMBIC 0x5417 | ||
31 | #define TIOCMSET 0x5418 | ||
32 | #define TIOCGSOFTCAR 0x5419 | ||
33 | #define TIOCSSOFTCAR 0x541A | ||
34 | #define FIONREAD 0x541B | ||
35 | #define TIOCINQ FIONREAD | ||
36 | #define TIOCLINUX 0x541C | ||
37 | #define TIOCCONS 0x541D | ||
38 | #define TIOCGSERIAL 0x541E | ||
39 | #define TIOCSSERIAL 0x541F | ||
40 | #define TIOCPKT 0x5420 | ||
41 | #define FIONBIO 0x5421 | ||
42 | #define TIOCNOTTY 0x5422 | ||
43 | #define TIOCSETD 0x5423 | ||
44 | #define TIOCGETD 0x5424 | ||
45 | #define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */ | ||
46 | /* #define TIOCTTYGSTRUCT 0x5426 - Former debugging-only ioctl */ | ||
47 | #define TIOCSBRK 0x5427 /* BSD compatibility */ | ||
48 | #define TIOCCBRK 0x5428 /* BSD compatibility */ | ||
49 | #define TIOCGSID 0x5429 /* Return the session ID of FD */ | ||
50 | #define TCGETS2 _IOR('T', 0x2A, struct termios2) | ||
51 | #define TCSETS2 _IOW('T', 0x2B, struct termios2) | ||
52 | #define TCSETSW2 _IOW('T', 0x2C, struct termios2) | ||
53 | #define TCSETSF2 _IOW('T', 0x2D, struct termios2) | ||
54 | #define TIOCGRS485 0x542E | ||
55 | #define TIOCSRS485 0x542F | ||
56 | #define TIOCGPTN _IOR('T', 0x30, unsigned int) | ||
57 | /* Get Pty Number (of pty-mux device) */ | ||
58 | #define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */ | ||
59 | #define TCGETX 0x5432 /* SYS5 TCGETX compatibility */ | ||
60 | #define TCSETX 0x5433 | ||
61 | #define TCSETXF 0x5434 | ||
62 | #define TCSETXW 0x5435 | ||
63 | |||
64 | #define FIONCLEX 0x5450 | ||
65 | #define FIOCLEX 0x5451 | ||
66 | #define FIOASYNC 0x5452 | ||
67 | #define TIOCSERCONFIG 0x5453 | ||
68 | #define TIOCSERGWILD 0x5454 | ||
69 | #define TIOCSERSWILD 0x5455 | ||
70 | #define TIOCGLCKTRMIOS 0x5456 | ||
71 | #define TIOCSLCKTRMIOS 0x5457 | ||
72 | #define TIOCSERGSTRUCT 0x5458 /* For debugging only */ | ||
73 | #define TIOCSERGETLSR 0x5459 /* Get line status register */ | ||
74 | #define TIOCSERGETMULTI 0x545A /* Get multiport config */ | ||
75 | #define TIOCSERSETMULTI 0x545B /* Set multiport config */ | ||
76 | |||
77 | #define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */ | ||
78 | #define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ | ||
79 | #define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */ | ||
80 | #define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */ | ||
81 | #define FIOQSIZE 0x5460 | ||
82 | |||
83 | /* Used for packet mode */ | ||
84 | #define TIOCPKT_DATA 0 | ||
85 | #define TIOCPKT_FLUSHREAD 1 | ||
86 | #define TIOCPKT_FLUSHWRITE 2 | ||
87 | #define TIOCPKT_STOP 4 | ||
88 | #define TIOCPKT_START 8 | ||
89 | #define TIOCPKT_NOSTOP 16 | ||
90 | #define TIOCPKT_DOSTOP 32 | ||
91 | |||
92 | #define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ | ||
93 | |||
94 | #endif /* _ASM_X86_IOCTLS_H */ | ||
diff --git a/arch/x86/include/asm/ipcbuf.h b/arch/x86/include/asm/ipcbuf.h index ee678fd51594..84c7e51cb6d0 100644 --- a/arch/x86/include/asm/ipcbuf.h +++ b/arch/x86/include/asm/ipcbuf.h | |||
@@ -1,28 +1 @@ | |||
1 | #ifndef _ASM_X86_IPCBUF_H | #include <asm-generic/ipcbuf.h> | |
2 | #define _ASM_X86_IPCBUF_H | ||
3 | |||
4 | /* | ||
5 | * The ipc64_perm structure for x86 architecture. | ||
6 | * Note extra padding because this structure is passed back and forth | ||
7 | * between kernel and user space. | ||
8 | * | ||
9 | * Pad space is left for: | ||
10 | * - 32-bit mode_t and seq | ||
11 | * - 2 miscellaneous 32-bit values | ||
12 | */ | ||
13 | |||
14 | struct ipc64_perm { | ||
15 | __kernel_key_t key; | ||
16 | __kernel_uid32_t uid; | ||
17 | __kernel_gid32_t gid; | ||
18 | __kernel_uid32_t cuid; | ||
19 | __kernel_gid32_t cgid; | ||
20 | __kernel_mode_t mode; | ||
21 | unsigned short __pad1; | ||
22 | unsigned short seq; | ||
23 | unsigned short __pad2; | ||
24 | unsigned long __unused1; | ||
25 | unsigned long __unused2; | ||
26 | }; | ||
27 | |||
28 | #endif /* _ASM_X86_IPCBUF_H */ | ||
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h index c6ccbe7e81ad..9e2b952f810a 100644 --- a/arch/x86/include/asm/irqflags.h +++ b/arch/x86/include/asm/irqflags.h | |||
@@ -13,14 +13,13 @@ static inline unsigned long native_save_fl(void) | |||
13 | unsigned long flags; | 13 | unsigned long flags; |
14 | 14 | ||
15 | /* | 15 | /* |
16 | * Note: this needs to be "=r" not "=rm", because we have the | 16 | * "=rm" is safe here, because "pop" adjusts the stack before |
17 | * stack offset from what gcc expects at the time the "pop" is | 17 | * it evaluates its effective address -- this is part of the |
18 | * executed, and so a memory reference with respect to the stack | 18 | * documented behavior of the "pop" instruction. |
19 | * would end up using the wrong address. | ||
20 | */ | 19 | */ |
21 | asm volatile("# __raw_save_flags\n\t" | 20 | asm volatile("# __raw_save_flags\n\t" |
22 | "pushf ; pop %0" | 21 | "pushf ; pop %0" |
23 | : "=r" (flags) | 22 | : "=rm" (flags) |
24 | : /* no input */ | 23 | : /* no input */ |
25 | : "memory"); | 24 | : "memory"); |
26 | 25 | ||
diff --git a/arch/x86/include/asm/lguest.h b/arch/x86/include/asm/lguest.h index 5136dad57cbb..0d97deba1e35 100644 --- a/arch/x86/include/asm/lguest.h +++ b/arch/x86/include/asm/lguest.h | |||
@@ -90,8 +90,9 @@ static inline void lguest_set_ts(void) | |||
90 | } | 90 | } |
91 | 91 | ||
92 | /* Full 4G segment descriptors, suitable for CS and DS. */ | 92 | /* Full 4G segment descriptors, suitable for CS and DS. */ |
93 | #define FULL_EXEC_SEGMENT ((struct desc_struct){ { {0x0000ffff, 0x00cf9b00} } }) | 93 | #define FULL_EXEC_SEGMENT \ |
94 | #define FULL_SEGMENT ((struct desc_struct){ { {0x0000ffff, 0x00cf9300} } }) | 94 | ((struct desc_struct)GDT_ENTRY_INIT(0xc09b, 0, 0xfffff)) |
95 | #define FULL_SEGMENT ((struct desc_struct)GDT_ENTRY_INIT(0xc093, 0, 0xfffff)) | ||
95 | 96 | ||
96 | #endif /* __ASSEMBLY__ */ | 97 | #endif /* __ASSEMBLY__ */ |
97 | 98 | ||
diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h index 751af2550ed9..593e51d4643f 100644 --- a/arch/x86/include/asm/mman.h +++ b/arch/x86/include/asm/mman.h | |||
@@ -1,20 +1,8 @@ | |||
1 | #ifndef _ASM_X86_MMAN_H | 1 | #ifndef _ASM_X86_MMAN_H |
2 | #define _ASM_X86_MMAN_H | 2 | #define _ASM_X86_MMAN_H |
3 | 3 | ||
4 | #include <asm-generic/mman-common.h> | ||
5 | |||
6 | #define MAP_32BIT 0x40 /* only give out 32bit addresses */ | 4 | #define MAP_32BIT 0x40 /* only give out 32bit addresses */ |
7 | 5 | ||
8 | #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ | 6 | #include <asm-generic/mman.h> |
9 | #define MAP_DENYWRITE 0x0800 /* ETXTBSY */ | ||
10 | #define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ | ||
11 | #define MAP_LOCKED 0x2000 /* pages are locked */ | ||
12 | #define MAP_NORESERVE 0x4000 /* don't check for reservations */ | ||
13 | #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ | ||
14 | #define MAP_NONBLOCK 0x10000 /* do not block on IO */ | ||
15 | #define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */ | ||
16 | |||
17 | #define MCL_CURRENT 1 /* lock all current mappings */ | ||
18 | #define MCL_FUTURE 2 /* lock all future mappings */ | ||
19 | 7 | ||
20 | #endif /* _ASM_X86_MMAN_H */ | 8 | #endif /* _ASM_X86_MMAN_H */ |
diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h index e959c4afab59..3e2ce58a31a3 100644 --- a/arch/x86/include/asm/module.h +++ b/arch/x86/include/asm/module.h | |||
@@ -1,18 +1,7 @@ | |||
1 | #ifndef _ASM_X86_MODULE_H | 1 | #ifndef _ASM_X86_MODULE_H |
2 | #define _ASM_X86_MODULE_H | 2 | #define _ASM_X86_MODULE_H |
3 | 3 | ||
4 | /* x86_32/64 are simple */ | 4 | #include <asm-generic/module.h> |
5 | struct mod_arch_specific {}; | ||
6 | |||
7 | #ifdef CONFIG_X86_32 | ||
8 | # define Elf_Shdr Elf32_Shdr | ||
9 | # define Elf_Sym Elf32_Sym | ||
10 | # define Elf_Ehdr Elf32_Ehdr | ||
11 | #else | ||
12 | # define Elf_Shdr Elf64_Shdr | ||
13 | # define Elf_Sym Elf64_Sym | ||
14 | # define Elf_Ehdr Elf64_Ehdr | ||
15 | #endif | ||
16 | 5 | ||
17 | #ifdef CONFIG_X86_64 | 6 | #ifdef CONFIG_X86_64 |
18 | /* X86_64 does not define MODULE_PROC_FAMILY */ | 7 | /* X86_64 does not define MODULE_PROC_FAMILY */ |
diff --git a/arch/x86/include/asm/msgbuf.h b/arch/x86/include/asm/msgbuf.h index 7e4e9481f51c..809134c644a6 100644 --- a/arch/x86/include/asm/msgbuf.h +++ b/arch/x86/include/asm/msgbuf.h | |||
@@ -1,39 +1 @@ | |||
1 | #ifndef _ASM_X86_MSGBUF_H | #include <asm-generic/msgbuf.h> | |
2 | #define _ASM_X86_MSGBUF_H | ||
3 | |||
4 | /* | ||
5 | * The msqid64_ds structure for i386 architecture. | ||
6 | * Note extra padding because this structure is passed back and forth | ||
7 | * between kernel and user space. | ||
8 | * | ||
9 | * Pad space on i386 is left for: | ||
10 | * - 64-bit time_t to solve y2038 problem | ||
11 | * - 2 miscellaneous 32-bit values | ||
12 | * | ||
13 | * Pad space on x8664 is left for: | ||
14 | * - 2 miscellaneous 64-bit values | ||
15 | */ | ||
16 | struct msqid64_ds { | ||
17 | struct ipc64_perm msg_perm; | ||
18 | __kernel_time_t msg_stime; /* last msgsnd time */ | ||
19 | #ifdef __i386__ | ||
20 | unsigned long __unused1; | ||
21 | #endif | ||
22 | __kernel_time_t msg_rtime; /* last msgrcv time */ | ||
23 | #ifdef __i386__ | ||
24 | unsigned long __unused2; | ||
25 | #endif | ||
26 | __kernel_time_t msg_ctime; /* last change time */ | ||
27 | #ifdef __i386__ | ||
28 | unsigned long __unused3; | ||
29 | #endif | ||
30 | unsigned long msg_cbytes; /* current number of bytes on queue */ | ||
31 | unsigned long msg_qnum; /* number of messages in queue */ | ||
32 | unsigned long msg_qbytes; /* max number of bytes on queue */ | ||
33 | __kernel_pid_t msg_lspid; /* pid of last msgsnd */ | ||
34 | __kernel_pid_t msg_lrpid; /* last receive pid */ | ||
35 | unsigned long __unused4; | ||
36 | unsigned long __unused5; | ||
37 | }; | ||
38 | |||
39 | #endif /* _ASM_X86_MSGBUF_H */ | ||
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h index c86e5ed4af51..e63cf7d441e1 100644 --- a/arch/x86/include/asm/nmi.h +++ b/arch/x86/include/asm/nmi.h | |||
@@ -45,8 +45,8 @@ extern int proc_nmi_enabled(struct ctl_table *, int , struct file *, | |||
45 | void __user *, size_t *, loff_t *); | 45 | void __user *, size_t *, loff_t *); |
46 | extern int unknown_nmi_panic; | 46 | extern int unknown_nmi_panic; |
47 | 47 | ||
48 | void __trigger_all_cpu_backtrace(void); | 48 | void arch_trigger_all_cpu_backtrace(void); |
49 | #define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace() | 49 | #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace |
50 | 50 | ||
51 | static inline void localise_nmi_watchdog(void) | 51 | static inline void localise_nmi_watchdog(void) |
52 | { | 52 | { |
diff --git a/arch/x86/include/asm/param.h b/arch/x86/include/asm/param.h index 6f0d0422f4ca..965d45427975 100644 --- a/arch/x86/include/asm/param.h +++ b/arch/x86/include/asm/param.h | |||
@@ -1,22 +1 @@ | |||
1 | #ifndef _ASM_X86_PARAM_H | #include <asm-generic/param.h> | |
2 | #define _ASM_X86_PARAM_H | ||
3 | |||
4 | #ifdef __KERNEL__ | ||
5 | # define HZ CONFIG_HZ /* Internal kernel timer frequency */ | ||
6 | # define USER_HZ 100 /* some user interfaces are */ | ||
7 | # define CLOCKS_PER_SEC (USER_HZ) /* in "ticks" like times() */ | ||
8 | #endif | ||
9 | |||
10 | #ifndef HZ | ||
11 | #define HZ 100 | ||
12 | #endif | ||
13 | |||
14 | #define EXEC_PAGESIZE 4096 | ||
15 | |||
16 | #ifndef NOGROUP | ||
17 | #define NOGROUP (-1) | ||
18 | #endif | ||
19 | |||
20 | #define MAXHOSTNAMELEN 64 /* max length of hostname */ | ||
21 | |||
22 | #endif /* _ASM_X86_PARAM_H */ | ||
diff --git a/arch/x86/include/asm/perf_counter.h b/arch/x86/include/asm/perf_counter.h index fa64e401589d..e7b7c938ae27 100644 --- a/arch/x86/include/asm/perf_counter.h +++ b/arch/x86/include/asm/perf_counter.h | |||
@@ -84,6 +84,16 @@ union cpuid10_edx { | |||
84 | #define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b | 84 | #define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b |
85 | #define X86_PMC_IDX_FIXED_BUS_CYCLES (X86_PMC_IDX_FIXED + 2) | 85 | #define X86_PMC_IDX_FIXED_BUS_CYCLES (X86_PMC_IDX_FIXED + 2) |
86 | 86 | ||
87 | /* | ||
88 | * We model BTS tracing as another fixed-mode PMC. | ||
89 | * | ||
90 | * We choose a value in the middle of the fixed counter range, since lower | ||
91 | * values are used by actual fixed counters and higher values are used | ||
92 | * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr. | ||
93 | */ | ||
94 | #define X86_PMC_IDX_FIXED_BTS (X86_PMC_IDX_FIXED + 16) | ||
95 | |||
96 | |||
87 | #ifdef CONFIG_PERF_COUNTERS | 97 | #ifdef CONFIG_PERF_COUNTERS |
88 | extern void init_hw_perf_counters(void); | 98 | extern void init_hw_perf_counters(void); |
89 | extern void perf_counters_lapic_init(void); | 99 | extern void perf_counters_lapic_init(void); |
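
The comment above picks X86_PMC_IDX_FIXED + 16 so the BTS pseudo-counter collides neither with the real fixed counters (low indices) nor with the status/overflow bits at the top of PERF_GLOBAL_STATUS. A quick layout check (the base value 32 is assumed from the rest of this header, not shown in the hunk):

    #include <assert.h>

    #define X86_PMC_IDX_FIXED               32  /* assumed base of fixed range */
    #define X86_PMC_IDX_FIXED_BUS_CYCLES    (X86_PMC_IDX_FIXED + 2)
    #define X86_PMC_IDX_FIXED_BTS           (X86_PMC_IDX_FIXED + 16)

    int main(void)
    {
            /* above every architectural fixed counter, below bits 62/63 of
             * PERF_GLOBAL_STATUS where the status indications live */
            assert(X86_PMC_IDX_FIXED_BTS > X86_PMC_IDX_FIXED_BUS_CYCLES);
            assert(X86_PMC_IDX_FIXED_BTS < 62);
            return 0;
    }
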
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 3cc06e3fceb8..16748077559a 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #define _ASM_X86_PGTABLE_H | 2 | #define _ASM_X86_PGTABLE_H |
3 | 3 | ||
4 | #include <asm/page.h> | 4 | #include <asm/page.h> |
5 | #include <asm/e820.h> | ||
5 | 6 | ||
6 | #include <asm/pgtable_types.h> | 7 | #include <asm/pgtable_types.h> |
7 | 8 | ||
@@ -269,10 +270,17 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) | |||
269 | 270 | ||
270 | #define canon_pgprot(p) __pgprot(massage_pgprot(p)) | 271 | #define canon_pgprot(p) __pgprot(massage_pgprot(p)) |
271 | 272 | ||
272 | static inline int is_new_memtype_allowed(unsigned long flags, | 273 | static inline int is_new_memtype_allowed(u64 paddr, unsigned long size, |
273 | unsigned long new_flags) | 274 | unsigned long flags, |
275 | unsigned long new_flags) | ||
274 | { | 276 | { |
275 | /* | 277 | /* |
278 | * PAT type is always WB for ISA. So no need to check. | ||
279 | */ | ||
280 | if (is_ISA_range(paddr, paddr + size - 1)) | ||
281 | return 1; | ||
282 | |||
283 | /* | ||
276 | * Certain new memtypes are not allowed with certain | 284 | * Certain new memtypes are not allowed with certain |
277 | * requested memtype: | 285 | * requested memtype: |
278 | * - request is uncached, return cannot be write-back | 286 | * - request is uncached, return cannot be write-back |
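
The early return above is safe because the legacy ISA window is always mapped write-back, so no PAT-type conflict is possible there. A sketch of the range test this leans on (bounds as in <asm/e820.h>; treat the exact values as assumptions):

    #include <assert.h>

    #define ISA_START_ADDRESS       0xa0000
    #define ISA_END_ADDRESS         0x100000
    #define is_ISA_range(s, e) \
            ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)

    int main(void)
    {
            /* the VGA hole is ISA; the first page above 1 MiB is not */
            assert(is_ISA_range(0xa0000, 0xaffff));
            assert(!is_ISA_range(0x100000, 0x100fff));
            return 0;
    }
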
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 2db56c57a281..e08ea043e085 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h | |||
@@ -403,7 +403,17 @@ extern unsigned long kernel_eflags; | |||
403 | extern asmlinkage void ignore_sysret(void); | 403 | extern asmlinkage void ignore_sysret(void); |
404 | #else /* X86_64 */ | 404 | #else /* X86_64 */ |
405 | #ifdef CONFIG_CC_STACKPROTECTOR | 405 | #ifdef CONFIG_CC_STACKPROTECTOR |
406 | DECLARE_PER_CPU(unsigned long, stack_canary); | 406 | /* |
407 | * Make sure stack canary segment base is cache-line aligned: | ||
408 | * "For Intel Atom processors, avoid non zero segment base address | ||
409 | * that is not aligned to cache line boundary at all cost." | ||
410 | * (Optim Ref Manual Assembly/Compiler Coding Rule 15.) | ||
411 | */ | ||
412 | struct stack_canary { | ||
413 | char __pad[20]; /* canary at %gs:20 */ | ||
414 | unsigned long canary; | ||
415 | }; | ||
416 | DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary); | ||
407 | #endif | 417 | #endif |
408 | #endif /* X86_64 */ | 418 | #endif /* X86_64 */ |
409 | 419 | ||
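
The 20-byte pad exists because gcc's i386 -fstack-protector code hardwires the canary at %gs:20; wrapping it in a struct lets the per-cpu variable carry cache-line alignment without moving that offset. A host-side check of the layout (uint32_t models the 32-bit unsigned long of i386):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    struct stack_canary {
            char __pad[20];         /* keeps the canary at offset 20 (%gs:20) */
            uint32_t canary;        /* i386 unsigned long */
    };

    int main(void)
    {
            assert(offsetof(struct stack_canary, canary) == 20);
            return 0;
    }
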
diff --git a/arch/x86/include/asm/scatterlist.h b/arch/x86/include/asm/scatterlist.h index 263d397d2eef..75af592677ec 100644 --- a/arch/x86/include/asm/scatterlist.h +++ b/arch/x86/include/asm/scatterlist.h | |||
@@ -1,33 +1,8 @@ | |||
1 | #ifndef _ASM_X86_SCATTERLIST_H | 1 | #ifndef _ASM_X86_SCATTERLIST_H |
2 | #define _ASM_X86_SCATTERLIST_H | 2 | #define _ASM_X86_SCATTERLIST_H |
3 | 3 | ||
4 | #include <asm/types.h> | ||
5 | |||
6 | struct scatterlist { | ||
7 | #ifdef CONFIG_DEBUG_SG | ||
8 | unsigned long sg_magic; | ||
9 | #endif | ||
10 | unsigned long page_link; | ||
11 | unsigned int offset; | ||
12 | unsigned int length; | ||
13 | dma_addr_t dma_address; | ||
14 | unsigned int dma_length; | ||
15 | }; | ||
16 | |||
17 | #define ARCH_HAS_SG_CHAIN | ||
18 | #define ISA_DMA_THRESHOLD (0x00ffffff) | 4 | #define ISA_DMA_THRESHOLD (0x00ffffff) |
19 | 5 | ||
20 | /* | 6 | #include <asm-generic/scatterlist.h> |
21 | * These macros should be used after a pci_map_sg call has been done | ||
22 | * to get bus addresses of each of the SG entries and their lengths. | ||
23 | * You should only work with the number of sg entries pci_map_sg | ||
24 | * returns. | ||
25 | */ | ||
26 | #define sg_dma_address(sg) ((sg)->dma_address) | ||
27 | #ifdef CONFIG_X86_32 | ||
28 | # define sg_dma_len(sg) ((sg)->length) | ||
29 | #else | ||
30 | # define sg_dma_len(sg) ((sg)->dma_length) | ||
31 | #endif | ||
32 | 7 | ||
33 | #endif /* _ASM_X86_SCATTERLIST_H */ | 8 | #endif /* _ASM_X86_SCATTERLIST_H */ |
diff --git a/arch/x86/include/asm/shmbuf.h b/arch/x86/include/asm/shmbuf.h index b51413b74971..83c05fc2de38 100644 --- a/arch/x86/include/asm/shmbuf.h +++ b/arch/x86/include/asm/shmbuf.h | |||
@@ -1,51 +1 @@ | |||
1 | #ifndef _ASM_X86_SHMBUF_H | #include <asm-generic/shmbuf.h> | |
2 | #define _ASM_X86_SHMBUF_H | ||
3 | |||
4 | /* | ||
5 | * The shmid64_ds structure for x86 architecture. | ||
6 | * Note extra padding because this structure is passed back and forth | ||
7 | * between kernel and user space. | ||
8 | * | ||
9 | * Pad space on 32 bit is left for: | ||
10 | * - 64-bit time_t to solve y2038 problem | ||
11 | * - 2 miscellaneous 32-bit values | ||
12 | * | ||
13 | * Pad space on 64 bit is left for: | ||
14 | * - 2 miscellaneous 64-bit values | ||
15 | */ | ||
16 | |||
17 | struct shmid64_ds { | ||
18 | struct ipc64_perm shm_perm; /* operation perms */ | ||
19 | size_t shm_segsz; /* size of segment (bytes) */ | ||
20 | __kernel_time_t shm_atime; /* last attach time */ | ||
21 | #ifdef __i386__ | ||
22 | unsigned long __unused1; | ||
23 | #endif | ||
24 | __kernel_time_t shm_dtime; /* last detach time */ | ||
25 | #ifdef __i386__ | ||
26 | unsigned long __unused2; | ||
27 | #endif | ||
28 | __kernel_time_t shm_ctime; /* last change time */ | ||
29 | #ifdef __i386__ | ||
30 | unsigned long __unused3; | ||
31 | #endif | ||
32 | __kernel_pid_t shm_cpid; /* pid of creator */ | ||
33 | __kernel_pid_t shm_lpid; /* pid of last operator */ | ||
34 | unsigned long shm_nattch; /* no. of current attaches */ | ||
35 | unsigned long __unused4; | ||
36 | unsigned long __unused5; | ||
37 | }; | ||
38 | |||
39 | struct shminfo64 { | ||
40 | unsigned long shmmax; | ||
41 | unsigned long shmmin; | ||
42 | unsigned long shmmni; | ||
43 | unsigned long shmseg; | ||
44 | unsigned long shmall; | ||
45 | unsigned long __unused1; | ||
46 | unsigned long __unused2; | ||
47 | unsigned long __unused3; | ||
48 | unsigned long __unused4; | ||
49 | }; | ||
50 | |||
51 | #endif /* _ASM_X86_SHMBUF_H */ | ||
diff --git a/arch/x86/include/asm/socket.h b/arch/x86/include/asm/socket.h index ca8bf2cd0ba9..6b71384b9d8b 100644 --- a/arch/x86/include/asm/socket.h +++ b/arch/x86/include/asm/socket.h | |||
@@ -1,60 +1 @@ | |||
1 | #ifndef _ASM_X86_SOCKET_H | #include <asm-generic/socket.h> | |
2 | #define _ASM_X86_SOCKET_H | ||
3 | |||
4 | #include <asm/sockios.h> | ||
5 | |||
6 | /* For setsockopt(2) */ | ||
7 | #define SOL_SOCKET 1 | ||
8 | |||
9 | #define SO_DEBUG 1 | ||
10 | #define SO_REUSEADDR 2 | ||
11 | #define SO_TYPE 3 | ||
12 | #define SO_ERROR 4 | ||
13 | #define SO_DONTROUTE 5 | ||
14 | #define SO_BROADCAST 6 | ||
15 | #define SO_SNDBUF 7 | ||
16 | #define SO_RCVBUF 8 | ||
17 | #define SO_SNDBUFFORCE 32 | ||
18 | #define SO_RCVBUFFORCE 33 | ||
19 | #define SO_KEEPALIVE 9 | ||
20 | #define SO_OOBINLINE 10 | ||
21 | #define SO_NO_CHECK 11 | ||
22 | #define SO_PRIORITY 12 | ||
23 | #define SO_LINGER 13 | ||
24 | #define SO_BSDCOMPAT 14 | ||
25 | /* To add :#define SO_REUSEPORT 15 */ | ||
26 | #define SO_PASSCRED 16 | ||
27 | #define SO_PEERCRED 17 | ||
28 | #define SO_RCVLOWAT 18 | ||
29 | #define SO_SNDLOWAT 19 | ||
30 | #define SO_RCVTIMEO 20 | ||
31 | #define SO_SNDTIMEO 21 | ||
32 | |||
33 | /* Security levels - as per NRL IPv6 - don't actually do anything */ | ||
34 | #define SO_SECURITY_AUTHENTICATION 22 | ||
35 | #define SO_SECURITY_ENCRYPTION_TRANSPORT 23 | ||
36 | #define SO_SECURITY_ENCRYPTION_NETWORK 24 | ||
37 | |||
38 | #define SO_BINDTODEVICE 25 | ||
39 | |||
40 | /* Socket filtering */ | ||
41 | #define SO_ATTACH_FILTER 26 | ||
42 | #define SO_DETACH_FILTER 27 | ||
43 | |||
44 | #define SO_PEERNAME 28 | ||
45 | #define SO_TIMESTAMP 29 | ||
46 | #define SCM_TIMESTAMP SO_TIMESTAMP | ||
47 | |||
48 | #define SO_ACCEPTCONN 30 | ||
49 | |||
50 | #define SO_PEERSEC 31 | ||
51 | #define SO_PASSSEC 34 | ||
52 | #define SO_TIMESTAMPNS 35 | ||
53 | #define SCM_TIMESTAMPNS SO_TIMESTAMPNS | ||
54 | |||
55 | #define SO_MARK 36 | ||
56 | |||
57 | #define SO_TIMESTAMPING 37 | ||
58 | #define SCM_TIMESTAMPING SO_TIMESTAMPING | ||
59 | |||
60 | #endif /* _ASM_X86_SOCKET_H */ | ||
diff --git a/arch/x86/include/asm/sockios.h b/arch/x86/include/asm/sockios.h index 49cc72b5d3c9..def6d4746ee7 100644 --- a/arch/x86/include/asm/sockios.h +++ b/arch/x86/include/asm/sockios.h | |||
@@ -1,13 +1 @@ | |||
1 | #ifndef _ASM_X86_SOCKIOS_H | #include <asm-generic/sockios.h> | |
2 | #define _ASM_X86_SOCKIOS_H | ||
3 | |||
4 | /* Socket-level I/O control calls. */ | ||
5 | #define FIOSETOWN 0x8901 | ||
6 | #define SIOCSPGRP 0x8902 | ||
7 | #define FIOGETOWN 0x8903 | ||
8 | #define SIOCGPGRP 0x8904 | ||
9 | #define SIOCATMARK 0x8905 | ||
10 | #define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ | ||
11 | #define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ | ||
12 | |||
13 | #endif /* _ASM_X86_SOCKIOS_H */ | ||
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h index c2d742c6e15f..157517763565 100644 --- a/arch/x86/include/asm/stackprotector.h +++ b/arch/x86/include/asm/stackprotector.h | |||
@@ -48,7 +48,7 @@ | |||
48 | * head_32 for boot CPU and setup_per_cpu_areas() for others. | 48 | * head_32 for boot CPU and setup_per_cpu_areas() for others. |
49 | */ | 49 | */ |
50 | #define GDT_STACK_CANARY_INIT \ | 50 | #define GDT_STACK_CANARY_INIT \ |
51 | [GDT_ENTRY_STACK_CANARY] = { { { 0x00000018, 0x00409000 } } }, | 51 | [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18), |
52 | 52 | ||
53 | /* | 53 | /* |
54 | * Initialize the stackprotector canary value. | 54 | * Initialize the stackprotector canary value. |
@@ -78,21 +78,19 @@ static __always_inline void boot_init_stack_canary(void) | |||
78 | #ifdef CONFIG_X86_64 | 78 | #ifdef CONFIG_X86_64 |
79 | percpu_write(irq_stack_union.stack_canary, canary); | 79 | percpu_write(irq_stack_union.stack_canary, canary); |
80 | #else | 80 | #else |
81 | percpu_write(stack_canary, canary); | 81 | percpu_write(stack_canary.canary, canary); |
82 | #endif | 82 | #endif |
83 | } | 83 | } |
84 | 84 | ||
85 | static inline void setup_stack_canary_segment(int cpu) | 85 | static inline void setup_stack_canary_segment(int cpu) |
86 | { | 86 | { |
87 | #ifdef CONFIG_X86_32 | 87 | #ifdef CONFIG_X86_32 |
88 | unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu) - 20; | 88 | unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu); |
89 | struct desc_struct *gdt_table = get_cpu_gdt_table(cpu); | 89 | struct desc_struct *gdt_table = get_cpu_gdt_table(cpu); |
90 | struct desc_struct desc; | 90 | struct desc_struct desc; |
91 | 91 | ||
92 | desc = gdt_table[GDT_ENTRY_STACK_CANARY]; | 92 | desc = gdt_table[GDT_ENTRY_STACK_CANARY]; |
93 | desc.base0 = canary & 0xffff; | 93 | set_desc_base(&desc, canary); |
94 | desc.base1 = (canary >> 16) & 0xff; | ||
95 | desc.base2 = (canary >> 24) & 0xff; | ||
96 | write_gdt_entry(gdt_table, GDT_ENTRY_STACK_CANARY, &desc, DESCTYPE_S); | 94 | write_gdt_entry(gdt_table, GDT_ENTRY_STACK_CANARY, &desc, DESCTYPE_S); |
97 | #endif | 95 | #endif |
98 | } | 96 | } |
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h index 643c59b4bc6e..f08f97374892 100644 --- a/arch/x86/include/asm/system.h +++ b/arch/x86/include/asm/system.h | |||
@@ -31,7 +31,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, | |||
31 | "movl %P[task_canary](%[next]), %%ebx\n\t" \ | 31 | "movl %P[task_canary](%[next]), %%ebx\n\t" \ |
32 | "movl %%ebx, "__percpu_arg([stack_canary])"\n\t" | 32 | "movl %%ebx, "__percpu_arg([stack_canary])"\n\t" |
33 | #define __switch_canary_oparam \ | 33 | #define __switch_canary_oparam \ |
34 | , [stack_canary] "=m" (per_cpu_var(stack_canary)) | 34 | , [stack_canary] "=m" (per_cpu_var(stack_canary.canary)) |
35 | #define __switch_canary_iparam \ | 35 | #define __switch_canary_iparam \ |
36 | , [task_canary] "i" (offsetof(struct task_struct, stack_canary)) | 36 | , [task_canary] "i" (offsetof(struct task_struct, stack_canary)) |
37 | #else /* CC_STACKPROTECTOR */ | 37 | #else /* CC_STACKPROTECTOR */ |
@@ -150,33 +150,6 @@ do { \ | |||
150 | #endif | 150 | #endif |
151 | 151 | ||
152 | #ifdef __KERNEL__ | 152 | #ifdef __KERNEL__ |
153 | #define _set_base(addr, base) do { unsigned long __pr; \ | ||
154 | __asm__ __volatile__ ("movw %%dx,%1\n\t" \ | ||
155 | "rorl $16,%%edx\n\t" \ | ||
156 | "movb %%dl,%2\n\t" \ | ||
157 | "movb %%dh,%3" \ | ||
158 | :"=&d" (__pr) \ | ||
159 | :"m" (*((addr)+2)), \ | ||
160 | "m" (*((addr)+4)), \ | ||
161 | "m" (*((addr)+7)), \ | ||
162 | "0" (base) \ | ||
163 | ); } while (0) | ||
164 | |||
165 | #define _set_limit(addr, limit) do { unsigned long __lr; \ | ||
166 | __asm__ __volatile__ ("movw %%dx,%1\n\t" \ | ||
167 | "rorl $16,%%edx\n\t" \ | ||
168 | "movb %2,%%dh\n\t" \ | ||
169 | "andb $0xf0,%%dh\n\t" \ | ||
170 | "orb %%dh,%%dl\n\t" \ | ||
171 | "movb %%dl,%2" \ | ||
172 | :"=&d" (__lr) \ | ||
173 | :"m" (*(addr)), \ | ||
174 | "m" (*((addr)+6)), \ | ||
175 | "0" (limit) \ | ||
176 | ); } while (0) | ||
177 | |||
178 | #define set_base(ldt, base) _set_base(((char *)&(ldt)) , (base)) | ||
179 | #define set_limit(ldt, limit) _set_limit(((char *)&(ldt)) , ((limit)-1)) | ||
180 | 153 | ||
181 | extern void native_load_gs_index(unsigned); | 154 | extern void native_load_gs_index(unsigned); |
182 | 155 | ||
diff --git a/arch/x86/include/asm/termbits.h b/arch/x86/include/asm/termbits.h index af1b70ea440f..3935b106de79 100644 --- a/arch/x86/include/asm/termbits.h +++ b/arch/x86/include/asm/termbits.h | |||
@@ -1,198 +1 @@ | |||
1 | #ifndef _ASM_X86_TERMBITS_H | #include <asm-generic/termbits.h> | |
2 | #define _ASM_X86_TERMBITS_H | ||
3 | |||
4 | #include <linux/posix_types.h> | ||
5 | |||
6 | typedef unsigned char cc_t; | ||
7 | typedef unsigned int speed_t; | ||
8 | typedef unsigned int tcflag_t; | ||
9 | |||
10 | #define NCCS 19 | ||
11 | struct termios { | ||
12 | tcflag_t c_iflag; /* input mode flags */ | ||
13 | tcflag_t c_oflag; /* output mode flags */ | ||
14 | tcflag_t c_cflag; /* control mode flags */ | ||
15 | tcflag_t c_lflag; /* local mode flags */ | ||
16 | cc_t c_line; /* line discipline */ | ||
17 | cc_t c_cc[NCCS]; /* control characters */ | ||
18 | }; | ||
19 | |||
20 | struct termios2 { | ||
21 | tcflag_t c_iflag; /* input mode flags */ | ||
22 | tcflag_t c_oflag; /* output mode flags */ | ||
23 | tcflag_t c_cflag; /* control mode flags */ | ||
24 | tcflag_t c_lflag; /* local mode flags */ | ||
25 | cc_t c_line; /* line discipline */ | ||
26 | cc_t c_cc[NCCS]; /* control characters */ | ||
27 | speed_t c_ispeed; /* input speed */ | ||
28 | speed_t c_ospeed; /* output speed */ | ||
29 | }; | ||
30 | |||
31 | struct ktermios { | ||
32 | tcflag_t c_iflag; /* input mode flags */ | ||
33 | tcflag_t c_oflag; /* output mode flags */ | ||
34 | tcflag_t c_cflag; /* control mode flags */ | ||
35 | tcflag_t c_lflag; /* local mode flags */ | ||
36 | cc_t c_line; /* line discipline */ | ||
37 | cc_t c_cc[NCCS]; /* control characters */ | ||
38 | speed_t c_ispeed; /* input speed */ | ||
39 | speed_t c_ospeed; /* output speed */ | ||
40 | }; | ||
41 | |||
42 | /* c_cc characters */ | ||
43 | #define VINTR 0 | ||
44 | #define VQUIT 1 | ||
45 | #define VERASE 2 | ||
46 | #define VKILL 3 | ||
47 | #define VEOF 4 | ||
48 | #define VTIME 5 | ||
49 | #define VMIN 6 | ||
50 | #define VSWTC 7 | ||
51 | #define VSTART 8 | ||
52 | #define VSTOP 9 | ||
53 | #define VSUSP 10 | ||
54 | #define VEOL 11 | ||
55 | #define VREPRINT 12 | ||
56 | #define VDISCARD 13 | ||
57 | #define VWERASE 14 | ||
58 | #define VLNEXT 15 | ||
59 | #define VEOL2 16 | ||
60 | |||
61 | /* c_iflag bits */ | ||
62 | #define IGNBRK 0000001 | ||
63 | #define BRKINT 0000002 | ||
64 | #define IGNPAR 0000004 | ||
65 | #define PARMRK 0000010 | ||
66 | #define INPCK 0000020 | ||
67 | #define ISTRIP 0000040 | ||
68 | #define INLCR 0000100 | ||
69 | #define IGNCR 0000200 | ||
70 | #define ICRNL 0000400 | ||
71 | #define IUCLC 0001000 | ||
72 | #define IXON 0002000 | ||
73 | #define IXANY 0004000 | ||
74 | #define IXOFF 0010000 | ||
75 | #define IMAXBEL 0020000 | ||
76 | #define IUTF8 0040000 | ||
77 | |||
78 | /* c_oflag bits */ | ||
79 | #define OPOST 0000001 | ||
80 | #define OLCUC 0000002 | ||
81 | #define ONLCR 0000004 | ||
82 | #define OCRNL 0000010 | ||
83 | #define ONOCR 0000020 | ||
84 | #define ONLRET 0000040 | ||
85 | #define OFILL 0000100 | ||
86 | #define OFDEL 0000200 | ||
87 | #define NLDLY 0000400 | ||
88 | #define NL0 0000000 | ||
89 | #define NL1 0000400 | ||
90 | #define CRDLY 0003000 | ||
91 | #define CR0 0000000 | ||
92 | #define CR1 0001000 | ||
93 | #define CR2 0002000 | ||
94 | #define CR3 0003000 | ||
95 | #define TABDLY 0014000 | ||
96 | #define TAB0 0000000 | ||
97 | #define TAB1 0004000 | ||
98 | #define TAB2 0010000 | ||
99 | #define TAB3 0014000 | ||
100 | #define XTABS 0014000 | ||
101 | #define BSDLY 0020000 | ||
102 | #define BS0 0000000 | ||
103 | #define BS1 0020000 | ||
104 | #define VTDLY 0040000 | ||
105 | #define VT0 0000000 | ||
106 | #define VT1 0040000 | ||
107 | #define FFDLY 0100000 | ||
108 | #define FF0 0000000 | ||
109 | #define FF1 0100000 | ||
110 | |||
111 | /* c_cflag bit meaning */ | ||
112 | #define CBAUD 0010017 | ||
113 | #define B0 0000000 /* hang up */ | ||
114 | #define B50 0000001 | ||
115 | #define B75 0000002 | ||
116 | #define B110 0000003 | ||
117 | #define B134 0000004 | ||
118 | #define B150 0000005 | ||
119 | #define B200 0000006 | ||
120 | #define B300 0000007 | ||
121 | #define B600 0000010 | ||
122 | #define B1200 0000011 | ||
123 | #define B1800 0000012 | ||
124 | #define B2400 0000013 | ||
125 | #define B4800 0000014 | ||
126 | #define B9600 0000015 | ||
127 | #define B19200 0000016 | ||
128 | #define B38400 0000017 | ||
129 | #define EXTA B19200 | ||
130 | #define EXTB B38400 | ||
131 | #define CSIZE 0000060 | ||
132 | #define CS5 0000000 | ||
133 | #define CS6 0000020 | ||
134 | #define CS7 0000040 | ||
135 | #define CS8 0000060 | ||
136 | #define CSTOPB 0000100 | ||
137 | #define CREAD 0000200 | ||
138 | #define PARENB 0000400 | ||
139 | #define PARODD 0001000 | ||
140 | #define HUPCL 0002000 | ||
141 | #define CLOCAL 0004000 | ||
142 | #define CBAUDEX 0010000 | ||
143 | #define BOTHER 0010000 /* non standard rate */ | ||
144 | #define B57600 0010001 | ||
145 | #define B115200 0010002 | ||
146 | #define B230400 0010003 | ||
147 | #define B460800 0010004 | ||
148 | #define B500000 0010005 | ||
149 | #define B576000 0010006 | ||
150 | #define B921600 0010007 | ||
151 | #define B1000000 0010010 | ||
152 | #define B1152000 0010011 | ||
153 | #define B1500000 0010012 | ||
154 | #define B2000000 0010013 | ||
155 | #define B2500000 0010014 | ||
156 | #define B3000000 0010015 | ||
157 | #define B3500000 0010016 | ||
158 | #define B4000000 0010017 | ||
159 | #define CIBAUD 002003600000 /* input baud rate */ | ||
160 | #define CMSPAR 010000000000 /* mark or space (stick) parity */ | ||
161 | #define CRTSCTS 020000000000 /* flow control */ | ||
162 | |||
163 | #define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */ | ||
164 | |||
165 | /* c_lflag bits */ | ||
166 | #define ISIG 0000001 | ||
167 | #define ICANON 0000002 | ||
168 | #define XCASE 0000004 | ||
169 | #define ECHO 0000010 | ||
170 | #define ECHOE 0000020 | ||
171 | #define ECHOK 0000040 | ||
172 | #define ECHONL 0000100 | ||
173 | #define NOFLSH 0000200 | ||
174 | #define TOSTOP 0000400 | ||
175 | #define ECHOCTL 0001000 | ||
176 | #define ECHOPRT 0002000 | ||
177 | #define ECHOKE 0004000 | ||
178 | #define FLUSHO 0010000 | ||
179 | #define PENDIN 0040000 | ||
180 | #define IEXTEN 0100000 | ||
181 | |||
182 | /* tcflow() and TCXONC use these */ | ||
183 | #define TCOOFF 0 | ||
184 | #define TCOON 1 | ||
185 | #define TCIOFF 2 | ||
186 | #define TCION 3 | ||
187 | |||
188 | /* tcflush() and TCFLSH use these */ | ||
189 | #define TCIFLUSH 0 | ||
190 | #define TCOFLUSH 1 | ||
191 | #define TCIOFLUSH 2 | ||
192 | |||
193 | /* tcsetattr uses these */ | ||
194 | #define TCSANOW 0 | ||
195 | #define TCSADRAIN 1 | ||
196 | #define TCSAFLUSH 2 | ||
197 | |||
198 | #endif /* _ASM_X86_TERMBITS_H */ | ||
diff --git a/arch/x86/include/asm/termios.h b/arch/x86/include/asm/termios.h index c4ee8056baca..280d78a9d966 100644 --- a/arch/x86/include/asm/termios.h +++ b/arch/x86/include/asm/termios.h | |||
@@ -1,114 +1 @@ | |||
1 | #ifndef _ASM_X86_TERMIOS_H | #include <asm-generic/termios.h> | |
2 | #define _ASM_X86_TERMIOS_H | ||
3 | |||
4 | #include <asm/termbits.h> | ||
5 | #include <asm/ioctls.h> | ||
6 | |||
7 | struct winsize { | ||
8 | unsigned short ws_row; | ||
9 | unsigned short ws_col; | ||
10 | unsigned short ws_xpixel; | ||
11 | unsigned short ws_ypixel; | ||
12 | }; | ||
13 | |||
14 | #define NCC 8 | ||
15 | struct termio { | ||
16 | unsigned short c_iflag; /* input mode flags */ | ||
17 | unsigned short c_oflag; /* output mode flags */ | ||
18 | unsigned short c_cflag; /* control mode flags */ | ||
19 | unsigned short c_lflag; /* local mode flags */ | ||
20 | unsigned char c_line; /* line discipline */ | ||
21 | unsigned char c_cc[NCC]; /* control characters */ | ||
22 | }; | ||
23 | |||
24 | /* modem lines */ | ||
25 | #define TIOCM_LE 0x001 | ||
26 | #define TIOCM_DTR 0x002 | ||
27 | #define TIOCM_RTS 0x004 | ||
28 | #define TIOCM_ST 0x008 | ||
29 | #define TIOCM_SR 0x010 | ||
30 | #define TIOCM_CTS 0x020 | ||
31 | #define TIOCM_CAR 0x040 | ||
32 | #define TIOCM_RNG 0x080 | ||
33 | #define TIOCM_DSR 0x100 | ||
34 | #define TIOCM_CD TIOCM_CAR | ||
35 | #define TIOCM_RI TIOCM_RNG | ||
36 | #define TIOCM_OUT1 0x2000 | ||
37 | #define TIOCM_OUT2 0x4000 | ||
38 | #define TIOCM_LOOP 0x8000 | ||
39 | |||
40 | /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ | ||
41 | |||
42 | #ifdef __KERNEL__ | ||
43 | |||
44 | #include <asm/uaccess.h> | ||
45 | |||
46 | /* intr=^C quit=^\ erase=del kill=^U | ||
47 | eof=^D vtime=\0 vmin=\1 sxtc=\0 | ||
48 | start=^Q stop=^S susp=^Z eol=\0 | ||
49 | reprint=^R discard=^U werase=^W lnext=^V | ||
50 | eol2=\0 | ||
51 | */ | ||
52 | #define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0" | ||
53 | |||
54 | /* | ||
55 | * Translate a "termio" structure into a "termios". Ugh. | ||
56 | */ | ||
57 | #define SET_LOW_TERMIOS_BITS(termios, termio, x) { \ | ||
58 | unsigned short __tmp; \ | ||
59 | get_user(__tmp,&(termio)->x); \ | ||
60 | *(unsigned short *) &(termios)->x = __tmp; \ | ||
61 | } | ||
62 | |||
63 | static inline int user_termio_to_kernel_termios(struct ktermios *termios, | ||
64 | struct termio __user *termio) | ||
65 | { | ||
66 | SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); | ||
67 | SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); | ||
68 | SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); | ||
69 | SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); | ||
70 | get_user(termios->c_line, &termio->c_line); | ||
71 | return copy_from_user(termios->c_cc, termio->c_cc, NCC); | ||
72 | } | ||
73 | |||
74 | /* | ||
75 | * Translate a "termios" structure into a "termio". Ugh. | ||
76 | */ | ||
77 | static inline int kernel_termios_to_user_termio(struct termio __user *termio, | ||
78 | struct ktermios *termios) | ||
79 | { | ||
80 | put_user((termios)->c_iflag, &(termio)->c_iflag); | ||
81 | put_user((termios)->c_oflag, &(termio)->c_oflag); | ||
82 | put_user((termios)->c_cflag, &(termio)->c_cflag); | ||
83 | put_user((termios)->c_lflag, &(termio)->c_lflag); | ||
84 | put_user((termios)->c_line, &(termio)->c_line); | ||
85 | return copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); | ||
86 | } | ||
87 | |||
88 | static inline int user_termios_to_kernel_termios(struct ktermios *k, | ||
89 | struct termios2 __user *u) | ||
90 | { | ||
91 | return copy_from_user(k, u, sizeof(struct termios2)); | ||
92 | } | ||
93 | |||
94 | static inline int kernel_termios_to_user_termios(struct termios2 __user *u, | ||
95 | struct ktermios *k) | ||
96 | { | ||
97 | return copy_to_user(u, k, sizeof(struct termios2)); | ||
98 | } | ||
99 | |||
100 | static inline int user_termios_to_kernel_termios_1(struct ktermios *k, | ||
101 | struct termios __user *u) | ||
102 | { | ||
103 | return copy_from_user(k, u, sizeof(struct termios)); | ||
104 | } | ||
105 | |||
106 | static inline int kernel_termios_to_user_termios_1(struct termios __user *u, | ||
107 | struct ktermios *k) | ||
108 | { | ||
109 | return copy_to_user(u, k, sizeof(struct termios)); | ||
110 | } | ||
111 | |||
112 | #endif /* __KERNEL__ */ | ||
113 | |||
114 | #endif /* _ASM_X86_TERMIOS_H */ | ||
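The file above collapses to a one-line include because asm-generic now carries identical definitions. The conversion helpers it used to define only touch the low 16 bits of each flag word, since the legacy termio ABI has 16-bit flags while ktermios uses wider fields; the kernel macro does this with a raw 16-bit store, which bakes in little-endian layout. A small user-space sketch of the same low-half update, written portably (types are simplified placeholders):

    #include <stdio.h>

    struct old_termio  { unsigned short c_iflag; }; /* legacy 16-bit flags */
    struct new_termios { unsigned int   c_iflag; }; /* modern 32-bit flags */

    /* Overwrite only the low 16 bits, preserving any extended high bits. */
    static void set_low_bits(struct new_termios *t, const struct old_termio *o)
    {
            t->c_iflag = (t->c_iflag & ~0xffffu) | o->c_iflag;
    }

    int main(void)
    {
            struct new_termios t = { .c_iflag = 0x00030000 };
            struct old_termio  o = { .c_iflag = 0x0402 };

            set_low_bits(&t, &o);
            printf("0x%08x\n", t.c_iflag); /* prints 0x00030402 */
            return 0;
    }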
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index fad7d40b75f8..6f7786aea4fc 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h | |||
@@ -95,7 +95,7 @@ struct thread_info { | |||
95 | #define TIF_DEBUGCTLMSR 25 /* uses thread_struct.debugctlmsr */ | 95 | #define TIF_DEBUGCTLMSR 25 /* uses thread_struct.debugctlmsr */ |
96 | #define TIF_DS_AREA_MSR 26 /* uses thread_struct.ds_area_msr */ | 96 | #define TIF_DS_AREA_MSR 26 /* uses thread_struct.ds_area_msr */ |
97 | #define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */ | 97 | #define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */ |
98 | #define TIF_SYSCALL_FTRACE 28 /* for ftrace syscall instrumentation */ | 98 | #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */ |
99 | 99 | ||
100 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) | 100 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) |
101 | #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) | 101 | #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) |
@@ -118,17 +118,17 @@ struct thread_info { | |||
118 | #define _TIF_DEBUGCTLMSR (1 << TIF_DEBUGCTLMSR) | 118 | #define _TIF_DEBUGCTLMSR (1 << TIF_DEBUGCTLMSR) |
119 | #define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR) | 119 | #define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR) |
120 | #define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES) | 120 | #define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES) |
121 | #define _TIF_SYSCALL_FTRACE (1 << TIF_SYSCALL_FTRACE) | 121 | #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) |
122 | 122 | ||
123 | /* work to do in syscall_trace_enter() */ | 123 | /* work to do in syscall_trace_enter() */ |
124 | #define _TIF_WORK_SYSCALL_ENTRY \ | 124 | #define _TIF_WORK_SYSCALL_ENTRY \ |
125 | (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_FTRACE | \ | 125 | (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \ |
126 | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | _TIF_SINGLESTEP) | 126 | _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT) |
127 | 127 | ||
128 | /* work to do in syscall_trace_leave() */ | 128 | /* work to do in syscall_trace_leave() */ |
129 | #define _TIF_WORK_SYSCALL_EXIT \ | 129 | #define _TIF_WORK_SYSCALL_EXIT \ |
130 | (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \ | 130 | (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \ |
131 | _TIF_SYSCALL_FTRACE) | 131 | _TIF_SYSCALL_TRACEPOINT) |
132 | 132 | ||
133 | /* work to do on interrupt/exception return */ | 133 | /* work to do on interrupt/exception return */ |
134 | #define _TIF_WORK_MASK \ | 134 | #define _TIF_WORK_MASK \ |
@@ -137,7 +137,8 @@ struct thread_info { | |||
137 | _TIF_SINGLESTEP|_TIF_SECCOMP|_TIF_SYSCALL_EMU)) | 137 | _TIF_SINGLESTEP|_TIF_SECCOMP|_TIF_SYSCALL_EMU)) |
138 | 138 | ||
139 | /* work to do on any return to user space */ | 139 | /* work to do on any return to user space */ |
140 | #define _TIF_ALLWORK_MASK ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_FTRACE) | 140 | #define _TIF_ALLWORK_MASK \ |
141 | ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT) | ||
141 | 142 | ||
142 | /* Only used for 64 bit */ | 143 | /* Only used for 64 bit */ |
143 | #define _TIF_DO_NOTIFY_MASK \ | 144 | #define _TIF_DO_NOTIFY_MASK \ |
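Renaming TIF_SYSCALL_FTRACE to TIF_SYSCALL_TRACEPOINT has to ripple through every derived mask, because each _TIF_* constant is mechanically the flag's bit shifted into place and the work masks are OR-combinations of those constants. The pattern in miniature (bit indices are an illustrative subset):

    /* One bit index per flag ... */
    #define TIF_SYSCALL_TRACE      0
    #define TIF_SYSCALL_TRACEPOINT 28

    /* ... one mask mechanically derived from each index ... */
    #define _TIF_SYSCALL_TRACE      (1 << TIF_SYSCALL_TRACE)
    #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)

    /* ... and work masks that OR together the flags a path must check. */
    #define _TIF_WORK_SYSCALL_ENTRY \
            (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT)

    static int needs_slow_syscall_entry(unsigned long ti_flags)
    {
            return (ti_flags & _TIF_WORK_SYSCALL_ENTRY) != 0;
    }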
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h index 066ef590d7e0..26d06e052a18 100644 --- a/arch/x86/include/asm/topology.h +++ b/arch/x86/include/asm/topology.h | |||
@@ -129,25 +129,34 @@ extern unsigned long node_remap_size[]; | |||
129 | #endif | 129 | #endif |
130 | 130 | ||
131 | /* sched_domains SD_NODE_INIT for NUMA machines */ | 131 | /* sched_domains SD_NODE_INIT for NUMA machines */ |
132 | #define SD_NODE_INIT (struct sched_domain) { \ | 132 | #define SD_NODE_INIT (struct sched_domain) { \ |
133 | .min_interval = 8, \ | 133 | .min_interval = 8, \ |
134 | .max_interval = 32, \ | 134 | .max_interval = 32, \ |
135 | .busy_factor = 32, \ | 135 | .busy_factor = 32, \ |
136 | .imbalance_pct = 125, \ | 136 | .imbalance_pct = 125, \ |
137 | .cache_nice_tries = SD_CACHE_NICE_TRIES, \ | 137 | .cache_nice_tries = SD_CACHE_NICE_TRIES, \ |
138 | .busy_idx = 3, \ | 138 | .busy_idx = 3, \ |
139 | .idle_idx = SD_IDLE_IDX, \ | 139 | .idle_idx = SD_IDLE_IDX, \ |
140 | .newidle_idx = SD_NEWIDLE_IDX, \ | 140 | .newidle_idx = SD_NEWIDLE_IDX, \ |
141 | .wake_idx = 1, \ | 141 | .wake_idx = 1, \ |
142 | .forkexec_idx = SD_FORKEXEC_IDX, \ | 142 | .forkexec_idx = SD_FORKEXEC_IDX, \ |
143 | .flags = SD_LOAD_BALANCE \ | 143 | \ |
144 | | SD_BALANCE_EXEC \ | 144 | .flags = 1*SD_LOAD_BALANCE \ |
145 | | SD_BALANCE_FORK \ | 145 | | 1*SD_BALANCE_NEWIDLE \ |
146 | | SD_WAKE_AFFINE \ | 146 | | 1*SD_BALANCE_EXEC \ |
147 | | SD_WAKE_BALANCE \ | 147 | | 1*SD_BALANCE_FORK \ |
148 | | SD_SERIALIZE, \ | 148 | | 0*SD_WAKE_IDLE \ |
149 | .last_balance = jiffies, \ | 149 | | 1*SD_WAKE_AFFINE \ |
150 | .balance_interval = 1, \ | 150 | | 1*SD_WAKE_BALANCE \ |
151 | | 0*SD_SHARE_CPUPOWER \ | ||
152 | | 0*SD_POWERSAVINGS_BALANCE \ | ||
153 | | 0*SD_SHARE_PKG_RESOURCES \ | ||
154 | | 1*SD_SERIALIZE \ | ||
155 | | 1*SD_WAKE_IDLE_FAR \ | ||
156 | | 0*SD_PREFER_SIBLING \ | ||
157 | , \ | ||
158 | .last_balance = jiffies, \ | ||
159 | .balance_interval = 1, \ | ||
151 | } | 160 | } |
152 | 161 | ||
153 | #ifdef CONFIG_X86_64_ACPI_NUMA | 162 | #ifdef CONFIG_X86_64_ACPI_NUMA |
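The rewritten .flags initializer multiplies every flag by 0 or 1 instead of simply omitting the disabled ones: the compiler folds the arithmetic to the same constant, but the complete menu of flags stays visible and toggling one is a single-digit edit. The trick in isolation, with made-up flag values:

    #define SD_LOAD_BALANCE 0x01
    #define SD_WAKE_IDLE    0x02
    #define SD_SERIALIZE    0x04

    /* 1*FLAG enables it; 0*FLAG documents that it was considered and
     * deliberately left off. Both forms compile to a constant. */
    #define EXAMPLE_FLAGS  (1*SD_LOAD_BALANCE \
                          | 0*SD_WAKE_IDLE    \
                          | 1*SD_SERIALIZE)    /* == 0x05 */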
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h index bfd74c032fca..4da91ad69e0d 100644 --- a/arch/x86/include/asm/traps.h +++ b/arch/x86/include/asm/traps.h | |||
@@ -81,9 +81,7 @@ extern int panic_on_unrecovered_nmi; | |||
81 | 81 | ||
82 | void math_error(void __user *); | 82 | void math_error(void __user *); |
83 | void math_emulate(struct math_emu_info *); | 83 | void math_emulate(struct math_emu_info *); |
84 | #ifdef CONFIG_X86_32 | 84 | #ifndef CONFIG_X86_32 |
85 | unsigned long patch_espfix_desc(unsigned long, unsigned long); | ||
86 | #else | ||
87 | asmlinkage void smp_thermal_interrupt(void); | 85 | asmlinkage void smp_thermal_interrupt(void); |
88 | asmlinkage void mce_threshold_interrupt(void); | 86 | asmlinkage void mce_threshold_interrupt(void); |
89 | #endif | 87 | #endif |
diff --git a/arch/x86/include/asm/types.h b/arch/x86/include/asm/types.h index 09b97745772f..df1da20f4534 100644 --- a/arch/x86/include/asm/types.h +++ b/arch/x86/include/asm/types.h | |||
@@ -1,19 +1,11 @@ | |||
1 | #ifndef _ASM_X86_TYPES_H | 1 | #ifndef _ASM_X86_TYPES_H |
2 | #define _ASM_X86_TYPES_H | 2 | #define _ASM_X86_TYPES_H |
3 | 3 | ||
4 | #include <asm-generic/int-ll64.h> | 4 | #define dma_addr_t dma_addr_t |
5 | 5 | ||
6 | #ifndef __ASSEMBLY__ | 6 | #include <asm-generic/types.h> |
7 | |||
8 | typedef unsigned short umode_t; | ||
9 | 7 | ||
10 | #endif /* __ASSEMBLY__ */ | ||
11 | |||
12 | /* | ||
13 | * These aren't exported outside the kernel to avoid name space clashes | ||
14 | */ | ||
15 | #ifdef __KERNEL__ | 8 | #ifdef __KERNEL__ |
16 | |||
17 | #ifndef __ASSEMBLY__ | 9 | #ifndef __ASSEMBLY__ |
18 | 10 | ||
19 | typedef u64 dma64_addr_t; | 11 | typedef u64 dma64_addr_t; |
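The odd-looking "#define dma_addr_t dma_addr_t" is the asm-generic override idiom: the generic header only supplies its default when the macro is absent, so defining the name to itself marks the type as arch-provided. The mechanism in miniature (names are illustrative, not the real headers):

    /* arch header: provide the type and claim the name. */
    typedef unsigned long long my_dma_addr_t;
    #define my_dma_addr_t my_dma_addr_t

    /* generic header: only define the fallback if the arch did not. */
    #ifndef my_dma_addr_t
    typedef unsigned int my_dma_addr_t;
    #endif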
diff --git a/arch/x86/include/asm/ucontext.h b/arch/x86/include/asm/ucontext.h index 87324cf439d9..b7c29c8017f2 100644 --- a/arch/x86/include/asm/ucontext.h +++ b/arch/x86/include/asm/ucontext.h | |||
@@ -7,12 +7,6 @@ | |||
7 | * sigcontext struct (uc_mcontext). | 7 | * sigcontext struct (uc_mcontext). |
8 | */ | 8 | */ |
9 | 9 | ||
10 | struct ucontext { | 10 | #include <asm-generic/ucontext.h> |
11 | unsigned long uc_flags; | ||
12 | struct ucontext *uc_link; | ||
13 | stack_t uc_stack; | ||
14 | struct sigcontext uc_mcontext; | ||
15 | sigset_t uc_sigmask; /* mask last for extensibility */ | ||
16 | }; | ||
17 | 11 | ||
18 | #endif /* _ASM_X86_UCONTEXT_H */ | 12 | #endif /* _ASM_X86_UCONTEXT_H */ |
diff --git a/arch/x86/include/asm/unistd_32.h b/arch/x86/include/asm/unistd_32.h index 732a30706153..8deaada61bc8 100644 --- a/arch/x86/include/asm/unistd_32.h +++ b/arch/x86/include/asm/unistd_32.h | |||
@@ -345,6 +345,8 @@ | |||
345 | 345 | ||
346 | #ifdef __KERNEL__ | 346 | #ifdef __KERNEL__ |
347 | 347 | ||
348 | #define NR_syscalls 337 | ||
349 | |||
348 | #define __ARCH_WANT_IPC_PARSE_VERSION | 350 | #define __ARCH_WANT_IPC_PARSE_VERSION |
349 | #define __ARCH_WANT_OLD_READDIR | 351 | #define __ARCH_WANT_OLD_READDIR |
350 | #define __ARCH_WANT_OLD_STAT | 352 | #define __ARCH_WANT_OLD_STAT |
diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h index 900e1617e672..b9f3c60de5f7 100644 --- a/arch/x86/include/asm/unistd_64.h +++ b/arch/x86/include/asm/unistd_64.h | |||
@@ -688,6 +688,12 @@ __SYSCALL(__NR_perf_counter_open, sys_perf_counter_open) | |||
688 | #endif /* __NO_STUBS */ | 688 | #endif /* __NO_STUBS */ |
689 | 689 | ||
690 | #ifdef __KERNEL__ | 690 | #ifdef __KERNEL__ |
691 | |||
692 | #ifndef COMPILE_OFFSETS | ||
693 | #include <asm/asm-offsets.h> | ||
694 | #define NR_syscalls (__NR_syscall_max + 1) | ||
695 | #endif | ||
696 | |||
691 | /* | 697 | /* |
692 | * "Conditional" syscalls | 698 | * "Conditional" syscalls |
693 | * | 699 | * |
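Both unistd hunks give the kernel an NR_syscalls constant: 32-bit hard-codes 337, while 64-bit derives it from __NR_syscall_max in the generated asm-offsets.h (the COMPILE_OFFSETS guard keeps that header out while it is itself being built). A plausible consumer, sketched with a hypothetical per-syscall table, bounds-checks a syscall number before indexing:

    #define NR_syscalls 337 /* the 32-bit value from the hunk above */

    /* Hypothetical per-syscall metadata, sized by NR_syscalls. */
    static void *syscall_metadata[NR_syscalls];

    static void *lookup_syscall_metadata(unsigned int nr)
    {
            if (nr >= NR_syscalls) /* reject out-of-range numbers */
                    return 0;
            return syscall_metadata[nr];
    }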
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 6b8ca3a0285d..67e929b89875 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c | |||
@@ -833,106 +833,6 @@ static int __init acpi_parse_madt_lapic_entries(void) | |||
833 | extern int es7000_plat; | 833 | extern int es7000_plat; |
834 | #endif | 834 | #endif |
835 | 835 | ||
836 | static struct { | ||
837 | int gsi_base; | ||
838 | int gsi_end; | ||
839 | } mp_ioapic_routing[MAX_IO_APICS]; | ||
840 | |||
841 | int mp_find_ioapic(int gsi) | ||
842 | { | ||
843 | int i = 0; | ||
844 | |||
845 | /* Find the IOAPIC that manages this GSI. */ | ||
846 | for (i = 0; i < nr_ioapics; i++) { | ||
847 | if ((gsi >= mp_ioapic_routing[i].gsi_base) | ||
848 | && (gsi <= mp_ioapic_routing[i].gsi_end)) | ||
849 | return i; | ||
850 | } | ||
851 | |||
852 | printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi); | ||
853 | return -1; | ||
854 | } | ||
855 | |||
856 | int mp_find_ioapic_pin(int ioapic, int gsi) | ||
857 | { | ||
858 | if (WARN_ON(ioapic == -1)) | ||
859 | return -1; | ||
860 | if (WARN_ON(gsi > mp_ioapic_routing[ioapic].gsi_end)) | ||
861 | return -1; | ||
862 | |||
863 | return gsi - mp_ioapic_routing[ioapic].gsi_base; | ||
864 | } | ||
865 | |||
866 | static u8 __init uniq_ioapic_id(u8 id) | ||
867 | { | ||
868 | #ifdef CONFIG_X86_32 | ||
869 | if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && | ||
870 | !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) | ||
871 | return io_apic_get_unique_id(nr_ioapics, id); | ||
872 | else | ||
873 | return id; | ||
874 | #else | ||
875 | int i; | ||
876 | DECLARE_BITMAP(used, 256); | ||
877 | bitmap_zero(used, 256); | ||
878 | for (i = 0; i < nr_ioapics; i++) { | ||
879 | struct mpc_ioapic *ia = &mp_ioapics[i]; | ||
880 | __set_bit(ia->apicid, used); | ||
881 | } | ||
882 | if (!test_bit(id, used)) | ||
883 | return id; | ||
884 | return find_first_zero_bit(used, 256); | ||
885 | #endif | ||
886 | } | ||
887 | |||
888 | static int bad_ioapic(unsigned long address) | ||
889 | { | ||
890 | if (nr_ioapics >= MAX_IO_APICS) { | ||
891 | printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded " | ||
892 | "(found %d)\n", MAX_IO_APICS, nr_ioapics); | ||
893 | panic("Recompile kernel with bigger MAX_IO_APICS!\n"); | ||
894 | } | ||
895 | if (!address) { | ||
896 | printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address" | ||
897 | " found in table, skipping!\n"); | ||
898 | return 1; | ||
899 | } | ||
900 | return 0; | ||
901 | } | ||
902 | |||
903 | void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) | ||
904 | { | ||
905 | int idx = 0; | ||
906 | |||
907 | if (bad_ioapic(address)) | ||
908 | return; | ||
909 | |||
910 | idx = nr_ioapics; | ||
911 | |||
912 | mp_ioapics[idx].type = MP_IOAPIC; | ||
913 | mp_ioapics[idx].flags = MPC_APIC_USABLE; | ||
914 | mp_ioapics[idx].apicaddr = address; | ||
915 | |||
916 | set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); | ||
917 | mp_ioapics[idx].apicid = uniq_ioapic_id(id); | ||
918 | mp_ioapics[idx].apicver = io_apic_get_version(idx); | ||
919 | |||
920 | /* | ||
921 | * Build basic GSI lookup table to facilitate gsi->io_apic lookups | ||
922 | * and to prevent reprogramming of IOAPIC pins (PCI GSIs). | ||
923 | */ | ||
924 | mp_ioapic_routing[idx].gsi_base = gsi_base; | ||
925 | mp_ioapic_routing[idx].gsi_end = gsi_base + | ||
926 | io_apic_get_redir_entries(idx); | ||
927 | |||
928 | printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, " | ||
929 | "GSI %d-%d\n", idx, mp_ioapics[idx].apicid, | ||
930 | mp_ioapics[idx].apicver, mp_ioapics[idx].apicaddr, | ||
931 | mp_ioapic_routing[idx].gsi_base, mp_ioapic_routing[idx].gsi_end); | ||
932 | |||
933 | nr_ioapics++; | ||
934 | } | ||
935 | |||
936 | int __init acpi_probe_gsi(void) | 836 | int __init acpi_probe_gsi(void) |
937 | { | 837 | { |
938 | int idx; | 838 | int idx; |
@@ -947,7 +847,7 @@ int __init acpi_probe_gsi(void) | |||
947 | 847 | ||
948 | max_gsi = 0; | 848 | max_gsi = 0; |
949 | for (idx = 0; idx < nr_ioapics; idx++) { | 849 | for (idx = 0; idx < nr_ioapics; idx++) { |
950 | gsi = mp_ioapic_routing[idx].gsi_end; | 850 | gsi = mp_gsi_routing[idx].gsi_end; |
951 | 851 | ||
952 | if (gsi > max_gsi) | 852 | if (gsi > max_gsi) |
953 | max_gsi = gsi; | 853 | max_gsi = gsi; |
@@ -1179,9 +1079,8 @@ static int __init acpi_parse_madt_ioapic_entries(void) | |||
1179 | * If MPS is present, it will handle them, | 1079 | * If MPS is present, it will handle them, |
1180 | * otherwise the system will stay in PIC mode | 1080 | * otherwise the system will stay in PIC mode |
1181 | */ | 1081 | */ |
1182 | if (acpi_disabled || acpi_noirq) { | 1082 | if (acpi_disabled || acpi_noirq) |
1183 | return -ENODEV; | 1083 | return -ENODEV; |
1184 | } | ||
1185 | 1084 | ||
1186 | if (!cpu_has_apic) | 1085 | if (!cpu_has_apic) |
1187 | return -ENODEV; | 1086 | return -ENODEV; |
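The block removed here (mp_find_ioapic() and friends) no longer lives in the ACPI code; the surviving hunk shows acpi_probe_gsi() reading a renamed mp_gsi_routing table instead. The lookup that moved is a plain linear scan over per-IOAPIC GSI ranges, roughly:

    struct gsi_range {
            int gsi_base;
            int gsi_end;
    };

    /* Return the index of the IOAPIC whose [gsi_base, gsi_end] range
     * covers the given GSI, or -1 if no IOAPIC manages it. */
    static int find_ioapic(const struct gsi_range *routing, int nr_ioapics,
                           int gsi)
    {
            int i;

            for (i = 0; i < nr_ioapics; i++)
                    if (gsi >= routing[i].gsi_base && gsi <= routing[i].gsi_end)
                            return i;
            return -1;
    }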
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index b8ebd0b689b1..de7353c0ce9c 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c | |||
@@ -2,6 +2,7 @@ | |||
2 | #include <linux/sched.h> | 2 | #include <linux/sched.h> |
3 | #include <linux/mutex.h> | 3 | #include <linux/mutex.h> |
4 | #include <linux/list.h> | 4 | #include <linux/list.h> |
5 | #include <linux/stringify.h> | ||
5 | #include <linux/kprobes.h> | 6 | #include <linux/kprobes.h> |
6 | #include <linux/mm.h> | 7 | #include <linux/mm.h> |
7 | #include <linux/vmalloc.h> | 8 | #include <linux/vmalloc.h> |
@@ -32,7 +33,7 @@ __setup("smp-alt-boot", bootonly); | |||
32 | #define smp_alt_once 1 | 33 | #define smp_alt_once 1 |
33 | #endif | 34 | #endif |
34 | 35 | ||
35 | static int debug_alternative; | 36 | static int __initdata_or_module debug_alternative; |
36 | 37 | ||
37 | static int __init debug_alt(char *str) | 38 | static int __init debug_alt(char *str) |
38 | { | 39 | { |
@@ -51,7 +52,7 @@ static int __init setup_noreplace_smp(char *str) | |||
51 | __setup("noreplace-smp", setup_noreplace_smp); | 52 | __setup("noreplace-smp", setup_noreplace_smp); |
52 | 53 | ||
53 | #ifdef CONFIG_PARAVIRT | 54 | #ifdef CONFIG_PARAVIRT |
54 | static int noreplace_paravirt = 0; | 55 | static int __initdata_or_module noreplace_paravirt = 0; |
55 | 56 | ||
56 | static int __init setup_noreplace_paravirt(char *str) | 57 | static int __init setup_noreplace_paravirt(char *str) |
57 | { | 58 | { |
@@ -64,16 +65,17 @@ __setup("noreplace-paravirt", setup_noreplace_paravirt); | |||
64 | #define DPRINTK(fmt, args...) if (debug_alternative) \ | 65 | #define DPRINTK(fmt, args...) if (debug_alternative) \ |
65 | printk(KERN_DEBUG fmt, args) | 66 | printk(KERN_DEBUG fmt, args) |
66 | 67 | ||
67 | #ifdef GENERIC_NOP1 | 68 | #if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64) |
68 | /* Use inline assembly to define this because the nops are defined | 69 | /* Use inline assembly to define this because the nops are defined |
69 | as inline assembly strings in the include files and we cannot | 70 | as inline assembly strings in the include files and we cannot |
70 | get them easily into strings. */ | 71 | get them easily into strings. */ |
71 | asm("\t.section .rodata, \"a\"\nintelnops: " | 72 | asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nintelnops: " |
72 | GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6 | 73 | GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6 |
73 | GENERIC_NOP7 GENERIC_NOP8 | 74 | GENERIC_NOP7 GENERIC_NOP8 |
74 | "\t.previous"); | 75 | "\t.previous"); |
75 | extern const unsigned char intelnops[]; | 76 | extern const unsigned char intelnops[]; |
76 | static const unsigned char *const intel_nops[ASM_NOP_MAX+1] = { | 77 | static const unsigned char *const __initconst_or_module |
78 | intel_nops[ASM_NOP_MAX+1] = { | ||
77 | NULL, | 79 | NULL, |
78 | intelnops, | 80 | intelnops, |
79 | intelnops + 1, | 81 | intelnops + 1, |
@@ -87,12 +89,13 @@ static const unsigned char *const intel_nops[ASM_NOP_MAX+1] = { | |||
87 | #endif | 89 | #endif |
88 | 90 | ||
89 | #ifdef K8_NOP1 | 91 | #ifdef K8_NOP1 |
90 | asm("\t.section .rodata, \"a\"\nk8nops: " | 92 | asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nk8nops: " |
91 | K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6 | 93 | K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6 |
92 | K8_NOP7 K8_NOP8 | 94 | K8_NOP7 K8_NOP8 |
93 | "\t.previous"); | 95 | "\t.previous"); |
94 | extern const unsigned char k8nops[]; | 96 | extern const unsigned char k8nops[]; |
95 | static const unsigned char *const k8_nops[ASM_NOP_MAX+1] = { | 97 | static const unsigned char *const __initconst_or_module |
98 | k8_nops[ASM_NOP_MAX+1] = { | ||
96 | NULL, | 99 | NULL, |
97 | k8nops, | 100 | k8nops, |
98 | k8nops + 1, | 101 | k8nops + 1, |
@@ -105,13 +108,14 @@ static const unsigned char *const k8_nops[ASM_NOP_MAX+1] = { | |||
105 | }; | 108 | }; |
106 | #endif | 109 | #endif |
107 | 110 | ||
108 | #ifdef K7_NOP1 | 111 | #if defined(K7_NOP1) && !defined(CONFIG_X86_64) |
109 | asm("\t.section .rodata, \"a\"\nk7nops: " | 112 | asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nk7nops: " |
110 | K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6 | 113 | K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6 |
111 | K7_NOP7 K7_NOP8 | 114 | K7_NOP7 K7_NOP8 |
112 | "\t.previous"); | 115 | "\t.previous"); |
113 | extern const unsigned char k7nops[]; | 116 | extern const unsigned char k7nops[]; |
114 | static const unsigned char *const k7_nops[ASM_NOP_MAX+1] = { | 117 | static const unsigned char *const __initconst_or_module |
118 | k7_nops[ASM_NOP_MAX+1] = { | ||
115 | NULL, | 119 | NULL, |
116 | k7nops, | 120 | k7nops, |
117 | k7nops + 1, | 121 | k7nops + 1, |
@@ -125,12 +129,13 @@ static const unsigned char *const k7_nops[ASM_NOP_MAX+1] = { | |||
125 | #endif | 129 | #endif |
126 | 130 | ||
127 | #ifdef P6_NOP1 | 131 | #ifdef P6_NOP1 |
128 | asm("\t.section .rodata, \"a\"\np6nops: " | 132 | asm("\t" __stringify(__INITRODATA_OR_MODULE) "\np6nops: " |
129 | P6_NOP1 P6_NOP2 P6_NOP3 P6_NOP4 P6_NOP5 P6_NOP6 | 133 | P6_NOP1 P6_NOP2 P6_NOP3 P6_NOP4 P6_NOP5 P6_NOP6 |
130 | P6_NOP7 P6_NOP8 | 134 | P6_NOP7 P6_NOP8 |
131 | "\t.previous"); | 135 | "\t.previous"); |
132 | extern const unsigned char p6nops[]; | 136 | extern const unsigned char p6nops[]; |
133 | static const unsigned char *const p6_nops[ASM_NOP_MAX+1] = { | 137 | static const unsigned char *const __initconst_or_module |
138 | p6_nops[ASM_NOP_MAX+1] = { | ||
134 | NULL, | 139 | NULL, |
135 | p6nops, | 140 | p6nops, |
136 | p6nops + 1, | 141 | p6nops + 1, |
@@ -146,7 +151,7 @@ static const unsigned char *const p6_nops[ASM_NOP_MAX+1] = { | |||
146 | #ifdef CONFIG_X86_64 | 151 | #ifdef CONFIG_X86_64 |
147 | 152 | ||
148 | extern char __vsyscall_0; | 153 | extern char __vsyscall_0; |
149 | const unsigned char *const *find_nop_table(void) | 154 | static const unsigned char *const *__init_or_module find_nop_table(void) |
150 | { | 155 | { |
151 | if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && | 156 | if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && |
152 | boot_cpu_has(X86_FEATURE_NOPL)) | 157 | boot_cpu_has(X86_FEATURE_NOPL)) |
@@ -157,7 +162,7 @@ const unsigned char *const *find_nop_table(void) | |||
157 | 162 | ||
158 | #else /* CONFIG_X86_64 */ | 163 | #else /* CONFIG_X86_64 */ |
159 | 164 | ||
160 | const unsigned char *const *find_nop_table(void) | 165 | static const unsigned char *const *__init_or_module find_nop_table(void) |
161 | { | 166 | { |
162 | if (boot_cpu_has(X86_FEATURE_K8)) | 167 | if (boot_cpu_has(X86_FEATURE_K8)) |
163 | return k8_nops; | 168 | return k8_nops; |
@@ -172,7 +177,7 @@ const unsigned char *const *find_nop_table(void) | |||
172 | #endif /* CONFIG_X86_64 */ | 177 | #endif /* CONFIG_X86_64 */ |
173 | 178 | ||
174 | /* Use this to add nops to a buffer, then text_poke the whole buffer. */ | 179 | /* Use this to add nops to a buffer, then text_poke the whole buffer. */ |
175 | void add_nops(void *insns, unsigned int len) | 180 | static void __init_or_module add_nops(void *insns, unsigned int len) |
176 | { | 181 | { |
177 | const unsigned char *const *noptable = find_nop_table(); | 182 | const unsigned char *const *noptable = find_nop_table(); |
178 | 183 | ||
@@ -185,10 +190,10 @@ void add_nops(void *insns, unsigned int len) | |||
185 | len -= noplen; | 190 | len -= noplen; |
186 | } | 191 | } |
187 | } | 192 | } |
188 | EXPORT_SYMBOL_GPL(add_nops); | ||
189 | 193 | ||
190 | extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; | 194 | extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; |
191 | extern u8 *__smp_locks[], *__smp_locks_end[]; | 195 | extern u8 *__smp_locks[], *__smp_locks_end[]; |
196 | static void *text_poke_early(void *addr, const void *opcode, size_t len); | ||
192 | 197 | ||
193 | /* Replace instructions with better alternatives for this CPU type. | 198 | /* Replace instructions with better alternatives for this CPU type. |
194 | This runs before SMP is initialized to avoid SMP problems with | 199 | This runs before SMP is initialized to avoid SMP problems with |
@@ -196,7 +201,8 @@ extern u8 *__smp_locks[], *__smp_locks_end[]; | |||
196 | APs have fewer capabilities than the boot processor are not handled. | 201 | APs have fewer capabilities than the boot processor are not handled. |
197 | Tough. Make sure you disable such features by hand. */ | 202 | Tough. Make sure you disable such features by hand. */ |
198 | 203 | ||
199 | void apply_alternatives(struct alt_instr *start, struct alt_instr *end) | 204 | void __init_or_module apply_alternatives(struct alt_instr *start, |
205 | struct alt_instr *end) | ||
200 | { | 206 | { |
201 | struct alt_instr *a; | 207 | struct alt_instr *a; |
202 | char insnbuf[MAX_PATCH_LEN]; | 208 | char insnbuf[MAX_PATCH_LEN]; |
@@ -279,9 +285,10 @@ static LIST_HEAD(smp_alt_modules); | |||
279 | static DEFINE_MUTEX(smp_alt); | 285 | static DEFINE_MUTEX(smp_alt); |
280 | static int smp_mode = 1; /* protected by smp_alt */ | 286 | static int smp_mode = 1; /* protected by smp_alt */ |
281 | 287 | ||
282 | void alternatives_smp_module_add(struct module *mod, char *name, | 288 | void __init_or_module alternatives_smp_module_add(struct module *mod, |
283 | void *locks, void *locks_end, | 289 | char *name, |
284 | void *text, void *text_end) | 290 | void *locks, void *locks_end, |
291 | void *text, void *text_end) | ||
285 | { | 292 | { |
286 | struct smp_alt_module *smp; | 293 | struct smp_alt_module *smp; |
287 | 294 | ||
@@ -317,7 +324,7 @@ void alternatives_smp_module_add(struct module *mod, char *name, | |||
317 | mutex_unlock(&smp_alt); | 324 | mutex_unlock(&smp_alt); |
318 | } | 325 | } |
319 | 326 | ||
320 | void alternatives_smp_module_del(struct module *mod) | 327 | void __init_or_module alternatives_smp_module_del(struct module *mod) |
321 | { | 328 | { |
322 | struct smp_alt_module *item; | 329 | struct smp_alt_module *item; |
323 | 330 | ||
@@ -386,8 +393,8 @@ void alternatives_smp_switch(int smp) | |||
386 | #endif | 393 | #endif |
387 | 394 | ||
388 | #ifdef CONFIG_PARAVIRT | 395 | #ifdef CONFIG_PARAVIRT |
389 | void apply_paravirt(struct paravirt_patch_site *start, | 396 | void __init_or_module apply_paravirt(struct paravirt_patch_site *start, |
390 | struct paravirt_patch_site *end) | 397 | struct paravirt_patch_site *end) |
391 | { | 398 | { |
392 | struct paravirt_patch_site *p; | 399 | struct paravirt_patch_site *p; |
393 | char insnbuf[MAX_PATCH_LEN]; | 400 | char insnbuf[MAX_PATCH_LEN]; |
@@ -485,7 +492,8 @@ void __init alternative_instructions(void) | |||
485 | * instructions. And on the local CPU you need to be protected against NMI or MCE | 492 | * instructions. And on the local CPU you need to be protected against NMI or MCE |
486 | * handlers seeing an inconsistent instruction while you patch. | 493 | * handlers seeing an inconsistent instruction while you patch. |
487 | */ | 494 | */ |
488 | void *text_poke_early(void *addr, const void *opcode, size_t len) | 495 | static void *__init_or_module text_poke_early(void *addr, const void *opcode, |
496 | size_t len) | ||
489 | { | 497 | { |
490 | unsigned long flags; | 498 | unsigned long flags; |
491 | local_irq_save(flags); | 499 | local_irq_save(flags); |
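The __init_or_module and __initconst_or_module annotations added throughout this file let the NOP tables and patching helpers be discarded after boot whenever module support is compiled out. The central helper, add_nops(), pads a patch buffer using the longest NOP encoding that still fits; its shape is roughly:

    #include <string.h>

    /* nops[n] points at an n-byte NOP encoding for 1 <= n <= max,
     * mirroring the ASM_NOP_MAX+1 tables above (index 0 unused). */
    static void fill_with_nops(unsigned char *buf, unsigned int len,
                               const unsigned char *const *nops,
                               unsigned int max)
    {
            while (len > 0) {
                    unsigned int n = len > max ? max : len;

                    memcpy(buf, nops[n], n); /* copy one n-byte NOP */
                    buf += n;
                    len -= n;
            }
    }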
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 6c99f5037801..98f230f6a28d 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c | |||
@@ -41,9 +41,13 @@ static DEFINE_RWLOCK(amd_iommu_devtable_lock); | |||
41 | static LIST_HEAD(iommu_pd_list); | 41 | static LIST_HEAD(iommu_pd_list); |
42 | static DEFINE_SPINLOCK(iommu_pd_list_lock); | 42 | static DEFINE_SPINLOCK(iommu_pd_list_lock); |
43 | 43 | ||
44 | #ifdef CONFIG_IOMMU_API | 44 | /* |
45 | * Domain for untranslated devices - only allocated | ||
46 | * if iommu=pt passed on kernel cmd line. | ||
47 | */ | ||
48 | static struct protection_domain *pt_domain; | ||
49 | |||
45 | static struct iommu_ops amd_iommu_ops; | 50 | static struct iommu_ops amd_iommu_ops; |
46 | #endif | ||
47 | 51 | ||
48 | /* | 52 | /* |
49 | * general struct to manage commands send to an IOMMU | 53 | * general struct to manage commands send to an IOMMU |
@@ -55,16 +59,16 @@ struct iommu_cmd { | |||
55 | static int dma_ops_unity_map(struct dma_ops_domain *dma_dom, | 59 | static int dma_ops_unity_map(struct dma_ops_domain *dma_dom, |
56 | struct unity_map_entry *e); | 60 | struct unity_map_entry *e); |
57 | static struct dma_ops_domain *find_protection_domain(u16 devid); | 61 | static struct dma_ops_domain *find_protection_domain(u16 devid); |
58 | static u64* alloc_pte(struct protection_domain *dom, | 62 | static u64 *alloc_pte(struct protection_domain *domain, |
59 | unsigned long address, u64 | 63 | unsigned long address, int end_lvl, |
60 | **pte_page, gfp_t gfp); | 64 | u64 **pte_page, gfp_t gfp); |
61 | static void dma_ops_reserve_addresses(struct dma_ops_domain *dom, | 65 | static void dma_ops_reserve_addresses(struct dma_ops_domain *dom, |
62 | unsigned long start_page, | 66 | unsigned long start_page, |
63 | unsigned int pages); | 67 | unsigned int pages); |
64 | 68 | static void reset_iommu_command_buffer(struct amd_iommu *iommu); | |
65 | #ifndef BUS_NOTIFY_UNBOUND_DRIVER | 69 | static u64 *fetch_pte(struct protection_domain *domain, |
66 | #define BUS_NOTIFY_UNBOUND_DRIVER 0x0005 | 70 | unsigned long address, int map_size); |
67 | #endif | 71 | static void update_domain(struct protection_domain *domain); |
68 | 72 | ||
69 | #ifdef CONFIG_AMD_IOMMU_STATS | 73 | #ifdef CONFIG_AMD_IOMMU_STATS |
70 | 74 | ||
@@ -138,7 +142,25 @@ static int iommu_has_npcache(struct amd_iommu *iommu) | |||
138 | * | 142 | * |
139 | ****************************************************************************/ | 143 | ****************************************************************************/ |
140 | 144 | ||
141 | static void iommu_print_event(void *__evt) | 145 | static void dump_dte_entry(u16 devid) |
146 | { | ||
147 | int i; | ||
148 | |||
149 | for (i = 0; i < 8; ++i) | ||
150 | pr_err("AMD-Vi: DTE[%d]: %08x\n", i, | ||
151 | amd_iommu_dev_table[devid].data[i]); | ||
152 | } | ||
153 | |||
154 | static void dump_command(unsigned long phys_addr) | ||
155 | { | ||
156 | struct iommu_cmd *cmd = phys_to_virt(phys_addr); | ||
157 | int i; | ||
158 | |||
159 | for (i = 0; i < 4; ++i) | ||
160 | pr_err("AMD-Vi: CMD[%d]: %08x\n", i, cmd->data[i]); | ||
161 | } | ||
162 | |||
163 | static void iommu_print_event(struct amd_iommu *iommu, void *__evt) | ||
142 | { | 164 | { |
143 | u32 *event = __evt; | 165 | u32 *event = __evt; |
144 | int type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK; | 166 | int type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK; |
@@ -147,7 +169,7 @@ static void iommu_print_event(void *__evt) | |||
147 | int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK; | 169 | int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK; |
148 | u64 address = (u64)(((u64)event[3]) << 32) | event[2]; | 170 | u64 address = (u64)(((u64)event[3]) << 32) | event[2]; |
149 | 171 | ||
150 | printk(KERN_ERR "AMD IOMMU: Event logged ["); | 172 | printk(KERN_ERR "AMD-Vi: Event logged ["); |
151 | 173 | ||
152 | switch (type) { | 174 | switch (type) { |
153 | case EVENT_TYPE_ILL_DEV: | 175 | case EVENT_TYPE_ILL_DEV: |
@@ -155,6 +177,7 @@ static void iommu_print_event(void *__evt) | |||
155 | "address=0x%016llx flags=0x%04x]\n", | 177 | "address=0x%016llx flags=0x%04x]\n", |
156 | PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid), | 178 | PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid), |
157 | address, flags); | 179 | address, flags); |
180 | dump_dte_entry(devid); | ||
158 | break; | 181 | break; |
159 | case EVENT_TYPE_IO_FAULT: | 182 | case EVENT_TYPE_IO_FAULT: |
160 | printk("IO_PAGE_FAULT device=%02x:%02x.%x " | 183 | printk("IO_PAGE_FAULT device=%02x:%02x.%x " |
@@ -176,6 +199,8 @@ static void iommu_print_event(void *__evt) | |||
176 | break; | 199 | break; |
177 | case EVENT_TYPE_ILL_CMD: | 200 | case EVENT_TYPE_ILL_CMD: |
178 | printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address); | 201 | printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address); |
202 | reset_iommu_command_buffer(iommu); | ||
203 | dump_command(address); | ||
179 | break; | 204 | break; |
180 | case EVENT_TYPE_CMD_HARD_ERR: | 205 | case EVENT_TYPE_CMD_HARD_ERR: |
181 | printk("COMMAND_HARDWARE_ERROR address=0x%016llx " | 206 | printk("COMMAND_HARDWARE_ERROR address=0x%016llx " |
@@ -209,7 +234,7 @@ static void iommu_poll_events(struct amd_iommu *iommu) | |||
209 | tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); | 234 | tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); |
210 | 235 | ||
211 | while (head != tail) { | 236 | while (head != tail) { |
212 | iommu_print_event(iommu->evt_buf + head); | 237 | iommu_print_event(iommu, iommu->evt_buf + head); |
213 | head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size; | 238 | head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size; |
214 | } | 239 | } |
215 | 240 | ||
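iommu_poll_events() now hands the iommu itself to iommu_print_event(), so the print path can dump the DTE on ILL_DEV faults and reset the command buffer on ILLEGAL_COMMAND_ERROR. The loop it lives in is an ordinary head/tail ring-buffer consumer; simplified:

    #define EVENT_ENTRY_SIZE 16 /* bytes per hardware event (assumed) */

    /* Consume entries between head and the hardware-advanced tail,
     * wrapping at the end of the buffer; returns the new head, which
     * the caller writes back to the head register. */
    static unsigned int drain_events(unsigned char *buf, unsigned int buf_size,
                                     unsigned int head, unsigned int tail,
                                     void (*handle)(void *))
    {
            while (head != tail) {
                    handle(buf + head);
                    head = (head + EVENT_ENTRY_SIZE) % buf_size;
            }
            return head;
    }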
@@ -296,8 +321,11 @@ static void __iommu_wait_for_completion(struct amd_iommu *iommu) | |||
296 | status &= ~MMIO_STATUS_COM_WAIT_INT_MASK; | 321 | status &= ~MMIO_STATUS_COM_WAIT_INT_MASK; |
297 | writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET); | 322 | writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET); |
298 | 323 | ||
299 | if (unlikely(i == EXIT_LOOP_COUNT)) | 324 | if (unlikely(i == EXIT_LOOP_COUNT)) { |
300 | panic("AMD IOMMU: Completion wait loop failed\n"); | 325 | spin_unlock(&iommu->lock); |
326 | reset_iommu_command_buffer(iommu); | ||
327 | spin_lock(&iommu->lock); | ||
328 | } | ||
301 | } | 329 | } |
302 | 330 | ||
303 | /* | 331 | /* |
@@ -445,47 +473,78 @@ static void iommu_flush_tlb_pde(struct amd_iommu *iommu, u16 domid) | |||
445 | } | 473 | } |
446 | 474 | ||
447 | /* | 475 | /* |
476 | * This function flushes one domain on one IOMMU | ||
477 | */ | ||
478 | static void flush_domain_on_iommu(struct amd_iommu *iommu, u16 domid) | ||
479 | { | ||
480 | struct iommu_cmd cmd; | ||
481 | unsigned long flags; | ||
482 | |||
483 | __iommu_build_inv_iommu_pages(&cmd, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, | ||
484 | domid, 1, 1); | ||
485 | |||
486 | spin_lock_irqsave(&iommu->lock, flags); | ||
487 | __iommu_queue_command(iommu, &cmd); | ||
488 | __iommu_completion_wait(iommu); | ||
489 | __iommu_wait_for_completion(iommu); | ||
490 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
491 | } | ||
492 | |||
493 | static void flush_all_domains_on_iommu(struct amd_iommu *iommu) | ||
494 | { | ||
495 | int i; | ||
496 | |||
497 | for (i = 1; i < MAX_DOMAIN_ID; ++i) { | ||
498 | if (!test_bit(i, amd_iommu_pd_alloc_bitmap)) | ||
499 | continue; | ||
500 | flush_domain_on_iommu(iommu, i); | ||
501 | } | ||
502 | |||
503 | } | ||
504 | |||
505 | /* | ||
448 | * This function is used to flush the IO/TLB for a given protection domain | 506 | * This function is used to flush the IO/TLB for a given protection domain |
449 | * on every IOMMU in the system | 507 | * on every IOMMU in the system |
450 | */ | 508 | */ |
451 | static void iommu_flush_domain(u16 domid) | 509 | static void iommu_flush_domain(u16 domid) |
452 | { | 510 | { |
453 | unsigned long flags; | ||
454 | struct amd_iommu *iommu; | 511 | struct amd_iommu *iommu; |
455 | struct iommu_cmd cmd; | ||
456 | 512 | ||
457 | INC_STATS_COUNTER(domain_flush_all); | 513 | INC_STATS_COUNTER(domain_flush_all); |
458 | 514 | ||
459 | __iommu_build_inv_iommu_pages(&cmd, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, | 515 | for_each_iommu(iommu) |
460 | domid, 1, 1); | 516 | flush_domain_on_iommu(iommu, domid); |
461 | |||
462 | for_each_iommu(iommu) { | ||
463 | spin_lock_irqsave(&iommu->lock, flags); | ||
464 | __iommu_queue_command(iommu, &cmd); | ||
465 | __iommu_completion_wait(iommu); | ||
466 | __iommu_wait_for_completion(iommu); | ||
467 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
468 | } | ||
469 | } | 517 | } |
470 | 518 | ||
471 | void amd_iommu_flush_all_domains(void) | 519 | void amd_iommu_flush_all_domains(void) |
472 | { | 520 | { |
521 | struct amd_iommu *iommu; | ||
522 | |||
523 | for_each_iommu(iommu) | ||
524 | flush_all_domains_on_iommu(iommu); | ||
525 | } | ||
526 | |||
527 | static void flush_all_devices_for_iommu(struct amd_iommu *iommu) | ||
528 | { | ||
473 | int i; | 529 | int i; |
474 | 530 | ||
475 | for (i = 1; i < MAX_DOMAIN_ID; ++i) { | 531 | for (i = 0; i <= amd_iommu_last_bdf; ++i) { |
476 | if (!test_bit(i, amd_iommu_pd_alloc_bitmap)) | 532 | if (iommu != amd_iommu_rlookup_table[i]) |
477 | continue; | 533 | continue; |
478 | iommu_flush_domain(i); | 534 | |
535 | iommu_queue_inv_dev_entry(iommu, i); | ||
536 | iommu_completion_wait(iommu); | ||
479 | } | 537 | } |
480 | } | 538 | } |
481 | 539 | ||
482 | void amd_iommu_flush_all_devices(void) | 540 | static void flush_devices_by_domain(struct protection_domain *domain) |
483 | { | 541 | { |
484 | struct amd_iommu *iommu; | 542 | struct amd_iommu *iommu; |
485 | int i; | 543 | int i; |
486 | 544 | ||
487 | for (i = 0; i <= amd_iommu_last_bdf; ++i) { | 545 | for (i = 0; i <= amd_iommu_last_bdf; ++i) { |
488 | if (amd_iommu_pd_table[i] == NULL) | 546 | if ((domain == NULL && amd_iommu_pd_table[i] == NULL) || |
547 | (amd_iommu_pd_table[i] != domain)) | ||
489 | continue; | 548 | continue; |
490 | 549 | ||
491 | iommu = amd_iommu_rlookup_table[i]; | 550 | iommu = amd_iommu_rlookup_table[i]; |
@@ -497,6 +556,27 @@ void amd_iommu_flush_all_devices(void) | |||
497 | } | 556 | } |
498 | } | 557 | } |
499 | 558 | ||
559 | static void reset_iommu_command_buffer(struct amd_iommu *iommu) | ||
560 | { | ||
561 | pr_err("AMD-Vi: Resetting IOMMU command buffer\n"); | ||
562 | |||
563 | if (iommu->reset_in_progress) | ||
564 | panic("AMD-Vi: ILLEGAL_COMMAND_ERROR while resetting command buffer\n"); | ||
565 | |||
566 | iommu->reset_in_progress = true; | ||
567 | |||
568 | amd_iommu_reset_cmd_buffer(iommu); | ||
569 | flush_all_devices_for_iommu(iommu); | ||
570 | flush_all_domains_on_iommu(iommu); | ||
571 | |||
572 | iommu->reset_in_progress = false; | ||
573 | } | ||
574 | |||
575 | void amd_iommu_flush_all_devices(void) | ||
576 | { | ||
577 | flush_devices_by_domain(NULL); | ||
578 | } | ||
579 | |||
500 | /**************************************************************************** | 580 | /**************************************************************************** |
501 | * | 581 | * |
502 | * The functions below are used to create the page table mappings for | 582 | * The functions below are used to create the page table mappings for |
@@ -514,18 +594,21 @@ void amd_iommu_flush_all_devices(void) | |||
514 | static int iommu_map_page(struct protection_domain *dom, | 594 | static int iommu_map_page(struct protection_domain *dom, |
515 | unsigned long bus_addr, | 595 | unsigned long bus_addr, |
516 | unsigned long phys_addr, | 596 | unsigned long phys_addr, |
517 | int prot) | 597 | int prot, |
598 | int map_size) | ||
518 | { | 599 | { |
519 | u64 __pte, *pte; | 600 | u64 __pte, *pte; |
520 | 601 | ||
521 | bus_addr = PAGE_ALIGN(bus_addr); | 602 | bus_addr = PAGE_ALIGN(bus_addr); |
522 | phys_addr = PAGE_ALIGN(phys_addr); | 603 | phys_addr = PAGE_ALIGN(phys_addr); |
523 | 604 | ||
524 | /* only support 512GB address spaces for now */ | 605 | BUG_ON(!PM_ALIGNED(map_size, bus_addr)); |
525 | if (bus_addr > IOMMU_MAP_SIZE_L3 || !(prot & IOMMU_PROT_MASK)) | 606 | BUG_ON(!PM_ALIGNED(map_size, phys_addr)); |
607 | |||
608 | if (!(prot & IOMMU_PROT_MASK)) | ||
526 | return -EINVAL; | 609 | return -EINVAL; |
527 | 610 | ||
528 | pte = alloc_pte(dom, bus_addr, NULL, GFP_KERNEL); | 611 | pte = alloc_pte(dom, bus_addr, map_size, NULL, GFP_KERNEL); |
529 | 612 | ||
530 | if (IOMMU_PTE_PRESENT(*pte)) | 613 | if (IOMMU_PTE_PRESENT(*pte)) |
531 | return -EBUSY; | 614 | return -EBUSY; |
@@ -538,29 +621,18 @@ static int iommu_map_page(struct protection_domain *dom, | |||
538 | 621 | ||
539 | *pte = __pte; | 622 | *pte = __pte; |
540 | 623 | ||
624 | update_domain(dom); | ||
625 | |||
541 | return 0; | 626 | return 0; |
542 | } | 627 | } |
543 | 628 | ||
544 | static void iommu_unmap_page(struct protection_domain *dom, | 629 | static void iommu_unmap_page(struct protection_domain *dom, |
545 | unsigned long bus_addr) | 630 | unsigned long bus_addr, int map_size) |
546 | { | 631 | { |
547 | u64 *pte; | 632 | u64 *pte = fetch_pte(dom, bus_addr, map_size); |
548 | |||
549 | pte = &dom->pt_root[IOMMU_PTE_L2_INDEX(bus_addr)]; | ||
550 | |||
551 | if (!IOMMU_PTE_PRESENT(*pte)) | ||
552 | return; | ||
553 | |||
554 | pte = IOMMU_PTE_PAGE(*pte); | ||
555 | pte = &pte[IOMMU_PTE_L1_INDEX(bus_addr)]; | ||
556 | 633 | ||
557 | if (!IOMMU_PTE_PRESENT(*pte)) | 634 | if (pte) |
558 | return; | 635 | *pte = 0; |
559 | |||
560 | pte = IOMMU_PTE_PAGE(*pte); | ||
561 | pte = &pte[IOMMU_PTE_L1_INDEX(bus_addr)]; | ||
562 | |||
563 | *pte = 0; | ||
564 | } | 636 | } |
565 | 637 | ||
566 | /* | 638 | /* |
@@ -615,7 +687,8 @@ static int dma_ops_unity_map(struct dma_ops_domain *dma_dom, | |||
615 | 687 | ||
616 | for (addr = e->address_start; addr < e->address_end; | 688 | for (addr = e->address_start; addr < e->address_end; |
617 | addr += PAGE_SIZE) { | 689 | addr += PAGE_SIZE) { |
618 | ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot); | 690 | ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot, |
691 | PM_MAP_4k); | ||
619 | if (ret) | 692 | if (ret) |
620 | return ret; | 693 | return ret; |
621 | /* | 694 | /* |
@@ -670,24 +743,29 @@ static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom, | |||
670 | * This function checks if there is a PTE for a given dma address. If | 743 | * This function checks if there is a PTE for a given dma address. If |
671 | * there is one, it returns the pointer to it. | 744 | * there is one, it returns the pointer to it. |
672 | */ | 745 | */ |
673 | static u64* fetch_pte(struct protection_domain *domain, | 746 | static u64 *fetch_pte(struct protection_domain *domain, |
674 | unsigned long address) | 747 | unsigned long address, int map_size) |
675 | { | 748 | { |
749 | int level; | ||
676 | u64 *pte; | 750 | u64 *pte; |
677 | 751 | ||
678 | pte = &domain->pt_root[IOMMU_PTE_L2_INDEX(address)]; | 752 | level = domain->mode - 1; |
753 | pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; | ||
679 | 754 | ||
680 | if (!IOMMU_PTE_PRESENT(*pte)) | 755 | while (level > map_size) { |
681 | return NULL; | 756 | if (!IOMMU_PTE_PRESENT(*pte)) |
757 | return NULL; | ||
682 | 758 | ||
683 | pte = IOMMU_PTE_PAGE(*pte); | 759 | level -= 1; |
684 | pte = &pte[IOMMU_PTE_L1_INDEX(address)]; | ||
685 | 760 | ||
686 | if (!IOMMU_PTE_PRESENT(*pte)) | 761 | pte = IOMMU_PTE_PAGE(*pte); |
687 | return NULL; | 762 | pte = &pte[PM_LEVEL_INDEX(level, address)]; |
688 | 763 | ||
689 | pte = IOMMU_PTE_PAGE(*pte); | 764 | if ((PM_PTE_LEVEL(*pte) == 0) && level != map_size) { |
690 | pte = &pte[IOMMU_PTE_L0_INDEX(address)]; | 765 | pte = NULL; |
766 | break; | ||
767 | } | ||
768 | } | ||
691 | 769 | ||
692 | return pte; | 770 | return pte; |
693 | } | 771 | } |
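fetch_pte() is generalized from a hard-wired three-level walk to one that starts at domain->mode - 1 and descends until it reaches the requested map level, so it keeps working as domains grow extra levels. The shape of that walk, with the 9-bits-per-level index math spelled out (the present-bit test and the physical-to-virtual conversion are simplified so the sketch stands alone):

    #include <stdint.h>
    #include <stddef.h>

    /* 9 index bits per level above the 12-bit page offset. */
    #define LEVEL_INDEX(level, addr) (((addr) >> (12 + 9 * (level))) & 0x1ff)
    #define PTE_PRESENT(pte)         ((pte) & 1) /* simplified */

    static uint64_t *walk_table(uint64_t *root, int mode,
                                uint64_t addr, int map_level)
    {
            int level = mode - 1;
            uint64_t *pte = &root[LEVEL_INDEX(level, addr)];

            while (level > map_level) {
                    if (!PTE_PRESENT(*pte))
                            return NULL;
                    level -= 1;
                    /* The kernel uses IOMMU_PTE_PAGE() here to strip the
                     * flag bits and map phys to virt; this sketch assumes
                     * the next level is directly addressable. */
                    pte = (uint64_t *)(uintptr_t)(*pte & ~0xfffULL);
                    pte = &pte[LEVEL_INDEX(level, addr)];
            }
            return pte;
    }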
@@ -727,7 +805,7 @@ static int alloc_new_range(struct amd_iommu *iommu, | |||
727 | u64 *pte, *pte_page; | 805 | u64 *pte, *pte_page; |
728 | 806 | ||
729 | for (i = 0; i < num_ptes; ++i) { | 807 | for (i = 0; i < num_ptes; ++i) { |
730 | pte = alloc_pte(&dma_dom->domain, address, | 808 | pte = alloc_pte(&dma_dom->domain, address, PM_MAP_4k, |
731 | &pte_page, gfp); | 809 | &pte_page, gfp); |
732 | if (!pte) | 810 | if (!pte) |
733 | goto out_free; | 811 | goto out_free; |
@@ -760,16 +838,20 @@ static int alloc_new_range(struct amd_iommu *iommu, | |||
760 | for (i = dma_dom->aperture[index]->offset; | 838 | for (i = dma_dom->aperture[index]->offset; |
761 | i < dma_dom->aperture_size; | 839 | i < dma_dom->aperture_size; |
762 | i += PAGE_SIZE) { | 840 | i += PAGE_SIZE) { |
763 | u64 *pte = fetch_pte(&dma_dom->domain, i); | 841 | u64 *pte = fetch_pte(&dma_dom->domain, i, PM_MAP_4k); |
764 | if (!pte || !IOMMU_PTE_PRESENT(*pte)) | 842 | if (!pte || !IOMMU_PTE_PRESENT(*pte)) |
765 | continue; | 843 | continue; |
766 | 844 | ||
767 | dma_ops_reserve_addresses(dma_dom, i << PAGE_SHIFT, 1); | 845 | dma_ops_reserve_addresses(dma_dom, i << PAGE_SHIFT, 1); |
768 | } | 846 | } |
769 | 847 | ||
848 | update_domain(&dma_dom->domain); | ||
849 | |||
770 | return 0; | 850 | return 0; |
771 | 851 | ||
772 | out_free: | 852 | out_free: |
853 | update_domain(&dma_dom->domain); | ||
854 | |||
773 | free_page((unsigned long)dma_dom->aperture[index]->bitmap); | 855 | free_page((unsigned long)dma_dom->aperture[index]->bitmap); |
774 | 856 | ||
775 | kfree(dma_dom->aperture[index]); | 857 | kfree(dma_dom->aperture[index]); |
@@ -1009,7 +1091,7 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu) | |||
1009 | dma_dom->domain.id = domain_id_alloc(); | 1091 | dma_dom->domain.id = domain_id_alloc(); |
1010 | if (dma_dom->domain.id == 0) | 1092 | if (dma_dom->domain.id == 0) |
1011 | goto free_dma_dom; | 1093 | goto free_dma_dom; |
1012 | dma_dom->domain.mode = PAGE_MODE_3_LEVEL; | 1094 | dma_dom->domain.mode = PAGE_MODE_2_LEVEL; |
1013 | dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL); | 1095 | dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL); |
1014 | dma_dom->domain.flags = PD_DMA_OPS_MASK; | 1096 | dma_dom->domain.flags = PD_DMA_OPS_MASK; |
1015 | dma_dom->domain.priv = dma_dom; | 1097 | dma_dom->domain.priv = dma_dom; |
@@ -1063,6 +1145,41 @@ static struct protection_domain *domain_for_device(u16 devid) | |||
1063 | return dom; | 1145 | return dom; |
1064 | } | 1146 | } |
1065 | 1147 | ||
1148 | static void set_dte_entry(u16 devid, struct protection_domain *domain) | ||
1149 | { | ||
1150 | u64 pte_root = virt_to_phys(domain->pt_root); | ||
1151 | |||
1152 | pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK) | ||
1153 | << DEV_ENTRY_MODE_SHIFT; | ||
1154 | pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV; | ||
1155 | |||
1156 | amd_iommu_dev_table[devid].data[2] = domain->id; | ||
1157 | amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root); | ||
1158 | amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root); | ||
1159 | |||
1160 | amd_iommu_pd_table[devid] = domain; | ||
1161 | } | ||
1162 | |||
1163 | /* | ||
1164 | * If a device is not yet associated with a domain, this function ||
1165 | * attaches it to that domain and makes it visible to the hardware ||
1166 | */ | ||
1167 | static void __attach_device(struct amd_iommu *iommu, | ||
1168 | struct protection_domain *domain, | ||
1169 | u16 devid) | ||
1170 | { | ||
1171 | /* lock domain */ | ||
1172 | spin_lock(&domain->lock); | ||
1173 | |||
1174 | /* update DTE entry */ | ||
1175 | set_dte_entry(devid, domain); | ||
1176 | |||
1177 | domain->dev_cnt += 1; | ||
1178 | |||
1179 | /* ready */ | ||
1180 | spin_unlock(&domain->lock); | ||
1181 | } | ||
1182 | |||
1066 | /* | 1183 | /* |
1067 | * If a device is not yet associated with a domain, this function | 1184 | * If a device is not yet associated with a domain, this function |
1068 | * attaches it to that domain and makes it visible to the hardware | 1185 | * attaches it to that domain and makes it visible to the hardware |
@@ -1072,27 +1189,16 @@ static void attach_device(struct amd_iommu *iommu, | |||
1072 | u16 devid) | 1189 | u16 devid) |
1073 | { | 1190 | { |
1074 | unsigned long flags; | 1191 | unsigned long flags; |
1075 | u64 pte_root = virt_to_phys(domain->pt_root); | ||
1076 | |||
1077 | domain->dev_cnt += 1; | ||
1078 | |||
1079 | pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK) | ||
1080 | << DEV_ENTRY_MODE_SHIFT; | ||
1081 | pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV; | ||
1082 | 1192 | ||
1083 | write_lock_irqsave(&amd_iommu_devtable_lock, flags); | 1193 | write_lock_irqsave(&amd_iommu_devtable_lock, flags); |
1084 | amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root); | 1194 | __attach_device(iommu, domain, devid); |
1085 | amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root); | ||
1086 | amd_iommu_dev_table[devid].data[2] = domain->id; | ||
1087 | |||
1088 | amd_iommu_pd_table[devid] = domain; | ||
1089 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); | 1195 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); |
1090 | 1196 | ||
1091 | /* | 1197 | /* |
1092 | * We might boot into a crash-kernel here. The crashed kernel | 1198 | * We might boot into a crash-kernel here. The crashed kernel |
1093 | * left the caches in the IOMMU dirty. So we have to flush | 1199 | * left the caches in the IOMMU dirty. So we have to flush |
1094 | * here to evict all dirty stuff. | 1200 | * here to evict all dirty stuff. |
1095 | */ | 1201 | */ |
1096 | iommu_queue_inv_dev_entry(iommu, devid); | 1202 | iommu_queue_inv_dev_entry(iommu, devid); |
1097 | iommu_flush_tlb_pde(iommu, domain->id); | 1203 | iommu_flush_tlb_pde(iommu, domain->id); |
1098 | } | 1204 | } |
@@ -1119,6 +1225,15 @@ static void __detach_device(struct protection_domain *domain, u16 devid) | |||
1119 | 1225 | ||
1120 | /* ready */ | 1226 | /* ready */ |
1121 | spin_unlock(&domain->lock); | 1227 | spin_unlock(&domain->lock); |
1228 | |||
1229 | /* | ||
1230 | * If we run in passthrough mode the device must be assigned to the | ||
1231 | * passthrough domain if it is detached from any other domain | ||
1232 | */ | ||
1233 | if (iommu_pass_through) { | ||
1234 | struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; | ||
1235 | __attach_device(iommu, pt_domain, devid); | ||
1236 | } | ||
1122 | } | 1237 | } |
1123 | 1238 | ||
1124 | /* | 1239 | /* |
@@ -1164,6 +1279,8 @@ static int device_change_notifier(struct notifier_block *nb, | |||
1164 | case BUS_NOTIFY_UNBOUND_DRIVER: | 1279 | case BUS_NOTIFY_UNBOUND_DRIVER: |
1165 | if (!domain) | 1280 | if (!domain) |
1166 | goto out; | 1281 | goto out; |
1282 | if (iommu_pass_through) | ||
1283 | break; | ||
1167 | detach_device(domain, devid); | 1284 | detach_device(domain, devid); |
1168 | break; | 1285 | break; |
1169 | case BUS_NOTIFY_ADD_DEVICE: | 1286 | case BUS_NOTIFY_ADD_DEVICE: |
@@ -1292,39 +1409,91 @@ static int get_device_resources(struct device *dev, | |||
1292 | return 1; | 1409 | return 1; |
1293 | } | 1410 | } |
1294 | 1411 | ||
1412 | static void update_device_table(struct protection_domain *domain) | ||
1413 | { | ||
1414 | unsigned long flags; | ||
1415 | int i; | ||
1416 | |||
1417 | for (i = 0; i <= amd_iommu_last_bdf; ++i) { | ||
1418 | if (amd_iommu_pd_table[i] != domain) | ||
1419 | continue; | ||
1420 | write_lock_irqsave(&amd_iommu_devtable_lock, flags); | ||
1421 | set_dte_entry(i, domain); | ||
1422 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); | ||
1423 | } | ||
1424 | } | ||
1425 | |||
1426 | static void update_domain(struct protection_domain *domain) | ||
1427 | { | ||
1428 | if (!domain->updated) | ||
1429 | return; | ||
1430 | |||
1431 | update_device_table(domain); | ||
1432 | flush_devices_by_domain(domain); | ||
1433 | iommu_flush_domain(domain->id); | ||
1434 | |||
1435 | domain->updated = false; | ||
1436 | } | ||
1437 | |||
1295 | /* | 1438 | /* |
1296 | * If the pte_page is not yet allocated this function is called | 1439 | * This function is used to add another level to an IO page table. Adding |
1440 | * another level increases the size of the address space by 9 bits to a size up | ||
1441 | * to 64 bits. | ||
1297 | */ | 1442 | */ |
1298 | static u64* alloc_pte(struct protection_domain *dom, | 1443 | static bool increase_address_space(struct protection_domain *domain, |
1299 | unsigned long address, u64 **pte_page, gfp_t gfp) | 1444 | gfp_t gfp) |
1445 | { | ||
1446 | u64 *pte; | ||
1447 | |||
1448 | if (domain->mode == PAGE_MODE_6_LEVEL) | ||
1449 | /* address space already 64 bit large */ | ||
1450 | return false; | ||
1451 | |||
1452 | pte = (void *)get_zeroed_page(gfp); | ||
1453 | if (!pte) | ||
1454 | return false; | ||
1455 | |||
1456 | *pte = PM_LEVEL_PDE(domain->mode, | ||
1457 | virt_to_phys(domain->pt_root)); | ||
1458 | domain->pt_root = pte; | ||
1459 | domain->mode += 1; | ||
1460 | domain->updated = true; | ||
1461 | |||
1462 | return true; | ||
1463 | } | ||
1464 | |||
1465 | static u64 *alloc_pte(struct protection_domain *domain, | ||
1466 | unsigned long address, | ||
1467 | int end_lvl, | ||
1468 | u64 **pte_page, | ||
1469 | gfp_t gfp) | ||
1300 | { | 1470 | { |
1301 | u64 *pte, *page; | 1471 | u64 *pte, *page; |
1472 | int level; | ||
1302 | 1473 | ||
1303 | pte = &dom->pt_root[IOMMU_PTE_L2_INDEX(address)]; | 1474 | while (address > PM_LEVEL_SIZE(domain->mode)) |
1475 | increase_address_space(domain, gfp); | ||
1304 | 1476 | ||
1305 | if (!IOMMU_PTE_PRESENT(*pte)) { | 1477 | level = domain->mode - 1; |
1306 | page = (u64 *)get_zeroed_page(gfp); | 1478 | pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; |
1307 | if (!page) | ||
1308 | return NULL; | ||
1309 | *pte = IOMMU_L2_PDE(virt_to_phys(page)); | ||
1310 | } | ||
1311 | 1479 | ||
1312 | pte = IOMMU_PTE_PAGE(*pte); | 1480 | while (level > end_lvl) { |
1313 | pte = &pte[IOMMU_PTE_L1_INDEX(address)]; | 1481 | if (!IOMMU_PTE_PRESENT(*pte)) { |
1482 | page = (u64 *)get_zeroed_page(gfp); | ||
1483 | if (!page) | ||
1484 | return NULL; | ||
1485 | *pte = PM_LEVEL_PDE(level, virt_to_phys(page)); | ||
1486 | } | ||
1314 | 1487 | ||
1315 | if (!IOMMU_PTE_PRESENT(*pte)) { | 1488 | level -= 1; |
1316 | page = (u64 *)get_zeroed_page(gfp); | ||
1317 | if (!page) | ||
1318 | return NULL; | ||
1319 | *pte = IOMMU_L1_PDE(virt_to_phys(page)); | ||
1320 | } | ||
1321 | 1489 | ||
1322 | pte = IOMMU_PTE_PAGE(*pte); | 1490 | pte = IOMMU_PTE_PAGE(*pte); |
1323 | 1491 | ||
1324 | if (pte_page) | 1492 | if (pte_page && level == end_lvl) |
1325 | *pte_page = pte; | 1493 | *pte_page = pte; |
1326 | 1494 | ||
1327 | pte = &pte[IOMMU_PTE_L0_INDEX(address)]; | 1495 | pte = &pte[PM_LEVEL_INDEX(level, address)]; |
1496 | } | ||
1328 | 1497 | ||
1329 | return pte; | 1498 | return pte; |
1330 | } | 1499 | } |
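alloc_pte() can call increase_address_space() safely because slot 0 of a freshly zeroed top level is exactly where every already-mapped address lands: the new, higher index bits of any existing address are all zero. Setting domain->updated afterwards lets update_domain() rewrite the device table entries and flush once, rather than on every step. The growth step on its own, with calloc standing in for get_zeroed_page() and a simplified present bit:

    #include <stdlib.h>
    #include <stdint.h>

    #define ENTRIES_PER_LEVEL 512
    #define MAX_MODE          6 /* six levels already cover 64 bits */

    /* Push a new level on top: entry 0 of the new root points at the
     * old root, since existing addresses index it with all-zero bits. */
    static uint64_t *grow_root(uint64_t *old_root, int *mode)
    {
            uint64_t *new_root;

            if (*mode == MAX_MODE)
                    return old_root; /* address space is already maximal */

            new_root = calloc(ENTRIES_PER_LEVEL, sizeof(*new_root));
            if (!new_root)
                    return old_root; /* allocation failed; keep old root */

            new_root[0] = (uint64_t)(uintptr_t)old_root | 1; /* present */
            *mode += 1;
            return new_root;
    }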
@@ -1344,10 +1513,13 @@ static u64* dma_ops_get_pte(struct dma_ops_domain *dom, | |||
1344 | 1513 | ||
1345 | pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)]; | 1514 | pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)]; |
1346 | if (!pte) { | 1515 | if (!pte) { |
1347 | pte = alloc_pte(&dom->domain, address, &pte_page, GFP_ATOMIC); | 1516 | pte = alloc_pte(&dom->domain, address, PM_MAP_4k, &pte_page, |
1517 | GFP_ATOMIC); | ||
1348 | aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page; | 1518 | aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page; |
1349 | } else | 1519 | } else |
1350 | pte += IOMMU_PTE_L0_INDEX(address); | 1520 | pte += PM_LEVEL_INDEX(0, address); |
1521 | |||
1522 | update_domain(&dom->domain); | ||
1351 | 1523 | ||
1352 | return pte; | 1524 | return pte; |
1353 | } | 1525 | } |
@@ -1409,7 +1581,7 @@ static void dma_ops_domain_unmap(struct amd_iommu *iommu, | |||
1409 | if (!pte) | 1581 | if (!pte) |
1410 | return; | 1582 | return; |
1411 | 1583 | ||
1412 | pte += IOMMU_PTE_L0_INDEX(address); | 1584 | pte += PM_LEVEL_INDEX(0, address); |
1413 | 1585 | ||
1414 | WARN_ON(!*pte); | 1586 | WARN_ON(!*pte); |
1415 | 1587 | ||
@@ -1988,19 +2160,47 @@ static void cleanup_domain(struct protection_domain *domain) | |||
1988 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); | 2160 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); |
1989 | } | 2161 | } |
1990 | 2162 | ||
1991 | static int amd_iommu_domain_init(struct iommu_domain *dom) | 2163 | static void protection_domain_free(struct protection_domain *domain) |
2164 | { | ||
2165 | if (!domain) | ||
2166 | return; | ||
2167 | |||
2168 | if (domain->id) | ||
2169 | domain_id_free(domain->id); | ||
2170 | |||
2171 | kfree(domain); | ||
2172 | } | ||
2173 | |||
2174 | static struct protection_domain *protection_domain_alloc(void) | ||
1992 | { | 2175 | { |
1993 | struct protection_domain *domain; | 2176 | struct protection_domain *domain; |
1994 | 2177 | ||
1995 | domain = kzalloc(sizeof(*domain), GFP_KERNEL); | 2178 | domain = kzalloc(sizeof(*domain), GFP_KERNEL); |
1996 | if (!domain) | 2179 | if (!domain) |
1997 | return -ENOMEM; | 2180 | return NULL; |
1998 | 2181 | ||
1999 | spin_lock_init(&domain->lock); | 2182 | spin_lock_init(&domain->lock); |
2000 | domain->mode = PAGE_MODE_3_LEVEL; | ||
2001 | domain->id = domain_id_alloc(); | 2183 | domain->id = domain_id_alloc(); |
2002 | if (!domain->id) | 2184 | if (!domain->id) |
2185 | goto out_err; | ||
2186 | |||
2187 | return domain; | ||
2188 | |||
2189 | out_err: | ||
2190 | kfree(domain); | ||
2191 | |||
2192 | return NULL; | ||
2193 | } | ||
2194 | |||
2195 | static int amd_iommu_domain_init(struct iommu_domain *dom) | ||
2196 | { | ||
2197 | struct protection_domain *domain; | ||
2198 | |||
2199 | domain = protection_domain_alloc(); | ||
2200 | if (!domain) | ||
2003 | goto out_free; | 2201 | goto out_free; |
2202 | |||
2203 | domain->mode = PAGE_MODE_3_LEVEL; | ||
2004 | domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL); | 2204 | domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL); |
2005 | if (!domain->pt_root) | 2205 | if (!domain->pt_root) |
2006 | goto out_free; | 2206 | goto out_free; |
@@ -2010,7 +2210,7 @@ static int amd_iommu_domain_init(struct iommu_domain *dom) | |||
2010 | return 0; | 2210 | return 0; |
2011 | 2211 | ||
2012 | out_free: | 2212 | out_free: |
2013 | kfree(domain); | 2213 | protection_domain_free(domain); |
2014 | 2214 | ||
2015 | return -ENOMEM; | 2215 | return -ENOMEM; |
2016 | } | 2216 | } |
@@ -2115,7 +2315,7 @@ static int amd_iommu_map_range(struct iommu_domain *dom, | |||
2115 | paddr &= PAGE_MASK; | 2315 | paddr &= PAGE_MASK; |
2116 | 2316 | ||
2117 | for (i = 0; i < npages; ++i) { | 2317 | for (i = 0; i < npages; ++i) { |
2118 | ret = iommu_map_page(domain, iova, paddr, prot); | 2318 | ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k); |
2119 | if (ret) | 2319 | if (ret) |
2120 | return ret; | 2320 | return ret; |
2121 | 2321 | ||
@@ -2136,7 +2336,7 @@ static void amd_iommu_unmap_range(struct iommu_domain *dom, | |||
2136 | iova &= PAGE_MASK; | 2336 | iova &= PAGE_MASK; |
2137 | 2337 | ||
2138 | for (i = 0; i < npages; ++i) { | 2338 | for (i = 0; i < npages; ++i) { |
2139 | iommu_unmap_page(domain, iova); | 2339 | iommu_unmap_page(domain, iova, PM_MAP_4k); |
2140 | iova += PAGE_SIZE; | 2340 | iova += PAGE_SIZE; |
2141 | } | 2341 | } |
2142 | 2342 | ||
@@ -2151,21 +2351,9 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom, | |||
2151 | phys_addr_t paddr; | 2351 | phys_addr_t paddr; |
2152 | u64 *pte; | 2352 | u64 *pte; |
2153 | 2353 | ||
2154 | pte = &domain->pt_root[IOMMU_PTE_L2_INDEX(iova)]; | 2354 | pte = fetch_pte(domain, iova, PM_MAP_4k); |
2155 | |||
2156 | if (!IOMMU_PTE_PRESENT(*pte)) | ||
2157 | return 0; | ||
2158 | |||
2159 | pte = IOMMU_PTE_PAGE(*pte); | ||
2160 | pte = &pte[IOMMU_PTE_L1_INDEX(iova)]; | ||
2161 | |||
2162 | if (!IOMMU_PTE_PRESENT(*pte)) | ||
2163 | return 0; | ||
2164 | |||
2165 | pte = IOMMU_PTE_PAGE(*pte); | ||
2166 | pte = &pte[IOMMU_PTE_L0_INDEX(iova)]; | ||
2167 | 2355 | ||
2168 | if (!IOMMU_PTE_PRESENT(*pte)) | 2356 | if (!pte || !IOMMU_PTE_PRESENT(*pte)) |
2169 | return 0; | 2357 | return 0; |
2170 | 2358 | ||
2171 | paddr = *pte & IOMMU_PAGE_MASK; | 2359 | paddr = *pte & IOMMU_PAGE_MASK; |
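fetch_pte() collapses the open-coded three-level walk into one helper driven by domain->mode. A hedged sketch of the walk it performs (the real function also takes the requested map size, e.g. PM_MAP_4k, to decide where to stop):

        static u64 *fetch_pte_sketch(struct protection_domain *domain,
                                     unsigned long address)
        {
                int level = domain->mode - 1;
                u64 *pte  = &domain->pt_root[PM_LEVEL_INDEX(level, address)];

                while (level > 0) {
                        if (!IOMMU_PTE_PRESENT(*pte))
                                return NULL;    /* hole in the page table */
                        level -= 1;
                        pte = IOMMU_PTE_PAGE(*pte);
                        pte = &pte[PM_LEVEL_INDEX(level, address)];
                }
                return pte;
        }

Returning NULL on a non-present intermediate level is what makes the combined `!pte || !IOMMU_PTE_PRESENT(*pte)` check above sufficient.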
@@ -2191,3 +2379,46 @@ static struct iommu_ops amd_iommu_ops = { | |||
2191 | .domain_has_cap = amd_iommu_domain_has_cap, | 2379 | .domain_has_cap = amd_iommu_domain_has_cap, |
2192 | }; | 2380 | }; |
2193 | 2381 | ||
2382 | /***************************************************************************** | ||
2383 | * | ||
2384 | * The next functions do a basic initialization of IOMMU for pass through | ||
2385 | * mode | ||
2386 | * | ||
2387 | * In passthrough mode the IOMMU is initialized and enabled but not used for | ||
2388 | * DMA-API translation. | ||
2389 | * | ||
2390 | *****************************************************************************/ | ||
2391 | |||
2392 | int __init amd_iommu_init_passthrough(void) | ||
2393 | { | ||
2394 | struct pci_dev *dev = NULL; | ||
2395 | u16 devid, devid2; | ||
2396 | |||
2397 | /* allocate passthrough domain */ | ||
2398 | pt_domain = protection_domain_alloc(); | ||
2399 | if (!pt_domain) | ||
2400 | return -ENOMEM; | ||
2401 | |||
2402 | pt_domain->mode |= PAGE_MODE_NONE; | ||
2403 | |||
2404 | while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { | ||
2405 | struct amd_iommu *iommu; | ||
2406 | |||
2407 | devid = calc_devid(dev->bus->number, dev->devfn); | ||
2408 | if (devid > amd_iommu_last_bdf) | ||
2409 | continue; | ||
2410 | |||
2411 | devid2 = amd_iommu_alias_table[devid]; | ||
2412 | |||
2413 | iommu = amd_iommu_rlookup_table[devid2]; | ||
2414 | if (!iommu) | ||
2415 | continue; | ||
2416 | |||
2417 | __attach_device(iommu, pt_domain, devid); | ||
2418 | __attach_device(iommu, pt_domain, devid2); | ||
2419 | } | ||
2420 | |||
2421 | pr_info("AMD-Vi: Initialized for Passthrough Mode\n"); | ||
2422 | |||
2423 | return 0; | ||
2424 | } | ||
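The passthrough loop attaches every PCI function, plus its request-ID alias from amd_iommu_alias_table, to the shared pt_domain. calc_devid() (defined elsewhere in the driver) simply packs bus and devfn into the 16-bit PCI requester ID used to index those tables; roughly:

        /* Sketch: 16-bit requester ID = bus[15:8] | devfn[7:0]. */
        static inline u16 calc_devid(u8 bus, u8 devfn)
        {
                return ((u16)bus << 8) | devfn;
        }

Devices beyond amd_iommu_last_bdf or without a responsible IOMMU are skipped rather than treated as errors, since the hardware never translates them anyway.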
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c index c1b17e97252e..b4b61d462dcc 100644 --- a/arch/x86/kernel/amd_iommu_init.c +++ b/arch/x86/kernel/amd_iommu_init.c | |||
@@ -252,7 +252,7 @@ static void __init iommu_feature_disable(struct amd_iommu *iommu, u8 bit) | |||
252 | /* Function to enable the hardware */ | 252 | /* Function to enable the hardware */ |
253 | static void iommu_enable(struct amd_iommu *iommu) | 253 | static void iommu_enable(struct amd_iommu *iommu) |
254 | { | 254 | { |
255 | printk(KERN_INFO "AMD IOMMU: Enabling IOMMU at %s cap 0x%hx\n", | 255 | printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx\n", |
256 | dev_name(&iommu->dev->dev), iommu->cap_ptr); | 256 | dev_name(&iommu->dev->dev), iommu->cap_ptr); |
257 | 257 | ||
258 | iommu_feature_enable(iommu, CONTROL_IOMMU_EN); | 258 | iommu_feature_enable(iommu, CONTROL_IOMMU_EN); |
@@ -435,6 +435,20 @@ static u8 * __init alloc_command_buffer(struct amd_iommu *iommu) | |||
435 | } | 435 | } |
436 | 436 | ||
437 | /* | 437 | /* |
438 | * This function resets the command buffer if the IOMMU stopped fetching | ||
439 | * commands from it. | ||
440 | */ | ||
441 | void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu) | ||
442 | { | ||
443 | iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); | ||
444 | |||
445 | writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); | ||
446 | writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); | ||
447 | |||
448 | iommu_feature_enable(iommu, CONTROL_CMDBUF_EN); | ||
449 | } | ||
450 | |||
451 | /* | ||
438 | * This function writes the command buffer address to the hardware and | 452 | * This function writes the command buffer address to the hardware and |
439 | * enables it. | 453 | * enables it. |
440 | */ | 454 | */ |
@@ -450,11 +464,7 @@ static void iommu_enable_command_buffer(struct amd_iommu *iommu) | |||
450 | memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET, | 464 | memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET, |
451 | &entry, sizeof(entry)); | 465 | &entry, sizeof(entry)); |
452 | 466 | ||
453 | /* set head and tail to zero manually */ | 467 | amd_iommu_reset_cmd_buffer(iommu); |
454 | writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); | ||
455 | writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); | ||
456 | |||
457 | iommu_feature_enable(iommu, CONTROL_CMDBUF_EN); | ||
458 | } | 468 | } |
459 | 469 | ||
460 | static void __init free_command_buffer(struct amd_iommu *iommu) | 470 | static void __init free_command_buffer(struct amd_iommu *iommu) |
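Pulling the head/tail reset out into amd_iommu_reset_cmd_buffer() makes it callable at runtime, not only from iommu_enable_command_buffer() at init. A hedged sketch of the kind of recovery path expected to use it (the handler name here is illustrative):

        /* Sketch: recover from a command-buffer error reported in the
         * event log by disabling, rewinding and re-enabling the buffer. */
        static void handle_cmdbuf_error(struct amd_iommu *iommu)
        {
                pr_err("AMD-Vi: command buffer error, resetting\n");
                amd_iommu_reset_cmd_buffer(iommu);
        }

The disable/enable bracket around the head/tail writes matters: the hardware must not fetch commands while the pointers are being rewound.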
@@ -858,7 +868,7 @@ static int __init init_iommu_all(struct acpi_table_header *table) | |||
858 | switch (*p) { | 868 | switch (*p) { |
859 | case ACPI_IVHD_TYPE: | 869 | case ACPI_IVHD_TYPE: |
860 | 870 | ||
861 | DUMP_printk("IOMMU: device: %02x:%02x.%01x cap: %04x " | 871 | DUMP_printk("device: %02x:%02x.%01x cap: %04x " |
862 | "seg: %d flags: %01x info %04x\n", | 872 | "seg: %d flags: %01x info %04x\n", |
863 | PCI_BUS(h->devid), PCI_SLOT(h->devid), | 873 | PCI_BUS(h->devid), PCI_SLOT(h->devid), |
864 | PCI_FUNC(h->devid), h->cap_ptr, | 874 | PCI_FUNC(h->devid), h->cap_ptr, |
@@ -902,7 +912,7 @@ static int __init iommu_setup_msi(struct amd_iommu *iommu) | |||
902 | 912 | ||
903 | r = request_irq(iommu->dev->irq, amd_iommu_int_handler, | 913 | r = request_irq(iommu->dev->irq, amd_iommu_int_handler, |
904 | IRQF_SAMPLE_RANDOM, | 914 | IRQF_SAMPLE_RANDOM, |
905 | "AMD IOMMU", | 915 | "AMD-Vi", |
906 | NULL); | 916 | NULL); |
907 | 917 | ||
908 | if (r) { | 918 | if (r) { |
@@ -1150,7 +1160,7 @@ int __init amd_iommu_init(void) | |||
1150 | 1160 | ||
1151 | 1161 | ||
1152 | if (no_iommu) { | 1162 | if (no_iommu) { |
1153 | printk(KERN_INFO "AMD IOMMU disabled by kernel command line\n"); | 1163 | printk(KERN_INFO "AMD-Vi disabled by kernel command line\n"); |
1154 | return 0; | 1164 | return 0; |
1155 | } | 1165 | } |
1156 | 1166 | ||
@@ -1242,22 +1252,28 @@ int __init amd_iommu_init(void) | |||
1242 | if (ret) | 1252 | if (ret) |
1243 | goto free; | 1253 | goto free; |
1244 | 1254 | ||
1245 | ret = amd_iommu_init_dma_ops(); | 1255 | if (iommu_pass_through) |
1256 | ret = amd_iommu_init_passthrough(); | ||
1257 | else | ||
1258 | ret = amd_iommu_init_dma_ops(); | ||
1246 | if (ret) | 1259 | if (ret) |
1247 | goto free; | 1260 | goto free; |
1248 | 1261 | ||
1249 | enable_iommus(); | 1262 | enable_iommus(); |
1250 | 1263 | ||
1251 | printk(KERN_INFO "AMD IOMMU: device isolation "); | 1264 | if (iommu_pass_through) |
1265 | goto out; | ||
1266 | |||
1267 | printk(KERN_INFO "AMD-Vi: device isolation "); | ||
1252 | if (amd_iommu_isolate) | 1268 | if (amd_iommu_isolate) |
1253 | printk("enabled\n"); | 1269 | printk("enabled\n"); |
1254 | else | 1270 | else |
1255 | printk("disabled\n"); | 1271 | printk("disabled\n"); |
1256 | 1272 | ||
1257 | if (amd_iommu_unmap_flush) | 1273 | if (amd_iommu_unmap_flush) |
1258 | printk(KERN_INFO "AMD IOMMU: IO/TLB flush on unmap enabled\n"); | 1274 | printk(KERN_INFO "AMD-Vi: IO/TLB flush on unmap enabled\n"); |
1259 | else | 1275 | else |
1260 | printk(KERN_INFO "AMD IOMMU: Lazy IO/TLB flushing enabled\n"); | 1276 | printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n"); |
1261 | 1277 | ||
1262 | out: | 1278 | out: |
1263 | return ret; | 1279 | return ret; |
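Which init path is taken now hinges on the global iommu_pass_through flag, which the generic x86 DMA setup code parses from the kernel command line; booting with

        iommu=pt

initializes and enables the IOMMU hardware but installs no DMA-API translation, matching the passthrough setup added above (the isolation/flush messages are skipped since they do not apply).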
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c index 676debfc1702..128111d8ffe0 100644 --- a/arch/x86/kernel/aperture_64.c +++ b/arch/x86/kernel/aperture_64.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/bitops.h> | 20 | #include <linux/bitops.h> |
21 | #include <linux/ioport.h> | 21 | #include <linux/ioport.h> |
22 | #include <linux/suspend.h> | 22 | #include <linux/suspend.h> |
23 | #include <linux/kmemleak.h> | ||
23 | #include <asm/e820.h> | 24 | #include <asm/e820.h> |
24 | #include <asm/io.h> | 25 | #include <asm/io.h> |
25 | #include <asm/iommu.h> | 26 | #include <asm/iommu.h> |
@@ -94,6 +95,11 @@ static u32 __init allocate_aperture(void) | |||
94 | * code for safe | 95 | * code for safe |
95 | */ | 96 | */ |
96 | p = __alloc_bootmem_nopanic(aper_size, aper_size, 512ULL<<20); | 97 | p = __alloc_bootmem_nopanic(aper_size, aper_size, 512ULL<<20); |
98 | /* | ||
99 | * Kmemleak should not scan this block as it may not be mapped via the | ||
100 | * kernel direct mapping. | ||
101 | */ | ||
102 | kmemleak_ignore(p); | ||
97 | if (!p || __pa(p)+aper_size > 0xffffffff) { | 103 | if (!p || __pa(p)+aper_size > 0xffffffff) { |
98 | printk(KERN_ERR | 104 | printk(KERN_ERR |
99 | "Cannot allocate aperture memory hole (%p,%uK)\n", | 105 | "Cannot allocate aperture memory hole (%p,%uK)\n", |
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 0a1c2830ec66..159740decc41 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c | |||
@@ -49,6 +49,7 @@ | |||
49 | #include <asm/mtrr.h> | 49 | #include <asm/mtrr.h> |
50 | #include <asm/smp.h> | 50 | #include <asm/smp.h> |
51 | #include <asm/mce.h> | 51 | #include <asm/mce.h> |
52 | #include <asm/kvm_para.h> | ||
52 | 53 | ||
53 | unsigned int num_processors; | 54 | unsigned int num_processors; |
54 | 55 | ||
@@ -1361,52 +1362,80 @@ void enable_x2apic(void) | |||
1361 | } | 1362 | } |
1362 | #endif /* CONFIG_X86_X2APIC */ | 1363 | #endif /* CONFIG_X86_X2APIC */ |
1363 | 1364 | ||
1364 | void __init enable_IR_x2apic(void) | 1365 | int __init enable_IR(void) |
1365 | { | 1366 | { |
1366 | #ifdef CONFIG_INTR_REMAP | 1367 | #ifdef CONFIG_INTR_REMAP |
1367 | int ret; | ||
1368 | unsigned long flags; | ||
1369 | struct IO_APIC_route_entry **ioapic_entries = NULL; | ||
1370 | |||
1371 | ret = dmar_table_init(); | ||
1372 | if (ret) { | ||
1373 | pr_debug("dmar_table_init() failed with %d:\n", ret); | ||
1374 | goto ir_failed; | ||
1375 | } | ||
1376 | |||
1377 | if (!intr_remapping_supported()) { | 1368 | if (!intr_remapping_supported()) { |
1378 | pr_debug("intr-remapping not supported\n"); | 1369 | pr_debug("intr-remapping not supported\n"); |
1379 | goto ir_failed; | 1370 | return 0; |
1380 | } | 1371 | } |
1381 | 1372 | ||
1382 | |||
1383 | if (!x2apic_preenabled && skip_ioapic_setup) { | 1373 | if (!x2apic_preenabled && skip_ioapic_setup) { |
1384 | pr_info("Skipped enabling intr-remap because of skipping " | 1374 | pr_info("Skipped enabling intr-remap because of skipping " |
1385 | "io-apic setup\n"); | 1375 | "io-apic setup\n"); |
1386 | return; | 1376 | return 0; |
1387 | } | 1377 | } |
1388 | 1378 | ||
1379 | if (enable_intr_remapping(x2apic_supported())) | ||
1380 | return 0; | ||
1381 | |||
1382 | pr_info("Enabled Interrupt-remapping\n"); | ||
1383 | |||
1384 | return 1; | ||
1385 | |||
1386 | #endif | ||
1387 | return 0; | ||
1388 | } | ||
1389 | |||
1390 | void __init enable_IR_x2apic(void) | ||
1391 | { | ||
1392 | unsigned long flags; | ||
1393 | struct IO_APIC_route_entry **ioapic_entries = NULL; | ||
1394 | int ret, x2apic_enabled = 0; | ||
1395 | int dmar_table_init_ret = 0; | ||
1396 | |||
1397 | #ifdef CONFIG_INTR_REMAP | ||
1398 | dmar_table_init_ret = dmar_table_init(); | ||
1399 | if (dmar_table_init_ret) | ||
1400 | pr_debug("dmar_table_init() failed with %d:\n", | ||
1401 | dmar_table_init_ret); | ||
1402 | #endif | ||
1403 | |||
1389 | ioapic_entries = alloc_ioapic_entries(); | 1404 | ioapic_entries = alloc_ioapic_entries(); |
1390 | if (!ioapic_entries) { | 1405 | if (!ioapic_entries) { |
1391 | pr_info("Allocate ioapic_entries failed: %d\n", ret); | 1406 | pr_err("Allocate ioapic_entries failed\n"); |
1392 | goto end; | 1407 | goto out; |
1393 | } | 1408 | } |
1394 | 1409 | ||
1395 | ret = save_IO_APIC_setup(ioapic_entries); | 1410 | ret = save_IO_APIC_setup(ioapic_entries); |
1396 | if (ret) { | 1411 | if (ret) { |
1397 | pr_info("Saving IO-APIC state failed: %d\n", ret); | 1412 | pr_info("Saving IO-APIC state failed: %d\n", ret); |
1398 | goto end; | 1413 | goto out; |
1399 | } | 1414 | } |
1400 | 1415 | ||
1401 | local_irq_save(flags); | 1416 | local_irq_save(flags); |
1402 | mask_IO_APIC_setup(ioapic_entries); | ||
1403 | mask_8259A(); | 1417 | mask_8259A(); |
1418 | mask_IO_APIC_setup(ioapic_entries); | ||
1404 | 1419 | ||
1405 | ret = enable_intr_remapping(x2apic_supported()); | 1420 | if (dmar_table_init_ret) |
1406 | if (ret) | 1421 | ret = 0; |
1407 | goto end_restore; | 1422 | else |
1423 | ret = enable_IR(); | ||
1408 | 1424 | ||
1409 | pr_info("Enabled Interrupt-remapping\n"); | 1425 | if (!ret) { |
1426 | /* IR is required if there is APIC ID > 255 even when running | ||
1427 | * under KVM | ||
1428 | */ | ||
1429 | if (max_physical_apicid > 255 || !kvm_para_available()) | ||
1430 | goto nox2apic; | ||
1431 | /* | ||
1432 | * without IR all CPUs can be addressed by IOAPIC/MSI | ||
1433 | * only in physical mode | ||
1434 | */ | ||
1435 | x2apic_force_phys(); | ||
1436 | } | ||
1437 | |||
1438 | x2apic_enabled = 1; | ||
1410 | 1439 | ||
1411 | if (x2apic_supported() && !x2apic_mode) { | 1440 | if (x2apic_supported() && !x2apic_mode) { |
1412 | x2apic_mode = 1; | 1441 | x2apic_mode = 1; |
@@ -1414,41 +1443,25 @@ void __init enable_IR_x2apic(void) | |||
1414 | pr_info("Enabled x2apic\n"); | 1443 | pr_info("Enabled x2apic\n"); |
1415 | } | 1444 | } |
1416 | 1445 | ||
1417 | end_restore: | 1446 | nox2apic: |
1418 | if (ret) | 1447 | if (!ret) /* IR enabling failed */ |
1419 | /* | ||
1420 | * IR enabling failed | ||
1421 | */ | ||
1422 | restore_IO_APIC_setup(ioapic_entries); | 1448 | restore_IO_APIC_setup(ioapic_entries); |
1423 | |||
1424 | unmask_8259A(); | 1449 | unmask_8259A(); |
1425 | local_irq_restore(flags); | 1450 | local_irq_restore(flags); |
1426 | 1451 | ||
1427 | end: | 1452 | out: |
1428 | if (ioapic_entries) | 1453 | if (ioapic_entries) |
1429 | free_ioapic_entries(ioapic_entries); | 1454 | free_ioapic_entries(ioapic_entries); |
1430 | 1455 | ||
1431 | if (!ret) | 1456 | if (x2apic_enabled) |
1432 | return; | 1457 | return; |
1433 | 1458 | ||
1434 | ir_failed: | ||
1435 | if (x2apic_preenabled) | 1459 | if (x2apic_preenabled) |
1436 | panic("x2apic enabled by bios. But IR enabling failed"); | 1460 | panic("x2apic: enabled by BIOS but kernel init failed."); |
1437 | else if (cpu_has_x2apic) | 1461 | else if (cpu_has_x2apic) |
1438 | pr_info("Not enabling x2apic,Intr-remapping\n"); | 1462 | pr_info("Not enabling x2apic, Intr-remapping init failed.\n"); |
1439 | #else | ||
1440 | if (!cpu_has_x2apic) | ||
1441 | return; | ||
1442 | |||
1443 | if (x2apic_preenabled) | ||
1444 | panic("x2apic enabled prior OS handover," | ||
1445 | " enable CONFIG_X86_X2APIC, CONFIG_INTR_REMAP"); | ||
1446 | #endif | ||
1447 | |||
1448 | return; | ||
1449 | } | 1463 | } |
1450 | 1464 | ||
1451 | |||
1452 | #ifdef CONFIG_X86_64 | 1465 | #ifdef CONFIG_X86_64 |
1453 | /* | 1466 | /* |
1454 | * Detect and enable local APICs on non-SMP boards. | 1467 | * Detect and enable local APICs on non-SMP boards. |
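After the split, enable_IR() only answers "did interrupt remapping come up?" (1 on success, 0 otherwise, including on !CONFIG_INTR_REMAP builds), and enable_IR_x2apic() layers the x2apic policy on top. The resulting decision, condensed from the hunks above:

        /* Sketch of the post-refactor policy in enable_IR_x2apic(). */
        ret = dmar_table_init_ret ? 0 : enable_IR();
        if (!ret) {
                /* No IR: x2apic is only usable when every APIC ID fits
                 * in 8 bits and we run under KVM. */
                if (max_physical_apicid > 255 || !kvm_para_available())
                        goto nox2apic;
                x2apic_force_phys();    /* without IR, IOAPIC/MSI can
                                         * address CPUs only in physical mode */
        }
        x2apic_enabled = 1;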
@@ -1549,8 +1562,6 @@ no_apic: | |||
1549 | #ifdef CONFIG_X86_64 | 1562 | #ifdef CONFIG_X86_64 |
1550 | void __init early_init_lapic_mapping(void) | 1563 | void __init early_init_lapic_mapping(void) |
1551 | { | 1564 | { |
1552 | unsigned long phys_addr; | ||
1553 | |||
1554 | /* | 1565 | /* |
1555 | * If no local APIC can be found then go out | 1566 | * If no local APIC can be found then go out |
1556 | * : it means there is no mpatable and MADT | 1567 | * : it means there is no mpatable and MADT |
@@ -1558,11 +1569,9 @@ void __init early_init_lapic_mapping(void) | |||
1558 | if (!smp_found_config) | 1569 | if (!smp_found_config) |
1559 | return; | 1570 | return; |
1560 | 1571 | ||
1561 | phys_addr = mp_lapic_addr; | 1572 | set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr); |
1562 | |||
1563 | set_fixmap_nocache(FIX_APIC_BASE, phys_addr); | ||
1564 | apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n", | 1573 | apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n", |
1565 | APIC_BASE, phys_addr); | 1574 | APIC_BASE, mp_lapic_addr); |
1566 | 1575 | ||
1567 | /* | 1576 | /* |
1568 | * Fetch the APIC ID of the BSP in case we have a | 1577 | * Fetch the APIC ID of the BSP in case we have a |
@@ -1651,7 +1660,6 @@ int __init APIC_init_uniprocessor(void) | |||
1651 | APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) { | 1660 | APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) { |
1652 | pr_err("BIOS bug, local APIC 0x%x not detected!...\n", | 1661 | pr_err("BIOS bug, local APIC 0x%x not detected!...\n", |
1653 | boot_cpu_physical_apicid); | 1662 | boot_cpu_physical_apicid); |
1654 | clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC); | ||
1655 | return -1; | 1663 | return -1; |
1656 | } | 1664 | } |
1657 | #endif | 1665 | #endif |
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c index 8952a5890281..89174f847b49 100644 --- a/arch/x86/kernel/apic/es7000_32.c +++ b/arch/x86/kernel/apic/es7000_32.c | |||
@@ -167,7 +167,7 @@ static int es7000_apic_is_cluster(void) | |||
167 | { | 167 | { |
168 | /* MPENTIUMIII */ | 168 | /* MPENTIUMIII */ |
169 | if (boot_cpu_data.x86 == 6 && | 169 | if (boot_cpu_data.x86 == 6 && |
170 | (boot_cpu_data.x86_model >= 7 || boot_cpu_data.x86_model <= 11)) | 170 | (boot_cpu_data.x86_model >= 7 && boot_cpu_data.x86_model <= 11)) |
171 | return 1; | 171 | return 1; |
172 | 172 | ||
173 | return 0; | 173 | return 0; |
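This one-character change fixes an always-true test: for any integer model, `model >= 7 || model <= 11` holds, so every family-6 CPU was classified as cluster-mode. With `&&` the check matches only models 7 through 11, the Pentium III cores the comment refers to:

        int old = (model >= 7 || model <= 11);  /* true for every model */
        int new = (model >= 7 && model <= 11);  /* true only for 7..11  */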
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index d2ed6c5ddc80..3c8f9e75d038 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
@@ -66,6 +66,8 @@ | |||
66 | #include <asm/apic.h> | 66 | #include <asm/apic.h> |
67 | 67 | ||
68 | #define __apicdebuginit(type) static type __init | 68 | #define __apicdebuginit(type) static type __init |
69 | #define for_each_irq_pin(entry, head) \ | ||
70 | for (entry = head; entry; entry = entry->next) | ||
69 | 71 | ||
70 | /* | 72 | /* |
71 | * Is the SiS APIC rmw bug present ? | 73 | * Is the SiS APIC rmw bug present ? |
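The new for_each_irq_pin() iterator replaces the half-dozen hand-rolled walks of the irq_2_pin list that the rest of this patch deletes. It is used like any list-iteration macro:

        /* Sketch: count the (apic, pin) entries routed to one irq_cfg. */
        static int count_irq_pins(struct irq_cfg *cfg)
        {
                struct irq_pin_list *entry;
                int n = 0;

                for_each_irq_pin(entry, cfg->irq_2_pin)
                        n++;
                return n;
        }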
@@ -85,6 +87,9 @@ int nr_ioapic_registers[MAX_IO_APICS]; | |||
85 | struct mpc_ioapic mp_ioapics[MAX_IO_APICS]; | 87 | struct mpc_ioapic mp_ioapics[MAX_IO_APICS]; |
86 | int nr_ioapics; | 88 | int nr_ioapics; |
87 | 89 | ||
90 | /* IO APIC gsi routing info */ | ||
91 | struct mp_ioapic_gsi mp_gsi_routing[MAX_IO_APICS]; | ||
92 | |||
88 | /* MP IRQ source entries */ | 93 | /* MP IRQ source entries */ |
89 | struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES]; | 94 | struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES]; |
90 | 95 | ||
@@ -116,15 +121,6 @@ static int __init parse_noapic(char *str) | |||
116 | } | 121 | } |
117 | early_param("noapic", parse_noapic); | 122 | early_param("noapic", parse_noapic); |
118 | 123 | ||
119 | struct irq_pin_list; | ||
120 | |||
121 | /* | ||
122 | * This is performance-critical, we want to do it O(1) | ||
123 | * | ||
124 | * the indexing order of this array favors 1:1 mappings | ||
125 | * between pins and IRQs. | ||
126 | */ | ||
127 | |||
128 | struct irq_pin_list { | 124 | struct irq_pin_list { |
129 | int apic, pin; | 125 | int apic, pin; |
130 | struct irq_pin_list *next; | 126 | struct irq_pin_list *next; |
@@ -139,6 +135,11 @@ static struct irq_pin_list *get_one_free_irq_2_pin(int node) | |||
139 | return pin; | 135 | return pin; |
140 | } | 136 | } |
141 | 137 | ||
138 | /* | ||
139 | * This is performance-critical, we want to do it O(1) | ||
140 | * | ||
141 | * Most irqs are mapped 1:1 with pins. | ||
142 | */ | ||
142 | struct irq_cfg { | 143 | struct irq_cfg { |
143 | struct irq_pin_list *irq_2_pin; | 144 | struct irq_pin_list *irq_2_pin; |
144 | cpumask_var_t domain; | 145 | cpumask_var_t domain; |
@@ -414,13 +415,10 @@ static bool io_apic_level_ack_pending(struct irq_cfg *cfg) | |||
414 | unsigned long flags; | 415 | unsigned long flags; |
415 | 416 | ||
416 | spin_lock_irqsave(&ioapic_lock, flags); | 417 | spin_lock_irqsave(&ioapic_lock, flags); |
417 | entry = cfg->irq_2_pin; | 418 | for_each_irq_pin(entry, cfg->irq_2_pin) { |
418 | for (;;) { | ||
419 | unsigned int reg; | 419 | unsigned int reg; |
420 | int pin; | 420 | int pin; |
421 | 421 | ||
422 | if (!entry) | ||
423 | break; | ||
424 | pin = entry->pin; | 422 | pin = entry->pin; |
425 | reg = io_apic_read(entry->apic, 0x10 + pin*2); | 423 | reg = io_apic_read(entry->apic, 0x10 + pin*2); |
426 | /* Is the remote IRR bit set? */ | 424 | /* Is the remote IRR bit set? */ |
@@ -428,9 +426,6 @@ static bool io_apic_level_ack_pending(struct irq_cfg *cfg) | |||
428 | spin_unlock_irqrestore(&ioapic_lock, flags); | 426 | spin_unlock_irqrestore(&ioapic_lock, flags); |
429 | return true; | 427 | return true; |
430 | } | 428 | } |
431 | if (!entry->next) | ||
432 | break; | ||
433 | entry = entry->next; | ||
434 | } | 429 | } |
435 | spin_unlock_irqrestore(&ioapic_lock, flags); | 430 | spin_unlock_irqrestore(&ioapic_lock, flags); |
436 | 431 | ||
@@ -498,72 +493,68 @@ static void ioapic_mask_entry(int apic, int pin) | |||
498 | * shared ISA-space IRQs, so we have to support them. We are super | 493 | * shared ISA-space IRQs, so we have to support them. We are super |
499 | * fast in the common case, and fast for shared ISA-space IRQs. | 494 | * fast in the common case, and fast for shared ISA-space IRQs. |
500 | */ | 495 | */ |
501 | static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin) | 496 | static int |
497 | add_pin_to_irq_node_nopanic(struct irq_cfg *cfg, int node, int apic, int pin) | ||
502 | { | 498 | { |
503 | struct irq_pin_list *entry; | 499 | struct irq_pin_list **last, *entry; |
504 | 500 | ||
505 | entry = cfg->irq_2_pin; | 501 | /* don't allow duplicates */ |
506 | if (!entry) { | 502 | last = &cfg->irq_2_pin; |
507 | entry = get_one_free_irq_2_pin(node); | 503 | for_each_irq_pin(entry, cfg->irq_2_pin) { |
508 | if (!entry) { | ||
509 | printk(KERN_ERR "can not alloc irq_2_pin to add %d - %d\n", | ||
510 | apic, pin); | ||
511 | return; | ||
512 | } | ||
513 | cfg->irq_2_pin = entry; | ||
514 | entry->apic = apic; | ||
515 | entry->pin = pin; | ||
516 | return; | ||
517 | } | ||
518 | |||
519 | while (entry->next) { | ||
520 | /* not again, please */ | ||
521 | if (entry->apic == apic && entry->pin == pin) | 504 | if (entry->apic == apic && entry->pin == pin) |
522 | return; | 505 | return 0; |
523 | 506 | last = &entry->next; | |
524 | entry = entry->next; | ||
525 | } | 507 | } |
526 | 508 | ||
527 | entry->next = get_one_free_irq_2_pin(node); | 509 | entry = get_one_free_irq_2_pin(node); |
528 | entry = entry->next; | 510 | if (!entry) { |
511 | printk(KERN_ERR "can not alloc irq_pin_list (%d,%d,%d)\n", | ||
512 | node, apic, pin); | ||
513 | return -ENOMEM; | ||
514 | } | ||
529 | entry->apic = apic; | 515 | entry->apic = apic; |
530 | entry->pin = pin; | 516 | entry->pin = pin; |
517 | |||
518 | *last = entry; | ||
519 | return 0; | ||
520 | } | ||
521 | |||
522 | static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin) | ||
523 | { | ||
524 | if (add_pin_to_irq_node_nopanic(cfg, node, apic, pin)) | ||
525 | panic("IO-APIC: failed to add irq-pin. Can not proceed\n"); | ||
531 | } | 526 | } |
532 | 527 | ||
533 | /* | 528 | /* |
534 | * Reroute an IRQ to a different pin. | 529 | * Reroute an IRQ to a different pin. |
535 | */ | 530 | */ |
536 | static void __init replace_pin_at_irq_node(struct irq_cfg *cfg, int node, | 531 | static void __init replace_pin_at_irq_node(struct irq_cfg *cfg, int node, |
537 | int oldapic, int oldpin, | 532 | int oldapic, int oldpin, |
538 | int newapic, int newpin) | 533 | int newapic, int newpin) |
539 | { | 534 | { |
540 | struct irq_pin_list *entry = cfg->irq_2_pin; | 535 | struct irq_pin_list *entry; |
541 | int replaced = 0; | ||
542 | 536 | ||
543 | while (entry) { | 537 | for_each_irq_pin(entry, cfg->irq_2_pin) { |
544 | if (entry->apic == oldapic && entry->pin == oldpin) { | 538 | if (entry->apic == oldapic && entry->pin == oldpin) { |
545 | entry->apic = newapic; | 539 | entry->apic = newapic; |
546 | entry->pin = newpin; | 540 | entry->pin = newpin; |
547 | replaced = 1; | ||
548 | /* every one is different, right? */ | 541 | /* every one is different, right? */ |
549 | break; | 542 | return; |
550 | } | 543 | } |
551 | entry = entry->next; | ||
552 | } | 544 | } |
553 | 545 | ||
554 | /* why? call replace before add? */ | 546 | /* old apic/pin didn't exist, so just add new ones */ |
555 | if (!replaced) | 547 | add_pin_to_irq_node(cfg, node, newapic, newpin); |
556 | add_pin_to_irq_node(cfg, node, newapic, newpin); | ||
557 | } | 548 | } |
558 | 549 | ||
559 | static inline void io_apic_modify_irq(struct irq_cfg *cfg, | 550 | static void io_apic_modify_irq(struct irq_cfg *cfg, |
560 | int mask_and, int mask_or, | 551 | int mask_and, int mask_or, |
561 | void (*final)(struct irq_pin_list *entry)) | 552 | void (*final)(struct irq_pin_list *entry)) |
562 | { | 553 | { |
563 | int pin; | 554 | int pin; |
564 | struct irq_pin_list *entry; | 555 | struct irq_pin_list *entry; |
565 | 556 | ||
566 | for (entry = cfg->irq_2_pin; entry != NULL; entry = entry->next) { | 557 | for_each_irq_pin(entry, cfg->irq_2_pin) { |
567 | unsigned int reg; | 558 | unsigned int reg; |
568 | pin = entry->pin; | 559 | pin = entry->pin; |
569 | reg = io_apic_read(entry->apic, 0x10 + pin * 2); | 560 | reg = io_apic_read(entry->apic, 0x10 + pin * 2); |
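The rewritten add_pin_to_irq_node_nopanic() is the classic pointer-to-pointer list append: `last` always holds the address of the link (the head pointer or a `next` field) through which the new entry must be stored, so the empty-list case needs no special handling. The same idiom in miniature:

        struct node { struct node *next; };

        /* Sketch: append without a head special-case. */
        static void append(struct node **head, struct node *new)
        {
                struct node **last = head;

                while (*last)                   /* stop at the terminating NULL link */
                        last = &(*last)->next;
                new->next = NULL;
                *last = new;
        }

Splitting out a _nopanic variant also lets __io_apic_set_pci_routing() below degrade gracefully instead of panicking on allocation failure.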
@@ -580,7 +571,6 @@ static void __unmask_IO_APIC_irq(struct irq_cfg *cfg) | |||
580 | io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL); | 571 | io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL); |
581 | } | 572 | } |
582 | 573 | ||
583 | #ifdef CONFIG_X86_64 | ||
584 | static void io_apic_sync(struct irq_pin_list *entry) | 574 | static void io_apic_sync(struct irq_pin_list *entry) |
585 | { | 575 | { |
586 | /* | 576 | /* |
@@ -596,11 +586,6 @@ static void __mask_IO_APIC_irq(struct irq_cfg *cfg) | |||
596 | { | 586 | { |
597 | io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync); | 587 | io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync); |
598 | } | 588 | } |
599 | #else /* CONFIG_X86_32 */ | ||
600 | static void __mask_IO_APIC_irq(struct irq_cfg *cfg) | ||
601 | { | ||
602 | io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, NULL); | ||
603 | } | ||
604 | 589 | ||
605 | static void __mask_and_edge_IO_APIC_irq(struct irq_cfg *cfg) | 590 | static void __mask_and_edge_IO_APIC_irq(struct irq_cfg *cfg) |
606 | { | 591 | { |
@@ -613,7 +598,6 @@ static void __unmask_and_level_IO_APIC_irq(struct irq_cfg *cfg) | |||
613 | io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, | 598 | io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, |
614 | IO_APIC_REDIR_LEVEL_TRIGGER, NULL); | 599 | IO_APIC_REDIR_LEVEL_TRIGGER, NULL); |
615 | } | 600 | } |
616 | #endif /* CONFIG_X86_32 */ | ||
617 | 601 | ||
618 | static void mask_IO_APIC_irq_desc(struct irq_desc *desc) | 602 | static void mask_IO_APIC_irq_desc(struct irq_desc *desc) |
619 | { | 603 | { |
@@ -1702,12 +1686,8 @@ __apicdebuginit(void) print_IO_APIC(void) | |||
1702 | if (!entry) | 1686 | if (!entry) |
1703 | continue; | 1687 | continue; |
1704 | printk(KERN_DEBUG "IRQ%d ", irq); | 1688 | printk(KERN_DEBUG "IRQ%d ", irq); |
1705 | for (;;) { | 1689 | for_each_irq_pin(entry, cfg->irq_2_pin) |
1706 | printk("-> %d:%d", entry->apic, entry->pin); | 1690 | printk("-> %d:%d", entry->apic, entry->pin); |
1707 | if (!entry->next) | ||
1708 | break; | ||
1709 | entry = entry->next; | ||
1710 | } | ||
1711 | printk("\n"); | 1691 | printk("\n"); |
1712 | } | 1692 | } |
1713 | 1693 | ||
@@ -2211,7 +2191,6 @@ static unsigned int startup_ioapic_irq(unsigned int irq) | |||
2211 | return was_pending; | 2191 | return was_pending; |
2212 | } | 2192 | } |
2213 | 2193 | ||
2214 | #ifdef CONFIG_X86_64 | ||
2215 | static int ioapic_retrigger_irq(unsigned int irq) | 2194 | static int ioapic_retrigger_irq(unsigned int irq) |
2216 | { | 2195 | { |
2217 | 2196 | ||
@@ -2224,14 +2203,6 @@ static int ioapic_retrigger_irq(unsigned int irq) | |||
2224 | 2203 | ||
2225 | return 1; | 2204 | return 1; |
2226 | } | 2205 | } |
2227 | #else | ||
2228 | static int ioapic_retrigger_irq(unsigned int irq) | ||
2229 | { | ||
2230 | apic->send_IPI_self(irq_cfg(irq)->vector); | ||
2231 | |||
2232 | return 1; | ||
2233 | } | ||
2234 | #endif | ||
2235 | 2206 | ||
2236 | /* | 2207 | /* |
2237 | * Level and edge triggered IO-APIC interrupts need different handling, | 2208 | * Level and edge triggered IO-APIC interrupts need different handling, |
@@ -2269,13 +2240,9 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq | |||
2269 | struct irq_pin_list *entry; | 2240 | struct irq_pin_list *entry; |
2270 | u8 vector = cfg->vector; | 2241 | u8 vector = cfg->vector; |
2271 | 2242 | ||
2272 | entry = cfg->irq_2_pin; | 2243 | for_each_irq_pin(entry, cfg->irq_2_pin) { |
2273 | for (;;) { | ||
2274 | unsigned int reg; | 2244 | unsigned int reg; |
2275 | 2245 | ||
2276 | if (!entry) | ||
2277 | break; | ||
2278 | |||
2279 | apic = entry->apic; | 2246 | apic = entry->apic; |
2280 | pin = entry->pin; | 2247 | pin = entry->pin; |
2281 | /* | 2248 | /* |
@@ -2288,9 +2255,6 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq | |||
2288 | reg &= ~IO_APIC_REDIR_VECTOR_MASK; | 2255 | reg &= ~IO_APIC_REDIR_VECTOR_MASK; |
2289 | reg |= vector; | 2256 | reg |= vector; |
2290 | io_apic_modify(apic, 0x10 + pin*2, reg); | 2257 | io_apic_modify(apic, 0x10 + pin*2, reg); |
2291 | if (!entry->next) | ||
2292 | break; | ||
2293 | entry = entry->next; | ||
2294 | } | 2258 | } |
2295 | } | 2259 | } |
2296 | 2260 | ||
@@ -2515,11 +2479,8 @@ atomic_t irq_mis_count; | |||
2515 | static void ack_apic_level(unsigned int irq) | 2479 | static void ack_apic_level(unsigned int irq) |
2516 | { | 2480 | { |
2517 | struct irq_desc *desc = irq_to_desc(irq); | 2481 | struct irq_desc *desc = irq_to_desc(irq); |
2518 | |||
2519 | #ifdef CONFIG_X86_32 | ||
2520 | unsigned long v; | 2482 | unsigned long v; |
2521 | int i; | 2483 | int i; |
2522 | #endif | ||
2523 | struct irq_cfg *cfg; | 2484 | struct irq_cfg *cfg; |
2524 | int do_unmask_irq = 0; | 2485 | int do_unmask_irq = 0; |
2525 | 2486 | ||
@@ -2532,31 +2493,28 @@ static void ack_apic_level(unsigned int irq) | |||
2532 | } | 2493 | } |
2533 | #endif | 2494 | #endif |
2534 | 2495 | ||
2535 | #ifdef CONFIG_X86_32 | ||
2536 | /* | 2496 | /* |
2537 | * It appears there is an erratum which affects at least version 0x11 | 2497 | * It appears there is an erratum which affects at least version 0x11 |
2538 | * of I/O APIC (that's the 82093AA and cores integrated into various | 2498 | * of I/O APIC (that's the 82093AA and cores integrated into various |
2539 | * chipsets). Under certain conditions a level-triggered interrupt is | 2499 | * chipsets). Under certain conditions a level-triggered interrupt is |
2540 | * erroneously delivered as edge-triggered one but the respective IRR | 2500 | * erroneously delivered as edge-triggered one but the respective IRR |
2541 | * bit gets set nevertheless. As a result the I/O unit expects an EOI | 2501 | * bit gets set nevertheless. As a result the I/O unit expects an EOI |
2542 | * message but it will never arrive and further interrupts are blocked | 2502 | * message but it will never arrive and further interrupts are blocked |
2543 | * from the source. The exact reason is so far unknown, but the | 2503 | * from the source. The exact reason is so far unknown, but the |
2544 | * phenomenon was observed when two consecutive interrupt requests | 2504 | * phenomenon was observed when two consecutive interrupt requests |
2545 | * from a given source get delivered to the same CPU and the source is | 2505 | * from a given source get delivered to the same CPU and the source is |
2546 | * temporarily disabled in between. | 2506 | * temporarily disabled in between. |
2547 | * | 2507 | * |
2548 | * A workaround is to simulate an EOI message manually. We achieve it | 2508 | * A workaround is to simulate an EOI message manually. We achieve it |
2549 | * by setting the trigger mode to edge and then to level when the edge | 2509 | * by setting the trigger mode to edge and then to level when the edge |
2550 | * trigger mode gets detected in the TMR of a local APIC for a | 2510 | * trigger mode gets detected in the TMR of a local APIC for a |
2551 | * level-triggered interrupt. We mask the source for the time of the | 2511 | * level-triggered interrupt. We mask the source for the time of the |
2552 | * operation to prevent an edge-triggered interrupt escaping meanwhile. | 2512 | * operation to prevent an edge-triggered interrupt escaping meanwhile. |
2553 | * The idea is from Manfred Spraul. --macro | 2513 | * The idea is from Manfred Spraul. --macro |
2554 | */ | 2514 | */ |
2555 | cfg = desc->chip_data; | 2515 | cfg = desc->chip_data; |
2556 | i = cfg->vector; | 2516 | i = cfg->vector; |
2557 | |||
2558 | v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1)); | 2517 | v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1)); |
2559 | #endif | ||
2560 | 2518 | ||
2561 | /* | 2519 | /* |
2562 | * We must acknowledge the irq before we move it or the acknowledge will | 2520 | * We must acknowledge the irq before we move it or the acknowledge will |
@@ -2598,7 +2556,7 @@ static void ack_apic_level(unsigned int irq) | |||
2598 | unmask_IO_APIC_irq_desc(desc); | 2556 | unmask_IO_APIC_irq_desc(desc); |
2599 | } | 2557 | } |
2600 | 2558 | ||
2601 | #ifdef CONFIG_X86_32 | 2559 | /* Tail end of version 0x11 I/O APIC bug workaround */ |
2602 | if (!(v & (1 << (i & 0x1f)))) { | 2560 | if (!(v & (1 << (i & 0x1f)))) { |
2603 | atomic_inc(&irq_mis_count); | 2561 | atomic_inc(&irq_mis_count); |
2604 | spin_lock(&ioapic_lock); | 2562 | spin_lock(&ioapic_lock); |
@@ -2606,26 +2564,15 @@ static void ack_apic_level(unsigned int irq) | |||
2606 | __unmask_and_level_IO_APIC_irq(cfg); | 2564 | __unmask_and_level_IO_APIC_irq(cfg); |
2607 | spin_unlock(&ioapic_lock); | 2565 | spin_unlock(&ioapic_lock); |
2608 | } | 2566 | } |
2609 | #endif | ||
2610 | } | 2567 | } |
2611 | 2568 | ||
2612 | #ifdef CONFIG_INTR_REMAP | 2569 | #ifdef CONFIG_INTR_REMAP |
2613 | static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) | 2570 | static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) |
2614 | { | 2571 | { |
2615 | int apic, pin; | ||
2616 | struct irq_pin_list *entry; | 2572 | struct irq_pin_list *entry; |
2617 | 2573 | ||
2618 | entry = cfg->irq_2_pin; | 2574 | for_each_irq_pin(entry, cfg->irq_2_pin) |
2619 | for (;;) { | 2575 | io_apic_eoi(entry->apic, entry->pin); |
2620 | |||
2621 | if (!entry) | ||
2622 | break; | ||
2623 | |||
2624 | apic = entry->apic; | ||
2625 | pin = entry->pin; | ||
2626 | io_apic_eoi(apic, pin); | ||
2627 | entry = entry->next; | ||
2628 | } | ||
2629 | } | 2576 | } |
2630 | 2577 | ||
2631 | static void | 2578 | static void |
@@ -3241,8 +3188,7 @@ void destroy_irq(unsigned int irq) | |||
3241 | cfg = desc->chip_data; | 3188 | cfg = desc->chip_data; |
3242 | dynamic_irq_cleanup(irq); | 3189 | dynamic_irq_cleanup(irq); |
3243 | /* connect back irq_cfg */ | 3190 | /* connect back irq_cfg */ |
3244 | if (desc) | 3191 | desc->chip_data = cfg; |
3245 | desc->chip_data = cfg; | ||
3246 | 3192 | ||
3247 | free_irte(irq); | 3193 | free_irte(irq); |
3248 | spin_lock_irqsave(&vector_lock, flags); | 3194 | spin_lock_irqsave(&vector_lock, flags); |
@@ -3912,7 +3858,11 @@ static int __io_apic_set_pci_routing(struct device *dev, int irq, | |||
3912 | */ | 3858 | */ |
3913 | if (irq >= NR_IRQS_LEGACY) { | 3859 | if (irq >= NR_IRQS_LEGACY) { |
3914 | cfg = desc->chip_data; | 3860 | cfg = desc->chip_data; |
3915 | add_pin_to_irq_node(cfg, node, ioapic, pin); | 3861 | if (add_pin_to_irq_node_nopanic(cfg, node, ioapic, pin)) { |
3862 | printk(KERN_INFO "can not add pin %d for irq %d\n", | ||
3863 | pin, irq); | ||
3864 | return 0; | ||
3865 | } | ||
3916 | } | 3866 | } |
3917 | 3867 | ||
3918 | setup_IO_APIC_irq(ioapic, pin, irq, desc, trigger, polarity); | 3868 | setup_IO_APIC_irq(ioapic, pin, irq, desc, trigger, polarity); |
@@ -3941,11 +3891,28 @@ int io_apic_set_pci_routing(struct device *dev, int irq, | |||
3941 | return __io_apic_set_pci_routing(dev, irq, irq_attr); | 3891 | return __io_apic_set_pci_routing(dev, irq, irq_attr); |
3942 | } | 3892 | } |
3943 | 3893 | ||
3944 | /* -------------------------------------------------------------------------- | 3894 | u8 __init io_apic_unique_id(u8 id) |
3945 | ACPI-based IOAPIC Configuration | 3895 | { |
3946 | -------------------------------------------------------------------------- */ | 3896 | #ifdef CONFIG_X86_32 |
3897 | if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && | ||
3898 | !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) | ||
3899 | return io_apic_get_unique_id(nr_ioapics, id); | ||
3900 | else | ||
3901 | return id; | ||
3902 | #else | ||
3903 | int i; | ||
3904 | DECLARE_BITMAP(used, 256); | ||
3947 | 3905 | ||
3948 | #ifdef CONFIG_ACPI | 3906 | bitmap_zero(used, 256); |
3907 | for (i = 0; i < nr_ioapics; i++) { | ||
3908 | struct mpc_ioapic *ia = &mp_ioapics[i]; | ||
3909 | __set_bit(ia->apicid, used); | ||
3910 | } | ||
3911 | if (!test_bit(id, used)) | ||
3912 | return id; | ||
3913 | return find_first_zero_bit(used, 256); | ||
3914 | #endif | ||
3915 | } | ||
3949 | 3916 | ||
3950 | #ifdef CONFIG_X86_32 | 3917 | #ifdef CONFIG_X86_32 |
3951 | int __init io_apic_get_unique_id(int ioapic, int apic_id) | 3918 | int __init io_apic_get_unique_id(int ioapic, int apic_id) |
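On 64-bit, io_apic_unique_id() avoids ID clashes with a 256-bit bitmap of APIC IDs already claimed by registered IOAPICs: keep the firmware-provided id if it is free, otherwise hand out the lowest unused one. The underlying pattern, isolated:

        #include <linux/bitmap.h>

        /* Sketch: "first free small integer" allocation via a bitmap. */
        static int pick_id(const unsigned long *used, int nbits, int wanted)
        {
                if (!test_bit(wanted, used))
                        return wanted;                  /* preferred id is free */
                return find_first_zero_bit(used, nbits); /* else the lowest free one */
        }

DECLARE_BITMAP(used, 256) expands to an unsigned long array large enough to hold 256 bits, so the cost is 32 bytes of stack.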
@@ -4054,8 +4021,6 @@ int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity) | |||
4054 | return 0; | 4021 | return 0; |
4055 | } | 4022 | } |
4056 | 4023 | ||
4057 | #endif /* CONFIG_ACPI */ | ||
4058 | |||
4059 | /* | 4024 | /* |
4060 | * This function currently is only a helper for the i386 smp boot process where | 4025 | * This function currently is only a helper for the i386 smp boot process where |
4061 | * we need to reprogram the ioredtbls to cater for the cpus which have come online | 4026 | * we need to reprogram the ioredtbls to cater for the cpus which have come online |
@@ -4109,7 +4074,7 @@ void __init setup_ioapic_dest(void) | |||
4109 | 4074 | ||
4110 | static struct resource *ioapic_resources; | 4075 | static struct resource *ioapic_resources; |
4111 | 4076 | ||
4112 | static struct resource * __init ioapic_setup_resources(void) | 4077 | static struct resource * __init ioapic_setup_resources(int nr_ioapics) |
4113 | { | 4078 | { |
4114 | unsigned long n; | 4079 | unsigned long n; |
4115 | struct resource *res; | 4080 | struct resource *res; |
@@ -4125,15 +4090,13 @@ static struct resource * __init ioapic_setup_resources(void) | |||
4125 | mem = alloc_bootmem(n); | 4090 | mem = alloc_bootmem(n); |
4126 | res = (void *)mem; | 4091 | res = (void *)mem; |
4127 | 4092 | ||
4128 | if (mem != NULL) { | 4093 | mem += sizeof(struct resource) * nr_ioapics; |
4129 | mem += sizeof(struct resource) * nr_ioapics; | ||
4130 | 4094 | ||
4131 | for (i = 0; i < nr_ioapics; i++) { | 4095 | for (i = 0; i < nr_ioapics; i++) { |
4132 | res[i].name = mem; | 4096 | res[i].name = mem; |
4133 | res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY; | 4097 | res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY; |
4134 | sprintf(mem, "IOAPIC %u", i); | 4098 | sprintf(mem, "IOAPIC %u", i); |
4135 | mem += IOAPIC_RESOURCE_NAME_SIZE; | 4099 | mem += IOAPIC_RESOURCE_NAME_SIZE; |
4136 | } | ||
4137 | } | 4100 | } |
4138 | 4101 | ||
4139 | ioapic_resources = res; | 4102 | ioapic_resources = res; |
@@ -4147,7 +4110,7 @@ void __init ioapic_init_mappings(void) | |||
4147 | struct resource *ioapic_res; | 4110 | struct resource *ioapic_res; |
4148 | int i; | 4111 | int i; |
4149 | 4112 | ||
4150 | ioapic_res = ioapic_setup_resources(); | 4113 | ioapic_res = ioapic_setup_resources(nr_ioapics); |
4151 | for (i = 0; i < nr_ioapics; i++) { | 4114 | for (i = 0; i < nr_ioapics; i++) { |
4152 | if (smp_found_config) { | 4115 | if (smp_found_config) { |
4153 | ioapic_phys = mp_ioapics[i].apicaddr; | 4116 | ioapic_phys = mp_ioapics[i].apicaddr; |
@@ -4176,11 +4139,9 @@ fake_ioapic_page: | |||
4176 | __fix_to_virt(idx), ioapic_phys); | 4139 | __fix_to_virt(idx), ioapic_phys); |
4177 | idx++; | 4140 | idx++; |
4178 | 4141 | ||
4179 | if (ioapic_res != NULL) { | 4142 | ioapic_res->start = ioapic_phys; |
4180 | ioapic_res->start = ioapic_phys; | 4143 | ioapic_res->end = ioapic_phys + (4 * 1024) - 1; |
4181 | ioapic_res->end = ioapic_phys + (4 * 1024) - 1; | 4144 | ioapic_res++; |
4182 | ioapic_res++; | ||
4183 | } | ||
4184 | } | 4145 | } |
4185 | } | 4146 | } |
4186 | 4147 | ||
@@ -4201,3 +4162,76 @@ void __init ioapic_insert_resources(void) | |||
4201 | r++; | 4162 | r++; |
4202 | } | 4163 | } |
4203 | } | 4164 | } |
4165 | |||
4166 | int mp_find_ioapic(int gsi) | ||
4167 | { | ||
4168 | int i = 0; | ||
4169 | |||
4170 | /* Find the IOAPIC that manages this GSI. */ | ||
4171 | for (i = 0; i < nr_ioapics; i++) { | ||
4172 | if ((gsi >= mp_gsi_routing[i].gsi_base) | ||
4173 | && (gsi <= mp_gsi_routing[i].gsi_end)) | ||
4174 | return i; | ||
4175 | } | ||
4176 | |||
4177 | printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi); | ||
4178 | return -1; | ||
4179 | } | ||
4180 | |||
4181 | int mp_find_ioapic_pin(int ioapic, int gsi) | ||
4182 | { | ||
4183 | if (WARN_ON(ioapic == -1)) | ||
4184 | return -1; | ||
4185 | if (WARN_ON(gsi > mp_gsi_routing[ioapic].gsi_end)) | ||
4186 | return -1; | ||
4187 | |||
4188 | return gsi - mp_gsi_routing[ioapic].gsi_base; | ||
4189 | } | ||
4190 | |||
4191 | static int bad_ioapic(unsigned long address) | ||
4192 | { | ||
4193 | if (nr_ioapics >= MAX_IO_APICS) { | ||
4194 | printk(KERN_WARNING "WARING: Max # of I/O APICs (%d) exceeded " | ||
4195 | "(found %d), skipping\n", MAX_IO_APICS, nr_ioapics); | ||
4196 | return 1; | ||
4197 | } | ||
4198 | if (!address) { | ||
4199 | printk(KERN_WARNING "WARNING: Bogus (zero) I/O APIC address" | ||
4200 | " found in table, skipping!\n"); | ||
4201 | return 1; | ||
4202 | } | ||
4203 | return 0; | ||
4204 | } | ||
4205 | |||
4206 | void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) | ||
4207 | { | ||
4208 | int idx = 0; | ||
4209 | |||
4210 | if (bad_ioapic(address)) | ||
4211 | return; | ||
4212 | |||
4213 | idx = nr_ioapics; | ||
4214 | |||
4215 | mp_ioapics[idx].type = MP_IOAPIC; | ||
4216 | mp_ioapics[idx].flags = MPC_APIC_USABLE; | ||
4217 | mp_ioapics[idx].apicaddr = address; | ||
4218 | |||
4219 | set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); | ||
4220 | mp_ioapics[idx].apicid = io_apic_unique_id(id); | ||
4221 | mp_ioapics[idx].apicver = io_apic_get_version(idx); | ||
4222 | |||
4223 | /* | ||
4224 | * Build basic GSI lookup table to facilitate gsi->io_apic lookups | ||
4225 | * and to prevent reprogramming of IOAPIC pins (PCI GSIs). | ||
4226 | */ | ||
4227 | mp_gsi_routing[idx].gsi_base = gsi_base; | ||
4228 | mp_gsi_routing[idx].gsi_end = gsi_base + | ||
4229 | io_apic_get_redir_entries(idx); | ||
4230 | |||
4231 | printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, " | ||
4232 | "GSI %d-%d\n", idx, mp_ioapics[idx].apicid, | ||
4233 | mp_ioapics[idx].apicver, mp_ioapics[idx].apicaddr, | ||
4234 | mp_gsi_routing[idx].gsi_base, mp_gsi_routing[idx].gsi_end); | ||
4235 | |||
4236 | nr_ioapics++; | ||
4237 | } | ||
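mp_register_ioapic() records each IOAPIC's GSI window in mp_gsi_routing[], and mp_find_ioapic()/mp_find_ioapic_pin() invert that mapping. A worked example with assumed (but typical) values:

        /*
         * Sketch: two IOAPICs registered as
         *   mp_gsi_routing[0] = { .gsi_base = 0,  .gsi_end = 23 }
         *   mp_gsi_routing[1] = { .gsi_base = 24, .gsi_end = 47 }
         */
        int ioapic = mp_find_ioapic(30);                /* -> 1 */
        int pin    = mp_find_ioapic_pin(ioapic, 30);    /* -> 30 - 24 = 6 */

Note gsi_end is gsi_base plus the redirection-entry count, so the range check above is inclusive on both ends.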
diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c index 6ef00ba4c886..08385e090a6f 100644 --- a/arch/x86/kernel/apic/ipi.c +++ b/arch/x86/kernel/apic/ipi.c | |||
@@ -153,7 +153,7 @@ int safe_smp_processor_id(void) | |||
153 | { | 153 | { |
154 | int apicid, cpuid; | 154 | int apicid, cpuid; |
155 | 155 | ||
156 | if (!boot_cpu_has(X86_FEATURE_APIC)) | 156 | if (!cpu_has_apic) |
157 | return 0; | 157 | return 0; |
158 | 158 | ||
159 | apicid = hard_smp_processor_id(); | 159 | apicid = hard_smp_processor_id(); |
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c index b3025b43b63a..db7220220d09 100644 --- a/arch/x86/kernel/apic/nmi.c +++ b/arch/x86/kernel/apic/nmi.c | |||
@@ -39,7 +39,7 @@ | |||
39 | int unknown_nmi_panic; | 39 | int unknown_nmi_panic; |
40 | int nmi_watchdog_enabled; | 40 | int nmi_watchdog_enabled; |
41 | 41 | ||
42 | static cpumask_var_t backtrace_mask; | 42 | static cpumask_t backtrace_mask __read_mostly; |
43 | 43 | ||
44 | /* nmi_active: | 44 | /* nmi_active: |
45 | * >0: the lapic NMI watchdog is active, but can be disabled | 45 | * >0: the lapic NMI watchdog is active, but can be disabled |
@@ -138,7 +138,6 @@ int __init check_nmi_watchdog(void) | |||
138 | if (!prev_nmi_count) | 138 | if (!prev_nmi_count) |
139 | goto error; | 139 | goto error; |
140 | 140 | ||
141 | alloc_cpumask_var(&backtrace_mask, GFP_KERNEL|__GFP_ZERO); | ||
142 | printk(KERN_INFO "Testing NMI watchdog ... "); | 141 | printk(KERN_INFO "Testing NMI watchdog ... "); |
143 | 142 | ||
144 | #ifdef CONFIG_SMP | 143 | #ifdef CONFIG_SMP |
@@ -415,14 +414,17 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason) | |||
415 | } | 414 | } |
416 | 415 | ||
417 | /* We can be called before check_nmi_watchdog, hence NULL check. */ | 416 | /* We can be called before check_nmi_watchdog, hence NULL check. */ |
418 | if (backtrace_mask != NULL && cpumask_test_cpu(cpu, backtrace_mask)) { | 417 | if (cpumask_test_cpu(cpu, &backtrace_mask)) { |
419 | static DEFINE_SPINLOCK(lock); /* Serialise the printks */ | 418 | static DEFINE_SPINLOCK(lock); /* Serialise the printks */ |
420 | 419 | ||
421 | spin_lock(&lock); | 420 | spin_lock(&lock); |
422 | printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu); | 421 | printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu); |
422 | show_regs(regs); | ||
423 | dump_stack(); | 423 | dump_stack(); |
424 | spin_unlock(&lock); | 424 | spin_unlock(&lock); |
425 | cpumask_clear_cpu(cpu, backtrace_mask); | 425 | cpumask_clear_cpu(cpu, &backtrace_mask); |
426 | |||
427 | rc = 1; | ||
426 | } | 428 | } |
427 | 429 | ||
428 | /* Could check oops_in_progress here too, but it's safer not to */ | 430 | /* Could check oops_in_progress here too, but it's safer not to */ |
@@ -552,14 +554,18 @@ int do_nmi_callback(struct pt_regs *regs, int cpu) | |||
552 | return 0; | 554 | return 0; |
553 | } | 555 | } |
554 | 556 | ||
555 | void __trigger_all_cpu_backtrace(void) | 557 | void arch_trigger_all_cpu_backtrace(void) |
556 | { | 558 | { |
557 | int i; | 559 | int i; |
558 | 560 | ||
559 | cpumask_copy(backtrace_mask, cpu_online_mask); | 561 | cpumask_copy(&backtrace_mask, cpu_online_mask); |
562 | |||
563 | printk(KERN_INFO "sending NMI to all CPUs:\n"); | ||
564 | apic->send_IPI_all(NMI_VECTOR); | ||
565 | |||
560 | /* Wait for up to 10 seconds for all CPUs to do the backtrace */ | 566 | /* Wait for up to 10 seconds for all CPUs to do the backtrace */ |
561 | for (i = 0; i < 10 * 1000; i++) { | 567 | for (i = 0; i < 10 * 1000; i++) { |
562 | if (cpumask_empty(backtrace_mask)) | 568 | if (cpumask_empty(&backtrace_mask)) |
563 | break; | 569 | break; |
564 | mdelay(1); | 570 | mdelay(1); |
565 | } | 571 | } |
diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c index bc3e880f9b82..65edc180fc82 100644 --- a/arch/x86/kernel/apic/probe_64.c +++ b/arch/x86/kernel/apic/probe_64.c | |||
@@ -44,17 +44,22 @@ static struct apic *apic_probe[] __initdata = { | |||
44 | NULL, | 44 | NULL, |
45 | }; | 45 | }; |
46 | 46 | ||
47 | static int apicid_phys_pkg_id(int initial_apic_id, int index_msb) | ||
48 | { | ||
49 | return hard_smp_processor_id() >> index_msb; | ||
50 | } | ||
51 | |||
47 | /* | 52 | /* |
48 | * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode. | 53 | * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode. |
49 | */ | 54 | */ |
50 | void __init default_setup_apic_routing(void) | 55 | void __init default_setup_apic_routing(void) |
51 | { | 56 | { |
52 | #ifdef CONFIG_X86_X2APIC | 57 | #ifdef CONFIG_X86_X2APIC |
53 | if (x2apic_mode && (apic != &apic_x2apic_phys && | 58 | if (x2apic_mode |
54 | #ifdef CONFIG_X86_UV | 59 | #ifdef CONFIG_X86_UV |
55 | apic != &apic_x2apic_uv_x && | 60 | && apic != &apic_x2apic_uv_x |
56 | #endif | 61 | #endif |
57 | apic != &apic_x2apic_cluster)) { | 62 | ) { |
58 | if (x2apic_phys) | 63 | if (x2apic_phys) |
59 | apic = &apic_x2apic_phys; | 64 | apic = &apic_x2apic_phys; |
60 | else | 65 | else |
@@ -69,6 +74,11 @@ void __init default_setup_apic_routing(void) | |||
69 | printk(KERN_INFO "Setting APIC routing to %s\n", apic->name); | 74 | printk(KERN_INFO "Setting APIC routing to %s\n", apic->name); |
70 | } | 75 | } |
71 | 76 | ||
77 | if (is_vsmp_box()) { | ||
78 | /* need to update phys_pkg_id */ | ||
79 | apic->phys_pkg_id = apicid_phys_pkg_id; | ||
80 | } | ||
81 | |||
72 | /* | 82 | /* |
73 | * Now that apic routing model is selected, configure the | 83 | * Now that apic routing model is selected, configure the |
74 | * fault handling for intr remapping. | 84 | * fault handling for intr remapping. |
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index 442b5508893f..151ace69a5aa 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c | |||
@@ -403,7 +403,15 @@ static DECLARE_WAIT_QUEUE_HEAD(apm_waitqueue); | |||
403 | static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue); | 403 | static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue); |
404 | static struct apm_user *user_list; | 404 | static struct apm_user *user_list; |
405 | static DEFINE_SPINLOCK(user_list_lock); | 405 | static DEFINE_SPINLOCK(user_list_lock); |
406 | static const struct desc_struct bad_bios_desc = { { { 0, 0x00409200 } } }; | 406 | |
407 | /* | ||
408 | * Set up a segment that references the real mode segment 0x40 | ||
409 | * that extends up to the end of page zero (that we have reserved). | ||
410 | * This is for buggy BIOS's that refer to (real mode) segment 0x40 | ||
411 | * even though they are called in protected mode. | ||
412 | */ | ||
413 | static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092, | ||
414 | (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1); | ||
407 | 415 | ||
408 | static const char driver_version[] = "1.16ac"; /* no spaces */ | 416 | static const char driver_version[] = "1.16ac"; /* no spaces */ |
409 | 417 | ||
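GDT_ENTRY_INIT() builds bad_bios_desc at compile time, replacing the runtime set_base()/_set_limit() calls deleted from apm_init() below. Flags 0x4092 encode a present, DPL-0, 32-bit read/write data segment; the base is the kernel virtual alias of real-mode segment 0x40 and the limit spans the remainder of page zero. For reference, the macro packs the x86 descriptor fields roughly like this (see asm/desc_defs.h for the authoritative layout):

        #define GDT_ENTRY_INIT(flags, base, limit) { { { \
                .a = ((limit) & 0xffff) | (((base) & 0xffff) << 16), \
                .b = (((base) & 0xff0000) >> 16) | (((flags) & 0xf0ff) << 8) | \
                     ((limit) & 0xf0000) | ((base) & 0xff000000), \
        } } }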
@@ -2332,15 +2340,6 @@ static int __init apm_init(void) | |||
2332 | pm_flags |= PM_APM; | 2340 | pm_flags |= PM_APM; |
2333 | 2341 | ||
2334 | /* | 2342 | /* |
2335 | * Set up a segment that references the real mode segment 0x40 | ||
2336 | * that extends up to the end of page zero (that we have reserved). | ||
2337 | * This is for buggy BIOS's that refer to (real mode) segment 0x40 | ||
2338 | * even though they are called in protected mode. | ||
2339 | */ | ||
2340 | set_base(bad_bios_desc, __va((unsigned long)0x40 << 4)); | ||
2341 | _set_limit((char *)&bad_bios_desc, 4095 - (0x40 << 4)); | ||
2342 | |||
2343 | /* | ||
2344 | * Set up the long jump entry point to the APM BIOS, which is called | 2343 | * Set up the long jump entry point to the APM BIOS, which is called |
2345 | * from inline assembly. | 2344 | * from inline assembly. |
2346 | */ | 2345 | */ |
@@ -2358,12 +2357,12 @@ static int __init apm_init(void) | |||
2358 | * code to that CPU. | 2357 | * code to that CPU. |
2359 | */ | 2358 | */ |
2360 | gdt = get_cpu_gdt_table(0); | 2359 | gdt = get_cpu_gdt_table(0); |
2361 | set_base(gdt[APM_CS >> 3], | 2360 | set_desc_base(&gdt[APM_CS >> 3], |
2362 | __va((unsigned long)apm_info.bios.cseg << 4)); | 2361 | (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4)); |
2363 | set_base(gdt[APM_CS_16 >> 3], | 2362 | set_desc_base(&gdt[APM_CS_16 >> 3], |
2364 | __va((unsigned long)apm_info.bios.cseg_16 << 4)); | 2363 | (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4)); |
2365 | set_base(gdt[APM_DS >> 3], | 2364 | set_desc_base(&gdt[APM_DS >> 3], |
2366 | __va((unsigned long)apm_info.bios.dseg << 4)); | 2365 | (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4)); |
2367 | 2366 | ||
2368 | proc_create("apm", 0, NULL, &apm_file_ops); | 2367 | proc_create("apm", 0, NULL, &apm_file_ops); |
2369 | 2368 | ||
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c index 898ecc47e129..4a6aeedcd965 100644 --- a/arch/x86/kernel/asm-offsets_64.c +++ b/arch/x86/kernel/asm-offsets_64.c | |||
@@ -3,6 +3,7 @@ | |||
3 | * This code generates raw asm output which is post-processed to extract | 3 | * This code generates raw asm output which is post-processed to extract |
4 | * and format the required data. | 4 | * and format the required data. |
5 | */ | 5 | */ |
6 | #define COMPILE_OFFSETS | ||
6 | 7 | ||
7 | #include <linux/crypto.h> | 8 | #include <linux/crypto.h> |
8 | #include <linux/sched.h> | 9 | #include <linux/sched.h> |
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index 3efcb2b96a15..c1f253dac155 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile | |||
@@ -7,6 +7,10 @@ ifdef CONFIG_FUNCTION_TRACER | |||
7 | CFLAGS_REMOVE_common.o = -pg | 7 | CFLAGS_REMOVE_common.o = -pg |
8 | endif | 8 | endif |
9 | 9 | ||
10 | # Make sure load_percpu_segment has no stackprotector | ||
11 | nostackp := $(call cc-option, -fno-stack-protector) | ||
12 | CFLAGS_common.o := $(nostackp) | ||
13 | |||
10 | obj-y := intel_cacheinfo.o addon_cpuid_features.o | 14 | obj-y := intel_cacheinfo.o addon_cpuid_features.o |
11 | obj-y += proc.o capflags.o powerflags.o common.o | 15 | obj-y += proc.o capflags.o powerflags.o common.o |
12 | obj-y += vmware.o hypervisor.o | 16 | obj-y += vmware.o hypervisor.o |
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index e1600c775e1d..22a47c82f3c0 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
@@ -2,7 +2,7 @@ | |||
2 | #include <linux/bitops.h> | 2 | #include <linux/bitops.h> |
3 | #include <linux/mm.h> | 3 | #include <linux/mm.h> |
4 | 4 | ||
5 | #include <asm/io.h> | 5 | #include <linux/io.h> |
6 | #include <asm/processor.h> | 6 | #include <asm/processor.h> |
7 | #include <asm/apic.h> | 7 | #include <asm/apic.h> |
8 | #include <asm/cpu.h> | 8 | #include <asm/cpu.h> |
@@ -45,8 +45,8 @@ static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c) | |||
45 | #define CBAR_ENB (0x80000000) | 45 | #define CBAR_ENB (0x80000000) |
46 | #define CBAR_KEY (0X000000CB) | 46 | #define CBAR_KEY (0X000000CB) |
47 | if (c->x86_model == 9 || c->x86_model == 10) { | 47 | if (c->x86_model == 9 || c->x86_model == 10) { |
48 | if (inl (CBAR) & CBAR_ENB) | 48 | if (inl(CBAR) & CBAR_ENB) |
49 | outl (0 | CBAR_KEY, CBAR); | 49 | outl(0 | CBAR_KEY, CBAR); |
50 | } | 50 | } |
51 | } | 51 | } |
52 | 52 | ||
@@ -87,9 +87,10 @@ static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c) | |||
87 | d = d2-d; | 87 | d = d2-d; |
88 | 88 | ||
89 | if (d > 20*K6_BUG_LOOP) | 89 | if (d > 20*K6_BUG_LOOP) |
90 | printk("system stability may be impaired when more than 32 MB are used.\n"); | 90 | printk(KERN_CONT |
91 | "system stability may be impaired when more than 32 MB are used.\n"); | ||
91 | else | 92 | else |
92 | printk("probably OK (after B9730xxxx).\n"); | 93 | printk(KERN_CONT "probably OK (after B9730xxxx).\n"); |
93 | printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n"); | 94 | printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n"); |
94 | } | 95 | } |
95 | 96 | ||
@@ -219,8 +220,9 @@ static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c) | |||
219 | if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) { | 220 | if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) { |
220 | rdmsr(MSR_K7_CLK_CTL, l, h); | 221 | rdmsr(MSR_K7_CLK_CTL, l, h); |
221 | if ((l & 0xfff00000) != 0x20000000) { | 222 | if ((l & 0xfff00000) != 0x20000000) { |
222 | printk ("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l, | 223 | printk(KERN_INFO |
223 | ((l & 0x000fffff)|0x20000000)); | 224 | "CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", |
225 | l, ((l & 0x000fffff)|0x20000000)); | ||
224 | wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h); | 226 | wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h); |
225 | } | 227 | } |
226 | } | 228 | } |
@@ -460,7 +462,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) | |||
460 | u32 level; | 462 | u32 level; |
461 | 463 | ||
462 | level = cpuid_eax(1); | 464 | level = cpuid_eax(1); |
463 | if((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58) | 465 | if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58) |
464 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); | 466 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); |
465 | 467 | ||
466 | /* | 468 | /* |
@@ -568,27 +570,30 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) | |||
568 | * benefit in doing so. | 570 | * benefit in doing so. |
569 | */ | 571 | */ |
570 | if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) { | 572 | if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) { |
571 | printk(KERN_DEBUG "tseg: %010llx\n", tseg); | 573 | printk(KERN_DEBUG "tseg: %010llx\n", tseg); |
572 | if ((tseg>>PMD_SHIFT) < | 574 | if ((tseg>>PMD_SHIFT) < |
573 | (max_low_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) || | 575 | (max_low_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) || |
574 | ((tseg>>PMD_SHIFT) < | 576 | ((tseg>>PMD_SHIFT) < |
575 | (max_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) && | 577 | (max_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) && |
576 | (tseg>>PMD_SHIFT) >= (1ULL<<(32 - PMD_SHIFT)))) | 578 | (tseg>>PMD_SHIFT) >= (1ULL<<(32 - PMD_SHIFT)))) |
577 | set_memory_4k((unsigned long)__va(tseg), 1); | 579 | set_memory_4k((unsigned long)__va(tseg), 1); |
578 | } | 580 | } |
579 | } | 581 | } |
580 | #endif | 582 | #endif |
581 | } | 583 | } |
582 | 584 | ||
583 | #ifdef CONFIG_X86_32 | 585 | #ifdef CONFIG_X86_32 |
584 | static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, unsigned int size) | 586 | static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, |
587 | unsigned int size) | ||
585 | { | 588 | { |
586 | /* AMD errata T13 (order #21922) */ | 589 | /* AMD errata T13 (order #21922) */ |
587 | if ((c->x86 == 6)) { | 590 | if ((c->x86 == 6)) { |
588 | if (c->x86_model == 3 && c->x86_mask == 0) /* Duron Rev A0 */ | 591 | /* Duron Rev A0 */ |
592 | if (c->x86_model == 3 && c->x86_mask == 0) | ||
589 | size = 64; | 593 | size = 64; |
594 | /* Tbird rev A1/A2 */ | ||
590 | if (c->x86_model == 4 && | 595 | if (c->x86_model == 4 && |
591 | (c->x86_mask == 0 || c->x86_mask == 1)) /* Tbird rev A1/A2 */ | 596 | (c->x86_mask == 0 || c->x86_mask == 1)) |
592 | size = 256; | 597 | size = 256; |
593 | } | 598 | } |
594 | return size; | 599 | return size; |
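The printk() changes through this file (and in bugs.c below) attach explicit log levels. A printk() without a level opens a new default-level record, so fragments that continue a line started by an earlier call must say so with KERN_CONT. A minimal sketch of the idiom, with hypothetical message text:

	printk(KERN_INFO "AMD K6 write-allocate check: "); /* opens the line */
	if (buggy)
		printk(KERN_CONT "stability may be impaired.\n"); /* same line */
	else
		printk(KERN_CONT "probably OK.\n");
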
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index c8e315f1aa83..01a265212395 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c | |||
@@ -81,7 +81,7 @@ static void __init check_fpu(void) | |||
81 | 81 | ||
82 | boot_cpu_data.fdiv_bug = fdiv_bug; | 82 | boot_cpu_data.fdiv_bug = fdiv_bug; |
83 | if (boot_cpu_data.fdiv_bug) | 83 | if (boot_cpu_data.fdiv_bug) |
84 | printk("Hmm, FPU with FDIV bug.\n"); | 84 | printk(KERN_WARNING "Hmm, FPU with FDIV bug.\n"); |
85 | } | 85 | } |
86 | 86 | ||
87 | static void __init check_hlt(void) | 87 | static void __init check_hlt(void) |
@@ -98,7 +98,7 @@ static void __init check_hlt(void) | |||
98 | halt(); | 98 | halt(); |
99 | halt(); | 99 | halt(); |
100 | halt(); | 100 | halt(); |
101 | printk("OK.\n"); | 101 | printk(KERN_CONT "OK.\n"); |
102 | } | 102 | } |
103 | 103 | ||
104 | /* | 104 | /* |
@@ -122,9 +122,9 @@ static void __init check_popad(void) | |||
122 | * CPU hard. Too bad. | 122 | * CPU hard. Too bad. |
123 | */ | 123 | */ |
124 | if (res != 12345678) | 124 | if (res != 12345678) |
125 | printk("Buggy.\n"); | 125 | printk(KERN_CONT "Buggy.\n"); |
126 | else | 126 | else |
127 | printk("OK.\n"); | 127 | printk(KERN_CONT "OK.\n"); |
128 | #endif | 128 | #endif |
129 | } | 129 | } |
130 | 130 | ||
@@ -156,7 +156,7 @@ void __init check_bugs(void) | |||
156 | { | 156 | { |
157 | identify_boot_cpu(); | 157 | identify_boot_cpu(); |
158 | #ifndef CONFIG_SMP | 158 | #ifndef CONFIG_SMP |
159 | printk("CPU: "); | 159 | printk(KERN_INFO "CPU: "); |
160 | print_cpu_info(&boot_cpu_data); | 160 | print_cpu_info(&boot_cpu_data); |
161 | #endif | 161 | #endif |
162 | check_config(); | 162 | check_config(); |
diff --git a/arch/x86/kernel/cpu/bugs_64.c b/arch/x86/kernel/cpu/bugs_64.c index 9a3ed0649d4e..04f0fe5af83e 100644 --- a/arch/x86/kernel/cpu/bugs_64.c +++ b/arch/x86/kernel/cpu/bugs_64.c | |||
@@ -15,7 +15,7 @@ void __init check_bugs(void) | |||
15 | { | 15 | { |
16 | identify_boot_cpu(); | 16 | identify_boot_cpu(); |
17 | #if !defined(CONFIG_SMP) | 17 | #if !defined(CONFIG_SMP) |
18 | printk("CPU: "); | 18 | printk(KERN_INFO "CPU: "); |
19 | print_cpu_info(&boot_cpu_data); | 19 | print_cpu_info(&boot_cpu_data); |
20 | #endif | 20 | #endif |
21 | alternative_instructions(); | 21 | alternative_instructions(); |
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 5ce60a88027b..55a6abe40394 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -18,8 +18,8 @@ | |||
18 | #include <asm/hypervisor.h> | 18 | #include <asm/hypervisor.h> |
19 | #include <asm/processor.h> | 19 | #include <asm/processor.h> |
20 | #include <asm/sections.h> | 20 | #include <asm/sections.h> |
21 | #include <asm/topology.h> | 21 | #include <linux/topology.h> |
22 | #include <asm/cpumask.h> | 22 | #include <linux/cpumask.h> |
23 | #include <asm/pgtable.h> | 23 | #include <asm/pgtable.h> |
24 | #include <asm/atomic.h> | 24 | #include <asm/atomic.h> |
25 | #include <asm/proto.h> | 25 | #include <asm/proto.h> |
@@ -28,13 +28,13 @@ | |||
28 | #include <asm/desc.h> | 28 | #include <asm/desc.h> |
29 | #include <asm/i387.h> | 29 | #include <asm/i387.h> |
30 | #include <asm/mtrr.h> | 30 | #include <asm/mtrr.h> |
31 | #include <asm/numa.h> | 31 | #include <linux/numa.h> |
32 | #include <asm/asm.h> | 32 | #include <asm/asm.h> |
33 | #include <asm/cpu.h> | 33 | #include <asm/cpu.h> |
34 | #include <asm/mce.h> | 34 | #include <asm/mce.h> |
35 | #include <asm/msr.h> | 35 | #include <asm/msr.h> |
36 | #include <asm/pat.h> | 36 | #include <asm/pat.h> |
37 | #include <asm/smp.h> | 37 | #include <linux/smp.h> |
38 | 38 | ||
39 | #ifdef CONFIG_X86_LOCAL_APIC | 39 | #ifdef CONFIG_X86_LOCAL_APIC |
40 | #include <asm/uv/uv.h> | 40 | #include <asm/uv/uv.h> |
@@ -94,45 +94,45 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { | |||
94 | * TLS descriptors are currently at a different place compared to i386. | 94 | * TLS descriptors are currently at a different place compared to i386. |
95 | * Hopefully nobody expects them at a fixed place (Wine?) | 95 | * Hopefully nobody expects them at a fixed place (Wine?) |
96 | */ | 96 | */ |
97 | [GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } }, | 97 | [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff), |
98 | [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } }, | 98 | [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff), |
99 | [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } }, | 99 | [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff), |
100 | [GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } }, | 100 | [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff), |
101 | [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } }, | 101 | [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff), |
102 | [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } }, | 102 | [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff), |
103 | #else | 103 | #else |
104 | [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } }, | 104 | [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff), |
105 | [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } }, | 105 | [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), |
106 | [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } }, | 106 | [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff), |
107 | [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff200 } } }, | 107 | [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff), |
108 | /* | 108 | /* |
109 | * Segments used for calling PnP BIOS have byte granularity. | 109 | * Segments used for calling PnP BIOS have byte granularity. |
110 | * They code segments and data segments have fixed 64k limits, | 110 | * They code segments and data segments have fixed 64k limits, |
111 | * the transfer segment sizes are set at run time. | 111 | * the transfer segment sizes are set at run time. |
112 | */ | 112 | */ |
113 | /* 32-bit code */ | 113 | /* 32-bit code */ |
114 | [GDT_ENTRY_PNPBIOS_CS32] = { { { 0x0000ffff, 0x00409a00 } } }, | 114 | [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff), |
115 | /* 16-bit code */ | 115 | /* 16-bit code */ |
116 | [GDT_ENTRY_PNPBIOS_CS16] = { { { 0x0000ffff, 0x00009a00 } } }, | 116 | [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff), |
117 | /* 16-bit data */ | 117 | /* 16-bit data */ |
118 | [GDT_ENTRY_PNPBIOS_DS] = { { { 0x0000ffff, 0x00009200 } } }, | 118 | [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff), |
119 | /* 16-bit data */ | 119 | /* 16-bit data */ |
120 | [GDT_ENTRY_PNPBIOS_TS1] = { { { 0x00000000, 0x00009200 } } }, | 120 | [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0), |
121 | /* 16-bit data */ | 121 | /* 16-bit data */ |
122 | [GDT_ENTRY_PNPBIOS_TS2] = { { { 0x00000000, 0x00009200 } } }, | 122 | [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0), |
123 | /* | 123 | /* |
124 | * The APM segments have byte granularity and their bases | 124 | * The APM segments have byte granularity and their bases |
125 | * are set at run time. All have 64k limits. | 125 | * are set at run time. All have 64k limits. |
126 | */ | 126 | */ |
127 | /* 32-bit code */ | 127 | /* 32-bit code */ |
128 | [GDT_ENTRY_APMBIOS_BASE] = { { { 0x0000ffff, 0x00409a00 } } }, | 128 | [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff), |
129 | /* 16-bit code */ | 129 | /* 16-bit code */ |
130 | [GDT_ENTRY_APMBIOS_BASE+1] = { { { 0x0000ffff, 0x00009a00 } } }, | 130 | [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff), |
131 | /* data */ | 131 | /* data */ |
132 | [GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } }, | 132 | [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff), |
133 | 133 | ||
134 | [GDT_ENTRY_ESPFIX_SS] = { { { 0x0000ffff, 0x00cf9200 } } }, | 134 | [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), |
135 | [GDT_ENTRY_PERCPU] = { { { 0x0000ffff, 0x00cf9200 } } }, | 135 | [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), |
136 | GDT_STACK_CANARY_INIT | 136 | GDT_STACK_CANARY_INIT |
137 | #endif | 137 | #endif |
138 | } }; | 138 | } }; |
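GDT_ENTRY_INIT() packs a descriptor's flags, base and limit into the same two 32-bit words the old literals spelled out by hand. Assuming the desc_defs.h definition of this era, the macro body is roughly:

	#define GDT_ENTRY_INIT(flags, base, limit) { { { \
			.a = ((limit) & 0xffff) | (((base) & 0xffff) << 16), \
			.b = (((base) & 0xff0000) >> 16) | \
			     (((flags) & 0xf0ff) << 8) | \
			     ((limit) & 0xf0000) | ((base) & 0xff000000), \
		} } }

Plugging in the 32-bit kernel code segment, GDT_ENTRY_INIT(0xc09b, 0, 0xfffff) yields .a = 0x0000ffff and .b = 0x00c09b00 | 0x000f0000 = 0x00cf9b00, matching the literal it replaces.
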
@@ -982,7 +982,7 @@ static __init int setup_disablecpuid(char *arg) | |||
982 | __setup("clearcpuid=", setup_disablecpuid); | 982 | __setup("clearcpuid=", setup_disablecpuid); |
983 | 983 | ||
984 | #ifdef CONFIG_X86_64 | 984 | #ifdef CONFIG_X86_64 |
985 | struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table }; | 985 | struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table }; |
986 | 986 | ||
987 | DEFINE_PER_CPU_FIRST(union irq_stack_union, | 987 | DEFINE_PER_CPU_FIRST(union irq_stack_union, |
988 | irq_stack_union) __aligned(PAGE_SIZE); | 988 | irq_stack_union) __aligned(PAGE_SIZE); |
@@ -1043,7 +1043,7 @@ DEFINE_PER_CPU(struct orig_ist, orig_ist); | |||
1043 | #else /* CONFIG_X86_64 */ | 1043 | #else /* CONFIG_X86_64 */ |
1044 | 1044 | ||
1045 | #ifdef CONFIG_CC_STACKPROTECTOR | 1045 | #ifdef CONFIG_CC_STACKPROTECTOR |
1046 | DEFINE_PER_CPU(unsigned long, stack_canary); | 1046 | DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary); |
1047 | #endif | 1047 | #endif |
1048 | 1048 | ||
1049 | /* Make sure %fs and %gs are initialized properly in idle threads */ | 1049 | /* Make sure %fs and %gs are initialized properly in idle threads */ |
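The stack_canary change swaps the bare per-cpu unsigned long for an aligned struct because, on 32-bit, gcc's -fstack-protector reads the canary from the fixed address %gs:20; wrapping it pins the value at that offset regardless of how the rest of the per-cpu area is laid out. The assumed layout, per the stackprotector header of this era:

	struct stack_canary {
		char __pad[20];		/* gcc hard-codes the load from %gs:20 */
		unsigned long canary;
	};
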
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c index 593171e967ef..19807b89f058 100644 --- a/arch/x86/kernel/cpu/cyrix.c +++ b/arch/x86/kernel/cpu/cyrix.c | |||
@@ -3,10 +3,10 @@ | |||
3 | #include <linux/delay.h> | 3 | #include <linux/delay.h> |
4 | #include <linux/pci.h> | 4 | #include <linux/pci.h> |
5 | #include <asm/dma.h> | 5 | #include <asm/dma.h> |
6 | #include <asm/io.h> | 6 | #include <linux/io.h> |
7 | #include <asm/processor-cyrix.h> | 7 | #include <asm/processor-cyrix.h> |
8 | #include <asm/processor-flags.h> | 8 | #include <asm/processor-flags.h> |
9 | #include <asm/timer.h> | 9 | #include <linux/timer.h> |
10 | #include <asm/pci-direct.h> | 10 | #include <asm/pci-direct.h> |
11 | #include <asm/tsc.h> | 11 | #include <asm/tsc.h> |
12 | 12 | ||
@@ -282,7 +282,8 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) | |||
282 | * The 5510/5520 companion chips have a funky PIT. | 282 | * The 5510/5520 companion chips have a funky PIT. |
283 | */ | 283 | */ |
284 | if (vendor == PCI_VENDOR_ID_CYRIX && | 284 | if (vendor == PCI_VENDOR_ID_CYRIX && |
285 | (device == PCI_DEVICE_ID_CYRIX_5510 || device == PCI_DEVICE_ID_CYRIX_5520)) | 285 | (device == PCI_DEVICE_ID_CYRIX_5510 || |
286 | device == PCI_DEVICE_ID_CYRIX_5520)) | ||
286 | mark_tsc_unstable("cyrix 5510/5520 detected"); | 287 | mark_tsc_unstable("cyrix 5510/5520 detected"); |
287 | } | 288 | } |
288 | #endif | 289 | #endif |
@@ -299,7 +300,8 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) | |||
299 | * ? : 0x7x | 300 | * ? : 0x7x |
300 | * GX1 : 0x8x GX1 datasheet 56 | 301 | * GX1 : 0x8x GX1 datasheet 56 |
301 | */ | 302 | */ |
302 | if ((0x30 <= dir1 && dir1 <= 0x6f) || (0x80 <= dir1 && dir1 <= 0x8f)) | 303 | if ((0x30 <= dir1 && dir1 <= 0x6f) || |
304 | (0x80 <= dir1 && dir1 <= 0x8f)) | ||
303 | geode_configure(); | 305 | geode_configure(); |
304 | return; | 306 | return; |
305 | } else { /* MediaGX */ | 307 | } else { /* MediaGX */ |
@@ -427,9 +429,12 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c) | |||
427 | printk(KERN_INFO "Enabling CPUID on Cyrix processor.\n"); | 429 | printk(KERN_INFO "Enabling CPUID on Cyrix processor.\n"); |
428 | local_irq_save(flags); | 430 | local_irq_save(flags); |
429 | ccr3 = getCx86(CX86_CCR3); | 431 | ccr3 = getCx86(CX86_CCR3); |
430 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ | 432 | /* enable MAPEN */ |
431 | setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x80); /* enable cpuid */ | 433 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); |
432 | setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ | 434 | /* enable cpuid */ |
435 | setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x80); | ||
436 | /* disable MAPEN */ | ||
437 | setCx86(CX86_CCR3, ccr3); | ||
433 | local_irq_restore(flags); | 438 | local_irq_restore(flags); |
434 | } | 439 | } |
435 | } | 440 | } |
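The reflowed cyrix_identify() lines are the standard Cyrix configuration-register dance: CCR3 holds a MAPEN key that exposes the extended registers and must be restored afterwards. Pulled together from the hunk above, with the surrounding declarations added:

	unsigned long flags;
	u8 ccr3;

	local_irq_save(flags);
	ccr3 = getCx86(CX86_CCR3);
	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* enable MAPEN */
	setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x80); /* enable cpuid */
	setCx86(CX86_CCR3, ccr3);			/* disable MAPEN */
	local_irq_restore(flags);
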
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c index fb5b86af0b01..93ba8eeb100a 100644 --- a/arch/x86/kernel/cpu/hypervisor.c +++ b/arch/x86/kernel/cpu/hypervisor.c | |||
@@ -28,11 +28,10 @@ | |||
28 | static inline void __cpuinit | 28 | static inline void __cpuinit |
29 | detect_hypervisor_vendor(struct cpuinfo_x86 *c) | 29 | detect_hypervisor_vendor(struct cpuinfo_x86 *c) |
30 | { | 30 | { |
31 | if (vmware_platform()) { | 31 | if (vmware_platform()) |
32 | c->x86_hyper_vendor = X86_HYPER_VENDOR_VMWARE; | 32 | c->x86_hyper_vendor = X86_HYPER_VENDOR_VMWARE; |
33 | } else { | 33 | else |
34 | c->x86_hyper_vendor = X86_HYPER_VENDOR_NONE; | 34 | c->x86_hyper_vendor = X86_HYPER_VENDOR_NONE; |
35 | } | ||
36 | } | 35 | } |
37 | 36 | ||
38 | unsigned long get_hypervisor_tsc_freq(void) | 37 | unsigned long get_hypervisor_tsc_freq(void) |
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 3260ab044996..80a722a071b5 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c | |||
@@ -7,17 +7,17 @@ | |||
7 | #include <linux/sched.h> | 7 | #include <linux/sched.h> |
8 | #include <linux/thread_info.h> | 8 | #include <linux/thread_info.h> |
9 | #include <linux/module.h> | 9 | #include <linux/module.h> |
10 | #include <linux/uaccess.h> | ||
10 | 11 | ||
11 | #include <asm/processor.h> | 12 | #include <asm/processor.h> |
12 | #include <asm/pgtable.h> | 13 | #include <asm/pgtable.h> |
13 | #include <asm/msr.h> | 14 | #include <asm/msr.h> |
14 | #include <asm/uaccess.h> | ||
15 | #include <asm/ds.h> | 15 | #include <asm/ds.h> |
16 | #include <asm/bugs.h> | 16 | #include <asm/bugs.h> |
17 | #include <asm/cpu.h> | 17 | #include <asm/cpu.h> |
18 | 18 | ||
19 | #ifdef CONFIG_X86_64 | 19 | #ifdef CONFIG_X86_64 |
20 | #include <asm/topology.h> | 20 | #include <linux/topology.h> |
21 | #include <asm/numa_64.h> | 21 | #include <asm/numa_64.h> |
22 | #endif | 22 | #endif |
23 | 23 | ||
@@ -174,7 +174,8 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) | |||
174 | #ifdef CONFIG_X86_F00F_BUG | 174 | #ifdef CONFIG_X86_F00F_BUG |
175 | /* | 175 | /* |
176 | * All current models of Pentium and Pentium with MMX technology CPUs | 176 | * All current models of Pentium and Pentium with MMX technology CPUs |
177 | * have the F0 0F bug, which lets nonprivileged users lock up the system. | 177 | * have the F0 0F bug, which lets nonprivileged users lock up the |
178 | * system. | ||
178 | * Note that the workaround only should be initialized once... | 179 | * Note that the workaround only should be initialized once... |
179 | */ | 180 | */ |
180 | c->f00f_bug = 0; | 181 | c->f00f_bug = 0; |
@@ -207,7 +208,7 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) | |||
207 | printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n"); | 208 | printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n"); |
208 | printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n"); | 209 | printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n"); |
209 | lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE; | 210 | lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE; |
210 | wrmsr (MSR_IA32_MISC_ENABLE, lo, hi); | 211 | wrmsr(MSR_IA32_MISC_ENABLE, lo, hi); |
211 | } | 212 | } |
212 | } | 213 | } |
213 | 214 | ||
@@ -283,7 +284,7 @@ static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c) | |||
283 | /* Intel has a non-standard dependency on %ecx for this CPUID level. */ | 284 | /* Intel has a non-standard dependency on %ecx for this CPUID level. */ |
284 | cpuid_count(4, 0, &eax, &ebx, &ecx, &edx); | 285 | cpuid_count(4, 0, &eax, &ebx, &ecx, &edx); |
285 | if (eax & 0x1f) | 286 | if (eax & 0x1f) |
286 | return ((eax >> 26) + 1); | 287 | return (eax >> 26) + 1; |
287 | else | 288 | else |
288 | return 1; | 289 | return 1; |
289 | } | 290 | } |
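The intel_num_cpu_cores() hunk drops redundant parentheses around the leaf-4 arithmetic: EAX[31:26] of CPUID leaf 4 holds the maximum number of addressable core IDs minus one, and a zero cache-type field in EAX[4:0] means the leaf is not implemented. The computation, spelled out:

	unsigned int eax, ebx, ecx, edx, cores;

	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx); /* %ecx selects sub-leaf 0 */
	if (eax & 0x1f)			/* cache type != null: leaf exists */
		cores = (eax >> 26) + 1;
	else
		cores = 1;
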
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index cb98a73e5167..804c40e2bc3e 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Changes: | 4 | * Changes: |
5 | * Venkatesh Pallipadi : Adding cache identification through cpuid(4) | 5 | * Venkatesh Pallipadi : Adding cache identification through cpuid(4) |
6 | * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure. | 6 | * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure. |
7 | * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD. | 7 | * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD. |
8 | */ | 8 | */ |
9 | 9 | ||
@@ -16,7 +16,7 @@ | |||
16 | #include <linux/pci.h> | 16 | #include <linux/pci.h> |
17 | 17 | ||
18 | #include <asm/processor.h> | 18 | #include <asm/processor.h> |
19 | #include <asm/smp.h> | 19 | #include <linux/smp.h> |
20 | #include <asm/k8.h> | 20 | #include <asm/k8.h> |
21 | 21 | ||
22 | #define LVL_1_INST 1 | 22 | #define LVL_1_INST 1 |
@@ -25,14 +25,15 @@ | |||
25 | #define LVL_3 4 | 25 | #define LVL_3 4 |
26 | #define LVL_TRACE 5 | 26 | #define LVL_TRACE 5 |
27 | 27 | ||
28 | struct _cache_table | 28 | struct _cache_table { |
29 | { | ||
30 | unsigned char descriptor; | 29 | unsigned char descriptor; |
31 | char cache_type; | 30 | char cache_type; |
32 | short size; | 31 | short size; |
33 | }; | 32 | }; |
34 | 33 | ||
35 | /* all the cache descriptor types we care about (no TLB or trace cache entries) */ | 34 | /* All the cache descriptor types we care about (no TLB or |
35 | trace cache entries) */ | ||
36 | |||
36 | static const struct _cache_table __cpuinitconst cache_table[] = | 37 | static const struct _cache_table __cpuinitconst cache_table[] = |
37 | { | 38 | { |
38 | { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */ | 39 | { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */ |
@@ -105,8 +106,7 @@ static const struct _cache_table __cpuinitconst cache_table[] = | |||
105 | }; | 106 | }; |
106 | 107 | ||
107 | 108 | ||
108 | enum _cache_type | 109 | enum _cache_type { |
109 | { | ||
110 | CACHE_TYPE_NULL = 0, | 110 | CACHE_TYPE_NULL = 0, |
111 | CACHE_TYPE_DATA = 1, | 111 | CACHE_TYPE_DATA = 1, |
112 | CACHE_TYPE_INST = 2, | 112 | CACHE_TYPE_INST = 2, |
@@ -170,31 +170,31 @@ unsigned short num_cache_leaves; | |||
170 | Maybe later */ | 170 | Maybe later */ |
171 | union l1_cache { | 171 | union l1_cache { |
172 | struct { | 172 | struct { |
173 | unsigned line_size : 8; | 173 | unsigned line_size:8; |
174 | unsigned lines_per_tag : 8; | 174 | unsigned lines_per_tag:8; |
175 | unsigned assoc : 8; | 175 | unsigned assoc:8; |
176 | unsigned size_in_kb : 8; | 176 | unsigned size_in_kb:8; |
177 | }; | 177 | }; |
178 | unsigned val; | 178 | unsigned val; |
179 | }; | 179 | }; |
180 | 180 | ||
181 | union l2_cache { | 181 | union l2_cache { |
182 | struct { | 182 | struct { |
183 | unsigned line_size : 8; | 183 | unsigned line_size:8; |
184 | unsigned lines_per_tag : 4; | 184 | unsigned lines_per_tag:4; |
185 | unsigned assoc : 4; | 185 | unsigned assoc:4; |
186 | unsigned size_in_kb : 16; | 186 | unsigned size_in_kb:16; |
187 | }; | 187 | }; |
188 | unsigned val; | 188 | unsigned val; |
189 | }; | 189 | }; |
190 | 190 | ||
191 | union l3_cache { | 191 | union l3_cache { |
192 | struct { | 192 | struct { |
193 | unsigned line_size : 8; | 193 | unsigned line_size:8; |
194 | unsigned lines_per_tag : 4; | 194 | unsigned lines_per_tag:4; |
195 | unsigned assoc : 4; | 195 | unsigned assoc:4; |
196 | unsigned res : 2; | 196 | unsigned res:2; |
197 | unsigned size_encoded : 14; | 197 | unsigned size_encoded:14; |
198 | }; | 198 | }; |
199 | unsigned val; | 199 | unsigned val; |
200 | }; | 200 | }; |
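The l1/l2/l3 unions whose bitfields are restyled above decode a raw CPUID register in one assignment: fill .val, then read the named fields. A usage sketch, where the AMD extended leaf number is an assumption on my part:

	union l2_cache l2;
	unsigned int dummy;

	/* AMD leaf 0x80000006 is assumed to return the L2 descriptor in %ecx */
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &dummy);
	pr_info("L2: %u KB, %u-byte lines, assoc field %#x\n",
		l2.size_in_kb, l2.line_size, l2.assoc);
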
@@ -350,7 +350,8 @@ static int __cpuinit find_num_cache_leaves(void) | |||
350 | 350 | ||
351 | unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) | 351 | unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) |
352 | { | 352 | { |
353 | unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */ | 353 | /* Cache sizes */ |
354 | unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; | ||
354 | unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */ | 355 | unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */ |
355 | unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */ | 356 | unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */ |
356 | unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb; | 357 | unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb; |
@@ -377,8 +378,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) | |||
377 | 378 | ||
378 | retval = cpuid4_cache_lookup_regs(i, &this_leaf); | 379 | retval = cpuid4_cache_lookup_regs(i, &this_leaf); |
379 | if (retval >= 0) { | 380 | if (retval >= 0) { |
380 | switch(this_leaf.eax.split.level) { | 381 | switch (this_leaf.eax.split.level) { |
381 | case 1: | 382 | case 1: |
382 | if (this_leaf.eax.split.type == | 383 | if (this_leaf.eax.split.type == |
383 | CACHE_TYPE_DATA) | 384 | CACHE_TYPE_DATA) |
384 | new_l1d = this_leaf.size/1024; | 385 | new_l1d = this_leaf.size/1024; |
@@ -386,19 +387,20 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) | |||
386 | CACHE_TYPE_INST) | 387 | CACHE_TYPE_INST) |
387 | new_l1i = this_leaf.size/1024; | 388 | new_l1i = this_leaf.size/1024; |
388 | break; | 389 | break; |
389 | case 2: | 390 | case 2: |
390 | new_l2 = this_leaf.size/1024; | 391 | new_l2 = this_leaf.size/1024; |
391 | num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing; | 392 | num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing; |
392 | index_msb = get_count_order(num_threads_sharing); | 393 | index_msb = get_count_order(num_threads_sharing); |
393 | l2_id = c->apicid >> index_msb; | 394 | l2_id = c->apicid >> index_msb; |
394 | break; | 395 | break; |
395 | case 3: | 396 | case 3: |
396 | new_l3 = this_leaf.size/1024; | 397 | new_l3 = this_leaf.size/1024; |
397 | num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing; | 398 | num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing; |
398 | index_msb = get_count_order(num_threads_sharing); | 399 | index_msb = get_count_order( |
400 | num_threads_sharing); | ||
399 | l3_id = c->apicid >> index_msb; | 401 | l3_id = c->apicid >> index_msb; |
400 | break; | 402 | break; |
401 | default: | 403 | default: |
402 | break; | 404 | break; |
403 | } | 405 | } |
404 | } | 406 | } |
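The case 2/case 3 arms above compute an ID that comes out identical for every thread behind one shared cache: get_count_order() is a ceil(log2()) helper, so shifting the APIC ID right by that many bits masks off the sibling-distinguishing low bits. Worked through for two hyperthreads sharing an L2:

	num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing; /* 2 */
	index_msb = get_count_order(num_threads_sharing);		   /* 1 */
	l2_id = c->apicid >> index_msb;	/* APIC IDs 6 and 7 both yield 3 */
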
@@ -421,22 +423,21 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) | |||
421 | /* Number of times to iterate */ | 423 | /* Number of times to iterate */ |
422 | n = cpuid_eax(2) & 0xFF; | 424 | n = cpuid_eax(2) & 0xFF; |
423 | 425 | ||
424 | for ( i = 0 ; i < n ; i++ ) { | 426 | for (i = 0 ; i < n ; i++) { |
425 | cpuid(2, ®s[0], ®s[1], ®s[2], ®s[3]); | 427 | cpuid(2, ®s[0], ®s[1], ®s[2], ®s[3]); |
426 | 428 | ||
427 | /* If bit 31 is set, this is an unknown format */ | 429 | /* If bit 31 is set, this is an unknown format */ |
428 | for ( j = 0 ; j < 3 ; j++ ) { | 430 | for (j = 0 ; j < 3 ; j++) |
429 | if (regs[j] & (1 << 31)) regs[j] = 0; | 431 | if (regs[j] & (1 << 31)) |
430 | } | 432 | regs[j] = 0; |
431 | 433 | ||
432 | /* Byte 0 is level count, not a descriptor */ | 434 | /* Byte 0 is level count, not a descriptor */ |
433 | for ( j = 1 ; j < 16 ; j++ ) { | 435 | for (j = 1 ; j < 16 ; j++) { |
434 | unsigned char des = dp[j]; | 436 | unsigned char des = dp[j]; |
435 | unsigned char k = 0; | 437 | unsigned char k = 0; |
436 | 438 | ||
437 | /* look up this descriptor in the table */ | 439 | /* look up this descriptor in the table */ |
438 | while (cache_table[k].descriptor != 0) | 440 | while (cache_table[k].descriptor != 0) { |
439 | { | ||
440 | if (cache_table[k].descriptor == des) { | 441 | if (cache_table[k].descriptor == des) { |
441 | if (only_trace && cache_table[k].cache_type != LVL_TRACE) | 442 | if (only_trace && cache_table[k].cache_type != LVL_TRACE) |
442 | break; | 443 | break; |
@@ -488,14 +489,14 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) | |||
488 | } | 489 | } |
489 | 490 | ||
490 | if (trace) | 491 | if (trace) |
491 | printk (KERN_INFO "CPU: Trace cache: %dK uops", trace); | 492 | printk(KERN_INFO "CPU: Trace cache: %dK uops", trace); |
492 | else if ( l1i ) | 493 | else if (l1i) |
493 | printk (KERN_INFO "CPU: L1 I cache: %dK", l1i); | 494 | printk(KERN_INFO "CPU: L1 I cache: %dK", l1i); |
494 | 495 | ||
495 | if (l1d) | 496 | if (l1d) |
496 | printk(", L1 D cache: %dK\n", l1d); | 497 | printk(KERN_CONT ", L1 D cache: %dK\n", l1d); |
497 | else | 498 | else |
498 | printk("\n"); | 499 | printk(KERN_CONT "\n"); |
499 | 500 | ||
500 | if (l2) | 501 | if (l2) |
501 | printk(KERN_INFO "CPU: L2 cache: %dK\n", l2); | 502 | printk(KERN_INFO "CPU: L2 cache: %dK\n", l2); |
@@ -570,8 +571,13 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) | |||
570 | } | 571 | } |
571 | } | 572 | } |
572 | #else | 573 | #else |
573 | static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) {} | 574 | static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) |
574 | static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) {} | 575 | { |
576 | } | ||
577 | |||
578 | static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) | ||
579 | { | ||
580 | } | ||
575 | #endif | 581 | #endif |
576 | 582 | ||
577 | static void __cpuinit free_cache_attributes(unsigned int cpu) | 583 | static void __cpuinit free_cache_attributes(unsigned int cpu) |
@@ -657,7 +663,7 @@ static DEFINE_PER_CPU(struct _index_kobject *, index_kobject); | |||
657 | static ssize_t show_##file_name \ | 663 | static ssize_t show_##file_name \ |
658 | (struct _cpuid4_info *this_leaf, char *buf) \ | 664 | (struct _cpuid4_info *this_leaf, char *buf) \ |
659 | { \ | 665 | { \ |
660 | return sprintf (buf, "%lu\n", (unsigned long)this_leaf->object + val); \ | 666 | return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \ |
661 | } | 667 | } |
662 | 668 | ||
663 | show_one_plus(level, eax.split.level, 0); | 669 | show_one_plus(level, eax.split.level, 0); |
@@ -668,7 +674,7 @@ show_one_plus(number_of_sets, ecx.split.number_of_sets, 1); | |||
668 | 674 | ||
669 | static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf) | 675 | static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf) |
670 | { | 676 | { |
671 | return sprintf (buf, "%luK\n", this_leaf->size / 1024); | 677 | return sprintf(buf, "%luK\n", this_leaf->size / 1024); |
672 | } | 678 | } |
673 | 679 | ||
674 | static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf, | 680 | static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf, |
@@ -681,7 +687,7 @@ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf, | |||
681 | const struct cpumask *mask; | 687 | const struct cpumask *mask; |
682 | 688 | ||
683 | mask = to_cpumask(this_leaf->shared_cpu_map); | 689 | mask = to_cpumask(this_leaf->shared_cpu_map); |
684 | n = type? | 690 | n = type ? |
685 | cpulist_scnprintf(buf, len-2, mask) : | 691 | cpulist_scnprintf(buf, len-2, mask) : |
686 | cpumask_scnprintf(buf, len-2, mask); | 692 | cpumask_scnprintf(buf, len-2, mask); |
687 | buf[n++] = '\n'; | 693 | buf[n++] = '\n'; |
@@ -812,7 +818,7 @@ static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644, | |||
812 | static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644, | 818 | static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644, |
813 | show_cache_disable_1, store_cache_disable_1); | 819 | show_cache_disable_1, store_cache_disable_1); |
814 | 820 | ||
815 | static struct attribute * default_attrs[] = { | 821 | static struct attribute *default_attrs[] = { |
816 | &type.attr, | 822 | &type.attr, |
817 | &level.attr, | 823 | &level.attr, |
818 | &coherency_line_size.attr, | 824 | &coherency_line_size.attr, |
@@ -827,7 +833,7 @@ static struct attribute * default_attrs[] = { | |||
827 | NULL | 833 | NULL |
828 | }; | 834 | }; |
829 | 835 | ||
830 | static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf) | 836 | static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) |
831 | { | 837 | { |
832 | struct _cache_attr *fattr = to_attr(attr); | 838 | struct _cache_attr *fattr = to_attr(attr); |
833 | struct _index_kobject *this_leaf = to_object(kobj); | 839 | struct _index_kobject *this_leaf = to_object(kobj); |
@@ -840,8 +846,8 @@ static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf) | |||
840 | return ret; | 846 | return ret; |
841 | } | 847 | } |
842 | 848 | ||
843 | static ssize_t store(struct kobject * kobj, struct attribute * attr, | 849 | static ssize_t store(struct kobject *kobj, struct attribute *attr, |
844 | const char * buf, size_t count) | 850 | const char *buf, size_t count) |
845 | { | 851 | { |
846 | struct _cache_attr *fattr = to_attr(attr); | 852 | struct _cache_attr *fattr = to_attr(attr); |
847 | struct _index_kobject *this_leaf = to_object(kobj); | 853 | struct _index_kobject *this_leaf = to_object(kobj); |
@@ -895,7 +901,7 @@ static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu) | |||
895 | goto err_out; | 901 | goto err_out; |
896 | 902 | ||
897 | per_cpu(index_kobject, cpu) = kzalloc( | 903 | per_cpu(index_kobject, cpu) = kzalloc( |
898 | sizeof(struct _index_kobject ) * num_cache_leaves, GFP_KERNEL); | 904 | sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL); |
899 | if (unlikely(per_cpu(index_kobject, cpu) == NULL)) | 905 | if (unlikely(per_cpu(index_kobject, cpu) == NULL)) |
900 | goto err_out; | 906 | goto err_out; |
901 | 907 | ||
@@ -929,7 +935,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev) | |||
929 | } | 935 | } |
930 | 936 | ||
931 | for (i = 0; i < num_cache_leaves; i++) { | 937 | for (i = 0; i < num_cache_leaves; i++) { |
932 | this_object = INDEX_KOBJECT_PTR(cpu,i); | 938 | this_object = INDEX_KOBJECT_PTR(cpu, i); |
933 | this_object->cpu = cpu; | 939 | this_object->cpu = cpu; |
934 | this_object->index = i; | 940 | this_object->index = i; |
935 | retval = kobject_init_and_add(&(this_object->kobj), | 941 | retval = kobject_init_and_add(&(this_object->kobj), |
@@ -937,9 +943,8 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev) | |||
937 | per_cpu(cache_kobject, cpu), | 943 | per_cpu(cache_kobject, cpu), |
938 | "index%1lu", i); | 944 | "index%1lu", i); |
939 | if (unlikely(retval)) { | 945 | if (unlikely(retval)) { |
940 | for (j = 0; j < i; j++) { | 946 | for (j = 0; j < i; j++) |
941 | kobject_put(&(INDEX_KOBJECT_PTR(cpu,j)->kobj)); | 947 | kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj)); |
942 | } | ||
943 | kobject_put(per_cpu(cache_kobject, cpu)); | 948 | kobject_put(per_cpu(cache_kobject, cpu)); |
944 | cpuid4_cache_sysfs_exit(cpu); | 949 | cpuid4_cache_sysfs_exit(cpu); |
945 | return retval; | 950 | return retval; |
@@ -964,7 +969,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev) | |||
964 | cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map)); | 969 | cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map)); |
965 | 970 | ||
966 | for (i = 0; i < num_cache_leaves; i++) | 971 | for (i = 0; i < num_cache_leaves; i++) |
967 | kobject_put(&(INDEX_KOBJECT_PTR(cpu,i)->kobj)); | 972 | kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj)); |
968 | kobject_put(per_cpu(cache_kobject, cpu)); | 973 | kobject_put(per_cpu(cache_kobject, cpu)); |
969 | cpuid4_cache_sysfs_exit(cpu); | 974 | cpuid4_cache_sysfs_exit(cpu); |
970 | } | 975 | } |
@@ -989,8 +994,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb, | |||
989 | return NOTIFY_OK; | 994 | return NOTIFY_OK; |
990 | } | 995 | } |
991 | 996 | ||
992 | static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = | 997 | static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = { |
993 | { | ||
994 | .notifier_call = cacheinfo_cpu_callback, | 998 | .notifier_call = cacheinfo_cpu_callback, |
995 | }; | 999 | }; |
996 | 1000 | ||
diff --git a/arch/x86/kernel/cpu/mtrr/amd.c b/arch/x86/kernel/cpu/mtrr/amd.c index ee2331b0e58f..33af14110dfd 100644 --- a/arch/x86/kernel/cpu/mtrr/amd.c +++ b/arch/x86/kernel/cpu/mtrr/amd.c | |||
@@ -7,15 +7,15 @@ | |||
7 | 7 | ||
8 | static void | 8 | static void |
9 | amd_get_mtrr(unsigned int reg, unsigned long *base, | 9 | amd_get_mtrr(unsigned int reg, unsigned long *base, |
10 | unsigned long *size, mtrr_type * type) | 10 | unsigned long *size, mtrr_type *type) |
11 | { | 11 | { |
12 | unsigned long low, high; | 12 | unsigned long low, high; |
13 | 13 | ||
14 | rdmsr(MSR_K6_UWCCR, low, high); | 14 | rdmsr(MSR_K6_UWCCR, low, high); |
15 | /* Upper dword is region 1, lower is region 0 */ | 15 | /* Upper dword is region 1, lower is region 0 */ |
16 | if (reg == 1) | 16 | if (reg == 1) |
17 | low = high; | 17 | low = high; |
18 | /* The base masks off on the right alignment */ | 18 | /* The base masks off on the right alignment */ |
19 | *base = (low & 0xFFFE0000) >> PAGE_SHIFT; | 19 | *base = (low & 0xFFFE0000) >> PAGE_SHIFT; |
20 | *type = 0; | 20 | *type = 0; |
21 | if (low & 1) | 21 | if (low & 1) |
@@ -27,74 +27,81 @@ amd_get_mtrr(unsigned int reg, unsigned long *base, | |||
27 | return; | 27 | return; |
28 | } | 28 | } |
29 | /* | 29 | /* |
30 | * This needs a little explaining. The size is stored as an | 30 | * This needs a little explaining. The size is stored as an |
31 | * inverted mask of bits of 128K granularity 15 bits long offset | 31 | * inverted mask of bits of 128K granularity 15 bits long offset |
32 | * 2 bits | 32 | * 2 bits. |
33 | * | 33 | * |
34 | * So to get a size we do invert the mask and add 1 to the lowest | 34 | * So to get a size we do invert the mask and add 1 to the lowest |
35 | * mask bit (4 as its 2 bits in). This gives us a size we then shift | 35 | * mask bit (4 as its 2 bits in). This gives us a size we then shift |
36 | * to turn into 128K blocks | 36 | * to turn into 128K blocks. |
37 | * | 37 | * |
38 | * eg 111 1111 1111 1100 is 512K | 38 | * eg 111 1111 1111 1100 is 512K |
39 | * | 39 | * |
40 | * invert 000 0000 0000 0011 | 40 | * invert 000 0000 0000 0011 |
41 | * +1 000 0000 0000 0100 | 41 | * +1 000 0000 0000 0100 |
42 | * *128K ... | 42 | * *128K ... |
43 | */ | 43 | */ |
44 | low = (~low) & 0x1FFFC; | 44 | low = (~low) & 0x1FFFC; |
45 | *size = (low + 4) << (15 - PAGE_SHIFT); | 45 | *size = (low + 4) << (15 - PAGE_SHIFT); |
46 | return; | ||
47 | } | 46 | } |
48 | 47 | ||
49 | static void amd_set_mtrr(unsigned int reg, unsigned long base, | 48 | /** |
50 | unsigned long size, mtrr_type type) | 49 | * amd_set_mtrr - Set variable MTRR register on the local CPU. |
51 | /* [SUMMARY] Set variable MTRR register on the local CPU. | 50 | * |
52 | <reg> The register to set. | 51 | * @reg The register to set. |
53 | <base> The base address of the region. | 52 | * @base The base address of the region. |
54 | <size> The size of the region. If this is 0 the region is disabled. | 53 | * @size The size of the region. If this is 0 the region is disabled. |
55 | <type> The type of the region. | 54 | * @type The type of the region. |
56 | [RETURNS] Nothing. | 55 | * |
57 | */ | 56 | * Returns nothing. |
57 | */ | ||
58 | static void | ||
59 | amd_set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type) | ||
58 | { | 60 | { |
59 | u32 regs[2]; | 61 | u32 regs[2]; |
60 | 62 | ||
61 | /* | 63 | /* |
62 | * Low is MTRR0 , High MTRR 1 | 64 | * Low is MTRR0, High MTRR 1 |
63 | */ | 65 | */ |
64 | rdmsr(MSR_K6_UWCCR, regs[0], regs[1]); | 66 | rdmsr(MSR_K6_UWCCR, regs[0], regs[1]); |
65 | /* | 67 | /* |
66 | * Blank to disable | 68 | * Blank to disable |
67 | */ | 69 | */ |
68 | if (size == 0) | 70 | if (size == 0) { |
69 | regs[reg] = 0; | 71 | regs[reg] = 0; |
70 | else | 72 | } else { |
71 | /* Set the register to the base, the type (off by one) and an | 73 | /* |
72 | inverted bitmask of the size The size is the only odd | 74 | * Set the register to the base, the type (off by one) and an |
73 | bit. We are fed say 512K We invert this and we get 111 1111 | 75 | * inverted bitmask of the size The size is the only odd |
74 | 1111 1011 but if you subtract one and invert you get the | 76 | * bit. We are fed say 512K We invert this and we get 111 1111 |
75 | desired 111 1111 1111 1100 mask | 77 | * 1111 1011 but if you subtract one and invert you get the |
76 | 78 | * desired 111 1111 1111 1100 mask | |
77 | But ~(x - 1) == ~x + 1 == -x. Two's complement rocks! */ | 79 | * |
80 | * But ~(x - 1) == ~x + 1 == -x. Two's complement rocks! | ||
81 | */ | ||
78 | regs[reg] = (-size >> (15 - PAGE_SHIFT) & 0x0001FFFC) | 82 | regs[reg] = (-size >> (15 - PAGE_SHIFT) & 0x0001FFFC) |
79 | | (base << PAGE_SHIFT) | (type + 1); | 83 | | (base << PAGE_SHIFT) | (type + 1); |
84 | } | ||
80 | 85 | ||
81 | /* | 86 | /* |
82 | * The writeback rule is quite specific. See the manual. Its | 87 | * The writeback rule is quite specific. See the manual. Its |
83 | * disable local interrupts, write back the cache, set the mtrr | 88 | * disable local interrupts, write back the cache, set the mtrr |
84 | */ | 89 | */ |
85 | wbinvd(); | 90 | wbinvd(); |
86 | wrmsr(MSR_K6_UWCCR, regs[0], regs[1]); | 91 | wrmsr(MSR_K6_UWCCR, regs[0], regs[1]); |
87 | } | 92 | } |
88 | 93 | ||
89 | static int amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type) | 94 | static int |
95 | amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type) | ||
90 | { | 96 | { |
91 | /* Apply the K6 block alignment and size rules | 97 | /* |
92 | In order | 98 | * Apply the K6 block alignment and size rules |
93 | o Uncached or gathering only | 99 | * In order |
94 | o 128K or bigger block | 100 | * o Uncached or gathering only |
95 | o Power of 2 block | 101 | * o 128K or bigger block |
96 | o base suitably aligned to the power | 102 | * o Power of 2 block |
97 | */ | 103 | * o base suitably aligned to the power |
104 | */ | ||
98 | if (type > MTRR_TYPE_WRCOMB || size < (1 << (17 - PAGE_SHIFT)) | 105 | if (type > MTRR_TYPE_WRCOMB || size < (1 << (17 - PAGE_SHIFT)) |
99 | || (size & ~(size - 1)) - size || (base & (size - 1))) | 106 | || (size & ~(size - 1)) - size || (base & (size - 1))) |
100 | return -EINVAL; | 107 | return -EINVAL; |
@@ -115,5 +122,3 @@ int __init amd_init_mtrr(void) | |||
115 | set_mtrr_ops(&amd_mtrr_ops); | 122 | set_mtrr_ops(&amd_mtrr_ops); |
116 | return 0; | 123 | return 0; |
117 | } | 124 | } |
118 | |||
119 | //arch_initcall(amd_mtrr_init); | ||
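The reworded comments in amd_get_mtrr() describe the K6 size decode: 15 inverted-mask bits of 128K granularity sit at bits 16..2 of the register, with the region type in the low bits. A stand-alone userspace check of the arithmetic, using a hypothetical register value:

	#include <stdio.h>

	int main(void)
	{
		/* mask bits 111 1111 1111 1100 at bits 16..2, type bit set */
		unsigned int low = 0x1fff1;

		low = (~low) & 0x1FFFC;		/* re-invert the mask bits */
		/* +1 at bit 2 (the lowest mask bit), then count 128K blocks */
		printf("size = %uK\n", ((low + 4) >> 2) * 128);
		return 0;	/* prints "size = 512K", the comment's example */
	}
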
diff --git a/arch/x86/kernel/cpu/mtrr/centaur.c b/arch/x86/kernel/cpu/mtrr/centaur.c index cb9aa3a7a7ab..de89f14eff3a 100644 --- a/arch/x86/kernel/cpu/mtrr/centaur.c +++ b/arch/x86/kernel/cpu/mtrr/centaur.c | |||
@@ -1,7 +1,9 @@ | |||
1 | #include <linux/init.h> | 1 | #include <linux/init.h> |
2 | #include <linux/mm.h> | 2 | #include <linux/mm.h> |
3 | |||
3 | #include <asm/mtrr.h> | 4 | #include <asm/mtrr.h> |
4 | #include <asm/msr.h> | 5 | #include <asm/msr.h> |
6 | |||
5 | #include "mtrr.h" | 7 | #include "mtrr.h" |
6 | 8 | ||
7 | static struct { | 9 | static struct { |
@@ -12,25 +14,25 @@ static struct { | |||
12 | static u8 centaur_mcr_reserved; | 14 | static u8 centaur_mcr_reserved; |
13 | static u8 centaur_mcr_type; /* 0 for winchip, 1 for winchip2 */ | 15 | static u8 centaur_mcr_type; /* 0 for winchip, 1 for winchip2 */ |
14 | 16 | ||
15 | /* | 17 | /** |
16 | * Report boot time MCR setups | 18 | * centaur_get_free_region - Get a free MTRR. |
19 | * | ||
20 | * @base: The starting (base) address of the region. | ||
21 | * @size: The size (in bytes) of the region. | ||
22 | * | ||
23 | * Returns: the index of the region on success, else -1 on error. | ||
17 | */ | 24 | */ |
18 | |||
19 | static int | 25 | static int |
20 | centaur_get_free_region(unsigned long base, unsigned long size, int replace_reg) | 26 | centaur_get_free_region(unsigned long base, unsigned long size, int replace_reg) |
21 | /* [SUMMARY] Get a free MTRR. | ||
22 | <base> The starting (base) address of the region. | ||
23 | <size> The size (in bytes) of the region. | ||
24 | [RETURNS] The index of the region on success, else -1 on error. | ||
25 | */ | ||
26 | { | 27 | { |
27 | int i, max; | ||
28 | mtrr_type ltype; | ||
29 | unsigned long lbase, lsize; | 28 | unsigned long lbase, lsize; |
29 | mtrr_type ltype; | ||
30 | int i, max; | ||
30 | 31 | ||
31 | max = num_var_ranges; | 32 | max = num_var_ranges; |
32 | if (replace_reg >= 0 && replace_reg < max) | 33 | if (replace_reg >= 0 && replace_reg < max) |
33 | return replace_reg; | 34 | return replace_reg; |
35 | |||
34 | for (i = 0; i < max; ++i) { | 36 | for (i = 0; i < max; ++i) { |
35 | if (centaur_mcr_reserved & (1 << i)) | 37 | if (centaur_mcr_reserved & (1 << i)) |
36 | continue; | 38 | continue; |
@@ -38,11 +40,14 @@ centaur_get_free_region(unsigned long base, unsigned long size, int replace_reg) | |||
38 | if (lsize == 0) | 40 | if (lsize == 0) |
39 | return i; | 41 | return i; |
40 | } | 42 | } |
43 | |||
41 | return -ENOSPC; | 44 | return -ENOSPC; |
42 | } | 45 | } |
43 | 46 | ||
44 | void | 47 | /* |
45 | mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) | 48 | * Report boot time MCR setups |
49 | */ | ||
50 | void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) | ||
46 | { | 51 | { |
47 | centaur_mcr[mcr].low = lo; | 52 | centaur_mcr[mcr].low = lo; |
48 | centaur_mcr[mcr].high = hi; | 53 | centaur_mcr[mcr].high = hi; |
@@ -54,33 +59,35 @@ centaur_get_mcr(unsigned int reg, unsigned long *base, | |||
54 | { | 59 | { |
55 | *base = centaur_mcr[reg].high >> PAGE_SHIFT; | 60 | *base = centaur_mcr[reg].high >> PAGE_SHIFT; |
56 | *size = -(centaur_mcr[reg].low & 0xfffff000) >> PAGE_SHIFT; | 61 | *size = -(centaur_mcr[reg].low & 0xfffff000) >> PAGE_SHIFT; |
57 | *type = MTRR_TYPE_WRCOMB; /* If it is there, it is write-combining */ | 62 | *type = MTRR_TYPE_WRCOMB; /* write-combining */ |
63 | |||
58 | if (centaur_mcr_type == 1 && ((centaur_mcr[reg].low & 31) & 2)) | 64 | if (centaur_mcr_type == 1 && ((centaur_mcr[reg].low & 31) & 2)) |
59 | *type = MTRR_TYPE_UNCACHABLE; | 65 | *type = MTRR_TYPE_UNCACHABLE; |
60 | if (centaur_mcr_type == 1 && (centaur_mcr[reg].low & 31) == 25) | 66 | if (centaur_mcr_type == 1 && (centaur_mcr[reg].low & 31) == 25) |
61 | *type = MTRR_TYPE_WRBACK; | 67 | *type = MTRR_TYPE_WRBACK; |
62 | if (centaur_mcr_type == 0 && (centaur_mcr[reg].low & 31) == 31) | 68 | if (centaur_mcr_type == 0 && (centaur_mcr[reg].low & 31) == 31) |
63 | *type = MTRR_TYPE_WRBACK; | 69 | *type = MTRR_TYPE_WRBACK; |
64 | |||
65 | } | 70 | } |
66 | 71 | ||
67 | static void centaur_set_mcr(unsigned int reg, unsigned long base, | 72 | static void |
68 | unsigned long size, mtrr_type type) | 73 | centaur_set_mcr(unsigned int reg, unsigned long base, |
74 | unsigned long size, mtrr_type type) | ||
69 | { | 75 | { |
70 | unsigned long low, high; | 76 | unsigned long low, high; |
71 | 77 | ||
72 | if (size == 0) { | 78 | if (size == 0) { |
73 | /* Disable */ | 79 | /* Disable */ |
74 | high = low = 0; | 80 | high = low = 0; |
75 | } else { | 81 | } else { |
76 | high = base << PAGE_SHIFT; | 82 | high = base << PAGE_SHIFT; |
77 | if (centaur_mcr_type == 0) | 83 | if (centaur_mcr_type == 0) { |
78 | low = -size << PAGE_SHIFT | 0x1f; /* only support write-combining... */ | 84 | /* Only support write-combining... */ |
79 | else { | 85 | low = -size << PAGE_SHIFT | 0x1f; |
86 | } else { | ||
80 | if (type == MTRR_TYPE_UNCACHABLE) | 87 | if (type == MTRR_TYPE_UNCACHABLE) |
81 | low = -size << PAGE_SHIFT | 0x02; /* NC */ | 88 | low = -size << PAGE_SHIFT | 0x02; /* NC */ |
82 | else | 89 | else |
83 | low = -size << PAGE_SHIFT | 0x09; /* WWO,WC */ | 90 | low = -size << PAGE_SHIFT | 0x09; /* WWO, WC */ |
84 | } | 91 | } |
85 | } | 92 | } |
86 | centaur_mcr[reg].high = high; | 93 | centaur_mcr[reg].high = high; |
@@ -88,118 +95,16 @@ static void centaur_set_mcr(unsigned int reg, unsigned long base, | |||
88 | wrmsr(MSR_IDT_MCR0 + reg, low, high); | 95 | wrmsr(MSR_IDT_MCR0 + reg, low, high); |
89 | } | 96 | } |
90 | 97 | ||
91 | #if 0 | 98 | static int |
92 | /* | 99 | centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int type) |
93 | * Initialise the later (saner) Winchip MCR variant. In this version | ||
94 | * the BIOS can pass us the registers it has used (but not their values) | ||
95 | * and the control register is read/write | ||
96 | */ | ||
97 | |||
98 | static void __init | ||
99 | centaur_mcr1_init(void) | ||
100 | { | ||
101 | unsigned i; | ||
102 | u32 lo, hi; | ||
103 | |||
104 | /* Unfortunately, MCR's are read-only, so there is no way to | ||
105 | * find out what the bios might have done. | ||
106 | */ | ||
107 | |||
108 | rdmsr(MSR_IDT_MCR_CTRL, lo, hi); | ||
109 | if (((lo >> 17) & 7) == 1) { /* Type 1 Winchip2 MCR */ | ||
110 | lo &= ~0x1C0; /* clear key */ | ||
111 | lo |= 0x040; /* set key to 1 */ | ||
112 | wrmsr(MSR_IDT_MCR_CTRL, lo, hi); /* unlock MCR */ | ||
113 | } | ||
114 | |||
115 | centaur_mcr_type = 1; | ||
116 | |||
117 | /* | ||
118 | * Clear any unconfigured MCR's. | ||
119 | */ | ||
120 | |||
121 | for (i = 0; i < 8; ++i) { | ||
122 | if (centaur_mcr[i].high == 0 && centaur_mcr[i].low == 0) { | ||
123 | if (!(lo & (1 << (9 + i)))) | ||
124 | wrmsr(MSR_IDT_MCR0 + i, 0, 0); | ||
125 | else | ||
126 | /* | ||
127 | * If the BIOS set up an MCR we cannot see it | ||
128 | * but we don't wish to obliterate it | ||
129 | */ | ||
130 | centaur_mcr_reserved |= (1 << i); | ||
131 | } | ||
132 | } | ||
133 | /* | ||
134 | * Throw the main write-combining switch... | ||
135 | * However if OOSTORE is enabled then people have already done far | ||
136 | * cleverer things and we should behave. | ||
137 | */ | ||
138 | |||
139 | lo |= 15; /* Write combine enables */ | ||
140 | wrmsr(MSR_IDT_MCR_CTRL, lo, hi); | ||
141 | } | ||
142 | |||
143 | /* | ||
144 | * Initialise the original winchip with read only MCR registers | ||
145 | * no used bitmask for the BIOS to pass on and write only control | ||
146 | */ | ||
147 | |||
148 | static void __init | ||
149 | centaur_mcr0_init(void) | ||
150 | { | ||
151 | unsigned i; | ||
152 | |||
153 | /* Unfortunately, MCR's are read-only, so there is no way to | ||
154 | * find out what the bios might have done. | ||
155 | */ | ||
156 | |||
157 | /* Clear any unconfigured MCR's. | ||
158 | * This way we are sure that the centaur_mcr array contains the actual | ||
159 | * values. The disadvantage is that any BIOS tweaks are thus undone. | ||
160 | * | ||
161 | */ | ||
162 | for (i = 0; i < 8; ++i) { | ||
163 | if (centaur_mcr[i].high == 0 && centaur_mcr[i].low == 0) | ||
164 | wrmsr(MSR_IDT_MCR0 + i, 0, 0); | ||
165 | } | ||
166 | |||
167 | wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0); /* Write only */ | ||
168 | } | ||
169 | |||
170 | /* | ||
171 | * Initialise Winchip series MCR registers | ||
172 | */ | ||
173 | |||
174 | static void __init | ||
175 | centaur_mcr_init(void) | ||
176 | { | ||
177 | struct set_mtrr_context ctxt; | ||
178 | |||
179 | set_mtrr_prepare_save(&ctxt); | ||
180 | set_mtrr_cache_disable(&ctxt); | ||
181 | |||
182 | if (boot_cpu_data.x86_model == 4) | ||
183 | centaur_mcr0_init(); | ||
184 | else if (boot_cpu_data.x86_model == 8 || boot_cpu_data.x86_model == 9) | ||
185 | centaur_mcr1_init(); | ||
186 | |||
187 | set_mtrr_done(&ctxt); | ||
188 | } | ||
189 | #endif | ||
190 | |||
191 | static int centaur_validate_add_page(unsigned long base, | ||
192 | unsigned long size, unsigned int type) | ||
193 | { | 100 | { |
194 | /* | 101 | /* |
195 | * FIXME: Winchip2 supports uncached | 102 | * FIXME: Winchip2 supports uncached |
196 | */ | 103 | */ |
197 | if (type != MTRR_TYPE_WRCOMB && | 104 | if (type != MTRR_TYPE_WRCOMB && |
198 | (centaur_mcr_type == 0 || type != MTRR_TYPE_UNCACHABLE)) { | 105 | (centaur_mcr_type == 0 || type != MTRR_TYPE_UNCACHABLE)) { |
199 | printk(KERN_WARNING | 106 | pr_warning("mtrr: only write-combining%s supported\n", |
200 | "mtrr: only write-combining%s supported\n", | 107 | centaur_mcr_type ? " and uncacheable are" : " is"); |
201 | centaur_mcr_type ? " and uncacheable are" | ||
202 | : " is"); | ||
203 | return -EINVAL; | 108 | return -EINVAL; |
204 | } | 109 | } |
205 | return 0; | 110 | return 0; |
@@ -207,7 +112,6 @@ static int centaur_validate_add_page(unsigned long base, | |||
207 | 112 | ||
208 | static struct mtrr_ops centaur_mtrr_ops = { | 113 | static struct mtrr_ops centaur_mtrr_ops = { |
209 | .vendor = X86_VENDOR_CENTAUR, | 114 | .vendor = X86_VENDOR_CENTAUR, |
210 | // .init = centaur_mcr_init, | ||
211 | .set = centaur_set_mcr, | 115 | .set = centaur_set_mcr, |
212 | .get = centaur_get_mcr, | 116 | .get = centaur_get_mcr, |
213 | .get_free_region = centaur_get_free_region, | 117 | .get_free_region = centaur_get_free_region, |
@@ -220,5 +124,3 @@ int __init centaur_init_mtrr(void) | |||
220 | set_mtrr_ops(¢aur_mtrr_ops); | 124 | set_mtrr_ops(¢aur_mtrr_ops); |
221 | return 0; | 125 | return 0; |
222 | } | 126 | } |
223 | |||
224 | //arch_initcall(centaur_init_mtrr); | ||
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c index 1d584a18a50d..315738c74aad 100644 --- a/arch/x86/kernel/cpu/mtrr/cleanup.c +++ b/arch/x86/kernel/cpu/mtrr/cleanup.c | |||
@@ -1,51 +1,75 @@ | |||
1 | /* MTRR (Memory Type Range Register) cleanup | 1 | /* |
2 | 2 | * MTRR (Memory Type Range Register) cleanup | |
3 | Copyright (C) 2009 Yinghai Lu | 3 | * |
4 | 4 | * Copyright (C) 2009 Yinghai Lu | |
5 | This library is free software; you can redistribute it and/or | 5 | * |
6 | modify it under the terms of the GNU Library General Public | 6 | * This library is free software; you can redistribute it and/or |
7 | License as published by the Free Software Foundation; either | 7 | * modify it under the terms of the GNU Library General Public |
8 | version 2 of the License, or (at your option) any later version. | 8 | * License as published by the Free Software Foundation; either |
9 | 9 | * version 2 of the License, or (at your option) any later version. | |
10 | This library is distributed in the hope that it will be useful, | 10 | * |
11 | but WITHOUT ANY WARRANTY; without even the implied warranty of | 11 | * This library is distributed in the hope that it will be useful, |
12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | Library General Public License for more details. | 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
14 | 14 | * Library General Public License for more details. | |
15 | You should have received a copy of the GNU Library General Public | 15 | * |
16 | License along with this library; if not, write to the Free | 16 | * You should have received a copy of the GNU Library General Public |
17 | Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 17 | * License along with this library; if not, write to the Free |
18 | */ | 18 | * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
19 | 19 | */ | |
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/init.h> | 21 | #include <linux/init.h> |
22 | #include <linux/pci.h> | 22 | #include <linux/pci.h> |
23 | #include <linux/smp.h> | 23 | #include <linux/smp.h> |
24 | #include <linux/cpu.h> | 24 | #include <linux/cpu.h> |
25 | #include <linux/mutex.h> | ||
26 | #include <linux/sort.h> | 25 | #include <linux/sort.h> |
26 | #include <linux/mutex.h> | ||
27 | #include <linux/uaccess.h> | ||
28 | #include <linux/kvm_para.h> | ||
27 | 29 | ||
30 | #include <asm/processor.h> | ||
28 | #include <asm/e820.h> | 31 | #include <asm/e820.h> |
29 | #include <asm/mtrr.h> | 32 | #include <asm/mtrr.h> |
30 | #include <asm/uaccess.h> | ||
31 | #include <asm/processor.h> | ||
32 | #include <asm/msr.h> | 33 | #include <asm/msr.h> |
33 | #include <asm/kvm_para.h> | ||
34 | #include "mtrr.h" | ||
35 | 34 | ||
36 | /* should be related to MTRR_VAR_RANGES nums */ | 35 | #include "mtrr.h" |
37 | #define RANGE_NUM 256 | ||
38 | 36 | ||
39 | struct res_range { | 37 | struct res_range { |
40 | unsigned long start; | 38 | unsigned long start; |
41 | unsigned long end; | 39 | unsigned long end; |
40 | }; | ||
41 | |||
42 | struct var_mtrr_range_state { | ||
43 | unsigned long base_pfn; | ||
44 | unsigned long size_pfn; | ||
45 | mtrr_type type; | ||
46 | }; | ||
47 | |||
48 | struct var_mtrr_state { | ||
49 | unsigned long range_startk; | ||
50 | unsigned long range_sizek; | ||
51 | unsigned long chunk_sizek; | ||
52 | unsigned long gran_sizek; | ||
53 | unsigned int reg; | ||
42 | }; | 54 | }; |
43 | 55 | ||
56 | /* Should be related to MTRR_VAR_RANGES nums */ | ||
57 | #define RANGE_NUM 256 | ||
58 | |||
59 | static struct res_range __initdata range[RANGE_NUM]; | ||
60 | static int __initdata nr_range; | ||
61 | |||
62 | static struct var_mtrr_range_state __initdata range_state[RANGE_NUM]; | ||
63 | |||
64 | static int __initdata debug_print; | ||
65 | #define Dprintk(x...) do { if (debug_print) printk(KERN_DEBUG x); } while (0) | ||
66 | |||
67 | |||
44 | static int __init | 68 | static int __init |
45 | add_range(struct res_range *range, int nr_range, unsigned long start, | 69 | add_range(struct res_range *range, int nr_range, |
46 | unsigned long end) | 70 | unsigned long start, unsigned long end) |
47 | { | 71 | { |
48 | /* out of slots */ | 72 | /* Out of slots: */ |
49 | if (nr_range >= RANGE_NUM) | 73 | if (nr_range >= RANGE_NUM) |
50 | return nr_range; | 74 | return nr_range; |
51 | 75 | ||
@@ -58,12 +82,12 @@ add_range(struct res_range *range, int nr_range, unsigned long start, | |||
58 | } | 82 | } |
59 | 83 | ||
60 | static int __init | 84 | static int __init |
61 | add_range_with_merge(struct res_range *range, int nr_range, unsigned long start, | 85 | add_range_with_merge(struct res_range *range, int nr_range, |
62 | unsigned long end) | 86 | unsigned long start, unsigned long end) |
63 | { | 87 | { |
64 | int i; | 88 | int i; |
65 | 89 | ||
66 | /* try to merge it with old one */ | 90 | /* Try to merge it with old one: */ |
67 | for (i = 0; i < nr_range; i++) { | 91 | for (i = 0; i < nr_range; i++) { |
68 | unsigned long final_start, final_end; | 92 | unsigned long final_start, final_end; |
69 | unsigned long common_start, common_end; | 93 | unsigned long common_start, common_end; |
@@ -84,7 +108,7 @@ add_range_with_merge(struct res_range *range, int nr_range, unsigned long start, | |||
84 | return nr_range; | 108 | return nr_range; |
85 | } | 109 | } |
86 | 110 | ||
87 | /* need to add that */ | 111 | /* Need to add it: */ |
88 | return add_range(range, nr_range, start, end); | 112 | return add_range(range, nr_range, start, end); |
89 | } | 113 | } |
90 | 114 | ||
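The two helpers this hunk restyles implement a simple interval store: add_range() appends while slots remain, and add_range_with_merge() first tries to widen an existing slot when the new interval touches or overlaps it. A standalone C model of that strategy follows; the inclusive-end convention and the touch-or-overlap test are inferred from these hunks, not lifted verbatim from the kernel tree:

#include <stdio.h>

#define RANGE_NUM 256

struct res_range {
    unsigned long start;
    unsigned long end;    /* inclusive */
};

static int add_range(struct res_range *range, int nr_range,
                     unsigned long start, unsigned long end)
{
    /* Out of slots: */
    if (nr_range >= RANGE_NUM)
        return nr_range;

    range[nr_range].start = start;
    range[nr_range].end = end;

    return nr_range + 1;
}

static int add_range_with_merge(struct res_range *range, int nr_range,
                                unsigned long start, unsigned long end)
{
    int i;

    /* Try to merge it with an old one: */
    for (i = 0; i < nr_range; i++) {
        unsigned long common_start, common_end;

        if (!range[i].end)
            continue;

        common_start = start > range[i].start ? start : range[i].start;
        common_end = end < range[i].end ? end : range[i].end;
        /* Skip slots that neither touch nor overlap: */
        if (common_start > common_end + 1)
            continue;

        if (start < range[i].start)
            range[i].start = start;
        if (end > range[i].end)
            range[i].end = end;
        return nr_range;
    }

    /* Need to add it: */
    return add_range(range, nr_range, start, end);
}

int main(void)
{
    static struct res_range r[RANGE_NUM];
    int n = 0;

    n = add_range_with_merge(r, n, 0x000, 0x0ff);
    n = add_range_with_merge(r, n, 0x100, 0x1ff);  /* adjoins -> merges */
    printf("%d range(s), first: %#lx-%#lx\n", n, r[0].start, r[0].end);
    return 0;
}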
@@ -117,7 +141,7 @@ subtract_range(struct res_range *range, unsigned long start, unsigned long end) | |||
117 | } | 141 | } |
118 | 142 | ||
119 | if (start > range[j].start && end < range[j].end) { | 143 | if (start > range[j].start && end < range[j].end) { |
120 | /* find the new spare */ | 144 | /* Find the new spare: */ |
121 | for (i = 0; i < RANGE_NUM; i++) { | 145 | for (i = 0; i < RANGE_NUM; i++) { |
122 | if (range[i].end == 0) | 146 | if (range[i].end == 0) |
123 | break; | 147 | break; |
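When subtract_range() punches a hole strictly inside an existing interval, one interval becomes two, so the code above scans for a free slot (end == 0) to hold the upper remainder. A sketch of just that case, reusing the res_range layout and RANGE_NUM from the previous sketch:

/* Hole strictly inside range[j]: split it, putting the upper part
 * into the first free slot (end == 0 marks a free slot). */
static void punch_hole(struct res_range *range, int j,
                       unsigned long start, unsigned long end)
{
    int i;

    for (i = 0; i < RANGE_NUM; i++) {
        if (range[i].end == 0)
            break;
    }
    if (i >= RANGE_NUM)
        return;                     /* no spare slot; give up */

    range[i].start = end + 1;       /* upper remainder */
    range[i].end = range[j].end;
    range[j].end = start - 1;       /* keep the lower piece in place */
}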
@@ -146,14 +170,8 @@ static int __init cmp_range(const void *x1, const void *x2) | |||
146 | return start1 - start2; | 170 | return start1 - start2; |
147 | } | 171 | } |
148 | 172 | ||
149 | struct var_mtrr_range_state { | 173 | #define BIOS_BUG_MSG KERN_WARNING \ |
150 | unsigned long base_pfn; | 174 | "WARNING: BIOS bug: VAR MTRR %d contains strange UC entry under 1M, check with your system vendor!\n" |
151 | unsigned long size_pfn; | ||
152 | mtrr_type type; | ||
153 | }; | ||
154 | |||
155 | static struct var_mtrr_range_state __initdata range_state[RANGE_NUM]; | ||
156 | static int __initdata debug_print; | ||
157 | 175 | ||
158 | static int __init | 176 | static int __init |
159 | x86_get_mtrr_mem_range(struct res_range *range, int nr_range, | 177 | x86_get_mtrr_mem_range(struct res_range *range, int nr_range, |
@@ -180,7 +198,7 @@ x86_get_mtrr_mem_range(struct res_range *range, int nr_range, | |||
180 | range[i].start, range[i].end + 1); | 198 | range[i].start, range[i].end + 1); |
181 | } | 199 | } |
182 | 200 | ||
183 | /* take out UC ranges */ | 201 | /* Take out UC ranges: */ |
184 | for (i = 0; i < num_var_ranges; i++) { | 202 | for (i = 0; i < num_var_ranges; i++) { |
185 | type = range_state[i].type; | 203 | type = range_state[i].type; |
186 | if (type != MTRR_TYPE_UNCACHABLE && | 204 | if (type != MTRR_TYPE_UNCACHABLE && |
@@ -193,9 +211,7 @@ x86_get_mtrr_mem_range(struct res_range *range, int nr_range, | |||
193 | if (base < (1<<(20-PAGE_SHIFT)) && mtrr_state.have_fixed && | 211 | if (base < (1<<(20-PAGE_SHIFT)) && mtrr_state.have_fixed && |
194 | (mtrr_state.enabled & 1)) { | 212 | (mtrr_state.enabled & 1)) { |
195 | /* Var MTRR contains UC entry below 1M? Skip it: */ | 213 | /* Var MTRR contains UC entry below 1M? Skip it: */ |
196 | printk(KERN_WARNING "WARNING: BIOS bug: VAR MTRR %d " | 214 | printk(BIOS_BUG_MSG, i); |
197 | "contains strange UC entry under 1M, check " | ||
198 | "with your system vendor!\n", i); | ||
199 | if (base + size <= (1<<(20-PAGE_SHIFT))) | 215 | if (base + size <= (1<<(20-PAGE_SHIFT))) |
200 | continue; | 216 | continue; |
201 | size -= (1<<(20-PAGE_SHIFT)) - base; | 217 | size -= (1<<(20-PAGE_SHIFT)) - base; |
@@ -237,17 +253,13 @@ x86_get_mtrr_mem_range(struct res_range *range, int nr_range, | |||
237 | return nr_range; | 253 | return nr_range; |
238 | } | 254 | } |
239 | 255 | ||
240 | static struct res_range __initdata range[RANGE_NUM]; | ||
241 | static int __initdata nr_range; | ||
242 | |||
243 | #ifdef CONFIG_MTRR_SANITIZER | 256 | #ifdef CONFIG_MTRR_SANITIZER |
244 | 257 | ||
245 | static unsigned long __init sum_ranges(struct res_range *range, int nr_range) | 258 | static unsigned long __init sum_ranges(struct res_range *range, int nr_range) |
246 | { | 259 | { |
247 | unsigned long sum; | 260 | unsigned long sum = 0; |
248 | int i; | 261 | int i; |
249 | 262 | ||
250 | sum = 0; | ||
251 | for (i = 0; i < nr_range; i++) | 263 | for (i = 0; i < nr_range; i++) |
252 | sum += range[i].end + 1 - range[i].start; | 264 | sum += range[i].end + 1 - range[i].start; |
253 | 265 | ||
@@ -278,17 +290,9 @@ static int __init mtrr_cleanup_debug_setup(char *str) | |||
278 | } | 290 | } |
279 | early_param("mtrr_cleanup_debug", mtrr_cleanup_debug_setup); | 291 | early_param("mtrr_cleanup_debug", mtrr_cleanup_debug_setup); |
280 | 292 | ||
281 | struct var_mtrr_state { | ||
282 | unsigned long range_startk; | ||
283 | unsigned long range_sizek; | ||
284 | unsigned long chunk_sizek; | ||
285 | unsigned long gran_sizek; | ||
286 | unsigned int reg; | ||
287 | }; | ||
288 | |||
289 | static void __init | 293 | static void __init |
290 | set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek, | 294 | set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek, |
291 | unsigned char type, unsigned int address_bits) | 295 | unsigned char type, unsigned int address_bits) |
292 | { | 296 | { |
293 | u32 base_lo, base_hi, mask_lo, mask_hi; | 297 | u32 base_lo, base_hi, mask_lo, mask_hi; |
294 | u64 base, mask; | 298 | u64 base, mask; |
@@ -301,7 +305,7 @@ set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek, | |||
301 | mask = (1ULL << address_bits) - 1; | 305 | mask = (1ULL << address_bits) - 1; |
302 | mask &= ~((((u64)sizek) << 10) - 1); | 306 | mask &= ~((((u64)sizek) << 10) - 1); |
303 | 307 | ||
304 | base = ((u64)basek) << 10; | 308 | base = ((u64)basek) << 10; |
305 | 309 | ||
306 | base |= type; | 310 | base |= type; |
307 | mask |= 0x800; | 311 | mask |= 0x800; |
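The base/mask arithmetic in this hunk builds the MTRRphysBase/MTRRphysMask pair: the mask keeps only the address bits above the region size, the type code sits in the low byte of the base, and bit 11 of the mask is the valid bit. A worked example with illustrative numbers (36 physical address bits, a 1GiB write-back region at 2GiB):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    unsigned int address_bits = 36;
    unsigned long basek = 2UL << 20;    /* base in KiB (2GiB) */
    unsigned long sizek = 1UL << 20;    /* size in KiB (1GiB) */
    unsigned char type = 6;             /* MTRR_TYPE_WRBACK */
    uint64_t base, mask;

    mask = (1ULL << address_bits) - 1;
    mask &= ~((((uint64_t)sizek) << 10) - 1);  /* zero the in-range bits */

    base = ((uint64_t)basek) << 10;     /* KiB -> bytes */
    base |= type;                       /* type code in the low byte */
    mask |= 0x800;                      /* bit 11: mask valid */

    printf("PhysBase = %#llx\n", (unsigned long long)base); /* 0x80000006 */
    printf("PhysMask = %#llx\n", (unsigned long long)mask); /* 0xfc0000800 */
    return 0;
}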
@@ -317,15 +321,14 @@ set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek, | |||
317 | 321 | ||
318 | static void __init | 322 | static void __init |
319 | save_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek, | 323 | save_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek, |
320 | unsigned char type) | 324 | unsigned char type) |
321 | { | 325 | { |
322 | range_state[reg].base_pfn = basek >> (PAGE_SHIFT - 10); | 326 | range_state[reg].base_pfn = basek >> (PAGE_SHIFT - 10); |
323 | range_state[reg].size_pfn = sizek >> (PAGE_SHIFT - 10); | 327 | range_state[reg].size_pfn = sizek >> (PAGE_SHIFT - 10); |
324 | range_state[reg].type = type; | 328 | range_state[reg].type = type; |
325 | } | 329 | } |
326 | 330 | ||
327 | static void __init | 331 | static void __init set_var_mtrr_all(unsigned int address_bits) |
328 | set_var_mtrr_all(unsigned int address_bits) | ||
329 | { | 332 | { |
330 | unsigned long basek, sizek; | 333 | unsigned long basek, sizek; |
331 | unsigned char type; | 334 | unsigned char type; |
@@ -342,11 +345,11 @@ set_var_mtrr_all(unsigned int address_bits) | |||
342 | 345 | ||
343 | static unsigned long to_size_factor(unsigned long sizek, char *factorp) | 346 | static unsigned long to_size_factor(unsigned long sizek, char *factorp) |
344 | { | 347 | { |
345 | char factor; | ||
346 | unsigned long base = sizek; | 348 | unsigned long base = sizek; |
349 | char factor; | ||
347 | 350 | ||
348 | if (base & ((1<<10) - 1)) { | 351 | if (base & ((1<<10) - 1)) { |
349 | /* not MB alignment */ | 352 | /* Not MB-aligned: */ |
350 | factor = 'K'; | 353 | factor = 'K'; |
351 | } else if (base & ((1<<20) - 1)) { | 354 | } else if (base & ((1<<20) - 1)) { |
352 | factor = 'M'; | 355 | factor = 'M'; |
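to_size_factor() just rescales a KiB count to the largest unit that divides it exactly, for pretty-printing. A standalone version of the logic shown in the hunk:

#include <stdio.h>

static unsigned long to_size_factor(unsigned long sizek, char *factorp)
{
    unsigned long base = sizek;
    char factor;

    if (base & ((1 << 10) - 1)) {
        factor = 'K';           /* not MiB-aligned */
    } else if (base & ((1 << 20) - 1)) {
        factor = 'M';           /* MiB- but not GiB-aligned */
        base >>= 10;
    } else {
        factor = 'G';
        base >>= 20;
    }
    *factorp = factor;
    return base;
}

int main(void)
{
    char f;
    unsigned long v = to_size_factor(256 * 1024, &f);  /* 256MiB in KiB */
    printf("%lu%cB\n", v, f);                          /* prints 256MB */
    return 0;
}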
@@ -372,11 +375,12 @@ range_to_mtrr(unsigned int reg, unsigned long range_startk, | |||
372 | unsigned long max_align, align; | 375 | unsigned long max_align, align; |
373 | unsigned long sizek; | 376 | unsigned long sizek; |
374 | 377 | ||
375 | /* Compute the maximum size I can make a range */ | 378 | /* Compute the maximum size with which we can make a range: */ |
376 | if (range_startk) | 379 | if (range_startk) |
377 | max_align = ffs(range_startk) - 1; | 380 | max_align = ffs(range_startk) - 1; |
378 | else | 381 | else |
379 | max_align = 32; | 382 | max_align = 32; |
383 | |||
380 | align = fls(range_sizek) - 1; | 384 | align = fls(range_sizek) - 1; |
381 | if (align > max_align) | 385 | if (align > max_align) |
382 | align = max_align; | 386 | align = max_align; |
@@ -386,11 +390,10 @@ range_to_mtrr(unsigned int reg, unsigned long range_startk, | |||
386 | char start_factor = 'K', size_factor = 'K'; | 390 | char start_factor = 'K', size_factor = 'K'; |
387 | unsigned long start_base, size_base; | 391 | unsigned long start_base, size_base; |
388 | 392 | ||
389 | start_base = to_size_factor(range_startk, | 393 | start_base = to_size_factor(range_startk, &start_factor); |
390 | &start_factor), | 394 | size_base = to_size_factor(sizek, &size_factor); |
391 | size_base = to_size_factor(sizek, &size_factor), | ||
392 | 395 | ||
393 | printk(KERN_DEBUG "Setting variable MTRR %d, " | 396 | Dprintk("Setting variable MTRR %d, " |
394 | "base: %ld%cB, range: %ld%cB, type %s\n", | 397 | "base: %ld%cB, range: %ld%cB, type %s\n", |
395 | reg, start_base, start_factor, | 398 | reg, start_base, start_factor, |
396 | size_base, size_factor, | 399 | size_base, size_factor, |
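The ffs()/fls() computation above picks, at each step, the largest power-of-two block that is both aligned to the current start and no larger than what remains, which is exactly what one variable MTRR can describe. A userspace rendering with stand-in bit helpers (the input values are illustrative):

#include <stdio.h>

/* Minimal stand-ins for the kernel's ffs()/fls() bit helpers: */
static int ffs_ul(unsigned long v)
{
    int r = 1;
    if (!v)
        return 0;
    while (!(v & 1)) {
        v >>= 1;
        r++;
    }
    return r;
}

static int fls_ul(unsigned long v)
{
    int r = 0;
    while (v) {
        v >>= 1;
        r++;
    }
    return r;
}

int main(void)
{
    unsigned long range_startk = 0xC0000;  /* 768MiB, in KiB */
    unsigned long range_sizek = 0x80000;   /* 512MiB, in KiB */
    unsigned long max_align, align, sizek;

    /* Largest power of two the current start is aligned to: */
    max_align = range_startk ? ffs_ul(range_startk) - 1 : 32;

    /* Largest power of two that still fits in the remaining size: */
    align = fls_ul(range_sizek) - 1;
    if (align > max_align)
        align = max_align;

    sizek = 1UL << align;
    printf("next MTRR block: %lu KiB\n", sizek);  /* 262144 (256MiB) */
    return 0;
}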
@@ -425,10 +428,11 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek, | |||
425 | chunk_sizek = state->chunk_sizek; | 428 | chunk_sizek = state->chunk_sizek; |
426 | gran_sizek = state->gran_sizek; | 429 | gran_sizek = state->gran_sizek; |
427 | 430 | ||
428 | /* align with gran size, prevent small block used up MTRRs */ | 431 | /* Align with gran size to prevent small blocks using up MTRRs: */ |
429 | range_basek = ALIGN(state->range_startk, gran_sizek); | 432 | range_basek = ALIGN(state->range_startk, gran_sizek); |
430 | if ((range_basek > basek) && basek) | 433 | if ((range_basek > basek) && basek) |
431 | return second_sizek; | 434 | return second_sizek; |
435 | |||
432 | state->range_sizek -= (range_basek - state->range_startk); | 436 | state->range_sizek -= (range_basek - state->range_startk); |
433 | range_sizek = ALIGN(state->range_sizek, gran_sizek); | 437 | range_sizek = ALIGN(state->range_sizek, gran_sizek); |
434 | 438 | ||
@@ -439,22 +443,21 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek, | |||
439 | } | 443 | } |
440 | state->range_sizek = range_sizek; | 444 | state->range_sizek = range_sizek; |
441 | 445 | ||
442 | /* try to append some small hole */ | 446 | /* Try to append some small hole: */ |
443 | range0_basek = state->range_startk; | 447 | range0_basek = state->range_startk; |
444 | range0_sizek = ALIGN(state->range_sizek, chunk_sizek); | 448 | range0_sizek = ALIGN(state->range_sizek, chunk_sizek); |
445 | 449 | ||
446 | /* no increase */ | 450 | /* No increase: */ |
447 | if (range0_sizek == state->range_sizek) { | 451 | if (range0_sizek == state->range_sizek) { |
448 | if (debug_print) | 452 | Dprintk("rangeX: %016lx - %016lx\n", |
449 | printk(KERN_DEBUG "rangeX: %016lx - %016lx\n", | 453 | range0_basek<<10, |
450 | range0_basek<<10, | 454 | (range0_basek + state->range_sizek)<<10); |
451 | (range0_basek + state->range_sizek)<<10); | ||
452 | state->reg = range_to_mtrr(state->reg, range0_basek, | 455 | state->reg = range_to_mtrr(state->reg, range0_basek, |
453 | state->range_sizek, MTRR_TYPE_WRBACK); | 456 | state->range_sizek, MTRR_TYPE_WRBACK); |
454 | return 0; | 457 | return 0; |
455 | } | 458 | } |
456 | 459 | ||
457 | /* only cut back, when it is not the last */ | 460 | /* Only cut back when it is not the last: */ |
458 | if (sizek) { | 461 | if (sizek) { |
459 | while (range0_basek + range0_sizek > (basek + sizek)) { | 462 | while (range0_basek + range0_sizek > (basek + sizek)) { |
460 | if (range0_sizek >= chunk_sizek) | 463 | if (range0_sizek >= chunk_sizek) |
@@ -470,16 +473,16 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek, | |||
470 | second_try: | 473 | second_try: |
471 | range_basek = range0_basek + range0_sizek; | 474 | range_basek = range0_basek + range0_sizek; |
472 | 475 | ||
473 | /* one hole in the middle */ | 476 | /* One hole in the middle: */ |
474 | if (range_basek > basek && range_basek <= (basek + sizek)) | 477 | if (range_basek > basek && range_basek <= (basek + sizek)) |
475 | second_sizek = range_basek - basek; | 478 | second_sizek = range_basek - basek; |
476 | 479 | ||
477 | if (range0_sizek > state->range_sizek) { | 480 | if (range0_sizek > state->range_sizek) { |
478 | 481 | ||
479 | /* one hole in middle or at end */ | 482 | /* One hole in middle or at the end: */ |
480 | hole_sizek = range0_sizek - state->range_sizek - second_sizek; | 483 | hole_sizek = range0_sizek - state->range_sizek - second_sizek; |
481 | 484 | ||
482 | /* hole size should be less than half of range0 size */ | 485 | /* Hole size should be less than half of range0 size: */ |
483 | if (hole_sizek >= (range0_sizek >> 1) && | 486 | if (hole_sizek >= (range0_sizek >> 1) && |
484 | range0_sizek >= chunk_sizek) { | 487 | range0_sizek >= chunk_sizek) { |
485 | range0_sizek -= chunk_sizek; | 488 | range0_sizek -= chunk_sizek; |
@@ -491,32 +494,30 @@ second_try: | |||
491 | } | 494 | } |
492 | 495 | ||
493 | if (range0_sizek) { | 496 | if (range0_sizek) { |
494 | if (debug_print) | 497 | Dprintk("range0: %016lx - %016lx\n", |
495 | printk(KERN_DEBUG "range0: %016lx - %016lx\n", | 498 | range0_basek<<10, |
496 | range0_basek<<10, | 499 | (range0_basek + range0_sizek)<<10); |
497 | (range0_basek + range0_sizek)<<10); | ||
498 | state->reg = range_to_mtrr(state->reg, range0_basek, | 500 | state->reg = range_to_mtrr(state->reg, range0_basek, |
499 | range0_sizek, MTRR_TYPE_WRBACK); | 501 | range0_sizek, MTRR_TYPE_WRBACK); |
500 | } | 502 | } |
501 | 503 | ||
502 | if (range0_sizek < state->range_sizek) { | 504 | if (range0_sizek < state->range_sizek) { |
503 | /* need to handle left over */ | 505 | /* Need to handle left over range: */ |
504 | range_sizek = state->range_sizek - range0_sizek; | 506 | range_sizek = state->range_sizek - range0_sizek; |
505 | 507 | ||
506 | if (debug_print) | 508 | Dprintk("range: %016lx - %016lx\n", |
507 | printk(KERN_DEBUG "range: %016lx - %016lx\n", | 509 | range_basek<<10, |
508 | range_basek<<10, | 510 | (range_basek + range_sizek)<<10); |
509 | (range_basek + range_sizek)<<10); | 511 | |
510 | state->reg = range_to_mtrr(state->reg, range_basek, | 512 | state->reg = range_to_mtrr(state->reg, range_basek, |
511 | range_sizek, MTRR_TYPE_WRBACK); | 513 | range_sizek, MTRR_TYPE_WRBACK); |
512 | } | 514 | } |
513 | 515 | ||
514 | if (hole_sizek) { | 516 | if (hole_sizek) { |
515 | hole_basek = range_basek - hole_sizek - second_sizek; | 517 | hole_basek = range_basek - hole_sizek - second_sizek; |
516 | if (debug_print) | 518 | Dprintk("hole: %016lx - %016lx\n", |
517 | printk(KERN_DEBUG "hole: %016lx - %016lx\n", | 519 | hole_basek<<10, |
518 | hole_basek<<10, | 520 | (hole_basek + hole_sizek)<<10); |
519 | (hole_basek + hole_sizek)<<10); | ||
520 | state->reg = range_to_mtrr(state->reg, hole_basek, | 521 | state->reg = range_to_mtrr(state->reg, hole_basek, |
521 | hole_sizek, MTRR_TYPE_UNCACHABLE); | 522 | hole_sizek, MTRR_TYPE_UNCACHABLE); |
522 | } | 523 | } |
@@ -537,23 +538,23 @@ set_var_mtrr_range(struct var_mtrr_state *state, unsigned long base_pfn, | |||
537 | basek = base_pfn << (PAGE_SHIFT - 10); | 538 | basek = base_pfn << (PAGE_SHIFT - 10); |
538 | sizek = size_pfn << (PAGE_SHIFT - 10); | 539 | sizek = size_pfn << (PAGE_SHIFT - 10); |
539 | 540 | ||
540 | /* See if I can merge with the last range */ | 541 | /* See if I can merge with the last range: */ |
541 | if ((basek <= 1024) || | 542 | if ((basek <= 1024) || |
542 | (state->range_startk + state->range_sizek == basek)) { | 543 | (state->range_startk + state->range_sizek == basek)) { |
543 | unsigned long endk = basek + sizek; | 544 | unsigned long endk = basek + sizek; |
544 | state->range_sizek = endk - state->range_startk; | 545 | state->range_sizek = endk - state->range_startk; |
545 | return; | 546 | return; |
546 | } | 547 | } |
547 | /* Write the range mtrrs */ | 548 | /* Write the range mtrrs: */ |
548 | if (state->range_sizek != 0) | 549 | if (state->range_sizek != 0) |
549 | second_sizek = range_to_mtrr_with_hole(state, basek, sizek); | 550 | second_sizek = range_to_mtrr_with_hole(state, basek, sizek); |
550 | 551 | ||
551 | /* Allocate an msr */ | 552 | /* Allocate an msr: */ |
552 | state->range_startk = basek + second_sizek; | 553 | state->range_startk = basek + second_sizek; |
553 | state->range_sizek = sizek - second_sizek; | 554 | state->range_sizek = sizek - second_sizek; |
554 | } | 555 | } |
555 | 556 | ||
556 | /* mininum size of mtrr block that can take hole */ | 557 | /* Minimum size of mtrr block that can take a hole: */ |
557 | static u64 mtrr_chunk_size __initdata = (256ULL<<20); | 558 | static u64 mtrr_chunk_size __initdata = (256ULL<<20); |
558 | 559 | ||
559 | static int __init parse_mtrr_chunk_size_opt(char *p) | 560 | static int __init parse_mtrr_chunk_size_opt(char *p) |
@@ -565,7 +566,7 @@ static int __init parse_mtrr_chunk_size_opt(char *p) | |||
565 | } | 566 | } |
566 | early_param("mtrr_chunk_size", parse_mtrr_chunk_size_opt); | 567 | early_param("mtrr_chunk_size", parse_mtrr_chunk_size_opt); |
567 | 568 | ||
568 | /* granity of mtrr of block */ | 569 | /* Granularity of an mtrr block: */ |
569 | static u64 mtrr_gran_size __initdata; | 570 | static u64 mtrr_gran_size __initdata; |
570 | 571 | ||
571 | static int __init parse_mtrr_gran_size_opt(char *p) | 572 | static int __init parse_mtrr_gran_size_opt(char *p) |
@@ -577,7 +578,7 @@ static int __init parse_mtrr_gran_size_opt(char *p) | |||
577 | } | 578 | } |
578 | early_param("mtrr_gran_size", parse_mtrr_gran_size_opt); | 579 | early_param("mtrr_gran_size", parse_mtrr_gran_size_opt); |
579 | 580 | ||
580 | static int nr_mtrr_spare_reg __initdata = | 581 | static unsigned long nr_mtrr_spare_reg __initdata = |
581 | CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT; | 582 | CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT; |
582 | 583 | ||
583 | static int __init parse_mtrr_spare_reg(char *arg) | 584 | static int __init parse_mtrr_spare_reg(char *arg) |
@@ -586,7 +587,6 @@ static int __init parse_mtrr_spare_reg(char *arg) | |||
586 | nr_mtrr_spare_reg = simple_strtoul(arg, NULL, 0); | 587 | nr_mtrr_spare_reg = simple_strtoul(arg, NULL, 0); |
587 | return 0; | 588 | return 0; |
588 | } | 589 | } |
589 | |||
590 | early_param("mtrr_spare_reg_nr", parse_mtrr_spare_reg); | 590 | early_param("mtrr_spare_reg_nr", parse_mtrr_spare_reg); |
591 | 591 | ||
592 | static int __init | 592 | static int __init |
@@ -594,8 +594,8 @@ x86_setup_var_mtrrs(struct res_range *range, int nr_range, | |||
594 | u64 chunk_size, u64 gran_size) | 594 | u64 chunk_size, u64 gran_size) |
595 | { | 595 | { |
596 | struct var_mtrr_state var_state; | 596 | struct var_mtrr_state var_state; |
597 | int i; | ||
598 | int num_reg; | 597 | int num_reg; |
598 | int i; | ||
599 | 599 | ||
600 | var_state.range_startk = 0; | 600 | var_state.range_startk = 0; |
601 | var_state.range_sizek = 0; | 601 | var_state.range_sizek = 0; |
@@ -605,17 +605,18 @@ x86_setup_var_mtrrs(struct res_range *range, int nr_range, | |||
605 | 605 | ||
606 | memset(range_state, 0, sizeof(range_state)); | 606 | memset(range_state, 0, sizeof(range_state)); |
607 | 607 | ||
608 | /* Write the range etc */ | 608 | /* Write the range: */ |
609 | for (i = 0; i < nr_range; i++) | 609 | for (i = 0; i < nr_range; i++) { |
610 | set_var_mtrr_range(&var_state, range[i].start, | 610 | set_var_mtrr_range(&var_state, range[i].start, |
611 | range[i].end - range[i].start + 1); | 611 | range[i].end - range[i].start + 1); |
612 | } | ||
612 | 613 | ||
613 | /* Write the last range */ | 614 | /* Write the last range: */ |
614 | if (var_state.range_sizek != 0) | 615 | if (var_state.range_sizek != 0) |
615 | range_to_mtrr_with_hole(&var_state, 0, 0); | 616 | range_to_mtrr_with_hole(&var_state, 0, 0); |
616 | 617 | ||
617 | num_reg = var_state.reg; | 618 | num_reg = var_state.reg; |
618 | /* Clear out the extra MTRR's */ | 619 | /* Clear out the extra MTRR's: */ |
619 | while (var_state.reg < num_var_ranges) { | 620 | while (var_state.reg < num_var_ranges) { |
620 | save_var_mtrr(var_state.reg, 0, 0, 0); | 621 | save_var_mtrr(var_state.reg, 0, 0, 0); |
621 | var_state.reg++; | 622 | var_state.reg++; |
@@ -625,11 +626,11 @@ x86_setup_var_mtrrs(struct res_range *range, int nr_range, | |||
625 | } | 626 | } |
626 | 627 | ||
627 | struct mtrr_cleanup_result { | 628 | struct mtrr_cleanup_result { |
628 | unsigned long gran_sizek; | 629 | unsigned long gran_sizek; |
629 | unsigned long chunk_sizek; | 630 | unsigned long chunk_sizek; |
630 | unsigned long lose_cover_sizek; | 631 | unsigned long lose_cover_sizek; |
631 | unsigned int num_reg; | 632 | unsigned int num_reg; |
632 | int bad; | 633 | int bad; |
633 | }; | 634 | }; |
634 | 635 | ||
635 | /* | 636 | /* |
@@ -645,10 +646,10 @@ static unsigned long __initdata min_loss_pfn[RANGE_NUM]; | |||
645 | 646 | ||
646 | static void __init print_out_mtrr_range_state(void) | 647 | static void __init print_out_mtrr_range_state(void) |
647 | { | 648 | { |
648 | int i; | ||
649 | char start_factor = 'K', size_factor = 'K'; | 649 | char start_factor = 'K', size_factor = 'K'; |
650 | unsigned long start_base, size_base; | 650 | unsigned long start_base, size_base; |
651 | mtrr_type type; | 651 | mtrr_type type; |
652 | int i; | ||
652 | 653 | ||
653 | for (i = 0; i < num_var_ranges; i++) { | 654 | for (i = 0; i < num_var_ranges; i++) { |
654 | 655 | ||
@@ -676,10 +677,10 @@ static int __init mtrr_need_cleanup(void) | |||
676 | int i; | 677 | int i; |
677 | mtrr_type type; | 678 | mtrr_type type; |
678 | unsigned long size; | 679 | unsigned long size; |
679 | /* extra one for all 0 */ | 680 | /* Extra one for all 0: */ |
680 | int num[MTRR_NUM_TYPES + 1]; | 681 | int num[MTRR_NUM_TYPES + 1]; |
681 | 682 | ||
682 | /* check entries number */ | 683 | /* Check entries number: */ |
683 | memset(num, 0, sizeof(num)); | 684 | memset(num, 0, sizeof(num)); |
684 | for (i = 0; i < num_var_ranges; i++) { | 685 | for (i = 0; i < num_var_ranges; i++) { |
685 | type = range_state[i].type; | 686 | type = range_state[i].type; |
@@ -693,88 +694,86 @@ static int __init mtrr_need_cleanup(void) | |||
693 | num[type]++; | 694 | num[type]++; |
694 | } | 695 | } |
695 | 696 | ||
696 | /* check if we got UC entries */ | 697 | /* Check if we got UC entries: */ |
697 | if (!num[MTRR_TYPE_UNCACHABLE]) | 698 | if (!num[MTRR_TYPE_UNCACHABLE]) |
698 | return 0; | 699 | return 0; |
699 | 700 | ||
700 | /* check if we only had WB and UC */ | 701 | /* Check if we only had WB and UC: */ |
701 | if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] != | 702 | if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] != |
702 | num_var_ranges - num[MTRR_NUM_TYPES]) | 703 | num_var_ranges - num[MTRR_NUM_TYPES]) |
703 | return 0; | 704 | return 0; |
704 | 705 | ||
705 | return 1; | 706 | return 1; |
706 | } | 707 | } |
707 | 708 | ||
708 | static unsigned long __initdata range_sums; | 709 | static unsigned long __initdata range_sums; |
709 | static void __init mtrr_calc_range_state(u64 chunk_size, u64 gran_size, | 710 | |
710 | unsigned long extra_remove_base, | 711 | static void __init |
711 | unsigned long extra_remove_size, | 712 | mtrr_calc_range_state(u64 chunk_size, u64 gran_size, |
712 | int i) | 713 | unsigned long x_remove_base, |
714 | unsigned long x_remove_size, int i) | ||
713 | { | 715 | { |
714 | int num_reg; | ||
715 | static struct res_range range_new[RANGE_NUM]; | 716 | static struct res_range range_new[RANGE_NUM]; |
716 | static int nr_range_new; | ||
717 | unsigned long range_sums_new; | 717 | unsigned long range_sums_new; |
718 | static int nr_range_new; | ||
719 | int num_reg; | ||
718 | 720 | ||
719 | /* convert ranges to var ranges state */ | 721 | /* Convert ranges to var ranges state: */ |
720 | num_reg = x86_setup_var_mtrrs(range, nr_range, | 722 | num_reg = x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size); |
721 | chunk_size, gran_size); | ||
722 | 723 | ||
723 | /* we got new setting in range_state, check it */ | 724 | /* We got new setting in range_state, check it: */ |
724 | memset(range_new, 0, sizeof(range_new)); | 725 | memset(range_new, 0, sizeof(range_new)); |
725 | nr_range_new = x86_get_mtrr_mem_range(range_new, 0, | 726 | nr_range_new = x86_get_mtrr_mem_range(range_new, 0, |
726 | extra_remove_base, extra_remove_size); | 727 | x_remove_base, x_remove_size); |
727 | range_sums_new = sum_ranges(range_new, nr_range_new); | 728 | range_sums_new = sum_ranges(range_new, nr_range_new); |
728 | 729 | ||
729 | result[i].chunk_sizek = chunk_size >> 10; | 730 | result[i].chunk_sizek = chunk_size >> 10; |
730 | result[i].gran_sizek = gran_size >> 10; | 731 | result[i].gran_sizek = gran_size >> 10; |
731 | result[i].num_reg = num_reg; | 732 | result[i].num_reg = num_reg; |
733 | |||
732 | if (range_sums < range_sums_new) { | 734 | if (range_sums < range_sums_new) { |
733 | result[i].lose_cover_sizek = | 735 | result[i].lose_cover_sizek = (range_sums_new - range_sums) << PSHIFT; |
734 | (range_sums_new - range_sums) << PSHIFT; | ||
735 | result[i].bad = 1; | 736 | result[i].bad = 1; |
736 | } else | 737 | } else { |
737 | result[i].lose_cover_sizek = | 738 | result[i].lose_cover_sizek = (range_sums - range_sums_new) << PSHIFT; |
738 | (range_sums - range_sums_new) << PSHIFT; | 739 | } |
739 | 740 | ||
740 | /* double check it */ | 741 | /* Double check it: */ |
741 | if (!result[i].bad && !result[i].lose_cover_sizek) { | 742 | if (!result[i].bad && !result[i].lose_cover_sizek) { |
742 | if (nr_range_new != nr_range || | 743 | if (nr_range_new != nr_range || memcmp(range, range_new, sizeof(range))) |
743 | memcmp(range, range_new, sizeof(range))) | 744 | result[i].bad = 1; |
744 | result[i].bad = 1; | ||
745 | } | 745 | } |
746 | 746 | ||
747 | if (!result[i].bad && (range_sums - range_sums_new < | 747 | if (!result[i].bad && (range_sums - range_sums_new < min_loss_pfn[num_reg])) |
748 | min_loss_pfn[num_reg])) { | 748 | min_loss_pfn[num_reg] = range_sums - range_sums_new; |
749 | min_loss_pfn[num_reg] = | ||
750 | range_sums - range_sums_new; | ||
751 | } | ||
752 | } | 749 | } |
753 | 750 | ||
754 | static void __init mtrr_print_out_one_result(int i) | 751 | static void __init mtrr_print_out_one_result(int i) |
755 | { | 752 | { |
756 | char gran_factor, chunk_factor, lose_factor; | ||
757 | unsigned long gran_base, chunk_base, lose_base; | 753 | unsigned long gran_base, chunk_base, lose_base; |
754 | char gran_factor, chunk_factor, lose_factor; | ||
758 | 755 | ||
759 | gran_base = to_size_factor(result[i].gran_sizek, &gran_factor), | 756 | gran_base = to_size_factor(result[i].gran_sizek, &gran_factor); |
760 | chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor), | 757 | chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor); |
761 | lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor), | 758 | lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor); |
762 | printk(KERN_INFO "%sgran_size: %ld%c \tchunk_size: %ld%c \t", | 759 | |
763 | result[i].bad ? "*BAD*" : " ", | 760 | pr_info("%sgran_size: %ld%c \tchunk_size: %ld%c \t", |
764 | gran_base, gran_factor, chunk_base, chunk_factor); | 761 | result[i].bad ? "*BAD*" : " ", |
765 | printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ld%c\n", | 762 | gran_base, gran_factor, chunk_base, chunk_factor); |
766 | result[i].num_reg, result[i].bad ? "-" : "", | 763 | pr_cont("num_reg: %d \tlose cover RAM: %s%ld%c\n", |
767 | lose_base, lose_factor); | 764 | result[i].num_reg, result[i].bad ? "-" : "", |
765 | lose_base, lose_factor); | ||
768 | } | 766 | } |
769 | 767 | ||
770 | static int __init mtrr_search_optimal_index(void) | 768 | static int __init mtrr_search_optimal_index(void) |
771 | { | 769 | { |
772 | int i; | ||
773 | int num_reg_good; | 770 | int num_reg_good; |
774 | int index_good; | 771 | int index_good; |
772 | int i; | ||
775 | 773 | ||
776 | if (nr_mtrr_spare_reg >= num_var_ranges) | 774 | if (nr_mtrr_spare_reg >= num_var_ranges) |
777 | nr_mtrr_spare_reg = num_var_ranges - 1; | 775 | nr_mtrr_spare_reg = num_var_ranges - 1; |
776 | |||
778 | num_reg_good = -1; | 777 | num_reg_good = -1; |
779 | for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) { | 778 | for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) { |
780 | if (!min_loss_pfn[i]) | 779 | if (!min_loss_pfn[i]) |
@@ -796,24 +795,24 @@ static int __init mtrr_search_optimal_index(void) | |||
796 | return index_good; | 795 | return index_good; |
797 | } | 796 | } |
798 | 797 | ||
799 | |||
800 | int __init mtrr_cleanup(unsigned address_bits) | 798 | int __init mtrr_cleanup(unsigned address_bits) |
801 | { | 799 | { |
802 | unsigned long extra_remove_base, extra_remove_size; | 800 | unsigned long x_remove_base, x_remove_size; |
803 | unsigned long base, size, def, dummy; | 801 | unsigned long base, size, def, dummy; |
804 | mtrr_type type; | ||
805 | u64 chunk_size, gran_size; | 802 | u64 chunk_size, gran_size; |
803 | mtrr_type type; | ||
806 | int index_good; | 804 | int index_good; |
807 | int i; | 805 | int i; |
808 | 806 | ||
809 | if (!is_cpu(INTEL) || enable_mtrr_cleanup < 1) | 807 | if (!is_cpu(INTEL) || enable_mtrr_cleanup < 1) |
810 | return 0; | 808 | return 0; |
809 | |||
811 | rdmsr(MSR_MTRRdefType, def, dummy); | 810 | rdmsr(MSR_MTRRdefType, def, dummy); |
812 | def &= 0xff; | 811 | def &= 0xff; |
813 | if (def != MTRR_TYPE_UNCACHABLE) | 812 | if (def != MTRR_TYPE_UNCACHABLE) |
814 | return 0; | 813 | return 0; |
815 | 814 | ||
816 | /* get it and store it aside */ | 815 | /* Get it and store it aside: */ |
817 | memset(range_state, 0, sizeof(range_state)); | 816 | memset(range_state, 0, sizeof(range_state)); |
818 | for (i = 0; i < num_var_ranges; i++) { | 817 | for (i = 0; i < num_var_ranges; i++) { |
819 | mtrr_if->get(i, &base, &size, &type); | 818 | mtrr_if->get(i, &base, &size, &type); |
@@ -822,29 +821,28 @@ int __init mtrr_cleanup(unsigned address_bits) | |||
822 | range_state[i].type = type; | 821 | range_state[i].type = type; |
823 | } | 822 | } |
824 | 823 | ||
825 | /* check if we need handle it and can handle it */ | 824 | /* Check if we need to handle it and can handle it: */ |
826 | if (!mtrr_need_cleanup()) | 825 | if (!mtrr_need_cleanup()) |
827 | return 0; | 826 | return 0; |
828 | 827 | ||
829 | /* print original var MTRRs at first, for debugging: */ | 828 | /* Print original var MTRRs at first, for debugging: */ |
830 | printk(KERN_DEBUG "original variable MTRRs\n"); | 829 | printk(KERN_DEBUG "original variable MTRRs\n"); |
831 | print_out_mtrr_range_state(); | 830 | print_out_mtrr_range_state(); |
832 | 831 | ||
833 | memset(range, 0, sizeof(range)); | 832 | memset(range, 0, sizeof(range)); |
834 | extra_remove_size = 0; | 833 | x_remove_size = 0; |
835 | extra_remove_base = 1 << (32 - PAGE_SHIFT); | 834 | x_remove_base = 1 << (32 - PAGE_SHIFT); |
836 | if (mtrr_tom2) | 835 | if (mtrr_tom2) |
837 | extra_remove_size = | 836 | x_remove_size = (mtrr_tom2 >> PAGE_SHIFT) - x_remove_base; |
838 | (mtrr_tom2 >> PAGE_SHIFT) - extra_remove_base; | 837 | |
839 | nr_range = x86_get_mtrr_mem_range(range, 0, extra_remove_base, | 838 | nr_range = x86_get_mtrr_mem_range(range, 0, x_remove_base, x_remove_size); |
840 | extra_remove_size); | ||
841 | /* | 839 | /* |
842 | * [0, 1M) should always be coverred by var mtrr with WB | 840 | * [0, 1M) should always be covered by var mtrr with WB |
843 | * and fixed mtrrs should take effective before var mtrr for it | 841 | * and fixed mtrrs should take effect before var mtrr for it: |
844 | */ | 842 | */ |
845 | nr_range = add_range_with_merge(range, nr_range, 0, | 843 | nr_range = add_range_with_merge(range, nr_range, 0, |
846 | (1ULL<<(20 - PAGE_SHIFT)) - 1); | 844 | (1ULL<<(20 - PAGE_SHIFT)) - 1); |
847 | /* sort the ranges */ | 845 | /* Sort the ranges: */ |
848 | sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL); | 846 | sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL); |
849 | 847 | ||
850 | range_sums = sum_ranges(range, nr_range); | 848 | range_sums = sum_ranges(range, nr_range); |
@@ -854,7 +852,7 @@ int __init mtrr_cleanup(unsigned address_bits) | |||
854 | if (mtrr_chunk_size && mtrr_gran_size) { | 852 | if (mtrr_chunk_size && mtrr_gran_size) { |
855 | i = 0; | 853 | i = 0; |
856 | mtrr_calc_range_state(mtrr_chunk_size, mtrr_gran_size, | 854 | mtrr_calc_range_state(mtrr_chunk_size, mtrr_gran_size, |
857 | extra_remove_base, extra_remove_size, i); | 855 | x_remove_base, x_remove_size, i); |
858 | 856 | ||
859 | mtrr_print_out_one_result(i); | 857 | mtrr_print_out_one_result(i); |
860 | 858 | ||
@@ -880,7 +878,7 @@ int __init mtrr_cleanup(unsigned address_bits) | |||
880 | continue; | 878 | continue; |
881 | 879 | ||
882 | mtrr_calc_range_state(chunk_size, gran_size, | 880 | mtrr_calc_range_state(chunk_size, gran_size, |
883 | extra_remove_base, extra_remove_size, i); | 881 | x_remove_base, x_remove_size, i); |
884 | if (debug_print) { | 882 | if (debug_print) { |
885 | mtrr_print_out_one_result(i); | 883 | mtrr_print_out_one_result(i); |
886 | printk(KERN_INFO "\n"); | 884 | printk(KERN_INFO "\n"); |
@@ -890,7 +888,7 @@ int __init mtrr_cleanup(unsigned address_bits) | |||
890 | } | 888 | } |
891 | } | 889 | } |
892 | 890 | ||
893 | /* try to find the optimal index */ | 891 | /* Try to find the optimal index: */ |
894 | index_good = mtrr_search_optimal_index(); | 892 | index_good = mtrr_search_optimal_index(); |
895 | 893 | ||
896 | if (index_good != -1) { | 894 | if (index_good != -1) { |
@@ -898,7 +896,7 @@ int __init mtrr_cleanup(unsigned address_bits) | |||
898 | i = index_good; | 896 | i = index_good; |
899 | mtrr_print_out_one_result(i); | 897 | mtrr_print_out_one_result(i); |
900 | 898 | ||
901 | /* convert ranges to var ranges state */ | 899 | /* Convert ranges to var ranges state: */ |
902 | chunk_size = result[i].chunk_sizek; | 900 | chunk_size = result[i].chunk_sizek; |
903 | chunk_size <<= 10; | 901 | chunk_size <<= 10; |
904 | gran_size = result[i].gran_sizek; | 902 | gran_size = result[i].gran_sizek; |
@@ -941,8 +939,8 @@ early_param("disable_mtrr_trim", disable_mtrr_trim_setup); | |||
941 | * Note this won't check if the MTRRs < 4GB where the magic bit doesn't | 939 | * Note this won't check if the MTRRs < 4GB where the magic bit doesn't |
942 | * apply to are wrong, but so far we don't know of any such case in the wild. | 940 | * apply to are wrong, but so far we don't know of any such case in the wild. |
943 | */ | 941 | */ |
944 | #define Tom2Enabled (1U << 21) | 942 | #define Tom2Enabled (1U << 21) |
945 | #define Tom2ForceMemTypeWB (1U << 22) | 943 | #define Tom2ForceMemTypeWB (1U << 22) |
946 | 944 | ||
947 | int __init amd_special_default_mtrr(void) | 945 | int __init amd_special_default_mtrr(void) |
948 | { | 946 | { |
@@ -952,7 +950,7 @@ int __init amd_special_default_mtrr(void) | |||
952 | return 0; | 950 | return 0; |
953 | if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11) | 951 | if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11) |
954 | return 0; | 952 | return 0; |
955 | /* In case some hypervisor doesn't pass SYSCFG through */ | 953 | /* In case some hypervisor doesn't pass SYSCFG through: */ |
956 | if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0) | 954 | if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0) |
957 | return 0; | 955 | return 0; |
958 | /* | 956 | /* |
@@ -965,19 +963,21 @@ int __init amd_special_default_mtrr(void) | |||
965 | return 0; | 963 | return 0; |
966 | } | 964 | } |
967 | 965 | ||
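amd_special_default_mtrr() ultimately asks whether both SYSCFG bits above are set, meaning everything between 4GB and TOM2 is forced write-back. A sketch of that test on a sample MSR value (the rdmsr plumbing is omitted; the bit positions are the ones #defined above):

#include <stdio.h>

#define Tom2Enabled         (1U << 21)
#define Tom2ForceMemTypeWB  (1U << 22)

static int amd_tom2_forces_wb(unsigned int syscfg_lo)
{
    /* Both "TOM2 enabled" and "force WB above TOM2" must be set: */
    return (syscfg_lo & (Tom2Enabled | Tom2ForceMemTypeWB)) ==
           (Tom2Enabled | Tom2ForceMemTypeWB);
}

int main(void)
{
    unsigned int l = Tom2Enabled | Tom2ForceMemTypeWB;  /* sample MSR value */
    printf("special default: %d\n", amd_tom2_forces_wb(l));  /* 1 */
    return 0;
}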
968 | static u64 __init real_trim_memory(unsigned long start_pfn, | 966 | static u64 __init |
969 | unsigned long limit_pfn) | 967 | real_trim_memory(unsigned long start_pfn, unsigned long limit_pfn) |
970 | { | 968 | { |
971 | u64 trim_start, trim_size; | 969 | u64 trim_start, trim_size; |
970 | |||
972 | trim_start = start_pfn; | 971 | trim_start = start_pfn; |
973 | trim_start <<= PAGE_SHIFT; | 972 | trim_start <<= PAGE_SHIFT; |
973 | |||
974 | trim_size = limit_pfn; | 974 | trim_size = limit_pfn; |
975 | trim_size <<= PAGE_SHIFT; | 975 | trim_size <<= PAGE_SHIFT; |
976 | trim_size -= trim_start; | 976 | trim_size -= trim_start; |
977 | 977 | ||
978 | return e820_update_range(trim_start, trim_size, E820_RAM, | 978 | return e820_update_range(trim_start, trim_size, E820_RAM, E820_RESERVED); |
979 | E820_RESERVED); | ||
980 | } | 979 | } |
980 | |||
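real_trim_memory() only converts a pfn interval into a byte interval before handing it to e820_update_range(). The shift arithmetic, as a tiny standalone example (4KiB pages assumed):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12    /* 4KiB pages assumed */

int main(void)
{
    unsigned long start_pfn = 0x100;  /* 1MiB */
    unsigned long limit_pfn = 0x200;  /* 2MiB */

    uint64_t trim_start = (uint64_t)start_pfn << PAGE_SHIFT;
    uint64_t trim_size = ((uint64_t)limit_pfn << PAGE_SHIFT) - trim_start;

    /* e820_update_range() would now flip [trim_start, trim_start+trim_size)
     * from E820_RAM to E820_RESERVED; here we just print the interval: */
    printf("trim %#llx-%#llx\n",
           (unsigned long long)trim_start,
           (unsigned long long)(trim_start + trim_size));
    return 0;
}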
981 | /** | 981 | /** |
982 | * mtrr_trim_uncached_memory - trim RAM not covered by MTRRs | 982 | * mtrr_trim_uncached_memory - trim RAM not covered by MTRRs |
983 | * @end_pfn: ending page frame number | 983 | * @end_pfn: ending page frame number |
@@ -985,7 +985,7 @@ static u64 __init real_trim_memory(unsigned long start_pfn, | |||
985 | * Some buggy BIOSes don't setup the MTRRs properly for systems with certain | 985 | * Some buggy BIOSes don't setup the MTRRs properly for systems with certain |
986 | * memory configurations. This routine checks that the highest MTRR matches | 986 | * memory configurations. This routine checks that the highest MTRR matches |
987 | * the end of memory, to make sure the MTRRs having a write back type cover | 987 | * the end of memory, to make sure the MTRRs having a write back type cover |
988 | * all of the memory the kernel is intending to use. If not, it'll trim any | 988 | * all of the memory the kernel is intending to use. If not, it'll trim any |
989 | * memory off the end by adjusting end_pfn, removing it from the kernel's | 989 | * memory off the end by adjusting end_pfn, removing it from the kernel's |
990 | * allocation pools, warning the user with an obnoxious message. | 990 | * allocation pools, warning the user with an obnoxious message. |
991 | */ | 991 | */ |
@@ -994,21 +994,22 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn) | |||
994 | unsigned long i, base, size, highest_pfn = 0, def, dummy; | 994 | unsigned long i, base, size, highest_pfn = 0, def, dummy; |
995 | mtrr_type type; | 995 | mtrr_type type; |
996 | u64 total_trim_size; | 996 | u64 total_trim_size; |
997 | |||
998 | /* extra one for all 0 */ | 997 | /* extra one for all 0 */ |
999 | int num[MTRR_NUM_TYPES + 1]; | 998 | int num[MTRR_NUM_TYPES + 1]; |
999 | |||
1000 | /* | 1000 | /* |
1001 | * Make sure we only trim uncachable memory on machines that | 1001 | * Make sure we only trim uncachable memory on machines that |
1002 | * support the Intel MTRR architecture: | 1002 | * support the Intel MTRR architecture: |
1003 | */ | 1003 | */ |
1004 | if (!is_cpu(INTEL) || disable_mtrr_trim) | 1004 | if (!is_cpu(INTEL) || disable_mtrr_trim) |
1005 | return 0; | 1005 | return 0; |
1006 | |||
1006 | rdmsr(MSR_MTRRdefType, def, dummy); | 1007 | rdmsr(MSR_MTRRdefType, def, dummy); |
1007 | def &= 0xff; | 1008 | def &= 0xff; |
1008 | if (def != MTRR_TYPE_UNCACHABLE) | 1009 | if (def != MTRR_TYPE_UNCACHABLE) |
1009 | return 0; | 1010 | return 0; |
1010 | 1011 | ||
1011 | /* get it and store it aside */ | 1012 | /* Get it and store it aside: */ |
1012 | memset(range_state, 0, sizeof(range_state)); | 1013 | memset(range_state, 0, sizeof(range_state)); |
1013 | for (i = 0; i < num_var_ranges; i++) { | 1014 | for (i = 0; i < num_var_ranges; i++) { |
1014 | mtrr_if->get(i, &base, &size, &type); | 1015 | mtrr_if->get(i, &base, &size, &type); |
@@ -1017,7 +1018,7 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn) | |||
1017 | range_state[i].type = type; | 1018 | range_state[i].type = type; |
1018 | } | 1019 | } |
1019 | 1020 | ||
1020 | /* Find highest cached pfn */ | 1021 | /* Find highest cached pfn: */ |
1021 | for (i = 0; i < num_var_ranges; i++) { | 1022 | for (i = 0; i < num_var_ranges; i++) { |
1022 | type = range_state[i].type; | 1023 | type = range_state[i].type; |
1023 | if (type != MTRR_TYPE_WRBACK) | 1024 | if (type != MTRR_TYPE_WRBACK) |
@@ -1028,13 +1029,13 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn) | |||
1028 | highest_pfn = base + size; | 1029 | highest_pfn = base + size; |
1029 | } | 1030 | } |
1030 | 1031 | ||
1031 | /* kvm/qemu doesn't have mtrr set right, don't trim them all */ | 1032 | /* kvm/qemu doesn't have mtrr set right, don't trim them all: */ |
1032 | if (!highest_pfn) { | 1033 | if (!highest_pfn) { |
1033 | printk(KERN_INFO "CPU MTRRs all blank - virtualized system.\n"); | 1034 | printk(KERN_INFO "CPU MTRRs all blank - virtualized system.\n"); |
1034 | return 0; | 1035 | return 0; |
1035 | } | 1036 | } |
1036 | 1037 | ||
1037 | /* check entries number */ | 1038 | /* Check entries number: */ |
1038 | memset(num, 0, sizeof(num)); | 1039 | memset(num, 0, sizeof(num)); |
1039 | for (i = 0; i < num_var_ranges; i++) { | 1040 | for (i = 0; i < num_var_ranges; i++) { |
1040 | type = range_state[i].type; | 1041 | type = range_state[i].type; |
@@ -1046,11 +1047,11 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn) | |||
1046 | num[type]++; | 1047 | num[type]++; |
1047 | } | 1048 | } |
1048 | 1049 | ||
1049 | /* no entry for WB? */ | 1050 | /* No entry for WB? */ |
1050 | if (!num[MTRR_TYPE_WRBACK]) | 1051 | if (!num[MTRR_TYPE_WRBACK]) |
1051 | return 0; | 1052 | return 0; |
1052 | 1053 | ||
1053 | /* check if we only had WB and UC */ | 1054 | /* Check if we only had WB and UC: */ |
1054 | if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] != | 1055 | if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] != |
1055 | num_var_ranges - num[MTRR_NUM_TYPES]) | 1056 | num_var_ranges - num[MTRR_NUM_TYPES]) |
1056 | return 0; | 1057 | return 0; |
@@ -1066,31 +1067,31 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn) | |||
1066 | } | 1067 | } |
1067 | nr_range = x86_get_mtrr_mem_range(range, nr_range, 0, 0); | 1068 | nr_range = x86_get_mtrr_mem_range(range, nr_range, 0, 0); |
1068 | 1069 | ||
1070 | /* Check the head: */ | ||
1069 | total_trim_size = 0; | 1071 | total_trim_size = 0; |
1070 | /* check the head */ | ||
1071 | if (range[0].start) | 1072 | if (range[0].start) |
1072 | total_trim_size += real_trim_memory(0, range[0].start); | 1073 | total_trim_size += real_trim_memory(0, range[0].start); |
1073 | /* check the holes */ | 1074 | |
1075 | /* Check the holes: */ | ||
1074 | for (i = 0; i < nr_range - 1; i++) { | 1076 | for (i = 0; i < nr_range - 1; i++) { |
1075 | if (range[i].end + 1 < range[i+1].start) | 1077 | if (range[i].end + 1 < range[i+1].start) |
1076 | total_trim_size += real_trim_memory(range[i].end + 1, | 1078 | total_trim_size += real_trim_memory(range[i].end + 1, |
1077 | range[i+1].start); | 1079 | range[i+1].start); |
1078 | } | 1080 | } |
1079 | /* check the top */ | 1081 | |
1082 | /* Check the top: */ | ||
1080 | i = nr_range - 1; | 1083 | i = nr_range - 1; |
1081 | if (range[i].end + 1 < end_pfn) | 1084 | if (range[i].end + 1 < end_pfn) |
1082 | total_trim_size += real_trim_memory(range[i].end + 1, | 1085 | total_trim_size += real_trim_memory(range[i].end + 1, |
1083 | end_pfn); | 1086 | end_pfn); |
1084 | 1087 | ||
1085 | if (total_trim_size) { | 1088 | if (total_trim_size) { |
1086 | printk(KERN_WARNING "WARNING: BIOS bug: CPU MTRRs don't cover" | 1089 | pr_warning("WARNING: BIOS bug: CPU MTRRs don't cover all of memory, losing %lluMB of RAM.\n", total_trim_size >> 20); |
1087 | " all of memory, losing %lluMB of RAM.\n", | ||
1088 | total_trim_size >> 20); | ||
1089 | 1090 | ||
1090 | if (!changed_by_mtrr_cleanup) | 1091 | if (!changed_by_mtrr_cleanup) |
1091 | WARN_ON(1); | 1092 | WARN_ON(1); |
1092 | 1093 | ||
1093 | printk(KERN_INFO "update e820 for mtrr\n"); | 1094 | pr_info("update e820 for mtrr\n"); |
1094 | update_e820(); | 1095 | update_e820(); |
1095 | 1096 | ||
1096 | return 1; | 1097 | return 1; |
@@ -1098,4 +1099,3 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn) | |||
1098 | 1099 | ||
1099 | return 0; | 1100 | return 0; |
1100 | } | 1101 | } |
1101 | |||
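The head/holes/top scan in mtrr_trim_uncached_memory() walks the sorted, WB-covered ranges and trims every pfn interval they leave exposed. A compact model of that scan, returning the number of uncovered pages rather than calling real_trim_memory():

struct res_range {
    unsigned long start;
    unsigned long end;    /* inclusive pfn range */
};

/* Assumes nr_range >= 1 and the ranges sorted, as in the code above. */
static unsigned long trim_scan(struct res_range *range, int nr_range,
                               unsigned long end_pfn)
{
    unsigned long total = 0;
    int i;

    /* Head: RAM below the first covered pfn: */
    if (range[0].start)
        total += range[0].start;

    /* Holes: gaps between consecutive covered ranges: */
    for (i = 0; i < nr_range - 1; i++) {
        if (range[i].end + 1 < range[i + 1].start)
            total += range[i + 1].start - (range[i].end + 1);
    }

    /* Top: RAM above the last covered pfn: */
    if (range[nr_range - 1].end + 1 < end_pfn)
        total += end_pfn - (range[nr_range - 1].end + 1);

    return total;
}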
diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c index ff14c320040c..228d982ce09c 100644 --- a/arch/x86/kernel/cpu/mtrr/cyrix.c +++ b/arch/x86/kernel/cpu/mtrr/cyrix.c | |||
@@ -1,38 +1,40 @@ | |||
1 | #include <linux/init.h> | 1 | #include <linux/init.h> |
2 | #include <linux/io.h> | ||
2 | #include <linux/mm.h> | 3 | #include <linux/mm.h> |
3 | #include <asm/mtrr.h> | 4 | |
4 | #include <asm/msr.h> | ||
5 | #include <asm/io.h> | ||
6 | #include <asm/processor-cyrix.h> | 5 | #include <asm/processor-cyrix.h> |
7 | #include <asm/processor-flags.h> | 6 | #include <asm/processor-flags.h> |
7 | #include <asm/mtrr.h> | ||
8 | #include <asm/msr.h> | ||
9 | |||
8 | #include "mtrr.h" | 10 | #include "mtrr.h" |
9 | 11 | ||
10 | static void | 12 | static void |
11 | cyrix_get_arr(unsigned int reg, unsigned long *base, | 13 | cyrix_get_arr(unsigned int reg, unsigned long *base, |
12 | unsigned long *size, mtrr_type * type) | 14 | unsigned long *size, mtrr_type * type) |
13 | { | 15 | { |
14 | unsigned long flags; | ||
15 | unsigned char arr, ccr3, rcr, shift; | 16 | unsigned char arr, ccr3, rcr, shift; |
17 | unsigned long flags; | ||
16 | 18 | ||
17 | arr = CX86_ARR_BASE + (reg << 1) + reg; /* avoid multiplication by 3 */ | 19 | arr = CX86_ARR_BASE + (reg << 1) + reg; /* avoid multiplication by 3 */ |
18 | 20 | ||
19 | /* Save flags and disable interrupts */ | ||
20 | local_irq_save(flags); | 21 | local_irq_save(flags); |
21 | 22 | ||
22 | ccr3 = getCx86(CX86_CCR3); | 23 | ccr3 = getCx86(CX86_CCR3); |
23 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ | 24 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ |
24 | ((unsigned char *) base)[3] = getCx86(arr); | 25 | ((unsigned char *)base)[3] = getCx86(arr); |
25 | ((unsigned char *) base)[2] = getCx86(arr + 1); | 26 | ((unsigned char *)base)[2] = getCx86(arr + 1); |
26 | ((unsigned char *) base)[1] = getCx86(arr + 2); | 27 | ((unsigned char *)base)[1] = getCx86(arr + 2); |
27 | rcr = getCx86(CX86_RCR_BASE + reg); | 28 | rcr = getCx86(CX86_RCR_BASE + reg); |
28 | setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ | 29 | setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ |
29 | 30 | ||
30 | /* Enable interrupts if it was enabled previously */ | ||
31 | local_irq_restore(flags); | 31 | local_irq_restore(flags); |
32 | |||
32 | shift = ((unsigned char *) base)[1] & 0x0f; | 33 | shift = ((unsigned char *) base)[1] & 0x0f; |
33 | *base >>= PAGE_SHIFT; | 34 | *base >>= PAGE_SHIFT; |
34 | 35 | ||
35 | /* Power of two, at least 4K on ARR0-ARR6, 256K on ARR7 | 36 | /* |
37 | * Power of two, at least 4K on ARR0-ARR6, 256K on ARR7 | ||
36 | * Note: shift==0xf means 4G, this is unsupported. | 38 | * Note: shift==0xf means 4G, this is unsupported. |
37 | */ | 39 | */ |
38 | if (shift) | 40 | if (shift) |
@@ -76,17 +78,20 @@ cyrix_get_arr(unsigned int reg, unsigned long *base, | |||
76 | } | 78 | } |
77 | } | 79 | } |
78 | 80 | ||
81 | /* | ||
82 | * cyrix_get_free_region - get a free ARR. | ||
83 | * | ||
84 | * @base: the starting (base) address of the region. | ||
85 | * @size: the size (in bytes) of the region. | ||
86 | * | ||
87 | * Returns: the index of the region on success, else -ENOSPC on error. | ||
88 | */ | ||
79 | static int | 89 | static int |
80 | cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg) | 90 | cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg) |
81 | /* [SUMMARY] Get a free ARR. | ||
82 | <base> The starting (base) address of the region. | ||
83 | <size> The size (in bytes) of the region. | ||
84 | [RETURNS] The index of the region on success, else -1 on error. | ||
85 | */ | ||
86 | { | 91 | { |
87 | int i; | ||
88 | mtrr_type ltype; | ||
89 | unsigned long lbase, lsize; | 92 | unsigned long lbase, lsize; |
93 | mtrr_type ltype; | ||
94 | int i; | ||
90 | 95 | ||
91 | switch (replace_reg) { | 96 | switch (replace_reg) { |
92 | case 7: | 97 | case 7: |
@@ -107,14 +112,17 @@ cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg) | |||
107 | cyrix_get_arr(7, &lbase, &lsize, <ype); | 112 | cyrix_get_arr(7, &lbase, &lsize, <ype); |
108 | if (lsize == 0) | 113 | if (lsize == 0) |
109 | return 7; | 114 | return 7; |
110 | /* Else try ARR0-ARR6 first */ | 115 | /* Else try ARR0-ARR6 first */ |
111 | } else { | 116 | } else { |
112 | for (i = 0; i < 7; i++) { | 117 | for (i = 0; i < 7; i++) { |
113 | cyrix_get_arr(i, &lbase, &lsize, <ype); | 118 | cyrix_get_arr(i, &lbase, &lsize, <ype); |
114 | if (lsize == 0) | 119 | if (lsize == 0) |
115 | return i; | 120 | return i; |
116 | } | 121 | } |
117 | /* ARR0-ARR6 isn't free, try ARR7 but its size must be at least 256K */ | 122 | /* |
123 | * ARR0-ARR6 aren't free, | ||
124 | * so try ARR7, but its size must be at least 256K | ||
125 | */ | ||
118 | cyrix_get_arr(i, &lbase, &lsize, <ype); | 126 | cyrix_get_arr(i, &lbase, &lsize, <ype); |
119 | if ((lsize == 0) && (size >= 0x40)) | 127 | if ((lsize == 0) && (size >= 0x40)) |
120 | return i; | 128 | return i; |
@@ -122,21 +130,22 @@ cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg) | |||
122 | return -ENOSPC; | 130 | return -ENOSPC; |
123 | } | 131 | } |
124 | 132 | ||
125 | static u32 cr4 = 0; | 133 | static u32 cr4, ccr3; |
126 | static u32 ccr3; | ||
127 | 134 | ||
128 | static void prepare_set(void) | 135 | static void prepare_set(void) |
129 | { | 136 | { |
130 | u32 cr0; | 137 | u32 cr0; |
131 | 138 | ||
132 | /* Save value of CR4 and clear Page Global Enable (bit 7) */ | 139 | /* Save value of CR4 and clear Page Global Enable (bit 7) */ |
133 | if ( cpu_has_pge ) { | 140 | if (cpu_has_pge) { |
134 | cr4 = read_cr4(); | 141 | cr4 = read_cr4(); |
135 | write_cr4(cr4 & ~X86_CR4_PGE); | 142 | write_cr4(cr4 & ~X86_CR4_PGE); |
136 | } | 143 | } |
137 | 144 | ||
138 | /* Disable and flush caches. Note that wbinvd flushes the TLBs as | 145 | /* |
139 | a side-effect */ | 146 | * Disable and flush caches. |
147 | * Note that wbinvd flushes the TLBs as a side-effect | ||
148 | */ | ||
140 | cr0 = read_cr0() | X86_CR0_CD; | 149 | cr0 = read_cr0() | X86_CR0_CD; |
141 | wbinvd(); | 150 | wbinvd(); |
142 | write_cr0(cr0); | 151 | write_cr0(cr0); |
@@ -147,22 +156,21 @@ static void prepare_set(void) | |||
147 | 156 | ||
148 | /* Cyrix ARRs - everything else was excluded at the top */ | 157 | /* Cyrix ARRs - everything else was excluded at the top */ |
149 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); | 158 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); |
150 | |||
151 | } | 159 | } |
152 | 160 | ||
153 | static void post_set(void) | 161 | static void post_set(void) |
154 | { | 162 | { |
155 | /* Flush caches and TLBs */ | 163 | /* Flush caches and TLBs */ |
156 | wbinvd(); | 164 | wbinvd(); |
157 | 165 | ||
158 | /* Cyrix ARRs - everything else was excluded at the top */ | 166 | /* Cyrix ARRs - everything else was excluded at the top */ |
159 | setCx86(CX86_CCR3, ccr3); | 167 | setCx86(CX86_CCR3, ccr3); |
160 | 168 | ||
161 | /* Enable caches */ | 169 | /* Enable caches */ |
162 | write_cr0(read_cr0() & 0xbfffffff); | 170 | write_cr0(read_cr0() & 0xbfffffff); |
163 | 171 | ||
164 | /* Restore value of CR4 */ | 172 | /* Restore value of CR4 */ |
165 | if ( cpu_has_pge ) | 173 | if (cpu_has_pge) |
166 | write_cr4(cr4); | 174 | write_cr4(cr4); |
167 | } | 175 | } |
168 | 176 | ||
@@ -178,7 +186,8 @@ static void cyrix_set_arr(unsigned int reg, unsigned long base, | |||
178 | size >>= 6; | 186 | size >>= 6; |
179 | 187 | ||
180 | size &= 0x7fff; /* make sure arr_size <= 14 */ | 188 | size &= 0x7fff; /* make sure arr_size <= 14 */ |
181 | for (arr_size = 0; size; arr_size++, size >>= 1) ; | 189 | for (arr_size = 0; size; arr_size++, size >>= 1) |
190 | ; | ||
182 | 191 | ||
183 | if (reg < 7) { | 192 | if (reg < 7) { |
184 | switch (type) { | 193 | switch (type) { |
@@ -215,18 +224,18 @@ static void cyrix_set_arr(unsigned int reg, unsigned long base, | |||
215 | prepare_set(); | 224 | prepare_set(); |
216 | 225 | ||
217 | base <<= PAGE_SHIFT; | 226 | base <<= PAGE_SHIFT; |
218 | setCx86(arr, ((unsigned char *) &base)[3]); | 227 | setCx86(arr + 0, ((unsigned char *)&base)[3]); |
219 | setCx86(arr + 1, ((unsigned char *) &base)[2]); | 228 | setCx86(arr + 1, ((unsigned char *)&base)[2]); |
220 | setCx86(arr + 2, (((unsigned char *) &base)[1]) | arr_size); | 229 | setCx86(arr + 2, (((unsigned char *)&base)[1]) | arr_size); |
221 | setCx86(CX86_RCR_BASE + reg, arr_type); | 230 | setCx86(CX86_RCR_BASE + reg, arr_type); |
222 | 231 | ||
223 | post_set(); | 232 | post_set(); |
224 | } | 233 | } |
225 | 234 | ||
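The three setCx86() stores above scatter the top bytes of the (already page-shifted) base address across consecutive ARR config registers. A little-endian userspace model of that packing; the register number and size code are illustrative, and setCx86() is faked as an array write:

#include <stdio.h>

static unsigned char cfg[0x100];    /* fake Cyrix config-register space */

static void setCx86(unsigned char reg, unsigned char val)
{
    cfg[reg] = val;
}

int main(void)
{
    unsigned char arr = 0xC4;           /* e.g. CX86_ARR_BASE for ARR0 */
    unsigned long base = 0x000A0000;    /* byte address, already shifted */
    unsigned char arr_size = 0x9;       /* illustrative size code */

    /* Little-endian byte picking, as in the x86-only kernel code: */
    setCx86(arr + 0, ((unsigned char *)&base)[3]);  /* bits 31-24 */
    setCx86(arr + 1, ((unsigned char *)&base)[2]);  /* bits 23-16 */
    setCx86(arr + 2, ((unsigned char *)&base)[1] | arr_size);

    printf("%02x %02x %02x\n", cfg[arr], cfg[arr + 1], cfg[arr + 2]);
    return 0;
}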
226 | typedef struct { | 235 | typedef struct { |
227 | unsigned long base; | 236 | unsigned long base; |
228 | unsigned long size; | 237 | unsigned long size; |
229 | mtrr_type type; | 238 | mtrr_type type; |
230 | } arr_state_t; | 239 | } arr_state_t; |
231 | 240 | ||
232 | static arr_state_t arr_state[8] = { | 241 | static arr_state_t arr_state[8] = { |
@@ -247,16 +256,17 @@ static void cyrix_set_all(void) | |||
247 | setCx86(CX86_CCR0 + i, ccr_state[i]); | 256 | setCx86(CX86_CCR0 + i, ccr_state[i]); |
248 | for (; i < 7; i++) | 257 | for (; i < 7; i++) |
249 | setCx86(CX86_CCR4 + i, ccr_state[i]); | 258 | setCx86(CX86_CCR4 + i, ccr_state[i]); |
250 | for (i = 0; i < 8; i++) | 259 | |
251 | cyrix_set_arr(i, arr_state[i].base, | 260 | for (i = 0; i < 8; i++) { |
261 | cyrix_set_arr(i, arr_state[i].base, | ||
252 | arr_state[i].size, arr_state[i].type); | 262 | arr_state[i].size, arr_state[i].type); |
263 | } | ||
253 | 264 | ||
254 | post_set(); | 265 | post_set(); |
255 | } | 266 | } |
256 | 267 | ||
257 | static struct mtrr_ops cyrix_mtrr_ops = { | 268 | static struct mtrr_ops cyrix_mtrr_ops = { |
258 | .vendor = X86_VENDOR_CYRIX, | 269 | .vendor = X86_VENDOR_CYRIX, |
259 | // .init = cyrix_arr_init, | ||
260 | .set_all = cyrix_set_all, | 270 | .set_all = cyrix_set_all, |
261 | .set = cyrix_set_arr, | 271 | .set = cyrix_set_arr, |
262 | .get = cyrix_get_arr, | 272 | .get = cyrix_get_arr, |
@@ -270,5 +280,3 @@ int __init cyrix_init_mtrr(void) | |||
270 | set_mtrr_ops(&cyrix_mtrr_ops); | 280 | set_mtrr_ops(&cyrix_mtrr_ops); |
271 | return 0; | 281 | return 0; |
272 | } | 282 | } |
273 | |||
274 | //arch_initcall(cyrix_init_mtrr); | ||
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index 0543f69f0b27..55da0c5f68dd 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c | |||
@@ -1,28 +1,34 @@ | |||
1 | /* This only handles 32bit MTRR on 32bit hosts. This is strictly wrong | 1 | /* |
2 | because MTRRs can span upto 40 bits (36bits on most modern x86) */ | 2 | * This only handles 32bit MTRR on 32bit hosts. This is strictly wrong |
3 | * because MTRRs can span up to 40 bits (36 bits on most modern x86) | ||
4 | */ | ||
5 | #define DEBUG | ||
6 | |||
7 | #include <linux/module.h> | ||
3 | #include <linux/init.h> | 8 | #include <linux/init.h> |
4 | #include <linux/slab.h> | 9 | #include <linux/slab.h> |
10 | #include <linux/io.h> | ||
5 | #include <linux/mm.h> | 11 | #include <linux/mm.h> |
6 | #include <linux/module.h> | 12 | |
7 | #include <asm/io.h> | ||
8 | #include <asm/mtrr.h> | ||
9 | #include <asm/msr.h> | ||
10 | #include <asm/system.h> | ||
11 | #include <asm/cpufeature.h> | ||
12 | #include <asm/processor-flags.h> | 13 | #include <asm/processor-flags.h> |
14 | #include <asm/cpufeature.h> | ||
13 | #include <asm/tlbflush.h> | 15 | #include <asm/tlbflush.h> |
16 | #include <asm/system.h> | ||
17 | #include <asm/mtrr.h> | ||
18 | #include <asm/msr.h> | ||
14 | #include <asm/pat.h> | 19 | #include <asm/pat.h> |
20 | |||
15 | #include "mtrr.h" | 21 | #include "mtrr.h" |
16 | 22 | ||
17 | struct fixed_range_block { | 23 | struct fixed_range_block { |
18 | int base_msr; /* start address of an MTRR block */ | 24 | int base_msr; /* start address of an MTRR block */ |
19 | int ranges; /* number of MTRRs in this block */ | 25 | int ranges; /* number of MTRRs in this block */ |
20 | }; | 26 | }; |
21 | 27 | ||
22 | static struct fixed_range_block fixed_range_blocks[] = { | 28 | static struct fixed_range_block fixed_range_blocks[] = { |
23 | { MSR_MTRRfix64K_00000, 1 }, /* one 64k MTRR */ | 29 | { MSR_MTRRfix64K_00000, 1 }, /* one 64k MTRR */ |
24 | { MSR_MTRRfix16K_80000, 2 }, /* two 16k MTRRs */ | 30 | { MSR_MTRRfix16K_80000, 2 }, /* two 16k MTRRs */ |
25 | { MSR_MTRRfix4K_C0000, 8 }, /* eight 4k MTRRs */ | 31 | { MSR_MTRRfix4K_C0000, 8 }, /* eight 4k MTRRs */ |
26 | {} | 32 | {} |
27 | }; | 33 | }; |
28 | 34 | ||
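
An aside on the layout encoded in fixed_range_blocks[]: the eleven fixed-range MSRs each pack eight one-byte memory types, so together they describe the first megabyte as 8x64K, 16x16K and 64x4K chunks, the same 0x00000/0x80000/0xC0000 split that print_mtrr_state() walks further down in this file. A minimal userspace sketch of the address-to-slot mapping (the helper name is made up, and 4K pages are assumed):

#include <stdio.h>

/* Map a physical address below 1 MiB to its fixed-range MTRR slot. */
static void fixed_slot(unsigned long addr)
{
	if (addr < 0x80000)		/* one 64K MSR: 0x00000-0x7FFFF */
		printf("%#07lx -> MTRRfix64K_00000, byte %lu\n",
		       addr, addr / 0x10000);
	else if (addr < 0xC0000)	/* two 16K MSRs: 0x80000-0xBFFFF */
		printf("%#07lx -> MTRRfix16K #%lu, byte %lu\n",
		       addr, (addr - 0x80000) / 0x20000,
		       (addr - 0x80000) % 0x20000 / 0x4000);
	else if (addr < 0x100000)	/* eight 4K MSRs: 0xC0000-0xFFFFF */
		printf("%#07lx -> MTRRfix4K #%lu, byte %lu\n",
		       addr, (addr - 0xC0000) / 0x8000,
		       (addr - 0xC0000) % 0x8000 / 0x1000);
}

int main(void)
{
	fixed_slot(0x9fc00);	/* top of conventional memory */
	fixed_slot(0xc8000);	/* option-ROM area */
	return 0;
}
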
@@ -30,10 +36,10 @@ static unsigned long smp_changes_mask; | |||
30 | static int mtrr_state_set; | 36 | static int mtrr_state_set; |
31 | u64 mtrr_tom2; | 37 | u64 mtrr_tom2; |
32 | 38 | ||
33 | struct mtrr_state_type mtrr_state = {}; | 39 | struct mtrr_state_type mtrr_state; |
34 | EXPORT_SYMBOL_GPL(mtrr_state); | 40 | EXPORT_SYMBOL_GPL(mtrr_state); |
35 | 41 | ||
36 | /** | 42 | /* |
37 | * BIOS is expected to clear MtrrFixDramModEn bit, see for example | 43 | * BIOS is expected to clear MtrrFixDramModEn bit, see for example |
38 | * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD | 44 | * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD |
39 | * Opteron Processors" (26094 Rev. 3.30 February 2006), section | 45 | * Opteron Processors" (26094 Rev. 3.30 February 2006), section |
@@ -104,9 +110,8 @@ u8 mtrr_type_lookup(u64 start, u64 end) | |||
104 | * Look for multiple ranges matching this address and pick type | 110 | * Look for multiple ranges matching this address and pick type |
105 | * as per MTRR precedence | 111 | * as per MTRR precedence |
106 | */ | 112 | */ |
107 | if (!(mtrr_state.enabled & 2)) { | 113 | if (!(mtrr_state.enabled & 2)) |
108 | return mtrr_state.def_type; | 114 | return mtrr_state.def_type; |
109 | } | ||
110 | 115 | ||
111 | prev_match = 0xFF; | 116 | prev_match = 0xFF; |
112 | for (i = 0; i < num_var_ranges; ++i) { | 117 | for (i = 0; i < num_var_ranges; ++i) { |
@@ -125,9 +130,8 @@ u8 mtrr_type_lookup(u64 start, u64 end) | |||
125 | if (start_state != end_state) | 130 | if (start_state != end_state) |
126 | return 0xFE; | 131 | return 0xFE; |
127 | 132 | ||
128 | if ((start & mask) != (base & mask)) { | 133 | if ((start & mask) != (base & mask)) |
129 | continue; | 134 | continue; |
130 | } | ||
131 | 135 | ||
132 | curr_match = mtrr_state.var_ranges[i].base_lo & 0xff; | 136 | curr_match = mtrr_state.var_ranges[i].base_lo & 0xff; |
133 | if (prev_match == 0xFF) { | 137 | if (prev_match == 0xFF) { |
@@ -148,9 +152,8 @@ u8 mtrr_type_lookup(u64 start, u64 end) | |||
148 | curr_match = MTRR_TYPE_WRTHROUGH; | 152 | curr_match = MTRR_TYPE_WRTHROUGH; |
149 | } | 153 | } |
150 | 154 | ||
151 | if (prev_match != curr_match) { | 155 | if (prev_match != curr_match) |
152 | return MTRR_TYPE_UNCACHABLE; | 156 | return MTRR_TYPE_UNCACHABLE; |
153 | } | ||
154 | } | 157 | } |
155 | 158 | ||
156 | if (mtrr_tom2) { | 159 | if (mtrr_tom2) { |
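
The overlap handling in these hunks implements the documented MTRR precedence rule: UC in any matching range wins, a WT/WB overlap degrades to WT, and any other disagreement is treated as UC. A standalone sketch of just that rule; combine() is a hypothetical helper, and the numeric type values match the mtrr_strings[] table in if.c below:

#include <stdio.h>

enum { UC = 0, WC = 1, WT = 4, WP = 5, WB = 6 };	/* MTRR_TYPE_* values */

/* Resolve the effective type of two overlapping variable ranges. */
static int combine(int prev, int curr)
{
	if (prev == curr)
		return curr;
	if (prev == UC || curr == UC)		/* UC always wins */
		return UC;
	if ((prev == WT && curr == WB) || (prev == WB && curr == WT))
		return WT;			/* WT is the stricter of the two */
	return UC;				/* any other conflict: be safe */
}

int main(void)
{
	printf("WB+WT -> %d (expect 4, WT)\n", combine(WB, WT));
	printf("WB+WC -> %d (expect 0, UC)\n", combine(WB, WC));
	return 0;
}
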
@@ -164,7 +167,7 @@ u8 mtrr_type_lookup(u64 start, u64 end) | |||
164 | return mtrr_state.def_type; | 167 | return mtrr_state.def_type; |
165 | } | 168 | } |
166 | 169 | ||
167 | /* Get the MSR pair relating to a var range */ | 170 | /* Get the MSR pair relating to a var range */ |
168 | static void | 171 | static void |
169 | get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr) | 172 | get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr) |
170 | { | 173 | { |
@@ -172,7 +175,7 @@ get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr) | |||
172 | rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi); | 175 | rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi); |
173 | } | 176 | } |
174 | 177 | ||
175 | /* fill the MSR pair relating to a var range */ | 178 | /* Fill the MSR pair relating to a var range */ |
176 | void fill_mtrr_var_range(unsigned int index, | 179 | void fill_mtrr_var_range(unsigned int index, |
177 | u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi) | 180 | u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi) |
178 | { | 181 | { |
@@ -186,10 +189,9 @@ void fill_mtrr_var_range(unsigned int index, | |||
186 | vr[index].mask_hi = mask_hi; | 189 | vr[index].mask_hi = mask_hi; |
187 | } | 190 | } |
188 | 191 | ||
189 | static void | 192 | static void get_fixed_ranges(mtrr_type *frs) |
190 | get_fixed_ranges(mtrr_type * frs) | ||
191 | { | 193 | { |
192 | unsigned int *p = (unsigned int *) frs; | 194 | unsigned int *p = (unsigned int *)frs; |
193 | int i; | 195 | int i; |
194 | 196 | ||
195 | k8_check_syscfg_dram_mod_en(); | 197 | k8_check_syscfg_dram_mod_en(); |
@@ -217,22 +219,22 @@ static void __init print_fixed_last(void) | |||
217 | if (!last_fixed_end) | 219 | if (!last_fixed_end) |
218 | return; | 220 | return; |
219 | 221 | ||
220 | printk(KERN_DEBUG " %05X-%05X %s\n", last_fixed_start, | 222 | pr_debug(" %05X-%05X %s\n", last_fixed_start, |
221 | last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type)); | 223 | last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type)); |
222 | 224 | ||
223 | last_fixed_end = 0; | 225 | last_fixed_end = 0; |
224 | } | 226 | } |
225 | 227 | ||
226 | static void __init update_fixed_last(unsigned base, unsigned end, | 228 | static void __init update_fixed_last(unsigned base, unsigned end, |
227 | mtrr_type type) | 229 | mtrr_type type) |
228 | { | 230 | { |
229 | last_fixed_start = base; | 231 | last_fixed_start = base; |
230 | last_fixed_end = end; | 232 | last_fixed_end = end; |
231 | last_fixed_type = type; | 233 | last_fixed_type = type; |
232 | } | 234 | } |
233 | 235 | ||
234 | static void __init print_fixed(unsigned base, unsigned step, | 236 | static void __init |
235 | const mtrr_type *types) | 237 | print_fixed(unsigned base, unsigned step, const mtrr_type *types) |
236 | { | 238 | { |
237 | unsigned i; | 239 | unsigned i; |
238 | 240 | ||
@@ -259,54 +261,55 @@ static void __init print_mtrr_state(void) | |||
259 | unsigned int i; | 261 | unsigned int i; |
260 | int high_width; | 262 | int high_width; |
261 | 263 | ||
262 | printk(KERN_DEBUG "MTRR default type: %s\n", | 264 | pr_debug("MTRR default type: %s\n", |
263 | mtrr_attrib_to_str(mtrr_state.def_type)); | 265 | mtrr_attrib_to_str(mtrr_state.def_type)); |
264 | if (mtrr_state.have_fixed) { | 266 | if (mtrr_state.have_fixed) { |
265 | printk(KERN_DEBUG "MTRR fixed ranges %sabled:\n", | 267 | pr_debug("MTRR fixed ranges %sabled:\n", |
266 | mtrr_state.enabled & 1 ? "en" : "dis"); | 268 | mtrr_state.enabled & 1 ? "en" : "dis"); |
267 | print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0); | 269 | print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0); |
268 | for (i = 0; i < 2; ++i) | 270 | for (i = 0; i < 2; ++i) |
269 | print_fixed(0x80000 + i * 0x20000, 0x04000, mtrr_state.fixed_ranges + (i + 1) * 8); | 271 | print_fixed(0x80000 + i * 0x20000, 0x04000, |
272 | mtrr_state.fixed_ranges + (i + 1) * 8); | ||
270 | for (i = 0; i < 8; ++i) | 273 | for (i = 0; i < 8; ++i) |
271 | print_fixed(0xC0000 + i * 0x08000, 0x01000, mtrr_state.fixed_ranges + (i + 3) * 8); | 274 | print_fixed(0xC0000 + i * 0x08000, 0x01000, |
275 | mtrr_state.fixed_ranges + (i + 3) * 8); | ||
272 | 276 | ||
273 | /* tail */ | 277 | /* tail */ |
274 | print_fixed_last(); | 278 | print_fixed_last(); |
275 | } | 279 | } |
276 | printk(KERN_DEBUG "MTRR variable ranges %sabled:\n", | 280 | pr_debug("MTRR variable ranges %sabled:\n", |
277 | mtrr_state.enabled & 2 ? "en" : "dis"); | 281 | mtrr_state.enabled & 2 ? "en" : "dis"); |
278 | if (size_or_mask & 0xffffffffUL) | 282 | if (size_or_mask & 0xffffffffUL) |
279 | high_width = ffs(size_or_mask & 0xffffffffUL) - 1; | 283 | high_width = ffs(size_or_mask & 0xffffffffUL) - 1; |
280 | else | 284 | else |
281 | high_width = ffs(size_or_mask>>32) + 32 - 1; | 285 | high_width = ffs(size_or_mask>>32) + 32 - 1; |
282 | high_width = (high_width - (32 - PAGE_SHIFT) + 3) / 4; | 286 | high_width = (high_width - (32 - PAGE_SHIFT) + 3) / 4; |
287 | |||
283 | for (i = 0; i < num_var_ranges; ++i) { | 288 | for (i = 0; i < num_var_ranges; ++i) { |
284 | if (mtrr_state.var_ranges[i].mask_lo & (1 << 11)) | 289 | if (mtrr_state.var_ranges[i].mask_lo & (1 << 11)) |
285 | printk(KERN_DEBUG " %u base %0*X%05X000 mask %0*X%05X000 %s\n", | 290 | pr_debug(" %u base %0*X%05X000 mask %0*X%05X000 %s\n", |
286 | i, | 291 | i, |
287 | high_width, | 292 | high_width, |
288 | mtrr_state.var_ranges[i].base_hi, | 293 | mtrr_state.var_ranges[i].base_hi, |
289 | mtrr_state.var_ranges[i].base_lo >> 12, | 294 | mtrr_state.var_ranges[i].base_lo >> 12, |
290 | high_width, | 295 | high_width, |
291 | mtrr_state.var_ranges[i].mask_hi, | 296 | mtrr_state.var_ranges[i].mask_hi, |
292 | mtrr_state.var_ranges[i].mask_lo >> 12, | 297 | mtrr_state.var_ranges[i].mask_lo >> 12, |
293 | mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff)); | 298 | mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff)); |
294 | else | 299 | else |
295 | printk(KERN_DEBUG " %u disabled\n", i); | 300 | pr_debug(" %u disabled\n", i); |
296 | } | ||
297 | if (mtrr_tom2) { | ||
298 | printk(KERN_DEBUG "TOM2: %016llx aka %lldM\n", | ||
299 | mtrr_tom2, mtrr_tom2>>20); | ||
300 | } | 301 | } |
302 | if (mtrr_tom2) | ||
303 | pr_debug("TOM2: %016llx aka %lldM\n", mtrr_tom2, mtrr_tom2>>20); | ||
301 | } | 304 | } |
302 | 305 | ||
303 | /* Grab all of the MTRR state for this CPU into *state */ | 306 | /* Grab all of the MTRR state for this CPU into *state */ |
304 | void __init get_mtrr_state(void) | 307 | void __init get_mtrr_state(void) |
305 | { | 308 | { |
306 | unsigned int i; | ||
307 | struct mtrr_var_range *vrs; | 309 | struct mtrr_var_range *vrs; |
308 | unsigned lo, dummy; | ||
309 | unsigned long flags; | 310 | unsigned long flags; |
311 | unsigned lo, dummy; | ||
312 | unsigned int i; | ||
310 | 313 | ||
311 | vrs = mtrr_state.var_ranges; | 314 | vrs = mtrr_state.var_ranges; |
312 | 315 | ||
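
A worked example for the high_width computation in print_mtrr_state() above, assuming size_or_mask has every bit from (phys_bits - PAGE_SHIFT) upward set and PAGE_SHIFT is 12. The result is the printf field width for base_hi, chosen so that those digits plus the five base_lo digits plus the literal "000" add up to the physical address width:

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define PAGE_SHIFT 12

static int high_width(int phys_bits)
{
	/* assumption: bits >= (phys_bits - PAGE_SHIFT) are set */
	unsigned long long size_or_mask =
		~((1ULL << (phys_bits - PAGE_SHIFT)) - 1);
	int pos;

	if (size_or_mask & 0xffffffffULL)
		pos = ffs((int)(size_or_mask & 0xffffffffULL)) - 1;
	else
		pos = ffs((int)(size_or_mask >> 32)) + 32 - 1;

	/* bit position -> hex digits needed for base_hi */
	return (pos - (32 - PAGE_SHIFT) + 3) / 4;
}

int main(void)
{
	/* 1 + 5 (base_lo >> 12) + 3 ("000") nibbles = 36 bits */
	printf("36-bit physical: %d high digit(s)\n", high_width(36));
	/* 2 + 5 + 3 nibbles = 40 bits */
	printf("40-bit physical: %d high digit(s)\n", high_width(40));
	return 0;
}
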
@@ -324,6 +327,7 @@ void __init get_mtrr_state(void) | |||
324 | 327 | ||
325 | if (amd_special_default_mtrr()) { | 328 | if (amd_special_default_mtrr()) { |
326 | unsigned low, high; | 329 | unsigned low, high; |
330 | |||
327 | /* TOP_MEM2 */ | 331 | /* TOP_MEM2 */ |
328 | rdmsr(MSR_K8_TOP_MEM2, low, high); | 332 | rdmsr(MSR_K8_TOP_MEM2, low, high); |
329 | mtrr_tom2 = high; | 333 | mtrr_tom2 = high; |
@@ -344,10 +348,9 @@ void __init get_mtrr_state(void) | |||
344 | 348 | ||
345 | post_set(); | 349 | post_set(); |
346 | local_irq_restore(flags); | 350 | local_irq_restore(flags); |
347 | |||
348 | } | 351 | } |
349 | 352 | ||
350 | /* Some BIOS's are fucked and don't set all MTRRs the same! */ | 353 | /* Some BIOS's are messed up and don't set all MTRRs the same! */ |
351 | void __init mtrr_state_warn(void) | 354 | void __init mtrr_state_warn(void) |
352 | { | 355 | { |
353 | unsigned long mask = smp_changes_mask; | 356 | unsigned long mask = smp_changes_mask; |
@@ -355,28 +358,33 @@ void __init mtrr_state_warn(void) | |||
355 | if (!mask) | 358 | if (!mask) |
356 | return; | 359 | return; |
357 | if (mask & MTRR_CHANGE_MASK_FIXED) | 360 | if (mask & MTRR_CHANGE_MASK_FIXED) |
358 | printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n"); | 361 | pr_warning("mtrr: your CPUs had inconsistent fixed MTRR settings\n"); |
359 | if (mask & MTRR_CHANGE_MASK_VARIABLE) | 362 | if (mask & MTRR_CHANGE_MASK_VARIABLE) |
360 | printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n"); | 363 | pr_warning("mtrr: your CPUs had inconsistent variable MTRR settings\n"); |
361 | if (mask & MTRR_CHANGE_MASK_DEFTYPE) | 364 | if (mask & MTRR_CHANGE_MASK_DEFTYPE) |
362 | printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n"); | 365 | pr_warning("mtrr: your CPUs had inconsistent MTRRdefType settings\n"); |
366 | |||
363 | printk(KERN_INFO "mtrr: probably your BIOS does not setup all CPUs.\n"); | 367 | printk(KERN_INFO "mtrr: probably your BIOS does not setup all CPUs.\n"); |
364 | printk(KERN_INFO "mtrr: corrected configuration.\n"); | 368 | printk(KERN_INFO "mtrr: corrected configuration.\n"); |
365 | } | 369 | } |
366 | 370 | ||
367 | /* Doesn't attempt to pass an error out to MTRR users | 371 | /* |
368 | because it's quite complicated in some cases and probably not | 372 | * Doesn't attempt to pass an error out to MTRR users |
369 | worth it because the best error handling is to ignore it. */ | 373 | * because it's quite complicated in some cases and probably not |
374 | * worth it because the best error handling is to ignore it. | ||
375 | */ | ||
370 | void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b) | 376 | void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b) |
371 | { | 377 | { |
372 | if (wrmsr_safe(msr, a, b) < 0) | 378 | if (wrmsr_safe(msr, a, b) < 0) { |
373 | printk(KERN_ERR | 379 | printk(KERN_ERR |
374 | "MTRR: CPU %u: Writing MSR %x to %x:%x failed\n", | 380 | "MTRR: CPU %u: Writing MSR %x to %x:%x failed\n", |
375 | smp_processor_id(), msr, a, b); | 381 | smp_processor_id(), msr, a, b); |
382 | } | ||
376 | } | 383 | } |
377 | 384 | ||
378 | /** | 385 | /** |
379 | * set_fixed_range - checks & updates a fixed-range MTRR if it differs from the value it should have | 386 | * set_fixed_range - checks & updates a fixed-range MTRR if it |
387 | * differs from the value it should have | ||
380 | * @msr: MSR address of the MTRR which should be checked and updated | 388 | * @msr: MSR address of the MTRR which should be checked and updated |
381 | * @changed: pointer which indicates whether the MTRR needed to be changed | 389 | * @changed: pointer which indicates whether the MTRR needed to be changed |
382 | * @msrwords: pointer to the MSR values which the MSR should have | 390 | * @msrwords: pointer to the MSR values which the MSR should have |
@@ -401,20 +409,23 @@ static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords) | |||
401 | * | 409 | * |
402 | * Returns: The index of the region on success, else negative on error. | 410 | * Returns: The index of the region on success, else negative on error. |
403 | */ | 411 | */ |
404 | int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg) | 412 | int |
413 | generic_get_free_region(unsigned long base, unsigned long size, int replace_reg) | ||
405 | { | 414 | { |
406 | int i, max; | ||
407 | mtrr_type ltype; | ||
408 | unsigned long lbase, lsize; | 415 | unsigned long lbase, lsize; |
416 | mtrr_type ltype; | ||
417 | int i, max; | ||
409 | 418 | ||
410 | max = num_var_ranges; | 419 | max = num_var_ranges; |
411 | if (replace_reg >= 0 && replace_reg < max) | 420 | if (replace_reg >= 0 && replace_reg < max) |
412 | return replace_reg; | 421 | return replace_reg; |
422 | |||
413 | for (i = 0; i < max; ++i) { | 423 | for (i = 0; i < max; ++i) { |
414 | mtrr_if->get(i, &lbase, &lsize, <ype); | 424 | mtrr_if->get(i, &lbase, &lsize, <ype); |
415 | if (lsize == 0) | 425 | if (lsize == 0) |
416 | return i; | 426 | return i; |
417 | } | 427 | } |
428 | |||
418 | return -ENOSPC; | 429 | return -ENOSPC; |
419 | } | 430 | } |
420 | 431 | ||
@@ -434,7 +445,7 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base, | |||
434 | rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi); | 445 | rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi); |
435 | 446 | ||
436 | if ((mask_lo & 0x800) == 0) { | 447 | if ((mask_lo & 0x800) == 0) { |
437 | /* Invalid (i.e. free) range */ | 448 | /* Invalid (i.e. free) range */ |
438 | *base = 0; | 449 | *base = 0; |
439 | *size = 0; | 450 | *size = 0; |
440 | *type = 0; | 451 | *type = 0; |
@@ -471,27 +482,31 @@ out_put_cpu: | |||
471 | } | 482 | } |
472 | 483 | ||
473 | /** | 484 | /** |
474 | * set_fixed_ranges - checks & updates the fixed-range MTRRs if they differ from the saved set | 485 | * set_fixed_ranges - checks & updates the fixed-range MTRRs if they |
486 | * differ from the saved set | ||
475 | * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges() | 487 | * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges() |
476 | */ | 488 | */ |
477 | static int set_fixed_ranges(mtrr_type * frs) | 489 | static int set_fixed_ranges(mtrr_type *frs) |
478 | { | 490 | { |
479 | unsigned long long *saved = (unsigned long long *) frs; | 491 | unsigned long long *saved = (unsigned long long *)frs; |
480 | bool changed = false; | 492 | bool changed = false; |
481 | int block=-1, range; | 493 | int block = -1, range; |
482 | 494 | ||
483 | k8_check_syscfg_dram_mod_en(); | 495 | k8_check_syscfg_dram_mod_en(); |
484 | 496 | ||
485 | while (fixed_range_blocks[++block].ranges) | 497 | while (fixed_range_blocks[++block].ranges) { |
486 | for (range=0; range < fixed_range_blocks[block].ranges; range++) | 498 | for (range = 0; range < fixed_range_blocks[block].ranges; range++) |
487 | set_fixed_range(fixed_range_blocks[block].base_msr + range, | 499 | set_fixed_range(fixed_range_blocks[block].base_msr + range, |
488 | &changed, (unsigned int *) saved++); | 500 | &changed, (unsigned int *)saved++); |
501 | } | ||
489 | 502 | ||
490 | return changed; | 503 | return changed; |
491 | } | 504 | } |
492 | 505 | ||
493 | /* Set the MSR pair relating to a var range. Returns TRUE if | 506 | /* |
494 | changes are made */ | 507 | * Set the MSR pair relating to a var range. |
508 | * Returns true if changes are made. | ||
509 | */ | ||
495 | static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr) | 510 | static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr) |
496 | { | 511 | { |
497 | unsigned int lo, hi; | 512 | unsigned int lo, hi; |
@@ -501,6 +516,7 @@ static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr) | |||
501 | if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL) | 516 | if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL) |
502 | || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) != | 517 | || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) != |
503 | (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) { | 518 | (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) { |
519 | |||
504 | mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi); | 520 | mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi); |
505 | changed = true; | 521 | changed = true; |
506 | } | 522 | } |
@@ -526,21 +542,26 @@ static u32 deftype_lo, deftype_hi; | |||
526 | */ | 542 | */ |
527 | static unsigned long set_mtrr_state(void) | 543 | static unsigned long set_mtrr_state(void) |
528 | { | 544 | { |
529 | unsigned int i; | ||
530 | unsigned long change_mask = 0; | 545 | unsigned long change_mask = 0; |
546 | unsigned int i; | ||
531 | 547 | ||
532 | for (i = 0; i < num_var_ranges; i++) | 548 | for (i = 0; i < num_var_ranges; i++) { |
533 | if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i])) | 549 | if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i])) |
534 | change_mask |= MTRR_CHANGE_MASK_VARIABLE; | 550 | change_mask |= MTRR_CHANGE_MASK_VARIABLE; |
551 | } | ||
535 | 552 | ||
536 | if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges)) | 553 | if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges)) |
537 | change_mask |= MTRR_CHANGE_MASK_FIXED; | 554 | change_mask |= MTRR_CHANGE_MASK_FIXED; |
538 | 555 | ||
539 | /* Set_mtrr_restore restores the old value of MTRRdefType, | 556 | /* |
540 | so to set it we fiddle with the saved value */ | 557 | * Set_mtrr_restore restores the old value of MTRRdefType, |
558 | * so to set it we fiddle with the saved value: | ||
559 | */ | ||
541 | if ((deftype_lo & 0xff) != mtrr_state.def_type | 560 | if ((deftype_lo & 0xff) != mtrr_state.def_type |
542 | || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) { | 561 | || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) { |
543 | deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type | (mtrr_state.enabled << 10); | 562 | |
563 | deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type | | ||
564 | (mtrr_state.enabled << 10); | ||
544 | change_mask |= MTRR_CHANGE_MASK_DEFTYPE; | 565 | change_mask |= MTRR_CHANGE_MASK_DEFTYPE; |
545 | } | 566 | } |
546 | 567 | ||
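
The ~0xcff fiddling above touches exactly the three MTRRdefType fields: bits 0-7 hold the default type, bit 10 the fixed-range enable and bit 11 the global enable, which is why the hunk compares (deftype_lo & 0xc00) >> 10 against mtrr_state.enabled. A tiny sketch with a made-up saved value:

#include <stdio.h>

int main(void)
{
	unsigned int deftype_lo = 0xdead0c06;	/* hypothetical saved MSR low word */
	unsigned int def_type = 0;		/* MTRR_TYPE_UNCACHABLE */
	unsigned int enabled = 3;		/* bit 0 = FE, bit 1 = E */

	/* clear type + enable fields, then splice in the wanted state */
	deftype_lo = (deftype_lo & ~0xcff) | def_type | (enabled << 10);
	printf("new MTRRdefType.lo = %#x\n", deftype_lo);	/* 0xdead0c00 */
	return 0;
}
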
@@ -548,33 +569,36 @@ static unsigned long set_mtrr_state(void) | |||
548 | } | 569 | } |
549 | 570 | ||
550 | 571 | ||
551 | static unsigned long cr4 = 0; | 572 | static unsigned long cr4; |
552 | static DEFINE_SPINLOCK(set_atomicity_lock); | 573 | static DEFINE_SPINLOCK(set_atomicity_lock); |
553 | 574 | ||
554 | /* | 575 | /* |
555 | * Since we are disabling the cache don't allow any interrupts - they | 576 | * Since we are disabling the cache don't allow any interrupts, |
556 | * would run extremely slow and would only increase the pain. The caller must | 577 | * they would run extremely slow and would only increase the pain. |
557 | * ensure that local interrupts are disabled and are reenabled after post_set() | 578 | * |
558 | * has been called. | 579 | * The caller must ensure that local interrupts are disabled and |
580 | * are reenabled after post_set() has been called. | ||
559 | */ | 581 | */ |
560 | |||
561 | static void prepare_set(void) __acquires(set_atomicity_lock) | 582 | static void prepare_set(void) __acquires(set_atomicity_lock) |
562 | { | 583 | { |
563 | unsigned long cr0; | 584 | unsigned long cr0; |
564 | 585 | ||
565 | /* Note that this is not ideal, since the cache is only flushed/disabled | 586 | /* |
566 | for this CPU while the MTRRs are changed, but changing this requires | 587 | * Note that this is not ideal |
567 | more invasive changes to the way the kernel boots */ | 588 | * since the cache is only flushed/disabled for this CPU while the |
589 | * MTRRs are changed, but changing this requires more invasive | ||
590 | * changes to the way the kernel boots | ||
591 | */ | ||
568 | 592 | ||
569 | spin_lock(&set_atomicity_lock); | 593 | spin_lock(&set_atomicity_lock); |
570 | 594 | ||
571 | /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */ | 595 | /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */ |
572 | cr0 = read_cr0() | X86_CR0_CD; | 596 | cr0 = read_cr0() | X86_CR0_CD; |
573 | write_cr0(cr0); | 597 | write_cr0(cr0); |
574 | wbinvd(); | 598 | wbinvd(); |
575 | 599 | ||
576 | /* Save value of CR4 and clear Page Global Enable (bit 7) */ | 600 | /* Save value of CR4 and clear Page Global Enable (bit 7) */ |
577 | if ( cpu_has_pge ) { | 601 | if (cpu_has_pge) { |
578 | cr4 = read_cr4(); | 602 | cr4 = read_cr4(); |
579 | write_cr4(cr4 & ~X86_CR4_PGE); | 603 | write_cr4(cr4 & ~X86_CR4_PGE); |
580 | } | 604 | } |
@@ -582,26 +606,26 @@ static void prepare_set(void) __acquires(set_atomicity_lock) | |||
582 | /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */ | 606 | /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */ |
583 | __flush_tlb(); | 607 | __flush_tlb(); |
584 | 608 | ||
585 | /* Save MTRR state */ | 609 | /* Save MTRR state */ |
586 | rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi); | 610 | rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi); |
587 | 611 | ||
588 | /* Disable MTRRs, and set the default type to uncached */ | 612 | /* Disable MTRRs, and set the default type to uncached */ |
589 | mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi); | 613 | mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi); |
590 | } | 614 | } |
591 | 615 | ||
592 | static void post_set(void) __releases(set_atomicity_lock) | 616 | static void post_set(void) __releases(set_atomicity_lock) |
593 | { | 617 | { |
594 | /* Flush TLBs (no need to flush caches - they are disabled) */ | 618 | /* Flush TLBs (no need to flush caches - they are disabled) */ |
595 | __flush_tlb(); | 619 | __flush_tlb(); |
596 | 620 | ||
597 | /* Intel (P6) standard MTRRs */ | 621 | /* Intel (P6) standard MTRRs */ |
598 | mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi); | 622 | mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi); |
599 | 623 | ||
600 | /* Enable caches */ | 624 | /* Enable caches */ |
601 | write_cr0(read_cr0() & 0xbfffffff); | 625 | write_cr0(read_cr0() & 0xbfffffff); |
602 | 626 | ||
603 | /* Restore value of CR4 */ | 627 | /* Restore value of CR4 */ |
604 | if ( cpu_has_pge ) | 628 | if (cpu_has_pge) |
605 | write_cr4(cr4); | 629 | write_cr4(cr4); |
606 | spin_unlock(&set_atomicity_lock); | 630 | spin_unlock(&set_atomicity_lock); |
607 | } | 631 | } |
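
For the record, the magic constants in prepare_set()/post_set(): 0xbfffffff is just ~X86_CR0_CD, CR0.CD being bit 30, and the Page Global Enable bit cleared from CR4 is bit 7, exactly as the comments say. A trivial sketch, nothing kernel-specific:

#include <stdio.h>

#define X86_CR0_CD	(1u << 30)	/* CR0 cache disable */
#define X86_CR4_PGE	(1u << 7)	/* CR4 page global enable */

int main(void)
{
	printf("enter no-fill mode: cr0 |= %#010x\n", X86_CR0_CD);
	printf("enable caches:      cr0 &= %#010x\n", ~X86_CR0_CD);
	printf("clear PGE:          cr4 &= %#010x\n", ~X86_CR4_PGE);
	return 0;
}
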
@@ -623,24 +647,27 @@ static void generic_set_all(void) | |||
623 | post_set(); | 647 | post_set(); |
624 | local_irq_restore(flags); | 648 | local_irq_restore(flags); |
625 | 649 | ||
626 | /* Use the atomic bitops to update the global mask */ | 650 | /* Use the atomic bitops to update the global mask */ |
627 | for (count = 0; count < sizeof mask * 8; ++count) { | 651 | for (count = 0; count < sizeof mask * 8; ++count) { |
628 | if (mask & 0x01) | 652 | if (mask & 0x01) |
629 | set_bit(count, &smp_changes_mask); | 653 | set_bit(count, &smp_changes_mask); |
630 | mask >>= 1; | 654 | mask >>= 1; |
631 | } | 655 | } |
632 | 656 | ||
633 | } | 657 | } |
634 | 658 | ||
659 | /** | ||
660 | * generic_set_mtrr - set variable MTRR register on the local CPU. | ||
661 | * | ||
662 | * @reg: The register to set. | ||
663 | * @base: The base address of the region. | ||
664 | * @size: The size of the region. If this is 0 the region is disabled. | ||
665 | * @type: The type of the region. | ||
666 | * | ||
667 | * Returns nothing. | ||
668 | */ | ||
635 | static void generic_set_mtrr(unsigned int reg, unsigned long base, | 669 | static void generic_set_mtrr(unsigned int reg, unsigned long base, |
636 | unsigned long size, mtrr_type type) | 670 | unsigned long size, mtrr_type type) |
637 | /* [SUMMARY] Set variable MTRR register on the local CPU. | ||
638 | <reg> The register to set. | ||
639 | <base> The base address of the region. | ||
640 | <size> The size of the region. If this is 0 the region is disabled. | ||
641 | <type> The type of the region. | ||
642 | [RETURNS] Nothing. | ||
643 | */ | ||
644 | { | 671 | { |
645 | unsigned long flags; | 672 | unsigned long flags; |
646 | struct mtrr_var_range *vr; | 673 | struct mtrr_var_range *vr; |
@@ -651,8 +678,10 @@ static void generic_set_mtrr(unsigned int reg, unsigned long base, | |||
651 | prepare_set(); | 678 | prepare_set(); |
652 | 679 | ||
653 | if (size == 0) { | 680 | if (size == 0) { |
654 | /* The invalid bit is kept in the mask, so we simply clear the | 681 | /* |
655 | relevant mask register to disable a range. */ | 682 | * The invalid bit is kept in the mask, so we simply |
683 | * clear the relevant mask register to disable a range. | ||
684 | */ | ||
656 | mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0); | 685 | mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0); |
657 | memset(vr, 0, sizeof(struct mtrr_var_range)); | 686 | memset(vr, 0, sizeof(struct mtrr_var_range)); |
658 | } else { | 687 | } else { |
@@ -669,46 +698,50 @@ static void generic_set_mtrr(unsigned int reg, unsigned long base, | |||
669 | local_irq_restore(flags); | 698 | local_irq_restore(flags); |
670 | } | 699 | } |
671 | 700 | ||
672 | int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type) | 701 | int generic_validate_add_page(unsigned long base, unsigned long size, |
702 | unsigned int type) | ||
673 | { | 703 | { |
674 | unsigned long lbase, last; | 704 | unsigned long lbase, last; |
675 | 705 | ||
676 | /* For Intel PPro stepping <= 7, must be 4 MiB aligned | 706 | /* |
677 | and not touch 0x70000000->0x7003FFFF */ | 707 | * For Intel PPro stepping <= 7, |
708 | * must be 4 MiB aligned and not touch 0x70000000 -> 0x7003FFFF | ||
709 | */ | ||
678 | if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 && | 710 | if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 && |
679 | boot_cpu_data.x86_model == 1 && | 711 | boot_cpu_data.x86_model == 1 && |
680 | boot_cpu_data.x86_mask <= 7) { | 712 | boot_cpu_data.x86_mask <= 7) { |
681 | if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) { | 713 | if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) { |
682 | printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base); | 714 | pr_warning("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base); |
683 | return -EINVAL; | 715 | return -EINVAL; |
684 | } | 716 | } |
685 | if (!(base + size < 0x70000 || base > 0x7003F) && | 717 | if (!(base + size < 0x70000 || base > 0x7003F) && |
686 | (type == MTRR_TYPE_WRCOMB | 718 | (type == MTRR_TYPE_WRCOMB |
687 | || type == MTRR_TYPE_WRBACK)) { | 719 | || type == MTRR_TYPE_WRBACK)) { |
688 | printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n"); | 720 | pr_warning("mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n"); |
689 | return -EINVAL; | 721 | return -EINVAL; |
690 | } | 722 | } |
691 | } | 723 | } |
692 | 724 | ||
693 | /* Check upper bits of base and last are equal and lower bits are 0 | 725 | /* |
694 | for base and 1 for last */ | 726 | * Check upper bits of base and last are equal and lower bits are 0 |
727 | * for base and 1 for last | ||
728 | */ | ||
695 | last = base + size - 1; | 729 | last = base + size - 1; |
696 | for (lbase = base; !(lbase & 1) && (last & 1); | 730 | for (lbase = base; !(lbase & 1) && (last & 1); |
697 | lbase = lbase >> 1, last = last >> 1) ; | 731 | lbase = lbase >> 1, last = last >> 1) |
732 | ; | ||
698 | if (lbase != last) { | 733 | if (lbase != last) { |
699 | printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", | 734 | pr_warning("mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", base, size); |
700 | base, size); | ||
701 | return -EINVAL; | 735 | return -EINVAL; |
702 | } | 736 | } |
703 | return 0; | 737 | return 0; |
704 | } | 738 | } |
705 | 739 | ||
706 | |||
707 | static int generic_have_wrcomb(void) | 740 | static int generic_have_wrcomb(void) |
708 | { | 741 | { |
709 | unsigned long config, dummy; | 742 | unsigned long config, dummy; |
710 | rdmsr(MSR_MTRRcap, config, dummy); | 743 | rdmsr(MSR_MTRRcap, config, dummy); |
711 | return (config & (1 << 10)); | 744 | return config & (1 << 10); |
712 | } | 745 | } |
713 | 746 | ||
714 | int positive_have_wrcomb(void) | 747 | int positive_have_wrcomb(void) |
@@ -716,14 +749,15 @@ int positive_have_wrcomb(void) | |||
716 | return 1; | 749 | return 1; |
717 | } | 750 | } |
718 | 751 | ||
719 | /* generic structure... | 752 | /* |
753 | * Generic structure... | ||
720 | */ | 754 | */ |
721 | struct mtrr_ops generic_mtrr_ops = { | 755 | struct mtrr_ops generic_mtrr_ops = { |
722 | .use_intel_if = 1, | 756 | .use_intel_if = 1, |
723 | .set_all = generic_set_all, | 757 | .set_all = generic_set_all, |
724 | .get = generic_get_mtrr, | 758 | .get = generic_get_mtrr, |
725 | .get_free_region = generic_get_free_region, | 759 | .get_free_region = generic_get_free_region, |
726 | .set = generic_set_mtrr, | 760 | .set = generic_set_mtrr, |
727 | .validate_add_page = generic_validate_add_page, | 761 | .validate_add_page = generic_validate_add_page, |
728 | .have_wrcomb = generic_have_wrcomb, | 762 | .have_wrcomb = generic_have_wrcomb, |
729 | }; | 763 | }; |
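
The shift loop in generic_validate_add_page() above is terse enough to deserve a worked example: base and last are shifted right while base keeps producing 0 bits and last keeps producing 1 bits, so the two meet exactly when size is a power of two and base is size-aligned. A userspace rerun, with values in pages as in the kernel code:

#include <stdio.h>

static int aligned(unsigned long base, unsigned long size)
{
	unsigned long lbase = base, last = base + size - 1;

	/* strip matching low bits: 0s from base, 1s from last */
	while (!(lbase & 1) && (last & 1)) {
		lbase >>= 1;
		last >>= 1;
	}
	return lbase == last;
}

int main(void)
{
	/* 1 MiB region at 1 MiB: aligned, power-of-two size */
	printf("base 0x100, size 0x100: %s\n",
	       aligned(0x100, 0x100) ? "ok" : "-EINVAL");
	/* same size at 1.5 MiB: base not size-aligned */
	printf("base 0x180, size 0x100: %s\n",
	       aligned(0x180, 0x100) ? "ok" : "-EINVAL");
	return 0;
}
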
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c index fb73a52913a4..08b6ea4c62b4 100644 --- a/arch/x86/kernel/cpu/mtrr/if.c +++ b/arch/x86/kernel/cpu/mtrr/if.c | |||
@@ -1,27 +1,28 @@ | |||
1 | #include <linux/init.h> | ||
2 | #include <linux/proc_fs.h> | ||
3 | #include <linux/capability.h> | 1 | #include <linux/capability.h> |
4 | #include <linux/ctype.h> | ||
5 | #include <linux/module.h> | ||
6 | #include <linux/seq_file.h> | 2 | #include <linux/seq_file.h> |
7 | #include <asm/uaccess.h> | 3 | #include <linux/uaccess.h> |
4 | #include <linux/proc_fs.h> | ||
5 | #include <linux/module.h> | ||
6 | #include <linux/ctype.h> | ||
7 | #include <linux/init.h> | ||
8 | 8 | ||
9 | #define LINE_SIZE 80 | 9 | #define LINE_SIZE 80 |
10 | 10 | ||
11 | #include <asm/mtrr.h> | 11 | #include <asm/mtrr.h> |
12 | |||
12 | #include "mtrr.h" | 13 | #include "mtrr.h" |
13 | 14 | ||
14 | #define FILE_FCOUNT(f) (((struct seq_file *)((f)->private_data))->private) | 15 | #define FILE_FCOUNT(f) (((struct seq_file *)((f)->private_data))->private) |
15 | 16 | ||
16 | static const char *const mtrr_strings[MTRR_NUM_TYPES] = | 17 | static const char *const mtrr_strings[MTRR_NUM_TYPES] = |
17 | { | 18 | { |
18 | "uncachable", /* 0 */ | 19 | "uncachable", /* 0 */ |
19 | "write-combining", /* 1 */ | 20 | "write-combining", /* 1 */ |
20 | "?", /* 2 */ | 21 | "?", /* 2 */ |
21 | "?", /* 3 */ | 22 | "?", /* 3 */ |
22 | "write-through", /* 4 */ | 23 | "write-through", /* 4 */ |
23 | "write-protect", /* 5 */ | 24 | "write-protect", /* 5 */ |
24 | "write-back", /* 6 */ | 25 | "write-back", /* 6 */ |
25 | }; | 26 | }; |
26 | 27 | ||
27 | const char *mtrr_attrib_to_str(int x) | 28 | const char *mtrr_attrib_to_str(int x) |
@@ -35,8 +36,8 @@ static int | |||
35 | mtrr_file_add(unsigned long base, unsigned long size, | 36 | mtrr_file_add(unsigned long base, unsigned long size, |
36 | unsigned int type, bool increment, struct file *file, int page) | 37 | unsigned int type, bool increment, struct file *file, int page) |
37 | { | 38 | { |
39 | unsigned int *fcount = FILE_FCOUNT(file); | ||
38 | int reg, max; | 40 | int reg, max; |
39 | unsigned int *fcount = FILE_FCOUNT(file); | ||
40 | 41 | ||
41 | max = num_var_ranges; | 42 | max = num_var_ranges; |
42 | if (fcount == NULL) { | 43 | if (fcount == NULL) { |
@@ -61,8 +62,8 @@ static int | |||
61 | mtrr_file_del(unsigned long base, unsigned long size, | 62 | mtrr_file_del(unsigned long base, unsigned long size, |
62 | struct file *file, int page) | 63 | struct file *file, int page) |
63 | { | 64 | { |
64 | int reg; | ||
65 | unsigned int *fcount = FILE_FCOUNT(file); | 65 | unsigned int *fcount = FILE_FCOUNT(file); |
66 | int reg; | ||
66 | 67 | ||
67 | if (!page) { | 68 | if (!page) { |
68 | if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) | 69 | if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) |
@@ -81,13 +82,14 @@ mtrr_file_del(unsigned long base, unsigned long size, | |||
81 | return reg; | 82 | return reg; |
82 | } | 83 | } |
83 | 84 | ||
84 | /* RED-PEN: seq_file can seek now. this is ignored. */ | 85 | /* |
86 | * seq_file can seek but we ignore it. | ||
87 | * | ||
88 | * Format of control line: | ||
89 | * "base=%Lx size=%Lx type=%s" or "disable=%d" | ||
90 | */ | ||
85 | static ssize_t | 91 | static ssize_t |
86 | mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos) | 92 | mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos) |
87 | /* Format of control line: | ||
88 | "base=%Lx size=%Lx type=%s" OR: | ||
89 | "disable=%d" | ||
90 | */ | ||
91 | { | 93 | { |
92 | int i, err; | 94 | int i, err; |
93 | unsigned long reg; | 95 | unsigned long reg; |
@@ -100,15 +102,18 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos) | |||
100 | return -EPERM; | 102 | return -EPERM; |
101 | if (!len) | 103 | if (!len) |
102 | return -EINVAL; | 104 | return -EINVAL; |
105 | |||
103 | memset(line, 0, LINE_SIZE); | 106 | memset(line, 0, LINE_SIZE); |
104 | if (len > LINE_SIZE) | 107 | if (len > LINE_SIZE) |
105 | len = LINE_SIZE; | 108 | len = LINE_SIZE; |
106 | if (copy_from_user(line, buf, len - 1)) | 109 | if (copy_from_user(line, buf, len - 1)) |
107 | return -EFAULT; | 110 | return -EFAULT; |
111 | |||
108 | linelen = strlen(line); | 112 | linelen = strlen(line); |
109 | ptr = line + linelen - 1; | 113 | ptr = line + linelen - 1; |
110 | if (linelen && *ptr == '\n') | 114 | if (linelen && *ptr == '\n') |
111 | *ptr = '\0'; | 115 | *ptr = '\0'; |
116 | |||
112 | if (!strncmp(line, "disable=", 8)) { | 117 | if (!strncmp(line, "disable=", 8)) { |
113 | reg = simple_strtoul(line + 8, &ptr, 0); | 118 | reg = simple_strtoul(line + 8, &ptr, 0); |
114 | err = mtrr_del_page(reg, 0, 0); | 119 | err = mtrr_del_page(reg, 0, 0); |
@@ -116,28 +121,35 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos) | |||
116 | return err; | 121 | return err; |
117 | return len; | 122 | return len; |
118 | } | 123 | } |
124 | |||
119 | if (strncmp(line, "base=", 5)) | 125 | if (strncmp(line, "base=", 5)) |
120 | return -EINVAL; | 126 | return -EINVAL; |
127 | |||
121 | base = simple_strtoull(line + 5, &ptr, 0); | 128 | base = simple_strtoull(line + 5, &ptr, 0); |
122 | for (; isspace(*ptr); ++ptr) ; | 129 | for (; isspace(*ptr); ++ptr) |
130 | ; | ||
131 | |||
123 | if (strncmp(ptr, "size=", 5)) | 132 | if (strncmp(ptr, "size=", 5)) |
124 | return -EINVAL; | 133 | return -EINVAL; |
134 | |||
125 | size = simple_strtoull(ptr + 5, &ptr, 0); | 135 | size = simple_strtoull(ptr + 5, &ptr, 0); |
126 | if ((base & 0xfff) || (size & 0xfff)) | 136 | if ((base & 0xfff) || (size & 0xfff)) |
127 | return -EINVAL; | 137 | return -EINVAL; |
128 | for (; isspace(*ptr); ++ptr) ; | 138 | for (; isspace(*ptr); ++ptr) |
139 | ; | ||
140 | |||
129 | if (strncmp(ptr, "type=", 5)) | 141 | if (strncmp(ptr, "type=", 5)) |
130 | return -EINVAL; | 142 | return -EINVAL; |
131 | ptr += 5; | 143 | ptr += 5; |
132 | for (; isspace(*ptr); ++ptr) ; | 144 | for (; isspace(*ptr); ++ptr) |
145 | ; | ||
146 | |||
133 | for (i = 0; i < MTRR_NUM_TYPES; ++i) { | 147 | for (i = 0; i < MTRR_NUM_TYPES; ++i) { |
134 | if (strcmp(ptr, mtrr_strings[i])) | 148 | if (strcmp(ptr, mtrr_strings[i])) |
135 | continue; | 149 | continue; |
136 | base >>= PAGE_SHIFT; | 150 | base >>= PAGE_SHIFT; |
137 | size >>= PAGE_SHIFT; | 151 | size >>= PAGE_SHIFT; |
138 | err = | 152 | err = mtrr_add_page((unsigned long)base, (unsigned long)size, i, true); |
139 | mtrr_add_page((unsigned long) base, (unsigned long) size, i, | ||
140 | true); | ||
141 | if (err < 0) | 153 | if (err < 0) |
142 | return err; | 154 | return err; |
143 | return len; | 155 | return len; |
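
The control line parsed above is what userspace writes into /proc/mtrr. A minimal illustration; the base address and size here are invented, both must be 4 KiB aligned, the type string must match mtrr_strings[] exactly, and the writer needs CAP_SYS_ADMIN:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/mtrr", "w");

	if (!f) {
		perror("fopen /proc/mtrr");
		return 1;
	}
	/* same effect as: echo "base=... size=... type=..." > /proc/mtrr */
	fprintf(f, "base=0xf8000000 size=0x400000 type=write-combining\n");
	fclose(f);
	return 0;
}
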
@@ -181,7 +193,9 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg) | |||
181 | case MTRRIOC32_SET_PAGE_ENTRY: | 193 | case MTRRIOC32_SET_PAGE_ENTRY: |
182 | case MTRRIOC32_DEL_PAGE_ENTRY: | 194 | case MTRRIOC32_DEL_PAGE_ENTRY: |
183 | case MTRRIOC32_KILL_PAGE_ENTRY: { | 195 | case MTRRIOC32_KILL_PAGE_ENTRY: { |
184 | struct mtrr_sentry32 __user *s32 = (struct mtrr_sentry32 __user *)__arg; | 196 | struct mtrr_sentry32 __user *s32; |
197 | |||
198 | s32 = (struct mtrr_sentry32 __user *)__arg; | ||
185 | err = get_user(sentry.base, &s32->base); | 199 | err = get_user(sentry.base, &s32->base); |
186 | err |= get_user(sentry.size, &s32->size); | 200 | err |= get_user(sentry.size, &s32->size); |
187 | err |= get_user(sentry.type, &s32->type); | 201 | err |= get_user(sentry.type, &s32->type); |
@@ -191,7 +205,9 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg) | |||
191 | } | 205 | } |
192 | case MTRRIOC32_GET_ENTRY: | 206 | case MTRRIOC32_GET_ENTRY: |
193 | case MTRRIOC32_GET_PAGE_ENTRY: { | 207 | case MTRRIOC32_GET_PAGE_ENTRY: { |
194 | struct mtrr_gentry32 __user *g32 = (struct mtrr_gentry32 __user *)__arg; | 208 | struct mtrr_gentry32 __user *g32; |
209 | |||
210 | g32 = (struct mtrr_gentry32 __user *)__arg; | ||
195 | err = get_user(gentry.regnum, &g32->regnum); | 211 | err = get_user(gentry.regnum, &g32->regnum); |
196 | err |= get_user(gentry.base, &g32->base); | 212 | err |= get_user(gentry.base, &g32->base); |
197 | err |= get_user(gentry.size, &g32->size); | 213 | err |= get_user(gentry.size, &g32->size); |
@@ -314,7 +330,7 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg) | |||
314 | if (err) | 330 | if (err) |
315 | return err; | 331 | return err; |
316 | 332 | ||
317 | switch(cmd) { | 333 | switch (cmd) { |
318 | case MTRRIOC_GET_ENTRY: | 334 | case MTRRIOC_GET_ENTRY: |
319 | case MTRRIOC_GET_PAGE_ENTRY: | 335 | case MTRRIOC_GET_PAGE_ENTRY: |
320 | if (copy_to_user(arg, &gentry, sizeof gentry)) | 336 | if (copy_to_user(arg, &gentry, sizeof gentry)) |
@@ -323,7 +339,9 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg) | |||
323 | #ifdef CONFIG_COMPAT | 339 | #ifdef CONFIG_COMPAT |
324 | case MTRRIOC32_GET_ENTRY: | 340 | case MTRRIOC32_GET_ENTRY: |
325 | case MTRRIOC32_GET_PAGE_ENTRY: { | 341 | case MTRRIOC32_GET_PAGE_ENTRY: { |
326 | struct mtrr_gentry32 __user *g32 = (struct mtrr_gentry32 __user *)__arg; | 342 | struct mtrr_gentry32 __user *g32; |
343 | |||
344 | g32 = (struct mtrr_gentry32 __user *)__arg; | ||
327 | err = put_user(gentry.base, &g32->base); | 345 | err = put_user(gentry.base, &g32->base); |
328 | err |= put_user(gentry.size, &g32->size); | 346 | err |= put_user(gentry.size, &g32->size); |
329 | err |= put_user(gentry.regnum, &g32->regnum); | 347 | err |= put_user(gentry.regnum, &g32->regnum); |
@@ -335,11 +353,10 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg) | |||
335 | return err; | 353 | return err; |
336 | } | 354 | } |
337 | 355 | ||
338 | static int | 356 | static int mtrr_close(struct inode *ino, struct file *file) |
339 | mtrr_close(struct inode *ino, struct file *file) | ||
340 | { | 357 | { |
341 | int i, max; | ||
342 | unsigned int *fcount = FILE_FCOUNT(file); | 358 | unsigned int *fcount = FILE_FCOUNT(file); |
359 | int i, max; | ||
343 | 360 | ||
344 | if (fcount != NULL) { | 361 | if (fcount != NULL) { |
345 | max = num_var_ranges; | 362 | max = num_var_ranges; |
@@ -359,22 +376,22 @@ static int mtrr_seq_show(struct seq_file *seq, void *offset); | |||
359 | 376 | ||
360 | static int mtrr_open(struct inode *inode, struct file *file) | 377 | static int mtrr_open(struct inode *inode, struct file *file) |
361 | { | 378 | { |
362 | if (!mtrr_if) | 379 | if (!mtrr_if) |
363 | return -EIO; | 380 | return -EIO; |
364 | if (!mtrr_if->get) | 381 | if (!mtrr_if->get) |
365 | return -ENXIO; | 382 | return -ENXIO; |
366 | return single_open(file, mtrr_seq_show, NULL); | 383 | return single_open(file, mtrr_seq_show, NULL); |
367 | } | 384 | } |
368 | 385 | ||
369 | static const struct file_operations mtrr_fops = { | 386 | static const struct file_operations mtrr_fops = { |
370 | .owner = THIS_MODULE, | 387 | .owner = THIS_MODULE, |
371 | .open = mtrr_open, | 388 | .open = mtrr_open, |
372 | .read = seq_read, | 389 | .read = seq_read, |
373 | .llseek = seq_lseek, | 390 | .llseek = seq_lseek, |
374 | .write = mtrr_write, | 391 | .write = mtrr_write, |
375 | .unlocked_ioctl = mtrr_ioctl, | 392 | .unlocked_ioctl = mtrr_ioctl, |
376 | .compat_ioctl = mtrr_ioctl, | 393 | .compat_ioctl = mtrr_ioctl, |
377 | .release = mtrr_close, | 394 | .release = mtrr_close, |
378 | }; | 395 | }; |
379 | 396 | ||
380 | static int mtrr_seq_show(struct seq_file *seq, void *offset) | 397 | static int mtrr_seq_show(struct seq_file *seq, void *offset) |
@@ -388,23 +405,24 @@ static int mtrr_seq_show(struct seq_file *seq, void *offset) | |||
388 | max = num_var_ranges; | 405 | max = num_var_ranges; |
389 | for (i = 0; i < max; i++) { | 406 | for (i = 0; i < max; i++) { |
390 | mtrr_if->get(i, &base, &size, &type); | 407 | mtrr_if->get(i, &base, &size, &type); |
391 | if (size == 0) | 408 | if (size == 0) { |
392 | mtrr_usage_table[i] = 0; | 409 | mtrr_usage_table[i] = 0; |
393 | else { | 410 | continue; |
394 | if (size < (0x100000 >> PAGE_SHIFT)) { | ||
395 | /* less than 1MB */ | ||
396 | factor = 'K'; | ||
397 | size <<= PAGE_SHIFT - 10; | ||
398 | } else { | ||
399 | factor = 'M'; | ||
400 | size >>= 20 - PAGE_SHIFT; | ||
401 | } | ||
402 | /* RED-PEN: base can be > 32bit */ | ||
403 | len += seq_printf(seq, | ||
404 | "reg%02i: base=0x%06lx000 (%5luMB), size=%5lu%cB, count=%d: %s\n", | ||
405 | i, base, base >> (20 - PAGE_SHIFT), size, factor, | ||
406 | mtrr_usage_table[i], mtrr_attrib_to_str(type)); | ||
407 | } | 411 | } |
412 | if (size < (0x100000 >> PAGE_SHIFT)) { | ||
413 | /* less than 1MB */ | ||
414 | factor = 'K'; | ||
415 | size <<= PAGE_SHIFT - 10; | ||
416 | } else { | ||
417 | factor = 'M'; | ||
418 | size >>= 20 - PAGE_SHIFT; | ||
419 | } | ||
420 | /* Base can be > 32bit */ | ||
421 | len += seq_printf(seq, "reg%02i: base=0x%06lx000 " | ||
422 | "(%5luMB), size=%5lu%cB, count=%d: %s\n", | ||
423 | i, base, base >> (20 - PAGE_SHIFT), size, | ||
424 | factor, mtrr_usage_table[i], | ||
425 | mtrr_attrib_to_str(type)); | ||
408 | } | 426 | } |
409 | return 0; | 427 | return 0; |
410 | } | 428 | } |
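
The unit handling that this hunk untangles, redone in userspace: region sizes are tracked in pages and printed in KB when below 1 MB, in MB otherwise (PAGE_SHIFT = 12 assumed):

#include <stdio.h>

#define PAGE_SHIFT 12

static void show_size(unsigned long pages)
{
	unsigned long size = pages;
	char factor;

	if (size < (0x100000 >> PAGE_SHIFT)) {	/* less than 1MB */
		factor = 'K';
		size <<= PAGE_SHIFT - 10;	/* pages -> KB */
	} else {
		factor = 'M';
		size >>= 20 - PAGE_SHIFT;	/* pages -> MB */
	}
	printf("size=%5lu%cB\n", size, factor);
}

int main(void)
{
	show_size(0x20);	/* 32 pages  -> 128KB */
	show_size(0x4000);	/* 16k pages -> 64MB */
	return 0;
}
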
@@ -422,6 +440,5 @@ static int __init mtrr_if_init(void) | |||
422 | proc_create("mtrr", S_IWUSR | S_IRUGO, NULL, &mtrr_fops); | 440 | proc_create("mtrr", S_IWUSR | S_IRUGO, NULL, &mtrr_fops); |
423 | return 0; | 441 | return 0; |
424 | } | 442 | } |
425 | |||
426 | arch_initcall(mtrr_if_init); | 443 | arch_initcall(mtrr_if_init); |
427 | #endif /* CONFIG_PROC_FS */ | 444 | #endif /* CONFIG_PROC_FS */ |
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index 8fc248b5aeaf..7af0f88a4163 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c | |||
@@ -25,43 +25,48 @@ | |||
25 | Operating System Writer's Guide" (Intel document number 242692), | 25 | Operating System Writer's Guide" (Intel document number 242692), |
26 | section 11.11.7 | 26 | section 11.11.7 |
27 | 27 | ||
28 | This was cleaned and made readable by Patrick Mochel <mochel@osdl.org> | 28 | This was cleaned and made readable by Patrick Mochel <mochel@osdl.org> |
29 | on 6-7 March 2002. | 29 | on 6-7 March 2002. |
30 | Source: Intel Architecture Software Developers Manual, Volume 3: | 30 | Source: Intel Architecture Software Developers Manual, Volume 3: |
31 | System Programming Guide; Section 9.11. (1997 edition - PPro). | 31 | System Programming Guide; Section 9.11. (1997 edition - PPro). |
32 | */ | 32 | */ |
33 | 33 | ||
34 | #define DEBUG | ||
35 | |||
36 | #include <linux/types.h> /* FIXME: kvm_para.h needs this */ | ||
37 | |||
38 | #include <linux/kvm_para.h> | ||
39 | #include <linux/uaccess.h> | ||
34 | #include <linux/module.h> | 40 | #include <linux/module.h> |
41 | #include <linux/mutex.h> | ||
35 | #include <linux/init.h> | 42 | #include <linux/init.h> |
43 | #include <linux/sort.h> | ||
44 | #include <linux/cpu.h> | ||
36 | #include <linux/pci.h> | 45 | #include <linux/pci.h> |
37 | #include <linux/smp.h> | 46 | #include <linux/smp.h> |
38 | #include <linux/cpu.h> | ||
39 | #include <linux/mutex.h> | ||
40 | #include <linux/sort.h> | ||
41 | 47 | ||
48 | #include <asm/processor.h> | ||
42 | #include <asm/e820.h> | 49 | #include <asm/e820.h> |
43 | #include <asm/mtrr.h> | 50 | #include <asm/mtrr.h> |
44 | #include <asm/uaccess.h> | ||
45 | #include <asm/processor.h> | ||
46 | #include <asm/msr.h> | 51 | #include <asm/msr.h> |
47 | #include <asm/kvm_para.h> | 52 | |
48 | #include "mtrr.h" | 53 | #include "mtrr.h" |
49 | 54 | ||
50 | u32 num_var_ranges = 0; | 55 | u32 num_var_ranges; |
51 | 56 | ||
52 | unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES]; | 57 | unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES]; |
53 | static DEFINE_MUTEX(mtrr_mutex); | 58 | static DEFINE_MUTEX(mtrr_mutex); |
54 | 59 | ||
55 | u64 size_or_mask, size_and_mask; | 60 | u64 size_or_mask, size_and_mask; |
56 | 61 | ||
57 | static struct mtrr_ops * mtrr_ops[X86_VENDOR_NUM] = {}; | 62 | static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM]; |
58 | 63 | ||
59 | struct mtrr_ops * mtrr_if = NULL; | 64 | struct mtrr_ops *mtrr_if; |
60 | 65 | ||
61 | static void set_mtrr(unsigned int reg, unsigned long base, | 66 | static void set_mtrr(unsigned int reg, unsigned long base, |
62 | unsigned long size, mtrr_type type); | 67 | unsigned long size, mtrr_type type); |
63 | 68 | ||
64 | void set_mtrr_ops(struct mtrr_ops * ops) | 69 | void set_mtrr_ops(struct mtrr_ops *ops) |
65 | { | 70 | { |
66 | if (ops->vendor && ops->vendor < X86_VENDOR_NUM) | 71 | if (ops->vendor && ops->vendor < X86_VENDOR_NUM) |
67 | mtrr_ops[ops->vendor] = ops; | 72 | mtrr_ops[ops->vendor] = ops; |
@@ -72,30 +77,36 @@ static int have_wrcomb(void) | |||
72 | { | 77 | { |
73 | struct pci_dev *dev; | 78 | struct pci_dev *dev; |
74 | u8 rev; | 79 | u8 rev; |
75 | 80 | ||
76 | if ((dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) != NULL) { | 81 | dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL); |
77 | /* ServerWorks LE chipsets < rev 6 have problems with write-combining | 82 | if (dev != NULL) { |
78 | Don't allow it and leave room for other chipsets to be tagged */ | 83 | /* |
84 | * ServerWorks LE chipsets < rev 6 have problems with | ||
85 | * write-combining. Don't allow it and leave room for other | ||
86 | * chipsets to be tagged | ||
87 | */ | ||
79 | if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS && | 88 | if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS && |
80 | dev->device == PCI_DEVICE_ID_SERVERWORKS_LE) { | 89 | dev->device == PCI_DEVICE_ID_SERVERWORKS_LE) { |
81 | pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev); | 90 | pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev); |
82 | if (rev <= 5) { | 91 | if (rev <= 5) { |
83 | printk(KERN_INFO "mtrr: Serverworks LE rev < 6 detected. Write-combining disabled.\n"); | 92 | pr_info("mtrr: Serverworks LE rev < 6 detected. Write-combining disabled.\n"); |
84 | pci_dev_put(dev); | 93 | pci_dev_put(dev); |
85 | return 0; | 94 | return 0; |
86 | } | 95 | } |
87 | } | 96 | } |
88 | /* Intel 450NX errata # 23. Non ascending cacheline evictions to | 97 | /* |
89 | write combining memory may resulting in data corruption */ | 98 | * Intel 450NX errata #23. Non-ascending cacheline evictions to |
99 | * write-combining memory may result in data corruption |
100 | */ | ||
90 | if (dev->vendor == PCI_VENDOR_ID_INTEL && | 101 | if (dev->vendor == PCI_VENDOR_ID_INTEL && |
91 | dev->device == PCI_DEVICE_ID_INTEL_82451NX) { | 102 | dev->device == PCI_DEVICE_ID_INTEL_82451NX) { |
92 | printk(KERN_INFO "mtrr: Intel 450NX MMC detected. Write-combining disabled.\n"); | 103 | pr_info("mtrr: Intel 450NX MMC detected. Write-combining disabled.\n"); |
93 | pci_dev_put(dev); | 104 | pci_dev_put(dev); |
94 | return 0; | 105 | return 0; |
95 | } | 106 | } |
96 | pci_dev_put(dev); | 107 | pci_dev_put(dev); |
97 | } | 108 | } |
98 | return (mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0); | 109 | return mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0; |
99 | } | 110 | } |
100 | 111 | ||
101 | /* This function returns the number of variable MTRRs */ | 112 | /* This function returns the number of variable MTRRs */ |
@@ -103,12 +114,13 @@ static void __init set_num_var_ranges(void) | |||
103 | { | 114 | { |
104 | unsigned long config = 0, dummy; | 115 | unsigned long config = 0, dummy; |
105 | 116 | ||
106 | if (use_intel()) { | 117 | if (use_intel()) |
107 | rdmsr(MSR_MTRRcap, config, dummy); | 118 | rdmsr(MSR_MTRRcap, config, dummy); |
108 | } else if (is_cpu(AMD)) | 119 | else if (is_cpu(AMD)) |
109 | config = 2; | 120 | config = 2; |
110 | else if (is_cpu(CYRIX) || is_cpu(CENTAUR)) | 121 | else if (is_cpu(CYRIX) || is_cpu(CENTAUR)) |
111 | config = 8; | 122 | config = 8; |
123 | |||
112 | num_var_ranges = config & 0xff; | 124 | num_var_ranges = config & 0xff; |
113 | } | 125 | } |
114 | 126 | ||
@@ -130,10 +142,12 @@ struct set_mtrr_data { | |||
130 | mtrr_type smp_type; | 142 | mtrr_type smp_type; |
131 | }; | 143 | }; |
132 | 144 | ||
145 | /** | ||
146 | * ipi_handler - Synchronisation handler. Executed by "other" CPUs. | ||
147 | * | ||
148 | * Returns nothing. | ||
149 | */ | ||
133 | static void ipi_handler(void *info) | 150 | static void ipi_handler(void *info) |
134 | /* [SUMMARY] Synchronisation handler. Executed by "other" CPUs. | ||
135 | [RETURNS] Nothing. | ||
136 | */ | ||
137 | { | 151 | { |
138 | #ifdef CONFIG_SMP | 152 | #ifdef CONFIG_SMP |
139 | struct set_mtrr_data *data = info; | 153 | struct set_mtrr_data *data = info; |
@@ -142,18 +156,19 @@ static void ipi_handler(void *info) | |||
142 | local_irq_save(flags); | 156 | local_irq_save(flags); |
143 | 157 | ||
144 | atomic_dec(&data->count); | 158 | atomic_dec(&data->count); |
145 | while(!atomic_read(&data->gate)) | 159 | while (!atomic_read(&data->gate)) |
146 | cpu_relax(); | 160 | cpu_relax(); |
147 | 161 | ||
148 | /* The master has cleared me to execute */ | 162 | /* The master has cleared me to execute */ |
149 | if (data->smp_reg != ~0U) | 163 | if (data->smp_reg != ~0U) { |
150 | mtrr_if->set(data->smp_reg, data->smp_base, | 164 | mtrr_if->set(data->smp_reg, data->smp_base, |
151 | data->smp_size, data->smp_type); | 165 | data->smp_size, data->smp_type); |
152 | else | 166 | } else { |
153 | mtrr_if->set_all(); | 167 | mtrr_if->set_all(); |
168 | } | ||
154 | 169 | ||
155 | atomic_dec(&data->count); | 170 | atomic_dec(&data->count); |
156 | while(atomic_read(&data->gate)) | 171 | while (atomic_read(&data->gate)) |
157 | cpu_relax(); | 172 | cpu_relax(); |
158 | 173 | ||
159 | atomic_dec(&data->count); | 174 | atomic_dec(&data->count); |
@@ -161,7 +176,8 @@ static void ipi_handler(void *info) | |||
161 | #endif | 176 | #endif |
162 | } | 177 | } |
163 | 178 | ||
164 | static inline int types_compatible(mtrr_type type1, mtrr_type type2) { | 179 | static inline int types_compatible(mtrr_type type1, mtrr_type type2) |
180 | { | ||
165 | return type1 == MTRR_TYPE_UNCACHABLE || | 181 | return type1 == MTRR_TYPE_UNCACHABLE || |
166 | type2 == MTRR_TYPE_UNCACHABLE || | 182 | type2 == MTRR_TYPE_UNCACHABLE || |
167 | (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) || | 183 | (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) || |
@@ -176,10 +192,10 @@ static inline int types_compatible(mtrr_type type1, mtrr_type type2) { | |||
176 | * @type: mtrr type | 192 | * @type: mtrr type |
177 | * | 193 | * |
178 | * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly: | 194 | * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly: |
179 | * | 195 | * |
180 | * 1. Send IPI to do the following: | 196 | * 1. Send IPI to do the following: |
181 | * 2. Disable Interrupts | 197 | * 2. Disable Interrupts |
182 | * 3. Wait for all procs to do so | 198 | * 3. Wait for all procs to do so |
183 | * 4. Enter no-fill cache mode | 199 | * 4. Enter no-fill cache mode |
184 | * 5. Flush caches | 200 | * 5. Flush caches |
185 | * 6. Clear PGE bit | 201 | * 6. Clear PGE bit |
@@ -189,26 +205,27 @@ static inline int types_compatible(mtrr_type type1, mtrr_type type2) { | |||
189 | * 10. Enable all range registers | 205 | * 10. Enable all range registers |
190 | * 11. Flush all TLBs and caches again | 206 | * 11. Flush all TLBs and caches again |
191 | * 12. Enter normal cache mode and reenable caching | 207 | * 12. Enter normal cache mode and reenable caching |
192 | * 13. Set PGE | 208 | * 13. Set PGE |
193 | * 14. Wait for buddies to catch up | 209 | * 14. Wait for buddies to catch up |
194 | * 15. Enable interrupts. | 210 | * 15. Enable interrupts. |
195 | * | 211 | * |
196 | * What does that mean for us? Well, first we set data.count to the number | 212 | * What does that mean for us? Well, first we set data.count to the number |
197 | * of CPUs. As each CPU disables interrupts, it'll decrement it once. We wait | 213 | * of CPUs. As each CPU disables interrupts, it'll decrement it once. We wait |
198 | * until it hits 0 and proceed. We set the data.gate flag and reset data.count. | 214 | * until it hits 0 and proceed. We set the data.gate flag and reset data.count. |
199 | * Meanwhile, they are waiting for that flag to be set. Once it's set, each | 215 | * Meanwhile, they are waiting for that flag to be set. Once it's set, each |
200 | * CPU goes through the transition of updating MTRRs. The CPU vendors may each do it | 216 | * CPU goes through the transition of updating MTRRs. |
201 | * differently, so we call mtrr_if->set() callback and let them take care of it. | 217 | * The CPU vendors may each do it differently, |
202 | * When they're done, they again decrement data->count and wait for data.gate to | 218 | * so we call mtrr_if->set() callback and let them take care of it. |
203 | * be reset. | 219 | * When they're done, they again decrement data->count and wait for data.gate |
204 | * When we finish, we wait for data.count to hit 0 and toggle the data.gate flag. | 220 | * to be reset. |
221 | * When we finish, we wait for data.count to hit 0 and toggle the data.gate flag. | ||
205 | * Everyone then enables interrupts and we all continue on. | 222 | * Everyone then enables interrupts and we all continue on. |
206 | * | 223 | * |
207 | * Note that the mechanism is the same for UP systems, too; all the SMP stuff | 224 | * Note that the mechanism is the same for UP systems, too; all the SMP stuff |
208 | * becomes nops. | 225 | * becomes nops. |
209 | */ | 226 | */ |
210 | static void set_mtrr(unsigned int reg, unsigned long base, | 227 | static void |
211 | unsigned long size, mtrr_type type) | 228 | set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type) |
212 | { | 229 | { |
213 | struct set_mtrr_data data; | 230 | struct set_mtrr_data data; |
214 | unsigned long flags; | 231 | unsigned long flags; |
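
The count/gate rendezvous described in the comment above can be modeled in userspace with C11 atomics, threads standing in for CPUs and thread creation standing in for the IPI. A sketch of the ordering only; there is no interrupt disabling here, NCPUS is arbitrary, and the spin loops drop the kernel's cpu_relax():

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS 4

static atomic_int count, gate;

static void *other_cpu(void *arg)
{
	atomic_fetch_sub(&count, 1);	/* checked in, irqs "off" */
	while (!atomic_load(&gate))	/* wait for the master's go */
		;
	printf("cpu %ld: mtrr_if->set_all()\n", (long)arg);
	atomic_fetch_sub(&count, 1);	/* update done */
	while (atomic_load(&gate))	/* wait for the gate to drop */
		;
	atomic_fetch_sub(&count, 1);	/* last touch of the shared data */
	return NULL;
}

int main(void)
{
	pthread_t t[NCPUS - 1];
	long i;

	atomic_store(&count, NCPUS - 1);
	atomic_store(&gate, 0);
	for (i = 0; i < NCPUS - 1; i++)
		pthread_create(&t[i], NULL, other_cpu, (void *)i);

	while (atomic_load(&count))	/* wait for everyone to check in */
		;
	atomic_store(&count, NCPUS - 1);
	atomic_store(&gate, 1);		/* unleash the others */
	printf("master: mtrr_if->set()\n");
	while (atomic_load(&count))	/* wait for the updates */
		;
	atomic_store(&count, NCPUS - 1);
	atomic_store(&gate, 0);		/* let everyone leave */
	while (atomic_load(&count))	/* we are last to touch the data */
		;
	for (i = 0; i < NCPUS - 1; i++)
		pthread_join(t[i], NULL);
	return 0;
}
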
@@ -218,121 +235,122 @@ static void set_mtrr(unsigned int reg, unsigned long base, | |||
218 | data.smp_size = size; | 235 | data.smp_size = size; |
219 | data.smp_type = type; | 236 | data.smp_type = type; |
220 | atomic_set(&data.count, num_booting_cpus() - 1); | 237 | atomic_set(&data.count, num_booting_cpus() - 1); |
221 | /* make sure data.count is visible before unleashing other CPUs */ | 238 | |
239 | /* Make sure data.count is visible before unleashing other CPUs */ | ||
222 | smp_wmb(); | 240 | smp_wmb(); |
223 | atomic_set(&data.gate,0); | 241 | atomic_set(&data.gate, 0); |
224 | 242 | ||
225 | /* Start the ball rolling on other CPUs */ | 243 | /* Start the ball rolling on other CPUs */ |
226 | if (smp_call_function(ipi_handler, &data, 0) != 0) | 244 | if (smp_call_function(ipi_handler, &data, 0) != 0) |
227 | panic("mtrr: timed out waiting for other CPUs\n"); | 245 | panic("mtrr: timed out waiting for other CPUs\n"); |
228 | 246 | ||
229 | local_irq_save(flags); | 247 | local_irq_save(flags); |
230 | 248 | ||
231 | while(atomic_read(&data.count)) | 249 | while (atomic_read(&data.count)) |
232 | cpu_relax(); | 250 | cpu_relax(); |
233 | 251 | ||
234 | /* ok, reset count and toggle gate */ | 252 | /* Ok, reset count and toggle gate */ |
235 | atomic_set(&data.count, num_booting_cpus() - 1); | 253 | atomic_set(&data.count, num_booting_cpus() - 1); |
236 | smp_wmb(); | 254 | smp_wmb(); |
237 | atomic_set(&data.gate,1); | 255 | atomic_set(&data.gate, 1); |
238 | 256 | ||
239 | /* do our MTRR business */ | 257 | /* Do our MTRR business */ |
240 | 258 | ||
241 | /* HACK! | 259 | /* |
260 | * HACK! | ||
242 | * We use this same function to initialize the mtrrs on boot. | 261 | * We use this same function to initialize the mtrrs on boot. |
243 | * The state of the boot cpu's mtrrs has been saved, and we want | 262 | * The state of the boot cpu's mtrrs has been saved, and we want |
244 | * to replicate across all the APs. | 263 | * to replicate across all the APs. |
245 | * If we're doing that, @reg is set to something special... | 264 | * If we're doing that, @reg is set to something special...
246 | */ | 265 | */ |
247 | if (reg != ~0U) | 266 | if (reg != ~0U) |
248 | mtrr_if->set(reg,base,size,type); | 267 | mtrr_if->set(reg, base, size, type); |
249 | 268 | ||
250 | /* wait for the others */ | 269 | /* Wait for the others */ |
251 | while(atomic_read(&data.count)) | 270 | while (atomic_read(&data.count)) |
252 | cpu_relax(); | 271 | cpu_relax(); |
253 | 272 | ||
254 | atomic_set(&data.count, num_booting_cpus() - 1); | 273 | atomic_set(&data.count, num_booting_cpus() - 1); |
255 | smp_wmb(); | 274 | smp_wmb(); |
256 | atomic_set(&data.gate,0); | 275 | atomic_set(&data.gate, 0); |
257 | 276 | ||
258 | /* | 277 | /* |
259 | * Wait here for everyone to have seen the gate change | 278 | * Wait here for everyone to have seen the gate change |
260 | * So we're the last ones to touch 'data' | 279 | * So we're the last ones to touch 'data' |
261 | */ | 280 | */ |
262 | while(atomic_read(&data.count)) | 281 | while (atomic_read(&data.count)) |
263 | cpu_relax(); | 282 | cpu_relax(); |
264 | 283 | ||
265 | local_irq_restore(flags); | 284 | local_irq_restore(flags); |
266 | } | 285 | } |
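Stripped of the MTRR specifics, each phase of the rendezvous above boils down to one small pattern. A minimal sketch, assuming the data fields shown in the function (error handling and the final gate reset omitted; the function above is the authoritative version):

    /* Coordinator (the CPU that called set_mtrr), one phase: */
    atomic_set(&data.count, num_booting_cpus() - 1);
    smp_wmb();                         /* publish count before the gate */
    atomic_set(&data.gate, 1);         /* release the other CPUs        */
    while (atomic_read(&data.count))   /* wait for them all to finish   */
            cpu_relax();

    /* Every other CPU, inside ipi_handler(), mirrors it: */
    while (!atomic_read(&data.gate))   /* spin until released           */
            cpu_relax();
    mtrr_if->set(data.smp_reg, data.smp_base, data.smp_size, data.smp_type);
    atomic_dec(&data.count);           /* report this phase as done     */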
267 | 286 | ||
268 | /** | 287 | /** |
269 | * mtrr_add_page - Add a memory type region | 288 | * mtrr_add_page - Add a memory type region |
270 | * @base: Physical base address of region in pages (in units of 4 kB!) | 289 | * @base: Physical base address of region in pages (in units of 4 kB!) |
271 | * @size: Physical size of region in pages (4 kB) | 290 | * @size: Physical size of region in pages (4 kB) |
272 | * @type: Type of MTRR desired | 291 | * @type: Type of MTRR desired |
273 | * @increment: If this is true do usage counting on the region | 292 | * @increment: If this is true do usage counting on the region |
274 | * | 293 | * |
275 | * Memory type region registers control the caching on newer Intel and | 294 | * Memory type region registers control the caching on newer Intel and |
276 | * non-Intel processors. This function allows drivers to request that an | 295 | * non-Intel processors. This function allows drivers to request that an
277 | * MTRR be added. The details and hardware specifics of each processor's | 296 | * MTRR be added. The details and hardware specifics of each processor's
278 | * implementation are hidden from the caller, but nevertheless the | 297 | * implementation are hidden from the caller, but nevertheless the |
279 | * caller should expect to need to provide a power of two size on an | 298 | * caller should expect to need to provide a power of two size on an |
280 | * equivalent power of two boundary. | 299 | * equivalent power of two boundary. |
281 | * | 300 | * |
282 | * If the region cannot be added either because all regions are in use | 301 | * If the region cannot be added either because all regions are in use |
283 | * or the CPU cannot support it a negative value is returned. On success | 302 | * or the CPU cannot support it a negative value is returned. On success |
284 | * the register number for this entry is returned, but should be treated | 303 | * the register number for this entry is returned, but should be treated |
285 | * as a cookie only. | 304 | * as a cookie only. |
286 | * | 305 | * |
287 | * On a multiprocessor machine the changes are made to all processors. | 306 | * On a multiprocessor machine the changes are made to all processors. |
288 | * This is required on x86 by the Intel processors. | 307 | * This is required on x86 by the Intel processors. |
289 | * | 308 | * |
290 | * The available types are | 309 | * The available types are |
291 | * | 310 | * |
292 | * %MTRR_TYPE_UNCACHABLE - No caching | 311 | * %MTRR_TYPE_UNCACHABLE - No caching |
293 | * | 312 | * |
294 | * %MTRR_TYPE_WRBACK - Write data back in bursts whenever | 313 | * %MTRR_TYPE_WRBACK - Write data back in bursts whenever |
295 | * | 314 | * |
296 | * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts | 315 | * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts |
297 | * | 316 | * |
298 | * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes | 317 | * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes |
299 | * | 318 | * |
300 | * BUGS: Needs a quiet flag for the cases where drivers do not mind | 319 | * BUGS: Needs a quiet flag for the cases where drivers do not mind |
301 | * failures and do not wish system log messages to be sent. | 320 | * failures and do not wish system log messages to be sent. |
302 | */ | 321 | */ |
303 | 322 | int mtrr_add_page(unsigned long base, unsigned long size, | |
304 | int mtrr_add_page(unsigned long base, unsigned long size, | ||
305 | unsigned int type, bool increment) | 323 | unsigned int type, bool increment) |
306 | { | 324 | { |
325 | unsigned long lbase, lsize; | ||
307 | int i, replace, error; | 326 | int i, replace, error; |
308 | mtrr_type ltype; | 327 | mtrr_type ltype; |
309 | unsigned long lbase, lsize; | ||
310 | 328 | ||
311 | if (!mtrr_if) | 329 | if (!mtrr_if) |
312 | return -ENXIO; | 330 | return -ENXIO; |
313 | 331 | ||
314 | if ((error = mtrr_if->validate_add_page(base,size,type))) | 332 | error = mtrr_if->validate_add_page(base, size, type); |
333 | if (error) | ||
315 | return error; | 334 | return error; |
316 | 335 | ||
317 | if (type >= MTRR_NUM_TYPES) { | 336 | if (type >= MTRR_NUM_TYPES) { |
318 | printk(KERN_WARNING "mtrr: type: %u invalid\n", type); | 337 | pr_warning("mtrr: type: %u invalid\n", type); |
319 | return -EINVAL; | 338 | return -EINVAL; |
320 | } | 339 | } |
321 | 340 | ||
322 | /* If the type is WC, check that this processor supports it */ | 341 | /* If the type is WC, check that this processor supports it */ |
323 | if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) { | 342 | if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) { |
324 | printk(KERN_WARNING | 343 | pr_warning("mtrr: your processor doesn't support write-combining\n"); |
325 | "mtrr: your processor doesn't support write-combining\n"); | ||
326 | return -ENOSYS; | 344 | return -ENOSYS; |
327 | } | 345 | } |
328 | 346 | ||
329 | if (!size) { | 347 | if (!size) { |
330 | printk(KERN_WARNING "mtrr: zero sized request\n"); | 348 | pr_warning("mtrr: zero sized request\n"); |
331 | return -EINVAL; | 349 | return -EINVAL; |
332 | } | 350 | } |
333 | 351 | ||
334 | if (base & size_or_mask || size & size_or_mask) { | 352 | if (base & size_or_mask || size & size_or_mask) { |
335 | printk(KERN_WARNING "mtrr: base or size exceeds the MTRR width\n"); | 353 | pr_warning("mtrr: base or size exceeds the MTRR width\n"); |
336 | return -EINVAL; | 354 | return -EINVAL; |
337 | } | 355 | } |
338 | 356 | ||
@@ -341,36 +359,40 @@ int mtrr_add_page(unsigned long base, unsigned long size, | |||
341 | 359 | ||
342 | /* No CPU hotplug when we change MTRR entries */ | 360 | /* No CPU hotplug when we change MTRR entries */ |
343 | get_online_cpus(); | 361 | get_online_cpus(); |
344 | /* Search for existing MTRR */ | 362 | |
363 | /* Search for existing MTRR */ | ||
345 | mutex_lock(&mtrr_mutex); | 364 | mutex_lock(&mtrr_mutex); |
346 | for (i = 0; i < num_var_ranges; ++i) { | 365 | for (i = 0; i < num_var_ranges; ++i) { |
347 | mtrr_if->get(i, &lbase, &lsize, <ype); | 366 | mtrr_if->get(i, &lbase, &lsize, <ype); |
348 | if (!lsize || base > lbase + lsize - 1 || base + size - 1 < lbase) | 367 | if (!lsize || base > lbase + lsize - 1 || |
368 | base + size - 1 < lbase) | ||
349 | continue; | 369 | continue; |
350 | /* At this point we know there is some kind of overlap/enclosure */ | 370 | /* |
371 | * At this point we know there is some kind of | ||
372 | * overlap/enclosure | ||
373 | */ | ||
351 | if (base < lbase || base + size - 1 > lbase + lsize - 1) { | 374 | if (base < lbase || base + size - 1 > lbase + lsize - 1) { |
352 | if (base <= lbase && base + size - 1 >= lbase + lsize - 1) { | 375 | if (base <= lbase && |
376 | base + size - 1 >= lbase + lsize - 1) { | ||
353 | /* New region encloses an existing region */ | 377 | /* New region encloses an existing region */ |
354 | if (type == ltype) { | 378 | if (type == ltype) { |
355 | replace = replace == -1 ? i : -2; | 379 | replace = replace == -1 ? i : -2; |
356 | continue; | 380 | continue; |
357 | } | 381 | } else if (types_compatible(type, ltype)) |
358 | else if (types_compatible(type, ltype)) | ||
359 | continue; | 382 | continue; |
360 | } | 383 | } |
361 | printk(KERN_WARNING | 384 | pr_warning("mtrr: 0x%lx000,0x%lx000 overlaps existing" |
362 | "mtrr: 0x%lx000,0x%lx000 overlaps existing" | 385 | " 0x%lx000,0x%lx000\n", base, size, lbase, |
363 | " 0x%lx000,0x%lx000\n", base, size, lbase, | 386 | lsize); |
364 | lsize); | ||
365 | goto out; | 387 | goto out; |
366 | } | 388 | } |
367 | /* New region is enclosed by an existing region */ | 389 | /* New region is enclosed by an existing region */ |
368 | if (ltype != type) { | 390 | if (ltype != type) { |
369 | if (types_compatible(type, ltype)) | 391 | if (types_compatible(type, ltype)) |
370 | continue; | 392 | continue; |
371 | printk (KERN_WARNING "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n", | 393 | pr_warning("mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n", |
372 | base, size, mtrr_attrib_to_str(ltype), | 394 | base, size, mtrr_attrib_to_str(ltype), |
373 | mtrr_attrib_to_str(type)); | 395 | mtrr_attrib_to_str(type)); |
374 | goto out; | 396 | goto out; |
375 | } | 397 | } |
376 | if (increment) | 398 | if (increment) |
@@ -378,7 +400,7 @@ int mtrr_add_page(unsigned long base, unsigned long size, | |||
378 | error = i; | 400 | error = i; |
379 | goto out; | 401 | goto out; |
380 | } | 402 | } |
381 | /* Search for an empty MTRR */ | 403 | /* Search for an empty MTRR */ |
382 | i = mtrr_if->get_free_region(base, size, replace); | 404 | i = mtrr_if->get_free_region(base, size, replace); |
383 | if (i >= 0) { | 405 | if (i >= 0) { |
384 | set_mtrr(i, base, size, type); | 406 | set_mtrr(i, base, size, type); |
@@ -393,8 +415,9 @@ int mtrr_add_page(unsigned long base, unsigned long size, | |||
393 | mtrr_usage_table[replace] = 0; | 415 | mtrr_usage_table[replace] = 0; |
394 | } | 416 | } |
395 | } | 417 | } |
396 | } else | 418 | } else { |
397 | printk(KERN_INFO "mtrr: no more MTRRs available\n"); | 419 | pr_info("mtrr: no more MTRRs available\n"); |
420 | } | ||
398 | error = i; | 421 | error = i; |
399 | out: | 422 | out: |
400 | mutex_unlock(&mtrr_mutex); | 423 | mutex_unlock(&mtrr_mutex); |
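To make the page units concrete: a driver asking for a 16 MB write-combining window at physical address 0xd0000000 passes page numbers, not byte addresses. A hypothetical call (addresses invented for illustration):

    /* base and size are in 4 kB pages, i.e. byte values >> PAGE_SHIFT */
    int reg = mtrr_add_page(0xd0000000UL >> PAGE_SHIFT,  /* 0xd0000 pages */
                            (16UL << 20) >> PAGE_SHIFT,  /* 0x1000 pages  */
                            MTRR_TYPE_WRCOMB, true);
    if (reg < 0)
            return reg;  /* all registers busy, or WC unsupported */

Both values satisfy the power-of-two rule from the kernel-doc above: the size is 4096 pages and the base is aligned on a 16 MB boundary.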
@@ -405,10 +428,8 @@ int mtrr_add_page(unsigned long base, unsigned long size, | |||
405 | static int mtrr_check(unsigned long base, unsigned long size) | 428 | static int mtrr_check(unsigned long base, unsigned long size) |
406 | { | 429 | { |
407 | if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) { | 430 | if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) { |
408 | printk(KERN_WARNING | 431 | pr_warning("mtrr: size and base must be multiples of 4 kiB\n"); |
409 | "mtrr: size and base must be multiples of 4 kiB\n"); | 432 | pr_debug("mtrr: size: 0x%lx base: 0x%lx\n", size, base); |
410 | printk(KERN_DEBUG | ||
411 | "mtrr: size: 0x%lx base: 0x%lx\n", size, base); | ||
412 | dump_stack(); | 433 | dump_stack(); |
413 | return -1; | 434 | return -1; |
414 | } | 435 | } |
@@ -416,66 +437,64 @@ static int mtrr_check(unsigned long base, unsigned long size) | |||
416 | } | 437 | } |
417 | 438 | ||
418 | /** | 439 | /** |
419 | * mtrr_add - Add a memory type region | 440 | * mtrr_add - Add a memory type region |
420 | * @base: Physical base address of region | 441 | * @base: Physical base address of region |
421 | * @size: Physical size of region | 442 | * @size: Physical size of region |
422 | * @type: Type of MTRR desired | 443 | * @type: Type of MTRR desired |
423 | * @increment: If this is true do usage counting on the region | 444 | * @increment: If this is true do usage counting on the region |
424 | * | 445 | * |
425 | * Memory type region registers control the caching on newer Intel and | 446 | * Memory type region registers control the caching on newer Intel and |
426 | * non-Intel processors. This function allows drivers to request that an | 447 | * non-Intel processors. This function allows drivers to request that an
427 | * MTRR be added. The details and hardware specifics of each processor's | 448 | * MTRR be added. The details and hardware specifics of each processor's
428 | * implementation are hidden from the caller, but nevertheless the | 449 | * implementation are hidden from the caller, but nevertheless the |
429 | * caller should expect to need to provide a power of two size on an | 450 | * caller should expect to need to provide a power of two size on an |
430 | * equivalent power of two boundary. | 451 | * equivalent power of two boundary. |
431 | * | 452 | * |
432 | * If the region cannot be added either because all regions are in use | 453 | * If the region cannot be added either because all regions are in use |
433 | * or the CPU cannot support it a negative value is returned. On success | 454 | * or the CPU cannot support it a negative value is returned. On success |
434 | * the register number for this entry is returned, but should be treated | 455 | * the register number for this entry is returned, but should be treated |
435 | * as a cookie only. | 456 | * as a cookie only. |
436 | * | 457 | * |
437 | * On a multiprocessor machine the changes are made to all processors. | 458 | * On a multiprocessor machine the changes are made to all processors. |
438 | * This is required on x86 by the Intel processors. | 459 | * This is required on x86 by the Intel processors. |
439 | * | 460 | * |
440 | * The available types are | 461 | * The available types are |
441 | * | 462 | * |
442 | * %MTRR_TYPE_UNCACHABLE - No caching | 463 | * %MTRR_TYPE_UNCACHABLE - No caching |
443 | * | 464 | * |
444 | * %MTRR_TYPE_WRBACK - Write data back in bursts whenever | 465 | * %MTRR_TYPE_WRBACK - Write data back in bursts whenever |
445 | * | 466 | * |
446 | * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts | 467 | * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts |
447 | * | 468 | * |
448 | * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes | 469 | * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes |
449 | * | 470 | * |
450 | * BUGS: Needs a quiet flag for the cases where drivers do not mind | 471 | * BUGS: Needs a quiet flag for the cases where drivers do not mind |
451 | * failures and do not wish system log messages to be sent. | 472 | * failures and do not wish system log messages to be sent. |
452 | */ | 473 | */ |
453 | 474 | int mtrr_add(unsigned long base, unsigned long size, unsigned int type, | |
454 | int | 475 | bool increment) |
455 | mtrr_add(unsigned long base, unsigned long size, unsigned int type, | ||
456 | bool increment) | ||
457 | { | 476 | { |
458 | if (mtrr_check(base, size)) | 477 | if (mtrr_check(base, size)) |
459 | return -EINVAL; | 478 | return -EINVAL; |
460 | return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type, | 479 | return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type, |
461 | increment); | 480 | increment); |
462 | } | 481 | } |
482 | EXPORT_SYMBOL(mtrr_add); | ||
463 | 483 | ||
464 | /** | 484 | /** |
465 | * mtrr_del_page - delete a memory type region | 485 | * mtrr_del_page - delete a memory type region |
466 | * @reg: Register returned by mtrr_add | 486 | * @reg: Register returned by mtrr_add |
467 | * @base: Physical base address | 487 | * @base: Physical base address |
468 | * @size: Size of region | 488 | * @size: Size of region |
469 | * | 489 | * |
470 | * If register is supplied then base and size are ignored. This is | 490 | * If register is supplied then base and size are ignored. This is |
471 | * how drivers should call it. | 491 | * how drivers should call it. |
472 | * | 492 | * |
473 | * Releases an MTRR region. If the usage count drops to zero the | 493 | * Releases an MTRR region. If the usage count drops to zero the |
474 | * register is freed and the region returns to default state. | 494 | * register is freed and the region returns to default state. |
475 | * On success the register is returned, on failure a negative error | 495 | * On success the register is returned, on failure a negative error |
476 | * code. | 496 | * code. |
477 | */ | 497 | */ |
478 | |||
479 | int mtrr_del_page(int reg, unsigned long base, unsigned long size) | 498 | int mtrr_del_page(int reg, unsigned long base, unsigned long size) |
480 | { | 499 | { |
481 | int i, max; | 500 | int i, max; |
@@ -500,22 +519,22 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size) | |||
500 | } | 519 | } |
501 | } | 520 | } |
502 | if (reg < 0) { | 521 | if (reg < 0) { |
503 | printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base, | 522 | pr_debug("mtrr: no MTRR for %lx000,%lx000 found\n", |
504 | size); | 523 | base, size); |
505 | goto out; | 524 | goto out; |
506 | } | 525 | } |
507 | } | 526 | } |
508 | if (reg >= max) { | 527 | if (reg >= max) { |
509 | printk(KERN_WARNING "mtrr: register: %d too big\n", reg); | 528 | pr_warning("mtrr: register: %d too big\n", reg); |
510 | goto out; | 529 | goto out; |
511 | } | 530 | } |
512 | mtrr_if->get(reg, &lbase, &lsize, <ype); | 531 | mtrr_if->get(reg, &lbase, &lsize, <ype); |
513 | if (lsize < 1) { | 532 | if (lsize < 1) { |
514 | printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg); | 533 | pr_warning("mtrr: MTRR %d not used\n", reg); |
515 | goto out; | 534 | goto out; |
516 | } | 535 | } |
517 | if (mtrr_usage_table[reg] < 1) { | 536 | if (mtrr_usage_table[reg] < 1) { |
518 | printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg); | 537 | pr_warning("mtrr: reg: %d has count=0\n", reg); |
519 | goto out; | 538 | goto out; |
520 | } | 539 | } |
521 | if (--mtrr_usage_table[reg] < 1) | 540 | if (--mtrr_usage_table[reg] < 1) |
@@ -526,33 +545,31 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size) | |||
526 | put_online_cpus(); | 545 | put_online_cpus(); |
527 | return error; | 546 | return error; |
528 | } | 547 | } |
548 | |||
529 | /** | 549 | /** |
530 | * mtrr_del - delete a memory type region | 550 | * mtrr_del - delete a memory type region |
531 | * @reg: Register returned by mtrr_add | 551 | * @reg: Register returned by mtrr_add |
532 | * @base: Physical base address | 552 | * @base: Physical base address |
533 | * @size: Size of region | 553 | * @size: Size of region |
534 | * | 554 | * |
535 | * If register is supplied then base and size are ignored. This is | 555 | * If register is supplied then base and size are ignored. This is |
536 | * how drivers should call it. | 556 | * how drivers should call it. |
537 | * | 557 | * |
538 | * Releases an MTRR region. If the usage count drops to zero the | 558 | * Releases an MTRR region. If the usage count drops to zero the |
539 | * register is freed and the region returns to default state. | 559 | * register is freed and the region returns to default state. |
540 | * On success the register is returned, on failure a negative error | 560 | * On success the register is returned, on failure a negative error |
541 | * code. | 561 | * code. |
542 | */ | 562 | */ |
543 | 563 | int mtrr_del(int reg, unsigned long base, unsigned long size) | |
544 | int | ||
545 | mtrr_del(int reg, unsigned long base, unsigned long size) | ||
546 | { | 564 | { |
547 | if (mtrr_check(base, size)) | 565 | if (mtrr_check(base, size)) |
548 | return -EINVAL; | 566 | return -EINVAL; |
549 | return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT); | 567 | return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT); |
550 | } | 568 | } |
551 | |||
552 | EXPORT_SYMBOL(mtrr_add); | ||
553 | EXPORT_SYMBOL(mtrr_del); | 569 | EXPORT_SYMBOL(mtrr_del); |
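Taken together, the usual driver pattern is an mtrr_add() at probe time and a matching mtrr_del() at teardown, keyed by the returned cookie. An illustrative pairing (fb_phys and fb_len are invented names):

    int wc_cookie = mtrr_add(fb_phys, fb_len, MTRR_TYPE_WRCOMB, true);
    /* ... use the write-combined mapping ... */
    if (wc_cookie >= 0)
            mtrr_del(wc_cookie, fb_phys, fb_len); /* base/size ignored when reg >= 0 */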
554 | 570 | ||
555 | /* HACK ALERT! | 571 | /* |
572 | * HACK ALERT! | ||
556 | * These should be called implicitly, but we can't do that until all of the | 573 | * These should be called implicitly, but we can't do that until all of the
557 | * initcall stuff is done... | 574 | * initcall stuff is done...
558 | */ | 575 | */ |
@@ -576,29 +593,28 @@ struct mtrr_value { | |||
576 | 593 | ||
577 | static struct mtrr_value mtrr_value[MTRR_MAX_VAR_RANGES]; | 594 | static struct mtrr_value mtrr_value[MTRR_MAX_VAR_RANGES]; |
578 | 595 | ||
579 | static int mtrr_save(struct sys_device * sysdev, pm_message_t state) | 596 | static int mtrr_save(struct sys_device *sysdev, pm_message_t state) |
580 | { | 597 | { |
581 | int i; | 598 | int i; |
582 | 599 | ||
583 | for (i = 0; i < num_var_ranges; i++) { | 600 | for (i = 0; i < num_var_ranges; i++) { |
584 | mtrr_if->get(i, | 601 | mtrr_if->get(i, &mtrr_value[i].lbase, |
585 | &mtrr_value[i].lbase, | 602 | &mtrr_value[i].lsize, |
586 | &mtrr_value[i].lsize, | 603 | &mtrr_value[i].ltype); |
587 | &mtrr_value[i].ltype); | ||
588 | } | 604 | } |
589 | return 0; | 605 | return 0; |
590 | } | 606 | } |
591 | 607 | ||
592 | static int mtrr_restore(struct sys_device * sysdev) | 608 | static int mtrr_restore(struct sys_device *sysdev) |
593 | { | 609 | { |
594 | int i; | 610 | int i; |
595 | 611 | ||
596 | for (i = 0; i < num_var_ranges; i++) { | 612 | for (i = 0; i < num_var_ranges; i++) { |
597 | if (mtrr_value[i].lsize) | 613 | if (mtrr_value[i].lsize) { |
598 | set_mtrr(i, | 614 | set_mtrr(i, mtrr_value[i].lbase, |
599 | mtrr_value[i].lbase, | 615 | mtrr_value[i].lsize, |
600 | mtrr_value[i].lsize, | 616 | mtrr_value[i].ltype); |
601 | mtrr_value[i].ltype); | 617 | } |
602 | } | 618 | } |
603 | return 0; | 619 | return 0; |
604 | } | 620 | } |
@@ -615,26 +631,29 @@ int __initdata changed_by_mtrr_cleanup; | |||
615 | /** | 631 | /** |
616 | * mtrr_bp_init - initialize mtrrs on the boot CPU | 632 | * mtrr_bp_init - initialize mtrrs on the boot CPU |
617 | * | 633 | * |
618 | * This needs to be called early, before any of the other CPUs are | 634 | * This needs to be called early, before any of the other CPUs are
619 | * initialized (i.e. before smp_init()). | 635 | * initialized (i.e. before smp_init()). |
620 | * | 636 | * |
621 | */ | 637 | */ |
622 | void __init mtrr_bp_init(void) | 638 | void __init mtrr_bp_init(void) |
623 | { | 639 | { |
624 | u32 phys_addr; | 640 | u32 phys_addr; |
641 | |||
625 | init_ifs(); | 642 | init_ifs(); |
626 | 643 | ||
627 | phys_addr = 32; | 644 | phys_addr = 32; |
628 | 645 | ||
629 | if (cpu_has_mtrr) { | 646 | if (cpu_has_mtrr) { |
630 | mtrr_if = &generic_mtrr_ops; | 647 | mtrr_if = &generic_mtrr_ops; |
631 | size_or_mask = 0xff000000; /* 36 bits */ | 648 | size_or_mask = 0xff000000; /* 36 bits */ |
632 | size_and_mask = 0x00f00000; | 649 | size_and_mask = 0x00f00000; |
633 | phys_addr = 36; | 650 | phys_addr = 36; |
634 | 651 | ||
635 | /* This is an AMD specific MSR, but we assume(hope?) that | 652 | /* |
636 | Intel will implement it to when they extend the address | 653 | * This is an AMD specific MSR, but we assume(hope?) that |
637 | bus of the Xeon. */ | 654 | * Intel will implement it too when they extend the address
655 | * bus of the Xeon. | ||
656 | */ | ||
638 | if (cpuid_eax(0x80000000) >= 0x80000008) { | 657 | if (cpuid_eax(0x80000000) >= 0x80000008) { |
639 | phys_addr = cpuid_eax(0x80000008) & 0xff; | 658 | phys_addr = cpuid_eax(0x80000008) & 0xff; |
640 | /* CPUID workaround for Intel 0F33/0F34 CPU */ | 659 | /* CPUID workaround for Intel 0F33/0F34 CPU */ |
@@ -649,9 +668,11 @@ void __init mtrr_bp_init(void) | |||
649 | size_and_mask = ~size_or_mask & 0xfffff00000ULL; | 668 | size_and_mask = ~size_or_mask & 0xfffff00000ULL; |
650 | } else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR && | 669 | } else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR && |
651 | boot_cpu_data.x86 == 6) { | 670 | boot_cpu_data.x86 == 6) { |
652 | /* VIA C* family have Intel style MTRRs, but | 671 | /* |
653 | don't support PAE */ | 672 | * VIA C* family have Intel style MTRRs, |
654 | size_or_mask = 0xfff00000; /* 32 bits */ | 673 | * but don't support PAE |
674 | */ | ||
675 | size_or_mask = 0xfff00000; /* 32 bits */ | ||
655 | size_and_mask = 0; | 676 | size_and_mask = 0; |
656 | phys_addr = 32; | 677 | phys_addr = 32; |
657 | } | 678 | } |
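The hard-coded masks encode how many significant bits a page number has for each address width: with PAGE_SHIFT == 12, phys_addr == 36 leaves 24 valid bits (hence 0xff000000) and phys_addr == 32 leaves 20 (hence 0xfff00000). A sketch of the arithmetic behind the constants (the literals above remain authoritative):

    /* A page number carries (phys_addr - PAGE_SHIFT) significant bits. */
    u64 valid   = (1ULL << (phys_addr - PAGE_SHIFT)) - 1;
    u64 or_mask = ~valid;   /* bits that must be zero in base and size  */
    /* phys_addr == 36: low 32 bits of or_mask == 0xff000000            */
    /* phys_addr == 32: low 32 bits of or_mask == 0xfff00000            */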
@@ -694,7 +715,6 @@ void __init mtrr_bp_init(void) | |||
694 | changed_by_mtrr_cleanup = 1; | 715 | changed_by_mtrr_cleanup = 1; |
695 | mtrr_if->set_all(); | 716 | mtrr_if->set_all(); |
696 | } | 717 | } |
697 | |||
698 | } | 718 | } |
699 | } | 719 | } |
700 | } | 720 | } |
@@ -706,12 +726,17 @@ void mtrr_ap_init(void) | |||
706 | if (!mtrr_if || !use_intel()) | 726 | if (!mtrr_if || !use_intel()) |
707 | return; | 727 | return; |
708 | /* | 728 | /* |
709 | * Ideally we should hold mtrr_mutex here to avoid mtrr entries changed, | 729 | * Ideally we should hold mtrr_mutex here to avoid MTRR entries
710 | * but this routine will be called in cpu boot time, holding the lock | 730 | * being changed, but this routine is called at cpu boot time, and
711 | * breaks it. This routine is called in two cases: 1.very earily time | 731 | * holding the lock there breaks it.
712 | * of software resume, when there absolutely isn't mtrr entry changes; | 732 | * |
713 | * 2.cpu hotadd time. We let mtrr_add/del_page hold cpuhotplug lock to | 733 | * This routine is called in two cases: |
714 | * prevent mtrr entry changes | 734 | * |
735 | * 1. very early time of software resume, when there absolutely | ||
736 | * are no MTRR entry changes; | ||
737 | * | ||
738 | * 2. cpu hotadd time. We let mtrr_add/del_page hold cpuhotplug | ||
739 | * lock to prevent mtrr entry changes | ||
715 | */ | 740 | */ |
716 | local_irq_save(flags); | 741 | local_irq_save(flags); |
717 | 742 | ||
@@ -732,19 +757,23 @@ static int __init mtrr_init_finialize(void) | |||
732 | { | 757 | { |
733 | if (!mtrr_if) | 758 | if (!mtrr_if) |
734 | return 0; | 759 | return 0; |
760 | |||
735 | if (use_intel()) { | 761 | if (use_intel()) { |
736 | if (!changed_by_mtrr_cleanup) | 762 | if (!changed_by_mtrr_cleanup) |
737 | mtrr_state_warn(); | 763 | mtrr_state_warn(); |
738 | } else { | 764 | return 0; |
739 | /* The CPUs haven't MTRR and seem to not support SMP. They have | ||
740 | * specific drivers, we use a tricky method to support | ||
741 | * suspend/resume for them. | ||
742 | * TBD: is there any system with such CPU which supports | ||
743 | * suspend/resume? if no, we should remove the code. | ||
744 | */ | ||
745 | sysdev_driver_register(&cpu_sysdev_class, | ||
746 | &mtrr_sysdev_driver); | ||
747 | } | 765 | } |
766 | |||
767 | /* | ||
768 | * These CPUs have no MTRRs and seem to not support SMP. They have | ||
769 | * specific drivers; we use a tricky method to support | ||
770 | * suspend/resume for them. | ||
771 | * | ||
772 | * TBD: is there any system with such a CPU which supports | ||
773 | * suspend/resume? If not, we should remove the code. | ||
774 | */ | ||
775 | sysdev_driver_register(&cpu_sysdev_class, &mtrr_sysdev_driver); | ||
776 | |||
748 | return 0; | 777 | return 0; |
749 | } | 778 | } |
750 | subsys_initcall(mtrr_init_finialize); | 779 | subsys_initcall(mtrr_init_finialize); |
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h index 7538b767f206..a501dee9a87a 100644 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * local mtrr defines. | 2 | * local MTRR defines. |
3 | */ | 3 | */ |
4 | 4 | ||
5 | #include <linux/types.h> | 5 | #include <linux/types.h> |
@@ -14,13 +14,12 @@ extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES]; | |||
14 | struct mtrr_ops { | 14 | struct mtrr_ops { |
15 | u32 vendor; | 15 | u32 vendor; |
16 | u32 use_intel_if; | 16 | u32 use_intel_if; |
17 | // void (*init)(void); | ||
18 | void (*set)(unsigned int reg, unsigned long base, | 17 | void (*set)(unsigned int reg, unsigned long base, |
19 | unsigned long size, mtrr_type type); | 18 | unsigned long size, mtrr_type type); |
20 | void (*set_all)(void); | 19 | void (*set_all)(void); |
21 | 20 | ||
22 | void (*get)(unsigned int reg, unsigned long *base, | 21 | void (*get)(unsigned int reg, unsigned long *base, |
23 | unsigned long *size, mtrr_type * type); | 22 | unsigned long *size, mtrr_type *type); |
24 | int (*get_free_region)(unsigned long base, unsigned long size, | 23 | int (*get_free_region)(unsigned long base, unsigned long size, |
25 | int replace_reg); | 24 | int replace_reg); |
26 | int (*validate_add_page)(unsigned long base, unsigned long size, | 25 | int (*validate_add_page)(unsigned long base, unsigned long size, |
@@ -39,11 +38,11 @@ extern int positive_have_wrcomb(void); | |||
39 | 38 | ||
40 | /* library functions for processor-specific routines */ | 39 | /* library functions for processor-specific routines */ |
41 | struct set_mtrr_context { | 40 | struct set_mtrr_context { |
42 | unsigned long flags; | 41 | unsigned long flags; |
43 | unsigned long cr4val; | 42 | unsigned long cr4val; |
44 | u32 deftype_lo; | 43 | u32 deftype_lo; |
45 | u32 deftype_hi; | 44 | u32 deftype_hi; |
46 | u32 ccr3; | 45 | u32 ccr3; |
47 | }; | 46 | }; |
48 | 47 | ||
49 | void set_mtrr_done(struct set_mtrr_context *ctxt); | 48 | void set_mtrr_done(struct set_mtrr_context *ctxt); |
@@ -54,10 +53,10 @@ void fill_mtrr_var_range(unsigned int index, | |||
54 | u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi); | 53 | u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi); |
55 | void get_mtrr_state(void); | 54 | void get_mtrr_state(void); |
56 | 55 | ||
57 | extern void set_mtrr_ops(struct mtrr_ops * ops); | 56 | extern void set_mtrr_ops(struct mtrr_ops *ops); |
58 | 57 | ||
59 | extern u64 size_or_mask, size_and_mask; | 58 | extern u64 size_or_mask, size_and_mask; |
60 | extern struct mtrr_ops * mtrr_if; | 59 | extern struct mtrr_ops *mtrr_if; |
61 | 60 | ||
62 | #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd) | 61 | #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd) |
63 | #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1) | 62 | #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1) |
diff --git a/arch/x86/kernel/cpu/mtrr/state.c b/arch/x86/kernel/cpu/mtrr/state.c index 1f5fb1588d1f..dfc80b4e6b0d 100644 --- a/arch/x86/kernel/cpu/mtrr/state.c +++ b/arch/x86/kernel/cpu/mtrr/state.c | |||
@@ -1,24 +1,25 @@ | |||
1 | #include <linux/mm.h> | ||
2 | #include <linux/init.h> | 1 | #include <linux/init.h> |
3 | #include <asm/io.h> | 2 | #include <linux/io.h> |
4 | #include <asm/mtrr.h> | 3 | #include <linux/mm.h> |
5 | #include <asm/msr.h> | 4 | |
6 | #include <asm/processor-cyrix.h> | 5 | #include <asm/processor-cyrix.h> |
7 | #include <asm/processor-flags.h> | 6 | #include <asm/processor-flags.h> |
8 | #include "mtrr.h" | 7 | #include <asm/mtrr.h> |
8 | #include <asm/msr.h> | ||
9 | 9 | ||
10 | #include "mtrr.h" | ||
10 | 11 | ||
11 | /* Put the processor into a state where MTRRs can be safely set */ | 12 | /* Put the processor into a state where MTRRs can be safely set */ |
12 | void set_mtrr_prepare_save(struct set_mtrr_context *ctxt) | 13 | void set_mtrr_prepare_save(struct set_mtrr_context *ctxt) |
13 | { | 14 | { |
14 | unsigned int cr0; | 15 | unsigned int cr0; |
15 | 16 | ||
16 | /* Disable interrupts locally */ | 17 | /* Disable interrupts locally */ |
17 | local_irq_save(ctxt->flags); | 18 | local_irq_save(ctxt->flags); |
18 | 19 | ||
19 | if (use_intel() || is_cpu(CYRIX)) { | 20 | if (use_intel() || is_cpu(CYRIX)) { |
20 | 21 | ||
21 | /* Save value of CR4 and clear Page Global Enable (bit 7) */ | 22 | /* Save value of CR4 and clear Page Global Enable (bit 7) */ |
22 | if (cpu_has_pge) { | 23 | if (cpu_has_pge) { |
23 | ctxt->cr4val = read_cr4(); | 24 | ctxt->cr4val = read_cr4(); |
24 | write_cr4(ctxt->cr4val & ~X86_CR4_PGE); | 25 | write_cr4(ctxt->cr4val & ~X86_CR4_PGE); |
@@ -33,50 +34,61 @@ void set_mtrr_prepare_save(struct set_mtrr_context *ctxt) | |||
33 | write_cr0(cr0); | 34 | write_cr0(cr0); |
34 | wbinvd(); | 35 | wbinvd(); |
35 | 36 | ||
36 | if (use_intel()) | 37 | if (use_intel()) { |
37 | /* Save MTRR state */ | 38 | /* Save MTRR state */ |
38 | rdmsr(MSR_MTRRdefType, ctxt->deftype_lo, ctxt->deftype_hi); | 39 | rdmsr(MSR_MTRRdefType, ctxt->deftype_lo, ctxt->deftype_hi); |
39 | else | 40 | } else { |
40 | /* Cyrix ARRs - everything else were excluded at the top */ | 41 | /* |
42 | * Cyrix ARRs - | ||
43 | * everything else was excluded at the top | ||
44 | */ | ||
41 | ctxt->ccr3 = getCx86(CX86_CCR3); | 45 | ctxt->ccr3 = getCx86(CX86_CCR3); |
46 | } | ||
42 | } | 47 | } |
43 | } | 48 | } |
44 | 49 | ||
45 | void set_mtrr_cache_disable(struct set_mtrr_context *ctxt) | 50 | void set_mtrr_cache_disable(struct set_mtrr_context *ctxt) |
46 | { | 51 | { |
47 | if (use_intel()) | 52 | if (use_intel()) { |
48 | /* Disable MTRRs, and set the default type to uncached */ | 53 | /* Disable MTRRs, and set the default type to uncached */ |
49 | mtrr_wrmsr(MSR_MTRRdefType, ctxt->deftype_lo & 0xf300UL, | 54 | mtrr_wrmsr(MSR_MTRRdefType, ctxt->deftype_lo & 0xf300UL, |
50 | ctxt->deftype_hi); | 55 | ctxt->deftype_hi); |
51 | else if (is_cpu(CYRIX)) | 56 | } else { |
52 | /* Cyrix ARRs - everything else were excluded at the top */ | 57 | if (is_cpu(CYRIX)) { |
53 | setCx86(CX86_CCR3, (ctxt->ccr3 & 0x0f) | 0x10); | 58 | /* Cyrix ARRs - everything else was excluded at the top */
59 | setCx86(CX86_CCR3, (ctxt->ccr3 & 0x0f) | 0x10); | ||
60 | } | ||
61 | } | ||
54 | } | 62 | } |
55 | 63 | ||
56 | /* Restore the processor after a set_mtrr_prepare */ | 64 | /* Restore the processor after a set_mtrr_prepare */ |
57 | void set_mtrr_done(struct set_mtrr_context *ctxt) | 65 | void set_mtrr_done(struct set_mtrr_context *ctxt) |
58 | { | 66 | { |
59 | if (use_intel() || is_cpu(CYRIX)) { | 67 | if (use_intel() || is_cpu(CYRIX)) { |
60 | 68 | ||
61 | /* Flush caches and TLBs */ | 69 | /* Flush caches and TLBs */ |
62 | wbinvd(); | 70 | wbinvd(); |
63 | 71 | ||
64 | /* Restore MTRRdefType */ | 72 | /* Restore MTRRdefType */ |
65 | if (use_intel()) | 73 | if (use_intel()) { |
66 | /* Intel (P6) standard MTRRs */ | 74 | /* Intel (P6) standard MTRRs */ |
67 | mtrr_wrmsr(MSR_MTRRdefType, ctxt->deftype_lo, ctxt->deftype_hi); | 75 | mtrr_wrmsr(MSR_MTRRdefType, ctxt->deftype_lo, |
68 | else | 76 | ctxt->deftype_hi); |
69 | /* Cyrix ARRs - everything else was excluded at the top */ | 77 | } else { |
78 | /* | ||
79 | * Cyrix ARRs - | ||
80 | * everything else was excluded at the top | ||
81 | */ | ||
70 | setCx86(CX86_CCR3, ctxt->ccr3); | 82 | setCx86(CX86_CCR3, ctxt->ccr3); |
83 | } | ||
71 | 84 | ||
72 | /* Enable caches */ | 85 | /* Enable caches */ |
73 | write_cr0(read_cr0() & 0xbfffffff); | 86 | write_cr0(read_cr0() & 0xbfffffff); |
74 | 87 | ||
75 | /* Restore value of CR4 */ | 88 | /* Restore value of CR4 */ |
76 | if (cpu_has_pge) | 89 | if (cpu_has_pge) |
77 | write_cr4(ctxt->cr4val); | 90 | write_cr4(ctxt->cr4val); |
78 | } | 91 | } |
79 | /* Re-enable interrupts locally (if enabled previously) */ | 92 | /* Re-enable interrupts locally (if enabled previously) */ |
80 | local_irq_restore(ctxt->flags); | 93 | local_irq_restore(ctxt->flags); |
81 | } | 94 | } |
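The raw CR0 constants in this file are the cache-disable bit: CR0.CD is bit 30, so the & 0xbfffffff above is just ~(1 << 30). A sketch with a named bit (CR0_CD is an invented macro; the literals are what the file actually uses):

    #define CR0_CD (1UL << 30)            /* CR0 cache-disable bit      */

    write_cr0(read_cr0() | CR0_CD);       /* enter no-fill cache mode   */
    wbinvd();                             /* flush caches               */
    /* ... reprogram MTRRs/ARRs ... */
    write_cr0(read_cr0() & ~CR0_CD);      /* == & 0xbfffffff: caches on */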
82 | |||
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 900332b800f8..f9cd0849bd42 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c | |||
@@ -6,6 +6,7 @@ | |||
6 | * Copyright (C) 2009 Jaswinder Singh Rajput | 6 | * Copyright (C) 2009 Jaswinder Singh Rajput |
7 | * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter | 7 | * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter |
8 | * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> | 8 | * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> |
9 | * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com> | ||
9 | * | 10 | * |
10 | * For licencing details see kernel-base/COPYING | 11 | * For licencing details see kernel-base/COPYING |
11 | */ | 12 | */ |
@@ -20,6 +21,7 @@ | |||
20 | #include <linux/sched.h> | 21 | #include <linux/sched.h> |
21 | #include <linux/uaccess.h> | 22 | #include <linux/uaccess.h> |
22 | #include <linux/highmem.h> | 23 | #include <linux/highmem.h> |
24 | #include <linux/cpu.h> | ||
23 | 25 | ||
24 | #include <asm/apic.h> | 26 | #include <asm/apic.h> |
25 | #include <asm/stacktrace.h> | 27 | #include <asm/stacktrace.h> |
@@ -27,12 +29,52 @@ | |||
27 | 29 | ||
28 | static u64 perf_counter_mask __read_mostly; | 30 | static u64 perf_counter_mask __read_mostly; |
29 | 31 | ||
32 | /* The maximal number of PEBS counters: */ | ||
33 | #define MAX_PEBS_COUNTERS 4 | ||
34 | |||
35 | /* The size of a BTS record in bytes: */ | ||
36 | #define BTS_RECORD_SIZE 24 | ||
37 | |||
38 | /* The size of a per-cpu BTS buffer in bytes: */ | ||
39 | #define BTS_BUFFER_SIZE (BTS_RECORD_SIZE * 1024) | ||
40 | |||
41 | /* The BTS overflow threshold in bytes from the end of the buffer: */ | ||
42 | #define BTS_OVFL_TH (BTS_RECORD_SIZE * 64) | ||
43 | |||
44 | |||
45 | /* | ||
46 | * Bits in the debugctlmsr controlling branch tracing. | ||
47 | */ | ||
48 | #define X86_DEBUGCTL_TR (1 << 6) | ||
49 | #define X86_DEBUGCTL_BTS (1 << 7) | ||
50 | #define X86_DEBUGCTL_BTINT (1 << 8) | ||
51 | #define X86_DEBUGCTL_BTS_OFF_OS (1 << 9) | ||
52 | #define X86_DEBUGCTL_BTS_OFF_USR (1 << 10) | ||
53 | |||
54 | /* | ||
55 | * A debug store configuration. | ||
56 | * | ||
57 | * We only support architectures that use 64-bit fields. | ||
58 | */ | ||
59 | struct debug_store { | ||
60 | u64 bts_buffer_base; | ||
61 | u64 bts_index; | ||
62 | u64 bts_absolute_maximum; | ||
63 | u64 bts_interrupt_threshold; | ||
64 | u64 pebs_buffer_base; | ||
65 | u64 pebs_index; | ||
66 | u64 pebs_absolute_maximum; | ||
67 | u64 pebs_interrupt_threshold; | ||
68 | u64 pebs_counter_reset[MAX_PEBS_COUNTERS]; | ||
69 | }; | ||
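For scale: with the constants above, each CPU gets a 24576-byte buffer holding 1024 BTS records of 24 bytes each, and the interrupt threshold sits 64 records (1536 bytes) short of the end. The four bts_* fields delimit the buffer; a worked sketch (not new kernel code):

    /* ds as filled in by reserve_bts_hardware() further down:          */
    u64 used = (ds->bts_index - ds->bts_buffer_base) / BTS_RECORD_SIZE;
    u64 room = (ds->bts_absolute_maximum - ds->bts_buffer_base)
                    / BTS_RECORD_SIZE;              /* == 1024 here     */
    /* The hardware raises an interrupt once bts_index passes
     * bts_interrupt_threshold. */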
70 | |||
30 | struct cpu_hw_counters { | 71 | struct cpu_hw_counters { |
31 | struct perf_counter *counters[X86_PMC_IDX_MAX]; | 72 | struct perf_counter *counters[X86_PMC_IDX_MAX]; |
32 | unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; | 73 | unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; |
33 | unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; | 74 | unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; |
34 | unsigned long interrupts; | 75 | unsigned long interrupts; |
35 | int enabled; | 76 | int enabled; |
77 | struct debug_store *ds; | ||
36 | }; | 78 | }; |
37 | 79 | ||
38 | /* | 80 | /* |
@@ -58,6 +100,8 @@ struct x86_pmu { | |||
58 | int apic; | 100 | int apic; |
59 | u64 max_period; | 101 | u64 max_period; |
60 | u64 intel_ctrl; | 102 | u64 intel_ctrl; |
103 | void (*enable_bts)(u64 config); | ||
104 | void (*disable_bts)(void); | ||
61 | }; | 105 | }; |
62 | 106 | ||
63 | static struct x86_pmu x86_pmu __read_mostly; | 107 | static struct x86_pmu x86_pmu __read_mostly; |
@@ -577,6 +621,9 @@ x86_perf_counter_update(struct perf_counter *counter, | |||
577 | u64 prev_raw_count, new_raw_count; | 621 | u64 prev_raw_count, new_raw_count; |
578 | s64 delta; | 622 | s64 delta; |
579 | 623 | ||
624 | if (idx == X86_PMC_IDX_FIXED_BTS) | ||
625 | return 0; | ||
626 | |||
580 | /* | 627 | /* |
581 | * Careful: an NMI might modify the previous counter value. | 628 | * Careful: an NMI might modify the previous counter value. |
582 | * | 629 | * |
@@ -666,10 +713,110 @@ static void release_pmc_hardware(void) | |||
666 | #endif | 713 | #endif |
667 | } | 714 | } |
668 | 715 | ||
716 | static inline bool bts_available(void) | ||
717 | { | ||
718 | return x86_pmu.enable_bts != NULL; | ||
719 | } | ||
720 | |||
721 | static inline void init_debug_store_on_cpu(int cpu) | ||
722 | { | ||
723 | struct debug_store *ds = per_cpu(cpu_hw_counters, cpu).ds; | ||
724 | |||
725 | if (!ds) | ||
726 | return; | ||
727 | |||
728 | wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, | ||
729 | (u32)((u64)(unsigned long)ds), | ||
730 | (u32)((u64)(unsigned long)ds >> 32)); | ||
731 | } | ||
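The two u32 arguments are simply the low and high halves of the 64-bit DS base address; equivalently (a sketch of the same split):

    u64 val = (u64)(unsigned long)ds;
    u32 lo  = (u32)val;
    u32 hi  = (u32)(val >> 32);
    wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, lo, hi);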
732 | |||
733 | static inline void fini_debug_store_on_cpu(int cpu) | ||
734 | { | ||
735 | if (!per_cpu(cpu_hw_counters, cpu).ds) | ||
736 | return; | ||
737 | |||
738 | wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0); | ||
739 | } | ||
740 | |||
741 | static void release_bts_hardware(void) | ||
742 | { | ||
743 | int cpu; | ||
744 | |||
745 | if (!bts_available()) | ||
746 | return; | ||
747 | |||
748 | get_online_cpus(); | ||
749 | |||
750 | for_each_online_cpu(cpu) | ||
751 | fini_debug_store_on_cpu(cpu); | ||
752 | |||
753 | for_each_possible_cpu(cpu) { | ||
754 | struct debug_store *ds = per_cpu(cpu_hw_counters, cpu).ds; | ||
755 | |||
756 | if (!ds) | ||
757 | continue; | ||
758 | |||
759 | per_cpu(cpu_hw_counters, cpu).ds = NULL; | ||
760 | |||
761 | kfree((void *)(unsigned long)ds->bts_buffer_base); | ||
762 | kfree(ds); | ||
763 | } | ||
764 | |||
765 | put_online_cpus(); | ||
766 | } | ||
767 | |||
768 | static int reserve_bts_hardware(void) | ||
769 | { | ||
770 | int cpu, err = 0; | ||
771 | |||
772 | if (!bts_available()) | ||
773 | return 0; | ||
774 | |||
775 | get_online_cpus(); | ||
776 | |||
777 | for_each_possible_cpu(cpu) { | ||
778 | struct debug_store *ds; | ||
779 | void *buffer; | ||
780 | |||
781 | err = -ENOMEM; | ||
782 | buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL); | ||
783 | if (unlikely(!buffer)) | ||
784 | break; | ||
785 | |||
786 | ds = kzalloc(sizeof(*ds), GFP_KERNEL); | ||
787 | if (unlikely(!ds)) { | ||
788 | kfree(buffer); | ||
789 | break; | ||
790 | } | ||
791 | |||
792 | ds->bts_buffer_base = (u64)(unsigned long)buffer; | ||
793 | ds->bts_index = ds->bts_buffer_base; | ||
794 | ds->bts_absolute_maximum = | ||
795 | ds->bts_buffer_base + BTS_BUFFER_SIZE; | ||
796 | ds->bts_interrupt_threshold = | ||
797 | ds->bts_absolute_maximum - BTS_OVFL_TH; | ||
798 | |||
799 | per_cpu(cpu_hw_counters, cpu).ds = ds; | ||
800 | err = 0; | ||
801 | } | ||
802 | |||
803 | if (err) | ||
804 | release_bts_hardware(); | ||
805 | else { | ||
806 | for_each_online_cpu(cpu) | ||
807 | init_debug_store_on_cpu(cpu); | ||
808 | } | ||
809 | |||
810 | put_online_cpus(); | ||
811 | |||
812 | return err; | ||
813 | } | ||
814 | |||
669 | static void hw_perf_counter_destroy(struct perf_counter *counter) | 815 | static void hw_perf_counter_destroy(struct perf_counter *counter) |
670 | { | 816 | { |
671 | if (atomic_dec_and_mutex_lock(&active_counters, &pmc_reserve_mutex)) { | 817 | if (atomic_dec_and_mutex_lock(&active_counters, &pmc_reserve_mutex)) { |
672 | release_pmc_hardware(); | 818 | release_pmc_hardware(); |
819 | release_bts_hardware(); | ||
673 | mutex_unlock(&pmc_reserve_mutex); | 820 | mutex_unlock(&pmc_reserve_mutex); |
674 | } | 821 | } |
675 | } | 822 | } |
@@ -712,6 +859,42 @@ set_ext_hw_attr(struct hw_perf_counter *hwc, struct perf_counter_attr *attr) | |||
712 | return 0; | 859 | return 0; |
713 | } | 860 | } |
714 | 861 | ||
862 | static void intel_pmu_enable_bts(u64 config) | ||
863 | { | ||
864 | unsigned long debugctlmsr; | ||
865 | |||
866 | debugctlmsr = get_debugctlmsr(); | ||
867 | |||
868 | debugctlmsr |= X86_DEBUGCTL_TR; | ||
869 | debugctlmsr |= X86_DEBUGCTL_BTS; | ||
870 | debugctlmsr |= X86_DEBUGCTL_BTINT; | ||
871 | |||
872 | if (!(config & ARCH_PERFMON_EVENTSEL_OS)) | ||
873 | debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS; | ||
874 | |||
875 | if (!(config & ARCH_PERFMON_EVENTSEL_USR)) | ||
876 | debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR; | ||
877 | |||
878 | update_debugctlmsr(debugctlmsr); | ||
879 | } | ||
880 | |||
881 | static void intel_pmu_disable_bts(void) | ||
882 | { | ||
883 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); | ||
884 | unsigned long debugctlmsr; | ||
885 | |||
886 | if (!cpuc->ds) | ||
887 | return; | ||
888 | |||
889 | debugctlmsr = get_debugctlmsr(); | ||
890 | |||
891 | debugctlmsr &= | ||
892 | ~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT | | ||
893 | X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR); | ||
894 | |||
895 | update_debugctlmsr(debugctlmsr); | ||
896 | } | ||
897 | |||
715 | /* | 898 | /* |
716 | * Setup the hardware configuration for a given attr_type | 899 | * Setup the hardware configuration for a given attr_type |
717 | */ | 900 | */ |
@@ -728,9 +911,13 @@ static int __hw_perf_counter_init(struct perf_counter *counter) | |||
728 | err = 0; | 911 | err = 0; |
729 | if (!atomic_inc_not_zero(&active_counters)) { | 912 | if (!atomic_inc_not_zero(&active_counters)) { |
730 | mutex_lock(&pmc_reserve_mutex); | 913 | mutex_lock(&pmc_reserve_mutex); |
731 | if (atomic_read(&active_counters) == 0 && !reserve_pmc_hardware()) | 914 | if (atomic_read(&active_counters) == 0) { |
732 | err = -EBUSY; | 915 | if (!reserve_pmc_hardware()) |
733 | else | 916 | err = -EBUSY; |
917 | else | ||
918 | err = reserve_bts_hardware(); | ||
919 | } | ||
920 | if (!err) | ||
734 | atomic_inc(&active_counters); | 921 | atomic_inc(&active_counters); |
735 | mutex_unlock(&pmc_reserve_mutex); | 922 | mutex_unlock(&pmc_reserve_mutex); |
736 | } | 923 | } |
@@ -793,6 +980,20 @@ static int __hw_perf_counter_init(struct perf_counter *counter) | |||
793 | if (config == -1LL) | 980 | if (config == -1LL) |
794 | return -EINVAL; | 981 | return -EINVAL; |
795 | 982 | ||
983 | /* | ||
984 | * Branch tracing: | ||
985 | */ | ||
986 | if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) && | ||
987 | (hwc->sample_period == 1)) { | ||
988 | /* BTS is not supported by this architecture. */ | ||
989 | if (!bts_available()) | ||
990 | return -EOPNOTSUPP; | ||
991 | |||
992 | /* BTS is currently only allowed for user-mode. */ | ||
993 | if (hwc->config & ARCH_PERFMON_EVENTSEL_OS) | ||
994 | return -EOPNOTSUPP; | ||
995 | } | ||
996 | |||
796 | hwc->config |= config; | 997 | hwc->config |= config; |
797 | 998 | ||
798 | return 0; | 999 | return 0; |
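The branch-tracing special case above is selected by exactly one attribute shape. A hypothetical in-kernel user would request it like this (a sketch against this era's perf_counter_attr, not tested code):

    struct perf_counter_attr attr = {
            .type           = PERF_TYPE_HARDWARE,
            .config         = PERF_COUNT_HW_BRANCH_INSTRUCTIONS,
            .sample_period  = 1,    /* every branch: routed to BTS, not a PMC */
            .exclude_kernel = 1,    /* keeps ARCH_PERFMON_EVENTSEL_OS clear   */
    };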
@@ -817,7 +1018,18 @@ static void p6_pmu_disable_all(void) | |||
817 | 1018 | ||
818 | static void intel_pmu_disable_all(void) | 1019 | static void intel_pmu_disable_all(void) |
819 | { | 1020 | { |
1021 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); | ||
1022 | |||
1023 | if (!cpuc->enabled) | ||
1024 | return; | ||
1025 | |||
1026 | cpuc->enabled = 0; | ||
1027 | barrier(); | ||
1028 | |||
820 | wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); | 1029 | wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); |
1030 | |||
1031 | if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) | ||
1032 | intel_pmu_disable_bts(); | ||
821 | } | 1033 | } |
822 | 1034 | ||
823 | static void amd_pmu_disable_all(void) | 1035 | static void amd_pmu_disable_all(void) |
@@ -875,7 +1087,25 @@ static void p6_pmu_enable_all(void) | |||
875 | 1087 | ||
876 | static void intel_pmu_enable_all(void) | 1088 | static void intel_pmu_enable_all(void) |
877 | { | 1089 | { |
1090 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); | ||
1091 | |||
1092 | if (cpuc->enabled) | ||
1093 | return; | ||
1094 | |||
1095 | cpuc->enabled = 1; | ||
1096 | barrier(); | ||
1097 | |||
878 | wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl); | 1098 | wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl); |
1099 | |||
1100 | if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) { | ||
1101 | struct perf_counter *counter = | ||
1102 | cpuc->counters[X86_PMC_IDX_FIXED_BTS]; | ||
1103 | |||
1104 | if (WARN_ON_ONCE(!counter)) | ||
1105 | return; | ||
1106 | |||
1107 | intel_pmu_enable_bts(counter->hw.config); | ||
1108 | } | ||
879 | } | 1109 | } |
880 | 1110 | ||
881 | static void amd_pmu_enable_all(void) | 1111 | static void amd_pmu_enable_all(void) |
@@ -962,6 +1192,11 @@ p6_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) | |||
962 | static inline void | 1192 | static inline void |
963 | intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) | 1193 | intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) |
964 | { | 1194 | { |
1195 | if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { | ||
1196 | intel_pmu_disable_bts(); | ||
1197 | return; | ||
1198 | } | ||
1199 | |||
965 | if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { | 1200 | if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { |
966 | intel_pmu_disable_fixed(hwc, idx); | 1201 | intel_pmu_disable_fixed(hwc, idx); |
967 | return; | 1202 | return; |
@@ -990,6 +1225,9 @@ x86_perf_counter_set_period(struct perf_counter *counter, | |||
990 | s64 period = hwc->sample_period; | 1225 | s64 period = hwc->sample_period; |
991 | int err, ret = 0; | 1226 | int err, ret = 0; |
992 | 1227 | ||
1228 | if (idx == X86_PMC_IDX_FIXED_BTS) | ||
1229 | return 0; | ||
1230 | |||
993 | /* | 1231 | /* |
994 | * If we are way outside a reasoable range then just skip forward: | 1232 | * If we are way outside a reasoable range then just skip forward: |
995 | */ | 1233 | */ |
@@ -1072,6 +1310,14 @@ static void p6_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) | |||
1072 | 1310 | ||
1073 | static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) | 1311 | static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) |
1074 | { | 1312 | { |
1313 | if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { | ||
1314 | if (!__get_cpu_var(cpu_hw_counters).enabled) | ||
1315 | return; | ||
1316 | |||
1317 | intel_pmu_enable_bts(hwc->config); | ||
1318 | return; | ||
1319 | } | ||
1320 | |||
1075 | if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { | 1321 | if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { |
1076 | intel_pmu_enable_fixed(hwc, idx); | 1322 | intel_pmu_enable_fixed(hwc, idx); |
1077 | return; | 1323 | return; |
@@ -1093,11 +1339,16 @@ fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc) | |||
1093 | { | 1339 | { |
1094 | unsigned int event; | 1340 | unsigned int event; |
1095 | 1341 | ||
1342 | event = hwc->config & ARCH_PERFMON_EVENT_MASK; | ||
1343 | |||
1344 | if (unlikely((event == | ||
1345 | x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) && | ||
1346 | (hwc->sample_period == 1))) | ||
1347 | return X86_PMC_IDX_FIXED_BTS; | ||
1348 | |||
1096 | if (!x86_pmu.num_counters_fixed) | 1349 | if (!x86_pmu.num_counters_fixed) |
1097 | return -1; | 1350 | return -1; |
1098 | 1351 | ||
1099 | event = hwc->config & ARCH_PERFMON_EVENT_MASK; | ||
1100 | |||
1101 | if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS))) | 1352 | if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS))) |
1102 | return X86_PMC_IDX_FIXED_INSTRUCTIONS; | 1353 | return X86_PMC_IDX_FIXED_INSTRUCTIONS; |
1103 | if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES))) | 1354 | if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES))) |
@@ -1118,7 +1369,15 @@ static int x86_pmu_enable(struct perf_counter *counter) | |||
1118 | int idx; | 1369 | int idx; |
1119 | 1370 | ||
1120 | idx = fixed_mode_idx(counter, hwc); | 1371 | idx = fixed_mode_idx(counter, hwc); |
1121 | if (idx >= 0) { | 1372 | if (idx == X86_PMC_IDX_FIXED_BTS) { |
1373 | /* BTS is already occupied. */ | ||
1374 | if (test_and_set_bit(idx, cpuc->used_mask)) | ||
1375 | return -EAGAIN; | ||
1376 | |||
1377 | hwc->config_base = 0; | ||
1378 | hwc->counter_base = 0; | ||
1379 | hwc->idx = idx; | ||
1380 | } else if (idx >= 0) { | ||
1122 | /* | 1381 | /* |
1123 | * Try to get the fixed counter, if that is already taken | 1382 | * Try to get the fixed counter, if that is already taken |
1124 | * then try to get a generic counter: | 1383 | * then try to get a generic counter: |
@@ -1229,6 +1488,44 @@ void perf_counter_print_debug(void) | |||
1229 | local_irq_restore(flags); | 1488 | local_irq_restore(flags); |
1230 | } | 1489 | } |
1231 | 1490 | ||
1491 | static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc, | ||
1492 | struct perf_sample_data *data) | ||
1493 | { | ||
1494 | struct debug_store *ds = cpuc->ds; | ||
1495 | struct bts_record { | ||
1496 | u64 from; | ||
1497 | u64 to; | ||
1498 | u64 flags; | ||
1499 | }; | ||
1500 | struct perf_counter *counter = cpuc->counters[X86_PMC_IDX_FIXED_BTS]; | ||
1501 | unsigned long orig_ip = data->regs->ip; | ||
1502 | struct bts_record *at, *top; | ||
1503 | |||
1504 | if (!counter) | ||
1505 | return; | ||
1506 | |||
1507 | if (!ds) | ||
1508 | return; | ||
1509 | |||
1510 | at = (struct bts_record *)(unsigned long)ds->bts_buffer_base; | ||
1511 | top = (struct bts_record *)(unsigned long)ds->bts_index; | ||
1512 | |||
1513 | ds->bts_index = ds->bts_buffer_base; | ||
1514 | |||
1515 | for (; at < top; at++) { | ||
1516 | data->regs->ip = at->from; | ||
1517 | data->addr = at->to; | ||
1518 | |||
1519 | perf_counter_output(counter, 1, data); | ||
1520 | } | ||
1521 | |||
1522 | data->regs->ip = orig_ip; | ||
1523 | data->addr = 0; | ||
1524 | |||
1525 | /* There's new data available. */ | ||
1526 | counter->pending_kill = POLL_IN; | ||
1527 | } | ||
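The local struct in the drain routine is also where BTS_RECORD_SIZE comes from: three u64 fields make 24 bytes. If the record layout were hoisted out of the function, the assumption could be checked at build time (hypothetical, since the struct is currently function-local):

    BUILD_BUG_ON(sizeof(struct bts_record) != BTS_RECORD_SIZE);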
1528 | |||
1232 | static void x86_pmu_disable(struct perf_counter *counter) | 1529 | static void x86_pmu_disable(struct perf_counter *counter) |
1233 | { | 1530 | { |
1234 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); | 1531 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); |
@@ -1253,6 +1550,15 @@ static void x86_pmu_disable(struct perf_counter *counter) | |||
1253 | * that we are disabling: | 1550 | * that we are disabling: |
1254 | */ | 1551 | */ |
1255 | x86_perf_counter_update(counter, hwc, idx); | 1552 | x86_perf_counter_update(counter, hwc, idx); |
1553 | |||
1554 | /* Drain the remaining BTS records. */ | ||
1555 | if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { | ||
1556 | struct perf_sample_data data; | ||
1557 | struct pt_regs regs; | ||
1558 | |||
1559 | data.regs = ®s; | ||
1560 | intel_pmu_drain_bts_buffer(cpuc, &data); | ||
1561 | } | ||
1256 | cpuc->counters[idx] = NULL; | 1562 | cpuc->counters[idx] = NULL; |
1257 | clear_bit(idx, cpuc->used_mask); | 1563 | clear_bit(idx, cpuc->used_mask); |
1258 | 1564 | ||
@@ -1280,6 +1586,7 @@ static int intel_pmu_save_and_restart(struct perf_counter *counter) | |||
1280 | 1586 | ||
1281 | static void intel_pmu_reset(void) | 1587 | static void intel_pmu_reset(void) |
1282 | { | 1588 | { |
1589 | struct debug_store *ds = __get_cpu_var(cpu_hw_counters).ds; | ||
1283 | unsigned long flags; | 1590 | unsigned long flags; |
1284 | int idx; | 1591 | int idx; |
1285 | 1592 | ||
@@ -1297,6 +1604,8 @@ static void intel_pmu_reset(void) | |||
1297 | for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) { | 1604 | for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) { |
1298 | checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull); | 1605 | checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull); |
1299 | } | 1606 | } |
1607 | if (ds) | ||
1608 | ds->bts_index = ds->bts_buffer_base; | ||
1300 | 1609 | ||
1301 | local_irq_restore(flags); | 1610 | local_irq_restore(flags); |
1302 | } | 1611 | } |
@@ -1362,6 +1671,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) | |||
1362 | cpuc = &__get_cpu_var(cpu_hw_counters); | 1671 | cpuc = &__get_cpu_var(cpu_hw_counters); |
1363 | 1672 | ||
1364 | perf_disable(); | 1673 | perf_disable(); |
1674 | intel_pmu_drain_bts_buffer(cpuc, &data); | ||
1365 | status = intel_pmu_get_status(); | 1675 | status = intel_pmu_get_status(); |
1366 | if (!status) { | 1676 | if (!status) { |
1367 | perf_enable(); | 1677 | perf_enable(); |
@@ -1571,6 +1881,8 @@ static struct x86_pmu intel_pmu = { | |||
1571 | * the generic counter period: | 1881 | * the generic counter period: |
1572 | */ | 1882 | */ |
1573 | .max_period = (1ULL << 31) - 1, | 1883 | .max_period = (1ULL << 31) - 1, |
1884 | .enable_bts = intel_pmu_enable_bts, | ||
1885 | .disable_bts = intel_pmu_disable_bts, | ||
1574 | }; | 1886 | }; |
1575 | 1887 | ||
1576 | static struct x86_pmu amd_pmu = { | 1888 | static struct x86_pmu amd_pmu = { |
@@ -1962,3 +2274,8 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) | |||
1962 | 2274 | ||
1963 | return entry; | 2275 | return entry; |
1964 | } | 2276 | } |
2277 | |||
2278 | void hw_perf_counter_setup_online(int cpu) | ||
2279 | { | ||
2280 | init_debug_store_on_cpu(cpu); | ||
2281 | } | ||
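
The intel_pmu_drain_bts_buffer() hunk above walks the Branch Trace Store ring between bts_buffer_base and bts_index, emits one sample per record, and resets the index so the hardware refills from the base. A minimal userspace sketch of that drain loop follows; everything except the from/to/flags record layout (ds_sketch, drain_bts, the printf in place of perf_counter_output()) is invented for illustration.

    #include <stdint.h>
    #include <stdio.h>

    struct bts_record { uint64_t from, to, flags; };

    /* Stand-in for the two debug-store fields the drain loop touches. */
    struct ds_sketch {
            struct bts_record *bts_buffer_base;  /* first record */
            struct bts_record *bts_index;        /* one past the last record */
    };

    static void drain_bts(struct ds_sketch *ds)
    {
            struct bts_record *at  = ds->bts_buffer_base;
            struct bts_record *top = ds->bts_index;

            ds->bts_index = ds->bts_buffer_base; /* hardware refills from base */

            for (; at < top; at++)               /* one "sample" per record */
                    printf("branch %#llx -> %#llx\n",
                           (unsigned long long)at->from,
                           (unsigned long long)at->to);
    }

    int main(void)
    {
            struct bts_record buf[2] = {
                    { 0x1000, 0x2000, 0 }, { 0x2004, 0x3000, 0 },
            };
            struct ds_sketch ds = { buf, buf + 2 };
            drain_bts(&ds);
            return 0;
    }
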
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c index e60ed740d2b3..392bea43b890 100644 --- a/arch/x86/kernel/cpu/perfctr-watchdog.c +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c | |||
@@ -68,16 +68,16 @@ static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr) | |||
68 | /* returns the bit offset of the performance counter register */ | 68 | /* returns the bit offset of the performance counter register */ |
69 | switch (boot_cpu_data.x86_vendor) { | 69 | switch (boot_cpu_data.x86_vendor) { |
70 | case X86_VENDOR_AMD: | 70 | case X86_VENDOR_AMD: |
71 | return (msr - MSR_K7_PERFCTR0); | 71 | return msr - MSR_K7_PERFCTR0; |
72 | case X86_VENDOR_INTEL: | 72 | case X86_VENDOR_INTEL: |
73 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) | 73 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) |
74 | return (msr - MSR_ARCH_PERFMON_PERFCTR0); | 74 | return msr - MSR_ARCH_PERFMON_PERFCTR0; |
75 | 75 | ||
76 | switch (boot_cpu_data.x86) { | 76 | switch (boot_cpu_data.x86) { |
77 | case 6: | 77 | case 6: |
78 | return (msr - MSR_P6_PERFCTR0); | 78 | return msr - MSR_P6_PERFCTR0; |
79 | case 15: | 79 | case 15: |
80 | return (msr - MSR_P4_BPU_PERFCTR0); | 80 | return msr - MSR_P4_BPU_PERFCTR0; |
81 | } | 81 | } |
82 | } | 82 | } |
83 | return 0; | 83 | return 0; |
@@ -92,16 +92,16 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr) | |||
92 | /* returns the bit offset of the event selection register */ | 92 | /* returns the bit offset of the event selection register */ |
93 | switch (boot_cpu_data.x86_vendor) { | 93 | switch (boot_cpu_data.x86_vendor) { |
94 | case X86_VENDOR_AMD: | 94 | case X86_VENDOR_AMD: |
95 | return (msr - MSR_K7_EVNTSEL0); | 95 | return msr - MSR_K7_EVNTSEL0; |
96 | case X86_VENDOR_INTEL: | 96 | case X86_VENDOR_INTEL: |
97 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) | 97 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) |
98 | return (msr - MSR_ARCH_PERFMON_EVENTSEL0); | 98 | return msr - MSR_ARCH_PERFMON_EVENTSEL0; |
99 | 99 | ||
100 | switch (boot_cpu_data.x86) { | 100 | switch (boot_cpu_data.x86) { |
101 | case 6: | 101 | case 6: |
102 | return (msr - MSR_P6_EVNTSEL0); | 102 | return msr - MSR_P6_EVNTSEL0; |
103 | case 15: | 103 | case 15: |
104 | return (msr - MSR_P4_BSU_ESCR0); | 104 | return msr - MSR_P4_BSU_ESCR0; |
105 | } | 105 | } |
106 | } | 106 | } |
107 | return 0; | 107 | return 0; |
@@ -113,7 +113,7 @@ int avail_to_resrv_perfctr_nmi_bit(unsigned int counter) | |||
113 | { | 113 | { |
114 | BUG_ON(counter > NMI_MAX_COUNTER_BITS); | 114 | BUG_ON(counter > NMI_MAX_COUNTER_BITS); |
115 | 115 | ||
116 | return (!test_bit(counter, perfctr_nmi_owner)); | 116 | return !test_bit(counter, perfctr_nmi_owner); |
117 | } | 117 | } |
118 | 118 | ||
119 | /* checks the an msr for availability */ | 119 | /* checks the an msr for availability */ |
@@ -124,7 +124,7 @@ int avail_to_resrv_perfctr_nmi(unsigned int msr) | |||
124 | counter = nmi_perfctr_msr_to_bit(msr); | 124 | counter = nmi_perfctr_msr_to_bit(msr); |
125 | BUG_ON(counter > NMI_MAX_COUNTER_BITS); | 125 | BUG_ON(counter > NMI_MAX_COUNTER_BITS); |
126 | 126 | ||
127 | return (!test_bit(counter, perfctr_nmi_owner)); | 127 | return !test_bit(counter, perfctr_nmi_owner); |
128 | } | 128 | } |
129 | EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit); | 129 | EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit); |
130 | 130 | ||
@@ -237,7 +237,7 @@ static unsigned int adjust_for_32bit_ctr(unsigned int hz) | |||
237 | */ | 237 | */ |
238 | counter_val = (u64)cpu_khz * 1000; | 238 | counter_val = (u64)cpu_khz * 1000; |
239 | do_div(counter_val, retval); | 239 | do_div(counter_val, retval); |
240 | if (counter_val > 0x7fffffffULL) { | 240 | if (counter_val > 0x7fffffffULL) { |
241 | u64 count = (u64)cpu_khz * 1000; | 241 | u64 count = (u64)cpu_khz * 1000; |
242 | do_div(count, 0x7fffffffUL); | 242 | do_div(count, 0x7fffffffUL); |
243 | retval = count + 1; | 243 | retval = count + 1; |
@@ -251,7 +251,7 @@ static void write_watchdog_counter(unsigned int perfctr_msr, | |||
251 | u64 count = (u64)cpu_khz * 1000; | 251 | u64 count = (u64)cpu_khz * 1000; |
252 | 252 | ||
253 | do_div(count, nmi_hz); | 253 | do_div(count, nmi_hz); |
254 | if(descr) | 254 | if (descr) |
255 | pr_debug("setting %s to -0x%08Lx\n", descr, count); | 255 | pr_debug("setting %s to -0x%08Lx\n", descr, count); |
256 | wrmsrl(perfctr_msr, 0 - count); | 256 | wrmsrl(perfctr_msr, 0 - count); |
257 | } | 257 | } |
@@ -262,7 +262,7 @@ static void write_watchdog_counter32(unsigned int perfctr_msr, | |||
262 | u64 count = (u64)cpu_khz * 1000; | 262 | u64 count = (u64)cpu_khz * 1000; |
263 | 263 | ||
264 | do_div(count, nmi_hz); | 264 | do_div(count, nmi_hz); |
265 | if(descr) | 265 | if (descr) |
266 | pr_debug("setting %s to -0x%08Lx\n", descr, count); | 266 | pr_debug("setting %s to -0x%08Lx\n", descr, count); |
267 | wrmsr(perfctr_msr, (u32)(-count), 0); | 267 | wrmsr(perfctr_msr, (u32)(-count), 0); |
268 | } | 268 | } |
@@ -296,7 +296,7 @@ static int setup_k7_watchdog(unsigned nmi_hz) | |||
296 | 296 | ||
297 | /* setup the timer */ | 297 | /* setup the timer */ |
298 | wrmsr(evntsel_msr, evntsel, 0); | 298 | wrmsr(evntsel_msr, evntsel, 0); |
299 | write_watchdog_counter(perfctr_msr, "K7_PERFCTR0",nmi_hz); | 299 | write_watchdog_counter(perfctr_msr, "K7_PERFCTR0", nmi_hz); |
300 | 300 | ||
301 | /* initialize the wd struct before enabling */ | 301 | /* initialize the wd struct before enabling */ |
302 | wd->perfctr_msr = perfctr_msr; | 302 | wd->perfctr_msr = perfctr_msr; |
@@ -387,7 +387,7 @@ static int setup_p6_watchdog(unsigned nmi_hz) | |||
387 | /* setup the timer */ | 387 | /* setup the timer */ |
388 | wrmsr(evntsel_msr, evntsel, 0); | 388 | wrmsr(evntsel_msr, evntsel, 0); |
389 | nmi_hz = adjust_for_32bit_ctr(nmi_hz); | 389 | nmi_hz = adjust_for_32bit_ctr(nmi_hz); |
390 | write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0",nmi_hz); | 390 | write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0", nmi_hz); |
391 | 391 | ||
392 | /* initialize the wd struct before enabling */ | 392 | /* initialize the wd struct before enabling */ |
393 | wd->perfctr_msr = perfctr_msr; | 393 | wd->perfctr_msr = perfctr_msr; |
@@ -415,7 +415,7 @@ static void __kprobes p6_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz) | |||
415 | apic_write(APIC_LVTPC, APIC_DM_NMI); | 415 | apic_write(APIC_LVTPC, APIC_DM_NMI); |
416 | 416 | ||
417 | /* P6/ARCH_PERFMON has 32 bit counter write */ | 417 | /* P6/ARCH_PERFMON has 32 bit counter write */ |
418 | write_watchdog_counter32(wd->perfctr_msr, NULL,nmi_hz); | 418 | write_watchdog_counter32(wd->perfctr_msr, NULL, nmi_hz); |
419 | } | 419 | } |
420 | 420 | ||
421 | static const struct wd_ops p6_wd_ops = { | 421 | static const struct wd_ops p6_wd_ops = { |
@@ -490,9 +490,9 @@ static int setup_p4_watchdog(unsigned nmi_hz) | |||
490 | if (smp_num_siblings == 2) { | 490 | if (smp_num_siblings == 2) { |
491 | unsigned int ebx, apicid; | 491 | unsigned int ebx, apicid; |
492 | 492 | ||
493 | ebx = cpuid_ebx(1); | 493 | ebx = cpuid_ebx(1); |
494 | apicid = (ebx >> 24) & 0xff; | 494 | apicid = (ebx >> 24) & 0xff; |
495 | ht_num = apicid & 1; | 495 | ht_num = apicid & 1; |
496 | } else | 496 | } else |
497 | #endif | 497 | #endif |
498 | ht_num = 0; | 498 | ht_num = 0; |
@@ -544,7 +544,7 @@ static int setup_p4_watchdog(unsigned nmi_hz) | |||
544 | } | 544 | } |
545 | 545 | ||
546 | evntsel = P4_ESCR_EVENT_SELECT(0x3F) | 546 | evntsel = P4_ESCR_EVENT_SELECT(0x3F) |
547 | | P4_ESCR_OS | 547 | | P4_ESCR_OS |
548 | | P4_ESCR_USR; | 548 | | P4_ESCR_USR; |
549 | 549 | ||
550 | cccr_val |= P4_CCCR_THRESHOLD(15) | 550 | cccr_val |= P4_CCCR_THRESHOLD(15) |
@@ -612,7 +612,7 @@ static void __kprobes p4_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz) | |||
612 | { | 612 | { |
613 | unsigned dummy; | 613 | unsigned dummy; |
614 | /* | 614 | /* |
615 | * P4 quirks: | 615 | * P4 quirks: |
616 | * - An overflown perfctr will assert its interrupt | 616 | * - An overflown perfctr will assert its interrupt |
617 | * until the OVF flag in its CCCR is cleared. | 617 | * until the OVF flag in its CCCR is cleared. |
618 | * - LVTPC is masked on interrupt and must be | 618 | * - LVTPC is masked on interrupt and must be |
@@ -662,7 +662,8 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz) | |||
662 | * NOTE: Corresponding bit = 0 in ebx indicates event present. | 662 | * NOTE: Corresponding bit = 0 in ebx indicates event present. |
663 | */ | 663 | */ |
664 | cpuid(10, &(eax.full), &ebx, &unused, &unused); | 664 | cpuid(10, &(eax.full), &ebx, &unused, &unused); |
665 | if ((eax.split.mask_length < (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) || | 665 | if ((eax.split.mask_length < |
666 | (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) || | ||
666 | (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT)) | 667 | (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT)) |
667 | return 0; | 668 | return 0; |
668 | 669 | ||
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c index f82706a3901d..62ac8cb6ba27 100644 --- a/arch/x86/kernel/cpu/proc.c +++ b/arch/x86/kernel/cpu/proc.c | |||
@@ -126,7 +126,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
126 | if (i < ARRAY_SIZE(x86_power_flags) && | 126 | if (i < ARRAY_SIZE(x86_power_flags) && |
127 | x86_power_flags[i]) | 127 | x86_power_flags[i]) |
128 | seq_printf(m, "%s%s", | 128 | seq_printf(m, "%s%s", |
129 | x86_power_flags[i][0]?" ":"", | 129 | x86_power_flags[i][0] ? " " : "", |
130 | x86_power_flags[i]); | 130 | x86_power_flags[i]); |
131 | else | 131 | else |
132 | seq_printf(m, " [%d]", i); | 132 | seq_printf(m, " [%d]", i); |
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c index 284c399e3234..bc24f514ec93 100644 --- a/arch/x86/kernel/cpu/vmware.c +++ b/arch/x86/kernel/cpu/vmware.c | |||
@@ -49,17 +49,17 @@ static inline int __vmware_platform(void) | |||
49 | 49 | ||
50 | static unsigned long __vmware_get_tsc_khz(void) | 50 | static unsigned long __vmware_get_tsc_khz(void) |
51 | { | 51 | { |
52 | uint64_t tsc_hz; | 52 | uint64_t tsc_hz; |
53 | uint32_t eax, ebx, ecx, edx; | 53 | uint32_t eax, ebx, ecx, edx; |
54 | 54 | ||
55 | VMWARE_PORT(GETHZ, eax, ebx, ecx, edx); | 55 | VMWARE_PORT(GETHZ, eax, ebx, ecx, edx); |
56 | 56 | ||
57 | if (ebx == UINT_MAX) | 57 | if (ebx == UINT_MAX) |
58 | return 0; | 58 | return 0; |
59 | tsc_hz = eax | (((uint64_t)ebx) << 32); | 59 | tsc_hz = eax | (((uint64_t)ebx) << 32); |
60 | do_div(tsc_hz, 1000); | 60 | do_div(tsc_hz, 1000); |
61 | BUG_ON(tsc_hz >> 32); | 61 | BUG_ON(tsc_hz >> 32); |
62 | return tsc_hz; | 62 | return tsc_hz; |
63 | } | 63 | } |
64 | 64 | ||
65 | /* | 65 | /* |
diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c index b4f14c6c09d9..37250fe490b1 100644 --- a/arch/x86/kernel/doublefault_32.c +++ b/arch/x86/kernel/doublefault_32.c | |||
@@ -27,9 +27,7 @@ static void doublefault_fn(void) | |||
27 | 27 | ||
28 | if (ptr_ok(gdt)) { | 28 | if (ptr_ok(gdt)) { |
29 | gdt += GDT_ENTRY_TSS << 3; | 29 | gdt += GDT_ENTRY_TSS << 3; |
30 | tss = *(u16 *)(gdt+2); | 30 | tss = get_desc_base((struct desc_struct *)gdt); |
31 | tss += *(u8 *)(gdt+4) << 16; | ||
32 | tss += *(u8 *)(gdt+7) << 24; | ||
33 | printk(KERN_EMERG "double fault, tss at %08lx\n", tss); | 31 | printk(KERN_EMERG "double fault, tss at %08lx\n", tss); |
34 | 32 | ||
35 | if (ptr_ok(tss)) { | 33 | if (ptr_ok(tss)) { |
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c index 48bfe1386038..ef42a038f1a6 100644 --- a/arch/x86/kernel/ds.c +++ b/arch/x86/kernel/ds.c | |||
@@ -509,15 +509,15 @@ enum bts_field { | |||
509 | bts_escape = ((unsigned long)-1 & ~bts_qual_mask) | 509 | bts_escape = ((unsigned long)-1 & ~bts_qual_mask) |
510 | }; | 510 | }; |
511 | 511 | ||
512 | static inline unsigned long bts_get(const char *base, enum bts_field field) | 512 | static inline unsigned long bts_get(const char *base, unsigned long field) |
513 | { | 513 | { |
514 | base += (ds_cfg.sizeof_ptr_field * field); | 514 | base += (ds_cfg.sizeof_ptr_field * field); |
515 | return *(unsigned long *)base; | 515 | return *(unsigned long *)base; |
516 | } | 516 | } |
517 | 517 | ||
518 | static inline void bts_set(char *base, enum bts_field field, unsigned long val) | 518 | static inline void bts_set(char *base, unsigned long field, unsigned long val) |
519 | { | 519 | { |
520 | base += (ds_cfg.sizeof_ptr_field * field);; | 520 | base += (ds_cfg.sizeof_ptr_field * field); |
521 | (*(unsigned long *)base) = val; | 521 | (*(unsigned long *)base) = val; |
522 | } | 522 | } |
523 | 523 | ||
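
The bts_get()/bts_set() change above (besides dropping a stray second semicolon) widens the field parameter; the accessors simply index a record by field number scaled by the configured pointer size. A self-contained sketch, with sizeof_ptr_field fixed at 8 bytes purely for illustration (in the kernel it comes from ds_cfg):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static const size_t sizeof_ptr_field = 8;  /* assumed; ds_cfg-dependent */

    static uint64_t bts_get(const char *base, unsigned long field)
    {
            uint64_t v;
            memcpy(&v, base + sizeof_ptr_field * field, sizeof(v));
            return v;
    }

    static void bts_set(char *base, unsigned long field, uint64_t val)
    {
            memcpy(base + sizeof_ptr_field * field, &val, sizeof(val));
    }

    int main(void)
    {
            char rec[24] = {0};
            bts_set(rec, 1, 0xdeadbeef);   /* field 1 = "to" address */
            printf("%#llx\n", (unsigned long long)bts_get(rec, 1));
            return 0;
    }
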
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index c8405718a4c3..2d8a371d4339 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c | |||
@@ -15,7 +15,6 @@ | |||
15 | #include <linux/bug.h> | 15 | #include <linux/bug.h> |
16 | #include <linux/nmi.h> | 16 | #include <linux/nmi.h> |
17 | #include <linux/sysfs.h> | 17 | #include <linux/sysfs.h> |
18 | #include <linux/ftrace.h> | ||
19 | 18 | ||
20 | #include <asm/stacktrace.h> | 19 | #include <asm/stacktrace.h> |
21 | 20 | ||
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index d94e1ea3b9fe..9dbb527e1652 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c | |||
@@ -417,10 +417,6 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, | |||
417 | unsigned long return_hooker = (unsigned long) | 417 | unsigned long return_hooker = (unsigned long) |
418 | &return_to_handler; | 418 | &return_to_handler; |
419 | 419 | ||
420 | /* Nmi's are currently unsupported */ | ||
421 | if (unlikely(in_nmi())) | ||
422 | return; | ||
423 | |||
424 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) | 420 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) |
425 | return; | 421 | return; |
426 | 422 | ||
@@ -498,37 +494,56 @@ static struct syscall_metadata *find_syscall_meta(unsigned long *syscall) | |||
498 | 494 | ||
499 | struct syscall_metadata *syscall_nr_to_meta(int nr) | 495 | struct syscall_metadata *syscall_nr_to_meta(int nr) |
500 | { | 496 | { |
501 | if (!syscalls_metadata || nr >= FTRACE_SYSCALL_MAX || nr < 0) | 497 | if (!syscalls_metadata || nr >= NR_syscalls || nr < 0) |
502 | return NULL; | 498 | return NULL; |
503 | 499 | ||
504 | return syscalls_metadata[nr]; | 500 | return syscalls_metadata[nr]; |
505 | } | 501 | } |
506 | 502 | ||
507 | void arch_init_ftrace_syscalls(void) | 503 | int syscall_name_to_nr(char *name) |
504 | { | ||
505 | int i; | ||
506 | |||
507 | if (!syscalls_metadata) | ||
508 | return -1; | ||
509 | |||
510 | for (i = 0; i < NR_syscalls; i++) { | ||
511 | if (syscalls_metadata[i]) { | ||
512 | if (!strcmp(syscalls_metadata[i]->name, name)) | ||
513 | return i; | ||
514 | } | ||
515 | } | ||
516 | return -1; | ||
517 | } | ||
518 | |||
519 | void set_syscall_enter_id(int num, int id) | ||
520 | { | ||
521 | syscalls_metadata[num]->enter_id = id; | ||
522 | } | ||
523 | |||
524 | void set_syscall_exit_id(int num, int id) | ||
525 | { | ||
526 | syscalls_metadata[num]->exit_id = id; | ||
527 | } | ||
528 | |||
529 | static int __init arch_init_ftrace_syscalls(void) | ||
508 | { | 530 | { |
509 | int i; | 531 | int i; |
510 | struct syscall_metadata *meta; | 532 | struct syscall_metadata *meta; |
511 | unsigned long **psys_syscall_table = &sys_call_table; | 533 | unsigned long **psys_syscall_table = &sys_call_table; |
512 | static atomic_t refs; | ||
513 | |||
514 | if (atomic_inc_return(&refs) != 1) | ||
515 | goto end; | ||
516 | 534 | ||
517 | syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * | 535 | syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * |
518 | FTRACE_SYSCALL_MAX, GFP_KERNEL); | 536 | NR_syscalls, GFP_KERNEL); |
519 | if (!syscalls_metadata) { | 537 | if (!syscalls_metadata) { |
520 | WARN_ON(1); | 538 | WARN_ON(1); |
521 | return; | 539 | return -ENOMEM; |
522 | } | 540 | } |
523 | 541 | ||
524 | for (i = 0; i < FTRACE_SYSCALL_MAX; i++) { | 542 | for (i = 0; i < NR_syscalls; i++) { |
525 | meta = find_syscall_meta(psys_syscall_table[i]); | 543 | meta = find_syscall_meta(psys_syscall_table[i]); |
526 | syscalls_metadata[i] = meta; | 544 | syscalls_metadata[i] = meta; |
527 | } | 545 | } |
528 | return; | 546 | return 0; |
529 | |||
530 | /* Paranoid: avoid overflow */ | ||
531 | end: | ||
532 | atomic_dec(&refs); | ||
533 | } | 547 | } |
548 | arch_initcall(arch_init_ftrace_syscalls); | ||
534 | #endif | 549 | #endif |
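
syscall_name_to_nr() above is a plain linear scan of the metadata table that arch_init_ftrace_syscalls() now builds once at arch_initcall time. A toy version of the lookup, with a hypothetical stand-in for struct syscall_metadata (only the name field matters here):

    #include <stdio.h>
    #include <string.h>

    struct syscall_metadata_sketch { const char *name; };

    /* NULL entry models a syscall with no metadata, as in the hunk above. */
    static struct syscall_metadata_sketch table[] = {
            { "sys_read" }, { "sys_write" }, { NULL },
    };

    static int syscall_name_to_nr(const char *name)
    {
            for (int i = 0; i < (int)(sizeof(table) / sizeof(table[0])); i++)
                    if (table[i].name && !strcmp(table[i].name, name))
                            return i;
            return -1;   /* unknown syscall */
    }

    int main(void)
    {
            printf("%d\n", syscall_name_to_nr("sys_write"));  /* prints 1 */
            return 0;
    }
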
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index 0d98a01cbdb2..7ffec6b3b331 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S | |||
@@ -261,9 +261,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20); | |||
261 | * which will be freed later | 261 | * which will be freed later |
262 | */ | 262 | */ |
263 | 263 | ||
264 | #ifndef CONFIG_HOTPLUG_CPU | 264 | __CPUINIT |
265 | .section .init.text,"ax",@progbits | ||
266 | #endif | ||
267 | 265 | ||
268 | #ifdef CONFIG_SMP | 266 | #ifdef CONFIG_SMP |
269 | ENTRY(startup_32_smp) | 267 | ENTRY(startup_32_smp) |
@@ -441,7 +439,6 @@ is386: movl $2,%ecx # set MP | |||
441 | jne 1f | 439 | jne 1f |
442 | movl $per_cpu__gdt_page,%eax | 440 | movl $per_cpu__gdt_page,%eax |
443 | movl $per_cpu__stack_canary,%ecx | 441 | movl $per_cpu__stack_canary,%ecx |
444 | subl $20, %ecx | ||
445 | movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax) | 442 | movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax) |
446 | shrl $16, %ecx | 443 | shrl $16, %ecx |
447 | movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax) | 444 | movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax) |
@@ -602,11 +599,7 @@ ignore_int: | |||
602 | #endif | 599 | #endif |
603 | iret | 600 | iret |
604 | 601 | ||
605 | #ifndef CONFIG_HOTPLUG_CPU | ||
606 | __CPUINITDATA | ||
607 | #else | ||
608 | __REFDATA | 602 | __REFDATA |
609 | #endif | ||
610 | .align 4 | 603 | .align 4 |
611 | ENTRY(initial_code) | 604 | ENTRY(initial_code) |
612 | .long i386_start_kernel | 605 | .long i386_start_kernel |
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index 3b09634a5153..7d35d0fe2329 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c | |||
@@ -218,7 +218,6 @@ bool handle_irq(unsigned irq, struct pt_regs *regs) | |||
218 | void fixup_irqs(void) | 218 | void fixup_irqs(void) |
219 | { | 219 | { |
220 | unsigned int irq; | 220 | unsigned int irq; |
221 | static int warned; | ||
222 | struct irq_desc *desc; | 221 | struct irq_desc *desc; |
223 | 222 | ||
224 | for_each_irq_desc(irq, desc) { | 223 | for_each_irq_desc(irq, desc) { |
@@ -236,8 +235,8 @@ void fixup_irqs(void) | |||
236 | } | 235 | } |
237 | if (desc->chip->set_affinity) | 236 | if (desc->chip->set_affinity) |
238 | desc->chip->set_affinity(irq, affinity); | 237 | desc->chip->set_affinity(irq, affinity); |
239 | else if (desc->action && !(warned++)) | 238 | else if (desc->action) |
240 | printk("Cannot set affinity for irq %i\n", irq); | 239 | printk_once("Cannot set affinity for irq %i\n", irq); |
241 | } | 240 | } |
242 | 241 | ||
243 | #if 0 | 242 | #if 0 |
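
The hunk above replaces the open-coded "static int warned" pattern with printk_once(). The mechanism is just a function-local static flag; a userspace analogue (a sketch only - the real macro lives in kernel.h and keeps the full printk semantics):

    #include <stdio.h>

    #define printf_once(...)                        \
            do {                                    \
                    static int __done;              \
                    if (!__done) {                  \
                            __done = 1;             \
                            printf(__VA_ARGS__);    \
                    }                               \
            } while (0)

    int main(void)
    {
            for (int i = 0; i < 3; i++)
                    printf_once("printed once, on iteration %d\n", i);
            return 0;
    }
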
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index 651c93b28862..fcd513bf2846 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c | |||
@@ -482,11 +482,11 @@ static void __init construct_ioapic_table(int mpc_default_type) | |||
482 | MP_bus_info(&bus); | 482 | MP_bus_info(&bus); |
483 | } | 483 | } |
484 | 484 | ||
485 | ioapic.type = MP_IOAPIC; | 485 | ioapic.type = MP_IOAPIC; |
486 | ioapic.apicid = 2; | 486 | ioapic.apicid = 2; |
487 | ioapic.apicver = mpc_default_type > 4 ? 0x10 : 0x01; | 487 | ioapic.apicver = mpc_default_type > 4 ? 0x10 : 0x01; |
488 | ioapic.flags = MPC_APIC_USABLE; | 488 | ioapic.flags = MPC_APIC_USABLE; |
489 | ioapic.apicaddr = 0xFEC00000; | 489 | ioapic.apicaddr = IO_APIC_DEFAULT_PHYS_BASE; |
490 | MP_ioapic_info(&ioapic); | 490 | MP_ioapic_info(&ioapic); |
491 | 491 | ||
492 | /* | 492 | /* |
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index 1a041bcf506b..d71c8655905b 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c | |||
@@ -3,6 +3,7 @@ | |||
3 | #include <linux/dmar.h> | 3 | #include <linux/dmar.h> |
4 | #include <linux/bootmem.h> | 4 | #include <linux/bootmem.h> |
5 | #include <linux/pci.h> | 5 | #include <linux/pci.h> |
6 | #include <linux/kmemleak.h> | ||
6 | 7 | ||
7 | #include <asm/proto.h> | 8 | #include <asm/proto.h> |
8 | #include <asm/dma.h> | 9 | #include <asm/dma.h> |
@@ -32,7 +33,14 @@ int no_iommu __read_mostly; | |||
32 | /* Set this to 1 if there is a HW IOMMU in the system */ | 33 | /* Set this to 1 if there is a HW IOMMU in the system */ |
33 | int iommu_detected __read_mostly = 0; | 34 | int iommu_detected __read_mostly = 0; |
34 | 35 | ||
35 | int iommu_pass_through; | 36 | /* |
37 | * This variable becomes 1 if iommu=pt is passed on the kernel command line. | ||
38 | * If this variable is 1, IOMMU implementations do no DMA translation for | ||
39 | * devices and allow every device to access the whole physical memory. This is | ||
40 | * useful if a user wants to use an IOMMU only for KVM device assignment to | ||
41 | * guests and not for driver DMA translation. | ||
42 | */ | ||
43 | int iommu_pass_through __read_mostly; | ||
36 | 44 | ||
37 | dma_addr_t bad_dma_address __read_mostly = 0; | 45 | dma_addr_t bad_dma_address __read_mostly = 0; |
38 | EXPORT_SYMBOL(bad_dma_address); | 46 | EXPORT_SYMBOL(bad_dma_address); |
@@ -88,6 +96,11 @@ void __init dma32_reserve_bootmem(void) | |||
88 | size = roundup(dma32_bootmem_size, align); | 96 | size = roundup(dma32_bootmem_size, align); |
89 | dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align, | 97 | dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align, |
90 | 512ULL<<20); | 98 | 512ULL<<20); |
99 | /* | ||
100 | * Kmemleak should not scan this block as it may not be mapped via the | ||
101 | * kernel direct mapping. | ||
102 | */ | ||
103 | kmemleak_ignore(dma32_bootmem_ptr); | ||
91 | if (dma32_bootmem_ptr) | 104 | if (dma32_bootmem_ptr) |
92 | dma32_bootmem_size = size; | 105 | dma32_bootmem_size = size; |
93 | else | 106 | else |
@@ -147,7 +160,7 @@ again: | |||
147 | return NULL; | 160 | return NULL; |
148 | 161 | ||
149 | addr = page_to_phys(page); | 162 | addr = page_to_phys(page); |
150 | if (!is_buffer_dma_capable(dma_mask, addr, size)) { | 163 | if (addr + size > dma_mask) { |
151 | __free_pages(page, get_order(size)); | 164 | __free_pages(page, get_order(size)); |
152 | 165 | ||
153 | if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) { | 166 | if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) { |
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c index d2e56b8f48e7..98a827ee9ed7 100644 --- a/arch/x86/kernel/pci-gart_64.c +++ b/arch/x86/kernel/pci-gart_64.c | |||
@@ -190,14 +190,13 @@ static void iommu_full(struct device *dev, size_t size, int dir) | |||
190 | static inline int | 190 | static inline int |
191 | need_iommu(struct device *dev, unsigned long addr, size_t size) | 191 | need_iommu(struct device *dev, unsigned long addr, size_t size) |
192 | { | 192 | { |
193 | return force_iommu || | 193 | return force_iommu || !dma_capable(dev, addr, size); |
194 | !is_buffer_dma_capable(*dev->dma_mask, addr, size); | ||
195 | } | 194 | } |
196 | 195 | ||
197 | static inline int | 196 | static inline int |
198 | nonforced_iommu(struct device *dev, unsigned long addr, size_t size) | 197 | nonforced_iommu(struct device *dev, unsigned long addr, size_t size) |
199 | { | 198 | { |
200 | return !is_buffer_dma_capable(*dev->dma_mask, addr, size); | 199 | return !dma_capable(dev, addr, size); |
201 | } | 200 | } |
202 | 201 | ||
203 | /* Map a single continuous physical area into the IOMMU. | 202 | /* Map a single continuous physical area into the IOMMU. |
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c index 71d412a09f30..a3933d4330cd 100644 --- a/arch/x86/kernel/pci-nommu.c +++ b/arch/x86/kernel/pci-nommu.c | |||
@@ -14,7 +14,7 @@ | |||
14 | static int | 14 | static int |
15 | check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size) | 15 | check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size) |
16 | { | 16 | { |
17 | if (hwdev && !is_buffer_dma_capable(*hwdev->dma_mask, bus, size)) { | 17 | if (hwdev && !dma_capable(hwdev, bus, size)) { |
18 | if (*hwdev->dma_mask >= DMA_BIT_MASK(32)) | 18 | if (*hwdev->dma_mask >= DMA_BIT_MASK(32)) |
19 | printk(KERN_ERR | 19 | printk(KERN_ERR |
20 | "nommu_%s: overflow %Lx+%zu of device mask %Lx\n", | 20 | "nommu_%s: overflow %Lx+%zu of device mask %Lx\n", |
@@ -79,12 +79,29 @@ static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr, | |||
79 | free_pages((unsigned long)vaddr, get_order(size)); | 79 | free_pages((unsigned long)vaddr, get_order(size)); |
80 | } | 80 | } |
81 | 81 | ||
82 | static void nommu_sync_single_for_device(struct device *dev, | ||
83 | dma_addr_t addr, size_t size, | ||
84 | enum dma_data_direction dir) | ||
85 | { | ||
86 | flush_write_buffers(); | ||
87 | } | ||
88 | |||
89 | |||
90 | static void nommu_sync_sg_for_device(struct device *dev, | ||
91 | struct scatterlist *sg, int nelems, | ||
92 | enum dma_data_direction dir) | ||
93 | { | ||
94 | flush_write_buffers(); | ||
95 | } | ||
96 | |||
82 | struct dma_map_ops nommu_dma_ops = { | 97 | struct dma_map_ops nommu_dma_ops = { |
83 | .alloc_coherent = dma_generic_alloc_coherent, | 98 | .alloc_coherent = dma_generic_alloc_coherent, |
84 | .free_coherent = nommu_free_coherent, | 99 | .free_coherent = nommu_free_coherent, |
85 | .map_sg = nommu_map_sg, | 100 | .map_sg = nommu_map_sg, |
86 | .map_page = nommu_map_page, | 101 | .map_page = nommu_map_page, |
87 | .is_phys = 1, | 102 | .sync_single_for_device = nommu_sync_single_for_device, |
103 | .sync_sg_for_device = nommu_sync_sg_for_device, | ||
104 | .is_phys = 1, | ||
88 | }; | 105 | }; |
89 | 106 | ||
90 | void __init no_iommu_init(void) | 107 | void __init no_iommu_init(void) |
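
This file and the two preceding ones switch from is_buffer_dma_capable(*dev->dma_mask, addr, size) to dma_capable(dev, addr, size). The underlying test is whether the end of the buffer still fits under the device's DMA mask; a sketch with illustrative values (the real helper takes the struct device and also copes with a missing mask):

    #include <stdint.h>
    #include <stdio.h>

    static int dma_capable_sketch(uint64_t dma_mask, uint64_t addr, size_t size)
    {
            return addr + size <= dma_mask;
    }

    int main(void)
    {
            uint64_t mask32 = 0xffffffffULL;  /* DMA_BIT_MASK(32) */

            /* Crosses the 4 GiB boundary: not DMA-capable for this mask. */
            printf("%d\n", dma_capable_sketch(mask32, 0xfffff000ULL, 0x2000));
            /* Fits entirely under the mask. */
            printf("%d\n", dma_capable_sketch(mask32, 0x10000000ULL, 0x1000));
            return 0;
    }
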
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c index 6af96ee44200..e8a35016115f 100644 --- a/arch/x86/kernel/pci-swiotlb.c +++ b/arch/x86/kernel/pci-swiotlb.c | |||
@@ -13,31 +13,6 @@ | |||
13 | 13 | ||
14 | int swiotlb __read_mostly; | 14 | int swiotlb __read_mostly; |
15 | 15 | ||
16 | void * __init swiotlb_alloc_boot(size_t size, unsigned long nslabs) | ||
17 | { | ||
18 | return alloc_bootmem_low_pages(size); | ||
19 | } | ||
20 | |||
21 | void *swiotlb_alloc(unsigned order, unsigned long nslabs) | ||
22 | { | ||
23 | return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order); | ||
24 | } | ||
25 | |||
26 | dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr) | ||
27 | { | ||
28 | return paddr; | ||
29 | } | ||
30 | |||
31 | phys_addr_t swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr) | ||
32 | { | ||
33 | return baddr; | ||
34 | } | ||
35 | |||
36 | int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size) | ||
37 | { | ||
38 | return 0; | ||
39 | } | ||
40 | |||
41 | static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, | 16 | static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, |
42 | dma_addr_t *dma_handle, gfp_t flags) | 17 | dma_addr_t *dma_handle, gfp_t flags) |
43 | { | 18 | { |
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 994dd6a4a2a0..071166a4ba83 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
@@ -519,16 +519,12 @@ static void c1e_idle(void) | |||
519 | if (!cpumask_test_cpu(cpu, c1e_mask)) { | 519 | if (!cpumask_test_cpu(cpu, c1e_mask)) { |
520 | cpumask_set_cpu(cpu, c1e_mask); | 520 | cpumask_set_cpu(cpu, c1e_mask); |
521 | /* | 521 | /* |
522 | * Force broadcast so ACPI can not interfere. Needs | 522 | * Force broadcast so ACPI can not interfere. |
523 | * to run with interrupts enabled as it uses | ||
524 | * smp_function_call. | ||
525 | */ | 523 | */ |
526 | local_irq_enable(); | ||
527 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE, | 524 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE, |
528 | &cpu); | 525 | &cpu); |
529 | printk(KERN_INFO "Switch to broadcast mode on CPU%d\n", | 526 | printk(KERN_INFO "Switch to broadcast mode on CPU%d\n", |
530 | cpu); | 527 | cpu); |
531 | local_irq_disable(); | ||
532 | } | 528 | } |
533 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu); | 529 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu); |
534 | 530 | ||
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 09ecbde91c13..8d7d5c9c1be3 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c | |||
@@ -35,10 +35,11 @@ | |||
35 | #include <asm/proto.h> | 35 | #include <asm/proto.h> |
36 | #include <asm/ds.h> | 36 | #include <asm/ds.h> |
37 | 37 | ||
38 | #include <trace/syscall.h> | ||
39 | |||
40 | #include "tls.h" | 38 | #include "tls.h" |
41 | 39 | ||
40 | #define CREATE_TRACE_POINTS | ||
41 | #include <trace/events/syscalls.h> | ||
42 | |||
42 | enum x86_regset { | 43 | enum x86_regset { |
43 | REGSET_GENERAL, | 44 | REGSET_GENERAL, |
44 | REGSET_FP, | 45 | REGSET_FP, |
@@ -1497,8 +1498,8 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs) | |||
1497 | tracehook_report_syscall_entry(regs)) | 1498 | tracehook_report_syscall_entry(regs)) |
1498 | ret = -1L; | 1499 | ret = -1L; |
1499 | 1500 | ||
1500 | if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE))) | 1501 | if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) |
1501 | ftrace_syscall_enter(regs); | 1502 | trace_sys_enter(regs, regs->orig_ax); |
1502 | 1503 | ||
1503 | if (unlikely(current->audit_context)) { | 1504 | if (unlikely(current->audit_context)) { |
1504 | if (IS_IA32) | 1505 | if (IS_IA32) |
@@ -1523,8 +1524,8 @@ asmregparm void syscall_trace_leave(struct pt_regs *regs) | |||
1523 | if (unlikely(current->audit_context)) | 1524 | if (unlikely(current->audit_context)) |
1524 | audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax); | 1525 | audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax); |
1525 | 1526 | ||
1526 | if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE))) | 1527 | if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) |
1527 | ftrace_syscall_exit(regs); | 1528 | trace_sys_exit(regs, regs->ax); |
1528 | 1529 | ||
1529 | if (test_thread_flag(TIF_SYSCALL_TRACE)) | 1530 | if (test_thread_flag(TIF_SYSCALL_TRACE)) |
1530 | tracehook_report_syscall_exit(regs, 0); | 1531 | tracehook_report_syscall_exit(regs, 0); |
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 4c578751e94e..81e58238c4ce 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c | |||
@@ -869,6 +869,8 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) | |||
869 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { | 869 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { |
870 | clear_thread_flag(TIF_NOTIFY_RESUME); | 870 | clear_thread_flag(TIF_NOTIFY_RESUME); |
871 | tracehook_notify_resume(regs); | 871 | tracehook_notify_resume(regs); |
872 | if (current->replacement_session_keyring) | ||
873 | key_replace_session_keyring(); | ||
872 | } | 874 | } |
873 | 875 | ||
874 | #ifdef CONFIG_X86_32 | 876 | #ifdef CONFIG_X86_32 |
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c index e8b9863ef8c4..3149032ff107 100644 --- a/arch/x86/kernel/step.c +++ b/arch/x86/kernel/step.c | |||
@@ -4,6 +4,7 @@ | |||
4 | #include <linux/sched.h> | 4 | #include <linux/sched.h> |
5 | #include <linux/mm.h> | 5 | #include <linux/mm.h> |
6 | #include <linux/ptrace.h> | 6 | #include <linux/ptrace.h> |
7 | #include <asm/desc.h> | ||
7 | 8 | ||
8 | unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs) | 9 | unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs) |
9 | { | 10 | { |
@@ -23,7 +24,7 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re | |||
23 | * and APM bios ones we just ignore here. | 24 | * and APM bios ones we just ignore here. |
24 | */ | 25 | */ |
25 | if ((seg & SEGMENT_TI_MASK) == SEGMENT_LDT) { | 26 | if ((seg & SEGMENT_TI_MASK) == SEGMENT_LDT) { |
26 | u32 *desc; | 27 | struct desc_struct *desc; |
27 | unsigned long base; | 28 | unsigned long base; |
28 | 29 | ||
29 | seg &= ~7UL; | 30 | seg &= ~7UL; |
@@ -33,12 +34,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re | |||
33 | addr = -1L; /* bogus selector, access would fault */ | 34 | addr = -1L; /* bogus selector, access would fault */ |
34 | else { | 35 | else { |
35 | desc = child->mm->context.ldt + seg; | 36 | desc = child->mm->context.ldt + seg; |
36 | base = ((desc[0] >> 16) | | 37 | base = get_desc_base(desc); |
37 | ((desc[1] & 0xff) << 16) | | ||
38 | (desc[1] & 0xff000000)); | ||
39 | 38 | ||
40 | /* 16-bit code segment? */ | 39 | /* 16-bit code segment? */ |
41 | if (!((desc[1] >> 22) & 1)) | 40 | if (!desc->d) |
42 | addr &= 0xffff; | 41 | addr &= 0xffff; |
43 | addr += base; | 42 | addr += base; |
44 | } | 43 | } |
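
Both this hunk and the doublefault_32.c one earlier replace open-coded descriptor parsing with get_desc_base(). An x86 segment descriptor splits the 32-bit base across bits 16-39 and 56-63 of the 8-byte entry; a sketch of the extraction (desc_base() is an invented name mirroring what get_desc_base() computes from struct desc_struct's bitfields):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t desc_base(uint64_t desc)
    {
            uint32_t base = (desc >> 16) & 0xffffff;   /* base bits 0..23  */
            base |= ((desc >> 56) & 0xff) << 24;       /* base bits 24..31 */
            return base;
    }

    int main(void)
    {
            /* Made-up descriptor with base 0x12345678 packed into the fields. */
            uint64_t d = ((uint64_t)0x12 << 56) | ((uint64_t)0x345678 << 16);
            printf("%#x\n", desc_base(d));   /* prints 0x12345678 */
            return 0;
    }
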
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c index 6bc211accf08..45e00eb09c3a 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c | |||
@@ -18,9 +18,9 @@ | |||
18 | #include <asm/ia32.h> | 18 | #include <asm/ia32.h> |
19 | #include <asm/syscalls.h> | 19 | #include <asm/syscalls.h> |
20 | 20 | ||
21 | asmlinkage long sys_mmap(unsigned long addr, unsigned long len, | 21 | SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, |
22 | unsigned long prot, unsigned long flags, | 22 | unsigned long, prot, unsigned long, flags, |
23 | unsigned long fd, unsigned long off) | 23 | unsigned long, fd, unsigned long, off) |
24 | { | 24 | { |
25 | long error; | 25 | long error; |
26 | struct file *file; | 26 | struct file *file; |
@@ -226,7 +226,7 @@ bottomup: | |||
226 | } | 226 | } |
227 | 227 | ||
228 | 228 | ||
229 | asmlinkage long sys_uname(struct new_utsname __user *name) | 229 | SYSCALL_DEFINE1(uname, struct new_utsname __user *, name) |
230 | { | 230 | { |
231 | int err; | 231 | int err; |
232 | down_read(&uts_sem); | 232 | down_read(&uts_sem); |
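
Converting sys_mmap and sys_uname to SYSCALL_DEFINEn keeps each argument's type and name paired in one macro invocation. A toy analogue of the macro's shape - not the actual kernel macro, which also emits syscall metadata and, on some configurations, sign-extension wrappers:

    #include <stdio.h>

    /* Glue the sys_ prefix onto the name, keeping (type, name) together. */
    #define MY_SYSCALL_DEFINE1(sname, t1, a1) \
            long sys_##sname(t1 a1)

    MY_SYSCALL_DEFINE1(answer, int, x)
    {
            return x + 41;
    }

    int main(void)
    {
            printf("%ld\n", sys_answer(1));   /* prints 42 */
            return 0;
    }
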
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c index 77b9689f8edb..503c1f2e8835 100644 --- a/arch/x86/kernel/tlb_uv.c +++ b/arch/x86/kernel/tlb_uv.c | |||
@@ -640,13 +640,13 @@ static int __init uv_ptc_init(void) | |||
640 | if (!is_uv_system()) | 640 | if (!is_uv_system()) |
641 | return 0; | 641 | return 0; |
642 | 642 | ||
643 | proc_uv_ptc = create_proc_entry(UV_PTC_BASENAME, 0444, NULL); | 643 | proc_uv_ptc = proc_create(UV_PTC_BASENAME, 0444, NULL, |
644 | &proc_uv_ptc_operations); | ||
644 | if (!proc_uv_ptc) { | 645 | if (!proc_uv_ptc) { |
645 | printk(KERN_ERR "unable to create %s proc entry\n", | 646 | printk(KERN_ERR "unable to create %s proc entry\n", |
646 | UV_PTC_BASENAME); | 647 | UV_PTC_BASENAME); |
647 | return -EINVAL; | 648 | return -EINVAL; |
648 | } | 649 | } |
649 | proc_uv_ptc->proc_fops = &proc_uv_ptc_operations; | ||
650 | return 0; | 650 | return 0; |
651 | } | 651 | } |
652 | 652 | ||
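
proc_create() attaches the file_operations atomically at creation, closing the window in which the old create_proc_entry()-then-assign pattern left the entry visible with a NULL ->proc_fops. A hedged kernel-style usage sketch (the "demo" entry and its contents are invented; assumes the 2.6.31-era procfs/seq_file API):

    #include <linux/module.h>
    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>

    static int demo_show(struct seq_file *m, void *v)
    {
            seq_printf(m, "hello\n");
            return 0;
    }

    static int demo_open(struct inode *inode, struct file *file)
    {
            return single_open(file, demo_show, NULL);
    }

    static const struct file_operations demo_fops = {
            .owner   = THIS_MODULE,
            .open    = demo_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };

    static int __init demo_init(void)
    {
            /* fops attached at creation - no NULL ->proc_fops window */
            return proc_create("demo", 0444, NULL, &demo_fops) ? 0 : -ENOMEM;
    }

    static void __exit demo_exit(void)
    {
            remove_proc_entry("demo", NULL);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
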
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 5204332f475d..6fe85c272a2b 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c | |||
@@ -76,7 +76,7 @@ char ignore_fpu_irq; | |||
76 | * F0 0F bug workaround.. We have a special link segment | 76 | * F0 0F bug workaround.. We have a special link segment |
77 | * for this. | 77 | * for this. |
78 | */ | 78 | */ |
79 | gate_desc idt_table[256] | 79 | gate_desc idt_table[NR_VECTORS] |
80 | __attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, }; | 80 | __attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, }; |
81 | #endif | 81 | #endif |
82 | 82 | ||
@@ -786,27 +786,6 @@ do_spurious_interrupt_bug(struct pt_regs *regs, long error_code) | |||
786 | #endif | 786 | #endif |
787 | } | 787 | } |
788 | 788 | ||
789 | #ifdef CONFIG_X86_32 | ||
790 | unsigned long patch_espfix_desc(unsigned long uesp, unsigned long kesp) | ||
791 | { | ||
792 | struct desc_struct *gdt = get_cpu_gdt_table(smp_processor_id()); | ||
793 | unsigned long base = (kesp - uesp) & -THREAD_SIZE; | ||
794 | unsigned long new_kesp = kesp - base; | ||
795 | unsigned long lim_pages = (new_kesp | (THREAD_SIZE - 1)) >> PAGE_SHIFT; | ||
796 | __u64 desc = *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS]; | ||
797 | |||
798 | /* Set up base for espfix segment */ | ||
799 | desc &= 0x00f0ff0000000000ULL; | ||
800 | desc |= ((((__u64)base) << 16) & 0x000000ffffff0000ULL) | | ||
801 | ((((__u64)base) << 32) & 0xff00000000000000ULL) | | ||
802 | ((((__u64)lim_pages) << 32) & 0x000f000000000000ULL) | | ||
803 | (lim_pages & 0xffff); | ||
804 | *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS] = desc; | ||
805 | |||
806 | return new_kesp; | ||
807 | } | ||
808 | #endif | ||
809 | |||
810 | asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void) | 789 | asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void) |
811 | { | 790 | { |
812 | } | 791 | } |
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 78d185d797de..9fc178255c04 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S | |||
@@ -46,11 +46,10 @@ PHDRS { | |||
46 | data PT_LOAD FLAGS(7); /* RWE */ | 46 | data PT_LOAD FLAGS(7); /* RWE */ |
47 | #ifdef CONFIG_X86_64 | 47 | #ifdef CONFIG_X86_64 |
48 | user PT_LOAD FLAGS(7); /* RWE */ | 48 | user PT_LOAD FLAGS(7); /* RWE */ |
49 | data.init PT_LOAD FLAGS(7); /* RWE */ | ||
50 | #ifdef CONFIG_SMP | 49 | #ifdef CONFIG_SMP |
51 | percpu PT_LOAD FLAGS(7); /* RWE */ | 50 | percpu PT_LOAD FLAGS(7); /* RWE */ |
52 | #endif | 51 | #endif |
53 | data.init2 PT_LOAD FLAGS(7); /* RWE */ | 52 | init PT_LOAD FLAGS(7); /* RWE */ |
54 | #endif | 53 | #endif |
55 | note PT_NOTE FLAGS(0); /* ___ */ | 54 | note PT_NOTE FLAGS(0); /* ___ */ |
56 | } | 55 | } |
@@ -103,65 +102,43 @@ SECTIONS | |||
103 | __stop___ex_table = .; | 102 | __stop___ex_table = .; |
104 | } :text = 0x9090 | 103 | } :text = 0x9090 |
105 | 104 | ||
106 | RODATA | 105 | RO_DATA(PAGE_SIZE) |
107 | 106 | ||
108 | /* Data */ | 107 | /* Data */ |
109 | . = ALIGN(PAGE_SIZE); | ||
110 | .data : AT(ADDR(.data) - LOAD_OFFSET) { | 108 | .data : AT(ADDR(.data) - LOAD_OFFSET) { |
111 | /* Start of data section */ | 109 | /* Start of data section */ |
112 | _sdata = .; | 110 | _sdata = .; |
113 | DATA_DATA | 111 | |
114 | CONSTRUCTORS | 112 | /* init_task */ |
115 | } :data | 113 | INIT_TASK_DATA(THREAD_SIZE) |
116 | 114 | ||
117 | #ifdef CONFIG_X86_32 | 115 | #ifdef CONFIG_X86_32 |
118 | /* 32 bit has nosave before _edata */ | 116 | /* 32 bit has nosave before _edata */ |
119 | . = ALIGN(PAGE_SIZE); | 117 | NOSAVE_DATA |
120 | .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { | ||
121 | __nosave_begin = .; | ||
122 | *(.data.nosave) | ||
123 | . = ALIGN(PAGE_SIZE); | ||
124 | __nosave_end = .; | ||
125 | } | ||
126 | #endif | 118 | #endif |
127 | 119 | ||
128 | . = ALIGN(PAGE_SIZE); | 120 | PAGE_ALIGNED_DATA(PAGE_SIZE) |
129 | .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) { | ||
130 | *(.data.page_aligned) | ||
131 | *(.data.idt) | 121 | *(.data.idt) |
132 | } | ||
133 | 122 | ||
134 | #ifdef CONFIG_X86_32 | 123 | CACHELINE_ALIGNED_DATA(CONFIG_X86_L1_CACHE_BYTES) |
135 | . = ALIGN(32); | ||
136 | #else | ||
137 | . = ALIGN(PAGE_SIZE); | ||
138 | . = ALIGN(CONFIG_X86_L1_CACHE_BYTES); | ||
139 | #endif | ||
140 | .data.cacheline_aligned : | ||
141 | AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) { | ||
142 | *(.data.cacheline_aligned) | ||
143 | } | ||
144 | 124 | ||
145 | /* rarely changed data like cpu maps */ | 125 | DATA_DATA |
146 | #ifdef CONFIG_X86_32 | 126 | CONSTRUCTORS |
147 | . = ALIGN(32); | 127 | |
148 | #else | 128 | /* rarely changed data like cpu maps */ |
149 | . = ALIGN(CONFIG_X86_INTERNODE_CACHE_BYTES); | 129 | READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES) |
150 | #endif | ||
151 | .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) { | ||
152 | *(.data.read_mostly) | ||
153 | 130 | ||
154 | /* End of data section */ | 131 | /* End of data section */ |
155 | _edata = .; | 132 | _edata = .; |
156 | } | 133 | } :data |
157 | 134 | ||
158 | #ifdef CONFIG_X86_64 | 135 | #ifdef CONFIG_X86_64 |
159 | 136 | ||
160 | #define VSYSCALL_ADDR (-10*1024*1024) | 137 | #define VSYSCALL_ADDR (-10*1024*1024) |
161 | #define VSYSCALL_PHYS_ADDR ((LOADADDR(.data.read_mostly) + \ | 138 | #define VSYSCALL_PHYS_ADDR ((LOADADDR(.data) + SIZEOF(.data) + \ |
162 | SIZEOF(.data.read_mostly) + 4095) & ~(4095)) | 139 | PAGE_SIZE - 1) & ~(PAGE_SIZE - 1)) |
163 | #define VSYSCALL_VIRT_ADDR ((ADDR(.data.read_mostly) + \ | 140 | #define VSYSCALL_VIRT_ADDR ((ADDR(.data) + SIZEOF(.data) + \ |
164 | SIZEOF(.data.read_mostly) + 4095) & ~(4095)) | 141 | PAGE_SIZE - 1) & ~(PAGE_SIZE - 1)) |
165 | 142 | ||
166 | #define VLOAD_OFFSET (VSYSCALL_ADDR - VSYSCALL_PHYS_ADDR) | 143 | #define VLOAD_OFFSET (VSYSCALL_ADDR - VSYSCALL_PHYS_ADDR) |
167 | #define VLOAD(x) (ADDR(x) - VLOAD_OFFSET) | 144 | #define VLOAD(x) (ADDR(x) - VLOAD_OFFSET) |
@@ -227,35 +204,29 @@ SECTIONS | |||
227 | 204 | ||
228 | #endif /* CONFIG_X86_64 */ | 205 | #endif /* CONFIG_X86_64 */ |
229 | 206 | ||
230 | /* init_task */ | 207 | /* Init code and data - will be freed after init */ |
231 | . = ALIGN(THREAD_SIZE); | 208 | . = ALIGN(PAGE_SIZE); |
232 | .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) { | 209 | .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) { |
233 | *(.data.init_task) | 210 | __init_begin = .; /* paired with __init_end */ |
234 | } | 211 | } |
235 | #ifdef CONFIG_X86_64 | ||
236 | :data.init | ||
237 | #endif | ||
238 | 212 | ||
213 | #if defined(CONFIG_X86_64) && defined(CONFIG_SMP) | ||
239 | /* | 214 | /* |
240 | * smp_locks might be freed after init | 215 | * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the |
241 | * start/end must be page aligned | 216 | * output PHDR, so the next output section - .init.text - should |
217 | * start another segment - init. | ||
242 | */ | 218 | */ |
243 | . = ALIGN(PAGE_SIZE); | 219 | PERCPU_VADDR(0, :percpu) |
244 | .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) { | 220 | #endif |
245 | __smp_locks = .; | ||
246 | *(.smp_locks) | ||
247 | __smp_locks_end = .; | ||
248 | . = ALIGN(PAGE_SIZE); | ||
249 | } | ||
250 | 221 | ||
251 | /* Init code and data - will be freed after init */ | ||
252 | . = ALIGN(PAGE_SIZE); | ||
253 | .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { | 222 | .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { |
254 | __init_begin = .; /* paired with __init_end */ | ||
255 | _sinittext = .; | 223 | _sinittext = .; |
256 | INIT_TEXT | 224 | INIT_TEXT |
257 | _einittext = .; | 225 | _einittext = .; |
258 | } | 226 | } |
227 | #ifdef CONFIG_X86_64 | ||
228 | :init | ||
229 | #endif | ||
259 | 230 | ||
260 | .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { | 231 | .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { |
261 | INIT_DATA | 232 | INIT_DATA |
@@ -326,17 +297,7 @@ SECTIONS | |||
326 | } | 297 | } |
327 | #endif | 298 | #endif |
328 | 299 | ||
329 | #if defined(CONFIG_X86_64) && defined(CONFIG_SMP) | 300 | #if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP) |
330 | /* | ||
331 | * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the | ||
332 | * output PHDR, so the next output section - __data_nosave - should | ||
333 | * start another section data.init2. Also, pda should be at the head of | ||
334 | * percpu area. Preallocate it and define the percpu offset symbol | ||
335 | * so that it can be accessed as a percpu variable. | ||
336 | */ | ||
337 | . = ALIGN(PAGE_SIZE); | ||
338 | PERCPU_VADDR(0, :percpu) | ||
339 | #else | ||
340 | PERCPU(PAGE_SIZE) | 301 | PERCPU(PAGE_SIZE) |
341 | #endif | 302 | #endif |
342 | 303 | ||
@@ -347,15 +308,22 @@ SECTIONS | |||
347 | __init_end = .; | 308 | __init_end = .; |
348 | } | 309 | } |
349 | 310 | ||
311 | /* | ||
312 | * smp_locks might be freed after init | ||
313 | * start/end must be page aligned | ||
314 | */ | ||
315 | . = ALIGN(PAGE_SIZE); | ||
316 | .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) { | ||
317 | __smp_locks = .; | ||
318 | *(.smp_locks) | ||
319 | __smp_locks_end = .; | ||
320 | . = ALIGN(PAGE_SIZE); | ||
321 | } | ||
322 | |||
350 | #ifdef CONFIG_X86_64 | 323 | #ifdef CONFIG_X86_64 |
351 | .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { | 324 | .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { |
352 | . = ALIGN(PAGE_SIZE); | 325 | NOSAVE_DATA |
353 | __nosave_begin = .; | 326 | } |
354 | *(.data.nosave) | ||
355 | . = ALIGN(PAGE_SIZE); | ||
356 | __nosave_end = .; | ||
357 | } :data.init2 | ||
358 | /* use another section data.init2, see PERCPU_VADDR() above */ | ||
359 | #endif | 327 | #endif |
360 | 328 | ||
361 | /* BSS */ | 329 | /* BSS */ |
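
The new VSYSCALL_PHYS_ADDR/VSYSCALL_VIRT_ADDR definitions round the end of .data up to the next page with the standard power-of-two round-up, (x + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1). A sketch of the same arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    /* Round x up to the next multiple of a; a must be a power of two. */
    static uint64_t align_up(uint64_t x, uint64_t a)
    {
            return (x + a - 1) & ~(a - 1);
    }

    int main(void)
    {
            printf("%#llx\n", (unsigned long long)align_up(0x12345, 0x1000)); /* 0x13000 */
            printf("%#llx\n", (unsigned long long)align_up(0x13000, 0x1000)); /* 0x13000 */
            return 0;
    }
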
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 3d4529011828..633ccc7400a4 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -2297,12 +2297,7 @@ static int emulator_cmpxchg_emulated(unsigned long addr, | |||
2297 | unsigned int bytes, | 2297 | unsigned int bytes, |
2298 | struct kvm_vcpu *vcpu) | 2298 | struct kvm_vcpu *vcpu) |
2299 | { | 2299 | { |
2300 | static int reported; | 2300 | printk_once(KERN_WARNING "kvm: emulating exchange as write\n"); |
2301 | |||
2302 | if (!reported) { | ||
2303 | reported = 1; | ||
2304 | printk(KERN_WARNING "kvm: emulating exchange as write\n"); | ||
2305 | } | ||
2306 | #ifndef CONFIG_X86_64 | 2301 | #ifndef CONFIG_X86_64 |
2307 | /* guests cmpxchg8b have to be emulated atomically */ | 2302 | /* guests cmpxchg8b have to be emulated atomically */ |
2308 | if (bytes == 8) { | 2303 | if (bytes == 8) { |
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 6176fe8f29e0..ea56b8cbb6a6 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c | |||
@@ -796,7 +796,7 @@ int __init reserve_bootmem_generic(unsigned long phys, unsigned long len, | |||
796 | return ret; | 796 | return ret; |
797 | 797 | ||
798 | #else | 798 | #else |
799 | reserve_bootmem(phys, len, BOOTMEM_DEFAULT); | 799 | reserve_bootmem(phys, len, flags); |
800 | #endif | 800 | #endif |
801 | 801 | ||
802 | if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) { | 802 | if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) { |
diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c index 2c55ed098654..528bf954eb74 100644 --- a/arch/x86/mm/kmemcheck/kmemcheck.c +++ b/arch/x86/mm/kmemcheck/kmemcheck.c | |||
@@ -331,6 +331,20 @@ static void kmemcheck_read_strict(struct pt_regs *regs, | |||
331 | kmemcheck_shadow_set(shadow, size); | 331 | kmemcheck_shadow_set(shadow, size); |
332 | } | 332 | } |
333 | 333 | ||
334 | bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size) | ||
335 | { | ||
336 | enum kmemcheck_shadow status; | ||
337 | void *shadow; | ||
338 | |||
339 | shadow = kmemcheck_shadow_lookup(addr); | ||
340 | if (!shadow) | ||
341 | return true; | ||
342 | |||
343 | status = kmemcheck_shadow_test(shadow, size); | ||
344 | |||
345 | return status == KMEMCHECK_SHADOW_INITIALIZED; | ||
346 | } | ||
347 | |||
334 | /* Access may cross page boundary */ | 348 | /* Access may cross page boundary */ |
335 | static void kmemcheck_read(struct pt_regs *regs, | 349 | static void kmemcheck_read(struct pt_regs *regs, |
336 | unsigned long addr, unsigned int size) | 350 | unsigned long addr, unsigned int size) |
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index e6718bb28065..b2f7d3e59b86 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c | |||
@@ -623,7 +623,8 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, | |||
623 | return ret; | 623 | return ret; |
624 | 624 | ||
625 | if (flags != want_flags) { | 625 | if (flags != want_flags) { |
626 | if (strict_prot || !is_new_memtype_allowed(want_flags, flags)) { | 626 | if (strict_prot || |
627 | !is_new_memtype_allowed(paddr, size, want_flags, flags)) { | ||
627 | free_memtype(paddr, paddr + size); | 628 | free_memtype(paddr, paddr + size); |
628 | printk(KERN_ERR "%s:%d map pfn expected mapping type %s" | 629 | printk(KERN_ERR "%s:%d map pfn expected mapping type %s" |
629 | " for %Lx-%Lx, got %s\n", | 630 | " for %Lx-%Lx, got %s\n", |
@@ -826,7 +827,7 @@ static int memtype_seq_show(struct seq_file *seq, void *v) | |||
826 | return 0; | 827 | return 0; |
827 | } | 828 | } |
828 | 829 | ||
829 | static struct seq_operations memtype_seq_ops = { | 830 | static const struct seq_operations memtype_seq_ops = { |
830 | .start = memtype_seq_start, | 831 | .start = memtype_seq_start, |
831 | .next = memtype_seq_next, | 832 | .next = memtype_seq_next, |
832 | .stop = memtype_seq_stop, | 833 | .stop = memtype_seq_stop, |
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 89b9a5cd63da..cb88b1a0bd5f 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c | |||
@@ -1,11 +1,14 @@ | |||
1 | /** | 1 | /** |
2 | * @file nmi_int.c | 2 | * @file nmi_int.c |
3 | * | 3 | * |
4 | * @remark Copyright 2002-2008 OProfile authors | 4 | * @remark Copyright 2002-2009 OProfile authors |
5 | * @remark Read the file COPYING | 5 | * @remark Read the file COPYING |
6 | * | 6 | * |
7 | * @author John Levon <levon@movementarian.org> | 7 | * @author John Levon <levon@movementarian.org> |
8 | * @author Robert Richter <robert.richter@amd.com> | 8 | * @author Robert Richter <robert.richter@amd.com> |
9 | * @author Barry Kasindorf <barry.kasindorf@amd.com> | ||
10 | * @author Jason Yeh <jason.yeh@amd.com> | ||
11 | * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com> | ||
9 | */ | 12 | */ |
10 | 13 | ||
11 | #include <linux/init.h> | 14 | #include <linux/init.h> |
@@ -24,13 +27,35 @@ | |||
24 | #include "op_counter.h" | 27 | #include "op_counter.h" |
25 | #include "op_x86_model.h" | 28 | #include "op_x86_model.h" |
26 | 29 | ||
27 | static struct op_x86_model_spec const *model; | 30 | static struct op_x86_model_spec *model; |
28 | static DEFINE_PER_CPU(struct op_msrs, cpu_msrs); | 31 | static DEFINE_PER_CPU(struct op_msrs, cpu_msrs); |
29 | static DEFINE_PER_CPU(unsigned long, saved_lvtpc); | 32 | static DEFINE_PER_CPU(unsigned long, saved_lvtpc); |
30 | 33 | ||
31 | /* 0 == registered but off, 1 == registered and on */ | 34 | /* 0 == registered but off, 1 == registered and on */ |
32 | static int nmi_enabled = 0; | 35 | static int nmi_enabled = 0; |
33 | 36 | ||
37 | struct op_counter_config counter_config[OP_MAX_COUNTER]; | ||
38 | |||
39 | /* common functions */ | ||
40 | |||
41 | u64 op_x86_get_ctrl(struct op_x86_model_spec const *model, | ||
42 | struct op_counter_config *counter_config) | ||
43 | { | ||
44 | u64 val = 0; | ||
45 | u16 event = (u16)counter_config->event; | ||
46 | |||
47 | val |= ARCH_PERFMON_EVENTSEL_INT; | ||
48 | val |= counter_config->user ? ARCH_PERFMON_EVENTSEL_USR : 0; | ||
49 | val |= counter_config->kernel ? ARCH_PERFMON_EVENTSEL_OS : 0; | ||
50 | val |= (counter_config->unit_mask & 0xFF) << 8; | ||
51 | event &= model->event_mask ? model->event_mask : 0xFF; | ||
52 | val |= event & 0xFF; | ||
53 | val |= (event & 0x0F00) << 24; | ||
54 | |||
55 | return val; | ||
56 | } | ||
57 | |||
58 | |||
34 | static int profile_exceptions_notify(struct notifier_block *self, | 59 | static int profile_exceptions_notify(struct notifier_block *self, |
35 | unsigned long val, void *data) | 60 | unsigned long val, void *data) |
36 | { | 61 | { |
@@ -52,36 +77,214 @@ static int profile_exceptions_notify(struct notifier_block *self, | |||
52 | 77 | ||
53 | static void nmi_cpu_save_registers(struct op_msrs *msrs) | 78 | static void nmi_cpu_save_registers(struct op_msrs *msrs) |
54 | { | 79 | { |
55 | unsigned int const nr_ctrs = model->num_counters; | ||
56 | unsigned int const nr_ctrls = model->num_controls; | ||
57 | struct op_msr *counters = msrs->counters; | 80 | struct op_msr *counters = msrs->counters; |
58 | struct op_msr *controls = msrs->controls; | 81 | struct op_msr *controls = msrs->controls; |
59 | unsigned int i; | 82 | unsigned int i; |
60 | 83 | ||
61 | for (i = 0; i < nr_ctrs; ++i) { | 84 | for (i = 0; i < model->num_counters; ++i) { |
62 | if (counters[i].addr) { | 85 | if (counters[i].addr) |
63 | rdmsr(counters[i].addr, | 86 | rdmsrl(counters[i].addr, counters[i].saved); |
64 | counters[i].saved.low, | 87 | } |
65 | counters[i].saved.high); | 88 | |
66 | } | 89 | for (i = 0; i < model->num_controls; ++i) { |
90 | if (controls[i].addr) | ||
91 | rdmsrl(controls[i].addr, controls[i].saved); | ||
92 | } | ||
93 | } | ||
94 | |||
95 | static void nmi_cpu_start(void *dummy) | ||
96 | { | ||
97 | struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs); | ||
98 | model->start(msrs); | ||
99 | } | ||
100 | |||
101 | static int nmi_start(void) | ||
102 | { | ||
103 | on_each_cpu(nmi_cpu_start, NULL, 1); | ||
104 | return 0; | ||
105 | } | ||
106 | |||
107 | static void nmi_cpu_stop(void *dummy) | ||
108 | { | ||
109 | struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs); | ||
110 | model->stop(msrs); | ||
111 | } | ||
112 | |||
113 | static void nmi_stop(void) | ||
114 | { | ||
115 | on_each_cpu(nmi_cpu_stop, NULL, 1); | ||
116 | } | ||
117 | |||
118 | #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX | ||
119 | |||
120 | static DEFINE_PER_CPU(int, switch_index); | ||
121 | |||
122 | static inline int has_mux(void) | ||
123 | { | ||
124 | return !!model->switch_ctrl; | ||
125 | } | ||
126 | |||
127 | inline int op_x86_phys_to_virt(int phys) | ||
128 | { | ||
129 | return __get_cpu_var(switch_index) + phys; | ||
130 | } | ||
131 | |||
132 | inline int op_x86_virt_to_phys(int virt) | ||
133 | { | ||
134 | return virt % model->num_counters; | ||
135 | } | ||
136 | |||
137 | static void nmi_shutdown_mux(void) | ||
138 | { | ||
139 | int i; | ||
140 | |||
141 | if (!has_mux()) | ||
142 | return; | ||
143 | |||
144 | for_each_possible_cpu(i) { | ||
145 | kfree(per_cpu(cpu_msrs, i).multiplex); | ||
146 | per_cpu(cpu_msrs, i).multiplex = NULL; | ||
147 | per_cpu(switch_index, i) = 0; | ||
67 | } | 148 | } |
149 | } | ||
150 | |||
151 | static int nmi_setup_mux(void) | ||
152 | { | ||
153 | size_t multiplex_size = | ||
154 | sizeof(struct op_msr) * model->num_virt_counters; | ||
155 | int i; | ||
156 | |||
157 | if (!has_mux()) | ||
158 | return 1; | ||
159 | |||
160 | for_each_possible_cpu(i) { | ||
161 | per_cpu(cpu_msrs, i).multiplex = | ||
162 | kmalloc(multiplex_size, GFP_KERNEL); | ||
163 | if (!per_cpu(cpu_msrs, i).multiplex) | ||
164 | return 0; | ||
165 | } | ||
166 | |||
167 | return 1; | ||
168 | } | ||
169 | |||
170 | static void nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs) | ||
171 | { | ||
172 | int i; | ||
173 | struct op_msr *multiplex = msrs->multiplex; | ||
174 | |||
175 | if (!has_mux()) | ||
176 | return; | ||
68 | 177 | ||
69 | for (i = 0; i < nr_ctrls; ++i) { | 178 | for (i = 0; i < model->num_virt_counters; ++i) { |
70 | if (controls[i].addr) { | 179 | if (counter_config[i].enabled) { |
71 | rdmsr(controls[i].addr, | 180 | multiplex[i].saved = -(u64)counter_config[i].count; |
72 | controls[i].saved.low, | 181 | } else { |
73 | controls[i].saved.high); | 182 | multiplex[i].addr = 0; |
183 | multiplex[i].saved = 0; | ||
74 | } | 184 | } |
75 | } | 185 | } |
186 | |||
187 | per_cpu(switch_index, cpu) = 0; | ||
188 | } | ||
189 | |||
190 | static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs) | ||
191 | { | ||
192 | struct op_msr *multiplex = msrs->multiplex; | ||
193 | int i; | ||
194 | |||
195 | for (i = 0; i < model->num_counters; ++i) { | ||
196 | int virt = op_x86_phys_to_virt(i); | ||
197 | if (multiplex[virt].addr) | ||
198 | rdmsrl(multiplex[virt].addr, multiplex[virt].saved); | ||
199 | } | ||
200 | } | ||
201 | |||
202 | static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs) | ||
203 | { | ||
204 | struct op_msr *multiplex = msrs->multiplex; | ||
205 | int i; | ||
206 | |||
207 | for (i = 0; i < model->num_counters; ++i) { | ||
208 | int virt = op_x86_phys_to_virt(i); | ||
209 | if (multiplex[virt].addr) | ||
210 | wrmsrl(multiplex[virt].addr, multiplex[virt].saved); | ||
211 | } | ||
76 | } | 212 | } |
77 | 213 | ||
78 | static void nmi_save_registers(void *dummy) | 214 | static void nmi_cpu_switch(void *dummy) |
79 | { | 215 | { |
80 | int cpu = smp_processor_id(); | 216 | int cpu = smp_processor_id(); |
217 | int si = per_cpu(switch_index, cpu); | ||
81 | struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu); | 218 | struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu); |
82 | nmi_cpu_save_registers(msrs); | 219 | |
220 | nmi_cpu_stop(NULL); | ||
221 | nmi_cpu_save_mpx_registers(msrs); | ||
222 | |||
223 | /* move to next set */ | ||
224 | si += model->num_counters; | ||
225 | if ((si > model->num_virt_counters) || (counter_config[si].count == 0)) | ||
226 | per_cpu(switch_index, cpu) = 0; | ||
227 | else | ||
228 | per_cpu(switch_index, cpu) = si; | ||
229 | |||
230 | model->switch_ctrl(model, msrs); | ||
231 | nmi_cpu_restore_mpx_registers(msrs); | ||
232 | |||
233 | nmi_cpu_start(NULL); | ||
234 | } | ||
235 | |||
236 | |||
237 | /* | ||
238 | * Quick check to see if multiplexing is necessary. | ||
239 | * The check should be sufficient since counters are used | ||
240 | * in order. | ||
241 | */ | ||
242 | static int nmi_multiplex_on(void) | ||
243 | { | ||
244 | return counter_config[model->num_counters].count ? 0 : -EINVAL; | ||
245 | } | ||
246 | |||
247 | static int nmi_switch_event(void) | ||
248 | { | ||
249 | if (!has_mux()) | ||
250 | return -ENOSYS; /* not implemented */ | ||
251 | if (nmi_multiplex_on() < 0) | ||
252 | return -EINVAL; /* not necessary */ | ||
253 | |||
254 | on_each_cpu(nmi_cpu_switch, NULL, 1); | ||
255 | |||
256 | return 0; | ||
257 | } | ||
258 | |||
259 | static inline void mux_init(struct oprofile_operations *ops) | ||
260 | { | ||
261 | if (has_mux()) | ||
262 | ops->switch_events = nmi_switch_event; | ||
263 | } | ||
264 | |||
265 | static void mux_clone(int cpu) | ||
266 | { | ||
267 | if (!has_mux()) | ||
268 | return; | ||
269 | |||
270 | memcpy(per_cpu(cpu_msrs, cpu).multiplex, | ||
271 | per_cpu(cpu_msrs, 0).multiplex, | ||
272 | sizeof(struct op_msr) * model->num_virt_counters); | ||
83 | } | 273 | } |
84 | 274 | ||
275 | #else | ||
276 | |||
277 | inline int op_x86_phys_to_virt(int phys) { return phys; } | ||
278 | inline int op_x86_virt_to_phys(int virt) { return virt; } | ||
279 | static inline void nmi_shutdown_mux(void) { } | ||
280 | static inline int nmi_setup_mux(void) { return 1; } | ||
281 | static inline void | ||
282 | nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs) { } | ||
283 | static inline void mux_init(struct oprofile_operations *ops) { } | ||
284 | static void mux_clone(int cpu) { } | ||
285 | |||
286 | #endif | ||
287 | |||
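Aside: the multiplexing layer above reduces to per-CPU index arithmetic. op_x86_phys_to_virt() adds the current switch_index to a hardware counter number, nmi_cpu_switch() advances that index by num_counters and wraps it, and op_x86_virt_to_phys() folds a virtual counter back onto the hardware set. A minimal user-space sketch of the rotation (the 4/32 split is taken from the AMD model below; the kernel's wrap test is written with > and also consults counter_config[si].count, so this is a simplification):

    #include <stdio.h>

    #define NUM_COUNTERS      4   /* hardware counters, as on AMD */
    #define NUM_VIRT_COUNTERS 32  /* virtual counters */

    static int switch_index;      /* per-CPU in the kernel */

    static int phys_to_virt(int phys) { return switch_index + phys; }
    static int virt_to_phys(int virt) { return virt % NUM_COUNTERS; }

    /* advance to the next counter set, wrapping like nmi_cpu_switch() */
    static void next_set(int nr_configured)
    {
            switch_index += NUM_COUNTERS;
            if (switch_index >= nr_configured)
                    switch_index = 0;
    }

    int main(void)
    {
            int round, phys;

            for (round = 0; round < 3; round++) {
                    for (phys = 0; phys < NUM_COUNTERS; phys++)
                            printf("round %d: hw ctr %d -> virt event %d\n",
                                   round, phys, phys_to_virt(phys));
                    next_set(12);  /* e.g. 12 enabled events */
            }
            return virt_to_phys(7) == 3 ? 0 : 1;
    }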
85 | static void free_msrs(void) | 288 | static void free_msrs(void) |
86 | { | 289 | { |
87 | int i; | 290 | int i; |
@@ -95,38 +298,32 @@ static void free_msrs(void) | |||
95 | 298 | ||
96 | static int allocate_msrs(void) | 299 | static int allocate_msrs(void) |
97 | { | 300 | { |
98 | int success = 1; | ||
99 | size_t controls_size = sizeof(struct op_msr) * model->num_controls; | 301 | size_t controls_size = sizeof(struct op_msr) * model->num_controls; |
100 | size_t counters_size = sizeof(struct op_msr) * model->num_counters; | 302 | size_t counters_size = sizeof(struct op_msr) * model->num_counters; |
101 | 303 | ||
102 | int i; | 304 | int i; |
103 | for_each_possible_cpu(i) { | 305 | for_each_possible_cpu(i) { |
104 | per_cpu(cpu_msrs, i).counters = kmalloc(counters_size, | 306 | per_cpu(cpu_msrs, i).counters = kmalloc(counters_size, |
105 | GFP_KERNEL); | 307 | GFP_KERNEL); |
106 | if (!per_cpu(cpu_msrs, i).counters) { | 308 | if (!per_cpu(cpu_msrs, i).counters) |
107 | success = 0; | 309 | return 0; |
108 | break; | ||
109 | } | ||
110 | per_cpu(cpu_msrs, i).controls = kmalloc(controls_size, | 310 | per_cpu(cpu_msrs, i).controls = kmalloc(controls_size, |
111 | GFP_KERNEL); | 311 | GFP_KERNEL); |
112 | if (!per_cpu(cpu_msrs, i).controls) { | 312 | if (!per_cpu(cpu_msrs, i).controls) |
113 | success = 0; | 313 | return 0; |
114 | break; | ||
115 | } | ||
116 | } | 314 | } |
117 | 315 | ||
118 | if (!success) | 316 | return 1; |
119 | free_msrs(); | ||
120 | |||
121 | return success; | ||
122 | } | 317 | } |
123 | 318 | ||
124 | static void nmi_cpu_setup(void *dummy) | 319 | static void nmi_cpu_setup(void *dummy) |
125 | { | 320 | { |
126 | int cpu = smp_processor_id(); | 321 | int cpu = smp_processor_id(); |
127 | struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu); | 322 | struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu); |
323 | nmi_cpu_save_registers(msrs); | ||
128 | spin_lock(&oprofilefs_lock); | 324 | spin_lock(&oprofilefs_lock); |
129 | model->setup_ctrs(msrs); | 325 | model->setup_ctrs(model, msrs); |
326 | nmi_cpu_setup_mux(cpu, msrs); | ||
130 | spin_unlock(&oprofilefs_lock); | 327 | spin_unlock(&oprofilefs_lock); |
131 | per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC); | 328 | per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC); |
132 | apic_write(APIC_LVTPC, APIC_DM_NMI); | 329 | apic_write(APIC_LVTPC, APIC_DM_NMI); |
@@ -144,11 +341,15 @@ static int nmi_setup(void) | |||
144 | int cpu; | 341 | int cpu; |
145 | 342 | ||
146 | if (!allocate_msrs()) | 343 | if (!allocate_msrs()) |
147 | return -ENOMEM; | 344 | err = -ENOMEM; |
345 | else if (!nmi_setup_mux()) | ||
346 | err = -ENOMEM; | ||
347 | else | ||
348 | err = register_die_notifier(&profile_exceptions_nb); | ||
148 | 349 | ||
149 | err = register_die_notifier(&profile_exceptions_nb); | ||
150 | if (err) { | 350 | if (err) { |
151 | free_msrs(); | 351 | free_msrs(); |
352 | nmi_shutdown_mux(); | ||
152 | return err; | 353 | return err; |
153 | } | 354 | } |
154 | 355 | ||
@@ -159,45 +360,38 @@ static int nmi_setup(void) | |||
159 | /* Assume saved/restored counters are the same on all CPUs */ | 360 | /* Assume saved/restored counters are the same on all CPUs */ |
160 | model->fill_in_addresses(&per_cpu(cpu_msrs, 0)); | 361 | model->fill_in_addresses(&per_cpu(cpu_msrs, 0)); |
161 | for_each_possible_cpu(cpu) { | 362 | for_each_possible_cpu(cpu) { |
162 | if (cpu != 0) { | 363 | if (!cpu) |
163 | memcpy(per_cpu(cpu_msrs, cpu).counters, | 364 | continue; |
164 | per_cpu(cpu_msrs, 0).counters, | 365 | |
165 | sizeof(struct op_msr) * model->num_counters); | 366 | memcpy(per_cpu(cpu_msrs, cpu).counters, |
166 | 367 | per_cpu(cpu_msrs, 0).counters, | |
167 | memcpy(per_cpu(cpu_msrs, cpu).controls, | 368 | sizeof(struct op_msr) * model->num_counters); |
168 | per_cpu(cpu_msrs, 0).controls, | 369 | |
169 | sizeof(struct op_msr) * model->num_controls); | 370 | memcpy(per_cpu(cpu_msrs, cpu).controls, |
170 | } | 371 | per_cpu(cpu_msrs, 0).controls, |
372 | sizeof(struct op_msr) * model->num_controls); | ||
171 | 373 | ||
374 | mux_clone(cpu); | ||
172 | } | 375 | } |
173 | on_each_cpu(nmi_save_registers, NULL, 1); | ||
174 | on_each_cpu(nmi_cpu_setup, NULL, 1); | 376 | on_each_cpu(nmi_cpu_setup, NULL, 1); |
175 | nmi_enabled = 1; | 377 | nmi_enabled = 1; |
176 | return 0; | 378 | return 0; |
177 | } | 379 | } |
178 | 380 | ||
179 | static void nmi_restore_registers(struct op_msrs *msrs) | 381 | static void nmi_cpu_restore_registers(struct op_msrs *msrs) |
180 | { | 382 | { |
181 | unsigned int const nr_ctrs = model->num_counters; | ||
182 | unsigned int const nr_ctrls = model->num_controls; | ||
183 | struct op_msr *counters = msrs->counters; | 383 | struct op_msr *counters = msrs->counters; |
184 | struct op_msr *controls = msrs->controls; | 384 | struct op_msr *controls = msrs->controls; |
185 | unsigned int i; | 385 | unsigned int i; |
186 | 386 | ||
187 | for (i = 0; i < nr_ctrls; ++i) { | 387 | for (i = 0; i < model->num_controls; ++i) { |
188 | if (controls[i].addr) { | 388 | if (controls[i].addr) |
189 | wrmsr(controls[i].addr, | 389 | wrmsrl(controls[i].addr, controls[i].saved); |
190 | controls[i].saved.low, | ||
191 | controls[i].saved.high); | ||
192 | } | ||
193 | } | 390 | } |
194 | 391 | ||
195 | for (i = 0; i < nr_ctrs; ++i) { | 392 | for (i = 0; i < model->num_counters; ++i) { |
196 | if (counters[i].addr) { | 393 | if (counters[i].addr) |
197 | wrmsr(counters[i].addr, | 394 | wrmsrl(counters[i].addr, counters[i].saved); |
198 | counters[i].saved.low, | ||
199 | counters[i].saved.high); | ||
200 | } | ||
201 | } | 395 | } |
202 | } | 396 | } |
203 | 397 | ||
@@ -205,7 +399,7 @@ static void nmi_cpu_shutdown(void *dummy) | |||
205 | { | 399 | { |
206 | unsigned int v; | 400 | unsigned int v; |
207 | int cpu = smp_processor_id(); | 401 | int cpu = smp_processor_id(); |
208 | struct op_msrs *msrs = &__get_cpu_var(cpu_msrs); | 402 | struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu); |
209 | 403 | ||
210 | /* restoring APIC_LVTPC can trigger an apic error because the delivery | 404 | /* restoring APIC_LVTPC can trigger an apic error because the delivery |
211 | * mode and vector nr combination can be illegal. That's by design: on | 405 | * mode and vector nr combination can be illegal. That's by design: on |
@@ -216,7 +410,7 @@ static void nmi_cpu_shutdown(void *dummy) | |||
216 | apic_write(APIC_LVTERR, v | APIC_LVT_MASKED); | 410 | apic_write(APIC_LVTERR, v | APIC_LVT_MASKED); |
217 | apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu)); | 411 | apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu)); |
218 | apic_write(APIC_LVTERR, v); | 412 | apic_write(APIC_LVTERR, v); |
219 | nmi_restore_registers(msrs); | 413 | nmi_cpu_restore_registers(msrs); |
220 | } | 414 | } |
221 | 415 | ||
222 | static void nmi_shutdown(void) | 416 | static void nmi_shutdown(void) |
@@ -226,42 +420,18 @@ static void nmi_shutdown(void) | |||
226 | nmi_enabled = 0; | 420 | nmi_enabled = 0; |
227 | on_each_cpu(nmi_cpu_shutdown, NULL, 1); | 421 | on_each_cpu(nmi_cpu_shutdown, NULL, 1); |
228 | unregister_die_notifier(&profile_exceptions_nb); | 422 | unregister_die_notifier(&profile_exceptions_nb); |
423 | nmi_shutdown_mux(); | ||
229 | msrs = &get_cpu_var(cpu_msrs); | 424 | msrs = &get_cpu_var(cpu_msrs); |
230 | model->shutdown(msrs); | 425 | model->shutdown(msrs); |
231 | free_msrs(); | 426 | free_msrs(); |
232 | put_cpu_var(cpu_msrs); | 427 | put_cpu_var(cpu_msrs); |
233 | } | 428 | } |
234 | 429 | ||
235 | static void nmi_cpu_start(void *dummy) | ||
236 | { | ||
237 | struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs); | ||
238 | model->start(msrs); | ||
239 | } | ||
240 | |||
241 | static int nmi_start(void) | ||
242 | { | ||
243 | on_each_cpu(nmi_cpu_start, NULL, 1); | ||
244 | return 0; | ||
245 | } | ||
246 | |||
247 | static void nmi_cpu_stop(void *dummy) | ||
248 | { | ||
249 | struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs); | ||
250 | model->stop(msrs); | ||
251 | } | ||
252 | |||
253 | static void nmi_stop(void) | ||
254 | { | ||
255 | on_each_cpu(nmi_cpu_stop, NULL, 1); | ||
256 | } | ||
257 | |||
258 | struct op_counter_config counter_config[OP_MAX_COUNTER]; | ||
259 | |||
260 | static int nmi_create_files(struct super_block *sb, struct dentry *root) | 430 | static int nmi_create_files(struct super_block *sb, struct dentry *root) |
261 | { | 431 | { |
262 | unsigned int i; | 432 | unsigned int i; |
263 | 433 | ||
264 | for (i = 0; i < model->num_counters; ++i) { | 434 | for (i = 0; i < model->num_virt_counters; ++i) { |
265 | struct dentry *dir; | 435 | struct dentry *dir; |
266 | char buf[4]; | 436 | char buf[4]; |
267 | 437 | ||
@@ -270,7 +440,7 @@ static int nmi_create_files(struct super_block *sb, struct dentry *root) | |||
270 | * NOTE: assumes 1:1 mapping here (that counters are organized | 440 | * NOTE: assumes 1:1 mapping here (that counters are organized |
271 | * sequentially in their struct assignment). | 441 | * sequentially in their struct assignment). |
272 | */ | 442 | */ |
273 | if (unlikely(!avail_to_resrv_perfctr_nmi_bit(i))) | 443 | if (!avail_to_resrv_perfctr_nmi_bit(op_x86_virt_to_phys(i))) |
274 | continue; | 444 | continue; |
275 | 445 | ||
276 | snprintf(buf, sizeof(buf), "%d", i); | 446 | snprintf(buf, sizeof(buf), "%d", i); |
@@ -402,6 +572,7 @@ module_param_call(cpu_type, force_cpu_type, NULL, NULL, 0); | |||
402 | static int __init ppro_init(char **cpu_type) | 572 | static int __init ppro_init(char **cpu_type) |
403 | { | 573 | { |
404 | __u8 cpu_model = boot_cpu_data.x86_model; | 574 | __u8 cpu_model = boot_cpu_data.x86_model; |
575 | struct op_x86_model_spec *spec = &op_ppro_spec; /* default */ | ||
405 | 576 | ||
406 | if (force_arch_perfmon && cpu_has_arch_perfmon) | 577 | if (force_arch_perfmon && cpu_has_arch_perfmon) |
407 | return 0; | 578 | return 0; |
@@ -428,7 +599,7 @@ static int __init ppro_init(char **cpu_type) | |||
428 | *cpu_type = "i386/core_2"; | 599 | *cpu_type = "i386/core_2"; |
429 | break; | 600 | break; |
430 | case 26: | 601 | case 26: |
431 | arch_perfmon_setup_counters(); | 602 | spec = &op_arch_perfmon_spec; |
432 | *cpu_type = "i386/core_i7"; | 603 | *cpu_type = "i386/core_i7"; |
433 | break; | 604 | break; |
434 | case 28: | 605 | case 28: |
@@ -439,17 +610,7 @@ static int __init ppro_init(char **cpu_type) | |||
439 | return 0; | 610 | return 0; |
440 | } | 611 | } |
441 | 612 | ||
442 | model = &op_ppro_spec; | 613 | model = spec; |
443 | return 1; | ||
444 | } | ||
445 | |||
446 | static int __init arch_perfmon_init(char **cpu_type) | ||
447 | { | ||
448 | if (!cpu_has_arch_perfmon) | ||
449 | return 0; | ||
450 | *cpu_type = "i386/arch_perfmon"; | ||
451 | model = &op_arch_perfmon_spec; | ||
452 | arch_perfmon_setup_counters(); | ||
453 | return 1; | 614 | return 1; |
454 | } | 615 | } |
455 | 616 | ||
@@ -471,27 +632,26 @@ int __init op_nmi_init(struct oprofile_operations *ops) | |||
471 | /* Needs to be at least an Athlon (or hammer in 32bit mode) */ | 632 | /* Needs to be at least an Athlon (or hammer in 32bit mode) */ |
472 | 633 | ||
473 | switch (family) { | 634 | switch (family) { |
474 | default: | ||
475 | return -ENODEV; | ||
476 | case 6: | 635 | case 6: |
477 | model = &op_amd_spec; | ||
478 | cpu_type = "i386/athlon"; | 636 | cpu_type = "i386/athlon"; |
479 | break; | 637 | break; |
480 | case 0xf: | 638 | case 0xf: |
481 | model = &op_amd_spec; | 639 | /* |
482 | /* Actually it could be i386/hammer too, but give | 640 | * Actually it could be i386/hammer too, but |
483 | user space an consistent name. */ | 641 | * give user space an consistent name. |
642 | */ | ||
484 | cpu_type = "x86-64/hammer"; | 643 | cpu_type = "x86-64/hammer"; |
485 | break; | 644 | break; |
486 | case 0x10: | 645 | case 0x10: |
487 | model = &op_amd_spec; | ||
488 | cpu_type = "x86-64/family10"; | 646 | cpu_type = "x86-64/family10"; |
489 | break; | 647 | break; |
490 | case 0x11: | 648 | case 0x11: |
491 | model = &op_amd_spec; | ||
492 | cpu_type = "x86-64/family11h"; | 649 | cpu_type = "x86-64/family11h"; |
493 | break; | 650 | break; |
651 | default: | ||
652 | return -ENODEV; | ||
494 | } | 653 | } |
654 | model = &op_amd_spec; | ||
495 | break; | 655 | break; |
496 | 656 | ||
497 | case X86_VENDOR_INTEL: | 657 | case X86_VENDOR_INTEL: |
@@ -510,8 +670,15 @@ int __init op_nmi_init(struct oprofile_operations *ops) | |||
510 | break; | 670 | break; |
511 | } | 671 | } |
512 | 672 | ||
513 | if (!cpu_type && !arch_perfmon_init(&cpu_type)) | 673 | if (cpu_type) |
674 | break; | ||
675 | |||
676 | if (!cpu_has_arch_perfmon) | ||
514 | return -ENODEV; | 677 | return -ENODEV; |
678 | |||
679 | /* use arch perfmon as fallback */ | ||
680 | cpu_type = "i386/arch_perfmon"; | ||
681 | model = &op_arch_perfmon_spec; | ||
515 | break; | 682 | break; |
516 | 683 | ||
517 | default: | 684 | default: |
@@ -522,18 +689,23 @@ int __init op_nmi_init(struct oprofile_operations *ops) | |||
522 | register_cpu_notifier(&oprofile_cpu_nb); | 689 | register_cpu_notifier(&oprofile_cpu_nb); |
523 | #endif | 690 | #endif |
524 | /* default values, can be overwritten by model */ | 691 | /* default values, can be overwritten by model */ |
525 | ops->create_files = nmi_create_files; | 692 | ops->create_files = nmi_create_files; |
526 | ops->setup = nmi_setup; | 693 | ops->setup = nmi_setup; |
527 | ops->shutdown = nmi_shutdown; | 694 | ops->shutdown = nmi_shutdown; |
528 | ops->start = nmi_start; | 695 | ops->start = nmi_start; |
529 | ops->stop = nmi_stop; | 696 | ops->stop = nmi_stop; |
530 | ops->cpu_type = cpu_type; | 697 | ops->cpu_type = cpu_type; |
531 | 698 | ||
532 | if (model->init) | 699 | if (model->init) |
533 | ret = model->init(ops); | 700 | ret = model->init(ops); |
534 | if (ret) | 701 | if (ret) |
535 | return ret; | 702 | return ret; |
536 | 703 | ||
704 | if (!model->num_virt_counters) | ||
705 | model->num_virt_counters = model->num_counters; | ||
706 | |||
707 | mux_init(ops); | ||
708 | |||
537 | init_sysfs(); | 709 | init_sysfs(); |
538 | using_nmi = 1; | 710 | using_nmi = 1; |
539 | printk(KERN_INFO "oprofile: using NMI interrupt.\n"); | 711 | printk(KERN_INFO "oprofile: using NMI interrupt.\n"); |
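With multiplexing, nmi_create_files() exports one oprofilefs directory per virtual counter but still checks reservation availability on the underlying physical counter via op_x86_virt_to_phys(). A stand-alone model of that loop (the watchdog holding physical counter 1 is an invented assumption for the demo):

    #include <stdio.h>

    #define NUM_COUNTERS      4
    #define NUM_VIRT_COUNTERS 32

    static int virt_to_phys(int virt) { return virt % NUM_COUNTERS; }

    /* assumption for the demo: the NMI watchdog holds physical ctr 1 */
    static int phys_counter_available(int phys) { return phys != 1; }

    int main(void)
    {
            int i;

            for (i = 0; i < NUM_VIRT_COUNTERS; i++) {
                    if (!phys_counter_available(virt_to_phys(i)))
                            continue;  /* skipped, as in the patch */
                    printf("oprofilefs dir: %d/\n", i);
            }
            return 0;
    }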
diff --git a/arch/x86/oprofile/op_counter.h b/arch/x86/oprofile/op_counter.h index 91b6a116165e..e28398df0df2 100644 --- a/arch/x86/oprofile/op_counter.h +++ b/arch/x86/oprofile/op_counter.h | |||
@@ -10,7 +10,7 @@ | |||
10 | #ifndef OP_COUNTER_H | 10 | #ifndef OP_COUNTER_H |
11 | #define OP_COUNTER_H | 11 | #define OP_COUNTER_H |
12 | 12 | ||
13 | #define OP_MAX_COUNTER 8 | 13 | #define OP_MAX_COUNTER 32 |
14 | 14 | ||
15 | /* Per-perfctr configuration as set via | 15 | /* Per-perfctr configuration as set via |
16 | * oprofilefs. | 16 | * oprofilefs. |
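The bump from 8 to 32 keeps counter_config[] large enough for the 32 virtual counters defined below. A compile-time check of that invariant (our addition, not part of the patch) could look like:

    #define OP_MAX_COUNTER    32
    #define NUM_VIRT_COUNTERS 32

    /* a negative array size breaks the build if the limit is violated */
    typedef char op_max_counter_fits
            [(NUM_VIRT_COUNTERS <= OP_MAX_COUNTER) ? 1 : -1];

    int main(void) { return 0; }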
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index 8fdf06e4edf9..39686c29f03a 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c | |||
@@ -9,12 +9,15 @@ | |||
9 | * @author Philippe Elie | 9 | * @author Philippe Elie |
10 | * @author Graydon Hoare | 10 | * @author Graydon Hoare |
11 | * @author Robert Richter <robert.richter@amd.com> | 11 | * @author Robert Richter <robert.richter@amd.com> |
12 | * @author Barry Kasindorf | 12 | * @author Barry Kasindorf <barry.kasindorf@amd.com> |
13 | * @author Jason Yeh <jason.yeh@amd.com> | ||
14 | * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com> | ||
13 | */ | 15 | */ |
14 | 16 | ||
15 | #include <linux/oprofile.h> | 17 | #include <linux/oprofile.h> |
16 | #include <linux/device.h> | 18 | #include <linux/device.h> |
17 | #include <linux/pci.h> | 19 | #include <linux/pci.h> |
20 | #include <linux/percpu.h> | ||
18 | 21 | ||
19 | #include <asm/ptrace.h> | 22 | #include <asm/ptrace.h> |
20 | #include <asm/msr.h> | 23 | #include <asm/msr.h> |
@@ -25,43 +28,36 @@ | |||
25 | 28 | ||
26 | #define NUM_COUNTERS 4 | 29 | #define NUM_COUNTERS 4 |
27 | #define NUM_CONTROLS 4 | 30 | #define NUM_CONTROLS 4 |
31 | #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX | ||
32 | #define NUM_VIRT_COUNTERS 32 | ||
33 | #define NUM_VIRT_CONTROLS 32 | ||
34 | #else | ||
35 | #define NUM_VIRT_COUNTERS NUM_COUNTERS | ||
36 | #define NUM_VIRT_CONTROLS NUM_CONTROLS | ||
37 | #endif | ||
38 | |||
39 | #define OP_EVENT_MASK 0x0FFF | ||
40 | #define OP_CTR_OVERFLOW (1ULL<<31) | ||
28 | 41 | ||
29 | #define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0) | 42 | #define MSR_AMD_EVENTSEL_RESERVED ((0xFFFFFCF0ULL<<32)|(1ULL<<21)) |
30 | #define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0) | 43 | |
31 | #define CTR_WRITE(l, msrs, c) do {wrmsr(msrs->counters[(c)].addr, -(unsigned int)(l), -1); } while (0) | 44 | static unsigned long reset_value[NUM_VIRT_COUNTERS]; |
32 | #define CTR_OVERFLOWED(n) (!((n) & (1U<<31))) | ||
33 | |||
34 | #define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0) | ||
35 | #define CTRL_READ(l, h, msrs, c) do {rdmsr(msrs->controls[(c)].addr, (l), (h)); } while (0) | ||
36 | #define CTRL_WRITE(l, h, msrs, c) do {wrmsr(msrs->controls[(c)].addr, (l), (h)); } while (0) | ||
37 | #define CTRL_SET_ACTIVE(n) (n |= (1<<22)) | ||
38 | #define CTRL_SET_INACTIVE(n) (n &= ~(1<<22)) | ||
39 | #define CTRL_CLEAR_LO(x) (x &= (1<<21)) | ||
40 | #define CTRL_CLEAR_HI(x) (x &= 0xfffffcf0) | ||
41 | #define CTRL_SET_ENABLE(val) (val |= 1<<20) | ||
42 | #define CTRL_SET_USR(val, u) (val |= ((u & 1) << 16)) | ||
43 | #define CTRL_SET_KERN(val, k) (val |= ((k & 1) << 17)) | ||
44 | #define CTRL_SET_UM(val, m) (val |= (m << 8)) | ||
45 | #define CTRL_SET_EVENT_LOW(val, e) (val |= (e & 0xff)) | ||
46 | #define CTRL_SET_EVENT_HIGH(val, e) (val |= ((e >> 8) & 0xf)) | ||
47 | #define CTRL_SET_HOST_ONLY(val, h) (val |= ((h & 1) << 9)) | ||
48 | #define CTRL_SET_GUEST_ONLY(val, h) (val |= ((h & 1) << 8)) | ||
49 | |||
50 | static unsigned long reset_value[NUM_COUNTERS]; | ||
51 | 45 | ||
52 | #ifdef CONFIG_OPROFILE_IBS | 46 | #ifdef CONFIG_OPROFILE_IBS |
53 | 47 | ||
54 | /* IbsFetchCtl bits/masks */ | 48 | /* IbsFetchCtl bits/masks */ |
55 | #define IBS_FETCH_HIGH_VALID_BIT (1UL << 17) /* bit 49 */ | 49 | #define IBS_FETCH_RAND_EN (1ULL<<57) |
56 | #define IBS_FETCH_HIGH_ENABLE (1UL << 16) /* bit 48 */ | 50 | #define IBS_FETCH_VAL (1ULL<<49) |
57 | #define IBS_FETCH_LOW_MAX_CNT_MASK 0x0000FFFFUL /* MaxCnt mask */ | 51 | #define IBS_FETCH_ENABLE (1ULL<<48) |
52 | #define IBS_FETCH_CNT_MASK 0xFFFF0000ULL | ||
58 | 53 | ||
59 | /*IbsOpCtl bits */ | 54 | /*IbsOpCtl bits */ |
60 | #define IBS_OP_LOW_VALID_BIT (1ULL<<18) /* bit 18 */ | 55 | #define IBS_OP_CNT_CTL (1ULL<<19) |
61 | #define IBS_OP_LOW_ENABLE (1ULL<<17) /* bit 17 */ | 56 | #define IBS_OP_VAL (1ULL<<18) |
57 | #define IBS_OP_ENABLE (1ULL<<17) | ||
62 | 58 | ||
63 | #define IBS_FETCH_SIZE 6 | 59 | #define IBS_FETCH_SIZE 6 |
64 | #define IBS_OP_SIZE 12 | 60 | #define IBS_OP_SIZE 12 |
65 | 61 | ||
66 | static int has_ibs; /* AMD Family10h and later */ | 62 | static int has_ibs; /* AMD Family10h and later */ |
67 | 63 | ||
@@ -78,6 +74,45 @@ static struct op_ibs_config ibs_config; | |||
78 | 74 | ||
79 | #endif | 75 | #endif |
80 | 76 | ||
77 | #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX | ||
78 | |||
79 | static void op_mux_fill_in_addresses(struct op_msrs * const msrs) | ||
80 | { | ||
81 | int i; | ||
82 | |||
83 | for (i = 0; i < NUM_VIRT_COUNTERS; i++) { | ||
84 | int hw_counter = op_x86_virt_to_phys(i); | ||
85 | if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i)) | ||
86 | msrs->multiplex[i].addr = MSR_K7_PERFCTR0 + hw_counter; | ||
87 | else | ||
88 | msrs->multiplex[i].addr = 0; | ||
89 | } | ||
90 | } | ||
91 | |||
92 | static void op_mux_switch_ctrl(struct op_x86_model_spec const *model, | ||
93 | struct op_msrs const * const msrs) | ||
94 | { | ||
95 | u64 val; | ||
96 | int i; | ||
97 | |||
98 | /* enable active counters */ | ||
99 | for (i = 0; i < NUM_COUNTERS; ++i) { | ||
100 | int virt = op_x86_phys_to_virt(i); | ||
101 | if (!counter_config[virt].enabled) | ||
102 | continue; | ||
103 | rdmsrl(msrs->controls[i].addr, val); | ||
104 | val &= model->reserved; | ||
105 | val |= op_x86_get_ctrl(model, &counter_config[virt]); | ||
106 | wrmsrl(msrs->controls[i].addr, val); | ||
107 | } | ||
108 | } | ||
109 | |||
110 | #else | ||
111 | |||
112 | static inline void op_mux_fill_in_addresses(struct op_msrs * const msrs) { } | ||
113 | |||
114 | #endif | ||
115 | |||
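op_mux_switch_ctrl() above, and both setup_ctrs() rewrites in this series, share one read-mask-merge idiom: read the event-select MSR, keep only the bits listed in the model's reserved mask, then OR in the control word built by op_x86_get_ctrl(). Modeled with a plain 64-bit variable standing in for the MSR (the control word below is invented for the sketch):

    #include <stdint.h>
    #include <stdio.h>

    /* mask from the patch: bits the driver must not touch */
    #define MSR_AMD_EVENTSEL_RESERVED ((0xFFFFFCF0ULL << 32) | (1ULL << 21))

    static uint64_t eventsel_msr = 0x20000000430076ULL; /* arbitrary */

    static void reprogram(uint64_t ctrl)
    {
            uint64_t val = eventsel_msr;            /* rdmsrl()           */
            val &= MSR_AMD_EVENTSEL_RESERVED;       /* keep reserved bits */
            val |= ctrl;                            /* merge new control  */
            eventsel_msr = val;                     /* wrmsrl()           */
    }

    int main(void)
    {
            /* hypothetical control word: event 0x76, user+kernel bits */
            reprogram(0x76ULL | (1ULL << 16) | (1ULL << 17));
            printf("eventsel = %#llx\n",
                   (unsigned long long)eventsel_msr);
            return 0;
    }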
81 | /* functions for op_amd_spec */ | 116 | /* functions for op_amd_spec */ |
82 | 117 | ||
83 | static void op_amd_fill_in_addresses(struct op_msrs * const msrs) | 118 | static void op_amd_fill_in_addresses(struct op_msrs * const msrs) |
@@ -97,150 +132,174 @@ static void op_amd_fill_in_addresses(struct op_msrs * const msrs) | |||
97 | else | 132 | else |
98 | msrs->controls[i].addr = 0; | 133 | msrs->controls[i].addr = 0; |
99 | } | 134 | } |
100 | } | ||
101 | 135 | ||
136 | op_mux_fill_in_addresses(msrs); | ||
137 | } | ||
102 | 138 | ||
103 | static void op_amd_setup_ctrs(struct op_msrs const * const msrs) | 139 | static void op_amd_setup_ctrs(struct op_x86_model_spec const *model, |
140 | struct op_msrs const * const msrs) | ||
104 | { | 141 | { |
105 | unsigned int low, high; | 142 | u64 val; |
106 | int i; | 143 | int i; |
107 | 144 | ||
145 | /* setup reset_value */ | ||
146 | for (i = 0; i < NUM_VIRT_COUNTERS; ++i) { | ||
147 | if (counter_config[i].enabled) | ||
148 | reset_value[i] = counter_config[i].count; | ||
149 | else | ||
150 | reset_value[i] = 0; | ||
151 | } | ||
152 | |||
108 | /* clear all counters */ | 153 | /* clear all counters */ |
109 | for (i = 0 ; i < NUM_CONTROLS; ++i) { | 154 | for (i = 0; i < NUM_CONTROLS; ++i) { |
110 | if (unlikely(!CTRL_IS_RESERVED(msrs, i))) | 155 | if (unlikely(!msrs->controls[i].addr)) |
111 | continue; | 156 | continue; |
112 | CTRL_READ(low, high, msrs, i); | 157 | rdmsrl(msrs->controls[i].addr, val); |
113 | CTRL_CLEAR_LO(low); | 158 | val &= model->reserved; |
114 | CTRL_CLEAR_HI(high); | 159 | wrmsrl(msrs->controls[i].addr, val); |
115 | CTRL_WRITE(low, high, msrs, i); | ||
116 | } | 160 | } |
117 | 161 | ||
118 | /* avoid a false detection of ctr overflows in NMI handler */ | 162 | /* avoid a false detection of ctr overflows in NMI handler */ |
119 | for (i = 0; i < NUM_COUNTERS; ++i) { | 163 | for (i = 0; i < NUM_COUNTERS; ++i) { |
120 | if (unlikely(!CTR_IS_RESERVED(msrs, i))) | 164 | if (unlikely(!msrs->counters[i].addr)) |
121 | continue; | 165 | continue; |
122 | CTR_WRITE(1, msrs, i); | 166 | wrmsrl(msrs->counters[i].addr, -1LL); |
123 | } | 167 | } |
124 | 168 | ||
125 | /* enable active counters */ | 169 | /* enable active counters */ |
126 | for (i = 0; i < NUM_COUNTERS; ++i) { | 170 | for (i = 0; i < NUM_COUNTERS; ++i) { |
127 | if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) { | 171 | int virt = op_x86_phys_to_virt(i); |
128 | reset_value[i] = counter_config[i].count; | 172 | if (!counter_config[virt].enabled) |
173 | continue; | ||
174 | if (!msrs->counters[i].addr) | ||
175 | continue; | ||
129 | 176 | ||
130 | CTR_WRITE(counter_config[i].count, msrs, i); | 177 | /* setup counter registers */ |
131 | 178 | wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]); | |
132 | CTRL_READ(low, high, msrs, i); | 179 | |
133 | CTRL_CLEAR_LO(low); | 180 | /* setup control registers */ |
134 | CTRL_CLEAR_HI(high); | 181 | rdmsrl(msrs->controls[i].addr, val); |
135 | CTRL_SET_ENABLE(low); | 182 | val &= model->reserved; |
136 | CTRL_SET_USR(low, counter_config[i].user); | 183 | val |= op_x86_get_ctrl(model, &counter_config[virt]); |
137 | CTRL_SET_KERN(low, counter_config[i].kernel); | 184 | wrmsrl(msrs->controls[i].addr, val); |
138 | CTRL_SET_UM(low, counter_config[i].unit_mask); | ||
139 | CTRL_SET_EVENT_LOW(low, counter_config[i].event); | ||
140 | CTRL_SET_EVENT_HIGH(high, counter_config[i].event); | ||
141 | CTRL_SET_HOST_ONLY(high, 0); | ||
142 | CTRL_SET_GUEST_ONLY(high, 0); | ||
143 | |||
144 | CTRL_WRITE(low, high, msrs, i); | ||
145 | } else { | ||
146 | reset_value[i] = 0; | ||
147 | } | ||
148 | } | 185 | } |
149 | } | 186 | } |
150 | 187 | ||
151 | #ifdef CONFIG_OPROFILE_IBS | 188 | #ifdef CONFIG_OPROFILE_IBS |
152 | 189 | ||
153 | static inline int | 190 | static inline void |
154 | op_amd_handle_ibs(struct pt_regs * const regs, | 191 | op_amd_handle_ibs(struct pt_regs * const regs, |
155 | struct op_msrs const * const msrs) | 192 | struct op_msrs const * const msrs) |
156 | { | 193 | { |
157 | u32 low, high; | 194 | u64 val, ctl; |
158 | u64 msr; | ||
159 | struct op_entry entry; | 195 | struct op_entry entry; |
160 | 196 | ||
161 | if (!has_ibs) | 197 | if (!has_ibs) |
162 | return 1; | 198 | return; |
163 | 199 | ||
164 | if (ibs_config.fetch_enabled) { | 200 | if (ibs_config.fetch_enabled) { |
165 | rdmsr(MSR_AMD64_IBSFETCHCTL, low, high); | 201 | rdmsrl(MSR_AMD64_IBSFETCHCTL, ctl); |
166 | if (high & IBS_FETCH_HIGH_VALID_BIT) { | 202 | if (ctl & IBS_FETCH_VAL) { |
167 | rdmsrl(MSR_AMD64_IBSFETCHLINAD, msr); | 203 | rdmsrl(MSR_AMD64_IBSFETCHLINAD, val); |
168 | oprofile_write_reserve(&entry, regs, msr, | 204 | oprofile_write_reserve(&entry, regs, val, |
169 | IBS_FETCH_CODE, IBS_FETCH_SIZE); | 205 | IBS_FETCH_CODE, IBS_FETCH_SIZE); |
170 | oprofile_add_data(&entry, (u32)msr); | 206 | oprofile_add_data64(&entry, val); |
171 | oprofile_add_data(&entry, (u32)(msr >> 32)); | 207 | oprofile_add_data64(&entry, ctl); |
172 | oprofile_add_data(&entry, low); | 208 | rdmsrl(MSR_AMD64_IBSFETCHPHYSAD, val); |
173 | oprofile_add_data(&entry, high); | 209 | oprofile_add_data64(&entry, val); |
174 | rdmsrl(MSR_AMD64_IBSFETCHPHYSAD, msr); | ||
175 | oprofile_add_data(&entry, (u32)msr); | ||
176 | oprofile_add_data(&entry, (u32)(msr >> 32)); | ||
177 | oprofile_write_commit(&entry); | 210 | oprofile_write_commit(&entry); |
178 | 211 | ||
179 | /* reenable the IRQ */ | 212 | /* reenable the IRQ */ |
180 | high &= ~IBS_FETCH_HIGH_VALID_BIT; | 213 | ctl &= ~(IBS_FETCH_VAL | IBS_FETCH_CNT_MASK); |
181 | high |= IBS_FETCH_HIGH_ENABLE; | 214 | ctl |= IBS_FETCH_ENABLE; |
182 | low &= IBS_FETCH_LOW_MAX_CNT_MASK; | 215 | wrmsrl(MSR_AMD64_IBSFETCHCTL, ctl); |
183 | wrmsr(MSR_AMD64_IBSFETCHCTL, low, high); | ||
184 | } | 216 | } |
185 | } | 217 | } |
186 | 218 | ||
187 | if (ibs_config.op_enabled) { | 219 | if (ibs_config.op_enabled) { |
188 | rdmsr(MSR_AMD64_IBSOPCTL, low, high); | 220 | rdmsrl(MSR_AMD64_IBSOPCTL, ctl); |
189 | if (low & IBS_OP_LOW_VALID_BIT) { | 221 | if (ctl & IBS_OP_VAL) { |
190 | rdmsrl(MSR_AMD64_IBSOPRIP, msr); | 222 | rdmsrl(MSR_AMD64_IBSOPRIP, val); |
191 | oprofile_write_reserve(&entry, regs, msr, | 223 | oprofile_write_reserve(&entry, regs, val, |
192 | IBS_OP_CODE, IBS_OP_SIZE); | 224 | IBS_OP_CODE, IBS_OP_SIZE); |
193 | oprofile_add_data(&entry, (u32)msr); | 225 | oprofile_add_data64(&entry, val); |
194 | oprofile_add_data(&entry, (u32)(msr >> 32)); | 226 | rdmsrl(MSR_AMD64_IBSOPDATA, val); |
195 | rdmsrl(MSR_AMD64_IBSOPDATA, msr); | 227 | oprofile_add_data64(&entry, val); |
196 | oprofile_add_data(&entry, (u32)msr); | 228 | rdmsrl(MSR_AMD64_IBSOPDATA2, val); |
197 | oprofile_add_data(&entry, (u32)(msr >> 32)); | 229 | oprofile_add_data64(&entry, val); |
198 | rdmsrl(MSR_AMD64_IBSOPDATA2, msr); | 230 | rdmsrl(MSR_AMD64_IBSOPDATA3, val); |
199 | oprofile_add_data(&entry, (u32)msr); | 231 | oprofile_add_data64(&entry, val); |
200 | oprofile_add_data(&entry, (u32)(msr >> 32)); | 232 | rdmsrl(MSR_AMD64_IBSDCLINAD, val); |
201 | rdmsrl(MSR_AMD64_IBSOPDATA3, msr); | 233 | oprofile_add_data64(&entry, val); |
202 | oprofile_add_data(&entry, (u32)msr); | 234 | rdmsrl(MSR_AMD64_IBSDCPHYSAD, val); |
203 | oprofile_add_data(&entry, (u32)(msr >> 32)); | 235 | oprofile_add_data64(&entry, val); |
204 | rdmsrl(MSR_AMD64_IBSDCLINAD, msr); | ||
205 | oprofile_add_data(&entry, (u32)msr); | ||
206 | oprofile_add_data(&entry, (u32)(msr >> 32)); | ||
207 | rdmsrl(MSR_AMD64_IBSDCPHYSAD, msr); | ||
208 | oprofile_add_data(&entry, (u32)msr); | ||
209 | oprofile_add_data(&entry, (u32)(msr >> 32)); | ||
210 | oprofile_write_commit(&entry); | 236 | oprofile_write_commit(&entry); |
211 | 237 | ||
212 | /* reenable the IRQ */ | 238 | /* reenable the IRQ */ |
213 | high = 0; | 239 | ctl &= ~IBS_OP_VAL & 0xFFFFFFFF; |
214 | low &= ~IBS_OP_LOW_VALID_BIT; | 240 | ctl |= IBS_OP_ENABLE; |
215 | low |= IBS_OP_LOW_ENABLE; | 241 | wrmsrl(MSR_AMD64_IBSOPCTL, ctl); |
216 | wrmsr(MSR_AMD64_IBSOPCTL, low, high); | ||
217 | } | 242 | } |
218 | } | 243 | } |
244 | } | ||
219 | 245 | ||
220 | return 1; | 246 | static inline void op_amd_start_ibs(void) |
247 | { | ||
248 | u64 val; | ||
249 | if (has_ibs && ibs_config.fetch_enabled) { | ||
250 | val = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF; | ||
251 | val |= ibs_config.rand_en ? IBS_FETCH_RAND_EN : 0; | ||
252 | val |= IBS_FETCH_ENABLE; | ||
253 | wrmsrl(MSR_AMD64_IBSFETCHCTL, val); | ||
254 | } | ||
255 | |||
256 | if (has_ibs && ibs_config.op_enabled) { | ||
257 | val = (ibs_config.max_cnt_op >> 4) & 0xFFFF; | ||
258 | val |= ibs_config.dispatched_ops ? IBS_OP_CNT_CTL : 0; | ||
259 | val |= IBS_OP_ENABLE; | ||
260 | wrmsrl(MSR_AMD64_IBSOPCTL, val); | ||
261 | } | ||
262 | } | ||
263 | |||
264 | static void op_amd_stop_ibs(void) | ||
265 | { | ||
266 | if (has_ibs && ibs_config.fetch_enabled) | ||
267 | /* clear max count and enable */ | ||
268 | wrmsrl(MSR_AMD64_IBSFETCHCTL, 0); | ||
269 | |||
270 | if (has_ibs && ibs_config.op_enabled) | ||
271 | /* clear max count and enable */ | ||
272 | wrmsrl(MSR_AMD64_IBSOPCTL, 0); | ||
221 | } | 273 | } |
222 | 274 | ||
275 | #else | ||
276 | |||
277 | static inline void op_amd_handle_ibs(struct pt_regs * const regs, | ||
278 | struct op_msrs const * const msrs) { } | ||
279 | static inline void op_amd_start_ibs(void) { } | ||
280 | static inline void op_amd_stop_ibs(void) { } | ||
281 | |||
223 | #endif | 282 | #endif |
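Much of the IBS rework is mechanical: the valid and enable flags sit above bit 31, so the rdmsr()/wrmsr() low/high pairs become single rdmsrl()/wrmsrl() accesses and the masks are rewritten as 1ULL shifts. The correspondence, checked on a plain integer:

    #include <assert.h>
    #include <stdint.h>

    #define IBS_FETCH_VAL (1ULL << 49)      /* new-style mask */

    int main(void)
    {
            uint64_t msr = IBS_FETCH_VAL | 0x1234;  /* fake MSR image */

            /* old style split the register into two 32-bit halves */
            uint32_t high = (uint32_t)(msr >> 32);

            /* bit 49 of the MSR is bit 17 of the high half
             * (the old IBS_FETCH_HIGH_VALID_BIT) */
            assert(!!(high & (1U << 17)) == !!(msr & IBS_FETCH_VAL));
            return 0;
    }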
224 | 283 | ||
225 | static int op_amd_check_ctrs(struct pt_regs * const regs, | 284 | static int op_amd_check_ctrs(struct pt_regs * const regs, |
226 | struct op_msrs const * const msrs) | 285 | struct op_msrs const * const msrs) |
227 | { | 286 | { |
228 | unsigned int low, high; | 287 | u64 val; |
229 | int i; | 288 | int i; |
230 | 289 | ||
231 | for (i = 0 ; i < NUM_COUNTERS; ++i) { | 290 | for (i = 0; i < NUM_COUNTERS; ++i) { |
232 | if (!reset_value[i]) | 291 | int virt = op_x86_phys_to_virt(i); |
292 | if (!reset_value[virt]) | ||
233 | continue; | 293 | continue; |
234 | CTR_READ(low, high, msrs, i); | 294 | rdmsrl(msrs->counters[i].addr, val); |
235 | if (CTR_OVERFLOWED(low)) { | 295 | /* bit is clear if overflowed: */ |
236 | oprofile_add_sample(regs, i); | 296 | if (val & OP_CTR_OVERFLOW) |
237 | CTR_WRITE(reset_value[i], msrs, i); | 297 | continue; |
238 | } | 298 | oprofile_add_sample(regs, virt); |
299 | wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]); | ||
239 | } | 300 | } |
240 | 301 | ||
241 | #ifdef CONFIG_OPROFILE_IBS | ||
242 | op_amd_handle_ibs(regs, msrs); | 302 | op_amd_handle_ibs(regs, msrs); |
243 | #endif | ||
244 | 303 | ||
245 | /* See op_model_ppro.c */ | 304 | /* See op_model_ppro.c */ |
246 | return 1; | 305 | return 1; |
@@ -248,79 +307,50 @@ static int op_amd_check_ctrs(struct pt_regs * const regs, | |||
248 | 307 | ||
249 | static void op_amd_start(struct op_msrs const * const msrs) | 308 | static void op_amd_start(struct op_msrs const * const msrs) |
250 | { | 309 | { |
251 | unsigned int low, high; | 310 | u64 val; |
252 | int i; | 311 | int i; |
253 | for (i = 0 ; i < NUM_COUNTERS ; ++i) { | ||
254 | if (reset_value[i]) { | ||
255 | CTRL_READ(low, high, msrs, i); | ||
256 | CTRL_SET_ACTIVE(low); | ||
257 | CTRL_WRITE(low, high, msrs, i); | ||
258 | } | ||
259 | } | ||
260 | 312 | ||
261 | #ifdef CONFIG_OPROFILE_IBS | 313 | for (i = 0; i < NUM_COUNTERS; ++i) { |
262 | if (has_ibs && ibs_config.fetch_enabled) { | 314 | if (!reset_value[op_x86_phys_to_virt(i)]) |
263 | low = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF; | 315 | continue; |
264 | high = ((ibs_config.rand_en & 0x1) << 25) /* bit 57 */ | 316 | rdmsrl(msrs->controls[i].addr, val); |
265 | + IBS_FETCH_HIGH_ENABLE; | 317 | val |= ARCH_PERFMON_EVENTSEL0_ENABLE; |
266 | wrmsr(MSR_AMD64_IBSFETCHCTL, low, high); | 318 | wrmsrl(msrs->controls[i].addr, val); |
267 | } | 319 | } |
268 | 320 | ||
269 | if (has_ibs && ibs_config.op_enabled) { | 321 | op_amd_start_ibs(); |
270 | low = ((ibs_config.max_cnt_op >> 4) & 0xFFFF) | ||
271 | + ((ibs_config.dispatched_ops & 0x1) << 19) /* bit 19 */ | ||
272 | + IBS_OP_LOW_ENABLE; | ||
273 | high = 0; | ||
274 | wrmsr(MSR_AMD64_IBSOPCTL, low, high); | ||
275 | } | ||
276 | #endif | ||
277 | } | 322 | } |
278 | 323 | ||
279 | |||
280 | static void op_amd_stop(struct op_msrs const * const msrs) | 324 | static void op_amd_stop(struct op_msrs const * const msrs) |
281 | { | 325 | { |
282 | unsigned int low, high; | 326 | u64 val; |
283 | int i; | 327 | int i; |
284 | 328 | ||
285 | /* | 329 | /* |
286 | * Subtle: stop on all counters to avoid race with setting our | 330 | * Subtle: stop on all counters to avoid race with setting our |
287 | * pm callback | 331 | * pm callback |
288 | */ | 332 | */ |
289 | for (i = 0 ; i < NUM_COUNTERS ; ++i) { | 333 | for (i = 0; i < NUM_COUNTERS; ++i) { |
290 | if (!reset_value[i]) | 334 | if (!reset_value[op_x86_phys_to_virt(i)]) |
291 | continue; | 335 | continue; |
292 | CTRL_READ(low, high, msrs, i); | 336 | rdmsrl(msrs->controls[i].addr, val); |
293 | CTRL_SET_INACTIVE(low); | 337 | val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE; |
294 | CTRL_WRITE(low, high, msrs, i); | 338 | wrmsrl(msrs->controls[i].addr, val); |
295 | } | ||
296 | |||
297 | #ifdef CONFIG_OPROFILE_IBS | ||
298 | if (has_ibs && ibs_config.fetch_enabled) { | ||
299 | /* clear max count and enable */ | ||
300 | low = 0; | ||
301 | high = 0; | ||
302 | wrmsr(MSR_AMD64_IBSFETCHCTL, low, high); | ||
303 | } | 339 | } |
304 | 340 | ||
305 | if (has_ibs && ibs_config.op_enabled) { | 341 | op_amd_stop_ibs(); |
306 | /* clear max count and enable */ | ||
307 | low = 0; | ||
308 | high = 0; | ||
309 | wrmsr(MSR_AMD64_IBSOPCTL, low, high); | ||
310 | } | ||
311 | #endif | ||
312 | } | 342 | } |
313 | 343 | ||
314 | static void op_amd_shutdown(struct op_msrs const * const msrs) | 344 | static void op_amd_shutdown(struct op_msrs const * const msrs) |
315 | { | 345 | { |
316 | int i; | 346 | int i; |
317 | 347 | ||
318 | for (i = 0 ; i < NUM_COUNTERS ; ++i) { | 348 | for (i = 0; i < NUM_COUNTERS; ++i) { |
319 | if (CTR_IS_RESERVED(msrs, i)) | 349 | if (msrs->counters[i].addr) |
320 | release_perfctr_nmi(MSR_K7_PERFCTR0 + i); | 350 | release_perfctr_nmi(MSR_K7_PERFCTR0 + i); |
321 | } | 351 | } |
322 | for (i = 0 ; i < NUM_CONTROLS ; ++i) { | 352 | for (i = 0; i < NUM_CONTROLS; ++i) { |
323 | if (CTRL_IS_RESERVED(msrs, i)) | 353 | if (msrs->controls[i].addr) |
324 | release_evntsel_nmi(MSR_K7_EVNTSEL0 + i); | 354 | release_evntsel_nmi(MSR_K7_EVNTSEL0 + i); |
325 | } | 355 | } |
326 | } | 356 | } |
@@ -490,15 +520,21 @@ static void op_amd_exit(void) {} | |||
490 | 520 | ||
491 | #endif /* CONFIG_OPROFILE_IBS */ | 521 | #endif /* CONFIG_OPROFILE_IBS */ |
492 | 522 | ||
493 | struct op_x86_model_spec const op_amd_spec = { | 523 | struct op_x86_model_spec op_amd_spec = { |
494 | .init = op_amd_init, | ||
495 | .exit = op_amd_exit, | ||
496 | .num_counters = NUM_COUNTERS, | 524 | .num_counters = NUM_COUNTERS, |
497 | .num_controls = NUM_CONTROLS, | 525 | .num_controls = NUM_CONTROLS, |
526 | .num_virt_counters = NUM_VIRT_COUNTERS, | ||
527 | .reserved = MSR_AMD_EVENTSEL_RESERVED, | ||
528 | .event_mask = OP_EVENT_MASK, | ||
529 | .init = op_amd_init, | ||
530 | .exit = op_amd_exit, | ||
498 | .fill_in_addresses = &op_amd_fill_in_addresses, | 531 | .fill_in_addresses = &op_amd_fill_in_addresses, |
499 | .setup_ctrs = &op_amd_setup_ctrs, | 532 | .setup_ctrs = &op_amd_setup_ctrs, |
500 | .check_ctrs = &op_amd_check_ctrs, | 533 | .check_ctrs = &op_amd_check_ctrs, |
501 | .start = &op_amd_start, | 534 | .start = &op_amd_start, |
502 | .stop = &op_amd_stop, | 535 | .stop = &op_amd_stop, |
503 | .shutdown = &op_amd_shutdown | 536 | .shutdown = &op_amd_shutdown, |
537 | #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX | ||
538 | .switch_ctrl = &op_mux_switch_ctrl, | ||
539 | #endif | ||
504 | }; | 540 | }; |
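Counters are still programmed with the two's complement of the requested count, wrmsrl(addr, -(u64)reset_value), so the hardware overflows after exactly that many events, and op_amd_check_ctrs() treats a clear bit 31 as the overflow signal. The arithmetic, modeled on an up-counting 64-bit integer (valid for counts below 2^31, which the bit-31 test requires):

    #include <stdint.h>
    #include <stdio.h>

    #define OP_CTR_OVERFLOW (1ULL << 31)

    int main(void)
    {
            uint64_t count = 100000;
            uint64_t ctr = -count;          /* as written by wrmsrl() */
            uint64_t events = 0;

            /* bit 31 stays set until the counter wraps through zero */
            while (ctr & OP_CTR_OVERFLOW) {
                    ctr++;                  /* one event retires */
                    events++;
            }
            printf("overflow after %llu events\n",
                   (unsigned long long)events);     /* prints 100000 */
            return 0;
    }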
diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c index 819b131fd752..ac6b354becdf 100644 --- a/arch/x86/oprofile/op_model_p4.c +++ b/arch/x86/oprofile/op_model_p4.c | |||
@@ -32,6 +32,8 @@ | |||
32 | #define NUM_CCCRS_HT2 9 | 32 | #define NUM_CCCRS_HT2 9 |
33 | #define NUM_CONTROLS_HT2 (NUM_ESCRS_HT2 + NUM_CCCRS_HT2) | 33 | #define NUM_CONTROLS_HT2 (NUM_ESCRS_HT2 + NUM_CCCRS_HT2) |
34 | 34 | ||
35 | #define OP_CTR_OVERFLOW (1ULL<<31) | ||
36 | |||
35 | static unsigned int num_counters = NUM_COUNTERS_NON_HT; | 37 | static unsigned int num_counters = NUM_COUNTERS_NON_HT; |
36 | static unsigned int num_controls = NUM_CONTROLS_NON_HT; | 38 | static unsigned int num_controls = NUM_CONTROLS_NON_HT; |
37 | 39 | ||
@@ -350,8 +352,6 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = { | |||
350 | #define ESCR_SET_OS_1(escr, os) ((escr) |= (((os) & 1) << 1)) | 352 | #define ESCR_SET_OS_1(escr, os) ((escr) |= (((os) & 1) << 1)) |
351 | #define ESCR_SET_EVENT_SELECT(escr, sel) ((escr) |= (((sel) & 0x3f) << 25)) | 353 | #define ESCR_SET_EVENT_SELECT(escr, sel) ((escr) |= (((sel) & 0x3f) << 25)) |
352 | #define ESCR_SET_EVENT_MASK(escr, mask) ((escr) |= (((mask) & 0xffff) << 9)) | 354 | #define ESCR_SET_EVENT_MASK(escr, mask) ((escr) |= (((mask) & 0xffff) << 9)) |
353 | #define ESCR_READ(escr, high, ev, i) do {rdmsr(ev->bindings[(i)].escr_address, (escr), (high)); } while (0) | ||
354 | #define ESCR_WRITE(escr, high, ev, i) do {wrmsr(ev->bindings[(i)].escr_address, (escr), (high)); } while (0) | ||
355 | 355 | ||
356 | #define CCCR_RESERVED_BITS 0x38030FFF | 356 | #define CCCR_RESERVED_BITS 0x38030FFF |
357 | #define CCCR_CLEAR(cccr) ((cccr) &= CCCR_RESERVED_BITS) | 357 | #define CCCR_CLEAR(cccr) ((cccr) &= CCCR_RESERVED_BITS) |
@@ -361,17 +361,9 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = { | |||
361 | #define CCCR_SET_PMI_OVF_1(cccr) ((cccr) |= (1<<27)) | 361 | #define CCCR_SET_PMI_OVF_1(cccr) ((cccr) |= (1<<27)) |
362 | #define CCCR_SET_ENABLE(cccr) ((cccr) |= (1<<12)) | 362 | #define CCCR_SET_ENABLE(cccr) ((cccr) |= (1<<12)) |
363 | #define CCCR_SET_DISABLE(cccr) ((cccr) &= ~(1<<12)) | 363 | #define CCCR_SET_DISABLE(cccr) ((cccr) &= ~(1<<12)) |
364 | #define CCCR_READ(low, high, i) do {rdmsr(p4_counters[(i)].cccr_address, (low), (high)); } while (0) | ||
365 | #define CCCR_WRITE(low, high, i) do {wrmsr(p4_counters[(i)].cccr_address, (low), (high)); } while (0) | ||
366 | #define CCCR_OVF_P(cccr) ((cccr) & (1U<<31)) | 364 | #define CCCR_OVF_P(cccr) ((cccr) & (1U<<31)) |
367 | #define CCCR_CLEAR_OVF(cccr) ((cccr) &= (~(1U<<31))) | 365 | #define CCCR_CLEAR_OVF(cccr) ((cccr) &= (~(1U<<31))) |
368 | 366 | ||
369 | #define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0) | ||
370 | #define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0) | ||
371 | #define CTR_READ(l, h, i) do {rdmsr(p4_counters[(i)].counter_address, (l), (h)); } while (0) | ||
372 | #define CTR_WRITE(l, i) do {wrmsr(p4_counters[(i)].counter_address, -(u32)(l), -1); } while (0) | ||
373 | #define CTR_OVERFLOW_P(ctr) (!((ctr) & 0x80000000)) | ||
374 | |||
375 | 367 | ||
376 | /* this assigns a "stagger" to the current CPU, which is used throughout | 368 | /* this assigns a "stagger" to the current CPU, which is used throughout |
377 | the code in this module as an extra array offset, to select the "even" | 369 | the code in this module as an extra array offset, to select the "even" |
@@ -515,7 +507,7 @@ static void pmc_setup_one_p4_counter(unsigned int ctr) | |||
515 | if (ev->bindings[i].virt_counter & counter_bit) { | 507 | if (ev->bindings[i].virt_counter & counter_bit) { |
516 | 508 | ||
517 | /* modify ESCR */ | 509 | /* modify ESCR */ |
518 | ESCR_READ(escr, high, ev, i); | 510 | rdmsr(ev->bindings[i].escr_address, escr, high); |
519 | ESCR_CLEAR(escr); | 511 | ESCR_CLEAR(escr); |
520 | if (stag == 0) { | 512 | if (stag == 0) { |
521 | ESCR_SET_USR_0(escr, counter_config[ctr].user); | 513 | ESCR_SET_USR_0(escr, counter_config[ctr].user); |
@@ -526,10 +518,11 @@ static void pmc_setup_one_p4_counter(unsigned int ctr) | |||
526 | } | 518 | } |
527 | ESCR_SET_EVENT_SELECT(escr, ev->event_select); | 519 | ESCR_SET_EVENT_SELECT(escr, ev->event_select); |
528 | ESCR_SET_EVENT_MASK(escr, counter_config[ctr].unit_mask); | 520 | ESCR_SET_EVENT_MASK(escr, counter_config[ctr].unit_mask); |
529 | ESCR_WRITE(escr, high, ev, i); | 521 | wrmsr(ev->bindings[i].escr_address, escr, high); |
530 | 522 | ||
531 | /* modify CCCR */ | 523 | /* modify CCCR */ |
532 | CCCR_READ(cccr, high, VIRT_CTR(stag, ctr)); | 524 | rdmsr(p4_counters[VIRT_CTR(stag, ctr)].cccr_address, |
525 | cccr, high); | ||
533 | CCCR_CLEAR(cccr); | 526 | CCCR_CLEAR(cccr); |
534 | CCCR_SET_REQUIRED_BITS(cccr); | 527 | CCCR_SET_REQUIRED_BITS(cccr); |
535 | CCCR_SET_ESCR_SELECT(cccr, ev->escr_select); | 528 | CCCR_SET_ESCR_SELECT(cccr, ev->escr_select); |
@@ -537,7 +530,8 @@ static void pmc_setup_one_p4_counter(unsigned int ctr) | |||
537 | CCCR_SET_PMI_OVF_0(cccr); | 530 | CCCR_SET_PMI_OVF_0(cccr); |
538 | else | 531 | else |
539 | CCCR_SET_PMI_OVF_1(cccr); | 532 | CCCR_SET_PMI_OVF_1(cccr); |
540 | CCCR_WRITE(cccr, high, VIRT_CTR(stag, ctr)); | 533 | wrmsr(p4_counters[VIRT_CTR(stag, ctr)].cccr_address, |
534 | cccr, high); | ||
541 | return; | 535 | return; |
542 | } | 536 | } |
543 | } | 537 | } |
@@ -548,7 +542,8 @@ static void pmc_setup_one_p4_counter(unsigned int ctr) | |||
548 | } | 542 | } |
549 | 543 | ||
550 | 544 | ||
551 | static void p4_setup_ctrs(struct op_msrs const * const msrs) | 545 | static void p4_setup_ctrs(struct op_x86_model_spec const *model, |
546 | struct op_msrs const * const msrs) | ||
552 | { | 547 | { |
553 | unsigned int i; | 548 | unsigned int i; |
554 | unsigned int low, high; | 549 | unsigned int low, high; |
@@ -563,8 +558,8 @@ static void p4_setup_ctrs(struct op_msrs const * const msrs) | |||
563 | } | 558 | } |
564 | 559 | ||
565 | /* clear the cccrs we will use */ | 560 | /* clear the cccrs we will use */ |
566 | for (i = 0 ; i < num_counters ; i++) { | 561 | for (i = 0; i < num_counters; i++) { |
567 | if (unlikely(!CTRL_IS_RESERVED(msrs, i))) | 562 | if (unlikely(!msrs->controls[i].addr)) |
568 | continue; | 563 | continue; |
569 | rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high); | 564 | rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high); |
570 | CCCR_CLEAR(low); | 565 | CCCR_CLEAR(low); |
@@ -574,17 +569,18 @@ static void p4_setup_ctrs(struct op_msrs const * const msrs) | |||
574 | 569 | ||
575 | /* clear all escrs (including those outside our concern) */ | 570 | /* clear all escrs (including those outside our concern) */ |
576 | for (i = num_counters; i < num_controls; i++) { | 571 | for (i = num_counters; i < num_controls; i++) { |
577 | if (unlikely(!CTRL_IS_RESERVED(msrs, i))) | 572 | if (unlikely(!msrs->controls[i].addr)) |
578 | continue; | 573 | continue; |
579 | wrmsr(msrs->controls[i].addr, 0, 0); | 574 | wrmsr(msrs->controls[i].addr, 0, 0); |
580 | } | 575 | } |
581 | 576 | ||
582 | /* setup all counters */ | 577 | /* setup all counters */ |
583 | for (i = 0 ; i < num_counters ; ++i) { | 578 | for (i = 0; i < num_counters; ++i) { |
584 | if ((counter_config[i].enabled) && (CTRL_IS_RESERVED(msrs, i))) { | 579 | if (counter_config[i].enabled && msrs->controls[i].addr) { |
585 | reset_value[i] = counter_config[i].count; | 580 | reset_value[i] = counter_config[i].count; |
586 | pmc_setup_one_p4_counter(i); | 581 | pmc_setup_one_p4_counter(i); |
587 | CTR_WRITE(counter_config[i].count, VIRT_CTR(stag, i)); | 582 | wrmsrl(p4_counters[VIRT_CTR(stag, i)].counter_address, |
583 | -(u64)counter_config[i].count); | ||
588 | } else { | 584 | } else { |
589 | reset_value[i] = 0; | 585 | reset_value[i] = 0; |
590 | } | 586 | } |
@@ -624,14 +620,16 @@ static int p4_check_ctrs(struct pt_regs * const regs, | |||
624 | 620 | ||
625 | real = VIRT_CTR(stag, i); | 621 | real = VIRT_CTR(stag, i); |
626 | 622 | ||
627 | CCCR_READ(low, high, real); | 623 | rdmsr(p4_counters[real].cccr_address, low, high); |
628 | CTR_READ(ctr, high, real); | 624 | rdmsr(p4_counters[real].counter_address, ctr, high); |
629 | if (CCCR_OVF_P(low) || CTR_OVERFLOW_P(ctr)) { | 625 | if (CCCR_OVF_P(low) || !(ctr & OP_CTR_OVERFLOW)) { |
630 | oprofile_add_sample(regs, i); | 626 | oprofile_add_sample(regs, i); |
631 | CTR_WRITE(reset_value[i], real); | 627 | wrmsrl(p4_counters[real].counter_address, |
628 | -(u64)reset_value[i]); | ||
632 | CCCR_CLEAR_OVF(low); | 629 | CCCR_CLEAR_OVF(low); |
633 | CCCR_WRITE(low, high, real); | 630 | wrmsr(p4_counters[real].cccr_address, low, high); |
634 | CTR_WRITE(reset_value[i], real); | 631 | wrmsrl(p4_counters[real].counter_address, |
632 | -(u64)reset_value[i]); | ||
635 | } | 633 | } |
636 | } | 634 | } |
637 | 635 | ||
@@ -653,9 +651,9 @@ static void p4_start(struct op_msrs const * const msrs) | |||
653 | for (i = 0; i < num_counters; ++i) { | 651 | for (i = 0; i < num_counters; ++i) { |
654 | if (!reset_value[i]) | 652 | if (!reset_value[i]) |
655 | continue; | 653 | continue; |
656 | CCCR_READ(low, high, VIRT_CTR(stag, i)); | 654 | rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high); |
657 | CCCR_SET_ENABLE(low); | 655 | CCCR_SET_ENABLE(low); |
658 | CCCR_WRITE(low, high, VIRT_CTR(stag, i)); | 656 | wrmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high); |
659 | } | 657 | } |
660 | } | 658 | } |
661 | 659 | ||
@@ -670,9 +668,9 @@ static void p4_stop(struct op_msrs const * const msrs) | |||
670 | for (i = 0; i < num_counters; ++i) { | 668 | for (i = 0; i < num_counters; ++i) { |
671 | if (!reset_value[i]) | 669 | if (!reset_value[i]) |
672 | continue; | 670 | continue; |
673 | CCCR_READ(low, high, VIRT_CTR(stag, i)); | 671 | rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high); |
674 | CCCR_SET_DISABLE(low); | 672 | CCCR_SET_DISABLE(low); |
675 | CCCR_WRITE(low, high, VIRT_CTR(stag, i)); | 673 | wrmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high); |
676 | } | 674 | } |
677 | } | 675 | } |
678 | 676 | ||
@@ -680,8 +678,8 @@ static void p4_shutdown(struct op_msrs const * const msrs) | |||
680 | { | 678 | { |
681 | int i; | 679 | int i; |
682 | 680 | ||
683 | for (i = 0 ; i < num_counters ; ++i) { | 681 | for (i = 0; i < num_counters; ++i) { |
684 | if (CTR_IS_RESERVED(msrs, i)) | 682 | if (msrs->counters[i].addr) |
685 | release_perfctr_nmi(msrs->counters[i].addr); | 683 | release_perfctr_nmi(msrs->counters[i].addr); |
686 | } | 684 | } |
687 | /* | 685 | /* |
@@ -689,15 +687,15 @@ static void p4_shutdown(struct op_msrs const * const msrs) | |||
689 | * conjunction with the counter registers (hence the starting offset). | 687 | * conjunction with the counter registers (hence the starting offset). |
690 | * This saves a few bits. | 688 | * This saves a few bits. |
691 | */ | 689 | */ |
692 | for (i = num_counters ; i < num_controls ; ++i) { | 690 | for (i = num_counters; i < num_controls; ++i) { |
693 | if (CTRL_IS_RESERVED(msrs, i)) | 691 | if (msrs->controls[i].addr) |
694 | release_evntsel_nmi(msrs->controls[i].addr); | 692 | release_evntsel_nmi(msrs->controls[i].addr); |
695 | } | 693 | } |
696 | } | 694 | } |
697 | 695 | ||
698 | 696 | ||
699 | #ifdef CONFIG_SMP | 697 | #ifdef CONFIG_SMP |
700 | struct op_x86_model_spec const op_p4_ht2_spec = { | 698 | struct op_x86_model_spec op_p4_ht2_spec = { |
701 | .num_counters = NUM_COUNTERS_HT2, | 699 | .num_counters = NUM_COUNTERS_HT2, |
702 | .num_controls = NUM_CONTROLS_HT2, | 700 | .num_controls = NUM_CONTROLS_HT2, |
703 | .fill_in_addresses = &p4_fill_in_addresses, | 701 | .fill_in_addresses = &p4_fill_in_addresses, |
@@ -709,7 +707,7 @@ struct op_x86_model_spec const op_p4_ht2_spec = { | |||
709 | }; | 707 | }; |
710 | #endif | 708 | #endif |
711 | 709 | ||
712 | struct op_x86_model_spec const op_p4_spec = { | 710 | struct op_x86_model_spec op_p4_spec = { |
713 | .num_counters = NUM_COUNTERS_NON_HT, | 711 | .num_counters = NUM_COUNTERS_NON_HT, |
714 | .num_controls = NUM_CONTROLS_NON_HT, | 712 | .num_controls = NUM_CONTROLS_NON_HT, |
715 | .fill_in_addresses = &p4_fill_in_addresses, | 713 | .fill_in_addresses = &p4_fill_in_addresses, |
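op_p4_spec and op_p4_ht2_spec, like op_amd_spec above, lose their const qualifier for a concrete reason: op_nmi_init() now assigns model->num_virt_counters = model->num_counters when a model leaves the field zero. A minimal illustration of that late fix-up (struct and names invented for the sketch):

    #include <stdio.h>

    struct model_spec {
            unsigned int num_counters;
            unsigned int num_virt_counters; /* 0 = "no multiplexing" */
    };

    /* cannot be const: fix_up() below assigns to it */
    static struct model_spec p4_like_spec = { .num_counters = 8 };

    static void fix_up(struct model_spec *model)
    {
            if (!model->num_virt_counters)
                    model->num_virt_counters = model->num_counters;
    }

    int main(void)
    {
            fix_up(&p4_like_spec);
            printf("virt counters: %u\n", p4_like_spec.num_virt_counters);
            return 0;
    }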
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c index 4da7230b3d17..4899215999de 100644 --- a/arch/x86/oprofile/op_model_ppro.c +++ b/arch/x86/oprofile/op_model_ppro.c | |||
@@ -10,6 +10,7 @@ | |||
10 | * @author Philippe Elie | 10 | * @author Philippe Elie |
11 | * @author Graydon Hoare | 11 | * @author Graydon Hoare |
12 | * @author Andi Kleen | 12 | * @author Andi Kleen |
13 | * @author Robert Richter <robert.richter@amd.com> | ||
13 | */ | 14 | */ |
14 | 15 | ||
15 | #include <linux/oprofile.h> | 16 | #include <linux/oprofile.h> |
@@ -18,7 +19,6 @@ | |||
18 | #include <asm/msr.h> | 19 | #include <asm/msr.h> |
19 | #include <asm/apic.h> | 20 | #include <asm/apic.h> |
20 | #include <asm/nmi.h> | 21 | #include <asm/nmi.h> |
21 | #include <asm/perf_counter.h> | ||
22 | 22 | ||
23 | #include "op_x86_model.h" | 23 | #include "op_x86_model.h" |
24 | #include "op_counter.h" | 24 | #include "op_counter.h" |
@@ -26,20 +26,7 @@ | |||
26 | static int num_counters = 2; | 26 | static int num_counters = 2; |
27 | static int counter_width = 32; | 27 | static int counter_width = 32; |
28 | 28 | ||
29 | #define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0) | 29 | #define MSR_PPRO_EVENTSEL_RESERVED ((0xFFFFFFFFULL<<32)|(1ULL<<21)) |
30 | #define CTR_OVERFLOWED(n) (!((n) & (1ULL<<(counter_width-1)))) | ||
31 | |||
32 | #define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0) | ||
33 | #define CTRL_READ(l, h, msrs, c) do {rdmsr((msrs->controls[(c)].addr), (l), (h)); } while (0) | ||
34 | #define CTRL_WRITE(l, h, msrs, c) do {wrmsr((msrs->controls[(c)].addr), (l), (h)); } while (0) | ||
35 | #define CTRL_SET_ACTIVE(n) (n |= (1<<22)) | ||
36 | #define CTRL_SET_INACTIVE(n) (n &= ~(1<<22)) | ||
37 | #define CTRL_CLEAR(x) (x &= (1<<21)) | ||
38 | #define CTRL_SET_ENABLE(val) (val |= 1<<20) | ||
39 | #define CTRL_SET_USR(val, u) (val |= ((u & 1) << 16)) | ||
40 | #define CTRL_SET_KERN(val, k) (val |= ((k & 1) << 17)) | ||
41 | #define CTRL_SET_UM(val, m) (val |= (m << 8)) | ||
42 | #define CTRL_SET_EVENT(val, e) (val |= e) | ||
43 | 30 | ||
44 | static u64 *reset_value; | 31 | static u64 *reset_value; |
45 | 32 | ||
@@ -63,9 +50,10 @@ static void ppro_fill_in_addresses(struct op_msrs * const msrs) | |||
63 | } | 50 | } |
64 | 51 | ||
65 | 52 | ||
66 | static void ppro_setup_ctrs(struct op_msrs const * const msrs) | 53 | static void ppro_setup_ctrs(struct op_x86_model_spec const *model, |
54 | struct op_msrs const * const msrs) | ||
67 | { | 55 | { |
68 | unsigned int low, high; | 56 | u64 val; |
69 | int i; | 57 | int i; |
70 | 58 | ||
71 | if (!reset_value) { | 59 | if (!reset_value) { |
@@ -93,36 +81,30 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs) | |||
93 | } | 81 | } |
94 | 82 | ||
95 | /* clear all counters */ | 83 | /* clear all counters */ |
96 | for (i = 0 ; i < num_counters; ++i) { | 84 | for (i = 0; i < num_counters; ++i) { |
97 | if (unlikely(!CTRL_IS_RESERVED(msrs, i))) | 85 | if (unlikely(!msrs->controls[i].addr)) |
98 | continue; | 86 | continue; |
99 | CTRL_READ(low, high, msrs, i); | 87 | rdmsrl(msrs->controls[i].addr, val); |
100 | CTRL_CLEAR(low); | 88 | val &= model->reserved; |
101 | CTRL_WRITE(low, high, msrs, i); | 89 | wrmsrl(msrs->controls[i].addr, val); |
102 | } | 90 | } |
103 | 91 | ||
104 | /* avoid a false detection of ctr overflows in NMI handler */ | 92 | /* avoid a false detection of ctr overflows in NMI handler */ |
105 | for (i = 0; i < num_counters; ++i) { | 93 | for (i = 0; i < num_counters; ++i) { |
106 | if (unlikely(!CTR_IS_RESERVED(msrs, i))) | 94 | if (unlikely(!msrs->counters[i].addr)) |
107 | continue; | 95 | continue; |
108 | wrmsrl(msrs->counters[i].addr, -1LL); | 96 | wrmsrl(msrs->counters[i].addr, -1LL); |
109 | } | 97 | } |
110 | 98 | ||
111 | /* enable active counters */ | 99 | /* enable active counters */ |
112 | for (i = 0; i < num_counters; ++i) { | 100 | for (i = 0; i < num_counters; ++i) { |
113 | if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) { | 101 | if (counter_config[i].enabled && msrs->counters[i].addr) { |
114 | reset_value[i] = counter_config[i].count; | 102 | reset_value[i] = counter_config[i].count; |
115 | |||
116 | wrmsrl(msrs->counters[i].addr, -reset_value[i]); | 103 | wrmsrl(msrs->counters[i].addr, -reset_value[i]); |
117 | 104 | rdmsrl(msrs->controls[i].addr, val); | |
118 | CTRL_READ(low, high, msrs, i); | 105 | val &= model->reserved; |
119 | CTRL_CLEAR(low); | 106 | val |= op_x86_get_ctrl(model, &counter_config[i]); |
120 | CTRL_SET_ENABLE(low); | 107 | wrmsrl(msrs->controls[i].addr, val); |
121 | CTRL_SET_USR(low, counter_config[i].user); | ||
122 | CTRL_SET_KERN(low, counter_config[i].kernel); | ||
123 | CTRL_SET_UM(low, counter_config[i].unit_mask); | ||
124 | CTRL_SET_EVENT(low, counter_config[i].event); | ||
125 | CTRL_WRITE(low, high, msrs, i); | ||
126 | } else { | 108 | } else { |
127 | reset_value[i] = 0; | 109 | reset_value[i] = 0; |
128 | } | 110 | } |
@@ -143,14 +125,14 @@ static int ppro_check_ctrs(struct pt_regs * const regs, | |||
143 | if (unlikely(!reset_value)) | 125 | if (unlikely(!reset_value)) |
144 | goto out; | 126 | goto out; |
145 | 127 | ||
146 | for (i = 0 ; i < num_counters; ++i) { | 128 | for (i = 0; i < num_counters; ++i) { |
147 | if (!reset_value[i]) | 129 | if (!reset_value[i]) |
148 | continue; | 130 | continue; |
149 | rdmsrl(msrs->counters[i].addr, val); | 131 | rdmsrl(msrs->counters[i].addr, val); |
150 | if (CTR_OVERFLOWED(val)) { | 132 | if (val & (1ULL << (counter_width - 1))) |
151 | oprofile_add_sample(regs, i); | 133 | continue; |
152 | wrmsrl(msrs->counters[i].addr, -reset_value[i]); | 134 | oprofile_add_sample(regs, i); |
153 | } | 135 | wrmsrl(msrs->counters[i].addr, -reset_value[i]); |
154 | } | 136 | } |
155 | 137 | ||
156 | out: | 138 | out: |
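
The rewritten overflow test drops CTR_OVERFLOWED() in favor of checking the counter's top implemented bit directly. Counters are preloaded with -reset_value, so bit (counter_width - 1) stays set for the whole negative counting range and clears exactly when the counter wraps past zero; a set bit means "still counting, skip", a clear bit means "overflowed, sample and re-arm". A self-contained illustration of that invariant, with made-up numbers:

	#include <stdint.h>
	#include <stdio.h>

	/* Demonstration only: a counter of 'counter_width' bits preloaded
	 * with -reset_value keeps its top bit set until it has counted
	 * reset_value events. */
	int main(void)
	{
		const int counter_width = 32;
		const uint64_t mask = (1ULL << counter_width) - 1;
		const uint64_t reset_value = 100000;
		/* what wrmsrl(addr, -reset_value) leaves in the counter */
		uint64_t ctr = (0 - reset_value) & mask;

		uint64_t before = (ctr + reset_value - 1) & mask; /* 99999 events */
		uint64_t after  = (ctr + reset_value) & mask;     /* 100000 events */

		printf("before overflow: top bit = %d\n",
		       !!(before & (1ULL << (counter_width - 1))));  /* prints 1 */
		printf("after overflow:  top bit = %d\n",
		       !!(after & (1ULL << (counter_width - 1))));   /* prints 0 */
		return 0;
	}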
@@ -171,16 +153,16 @@ out: | |||
171 | 153 | ||
172 | static void ppro_start(struct op_msrs const * const msrs) | 154 | static void ppro_start(struct op_msrs const * const msrs) |
173 | { | 155 | { |
174 | unsigned int low, high; | 156 | u64 val; |
175 | int i; | 157 | int i; |
176 | 158 | ||
177 | if (!reset_value) | 159 | if (!reset_value) |
178 | return; | 160 | return; |
179 | for (i = 0; i < num_counters; ++i) { | 161 | for (i = 0; i < num_counters; ++i) { |
180 | if (reset_value[i]) { | 162 | if (reset_value[i]) { |
181 | CTRL_READ(low, high, msrs, i); | 163 | rdmsrl(msrs->controls[i].addr, val); |
182 | CTRL_SET_ACTIVE(low); | 164 | val |= ARCH_PERFMON_EVENTSEL0_ENABLE; |
183 | CTRL_WRITE(low, high, msrs, i); | 165 | wrmsrl(msrs->controls[i].addr, val); |
184 | } | 166 | } |
185 | } | 167 | } |
186 | } | 168 | } |
@@ -188,7 +170,7 @@ static void ppro_start(struct op_msrs const * const msrs) | |||
188 | 170 | ||
189 | static void ppro_stop(struct op_msrs const * const msrs) | 171 | static void ppro_stop(struct op_msrs const * const msrs) |
190 | { | 172 | { |
191 | unsigned int low, high; | 173 | u64 val; |
192 | int i; | 174 | int i; |
193 | 175 | ||
194 | if (!reset_value) | 176 | if (!reset_value) |
@@ -196,9 +178,9 @@ static void ppro_stop(struct op_msrs const * const msrs) | |||
196 | for (i = 0; i < num_counters; ++i) { | 178 | for (i = 0; i < num_counters; ++i) { |
197 | if (!reset_value[i]) | 179 | if (!reset_value[i]) |
198 | continue; | 180 | continue; |
199 | CTRL_READ(low, high, msrs, i); | 181 | rdmsrl(msrs->controls[i].addr, val); |
200 | CTRL_SET_INACTIVE(low); | 182 | val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE; |
201 | CTRL_WRITE(low, high, msrs, i); | 183 | wrmsrl(msrs->controls[i].addr, val); |
202 | } | 184 | } |
203 | } | 185 | } |
204 | 186 | ||
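
ppro_start() and ppro_stop() now touch only the global enable bit, ARCH_PERFMON_EVENTSEL0_ENABLE (bit 22 of the event-select MSR), and leave everything ppro_setup_ctrs() programmed intact. That is the same effect the old CTRL_SET_ACTIVE()/CTRL_SET_INACTIVE() macros had, minus the split low/high word handling. The read-modify-write pattern in isolation, on a plain value:

	/* Sketch: only the enable bit changes across start/stop, so the
	 * configured event, unit mask and ring filters survive the cycle. */
	#define EVENTSEL_ENABLE	(1ULL << 22)	/* ARCH_PERFMON_EVENTSEL0_ENABLE */

	static inline u64 eventsel_started(u64 val) { return val | EVENTSEL_ENABLE; }
	static inline u64 eventsel_stopped(u64 val) { return val & ~EVENTSEL_ENABLE; }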
@@ -206,12 +188,12 @@ static void ppro_shutdown(struct op_msrs const * const msrs) | |||
206 | { | 188 | { |
207 | int i; | 189 | int i; |
208 | 190 | ||
209 | for (i = 0 ; i < num_counters ; ++i) { | 191 | for (i = 0; i < num_counters; ++i) { |
210 | if (CTR_IS_RESERVED(msrs, i)) | 192 | if (msrs->counters[i].addr) |
211 | release_perfctr_nmi(MSR_P6_PERFCTR0 + i); | 193 | release_perfctr_nmi(MSR_P6_PERFCTR0 + i); |
212 | } | 194 | } |
213 | for (i = 0 ; i < num_counters ; ++i) { | 195 | for (i = 0; i < num_counters; ++i) { |
214 | if (CTRL_IS_RESERVED(msrs, i)) | 196 | if (msrs->controls[i].addr) |
215 | release_evntsel_nmi(MSR_P6_EVNTSEL0 + i); | 197 | release_evntsel_nmi(MSR_P6_EVNTSEL0 + i); |
216 | } | 198 | } |
217 | if (reset_value) { | 199 | if (reset_value) { |
@@ -222,8 +204,9 @@ static void ppro_shutdown(struct op_msrs const * const msrs) | |||
222 | 204 | ||
223 | 205 | ||
224 | struct op_x86_model_spec op_ppro_spec = { | 206 | struct op_x86_model_spec op_ppro_spec = { |
225 | .num_counters = 2, /* can be overridden */ | 207 | .num_counters = 2, |
226 | .num_controls = 2, /* ditto */ | 208 | .num_controls = 2, |
209 | .reserved = MSR_PPRO_EVENTSEL_RESERVED, | ||
227 | .fill_in_addresses = &ppro_fill_in_addresses, | 210 | .fill_in_addresses = &ppro_fill_in_addresses, |
228 | .setup_ctrs = &ppro_setup_ctrs, | 211 | .setup_ctrs = &ppro_setup_ctrs, |
229 | .check_ctrs = &ppro_check_ctrs, | 212 | .check_ctrs = &ppro_check_ctrs, |
@@ -241,7 +224,7 @@ struct op_x86_model_spec op_ppro_spec = { | |||
241 | * the specific CPU. | 224 | * the specific CPU. |
242 | */ | 225 | */ |
243 | 226 | ||
244 | void arch_perfmon_setup_counters(void) | 227 | static void arch_perfmon_setup_counters(void) |
245 | { | 228 | { |
246 | union cpuid10_eax eax; | 229 | union cpuid10_eax eax; |
247 | 230 | ||
@@ -259,11 +242,17 @@ void arch_perfmon_setup_counters(void) | |||
259 | 242 | ||
260 | op_arch_perfmon_spec.num_counters = num_counters; | 243 | op_arch_perfmon_spec.num_counters = num_counters; |
261 | op_arch_perfmon_spec.num_controls = num_counters; | 244 | op_arch_perfmon_spec.num_controls = num_counters; |
262 | op_ppro_spec.num_counters = num_counters; | 245 | } |
263 | op_ppro_spec.num_controls = num_counters; | 246 | |
247 | static int arch_perfmon_init(struct oprofile_operations *ignore) | ||
248 | { | ||
249 | arch_perfmon_setup_counters(); | ||
250 | return 0; | ||
264 | } | 251 | } |
265 | 252 | ||
266 | struct op_x86_model_spec op_arch_perfmon_spec = { | 253 | struct op_x86_model_spec op_arch_perfmon_spec = { |
254 | .reserved = MSR_PPRO_EVENTSEL_RESERVED, | ||
255 | .init = &arch_perfmon_init, | ||
267 | /* num_counters/num_controls filled in at runtime */ | 256 | /* num_counters/num_controls filled in at runtime */ |
268 | .fill_in_addresses = &ppro_fill_in_addresses, | 257 | .fill_in_addresses = &ppro_fill_in_addresses, |
269 | /* user space does the cpuid check for available events */ | 258 | /* user space does the cpuid check for available events */ |
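
arch_perfmon_setup_counters() becomes static and runs from the model's new init hook rather than from generic code, and the runtime override of op_ppro_spec disappears: the ppro model keeps its fixed pair of counters while the architectural model sizes itself from CPUID. A sketch of the CPUID leaf 0xa query the setup routine is built around; union cpuid10_eax comes from <asm/perf_counter.h>, and the exact bitfield names used here are assumptions:

	/* Simplified sketch: how many architectural perfmon counters
	 * does this CPU advertise?  Leaf 0xa, EAX: bits 7:0 hold the
	 * perfmon version, bits 15:8 the counter count. */
	static int query_num_counters(void)
	{
		union cpuid10_eax eax;

		eax.full = cpuid_eax(0xa);
		if (eax.split.version_id == 0)
			return 0;	/* no architectural perfmon at all */
		return eax.split.num_counters;
	}

The real routine also special-cases CPUs that predate the architectural scheme; the sketch shows only the happy path.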
diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h index 825e79064d64..b83776180c7f 100644 --- a/arch/x86/oprofile/op_x86_model.h +++ b/arch/x86/oprofile/op_x86_model.h | |||
@@ -6,51 +6,66 @@ | |||
6 | * @remark Read the file COPYING | 6 | * @remark Read the file COPYING |
7 | * | 7 | * |
8 | * @author Graydon Hoare | 8 | * @author Graydon Hoare |
9 | * @author Robert Richter <robert.richter@amd.com> | ||
9 | */ | 10 | */ |
10 | 11 | ||
11 | #ifndef OP_X86_MODEL_H | 12 | #ifndef OP_X86_MODEL_H |
12 | #define OP_X86_MODEL_H | 13 | #define OP_X86_MODEL_H |
13 | 14 | ||
14 | struct op_saved_msr { | 15 | #include <asm/types.h> |
15 | unsigned int high; | 16 | #include <asm/perf_counter.h> |
16 | unsigned int low; | ||
17 | }; | ||
18 | 17 | ||
19 | struct op_msr { | 18 | struct op_msr { |
20 | unsigned long addr; | 19 | unsigned long addr; |
21 | struct op_saved_msr saved; | 20 | u64 saved; |
22 | }; | 21 | }; |
23 | 22 | ||
24 | struct op_msrs { | 23 | struct op_msrs { |
25 | struct op_msr *counters; | 24 | struct op_msr *counters; |
26 | struct op_msr *controls; | 25 | struct op_msr *controls; |
26 | struct op_msr *multiplex; | ||
27 | }; | 27 | }; |
28 | 28 | ||
29 | struct pt_regs; | 29 | struct pt_regs; |
30 | 30 | ||
31 | struct oprofile_operations; | ||
32 | |||
31 | /* The model vtable abstracts the differences between | 33 | /* The model vtable abstracts the differences between |
32 | * various x86 CPU models' perfctr support. | 34 | * various x86 CPU models' perfctr support. |
33 | */ | 35 | */ |
34 | struct op_x86_model_spec { | 36 | struct op_x86_model_spec { |
35 | int (*init)(struct oprofile_operations *ops); | 37 | unsigned int num_counters; |
36 | void (*exit)(void); | 38 | unsigned int num_controls; |
37 | unsigned int num_counters; | 39 | unsigned int num_virt_counters; |
38 | unsigned int num_controls; | 40 | u64 reserved; |
39 | void (*fill_in_addresses)(struct op_msrs * const msrs); | 41 | u16 event_mask; |
40 | void (*setup_ctrs)(struct op_msrs const * const msrs); | 42 | int (*init)(struct oprofile_operations *ops); |
41 | int (*check_ctrs)(struct pt_regs * const regs, | 43 | void (*exit)(void); |
42 | struct op_msrs const * const msrs); | 44 | void (*fill_in_addresses)(struct op_msrs * const msrs); |
43 | void (*start)(struct op_msrs const * const msrs); | 45 | void (*setup_ctrs)(struct op_x86_model_spec const *model, |
44 | void (*stop)(struct op_msrs const * const msrs); | 46 | struct op_msrs const * const msrs); |
45 | void (*shutdown)(struct op_msrs const * const msrs); | 47 | int (*check_ctrs)(struct pt_regs * const regs, |
48 | struct op_msrs const * const msrs); | ||
49 | void (*start)(struct op_msrs const * const msrs); | ||
50 | void (*stop)(struct op_msrs const * const msrs); | ||
51 | void (*shutdown)(struct op_msrs const * const msrs); | ||
52 | #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX | ||
53 | void (*switch_ctrl)(struct op_x86_model_spec const *model, | ||
54 | struct op_msrs const * const msrs); | ||
55 | #endif | ||
46 | }; | 56 | }; |
47 | 57 | ||
58 | struct op_counter_config; | ||
59 | |||
60 | extern u64 op_x86_get_ctrl(struct op_x86_model_spec const *model, | ||
61 | struct op_counter_config *counter_config); | ||
62 | extern int op_x86_phys_to_virt(int phys); | ||
63 | extern int op_x86_virt_to_phys(int virt); | ||
64 | |||
48 | extern struct op_x86_model_spec op_ppro_spec; | 65 | extern struct op_x86_model_spec op_ppro_spec; |
49 | extern struct op_x86_model_spec const op_p4_spec; | 66 | extern struct op_x86_model_spec op_p4_spec; |
50 | extern struct op_x86_model_spec const op_p4_ht2_spec; | 67 | extern struct op_x86_model_spec op_p4_ht2_spec; |
51 | extern struct op_x86_model_spec const op_amd_spec; | 68 | extern struct op_x86_model_spec op_amd_spec; |
52 | extern struct op_x86_model_spec op_arch_perfmon_spec; | 69 | extern struct op_x86_model_spec op_arch_perfmon_spec; |
53 | 70 | ||
54 | extern void arch_perfmon_setup_counters(void); | ||
55 | |||
56 | #endif /* OP_X86_MODEL_H */ | 71 | #endif /* OP_X86_MODEL_H */ |
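
Three things change in this header at once. The model descriptors lose their const so that init hooks can patch num_counters at runtime; setup_ctrs() gains the model pointer so shared code can consult ->reserved and ->event_mask; and the multiplexing plumbing arrives: a third MSR array, num_virt_counters, the phys/virt translation helpers, and an optional switch_ctrl() callback that exists only under CONFIG_OPROFILE_EVENT_MULTIPLEX. A sketch of how the generic driver is expected to dispatch through the extended vtable (the real callers live in nmi_int.c, outside this excerpt):

	/* Sketch: per-CPU setup goes through the vtable, with the model
	 * now passed into setup_ctrs(). */
	static void model_setup(struct op_x86_model_spec const *model,
				struct op_msrs *msrs)
	{
		model->fill_in_addresses(msrs);
		model->setup_ctrs(model, msrs);
	}

	#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
	/* Invoked from the multiplexing timer, not from setup: rotate
	 * the hardware counters to the next set of configured events. */
	static void model_switch(struct op_x86_model_spec const *model,
				 struct op_msrs const *msrs)
	{
		if (model->switch_ctrl)
			model->switch_ctrl(model, msrs);
	}
	#endif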
diff --git a/arch/x86/pci/direct.c b/arch/x86/pci/direct.c index bd13c3e4c6db..347d882b3bb3 100644 --- a/arch/x86/pci/direct.c +++ b/arch/x86/pci/direct.c | |||
@@ -192,13 +192,14 @@ struct pci_raw_ops pci_direct_conf2 = { | |||
192 | static int __init pci_sanity_check(struct pci_raw_ops *o) | 192 | static int __init pci_sanity_check(struct pci_raw_ops *o) |
193 | { | 193 | { |
194 | u32 x = 0; | 194 | u32 x = 0; |
195 | int devfn; | 195 | int year, devfn; |
196 | 196 | ||
197 | if (pci_probe & PCI_NO_CHECKS) | 197 | if (pci_probe & PCI_NO_CHECKS) |
198 | return 1; | 198 | return 1; |
199 | /* Assume Type 1 works for newer systems. | 199 | /* Assume Type 1 works for newer systems. |
200 | This handles machines that don't have anything on PCI Bus 0. */ | 200 | This handles machines that don't have anything on PCI Bus 0. */ |
201 | if (dmi_get_year(DMI_BIOS_DATE) >= 2001) | 201 | dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL); |
202 | if (year >= 2001) | ||
202 | return 1; | 203 | return 1; |
203 | 204 | ||
204 | for (devfn = 0; devfn < 0x100; devfn++) { | 205 | for (devfn = 0; devfn < 0x100; devfn++) { |
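
dmi_get_year() is gone; its replacement dmi_get_date() parses a DMI date field into year/month/day out-parameters, any of which may be NULL when the caller does not care. The hunk above relies on the year coming back as 0 when the BIOS date is absent or unparsable, so the "year >= 2001" fast path fails safe into the full probe loop. A usage sketch of the fuller form, with the pr_info() line purely illustrative:

	int year, month, day;

	/* Sketch: fetch all three components; a missing or malformed
	 * DMI_BIOS_DATE reports the affected components as 0. */
	if (dmi_get_date(DMI_BIOS_DATE, &year, &month, &day))
		pr_info("BIOS dated %04d-%02d-%02d\n", year, month, day);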
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile index 172438f86a02..7410640db173 100644 --- a/arch/x86/xen/Makefile +++ b/arch/x86/xen/Makefile | |||
@@ -5,6 +5,10 @@ CFLAGS_REMOVE_time.o = -pg | |||
5 | CFLAGS_REMOVE_irq.o = -pg | 5 | CFLAGS_REMOVE_irq.o = -pg |
6 | endif | 6 | endif |
7 | 7 | ||
8 | # Make sure early boot has no stackprotector | ||
9 | nostackp := $(call cc-option, -fno-stack-protector) | ||
10 | CFLAGS_enlighten.o := $(nostackp) | ||
11 | |||
8 | obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \ | 12 | obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \ |
9 | time.o xen-asm.o xen-asm_$(BITS).o \ | 13 | time.o xen-asm.o xen-asm_$(BITS).o \ |
10 | grant-table.o suspend.o | 14 | grant-table.o suspend.o |
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index a8432d816903..b62ccb840cfb 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -215,6 +215,7 @@ static __init void xen_init_cpuid_mask(void) | |||
215 | (1 << X86_FEATURE_ACPI)); /* disable ACPI */ | 215 | (1 << X86_FEATURE_ACPI)); /* disable ACPI */ |
216 | 216 | ||
217 | ax = 1; | 217 | ax = 1; |
218 | cx = 0; | ||
218 | xen_cpuid(&ax, &bx, &cx, &dx); | 219 | xen_cpuid(&ax, &bx, &cx, &dx); |
219 | 220 | ||
220 | /* cpuid claims we support xsave; try enabling it to see what happens */ | 221 | /* cpuid claims we support xsave; try enabling it to see what happens */ |
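
The one-line "cx = 0" fix matters because CPUID output can depend on ECX: some leaves are sub-leaf indexed, and a paravirt wrapper like xen_cpuid() forwards all four registers, so calling it with whatever happened to be in %ecx made the result nondeterministic. Native code spells the explicit form cpuid_count(); an equivalent illustration, with a hypothetical name to avoid clashing with the real helper:

	/* Illustration: CPUID with an explicit sub-leaf in ECX.  Leaving
	 * ECX uninitialized makes the returned values depend on garbage. */
	static inline void cpuid_count_ex(unsigned int leaf, unsigned int subleaf,
					  unsigned int *a, unsigned int *b,
					  unsigned int *c, unsigned int *d)
	{
		asm volatile("cpuid"
			     : "=a" (*a), "=b" (*b), "=c" (*c), "=d" (*d)
			     : "0" (leaf), "2" (subleaf));
	}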
@@ -974,10 +975,6 @@ asmlinkage void __init xen_start_kernel(void) | |||
974 | 975 | ||
975 | xen_domain_type = XEN_PV_DOMAIN; | 976 | xen_domain_type = XEN_PV_DOMAIN; |
976 | 977 | ||
977 | BUG_ON(memcmp(xen_start_info->magic, "xen-3", 5) != 0); | ||
978 | |||
979 | xen_setup_features(); | ||
980 | |||
981 | /* Install Xen paravirt ops */ | 978 | /* Install Xen paravirt ops */ |
982 | pv_info = xen_info; | 979 | pv_info = xen_info; |
983 | pv_init_ops = xen_init_ops; | 980 | pv_init_ops = xen_init_ops; |
@@ -986,8 +983,15 @@ asmlinkage void __init xen_start_kernel(void) | |||
986 | pv_apic_ops = xen_apic_ops; | 983 | pv_apic_ops = xen_apic_ops; |
987 | pv_mmu_ops = xen_mmu_ops; | 984 | pv_mmu_ops = xen_mmu_ops; |
988 | 985 | ||
989 | xen_init_irq_ops(); | 986 | #ifdef CONFIG_X86_64 |
987 | /* | ||
988 | * Setup percpu state. We only need to do this for 64-bit | ||
989 | * because 32-bit already has %fs set properly. | ||
990 | */ | ||
991 | load_percpu_segment(0); | ||
992 | #endif | ||
990 | 993 | ||
994 | xen_init_irq_ops(); | ||
991 | xen_init_cpuid_mask(); | 995 | xen_init_cpuid_mask(); |
992 | 996 | ||
993 | #ifdef CONFIG_X86_LOCAL_APIC | 997 | #ifdef CONFIG_X86_LOCAL_APIC |
@@ -997,6 +1001,8 @@ asmlinkage void __init xen_start_kernel(void) | |||
997 | set_xen_basic_apic_ops(); | 1001 | set_xen_basic_apic_ops(); |
998 | #endif | 1002 | #endif |
999 | 1003 | ||
1004 | xen_setup_features(); | ||
1005 | |||
1000 | if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) { | 1006 | if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) { |
1001 | pv_mmu_ops.ptep_modify_prot_start = xen_ptep_modify_prot_start; | 1007 | pv_mmu_ops.ptep_modify_prot_start = xen_ptep_modify_prot_start; |
1002 | pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit; | 1008 | pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit; |
@@ -1004,13 +1010,6 @@ asmlinkage void __init xen_start_kernel(void) | |||
1004 | 1010 | ||
1005 | machine_ops = xen_machine_ops; | 1011 | machine_ops = xen_machine_ops; |
1006 | 1012 | ||
1007 | #ifdef CONFIG_X86_64 | ||
1008 | /* | ||
1009 | * Setup percpu state. We only need to do this for 64-bit | ||
1010 | * because 32-bit already has %fs set properly. | ||
1011 | */ | ||
1012 | load_percpu_segment(0); | ||
1013 | #endif | ||
1014 | /* | 1013 | /* |
1015 | * The only reliable way to retain the initial address of the | 1014 | * The only reliable way to retain the initial address of the |
1016 | * percpu gdt_page is to remember it here, so we can go and | 1015 | * percpu gdt_page is to remember it here, so we can go and |
@@ -1061,6 +1060,7 @@ asmlinkage void __init xen_start_kernel(void) | |||
1061 | /* set up basic CPUID stuff */ | 1060 | /* set up basic CPUID stuff */ |
1062 | cpu_detect(&new_cpu_data); | 1061 | cpu_detect(&new_cpu_data); |
1063 | new_cpu_data.hard_math = 1; | 1062 | new_cpu_data.hard_math = 1; |
1063 | new_cpu_data.wp_works_ok = 1; | ||
1064 | new_cpu_data.x86_capability[0] = cpuid_edx(1); | 1064 | new_cpu_data.x86_capability[0] = cpuid_edx(1); |
1065 | #endif | 1065 | #endif |
1066 | 1066 | ||