Diffstat (limited to 'arch'): 332 files changed, 9874 insertions, 5589 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
index 99193b160232..beea3ccebb5e 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -30,6 +30,18 @@ config OPROFILE_IBS
 
          If unsure, say N.
 
+config OPROFILE_EVENT_MULTIPLEX
+       bool "OProfile multiplexing support (EXPERIMENTAL)"
+       default n
+       depends on OPROFILE && X86
+       help
+         The number of hardware counters is limited. The multiplexing
+         feature enables OProfile to gather more events than counters
+         are provided by the hardware. This is realized by switching
+         between events at a user-specified time interval.
+
+         If unsure, say N.
+
 config HAVE_OPROFILE
        bool
 
diff --git a/arch/alpha/include/asm/socket.h b/arch/alpha/include/asm/socket.h
index 3641ec1452f4..26773e3246e2 100644
--- a/arch/alpha/include/asm/socket.h
+++ b/arch/alpha/include/asm/socket.h
@@ -32,6 +32,8 @@
 #define SO_RCVTIMEO 0x1012
 #define SO_SNDTIMEO 0x1013
 #define SO_ACCEPTCONN 0x1014
+#define SO_PROTOCOL 0x1028
+#define SO_DOMAIN 0x1029
 
 /* linux-specific, might as well be the same as on i386 */
 #define SO_NO_CHECK 11
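
SO_PROTOCOL and SO_DOMAIN (added here, and below for each architecture's asm/socket.h) are read-only socket options that report the protocol and address family a socket was created with. A minimal userspace sketch of the intended use, with error handling elided:

    #include <stdio.h>
    #include <sys/socket.h>

    int main(void)
    {
            int fd = socket(AF_INET, SOCK_STREAM, 0);
            int proto, domain;
            socklen_t len = sizeof(int);

            /* Read-only options: only getsockopt() applies. */
            getsockopt(fd, SOL_SOCKET, SO_PROTOCOL, &proto, &len); /* e.g. IPPROTO_TCP */
            len = sizeof(int);
            getsockopt(fd, SOL_SOCKET, SO_DOMAIN, &domain, &len);  /* e.g. AF_INET */
            printf("protocol=%d domain=%d\n", proto, domain);
            return 0;
    }
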
diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h
index 60c83abfde70..5076a8860b18 100644
--- a/arch/alpha/include/asm/thread_info.h
+++ b/arch/alpha/include/asm/thread_info.h
@@ -75,6 +75,7 @@ register struct thread_info *__current_thread_info __asm__("$8");
 #define TIF_UAC_SIGBUS 7
 #define TIF_MEMDIE 8
 #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal */
+#define TIF_NOTIFY_RESUME 10 /* callback before returning to user */
 #define TIF_FREEZE 16 /* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -82,10 +83,12 @@ register struct thread_info *__current_thread_info __asm__("$8");
 #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
 #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
+#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
 #define _TIF_FREEZE (1<<TIF_FREEZE)
 
 /* Work to do on interrupt/exception return. */
-#define _TIF_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED)
+#define _TIF_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
+                        _TIF_NOTIFY_RESUME)
 
 /* Work to do on any return to userspace. */
 #define _TIF_ALLWORK_MASK (_TIF_WORK_MASK \
diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c
index df65eaa84c4c..0932dbb1ef8e 100644
--- a/arch/alpha/kernel/signal.c
+++ b/arch/alpha/kernel/signal.c
@@ -20,6 +20,7 @@
 #include <linux/binfmts.h>
 #include <linux/bitops.h>
 #include <linux/syscalls.h>
+#include <linux/tracehook.h>
 
 #include <asm/uaccess.h>
 #include <asm/sigcontext.h>
@@ -683,4 +684,11 @@ do_notify_resume(struct pt_regs *regs, struct switch_stack *sw,
 {
        if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
                do_signal(regs, sw, r0, r19);
+
+       if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+               clear_thread_flag(TIF_NOTIFY_RESUME);
+               tracehook_notify_resume(regs);
+               if (current->replacement_session_keyring)
+                       key_replace_session_keyring();
+       }
 }
diff --git a/arch/arm/include/asm/socket.h b/arch/arm/include/asm/socket.h
index 537de4e0ef50..92ac61d294fd 100644
--- a/arch/arm/include/asm/socket.h
+++ b/arch/arm/include/asm/socket.h
@@ -57,4 +57,7 @@
 #define SO_TIMESTAMPING 37
 #define SCM_TIMESTAMPING SO_TIMESTAMPING
 
+#define SO_PROTOCOL 38
+#define SO_DOMAIN 39
+
 #endif /* _ASM_SOCKET_H */
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 73394e50cbca..d3a39b1e6c0f 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -130,11 +130,13 @@ extern void vfp_sync_state(struct thread_info *thread);
  * TIF_SYSCALL_TRACE - syscall trace active
  * TIF_SIGPENDING - signal pending
  * TIF_NEED_RESCHED - rescheduling necessary
+ * TIF_NOTIFY_RESUME - callback before returning to user
  * TIF_USEDFPU - FPU was used by this task this quantum (SMP)
  * TIF_POLLING_NRFLAG - true if poll_idle() is polling TIF_NEED_RESCHED
  */
 #define TIF_SIGPENDING 0
 #define TIF_NEED_RESCHED 1
+#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
 #define TIF_SYSCALL_TRACE 8
 #define TIF_POLLING_NRFLAG 16
 #define TIF_USING_IWMMXT 17
@@ -143,6 +145,7 @@ extern void vfp_sync_state(struct thread_info *thread);
 
 #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
 #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 8c3de1a350b5..7813ab782fda 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -51,7 +51,7 @@ fast_work_pending:
 work_pending:
        tst     r1, #_TIF_NEED_RESCHED
        bne     work_resched
-       tst     r1, #_TIF_SIGPENDING
+       tst     r1, #_TIF_SIGPENDING|_TIF_NOTIFY_RESUME
        beq     no_work_pending
        mov     r0, sp                          @ 'regs'
        mov     r2, why                         @ 'syscall'
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index f6bc5d442782..b76fe06d92e7 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -12,6 +12,7 @@
 #include <linux/personality.h>
 #include <linux/freezer.h>
 #include <linux/uaccess.h>
+#include <linux/tracehook.h>
 
 #include <asm/elf.h>
 #include <asm/cacheflush.h>
@@ -707,4 +708,11 @@ do_notify_resume(struct pt_regs *regs, unsigned int thread_flags, int syscall)
 {
        if (thread_flags & _TIF_SIGPENDING)
                do_signal(&current->blocked, regs, syscall);
+
+       if (thread_flags & _TIF_NOTIFY_RESUME) {
+               clear_thread_flag(TIF_NOTIFY_RESUME);
+               tracehook_notify_resume(regs);
+               if (current->replacement_session_keyring)
+                       key_replace_session_keyring();
+       }
 }
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index 1e93dfee7543..5083f03e9b5e 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -416,6 +416,7 @@ static struct clocksource clocksource_ixp4xx = {
 };
 
 unsigned long ixp4xx_timer_freq = FREQ;
+EXPORT_SYMBOL(ixp4xx_timer_freq);
 static int __init ixp4xx_clocksource_init(void)
 {
        clocksource_ixp4xx.mult =
diff --git a/arch/arm/mach-omap2/mcbsp.c b/arch/arm/mach-omap2/mcbsp.c
index 99b6e1546311..0447d26d454b 100644
--- a/arch/arm/mach-omap2/mcbsp.c
+++ b/arch/arm/mach-omap2/mcbsp.c
@@ -128,6 +128,7 @@ static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = {
                .rx_irq = INT_24XX_MCBSP1_IRQ_RX,
                .tx_irq = INT_24XX_MCBSP1_IRQ_TX,
                .ops = &omap2_mcbsp_ops,
+               .buffer_size = 0x6F,
        },
        {
                .phys_base = OMAP34XX_MCBSP2_BASE,
@@ -136,6 +137,7 @@ static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = {
                .rx_irq = INT_24XX_MCBSP2_IRQ_RX,
                .tx_irq = INT_24XX_MCBSP2_IRQ_TX,
                .ops = &omap2_mcbsp_ops,
+               .buffer_size = 0x3FF,
        },
        {
                .phys_base = OMAP34XX_MCBSP3_BASE,
@@ -144,6 +146,7 @@ static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = {
                .rx_irq = INT_24XX_MCBSP3_IRQ_RX,
                .tx_irq = INT_24XX_MCBSP3_IRQ_TX,
                .ops = &omap2_mcbsp_ops,
+               .buffer_size = 0x6F,
        },
        {
                .phys_base = OMAP34XX_MCBSP4_BASE,
@@ -152,6 +155,7 @@ static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = {
                .rx_irq = INT_24XX_MCBSP4_IRQ_RX,
                .tx_irq = INT_24XX_MCBSP4_IRQ_TX,
                .ops = &omap2_mcbsp_ops,
+               .buffer_size = 0x6F,
        },
        {
                .phys_base = OMAP34XX_MCBSP5_BASE,
@@ -160,6 +164,7 @@ static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = {
                .rx_irq = INT_24XX_MCBSP5_IRQ_RX,
                .tx_irq = INT_24XX_MCBSP5_IRQ_TX,
                .ops = &omap2_mcbsp_ops,
+               .buffer_size = 0x6F,
        },
 };
 #define OMAP34XX_MCBSP_PDATA_SZ ARRAY_SIZE(omap34xx_mcbsp_pdata)
diff --git a/arch/arm/mach-pxa/include/mach/audio.h b/arch/arm/mach-pxa/include/mach/audio.h
index 16eb02552d5d..a3449e35a6f5 100644
--- a/arch/arm/mach-pxa/include/mach/audio.h
+++ b/arch/arm/mach-pxa/include/mach/audio.h
@@ -3,10 +3,12 @@
 
 #include <sound/core.h>
 #include <sound/pcm.h>
+#include <sound/ac97_codec.h>
 
 /*
  * @reset_gpio: AC97 reset gpio (normally gpio113 or gpio95)
  *              a -1 value means no gpio will be used for reset
+ * @codec_pdata: AC97 codec platform_data
 
  * reset_gpio should only be specified for pxa27x CPUs where a silicon
  * bug prevents correct operation of the reset line. If not specified,
@@ -20,6 +22,7 @@ typedef struct {
        void (*resume)(void *);
        void *priv;
        int reset_gpio;
+       void *codec_pdata[AC97_BUS_MAX_DEVICES];
 } pxa2xx_audio_ops_t;
 
 extern void pxa_set_ac97_info(pxa2xx_audio_ops_t *ops);
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index e3ac94f09006..9b00f4cbc903 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -1127,6 +1127,11 @@ int omap_dma_running(void)
 void omap_dma_link_lch(int lch_head, int lch_queue)
 {
        if (omap_dma_in_1510_mode()) {
+               if (lch_head == lch_queue) {
+                       dma_write(dma_read(CCR(lch_head)) | (3 << 8),
+                                       CCR(lch_head));
+                       return;
+               }
                printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
                BUG();
                return;
@@ -1149,6 +1154,11 @@ EXPORT_SYMBOL(omap_dma_link_lch);
 void omap_dma_unlink_lch(int lch_head, int lch_queue)
 {
        if (omap_dma_in_1510_mode()) {
+               if (lch_head == lch_queue) {
+                       dma_write(dma_read(CCR(lch_head)) & ~(3 << 8),
+                                       CCR(lch_head));
+                       return;
+               }
                printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
                BUG();
                return;
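
The special case added above makes "linking a channel to itself" work on OMAP1510 by setting (or clearing) bits 8 and 9 of that channel's CCR instead of BUG()ing out. As a reading aid, a sketch of what the (3 << 8) mask covers; the bit names are an assumption for illustration, not taken from this patch:

    /* CCR bits 8 and 9 on OMAP1510; names assumed for illustration --
     * roughly the auto-init/repeat pair that makes a channel restart
     * itself, i.e. a channel "linked" to itself. */
    #define OMAP1510_DMA_CCR_AUTO_INIT (1 << 8) /* assumption */
    #define OMAP1510_DMA_CCR_REPEAT    (1 << 9) /* assumption */
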
diff --git a/arch/arm/plat-omap/include/mach/mcbsp.h b/arch/arm/plat-omap/include/mach/mcbsp.h
index bb154ea76769..63a3f254af7b 100644
--- a/arch/arm/plat-omap/include/mach/mcbsp.h
+++ b/arch/arm/plat-omap/include/mach/mcbsp.h
@@ -134,6 +134,11 @@
 #define OMAP_MCBSP_REG_XCERG 0x74
 #define OMAP_MCBSP_REG_XCERH 0x78
 #define OMAP_MCBSP_REG_SYSCON 0x8C
+#define OMAP_MCBSP_REG_THRSH2 0x90
+#define OMAP_MCBSP_REG_THRSH1 0x94
+#define OMAP_MCBSP_REG_IRQST 0xA0
+#define OMAP_MCBSP_REG_IRQEN 0xA4
+#define OMAP_MCBSP_REG_WAKEUPEN 0xA8
 #define OMAP_MCBSP_REG_XCCR 0xAC
 #define OMAP_MCBSP_REG_RCCR 0xB0
 
@@ -249,8 +254,27 @@
 #define RDISABLE 0x0001
 
 /********************** McBSP SYSCONFIG bit definitions ********************/
+#define CLOCKACTIVITY(value) ((value)<<8)
+#define SIDLEMODE(value) ((value)<<3)
+#define ENAWAKEUP 0x0004
 #define SOFTRST 0x0002
 
+/********************** McBSP DMA operating modes **************************/
+#define MCBSP_DMA_MODE_ELEMENT 0
+#define MCBSP_DMA_MODE_THRESHOLD 1
+#define MCBSP_DMA_MODE_FRAME 2
+
+/********************** McBSP WAKEUPEN bit definitions *********************/
+#define XEMPTYEOFEN 0x4000
+#define XRDYEN 0x0400
+#define XEOFEN 0x0200
+#define XFSXEN 0x0100
+#define XSYNCERREN 0x0080
+#define RRDYEN 0x0008
+#define REOFEN 0x0004
+#define RFSREN 0x0002
+#define RSYNCERREN 0x0001
+
 /* we don't do multichannel for now */
 struct omap_mcbsp_reg_cfg {
        u16 spcr2;
@@ -344,6 +368,9 @@ struct omap_mcbsp_platform_data {
        u8 dma_rx_sync, dma_tx_sync;
        u16 rx_irq, tx_irq;
        struct omap_mcbsp_ops *ops;
+#ifdef CONFIG_ARCH_OMAP34XX
+       u16 buffer_size;
+#endif
 };
 
 struct omap_mcbsp {
@@ -377,6 +404,11 @@ struct omap_mcbsp {
        struct omap_mcbsp_platform_data *pdata;
        struct clk *iclk;
        struct clk *fclk;
+#ifdef CONFIG_ARCH_OMAP34XX
+       int dma_op_mode;
+       u16 max_tx_thres;
+       u16 max_rx_thres;
+#endif
 };
 extern struct omap_mcbsp **mcbsp_ptr;
 extern int omap_mcbsp_count;
@@ -385,10 +417,25 @@ int omap_mcbsp_init(void);
 void omap_mcbsp_register_board_cfg(struct omap_mcbsp_platform_data *config,
                                        int size);
 void omap_mcbsp_config(unsigned int id, const struct omap_mcbsp_reg_cfg * config);
+#ifdef CONFIG_ARCH_OMAP34XX
+void omap_mcbsp_set_tx_threshold(unsigned int id, u16 threshold);
+void omap_mcbsp_set_rx_threshold(unsigned int id, u16 threshold);
+u16 omap_mcbsp_get_max_tx_threshold(unsigned int id);
+u16 omap_mcbsp_get_max_rx_threshold(unsigned int id);
+int omap_mcbsp_get_dma_op_mode(unsigned int id);
+#else
+static inline void omap_mcbsp_set_tx_threshold(unsigned int id, u16 threshold)
+{ }
+static inline void omap_mcbsp_set_rx_threshold(unsigned int id, u16 threshold)
+{ }
+static inline u16 omap_mcbsp_get_max_tx_threshold(unsigned int id) { return 0; }
+static inline u16 omap_mcbsp_get_max_rx_threshold(unsigned int id) { return 0; }
+static inline int omap_mcbsp_get_dma_op_mode(unsigned int id) { return 0; }
+#endif
 int omap_mcbsp_request(unsigned int id);
 void omap_mcbsp_free(unsigned int id);
-void omap_mcbsp_start(unsigned int id);
-void omap_mcbsp_stop(unsigned int id);
+void omap_mcbsp_start(unsigned int id, int tx, int rx);
+void omap_mcbsp_stop(unsigned int id, int tx, int rx);
 void omap_mcbsp_xmit_word(unsigned int id, u32 word);
 u32 omap_mcbsp_recv_word(unsigned int id);
 
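
omap_mcbsp_start() and omap_mcbsp_stop() now take explicit transmitter and receiver flags, so one direction can be started without disturbing the other. A hypothetical caller written against the new prototypes above (the instance id and direction choice are illustrative, not from this patch):

    /* Start playback only: tx = 1, rx = 0. */
    omap_mcbsp_start(OMAP_MCBSP1, 1, 0);
    /* ... playback runs ... */
    omap_mcbsp_stop(OMAP_MCBSP1, 1, 0);
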
diff --git a/arch/arm/plat-omap/mcbsp.c b/arch/arm/plat-omap/mcbsp.c
index efa0e0111f38..8dc7927906f1 100644
--- a/arch/arm/plat-omap/mcbsp.c
+++ b/arch/arm/plat-omap/mcbsp.c
@@ -198,6 +198,170 @@ void omap_mcbsp_config(unsigned int id, const struct omap_mcbsp_reg_cfg *config)
 }
 EXPORT_SYMBOL(omap_mcbsp_config);
 
+#ifdef CONFIG_ARCH_OMAP34XX
+/*
+ * omap_mcbsp_set_tx_threshold configures how to deal
+ * with the transmit threshold. The threshold value and handler
+ * can be configured here.
+ */
+void omap_mcbsp_set_tx_threshold(unsigned int id, u16 threshold)
+{
+       struct omap_mcbsp *mcbsp;
+       void __iomem *io_base;
+
+       if (!cpu_is_omap34xx())
+               return;
+
+       if (!omap_mcbsp_check_valid_id(id)) {
+               printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
+               return;
+       }
+       mcbsp = id_to_mcbsp_ptr(id);
+       io_base = mcbsp->io_base;
+
+       OMAP_MCBSP_WRITE(io_base, THRSH2, threshold);
+}
+EXPORT_SYMBOL(omap_mcbsp_set_tx_threshold);
+
+/*
+ * omap_mcbsp_set_rx_threshold configures how to deal
+ * with the receive threshold. The threshold value and handler
+ * can be configured here.
+ */
+void omap_mcbsp_set_rx_threshold(unsigned int id, u16 threshold)
+{
+       struct omap_mcbsp *mcbsp;
+       void __iomem *io_base;
+
+       if (!cpu_is_omap34xx())
+               return;
+
+       if (!omap_mcbsp_check_valid_id(id)) {
+               printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
+               return;
+       }
+       mcbsp = id_to_mcbsp_ptr(id);
+       io_base = mcbsp->io_base;
+
+       OMAP_MCBSP_WRITE(io_base, THRSH1, threshold);
+}
+EXPORT_SYMBOL(omap_mcbsp_set_rx_threshold);
+
+/*
+ * omap_mcbsp_get_max_tx_threshold just returns the currently configured
+ * maximum threshold for transmission
+ */
+u16 omap_mcbsp_get_max_tx_threshold(unsigned int id)
+{
+       struct omap_mcbsp *mcbsp;
+
+       if (!omap_mcbsp_check_valid_id(id)) {
+               printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
+               return -ENODEV;
+       }
+       mcbsp = id_to_mcbsp_ptr(id);
+
+       return mcbsp->max_tx_thres;
+}
+EXPORT_SYMBOL(omap_mcbsp_get_max_tx_threshold);
+
+/*
+ * omap_mcbsp_get_max_rx_threshold just returns the currently configured
+ * maximum threshold for reception
+ */
+u16 omap_mcbsp_get_max_rx_threshold(unsigned int id)
+{
+       struct omap_mcbsp *mcbsp;
+
+       if (!omap_mcbsp_check_valid_id(id)) {
+               printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
+               return -ENODEV;
+       }
+       mcbsp = id_to_mcbsp_ptr(id);
+
+       return mcbsp->max_rx_thres;
+}
+EXPORT_SYMBOL(omap_mcbsp_get_max_rx_threshold);
+
+/*
+ * omap_mcbsp_get_dma_op_mode just returns the currently configured
+ * operating mode for the mcbsp channel
+ */
+int omap_mcbsp_get_dma_op_mode(unsigned int id)
+{
+       struct omap_mcbsp *mcbsp;
+       int dma_op_mode;
+
+       if (!omap_mcbsp_check_valid_id(id)) {
+               printk(KERN_ERR "%s: Invalid id (%u)\n", __func__, id + 1);
+               return -ENODEV;
+       }
+       mcbsp = id_to_mcbsp_ptr(id);
+
+       spin_lock_irq(&mcbsp->lock);
+       dma_op_mode = mcbsp->dma_op_mode;
+       spin_unlock_irq(&mcbsp->lock);
+
+       return dma_op_mode;
+}
+EXPORT_SYMBOL(omap_mcbsp_get_dma_op_mode);
+
+static inline void omap34xx_mcbsp_request(struct omap_mcbsp *mcbsp)
+{
+       /*
+        * Enable wakeup behavior, smart idle and all wakeups
+        * REVISIT: some wakeups may be unnecessary
+        */
+       if (cpu_is_omap34xx()) {
+               u16 syscon;
+
+               syscon = OMAP_MCBSP_READ(mcbsp->io_base, SYSCON);
+               syscon &= ~(ENAWAKEUP | SIDLEMODE(0x03) | CLOCKACTIVITY(0x03));
+
+               spin_lock_irq(&mcbsp->lock);
+               if (mcbsp->dma_op_mode == MCBSP_DMA_MODE_THRESHOLD) {
+                       syscon |= (ENAWAKEUP | SIDLEMODE(0x02) |
+                                       CLOCKACTIVITY(0x02));
+                       OMAP_MCBSP_WRITE(mcbsp->io_base, WAKEUPEN,
+                                       XRDYEN | RRDYEN);
+               } else {
+                       syscon |= SIDLEMODE(0x01);
+               }
+               spin_unlock_irq(&mcbsp->lock);
+
+               OMAP_MCBSP_WRITE(mcbsp->io_base, SYSCON, syscon);
+       }
+}
+
+static inline void omap34xx_mcbsp_free(struct omap_mcbsp *mcbsp)
+{
+       /*
+        * Disable wakeup behavior, smart idle and all wakeups
+        */
+       if (cpu_is_omap34xx()) {
+               u16 syscon;
+
+               syscon = OMAP_MCBSP_READ(mcbsp->io_base, SYSCON);
+               syscon &= ~(ENAWAKEUP | SIDLEMODE(0x03) | CLOCKACTIVITY(0x03));
+               /*
+                * HW bug workaround - If no_idle mode is taken, we need to
+                * go to smart_idle before going to always_idle, or the
+                * device will not hit retention anymore.
+                */
+               syscon |= SIDLEMODE(0x02);
+               OMAP_MCBSP_WRITE(mcbsp->io_base, SYSCON, syscon);
+
+               syscon &= ~(SIDLEMODE(0x03));
+               OMAP_MCBSP_WRITE(mcbsp->io_base, SYSCON, syscon);
+
+               OMAP_MCBSP_WRITE(mcbsp->io_base, WAKEUPEN, 0);
+       }
+}
+#else
+static inline void omap34xx_mcbsp_request(struct omap_mcbsp *mcbsp) {}
+static inline void omap34xx_mcbsp_free(struct omap_mcbsp *mcbsp) {}
+#endif
+
 /*
  * We can choose between IRQ based or polled IO.
  * This needs to be called before omap_mcbsp_request().
@@ -257,6 +421,9 @@ int omap_mcbsp_request(unsigned int id)
        clk_enable(mcbsp->iclk);
        clk_enable(mcbsp->fclk);
 
+       /* Do procedure specific to omap34xx arch, if applicable */
+       omap34xx_mcbsp_request(mcbsp);
+
        /*
         * Make sure that transmitter, receiver and sample-rate generator are
         * not running before activating IRQs.
@@ -305,6 +472,9 @@ void omap_mcbsp_free(unsigned int id)
        if (mcbsp->pdata && mcbsp->pdata->ops && mcbsp->pdata->ops->free)
                mcbsp->pdata->ops->free(id);
 
+       /* Do procedure specific to omap34xx arch, if applicable */
+       omap34xx_mcbsp_free(mcbsp);
+
        clk_disable(mcbsp->fclk);
        clk_disable(mcbsp->iclk);
 
@@ -328,14 +498,15 @@ void omap_mcbsp_free(unsigned int id)
 EXPORT_SYMBOL(omap_mcbsp_free);
 
 /*
- * Here we start the McBSP, by enabling the sample
- * generator, both transmitter and receivers,
- * and the frame sync.
+ * Here we start the McBSP, by enabling transmitter, receiver or both.
+ * If no transmitter or receiver is active prior to calling, then the
+ * sample-rate generator and frame sync are started.
  */
-void omap_mcbsp_start(unsigned int id)
+void omap_mcbsp_start(unsigned int id, int tx, int rx)
 {
        struct omap_mcbsp *mcbsp;
        void __iomem *io_base;
+       int idle;
        u16 w;
 
        if (!omap_mcbsp_check_valid_id(id)) {
@@ -348,32 +519,58 @@ void omap_mcbsp_start(unsigned int id)
        mcbsp->rx_word_length = (OMAP_MCBSP_READ(io_base, RCR1) >> 5) & 0x7;
        mcbsp->tx_word_length = (OMAP_MCBSP_READ(io_base, XCR1) >> 5) & 0x7;
 
-       /* Start the sample generator */
-       w = OMAP_MCBSP_READ(io_base, SPCR2);
-       OMAP_MCBSP_WRITE(io_base, SPCR2, w | (1 << 6));
+       idle = !((OMAP_MCBSP_READ(io_base, SPCR2) |
+                       OMAP_MCBSP_READ(io_base, SPCR1)) & 1);
+
+       if (idle) {
+               /* Start the sample generator */
+               w = OMAP_MCBSP_READ(io_base, SPCR2);
+               OMAP_MCBSP_WRITE(io_base, SPCR2, w | (1 << 6));
+       }
 
        /* Enable transmitter and receiver */
+       tx &= 1;
        w = OMAP_MCBSP_READ(io_base, SPCR2);
-       OMAP_MCBSP_WRITE(io_base, SPCR2, w | 1);
+       OMAP_MCBSP_WRITE(io_base, SPCR2, w | tx);
 
+       rx &= 1;
        w = OMAP_MCBSP_READ(io_base, SPCR1);
-       OMAP_MCBSP_WRITE(io_base, SPCR1, w | 1);
+       OMAP_MCBSP_WRITE(io_base, SPCR1, w | rx);
 
-       udelay(100);
+       /*
+        * Worst case: CLKSRG*2 = 8000khz: (1/8000) * 2 * 2 usec
+        * REVISIT: 100us may give enough time for two CLKSRG, however
+        * due to some unknown PM-related, clock-gating etc. reason it
+        * is now at 500us.
+        */
+       udelay(500);
 
-       /* Start frame sync */
-       w = OMAP_MCBSP_READ(io_base, SPCR2);
-       OMAP_MCBSP_WRITE(io_base, SPCR2, w | (1 << 7));
+       if (idle) {
+               /* Start frame sync */
+               w = OMAP_MCBSP_READ(io_base, SPCR2);
+               OMAP_MCBSP_WRITE(io_base, SPCR2, w | (1 << 7));
+       }
+
+       if (cpu_is_omap2430() || cpu_is_omap34xx()) {
+               /* Release the transmitter and receiver */
+               w = OMAP_MCBSP_READ(io_base, XCCR);
+               w &= ~(tx ? XDISABLE : 0);
+               OMAP_MCBSP_WRITE(io_base, XCCR, w);
+               w = OMAP_MCBSP_READ(io_base, RCCR);
+               w &= ~(rx ? RDISABLE : 0);
+               OMAP_MCBSP_WRITE(io_base, RCCR, w);
+       }
 
        /* Dump McBSP Regs */
        omap_mcbsp_dump_reg(id);
 }
 EXPORT_SYMBOL(omap_mcbsp_start);
 
-void omap_mcbsp_stop(unsigned int id)
+void omap_mcbsp_stop(unsigned int id, int tx, int rx)
 {
        struct omap_mcbsp *mcbsp;
        void __iomem *io_base;
+       int idle;
        u16 w;
 
        if (!omap_mcbsp_check_valid_id(id)) {
@@ -385,16 +582,33 @@ void omap_mcbsp_stop(unsigned int id)
        io_base = mcbsp->io_base;
 
        /* Reset transmitter */
+       tx &= 1;
+       if (cpu_is_omap2430() || cpu_is_omap34xx()) {
+               w = OMAP_MCBSP_READ(io_base, XCCR);
+               w |= (tx ? XDISABLE : 0);
+               OMAP_MCBSP_WRITE(io_base, XCCR, w);
+       }
        w = OMAP_MCBSP_READ(io_base, SPCR2);
-       OMAP_MCBSP_WRITE(io_base, SPCR2, w & ~(1));
+       OMAP_MCBSP_WRITE(io_base, SPCR2, w & ~tx);
 
        /* Reset receiver */
+       rx &= 1;
+       if (cpu_is_omap2430() || cpu_is_omap34xx()) {
+               w = OMAP_MCBSP_READ(io_base, RCCR);
+               w |= (rx ? RDISABLE : 0);
+               OMAP_MCBSP_WRITE(io_base, RCCR, w);
+       }
        w = OMAP_MCBSP_READ(io_base, SPCR1);
-       OMAP_MCBSP_WRITE(io_base, SPCR1, w & ~(1));
+       OMAP_MCBSP_WRITE(io_base, SPCR1, w & ~rx);
 
-       /* Reset the sample rate generator */
-       w = OMAP_MCBSP_READ(io_base, SPCR2);
-       OMAP_MCBSP_WRITE(io_base, SPCR2, w & ~(1 << 6));
+       idle = !((OMAP_MCBSP_READ(io_base, SPCR2) |
+                       OMAP_MCBSP_READ(io_base, SPCR1)) & 1);
+
+       if (idle) {
+               /* Reset the sample rate generator */
+               w = OMAP_MCBSP_READ(io_base, SPCR2);
+               OMAP_MCBSP_WRITE(io_base, SPCR2, w & ~(1 << 6));
+       }
 }
 EXPORT_SYMBOL(omap_mcbsp_stop);
 
@@ -883,6 +1097,149 @@ void omap_mcbsp_set_spi_mode(unsigned int id,
 }
 EXPORT_SYMBOL(omap_mcbsp_set_spi_mode);
 
+#ifdef CONFIG_ARCH_OMAP34XX
+#define max_thres(m) (mcbsp->pdata->buffer_size)
+#define valid_threshold(m, val) ((val) <= max_thres(m))
+#define THRESHOLD_PROP_BUILDER(prop) \
+static ssize_t prop##_show(struct device *dev, \
+                       struct device_attribute *attr, char *buf) \
+{ \
+       struct omap_mcbsp *mcbsp = dev_get_drvdata(dev); \
+ \
+       return sprintf(buf, "%u\n", mcbsp->prop); \
+} \
+ \
+static ssize_t prop##_store(struct device *dev, \
+                       struct device_attribute *attr, \
+                       const char *buf, size_t size) \
+{ \
+       struct omap_mcbsp *mcbsp = dev_get_drvdata(dev); \
+       unsigned long val; \
+       int status; \
+ \
+       status = strict_strtoul(buf, 0, &val); \
+       if (status) \
+               return status; \
+ \
+       if (!valid_threshold(mcbsp, val)) \
+               return -EDOM; \
+ \
+       mcbsp->prop = val; \
+       return size; \
+} \
+ \
+static DEVICE_ATTR(prop, 0644, prop##_show, prop##_store);
+
+THRESHOLD_PROP_BUILDER(max_tx_thres);
+THRESHOLD_PROP_BUILDER(max_rx_thres);
+
+static const char *dma_op_modes[] = {
+       "element", "threshold", "frame",
+};
+
+static ssize_t dma_op_mode_show(struct device *dev,
+                       struct device_attribute *attr, char *buf)
+{
+       struct omap_mcbsp *mcbsp = dev_get_drvdata(dev);
+       int dma_op_mode, i = 0;
+       ssize_t len = 0;
+       const char * const *s;
+
+       spin_lock_irq(&mcbsp->lock);
+       dma_op_mode = mcbsp->dma_op_mode;
+       spin_unlock_irq(&mcbsp->lock);
+
+       for (s = &dma_op_modes[i]; i < ARRAY_SIZE(dma_op_modes); s++, i++) {
+               if (dma_op_mode == i)
+                       len += sprintf(buf + len, "[%s] ", *s);
+               else
+                       len += sprintf(buf + len, "%s ", *s);
+       }
+       len += sprintf(buf + len, "\n");
+
+       return len;
+}
+
+static ssize_t dma_op_mode_store(struct device *dev,
+                       struct device_attribute *attr,
+                       const char *buf, size_t size)
+{
+       struct omap_mcbsp *mcbsp = dev_get_drvdata(dev);
+       const char * const *s;
+       int i = 0;
+
+       for (s = &dma_op_modes[i]; i < ARRAY_SIZE(dma_op_modes); s++, i++)
+               if (sysfs_streq(buf, *s))
+                       break;
+
+       if (i == ARRAY_SIZE(dma_op_modes))
+               return -EINVAL;
+
+       spin_lock_irq(&mcbsp->lock);
+       if (!mcbsp->free) {
+               size = -EBUSY;
+               goto unlock;
+       }
+       mcbsp->dma_op_mode = i;
+
+unlock:
+       spin_unlock_irq(&mcbsp->lock);
+
+       return size;
+}
+
+static DEVICE_ATTR(dma_op_mode, 0644, dma_op_mode_show, dma_op_mode_store);
+
+static const struct attribute *additional_attrs[] = {
+       &dev_attr_max_tx_thres.attr,
+       &dev_attr_max_rx_thres.attr,
+       &dev_attr_dma_op_mode.attr,
+       NULL,
+};
+
+static const struct attribute_group additional_attr_group = {
+       .attrs = (struct attribute **)additional_attrs,
+};
+
+static inline int __devinit omap_additional_add(struct device *dev)
+{
+       return sysfs_create_group(&dev->kobj, &additional_attr_group);
+}
+
+static inline void __devexit omap_additional_remove(struct device *dev)
+{
+       sysfs_remove_group(&dev->kobj, &additional_attr_group);
+}
+
+static inline void __devinit omap34xx_device_init(struct omap_mcbsp *mcbsp)
+{
+       mcbsp->dma_op_mode = MCBSP_DMA_MODE_ELEMENT;
+       if (cpu_is_omap34xx()) {
+               mcbsp->max_tx_thres = max_thres(mcbsp);
+               mcbsp->max_rx_thres = max_thres(mcbsp);
+               /*
+                * REVISIT: Set dma_op_mode to THRESHOLD as default
+                * for mcbsp2 instances.
+                */
+               if (omap_additional_add(mcbsp->dev))
+                       dev_warn(mcbsp->dev,
+                               "Unable to create additional controls\n");
+       } else {
+               mcbsp->max_tx_thres = -EINVAL;
+               mcbsp->max_rx_thres = -EINVAL;
+       }
+}
+
+static inline void __devexit omap34xx_device_exit(struct omap_mcbsp *mcbsp)
+{
+       if (cpu_is_omap34xx())
+               omap_additional_remove(mcbsp->dev);
+}
+#else
+static inline void __devinit omap34xx_device_init(struct omap_mcbsp *mcbsp) {}
+static inline void __devexit omap34xx_device_exit(struct omap_mcbsp *mcbsp) {}
+#endif /* CONFIG_ARCH_OMAP34XX */
+
 /*
  * McBSP1 and McBSP3 are directly mapped on 1610 and 1510.
  * 730 has only 2 McBSP, and both of them are MPU peripherals.
@@ -953,6 +1310,10 @@ static int __devinit omap_mcbsp_probe(struct platform_device *pdev)
        mcbsp->dev = &pdev->dev;
        mcbsp_ptr[id] = mcbsp;
        platform_set_drvdata(pdev, mcbsp);
+
+       /* Initialize mcbsp properties for OMAP34XX if needed / applicable */
+       omap34xx_device_init(mcbsp);
+
        return 0;
 
 err_fclk:
@@ -976,6 +1337,8 @@ static int __devexit omap_mcbsp_remove(struct platform_device *pdev)
                        mcbsp->pdata->ops->free)
                mcbsp->pdata->ops->free(mcbsp->id);
 
+       omap34xx_device_exit(mcbsp);
+
        clk_disable(mcbsp->fclk);
        clk_disable(mcbsp->iclk);
        clk_put(mcbsp->fclk);
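
The new exports give client drivers a way to query the DMA operating mode and program the FIFO thresholds; the max_tx_thres, max_rx_thres and dma_op_mode device attributes expose the same state through sysfs. A sketch of how a client (for instance an ASoC platform driver) might combine them; the id value and the policy shown are assumptions for illustration:

    /* If this McBSP instance is in threshold mode, program the largest
     * transmit threshold that the platform data allows. 'id' is assumed
     * to have been obtained via omap_mcbsp_request(). */
    if (omap_mcbsp_get_dma_op_mode(id) == MCBSP_DMA_MODE_THRESHOLD)
            omap_mcbsp_set_tx_threshold(id, omap_mcbsp_get_max_tx_threshold(id));
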
diff --git a/arch/arm/plat-s3c/include/plat/audio-simtec.h b/arch/arm/plat-s3c/include/plat/audio-simtec.h
new file mode 100644
index 000000000000..0f440b9168db
--- /dev/null
+++ b/arch/arm/plat-s3c/include/plat/audio-simtec.h
@@ -0,0 +1,37 @@
+/* arch/arm/plat-s3c/include/plat/audio-simtec.h
+ *
+ * Copyright 2008 Simtec Electronics
+ *     http://armlinux.simtec.co.uk/
+ *     Ben Dooks <ben@simtec.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Simtec Audio support.
+ */
+
+/**
+ * struct s3c24xx_audio_simtec_pdata - platform data for simtec audio
+ * @use_mpllin: Select codec clock from MPLLin
+ * @output_cdclk: Need to output CDCLK to the codec
+ * @have_mic: Set if we have a MIC socket
+ * @have_lout: Set if we have a LineOut socket
+ * @amp_gpio: GPIO pin to enable the AMP
+ * @amp_gain: Optional GPIOs to control AMP gain
+ */
+struct s3c24xx_audio_simtec_pdata {
+       unsigned int use_mpllin:1;
+       unsigned int output_cdclk:1;
+
+       unsigned int have_mic:1;
+       unsigned int have_lout:1;
+
+       int amp_gpio;
+       int amp_gain[2];
+
+       void (*startup)(void);
+};
+
+extern int simtec_audio_add(const char *codec_name,
+                           struct s3c24xx_audio_simtec_pdata *pdata);
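
A board file would fill in the platform data and register the audio device through the new helper. A hypothetical example; the codec name and the GPIO-less amplifier setup are placeholders, not taken from this patch:

    static struct s3c24xx_audio_simtec_pdata simtec_audio_pdata = {
            .have_mic  = 1,
            .have_lout = 1,
            .amp_gpio  = -1,        /* no amplifier-enable GPIO */
    };

    static int __init board_audio_init(void)
    {
            return simtec_audio_add("tlv320aic23", &simtec_audio_pdata);
    }
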
diff --git a/arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h b/arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h
index 0fad7571030e..07659dad1748 100644
--- a/arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h
+++ b/arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h
@@ -33,6 +33,11 @@
 #define S3C2412_IISCON_RXDMA_ACTIVE (1 << 1)
 #define S3C2412_IISCON_IIS_ACTIVE (1 << 0)
 
+#define S3C64XX_IISMOD_BLC_16BIT (0 << 13)
+#define S3C64XX_IISMOD_BLC_8BIT (1 << 13)
+#define S3C64XX_IISMOD_BLC_24BIT (2 << 13)
+#define S3C64XX_IISMOD_BLC_MASK (3 << 13)
+
 #define S3C64XX_IISMOD_IMS_PCLK (0 << 10)
 #define S3C64XX_IISMOD_IMS_SYSMUX (1 << 10)
 
diff --git a/arch/avr32/include/asm/socket.h b/arch/avr32/include/asm/socket.h
index 04c860619700..fe863f9794d5 100644
--- a/arch/avr32/include/asm/socket.h
+++ b/arch/avr32/include/asm/socket.h
@@ -57,4 +57,7 @@
 #define SO_TIMESTAMPING 37
 #define SCM_TIMESTAMPING SO_TIMESTAMPING
 
+#define SO_PROTOCOL 38
+#define SO_DOMAIN 39
+
 #endif /* __ASM_AVR32_SOCKET_H */
diff --git a/arch/avr32/include/asm/thread_info.h b/arch/avr32/include/asm/thread_info.h
index fc42de5ca209..fd0c5d7e9337 100644
--- a/arch/avr32/include/asm/thread_info.h
+++ b/arch/avr32/include/asm/thread_info.h
@@ -84,6 +84,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_MEMDIE 6
 #define TIF_RESTORE_SIGMASK 7 /* restore signal mask in do_signal */
 #define TIF_CPU_GOING_TO_SLEEP 8 /* CPU is entering sleep 0 mode */
+#define TIF_NOTIFY_RESUME 9 /* callback before returning to user */
 #define TIF_FREEZE 29
 #define TIF_DEBUG 30 /* debugging enabled */
 #define TIF_USERSPACE 31 /* true if FS sets userspace */
@@ -96,6 +97,7 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_MEMDIE (1 << TIF_MEMDIE)
 #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
 #define _TIF_CPU_GOING_TO_SLEEP (1 << TIF_CPU_GOING_TO_SLEEP)
+#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
 #define _TIF_FREEZE (1 << TIF_FREEZE)
 
 /* Note: The masks below must never span more than 16 bits! */
@@ -103,13 +105,15 @@ static inline struct thread_info *current_thread_info(void)
 /* work to do on interrupt/exception return */
 #define _TIF_WORK_MASK \
        ((1 << TIF_SIGPENDING) \
+        | _TIF_NOTIFY_RESUME \
         | (1 << TIF_NEED_RESCHED) \
         | (1 << TIF_POLLING_NRFLAG) \
         | (1 << TIF_BREAKPOINT) \
         | (1 << TIF_RESTORE_SIGMASK))
 
 /* work to do on any return to userspace */
-#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | (1 << TIF_SYSCALL_TRACE))
+#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | (1 << TIF_SYSCALL_TRACE) | \
+                          _TIF_NOTIFY_RESUME)
 /* work to do on return from debug mode */
 #define _TIF_DBGWORK_MASK (_TIF_WORK_MASK & ~(1 << TIF_BREAKPOINT))
 
diff --git a/arch/avr32/kernel/entry-avr32b.S b/arch/avr32/kernel/entry-avr32b.S
index 009a80155d67..169268c40ae2 100644
--- a/arch/avr32/kernel/entry-avr32b.S
+++ b/arch/avr32/kernel/entry-avr32b.S
@@ -281,7 +281,7 @@ syscall_exit_work:
        ld.w    r1, r0[TI_flags]
        rjmp    1b
 
-2:     mov     r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK
+2:     mov     r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NOTIFY_RESUME
        tst     r1, r2
        breq    3f
        unmask_interrupts
diff --git a/arch/avr32/kernel/signal.c b/arch/avr32/kernel/signal.c
index 27227561bad6..64f886fac2ef 100644
--- a/arch/avr32/kernel/signal.c
+++ b/arch/avr32/kernel/signal.c
@@ -16,6 +16,7 @@
 #include <linux/ptrace.h>
 #include <linux/unistd.h>
 #include <linux/freezer.h>
+#include <linux/tracehook.h>
 
 #include <asm/uaccess.h>
 #include <asm/ucontext.h>
@@ -322,4 +323,11 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, struct thread_info *ti)
 
        if (ti->flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
                do_signal(regs, &current->blocked, syscall);
+
+       if (ti->flags & _TIF_NOTIFY_RESUME) {
+               clear_thread_flag(TIF_NOTIFY_RESUME);
+               tracehook_notify_resume(regs);
+               if (current->replacement_session_keyring)
+                       key_replace_session_keyring();
+       }
 }
diff --git a/arch/cris/include/asm/socket.h b/arch/cris/include/asm/socket.h
index d5cf74005408..45ec49bdb7b1 100644
--- a/arch/cris/include/asm/socket.h
+++ b/arch/cris/include/asm/socket.h
@@ -59,6 +59,9 @@
 #define SO_TIMESTAMPING 37
 #define SCM_TIMESTAMPING SO_TIMESTAMPING
 
+#define SO_PROTOCOL 38
+#define SO_DOMAIN 39
+
 #endif /* _ASM_SOCKET_H */
 
 
diff --git a/arch/cris/kernel/ptrace.c b/arch/cris/kernel/ptrace.c
index b326023baab2..48b0f3912632 100644
--- a/arch/cris/kernel/ptrace.c
+++ b/arch/cris/kernel/ptrace.c
@@ -16,6 +16,7 @@
 #include <linux/errno.h>
 #include <linux/ptrace.h>
 #include <linux/user.h>
+#include <linux/tracehook.h>
 
 #include <asm/uaccess.h>
 #include <asm/page.h>
@@ -36,4 +37,11 @@ void do_notify_resume(int canrestart, struct pt_regs *regs,
        /* deal with pending signal delivery */
        if (thread_info_flags & _TIF_SIGPENDING)
                do_signal(canrestart,regs);
+
+       if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+               clear_thread_flag(TIF_NOTIFY_RESUME);
+               tracehook_notify_resume(regs);
+               if (current->replacement_session_keyring)
+                       key_replace_session_keyring();
+       }
 }
diff --git a/arch/frv/include/asm/socket.h b/arch/frv/include/asm/socket.h
index 57c3d4054e8b..2dea726095c2 100644
--- a/arch/frv/include/asm/socket.h
+++ b/arch/frv/include/asm/socket.h
@@ -57,5 +57,8 @@
 #define SO_TIMESTAMPING 37
 #define SCM_TIMESTAMPING SO_TIMESTAMPING
 
+#define SO_PROTOCOL 38
+#define SO_DOMAIN 39
+
 #endif /* _ASM_SOCKET_H */
 
diff --git a/arch/frv/kernel/signal.c b/arch/frv/kernel/signal.c
index 4a7a62c6e783..6b0a2b6fed6a 100644
--- a/arch/frv/kernel/signal.c
+++ b/arch/frv/kernel/signal.c
@@ -572,6 +572,8 @@ asmlinkage void do_notify_resume(__u32 thread_info_flags)
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(__frame);
+               if (current->replacement_session_keyring)
+                       key_replace_session_keyring();
        }
 
 } /* end do_notify_resume() */
diff --git a/arch/h8300/include/asm/socket.h b/arch/h8300/include/asm/socket.h
index 602518a70a1a..1547f01c8e22 100644
--- a/arch/h8300/include/asm/socket.h
+++ b/arch/h8300/include/asm/socket.h
@@ -57,4 +57,7 @@
 #define SO_TIMESTAMPING 37
 #define SCM_TIMESTAMPING SO_TIMESTAMPING
 
+#define SO_PROTOCOL 38
+#define SO_DOMAIN 39
+
 #endif /* _ASM_SOCKET_H */
diff --git a/arch/h8300/include/asm/thread_info.h b/arch/h8300/include/asm/thread_info.h
index 8bbc8b0ee45d..70e67e47d020 100644
--- a/arch/h8300/include/asm/thread_info.h
+++ b/arch/h8300/include/asm/thread_info.h
@@ -89,6 +89,7 @@ static inline struct thread_info *current_thread_info(void)
                                   TIF_NEED_RESCHED */
 #define TIF_MEMDIE 4
 #define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
+#define TIF_NOTIFY_RESUME 6 /* callback before returning to user */
 #define TIF_FREEZE 16 /* is freezing for suspend */
 
 /* as above, but as bit values */
@@ -97,6 +98,7 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
 #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
+#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
 #define _TIF_FREEZE (1<<TIF_FREEZE)
 
 #define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c
index cf3472f7389b..af842c369d24 100644
--- a/arch/h8300/kernel/signal.c
+++ b/arch/h8300/kernel/signal.c
@@ -39,6 +39,7 @@
 #include <linux/tty.h>
 #include <linux/binfmts.h>
 #include <linux/freezer.h>
+#include <linux/tracehook.h>
 
 #include <asm/setup.h>
 #include <asm/uaccess.h>
@@ -552,4 +553,11 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags)
 {
        if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
                do_signal(regs, NULL);
+
+       if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+               clear_thread_flag(TIF_NOTIFY_RESUME);
+               tracehook_notify_resume(regs);
+               if (current->replacement_session_keyring)
+                       key_replace_session_keyring();
+       }
 }
diff --git a/arch/ia64/hp/sim/simeth.c b/arch/ia64/hp/sim/simeth.c
index e4d8fde68103..7e81966ce481 100644
--- a/arch/ia64/hp/sim/simeth.c
+++ b/arch/ia64/hp/sim/simeth.c
@@ -412,7 +412,7 @@ simeth_tx(struct sk_buff *skb, struct net_device *dev)
         */
 
        dev_kfree_skb(skb);
-       return 0;
+       return NETDEV_TX_OK;
 }
 
 static inline struct sk_buff *
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h index 5a61b5c2e18f..8d3c79cd81e7 100644 --- a/arch/ia64/include/asm/dma-mapping.h +++ b/arch/ia64/include/asm/dma-mapping.h | |||
@@ -44,7 +44,6 @@ static inline void dma_free_coherent(struct device *dev, size_t size, | |||
44 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) | 44 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) |
45 | 45 | ||
46 | #define get_dma_ops(dev) platform_dma_get_ops(dev) | 46 | #define get_dma_ops(dev) platform_dma_get_ops(dev) |
47 | #define flush_write_buffers() | ||
48 | 47 | ||
49 | #include <asm-generic/dma-mapping-common.h> | 48 | #include <asm-generic/dma-mapping-common.h> |
50 | 49 | ||
@@ -69,6 +68,24 @@ dma_set_mask (struct device *dev, u64 mask) | |||
69 | return 0; | 68 | return 0; |
70 | } | 69 | } |
71 | 70 | ||
71 | static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) | ||
72 | { | ||
73 | if (!dev->dma_mask) | ||
74 | return 0; | ||
75 | |||
76 | return addr + size <= *dev->dma_mask; | ||
77 | } | ||
78 | |||
79 | static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) | ||
80 | { | ||
81 | return paddr; | ||
82 | } | ||
83 | |||
84 | static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) | ||
85 | { | ||
86 | return daddr; | ||
87 | } | ||
88 | |||
72 | extern int dma_get_cache_alignment(void); | 89 | extern int dma_get_cache_alignment(void); |
73 | 90 | ||
74 | static inline void | 91 | static inline void |
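
These three helpers give generic code a uniform way to ask "can this device reach this bus address?"; on ia64 the bus address equals the physical address, so both conversions are identities. A sketch of the intended caller (assumed usage, not from this patch):

    void *buf = kmalloc(size, GFP_KERNEL);
    dma_addr_t bus = phys_to_dma(dev, virt_to_phys(buf));

    if (!dma_capable(dev, bus, size)) {
        /* range lies above the device's dma_mask: use a bounce buffer */
    }
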
diff --git a/arch/ia64/include/asm/socket.h b/arch/ia64/include/asm/socket.h index 745421225ec6..0b0d5ff062e5 100644 --- a/arch/ia64/include/asm/socket.h +++ b/arch/ia64/include/asm/socket.h | |||
@@ -66,4 +66,7 @@ | |||
66 | #define SO_TIMESTAMPING 37 | 66 | #define SO_TIMESTAMPING 37 |
67 | #define SCM_TIMESTAMPING SO_TIMESTAMPING | 67 | #define SCM_TIMESTAMPING SO_TIMESTAMPING |
68 | 68 | ||
69 | #define SO_PROTOCOL 38 | ||
70 | #define SO_DOMAIN 39 | ||
71 | |||
69 | #endif /* _ASM_IA64_SOCKET_H */ | 72 | #endif /* _ASM_IA64_SOCKET_H */ |
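
SO_PROTOCOL and SO_DOMAIN get per-arch numbering (the generic 38/39 here; alpha, mips and parisc reuse their historic 0x10xx option space), but the semantics are uniform: read-only getsockopt() queries returning the socket(2) arguments. Userspace sketch:

    #include <stdio.h>
    #include <sys/socket.h>

    int main(void)
    {
        int s = socket(AF_INET, SOCK_STREAM, 0);
        int v;
        socklen_t l = sizeof(v);

        if (getsockopt(s, SOL_SOCKET, SO_PROTOCOL, &v, &l) == 0)
            printf("protocol %d\n", v);   /* IPPROTO_TCP (6) here */
        l = sizeof(v);
        if (getsockopt(s, SOL_SOCKET, SO_DOMAIN, &v, &l) == 0)
            printf("domain %d\n", v);     /* AF_INET (2) here */
        return 0;
    }
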
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index 5d7c0e5b9e76..89969e950045 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c | |||
@@ -192,6 +192,8 @@ do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall) | |||
192 | if (test_thread_flag(TIF_NOTIFY_RESUME)) { | 192 | if (test_thread_flag(TIF_NOTIFY_RESUME)) { |
193 | clear_thread_flag(TIF_NOTIFY_RESUME); | 193 | clear_thread_flag(TIF_NOTIFY_RESUME); |
194 | tracehook_notify_resume(&scr->pt); | 194 | tracehook_notify_resume(&scr->pt); |
195 | if (current->replacement_session_keyring) | ||
196 | key_replace_session_keyring(); | ||
195 | } | 197 | } |
196 | 198 | ||
197 | /* copy user rbs to kernel rbs */ | 199 | /* copy user rbs to kernel rbs */ |
diff --git a/arch/ia64/xen/time.c b/arch/ia64/xen/time.c index fb8332690179..dbeadb9c8e20 100644 --- a/arch/ia64/xen/time.c +++ b/arch/ia64/xen/time.c | |||
@@ -133,8 +133,7 @@ consider_steal_time(unsigned long new_itm) | |||
133 | account_idle_ticks(blocked); | 133 | account_idle_ticks(blocked); |
134 | run_local_timers(); | 134 | run_local_timers(); |
135 | 135 | ||
136 | if (rcu_pending(cpu)) | 136 | rcu_check_callbacks(cpu, user_mode(get_irq_regs())); |
137 | rcu_check_callbacks(cpu, user_mode(get_irq_regs())); | ||
138 | 137 | ||
139 | scheduler_tick(); | 138 | scheduler_tick(); |
140 | run_posix_cpu_timers(p); | 139 | run_posix_cpu_timers(p); |
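
Dropping the rcu_pending() guard assumes rcu_check_callbacks() is now safe and cheap to call unconditionally from the tick. A sketch of the assumed shape (not the real tree-RCU code):

    void rcu_check_callbacks(int cpu, int user)
    {
        /* assumption: the early-out moved inside the callee */
        if (!rcu_pending(cpu))
            return;
        raise_softirq(RCU_SOFTIRQ);   /* defer the real work to softirq */
    }
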
diff --git a/arch/m32r/include/asm/socket.h b/arch/m32r/include/asm/socket.h index be7ed589af5c..3390a864f224 100644 --- a/arch/m32r/include/asm/socket.h +++ b/arch/m32r/include/asm/socket.h | |||
@@ -57,4 +57,7 @@ | |||
57 | #define SO_TIMESTAMPING 37 | 57 | #define SO_TIMESTAMPING 37 |
58 | #define SCM_TIMESTAMPING SO_TIMESTAMPING | 58 | #define SCM_TIMESTAMPING SO_TIMESTAMPING |
59 | 59 | ||
60 | #define SO_PROTOCOL 38 | ||
61 | #define SO_DOMAIN 39 | ||
62 | |||
60 | #endif /* _ASM_M32R_SOCKET_H */ | 63 | #endif /* _ASM_M32R_SOCKET_H */ |
diff --git a/arch/m32r/include/asm/thread_info.h b/arch/m32r/include/asm/thread_info.h index 07bb5bd00e2a..71578151a403 100644 --- a/arch/m32r/include/asm/thread_info.h +++ b/arch/m32r/include/asm/thread_info.h | |||
@@ -149,6 +149,7 @@ static inline unsigned int get_thread_fault_code(void) | |||
149 | #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ | 149 | #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ |
150 | #define TIF_SINGLESTEP 3 /* restore singlestep on return to user mode */ | 150 | #define TIF_SINGLESTEP 3 /* restore singlestep on return to user mode */ |
151 | #define TIF_IRET 4 /* return with iret */ | 151 | #define TIF_IRET 4 /* return with iret */ |
152 | #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */ | ||
152 | #define TIF_RESTORE_SIGMASK 8 /* restore signal mask in do_signal() */ | 153 | #define TIF_RESTORE_SIGMASK 8 /* restore signal mask in do_signal() */ |
153 | #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ | 154 | #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ |
154 | #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ | 155 | #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ |
@@ -160,6 +161,7 @@ static inline unsigned int get_thread_fault_code(void) | |||
160 | #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) | 161 | #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) |
161 | #define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP) | 162 | #define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP) |
162 | #define _TIF_IRET (1<<TIF_IRET) | 163 | #define _TIF_IRET (1<<TIF_IRET) |
164 | #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) | ||
163 | #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) | 165 | #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) |
164 | #define _TIF_USEDFPU (1<<TIF_USEDFPU) | 166 | #define _TIF_USEDFPU (1<<TIF_USEDFPU) |
165 | #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) | 167 | #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) |
diff --git a/arch/m32r/kernel/signal.c b/arch/m32r/kernel/signal.c index 18124542a6eb..144b0f124fc7 100644 --- a/arch/m32r/kernel/signal.c +++ b/arch/m32r/kernel/signal.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/stddef.h> | 21 | #include <linux/stddef.h> |
22 | #include <linux/personality.h> | 22 | #include <linux/personality.h> |
23 | #include <linux/freezer.h> | 23 | #include <linux/freezer.h> |
24 | #include <linux/tracehook.h> | ||
24 | #include <asm/cacheflush.h> | 25 | #include <asm/cacheflush.h> |
25 | #include <asm/ucontext.h> | 26 | #include <asm/ucontext.h> |
26 | #include <asm/uaccess.h> | 27 | #include <asm/uaccess.h> |
@@ -408,5 +409,12 @@ void do_notify_resume(struct pt_regs *regs, sigset_t *oldset, | |||
408 | if (thread_info_flags & _TIF_SIGPENDING) | 409 | if (thread_info_flags & _TIF_SIGPENDING) |
409 | do_signal(regs,oldset); | 410 | do_signal(regs,oldset); |
410 | 411 | ||
412 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { | ||
413 | clear_thread_flag(TIF_NOTIFY_RESUME); | ||
414 | tracehook_notify_resume(regs); | ||
415 | if (current->replacement_session_keyring) | ||
416 | key_replace_session_keyring(); | ||
417 | } | ||
418 | |||
411 | clear_thread_flag(TIF_IRET); | 419 | clear_thread_flag(TIF_IRET); |
412 | } | 420 | } |
diff --git a/arch/m68k/include/asm/entry_mm.h b/arch/m68k/include/asm/entry_mm.h index 5202f5a5b420..474125886218 100644 --- a/arch/m68k/include/asm/entry_mm.h +++ b/arch/m68k/include/asm/entry_mm.h | |||
@@ -46,7 +46,6 @@ | |||
46 | #define curptr a2 | 46 | #define curptr a2 |
47 | 47 | ||
48 | LFLUSH_I_AND_D = 0x00000808 | 48 | LFLUSH_I_AND_D = 0x00000808 |
49 | LSIGTRAP = 5 | ||
50 | 49 | ||
51 | /* process bits for task_struct.ptrace */ | 50 | /* process bits for task_struct.ptrace */ |
52 | PT_TRACESYS_OFF = 3 | 51 | PT_TRACESYS_OFF = 3 |
@@ -118,9 +117,6 @@ PT_DTRACE_BIT = 2 | |||
118 | #define STR(X) STR1(X) | 117 | #define STR(X) STR1(X) |
119 | #define STR1(X) #X | 118 | #define STR1(X) #X |
120 | 119 | ||
121 | #define PT_OFF_ORIG_D0 0x24 | ||
122 | #define PT_OFF_FORMATVEC 0x32 | ||
123 | #define PT_OFF_SR 0x2C | ||
124 | #define SAVE_ALL_INT \ | 120 | #define SAVE_ALL_INT \ |
125 | "clrl %%sp@-;" /* stk_adj */ \ | 121 | "clrl %%sp@-;" /* stk_adj */ \ |
126 | "pea -1:w;" /* orig d0 = -1 */ \ | 122 | "pea -1:w;" /* orig d0 = -1 */ \ |
diff --git a/arch/m68k/include/asm/entry_no.h b/arch/m68k/include/asm/entry_no.h index c2553d26273d..907ed03d792f 100644 --- a/arch/m68k/include/asm/entry_no.h +++ b/arch/m68k/include/asm/entry_no.h | |||
@@ -72,8 +72,8 @@ LENOSYS = 38 | |||
72 | lea %sp@(-32),%sp /* space for 8 regs */ | 72 | lea %sp@(-32),%sp /* space for 8 regs */ |
73 | moveml %d1-%d5/%a0-%a2,%sp@ | 73 | moveml %d1-%d5/%a0-%a2,%sp@ |
74 | movel sw_usp,%a0 /* get usp */ | 74 | movel sw_usp,%a0 /* get usp */ |
75 | movel %a0@-,%sp@(PT_PC) /* copy exception program counter */ | 75 | movel %a0@-,%sp@(PT_OFF_PC) /* copy exception program counter */ |
76 | movel %a0@-,%sp@(PT_FORMATVEC)/* copy exception format/vector/sr */ | 76 | movel %a0@-,%sp@(PT_OFF_FORMATVEC)/* copy exception format/vector/sr */
77 | bra 7f | 77 | bra 7f |
78 | 6: | 78 | 6: |
79 | clrl %sp@- /* stkadj */ | 79 | clrl %sp@- /* stkadj */ |
@@ -89,8 +89,8 @@ LENOSYS = 38 | |||
89 | bnes 8f /* no, skip */ | 89 | bnes 8f /* no, skip */ |
90 | move #0x2700,%sr /* disable intrs */ | 90 | move #0x2700,%sr /* disable intrs */ |
91 | movel sw_usp,%a0 /* get usp */ | 91 | movel sw_usp,%a0 /* get usp */ |
92 | movel %sp@(PT_PC),%a0@- /* copy exception program counter */ | 92 | movel %sp@(PT_OFF_PC),%a0@- /* copy exception program counter */ |
93 | movel %sp@(PT_FORMATVEC),%a0@-/* copy exception format/vector/sr */ | 93 | movel %sp@(PT_OFF_FORMATVEC),%a0@-/* copy exception format/vector/sr */
94 | moveml %sp@,%d1-%d5/%a0-%a2 | 94 | moveml %sp@,%d1-%d5/%a0-%a2 |
95 | lea %sp@(32),%sp /* space for 8 regs */ | 95 | lea %sp@(32),%sp /* space for 8 regs */ |
96 | movel %sp@+,%d0 | 96 | movel %sp@+,%d0 |
diff --git a/arch/m68k/include/asm/math-emu.h b/arch/m68k/include/asm/math-emu.h index ddfab96403cb..5e9249b0014c 100644 --- a/arch/m68k/include/asm/math-emu.h +++ b/arch/m68k/include/asm/math-emu.h | |||
@@ -145,16 +145,16 @@ extern unsigned int fp_debugprint; | |||
145 | * these are only used during instruction decoding | 145 | * these are only used during instruction decoding |
146 | * where we always know how deep we're on the stack. | 146 | * where we always know how deep we're on the stack. |
147 | */ | 147 | */ |
148 | #define FPS_DO (PT_D0) | 148 | #define FPS_DO (PT_OFF_D0) |
149 | #define FPS_D1 (PT_D1) | 149 | #define FPS_D1 (PT_OFF_D1) |
150 | #define FPS_D2 (PT_D2) | 150 | #define FPS_D2 (PT_OFF_D2) |
151 | #define FPS_A0 (PT_A0) | 151 | #define FPS_A0 (PT_OFF_A0) |
152 | #define FPS_A1 (PT_A1) | 152 | #define FPS_A1 (PT_OFF_A1) |
153 | #define FPS_A2 (PT_A2) | 153 | #define FPS_A2 (PT_OFF_A2) |
154 | #define FPS_SR (PT_SR) | 154 | #define FPS_SR (PT_OFF_SR) |
155 | #define FPS_PC (PT_PC) | 155 | #define FPS_PC (PT_OFF_PC) |
156 | #define FPS_EA (PT_PC+6) | 156 | #define FPS_EA (PT_OFF_PC+6) |
157 | #define FPS_PC2 (PT_PC+10) | 157 | #define FPS_PC2 (PT_OFF_PC+10) |
158 | 158 | ||
159 | .macro fp_get_fp_reg | 159 | .macro fp_get_fp_reg |
160 | lea (FPD_FPREG,FPDATA,%d0.w*4),%a0 | 160 | lea (FPD_FPREG,FPDATA,%d0.w*4),%a0 |
diff --git a/arch/m68k/include/asm/socket.h b/arch/m68k/include/asm/socket.h index ca87f938b03f..eee01cce921b 100644 --- a/arch/m68k/include/asm/socket.h +++ b/arch/m68k/include/asm/socket.h | |||
@@ -57,4 +57,7 @@ | |||
57 | #define SO_TIMESTAMPING 37 | 57 | #define SO_TIMESTAMPING 37 |
58 | #define SCM_TIMESTAMPING SO_TIMESTAMPING | 58 | #define SCM_TIMESTAMPING SO_TIMESTAMPING |
59 | 59 | ||
60 | #define SO_PROTOCOL 38 | ||
61 | #define SO_DOMAIN 39 | ||
62 | |||
60 | #endif /* _ASM_SOCKET_H */ | 63 | #endif /* _ASM_SOCKET_H */ |
diff --git a/arch/m68k/include/asm/thread_info_mm.h b/arch/m68k/include/asm/thread_info_mm.h index 6ea5c33b3c56..b6da3882be9b 100644 --- a/arch/m68k/include/asm/thread_info_mm.h +++ b/arch/m68k/include/asm/thread_info_mm.h | |||
@@ -1,6 +1,10 @@ | |||
1 | #ifndef _ASM_M68K_THREAD_INFO_H | 1 | #ifndef _ASM_M68K_THREAD_INFO_H |
2 | #define _ASM_M68K_THREAD_INFO_H | 2 | #define _ASM_M68K_THREAD_INFO_H |
3 | 3 | ||
4 | #ifndef ASM_OFFSETS_C | ||
5 | #include <asm/asm-offsets.h> | ||
6 | #endif | ||
7 | #include <asm/current.h> | ||
4 | #include <asm/types.h> | 8 | #include <asm/types.h> |
5 | #include <asm/page.h> | 9 | #include <asm/page.h> |
6 | 10 | ||
@@ -31,7 +35,12 @@ struct thread_info { | |||
31 | #define init_thread_info (init_task.thread.info) | 35 | #define init_thread_info (init_task.thread.info) |
32 | #define init_stack (init_thread_union.stack) | 36 | #define init_stack (init_thread_union.stack) |
33 | 37 | ||
34 | #define task_thread_info(tsk) (&(tsk)->thread.info) | 38 | #ifdef ASM_OFFSETS_C |
39 | #define task_thread_info(tsk) ((struct thread_info *) NULL) | ||
40 | #else | ||
41 | #define task_thread_info(tsk) ((struct thread_info *)((char *)tsk+TASK_TINFO)) | ||
42 | #endif | ||
43 | |||
35 | #define task_stack_page(tsk) ((tsk)->stack) | 44 | #define task_stack_page(tsk) ((tsk)->stack) |
36 | #define current_thread_info() task_thread_info(current) | 45 | #define current_thread_info() task_thread_info(current) |
37 | 46 | ||
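
task_thread_info() now reads through a byte offset (TASK_TINFO) that asm-offsets.c itself generates, which is circular: asm-offsets.h does not exist yet while asm-offsets.c is being compiled. The ASM_OFFSETS_C guard breaks the cycle by selecting a dummy definition for that one translation unit. Sketch of the bootstrap:

    /* asm-offsets.c compiles with ASM_OFFSETS_C defined, so
     * thread_info_mm.h skips <asm/asm-offsets.h> and stubs
     * task_thread_info() to NULL; every other file, built after
     * asm-offsets.h exists, sees the real TASK_TINFO offset. */
    #define ASM_OFFSETS_C
    #include <linux/sched.h>    /* safe: pulls in the stubbed header */
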
diff --git a/arch/m68k/kernel/asm-offsets.c b/arch/m68k/kernel/asm-offsets.c index b1f012f6c493..73e5e581245b 100644 --- a/arch/m68k/kernel/asm-offsets.c +++ b/arch/m68k/kernel/asm-offsets.c | |||
@@ -8,6 +8,8 @@ | |||
8 | * #defines from the assembly-language output. | 8 | * #defines from the assembly-language output. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define ASM_OFFSETS_C | ||
12 | |||
11 | #include <linux/stddef.h> | 13 | #include <linux/stddef.h> |
12 | #include <linux/sched.h> | 14 | #include <linux/sched.h> |
13 | #include <linux/kernel_stat.h> | 15 | #include <linux/kernel_stat.h> |
@@ -27,6 +29,9 @@ int main(void) | |||
27 | DEFINE(TASK_INFO, offsetof(struct task_struct, thread.info)); | 29 | DEFINE(TASK_INFO, offsetof(struct task_struct, thread.info)); |
28 | DEFINE(TASK_MM, offsetof(struct task_struct, mm)); | 30 | DEFINE(TASK_MM, offsetof(struct task_struct, mm)); |
29 | DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm)); | 31 | DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm)); |
32 | #ifdef CONFIG_MMU | ||
33 | DEFINE(TASK_TINFO, offsetof(struct task_struct, thread.info)); | ||
34 | #endif | ||
30 | 35 | ||
31 | /* offsets into the thread struct */ | 36 | /* offsets into the thread struct */ |
32 | DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp)); | 37 | DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp)); |
@@ -44,20 +49,20 @@ int main(void) | |||
44 | DEFINE(TINFO_FLAGS, offsetof(struct thread_info, flags)); | 49 | DEFINE(TINFO_FLAGS, offsetof(struct thread_info, flags)); |
45 | 50 | ||
46 | /* offsets into the pt_regs */ | 51 | /* offsets into the pt_regs */ |
47 | DEFINE(PT_D0, offsetof(struct pt_regs, d0)); | 52 | DEFINE(PT_OFF_D0, offsetof(struct pt_regs, d0)); |
48 | DEFINE(PT_ORIG_D0, offsetof(struct pt_regs, orig_d0)); | 53 | DEFINE(PT_OFF_ORIG_D0, offsetof(struct pt_regs, orig_d0)); |
49 | DEFINE(PT_D1, offsetof(struct pt_regs, d1)); | 54 | DEFINE(PT_OFF_D1, offsetof(struct pt_regs, d1)); |
50 | DEFINE(PT_D2, offsetof(struct pt_regs, d2)); | 55 | DEFINE(PT_OFF_D2, offsetof(struct pt_regs, d2)); |
51 | DEFINE(PT_D3, offsetof(struct pt_regs, d3)); | 56 | DEFINE(PT_OFF_D3, offsetof(struct pt_regs, d3)); |
52 | DEFINE(PT_D4, offsetof(struct pt_regs, d4)); | 57 | DEFINE(PT_OFF_D4, offsetof(struct pt_regs, d4)); |
53 | DEFINE(PT_D5, offsetof(struct pt_regs, d5)); | 58 | DEFINE(PT_OFF_D5, offsetof(struct pt_regs, d5)); |
54 | DEFINE(PT_A0, offsetof(struct pt_regs, a0)); | 59 | DEFINE(PT_OFF_A0, offsetof(struct pt_regs, a0)); |
55 | DEFINE(PT_A1, offsetof(struct pt_regs, a1)); | 60 | DEFINE(PT_OFF_A1, offsetof(struct pt_regs, a1)); |
56 | DEFINE(PT_A2, offsetof(struct pt_regs, a2)); | 61 | DEFINE(PT_OFF_A2, offsetof(struct pt_regs, a2)); |
57 | DEFINE(PT_PC, offsetof(struct pt_regs, pc)); | 62 | DEFINE(PT_OFF_PC, offsetof(struct pt_regs, pc)); |
58 | DEFINE(PT_SR, offsetof(struct pt_regs, sr)); | 63 | DEFINE(PT_OFF_SR, offsetof(struct pt_regs, sr)); |
59 | /* bitfields are a bit difficult */ | 64 | /* bitfields are a bit difficult */ |
60 | DEFINE(PT_VECTOR, offsetof(struct pt_regs, pc) + 4); | 65 | DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, pc) + 4); |
61 | 66 | ||
62 | /* offsets into the irq_handler struct */ | 67 | /* offsets into the irq_handler struct */ |
63 | DEFINE(IRQ_HANDLER, offsetof(struct irq_node, handler)); | 68 | DEFINE(IRQ_HANDLER, offsetof(struct irq_node, handler)); |
@@ -84,10 +89,10 @@ int main(void) | |||
84 | DEFINE(FONT_DESC_PREF, offsetof(struct font_desc, pref)); | 89 | DEFINE(FONT_DESC_PREF, offsetof(struct font_desc, pref)); |
85 | 90 | ||
86 | /* signal defines */ | 91 | /* signal defines */ |
87 | DEFINE(SIGSEGV, SIGSEGV); | 92 | DEFINE(LSIGSEGV, SIGSEGV); |
88 | DEFINE(SEGV_MAPERR, SEGV_MAPERR); | 93 | DEFINE(LSEGV_MAPERR, SEGV_MAPERR); |
89 | DEFINE(SIGTRAP, SIGTRAP); | 94 | DEFINE(LSIGTRAP, SIGTRAP); |
90 | DEFINE(TRAP_TRACE, TRAP_TRACE); | 95 | DEFINE(LTRAP_TRACE, TRAP_TRACE); |
91 | 96 | ||
92 | /* offsets into the custom struct */ | 97 | /* offsets into the custom struct */ |
93 | DEFINE(CUSTOMBASE, &amiga_custom); | 98 | DEFINE(CUSTOMBASE, &amiga_custom); |
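
Two renames in one hunk: the pt_regs offsets gain a PT_OFF_ prefix so they no longer shadow ptrace's PT_* names, and the signal constants gain an L prefix so DEFINE(SIGSEGV, SIGSEGV) stops colliding with the C macro of the same name; the assembly users above and below are updated to match. The DEFINE() machinery itself is the standard kbuild trick (this macro is real, from include/linux/kbuild.h):

    #define DEFINE(sym, val) \
        asm volatile("\n->" #sym " %0 " #val : : "i" (val))

    /* The build greps the generated .s file for "->" markers, so
     * DEFINE(PT_OFF_D0, offsetof(struct pt_regs, d0)); becomes
     * #define PT_OFF_D0 32   (value illustrative) in asm-offsets.h. */
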
diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S index c3735cd6207e..922f52e7ed1a 100644 --- a/arch/m68k/kernel/entry.S +++ b/arch/m68k/kernel/entry.S | |||
@@ -77,17 +77,17 @@ ENTRY(ret_from_fork) | |||
77 | jra .Lret_from_exception | 77 | jra .Lret_from_exception |
78 | 78 | ||
79 | do_trace_entry: | 79 | do_trace_entry: |
80 | movel #-ENOSYS,%sp@(PT_D0) | needed for strace | 80 | movel #-ENOSYS,%sp@(PT_OFF_D0) | needed for strace
81 | subql #4,%sp | 81 | subql #4,%sp |
82 | SAVE_SWITCH_STACK | 82 | SAVE_SWITCH_STACK |
83 | jbsr syscall_trace | 83 | jbsr syscall_trace |
84 | RESTORE_SWITCH_STACK | 84 | RESTORE_SWITCH_STACK |
85 | addql #4,%sp | 85 | addql #4,%sp |
86 | movel %sp@(PT_ORIG_D0),%d0 | 86 | movel %sp@(PT_OFF_ORIG_D0),%d0 |
87 | cmpl #NR_syscalls,%d0 | 87 | cmpl #NR_syscalls,%d0 |
88 | jcs syscall | 88 | jcs syscall |
89 | badsys: | 89 | badsys: |
90 | movel #-ENOSYS,%sp@(PT_D0) | 90 | movel #-ENOSYS,%sp@(PT_OFF_D0) |
91 | jra ret_from_syscall | 91 | jra ret_from_syscall |
92 | 92 | ||
93 | do_trace_exit: | 93 | do_trace_exit: |
@@ -103,7 +103,7 @@ ENTRY(ret_from_signal) | |||
103 | addql #4,%sp | 103 | addql #4,%sp |
104 | /* on 68040 complete pending writebacks if any */ | 104 | /* on 68040 complete pending writebacks if any */ |
105 | #ifdef CONFIG_M68040 | 105 | #ifdef CONFIG_M68040 |
106 | bfextu %sp@(PT_VECTOR){#0,#4},%d0 | 106 | bfextu %sp@(PT_OFF_FORMATVEC){#0,#4},%d0 |
107 | subql #7,%d0 | bus error frame ? | 107 | subql #7,%d0 | bus error frame ? |
108 | jbne 1f | 108 | jbne 1f |
109 | movel %sp,%sp@- | 109 | movel %sp,%sp@- |
@@ -127,7 +127,7 @@ ENTRY(system_call) | |||
127 | jcc badsys | 127 | jcc badsys |
128 | syscall: | 128 | syscall: |
129 | jbsr @(sys_call_table,%d0:l:4)@(0) | 129 | jbsr @(sys_call_table,%d0:l:4)@(0) |
130 | movel %d0,%sp@(PT_D0) | save the return value | 130 | movel %d0,%sp@(PT_OFF_D0) | save the return value |
131 | ret_from_syscall: | 131 | ret_from_syscall: |
132 | |oriw #0x0700,%sr | 132 | |oriw #0x0700,%sr |
133 | movew %curptr@(TASK_INFO+TINFO_FLAGS+2),%d0 | 133 | movew %curptr@(TASK_INFO+TINFO_FLAGS+2),%d0 |
@@ -135,7 +135,7 @@ ret_from_syscall: | |||
135 | 1: RESTORE_ALL | 135 | 1: RESTORE_ALL |
136 | 136 | ||
137 | syscall_exit_work: | 137 | syscall_exit_work: |
138 | btst #5,%sp@(PT_SR) | check if returning to kernel | 138 | btst #5,%sp@(PT_OFF_SR) | check if returning to kernel |
139 | bnes 1b | if so, skip resched, signals | 139 | bnes 1b | if so, skip resched, signals |
140 | lslw #1,%d0 | 140 | lslw #1,%d0 |
141 | jcs do_trace_exit | 141 | jcs do_trace_exit |
@@ -148,7 +148,7 @@ syscall_exit_work: | |||
148 | 148 | ||
149 | ENTRY(ret_from_exception) | 149 | ENTRY(ret_from_exception) |
150 | .Lret_from_exception: | 150 | .Lret_from_exception: |
151 | btst #5,%sp@(PT_SR) | check if returning to kernel | 151 | btst #5,%sp@(PT_OFF_SR) | check if returning to kernel |
152 | bnes 1f | if so, skip resched, signals | 152 | bnes 1f | if so, skip resched, signals |
153 | | only allow interrupts when we are really the last one on the | 153 | | only allow interrupts when we are really the last one on the |
154 | | kernel stack, otherwise stack overflow can occur during | 154 | | kernel stack, otherwise stack overflow can occur during |
@@ -182,7 +182,7 @@ do_signal_return: | |||
182 | jbra resume_userspace | 182 | jbra resume_userspace |
183 | 183 | ||
184 | do_delayed_trace: | 184 | do_delayed_trace: |
185 | bclr #7,%sp@(PT_SR) | clear trace bit in SR | 185 | bclr #7,%sp@(PT_OFF_SR) | clear trace bit in SR |
186 | pea 1 | send SIGTRAP | 186 | pea 1 | send SIGTRAP |
187 | movel %curptr,%sp@- | 187 | movel %curptr,%sp@- |
188 | pea LSIGTRAP | 188 | pea LSIGTRAP |
@@ -199,7 +199,7 @@ ENTRY(auto_inthandler) | |||
199 | GET_CURRENT(%d0) | 199 | GET_CURRENT(%d0) |
200 | addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1) | 200 | addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1) |
201 | | put exception # in d0 | 201 | | put exception # in d0 |
202 | bfextu %sp@(PT_VECTOR){#4,#10},%d0 | 202 | bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0 |
203 | subw #VEC_SPUR,%d0 | 203 | subw #VEC_SPUR,%d0 |
204 | 204 | ||
205 | movel %sp,%sp@- | 205 | movel %sp,%sp@- |
@@ -216,7 +216,7 @@ ret_from_interrupt: | |||
216 | ALIGN | 216 | ALIGN |
217 | ret_from_last_interrupt: | 217 | ret_from_last_interrupt: |
218 | moveq #(~ALLOWINT>>8)&0xff,%d0 | 218 | moveq #(~ALLOWINT>>8)&0xff,%d0 |
219 | andb %sp@(PT_SR),%d0 | 219 | andb %sp@(PT_OFF_SR),%d0 |
220 | jne 2b | 220 | jne 2b |
221 | 221 | ||
222 | /* check if we need to do software interrupts */ | 222 | /* check if we need to do software interrupts */ |
@@ -232,7 +232,7 @@ ENTRY(user_inthandler) | |||
232 | GET_CURRENT(%d0) | 232 | GET_CURRENT(%d0) |
233 | addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1) | 233 | addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1) |
234 | | put exception # in d0 | 234 | | put exception # in d0 |
235 | bfextu %sp@(PT_VECTOR){#4,#10},%d0 | 235 | bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0 |
236 | user_irqvec_fixup = . + 2 | 236 | user_irqvec_fixup = . + 2 |
237 | subw #VEC_USER,%d0 | 237 | subw #VEC_USER,%d0 |
238 | 238 | ||
diff --git a/arch/m68k/math-emu/fp_entry.S b/arch/m68k/math-emu/fp_entry.S index 954b4f304a7d..a3fe1f348dfe 100644 --- a/arch/m68k/math-emu/fp_entry.S +++ b/arch/m68k/math-emu/fp_entry.S | |||
@@ -85,8 +85,8 @@ fp_err_ua2: | |||
85 | fp_err_ua1: | 85 | fp_err_ua1: |
86 | addq.l #4,%sp | 86 | addq.l #4,%sp |
87 | move.l %a0,-(%sp) | 87 | move.l %a0,-(%sp) |
88 | pea SEGV_MAPERR | 88 | pea LSEGV_MAPERR |
89 | pea SIGSEGV | 89 | pea LSIGSEGV |
90 | jsr fpemu_signal | 90 | jsr fpemu_signal |
91 | add.w #12,%sp | 91 | add.w #12,%sp |
92 | jra ret_from_exception | 92 | jra ret_from_exception |
@@ -96,8 +96,8 @@ fp_err_ua1: | |||
96 | | it does not really belong here, but... | 96 | | it does not really belong here, but... |
97 | fp_sendtrace060: | 97 | fp_sendtrace060: |
98 | move.l (FPS_PC,%sp),-(%sp) | 98 | move.l (FPS_PC,%sp),-(%sp) |
99 | pea TRAP_TRACE | 99 | pea LTRAP_TRACE |
100 | pea SIGTRAP | 100 | pea LSIGTRAP |
101 | jsr fpemu_signal | 101 | jsr fpemu_signal |
102 | add.w #12,%sp | 102 | add.w #12,%sp |
103 | jra ret_from_exception | 103 | jra ret_from_exception |
@@ -122,17 +122,17 @@ fp_get_data_reg: | |||
122 | .long fp_get_d6, fp_get_d7 | 122 | .long fp_get_d6, fp_get_d7 |
123 | 123 | ||
124 | fp_get_d0: | 124 | fp_get_d0: |
125 | move.l (PT_D0+8,%sp),%d0 | 125 | move.l (PT_OFF_D0+8,%sp),%d0 |
126 | printf PREGISTER,"{d0->%08x}",1,%d0 | 126 | printf PREGISTER,"{d0->%08x}",1,%d0 |
127 | rts | 127 | rts |
128 | 128 | ||
129 | fp_get_d1: | 129 | fp_get_d1: |
130 | move.l (PT_D1+8,%sp),%d0 | 130 | move.l (PT_OFF_D1+8,%sp),%d0 |
131 | printf PREGISTER,"{d1->%08x}",1,%d0 | 131 | printf PREGISTER,"{d1->%08x}",1,%d0 |
132 | rts | 132 | rts |
133 | 133 | ||
134 | fp_get_d2: | 134 | fp_get_d2: |
135 | move.l (PT_D2+8,%sp),%d0 | 135 | move.l (PT_OFF_D2+8,%sp),%d0 |
136 | printf PREGISTER,"{d2->%08x}",1,%d0 | 136 | printf PREGISTER,"{d2->%08x}",1,%d0 |
137 | rts | 137 | rts |
138 | 138 | ||
@@ -173,35 +173,35 @@ fp_put_data_reg: | |||
173 | 173 | ||
174 | fp_put_d0: | 174 | fp_put_d0: |
175 | printf PREGISTER,"{d0<-%08x}",1,%d0 | 175 | printf PREGISTER,"{d0<-%08x}",1,%d0 |
176 | move.l %d0,(PT_D0+8,%sp) | 176 | move.l %d0,(PT_OFF_D0+8,%sp) |
177 | rts | 177 | rts |
178 | 178 | ||
179 | fp_put_d1: | 179 | fp_put_d1: |
180 | printf PREGISTER,"{d1<-%08x}",1,%d0 | 180 | printf PREGISTER,"{d1<-%08x}",1,%d0 |
181 | move.l %d0,(PT_D1+8,%sp) | 181 | move.l %d0,(PT_OFF_D1+8,%sp) |
182 | rts | 182 | rts |
183 | 183 | ||
184 | fp_put_d2: | 184 | fp_put_d2: |
185 | printf PREGISTER,"{d2<-%08x}",1,%d0 | 185 | printf PREGISTER,"{d2<-%08x}",1,%d0 |
186 | move.l %d0,(PT_D2+8,%sp) | 186 | move.l %d0,(PT_OFF_D2+8,%sp) |
187 | rts | 187 | rts |
188 | 188 | ||
189 | fp_put_d3: | 189 | fp_put_d3: |
190 | printf PREGISTER,"{d3<-%08x}",1,%d0 | 190 | printf PREGISTER,"{d3<-%08x}",1,%d0 |
191 | | move.l %d0,%d3 | 191 | | move.l %d0,%d3 |
192 | move.l %d0,(PT_D3+8,%sp) | 192 | move.l %d0,(PT_OFF_D3+8,%sp) |
193 | rts | 193 | rts |
194 | 194 | ||
195 | fp_put_d4: | 195 | fp_put_d4: |
196 | printf PREGISTER,"{d4<-%08x}",1,%d0 | 196 | printf PREGISTER,"{d4<-%08x}",1,%d0 |
197 | | move.l %d0,%d4 | 197 | | move.l %d0,%d4 |
198 | move.l %d0,(PT_D4+8,%sp) | 198 | move.l %d0,(PT_OFF_D4+8,%sp) |
199 | rts | 199 | rts |
200 | 200 | ||
201 | fp_put_d5: | 201 | fp_put_d5: |
202 | printf PREGISTER,"{d5<-%08x}",1,%d0 | 202 | printf PREGISTER,"{d5<-%08x}",1,%d0 |
203 | | move.l %d0,%d5 | 203 | | move.l %d0,%d5 |
204 | move.l %d0,(PT_D5+8,%sp) | 204 | move.l %d0,(PT_OFF_D5+8,%sp) |
205 | rts | 205 | rts |
206 | 206 | ||
207 | fp_put_d6: | 207 | fp_put_d6: |
@@ -225,17 +225,17 @@ fp_get_addr_reg: | |||
225 | .long fp_get_a6, fp_get_a7 | 225 | .long fp_get_a6, fp_get_a7 |
226 | 226 | ||
227 | fp_get_a0: | 227 | fp_get_a0: |
228 | move.l (PT_A0+8,%sp),%a0 | 228 | move.l (PT_OFF_A0+8,%sp),%a0 |
229 | printf PREGISTER,"{a0->%08x}",1,%a0 | 229 | printf PREGISTER,"{a0->%08x}",1,%a0 |
230 | rts | 230 | rts |
231 | 231 | ||
232 | fp_get_a1: | 232 | fp_get_a1: |
233 | move.l (PT_A1+8,%sp),%a0 | 233 | move.l (PT_OFF_A1+8,%sp),%a0 |
234 | printf PREGISTER,"{a1->%08x}",1,%a0 | 234 | printf PREGISTER,"{a1->%08x}",1,%a0 |
235 | rts | 235 | rts |
236 | 236 | ||
237 | fp_get_a2: | 237 | fp_get_a2: |
238 | move.l (PT_A2+8,%sp),%a0 | 238 | move.l (PT_OFF_A2+8,%sp),%a0 |
239 | printf PREGISTER,"{a2->%08x}",1,%a0 | 239 | printf PREGISTER,"{a2->%08x}",1,%a0 |
240 | rts | 240 | rts |
241 | 241 | ||
@@ -276,17 +276,17 @@ fp_put_addr_reg: | |||
276 | 276 | ||
277 | fp_put_a0: | 277 | fp_put_a0: |
278 | printf PREGISTER,"{a0<-%08x}",1,%a0 | 278 | printf PREGISTER,"{a0<-%08x}",1,%a0 |
279 | move.l %a0,(PT_A0+8,%sp) | 279 | move.l %a0,(PT_OFF_A0+8,%sp) |
280 | rts | 280 | rts |
281 | 281 | ||
282 | fp_put_a1: | 282 | fp_put_a1: |
283 | printf PREGISTER,"{a1<-%08x}",1,%a0 | 283 | printf PREGISTER,"{a1<-%08x}",1,%a0 |
284 | move.l %a0,(PT_A1+8,%sp) | 284 | move.l %a0,(PT_OFF_A1+8,%sp) |
285 | rts | 285 | rts |
286 | 286 | ||
287 | fp_put_a2: | 287 | fp_put_a2: |
288 | printf PREGISTER,"{a2<-%08x}",1,%a0 | 288 | printf PREGISTER,"{a2<-%08x}",1,%a0 |
289 | move.l %a0,(PT_A2+8,%sp) | 289 | move.l %a0,(PT_OFF_A2+8,%sp) |
290 | rts | 290 | rts |
291 | 291 | ||
292 | fp_put_a3: | 292 | fp_put_a3: |
diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c index 2ecab6155932..cf50fa29b198 100644 --- a/arch/mips/ar7/platform.c +++ b/arch/mips/ar7/platform.c | |||
@@ -32,6 +32,8 @@ | |||
32 | #include <linux/leds.h> | 32 | #include <linux/leds.h> |
33 | #include <linux/string.h> | 33 | #include <linux/string.h> |
34 | #include <linux/etherdevice.h> | 34 | #include <linux/etherdevice.h> |
35 | #include <linux/phy.h> | ||
36 | #include <linux/phy_fixed.h> | ||
35 | 37 | ||
36 | #include <asm/addrspace.h> | 38 | #include <asm/addrspace.h> |
37 | #include <asm/mach-ar7/ar7.h> | 39 | #include <asm/mach-ar7/ar7.h> |
@@ -208,6 +210,12 @@ static struct physmap_flash_data physmap_flash_data = { | |||
208 | .width = 2, | 210 | .width = 2, |
209 | }; | 211 | }; |
210 | 212 | ||
213 | static struct fixed_phy_status fixed_phy_status __initdata = { | ||
214 | .link = 1, | ||
215 | .speed = 100, | ||
216 | .duplex = 1, | ||
217 | }; | ||
218 | |||
211 | static struct plat_cpmac_data cpmac_low_data = { | 219 | static struct plat_cpmac_data cpmac_low_data = { |
212 | .reset_bit = 17, | 220 | .reset_bit = 17, |
213 | .power_bit = 20, | 221 | .power_bit = 20, |
@@ -530,6 +538,9 @@ static int __init ar7_register_devices(void) | |||
530 | } | 538 | } |
531 | 539 | ||
532 | if (ar7_has_high_cpmac()) { | 540 | if (ar7_has_high_cpmac()) { |
541 | res = fixed_phy_add(PHY_POLL, cpmac_high.id, &fixed_phy_status); | ||
542 | if (res && res != -ENODEV) | ||
543 | return res; | ||
533 | cpmac_get_mac(1, cpmac_high_data.dev_addr); | 544 | cpmac_get_mac(1, cpmac_high_data.dev_addr); |
534 | res = platform_device_register(&cpmac_high); | 545 | res = platform_device_register(&cpmac_high); |
535 | if (res) | 546 | if (res) |
@@ -538,6 +549,10 @@ static int __init ar7_register_devices(void) | |||
538 | cpmac_low_data.phy_mask = 0xffffffff; | 549 | cpmac_low_data.phy_mask = 0xffffffff; |
539 | } | 550 | } |
540 | 551 | ||
552 | res = fixed_phy_add(PHY_POLL, cpmac_low.id, &fixed_phy_status); | ||
553 | if (res && res != -ENODEV) | ||
554 | return res; | ||
555 | |||
541 | cpmac_get_mac(0, cpmac_low_data.dev_addr); | 556 | cpmac_get_mac(0, cpmac_low_data.dev_addr); |
542 | res = platform_device_register(&cpmac_low); | 557 | res = platform_device_register(&cpmac_low); |
543 | if (res) | 558 | if (res) |
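
The AR7 cpmac ports have no probeable PHY behind them, so a software "fixed" PHY advertising a permanently-up 100 Mbit/s full-duplex link is registered before each platform device. The shape of the call, with phy_id standing in for the per-port id (cpmac_high.id / cpmac_low.id above):

    static struct fixed_phy_status st = {
        .link = 1, .speed = 100, .duplex = 1,
    };
    int err = fixed_phy_add(PHY_POLL, phy_id, &st);
    if (err && err != -ENODEV)    /* -ENODEV: CONFIG_FIXED_PHY not built */
        return err;

Tolerating -ENODEV keeps kernels built without fixed-PHY support bootable.
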
diff --git a/arch/mips/include/asm/socket.h b/arch/mips/include/asm/socket.h index 2abca1780169..ae05accd9fe4 100644 --- a/arch/mips/include/asm/socket.h +++ b/arch/mips/include/asm/socket.h | |||
@@ -42,6 +42,8 @@ To add: #define SO_REUSEPORT 0x0200 /* Allow local address and port reuse. */ | |||
42 | #define SO_SNDTIMEO 0x1005 /* send timeout */ | 42 | #define SO_SNDTIMEO 0x1005 /* send timeout */ |
43 | #define SO_RCVTIMEO 0x1006 /* receive timeout */ | 43 | #define SO_RCVTIMEO 0x1006 /* receive timeout */ |
44 | #define SO_ACCEPTCONN 0x1009 | 44 | #define SO_ACCEPTCONN 0x1009 |
45 | #define SO_PROTOCOL 0x1028 /* protocol type */ | ||
46 | #define SO_DOMAIN 0x1029 /* domain/socket family */ | ||
45 | 47 | ||
46 | /* linux-specific, might as well be the same as on i386 */ | 48 | /* linux-specific, might as well be the same as on i386 */ |
47 | #define SO_NO_CHECK 11 | 49 | #define SO_NO_CHECK 11 |
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h index f9df720d2e40..01cc1630b66c 100644 --- a/arch/mips/include/asm/thread_info.h +++ b/arch/mips/include/asm/thread_info.h | |||
@@ -115,6 +115,7 @@ register struct thread_info *__current_thread_info __asm__("$28"); | |||
115 | #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ | 115 | #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ |
116 | #define TIF_SYSCALL_AUDIT 3 /* syscall auditing active */ | 116 | #define TIF_SYSCALL_AUDIT 3 /* syscall auditing active */ |
117 | #define TIF_SECCOMP 4 /* secure computing */ | 117 | #define TIF_SECCOMP 4 /* secure computing */ |
118 | #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */ | ||
118 | #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */ | 119 | #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */ |
119 | #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ | 120 | #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ |
120 | #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ | 121 | #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ |
@@ -139,6 +140,7 @@ register struct thread_info *__current_thread_info __asm__("$28"); | |||
139 | #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) | 140 | #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) |
140 | #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) | 141 | #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) |
141 | #define _TIF_SECCOMP (1<<TIF_SECCOMP) | 142 | #define _TIF_SECCOMP (1<<TIF_SECCOMP) |
143 | #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) | ||
142 | #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) | 144 | #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) |
143 | #define _TIF_USEDFPU (1<<TIF_USEDFPU) | 145 | #define _TIF_USEDFPU (1<<TIF_USEDFPU) |
144 | #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) | 146 | #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) |
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index e855b118a079..1a6ae124635b 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S | |||
@@ -164,7 +164,7 @@ EXPORT(sysn32_call_table) | |||
164 | PTR sys_connect | 164 | PTR sys_connect |
165 | PTR sys_accept | 165 | PTR sys_accept |
166 | PTR sys_sendto | 166 | PTR sys_sendto |
167 | PTR sys_recvfrom | 167 | PTR compat_sys_recvfrom |
168 | PTR compat_sys_sendmsg /* 6045 */ | 168 | PTR compat_sys_sendmsg /* 6045 */ |
169 | PTR compat_sys_recvmsg | 169 | PTR compat_sys_recvmsg |
170 | PTR sys_shutdown | 170 | PTR sys_shutdown |
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 0c49f1a660be..cd31087a651f 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S | |||
@@ -378,8 +378,8 @@ sys_call_table: | |||
378 | PTR sys_getsockname | 378 | PTR sys_getsockname |
379 | PTR sys_getsockopt | 379 | PTR sys_getsockopt |
380 | PTR sys_listen | 380 | PTR sys_listen |
381 | PTR sys_recv /* 4175 */ | 381 | PTR compat_sys_recv /* 4175 */ |
382 | PTR sys_recvfrom | 382 | PTR compat_sys_recvfrom |
383 | PTR compat_sys_recvmsg | 383 | PTR compat_sys_recvmsg |
384 | PTR sys_send | 384 | PTR sys_send |
385 | PTR compat_sys_sendmsg | 385 | PTR compat_sys_sendmsg |
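
Both 64-bit MIPS compat tables (n32 above, o32 here) now route recv/recvfrom through compat wrappers. The point is the MSG_CMSG_COMPAT tagging: the wrapper's assumed shape (matching net/compat.c) just ORs the flag in so the network stack can tell a 32-bit caller apart and pick compat layouts:

    asmlinkage long compat_sys_recvfrom(int fd, void __user *buf,
                size_t len, unsigned flags,
                struct sockaddr __user *addr, int __user *addrlen)
    {
        return sys_recvfrom(fd, buf, len, flags | MSG_CMSG_COMPAT,
                            addr, addrlen);
    }
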
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index 830c5ef9932b..6254041b942f 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/compiler.h> | 21 | #include <linux/compiler.h> |
22 | #include <linux/syscalls.h> | 22 | #include <linux/syscalls.h> |
23 | #include <linux/uaccess.h> | 23 | #include <linux/uaccess.h> |
24 | #include <linux/tracehook.h> | ||
24 | 25 | ||
25 | #include <asm/abi.h> | 26 | #include <asm/abi.h> |
26 | #include <asm/asm.h> | 27 | #include <asm/asm.h> |
@@ -700,4 +701,11 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused, | |||
700 | /* deal with pending signal delivery */ | 701 | /* deal with pending signal delivery */ |
701 | if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) | 702 | if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) |
702 | do_signal(regs); | 703 | do_signal(regs); |
704 | |||
705 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { | ||
706 | clear_thread_flag(TIF_NOTIFY_RESUME); | ||
707 | tracehook_notify_resume(regs); | ||
708 | if (current->replacement_session_keyring) | ||
709 | key_replace_session_keyring(); | ||
710 | } | ||
703 | } | 711 | } |
diff --git a/arch/mn10300/include/asm/socket.h b/arch/mn10300/include/asm/socket.h index fb5daf438ec9..4df75af29d76 100644 --- a/arch/mn10300/include/asm/socket.h +++ b/arch/mn10300/include/asm/socket.h | |||
@@ -57,4 +57,7 @@ | |||
57 | #define SO_TIMESTAMPING 37 | 57 | #define SO_TIMESTAMPING 37 |
58 | #define SCM_TIMESTAMPING SO_TIMESTAMPING | 58 | #define SCM_TIMESTAMPING SO_TIMESTAMPING |
59 | 59 | ||
60 | #define SO_PROTOCOL 38 | ||
61 | #define SO_DOMAIN 39 | ||
62 | |||
60 | #endif /* _ASM_SOCKET_H */ | 63 | #endif /* _ASM_SOCKET_H */ |
diff --git a/arch/mn10300/kernel/signal.c b/arch/mn10300/kernel/signal.c index feb2f2e810db..a21f43bc68e2 100644 --- a/arch/mn10300/kernel/signal.c +++ b/arch/mn10300/kernel/signal.c | |||
@@ -568,5 +568,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags) | |||
568 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { | 568 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { |
569 | clear_thread_flag(TIF_NOTIFY_RESUME); | 569 | clear_thread_flag(TIF_NOTIFY_RESUME); |
570 | tracehook_notify_resume(__frame); | 570 | tracehook_notify_resume(__frame); |
571 | if (current->replacement_session_keyring) | ||
572 | key_replace_session_keyring(); | ||
571 | } | 573 | } |
572 | } | 574 | } |
diff --git a/arch/parisc/include/asm/socket.h b/arch/parisc/include/asm/socket.h index 885472bf7b78..960b1e5d8e16 100644 --- a/arch/parisc/include/asm/socket.h +++ b/arch/parisc/include/asm/socket.h | |||
@@ -24,6 +24,8 @@ | |||
24 | #define SO_RCVTIMEO 0x1006 | 24 | #define SO_RCVTIMEO 0x1006 |
25 | #define SO_ERROR 0x1007 | 25 | #define SO_ERROR 0x1007 |
26 | #define SO_TYPE 0x1008 | 26 | #define SO_TYPE 0x1008 |
27 | #define SO_PROTOCOL 0x1028 | ||
28 | #define SO_DOMAIN 0x1029 | ||
27 | #define SO_PEERNAME 0x2000 | 29 | #define SO_PEERNAME 0x2000 |
28 | 30 | ||
29 | #define SO_NO_CHECK 0x400b | 31 | #define SO_NO_CHECK 0x400b |
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h index 4ce0edfbe969..ac775a76bff7 100644 --- a/arch/parisc/include/asm/thread_info.h +++ b/arch/parisc/include/asm/thread_info.h | |||
@@ -59,6 +59,7 @@ struct thread_info { | |||
59 | #define TIF_MEMDIE 5 | 59 | #define TIF_MEMDIE 5 |
60 | #define TIF_RESTORE_SIGMASK 6 /* restore saved signal mask */ | 60 | #define TIF_RESTORE_SIGMASK 6 /* restore saved signal mask */ |
61 | #define TIF_FREEZE 7 /* is freezing for suspend */ | 61 | #define TIF_FREEZE 7 /* is freezing for suspend */ |
62 | #define TIF_NOTIFY_RESUME 8 /* callback before returning to user */ | ||
62 | 63 | ||
63 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) | 64 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) |
64 | #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) | 65 | #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) |
@@ -67,8 +68,9 @@ struct thread_info { | |||
67 | #define _TIF_32BIT (1 << TIF_32BIT) | 68 | #define _TIF_32BIT (1 << TIF_32BIT) |
68 | #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) | 69 | #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) |
69 | #define _TIF_FREEZE (1 << TIF_FREEZE) | 70 | #define _TIF_FREEZE (1 << TIF_FREEZE) |
71 | #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) | ||
70 | 72 | ||
71 | #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | \ | 73 | #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \ |
72 | _TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK) | 74 | _TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK) |
73 | 75 | ||
74 | #endif /* __KERNEL__ */ | 76 | #endif /* __KERNEL__ */ |
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index e552e547cb93..8c4712b74dc1 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S | |||
@@ -948,7 +948,7 @@ intr_check_sig: | |||
948 | /* As above */ | 948 | /* As above */ |
949 | mfctl %cr30,%r1 | 949 | mfctl %cr30,%r1 |
950 | LDREG TI_FLAGS(%r1),%r19 | 950 | LDREG TI_FLAGS(%r1),%r19 |
951 | ldi (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %r20 | 951 | ldi (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK|_TIF_NOTIFY_RESUME), %r20 |
952 | and,COND(<>) %r19, %r20, %r0 | 952 | and,COND(<>) %r19, %r20, %r0 |
953 | b,n intr_restore /* skip past if we've nothing to do */ | 953 | b,n intr_restore /* skip past if we've nothing to do */ |
954 | 954 | ||
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c index f82544225e8e..8eb3c63c407a 100644 --- a/arch/parisc/kernel/signal.c +++ b/arch/parisc/kernel/signal.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/stddef.h> | 25 | #include <linux/stddef.h> |
26 | #include <linux/compat.h> | 26 | #include <linux/compat.h> |
27 | #include <linux/elf.h> | 27 | #include <linux/elf.h> |
28 | #include <linux/tracehook.h> | ||
28 | #include <asm/ucontext.h> | 29 | #include <asm/ucontext.h> |
29 | #include <asm/rt_sigframe.h> | 30 | #include <asm/rt_sigframe.h> |
30 | #include <asm/uaccess.h> | 31 | #include <asm/uaccess.h> |
@@ -645,4 +646,11 @@ void do_notify_resume(struct pt_regs *regs, long in_syscall) | |||
645 | if (test_thread_flag(TIF_SIGPENDING) || | 646 | if (test_thread_flag(TIF_SIGPENDING) || |
646 | test_thread_flag(TIF_RESTORE_SIGMASK)) | 647 | test_thread_flag(TIF_RESTORE_SIGMASK)) |
647 | do_signal(regs, in_syscall); | 648 | do_signal(regs, in_syscall); |
649 | |||
650 | if (test_thread_flag(TIF_NOTIFY_RESUME)) { | ||
651 | clear_thread_flag(TIF_NOTIFY_RESUME); | ||
652 | tracehook_notify_resume(regs); | ||
653 | if (current->replacement_session_keyring) | ||
654 | key_replace_session_keyring(); | ||
655 | } | ||
648 | } | 656 | } |
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index b44aaabdd1a6..0c34371ec49c 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h | |||
@@ -424,6 +424,29 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | |||
424 | #endif | 424 | #endif |
425 | } | 425 | } |
426 | 426 | ||
427 | static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) | ||
428 | { | ||
429 | struct dma_mapping_ops *ops = get_dma_ops(dev); | ||
430 | |||
431 | if (ops->addr_needs_map && ops->addr_needs_map(dev, addr, size)) | ||
432 | return 0; | ||
433 | |||
434 | if (!dev->dma_mask) | ||
435 | return 0; | ||
436 | |||
437 | return addr + size <= *dev->dma_mask; | ||
438 | } | ||
439 | |||
440 | static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) | ||
441 | { | ||
442 | return paddr + get_dma_direct_offset(dev); | ||
443 | } | ||
444 | |||
445 | static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) | ||
446 | { | ||
447 | return daddr - get_dma_direct_offset(dev); | ||
448 | } | ||
449 | |||
427 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) | 450 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) |
428 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) | 451 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) |
429 | #ifdef CONFIG_NOT_COHERENT_CACHE | 452 | #ifdef CONFIG_NOT_COHERENT_CACHE |
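
The powerpc variant layers two checks: the dma_ops addr_needs_map hook (so swiotlb-backed PCI buses can veto a range) and the plain dma_mask comparison. phys_to_dma()/dma_to_phys() apply the direct-DMA offset symmetrically, so the two conversions are inverses:

    /* sanity sketch: a round trip through bus space is the identity */
    phys_addr_t p = page_to_phys(some_page);   /* some_page: any mapped page */
    BUG_ON(dma_to_phys(dev, phys_to_dma(dev, p)) != p);
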
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index eb17da781128..2a5da069714e 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h | |||
@@ -104,8 +104,8 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, | |||
104 | else | 104 | else |
105 | pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte)); | 105 | pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte)); |
106 | 106 | ||
107 | #elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP) | 107 | #elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) |
108 | /* Second case is 32-bit with 64-bit PTE in SMP mode. In this case, we | 108 | /* Second case is 32-bit with 64-bit PTE. In this case, we |
109 | * can just store as long as we do the two halves in the right order | 109 | * can just store as long as we do the two halves in the right order |
110 | * with a barrier in between. This is possible because we take care, | 110 | * with a barrier in between. This is possible because we take care, |
111 | * in the hash code, to pre-invalidate if the PTE was already hashed, | 111 | * in the hash code, to pre-invalidate if the PTE was already hashed, |
@@ -140,7 +140,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, | |||
140 | 140 | ||
141 | #else | 141 | #else |
142 | /* Anything else just stores the PTE normally. That covers all 64-bit | 142 | /* Anything else just stores the PTE normally. That covers all 64-bit |
143 | * cases, and 32-bit non-hash with 64-bit PTEs in UP mode | 143 | * cases, and 32-bit non-hash with 32-bit PTEs. |
144 | */ | 144 | */ |
145 | *ptep = pte; | 145 | *ptep = pte; |
146 | #endif | 146 | #endif |
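
With the "&& defined(CONFIG_SMP)" dropped, the ordered two-half store is now used on UP builds too, since even there the hash miss path (and hardware) can observe the PTE mid-update. The ordering idea as a hedged C sketch (assumed word layout; the real code is inline asm using eieio):

    u32 *p = (u32 *)ptep;
    p[0] = (u32)(pte_val(pte) >> 32); /* half without _PAGE_HASHPTE first */
    wmb();                            /* eieio: order the two stores */
    p[1] = (u32)pte_val(pte);         /* now the half that makes it live */
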
diff --git a/arch/powerpc/include/asm/qe.h b/arch/powerpc/include/asm/qe.h index 157c5ca581c8..f388f0ab193f 100644 --- a/arch/powerpc/include/asm/qe.h +++ b/arch/powerpc/include/asm/qe.h | |||
@@ -154,6 +154,7 @@ int qe_get_snum(void); | |||
154 | void qe_put_snum(u8 snum); | 154 | void qe_put_snum(u8 snum); |
155 | unsigned int qe_get_num_of_risc(void); | 155 | unsigned int qe_get_num_of_risc(void); |
156 | unsigned int qe_get_num_of_snums(void); | 156 | unsigned int qe_get_num_of_snums(void); |
157 | int qe_alive_during_sleep(void); | ||
157 | 158 | ||
158 | /* we actually use cpm_muram implementation, define this for convenience */ | 159 | /* we actually use cpm_muram implementation, define this for convenience */ |
159 | #define qe_muram_init cpm_muram_init | 160 | #define qe_muram_init cpm_muram_init |
diff --git a/arch/powerpc/include/asm/socket.h b/arch/powerpc/include/asm/socket.h index 1e5cfad0e3f7..3ab8b3e6feb0 100644 --- a/arch/powerpc/include/asm/socket.h +++ b/arch/powerpc/include/asm/socket.h | |||
@@ -64,4 +64,7 @@ | |||
64 | #define SO_TIMESTAMPING 37 | 64 | #define SO_TIMESTAMPING 37 |
65 | #define SCM_TIMESTAMPING SO_TIMESTAMPING | 65 | #define SCM_TIMESTAMPING SO_TIMESTAMPING |
66 | 66 | ||
67 | #define SO_PROTOCOL 38 | ||
68 | #define SO_DOMAIN 39 | ||
69 | |||
67 | #endif /* _ASM_POWERPC_SOCKET_H */ | 70 | #endif /* _ASM_POWERPC_SOCKET_H */ |
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index c3b193121f81..198266cf9e2d 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h | |||
@@ -54,7 +54,7 @@ | |||
54 | * This returns the old value in the lock, so we succeeded | 54 | * This returns the old value in the lock, so we succeeded |
55 | * in getting the lock if the return value is 0. | 55 | * in getting the lock if the return value is 0. |
56 | */ | 56 | */ |
57 | static inline unsigned long __spin_trylock(raw_spinlock_t *lock) | 57 | static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock) |
58 | { | 58 | { |
59 | unsigned long tmp, token; | 59 | unsigned long tmp, token; |
60 | 60 | ||
@@ -76,7 +76,7 @@ static inline unsigned long __spin_trylock(raw_spinlock_t *lock) | |||
76 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) | 76 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) |
77 | { | 77 | { |
78 | CLEAR_IO_SYNC; | 78 | CLEAR_IO_SYNC; |
79 | return __spin_trylock(lock) == 0; | 79 | return arch_spin_trylock(lock) == 0; |
80 | } | 80 | } |
81 | 81 | ||
82 | /* | 82 | /* |
@@ -108,7 +108,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) | |||
108 | { | 108 | { |
109 | CLEAR_IO_SYNC; | 109 | CLEAR_IO_SYNC; |
110 | while (1) { | 110 | while (1) { |
111 | if (likely(__spin_trylock(lock) == 0)) | 111 | if (likely(arch_spin_trylock(lock) == 0)) |
112 | break; | 112 | break; |
113 | do { | 113 | do { |
114 | HMT_low(); | 114 | HMT_low(); |
@@ -126,7 +126,7 @@ void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) | |||
126 | 126 | ||
127 | CLEAR_IO_SYNC; | 127 | CLEAR_IO_SYNC; |
128 | while (1) { | 128 | while (1) { |
129 | if (likely(__spin_trylock(lock) == 0)) | 129 | if (likely(arch_spin_trylock(lock) == 0)) |
130 | break; | 130 | break; |
131 | local_save_flags(flags_dis); | 131 | local_save_flags(flags_dis); |
132 | local_irq_restore(flags); | 132 | local_irq_restore(flags); |
@@ -181,7 +181,7 @@ extern void __raw_spin_unlock_wait(raw_spinlock_t *lock); | |||
181 | * This returns the old value in the lock + 1, | 181 | * This returns the old value in the lock + 1, |
182 | * so we got a read lock if the return value is > 0. | 182 | * so we got a read lock if the return value is > 0. |
183 | */ | 183 | */ |
184 | static inline long __read_trylock(raw_rwlock_t *rw) | 184 | static inline long arch_read_trylock(raw_rwlock_t *rw) |
185 | { | 185 | { |
186 | long tmp; | 186 | long tmp; |
187 | 187 | ||
@@ -205,7 +205,7 @@ static inline long __read_trylock(raw_rwlock_t *rw) | |||
205 | * This returns the old value in the lock, | 205 | * This returns the old value in the lock, |
206 | * so we got the write lock if the return value is 0. | 206 | * so we got the write lock if the return value is 0. |
207 | */ | 207 | */ |
208 | static inline long __write_trylock(raw_rwlock_t *rw) | 208 | static inline long arch_write_trylock(raw_rwlock_t *rw) |
209 | { | 209 | { |
210 | long tmp, token; | 210 | long tmp, token; |
211 | 211 | ||
@@ -228,7 +228,7 @@ static inline long __write_trylock(raw_rwlock_t *rw) | |||
228 | static inline void __raw_read_lock(raw_rwlock_t *rw) | 228 | static inline void __raw_read_lock(raw_rwlock_t *rw) |
229 | { | 229 | { |
230 | while (1) { | 230 | while (1) { |
231 | if (likely(__read_trylock(rw) > 0)) | 231 | if (likely(arch_read_trylock(rw) > 0)) |
232 | break; | 232 | break; |
233 | do { | 233 | do { |
234 | HMT_low(); | 234 | HMT_low(); |
@@ -242,7 +242,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) | |||
242 | static inline void __raw_write_lock(raw_rwlock_t *rw) | 242 | static inline void __raw_write_lock(raw_rwlock_t *rw) |
243 | { | 243 | { |
244 | while (1) { | 244 | while (1) { |
245 | if (likely(__write_trylock(rw) == 0)) | 245 | if (likely(arch_write_trylock(rw) == 0)) |
246 | break; | 246 | break; |
247 | do { | 247 | do { |
248 | HMT_low(); | 248 | HMT_low(); |
@@ -255,12 +255,12 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) | |||
255 | 255 | ||
256 | static inline int __raw_read_trylock(raw_rwlock_t *rw) | 256 | static inline int __raw_read_trylock(raw_rwlock_t *rw) |
257 | { | 257 | { |
258 | return __read_trylock(rw) > 0; | 258 | return arch_read_trylock(rw) > 0; |
259 | } | 259 | } |
260 | 260 | ||
261 | static inline int __raw_write_trylock(raw_rwlock_t *rw) | 261 | static inline int __raw_write_trylock(raw_rwlock_t *rw) |
262 | { | 262 | { |
263 | return __write_trylock(rw) == 0; | 263 | return arch_write_trylock(rw) == 0; |
264 | } | 264 | } |
265 | 265 | ||
266 | static inline void __raw_read_unlock(raw_rwlock_t *rw) | 266 | static inline void __raw_read_unlock(raw_rwlock_t *rw) |
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index b73396b93905..9619285f64e8 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile | |||
@@ -97,7 +97,7 @@ obj64-$(CONFIG_AUDIT) += compat_audit.o | |||
97 | 97 | ||
98 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o | 98 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o |
99 | obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o | 99 | obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o |
100 | obj-$(CONFIG_PPC_PERF_CTRS) += perf_counter.o | 100 | obj-$(CONFIG_PPC_PERF_CTRS) += perf_counter.o perf_callchain.o |
101 | obj64-$(CONFIG_PPC_PERF_CTRS) += power4-pmu.o ppc970-pmu.o power5-pmu.o \ | 101 | obj64-$(CONFIG_PPC_PERF_CTRS) += power4-pmu.o ppc970-pmu.o power5-pmu.o \ |
102 | power5+-pmu.o power6-pmu.o power7-pmu.o | 102 | power5+-pmu.o power6-pmu.o power7-pmu.o |
103 | obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o | 103 | obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o |
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 561b64652311..197b15646eeb 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c | |||
@@ -67,6 +67,8 @@ int main(void) | |||
67 | DEFINE(MMCONTEXTID, offsetof(struct mm_struct, context.id)); | 67 | DEFINE(MMCONTEXTID, offsetof(struct mm_struct, context.id)); |
68 | #ifdef CONFIG_PPC64 | 68 | #ifdef CONFIG_PPC64 |
69 | DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context)); | 69 | DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context)); |
70 | DEFINE(SIGSEGV, SIGSEGV); | ||
71 | DEFINE(NMI_MASK, NMI_MASK); | ||
70 | #else | 72 | #else |
71 | DEFINE(THREAD_INFO, offsetof(struct task_struct, stack)); | 73 | DEFINE(THREAD_INFO, offsetof(struct task_struct, stack)); |
72 | #endif /* CONFIG_PPC64 */ | 74 | #endif /* CONFIG_PPC64 */ |
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c index 68ccf11e4f19..e8a57de85bcf 100644 --- a/arch/powerpc/kernel/dma-swiotlb.c +++ b/arch/powerpc/kernel/dma-swiotlb.c | |||
@@ -24,50 +24,12 @@ | |||
24 | int swiotlb __read_mostly; | 24 | int swiotlb __read_mostly; |
25 | unsigned int ppc_swiotlb_enable; | 25 | unsigned int ppc_swiotlb_enable; |
26 | 26 | ||
27 | void *swiotlb_bus_to_virt(struct device *hwdev, dma_addr_t addr) | ||
28 | { | ||
29 | unsigned long pfn = PFN_DOWN(swiotlb_bus_to_phys(hwdev, addr)); | ||
30 | void *pageaddr = page_address(pfn_to_page(pfn)); | ||
31 | |||
32 | if (pageaddr != NULL) | ||
33 | return pageaddr + (addr % PAGE_SIZE); | ||
34 | return NULL; | ||
35 | } | ||
36 | |||
37 | dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr) | ||
38 | { | ||
39 | return paddr + get_dma_direct_offset(hwdev); | ||
40 | } | ||
41 | |||
42 | phys_addr_t swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr) | ||
43 | |||
44 | { | ||
45 | return baddr - get_dma_direct_offset(hwdev); | ||
46 | } | ||
47 | |||
48 | /* | ||
49 | * Determine if an address needs bounce buffering via swiotlb. | ||
50 | * Going forward I expect the swiotlb code to generalize on using | ||
51 | * a dma_ops->addr_needs_map, and this function will move from here to the | ||
52 | * generic swiotlb code. | ||
53 | */ | ||
54 | int | ||
55 | swiotlb_arch_address_needs_mapping(struct device *hwdev, dma_addr_t addr, | ||
56 | size_t size) | ||
57 | { | ||
58 | struct dma_mapping_ops *dma_ops = get_dma_ops(hwdev); | ||
59 | |||
60 | BUG_ON(!dma_ops); | ||
61 | return dma_ops->addr_needs_map(hwdev, addr, size); | ||
62 | } | ||
63 | |||
64 | /* | 27 | /* |
65 | * Determine if an address is reachable by a pci device, or if we must bounce. | 28 | * Determine if an address is reachable by a pci device, or if we must bounce. |
66 | */ | 29 | */ |
67 | static int | 30 | static int |
68 | swiotlb_pci_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size) | 31 | swiotlb_pci_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size) |
69 | { | 32 | { |
70 | u64 mask = dma_get_mask(hwdev); | ||
71 | dma_addr_t max; | 33 | dma_addr_t max; |
72 | struct pci_controller *hose; | 34 | struct pci_controller *hose; |
73 | struct pci_dev *pdev = to_pci_dev(hwdev); | 35 | struct pci_dev *pdev = to_pci_dev(hwdev); |
@@ -79,16 +41,9 @@ swiotlb_pci_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size) | |||
79 | if ((addr + size > max) | (addr < hose->dma_window_base_cur)) | 41 | if ((addr + size > max) | (addr < hose->dma_window_base_cur)) |
80 | return 1; | 42 | return 1; |
81 | 43 | ||
82 | return !is_buffer_dma_capable(mask, addr, size); | 44 | return 0; |
83 | } | ||
84 | |||
85 | static int | ||
86 | swiotlb_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size) | ||
87 | { | ||
88 | return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size); | ||
89 | } | 45 | } |
90 | 46 | ||
91 | |||
92 | /* | 47 | /* |
93 | * At the moment, all platforms that use this code only require | 48 | * At the moment, all platforms that use this code only require |
94 | * swiotlb to be used if we're operating on HIGHMEM. Since | 49 | * swiotlb to be used if we're operating on HIGHMEM. Since |
@@ -104,7 +59,6 @@ struct dma_mapping_ops swiotlb_dma_ops = { | |||
104 | .dma_supported = swiotlb_dma_supported, | 59 | .dma_supported = swiotlb_dma_supported, |
105 | .map_page = swiotlb_map_page, | 60 | .map_page = swiotlb_map_page, |
106 | .unmap_page = swiotlb_unmap_page, | 61 | .unmap_page = swiotlb_unmap_page, |
107 | .addr_needs_map = swiotlb_addr_needs_map, | ||
108 | .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu, | 62 | .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu, |
109 | .sync_single_range_for_device = swiotlb_sync_single_range_for_device, | 63 | .sync_single_range_for_device = swiotlb_sync_single_range_for_device, |
110 | .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, | 64 | .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, |
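
With dma_capable() and phys_to_dma() now in the arch header (see the dma-mapping.h hunk above), the ad-hoc swiotlb_bus_to_virt/phys conversions and the mask-based addr_needs_map variants are dead weight: generic swiotlb can make the bounce decision itself. Assumed shape of that generic check (a sketch; the in-tree code differs in detail):

    static int needs_bounce(struct device *hwdev, phys_addr_t paddr,
                            size_t size)
    {
        return !dma_capable(hwdev, phys_to_dma(hwdev, paddr), size);
    }
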
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index eb898112e577..8ac85e08ffae 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S | |||
@@ -729,6 +729,11 @@ BEGIN_FTR_SECTION | |||
729 | bne- do_ste_alloc /* If so handle it */ | 729 | bne- do_ste_alloc /* If so handle it */ |
730 | END_FTR_SECTION_IFCLR(CPU_FTR_SLB) | 730 | END_FTR_SECTION_IFCLR(CPU_FTR_SLB) |
731 | 731 | ||
732 | clrrdi r11,r1,THREAD_SHIFT | ||
733 | lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */ | ||
734 | andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */ | ||
735 | bne 77f /* then don't call hash_page now */ | ||
736 | |||
732 | /* | 737 | /* |
733 | * On iSeries, we soft-disable interrupts here, then | 738 | * On iSeries, we soft-disable interrupts here, then |
734 | * hard-enable interrupts so that the hash_page code can spin on | 739 | * hard-enable interrupts so that the hash_page code can spin on |
@@ -833,6 +838,20 @@ handle_page_fault: | |||
833 | bl .low_hash_fault | 838 | bl .low_hash_fault |
834 | b .ret_from_except | 839 | b .ret_from_except |
835 | 840 | ||
841 | /* | ||
842 | * We come here as a result of a DSI at a point where we don't want | ||
843 | * to call hash_page, such as when we are accessing memory (possibly | ||
844 | * user memory) inside a PMU interrupt that occurred while interrupts | ||
845 | * were soft-disabled. We want to invoke the exception handler for | ||
846 | * the access, or panic if there isn't a handler. | ||
847 | */ | ||
848 | 77: bl .save_nvgprs | ||
849 | mr r4,r3 | ||
850 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
851 | li r5,SIGSEGV | ||
852 | bl .bad_page_fault | ||
853 | b .ret_from_except | ||
854 | |||
836 | /* here we have a segment miss */ | 855 | /* here we have a segment miss */ |
837 | do_ste_alloc: | 856 | do_ste_alloc: |
838 | bl .ste_allocate /* try to insert stab entry */ | 857 | bl .ste_allocate /* try to insert stab entry */ |
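In C terms, the guard added at the top of this hunk behaves roughly as sketched below. NMI_MASK is the powerpc "pseudo-NMI" bit in the preempt count (the andis. form tests its upper halfword), and bad_page_fault() is the existing powerpc handler that the 77: stub branches to; the exact spelling here is an assumption, not the generated code.

/* Sketch (assumption) of the new assembly guard: a DSI taken inside a
 * PMU interrupt that arrived while soft-disabled must not re-enter
 * hash_page(), so it is routed straight to the bad-fault path. */
if (current_thread_info()->preempt_count & NMI_MASK) {
        bad_page_fault(regs, dar, SIGSEGV);    /* dar = faulting address */
        return;
}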
diff --git a/arch/powerpc/kernel/perf_callchain.c b/arch/powerpc/kernel/perf_callchain.c new file mode 100644 index 000000000000..f74b62c67511 --- /dev/null +++ b/arch/powerpc/kernel/perf_callchain.c | |||
@@ -0,0 +1,527 @@ | |||
1 | /* | ||
2 | * Performance counter callchain support - powerpc architecture code | ||
3 | * | ||
4 | * Copyright © 2009 Paul Mackerras, IBM Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/sched.h> | ||
13 | #include <linux/perf_counter.h> | ||
14 | #include <linux/percpu.h> | ||
15 | #include <linux/uaccess.h> | ||
16 | #include <linux/mm.h> | ||
17 | #include <asm/ptrace.h> | ||
18 | #include <asm/pgtable.h> | ||
19 | #include <asm/sigcontext.h> | ||
20 | #include <asm/ucontext.h> | ||
21 | #include <asm/vdso.h> | ||
22 | #ifdef CONFIG_PPC64 | ||
23 | #include "ppc32.h" | ||
24 | #endif | ||
25 | |||
26 | /* | ||
27 | * Store another value in a callchain_entry. | ||
28 | */ | ||
29 | static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip) | ||
30 | { | ||
31 | unsigned int nr = entry->nr; | ||
32 | |||
33 | if (nr < PERF_MAX_STACK_DEPTH) { | ||
34 | entry->ip[nr] = ip; | ||
35 | entry->nr = nr + 1; | ||
36 | } | ||
37 | } | ||
38 | |||
39 | /* | ||
40 | * Is sp valid as the address of the next kernel stack frame after prev_sp? | ||
41 | * The next frame may be in a different stack area but should not go | ||
42 | * back down in the same stack area. | ||
43 | */ | ||
44 | static int valid_next_sp(unsigned long sp, unsigned long prev_sp) | ||
45 | { | ||
46 | if (sp & 0xf) | ||
47 | return 0; /* must be 16-byte aligned */ | ||
48 | if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD)) | ||
49 | return 0; | ||
50 | if (sp >= prev_sp + STACK_FRAME_OVERHEAD) | ||
51 | return 1; | ||
52 | /* | ||
53 | * sp could decrease when we jump off an interrupt stack | ||
54 | * back to the regular process stack. | ||
55 | */ | ||
56 | if ((sp & ~(THREAD_SIZE - 1)) != (prev_sp & ~(THREAD_SIZE - 1))) | ||
57 | return 1; | ||
58 | return 0; | ||
59 | } | ||
60 | |||
61 | static void perf_callchain_kernel(struct pt_regs *regs, | ||
62 | struct perf_callchain_entry *entry) | ||
63 | { | ||
64 | unsigned long sp, next_sp; | ||
65 | unsigned long next_ip; | ||
66 | unsigned long lr; | ||
67 | long level = 0; | ||
68 | unsigned long *fp; | ||
69 | |||
70 | lr = regs->link; | ||
71 | sp = regs->gpr[1]; | ||
72 | callchain_store(entry, PERF_CONTEXT_KERNEL); | ||
73 | callchain_store(entry, regs->nip); | ||
74 | |||
75 | if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD)) | ||
76 | return; | ||
77 | |||
78 | for (;;) { | ||
79 | fp = (unsigned long *) sp; | ||
80 | next_sp = fp[0]; | ||
81 | |||
82 | if (next_sp == sp + STACK_INT_FRAME_SIZE && | ||
83 | fp[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) { | ||
84 | /* | ||
85 | * This looks like an interrupt frame for an | ||
86 | * interrupt that occurred in the kernel | ||
87 | */ | ||
88 | regs = (struct pt_regs *)(sp + STACK_FRAME_OVERHEAD); | ||
89 | next_ip = regs->nip; | ||
90 | lr = regs->link; | ||
91 | level = 0; | ||
92 | callchain_store(entry, PERF_CONTEXT_KERNEL); | ||
93 | |||
94 | } else { | ||
95 | if (level == 0) | ||
96 | next_ip = lr; | ||
97 | else | ||
98 | next_ip = fp[STACK_FRAME_LR_SAVE]; | ||
99 | |||
100 | /* | ||
101 | * We can't tell which of the first two addresses | ||
102 | * we get are valid, but we can filter out the | ||
103 | * obviously bogus ones here. We replace them | ||
104 | * with 0 rather than removing them entirely so | ||
105 | * that userspace can tell which is which. | ||
106 | */ | ||
107 | if ((level == 1 && next_ip == lr) || | ||
108 | (level <= 1 && !kernel_text_address(next_ip))) | ||
109 | next_ip = 0; | ||
110 | |||
111 | ++level; | ||
112 | } | ||
113 | |||
114 | callchain_store(entry, next_ip); | ||
115 | if (!valid_next_sp(next_sp, sp)) | ||
116 | return; | ||
117 | sp = next_sp; | ||
118 | } | ||
119 | } | ||
120 | |||
121 | #ifdef CONFIG_PPC64 | ||
122 | |||
123 | #ifdef CONFIG_HUGETLB_PAGE | ||
124 | #define is_huge_psize(pagesize) (HPAGE_SHIFT && mmu_huge_psizes[pagesize]) | ||
125 | #else | ||
126 | #define is_huge_psize(pagesize) 0 | ||
127 | #endif | ||
128 | |||
129 | /* | ||
130 | * On 64-bit we don't want to invoke hash_page on user addresses from | ||
131 | * interrupt context, so if the access faults, we read the page tables | ||
132 | * to find which page (if any) is mapped and access it directly. | ||
133 | */ | ||
134 | static int read_user_stack_slow(void __user *ptr, void *ret, int nb) | ||
135 | { | ||
136 | pgd_t *pgdir; | ||
137 | pte_t *ptep, pte; | ||
138 | int pagesize; | ||
139 | unsigned long addr = (unsigned long) ptr; | ||
140 | unsigned long offset; | ||
141 | unsigned long pfn; | ||
142 | void *kaddr; | ||
143 | |||
144 | pgdir = current->mm->pgd; | ||
145 | if (!pgdir) | ||
146 | return -EFAULT; | ||
147 | |||
148 | pagesize = get_slice_psize(current->mm, addr); | ||
149 | |||
150 | /* align address to page boundary */ | ||
151 | offset = addr & ((1ul << mmu_psize_defs[pagesize].shift) - 1); | ||
152 | addr -= offset; | ||
153 | |||
154 | if (is_huge_psize(pagesize)) | ||
155 | ptep = huge_pte_offset(current->mm, addr); | ||
156 | else | ||
157 | ptep = find_linux_pte(pgdir, addr); | ||
158 | |||
159 | if (ptep == NULL) | ||
160 | return -EFAULT; | ||
161 | pte = *ptep; | ||
162 | if (!pte_present(pte) || !(pte_val(pte) & _PAGE_USER)) | ||
163 | return -EFAULT; | ||
164 | pfn = pte_pfn(pte); | ||
165 | if (!page_is_ram(pfn)) | ||
166 | return -EFAULT; | ||
167 | |||
168 | /* no highmem to worry about here */ | ||
169 | kaddr = pfn_to_kaddr(pfn); | ||
170 | memcpy(ret, kaddr + offset, nb); | ||
171 | return 0; | ||
172 | } | ||
173 | |||
174 | static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret) | ||
175 | { | ||
176 | if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned long) || | ||
177 | ((unsigned long)ptr & 7)) | ||
178 | return -EFAULT; | ||
179 | |||
180 | if (!__get_user_inatomic(*ret, ptr)) | ||
181 | return 0; | ||
182 | |||
183 | return read_user_stack_slow(ptr, ret, 8); | ||
184 | } | ||
185 | |||
186 | static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret) | ||
187 | { | ||
188 | if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) || | ||
189 | ((unsigned long)ptr & 3)) | ||
190 | return -EFAULT; | ||
191 | |||
192 | if (!__get_user_inatomic(*ret, ptr)) | ||
193 | return 0; | ||
194 | |||
195 | return read_user_stack_slow(ptr, ret, 4); | ||
196 | } | ||
197 | |||
198 | static inline int valid_user_sp(unsigned long sp, int is_64) | ||
199 | { | ||
200 | if (!sp || (sp & 7) || sp > (is_64 ? TASK_SIZE : 0x100000000UL) - 32) | ||
201 | return 0; | ||
202 | return 1; | ||
203 | } | ||
204 | |||
205 | /* | ||
206 | * 64-bit user processes use the same stack frame for RT and non-RT signals. | ||
207 | */ | ||
208 | struct signal_frame_64 { | ||
209 | char dummy[__SIGNAL_FRAMESIZE]; | ||
210 | struct ucontext uc; | ||
211 | unsigned long unused[2]; | ||
212 | unsigned int tramp[6]; | ||
213 | struct siginfo *pinfo; | ||
214 | void *puc; | ||
215 | struct siginfo info; | ||
216 | char abigap[288]; | ||
217 | }; | ||
218 | |||
219 | static int is_sigreturn_64_address(unsigned long nip, unsigned long fp) | ||
220 | { | ||
221 | if (nip == fp + offsetof(struct signal_frame_64, tramp)) | ||
222 | return 1; | ||
223 | if (vdso64_rt_sigtramp && current->mm->context.vdso_base && | ||
224 | nip == current->mm->context.vdso_base + vdso64_rt_sigtramp) | ||
225 | return 1; | ||
226 | return 0; | ||
227 | } | ||
228 | |||
229 | /* | ||
230 | * Do some sanity checking on the signal frame pointed to by sp. | ||
231 | * We check the pinfo and puc pointers in the frame. | ||
232 | */ | ||
233 | static int sane_signal_64_frame(unsigned long sp) | ||
234 | { | ||
235 | struct signal_frame_64 __user *sf; | ||
236 | unsigned long pinfo, puc; | ||
237 | |||
238 | sf = (struct signal_frame_64 __user *) sp; | ||
239 | if (read_user_stack_64((unsigned long __user *) &sf->pinfo, &pinfo) || | ||
240 | read_user_stack_64((unsigned long __user *) &sf->puc, &puc)) | ||
241 | return 0; | ||
242 | return pinfo == (unsigned long) &sf->info && | ||
243 | puc == (unsigned long) &sf->uc; | ||
244 | } | ||
245 | |||
246 | static void perf_callchain_user_64(struct pt_regs *regs, | ||
247 | struct perf_callchain_entry *entry) | ||
248 | { | ||
249 | unsigned long sp, next_sp; | ||
250 | unsigned long next_ip; | ||
251 | unsigned long lr; | ||
252 | long level = 0; | ||
253 | struct signal_frame_64 __user *sigframe; | ||
254 | unsigned long __user *fp, *uregs; | ||
255 | |||
256 | next_ip = regs->nip; | ||
257 | lr = regs->link; | ||
258 | sp = regs->gpr[1]; | ||
259 | callchain_store(entry, PERF_CONTEXT_USER); | ||
260 | callchain_store(entry, next_ip); | ||
261 | |||
262 | for (;;) { | ||
263 | fp = (unsigned long __user *) sp; | ||
264 | if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp)) | ||
265 | return; | ||
266 | if (level > 0 && read_user_stack_64(&fp[2], &next_ip)) | ||
267 | return; | ||
268 | |||
269 | /* | ||
270 | * Note: the next_sp - sp >= signal frame size check | ||
271 | * is true when next_sp < sp, which can happen when | ||
272 | * transitioning from an alternate signal stack to the | ||
273 | * normal stack. | ||
274 | */ | ||
275 | if (next_sp - sp >= sizeof(struct signal_frame_64) && | ||
276 | (is_sigreturn_64_address(next_ip, sp) || | ||
277 | (level <= 1 && is_sigreturn_64_address(lr, sp))) && | ||
278 | sane_signal_64_frame(sp)) { | ||
279 | /* | ||
280 | * This looks like a signal frame | ||
281 | */ | ||
282 | sigframe = (struct signal_frame_64 __user *) sp; | ||
283 | uregs = sigframe->uc.uc_mcontext.gp_regs; | ||
284 | if (read_user_stack_64(&uregs[PT_NIP], &next_ip) || | ||
285 | read_user_stack_64(&uregs[PT_LNK], &lr) || | ||
286 | read_user_stack_64(&uregs[PT_R1], &sp)) | ||
287 | return; | ||
288 | level = 0; | ||
289 | callchain_store(entry, PERF_CONTEXT_USER); | ||
290 | callchain_store(entry, next_ip); | ||
291 | continue; | ||
292 | } | ||
293 | |||
294 | if (level == 0) | ||
295 | next_ip = lr; | ||
296 | callchain_store(entry, next_ip); | ||
297 | ++level; | ||
298 | sp = next_sp; | ||
299 | } | ||
300 | } | ||
301 | |||
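The "next_sp - sp >= sizeof(...)" comparison above (and the analogous one in signal_frame_32_regs() below) leans on unsigned wraparound: when next_sp < sp, as happens coming off an alternate signal stack, the subtraction wraps to a huge value and the test still passes. A minimal standalone demonstration:

#include <stdio.h>

int main(void)
{
        unsigned long sp = 0x7ffff000UL, next_sp = 0x70000000UL; /* next_sp < sp */

        /* wraps to a very large value, so any ">= frame size" test passes */
        printf("%lu\n", next_sp - sp);
        return 0;
}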
302 | static inline int current_is_64bit(void) | ||
303 | { | ||
304 | /* | ||
305 | * We can't use test_thread_flag() here because we may be on an | ||
306 | * interrupt stack, and the thread flags don't get copied over | ||
307 | * from the thread_info on the main stack to the interrupt stack. | ||
308 | */ | ||
309 | return !test_ti_thread_flag(task_thread_info(current), TIF_32BIT); | ||
310 | } | ||
311 | |||
312 | #else /* CONFIG_PPC64 */ | ||
313 | /* | ||
314 | * On 32-bit we just access the address and let hash_page create a | ||
315 | * HPTE if necessary, so there is no need to fall back to reading | ||
316 | * the page tables. Since this is called at interrupt level, | ||
317 | * do_page_fault() won't treat a DSI as a page fault. | ||
318 | */ | ||
319 | static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret) | ||
320 | { | ||
321 | if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) || | ||
322 | ((unsigned long)ptr & 3)) | ||
323 | return -EFAULT; | ||
324 | |||
325 | return __get_user_inatomic(*ret, ptr); | ||
326 | } | ||
327 | |||
328 | static inline void perf_callchain_user_64(struct pt_regs *regs, | ||
329 | struct perf_callchain_entry *entry) | ||
330 | { | ||
331 | } | ||
332 | |||
333 | static inline int current_is_64bit(void) | ||
334 | { | ||
335 | return 0; | ||
336 | } | ||
337 | |||
338 | static inline int valid_user_sp(unsigned long sp, int is_64) | ||
339 | { | ||
340 | if (!sp || (sp & 7) || sp > TASK_SIZE - 32) | ||
341 | return 0; | ||
342 | return 1; | ||
343 | } | ||
344 | |||
345 | #define __SIGNAL_FRAMESIZE32 __SIGNAL_FRAMESIZE | ||
346 | #define sigcontext32 sigcontext | ||
347 | #define mcontext32 mcontext | ||
348 | #define ucontext32 ucontext | ||
349 | #define compat_siginfo_t struct siginfo | ||
350 | |||
351 | #endif /* CONFIG_PPC64 */ | ||
352 | |||
353 | /* | ||
354 | * Layout for non-RT signal frames | ||
355 | */ | ||
356 | struct signal_frame_32 { | ||
357 | char dummy[__SIGNAL_FRAMESIZE32]; | ||
358 | struct sigcontext32 sctx; | ||
359 | struct mcontext32 mctx; | ||
360 | int abigap[56]; | ||
361 | }; | ||
362 | |||
363 | /* | ||
364 | * Layout for RT signal frames | ||
365 | */ | ||
366 | struct rt_signal_frame_32 { | ||
367 | char dummy[__SIGNAL_FRAMESIZE32 + 16]; | ||
368 | compat_siginfo_t info; | ||
369 | struct ucontext32 uc; | ||
370 | int abigap[56]; | ||
371 | }; | ||
372 | |||
373 | static int is_sigreturn_32_address(unsigned int nip, unsigned int fp) | ||
374 | { | ||
375 | if (nip == fp + offsetof(struct signal_frame_32, mctx.mc_pad)) | ||
376 | return 1; | ||
377 | if (vdso32_sigtramp && current->mm->context.vdso_base && | ||
378 | nip == current->mm->context.vdso_base + vdso32_sigtramp) | ||
379 | return 1; | ||
380 | return 0; | ||
381 | } | ||
382 | |||
383 | static int is_rt_sigreturn_32_address(unsigned int nip, unsigned int fp) | ||
384 | { | ||
385 | if (nip == fp + offsetof(struct rt_signal_frame_32, | ||
386 | uc.uc_mcontext.mc_pad)) | ||
387 | return 1; | ||
388 | if (vdso32_rt_sigtramp && current->mm->context.vdso_base && | ||
389 | nip == current->mm->context.vdso_base + vdso32_rt_sigtramp) | ||
390 | return 1; | ||
391 | return 0; | ||
392 | } | ||
393 | |||
394 | static int sane_signal_32_frame(unsigned int sp) | ||
395 | { | ||
396 | struct signal_frame_32 __user *sf; | ||
397 | unsigned int regs; | ||
398 | |||
399 | sf = (struct signal_frame_32 __user *) (unsigned long) sp; | ||
400 | if (read_user_stack_32((unsigned int __user *) &sf->sctx.regs, ®s)) | ||
401 | return 0; | ||
402 | return regs == (unsigned long) &sf->mctx; | ||
403 | } | ||
404 | |||
405 | static int sane_rt_signal_32_frame(unsigned int sp) | ||
406 | { | ||
407 | struct rt_signal_frame_32 __user *sf; | ||
408 | unsigned int regs; | ||
409 | |||
410 | sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp; | ||
411 | if (read_user_stack_32((unsigned int __user *) &sf->uc.uc_regs, ®s)) | ||
412 | return 0; | ||
413 | return regs == (unsigned long) &sf->uc.uc_mcontext; | ||
414 | } | ||
415 | |||
416 | static unsigned int __user *signal_frame_32_regs(unsigned int sp, | ||
417 | unsigned int next_sp, unsigned int next_ip) | ||
418 | { | ||
419 | struct mcontext32 __user *mctx = NULL; | ||
420 | struct signal_frame_32 __user *sf; | ||
421 | struct rt_signal_frame_32 __user *rt_sf; | ||
422 | |||
423 | /* | ||
424 | * Note: the next_sp - sp >= signal frame size check | ||
425 | * is true when next_sp < sp, for example, when | ||
426 | * transitioning from an alternate signal stack to the | ||
427 | * normal stack. | ||
428 | */ | ||
429 | if (next_sp - sp >= sizeof(struct signal_frame_32) && | ||
430 | is_sigreturn_32_address(next_ip, sp) && | ||
431 | sane_signal_32_frame(sp)) { | ||
432 | sf = (struct signal_frame_32 __user *) (unsigned long) sp; | ||
433 | mctx = &sf->mctx; | ||
434 | } | ||
435 | |||
436 | if (!mctx && next_sp - sp >= sizeof(struct rt_signal_frame_32) && | ||
437 | is_rt_sigreturn_32_address(next_ip, sp) && | ||
438 | sane_rt_signal_32_frame(sp)) { | ||
439 | rt_sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp; | ||
440 | mctx = &rt_sf->uc.uc_mcontext; | ||
441 | } | ||
442 | |||
443 | if (!mctx) | ||
444 | return NULL; | ||
445 | return mctx->mc_gregs; | ||
446 | } | ||
447 | |||
448 | static void perf_callchain_user_32(struct pt_regs *regs, | ||
449 | struct perf_callchain_entry *entry) | ||
450 | { | ||
451 | unsigned int sp, next_sp; | ||
452 | unsigned int next_ip; | ||
453 | unsigned int lr; | ||
454 | long level = 0; | ||
455 | unsigned int __user *fp, *uregs; | ||
456 | |||
457 | next_ip = regs->nip; | ||
458 | lr = regs->link; | ||
459 | sp = regs->gpr[1]; | ||
460 | callchain_store(entry, PERF_CONTEXT_USER); | ||
461 | callchain_store(entry, next_ip); | ||
462 | |||
463 | while (entry->nr < PERF_MAX_STACK_DEPTH) { | ||
464 | fp = (unsigned int __user *) (unsigned long) sp; | ||
465 | if (!valid_user_sp(sp, 0) || read_user_stack_32(fp, &next_sp)) | ||
466 | return; | ||
467 | if (level > 0 && read_user_stack_32(&fp[1], &next_ip)) | ||
468 | return; | ||
469 | |||
470 | uregs = signal_frame_32_regs(sp, next_sp, next_ip); | ||
471 | if (!uregs && level <= 1) | ||
472 | uregs = signal_frame_32_regs(sp, next_sp, lr); | ||
473 | if (uregs) { | ||
474 | /* | ||
475 | * This looks like a signal frame, so restart | ||
476 | * the stack trace with the values in it. | ||
477 | */ | ||
478 | if (read_user_stack_32(&uregs[PT_NIP], &next_ip) || | ||
479 | read_user_stack_32(&uregs[PT_LNK], &lr) || | ||
480 | read_user_stack_32(&uregs[PT_R1], &sp)) | ||
481 | return; | ||
482 | level = 0; | ||
483 | callchain_store(entry, PERF_CONTEXT_USER); | ||
484 | callchain_store(entry, next_ip); | ||
485 | continue; | ||
486 | } | ||
487 | |||
488 | if (level == 0) | ||
489 | next_ip = lr; | ||
490 | callchain_store(entry, next_ip); | ||
491 | ++level; | ||
492 | sp = next_sp; | ||
493 | } | ||
494 | } | ||
495 | |||
496 | /* | ||
497 | * Since we can't get PMU interrupts inside a PMU interrupt handler, | ||
498 | * we don't need separate irq and nmi entries here. | ||
499 | */ | ||
500 | static DEFINE_PER_CPU(struct perf_callchain_entry, callchain); | ||
501 | |||
502 | struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) | ||
503 | { | ||
504 | struct perf_callchain_entry *entry = &__get_cpu_var(callchain); | ||
505 | |||
506 | entry->nr = 0; | ||
507 | |||
508 | if (current->pid == 0) /* idle task? */ | ||
509 | return entry; | ||
510 | |||
511 | if (!user_mode(regs)) { | ||
512 | perf_callchain_kernel(regs, entry); | ||
513 | if (current->mm) | ||
514 | regs = task_pt_regs(current); | ||
515 | else | ||
516 | regs = NULL; | ||
517 | } | ||
518 | |||
519 | if (regs) { | ||
520 | if (current_is_64bit()) | ||
521 | perf_callchain_user_64(regs, entry); | ||
522 | else | ||
523 | perf_callchain_user_32(regs, entry); | ||
524 | } | ||
525 | |||
526 | return entry; | ||
527 | } | ||
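For orientation, a hypothetical consumer-side sketch of how the chain built above is read back: the PERF_CONTEXT_* markers are sentinel values stored in the same ip[] array as real addresses ((u64)-128 and (u64)-512 in the perf_counter ABI of this era), so a reader splits the chain wherever it sees one.

#include <stdio.h>

#define PERF_CONTEXT_KERNEL ((unsigned long long)-128)
#define PERF_CONTEXT_USER   ((unsigned long long)-512)

static void dump_chain(const unsigned long long *ip, unsigned int nr)
{
        unsigned int i;

        for (i = 0; i < nr; i++) {
                if (ip[i] == PERF_CONTEXT_KERNEL)
                        printf("kernel:\n");
                else if (ip[i] == PERF_CONTEXT_USER)
                        printf("user:\n");
                else
                        printf("  %#llx\n", ip[i]);
        }
}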
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c index 5b7038f248b6..a685652effeb 100644 --- a/arch/powerpc/mm/slb.c +++ b/arch/powerpc/mm/slb.c | |||
@@ -92,15 +92,13 @@ static inline void create_shadowed_slbe(unsigned long ea, int ssize, | |||
92 | : "memory" ); | 92 | : "memory" ); |
93 | } | 93 | } |
94 | 94 | ||
95 | void slb_flush_and_rebolt(void) | 95 | static void __slb_flush_and_rebolt(void) |
96 | { | 96 | { |
97 | /* If you change this make sure you change SLB_NUM_BOLTED | 97 | /* If you change this make sure you change SLB_NUM_BOLTED |
98 | * appropriately too. */ | 98 | * appropriately too. */ |
99 | unsigned long linear_llp, vmalloc_llp, lflags, vflags; | 99 | unsigned long linear_llp, vmalloc_llp, lflags, vflags; |
100 | unsigned long ksp_esid_data, ksp_vsid_data; | 100 | unsigned long ksp_esid_data, ksp_vsid_data; |
101 | 101 | ||
102 | WARN_ON(!irqs_disabled()); | ||
103 | |||
104 | linear_llp = mmu_psize_defs[mmu_linear_psize].sllp; | 102 | linear_llp = mmu_psize_defs[mmu_linear_psize].sllp; |
105 | vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp; | 103 | vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp; |
106 | lflags = SLB_VSID_KERNEL | linear_llp; | 104 | lflags = SLB_VSID_KERNEL | linear_llp; |
@@ -117,12 +115,6 @@ void slb_flush_and_rebolt(void) | |||
117 | ksp_vsid_data = get_slb_shadow()->save_area[2].vsid; | 115 | ksp_vsid_data = get_slb_shadow()->save_area[2].vsid; |
118 | } | 116 | } |
119 | 117 | ||
120 | /* | ||
121 | * We can't take a PMU exception in the following code, so hard | ||
122 | * disable interrupts. | ||
123 | */ | ||
124 | hard_irq_disable(); | ||
125 | |||
126 | /* We need to do this all in asm, so we're sure we don't touch | 118 | /* We need to do this all in asm, so we're sure we don't touch |
127 | * the stack between the slbia and rebolting it. */ | 119 | * the stack between the slbia and rebolting it. */ |
128 | asm volatile("isync\n" | 120 | asm volatile("isync\n" |
@@ -139,6 +131,21 @@ void slb_flush_and_rebolt(void) | |||
139 | : "memory"); | 131 | : "memory"); |
140 | } | 132 | } |
141 | 133 | ||
134 | void slb_flush_and_rebolt(void) | ||
135 | { | ||
136 | |||
137 | WARN_ON(!irqs_disabled()); | ||
138 | |||
139 | /* | ||
140 | * We can't take a PMU exception in the following code, so hard | ||
141 | * disable interrupts. | ||
142 | */ | ||
143 | hard_irq_disable(); | ||
144 | |||
145 | __slb_flush_and_rebolt(); | ||
146 | get_paca()->slb_cache_ptr = 0; | ||
147 | } | ||
148 | |||
142 | void slb_vmalloc_update(void) | 149 | void slb_vmalloc_update(void) |
143 | { | 150 | { |
144 | unsigned long vflags; | 151 | unsigned long vflags; |
@@ -180,12 +187,20 @@ static inline int esids_match(unsigned long addr1, unsigned long addr2) | |||
180 | /* Flush all user entries from the segment table of the current processor. */ | 187 | /* Flush all user entries from the segment table of the current processor. */ |
181 | void switch_slb(struct task_struct *tsk, struct mm_struct *mm) | 188 | void switch_slb(struct task_struct *tsk, struct mm_struct *mm) |
182 | { | 189 | { |
183 | unsigned long offset = get_paca()->slb_cache_ptr; | 190 | unsigned long offset; |
184 | unsigned long slbie_data = 0; | 191 | unsigned long slbie_data = 0; |
185 | unsigned long pc = KSTK_EIP(tsk); | 192 | unsigned long pc = KSTK_EIP(tsk); |
186 | unsigned long stack = KSTK_ESP(tsk); | 193 | unsigned long stack = KSTK_ESP(tsk); |
187 | unsigned long unmapped_base; | 194 | unsigned long unmapped_base; |
188 | 195 | ||
196 | /* | ||
197 | * We need interrupts hard-disabled here, not just soft-disabled, | ||
198 | * so that a PMU interrupt can't occur, which might try to access | ||
199 | * user memory (to get a stack trace) and possibly cause an SLB miss | ||
200 | * which would update the slb_cache/slb_cache_ptr fields in the PACA. | ||
201 | */ | ||
202 | hard_irq_disable(); | ||
203 | offset = get_paca()->slb_cache_ptr; | ||
189 | if (!cpu_has_feature(CPU_FTR_NO_SLBIE_B) && | 204 | if (!cpu_has_feature(CPU_FTR_NO_SLBIE_B) && |
190 | offset <= SLB_CACHE_ENTRIES) { | 205 | offset <= SLB_CACHE_ENTRIES) { |
191 | int i; | 206 | int i; |
@@ -200,7 +215,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm) | |||
200 | } | 215 | } |
201 | asm volatile("isync" : : : "memory"); | 216 | asm volatile("isync" : : : "memory"); |
202 | } else { | 217 | } else { |
203 | slb_flush_and_rebolt(); | 218 | __slb_flush_and_rebolt(); |
204 | } | 219 | } |
205 | 220 | ||
206 | /* Workaround POWER5 < DD2.1 issue */ | 221 | /* Workaround POWER5 < DD2.1 issue */ |
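The comments added to switch_slb() here and to switch_stab() in the next hunk describe a window that soft-disabling alone cannot close. An illustrative interleaving (not from the source), under the assumption that the PMU handler walks user memory for a callchain:

/*
 * Without hard_irq_disable() (illustrative):
 *
 *   switch_slb()                        PMU interrupt (soft-disabled)
 *   ------------                        -----------------------------
 *   offset = paca->slb_cache_ptr;
 *                                       touches user stack -> SLB miss
 *                                       -> slb_cache_ptr advances
 *   slbie loop uses the stale offset    (the new entry escapes the flush)
 *
 * Hard-disabling first makes the read and the flush atomic with
 * respect to PMU interrupts:
 */
hard_irq_disable();
offset = get_paca()->slb_cache_ptr;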
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c index 98cd1dc2ae75..ab5fb48b3e90 100644 --- a/arch/powerpc/mm/stab.c +++ b/arch/powerpc/mm/stab.c | |||
@@ -164,7 +164,7 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm) | |||
164 | { | 164 | { |
165 | struct stab_entry *stab = (struct stab_entry *) get_paca()->stab_addr; | 165 | struct stab_entry *stab = (struct stab_entry *) get_paca()->stab_addr; |
166 | struct stab_entry *ste; | 166 | struct stab_entry *ste; |
167 | unsigned long offset = __get_cpu_var(stab_cache_ptr); | 167 | unsigned long offset; |
168 | unsigned long pc = KSTK_EIP(tsk); | 168 | unsigned long pc = KSTK_EIP(tsk); |
169 | unsigned long stack = KSTK_ESP(tsk); | 169 | unsigned long stack = KSTK_ESP(tsk); |
170 | unsigned long unmapped_base; | 170 | unsigned long unmapped_base; |
@@ -172,6 +172,15 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm) | |||
172 | /* Force previous translations to complete. DRENG */ | 172 | /* Force previous translations to complete. DRENG */ |
173 | asm volatile("isync" : : : "memory"); | 173 | asm volatile("isync" : : : "memory"); |
174 | 174 | ||
175 | /* | ||
176 | * We need interrupts hard-disabled here, not just soft-disabled, | ||
177 | * so that a PMU interrupt can't occur, which might try to access | ||
178 | * user memory (to get a stack trace) and possibly cause an STAB miss | ||
179 | * which would update the stab_cache/stab_cache_ptr per-cpu variables. | ||
180 | */ | ||
181 | hard_irq_disable(); | ||
182 | |||
183 | offset = __get_cpu_var(stab_cache_ptr); | ||
175 | if (offset <= NR_STAB_CACHE_ENTRIES) { | 184 | if (offset <= NR_STAB_CACHE_ENTRIES) { |
176 | int i; | 185 | int i; |
177 | 186 | ||
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c index 237e3654f48c..464271bea6c9 100644 --- a/arch/powerpc/sysdev/qe_lib/qe.c +++ b/arch/powerpc/sysdev/qe_lib/qe.c | |||
@@ -65,6 +65,19 @@ static unsigned int qe_num_of_snum; | |||
65 | 65 | ||
66 | static phys_addr_t qebase = -1; | 66 | static phys_addr_t qebase = -1; |
67 | 67 | ||
68 | int qe_alive_during_sleep(void) | ||
69 | { | ||
70 | static int ret = -1; | ||
71 | |||
72 | if (ret != -1) | ||
73 | return ret; | ||
74 | |||
75 | ret = !of_find_compatible_node(NULL, NULL, "fsl,mpc8569-pmc"); | ||
76 | |||
77 | return ret; | ||
78 | } | ||
79 | EXPORT_SYMBOL(qe_alive_during_sleep); | ||
80 | |||
68 | phys_addr_t get_qe_base(void) | 81 | phys_addr_t get_qe_base(void) |
69 | { | 82 | { |
70 | struct device_node *qe; | 83 | struct device_node *qe; |
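qe_alive_during_sleep() above memoizes a device-tree probe in a function-local static, which is safe because the device tree does not change at run time. The same pattern as a standalone sketch (probe_device_tree() is a hypothetical stand-in for the of_find_compatible_node() test):

int probe_device_tree(void);    /* hypothetical; returns 0 or 1 */

static int cached_answer(void)
{
        static int ret = -1;    /* -1: not computed yet */

        if (ret == -1)
                ret = probe_device_tree();
        return ret;
}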
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 2ae5d72f47ed..1c866efd217d 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -84,7 +84,7 @@ config S390 | |||
84 | select HAVE_FUNCTION_TRACER | 84 | select HAVE_FUNCTION_TRACER |
85 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST | 85 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST |
86 | select HAVE_FTRACE_MCOUNT_RECORD | 86 | select HAVE_FTRACE_MCOUNT_RECORD |
87 | select HAVE_FTRACE_SYSCALLS | 87 | select HAVE_SYSCALL_TRACEPOINTS |
88 | select HAVE_DYNAMIC_FTRACE | 88 | select HAVE_DYNAMIC_FTRACE |
89 | select HAVE_FUNCTION_GRAPH_TRACER | 89 | select HAVE_FUNCTION_GRAPH_TRACER |
90 | select HAVE_DEFAULT_NO_SPIN_MUTEXES | 90 | select HAVE_DEFAULT_NO_SPIN_MUTEXES |
@@ -95,7 +95,6 @@ config S390 | |||
95 | select HAVE_ARCH_TRACEHOOK | 95 | select HAVE_ARCH_TRACEHOOK |
96 | select INIT_ALL_POSSIBLE | 96 | select INIT_ALL_POSSIBLE |
97 | select HAVE_PERF_COUNTERS | 97 | select HAVE_PERF_COUNTERS |
98 | select GENERIC_ATOMIC64 if !64BIT | ||
99 | 98 | ||
100 | config SCHED_OMIT_FRAME_POINTER | 99 | config SCHED_OMIT_FRAME_POINTER |
101 | bool | 100 | bool |
@@ -481,13 +480,6 @@ config CMM_IUCV | |||
481 | Select this option to enable the special message interface to | 480 | Select this option to enable the special message interface to |
482 | the cooperative memory management. | 481 | the cooperative memory management. |
483 | 482 | ||
484 | config PAGE_STATES | ||
485 | bool "Unused page notification" | ||
486 | help | ||
487 | This enables the notification of unused pages to the | ||
488 | hypervisor. The ESSA instruction is used to do the state | ||
489 | changes between a page that has content and the unused state. | ||
490 | |||
491 | config APPLDATA_BASE | 483 | config APPLDATA_BASE |
492 | bool "Linux - VM Monitor Stream, base infrastructure" | 484 | bool "Linux - VM Monitor Stream, base infrastructure" |
493 | depends on PROC_FS | 485 | depends on PROC_FS |
diff --git a/arch/s390/Makefile b/arch/s390/Makefile index 0ff387cebf88..fc8fb20e7fc0 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile | |||
@@ -88,8 +88,7 @@ LDFLAGS_vmlinux := -e start | |||
88 | head-y := arch/s390/kernel/head.o arch/s390/kernel/init_task.o | 88 | head-y := arch/s390/kernel/head.o arch/s390/kernel/init_task.o |
89 | 89 | ||
90 | core-y += arch/s390/mm/ arch/s390/kernel/ arch/s390/crypto/ \ | 90 | core-y += arch/s390/mm/ arch/s390/kernel/ arch/s390/crypto/ \ |
91 | arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/ \ | 91 | arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/ |
92 | arch/s390/power/ | ||
93 | 92 | ||
94 | libs-y += arch/s390/lib/ | 93 | libs-y += arch/s390/lib/ |
95 | drivers-y += drivers/s390/ | 94 | drivers-y += drivers/s390/ |
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c index 4aba83b31596..2bc479ab3a66 100644 --- a/arch/s390/crypto/des_s390.c +++ b/arch/s390/crypto/des_s390.c | |||
@@ -250,8 +250,9 @@ static int des3_128_setkey(struct crypto_tfm *tfm, const u8 *key, | |||
250 | const u8 *temp_key = key; | 250 | const u8 *temp_key = key; |
251 | u32 *flags = &tfm->crt_flags; | 251 | u32 *flags = &tfm->crt_flags; |
252 | 252 | ||
253 | if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE))) { | 253 | if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE)) && |
254 | *flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED; | 254 | (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { |
255 | *flags |= CRYPTO_TFM_RES_WEAK_KEY; | ||
255 | return -EINVAL; | 256 | return -EINVAL; |
256 | } | 257 | } |
257 | for (i = 0; i < 2; i++, temp_key += DES_KEY_SIZE) { | 258 | for (i = 0; i < 2; i++, temp_key += DES_KEY_SIZE) { |
@@ -411,9 +412,9 @@ static int des3_192_setkey(struct crypto_tfm *tfm, const u8 *key, | |||
411 | 412 | ||
412 | if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) && | 413 | if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) && |
413 | memcmp(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2], | 414 | memcmp(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2], |
414 | DES_KEY_SIZE))) { | 415 | DES_KEY_SIZE)) && |
415 | 416 | (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { | |
416 | *flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED; | 417 | *flags |= CRYPTO_TFM_RES_WEAK_KEY; |
417 | return -EINVAL; | 418 | return -EINVAL; |
418 | } | 419 | } |
419 | for (i = 0; i < 3; i++, temp_key += DES_KEY_SIZE) { | 420 | for (i = 0; i < 3; i++, temp_key += DES_KEY_SIZE) { |
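Both setkey paths now gate the degenerate-key rejection on the caller's request. Condensed, the new policy reads as below (a sketch; a 3DES key whose DES-sized halves repeat collapses to single DES, which is why it counts as weak):

/* Sketch of the new check in des3_128_setkey(): reject the repeated-half
 * key only when the caller opted in via CRYPTO_TFM_REQ_WEAK_KEY, and
 * report it as a weak key rather than a bad key schedule. */
if (!memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
    (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
        *flags |= CRYPTO_TFM_RES_WEAK_KEY;
        return -EINVAL;
}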
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c index e85ba348722a..f6de7826c979 100644 --- a/arch/s390/crypto/sha1_s390.c +++ b/arch/s390/crypto/sha1_s390.c | |||
@@ -46,12 +46,38 @@ static int sha1_init(struct shash_desc *desc) | |||
46 | return 0; | 46 | return 0; |
47 | } | 47 | } |
48 | 48 | ||
49 | static int sha1_export(struct shash_desc *desc, void *out) | ||
50 | { | ||
51 | struct s390_sha_ctx *sctx = shash_desc_ctx(desc); | ||
52 | struct sha1_state *octx = out; | ||
53 | |||
54 | octx->count = sctx->count; | ||
55 | memcpy(octx->state, sctx->state, sizeof(octx->state)); | ||
56 | memcpy(octx->buffer, sctx->buf, sizeof(octx->buffer)); | ||
57 | return 0; | ||
58 | } | ||
59 | |||
60 | static int sha1_import(struct shash_desc *desc, const void *in) | ||
61 | { | ||
62 | struct s390_sha_ctx *sctx = shash_desc_ctx(desc); | ||
63 | const struct sha1_state *ictx = in; | ||
64 | |||
65 | sctx->count = ictx->count; | ||
66 | memcpy(sctx->state, ictx->state, sizeof(ictx->state)); | ||
67 | memcpy(sctx->buf, ictx->buffer, sizeof(ictx->buffer)); | ||
68 | sctx->func = KIMD_SHA_1; | ||
69 | return 0; | ||
70 | } | ||
71 | |||
49 | static struct shash_alg alg = { | 72 | static struct shash_alg alg = { |
50 | .digestsize = SHA1_DIGEST_SIZE, | 73 | .digestsize = SHA1_DIGEST_SIZE, |
51 | .init = sha1_init, | 74 | .init = sha1_init, |
52 | .update = s390_sha_update, | 75 | .update = s390_sha_update, |
53 | .final = s390_sha_final, | 76 | .final = s390_sha_final, |
77 | .export = sha1_export, | ||
78 | .import = sha1_import, | ||
54 | .descsize = sizeof(struct s390_sha_ctx), | 79 | .descsize = sizeof(struct s390_sha_ctx), |
80 | .statesize = sizeof(struct sha1_state), | ||
55 | .base = { | 81 | .base = { |
56 | .cra_name = "sha1", | 82 | .cra_name = "sha1", |
57 | .cra_driver_name= "sha1-s390", | 83 | .cra_driver_name= "sha1-s390", |
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c index f9fefc569632..61a7db372121 100644 --- a/arch/s390/crypto/sha256_s390.c +++ b/arch/s390/crypto/sha256_s390.c | |||
@@ -42,12 +42,38 @@ static int sha256_init(struct shash_desc *desc) | |||
42 | return 0; | 42 | return 0; |
43 | } | 43 | } |
44 | 44 | ||
45 | static int sha256_export(struct shash_desc *desc, void *out) | ||
46 | { | ||
47 | struct s390_sha_ctx *sctx = shash_desc_ctx(desc); | ||
48 | struct sha256_state *octx = out; | ||
49 | |||
50 | octx->count = sctx->count; | ||
51 | memcpy(octx->state, sctx->state, sizeof(octx->state)); | ||
52 | memcpy(octx->buf, sctx->buf, sizeof(octx->buf)); | ||
53 | return 0; | ||
54 | } | ||
55 | |||
56 | static int sha256_import(struct shash_desc *desc, const void *in) | ||
57 | { | ||
58 | struct s390_sha_ctx *sctx = shash_desc_ctx(desc); | ||
59 | const struct sha256_state *ictx = in; | ||
60 | |||
61 | sctx->count = ictx->count; | ||
62 | memcpy(sctx->state, ictx->state, sizeof(ictx->state)); | ||
63 | memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf)); | ||
64 | sctx->func = KIMD_SHA_256; | ||
65 | return 0; | ||
66 | } | ||
67 | |||
45 | static struct shash_alg alg = { | 68 | static struct shash_alg alg = { |
46 | .digestsize = SHA256_DIGEST_SIZE, | 69 | .digestsize = SHA256_DIGEST_SIZE, |
47 | .init = sha256_init, | 70 | .init = sha256_init, |
48 | .update = s390_sha_update, | 71 | .update = s390_sha_update, |
49 | .final = s390_sha_final, | 72 | .final = s390_sha_final, |
73 | .export = sha256_export, | ||
74 | .import = sha256_import, | ||
50 | .descsize = sizeof(struct s390_sha_ctx), | 75 | .descsize = sizeof(struct s390_sha_ctx), |
76 | .statesize = sizeof(struct sha256_state), | ||
51 | .base = { | 77 | .base = { |
52 | .cra_name = "sha256", | 78 | .cra_name = "sha256", |
53 | .cra_driver_name= "sha256-s390", | 79 | .cra_driver_name= "sha256-s390", |
diff --git a/arch/s390/crypto/sha512_s390.c b/arch/s390/crypto/sha512_s390.c index 83192bfc8048..4bf73d0dc525 100644 --- a/arch/s390/crypto/sha512_s390.c +++ b/arch/s390/crypto/sha512_s390.c | |||
@@ -13,7 +13,10 @@ | |||
13 | * | 13 | * |
14 | */ | 14 | */ |
15 | #include <crypto/internal/hash.h> | 15 | #include <crypto/internal/hash.h> |
16 | #include <crypto/sha.h> | ||
17 | #include <linux/errno.h> | ||
16 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/kernel.h> | ||
17 | #include <linux/module.h> | 20 | #include <linux/module.h> |
18 | 21 | ||
19 | #include "sha.h" | 22 | #include "sha.h" |
@@ -37,12 +40,42 @@ static int sha512_init(struct shash_desc *desc) | |||
37 | return 0; | 40 | return 0; |
38 | } | 41 | } |
39 | 42 | ||
43 | static int sha512_export(struct shash_desc *desc, void *out) | ||
44 | { | ||
45 | struct s390_sha_ctx *sctx = shash_desc_ctx(desc); | ||
46 | struct sha512_state *octx = out; | ||
47 | |||
48 | octx->count[0] = sctx->count; | ||
49 | octx->count[1] = 0; | ||
50 | memcpy(octx->state, sctx->state, sizeof(octx->state)); | ||
51 | memcpy(octx->buf, sctx->buf, sizeof(octx->buf)); | ||
52 | return 0; | ||
53 | } | ||
54 | |||
55 | static int sha512_import(struct shash_desc *desc, const void *in) | ||
56 | { | ||
57 | struct s390_sha_ctx *sctx = shash_desc_ctx(desc); | ||
58 | const struct sha512_state *ictx = in; | ||
59 | |||
60 | if (unlikely(ictx->count[1])) | ||
61 | return -ERANGE; | ||
62 | sctx->count = ictx->count[0]; | ||
63 | |||
64 | memcpy(sctx->state, ictx->state, sizeof(ictx->state)); | ||
65 | memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf)); | ||
66 | sctx->func = KIMD_SHA_512; | ||
67 | return 0; | ||
68 | } | ||
69 | |||
40 | static struct shash_alg sha512_alg = { | 70 | static struct shash_alg sha512_alg = { |
41 | .digestsize = SHA512_DIGEST_SIZE, | 71 | .digestsize = SHA512_DIGEST_SIZE, |
42 | .init = sha512_init, | 72 | .init = sha512_init, |
43 | .update = s390_sha_update, | 73 | .update = s390_sha_update, |
44 | .final = s390_sha_final, | 74 | .final = s390_sha_final, |
75 | .export = sha512_export, | ||
76 | .import = sha512_import, | ||
45 | .descsize = sizeof(struct s390_sha_ctx), | 77 | .descsize = sizeof(struct s390_sha_ctx), |
78 | .statesize = sizeof(struct sha512_state), | ||
46 | .base = { | 79 | .base = { |
47 | .cra_name = "sha512", | 80 | .cra_name = "sha512", |
48 | .cra_driver_name= "sha512-s390", | 81 | .cra_driver_name= "sha512-s390", |
@@ -78,7 +111,10 @@ static struct shash_alg sha384_alg = { | |||
78 | .init = sha384_init, | 111 | .init = sha384_init, |
79 | .update = s390_sha_update, | 112 | .update = s390_sha_update, |
80 | .final = s390_sha_final, | 113 | .final = s390_sha_final, |
114 | .export = sha512_export, | ||
115 | .import = sha512_import, | ||
81 | .descsize = sizeof(struct s390_sha_ctx), | 116 | .descsize = sizeof(struct s390_sha_ctx), |
117 | .statesize = sizeof(struct sha512_state), | ||
82 | .base = { | 118 | .base = { |
83 | .cra_name = "sha384", | 119 | .cra_name = "sha384", |
84 | .cra_driver_name= "sha384-s390", | 120 | .cra_driver_name= "sha384-s390", |
diff --git a/arch/s390/defconfig b/arch/s390/defconfig index fcba206529f3..4e91a2573cc4 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig | |||
@@ -900,7 +900,7 @@ CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y | |||
900 | CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y | 900 | CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y |
901 | CONFIG_HAVE_DYNAMIC_FTRACE=y | 901 | CONFIG_HAVE_DYNAMIC_FTRACE=y |
902 | CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y | 902 | CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y |
903 | CONFIG_HAVE_FTRACE_SYSCALLS=y | 903 | CONFIG_HAVE_SYSCALL_TRACEPOINTS=y |
904 | CONFIG_TRACING_SUPPORT=y | 904 | CONFIG_TRACING_SUPPORT=y |
905 | CONFIG_FTRACE=y | 905 | CONFIG_FTRACE=y |
906 | # CONFIG_FUNCTION_TRACER is not set | 906 | # CONFIG_FUNCTION_TRACER is not set |
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c index 5a805df216bb..bd9914b89488 100644 --- a/arch/s390/hypfs/inode.c +++ b/arch/s390/hypfs/inode.c | |||
@@ -355,11 +355,7 @@ static struct dentry *hypfs_create_file(struct super_block *sb, | |||
355 | { | 355 | { |
356 | struct dentry *dentry; | 356 | struct dentry *dentry; |
357 | struct inode *inode; | 357 | struct inode *inode; |
358 | struct qstr qname; | ||
359 | 358 | ||
360 | qname.name = name; | ||
361 | qname.len = strlen(name); | ||
362 | qname.hash = full_name_hash(name, qname.len); | ||
363 | mutex_lock(&parent->d_inode->i_mutex); | 359 | mutex_lock(&parent->d_inode->i_mutex); |
364 | dentry = lookup_one_len(name, parent, strlen(name)); | 360 | dentry = lookup_one_len(name, parent, strlen(name)); |
365 | if (IS_ERR(dentry)) { | 361 | if (IS_ERR(dentry)) { |
@@ -426,7 +422,7 @@ struct dentry *hypfs_create_u64(struct super_block *sb, struct dentry *dir, | |||
426 | char tmp[TMP_SIZE]; | 422 | char tmp[TMP_SIZE]; |
427 | struct dentry *dentry; | 423 | struct dentry *dentry; |
428 | 424 | ||
429 | snprintf(tmp, TMP_SIZE, "%lld\n", (unsigned long long int)value); | 425 | snprintf(tmp, TMP_SIZE, "%llu\n", (unsigned long long int)value); |
430 | buffer = kstrdup(tmp, GFP_KERNEL); | 426 | buffer = kstrdup(tmp, GFP_KERNEL); |
431 | if (!buffer) | 427 | if (!buffer) |
432 | return ERR_PTR(-ENOMEM); | 428 | return ERR_PTR(-ENOMEM); |
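The one-character %lld -> %llu change matters once the top bit of the u64 is set: on two's-complement ABIs the same bit pattern prints as a huge negative number under the signed conversion. A standalone demonstration:

#include <stdio.h>

int main(void)
{
        unsigned long long v = 1ULL << 63;

        printf("%llu\n", v);                /* 9223372036854775808 */
        printf("%lld\n", (long long)v);     /* -9223372036854775808 */
        return 0;
}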
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h index c7d0abfb0f00..ae7c8f9f94a5 100644 --- a/arch/s390/include/asm/atomic.h +++ b/arch/s390/include/asm/atomic.h | |||
@@ -1,33 +1,23 @@ | |||
1 | #ifndef __ARCH_S390_ATOMIC__ | 1 | #ifndef __ARCH_S390_ATOMIC__ |
2 | #define __ARCH_S390_ATOMIC__ | 2 | #define __ARCH_S390_ATOMIC__ |
3 | 3 | ||
4 | #include <linux/compiler.h> | ||
5 | #include <linux/types.h> | ||
6 | |||
7 | /* | 4 | /* |
8 | * include/asm-s390/atomic.h | 5 | * Copyright 1999,2009 IBM Corp. |
6 | * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>, | ||
7 | * Denis Joseph Barrow, | ||
8 | * Arnd Bergmann <arndb@de.ibm.com>, | ||
9 | * | 9 | * |
10 | * S390 version | 10 | * Atomic operations that C can't guarantee us. |
11 | * Copyright (C) 1999-2005 IBM Deutschland Entwicklung GmbH, IBM Corporation | 11 | * Useful for resource counting etc. |
12 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), | 12 | * s390 uses 'Compare And Swap' for atomicity in SMP environment |
13 | * Denis Joseph Barrow, | ||
14 | * Arnd Bergmann (arndb@de.ibm.com) | ||
15 | * | ||
16 | * Derived from "include/asm-i386/bitops.h" | ||
17 | * Copyright (C) 1992, Linus Torvalds | ||
18 | * | 13 | * |
19 | */ | 14 | */ |
20 | 15 | ||
21 | /* | 16 | #include <linux/compiler.h> |
22 | * Atomic operations that C can't guarantee us. Useful for | 17 | #include <linux/types.h> |
23 | * resource counting etc. | ||
24 | * S390 uses 'Compare And Swap' for atomicity in SMP environment | ||
25 | */ | ||
26 | 18 | ||
27 | #define ATOMIC_INIT(i) { (i) } | 19 | #define ATOMIC_INIT(i) { (i) } |
28 | 20 | ||
29 | #ifdef __KERNEL__ | ||
30 | |||
31 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) | 21 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) |
32 | 22 | ||
33 | #define __CS_LOOP(ptr, op_val, op_string) ({ \ | 23 | #define __CS_LOOP(ptr, op_val, op_string) ({ \ |
@@ -77,7 +67,7 @@ static inline void atomic_set(atomic_t *v, int i) | |||
77 | barrier(); | 67 | barrier(); |
78 | } | 68 | } |
79 | 69 | ||
80 | static __inline__ int atomic_add_return(int i, atomic_t * v) | 70 | static inline int atomic_add_return(int i, atomic_t *v) |
81 | { | 71 | { |
82 | return __CS_LOOP(v, i, "ar"); | 72 | return __CS_LOOP(v, i, "ar"); |
83 | } | 73 | } |
@@ -87,7 +77,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v) | |||
87 | #define atomic_inc_return(_v) atomic_add_return(1, _v) | 77 | #define atomic_inc_return(_v) atomic_add_return(1, _v) |
88 | #define atomic_inc_and_test(_v) (atomic_add_return(1, _v) == 0) | 78 | #define atomic_inc_and_test(_v) (atomic_add_return(1, _v) == 0) |
89 | 79 | ||
90 | static __inline__ int atomic_sub_return(int i, atomic_t * v) | 80 | static inline int atomic_sub_return(int i, atomic_t *v) |
91 | { | 81 | { |
92 | return __CS_LOOP(v, i, "sr"); | 82 | return __CS_LOOP(v, i, "sr"); |
93 | } | 83 | } |
@@ -97,19 +87,19 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v) | |||
97 | #define atomic_dec_return(_v) atomic_sub_return(1, _v) | 87 | #define atomic_dec_return(_v) atomic_sub_return(1, _v) |
98 | #define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0) | 88 | #define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0) |
99 | 89 | ||
100 | static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t * v) | 90 | static inline void atomic_clear_mask(unsigned long mask, atomic_t *v) |
101 | { | 91 | { |
102 | __CS_LOOP(v, ~mask, "nr"); | 92 | __CS_LOOP(v, ~mask, "nr"); |
103 | } | 93 | } |
104 | 94 | ||
105 | static __inline__ void atomic_set_mask(unsigned long mask, atomic_t * v) | 95 | static inline void atomic_set_mask(unsigned long mask, atomic_t *v) |
106 | { | 96 | { |
107 | __CS_LOOP(v, mask, "or"); | 97 | __CS_LOOP(v, mask, "or"); |
108 | } | 98 | } |
109 | 99 | ||
110 | #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) | 100 | #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) |
111 | 101 | ||
112 | static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new) | 102 | static inline int atomic_cmpxchg(atomic_t *v, int old, int new) |
113 | { | 103 | { |
114 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) | 104 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) |
115 | asm volatile( | 105 | asm volatile( |
@@ -127,7 +117,7 @@ static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new) | |||
127 | return old; | 117 | return old; |
128 | } | 118 | } |
129 | 119 | ||
130 | static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) | 120 | static inline int atomic_add_unless(atomic_t *v, int a, int u) |
131 | { | 121 | { |
132 | int c, old; | 122 | int c, old; |
133 | c = atomic_read(v); | 123 | c = atomic_read(v); |
@@ -146,9 +136,10 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) | |||
146 | 136 | ||
147 | #undef __CS_LOOP | 137 | #undef __CS_LOOP |
148 | 138 | ||
149 | #ifdef __s390x__ | ||
150 | #define ATOMIC64_INIT(i) { (i) } | 139 | #define ATOMIC64_INIT(i) { (i) } |
151 | 140 | ||
141 | #ifdef CONFIG_64BIT | ||
142 | |||
152 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) | 143 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) |
153 | 144 | ||
154 | #define __CSG_LOOP(ptr, op_val, op_string) ({ \ | 145 | #define __CSG_LOOP(ptr, op_val, op_string) ({ \ |
@@ -162,7 +153,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) | |||
162 | : "=&d" (old_val), "=&d" (new_val), \ | 153 | : "=&d" (old_val), "=&d" (new_val), \ |
163 | "=Q" (((atomic_t *)(ptr))->counter) \ | 154 | "=Q" (((atomic_t *)(ptr))->counter) \ |
164 | : "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \ | 155 | : "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \ |
165 | : "cc", "memory" ); \ | 156 | : "cc", "memory"); \ |
166 | new_val; \ | 157 | new_val; \ |
167 | }) | 158 | }) |
168 | 159 | ||
@@ -180,7 +171,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) | |||
180 | "=m" (((atomic_t *)(ptr))->counter) \ | 171 | "=m" (((atomic_t *)(ptr))->counter) \ |
181 | : "a" (ptr), "d" (op_val), \ | 172 | : "a" (ptr), "d" (op_val), \ |
182 | "m" (((atomic_t *)(ptr))->counter) \ | 173 | "m" (((atomic_t *)(ptr))->counter) \ |
183 | : "cc", "memory" ); \ | 174 | : "cc", "memory"); \ |
184 | new_val; \ | 175 | new_val; \ |
185 | }) | 176 | }) |
186 | 177 | ||
@@ -198,39 +189,29 @@ static inline void atomic64_set(atomic64_t *v, long long i) | |||
198 | barrier(); | 189 | barrier(); |
199 | } | 190 | } |
200 | 191 | ||
201 | static __inline__ long long atomic64_add_return(long long i, atomic64_t * v) | 192 | static inline long long atomic64_add_return(long long i, atomic64_t *v) |
202 | { | 193 | { |
203 | return __CSG_LOOP(v, i, "agr"); | 194 | return __CSG_LOOP(v, i, "agr"); |
204 | } | 195 | } |
205 | #define atomic64_add(_i, _v) atomic64_add_return(_i, _v) | ||
206 | #define atomic64_add_negative(_i, _v) (atomic64_add_return(_i, _v) < 0) | ||
207 | #define atomic64_inc(_v) atomic64_add_return(1, _v) | ||
208 | #define atomic64_inc_return(_v) atomic64_add_return(1, _v) | ||
209 | #define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0) | ||
210 | 196 | ||
211 | static __inline__ long long atomic64_sub_return(long long i, atomic64_t * v) | 197 | static inline long long atomic64_sub_return(long long i, atomic64_t *v) |
212 | { | 198 | { |
213 | return __CSG_LOOP(v, i, "sgr"); | 199 | return __CSG_LOOP(v, i, "sgr"); |
214 | } | 200 | } |
215 | #define atomic64_sub(_i, _v) atomic64_sub_return(_i, _v) | ||
216 | #define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0) | ||
217 | #define atomic64_dec(_v) atomic64_sub_return(1, _v) | ||
218 | #define atomic64_dec_return(_v) atomic64_sub_return(1, _v) | ||
219 | #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0) | ||
220 | 201 | ||
221 | static __inline__ void atomic64_clear_mask(unsigned long mask, atomic64_t * v) | 202 | static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v) |
222 | { | 203 | { |
223 | __CSG_LOOP(v, ~mask, "ngr"); | 204 | __CSG_LOOP(v, ~mask, "ngr"); |
224 | } | 205 | } |
225 | 206 | ||
226 | static __inline__ void atomic64_set_mask(unsigned long mask, atomic64_t * v) | 207 | static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v) |
227 | { | 208 | { |
228 | __CSG_LOOP(v, mask, "ogr"); | 209 | __CSG_LOOP(v, mask, "ogr"); |
229 | } | 210 | } |
230 | 211 | ||
231 | #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) | 212 | #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) |
232 | 213 | ||
233 | static __inline__ long long atomic64_cmpxchg(atomic64_t *v, | 214 | static inline long long atomic64_cmpxchg(atomic64_t *v, |
234 | long long old, long long new) | 215 | long long old, long long new) |
235 | { | 216 | { |
236 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) | 217 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) |
@@ -249,8 +230,112 @@ static __inline__ long long atomic64_cmpxchg(atomic64_t *v, | |||
249 | return old; | 230 | return old; |
250 | } | 231 | } |
251 | 232 | ||
252 | static __inline__ int atomic64_add_unless(atomic64_t *v, | 233 | #undef __CSG_LOOP |
253 | long long a, long long u) | 234 | |
235 | #else /* CONFIG_64BIT */ | ||
236 | |||
237 | typedef struct { | ||
238 | long long counter; | ||
239 | } atomic64_t; | ||
240 | |||
241 | static inline long long atomic64_read(const atomic64_t *v) | ||
242 | { | ||
243 | register_pair rp; | ||
244 | |||
245 | asm volatile( | ||
246 | " lm %0,%N0,0(%1)" | ||
247 | : "=&d" (rp) | ||
248 | : "a" (&v->counter), "m" (v->counter) | ||
249 | ); | ||
250 | return rp.pair; | ||
251 | } | ||
252 | |||
253 | static inline void atomic64_set(atomic64_t *v, long long i) | ||
254 | { | ||
255 | register_pair rp = {.pair = i}; | ||
256 | |||
257 | asm volatile( | ||
258 | " stm %1,%N1,0(%2)" | ||
259 | : "=m" (v->counter) | ||
260 | : "d" (rp), "a" (&v->counter) | ||
261 | ); | ||
262 | } | ||
263 | |||
264 | static inline long long atomic64_xchg(atomic64_t *v, long long new) | ||
265 | { | ||
266 | register_pair rp_new = {.pair = new}; | ||
267 | register_pair rp_old; | ||
268 | |||
269 | asm volatile( | ||
270 | " lm %0,%N0,0(%2)\n" | ||
271 | "0: cds %0,%3,0(%2)\n" | ||
272 | " jl 0b\n" | ||
273 | : "=&d" (rp_old), "+m" (v->counter) | ||
274 | : "a" (&v->counter), "d" (rp_new) | ||
275 | : "cc"); | ||
276 | return rp_old.pair; | ||
277 | } | ||
278 | |||
279 | static inline long long atomic64_cmpxchg(atomic64_t *v, | ||
280 | long long old, long long new) | ||
281 | { | ||
282 | register_pair rp_old = {.pair = old}; | ||
283 | register_pair rp_new = {.pair = new}; | ||
284 | |||
285 | asm volatile( | ||
286 | " cds %0,%3,0(%2)" | ||
287 | : "+&d" (rp_old), "+m" (v->counter) | ||
288 | : "a" (&v->counter), "d" (rp_new) | ||
289 | : "cc"); | ||
290 | return rp_old.pair; | ||
291 | } | ||
292 | |||
293 | |||
294 | static inline long long atomic64_add_return(long long i, atomic64_t *v) | ||
295 | { | ||
296 | long long old, new; | ||
297 | |||
298 | do { | ||
299 | old = atomic64_read(v); | ||
300 | new = old + i; | ||
301 | } while (atomic64_cmpxchg(v, old, new) != old); | ||
302 | return new; | ||
303 | } | ||
304 | |||
305 | static inline long long atomic64_sub_return(long long i, atomic64_t *v) | ||
306 | { | ||
307 | long long old, new; | ||
308 | |||
309 | do { | ||
310 | old = atomic64_read(v); | ||
311 | new = old - i; | ||
312 | } while (atomic64_cmpxchg(v, old, new) != old); | ||
313 | return new; | ||
314 | } | ||
315 | |||
316 | static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v) | ||
317 | { | ||
318 | long long old, new; | ||
319 | |||
320 | do { | ||
321 | old = atomic64_read(v); | ||
322 | new = old | mask; | ||
323 | } while (atomic64_cmpxchg(v, old, new) != old); | ||
324 | } | ||
325 | |||
326 | static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v) | ||
327 | { | ||
328 | long long old, new; | ||
329 | |||
330 | do { | ||
331 | old = atomic64_read(v); | ||
332 | new = old & mask; | ||
333 | } while (atomic64_cmpxchg(v, old, new) != old); | ||
334 | } | ||
335 | |||
336 | #endif /* CONFIG_64BIT */ | ||
337 | |||
338 | static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u) | ||
254 | { | 339 | { |
255 | long long c, old; | 340 | long long c, old; |
256 | c = atomic64_read(v); | 341 | c = atomic64_read(v); |
@@ -265,15 +350,17 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, | |||
265 | return c != u; | 350 | return c != u; |
266 | } | 351 | } |
267 | 352 | ||
268 | #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) | 353 | #define atomic64_add(_i, _v) atomic64_add_return(_i, _v) |
269 | 354 | #define atomic64_add_negative(_i, _v) (atomic64_add_return(_i, _v) < 0) | |
270 | #undef __CSG_LOOP | 355 | #define atomic64_inc(_v) atomic64_add_return(1, _v) |
271 | 356 | #define atomic64_inc_return(_v) atomic64_add_return(1, _v) | |
272 | #else /* __s390x__ */ | 357 | #define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0) |
273 | 358 | #define atomic64_sub(_i, _v) atomic64_sub_return(_i, _v) | |
274 | #include <asm-generic/atomic64.h> | 359 | #define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0) |
275 | 360 | #define atomic64_dec(_v) atomic64_sub_return(1, _v) | |
276 | #endif /* __s390x__ */ | 361 | #define atomic64_dec_return(_v) atomic64_sub_return(1, _v) |
362 | #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0) | ||
363 | #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) | ||
277 | 364 | ||
278 | #define smp_mb__before_atomic_dec() smp_mb() | 365 | #define smp_mb__before_atomic_dec() smp_mb() |
279 | #define smp_mb__after_atomic_dec() smp_mb() | 366 | #define smp_mb__after_atomic_dec() smp_mb() |
@@ -281,5 +368,5 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, | |||
281 | #define smp_mb__after_atomic_inc() smp_mb() | 368 | #define smp_mb__after_atomic_inc() smp_mb() |
282 | 369 | ||
283 | #include <asm-generic/atomic-long.h> | 370 | #include <asm-generic/atomic-long.h> |
284 | #endif /* __KERNEL__ */ | 371 | |
285 | #endif /* __ARCH_S390_ATOMIC__ */ | 372 | #endif /* __ARCH_S390_ATOMIC__ */ |
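The new 31-bit atomic64 implementation above builds every read-modify-write operation out of a cmpxchg (CDS) retry loop. The same pattern in portable C, using the GCC/Clang __atomic builtins as stand-ins for the inline assembly:

/* Sketch of the cmpxchg-loop pattern: re-read and retry until no other
 * CPU modified the value between the load and the compare-and-swap. */
static long long add_return(long long i, long long *v)
{
        long long old, new;

        do {
                old = __atomic_load_n(v, __ATOMIC_RELAXED);
                new = old + i;
        } while (!__atomic_compare_exchange_n(v, &old, new, 0,
                                              __ATOMIC_SEQ_CST,
                                              __ATOMIC_SEQ_CST));
        return new;
}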
diff --git a/arch/s390/include/asm/checksum.h b/arch/s390/include/asm/checksum.h index d5a8e7c1477c..6c00f6800a34 100644 --- a/arch/s390/include/asm/checksum.h +++ b/arch/s390/include/asm/checksum.h | |||
@@ -78,28 +78,11 @@ csum_partial_copy_nocheck (const void *src, void *dst, int len, __wsum sum) | |||
78 | */ | 78 | */ |
79 | static inline __sum16 csum_fold(__wsum sum) | 79 | static inline __sum16 csum_fold(__wsum sum) |
80 | { | 80 | { |
81 | #ifndef __s390x__ | 81 | u32 csum = (__force u32) sum; |
82 | register_pair rp; | ||
83 | 82 | ||
84 | asm volatile( | 83 | csum += (csum >> 16) + (csum << 16); |
85 | " slr %N1,%N1\n" /* %0 = H L */ | 84 | csum >>= 16; |
86 | " lr %1,%0\n" /* %0 = H L, %1 = H L 0 0 */ | 85 | return (__force __sum16) ~csum; |
87 | " srdl %1,16\n" /* %0 = H L, %1 = 0 H L 0 */ | ||
88 | " alr %1,%N1\n" /* %0 = H L, %1 = L H L 0 */ | ||
89 | " alr %0,%1\n" /* %0 = H+L+C L+H */ | ||
90 | " srl %0,16\n" /* %0 = H+L+C */ | ||
91 | : "+&d" (sum), "=d" (rp) : : "cc"); | ||
92 | #else /* __s390x__ */ | ||
93 | asm volatile( | ||
94 | " sr 3,3\n" /* %0 = H*65536 + L */ | ||
95 | " lr 2,%0\n" /* %0 = H L, 2/3 = H L / 0 0 */ | ||
96 | " srdl 2,16\n" /* %0 = H L, 2/3 = 0 H / L 0 */ | ||
97 | " alr 2,3\n" /* %0 = H L, 2/3 = L H / L 0 */ | ||
98 | " alr %0,2\n" /* %0 = H+L+C L+H */ | ||
99 | " srl %0,16\n" /* %0 = H+L+C */ | ||
100 | : "+&d" (sum) : : "cc", "2", "3"); | ||
101 | #endif /* __s390x__ */ | ||
102 | return (__force __sum16) ~sum; | ||
103 | } | 86 | } |
104 | 87 | ||
105 | /* | 88 | /* |
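The rewritten csum_fold() collapses the 32-bit partial sum H:L to the 16-bit value H + L with end-around carry in two lines: adding (csum >> 16) + (csum << 16) to csum lands H + L + carry in the top halfword, which the shift then extracts. A standalone check with one worked value:

#include <stdio.h>
#include <stdint.h>

/* The new csum_fold(), minus the kernel's __force annotations. */
static uint16_t fold(uint32_t csum)
{
        csum += (csum >> 16) + (csum << 16);
        csum >>= 16;
        return (uint16_t)~csum;
}

int main(void)
{
        /* H=0x0001, L=0xffff: H+L = 0x10000, end-around carry -> 0x0001,
         * so the folded checksum is ~0x0001 = 0xfffe. */
        printf("0x%04x\n", fold(0x0001ffffU));
        return 0;
}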
diff --git a/arch/s390/include/asm/chsc.h b/arch/s390/include/asm/chsc.h index 807997f7414b..4943654ed7fd 100644 --- a/arch/s390/include/asm/chsc.h +++ b/arch/s390/include/asm/chsc.h | |||
@@ -125,4 +125,32 @@ struct chsc_cpd_info { | |||
125 | #define CHSC_INFO_CPD _IOWR(CHSC_IOCTL_MAGIC, 0x87, struct chsc_cpd_info) | 125 | #define CHSC_INFO_CPD _IOWR(CHSC_IOCTL_MAGIC, 0x87, struct chsc_cpd_info) |
126 | #define CHSC_INFO_DCAL _IOWR(CHSC_IOCTL_MAGIC, 0x88, struct chsc_dcal) | 126 | #define CHSC_INFO_DCAL _IOWR(CHSC_IOCTL_MAGIC, 0x88, struct chsc_dcal) |
127 | 127 | ||
128 | #ifdef __KERNEL__ | ||
129 | |||
130 | struct css_general_char { | ||
131 | u64 : 12; | ||
132 | u32 dynio : 1; /* bit 12 */ | ||
133 | u32 : 28; | ||
134 | u32 aif : 1; /* bit 41 */ | ||
135 | u32 : 3; | ||
136 | u32 mcss : 1; /* bit 45 */ | ||
137 | u32 fcs : 1; /* bit 46 */ | ||
138 | u32 : 1; | ||
139 | u32 ext_mb : 1; /* bit 48 */ | ||
140 | u32 : 7; | ||
141 | u32 aif_tdd : 1; /* bit 56 */ | ||
142 | u32 : 1; | ||
143 | u32 qebsm : 1; /* bit 58 */ | ||
144 | u32 : 8; | ||
145 | u32 aif_osa : 1; /* bit 67 */ | ||
146 | u32 : 14; | ||
147 | u32 cib : 1; /* bit 82 */ | ||
148 | u32 : 5; | ||
149 | u32 fcx : 1; /* bit 88 */ | ||
150 | u32 : 7; | ||
151 | } __attribute__((packed)); | ||
152 | |||
153 | extern struct css_general_char css_general_characteristics; | ||
154 | |||
155 | #endif /* __KERNEL__ */ | ||
128 | #endif | 156 | #endif |
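
struct css_general_char mirrors the general-characteristics block returned by CHSC, one named bit per architected feature; scsw_is_tm() in the new asm/scsw.h, for instance, keys off the fcx bit. A hedged sketch of such a feature test (function name illustrative):

#include <linux/types.h>
#include <asm/chsc.h>

static int example_supports_fcx(void)
{
	/* fcx (bit 88) advertises fibre-channel-extensions support */
	return css_general_characteristics.fcx != 0;
}
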
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h index 619bf94b11f1..e85679af54dd 100644 --- a/arch/s390/include/asm/cio.h +++ b/arch/s390/include/asm/cio.h | |||
@@ -15,228 +15,7 @@ | |||
15 | #define LPM_ANYPATH 0xff | 15 | #define LPM_ANYPATH 0xff |
16 | #define __MAX_CSSID 0 | 16 | #define __MAX_CSSID 0 |
17 | 17 | ||
18 | /** | 18 | #include <asm/scsw.h> |
19 | * struct cmd_scsw - command-mode subchannel status word | ||
20 | * @key: subchannel key | ||
21 | * @sctl: suspend control | ||
22 | * @eswf: esw format | ||
23 | * @cc: deferred condition code | ||
24 | * @fmt: format | ||
25 | * @pfch: prefetch | ||
26 | * @isic: initial-status interruption control | ||
27 | * @alcc: address-limit checking control | ||
28 | * @ssi: suppress-suspended interruption | ||
29 | * @zcc: zero condition code | ||
30 | * @ectl: extended control | ||
31 | * @pno: path not operational | ||
32 | * @res: reserved | ||
33 | * @fctl: function control | ||
34 | * @actl: activity control | ||
35 | * @stctl: status control | ||
36 | * @cpa: channel program address | ||
37 | * @dstat: device status | ||
38 | * @cstat: subchannel status | ||
39 | * @count: residual count | ||
40 | */ | ||
41 | struct cmd_scsw { | ||
42 | __u32 key : 4; | ||
43 | __u32 sctl : 1; | ||
44 | __u32 eswf : 1; | ||
45 | __u32 cc : 2; | ||
46 | __u32 fmt : 1; | ||
47 | __u32 pfch : 1; | ||
48 | __u32 isic : 1; | ||
49 | __u32 alcc : 1; | ||
50 | __u32 ssi : 1; | ||
51 | __u32 zcc : 1; | ||
52 | __u32 ectl : 1; | ||
53 | __u32 pno : 1; | ||
54 | __u32 res : 1; | ||
55 | __u32 fctl : 3; | ||
56 | __u32 actl : 7; | ||
57 | __u32 stctl : 5; | ||
58 | __u32 cpa; | ||
59 | __u32 dstat : 8; | ||
60 | __u32 cstat : 8; | ||
61 | __u32 count : 16; | ||
62 | } __attribute__ ((packed)); | ||
63 | |||
64 | /** | ||
65 | * struct tm_scsw - transport-mode subchannel status word | ||
66 | * @key: subchannel key | ||
67 | * @eswf: esw format | ||
68 | * @cc: deferred condition code | ||
69 | * @fmt: format | ||
70 | * @x: IRB-format control | ||
71 | * @q: interrogate-complete | ||
72 | * @ectl: extended control | ||
73 | * @pno: path not operational | ||
74 | * @fctl: function control | ||
75 | * @actl: activity control | ||
76 | * @stctl: status control | ||
77 | * @tcw: TCW address | ||
78 | * @dstat: device status | ||
79 | * @cstat: subchannel status | ||
80 | * @fcxs: FCX status | ||
81 | * @schxs: subchannel-extended status | ||
82 | */ | ||
83 | struct tm_scsw { | ||
84 | u32 key:4; | ||
85 | u32 :1; | ||
86 | u32 eswf:1; | ||
87 | u32 cc:2; | ||
88 | u32 fmt:3; | ||
89 | u32 x:1; | ||
90 | u32 q:1; | ||
91 | u32 :1; | ||
92 | u32 ectl:1; | ||
93 | u32 pno:1; | ||
94 | u32 :1; | ||
95 | u32 fctl:3; | ||
96 | u32 actl:7; | ||
97 | u32 stctl:5; | ||
98 | u32 tcw; | ||
99 | u32 dstat:8; | ||
100 | u32 cstat:8; | ||
101 | u32 fcxs:8; | ||
102 | u32 schxs:8; | ||
103 | } __attribute__ ((packed)); | ||
104 | |||
105 | /** | ||
106 | * union scsw - subchannel status word | ||
107 | * @cmd: command-mode SCSW | ||
108 | * @tm: transport-mode SCSW | ||
109 | */ | ||
110 | union scsw { | ||
111 | struct cmd_scsw cmd; | ||
112 | struct tm_scsw tm; | ||
113 | } __attribute__ ((packed)); | ||
114 | |||
115 | int scsw_is_tm(union scsw *scsw); | ||
116 | u32 scsw_key(union scsw *scsw); | ||
117 | u32 scsw_eswf(union scsw *scsw); | ||
118 | u32 scsw_cc(union scsw *scsw); | ||
119 | u32 scsw_ectl(union scsw *scsw); | ||
120 | u32 scsw_pno(union scsw *scsw); | ||
121 | u32 scsw_fctl(union scsw *scsw); | ||
122 | u32 scsw_actl(union scsw *scsw); | ||
123 | u32 scsw_stctl(union scsw *scsw); | ||
124 | u32 scsw_dstat(union scsw *scsw); | ||
125 | u32 scsw_cstat(union scsw *scsw); | ||
126 | int scsw_is_solicited(union scsw *scsw); | ||
127 | int scsw_is_valid_key(union scsw *scsw); | ||
128 | int scsw_is_valid_eswf(union scsw *scsw); | ||
129 | int scsw_is_valid_cc(union scsw *scsw); | ||
130 | int scsw_is_valid_ectl(union scsw *scsw); | ||
131 | int scsw_is_valid_pno(union scsw *scsw); | ||
132 | int scsw_is_valid_fctl(union scsw *scsw); | ||
133 | int scsw_is_valid_actl(union scsw *scsw); | ||
134 | int scsw_is_valid_stctl(union scsw *scsw); | ||
135 | int scsw_is_valid_dstat(union scsw *scsw); | ||
136 | int scsw_is_valid_cstat(union scsw *scsw); | ||
137 | int scsw_cmd_is_valid_key(union scsw *scsw); | ||
138 | int scsw_cmd_is_valid_sctl(union scsw *scsw); | ||
139 | int scsw_cmd_is_valid_eswf(union scsw *scsw); | ||
140 | int scsw_cmd_is_valid_cc(union scsw *scsw); | ||
141 | int scsw_cmd_is_valid_fmt(union scsw *scsw); | ||
142 | int scsw_cmd_is_valid_pfch(union scsw *scsw); | ||
143 | int scsw_cmd_is_valid_isic(union scsw *scsw); | ||
144 | int scsw_cmd_is_valid_alcc(union scsw *scsw); | ||
145 | int scsw_cmd_is_valid_ssi(union scsw *scsw); | ||
146 | int scsw_cmd_is_valid_zcc(union scsw *scsw); | ||
147 | int scsw_cmd_is_valid_ectl(union scsw *scsw); | ||
148 | int scsw_cmd_is_valid_pno(union scsw *scsw); | ||
149 | int scsw_cmd_is_valid_fctl(union scsw *scsw); | ||
150 | int scsw_cmd_is_valid_actl(union scsw *scsw); | ||
151 | int scsw_cmd_is_valid_stctl(union scsw *scsw); | ||
152 | int scsw_cmd_is_valid_dstat(union scsw *scsw); | ||
153 | int scsw_cmd_is_valid_cstat(union scsw *scsw); | ||
154 | int scsw_cmd_is_solicited(union scsw *scsw); | ||
155 | int scsw_tm_is_valid_key(union scsw *scsw); | ||
156 | int scsw_tm_is_valid_eswf(union scsw *scsw); | ||
157 | int scsw_tm_is_valid_cc(union scsw *scsw); | ||
158 | int scsw_tm_is_valid_fmt(union scsw *scsw); | ||
159 | int scsw_tm_is_valid_x(union scsw *scsw); | ||
160 | int scsw_tm_is_valid_q(union scsw *scsw); | ||
161 | int scsw_tm_is_valid_ectl(union scsw *scsw); | ||
162 | int scsw_tm_is_valid_pno(union scsw *scsw); | ||
163 | int scsw_tm_is_valid_fctl(union scsw *scsw); | ||
164 | int scsw_tm_is_valid_actl(union scsw *scsw); | ||
165 | int scsw_tm_is_valid_stctl(union scsw *scsw); | ||
166 | int scsw_tm_is_valid_dstat(union scsw *scsw); | ||
167 | int scsw_tm_is_valid_cstat(union scsw *scsw); | ||
168 | int scsw_tm_is_valid_fcxs(union scsw *scsw); | ||
169 | int scsw_tm_is_valid_schxs(union scsw *scsw); | ||
170 | int scsw_tm_is_solicited(union scsw *scsw); | ||
171 | |||
172 | #define SCSW_FCTL_CLEAR_FUNC 0x1 | ||
173 | #define SCSW_FCTL_HALT_FUNC 0x2 | ||
174 | #define SCSW_FCTL_START_FUNC 0x4 | ||
175 | |||
176 | #define SCSW_ACTL_SUSPENDED 0x1 | ||
177 | #define SCSW_ACTL_DEVACT 0x2 | ||
178 | #define SCSW_ACTL_SCHACT 0x4 | ||
179 | #define SCSW_ACTL_CLEAR_PEND 0x8 | ||
180 | #define SCSW_ACTL_HALT_PEND 0x10 | ||
181 | #define SCSW_ACTL_START_PEND 0x20 | ||
182 | #define SCSW_ACTL_RESUME_PEND 0x40 | ||
183 | |||
184 | #define SCSW_STCTL_STATUS_PEND 0x1 | ||
185 | #define SCSW_STCTL_SEC_STATUS 0x2 | ||
186 | #define SCSW_STCTL_PRIM_STATUS 0x4 | ||
187 | #define SCSW_STCTL_INTER_STATUS 0x8 | ||
188 | #define SCSW_STCTL_ALERT_STATUS 0x10 | ||
189 | |||
190 | #define DEV_STAT_ATTENTION 0x80 | ||
191 | #define DEV_STAT_STAT_MOD 0x40 | ||
192 | #define DEV_STAT_CU_END 0x20 | ||
193 | #define DEV_STAT_BUSY 0x10 | ||
194 | #define DEV_STAT_CHN_END 0x08 | ||
195 | #define DEV_STAT_DEV_END 0x04 | ||
196 | #define DEV_STAT_UNIT_CHECK 0x02 | ||
197 | #define DEV_STAT_UNIT_EXCEP 0x01 | ||
198 | |||
199 | #define SCHN_STAT_PCI 0x80 | ||
200 | #define SCHN_STAT_INCORR_LEN 0x40 | ||
201 | #define SCHN_STAT_PROG_CHECK 0x20 | ||
202 | #define SCHN_STAT_PROT_CHECK 0x10 | ||
203 | #define SCHN_STAT_CHN_DATA_CHK 0x08 | ||
204 | #define SCHN_STAT_CHN_CTRL_CHK 0x04 | ||
205 | #define SCHN_STAT_INTF_CTRL_CHK 0x02 | ||
206 | #define SCHN_STAT_CHAIN_CHECK 0x01 | ||
207 | |||
208 | /* | ||
209 | * architectured values for first sense byte | ||
210 | */ | ||
211 | #define SNS0_CMD_REJECT 0x80 | ||
212 | #define SNS_CMD_REJECT SNS0_CMD_REJEC | ||
213 | #define SNS0_INTERVENTION_REQ 0x40 | ||
214 | #define SNS0_BUS_OUT_CHECK 0x20 | ||
215 | #define SNS0_EQUIPMENT_CHECK 0x10 | ||
216 | #define SNS0_DATA_CHECK 0x08 | ||
217 | #define SNS0_OVERRUN 0x04 | ||
218 | #define SNS0_INCOMPL_DOMAIN 0x01 | ||
219 | |||
220 | /* | ||
221 | * architectured values for second sense byte | ||
222 | */ | ||
223 | #define SNS1_PERM_ERR 0x80 | ||
224 | #define SNS1_INV_TRACK_FORMAT 0x40 | ||
225 | #define SNS1_EOC 0x20 | ||
226 | #define SNS1_MESSAGE_TO_OPER 0x10 | ||
227 | #define SNS1_NO_REC_FOUND 0x08 | ||
228 | #define SNS1_FILE_PROTECTED 0x04 | ||
229 | #define SNS1_WRITE_INHIBITED 0x02 | ||
230 | #define SNS1_INPRECISE_END 0x01 | ||
231 | |||
232 | /* | ||
233 | * architectured values for third sense byte | ||
234 | */ | ||
235 | #define SNS2_REQ_INH_WRITE 0x80 | ||
236 | #define SNS2_CORRECTABLE 0x40 | ||
237 | #define SNS2_FIRST_LOG_ERR 0x20 | ||
238 | #define SNS2_ENV_DATA_PRESENT 0x10 | ||
239 | #define SNS2_INPRECISE_END 0x04 | ||
240 | 19 | ||
241 | /** | 20 | /** |
242 | * struct ccw1 - channel command word | 21 | * struct ccw1 - channel command word |
diff --git a/arch/s390/include/asm/cpu.h b/arch/s390/include/asm/cpu.h new file mode 100644 index 000000000000..471234b90574 --- /dev/null +++ b/arch/s390/include/asm/cpu.h | |||
@@ -0,0 +1,26 @@ | |||
1 | /* | ||
2 | * Copyright IBM Corp. 2000,2009 | ||
3 | * Author(s): Hartmut Penner <hp@de.ibm.com>, | ||
4 | * Martin Schwidefsky <schwidefsky@de.ibm.com>, | ||
5 | * Christian Ehrhardt <ehrhardt@de.ibm.com>, | ||
6 | */ | ||
7 | |||
8 | #ifndef _ASM_S390_CPU_H | ||
9 | #define _ASM_S390_CPU_H | ||
10 | |||
11 | #define MAX_CPU_ADDRESS 255 | ||
12 | |||
13 | #ifndef __ASSEMBLY__ | ||
14 | |||
15 | #include <linux/types.h> | ||
16 | |||
17 | struct cpuid | ||
18 | { | ||
19 | unsigned int version : 8; | ||
20 | unsigned int ident : 24; | ||
21 | unsigned int machine : 16; | ||
22 | unsigned int unused : 16; | ||
23 | } __packed; | ||
24 | |||
25 | #endif /* __ASSEMBLY__ */ | ||
26 | #endif /* _ASM_S390_CPU_H */ | ||
diff --git a/arch/s390/include/asm/cpuid.h b/arch/s390/include/asm/cpuid.h deleted file mode 100644 index 07836a2e5222..000000000000 --- a/arch/s390/include/asm/cpuid.h +++ /dev/null | |||
@@ -1,25 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright IBM Corp. 2000,2009 | ||
3 | * Author(s): Hartmut Penner <hp@de.ibm.com>, | ||
4 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | ||
5 | * Christian Ehrhardt <ehrhardt@de.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #ifndef _ASM_S390_CPUID_H_ | ||
9 | #define _ASM_S390_CPUID_H_ | ||
10 | |||
11 | /* | ||
12 | * CPU type and hardware bug flags. Kept separately for each CPU. | ||
13 | * Members of this structure are referenced in head.S, so think twice | ||
14 | * before touching them. [mj] | ||
15 | */ | ||
16 | |||
17 | typedef struct | ||
18 | { | ||
19 | unsigned int version : 8; | ||
20 | unsigned int ident : 24; | ||
21 | unsigned int machine : 16; | ||
22 | unsigned int unused : 16; | ||
23 | } __attribute__ ((packed)) cpuid_t; | ||
24 | |||
25 | #endif /* _ASM_S390_CPUID_H_ */ | ||
diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h index 31ed5686a968..18124b75a7ab 100644 --- a/arch/s390/include/asm/debug.h +++ b/arch/s390/include/asm/debug.h | |||
@@ -167,6 +167,10 @@ debug_text_event(debug_info_t* id, int level, const char* txt) | |||
167 | return debug_event_common(id,level,txt,strlen(txt)); | 167 | return debug_event_common(id,level,txt,strlen(txt)); |
168 | } | 168 | } |
169 | 169 | ||
170 | /* | ||
171 | * IMPORTANT: Use "%s" in sprintf format strings with care! Only pointers are | ||
172 | * stored in the s390dbf. See Documentation/s390/s390dbf.txt for more details! | ||
173 | */ | ||
170 | extern debug_entry_t * | 174 | extern debug_entry_t * |
171 | debug_sprintf_event(debug_info_t* id,int level,char *string,...) | 175 | debug_sprintf_event(debug_info_t* id,int level,char *string,...) |
172 | __attribute__ ((format(printf, 3, 4))); | 176 | __attribute__ ((format(printf, 3, 4))); |
@@ -206,7 +210,10 @@ debug_text_exception(debug_info_t* id, int level, const char* txt) | |||
206 | return debug_exception_common(id,level,txt,strlen(txt)); | 210 | return debug_exception_common(id,level,txt,strlen(txt)); |
207 | } | 211 | } |
208 | 212 | ||
209 | 213 | /* | |
214 | * IMPORTANT: Use "%s" in sprintf format strings with care! Only pointers are | ||
215 | * stored in the s390dbf. See Documentation/s390/s390dbf.txt for more details! | ||
216 | */ | ||
210 | extern debug_entry_t * | 217 | extern debug_entry_t * |
211 | debug_sprintf_exception(debug_info_t* id,int level,char *string,...) | 218 | debug_sprintf_exception(debug_info_t* id,int level,char *string,...) |
212 | __attribute__ ((format(printf, 3, 4))); | 219 | __attribute__ ((format(printf, 3, 4))); |
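
The warning added above exists because s390dbf records only the argument words of a debug_sprintf_event() call, so a "%s" conversion stores the pointer rather than the string contents. A sketch of the pitfall (buffer and values illustrative):

#include <linux/kernel.h>
#include <asm/debug.h>

static void example_trace(debug_info_t *id)
{
	char local[16];

	snprintf(local, sizeof(local), "state=%d", 1);
	/* Risky: only the pointer to the stack buffer is stored; it
	 * is stale by the time the debug entry is formatted. */
	debug_sprintf_event(id, 2, "%s", local);
	/* Safe: the integer argument itself is copied. */
	debug_sprintf_event(id, 2, "state=%d", 1);
}
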
diff --git a/arch/s390/include/asm/hardirq.h b/arch/s390/include/asm/hardirq.h index 89ec7056da28..498bc3892385 100644 --- a/arch/s390/include/asm/hardirq.h +++ b/arch/s390/include/asm/hardirq.h | |||
@@ -18,13 +18,6 @@ | |||
18 | #include <linux/interrupt.h> | 18 | #include <linux/interrupt.h> |
19 | #include <asm/lowcore.h> | 19 | #include <asm/lowcore.h> |
20 | 20 | ||
21 | /* irq_cpustat_t is unused currently, but could be converted | ||
22 | * into a percpu variable instead of storing softirq_pending | ||
23 | * on the lowcore */ | ||
24 | typedef struct { | ||
25 | unsigned int __softirq_pending; | ||
26 | } irq_cpustat_t; | ||
27 | |||
28 | #define local_softirq_pending() (S390_lowcore.softirq_pending) | 21 | #define local_softirq_pending() (S390_lowcore.softirq_pending) |
29 | 22 | ||
30 | #define __ARCH_IRQ_STAT | 23 | #define __ARCH_IRQ_STAT |
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h index 1171e6d144a3..5e95d95450b3 100644 --- a/arch/s390/include/asm/ipl.h +++ b/arch/s390/include/asm/ipl.h | |||
@@ -57,6 +57,8 @@ struct ipl_block_fcp { | |||
57 | } __attribute__((packed)); | 57 | } __attribute__((packed)); |
58 | 58 | ||
59 | #define DIAG308_VMPARM_SIZE 64 | 59 | #define DIAG308_VMPARM_SIZE 64 |
60 | #define DIAG308_SCPDATA_SIZE (PAGE_SIZE - (sizeof(struct ipl_list_hdr) + \ | ||
61 | offsetof(struct ipl_block_fcp, scp_data))) | ||
60 | 62 | ||
61 | struct ipl_block_ccw { | 63 | struct ipl_block_ccw { |
62 | u8 load_parm[8]; | 64 | u8 load_parm[8]; |
@@ -91,7 +93,8 @@ extern void do_halt(void); | |||
91 | extern void do_poff(void); | 93 | extern void do_poff(void); |
92 | extern void ipl_save_parameters(void); | 94 | extern void ipl_save_parameters(void); |
93 | extern void ipl_update_parameters(void); | 95 | extern void ipl_update_parameters(void); |
94 | extern void get_ipl_vmparm(char *); | 96 | extern size_t append_ipl_vmparm(char *, size_t); |
97 | extern size_t append_ipl_scpdata(char *, size_t); | ||
95 | 98 | ||
96 | enum { | 99 | enum { |
97 | IPL_DEVNO_VALID = 1, | 100 | IPL_DEVNO_VALID = 1, |
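
DIAG308_SCPDATA_SIZE is derived so that scp_data may occupy whatever remains of the 4K IPL parameter page after the list header and the fixed part of the FCP block. An illustrative compile-time restatement of that derivation (not part of the patch):

#include <linux/kernel.h>
#include <linux/stddef.h>
#include <asm/page.h>
#include <asm/ipl.h>

static inline void example_check_scpdata_size(void)
{
	BUILD_BUG_ON(sizeof(struct ipl_list_hdr) +
		     offsetof(struct ipl_block_fcp, scp_data) +
		     DIAG308_SCPDATA_SIZE != PAGE_SIZE);
}
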
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 1cd02f6073a0..698988f69403 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h | |||
@@ -17,7 +17,7 @@ | |||
17 | #include <linux/interrupt.h> | 17 | #include <linux/interrupt.h> |
18 | #include <linux/kvm_host.h> | 18 | #include <linux/kvm_host.h> |
19 | #include <asm/debug.h> | 19 | #include <asm/debug.h> |
20 | #include <asm/cpuid.h> | 20 | #include <asm/cpu.h> |
21 | 21 | ||
22 | #define KVM_MAX_VCPUS 64 | 22 | #define KVM_MAX_VCPUS 64 |
23 | #define KVM_MEMORY_SLOTS 32 | 23 | #define KVM_MEMORY_SLOTS 32 |
@@ -217,8 +217,8 @@ struct kvm_vcpu_arch { | |||
217 | struct hrtimer ckc_timer; | 217 | struct hrtimer ckc_timer; |
218 | struct tasklet_struct tasklet; | 218 | struct tasklet_struct tasklet; |
219 | union { | 219 | union { |
220 | cpuid_t cpu_id; | 220 | struct cpuid cpu_id; |
221 | u64 stidp_data; | 221 | u64 stidp_data; |
222 | }; | 222 | }; |
223 | }; | 223 | }; |
224 | 224 | ||
diff --git a/arch/s390/include/asm/kvm_virtio.h b/arch/s390/include/asm/kvm_virtio.h index 0503936f101f..acdfdff26611 100644 --- a/arch/s390/include/asm/kvm_virtio.h +++ b/arch/s390/include/asm/kvm_virtio.h | |||
@@ -54,14 +54,4 @@ struct kvm_vqconfig { | |||
54 | * This is pagesize for historical reasons. */ | 54 | * This is pagesize for historical reasons. */ |
55 | #define KVM_S390_VIRTIO_RING_ALIGN 4096 | 55 | #define KVM_S390_VIRTIO_RING_ALIGN 4096 |
56 | 56 | ||
57 | #ifdef __KERNEL__ | ||
58 | /* early virtio console setup */ | ||
59 | #ifdef CONFIG_S390_GUEST | ||
60 | extern void s390_virtio_console_init(void); | ||
61 | #else | ||
62 | static inline void s390_virtio_console_init(void) | ||
63 | { | ||
64 | } | ||
65 | #endif /* CONFIG_VIRTIO_CONSOLE */ | ||
66 | #endif /* __KERNEL__ */ | ||
67 | #endif | 57 | #endif |
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h index 5046ad6b7a63..6bc9426a6fbf 100644 --- a/arch/s390/include/asm/lowcore.h +++ b/arch/s390/include/asm/lowcore.h | |||
@@ -132,7 +132,7 @@ | |||
132 | 132 | ||
133 | #ifndef __ASSEMBLY__ | 133 | #ifndef __ASSEMBLY__ |
134 | 134 | ||
135 | #include <asm/cpuid.h> | 135 | #include <asm/cpu.h> |
136 | #include <asm/ptrace.h> | 136 | #include <asm/ptrace.h> |
137 | #include <linux/types.h> | 137 | #include <linux/types.h> |
138 | 138 | ||
@@ -275,7 +275,7 @@ struct _lowcore | |||
275 | __u32 user_exec_asce; /* 0x02ac */ | 275 | __u32 user_exec_asce; /* 0x02ac */ |
276 | 276 | ||
277 | /* SMP info area */ | 277 | /* SMP info area */ |
278 | cpuid_t cpu_id; /* 0x02b0 */ | 278 | struct cpuid cpu_id; /* 0x02b0 */ |
279 | __u32 cpu_nr; /* 0x02b8 */ | 279 | __u32 cpu_nr; /* 0x02b8 */ |
280 | __u32 softirq_pending; /* 0x02bc */ | 280 | __u32 softirq_pending; /* 0x02bc */ |
281 | __u32 percpu_offset; /* 0x02c0 */ | 281 | __u32 percpu_offset; /* 0x02c0 */ |
@@ -380,7 +380,7 @@ struct _lowcore | |||
380 | __u64 user_exec_asce; /* 0x0318 */ | 380 | __u64 user_exec_asce; /* 0x0318 */ |
381 | 381 | ||
382 | /* SMP info area */ | 382 | /* SMP info area */ |
383 | cpuid_t cpu_id; /* 0x0320 */ | 383 | struct cpuid cpu_id; /* 0x0320 */ |
384 | __u32 cpu_nr; /* 0x0328 */ | 384 | __u32 cpu_nr; /* 0x0328 */ |
385 | __u32 softirq_pending; /* 0x032c */ | 385 | __u32 softirq_pending; /* 0x032c */ |
386 | __u64 percpu_offset; /* 0x0330 */ | 386 | __u64 percpu_offset; /* 0x0330 */ |
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h index 3b59216e6284..03be99919d62 100644 --- a/arch/s390/include/asm/mmu.h +++ b/arch/s390/include/asm/mmu.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #define __MMU_H | 2 | #define __MMU_H |
3 | 3 | ||
4 | typedef struct { | 4 | typedef struct { |
5 | spinlock_t list_lock; | ||
5 | struct list_head crst_list; | 6 | struct list_head crst_list; |
6 | struct list_head pgtable_list; | 7 | struct list_head pgtable_list; |
7 | unsigned long asce_bits; | 8 | unsigned long asce_bits; |
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h index 3e3594d01f83..5e9daf5d7f22 100644 --- a/arch/s390/include/asm/page.h +++ b/arch/s390/include/asm/page.h | |||
@@ -125,8 +125,6 @@ page_get_storage_key(unsigned long addr) | |||
125 | return skey; | 125 | return skey; |
126 | } | 126 | } |
127 | 127 | ||
128 | #ifdef CONFIG_PAGE_STATES | ||
129 | |||
130 | struct page; | 128 | struct page; |
131 | void arch_free_page(struct page *page, int order); | 129 | void arch_free_page(struct page *page, int order); |
132 | void arch_alloc_page(struct page *page, int order); | 130 | void arch_alloc_page(struct page *page, int order); |
@@ -134,8 +132,6 @@ void arch_alloc_page(struct page *page, int order); | |||
134 | #define HAVE_ARCH_FREE_PAGE | 132 | #define HAVE_ARCH_FREE_PAGE |
135 | #define HAVE_ARCH_ALLOC_PAGE | 133 | #define HAVE_ARCH_ALLOC_PAGE |
136 | 134 | ||
137 | #endif | ||
138 | |||
139 | #endif /* !__ASSEMBLY__ */ | 135 | #endif /* !__ASSEMBLY__ */ |
140 | 136 | ||
141 | #define __PAGE_OFFSET 0x0UL | 137 | #define __PAGE_OFFSET 0x0UL |
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h index b2658b9220fe..ddad5903341c 100644 --- a/arch/s390/include/asm/pgalloc.h +++ b/arch/s390/include/asm/pgalloc.h | |||
@@ -140,6 +140,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) | |||
140 | 140 | ||
141 | static inline pgd_t *pgd_alloc(struct mm_struct *mm) | 141 | static inline pgd_t *pgd_alloc(struct mm_struct *mm) |
142 | { | 142 | { |
143 | spin_lock_init(&mm->context.list_lock); | ||
143 | INIT_LIST_HEAD(&mm->context.crst_list); | 144 | INIT_LIST_HEAD(&mm->context.crst_list); |
144 | INIT_LIST_HEAD(&mm->context.pgtable_list); | 145 | INIT_LIST_HEAD(&mm->context.pgtable_list); |
145 | return (pgd_t *) crst_table_alloc(mm, s390_noexec); | 146 | return (pgd_t *) crst_table_alloc(mm, s390_noexec); |
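
The mm->context.list_lock initialized in pgd_alloc() above presumably serializes the crst_list/pgtable_list manipulation done by the page-table allocator in arch/s390/mm (those users are not shown in this section). A hedged sketch of the locking pattern:

#include <linux/list.h>
#include <linux/mm_types.h>
#include <linux/spinlock.h>

static void example_track_crst(struct mm_struct *mm, struct page *page)
{
	spin_lock(&mm->context.list_lock);
	list_add(&page->lru, &mm->context.crst_list);
	spin_unlock(&mm->context.list_lock);
}
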
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index c139fa7b8e89..cf8eed3fa779 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h | |||
@@ -14,7 +14,7 @@ | |||
14 | #define __ASM_S390_PROCESSOR_H | 14 | #define __ASM_S390_PROCESSOR_H |
15 | 15 | ||
16 | #include <linux/linkage.h> | 16 | #include <linux/linkage.h> |
17 | #include <asm/cpuid.h> | 17 | #include <asm/cpu.h> |
18 | #include <asm/page.h> | 18 | #include <asm/page.h> |
19 | #include <asm/ptrace.h> | 19 | #include <asm/ptrace.h> |
20 | #include <asm/setup.h> | 20 | #include <asm/setup.h> |
@@ -26,7 +26,7 @@ | |||
26 | */ | 26 | */ |
27 | #define current_text_addr() ({ void *pc; asm("basr %0,0" : "=a" (pc)); pc; }) | 27 | #define current_text_addr() ({ void *pc; asm("basr %0,0" : "=a" (pc)); pc; }) |
28 | 28 | ||
29 | static inline void get_cpu_id(cpuid_t *ptr) | 29 | static inline void get_cpu_id(struct cpuid *ptr) |
30 | { | 30 | { |
31 | asm volatile("stidp 0(%1)" : "=m" (*ptr) : "a" (ptr)); | 31 | asm volatile("stidp 0(%1)" : "=m" (*ptr) : "a" (ptr)); |
32 | } | 32 | } |
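
With cpuid_t gone, callers use struct cpuid from the new asm/cpu.h. A minimal usage sketch (function name illustrative):

#include <asm/cpu.h>
#include <asm/processor.h>

static unsigned int example_machine_type(void)
{
	struct cpuid id;

	get_cpu_id(&id);	/* STIDP fills version, ident, machine */
	return id.machine;
}
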
diff --git a/arch/s390/include/asm/scatterlist.h b/arch/s390/include/asm/scatterlist.h index 29ec8e28c8df..35d786fe93ae 100644 --- a/arch/s390/include/asm/scatterlist.h +++ b/arch/s390/include/asm/scatterlist.h | |||
@@ -1,19 +1 @@ | |||
1 | #ifndef _ASMS390_SCATTERLIST_H | #include <asm-generic/scatterlist.h> | |
2 | #define _ASMS390_SCATTERLIST_H | ||
3 | |||
4 | struct scatterlist { | ||
5 | #ifdef CONFIG_DEBUG_SG | ||
6 | unsigned long sg_magic; | ||
7 | #endif | ||
8 | unsigned long page_link; | ||
9 | unsigned int offset; | ||
10 | unsigned int length; | ||
11 | }; | ||
12 | |||
13 | #ifdef __s390x__ | ||
14 | #define ISA_DMA_THRESHOLD (0xffffffffffffffffUL) | ||
15 | #else | ||
16 | #define ISA_DMA_THRESHOLD (0xffffffffUL) | ||
17 | #endif | ||
18 | |||
19 | #endif /* _ASMS390X_SCATTERLIST_H */ | ||
diff --git a/arch/s390/include/asm/scsw.h b/arch/s390/include/asm/scsw.h new file mode 100644 index 000000000000..de389cb54d28 --- /dev/null +++ b/arch/s390/include/asm/scsw.h | |||
@@ -0,0 +1,956 @@ | |||
1 | /* | ||
2 | * Helper functions for scsw access. | ||
3 | * | ||
4 | * Copyright IBM Corp. 2008,2009 | ||
5 | * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #ifndef _ASM_S390_SCSW_H_ | ||
9 | #define _ASM_S390_SCSW_H_ | ||
10 | |||
11 | #include <linux/types.h> | ||
12 | #include <asm/chsc.h> | ||
13 | #include <asm/cio.h> | ||
14 | |||
15 | /** | ||
16 | * struct cmd_scsw - command-mode subchannel status word | ||
17 | * @key: subchannel key | ||
18 | * @sctl: suspend control | ||
19 | * @eswf: esw format | ||
20 | * @cc: deferred condition code | ||
21 | * @fmt: format | ||
22 | * @pfch: prefetch | ||
23 | * @isic: initial-status interruption control | ||
24 | * @alcc: address-limit checking control | ||
25 | * @ssi: suppress-suspended interruption | ||
26 | * @zcc: zero condition code | ||
27 | * @ectl: extended control | ||
28 | * @pno: path not operational | ||
29 | * @res: reserved | ||
30 | * @fctl: function control | ||
31 | * @actl: activity control | ||
32 | * @stctl: status control | ||
33 | * @cpa: channel program address | ||
34 | * @dstat: device status | ||
35 | * @cstat: subchannel status | ||
36 | * @count: residual count | ||
37 | */ | ||
38 | struct cmd_scsw { | ||
39 | __u32 key : 4; | ||
40 | __u32 sctl : 1; | ||
41 | __u32 eswf : 1; | ||
42 | __u32 cc : 2; | ||
43 | __u32 fmt : 1; | ||
44 | __u32 pfch : 1; | ||
45 | __u32 isic : 1; | ||
46 | __u32 alcc : 1; | ||
47 | __u32 ssi : 1; | ||
48 | __u32 zcc : 1; | ||
49 | __u32 ectl : 1; | ||
50 | __u32 pno : 1; | ||
51 | __u32 res : 1; | ||
52 | __u32 fctl : 3; | ||
53 | __u32 actl : 7; | ||
54 | __u32 stctl : 5; | ||
55 | __u32 cpa; | ||
56 | __u32 dstat : 8; | ||
57 | __u32 cstat : 8; | ||
58 | __u32 count : 16; | ||
59 | } __attribute__ ((packed)); | ||
60 | |||
61 | /** | ||
62 | * struct tm_scsw - transport-mode subchannel status word | ||
63 | * @key: subchannel key | ||
64 | * @eswf: esw format | ||
65 | * @cc: deferred condition code | ||
66 | * @fmt: format | ||
67 | * @x: IRB-format control | ||
68 | * @q: interrogate-complete | ||
69 | * @ectl: extended control | ||
70 | * @pno: path not operational | ||
71 | * @fctl: function control | ||
72 | * @actl: activity control | ||
73 | * @stctl: status control | ||
74 | * @tcw: TCW address | ||
75 | * @dstat: device status | ||
76 | * @cstat: subchannel status | ||
77 | * @fcxs: FCX status | ||
78 | * @schxs: subchannel-extended status | ||
79 | */ | ||
80 | struct tm_scsw { | ||
81 | u32 key:4; | ||
82 | u32 :1; | ||
83 | u32 eswf:1; | ||
84 | u32 cc:2; | ||
85 | u32 fmt:3; | ||
86 | u32 x:1; | ||
87 | u32 q:1; | ||
88 | u32 :1; | ||
89 | u32 ectl:1; | ||
90 | u32 pno:1; | ||
91 | u32 :1; | ||
92 | u32 fctl:3; | ||
93 | u32 actl:7; | ||
94 | u32 stctl:5; | ||
95 | u32 tcw; | ||
96 | u32 dstat:8; | ||
97 | u32 cstat:8; | ||
98 | u32 fcxs:8; | ||
99 | u32 schxs:8; | ||
100 | } __attribute__ ((packed)); | ||
101 | |||
102 | /** | ||
103 | * union scsw - subchannel status word | ||
104 | * @cmd: command-mode SCSW | ||
105 | * @tm: transport-mode SCSW | ||
106 | */ | ||
107 | union scsw { | ||
108 | struct cmd_scsw cmd; | ||
109 | struct tm_scsw tm; | ||
110 | } __attribute__ ((packed)); | ||
111 | |||
112 | #define SCSW_FCTL_CLEAR_FUNC 0x1 | ||
113 | #define SCSW_FCTL_HALT_FUNC 0x2 | ||
114 | #define SCSW_FCTL_START_FUNC 0x4 | ||
115 | |||
116 | #define SCSW_ACTL_SUSPENDED 0x1 | ||
117 | #define SCSW_ACTL_DEVACT 0x2 | ||
118 | #define SCSW_ACTL_SCHACT 0x4 | ||
119 | #define SCSW_ACTL_CLEAR_PEND 0x8 | ||
120 | #define SCSW_ACTL_HALT_PEND 0x10 | ||
121 | #define SCSW_ACTL_START_PEND 0x20 | ||
122 | #define SCSW_ACTL_RESUME_PEND 0x40 | ||
123 | |||
124 | #define SCSW_STCTL_STATUS_PEND 0x1 | ||
125 | #define SCSW_STCTL_SEC_STATUS 0x2 | ||
126 | #define SCSW_STCTL_PRIM_STATUS 0x4 | ||
127 | #define SCSW_STCTL_INTER_STATUS 0x8 | ||
128 | #define SCSW_STCTL_ALERT_STATUS 0x10 | ||
129 | |||
130 | #define DEV_STAT_ATTENTION 0x80 | ||
131 | #define DEV_STAT_STAT_MOD 0x40 | ||
132 | #define DEV_STAT_CU_END 0x20 | ||
133 | #define DEV_STAT_BUSY 0x10 | ||
134 | #define DEV_STAT_CHN_END 0x08 | ||
135 | #define DEV_STAT_DEV_END 0x04 | ||
136 | #define DEV_STAT_UNIT_CHECK 0x02 | ||
137 | #define DEV_STAT_UNIT_EXCEP 0x01 | ||
138 | |||
139 | #define SCHN_STAT_PCI 0x80 | ||
140 | #define SCHN_STAT_INCORR_LEN 0x40 | ||
141 | #define SCHN_STAT_PROG_CHECK 0x20 | ||
142 | #define SCHN_STAT_PROT_CHECK 0x10 | ||
143 | #define SCHN_STAT_CHN_DATA_CHK 0x08 | ||
144 | #define SCHN_STAT_CHN_CTRL_CHK 0x04 | ||
145 | #define SCHN_STAT_INTF_CTRL_CHK 0x02 | ||
146 | #define SCHN_STAT_CHAIN_CHECK 0x01 | ||
147 | |||
148 | /* | ||
149 | * architectured values for first sense byte | ||
150 | */ | ||
151 | #define SNS0_CMD_REJECT 0x80 | ||
152 | #define SNS_CMD_REJECT SNS0_CMD_REJECT | ||
153 | #define SNS0_INTERVENTION_REQ 0x40 | ||
154 | #define SNS0_BUS_OUT_CHECK 0x20 | ||
155 | #define SNS0_EQUIPMENT_CHECK 0x10 | ||
156 | #define SNS0_DATA_CHECK 0x08 | ||
157 | #define SNS0_OVERRUN 0x04 | ||
158 | #define SNS0_INCOMPL_DOMAIN 0x01 | ||
159 | |||
160 | /* | ||
161 | * architectured values for second sense byte | ||
162 | */ | ||
163 | #define SNS1_PERM_ERR 0x80 | ||
164 | #define SNS1_INV_TRACK_FORMAT 0x40 | ||
165 | #define SNS1_EOC 0x20 | ||
166 | #define SNS1_MESSAGE_TO_OPER 0x10 | ||
167 | #define SNS1_NO_REC_FOUND 0x08 | ||
168 | #define SNS1_FILE_PROTECTED 0x04 | ||
169 | #define SNS1_WRITE_INHIBITED 0x02 | ||
170 | #define SNS1_INPRECISE_END 0x01 | ||
171 | |||
172 | /* | ||
173 | * architectured values for third sense byte | ||
174 | */ | ||
175 | #define SNS2_REQ_INH_WRITE 0x80 | ||
176 | #define SNS2_CORRECTABLE 0x40 | ||
177 | #define SNS2_FIRST_LOG_ERR 0x20 | ||
178 | #define SNS2_ENV_DATA_PRESENT 0x10 | ||
179 | #define SNS2_INPRECISE_END 0x04 | ||
180 | |||
181 | /** | ||
182 | * scsw_is_tm - check for transport mode scsw | ||
183 | * @scsw: pointer to scsw | ||
184 | * | ||
185 | * Return non-zero if the specified scsw is a transport mode scsw, zero | ||
186 | * otherwise. | ||
187 | */ | ||
188 | static inline int scsw_is_tm(union scsw *scsw) | ||
189 | { | ||
190 | return css_general_characteristics.fcx && (scsw->tm.x == 1); | ||
191 | } | ||
192 | |||
193 | /** | ||
194 | * scsw_key - return scsw key field | ||
195 | * @scsw: pointer to scsw | ||
196 | * | ||
197 | * Return the value of the key field of the specified scsw, regardless of | ||
198 | * whether it is a transport mode or command mode scsw. | ||
199 | */ | ||
200 | static inline u32 scsw_key(union scsw *scsw) | ||
201 | { | ||
202 | if (scsw_is_tm(scsw)) | ||
203 | return scsw->tm.key; | ||
204 | else | ||
205 | return scsw->cmd.key; | ||
206 | } | ||
207 | |||
208 | /** | ||
209 | * scsw_eswf - return scsw eswf field | ||
210 | * @scsw: pointer to scsw | ||
211 | * | ||
212 | * Return the value of the eswf field of the specified scsw, regardless of | ||
213 | * whether it is a transport mode or command mode scsw. | ||
214 | */ | ||
215 | static inline u32 scsw_eswf(union scsw *scsw) | ||
216 | { | ||
217 | if (scsw_is_tm(scsw)) | ||
218 | return scsw->tm.eswf; | ||
219 | else | ||
220 | return scsw->cmd.eswf; | ||
221 | } | ||
222 | |||
223 | /** | ||
224 | * scsw_cc - return scsw cc field | ||
225 | * @scsw: pointer to scsw | ||
226 | * | ||
227 | * Return the value of the cc field of the specified scsw, regardless of | ||
228 | * whether it is a transport mode or command mode scsw. | ||
229 | */ | ||
230 | static inline u32 scsw_cc(union scsw *scsw) | ||
231 | { | ||
232 | if (scsw_is_tm(scsw)) | ||
233 | return scsw->tm.cc; | ||
234 | else | ||
235 | return scsw->cmd.cc; | ||
236 | } | ||
237 | |||
238 | /** | ||
239 | * scsw_ectl - return scsw ectl field | ||
240 | * @scsw: pointer to scsw | ||
241 | * | ||
242 | * Return the value of the ectl field of the specified scsw, regardless of | ||
243 | * whether it is a transport mode or command mode scsw. | ||
244 | */ | ||
245 | static inline u32 scsw_ectl(union scsw *scsw) | ||
246 | { | ||
247 | if (scsw_is_tm(scsw)) | ||
248 | return scsw->tm.ectl; | ||
249 | else | ||
250 | return scsw->cmd.ectl; | ||
251 | } | ||
252 | |||
253 | /** | ||
254 | * scsw_pno - return scsw pno field | ||
255 | * @scsw: pointer to scsw | ||
256 | * | ||
257 | * Return the value of the pno field of the specified scsw, regardless of | ||
258 | * whether it is a transport mode or command mode scsw. | ||
259 | */ | ||
260 | static inline u32 scsw_pno(union scsw *scsw) | ||
261 | { | ||
262 | if (scsw_is_tm(scsw)) | ||
263 | return scsw->tm.pno; | ||
264 | else | ||
265 | return scsw->cmd.pno; | ||
266 | } | ||
267 | |||
268 | /** | ||
269 | * scsw_fctl - return scsw fctl field | ||
270 | * @scsw: pointer to scsw | ||
271 | * | ||
272 | * Return the value of the fctl field of the specified scsw, regardless of | ||
273 | * whether it is a transport mode or command mode scsw. | ||
274 | */ | ||
275 | static inline u32 scsw_fctl(union scsw *scsw) | ||
276 | { | ||
277 | if (scsw_is_tm(scsw)) | ||
278 | return scsw->tm.fctl; | ||
279 | else | ||
280 | return scsw->cmd.fctl; | ||
281 | } | ||
282 | |||
283 | /** | ||
284 | * scsw_actl - return scsw actl field | ||
285 | * @scsw: pointer to scsw | ||
286 | * | ||
287 | * Return the value of the actl field of the specified scsw, regardless of | ||
288 | * whether it is a transport mode or command mode scsw. | ||
289 | */ | ||
290 | static inline u32 scsw_actl(union scsw *scsw) | ||
291 | { | ||
292 | if (scsw_is_tm(scsw)) | ||
293 | return scsw->tm.actl; | ||
294 | else | ||
295 | return scsw->cmd.actl; | ||
296 | } | ||
297 | |||
298 | /** | ||
299 | * scsw_stctl - return scsw stctl field | ||
300 | * @scsw: pointer to scsw | ||
301 | * | ||
302 | * Return the value of the stctl field of the specified scsw, regardless of | ||
303 | * whether it is a transport mode or command mode scsw. | ||
304 | */ | ||
305 | static inline u32 scsw_stctl(union scsw *scsw) | ||
306 | { | ||
307 | if (scsw_is_tm(scsw)) | ||
308 | return scsw->tm.stctl; | ||
309 | else | ||
310 | return scsw->cmd.stctl; | ||
311 | } | ||
312 | |||
313 | /** | ||
314 | * scsw_dstat - return scsw dstat field | ||
315 | * @scsw: pointer to scsw | ||
316 | * | ||
317 | * Return the value of the dstat field of the specified scsw, regardless of | ||
318 | * whether it is a transport mode or command mode scsw. | ||
319 | */ | ||
320 | static inline u32 scsw_dstat(union scsw *scsw) | ||
321 | { | ||
322 | if (scsw_is_tm(scsw)) | ||
323 | return scsw->tm.dstat; | ||
324 | else | ||
325 | return scsw->cmd.dstat; | ||
326 | } | ||
327 | |||
328 | /** | ||
329 | * scsw_cstat - return scsw cstat field | ||
330 | * @scsw: pointer to scsw | ||
331 | * | ||
332 | * Return the value of the cstat field of the specified scsw, regardless of | ||
333 | * whether it is a transport mode or command mode scsw. | ||
334 | */ | ||
335 | static inline u32 scsw_cstat(union scsw *scsw) | ||
336 | { | ||
337 | if (scsw_is_tm(scsw)) | ||
338 | return scsw->tm.cstat; | ||
339 | else | ||
340 | return scsw->cmd.cstat; | ||
341 | } | ||
342 | |||
343 | /** | ||
344 | * scsw_cmd_is_valid_key - check key field validity | ||
345 | * @scsw: pointer to scsw | ||
346 | * | ||
347 | * Return non-zero if the key field of the specified command mode scsw is | ||
348 | * valid, zero otherwise. | ||
349 | */ | ||
350 | static inline int scsw_cmd_is_valid_key(union scsw *scsw) | ||
351 | { | ||
352 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); | ||
353 | } | ||
354 | |||
355 | /** | ||
356 | * scsw_cmd_is_valid_sctl - check sctl field validity | ||
357 | * @scsw: pointer to scsw | ||
358 | * | ||
359 | * Return non-zero if the sctl field of the specified command mode scsw is | ||
360 | * valid, zero otherwise. | ||
361 | */ | ||
362 | static inline int scsw_cmd_is_valid_sctl(union scsw *scsw) | ||
363 | { | ||
364 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); | ||
365 | } | ||
366 | |||
367 | /** | ||
368 | * scsw_cmd_is_valid_eswf - check eswf field validity | ||
369 | * @scsw: pointer to scsw | ||
370 | * | ||
371 | * Return non-zero if the eswf field of the specified command mode scsw is | ||
372 | * valid, zero otherwise. | ||
373 | */ | ||
374 | static inline int scsw_cmd_is_valid_eswf(union scsw *scsw) | ||
375 | { | ||
376 | return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND); | ||
377 | } | ||
378 | |||
379 | /** | ||
380 | * scsw_cmd_is_valid_cc - check cc field validity | ||
381 | * @scsw: pointer to scsw | ||
382 | * | ||
383 | * Return non-zero if the cc field of the specified command mode scsw is | ||
384 | * valid, zero otherwise. | ||
385 | */ | ||
386 | static inline int scsw_cmd_is_valid_cc(union scsw *scsw) | ||
387 | { | ||
388 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) && | ||
389 | (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND); | ||
390 | } | ||
391 | |||
392 | /** | ||
393 | * scsw_cmd_is_valid_fmt - check fmt field validity | ||
394 | * @scsw: pointer to scsw | ||
395 | * | ||
396 | * Return non-zero if the fmt field of the specified command mode scsw is | ||
397 | * valid, zero otherwise. | ||
398 | */ | ||
399 | static inline int scsw_cmd_is_valid_fmt(union scsw *scsw) | ||
400 | { | ||
401 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); | ||
402 | } | ||
403 | |||
404 | /** | ||
405 | * scsw_cmd_is_valid_pfch - check pfch field validity | ||
406 | * @scsw: pointer to scsw | ||
407 | * | ||
408 | * Return non-zero if the pfch field of the specified command mode scsw is | ||
409 | * valid, zero otherwise. | ||
410 | */ | ||
411 | static inline int scsw_cmd_is_valid_pfch(union scsw *scsw) | ||
412 | { | ||
413 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); | ||
414 | } | ||
415 | |||
416 | /** | ||
417 | * scsw_cmd_is_valid_isic - check isic field validity | ||
418 | * @scsw: pointer to scsw | ||
419 | * | ||
420 | * Return non-zero if the isic field of the specified command mode scsw is | ||
421 | * valid, zero otherwise. | ||
422 | */ | ||
423 | static inline int scsw_cmd_is_valid_isic(union scsw *scsw) | ||
424 | { | ||
425 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); | ||
426 | } | ||
427 | |||
428 | /** | ||
429 | * scsw_cmd_is_valid_alcc - check alcc field validity | ||
430 | * @scsw: pointer to scsw | ||
431 | * | ||
432 | * Return non-zero if the alcc field of the specified command mode scsw is | ||
433 | * valid, zero otherwise. | ||
434 | */ | ||
435 | static inline int scsw_cmd_is_valid_alcc(union scsw *scsw) | ||
436 | { | ||
437 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); | ||
438 | } | ||
439 | |||
440 | /** | ||
441 | * scsw_cmd_is_valid_ssi - check ssi field validity | ||
442 | * @scsw: pointer to scsw | ||
443 | * | ||
444 | * Return non-zero if the ssi field of the specified command mode scsw is | ||
445 | * valid, zero otherwise. | ||
446 | */ | ||
447 | static inline int scsw_cmd_is_valid_ssi(union scsw *scsw) | ||
448 | { | ||
449 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); | ||
450 | } | ||
451 | |||
452 | /** | ||
453 | * scsw_cmd_is_valid_zcc - check zcc field validity | ||
454 | * @scsw: pointer to scsw | ||
455 | * | ||
456 | * Return non-zero if the zcc field of the specified command mode scsw is | ||
457 | * valid, zero otherwise. | ||
458 | */ | ||
459 | static inline int scsw_cmd_is_valid_zcc(union scsw *scsw) | ||
460 | { | ||
461 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) && | ||
462 | (scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS); | ||
463 | } | ||
464 | |||
465 | /** | ||
466 | * scsw_cmd_is_valid_ectl - check ectl field validity | ||
467 | * @scsw: pointer to scsw | ||
468 | * | ||
469 | * Return non-zero if the ectl field of the specified command mode scsw is | ||
470 | * valid, zero otherwise. | ||
471 | */ | ||
472 | static inline int scsw_cmd_is_valid_ectl(union scsw *scsw) | ||
473 | { | ||
474 | return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && | ||
475 | !(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) && | ||
476 | (scsw->cmd.stctl & SCSW_STCTL_ALERT_STATUS); | ||
477 | } | ||
478 | |||
479 | /** | ||
480 | * scsw_cmd_is_valid_pno - check pno field validity | ||
481 | * @scsw: pointer to scsw | ||
482 | * | ||
483 | * Return non-zero if the pno field of the specified command mode scsw is | ||
484 | * valid, zero otherwise. | ||
485 | */ | ||
486 | static inline int scsw_cmd_is_valid_pno(union scsw *scsw) | ||
487 | { | ||
488 | return (scsw->cmd.fctl != 0) && | ||
489 | (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && | ||
490 | (!(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) || | ||
491 | ((scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) && | ||
492 | (scsw->cmd.actl & SCSW_ACTL_SUSPENDED))); | ||
493 | } | ||
494 | |||
495 | /** | ||
496 | * scsw_cmd_is_valid_fctl - check fctl field validity | ||
497 | * @scsw: pointer to scsw | ||
498 | * | ||
499 | * Return non-zero if the fctl field of the specified command mode scsw is | ||
500 | * valid, zero otherwise. | ||
501 | */ | ||
502 | static inline int scsw_cmd_is_valid_fctl(union scsw *scsw) | ||
503 | { | ||
504 | /* Only valid if pmcw.dnv == 1 */ | ||
505 | return 1; | ||
506 | } | ||
507 | |||
508 | /** | ||
509 | * scsw_cmd_is_valid_actl - check actl field validity | ||
510 | * @scsw: pointer to scsw | ||
511 | * | ||
512 | * Return non-zero if the actl field of the specified command mode scsw is | ||
513 | * valid, zero otherwise. | ||
514 | */ | ||
515 | static inline int scsw_cmd_is_valid_actl(union scsw *scsw) | ||
516 | { | ||
517 | /* Only valid if pmcw.dnv == 1 */ | ||
518 | return 1; | ||
519 | } | ||
520 | |||
521 | /** | ||
522 | * scsw_cmd_is_valid_stctl - check stctl field validity | ||
523 | * @scsw: pointer to scsw | ||
524 | * | ||
525 | * Return non-zero if the stctl field of the specified command mode scsw is | ||
526 | * valid, zero otherwise. | ||
527 | */ | ||
528 | static inline int scsw_cmd_is_valid_stctl(union scsw *scsw) | ||
529 | { | ||
530 | /* Only valid if pmcw.dnv == 1 */ | ||
531 | return 1; | ||
532 | } | ||
533 | |||
534 | /** | ||
535 | * scsw_cmd_is_valid_dstat - check dstat field validity | ||
536 | * @scsw: pointer to scsw | ||
537 | * | ||
538 | * Return non-zero if the dstat field of the specified command mode scsw is | ||
539 | * valid, zero otherwise. | ||
540 | */ | ||
541 | static inline int scsw_cmd_is_valid_dstat(union scsw *scsw) | ||
542 | { | ||
543 | return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && | ||
544 | (scsw->cmd.cc != 3); | ||
545 | } | ||
546 | |||
547 | /** | ||
548 | * scsw_cmd_is_valid_cstat - check cstat field validity | ||
549 | * @scsw: pointer to scsw | ||
550 | * | ||
551 | * Return non-zero if the cstat field of the specified command mode scsw is | ||
552 | * valid, zero otherwise. | ||
553 | */ | ||
554 | static inline int scsw_cmd_is_valid_cstat(union scsw *scsw) | ||
555 | { | ||
556 | return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && | ||
557 | (scsw->cmd.cc != 3); | ||
558 | } | ||
559 | |||
560 | /** | ||
561 | * scsw_tm_is_valid_key - check key field validity | ||
562 | * @scsw: pointer to scsw | ||
563 | * | ||
564 | * Return non-zero if the key field of the specified transport mode scsw is | ||
565 | * valid, zero otherwise. | ||
566 | */ | ||
567 | static inline int scsw_tm_is_valid_key(union scsw *scsw) | ||
568 | { | ||
569 | return (scsw->tm.fctl & SCSW_FCTL_START_FUNC); | ||
570 | } | ||
571 | |||
572 | /** | ||
573 | * scsw_tm_is_valid_eswf - check eswf field validity | ||
574 | * @scsw: pointer to scsw | ||
575 | * | ||
576 | * Return non-zero if the eswf field of the specified transport mode scsw is | ||
577 | * valid, zero otherwise. | ||
578 | */ | ||
579 | static inline int scsw_tm_is_valid_eswf(union scsw *scsw) | ||
580 | { | ||
581 | return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND); | ||
582 | } | ||
583 | |||
584 | /** | ||
585 | * scsw_tm_is_valid_cc - check cc field validity | ||
586 | * @scsw: pointer to scsw | ||
587 | * | ||
588 | * Return non-zero if the cc field of the specified transport mode scsw is | ||
589 | * valid, zero otherwise. | ||
590 | */ | ||
591 | static inline int scsw_tm_is_valid_cc(union scsw *scsw) | ||
592 | { | ||
593 | return (scsw->tm.fctl & SCSW_FCTL_START_FUNC) && | ||
594 | (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND); | ||
595 | } | ||
596 | |||
597 | /** | ||
598 | * scsw_tm_is_valid_fmt - check fmt field validity | ||
599 | * @scsw: pointer to scsw | ||
600 | * | ||
601 | * Return non-zero if the fmt field of the specified transport mode scsw is | ||
602 | * valid, zero otherwise. | ||
603 | */ | ||
604 | static inline int scsw_tm_is_valid_fmt(union scsw *scsw) | ||
605 | { | ||
606 | return 1; | ||
607 | } | ||
608 | |||
609 | /** | ||
610 | * scsw_tm_is_valid_x - check x field validity | ||
611 | * @scsw: pointer to scsw | ||
612 | * | ||
613 | * Return non-zero if the x field of the specified transport mode scsw is | ||
614 | * valid, zero otherwise. | ||
615 | */ | ||
616 | static inline int scsw_tm_is_valid_x(union scsw *scsw) | ||
617 | { | ||
618 | return 1; | ||
619 | } | ||
620 | |||
621 | /** | ||
622 | * scsw_tm_is_valid_q - check q field validity | ||
623 | * @scsw: pointer to scsw | ||
624 | * | ||
625 | * Return non-zero if the q field of the specified transport mode scsw is | ||
626 | * valid, zero otherwise. | ||
627 | */ | ||
628 | static inline int scsw_tm_is_valid_q(union scsw *scsw) | ||
629 | { | ||
630 | return 1; | ||
631 | } | ||
632 | |||
633 | /** | ||
634 | * scsw_tm_is_valid_ectl - check ectl field validity | ||
635 | * @scsw: pointer to scsw | ||
636 | * | ||
637 | * Return non-zero if the ectl field of the specified transport mode scsw is | ||
638 | * valid, zero otherwise. | ||
639 | */ | ||
640 | static inline int scsw_tm_is_valid_ectl(union scsw *scsw) | ||
641 | { | ||
642 | return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && | ||
643 | !(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) && | ||
644 | (scsw->tm.stctl & SCSW_STCTL_ALERT_STATUS); | ||
645 | } | ||
646 | |||
647 | /** | ||
648 | * scsw_tm_is_valid_pno - check pno field validity | ||
649 | * @scsw: pointer to scsw | ||
650 | * | ||
651 | * Return non-zero if the pno field of the specified transport mode scsw is | ||
652 | * valid, zero otherwise. | ||
653 | */ | ||
654 | static inline int scsw_tm_is_valid_pno(union scsw *scsw) | ||
655 | { | ||
656 | return (scsw->tm.fctl != 0) && | ||
657 | (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && | ||
658 | (!(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) || | ||
659 | ((scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) && | ||
660 | (scsw->tm.actl & SCSW_ACTL_SUSPENDED))); | ||
661 | } | ||
662 | |||
663 | /** | ||
664 | * scsw_tm_is_valid_fctl - check fctl field validity | ||
665 | * @scsw: pointer to scsw | ||
666 | * | ||
667 | * Return non-zero if the fctl field of the specified transport mode scsw is | ||
668 | * valid, zero otherwise. | ||
669 | */ | ||
670 | static inline int scsw_tm_is_valid_fctl(union scsw *scsw) | ||
671 | { | ||
672 | /* Only valid if pmcw.dnv == 1 */ | ||
673 | return 1; | ||
674 | } | ||
675 | |||
676 | /** | ||
677 | * scsw_tm_is_valid_actl - check actl field validity | ||
678 | * @scsw: pointer to scsw | ||
679 | * | ||
680 | * Return non-zero if the actl field of the specified transport mode scsw is | ||
681 | * valid, zero otherwise. | ||
682 | */ | ||
683 | static inline int scsw_tm_is_valid_actl(union scsw *scsw) | ||
684 | { | ||
685 | /* Only valid if pmcw.dnv == 1 */ | ||
686 | return 1; | ||
687 | } | ||
688 | |||
689 | /** | ||
690 | * scsw_tm_is_valid_stctl - check stctl field validity | ||
691 | * @scsw: pointer to scsw | ||
692 | * | ||
693 | * Return non-zero if the stctl field of the specified transport mode scsw is | ||
694 | * valid, zero otherwise. | ||
695 | */ | ||
696 | static inline int scsw_tm_is_valid_stctl(union scsw *scsw) | ||
697 | { | ||
698 | /* Only valid if pmcw.dnv == 1 */ | ||
699 | return 1; | ||
700 | } | ||
701 | |||
702 | /** | ||
703 | * scsw_tm_is_valid_dstat - check dstat field validity | ||
704 | * @scsw: pointer to scsw | ||
705 | * | ||
706 | * Return non-zero if the dstat field of the specified transport mode scsw is | ||
707 | * valid, zero otherwise. | ||
708 | */ | ||
709 | static inline int scsw_tm_is_valid_dstat(union scsw *scsw) | ||
710 | { | ||
711 | return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && | ||
712 | (scsw->tm.cc != 3); | ||
713 | } | ||
714 | |||
715 | /** | ||
716 | * scsw_tm_is_valid_cstat - check cstat field validity | ||
717 | * @scsw: pointer to scsw | ||
718 | * | ||
719 | * Return non-zero if the cstat field of the specified transport mode scsw is | ||
720 | * valid, zero otherwise. | ||
721 | */ | ||
722 | static inline int scsw_tm_is_valid_cstat(union scsw *scsw) | ||
723 | { | ||
724 | return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && | ||
725 | (scsw->tm.cc != 3); | ||
726 | } | ||
727 | |||
728 | /** | ||
729 | * scsw_tm_is_valid_fcxs - check fcxs field validity | ||
730 | * @scsw: pointer to scsw | ||
731 | * | ||
732 | * Return non-zero if the fcxs field of the specified transport mode scsw is | ||
733 | * valid, zero otherwise. | ||
734 | */ | ||
735 | static inline int scsw_tm_is_valid_fcxs(union scsw *scsw) | ||
736 | { | ||
737 | return 1; | ||
738 | } | ||
739 | |||
740 | /** | ||
741 | * scsw_tm_is_valid_schxs - check schxs field validity | ||
742 | * @scsw: pointer to scsw | ||
743 | * | ||
744 | * Return non-zero if the schxs field of the specified transport mode scsw is | ||
745 | * valid, zero otherwise. | ||
746 | */ | ||
747 | static inline int scsw_tm_is_valid_schxs(union scsw *scsw) | ||
748 | { | ||
749 | return (scsw->tm.cstat & (SCHN_STAT_PROG_CHECK | | ||
750 | SCHN_STAT_INTF_CTRL_CHK | | ||
751 | SCHN_STAT_PROT_CHECK | | ||
752 | SCHN_STAT_CHN_DATA_CHK)); | ||
753 | } | ||
754 | |||
755 | /** | ||
756 | * scsw_is_valid_actl - check actl field validity | ||
757 | * @scsw: pointer to scsw | ||
758 | * | ||
759 | * Return non-zero if the actl field of the specified scsw is valid, | ||
760 | * regardless of whether it is a transport mode or command mode scsw. | ||
761 | * Return zero if the field does not contain a valid value. | ||
762 | */ | ||
763 | static inline int scsw_is_valid_actl(union scsw *scsw) | ||
764 | { | ||
765 | if (scsw_is_tm(scsw)) | ||
766 | return scsw_tm_is_valid_actl(scsw); | ||
767 | else | ||
768 | return scsw_cmd_is_valid_actl(scsw); | ||
769 | } | ||
770 | |||
771 | /** | ||
772 | * scsw_is_valid_cc - check cc field validity | ||
773 | * @scsw: pointer to scsw | ||
774 | * | ||
775 | * Return non-zero if the cc field of the specified scsw is valid, | ||
776 | * regardless of whether it is a transport mode or command mode scsw. | ||
777 | * Return zero if the field does not contain a valid value. | ||
778 | */ | ||
779 | static inline int scsw_is_valid_cc(union scsw *scsw) | ||
780 | { | ||
781 | if (scsw_is_tm(scsw)) | ||
782 | return scsw_tm_is_valid_cc(scsw); | ||
783 | else | ||
784 | return scsw_cmd_is_valid_cc(scsw); | ||
785 | } | ||
786 | |||
787 | /** | ||
788 | * scsw_is_valid_cstat - check cstat field validity | ||
789 | * @scsw: pointer to scsw | ||
790 | * | ||
791 | * Return non-zero if the cstat field of the specified scsw is valid, | ||
792 | * regardless of whether it is a transport mode or command mode scsw. | ||
793 | * Return zero if the field does not contain a valid value. | ||
794 | */ | ||
795 | static inline int scsw_is_valid_cstat(union scsw *scsw) | ||
796 | { | ||
797 | if (scsw_is_tm(scsw)) | ||
798 | return scsw_tm_is_valid_cstat(scsw); | ||
799 | else | ||
800 | return scsw_cmd_is_valid_cstat(scsw); | ||
801 | } | ||
802 | |||
803 | /** | ||
804 | * scsw_is_valid_dstat - check dstat field validity | ||
805 | * @scsw: pointer to scsw | ||
806 | * | ||
807 | * Return non-zero if the dstat field of the specified scsw is valid, | ||
808 | * regardless of whether it is a transport mode or command mode scsw. | ||
809 | * Return zero if the field does not contain a valid value. | ||
810 | */ | ||
811 | static inline int scsw_is_valid_dstat(union scsw *scsw) | ||
812 | { | ||
813 | if (scsw_is_tm(scsw)) | ||
814 | return scsw_tm_is_valid_dstat(scsw); | ||
815 | else | ||
816 | return scsw_cmd_is_valid_dstat(scsw); | ||
817 | } | ||
818 | |||
819 | /** | ||
820 | * scsw_is_valid_ectl - check ectl field validity | ||
821 | * @scsw: pointer to scsw | ||
822 | * | ||
823 | * Return non-zero if the ectl field of the specified scsw is valid, | ||
824 | * regardless of whether it is a transport mode or command mode scsw. | ||
825 | * Return zero if the field does not contain a valid value. | ||
826 | */ | ||
827 | static inline int scsw_is_valid_ectl(union scsw *scsw) | ||
828 | { | ||
829 | if (scsw_is_tm(scsw)) | ||
830 | return scsw_tm_is_valid_ectl(scsw); | ||
831 | else | ||
832 | return scsw_cmd_is_valid_ectl(scsw); | ||
833 | } | ||
834 | |||
835 | /** | ||
836 | * scsw_is_valid_eswf - check eswf field validity | ||
837 | * @scsw: pointer to scsw | ||
838 | * | ||
839 | * Return non-zero if the eswf field of the specified scsw is valid, | ||
840 | * regardless of whether it is a transport mode or command mode scsw. | ||
841 | * Return zero if the field does not contain a valid value. | ||
842 | */ | ||
843 | static inline int scsw_is_valid_eswf(union scsw *scsw) | ||
844 | { | ||
845 | if (scsw_is_tm(scsw)) | ||
846 | return scsw_tm_is_valid_eswf(scsw); | ||
847 | else | ||
848 | return scsw_cmd_is_valid_eswf(scsw); | ||
849 | } | ||
850 | |||
851 | /** | ||
852 | * scsw_is_valid_fctl - check fctl field validity | ||
853 | * @scsw: pointer to scsw | ||
854 | * | ||
855 | * Return non-zero if the fctl field of the specified scsw is valid, | ||
856 | * regardless of whether it is a transport mode or command mode scsw. | ||
857 | * Return zero if the field does not contain a valid value. | ||
858 | */ | ||
859 | static inline int scsw_is_valid_fctl(union scsw *scsw) | ||
860 | { | ||
861 | if (scsw_is_tm(scsw)) | ||
862 | return scsw_tm_is_valid_fctl(scsw); | ||
863 | else | ||
864 | return scsw_cmd_is_valid_fctl(scsw); | ||
865 | } | ||
866 | |||
867 | /** | ||
868 | * scsw_is_valid_key - check key field validity | ||
869 | * @scsw: pointer to scsw | ||
870 | * | ||
871 | * Return non-zero if the key field of the specified scsw is valid, | ||
872 | * regardless of whether it is a transport mode or command mode scsw. | ||
873 | * Return zero if the field does not contain a valid value. | ||
874 | */ | ||
875 | static inline int scsw_is_valid_key(union scsw *scsw) | ||
876 | { | ||
877 | if (scsw_is_tm(scsw)) | ||
878 | return scsw_tm_is_valid_key(scsw); | ||
879 | else | ||
880 | return scsw_cmd_is_valid_key(scsw); | ||
881 | } | ||
882 | |||
883 | /** | ||
884 | * scsw_is_valid_pno - check pno field validity | ||
885 | * @scsw: pointer to scsw | ||
886 | * | ||
887 | * Return non-zero if the pno field of the specified scsw is valid, | ||
888 | * regardless of whether it is a transport mode or command mode scsw. | ||
889 | * Return zero if the field does not contain a valid value. | ||
890 | */ | ||
891 | static inline int scsw_is_valid_pno(union scsw *scsw) | ||
892 | { | ||
893 | if (scsw_is_tm(scsw)) | ||
894 | return scsw_tm_is_valid_pno(scsw); | ||
895 | else | ||
896 | return scsw_cmd_is_valid_pno(scsw); | ||
897 | } | ||
898 | |||
899 | /** | ||
900 | * scsw_is_valid_stctl - check stctl field validity | ||
901 | * @scsw: pointer to scsw | ||
902 | * | ||
903 | * Return non-zero if the stctl field of the specified scsw is valid, | ||
904 | * regardless of whether it is a transport mode or command mode scsw. | ||
905 | * Return zero if the field does not contain a valid value. | ||
906 | */ | ||
907 | static inline int scsw_is_valid_stctl(union scsw *scsw) | ||
908 | { | ||
909 | if (scsw_is_tm(scsw)) | ||
910 | return scsw_tm_is_valid_stctl(scsw); | ||
911 | else | ||
912 | return scsw_cmd_is_valid_stctl(scsw); | ||
913 | } | ||
914 | |||
915 | /** | ||
916 | * scsw_cmd_is_solicited - check for solicited scsw | ||
917 | * @scsw: pointer to scsw | ||
918 | * | ||
919 | * Return non-zero if the command mode scsw indicates that the associated | ||
920 | * status condition is solicited, zero if it is unsolicited. | ||
921 | */ | ||
922 | static inline int scsw_cmd_is_solicited(union scsw *scsw) | ||
923 | { | ||
924 | return (scsw->cmd.cc != 0) || (scsw->cmd.stctl != | ||
925 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)); | ||
926 | } | ||
927 | |||
928 | /** | ||
929 | * scsw_tm_is_solicited - check for solicited scsw | ||
930 | * @scsw: pointer to scsw | ||
931 | * | ||
932 | * Return non-zero if the transport mode scsw indicates that the associated | ||
933 | * status condition is solicited, zero if it is unsolicited. | ||
934 | */ | ||
935 | static inline int scsw_tm_is_solicited(union scsw *scsw) | ||
936 | { | ||
937 | return (scsw->tm.cc != 0) || (scsw->tm.stctl != | ||
938 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)); | ||
939 | } | ||
940 | |||
941 | /** | ||
942 | * scsw_is_solicited - check for solicited scsw | ||
943 | * @scsw: pointer to scsw | ||
944 | * | ||
945 | * Return non-zero if the transport or command mode scsw indicates that the | ||
946 | * associated status condition is solicited, zero if it is unsolicited. | ||
947 | */ | ||
948 | static inline int scsw_is_solicited(union scsw *scsw) | ||
949 | { | ||
950 | if (scsw_is_tm(scsw)) | ||
951 | return scsw_tm_is_solicited(scsw); | ||
952 | else | ||
953 | return scsw_cmd_is_solicited(scsw); | ||
954 | } | ||
955 | |||
956 | #endif /* _ASM_S390_SCSW_H_ */ | ||
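
The helpers in the new asm/scsw.h are inlines, replacing the out-of-line declarations removed from asm/cio.h above. The intended pattern is to check a field's validity before consuming it, independent of command or transport mode; a short hedged sketch (function name illustrative):

#include <asm/scsw.h>

static int example_device_end(union scsw *scsw)
{
	if (!scsw_is_valid_dstat(scsw))
		return 0;
	return (scsw_dstat(scsw) & DEV_STAT_DEV_END) != 0;
}
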
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h index 38b0fc221ed7..e37478e87286 100644 --- a/arch/s390/include/asm/setup.h +++ b/arch/s390/include/asm/setup.h | |||
@@ -8,7 +8,7 @@ | |||
8 | #ifndef _ASM_S390_SETUP_H | 8 | #ifndef _ASM_S390_SETUP_H |
9 | #define _ASM_S390_SETUP_H | 9 | #define _ASM_S390_SETUP_H |
10 | 10 | ||
11 | #define COMMAND_LINE_SIZE 1024 | 11 | #define COMMAND_LINE_SIZE 4096 |
12 | 12 | ||
13 | #define ARCH_COMMAND_LINE_SIZE 896 | 13 | #define ARCH_COMMAND_LINE_SIZE 896 |
14 | 14 | ||
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h index 72137bc907ac..c991fe6473c9 100644 --- a/arch/s390/include/asm/smp.h +++ b/arch/s390/include/asm/smp.h | |||
@@ -51,32 +51,7 @@ extern void machine_power_off_smp(void); | |||
51 | #define PROC_CHANGE_PENALTY 20 /* Schedule penalty */ | 51 | #define PROC_CHANGE_PENALTY 20 /* Schedule penalty */ |
52 | 52 | ||
53 | #define raw_smp_processor_id() (S390_lowcore.cpu_nr) | 53 | #define raw_smp_processor_id() (S390_lowcore.cpu_nr) |
54 | 54 | #define cpu_logical_map(cpu) (cpu) | |
55 | /* | ||
56 | * returns 1 if cpu is in stopped/check stopped state or not operational | ||
57 | * returns 0 otherwise | ||
58 | */ | ||
59 | static inline int | ||
60 | smp_cpu_not_running(int cpu) | ||
61 | { | ||
62 | __u32 status; | ||
63 | |||
64 | switch (signal_processor_ps(&status, 0, cpu, sigp_sense)) { | ||
65 | case sigp_order_code_accepted: | ||
66 | case sigp_status_stored: | ||
67 | /* Check for stopped and check stop state */ | ||
68 | if (status & 0x50) | ||
69 | return 1; | ||
70 | break; | ||
71 | case sigp_not_operational: | ||
72 | return 1; | ||
73 | default: | ||
74 | break; | ||
75 | } | ||
76 | return 0; | ||
77 | } | ||
78 | |||
79 | #define cpu_logical_map(cpu) (cpu) | ||
80 | 55 | ||
81 | extern int __cpu_disable (void); | 56 | extern int __cpu_disable (void); |
82 | extern void __cpu_die (unsigned int cpu); | 57 | extern void __cpu_die (unsigned int cpu); |
@@ -91,11 +66,6 @@ extern void arch_send_call_function_ipi(cpumask_t mask); | |||
91 | 66 | ||
92 | #endif | 67 | #endif |
93 | 68 | ||
94 | #ifndef CONFIG_SMP | ||
95 | #define hard_smp_processor_id() 0 | ||
96 | #define smp_cpu_not_running(cpu) 1 | ||
97 | #endif | ||
98 | |||
99 | #ifdef CONFIG_HOTPLUG_CPU | 69 | #ifdef CONFIG_HOTPLUG_CPU |
100 | extern int smp_rescan_cpus(void); | 70 | extern int smp_rescan_cpus(void); |
101 | #else | 71 | #else |
diff --git a/arch/s390/include/asm/socket.h b/arch/s390/include/asm/socket.h index 02330c50241b..e42df89a0b85 100644 --- a/arch/s390/include/asm/socket.h +++ b/arch/s390/include/asm/socket.h | |||
@@ -65,4 +65,7 @@ | |||
65 | #define SO_TIMESTAMPING 37 | 65 | #define SO_TIMESTAMPING 37 |
66 | #define SCM_TIMESTAMPING SO_TIMESTAMPING | 66 | #define SCM_TIMESTAMPING SO_TIMESTAMPING |
67 | 67 | ||
68 | #define SO_PROTOCOL 38 | ||
69 | #define SO_DOMAIN 39 | ||
70 | |||
68 | #endif /* _ASM_SOCKET_H */ | 71 | #endif /* _ASM_SOCKET_H */ |
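
SO_PROTOCOL and SO_DOMAIN are read-only socket options that report the protocol and address family a socket was created with. A minimal userspace sketch, assuming a kernel and libc that already expose the two constants:

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>

int main(void)
{
	int s = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
	int val;
	socklen_t len = sizeof(val);

	if (s < 0)
		return 1;
	if (getsockopt(s, SOL_SOCKET, SO_PROTOCOL, &val, &len) == 0)
		printf("protocol: %d\n", val);	/* 6 == IPPROTO_TCP */
	len = sizeof(val);
	if (getsockopt(s, SOL_SOCKET, SO_DOMAIN, &val, &len) == 0)
		printf("domain: %d\n", val);	/* 2 == AF_INET */
	close(s);
	return 0;
}
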
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h index c9af0d19c7ab..41ce6861174e 100644 --- a/arch/s390/include/asm/spinlock.h +++ b/arch/s390/include/asm/spinlock.h | |||
@@ -191,4 +191,33 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) | |||
191 | #define _raw_read_relax(lock) cpu_relax() | 191 | #define _raw_read_relax(lock) cpu_relax() |
192 | #define _raw_write_relax(lock) cpu_relax() | 192 | #define _raw_write_relax(lock) cpu_relax() |
193 | 193 | ||
194 | #define __always_inline__spin_lock | ||
195 | #define __always_inline__read_lock | ||
196 | #define __always_inline__write_lock | ||
197 | #define __always_inline__spin_lock_bh | ||
198 | #define __always_inline__read_lock_bh | ||
199 | #define __always_inline__write_lock_bh | ||
200 | #define __always_inline__spin_lock_irq | ||
201 | #define __always_inline__read_lock_irq | ||
202 | #define __always_inline__write_lock_irq | ||
203 | #define __always_inline__spin_lock_irqsave | ||
204 | #define __always_inline__read_lock_irqsave | ||
205 | #define __always_inline__write_lock_irqsave | ||
206 | #define __always_inline__spin_trylock | ||
207 | #define __always_inline__read_trylock | ||
208 | #define __always_inline__write_trylock | ||
209 | #define __always_inline__spin_trylock_bh | ||
210 | #define __always_inline__spin_unlock | ||
211 | #define __always_inline__read_unlock | ||
212 | #define __always_inline__write_unlock | ||
213 | #define __always_inline__spin_unlock_bh | ||
214 | #define __always_inline__read_unlock_bh | ||
215 | #define __always_inline__write_unlock_bh | ||
216 | #define __always_inline__spin_unlock_irq | ||
217 | #define __always_inline__read_unlock_irq | ||
218 | #define __always_inline__write_unlock_irq | ||
219 | #define __always_inline__spin_unlock_irqrestore | ||
220 | #define __always_inline__read_unlock_irqrestore | ||
221 | #define __always_inline__write_unlock_irqrestore | ||
222 | |||
194 | #endif /* __ASM_SPINLOCK_H */ | 223 | #endif /* __ASM_SPINLOCK_H */ |
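
These marker macros are an opt-in mechanism: the architecture defines __always_inline__<op>, and the generic spinlock code then provides that operation as an inline instead of an out-of-line function call. A standalone sketch of the marker-macro pattern with invented names (my_lock is not a kernel symbol, and the locking body is a plain GCC builtin, not the kernel's implementation):

/* The architecture header opts in first ... */
#define __always_inline__my_lock

/* ... and the generic header then checks the marker: */
#ifdef __always_inline__my_lock
static inline void my_lock(int *l)
{
	while (__sync_lock_test_and_set(l, 1))
		;	/* spin until the previous value was 0 */
}
#else
void my_lock(int *l);	/* out-of-line version built elsewhere */
#endif
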
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h index 4fb83c1cdb77..379661d2f81a 100644 --- a/arch/s390/include/asm/system.h +++ b/arch/s390/include/asm/system.h | |||
@@ -109,11 +109,7 @@ extern void pfault_fini(void); | |||
109 | #define pfault_fini() do { } while (0) | 109 | #define pfault_fini() do { } while (0) |
110 | #endif /* CONFIG_PFAULT */ | 110 | #endif /* CONFIG_PFAULT */ |
111 | 111 | ||
112 | #ifdef CONFIG_PAGE_STATES | ||
113 | extern void cmma_init(void); | 112 | extern void cmma_init(void); |
114 | #else | ||
115 | static inline void cmma_init(void) { } | ||
116 | #endif | ||
117 | 113 | ||
118 | #define finish_arch_switch(prev) do { \ | 114 | #define finish_arch_switch(prev) do { \ |
119 | set_fs(current->thread.mm_segment); \ | 115 | set_fs(current->thread.mm_segment); \ |
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h index ba1cab9fc1f9..07eb61b2fb3a 100644 --- a/arch/s390/include/asm/thread_info.h +++ b/arch/s390/include/asm/thread_info.h | |||
@@ -92,7 +92,7 @@ static inline struct thread_info *current_thread_info(void) | |||
92 | #define TIF_SYSCALL_TRACE 8 /* syscall trace active */ | 92 | #define TIF_SYSCALL_TRACE 8 /* syscall trace active */ |
93 | #define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */ | 93 | #define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */ |
94 | #define TIF_SECCOMP 10 /* secure computing */ | 94 | #define TIF_SECCOMP 10 /* secure computing */ |
95 | #define TIF_SYSCALL_FTRACE 11 /* ftrace syscall instrumentation */ | 95 | #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */ |
96 | #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ | 96 | #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ |
97 | #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling | 97 | #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling |
98 | TIF_NEED_RESCHED */ | 98 | TIF_NEED_RESCHED */ |
@@ -111,7 +111,7 @@ static inline struct thread_info *current_thread_info(void) | |||
111 | #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) | 111 | #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) |
112 | #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) | 112 | #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) |
113 | #define _TIF_SECCOMP (1<<TIF_SECCOMP) | 113 | #define _TIF_SECCOMP (1<<TIF_SECCOMP) |
114 | #define _TIF_SYSCALL_FTRACE (1<<TIF_SYSCALL_FTRACE) | 114 | #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) |
115 | #define _TIF_USEDFPU (1<<TIF_USEDFPU) | 115 | #define _TIF_USEDFPU (1<<TIF_USEDFPU) |
116 | #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) | 116 | #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) |
117 | #define _TIF_31BIT (1<<TIF_31BIT) | 117 | #define _TIF_31BIT (1<<TIF_31BIT) |
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h index cc21e3e20fd7..24aa1cda20ad 100644 --- a/arch/s390/include/asm/timex.h +++ b/arch/s390/include/asm/timex.h | |||
@@ -90,4 +90,18 @@ unsigned long long monotonic_clock(void); | |||
90 | 90 | ||
91 | extern u64 sched_clock_base_cc; | 91 | extern u64 sched_clock_base_cc; |
92 | 92 | ||
93 | /** | ||
94 | * get_clock_monotonic - returns current time in clock rate units | ||
95 | * | ||
 96 |  * The caller must ensure that preemption is disabled: | ||
 97 |  * the clock and sched_clock_base_cc get changed via | ||
 98 |  * stop_machine, so a preempted caller could combine a | ||
 99 |  * counter value and a base value from different epochs, | ||
 100 |  * and the result would then not be monotonic. | ||
101 | */ | ||
102 | static inline unsigned long long get_clock_monotonic(void) | ||
103 | { | ||
104 | return get_clock_xt() - sched_clock_base_cc; | ||
105 | } | ||
106 | |||
93 | #endif | 107 | #endif |
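
get_clock_monotonic() derives its value by subtracting a per-IPL base from the raw clock, which only stays monotonic if the counter and the base are read within the same epoch. A self-contained sketch of the same derivation; raw_counter() is a stand-in for get_clock_xt(), not a kernel API:

#include <time.h>

static unsigned long long base;	/* updated only while all readers are
				   stopped (cf. stop_machine) */

static unsigned long long raw_counter(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);	/* stand-in for the TOD clock */
	return (unsigned long long)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static unsigned long long monotonic(void)
{
	/* Must not be preempted between the two reads, or the counter and
	 * the base may come from different epochs. */
	return raw_counter() - base;
}
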
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index c75ed43b1a18..c7be8e10b87e 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile | |||
@@ -32,7 +32,7 @@ extra-y += head.o init_task.o vmlinux.lds | |||
32 | 32 | ||
33 | obj-$(CONFIG_MODULES) += s390_ksyms.o module.o | 33 | obj-$(CONFIG_MODULES) += s390_ksyms.o module.o |
34 | obj-$(CONFIG_SMP) += smp.o topology.o | 34 | obj-$(CONFIG_SMP) += smp.o topology.o |
35 | 35 | obj-$(CONFIG_HIBERNATION) += suspend.o swsusp_asm64.o | |
36 | obj-$(CONFIG_AUDIT) += audit.o | 36 | obj-$(CONFIG_AUDIT) += audit.o |
37 | compat-obj-$(CONFIG_AUDIT) += compat_audit.o | 37 | compat-obj-$(CONFIG_AUDIT) += compat_audit.o |
38 | obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \ | 38 | obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \ |
@@ -41,7 +41,7 @@ obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \ | |||
41 | 41 | ||
42 | obj-$(CONFIG_STACKTRACE) += stacktrace.o | 42 | obj-$(CONFIG_STACKTRACE) += stacktrace.o |
43 | obj-$(CONFIG_KPROBES) += kprobes.o | 43 | obj-$(CONFIG_KPROBES) += kprobes.o |
44 | obj-$(CONFIG_FUNCTION_TRACER) += mcount.o | 44 | obj-$(CONFIG_FUNCTION_TRACER) += $(if $(CONFIG_64BIT),mcount64.o,mcount.o) |
45 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o | 45 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o |
46 | obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o | 46 | obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o |
47 | 47 | ||
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index cae14c499511..bf8b4ae7ff2d 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c | |||
@@ -6,6 +6,9 @@ | |||
6 | * Heiko Carstens <heiko.carstens@de.ibm.com> | 6 | * Heiko Carstens <heiko.carstens@de.ibm.com> |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define KMSG_COMPONENT "setup" | ||
10 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
11 | |||
9 | #include <linux/compiler.h> | 12 | #include <linux/compiler.h> |
10 | #include <linux/init.h> | 13 | #include <linux/init.h> |
11 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
@@ -16,6 +19,7 @@ | |||
16 | #include <linux/module.h> | 19 | #include <linux/module.h> |
17 | #include <linux/pfn.h> | 20 | #include <linux/pfn.h> |
18 | #include <linux/uaccess.h> | 21 | #include <linux/uaccess.h> |
22 | #include <linux/kernel.h> | ||
19 | #include <asm/ebcdic.h> | 23 | #include <asm/ebcdic.h> |
20 | #include <asm/ipl.h> | 24 | #include <asm/ipl.h> |
21 | #include <asm/lowcore.h> | 25 | #include <asm/lowcore.h> |
@@ -35,8 +39,6 @@ | |||
35 | 39 | ||
36 | char kernel_nss_name[NSS_NAME_SIZE + 1]; | 40 | char kernel_nss_name[NSS_NAME_SIZE + 1]; |
37 | 41 | ||
38 | static unsigned long machine_flags; | ||
39 | |||
40 | static void __init setup_boot_command_line(void); | 42 | static void __init setup_boot_command_line(void); |
41 | 43 | ||
42 | /* | 44 | /* |
@@ -81,6 +83,8 @@ asm( | |||
81 | " br 14\n" | 83 | " br 14\n" |
82 | " .size savesys_ipl_nss, .-savesys_ipl_nss\n"); | 84 | " .size savesys_ipl_nss, .-savesys_ipl_nss\n"); |
83 | 85 | ||
86 | static __initdata char upper_command_line[COMMAND_LINE_SIZE]; | ||
87 | |||
84 | static noinline __init void create_kernel_nss(void) | 88 | static noinline __init void create_kernel_nss(void) |
85 | { | 89 | { |
86 | unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size; | 90 | unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size; |
@@ -90,7 +94,6 @@ static noinline __init void create_kernel_nss(void) | |||
90 | int response; | 94 | int response; |
91 | size_t len; | 95 | size_t len; |
92 | char *savesys_ptr; | 96 | char *savesys_ptr; |
93 | char upper_command_line[COMMAND_LINE_SIZE]; | ||
94 | char defsys_cmd[DEFSYS_CMD_SIZE]; | 97 | char defsys_cmd[DEFSYS_CMD_SIZE]; |
95 | char savesys_cmd[SAVESYS_CMD_SIZE]; | 98 | char savesys_cmd[SAVESYS_CMD_SIZE]; |
96 | 99 | ||
@@ -141,6 +144,8 @@ static noinline __init void create_kernel_nss(void) | |||
141 | __cpcmd(defsys_cmd, NULL, 0, &response); | 144 | __cpcmd(defsys_cmd, NULL, 0, &response); |
142 | 145 | ||
143 | if (response != 0) { | 146 | if (response != 0) { |
147 | pr_err("Defining the Linux kernel NSS failed with rc=%d\n", | ||
148 | response); | ||
144 | kernel_nss_name[0] = '\0'; | 149 | kernel_nss_name[0] = '\0'; |
145 | return; | 150 | return; |
146 | } | 151 | } |
@@ -153,8 +158,11 @@ static noinline __init void create_kernel_nss(void) | |||
153 | * max SAVESYS_CMD_SIZE | 158 | * max SAVESYS_CMD_SIZE |
154 | * On error: response contains the numeric portion of cp error message. | 159 | * On error: response contains the numeric portion of cp error message. |
155 | * for SAVESYS it will be >= 263 | 160 | * for SAVESYS it will be >= 263 |
161 | * for missing privilege class, it will be 1 | ||
156 | */ | 162 | */ |
157 | if (response > SAVESYS_CMD_SIZE) { | 163 | if (response > SAVESYS_CMD_SIZE || response == 1) { |
164 | pr_err("Saving the Linux kernel NSS failed with rc=%d\n", | ||
165 | response); | ||
158 | kernel_nss_name[0] = '\0'; | 166 | kernel_nss_name[0] = '\0'; |
159 | return; | 167 | return; |
160 | } | 168 | } |
@@ -205,12 +213,9 @@ static noinline __init void detect_machine_type(void) | |||
205 | 213 | ||
206 | /* Running under KVM? If not we assume z/VM */ | 214 | /* Running under KVM? If not we assume z/VM */ |
207 | if (!memcmp(vmms.vm[0].cpi, "\xd2\xe5\xd4", 3)) | 215 | if (!memcmp(vmms.vm[0].cpi, "\xd2\xe5\xd4", 3)) |
208 | machine_flags |= MACHINE_FLAG_KVM; | 216 | S390_lowcore.machine_flags |= MACHINE_FLAG_KVM; |
209 | else | 217 | else |
210 | machine_flags |= MACHINE_FLAG_VM; | 218 | S390_lowcore.machine_flags |= MACHINE_FLAG_VM; |
211 | |||
212 | /* Store machine flags for setting up lowcore early */ | ||
213 | S390_lowcore.machine_flags = machine_flags; | ||
214 | } | 219 | } |
215 | 220 | ||
216 | static __init void early_pgm_check_handler(void) | 221 | static __init void early_pgm_check_handler(void) |
@@ -245,7 +250,7 @@ static noinline __init void setup_hpage(void) | |||
245 | facilities = stfl(); | 250 | facilities = stfl(); |
246 | if (!(facilities & (1UL << 23)) || !(facilities & (1UL << 29))) | 251 | if (!(facilities & (1UL << 23)) || !(facilities & (1UL << 29))) |
247 | return; | 252 | return; |
248 | machine_flags |= MACHINE_FLAG_HPAGE; | 253 | S390_lowcore.machine_flags |= MACHINE_FLAG_HPAGE; |
249 | __ctl_set_bit(0, 23); | 254 | __ctl_set_bit(0, 23); |
250 | #endif | 255 | #endif |
251 | } | 256 | } |
@@ -263,7 +268,7 @@ static __init void detect_mvpg(void) | |||
263 | EX_TABLE(0b,1b) | 268 | EX_TABLE(0b,1b) |
264 | : "=d" (rc) : "0" (-EOPNOTSUPP), "a" (0) : "memory", "cc", "0"); | 269 | : "=d" (rc) : "0" (-EOPNOTSUPP), "a" (0) : "memory", "cc", "0"); |
265 | if (!rc) | 270 | if (!rc) |
266 | machine_flags |= MACHINE_FLAG_MVPG; | 271 | S390_lowcore.machine_flags |= MACHINE_FLAG_MVPG; |
267 | #endif | 272 | #endif |
268 | } | 273 | } |
269 | 274 | ||
@@ -279,7 +284,7 @@ static __init void detect_ieee(void) | |||
279 | EX_TABLE(0b,1b) | 284 | EX_TABLE(0b,1b) |
280 | : "=d" (rc), "=d" (tmp): "0" (-EOPNOTSUPP) : "cc"); | 285 | : "=d" (rc), "=d" (tmp): "0" (-EOPNOTSUPP) : "cc"); |
281 | if (!rc) | 286 | if (!rc) |
282 | machine_flags |= MACHINE_FLAG_IEEE; | 287 | S390_lowcore.machine_flags |= MACHINE_FLAG_IEEE; |
283 | #endif | 288 | #endif |
284 | } | 289 | } |
285 | 290 | ||
@@ -298,7 +303,7 @@ static __init void detect_csp(void) | |||
298 | EX_TABLE(0b,1b) | 303 | EX_TABLE(0b,1b) |
299 | : "=d" (rc) : "0" (-EOPNOTSUPP) : "cc", "0", "1", "2"); | 304 | : "=d" (rc) : "0" (-EOPNOTSUPP) : "cc", "0", "1", "2"); |
300 | if (!rc) | 305 | if (!rc) |
301 | machine_flags |= MACHINE_FLAG_CSP; | 306 | S390_lowcore.machine_flags |= MACHINE_FLAG_CSP; |
302 | #endif | 307 | #endif |
303 | } | 308 | } |
304 | 309 | ||
@@ -315,7 +320,7 @@ static __init void detect_diag9c(void) | |||
315 | EX_TABLE(0b,1b) | 320 | EX_TABLE(0b,1b) |
316 | : "=d" (rc) : "0" (-EOPNOTSUPP), "d" (cpu_address) : "cc"); | 321 | : "=d" (rc) : "0" (-EOPNOTSUPP), "d" (cpu_address) : "cc"); |
317 | if (!rc) | 322 | if (!rc) |
318 | machine_flags |= MACHINE_FLAG_DIAG9C; | 323 | S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG9C; |
319 | } | 324 | } |
320 | 325 | ||
321 | static __init void detect_diag44(void) | 326 | static __init void detect_diag44(void) |
@@ -330,7 +335,7 @@ static __init void detect_diag44(void) | |||
330 | EX_TABLE(0b,1b) | 335 | EX_TABLE(0b,1b) |
331 | : "=d" (rc) : "0" (-EOPNOTSUPP) : "cc"); | 336 | : "=d" (rc) : "0" (-EOPNOTSUPP) : "cc"); |
332 | if (!rc) | 337 | if (!rc) |
333 | machine_flags |= MACHINE_FLAG_DIAG44; | 338 | S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG44; |
334 | #endif | 339 | #endif |
335 | } | 340 | } |
336 | 341 | ||
@@ -341,11 +346,11 @@ static __init void detect_machine_facilities(void) | |||
341 | 346 | ||
342 | facilities = stfl(); | 347 | facilities = stfl(); |
343 | if (facilities & (1 << 28)) | 348 | if (facilities & (1 << 28)) |
344 | machine_flags |= MACHINE_FLAG_IDTE; | 349 | S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE; |
345 | if (facilities & (1 << 23)) | 350 | if (facilities & (1 << 23)) |
346 | machine_flags |= MACHINE_FLAG_PFMF; | 351 | S390_lowcore.machine_flags |= MACHINE_FLAG_PFMF; |
347 | if (facilities & (1 << 4)) | 352 | if (facilities & (1 << 4)) |
348 | machine_flags |= MACHINE_FLAG_MVCOS; | 353 | S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS; |
349 | #endif | 354 | #endif |
350 | } | 355 | } |
351 | 356 | ||
@@ -367,21 +372,35 @@ static __init void rescue_initrd(void) | |||
367 | } | 372 | } |
368 | 373 | ||
369 | /* Set up boot command line */ | 374 | /* Set up boot command line */ |
370 | static void __init setup_boot_command_line(void) | 375 | static void __init append_to_cmdline(size_t (*ipl_data)(char *, size_t)) |
371 | { | 376 | { |
372 | char *parm = NULL; | 377 | char *parm, *delim; |
378 | size_t rc, len; | ||
379 | |||
380 | len = strlen(boot_command_line); | ||
381 | |||
382 | delim = boot_command_line + len; /* '\0' character position */ | ||
383 | parm = boot_command_line + len + 1; /* append right after '\0' */ | ||
373 | 384 | ||
385 | rc = ipl_data(parm, COMMAND_LINE_SIZE - len - 1); | ||
386 | if (rc) { | ||
387 | if (*parm == '=') | ||
388 | memmove(boot_command_line, parm + 1, rc); | ||
389 | else | ||
390 | *delim = ' '; /* replace '\0' with space */ | ||
391 | } | ||
392 | } | ||
393 | |||
394 | static void __init setup_boot_command_line(void) | ||
395 | { | ||
374 | /* copy arch command line */ | 396 | /* copy arch command line */ |
375 | strlcpy(boot_command_line, COMMAND_LINE, ARCH_COMMAND_LINE_SIZE); | 397 | strlcpy(boot_command_line, COMMAND_LINE, ARCH_COMMAND_LINE_SIZE); |
376 | 398 | ||
377 | /* append IPL PARM data to the boot command line */ | 399 | /* append IPL PARM data to the boot command line */ |
378 | if (MACHINE_IS_VM) { | 400 | if (MACHINE_IS_VM) |
379 | parm = boot_command_line + strlen(boot_command_line); | 401 | append_to_cmdline(append_ipl_vmparm); |
380 | *parm++ = ' '; | 402 | |
381 | get_ipl_vmparm(parm); | 403 | append_to_cmdline(append_ipl_scpdata); |
382 | if (parm[0] == '=') | ||
383 | memmove(boot_command_line, parm + 1, strlen(parm)); | ||
384 | } | ||
385 | } | 404 | } |
386 | 405 | ||
387 | 406 | ||
@@ -413,7 +432,6 @@ void __init startup_init(void) | |||
413 | setup_hpage(); | 432 | setup_hpage(); |
414 | sclp_facilities_detect(); | 433 | sclp_facilities_detect(); |
415 | detect_memory_layout(memory_chunk); | 434 | detect_memory_layout(memory_chunk); |
416 | S390_lowcore.machine_flags = machine_flags; | ||
417 | #ifdef CONFIG_DYNAMIC_FTRACE | 435 | #ifdef CONFIG_DYNAMIC_FTRACE |
418 | S390_lowcore.ftrace_func = (unsigned long)ftrace_caller; | 436 | S390_lowcore.ftrace_func = (unsigned long)ftrace_caller; |
419 | #endif | 437 | #endif |
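
The new append_to_cmdline() writes the IPL data just past the terminating '\0' of the current command line and then decides: if the data starts with '=' it replaces the whole line, otherwise the '\0' is overwritten with a space so the two pieces fuse into one string. A standalone sketch of that trick; demo_ipl_data() is invented, the real callbacks being append_ipl_vmparm() and append_ipl_scpdata():

#include <stdio.h>
#include <string.h>

#define CMDLINE_SIZE 64
static char cmdline[CMDLINE_SIZE] = "root=/dev/dasda1";

static size_t demo_ipl_data(char *dest, size_t size)
{
	return snprintf(dest, size, "dasd=0150-0155");
}

int main(void)
{
	size_t len = strlen(cmdline);
	char *delim = cmdline + len;		/* '\0' position */
	char *parm = cmdline + len + 1;		/* write area after '\0' */
	size_t rc = demo_ipl_data(parm, CMDLINE_SIZE - len - 1);

	if (rc) {
		if (*parm == '=')
			memmove(cmdline, parm + 1, rc);	/* replace line */
		else
			*delim = ' ';			/* fuse the strings */
	}
	printf("%s\n", cmdline);	/* root=/dev/dasda1 dasd=0150-0155 */
	return 0;
}
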
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index c4c80a22bc1f..f43d2ee54464 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S | |||
@@ -54,7 +54,7 @@ _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ | |||
54 | _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ | 54 | _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ |
55 | _TIF_MCCK_PENDING) | 55 | _TIF_MCCK_PENDING) |
56 | _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ | 56 | _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ |
57 | _TIF_SECCOMP>>8 | _TIF_SYSCALL_FTRACE>>8) | 57 | _TIF_SECCOMP>>8 | _TIF_SYSCALL_TRACEPOINT>>8) |
58 | 58 | ||
59 | STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER | 59 | STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER |
60 | STACK_SIZE = 1 << STACK_SHIFT | 60 | STACK_SIZE = 1 << STACK_SHIFT |
@@ -278,7 +278,8 @@ sysc_return: | |||
278 | bnz BASED(sysc_work) # there is work to do (signals etc.) | 278 | bnz BASED(sysc_work) # there is work to do (signals etc.) |
279 | sysc_restore: | 279 | sysc_restore: |
280 | #ifdef CONFIG_TRACE_IRQFLAGS | 280 | #ifdef CONFIG_TRACE_IRQFLAGS |
281 | la %r1,BASED(sysc_restore_trace_psw) | 281 | la %r1,BASED(sysc_restore_trace_psw_addr) |
282 | l %r1,0(%r1) | ||
282 | lpsw 0(%r1) | 283 | lpsw 0(%r1) |
283 | sysc_restore_trace: | 284 | sysc_restore_trace: |
284 | TRACE_IRQS_CHECK | 285 | TRACE_IRQS_CHECK |
@@ -289,10 +290,15 @@ sysc_leave: | |||
289 | sysc_done: | 290 | sysc_done: |
290 | 291 | ||
291 | #ifdef CONFIG_TRACE_IRQFLAGS | 292 | #ifdef CONFIG_TRACE_IRQFLAGS |
293 | sysc_restore_trace_psw_addr: | ||
294 | .long sysc_restore_trace_psw | ||
295 | |||
296 | .section .data,"aw",@progbits | ||
292 | .align 8 | 297 | .align 8 |
293 | .globl sysc_restore_trace_psw | 298 | .globl sysc_restore_trace_psw |
294 | sysc_restore_trace_psw: | 299 | sysc_restore_trace_psw: |
295 | .long 0, sysc_restore_trace + 0x80000000 | 300 | .long 0, sysc_restore_trace + 0x80000000 |
301 | .previous | ||
296 | #endif | 302 | #endif |
297 | 303 | ||
298 | # | 304 | # |
@@ -606,7 +612,8 @@ io_return: | |||
606 | bnz BASED(io_work) # there is work to do (signals etc.) | 612 | bnz BASED(io_work) # there is work to do (signals etc.) |
607 | io_restore: | 613 | io_restore: |
608 | #ifdef CONFIG_TRACE_IRQFLAGS | 614 | #ifdef CONFIG_TRACE_IRQFLAGS |
609 | la %r1,BASED(io_restore_trace_psw) | 615 | la %r1,BASED(io_restore_trace_psw_addr) |
616 | l %r1,0(%r1) | ||
610 | lpsw 0(%r1) | 617 | lpsw 0(%r1) |
611 | io_restore_trace: | 618 | io_restore_trace: |
612 | TRACE_IRQS_CHECK | 619 | TRACE_IRQS_CHECK |
@@ -617,10 +624,15 @@ io_leave: | |||
617 | io_done: | 624 | io_done: |
618 | 625 | ||
619 | #ifdef CONFIG_TRACE_IRQFLAGS | 626 | #ifdef CONFIG_TRACE_IRQFLAGS |
627 | io_restore_trace_psw_addr: | ||
628 | .long io_restore_trace_psw | ||
629 | |||
630 | .section .data,"aw",@progbits | ||
620 | .align 8 | 631 | .align 8 |
621 | .globl io_restore_trace_psw | 632 | .globl io_restore_trace_psw |
622 | io_restore_trace_psw: | 633 | io_restore_trace_psw: |
623 | .long 0, io_restore_trace + 0x80000000 | 634 | .long 0, io_restore_trace + 0x80000000 |
635 | .previous | ||
624 | #endif | 636 | #endif |
625 | 637 | ||
626 | # | 638 | # |
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index f6618e9e15ef..a6f7b20df616 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S | |||
@@ -57,7 +57,7 @@ _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ | |||
57 | _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ | 57 | _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ |
58 | _TIF_MCCK_PENDING) | 58 | _TIF_MCCK_PENDING) |
59 | _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ | 59 | _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ |
60 | _TIF_SECCOMP>>8 | _TIF_SYSCALL_FTRACE>>8) | 60 | _TIF_SECCOMP>>8 | _TIF_SYSCALL_TRACEPOINT>>8) |
61 | 61 | ||
62 | #define BASED(name) name-system_call(%r13) | 62 | #define BASED(name) name-system_call(%r13) |
63 | 63 | ||
@@ -284,10 +284,12 @@ sysc_leave: | |||
284 | sysc_done: | 284 | sysc_done: |
285 | 285 | ||
286 | #ifdef CONFIG_TRACE_IRQFLAGS | 286 | #ifdef CONFIG_TRACE_IRQFLAGS |
287 | .section .data,"aw",@progbits | ||
287 | .align 8 | 288 | .align 8 |
288 | .globl sysc_restore_trace_psw | 289 | .globl sysc_restore_trace_psw |
289 | sysc_restore_trace_psw: | 290 | sysc_restore_trace_psw: |
290 | .quad 0, sysc_restore_trace | 291 | .quad 0, sysc_restore_trace |
292 | .previous | ||
291 | #endif | 293 | #endif |
292 | 294 | ||
293 | # | 295 | # |
@@ -595,10 +597,12 @@ io_leave: | |||
595 | io_done: | 597 | io_done: |
596 | 598 | ||
597 | #ifdef CONFIG_TRACE_IRQFLAGS | 599 | #ifdef CONFIG_TRACE_IRQFLAGS |
600 | .section .data,"aw",@progbits | ||
598 | .align 8 | 601 | .align 8 |
599 | .globl io_restore_trace_psw | 602 | .globl io_restore_trace_psw |
600 | io_restore_trace_psw: | 603 | io_restore_trace_psw: |
601 | .quad 0, io_restore_trace | 604 | .quad 0, io_restore_trace |
605 | .previous | ||
602 | #endif | 606 | #endif |
603 | 607 | ||
604 | # | 608 | # |
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index 3e298e64f0db..57bdcb1e3cdf 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c | |||
@@ -220,6 +220,29 @@ struct syscall_metadata *syscall_nr_to_meta(int nr) | |||
220 | return syscalls_metadata[nr]; | 220 | return syscalls_metadata[nr]; |
221 | } | 221 | } |
222 | 222 | ||
223 | int syscall_name_to_nr(char *name) | ||
224 | { | ||
225 | int i; | ||
226 | |||
227 | if (!syscalls_metadata) | ||
228 | return -1; | ||
229 | for (i = 0; i < NR_syscalls; i++) | ||
230 | if (syscalls_metadata[i]) | ||
231 | if (!strcmp(syscalls_metadata[i]->name, name)) | ||
232 | return i; | ||
233 | return -1; | ||
234 | } | ||
235 | |||
236 | void set_syscall_enter_id(int num, int id) | ||
237 | { | ||
238 | syscalls_metadata[num]->enter_id = id; | ||
239 | } | ||
240 | |||
241 | void set_syscall_exit_id(int num, int id) | ||
242 | { | ||
243 | syscalls_metadata[num]->exit_id = id; | ||
244 | } | ||
245 | |||
223 | static struct syscall_metadata *find_syscall_meta(unsigned long syscall) | 246 | static struct syscall_metadata *find_syscall_meta(unsigned long syscall) |
224 | { | 247 | { |
225 | struct syscall_metadata *start; | 248 | struct syscall_metadata *start; |
@@ -237,24 +260,19 @@ static struct syscall_metadata *find_syscall_meta(unsigned long syscall) | |||
237 | return NULL; | 260 | return NULL; |
238 | } | 261 | } |
239 | 262 | ||
240 | void arch_init_ftrace_syscalls(void) | 263 | static int __init arch_init_ftrace_syscalls(void) |
241 | { | 264 | { |
242 | struct syscall_metadata *meta; | 265 | struct syscall_metadata *meta; |
243 | int i; | 266 | int i; |
244 | static atomic_t refs; | ||
245 | |||
246 | if (atomic_inc_return(&refs) != 1) | ||
247 | goto out; | ||
248 | syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * NR_syscalls, | 267 | syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * NR_syscalls, |
249 | GFP_KERNEL); | 268 | GFP_KERNEL); |
250 | if (!syscalls_metadata) | 269 | if (!syscalls_metadata) |
251 | goto out; | 270 | return -ENOMEM; |
252 | for (i = 0; i < NR_syscalls; i++) { | 271 | for (i = 0; i < NR_syscalls; i++) { |
253 | meta = find_syscall_meta((unsigned long)sys_call_table[i]); | 272 | meta = find_syscall_meta((unsigned long)sys_call_table[i]); |
254 | syscalls_metadata[i] = meta; | 273 | syscalls_metadata[i] = meta; |
255 | } | 274 | } |
256 | return; | 275 | return 0; |
257 | out: | ||
258 | atomic_dec(&refs); | ||
259 | } | 276 | } |
277 | arch_initcall(arch_init_ftrace_syscalls); | ||
260 | #endif | 278 | #endif |
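
syscall_name_to_nr() is a plain linear scan over the number-indexed metadata table, tolerating the NULL slots left by syscalls without metadata; since the table is now built from an arch_initcall, any caller running before that simply gets -1. A reduced standalone sketch of the lookup, with made-up table contents:

#include <stdio.h>
#include <string.h>

struct meta { const char *name; };

static struct meta *table[4];
static struct meta m_read = { "sys_read" }, m_write = { "sys_write" };

static int name_to_nr(const char *name)
{
	int i;

	for (i = 0; i < 4; i++)
		if (table[i] && !strcmp(table[i]->name, name))
			return i;
	return -1;			/* unknown, or table not built yet */
}

int main(void)
{
	table[0] = &m_read;
	table[1] = &m_write;		/* gaps (NULL slots) are tolerated */
	printf("%d\n", name_to_nr("sys_write"));	/* 1 */
	printf("%d\n", name_to_nr("sys_fork"));		/* -1 */
	return 0;
}
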
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index ec6882348520..c52b4f7742fa 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <asm/asm-offsets.h> | 27 | #include <asm/asm-offsets.h> |
28 | #include <asm/thread_info.h> | 28 | #include <asm/thread_info.h> |
29 | #include <asm/page.h> | 29 | #include <asm/page.h> |
30 | #include <asm/cpu.h> | ||
30 | 31 | ||
31 | #ifdef CONFIG_64BIT | 32 | #ifdef CONFIG_64BIT |
32 | #define ARCH_OFFSET 4 | 33 | #define ARCH_OFFSET 4 |
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S index 2ced846065b7..602b508cd4c4 100644 --- a/arch/s390/kernel/head31.S +++ b/arch/s390/kernel/head31.S | |||
@@ -24,6 +24,7 @@ startup_continue: | |||
24 | # Setup stack | 24 | # Setup stack |
25 | # | 25 | # |
26 | l %r15,.Linittu-.LPG1(%r13) | 26 | l %r15,.Linittu-.LPG1(%r13) |
27 | st %r15,__LC_THREAD_INFO # cache thread info in lowcore | ||
27 | mvc __LC_CURRENT(4),__TI_task(%r15) | 28 | mvc __LC_CURRENT(4),__TI_task(%r15) |
28 | ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE | 29 | ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE |
29 | st %r15,__LC_KERNEL_STACK # set end of kernel stack | 30 | st %r15,__LC_KERNEL_STACK # set end of kernel stack |
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index 65667b2e65ce..6a250808092b 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S | |||
@@ -62,9 +62,9 @@ startup_continue: | |||
62 | clr %r11,%r12 | 62 | clr %r11,%r12 |
63 | je 5f # no more space in prefix array | 63 | je 5f # no more space in prefix array |
64 | 4: | 64 | 4: |
65 | ahi %r8,1 # next cpu (r8 += 1) | 65 | ahi %r8,1 # next cpu (r8 += 1) |
66 | cl %r8,.Llast_cpu-.LPG1(%r13) # is last possible cpu ? | 66 | chi %r8,MAX_CPU_ADDRESS # is last possible cpu ? |
67 | jl 1b # jump if not last cpu | 67 | jle 1b # jump if not last cpu |
68 | 5: | 68 | 5: |
69 | lhi %r1,2 # mode 2 = esame (dump) | 69 | lhi %r1,2 # mode 2 = esame (dump) |
70 | j 6f | 70 | j 6f |
@@ -92,6 +92,7 @@ startup_continue: | |||
92 | # Setup stack | 92 | # Setup stack |
93 | # | 93 | # |
94 | larl %r15,init_thread_union | 94 | larl %r15,init_thread_union |
95 | stg %r15,__LC_THREAD_INFO # cache thread info in lowcore | ||
95 | lg %r14,__TI_task(%r15) # cache current in lowcore | 96 | lg %r14,__TI_task(%r15) # cache current in lowcore |
96 | stg %r14,__LC_CURRENT | 97 | stg %r14,__LC_CURRENT |
97 | aghi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE | 98 | aghi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE |
@@ -129,8 +130,6 @@ startup_continue: | |||
129 | #ifdef CONFIG_ZFCPDUMP | 130 | #ifdef CONFIG_ZFCPDUMP |
130 | .Lcurrent_cpu: | 131 | .Lcurrent_cpu: |
131 | .long 0x0 | 132 | .long 0x0 |
132 | .Llast_cpu: | ||
133 | .long 0x0000ffff | ||
134 | .Lpref_arr_ptr: | 133 | .Lpref_arr_ptr: |
135 | .long zfcpdump_prefix_array | 134 | .long zfcpdump_prefix_array |
136 | #endif /* CONFIG_ZFCPDUMP */ | 135 | #endif /* CONFIG_ZFCPDUMP */ |
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 371a2d88f4ac..ee57a42e6e93 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c | |||
@@ -272,17 +272,18 @@ static ssize_t ipl_type_show(struct kobject *kobj, struct kobj_attribute *attr, | |||
272 | static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type); | 272 | static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type); |
273 | 273 | ||
274 | /* VM IPL PARM routines */ | 274 | /* VM IPL PARM routines */ |
275 | static void reipl_get_ascii_vmparm(char *dest, | 275 | size_t reipl_get_ascii_vmparm(char *dest, size_t size, |
276 | const struct ipl_parameter_block *ipb) | 276 | const struct ipl_parameter_block *ipb) |
277 | { | 277 | { |
278 | int i; | 278 | int i; |
279 | int len = 0; | 279 | size_t len; |
280 | char has_lowercase = 0; | 280 | char has_lowercase = 0; |
281 | 281 | ||
282 | len = 0; | ||
282 | if ((ipb->ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID) && | 283 | if ((ipb->ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID) && |
283 | (ipb->ipl_info.ccw.vm_parm_len > 0)) { | 284 | (ipb->ipl_info.ccw.vm_parm_len > 0)) { |
284 | 285 | ||
285 | len = ipb->ipl_info.ccw.vm_parm_len; | 286 | len = min_t(size_t, size - 1, ipb->ipl_info.ccw.vm_parm_len); |
286 | memcpy(dest, ipb->ipl_info.ccw.vm_parm, len); | 287 | memcpy(dest, ipb->ipl_info.ccw.vm_parm, len); |
287 | /* If at least one character is lowercase, we assume mixed | 288 | /* If at least one character is lowercase, we assume mixed |
288 | * case; otherwise we convert everything to lowercase. | 289 | * case; otherwise we convert everything to lowercase. |
@@ -299,14 +300,20 @@ static void reipl_get_ascii_vmparm(char *dest, | |||
299 | EBCASC(dest, len); | 300 | EBCASC(dest, len); |
300 | } | 301 | } |
301 | dest[len] = 0; | 302 | dest[len] = 0; |
303 | |||
304 | return len; | ||
302 | } | 305 | } |
303 | 306 | ||
304 | void get_ipl_vmparm(char *dest) | 307 | size_t append_ipl_vmparm(char *dest, size_t size) |
305 | { | 308 | { |
309 | size_t rc; | ||
310 | |||
311 | rc = 0; | ||
306 | if (diag308_set_works && (ipl_block.hdr.pbt == DIAG308_IPL_TYPE_CCW)) | 312 | if (diag308_set_works && (ipl_block.hdr.pbt == DIAG308_IPL_TYPE_CCW)) |
307 | reipl_get_ascii_vmparm(dest, &ipl_block); | 313 | rc = reipl_get_ascii_vmparm(dest, size, &ipl_block); |
308 | else | 314 | else |
309 | dest[0] = 0; | 315 | dest[0] = 0; |
316 | return rc; | ||
310 | } | 317 | } |
311 | 318 | ||
312 | static ssize_t ipl_vm_parm_show(struct kobject *kobj, | 319 | static ssize_t ipl_vm_parm_show(struct kobject *kobj, |
@@ -314,10 +321,65 @@ static ssize_t ipl_vm_parm_show(struct kobject *kobj, | |||
314 | { | 321 | { |
315 | char parm[DIAG308_VMPARM_SIZE + 1] = {}; | 322 | char parm[DIAG308_VMPARM_SIZE + 1] = {}; |
316 | 323 | ||
317 | get_ipl_vmparm(parm); | 324 | append_ipl_vmparm(parm, sizeof(parm)); |
318 | return sprintf(page, "%s\n", parm); | 325 | return sprintf(page, "%s\n", parm); |
319 | } | 326 | } |
320 | 327 | ||
 328 | static size_t scpdata_length(const char *buf, size_t count) | ||
329 | { | ||
330 | while (count) { | ||
331 | if (buf[count - 1] != '\0' && buf[count - 1] != ' ') | ||
332 | break; | ||
333 | count--; | ||
334 | } | ||
335 | return count; | ||
336 | } | ||
337 | |||
338 | size_t reipl_append_ascii_scpdata(char *dest, size_t size, | ||
339 | const struct ipl_parameter_block *ipb) | ||
340 | { | ||
341 | size_t count; | ||
342 | size_t i; | ||
343 | int has_lowercase; | ||
344 | |||
345 | count = min(size - 1, scpdata_length(ipb->ipl_info.fcp.scp_data, | ||
346 | ipb->ipl_info.fcp.scp_data_len)); | ||
347 | if (!count) | ||
348 | goto out; | ||
349 | |||
350 | has_lowercase = 0; | ||
351 | for (i = 0; i < count; i++) { | ||
352 | if (!isascii(ipb->ipl_info.fcp.scp_data[i])) { | ||
353 | count = 0; | ||
354 | goto out; | ||
355 | } | ||
356 | if (!has_lowercase && islower(ipb->ipl_info.fcp.scp_data[i])) | ||
357 | has_lowercase = 1; | ||
358 | } | ||
359 | |||
360 | if (has_lowercase) | ||
361 | memcpy(dest, ipb->ipl_info.fcp.scp_data, count); | ||
362 | else | ||
363 | for (i = 0; i < count; i++) | ||
364 | dest[i] = tolower(ipb->ipl_info.fcp.scp_data[i]); | ||
365 | out: | ||
366 | dest[count] = '\0'; | ||
367 | return count; | ||
368 | } | ||
369 | |||
370 | size_t append_ipl_scpdata(char *dest, size_t len) | ||
371 | { | ||
372 | size_t rc; | ||
373 | |||
374 | rc = 0; | ||
375 | if (ipl_block.hdr.pbt == DIAG308_IPL_TYPE_FCP) | ||
376 | rc = reipl_append_ascii_scpdata(dest, len, &ipl_block); | ||
377 | else | ||
378 | dest[0] = 0; | ||
379 | return rc; | ||
380 | } | ||
381 | |||
382 | |||
321 | static struct kobj_attribute sys_ipl_vm_parm_attr = | 383 | static struct kobj_attribute sys_ipl_vm_parm_attr = |
322 | __ATTR(parm, S_IRUGO, ipl_vm_parm_show, NULL); | 384 | __ATTR(parm, S_IRUGO, ipl_vm_parm_show, NULL); |
323 | 385 | ||
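
scpdata_length() trims trailing NUL and blank padding, and reipl_append_ascii_scpdata() rejects non-ASCII bytes and folds all-uppercase data to lowercase. A standalone sketch of that trim-and-fold convention:

#include <ctype.h>
#include <stdio.h>
#include <string.h>

static size_t trimmed_len(const char *buf, size_t count)
{
	while (count && (buf[count - 1] == '\0' || buf[count - 1] == ' '))
		count--;
	return count;
}

int main(void)
{
	char data[] = "BOOTPARM=1   ";
	size_t n = trimmed_len(data, sizeof(data) - 1);
	size_t i;

	/* No lowercase byte present: treat the data as case-insensitive
	 * and fold it, as reipl_append_ascii_scpdata() does. */
	for (i = 0; i < n; i++)
		data[i] = tolower((unsigned char)data[i]);
	printf("%.*s\n", (int)n, data);		/* bootparm=1 */
	return 0;
}
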
@@ -553,7 +615,7 @@ static ssize_t reipl_generic_vmparm_show(struct ipl_parameter_block *ipb, | |||
553 | { | 615 | { |
554 | char vmparm[DIAG308_VMPARM_SIZE + 1] = {}; | 616 | char vmparm[DIAG308_VMPARM_SIZE + 1] = {}; |
555 | 617 | ||
556 | reipl_get_ascii_vmparm(vmparm, ipb); | 618 | reipl_get_ascii_vmparm(vmparm, sizeof(vmparm), ipb); |
557 | return sprintf(page, "%s\n", vmparm); | 619 | return sprintf(page, "%s\n", vmparm); |
558 | } | 620 | } |
559 | 621 | ||
@@ -626,6 +688,59 @@ static struct kobj_attribute sys_reipl_ccw_vmparm_attr = | |||
626 | 688 | ||
627 | /* FCP reipl device attributes */ | 689 | /* FCP reipl device attributes */ |
628 | 690 | ||
691 | static ssize_t reipl_fcp_scpdata_read(struct kobject *kobj, | ||
692 | struct bin_attribute *attr, | ||
693 | char *buf, loff_t off, size_t count) | ||
694 | { | ||
695 | size_t size = reipl_block_fcp->ipl_info.fcp.scp_data_len; | ||
696 | void *scp_data = reipl_block_fcp->ipl_info.fcp.scp_data; | ||
697 | |||
698 | return memory_read_from_buffer(buf, count, &off, scp_data, size); | ||
699 | } | ||
700 | |||
701 | static ssize_t reipl_fcp_scpdata_write(struct kobject *kobj, | ||
702 | struct bin_attribute *attr, | ||
703 | char *buf, loff_t off, size_t count) | ||
704 | { | ||
705 | size_t padding; | ||
706 | size_t scpdata_len; | ||
707 | |||
708 | if (off < 0) | ||
709 | return -EINVAL; | ||
710 | |||
711 | if (off >= DIAG308_SCPDATA_SIZE) | ||
712 | return -ENOSPC; | ||
713 | |||
714 | if (count > DIAG308_SCPDATA_SIZE - off) | ||
715 | count = DIAG308_SCPDATA_SIZE - off; | ||
716 | |||
717 | memcpy(reipl_block_fcp->ipl_info.fcp.scp_data, buf + off, count); | ||
718 | scpdata_len = off + count; | ||
719 | |||
720 | if (scpdata_len % 8) { | ||
721 | padding = 8 - (scpdata_len % 8); | ||
722 | memset(reipl_block_fcp->ipl_info.fcp.scp_data + scpdata_len, | ||
723 | 0, padding); | ||
724 | scpdata_len += padding; | ||
725 | } | ||
726 | |||
727 | reipl_block_fcp->ipl_info.fcp.scp_data_len = scpdata_len; | ||
728 | reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN + scpdata_len; | ||
729 | reipl_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN + scpdata_len; | ||
730 | |||
731 | return count; | ||
732 | } | ||
733 | |||
734 | static struct bin_attribute sys_reipl_fcp_scp_data_attr = { | ||
735 | .attr = { | ||
736 | .name = "scp_data", | ||
737 | .mode = S_IRUGO | S_IWUSR, | ||
738 | }, | ||
739 | .size = PAGE_SIZE, | ||
740 | .read = reipl_fcp_scpdata_read, | ||
741 | .write = reipl_fcp_scpdata_write, | ||
742 | }; | ||
743 | |||
629 | DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%016llx\n", | 744 | DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%016llx\n", |
630 | reipl_block_fcp->ipl_info.fcp.wwpn); | 745 | reipl_block_fcp->ipl_info.fcp.wwpn); |
631 | DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%016llx\n", | 746 | DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%016llx\n", |
@@ -647,7 +762,6 @@ static struct attribute *reipl_fcp_attrs[] = { | |||
647 | }; | 762 | }; |
648 | 763 | ||
649 | static struct attribute_group reipl_fcp_attr_group = { | 764 | static struct attribute_group reipl_fcp_attr_group = { |
650 | .name = IPL_FCP_STR, | ||
651 | .attrs = reipl_fcp_attrs, | 765 | .attrs = reipl_fcp_attrs, |
652 | }; | 766 | }; |
653 | 767 | ||
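
The write handler above pads the SCP data with zero bytes up to the next multiple of 8 and grows the parameter block header lengths by the padded size. A minimal sketch of just that length handling; pad_to_8() is an invented helper whose `(8 - len % 8) % 8` form is equivalent to the hunk's `if (scpdata_len % 8)` branch:

#include <stdio.h>
#include <string.h>

static size_t pad_to_8(char *buf, size_t len)
{
	size_t padding = (8 - (len % 8)) % 8;	/* 0 when already aligned */

	memset(buf + len, 0, padding);
	return len + padding;
}

int main(void)
{
	char scp[32] = "NAME=VAL";		/* 8 chars, already aligned */

	printf("%zu\n", pad_to_8(scp, strlen(scp)));	/* 8 */
	printf("%zu\n", pad_to_8(scp, 5));		/* 8, 3 zero bytes added */
	return 0;
}
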
@@ -895,6 +1009,7 @@ static struct kobj_attribute reipl_type_attr = | |||
895 | __ATTR(reipl_type, 0644, reipl_type_show, reipl_type_store); | 1009 | __ATTR(reipl_type, 0644, reipl_type_show, reipl_type_store); |
896 | 1010 | ||
897 | static struct kset *reipl_kset; | 1011 | static struct kset *reipl_kset; |
1012 | static struct kset *reipl_fcp_kset; | ||
898 | 1013 | ||
899 | static void get_ipl_string(char *dst, struct ipl_parameter_block *ipb, | 1014 | static void get_ipl_string(char *dst, struct ipl_parameter_block *ipb, |
900 | const enum ipl_method m) | 1015 | const enum ipl_method m) |
@@ -906,7 +1021,7 @@ static void get_ipl_string(char *dst, struct ipl_parameter_block *ipb, | |||
906 | 1021 | ||
907 | reipl_get_ascii_loadparm(loadparm, ipb); | 1022 | reipl_get_ascii_loadparm(loadparm, ipb); |
908 | reipl_get_ascii_nss_name(nss_name, ipb); | 1023 | reipl_get_ascii_nss_name(nss_name, ipb); |
909 | reipl_get_ascii_vmparm(vmparm, ipb); | 1024 | reipl_get_ascii_vmparm(vmparm, sizeof(vmparm), ipb); |
910 | 1025 | ||
911 | switch (m) { | 1026 | switch (m) { |
912 | case REIPL_METHOD_CCW_VM: | 1027 | case REIPL_METHOD_CCW_VM: |
@@ -1076,23 +1191,44 @@ static int __init reipl_fcp_init(void) | |||
1076 | int rc; | 1191 | int rc; |
1077 | 1192 | ||
1078 | if (!diag308_set_works) { | 1193 | if (!diag308_set_works) { |
1079 | if (ipl_info.type == IPL_TYPE_FCP) | 1194 | if (ipl_info.type == IPL_TYPE_FCP) { |
1080 | make_attrs_ro(reipl_fcp_attrs); | 1195 | make_attrs_ro(reipl_fcp_attrs); |
1081 | else | 1196 | sys_reipl_fcp_scp_data_attr.attr.mode = S_IRUGO; |
1197 | } else | ||
1082 | return 0; | 1198 | return 0; |
1083 | } | 1199 | } |
1084 | 1200 | ||
1085 | reipl_block_fcp = (void *) get_zeroed_page(GFP_KERNEL); | 1201 | reipl_block_fcp = (void *) get_zeroed_page(GFP_KERNEL); |
1086 | if (!reipl_block_fcp) | 1202 | if (!reipl_block_fcp) |
1087 | return -ENOMEM; | 1203 | return -ENOMEM; |
1088 | rc = sysfs_create_group(&reipl_kset->kobj, &reipl_fcp_attr_group); | 1204 | |
1205 | /* sysfs: create fcp kset for mixing attr group and bin attrs */ | ||
1206 | reipl_fcp_kset = kset_create_and_add(IPL_FCP_STR, NULL, | ||
1207 | &reipl_kset->kobj); | ||
 1208 | if (!reipl_fcp_kset) { | ||
1209 | free_page((unsigned long) reipl_block_fcp); | ||
1210 | return -ENOMEM; | ||
1211 | } | ||
1212 | |||
1213 | rc = sysfs_create_group(&reipl_fcp_kset->kobj, &reipl_fcp_attr_group); | ||
1214 | if (rc) { | ||
1215 | kset_unregister(reipl_fcp_kset); | ||
1216 | free_page((unsigned long) reipl_block_fcp); | ||
1217 | return rc; | ||
1218 | } | ||
1219 | |||
1220 | rc = sysfs_create_bin_file(&reipl_fcp_kset->kobj, | ||
1221 | &sys_reipl_fcp_scp_data_attr); | ||
1089 | if (rc) { | 1222 | if (rc) { |
1090 | free_page((unsigned long)reipl_block_fcp); | 1223 | sysfs_remove_group(&reipl_fcp_kset->kobj, &reipl_fcp_attr_group); |
1224 | kset_unregister(reipl_fcp_kset); | ||
1225 | free_page((unsigned long) reipl_block_fcp); | ||
1091 | return rc; | 1226 | return rc; |
1092 | } | 1227 | } |
1093 | if (ipl_info.type == IPL_TYPE_FCP) { | 1228 | |
1229 | if (ipl_info.type == IPL_TYPE_FCP) | ||
1094 | memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE); | 1230 | memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE); |
1095 | } else { | 1231 | else { |
1096 | reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN; | 1232 | reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN; |
1097 | reipl_block_fcp->hdr.version = IPL_PARM_BLOCK_VERSION; | 1233 | reipl_block_fcp->hdr.version = IPL_PARM_BLOCK_VERSION; |
1098 | reipl_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN; | 1234 | reipl_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN; |
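
reipl_fcp_init() now unwinds its allocations by repeating the cleanup calls at every failure point; the goto-ladder idiom common elsewhere in the kernel keeps those steps in one place. A sketch of only the allocation/cleanup spine, using the same calls as the hunk (the !diag308_set_works prologue and the block initialization tail are omitted):

static int fcp_init_sketch(void)
{
	int rc;

	reipl_block_fcp = (void *) get_zeroed_page(GFP_KERNEL);
	if (!reipl_block_fcp)
		return -ENOMEM;
	reipl_fcp_kset = kset_create_and_add(IPL_FCP_STR, NULL,
					     &reipl_kset->kobj);
	if (!reipl_fcp_kset) {
		rc = -ENOMEM;
		goto out_page;
	}
	rc = sysfs_create_group(&reipl_fcp_kset->kobj, &reipl_fcp_attr_group);
	if (rc)
		goto out_kset;
	rc = sysfs_create_bin_file(&reipl_fcp_kset->kobj,
				   &sys_reipl_fcp_scp_data_attr);
	if (rc)
		goto out_group;
	return 0;

out_group:
	sysfs_remove_group(&reipl_fcp_kset->kobj, &reipl_fcp_attr_group);
out_kset:
	kset_unregister(reipl_fcp_kset);
out_page:
	free_page((unsigned long) reipl_block_fcp);
	return rc;
}
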
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S index 2a0a5e97ba8c..dfe015d7398c 100644 --- a/arch/s390/kernel/mcount.S +++ b/arch/s390/kernel/mcount.S | |||
@@ -11,111 +11,27 @@ | |||
11 | ftrace_stub: | 11 | ftrace_stub: |
12 | br %r14 | 12 | br %r14 |
13 | 13 | ||
14 | #ifdef CONFIG_64BIT | ||
15 | |||
16 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
17 | |||
18 | .globl _mcount | 14 | .globl _mcount |
19 | _mcount: | 15 | _mcount: |
20 | br %r14 | 16 | #ifdef CONFIG_DYNAMIC_FTRACE |
21 | |||
22 | .globl ftrace_caller | ||
23 | ftrace_caller: | ||
24 | larl %r1,function_trace_stop | ||
25 | icm %r1,0xf,0(%r1) | ||
26 | bnzr %r14 | ||
27 | stmg %r2,%r5,32(%r15) | ||
28 | stg %r14,112(%r15) | ||
29 | lgr %r1,%r15 | ||
30 | aghi %r15,-160 | ||
31 | stg %r1,__SF_BACKCHAIN(%r15) | ||
32 | lgr %r2,%r14 | ||
33 | lg %r3,168(%r15) | ||
34 | larl %r14,ftrace_dyn_func | ||
35 | lg %r14,0(%r14) | ||
36 | basr %r14,%r14 | ||
37 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
38 | .globl ftrace_graph_caller | ||
39 | ftrace_graph_caller: | ||
40 | # This unconditional branch gets runtime patched. Change only if | ||
41 | # you know what you are doing. See ftrace_enable_graph_caller(). | ||
42 | j 0f | ||
43 | lg %r2,272(%r15) | ||
44 | lg %r3,168(%r15) | ||
45 | brasl %r14,prepare_ftrace_return | ||
46 | stg %r2,168(%r15) | ||
47 | 0: | ||
48 | #endif | ||
49 | aghi %r15,160 | ||
50 | lmg %r2,%r5,32(%r15) | ||
51 | lg %r14,112(%r15) | ||
52 | br %r14 | 17 | br %r14 |
53 | 18 | ||
54 | .data | 19 | .data |
55 | .globl ftrace_dyn_func | 20 | .globl ftrace_dyn_func |
56 | ftrace_dyn_func: | 21 | ftrace_dyn_func: |
57 | .quad ftrace_stub | 22 | .long ftrace_stub |
58 | .previous | 23 | .previous |
59 | 24 | ||
60 | #else /* CONFIG_DYNAMIC_FTRACE */ | ||
61 | |||
62 | .globl _mcount | ||
63 | _mcount: | ||
64 | larl %r1,function_trace_stop | ||
65 | icm %r1,0xf,0(%r1) | ||
66 | bnzr %r14 | ||
67 | stmg %r2,%r5,32(%r15) | ||
68 | stg %r14,112(%r15) | ||
69 | lgr %r1,%r15 | ||
70 | aghi %r15,-160 | ||
71 | stg %r1,__SF_BACKCHAIN(%r15) | ||
72 | lgr %r2,%r14 | ||
73 | lg %r3,168(%r15) | ||
74 | larl %r14,ftrace_trace_function | ||
75 | lg %r14,0(%r14) | ||
76 | basr %r14,%r14 | ||
77 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
78 | lg %r2,272(%r15) | ||
79 | lg %r3,168(%r15) | ||
80 | brasl %r14,prepare_ftrace_return | ||
81 | stg %r2,168(%r15) | ||
82 | #endif | ||
83 | aghi %r15,160 | ||
84 | lmg %r2,%r5,32(%r15) | ||
85 | lg %r14,112(%r15) | ||
86 | br %r14 | ||
87 | |||
88 | #endif /* CONFIG_DYNAMIC_FTRACE */ | ||
89 | |||
90 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
91 | |||
92 | .globl return_to_handler | ||
93 | return_to_handler: | ||
94 | stmg %r2,%r5,32(%r15) | ||
95 | lgr %r1,%r15 | ||
96 | aghi %r15,-160 | ||
97 | stg %r1,__SF_BACKCHAIN(%r15) | ||
98 | brasl %r14,ftrace_return_to_handler | ||
99 | aghi %r15,160 | ||
100 | lgr %r14,%r2 | ||
101 | lmg %r2,%r5,32(%r15) | ||
102 | br %r14 | ||
103 | |||
104 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | ||
105 | |||
106 | #else /* CONFIG_64BIT */ | ||
107 | |||
108 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
109 | |||
110 | .globl _mcount | ||
111 | _mcount: | ||
112 | br %r14 | ||
113 | |||
114 | .globl ftrace_caller | 25 | .globl ftrace_caller |
115 | ftrace_caller: | 26 | ftrace_caller: |
27 | #endif | ||
116 | stm %r2,%r5,16(%r15) | 28 | stm %r2,%r5,16(%r15) |
117 | bras %r1,2f | 29 | bras %r1,2f |
30 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
31 | 0: .long ftrace_dyn_func | ||
32 | #else | ||
118 | 0: .long ftrace_trace_function | 33 | 0: .long ftrace_trace_function |
34 | #endif | ||
119 | 1: .long function_trace_stop | 35 | 1: .long function_trace_stop |
120 | 2: l %r2,1b-0b(%r1) | 36 | 2: l %r2,1b-0b(%r1) |
121 | icm %r2,0xf,0(%r2) | 37 | icm %r2,0xf,0(%r2) |
@@ -131,53 +47,13 @@ ftrace_caller: | |||
131 | l %r14,0(%r14) | 47 | l %r14,0(%r14) |
132 | basr %r14,%r14 | 48 | basr %r14,%r14 |
133 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 49 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
50 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
134 | .globl ftrace_graph_caller | 51 | .globl ftrace_graph_caller |
135 | ftrace_graph_caller: | 52 | ftrace_graph_caller: |
136 | # This unconditional branch gets runtime patched. Change only if | 53 | # This unconditional branch gets runtime patched. Change only if |
137 | # you know what you are doing. See ftrace_enable_graph_caller(). | 54 | # you know what you are doing. See ftrace_enable_graph_caller(). |
138 | j 1f | 55 | j 1f |
139 | bras %r1,0f | ||
140 | .long prepare_ftrace_return | ||
141 | 0: l %r2,152(%r15) | ||
142 | l %r4,0(%r1) | ||
143 | l %r3,100(%r15) | ||
144 | basr %r14,%r4 | ||
145 | st %r2,100(%r15) | ||
146 | 1: | ||
147 | #endif | 56 | #endif |
148 | ahi %r15,96 | ||
149 | l %r14,56(%r15) | ||
150 | 3: lm %r2,%r5,16(%r15) | ||
151 | br %r14 | ||
152 | |||
153 | .data | ||
154 | .globl ftrace_dyn_func | ||
155 | ftrace_dyn_func: | ||
156 | .long ftrace_stub | ||
157 | .previous | ||
158 | |||
159 | #else /* CONFIG_DYNAMIC_FTRACE */ | ||
160 | |||
161 | .globl _mcount | ||
162 | _mcount: | ||
163 | stm %r2,%r5,16(%r15) | ||
164 | bras %r1,2f | ||
165 | 0: .long ftrace_trace_function | ||
166 | 1: .long function_trace_stop | ||
167 | 2: l %r2,1b-0b(%r1) | ||
168 | icm %r2,0xf,0(%r2) | ||
169 | jnz 3f | ||
170 | st %r14,56(%r15) | ||
171 | lr %r0,%r15 | ||
172 | ahi %r15,-96 | ||
173 | l %r3,100(%r15) | ||
174 | la %r2,0(%r14) | ||
175 | st %r0,__SF_BACKCHAIN(%r15) | ||
176 | la %r3,0(%r3) | ||
177 | l %r14,0b-0b(%r1) | ||
178 | l %r14,0(%r14) | ||
179 | basr %r14,%r14 | ||
180 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
181 | bras %r1,0f | 57 | bras %r1,0f |
182 | .long prepare_ftrace_return | 58 | .long prepare_ftrace_return |
183 | 0: l %r2,152(%r15) | 59 | 0: l %r2,152(%r15) |
@@ -185,14 +61,13 @@ _mcount: | |||
185 | l %r3,100(%r15) | 61 | l %r3,100(%r15) |
186 | basr %r14,%r4 | 62 | basr %r14,%r4 |
187 | st %r2,100(%r15) | 63 | st %r2,100(%r15) |
64 | 1: | ||
188 | #endif | 65 | #endif |
189 | ahi %r15,96 | 66 | ahi %r15,96 |
190 | l %r14,56(%r15) | 67 | l %r14,56(%r15) |
191 | 3: lm %r2,%r5,16(%r15) | 68 | 3: lm %r2,%r5,16(%r15) |
192 | br %r14 | 69 | br %r14 |
193 | 70 | ||
194 | #endif /* CONFIG_DYNAMIC_FTRACE */ | ||
195 | |||
196 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 71 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
197 | 72 | ||
198 | .globl return_to_handler | 73 | .globl return_to_handler |
@@ -211,6 +86,4 @@ return_to_handler: | |||
211 | lm %r2,%r5,16(%r15) | 86 | lm %r2,%r5,16(%r15) |
212 | br %r14 | 87 | br %r14 |
213 | 88 | ||
214 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 89 | #endif |
215 | |||
216 | #endif /* CONFIG_64BIT */ | ||
diff --git a/arch/s390/kernel/mcount64.S b/arch/s390/kernel/mcount64.S new file mode 100644 index 000000000000..c37211c6092b --- /dev/null +++ b/arch/s390/kernel/mcount64.S | |||
@@ -0,0 +1,78 @@ | |||
1 | /* | ||
2 | * Copyright IBM Corp. 2008,2009 | ||
3 | * | ||
4 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, | ||
5 | * | ||
6 | */ | ||
7 | |||
8 | #include <asm/asm-offsets.h> | ||
9 | |||
10 | .globl ftrace_stub | ||
11 | ftrace_stub: | ||
12 | br %r14 | ||
13 | |||
14 | .globl _mcount | ||
15 | _mcount: | ||
16 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
17 | br %r14 | ||
18 | |||
19 | .data | ||
20 | .globl ftrace_dyn_func | ||
21 | ftrace_dyn_func: | ||
22 | .quad ftrace_stub | ||
23 | .previous | ||
24 | |||
25 | .globl ftrace_caller | ||
26 | ftrace_caller: | ||
27 | #endif | ||
28 | larl %r1,function_trace_stop | ||
29 | icm %r1,0xf,0(%r1) | ||
30 | bnzr %r14 | ||
31 | stmg %r2,%r5,32(%r15) | ||
32 | stg %r14,112(%r15) | ||
33 | lgr %r1,%r15 | ||
34 | aghi %r15,-160 | ||
35 | stg %r1,__SF_BACKCHAIN(%r15) | ||
36 | lgr %r2,%r14 | ||
37 | lg %r3,168(%r15) | ||
38 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
39 | larl %r14,ftrace_dyn_func | ||
40 | #else | ||
41 | larl %r14,ftrace_trace_function | ||
42 | #endif | ||
43 | lg %r14,0(%r14) | ||
44 | basr %r14,%r14 | ||
45 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
46 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
47 | .globl ftrace_graph_caller | ||
48 | ftrace_graph_caller: | ||
49 | # This unconditional branch gets runtime patched. Change only if | ||
50 | # you know what you are doing. See ftrace_enable_graph_caller(). | ||
51 | j 0f | ||
52 | #endif | ||
53 | lg %r2,272(%r15) | ||
54 | lg %r3,168(%r15) | ||
55 | brasl %r14,prepare_ftrace_return | ||
56 | stg %r2,168(%r15) | ||
57 | 0: | ||
58 | #endif | ||
59 | aghi %r15,160 | ||
60 | lmg %r2,%r5,32(%r15) | ||
61 | lg %r14,112(%r15) | ||
62 | br %r14 | ||
63 | |||
64 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
65 | |||
66 | .globl return_to_handler | ||
67 | return_to_handler: | ||
68 | stmg %r2,%r5,32(%r15) | ||
69 | lgr %r1,%r15 | ||
70 | aghi %r15,-160 | ||
71 | stg %r1,__SF_BACKCHAIN(%r15) | ||
72 | brasl %r14,ftrace_return_to_handler | ||
73 | aghi %r15,160 | ||
74 | lgr %r14,%r2 | ||
75 | lmg %r2,%r5,32(%r15) | ||
76 | br %r14 | ||
77 | |||
78 | #endif | ||
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 43acd73105b7..f3ddd7ac06c5 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c | |||
@@ -51,6 +51,9 @@ | |||
51 | #include "compat_ptrace.h" | 51 | #include "compat_ptrace.h" |
52 | #endif | 52 | #endif |
53 | 53 | ||
54 | #define CREATE_TRACE_POINTS | ||
55 | #include <trace/events/syscalls.h> | ||
56 | |||
54 | enum s390_regset { | 57 | enum s390_regset { |
55 | REGSET_GENERAL, | 58 | REGSET_GENERAL, |
56 | REGSET_FP, | 59 | REGSET_FP, |
@@ -661,8 +664,8 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) | |||
661 | ret = -1; | 664 | ret = -1; |
662 | } | 665 | } |
663 | 666 | ||
664 | if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE))) | 667 | if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) |
665 | ftrace_syscall_enter(regs); | 668 | trace_sys_enter(regs, regs->gprs[2]); |
666 | 669 | ||
667 | if (unlikely(current->audit_context)) | 670 | if (unlikely(current->audit_context)) |
668 | audit_syscall_entry(is_compat_task() ? | 671 | audit_syscall_entry(is_compat_task() ? |
@@ -679,8 +682,8 @@ asmlinkage void do_syscall_trace_exit(struct pt_regs *regs) | |||
679 | audit_syscall_exit(AUDITSC_RESULT(regs->gprs[2]), | 682 | audit_syscall_exit(AUDITSC_RESULT(regs->gprs[2]), |
680 | regs->gprs[2]); | 683 | regs->gprs[2]); |
681 | 684 | ||
682 | if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE))) | 685 | if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) |
683 | ftrace_syscall_exit(regs); | 686 | trace_sys_exit(regs, regs->gprs[2]); |
684 | 687 | ||
685 | if (test_thread_flag(TIF_SYSCALL_TRACE)) | 688 | if (test_thread_flag(TIF_SYSCALL_TRACE)) |
686 | tracehook_report_syscall_exit(regs, 0); | 689 | tracehook_report_syscall_exit(regs, 0); |
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index cbb897bc50bd..9ed13a1ed376 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c | |||
@@ -156,15 +156,11 @@ __setup("condev=", condev_setup); | |||
156 | 156 | ||
157 | static void __init set_preferred_console(void) | 157 | static void __init set_preferred_console(void) |
158 | { | 158 | { |
159 | if (MACHINE_IS_KVM) { | 159 | if (MACHINE_IS_KVM) |
160 | add_preferred_console("hvc", 0, NULL); | 160 | add_preferred_console("hvc", 0, NULL); |
161 | s390_virtio_console_init(); | 161 | else if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP) |
162 | return; | ||
163 | } | ||
164 | |||
165 | if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP) | ||
166 | add_preferred_console("ttyS", 0, NULL); | 162 | add_preferred_console("ttyS", 0, NULL); |
167 | if (CONSOLE_IS_3270) | 163 | else if (CONSOLE_IS_3270) |
168 | add_preferred_console("tty3270", 0, NULL); | 164 | add_preferred_console("tty3270", 0, NULL); |
169 | } | 165 | } |
170 | 166 | ||
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index 062bd64e65fa..6b4fef877f9d 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c | |||
@@ -536,4 +536,6 @@ void do_notify_resume(struct pt_regs *regs) | |||
536 | { | 536 | { |
537 | clear_thread_flag(TIF_NOTIFY_RESUME); | 537 | clear_thread_flag(TIF_NOTIFY_RESUME); |
538 | tracehook_notify_resume(regs); | 538 | tracehook_notify_resume(regs); |
539 | if (current->replacement_session_keyring) | ||
540 | key_replace_session_keyring(); | ||
539 | } | 541 | } |
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index be2cae083406..56c16876b919 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
@@ -49,6 +49,7 @@ | |||
49 | #include <asm/sclp.h> | 49 | #include <asm/sclp.h> |
50 | #include <asm/cputime.h> | 50 | #include <asm/cputime.h> |
51 | #include <asm/vdso.h> | 51 | #include <asm/vdso.h> |
52 | #include <asm/cpu.h> | ||
52 | #include "entry.h" | 53 | #include "entry.h" |
53 | 54 | ||
54 | static struct task_struct *current_set[NR_CPUS]; | 55 | static struct task_struct *current_set[NR_CPUS]; |
@@ -70,6 +71,23 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices); | |||
70 | 71 | ||
71 | static void smp_ext_bitcall(int, ec_bit_sig); | 72 | static void smp_ext_bitcall(int, ec_bit_sig); |
72 | 73 | ||
74 | static int cpu_stopped(int cpu) | ||
75 | { | ||
76 | __u32 status; | ||
77 | |||
78 | switch (signal_processor_ps(&status, 0, cpu, sigp_sense)) { | ||
79 | case sigp_order_code_accepted: | ||
80 | case sigp_status_stored: | ||
81 | /* Check for stopped and check stop state */ | ||
82 | if (status & 0x50) | ||
83 | return 1; | ||
84 | break; | ||
85 | default: | ||
86 | break; | ||
87 | } | ||
88 | return 0; | ||
89 | } | ||
90 | |||
73 | void smp_send_stop(void) | 91 | void smp_send_stop(void) |
74 | { | 92 | { |
75 | int cpu, rc; | 93 | int cpu, rc; |
@@ -86,7 +104,7 @@ void smp_send_stop(void) | |||
86 | rc = signal_processor(cpu, sigp_stop); | 104 | rc = signal_processor(cpu, sigp_stop); |
87 | } while (rc == sigp_busy); | 105 | } while (rc == sigp_busy); |
88 | 106 | ||
89 | while (!smp_cpu_not_running(cpu)) | 107 | while (!cpu_stopped(cpu)) |
90 | cpu_relax(); | 108 | cpu_relax(); |
91 | } | 109 | } |
92 | } | 110 | } |
@@ -269,19 +287,6 @@ static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { } | |||
269 | 287 | ||
270 | #endif /* CONFIG_ZFCPDUMP */ | 288 | #endif /* CONFIG_ZFCPDUMP */ |
271 | 289 | ||
272 | static int cpu_stopped(int cpu) | ||
273 | { | ||
274 | __u32 status; | ||
275 | |||
276 | /* Check for stopped state */ | ||
277 | if (signal_processor_ps(&status, 0, cpu, sigp_sense) == | ||
278 | sigp_status_stored) { | ||
279 | if (status & 0x40) | ||
280 | return 1; | ||
281 | } | ||
282 | return 0; | ||
283 | } | ||
284 | |||
285 | static int cpu_known(int cpu_id) | 290 | static int cpu_known(int cpu_id) |
286 | { | 291 | { |
287 | int cpu; | 292 | int cpu; |
@@ -300,7 +305,7 @@ static int smp_rescan_cpus_sigp(cpumask_t avail) | |||
300 | logical_cpu = cpumask_first(&avail); | 305 | logical_cpu = cpumask_first(&avail); |
301 | if (logical_cpu >= nr_cpu_ids) | 306 | if (logical_cpu >= nr_cpu_ids) |
302 | return 0; | 307 | return 0; |
303 | for (cpu_id = 0; cpu_id <= 65535; cpu_id++) { | 308 | for (cpu_id = 0; cpu_id <= MAX_CPU_ADDRESS; cpu_id++) { |
304 | if (cpu_known(cpu_id)) | 309 | if (cpu_known(cpu_id)) |
305 | continue; | 310 | continue; |
306 | __cpu_logical_map[logical_cpu] = cpu_id; | 311 | __cpu_logical_map[logical_cpu] = cpu_id; |
@@ -379,7 +384,7 @@ static void __init smp_detect_cpus(void) | |||
379 | /* Use sigp detection algorithm if sclp doesn't work. */ | 384 | /* Use sigp detection algorithm if sclp doesn't work. */ |
380 | if (sclp_get_cpu_info(info)) { | 385 | if (sclp_get_cpu_info(info)) { |
381 | smp_use_sigp_detection = 1; | 386 | smp_use_sigp_detection = 1; |
382 | for (cpu = 0; cpu <= 65535; cpu++) { | 387 | for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) { |
383 | if (cpu == boot_cpu_addr) | 388 | if (cpu == boot_cpu_addr) |
384 | continue; | 389 | continue; |
385 | __cpu_logical_map[CPU_INIT_NO] = cpu; | 390 | __cpu_logical_map[CPU_INIT_NO] = cpu; |
@@ -635,7 +640,7 @@ int __cpu_disable(void) | |||
635 | void __cpu_die(unsigned int cpu) | 640 | void __cpu_die(unsigned int cpu) |
636 | { | 641 | { |
637 | /* Wait until target cpu is down */ | 642 | /* Wait until target cpu is down */ |
638 | while (!smp_cpu_not_running(cpu)) | 643 | while (!cpu_stopped(cpu)) |
639 | cpu_relax(); | 644 | cpu_relax(); |
640 | smp_free_lowcore(cpu); | 645 | smp_free_lowcore(cpu); |
641 | pr_info("Processor %d stopped\n", cpu); | 646 | pr_info("Processor %d stopped\n", cpu); |
diff --git a/arch/s390/power/swsusp.c b/arch/s390/kernel/suspend.c index bd1f5c6b0b8c..086bee970cae 100644 --- a/arch/s390/power/swsusp.c +++ b/arch/s390/kernel/suspend.c | |||
@@ -1,13 +1,44 @@ | |||
1 | /* | 1 | /* |
2 | * Support for suspend and resume on s390 | 2 | * Suspend support specific for s390. |
3 | * | 3 | * |
4 | * Copyright IBM Corp. 2009 | 4 | * Copyright IBM Corp. 2009 |
5 | * | 5 | * |
6 | * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com> | 6 | * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com> |
7 | * | ||
8 | */ | 7 | */ |
9 | 8 | ||
9 | #include <linux/suspend.h> | ||
10 | #include <linux/reboot.h> | ||
11 | #include <linux/pfn.h> | ||
12 | #include <linux/mm.h> | ||
13 | #include <asm/sections.h> | ||
10 | #include <asm/system.h> | 14 | #include <asm/system.h> |
15 | #include <asm/ipl.h> | ||
16 | |||
17 | /* | ||
18 | * References to section boundaries | ||
19 | */ | ||
20 | extern const void __nosave_begin, __nosave_end; | ||
21 | |||
22 | /* | ||
23 | * check if the given pfn is in the 'nosave' section or the read-only NSS section | ||
24 | */ | ||
25 | int pfn_is_nosave(unsigned long pfn) | ||
26 | { | ||
27 | unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT; | ||
28 | unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) | ||
29 | >> PAGE_SHIFT; | ||
30 | unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1; | ||
31 | unsigned long stext_pfn = PFN_DOWN(__pa(&_stext)); | ||
32 | |||
33 | if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn) | ||
34 | return 1; | ||
35 | if (pfn >= stext_pfn && pfn <= eshared_pfn) { | ||
36 | if (ipl_info.type == IPL_TYPE_NSS) | ||
37 | return 1; | ||
38 | } else if ((tprot(pfn * PAGE_SIZE) && pfn > 0)) | ||
39 | return 1; | ||
40 | return 0; | ||
41 | } | ||
11 | 42 | ||
12 | void save_processor_state(void) | 43 | void save_processor_state(void) |
13 | { | 44 | { |
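pfn_is_nosave() is the hook the hibernation core consults to exclude pages from the snapshot image, which is why it moves into the kernel proper here. A rough sketch of the consumer side; the helper name and loop bounds are purely illustrative:

	/* illustrative only: count the pages a snapshot would include */
	static unsigned long count_saveable_pages(unsigned long max_pfn)
	{
		unsigned long pfn, n = 0;

		for (pfn = 0; pfn < max_pfn; pfn++)
			if (pfn_valid(pfn) && !pfn_is_nosave(pfn))
				n++;
		return n;
	}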
diff --git a/arch/s390/power/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S index b26df5c5933e..7cd6b096f0d1 100644 --- a/arch/s390/power/swsusp_asm64.S +++ b/arch/s390/kernel/swsusp_asm64.S | |||
@@ -21,7 +21,7 @@ | |||
21 | * This function runs with disabled interrupts. | 21 | * This function runs with disabled interrupts. |
22 | */ | 22 | */ |
23 | .section .text | 23 | .section .text |
24 | .align 2 | 24 | .align 4 |
25 | .globl swsusp_arch_suspend | 25 | .globl swsusp_arch_suspend |
26 | swsusp_arch_suspend: | 26 | swsusp_arch_suspend: |
27 | stmg %r6,%r15,__SF_GPRS(%r15) | 27 | stmg %r6,%r15,__SF_GPRS(%r15) |
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index d4c8e9c47c81..54e327e9af04 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c | |||
@@ -60,6 +60,7 @@ | |||
60 | #define TICK_SIZE tick | 60 | #define TICK_SIZE tick |
61 | 61 | ||
62 | u64 sched_clock_base_cc = -1; /* Force to data section. */ | 62 | u64 sched_clock_base_cc = -1; /* Force to data section. */ |
63 | EXPORT_SYMBOL_GPL(sched_clock_base_cc); | ||
63 | 64 | ||
64 | static DEFINE_PER_CPU(struct clock_event_device, comparators); | 65 | static DEFINE_PER_CPU(struct clock_event_device, comparators); |
65 | 66 | ||
@@ -68,7 +69,7 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators); | |||
68 | */ | 69 | */ |
69 | unsigned long long notrace sched_clock(void) | 70 | unsigned long long notrace sched_clock(void) |
70 | { | 71 | { |
71 | return ((get_clock_xt() - sched_clock_base_cc) * 125) >> 9; | 72 | return (get_clock_monotonic() * 125) >> 9; |
72 | } | 73 | } |
73 | 74 | ||
74 | /* | 75 | /* |
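sched_clock() now goes through get_clock_monotonic(), which presumably wraps the old get_clock_xt() - sched_clock_base_cc subtraction (exporting sched_clock_base_cc above supports that). The scaling itself is unchanged: the s390 TOD clock advances 4096 units per microsecond, and 1000/4096 reduces to 125/512, hence the multiply-and-shift:

	/* ns = tod * 1000 / 4096 = tod * 125 / 512 = (tod * 125) >> 9 */
	static inline unsigned long long tod_to_ns(unsigned long long tod)
	{
		return (tod * 125) >> 9;
	}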
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index a53db23ee092..7315f9e67e1d 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S | |||
@@ -52,55 +52,18 @@ SECTIONS | |||
52 | . = ALIGN(PAGE_SIZE); | 52 | . = ALIGN(PAGE_SIZE); |
53 | _eshared = .; /* End of shareable data */ | 53 | _eshared = .; /* End of shareable data */ |
54 | 54 | ||
55 | . = ALIGN(16); /* Exception table */ | 55 | EXCEPTION_TABLE(16) :data |
56 | __ex_table : { | ||
57 | __start___ex_table = .; | ||
58 | *(__ex_table) | ||
59 | __stop___ex_table = .; | ||
60 | } :data | ||
61 | |||
62 | .data : { /* Data */ | ||
63 | DATA_DATA | ||
64 | CONSTRUCTORS | ||
65 | } | ||
66 | |||
67 | . = ALIGN(PAGE_SIZE); | ||
68 | .data_nosave : { | ||
69 | __nosave_begin = .; | ||
70 | *(.data.nosave) | ||
71 | } | ||
72 | . = ALIGN(PAGE_SIZE); | ||
73 | __nosave_end = .; | ||
74 | |||
75 | . = ALIGN(PAGE_SIZE); | ||
76 | .data.page_aligned : { | ||
77 | *(.data.idt) | ||
78 | } | ||
79 | 56 | ||
80 | . = ALIGN(0x100); | 57 | RW_DATA_SECTION(0x100, PAGE_SIZE, THREAD_SIZE) |
81 | .data.cacheline_aligned : { | ||
82 | *(.data.cacheline_aligned) | ||
83 | } | ||
84 | 58 | ||
85 | . = ALIGN(0x100); | ||
86 | .data.read_mostly : { | ||
87 | *(.data.read_mostly) | ||
88 | } | ||
89 | _edata = .; /* End of data section */ | 59 | _edata = .; /* End of data section */ |
90 | 60 | ||
91 | . = ALIGN(THREAD_SIZE); /* init_task */ | ||
92 | .data.init_task : { | ||
93 | *(.data.init_task) | ||
94 | } | ||
95 | |||
96 | /* will be freed after init */ | 61 | /* will be freed after init */ |
97 | . = ALIGN(PAGE_SIZE); /* Init code and data */ | 62 | . = ALIGN(PAGE_SIZE); /* Init code and data */ |
98 | __init_begin = .; | 63 | __init_begin = .; |
99 | .init.text : { | 64 | |
100 | _sinittext = .; | 65 | INIT_TEXT_SECTION(PAGE_SIZE) |
101 | INIT_TEXT | 66 | |
102 | _einittext = .; | ||
103 | } | ||
104 | /* | 67 | /* |
105 | * .exit.text is discarded at runtime, not link time, | 68 | * .exit.text is discarded at runtime, not link time, |
106 | * to deal with references from __bug_table | 69 | * to deal with references from __bug_table |
@@ -111,49 +74,13 @@ SECTIONS | |||
111 | 74 | ||
112 | /* early.c uses stsi, which requires page aligned data. */ | 75 | /* early.c uses stsi, which requires page aligned data. */ |
113 | . = ALIGN(PAGE_SIZE); | 76 | . = ALIGN(PAGE_SIZE); |
114 | .init.data : { | 77 | INIT_DATA_SECTION(0x100) |
115 | INIT_DATA | ||
116 | } | ||
117 | . = ALIGN(0x100); | ||
118 | .init.setup : { | ||
119 | __setup_start = .; | ||
120 | *(.init.setup) | ||
121 | __setup_end = .; | ||
122 | } | ||
123 | .initcall.init : { | ||
124 | __initcall_start = .; | ||
125 | INITCALLS | ||
126 | __initcall_end = .; | ||
127 | } | ||
128 | |||
129 | .con_initcall.init : { | ||
130 | __con_initcall_start = .; | ||
131 | *(.con_initcall.init) | ||
132 | __con_initcall_end = .; | ||
133 | } | ||
134 | SECURITY_INIT | ||
135 | |||
136 | #ifdef CONFIG_BLK_DEV_INITRD | ||
137 | . = ALIGN(0x100); | ||
138 | .init.ramfs : { | ||
139 | __initramfs_start = .; | ||
140 | *(.init.ramfs) | ||
141 | . = ALIGN(2); | ||
142 | __initramfs_end = .; | ||
143 | } | ||
144 | #endif | ||
145 | 78 | ||
146 | PERCPU(PAGE_SIZE) | 79 | PERCPU(PAGE_SIZE) |
147 | . = ALIGN(PAGE_SIZE); | 80 | . = ALIGN(PAGE_SIZE); |
148 | __init_end = .; /* freed after init ends here */ | 81 | __init_end = .; /* freed after init ends here */ |
149 | 82 | ||
150 | /* BSS */ | 83 | BSS_SECTION(0, 2, 0) |
151 | .bss : { | ||
152 | __bss_start = .; | ||
153 | *(.bss) | ||
154 | . = ALIGN(2); | ||
155 | __bss_stop = .; | ||
156 | } | ||
157 | 84 | ||
158 | _end = . ; | 85 | _end = . ; |
159 | 86 | ||
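The hand-rolled output sections are replaced with the generic macros from include/asm-generic/vmlinux.lds.h, which emit the same sections in a fixed order. Roughly (an approximation of the generic header of this era, with the AT() load-offset details omitted), RW_DATA_SECTION(0x100, PAGE_SIZE, THREAD_SIZE) expands to:

	. = ALIGN(PAGE_SIZE);
	.data : {
		INIT_TASK_DATA(THREAD_SIZE)	/* was .data.init_task        */
		NOSAVE_DATA			/* was .data_nosave           */
		PAGE_ALIGNED_DATA(PAGE_SIZE)	/* was .data.page_aligned     */
		CACHELINE_ALIGNED_DATA(0x100)	/* was .data.cacheline_aligned */
		READ_MOSTLY_DATA(0x100)		/* was .data.read_mostly      */
		DATA_DATA
		CONSTRUCTORS
	}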
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile index db05661ac895..eec054484419 100644 --- a/arch/s390/mm/Makefile +++ b/arch/s390/mm/Makefile | |||
@@ -2,7 +2,7 @@ | |||
2 | # Makefile for the linux s390-specific parts of the memory manager. | 2 | # Makefile for the linux s390-specific parts of the memory manager. |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o | 5 | obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o \ |
6 | page-states.o | ||
6 | obj-$(CONFIG_CMM) += cmm.o | 7 | obj-$(CONFIG_CMM) += cmm.o |
7 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o | 8 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o |
8 | obj-$(CONFIG_PAGE_STATES) += page-states.o | ||
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index e5e119fe03b2..1abbadd497e1 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c | |||
@@ -10,6 +10,7 @@ | |||
10 | * Copyright (C) 1995 Linus Torvalds | 10 | * Copyright (C) 1995 Linus Torvalds |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/perf_counter.h> | ||
13 | #include <linux/signal.h> | 14 | #include <linux/signal.h> |
14 | #include <linux/sched.h> | 15 | #include <linux/sched.h> |
15 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
@@ -305,7 +306,7 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int write) | |||
305 | * interrupts again and then search the VMAs | 306 | * interrupts again and then search the VMAs |
306 | */ | 307 | */ |
307 | local_irq_enable(); | 308 | local_irq_enable(); |
308 | 309 | perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); | |
309 | down_read(&mm->mmap_sem); | 310 | down_read(&mm->mmap_sem); |
310 | 311 | ||
311 | si_code = SEGV_MAPERR; | 312 | si_code = SEGV_MAPERR; |
@@ -363,11 +364,15 @@ good_area: | |||
363 | } | 364 | } |
364 | BUG(); | 365 | BUG(); |
365 | } | 366 | } |
366 | if (fault & VM_FAULT_MAJOR) | 367 | if (fault & VM_FAULT_MAJOR) { |
367 | tsk->maj_flt++; | 368 | tsk->maj_flt++; |
368 | else | 369 | perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, |
370 | regs, address); | ||
371 | } else { | ||
369 | tsk->min_flt++; | 372 | tsk->min_flt++; |
370 | 373 | perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, | |
374 | regs, address); | ||
375 | } | ||
371 | up_read(&mm->mmap_sem); | 376 | up_read(&mm->mmap_sem); |
372 | /* | 377 | /* |
373 | * The instruction that caused the program check will | 378 | * The instruction that caused the program check will |
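These three hooks feed the software events PERF_COUNT_SW_PAGE_FAULTS{,_MAJ,_MIN} from the s390 fault handler. Once they are in place, fault counts become visible to the perf tool, e.g. with an invocation along the lines of "perf stat -e page-faults,minor-faults,major-faults <workload>" (illustrative; event alias names depend on the perf tool version).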
diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c index fc0ad73ffd90..f92ec203ad92 100644 --- a/arch/s390/mm/page-states.c +++ b/arch/s390/mm/page-states.c | |||
@@ -1,6 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * arch/s390/mm/page-states.c | ||
3 | * | ||
4 | * Copyright IBM Corp. 2008 | 2 | * Copyright IBM Corp. 2008 |
5 | * | 3 | * |
6 | * Guest page hinting for unused pages. | 4 | * Guest page hinting for unused pages. |
@@ -17,11 +15,12 @@ | |||
17 | #define ESSA_SET_STABLE 1 | 15 | #define ESSA_SET_STABLE 1 |
18 | #define ESSA_SET_UNUSED 2 | 16 | #define ESSA_SET_UNUSED 2 |
19 | 17 | ||
20 | static int cmma_flag; | 18 | static int cmma_flag = 1; |
21 | 19 | ||
22 | static int __init cmma(char *str) | 20 | static int __init cmma(char *str) |
23 | { | 21 | { |
24 | char *parm; | 22 | char *parm; |
23 | |||
25 | parm = strstrip(str); | 24 | parm = strstrip(str); |
26 | if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) { | 25 | if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) { |
27 | cmma_flag = 1; | 26 | cmma_flag = 1; |
@@ -32,7 +31,6 @@ static int __init cmma(char *str) | |||
32 | return 1; | 31 | return 1; |
33 | return 0; | 32 | return 0; |
34 | } | 33 | } |
35 | |||
36 | __setup("cmma=", cmma); | 34 | __setup("cmma=", cmma); |
37 | 35 | ||
38 | void __init cmma_init(void) | 36 | void __init cmma_init(void) |
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 565667207985..c70215247071 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c | |||
@@ -78,9 +78,9 @@ unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec) | |||
78 | } | 78 | } |
79 | page->index = page_to_phys(shadow); | 79 | page->index = page_to_phys(shadow); |
80 | } | 80 | } |
81 | spin_lock(&mm->page_table_lock); | 81 | spin_lock(&mm->context.list_lock); |
82 | list_add(&page->lru, &mm->context.crst_list); | 82 | list_add(&page->lru, &mm->context.crst_list); |
83 | spin_unlock(&mm->page_table_lock); | 83 | spin_unlock(&mm->context.list_lock); |
84 | return (unsigned long *) page_to_phys(page); | 84 | return (unsigned long *) page_to_phys(page); |
85 | } | 85 | } |
86 | 86 | ||
@@ -89,9 +89,9 @@ void crst_table_free(struct mm_struct *mm, unsigned long *table) | |||
89 | unsigned long *shadow = get_shadow_table(table); | 89 | unsigned long *shadow = get_shadow_table(table); |
90 | struct page *page = virt_to_page(table); | 90 | struct page *page = virt_to_page(table); |
91 | 91 | ||
92 | spin_lock(&mm->page_table_lock); | 92 | spin_lock(&mm->context.list_lock); |
93 | list_del(&page->lru); | 93 | list_del(&page->lru); |
94 | spin_unlock(&mm->page_table_lock); | 94 | spin_unlock(&mm->context.list_lock); |
95 | if (shadow) | 95 | if (shadow) |
96 | free_pages((unsigned long) shadow, ALLOC_ORDER); | 96 | free_pages((unsigned long) shadow, ALLOC_ORDER); |
97 | free_pages((unsigned long) table, ALLOC_ORDER); | 97 | free_pages((unsigned long) table, ALLOC_ORDER); |
@@ -182,7 +182,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm) | |||
182 | unsigned long bits; | 182 | unsigned long bits; |
183 | 183 | ||
184 | bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL; | 184 | bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL; |
185 | spin_lock(&mm->page_table_lock); | 185 | spin_lock(&mm->context.list_lock); |
186 | page = NULL; | 186 | page = NULL; |
187 | if (!list_empty(&mm->context.pgtable_list)) { | 187 | if (!list_empty(&mm->context.pgtable_list)) { |
188 | page = list_first_entry(&mm->context.pgtable_list, | 188 | page = list_first_entry(&mm->context.pgtable_list, |
@@ -191,7 +191,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm) | |||
191 | page = NULL; | 191 | page = NULL; |
192 | } | 192 | } |
193 | if (!page) { | 193 | if (!page) { |
194 | spin_unlock(&mm->page_table_lock); | 194 | spin_unlock(&mm->context.list_lock); |
195 | page = alloc_page(GFP_KERNEL|__GFP_REPEAT); | 195 | page = alloc_page(GFP_KERNEL|__GFP_REPEAT); |
196 | if (!page) | 196 | if (!page) |
197 | return NULL; | 197 | return NULL; |
@@ -202,7 +202,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm) | |||
202 | clear_table_pgstes(table); | 202 | clear_table_pgstes(table); |
203 | else | 203 | else |
204 | clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE); | 204 | clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE); |
205 | spin_lock(&mm->page_table_lock); | 205 | spin_lock(&mm->context.list_lock); |
206 | list_add(&page->lru, &mm->context.pgtable_list); | 206 | list_add(&page->lru, &mm->context.pgtable_list); |
207 | } | 207 | } |
208 | table = (unsigned long *) page_to_phys(page); | 208 | table = (unsigned long *) page_to_phys(page); |
@@ -213,7 +213,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm) | |||
213 | page->flags |= bits; | 213 | page->flags |= bits; |
214 | if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1)) | 214 | if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1)) |
215 | list_move_tail(&page->lru, &mm->context.pgtable_list); | 215 | list_move_tail(&page->lru, &mm->context.pgtable_list); |
216 | spin_unlock(&mm->page_table_lock); | 216 | spin_unlock(&mm->context.list_lock); |
217 | return table; | 217 | return table; |
218 | } | 218 | } |
219 | 219 | ||
@@ -225,7 +225,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table) | |||
225 | bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL; | 225 | bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL; |
226 | bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long); | 226 | bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long); |
227 | page = pfn_to_page(__pa(table) >> PAGE_SHIFT); | 227 | page = pfn_to_page(__pa(table) >> PAGE_SHIFT); |
228 | spin_lock(&mm->page_table_lock); | 228 | spin_lock(&mm->context.list_lock); |
229 | page->flags ^= bits; | 229 | page->flags ^= bits; |
230 | if (page->flags & FRAG_MASK) { | 230 | if (page->flags & FRAG_MASK) { |
231 | /* Page now has some free pgtable fragments. */ | 231 | /* Page now has some free pgtable fragments. */ |
@@ -234,7 +234,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table) | |||
234 | } else | 234 | } else |
235 | /* All fragments of the 4K page have been freed. */ | 235 | /* All fragments of the 4K page have been freed. */ |
236 | list_del(&page->lru); | 236 | list_del(&page->lru); |
237 | spin_unlock(&mm->page_table_lock); | 237 | spin_unlock(&mm->context.list_lock); |
238 | if (page) { | 238 | if (page) { |
239 | pgtable_page_dtor(page); | 239 | pgtable_page_dtor(page); |
240 | __free_page(page); | 240 | __free_page(page); |
@@ -245,7 +245,7 @@ void disable_noexec(struct mm_struct *mm, struct task_struct *tsk) | |||
245 | { | 245 | { |
246 | struct page *page; | 246 | struct page *page; |
247 | 247 | ||
248 | spin_lock(&mm->page_table_lock); | 248 | spin_lock(&mm->context.list_lock); |
249 | /* Free shadow region and segment tables. */ | 249 | /* Free shadow region and segment tables. */ |
250 | list_for_each_entry(page, &mm->context.crst_list, lru) | 250 | list_for_each_entry(page, &mm->context.crst_list, lru) |
251 | if (page->index) { | 251 | if (page->index) { |
@@ -255,7 +255,7 @@ void disable_noexec(struct mm_struct *mm, struct task_struct *tsk) | |||
255 | /* "Free" second halves of page tables. */ | 255 | /* "Free" second halves of page tables. */ |
256 | list_for_each_entry(page, &mm->context.pgtable_list, lru) | 256 | list_for_each_entry(page, &mm->context.pgtable_list, lru) |
257 | page->flags &= ~SECOND_HALVES; | 257 | page->flags &= ~SECOND_HALVES; |
258 | spin_unlock(&mm->page_table_lock); | 258 | spin_unlock(&mm->context.list_lock); |
259 | mm->context.noexec = 0; | 259 | mm->context.noexec = 0; |
260 | update_mm(mm, tsk); | 260 | update_mm(mm, tsk); |
261 | } | 261 | } |
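The conversion from mm->page_table_lock to a dedicated mm->context.list_lock decouples page-table-list maintenance from the generic mm lock. It presupposes a spinlock added to s390's mm_context_t (arch/s390/include/asm/mmu.h, not shown in this section); the shape implied by these hunks and by the init in vmem.c below is roughly:

	typedef struct {
		spinlock_t list_lock;	/* protects the two lists below */
		struct list_head crst_list;
		struct list_head pgtable_list;
		int noexec;		/* tested in page_table_alloc() above */
		int has_pgste;
		/* remaining fields omitted; sketch only */
	} mm_context_t;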
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c index e4868bfc672f..5f91a38d7592 100644 --- a/arch/s390/mm/vmem.c +++ b/arch/s390/mm/vmem.c | |||
@@ -331,6 +331,7 @@ void __init vmem_map_init(void) | |||
331 | unsigned long start, end; | 331 | unsigned long start, end; |
332 | int i; | 332 | int i; |
333 | 333 | ||
334 | spin_lock_init(&init_mm.context.list_lock); | ||
334 | INIT_LIST_HEAD(&init_mm.context.crst_list); | 335 | INIT_LIST_HEAD(&init_mm.context.crst_list); |
335 | INIT_LIST_HEAD(&init_mm.context.pgtable_list); | 336 | INIT_LIST_HEAD(&init_mm.context.pgtable_list); |
336 | init_mm.context.noexec = 0; | 337 | init_mm.context.noexec = 0; |
diff --git a/arch/s390/power/Makefile b/arch/s390/power/Makefile deleted file mode 100644 index 973bb45a8fec..000000000000 --- a/arch/s390/power/Makefile +++ /dev/null | |||
@@ -1,8 +0,0 @@ | |||
1 | # | ||
2 | # Makefile for s390 PM support | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_HIBERNATION) += suspend.o | ||
6 | obj-$(CONFIG_HIBERNATION) += swsusp.o | ||
7 | obj-$(CONFIG_HIBERNATION) += swsusp_64.o | ||
8 | obj-$(CONFIG_HIBERNATION) += swsusp_asm64.o | ||
diff --git a/arch/s390/power/suspend.c b/arch/s390/power/suspend.c deleted file mode 100644 index b3351eceebbe..000000000000 --- a/arch/s390/power/suspend.c +++ /dev/null | |||
@@ -1,40 +0,0 @@ | |||
1 | /* | ||
2 | * Suspend support specific for s390. | ||
3 | * | ||
4 | * Copyright IBM Corp. 2009 | ||
5 | * | ||
6 | * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com> | ||
7 | */ | ||
8 | |||
9 | #include <linux/mm.h> | ||
10 | #include <linux/suspend.h> | ||
11 | #include <linux/reboot.h> | ||
12 | #include <linux/pfn.h> | ||
13 | #include <asm/sections.h> | ||
14 | #include <asm/ipl.h> | ||
15 | |||
16 | /* | ||
17 | * References to section boundaries | ||
18 | */ | ||
19 | extern const void __nosave_begin, __nosave_end; | ||
20 | |||
21 | /* | ||
22 | * check if given pfn is in the 'nosave' or in the read only NSS section | ||
23 | */ | ||
24 | int pfn_is_nosave(unsigned long pfn) | ||
25 | { | ||
26 | unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT; | ||
27 | unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) | ||
28 | >> PAGE_SHIFT; | ||
29 | unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1; | ||
30 | unsigned long stext_pfn = PFN_DOWN(__pa(&_stext)); | ||
31 | |||
32 | if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn) | ||
33 | return 1; | ||
34 | if (pfn >= stext_pfn && pfn <= eshared_pfn) { | ||
35 | if (ipl_info.type == IPL_TYPE_NSS) | ||
36 | return 1; | ||
37 | } else if ((tprot(pfn * PAGE_SIZE) && pfn > 0)) | ||
38 | return 1; | ||
39 | return 0; | ||
40 | } | ||
diff --git a/arch/s390/power/swsusp_64.c b/arch/s390/power/swsusp_64.c deleted file mode 100644 index 9516a517d72f..000000000000 --- a/arch/s390/power/swsusp_64.c +++ /dev/null | |||
@@ -1,17 +0,0 @@ | |||
1 | /* | ||
2 | * Support for suspend and resume on s390 | ||
3 | * | ||
4 | * Copyright IBM Corp. 2009 | ||
5 | * | ||
6 | * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com> | ||
7 | * | ||
8 | */ | ||
9 | |||
10 | #include <asm/system.h> | ||
11 | #include <linux/interrupt.h> | ||
12 | |||
13 | void do_after_copyback(void) | ||
14 | { | ||
15 | mb(); | ||
16 | } | ||
17 | |||
diff --git a/arch/sh/include/asm/sh_eth.h b/arch/sh/include/asm/sh_eth.h index bb832584f3c1..acf99700deed 100644 --- a/arch/sh/include/asm/sh_eth.h +++ b/arch/sh/include/asm/sh_eth.h | |||
@@ -6,6 +6,9 @@ enum {EDMAC_LITTLE_ENDIAN, EDMAC_BIG_ENDIAN}; | |||
6 | struct sh_eth_plat_data { | 6 | struct sh_eth_plat_data { |
7 | int phy; | 7 | int phy; |
8 | int edmac_endian; | 8 | int edmac_endian; |
9 | |||
10 | unsigned no_ether_link:1; | ||
11 | unsigned ether_link_active_low:1; | ||
9 | }; | 12 | }; |
10 | 13 | ||
11 | #endif | 14 | #endif |
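The two new bitfields let board code describe PHYs whose link signal is missing or inverted. A hypothetical platform-data instance (the PHY address and flag values are made up for illustration):

	static struct sh_eth_plat_data my_sh_eth_pdata = {
		.phy			= 0x1f,	/* example PHY address */
		.edmac_endian		= EDMAC_LITTLE_ENDIAN,
		.no_ether_link		= 0,	/* link signal is wired up... */
		.ether_link_active_low	= 1,	/* ...but active-low */
	};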
diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c index b5afbec1db59..04a21883f327 100644 --- a/arch/sh/kernel/signal_32.c +++ b/arch/sh/kernel/signal_32.c | |||
@@ -640,5 +640,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int save_r0, | |||
640 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { | 640 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { |
641 | clear_thread_flag(TIF_NOTIFY_RESUME); | 641 | clear_thread_flag(TIF_NOTIFY_RESUME); |
642 | tracehook_notify_resume(regs); | 642 | tracehook_notify_resume(regs); |
643 | if (current->replacement_session_keyring) | ||
644 | key_replace_session_keyring(); | ||
643 | } | 645 | } |
644 | } | 646 | } |
diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c index 0663a0ee6021..9e5c9b1d7e98 100644 --- a/arch/sh/kernel/signal_64.c +++ b/arch/sh/kernel/signal_64.c | |||
@@ -772,5 +772,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned long thread_info | |||
772 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { | 772 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { |
773 | clear_thread_flag(TIF_NOTIFY_RESUME); | 773 | clear_thread_flag(TIF_NOTIFY_RESUME); |
774 | tracehook_notify_resume(regs); | 774 | tracehook_notify_resume(regs); |
775 | if (current->replacement_session_keyring) | ||
776 | key_replace_session_keyring(); | ||
775 | } | 777 | } |
776 | } | 778 | } |
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 3f8b6a92eabd..2bd5c287538a 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig | |||
@@ -25,6 +25,9 @@ config SPARC | |||
25 | select ARCH_WANT_OPTIONAL_GPIOLIB | 25 | select ARCH_WANT_OPTIONAL_GPIOLIB |
26 | select RTC_CLASS | 26 | select RTC_CLASS |
27 | select RTC_DRV_M48T59 | 27 | select RTC_DRV_M48T59 |
28 | select HAVE_PERF_COUNTERS | ||
29 | select HAVE_DMA_ATTRS | ||
30 | select HAVE_DMA_API_DEBUG | ||
28 | 31 | ||
29 | config SPARC32 | 32 | config SPARC32 |
30 | def_bool !64BIT | 33 | def_bool !64BIT |
@@ -44,6 +47,7 @@ config SPARC64 | |||
44 | select RTC_DRV_BQ4802 | 47 | select RTC_DRV_BQ4802 |
45 | select RTC_DRV_SUN4V | 48 | select RTC_DRV_SUN4V |
46 | select RTC_DRV_STARFIRE | 49 | select RTC_DRV_STARFIRE |
50 | select HAVE_PERF_COUNTERS | ||
47 | 51 | ||
48 | config ARCH_DEFCONFIG | 52 | config ARCH_DEFCONFIG |
49 | string | 53 | string |
@@ -437,6 +441,17 @@ config SERIAL_CONSOLE | |||
437 | 441 | ||
438 | If unsure, say N. | 442 | If unsure, say N. |
439 | 443 | ||
444 | config SPARC_LEON | ||
445 | bool "Sparc Leon processor family" | ||
446 | depends on SPARC32 | ||
447 | ---help--- | ||
448 | Say Y here if you are running on a SPARC-LEON processor. | ||
449 | The LEON processor is a synthesizable VHDL model of the | ||
450 | SPARC-v8 standard. LEON is part of the GRLIB collection of | ||
451 | IP cores that are distributed under GPL. GRLIB can be downloaded | ||
452 | from www.gaisler.com. You can download a sparc-linux cross-compilation | ||
453 | toolchain at www.gaisler.com. | ||
454 | |||
440 | endmenu | 455 | endmenu |
441 | 456 | ||
442 | menu "Bus options (PCI etc.)" | 457 | menu "Bus options (PCI etc.)" |
diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile index 2003ded054c2..467221dd5702 100644 --- a/arch/sparc/Makefile +++ b/arch/sparc/Makefile | |||
@@ -38,10 +38,6 @@ CPPFLAGS_vmlinux.lds += -m32 | |||
38 | # Actual linking is done with "make image". | 38 | # Actual linking is done with "make image". |
39 | LDFLAGS_vmlinux = -r | 39 | LDFLAGS_vmlinux = -r |
40 | 40 | ||
41 | # Default target | ||
42 | all: zImage | ||
43 | |||
44 | |||
45 | else | 41 | else |
46 | ##### | 42 | ##### |
47 | # sparc64 | 43 | # sparc64 |
@@ -91,6 +87,9 @@ endif | |||
91 | 87 | ||
92 | boot := arch/sparc/boot | 88 | boot := arch/sparc/boot |
93 | 89 | ||
90 | # Default target | ||
91 | all: zImage | ||
92 | |||
94 | image zImage tftpboot.img vmlinux.aout: vmlinux | 93 | image zImage tftpboot.img vmlinux.aout: vmlinux |
95 | $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ | 94 | $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ |
96 | 95 | ||
@@ -109,8 +108,9 @@ define archhelp | |||
109 | endef | 108 | endef |
110 | else | 109 | else |
111 | define archhelp | 110 | define archhelp |
112 | echo '* vmlinux - Standard sparc64 kernel' | 111 | echo '* vmlinux - standard sparc64 kernel' |
113 | echo ' vmlinux.aout - a.out kernel for sparc64' | 112 | echo '* zImage - stripped and compressed sparc64 kernel ($(boot)/zImage)' |
113 | echo ' vmlinux.aout - a.out kernel for sparc64' | ||
114 | echo ' tftpboot.img - image prepared for tftp' | 114 | echo ' tftpboot.img - image prepared for tftp' |
115 | endef | 115 | endef |
116 | endif | 116 | endif |
diff --git a/arch/sparc/boot/Makefile b/arch/sparc/boot/Makefile index 1ff0fd924756..97e3feb9ff1b 100644 --- a/arch/sparc/boot/Makefile +++ b/arch/sparc/boot/Makefile | |||
@@ -79,6 +79,9 @@ $(obj)/image: vmlinux FORCE | |||
79 | $(call if_changed,strip) | 79 | $(call if_changed,strip) |
80 | @echo ' kernel: $@ is ready' | 80 | @echo ' kernel: $@ is ready' |
81 | 81 | ||
82 | $(obj)/zImage: $(obj)/image | ||
83 | $(call if_changed,gzip) | ||
84 | |||
82 | $(obj)/tftpboot.img: $(obj)/image $(obj)/piggyback_64 System.map $(ROOT_IMG) FORCE | 85 | $(obj)/tftpboot.img: $(obj)/image $(obj)/piggyback_64 System.map $(ROOT_IMG) FORCE |
83 | $(call if_changed,elftoaout) | 86 | $(call if_changed,elftoaout) |
84 | $(call if_changed,piggy) | 87 | $(call if_changed,piggy) |
diff --git a/arch/sparc/include/asm/asi.h b/arch/sparc/include/asm/asi.h index 74703c5ef985..b2e3db63a64b 100644 --- a/arch/sparc/include/asm/asi.h +++ b/arch/sparc/include/asm/asi.h | |||
@@ -40,7 +40,11 @@ | |||
40 | #define ASI_M_UNA01 0x01 /* Same here... */ | 40 | #define ASI_M_UNA01 0x01 /* Same here... */ |
41 | #define ASI_M_MXCC 0x02 /* Access to TI VIKING MXCC registers */ | 41 | #define ASI_M_MXCC 0x02 /* Access to TI VIKING MXCC registers */ |
42 | #define ASI_M_FLUSH_PROBE 0x03 /* Reference MMU Flush/Probe; rw, ss */ | 42 | #define ASI_M_FLUSH_PROBE 0x03 /* Reference MMU Flush/Probe; rw, ss */ |
43 | #ifndef CONFIG_SPARC_LEON | ||
43 | #define ASI_M_MMUREGS 0x04 /* MMU Registers; rw, ss */ | 44 | #define ASI_M_MMUREGS 0x04 /* MMU Registers; rw, ss */ |
45 | #else | ||
46 | #define ASI_M_MMUREGS 0x19 | ||
47 | #endif /* CONFIG_SPARC_LEON */ | ||
44 | #define ASI_M_TLBDIAG 0x05 /* MMU TLB only Diagnostics */ | 48 | #define ASI_M_TLBDIAG 0x05 /* MMU TLB only Diagnostics */ |
45 | #define ASI_M_DIAGS 0x06 /* Reference MMU Diagnostics */ | 49 | #define ASI_M_DIAGS 0x06 /* Reference MMU Diagnostics */ |
46 | #define ASI_M_IODIAG 0x07 /* MMU I/O TLB only Diagnostics */ | 50 | #define ASI_M_IODIAG 0x07 /* MMU I/O TLB only Diagnostics */ |
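Redefining ASI_M_MMUREGS to 0x19 under CONFIG_SPARC_LEON points the generic SRMMU accessors at LEON's MMU-register ASI (matching ASI_LEON_MMUREGS in the new leon.h later in this patch). A sketch of the kind of accessor this affects, modeled on the srmmu register helpers:

	/* read the SRMMU control register through the MMU-register ASI */
	static inline unsigned int srmmu_get_mmureg(void)
	{
		unsigned int retval;

		__asm__ __volatile__("lda [%%g0] %1, %0\n\t"
				     : "=r" (retval)
				     : "i" (ASI_M_MMUREGS));
		return retval;
	}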
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h index 204e4bf64438..5a8c308e2b5c 100644 --- a/arch/sparc/include/asm/dma-mapping.h +++ b/arch/sparc/include/asm/dma-mapping.h | |||
@@ -3,6 +3,7 @@ | |||
3 | 3 | ||
4 | #include <linux/scatterlist.h> | 4 | #include <linux/scatterlist.h> |
5 | #include <linux/mm.h> | 5 | #include <linux/mm.h> |
6 | #include <linux/dma-debug.h> | ||
6 | 7 | ||
7 | #define DMA_ERROR_CODE (~(dma_addr_t)0x0) | 8 | #define DMA_ERROR_CODE (~(dma_addr_t)0x0) |
8 | 9 | ||
@@ -13,142 +14,40 @@ extern int dma_set_mask(struct device *dev, u64 dma_mask); | |||
13 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) | 14 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) |
14 | #define dma_is_consistent(d, h) (1) | 15 | #define dma_is_consistent(d, h) (1) |
15 | 16 | ||
16 | struct dma_ops { | 17 | extern struct dma_map_ops *dma_ops, pci32_dma_ops; |
17 | void *(*alloc_coherent)(struct device *dev, size_t size, | 18 | extern struct bus_type pci_bus_type; |
18 | dma_addr_t *dma_handle, gfp_t flag); | ||
19 | void (*free_coherent)(struct device *dev, size_t size, | ||
20 | void *cpu_addr, dma_addr_t dma_handle); | ||
21 | dma_addr_t (*map_page)(struct device *dev, struct page *page, | ||
22 | unsigned long offset, size_t size, | ||
23 | enum dma_data_direction direction); | ||
24 | void (*unmap_page)(struct device *dev, dma_addr_t dma_addr, | ||
25 | size_t size, | ||
26 | enum dma_data_direction direction); | ||
27 | int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents, | ||
28 | enum dma_data_direction direction); | ||
29 | void (*unmap_sg)(struct device *dev, struct scatterlist *sg, | ||
30 | int nhwentries, | ||
31 | enum dma_data_direction direction); | ||
32 | void (*sync_single_for_cpu)(struct device *dev, | ||
33 | dma_addr_t dma_handle, size_t size, | ||
34 | enum dma_data_direction direction); | ||
35 | void (*sync_single_for_device)(struct device *dev, | ||
36 | dma_addr_t dma_handle, size_t size, | ||
37 | enum dma_data_direction direction); | ||
38 | void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg, | ||
39 | int nelems, | ||
40 | enum dma_data_direction direction); | ||
41 | void (*sync_sg_for_device)(struct device *dev, | ||
42 | struct scatterlist *sg, int nents, | ||
43 | enum dma_data_direction dir); | ||
44 | }; | ||
45 | extern const struct dma_ops *dma_ops; | ||
46 | 19 | ||
47 | static inline void *dma_alloc_coherent(struct device *dev, size_t size, | 20 | static inline struct dma_map_ops *get_dma_ops(struct device *dev) |
48 | dma_addr_t *dma_handle, gfp_t flag) | ||
49 | { | ||
50 | return dma_ops->alloc_coherent(dev, size, dma_handle, flag); | ||
51 | } | ||
52 | |||
53 | static inline void dma_free_coherent(struct device *dev, size_t size, | ||
54 | void *cpu_addr, dma_addr_t dma_handle) | ||
55 | { | ||
56 | dma_ops->free_coherent(dev, size, cpu_addr, dma_handle); | ||
57 | } | ||
58 | |||
59 | static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, | ||
60 | size_t size, | ||
61 | enum dma_data_direction direction) | ||
62 | { | ||
63 | return dma_ops->map_page(dev, virt_to_page(cpu_addr), | ||
64 | (unsigned long)cpu_addr & ~PAGE_MASK, size, | ||
65 | direction); | ||
66 | } | ||
67 | |||
68 | static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, | ||
69 | size_t size, | ||
70 | enum dma_data_direction direction) | ||
71 | { | ||
72 | dma_ops->unmap_page(dev, dma_addr, size, direction); | ||
73 | } | ||
74 | |||
75 | static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, | ||
76 | unsigned long offset, size_t size, | ||
77 | enum dma_data_direction direction) | ||
78 | { | ||
79 | return dma_ops->map_page(dev, page, offset, size, direction); | ||
80 | } | ||
81 | |||
82 | static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address, | ||
83 | size_t size, | ||
84 | enum dma_data_direction direction) | ||
85 | { | ||
86 | dma_ops->unmap_page(dev, dma_address, size, direction); | ||
87 | } | ||
88 | |||
89 | static inline int dma_map_sg(struct device *dev, struct scatterlist *sg, | ||
90 | int nents, enum dma_data_direction direction) | ||
91 | { | ||
92 | return dma_ops->map_sg(dev, sg, nents, direction); | ||
93 | } | ||
94 | |||
95 | static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg, | ||
96 | int nents, enum dma_data_direction direction) | ||
97 | { | 21 | { |
98 | dma_ops->unmap_sg(dev, sg, nents, direction); | 22 | #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI) |
99 | } | 23 | if (dev->bus == &pci_bus_type) |
100 | 24 | return &pci32_dma_ops; | |
101 | static inline void dma_sync_single_for_cpu(struct device *dev, | 25 | #endif |
102 | dma_addr_t dma_handle, size_t size, | 26 | return dma_ops; |
103 | enum dma_data_direction direction) | ||
104 | { | ||
105 | dma_ops->sync_single_for_cpu(dev, dma_handle, size, direction); | ||
106 | } | 27 | } |
107 | 28 | ||
108 | static inline void dma_sync_single_for_device(struct device *dev, | 29 | #include <asm-generic/dma-mapping-common.h> |
109 | dma_addr_t dma_handle, | ||
110 | size_t size, | ||
111 | enum dma_data_direction direction) | ||
112 | { | ||
113 | if (dma_ops->sync_single_for_device) | ||
114 | dma_ops->sync_single_for_device(dev, dma_handle, size, | ||
115 | direction); | ||
116 | } | ||
117 | 30 | ||
118 | static inline void dma_sync_sg_for_cpu(struct device *dev, | 31 | static inline void *dma_alloc_coherent(struct device *dev, size_t size, |
119 | struct scatterlist *sg, int nelems, | 32 | dma_addr_t *dma_handle, gfp_t flag) |
120 | enum dma_data_direction direction) | ||
121 | { | 33 | { |
122 | dma_ops->sync_sg_for_cpu(dev, sg, nelems, direction); | 34 | struct dma_map_ops *ops = get_dma_ops(dev); |
123 | } | 35 | void *cpu_addr; |
124 | 36 | ||
125 | static inline void dma_sync_sg_for_device(struct device *dev, | 37 | cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag); |
126 | struct scatterlist *sg, int nelems, | 38 | debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr); |
127 | enum dma_data_direction direction) | 39 | return cpu_addr; |
128 | { | ||
129 | if (dma_ops->sync_sg_for_device) | ||
130 | dma_ops->sync_sg_for_device(dev, sg, nelems, direction); | ||
131 | } | 40 | } |
132 | 41 | ||
133 | static inline void dma_sync_single_range_for_cpu(struct device *dev, | 42 | static inline void dma_free_coherent(struct device *dev, size_t size, |
134 | dma_addr_t dma_handle, | 43 | void *cpu_addr, dma_addr_t dma_handle) |
135 | unsigned long offset, | ||
136 | size_t size, | ||
137 | enum dma_data_direction dir) | ||
138 | { | 44 | { |
139 | dma_sync_single_for_cpu(dev, dma_handle+offset, size, dir); | 45 | struct dma_map_ops *ops = get_dma_ops(dev); |
140 | } | ||
141 | 46 | ||
142 | static inline void dma_sync_single_range_for_device(struct device *dev, | 47 | debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); |
143 | dma_addr_t dma_handle, | 48 | ops->free_coherent(dev, size, cpu_addr, dma_handle); |
144 | unsigned long offset, | ||
145 | size_t size, | ||
146 | enum dma_data_direction dir) | ||
147 | { | ||
148 | dma_sync_single_for_device(dev, dma_handle+offset, size, dir); | ||
149 | } | 49 | } |
150 | 50 | ||
151 | |||
152 | static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | 51 | static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
153 | { | 52 | { |
154 | return (dma_addr == DMA_ERROR_CODE); | 53 | return (dma_addr == DMA_ERROR_CODE); |
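With the private struct dma_ops gone, sparc now pulls the streaming-mapping wrappers from asm-generic/dma-mapping-common.h, which route every call through get_dma_ops() (and, on sparc32 with PCI, to pci32_dma_ops). Driver-side usage is unchanged; a minimal sketch, assuming dev, buf and len come from surrounding driver code:

	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, handle))
		return -ENOMEM;
	/* ... hardware DMAs from buf ... */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);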
diff --git a/arch/sparc/include/asm/irq_64.h b/arch/sparc/include/asm/irq_64.h index 1934f2cbf513..a0b443cb3c1f 100644 --- a/arch/sparc/include/asm/irq_64.h +++ b/arch/sparc/include/asm/irq_64.h | |||
@@ -89,8 +89,8 @@ static inline unsigned long get_softint(void) | |||
89 | return retval; | 89 | return retval; |
90 | } | 90 | } |
91 | 91 | ||
92 | void __trigger_all_cpu_backtrace(void); | 92 | void arch_trigger_all_cpu_backtrace(void); |
93 | #define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace() | 93 | #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace |
94 | 94 | ||
95 | extern void *hardirq_stack[NR_CPUS]; | 95 | extern void *hardirq_stack[NR_CPUS]; |
96 | extern void *softirq_stack[NR_CPUS]; | 96 | extern void *softirq_stack[NR_CPUS]; |
diff --git a/arch/sparc/include/asm/leon.h b/arch/sparc/include/asm/leon.h new file mode 100644 index 000000000000..28a42b73f64f --- /dev/null +++ b/arch/sparc/include/asm/leon.h | |||
@@ -0,0 +1,362 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2004 Konrad Eisele (eiselekd@web.de,konrad@gaisler.com) Gaisler Research | ||
3 | * Copyright (C) 2004 Stefan Holst (mail@s-holst.de) Uni-Stuttgart | ||
4 | * Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB | ||
5 | * Copyright (C) 2009 Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB | ||
6 | */ | ||
7 | |||
8 | #ifndef LEON_H_INCLUDE | ||
9 | #define LEON_H_INCLUDE | ||
10 | |||
11 | #ifdef CONFIG_SPARC_LEON | ||
12 | |||
13 | #define ASI_LEON_NOCACHE 0x01 | ||
14 | |||
15 | #define ASI_LEON_DCACHE_MISS 0x1 | ||
16 | |||
17 | #define ASI_LEON_CACHEREGS 0x02 | ||
18 | #define ASI_LEON_IFLUSH 0x10 | ||
19 | #define ASI_LEON_DFLUSH 0x11 | ||
20 | |||
21 | #define ASI_LEON_MMUFLUSH 0x18 | ||
22 | #define ASI_LEON_MMUREGS 0x19 | ||
23 | #define ASI_LEON_BYPASS 0x1c | ||
24 | #define ASI_LEON_FLUSH_PAGE 0x10 | ||
25 | |||
26 | /* mmu register access, ASI_LEON_MMUREGS */ | ||
27 | #define LEON_CNR_CTRL 0x000 | ||
28 | #define LEON_CNR_CTXP 0x100 | ||
29 | #define LEON_CNR_CTX 0x200 | ||
30 | #define LEON_CNR_F 0x300 | ||
31 | #define LEON_CNR_FADDR 0x400 | ||
32 | |||
33 | #define LEON_CNR_CTX_NCTX 256 /*number of MMU ctx */ | ||
34 | |||
35 | #define LEON_CNR_CTRL_TLBDIS 0x80000000 | ||
36 | |||
37 | #define LEON_MMUTLB_ENT_MAX 64 | ||
38 | |||
39 | /* | ||
40 | * diagnostic access from mmutlb.vhd: | ||
41 | * 0: pte address | ||
42 | * 4: pte | ||
43 | * 8: additional flags | ||
44 | */ | ||
45 | #define LEON_DIAGF_LVL 0x3 | ||
46 | #define LEON_DIAGF_WR 0x8 | ||
47 | #define LEON_DIAGF_WR_SHIFT 3 | ||
48 | #define LEON_DIAGF_HIT 0x10 | ||
49 | #define LEON_DIAGF_HIT_SHIFT 4 | ||
50 | #define LEON_DIAGF_CTX 0x1fe0 | ||
51 | #define LEON_DIAGF_CTX_SHIFT 5 | ||
52 | #define LEON_DIAGF_VALID 0x2000 | ||
53 | #define LEON_DIAGF_VALID_SHIFT 13 | ||
54 | |||
55 | /* | ||
56 | * Interrupt Sources | ||
57 | * | ||
58 | * The interrupt source numbers directly map to the trap type and to | ||
59 | * the bits used in the Interrupt Clear, Interrupt Force, Interrupt Mask, | ||
60 | * and the Interrupt Pending Registers. | ||
61 | */ | ||
62 | #define LEON_INTERRUPT_CORRECTABLE_MEMORY_ERROR 1 | ||
63 | #define LEON_INTERRUPT_UART_1_RX_TX 2 | ||
64 | #define LEON_INTERRUPT_UART_0_RX_TX 3 | ||
65 | #define LEON_INTERRUPT_EXTERNAL_0 4 | ||
66 | #define LEON_INTERRUPT_EXTERNAL_1 5 | ||
67 | #define LEON_INTERRUPT_EXTERNAL_2 6 | ||
68 | #define LEON_INTERRUPT_EXTERNAL_3 7 | ||
69 | #define LEON_INTERRUPT_TIMER1 8 | ||
70 | #define LEON_INTERRUPT_TIMER2 9 | ||
71 | #define LEON_INTERRUPT_EMPTY1 10 | ||
72 | #define LEON_INTERRUPT_EMPTY2 11 | ||
73 | #define LEON_INTERRUPT_OPEN_ETH 12 | ||
74 | #define LEON_INTERRUPT_EMPTY4 13 | ||
75 | #define LEON_INTERRUPT_EMPTY5 14 | ||
76 | #define LEON_INTERRUPT_EMPTY6 15 | ||
77 | |||
78 | /* irq masks */ | ||
79 | #define LEON_HARD_INT(x) (1 << (x)) /* irq 0-15 */ | ||
80 | #define LEON_IRQMASK_R 0x0000fffe /* bit 15- 1 of lregs.irqmask */ | ||
81 | #define LEON_IRQPRIO_R 0xfffe0000 /* bit 31-17 of lregs.irqmask */ | ||
82 | |||
83 | /* leon uart register definitions */ | ||
84 | #define LEON_OFF_UDATA 0x0 | ||
85 | #define LEON_OFF_USTAT 0x4 | ||
86 | #define LEON_OFF_UCTRL 0x8 | ||
87 | #define LEON_OFF_USCAL 0xc | ||
88 | |||
89 | #define LEON_UCTRL_RE 0x01 | ||
90 | #define LEON_UCTRL_TE 0x02 | ||
91 | #define LEON_UCTRL_RI 0x04 | ||
92 | #define LEON_UCTRL_TI 0x08 | ||
93 | #define LEON_UCTRL_PS 0x10 | ||
94 | #define LEON_UCTRL_PE 0x20 | ||
95 | #define LEON_UCTRL_FL 0x40 | ||
96 | #define LEON_UCTRL_LB 0x80 | ||
97 | |||
98 | #define LEON_USTAT_DR 0x01 | ||
99 | #define LEON_USTAT_TS 0x02 | ||
100 | #define LEON_USTAT_TH 0x04 | ||
101 | #define LEON_USTAT_BR 0x08 | ||
102 | #define LEON_USTAT_OV 0x10 | ||
103 | #define LEON_USTAT_PE 0x20 | ||
104 | #define LEON_USTAT_FE 0x40 | ||
105 | |||
106 | #define LEON_MCFG2_SRAMDIS 0x00002000 | ||
107 | #define LEON_MCFG2_SDRAMEN 0x00004000 | ||
108 | #define LEON_MCFG2_SRAMBANKSZ 0x00001e00 /* [12-9] */ | ||
109 | #define LEON_MCFG2_SRAMBANKSZ_SHIFT 9 | ||
110 | #define LEON_MCFG2_SDRAMBANKSZ 0x03800000 /* [25-23] */ | ||
111 | #define LEON_MCFG2_SDRAMBANKSZ_SHIFT 23 | ||
112 | |||
113 | #define LEON_TCNT0_MASK 0x7fffff | ||
114 | |||
115 | #define LEON_USTAT_ERROR (LEON_USTAT_OV | LEON_USTAT_PE | LEON_USTAT_FE) | ||
116 | /* no break yet */ | ||
117 | |||
118 | #define ASI_LEON3_SYSCTRL 0x02 | ||
119 | #define ASI_LEON3_SYSCTRL_ICFG 0x08 | ||
120 | #define ASI_LEON3_SYSCTRL_DCFG 0x0c | ||
121 | #define ASI_LEON3_SYSCTRL_CFG_SNOOPING (1 << 27) | ||
122 | #define ASI_LEON3_SYSCTRL_CFG_SSIZE(c) (1 << ((c >> 20) & 0xf)) | ||
123 | |||
124 | #ifndef __ASSEMBLY__ | ||
125 | |||
126 | /* do a virtual address read without cache */ | ||
127 | static inline unsigned long leon_readnobuffer_reg(unsigned long paddr) | ||
128 | { | ||
129 | unsigned long retval; | ||
130 | __asm__ __volatile__("lda [%1] %2, %0\n\t" : | ||
131 | "=r"(retval) : "r"(paddr), "i"(ASI_LEON_NOCACHE)); | ||
132 | return retval; | ||
133 | } | ||
134 | |||
135 | /* do a physical address bypass write, i.e. for 0x80000000 */ | ||
136 | static inline void leon_store_reg(unsigned long paddr, unsigned long value) | ||
137 | { | ||
138 | __asm__ __volatile__("sta %0, [%1] %2\n\t" : : "r"(value), "r"(paddr), | ||
139 | "i"(ASI_LEON_BYPASS) : "memory"); | ||
140 | } | ||
141 | |||
142 | /* do a physical address bypass load, i.e. for 0x80000000 */ | ||
143 | static inline unsigned long leon_load_reg(unsigned long paddr) | ||
144 | { | ||
145 | unsigned long retval; | ||
146 | __asm__ __volatile__("lda [%1] %2, %0\n\t" : | ||
147 | "=r"(retval) : "r"(paddr), "i"(ASI_LEON_BYPASS)); | ||
148 | return retval; | ||
149 | } | ||
150 | |||
151 | extern inline void leon_srmmu_disabletlb(void) | ||
152 | { | ||
153 | unsigned int retval; | ||
154 | __asm__ __volatile__("lda [%%g0] %2, %0\n\t" : "=r"(retval) : "r"(0), | ||
155 | "i"(ASI_LEON_MMUREGS)); | ||
156 | retval |= LEON_CNR_CTRL_TLBDIS; | ||
157 | __asm__ __volatile__("sta %0, [%%g0] %2\n\t" : : "r"(retval), "r"(0), | ||
158 | "i"(ASI_LEON_MMUREGS) : "memory"); | ||
159 | } | ||
160 | |||
161 | extern inline void leon_srmmu_enabletlb(void) | ||
162 | { | ||
163 | unsigned int retval; | ||
164 | __asm__ __volatile__("lda [%%g0] %2, %0\n\t" : "=r"(retval) : "r"(0), | ||
165 | "i"(ASI_LEON_MMUREGS)); | ||
166 | retval = retval & ~LEON_CNR_CTRL_TLBDIS; | ||
167 | __asm__ __volatile__("sta %0, [%%g0] %2\n\t" : : "r"(retval), "r"(0), | ||
168 | "i"(ASI_LEON_MMUREGS) : "memory"); | ||
169 | } | ||
170 | |||
171 | /* macro access for leon_load_reg() and leon_store_reg() */ | ||
172 | #define LEON3_BYPASS_LOAD_PA(x) (leon_load_reg((unsigned long)(x))) | ||
173 | #define LEON3_BYPASS_STORE_PA(x, v) (leon_store_reg((unsigned long)(x), (unsigned long)(v))) | ||
174 | #define LEON3_BYPASS_ANDIN_PA(x, v) LEON3_BYPASS_STORE_PA(x, LEON3_BYPASS_LOAD_PA(x) & v) | ||
175 | #define LEON3_BYPASS_ORIN_PA(x, v) LEON3_BYPASS_STORE_PA(x, LEON3_BYPASS_LOAD_PA(x) | v) | ||
176 | #define LEON_BYPASS_LOAD_PA(x) leon_load_reg((unsigned long)(x)) | ||
177 | #define LEON_BYPASS_STORE_PA(x, v) leon_store_reg((unsigned long)(x), (unsigned long)(v)) | ||
178 | #define LEON_REGLOAD_PA(x) leon_load_reg((unsigned long)(x)+LEON_PREGS) | ||
179 | #define LEON_REGSTORE_PA(x, v) leon_store_reg((unsigned long)(x)+LEON_PREGS, (unsigned long)(v)) | ||
180 | #define LEON_REGSTORE_OR_PA(x, v) LEON_REGSTORE_PA(x, LEON_REGLOAD_PA(x) | (unsigned long)(v)) | ||
181 | #define LEON_REGSTORE_AND_PA(x, v) LEON_REGSTORE_PA(x, LEON_REGLOAD_PA(x) & (unsigned long)(v)) | ||
182 | |||
183 | /* macro access for leon_readnobuffer_reg() */ | ||
184 | #define LEON_BYPASSCACHE_LOAD_VA(x) leon_readnobuffer_reg((unsigned long)(x)) | ||
185 | |||
186 | extern void sparc_leon_eirq_register(int eirq); | ||
187 | extern void leon_init(void); | ||
188 | extern void leon_switch_mm(void); | ||
189 | extern void leon_init_IRQ(void); | ||
190 | |||
191 | extern unsigned long last_valid_pfn; | ||
192 | |||
193 | extern inline unsigned long sparc_leon3_get_dcachecfg(void) | ||
194 | { | ||
195 | unsigned int retval; | ||
196 | __asm__ __volatile__("lda [%1] %2, %0\n\t" : | ||
197 | "=r"(retval) : | ||
198 | "r"(ASI_LEON3_SYSCTRL_DCFG), | ||
199 | "i"(ASI_LEON3_SYSCTRL)); | ||
200 | return retval; | ||
201 | } | ||
202 | |||
203 | /* enable snooping */ | ||
204 | extern inline void sparc_leon3_enable_snooping(void) | ||
205 | { | ||
206 | __asm__ __volatile__ ("lda [%%g0] 2, %%l1\n\t" | ||
207 | "set 0x800000, %%l2\n\t" | ||
208 | "or %%l2, %%l1, %%l2\n\t" | ||
209 | "sta %%l2, [%%g0] 2\n\t" : : : "l1", "l2"); | ||
210 | }; | ||
211 | |||
212 | extern inline void sparc_leon3_disable_cache(void) | ||
213 | { | ||
214 | __asm__ __volatile__ ("lda [%%g0] 2, %%l1\n\t" | ||
215 | "set 0x00000f, %%l2\n\t" | ||
216 | "andn %%l2, %%l1, %%l2\n\t" | ||
217 | "sta %%l2, [%%g0] 2\n\t" : : : "l1", "l2"); | ||
218 | }; | ||
219 | |||
220 | #endif /*!__ASSEMBLY__*/ | ||
221 | |||
222 | #ifdef CONFIG_SMP | ||
223 | # define LEON3_IRQ_RESCHEDULE 13 | ||
224 | # define LEON3_IRQ_TICKER (leon_percpu_timer_dev[0].irq) | ||
225 | # define LEON3_IRQ_CROSS_CALL 15 | ||
226 | #endif | ||
227 | |||
228 | #if defined(PAGE_SIZE_LEON_8K) | ||
229 | #define LEON_PAGE_SIZE_LEON 1 | ||
230 | #elif defined(PAGE_SIZE_LEON_16K) | ||
231 | #define LEON_PAGE_SIZE_LEON 2 | ||
232 | #else | ||
233 | #define LEON_PAGE_SIZE_LEON 0 | ||
234 | #endif | ||
235 | |||
236 | #if LEON_PAGE_SIZE_LEON == 0 | ||
237 | /* [ 8, 6, 6 ] + 12 */ | ||
238 | #define LEON_PGD_SH 24 | ||
239 | #define LEON_PGD_M 0xff | ||
240 | #define LEON_PMD_SH 18 | ||
241 | #define LEON_PMD_SH_V (LEON_PGD_SH-2) | ||
242 | #define LEON_PMD_M 0x3f | ||
243 | #define LEON_PTE_SH 12 | ||
244 | #define LEON_PTE_M 0x3f | ||
245 | #elif LEON_PAGE_SIZE_LEON == 1 | ||
246 | /* [ 7, 6, 6 ] + 13 */ | ||
247 | #define LEON_PGD_SH 25 | ||
248 | #define LEON_PGD_M 0x7f | ||
249 | #define LEON_PMD_SH 19 | ||
250 | #define LEON_PMD_SH_V (LEON_PGD_SH-1) | ||
251 | #define LEON_PMD_M 0x3f | ||
252 | #define LEON_PTE_SH 13 | ||
253 | #define LEON_PTE_M 0x3f | ||
254 | #elif LEON_PAGE_SIZE_LEON == 2 | ||
255 | /* [ 6, 6, 6 ] + 14 */ | ||
256 | #define LEON_PGD_SH 26 | ||
257 | #define LEON_PGD_M 0x3f | ||
258 | #define LEON_PMD_SH 20 | ||
259 | #define LEON_PMD_SH_V (LEON_PGD_SH-0) | ||
260 | #define LEON_PMD_M 0x3f | ||
261 | #define LEON_PTE_SH 14 | ||
262 | #define LEON_PTE_M 0x3f | ||
263 | #elif LEON_PAGE_SIZE_LEON == 3 | ||
264 | /* [ 4, 7, 6 ] + 15 */ | ||
265 | #define LEON_PGD_SH 28 | ||
266 | #define LEON_PGD_M 0x0f | ||
267 | #define LEON_PMD_SH 21 | ||
268 | #define LEON_PMD_SH_V (LEON_PGD_SH-0) | ||
269 | #define LEON_PMD_M 0x7f | ||
270 | #define LEON_PTE_SH 15 | ||
271 | #define LEON_PTE_M 0x3f | ||
272 | #else | ||
273 | #error cannot determine LEON_PAGE_SIZE_LEON | ||
274 | #endif | ||
275 | |||
276 | #define PAGE_MIN_SHIFT (12) | ||
277 | #define PAGE_MIN_SIZE (1UL << PAGE_MIN_SHIFT) | ||
278 | |||
279 | #define LEON3_XCCR_SETS_MASK 0x07000000UL | ||
280 | #define LEON3_XCCR_SSIZE_MASK 0x00f00000UL | ||
281 | |||
282 | #define LEON2_CCR_DSETS_MASK 0x03000000UL | ||
283 | #define LEON2_CFG_SSIZE_MASK 0x00007000UL | ||
284 | |||
285 | #ifndef __ASSEMBLY__ | ||
286 | extern unsigned long srmmu_swprobe(unsigned long vaddr, unsigned long *paddr); | ||
287 | extern void leon_flush_icache_all(void); | ||
288 | extern void leon_flush_dcache_all(void); | ||
289 | extern void leon_flush_cache_all(void); | ||
290 | extern void leon_flush_tlb_all(void); | ||
291 | extern int leon_flush_during_switch; | ||
292 | extern int leon_flush_needed(void); | ||
293 | |||
294 | struct vm_area_struct; | ||
295 | extern void leon_flush_icache_all(void); | ||
296 | extern void leon_flush_dcache_all(void); | ||
297 | extern void leon_flush_pcache_all(struct vm_area_struct *vma, unsigned long page); | ||
298 | extern void leon_flush_cache_all(void); | ||
299 | extern void leon_flush_tlb_all(void); | ||
300 | extern int leon_flush_during_switch; | ||
301 | extern int leon_flush_needed(void); | ||
302 | extern void leon_flush_pcache_all(struct vm_area_struct *vma, unsigned long page); | ||
303 | |||
304 | /* struct that hold LEON3 cache configuration registers */ | ||
305 | struct leon3_cacheregs { | ||
306 | unsigned long ccr; /* 0x00 - Cache Control Register */ | ||
307 | unsigned long iccr; /* 0x08 - Instruction Cache Configuration Register */ | ||
308 | unsigned long dccr; /* 0x0c - Data Cache Configuration Register */ | ||
309 | }; | ||
310 | |||
311 | /* struct that hold LEON2 cache configuration register | ||
312 | * & configuration register | ||
313 | */ | ||
314 | struct leon2_cacheregs { | ||
315 | unsigned long ccr, cfg; | ||
316 | }; | ||
317 | |||
318 | #ifdef __KERNEL__ | ||
319 | |||
320 | #include <linux/interrupt.h> | ||
321 | |||
322 | struct device_node; | ||
323 | extern int sparc_leon_eirq_get(int eirq, int cpu); | ||
324 | extern irqreturn_t sparc_leon_eirq_isr(int dummy, void *dev_id); | ||
325 | extern void sparc_leon_eirq_register(int eirq); | ||
326 | extern void leon_clear_clock_irq(void); | ||
327 | extern void leon_load_profile_irq(int cpu, unsigned int limit); | ||
328 | extern void leon_init_timers(irq_handler_t counter_fn); | ||
329 | extern void leon_clear_clock_irq(void); | ||
330 | extern void leon_load_profile_irq(int cpu, unsigned int limit); | ||
331 | extern void leon_trans_init(struct device_node *dp); | ||
332 | extern void leon_node_init(struct device_node *dp, struct device_node ***nextp); | ||
333 | extern void leon_init_IRQ(void); | ||
334 | extern void leon_init(void); | ||
335 | extern unsigned long srmmu_swprobe(unsigned long vaddr, unsigned long *paddr); | ||
336 | extern void init_leon(void); | ||
337 | extern void poke_leonsparc(void); | ||
338 | extern void leon3_getCacheRegs(struct leon3_cacheregs *regs); | ||
339 | extern int leon_flush_needed(void); | ||
340 | extern void leon_switch_mm(void); | ||
341 | extern int srmmu_swprobe_trace; | ||
342 | |||
343 | #endif /* __KERNEL__ */ | ||
344 | |||
345 | #endif /* __ASSEMBLY__ */ | ||
346 | |||
347 | /* macros used in leon_mm.c */ | ||
348 | #define PFN(x) ((x) >> PAGE_SHIFT) | ||
349 | #define _pfn_valid(pfn) ((pfn < last_valid_pfn) && (pfn >= PFN(phys_base))) | ||
350 | #define _SRMMU_PTE_PMASK_LEON 0xffffffff | ||
351 | |||
352 | #else /* defined(CONFIG_SPARC_LEON) */ | ||
353 | |||
354 | /* nop definitions for !LEON case */ | ||
355 | #define leon_init() do {} while (0) | ||
356 | #define leon_switch_mm() do {} while (0) | ||
357 | #define leon_init_IRQ() do {} while (0) | ||
358 | #define init_leon() do {} while (0) | ||
359 | |||
360 | #endif /* !defined(CONFIG_SPARC_LEON) */ | ||
361 | |||
362 | #endif | ||
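A short illustration of the bypass accessors defined above, reading a LEON3 timer's config register uncached through ASI_LEON_BYPASS; the register struct comes from leon_amba.h later in this patch, and the helper name is made up:

	static u32 leon3_gptimer_get_config(struct leon3_gptimer_regs_map *regs)
	{
		/* physical-address load, bypassing the cache */
		return LEON3_BYPASS_LOAD_PA(&regs->config);
	}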
diff --git a/arch/sparc/include/asm/leon_amba.h b/arch/sparc/include/asm/leon_amba.h new file mode 100644 index 000000000000..618e88821795 --- /dev/null +++ b/arch/sparc/include/asm/leon_amba.h | |||
@@ -0,0 +1,263 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2004 Konrad Eisele (eiselekd@web.de, konrad@gaisler.com), Gaisler Research | ||
3 | * Copyright (C) 2004 Stefan Holst (mail@s-holst.de), Uni-Stuttgart | ||
4 | * Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com), Konrad Eisele (konrad@gaisler.com), Aeroflex Gaisler AB | ||
5 | */ | ||
6 | |||
7 | #ifndef LEON_AMBA_H_INCLUDE | ||
8 | #define LEON_AMBA_H_INCLUDE | ||
9 | |||
10 | #ifndef __ASSEMBLY__ | ||
11 | |||
12 | struct amba_prom_registers { | ||
13 | unsigned int phys_addr; /* The physical address of this register */ | ||
14 | unsigned int reg_size; /* How many bytes does this register take up? */ | ||
15 | }; | ||
16 | |||
17 | #endif | ||
18 | |||
19 | /* | ||
20 | * The following defines the bits in the LEON UART Status Registers. | ||
21 | */ | ||
22 | |||
23 | #define LEON_REG_UART_STATUS_DR 0x00000001 /* Data Ready */ | ||
24 | #define LEON_REG_UART_STATUS_TSE 0x00000002 /* TX Send Register Empty */ | ||
25 | #define LEON_REG_UART_STATUS_THE 0x00000004 /* TX Hold Register Empty */ | ||
26 | #define LEON_REG_UART_STATUS_BR 0x00000008 /* Break Error */ | ||
27 | #define LEON_REG_UART_STATUS_OE 0x00000010 /* RX Overrun Error */ | ||
28 | #define LEON_REG_UART_STATUS_PE 0x00000020 /* RX Parity Error */ | ||
29 | #define LEON_REG_UART_STATUS_FE 0x00000040 /* RX Framing Error */ | ||
30 | #define LEON_REG_UART_STATUS_ERR 0x00000078 /* Error Mask */ | ||
31 | |||
32 | /* | ||
33 | * The following defines the bits in the LEON UART Ctrl Registers. | ||
34 | */ | ||
35 | |||
36 | #define LEON_REG_UART_CTRL_RE 0x00000001 /* Receiver enable */ | ||
37 | #define LEON_REG_UART_CTRL_TE 0x00000002 /* Transmitter enable */ | ||
38 | #define LEON_REG_UART_CTRL_RI 0x00000004 /* Receiver interrupt enable */ | ||
39 | #define LEON_REG_UART_CTRL_TI 0x00000008 /* Transmitter irq */ | ||
40 | #define LEON_REG_UART_CTRL_PS 0x00000010 /* Parity select */ | ||
41 | #define LEON_REG_UART_CTRL_PE 0x00000020 /* Parity enable */ | ||
42 | #define LEON_REG_UART_CTRL_FL 0x00000040 /* Flow control enable */ | ||
43 | #define LEON_REG_UART_CTRL_LB 0x00000080 /* Loop Back enable */ | ||
44 | |||
45 | #define LEON3_GPTIMER_EN 1 | ||
46 | #define LEON3_GPTIMER_RL 2 | ||
47 | #define LEON3_GPTIMER_LD 4 | ||
48 | #define LEON3_GPTIMER_IRQEN 8 | ||
49 | #define LEON3_GPTIMER_SEPIRQ 8 | ||
50 | |||
51 | #define LEON23_REG_TIMER_CONTROL_EN 0x00000001 /* 1 = enable counting */ | ||
52 | /* 0 = hold scalar and counter */ | ||
53 | #define LEON23_REG_TIMER_CONTROL_RL 0x00000002 /* 1 = reload at 0 */ | ||
54 | /* 0 = stop at 0 */ | ||
55 | #define LEON23_REG_TIMER_CONTROL_LD 0x00000004 /* 1 = load counter */ | ||
56 | /* 0 = no function */ | ||
57 | #define LEON23_REG_TIMER_CONTROL_IQ 0x00000008 /* 1 = irq enable */ | ||
58 | /* 0 = no function */ | ||
59 | |||
60 | /* | ||
61 | * The following defines the bits in the LEON PS/2 Status Registers. | ||
62 | */ | ||
63 | |||
64 | #define LEON_REG_PS2_STATUS_DR 0x00000001 /* Data Ready */ | ||
65 | #define LEON_REG_PS2_STATUS_PE 0x00000002 /* Parity error */ | ||
66 | #define LEON_REG_PS2_STATUS_FE 0x00000004 /* Framing error */ | ||
67 | #define LEON_REG_PS2_STATUS_KI 0x00000008 /* Keyboard inhibit */ | ||
68 | #define LEON_REG_PS2_STATUS_RF 0x00000010 /* RX buffer full */ | ||
69 | #define LEON_REG_PS2_STATUS_TF 0x00000020 /* TX buffer full */ | ||
70 | |||
71 | /* | ||
72 | * The following defines the bits in the LEON PS/2 Ctrl Registers. | ||
73 | */ | ||
74 | |||
75 | #define LEON_REG_PS2_CTRL_RE 0x00000001 /* Receiver enable */ | ||
76 | #define LEON_REG_PS2_CTRL_TE 0x00000002 /* Transmitter enable */ | ||
77 | #define LEON_REG_PS2_CTRL_RI 0x00000004 /* Keyboard receive irq */ | ||
78 | #define LEON_REG_PS2_CTRL_TI 0x00000008 /* Keyboard transmit irq */ | ||
79 | |||
80 | #define LEON3_IRQMPSTATUS_CPUNR 28 | ||
81 | #define LEON3_IRQMPSTATUS_BROADCAST 27 | ||
82 | |||
83 | #define GPTIMER_CONFIG_IRQNT(a) (((a) >> 3) & 0x1f) | ||
84 | #define GPTIMER_CONFIG_ISSEP(a) ((a) & (1 << 8)) | ||
85 | #define GPTIMER_CONFIG_NTIMERS(a) ((a) & (0x7)) | ||
86 | #define LEON3_GPTIMER_CTRL_PENDING 0x10 | ||
87 | #define LEON3_GPTIMER_CONFIG_NRTIMERS(c) ((c)->config & 0x7) | ||
88 | #define LEON3_GPTIMER_CTRL_ISPENDING(r) (((r)&LEON3_GPTIMER_CTRL_PENDING) ? 1 : 0) | ||
89 | |||
90 | #ifdef CONFIG_SPARC_LEON | ||
91 | |||
92 | #ifndef __ASSEMBLY__ | ||
93 | |||
94 | struct leon3_irqctrl_regs_map { | ||
95 | u32 ilevel; | ||
96 | u32 ipend; | ||
97 | u32 iforce; | ||
98 | u32 iclear; | ||
99 | u32 mpstatus; | ||
100 | u32 mpbroadcast; | ||
101 | u32 notused02; | ||
102 | u32 notused03; | ||
103 | u32 notused10; | ||
104 | u32 notused11; | ||
105 | u32 notused12; | ||
106 | u32 notused13; | ||
107 | u32 notused20; | ||
108 | u32 notused21; | ||
109 | u32 notused22; | ||
110 | u32 notused23; | ||
111 | u32 mask[16]; | ||
112 | u32 force[16]; | ||
113 | /* Extended IRQ registers */ | ||
114 | u32 intid[16]; /* 0xc0 */ | ||
115 | }; | ||
116 | |||
117 | struct leon3_apbuart_regs_map { | ||
118 | u32 data; | ||
119 | u32 status; | ||
120 | u32 ctrl; | ||
121 | u32 scaler; | ||
122 | }; | ||
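/*
 * Editor's sketch (not part of the patch): the UART status bits above
 * plus this register map are enough for a minimal polled transmit.
 * The register pointer is hypothetical; real code obtains it from the
 * AMBA plug & play scan.
 */
static inline void leon_apbuart_putc_sketch(
	volatile struct leon3_apbuart_regs_map *uart, unsigned char c)
{
	/* Wait for the transmitter holding register to drain. */
	while (!(uart->status & LEON_REG_UART_STATUS_THE))
		;
	uart->data = c;
}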
123 | |||
124 | struct leon3_gptimerelem_regs_map { | ||
125 | u32 val; | ||
126 | u32 rld; | ||
127 | u32 ctrl; | ||
128 | u32 unused; | ||
129 | }; | ||
130 | |||
131 | struct leon3_gptimer_regs_map { | ||
132 | u32 scalar; | ||
133 | u32 scalar_reload; | ||
134 | u32 config; | ||
135 | u32 unused; | ||
136 | struct leon3_gptimerelem_regs_map e[8]; | ||
137 | }; | ||
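/*
 * Editor's sketch (not part of the patch): programming element 0 of a
 * GPTIMER core using the control bits and config decoder defined above;
 * 'regs' would come from the plug & play scan in real code.
 */
static inline void leon_gptimer_start_sketch(
	struct leon3_gptimer_regs_map *regs, u32 reload)
{
	if (GPTIMER_CONFIG_NTIMERS(regs->config) < 1)
		return;
	regs->e[0].rld = reload;	/* value reloaded on underflow */
	regs->e[0].ctrl = LEON3_GPTIMER_EN | LEON3_GPTIMER_RL |
			  LEON3_GPTIMER_LD | LEON3_GPTIMER_IRQEN;
}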
138 | |||
139 | /* | ||
140 | * Types and structures used for AMBA Plug & Play bus scanning | ||
141 | */ | ||
142 | |||
143 | #define AMBA_MAXAPB_DEVS 64 | ||
144 | #define AMBA_MAXAPB_DEVS_PERBUS 16 | ||
145 | |||
146 | struct amba_device_table { | ||
147 | int devnr; /* number of devices on AHB or APB bus */ | ||
148 | unsigned int *addr[16]; /* addresses of the devices' configuration tables */ | ||
149 | unsigned int allocbits[1]; /* 0=unallocated, 1=allocated driver */ | ||
150 | }; | ||
151 | |||
152 | struct amba_apbslv_device_table { | ||
153 | int devnr; /* number of devices on AHB or APB bus */ | ||
154 | unsigned int *addr[AMBA_MAXAPB_DEVS]; /* addresses of the devices' configuration tables */ | ||
155 | unsigned int apbmst[AMBA_MAXAPB_DEVS]; /* apb master if an entry is an apb slave */ | ||
156 | unsigned int apbmstidx[AMBA_MAXAPB_DEVS]; /* apb master idx if an entry is an apb slave */ | ||
157 | unsigned int allocbits[4]; /* 0=unallocated, 1=allocated driver */ | ||
158 | }; | ||
159 | |||
160 | struct amba_confarea_type { | ||
161 | struct amba_confarea_type *next;/* next bus in chain */ | ||
162 | struct amba_device_table ahbmst; | ||
163 | struct amba_device_table ahbslv; | ||
164 | struct amba_apbslv_device_table apbslv; | ||
165 | unsigned int apbmst; | ||
166 | }; | ||
167 | |||
168 | /* collect apb slaves */ | ||
169 | struct amba_apb_device { | ||
170 | unsigned int start, irq, bus_id; | ||
171 | struct amba_confarea_type *bus; | ||
172 | }; | ||
173 | |||
174 | /* collect ahb slaves */ | ||
175 | struct amba_ahb_device { | ||
176 | unsigned int start[4], irq, bus_id; | ||
177 | struct amba_confarea_type *bus; | ||
178 | }; | ||
179 | |||
180 | struct device_node; | ||
181 | void _amba_init(struct device_node *dp, struct device_node ***nextp); | ||
182 | |||
183 | extern struct leon3_irqctrl_regs_map *leon3_irqctrl_regs; | ||
184 | extern struct leon3_gptimer_regs_map *leon3_gptimer_regs; | ||
185 | extern struct amba_apb_device leon_percpu_timer_dev[16]; | ||
186 | extern int leondebug_irq_disable; | ||
187 | extern int leon_debug_irqout; | ||
188 | extern unsigned long leon3_gptimer_irq; | ||
189 | extern unsigned int sparc_leon_eirq; | ||
190 | |||
191 | #endif /* __ASSEMBLY__ */ | ||
192 | |||
193 | #define LEON3_IO_AREA 0xfff00000 | ||
194 | #define LEON3_CONF_AREA 0xff000 | ||
195 | #define LEON3_AHB_SLAVE_CONF_AREA (1 << 11) | ||
196 | |||
197 | #define LEON3_AHB_CONF_WORDS 8 | ||
198 | #define LEON3_APB_CONF_WORDS 2 | ||
199 | #define LEON3_AHB_MASTERS 16 | ||
200 | #define LEON3_AHB_SLAVES 16 | ||
201 | #define LEON3_APB_SLAVES 16 | ||
202 | #define LEON3_APBUARTS 8 | ||
203 | |||
204 | /* Vendor codes */ | ||
205 | #define VENDOR_GAISLER 1 | ||
206 | #define VENDOR_PENDER 2 | ||
207 | #define VENDOR_ESA 4 | ||
208 | #define VENDOR_OPENCORES 8 | ||
209 | |||
210 | /* Gaisler Research device id's */ | ||
211 | #define GAISLER_LEON3 0x003 | ||
212 | #define GAISLER_LEON3DSU 0x004 | ||
213 | #define GAISLER_ETHAHB 0x005 | ||
214 | #define GAISLER_APBMST 0x006 | ||
215 | #define GAISLER_AHBUART 0x007 | ||
216 | #define GAISLER_SRCTRL 0x008 | ||
217 | #define GAISLER_SDCTRL 0x009 | ||
218 | #define GAISLER_APBUART 0x00C | ||
219 | #define GAISLER_IRQMP 0x00D | ||
220 | #define GAISLER_AHBRAM 0x00E | ||
221 | #define GAISLER_GPTIMER 0x011 | ||
222 | #define GAISLER_PCITRG 0x012 | ||
223 | #define GAISLER_PCISBRG 0x013 | ||
224 | #define GAISLER_PCIFBRG 0x014 | ||
225 | #define GAISLER_PCITRACE 0x015 | ||
226 | #define GAISLER_PCIDMA 0x016 | ||
227 | #define GAISLER_AHBTRACE 0x017 | ||
228 | #define GAISLER_ETHDSU 0x018 | ||
229 | #define GAISLER_PIOPORT 0x01A | ||
230 | #define GAISLER_GRGPIO 0x01A | ||
231 | #define GAISLER_AHBJTAG 0x01C | ||
232 | #define GAISLER_ETHMAC 0x01D | ||
233 | #define GAISLER_AHB2AHB 0x020 | ||
234 | #define GAISLER_USBDC 0x021 | ||
235 | #define GAISLER_ATACTRL 0x024 | ||
236 | #define GAISLER_DDRSPA 0x025 | ||
237 | #define GAISLER_USBEHC 0x026 | ||
238 | #define GAISLER_USBUHC 0x027 | ||
239 | #define GAISLER_I2CMST 0x028 | ||
240 | #define GAISLER_SPICTRL 0x02D | ||
241 | #define GAISLER_DDR2SPA 0x02E | ||
242 | #define GAISLER_SPIMCTRL 0x045 | ||
243 | #define GAISLER_LEON4 0x048 | ||
244 | #define GAISLER_LEON4DSU 0x049 | ||
245 | #define GAISLER_AHBSTAT 0x052 | ||
246 | #define GAISLER_FTMCTRL 0x054 | ||
247 | #define GAISLER_KBD 0x060 | ||
248 | #define GAISLER_VGA 0x061 | ||
249 | #define GAISLER_SVGA 0x063 | ||
250 | #define GAISLER_GRSYSMON 0x066 | ||
251 | #define GAISLER_GRACECTRL 0x067 | ||
252 | |||
253 | #define GAISLER_L2TIME 0xffd /* internal device: leon2 timer */ | ||
254 | #define GAISLER_L2C 0xffe /* internal device: leon2compat */ | ||
255 | #define GAISLER_PLUGPLAY 0xfff /* internal device: plug & play config area */ | ||
256 | |||
257 | #define amba_vendor(x) (((x) >> 24) & 0xff) | ||
258 | |||
259 | #define amba_device(x) (((x) >> 12) & 0xfff) | ||
260 | |||
261 | #endif /* CONFIG_SPARC_LEON */ | ||
262 | |||
263 | #endif | ||
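The amba_vendor()/amba_device() macros above decode the first word of an AMBA plug & play configuration record, which packs the vendor id into bits 31:24 and the device id into bits 23:12. A hedged sketch of matching a core during a bus scan (the identification word value is made up, but decodes consistently):

	unsigned int id = 0x0100c000;	/* hypothetical first config word */

	if (amba_vendor(id) == VENDOR_GAISLER &&
	    amba_device(id) == GAISLER_APBUART) {
		/* found an APBUART core at this plug & play slot */
	}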
diff --git a/arch/sparc/include/asm/machines.h b/arch/sparc/include/asm/machines.h index c28c2f248794..cd9c099567e4 100644 --- a/arch/sparc/include/asm/machines.h +++ b/arch/sparc/include/asm/machines.h | |||
@@ -15,7 +15,7 @@ struct Sun_Machine_Models { | |||
15 | /* Current number of machines we know about that have an IDPROM | 15 | /* Current number of machines we know about that have an IDPROM |
16 | * machtype entry including one entry for the 0x80 OBP machines. | 16 | * machtype entry including one entry for the 0x80 OBP machines. |
17 | */ | 17 | */ |
18 | #define NUM_SUN_MACHINES 15 | 18 | #define NUM_SUN_MACHINES 16 |
19 | 19 | ||
20 | /* The machine type in the idprom area looks like this: | 20 | /* The machine type in the idprom area looks like this: |
21 | * | 21 | * |
@@ -30,6 +30,7 @@ struct Sun_Machine_Models { | |||
30 | 30 | ||
31 | #define SM_ARCH_MASK 0xf0 | 31 | #define SM_ARCH_MASK 0xf0 |
32 | #define SM_SUN4 0x20 | 32 | #define SM_SUN4 0x20 |
33 | #define M_LEON 0x30 | ||
33 | #define SM_SUN4C 0x50 | 34 | #define SM_SUN4C 0x50 |
34 | #define SM_SUN4M 0x70 | 35 | #define SM_SUN4M 0x70 |
35 | #define SM_SUN4M_OBP 0x80 | 36 | #define SM_SUN4M_OBP 0x80 |
@@ -41,6 +42,9 @@ struct Sun_Machine_Models { | |||
41 | #define SM_4_330 0x03 /* Sun 4/300 series */ | 42 | #define SM_4_330 0x03 /* Sun 4/300 series */ |
42 | #define SM_4_470 0x04 /* Sun 4/400 series */ | 43 | #define SM_4_470 0x04 /* Sun 4/400 series */ |
43 | 44 | ||
45 | /* Leon machines */ | ||
46 | #define M_LEON3_SOC 0x02 /* Leon3 SoC */ | ||
47 | |||
44 | /* Sun4c machines Full Name - PROM NAME */ | 48 | /* Sun4c machines Full Name - PROM NAME */ |
45 | #define SM_4C_SS1 0x01 /* Sun4c SparcStation 1 - Sun 4/60 */ | 49 | #define SM_4C_SS1 0x01 /* Sun4c SparcStation 1 - Sun 4/60 */ |
46 | #define SM_4C_IPC 0x02 /* Sun4c SparcStation IPC - Sun 4/40 */ | 50 | #define SM_4C_IPC 0x02 /* Sun4c SparcStation IPC - Sun 4/40 */ |
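With the new M_LEON architecture nibble and M_LEON3_SOC model code, classifying an idprom machtype byte looks roughly like this sketch (the helper name is illustrative, and 0x0f is the conventional low-nibble model mask):

	static int idprom_is_leon(unsigned char id_machtype)
	{
		/* high nibble: architecture; low nibble: model */
		return (id_machtype & SM_ARCH_MASK) == M_LEON &&
		       (id_machtype & 0x0f) == M_LEON3_SOC;
	}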
diff --git a/arch/sparc/include/asm/nmi.h b/arch/sparc/include/asm/nmi.h index fbd546dd4feb..72e6500e7ab0 100644 --- a/arch/sparc/include/asm/nmi.h +++ b/arch/sparc/include/asm/nmi.h | |||
@@ -5,6 +5,9 @@ extern int __init nmi_init(void); | |||
5 | extern void perfctr_irq(int irq, struct pt_regs *regs); | 5 | extern void perfctr_irq(int irq, struct pt_regs *regs); |
6 | extern void nmi_adjust_hz(unsigned int new_hz); | 6 | extern void nmi_adjust_hz(unsigned int new_hz); |
7 | 7 | ||
8 | extern int nmi_usable; | 8 | extern atomic_t nmi_active; |
9 | |||
10 | extern void start_nmi_watchdog(void *unused); | ||
11 | extern void stop_nmi_watchdog(void *unused); | ||
9 | 12 | ||
10 | #endif /* __NMI_H */ | 13 | #endif /* __NMI_H */ |
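Replacing the old nmi_usable flag with an atomic_t count plus per-cpu start/stop hooks lets a caller quiesce the watchdog around exclusive use of the performance counters. A hedged sketch of that pattern:

	if (atomic_read(&nmi_active) > 0) {
		on_each_cpu(stop_nmi_watchdog, NULL, 1);
		/* ... program the counters without watchdog interference ... */
		on_each_cpu(start_nmi_watchdog, NULL, 1);
	}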
diff --git a/arch/sparc/include/asm/pci.h b/arch/sparc/include/asm/pci.h index 6e14fd179335..d9c031f9910f 100644 --- a/arch/sparc/include/asm/pci.h +++ b/arch/sparc/include/asm/pci.h | |||
@@ -5,4 +5,7 @@ | |||
5 | #else | 5 | #else |
6 | #include <asm/pci_32.h> | 6 | #include <asm/pci_32.h> |
7 | #endif | 7 | #endif |
8 | |||
9 | #include <asm-generic/pci-dma-compat.h> | ||
10 | |||
8 | #endif | 11 | #endif |
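Pulling in <asm-generic/pci-dma-compat.h> here is what allows the large deletions from pci_32.h and pci_64.h below: the generic header supplies each legacy pci_* DMA call as a thin inline over the dma_* API, roughly like this (paraphrased, not the verbatim header):

	static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr,
						size_t size, int direction)
	{
		return dma_map_single(hwdev == NULL ? NULL : &hwdev->dev,
				      ptr, size, (enum dma_data_direction)direction);
	}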
diff --git a/arch/sparc/include/asm/pci_32.h b/arch/sparc/include/asm/pci_32.h index b41c4c198159..ac0e8369fd97 100644 --- a/arch/sparc/include/asm/pci_32.h +++ b/arch/sparc/include/asm/pci_32.h | |||
@@ -31,42 +31,8 @@ static inline void pcibios_penalize_isa_irq(int irq, int active) | |||
31 | */ | 31 | */ |
32 | #define PCI_DMA_BUS_IS_PHYS (0) | 32 | #define PCI_DMA_BUS_IS_PHYS (0) |
33 | 33 | ||
34 | #include <asm/scatterlist.h> | ||
35 | |||
36 | struct pci_dev; | 34 | struct pci_dev; |
37 | 35 | ||
38 | /* Allocate and map kernel buffer using consistent mode DMA for a device. | ||
39 | * hwdev should be valid struct pci_dev pointer for PCI devices. | ||
40 | */ | ||
41 | extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle); | ||
42 | |||
43 | /* Free and unmap a consistent DMA buffer. | ||
44 | * cpu_addr is what was returned from pci_alloc_consistent, | ||
45 | * size must be the same as what as passed into pci_alloc_consistent, | ||
46 | * and likewise dma_addr must be the same as what *dma_addrp was set to. | ||
47 | * | ||
48 | * References to the memory and mappings assosciated with cpu_addr/dma_addr | ||
49 | * past this call are illegal. | ||
50 | */ | ||
51 | extern void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle); | ||
52 | |||
53 | /* Map a single buffer of the indicated size for DMA in streaming mode. | ||
54 | * The 32-bit bus address to use is returned. | ||
55 | * | ||
56 | * Once the device is given the dma address, the device owns this memory | ||
57 | * until either pci_unmap_single or pci_dma_sync_single_for_cpu is performed. | ||
58 | */ | ||
59 | extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction); | ||
60 | |||
61 | /* Unmap a single streaming mode DMA translation. The dma_addr and size | ||
62 | * must match what was provided for in a previous pci_map_single call. All | ||
63 | * other usages are undefined. | ||
64 | * | ||
65 | * After this call, reads by the cpu to the buffer are guaranteed to see | ||
66 | * whatever the device wrote there. | ||
67 | */ | ||
68 | extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction); | ||
69 | |||
70 | /* pci_unmap_{single,page} is not a nop, thus... */ | 36 | /* pci_unmap_{single,page} is not a nop, thus... */ |
71 | #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ | 37 | #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ |
72 | dma_addr_t ADDR_NAME; | 38 | dma_addr_t ADDR_NAME; |
@@ -81,69 +47,6 @@ extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t | |||
81 | #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ | 47 | #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ |
82 | (((PTR)->LEN_NAME) = (VAL)) | 48 | (((PTR)->LEN_NAME) = (VAL)) |
83 | 49 | ||
84 | /* | ||
85 | * Same as above, only with pages instead of mapped addresses. | ||
86 | */ | ||
87 | extern dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page, | ||
88 | unsigned long offset, size_t size, int direction); | ||
89 | extern void pci_unmap_page(struct pci_dev *hwdev, | ||
90 | dma_addr_t dma_address, size_t size, int direction); | ||
91 | |||
92 | /* Map a set of buffers described by scatterlist in streaming | ||
93 | * mode for DMA. This is the scather-gather version of the | ||
94 | * above pci_map_single interface. Here the scatter gather list | ||
95 | * elements are each tagged with the appropriate dma address | ||
96 | * and length. They are obtained via sg_dma_{address,length}(SG). | ||
97 | * | ||
98 | * NOTE: An implementation may be able to use a smaller number of | ||
99 | * DMA address/length pairs than there are SG table elements. | ||
100 | * (for example via virtual mapping capabilities) | ||
101 | * The routine returns the number of addr/length pairs actually | ||
102 | * used, at most nents. | ||
103 | * | ||
104 | * Device ownership issues as mentioned above for pci_map_single are | ||
105 | * the same here. | ||
106 | */ | ||
107 | extern int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction); | ||
108 | |||
109 | /* Unmap a set of streaming mode DMA translations. | ||
110 | * Again, cpu read rules concerning calls here are the same as for | ||
111 | * pci_unmap_single() above. | ||
112 | */ | ||
113 | extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nhwents, int direction); | ||
114 | |||
115 | /* Make physical memory consistent for a single | ||
116 | * streaming mode DMA translation after a transfer. | ||
117 | * | ||
118 | * If you perform a pci_map_single() but wish to interrogate the | ||
119 | * buffer using the cpu, yet do not wish to teardown the PCI dma | ||
120 | * mapping, you must call this function before doing so. At the | ||
121 | * next point you give the PCI dma address back to the card, you | ||
122 | * must first perform a pci_dma_sync_for_device, and then the device | ||
123 | * again owns the buffer. | ||
124 | */ | ||
125 | extern void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction); | ||
126 | extern void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction); | ||
127 | |||
128 | /* Make physical memory consistent for a set of streaming | ||
129 | * mode DMA translations after a transfer. | ||
130 | * | ||
131 | * The same as pci_dma_sync_single_* but for a scatter-gather list, | ||
132 | * same rules and usage. | ||
133 | */ | ||
134 | extern void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction); | ||
135 | extern void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction); | ||
136 | |||
137 | /* Return whether the given PCI device DMA address mask can | ||
138 | * be supported properly. For example, if your device can | ||
139 | * only drive the low 24-bits during PCI bus mastering, then | ||
140 | * you would pass 0x00ffffff as the mask to this function. | ||
141 | */ | ||
142 | static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask) | ||
143 | { | ||
144 | return 1; | ||
145 | } | ||
146 | |||
147 | #ifdef CONFIG_PCI | 50 | #ifdef CONFIG_PCI |
148 | static inline void pci_dma_burst_advice(struct pci_dev *pdev, | 51 | static inline void pci_dma_burst_advice(struct pci_dev *pdev, |
149 | enum pci_dma_burst_strategy *strat, | 52 | enum pci_dma_burst_strategy *strat, |
@@ -154,14 +57,6 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev, | |||
154 | } | 57 | } |
155 | #endif | 58 | #endif |
156 | 59 | ||
157 | #define PCI_DMA_ERROR_CODE (~(dma_addr_t)0x0) | ||
158 | |||
159 | static inline int pci_dma_mapping_error(struct pci_dev *pdev, | ||
160 | dma_addr_t dma_addr) | ||
161 | { | ||
162 | return (dma_addr == PCI_DMA_ERROR_CODE); | ||
163 | } | ||
164 | |||
165 | struct device_node; | 60 | struct device_node; |
166 | extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev); | 61 | extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev); |
167 | 62 | ||
diff --git a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h index 7a1e3566e59c..5cc9f6aa5494 100644 --- a/arch/sparc/include/asm/pci_64.h +++ b/arch/sparc/include/asm/pci_64.h | |||
@@ -35,37 +35,6 @@ static inline void pcibios_penalize_isa_irq(int irq, int active) | |||
35 | */ | 35 | */ |
36 | #define PCI_DMA_BUS_IS_PHYS (0) | 36 | #define PCI_DMA_BUS_IS_PHYS (0) |
37 | 37 | ||
38 | static inline void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, | ||
39 | dma_addr_t *dma_handle) | ||
40 | { | ||
41 | return dma_alloc_coherent(&pdev->dev, size, dma_handle, GFP_ATOMIC); | ||
42 | } | ||
43 | |||
44 | static inline void pci_free_consistent(struct pci_dev *pdev, size_t size, | ||
45 | void *vaddr, dma_addr_t dma_handle) | ||
46 | { | ||
47 | return dma_free_coherent(&pdev->dev, size, vaddr, dma_handle); | ||
48 | } | ||
49 | |||
50 | static inline dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, | ||
51 | size_t size, int direction) | ||
52 | { | ||
53 | return dma_map_single(&pdev->dev, ptr, size, | ||
54 | (enum dma_data_direction) direction); | ||
55 | } | ||
56 | |||
57 | static inline void pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, | ||
58 | size_t size, int direction) | ||
59 | { | ||
60 | dma_unmap_single(&pdev->dev, dma_addr, size, | ||
61 | (enum dma_data_direction) direction); | ||
62 | } | ||
63 | |||
64 | #define pci_map_page(dev, page, off, size, dir) \ | ||
65 | pci_map_single(dev, (page_address(page) + (off)), size, dir) | ||
66 | #define pci_unmap_page(dev,addr,sz,dir) \ | ||
67 | pci_unmap_single(dev,addr,sz,dir) | ||
68 | |||
69 | /* pci_unmap_{single,page} is not a nop, thus... */ | 38 | /* pci_unmap_{single,page} is not a nop, thus... */ |
70 | #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ | 39 | #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ |
71 | dma_addr_t ADDR_NAME; | 40 | dma_addr_t ADDR_NAME; |
@@ -80,57 +49,6 @@ static inline void pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, | |||
80 | #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ | 49 | #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ |
81 | (((PTR)->LEN_NAME) = (VAL)) | 50 | (((PTR)->LEN_NAME) = (VAL)) |
82 | 51 | ||
83 | static inline int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, | ||
84 | int nents, int direction) | ||
85 | { | ||
86 | return dma_map_sg(&pdev->dev, sg, nents, | ||
87 | (enum dma_data_direction) direction); | ||
88 | } | ||
89 | |||
90 | static inline void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, | ||
91 | int nents, int direction) | ||
92 | { | ||
93 | dma_unmap_sg(&pdev->dev, sg, nents, | ||
94 | (enum dma_data_direction) direction); | ||
95 | } | ||
96 | |||
97 | static inline void pci_dma_sync_single_for_cpu(struct pci_dev *pdev, | ||
98 | dma_addr_t dma_handle, | ||
99 | size_t size, int direction) | ||
100 | { | ||
101 | dma_sync_single_for_cpu(&pdev->dev, dma_handle, size, | ||
102 | (enum dma_data_direction) direction); | ||
103 | } | ||
104 | |||
105 | static inline void pci_dma_sync_single_for_device(struct pci_dev *pdev, | ||
106 | dma_addr_t dma_handle, | ||
107 | size_t size, int direction) | ||
108 | { | ||
109 | /* No flushing needed to sync cpu writes to the device. */ | ||
110 | } | ||
111 | |||
112 | static inline void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, | ||
113 | struct scatterlist *sg, | ||
114 | int nents, int direction) | ||
115 | { | ||
116 | dma_sync_sg_for_cpu(&pdev->dev, sg, nents, | ||
117 | (enum dma_data_direction) direction); | ||
118 | } | ||
119 | |||
120 | static inline void pci_dma_sync_sg_for_device(struct pci_dev *pdev, | ||
121 | struct scatterlist *sg, | ||
122 | int nelems, int direction) | ||
123 | { | ||
124 | /* No flushing needed to sync cpu writes to the device. */ | ||
125 | } | ||
126 | |||
127 | /* Return whether the given PCI device DMA address mask can | ||
128 | * be supported properly. For example, if your device can | ||
129 | * only drive the low 24-bits during PCI bus mastering, then | ||
130 | * you would pass 0x00ffffff as the mask to this function. | ||
131 | */ | ||
132 | extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask); | ||
133 | |||
134 | /* PCI IOMMU mapping bypass support. */ | 52 | /* PCI IOMMU mapping bypass support. */ |
135 | 53 | ||
136 | /* PCI 64-bit addressing works for all slots on all controller | 54 | /* PCI 64-bit addressing works for all slots on all controller |
@@ -140,12 +58,6 @@ extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask); | |||
140 | #define PCI64_REQUIRED_MASK (~(dma64_addr_t)0) | 58 | #define PCI64_REQUIRED_MASK (~(dma64_addr_t)0) |
141 | #define PCI64_ADDR_BASE 0xfffc000000000000UL | 59 | #define PCI64_ADDR_BASE 0xfffc000000000000UL |
142 | 60 | ||
143 | static inline int pci_dma_mapping_error(struct pci_dev *pdev, | ||
144 | dma_addr_t dma_addr) | ||
145 | { | ||
146 | return dma_mapping_error(&pdev->dev, dma_addr); | ||
147 | } | ||
148 | |||
149 | #ifdef CONFIG_PCI | 61 | #ifdef CONFIG_PCI |
150 | static inline void pci_dma_burst_advice(struct pci_dev *pdev, | 62 | static inline void pci_dma_burst_advice(struct pci_dev *pdev, |
151 | enum pci_dma_burst_strategy *strat, | 63 | enum pci_dma_burst_strategy *strat, |
diff --git a/arch/sparc/include/asm/perf_counter.h b/arch/sparc/include/asm/perf_counter.h new file mode 100644 index 000000000000..5d7a8ca0e491 --- /dev/null +++ b/arch/sparc/include/asm/perf_counter.h | |||
@@ -0,0 +1,14 @@ | |||
1 | #ifndef __ASM_SPARC_PERF_COUNTER_H | ||
2 | #define __ASM_SPARC_PERF_COUNTER_H | ||
3 | |||
4 | extern void set_perf_counter_pending(void); | ||
5 | |||
6 | #define PERF_COUNTER_INDEX_OFFSET 0 | ||
7 | |||
8 | #ifdef CONFIG_PERF_COUNTERS | ||
9 | extern void init_hw_perf_counters(void); | ||
10 | #else | ||
11 | static inline void init_hw_perf_counters(void) { } | ||
12 | #endif | ||
13 | |||
14 | #endif | ||
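The empty stub for !CONFIG_PERF_COUNTERS follows the usual kernel pattern: call sites invoke the function unconditionally and the compiler drops the no-op, e.g. (hypothetical call site):

	void __init sparc_pmu_setup(void)	/* illustrative caller */
	{
		init_hw_perf_counters();	/* no-op unless CONFIG_PERF_COUNTERS=y */
	}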
diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h index 808555fc1d58..1407c07bdade 100644 --- a/arch/sparc/include/asm/pgtsrmmu.h +++ b/arch/sparc/include/asm/pgtsrmmu.h | |||
@@ -267,6 +267,7 @@ static inline void srmmu_flush_tlb_page(unsigned long page) | |||
267 | 267 | ||
268 | } | 268 | } |
269 | 269 | ||
270 | #ifndef CONFIG_SPARC_LEON | ||
270 | static inline unsigned long srmmu_hwprobe(unsigned long vaddr) | 271 | static inline unsigned long srmmu_hwprobe(unsigned long vaddr) |
271 | { | 272 | { |
272 | unsigned long retval; | 273 | unsigned long retval; |
@@ -278,6 +279,9 @@ static inline unsigned long srmmu_hwprobe(unsigned long vaddr) | |||
278 | 279 | ||
279 | return retval; | 280 | return retval; |
280 | } | 281 | } |
282 | #else | ||
283 | #define srmmu_hwprobe(addr) (srmmu_swprobe(addr, 0) & SRMMU_PTE_PMASK) | ||
284 | #endif | ||
281 | 285 | ||
282 | static inline int | 286 | static inline int |
283 | srmmu_get_pte (unsigned long addr) | 287 | srmmu_get_pte (unsigned long addr) |
diff --git a/arch/sparc/include/asm/prom.h b/arch/sparc/include/asm/prom.h index be8d7aaeb60d..82a190d7efc1 100644 --- a/arch/sparc/include/asm/prom.h +++ b/arch/sparc/include/asm/prom.h | |||
@@ -118,5 +118,8 @@ extern struct device_node *of_console_device; | |||
118 | extern char *of_console_path; | 118 | extern char *of_console_path; |
119 | extern char *of_console_options; | 119 | extern char *of_console_options; |
120 | 120 | ||
121 | extern void (*prom_build_more)(struct device_node *dp, struct device_node ***nextp); | ||
122 | extern char *build_full_name(struct device_node *dp); | ||
123 | |||
121 | #endif /* __KERNEL__ */ | 124 | #endif /* __KERNEL__ */ |
122 | #endif /* _SPARC_PROM_H */ | 125 | #endif /* _SPARC_PROM_H */ |
diff --git a/arch/sparc/include/asm/socket.h b/arch/sparc/include/asm/socket.h index 982a12f959f4..3a5ae3d12088 100644 --- a/arch/sparc/include/asm/socket.h +++ b/arch/sparc/include/asm/socket.h | |||
@@ -29,6 +29,9 @@ | |||
29 | #define SO_RCVBUFFORCE 0x100b | 29 | #define SO_RCVBUFFORCE 0x100b |
30 | #define SO_ERROR 0x1007 | 30 | #define SO_ERROR 0x1007 |
31 | #define SO_TYPE 0x1008 | 31 | #define SO_TYPE 0x1008 |
32 | #define SO_PROTOCOL 0x1028 | ||
33 | #define SO_DOMAIN 0x1029 | ||
34 | |||
32 | 35 | ||
33 | /* Linux specific, keep the same. */ | 36 | /* Linux specific, keep the same. */ |
34 | #define SO_NO_CHECK 0x000b | 37 | #define SO_NO_CHECK 0x000b |
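SO_PROTOCOL and SO_DOMAIN are read-only options reporting the arguments the socket was created with; a userspace sketch of querying them (fd is an open socket):

	int proto;
	socklen_t len = sizeof(proto);

	if (getsockopt(fd, SOL_SOCKET, SO_PROTOCOL, &proto, &len) == 0)
		printf("protocol: %d\n", proto);	/* e.g. IPPROTO_TCP */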
diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h index 46f91ab66a50..857630cff636 100644 --- a/arch/sparc/include/asm/spinlock_32.h +++ b/arch/sparc/include/asm/spinlock_32.h | |||
@@ -76,7 +76,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) | |||
76 | * | 76 | * |
77 | * Unfortunately this scheme limits us to ~16,000,000 cpus. | 77 | * Unfortunately this scheme limits us to ~16,000,000 cpus. |
78 | */ | 78 | */ |
79 | static inline void __read_lock(raw_rwlock_t *rw) | 79 | static inline void arch_read_lock(raw_rwlock_t *rw) |
80 | { | 80 | { |
81 | register raw_rwlock_t *lp asm("g1"); | 81 | register raw_rwlock_t *lp asm("g1"); |
82 | lp = rw; | 82 | lp = rw; |
@@ -92,11 +92,11 @@ static inline void __read_lock(raw_rwlock_t *rw) | |||
92 | #define __raw_read_lock(lock) \ | 92 | #define __raw_read_lock(lock) \ |
93 | do { unsigned long flags; \ | 93 | do { unsigned long flags; \ |
94 | local_irq_save(flags); \ | 94 | local_irq_save(flags); \ |
95 | __read_lock(lock); \ | 95 | arch_read_lock(lock); \ |
96 | local_irq_restore(flags); \ | 96 | local_irq_restore(flags); \ |
97 | } while(0) | 97 | } while(0) |
98 | 98 | ||
99 | static inline void __read_unlock(raw_rwlock_t *rw) | 99 | static inline void arch_read_unlock(raw_rwlock_t *rw) |
100 | { | 100 | { |
101 | register raw_rwlock_t *lp asm("g1"); | 101 | register raw_rwlock_t *lp asm("g1"); |
102 | lp = rw; | 102 | lp = rw; |
@@ -112,7 +112,7 @@ static inline void __read_unlock(raw_rwlock_t *rw) | |||
112 | #define __raw_read_unlock(lock) \ | 112 | #define __raw_read_unlock(lock) \ |
113 | do { unsigned long flags; \ | 113 | do { unsigned long flags; \ |
114 | local_irq_save(flags); \ | 114 | local_irq_save(flags); \ |
115 | __read_unlock(lock); \ | 115 | arch_read_unlock(lock); \ |
116 | local_irq_restore(flags); \ | 116 | local_irq_restore(flags); \ |
117 | } while(0) | 117 | } while(0) |
118 | 118 | ||
@@ -150,7 +150,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) | |||
150 | return (val == 0); | 150 | return (val == 0); |
151 | } | 151 | } |
152 | 152 | ||
153 | static inline int __read_trylock(raw_rwlock_t *rw) | 153 | static inline int arch_read_trylock(raw_rwlock_t *rw) |
154 | { | 154 | { |
155 | register raw_rwlock_t *lp asm("g1"); | 155 | register raw_rwlock_t *lp asm("g1"); |
156 | register int res asm("o0"); | 156 | register int res asm("o0"); |
@@ -169,7 +169,7 @@ static inline int __read_trylock(raw_rwlock_t *rw) | |||
169 | ({ unsigned long flags; \ | 169 | ({ unsigned long flags; \ |
170 | int res; \ | 170 | int res; \ |
171 | local_irq_save(flags); \ | 171 | local_irq_save(flags); \ |
172 | res = __read_trylock(lock); \ | 172 | res = arch_read_trylock(lock); \ |
173 | local_irq_restore(flags); \ | 173 | local_irq_restore(flags); \ |
174 | res; \ | 174 | res; \ |
175 | }) | 175 | }) |
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h index f6b2b92ad8d2..43e514783582 100644 --- a/arch/sparc/include/asm/spinlock_64.h +++ b/arch/sparc/include/asm/spinlock_64.h | |||
@@ -92,7 +92,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla | |||
92 | 92 | ||
93 | /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */ | 93 | /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */ |
94 | 94 | ||
95 | static void inline __read_lock(raw_rwlock_t *lock) | 95 | static void inline arch_read_lock(raw_rwlock_t *lock) |
96 | { | 96 | { |
97 | unsigned long tmp1, tmp2; | 97 | unsigned long tmp1, tmp2; |
98 | 98 | ||
@@ -115,7 +115,7 @@ static void inline __read_lock(raw_rwlock_t *lock) | |||
115 | : "memory"); | 115 | : "memory"); |
116 | } | 116 | } |
117 | 117 | ||
118 | static int inline __read_trylock(raw_rwlock_t *lock) | 118 | static int inline arch_read_trylock(raw_rwlock_t *lock) |
119 | { | 119 | { |
120 | int tmp1, tmp2; | 120 | int tmp1, tmp2; |
121 | 121 | ||
@@ -136,7 +136,7 @@ static int inline __read_trylock(raw_rwlock_t *lock) | |||
136 | return tmp1; | 136 | return tmp1; |
137 | } | 137 | } |
138 | 138 | ||
139 | static void inline __read_unlock(raw_rwlock_t *lock) | 139 | static void inline arch_read_unlock(raw_rwlock_t *lock) |
140 | { | 140 | { |
141 | unsigned long tmp1, tmp2; | 141 | unsigned long tmp1, tmp2; |
142 | 142 | ||
@@ -152,7 +152,7 @@ static void inline __read_unlock(raw_rwlock_t *lock) | |||
152 | : "memory"); | 152 | : "memory"); |
153 | } | 153 | } |
154 | 154 | ||
155 | static void inline __write_lock(raw_rwlock_t *lock) | 155 | static void inline arch_write_lock(raw_rwlock_t *lock) |
156 | { | 156 | { |
157 | unsigned long mask, tmp1, tmp2; | 157 | unsigned long mask, tmp1, tmp2; |
158 | 158 | ||
@@ -177,7 +177,7 @@ static void inline __write_lock(raw_rwlock_t *lock) | |||
177 | : "memory"); | 177 | : "memory"); |
178 | } | 178 | } |
179 | 179 | ||
180 | static void inline __write_unlock(raw_rwlock_t *lock) | 180 | static void inline arch_write_unlock(raw_rwlock_t *lock) |
181 | { | 181 | { |
182 | __asm__ __volatile__( | 182 | __asm__ __volatile__( |
183 | " stw %%g0, [%0]" | 183 | " stw %%g0, [%0]" |
@@ -186,7 +186,7 @@ static void inline __write_unlock(raw_rwlock_t *lock) | |||
186 | : "memory"); | 186 | : "memory"); |
187 | } | 187 | } |
188 | 188 | ||
189 | static int inline __write_trylock(raw_rwlock_t *lock) | 189 | static int inline arch_write_trylock(raw_rwlock_t *lock) |
190 | { | 190 | { |
191 | unsigned long mask, tmp1, tmp2, result; | 191 | unsigned long mask, tmp1, tmp2, result; |
192 | 192 | ||
@@ -210,14 +210,14 @@ static int inline __write_trylock(raw_rwlock_t *lock) | |||
210 | return result; | 210 | return result; |
211 | } | 211 | } |
212 | 212 | ||
213 | #define __raw_read_lock(p) __read_lock(p) | 213 | #define __raw_read_lock(p) arch_read_lock(p) |
214 | #define __raw_read_lock_flags(p, f) __read_lock(p) | 214 | #define __raw_read_lock_flags(p, f) arch_read_lock(p) |
215 | #define __raw_read_trylock(p) __read_trylock(p) | 215 | #define __raw_read_trylock(p) arch_read_trylock(p) |
216 | #define __raw_read_unlock(p) __read_unlock(p) | 216 | #define __raw_read_unlock(p) arch_read_unlock(p) |
217 | #define __raw_write_lock(p) __write_lock(p) | 217 | #define __raw_write_lock(p) arch_write_lock(p) |
218 | #define __raw_write_lock_flags(p, f) __write_lock(p) | 218 | #define __raw_write_lock_flags(p, f) arch_write_lock(p) |
219 | #define __raw_write_unlock(p) __write_unlock(p) | 219 | #define __raw_write_unlock(p) arch_write_unlock(p) |
220 | #define __raw_write_trylock(p) __write_trylock(p) | 220 | #define __raw_write_trylock(p) arch_write_trylock(p) |
221 | 221 | ||
222 | #define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) | 222 | #define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) |
223 | #define __raw_write_can_lock(rw) (!(rw)->lock) | 223 | #define __raw_write_can_lock(rw) (!(rw)->lock) |
diff --git a/arch/sparc/include/asm/system_32.h b/arch/sparc/include/asm/system_32.h index 751c8c17f5a0..890036b3689a 100644 --- a/arch/sparc/include/asm/system_32.h +++ b/arch/sparc/include/asm/system_32.h | |||
@@ -32,6 +32,7 @@ enum sparc_cpu { | |||
32 | sun4u = 0x05, /* V8 ploos ploos */ | 32 | sun4u = 0x05, /* V8 ploos ploos */ |
33 | sun_unknown = 0x06, | 33 | sun_unknown = 0x06, |
34 | ap1000 = 0x07, /* almost a sun4m */ | 34 | ap1000 = 0x07, /* almost a sun4m */ |
35 | sparc_leon = 0x08, /* Leon SoC */ | ||
35 | }; | 36 | }; |
36 | 37 | ||
37 | /* Really, userland should not be looking at any of this... */ | 38 | /* Really, userland should not be looking at any of this... */ |
diff --git a/arch/sparc/include/asm/system_64.h b/arch/sparc/include/asm/system_64.h index 6c077816ab28..25e848f0cad7 100644 --- a/arch/sparc/include/asm/system_64.h +++ b/arch/sparc/include/asm/system_64.h | |||
@@ -29,6 +29,10 @@ enum sparc_cpu { | |||
29 | /* This cannot ever be a sun4c :) That's just history. */ | 29 | /* This cannot ever be a sun4c :) That's just history. */ |
30 | #define ARCH_SUN4C 0 | 30 | #define ARCH_SUN4C 0 |
31 | 31 | ||
32 | extern const char *sparc_cpu_type; | ||
33 | extern const char *sparc_fpu_type; | ||
34 | extern const char *sparc_pmu_type; | ||
35 | |||
32 | extern char reboot_command[]; | 36 | extern char reboot_command[]; |
33 | 37 | ||
34 | /* These are here in an effort to more fully work around Spitfire Errata | 38 | /* These are here in an effort to more fully work around Spitfire Errata |
diff --git a/arch/sparc/include/asm/types.h b/arch/sparc/include/asm/types.h index de671d73baed..09c79a9c8516 100644 --- a/arch/sparc/include/asm/types.h +++ b/arch/sparc/include/asm/types.h | |||
@@ -8,9 +8,8 @@ | |||
8 | * need to be careful to avoid a name clashes. | 8 | * need to be careful to avoid a name clashes. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #if defined(__sparc__) && defined(__arch64__) | 11 | #if defined(__sparc__) |
12 | 12 | ||
13 | /*** SPARC 64 bit ***/ | ||
14 | #include <asm-generic/int-ll64.h> | 13 | #include <asm-generic/int-ll64.h> |
15 | 14 | ||
16 | #ifndef __ASSEMBLY__ | 15 | #ifndef __ASSEMBLY__ |
@@ -26,33 +25,21 @@ typedef unsigned short umode_t; | |||
26 | /* Dma addresses come in generic and 64-bit flavours. */ | 25 | /* Dma addresses come in generic and 64-bit flavours. */ |
27 | 26 | ||
28 | typedef u32 dma_addr_t; | 27 | typedef u32 dma_addr_t; |
29 | typedef u64 dma64_addr_t; | ||
30 | 28 | ||
31 | #endif /* __ASSEMBLY__ */ | 29 | #if defined(__arch64__) |
32 | 30 | ||
33 | #endif /* __KERNEL__ */ | 31 | /*** SPARC 64 bit ***/ |
32 | typedef u64 dma64_addr_t; | ||
34 | #else | 33 | #else |
35 | |||
36 | /*** SPARC 32 bit ***/ | 34 | /*** SPARC 32 bit ***/ |
37 | #include <asm-generic/int-ll64.h> | ||
38 | |||
39 | #ifndef __ASSEMBLY__ | ||
40 | |||
41 | typedef unsigned short umode_t; | ||
42 | |||
43 | #endif /* __ASSEMBLY__ */ | ||
44 | |||
45 | #ifdef __KERNEL__ | ||
46 | |||
47 | #ifndef __ASSEMBLY__ | ||
48 | |||
49 | typedef u32 dma_addr_t; | ||
50 | typedef u32 dma64_addr_t; | 35 | typedef u32 dma64_addr_t; |
51 | 36 | ||
37 | #endif /* defined(__arch64__) */ | ||
38 | |||
52 | #endif /* __ASSEMBLY__ */ | 39 | #endif /* __ASSEMBLY__ */ |
53 | 40 | ||
54 | #endif /* __KERNEL__ */ | 41 | #endif /* __KERNEL__ */ |
55 | 42 | ||
56 | #endif /* defined(__sparc__) && defined(__arch64__) */ | 43 | #endif /* defined(__sparc__) */ |
57 | 44 | ||
58 | #endif /* defined(_SPARC_TYPES_H) */ | 45 | #endif /* defined(_SPARC_TYPES_H) */ |
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h index a38c03238918..9ea271e19c70 100644 --- a/arch/sparc/include/asm/uaccess_64.h +++ b/arch/sparc/include/asm/uaccess_64.h | |||
@@ -7,8 +7,8 @@ | |||
7 | 7 | ||
8 | #ifdef __KERNEL__ | 8 | #ifdef __KERNEL__ |
9 | #include <linux/compiler.h> | 9 | #include <linux/compiler.h> |
10 | #include <linux/sched.h> | ||
11 | #include <linux/string.h> | 10 | #include <linux/string.h> |
11 | #include <linux/thread_info.h> | ||
12 | #include <asm/asi.h> | 12 | #include <asm/asi.h> |
13 | #include <asm/system.h> | 13 | #include <asm/system.h> |
14 | #include <asm/spitfire.h> | 14 | #include <asm/spitfire.h> |
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h index b2c406de7d4f..706df669f3b8 100644 --- a/arch/sparc/include/asm/unistd.h +++ b/arch/sparc/include/asm/unistd.h | |||
@@ -395,8 +395,9 @@ | |||
395 | #define __NR_preadv 324 | 395 | #define __NR_preadv 324 |
396 | #define __NR_pwritev 325 | 396 | #define __NR_pwritev 325 |
397 | #define __NR_rt_tgsigqueueinfo 326 | 397 | #define __NR_rt_tgsigqueueinfo 326 |
398 | #define __NR_perf_counter_open 327 | ||
398 | 399 | ||
399 | #define NR_SYSCALLS 327 | 400 | #define NR_SYSCALLS 328 |
400 | 401 | ||
401 | #ifdef __32bit_syscall_numbers__ | 402 | #ifdef __32bit_syscall_numbers__ |
402 | /* Sparc 32-bit only has the "setresuid32", "getresuid32" variants, | 403 | /* Sparc 32-bit only has the "setresuid32", "getresuid32" variants, |
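Syscall 327 exposes perf_counter_open; a userspace sketch of invoking it directly (the attribute struct is elided, and the argument order follows this series):

	/* int fd = perf_counter_open(&attr, pid, cpu, group_fd, flags); */
	int fd = syscall(__NR_perf_counter_open, &attr,
			 0 /* pid: current task */, -1 /* cpu: any */,
			 -1 /* group_fd: none */, 0 /* flags */);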
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile index 475ce4696acd..247cc620cee5 100644 --- a/arch/sparc/kernel/Makefile +++ b/arch/sparc/kernel/Makefile | |||
@@ -41,6 +41,8 @@ obj-y += of_device_common.o | |||
41 | obj-y += of_device_$(BITS).o | 41 | obj-y += of_device_$(BITS).o |
42 | obj-$(CONFIG_SPARC64) += prom_irqtrans.o | 42 | obj-$(CONFIG_SPARC64) += prom_irqtrans.o |
43 | 43 | ||
44 | obj-$(CONFIG_SPARC_LEON) += leon_kernel.o | ||
45 | |||
44 | obj-$(CONFIG_SPARC64) += reboot.o | 46 | obj-$(CONFIG_SPARC64) += reboot.o |
45 | obj-$(CONFIG_SPARC64) += sysfs.o | 47 | obj-$(CONFIG_SPARC64) += sysfs.o |
46 | obj-$(CONFIG_SPARC64) += iommu.o | 48 | obj-$(CONFIG_SPARC64) += iommu.o |
@@ -61,7 +63,7 @@ obj-$(CONFIG_SPARC64_SMP) += cpumap.o | |||
61 | obj-$(CONFIG_SPARC32) += devres.o | 63 | obj-$(CONFIG_SPARC32) += devres.o |
62 | devres-y := ../../../kernel/irq/devres.o | 64 | devres-y := ../../../kernel/irq/devres.o |
63 | 65 | ||
64 | obj-$(CONFIG_SPARC32) += dma.o | 66 | obj-y += dma.o |
65 | 67 | ||
66 | obj-$(CONFIG_SPARC32_PCI) += pcic.o | 68 | obj-$(CONFIG_SPARC32_PCI) += pcic.o |
67 | 69 | ||
@@ -101,3 +103,6 @@ obj-$(CONFIG_SUN_LDOMS) += ldc.o vio.o viohs.o ds.o | |||
101 | obj-$(CONFIG_AUDIT) += audit.o | 103 | obj-$(CONFIG_AUDIT) += audit.o |
102 | audit--$(CONFIG_AUDIT) := compat_audit.o | 104 | audit--$(CONFIG_AUDIT) := compat_audit.o |
103 | obj-$(CONFIG_COMPAT) += $(audit--y) | 105 | obj-$(CONFIG_COMPAT) += $(audit--y) |
106 | |||
107 | pc--$(CONFIG_PERF_COUNTERS) := perf_counter.o | ||
108 | obj-$(CONFIG_SPARC64) += $(pc--y) | ||
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c index d85c3dc4953a..1446df90ef85 100644 --- a/arch/sparc/kernel/cpu.c +++ b/arch/sparc/kernel/cpu.c | |||
@@ -312,7 +312,12 @@ void __cpuinit cpu_probe(void) | |||
312 | 312 | ||
313 | psr = get_psr(); | 313 | psr = get_psr(); |
314 | put_psr(psr | PSR_EF); | 314 | put_psr(psr | PSR_EF); |
315 | #ifdef CONFIG_SPARC_LEON | ||
316 | fpu_vers = 7; | ||
317 | #else | ||
315 | fpu_vers = ((get_fsr() >> 17) & 0x7); | 318 | fpu_vers = ((get_fsr() >> 17) & 0x7); |
319 | #endif | ||
320 | |||
316 | put_psr(psr); | 321 | put_psr(psr); |
317 | 322 | ||
318 | set_cpu_and_fpu(psr_impl, psr_vers, fpu_vers); | 323 | set_cpu_and_fpu(psr_impl, psr_vers, fpu_vers); |
diff --git a/arch/sparc/kernel/dma.c b/arch/sparc/kernel/dma.c index 524c32f97c55..e1ba8ee21b9a 100644 --- a/arch/sparc/kernel/dma.c +++ b/arch/sparc/kernel/dma.c | |||
@@ -1,178 +1,13 @@ | |||
1 | /* dma.c: PCI and SBUS DMA accessors for 32-bit sparc. | ||
2 | * | ||
3 | * Copyright (C) 2008 David S. Miller <davem@davemloft.net> | ||
4 | */ | ||
5 | |||
6 | #include <linux/kernel.h> | 1 | #include <linux/kernel.h> |
7 | #include <linux/module.h> | 2 | #include <linux/module.h> |
8 | #include <linux/dma-mapping.h> | 3 | #include <linux/dma-mapping.h> |
9 | #include <linux/scatterlist.h> | 4 | #include <linux/dma-debug.h> |
10 | #include <linux/mm.h> | ||
11 | |||
12 | #ifdef CONFIG_PCI | ||
13 | #include <linux/pci.h> | ||
14 | #endif | ||
15 | 5 | ||
16 | #include "dma.h" | 6 | #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 15) |
17 | 7 | ||
18 | int dma_supported(struct device *dev, u64 mask) | 8 | static int __init dma_init(void) |
19 | { | 9 | { |
20 | #ifdef CONFIG_PCI | 10 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); |
21 | if (dev->bus == &pci_bus_type) | ||
22 | return pci_dma_supported(to_pci_dev(dev), mask); | ||
23 | #endif | ||
24 | return 0; | 11 | return 0; |
25 | } | 12 | } |
26 | EXPORT_SYMBOL(dma_supported); | 13 | fs_initcall(dma_init); |
27 | |||
28 | int dma_set_mask(struct device *dev, u64 dma_mask) | ||
29 | { | ||
30 | #ifdef CONFIG_PCI | ||
31 | if (dev->bus == &pci_bus_type) | ||
32 | return pci_set_dma_mask(to_pci_dev(dev), dma_mask); | ||
33 | #endif | ||
34 | return -EOPNOTSUPP; | ||
35 | } | ||
36 | EXPORT_SYMBOL(dma_set_mask); | ||
37 | |||
38 | static void *dma32_alloc_coherent(struct device *dev, size_t size, | ||
39 | dma_addr_t *dma_handle, gfp_t flag) | ||
40 | { | ||
41 | #ifdef CONFIG_PCI | ||
42 | if (dev->bus == &pci_bus_type) | ||
43 | return pci_alloc_consistent(to_pci_dev(dev), size, dma_handle); | ||
44 | #endif | ||
45 | return sbus_alloc_consistent(dev, size, dma_handle); | ||
46 | } | ||
47 | |||
48 | static void dma32_free_coherent(struct device *dev, size_t size, | ||
49 | void *cpu_addr, dma_addr_t dma_handle) | ||
50 | { | ||
51 | #ifdef CONFIG_PCI | ||
52 | if (dev->bus == &pci_bus_type) { | ||
53 | pci_free_consistent(to_pci_dev(dev), size, | ||
54 | cpu_addr, dma_handle); | ||
55 | return; | ||
56 | } | ||
57 | #endif | ||
58 | sbus_free_consistent(dev, size, cpu_addr, dma_handle); | ||
59 | } | ||
60 | |||
61 | static dma_addr_t dma32_map_page(struct device *dev, struct page *page, | ||
62 | unsigned long offset, size_t size, | ||
63 | enum dma_data_direction direction) | ||
64 | { | ||
65 | #ifdef CONFIG_PCI | ||
66 | if (dev->bus == &pci_bus_type) | ||
67 | return pci_map_page(to_pci_dev(dev), page, offset, | ||
68 | size, (int)direction); | ||
69 | #endif | ||
70 | return sbus_map_single(dev, page_address(page) + offset, | ||
71 | size, (int)direction); | ||
72 | } | ||
73 | |||
74 | static void dma32_unmap_page(struct device *dev, dma_addr_t dma_address, | ||
75 | size_t size, enum dma_data_direction direction) | ||
76 | { | ||
77 | #ifdef CONFIG_PCI | ||
78 | if (dev->bus == &pci_bus_type) { | ||
79 | pci_unmap_page(to_pci_dev(dev), dma_address, | ||
80 | size, (int)direction); | ||
81 | return; | ||
82 | } | ||
83 | #endif | ||
84 | sbus_unmap_single(dev, dma_address, size, (int)direction); | ||
85 | } | ||
86 | |||
87 | static int dma32_map_sg(struct device *dev, struct scatterlist *sg, | ||
88 | int nents, enum dma_data_direction direction) | ||
89 | { | ||
90 | #ifdef CONFIG_PCI | ||
91 | if (dev->bus == &pci_bus_type) | ||
92 | return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction); | ||
93 | #endif | ||
94 | return sbus_map_sg(dev, sg, nents, direction); | ||
95 | } | ||
96 | |||
97 | void dma32_unmap_sg(struct device *dev, struct scatterlist *sg, | ||
98 | int nents, enum dma_data_direction direction) | ||
99 | { | ||
100 | #ifdef CONFIG_PCI | ||
101 | if (dev->bus == &pci_bus_type) { | ||
102 | pci_unmap_sg(to_pci_dev(dev), sg, nents, (int)direction); | ||
103 | return; | ||
104 | } | ||
105 | #endif | ||
106 | sbus_unmap_sg(dev, sg, nents, (int)direction); | ||
107 | } | ||
108 | |||
109 | static void dma32_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, | ||
110 | size_t size, | ||
111 | enum dma_data_direction direction) | ||
112 | { | ||
113 | #ifdef CONFIG_PCI | ||
114 | if (dev->bus == &pci_bus_type) { | ||
115 | pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle, | ||
116 | size, (int)direction); | ||
117 | return; | ||
118 | } | ||
119 | #endif | ||
120 | sbus_dma_sync_single_for_cpu(dev, dma_handle, size, (int) direction); | ||
121 | } | ||
122 | |||
123 | static void dma32_sync_single_for_device(struct device *dev, | ||
124 | dma_addr_t dma_handle, size_t size, | ||
125 | enum dma_data_direction direction) | ||
126 | { | ||
127 | #ifdef CONFIG_PCI | ||
128 | if (dev->bus == &pci_bus_type) { | ||
129 | pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle, | ||
130 | size, (int)direction); | ||
131 | return; | ||
132 | } | ||
133 | #endif | ||
134 | sbus_dma_sync_single_for_device(dev, dma_handle, size, (int) direction); | ||
135 | } | ||
136 | |||
137 | static void dma32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, | ||
138 | int nelems, enum dma_data_direction direction) | ||
139 | { | ||
140 | #ifdef CONFIG_PCI | ||
141 | if (dev->bus == &pci_bus_type) { | ||
142 | pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg, | ||
143 | nelems, (int)direction); | ||
144 | return; | ||
145 | } | ||
146 | #endif | ||
147 | BUG(); | ||
148 | } | ||
149 | |||
150 | static void dma32_sync_sg_for_device(struct device *dev, | ||
151 | struct scatterlist *sg, int nelems, | ||
152 | enum dma_data_direction direction) | ||
153 | { | ||
154 | #ifdef CONFIG_PCI | ||
155 | if (dev->bus == &pci_bus_type) { | ||
156 | pci_dma_sync_sg_for_device(to_pci_dev(dev), sg, | ||
157 | nelems, (int)direction); | ||
158 | return; | ||
159 | } | ||
160 | #endif | ||
161 | BUG(); | ||
162 | } | ||
163 | |||
164 | static const struct dma_ops dma32_dma_ops = { | ||
165 | .alloc_coherent = dma32_alloc_coherent, | ||
166 | .free_coherent = dma32_free_coherent, | ||
167 | .map_page = dma32_map_page, | ||
168 | .unmap_page = dma32_unmap_page, | ||
169 | .map_sg = dma32_map_sg, | ||
170 | .unmap_sg = dma32_unmap_sg, | ||
171 | .sync_single_for_cpu = dma32_sync_single_for_cpu, | ||
172 | .sync_single_for_device = dma32_sync_single_for_device, | ||
173 | .sync_sg_for_cpu = dma32_sync_sg_for_cpu, | ||
174 | .sync_sg_for_device = dma32_sync_sg_for_device, | ||
175 | }; | ||
176 | |||
177 | const struct dma_ops *dma_ops = &dma32_dma_ops; | ||
178 | EXPORT_SYMBOL(dma_ops); | ||
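What is left of dma.c only seeds the generic DMA-debug facility: with CONFIG_DMA_API_DEBUG enabled, every mapping made through the dma_* API is checked against the 2^15 preallocated tracking entries. A driver-side sketch of a mapping the debug core would now verify:

	dma_addr_t ba = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, ba))
		return -ENOMEM;
	/* ... device performs the transfer ... */
	dma_unmap_single(dev, ba, len, DMA_TO_DEVICE);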
diff --git a/arch/sparc/kernel/dma.h b/arch/sparc/kernel/dma.h deleted file mode 100644 index f8d8951adb53..000000000000 --- a/arch/sparc/kernel/dma.h +++ /dev/null | |||
@@ -1,14 +0,0 @@ | |||
1 | void *sbus_alloc_consistent(struct device *dev, long len, u32 *dma_addrp); | ||
2 | void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba); | ||
3 | dma_addr_t sbus_map_single(struct device *dev, void *va, | ||
4 | size_t len, int direction); | ||
5 | void sbus_unmap_single(struct device *dev, dma_addr_t ba, | ||
6 | size_t n, int direction); | ||
7 | int sbus_map_sg(struct device *dev, struct scatterlist *sg, | ||
8 | int n, int direction); | ||
9 | void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, | ||
10 | int n, int direction); | ||
11 | void sbus_dma_sync_single_for_cpu(struct device *dev, dma_addr_t ba, | ||
12 | size_t size, int direction); | ||
13 | void sbus_dma_sync_single_for_device(struct device *dev, dma_addr_t ba, | ||
14 | size_t size, int direction); | ||
diff --git a/arch/sparc/kernel/head_32.S b/arch/sparc/kernel/head_32.S index 6b4d8acc4c83..439d82a95ac9 100644 --- a/arch/sparc/kernel/head_32.S +++ b/arch/sparc/kernel/head_32.S | |||
@@ -809,6 +809,11 @@ found_version: | |||
809 | nop | 809 | nop |
810 | 810 | ||
811 | got_prop: | 811 | got_prop: |
812 | #ifdef CONFIG_SPARC_LEON | ||
813 | /* no cpu-type check is needed, it is a SPARC-LEON */ | ||
814 | ba sun4c_continue_boot | ||
815 | nop | ||
816 | #endif | ||
812 | set cputypval, %o2 | 817 | set cputypval, %o2 |
813 | ldub [%o2 + 0x4], %l1 | 818 | ldub [%o2 + 0x4], %l1 |
814 | 819 | ||
diff --git a/arch/sparc/kernel/idprom.c b/arch/sparc/kernel/idprom.c index 57922f69c3f7..52a15fe2db19 100644 --- a/arch/sparc/kernel/idprom.c +++ b/arch/sparc/kernel/idprom.c | |||
@@ -31,6 +31,8 @@ static struct Sun_Machine_Models Sun_Machines[NUM_SUN_MACHINES] = { | |||
31 | { .name = "Sun 4/200 Series", .id_machtype = (SM_SUN4 | SM_4_260) }, | 31 | { .name = "Sun 4/200 Series", .id_machtype = (SM_SUN4 | SM_4_260) }, |
32 | { .name = "Sun 4/300 Series", .id_machtype = (SM_SUN4 | SM_4_330) }, | 32 | { .name = "Sun 4/300 Series", .id_machtype = (SM_SUN4 | SM_4_330) }, |
33 | { .name = "Sun 4/400 Series", .id_machtype = (SM_SUN4 | SM_4_470) }, | 33 | { .name = "Sun 4/400 Series", .id_machtype = (SM_SUN4 | SM_4_470) }, |
34 | /* Now Leon */ | ||
35 | { .name = "Leon3 System-on-a-Chip", .id_machtype = (M_LEON | M_LEON3_SOC) }, | ||
34 | /* Now, Sun4c's */ | 36 | /* Now, Sun4c's */ |
35 | { .name = "Sun4c SparcStation 1", .id_machtype = (SM_SUN4C | SM_4C_SS1) }, | 37 | { .name = "Sun4c SparcStation 1", .id_machtype = (SM_SUN4C | SM_4C_SS1) }, |
36 | { .name = "Sun4c SparcStation IPC", .id_machtype = (SM_SUN4C | SM_4C_IPC) }, | 38 | { .name = "Sun4c SparcStation IPC", .id_machtype = (SM_SUN4C | SM_4C_IPC) }, |
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index 0aeaefe696b9..7690cc219ecc 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c | |||
@@ -353,7 +353,8 @@ static void dma_4u_free_coherent(struct device *dev, size_t size, | |||
353 | 353 | ||
354 | static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page, | 354 | static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page, |
355 | unsigned long offset, size_t sz, | 355 | unsigned long offset, size_t sz, |
356 | enum dma_data_direction direction) | 356 | enum dma_data_direction direction, |
357 | struct dma_attrs *attrs) | ||
357 | { | 358 | { |
358 | struct iommu *iommu; | 359 | struct iommu *iommu; |
359 | struct strbuf *strbuf; | 360 | struct strbuf *strbuf; |
@@ -474,7 +475,8 @@ do_flush_sync: | |||
474 | } | 475 | } |
475 | 476 | ||
476 | static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr, | 477 | static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr, |
477 | size_t sz, enum dma_data_direction direction) | 478 | size_t sz, enum dma_data_direction direction, |
479 | struct dma_attrs *attrs) | ||
478 | { | 480 | { |
479 | struct iommu *iommu; | 481 | struct iommu *iommu; |
480 | struct strbuf *strbuf; | 482 | struct strbuf *strbuf; |
@@ -520,7 +522,8 @@ static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr, | |||
520 | } | 522 | } |
521 | 523 | ||
522 | static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist, | 524 | static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist, |
523 | int nelems, enum dma_data_direction direction) | 525 | int nelems, enum dma_data_direction direction, |
526 | struct dma_attrs *attrs) | ||
524 | { | 527 | { |
525 | struct scatterlist *s, *outs, *segstart; | 528 | struct scatterlist *s, *outs, *segstart; |
526 | unsigned long flags, handle, prot, ctx; | 529 | unsigned long flags, handle, prot, ctx; |
@@ -691,7 +694,8 @@ static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg) | |||
691 | } | 694 | } |
692 | 695 | ||
693 | static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist, | 696 | static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist, |
694 | int nelems, enum dma_data_direction direction) | 697 | int nelems, enum dma_data_direction direction, |
698 | struct dma_attrs *attrs) | ||
695 | { | 699 | { |
696 | unsigned long flags, ctx; | 700 | unsigned long flags, ctx; |
697 | struct scatterlist *sg; | 701 | struct scatterlist *sg; |
@@ -822,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev, | |||
822 | spin_unlock_irqrestore(&iommu->lock, flags); | 826 | spin_unlock_irqrestore(&iommu->lock, flags); |
823 | } | 827 | } |
824 | 828 | ||
825 | static const struct dma_ops sun4u_dma_ops = { | 829 | static struct dma_map_ops sun4u_dma_ops = { |
826 | .alloc_coherent = dma_4u_alloc_coherent, | 830 | .alloc_coherent = dma_4u_alloc_coherent, |
827 | .free_coherent = dma_4u_free_coherent, | 831 | .free_coherent = dma_4u_free_coherent, |
828 | .map_page = dma_4u_map_page, | 832 | .map_page = dma_4u_map_page, |
@@ -833,9 +837,11 @@ static const struct dma_ops sun4u_dma_ops = { | |||
833 | .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu, | 837 | .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu, |
834 | }; | 838 | }; |
835 | 839 | ||
836 | const struct dma_ops *dma_ops = &sun4u_dma_ops; | 840 | struct dma_map_ops *dma_ops = &sun4u_dma_ops; |
837 | EXPORT_SYMBOL(dma_ops); | 841 | EXPORT_SYMBOL(dma_ops); |
838 | 842 | ||
843 | extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask); | ||
844 | |||
839 | int dma_supported(struct device *dev, u64 device_mask) | 845 | int dma_supported(struct device *dev, u64 device_mask) |
840 | { | 846 | { |
841 | struct iommu *iommu = dev->archdata.iommu; | 847 | struct iommu *iommu = dev->archdata.iommu; |
@@ -849,7 +855,7 @@ int dma_supported(struct device *dev, u64 device_mask) | |||
849 | 855 | ||
850 | #ifdef CONFIG_PCI | 856 | #ifdef CONFIG_PCI |
851 | if (dev->bus == &pci_bus_type) | 857 | if (dev->bus == &pci_bus_type) |
852 | return pci_dma_supported(to_pci_dev(dev), device_mask); | 858 | return pci64_dma_supported(to_pci_dev(dev), device_mask); |
853 | #endif | 859 | #endif |
854 | 860 | ||
855 | return 0; | 861 | return 0; |
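Converting sun4u_dma_ops to struct dma_map_ops (and threading struct dma_attrs through the map/unmap hooks) lets the generic inline wrappers dispatch to it; the resulting call path looks roughly like this (paraphrasing the asm-generic helpers, not this file's code):

	static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
					      size_t offset, size_t size,
					      enum dma_data_direction dir)
	{
		struct dma_map_ops *ops = get_dma_ops(dev);

		return ops->map_page(dev, page, offset, size, dir, NULL);
	}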
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c index 87ea0d03d975..9f61fd8cbb7b 100644 --- a/arch/sparc/kernel/ioport.c +++ b/arch/sparc/kernel/ioport.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/slab.h> | 35 | #include <linux/slab.h> |
36 | #include <linux/pci.h> /* struct pci_dev */ | 36 | #include <linux/pci.h> /* struct pci_dev */ |
37 | #include <linux/proc_fs.h> | 37 | #include <linux/proc_fs.h> |
38 | #include <linux/seq_file.h> | ||
38 | #include <linux/scatterlist.h> | 39 | #include <linux/scatterlist.h> |
39 | #include <linux/of_device.h> | 40 | #include <linux/of_device.h> |
40 | 41 | ||
@@ -48,8 +49,6 @@ | |||
48 | #include <asm/iommu.h> | 49 | #include <asm/iommu.h> |
49 | #include <asm/io-unit.h> | 50 | #include <asm/io-unit.h> |
50 | 51 | ||
51 | #include "dma.h" | ||
52 | |||
53 | #define mmu_inval_dma_area(p, l) /* Anton pulled it out for 2.4.0-xx */ | 52 | #define mmu_inval_dma_area(p, l) /* Anton pulled it out for 2.4.0-xx */ |
54 | 53 | ||
55 | static struct resource *_sparc_find_resource(struct resource *r, | 54 | static struct resource *_sparc_find_resource(struct resource *r, |
@@ -246,7 +245,8 @@ EXPORT_SYMBOL(sbus_set_sbus64); | |||
246 | * Typically devices use them for control blocks. | 245 | * Typically devices use them for control blocks. |
247 | * CPU may access them without any explicit flushing. | 246 | * CPU may access them without any explicit flushing. |
248 | */ | 247 | */ |
249 | void *sbus_alloc_consistent(struct device *dev, long len, u32 *dma_addrp) | 248 | static void *sbus_alloc_coherent(struct device *dev, size_t len, |
249 | dma_addr_t *dma_addrp, gfp_t gfp) | ||
250 | { | 250 | { |
251 | struct of_device *op = to_of_device(dev); | 251 | struct of_device *op = to_of_device(dev); |
252 | unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK; | 252 | unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK; |
@@ -299,7 +299,8 @@ err_nopages: | |||
299 | return NULL; | 299 | return NULL; |
300 | } | 300 | } |
301 | 301 | ||
302 | void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba) | 302 | static void sbus_free_coherent(struct device *dev, size_t n, void *p, |
303 | dma_addr_t ba) | ||
303 | { | 304 | { |
304 | struct resource *res; | 305 | struct resource *res; |
305 | struct page *pgv; | 306 | struct page *pgv; |
@@ -317,7 +318,7 @@ void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba) | |||
317 | 318 | ||
318 | n = (n + PAGE_SIZE-1) & PAGE_MASK; | 319 | n = (n + PAGE_SIZE-1) & PAGE_MASK; |
319 | if ((res->end-res->start)+1 != n) { | 320 | if ((res->end-res->start)+1 != n) { |
320 | printk("sbus_free_consistent: region 0x%lx asked 0x%lx\n", | 321 | printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n", |
321 | (long)((res->end-res->start)+1), n); | 322 | (long)((res->end-res->start)+1), n); |
322 | return; | 323 | return; |
323 | } | 324 | } |
@@ -337,8 +338,13 @@ void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba) | |||
337 | * CPU view of this memory may be inconsistent with | 338 | * CPU view of this memory may be inconsistent with |
338 | * a device view and explicit flushing is necessary. | 339 | * a device view and explicit flushing is necessary. |
339 | */ | 340 | */ |
340 | dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int direction) | 341 | static dma_addr_t sbus_map_page(struct device *dev, struct page *page, |
342 | unsigned long offset, size_t len, | ||
343 | enum dma_data_direction dir, | ||
344 | struct dma_attrs *attrs) | ||
341 | { | 345 | { |
346 | void *va = page_address(page) + offset; | ||
347 | |||
342 | /* XXX why are some lengths signed, others unsigned? */ | 348 | /* XXX why are some lengths signed, others unsigned? */ |
343 | if (len <= 0) { | 349 | if (len <= 0) { |
344 | return 0; | 350 | return 0; |
@@ -350,12 +356,14 @@ dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int directi | |||
350 | return mmu_get_scsi_one(dev, va, len); | 356 | return mmu_get_scsi_one(dev, va, len); |
351 | } | 357 | } |
352 | 358 | ||
353 | void sbus_unmap_single(struct device *dev, dma_addr_t ba, size_t n, int direction) | 359 | static void sbus_unmap_page(struct device *dev, dma_addr_t ba, size_t n, |
360 | enum dma_data_direction dir, struct dma_attrs *attrs) | ||
354 | { | 361 | { |
355 | mmu_release_scsi_one(dev, ba, n); | 362 | mmu_release_scsi_one(dev, ba, n); |
356 | } | 363 | } |
357 | 364 | ||
358 | int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction) | 365 | static int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, |
366 | enum dma_data_direction dir, struct dma_attrs *attrs) | ||
359 | { | 367 | { |
360 | mmu_get_scsi_sgl(dev, sg, n); | 368 | mmu_get_scsi_sgl(dev, sg, n); |
361 | 369 | ||
@@ -366,19 +374,38 @@ int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction | |||
366 | return n; | 374 | return n; |
367 | } | 375 | } |
368 | 376 | ||
369 | void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n, int direction) | 377 | static void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n, |
378 | enum dma_data_direction dir, struct dma_attrs *attrs) | ||
370 | { | 379 | { |
371 | mmu_release_scsi_sgl(dev, sg, n); | 380 | mmu_release_scsi_sgl(dev, sg, n); |
372 | } | 381 | } |
373 | 382 | ||
374 | void sbus_dma_sync_single_for_cpu(struct device *dev, dma_addr_t ba, size_t size, int direction) | 383 | static void sbus_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, |
384 | int n, enum dma_data_direction dir) | ||
375 | { | 385 | { |
386 | BUG(); | ||
376 | } | 387 | } |
377 | 388 | ||
378 | void sbus_dma_sync_single_for_device(struct device *dev, dma_addr_t ba, size_t size, int direction) | 389 | static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg, |
390 | int n, enum dma_data_direction dir) | ||
379 | { | 391 | { |
392 | BUG(); | ||
380 | } | 393 | } |
381 | 394 | ||
395 | struct dma_map_ops sbus_dma_ops = { | ||
396 | .alloc_coherent = sbus_alloc_coherent, | ||
397 | .free_coherent = sbus_free_coherent, | ||
398 | .map_page = sbus_map_page, | ||
399 | .unmap_page = sbus_unmap_page, | ||
400 | .map_sg = sbus_map_sg, | ||
401 | .unmap_sg = sbus_unmap_sg, | ||
402 | .sync_sg_for_cpu = sbus_sync_sg_for_cpu, | ||
403 | .sync_sg_for_device = sbus_sync_sg_for_device, | ||
404 | }; | ||
405 | |||
406 | struct dma_map_ops *dma_ops = &sbus_dma_ops; | ||
407 | EXPORT_SYMBOL(dma_ops); | ||
408 | |||
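/*
 * Editor's sketch, not part of the patch: with dma_ops now pointing at
 * sbus_dma_ops, the generic dma-mapping wrappers this series moves to
 * dispatch roughly as below, which is how sbus_map_page() ends up
 * servicing dma_map_single() for SBus devices. The helper name is
 * hypothetical; get_dma_ops() here simply returns dma_ops.
 */
static inline dma_addr_t sketch_dma_map_single(struct device *dev, void *ptr,
					       size_t size,
					       enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	return ops->map_page(dev, virt_to_page(ptr), offset_in_page(ptr),
			     size, dir, NULL);
}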
382 | static int __init sparc_register_ioport(void) | 409 | static int __init sparc_register_ioport(void) |
383 | { | 410 | { |
384 | register_proc_sparc_ioport(); | 411 | register_proc_sparc_ioport(); |
@@ -395,7 +422,8 @@ arch_initcall(sparc_register_ioport); | |||
395 | /* Allocate and map a kernel buffer using consistent mode DMA for a device. | 422 | /* Allocate and map a kernel buffer using consistent mode DMA for a device. |
396 | * hwdev should be a valid struct pci_dev pointer for PCI devices. | 423 | * hwdev should be a valid struct pci_dev pointer for PCI devices. |
397 | */ | 424 | */ |
398 | void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba) | 425 | static void *pci32_alloc_coherent(struct device *dev, size_t len, |
426 | dma_addr_t *pba, gfp_t gfp) | ||
399 | { | 427 | { |
400 | unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK; | 428 | unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK; |
401 | unsigned long va; | 429 | unsigned long va; |
@@ -439,7 +467,6 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba) | |||
439 | *pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */ | 467 | *pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */ |
440 | return (void *) res->start; | 468 | return (void *) res->start; |
441 | } | 469 | } |
442 | EXPORT_SYMBOL(pci_alloc_consistent); | ||
443 | 470 | ||
444 | /* Free and unmap a consistent DMA buffer. | 471 | /* Free and unmap a consistent DMA buffer. |
445 | * cpu_addr is what was returned from pci_alloc_consistent, | 472 | * cpu_addr is what was returned from pci_alloc_consistent, |
@@ -449,7 +476,8 @@ EXPORT_SYMBOL(pci_alloc_consistent); | |||
449 | * References to the memory and mappings associated with cpu_addr/dma_addr | 476 | * References to the memory and mappings associated with cpu_addr/dma_addr |
450 | * past this call are illegal. | 477 | * past this call are illegal. |
451 | */ | 478 | */ |
452 | void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba) | 479 | static void pci32_free_coherent(struct device *dev, size_t n, void *p, |
480 | dma_addr_t ba) | ||
453 | { | 481 | { |
454 | struct resource *res; | 482 | struct resource *res; |
455 | unsigned long pgp; | 483 | unsigned long pgp; |
@@ -481,60 +509,18 @@ void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba) | |||
481 | 509 | ||
482 | free_pages(pgp, get_order(n)); | 510 | free_pages(pgp, get_order(n)); |
483 | } | 511 | } |
484 | EXPORT_SYMBOL(pci_free_consistent); | ||
485 | |||
486 | /* Map a single buffer of the indicated size for DMA in streaming mode. | ||
487 | * The 32-bit bus address to use is returned. | ||
488 | * | ||
489 | * Once the device is given the dma address, the device owns this memory | ||
490 | * until either pci_unmap_single or pci_dma_sync_single_* is performed. | ||
491 | */ | ||
492 | dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, | ||
493 | int direction) | ||
494 | { | ||
495 | BUG_ON(direction == PCI_DMA_NONE); | ||
496 | /* IIep is write-through, not flushing. */ | ||
497 | return virt_to_phys(ptr); | ||
498 | } | ||
499 | EXPORT_SYMBOL(pci_map_single); | ||
500 | |||
501 | /* Unmap a single streaming mode DMA translation. The dma_addr and size | ||
502 | * must match what was provided for in a previous pci_map_single call. All | ||
503 | * other usages are undefined. | ||
504 | * | ||
505 | * After this call, reads by the cpu to the buffer are guaranteed to see | ||
506 | * whatever the device wrote there. | ||
507 | */ | ||
508 | void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size, | ||
509 | int direction) | ||
510 | { | ||
511 | BUG_ON(direction == PCI_DMA_NONE); | ||
512 | if (direction != PCI_DMA_TODEVICE) { | ||
513 | mmu_inval_dma_area((unsigned long)phys_to_virt(ba), | ||
514 | (size + PAGE_SIZE-1) & PAGE_MASK); | ||
515 | } | ||
516 | } | ||
517 | EXPORT_SYMBOL(pci_unmap_single); | ||
518 | 512 | ||
519 | /* | 513 | /* |
520 | * Same as pci_map_single, but with pages. | 514 | * Same as pci_map_single, but with pages. |
521 | */ | 515 | */ |
522 | dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page, | 516 | static dma_addr_t pci32_map_page(struct device *dev, struct page *page, |
523 | unsigned long offset, size_t size, int direction) | 517 | unsigned long offset, size_t size, |
518 | enum dma_data_direction dir, | ||
519 | struct dma_attrs *attrs) | ||
524 | { | 520 | { |
525 | BUG_ON(direction == PCI_DMA_NONE); | ||
526 | /* IIep is write-through, not flushing. */ | 521 | /* IIep is write-through, not flushing. */ |
527 | return page_to_phys(page) + offset; | 522 | return page_to_phys(page) + offset; |
528 | } | 523 | } |
529 | EXPORT_SYMBOL(pci_map_page); | ||
530 | |||
531 | void pci_unmap_page(struct pci_dev *hwdev, | ||
532 | dma_addr_t dma_address, size_t size, int direction) | ||
533 | { | ||
534 | BUG_ON(direction == PCI_DMA_NONE); | ||
535 | /* mmu_inval_dma_area XXX */ | ||
536 | } | ||
537 | EXPORT_SYMBOL(pci_unmap_page); | ||
538 | 524 | ||
539 | /* Map a set of buffers described by scatterlist in streaming | 525 | /* Map a set of buffers described by scatterlist in streaming |
540 | * mode for DMA. This is the scatter-gather version of the | 526 | * mode for DMA. This is the scatter-gather version of the |
@@ -551,13 +537,13 @@ EXPORT_SYMBOL(pci_unmap_page); | |||
551 | * Device ownership issues as mentioned above for pci_map_single are | 537 | * Device ownership issues as mentioned above for pci_map_single are |
552 | * the same here. | 538 | * the same here. |
553 | */ | 539 | */ |
554 | int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, | 540 | static int pci32_map_sg(struct device *device, struct scatterlist *sgl, |
555 | int direction) | 541 | int nents, enum dma_data_direction dir, |
542 | struct dma_attrs *attrs) | ||
556 | { | 543 | { |
557 | struct scatterlist *sg; | 544 | struct scatterlist *sg; |
558 | int n; | 545 | int n; |
559 | 546 | ||
560 | BUG_ON(direction == PCI_DMA_NONE); | ||
561 | /* IIep is write-through, not flushing. */ | 547 | /* IIep is write-through, not flushing. */ |
562 | for_each_sg(sgl, sg, nents, n) { | 548 | for_each_sg(sgl, sg, nents, n) { |
563 | BUG_ON(page_address(sg_page(sg)) == NULL); | 549 | BUG_ON(page_address(sg_page(sg)) == NULL); |
@@ -566,20 +552,19 @@ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, | |||
566 | } | 552 | } |
567 | return nents; | 553 | return nents; |
568 | } | 554 | } |
569 | EXPORT_SYMBOL(pci_map_sg); | ||
570 | 555 | ||
571 | /* Unmap a set of streaming mode DMA translations. | 556 | /* Unmap a set of streaming mode DMA translations. |
572 | * Again, cpu read rules concerning calls here are the same as for | 557 | * Again, cpu read rules concerning calls here are the same as for |
573 | * pci_unmap_single() above. | 558 | * pci_unmap_single() above. |
574 | */ | 559 | */ |
575 | void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, | 560 | static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl, |
576 | int direction) | 561 | int nents, enum dma_data_direction dir, |
562 | struct dma_attrs *attrs) | ||
577 | { | 563 | { |
578 | struct scatterlist *sg; | 564 | struct scatterlist *sg; |
579 | int n; | 565 | int n; |
580 | 566 | ||
581 | BUG_ON(direction == PCI_DMA_NONE); | 567 | if (dir != PCI_DMA_TODEVICE) { |
582 | if (direction != PCI_DMA_TODEVICE) { | ||
583 | for_each_sg(sgl, sg, nents, n) { | 568 | for_each_sg(sgl, sg, nents, n) { |
584 | BUG_ON(page_address(sg_page(sg)) == NULL); | 569 | BUG_ON(page_address(sg_page(sg)) == NULL); |
585 | mmu_inval_dma_area( | 570 | mmu_inval_dma_area( |
@@ -588,7 +573,6 @@ void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, | |||
588 | } | 573 | } |
589 | } | 574 | } |
590 | } | 575 | } |
591 | EXPORT_SYMBOL(pci_unmap_sg); | ||
592 | 576 | ||
593 | /* Make physical memory consistent for a single | 577 | /* Make physical memory consistent for a single |
594 | * streaming mode DMA translation before or after a transfer. | 578 | * streaming mode DMA translation before or after a transfer. |
@@ -600,25 +584,23 @@ EXPORT_SYMBOL(pci_unmap_sg); | |||
600 | * must first perform a pci_dma_sync_for_device, and then the | 584 | * must first perform a pci_dma_sync_for_device, and then the |
601 | * device again owns the buffer. | 585 | * device again owns the buffer. |
602 | */ | 586 | */ |
603 | void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction) | 587 | static void pci32_sync_single_for_cpu(struct device *dev, dma_addr_t ba, |
588 | size_t size, enum dma_data_direction dir) | ||
604 | { | 589 | { |
605 | BUG_ON(direction == PCI_DMA_NONE); | 590 | if (dir != PCI_DMA_TODEVICE) { |
606 | if (direction != PCI_DMA_TODEVICE) { | ||
607 | mmu_inval_dma_area((unsigned long)phys_to_virt(ba), | 591 | mmu_inval_dma_area((unsigned long)phys_to_virt(ba), |
608 | (size + PAGE_SIZE-1) & PAGE_MASK); | 592 | (size + PAGE_SIZE-1) & PAGE_MASK); |
609 | } | 593 | } |
610 | } | 594 | } |
611 | EXPORT_SYMBOL(pci_dma_sync_single_for_cpu); | ||
612 | 595 | ||
613 | void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction) | 596 | static void pci32_sync_single_for_device(struct device *dev, dma_addr_t ba, |
597 | size_t size, enum dma_data_direction dir) | ||
614 | { | 598 | { |
615 | BUG_ON(direction == PCI_DMA_NONE); | 599 | if (dir != PCI_DMA_TODEVICE) { |
616 | if (direction != PCI_DMA_TODEVICE) { | ||
617 | mmu_inval_dma_area((unsigned long)phys_to_virt(ba), | 600 | mmu_inval_dma_area((unsigned long)phys_to_virt(ba), |
618 | (size + PAGE_SIZE-1) & PAGE_MASK); | 601 | (size + PAGE_SIZE-1) & PAGE_MASK); |
619 | } | 602 | } |
620 | } | 603 | } |
621 | EXPORT_SYMBOL(pci_dma_sync_single_for_device); | ||
622 | 604 | ||
623 | /* Make physical memory consistent for a set of streaming | 605 | /* Make physical memory consistent for a set of streaming |
624 | * mode DMA translations after a transfer. | 606 | * mode DMA translations after a transfer. |
@@ -626,13 +608,13 @@ EXPORT_SYMBOL(pci_dma_sync_single_for_device); | |||
626 | * The same as pci_dma_sync_single_* but for a scatter-gather list, | 608 | * The same as pci_dma_sync_single_* but for a scatter-gather list, |
627 | * same rules and usage. | 609 | * same rules and usage. |
628 | */ | 610 | */ |
629 | void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction) | 611 | static void pci32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl, |
612 | int nents, enum dma_data_direction dir) | ||
630 | { | 613 | { |
631 | struct scatterlist *sg; | 614 | struct scatterlist *sg; |
632 | int n; | 615 | int n; |
633 | 616 | ||
634 | BUG_ON(direction == PCI_DMA_NONE); | 617 | if (dir != PCI_DMA_TODEVICE) { |
635 | if (direction != PCI_DMA_TODEVICE) { | ||
636 | for_each_sg(sgl, sg, nents, n) { | 618 | for_each_sg(sgl, sg, nents, n) { |
637 | BUG_ON(page_address(sg_page(sg)) == NULL); | 619 | BUG_ON(page_address(sg_page(sg)) == NULL); |
638 | mmu_inval_dma_area( | 620 | mmu_inval_dma_area( |
@@ -641,15 +623,14 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int | |||
641 | } | 623 | } |
642 | } | 624 | } |
643 | } | 625 | } |
644 | EXPORT_SYMBOL(pci_dma_sync_sg_for_cpu); | ||
645 | 626 | ||
646 | void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction) | 627 | static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *sgl, |
628 | int nents, enum dma_data_direction dir) | ||
647 | { | 629 | { |
648 | struct scatterlist *sg; | 630 | struct scatterlist *sg; |
649 | int n; | 631 | int n; |
650 | 632 | ||
651 | BUG_ON(direction == PCI_DMA_NONE); | 633 | if (dir != PCI_DMA_TODEVICE) { |
652 | if (direction != PCI_DMA_TODEVICE) { | ||
653 | for_each_sg(sgl, sg, nents, n) { | 634 | for_each_sg(sgl, sg, nents, n) { |
654 | BUG_ON(page_address(sg_page(sg)) == NULL); | 635 | BUG_ON(page_address(sg_page(sg)) == NULL); |
655 | mmu_inval_dma_area( | 636 | mmu_inval_dma_area( |
@@ -658,31 +639,78 @@ void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, | |||
658 | } | 639 | } |
659 | } | 640 | } |
660 | } | 641 | } |
661 | EXPORT_SYMBOL(pci_dma_sync_sg_for_device); | 642 | |
643 | struct dma_map_ops pci32_dma_ops = { | ||
644 | .alloc_coherent = pci32_alloc_coherent, | ||
645 | .free_coherent = pci32_free_coherent, | ||
646 | .map_page = pci32_map_page, | ||
647 | .map_sg = pci32_map_sg, | ||
648 | .unmap_sg = pci32_unmap_sg, | ||
649 | .sync_single_for_cpu = pci32_sync_single_for_cpu, | ||
650 | .sync_single_for_device = pci32_sync_single_for_device, | ||
651 | .sync_sg_for_cpu = pci32_sync_sg_for_cpu, | ||
652 | .sync_sg_for_device = pci32_sync_sg_for_device, | ||
653 | }; | ||
654 | EXPORT_SYMBOL(pci32_dma_ops); | ||
655 | |||
662 | #endif /* CONFIG_PCI */ | 656 | #endif /* CONFIG_PCI */ |
663 | 657 | ||
658 | /* | ||
659 | * Return whether the given PCI device DMA address mask can be | ||
660 | * supported properly. For example, if your device can only drive the | ||
661 | * low 24 bits during PCI bus mastering, then you would pass | ||
662 | * 0x00ffffff as the mask to this function. | ||
663 | */ | ||
664 | int dma_supported(struct device *dev, u64 mask) | ||
665 | { | ||
666 | #ifdef CONFIG_PCI | ||
667 | if (dev->bus == &pci_bus_type) | ||
668 | return 1; | ||
669 | #endif | ||
670 | return 0; | ||
671 | } | ||
672 | EXPORT_SYMBOL(dma_supported); | ||
673 | |||
674 | int dma_set_mask(struct device *dev, u64 dma_mask) | ||
675 | { | ||
676 | #ifdef CONFIG_PCI | ||
677 | if (dev->bus == &pci_bus_type) | ||
678 | return pci_set_dma_mask(to_pci_dev(dev), dma_mask); | ||
679 | #endif | ||
680 | return -EOPNOTSUPP; | ||
681 | } | ||
682 | EXPORT_SYMBOL(dma_set_mask); | ||
683 | |||
684 | |||
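/*
 * Editor's sketch, not part of the patch: a hypothetical driver fragment
 * showing the helpers above in use. On sparc32, dma_set_mask() succeeds
 * only for PCI devices, where it forwards to pci_set_dma_mask(); the
 * 24-bit mask is purely illustrative.
 */
static int sketch_probe(struct pci_dev *pdev)
{
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(24)))
		return -ENODEV;	/* mask not representable on this bus */
	return 0;
}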
664 | #ifdef CONFIG_PROC_FS | 685 | #ifdef CONFIG_PROC_FS |
665 | 686 | ||
666 | static int | 687 | static int sparc_io_proc_show(struct seq_file *m, void *v) |
667 | _sparc_io_get_info(char *buf, char **start, off_t fpos, int length, int *eof, | ||
668 | void *data) | ||
669 | { | 688 | { |
670 | char *p = buf, *e = buf + length; | 689 | struct resource *root = m->private, *r; |
671 | struct resource *r; | ||
672 | const char *nm; | 690 | const char *nm; |
673 | 691 | ||
674 | for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) { | 692 | for (r = root->child; r != NULL; r = r->sibling) { |
675 | if (p + 32 >= e) /* Better than nothing */ | ||
676 | break; | ||
677 | if ((nm = r->name) == NULL) nm = "???"; | 693 | if ((nm = r->name) == NULL) nm = "???"; |
678 | p += sprintf(p, "%016llx-%016llx: %s\n", | 694 | seq_printf(m, "%016llx-%016llx: %s\n", |
679 | (unsigned long long)r->start, | 695 | (unsigned long long)r->start, |
680 | (unsigned long long)r->end, nm); | 696 | (unsigned long long)r->end, nm); |
681 | } | 697 | } |
682 | 698 | ||
683 | return p-buf; | 699 | return 0; |
684 | } | 700 | } |
685 | 701 | ||
702 | static int sparc_io_proc_open(struct inode *inode, struct file *file) | ||
703 | { | ||
704 | return single_open(file, sparc_io_proc_show, PDE(inode)->data); | ||
705 | } | ||
706 | |||
707 | static const struct file_operations sparc_io_proc_fops = { | ||
708 | .owner = THIS_MODULE, | ||
709 | .open = sparc_io_proc_open, | ||
710 | .read = seq_read, | ||
711 | .llseek = seq_lseek, | ||
712 | .release = single_release, | ||
713 | }; | ||
686 | #endif /* CONFIG_PROC_FS */ | 714 | #endif /* CONFIG_PROC_FS */ |
687 | 715 | ||
688 | /* | 716 | /* |
@@ -707,7 +735,7 @@ static struct resource *_sparc_find_resource(struct resource *root, | |||
707 | static void register_proc_sparc_ioport(void) | 735 | static void register_proc_sparc_ioport(void) |
708 | { | 736 | { |
709 | #ifdef CONFIG_PROC_FS | 737 | #ifdef CONFIG_PROC_FS |
710 | create_proc_read_entry("io_map",0,NULL,_sparc_io_get_info,&sparc_iomap); | 738 | proc_create_data("io_map", 0, NULL, &sparc_io_proc_fops, &sparc_iomap); |
711 | create_proc_read_entry("dvma_map",0,NULL,_sparc_io_get_info,&_sparc_dvma); | 739 | proc_create_data("dvma_map", 0, NULL, &sparc_io_proc_fops, &_sparc_dvma); |
712 | #endif | 740 | #endif |
713 | } | 741 | } |
diff --git a/arch/sparc/kernel/irq_32.c b/arch/sparc/kernel/irq_32.c index ad800b80c718..e1af43728329 100644 --- a/arch/sparc/kernel/irq_32.c +++ b/arch/sparc/kernel/irq_32.c | |||
@@ -45,6 +45,7 @@ | |||
45 | #include <asm/pcic.h> | 45 | #include <asm/pcic.h> |
46 | #include <asm/cacheflush.h> | 46 | #include <asm/cacheflush.h> |
47 | #include <asm/irq_regs.h> | 47 | #include <asm/irq_regs.h> |
48 | #include <asm/leon.h> | ||
48 | 49 | ||
49 | #include "kernel.h" | 50 | #include "kernel.h" |
50 | #include "irq.h" | 51 | #include "irq.h" |
@@ -661,6 +662,10 @@ void __init init_IRQ(void) | |||
661 | sun4d_init_IRQ(); | 662 | sun4d_init_IRQ(); |
662 | break; | 663 | break; |
663 | 664 | ||
665 | case sparc_leon: | ||
666 | leon_init_IRQ(); | ||
667 | break; | ||
668 | |||
664 | default: | 669 | default: |
665 | prom_printf("Cannot initialize IRQs on this Sun machine..."); | 670 | prom_printf("Cannot initialize IRQs on this Sun machine..."); |
666 | break; | 671 | break; |
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c new file mode 100644 index 000000000000..54d8a5bd4824 --- /dev/null +++ b/arch/sparc/kernel/leon_kernel.c | |||
@@ -0,0 +1,203 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB | ||
3 | * Copyright (C) 2009 Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB | ||
4 | */ | ||
5 | |||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/module.h> | ||
8 | #include <linux/errno.h> | ||
9 | #include <linux/mutex.h> | ||
10 | #include <linux/slab.h> | ||
11 | #include <linux/of.h> | ||
12 | #include <linux/of_platform.h> | ||
13 | #include <linux/interrupt.h> | ||
14 | #include <linux/of_device.h> | ||
15 | #include <asm/oplib.h> | ||
16 | #include <asm/timer.h> | ||
17 | #include <asm/prom.h> | ||
18 | #include <asm/leon.h> | ||
19 | #include <asm/leon_amba.h> | ||
20 | |||
21 | #include "prom.h" | ||
22 | #include "irq.h" | ||
23 | |||
24 | struct leon3_irqctrl_regs_map *leon3_irqctrl_regs; /* interrupt controller base address, initialized by amba_init() */ | ||
25 | struct leon3_gptimer_regs_map *leon3_gptimer_regs; /* timer controller base address, initialized by amba_init() */ | ||
26 | struct amba_apb_device leon_percpu_timer_dev[16]; | ||
27 | |||
28 | int leondebug_irq_disable; | ||
29 | int leon_debug_irqout; | ||
30 | static int dummy_master_l10_counter; | ||
31 | |||
32 | unsigned long leon3_gptimer_irq; /* timer irq number, initialized by amba_init() */ | ||
33 | unsigned int sparc_leon_eirq; | ||
34 | #define LEON_IMASK ((&leon3_irqctrl_regs->mask[0])) | ||
35 | |||
36 | /* Return the IRQ of the pending IRQ on the extended IRQ controller */ | ||
37 | int sparc_leon_eirq_get(int eirq, int cpu) | ||
38 | { | ||
39 | return LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->intid[cpu]) & 0x1f; | ||
40 | } | ||
41 | |||
42 | irqreturn_t sparc_leon_eirq_isr(int dummy, void *dev_id) | ||
43 | { | ||
44 | printk(KERN_ERR "sparc_leon_eirq_isr: ERROR EXTENDED IRQ\n"); | ||
45 | return IRQ_HANDLED; | ||
46 | } | ||
47 | |||
48 | /* The extended IRQ controller has been found, this function registers it */ | ||
49 | void sparc_leon_eirq_register(int eirq) | ||
50 | { | ||
51 | int irq; | ||
52 | |||
53 | /* Register a "BAD" handler for this interrupt; it should never happen */ | ||
54 | irq = request_irq(eirq, sparc_leon_eirq_isr, | ||
55 | (IRQF_DISABLED | SA_STATIC_ALLOC), "extirq", NULL); | ||
56 | |||
57 | if (irq) { | ||
58 | printk(KERN_ERR | ||
59 | "sparc_leon_eirq_register: unable to attach IRQ%d\n", | ||
60 | eirq); | ||
61 | } else { | ||
62 | sparc_leon_eirq = eirq; | ||
63 | } | ||
64 | |||
65 | } | ||
66 | |||
67 | static inline unsigned long get_irqmask(unsigned int irq) | ||
68 | { | ||
69 | unsigned long mask; | ||
70 | |||
71 | if (!irq || ((irq > 0xf) && !sparc_leon_eirq) | ||
72 | || ((irq > 0x1f) && sparc_leon_eirq)) { | ||
73 | printk(KERN_ERR | ||
74 | "leon_get_irqmask: false irq number: %d\n", irq); | ||
75 | mask = 0; | ||
76 | } else { | ||
77 | mask = LEON_HARD_INT(irq); | ||
78 | } | ||
79 | return mask; | ||
80 | } | ||
81 | |||
82 | static void leon_enable_irq(unsigned int irq_nr) | ||
83 | { | ||
84 | unsigned long mask, flags; | ||
85 | mask = get_irqmask(irq_nr); | ||
86 | local_irq_save(flags); | ||
87 | LEON3_BYPASS_STORE_PA(LEON_IMASK, | ||
88 | (LEON3_BYPASS_LOAD_PA(LEON_IMASK) | (mask))); | ||
89 | local_irq_restore(flags); | ||
90 | } | ||
91 | |||
92 | static void leon_disable_irq(unsigned int irq_nr) | ||
93 | { | ||
94 | unsigned long mask, flags; | ||
95 | mask = get_irqmask(irq_nr); | ||
96 | local_irq_save(flags); | ||
97 | LEON3_BYPASS_STORE_PA(LEON_IMASK, | ||
98 | (LEON3_BYPASS_LOAD_PA(LEON_IMASK) & ~(mask))); | ||
99 | local_irq_restore(flags); | ||
100 | |||
101 | } | ||
102 | |||
103 | void __init leon_init_timers(irq_handler_t counter_fn) | ||
104 | { | ||
105 | int irq; | ||
106 | |||
107 | leondebug_irq_disable = 0; | ||
108 | leon_debug_irqout = 0; | ||
109 | master_l10_counter = (unsigned int *)&dummy_master_l10_counter; | ||
110 | dummy_master_l10_counter = 0; | ||
111 | |||
112 | if (leon3_gptimer_regs && leon3_irqctrl_regs) { | ||
113 | LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].val, 0); | ||
114 | LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].rld, | ||
115 | (((1000000 / 100) - 1))); | ||
116 | LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].ctrl, 0); | ||
117 | |||
118 | } else { | ||
119 | printk(KERN_ERR "No Timer/irqctrl found\n"); | ||
120 | BUG(); | ||
121 | } | ||
122 | |||
123 | irq = request_irq(leon3_gptimer_irq, | ||
124 | counter_fn, | ||
125 | (IRQF_DISABLED | SA_STATIC_ALLOC), "timer", NULL); | ||
126 | |||
127 | if (irq) { | ||
128 | printk(KERN_ERR "leon_init_timers: unable to attach IRQ%lu\n", | ||
129 | leon3_gptimer_irq); | ||
130 | prom_halt(); | ||
131 | } | ||
132 | |||
133 | if (leon3_gptimer_regs) { | ||
134 | LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].ctrl, | ||
135 | LEON3_GPTIMER_EN | | ||
136 | LEON3_GPTIMER_RL | | ||
137 | LEON3_GPTIMER_LD | LEON3_GPTIMER_IRQEN); | ||
138 | } | ||
139 | } | ||
140 | |||
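/*
 * Editor's note on the reload value programmed above, assuming the
 * GPTIMER is prescaled to a 1 MHz tick and the kernel runs at HZ == 100:
 *
 *	(1000000 / 100) - 1 == 9999
 *
 * so the timer counts 10000 ticks of 1 us each, i.e. one timer
 * interrupt every 10 ms.
 */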
141 | void leon_clear_clock_irq(void) | ||
142 | { | ||
143 | } | ||
144 | |||
145 | void leon_load_profile_irq(int cpu, unsigned int limit) | ||
146 | { | ||
147 | BUG(); | ||
148 | } | ||
149 | |||
150 | |||
151 | |||
152 | |||
153 | void __init leon_trans_init(struct device_node *dp) | ||
154 | { | ||
155 | if (strcmp(dp->type, "cpu") == 0 && strcmp(dp->name, "<NULL>") == 0) { | ||
156 | struct property *p; | ||
157 | p = of_find_property(dp, "mid", NULL); | ||
158 | if (p) { | ||
159 | int mid; | ||
160 | dp->name = prom_early_alloc(5 + 1); | ||
161 | memcpy(&mid, p->value, p->length); | ||
162 | sprintf((char *)dp->name, "cpu%.2d", mid); | ||
163 | } | ||
164 | } | ||
165 | } | ||
166 | |||
167 | void __initdata (*prom_amba_init)(struct device_node *dp, struct device_node ***nextp) = NULL; | ||
168 | |||
169 | void __init leon_node_init(struct device_node *dp, struct device_node ***nextp) | ||
170 | { | ||
171 | if (prom_amba_init && | ||
172 | strcmp(dp->type, "ambapp") == 0 && | ||
173 | strcmp(dp->name, "ambapp0") == 0) { | ||
174 | prom_amba_init(dp, nextp); | ||
175 | } | ||
176 | } | ||
177 | |||
178 | void __init leon_init_IRQ(void) | ||
179 | { | ||
180 | sparc_init_timers = leon_init_timers; | ||
181 | |||
182 | BTFIXUPSET_CALL(enable_irq, leon_enable_irq, BTFIXUPCALL_NORM); | ||
183 | BTFIXUPSET_CALL(disable_irq, leon_disable_irq, BTFIXUPCALL_NORM); | ||
184 | BTFIXUPSET_CALL(enable_pil_irq, leon_enable_irq, BTFIXUPCALL_NORM); | ||
185 | BTFIXUPSET_CALL(disable_pil_irq, leon_disable_irq, BTFIXUPCALL_NORM); | ||
186 | |||
187 | BTFIXUPSET_CALL(clear_clock_irq, leon_clear_clock_irq, | ||
188 | BTFIXUPCALL_NORM); | ||
189 | BTFIXUPSET_CALL(load_profile_irq, leon_load_profile_irq, | ||
190 | BTFIXUPCALL_NOP); | ||
191 | |||
192 | #ifdef CONFIG_SMP | ||
193 | BTFIXUPSET_CALL(set_cpu_int, leon_set_cpu_int, BTFIXUPCALL_NORM); | ||
194 | BTFIXUPSET_CALL(clear_cpu_int, leon_clear_ipi, BTFIXUPCALL_NORM); | ||
195 | BTFIXUPSET_CALL(set_irq_udt, leon_set_udt, BTFIXUPCALL_NORM); | ||
196 | #endif | ||
197 | |||
198 | } | ||
199 | |||
200 | void __init leon_init(void) | ||
201 | { | ||
202 | prom_build_more = &leon_node_init; | ||
203 | } | ||
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c index b75bf502cd42..378eb53e0776 100644 --- a/arch/sparc/kernel/nmi.c +++ b/arch/sparc/kernel/nmi.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/delay.h> | 19 | #include <linux/delay.h> |
20 | #include <linux/smp.h> | 20 | #include <linux/smp.h> |
21 | 21 | ||
22 | #include <asm/perf_counter.h> | ||
22 | #include <asm/ptrace.h> | 23 | #include <asm/ptrace.h> |
23 | #include <asm/local.h> | 24 | #include <asm/local.h> |
24 | #include <asm/pcr.h> | 25 | #include <asm/pcr.h> |
@@ -31,13 +32,19 @@ | |||
31 | * level 14 as our IRQ off level. | 32 | * level 14 as our IRQ off level. |
32 | */ | 33 | */ |
33 | 34 | ||
34 | static int nmi_watchdog_active; | ||
35 | static int panic_on_timeout; | 35 | static int panic_on_timeout; |
36 | 36 | ||
37 | int nmi_usable; | 37 | /* nmi_active: |
38 | EXPORT_SYMBOL_GPL(nmi_usable); | 38 | * >0: the NMI watchdog is active, but can be disabled |
39 | * <0: the NMI watchdog has not been set up, and cannot be enabled | ||
40 | * 0: the NMI watchdog is disabled, but can be enabled | ||
41 | */ | ||
42 | atomic_t nmi_active = ATOMIC_INIT(0); /* oprofile uses this */ | ||
43 | EXPORT_SYMBOL(nmi_active); | ||
39 | 44 | ||
40 | static unsigned int nmi_hz = HZ; | 45 | static unsigned int nmi_hz = HZ; |
46 | static DEFINE_PER_CPU(short, wd_enabled); | ||
47 | static int endflag __initdata; | ||
41 | 48 | ||
42 | static DEFINE_PER_CPU(unsigned int, last_irq_sum); | 49 | static DEFINE_PER_CPU(unsigned int, last_irq_sum); |
43 | static DEFINE_PER_CPU(local_t, alert_counter); | 50 | static DEFINE_PER_CPU(local_t, alert_counter); |
@@ -45,7 +52,7 @@ static DEFINE_PER_CPU(int, nmi_touch); | |||
45 | 52 | ||
46 | void touch_nmi_watchdog(void) | 53 | void touch_nmi_watchdog(void) |
47 | { | 54 | { |
48 | if (nmi_watchdog_active) { | 55 | if (atomic_read(&nmi_active)) { |
49 | int cpu; | 56 | int cpu; |
50 | 57 | ||
51 | for_each_present_cpu(cpu) { | 58 | for_each_present_cpu(cpu) { |
@@ -78,6 +85,7 @@ static void die_nmi(const char *str, struct pt_regs *regs, int do_panic) | |||
78 | if (do_panic || panic_on_oops) | 85 | if (do_panic || panic_on_oops) |
79 | panic("Non maskable interrupt"); | 86 | panic("Non maskable interrupt"); |
80 | 87 | ||
88 | nmi_exit(); | ||
81 | local_irq_enable(); | 89 | local_irq_enable(); |
82 | do_exit(SIGBUS); | 90 | do_exit(SIGBUS); |
83 | } | 91 | } |
@@ -92,6 +100,8 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs) | |||
92 | 100 | ||
93 | local_cpu_data().__nmi_count++; | 101 | local_cpu_data().__nmi_count++; |
94 | 102 | ||
103 | nmi_enter(); | ||
104 | |||
95 | if (notify_die(DIE_NMI, "nmi", regs, 0, | 105 | if (notify_die(DIE_NMI, "nmi", regs, 0, |
96 | pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP) | 106 | pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP) |
97 | touched = 1; | 107 | touched = 1; |
@@ -110,10 +120,12 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs) | |||
110 | __get_cpu_var(last_irq_sum) = sum; | 120 | __get_cpu_var(last_irq_sum) = sum; |
111 | local_set(&__get_cpu_var(alert_counter), 0); | 121 | local_set(&__get_cpu_var(alert_counter), 0); |
112 | } | 122 | } |
113 | if (nmi_usable) { | 123 | if (__get_cpu_var(wd_enabled)) { |
114 | write_pic(picl_value(nmi_hz)); | 124 | write_pic(picl_value(nmi_hz)); |
115 | pcr_ops->write(pcr_enable); | 125 | pcr_ops->write(pcr_enable); |
116 | } | 126 | } |
127 | |||
128 | nmi_exit(); | ||
117 | } | 129 | } |
118 | 130 | ||
119 | static inline unsigned int get_nmi_count(int cpu) | 131 | static inline unsigned int get_nmi_count(int cpu) |
@@ -121,8 +133,6 @@ static inline unsigned int get_nmi_count(int cpu) | |||
121 | return cpu_data(cpu).__nmi_count; | 133 | return cpu_data(cpu).__nmi_count; |
122 | } | 134 | } |
123 | 135 | ||
124 | static int endflag __initdata; | ||
125 | |||
126 | static __init void nmi_cpu_busy(void *data) | 136 | static __init void nmi_cpu_busy(void *data) |
127 | { | 137 | { |
128 | local_irq_enable_in_hardirq(); | 138 | local_irq_enable_in_hardirq(); |
@@ -143,12 +153,15 @@ static void report_broken_nmi(int cpu, int *prev_nmi_count) | |||
143 | printk(KERN_WARNING | 153 | printk(KERN_WARNING |
144 | "and attach the output of the 'dmesg' command.\n"); | 154 | "and attach the output of the 'dmesg' command.\n"); |
145 | 155 | ||
146 | nmi_usable = 0; | 156 | per_cpu(wd_enabled, cpu) = 0; |
157 | atomic_dec(&nmi_active); | ||
147 | } | 158 | } |
148 | 159 | ||
149 | static void stop_watchdog(void *unused) | 160 | void stop_nmi_watchdog(void *unused) |
150 | { | 161 | { |
151 | pcr_ops->write(PCR_PIC_PRIV); | 162 | pcr_ops->write(PCR_PIC_PRIV); |
163 | __get_cpu_var(wd_enabled) = 0; | ||
164 | atomic_dec(&nmi_active); | ||
152 | } | 165 | } |
153 | 166 | ||
154 | static int __init check_nmi_watchdog(void) | 167 | static int __init check_nmi_watchdog(void) |
@@ -156,6 +169,9 @@ static int __init check_nmi_watchdog(void) | |||
156 | unsigned int *prev_nmi_count; | 169 | unsigned int *prev_nmi_count; |
157 | int cpu, err; | 170 | int cpu, err; |
158 | 171 | ||
172 | if (!atomic_read(&nmi_active)) | ||
173 | return 0; | ||
174 | |||
159 | prev_nmi_count = kmalloc(nr_cpu_ids * sizeof(unsigned int), GFP_KERNEL); | 175 | prev_nmi_count = kmalloc(nr_cpu_ids * sizeof(unsigned int), GFP_KERNEL); |
160 | if (!prev_nmi_count) { | 176 | if (!prev_nmi_count) { |
161 | err = -ENOMEM; | 177 | err = -ENOMEM; |
@@ -172,12 +188,15 @@ static int __init check_nmi_watchdog(void) | |||
172 | mdelay((20 * 1000) / nmi_hz); /* wait 20 ticks */ | 188 | mdelay((20 * 1000) / nmi_hz); /* wait 20 ticks */ |
173 | 189 | ||
174 | for_each_online_cpu(cpu) { | 190 | for_each_online_cpu(cpu) { |
191 | if (!per_cpu(wd_enabled, cpu)) | ||
192 | continue; | ||
175 | if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5) | 193 | if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5) |
176 | report_broken_nmi(cpu, prev_nmi_count); | 194 | report_broken_nmi(cpu, prev_nmi_count); |
177 | } | 195 | } |
178 | endflag = 1; | 196 | endflag = 1; |
179 | if (!nmi_usable) { | 197 | if (!atomic_read(&nmi_active)) { |
180 | kfree(prev_nmi_count); | 198 | kfree(prev_nmi_count); |
199 | atomic_set(&nmi_active, -1); | ||
181 | err = -ENODEV; | 200 | err = -ENODEV; |
182 | goto error; | 201 | goto error; |
183 | } | 202 | } |
@@ -188,12 +207,26 @@ static int __init check_nmi_watchdog(void) | |||
188 | kfree(prev_nmi_count); | 207 | kfree(prev_nmi_count); |
189 | return 0; | 208 | return 0; |
190 | error: | 209 | error: |
191 | on_each_cpu(stop_watchdog, NULL, 1); | 210 | on_each_cpu(stop_nmi_watchdog, NULL, 1); |
192 | return err; | 211 | return err; |
193 | } | 212 | } |
194 | 213 | ||
195 | static void start_watchdog(void *unused) | 214 | void start_nmi_watchdog(void *unused) |
196 | { | 215 | { |
216 | __get_cpu_var(wd_enabled) = 1; | ||
217 | atomic_inc(&nmi_active); | ||
218 | |||
219 | pcr_ops->write(PCR_PIC_PRIV); | ||
220 | write_pic(picl_value(nmi_hz)); | ||
221 | |||
222 | pcr_ops->write(pcr_enable); | ||
223 | } | ||
224 | |||
225 | static void nmi_adjust_hz_one(void *unused) | ||
226 | { | ||
227 | if (!__get_cpu_var(wd_enabled)) | ||
228 | return; | ||
229 | |||
197 | pcr_ops->write(PCR_PIC_PRIV); | 230 | pcr_ops->write(PCR_PIC_PRIV); |
198 | write_pic(picl_value(nmi_hz)); | 231 | write_pic(picl_value(nmi_hz)); |
199 | 232 | ||
@@ -203,13 +236,13 @@ static void start_watchdog(void *unused) | |||
203 | void nmi_adjust_hz(unsigned int new_hz) | 236 | void nmi_adjust_hz(unsigned int new_hz) |
204 | { | 237 | { |
205 | nmi_hz = new_hz; | 238 | nmi_hz = new_hz; |
206 | on_each_cpu(start_watchdog, NULL, 1); | 239 | on_each_cpu(nmi_adjust_hz_one, NULL, 1); |
207 | } | 240 | } |
208 | EXPORT_SYMBOL_GPL(nmi_adjust_hz); | 241 | EXPORT_SYMBOL_GPL(nmi_adjust_hz); |
209 | 242 | ||
210 | static int nmi_shutdown(struct notifier_block *nb, unsigned long cmd, void *p) | 243 | static int nmi_shutdown(struct notifier_block *nb, unsigned long cmd, void *p) |
211 | { | 244 | { |
212 | on_each_cpu(stop_watchdog, NULL, 1); | 245 | on_each_cpu(stop_nmi_watchdog, NULL, 1); |
213 | return 0; | 246 | return 0; |
214 | } | 247 | } |
215 | 248 | ||
@@ -221,18 +254,19 @@ int __init nmi_init(void) | |||
221 | { | 254 | { |
222 | int err; | 255 | int err; |
223 | 256 | ||
224 | nmi_usable = 1; | 257 | on_each_cpu(start_nmi_watchdog, NULL, 1); |
225 | |||
226 | on_each_cpu(start_watchdog, NULL, 1); | ||
227 | 258 | ||
228 | err = check_nmi_watchdog(); | 259 | err = check_nmi_watchdog(); |
229 | if (!err) { | 260 | if (!err) { |
230 | err = register_reboot_notifier(&nmi_reboot_notifier); | 261 | err = register_reboot_notifier(&nmi_reboot_notifier); |
231 | if (err) { | 262 | if (err) { |
232 | nmi_usable = 0; | 263 | on_each_cpu(stop_nmi_watchdog, NULL, 1); |
233 | on_each_cpu(stop_watchdog, NULL, 1); | 264 | atomic_set(&nmi_active, -1); |
234 | } | 265 | } |
235 | } | 266 | } |
267 | if (!err) | ||
268 | init_hw_perf_counters(); | ||
269 | |||
236 | return err; | 270 | return err; |
237 | } | 271 | } |
238 | 272 | ||
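An editor's sketch, not part of the patch, of the nmi_active convention introduced above (the helper name is hypothetical; the assumption is that external users such as oprofile observe the same three states):

	/* > 0: running, may be stopped;  == 0: stopped, may be started;
	 *  < 0: setup failed, must never be started. */
	static bool nmi_watchdog_startable(void)
	{
		return atomic_read(&nmi_active) >= 0;
	}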
diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c index 90396702ea2c..4c26eb59e742 100644 --- a/arch/sparc/kernel/of_device_32.c +++ b/arch/sparc/kernel/of_device_32.c | |||
@@ -9,6 +9,8 @@ | |||
9 | #include <linux/irq.h> | 9 | #include <linux/irq.h> |
10 | #include <linux/of_device.h> | 10 | #include <linux/of_device.h> |
11 | #include <linux/of_platform.h> | 11 | #include <linux/of_platform.h> |
12 | #include <asm/leon.h> | ||
13 | #include <asm/leon_amba.h> | ||
12 | 14 | ||
13 | #include "of_device_common.h" | 15 | #include "of_device_common.h" |
14 | 16 | ||
@@ -97,6 +99,35 @@ static unsigned long of_bus_sbus_get_flags(const u32 *addr, unsigned long flags) | |||
97 | return IORESOURCE_MEM; | 99 | return IORESOURCE_MEM; |
98 | } | 100 | } |
99 | 101 | ||
102 | /* | ||
103 | * AMBAPP bus specific translator | ||
104 | */ | ||
105 | |||
106 | static int of_bus_ambapp_match(struct device_node *np) | ||
107 | { | ||
108 | return !strcmp(np->name, "ambapp"); | ||
109 | } | ||
110 | |||
111 | static void of_bus_ambapp_count_cells(struct device_node *child, | ||
112 | int *addrc, int *sizec) | ||
113 | { | ||
114 | if (addrc) | ||
115 | *addrc = 1; | ||
116 | if (sizec) | ||
117 | *sizec = 1; | ||
118 | } | ||
119 | |||
120 | static int of_bus_ambapp_map(u32 *addr, const u32 *range, | ||
121 | int na, int ns, int pna) | ||
122 | { | ||
123 | return of_bus_default_map(addr, range, na, ns, pna); | ||
124 | } | ||
125 | |||
126 | static unsigned long of_bus_ambapp_get_flags(const u32 *addr, | ||
127 | unsigned long flags) | ||
128 | { | ||
129 | return IORESOURCE_MEM; | ||
130 | } | ||
100 | 131 | ||
101 | /* | 132 | /* |
102 | * Array of bus specific translators | 133 | * Array of bus specific translators |
@@ -121,6 +152,15 @@ static struct of_bus of_busses[] = { | |||
121 | .map = of_bus_default_map, | 152 | .map = of_bus_default_map, |
122 | .get_flags = of_bus_sbus_get_flags, | 153 | .get_flags = of_bus_sbus_get_flags, |
123 | }, | 154 | }, |
155 | /* AMBA */ | ||
156 | { | ||
157 | .name = "ambapp", | ||
158 | .addr_prop_name = "reg", | ||
159 | .match = of_bus_ambapp_match, | ||
160 | .count_cells = of_bus_ambapp_count_cells, | ||
161 | .map = of_bus_ambapp_map, | ||
162 | .get_flags = of_bus_ambapp_get_flags, | ||
163 | }, | ||
124 | /* Default */ | 164 | /* Default */ |
125 | { | 165 | { |
126 | .name = "default", | 166 | .name = "default", |
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c index 57859ad23547..c68648662802 100644 --- a/arch/sparc/kernel/pci.c +++ b/arch/sparc/kernel/pci.c | |||
@@ -1039,7 +1039,7 @@ static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit) | |||
1039 | pci_dev_put(ali_isa_bridge); | 1039 | pci_dev_put(ali_isa_bridge); |
1040 | } | 1040 | } |
1041 | 1041 | ||
1042 | int pci_dma_supported(struct pci_dev *pdev, u64 device_mask) | 1042 | int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask) |
1043 | { | 1043 | { |
1044 | u64 dma_addr_mask; | 1044 | u64 dma_addr_mask; |
1045 | 1045 | ||
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c index 2485eaa23101..23c33ff9c31e 100644 --- a/arch/sparc/kernel/pci_sun4v.c +++ b/arch/sparc/kernel/pci_sun4v.c | |||
@@ -232,7 +232,8 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu, | |||
232 | 232 | ||
233 | static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page, | 233 | static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page, |
234 | unsigned long offset, size_t sz, | 234 | unsigned long offset, size_t sz, |
235 | enum dma_data_direction direction) | 235 | enum dma_data_direction direction, |
236 | struct dma_attrs *attrs) | ||
236 | { | 237 | { |
237 | struct iommu *iommu; | 238 | struct iommu *iommu; |
238 | unsigned long flags, npages, oaddr; | 239 | unsigned long flags, npages, oaddr; |
@@ -296,7 +297,8 @@ iommu_map_fail: | |||
296 | } | 297 | } |
297 | 298 | ||
298 | static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr, | 299 | static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr, |
299 | size_t sz, enum dma_data_direction direction) | 300 | size_t sz, enum dma_data_direction direction, |
301 | struct dma_attrs *attrs) | ||
300 | { | 302 | { |
301 | struct pci_pbm_info *pbm; | 303 | struct pci_pbm_info *pbm; |
302 | struct iommu *iommu; | 304 | struct iommu *iommu; |
@@ -336,7 +338,8 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr, | |||
336 | } | 338 | } |
337 | 339 | ||
338 | static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, | 340 | static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, |
339 | int nelems, enum dma_data_direction direction) | 341 | int nelems, enum dma_data_direction direction, |
342 | struct dma_attrs *attrs) | ||
340 | { | 343 | { |
341 | struct scatterlist *s, *outs, *segstart; | 344 | struct scatterlist *s, *outs, *segstart; |
342 | unsigned long flags, handle, prot; | 345 | unsigned long flags, handle, prot; |
@@ -478,7 +481,8 @@ iommu_map_failed: | |||
478 | } | 481 | } |
479 | 482 | ||
480 | static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, | 483 | static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, |
481 | int nelems, enum dma_data_direction direction) | 484 | int nelems, enum dma_data_direction direction, |
485 | struct dma_attrs *attrs) | ||
482 | { | 486 | { |
483 | struct pci_pbm_info *pbm; | 487 | struct pci_pbm_info *pbm; |
484 | struct scatterlist *sg; | 488 | struct scatterlist *sg; |
@@ -521,29 +525,13 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, | |||
521 | spin_unlock_irqrestore(&iommu->lock, flags); | 525 | spin_unlock_irqrestore(&iommu->lock, flags); |
522 | } | 526 | } |
523 | 527 | ||
524 | static void dma_4v_sync_single_for_cpu(struct device *dev, | 528 | static struct dma_map_ops sun4v_dma_ops = { |
525 | dma_addr_t bus_addr, size_t sz, | ||
526 | enum dma_data_direction direction) | ||
527 | { | ||
528 | /* Nothing to do... */ | ||
529 | } | ||
530 | |||
531 | static void dma_4v_sync_sg_for_cpu(struct device *dev, | ||
532 | struct scatterlist *sglist, int nelems, | ||
533 | enum dma_data_direction direction) | ||
534 | { | ||
535 | /* Nothing to do... */ | ||
536 | } | ||
537 | |||
538 | static const struct dma_ops sun4v_dma_ops = { | ||
539 | .alloc_coherent = dma_4v_alloc_coherent, | 529 | .alloc_coherent = dma_4v_alloc_coherent, |
540 | .free_coherent = dma_4v_free_coherent, | 530 | .free_coherent = dma_4v_free_coherent, |
541 | .map_page = dma_4v_map_page, | 531 | .map_page = dma_4v_map_page, |
542 | .unmap_page = dma_4v_unmap_page, | 532 | .unmap_page = dma_4v_unmap_page, |
543 | .map_sg = dma_4v_map_sg, | 533 | .map_sg = dma_4v_map_sg, |
544 | .unmap_sg = dma_4v_unmap_sg, | 534 | .unmap_sg = dma_4v_unmap_sg, |
545 | .sync_single_for_cpu = dma_4v_sync_single_for_cpu, | ||
546 | .sync_sg_for_cpu = dma_4v_sync_sg_for_cpu, | ||
547 | }; | 535 | }; |
548 | 536 | ||
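/*
 * Editor's sketch, not part of the patch: dropping the empty sun4v sync
 * hooks is safe because the generic dma-mapping wrapper this series
 * switches to NULL-checks each hook before calling it, roughly:
 */
static inline void sketch_dma_sync_single_for_cpu(struct device *dev,
						  dma_addr_t addr, size_t size,
						  enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->sync_single_for_cpu)	/* NULL for sun4v: nothing to do */
		ops->sync_single_for_cpu(dev, addr, size, dir);
}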
549 | static void __devinit pci_sun4v_scan_bus(struct pci_pbm_info *pbm, | 537 | static void __devinit pci_sun4v_scan_bus(struct pci_pbm_info *pbm, |
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c index 1ae8cdd7e703..68ff00107073 100644 --- a/arch/sparc/kernel/pcr.c +++ b/arch/sparc/kernel/pcr.c | |||
@@ -7,6 +7,8 @@ | |||
7 | #include <linux/init.h> | 7 | #include <linux/init.h> |
8 | #include <linux/irq.h> | 8 | #include <linux/irq.h> |
9 | 9 | ||
10 | #include <linux/perf_counter.h> | ||
11 | |||
10 | #include <asm/pil.h> | 12 | #include <asm/pil.h> |
11 | #include <asm/pcr.h> | 13 | #include <asm/pcr.h> |
12 | #include <asm/nmi.h> | 14 | #include <asm/nmi.h> |
@@ -34,10 +36,20 @@ unsigned int picl_shift; | |||
34 | */ | 36 | */ |
35 | void deferred_pcr_work_irq(int irq, struct pt_regs *regs) | 37 | void deferred_pcr_work_irq(int irq, struct pt_regs *regs) |
36 | { | 38 | { |
39 | struct pt_regs *old_regs; | ||
40 | |||
37 | clear_softint(1 << PIL_DEFERRED_PCR_WORK); | 41 | clear_softint(1 << PIL_DEFERRED_PCR_WORK); |
42 | |||
43 | old_regs = set_irq_regs(regs); | ||
44 | irq_enter(); | ||
45 | #ifdef CONFIG_PERF_COUNTERS | ||
46 | perf_counter_do_pending(); | ||
47 | #endif | ||
48 | irq_exit(); | ||
49 | set_irq_regs(old_regs); | ||
38 | } | 50 | } |
39 | 51 | ||
40 | void schedule_deferred_pcr_work(void) | 52 | void set_perf_counter_pending(void) |
41 | { | 53 | { |
42 | set_softint(1 << PIL_DEFERRED_PCR_WORK); | 54 | set_softint(1 << PIL_DEFERRED_PCR_WORK); |
43 | } | 55 | } |
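An editor's sketch, not part of the patch: the intended driver of the softint deferral above is a PMU overflow handler running at a level where it must not touch scheduler state, so it only marks work pending (the function name is hypothetical):

	static void sketch_pmu_overflow_at_nmi_level(void)
	{
		/* Raises PIL_DEFERRED_PCR_WORK; perf_counter_do_pending()
		 * then runs from deferred_pcr_work_irq() at a safe level. */
		set_perf_counter_pending();
	}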
diff --git a/arch/sparc/kernel/perf_counter.c b/arch/sparc/kernel/perf_counter.c new file mode 100644 index 000000000000..09de4035eaa9 --- /dev/null +++ b/arch/sparc/kernel/perf_counter.c | |||
@@ -0,0 +1,557 @@ | |||
1 | /* Performance counter support for sparc64. | ||
2 | * | ||
3 | * Copyright (C) 2009 David S. Miller <davem@davemloft.net> | ||
4 | * | ||
5 | * This code is based almost entirely upon the x86 perf counter | ||
6 | * code, which is: | ||
7 | * | ||
8 | * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> | ||
9 | * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar | ||
10 | * Copyright (C) 2009 Jaswinder Singh Rajput | ||
11 | * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter | ||
12 | * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> | ||
13 | */ | ||
14 | |||
15 | #include <linux/perf_counter.h> | ||
16 | #include <linux/kprobes.h> | ||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/kdebug.h> | ||
19 | #include <linux/mutex.h> | ||
20 | |||
21 | #include <asm/cpudata.h> | ||
22 | #include <asm/atomic.h> | ||
23 | #include <asm/nmi.h> | ||
24 | #include <asm/pcr.h> | ||
25 | |||
26 | /* Sparc64 chips have two performance counters, 32-bits each, with | ||
27 | * overflow interrupts generated on transition from 0xffffffff to 0. | ||
28 | * The counters are accessed in one go using a 64-bit register. | ||
29 | * | ||
30 | * Both counters are controlled using a single control register. The | ||
31 | * only way to stop all sampling is to clear all of the context (user, | ||
32 | * supervisor, hypervisor) sampling enable bits. But these bits apply | ||
33 | * to both counters, thus the two counters can't be enabled/disabled | ||
34 | * individually. | ||
35 | * | ||
36 | * The control register has two event fields, one for each of the two | ||
37 | * counters. It's thus nearly impossible to have one counter going | ||
38 | * while keeping the other one stopped. Therefore it is possible to | ||
39 | * get overflow interrupts for counters not currently "in use" and | ||
40 | * that condition must be checked in the overflow interrupt handler. | ||
41 | * | ||
42 | * So we use a hack, in that we program inactive counters with the | ||
43 | * "sw_count0" and "sw_count1" events. These count how many times | ||
44 | * the instruction "sethi %hi(0xfc000), %g0" is executed. It's an | ||
45 | * unusual way to encode a NOP and therefore will not trigger in | ||
46 | * normal code. | ||
47 | */ | ||
48 | |||
49 | #define MAX_HWCOUNTERS 2 | ||
50 | #define MAX_PERIOD ((1UL << 32) - 1) | ||
51 | |||
52 | #define PIC_UPPER_INDEX 0 | ||
53 | #define PIC_LOWER_INDEX 1 | ||
54 | |||
55 | struct cpu_hw_counters { | ||
56 | struct perf_counter *counters[MAX_HWCOUNTERS]; | ||
57 | unsigned long used_mask[BITS_TO_LONGS(MAX_HWCOUNTERS)]; | ||
58 | unsigned long active_mask[BITS_TO_LONGS(MAX_HWCOUNTERS)]; | ||
59 | int enabled; | ||
60 | }; | ||
61 | DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = { .enabled = 1, }; | ||
62 | |||
63 | struct perf_event_map { | ||
64 | u16 encoding; | ||
65 | u8 pic_mask; | ||
66 | #define PIC_NONE 0x00 | ||
67 | #define PIC_UPPER 0x01 | ||
68 | #define PIC_LOWER 0x02 | ||
69 | }; | ||
70 | |||
71 | struct sparc_pmu { | ||
72 | const struct perf_event_map *(*event_map)(int); | ||
73 | int max_events; | ||
74 | int upper_shift; | ||
75 | int lower_shift; | ||
76 | int event_mask; | ||
77 | int hv_bit; | ||
78 | int irq_bit; | ||
79 | int upper_nop; | ||
80 | int lower_nop; | ||
81 | }; | ||
82 | |||
83 | static const struct perf_event_map ultra3i_perfmon_event_map[] = { | ||
84 | [PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER }, | ||
85 | [PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER }, | ||
86 | [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER }, | ||
87 | [PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER }, | ||
88 | }; | ||
89 | |||
90 | static const struct perf_event_map *ultra3i_event_map(int event) | ||
91 | { | ||
92 | return &ultra3i_perfmon_event_map[event]; | ||
93 | } | ||
94 | |||
95 | static const struct sparc_pmu ultra3i_pmu = { | ||
96 | .event_map = ultra3i_event_map, | ||
97 | .max_events = ARRAY_SIZE(ultra3i_perfmon_event_map), | ||
98 | .upper_shift = 11, | ||
99 | .lower_shift = 4, | ||
100 | .event_mask = 0x3f, | ||
101 | .upper_nop = 0x1c, | ||
102 | .lower_nop = 0x14, | ||
103 | }; | ||
104 | |||
105 | static const struct perf_event_map niagara2_perfmon_event_map[] = { | ||
106 | [PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER }, | ||
107 | [PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER }, | ||
108 | [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0208, PIC_UPPER | PIC_LOWER }, | ||
109 | [PERF_COUNT_HW_CACHE_MISSES] = { 0x0302, PIC_UPPER | PIC_LOWER }, | ||
110 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x0201, PIC_UPPER | PIC_LOWER }, | ||
111 | [PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER }, | ||
112 | }; | ||
113 | |||
114 | static const struct perf_event_map *niagara2_event_map(int event) | ||
115 | { | ||
116 | return &niagara2_perfmon_event_map[event]; | ||
117 | } | ||
118 | |||
119 | static const struct sparc_pmu niagara2_pmu = { | ||
120 | .event_map = niagara2_event_map, | ||
121 | .max_events = ARRAY_SIZE(niagara2_perfmon_event_map), | ||
122 | .upper_shift = 19, | ||
123 | .lower_shift = 6, | ||
124 | .event_mask = 0xfff, | ||
125 | .hv_bit = 0x8, | ||
126 | .irq_bit = 0x03, | ||
127 | .upper_nop = 0x220, | ||
128 | .lower_nop = 0x220, | ||
129 | }; | ||
130 | |||
131 | static const struct sparc_pmu *sparc_pmu __read_mostly; | ||
132 | |||
133 | static u64 event_encoding(u64 event, int idx) | ||
134 | { | ||
135 | if (idx == PIC_UPPER_INDEX) | ||
136 | event <<= sparc_pmu->upper_shift; | ||
137 | else | ||
138 | event <<= sparc_pmu->lower_shift; | ||
139 | return event; | ||
140 | } | ||
141 | |||
142 | static u64 mask_for_index(int idx) | ||
143 | { | ||
144 | return event_encoding(sparc_pmu->event_mask, idx); | ||
145 | } | ||
146 | |||
147 | static u64 nop_for_index(int idx) | ||
148 | { | ||
149 | return event_encoding(idx == PIC_UPPER_INDEX ? | ||
150 | sparc_pmu->upper_nop : | ||
151 | sparc_pmu->lower_nop, idx); | ||
152 | } | ||
153 | |||
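/*
 * Editor's worked example for the two helpers above, not part of the
 * patch, using the ultra3i parameters (event_mask 0x3f, upper_shift 11,
 * upper_nop 0x1c):
 *
 *	mask_for_index(PIC_UPPER_INDEX) == 0x3f << 11 == 0x1f800
 *	nop_for_index(PIC_UPPER_INDEX)  == 0x1c << 11 == 0x0e000
 *
 * so (val & ~mask) | nop in the disable path below replaces only the
 * upper event field with the no-op event, leaving the lower counter's
 * field untouched.
 */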
154 | static inline void sparc_pmu_enable_counter(struct hw_perf_counter *hwc, | ||
155 | int idx) | ||
156 | { | ||
157 | u64 val, mask = mask_for_index(idx); | ||
158 | |||
159 | val = pcr_ops->read(); | ||
160 | pcr_ops->write((val & ~mask) | hwc->config); | ||
161 | } | ||
162 | |||
163 | static inline void sparc_pmu_disable_counter(struct hw_perf_counter *hwc, | ||
164 | int idx) | ||
165 | { | ||
166 | u64 mask = mask_for_index(idx); | ||
167 | u64 nop = nop_for_index(idx); | ||
168 | u64 val = pcr_ops->read(); | ||
169 | |||
170 | pcr_ops->write((val & ~mask) | nop); | ||
171 | } | ||
172 | |||
173 | void hw_perf_enable(void) | ||
174 | { | ||
175 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); | ||
176 | u64 val; | ||
177 | int i; | ||
178 | |||
179 | if (cpuc->enabled) | ||
180 | return; | ||
181 | |||
182 | cpuc->enabled = 1; | ||
183 | barrier(); | ||
184 | |||
185 | val = pcr_ops->read(); | ||
186 | |||
187 | for (i = 0; i < MAX_HWCOUNTERS; i++) { | ||
188 | struct perf_counter *cp = cpuc->counters[i]; | ||
189 | struct hw_perf_counter *hwc; | ||
190 | |||
191 | if (!cp) | ||
192 | continue; | ||
193 | hwc = &cp->hw; | ||
194 | val |= hwc->config_base; | ||
195 | } | ||
196 | |||
197 | pcr_ops->write(val); | ||
198 | } | ||
199 | |||
200 | void hw_perf_disable(void) | ||
201 | { | ||
202 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); | ||
203 | u64 val; | ||
204 | |||
205 | if (!cpuc->enabled) | ||
206 | return; | ||
207 | |||
208 | cpuc->enabled = 0; | ||
209 | |||
210 | val = pcr_ops->read(); | ||
211 | val &= ~(PCR_UTRACE | PCR_STRACE | | ||
212 | sparc_pmu->hv_bit | sparc_pmu->irq_bit); | ||
213 | pcr_ops->write(val); | ||
214 | } | ||
215 | |||
216 | static u32 read_pmc(int idx) | ||
217 | { | ||
218 | u64 val; | ||
219 | |||
220 | read_pic(val); | ||
221 | if (idx == PIC_UPPER_INDEX) | ||
222 | val >>= 32; | ||
223 | |||
224 | return val & 0xffffffff; | ||
225 | } | ||
226 | |||
227 | static void write_pmc(int idx, u64 val) | ||
228 | { | ||
229 | u64 shift, mask, pic; | ||
230 | |||
231 | shift = 0; | ||
232 | if (idx == PIC_UPPER_INDEX) | ||
233 | shift = 32; | ||
234 | |||
235 | mask = ((u64) 0xffffffff) << shift; | ||
236 | val <<= shift; | ||
237 | |||
238 | read_pic(pic); | ||
239 | pic &= ~mask; | ||
240 | pic |= val; | ||
241 | write_pic(pic); | ||
242 | } | ||
243 | |||
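/*
 * Editor's illustration of the %pic layout assumed by read_pmc() and
 * write_pmc() above: the lower counter occupies bits 31:0 and the upper
 * counter bits 63:32, so updating one half is a read-modify-write of
 * the whole register, e.g. for the upper counter:
 *
 *	pic = (pic & 0xffffffffUL) | (val << 32);
 */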
244 | static int sparc_perf_counter_set_period(struct perf_counter *counter, | ||
245 | struct hw_perf_counter *hwc, int idx) | ||
246 | { | ||
247 | s64 left = atomic64_read(&hwc->period_left); | ||
248 | s64 period = hwc->sample_period; | ||
249 | int ret = 0; | ||
250 | |||
251 | if (unlikely(left <= -period)) { | ||
252 | left = period; | ||
253 | atomic64_set(&hwc->period_left, left); | ||
254 | hwc->last_period = period; | ||
255 | ret = 1; | ||
256 | } | ||
257 | |||
258 | if (unlikely(left <= 0)) { | ||
259 | left += period; | ||
260 | atomic64_set(&hwc->period_left, left); | ||
261 | hwc->last_period = period; | ||
262 | ret = 1; | ||
263 | } | ||
264 | if (left > MAX_PERIOD) | ||
265 | left = MAX_PERIOD; | ||
266 | |||
267 | atomic64_set(&hwc->prev_count, (u64)-left); | ||
268 | |||
269 | write_pmc(idx, (u64)(-left) & 0xffffffff); | ||
270 | |||
271 | perf_counter_update_userpage(counter); | ||
272 | |||
273 | return ret; | ||
274 | } | ||
275 | |||
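/*
 * Editor's worked example of the negative programming above, not part
 * of the patch: with a sample period of 0x1000 the counter is loaded
 * with (u64)-0x1000 & 0xffffffff == 0xfffff000, so it wraps from
 * 0xffffffff to 0, raising the overflow interrupt after exactly 0x1000
 * events.
 */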
276 | static int sparc_pmu_enable(struct perf_counter *counter) | ||
277 | { | ||
278 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); | ||
279 | struct hw_perf_counter *hwc = &counter->hw; | ||
280 | int idx = hwc->idx; | ||
281 | |||
282 | if (test_and_set_bit(idx, cpuc->used_mask)) | ||
283 | return -EAGAIN; | ||
284 | |||
285 | sparc_pmu_disable_counter(hwc, idx); | ||
286 | |||
287 | cpuc->counters[idx] = counter; | ||
288 | set_bit(idx, cpuc->active_mask); | ||
289 | |||
290 | sparc_perf_counter_set_period(counter, hwc, idx); | ||
291 | sparc_pmu_enable_counter(hwc, idx); | ||
292 | perf_counter_update_userpage(counter); | ||
293 | return 0; | ||
294 | } | ||
295 | |||
296 | static u64 sparc_perf_counter_update(struct perf_counter *counter, | ||
297 | struct hw_perf_counter *hwc, int idx) | ||
298 | { | ||
299 | int shift = 64 - 32; | ||
300 | u64 prev_raw_count, new_raw_count; | ||
301 | s64 delta; | ||
302 | |||
303 | again: | ||
304 | prev_raw_count = atomic64_read(&hwc->prev_count); | ||
305 | new_raw_count = read_pmc(idx); | ||
306 | |||
307 | if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count, | ||
308 | new_raw_count) != prev_raw_count) | ||
309 | goto again; | ||
310 | |||
311 | delta = (new_raw_count << shift) - (prev_raw_count << shift); | ||
312 | delta >>= shift; | ||
313 | |||
314 | atomic64_add(delta, &counter->count); | ||
315 | atomic64_sub(delta, &hwc->period_left); | ||
316 | |||
317 | return new_raw_count; | ||
318 | } | ||
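
The shift pair in sparc_perf_counter_update() is a sign-extension trick: promoting both 32-bit raw counts into the top half of a 64-bit word and shifting back down yields a correct positive delta even when the counter wrapped between the two reads (the kernel, like this sketch, assumes arithmetic right shift on signed types). A hedged userspace replay:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int shift = 64 - 32;
	uint64_t prev_raw = 0xfffffff0;   /* sampled just before the wrap */
	uint64_t new_raw  = 0x00000010;   /* sampled just after the wrap */
	int64_t delta;

	delta = (int64_t)((new_raw << shift) - (prev_raw << shift));
	delta >>= shift;                  /* arithmetic shift sign-extends */

	printf("delta = %lld\n", (long long)delta);   /* prints 32 */
	return 0;
}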
319 | |||
320 | static void sparc_pmu_disable(struct perf_counter *counter) | ||
321 | { | ||
322 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); | ||
323 | struct hw_perf_counter *hwc = &counter->hw; | ||
324 | int idx = hwc->idx; | ||
325 | |||
326 | clear_bit(idx, cpuc->active_mask); | ||
327 | sparc_pmu_disable_counter(hwc, idx); | ||
328 | |||
329 | barrier(); | ||
330 | |||
331 | sparc_perf_counter_update(counter, hwc, idx); | ||
332 | cpuc->counters[idx] = NULL; | ||
333 | clear_bit(idx, cpuc->used_mask); | ||
334 | |||
335 | perf_counter_update_userpage(counter); | ||
336 | } | ||
337 | |||
338 | static void sparc_pmu_read(struct perf_counter *counter) | ||
339 | { | ||
340 | struct hw_perf_counter *hwc = &counter->hw; | ||
341 | sparc_perf_counter_update(counter, hwc, hwc->idx); | ||
342 | } | ||
343 | |||
344 | static void sparc_pmu_unthrottle(struct perf_counter *counter) | ||
345 | { | ||
346 | struct hw_perf_counter *hwc = &counter->hw; | ||
347 | sparc_pmu_enable_counter(hwc, hwc->idx); | ||
348 | } | ||
349 | |||
350 | static atomic_t active_counters = ATOMIC_INIT(0); | ||
351 | static DEFINE_MUTEX(pmc_grab_mutex); | ||
352 | |||
353 | void perf_counter_grab_pmc(void) | ||
354 | { | ||
355 | if (atomic_inc_not_zero(&active_counters)) | ||
356 | return; | ||
357 | |||
358 | mutex_lock(&pmc_grab_mutex); | ||
359 | if (atomic_read(&active_counters) == 0) { | ||
360 | if (atomic_read(&nmi_active) > 0) { | ||
361 | on_each_cpu(stop_nmi_watchdog, NULL, 1); | ||
362 | BUG_ON(atomic_read(&nmi_active) != 0); | ||
363 | } | ||
364 | atomic_inc(&active_counters); | ||
365 | } | ||
366 | mutex_unlock(&pmc_grab_mutex); | ||
367 | } | ||
368 | |||
369 | void perf_counter_release_pmc(void) | ||
370 | { | ||
371 | if (atomic_dec_and_mutex_lock(&active_counters, &pmc_grab_mutex)) { | ||
372 | if (atomic_read(&nmi_active) == 0) | ||
373 | on_each_cpu(start_nmi_watchdog, NULL, 1); | ||
374 | mutex_unlock(&pmc_grab_mutex); | ||
375 | } | ||
376 | } | ||
377 | |||
378 | static void hw_perf_counter_destroy(struct perf_counter *counter) | ||
379 | { | ||
380 | perf_counter_release_pmc(); | ||
381 | } | ||
382 | |||
383 | static int __hw_perf_counter_init(struct perf_counter *counter) | ||
384 | { | ||
385 | struct perf_counter_attr *attr = &counter->attr; | ||
386 | struct hw_perf_counter *hwc = &counter->hw; | ||
387 | const struct perf_event_map *pmap; | ||
388 | u64 enc; | ||
389 | |||
390 | if (atomic_read(&nmi_active) < 0) | ||
391 | return -ENODEV; | ||
392 | |||
393 | if (attr->type != PERF_TYPE_HARDWARE) | ||
394 | return -EOPNOTSUPP; | ||
395 | |||
396 | if (attr->config >= sparc_pmu->max_events) | ||
397 | return -EINVAL; | ||
398 | |||
399 | perf_counter_grab_pmc(); | ||
400 | counter->destroy = hw_perf_counter_destroy; | ||
401 | |||
402 | /* We save the enable bits in the config_base. So to | ||
403 | * turn off sampling just write 'config', and to enable | ||
404 | * things write 'config | config_base'. | ||
405 | */ | ||
406 | hwc->config_base = sparc_pmu->irq_bit; | ||
407 | if (!attr->exclude_user) | ||
408 | hwc->config_base |= PCR_UTRACE; | ||
409 | if (!attr->exclude_kernel) | ||
410 | hwc->config_base |= PCR_STRACE; | ||
411 | if (!attr->exclude_hv) | ||
412 | hwc->config_base |= sparc_pmu->hv_bit; | ||
413 | |||
414 | if (!hwc->sample_period) { | ||
415 | hwc->sample_period = MAX_PERIOD; | ||
416 | hwc->last_period = hwc->sample_period; | ||
417 | atomic64_set(&hwc->period_left, hwc->sample_period); | ||
418 | } | ||
419 | |||
420 | pmap = sparc_pmu->event_map(attr->config); | ||
421 | |||
422 | enc = pmap->encoding; | ||
423 | if (pmap->pic_mask & PIC_UPPER) { | ||
424 | hwc->idx = PIC_UPPER_INDEX; | ||
425 | enc <<= sparc_pmu->upper_shift; | ||
426 | } else { | ||
427 | hwc->idx = PIC_LOWER_INDEX; | ||
428 | enc <<= sparc_pmu->lower_shift; | ||
429 | } | ||
430 | |||
431 | hwc->config |= enc; | ||
432 | return 0; | ||
433 | } | ||
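
The comment in __hw_perf_counter_init() is worth restating with values: the event encoding goes in hwc->config while all enable bits (user/system trace, hypervisor bit, interrupt bit) are collected in hwc->config_base, so stopping a counter means writing 'config' and starting it means writing 'config | config_base'. A minimal illustration, with invented bit positions:

#include <stdio.h>
#include <stdint.h>

/* Invented bit positions, for illustration only. */
#define DEMO_PCR_UTRACE 0x01    /* count user events */
#define DEMO_PCR_STRACE 0x02    /* count system events */
#define DEMO_PCR_IRQ    0x04    /* overflow interrupt enable */

int main(void)
{
	uint64_t config      = 0x30;    /* invented event encoding */
	uint64_t config_base = DEMO_PCR_UTRACE | DEMO_PCR_STRACE |
			       DEMO_PCR_IRQ;

	printf("running: %#llx\n", (unsigned long long)(config | config_base));
	printf("stopped: %#llx\n", (unsigned long long)config);
	return 0;
}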
434 | |||
435 | static const struct pmu pmu = { | ||
436 | .enable = sparc_pmu_enable, | ||
437 | .disable = sparc_pmu_disable, | ||
438 | .read = sparc_pmu_read, | ||
439 | .unthrottle = sparc_pmu_unthrottle, | ||
440 | }; | ||
441 | |||
442 | const struct pmu *hw_perf_counter_init(struct perf_counter *counter) | ||
443 | { | ||
444 | int err = __hw_perf_counter_init(counter); | ||
445 | |||
446 | if (err) | ||
447 | return ERR_PTR(err); | ||
448 | return &pmu; | ||
449 | } | ||
450 | |||
451 | void perf_counter_print_debug(void) | ||
452 | { | ||
453 | unsigned long flags; | ||
454 | u64 pcr, pic; | ||
455 | int cpu; | ||
456 | |||
457 | if (!sparc_pmu) | ||
458 | return; | ||
459 | |||
460 | local_irq_save(flags); | ||
461 | |||
462 | cpu = smp_processor_id(); | ||
463 | |||
464 | pcr = pcr_ops->read(); | ||
465 | read_pic(pic); | ||
466 | |||
467 | pr_info("\n"); | ||
468 | pr_info("CPU#%d: PCR[%016llx] PIC[%016llx]\n", | ||
469 | cpu, pcr, pic); | ||
470 | |||
471 | local_irq_restore(flags); | ||
472 | } | ||
473 | |||
474 | static int __kprobes perf_counter_nmi_handler(struct notifier_block *self, | ||
475 | unsigned long cmd, void *__args) | ||
476 | { | ||
477 | struct die_args *args = __args; | ||
478 | struct perf_sample_data data; | ||
479 | struct cpu_hw_counters *cpuc; | ||
480 | struct pt_regs *regs; | ||
481 | int idx; | ||
482 | |||
483 | if (!atomic_read(&active_counters)) | ||
484 | return NOTIFY_DONE; | ||
485 | |||
486 | switch (cmd) { | ||
487 | case DIE_NMI: | ||
488 | break; | ||
489 | |||
490 | default: | ||
491 | return NOTIFY_DONE; | ||
492 | } | ||
493 | |||
494 | regs = args->regs; | ||
495 | |||
496 | data.regs = regs; | ||
497 | data.addr = 0; | ||
498 | |||
499 | cpuc = &__get_cpu_var(cpu_hw_counters); | ||
500 | for (idx = 0; idx < MAX_HWCOUNTERS; idx++) { | ||
501 | struct perf_counter *counter = cpuc->counters[idx]; | ||
502 | struct hw_perf_counter *hwc; | ||
503 | u64 val; | ||
504 | |||
505 | if (!test_bit(idx, cpuc->active_mask)) | ||
506 | continue; | ||
507 | hwc = &counter->hw; | ||
508 | val = sparc_perf_counter_update(counter, hwc, idx); | ||
509 | if (val & (1ULL << 31)) | ||
510 | continue; | ||
511 | |||
512 | data.period = counter->hw.last_period; | ||
513 | if (!sparc_perf_counter_set_period(counter, hwc, idx)) | ||
514 | continue; | ||
515 | |||
516 | if (perf_counter_overflow(counter, 1, &data)) | ||
517 | sparc_pmu_disable_counter(hwc, idx); | ||
518 | } | ||
519 | |||
520 | return NOTIFY_STOP; | ||
521 | } | ||
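
The `val & (1ULL << 31)` test in the NMI handler falls out of the period scheme above: a counter armed with -left keeps bit 31 set while it is still counting up toward the wrap (assuming the remaining period fits in 31 bits), so a set top bit means this counter was not the overflow source. Hedged check:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t left = 1000;
	uint32_t pmc = (uint64_t)(-left) & 0xffffffff;

	/* still counting: top bit set, the NMI handler skips it */
	printf("armed:      bit31=%u\n", (pmc >> 31) & 1);   /* 1 */

	pmc += 1200;                     /* wrapped past zero */
	printf("overflowed: bit31=%u\n", (pmc >> 31) & 1);   /* 0 */
	return 0;
}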
522 | |||
523 | static __read_mostly struct notifier_block perf_counter_nmi_notifier = { | ||
524 | .notifier_call = perf_counter_nmi_handler, | ||
525 | }; | ||
526 | |||
527 | static bool __init supported_pmu(void) | ||
528 | { | ||
529 | if (!strcmp(sparc_pmu_type, "ultra3i")) { | ||
530 | sparc_pmu = &ultra3i_pmu; | ||
531 | return true; | ||
532 | } | ||
533 | if (!strcmp(sparc_pmu_type, "niagara2")) { | ||
534 | sparc_pmu = &niagara2_pmu; | ||
535 | return true; | ||
536 | } | ||
537 | return false; | ||
538 | } | ||
539 | |||
540 | void __init init_hw_perf_counters(void) | ||
541 | { | ||
542 | pr_info("Performance counters: "); | ||
543 | |||
544 | if (!supported_pmu()) { | ||
545 | pr_cont("No support for PMU type '%s'\n", sparc_pmu_type); | ||
546 | return; | ||
547 | } | ||
548 | |||
549 | pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type); | ||
550 | |||
551 | /* All sparc64 PMUs currently have 2 counters. But this simple | ||
552 | * driver only supports one active counter at a time. | ||
553 | */ | ||
554 | perf_max_counters = 1; | ||
555 | |||
556 | register_die_notifier(&perf_counter_nmi_notifier); | ||
557 | } | ||
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index 4041f94e7724..18d67854a1b8 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c | |||
@@ -251,7 +251,7 @@ static void __global_reg_poll(struct global_reg_snapshot *gp) | |||
251 | } | 251 | } |
252 | } | 252 | } |
253 | 253 | ||
254 | void __trigger_all_cpu_backtrace(void) | 254 | void arch_trigger_all_cpu_backtrace(void) |
255 | { | 255 | { |
256 | struct thread_info *tp = current_thread_info(); | 256 | struct thread_info *tp = current_thread_info(); |
257 | struct pt_regs *regs = get_irq_regs(); | 257 | struct pt_regs *regs = get_irq_regs(); |
@@ -304,7 +304,7 @@ void __trigger_all_cpu_backtrace(void) | |||
304 | 304 | ||
305 | static void sysrq_handle_globreg(int key, struct tty_struct *tty) | 305 | static void sysrq_handle_globreg(int key, struct tty_struct *tty) |
306 | { | 306 | { |
307 | __trigger_all_cpu_backtrace(); | 307 | arch_trigger_all_cpu_backtrace(); |
308 | } | 308 | } |
309 | 309 | ||
310 | static struct sysrq_key_op sparc_globalreg_op = { | 310 | static struct sysrq_key_op sparc_globalreg_op = { |
diff --git a/arch/sparc/kernel/prom_32.c b/arch/sparc/kernel/prom_32.c index fe43e80772db..0a37e8cfd160 100644 --- a/arch/sparc/kernel/prom_32.c +++ b/arch/sparc/kernel/prom_32.c | |||
@@ -24,6 +24,8 @@ | |||
24 | 24 | ||
25 | #include <asm/prom.h> | 25 | #include <asm/prom.h> |
26 | #include <asm/oplib.h> | 26 | #include <asm/oplib.h> |
27 | #include <asm/leon.h> | ||
28 | #include <asm/leon_amba.h> | ||
27 | 29 | ||
28 | #include "prom.h" | 30 | #include "prom.h" |
29 | 31 | ||
@@ -131,6 +133,35 @@ static void __init ebus_path_component(struct device_node *dp, char *tmp_buf) | |||
131 | regs->which_io, regs->phys_addr); | 133 | regs->which_io, regs->phys_addr); |
132 | } | 134 | } |
133 | 135 | ||
136 | /* "name:vendor:device@irq,addrlo" */ | ||
137 | static void __init ambapp_path_component(struct device_node *dp, char *tmp_buf) | ||
138 | { | ||
139 | struct amba_prom_registers *regs; unsigned int *intr; | ||
140 | unsigned int *device, *vendor; | ||
141 | struct property *prop; | ||
142 | |||
143 | prop = of_find_property(dp, "reg", NULL); | ||
144 | if (!prop) | ||
145 | return; | ||
146 | regs = prop->value; | ||
147 | prop = of_find_property(dp, "interrupts", NULL); | ||
148 | if (!prop) | ||
149 | return; | ||
150 | intr = prop->value; | ||
151 | prop = of_find_property(dp, "vendor", NULL); | ||
152 | if (!prop) | ||
153 | return; | ||
154 | vendor = prop->value; | ||
155 | prop = of_find_property(dp, "device", NULL); | ||
156 | if (!prop) | ||
157 | return; | ||
158 | device = prop->value; | ||
159 | |||
160 | sprintf(tmp_buf, "%s:%d:%d@%x,%x", | ||
161 | dp->name, *vendor, *device, | ||
162 | *intr, regs->phys_addr); | ||
163 | } | ||
164 | |||
134 | static void __init __build_path_component(struct device_node *dp, char *tmp_buf) | 165 | static void __init __build_path_component(struct device_node *dp, char *tmp_buf) |
135 | { | 166 | { |
136 | struct device_node *parent = dp->parent; | 167 | struct device_node *parent = dp->parent; |
@@ -143,6 +174,8 @@ static void __init __build_path_component(struct device_node *dp, char *tmp_buf) | |||
143 | return sbus_path_component(dp, tmp_buf); | 174 | return sbus_path_component(dp, tmp_buf); |
144 | if (!strcmp(parent->type, "ebus")) | 175 | if (!strcmp(parent->type, "ebus")) |
145 | return ebus_path_component(dp, tmp_buf); | 176 | return ebus_path_component(dp, tmp_buf); |
177 | if (!strcmp(parent->type, "ambapp")) | ||
178 | return ambapp_path_component(dp, tmp_buf); | ||
146 | 179 | ||
147 | /* "isa" is handled with platform naming */ | 180 | /* "isa" is handled with platform naming */ |
148 | } | 181 | } |
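
For concreteness, the "name:vendor:device@irq,addrlo" format built by ambapp_path_component() can be replayed directly; all the property values below are invented:

#include <stdio.h>

int main(void)
{
	char tmp_buf[64];
	const char *name = "gptimer";        /* invented node name */
	int vendor = 1, device = 0x1d;       /* invented AMBA P&P ids */
	unsigned int intr = 3, phys_addr = 0x80000100;

	sprintf(tmp_buf, "%s:%d:%d@%x,%x",
		name, vendor, device, intr, phys_addr);
	printf("%s\n", tmp_buf);             /* gptimer:1:29@3,80000100 */
	return 0;
}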
diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c index 0fb5789d43c8..138910c67206 100644 --- a/arch/sparc/kernel/prom_common.c +++ b/arch/sparc/kernel/prom_common.c | |||
@@ -22,9 +22,12 @@ | |||
22 | #include <linux/of.h> | 22 | #include <linux/of.h> |
23 | #include <asm/prom.h> | 23 | #include <asm/prom.h> |
24 | #include <asm/oplib.h> | 24 | #include <asm/oplib.h> |
25 | #include <asm/leon.h> | ||
25 | 26 | ||
26 | #include "prom.h" | 27 | #include "prom.h" |
27 | 28 | ||
29 | void (*prom_build_more)(struct device_node *dp, struct device_node ***nextp); | ||
30 | |||
28 | struct device_node *of_console_device; | 31 | struct device_node *of_console_device; |
29 | EXPORT_SYMBOL(of_console_device); | 32 | EXPORT_SYMBOL(of_console_device); |
30 | 33 | ||
@@ -161,7 +164,7 @@ static struct property * __init build_one_prop(phandle node, char *prev, | |||
161 | name = prom_nextprop(node, prev, p->name); | 164 | name = prom_nextprop(node, prev, p->name); |
162 | } | 165 | } |
163 | 166 | ||
164 | if (strlen(name) == 0) { | 167 | if (!name || strlen(name) == 0) { |
165 | tmp = p; | 168 | tmp = p; |
166 | return NULL; | 169 | return NULL; |
167 | } | 170 | } |
@@ -242,7 +245,7 @@ static struct device_node * __init prom_create_node(phandle node, | |||
242 | return dp; | 245 | return dp; |
243 | } | 246 | } |
244 | 247 | ||
245 | static char * __init build_full_name(struct device_node *dp) | 248 | char * __init build_full_name(struct device_node *dp) |
246 | { | 249 | { |
247 | int len, ourlen, plen; | 250 | int len, ourlen, plen; |
248 | char *n; | 251 | char *n; |
@@ -289,6 +292,9 @@ static struct device_node * __init prom_build_tree(struct device_node *parent, | |||
289 | 292 | ||
290 | dp->child = prom_build_tree(dp, prom_getchild(node), nextp); | 293 | dp->child = prom_build_tree(dp, prom_getchild(node), nextp); |
291 | 294 | ||
295 | if (prom_build_more) | ||
296 | prom_build_more(dp, nextp); | ||
297 | |||
292 | node = prom_getsibling(node); | 298 | node = prom_getsibling(node); |
293 | } | 299 | } |
294 | 300 | ||
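
The prom_build_more pointer added above gives platforms a per-node callback during tree construction; this is the hook the LEON code presumably uses to graft AMBA plug&play nodes into the device tree. A sketch of installing it, with the callback name and body invented and the declaration assumed to come from the local "prom.h":

#include "prom.h"

static void __init demo_build_more(struct device_node *dp,
				   struct device_node ***nextp)
{
	/* Append platform-specific children of 'dp' through *nextp. */
}

static void __init demo_prom_setup(void)
{
	/* Must be installed before prom_build_tree() walks the tree. */
	prom_build_more = demo_build_more;
}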
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c index 998cadb4e7f2..16a47ffe03c1 100644 --- a/arch/sparc/kernel/setup_32.c +++ b/arch/sparc/kernel/setup_32.c | |||
@@ -235,6 +235,8 @@ void __init setup_arch(char **cmdline_p) | |||
235 | sparc_cpu_model = sun4e; | 235 | sparc_cpu_model = sun4e; |
236 | if (!strcmp(&cputypval,"sun4u")) | 236 | if (!strcmp(&cputypval,"sun4u")) |
237 | sparc_cpu_model = sun4u; | 237 | sparc_cpu_model = sun4u; |
238 | if (!strncmp(&cputypval, "leon" , 4)) | ||
239 | sparc_cpu_model = sparc_leon; | ||
238 | 240 | ||
239 | printk("ARCH: "); | 241 | printk("ARCH: "); |
240 | switch(sparc_cpu_model) { | 242 | switch(sparc_cpu_model) { |
@@ -256,6 +258,9 @@ void __init setup_arch(char **cmdline_p) | |||
256 | case sun4u: | 258 | case sun4u: |
257 | printk("SUN4U\n"); | 259 | printk("SUN4U\n"); |
258 | break; | 260 | break; |
261 | case sparc_leon: | ||
262 | printk("LEON\n"); | ||
263 | break; | ||
259 | default: | 264 | default: |
260 | printk("UNKNOWN!\n"); | 265 | printk("UNKNOWN!\n"); |
261 | break; | 266 | break; |
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c index 181d069a2d44..7ce1a1005b1d 100644 --- a/arch/sparc/kernel/signal_32.c +++ b/arch/sparc/kernel/signal_32.c | |||
@@ -590,6 +590,8 @@ void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, | |||
590 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { | 590 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { |
591 | clear_thread_flag(TIF_NOTIFY_RESUME); | 591 | clear_thread_flag(TIF_NOTIFY_RESUME); |
592 | tracehook_notify_resume(regs); | 592 | tracehook_notify_resume(regs); |
593 | if (current->replacement_session_keyring) | ||
594 | key_replace_session_keyring(); | ||
593 | } | 595 | } |
594 | } | 596 | } |
595 | 597 | ||
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c index ec82d76dc6f2..647afbda7ae1 100644 --- a/arch/sparc/kernel/signal_64.c +++ b/arch/sparc/kernel/signal_64.c | |||
@@ -613,5 +613,8 @@ void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long | |||
613 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { | 613 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { |
614 | clear_thread_flag(TIF_NOTIFY_RESUME); | 614 | clear_thread_flag(TIF_NOTIFY_RESUME); |
615 | tracehook_notify_resume(regs); | 615 | tracehook_notify_resume(regs); |
616 | if (current->replacement_session_keyring) | ||
617 | key_replace_session_keyring(); | ||
616 | } | 618 | } |
617 | } | 619 | } |
620 | |||
diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S index aed94869ad6a..e7061138c98a 100644 --- a/arch/sparc/kernel/sys32.S +++ b/arch/sparc/kernel/sys32.S | |||
@@ -121,7 +121,7 @@ SIGN2(sys32_syslog, sys_syslog, %o0, %o2) | |||
121 | SIGN1(sys32_umask, sys_umask, %o0) | 121 | SIGN1(sys32_umask, sys_umask, %o0) |
122 | SIGN3(sys32_tgkill, sys_tgkill, %o0, %o1, %o2) | 122 | SIGN3(sys32_tgkill, sys_tgkill, %o0, %o1, %o2) |
123 | SIGN1(sys32_sendto, sys_sendto, %o0) | 123 | SIGN1(sys32_sendto, sys_sendto, %o0) |
124 | SIGN1(sys32_recvfrom, sys_recvfrom, %o0) | 124 | SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0) |
125 | SIGN3(sys32_socket, sys_socket, %o0, %o1, %o2) | 125 | SIGN3(sys32_socket, sys_socket, %o0, %o1, %o2) |
126 | SIGN2(sys32_connect, sys_connect, %o0, %o2) | 126 | SIGN2(sys32_connect, sys_connect, %o0, %o2) |
127 | SIGN2(sys32_bind, sys_bind, %o0, %o2) | 127 | SIGN2(sys32_bind, sys_bind, %o0, %o2) |
diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c index d28f496f4669..ca39c606fe8e 100644 --- a/arch/sparc/kernel/sysfs.c +++ b/arch/sparc/kernel/sysfs.c | |||
@@ -2,6 +2,7 @@ | |||
2 | * | 2 | * |
3 | * Copyright (C) 2007 David S. Miller <davem@davemloft.net> | 3 | * Copyright (C) 2007 David S. Miller <davem@davemloft.net> |
4 | */ | 4 | */ |
5 | #include <linux/sched.h> | ||
5 | #include <linux/sysdev.h> | 6 | #include <linux/sysdev.h> |
6 | #include <linux/cpu.h> | 7 | #include <linux/cpu.h> |
7 | #include <linux/smp.h> | 8 | #include <linux/smp.h> |
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S index 690901657291..04181577cb65 100644 --- a/arch/sparc/kernel/systbls_32.S +++ b/arch/sparc/kernel/systbls_32.S | |||
@@ -82,5 +82,5 @@ sys_call_table: | |||
82 | /*310*/ .long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate | 82 | /*310*/ .long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate |
83 | /*315*/ .long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1 | 83 | /*315*/ .long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1 |
84 | /*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv | 84 | /*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv |
85 | /*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo | 85 | /*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_counter_open |
86 | 86 | ||
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S index 2ee7250ba7ae..91b06b7f7acf 100644 --- a/arch/sparc/kernel/systbls_64.S +++ b/arch/sparc/kernel/systbls_64.S | |||
@@ -83,7 +83,7 @@ sys_call_table32: | |||
83 | /*310*/ .word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate | 83 | /*310*/ .word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate |
84 | .word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1 | 84 | .word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1 |
85 | /*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv | 85 | /*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv |
86 | .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo | 86 | .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_counter_open |
87 | 87 | ||
88 | #endif /* CONFIG_COMPAT */ | 88 | #endif /* CONFIG_COMPAT */ |
89 | 89 | ||
@@ -158,4 +158,4 @@ sys_call_table: | |||
158 | /*310*/ .word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate | 158 | /*310*/ .word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate |
159 | .word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1 | 159 | .word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1 |
160 | /*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv | 160 | /*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv |
161 | .word sys_pwritev, sys_rt_tgsigqueueinfo | 161 | .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_counter_open |
diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile index 681abe0a4594..79836a7dd00c 100644 --- a/arch/sparc/mm/Makefile +++ b/arch/sparc/mm/Makefile | |||
@@ -11,6 +11,7 @@ obj-$(CONFIG_SPARC32) += loadmmu.o | |||
11 | obj-y += generic_$(BITS).o | 11 | obj-y += generic_$(BITS).o |
12 | obj-$(CONFIG_SPARC32) += extable.o btfixup.o srmmu.o iommu.o io-unit.o | 12 | obj-$(CONFIG_SPARC32) += extable.o btfixup.o srmmu.o iommu.o io-unit.o |
13 | obj-$(CONFIG_SPARC32) += hypersparc.o viking.o tsunami.o swift.o | 13 | obj-$(CONFIG_SPARC32) += hypersparc.o viking.o tsunami.o swift.o |
14 | obj-$(CONFIG_SPARC_LEON)+= leon_mm.o | ||
14 | 15 | ||
15 | # Only used by sparc64 | 16 | # Only used by sparc64 |
16 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o | 17 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o |
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c index 26bb3919ff1f..54114ad0bdee 100644 --- a/arch/sparc/mm/init_32.c +++ b/arch/sparc/mm/init_32.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <asm/pgalloc.h> /* bug in asm-generic/tlb.h: check_pgt_cache */ | 34 | #include <asm/pgalloc.h> /* bug in asm-generic/tlb.h: check_pgt_cache */ |
35 | #include <asm/tlb.h> | 35 | #include <asm/tlb.h> |
36 | #include <asm/prom.h> | 36 | #include <asm/prom.h> |
37 | #include <asm/leon.h> | ||
37 | 38 | ||
38 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); | 39 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); |
39 | 40 | ||
@@ -326,6 +327,9 @@ void __init paging_init(void) | |||
326 | sparc_unmapped_base = 0xe0000000; | 327 | sparc_unmapped_base = 0xe0000000; |
327 | BTFIXUPSET_SETHI(sparc_unmapped_base, 0xe0000000); | 328 | BTFIXUPSET_SETHI(sparc_unmapped_base, 0xe0000000); |
328 | break; | 329 | break; |
330 | case sparc_leon: | ||
331 | leon_init(); | ||
332 | /* fall through */ | ||
329 | case sun4m: | 333 | case sun4m: |
330 | case sun4d: | 334 | case sun4d: |
331 | srmmu_paging_init(); | 335 | srmmu_paging_init(); |
diff --git a/arch/sparc/mm/leon_mm.c b/arch/sparc/mm/leon_mm.c new file mode 100644 index 000000000000..c0e01297e64e --- /dev/null +++ b/arch/sparc/mm/leon_mm.c | |||
@@ -0,0 +1,260 @@ | |||
1 | /* | ||
2 | * linux/arch/sparc/mm/leon_mm.c | ||
3 | * | ||
4 | * Copyright (C) 2004 Konrad Eisele (eiselekd@web.de, konrad@gaisler.com) Gaisler Research | ||
5 | * Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB | ||
6 | * Copyright (C) 2009 Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB | ||
7 | * | ||
8 | * Do the SRMMU probe in software. | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/mm.h> | ||
14 | #include <asm/asi.h> | ||
15 | #include <asm/leon.h> | ||
16 | #include <asm/tlbflush.h> | ||
17 | |||
18 | int leon_flush_during_switch = 1; | ||
19 | int srmmu_swprobe_trace; | ||
20 | |||
21 | unsigned long srmmu_swprobe(unsigned long vaddr, unsigned long *paddr) | ||
22 | { | ||
23 | |||
24 | unsigned int ctxtbl; | ||
25 | unsigned int pgd, pmd, ped; | ||
26 | unsigned int ptr; | ||
27 | unsigned int lvl, pte, paddrbase; | ||
28 | unsigned int ctx; | ||
29 | unsigned int paddr_calc; | ||
30 | |||
31 | paddrbase = 0; | ||
32 | |||
33 | if (srmmu_swprobe_trace) | ||
34 | printk(KERN_INFO "swprobe: trace on\n"); | ||
35 | |||
36 | ctxtbl = srmmu_get_ctable_ptr(); | ||
37 | if (!(ctxtbl)) { | ||
38 | if (srmmu_swprobe_trace) | ||
39 | printk(KERN_INFO "swprobe: srmmu_get_ctable_ptr returned 0=>0\n"); | ||
40 | return 0; | ||
41 | } | ||
42 | if (!_pfn_valid(PFN(ctxtbl))) { | ||
43 | if (srmmu_swprobe_trace) | ||
44 | printk(KERN_INFO | ||
45 | "swprobe: !_pfn_valid(%x)=>0\n", | ||
46 | PFN(ctxtbl)); | ||
47 | return 0; | ||
48 | } | ||
49 | |||
50 | ctx = srmmu_get_context(); | ||
51 | if (srmmu_swprobe_trace) | ||
52 | printk(KERN_INFO "swprobe: --- ctx (%x) ---\n", ctx); | ||
53 | |||
54 | pgd = LEON_BYPASS_LOAD_PA(ctxtbl + (ctx * 4)); | ||
55 | |||
56 | if (((pgd & SRMMU_ET_MASK) == SRMMU_ET_PTE)) { | ||
57 | if (srmmu_swprobe_trace) | ||
58 | printk(KERN_INFO "swprobe: pgd is entry level 3\n"); | ||
59 | lvl = 3; | ||
60 | pte = pgd; | ||
61 | paddrbase = pgd & _SRMMU_PTE_PMASK_LEON; | ||
62 | goto ready; | ||
63 | } | ||
64 | if (((pgd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) { | ||
65 | if (srmmu_swprobe_trace) | ||
66 | printk(KERN_INFO "swprobe: pgd is invalid => 0\n"); | ||
67 | return 0; | ||
68 | } | ||
69 | |||
70 | if (srmmu_swprobe_trace) | ||
71 | printk(KERN_INFO "swprobe: --- pgd (%x) ---\n", pgd); | ||
72 | |||
73 | ptr = (pgd & SRMMU_PTD_PMASK) << 4; | ||
74 | ptr += ((((vaddr) >> LEON_PGD_SH) & LEON_PGD_M) * 4); | ||
75 | if (!_pfn_valid(PFN(ptr))) | ||
76 | return 0; | ||
77 | |||
78 | pmd = LEON_BYPASS_LOAD_PA(ptr); | ||
79 | if (((pmd & SRMMU_ET_MASK) == SRMMU_ET_PTE)) { | ||
80 | if (srmmu_swprobe_trace) | ||
81 | printk(KERN_INFO "swprobe: pmd is entry level 2\n"); | ||
82 | lvl = 2; | ||
83 | pte = pmd; | ||
84 | paddrbase = pmd & _SRMMU_PTE_PMASK_LEON; | ||
85 | goto ready; | ||
86 | } | ||
87 | if (((pmd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) { | ||
88 | if (srmmu_swprobe_trace) | ||
89 | printk(KERN_INFO "swprobe: pmd is invalid => 0\n"); | ||
90 | return 0; | ||
91 | } | ||
92 | |||
93 | if (srmmu_swprobe_trace) | ||
94 | printk(KERN_INFO "swprobe: --- pmd (%x) ---\n", pmd); | ||
95 | |||
96 | ptr = (pmd & SRMMU_PTD_PMASK) << 4; | ||
97 | ptr += (((vaddr >> LEON_PMD_SH) & LEON_PMD_M) * 4); | ||
98 | if (!_pfn_valid(PFN(ptr))) { | ||
99 | if (srmmu_swprobe_trace) | ||
100 | printk(KERN_INFO "swprobe: !_pfn_valid(%x)=>0\n", | ||
101 | PFN(ptr)); | ||
102 | return 0; | ||
103 | } | ||
104 | |||
105 | ped = LEON_BYPASS_LOAD_PA(ptr); | ||
106 | |||
107 | if (((ped & SRMMU_ET_MASK) == SRMMU_ET_PTE)) { | ||
108 | if (srmmu_swprobe_trace) | ||
109 | printk(KERN_INFO "swprobe: ped is entry level 1\n"); | ||
110 | lvl = 1; | ||
111 | pte = ped; | ||
112 | paddrbase = ped & _SRMMU_PTE_PMASK_LEON; | ||
113 | goto ready; | ||
114 | } | ||
115 | if (((ped & SRMMU_ET_MASK) != SRMMU_ET_PTD)) { | ||
116 | if (srmmu_swprobe_trace) | ||
117 | printk(KERN_INFO "swprobe: ped is invalid => 0\n"); | ||
118 | return 0; | ||
119 | } | ||
120 | |||
121 | if (srmmu_swprobe_trace) | ||
122 | printk(KERN_INFO "swprobe: --- ped (%x) ---\n", ped); | ||
123 | |||
124 | ptr = (ped & SRMMU_PTD_PMASK) << 4; | ||
125 | ptr += (((vaddr >> LEON_PTE_SH) & LEON_PTE_M) * 4); | ||
126 | if (!_pfn_valid(PFN(ptr))) | ||
127 | return 0; | ||
128 | |||
129 | ptr = LEON_BYPASS_LOAD_PA(ptr); | ||
130 | if (((ptr & SRMMU_ET_MASK) == SRMMU_ET_PTE)) { | ||
131 | if (srmmu_swprobe_trace) | ||
132 | printk(KERN_INFO "swprobe: ptr is entry level 0\n"); | ||
133 | lvl = 0; | ||
134 | pte = ptr; | ||
135 | paddrbase = ptr & _SRMMU_PTE_PMASK_LEON; | ||
136 | goto ready; | ||
137 | } | ||
138 | if (srmmu_swprobe_trace) | ||
139 | printk(KERN_INFO "swprobe: ptr is invalid => 0\n"); | ||
140 | return 0; | ||
141 | |||
142 | ready: | ||
143 | switch (lvl) { | ||
144 | case 0: | ||
145 | paddr_calc = | ||
146 | (vaddr & ~(-1 << LEON_PTE_SH)) | ((pte & ~0xff) << 4); | ||
147 | break; | ||
148 | case 1: | ||
149 | paddr_calc = | ||
150 | (vaddr & ~(-1 << LEON_PMD_SH)) | ((pte & ~0xff) << 4); | ||
151 | break; | ||
152 | case 2: | ||
153 | paddr_calc = | ||
154 | (vaddr & ~(-1 << LEON_PGD_SH)) | ((pte & ~0xff) << 4); | ||
155 | break; | ||
156 | default: | ||
157 | case 3: | ||
158 | paddr_calc = vaddr; | ||
159 | break; | ||
160 | } | ||
161 | if (srmmu_swprobe_trace) | ||
162 | printk(KERN_INFO "swprobe: padde %x\n", paddr_calc); | ||
163 | if (paddr) | ||
164 | *paddr = paddr_calc; | ||
165 | return paddrbase; | ||
166 | } | ||
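
The paddr_calc composition at the end of srmmu_swprobe() keeps the low vaddr bits (the offset within the mapping, selected by the level's shift) and takes the high bits from the PTE: SRMMU PTEs hold physical address bits 35:12 in bits 31:8, hence the '& ~0xff' and '<< 4'. A hedged userspace replay of the level-0 case, with LEON_PTE_SH assumed to be 12 (4 KB pages) and an invented PTE:

#include <stdio.h>

#define DEMO_PTE_SH 12                   /* assumed LEON_PTE_SH value */

int main(void)
{
	unsigned int vaddr = 0x40001234;
	unsigned int pte   = 0x008001e2;     /* PPN in bits 31:8, flags below */
	unsigned int paddr;

	/* ~0u form avoids left-shifting a negative value */
	paddr = (vaddr & ~(~0u << DEMO_PTE_SH)) | ((pte & ~0xffu) << 4);
	printf("paddr = 0x%x\n", paddr);     /* 0x8001234 */
	return 0;
}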
167 | |||
168 | void leon_flush_icache_all(void) | ||
169 | { | ||
170 | __asm__ __volatile__(" flush "); /*iflush*/ | ||
171 | } | ||
172 | |||
173 | void leon_flush_dcache_all(void) | ||
174 | { | ||
175 | __asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : : | ||
176 | "i"(ASI_LEON_DFLUSH) : "memory"); | ||
177 | } | ||
178 | |||
179 | void leon_flush_pcache_all(struct vm_area_struct *vma, unsigned long page) | ||
180 | { | ||
181 | if (vma->vm_flags & VM_EXEC) | ||
182 | leon_flush_icache_all(); | ||
183 | leon_flush_dcache_all(); | ||
184 | } | ||
185 | |||
186 | void leon_flush_cache_all(void) | ||
187 | { | ||
188 | __asm__ __volatile__(" flush "); /*iflush*/ | ||
189 | __asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : : | ||
190 | "i"(ASI_LEON_DFLUSH) : "memory"); | ||
191 | } | ||
192 | |||
193 | void leon_flush_tlb_all(void) | ||
194 | { | ||
195 | leon_flush_cache_all(); | ||
196 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : "r"(0x400), | ||
197 | "i"(ASI_LEON_MMUFLUSH) : "memory"); | ||
198 | } | ||
199 | |||
200 | /* get all cache regs */ | ||
201 | void leon3_getCacheRegs(struct leon3_cacheregs *regs) | ||
202 | { | ||
203 | unsigned long ccr, iccr, dccr; | ||
204 | |||
205 | if (!regs) | ||
206 | return; | ||
207 | /* Get Cache regs from "Cache ASI" address 0x0, 0x8 and 0xC */ | ||
208 | __asm__ __volatile__("lda [%%g0] %3, %0\n\t" | ||
209 | "mov 0x08, %%g1\n\t" | ||
210 | "lda [%%g1] %3, %1\n\t" | ||
211 | "mov 0x0c, %%g1\n\t" | ||
212 | "lda [%%g1] %3, %2\n\t" | ||
213 | : "=r"(ccr), "=r"(iccr), "=r"(dccr) | ||
214 | /* output */ | ||
215 | : "i"(ASI_LEON_CACHEREGS) /* input */ | ||
216 | : "g1" /* clobber list */ | ||
217 | ); | ||
218 | regs->ccr = ccr; | ||
219 | regs->iccr = iccr; | ||
220 | regs->dccr = dccr; | ||
221 | } | ||
222 | |||
223 | /* Due to the virtual caches we need to check the cache | ||
224 | * configuration to see whether flushing can be skipped in | ||
225 | * some cases. | ||
226 | * | ||
227 | * Leon2 and Leon3 report their cache information differently. | ||
228 | */ | ||
229 | int leon_flush_needed(void) | ||
230 | { | ||
231 | int flush_needed = -1; | ||
232 | unsigned int ssize, sets; | ||
233 | char *setStr[4] = | ||
234 | { "direct mapped", "2-way associative", "3-way associative", | ||
235 | "4-way associative" | ||
236 | }; | ||
237 | /* leon 3 */ | ||
238 | struct leon3_cacheregs cregs; | ||
239 | leon3_getCacheRegs(&cregs); | ||
240 | sets = (cregs.dccr & LEON3_XCCR_SETS_MASK) >> 24; | ||
241 | /* (ssize=>realsize) 0=>1k, 1=>2k, 2=>4k, 3=>8k ... */ | ||
242 | ssize = 1 << ((cregs.dccr & LEON3_XCCR_SSIZE_MASK) >> 20); | ||
243 | |||
244 | printk(KERN_INFO "CACHE: %s cache, set size %dk\n", | ||
245 | sets > 3 ? "unknown" : setStr[sets], ssize); | ||
246 | if ((ssize <= (PAGE_SIZE / 1024)) && (sets == 0)) { | ||
247 | /* Set size <= page size ==> | ||
248 | flushing on every context switch is not needed. */ | ||
249 | flush_needed = 0; | ||
250 | printk(KERN_INFO "CACHE: not flushing on every context switch\n"); | ||
251 | } | ||
252 | return flush_needed; | ||
253 | } | ||
254 | |||
255 | void leon_switch_mm(void) | ||
256 | { | ||
257 | flush_tlb_mm((void *)0); | ||
258 | if (leon_flush_during_switch) | ||
259 | leon_flush_cache_all(); | ||
260 | } | ||
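
To make the leon_flush_needed() decode concrete: the set count comes from dccr via the >>24 shift and the set-size exponent via the >>20 shift. The replay below uses mask values that are assumptions consistent with those shifts (not taken from asm/leon.h) and an invented dccr:

#include <stdio.h>

#define DEMO_SETS_MASK  0x03000000u      /* assumed LEON3_XCCR_SETS_MASK */
#define DEMO_SSIZE_MASK 0x00700000u      /* assumed LEON3_XCCR_SSIZE_MASK */

int main(void)
{
	unsigned int dccr = 0x00200000;      /* invented: ssize field = 2 */
	unsigned int sets = (dccr & DEMO_SETS_MASK) >> 24;
	unsigned int ssize = 1 << ((dccr & DEMO_SSIZE_MASK) >> 20);

	/* exponent 2 => 4k set size; sets == 0 => direct mapped, so
	 * with 4 KB pages the per-switch flush could be skipped */
	printf("sets=%u set-size=%uk\n", sets, ssize);
	return 0;
}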
diff --git a/arch/sparc/mm/loadmmu.c b/arch/sparc/mm/loadmmu.c index 652be05acbea..82ec8f666036 100644 --- a/arch/sparc/mm/loadmmu.c +++ b/arch/sparc/mm/loadmmu.c | |||
@@ -33,6 +33,7 @@ void __init load_mmu(void) | |||
33 | break; | 33 | break; |
34 | case sun4m: | 34 | case sun4m: |
35 | case sun4d: | 35 | case sun4d: |
36 | case sparc_leon: | ||
36 | ld_mmu_srmmu(); | 37 | ld_mmu_srmmu(); |
37 | break; | 38 | break; |
38 | default: | 39 | default: |
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c index ade4eb373bdd..509b1ffeba66 100644 --- a/arch/sparc/mm/srmmu.c +++ b/arch/sparc/mm/srmmu.c | |||
@@ -46,6 +46,7 @@ | |||
46 | #include <asm/tsunami.h> | 46 | #include <asm/tsunami.h> |
47 | #include <asm/swift.h> | 47 | #include <asm/swift.h> |
48 | #include <asm/turbosparc.h> | 48 | #include <asm/turbosparc.h> |
49 | #include <asm/leon.h> | ||
49 | 50 | ||
50 | #include <asm/btfixup.h> | 51 | #include <asm/btfixup.h> |
51 | 52 | ||
@@ -569,6 +570,9 @@ static void srmmu_switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, | |||
569 | srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd); | 570 | srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd); |
570 | } | 571 | } |
571 | 572 | ||
573 | if (sparc_cpu_model == sparc_leon) | ||
574 | leon_switch_mm(); | ||
575 | |||
572 | if (is_hypersparc) | 576 | if (is_hypersparc) |
573 | hyper_flush_whole_icache(); | 577 | hyper_flush_whole_icache(); |
574 | 578 | ||
@@ -1977,6 +1981,45 @@ static void __init init_viking(void) | |||
1977 | poke_srmmu = poke_viking; | 1981 | poke_srmmu = poke_viking; |
1978 | } | 1982 | } |
1979 | 1983 | ||
1984 | #ifdef CONFIG_SPARC_LEON | ||
1985 | |||
1986 | void __init poke_leonsparc(void) | ||
1987 | { | ||
1988 | } | ||
1989 | |||
1990 | void __init init_leon(void) | ||
1991 | { | ||
1992 | |||
1993 | srmmu_name = "Leon"; | ||
1994 | |||
1995 | BTFIXUPSET_CALL(flush_cache_all, leon_flush_cache_all, | ||
1996 | BTFIXUPCALL_NORM); | ||
1997 | BTFIXUPSET_CALL(flush_cache_mm, leon_flush_cache_all, | ||
1998 | BTFIXUPCALL_NORM); | ||
1999 | BTFIXUPSET_CALL(flush_cache_page, leon_flush_pcache_all, | ||
2000 | BTFIXUPCALL_NORM); | ||
2001 | BTFIXUPSET_CALL(flush_cache_range, leon_flush_cache_all, | ||
2002 | BTFIXUPCALL_NORM); | ||
2003 | BTFIXUPSET_CALL(flush_page_for_dma, leon_flush_dcache_all, | ||
2004 | BTFIXUPCALL_NORM); | ||
2005 | |||
2006 | BTFIXUPSET_CALL(flush_tlb_all, leon_flush_tlb_all, BTFIXUPCALL_NORM); | ||
2007 | BTFIXUPSET_CALL(flush_tlb_mm, leon_flush_tlb_all, BTFIXUPCALL_NORM); | ||
2008 | BTFIXUPSET_CALL(flush_tlb_page, leon_flush_tlb_all, BTFIXUPCALL_NORM); | ||
2009 | BTFIXUPSET_CALL(flush_tlb_range, leon_flush_tlb_all, BTFIXUPCALL_NORM); | ||
2010 | |||
2011 | BTFIXUPSET_CALL(__flush_page_to_ram, leon_flush_cache_all, | ||
2012 | BTFIXUPCALL_NOP); | ||
2013 | BTFIXUPSET_CALL(flush_sig_insns, leon_flush_cache_all, BTFIXUPCALL_NOP); | ||
2014 | |||
2015 | poke_srmmu = poke_leonsparc; | ||
2016 | |||
2017 | srmmu_cache_pagetables = 0; | ||
2018 | |||
2019 | leon_flush_during_switch = leon_flush_needed(); | ||
2020 | } | ||
2021 | #endif | ||
2022 | |||
1980 | /* Probe for the srmmu chip version. */ | 2023 | /* Probe for the srmmu chip version. */ |
1981 | static void __init get_srmmu_type(void) | 2024 | static void __init get_srmmu_type(void) |
1982 | { | 2025 | { |
@@ -1992,7 +2035,15 @@ static void __init get_srmmu_type(void) | |||
1992 | psr_typ = (psr >> 28) & 0xf; | 2035 | psr_typ = (psr >> 28) & 0xf; |
1993 | psr_vers = (psr >> 24) & 0xf; | 2036 | psr_vers = (psr >> 24) & 0xf; |
1994 | 2037 | ||
1995 | /* First, check for HyperSparc or Cypress. */ | 2038 | /* First, check for sparc-leon. */ |
2039 | if (sparc_cpu_model == sparc_leon) { | ||
2040 | psr_typ = 0xf; /* hardcoded ids for older models/simulators */ | ||
2041 | psr_vers = 2; | ||
2042 | init_leon(); | ||
2043 | return; | ||
2044 | } | ||
2045 | |||
2046 | /* Second, check for HyperSparc or Cypress. */ | ||
1996 | if(mod_typ == 1) { | 2047 | if(mod_typ == 1) { |
1997 | switch(mod_rev) { | 2048 | switch(mod_rev) { |
1998 | case 7: | 2049 | case 7: |
diff --git a/arch/sparc/oprofile/init.c b/arch/sparc/oprofile/init.c index d172f86439b1..f97cb8b6ee5f 100644 --- a/arch/sparc/oprofile/init.c +++ b/arch/sparc/oprofile/init.c | |||
@@ -21,7 +21,7 @@ | |||
21 | static int profile_timer_exceptions_notify(struct notifier_block *self, | 21 | static int profile_timer_exceptions_notify(struct notifier_block *self, |
22 | unsigned long val, void *data) | 22 | unsigned long val, void *data) |
23 | { | 23 | { |
24 | struct die_args *args = (struct die_args *)data; | 24 | struct die_args *args = data; |
25 | int ret = NOTIFY_DONE; | 25 | int ret = NOTIFY_DONE; |
26 | 26 | ||
27 | switch (val) { | 27 | switch (val) { |
@@ -57,7 +57,7 @@ static void timer_stop(void) | |||
57 | 57 | ||
58 | static int op_nmi_timer_init(struct oprofile_operations *ops) | 58 | static int op_nmi_timer_init(struct oprofile_operations *ops) |
59 | { | 59 | { |
60 | if (!nmi_usable) | 60 | if (atomic_read(&nmi_active) <= 0) |
61 | return -ENODEV; | 61 | return -ENODEV; |
62 | 62 | ||
63 | ops->start = timer_start; | 63 | ops->start = timer_start; |
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c index 3b44b47c7e1d..f114813ae258 100644 --- a/arch/um/drivers/net_kern.c +++ b/arch/um/drivers/net_kern.c | |||
@@ -245,7 +245,7 @@ static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
245 | 245 | ||
246 | dev_kfree_skb(skb); | 246 | dev_kfree_skb(skb); |
247 | 247 | ||
248 | return 0; | 248 | return NETDEV_TX_OK; |
249 | } | 249 | } |
250 | 250 | ||
251 | static void uml_net_set_multicast_list(struct net_device *dev) | 251 | static void uml_net_set_multicast_list(struct net_device *dev) |
@@ -285,7 +285,7 @@ static void uml_net_get_drvinfo(struct net_device *dev, | |||
285 | strcpy(info->version, "42"); | 285 | strcpy(info->version, "42"); |
286 | } | 286 | } |
287 | 287 | ||
288 | static struct ethtool_ops uml_net_ethtool_ops = { | 288 | static const struct ethtool_ops uml_net_ethtool_ops = { |
289 | .get_drvinfo = uml_net_get_drvinfo, | 289 | .get_drvinfo = uml_net_get_drvinfo, |
290 | .get_link = ethtool_op_get_link, | 290 | .get_link = ethtool_op_get_link, |
291 | }; | 291 | }; |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 13ffa5df37d7..fc20fdc0f7f2 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -38,7 +38,7 @@ config X86 | |||
38 | select HAVE_FUNCTION_GRAPH_FP_TEST | 38 | select HAVE_FUNCTION_GRAPH_FP_TEST |
39 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST | 39 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST |
40 | select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE | 40 | select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE |
41 | select HAVE_FTRACE_SYSCALLS | 41 | select HAVE_SYSCALL_TRACEPOINTS |
42 | select HAVE_KVM | 42 | select HAVE_KVM |
43 | select HAVE_ARCH_KGDB | 43 | select HAVE_ARCH_KGDB |
44 | select HAVE_ARCH_TRACEHOOK | 44 | select HAVE_ARCH_TRACEHOOK |
@@ -586,7 +586,6 @@ config GART_IOMMU | |||
586 | bool "GART IOMMU support" if EMBEDDED | 586 | bool "GART IOMMU support" if EMBEDDED |
587 | default y | 587 | default y |
588 | select SWIOTLB | 588 | select SWIOTLB |
589 | select AGP | ||
590 | depends on X86_64 && PCI | 589 | depends on X86_64 && PCI |
591 | ---help--- | 590 | ---help--- |
592 | Support for full DMA access of devices with 32bit memory access only | 591 | Support for full DMA access of devices with 32bit memory access only |
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu index 8130334329c0..527519b8a9f9 100644 --- a/arch/x86/Kconfig.cpu +++ b/arch/x86/Kconfig.cpu | |||
@@ -262,6 +262,15 @@ config MCORE2 | |||
262 | family in /proc/cpuinfo. Newer ones have 6 and older ones 15 | 262 | family in /proc/cpuinfo. Newer ones have 6 and older ones 15 |
263 | (not a typo) | 263 | (not a typo) |
264 | 264 | ||
265 | config MATOM | ||
266 | bool "Intel Atom" | ||
267 | ---help--- | ||
268 | |||
269 | Select this for the Intel Atom platform. Intel Atom CPUs have an | ||
270 | in-order pipelining architecture and thus can benefit from | ||
271 | accordingly optimized code. Use a recent GCC with specific Atom | ||
272 | support in order to fully benefit from selecting this option. | ||
273 | |||
265 | config GENERIC_CPU | 274 | config GENERIC_CPU |
266 | bool "Generic-x86-64" | 275 | bool "Generic-x86-64" |
267 | depends on X86_64 | 276 | depends on X86_64 |
@@ -295,7 +304,7 @@ config X86_CPU | |||
295 | config X86_L1_CACHE_BYTES | 304 | config X86_L1_CACHE_BYTES |
296 | int | 305 | int |
297 | default "128" if MPSC | 306 | default "128" if MPSC |
298 | default "64" if GENERIC_CPU || MK8 || MCORE2 || X86_32 | 307 | default "64" if GENERIC_CPU || MK8 || MCORE2 || MATOM || X86_32 |
299 | 308 | ||
300 | config X86_INTERNODE_CACHE_BYTES | 309 | config X86_INTERNODE_CACHE_BYTES |
301 | int | 310 | int |
@@ -310,7 +319,7 @@ config X86_L1_CACHE_SHIFT | |||
310 | default "7" if MPENTIUM4 || MPSC | 319 | default "7" if MPENTIUM4 || MPSC |
311 | default "4" if X86_ELAN || M486 || M386 || MGEODEGX1 | 320 | default "4" if X86_ELAN || M486 || M386 || MGEODEGX1 |
312 | default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX | 321 | default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX |
313 | default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MVIAC7 || X86_GENERIC || GENERIC_CPU | 322 | default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU |
314 | 323 | ||
315 | config X86_XADD | 324 | config X86_XADD |
316 | def_bool y | 325 | def_bool y |
@@ -359,7 +368,7 @@ config X86_INTEL_USERCOPY | |||
359 | 368 | ||
360 | config X86_USE_PPRO_CHECKSUM | 369 | config X86_USE_PPRO_CHECKSUM |
361 | def_bool y | 370 | def_bool y |
362 | depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MEFFICEON || MGEODE_LX || MCORE2 | 371 | depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM |
363 | 372 | ||
364 | config X86_USE_3DNOW | 373 | config X86_USE_3DNOW |
365 | def_bool y | 374 | def_bool y |
@@ -387,7 +396,7 @@ config X86_P6_NOP | |||
387 | 396 | ||
388 | config X86_TSC | 397 | config X86_TSC |
389 | def_bool y | 398 | def_bool y |
390 | depends on ((MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2) && !X86_NUMAQ) || X86_64 | 399 | depends on ((MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) && !X86_NUMAQ) || X86_64 |
391 | 400 | ||
392 | config X86_CMPXCHG64 | 401 | config X86_CMPXCHG64 |
393 | def_bool y | 402 | def_bool y |
@@ -397,7 +406,7 @@ config X86_CMPXCHG64 | |||
397 | # generates cmov. | 406 | # generates cmov. |
398 | config X86_CMOV | 407 | config X86_CMOV |
399 | def_bool y | 408 | def_bool y |
400 | depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64) | 409 | depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM) |
401 | 410 | ||
402 | config X86_MINIMUM_CPU_FAMILY | 411 | config X86_MINIMUM_CPU_FAMILY |
403 | int | 412 | int |
diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 1b68659c41b4..7983c420eaf2 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile | |||
@@ -32,8 +32,8 @@ ifeq ($(CONFIG_X86_32),y) | |||
32 | 32 | ||
33 | # Disable unit-at-a-time mode on pre-gcc-4.0 compilers, it makes gcc use | 33 | # Disable unit-at-a-time mode on pre-gcc-4.0 compilers, it makes gcc use |
34 | # a lot more stack due to the lack of sharing of stacklots: | 34 | # a lot more stack due to the lack of sharing of stacklots: |
35 | KBUILD_CFLAGS += $(shell if [ $(call cc-version) -lt 0400 ] ; then \ | 35 | KBUILD_CFLAGS += $(call cc-ifversion, -lt, 0400, \ |
36 | echo $(call cc-option,-fno-unit-at-a-time); fi ;) | 36 | $(call cc-option,-fno-unit-at-a-time)) |
37 | 37 | ||
38 | # CPU-specific tuning. Anything which can be shared with UML should go here. | 38 | # CPU-specific tuning. Anything which can be shared with UML should go here. |
39 | include $(srctree)/arch/x86/Makefile_32.cpu | 39 | include $(srctree)/arch/x86/Makefile_32.cpu |
@@ -55,6 +55,8 @@ else | |||
55 | 55 | ||
56 | cflags-$(CONFIG_MCORE2) += \ | 56 | cflags-$(CONFIG_MCORE2) += \ |
57 | $(call cc-option,-march=core2,$(call cc-option,-mtune=generic)) | 57 | $(call cc-option,-march=core2,$(call cc-option,-mtune=generic)) |
58 | cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom) \ | ||
59 | $(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic)) | ||
58 | cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic) | 60 | cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic) |
59 | KBUILD_CFLAGS += $(cflags-y) | 61 | KBUILD_CFLAGS += $(cflags-y) |
60 | 62 | ||
@@ -72,7 +74,7 @@ endif | |||
72 | 74 | ||
73 | ifdef CONFIG_CC_STACKPROTECTOR | 75 | ifdef CONFIG_CC_STACKPROTECTOR |
74 | cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh | 76 | cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh |
75 | ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC)),y) | 77 | ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(biarch)),y) |
76 | stackp-y := -fstack-protector | 78 | stackp-y := -fstack-protector |
77 | stackp-$(CONFIG_CC_STACKPROTECTOR_ALL) += -fstack-protector-all | 79 | stackp-$(CONFIG_CC_STACKPROTECTOR_ALL) += -fstack-protector-all |
78 | KBUILD_CFLAGS += $(stackp-y) | 80 | KBUILD_CFLAGS += $(stackp-y) |
diff --git a/arch/x86/Makefile_32.cpu b/arch/x86/Makefile_32.cpu index 80177ec052f0..30e9a264f69d 100644 --- a/arch/x86/Makefile_32.cpu +++ b/arch/x86/Makefile_32.cpu | |||
@@ -33,6 +33,8 @@ cflags-$(CONFIG_MCYRIXIII) += $(call cc-option,-march=c3,-march=i486) $(align)-f | |||
33 | cflags-$(CONFIG_MVIAC3_2) += $(call cc-option,-march=c3-2,-march=i686) | 33 | cflags-$(CONFIG_MVIAC3_2) += $(call cc-option,-march=c3-2,-march=i686) |
34 | cflags-$(CONFIG_MVIAC7) += -march=i686 | 34 | cflags-$(CONFIG_MVIAC7) += -march=i686 |
35 | cflags-$(CONFIG_MCORE2) += -march=i686 $(call tune,core2) | 35 | cflags-$(CONFIG_MCORE2) += -march=i686 $(call tune,core2) |
36 | cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom,$(call cc-option,-march=core2,-march=i686)) \ | ||
37 | $(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic)) | ||
36 | 38 | ||
37 | # AMD Elan support | 39 | # AMD Elan support |
38 | cflags-$(CONFIG_X86_ELAN) += -march=i486 | 40 | cflags-$(CONFIG_X86_ELAN) += -march=i486 |
diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c index 275dd177f198..11e8c6eb80a1 100644 --- a/arch/x86/boot/video-vesa.c +++ b/arch/x86/boot/video-vesa.c | |||
@@ -31,7 +31,6 @@ static inline void vesa_store_mode_params_graphics(void) {} | |||
31 | 31 | ||
32 | static int vesa_probe(void) | 32 | static int vesa_probe(void) |
33 | { | 33 | { |
34 | #if defined(CONFIG_VIDEO_VESA) || defined(CONFIG_FIRMWARE_EDID) | ||
35 | struct biosregs ireg, oreg; | 34 | struct biosregs ireg, oreg; |
36 | u16 mode; | 35 | u16 mode; |
37 | addr_t mode_ptr; | 36 | addr_t mode_ptr; |
@@ -49,8 +48,7 @@ static int vesa_probe(void) | |||
49 | vginfo.signature != VESA_MAGIC || | 48 | vginfo.signature != VESA_MAGIC || |
50 | vginfo.version < 0x0102) | 49 | vginfo.version < 0x0102) |
51 | return 0; /* Not present */ | 50 | return 0; /* Not present */ |
52 | #endif /* CONFIG_VIDEO_VESA || CONFIG_FIRMWARE_EDID */ | 51 | |
53 | #ifdef CONFIG_VIDEO_VESA | ||
54 | set_fs(vginfo.video_mode_ptr.seg); | 52 | set_fs(vginfo.video_mode_ptr.seg); |
55 | mode_ptr = vginfo.video_mode_ptr.off; | 53 | mode_ptr = vginfo.video_mode_ptr.off; |
56 | 54 | ||
@@ -102,9 +100,6 @@ static int vesa_probe(void) | |||
102 | } | 100 | } |
103 | 101 | ||
104 | return nmodes; | 102 | return nmodes; |
105 | #else | ||
106 | return 0; | ||
107 | #endif /* CONFIG_VIDEO_VESA */ | ||
108 | } | 103 | } |
109 | 104 | ||
110 | static int vesa_set_mode(struct mode_info *mode) | 105 | static int vesa_set_mode(struct mode_info *mode) |
diff --git a/arch/x86/boot/video-vga.c b/arch/x86/boot/video-vga.c index 8f8d827e254d..819caa1f2008 100644 --- a/arch/x86/boot/video-vga.c +++ b/arch/x86/boot/video-vga.c | |||
@@ -47,14 +47,6 @@ static u8 vga_set_basic_mode(void) | |||
47 | 47 | ||
48 | initregs(&ireg); | 48 | initregs(&ireg); |
49 | 49 | ||
50 | #ifdef CONFIG_VIDEO_400_HACK | ||
51 | if (adapter >= ADAPTER_VGA) { | ||
52 | ireg.ax = 0x1202; | ||
53 | ireg.bx = 0x0030; | ||
54 | intcall(0x10, &ireg, NULL); | ||
55 | } | ||
56 | #endif | ||
57 | |||
58 | ax = 0x0f00; | 50 | ax = 0x0f00; |
59 | intcall(0x10, &ireg, &oreg); | 51 | intcall(0x10, &ireg, &oreg); |
60 | mode = oreg.al; | 52 | mode = oreg.al; |
@@ -62,11 +54,9 @@ static u8 vga_set_basic_mode(void) | |||
62 | set_fs(0); | 54 | set_fs(0); |
63 | rows = rdfs8(0x484); /* rows minus one */ | 55 | rows = rdfs8(0x484); /* rows minus one */ |
64 | 56 | ||
65 | #ifndef CONFIG_VIDEO_400_HACK | ||
66 | if ((oreg.ax == 0x5003 || oreg.ax == 0x5007) && | 57 | if ((oreg.ax == 0x5003 || oreg.ax == 0x5007) && |
67 | (rows == 0 || rows == 24)) | 58 | (rows == 0 || rows == 24)) |
68 | return mode; | 59 | return mode; |
69 | #endif | ||
70 | 60 | ||
71 | if (mode != 3 && mode != 7) | 61 | if (mode != 3 && mode != 7) |
72 | mode = 3; | 62 | mode = 3; |
diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c index bad728b76fc2..d42da3802499 100644 --- a/arch/x86/boot/video.c +++ b/arch/x86/boot/video.c | |||
@@ -221,7 +221,6 @@ static unsigned int mode_menu(void) | |||
221 | } | 221 | } |
222 | } | 222 | } |
223 | 223 | ||
224 | #ifdef CONFIG_VIDEO_RETAIN | ||
225 | /* Save screen content to the heap */ | 224 | /* Save screen content to the heap */ |
226 | static struct saved_screen { | 225 | static struct saved_screen { |
227 | int x, y; | 226 | int x, y; |
@@ -299,10 +298,6 @@ static void restore_screen(void) | |||
299 | ireg.dl = saved.curx; | 298 | ireg.dl = saved.curx; |
300 | intcall(0x10, &ireg, NULL); | 299 | intcall(0x10, &ireg, NULL); |
301 | } | 300 | } |
302 | #else | ||
303 | #define save_screen() ((void)0) | ||
304 | #define restore_screen() ((void)0) | ||
305 | #endif | ||
306 | 301 | ||
307 | void set_video(void) | 302 | void set_video(void) |
308 | { | 303 | { |
diff --git a/arch/x86/boot/video.h b/arch/x86/boot/video.h index 5bb174a997fc..ff339c5db311 100644 --- a/arch/x86/boot/video.h +++ b/arch/x86/boot/video.h | |||
@@ -17,19 +17,8 @@ | |||
17 | 17 | ||
18 | #include <linux/types.h> | 18 | #include <linux/types.h> |
19 | 19 | ||
20 | /* Enable autodetection of SVGA adapters and modes. */ | 20 | /* |
21 | #undef CONFIG_VIDEO_SVGA | 21 | * This code uses an extended set of video mode numbers. These include: |
22 | |||
23 | /* Enable autodetection of VESA modes */ | ||
24 | #define CONFIG_VIDEO_VESA | ||
25 | |||
26 | /* Retain screen contents when switching modes */ | ||
27 | #define CONFIG_VIDEO_RETAIN | ||
28 | |||
29 | /* Force 400 scan lines for standard modes (hack to fix bad BIOS behaviour */ | ||
30 | #undef CONFIG_VIDEO_400_HACK | ||
31 | |||
32 | /* This code uses an extended set of video mode numbers. These include: | ||
33 | * Aliases for standard modes | 22 | * Aliases for standard modes |
34 | * NORMAL_VGA (-1) | 23 | * NORMAL_VGA (-1) |
35 | * EXTENDED_VGA (-2) | 24 | * EXTENDED_VGA (-2) |
@@ -67,13 +56,8 @@ | |||
67 | /* The "recalculate timings" flag */ | 56 | /* The "recalculate timings" flag */ |
68 | #define VIDEO_RECALC 0x8000 | 57 | #define VIDEO_RECALC 0x8000 |
69 | 58 | ||
70 | /* Define DO_STORE according to CONFIG_VIDEO_RETAIN */ | ||
71 | #ifdef CONFIG_VIDEO_RETAIN | ||
72 | void store_screen(void); | 59 | void store_screen(void); |
73 | #define DO_STORE() store_screen() | 60 | #define DO_STORE() store_screen() |
74 | #else | ||
75 | #define DO_STORE() ((void)0) | ||
76 | #endif /* CONFIG_VIDEO_RETAIN */ | ||
77 | 61 | ||
78 | /* | 62 | /* |
79 | * Mode table structures | 63 | * Mode table structures |
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig index edb992ebef92..d28fad19654a 100644 --- a/arch/x86/configs/i386_defconfig +++ b/arch/x86/configs/i386_defconfig | |||
@@ -2355,7 +2355,7 @@ CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y | |||
2355 | CONFIG_HAVE_DYNAMIC_FTRACE=y | 2355 | CONFIG_HAVE_DYNAMIC_FTRACE=y |
2356 | CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y | 2356 | CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y |
2357 | CONFIG_HAVE_HW_BRANCH_TRACER=y | 2357 | CONFIG_HAVE_HW_BRANCH_TRACER=y |
2358 | CONFIG_HAVE_FTRACE_SYSCALLS=y | 2358 | CONFIG_HAVE_SYSCALL_TRACEPOINTS=y |
2359 | CONFIG_RING_BUFFER=y | 2359 | CONFIG_RING_BUFFER=y |
2360 | CONFIG_TRACING=y | 2360 | CONFIG_TRACING=y |
2361 | CONFIG_TRACING_SUPPORT=y | 2361 | CONFIG_TRACING_SUPPORT=y |
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig index cee1dd2e69b2..6c86acd847a4 100644 --- a/arch/x86/configs/x86_64_defconfig +++ b/arch/x86/configs/x86_64_defconfig | |||
@@ -2329,7 +2329,7 @@ CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y | |||
2329 | CONFIG_HAVE_DYNAMIC_FTRACE=y | 2329 | CONFIG_HAVE_DYNAMIC_FTRACE=y |
2330 | CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y | 2330 | CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y |
2331 | CONFIG_HAVE_HW_BRANCH_TRACER=y | 2331 | CONFIG_HAVE_HW_BRANCH_TRACER=y |
2332 | CONFIG_HAVE_FTRACE_SYSCALLS=y | 2332 | CONFIG_HAVE_SYSCALL_TRACEPOINTS=y |
2333 | CONFIG_RING_BUFFER=y | 2333 | CONFIG_RING_BUFFER=y |
2334 | CONFIG_TRACING=y | 2334 | CONFIG_TRACING=y |
2335 | CONFIG_TRACING_SUPPORT=y | 2335 | CONFIG_TRACING_SUPPORT=y |
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index c580c5ec1cad..585edebe12cf 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c | |||
@@ -59,13 +59,6 @@ asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out, | |||
59 | asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out, | 59 | asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out, |
60 | const u8 *in, unsigned int len, u8 *iv); | 60 | const u8 *in, unsigned int len, u8 *iv); |
61 | 61 | ||
62 | static inline int kernel_fpu_using(void) | ||
63 | { | ||
64 | if (in_interrupt() && !(read_cr0() & X86_CR0_TS)) | ||
65 | return 1; | ||
66 | return 0; | ||
67 | } | ||
68 | |||
69 | static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx) | 62 | static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx) |
70 | { | 63 | { |
71 | unsigned long addr = (unsigned long)raw_ctx; | 64 | unsigned long addr = (unsigned long)raw_ctx; |
@@ -89,7 +82,7 @@ static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx, | |||
89 | return -EINVAL; | 82 | return -EINVAL; |
90 | } | 83 | } |
91 | 84 | ||
92 | if (kernel_fpu_using()) | 85 | if (irq_fpu_usable()) |
93 | err = crypto_aes_expand_key(ctx, in_key, key_len); | 86 | err = crypto_aes_expand_key(ctx, in_key, key_len); |
94 | else { | 87 | else { |
95 | kernel_fpu_begin(); | 88 | kernel_fpu_begin(); |
@@ -110,7 +103,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) | |||
110 | { | 103 | { |
111 | struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm)); | 104 | struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm)); |
112 | 105 | ||
113 | if (kernel_fpu_using()) | 106 | if (irq_fpu_usable()) |
114 | crypto_aes_encrypt_x86(ctx, dst, src); | 107 | crypto_aes_encrypt_x86(ctx, dst, src); |
115 | else { | 108 | else { |
116 | kernel_fpu_begin(); | 109 | kernel_fpu_begin(); |
@@ -123,7 +116,7 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) | |||
123 | { | 116 | { |
124 | struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm)); | 117 | struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm)); |
125 | 118 | ||
126 | if (kernel_fpu_using()) | 119 | if (irq_fpu_usable()) |
127 | crypto_aes_decrypt_x86(ctx, dst, src); | 120 | crypto_aes_decrypt_x86(ctx, dst, src); |
128 | else { | 121 | else { |
129 | kernel_fpu_begin(); | 122 | kernel_fpu_begin(); |
@@ -349,7 +342,7 @@ static int ablk_encrypt(struct ablkcipher_request *req) | |||
349 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | 342 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); |
350 | struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); | 343 | struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); |
351 | 344 | ||
352 | if (kernel_fpu_using()) { | 345 | if (irq_fpu_usable()) { |
353 | struct ablkcipher_request *cryptd_req = | 346 | struct ablkcipher_request *cryptd_req = |
354 | ablkcipher_request_ctx(req); | 347 | ablkcipher_request_ctx(req); |
355 | memcpy(cryptd_req, req, sizeof(*req)); | 348 | memcpy(cryptd_req, req, sizeof(*req)); |
@@ -370,7 +363,7 @@ static int ablk_decrypt(struct ablkcipher_request *req) | |||
370 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | 363 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); |
371 | struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); | 364 | struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); |
372 | 365 | ||
373 | if (kernel_fpu_using()) { | 366 | if (irq_fpu_usable()) { |
374 | struct ablkcipher_request *cryptd_req = | 367 | struct ablkcipher_request *cryptd_req = |
375 | ablkcipher_request_ctx(req); | 368 | ablkcipher_request_ctx(req); |
376 | memcpy(cryptd_req, req, sizeof(*req)); | 369 | memcpy(cryptd_req, req, sizeof(*req)); |
@@ -636,7 +629,7 @@ static int __init aesni_init(void) | |||
636 | int err; | 629 | int err; |
637 | 630 | ||
638 | if (!cpu_has_aes) { | 631 | if (!cpu_has_aes) { |
639 | printk(KERN_ERR "Intel AES-NI instructions are not detected.\n"); | 632 | printk(KERN_INFO "Intel AES-NI instructions are not detected.\n"); |
640 | return -ENODEV; | 633 | return -ENODEV; |
641 | } | 634 | } |
642 | if ((err = crypto_register_alg(&aesni_alg))) | 635 | if ((err = crypto_register_alg(&aesni_alg))) |
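The aesni glue hunk above swaps the open-coded kernel_fpu_using() for the irq_fpu_usable() helper this series adds to <asm/i387.h> (see the i387.h hunk further down). Note the two predicates answer opposite questions: kernel_fpu_using() meant "is the kernel already mid-FPU here?", while irq_fpu_usable() means "may this context take the FPU?". A minimal sketch of the intended dispatch idiom, with the test negated accordingly; function names mirror the glue file, but this is illustrative, not the patch itself:

static void aes_encrypt_sketch(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
{
	if (!irq_fpu_usable()) {
		/* FPU is off limits from this context: table-based fallback */
		crypto_aes_encrypt_x86(ctx, dst, src);
	} else {
		/* claim the FPU for the AES-NI path */
		kernel_fpu_begin();
		aesni_enc(ctx, dst, src);
		kernel_fpu_end();
	}
}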
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index e590261ba059..ba331bfd1112 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S | |||
@@ -537,7 +537,7 @@ ia32_sys_call_table: | |||
537 | .quad sys_mkdir | 537 | .quad sys_mkdir |
538 | .quad sys_rmdir /* 40 */ | 538 | .quad sys_rmdir /* 40 */ |
539 | .quad sys_dup | 539 | .quad sys_dup |
540 | .quad sys32_pipe | 540 | .quad sys_pipe |
541 | .quad compat_sys_times | 541 | .quad compat_sys_times |
542 | .quad quiet_ni_syscall /* old prof syscall holder */ | 542 | .quad quiet_ni_syscall /* old prof syscall holder */ |
543 | .quad sys_brk /* 45 */ | 543 | .quad sys_brk /* 45 */ |
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c index 085a8c35f149..9f5527198825 100644 --- a/arch/x86/ia32/sys_ia32.c +++ b/arch/x86/ia32/sys_ia32.c | |||
@@ -189,20 +189,6 @@ asmlinkage long sys32_mprotect(unsigned long start, size_t len, | |||
189 | return sys_mprotect(start, len, prot); | 189 | return sys_mprotect(start, len, prot); |
190 | } | 190 | } |
191 | 191 | ||
192 | asmlinkage long sys32_pipe(int __user *fd) | ||
193 | { | ||
194 | int retval; | ||
195 | int fds[2]; | ||
196 | |||
197 | retval = do_pipe_flags(fds, 0); | ||
198 | if (retval) | ||
199 | goto out; | ||
200 | if (copy_to_user(fd, fds, sizeof(fds))) | ||
201 | retval = -EFAULT; | ||
202 | out: | ||
203 | return retval; | ||
204 | } | ||
205 | |||
206 | asmlinkage long sys32_rt_sigaction(int sig, struct sigaction32 __user *act, | 192 | asmlinkage long sys32_rt_sigaction(int sig, struct sigaction32 __user *act, |
207 | struct sigaction32 __user *oact, | 193 | struct sigaction32 __user *oact, |
208 | unsigned int sigsetsize) | 194 | unsigned int sigsetsize) |
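Dropping sys32_pipe and pointing syscall 42 at sys_pipe works because the compat wrapper was byte-for-byte equivalent to the native syscall: pipe() traffics in two C ints, and int has the same size and layout in the ia32 and x86-64 ABIs. A hedged sketch of the generic implementation the 32-bit entry now reaches, modeled on fs/pipe.c of this era rather than copied from this patch:

asmlinkage long sys_pipe(int __user *fildes)
{
	int fd[2];
	int error;

	error = do_pipe_flags(fd, 0);
	if (!error) {
		/* two 32-bit ints: identical layout for 32- and 64-bit callers */
		if (copy_to_user(fildes, fd, sizeof(fd)))
			error = -EFAULT;
	}
	return error;
}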
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index 1a37bcdc8606..c240efc74e00 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h | |||
@@ -73,8 +73,6 @@ static inline void alternatives_smp_module_del(struct module *mod) {} | |||
73 | static inline void alternatives_smp_switch(int smp) {} | 73 | static inline void alternatives_smp_switch(int smp) {} |
74 | #endif /* CONFIG_SMP */ | 74 | #endif /* CONFIG_SMP */ |
75 | 75 | ||
76 | const unsigned char *const *find_nop_table(void); | ||
77 | |||
78 | /* alternative assembly primitive: */ | 76 | /* alternative assembly primitive: */ |
79 | #define ALTERNATIVE(oldinstr, newinstr, feature) \ | 77 | #define ALTERNATIVE(oldinstr, newinstr, feature) \ |
80 | \ | 78 | \ |
@@ -144,8 +142,6 @@ static inline void apply_paravirt(struct paravirt_patch_site *start, | |||
144 | #define __parainstructions_end NULL | 142 | #define __parainstructions_end NULL |
145 | #endif | 143 | #endif |
146 | 144 | ||
147 | extern void add_nops(void *insns, unsigned int len); | ||
148 | |||
149 | /* | 145 | /* |
150 | * Clear and restore the kernel write-protection flag on the local CPU. | 146 | * Clear and restore the kernel write-protection flag on the local CPU. |
151 | * Allows the kernel to edit read-only pages. | 147 | * Allows the kernel to edit read-only pages. |
@@ -161,10 +157,7 @@ extern void add_nops(void *insns, unsigned int len); | |||
161 | * Intel's errata. | 157 | * Intel's errata. |
162 | * On the local CPU you need to be protected again NMI or MCE handlers seeing an | 158 | * On the local CPU you need to be protected again NMI or MCE handlers seeing an |
163 | * inconsistent instruction while you patch. | 159 | * inconsistent instruction while you patch. |
164 | * The _early version expects the memory to already be RW. | ||
165 | */ | 160 | */ |
166 | |||
167 | extern void *text_poke(void *addr, const void *opcode, size_t len); | 161 | extern void *text_poke(void *addr, const void *opcode, size_t len); |
168 | extern void *text_poke_early(void *addr, const void *opcode, size_t len); | ||
169 | 162 | ||
170 | #endif /* _ASM_X86_ALTERNATIVE_H */ | 163 | #endif /* _ASM_X86_ALTERNATIVE_H */ |
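With find_nop_table(), add_nops() and text_poke_early() no longer exported, text_poke() is the remaining public interface for runtime code patching, handling the write-protection dance the header comment describes. An illustrative, hypothetical use; 'addr' is assumed, and the kernel picks NOPs from its own (now private) tables, so the encoding below is just one valid choice:

static void patch_out_call_site(void *addr)
{
	static const unsigned char nop5[] =
		{ 0x0f, 0x1f, 0x44, 0x00, 0x00 };	/* one 5-byte NOP encoding */

	text_poke(addr, nop5, sizeof(nop5));
}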
diff --git a/arch/x86/include/asm/amd_iommu.h b/arch/x86/include/asm/amd_iommu.h index bdf96f119f06..ac95995b7bad 100644 --- a/arch/x86/include/asm/amd_iommu.h +++ b/arch/x86/include/asm/amd_iommu.h | |||
@@ -25,6 +25,7 @@ | |||
25 | #ifdef CONFIG_AMD_IOMMU | 25 | #ifdef CONFIG_AMD_IOMMU |
26 | extern int amd_iommu_init(void); | 26 | extern int amd_iommu_init(void); |
27 | extern int amd_iommu_init_dma_ops(void); | 27 | extern int amd_iommu_init_dma_ops(void); |
28 | extern int amd_iommu_init_passthrough(void); | ||
28 | extern void amd_iommu_detect(void); | 29 | extern void amd_iommu_detect(void); |
29 | extern irqreturn_t amd_iommu_int_handler(int irq, void *data); | 30 | extern irqreturn_t amd_iommu_int_handler(int irq, void *data); |
30 | extern void amd_iommu_flush_all_domains(void); | 31 | extern void amd_iommu_flush_all_domains(void); |
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h index 0c878caaa0a2..2a2cc7a78a81 100644 --- a/arch/x86/include/asm/amd_iommu_types.h +++ b/arch/x86/include/asm/amd_iommu_types.h | |||
@@ -143,22 +143,29 @@ | |||
143 | #define EVT_BUFFER_SIZE 8192 /* 512 entries */ | 143 | #define EVT_BUFFER_SIZE 8192 /* 512 entries */ |
144 | #define EVT_LEN_MASK (0x9ULL << 56) | 144 | #define EVT_LEN_MASK (0x9ULL << 56) |
145 | 145 | ||
146 | #define PAGE_MODE_NONE 0x00 | ||
146 | #define PAGE_MODE_1_LEVEL 0x01 | 147 | #define PAGE_MODE_1_LEVEL 0x01 |
147 | #define PAGE_MODE_2_LEVEL 0x02 | 148 | #define PAGE_MODE_2_LEVEL 0x02 |
148 | #define PAGE_MODE_3_LEVEL 0x03 | 149 | #define PAGE_MODE_3_LEVEL 0x03 |
149 | 150 | #define PAGE_MODE_4_LEVEL 0x04 | |
150 | #define IOMMU_PDE_NL_0 0x000ULL | 151 | #define PAGE_MODE_5_LEVEL 0x05 |
151 | #define IOMMU_PDE_NL_1 0x200ULL | 152 | #define PAGE_MODE_6_LEVEL 0x06 |
152 | #define IOMMU_PDE_NL_2 0x400ULL | 153 | |
153 | #define IOMMU_PDE_NL_3 0x600ULL | 154 | #define PM_LEVEL_SHIFT(x) (12 + ((x) * 9)) |
154 | 155 | #define PM_LEVEL_SIZE(x) (((x) < 6) ? \ | |
155 | #define IOMMU_PTE_L2_INDEX(address) (((address) >> 30) & 0x1ffULL) | 156 | ((1ULL << PM_LEVEL_SHIFT((x))) - 1): \ |
156 | #define IOMMU_PTE_L1_INDEX(address) (((address) >> 21) & 0x1ffULL) | 157 | (0xffffffffffffffffULL)) |
157 | #define IOMMU_PTE_L0_INDEX(address) (((address) >> 12) & 0x1ffULL) | 158 | #define PM_LEVEL_INDEX(x, a) (((a) >> PM_LEVEL_SHIFT((x))) & 0x1ffULL) |
158 | 159 | #define PM_LEVEL_ENC(x) (((x) << 9) & 0xe00ULL) | |
159 | #define IOMMU_MAP_SIZE_L1 (1ULL << 21) | 160 | #define PM_LEVEL_PDE(x, a) ((a) | PM_LEVEL_ENC((x)) | \ |
160 | #define IOMMU_MAP_SIZE_L2 (1ULL << 30) | 161 | IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW) |
161 | #define IOMMU_MAP_SIZE_L3 (1ULL << 39) | 162 | #define PM_PTE_LEVEL(pte) (((pte) >> 9) & 0x7ULL) |
163 | |||
164 | #define PM_MAP_4k 0 | ||
165 | #define PM_ADDR_MASK 0x000ffffffffff000ULL | ||
166 | #define PM_MAP_MASK(lvl) (PM_ADDR_MASK & \ | ||
167 | (~((1ULL << (12 + ((lvl) * 9))) - 1))) | ||
168 | #define PM_ALIGNED(lvl, addr) ((PM_MAP_MASK(lvl) & (addr)) == (addr)) | ||
162 | 169 | ||
163 | #define IOMMU_PTE_P (1ULL << 0) | 170 | #define IOMMU_PTE_P (1ULL << 0) |
164 | #define IOMMU_PTE_TV (1ULL << 1) | 171 | #define IOMMU_PTE_TV (1ULL << 1) |
@@ -167,11 +174,6 @@ | |||
167 | #define IOMMU_PTE_IR (1ULL << 61) | 174 | #define IOMMU_PTE_IR (1ULL << 61) |
168 | #define IOMMU_PTE_IW (1ULL << 62) | 175 | #define IOMMU_PTE_IW (1ULL << 62) |
169 | 176 | ||
170 | #define IOMMU_L1_PDE(address) \ | ||
171 | ((address) | IOMMU_PDE_NL_1 | IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW) | ||
172 | #define IOMMU_L2_PDE(address) \ | ||
173 | ((address) | IOMMU_PDE_NL_2 | IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW) | ||
174 | |||
175 | #define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL) | 177 | #define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL) |
176 | #define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P) | 178 | #define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P) |
177 | #define IOMMU_PTE_PAGE(pte) (phys_to_virt((pte) & IOMMU_PAGE_MASK)) | 179 | #define IOMMU_PTE_PAGE(pte) (phys_to_virt((pte) & IOMMU_PAGE_MASK)) |
@@ -194,11 +196,14 @@ | |||
194 | #define PD_DMA_OPS_MASK (1UL << 0) /* domain used for dma_ops */ | 196 | #define PD_DMA_OPS_MASK (1UL << 0) /* domain used for dma_ops */ |
195 | #define PD_DEFAULT_MASK (1UL << 1) /* domain is a default dma_ops | 197 | #define PD_DEFAULT_MASK (1UL << 1) /* domain is a default dma_ops |
196 | domain for an IOMMU */ | 198 | domain for an IOMMU */ |
199 | #define PD_PASSTHROUGH_MASK (1UL << 2) /* domain has no page | ||
200 | translation */ | ||
201 | |||
197 | extern bool amd_iommu_dump; | 202 | extern bool amd_iommu_dump; |
198 | #define DUMP_printk(format, arg...) \ | 203 | #define DUMP_printk(format, arg...) \ |
199 | do { \ | 204 | do { \ |
200 | if (amd_iommu_dump) \ | 205 | if (amd_iommu_dump) \ |
201 | printk(KERN_INFO "AMD IOMMU: " format, ## arg); \ | 206 | printk(KERN_INFO "AMD-Vi: " format, ## arg); \ |
202 | } while(0); | 207 | } while(0); |
203 | 208 | ||
204 | /* | 209 | /* |
@@ -226,6 +231,7 @@ struct protection_domain { | |||
226 | int mode; /* paging mode (0-6 levels) */ | 231 | int mode; /* paging mode (0-6 levels) */ |
227 | u64 *pt_root; /* page table root pointer */ | 232 | u64 *pt_root; /* page table root pointer */ |
228 | unsigned long flags; /* flags to find out type of domain */ | 233 | unsigned long flags; /* flags to find out type of domain */ |
234 | bool updated; /* complete domain flush required */ | ||
229 | unsigned dev_cnt; /* devices assigned to this domain */ | 235 | unsigned dev_cnt; /* devices assigned to this domain */ |
230 | void *priv; /* private data */ | 236 | void *priv; /* private data */ |
231 | }; | 237 | }; |
@@ -337,6 +343,9 @@ struct amd_iommu { | |||
337 | /* if one, we need to send a completion wait command */ | 343 | /* if one, we need to send a completion wait command */ |
338 | bool need_sync; | 344 | bool need_sync; |
339 | 345 | ||
346 | /* becomes true if a command buffer reset is running */ | ||
347 | bool reset_in_progress; | ||
348 | |||
340 | /* default dma_ops domain for that IOMMU */ | 349 | /* default dma_ops domain for that IOMMU */ |
341 | struct dma_ops_domain *default_dom; | 350 | struct dma_ops_domain *default_dom; |
342 | }; | 351 | }; |
@@ -457,4 +466,7 @@ static inline void amd_iommu_stats_init(void) { } | |||
457 | 466 | ||
458 | #endif /* CONFIG_AMD_IOMMU_STATS */ | 467 | #endif /* CONFIG_AMD_IOMMU_STATS */ |
459 | 468 | ||
469 | /* some function prototypes */ | ||
470 | extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu); | ||
471 | |||
460 | #endif /* _ASM_X86_AMD_IOMMU_TYPES_H */ | 472 | #endif /* _ASM_X86_AMD_IOMMU_TYPES_H */ |
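The new PM_* macros generalize the old fixed three-level IOMMU_PTE_L*_INDEX helpers: every page-table level resolves 9 address bits starting at bit 12, so level 0 covers bits 12-20, level 1 covers bits 21-29, and so on up through level 5. A standalone restatement of that arithmetic (plain userspace C, only to show the indexing; the address is an arbitrary example):

#include <stdio.h>

#define PM_LEVEL_SHIFT(x)	(12 + ((x) * 9))
#define PM_LEVEL_INDEX(x, a)	(((a) >> PM_LEVEL_SHIFT(x)) & 0x1ffULL)

int main(void)
{
	unsigned long long iova = 0x0000123456789000ULL;	/* example IOVA */
	int level;

	for (level = 0; level < 6; level++)
		printf("level %d: bits %2d-%2d, index %3llu\n",
		       level, PM_LEVEL_SHIFT(level), PM_LEVEL_SHIFT(level) + 8,
		       PM_LEVEL_INDEX(level, iova));
	return 0;
}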
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index bb7d47925847..586b7adb8e53 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h | |||
@@ -183,6 +183,10 @@ static inline int x2apic_enabled(void) | |||
183 | } | 183 | } |
184 | 184 | ||
185 | #define x2apic_supported() (cpu_has_x2apic) | 185 | #define x2apic_supported() (cpu_has_x2apic) |
186 | static inline void x2apic_force_phys(void) | ||
187 | { | ||
188 | x2apic_phys = 1; | ||
189 | } | ||
186 | #else | 190 | #else |
187 | static inline void check_x2apic(void) | 191 | static inline void check_x2apic(void) |
188 | { | 192 | { |
@@ -194,6 +198,9 @@ static inline int x2apic_enabled(void) | |||
194 | { | 198 | { |
195 | return 0; | 199 | return 0; |
196 | } | 200 | } |
201 | static inline void x2apic_force_phys(void) | ||
202 | { | ||
203 | } | ||
197 | 204 | ||
198 | #define x2apic_preenabled 0 | 205 | #define x2apic_preenabled 0 |
199 | #define x2apic_supported() 0 | 206 | #define x2apic_supported() 0 |
diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h index 7ddb36ab933b..7386bfa4f4bc 100644 --- a/arch/x86/include/asm/apicdef.h +++ b/arch/x86/include/asm/apicdef.h | |||
@@ -8,7 +8,8 @@ | |||
8 | * Ingo Molnar <mingo@redhat.com>, 1999, 2000 | 8 | * Ingo Molnar <mingo@redhat.com>, 1999, 2000 |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define APIC_DEFAULT_PHYS_BASE 0xfee00000 | 11 | #define IO_APIC_DEFAULT_PHYS_BASE 0xfec00000 |
12 | #define APIC_DEFAULT_PHYS_BASE 0xfee00000 | ||
12 | 13 | ||
13 | #define APIC_ID 0x20 | 14 | #define APIC_ID 0x20 |
14 | 15 | ||
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h index 56be78f582f0..b3ed1e1460ff 100644 --- a/arch/x86/include/asm/asm.h +++ b/arch/x86/include/asm/asm.h | |||
@@ -3,7 +3,7 @@ | |||
3 | 3 | ||
4 | #ifdef __ASSEMBLY__ | 4 | #ifdef __ASSEMBLY__ |
5 | # define __ASM_FORM(x) x | 5 | # define __ASM_FORM(x) x |
6 | # define __ASM_EX_SEC .section __ex_table | 6 | # define __ASM_EX_SEC .section __ex_table, "a" |
7 | #else | 7 | #else |
8 | # define __ASM_FORM(x) " " #x " " | 8 | # define __ASM_FORM(x) " " #x " " |
9 | # define __ASM_EX_SEC " .section __ex_table,\"a\"\n" | 9 | # define __ASM_EX_SEC " .section __ex_table,\"a\"\n" |
@@ -38,10 +38,18 @@ | |||
38 | #define _ASM_DI __ASM_REG(di) | 38 | #define _ASM_DI __ASM_REG(di) |
39 | 39 | ||
40 | /* Exception table entry */ | 40 | /* Exception table entry */ |
41 | #ifdef __ASSEMBLY__ | ||
42 | # define _ASM_EXTABLE(from,to) \ | ||
43 | __ASM_EX_SEC ; \ | ||
44 | _ASM_ALIGN ; \ | ||
45 | _ASM_PTR from , to ; \ | ||
46 | .previous | ||
47 | #else | ||
41 | # define _ASM_EXTABLE(from,to) \ | 48 | # define _ASM_EXTABLE(from,to) \ |
42 | __ASM_EX_SEC \ | 49 | __ASM_EX_SEC \ |
43 | _ASM_ALIGN "\n" \ | 50 | _ASM_ALIGN "\n" \ |
44 | _ASM_PTR #from "," #to "\n" \ | 51 | _ASM_PTR #from "," #to "\n" \ |
45 | " .previous\n" | 52 | " .previous\n" |
53 | #endif | ||
46 | 54 | ||
47 | #endif /* _ASM_X86_ASM_H */ | 55 | #endif /* _ASM_X86_ASM_H */ |
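The split gives .S files a raw-token variant of _ASM_EXTABLE while C code keeps the stringified form. As a usage sketch, this is the shape the msr.h hunk later in this series generates for a faulting rdmsr: the instruction at local label 2 gets a fixup at label 3, and the extable entry ties the two together:

static int rdmsr_safe_sketch(unsigned int msr, u32 *low, u32 *high)
{
	int err;

	asm volatile("2: rdmsr ; xor %[err],%[err]\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3: mov %[fault],%[err] ; jmp 1b\n\t"
		     ".previous\n\t"
		     _ASM_EXTABLE(2b, 3b)
		     : [err] "=r" (err), "=a" (*low), "=d" (*high)
		     : "c" (msr), [fault] "i" (-EIO));
	return err;
}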
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 4a28d22d4793..847fee6493a2 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h | |||
@@ -95,6 +95,7 @@ | |||
95 | #define X86_FEATURE_NONSTOP_TSC (3*32+24) /* TSC does not stop in C states */ | 95 | #define X86_FEATURE_NONSTOP_TSC (3*32+24) /* TSC does not stop in C states */ |
96 | #define X86_FEATURE_CLFLUSH_MONITOR (3*32+25) /* "" clflush reqd with monitor */ | 96 | #define X86_FEATURE_CLFLUSH_MONITOR (3*32+25) /* "" clflush reqd with monitor */ |
97 | #define X86_FEATURE_EXTD_APICID (3*32+26) /* has extended APICID (8 bits) */ | 97 | #define X86_FEATURE_EXTD_APICID (3*32+26) /* has extended APICID (8 bits) */ |
98 | #define X86_FEATURE_AMD_DCM (3*32+27) /* multi-node processor */ | ||
98 | 99 | ||
99 | /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ | 100 | /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ |
100 | #define X86_FEATURE_XMM3 (4*32+ 0) /* "pni" SSE-3 */ | 101 | #define X86_FEATURE_XMM3 (4*32+ 0) /* "pni" SSE-3 */ |
diff --git a/arch/x86/include/asm/current.h b/arch/x86/include/asm/current.h index c68c361697e1..4d447b732d82 100644 --- a/arch/x86/include/asm/current.h +++ b/arch/x86/include/asm/current.h | |||
@@ -11,7 +11,7 @@ DECLARE_PER_CPU(struct task_struct *, current_task); | |||
11 | 11 | ||
12 | static __always_inline struct task_struct *get_current(void) | 12 | static __always_inline struct task_struct *get_current(void) |
13 | { | 13 | { |
14 | return percpu_read(current_task); | 14 | return percpu_read_stable(current_task); |
15 | } | 15 | } |
16 | 16 | ||
17 | #define current get_current() | 17 | #define current get_current() |
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h index c993e9e0fed4..e8de2f6f5ca5 100644 --- a/arch/x86/include/asm/desc.h +++ b/arch/x86/include/asm/desc.h | |||
@@ -291,11 +291,24 @@ static inline unsigned long get_desc_base(const struct desc_struct *desc) | |||
291 | return desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24); | 291 | return desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24); |
292 | } | 292 | } |
293 | 293 | ||
294 | static inline void set_desc_base(struct desc_struct *desc, unsigned long base) | ||
295 | { | ||
296 | desc->base0 = base & 0xffff; | ||
297 | desc->base1 = (base >> 16) & 0xff; | ||
298 | desc->base2 = (base >> 24) & 0xff; | ||
299 | } | ||
300 | |||
294 | static inline unsigned long get_desc_limit(const struct desc_struct *desc) | 301 | static inline unsigned long get_desc_limit(const struct desc_struct *desc) |
295 | { | 302 | { |
296 | return desc->limit0 | (desc->limit << 16); | 303 | return desc->limit0 | (desc->limit << 16); |
297 | } | 304 | } |
298 | 305 | ||
306 | static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit) | ||
307 | { | ||
308 | desc->limit0 = limit & 0xffff; | ||
309 | desc->limit = (limit >> 16) & 0xf; | ||
310 | } | ||
311 | |||
299 | static inline void _set_gate(int gate, unsigned type, void *addr, | 312 | static inline void _set_gate(int gate, unsigned type, void *addr, |
300 | unsigned dpl, unsigned ist, unsigned seg) | 313 | unsigned dpl, unsigned ist, unsigned seg) |
301 | { | 314 | { |
diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h index a6adefa28b94..9d6684849fd9 100644 --- a/arch/x86/include/asm/desc_defs.h +++ b/arch/x86/include/asm/desc_defs.h | |||
@@ -34,6 +34,12 @@ struct desc_struct { | |||
34 | }; | 34 | }; |
35 | } __attribute__((packed)); | 35 | } __attribute__((packed)); |
36 | 36 | ||
37 | #define GDT_ENTRY_INIT(flags, base, limit) { { { \ | ||
38 | .a = ((limit) & 0xffff) | (((base) & 0xffff) << 16), \ | ||
39 | .b = (((base) & 0xff0000) >> 16) | (((flags) & 0xf0ff) << 8) | \ | ||
40 | ((limit) & 0xf0000) | ((base) & 0xff000000), \ | ||
41 | } } } | ||
42 | |||
37 | enum { | 43 | enum { |
38 | GATE_INTERRUPT = 0xE, | 44 | GATE_INTERRUPT = 0xE, |
39 | GATE_TRAP = 0xF, | 45 | GATE_TRAP = 0xF, |
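GDT_ENTRY_INIT packs the scattered segment-descriptor bit fields once instead of at every open-coded initializer. A worked example using the values the lguest.h hunk below switches to, showing it reproduces the old literal words exactly:

GDT_ENTRY_INIT(0xc09b, base = 0, limit = 0xfffff)
  .a = (0xfffff & 0xffff) | ((0 & 0xffff) << 16)          = 0x0000ffff
  .b = ((0 & 0xff0000) >> 16)                             = 0x00000000
     | ((0xc09b & 0xf0ff) << 8)                           = 0x00c09b00
     | (0xfffff & 0xf0000)                                = 0x000f0000
     | (0 & 0xff000000)                                   = 0x00000000
                                                     .b   = 0x00cf9b00

which is precisely the { 0x0000ffff, 0x00cf9b00 } pair the old FULL_EXEC_SEGMENT initializer spelled out by hand.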
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index 1c3f9435f1c9..0ee770d23d0e 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h | |||
@@ -55,6 +55,24 @@ extern int dma_set_mask(struct device *dev, u64 mask); | |||
55 | extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, | 55 | extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, |
56 | dma_addr_t *dma_addr, gfp_t flag); | 56 | dma_addr_t *dma_addr, gfp_t flag); |
57 | 57 | ||
58 | static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) | ||
59 | { | ||
60 | if (!dev->dma_mask) | ||
61 | return 0; | ||
62 | |||
63 | return addr + size <= *dev->dma_mask; | ||
64 | } | ||
65 | |||
66 | static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) | ||
67 | { | ||
68 | return paddr; | ||
69 | } | ||
70 | |||
71 | static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) | ||
72 | { | ||
73 | return daddr; | ||
74 | } | ||
75 | |||
58 | static inline void | 76 | static inline void |
59 | dma_cache_sync(struct device *dev, void *vaddr, size_t size, | 77 | dma_cache_sync(struct device *dev, void *vaddr, size_t size, |
60 | enum dma_data_direction dir) | 78 | enum dma_data_direction dir) |
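dma_capable() asks whether an entire transfer sits below the device's DMA mask, and the identity phys_to_dma()/dma_to_phys() pair makes x86's 1:1 physical-to-bus mapping explicit. A standalone restatement of the range check (plain userspace C; note the code as merged compares the one-past-the-end address against the mask, so it is conservative by one byte at the very top of the window):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool dma_capable(uint64_t dma_mask, uint64_t addr, uint64_t size)
{
	if (!dma_mask)
		return false;
	return addr + size <= dma_mask;
}

int main(void)
{
	uint64_t mask32 = 0xffffffffULL;	/* 32-bit-only device */

	/* buffer entirely below 4G: ok */
	printf("%d\n", dma_capable(mask32, 0x80000000ULL, 0x1000));	/* 1 */
	/* buffer starting above 4G: rejected */
	printf("%d\n", dma_capable(mask32, 0x100000000ULL, 0x1000));	/* 0 */
	return 0;
}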
diff --git a/arch/x86/include/asm/dwarf2.h b/arch/x86/include/asm/dwarf2.h index 3afc5e87cfdd..ae6253ab9029 100644 --- a/arch/x86/include/asm/dwarf2.h +++ b/arch/x86/include/asm/dwarf2.h | |||
@@ -87,9 +87,25 @@ | |||
87 | CFI_RESTORE \reg | 87 | CFI_RESTORE \reg |
88 | .endm | 88 | .endm |
89 | #else /*!CONFIG_X86_64*/ | 89 | #else /*!CONFIG_X86_64*/ |
90 | .macro pushl_cfi reg | ||
91 | pushl \reg | ||
92 | CFI_ADJUST_CFA_OFFSET 4 | ||
93 | .endm | ||
90 | 94 | ||
91 | /* 32bit defenitions are missed yet */ | 95 | .macro popl_cfi reg |
96 | popl \reg | ||
97 | CFI_ADJUST_CFA_OFFSET -4 | ||
98 | .endm | ||
92 | 99 | ||
100 | .macro movl_cfi reg offset=0 | ||
101 | movl %\reg, \offset(%esp) | ||
102 | CFI_REL_OFFSET \reg, \offset | ||
103 | .endm | ||
104 | |||
105 | .macro movl_cfi_restore offset reg | ||
106 | movl \offset(%esp), %\reg | ||
107 | CFI_RESTORE \reg | ||
108 | .endm | ||
93 | #endif /*!CONFIG_X86_64*/ | 109 | #endif /*!CONFIG_X86_64*/ |
94 | #endif /*__ASSEMBLY__*/ | 110 | #endif /*__ASSEMBLY__*/ |
95 | 111 | ||
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h index bd2c6511c887..db24c2278be0 100644 --- a/arch/x86/include/asm/ftrace.h +++ b/arch/x86/include/asm/ftrace.h | |||
@@ -28,13 +28,6 @@ | |||
28 | 28 | ||
29 | #endif | 29 | #endif |
30 | 30 | ||
31 | /* FIXME: I don't want to stay hardcoded */ | ||
32 | #ifdef CONFIG_X86_64 | ||
33 | # define FTRACE_SYSCALL_MAX 296 | ||
34 | #else | ||
35 | # define FTRACE_SYSCALL_MAX 333 | ||
36 | #endif | ||
37 | |||
38 | #ifdef CONFIG_FUNCTION_TRACER | 31 | #ifdef CONFIG_FUNCTION_TRACER |
39 | #define MCOUNT_ADDR ((long)(mcount)) | 32 | #define MCOUNT_ADDR ((long)(mcount)) |
40 | #define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */ | 33 | #define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */ |
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h index 175adf58dd4f..0b20bbb758f2 100644 --- a/arch/x86/include/asm/i387.h +++ b/arch/x86/include/asm/i387.h | |||
@@ -26,6 +26,7 @@ extern void fpu_init(void); | |||
26 | extern void mxcsr_feature_mask_init(void); | 26 | extern void mxcsr_feature_mask_init(void); |
27 | extern int init_fpu(struct task_struct *child); | 27 | extern int init_fpu(struct task_struct *child); |
28 | extern asmlinkage void math_state_restore(void); | 28 | extern asmlinkage void math_state_restore(void); |
29 | extern void __math_state_restore(void); | ||
29 | extern void init_thread_xstate(void); | 30 | extern void init_thread_xstate(void); |
30 | extern int dump_fpu(struct pt_regs *, struct user_i387_struct *); | 31 | extern int dump_fpu(struct pt_regs *, struct user_i387_struct *); |
31 | 32 | ||
@@ -301,6 +302,14 @@ static inline void kernel_fpu_end(void) | |||
301 | preempt_enable(); | 302 | preempt_enable(); |
302 | } | 303 | } |
303 | 304 | ||
305 | static inline bool irq_fpu_usable(void) | ||
306 | { | ||
307 | struct pt_regs *regs; | ||
308 | |||
309 | return !in_interrupt() || !(regs = get_irq_regs()) || \ | ||
310 | user_mode(regs) || (read_cr0() & X86_CR0_TS); | ||
311 | } | ||
312 | |||
304 | /* | 313 | /* |
305 | * Some instructions like VIA's padlock instructions generate a spurious | 314 | * Some instructions like VIA's padlock instructions generate a spurious |
306 | * DNA fault but don't modify SSE registers. And these instructions | 315 | * DNA fault but don't modify SSE registers. And these instructions |
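irq_fpu_usable() centralizes the question every interrupt-context FPU user (aesni above, padlock below) has to answer before touching SIMD state. A restatement of the same logic with one clause per test, unrolled only for readability:

static inline bool irq_fpu_usable_unrolled(void)
{
	struct pt_regs *regs = get_irq_regs();

	if (!in_interrupt())
		return true;	/* process context: kernel_fpu_begin() is fine */
	if (!regs || user_mode(regs))
		return true;	/* interrupted user space, not a kernel FPU section */
	if (read_cr0() & X86_CR0_TS)
		return true;	/* TS set: interrupted kernel code wasn't using the FPU */
	return false;		/* interrupted a kernel_fpu_begin()/end() region */
}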
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h index 330ee807f89e..85232d32fcb8 100644 --- a/arch/x86/include/asm/io_apic.h +++ b/arch/x86/include/asm/io_apic.h | |||
@@ -150,11 +150,10 @@ extern int timer_through_8259; | |||
150 | #define io_apic_assign_pci_irqs \ | 150 | #define io_apic_assign_pci_irqs \ |
151 | (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs) | 151 | (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs) |
152 | 152 | ||
153 | #ifdef CONFIG_ACPI | 153 | extern u8 io_apic_unique_id(u8 id); |
154 | extern int io_apic_get_unique_id(int ioapic, int apic_id); | 154 | extern int io_apic_get_unique_id(int ioapic, int apic_id); |
155 | extern int io_apic_get_version(int ioapic); | 155 | extern int io_apic_get_version(int ioapic); |
156 | extern int io_apic_get_redir_entries(int ioapic); | 156 | extern int io_apic_get_redir_entries(int ioapic); |
157 | #endif /* CONFIG_ACPI */ | ||
158 | 157 | ||
159 | struct io_apic_irq_attr; | 158 | struct io_apic_irq_attr; |
160 | extern int io_apic_set_pci_routing(struct device *dev, int irq, | 159 | extern int io_apic_set_pci_routing(struct device *dev, int irq, |
@@ -177,6 +176,16 @@ extern int setup_ioapic_entry(int apic, int irq, | |||
177 | int polarity, int vector, int pin); | 176 | int polarity, int vector, int pin); |
178 | extern void ioapic_write_entry(int apic, int pin, | 177 | extern void ioapic_write_entry(int apic, int pin, |
179 | struct IO_APIC_route_entry e); | 178 | struct IO_APIC_route_entry e); |
179 | |||
180 | struct mp_ioapic_gsi{ | ||
181 | int gsi_base; | ||
182 | int gsi_end; | ||
183 | }; | ||
184 | extern struct mp_ioapic_gsi mp_gsi_routing[]; | ||
185 | int mp_find_ioapic(int gsi); | ||
186 | int mp_find_ioapic_pin(int ioapic, int gsi); | ||
187 | void __init mp_register_ioapic(int id, u32 address, u32 gsi_base); | ||
188 | |||
180 | #else /* !CONFIG_X86_IO_APIC */ | 189 | #else /* !CONFIG_X86_IO_APIC */ |
181 | #define io_apic_assign_pci_irqs 0 | 190 | #define io_apic_assign_pci_irqs 0 |
182 | static const int timer_through_8259 = 0; | 191 | static const int timer_through_8259 = 0; |
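The new mp_ioapic_gsi table records the GSI window each IOAPIC serves, which is presumably what mp_find_ioapic() walks. A hedged sketch under that assumption; nr_ioapics and the inclusive gsi_end bound are assumptions about the accompanying C file, which is not part of this hunk:

static int mp_find_ioapic_sketch(int gsi)
{
	int i;

	for (i = 0; i < nr_ioapics; i++) {
		if (gsi >= mp_gsi_routing[i].gsi_base &&
		    gsi <= mp_gsi_routing[i].gsi_end)
			return i;
	}
	return -1;	/* no IOAPIC owns this GSI */
}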
diff --git a/arch/x86/include/asm/ioctls.h b/arch/x86/include/asm/ioctls.h index 0d5b23b7b06e..ec34c760665e 100644 --- a/arch/x86/include/asm/ioctls.h +++ b/arch/x86/include/asm/ioctls.h | |||
@@ -1,94 +1 @@ | |||
1 | #ifndef _ASM_X86_IOCTLS_H | #include <asm-generic/ioctls.h> | |
2 | #define _ASM_X86_IOCTLS_H | ||
3 | |||
4 | #include <asm/ioctl.h> | ||
5 | |||
6 | /* 0x54 is just a magic number to make these relatively unique ('T') */ | ||
7 | |||
8 | #define TCGETS 0x5401 | ||
9 | #define TCSETS 0x5402 /* Clashes with SNDCTL_TMR_START sound ioctl */ | ||
10 | #define TCSETSW 0x5403 | ||
11 | #define TCSETSF 0x5404 | ||
12 | #define TCGETA 0x5405 | ||
13 | #define TCSETA 0x5406 | ||
14 | #define TCSETAW 0x5407 | ||
15 | #define TCSETAF 0x5408 | ||
16 | #define TCSBRK 0x5409 | ||
17 | #define TCXONC 0x540A | ||
18 | #define TCFLSH 0x540B | ||
19 | #define TIOCEXCL 0x540C | ||
20 | #define TIOCNXCL 0x540D | ||
21 | #define TIOCSCTTY 0x540E | ||
22 | #define TIOCGPGRP 0x540F | ||
23 | #define TIOCSPGRP 0x5410 | ||
24 | #define TIOCOUTQ 0x5411 | ||
25 | #define TIOCSTI 0x5412 | ||
26 | #define TIOCGWINSZ 0x5413 | ||
27 | #define TIOCSWINSZ 0x5414 | ||
28 | #define TIOCMGET 0x5415 | ||
29 | #define TIOCMBIS 0x5416 | ||
30 | #define TIOCMBIC 0x5417 | ||
31 | #define TIOCMSET 0x5418 | ||
32 | #define TIOCGSOFTCAR 0x5419 | ||
33 | #define TIOCSSOFTCAR 0x541A | ||
34 | #define FIONREAD 0x541B | ||
35 | #define TIOCINQ FIONREAD | ||
36 | #define TIOCLINUX 0x541C | ||
37 | #define TIOCCONS 0x541D | ||
38 | #define TIOCGSERIAL 0x541E | ||
39 | #define TIOCSSERIAL 0x541F | ||
40 | #define TIOCPKT 0x5420 | ||
41 | #define FIONBIO 0x5421 | ||
42 | #define TIOCNOTTY 0x5422 | ||
43 | #define TIOCSETD 0x5423 | ||
44 | #define TIOCGETD 0x5424 | ||
45 | #define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */ | ||
46 | /* #define TIOCTTYGSTRUCT 0x5426 - Former debugging-only ioctl */ | ||
47 | #define TIOCSBRK 0x5427 /* BSD compatibility */ | ||
48 | #define TIOCCBRK 0x5428 /* BSD compatibility */ | ||
49 | #define TIOCGSID 0x5429 /* Return the session ID of FD */ | ||
50 | #define TCGETS2 _IOR('T', 0x2A, struct termios2) | ||
51 | #define TCSETS2 _IOW('T', 0x2B, struct termios2) | ||
52 | #define TCSETSW2 _IOW('T', 0x2C, struct termios2) | ||
53 | #define TCSETSF2 _IOW('T', 0x2D, struct termios2) | ||
54 | #define TIOCGRS485 0x542E | ||
55 | #define TIOCSRS485 0x542F | ||
56 | #define TIOCGPTN _IOR('T', 0x30, unsigned int) | ||
57 | /* Get Pty Number (of pty-mux device) */ | ||
58 | #define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */ | ||
59 | #define TCGETX 0x5432 /* SYS5 TCGETX compatibility */ | ||
60 | #define TCSETX 0x5433 | ||
61 | #define TCSETXF 0x5434 | ||
62 | #define TCSETXW 0x5435 | ||
63 | |||
64 | #define FIONCLEX 0x5450 | ||
65 | #define FIOCLEX 0x5451 | ||
66 | #define FIOASYNC 0x5452 | ||
67 | #define TIOCSERCONFIG 0x5453 | ||
68 | #define TIOCSERGWILD 0x5454 | ||
69 | #define TIOCSERSWILD 0x5455 | ||
70 | #define TIOCGLCKTRMIOS 0x5456 | ||
71 | #define TIOCSLCKTRMIOS 0x5457 | ||
72 | #define TIOCSERGSTRUCT 0x5458 /* For debugging only */ | ||
73 | #define TIOCSERGETLSR 0x5459 /* Get line status register */ | ||
74 | #define TIOCSERGETMULTI 0x545A /* Get multiport config */ | ||
75 | #define TIOCSERSETMULTI 0x545B /* Set multiport config */ | ||
76 | |||
77 | #define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */ | ||
78 | #define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ | ||
79 | #define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */ | ||
80 | #define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */ | ||
81 | #define FIOQSIZE 0x5460 | ||
82 | |||
83 | /* Used for packet mode */ | ||
84 | #define TIOCPKT_DATA 0 | ||
85 | #define TIOCPKT_FLUSHREAD 1 | ||
86 | #define TIOCPKT_FLUSHWRITE 2 | ||
87 | #define TIOCPKT_STOP 4 | ||
88 | #define TIOCPKT_START 8 | ||
89 | #define TIOCPKT_NOSTOP 16 | ||
90 | #define TIOCPKT_DOSTOP 32 | ||
91 | |||
92 | #define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ | ||
93 | |||
94 | #endif /* _ASM_X86_IOCTLS_H */ | ||
diff --git a/arch/x86/include/asm/ipcbuf.h b/arch/x86/include/asm/ipcbuf.h index ee678fd51594..84c7e51cb6d0 100644 --- a/arch/x86/include/asm/ipcbuf.h +++ b/arch/x86/include/asm/ipcbuf.h | |||
@@ -1,28 +1 @@ | |||
1 | #ifndef _ASM_X86_IPCBUF_H | #include <asm-generic/ipcbuf.h> | |
2 | #define _ASM_X86_IPCBUF_H | ||
3 | |||
4 | /* | ||
5 | * The ipc64_perm structure for x86 architecture. | ||
6 | * Note extra padding because this structure is passed back and forth | ||
7 | * between kernel and user space. | ||
8 | * | ||
9 | * Pad space is left for: | ||
10 | * - 32-bit mode_t and seq | ||
11 | * - 2 miscellaneous 32-bit values | ||
12 | */ | ||
13 | |||
14 | struct ipc64_perm { | ||
15 | __kernel_key_t key; | ||
16 | __kernel_uid32_t uid; | ||
17 | __kernel_gid32_t gid; | ||
18 | __kernel_uid32_t cuid; | ||
19 | __kernel_gid32_t cgid; | ||
20 | __kernel_mode_t mode; | ||
21 | unsigned short __pad1; | ||
22 | unsigned short seq; | ||
23 | unsigned short __pad2; | ||
24 | unsigned long __unused1; | ||
25 | unsigned long __unused2; | ||
26 | }; | ||
27 | |||
28 | #endif /* _ASM_X86_IPCBUF_H */ | ||
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h index c6ccbe7e81ad..9e2b952f810a 100644 --- a/arch/x86/include/asm/irqflags.h +++ b/arch/x86/include/asm/irqflags.h | |||
@@ -13,14 +13,13 @@ static inline unsigned long native_save_fl(void) | |||
13 | unsigned long flags; | 13 | unsigned long flags; |
14 | 14 | ||
15 | /* | 15 | /* |
16 | * Note: this needs to be "=r" not "=rm", because we have the | 16 | * "=rm" is safe here, because "pop" adjusts the stack before |
17 | * stack offset from what gcc expects at the time the "pop" is | 17 | * it evaluates its effective address -- this is part of the |
18 | * executed, and so a memory reference with respect to the stack | 18 | * documented behavior of the "pop" instruction. |
19 | * would end up using the wrong address. | ||
20 | */ | 19 | */ |
21 | asm volatile("# __raw_save_flags\n\t" | 20 | asm volatile("# __raw_save_flags\n\t" |
22 | "pushf ; pop %0" | 21 | "pushf ; pop %0" |
23 | : "=r" (flags) | 22 | : "=rm" (flags) |
24 | : /* no input */ | 23 | : /* no input */ |
25 | : "memory"); | 24 | : "memory"); |
26 | 25 | ||
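The constraint relaxation is safe for exactly the reason the new comment gives: "pop" computes its effective address after the stack pointer has been incremented, so even a stack-slot operand chosen by the compiler resolves correctly. The same sequence shown standalone, identical in substance to native_save_fl() above:

static inline unsigned long save_flags_sketch(void)
{
	unsigned long flags;

	/* "=rm": the compiler may pick a register or a stack slot;
	 * an %esp-relative slot is fine because pop adjusts the stack
	 * pointer before evaluating its destination address. */
	asm volatile("pushf ; pop %0"
		     : "=rm" (flags)
		     : /* no input */
		     : "memory");
	return flags;
}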
diff --git a/arch/x86/include/asm/lguest.h b/arch/x86/include/asm/lguest.h index 5136dad57cbb..0d97deba1e35 100644 --- a/arch/x86/include/asm/lguest.h +++ b/arch/x86/include/asm/lguest.h | |||
@@ -90,8 +90,9 @@ static inline void lguest_set_ts(void) | |||
90 | } | 90 | } |
91 | 91 | ||
92 | /* Full 4G segment descriptors, suitable for CS and DS. */ | 92 | /* Full 4G segment descriptors, suitable for CS and DS. */ |
93 | #define FULL_EXEC_SEGMENT ((struct desc_struct){ { {0x0000ffff, 0x00cf9b00} } }) | 93 | #define FULL_EXEC_SEGMENT \ |
94 | #define FULL_SEGMENT ((struct desc_struct){ { {0x0000ffff, 0x00cf9300} } }) | 94 | ((struct desc_struct)GDT_ENTRY_INIT(0xc09b, 0, 0xfffff)) |
95 | #define FULL_SEGMENT ((struct desc_struct)GDT_ENTRY_INIT(0xc093, 0, 0xfffff)) | ||
95 | 96 | ||
96 | #endif /* __ASSEMBLY__ */ | 97 | #endif /* __ASSEMBLY__ */ |
97 | 98 | ||
diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h index 751af2550ed9..593e51d4643f 100644 --- a/arch/x86/include/asm/mman.h +++ b/arch/x86/include/asm/mman.h | |||
@@ -1,20 +1,8 @@ | |||
1 | #ifndef _ASM_X86_MMAN_H | 1 | #ifndef _ASM_X86_MMAN_H |
2 | #define _ASM_X86_MMAN_H | 2 | #define _ASM_X86_MMAN_H |
3 | 3 | ||
4 | #include <asm-generic/mman-common.h> | ||
5 | |||
6 | #define MAP_32BIT 0x40 /* only give out 32bit addresses */ | 4 | #define MAP_32BIT 0x40 /* only give out 32bit addresses */ |
7 | 5 | ||
8 | #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ | 6 | #include <asm-generic/mman.h> |
9 | #define MAP_DENYWRITE 0x0800 /* ETXTBSY */ | ||
10 | #define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ | ||
11 | #define MAP_LOCKED 0x2000 /* pages are locked */ | ||
12 | #define MAP_NORESERVE 0x4000 /* don't check for reservations */ | ||
13 | #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ | ||
14 | #define MAP_NONBLOCK 0x10000 /* do not block on IO */ | ||
15 | #define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */ | ||
16 | |||
17 | #define MCL_CURRENT 1 /* lock all current mappings */ | ||
18 | #define MCL_FUTURE 2 /* lock all future mappings */ | ||
19 | 7 | ||
20 | #endif /* _ASM_X86_MMAN_H */ | 8 | #endif /* _ASM_X86_MMAN_H */ |
diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h index 47d62743c4d5..3e2ce58a31a3 100644 --- a/arch/x86/include/asm/module.h +++ b/arch/x86/include/asm/module.h | |||
@@ -1,18 +1,7 @@ | |||
1 | #ifndef _ASM_X86_MODULE_H | 1 | #ifndef _ASM_X86_MODULE_H |
2 | #define _ASM_X86_MODULE_H | 2 | #define _ASM_X86_MODULE_H |
3 | 3 | ||
4 | /* x86_32/64 are simple */ | 4 | #include <asm-generic/module.h> |
5 | struct mod_arch_specific {}; | ||
6 | |||
7 | #ifdef CONFIG_X86_32 | ||
8 | # define Elf_Shdr Elf32_Shdr | ||
9 | # define Elf_Sym Elf32_Sym | ||
10 | # define Elf_Ehdr Elf32_Ehdr | ||
11 | #else | ||
12 | # define Elf_Shdr Elf64_Shdr | ||
13 | # define Elf_Sym Elf64_Sym | ||
14 | # define Elf_Ehdr Elf64_Ehdr | ||
15 | #endif | ||
16 | 5 | ||
17 | #ifdef CONFIG_X86_64 | 6 | #ifdef CONFIG_X86_64 |
18 | /* X86_64 does not define MODULE_PROC_FAMILY */ | 7 | /* X86_64 does not define MODULE_PROC_FAMILY */ |
@@ -28,6 +17,8 @@ struct mod_arch_specific {}; | |||
28 | #define MODULE_PROC_FAMILY "586MMX " | 17 | #define MODULE_PROC_FAMILY "586MMX " |
29 | #elif defined CONFIG_MCORE2 | 18 | #elif defined CONFIG_MCORE2 |
30 | #define MODULE_PROC_FAMILY "CORE2 " | 19 | #define MODULE_PROC_FAMILY "CORE2 " |
20 | #elif defined CONFIG_MATOM | ||
21 | #define MODULE_PROC_FAMILY "ATOM " | ||
31 | #elif defined CONFIG_M686 | 22 | #elif defined CONFIG_M686 |
32 | #define MODULE_PROC_FAMILY "686 " | 23 | #define MODULE_PROC_FAMILY "686 " |
33 | #elif defined CONFIG_MPENTIUMII | 24 | #elif defined CONFIG_MPENTIUMII |
diff --git a/arch/x86/include/asm/msgbuf.h b/arch/x86/include/asm/msgbuf.h index 7e4e9481f51c..809134c644a6 100644 --- a/arch/x86/include/asm/msgbuf.h +++ b/arch/x86/include/asm/msgbuf.h | |||
@@ -1,39 +1 @@ | |||
1 | #ifndef _ASM_X86_MSGBUF_H | #include <asm-generic/msgbuf.h> | |
2 | #define _ASM_X86_MSGBUF_H | ||
3 | |||
4 | /* | ||
5 | * The msqid64_ds structure for i386 architecture. | ||
6 | * Note extra padding because this structure is passed back and forth | ||
7 | * between kernel and user space. | ||
8 | * | ||
9 | * Pad space on i386 is left for: | ||
10 | * - 64-bit time_t to solve y2038 problem | ||
11 | * - 2 miscellaneous 32-bit values | ||
12 | * | ||
13 | * Pad space on x8664 is left for: | ||
14 | * - 2 miscellaneous 64-bit values | ||
15 | */ | ||
16 | struct msqid64_ds { | ||
17 | struct ipc64_perm msg_perm; | ||
18 | __kernel_time_t msg_stime; /* last msgsnd time */ | ||
19 | #ifdef __i386__ | ||
20 | unsigned long __unused1; | ||
21 | #endif | ||
22 | __kernel_time_t msg_rtime; /* last msgrcv time */ | ||
23 | #ifdef __i386__ | ||
24 | unsigned long __unused2; | ||
25 | #endif | ||
26 | __kernel_time_t msg_ctime; /* last change time */ | ||
27 | #ifdef __i386__ | ||
28 | unsigned long __unused3; | ||
29 | #endif | ||
30 | unsigned long msg_cbytes; /* current number of bytes on queue */ | ||
31 | unsigned long msg_qnum; /* number of messages in queue */ | ||
32 | unsigned long msg_qbytes; /* max number of bytes on queue */ | ||
33 | __kernel_pid_t msg_lspid; /* pid of last msgsnd */ | ||
34 | __kernel_pid_t msg_lrpid; /* last receive pid */ | ||
35 | unsigned long __unused4; | ||
36 | unsigned long __unused5; | ||
37 | }; | ||
38 | |||
39 | #endif /* _ASM_X86_MSGBUF_H */ | ||
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h index 48ad9d29484a..7e2b6ba962ff 100644 --- a/arch/x86/include/asm/msr.h +++ b/arch/x86/include/asm/msr.h | |||
@@ -3,10 +3,16 @@ | |||
3 | 3 | ||
4 | #include <asm/msr-index.h> | 4 | #include <asm/msr-index.h> |
5 | 5 | ||
6 | #ifdef __KERNEL__ | ||
7 | #ifndef __ASSEMBLY__ | 6 | #ifndef __ASSEMBLY__ |
8 | 7 | ||
9 | #include <linux/types.h> | 8 | #include <linux/types.h> |
9 | #include <linux/ioctl.h> | ||
10 | |||
11 | #define X86_IOC_RDMSR_REGS _IOWR('c', 0xA0, __u32[8]) | ||
12 | #define X86_IOC_WRMSR_REGS _IOWR('c', 0xA1, __u32[8]) | ||
13 | |||
14 | #ifdef __KERNEL__ | ||
15 | |||
10 | #include <asm/asm.h> | 16 | #include <asm/asm.h> |
11 | #include <asm/errno.h> | 17 | #include <asm/errno.h> |
12 | #include <asm/cpumask.h> | 18 | #include <asm/cpumask.h> |
@@ -67,23 +73,7 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr, | |||
67 | ".previous\n\t" | 73 | ".previous\n\t" |
68 | _ASM_EXTABLE(2b, 3b) | 74 | _ASM_EXTABLE(2b, 3b) |
69 | : [err] "=r" (*err), EAX_EDX_RET(val, low, high) | 75 | : [err] "=r" (*err), EAX_EDX_RET(val, low, high) |
70 | : "c" (msr), [fault] "i" (-EFAULT)); | 76 | : "c" (msr), [fault] "i" (-EIO)); |
71 | return EAX_EDX_VAL(val, low, high); | ||
72 | } | ||
73 | |||
74 | static inline unsigned long long native_read_msr_amd_safe(unsigned int msr, | ||
75 | int *err) | ||
76 | { | ||
77 | DECLARE_ARGS(val, low, high); | ||
78 | |||
79 | asm volatile("2: rdmsr ; xor %0,%0\n" | ||
80 | "1:\n\t" | ||
81 | ".section .fixup,\"ax\"\n\t" | ||
82 | "3: mov %3,%0 ; jmp 1b\n\t" | ||
83 | ".previous\n\t" | ||
84 | _ASM_EXTABLE(2b, 3b) | ||
85 | : "=r" (*err), EAX_EDX_RET(val, low, high) | ||
86 | : "c" (msr), "D" (0x9c5a203a), "i" (-EFAULT)); | ||
87 | return EAX_EDX_VAL(val, low, high); | 77 | return EAX_EDX_VAL(val, low, high); |
88 | } | 78 | } |
89 | 79 | ||
@@ -106,13 +96,16 @@ notrace static inline int native_write_msr_safe(unsigned int msr, | |||
106 | _ASM_EXTABLE(2b, 3b) | 96 | _ASM_EXTABLE(2b, 3b) |
107 | : [err] "=a" (err) | 97 | : [err] "=a" (err) |
108 | : "c" (msr), "0" (low), "d" (high), | 98 | : "c" (msr), "0" (low), "d" (high), |
109 | [fault] "i" (-EFAULT) | 99 | [fault] "i" (-EIO) |
110 | : "memory"); | 100 | : "memory"); |
111 | return err; | 101 | return err; |
112 | } | 102 | } |
113 | 103 | ||
114 | extern unsigned long long native_read_tsc(void); | 104 | extern unsigned long long native_read_tsc(void); |
115 | 105 | ||
106 | extern int native_rdmsr_safe_regs(u32 regs[8]); | ||
107 | extern int native_wrmsr_safe_regs(u32 regs[8]); | ||
108 | |||
116 | static __always_inline unsigned long long __native_read_tsc(void) | 109 | static __always_inline unsigned long long __native_read_tsc(void) |
117 | { | 110 | { |
118 | DECLARE_ARGS(val, low, high); | 111 | DECLARE_ARGS(val, low, high); |
@@ -181,14 +174,44 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p) | |||
181 | *p = native_read_msr_safe(msr, &err); | 174 | *p = native_read_msr_safe(msr, &err); |
182 | return err; | 175 | return err; |
183 | } | 176 | } |
177 | |||
184 | static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p) | 178 | static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p) |
185 | { | 179 | { |
180 | u32 gprs[8] = { 0 }; | ||
186 | int err; | 181 | int err; |
187 | 182 | ||
188 | *p = native_read_msr_amd_safe(msr, &err); | 183 | gprs[1] = msr; |
184 | gprs[7] = 0x9c5a203a; | ||
185 | |||
186 | err = native_rdmsr_safe_regs(gprs); | ||
187 | |||
188 | *p = gprs[0] | ((u64)gprs[2] << 32); | ||
189 | |||
189 | return err; | 190 | return err; |
190 | } | 191 | } |
191 | 192 | ||
193 | static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val) | ||
194 | { | ||
195 | u32 gprs[8] = { 0 }; | ||
196 | |||
197 | gprs[0] = (u32)val; | ||
198 | gprs[1] = msr; | ||
199 | gprs[2] = val >> 32; | ||
200 | gprs[7] = 0x9c5a203a; | ||
201 | |||
202 | return native_wrmsr_safe_regs(gprs); | ||
203 | } | ||
204 | |||
205 | static inline int rdmsr_safe_regs(u32 regs[8]) | ||
206 | { | ||
207 | return native_rdmsr_safe_regs(regs); | ||
208 | } | ||
209 | |||
210 | static inline int wrmsr_safe_regs(u32 regs[8]) | ||
211 | { | ||
212 | return native_wrmsr_safe_regs(regs); | ||
213 | } | ||
214 | |||
192 | #define rdtscl(low) \ | 215 | #define rdtscl(low) \ |
193 | ((low) = (u32)__native_read_tsc()) | 216 | ((low) = (u32)__native_read_tsc()) |
194 | 217 | ||
@@ -228,6 +251,8 @@ void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs); | |||
228 | void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs); | 251 | void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs); |
229 | int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); | 252 | int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); |
230 | int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); | 253 | int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); |
254 | int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]); | ||
255 | int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]); | ||
231 | #else /* CONFIG_SMP */ | 256 | #else /* CONFIG_SMP */ |
232 | static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) | 257 | static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) |
233 | { | 258 | { |
@@ -258,7 +283,15 @@ static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) | |||
258 | { | 283 | { |
259 | return wrmsr_safe(msr_no, l, h); | 284 | return wrmsr_safe(msr_no, l, h); |
260 | } | 285 | } |
286 | static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]) | ||
287 | { | ||
288 | return rdmsr_safe_regs(regs); | ||
289 | } | ||
290 | static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]) | ||
291 | { | ||
292 | return wrmsr_safe_regs(regs); | ||
293 | } | ||
261 | #endif /* CONFIG_SMP */ | 294 | #endif /* CONFIG_SMP */ |
262 | #endif /* __ASSEMBLY__ */ | ||
263 | #endif /* __KERNEL__ */ | 295 | #endif /* __KERNEL__ */ |
296 | #endif /* __ASSEMBLY__ */ | ||
264 | #endif /* _ASM_X86_MSR_H */ | 297 | #endif /* _ASM_X86_MSR_H */ |
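The regs[8] convention replaces the special-cased native_read_msr_amd_safe(): one array carries all eight 32-bit GPRs through the paravirt-safe MSR accessors. From the hunks above, index 0 is %eax, 1 is %ecx (the MSR number), 2 is %edx, and 7 is %edi (the 0x9c5a203a AMD northbridge passcode); the remaining slots presumably map to %ebx, %esp, %ebp and %esi in order, though that is an assumption about the .S side of this series. A read through the new interface, mirroring rdmsrl_amd_safe() above:

static int rdmsrl_amd_sketch(u32 msr, u64 *val)
{
	u32 gprs[8] = { 0 };
	int err;

	gprs[1] = msr;		/* %ecx: which MSR */
	gprs[7] = 0x9c5a203a;	/* %edi: AMD passcode for protected MSRs */

	err = native_rdmsr_safe_regs(gprs);
	*val = gprs[0] | ((u64)gprs[2] << 32);	/* %eax | %edx << 32 */

	return err;
}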
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h index c86e5ed4af51..e63cf7d441e1 100644 --- a/arch/x86/include/asm/nmi.h +++ b/arch/x86/include/asm/nmi.h | |||
@@ -45,8 +45,8 @@ extern int proc_nmi_enabled(struct ctl_table *, int , struct file *, | |||
45 | void __user *, size_t *, loff_t *); | 45 | void __user *, size_t *, loff_t *); |
46 | extern int unknown_nmi_panic; | 46 | extern int unknown_nmi_panic; |
47 | 47 | ||
48 | void __trigger_all_cpu_backtrace(void); | 48 | void arch_trigger_all_cpu_backtrace(void); |
49 | #define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace() | 49 | #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace |
50 | 50 | ||
51 | static inline void localise_nmi_watchdog(void) | 51 | static inline void localise_nmi_watchdog(void) |
52 | { | 52 | { |
diff --git a/arch/x86/include/asm/param.h b/arch/x86/include/asm/param.h index 6f0d0422f4ca..965d45427975 100644 --- a/arch/x86/include/asm/param.h +++ b/arch/x86/include/asm/param.h | |||
@@ -1,22 +1 @@ | |||
1 | #ifndef _ASM_X86_PARAM_H | #include <asm-generic/param.h> | |
2 | #define _ASM_X86_PARAM_H | ||
3 | |||
4 | #ifdef __KERNEL__ | ||
5 | # define HZ CONFIG_HZ /* Internal kernel timer frequency */ | ||
6 | # define USER_HZ 100 /* some user interfaces are */ | ||
7 | # define CLOCKS_PER_SEC (USER_HZ) /* in "ticks" like times() */ | ||
8 | #endif | ||
9 | |||
10 | #ifndef HZ | ||
11 | #define HZ 100 | ||
12 | #endif | ||
13 | |||
14 | #define EXEC_PAGESIZE 4096 | ||
15 | |||
16 | #ifndef NOGROUP | ||
17 | #define NOGROUP (-1) | ||
18 | #endif | ||
19 | |||
20 | #define MAXHOSTNAMELEN 64 /* max length of hostname */ | ||
21 | |||
22 | #endif /* _ASM_X86_PARAM_H */ | ||
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 4fb37c8a0832..40d6586af25b 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h | |||
@@ -7,689 +7,11 @@ | |||
7 | #include <asm/pgtable_types.h> | 7 | #include <asm/pgtable_types.h> |
8 | #include <asm/asm.h> | 8 | #include <asm/asm.h> |
9 | 9 | ||
10 | /* Bitmask of what can be clobbered: usually at least eax. */ | 10 | #include <asm/paravirt_types.h> |
11 | #define CLBR_NONE 0 | ||
12 | #define CLBR_EAX (1 << 0) | ||
13 | #define CLBR_ECX (1 << 1) | ||
14 | #define CLBR_EDX (1 << 2) | ||
15 | #define CLBR_EDI (1 << 3) | ||
16 | |||
17 | #ifdef CONFIG_X86_32 | ||
18 | /* CLBR_ANY should match all regs platform has. For i386, that's just it */ | ||
19 | #define CLBR_ANY ((1 << 4) - 1) | ||
20 | |||
21 | #define CLBR_ARG_REGS (CLBR_EAX | CLBR_EDX | CLBR_ECX) | ||
22 | #define CLBR_RET_REG (CLBR_EAX | CLBR_EDX) | ||
23 | #define CLBR_SCRATCH (0) | ||
24 | #else | ||
25 | #define CLBR_RAX CLBR_EAX | ||
26 | #define CLBR_RCX CLBR_ECX | ||
27 | #define CLBR_RDX CLBR_EDX | ||
28 | #define CLBR_RDI CLBR_EDI | ||
29 | #define CLBR_RSI (1 << 4) | ||
30 | #define CLBR_R8 (1 << 5) | ||
31 | #define CLBR_R9 (1 << 6) | ||
32 | #define CLBR_R10 (1 << 7) | ||
33 | #define CLBR_R11 (1 << 8) | ||
34 | |||
35 | #define CLBR_ANY ((1 << 9) - 1) | ||
36 | |||
37 | #define CLBR_ARG_REGS (CLBR_RDI | CLBR_RSI | CLBR_RDX | \ | ||
38 | CLBR_RCX | CLBR_R8 | CLBR_R9) | ||
39 | #define CLBR_RET_REG (CLBR_RAX) | ||
40 | #define CLBR_SCRATCH (CLBR_R10 | CLBR_R11) | ||
41 | |||
42 | #include <asm/desc_defs.h> | ||
43 | #endif /* X86_64 */ | ||
44 | |||
45 | #define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG) | ||
46 | 11 | ||
47 | #ifndef __ASSEMBLY__ | 12 | #ifndef __ASSEMBLY__ |
48 | #include <linux/types.h> | 13 | #include <linux/types.h> |
49 | #include <linux/cpumask.h> | 14 | #include <linux/cpumask.h> |
50 | #include <asm/kmap_types.h> | ||
51 | #include <asm/desc_defs.h> | ||
52 | |||
53 | struct page; | ||
54 | struct thread_struct; | ||
55 | struct desc_ptr; | ||
56 | struct tss_struct; | ||
57 | struct mm_struct; | ||
58 | struct desc_struct; | ||
59 | struct task_struct; | ||
60 | |||
61 | /* | ||
62 | * Wrapper type for pointers to code which uses the non-standard | ||
63 | * calling convention. See PV_CALL_SAVE_REGS_THUNK below. | ||
64 | */ | ||
65 | struct paravirt_callee_save { | ||
66 | void *func; | ||
67 | }; | ||
68 | |||
69 | /* general info */ | ||
70 | struct pv_info { | ||
71 | unsigned int kernel_rpl; | ||
72 | int shared_kernel_pmd; | ||
73 | int paravirt_enabled; | ||
74 | const char *name; | ||
75 | }; | ||
76 | |||
77 | struct pv_init_ops { | ||
78 | /* | ||
79 | * Patch may replace one of the defined code sequences with | ||
80 | * arbitrary code, subject to the same register constraints. | ||
81 | * This generally means the code is not free to clobber any | ||
82 | * registers other than EAX. The patch function should return | ||
83 | * the number of bytes of code generated, as we nop pad the | ||
84 | * rest in generic code. | ||
85 | */ | ||
86 | unsigned (*patch)(u8 type, u16 clobber, void *insnbuf, | ||
87 | unsigned long addr, unsigned len); | ||
88 | |||
89 | /* Basic arch-specific setup */ | ||
90 | void (*arch_setup)(void); | ||
91 | char *(*memory_setup)(void); | ||
92 | void (*post_allocator_init)(void); | ||
93 | |||
94 | /* Print a banner to identify the environment */ | ||
95 | void (*banner)(void); | ||
96 | }; | ||
97 | |||
98 | |||
99 | struct pv_lazy_ops { | ||
100 | /* Set deferred update mode, used for batching operations. */ | ||
101 | void (*enter)(void); | ||
102 | void (*leave)(void); | ||
103 | }; | ||
104 | |||
105 | struct pv_time_ops { | ||
106 | void (*time_init)(void); | ||
107 | |||
108 | /* Set and set time of day */ | ||
109 | unsigned long (*get_wallclock)(void); | ||
110 | int (*set_wallclock)(unsigned long); | ||
111 | |||
112 | unsigned long long (*sched_clock)(void); | ||
113 | unsigned long (*get_tsc_khz)(void); | ||
114 | }; | ||
115 | |||
116 | struct pv_cpu_ops { | ||
117 | /* hooks for various privileged instructions */ | ||
118 | unsigned long (*get_debugreg)(int regno); | ||
119 | void (*set_debugreg)(int regno, unsigned long value); | ||
120 | |||
121 | void (*clts)(void); | ||
122 | |||
123 | unsigned long (*read_cr0)(void); | ||
124 | void (*write_cr0)(unsigned long); | ||
125 | |||
126 | unsigned long (*read_cr4_safe)(void); | ||
127 | unsigned long (*read_cr4)(void); | ||
128 | void (*write_cr4)(unsigned long); | ||
129 | |||
130 | #ifdef CONFIG_X86_64 | ||
131 | unsigned long (*read_cr8)(void); | ||
132 | void (*write_cr8)(unsigned long); | ||
133 | #endif | ||
134 | |||
135 | /* Segment descriptor handling */ | ||
136 | void (*load_tr_desc)(void); | ||
137 | void (*load_gdt)(const struct desc_ptr *); | ||
138 | void (*load_idt)(const struct desc_ptr *); | ||
139 | void (*store_gdt)(struct desc_ptr *); | ||
140 | void (*store_idt)(struct desc_ptr *); | ||
141 | void (*set_ldt)(const void *desc, unsigned entries); | ||
142 | unsigned long (*store_tr)(void); | ||
143 | void (*load_tls)(struct thread_struct *t, unsigned int cpu); | ||
144 | #ifdef CONFIG_X86_64 | ||
145 | void (*load_gs_index)(unsigned int idx); | ||
146 | #endif | ||
147 | void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum, | ||
148 | const void *desc); | ||
149 | void (*write_gdt_entry)(struct desc_struct *, | ||
150 | int entrynum, const void *desc, int size); | ||
151 | void (*write_idt_entry)(gate_desc *, | ||
152 | int entrynum, const gate_desc *gate); | ||
153 | void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries); | ||
154 | void (*free_ldt)(struct desc_struct *ldt, unsigned entries); | ||
155 | |||
156 | void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t); | ||
157 | |||
158 | void (*set_iopl_mask)(unsigned mask); | ||
159 | |||
160 | void (*wbinvd)(void); | ||
161 | void (*io_delay)(void); | ||
162 | |||
163 | /* cpuid emulation, mostly so that caps bits can be disabled */ | ||
164 | void (*cpuid)(unsigned int *eax, unsigned int *ebx, | ||
165 | unsigned int *ecx, unsigned int *edx); | ||
166 | |||
167 | /* MSR, PMC and TSR operations. | ||
168 | err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */ | ||
169 | u64 (*read_msr_amd)(unsigned int msr, int *err); | ||
170 | u64 (*read_msr)(unsigned int msr, int *err); | ||
171 | int (*write_msr)(unsigned int msr, unsigned low, unsigned high); | ||
172 | |||
173 | u64 (*read_tsc)(void); | ||
174 | u64 (*read_pmc)(int counter); | ||
175 | unsigned long long (*read_tscp)(unsigned int *aux); | ||
176 | |||
177 | /* | ||
178 | * Atomically enable interrupts and return to userspace. This | ||
179 | * is only ever used to return to 32-bit processes; in a | ||
180 | * 64-bit kernel, it's used for 32-on-64 compat processes, but | ||
181 | * never native 64-bit processes. (Jump, not call.) | ||
182 | */ | ||
183 | void (*irq_enable_sysexit)(void); | ||
184 | |||
185 | /* | ||
186 | * Switch to usermode gs and return to 64-bit usermode using | ||
187 | * sysret. Only used in 64-bit kernels to return to 64-bit | ||
188 | * processes. Usermode register state, including %rsp, must | ||
189 | * already be restored. | ||
190 | */ | ||
191 | void (*usergs_sysret64)(void); | ||
192 | |||
193 | /* | ||
194 | * Switch to usermode gs and return to 32-bit usermode using | ||
195 | * sysret. Used to return to 32-on-64 compat processes. | ||
196 | * Other usermode register state, including %esp, must already | ||
197 | * be restored. | ||
198 | */ | ||
199 | void (*usergs_sysret32)(void); | ||
200 | |||
201 | /* Normal iret. Jump to this with the standard iret stack | ||
202 | frame set up. */ | ||
203 | void (*iret)(void); | ||
204 | |||
205 | void (*swapgs)(void); | ||
206 | |||
207 | void (*start_context_switch)(struct task_struct *prev); | ||
208 | void (*end_context_switch)(struct task_struct *next); | ||
209 | }; | ||
210 | |||
211 | struct pv_irq_ops { | ||
212 | void (*init_IRQ)(void); | ||
213 | |||
214 | /* | ||
215 | * Get/set interrupt state. save_fl and restore_fl are only | ||
216 | * expected to use X86_EFLAGS_IF; all other bits | ||
217 | * returned from save_fl are undefined, and may be ignored by | ||
218 | * restore_fl. | ||
219 | * | ||
220 | * NOTE: These functions callers expect the callee to preserve | ||
221 | * more registers than the standard C calling convention. | ||
222 | */ | ||
223 | struct paravirt_callee_save save_fl; | ||
224 | struct paravirt_callee_save restore_fl; | ||
225 | struct paravirt_callee_save irq_disable; | ||
226 | struct paravirt_callee_save irq_enable; | ||
227 | |||
228 | void (*safe_halt)(void); | ||
229 | void (*halt)(void); | ||
230 | |||
231 | #ifdef CONFIG_X86_64 | ||
232 | void (*adjust_exception_frame)(void); | ||
233 | #endif | ||
234 | }; | ||
235 | |||
236 | struct pv_apic_ops { | ||
237 | #ifdef CONFIG_X86_LOCAL_APIC | ||
238 | void (*setup_boot_clock)(void); | ||
239 | void (*setup_secondary_clock)(void); | ||
240 | |||
241 | void (*startup_ipi_hook)(int phys_apicid, | ||
242 | unsigned long start_eip, | ||
243 | unsigned long start_esp); | ||
244 | #endif | ||
245 | }; | ||
246 | |||
247 | struct pv_mmu_ops { | ||
248 | /* | ||
249 | * Called before/after init_mm pagetable setup. setup_start | ||
250 | * may reset %cr3, and may pre-install parts of the pagetable; | ||
251 | * pagetable setup is expected to preserve any existing | ||
252 | * mapping. | ||
253 | */ | ||
254 | void (*pagetable_setup_start)(pgd_t *pgd_base); | ||
255 | void (*pagetable_setup_done)(pgd_t *pgd_base); | ||
256 | |||
257 | unsigned long (*read_cr2)(void); | ||
258 | void (*write_cr2)(unsigned long); | ||
259 | |||
260 | unsigned long (*read_cr3)(void); | ||
261 | void (*write_cr3)(unsigned long); | ||
262 | |||
263 | /* | ||
264 | * Hooks for intercepting the creation/use/destruction of an | ||
265 | * mm_struct. | ||
266 | */ | ||
267 | void (*activate_mm)(struct mm_struct *prev, | ||
268 | struct mm_struct *next); | ||
269 | void (*dup_mmap)(struct mm_struct *oldmm, | ||
270 | struct mm_struct *mm); | ||
271 | void (*exit_mmap)(struct mm_struct *mm); | ||
272 | |||
273 | |||
274 | /* TLB operations */ | ||
275 | void (*flush_tlb_user)(void); | ||
276 | void (*flush_tlb_kernel)(void); | ||
277 | void (*flush_tlb_single)(unsigned long addr); | ||
278 | void (*flush_tlb_others)(const struct cpumask *cpus, | ||
279 | struct mm_struct *mm, | ||
280 | unsigned long va); | ||
281 | |||
282 | /* Hooks for allocating and freeing a pagetable top-level */ | ||
283 | int (*pgd_alloc)(struct mm_struct *mm); | ||
284 | void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd); | ||
285 | |||
286 | /* | ||
287 | * Hooks for allocating/releasing pagetable pages when they're | ||
288 | * attached to a pagetable | ||
289 | */ | ||
290 | void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn); | ||
291 | void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn); | ||
292 | void (*alloc_pmd_clone)(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count); | ||
293 | void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn); | ||
294 | void (*release_pte)(unsigned long pfn); | ||
295 | void (*release_pmd)(unsigned long pfn); | ||
296 | void (*release_pud)(unsigned long pfn); | ||
297 | |||
298 | /* Pagetable manipulation functions */ | ||
299 | void (*set_pte)(pte_t *ptep, pte_t pteval); | ||
300 | void (*set_pte_at)(struct mm_struct *mm, unsigned long addr, | ||
301 | pte_t *ptep, pte_t pteval); | ||
302 | void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval); | ||
303 | void (*pte_update)(struct mm_struct *mm, unsigned long addr, | ||
304 | pte_t *ptep); | ||
305 | void (*pte_update_defer)(struct mm_struct *mm, | ||
306 | unsigned long addr, pte_t *ptep); | ||
307 | |||
308 | pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr, | ||
309 | pte_t *ptep); | ||
310 | void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr, | ||
311 | pte_t *ptep, pte_t pte); | ||
312 | |||
313 | struct paravirt_callee_save pte_val; | ||
314 | struct paravirt_callee_save make_pte; | ||
315 | |||
316 | struct paravirt_callee_save pgd_val; | ||
317 | struct paravirt_callee_save make_pgd; | ||
318 | |||
319 | #if PAGETABLE_LEVELS >= 3 | ||
320 | #ifdef CONFIG_X86_PAE | ||
321 | void (*set_pte_atomic)(pte_t *ptep, pte_t pteval); | ||
322 | void (*pte_clear)(struct mm_struct *mm, unsigned long addr, | ||
323 | pte_t *ptep); | ||
324 | void (*pmd_clear)(pmd_t *pmdp); | ||
325 | |||
326 | #endif /* CONFIG_X86_PAE */ | ||
327 | |||
328 | void (*set_pud)(pud_t *pudp, pud_t pudval); | ||
329 | |||
330 | struct paravirt_callee_save pmd_val; | ||
331 | struct paravirt_callee_save make_pmd; | ||
332 | |||
333 | #if PAGETABLE_LEVELS == 4 | ||
334 | struct paravirt_callee_save pud_val; | ||
335 | struct paravirt_callee_save make_pud; | ||
336 | |||
337 | void (*set_pgd)(pgd_t *pudp, pgd_t pgdval); | ||
338 | #endif /* PAGETABLE_LEVELS == 4 */ | ||
339 | #endif /* PAGETABLE_LEVELS >= 3 */ | ||
340 | |||
341 | #ifdef CONFIG_HIGHPTE | ||
342 | void *(*kmap_atomic_pte)(struct page *page, enum km_type type); | ||
343 | #endif | ||
344 | |||
345 | struct pv_lazy_ops lazy_mode; | ||
346 | |||
347 | /* dom0 ops */ | ||
348 | |||
349 | /* Sometimes the physical address is a pfn, and sometimes it's | ||
350 | an mfn. We can tell which is which from the index. */ | ||
351 | void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx, | ||
352 | phys_addr_t phys, pgprot_t flags); | ||
353 | }; | ||
354 | |||
355 | struct raw_spinlock; | ||
356 | struct pv_lock_ops { | ||
357 | int (*spin_is_locked)(struct raw_spinlock *lock); | ||
358 | int (*spin_is_contended)(struct raw_spinlock *lock); | ||
359 | void (*spin_lock)(struct raw_spinlock *lock); | ||
360 | void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags); | ||
361 | int (*spin_trylock)(struct raw_spinlock *lock); | ||
362 | void (*spin_unlock)(struct raw_spinlock *lock); | ||
363 | }; | ||
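These hooks are reached through thin wrappers built on the PVOP machinery defined further down; a minimal sketch of the lock-side wrappers (matching the shape of the __raw_spin_* functions that appear as context later in this patch):

static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
{
	/* becomes a patchable indirect call through pv_lock_ops */
	PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
}

static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
{
	return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
}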
364 | |||
365 | /* This contains all the paravirt structures: we get a convenient | ||
366 | * number for each function from its offset, which we use to indicate | ||
367 | * what to patch. */ | ||
368 | struct paravirt_patch_template { | ||
369 | struct pv_init_ops pv_init_ops; | ||
370 | struct pv_time_ops pv_time_ops; | ||
371 | struct pv_cpu_ops pv_cpu_ops; | ||
372 | struct pv_irq_ops pv_irq_ops; | ||
373 | struct pv_apic_ops pv_apic_ops; | ||
374 | struct pv_mmu_ops pv_mmu_ops; | ||
375 | struct pv_lock_ops pv_lock_ops; | ||
376 | }; | ||
377 | |||
378 | extern struct pv_info pv_info; | ||
379 | extern struct pv_init_ops pv_init_ops; | ||
380 | extern struct pv_time_ops pv_time_ops; | ||
381 | extern struct pv_cpu_ops pv_cpu_ops; | ||
382 | extern struct pv_irq_ops pv_irq_ops; | ||
383 | extern struct pv_apic_ops pv_apic_ops; | ||
384 | extern struct pv_mmu_ops pv_mmu_ops; | ||
385 | extern struct pv_lock_ops pv_lock_ops; | ||
386 | |||
387 | #define PARAVIRT_PATCH(x) \ | ||
388 | (offsetof(struct paravirt_patch_template, x) / sizeof(void *)) | ||
389 | |||
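A quick worked example of the type-number mapping (iret_patch_offset() is a hypothetical helper for illustration; the actual value depends on configuration and pointer size):

static inline size_t iret_patch_offset(void)
{
	/* slot index of pv_cpu_ops.iret within the patch template... */
	unsigned type = PARAVIRT_PATCH(pv_cpu_ops.iret);

	/* ...times the slot size recovers the structure offset */
	return (size_t)type * sizeof(void *);
}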
390 | #define paravirt_type(op) \ | ||
391 | [paravirt_typenum] "i" (PARAVIRT_PATCH(op)), \ | ||
392 | [paravirt_opptr] "i" (&(op)) | ||
393 | #define paravirt_clobber(clobber) \ | ||
394 | [paravirt_clobber] "i" (clobber) | ||
395 | |||
396 | /* | ||
397 | * Generate some code, and mark it as patchable by the | ||
398 | * apply_paravirt() alternate instruction patcher. | ||
399 | */ | ||
400 | #define _paravirt_alt(insn_string, type, clobber) \ | ||
401 | "771:\n\t" insn_string "\n" "772:\n" \ | ||
402 | ".pushsection .parainstructions,\"a\"\n" \ | ||
403 | _ASM_ALIGN "\n" \ | ||
404 | _ASM_PTR " 771b\n" \ | ||
405 | " .byte " type "\n" \ | ||
406 | " .byte 772b-771b\n" \ | ||
407 | " .short " clobber "\n" \ | ||
408 | ".popsection\n" | ||
409 | |||
410 | /* Generate patchable code, with the default asm parameters. */ | ||
411 | #define paravirt_alt(insn_string) \ | ||
412 | _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]") | ||
413 | |||
414 | /* Simple instruction patching code. */ | ||
415 | #define DEF_NATIVE(ops, name, code) \ | ||
416 | extern const char start_##ops##_##name[], end_##ops##_##name[]; \ | ||
417 | asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":") | ||
418 | |||
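The native backend uses DEF_NATIVE() to capture raw instruction sequences for the hottest ops; for example (sequences of this shape live in the kernel's paravirt_patch_*.c files):

DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
/* start_pv_irq_ops_irq_disable[]..end_pv_irq_ops_irq_disable[] now
 * bracket the bytes of "cli", ready for paravirt_patch_insns() */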
419 | unsigned paravirt_patch_nop(void); | ||
420 | unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len); | ||
421 | unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len); | ||
422 | unsigned paravirt_patch_ignore(unsigned len); | ||
423 | unsigned paravirt_patch_call(void *insnbuf, | ||
424 | const void *target, u16 tgt_clobbers, | ||
425 | unsigned long addr, u16 site_clobbers, | ||
426 | unsigned len); | ||
427 | unsigned paravirt_patch_jmp(void *insnbuf, const void *target, | ||
428 | unsigned long addr, unsigned len); | ||
429 | unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf, | ||
430 | unsigned long addr, unsigned len); | ||
431 | |||
432 | unsigned paravirt_patch_insns(void *insnbuf, unsigned len, | ||
433 | const char *start, const char *end); | ||
434 | |||
435 | unsigned native_patch(u8 type, u16 clobbers, void *ibuf, | ||
436 | unsigned long addr, unsigned len); | ||
437 | |||
438 | int paravirt_disable_iospace(void); | ||
439 | |||
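Tying these together, a native patch hook can splice such a sequence directly over a call site and fall back to the generic patcher for everything else. A simplified sketch (the in-tree native_patch() uses a switch over all patchable ops):

unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
		      unsigned long addr, unsigned len)
{
	if (type == PARAVIRT_PATCH(pv_irq_ops.irq_disable))
		return paravirt_patch_insns(ibuf, len,
					    start_pv_irq_ops_irq_disable,
					    end_pv_irq_ops_irq_disable);

	/* ops without a native sequence get the default treatment */
	return paravirt_patch_default(type, clobbers, ibuf, addr, len);
}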
440 | /* | ||
441 | * This generates an indirect call based on the operation type number. | ||
442 | * The type number, computed in PARAVIRT_PATCH, is derived from the | ||
443 | * offset into the paravirt_patch_template structure, and can therefore be | ||
444 | * freely converted back into a structure offset. | ||
445 | */ | ||
446 | #define PARAVIRT_CALL "call *%c[paravirt_opptr];" | ||
447 | |||
448 | /* | ||
449 | * These macros are intended to wrap calls through one of the paravirt | ||
450 | * ops structs, so that they can be later identified and patched at | ||
451 | * runtime. | ||
452 | * | ||
453 | * Normally, a call to a pv_op function is a simple indirect call: | ||
454 | * (pv_op_struct.operations)(args...). | ||
455 | * | ||
456 | * Unfortunately, this is a relatively slow operation for modern CPUs, | ||
457 | * because it cannot necessarily determine what the destination | ||
458 | * address is. In this case, the address is a runtime constant, so at | ||
459 | * the very least we can patch the call to be a simple direct call, or | ||
460 | * ideally, patch an inline implementation into the callsite. (Direct | ||
461 | * calls are essentially free, because the call and return addresses | ||
462 | * are completely predictable.) | ||
463 | * | ||
464 | * For i386, these macros rely on the standard gcc "regparm(3)" calling | ||
465 | * convention, in which the first three arguments are placed in %eax, | ||
466 | * %edx, %ecx (in that order), and the remaining arguments are placed | ||
467 | * on the stack. All caller-save registers (eax,edx,ecx) are expected | ||
468 | * to be modified (either clobbered or used for return values). | ||
469 | * X86_64, on the other hand, already specifies a register-based calling | ||
470 | * convention, returning in %rax, with parameters going in %rdi, %rsi, | ||
471 | * %rdx, and %rcx. Note that for this reason, x86_64 does not need any | ||
472 | * special handling for dealing with 4 arguments, unlike i386. | ||
473 | * However, x86_64 also has to clobber all caller-saved registers, of | ||
474 | * which there are unfortunately quite a few (r8 - r11). | ||
475 | * | ||
476 | * The call instruction itself is marked by placing its start address | ||
477 | * and size into the .parainstructions section, so that | ||
478 | * apply_paravirt() in arch/i386/kernel/alternative.c can do the | ||
479 | * appropriate patching under the control of the backend pv_init_ops | ||
480 | * implementation. | ||
481 | * | ||
482 | * Unfortunately there's no way to get gcc to generate the args setup | ||
483 | * for the call, and then allow the call itself to be generated by an | ||
484 | * inline asm. Because of this, we must do the complete arg setup and | ||
485 | * return value handling from within these macros. This is fairly | ||
486 | * cumbersome. | ||
487 | * | ||
488 | * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments. | ||
489 | * It could be extended to more arguments, but there would be little | ||
490 | * to be gained from that. For each number of arguments, there are | ||
491 | * the two VCALL and CALL variants for void and non-void functions. | ||
492 | * | ||
493 | * When there is a return value, the invoker of the macro must specify | ||
494 | * the return type. The macro then uses sizeof() on that type to | ||
495 | * determine whether it's a 32- or 64-bit value, and places the return | ||
496 | * in the right register(s) (just %eax for 32-bit, and %edx:%eax for | ||
497 | * 64-bit). For x86_64 machines, it just returns in %rax regardless of | ||
498 | * the return value size. | ||
499 | * | ||
500 | * 64-bit arguments are passed as a pair of adjacent 32-bit arguments; | ||
501 | * i386 passes such pairs in low,high order (x86_64 passes a 64-bit | ||
502 | * argument in a single register). | ||
503 | * | ||
504 | * Small structures are passed and returned in registers. The macro | ||
505 | * calling convention can't directly deal with this, so the wrapper | ||
506 | * functions must do this. | ||
507 | * | ||
508 | * These PVOP_* macros are only defined within this header. This | ||
509 | * means that all uses must be wrapped in inline functions. This also | ||
510 | * makes sure the incoming and outgoing types are always correct. | ||
511 | */ | ||
512 | #ifdef CONFIG_X86_32 | ||
513 | #define PVOP_VCALL_ARGS \ | ||
514 | unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx | ||
515 | #define PVOP_CALL_ARGS PVOP_VCALL_ARGS | ||
516 | |||
517 | #define PVOP_CALL_ARG1(x) "a" ((unsigned long)(x)) | ||
518 | #define PVOP_CALL_ARG2(x) "d" ((unsigned long)(x)) | ||
519 | #define PVOP_CALL_ARG3(x) "c" ((unsigned long)(x)) | ||
520 | |||
521 | #define PVOP_VCALL_CLOBBERS "=a" (__eax), "=d" (__edx), \ | ||
522 | "=c" (__ecx) | ||
523 | #define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS | ||
524 | |||
525 | #define PVOP_VCALLEE_CLOBBERS "=a" (__eax), "=d" (__edx) | ||
526 | #define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS | ||
527 | |||
528 | #define EXTRA_CLOBBERS | ||
529 | #define VEXTRA_CLOBBERS | ||
530 | #else /* CONFIG_X86_64 */ | ||
531 | #define PVOP_VCALL_ARGS \ | ||
532 | unsigned long __edi = __edi, __esi = __esi, \ | ||
533 | __edx = __edx, __ecx = __ecx | ||
534 | #define PVOP_CALL_ARGS PVOP_VCALL_ARGS, __eax | ||
535 | |||
536 | #define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x)) | ||
537 | #define PVOP_CALL_ARG2(x) "S" ((unsigned long)(x)) | ||
538 | #define PVOP_CALL_ARG3(x) "d" ((unsigned long)(x)) | ||
539 | #define PVOP_CALL_ARG4(x) "c" ((unsigned long)(x)) | ||
540 | |||
541 | #define PVOP_VCALL_CLOBBERS "=D" (__edi), \ | ||
542 | "=S" (__esi), "=d" (__edx), \ | ||
543 | "=c" (__ecx) | ||
544 | #define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax) | ||
545 | |||
546 | #define PVOP_VCALLEE_CLOBBERS "=a" (__eax) | ||
547 | #define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS | ||
548 | |||
549 | #define EXTRA_CLOBBERS , "r8", "r9", "r10", "r11" | ||
550 | #define VEXTRA_CLOBBERS , "rax", "r8", "r9", "r10", "r11" | ||
551 | #endif /* CONFIG_X86_32 */ | ||
552 | |||
553 | #ifdef CONFIG_PARAVIRT_DEBUG | ||
554 | #define PVOP_TEST_NULL(op) BUG_ON(op == NULL) | ||
555 | #else | ||
556 | #define PVOP_TEST_NULL(op) ((void)op) | ||
557 | #endif | ||
558 | |||
559 | #define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr, \ | ||
560 | pre, post, ...) \ | ||
561 | ({ \ | ||
562 | rettype __ret; \ | ||
563 | PVOP_CALL_ARGS; \ | ||
564 | PVOP_TEST_NULL(op); \ | ||
565 | /* This is 32-bit specific, but is okay in 64-bit */ \ | ||
566 | /* since this condition will never hold */ \ | ||
567 | if (sizeof(rettype) > sizeof(unsigned long)) { \ | ||
568 | asm volatile(pre \ | ||
569 | paravirt_alt(PARAVIRT_CALL) \ | ||
570 | post \ | ||
571 | : call_clbr \ | ||
572 | : paravirt_type(op), \ | ||
573 | paravirt_clobber(clbr), \ | ||
574 | ##__VA_ARGS__ \ | ||
575 | : "memory", "cc" extra_clbr); \ | ||
576 | __ret = (rettype)((((u64)__edx) << 32) | __eax); \ | ||
577 | } else { \ | ||
578 | asm volatile(pre \ | ||
579 | paravirt_alt(PARAVIRT_CALL) \ | ||
580 | post \ | ||
581 | : call_clbr \ | ||
582 | : paravirt_type(op), \ | ||
583 | paravirt_clobber(clbr), \ | ||
584 | ##__VA_ARGS__ \ | ||
585 | : "memory", "cc" extra_clbr); \ | ||
586 | __ret = (rettype)__eax; \ | ||
587 | } \ | ||
588 | __ret; \ | ||
589 | }) | ||
590 | |||
591 | #define __PVOP_CALL(rettype, op, pre, post, ...) \ | ||
592 | ____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS, \ | ||
593 | EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__) | ||
594 | |||
595 | #define __PVOP_CALLEESAVE(rettype, op, pre, post, ...) \ | ||
596 | ____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \ | ||
597 | PVOP_CALLEE_CLOBBERS, , \ | ||
598 | pre, post, ##__VA_ARGS__) | ||
599 | |||
600 | |||
601 | #define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...) \ | ||
602 | ({ \ | ||
603 | PVOP_VCALL_ARGS; \ | ||
604 | PVOP_TEST_NULL(op); \ | ||
605 | asm volatile(pre \ | ||
606 | paravirt_alt(PARAVIRT_CALL) \ | ||
607 | post \ | ||
608 | : call_clbr \ | ||
609 | : paravirt_type(op), \ | ||
610 | paravirt_clobber(clbr), \ | ||
611 | ##__VA_ARGS__ \ | ||
612 | : "memory", "cc" extra_clbr); \ | ||
613 | }) | ||
614 | |||
615 | #define __PVOP_VCALL(op, pre, post, ...) \ | ||
616 | ____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS, \ | ||
617 | VEXTRA_CLOBBERS, \ | ||
618 | pre, post, ##__VA_ARGS__) | ||
619 | |||
620 | #define __PVOP_VCALLEESAVE(rettype, op, pre, post, ...) \ | ||
621 | ____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \ | ||
622 | PVOP_VCALLEE_CLOBBERS, , \ | ||
623 | pre, post, ##__VA_ARGS__) | ||
624 | |||
625 | |||
626 | |||
627 | #define PVOP_CALL0(rettype, op) \ | ||
628 | __PVOP_CALL(rettype, op, "", "") | ||
629 | #define PVOP_VCALL0(op) \ | ||
630 | __PVOP_VCALL(op, "", "") | ||
631 | |||
632 | #define PVOP_CALLEE0(rettype, op) \ | ||
633 | __PVOP_CALLEESAVE(rettype, op, "", "") | ||
634 | #define PVOP_VCALLEE0(op) \ | ||
635 | __PVOP_VCALLEESAVE(op, "", "") | ||
636 | |||
637 | |||
638 | #define PVOP_CALL1(rettype, op, arg1) \ | ||
639 | __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1)) | ||
640 | #define PVOP_VCALL1(op, arg1) \ | ||
641 | __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1)) | ||
642 | |||
643 | #define PVOP_CALLEE1(rettype, op, arg1) \ | ||
644 | __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1)) | ||
645 | #define PVOP_VCALLEE1(op, arg1) \ | ||
646 | __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1)) | ||
647 | |||
648 | |||
649 | #define PVOP_CALL2(rettype, op, arg1, arg2) \ | ||
650 | __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \ | ||
651 | PVOP_CALL_ARG2(arg2)) | ||
652 | #define PVOP_VCALL2(op, arg1, arg2) \ | ||
653 | __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \ | ||
654 | PVOP_CALL_ARG2(arg2)) | ||
655 | |||
656 | #define PVOP_CALLEE2(rettype, op, arg1, arg2) \ | ||
657 | __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \ | ||
658 | PVOP_CALL_ARG2(arg2)) | ||
659 | #define PVOP_VCALLEE2(op, arg1, arg2) \ | ||
660 | __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1), \ | ||
661 | PVOP_CALL_ARG2(arg2)) | ||
662 | |||
663 | |||
664 | #define PVOP_CALL3(rettype, op, arg1, arg2, arg3) \ | ||
665 | __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \ | ||
666 | PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3)) | ||
667 | #define PVOP_VCALL3(op, arg1, arg2, arg3) \ | ||
668 | __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \ | ||
669 | PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3)) | ||
670 | |||
671 | /* This is the only place x86_64 differs; we can make it much simpler */ | ||
672 | #ifdef CONFIG_X86_32 | ||
673 | #define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \ | ||
674 | __PVOP_CALL(rettype, op, \ | ||
675 | "push %[_arg4];", "lea 4(%%esp),%%esp;", \ | ||
676 | PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \ | ||
677 | PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4))) | ||
678 | #define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \ | ||
679 | __PVOP_VCALL(op, \ | ||
680 | "push %[_arg4];", "lea 4(%%esp),%%esp;", \ | ||
681 | "0" ((u32)(arg1)), "1" ((u32)(arg2)), \ | ||
682 | "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4))) | ||
683 | #else | ||
684 | #define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \ | ||
685 | __PVOP_CALL(rettype, op, "", "", \ | ||
686 | PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \ | ||
687 | PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4)) | ||
688 | #define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \ | ||
689 | __PVOP_VCALL(op, "", "", \ | ||
690 | PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \ | ||
691 | PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4)) | ||
692 | #endif | ||
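In use, each pv_op gets a small inline wrapper built from these macros; the cr2 accessors are typical of the real wrappers in paravirt.h (shown here as a sketch):

static inline unsigned long read_cr2(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}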
693 | 15 | ||
694 | static inline int paravirt_enabled(void) | 16 | static inline int paravirt_enabled(void) |
695 | { | 17 | { |
@@ -820,15 +142,22 @@ static inline u64 paravirt_read_msr(unsigned msr, int *err) | |||
820 | { | 142 | { |
821 | return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err); | 143 | return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err); |
822 | } | 144 | } |
823 | static inline u64 paravirt_read_msr_amd(unsigned msr, int *err) | 145 | |
146 | static inline int paravirt_rdmsr_regs(u32 *regs) | ||
824 | { | 147 | { |
825 | return PVOP_CALL2(u64, pv_cpu_ops.read_msr_amd, msr, err); | 148 | return PVOP_CALL1(int, pv_cpu_ops.rdmsr_regs, regs); |
826 | } | 149 | } |
150 | |||
827 | static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high) | 151 | static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high) |
828 | { | 152 | { |
829 | return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high); | 153 | return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high); |
830 | } | 154 | } |
831 | 155 | ||
156 | static inline int paravirt_wrmsr_regs(u32 *regs) | ||
157 | { | ||
158 | return PVOP_CALL1(int, pv_cpu_ops.wrmsr_regs, regs); | ||
159 | } | ||
160 | |||
832 | /* These should all do BUG_ON(_err), but our headers are too tangled. */ | 161 | /* These should all do BUG_ON(_err), but our headers are too tangled. */ |
833 | #define rdmsr(msr, val1, val2) \ | 162 | #define rdmsr(msr, val1, val2) \ |
834 | do { \ | 163 | do { \ |
@@ -862,6 +191,9 @@ do { \ | |||
862 | _err; \ | 191 | _err; \ |
863 | }) | 192 | }) |
864 | 193 | ||
194 | #define rdmsr_safe_regs(regs) paravirt_rdmsr_regs(regs) | ||
195 | #define wrmsr_safe_regs(regs) paravirt_wrmsr_regs(regs) | ||
196 | |||
865 | static inline int rdmsrl_safe(unsigned msr, unsigned long long *p) | 197 | static inline int rdmsrl_safe(unsigned msr, unsigned long long *p) |
866 | { | 198 | { |
867 | int err; | 199 | int err; |
@@ -871,12 +203,31 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p) | |||
871 | } | 203 | } |
872 | static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p) | 204 | static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p) |
873 | { | 205 | { |
206 | u32 gprs[8] = { 0 }; | ||
874 | int err; | 207 | int err; |
875 | 208 | ||
876 | *p = paravirt_read_msr_amd(msr, &err); | 209 | gprs[1] = msr; |
210 | gprs[7] = 0x9c5a203a; | ||
211 | |||
212 | err = paravirt_rdmsr_regs(gprs); | ||
213 | |||
214 | *p = gprs[0] | ((u64)gprs[2] << 32); | ||
215 | |||
877 | return err; | 216 | return err; |
878 | } | 217 | } |
879 | 218 | ||
219 | static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val) | ||
220 | { | ||
221 | u32 gprs[8] = { 0 }; | ||
222 | |||
223 | gprs[0] = (u32)val; | ||
224 | gprs[1] = msr; | ||
225 | gprs[2] = val >> 32; | ||
226 | gprs[7] = 0x9c5a203a; | ||
227 | |||
228 | return paravirt_wrmsr_regs(gprs); | ||
229 | } | ||
230 | |||
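The eight-entry gprs[] array appears to mirror the general-purpose registers in eax, ecx, edx, ebx, esp, ebp, esi, edi order (an inference from the indices used here, not stated in the patch): gprs[1] is the MSR number destined for %ecx, gprs[0]/gprs[2] carry the low/high halves of the value in %eax/%edx, and gprs[7] passes the AMD unlock key in %edi. A hedged usage sketch, with a placeholder MSR number:

static int demo_toggle_amd_msr_bit0(void)
{
	u64 val;
	int err;

	/* 0xc0010000 is purely illustrative, not a recommended MSR */
	err = rdmsrl_amd_safe(0xc0010000, &val);
	if (err)
		return err;

	return wrmsrl_amd_safe(0xc0010000, val | 1);
}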
880 | static inline u64 paravirt_read_tsc(void) | 231 | static inline u64 paravirt_read_tsc(void) |
881 | { | 232 | { |
882 | return PVOP_CALL0(u64, pv_cpu_ops.read_tsc); | 233 | return PVOP_CALL0(u64, pv_cpu_ops.read_tsc); |
@@ -1393,20 +744,6 @@ static inline void pmd_clear(pmd_t *pmdp) | |||
1393 | } | 744 | } |
1394 | #endif /* CONFIG_X86_PAE */ | 745 | #endif /* CONFIG_X86_PAE */ |
1395 | 746 | ||
1396 | /* Lazy mode for batching updates / context switch */ | ||
1397 | enum paravirt_lazy_mode { | ||
1398 | PARAVIRT_LAZY_NONE, | ||
1399 | PARAVIRT_LAZY_MMU, | ||
1400 | PARAVIRT_LAZY_CPU, | ||
1401 | }; | ||
1402 | |||
1403 | enum paravirt_lazy_mode paravirt_get_lazy_mode(void); | ||
1404 | void paravirt_start_context_switch(struct task_struct *prev); | ||
1405 | void paravirt_end_context_switch(struct task_struct *next); | ||
1406 | |||
1407 | void paravirt_enter_lazy_mmu(void); | ||
1408 | void paravirt_leave_lazy_mmu(void); | ||
1409 | |||
1410 | #define __HAVE_ARCH_START_CONTEXT_SWITCH | 747 | #define __HAVE_ARCH_START_CONTEXT_SWITCH |
1411 | static inline void arch_start_context_switch(struct task_struct *prev) | 748 | static inline void arch_start_context_switch(struct task_struct *prev) |
1412 | { | 749 | { |
@@ -1437,12 +774,6 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx, | |||
1437 | pv_mmu_ops.set_fixmap(idx, phys, flags); | 774 | pv_mmu_ops.set_fixmap(idx, phys, flags); |
1438 | } | 775 | } |
1439 | 776 | ||
1440 | void _paravirt_nop(void); | ||
1441 | u32 _paravirt_ident_32(u32); | ||
1442 | u64 _paravirt_ident_64(u64); | ||
1443 | |||
1444 | #define paravirt_nop ((void *)_paravirt_nop) | ||
1445 | |||
1446 | #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS) | 777 | #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS) |
1447 | 778 | ||
1448 | static inline int __raw_spin_is_locked(struct raw_spinlock *lock) | 779 | static inline int __raw_spin_is_locked(struct raw_spinlock *lock) |
@@ -1479,17 +810,6 @@ static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock) | |||
1479 | 810 | ||
1480 | #endif | 811 | #endif |
1481 | 812 | ||
1482 | /* These all sit in the .parainstructions section to tell us what to patch. */ | ||
1483 | struct paravirt_patch_site { | ||
1484 | u8 *instr; /* original instructions */ | ||
1485 | u8 instrtype; /* type of this instruction */ | ||
1486 | u8 len; /* length of original instruction */ | ||
1487 | u16 clobbers; /* what registers you may clobber */ | ||
1488 | }; | ||
1489 | |||
1490 | extern struct paravirt_patch_site __parainstructions[], | ||
1491 | __parainstructions_end[]; | ||
1492 | |||
1493 | #ifdef CONFIG_X86_32 | 813 | #ifdef CONFIG_X86_32 |
1494 | #define PV_SAVE_REGS "pushl %ecx; pushl %edx;" | 814 | #define PV_SAVE_REGS "pushl %ecx; pushl %edx;" |
1495 | #define PV_RESTORE_REGS "popl %edx; popl %ecx;" | 815 | #define PV_RESTORE_REGS "popl %edx; popl %ecx;" |
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h new file mode 100644 index 000000000000..25402d0006e7 --- /dev/null +++ b/arch/x86/include/asm/paravirt_types.h | |||
@@ -0,0 +1,721 @@ | |||
1 | #ifndef _ASM_X86_PARAVIRT_TYPES_H | ||
2 | #define _ASM_X86_PARAVIRT_TYPES_H | ||
3 | |||
4 | /* Bitmask of what can be clobbered: usually at least eax. */ | ||
5 | #define CLBR_NONE 0 | ||
6 | #define CLBR_EAX (1 << 0) | ||
7 | #define CLBR_ECX (1 << 1) | ||
8 | #define CLBR_EDX (1 << 2) | ||
9 | #define CLBR_EDI (1 << 3) | ||
10 | |||
11 | #ifdef CONFIG_X86_32 | ||
12 | /* CLBR_ANY should match all the registers the platform has. For i386, that's all of them. */ | ||
13 | #define CLBR_ANY ((1 << 4) - 1) | ||
14 | |||
15 | #define CLBR_ARG_REGS (CLBR_EAX | CLBR_EDX | CLBR_ECX) | ||
16 | #define CLBR_RET_REG (CLBR_EAX | CLBR_EDX) | ||
17 | #define CLBR_SCRATCH (0) | ||
18 | #else | ||
19 | #define CLBR_RAX CLBR_EAX | ||
20 | #define CLBR_RCX CLBR_ECX | ||
21 | #define CLBR_RDX CLBR_EDX | ||
22 | #define CLBR_RDI CLBR_EDI | ||
23 | #define CLBR_RSI (1 << 4) | ||
24 | #define CLBR_R8 (1 << 5) | ||
25 | #define CLBR_R9 (1 << 6) | ||
26 | #define CLBR_R10 (1 << 7) | ||
27 | #define CLBR_R11 (1 << 8) | ||
28 | |||
29 | #define CLBR_ANY ((1 << 9) - 1) | ||
30 | |||
31 | #define CLBR_ARG_REGS (CLBR_RDI | CLBR_RSI | CLBR_RDX | \ | ||
32 | CLBR_RCX | CLBR_R8 | CLBR_R9) | ||
33 | #define CLBR_RET_REG (CLBR_RAX) | ||
34 | #define CLBR_SCRATCH (CLBR_R10 | CLBR_R11) | ||
35 | |||
36 | #endif /* X86_64 */ | ||
37 | |||
38 | #define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG) | ||
39 | |||
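Expanding the callee-save mask by hand, as a consistency check against the definitions above:

/* i386:   (EAX|EDX|ECX | 0) & ~(EAX|EDX)            == CLBR_ECX
 * x86_64: (RDI|RSI|RDX|RCX|R8|R9 | R10|R11) & ~RAX  == all arg and
 *         scratch registers
 * i.e. the normally caller-clobbered registers that a callee-save
 * thunk additionally preserves, beyond the return register(s). */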
40 | #ifndef __ASSEMBLY__ | ||
41 | |||
42 | #include <asm/desc_defs.h> | ||
43 | #include <asm/kmap_types.h> | ||
44 | |||
45 | struct page; | ||
46 | struct thread_struct; | ||
47 | struct desc_ptr; | ||
48 | struct tss_struct; | ||
49 | struct mm_struct; | ||
50 | struct desc_struct; | ||
51 | struct task_struct; | ||
52 | struct cpumask; | ||
53 | |||
54 | /* | ||
55 | * Wrapper type for pointers to code which uses the non-standard | ||
56 | * calling convention. See PV_CALLEE_SAVE_REGS_THUNK below. | ||
57 | */ | ||
58 | struct paravirt_callee_save { | ||
59 | void *func; | ||
60 | }; | ||
61 | |||
62 | /* general info */ | ||
63 | struct pv_info { | ||
64 | unsigned int kernel_rpl; | ||
65 | int shared_kernel_pmd; | ||
66 | int paravirt_enabled; | ||
67 | const char *name; | ||
68 | }; | ||
69 | |||
70 | struct pv_init_ops { | ||
71 | /* | ||
72 | * Patch may replace one of the defined code sequences with | ||
73 | * arbitrary code, subject to the same register constraints. | ||
74 | * This generally means the code is not free to clobber any | ||
75 | * registers other than EAX. The patch function should return | ||
76 | * the number of bytes of code generated, as we nop pad the | ||
77 | * rest in generic code. | ||
78 | */ | ||
79 | unsigned (*patch)(u8 type, u16 clobber, void *insnbuf, | ||
80 | unsigned long addr, unsigned len); | ||
81 | |||
82 | /* Basic arch-specific setup */ | ||
83 | void (*arch_setup)(void); | ||
84 | char *(*memory_setup)(void); | ||
85 | void (*post_allocator_init)(void); | ||
86 | |||
87 | /* Print a banner to identify the environment */ | ||
88 | void (*banner)(void); | ||
89 | }; | ||
90 | |||
91 | |||
92 | struct pv_lazy_ops { | ||
93 | /* Set deferred update mode, used for batching operations. */ | ||
94 | void (*enter)(void); | ||
95 | void (*leave)(void); | ||
96 | }; | ||
97 | |||
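Generic code drives the enter/leave pair around batched page-table updates; roughly (demo_batch_ptes() is a hypothetical caller using the arch_enter/leave_lazy_mmu_mode wrappers):

static void demo_batch_ptes(struct mm_struct *mm, unsigned long start,
			    unsigned long end, pte_t *ptep, pte_t pte)
{
	unsigned long addr;

	arch_enter_lazy_mmu_mode();	/* pv_mmu_ops.lazy_mode.enter() */
	for (addr = start; addr < end; addr += PAGE_SIZE, ptep++)
		set_pte_at(mm, addr, ptep, pte);  /* may be queued */
	arch_leave_lazy_mmu_mode();	/* flushes any queued updates */
}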
98 | struct pv_time_ops { | ||
99 | void (*time_init)(void); | ||
100 | |||
101 | /* Get and set time of day */ | ||
102 | unsigned long (*get_wallclock)(void); | ||
103 | int (*set_wallclock)(unsigned long); | ||
104 | |||
105 | unsigned long long (*sched_clock)(void); | ||
106 | unsigned long (*get_tsc_khz)(void); | ||
107 | }; | ||
108 | |||
109 | struct pv_cpu_ops { | ||
110 | /* hooks for various privileged instructions */ | ||
111 | unsigned long (*get_debugreg)(int regno); | ||
112 | void (*set_debugreg)(int regno, unsigned long value); | ||
113 | |||
114 | void (*clts)(void); | ||
115 | |||
116 | unsigned long (*read_cr0)(void); | ||
117 | void (*write_cr0)(unsigned long); | ||
118 | |||
119 | unsigned long (*read_cr4_safe)(void); | ||
120 | unsigned long (*read_cr4)(void); | ||
121 | void (*write_cr4)(unsigned long); | ||
122 | |||
123 | #ifdef CONFIG_X86_64 | ||
124 | unsigned long (*read_cr8)(void); | ||
125 | void (*write_cr8)(unsigned long); | ||
126 | #endif | ||
127 | |||
128 | /* Segment descriptor handling */ | ||
129 | void (*load_tr_desc)(void); | ||
130 | void (*load_gdt)(const struct desc_ptr *); | ||
131 | void (*load_idt)(const struct desc_ptr *); | ||
132 | void (*store_gdt)(struct desc_ptr *); | ||
133 | void (*store_idt)(struct desc_ptr *); | ||
134 | void (*set_ldt)(const void *desc, unsigned entries); | ||
135 | unsigned long (*store_tr)(void); | ||
136 | void (*load_tls)(struct thread_struct *t, unsigned int cpu); | ||
137 | #ifdef CONFIG_X86_64 | ||
138 | void (*load_gs_index)(unsigned int idx); | ||
139 | #endif | ||
140 | void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum, | ||
141 | const void *desc); | ||
142 | void (*write_gdt_entry)(struct desc_struct *, | ||
143 | int entrynum, const void *desc, int size); | ||
144 | void (*write_idt_entry)(gate_desc *, | ||
145 | int entrynum, const gate_desc *gate); | ||
146 | void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries); | ||
147 | void (*free_ldt)(struct desc_struct *ldt, unsigned entries); | ||
148 | |||
149 | void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t); | ||
150 | |||
151 | void (*set_iopl_mask)(unsigned mask); | ||
152 | |||
153 | void (*wbinvd)(void); | ||
154 | void (*io_delay)(void); | ||
155 | |||
156 | /* cpuid emulation, mostly so that caps bits can be disabled */ | ||
157 | void (*cpuid)(unsigned int *eax, unsigned int *ebx, | ||
158 | unsigned int *ecx, unsigned int *edx); | ||
159 | |||
160 | /* MSR, PMC and TSC operations. | ||
161 | err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */ | ||
162 | u64 (*read_msr)(unsigned int msr, int *err); | ||
163 | int (*rdmsr_regs)(u32 *regs); | ||
164 | int (*write_msr)(unsigned int msr, unsigned low, unsigned high); | ||
165 | int (*wrmsr_regs)(u32 *regs); | ||
166 | |||
167 | u64 (*read_tsc)(void); | ||
168 | u64 (*read_pmc)(int counter); | ||
169 | unsigned long long (*read_tscp)(unsigned int *aux); | ||
170 | |||
171 | /* | ||
172 | * Atomically enable interrupts and return to userspace. This | ||
173 | * is only ever used to return to 32-bit processes; in a | ||
174 | * 64-bit kernel, it's used for 32-on-64 compat processes, but | ||
175 | * never native 64-bit processes. (Jump, not call.) | ||
176 | */ | ||
177 | void (*irq_enable_sysexit)(void); | ||
178 | |||
179 | /* | ||
180 | * Switch to usermode gs and return to 64-bit usermode using | ||
181 | * sysret. Only used in 64-bit kernels to return to 64-bit | ||
182 | * processes. Usermode register state, including %rsp, must | ||
183 | * already be restored. | ||
184 | */ | ||
185 | void (*usergs_sysret64)(void); | ||
186 | |||
187 | /* | ||
188 | * Switch to usermode gs and return to 32-bit usermode using | ||
189 | * sysret. Used to return to 32-on-64 compat processes. | ||
190 | * Other usermode register state, including %esp, must already | ||
191 | * be restored. | ||
192 | */ | ||
193 | void (*usergs_sysret32)(void); | ||
194 | |||
195 | /* Normal iret. Jump to this with the standard iret stack | ||
196 | frame set up. */ | ||
197 | void (*iret)(void); | ||
198 | |||
199 | void (*swapgs)(void); | ||
200 | |||
201 | void (*start_context_switch)(struct task_struct *prev); | ||
202 | void (*end_context_switch)(struct task_struct *next); | ||
203 | }; | ||
204 | |||
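As one concrete use of the cpuid hook, a backend can filter feature bits on their way to the guest. A hypothetical backend (demo_cpuid() is invented for illustration; native_cpuid() is the real primitive):

static void demo_cpuid(unsigned int *ax, unsigned int *bx,
		       unsigned int *cx, unsigned int *dx)
{
	unsigned int leaf = *ax;	/* inputs arrive via the pointers */

	native_cpuid(ax, bx, cx, dx);
	if (leaf == 1)
		*dx &= ~(1U << 9);	/* e.g. hide the local APIC bit */
}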
205 | struct pv_irq_ops { | ||
206 | void (*init_IRQ)(void); | ||
207 | |||
208 | /* | ||
209 | * Get/set interrupt state. save_fl and restore_fl are only | ||
210 | * expected to use X86_EFLAGS_IF; all other bits | ||
211 | * returned from save_fl are undefined, and may be ignored by | ||
212 | * restore_fl. | ||
213 | * | ||
214 | * NOTE: Callers of these functions expect the callee to preserve | ||
215 | * more registers than the standard C calling convention requires. | ||
216 | */ | ||
217 | struct paravirt_callee_save save_fl; | ||
218 | struct paravirt_callee_save restore_fl; | ||
219 | struct paravirt_callee_save irq_disable; | ||
220 | struct paravirt_callee_save irq_enable; | ||
221 | |||
222 | void (*safe_halt)(void); | ||
223 | void (*halt)(void); | ||
224 | |||
225 | #ifdef CONFIG_X86_64 | ||
226 | void (*adjust_exception_frame)(void); | ||
227 | #endif | ||
228 | }; | ||
229 | |||
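A backend publishes its flag handlers through the callee-save wrapper type. Sketched against the native case (native_save_fl() and PV_CALLEE_SAVE() exist in paravirt.h; demo_save_fl() and the abbreviated initializer below are illustrative only):

static unsigned long demo_save_fl(void)
{
	unsigned long flags;

	/* only X86_EFLAGS_IF needs to be meaningful in the result */
	asm volatile("pushf ; pop %0" : "=rm" (flags) : : "memory");
	return flags;
}

struct pv_irq_ops pv_irq_ops = {
	.save_fl = PV_CALLEE_SAVE(demo_save_fl),
	/* restore_fl, irq_disable, irq_enable are wrapped the same way */
};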
230 | struct pv_apic_ops { | ||
231 | #ifdef CONFIG_X86_LOCAL_APIC | ||
232 | void (*setup_boot_clock)(void); | ||
233 | void (*setup_secondary_clock)(void); | ||
234 | |||
235 | void (*startup_ipi_hook)(int phys_apicid, | ||
236 | unsigned long start_eip, | ||
237 | unsigned long start_esp); | ||
238 | #endif | ||
239 | }; | ||
240 | |||
241 | struct pv_mmu_ops { | ||
242 | /* | ||
243 | * Called before/after init_mm pagetable setup. setup_start | ||
244 | * may reset %cr3, and may pre-install parts of the pagetable; | ||
245 | * pagetable setup is expected to preserve any existing | ||
246 | * mapping. | ||
247 | */ | ||
248 | void (*pagetable_setup_start)(pgd_t *pgd_base); | ||
249 | void (*pagetable_setup_done)(pgd_t *pgd_base); | ||
250 | |||
251 | unsigned long (*read_cr2)(void); | ||
252 | void (*write_cr2)(unsigned long); | ||
253 | |||
254 | unsigned long (*read_cr3)(void); | ||
255 | void (*write_cr3)(unsigned long); | ||
256 | |||
257 | /* | ||
258 | * Hooks for intercepting the creation/use/destruction of an | ||
259 | * mm_struct. | ||
260 | */ | ||
261 | void (*activate_mm)(struct mm_struct *prev, | ||
262 | struct mm_struct *next); | ||
263 | void (*dup_mmap)(struct mm_struct *oldmm, | ||
264 | struct mm_struct *mm); | ||
265 | void (*exit_mmap)(struct mm_struct *mm); | ||
266 | |||
267 | |||
268 | /* TLB operations */ | ||
269 | void (*flush_tlb_user)(void); | ||
270 | void (*flush_tlb_kernel)(void); | ||
271 | void (*flush_tlb_single)(unsigned long addr); | ||
272 | void (*flush_tlb_others)(const struct cpumask *cpus, | ||
273 | struct mm_struct *mm, | ||
274 | unsigned long va); | ||
275 | |||
276 | /* Hooks for allocating and freeing a pagetable top-level */ | ||
277 | int (*pgd_alloc)(struct mm_struct *mm); | ||
278 | void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd); | ||
279 | |||
280 | /* | ||
281 | * Hooks for allocating/releasing pagetable pages when they're | ||
282 | * attached to a pagetable | ||
283 | */ | ||
284 | void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn); | ||
285 | void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn); | ||
286 | void (*alloc_pmd_clone)(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count); | ||
287 | void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn); | ||
288 | void (*release_pte)(unsigned long pfn); | ||
289 | void (*release_pmd)(unsigned long pfn); | ||
290 | void (*release_pud)(unsigned long pfn); | ||
291 | |||
292 | /* Pagetable manipulation functions */ | ||
293 | void (*set_pte)(pte_t *ptep, pte_t pteval); | ||
294 | void (*set_pte_at)(struct mm_struct *mm, unsigned long addr, | ||
295 | pte_t *ptep, pte_t pteval); | ||
296 | void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval); | ||
297 | void (*pte_update)(struct mm_struct *mm, unsigned long addr, | ||
298 | pte_t *ptep); | ||
299 | void (*pte_update_defer)(struct mm_struct *mm, | ||
300 | unsigned long addr, pte_t *ptep); | ||
301 | |||
302 | pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr, | ||
303 | pte_t *ptep); | ||
304 | void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr, | ||
305 | pte_t *ptep, pte_t pte); | ||
306 | |||
307 | struct paravirt_callee_save pte_val; | ||
308 | struct paravirt_callee_save make_pte; | ||
309 | |||
310 | struct paravirt_callee_save pgd_val; | ||
311 | struct paravirt_callee_save make_pgd; | ||
312 | |||
313 | #if PAGETABLE_LEVELS >= 3 | ||
314 | #ifdef CONFIG_X86_PAE | ||
315 | void (*set_pte_atomic)(pte_t *ptep, pte_t pteval); | ||
316 | void (*pte_clear)(struct mm_struct *mm, unsigned long addr, | ||
317 | pte_t *ptep); | ||
318 | void (*pmd_clear)(pmd_t *pmdp); | ||
319 | |||
320 | #endif /* CONFIG_X86_PAE */ | ||
321 | |||
322 | void (*set_pud)(pud_t *pudp, pud_t pudval); | ||
323 | |||
324 | struct paravirt_callee_save pmd_val; | ||
325 | struct paravirt_callee_save make_pmd; | ||
326 | |||
327 | #if PAGETABLE_LEVELS == 4 | ||
328 | struct paravirt_callee_save pud_val; | ||
329 | struct paravirt_callee_save make_pud; | ||
330 | |||
331 | void (*set_pgd)(pgd_t *pudp, pgd_t pgdval); | ||
332 | #endif /* PAGETABLE_LEVELS == 4 */ | ||
333 | #endif /* PAGETABLE_LEVELS >= 3 */ | ||
334 | |||
335 | #ifdef CONFIG_HIGHPTE | ||
336 | void *(*kmap_atomic_pte)(struct page *page, enum km_type type); | ||
337 | #endif | ||
338 | |||
339 | struct pv_lazy_ops lazy_mode; | ||
340 | |||
341 | /* dom0 ops */ | ||
342 | |||
343 | /* Sometimes the physical address is a pfn, and sometimes it's | ||
344 | an mfn. We can tell which is which from the index. */ | ||
345 | void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx, | ||
346 | phys_addr_t phys, pgprot_t flags); | ||
347 | }; | ||
348 | |||
349 | struct raw_spinlock; | ||
350 | struct pv_lock_ops { | ||
351 | int (*spin_is_locked)(struct raw_spinlock *lock); | ||
352 | int (*spin_is_contended)(struct raw_spinlock *lock); | ||
353 | void (*spin_lock)(struct raw_spinlock *lock); | ||
354 | void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags); | ||
355 | int (*spin_trylock)(struct raw_spinlock *lock); | ||
356 | void (*spin_unlock)(struct raw_spinlock *lock); | ||
357 | }; | ||
358 | |||
359 | /* This contains all the paravirt structures: we get a convenient | ||
360 | * number for each function from its offset, which we use to indicate | ||
361 | * what to patch. */ | ||
362 | struct paravirt_patch_template { | ||
363 | struct pv_init_ops pv_init_ops; | ||
364 | struct pv_time_ops pv_time_ops; | ||
365 | struct pv_cpu_ops pv_cpu_ops; | ||
366 | struct pv_irq_ops pv_irq_ops; | ||
367 | struct pv_apic_ops pv_apic_ops; | ||
368 | struct pv_mmu_ops pv_mmu_ops; | ||
369 | struct pv_lock_ops pv_lock_ops; | ||
370 | }; | ||
371 | |||
372 | extern struct pv_info pv_info; | ||
373 | extern struct pv_init_ops pv_init_ops; | ||
374 | extern struct pv_time_ops pv_time_ops; | ||
375 | extern struct pv_cpu_ops pv_cpu_ops; | ||
376 | extern struct pv_irq_ops pv_irq_ops; | ||
377 | extern struct pv_apic_ops pv_apic_ops; | ||
378 | extern struct pv_mmu_ops pv_mmu_ops; | ||
379 | extern struct pv_lock_ops pv_lock_ops; | ||
380 | |||
381 | #define PARAVIRT_PATCH(x) \ | ||
382 | (offsetof(struct paravirt_patch_template, x) / sizeof(void *)) | ||
383 | |||
384 | #define paravirt_type(op) \ | ||
385 | [paravirt_typenum] "i" (PARAVIRT_PATCH(op)), \ | ||
386 | [paravirt_opptr] "i" (&(op)) | ||
387 | #define paravirt_clobber(clobber) \ | ||
388 | [paravirt_clobber] "i" (clobber) | ||
389 | |||
390 | /* | ||
391 | * Generate some code, and mark it as patchable by the | ||
392 | * apply_paravirt() alternate instruction patcher. | ||
393 | */ | ||
394 | #define _paravirt_alt(insn_string, type, clobber) \ | ||
395 | "771:\n\t" insn_string "\n" "772:\n" \ | ||
396 | ".pushsection .parainstructions,\"a\"\n" \ | ||
397 | _ASM_ALIGN "\n" \ | ||
398 | _ASM_PTR " 771b\n" \ | ||
399 | " .byte " type "\n" \ | ||
400 | " .byte 772b-771b\n" \ | ||
401 | " .short " clobber "\n" \ | ||
402 | ".popsection\n" | ||
403 | |||
404 | /* Generate patchable code, with the default asm parameters. */ | ||
405 | #define paravirt_alt(insn_string) \ | ||
406 | _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]") | ||
407 | |||
408 | /* Simple instruction patching code. */ | ||
409 | #define DEF_NATIVE(ops, name, code) \ | ||
410 | extern const char start_##ops##_##name[], end_##ops##_##name[]; \ | ||
411 | asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":") | ||
412 | |||
413 | unsigned paravirt_patch_nop(void); | ||
414 | unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len); | ||
415 | unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len); | ||
416 | unsigned paravirt_patch_ignore(unsigned len); | ||
417 | unsigned paravirt_patch_call(void *insnbuf, | ||
418 | const void *target, u16 tgt_clobbers, | ||
419 | unsigned long addr, u16 site_clobbers, | ||
420 | unsigned len); | ||
421 | unsigned paravirt_patch_jmp(void *insnbuf, const void *target, | ||
422 | unsigned long addr, unsigned len); | ||
423 | unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf, | ||
424 | unsigned long addr, unsigned len); | ||
425 | |||
426 | unsigned paravirt_patch_insns(void *insnbuf, unsigned len, | ||
427 | const char *start, const char *end); | ||
428 | |||
429 | unsigned native_patch(u8 type, u16 clobbers, void *ibuf, | ||
430 | unsigned long addr, unsigned len); | ||
431 | |||
432 | int paravirt_disable_iospace(void); | ||
433 | |||
434 | /* | ||
435 | * This generates an indirect call based on the operation type number. | ||
436 | * The type number, computed in PARAVIRT_PATCH, is derived from the | ||
437 | * offset into the paravirt_patch_template structure, and can therefore be | ||
438 | * freely converted back into a structure offset. | ||
439 | */ | ||
440 | #define PARAVIRT_CALL "call *%c[paravirt_opptr];" | ||
441 | |||
442 | /* | ||
443 | * These macros are intended to wrap calls through one of the paravirt | ||
444 | * ops structs, so that they can be later identified and patched at | ||
445 | * runtime. | ||
446 | * | ||
447 | * Normally, a call to a pv_op function is a simple indirect call: | ||
448 | * (pv_op_struct.operations)(args...). | ||
449 | * | ||
450 | * Unfortunately, this is a relatively slow operation for modern CPUs, | ||
451 | * because it cannot necessarily determine what the destination | ||
452 | * address is. In this case, the address is a runtime constant, so at | ||
453 | * the very least we can patch the call to be a simple direct call, or | ||
454 | * ideally, patch an inline implementation into the callsite. (Direct | ||
455 | * calls are essentially free, because the call and return addresses | ||
456 | * are completely predictable.) | ||
457 | * | ||
458 | * For i386, these macros rely on the standard gcc "regparm(3)" calling | ||
459 | * convention, in which the first three arguments are placed in %eax, | ||
460 | * %edx, %ecx (in that order), and the remaining arguments are placed | ||
461 | * on the stack. All caller-save registers (eax,edx,ecx) are expected | ||
462 | * to be modified (either clobbered or used for return values). | ||
463 | * X86_64, on the other hand, already specifies a register-based calling | ||
464 | * convention, returning in %rax, with parameters going in %rdi, %rsi, | ||
465 | * %rdx, and %rcx. Note that for this reason, x86_64 does not need any | ||
466 | * special handling for dealing with 4 arguments, unlike i386. | ||
467 | * However, x86_64 also has to clobber all caller-saved registers, of | ||
468 | * which there are unfortunately quite a few (r8 - r11). | ||
469 | * | ||
470 | * The call instruction itself is marked by placing its start address | ||
471 | * and size into the .parainstructions section, so that | ||
472 | * apply_paravirt() in arch/i386/kernel/alternative.c can do the | ||
473 | * appropriate patching under the control of the backend pv_init_ops | ||
474 | * implementation. | ||
475 | * | ||
476 | * Unfortunately there's no way to get gcc to generate the args setup | ||
477 | * for the call, and then allow the call itself to be generated by an | ||
478 | * inline asm. Because of this, we must do the complete arg setup and | ||
479 | * return value handling from within these macros. This is fairly | ||
480 | * cumbersome. | ||
481 | * | ||
482 | * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments. | ||
483 | * It could be extended to more arguments, but there would be little | ||
484 | * to be gained from that. For each number of arguments, there are | ||
485 | * the two VCALL and CALL variants for void and non-void functions. | ||
486 | * | ||
487 | * When there is a return value, the invoker of the macro must specify | ||
488 | * the return type. The macro then uses sizeof() on that type to | ||
489 | * determine whether it's a 32- or 64-bit value, and places the return | ||
490 | * in the right register(s) (just %eax for 32-bit, and %edx:%eax for | ||
491 | * 64-bit). For x86_64 machines, it just returns in %rax regardless of | ||
492 | * the return value size. | ||
493 | * | ||
494 | * 64-bit arguments are passed as a pair of adjacent 32-bit arguments; | ||
495 | * i386 passes such pairs in low,high order (x86_64 passes a 64-bit | ||
496 | * argument in a single register). | ||
497 | * | ||
498 | * Small structures are passed and returned in registers. The macro | ||
499 | * calling convention can't directly deal with this, so the wrapper | ||
500 | * functions must do this. | ||
501 | * | ||
502 | * These PVOP_* macros are only defined within this header. This | ||
503 | * means that all uses must be wrapped in inline functions. This also | ||
504 | * makes sure the incoming and outgoing types are always correct. | ||
505 | */ | ||
506 | #ifdef CONFIG_X86_32 | ||
507 | #define PVOP_VCALL_ARGS \ | ||
508 | unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx | ||
509 | #define PVOP_CALL_ARGS PVOP_VCALL_ARGS | ||
510 | |||
511 | #define PVOP_CALL_ARG1(x) "a" ((unsigned long)(x)) | ||
512 | #define PVOP_CALL_ARG2(x) "d" ((unsigned long)(x)) | ||
513 | #define PVOP_CALL_ARG3(x) "c" ((unsigned long)(x)) | ||
514 | |||
515 | #define PVOP_VCALL_CLOBBERS "=a" (__eax), "=d" (__edx), \ | ||
516 | "=c" (__ecx) | ||
517 | #define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS | ||
518 | |||
519 | #define PVOP_VCALLEE_CLOBBERS "=a" (__eax), "=d" (__edx) | ||
520 | #define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS | ||
521 | |||
522 | #define EXTRA_CLOBBERS | ||
523 | #define VEXTRA_CLOBBERS | ||
524 | #else /* CONFIG_X86_64 */ | ||
525 | #define PVOP_VCALL_ARGS \ | ||
526 | unsigned long __edi = __edi, __esi = __esi, \ | ||
527 | __edx = __edx, __ecx = __ecx | ||
528 | #define PVOP_CALL_ARGS PVOP_VCALL_ARGS, __eax | ||
529 | |||
530 | #define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x)) | ||
531 | #define PVOP_CALL_ARG2(x) "S" ((unsigned long)(x)) | ||
532 | #define PVOP_CALL_ARG3(x) "d" ((unsigned long)(x)) | ||
533 | #define PVOP_CALL_ARG4(x) "c" ((unsigned long)(x)) | ||
534 | |||
535 | #define PVOP_VCALL_CLOBBERS "=D" (__edi), \ | ||
536 | "=S" (__esi), "=d" (__edx), \ | ||
537 | "=c" (__ecx) | ||
538 | #define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax) | ||
539 | |||
540 | #define PVOP_VCALLEE_CLOBBERS "=a" (__eax) | ||
541 | #define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS | ||
542 | |||
543 | #define EXTRA_CLOBBERS , "r8", "r9", "r10", "r11" | ||
544 | #define VEXTRA_CLOBBERS , "rax", "r8", "r9", "r10", "r11" | ||
545 | #endif /* CONFIG_X86_32 */ | ||
546 | |||
547 | #ifdef CONFIG_PARAVIRT_DEBUG | ||
548 | #define PVOP_TEST_NULL(op) BUG_ON(op == NULL) | ||
549 | #else | ||
550 | #define PVOP_TEST_NULL(op) ((void)op) | ||
551 | #endif | ||
552 | |||
553 | #define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr, \ | ||
554 | pre, post, ...) \ | ||
555 | ({ \ | ||
556 | rettype __ret; \ | ||
557 | PVOP_CALL_ARGS; \ | ||
558 | PVOP_TEST_NULL(op); \ | ||
559 | /* This is 32-bit specific, but is okay in 64-bit */ \ | ||
560 | /* since this condition will never hold */ \ | ||
561 | if (sizeof(rettype) > sizeof(unsigned long)) { \ | ||
562 | asm volatile(pre \ | ||
563 | paravirt_alt(PARAVIRT_CALL) \ | ||
564 | post \ | ||
565 | : call_clbr \ | ||
566 | : paravirt_type(op), \ | ||
567 | paravirt_clobber(clbr), \ | ||
568 | ##__VA_ARGS__ \ | ||
569 | : "memory", "cc" extra_clbr); \ | ||
570 | __ret = (rettype)((((u64)__edx) << 32) | __eax); \ | ||
571 | } else { \ | ||
572 | asm volatile(pre \ | ||
573 | paravirt_alt(PARAVIRT_CALL) \ | ||
574 | post \ | ||
575 | : call_clbr \ | ||
576 | : paravirt_type(op), \ | ||
577 | paravirt_clobber(clbr), \ | ||
578 | ##__VA_ARGS__ \ | ||
579 | : "memory", "cc" extra_clbr); \ | ||
580 | __ret = (rettype)__eax; \ | ||
581 | } \ | ||
582 | __ret; \ | ||
583 | }) | ||
584 | |||
585 | #define __PVOP_CALL(rettype, op, pre, post, ...) \ | ||
586 | ____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS, \ | ||
587 | EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__) | ||
588 | |||
589 | #define __PVOP_CALLEESAVE(rettype, op, pre, post, ...) \ | ||
590 | ____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \ | ||
591 | PVOP_CALLEE_CLOBBERS, , \ | ||
592 | pre, post, ##__VA_ARGS__) | ||
593 | |||
594 | |||
595 | #define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...) \ | ||
596 | ({ \ | ||
597 | PVOP_VCALL_ARGS; \ | ||
598 | PVOP_TEST_NULL(op); \ | ||
599 | asm volatile(pre \ | ||
600 | paravirt_alt(PARAVIRT_CALL) \ | ||
601 | post \ | ||
602 | : call_clbr \ | ||
603 | : paravirt_type(op), \ | ||
604 | paravirt_clobber(clbr), \ | ||
605 | ##__VA_ARGS__ \ | ||
606 | : "memory", "cc" extra_clbr); \ | ||
607 | }) | ||
608 | |||
609 | #define __PVOP_VCALL(op, pre, post, ...) \ | ||
610 | ____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS, \ | ||
611 | VEXTRA_CLOBBERS, \ | ||
612 | pre, post, ##__VA_ARGS__) | ||
613 | |||
614 | #define __PVOP_VCALLEESAVE(rettype, op, pre, post, ...) \ | ||
615 | ____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \ | ||
616 | PVOP_VCALLEE_CLOBBERS, , \ | ||
617 | pre, post, ##__VA_ARGS__) | ||
618 | |||
619 | |||
620 | |||
621 | #define PVOP_CALL0(rettype, op) \ | ||
622 | __PVOP_CALL(rettype, op, "", "") | ||
623 | #define PVOP_VCALL0(op) \ | ||
624 | __PVOP_VCALL(op, "", "") | ||
625 | |||
626 | #define PVOP_CALLEE0(rettype, op) \ | ||
627 | __PVOP_CALLEESAVE(rettype, op, "", "") | ||
628 | #define PVOP_VCALLEE0(op) \ | ||
629 | __PVOP_VCALLEESAVE(op, "", "") | ||
630 | |||
631 | |||
632 | #define PVOP_CALL1(rettype, op, arg1) \ | ||
633 | __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1)) | ||
634 | #define PVOP_VCALL1(op, arg1) \ | ||
635 | __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1)) | ||
636 | |||
637 | #define PVOP_CALLEE1(rettype, op, arg1) \ | ||
638 | __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1)) | ||
639 | #define PVOP_VCALLEE1(op, arg1) \ | ||
640 | __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1)) | ||
641 | |||
642 | |||
643 | #define PVOP_CALL2(rettype, op, arg1, arg2) \ | ||
644 | __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \ | ||
645 | PVOP_CALL_ARG2(arg2)) | ||
646 | #define PVOP_VCALL2(op, arg1, arg2) \ | ||
647 | __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \ | ||
648 | PVOP_CALL_ARG2(arg2)) | ||
649 | |||
650 | #define PVOP_CALLEE2(rettype, op, arg1, arg2) \ | ||
651 | __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \ | ||
652 | PVOP_CALL_ARG2(arg2)) | ||
653 | #define PVOP_VCALLEE2(op, arg1, arg2) \ | ||
654 | __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1), \ | ||
655 | PVOP_CALL_ARG2(arg2)) | ||
656 | |||
657 | |||
658 | #define PVOP_CALL3(rettype, op, arg1, arg2, arg3) \ | ||
659 | __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \ | ||
660 | PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3)) | ||
661 | #define PVOP_VCALL3(op, arg1, arg2, arg3) \ | ||
662 | __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \ | ||
663 | PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3)) | ||
664 | |||
665 | /* This is the only place x86_64 differs; we can make it much simpler */ | ||
666 | #ifdef CONFIG_X86_32 | ||
667 | #define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \ | ||
668 | __PVOP_CALL(rettype, op, \ | ||
669 | "push %[_arg4];", "lea 4(%%esp),%%esp;", \ | ||
670 | PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \ | ||
671 | PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4))) | ||
672 | #define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \ | ||
673 | __PVOP_VCALL(op, \ | ||
674 | "push %[_arg4];", "lea 4(%%esp),%%esp;", \ | ||
675 | "0" ((u32)(arg1)), "1" ((u32)(arg2)), \ | ||
676 | "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4))) | ||
677 | #else | ||
678 | #define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \ | ||
679 | __PVOP_CALL(rettype, op, "", "", \ | ||
680 | PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \ | ||
681 | PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4)) | ||
682 | #define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \ | ||
683 | __PVOP_VCALL(op, "", "", \ | ||
684 | PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \ | ||
685 | PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4)) | ||
686 | #endif | ||
687 | |||
688 | /* Lazy mode for batching updates / context switch */ | ||
689 | enum paravirt_lazy_mode { | ||
690 | PARAVIRT_LAZY_NONE, | ||
691 | PARAVIRT_LAZY_MMU, | ||
692 | PARAVIRT_LAZY_CPU, | ||
693 | }; | ||
694 | |||
695 | enum paravirt_lazy_mode paravirt_get_lazy_mode(void); | ||
696 | void paravirt_start_context_switch(struct task_struct *prev); | ||
697 | void paravirt_end_context_switch(struct task_struct *next); | ||
698 | |||
699 | void paravirt_enter_lazy_mmu(void); | ||
700 | void paravirt_leave_lazy_mmu(void); | ||
701 | |||
702 | void _paravirt_nop(void); | ||
703 | u32 _paravirt_ident_32(u32); | ||
704 | u64 _paravirt_ident_64(u64); | ||
705 | |||
706 | #define paravirt_nop ((void *)_paravirt_nop) | ||
707 | |||
708 | /* These all sit in the .parainstructions section to tell us what to patch. */ | ||
709 | struct paravirt_patch_site { | ||
710 | u8 *instr; /* original instructions */ | ||
711 | u8 instrtype; /* type of this instruction */ | ||
712 | u8 len; /* length of original instruction */ | ||
713 | u16 clobbers; /* what registers you may clobber */ | ||
714 | }; | ||
715 | |||
716 | extern struct paravirt_patch_site __parainstructions[], | ||
717 | __parainstructions_end[]; | ||
718 | |||
719 | #endif /* __ASSEMBLY__ */ | ||
720 | |||
721 | #endif /* _ASM_X86_PARAVIRT_TYPES_H */ | ||
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 103f1ddb0d85..04eacefcfd26 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h | |||
@@ -49,7 +49,7 @@ | |||
49 | #define __percpu_arg(x) "%%"__stringify(__percpu_seg)":%P" #x | 49 | #define __percpu_arg(x) "%%"__stringify(__percpu_seg)":%P" #x |
50 | #define __my_cpu_offset percpu_read(this_cpu_off) | 50 | #define __my_cpu_offset percpu_read(this_cpu_off) |
51 | #else | 51 | #else |
52 | #define __percpu_arg(x) "%" #x | 52 | #define __percpu_arg(x) "%P" #x |
53 | #endif | 53 | #endif |
54 | 54 | ||
55 | /* | 55 | /* |
@@ -104,36 +104,48 @@ do { \ | |||
104 | } \ | 104 | } \ |
105 | } while (0) | 105 | } while (0) |
106 | 106 | ||
107 | #define percpu_from_op(op, var) \ | 107 | #define percpu_from_op(op, var, constraint) \ |
108 | ({ \ | 108 | ({ \ |
109 | typeof(var) ret__; \ | 109 | typeof(var) ret__; \ |
110 | switch (sizeof(var)) { \ | 110 | switch (sizeof(var)) { \ |
111 | case 1: \ | 111 | case 1: \ |
112 | asm(op "b "__percpu_arg(1)",%0" \ | 112 | asm(op "b "__percpu_arg(1)",%0" \ |
113 | : "=q" (ret__) \ | 113 | : "=q" (ret__) \ |
114 | : "m" (var)); \ | 114 | : constraint); \ |
115 | break; \ | 115 | break; \ |
116 | case 2: \ | 116 | case 2: \ |
117 | asm(op "w "__percpu_arg(1)",%0" \ | 117 | asm(op "w "__percpu_arg(1)",%0" \ |
118 | : "=r" (ret__) \ | 118 | : "=r" (ret__) \ |
119 | : "m" (var)); \ | 119 | : constraint); \ |
120 | break; \ | 120 | break; \ |
121 | case 4: \ | 121 | case 4: \ |
122 | asm(op "l "__percpu_arg(1)",%0" \ | 122 | asm(op "l "__percpu_arg(1)",%0" \ |
123 | : "=r" (ret__) \ | 123 | : "=r" (ret__) \ |
124 | : "m" (var)); \ | 124 | : constraint); \ |
125 | break; \ | 125 | break; \ |
126 | case 8: \ | 126 | case 8: \ |
127 | asm(op "q "__percpu_arg(1)",%0" \ | 127 | asm(op "q "__percpu_arg(1)",%0" \ |
128 | : "=r" (ret__) \ | 128 | : "=r" (ret__) \ |
129 | : "m" (var)); \ | 129 | : constraint); \ |
130 | break; \ | 130 | break; \ |
131 | default: __bad_percpu_size(); \ | 131 | default: __bad_percpu_size(); \ |
132 | } \ | 132 | } \ |
133 | ret__; \ | 133 | ret__; \ |
134 | }) | 134 | }) |
135 | 135 | ||
136 | #define percpu_read(var) percpu_from_op("mov", per_cpu__##var) | 136 | /* |
137 | * percpu_read() makes gcc load the percpu variable every time it is | ||
138 | * accessed, while percpu_read_stable() allows the value to be cached. | ||
139 | * percpu_read_stable() is more efficient and can be used if its value | ||
140 | * is guaranteed to be valid across cpus. The current users include | ||
141 | * get_current() and get_thread_info(), both of which are actually | ||
142 | * per-thread variables implemented as per-cpu variables and thus | ||
143 | * stable for the duration of the respective task. | ||
144 | */ | ||
145 | #define percpu_read(var) percpu_from_op("mov", per_cpu__##var, \ | ||
146 | "m" (per_cpu__##var)) | ||
147 | #define percpu_read_stable(var) percpu_from_op("mov", per_cpu__##var, \ | ||
148 | "p" (&per_cpu__##var)) | ||
137 | #define percpu_write(var, val) percpu_to_op("mov", per_cpu__##var, val) | 149 | #define percpu_write(var, val) percpu_to_op("mov", per_cpu__##var, val) |
138 | #define percpu_add(var, val) percpu_to_op("add", per_cpu__##var, val) | 150 | #define percpu_add(var, val) percpu_to_op("add", per_cpu__##var, val) |
139 | #define percpu_sub(var, val) percpu_to_op("sub", per_cpu__##var, val) | 151 | #define percpu_sub(var, val) percpu_to_op("sub", per_cpu__##var, val) |
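The split between percpu_read() and percpu_read_stable() above comes down to the asm input constraint: "m" (the variable) makes gcc treat each access as a fresh memory read, while "p" (its address) exposes only the address, so identical non-volatile asm statements may be merged and the loaded value kept in a register. A stand-alone sketch of the effect (user-space x86-64, gcc -O2 -no-pie; the variable and function names here are made up):

#include <stdio.h>

static unsigned long var = 1;

/* "m": the slot itself is an input, so every call re-reads memory. */
static inline unsigned long read_m(void)
{
	unsigned long ret;
	asm("movq %1, %0" : "=r" (ret) : "m" (var));
	return ret;
}

/* "p": only the address is an input; the compiler may CSE the asm
 * and cache the result, which is what percpu_read_stable() relies
 * on.  %P prints the bare address, hence -no-pie for this
 * absolute-addressing form. */
static inline unsigned long read_p(void)
{
	unsigned long ret;
	asm("movq %P1, %0" : "=r" (ret) : "p" (&var));
	return ret;
}

int main(void)
{
	printf("%lu %lu\n", read_m(), read_p());
	return 0;
}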
diff --git a/arch/x86/include/asm/perf_counter.h b/arch/x86/include/asm/perf_counter.h index fa64e401589d..e7b7c938ae27 100644 --- a/arch/x86/include/asm/perf_counter.h +++ b/arch/x86/include/asm/perf_counter.h | |||
@@ -84,6 +84,16 @@ union cpuid10_edx { | |||
84 | #define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b | 84 | #define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b |
85 | #define X86_PMC_IDX_FIXED_BUS_CYCLES (X86_PMC_IDX_FIXED + 2) | 85 | #define X86_PMC_IDX_FIXED_BUS_CYCLES (X86_PMC_IDX_FIXED + 2) |
86 | 86 | ||
87 | /* | ||
88 | * We model BTS tracing as another fixed-mode PMC. | ||
89 | * | ||
90 | * We choose a value in the middle of the fixed counter range, since lower | ||
91 | * values are used by actual fixed counters and higher values are used | ||
92 | * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr. | ||
93 | */ | ||
94 | #define X86_PMC_IDX_FIXED_BTS (X86_PMC_IDX_FIXED + 16) | ||
95 | |||
96 | |||
87 | #ifdef CONFIG_PERF_COUNTERS | 97 | #ifdef CONFIG_PERF_COUNTERS |
88 | extern void init_hw_perf_counters(void); | 98 | extern void init_hw_perf_counters(void); |
89 | extern void perf_counters_lapic_init(void); | 99 | extern void perf_counters_lapic_init(void); |
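For reference, X86_PMC_IDX_FIXED is 32 in this tree, so the BTS pseudo-counter claims bit 48 of the 64-bit overflow bitmap, clear of the three architectural fixed counters (bits 32-34) below it and the status bits at the top. A one-liner to make the placement concrete (stand-alone sketch, values assumed from this tree):

#define X86_PMC_IDX_FIXED	32
#define X86_PMC_IDX_FIXED_BTS	(X86_PMC_IDX_FIXED + 16)

/* bit 48 of the PERF_GLOBAL_STATUS-shaped bitmaps */
static const unsigned long long bts_bit = 1ULL << X86_PMC_IDX_FIXED_BTS;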
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 16748077559a..4c5b51fdc788 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h | |||
@@ -135,6 +135,11 @@ static inline unsigned long pte_pfn(pte_t pte) | |||
135 | return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT; | 135 | return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT; |
136 | } | 136 | } |
137 | 137 | ||
138 | static inline unsigned long pmd_pfn(pmd_t pmd) | ||
139 | { | ||
140 | return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT; | ||
141 | } | ||
142 | |||
138 | #define pte_page(pte) pfn_to_page(pte_pfn(pte)) | 143 | #define pte_page(pte) pfn_to_page(pte_pfn(pte)) |
139 | 144 | ||
140 | static inline int pmd_large(pmd_t pte) | 145 | static inline int pmd_large(pmd_t pte) |
@@ -359,7 +364,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd) | |||
359 | * this macro returns the index of the entry in the pmd page which would | 364 | * this macro returns the index of the entry in the pmd page which would |
360 | * control the given virtual address | 365 | * control the given virtual address |
361 | */ | 366 | */ |
362 | static inline unsigned pmd_index(unsigned long address) | 367 | static inline unsigned long pmd_index(unsigned long address) |
363 | { | 368 | { |
364 | return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1); | 369 | return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1); |
365 | } | 370 | } |
@@ -379,7 +384,7 @@ static inline unsigned pmd_index(unsigned long address) | |||
379 | * this function returns the index of the entry in the pte page which would | 384 | * this function returns the index of the entry in the pte page which would |
380 | * control the given virtual address | 385 | * control the given virtual address |
381 | */ | 386 | */ |
382 | static inline unsigned pte_index(unsigned long address) | 387 | static inline unsigned long pte_index(unsigned long address) |
383 | { | 388 | { |
384 | return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); | 389 | return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); |
385 | } | 390 | } |
@@ -430,11 +435,6 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) | |||
430 | return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address); | 435 | return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address); |
431 | } | 436 | } |
432 | 437 | ||
433 | static inline unsigned long pmd_pfn(pmd_t pmd) | ||
434 | { | ||
435 | return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT; | ||
436 | } | ||
437 | |||
438 | static inline int pud_large(pud_t pud) | 438 | static inline int pud_large(pud_t pud) |
439 | { | 439 | { |
440 | return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) == | 440 | return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) == |
@@ -470,7 +470,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd) | |||
470 | #define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT) | 470 | #define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT) |
471 | 471 | ||
472 | /* to find an entry in a page-table-directory. */ | 472 | /* to find an entry in a page-table-directory. */ |
473 | static inline unsigned pud_index(unsigned long address) | 473 | static inline unsigned long pud_index(unsigned long address) |
474 | { | 474 | { |
475 | return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1); | 475 | return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1); |
476 | } | 476 | } |
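The index helpers only change return type here (unsigned to unsigned long), which avoids mixed-width arithmetic when callers scale the result; the extraction itself is untouched. A worked example of what they compute, with the usual 4-level x86-64 constants assumed (PAGE_SHIFT 12, PMD_SHIFT 21, PUD_SHIFT 30, 512-entry tables):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21
#define PUD_SHIFT	30
#define PTRS		512	/* entries per table level */

int main(void)
{
	unsigned long addr = 0x7f8000603000UL;

	/* each level consumes 9 bits of the virtual address */
	printf("pte=%lu pmd=%lu pud=%lu\n",
	       (addr >> PAGE_SHIFT) & (PTRS - 1),
	       (addr >> PMD_SHIFT) & (PTRS - 1),
	       (addr >> PUD_SHIFT) & (PTRS - 1));
	return 0;
}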
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index c7768269b1cf..e08ea043e085 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h | |||
@@ -403,7 +403,17 @@ extern unsigned long kernel_eflags; | |||
403 | extern asmlinkage void ignore_sysret(void); | 403 | extern asmlinkage void ignore_sysret(void); |
404 | #else /* X86_64 */ | 404 | #else /* X86_64 */ |
405 | #ifdef CONFIG_CC_STACKPROTECTOR | 405 | #ifdef CONFIG_CC_STACKPROTECTOR |
406 | DECLARE_PER_CPU(unsigned long, stack_canary); | 406 | /* |
407 | * Make sure stack canary segment base is cached-aligned: | ||
408 | * "For Intel Atom processors, avoid non zero segment base address | ||
409 | * that is not aligned to cache line boundary at all cost." | ||
410 | * (Optim Ref Manual Assembly/Compiler Coding Rule 15.) | ||
411 | */ | ||
412 | struct stack_canary { | ||
413 | char __pad[20]; /* canary at %gs:20 */ | ||
414 | unsigned long canary; | ||
415 | }; | ||
416 | DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary); | ||
407 | #endif | 417 | #endif |
408 | #endif /* X86_64 */ | 418 | #endif /* X86_64 */ |
409 | 419 | ||
@@ -703,13 +713,23 @@ static inline void cpu_relax(void) | |||
703 | rep_nop(); | 713 | rep_nop(); |
704 | } | 714 | } |
705 | 715 | ||
706 | /* Stop speculative execution: */ | 716 | /* Stop speculative execution and prefetching of modified code. */ |
707 | static inline void sync_core(void) | 717 | static inline void sync_core(void) |
708 | { | 718 | { |
709 | int tmp; | 719 | int tmp; |
710 | 720 | ||
711 | asm volatile("cpuid" : "=a" (tmp) : "0" (1) | 721 | #if defined(CONFIG_M386) || defined(CONFIG_M486) |
712 | : "ebx", "ecx", "edx", "memory"); | 722 | if (boot_cpu_data.x86 < 5) |
723 | /* There is no speculative execution. | ||
724 | * jmp is a barrier to prefetching. */ | ||
725 | asm volatile("jmp 1f\n1:\n" ::: "memory"); | ||
726 | else | ||
727 | #endif | ||
728 | /* cpuid is a barrier to speculative execution. | ||
729 | * Prefetched instructions are automatically | ||
730 | * invalidated when modified. */ | ||
731 | asm volatile("cpuid" : "=a" (tmp) : "0" (1) | ||
732 | : "ebx", "ecx", "edx", "memory"); | ||
713 | } | 733 | } |
714 | 734 | ||
715 | static inline void __monitor(const void *eax, unsigned long ecx, | 735 | static inline void __monitor(const void *eax, unsigned long ecx, |
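sync_core() exists for cross-modifying code: after rewriting instruction bytes, a serializing instruction (cpuid) must execute before the CPU may run them, and on 386/486, which do not speculate, a jmp suffices to flush the prefetch queue. A sketch of the calling pattern it serves, in kernel context (illustrative only; compare text_poke_early() in the alternative.c hunk below):

static void patch_insn(void *addr, const void *opcode, size_t len)
{
	unsigned long flags;

	local_irq_save(flags);		/* nothing may run the half-written bytes */
	memcpy(addr, opcode, len);	/* rewrite the instruction(s) */
	sync_core();			/* serialize before they can execute */
	local_irq_restore(flags);
}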
diff --git a/arch/x86/include/asm/scatterlist.h b/arch/x86/include/asm/scatterlist.h index 263d397d2eef..75af592677ec 100644 --- a/arch/x86/include/asm/scatterlist.h +++ b/arch/x86/include/asm/scatterlist.h | |||
@@ -1,33 +1,8 @@ | |||
1 | #ifndef _ASM_X86_SCATTERLIST_H | 1 | #ifndef _ASM_X86_SCATTERLIST_H |
2 | #define _ASM_X86_SCATTERLIST_H | 2 | #define _ASM_X86_SCATTERLIST_H |
3 | 3 | ||
4 | #include <asm/types.h> | ||
5 | |||
6 | struct scatterlist { | ||
7 | #ifdef CONFIG_DEBUG_SG | ||
8 | unsigned long sg_magic; | ||
9 | #endif | ||
10 | unsigned long page_link; | ||
11 | unsigned int offset; | ||
12 | unsigned int length; | ||
13 | dma_addr_t dma_address; | ||
14 | unsigned int dma_length; | ||
15 | }; | ||
16 | |||
17 | #define ARCH_HAS_SG_CHAIN | ||
18 | #define ISA_DMA_THRESHOLD (0x00ffffff) | 4 | #define ISA_DMA_THRESHOLD (0x00ffffff) |
19 | 5 | ||
20 | /* | 6 | #include <asm-generic/scatterlist.h> |
21 | * These macros should be used after a pci_map_sg call has been done | ||
22 | * to get bus addresses of each of the SG entries and their lengths. | ||
23 | * You should only work with the number of sg entries pci_map_sg | ||
24 | * returns. | ||
25 | */ | ||
26 | #define sg_dma_address(sg) ((sg)->dma_address) | ||
27 | #ifdef CONFIG_X86_32 | ||
28 | # define sg_dma_len(sg) ((sg)->length) | ||
29 | #else | ||
30 | # define sg_dma_len(sg) ((sg)->dma_length) | ||
31 | #endif | ||
32 | 7 | ||
33 | #endif /* _ASM_X86_SCATTERLIST_H */ | 8 | #endif /* _ASM_X86_SCATTERLIST_H */ |
diff --git a/arch/x86/include/asm/shmbuf.h b/arch/x86/include/asm/shmbuf.h index b51413b74971..83c05fc2de38 100644 --- a/arch/x86/include/asm/shmbuf.h +++ b/arch/x86/include/asm/shmbuf.h | |||
@@ -1,51 +1 @@ | |||
1 | #ifndef _ASM_X86_SHMBUF_H | #include <asm-generic/shmbuf.h> | |
2 | #define _ASM_X86_SHMBUF_H | ||
3 | |||
4 | /* | ||
5 | * The shmid64_ds structure for x86 architecture. | ||
6 | * Note extra padding because this structure is passed back and forth | ||
7 | * between kernel and user space. | ||
8 | * | ||
9 | * Pad space on 32 bit is left for: | ||
10 | * - 64-bit time_t to solve y2038 problem | ||
11 | * - 2 miscellaneous 32-bit values | ||
12 | * | ||
13 | * Pad space on 64 bit is left for: | ||
14 | * - 2 miscellaneous 64-bit values | ||
15 | */ | ||
16 | |||
17 | struct shmid64_ds { | ||
18 | struct ipc64_perm shm_perm; /* operation perms */ | ||
19 | size_t shm_segsz; /* size of segment (bytes) */ | ||
20 | __kernel_time_t shm_atime; /* last attach time */ | ||
21 | #ifdef __i386__ | ||
22 | unsigned long __unused1; | ||
23 | #endif | ||
24 | __kernel_time_t shm_dtime; /* last detach time */ | ||
25 | #ifdef __i386__ | ||
26 | unsigned long __unused2; | ||
27 | #endif | ||
28 | __kernel_time_t shm_ctime; /* last change time */ | ||
29 | #ifdef __i386__ | ||
30 | unsigned long __unused3; | ||
31 | #endif | ||
32 | __kernel_pid_t shm_cpid; /* pid of creator */ | ||
33 | __kernel_pid_t shm_lpid; /* pid of last operator */ | ||
34 | unsigned long shm_nattch; /* no. of current attaches */ | ||
35 | unsigned long __unused4; | ||
36 | unsigned long __unused5; | ||
37 | }; | ||
38 | |||
39 | struct shminfo64 { | ||
40 | unsigned long shmmax; | ||
41 | unsigned long shmmin; | ||
42 | unsigned long shmmni; | ||
43 | unsigned long shmseg; | ||
44 | unsigned long shmall; | ||
45 | unsigned long __unused1; | ||
46 | unsigned long __unused2; | ||
47 | unsigned long __unused3; | ||
48 | unsigned long __unused4; | ||
49 | }; | ||
50 | |||
51 | #endif /* _ASM_X86_SHMBUF_H */ | ||
diff --git a/arch/x86/include/asm/socket.h b/arch/x86/include/asm/socket.h index ca8bf2cd0ba9..6b71384b9d8b 100644 --- a/arch/x86/include/asm/socket.h +++ b/arch/x86/include/asm/socket.h | |||
@@ -1,60 +1 @@ | |||
1 | #ifndef _ASM_X86_SOCKET_H | #include <asm-generic/socket.h> | |
2 | #define _ASM_X86_SOCKET_H | ||
3 | |||
4 | #include <asm/sockios.h> | ||
5 | |||
6 | /* For setsockopt(2) */ | ||
7 | #define SOL_SOCKET 1 | ||
8 | |||
9 | #define SO_DEBUG 1 | ||
10 | #define SO_REUSEADDR 2 | ||
11 | #define SO_TYPE 3 | ||
12 | #define SO_ERROR 4 | ||
13 | #define SO_DONTROUTE 5 | ||
14 | #define SO_BROADCAST 6 | ||
15 | #define SO_SNDBUF 7 | ||
16 | #define SO_RCVBUF 8 | ||
17 | #define SO_SNDBUFFORCE 32 | ||
18 | #define SO_RCVBUFFORCE 33 | ||
19 | #define SO_KEEPALIVE 9 | ||
20 | #define SO_OOBINLINE 10 | ||
21 | #define SO_NO_CHECK 11 | ||
22 | #define SO_PRIORITY 12 | ||
23 | #define SO_LINGER 13 | ||
24 | #define SO_BSDCOMPAT 14 | ||
25 | /* To add :#define SO_REUSEPORT 15 */ | ||
26 | #define SO_PASSCRED 16 | ||
27 | #define SO_PEERCRED 17 | ||
28 | #define SO_RCVLOWAT 18 | ||
29 | #define SO_SNDLOWAT 19 | ||
30 | #define SO_RCVTIMEO 20 | ||
31 | #define SO_SNDTIMEO 21 | ||
32 | |||
33 | /* Security levels - as per NRL IPv6 - don't actually do anything */ | ||
34 | #define SO_SECURITY_AUTHENTICATION 22 | ||
35 | #define SO_SECURITY_ENCRYPTION_TRANSPORT 23 | ||
36 | #define SO_SECURITY_ENCRYPTION_NETWORK 24 | ||
37 | |||
38 | #define SO_BINDTODEVICE 25 | ||
39 | |||
40 | /* Socket filtering */ | ||
41 | #define SO_ATTACH_FILTER 26 | ||
42 | #define SO_DETACH_FILTER 27 | ||
43 | |||
44 | #define SO_PEERNAME 28 | ||
45 | #define SO_TIMESTAMP 29 | ||
46 | #define SCM_TIMESTAMP SO_TIMESTAMP | ||
47 | |||
48 | #define SO_ACCEPTCONN 30 | ||
49 | |||
50 | #define SO_PEERSEC 31 | ||
51 | #define SO_PASSSEC 34 | ||
52 | #define SO_TIMESTAMPNS 35 | ||
53 | #define SCM_TIMESTAMPNS SO_TIMESTAMPNS | ||
54 | |||
55 | #define SO_MARK 36 | ||
56 | |||
57 | #define SO_TIMESTAMPING 37 | ||
58 | #define SCM_TIMESTAMPING SO_TIMESTAMPING | ||
59 | |||
60 | #endif /* _ASM_X86_SOCKET_H */ | ||
diff --git a/arch/x86/include/asm/sockios.h b/arch/x86/include/asm/sockios.h index 49cc72b5d3c9..def6d4746ee7 100644 --- a/arch/x86/include/asm/sockios.h +++ b/arch/x86/include/asm/sockios.h | |||
@@ -1,13 +1 @@ | |||
1 | #ifndef _ASM_X86_SOCKIOS_H | #include <asm-generic/sockios.h> | |
2 | #define _ASM_X86_SOCKIOS_H | ||
3 | |||
4 | /* Socket-level I/O control calls. */ | ||
5 | #define FIOSETOWN 0x8901 | ||
6 | #define SIOCSPGRP 0x8902 | ||
7 | #define FIOGETOWN 0x8903 | ||
8 | #define SIOCGPGRP 0x8904 | ||
9 | #define SIOCATMARK 0x8905 | ||
10 | #define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ | ||
11 | #define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ | ||
12 | |||
13 | #endif /* _ASM_X86_SOCKIOS_H */ | ||
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h index c2d742c6e15f..157517763565 100644 --- a/arch/x86/include/asm/stackprotector.h +++ b/arch/x86/include/asm/stackprotector.h | |||
@@ -48,7 +48,7 @@ | |||
48 | * head_32 for boot CPU and setup_per_cpu_areas() for others. | 48 | * head_32 for boot CPU and setup_per_cpu_areas() for others. |
49 | */ | 49 | */ |
50 | #define GDT_STACK_CANARY_INIT \ | 50 | #define GDT_STACK_CANARY_INIT \ |
51 | [GDT_ENTRY_STACK_CANARY] = { { { 0x00000018, 0x00409000 } } }, | 51 | [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18), |
52 | 52 | ||
53 | /* | 53 | /* |
54 | * Initialize the stackprotector canary value. | 54 | * Initialize the stackprotector canary value. |
@@ -78,21 +78,19 @@ static __always_inline void boot_init_stack_canary(void) | |||
78 | #ifdef CONFIG_X86_64 | 78 | #ifdef CONFIG_X86_64 |
79 | percpu_write(irq_stack_union.stack_canary, canary); | 79 | percpu_write(irq_stack_union.stack_canary, canary); |
80 | #else | 80 | #else |
81 | percpu_write(stack_canary, canary); | 81 | percpu_write(stack_canary.canary, canary); |
82 | #endif | 82 | #endif |
83 | } | 83 | } |
84 | 84 | ||
85 | static inline void setup_stack_canary_segment(int cpu) | 85 | static inline void setup_stack_canary_segment(int cpu) |
86 | { | 86 | { |
87 | #ifdef CONFIG_X86_32 | 87 | #ifdef CONFIG_X86_32 |
88 | unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu) - 20; | 88 | unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu); |
89 | struct desc_struct *gdt_table = get_cpu_gdt_table(cpu); | 89 | struct desc_struct *gdt_table = get_cpu_gdt_table(cpu); |
90 | struct desc_struct desc; | 90 | struct desc_struct desc; |
91 | 91 | ||
92 | desc = gdt_table[GDT_ENTRY_STACK_CANARY]; | 92 | desc = gdt_table[GDT_ENTRY_STACK_CANARY]; |
93 | desc.base0 = canary & 0xffff; | 93 | set_desc_base(&desc, canary); |
94 | desc.base1 = (canary >> 16) & 0xff; | ||
95 | desc.base2 = (canary >> 24) & 0xff; | ||
96 | write_gdt_entry(gdt_table, GDT_ENTRY_STACK_CANARY, &desc, DESCTYPE_S); | 94 | write_gdt_entry(gdt_table, GDT_ENTRY_STACK_CANARY, &desc, DESCTYPE_S); |
97 | #endif | 95 | #endif |
98 | } | 96 | } |
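The 20-byte pad is not arbitrary: gcc's i386 -fstack-protector hard-codes the canary load as %gs:20, so whatever %gs points at must hold the live canary exactly 20 bytes from its base; wrapping it in a struct lets the whole object be cache-line aligned per the Atom rule quoted above. A compile-time check of the layout (build with gcc -m32; on 64-bit the 8-byte alignment of unsigned long would push the member to offset 24):

#include <stddef.h>

struct stack_canary {
	char __pad[20];		/* canary must land at %gs:20 */
	unsigned long canary;
};

_Static_assert(offsetof(struct stack_canary, canary) == 20,
	       "canary not at %gs:20");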
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h index 643c59b4bc6e..f08f97374892 100644 --- a/arch/x86/include/asm/system.h +++ b/arch/x86/include/asm/system.h | |||
@@ -31,7 +31,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, | |||
31 | "movl %P[task_canary](%[next]), %%ebx\n\t" \ | 31 | "movl %P[task_canary](%[next]), %%ebx\n\t" \ |
32 | "movl %%ebx, "__percpu_arg([stack_canary])"\n\t" | 32 | "movl %%ebx, "__percpu_arg([stack_canary])"\n\t" |
33 | #define __switch_canary_oparam \ | 33 | #define __switch_canary_oparam \ |
34 | , [stack_canary] "=m" (per_cpu_var(stack_canary)) | 34 | , [stack_canary] "=m" (per_cpu_var(stack_canary.canary)) |
35 | #define __switch_canary_iparam \ | 35 | #define __switch_canary_iparam \ |
36 | , [task_canary] "i" (offsetof(struct task_struct, stack_canary)) | 36 | , [task_canary] "i" (offsetof(struct task_struct, stack_canary)) |
37 | #else /* CC_STACKPROTECTOR */ | 37 | #else /* CC_STACKPROTECTOR */ |
@@ -150,33 +150,6 @@ do { \ | |||
150 | #endif | 150 | #endif |
151 | 151 | ||
152 | #ifdef __KERNEL__ | 152 | #ifdef __KERNEL__ |
153 | #define _set_base(addr, base) do { unsigned long __pr; \ | ||
154 | __asm__ __volatile__ ("movw %%dx,%1\n\t" \ | ||
155 | "rorl $16,%%edx\n\t" \ | ||
156 | "movb %%dl,%2\n\t" \ | ||
157 | "movb %%dh,%3" \ | ||
158 | :"=&d" (__pr) \ | ||
159 | :"m" (*((addr)+2)), \ | ||
160 | "m" (*((addr)+4)), \ | ||
161 | "m" (*((addr)+7)), \ | ||
162 | "0" (base) \ | ||
163 | ); } while (0) | ||
164 | |||
165 | #define _set_limit(addr, limit) do { unsigned long __lr; \ | ||
166 | __asm__ __volatile__ ("movw %%dx,%1\n\t" \ | ||
167 | "rorl $16,%%edx\n\t" \ | ||
168 | "movb %2,%%dh\n\t" \ | ||
169 | "andb $0xf0,%%dh\n\t" \ | ||
170 | "orb %%dh,%%dl\n\t" \ | ||
171 | "movb %%dl,%2" \ | ||
172 | :"=&d" (__lr) \ | ||
173 | :"m" (*(addr)), \ | ||
174 | "m" (*((addr)+6)), \ | ||
175 | "0" (limit) \ | ||
176 | ); } while (0) | ||
177 | |||
178 | #define set_base(ldt, base) _set_base(((char *)&(ldt)) , (base)) | ||
179 | #define set_limit(ldt, limit) _set_limit(((char *)&(ldt)) , ((limit)-1)) | ||
180 | 153 | ||
181 | extern void native_load_gs_index(unsigned); | 154 | extern void native_load_gs_index(unsigned); |
182 | 155 | ||
diff --git a/arch/x86/include/asm/termbits.h b/arch/x86/include/asm/termbits.h index af1b70ea440f..3935b106de79 100644 --- a/arch/x86/include/asm/termbits.h +++ b/arch/x86/include/asm/termbits.h | |||
@@ -1,198 +1 @@ | |||
1 | #ifndef _ASM_X86_TERMBITS_H | #include <asm-generic/termbits.h> | |
2 | #define _ASM_X86_TERMBITS_H | ||
3 | |||
4 | #include <linux/posix_types.h> | ||
5 | |||
6 | typedef unsigned char cc_t; | ||
7 | typedef unsigned int speed_t; | ||
8 | typedef unsigned int tcflag_t; | ||
9 | |||
10 | #define NCCS 19 | ||
11 | struct termios { | ||
12 | tcflag_t c_iflag; /* input mode flags */ | ||
13 | tcflag_t c_oflag; /* output mode flags */ | ||
14 | tcflag_t c_cflag; /* control mode flags */ | ||
15 | tcflag_t c_lflag; /* local mode flags */ | ||
16 | cc_t c_line; /* line discipline */ | ||
17 | cc_t c_cc[NCCS]; /* control characters */ | ||
18 | }; | ||
19 | |||
20 | struct termios2 { | ||
21 | tcflag_t c_iflag; /* input mode flags */ | ||
22 | tcflag_t c_oflag; /* output mode flags */ | ||
23 | tcflag_t c_cflag; /* control mode flags */ | ||
24 | tcflag_t c_lflag; /* local mode flags */ | ||
25 | cc_t c_line; /* line discipline */ | ||
26 | cc_t c_cc[NCCS]; /* control characters */ | ||
27 | speed_t c_ispeed; /* input speed */ | ||
28 | speed_t c_ospeed; /* output speed */ | ||
29 | }; | ||
30 | |||
31 | struct ktermios { | ||
32 | tcflag_t c_iflag; /* input mode flags */ | ||
33 | tcflag_t c_oflag; /* output mode flags */ | ||
34 | tcflag_t c_cflag; /* control mode flags */ | ||
35 | tcflag_t c_lflag; /* local mode flags */ | ||
36 | cc_t c_line; /* line discipline */ | ||
37 | cc_t c_cc[NCCS]; /* control characters */ | ||
38 | speed_t c_ispeed; /* input speed */ | ||
39 | speed_t c_ospeed; /* output speed */ | ||
40 | }; | ||
41 | |||
42 | /* c_cc characters */ | ||
43 | #define VINTR 0 | ||
44 | #define VQUIT 1 | ||
45 | #define VERASE 2 | ||
46 | #define VKILL 3 | ||
47 | #define VEOF 4 | ||
48 | #define VTIME 5 | ||
49 | #define VMIN 6 | ||
50 | #define VSWTC 7 | ||
51 | #define VSTART 8 | ||
52 | #define VSTOP 9 | ||
53 | #define VSUSP 10 | ||
54 | #define VEOL 11 | ||
55 | #define VREPRINT 12 | ||
56 | #define VDISCARD 13 | ||
57 | #define VWERASE 14 | ||
58 | #define VLNEXT 15 | ||
59 | #define VEOL2 16 | ||
60 | |||
61 | /* c_iflag bits */ | ||
62 | #define IGNBRK 0000001 | ||
63 | #define BRKINT 0000002 | ||
64 | #define IGNPAR 0000004 | ||
65 | #define PARMRK 0000010 | ||
66 | #define INPCK 0000020 | ||
67 | #define ISTRIP 0000040 | ||
68 | #define INLCR 0000100 | ||
69 | #define IGNCR 0000200 | ||
70 | #define ICRNL 0000400 | ||
71 | #define IUCLC 0001000 | ||
72 | #define IXON 0002000 | ||
73 | #define IXANY 0004000 | ||
74 | #define IXOFF 0010000 | ||
75 | #define IMAXBEL 0020000 | ||
76 | #define IUTF8 0040000 | ||
77 | |||
78 | /* c_oflag bits */ | ||
79 | #define OPOST 0000001 | ||
80 | #define OLCUC 0000002 | ||
81 | #define ONLCR 0000004 | ||
82 | #define OCRNL 0000010 | ||
83 | #define ONOCR 0000020 | ||
84 | #define ONLRET 0000040 | ||
85 | #define OFILL 0000100 | ||
86 | #define OFDEL 0000200 | ||
87 | #define NLDLY 0000400 | ||
88 | #define NL0 0000000 | ||
89 | #define NL1 0000400 | ||
90 | #define CRDLY 0003000 | ||
91 | #define CR0 0000000 | ||
92 | #define CR1 0001000 | ||
93 | #define CR2 0002000 | ||
94 | #define CR3 0003000 | ||
95 | #define TABDLY 0014000 | ||
96 | #define TAB0 0000000 | ||
97 | #define TAB1 0004000 | ||
98 | #define TAB2 0010000 | ||
99 | #define TAB3 0014000 | ||
100 | #define XTABS 0014000 | ||
101 | #define BSDLY 0020000 | ||
102 | #define BS0 0000000 | ||
103 | #define BS1 0020000 | ||
104 | #define VTDLY 0040000 | ||
105 | #define VT0 0000000 | ||
106 | #define VT1 0040000 | ||
107 | #define FFDLY 0100000 | ||
108 | #define FF0 0000000 | ||
109 | #define FF1 0100000 | ||
110 | |||
111 | /* c_cflag bit meaning */ | ||
112 | #define CBAUD 0010017 | ||
113 | #define B0 0000000 /* hang up */ | ||
114 | #define B50 0000001 | ||
115 | #define B75 0000002 | ||
116 | #define B110 0000003 | ||
117 | #define B134 0000004 | ||
118 | #define B150 0000005 | ||
119 | #define B200 0000006 | ||
120 | #define B300 0000007 | ||
121 | #define B600 0000010 | ||
122 | #define B1200 0000011 | ||
123 | #define B1800 0000012 | ||
124 | #define B2400 0000013 | ||
125 | #define B4800 0000014 | ||
126 | #define B9600 0000015 | ||
127 | #define B19200 0000016 | ||
128 | #define B38400 0000017 | ||
129 | #define EXTA B19200 | ||
130 | #define EXTB B38400 | ||
131 | #define CSIZE 0000060 | ||
132 | #define CS5 0000000 | ||
133 | #define CS6 0000020 | ||
134 | #define CS7 0000040 | ||
135 | #define CS8 0000060 | ||
136 | #define CSTOPB 0000100 | ||
137 | #define CREAD 0000200 | ||
138 | #define PARENB 0000400 | ||
139 | #define PARODD 0001000 | ||
140 | #define HUPCL 0002000 | ||
141 | #define CLOCAL 0004000 | ||
142 | #define CBAUDEX 0010000 | ||
143 | #define BOTHER 0010000 /* non standard rate */ | ||
144 | #define B57600 0010001 | ||
145 | #define B115200 0010002 | ||
146 | #define B230400 0010003 | ||
147 | #define B460800 0010004 | ||
148 | #define B500000 0010005 | ||
149 | #define B576000 0010006 | ||
150 | #define B921600 0010007 | ||
151 | #define B1000000 0010010 | ||
152 | #define B1152000 0010011 | ||
153 | #define B1500000 0010012 | ||
154 | #define B2000000 0010013 | ||
155 | #define B2500000 0010014 | ||
156 | #define B3000000 0010015 | ||
157 | #define B3500000 0010016 | ||
158 | #define B4000000 0010017 | ||
159 | #define CIBAUD 002003600000 /* input baud rate */ | ||
160 | #define CMSPAR 010000000000 /* mark or space (stick) parity */ | ||
161 | #define CRTSCTS 020000000000 /* flow control */ | ||
162 | |||
163 | #define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */ | ||
164 | |||
165 | /* c_lflag bits */ | ||
166 | #define ISIG 0000001 | ||
167 | #define ICANON 0000002 | ||
168 | #define XCASE 0000004 | ||
169 | #define ECHO 0000010 | ||
170 | #define ECHOE 0000020 | ||
171 | #define ECHOK 0000040 | ||
172 | #define ECHONL 0000100 | ||
173 | #define NOFLSH 0000200 | ||
174 | #define TOSTOP 0000400 | ||
175 | #define ECHOCTL 0001000 | ||
176 | #define ECHOPRT 0002000 | ||
177 | #define ECHOKE 0004000 | ||
178 | #define FLUSHO 0010000 | ||
179 | #define PENDIN 0040000 | ||
180 | #define IEXTEN 0100000 | ||
181 | |||
182 | /* tcflow() and TCXONC use these */ | ||
183 | #define TCOOFF 0 | ||
184 | #define TCOON 1 | ||
185 | #define TCIOFF 2 | ||
186 | #define TCION 3 | ||
187 | |||
188 | /* tcflush() and TCFLSH use these */ | ||
189 | #define TCIFLUSH 0 | ||
190 | #define TCOFLUSH 1 | ||
191 | #define TCIOFLUSH 2 | ||
192 | |||
193 | /* tcsetattr uses these */ | ||
194 | #define TCSANOW 0 | ||
195 | #define TCSADRAIN 1 | ||
196 | #define TCSAFLUSH 2 | ||
197 | |||
198 | #endif /* _ASM_X86_TERMBITS_H */ | ||
diff --git a/arch/x86/include/asm/termios.h b/arch/x86/include/asm/termios.h index c4ee8056baca..280d78a9d966 100644 --- a/arch/x86/include/asm/termios.h +++ b/arch/x86/include/asm/termios.h | |||
@@ -1,114 +1 @@ | |||
1 | #ifndef _ASM_X86_TERMIOS_H | #include <asm-generic/termios.h> | |
2 | #define _ASM_X86_TERMIOS_H | ||
3 | |||
4 | #include <asm/termbits.h> | ||
5 | #include <asm/ioctls.h> | ||
6 | |||
7 | struct winsize { | ||
8 | unsigned short ws_row; | ||
9 | unsigned short ws_col; | ||
10 | unsigned short ws_xpixel; | ||
11 | unsigned short ws_ypixel; | ||
12 | }; | ||
13 | |||
14 | #define NCC 8 | ||
15 | struct termio { | ||
16 | unsigned short c_iflag; /* input mode flags */ | ||
17 | unsigned short c_oflag; /* output mode flags */ | ||
18 | unsigned short c_cflag; /* control mode flags */ | ||
19 | unsigned short c_lflag; /* local mode flags */ | ||
20 | unsigned char c_line; /* line discipline */ | ||
21 | unsigned char c_cc[NCC]; /* control characters */ | ||
22 | }; | ||
23 | |||
24 | /* modem lines */ | ||
25 | #define TIOCM_LE 0x001 | ||
26 | #define TIOCM_DTR 0x002 | ||
27 | #define TIOCM_RTS 0x004 | ||
28 | #define TIOCM_ST 0x008 | ||
29 | #define TIOCM_SR 0x010 | ||
30 | #define TIOCM_CTS 0x020 | ||
31 | #define TIOCM_CAR 0x040 | ||
32 | #define TIOCM_RNG 0x080 | ||
33 | #define TIOCM_DSR 0x100 | ||
34 | #define TIOCM_CD TIOCM_CAR | ||
35 | #define TIOCM_RI TIOCM_RNG | ||
36 | #define TIOCM_OUT1 0x2000 | ||
37 | #define TIOCM_OUT2 0x4000 | ||
38 | #define TIOCM_LOOP 0x8000 | ||
39 | |||
40 | /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ | ||
41 | |||
42 | #ifdef __KERNEL__ | ||
43 | |||
44 | #include <asm/uaccess.h> | ||
45 | |||
46 | /* intr=^C quit=^\ erase=del kill=^U | ||
47 | eof=^D vtime=\0 vmin=\1 sxtc=\0 | ||
48 | start=^Q stop=^S susp=^Z eol=\0 | ||
49 | reprint=^R discard=^U werase=^W lnext=^V | ||
50 | eol2=\0 | ||
51 | */ | ||
52 | #define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0" | ||
53 | |||
54 | /* | ||
55 | * Translate a "termio" structure into a "termios". Ugh. | ||
56 | */ | ||
57 | #define SET_LOW_TERMIOS_BITS(termios, termio, x) { \ | ||
58 | unsigned short __tmp; \ | ||
59 | get_user(__tmp,&(termio)->x); \ | ||
60 | *(unsigned short *) &(termios)->x = __tmp; \ | ||
61 | } | ||
62 | |||
63 | static inline int user_termio_to_kernel_termios(struct ktermios *termios, | ||
64 | struct termio __user *termio) | ||
65 | { | ||
66 | SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); | ||
67 | SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); | ||
68 | SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); | ||
69 | SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); | ||
70 | get_user(termios->c_line, &termio->c_line); | ||
71 | return copy_from_user(termios->c_cc, termio->c_cc, NCC); | ||
72 | } | ||
73 | |||
74 | /* | ||
75 | * Translate a "termios" structure into a "termio". Ugh. | ||
76 | */ | ||
77 | static inline int kernel_termios_to_user_termio(struct termio __user *termio, | ||
78 | struct ktermios *termios) | ||
79 | { | ||
80 | put_user((termios)->c_iflag, &(termio)->c_iflag); | ||
81 | put_user((termios)->c_oflag, &(termio)->c_oflag); | ||
82 | put_user((termios)->c_cflag, &(termio)->c_cflag); | ||
83 | put_user((termios)->c_lflag, &(termio)->c_lflag); | ||
84 | put_user((termios)->c_line, &(termio)->c_line); | ||
85 | return copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); | ||
86 | } | ||
87 | |||
88 | static inline int user_termios_to_kernel_termios(struct ktermios *k, | ||
89 | struct termios2 __user *u) | ||
90 | { | ||
91 | return copy_from_user(k, u, sizeof(struct termios2)); | ||
92 | } | ||
93 | |||
94 | static inline int kernel_termios_to_user_termios(struct termios2 __user *u, | ||
95 | struct ktermios *k) | ||
96 | { | ||
97 | return copy_to_user(u, k, sizeof(struct termios2)); | ||
98 | } | ||
99 | |||
100 | static inline int user_termios_to_kernel_termios_1(struct ktermios *k, | ||
101 | struct termios __user *u) | ||
102 | { | ||
103 | return copy_from_user(k, u, sizeof(struct termios)); | ||
104 | } | ||
105 | |||
106 | static inline int kernel_termios_to_user_termios_1(struct termios __user *u, | ||
107 | struct ktermios *k) | ||
108 | { | ||
109 | return copy_to_user(u, k, sizeof(struct termios)); | ||
110 | } | ||
111 | |||
112 | #endif /* __KERNEL__ */ | ||
113 | |||
114 | #endif /* _ASM_X86_TERMIOS_H */ | ||
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index fad7d40b75f8..d27d0a2fec4c 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h | |||
@@ -95,7 +95,7 @@ struct thread_info { | |||
95 | #define TIF_DEBUGCTLMSR 25 /* uses thread_struct.debugctlmsr */ | 95 | #define TIF_DEBUGCTLMSR 25 /* uses thread_struct.debugctlmsr */ |
96 | #define TIF_DS_AREA_MSR 26 /* uses thread_struct.ds_area_msr */ | 96 | #define TIF_DS_AREA_MSR 26 /* uses thread_struct.ds_area_msr */ |
97 | #define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */ | 97 | #define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */ |
98 | #define TIF_SYSCALL_FTRACE 28 /* for ftrace syscall instrumentation */ | 98 | #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */ |
99 | 99 | ||
100 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) | 100 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) |
101 | #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) | 101 | #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) |
@@ -118,17 +118,17 @@ struct thread_info { | |||
118 | #define _TIF_DEBUGCTLMSR (1 << TIF_DEBUGCTLMSR) | 118 | #define _TIF_DEBUGCTLMSR (1 << TIF_DEBUGCTLMSR) |
119 | #define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR) | 119 | #define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR) |
120 | #define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES) | 120 | #define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES) |
121 | #define _TIF_SYSCALL_FTRACE (1 << TIF_SYSCALL_FTRACE) | 121 | #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) |
122 | 122 | ||
123 | /* work to do in syscall_trace_enter() */ | 123 | /* work to do in syscall_trace_enter() */ |
124 | #define _TIF_WORK_SYSCALL_ENTRY \ | 124 | #define _TIF_WORK_SYSCALL_ENTRY \ |
125 | (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_FTRACE | \ | 125 | (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \ |
126 | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | _TIF_SINGLESTEP) | 126 | _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT) |
127 | 127 | ||
128 | /* work to do in syscall_trace_leave() */ | 128 | /* work to do in syscall_trace_leave() */ |
129 | #define _TIF_WORK_SYSCALL_EXIT \ | 129 | #define _TIF_WORK_SYSCALL_EXIT \ |
130 | (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \ | 130 | (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \ |
131 | _TIF_SYSCALL_FTRACE) | 131 | _TIF_SYSCALL_TRACEPOINT) |
132 | 132 | ||
133 | /* work to do on interrupt/exception return */ | 133 | /* work to do on interrupt/exception return */ |
134 | #define _TIF_WORK_MASK \ | 134 | #define _TIF_WORK_MASK \ |
@@ -137,7 +137,8 @@ struct thread_info { | |||
137 | _TIF_SINGLESTEP|_TIF_SECCOMP|_TIF_SYSCALL_EMU)) | 137 | _TIF_SINGLESTEP|_TIF_SECCOMP|_TIF_SYSCALL_EMU)) |
138 | 138 | ||
139 | /* work to do on any return to user space */ | 139 | /* work to do on any return to user space */ |
140 | #define _TIF_ALLWORK_MASK ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_FTRACE) | 140 | #define _TIF_ALLWORK_MASK \ |
141 | ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT) | ||
141 | 142 | ||
142 | /* Only used for 64 bit */ | 143 | /* Only used for 64 bit */ |
143 | #define _TIF_DO_NOTIFY_MASK \ | 144 | #define _TIF_DO_NOTIFY_MASK \ |
@@ -213,7 +214,7 @@ DECLARE_PER_CPU(unsigned long, kernel_stack); | |||
213 | static inline struct thread_info *current_thread_info(void) | 214 | static inline struct thread_info *current_thread_info(void) |
214 | { | 215 | { |
215 | struct thread_info *ti; | 216 | struct thread_info *ti; |
216 | ti = (void *)(percpu_read(kernel_stack) + | 217 | ti = (void *)(percpu_read_stable(kernel_stack) + |
217 | KERNEL_STACK_OFFSET - THREAD_SIZE); | 218 | KERNEL_STACK_OFFSET - THREAD_SIZE); |
218 | return ti; | 219 | return ti; |
219 | } | 220 | } |
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h index 066ef590d7e0..26d06e052a18 100644 --- a/arch/x86/include/asm/topology.h +++ b/arch/x86/include/asm/topology.h | |||
@@ -129,25 +129,34 @@ extern unsigned long node_remap_size[]; | |||
129 | #endif | 129 | #endif |
130 | 130 | ||
131 | /* sched_domains SD_NODE_INIT for NUMA machines */ | 131 | /* sched_domains SD_NODE_INIT for NUMA machines */ |
132 | #define SD_NODE_INIT (struct sched_domain) { \ | 132 | #define SD_NODE_INIT (struct sched_domain) { \ |
133 | .min_interval = 8, \ | 133 | .min_interval = 8, \ |
134 | .max_interval = 32, \ | 134 | .max_interval = 32, \ |
135 | .busy_factor = 32, \ | 135 | .busy_factor = 32, \ |
136 | .imbalance_pct = 125, \ | 136 | .imbalance_pct = 125, \ |
137 | .cache_nice_tries = SD_CACHE_NICE_TRIES, \ | 137 | .cache_nice_tries = SD_CACHE_NICE_TRIES, \ |
138 | .busy_idx = 3, \ | 138 | .busy_idx = 3, \ |
139 | .idle_idx = SD_IDLE_IDX, \ | 139 | .idle_idx = SD_IDLE_IDX, \ |
140 | .newidle_idx = SD_NEWIDLE_IDX, \ | 140 | .newidle_idx = SD_NEWIDLE_IDX, \ |
141 | .wake_idx = 1, \ | 141 | .wake_idx = 1, \ |
142 | .forkexec_idx = SD_FORKEXEC_IDX, \ | 142 | .forkexec_idx = SD_FORKEXEC_IDX, \ |
143 | .flags = SD_LOAD_BALANCE \ | 143 | \ |
144 | | SD_BALANCE_EXEC \ | 144 | .flags = 1*SD_LOAD_BALANCE \ |
145 | | SD_BALANCE_FORK \ | 145 | | 1*SD_BALANCE_NEWIDLE \ |
146 | | SD_WAKE_AFFINE \ | 146 | | 1*SD_BALANCE_EXEC \ |
147 | | SD_WAKE_BALANCE \ | 147 | | 1*SD_BALANCE_FORK \ |
148 | | SD_SERIALIZE, \ | 148 | | 0*SD_WAKE_IDLE \ |
149 | .last_balance = jiffies, \ | 149 | | 1*SD_WAKE_AFFINE \ |
150 | .balance_interval = 1, \ | 150 | | 1*SD_WAKE_BALANCE \ |
151 | | 0*SD_SHARE_CPUPOWER \ | ||
152 | | 0*SD_POWERSAVINGS_BALANCE \ | ||
153 | | 0*SD_SHARE_PKG_RESOURCES \ | ||
154 | | 1*SD_SERIALIZE \ | ||
155 | | 1*SD_WAKE_IDLE_FAR \ | ||
156 | | 0*SD_PREFER_SIBLING \ | ||
157 | , \ | ||
158 | .last_balance = jiffies, \ | ||
159 | .balance_interval = 1, \ | ||
151 | } | 160 | } |
152 | 161 | ||
153 | #ifdef CONFIG_X86_64_ACPI_NUMA | 162 | #ifdef CONFIG_X86_64_ACPI_NUMA |
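The 1*FLAG / 0*FLAG rewrite of .flags changes no values; it keeps every available flag visible in the initializer so that turning one on or off is a single-character edit, with disabled flags contributing 0 to the OR. The idiom in isolation (flag values made up):

#define F_BALANCE	0x01
#define F_SERIALIZE	0x02
#define F_SIBLING	0x04

static const unsigned int sd_flags = 1*F_BALANCE
				   | 1*F_SERIALIZE
				   | 0*F_SIBLING;	/* off, but still documented */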
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h index bfd74c032fca..4da91ad69e0d 100644 --- a/arch/x86/include/asm/traps.h +++ b/arch/x86/include/asm/traps.h | |||
@@ -81,9 +81,7 @@ extern int panic_on_unrecovered_nmi; | |||
81 | 81 | ||
82 | void math_error(void __user *); | 82 | void math_error(void __user *); |
83 | void math_emulate(struct math_emu_info *); | 83 | void math_emulate(struct math_emu_info *); |
84 | #ifdef CONFIG_X86_32 | 84 | #ifndef CONFIG_X86_32 |
85 | unsigned long patch_espfix_desc(unsigned long, unsigned long); | ||
86 | #else | ||
87 | asmlinkage void smp_thermal_interrupt(void); | 85 | asmlinkage void smp_thermal_interrupt(void); |
88 | asmlinkage void mce_threshold_interrupt(void); | 86 | asmlinkage void mce_threshold_interrupt(void); |
89 | #endif | 87 | #endif |
diff --git a/arch/x86/include/asm/types.h b/arch/x86/include/asm/types.h index 09b97745772f..df1da20f4534 100644 --- a/arch/x86/include/asm/types.h +++ b/arch/x86/include/asm/types.h | |||
@@ -1,19 +1,11 @@ | |||
1 | #ifndef _ASM_X86_TYPES_H | 1 | #ifndef _ASM_X86_TYPES_H |
2 | #define _ASM_X86_TYPES_H | 2 | #define _ASM_X86_TYPES_H |
3 | 3 | ||
4 | #include <asm-generic/int-ll64.h> | 4 | #define dma_addr_t dma_addr_t |
5 | 5 | ||
6 | #ifndef __ASSEMBLY__ | 6 | #include <asm-generic/types.h> |
7 | |||
8 | typedef unsigned short umode_t; | ||
9 | 7 | ||
10 | #endif /* __ASSEMBLY__ */ | ||
11 | |||
12 | /* | ||
13 | * These aren't exported outside the kernel to avoid name space clashes | ||
14 | */ | ||
15 | #ifdef __KERNEL__ | 8 | #ifdef __KERNEL__ |
16 | |||
17 | #ifndef __ASSEMBLY__ | 9 | #ifndef __ASSEMBLY__ |
18 | 10 | ||
19 | typedef u64 dma64_addr_t; | 11 | typedef u64 dma64_addr_t; |
diff --git a/arch/x86/include/asm/ucontext.h b/arch/x86/include/asm/ucontext.h index 87324cf439d9..b7c29c8017f2 100644 --- a/arch/x86/include/asm/ucontext.h +++ b/arch/x86/include/asm/ucontext.h | |||
@@ -7,12 +7,6 @@ | |||
7 | * sigcontext struct (uc_mcontext). | 7 | * sigcontext struct (uc_mcontext). |
8 | */ | 8 | */ |
9 | 9 | ||
10 | struct ucontext { | 10 | #include <asm-generic/ucontext.h> |
11 | unsigned long uc_flags; | ||
12 | struct ucontext *uc_link; | ||
13 | stack_t uc_stack; | ||
14 | struct sigcontext uc_mcontext; | ||
15 | sigset_t uc_sigmask; /* mask last for extensibility */ | ||
16 | }; | ||
17 | 11 | ||
18 | #endif /* _ASM_X86_UCONTEXT_H */ | 12 | #endif /* _ASM_X86_UCONTEXT_H */ |
diff --git a/arch/x86/include/asm/unistd_32.h b/arch/x86/include/asm/unistd_32.h index 732a30706153..8deaada61bc8 100644 --- a/arch/x86/include/asm/unistd_32.h +++ b/arch/x86/include/asm/unistd_32.h | |||
@@ -345,6 +345,8 @@ | |||
345 | 345 | ||
346 | #ifdef __KERNEL__ | 346 | #ifdef __KERNEL__ |
347 | 347 | ||
348 | #define NR_syscalls 337 | ||
349 | |||
348 | #define __ARCH_WANT_IPC_PARSE_VERSION | 350 | #define __ARCH_WANT_IPC_PARSE_VERSION |
349 | #define __ARCH_WANT_OLD_READDIR | 351 | #define __ARCH_WANT_OLD_READDIR |
350 | #define __ARCH_WANT_OLD_STAT | 352 | #define __ARCH_WANT_OLD_STAT |
diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h index 900e1617e672..b9f3c60de5f7 100644 --- a/arch/x86/include/asm/unistd_64.h +++ b/arch/x86/include/asm/unistd_64.h | |||
@@ -688,6 +688,12 @@ __SYSCALL(__NR_perf_counter_open, sys_perf_counter_open) | |||
688 | #endif /* __NO_STUBS */ | 688 | #endif /* __NO_STUBS */ |
689 | 689 | ||
690 | #ifdef __KERNEL__ | 690 | #ifdef __KERNEL__ |
691 | |||
692 | #ifndef COMPILE_OFFSETS | ||
693 | #include <asm/asm-offsets.h> | ||
694 | #define NR_syscalls (__NR_syscall_max + 1) | ||
695 | #endif | ||
696 | |||
691 | /* | 697 | /* |
692 | * "Conditional" syscalls | 698 | * "Conditional" syscalls |
693 | * | 699 | * |
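On 64-bit, NR_syscalls is derived at build time rather than maintained by hand as on 32-bit: the asm-offsets generator expands every __SYSCALL() entry into one slot of a dummy array and exports the highest index as __NR_syscall_max, while COMPILE_OFFSETS keeps that generator from including the header whose values it is producing. Roughly (a simplified sketch of the pattern in asm-offsets_64.c, not the verbatim source):

#define __SYSCALL(nr, sym) [nr] = 1,
static const char syscalls[] = {
#include <asm/unistd_64.h>	/* expands to [0] = 1, [1] = 1, ... */
};
/* emitted as: DEFINE(__NR_syscall_max, sizeof(syscalls) - 1); */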
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 6b8ca3a0285d..67e929b89875 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c | |||
@@ -833,106 +833,6 @@ static int __init acpi_parse_madt_lapic_entries(void) | |||
833 | extern int es7000_plat; | 833 | extern int es7000_plat; |
834 | #endif | 834 | #endif |
835 | 835 | ||
836 | static struct { | ||
837 | int gsi_base; | ||
838 | int gsi_end; | ||
839 | } mp_ioapic_routing[MAX_IO_APICS]; | ||
840 | |||
841 | int mp_find_ioapic(int gsi) | ||
842 | { | ||
843 | int i = 0; | ||
844 | |||
845 | /* Find the IOAPIC that manages this GSI. */ | ||
846 | for (i = 0; i < nr_ioapics; i++) { | ||
847 | if ((gsi >= mp_ioapic_routing[i].gsi_base) | ||
848 | && (gsi <= mp_ioapic_routing[i].gsi_end)) | ||
849 | return i; | ||
850 | } | ||
851 | |||
852 | printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi); | ||
853 | return -1; | ||
854 | } | ||
855 | |||
856 | int mp_find_ioapic_pin(int ioapic, int gsi) | ||
857 | { | ||
858 | if (WARN_ON(ioapic == -1)) | ||
859 | return -1; | ||
860 | if (WARN_ON(gsi > mp_ioapic_routing[ioapic].gsi_end)) | ||
861 | return -1; | ||
862 | |||
863 | return gsi - mp_ioapic_routing[ioapic].gsi_base; | ||
864 | } | ||
865 | |||
866 | static u8 __init uniq_ioapic_id(u8 id) | ||
867 | { | ||
868 | #ifdef CONFIG_X86_32 | ||
869 | if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && | ||
870 | !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) | ||
871 | return io_apic_get_unique_id(nr_ioapics, id); | ||
872 | else | ||
873 | return id; | ||
874 | #else | ||
875 | int i; | ||
876 | DECLARE_BITMAP(used, 256); | ||
877 | bitmap_zero(used, 256); | ||
878 | for (i = 0; i < nr_ioapics; i++) { | ||
879 | struct mpc_ioapic *ia = &mp_ioapics[i]; | ||
880 | __set_bit(ia->apicid, used); | ||
881 | } | ||
882 | if (!test_bit(id, used)) | ||
883 | return id; | ||
884 | return find_first_zero_bit(used, 256); | ||
885 | #endif | ||
886 | } | ||
887 | |||
888 | static int bad_ioapic(unsigned long address) | ||
889 | { | ||
890 | if (nr_ioapics >= MAX_IO_APICS) { | ||
891 | printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded " | ||
892 | "(found %d)\n", MAX_IO_APICS, nr_ioapics); | ||
893 | panic("Recompile kernel with bigger MAX_IO_APICS!\n"); | ||
894 | } | ||
895 | if (!address) { | ||
896 | printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address" | ||
897 | " found in table, skipping!\n"); | ||
898 | return 1; | ||
899 | } | ||
900 | return 0; | ||
901 | } | ||
902 | |||
903 | void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) | ||
904 | { | ||
905 | int idx = 0; | ||
906 | |||
907 | if (bad_ioapic(address)) | ||
908 | return; | ||
909 | |||
910 | idx = nr_ioapics; | ||
911 | |||
912 | mp_ioapics[idx].type = MP_IOAPIC; | ||
913 | mp_ioapics[idx].flags = MPC_APIC_USABLE; | ||
914 | mp_ioapics[idx].apicaddr = address; | ||
915 | |||
916 | set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); | ||
917 | mp_ioapics[idx].apicid = uniq_ioapic_id(id); | ||
918 | mp_ioapics[idx].apicver = io_apic_get_version(idx); | ||
919 | |||
920 | /* | ||
921 | * Build basic GSI lookup table to facilitate gsi->io_apic lookups | ||
922 | * and to prevent reprogramming of IOAPIC pins (PCI GSIs). | ||
923 | */ | ||
924 | mp_ioapic_routing[idx].gsi_base = gsi_base; | ||
925 | mp_ioapic_routing[idx].gsi_end = gsi_base + | ||
926 | io_apic_get_redir_entries(idx); | ||
927 | |||
928 | printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, " | ||
929 | "GSI %d-%d\n", idx, mp_ioapics[idx].apicid, | ||
930 | mp_ioapics[idx].apicver, mp_ioapics[idx].apicaddr, | ||
931 | mp_ioapic_routing[idx].gsi_base, mp_ioapic_routing[idx].gsi_end); | ||
932 | |||
933 | nr_ioapics++; | ||
934 | } | ||
935 | |||
936 | int __init acpi_probe_gsi(void) | 836 | int __init acpi_probe_gsi(void) |
937 | { | 837 | { |
938 | int idx; | 838 | int idx; |
@@ -947,7 +847,7 @@ int __init acpi_probe_gsi(void) | |||
947 | 847 | ||
948 | max_gsi = 0; | 848 | max_gsi = 0; |
949 | for (idx = 0; idx < nr_ioapics; idx++) { | 849 | for (idx = 0; idx < nr_ioapics; idx++) { |
950 | gsi = mp_ioapic_routing[idx].gsi_end; | 850 | gsi = mp_gsi_routing[idx].gsi_end; |
951 | 851 | ||
952 | if (gsi > max_gsi) | 852 | if (gsi > max_gsi) |
953 | max_gsi = gsi; | 853 | max_gsi = gsi; |
@@ -1179,9 +1079,8 @@ static int __init acpi_parse_madt_ioapic_entries(void) | |||
1179 | * If MPS is present, it will handle them, | 1079 | * If MPS is present, it will handle them, |
1180 | * otherwise the system will stay in PIC mode | 1080 | * otherwise the system will stay in PIC mode |
1181 | */ | 1081 | */ |
1182 | if (acpi_disabled || acpi_noirq) { | 1082 | if (acpi_disabled || acpi_noirq) |
1183 | return -ENODEV; | 1083 | return -ENODEV; |
1184 | } | ||
1185 | 1084 | ||
1186 | if (!cpu_has_apic) | 1085 | if (!cpu_has_apic) |
1187 | return -ENODEV; | 1086 | return -ENODEV; |
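Most of the hunk above relocates the IOAPIC registration helpers out of boot.c (the mp_gsi_routing table takes over the gsi_base/gsi_end bookkeeping, as the acpi_probe_gsi() change shows); the id-allocation logic of the removed uniq_ioapic_id() is worth keeping in mind, since duplicate APIC ids must be remapped. A stand-alone rendition of that fallback (the kernel version uses a 256-bit bitmap and find_first_zero_bit()):

#include <stdio.h>
#include <string.h>

static int pick_unique_id(int wanted, const unsigned char *taken, int n)
{
	unsigned char used[256];
	int i;

	memset(used, 0, sizeof(used));
	for (i = 0; i < n; i++)
		used[taken[i]] = 1;
	if (!used[wanted])		/* requested id still free */
		return wanted;
	for (i = 0; i < 256; i++)	/* else first free slot */
		if (!used[i])
			return i;
	return -1;			/* all 256 ids taken */
}

int main(void)
{
	const unsigned char taken[] = { 0, 1, 2 };

	printf("%d\n", pick_unique_id(1, taken, 3));	/* prints 3 */
	return 0;
}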
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index f57658702571..de7353c0ce9c 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c | |||
@@ -2,6 +2,7 @@ | |||
2 | #include <linux/sched.h> | 2 | #include <linux/sched.h> |
3 | #include <linux/mutex.h> | 3 | #include <linux/mutex.h> |
4 | #include <linux/list.h> | 4 | #include <linux/list.h> |
5 | #include <linux/stringify.h> | ||
5 | #include <linux/kprobes.h> | 6 | #include <linux/kprobes.h> |
6 | #include <linux/mm.h> | 7 | #include <linux/mm.h> |
7 | #include <linux/vmalloc.h> | 8 | #include <linux/vmalloc.h> |
@@ -32,7 +33,7 @@ __setup("smp-alt-boot", bootonly); | |||
32 | #define smp_alt_once 1 | 33 | #define smp_alt_once 1 |
33 | #endif | 34 | #endif |
34 | 35 | ||
35 | static int debug_alternative; | 36 | static int __initdata_or_module debug_alternative; |
36 | 37 | ||
37 | static int __init debug_alt(char *str) | 38 | static int __init debug_alt(char *str) |
38 | { | 39 | { |
@@ -51,7 +52,7 @@ static int __init setup_noreplace_smp(char *str) | |||
51 | __setup("noreplace-smp", setup_noreplace_smp); | 52 | __setup("noreplace-smp", setup_noreplace_smp); |
52 | 53 | ||
53 | #ifdef CONFIG_PARAVIRT | 54 | #ifdef CONFIG_PARAVIRT |
54 | static int noreplace_paravirt = 0; | 55 | static int __initdata_or_module noreplace_paravirt = 0; |
55 | 56 | ||
56 | static int __init setup_noreplace_paravirt(char *str) | 57 | static int __init setup_noreplace_paravirt(char *str) |
57 | { | 58 | { |
@@ -64,16 +65,17 @@ __setup("noreplace-paravirt", setup_noreplace_paravirt); | |||
64 | #define DPRINTK(fmt, args...) if (debug_alternative) \ | 65 | #define DPRINTK(fmt, args...) if (debug_alternative) \ |
65 | printk(KERN_DEBUG fmt, args) | 66 | printk(KERN_DEBUG fmt, args) |
66 | 67 | ||
67 | #ifdef GENERIC_NOP1 | 68 | #if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64) |
68 | /* Use inline assembly to define this because the nops are defined | 69 | /* Use inline assembly to define this because the nops are defined |
69 | as inline assembly strings in the include files and we cannot | 70 | as inline assembly strings in the include files and we cannot |
70 | get them easily into strings. */ | 71 | get them easily into strings. */ |
71 | asm("\t.section .rodata, \"a\"\nintelnops: " | 72 | asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nintelnops: " |
72 | GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6 | 73 | GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6 |
73 | GENERIC_NOP7 GENERIC_NOP8 | 74 | GENERIC_NOP7 GENERIC_NOP8 |
74 | "\t.previous"); | 75 | "\t.previous"); |
75 | extern const unsigned char intelnops[]; | 76 | extern const unsigned char intelnops[]; |
76 | static const unsigned char *const intel_nops[ASM_NOP_MAX+1] = { | 77 | static const unsigned char *const __initconst_or_module |
78 | intel_nops[ASM_NOP_MAX+1] = { | ||
77 | NULL, | 79 | NULL, |
78 | intelnops, | 80 | intelnops, |
79 | intelnops + 1, | 81 | intelnops + 1, |
@@ -87,12 +89,13 @@ static const unsigned char *const intel_nops[ASM_NOP_MAX+1] = { | |||
87 | #endif | 89 | #endif |
88 | 90 | ||
89 | #ifdef K8_NOP1 | 91 | #ifdef K8_NOP1 |
90 | asm("\t.section .rodata, \"a\"\nk8nops: " | 92 | asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nk8nops: " |
91 | K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6 | 93 | K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6 |
92 | K8_NOP7 K8_NOP8 | 94 | K8_NOP7 K8_NOP8 |
93 | "\t.previous"); | 95 | "\t.previous"); |
94 | extern const unsigned char k8nops[]; | 96 | extern const unsigned char k8nops[]; |
95 | static const unsigned char *const k8_nops[ASM_NOP_MAX+1] = { | 97 | static const unsigned char *const __initconst_or_module |
98 | k8_nops[ASM_NOP_MAX+1] = { | ||
96 | NULL, | 99 | NULL, |
97 | k8nops, | 100 | k8nops, |
98 | k8nops + 1, | 101 | k8nops + 1, |
@@ -105,13 +108,14 @@ static const unsigned char *const k8_nops[ASM_NOP_MAX+1] = { | |||
105 | }; | 108 | }; |
106 | #endif | 109 | #endif |
107 | 110 | ||
108 | #ifdef K7_NOP1 | 111 | #if defined(K7_NOP1) && !defined(CONFIG_X86_64) |
109 | asm("\t.section .rodata, \"a\"\nk7nops: " | 112 | asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nk7nops: " |
110 | K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6 | 113 | K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6 |
111 | K7_NOP7 K7_NOP8 | 114 | K7_NOP7 K7_NOP8 |
112 | "\t.previous"); | 115 | "\t.previous"); |
113 | extern const unsigned char k7nops[]; | 116 | extern const unsigned char k7nops[]; |
114 | static const unsigned char *const k7_nops[ASM_NOP_MAX+1] = { | 117 | static const unsigned char *const __initconst_or_module |
118 | k7_nops[ASM_NOP_MAX+1] = { | ||
115 | NULL, | 119 | NULL, |
116 | k7nops, | 120 | k7nops, |
117 | k7nops + 1, | 121 | k7nops + 1, |
@@ -125,12 +129,13 @@ static const unsigned char *const k7_nops[ASM_NOP_MAX+1] = { | |||
125 | #endif | 129 | #endif |
126 | 130 | ||
127 | #ifdef P6_NOP1 | 131 | #ifdef P6_NOP1 |
128 | asm("\t.section .rodata, \"a\"\np6nops: " | 132 | asm("\t" __stringify(__INITRODATA_OR_MODULE) "\np6nops: " |
129 | P6_NOP1 P6_NOP2 P6_NOP3 P6_NOP4 P6_NOP5 P6_NOP6 | 133 | P6_NOP1 P6_NOP2 P6_NOP3 P6_NOP4 P6_NOP5 P6_NOP6 |
130 | P6_NOP7 P6_NOP8 | 134 | P6_NOP7 P6_NOP8 |
131 | "\t.previous"); | 135 | "\t.previous"); |
132 | extern const unsigned char p6nops[]; | 136 | extern const unsigned char p6nops[]; |
133 | static const unsigned char *const p6_nops[ASM_NOP_MAX+1] = { | 137 | static const unsigned char *const __initconst_or_module |
138 | p6_nops[ASM_NOP_MAX+1] = { | ||
134 | NULL, | 139 | NULL, |
135 | p6nops, | 140 | p6nops, |
136 | p6nops + 1, | 141 | p6nops + 1, |
@@ -146,7 +151,7 @@ static const unsigned char *const p6_nops[ASM_NOP_MAX+1] = { | |||
146 | #ifdef CONFIG_X86_64 | 151 | #ifdef CONFIG_X86_64 |
147 | 152 | ||
148 | extern char __vsyscall_0; | 153 | extern char __vsyscall_0; |
149 | const unsigned char *const *find_nop_table(void) | 154 | static const unsigned char *const *__init_or_module find_nop_table(void) |
150 | { | 155 | { |
151 | if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && | 156 | if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && |
152 | boot_cpu_has(X86_FEATURE_NOPL)) | 157 | boot_cpu_has(X86_FEATURE_NOPL)) |
@@ -157,7 +162,7 @@ const unsigned char *const *find_nop_table(void) | |||
157 | 162 | ||
158 | #else /* CONFIG_X86_64 */ | 163 | #else /* CONFIG_X86_64 */ |
159 | 164 | ||
160 | const unsigned char *const *find_nop_table(void) | 165 | static const unsigned char *const *__init_or_module find_nop_table(void) |
161 | { | 166 | { |
162 | if (boot_cpu_has(X86_FEATURE_K8)) | 167 | if (boot_cpu_has(X86_FEATURE_K8)) |
163 | return k8_nops; | 168 | return k8_nops; |
@@ -172,7 +177,7 @@ const unsigned char *const *find_nop_table(void) | |||
172 | #endif /* CONFIG_X86_64 */ | 177 | #endif /* CONFIG_X86_64 */ |
173 | 178 | ||
174 | /* Use this to add nops to a buffer, then text_poke the whole buffer. */ | 179 | /* Use this to add nops to a buffer, then text_poke the whole buffer. */ |
175 | void add_nops(void *insns, unsigned int len) | 180 | static void __init_or_module add_nops(void *insns, unsigned int len) |
176 | { | 181 | { |
177 | const unsigned char *const *noptable = find_nop_table(); | 182 | const unsigned char *const *noptable = find_nop_table(); |
178 | 183 | ||
@@ -185,10 +190,10 @@ void add_nops(void *insns, unsigned int len) | |||
185 | len -= noplen; | 190 | len -= noplen; |
186 | } | 191 | } |
187 | } | 192 | } |
188 | EXPORT_SYMBOL_GPL(add_nops); | ||
189 | 193 | ||
190 | extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; | 194 | extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; |
191 | extern u8 *__smp_locks[], *__smp_locks_end[]; | 195 | extern u8 *__smp_locks[], *__smp_locks_end[]; |
196 | static void *text_poke_early(void *addr, const void *opcode, size_t len); | ||
192 | 197 | ||
193 | /* Replace instructions with better alternatives for this CPU type. | 198 | /* Replace instructions with better alternatives for this CPU type. |
194 | This runs before SMP is initialized to avoid SMP problems with | 199 | This runs before SMP is initialized to avoid SMP problems with |
@@ -196,7 +201,8 @@ extern u8 *__smp_locks[], *__smp_locks_end[]; | |||
196 | APs have less capabilities than the boot processor are not handled. | 201 | APs have less capabilities than the boot processor are not handled. |
197 | Tough. Make sure you disable such features by hand. */ | 202 | Tough. Make sure you disable such features by hand. */ |
198 | 203 | ||
199 | void apply_alternatives(struct alt_instr *start, struct alt_instr *end) | 204 | void __init_or_module apply_alternatives(struct alt_instr *start, |
205 | struct alt_instr *end) | ||
200 | { | 206 | { |
201 | struct alt_instr *a; | 207 | struct alt_instr *a; |
202 | char insnbuf[MAX_PATCH_LEN]; | 208 | char insnbuf[MAX_PATCH_LEN]; |
@@ -279,9 +285,10 @@ static LIST_HEAD(smp_alt_modules); | |||
279 | static DEFINE_MUTEX(smp_alt); | 285 | static DEFINE_MUTEX(smp_alt); |
280 | static int smp_mode = 1; /* protected by smp_alt */ | 286 | static int smp_mode = 1; /* protected by smp_alt */ |
281 | 287 | ||
282 | void alternatives_smp_module_add(struct module *mod, char *name, | 288 | void __init_or_module alternatives_smp_module_add(struct module *mod, |
283 | void *locks, void *locks_end, | 289 | char *name, |
284 | void *text, void *text_end) | 290 | void *locks, void *locks_end, |
291 | void *text, void *text_end) | ||
285 | { | 292 | { |
286 | struct smp_alt_module *smp; | 293 | struct smp_alt_module *smp; |
287 | 294 | ||
@@ -317,7 +324,7 @@ void alternatives_smp_module_add(struct module *mod, char *name, | |||
317 | mutex_unlock(&smp_alt); | 324 | mutex_unlock(&smp_alt); |
318 | } | 325 | } |
319 | 326 | ||
320 | void alternatives_smp_module_del(struct module *mod) | 327 | void __init_or_module alternatives_smp_module_del(struct module *mod) |
321 | { | 328 | { |
322 | struct smp_alt_module *item; | 329 | struct smp_alt_module *item; |
323 | 330 | ||
@@ -386,8 +393,8 @@ void alternatives_smp_switch(int smp) | |||
386 | #endif | 393 | #endif |
387 | 394 | ||
388 | #ifdef CONFIG_PARAVIRT | 395 | #ifdef CONFIG_PARAVIRT |
389 | void apply_paravirt(struct paravirt_patch_site *start, | 396 | void __init_or_module apply_paravirt(struct paravirt_patch_site *start, |
390 | struct paravirt_patch_site *end) | 397 | struct paravirt_patch_site *end) |
391 | { | 398 | { |
392 | struct paravirt_patch_site *p; | 399 | struct paravirt_patch_site *p; |
393 | char insnbuf[MAX_PATCH_LEN]; | 400 | char insnbuf[MAX_PATCH_LEN]; |
@@ -485,13 +492,14 @@ void __init alternative_instructions(void) | |||
485 | * instructions. And on the local CPU you need to be protected against NMI or MCE | 492 | * instructions. And on the local CPU you need to be protected against NMI or MCE |
486 | * handlers seeing an inconsistent instruction while you patch. | 493 | * handlers seeing an inconsistent instruction while you patch. |
487 | */ | 494 | */ |
488 | void *text_poke_early(void *addr, const void *opcode, size_t len) | 495 | static void *__init_or_module text_poke_early(void *addr, const void *opcode, |
496 | size_t len) | ||
489 | { | 497 | { |
490 | unsigned long flags; | 498 | unsigned long flags; |
491 | local_irq_save(flags); | 499 | local_irq_save(flags); |
492 | memcpy(addr, opcode, len); | 500 | memcpy(addr, opcode, len); |
493 | local_irq_restore(flags); | ||
494 | sync_core(); | 501 | sync_core(); |
502 | local_irq_restore(flags); | ||
495 | /* Could also do a CLFLUSH here to speed up CPU recovery; but | 503 | /* Could also do a CLFLUSH here to speed up CPU recovery; but |
496 | that causes hangs on some VIA CPUs. */ | 504 | that causes hangs on some VIA CPUs. */ |
497 | return addr; | 505 | return addr; |
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 6c99f5037801..98f230f6a28d 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c | |||
@@ -41,9 +41,13 @@ static DEFINE_RWLOCK(amd_iommu_devtable_lock); | |||
41 | static LIST_HEAD(iommu_pd_list); | 41 | static LIST_HEAD(iommu_pd_list); |
42 | static DEFINE_SPINLOCK(iommu_pd_list_lock); | 42 | static DEFINE_SPINLOCK(iommu_pd_list_lock); |
43 | 43 | ||
44 | #ifdef CONFIG_IOMMU_API | 44 | /* |
45 | * Domain for untranslated devices - only allocated | ||
46 | * if iommu=pt passed on kernel cmd line. | ||
47 | */ | ||
48 | static struct protection_domain *pt_domain; | ||
49 | |||
45 | static struct iommu_ops amd_iommu_ops; | 50 | static struct iommu_ops amd_iommu_ops; |
46 | #endif | ||
47 | 51 | ||
48 | /* | 52 | /* |
49 | * general struct to manage commands send to an IOMMU | 53 | * general struct to manage commands send to an IOMMU |
@@ -55,16 +59,16 @@ struct iommu_cmd { | |||
55 | static int dma_ops_unity_map(struct dma_ops_domain *dma_dom, | 59 | static int dma_ops_unity_map(struct dma_ops_domain *dma_dom, |
56 | struct unity_map_entry *e); | 60 | struct unity_map_entry *e); |
57 | static struct dma_ops_domain *find_protection_domain(u16 devid); | 61 | static struct dma_ops_domain *find_protection_domain(u16 devid); |
58 | static u64* alloc_pte(struct protection_domain *dom, | 62 | static u64 *alloc_pte(struct protection_domain *domain, |
59 | unsigned long address, u64 | 63 | unsigned long address, int end_lvl, |
60 | **pte_page, gfp_t gfp); | 64 | u64 **pte_page, gfp_t gfp); |
61 | static void dma_ops_reserve_addresses(struct dma_ops_domain *dom, | 65 | static void dma_ops_reserve_addresses(struct dma_ops_domain *dom, |
62 | unsigned long start_page, | 66 | unsigned long start_page, |
63 | unsigned int pages); | 67 | unsigned int pages); |
64 | 68 | static void reset_iommu_command_buffer(struct amd_iommu *iommu); | |
65 | #ifndef BUS_NOTIFY_UNBOUND_DRIVER | 69 | static u64 *fetch_pte(struct protection_domain *domain, |
66 | #define BUS_NOTIFY_UNBOUND_DRIVER 0x0005 | 70 | unsigned long address, int map_size); |
67 | #endif | 71 | static void update_domain(struct protection_domain *domain); |
68 | 72 | ||
69 | #ifdef CONFIG_AMD_IOMMU_STATS | 73 | #ifdef CONFIG_AMD_IOMMU_STATS |
70 | 74 | ||
@@ -138,7 +142,25 @@ static int iommu_has_npcache(struct amd_iommu *iommu) | |||
138 | * | 142 | * |
139 | ****************************************************************************/ | 143 | ****************************************************************************/ |
140 | 144 | ||
141 | static void iommu_print_event(void *__evt) | 145 | static void dump_dte_entry(u16 devid) |
146 | { | ||
147 | int i; | ||
148 | |||
149 | for (i = 0; i < 8; ++i) | ||
150 | pr_err("AMD-Vi: DTE[%d]: %08x\n", i, | ||
151 | amd_iommu_dev_table[devid].data[i]); | ||
152 | } | ||
153 | |||
154 | static void dump_command(unsigned long phys_addr) | ||
155 | { | ||
156 | struct iommu_cmd *cmd = phys_to_virt(phys_addr); | ||
157 | int i; | ||
158 | |||
159 | for (i = 0; i < 4; ++i) | ||
160 | pr_err("AMD-Vi: CMD[%d]: %08x\n", i, cmd->data[i]); | ||
161 | } | ||
162 | |||
163 | static void iommu_print_event(struct amd_iommu *iommu, void *__evt) | ||
142 | { | 164 | { |
143 | u32 *event = __evt; | 165 | u32 *event = __evt; |
144 | int type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK; | 166 | int type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK; |
@@ -147,7 +169,7 @@ static void iommu_print_event(void *__evt) | |||
147 | int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK; | 169 | int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK; |
148 | u64 address = (u64)(((u64)event[3]) << 32) | event[2]; | 170 | u64 address = (u64)(((u64)event[3]) << 32) | event[2]; |
149 | 171 | ||
150 | printk(KERN_ERR "AMD IOMMU: Event logged ["); | 172 | printk(KERN_ERR "AMD-Vi: Event logged ["); |
151 | 173 | ||
152 | switch (type) { | 174 | switch (type) { |
153 | case EVENT_TYPE_ILL_DEV: | 175 | case EVENT_TYPE_ILL_DEV: |
@@ -155,6 +177,7 @@ static void iommu_print_event(void *__evt) | |||
155 | "address=0x%016llx flags=0x%04x]\n", | 177 | "address=0x%016llx flags=0x%04x]\n", |
156 | PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid), | 178 | PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid), |
157 | address, flags); | 179 | address, flags); |
180 | dump_dte_entry(devid); | ||
158 | break; | 181 | break; |
159 | case EVENT_TYPE_IO_FAULT: | 182 | case EVENT_TYPE_IO_FAULT: |
160 | printk("IO_PAGE_FAULT device=%02x:%02x.%x " | 183 | printk("IO_PAGE_FAULT device=%02x:%02x.%x " |
@@ -176,6 +199,8 @@ static void iommu_print_event(void *__evt) | |||
176 | break; | 199 | break; |
177 | case EVENT_TYPE_ILL_CMD: | 200 | case EVENT_TYPE_ILL_CMD: |
178 | printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address); | 201 | printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address); |
202 | reset_iommu_command_buffer(iommu); | ||
203 | dump_command(address); | ||
179 | break; | 204 | break; |
180 | case EVENT_TYPE_CMD_HARD_ERR: | 205 | case EVENT_TYPE_CMD_HARD_ERR: |
181 | printk("COMMAND_HARDWARE_ERROR address=0x%016llx " | 206 | printk("COMMAND_HARDWARE_ERROR address=0x%016llx " |
@@ -209,7 +234,7 @@ static void iommu_poll_events(struct amd_iommu *iommu) | |||
209 | tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); | 234 | tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); |
210 | 235 | ||
211 | while (head != tail) { | 236 | while (head != tail) { |
212 | iommu_print_event(iommu->evt_buf + head); | 237 | iommu_print_event(iommu, iommu->evt_buf + head); |
213 | head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size; | 238 | head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size; |
214 | } | 239 | } |
215 | 240 | ||
@@ -296,8 +321,11 @@ static void __iommu_wait_for_completion(struct amd_iommu *iommu) | |||
296 | status &= ~MMIO_STATUS_COM_WAIT_INT_MASK; | 321 | status &= ~MMIO_STATUS_COM_WAIT_INT_MASK; |
297 | writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET); | 322 | writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET); |
298 | 323 | ||
299 | if (unlikely(i == EXIT_LOOP_COUNT)) | 324 | if (unlikely(i == EXIT_LOOP_COUNT)) { |
300 | panic("AMD IOMMU: Completion wait loop failed\n"); | 325 | spin_unlock(&iommu->lock); |
326 | reset_iommu_command_buffer(iommu); | ||
327 | spin_lock(&iommu->lock); | ||
328 | } | ||
301 | } | 329 | } |
302 | 330 | ||
303 | /* | 331 | /* |
@@ -445,47 +473,78 @@ static void iommu_flush_tlb_pde(struct amd_iommu *iommu, u16 domid) | |||
445 | } | 473 | } |
446 | 474 | ||
447 | /* | 475 | /* |
476 | * This function flushes one domain on one IOMMU | ||
477 | */ | ||
478 | static void flush_domain_on_iommu(struct amd_iommu *iommu, u16 domid) | ||
479 | { | ||
480 | struct iommu_cmd cmd; | ||
481 | unsigned long flags; | ||
482 | |||
483 | __iommu_build_inv_iommu_pages(&cmd, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, | ||
484 | domid, 1, 1); | ||
485 | |||
486 | spin_lock_irqsave(&iommu->lock, flags); | ||
487 | __iommu_queue_command(iommu, &cmd); | ||
488 | __iommu_completion_wait(iommu); | ||
489 | __iommu_wait_for_completion(iommu); | ||
490 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
491 | } | ||
492 | |||
493 | static void flush_all_domains_on_iommu(struct amd_iommu *iommu) | ||
494 | { | ||
495 | int i; | ||
496 | |||
497 | for (i = 1; i < MAX_DOMAIN_ID; ++i) { | ||
498 | if (!test_bit(i, amd_iommu_pd_alloc_bitmap)) | ||
499 | continue; | ||
500 | flush_domain_on_iommu(iommu, i); | ||
501 | } | ||
502 | |||
503 | } | ||
504 | |||
505 | /* | ||
448 | * This function is used to flush the IO/TLB for a given protection domain | 506 | * This function is used to flush the IO/TLB for a given protection domain |
449 | * on every IOMMU in the system | 507 | * on every IOMMU in the system |
450 | */ | 508 | */ |
451 | static void iommu_flush_domain(u16 domid) | 509 | static void iommu_flush_domain(u16 domid) |
452 | { | 510 | { |
453 | unsigned long flags; | ||
454 | struct amd_iommu *iommu; | 511 | struct amd_iommu *iommu; |
455 | struct iommu_cmd cmd; | ||
456 | 512 | ||
457 | INC_STATS_COUNTER(domain_flush_all); | 513 | INC_STATS_COUNTER(domain_flush_all); |
458 | 514 | ||
459 | __iommu_build_inv_iommu_pages(&cmd, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, | 515 | for_each_iommu(iommu) |
460 | domid, 1, 1); | 516 | flush_domain_on_iommu(iommu, domid); |
461 | |||
462 | for_each_iommu(iommu) { | ||
463 | spin_lock_irqsave(&iommu->lock, flags); | ||
464 | __iommu_queue_command(iommu, &cmd); | ||
465 | __iommu_completion_wait(iommu); | ||
466 | __iommu_wait_for_completion(iommu); | ||
467 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
468 | } | ||
469 | } | 517 | } |
470 | 518 | ||
471 | void amd_iommu_flush_all_domains(void) | 519 | void amd_iommu_flush_all_domains(void) |
472 | { | 520 | { |
521 | struct amd_iommu *iommu; | ||
522 | |||
523 | for_each_iommu(iommu) | ||
524 | flush_all_domains_on_iommu(iommu); | ||
525 | } | ||
526 | |||
527 | static void flush_all_devices_for_iommu(struct amd_iommu *iommu) | ||
528 | { | ||
473 | int i; | 529 | int i; |
474 | 530 | ||
475 | for (i = 1; i < MAX_DOMAIN_ID; ++i) { | 531 | for (i = 0; i <= amd_iommu_last_bdf; ++i) { |
476 | if (!test_bit(i, amd_iommu_pd_alloc_bitmap)) | 532 | if (iommu != amd_iommu_rlookup_table[i]) |
477 | continue; | 533 | continue; |
478 | iommu_flush_domain(i); | 534 | |
535 | iommu_queue_inv_dev_entry(iommu, i); | ||
536 | iommu_completion_wait(iommu); | ||
479 | } | 537 | } |
480 | } | 538 | } |
481 | 539 | ||
482 | void amd_iommu_flush_all_devices(void) | 540 | static void flush_devices_by_domain(struct protection_domain *domain) |
483 | { | 541 | { |
484 | struct amd_iommu *iommu; | 542 | struct amd_iommu *iommu; |
485 | int i; | 543 | int i; |
486 | 544 | ||
487 | for (i = 0; i <= amd_iommu_last_bdf; ++i) { | 545 | for (i = 0; i <= amd_iommu_last_bdf; ++i) { |
488 | if (amd_iommu_pd_table[i] == NULL) | 546 | if ((domain == NULL && amd_iommu_pd_table[i] == NULL) || |
547 | (amd_iommu_pd_table[i] != domain)) | ||
489 | continue; | 548 | continue; |
490 | 549 | ||
491 | iommu = amd_iommu_rlookup_table[i]; | 550 | iommu = amd_iommu_rlookup_table[i]; |
@@ -497,6 +556,27 @@ void amd_iommu_flush_all_devices(void) | |||
497 | } | 556 | } |
498 | } | 557 | } |
499 | 558 | ||
559 | static void reset_iommu_command_buffer(struct amd_iommu *iommu) | ||
560 | { | ||
561 | pr_err("AMD-Vi: Resetting IOMMU command buffer\n"); | ||
562 | |||
563 | if (iommu->reset_in_progress) | ||
564 | panic("AMD-Vi: ILLEGAL_COMMAND_ERROR while resetting command buffer\n"); | ||
565 | |||
566 | iommu->reset_in_progress = true; | ||
567 | |||
568 | amd_iommu_reset_cmd_buffer(iommu); | ||
569 | flush_all_devices_for_iommu(iommu); | ||
570 | flush_all_domains_on_iommu(iommu); | ||
571 | |||
572 | iommu->reset_in_progress = false; | ||
573 | } | ||
574 | |||
575 | void amd_iommu_flush_all_devices(void) | ||
576 | { | ||
577 | flush_devices_by_domain(NULL); | ||
578 | } | ||
579 | |||
500 | /**************************************************************************** | 580 | /**************************************************************************** |
501 | * | 581 | * |
502 | * The functions below are used to create the page table mappings for | 582 | * The functions below are used to create the page table mappings for |
@@ -514,18 +594,21 @@ void amd_iommu_flush_all_devices(void) | |||
514 | static int iommu_map_page(struct protection_domain *dom, | 594 | static int iommu_map_page(struct protection_domain *dom, |
515 | unsigned long bus_addr, | 595 | unsigned long bus_addr, |
516 | unsigned long phys_addr, | 596 | unsigned long phys_addr, |
517 | int prot) | 597 | int prot, |
598 | int map_size) | ||
518 | { | 599 | { |
519 | u64 __pte, *pte; | 600 | u64 __pte, *pte; |
520 | 601 | ||
521 | bus_addr = PAGE_ALIGN(bus_addr); | 602 | bus_addr = PAGE_ALIGN(bus_addr); |
522 | phys_addr = PAGE_ALIGN(phys_addr); | 603 | phys_addr = PAGE_ALIGN(phys_addr); |
523 | 604 | ||
524 | /* only support 512GB address spaces for now */ | 605 | BUG_ON(!PM_ALIGNED(map_size, bus_addr)); |
525 | if (bus_addr > IOMMU_MAP_SIZE_L3 || !(prot & IOMMU_PROT_MASK)) | 606 | BUG_ON(!PM_ALIGNED(map_size, phys_addr)); |
607 | |||
608 | if (!(prot & IOMMU_PROT_MASK)) | ||
526 | return -EINVAL; | 609 | return -EINVAL; |
527 | 610 | ||
528 | pte = alloc_pte(dom, bus_addr, NULL, GFP_KERNEL); | 611 | pte = alloc_pte(dom, bus_addr, map_size, NULL, GFP_KERNEL); |
529 | 612 | ||
530 | if (IOMMU_PTE_PRESENT(*pte)) | 613 | if (IOMMU_PTE_PRESENT(*pte)) |
531 | return -EBUSY; | 614 | return -EBUSY; |
@@ -538,29 +621,18 @@ static int iommu_map_page(struct protection_domain *dom, | |||
538 | 621 | ||
539 | *pte = __pte; | 622 | *pte = __pte; |
540 | 623 | ||
624 | update_domain(dom); | ||
625 | |||
541 | return 0; | 626 | return 0; |
542 | } | 627 | } |
543 | 628 | ||
544 | static void iommu_unmap_page(struct protection_domain *dom, | 629 | static void iommu_unmap_page(struct protection_domain *dom, |
545 | unsigned long bus_addr) | 630 | unsigned long bus_addr, int map_size) |
546 | { | 631 | { |
547 | u64 *pte; | 632 | u64 *pte = fetch_pte(dom, bus_addr, map_size); |
548 | |||
549 | pte = &dom->pt_root[IOMMU_PTE_L2_INDEX(bus_addr)]; | ||
550 | |||
551 | if (!IOMMU_PTE_PRESENT(*pte)) | ||
552 | return; | ||
553 | |||
554 | pte = IOMMU_PTE_PAGE(*pte); | ||
555 | pte = &pte[IOMMU_PTE_L1_INDEX(bus_addr)]; | ||
556 | 633 | ||
557 | if (!IOMMU_PTE_PRESENT(*pte)) | 634 | if (pte) |
558 | return; | 635 | *pte = 0; |
559 | |||
560 | pte = IOMMU_PTE_PAGE(*pte); | ||
561 | pte = &pte[IOMMU_PTE_L1_INDEX(bus_addr)]; | ||
562 | |||
563 | *pte = 0; | ||
564 | } | 636 | } |
565 | 637 | ||
566 | /* | 638 | /* |
@@ -615,7 +687,8 @@ static int dma_ops_unity_map(struct dma_ops_domain *dma_dom, | |||
615 | 687 | ||
616 | for (addr = e->address_start; addr < e->address_end; | 688 | for (addr = e->address_start; addr < e->address_end; |
617 | addr += PAGE_SIZE) { | 689 | addr += PAGE_SIZE) { |
618 | ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot); | 690 | ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot, |
691 | PM_MAP_4k); | ||
619 | if (ret) | 692 | if (ret) |
620 | return ret; | 693 | return ret; |
621 | /* | 694 | /* |
@@ -670,24 +743,29 @@ static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom, | |||
670 | * This function checks if there is a PTE for a given dma address. If | 743 | * This function checks if there is a PTE for a given dma address. If |
671 | * there is one, it returns the pointer to it. | 744 | * there is one, it returns the pointer to it. |
672 | */ | 745 | */ |
673 | static u64* fetch_pte(struct protection_domain *domain, | 746 | static u64 *fetch_pte(struct protection_domain *domain, |
674 | unsigned long address) | 747 | unsigned long address, int map_size) |
675 | { | 748 | { |
749 | int level; | ||
676 | u64 *pte; | 750 | u64 *pte; |
677 | 751 | ||
678 | pte = &domain->pt_root[IOMMU_PTE_L2_INDEX(address)]; | 752 | level = domain->mode - 1; |
753 | pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; | ||
679 | 754 | ||
680 | if (!IOMMU_PTE_PRESENT(*pte)) | 755 | while (level > map_size) { |
681 | return NULL; | 756 | if (!IOMMU_PTE_PRESENT(*pte)) |
757 | return NULL; | ||
682 | 758 | ||
683 | pte = IOMMU_PTE_PAGE(*pte); | 759 | level -= 1; |
684 | pte = &pte[IOMMU_PTE_L1_INDEX(address)]; | ||
685 | 760 | ||
686 | if (!IOMMU_PTE_PRESENT(*pte)) | 761 | pte = IOMMU_PTE_PAGE(*pte); |
687 | return NULL; | 762 | pte = &pte[PM_LEVEL_INDEX(level, address)]; |
688 | 763 | ||
689 | pte = IOMMU_PTE_PAGE(*pte); | 764 | if ((PM_PTE_LEVEL(*pte) == 0) && level != map_size) { |
690 | pte = &pte[IOMMU_PTE_L0_INDEX(address)]; | 765 | pte = NULL; |
766 | break; | ||
767 | } | ||
768 | } | ||
691 | 769 | ||
692 | return pte; | 770 | return pte; |
693 | } | 771 | } |
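The rewritten fetch_pte() walks however many levels the domain currently has, extracting a 9-bit table index per level instead of using the fixed L2/L1/L0 helpers. A sketch of the index arithmetic this relies on; the macro bodies here are assumptions consistent with a 4KB-page, 9-bits-per-level layout, not copies of the driver header:

    /* Assumed: 12 page-offset bits, then 9 index bits per level. */
    #define PM_LEVEL_SHIFT(lvl)    (12 + 9 * (lvl))
    #define PM_LEVEL_INDEX(lvl, a) (((a) >> PM_LEVEL_SHIFT(lvl)) & 0x1ffULL)

    /*
     * Example: address 0x40000000 (1 GiB) yields
     *   PM_LEVEL_INDEX(2, a) == 1,
     *   PM_LEVEL_INDEX(1, a) == 0,
     *   PM_LEVEL_INDEX(0, a) == 0,
     * so a three-level walk takes entry 1, then 0, then 0.
     */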
@@ -727,7 +805,7 @@ static int alloc_new_range(struct amd_iommu *iommu, | |||
727 | u64 *pte, *pte_page; | 805 | u64 *pte, *pte_page; |
728 | 806 | ||
729 | for (i = 0; i < num_ptes; ++i) { | 807 | for (i = 0; i < num_ptes; ++i) { |
730 | pte = alloc_pte(&dma_dom->domain, address, | 808 | pte = alloc_pte(&dma_dom->domain, address, PM_MAP_4k, |
731 | &pte_page, gfp); | 809 | &pte_page, gfp); |
732 | if (!pte) | 810 | if (!pte) |
733 | goto out_free; | 811 | goto out_free; |
@@ -760,16 +838,20 @@ static int alloc_new_range(struct amd_iommu *iommu, | |||
760 | for (i = dma_dom->aperture[index]->offset; | 838 | for (i = dma_dom->aperture[index]->offset; |
761 | i < dma_dom->aperture_size; | 839 | i < dma_dom->aperture_size; |
762 | i += PAGE_SIZE) { | 840 | i += PAGE_SIZE) { |
763 | u64 *pte = fetch_pte(&dma_dom->domain, i); | 841 | u64 *pte = fetch_pte(&dma_dom->domain, i, PM_MAP_4k); |
764 | if (!pte || !IOMMU_PTE_PRESENT(*pte)) | 842 | if (!pte || !IOMMU_PTE_PRESENT(*pte)) |
765 | continue; | 843 | continue; |
766 | 844 | ||
767 | dma_ops_reserve_addresses(dma_dom, i << PAGE_SHIFT, 1); | 845 | dma_ops_reserve_addresses(dma_dom, i << PAGE_SHIFT, 1); |
768 | } | 846 | } |
769 | 847 | ||
848 | update_domain(&dma_dom->domain); | ||
849 | |||
770 | return 0; | 850 | return 0; |
771 | 851 | ||
772 | out_free: | 852 | out_free: |
853 | update_domain(&dma_dom->domain); | ||
854 | |||
773 | free_page((unsigned long)dma_dom->aperture[index]->bitmap); | 855 | free_page((unsigned long)dma_dom->aperture[index]->bitmap); |
774 | 856 | ||
775 | kfree(dma_dom->aperture[index]); | 857 | kfree(dma_dom->aperture[index]); |
@@ -1009,7 +1091,7 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu) | |||
1009 | dma_dom->domain.id = domain_id_alloc(); | 1091 | dma_dom->domain.id = domain_id_alloc(); |
1010 | if (dma_dom->domain.id == 0) | 1092 | if (dma_dom->domain.id == 0) |
1011 | goto free_dma_dom; | 1093 | goto free_dma_dom; |
1012 | dma_dom->domain.mode = PAGE_MODE_3_LEVEL; | 1094 | dma_dom->domain.mode = PAGE_MODE_2_LEVEL; |
1013 | dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL); | 1095 | dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL); |
1014 | dma_dom->domain.flags = PD_DMA_OPS_MASK; | 1096 | dma_dom->domain.flags = PD_DMA_OPS_MASK; |
1015 | dma_dom->domain.priv = dma_dom; | 1097 | dma_dom->domain.priv = dma_dom; |
@@ -1063,6 +1145,41 @@ static struct protection_domain *domain_for_device(u16 devid) | |||
1063 | return dom; | 1145 | return dom; |
1064 | } | 1146 | } |
1065 | 1147 | ||
1148 | static void set_dte_entry(u16 devid, struct protection_domain *domain) | ||
1149 | { | ||
1150 | u64 pte_root = virt_to_phys(domain->pt_root); | ||
1151 | |||
1152 | pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK) | ||
1153 | << DEV_ENTRY_MODE_SHIFT; | ||
1154 | pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV; | ||
1155 | |||
1156 | amd_iommu_dev_table[devid].data[2] = domain->id; | ||
1157 | amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root); | ||
1158 | amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root); | ||
1159 | |||
1160 | amd_iommu_pd_table[devid] = domain; | ||
1161 | } | ||
1162 | |||
1163 | /* | ||
1164 | * If a device is not yet associated with a domain, this function | ||
1165 | * associates it with the domain and makes it visible to the hardware | ||
1166 | */ | ||
1167 | static void __attach_device(struct amd_iommu *iommu, | ||
1168 | struct protection_domain *domain, | ||
1169 | u16 devid) | ||
1170 | { | ||
1171 | /* lock domain */ | ||
1172 | spin_lock(&domain->lock); | ||
1173 | |||
1174 | /* update DTE entry */ | ||
1175 | set_dte_entry(devid, domain); | ||
1176 | |||
1177 | domain->dev_cnt += 1; | ||
1178 | |||
1179 | /* ready */ | ||
1180 | spin_unlock(&domain->lock); | ||
1181 | } | ||
1182 | |||
1066 | /* | 1183 | /* |
1067 | * If a device is not yet associated with a domain, this function | 1184 | * If a device is not yet associated with a domain, this function |
1068 | * associates it with the domain and makes it visible to the hardware | 1185 | * associates it with the domain and makes it visible to the hardware |
@@ -1072,27 +1189,16 @@ static void attach_device(struct amd_iommu *iommu, | |||
1072 | u16 devid) | 1189 | u16 devid) |
1073 | { | 1190 | { |
1074 | unsigned long flags; | 1191 | unsigned long flags; |
1075 | u64 pte_root = virt_to_phys(domain->pt_root); | ||
1076 | |||
1077 | domain->dev_cnt += 1; | ||
1078 | |||
1079 | pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK) | ||
1080 | << DEV_ENTRY_MODE_SHIFT; | ||
1081 | pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV; | ||
1082 | 1192 | ||
1083 | write_lock_irqsave(&amd_iommu_devtable_lock, flags); | 1193 | write_lock_irqsave(&amd_iommu_devtable_lock, flags); |
1084 | amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root); | 1194 | __attach_device(iommu, domain, devid); |
1085 | amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root); | ||
1086 | amd_iommu_dev_table[devid].data[2] = domain->id; | ||
1087 | |||
1088 | amd_iommu_pd_table[devid] = domain; | ||
1089 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); | 1195 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); |
1090 | 1196 | ||
1091 | /* | 1197 | /* |
1092 | * We might boot into a crash-kernel here. The crashed kernel | 1198 | * We might boot into a crash-kernel here. The crashed kernel |
1093 | * left the caches in the IOMMU dirty. So we have to flush | 1199 | * left the caches in the IOMMU dirty. So we have to flush |
1094 | * here to evict all dirty stuff. | 1200 | * here to evict all dirty stuff. |
1095 | */ | 1201 | */ |
1096 | iommu_queue_inv_dev_entry(iommu, devid); | 1202 | iommu_queue_inv_dev_entry(iommu, devid); |
1097 | iommu_flush_tlb_pde(iommu, domain->id); | 1203 | iommu_flush_tlb_pde(iommu, domain->id); |
1098 | } | 1204 | } |
@@ -1119,6 +1225,15 @@ static void __detach_device(struct protection_domain *domain, u16 devid) | |||
1119 | 1225 | ||
1120 | /* ready */ | 1226 | /* ready */ |
1121 | spin_unlock(&domain->lock); | 1227 | spin_unlock(&domain->lock); |
1228 | |||
1229 | /* | ||
1230 | * If we run in passthrough mode the device must be assigned to the | ||
1231 | * passthrough domain if it is detached from any other domain | ||
1232 | */ | ||
1233 | if (iommu_pass_through) { | ||
1234 | struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; | ||
1235 | __attach_device(iommu, pt_domain, devid); | ||
1236 | } | ||
1122 | } | 1237 | } |
1123 | 1238 | ||
1124 | /* | 1239 | /* |
@@ -1164,6 +1279,8 @@ static int device_change_notifier(struct notifier_block *nb, | |||
1164 | case BUS_NOTIFY_UNBOUND_DRIVER: | 1279 | case BUS_NOTIFY_UNBOUND_DRIVER: |
1165 | if (!domain) | 1280 | if (!domain) |
1166 | goto out; | 1281 | goto out; |
1282 | if (iommu_pass_through) | ||
1283 | break; | ||
1167 | detach_device(domain, devid); | 1284 | detach_device(domain, devid); |
1168 | break; | 1285 | break; |
1169 | case BUS_NOTIFY_ADD_DEVICE: | 1286 | case BUS_NOTIFY_ADD_DEVICE: |
@@ -1292,39 +1409,91 @@ static int get_device_resources(struct device *dev, | |||
1292 | return 1; | 1409 | return 1; |
1293 | } | 1410 | } |
1294 | 1411 | ||
1412 | static void update_device_table(struct protection_domain *domain) | ||
1413 | { | ||
1414 | unsigned long flags; | ||
1415 | int i; | ||
1416 | |||
1417 | for (i = 0; i <= amd_iommu_last_bdf; ++i) { | ||
1418 | if (amd_iommu_pd_table[i] != domain) | ||
1419 | continue; | ||
1420 | write_lock_irqsave(&amd_iommu_devtable_lock, flags); | ||
1421 | set_dte_entry(i, domain); | ||
1422 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); | ||
1423 | } | ||
1424 | } | ||
1425 | |||
1426 | static void update_domain(struct protection_domain *domain) | ||
1427 | { | ||
1428 | if (!domain->updated) | ||
1429 | return; | ||
1430 | |||
1431 | update_device_table(domain); | ||
1432 | flush_devices_by_domain(domain); | ||
1433 | iommu_flush_domain(domain->id); | ||
1434 | |||
1435 | domain->updated = false; | ||
1436 | } | ||
1437 | |||
1295 | /* | 1438 | /* |
1296 | * If the pte_page is not yet allocated this function is called | 1439 | * This function is used to add another level to an IO page table. Adding |
1440 | * another level increases the size of the address space by 9 bits, up to | ||
1441 | * a maximum of 64 bits. | ||
1297 | */ | 1442 | */ |
1298 | static u64* alloc_pte(struct protection_domain *dom, | 1443 | static bool increase_address_space(struct protection_domain *domain, |
1299 | unsigned long address, u64 **pte_page, gfp_t gfp) | 1444 | gfp_t gfp) |
1445 | { | ||
1446 | u64 *pte; | ||
1447 | |||
1448 | if (domain->mode == PAGE_MODE_6_LEVEL) | ||
1449 | /* address space already 64 bit large */ | ||
1450 | return false; | ||
1451 | |||
1452 | pte = (void *)get_zeroed_page(gfp); | ||
1453 | if (!pte) | ||
1454 | return false; | ||
1455 | |||
1456 | *pte = PM_LEVEL_PDE(domain->mode, | ||
1457 | virt_to_phys(domain->pt_root)); | ||
1458 | domain->pt_root = pte; | ||
1459 | domain->mode += 1; | ||
1460 | domain->updated = true; | ||
1461 | |||
1462 | return true; | ||
1463 | } | ||
1464 | |||
1465 | static u64 *alloc_pte(struct protection_domain *domain, | ||
1466 | unsigned long address, | ||
1467 | int end_lvl, | ||
1468 | u64 **pte_page, | ||
1469 | gfp_t gfp) | ||
1300 | { | 1470 | { |
1301 | u64 *pte, *page; | 1471 | u64 *pte, *page; |
1472 | int level; | ||
1302 | 1473 | ||
1303 | pte = &dom->pt_root[IOMMU_PTE_L2_INDEX(address)]; | 1474 | while (address > PM_LEVEL_SIZE(domain->mode)) |
1475 | increase_address_space(domain, gfp); | ||
1304 | 1476 | ||
1305 | if (!IOMMU_PTE_PRESENT(*pte)) { | 1477 | level = domain->mode - 1; |
1306 | page = (u64 *)get_zeroed_page(gfp); | 1478 | pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; |
1307 | if (!page) | ||
1308 | return NULL; | ||
1309 | *pte = IOMMU_L2_PDE(virt_to_phys(page)); | ||
1310 | } | ||
1311 | 1479 | ||
1312 | pte = IOMMU_PTE_PAGE(*pte); | 1480 | while (level > end_lvl) { |
1313 | pte = &pte[IOMMU_PTE_L1_INDEX(address)]; | 1481 | if (!IOMMU_PTE_PRESENT(*pte)) { |
1482 | page = (u64 *)get_zeroed_page(gfp); | ||
1483 | if (!page) | ||
1484 | return NULL; | ||
1485 | *pte = PM_LEVEL_PDE(level, virt_to_phys(page)); | ||
1486 | } | ||
1314 | 1487 | ||
1315 | if (!IOMMU_PTE_PRESENT(*pte)) { | 1488 | level -= 1; |
1316 | page = (u64 *)get_zeroed_page(gfp); | ||
1317 | if (!page) | ||
1318 | return NULL; | ||
1319 | *pte = IOMMU_L1_PDE(virt_to_phys(page)); | ||
1320 | } | ||
1321 | 1489 | ||
1322 | pte = IOMMU_PTE_PAGE(*pte); | 1490 | pte = IOMMU_PTE_PAGE(*pte); |
1323 | 1491 | ||
1324 | if (pte_page) | 1492 | if (pte_page && level == end_lvl) |
1325 | *pte_page = pte; | 1493 | *pte_page = pte; |
1326 | 1494 | ||
1327 | pte = &pte[IOMMU_PTE_L0_INDEX(address)]; | 1495 | pte = &pte[PM_LEVEL_INDEX(level, address)]; |
1496 | } | ||
1328 | 1497 | ||
1329 | return pte; | 1498 | return pte; |
1330 | } | 1499 | } |
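alloc_pte() now grows the table on demand: each increase_address_space() call adds one level, and every level contributes 9 more address bits on top of the 12-bit page offset. A back-of-the-envelope check of why the old fixed three-level code was capped at 512GB (PM_LEVEL_SIZE is sketched here under that assumption, not quoted from the header):

    /* Assumed: highest address a table of the given mode can map. */
    #define PM_LEVEL_SIZE(mode) ((1ULL << (12 + 9 * (mode))) - 1)

    /*
     * mode 3 (PAGE_MODE_3_LEVEL): 12 + 27 = 39 bits -> 512 GB,
     *     matching the IOMMU_MAP_SIZE_L3 check that could be dropped
     * mode 4: 12 + 36 = 48 bits -> 256 TB
     * mode 6 (PAGE_MODE_6_LEVEL): covers the full 64-bit space, so
     *     increase_address_space() refuses to grow any further
     */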
@@ -1344,10 +1513,13 @@ static u64* dma_ops_get_pte(struct dma_ops_domain *dom, | |||
1344 | 1513 | ||
1345 | pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)]; | 1514 | pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)]; |
1346 | if (!pte) { | 1515 | if (!pte) { |
1347 | pte = alloc_pte(&dom->domain, address, &pte_page, GFP_ATOMIC); | 1516 | pte = alloc_pte(&dom->domain, address, PM_MAP_4k, &pte_page, |
1517 | GFP_ATOMIC); | ||
1348 | aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page; | 1518 | aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page; |
1349 | } else | 1519 | } else |
1350 | pte += IOMMU_PTE_L0_INDEX(address); | 1520 | pte += PM_LEVEL_INDEX(0, address); |
1521 | |||
1522 | update_domain(&dom->domain); | ||
1351 | 1523 | ||
1352 | return pte; | 1524 | return pte; |
1353 | } | 1525 | } |
@@ -1409,7 +1581,7 @@ static void dma_ops_domain_unmap(struct amd_iommu *iommu, | |||
1409 | if (!pte) | 1581 | if (!pte) |
1410 | return; | 1582 | return; |
1411 | 1583 | ||
1412 | pte += IOMMU_PTE_L0_INDEX(address); | 1584 | pte += PM_LEVEL_INDEX(0, address); |
1413 | 1585 | ||
1414 | WARN_ON(!*pte); | 1586 | WARN_ON(!*pte); |
1415 | 1587 | ||
@@ -1988,19 +2160,47 @@ static void cleanup_domain(struct protection_domain *domain) | |||
1988 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); | 2160 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); |
1989 | } | 2161 | } |
1990 | 2162 | ||
1991 | static int amd_iommu_domain_init(struct iommu_domain *dom) | 2163 | static void protection_domain_free(struct protection_domain *domain) |
2164 | { | ||
2165 | if (!domain) | ||
2166 | return; | ||
2167 | |||
2168 | if (domain->id) | ||
2169 | domain_id_free(domain->id); | ||
2170 | |||
2171 | kfree(domain); | ||
2172 | } | ||
2173 | |||
2174 | static struct protection_domain *protection_domain_alloc(void) | ||
1992 | { | 2175 | { |
1993 | struct protection_domain *domain; | 2176 | struct protection_domain *domain; |
1994 | 2177 | ||
1995 | domain = kzalloc(sizeof(*domain), GFP_KERNEL); | 2178 | domain = kzalloc(sizeof(*domain), GFP_KERNEL); |
1996 | if (!domain) | 2179 | if (!domain) |
1997 | return -ENOMEM; | 2180 | return NULL; |
1998 | 2181 | ||
1999 | spin_lock_init(&domain->lock); | 2182 | spin_lock_init(&domain->lock); |
2000 | domain->mode = PAGE_MODE_3_LEVEL; | ||
2001 | domain->id = domain_id_alloc(); | 2183 | domain->id = domain_id_alloc(); |
2002 | if (!domain->id) | 2184 | if (!domain->id) |
2185 | goto out_err; | ||
2186 | |||
2187 | return domain; | ||
2188 | |||
2189 | out_err: | ||
2190 | kfree(domain); | ||
2191 | |||
2192 | return NULL; | ||
2193 | } | ||
2194 | |||
2195 | static int amd_iommu_domain_init(struct iommu_domain *dom) | ||
2196 | { | ||
2197 | struct protection_domain *domain; | ||
2198 | |||
2199 | domain = protection_domain_alloc(); | ||
2200 | if (!domain) | ||
2003 | goto out_free; | 2201 | goto out_free; |
2202 | |||
2203 | domain->mode = PAGE_MODE_3_LEVEL; | ||
2004 | domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL); | 2204 | domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL); |
2005 | if (!domain->pt_root) | 2205 | if (!domain->pt_root) |
2006 | goto out_free; | 2206 | goto out_free; |
@@ -2010,7 +2210,7 @@ static int amd_iommu_domain_init(struct iommu_domain *dom) | |||
2010 | return 0; | 2210 | return 0; |
2011 | 2211 | ||
2012 | out_free: | 2212 | out_free: |
2013 | kfree(domain); | 2213 | protection_domain_free(domain); |
2014 | 2214 | ||
2015 | return -ENOMEM; | 2215 | return -ENOMEM; |
2016 | } | 2216 | } |
@@ -2115,7 +2315,7 @@ static int amd_iommu_map_range(struct iommu_domain *dom, | |||
2115 | paddr &= PAGE_MASK; | 2315 | paddr &= PAGE_MASK; |
2116 | 2316 | ||
2117 | for (i = 0; i < npages; ++i) { | 2317 | for (i = 0; i < npages; ++i) { |
2118 | ret = iommu_map_page(domain, iova, paddr, prot); | 2318 | ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k); |
2119 | if (ret) | 2319 | if (ret) |
2120 | return ret; | 2320 | return ret; |
2121 | 2321 | ||
@@ -2136,7 +2336,7 @@ static void amd_iommu_unmap_range(struct iommu_domain *dom, | |||
2136 | iova &= PAGE_MASK; | 2336 | iova &= PAGE_MASK; |
2137 | 2337 | ||
2138 | for (i = 0; i < npages; ++i) { | 2338 | for (i = 0; i < npages; ++i) { |
2139 | iommu_unmap_page(domain, iova); | 2339 | iommu_unmap_page(domain, iova, PM_MAP_4k); |
2140 | iova += PAGE_SIZE; | 2340 | iova += PAGE_SIZE; |
2141 | } | 2341 | } |
2142 | 2342 | ||
@@ -2151,21 +2351,9 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom, | |||
2151 | phys_addr_t paddr; | 2351 | phys_addr_t paddr; |
2152 | u64 *pte; | 2352 | u64 *pte; |
2153 | 2353 | ||
2154 | pte = &domain->pt_root[IOMMU_PTE_L2_INDEX(iova)]; | 2354 | pte = fetch_pte(domain, iova, PM_MAP_4k); |
2155 | |||
2156 | if (!IOMMU_PTE_PRESENT(*pte)) | ||
2157 | return 0; | ||
2158 | |||
2159 | pte = IOMMU_PTE_PAGE(*pte); | ||
2160 | pte = &pte[IOMMU_PTE_L1_INDEX(iova)]; | ||
2161 | |||
2162 | if (!IOMMU_PTE_PRESENT(*pte)) | ||
2163 | return 0; | ||
2164 | |||
2165 | pte = IOMMU_PTE_PAGE(*pte); | ||
2166 | pte = &pte[IOMMU_PTE_L0_INDEX(iova)]; | ||
2167 | 2355 | ||
2168 | if (!IOMMU_PTE_PRESENT(*pte)) | 2356 | if (!pte || !IOMMU_PTE_PRESENT(*pte)) |
2169 | return 0; | 2357 | return 0; |
2170 | 2358 | ||
2171 | paddr = *pte & IOMMU_PAGE_MASK; | 2359 | paddr = *pte & IOMMU_PAGE_MASK; |
@@ -2191,3 +2379,46 @@ static struct iommu_ops amd_iommu_ops = { | |||
2191 | .domain_has_cap = amd_iommu_domain_has_cap, | 2379 | .domain_has_cap = amd_iommu_domain_has_cap, |
2192 | }; | 2380 | }; |
2193 | 2381 | ||
2382 | /***************************************************************************** | ||
2383 | * | ||
2384 | * The next functions do a basic initialization of IOMMU for pass through | ||
2385 | * mode | ||
2386 | * | ||
2387 | * In passthrough mode the IOMMU is initialized and enabled but not used for | ||
2388 | * DMA-API translation. | ||
2389 | * | ||
2390 | *****************************************************************************/ | ||
2391 | |||
2392 | int __init amd_iommu_init_passthrough(void) | ||
2393 | { | ||
2394 | struct pci_dev *dev = NULL; | ||
2395 | u16 devid, devid2; | ||
2396 | |||
2397 | /* allocate passthrough domain */ | ||
2398 | pt_domain = protection_domain_alloc(); | ||
2399 | if (!pt_domain) | ||
2400 | return -ENOMEM; | ||
2401 | |||
2402 | pt_domain->mode |= PAGE_MODE_NONE; | ||
2403 | |||
2404 | while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { | ||
2405 | struct amd_iommu *iommu; | ||
2406 | |||
2407 | devid = calc_devid(dev->bus->number, dev->devfn); | ||
2408 | if (devid > amd_iommu_last_bdf) | ||
2409 | continue; | ||
2410 | |||
2411 | devid2 = amd_iommu_alias_table[devid]; | ||
2412 | |||
2413 | iommu = amd_iommu_rlookup_table[devid2]; | ||
2414 | if (!iommu) | ||
2415 | continue; | ||
2416 | |||
2417 | __attach_device(iommu, pt_domain, devid); | ||
2418 | __attach_device(iommu, pt_domain, devid2); | ||
2419 | } | ||
2420 | |||
2421 | pr_info("AMD-Vi: Initialized for Passthrough Mode\n"); | ||
2422 | |||
2423 | return 0; | ||
2424 | } | ||
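amd_iommu_init_passthrough() attaches every PCI device and its alias to pt_domain so that DMA passes through untranslated. The device ID being computed is the standard 16-bit PCI bus/devfn pair; a sketch of what calc_devid() presumably reduces to (assumed from standard BDF encoding, not copied from the driver header):

    /* 16-bit IOMMU device id: bus in the high byte, devfn in the low. */
    static inline u16 calc_devid(u8 bus, u8 devfn)
    {
            return ((u16)bus << 8) | devfn;
    }

    /* e.g. PCI device 00:1f.3: devfn = (0x1f << 3) | 3 = 0xfb,
     * so devid = 0x00fb */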
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c index c1b17e97252e..b4b61d462dcc 100644 --- a/arch/x86/kernel/amd_iommu_init.c +++ b/arch/x86/kernel/amd_iommu_init.c | |||
@@ -252,7 +252,7 @@ static void __init iommu_feature_disable(struct amd_iommu *iommu, u8 bit) | |||
252 | /* Function to enable the hardware */ | 252 | /* Function to enable the hardware */ |
253 | static void iommu_enable(struct amd_iommu *iommu) | 253 | static void iommu_enable(struct amd_iommu *iommu) |
254 | { | 254 | { |
255 | printk(KERN_INFO "AMD IOMMU: Enabling IOMMU at %s cap 0x%hx\n", | 255 | printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx\n", |
256 | dev_name(&iommu->dev->dev), iommu->cap_ptr); | 256 | dev_name(&iommu->dev->dev), iommu->cap_ptr); |
257 | 257 | ||
258 | iommu_feature_enable(iommu, CONTROL_IOMMU_EN); | 258 | iommu_feature_enable(iommu, CONTROL_IOMMU_EN); |
@@ -435,6 +435,20 @@ static u8 * __init alloc_command_buffer(struct amd_iommu *iommu) | |||
435 | } | 435 | } |
436 | 436 | ||
437 | /* | 437 | /* |
438 | * This function resets the command buffer if the IOMMU stopped fetching | ||
439 | * commands from it. | ||
440 | */ | ||
441 | void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu) | ||
442 | { | ||
443 | iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); | ||
444 | |||
445 | writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); | ||
446 | writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); | ||
447 | |||
448 | iommu_feature_enable(iommu, CONTROL_CMDBUF_EN); | ||
449 | } | ||
450 | |||
451 | /* | ||
438 | * This function writes the command buffer address to the hardware and | 452 | * This function writes the command buffer address to the hardware and |
439 | * enables it. | 453 | * enables it. |
440 | */ | 454 | */ |
@@ -450,11 +464,7 @@ static void iommu_enable_command_buffer(struct amd_iommu *iommu) | |||
450 | memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET, | 464 | memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET, |
451 | &entry, sizeof(entry)); | 465 | &entry, sizeof(entry)); |
452 | 466 | ||
453 | /* set head and tail to zero manually */ | 467 | amd_iommu_reset_cmd_buffer(iommu); |
454 | writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); | ||
455 | writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); | ||
456 | |||
457 | iommu_feature_enable(iommu, CONTROL_CMDBUF_EN); | ||
458 | } | 468 | } |
459 | 469 | ||
460 | static void __init free_command_buffer(struct amd_iommu *iommu) | 470 | static void __init free_command_buffer(struct amd_iommu *iommu) |
@@ -858,7 +868,7 @@ static int __init init_iommu_all(struct acpi_table_header *table) | |||
858 | switch (*p) { | 868 | switch (*p) { |
859 | case ACPI_IVHD_TYPE: | 869 | case ACPI_IVHD_TYPE: |
860 | 870 | ||
861 | DUMP_printk("IOMMU: device: %02x:%02x.%01x cap: %04x " | 871 | DUMP_printk("device: %02x:%02x.%01x cap: %04x " |
862 | "seg: %d flags: %01x info %04x\n", | 872 | "seg: %d flags: %01x info %04x\n", |
863 | PCI_BUS(h->devid), PCI_SLOT(h->devid), | 873 | PCI_BUS(h->devid), PCI_SLOT(h->devid), |
864 | PCI_FUNC(h->devid), h->cap_ptr, | 874 | PCI_FUNC(h->devid), h->cap_ptr, |
@@ -902,7 +912,7 @@ static int __init iommu_setup_msi(struct amd_iommu *iommu) | |||
902 | 912 | ||
903 | r = request_irq(iommu->dev->irq, amd_iommu_int_handler, | 913 | r = request_irq(iommu->dev->irq, amd_iommu_int_handler, |
904 | IRQF_SAMPLE_RANDOM, | 914 | IRQF_SAMPLE_RANDOM, |
905 | "AMD IOMMU", | 915 | "AMD-Vi", |
906 | NULL); | 916 | NULL); |
907 | 917 | ||
908 | if (r) { | 918 | if (r) { |
@@ -1150,7 +1160,7 @@ int __init amd_iommu_init(void) | |||
1150 | 1160 | ||
1151 | 1161 | ||
1152 | if (no_iommu) { | 1162 | if (no_iommu) { |
1153 | printk(KERN_INFO "AMD IOMMU disabled by kernel command line\n"); | 1163 | printk(KERN_INFO "AMD-Vi disabled by kernel command line\n"); |
1154 | return 0; | 1164 | return 0; |
1155 | } | 1165 | } |
1156 | 1166 | ||
@@ -1242,22 +1252,28 @@ int __init amd_iommu_init(void) | |||
1242 | if (ret) | 1252 | if (ret) |
1243 | goto free; | 1253 | goto free; |
1244 | 1254 | ||
1245 | ret = amd_iommu_init_dma_ops(); | 1255 | if (iommu_pass_through) |
1256 | ret = amd_iommu_init_passthrough(); | ||
1257 | else | ||
1258 | ret = amd_iommu_init_dma_ops(); | ||
1246 | if (ret) | 1259 | if (ret) |
1247 | goto free; | 1260 | goto free; |
1248 | 1261 | ||
1249 | enable_iommus(); | 1262 | enable_iommus(); |
1250 | 1263 | ||
1251 | printk(KERN_INFO "AMD IOMMU: device isolation "); | 1264 | if (iommu_pass_through) |
1265 | goto out; | ||
1266 | |||
1267 | printk(KERN_INFO "AMD-Vi: device isolation "); | ||
1252 | if (amd_iommu_isolate) | 1268 | if (amd_iommu_isolate) |
1253 | printk("enabled\n"); | 1269 | printk("enabled\n"); |
1254 | else | 1270 | else |
1255 | printk("disabled\n"); | 1271 | printk("disabled\n"); |
1256 | 1272 | ||
1257 | if (amd_iommu_unmap_flush) | 1273 | if (amd_iommu_unmap_flush) |
1258 | printk(KERN_INFO "AMD IOMMU: IO/TLB flush on unmap enabled\n"); | 1274 | printk(KERN_INFO "AMD-Vi: IO/TLB flush on unmap enabled\n"); |
1259 | else | 1275 | else |
1260 | printk(KERN_INFO "AMD IOMMU: Lazy IO/TLB flushing enabled\n"); | 1276 | printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n"); |
1261 | 1277 | ||
1262 | out: | 1278 | out: |
1263 | return ret; | 1279 | return ret; |
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c index 676debfc1702..128111d8ffe0 100644 --- a/arch/x86/kernel/aperture_64.c +++ b/arch/x86/kernel/aperture_64.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/bitops.h> | 20 | #include <linux/bitops.h> |
21 | #include <linux/ioport.h> | 21 | #include <linux/ioport.h> |
22 | #include <linux/suspend.h> | 22 | #include <linux/suspend.h> |
23 | #include <linux/kmemleak.h> | ||
23 | #include <asm/e820.h> | 24 | #include <asm/e820.h> |
24 | #include <asm/io.h> | 25 | #include <asm/io.h> |
25 | #include <asm/iommu.h> | 26 | #include <asm/iommu.h> |
@@ -94,6 +95,11 @@ static u32 __init allocate_aperture(void) | |||
94 | * code for safe | 95 | * code for safe |
95 | */ | 96 | */ |
96 | p = __alloc_bootmem_nopanic(aper_size, aper_size, 512ULL<<20); | 97 | p = __alloc_bootmem_nopanic(aper_size, aper_size, 512ULL<<20); |
98 | /* | ||
99 | * Kmemleak should not scan this block as it may not be mapped via the | ||
100 | * kernel direct mapping. | ||
101 | */ | ||
102 | kmemleak_ignore(p); | ||
97 | if (!p || __pa(p)+aper_size > 0xffffffff) { | 103 | if (!p || __pa(p)+aper_size > 0xffffffff) { |
98 | printk(KERN_ERR | 104 | printk(KERN_ERR |
99 | "Cannot allocate aperture memory hole (%p,%uK)\n", | 105 | "Cannot allocate aperture memory hole (%p,%uK)\n", |
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 0a1c2830ec66..159740decc41 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c | |||
@@ -49,6 +49,7 @@ | |||
49 | #include <asm/mtrr.h> | 49 | #include <asm/mtrr.h> |
50 | #include <asm/smp.h> | 50 | #include <asm/smp.h> |
51 | #include <asm/mce.h> | 51 | #include <asm/mce.h> |
52 | #include <asm/kvm_para.h> | ||
52 | 53 | ||
53 | unsigned int num_processors; | 54 | unsigned int num_processors; |
54 | 55 | ||
@@ -1361,52 +1362,80 @@ void enable_x2apic(void) | |||
1361 | } | 1362 | } |
1362 | #endif /* CONFIG_X86_X2APIC */ | 1363 | #endif /* CONFIG_X86_X2APIC */ |
1363 | 1364 | ||
1364 | void __init enable_IR_x2apic(void) | 1365 | int __init enable_IR(void) |
1365 | { | 1366 | { |
1366 | #ifdef CONFIG_INTR_REMAP | 1367 | #ifdef CONFIG_INTR_REMAP |
1367 | int ret; | ||
1368 | unsigned long flags; | ||
1369 | struct IO_APIC_route_entry **ioapic_entries = NULL; | ||
1370 | |||
1371 | ret = dmar_table_init(); | ||
1372 | if (ret) { | ||
1373 | pr_debug("dmar_table_init() failed with %d:\n", ret); | ||
1374 | goto ir_failed; | ||
1375 | } | ||
1376 | |||
1377 | if (!intr_remapping_supported()) { | 1368 | if (!intr_remapping_supported()) { |
1378 | pr_debug("intr-remapping not supported\n"); | 1369 | pr_debug("intr-remapping not supported\n"); |
1379 | goto ir_failed; | 1370 | return 0; |
1380 | } | 1371 | } |
1381 | 1372 | ||
1382 | |||
1383 | if (!x2apic_preenabled && skip_ioapic_setup) { | 1373 | if (!x2apic_preenabled && skip_ioapic_setup) { |
1384 | pr_info("Skipped enabling intr-remap because of skipping " | 1374 | pr_info("Skipped enabling intr-remap because of skipping " |
1385 | "io-apic setup\n"); | 1375 | "io-apic setup\n"); |
1386 | return; | 1376 | return 0; |
1387 | } | 1377 | } |
1388 | 1378 | ||
1379 | if (enable_intr_remapping(x2apic_supported())) | ||
1380 | return 0; | ||
1381 | |||
1382 | pr_info("Enabled Interrupt-remapping\n"); | ||
1383 | |||
1384 | return 1; | ||
1385 | |||
1386 | #endif | ||
1387 | return 0; | ||
1388 | } | ||
1389 | |||
1390 | void __init enable_IR_x2apic(void) | ||
1391 | { | ||
1392 | unsigned long flags; | ||
1393 | struct IO_APIC_route_entry **ioapic_entries = NULL; | ||
1394 | int ret, x2apic_enabled = 0; | ||
1395 | int dmar_table_init_ret = 0; | ||
1396 | |||
1397 | #ifdef CONFIG_INTR_REMAP | ||
1398 | dmar_table_init_ret = dmar_table_init(); | ||
1399 | if (dmar_table_init_ret) | ||
1400 | pr_debug("dmar_table_init() failed with %d:\n", | ||
1401 | dmar_table_init_ret); | ||
1402 | #endif | ||
1403 | |||
1389 | ioapic_entries = alloc_ioapic_entries(); | 1404 | ioapic_entries = alloc_ioapic_entries(); |
1390 | if (!ioapic_entries) { | 1405 | if (!ioapic_entries) { |
1391 | pr_info("Allocate ioapic_entries failed: %d\n", ret); | 1406 | pr_err("Allocate ioapic_entries failed\n"); |
1392 | goto end; | 1407 | goto out; |
1393 | } | 1408 | } |
1394 | 1409 | ||
1395 | ret = save_IO_APIC_setup(ioapic_entries); | 1410 | ret = save_IO_APIC_setup(ioapic_entries); |
1396 | if (ret) { | 1411 | if (ret) { |
1397 | pr_info("Saving IO-APIC state failed: %d\n", ret); | 1412 | pr_info("Saving IO-APIC state failed: %d\n", ret); |
1398 | goto end; | 1413 | goto out; |
1399 | } | 1414 | } |
1400 | 1415 | ||
1401 | local_irq_save(flags); | 1416 | local_irq_save(flags); |
1402 | mask_IO_APIC_setup(ioapic_entries); | ||
1403 | mask_8259A(); | 1417 | mask_8259A(); |
1418 | mask_IO_APIC_setup(ioapic_entries); | ||
1404 | 1419 | ||
1405 | ret = enable_intr_remapping(x2apic_supported()); | 1420 | if (dmar_table_init_ret) |
1406 | if (ret) | 1421 | ret = 0; |
1407 | goto end_restore; | 1422 | else |
1423 | ret = enable_IR(); | ||
1408 | 1424 | ||
1409 | pr_info("Enabled Interrupt-remapping\n"); | 1425 | if (!ret) { |
1426 | /* IR is required if there are APIC IDs above 255 (beyond the 8-bit | ||
1427 | * xAPIC destination limit), even when running under KVM | ||
1428 | */ | ||
1429 | if (max_physical_apicid > 255 || !kvm_para_available()) | ||
1430 | goto nox2apic; | ||
1431 | /* | ||
1432 | * without IR, all CPUs can be addressed by IOAPIC/MSI | ||
1433 | * only in physical mode | ||
1434 | */ | ||
1435 | x2apic_force_phys(); | ||
1436 | } | ||
1437 | |||
1438 | x2apic_enabled = 1; | ||
1410 | 1439 | ||
1411 | if (x2apic_supported() && !x2apic_mode) { | 1440 | if (x2apic_supported() && !x2apic_mode) { |
1412 | x2apic_mode = 1; | 1441 | x2apic_mode = 1; |
@@ -1414,41 +1443,25 @@ void __init enable_IR_x2apic(void) | |||
1414 | pr_info("Enabled x2apic\n"); | 1443 | pr_info("Enabled x2apic\n"); |
1415 | } | 1444 | } |
1416 | 1445 | ||
1417 | end_restore: | 1446 | nox2apic: |
1418 | if (ret) | 1447 | if (!ret) /* IR enabling failed */ |
1419 | /* | ||
1420 | * IR enabling failed | ||
1421 | */ | ||
1422 | restore_IO_APIC_setup(ioapic_entries); | 1448 | restore_IO_APIC_setup(ioapic_entries); |
1423 | |||
1424 | unmask_8259A(); | 1449 | unmask_8259A(); |
1425 | local_irq_restore(flags); | 1450 | local_irq_restore(flags); |
1426 | 1451 | ||
1427 | end: | 1452 | out: |
1428 | if (ioapic_entries) | 1453 | if (ioapic_entries) |
1429 | free_ioapic_entries(ioapic_entries); | 1454 | free_ioapic_entries(ioapic_entries); |
1430 | 1455 | ||
1431 | if (!ret) | 1456 | if (x2apic_enabled) |
1432 | return; | 1457 | return; |
1433 | 1458 | ||
1434 | ir_failed: | ||
1435 | if (x2apic_preenabled) | 1459 | if (x2apic_preenabled) |
1436 | panic("x2apic enabled by bios. But IR enabling failed"); | 1460 | panic("x2apic: enabled by BIOS but kernel init failed."); |
1437 | else if (cpu_has_x2apic) | 1461 | else if (cpu_has_x2apic) |
1438 | pr_info("Not enabling x2apic,Intr-remapping\n"); | 1462 | pr_info("Not enabling x2apic, Intr-remapping init failed.\n"); |
1439 | #else | ||
1440 | if (!cpu_has_x2apic) | ||
1441 | return; | ||
1442 | |||
1443 | if (x2apic_preenabled) | ||
1444 | panic("x2apic enabled prior OS handover," | ||
1445 | " enable CONFIG_X86_X2APIC, CONFIG_INTR_REMAP"); | ||
1446 | #endif | ||
1447 | |||
1448 | return; | ||
1449 | } | 1463 | } |
1450 | 1464 | ||
1451 | |||
1452 | #ifdef CONFIG_X86_64 | 1465 | #ifdef CONFIG_X86_64 |
1453 | /* | 1466 | /* |
1454 | * Detect and enable local APICs on non-SMP boards. | 1467 | * Detect and enable local APICs on non-SMP boards. |
@@ -1549,8 +1562,6 @@ no_apic: | |||
1549 | #ifdef CONFIG_X86_64 | 1562 | #ifdef CONFIG_X86_64 |
1550 | void __init early_init_lapic_mapping(void) | 1563 | void __init early_init_lapic_mapping(void) |
1551 | { | 1564 | { |
1552 | unsigned long phys_addr; | ||
1553 | |||
1554 | /* | 1565 | /* |
1555 | * If no local APIC can be found then bail out: | 1566 | * If no local APIC can be found then bail out: |
1556 | * it means there is no mptable and MADT | 1567 | * it means there is no mptable and MADT |
@@ -1558,11 +1569,9 @@ void __init early_init_lapic_mapping(void) | |||
1558 | if (!smp_found_config) | 1569 | if (!smp_found_config) |
1559 | return; | 1570 | return; |
1560 | 1571 | ||
1561 | phys_addr = mp_lapic_addr; | 1572 | set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr); |
1562 | |||
1563 | set_fixmap_nocache(FIX_APIC_BASE, phys_addr); | ||
1564 | apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n", | 1573 | apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n", |
1565 | APIC_BASE, phys_addr); | 1574 | APIC_BASE, mp_lapic_addr); |
1566 | 1575 | ||
1567 | /* | 1576 | /* |
1568 | * Fetch the APIC ID of the BSP in case we have a | 1577 | * Fetch the APIC ID of the BSP in case we have a |
@@ -1651,7 +1660,6 @@ int __init APIC_init_uniprocessor(void) | |||
1651 | APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) { | 1660 | APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) { |
1652 | pr_err("BIOS bug, local APIC 0x%x not detected!...\n", | 1661 | pr_err("BIOS bug, local APIC 0x%x not detected!...\n", |
1653 | boot_cpu_physical_apicid); | 1662 | boot_cpu_physical_apicid); |
1654 | clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC); | ||
1655 | return -1; | 1663 | return -1; |
1656 | } | 1664 | } |
1657 | #endif | 1665 | #endif |
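The refactor splits the policy question (enable_IR(): can interrupt remapping come up at all?) from the mechanics around it. The resulting flow of enable_IR_x2apic(), condensed to commented steps as a reading aid (an outline of the code above, not a drop-in replacement):

    void __init enable_IR_x2apic_outline(void)
    {
            /* 1. dmar_table_init(); a failure only rules out IR        */
            /* 2. save the IO-APIC RTEs, mask the 8259A, then the       */
            /*    IO-APICs (note the reordered masking)                 */
            /* 3. enable_IR() returns 1 only if remapping really works  */
            /* 4. if IR failed: x2APIC is still usable in physical      */
            /*    mode, but only under KVM with all APIC IDs <= 255     */
            /* 5. if allowed, set x2apic_mode and enable x2APIC         */
            /* 6. restore the saved RTEs when IR failed; unmask 8259A   */
            /* 7. panic only when the BIOS had x2APIC pre-enabled but   */
            /*    the kernel could not finish the job                   */
    }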
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c index 8952a5890281..89174f847b49 100644 --- a/arch/x86/kernel/apic/es7000_32.c +++ b/arch/x86/kernel/apic/es7000_32.c | |||
@@ -167,7 +167,7 @@ static int es7000_apic_is_cluster(void) | |||
167 | { | 167 | { |
168 | /* MPENTIUMIII */ | 168 | /* MPENTIUMIII */ |
169 | if (boot_cpu_data.x86 == 6 && | 169 | if (boot_cpu_data.x86 == 6 && |
170 | (boot_cpu_data.x86_model >= 7 || boot_cpu_data.x86_model <= 11)) | 170 | (boot_cpu_data.x86_model >= 7 && boot_cpu_data.x86_model <= 11)) |
171 | return 1; | 171 | return 1; |
172 | 172 | ||
173 | return 0; | 173 | return 0; |
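The es7000 change is a textbook range-check fix: with ||, the test "model >= 7 or model <= 11" is true for every integer, so the cluster check fired on every family-6 CPU. A tiny illustration:

    /* Always true -- any model is >= 7 or <= 11: */
    static int buggy(int model) { return model >= 7 || model <= 11; }

    /* True only for the intended models 7..11: */
    static int fixed(int model) { return model >= 7 && model <= 11; }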
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index d2ed6c5ddc80..3c8f9e75d038 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
@@ -66,6 +66,8 @@ | |||
66 | #include <asm/apic.h> | 66 | #include <asm/apic.h> |
67 | 67 | ||
68 | #define __apicdebuginit(type) static type __init | 68 | #define __apicdebuginit(type) static type __init |
69 | #define for_each_irq_pin(entry, head) \ | ||
70 | for (entry = head; entry; entry = entry->next) | ||
69 | 71 | ||
70 | /* | 72 | /* |
71 | * Is the SiS APIC rmw bug present ? | 73 | * Is the SiS APIC rmw bug present ? |
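for_each_irq_pin() captures the NULL-terminated walk of the irq_2_pin list that several functions below previously open-coded with for(;;) plus explicit !entry and !entry->next checks. How such a macro reads at a call site, on an illustrative list type:

    struct pin_node { int apic, pin; struct pin_node *next; };

    #define for_each_node(e, head) \
            for ((e) = (head); (e); (e) = (e)->next)

    static int count_pins(struct pin_node *head)
    {
            struct pin_node *e;
            int n = 0;

            for_each_node(e, head) /* loop ends when e becomes NULL */
                    n++;
            return n;
    }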
@@ -85,6 +87,9 @@ int nr_ioapic_registers[MAX_IO_APICS]; | |||
85 | struct mpc_ioapic mp_ioapics[MAX_IO_APICS]; | 87 | struct mpc_ioapic mp_ioapics[MAX_IO_APICS]; |
86 | int nr_ioapics; | 88 | int nr_ioapics; |
87 | 89 | ||
90 | /* IO APIC gsi routing info */ | ||
91 | struct mp_ioapic_gsi mp_gsi_routing[MAX_IO_APICS]; | ||
92 | |||
88 | /* MP IRQ source entries */ | 93 | /* MP IRQ source entries */ |
89 | struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES]; | 94 | struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES]; |
90 | 95 | ||
@@ -116,15 +121,6 @@ static int __init parse_noapic(char *str) | |||
116 | } | 121 | } |
117 | early_param("noapic", parse_noapic); | 122 | early_param("noapic", parse_noapic); |
118 | 123 | ||
119 | struct irq_pin_list; | ||
120 | |||
121 | /* | ||
122 | * This is performance-critical, we want to do it O(1) | ||
123 | * | ||
124 | * the indexing order of this array favors 1:1 mappings | ||
125 | * between pins and IRQs. | ||
126 | */ | ||
127 | |||
128 | struct irq_pin_list { | 124 | struct irq_pin_list { |
129 | int apic, pin; | 125 | int apic, pin; |
130 | struct irq_pin_list *next; | 126 | struct irq_pin_list *next; |
@@ -139,6 +135,11 @@ static struct irq_pin_list *get_one_free_irq_2_pin(int node) | |||
139 | return pin; | 135 | return pin; |
140 | } | 136 | } |
141 | 137 | ||
138 | /* | ||
139 | * This is performance-critical; we want to do it O(1) | ||
140 | * | ||
141 | * Most irqs are mapped 1:1 with pins. | ||
142 | */ | ||
142 | struct irq_cfg { | 143 | struct irq_cfg { |
143 | struct irq_pin_list *irq_2_pin; | 144 | struct irq_pin_list *irq_2_pin; |
144 | cpumask_var_t domain; | 145 | cpumask_var_t domain; |
@@ -414,13 +415,10 @@ static bool io_apic_level_ack_pending(struct irq_cfg *cfg) | |||
414 | unsigned long flags; | 415 | unsigned long flags; |
415 | 416 | ||
416 | spin_lock_irqsave(&ioapic_lock, flags); | 417 | spin_lock_irqsave(&ioapic_lock, flags); |
417 | entry = cfg->irq_2_pin; | 418 | for_each_irq_pin(entry, cfg->irq_2_pin) { |
418 | for (;;) { | ||
419 | unsigned int reg; | 419 | unsigned int reg; |
420 | int pin; | 420 | int pin; |
421 | 421 | ||
422 | if (!entry) | ||
423 | break; | ||
424 | pin = entry->pin; | 422 | pin = entry->pin; |
425 | reg = io_apic_read(entry->apic, 0x10 + pin*2); | 423 | reg = io_apic_read(entry->apic, 0x10 + pin*2); |
426 | /* Is the remote IRR bit set? */ | 424 | /* Is the remote IRR bit set? */ |
@@ -428,9 +426,6 @@ static bool io_apic_level_ack_pending(struct irq_cfg *cfg) | |||
428 | spin_unlock_irqrestore(&ioapic_lock, flags); | 426 | spin_unlock_irqrestore(&ioapic_lock, flags); |
429 | return true; | 427 | return true; |
430 | } | 428 | } |
431 | if (!entry->next) | ||
432 | break; | ||
433 | entry = entry->next; | ||
434 | } | 429 | } |
435 | spin_unlock_irqrestore(&ioapic_lock, flags); | 430 | spin_unlock_irqrestore(&ioapic_lock, flags); |
436 | 431 | ||
@@ -498,72 +493,68 @@ static void ioapic_mask_entry(int apic, int pin) | |||
498 | * shared ISA-space IRQs, so we have to support them. We are super | 493 | * shared ISA-space IRQs, so we have to support them. We are super |
499 | * fast in the common case, and fast for shared ISA-space IRQs. | 494 | * fast in the common case, and fast for shared ISA-space IRQs. |
500 | */ | 495 | */ |
501 | static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin) | 496 | static int |
497 | add_pin_to_irq_node_nopanic(struct irq_cfg *cfg, int node, int apic, int pin) | ||
502 | { | 498 | { |
503 | struct irq_pin_list *entry; | 499 | struct irq_pin_list **last, *entry; |
504 | 500 | ||
505 | entry = cfg->irq_2_pin; | 501 | /* don't allow duplicates */ |
506 | if (!entry) { | 502 | last = &cfg->irq_2_pin; |
507 | entry = get_one_free_irq_2_pin(node); | 503 | for_each_irq_pin(entry, cfg->irq_2_pin) { |
508 | if (!entry) { | ||
509 | printk(KERN_ERR "can not alloc irq_2_pin to add %d - %d\n", | ||
510 | apic, pin); | ||
511 | return; | ||
512 | } | ||
513 | cfg->irq_2_pin = entry; | ||
514 | entry->apic = apic; | ||
515 | entry->pin = pin; | ||
516 | return; | ||
517 | } | ||
518 | |||
519 | while (entry->next) { | ||
520 | /* not again, please */ | ||
521 | if (entry->apic == apic && entry->pin == pin) | 504 | if (entry->apic == apic && entry->pin == pin) |
522 | return; | 505 | return 0; |
523 | 506 | last = &entry->next; | |
524 | entry = entry->next; | ||
525 | } | 507 | } |
526 | 508 | ||
527 | entry->next = get_one_free_irq_2_pin(node); | 509 | entry = get_one_free_irq_2_pin(node); |
528 | entry = entry->next; | 510 | if (!entry) { |
511 | printk(KERN_ERR "can not alloc irq_pin_list (%d,%d,%d)\n", | ||
512 | node, apic, pin); | ||
513 | return -ENOMEM; | ||
514 | } | ||
529 | entry->apic = apic; | 515 | entry->apic = apic; |
530 | entry->pin = pin; | 516 | entry->pin = pin; |
517 | |||
518 | *last = entry; | ||
519 | return 0; | ||
520 | } | ||
521 | |||
522 | static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin) | ||
523 | { | ||
524 | if (add_pin_to_irq_node_nopanic(cfg, node, apic, pin)) | ||
525 | panic("IO-APIC: failed to add irq-pin. Can not proceed\n"); | ||
531 | } | 526 | } |
532 | 527 | ||
533 | /* | 528 | /* |
534 | * Reroute an IRQ to a different pin. | 529 | * Reroute an IRQ to a different pin. |
535 | */ | 530 | */ |
536 | static void __init replace_pin_at_irq_node(struct irq_cfg *cfg, int node, | 531 | static void __init replace_pin_at_irq_node(struct irq_cfg *cfg, int node, |
537 | int oldapic, int oldpin, | 532 | int oldapic, int oldpin, |
538 | int newapic, int newpin) | 533 | int newapic, int newpin) |
539 | { | 534 | { |
540 | struct irq_pin_list *entry = cfg->irq_2_pin; | 535 | struct irq_pin_list *entry; |
541 | int replaced = 0; | ||
542 | 536 | ||
543 | while (entry) { | 537 | for_each_irq_pin(entry, cfg->irq_2_pin) { |
544 | if (entry->apic == oldapic && entry->pin == oldpin) { | 538 | if (entry->apic == oldapic && entry->pin == oldpin) { |
545 | entry->apic = newapic; | 539 | entry->apic = newapic; |
546 | entry->pin = newpin; | 540 | entry->pin = newpin; |
547 | replaced = 1; | ||
548 | /* every one is different, right? */ | 541 | /* every one is different, right? */ |
549 | break; | 542 | return; |
550 | } | 543 | } |
551 | entry = entry->next; | ||
552 | } | 544 | } |
553 | 545 | ||
554 | /* why? call replace before add? */ | 546 | /* old apic/pin didn't exist, so just add new ones */ |
555 | if (!replaced) | 547 | add_pin_to_irq_node(cfg, node, newapic, newpin); |
556 | add_pin_to_irq_node(cfg, node, newapic, newpin); | ||
557 | } | 548 | } |
558 | 549 | ||
559 | static inline void io_apic_modify_irq(struct irq_cfg *cfg, | 550 | static void io_apic_modify_irq(struct irq_cfg *cfg, |
560 | int mask_and, int mask_or, | 551 | int mask_and, int mask_or, |
561 | void (*final)(struct irq_pin_list *entry)) | 552 | void (*final)(struct irq_pin_list *entry)) |
562 | { | 553 | { |
563 | int pin; | 554 | int pin; |
564 | struct irq_pin_list *entry; | 555 | struct irq_pin_list *entry; |
565 | 556 | ||
566 | for (entry = cfg->irq_2_pin; entry != NULL; entry = entry->next) { | 557 | for_each_irq_pin(entry, cfg->irq_2_pin) { |
567 | unsigned int reg; | 558 | unsigned int reg; |
568 | pin = entry->pin; | 559 | pin = entry->pin; |
569 | reg = io_apic_read(entry->apic, 0x10 + pin * 2); | 560 | reg = io_apic_read(entry->apic, 0x10 + pin * 2); |
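add_pin_to_irq_node_nopanic() above uses the pointer-to-pointer idiom: last always addresses the link that would receive a new node, so the duplicate scan and the append share one pass and the empty-list case needs no special branch. The same idiom in isolation, on an illustrative node type:

    struct node { int key; struct node *next; };

    /* Append n unless key is already present; returns 0 on success. */
    static int append_unique(struct node **head, struct node *n)
    {
            struct node **last = head;
            struct node *e;

            for (e = *head; e; e = e->next) {
                    if (e->key == n->key)
                            return 0;    /* duplicate, nothing to add */
                    last = &e->next;     /* track the trailing link   */
            }
            n->next = NULL;
            *last = n;                   /* works for an empty list too */
            return 0;
    }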
@@ -580,7 +571,6 @@ static void __unmask_IO_APIC_irq(struct irq_cfg *cfg) | |||
580 | io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL); | 571 | io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL); |
581 | } | 572 | } |
582 | 573 | ||
583 | #ifdef CONFIG_X86_64 | ||
584 | static void io_apic_sync(struct irq_pin_list *entry) | 574 | static void io_apic_sync(struct irq_pin_list *entry) |
585 | { | 575 | { |
586 | /* | 576 | /* |
@@ -596,11 +586,6 @@ static void __mask_IO_APIC_irq(struct irq_cfg *cfg) | |||
596 | { | 586 | { |
597 | io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync); | 587 | io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync); |
598 | } | 588 | } |
599 | #else /* CONFIG_X86_32 */ | ||
600 | static void __mask_IO_APIC_irq(struct irq_cfg *cfg) | ||
601 | { | ||
602 | io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, NULL); | ||
603 | } | ||
604 | 589 | ||
605 | static void __mask_and_edge_IO_APIC_irq(struct irq_cfg *cfg) | 590 | static void __mask_and_edge_IO_APIC_irq(struct irq_cfg *cfg) |
606 | { | 591 | { |
@@ -613,7 +598,6 @@ static void __unmask_and_level_IO_APIC_irq(struct irq_cfg *cfg) | |||
613 | io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, | 598 | io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, |
614 | IO_APIC_REDIR_LEVEL_TRIGGER, NULL); | 599 | IO_APIC_REDIR_LEVEL_TRIGGER, NULL); |
615 | } | 600 | } |
616 | #endif /* CONFIG_X86_32 */ | ||
617 | 601 | ||
618 | static void mask_IO_APIC_irq_desc(struct irq_desc *desc) | 602 | static void mask_IO_APIC_irq_desc(struct irq_desc *desc) |
619 | { | 603 | { |
@@ -1702,12 +1686,8 @@ __apicdebuginit(void) print_IO_APIC(void) | |||
1702 | if (!entry) | 1686 | if (!entry) |
1703 | continue; | 1687 | continue; |
1704 | printk(KERN_DEBUG "IRQ%d ", irq); | 1688 | printk(KERN_DEBUG "IRQ%d ", irq); |
1705 | for (;;) { | 1689 | for_each_irq_pin(entry, cfg->irq_2_pin) |
1706 | printk("-> %d:%d", entry->apic, entry->pin); | 1690 | printk("-> %d:%d", entry->apic, entry->pin); |
1707 | if (!entry->next) | ||
1708 | break; | ||
1709 | entry = entry->next; | ||
1710 | } | ||
1711 | printk("\n"); | 1691 | printk("\n"); |
1712 | } | 1692 | } |
1713 | 1693 | ||
@@ -2211,7 +2191,6 @@ static unsigned int startup_ioapic_irq(unsigned int irq) | |||
2211 | return was_pending; | 2191 | return was_pending; |
2212 | } | 2192 | } |
2213 | 2193 | ||
2214 | #ifdef CONFIG_X86_64 | ||
2215 | static int ioapic_retrigger_irq(unsigned int irq) | 2194 | static int ioapic_retrigger_irq(unsigned int irq) |
2216 | { | 2195 | { |
2217 | 2196 | ||
@@ -2224,14 +2203,6 @@ static int ioapic_retrigger_irq(unsigned int irq) | |||
2224 | 2203 | ||
2225 | return 1; | 2204 | return 1; |
2226 | } | 2205 | } |
2227 | #else | ||
2228 | static int ioapic_retrigger_irq(unsigned int irq) | ||
2229 | { | ||
2230 | apic->send_IPI_self(irq_cfg(irq)->vector); | ||
2231 | |||
2232 | return 1; | ||
2233 | } | ||
2234 | #endif | ||
2235 | 2206 | ||
2236 | /* | 2207 | /* |
2237 | * Level and edge triggered IO-APIC interrupts need different handling, | 2208 | * Level and edge triggered IO-APIC interrupts need different handling, |
@@ -2269,13 +2240,9 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq | |||
2269 | struct irq_pin_list *entry; | 2240 | struct irq_pin_list *entry; |
2270 | u8 vector = cfg->vector; | 2241 | u8 vector = cfg->vector; |
2271 | 2242 | ||
2272 | entry = cfg->irq_2_pin; | 2243 | for_each_irq_pin(entry, cfg->irq_2_pin) { |
2273 | for (;;) { | ||
2274 | unsigned int reg; | 2244 | unsigned int reg; |
2275 | 2245 | ||
2276 | if (!entry) | ||
2277 | break; | ||
2278 | |||
2279 | apic = entry->apic; | 2246 | apic = entry->apic; |
2280 | pin = entry->pin; | 2247 | pin = entry->pin; |
2281 | /* | 2248 | /* |
@@ -2288,9 +2255,6 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq | |||
2288 | reg &= ~IO_APIC_REDIR_VECTOR_MASK; | 2255 | reg &= ~IO_APIC_REDIR_VECTOR_MASK; |
2289 | reg |= vector; | 2256 | reg |= vector; |
2290 | io_apic_modify(apic, 0x10 + pin*2, reg); | 2257 | io_apic_modify(apic, 0x10 + pin*2, reg); |
2291 | if (!entry->next) | ||
2292 | break; | ||
2293 | entry = entry->next; | ||
2294 | } | 2258 | } |
2295 | } | 2259 | } |
2296 | 2260 | ||
@@ -2515,11 +2479,8 @@ atomic_t irq_mis_count; | |||
2515 | static void ack_apic_level(unsigned int irq) | 2479 | static void ack_apic_level(unsigned int irq) |
2516 | { | 2480 | { |
2517 | struct irq_desc *desc = irq_to_desc(irq); | 2481 | struct irq_desc *desc = irq_to_desc(irq); |
2518 | |||
2519 | #ifdef CONFIG_X86_32 | ||
2520 | unsigned long v; | 2482 | unsigned long v; |
2521 | int i; | 2483 | int i; |
2522 | #endif | ||
2523 | struct irq_cfg *cfg; | 2484 | struct irq_cfg *cfg; |
2524 | int do_unmask_irq = 0; | 2485 | int do_unmask_irq = 0; |
2525 | 2486 | ||
@@ -2532,31 +2493,28 @@ static void ack_apic_level(unsigned int irq) | |||
2532 | } | 2493 | } |
2533 | #endif | 2494 | #endif |
2534 | 2495 | ||
2535 | #ifdef CONFIG_X86_32 | ||
2536 | /* | 2496 | /* |
2537 | * It appears there is an erratum which affects at least version 0x11 | 2497 | * It appears there is an erratum which affects at least version 0x11 |
2538 | * of I/O APIC (that's the 82093AA and cores integrated into various | 2498 | * of I/O APIC (that's the 82093AA and cores integrated into various |
2539 | * chipsets). Under certain conditions a level-triggered interrupt is | 2499 | * chipsets). Under certain conditions a level-triggered interrupt is |
2540 | * erroneously delivered as edge-triggered one but the respective IRR | 2500 | * erroneously delivered as edge-triggered one but the respective IRR |
2541 | * bit gets set nevertheless. As a result the I/O unit expects an EOI | 2501 | * bit gets set nevertheless. As a result the I/O unit expects an EOI |
2542 | * message but it will never arrive and further interrupts are blocked | 2502 | * message but it will never arrive and further interrupts are blocked |
2543 | * from the source. The exact reason is so far unknown, but the | 2503 | * from the source. The exact reason is so far unknown, but the |
2544 | * phenomenon was observed when two consecutive interrupt requests | 2504 | * phenomenon was observed when two consecutive interrupt requests |
2545 | * from a given source get delivered to the same CPU and the source is | 2505 | * from a given source get delivered to the same CPU and the source is |
2546 | * temporarily disabled in between. | 2506 | * temporarily disabled in between. |
2547 | * | 2507 | * |
2548 | * A workaround is to simulate an EOI message manually. We achieve it | 2508 | * A workaround is to simulate an EOI message manually. We achieve it |
2549 | * by setting the trigger mode to edge and then to level when the edge | 2509 | * by setting the trigger mode to edge and then to level when the edge |
2550 | * trigger mode gets detected in the TMR of a local APIC for a | 2510 | * trigger mode gets detected in the TMR of a local APIC for a |
2551 | * level-triggered interrupt. We mask the source for the time of the | 2511 | * level-triggered interrupt. We mask the source for the time of the |
2552 | * operation to prevent an edge-triggered interrupt escaping meanwhile. | 2512 | * operation to prevent an edge-triggered interrupt escaping meanwhile. |
2553 | * The idea is from Manfred Spraul. --macro | 2513 | * The idea is from Manfred Spraul. --macro |
2554 | */ | 2514 | */ |
2555 | cfg = desc->chip_data; | 2515 | cfg = desc->chip_data; |
2556 | i = cfg->vector; | 2516 | i = cfg->vector; |
2557 | |||
2558 | v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1)); | 2517 | v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1)); |
2559 | #endif | ||
2560 | 2518 | ||
2561 | /* | 2519 | /* |
2562 | * We must acknowledge the irq before we move it or the acknowledge will | 2520 | * We must acknowledge the irq before we move it or the acknowledge will |
@@ -2598,7 +2556,7 @@ static void ack_apic_level(unsigned int irq) | |||
2598 | unmask_IO_APIC_irq_desc(desc); | 2556 | unmask_IO_APIC_irq_desc(desc); |
2599 | } | 2557 | } |
2600 | 2558 | ||
2601 | #ifdef CONFIG_X86_32 | 2559 | /* Tail end of version 0x11 I/O APIC bug workaround */ |
2602 | if (!(v & (1 << (i & 0x1f)))) { | 2560 | if (!(v & (1 << (i & 0x1f)))) { |
2603 | atomic_inc(&irq_mis_count); | 2561 | atomic_inc(&irq_mis_count); |
2604 | spin_lock(&ioapic_lock); | 2562 | spin_lock(&ioapic_lock); |
@@ -2606,26 +2564,15 @@ static void ack_apic_level(unsigned int irq) | |||
2606 | __unmask_and_level_IO_APIC_irq(cfg); | 2564 | __unmask_and_level_IO_APIC_irq(cfg); |
2607 | spin_unlock(&ioapic_lock); | 2565 | spin_unlock(&ioapic_lock); |
2608 | } | 2566 | } |
2609 | #endif | ||
2610 | } | 2567 | } |
2611 | 2568 | ||
2612 | #ifdef CONFIG_INTR_REMAP | 2569 | #ifdef CONFIG_INTR_REMAP |
2613 | static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) | 2570 | static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) |
2614 | { | 2571 | { |
2615 | int apic, pin; | ||
2616 | struct irq_pin_list *entry; | 2572 | struct irq_pin_list *entry; |
2617 | 2573 | ||
2618 | entry = cfg->irq_2_pin; | 2574 | for_each_irq_pin(entry, cfg->irq_2_pin) |
2619 | for (;;) { | 2575 | io_apic_eoi(entry->apic, entry->pin); |
2620 | |||
2621 | if (!entry) | ||
2622 | break; | ||
2623 | |||
2624 | apic = entry->apic; | ||
2625 | pin = entry->pin; | ||
2626 | io_apic_eoi(apic, pin); | ||
2627 | entry = entry->next; | ||
2628 | } | ||
2629 | } | 2576 | } |
2630 | 2577 | ||
2631 | static void | 2578 | static void |
@@ -3241,8 +3188,7 @@ void destroy_irq(unsigned int irq) | |||
3241 | cfg = desc->chip_data; | 3188 | cfg = desc->chip_data; |
3242 | dynamic_irq_cleanup(irq); | 3189 | dynamic_irq_cleanup(irq); |
3243 | /* connect back irq_cfg */ | 3190 | /* connect back irq_cfg */ |
3244 | if (desc) | 3191 | desc->chip_data = cfg; |
3245 | desc->chip_data = cfg; | ||
3246 | 3192 | ||
3247 | free_irte(irq); | 3193 | free_irte(irq); |
3248 | spin_lock_irqsave(&vector_lock, flags); | 3194 | spin_lock_irqsave(&vector_lock, flags); |
@@ -3912,7 +3858,11 @@ static int __io_apic_set_pci_routing(struct device *dev, int irq, | |||
3912 | */ | 3858 | */ |
3913 | if (irq >= NR_IRQS_LEGACY) { | 3859 | if (irq >= NR_IRQS_LEGACY) { |
3914 | cfg = desc->chip_data; | 3860 | cfg = desc->chip_data; |
3915 | add_pin_to_irq_node(cfg, node, ioapic, pin); | 3861 | if (add_pin_to_irq_node_nopanic(cfg, node, ioapic, pin)) { |
3862 | printk(KERN_INFO "can not add pin %d for irq %d\n", | ||
3863 | pin, irq); | ||
3864 | return 0; | ||
3865 | } | ||
3916 | } | 3866 | } |
3917 | 3867 | ||
3918 | setup_IO_APIC_irq(ioapic, pin, irq, desc, trigger, polarity); | 3868 | setup_IO_APIC_irq(ioapic, pin, irq, desc, trigger, polarity); |
@@ -3941,11 +3891,28 @@ int io_apic_set_pci_routing(struct device *dev, int irq, | |||
3941 | return __io_apic_set_pci_routing(dev, irq, irq_attr); | 3891 | return __io_apic_set_pci_routing(dev, irq, irq_attr); |
3942 | } | 3892 | } |
3943 | 3893 | ||
3944 | /* -------------------------------------------------------------------------- | 3894 | u8 __init io_apic_unique_id(u8 id) |
3945 | ACPI-based IOAPIC Configuration | 3895 | { |
3946 | -------------------------------------------------------------------------- */ | 3896 | #ifdef CONFIG_X86_32 |
3897 | if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && | ||
3898 | !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) | ||
3899 | return io_apic_get_unique_id(nr_ioapics, id); | ||
3900 | else | ||
3901 | return id; | ||
3902 | #else | ||
3903 | int i; | ||
3904 | DECLARE_BITMAP(used, 256); | ||
3947 | 3905 | ||
3948 | #ifdef CONFIG_ACPI | 3906 | bitmap_zero(used, 256); |
3907 | for (i = 0; i < nr_ioapics; i++) { | ||
3908 | struct mpc_ioapic *ia = &mp_ioapics[i]; | ||
3909 | __set_bit(ia->apicid, used); | ||
3910 | } | ||
3911 | if (!test_bit(id, used)) | ||
3912 | return id; | ||
3913 | return find_first_zero_bit(used, 256); | ||
3914 | #endif | ||
3915 | } | ||
3949 | 3916 | ||
3950 | #ifdef CONFIG_X86_32 | 3917 | #ifdef CONFIG_X86_32 |
3951 | int __init io_apic_get_unique_id(int ioapic, int apic_id) | 3918 | int __init io_apic_get_unique_id(int ioapic, int apic_id) |
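A worked example of the 64-bit branch of io_apic_unique_id(), for a hypothetical BIOS that reports two IOAPICs with the same id:

/*
 * First IOAPIC registers with id 8: the 'used' bitmap is empty, so
 * test_bit(8, used) is clear and io_apic_unique_id() returns 8.
 * The second IOAPIC also claims id 8: bit 8 is now set, so the
 * function falls through to find_first_zero_bit(used, 256) and the
 * duplicate is remapped to the lowest free id, 0 in this case.
 */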
@@ -4054,8 +4021,6 @@ int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity) | |||
4054 | return 0; | 4021 | return 0; |
4055 | } | 4022 | } |
4056 | 4023 | ||
4057 | #endif /* CONFIG_ACPI */ | ||
4058 | |||
4059 | /* | 4024 | /* |
4060 | * This function currently is only a helper for the i386 smp boot process where | 4025 | * This function currently is only a helper for the i386 smp boot process where |
4061 | * we need to reprogram the ioredtbls to cater for the cpus which have come online | 4026 | * we need to reprogram the ioredtbls to cater for the cpus which have come online |
@@ -4109,7 +4074,7 @@ void __init setup_ioapic_dest(void) | |||
4109 | 4074 | ||
4110 | static struct resource *ioapic_resources; | 4075 | static struct resource *ioapic_resources; |
4111 | 4076 | ||
4112 | static struct resource * __init ioapic_setup_resources(void) | 4077 | static struct resource * __init ioapic_setup_resources(int nr_ioapics) |
4113 | { | 4078 | { |
4114 | unsigned long n; | 4079 | unsigned long n; |
4115 | struct resource *res; | 4080 | struct resource *res; |
@@ -4125,15 +4090,13 @@ static struct resource * __init ioapic_setup_resources(void) | |||
4125 | mem = alloc_bootmem(n); | 4090 | mem = alloc_bootmem(n); |
4126 | res = (void *)mem; | 4091 | res = (void *)mem; |
4127 | 4092 | ||
4128 | if (mem != NULL) { | 4093 | mem += sizeof(struct resource) * nr_ioapics; |
4129 | mem += sizeof(struct resource) * nr_ioapics; | ||
4130 | 4094 | ||
4131 | for (i = 0; i < nr_ioapics; i++) { | 4095 | for (i = 0; i < nr_ioapics; i++) { |
4132 | res[i].name = mem; | 4096 | res[i].name = mem; |
4133 | res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY; | 4097 | res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY; |
4134 | sprintf(mem, "IOAPIC %u", i); | 4098 | sprintf(mem, "IOAPIC %u", i); |
4135 | mem += IOAPIC_RESOURCE_NAME_SIZE; | 4099 | mem += IOAPIC_RESOURCE_NAME_SIZE; |
4136 | } | ||
4137 | } | 4100 | } |
4138 | 4101 | ||
4139 | ioapic_resources = res; | 4102 | ioapic_resources = res; |
@@ -4147,7 +4110,7 @@ void __init ioapic_init_mappings(void) | |||
4147 | struct resource *ioapic_res; | 4110 | struct resource *ioapic_res; |
4148 | int i; | 4111 | int i; |
4149 | 4112 | ||
4150 | ioapic_res = ioapic_setup_resources(); | 4113 | ioapic_res = ioapic_setup_resources(nr_ioapics); |
4151 | for (i = 0; i < nr_ioapics; i++) { | 4114 | for (i = 0; i < nr_ioapics; i++) { |
4152 | if (smp_found_config) { | 4115 | if (smp_found_config) { |
4153 | ioapic_phys = mp_ioapics[i].apicaddr; | 4116 | ioapic_phys = mp_ioapics[i].apicaddr; |
@@ -4176,11 +4139,9 @@ fake_ioapic_page: | |||
4176 | __fix_to_virt(idx), ioapic_phys); | 4139 | __fix_to_virt(idx), ioapic_phys); |
4177 | idx++; | 4140 | idx++; |
4178 | 4141 | ||
4179 | if (ioapic_res != NULL) { | 4142 | ioapic_res->start = ioapic_phys; |
4180 | ioapic_res->start = ioapic_phys; | 4143 | ioapic_res->end = ioapic_phys + (4 * 1024) - 1; |
4181 | ioapic_res->end = ioapic_phys + (4 * 1024) - 1; | 4144 | ioapic_res++; |
4182 | ioapic_res++; | ||
4183 | } | ||
4184 | } | 4145 | } |
4185 | } | 4146 | } |
4186 | 4147 | ||
@@ -4201,3 +4162,76 @@ void __init ioapic_insert_resources(void) | |||
4201 | r++; | 4162 | r++; |
4202 | } | 4163 | } |
4203 | } | 4164 | } |
4165 | |||
4166 | int mp_find_ioapic(int gsi) | ||
4167 | { | ||
4168 | int i = 0; | ||
4169 | |||
4170 | /* Find the IOAPIC that manages this GSI. */ | ||
4171 | for (i = 0; i < nr_ioapics; i++) { | ||
4172 | if ((gsi >= mp_gsi_routing[i].gsi_base) | ||
4173 | && (gsi <= mp_gsi_routing[i].gsi_end)) | ||
4174 | return i; | ||
4175 | } | ||
4176 | |||
4177 | printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi); | ||

4178 | return -1; | ||
4179 | } | ||
4180 | |||
4181 | int mp_find_ioapic_pin(int ioapic, int gsi) | ||
4182 | { | ||
4183 | if (WARN_ON(ioapic == -1)) | ||
4184 | return -1; | ||
4185 | if (WARN_ON(gsi > mp_gsi_routing[ioapic].gsi_end)) | ||
4186 | return -1; | ||
4187 | |||
4188 | return gsi - mp_gsi_routing[ioapic].gsi_base; | ||
4189 | } | ||
4190 | |||
4191 | static int bad_ioapic(unsigned long address) | ||
4192 | { | ||
4193 | if (nr_ioapics >= MAX_IO_APICS) { | ||
4194 | printk(KERN_WARNING "WARNING: Max # of I/O APICs (%d) exceeded " | ||
4195 | "(found %d), skipping\n", MAX_IO_APICS, nr_ioapics); | ||
4196 | return 1; | ||
4197 | } | ||
4198 | if (!address) { | ||
4199 | printk(KERN_WARNING "WARNING: Bogus (zero) I/O APIC address" | ||
4200 | " found in table, skipping!\n"); | ||
4201 | return 1; | ||
4202 | } | ||
4203 | return 0; | ||
4204 | } | ||
4205 | |||
4206 | void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) | ||
4207 | { | ||
4208 | int idx = 0; | ||
4209 | |||
4210 | if (bad_ioapic(address)) | ||
4211 | return; | ||
4212 | |||
4213 | idx = nr_ioapics; | ||
4214 | |||
4215 | mp_ioapics[idx].type = MP_IOAPIC; | ||
4216 | mp_ioapics[idx].flags = MPC_APIC_USABLE; | ||
4217 | mp_ioapics[idx].apicaddr = address; | ||
4218 | |||
4219 | set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); | ||
4220 | mp_ioapics[idx].apicid = io_apic_unique_id(id); | ||
4221 | mp_ioapics[idx].apicver = io_apic_get_version(idx); | ||
4222 | |||
4223 | /* | ||
4224 | * Build basic GSI lookup table to facilitate gsi->io_apic lookups | ||
4225 | * and to prevent reprogramming of IOAPIC pins (PCI GSIs). | ||
4226 | */ | ||
4227 | mp_gsi_routing[idx].gsi_base = gsi_base; | ||
4228 | mp_gsi_routing[idx].gsi_end = gsi_base + | ||
4229 | io_apic_get_redir_entries(idx); | ||
4230 | |||
4231 | printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, " | ||
4232 | "GSI %d-%d\n", idx, mp_ioapics[idx].apicid, | ||
4233 | mp_ioapics[idx].apicver, mp_ioapics[idx].apicaddr, | ||
4234 | mp_gsi_routing[idx].gsi_base, mp_gsi_routing[idx].gsi_end); | ||
4235 | |||
4236 | nr_ioapics++; | ||
4237 | } | ||
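mp_find_ioapic(), mp_find_ioapic_pin() and mp_register_ioapic() all lean on the mp_gsi_routing[] table declared elsewhere in this series; a minimal sketch of the bookkeeping they assume:

struct mp_ioapic_gsi {
        int gsi_base;           /* first GSI handled by this IOAPIC */
        int gsi_end;            /* last GSI in this IOAPIC's range */
};
extern struct mp_ioapic_gsi mp_gsi_routing[MAX_IO_APICS];

With two IOAPICs covering GSIs 0-23 and 24-55, mp_find_ioapic(30) returns 1 and mp_find_ioapic_pin(1, 30) returns pin 6.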
diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c index 6ef00ba4c886..08385e090a6f 100644 --- a/arch/x86/kernel/apic/ipi.c +++ b/arch/x86/kernel/apic/ipi.c | |||
@@ -153,7 +153,7 @@ int safe_smp_processor_id(void) | |||
153 | { | 153 | { |
154 | int apicid, cpuid; | 154 | int apicid, cpuid; |
155 | 155 | ||
156 | if (!boot_cpu_has(X86_FEATURE_APIC)) | 156 | if (!cpu_has_apic) |
157 | return 0; | 157 | return 0; |
158 | 158 | ||
159 | apicid = hard_smp_processor_id(); | 159 | apicid = hard_smp_processor_id(); |
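cpu_has_apic is the usual cpufeature shorthand, so the replacement is a pure spelling change; assuming the standard definition:

#define cpu_has_apic            boot_cpu_has(X86_FEATURE_APIC)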
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c index b3025b43b63a..db7220220d09 100644 --- a/arch/x86/kernel/apic/nmi.c +++ b/arch/x86/kernel/apic/nmi.c | |||
@@ -39,7 +39,7 @@ | |||
39 | int unknown_nmi_panic; | 39 | int unknown_nmi_panic; |
40 | int nmi_watchdog_enabled; | 40 | int nmi_watchdog_enabled; |
41 | 41 | ||
42 | static cpumask_var_t backtrace_mask; | 42 | static cpumask_t backtrace_mask __read_mostly; |
43 | 43 | ||
44 | /* nmi_active: | 44 | /* nmi_active: |
45 | * >0: the lapic NMI watchdog is active, but can be disabled | 45 | * >0: the lapic NMI watchdog is active, but can be disabled |
@@ -138,7 +138,6 @@ int __init check_nmi_watchdog(void) | |||
138 | if (!prev_nmi_count) | 138 | if (!prev_nmi_count) |
139 | goto error; | 139 | goto error; |
140 | 140 | ||
141 | alloc_cpumask_var(&backtrace_mask, GFP_KERNEL|__GFP_ZERO); | ||
142 | printk(KERN_INFO "Testing NMI watchdog ... "); | 141 | printk(KERN_INFO "Testing NMI watchdog ... "); |
143 | 142 | ||
144 | #ifdef CONFIG_SMP | 143 | #ifdef CONFIG_SMP |
@@ -415,14 +414,17 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason) | |||
415 | } | 414 | } |
416 | 415 | ||
417 | /* We can be called before check_nmi_watchdog, hence NULL check. */ | 416 | /* We can be called before check_nmi_watchdog, hence NULL check. */ |
418 | if (backtrace_mask != NULL && cpumask_test_cpu(cpu, backtrace_mask)) { | 417 | if (cpumask_test_cpu(cpu, &backtrace_mask)) { |
419 | static DEFINE_SPINLOCK(lock); /* Serialise the printks */ | 418 | static DEFINE_SPINLOCK(lock); /* Serialise the printks */ |
420 | 419 | ||
421 | spin_lock(&lock); | 420 | spin_lock(&lock); |
422 | printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu); | 421 | printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu); |
422 | show_regs(regs); | ||
423 | dump_stack(); | 423 | dump_stack(); |
424 | spin_unlock(&lock); | 424 | spin_unlock(&lock); |
425 | cpumask_clear_cpu(cpu, backtrace_mask); | 425 | cpumask_clear_cpu(cpu, &backtrace_mask); |
426 | |||
427 | rc = 1; | ||
426 | } | 428 | } |
427 | 429 | ||
428 | /* Could check oops_in_progress here too, but it's safer not to */ | 430 | /* Could check oops_in_progress here too, but it's safer not to */ |
@@ -552,14 +554,18 @@ int do_nmi_callback(struct pt_regs *regs, int cpu) | |||
552 | return 0; | 554 | return 0; |
553 | } | 555 | } |
554 | 556 | ||
555 | void __trigger_all_cpu_backtrace(void) | 557 | void arch_trigger_all_cpu_backtrace(void) |
556 | { | 558 | { |
557 | int i; | 559 | int i; |
558 | 560 | ||
559 | cpumask_copy(backtrace_mask, cpu_online_mask); | 561 | cpumask_copy(&backtrace_mask, cpu_online_mask); |
562 | |||
563 | printk(KERN_INFO "sending NMI to all CPUs:\n"); | ||
564 | apic->send_IPI_all(NMI_VECTOR); | ||
565 | |||
560 | /* Wait for up to 10 seconds for all CPUs to do the backtrace */ | 566 | /* Wait for up to 10 seconds for all CPUs to do the backtrace */ |
561 | for (i = 0; i < 10 * 1000; i++) { | 567 | for (i = 0; i < 10 * 1000; i++) { |
562 | if (cpumask_empty(backtrace_mask)) | 568 | if (cpumask_empty(&backtrace_mask)) |
563 | break; | 569 | break; |
564 | mdelay(1); | 570 | mdelay(1); |
565 | } | 571 | } |
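The rename to arch_trigger_all_cpu_backtrace() (which now also sends the NMI itself) suggests generic glue in a shared header. A sketch of what that wrapper plausibly looks like; the header and fallback here are an assumption, not part of this diff:

/* include/linux/nmi.h, hypothetical wiring: */
#ifdef arch_trigger_all_cpu_backtrace
static inline void trigger_all_cpu_backtrace(void)
{
        arch_trigger_all_cpu_backtrace();
}
#else
static inline void trigger_all_cpu_backtrace(void)
{
        /* no arch hook: silently do nothing */
}
#endif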
diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c index fcec2f1d34a1..65edc180fc82 100644 --- a/arch/x86/kernel/apic/probe_64.c +++ b/arch/x86/kernel/apic/probe_64.c | |||
@@ -55,11 +55,11 @@ static int apicid_phys_pkg_id(int initial_apic_id, int index_msb) | |||
55 | void __init default_setup_apic_routing(void) | 55 | void __init default_setup_apic_routing(void) |
56 | { | 56 | { |
57 | #ifdef CONFIG_X86_X2APIC | 57 | #ifdef CONFIG_X86_X2APIC |
58 | if (x2apic_mode && (apic != &apic_x2apic_phys && | 58 | if (x2apic_mode |
59 | #ifdef CONFIG_X86_UV | 59 | #ifdef CONFIG_X86_UV |
60 | apic != &apic_x2apic_uv_x && | 60 | && apic != &apic_x2apic_uv_x |
61 | #endif | 61 | #endif |
62 | apic != &apic_x2apic_cluster)) { | 62 | ) { |
63 | if (x2apic_phys) | 63 | if (x2apic_phys) |
64 | apic = &apic_x2apic_phys; | 64 | apic = &apic_x2apic_phys; |
65 | else | 65 | else |
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index 442b5508893f..151ace69a5aa 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c | |||
@@ -403,7 +403,15 @@ static DECLARE_WAIT_QUEUE_HEAD(apm_waitqueue); | |||
403 | static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue); | 403 | static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue); |
404 | static struct apm_user *user_list; | 404 | static struct apm_user *user_list; |
405 | static DEFINE_SPINLOCK(user_list_lock); | 405 | static DEFINE_SPINLOCK(user_list_lock); |
406 | static const struct desc_struct bad_bios_desc = { { { 0, 0x00409200 } } }; | 406 | |
407 | /* | ||
408 | * Set up a segment that references the real mode segment 0x40 | ||
409 | * that extends up to the end of page zero (that we have reserved). | ||
410 | * This is for buggy BIOSes that refer to (real mode) segment 0x40 | ||
411 | * even though they are called in protected mode. | ||
412 | */ | ||
413 | static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092, | ||
414 | (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1); | ||
407 | 415 | ||
408 | static const char driver_version[] = "1.16ac"; /* no spaces */ | 416 | static const char driver_version[] = "1.16ac"; /* no spaces */ |
409 | 417 | ||
@@ -2332,15 +2340,6 @@ static int __init apm_init(void) | |||
2332 | pm_flags |= PM_APM; | 2340 | pm_flags |= PM_APM; |
2333 | 2341 | ||
2334 | /* | 2342 | /* |
2335 | * Set up a segment that references the real mode segment 0x40 | ||
2336 | * that extends up to the end of page zero (that we have reserved). | ||
2337 | * This is for buggy BIOS's that refer to (real mode) segment 0x40 | ||
2338 | * even though they are called in protected mode. | ||
2339 | */ | ||
2340 | set_base(bad_bios_desc, __va((unsigned long)0x40 << 4)); | ||
2341 | _set_limit((char *)&bad_bios_desc, 4095 - (0x40 << 4)); | ||
2342 | |||
2343 | /* | ||
2344 | * Set up the long jump entry point to the APM BIOS, which is called | 2343 | * Set up the long jump entry point to the APM BIOS, which is called |
2345 | * from inline assembly. | 2344 | * from inline assembly. |
2346 | */ | 2345 | */ |
@@ -2358,12 +2357,12 @@ static int __init apm_init(void) | |||
2358 | * code to that CPU. | 2357 | * code to that CPU. |
2359 | */ | 2358 | */ |
2360 | gdt = get_cpu_gdt_table(0); | 2359 | gdt = get_cpu_gdt_table(0); |
2361 | set_base(gdt[APM_CS >> 3], | 2360 | set_desc_base(&gdt[APM_CS >> 3], |
2362 | __va((unsigned long)apm_info.bios.cseg << 4)); | 2361 | (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4)); |
2363 | set_base(gdt[APM_CS_16 >> 3], | 2362 | set_desc_base(&gdt[APM_CS_16 >> 3], |
2364 | __va((unsigned long)apm_info.bios.cseg_16 << 4)); | 2363 | (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4)); |
2365 | set_base(gdt[APM_DS >> 3], | 2364 | set_desc_base(&gdt[APM_DS >> 3], |
2366 | __va((unsigned long)apm_info.bios.dseg << 4)); | 2365 | (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4)); |
2367 | 2366 | ||
2368 | proc_create("apm", 0, NULL, &apm_file_ops); | 2367 | proc_create("apm", 0, NULL, &apm_file_ops); |
2369 | 2368 | ||
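Neither GDT_ENTRY_INIT() nor set_desc_base() is defined in this hunk. Assuming the conventional x86 descriptor layout, they are roughly:

#define GDT_ENTRY_INIT(flags, base, limit) { { { \
                .a = ((limit) & 0xffff) | (((base) & 0xffff) << 16), \
                .b = (((base) & 0xff0000) >> 16) | (((flags) & 0xf0ff) << 8) | \
                        ((limit) & 0xf0000) | ((base) & 0xff000000), \
        } } }

static inline void set_desc_base(struct desc_struct *desc, unsigned long base)
{
        desc->base0 = base & 0xffff;
        desc->base1 = (base >> 16) & 0xff;
        desc->base2 = (base >> 24) & 0xff;
}

The payoff is that bad_bios_desc becomes a compile-time constant instead of being patched up at runtime in apm_init().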
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c index 898ecc47e129..4a6aeedcd965 100644 --- a/arch/x86/kernel/asm-offsets_64.c +++ b/arch/x86/kernel/asm-offsets_64.c | |||
@@ -3,6 +3,7 @@ | |||
3 | * This code generates raw asm output which is post-processed to extract | 3 | * This code generates raw asm output which is post-processed to extract |
4 | * and format the required data. | 4 | * and format the required data. |
5 | */ | 5 | */ |
6 | #define COMPILE_OFFSETS | ||
6 | 7 | ||
7 | #include <linux/crypto.h> | 8 | #include <linux/crypto.h> |
8 | #include <linux/sched.h> | 9 | #include <linux/sched.h> |
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 63fddcd082cd..22a47c82f3c0 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
@@ -2,7 +2,7 @@ | |||
2 | #include <linux/bitops.h> | 2 | #include <linux/bitops.h> |
3 | #include <linux/mm.h> | 3 | #include <linux/mm.h> |
4 | 4 | ||
5 | #include <asm/io.h> | 5 | #include <linux/io.h> |
6 | #include <asm/processor.h> | 6 | #include <asm/processor.h> |
7 | #include <asm/apic.h> | 7 | #include <asm/apic.h> |
8 | #include <asm/cpu.h> | 8 | #include <asm/cpu.h> |
@@ -45,8 +45,8 @@ static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c) | |||
45 | #define CBAR_ENB (0x80000000) | 45 | #define CBAR_ENB (0x80000000) |
46 | #define CBAR_KEY (0X000000CB) | 46 | #define CBAR_KEY (0X000000CB) |
47 | if (c->x86_model == 9 || c->x86_model == 10) { | 47 | if (c->x86_model == 9 || c->x86_model == 10) { |
48 | if (inl (CBAR) & CBAR_ENB) | 48 | if (inl(CBAR) & CBAR_ENB) |
49 | outl (0 | CBAR_KEY, CBAR); | 49 | outl(0 | CBAR_KEY, CBAR); |
50 | } | 50 | } |
51 | } | 51 | } |
52 | 52 | ||
@@ -87,9 +87,10 @@ static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c) | |||
87 | d = d2-d; | 87 | d = d2-d; |
88 | 88 | ||
89 | if (d > 20*K6_BUG_LOOP) | 89 | if (d > 20*K6_BUG_LOOP) |
90 | printk("system stability may be impaired when more than 32 MB are used.\n"); | 90 | printk(KERN_CONT |
91 | "system stability may be impaired when more than 32 MB are used.\n"); | ||
91 | else | 92 | else |
92 | printk("probably OK (after B9730xxxx).\n"); | 93 | printk(KERN_CONT "probably OK (after B9730xxxx).\n"); |
93 | printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n"); | 94 | printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n"); |
94 | } | 95 | } |
95 | 96 | ||
@@ -219,8 +220,9 @@ static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c) | |||
219 | if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) { | 220 | if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) { |
220 | rdmsr(MSR_K7_CLK_CTL, l, h); | 221 | rdmsr(MSR_K7_CLK_CTL, l, h); |
221 | if ((l & 0xfff00000) != 0x20000000) { | 222 | if ((l & 0xfff00000) != 0x20000000) { |
222 | printk ("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l, | 223 | printk(KERN_INFO |
223 | ((l & 0x000fffff)|0x20000000)); | 224 | "CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", |
225 | l, ((l & 0x000fffff)|0x20000000)); | ||
224 | wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h); | 226 | wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h); |
225 | } | 227 | } |
226 | } | 228 | } |
@@ -251,6 +253,64 @@ static int __cpuinit nearby_node(int apicid) | |||
251 | #endif | 253 | #endif |
252 | 254 | ||
253 | /* | 255 | /* |
256 | * Fixup core topology information for AMD multi-node processors. | ||
257 | * Assumption 1: Number of cores in each internal node is the same. | ||
258 | * Assumption 2: Mixed systems with both single-node and dual-node | ||
259 | * processors are not supported. | ||
260 | */ | ||
261 | #ifdef CONFIG_X86_HT | ||
262 | static void __cpuinit amd_fixup_dcm(struct cpuinfo_x86 *c) | ||
263 | { | ||
264 | #ifdef CONFIG_PCI | ||
265 | u32 t, cpn; | ||
266 | u8 n, n_id; | ||
267 | int cpu = smp_processor_id(); | ||
268 | |||
269 | /* fixup topology information only once for a core */ | ||
270 | if (cpu_has(c, X86_FEATURE_AMD_DCM)) | ||
271 | return; | ||
272 | |||
273 | /* check for multi-node processor on boot cpu */ | ||
274 | t = read_pci_config(0, 24, 3, 0xe8); | ||
275 | if (!(t & (1 << 29))) | ||
276 | return; | ||
277 | |||
278 | set_cpu_cap(c, X86_FEATURE_AMD_DCM); | ||
279 | |||
280 | /* cores per node: each internal node has half the number of cores */ | ||
281 | cpn = c->x86_max_cores >> 1; | ||
282 | |||
283 | /* even-numbered NB_id of this dual-node processor */ | ||
284 | n = c->phys_proc_id << 1; | ||
285 | |||
286 | /* | ||
287 | * determine internal node id and assign cores fifty-fifty to | ||
288 | * each node of the dual-node processor | ||
289 | */ | ||
290 | t = read_pci_config(0, 24 + n, 3, 0xe8); | ||
291 | n = (t>>30) & 0x3; | ||
292 | if (n == 0) { | ||
293 | if (c->cpu_core_id < cpn) | ||
294 | n_id = 0; | ||
295 | else | ||
296 | n_id = 1; | ||
297 | } else { | ||
298 | if (c->cpu_core_id < cpn) | ||
299 | n_id = 1; | ||
300 | else | ||
301 | n_id = 0; | ||
302 | } | ||
303 | |||
304 | /* compute entire NodeID, use llc_shared_map to store sibling info */ | ||
305 | per_cpu(cpu_llc_id, cpu) = (c->phys_proc_id << 1) + n_id; | ||
306 | |||
307 | /* fixup core id to be in range from 0 to cpn */ | ||
308 | c->cpu_core_id = c->cpu_core_id % cpn; | ||
309 | #endif | ||
310 | } | ||
311 | #endif | ||
312 | |||
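A worked example of the NodeID arithmetic above, for a hypothetical 12-core dual-node package:

/*
 * x86_max_cores = 12, so cpn = 6 cores per internal node.  For the
 * package with phys_proc_id = 1 the even NB id is n = 2; assume
 * F3xE8 bits 31:30 of NB 2 read back as 0, so cores 0..5 map to
 * internal node 0 and cores 6..11 to node 1.  Core 8 then gets
 * n_id = 1, cpu_llc_id = (1 << 1) + 1 = 3, cpu_core_id = 8 % 6 = 2.
 */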
313 | /* | ||
254 | * On an AMD dual core setup the lower bits of the APIC id distinguish the cores. | 314 | * On an AMD dual core setup the lower bits of the APIC id distinguish the cores. |
255 | * Assumes number of cores is a power of two. | 315 | * Assumes number of cores is a power of two. |
256 | */ | 316 | */ |
@@ -267,6 +327,9 @@ static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c) | |||
267 | c->phys_proc_id = c->initial_apicid >> bits; | 327 | c->phys_proc_id = c->initial_apicid >> bits; |
268 | /* use socket ID also for last level cache */ | 328 | /* use socket ID also for last level cache */ |
269 | per_cpu(cpu_llc_id, cpu) = c->phys_proc_id; | 329 | per_cpu(cpu_llc_id, cpu) = c->phys_proc_id; |
330 | /* fixup topology information on multi-node processors */ | ||
331 | if ((c->x86 == 0x10) && (c->x86_model == 9)) | ||
332 | amd_fixup_dcm(c); | ||
270 | #endif | 333 | #endif |
271 | } | 334 | } |
272 | 335 | ||
@@ -275,9 +338,10 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c) | |||
275 | #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) | 338 | #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) |
276 | int cpu = smp_processor_id(); | 339 | int cpu = smp_processor_id(); |
277 | int node; | 340 | int node; |
278 | unsigned apicid = cpu_has_apic ? hard_smp_processor_id() : c->apicid; | 341 | unsigned apicid = c->apicid; |
342 | |||
343 | node = per_cpu(cpu_llc_id, cpu); | ||
279 | 344 | ||
280 | node = c->phys_proc_id; | ||
281 | if (apicid_to_node[apicid] != NUMA_NO_NODE) | 345 | if (apicid_to_node[apicid] != NUMA_NO_NODE) |
282 | node = apicid_to_node[apicid]; | 346 | node = apicid_to_node[apicid]; |
283 | if (!node_online(node)) { | 347 | if (!node_online(node)) { |
@@ -398,18 +462,30 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) | |||
398 | u32 level; | 462 | u32 level; |
399 | 463 | ||
400 | level = cpuid_eax(1); | 464 | level = cpuid_eax(1); |
401 | if((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58) | 465 | if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58) |
402 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); | 466 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); |
403 | 467 | ||
404 | /* | 468 | /* |
405 | * Some BIOSes incorrectly force this feature, but only K8 | 469 | * Some BIOSes incorrectly force this feature, but only K8 |
406 | * revision D (model = 0x14) and later actually support it. | 470 | * revision D (model = 0x14) and later actually support it. |
471 | * (AMD Erratum #110, docId: 25759). | ||
407 | */ | 472 | */ |
408 | if (c->x86_model < 0x14) | 473 | if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) { |
474 | u64 val; | ||
475 | |||
409 | clear_cpu_cap(c, X86_FEATURE_LAHF_LM); | 476 | clear_cpu_cap(c, X86_FEATURE_LAHF_LM); |
477 | if (!rdmsrl_amd_safe(0xc001100d, &val)) { | ||
478 | val &= ~(1ULL << 32); | ||
479 | wrmsrl_amd_safe(0xc001100d, val); | ||
480 | } | ||
481 | } | ||
482 | |||
410 | } | 483 | } |
411 | if (c->x86 == 0x10 || c->x86 == 0x11) | 484 | if (c->x86 == 0x10 || c->x86 == 0x11) |
412 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); | 485 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); |
486 | |||
487 | /* get apicid instead of initial apic id from cpuid */ | ||
488 | c->apicid = hard_smp_processor_id(); | ||
413 | #else | 489 | #else |
414 | 490 | ||
415 | /* | 491 | /* |
@@ -494,27 +570,30 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) | |||
494 | * benefit in doing so. | 570 | * benefit in doing so. |
495 | */ | 571 | */ |
496 | if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) { | 572 | if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) { |
497 | printk(KERN_DEBUG "tseg: %010llx\n", tseg); | 573 | printk(KERN_DEBUG "tseg: %010llx\n", tseg); |
498 | if ((tseg>>PMD_SHIFT) < | 574 | if ((tseg>>PMD_SHIFT) < |
499 | (max_low_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) || | 575 | (max_low_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) || |
500 | ((tseg>>PMD_SHIFT) < | 576 | ((tseg>>PMD_SHIFT) < |
501 | (max_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) && | 577 | (max_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) && |
502 | (tseg>>PMD_SHIFT) >= (1ULL<<(32 - PMD_SHIFT)))) | 578 | (tseg>>PMD_SHIFT) >= (1ULL<<(32 - PMD_SHIFT)))) |
503 | set_memory_4k((unsigned long)__va(tseg), 1); | 579 | set_memory_4k((unsigned long)__va(tseg), 1); |
504 | } | 580 | } |
505 | } | 581 | } |
506 | #endif | 582 | #endif |
507 | } | 583 | } |
508 | 584 | ||
509 | #ifdef CONFIG_X86_32 | 585 | #ifdef CONFIG_X86_32 |
510 | static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, unsigned int size) | 586 | static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, |
587 | unsigned int size) | ||
511 | { | 588 | { |
512 | /* AMD errata T13 (order #21922) */ | 589 | /* AMD errata T13 (order #21922) */ |
513 | if ((c->x86 == 6)) { | 590 | if ((c->x86 == 6)) { |
514 | if (c->x86_model == 3 && c->x86_mask == 0) /* Duron Rev A0 */ | 591 | /* Duron Rev A0 */ |
592 | if (c->x86_model == 3 && c->x86_mask == 0) | ||
515 | size = 64; | 593 | size = 64; |
594 | /* Tbird rev A1/A2 */ | ||
516 | if (c->x86_model == 4 && | 595 | if (c->x86_model == 4 && |
517 | (c->x86_mask == 0 || c->x86_mask == 1)) /* Tbird rev A1/A2 */ | 596 | (c->x86_mask == 0 || c->x86_mask == 1)) |
518 | size = 256; | 597 | size = 256; |
519 | } | 598 | } |
520 | return size; | 599 | return size; |
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index c8e315f1aa83..01a265212395 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c | |||
@@ -81,7 +81,7 @@ static void __init check_fpu(void) | |||
81 | 81 | ||
82 | boot_cpu_data.fdiv_bug = fdiv_bug; | 82 | boot_cpu_data.fdiv_bug = fdiv_bug; |
83 | if (boot_cpu_data.fdiv_bug) | 83 | if (boot_cpu_data.fdiv_bug) |
84 | printk("Hmm, FPU with FDIV bug.\n"); | 84 | printk(KERN_WARNING "Hmm, FPU with FDIV bug.\n"); |
85 | } | 85 | } |
86 | 86 | ||
87 | static void __init check_hlt(void) | 87 | static void __init check_hlt(void) |
@@ -98,7 +98,7 @@ static void __init check_hlt(void) | |||
98 | halt(); | 98 | halt(); |
99 | halt(); | 99 | halt(); |
100 | halt(); | 100 | halt(); |
101 | printk("OK.\n"); | 101 | printk(KERN_CONT "OK.\n"); |
102 | } | 102 | } |
103 | 103 | ||
104 | /* | 104 | /* |
@@ -122,9 +122,9 @@ static void __init check_popad(void) | |||
122 | * CPU hard. Too bad. | 122 | * CPU hard. Too bad. |
123 | */ | 123 | */ |
124 | if (res != 12345678) | 124 | if (res != 12345678) |
125 | printk("Buggy.\n"); | 125 | printk(KERN_CONT "Buggy.\n"); |
126 | else | 126 | else |
127 | printk("OK.\n"); | 127 | printk(KERN_CONT "OK.\n"); |
128 | #endif | 128 | #endif |
129 | } | 129 | } |
130 | 130 | ||
@@ -156,7 +156,7 @@ void __init check_bugs(void) | |||
156 | { | 156 | { |
157 | identify_boot_cpu(); | 157 | identify_boot_cpu(); |
158 | #ifndef CONFIG_SMP | 158 | #ifndef CONFIG_SMP |
159 | printk("CPU: "); | 159 | printk(KERN_INFO "CPU: "); |
160 | print_cpu_info(&boot_cpu_data); | 160 | print_cpu_info(&boot_cpu_data); |
161 | #endif | 161 | #endif |
162 | check_config(); | 162 | check_config(); |
diff --git a/arch/x86/kernel/cpu/bugs_64.c b/arch/x86/kernel/cpu/bugs_64.c index 9a3ed0649d4e..04f0fe5af83e 100644 --- a/arch/x86/kernel/cpu/bugs_64.c +++ b/arch/x86/kernel/cpu/bugs_64.c | |||
@@ -15,7 +15,7 @@ void __init check_bugs(void) | |||
15 | { | 15 | { |
16 | identify_boot_cpu(); | 16 | identify_boot_cpu(); |
17 | #if !defined(CONFIG_SMP) | 17 | #if !defined(CONFIG_SMP) |
18 | printk("CPU: "); | 18 | printk(KERN_INFO "CPU: "); |
19 | print_cpu_info(&boot_cpu_data); | 19 | print_cpu_info(&boot_cpu_data); |
20 | #endif | 20 | #endif |
21 | alternative_instructions(); | 21 | alternative_instructions(); |
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 5ce60a88027b..2055fc2b2e6b 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -18,8 +18,8 @@ | |||
18 | #include <asm/hypervisor.h> | 18 | #include <asm/hypervisor.h> |
19 | #include <asm/processor.h> | 19 | #include <asm/processor.h> |
20 | #include <asm/sections.h> | 20 | #include <asm/sections.h> |
21 | #include <asm/topology.h> | 21 | #include <linux/topology.h> |
22 | #include <asm/cpumask.h> | 22 | #include <linux/cpumask.h> |
23 | #include <asm/pgtable.h> | 23 | #include <asm/pgtable.h> |
24 | #include <asm/atomic.h> | 24 | #include <asm/atomic.h> |
25 | #include <asm/proto.h> | 25 | #include <asm/proto.h> |
@@ -28,13 +28,13 @@ | |||
28 | #include <asm/desc.h> | 28 | #include <asm/desc.h> |
29 | #include <asm/i387.h> | 29 | #include <asm/i387.h> |
30 | #include <asm/mtrr.h> | 30 | #include <asm/mtrr.h> |
31 | #include <asm/numa.h> | 31 | #include <linux/numa.h> |
32 | #include <asm/asm.h> | 32 | #include <asm/asm.h> |
33 | #include <asm/cpu.h> | 33 | #include <asm/cpu.h> |
34 | #include <asm/mce.h> | 34 | #include <asm/mce.h> |
35 | #include <asm/msr.h> | 35 | #include <asm/msr.h> |
36 | #include <asm/pat.h> | 36 | #include <asm/pat.h> |
37 | #include <asm/smp.h> | 37 | #include <linux/smp.h> |
38 | 38 | ||
39 | #ifdef CONFIG_X86_LOCAL_APIC | 39 | #ifdef CONFIG_X86_LOCAL_APIC |
40 | #include <asm/uv/uv.h> | 40 | #include <asm/uv/uv.h> |
@@ -94,45 +94,45 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { | |||
94 | * TLS descriptors are currently at a different place compared to i386. | 94 | * TLS descriptors are currently at a different place compared to i386. |
95 | * Hopefully nobody expects them at a fixed place (Wine?) | 95 | * Hopefully nobody expects them at a fixed place (Wine?) |
96 | */ | 96 | */ |
97 | [GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } }, | 97 | [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff), |
98 | [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } }, | 98 | [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff), |
99 | [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } }, | 99 | [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff), |
100 | [GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } }, | 100 | [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff), |
101 | [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } }, | 101 | [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff), |
102 | [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } }, | 102 | [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff), |
103 | #else | 103 | #else |
104 | [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } }, | 104 | [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff), |
105 | [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } }, | 105 | [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), |
106 | [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } }, | 106 | [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff), |
107 | [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff200 } } }, | 107 | [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff), |
108 | /* | 108 | /* |
109 | * Segments used for calling PnP BIOS have byte granularity. | 109 | * Segments used for calling PnP BIOS have byte granularity. |
110 | * Their code segments and data segments have fixed 64k limits, | 110 | * Their code segments and data segments have fixed 64k limits, |
111 | * the transfer segment sizes are set at run time. | 111 | * the transfer segment sizes are set at run time. |
112 | */ | 112 | */ |
113 | /* 32-bit code */ | 113 | /* 32-bit code */ |
114 | [GDT_ENTRY_PNPBIOS_CS32] = { { { 0x0000ffff, 0x00409a00 } } }, | 114 | [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff), |
115 | /* 16-bit code */ | 115 | /* 16-bit code */ |
116 | [GDT_ENTRY_PNPBIOS_CS16] = { { { 0x0000ffff, 0x00009a00 } } }, | 116 | [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff), |
117 | /* 16-bit data */ | 117 | /* 16-bit data */ |
118 | [GDT_ENTRY_PNPBIOS_DS] = { { { 0x0000ffff, 0x00009200 } } }, | 118 | [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff), |
119 | /* 16-bit data */ | 119 | /* 16-bit data */ |
120 | [GDT_ENTRY_PNPBIOS_TS1] = { { { 0x00000000, 0x00009200 } } }, | 120 | [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0), |
121 | /* 16-bit data */ | 121 | /* 16-bit data */ |
122 | [GDT_ENTRY_PNPBIOS_TS2] = { { { 0x00000000, 0x00009200 } } }, | 122 | [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0), |
123 | /* | 123 | /* |
124 | * The APM segments have byte granularity and their bases | 124 | * The APM segments have byte granularity and their bases |
125 | * are set at run time. All have 64k limits. | 125 | * are set at run time. All have 64k limits. |
126 | */ | 126 | */ |
127 | /* 32-bit code */ | 127 | /* 32-bit code */ |
128 | [GDT_ENTRY_APMBIOS_BASE] = { { { 0x0000ffff, 0x00409a00 } } }, | 128 | [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff), |
129 | /* 16-bit code */ | 129 | /* 16-bit code */ |
130 | [GDT_ENTRY_APMBIOS_BASE+1] = { { { 0x0000ffff, 0x00009a00 } } }, | 130 | [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff), |
131 | /* data */ | 131 | /* data */ |
132 | [GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } }, | 132 | [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff), |
133 | 133 | ||
134 | [GDT_ENTRY_ESPFIX_SS] = { { { 0x0000ffff, 0x00cf9200 } } }, | 134 | [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), |
135 | [GDT_ENTRY_PERCPU] = { { { 0x0000ffff, 0x00cf9200 } } }, | 135 | [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), |
136 | GDT_STACK_CANARY_INIT | 136 | GDT_STACK_CANARY_INIT |
137 | #endif | 137 | #endif |
138 | } }; | 138 | } }; |
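To see that the GDT conversion is mechanical, decode one entry:

/*
 * Old literal for the 32-bit kernel code segment:
 *   { { { 0x0000ffff, 0x00cf9a00 } } }
 * Word 0 carries limit 15:0 (0xffff) and base 15:0 (0); word 1 carries
 * base 23:16 (0), access byte 0x9a, limit 19:16 plus granularity bits
 * (0xcf) and base 31:24 (0).  That is limit 0xfffff, base 0 and flag
 * bytes 0xc0/0x9a -- exactly GDT_ENTRY_INIT(0xc09a, 0, 0xfffff).
 */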
@@ -982,18 +982,26 @@ static __init int setup_disablecpuid(char *arg) | |||
982 | __setup("clearcpuid=", setup_disablecpuid); | 982 | __setup("clearcpuid=", setup_disablecpuid); |
983 | 983 | ||
984 | #ifdef CONFIG_X86_64 | 984 | #ifdef CONFIG_X86_64 |
985 | struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table }; | 985 | struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table }; |
986 | 986 | ||
987 | DEFINE_PER_CPU_FIRST(union irq_stack_union, | 987 | DEFINE_PER_CPU_FIRST(union irq_stack_union, |
988 | irq_stack_union) __aligned(PAGE_SIZE); | 988 | irq_stack_union) __aligned(PAGE_SIZE); |
989 | 989 | ||
990 | DEFINE_PER_CPU(char *, irq_stack_ptr) = | 990 | /* |
991 | init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64; | 991 | * The following four percpu variables are hot. Align current_task to |
992 | * cacheline size such that all four fall in the same cacheline. | ||
993 | */ | ||
994 | DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned = | ||
995 | &init_task; | ||
996 | EXPORT_PER_CPU_SYMBOL(current_task); | ||
992 | 997 | ||
993 | DEFINE_PER_CPU(unsigned long, kernel_stack) = | 998 | DEFINE_PER_CPU(unsigned long, kernel_stack) = |
994 | (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE; | 999 | (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE; |
995 | EXPORT_PER_CPU_SYMBOL(kernel_stack); | 1000 | EXPORT_PER_CPU_SYMBOL(kernel_stack); |
996 | 1001 | ||
1002 | DEFINE_PER_CPU(char *, irq_stack_ptr) = | ||
1003 | init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64; | ||
1004 | |||
997 | DEFINE_PER_CPU(unsigned int, irq_count) = -1; | 1005 | DEFINE_PER_CPU(unsigned int, irq_count) = -1; |
998 | 1006 | ||
999 | /* | 1007 | /* |
@@ -1008,8 +1016,7 @@ static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = { | |||
1008 | }; | 1016 | }; |
1009 | 1017 | ||
1010 | static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks | 1018 | static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks |
1011 | [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]) | 1019 | [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]); |
1012 | __aligned(PAGE_SIZE); | ||
1013 | 1020 | ||
1014 | /* May not be marked __init: used by software suspend */ | 1021 | /* May not be marked __init: used by software suspend */ |
1015 | void syscall_init(void) | 1022 | void syscall_init(void) |
@@ -1042,8 +1049,11 @@ DEFINE_PER_CPU(struct orig_ist, orig_ist); | |||
1042 | 1049 | ||
1043 | #else /* CONFIG_X86_64 */ | 1050 | #else /* CONFIG_X86_64 */ |
1044 | 1051 | ||
1052 | DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task; | ||
1053 | EXPORT_PER_CPU_SYMBOL(current_task); | ||
1054 | |||
1045 | #ifdef CONFIG_CC_STACKPROTECTOR | 1055 | #ifdef CONFIG_CC_STACKPROTECTOR |
1046 | DEFINE_PER_CPU(unsigned long, stack_canary); | 1056 | DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary); |
1047 | #endif | 1057 | #endif |
1048 | 1058 | ||
1049 | /* Make sure %fs and %gs are initialized properly in idle threads */ | 1059 | /* Make sure %fs and %gs are initialized properly in idle threads */ |
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c index 593171e967ef..19807b89f058 100644 --- a/arch/x86/kernel/cpu/cyrix.c +++ b/arch/x86/kernel/cpu/cyrix.c | |||
@@ -3,10 +3,10 @@ | |||
3 | #include <linux/delay.h> | 3 | #include <linux/delay.h> |
4 | #include <linux/pci.h> | 4 | #include <linux/pci.h> |
5 | #include <asm/dma.h> | 5 | #include <asm/dma.h> |
6 | #include <asm/io.h> | 6 | #include <linux/io.h> |
7 | #include <asm/processor-cyrix.h> | 7 | #include <asm/processor-cyrix.h> |
8 | #include <asm/processor-flags.h> | 8 | #include <asm/processor-flags.h> |
9 | #include <asm/timer.h> | 9 | #include <linux/timer.h> |
10 | #include <asm/pci-direct.h> | 10 | #include <asm/pci-direct.h> |
11 | #include <asm/tsc.h> | 11 | #include <asm/tsc.h> |
12 | 12 | ||
@@ -282,7 +282,8 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) | |||
282 | * The 5510/5520 companion chips have a funky PIT. | 282 | * The 5510/5520 companion chips have a funky PIT. |
283 | */ | 283 | */ |
284 | if (vendor == PCI_VENDOR_ID_CYRIX && | 284 | if (vendor == PCI_VENDOR_ID_CYRIX && |
285 | (device == PCI_DEVICE_ID_CYRIX_5510 || device == PCI_DEVICE_ID_CYRIX_5520)) | 285 | (device == PCI_DEVICE_ID_CYRIX_5510 || |
286 | device == PCI_DEVICE_ID_CYRIX_5520)) | ||
286 | mark_tsc_unstable("cyrix 5510/5520 detected"); | 287 | mark_tsc_unstable("cyrix 5510/5520 detected"); |
287 | } | 288 | } |
288 | #endif | 289 | #endif |
@@ -299,7 +300,8 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) | |||
299 | * ? : 0x7x | 300 | * ? : 0x7x |
300 | * GX1 : 0x8x GX1 datasheet 56 | 301 | * GX1 : 0x8x GX1 datasheet 56 |
301 | */ | 302 | */ |
302 | if ((0x30 <= dir1 && dir1 <= 0x6f) || (0x80 <= dir1 && dir1 <= 0x8f)) | 303 | if ((0x30 <= dir1 && dir1 <= 0x6f) || |
304 | (0x80 <= dir1 && dir1 <= 0x8f)) | ||
303 | geode_configure(); | 305 | geode_configure(); |
304 | return; | 306 | return; |
305 | } else { /* MediaGX */ | 307 | } else { /* MediaGX */ |
@@ -427,9 +429,12 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c) | |||
427 | printk(KERN_INFO "Enabling CPUID on Cyrix processor.\n"); | 429 | printk(KERN_INFO "Enabling CPUID on Cyrix processor.\n"); |
428 | local_irq_save(flags); | 430 | local_irq_save(flags); |
429 | ccr3 = getCx86(CX86_CCR3); | 431 | ccr3 = getCx86(CX86_CCR3); |
430 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ | 432 | /* enable MAPEN */ |
431 | setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x80); /* enable cpuid */ | 433 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); |
432 | setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ | 434 | /* enable cpuid */ |
435 | setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x80); | ||
436 | /* disable MAPEN */ | ||
437 | setCx86(CX86_CCR3, ccr3); | ||
433 | local_irq_restore(flags); | 438 | local_irq_restore(flags); |
434 | } | 439 | } |
435 | } | 440 | } |
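The MAPEN sequence above goes through the Cyrix indexed configuration registers. Assuming the standard port 0x22/0x23 access pattern, the accessors amount to:

static inline u8 getCx86(u8 reg)
{
        outb(reg, 0x22);        /* select the configuration register */
        return inb(0x23);       /* read it through the data port */
}

static inline void setCx86(u8 reg, u8 data)
{
        outb(reg, 0x22);
        outb(data, 0x23);
}

Writing 1 into the MAPEN field (CCR3 bits 7:4) unlocks the extended configuration registers, CCR4 here, for the duration of the CPUID enable.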
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c index fb5b86af0b01..93ba8eeb100a 100644 --- a/arch/x86/kernel/cpu/hypervisor.c +++ b/arch/x86/kernel/cpu/hypervisor.c | |||
@@ -28,11 +28,10 @@ | |||
28 | static inline void __cpuinit | 28 | static inline void __cpuinit |
29 | detect_hypervisor_vendor(struct cpuinfo_x86 *c) | 29 | detect_hypervisor_vendor(struct cpuinfo_x86 *c) |
30 | { | 30 | { |
31 | if (vmware_platform()) { | 31 | if (vmware_platform()) |
32 | c->x86_hyper_vendor = X86_HYPER_VENDOR_VMWARE; | 32 | c->x86_hyper_vendor = X86_HYPER_VENDOR_VMWARE; |
33 | } else { | 33 | else |
34 | c->x86_hyper_vendor = X86_HYPER_VENDOR_NONE; | 34 | c->x86_hyper_vendor = X86_HYPER_VENDOR_NONE; |
35 | } | ||
36 | } | 35 | } |
37 | 36 | ||
38 | unsigned long get_hypervisor_tsc_freq(void) | 37 | unsigned long get_hypervisor_tsc_freq(void) |
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 3260ab044996..80a722a071b5 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c | |||
@@ -7,17 +7,17 @@ | |||
7 | #include <linux/sched.h> | 7 | #include <linux/sched.h> |
8 | #include <linux/thread_info.h> | 8 | #include <linux/thread_info.h> |
9 | #include <linux/module.h> | 9 | #include <linux/module.h> |
10 | #include <linux/uaccess.h> | ||
10 | 11 | ||
11 | #include <asm/processor.h> | 12 | #include <asm/processor.h> |
12 | #include <asm/pgtable.h> | 13 | #include <asm/pgtable.h> |
13 | #include <asm/msr.h> | 14 | #include <asm/msr.h> |
14 | #include <asm/uaccess.h> | ||
15 | #include <asm/ds.h> | 15 | #include <asm/ds.h> |
16 | #include <asm/bugs.h> | 16 | #include <asm/bugs.h> |
17 | #include <asm/cpu.h> | 17 | #include <asm/cpu.h> |
18 | 18 | ||
19 | #ifdef CONFIG_X86_64 | 19 | #ifdef CONFIG_X86_64 |
20 | #include <asm/topology.h> | 20 | #include <linux/topology.h> |
21 | #include <asm/numa_64.h> | 21 | #include <asm/numa_64.h> |
22 | #endif | 22 | #endif |
23 | 23 | ||
@@ -174,7 +174,8 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) | |||
174 | #ifdef CONFIG_X86_F00F_BUG | 174 | #ifdef CONFIG_X86_F00F_BUG |
175 | /* | 175 | /* |
176 | * All current models of Pentium and Pentium with MMX technology CPUs | 176 | * All current models of Pentium and Pentium with MMX technology CPUs |
177 | * have the F0 0F bug, which lets nonprivileged users lock up the system. | 177 | * have the F0 0F bug, which lets nonprivileged users lock up the |
178 | * system. | ||
178 | * Note that the workaround only should be initialized once... | 179 | * Note that the workaround only should be initialized once... |
179 | */ | 180 | */ |
180 | c->f00f_bug = 0; | 181 | c->f00f_bug = 0; |
@@ -207,7 +208,7 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) | |||
207 | printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n"); | 208 | printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n"); |
208 | printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n"); | 209 | printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n"); |
209 | lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE; | 210 | lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE; |
210 | wrmsr (MSR_IA32_MISC_ENABLE, lo, hi); | 211 | wrmsr(MSR_IA32_MISC_ENABLE, lo, hi); |
211 | } | 212 | } |
212 | } | 213 | } |
213 | 214 | ||
@@ -283,7 +284,7 @@ static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c) | |||
283 | /* Intel has a non-standard dependency on %ecx for this CPUID level. */ | 284 | /* Intel has a non-standard dependency on %ecx for this CPUID level. */ |
284 | cpuid_count(4, 0, &eax, &ebx, &ecx, &edx); | 285 | cpuid_count(4, 0, &eax, &ebx, &ecx, &edx); |
285 | if (eax & 0x1f) | 286 | if (eax & 0x1f) |
286 | return ((eax >> 26) + 1); | 287 | return (eax >> 26) + 1; |
287 | else | 288 | else |
288 | return 1; | 289 | return 1; |
289 | } | 290 | } |
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index 789efe217e1a..804c40e2bc3e 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Changes: | 4 | * Changes: |
5 | * Venkatesh Pallipadi : Adding cache identification through cpuid(4) | 5 | * Venkatesh Pallipadi : Adding cache identification through cpuid(4) |
6 | * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure. | 6 | * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure. |
7 | * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD. | 7 | * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD. |
8 | */ | 8 | */ |
9 | 9 | ||
@@ -16,7 +16,7 @@ | |||
16 | #include <linux/pci.h> | 16 | #include <linux/pci.h> |
17 | 17 | ||
18 | #include <asm/processor.h> | 18 | #include <asm/processor.h> |
19 | #include <asm/smp.h> | 19 | #include <linux/smp.h> |
20 | #include <asm/k8.h> | 20 | #include <asm/k8.h> |
21 | 21 | ||
22 | #define LVL_1_INST 1 | 22 | #define LVL_1_INST 1 |
@@ -25,14 +25,15 @@ | |||
25 | #define LVL_3 4 | 25 | #define LVL_3 4 |
26 | #define LVL_TRACE 5 | 26 | #define LVL_TRACE 5 |
27 | 27 | ||
28 | struct _cache_table | 28 | struct _cache_table { |
29 | { | ||
30 | unsigned char descriptor; | 29 | unsigned char descriptor; |
31 | char cache_type; | 30 | char cache_type; |
32 | short size; | 31 | short size; |
33 | }; | 32 | }; |
34 | 33 | ||
35 | /* all the cache descriptor types we care about (no TLB or trace cache entries) */ | 34 | /* All the cache descriptor types we care about (no TLB or |
35 | trace cache entries) */ | ||
36 | |||
36 | static const struct _cache_table __cpuinitconst cache_table[] = | 37 | static const struct _cache_table __cpuinitconst cache_table[] = |
37 | { | 38 | { |
38 | { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */ | 39 | { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */ |
@@ -105,8 +106,7 @@ static const struct _cache_table __cpuinitconst cache_table[] = | |||
105 | }; | 106 | }; |
106 | 107 | ||
107 | 108 | ||
108 | enum _cache_type | 109 | enum _cache_type { |
109 | { | ||
110 | CACHE_TYPE_NULL = 0, | 110 | CACHE_TYPE_NULL = 0, |
111 | CACHE_TYPE_DATA = 1, | 111 | CACHE_TYPE_DATA = 1, |
112 | CACHE_TYPE_INST = 2, | 112 | CACHE_TYPE_INST = 2, |
@@ -170,31 +170,31 @@ unsigned short num_cache_leaves; | |||
170 | Maybe later */ | 170 | Maybe later */ |
171 | union l1_cache { | 171 | union l1_cache { |
172 | struct { | 172 | struct { |
173 | unsigned line_size : 8; | 173 | unsigned line_size:8; |
174 | unsigned lines_per_tag : 8; | 174 | unsigned lines_per_tag:8; |
175 | unsigned assoc : 8; | 175 | unsigned assoc:8; |
176 | unsigned size_in_kb : 8; | 176 | unsigned size_in_kb:8; |
177 | }; | 177 | }; |
178 | unsigned val; | 178 | unsigned val; |
179 | }; | 179 | }; |
180 | 180 | ||
181 | union l2_cache { | 181 | union l2_cache { |
182 | struct { | 182 | struct { |
183 | unsigned line_size : 8; | 183 | unsigned line_size:8; |
184 | unsigned lines_per_tag : 4; | 184 | unsigned lines_per_tag:4; |
185 | unsigned assoc : 4; | 185 | unsigned assoc:4; |
186 | unsigned size_in_kb : 16; | 186 | unsigned size_in_kb:16; |
187 | }; | 187 | }; |
188 | unsigned val; | 188 | unsigned val; |
189 | }; | 189 | }; |
190 | 190 | ||
191 | union l3_cache { | 191 | union l3_cache { |
192 | struct { | 192 | struct { |
193 | unsigned line_size : 8; | 193 | unsigned line_size:8; |
194 | unsigned lines_per_tag : 4; | 194 | unsigned lines_per_tag:4; |
195 | unsigned assoc : 4; | 195 | unsigned assoc:4; |
196 | unsigned res : 2; | 196 | unsigned res:2; |
197 | unsigned size_encoded : 14; | 197 | unsigned size_encoded:14; |
198 | }; | 198 | }; |
199 | unsigned val; | 199 | unsigned val; |
200 | }; | 200 | }; |
@@ -241,7 +241,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, | |||
241 | case 0: | 241 | case 0: |
242 | if (!l1->val) | 242 | if (!l1->val) |
243 | return; | 243 | return; |
244 | assoc = l1->assoc; | 244 | assoc = assocs[l1->assoc]; |
245 | line_size = l1->line_size; | 245 | line_size = l1->line_size; |
246 | lines_per_tag = l1->lines_per_tag; | 246 | lines_per_tag = l1->lines_per_tag; |
247 | size_in_kb = l1->size_in_kb; | 247 | size_in_kb = l1->size_in_kb; |
@@ -249,7 +249,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, | |||
249 | case 2: | 249 | case 2: |
250 | if (!l2.val) | 250 | if (!l2.val) |
251 | return; | 251 | return; |
252 | assoc = l2.assoc; | 252 | assoc = assocs[l2.assoc]; |
253 | line_size = l2.line_size; | 253 | line_size = l2.line_size; |
254 | lines_per_tag = l2.lines_per_tag; | 254 | lines_per_tag = l2.lines_per_tag; |
255 | /* cpu_data has errata corrections for K7 applied */ | 255 | /* cpu_data has errata corrections for K7 applied */ |
@@ -258,10 +258,14 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, | |||
258 | case 3: | 258 | case 3: |
259 | if (!l3.val) | 259 | if (!l3.val) |
260 | return; | 260 | return; |
261 | assoc = l3.assoc; | 261 | assoc = assocs[l3.assoc]; |
262 | line_size = l3.line_size; | 262 | line_size = l3.line_size; |
263 | lines_per_tag = l3.lines_per_tag; | 263 | lines_per_tag = l3.lines_per_tag; |
264 | size_in_kb = l3.size_encoded * 512; | 264 | size_in_kb = l3.size_encoded * 512; |
265 | if (boot_cpu_has(X86_FEATURE_AMD_DCM)) { | ||
266 | size_in_kb = size_in_kb >> 1; | ||
267 | assoc = assoc >> 1; | ||
268 | } | ||
265 | break; | 269 | break; |
266 | default: | 270 | default: |
267 | return; | 271 | return; |
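
Editorial aside on the X86_FEATURE_AMD_DCM hunk above: on multi-node AMD packages the CPUID 0x80000006 L3 fields appear to describe the whole package, so each node should advertise half the size and half the ways. A minimal sketch of that arithmetic; the concrete numbers are assumed example values only:

#include <stdio.h>

int main(void)
{
        unsigned int size_encoded = 24; /* CPUID 0x80000006 field, assumed */
        unsigned int assoc = 48;        /* ways, already decoded, assumed */
        int amd_dcm = 1;                /* pretend X86_FEATURE_AMD_DCM is set */
        unsigned int size_in_kb = size_encoded * 512;   /* 12288 KB package L3 */

        if (amd_dcm) {
                size_in_kb >>= 1;       /* 6144 KB per node */
                assoc >>= 1;            /* 24 ways per node */
        }
        printf("L3: %u KB, %u-way\n", size_in_kb, assoc);
        return 0;
}
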
@@ -270,18 +274,14 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, | |||
270 | eax->split.is_self_initializing = 1; | 274 | eax->split.is_self_initializing = 1; |
271 | eax->split.type = types[leaf]; | 275 | eax->split.type = types[leaf]; |
272 | eax->split.level = levels[leaf]; | 276 | eax->split.level = levels[leaf]; |
273 | if (leaf == 3) | 277 | eax->split.num_threads_sharing = 0; |
274 | eax->split.num_threads_sharing = | ||
275 | current_cpu_data.x86_max_cores - 1; | ||
276 | else | ||
277 | eax->split.num_threads_sharing = 0; | ||
278 | eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1; | 278 | eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1; |
279 | 279 | ||
280 | 280 | ||
281 | if (assoc == 0xf) | 281 | if (assoc == 0xffff) |
282 | eax->split.is_fully_associative = 1; | 282 | eax->split.is_fully_associative = 1; |
283 | ebx->split.coherency_line_size = line_size - 1; | 283 | ebx->split.coherency_line_size = line_size - 1; |
284 | ebx->split.ways_of_associativity = assocs[assoc] - 1; | 284 | ebx->split.ways_of_associativity = assoc - 1; |
285 | ebx->split.physical_line_partition = lines_per_tag - 1; | 285 | ebx->split.physical_line_partition = lines_per_tag - 1; |
286 | ecx->split.number_of_sets = (size_in_kb * 1024) / line_size / | 286 | ecx->split.number_of_sets = (size_in_kb * 1024) / line_size / |
287 | (ebx->split.ways_of_associativity + 1) - 1; | 287 | (ebx->split.ways_of_associativity + 1) - 1; |
@@ -350,7 +350,8 @@ static int __cpuinit find_num_cache_leaves(void) | |||
350 | 350 | ||
351 | unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) | 351 | unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) |
352 | { | 352 | { |
353 | unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */ | 353 | /* Cache sizes */ |
354 | unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; | ||
354 | unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */ | 355 | unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */ |
355 | unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */ | 356 | unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */ |
356 | unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb; | 357 | unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb; |
@@ -377,8 +378,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) | |||
377 | 378 | ||
378 | retval = cpuid4_cache_lookup_regs(i, &this_leaf); | 379 | retval = cpuid4_cache_lookup_regs(i, &this_leaf); |
379 | if (retval >= 0) { | 380 | if (retval >= 0) { |
380 | switch(this_leaf.eax.split.level) { | 381 | switch (this_leaf.eax.split.level) { |
381 | case 1: | 382 | case 1: |
382 | if (this_leaf.eax.split.type == | 383 | if (this_leaf.eax.split.type == |
383 | CACHE_TYPE_DATA) | 384 | CACHE_TYPE_DATA) |
384 | new_l1d = this_leaf.size/1024; | 385 | new_l1d = this_leaf.size/1024; |
@@ -386,19 +387,20 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) | |||
386 | CACHE_TYPE_INST) | 387 | CACHE_TYPE_INST) |
387 | new_l1i = this_leaf.size/1024; | 388 | new_l1i = this_leaf.size/1024; |
388 | break; | 389 | break; |
389 | case 2: | 390 | case 2: |
390 | new_l2 = this_leaf.size/1024; | 391 | new_l2 = this_leaf.size/1024; |
391 | num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing; | 392 | num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing; |
392 | index_msb = get_count_order(num_threads_sharing); | 393 | index_msb = get_count_order(num_threads_sharing); |
393 | l2_id = c->apicid >> index_msb; | 394 | l2_id = c->apicid >> index_msb; |
394 | break; | 395 | break; |
395 | case 3: | 396 | case 3: |
396 | new_l3 = this_leaf.size/1024; | 397 | new_l3 = this_leaf.size/1024; |
397 | num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing; | 398 | num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing; |
398 | index_msb = get_count_order(num_threads_sharing); | 399 | index_msb = get_count_order( |
400 | num_threads_sharing); | ||
399 | l3_id = c->apicid >> index_msb; | 401 | l3_id = c->apicid >> index_msb; |
400 | break; | 402 | break; |
401 | default: | 403 | default: |
402 | break; | 404 | break; |
403 | } | 405 | } |
404 | } | 406 | } |
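
The l2_id/l3_id computation above strips the low APIC-ID bits that enumerate the threads sharing a cache, so all siblings collapse to one cache ID. get_count_order(n) is effectively ceil(log2(n)); the user-space stand-in below mimics that behavior (an assumption about the kernel helper, kept deliberately naive):

#include <stdio.h>

/* User-space stand-in for the kernel's get_count_order(): ceil(log2(n)) */
static int get_count_order(unsigned int n)
{
        int order = 0;

        while ((1U << order) < n)
                order++;
        return order;
}

int main(void)
{
        unsigned int apicid = 0x1b;             /* assumed example */
        unsigned int num_threads_sharing = 6;   /* 1 + CPUID field */
        int index_msb = get_count_order(num_threads_sharing);  /* 3 */

        /* All six siblings shift down to the same cache id */
        printf("l3_id = %u\n", apicid >> index_msb);            /* 3 */
        return 0;
}
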
@@ -421,22 +423,21 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) | |||
421 | /* Number of times to iterate */ | 423 | /* Number of times to iterate */ |
422 | n = cpuid_eax(2) & 0xFF; | 424 | n = cpuid_eax(2) & 0xFF; |
423 | 425 | ||
424 | for ( i = 0 ; i < n ; i++ ) { | 426 | for (i = 0 ; i < n ; i++) { |
425 | cpuid(2, ®s[0], ®s[1], ®s[2], ®s[3]); | 427 | cpuid(2, ®s[0], ®s[1], ®s[2], ®s[3]); |
426 | 428 | ||
427 | /* If bit 31 is set, this is an unknown format */ | 429 | /* If bit 31 is set, this is an unknown format */ |
428 | for ( j = 0 ; j < 3 ; j++ ) { | 430 | for (j = 0 ; j < 3 ; j++) |
429 | if (regs[j] & (1 << 31)) regs[j] = 0; | 431 | if (regs[j] & (1 << 31)) |
430 | } | 432 | regs[j] = 0; |
431 | 433 | ||
432 | /* Byte 0 is level count, not a descriptor */ | 434 | /* Byte 0 is level count, not a descriptor */ |
433 | for ( j = 1 ; j < 16 ; j++ ) { | 435 | for (j = 1 ; j < 16 ; j++) { |
434 | unsigned char des = dp[j]; | 436 | unsigned char des = dp[j]; |
435 | unsigned char k = 0; | 437 | unsigned char k = 0; |
436 | 438 | ||
437 | /* look up this descriptor in the table */ | 439 | /* look up this descriptor in the table */ |
438 | while (cache_table[k].descriptor != 0) | 440 | while (cache_table[k].descriptor != 0) { |
439 | { | ||
440 | if (cache_table[k].descriptor == des) { | 441 | if (cache_table[k].descriptor == des) { |
441 | if (only_trace && cache_table[k].cache_type != LVL_TRACE) | 442 | if (only_trace && cache_table[k].cache_type != LVL_TRACE) |
442 | break; | 443 | break; |
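
For reference, the cpuid(2) descriptor walk being reindented above works on raw register bytes: a register with bit 31 set carries no descriptors, and byte 0 of EAX is the repeat count rather than a descriptor. A sketch on canned register values (the values are made up; on hardware they come from the cpuid instruction, and the byte view relies on x86 being little-endian):

#include <stdio.h>

int main(void)
{
        unsigned int regs[4] = { 0x05b0b101, 0x005657f0, 0x80000000, 0x2cb4304e };
        unsigned char *dp = (unsigned char *)regs;
        int j;

        /* Mirror the loop above: zero out registers flagged as unknown */
        for (j = 0; j < 3; j++)
                if (regs[j] & (1u << 31))
                        regs[j] = 0;

        /* Byte 0 is the level count, not a descriptor */
        for (j = 1; j < 16; j++)
                if (dp[j])
                        printf("descriptor 0x%02x\n", dp[j]);
        return 0;
}
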
@@ -488,14 +489,14 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) | |||
488 | } | 489 | } |
489 | 490 | ||
490 | if (trace) | 491 | if (trace) |
491 | printk (KERN_INFO "CPU: Trace cache: %dK uops", trace); | 492 | printk(KERN_INFO "CPU: Trace cache: %dK uops", trace); |
492 | else if ( l1i ) | 493 | else if (l1i) |
493 | printk (KERN_INFO "CPU: L1 I cache: %dK", l1i); | 494 | printk(KERN_INFO "CPU: L1 I cache: %dK", l1i); |
494 | 495 | ||
495 | if (l1d) | 496 | if (l1d) |
496 | printk(", L1 D cache: %dK\n", l1d); | 497 | printk(KERN_CONT ", L1 D cache: %dK\n", l1d); |
497 | else | 498 | else |
498 | printk("\n"); | 499 | printk(KERN_CONT "\n"); |
499 | 500 | ||
500 | if (l2) | 501 | if (l2) |
501 | printk(KERN_INFO "CPU: L2 cache: %dK\n", l2); | 502 | printk(KERN_INFO "CPU: L2 cache: %dK\n", l2); |
@@ -522,6 +523,18 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) | |||
522 | int index_msb, i; | 523 | int index_msb, i; |
523 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 524 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
524 | 525 | ||
526 | if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) { | ||
527 | struct cpuinfo_x86 *d; | ||
528 | for_each_online_cpu(i) { | ||
529 | if (!per_cpu(cpuid4_info, i)) | ||
530 | continue; | ||
531 | d = &cpu_data(i); | ||
532 | this_leaf = CPUID4_INFO_IDX(i, index); | ||
533 | cpumask_copy(to_cpumask(this_leaf->shared_cpu_map), | ||
534 | d->llc_shared_map); | ||
535 | } | ||
536 | return; | ||
537 | } | ||
525 | this_leaf = CPUID4_INFO_IDX(cpu, index); | 538 | this_leaf = CPUID4_INFO_IDX(cpu, index); |
526 | num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing; | 539 | num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing; |
527 | 540 | ||
@@ -558,8 +571,13 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) | |||
558 | } | 571 | } |
559 | } | 572 | } |
560 | #else | 573 | #else |
561 | static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) {} | 574 | static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) |
562 | static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) {} | 575 | { |
576 | } | ||
577 | |||
578 | static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) | ||
579 | { | ||
580 | } | ||
563 | #endif | 581 | #endif |
564 | 582 | ||
565 | static void __cpuinit free_cache_attributes(unsigned int cpu) | 583 | static void __cpuinit free_cache_attributes(unsigned int cpu) |
@@ -645,7 +663,7 @@ static DEFINE_PER_CPU(struct _index_kobject *, index_kobject); | |||
645 | static ssize_t show_##file_name \ | 663 | static ssize_t show_##file_name \ |
646 | (struct _cpuid4_info *this_leaf, char *buf) \ | 664 | (struct _cpuid4_info *this_leaf, char *buf) \ |
647 | { \ | 665 | { \ |
648 | return sprintf (buf, "%lu\n", (unsigned long)this_leaf->object + val); \ | 666 | return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \ |
649 | } | 667 | } |
650 | 668 | ||
651 | show_one_plus(level, eax.split.level, 0); | 669 | show_one_plus(level, eax.split.level, 0); |
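
Since show_one_plus() is only touched for whitespace here, it is easy to miss what it does: it stamps out one sysfs show routine per CPUID4 field, and the val argument exists because several fields are stored minus one. Expanded by hand for one field (a sketch; the struct is trimmed to the single bitfield used and its width is an assumption):

#include <stdio.h>
#include <sys/types.h>

struct _cpuid4_info {
        struct {
                struct { unsigned coherency_line_size:12; } split;
        } ebx;
};

/* Roughly what show_one_plus(coherency_line_size, ..., 1) expands to */
static ssize_t show_coherency_line_size(struct _cpuid4_info *this_leaf,
                                        char *buf)
{
        return sprintf(buf, "%lu\n",
                (unsigned long)this_leaf->ebx.split.coherency_line_size + 1);
}

int main(void)
{
        struct _cpuid4_info leaf = { { { 63 } } };      /* stored minus one */
        char buf[16];

        show_coherency_line_size(&leaf, buf);
        fputs(buf, stdout);                             /* prints 64 */
        return 0;
}
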
@@ -656,7 +674,7 @@ show_one_plus(number_of_sets, ecx.split.number_of_sets, 1); | |||
656 | 674 | ||
657 | static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf) | 675 | static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf) |
658 | { | 676 | { |
659 | return sprintf (buf, "%luK\n", this_leaf->size / 1024); | 677 | return sprintf(buf, "%luK\n", this_leaf->size / 1024); |
660 | } | 678 | } |
661 | 679 | ||
662 | static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf, | 680 | static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf, |
@@ -669,7 +687,7 @@ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf, | |||
669 | const struct cpumask *mask; | 687 | const struct cpumask *mask; |
670 | 688 | ||
671 | mask = to_cpumask(this_leaf->shared_cpu_map); | 689 | mask = to_cpumask(this_leaf->shared_cpu_map); |
672 | n = type? | 690 | n = type ? |
673 | cpulist_scnprintf(buf, len-2, mask) : | 691 | cpulist_scnprintf(buf, len-2, mask) : |
674 | cpumask_scnprintf(buf, len-2, mask); | 692 | cpumask_scnprintf(buf, len-2, mask); |
675 | buf[n++] = '\n'; | 693 | buf[n++] = '\n'; |
@@ -800,7 +818,7 @@ static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644, | |||
800 | static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644, | 818 | static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644, |
801 | show_cache_disable_1, store_cache_disable_1); | 819 | show_cache_disable_1, store_cache_disable_1); |
802 | 820 | ||
803 | static struct attribute * default_attrs[] = { | 821 | static struct attribute *default_attrs[] = { |
804 | &type.attr, | 822 | &type.attr, |
805 | &level.attr, | 823 | &level.attr, |
806 | &coherency_line_size.attr, | 824 | &coherency_line_size.attr, |
@@ -815,7 +833,7 @@ static struct attribute * default_attrs[] = { | |||
815 | NULL | 833 | NULL |
816 | }; | 834 | }; |
817 | 835 | ||
818 | static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf) | 836 | static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) |
819 | { | 837 | { |
820 | struct _cache_attr *fattr = to_attr(attr); | 838 | struct _cache_attr *fattr = to_attr(attr); |
821 | struct _index_kobject *this_leaf = to_object(kobj); | 839 | struct _index_kobject *this_leaf = to_object(kobj); |
@@ -828,8 +846,8 @@ static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf) | |||
828 | return ret; | 846 | return ret; |
829 | } | 847 | } |
830 | 848 | ||
831 | static ssize_t store(struct kobject * kobj, struct attribute * attr, | 849 | static ssize_t store(struct kobject *kobj, struct attribute *attr, |
832 | const char * buf, size_t count) | 850 | const char *buf, size_t count) |
833 | { | 851 | { |
834 | struct _cache_attr *fattr = to_attr(attr); | 852 | struct _cache_attr *fattr = to_attr(attr); |
835 | struct _index_kobject *this_leaf = to_object(kobj); | 853 | struct _index_kobject *this_leaf = to_object(kobj); |
@@ -883,7 +901,7 @@ static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu) | |||
883 | goto err_out; | 901 | goto err_out; |
884 | 902 | ||
885 | per_cpu(index_kobject, cpu) = kzalloc( | 903 | per_cpu(index_kobject, cpu) = kzalloc( |
886 | sizeof(struct _index_kobject ) * num_cache_leaves, GFP_KERNEL); | 904 | sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL); |
887 | if (unlikely(per_cpu(index_kobject, cpu) == NULL)) | 905 | if (unlikely(per_cpu(index_kobject, cpu) == NULL)) |
888 | goto err_out; | 906 | goto err_out; |
889 | 907 | ||
@@ -917,7 +935,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev) | |||
917 | } | 935 | } |
918 | 936 | ||
919 | for (i = 0; i < num_cache_leaves; i++) { | 937 | for (i = 0; i < num_cache_leaves; i++) { |
920 | this_object = INDEX_KOBJECT_PTR(cpu,i); | 938 | this_object = INDEX_KOBJECT_PTR(cpu, i); |
921 | this_object->cpu = cpu; | 939 | this_object->cpu = cpu; |
922 | this_object->index = i; | 940 | this_object->index = i; |
923 | retval = kobject_init_and_add(&(this_object->kobj), | 941 | retval = kobject_init_and_add(&(this_object->kobj), |
@@ -925,9 +943,8 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev) | |||
925 | per_cpu(cache_kobject, cpu), | 943 | per_cpu(cache_kobject, cpu), |
926 | "index%1lu", i); | 944 | "index%1lu", i); |
927 | if (unlikely(retval)) { | 945 | if (unlikely(retval)) { |
928 | for (j = 0; j < i; j++) { | 946 | for (j = 0; j < i; j++) |
929 | kobject_put(&(INDEX_KOBJECT_PTR(cpu,j)->kobj)); | 947 | kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj)); |
930 | } | ||
931 | kobject_put(per_cpu(cache_kobject, cpu)); | 948 | kobject_put(per_cpu(cache_kobject, cpu)); |
932 | cpuid4_cache_sysfs_exit(cpu); | 949 | cpuid4_cache_sysfs_exit(cpu); |
933 | return retval; | 950 | return retval; |
@@ -952,7 +969,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev) | |||
952 | cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map)); | 969 | cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map)); |
953 | 970 | ||
954 | for (i = 0; i < num_cache_leaves; i++) | 971 | for (i = 0; i < num_cache_leaves; i++) |
955 | kobject_put(&(INDEX_KOBJECT_PTR(cpu,i)->kobj)); | 972 | kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj)); |
956 | kobject_put(per_cpu(cache_kobject, cpu)); | 973 | kobject_put(per_cpu(cache_kobject, cpu)); |
957 | cpuid4_cache_sysfs_exit(cpu); | 974 | cpuid4_cache_sysfs_exit(cpu); |
958 | } | 975 | } |
@@ -977,8 +994,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb, | |||
977 | return NOTIFY_OK; | 994 | return NOTIFY_OK; |
978 | } | 995 | } |
979 | 996 | ||
980 | static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = | 997 | static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = { |
981 | { | ||
982 | .notifier_call = cacheinfo_cpu_callback, | 998 | .notifier_call = cacheinfo_cpu_callback, |
983 | }; | 999 | }; |
984 | 1000 | ||
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index ddae21620bda..1fecba404fd8 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c | |||
@@ -489,12 +489,14 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | |||
489 | int i, err = 0; | 489 | int i, err = 0; |
490 | struct threshold_bank *b = NULL; | 490 | struct threshold_bank *b = NULL; |
491 | char name[32]; | 491 | char name[32]; |
492 | struct cpuinfo_x86 *c = &cpu_data(cpu); | ||
493 | |||
492 | 494 | ||
493 | sprintf(name, "threshold_bank%i", bank); | 495 | sprintf(name, "threshold_bank%i", bank); |
494 | 496 | ||
495 | #ifdef CONFIG_SMP | 497 | #ifdef CONFIG_SMP |
496 | if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */ | 498 | if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */ |
497 | i = cpumask_first(cpu_core_mask(cpu)); | 499 | i = cpumask_first(c->llc_shared_map); |
498 | 500 | ||
499 | /* first core not up yet */ | 501 | /* first core not up yet */ |
500 | if (cpu_data(i).cpu_core_id) | 502 | if (cpu_data(i).cpu_core_id) |
@@ -514,7 +516,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | |||
514 | if (err) | 516 | if (err) |
515 | goto out; | 517 | goto out; |
516 | 518 | ||
517 | cpumask_copy(b->cpus, cpu_core_mask(cpu)); | 519 | cpumask_copy(b->cpus, c->llc_shared_map); |
518 | per_cpu(threshold_banks, cpu)[bank] = b; | 520 | per_cpu(threshold_banks, cpu)[bank] = b; |
519 | 521 | ||
520 | goto out; | 522 | goto out; |
@@ -539,7 +541,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | |||
539 | #ifndef CONFIG_SMP | 541 | #ifndef CONFIG_SMP |
540 | cpumask_setall(b->cpus); | 542 | cpumask_setall(b->cpus); |
541 | #else | 543 | #else |
542 | cpumask_copy(b->cpus, cpu_core_mask(cpu)); | 544 | cpumask_copy(b->cpus, c->llc_shared_map); |
543 | #endif | 545 | #endif |
544 | 546 | ||
545 | per_cpu(threshold_banks, cpu)[bank] = b; | 547 | per_cpu(threshold_banks, cpu)[bank] = b; |
diff --git a/arch/x86/kernel/cpu/mtrr/amd.c b/arch/x86/kernel/cpu/mtrr/amd.c index ee2331b0e58f..33af14110dfd 100644 --- a/arch/x86/kernel/cpu/mtrr/amd.c +++ b/arch/x86/kernel/cpu/mtrr/amd.c | |||
@@ -7,15 +7,15 @@ | |||
7 | 7 | ||
8 | static void | 8 | static void |
9 | amd_get_mtrr(unsigned int reg, unsigned long *base, | 9 | amd_get_mtrr(unsigned int reg, unsigned long *base, |
10 | unsigned long *size, mtrr_type * type) | 10 | unsigned long *size, mtrr_type *type) |
11 | { | 11 | { |
12 | unsigned long low, high; | 12 | unsigned long low, high; |
13 | 13 | ||
14 | rdmsr(MSR_K6_UWCCR, low, high); | 14 | rdmsr(MSR_K6_UWCCR, low, high); |
15 | /* Upper dword is region 1, lower is region 0 */ | 15 | /* Upper dword is region 1, lower is region 0 */ |
16 | if (reg == 1) | 16 | if (reg == 1) |
17 | low = high; | 17 | low = high; |
18 | /* The base masks off on the right alignment */ | 18 | /* The base masks off on the right alignment */ |
19 | *base = (low & 0xFFFE0000) >> PAGE_SHIFT; | 19 | *base = (low & 0xFFFE0000) >> PAGE_SHIFT; |
20 | *type = 0; | 20 | *type = 0; |
21 | if (low & 1) | 21 | if (low & 1) |
@@ -27,74 +27,81 @@ amd_get_mtrr(unsigned int reg, unsigned long *base, | |||
27 | return; | 27 | return; |
28 | } | 28 | } |
29 | /* | 29 | /* |
30 | * This needs a little explaining. The size is stored as an | 30 | * This needs a little explaining. The size is stored as an |
31 | * inverted mask of bits of 128K granularity 15 bits long offset | 31 | * inverted mask of bits of 128K granularity 15 bits long offset |
32 | * 2 bits | 32 | * 2 bits. |
33 | * | 33 | * |
34 | * So to get a size we do invert the mask and add 1 to the lowest | 34 | * So to get a size we do invert the mask and add 1 to the lowest |
35 | * mask bit (4 as its 2 bits in). This gives us a size we then shift | 35 | * mask bit (4 as its 2 bits in). This gives us a size we then shift |
36 | * to turn into 128K blocks | 36 | * to turn into 128K blocks. |
37 | * | 37 | * |
38 | * eg 111 1111 1111 1100 is 512K | 38 | * eg 111 1111 1111 1100 is 512K |
39 | * | 39 | * |
40 | * invert 000 0000 0000 0011 | 40 | * invert 000 0000 0000 0011 |
41 | * +1 000 0000 0000 0100 | 41 | * +1 000 0000 0000 0100 |
42 | * *128K ... | 42 | * *128K ... |
43 | */ | 43 | */ |
44 | low = (~low) & 0x1FFFC; | 44 | low = (~low) & 0x1FFFC; |
45 | *size = (low + 4) << (15 - PAGE_SHIFT); | 45 | *size = (low + 4) << (15 - PAGE_SHIFT); |
46 | return; | ||
47 | } | 46 | } |
48 | 47 | ||
49 | static void amd_set_mtrr(unsigned int reg, unsigned long base, | 48 | /** |
50 | unsigned long size, mtrr_type type) | 49 | * amd_set_mtrr - Set variable MTRR register on the local CPU. |
51 | /* [SUMMARY] Set variable MTRR register on the local CPU. | 50 | * |
52 | <reg> The register to set. | 51 | * @reg The register to set. |
53 | <base> The base address of the region. | 52 | * @base The base address of the region. |
54 | <size> The size of the region. If this is 0 the region is disabled. | 53 | * @size The size of the region. If this is 0 the region is disabled. |
55 | <type> The type of the region. | 54 | * @type The type of the region. |
56 | [RETURNS] Nothing. | 55 | * |
57 | */ | 56 | * Returns nothing. |
57 | */ | ||
58 | static void | ||
59 | amd_set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type) | ||
58 | { | 60 | { |
59 | u32 regs[2]; | 61 | u32 regs[2]; |
60 | 62 | ||
61 | /* | 63 | /* |
62 | * Low is MTRR0 , High MTRR 1 | 64 | * Low is MTRR0, High MTRR 1 |
63 | */ | 65 | */ |
64 | rdmsr(MSR_K6_UWCCR, regs[0], regs[1]); | 66 | rdmsr(MSR_K6_UWCCR, regs[0], regs[1]); |
65 | /* | 67 | /* |
66 | * Blank to disable | 68 | * Blank to disable |
67 | */ | 69 | */ |
68 | if (size == 0) | 70 | if (size == 0) { |
69 | regs[reg] = 0; | 71 | regs[reg] = 0; |
70 | else | 72 | } else { |
71 | /* Set the register to the base, the type (off by one) and an | 73 | /* |
72 | inverted bitmask of the size The size is the only odd | 74 | * Set the register to the base, the type (off by one) and an |
73 | bit. We are fed say 512K We invert this and we get 111 1111 | 75 | * inverted bitmask of the size. The size is the only odd |
74 | 1111 1011 but if you subtract one and invert you get the | 76 | * bit. We are fed, say, 512K. We invert this and we get 111 1111 |
75 | desired 111 1111 1111 1100 mask | 77 | * 1111 1011, but if you subtract one and invert you get the |
76 | 78 | * desired 111 1111 1111 1100 mask. | |
77 | But ~(x - 1) == ~x + 1 == -x. Two's complement rocks! */ | 79 | * |
80 | * But ~(x - 1) == ~x + 1 == -x. Two's complement rocks! | ||
81 | */ | ||
78 | regs[reg] = (-size >> (15 - PAGE_SHIFT) & 0x0001FFFC) | 82 | regs[reg] = (-size >> (15 - PAGE_SHIFT) & 0x0001FFFC) |
79 | | (base << PAGE_SHIFT) | (type + 1); | 83 | | (base << PAGE_SHIFT) | (type + 1); |
84 | } | ||
80 | 85 | ||
81 | /* | 86 | /* |
82 | * The writeback rule is quite specific. See the manual. Its | 87 | * The writeback rule is quite specific. See the manual. Its |
83 | * disable local interrupts, write back the cache, set the mtrr | 88 | * disable local interrupts, write back the cache, set the mtrr |
84 | */ | 89 | */ |
85 | wbinvd(); | 90 | wbinvd(); |
86 | wrmsr(MSR_K6_UWCCR, regs[0], regs[1]); | 91 | wrmsr(MSR_K6_UWCCR, regs[0], regs[1]); |
87 | } | 92 | } |
88 | 93 | ||
89 | static int amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type) | 94 | static int |
95 | amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type) | ||
90 | { | 96 | { |
91 | /* Apply the K6 block alignment and size rules | 97 | /* |
92 | In order | 98 | * Apply the K6 block alignment and size rules |
93 | o Uncached or gathering only | 99 | * In order |
94 | o 128K or bigger block | 100 | * o Uncached or gathering only |
95 | o Power of 2 block | 101 | * o 128K or bigger block |
96 | o base suitably aligned to the power | 102 | * o Power of 2 block |
97 | */ | 103 | * o base suitably aligned to the power |
104 | */ | ||
98 | if (type > MTRR_TYPE_WRCOMB || size < (1 << (17 - PAGE_SHIFT)) | 105 | if (type > MTRR_TYPE_WRCOMB || size < (1 << (17 - PAGE_SHIFT)) |
99 | || (size & ~(size - 1)) - size || (base & (size - 1))) | 106 | || (size & ~(size - 1)) - size || (base & (size - 1))) |
100 | return -EINVAL; | 107 | return -EINVAL; |
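
The inverted-mask comments in this file are easiest to trust with numbers. Below is a stand-alone round trip of the 512K example, assuming PAGE_SHIFT is 12 so sizes are counted in 4K pages; it also exercises the power-of-two test from amd_validate_add_page():

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
        unsigned long size = (512 * 1024) >> PAGE_SHIFT;  /* 128 pages = 512K */
        unsigned long low, decoded;

        /* Encode, as in amd_set_mtrr(): ~(x - 1) == -x */
        low = (-size >> (15 - PAGE_SHIFT)) & 0x0001FFFC;  /* 0x1fff0 */

        /* Decode, as in amd_get_mtrr(): invert, +4 since bit 2 is the LSB */
        decoded = ((~low & 0x1FFFC) + 4) << (15 - PAGE_SHIFT);

        printf("mask 0x%05lx -> %lu pages (%luK)\n",
               low, decoded, (decoded << PAGE_SHIFT) >> 10);

        /* amd_validate_add_page()'s power-of-2 test: lowest set bit == value */
        printf("power of two: %s\n",
               (size & ~(size - 1)) - size ? "no" : "yes");
        return 0;
}
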
@@ -115,5 +122,3 @@ int __init amd_init_mtrr(void) | |||
115 | set_mtrr_ops(&amd_mtrr_ops); | 122 | set_mtrr_ops(&amd_mtrr_ops); |
116 | return 0; | 123 | return 0; |
117 | } | 124 | } |
118 | |||
119 | //arch_initcall(amd_mtrr_init); | ||
diff --git a/arch/x86/kernel/cpu/mtrr/centaur.c b/arch/x86/kernel/cpu/mtrr/centaur.c index cb9aa3a7a7ab..de89f14eff3a 100644 --- a/arch/x86/kernel/cpu/mtrr/centaur.c +++ b/arch/x86/kernel/cpu/mtrr/centaur.c | |||
@@ -1,7 +1,9 @@ | |||
1 | #include <linux/init.h> | 1 | #include <linux/init.h> |
2 | #include <linux/mm.h> | 2 | #include <linux/mm.h> |
3 | |||
3 | #include <asm/mtrr.h> | 4 | #include <asm/mtrr.h> |
4 | #include <asm/msr.h> | 5 | #include <asm/msr.h> |
6 | |||
5 | #include "mtrr.h" | 7 | #include "mtrr.h" |
6 | 8 | ||
7 | static struct { | 9 | static struct { |
@@ -12,25 +14,25 @@ static struct { | |||
12 | static u8 centaur_mcr_reserved; | 14 | static u8 centaur_mcr_reserved; |
13 | static u8 centaur_mcr_type; /* 0 for winchip, 1 for winchip2 */ | 15 | static u8 centaur_mcr_type; /* 0 for winchip, 1 for winchip2 */ |
14 | 16 | ||
15 | /* | 17 | /** |
16 | * Report boot time MCR setups | 18 | * centaur_get_free_region - Get a free MTRR. |
19 | * | ||
20 | * @base: The starting (base) address of the region. | ||
21 | * @size: The size (in bytes) of the region. | ||
22 | * | ||
23 | * Returns: the index of the region on success, else -1 on error. | ||
17 | */ | 24 | */ |
18 | |||
19 | static int | 25 | static int |
20 | centaur_get_free_region(unsigned long base, unsigned long size, int replace_reg) | 26 | centaur_get_free_region(unsigned long base, unsigned long size, int replace_reg) |
21 | /* [SUMMARY] Get a free MTRR. | ||
22 | <base> The starting (base) address of the region. | ||
23 | <size> The size (in bytes) of the region. | ||
24 | [RETURNS] The index of the region on success, else -1 on error. | ||
25 | */ | ||
26 | { | 27 | { |
27 | int i, max; | ||
28 | mtrr_type ltype; | ||
29 | unsigned long lbase, lsize; | 28 | unsigned long lbase, lsize; |
29 | mtrr_type ltype; | ||
30 | int i, max; | ||
30 | 31 | ||
31 | max = num_var_ranges; | 32 | max = num_var_ranges; |
32 | if (replace_reg >= 0 && replace_reg < max) | 33 | if (replace_reg >= 0 && replace_reg < max) |
33 | return replace_reg; | 34 | return replace_reg; |
35 | |||
34 | for (i = 0; i < max; ++i) { | 36 | for (i = 0; i < max; ++i) { |
35 | if (centaur_mcr_reserved & (1 << i)) | 37 | if (centaur_mcr_reserved & (1 << i)) |
36 | continue; | 38 | continue; |
@@ -38,11 +40,14 @@ centaur_get_free_region(unsigned long base, unsigned long size, int replace_reg) | |||
38 | if (lsize == 0) | 40 | if (lsize == 0) |
39 | return i; | 41 | return i; |
40 | } | 42 | } |
43 | |||
41 | return -ENOSPC; | 44 | return -ENOSPC; |
42 | } | 45 | } |
43 | 46 | ||
44 | void | 47 | /* |
45 | mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) | 48 | * Report boot time MCR setups |
49 | */ | ||
50 | void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) | ||
46 | { | 51 | { |
47 | centaur_mcr[mcr].low = lo; | 52 | centaur_mcr[mcr].low = lo; |
48 | centaur_mcr[mcr].high = hi; | 53 | centaur_mcr[mcr].high = hi; |
@@ -54,33 +59,35 @@ centaur_get_mcr(unsigned int reg, unsigned long *base, | |||
54 | { | 59 | { |
55 | *base = centaur_mcr[reg].high >> PAGE_SHIFT; | 60 | *base = centaur_mcr[reg].high >> PAGE_SHIFT; |
56 | *size = -(centaur_mcr[reg].low & 0xfffff000) >> PAGE_SHIFT; | 61 | *size = -(centaur_mcr[reg].low & 0xfffff000) >> PAGE_SHIFT; |
57 | *type = MTRR_TYPE_WRCOMB; /* If it is there, it is write-combining */ | 62 | *type = MTRR_TYPE_WRCOMB; /* write-combining */ |
63 | |||
58 | if (centaur_mcr_type == 1 && ((centaur_mcr[reg].low & 31) & 2)) | 64 | if (centaur_mcr_type == 1 && ((centaur_mcr[reg].low & 31) & 2)) |
59 | *type = MTRR_TYPE_UNCACHABLE; | 65 | *type = MTRR_TYPE_UNCACHABLE; |
60 | if (centaur_mcr_type == 1 && (centaur_mcr[reg].low & 31) == 25) | 66 | if (centaur_mcr_type == 1 && (centaur_mcr[reg].low & 31) == 25) |
61 | *type = MTRR_TYPE_WRBACK; | 67 | *type = MTRR_TYPE_WRBACK; |
62 | if (centaur_mcr_type == 0 && (centaur_mcr[reg].low & 31) == 31) | 68 | if (centaur_mcr_type == 0 && (centaur_mcr[reg].low & 31) == 31) |
63 | *type = MTRR_TYPE_WRBACK; | 69 | *type = MTRR_TYPE_WRBACK; |
64 | |||
65 | } | 70 | } |
66 | 71 | ||
67 | static void centaur_set_mcr(unsigned int reg, unsigned long base, | 72 | static void |
68 | unsigned long size, mtrr_type type) | 73 | centaur_set_mcr(unsigned int reg, unsigned long base, |
74 | unsigned long size, mtrr_type type) | ||
69 | { | 75 | { |
70 | unsigned long low, high; | 76 | unsigned long low, high; |
71 | 77 | ||
72 | if (size == 0) { | 78 | if (size == 0) { |
73 | /* Disable */ | 79 | /* Disable */ |
74 | high = low = 0; | 80 | high = low = 0; |
75 | } else { | 81 | } else { |
76 | high = base << PAGE_SHIFT; | 82 | high = base << PAGE_SHIFT; |
77 | if (centaur_mcr_type == 0) | 83 | if (centaur_mcr_type == 0) { |
78 | low = -size << PAGE_SHIFT | 0x1f; /* only support write-combining... */ | 84 | /* Only support write-combining... */ |
79 | else { | 85 | low = -size << PAGE_SHIFT | 0x1f; |
86 | } else { | ||
80 | if (type == MTRR_TYPE_UNCACHABLE) | 87 | if (type == MTRR_TYPE_UNCACHABLE) |
81 | low = -size << PAGE_SHIFT | 0x02; /* NC */ | 88 | low = -size << PAGE_SHIFT | 0x02; /* NC */ |
82 | else | 89 | else |
83 | low = -size << PAGE_SHIFT | 0x09; /* WWO,WC */ | 90 | low = -size << PAGE_SHIFT | 0x09; /* WWO, WC */ |
84 | } | 91 | } |
85 | } | 92 | } |
86 | centaur_mcr[reg].high = high; | 93 | centaur_mcr[reg].high = high; |
@@ -88,118 +95,16 @@ static void centaur_set_mcr(unsigned int reg, unsigned long base, | |||
88 | wrmsr(MSR_IDT_MCR0 + reg, low, high); | 95 | wrmsr(MSR_IDT_MCR0 + reg, low, high); |
89 | } | 96 | } |
90 | 97 | ||
91 | #if 0 | 98 | static int |
92 | /* | 99 | centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int type) |
93 | * Initialise the later (saner) Winchip MCR variant. In this version | ||
94 | * the BIOS can pass us the registers it has used (but not their values) | ||
95 | * and the control register is read/write | ||
96 | */ | ||
97 | |||
98 | static void __init | ||
99 | centaur_mcr1_init(void) | ||
100 | { | ||
101 | unsigned i; | ||
102 | u32 lo, hi; | ||
103 | |||
104 | /* Unfortunately, MCR's are read-only, so there is no way to | ||
105 | * find out what the bios might have done. | ||
106 | */ | ||
107 | |||
108 | rdmsr(MSR_IDT_MCR_CTRL, lo, hi); | ||
109 | if (((lo >> 17) & 7) == 1) { /* Type 1 Winchip2 MCR */ | ||
110 | lo &= ~0x1C0; /* clear key */ | ||
111 | lo |= 0x040; /* set key to 1 */ | ||
112 | wrmsr(MSR_IDT_MCR_CTRL, lo, hi); /* unlock MCR */ | ||
113 | } | ||
114 | |||
115 | centaur_mcr_type = 1; | ||
116 | |||
117 | /* | ||
118 | * Clear any unconfigured MCR's. | ||
119 | */ | ||
120 | |||
121 | for (i = 0; i < 8; ++i) { | ||
122 | if (centaur_mcr[i].high == 0 && centaur_mcr[i].low == 0) { | ||
123 | if (!(lo & (1 << (9 + i)))) | ||
124 | wrmsr(MSR_IDT_MCR0 + i, 0, 0); | ||
125 | else | ||
126 | /* | ||
127 | * If the BIOS set up an MCR we cannot see it | ||
128 | * but we don't wish to obliterate it | ||
129 | */ | ||
130 | centaur_mcr_reserved |= (1 << i); | ||
131 | } | ||
132 | } | ||
133 | /* | ||
134 | * Throw the main write-combining switch... | ||
135 | * However if OOSTORE is enabled then people have already done far | ||
136 | * cleverer things and we should behave. | ||
137 | */ | ||
138 | |||
139 | lo |= 15; /* Write combine enables */ | ||
140 | wrmsr(MSR_IDT_MCR_CTRL, lo, hi); | ||
141 | } | ||
142 | |||
143 | /* | ||
144 | * Initialise the original winchip with read only MCR registers | ||
145 | * no used bitmask for the BIOS to pass on and write only control | ||
146 | */ | ||
147 | |||
148 | static void __init | ||
149 | centaur_mcr0_init(void) | ||
150 | { | ||
151 | unsigned i; | ||
152 | |||
153 | /* Unfortunately, MCR's are read-only, so there is no way to | ||
154 | * find out what the bios might have done. | ||
155 | */ | ||
156 | |||
157 | /* Clear any unconfigured MCR's. | ||
158 | * This way we are sure that the centaur_mcr array contains the actual | ||
159 | * values. The disadvantage is that any BIOS tweaks are thus undone. | ||
160 | * | ||
161 | */ | ||
162 | for (i = 0; i < 8; ++i) { | ||
163 | if (centaur_mcr[i].high == 0 && centaur_mcr[i].low == 0) | ||
164 | wrmsr(MSR_IDT_MCR0 + i, 0, 0); | ||
165 | } | ||
166 | |||
167 | wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0); /* Write only */ | ||
168 | } | ||
169 | |||
170 | /* | ||
171 | * Initialise Winchip series MCR registers | ||
172 | */ | ||
173 | |||
174 | static void __init | ||
175 | centaur_mcr_init(void) | ||
176 | { | ||
177 | struct set_mtrr_context ctxt; | ||
178 | |||
179 | set_mtrr_prepare_save(&ctxt); | ||
180 | set_mtrr_cache_disable(&ctxt); | ||
181 | |||
182 | if (boot_cpu_data.x86_model == 4) | ||
183 | centaur_mcr0_init(); | ||
184 | else if (boot_cpu_data.x86_model == 8 || boot_cpu_data.x86_model == 9) | ||
185 | centaur_mcr1_init(); | ||
186 | |||
187 | set_mtrr_done(&ctxt); | ||
188 | } | ||
189 | #endif | ||
190 | |||
191 | static int centaur_validate_add_page(unsigned long base, | ||
192 | unsigned long size, unsigned int type) | ||
193 | { | 100 | { |
194 | /* | 101 | /* |
195 | * FIXME: Winchip2 supports uncached | 102 | * FIXME: Winchip2 supports uncached |
196 | */ | 103 | */ |
197 | if (type != MTRR_TYPE_WRCOMB && | 104 | if (type != MTRR_TYPE_WRCOMB && |
198 | (centaur_mcr_type == 0 || type != MTRR_TYPE_UNCACHABLE)) { | 105 | (centaur_mcr_type == 0 || type != MTRR_TYPE_UNCACHABLE)) { |
199 | printk(KERN_WARNING | 106 | pr_warning("mtrr: only write-combining%s supported\n", |
200 | "mtrr: only write-combining%s supported\n", | 107 | centaur_mcr_type ? " and uncacheable are" : " is"); |
201 | centaur_mcr_type ? " and uncacheable are" | ||
202 | : " is"); | ||
203 | return -EINVAL; | 108 | return -EINVAL; |
204 | } | 109 | } |
205 | return 0; | 110 | return 0; |
@@ -207,7 +112,6 @@ static int centaur_validate_add_page(unsigned long base, | |||
207 | 112 | ||
208 | static struct mtrr_ops centaur_mtrr_ops = { | 113 | static struct mtrr_ops centaur_mtrr_ops = { |
209 | .vendor = X86_VENDOR_CENTAUR, | 114 | .vendor = X86_VENDOR_CENTAUR, |
210 | // .init = centaur_mcr_init, | ||
211 | .set = centaur_set_mcr, | 115 | .set = centaur_set_mcr, |
212 | .get = centaur_get_mcr, | 116 | .get = centaur_get_mcr, |
213 | .get_free_region = centaur_get_free_region, | 117 | .get_free_region = centaur_get_free_region, |
@@ -220,5 +124,3 @@ int __init centaur_init_mtrr(void) | |||
220 | set_mtrr_ops(¢aur_mtrr_ops); | 124 | set_mtrr_ops(¢aur_mtrr_ops); |
221 | return 0; | 125 | return 0; |
222 | } | 126 | } |
223 | |||
224 | //arch_initcall(centaur_init_mtrr); | ||
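
The MCR layout used by centaur_set_mcr()/centaur_get_mcr() packs the base into the high word and the negated size plus key bits into the low word. A round trip for an assumed 512K write-combining region at 1M, again taking PAGE_SHIFT as 12 and 32-bit registers:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
        unsigned int base = 0x100000 >> PAGE_SHIFT;     /* 1M, in pages */
        unsigned int size = 0x80000 >> PAGE_SHIFT;      /* 512K, in pages */
        unsigned int low, high;

        /* Encode as in centaur_set_mcr(), winchip (type 0): WC key 0x1f */
        high = base << PAGE_SHIFT;
        low = (-size << PAGE_SHIFT) | 0x1f;             /* 0xfff8001f */

        /* Decode as in centaur_get_mcr() */
        printf("base %uK, size %uK\n",
               (high >> PAGE_SHIFT) << (PAGE_SHIFT - 10),
               (-(low & 0xfffff000) >> PAGE_SHIFT) << (PAGE_SHIFT - 10));
        return 0;
}
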
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c index 1d584a18a50d..315738c74aad 100644 --- a/arch/x86/kernel/cpu/mtrr/cleanup.c +++ b/arch/x86/kernel/cpu/mtrr/cleanup.c | |||
@@ -1,51 +1,75 @@ | |||
1 | /* MTRR (Memory Type Range Register) cleanup | 1 | /* |
2 | 2 | * MTRR (Memory Type Range Register) cleanup | |
3 | Copyright (C) 2009 Yinghai Lu | 3 | * |
4 | 4 | * Copyright (C) 2009 Yinghai Lu | |
5 | This library is free software; you can redistribute it and/or | 5 | * |
6 | modify it under the terms of the GNU Library General Public | 6 | * This library is free software; you can redistribute it and/or |
7 | License as published by the Free Software Foundation; either | 7 | * modify it under the terms of the GNU Library General Public |
8 | version 2 of the License, or (at your option) any later version. | 8 | * License as published by the Free Software Foundation; either |
9 | 9 | * version 2 of the License, or (at your option) any later version. | |
10 | This library is distributed in the hope that it will be useful, | 10 | * |
11 | but WITHOUT ANY WARRANTY; without even the implied warranty of | 11 | * This library is distributed in the hope that it will be useful, |
12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | Library General Public License for more details. | 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
14 | 14 | * Library General Public License for more details. | |
15 | You should have received a copy of the GNU Library General Public | 15 | * |
16 | License along with this library; if not, write to the Free | 16 | * You should have received a copy of the GNU Library General Public |
17 | Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 17 | * License along with this library; if not, write to the Free |
18 | */ | 18 | * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
19 | 19 | */ | |
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/init.h> | 21 | #include <linux/init.h> |
22 | #include <linux/pci.h> | 22 | #include <linux/pci.h> |
23 | #include <linux/smp.h> | 23 | #include <linux/smp.h> |
24 | #include <linux/cpu.h> | 24 | #include <linux/cpu.h> |
25 | #include <linux/mutex.h> | ||
26 | #include <linux/sort.h> | 25 | #include <linux/sort.h> |
26 | #include <linux/mutex.h> | ||
27 | #include <linux/uaccess.h> | ||
28 | #include <linux/kvm_para.h> | ||
27 | 29 | ||
30 | #include <asm/processor.h> | ||
28 | #include <asm/e820.h> | 31 | #include <asm/e820.h> |
29 | #include <asm/mtrr.h> | 32 | #include <asm/mtrr.h> |
30 | #include <asm/uaccess.h> | ||
31 | #include <asm/processor.h> | ||
32 | #include <asm/msr.h> | 33 | #include <asm/msr.h> |
33 | #include <asm/kvm_para.h> | ||
34 | #include "mtrr.h" | ||
35 | 34 | ||
36 | /* should be related to MTRR_VAR_RANGES nums */ | 35 | #include "mtrr.h" |
37 | #define RANGE_NUM 256 | ||
38 | 36 | ||
39 | struct res_range { | 37 | struct res_range { |
40 | unsigned long start; | 38 | unsigned long start; |
41 | unsigned long end; | 39 | unsigned long end; |
40 | }; | ||
41 | |||
42 | struct var_mtrr_range_state { | ||
43 | unsigned long base_pfn; | ||
44 | unsigned long size_pfn; | ||
45 | mtrr_type type; | ||
46 | }; | ||
47 | |||
48 | struct var_mtrr_state { | ||
49 | unsigned long range_startk; | ||
50 | unsigned long range_sizek; | ||
51 | unsigned long chunk_sizek; | ||
52 | unsigned long gran_sizek; | ||
53 | unsigned int reg; | ||
42 | }; | 54 | }; |
43 | 55 | ||
56 | /* Should be related to MTRR_VAR_RANGES nums */ | ||
57 | #define RANGE_NUM 256 | ||
58 | |||
59 | static struct res_range __initdata range[RANGE_NUM]; | ||
60 | static int __initdata nr_range; | ||
61 | |||
62 | static struct var_mtrr_range_state __initdata range_state[RANGE_NUM]; | ||
63 | |||
64 | static int __initdata debug_print; | ||
65 | #define Dprintk(x...) do { if (debug_print) printk(KERN_DEBUG x); } while (0) | ||
66 | |||
67 | |||
44 | static int __init | 68 | static int __init |
45 | add_range(struct res_range *range, int nr_range, unsigned long start, | 69 | add_range(struct res_range *range, int nr_range, |
46 | unsigned long end) | 70 | unsigned long start, unsigned long end) |
47 | { | 71 | { |
48 | /* out of slots */ | 72 | /* Out of slots: */ |
49 | if (nr_range >= RANGE_NUM) | 73 | if (nr_range >= RANGE_NUM) |
50 | return nr_range; | 74 | return nr_range; |
51 | 75 | ||
@@ -58,12 +82,12 @@ add_range(struct res_range *range, int nr_range, unsigned long start, | |||
58 | } | 82 | } |
59 | 83 | ||
60 | static int __init | 84 | static int __init |
61 | add_range_with_merge(struct res_range *range, int nr_range, unsigned long start, | 85 | add_range_with_merge(struct res_range *range, int nr_range, |
62 | unsigned long end) | 86 | unsigned long start, unsigned long end) |
63 | { | 87 | { |
64 | int i; | 88 | int i; |
65 | 89 | ||
66 | /* try to merge it with old one */ | 90 | /* Try to merge it with the old one: */ |
67 | for (i = 0; i < nr_range; i++) { | 91 | for (i = 0; i < nr_range; i++) { |
68 | unsigned long final_start, final_end; | 92 | unsigned long final_start, final_end; |
69 | unsigned long common_start, common_end; | 93 | unsigned long common_start, common_end; |
@@ -84,7 +108,7 @@ add_range_with_merge(struct res_range *range, int nr_range, unsigned long start, | |||
84 | return nr_range; | 108 | return nr_range; |
85 | } | 109 | } |
86 | 110 | ||
87 | /* need to add that */ | 111 | /* Need to add it: */ |
88 | return add_range(range, nr_range, start, end); | 112 | return add_range(range, nr_range, start, end); |
89 | } | 113 | } |
90 | 114 | ||
@@ -117,7 +141,7 @@ subtract_range(struct res_range *range, unsigned long start, unsigned long end) | |||
117 | } | 141 | } |
118 | 142 | ||
119 | if (start > range[j].start && end < range[j].end) { | 143 | if (start > range[j].start && end < range[j].end) { |
120 | /* find the new spare */ | 144 | /* Find the new spare: */ |
121 | for (i = 0; i < RANGE_NUM; i++) { | 145 | for (i = 0; i < RANGE_NUM; i++) { |
122 | if (range[i].end == 0) | 146 | if (range[i].end == 0) |
123 | break; | 147 | break; |
@@ -146,14 +170,8 @@ static int __init cmp_range(const void *x1, const void *x2) | |||
146 | return start1 - start2; | 170 | return start1 - start2; |
147 | } | 171 | } |
148 | 172 | ||
149 | struct var_mtrr_range_state { | 173 | #define BIOS_BUG_MSG KERN_WARNING \ |
150 | unsigned long base_pfn; | 174 | "WARNING: BIOS bug: VAR MTRR %d contains strange UC entry under 1M, check with your system vendor!\n" |
151 | unsigned long size_pfn; | ||
152 | mtrr_type type; | ||
153 | }; | ||
154 | |||
155 | static struct var_mtrr_range_state __initdata range_state[RANGE_NUM]; | ||
156 | static int __initdata debug_print; | ||
157 | 175 | ||
158 | static int __init | 176 | static int __init |
159 | x86_get_mtrr_mem_range(struct res_range *range, int nr_range, | 177 | x86_get_mtrr_mem_range(struct res_range *range, int nr_range, |
@@ -180,7 +198,7 @@ x86_get_mtrr_mem_range(struct res_range *range, int nr_range, | |||
180 | range[i].start, range[i].end + 1); | 198 | range[i].start, range[i].end + 1); |
181 | } | 199 | } |
182 | 200 | ||
183 | /* take out UC ranges */ | 201 | /* Take out UC ranges: */ |
184 | for (i = 0; i < num_var_ranges; i++) { | 202 | for (i = 0; i < num_var_ranges; i++) { |
185 | type = range_state[i].type; | 203 | type = range_state[i].type; |
186 | if (type != MTRR_TYPE_UNCACHABLE && | 204 | if (type != MTRR_TYPE_UNCACHABLE && |
@@ -193,9 +211,7 @@ x86_get_mtrr_mem_range(struct res_range *range, int nr_range, | |||
193 | if (base < (1<<(20-PAGE_SHIFT)) && mtrr_state.have_fixed && | 211 | if (base < (1<<(20-PAGE_SHIFT)) && mtrr_state.have_fixed && |
194 | (mtrr_state.enabled & 1)) { | 212 | (mtrr_state.enabled & 1)) { |
195 | /* Var MTRR contains UC entry below 1M? Skip it: */ | 213 | /* Var MTRR contains UC entry below 1M? Skip it: */ |
196 | printk(KERN_WARNING "WARNING: BIOS bug: VAR MTRR %d " | 214 | printk(BIOS_BUG_MSG, i); |
197 | "contains strange UC entry under 1M, check " | ||
198 | "with your system vendor!\n", i); | ||
199 | if (base + size <= (1<<(20-PAGE_SHIFT))) | 215 | if (base + size <= (1<<(20-PAGE_SHIFT))) |
200 | continue; | 216 | continue; |
201 | size -= (1<<(20-PAGE_SHIFT)) - base; | 217 | size -= (1<<(20-PAGE_SHIFT)) - base; |
@@ -237,17 +253,13 @@ x86_get_mtrr_mem_range(struct res_range *range, int nr_range, | |||
237 | return nr_range; | 253 | return nr_range; |
238 | } | 254 | } |
239 | 255 | ||
240 | static struct res_range __initdata range[RANGE_NUM]; | ||
241 | static int __initdata nr_range; | ||
242 | |||
243 | #ifdef CONFIG_MTRR_SANITIZER | 256 | #ifdef CONFIG_MTRR_SANITIZER |
244 | 257 | ||
245 | static unsigned long __init sum_ranges(struct res_range *range, int nr_range) | 258 | static unsigned long __init sum_ranges(struct res_range *range, int nr_range) |
246 | { | 259 | { |
247 | unsigned long sum; | 260 | unsigned long sum = 0; |
248 | int i; | 261 | int i; |
249 | 262 | ||
250 | sum = 0; | ||
251 | for (i = 0; i < nr_range; i++) | 263 | for (i = 0; i < nr_range; i++) |
252 | sum += range[i].end + 1 - range[i].start; | 264 | sum += range[i].end + 1 - range[i].start; |
253 | 265 | ||
@@ -278,17 +290,9 @@ static int __init mtrr_cleanup_debug_setup(char *str) | |||
278 | } | 290 | } |
279 | early_param("mtrr_cleanup_debug", mtrr_cleanup_debug_setup); | 291 | early_param("mtrr_cleanup_debug", mtrr_cleanup_debug_setup); |
280 | 292 | ||
281 | struct var_mtrr_state { | ||
282 | unsigned long range_startk; | ||
283 | unsigned long range_sizek; | ||
284 | unsigned long chunk_sizek; | ||
285 | unsigned long gran_sizek; | ||
286 | unsigned int reg; | ||
287 | }; | ||
288 | |||
289 | static void __init | 293 | static void __init |
290 | set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek, | 294 | set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek, |
291 | unsigned char type, unsigned int address_bits) | 295 | unsigned char type, unsigned int address_bits) |
292 | { | 296 | { |
293 | u32 base_lo, base_hi, mask_lo, mask_hi; | 297 | u32 base_lo, base_hi, mask_lo, mask_hi; |
294 | u64 base, mask; | 298 | u64 base, mask; |
@@ -301,7 +305,7 @@ set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek, | |||
301 | mask = (1ULL << address_bits) - 1; | 305 | mask = (1ULL << address_bits) - 1; |
302 | mask &= ~((((u64)sizek) << 10) - 1); | 306 | mask &= ~((((u64)sizek) << 10) - 1); |
303 | 307 | ||
304 | base = ((u64)basek) << 10; | 308 | base = ((u64)basek) << 10; |
305 | 309 | ||
306 | base |= type; | 310 | base |= type; |
307 | mask |= 0x800; | 311 | mask |= 0x800; |
@@ -317,15 +321,14 @@ set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek, | |||
317 | 321 | ||
318 | static void __init | 322 | static void __init |
319 | save_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek, | 323 | save_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek, |
320 | unsigned char type) | 324 | unsigned char type) |
321 | { | 325 | { |
322 | range_state[reg].base_pfn = basek >> (PAGE_SHIFT - 10); | 326 | range_state[reg].base_pfn = basek >> (PAGE_SHIFT - 10); |
323 | range_state[reg].size_pfn = sizek >> (PAGE_SHIFT - 10); | 327 | range_state[reg].size_pfn = sizek >> (PAGE_SHIFT - 10); |
324 | range_state[reg].type = type; | 328 | range_state[reg].type = type; |
325 | } | 329 | } |
326 | 330 | ||
327 | static void __init | 331 | static void __init set_var_mtrr_all(unsigned int address_bits) |
328 | set_var_mtrr_all(unsigned int address_bits) | ||
329 | { | 332 | { |
330 | unsigned long basek, sizek; | 333 | unsigned long basek, sizek; |
331 | unsigned char type; | 334 | unsigned char type; |
@@ -342,11 +345,11 @@ set_var_mtrr_all(unsigned int address_bits) | |||
342 | 345 | ||
343 | static unsigned long to_size_factor(unsigned long sizek, char *factorp) | 346 | static unsigned long to_size_factor(unsigned long sizek, char *factorp) |
344 | { | 347 | { |
345 | char factor; | ||
346 | unsigned long base = sizek; | 348 | unsigned long base = sizek; |
349 | char factor; | ||
347 | 350 | ||
348 | if (base & ((1<<10) - 1)) { | 351 | if (base & ((1<<10) - 1)) { |
349 | /* not MB alignment */ | 352 | /* Not MB-aligned: */ |
350 | factor = 'K'; | 353 | factor = 'K'; |
351 | } else if (base & ((1<<20) - 1)) { | 354 | } else if (base & ((1<<20) - 1)) { |
352 | factor = 'M'; | 355 | factor = 'M'; |
@@ -372,11 +375,12 @@ range_to_mtrr(unsigned int reg, unsigned long range_startk, | |||
372 | unsigned long max_align, align; | 375 | unsigned long max_align, align; |
373 | unsigned long sizek; | 376 | unsigned long sizek; |
374 | 377 | ||
375 | /* Compute the maximum size I can make a range */ | 378 | /* Compute the maximum size with which we can make a range: */ |
376 | if (range_startk) | 379 | if (range_startk) |
377 | max_align = ffs(range_startk) - 1; | 380 | max_align = ffs(range_startk) - 1; |
378 | else | 381 | else |
379 | max_align = 32; | 382 | max_align = 32; |
383 | |||
380 | align = fls(range_sizek) - 1; | 384 | align = fls(range_sizek) - 1; |
381 | if (align > max_align) | 385 | if (align > max_align) |
382 | align = max_align; | 386 | align = max_align; |
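
The ffs()/fls() pair above picks, at each step, the largest naturally aligned power-of-two block that both starts at range_startk and fits in range_sizek. A user-space sketch of that carving loop with assumed inputs (3M of writeback range starting at 1M), ignoring the chunk/gran handling; fls() is open-coded since it is a kernel helper:

#include <stdio.h>
#include <strings.h>    /* ffs() */

static int fls_(unsigned long v)
{
        int i = 0;

        while (v) {
                v >>= 1;
                i++;
        }
        return i;
}

int main(void)
{
        unsigned long range_startk = 1024;      /* 1M, assumed */
        unsigned long range_sizek = 3072;       /* 3M, assumed */

        while (range_sizek) {
                /* Largest alignment the start allows vs. largest that fits */
                int max_align = range_startk ? ffs((int)range_startk) - 1 : 32;
                int align = fls_(range_sizek) - 1;
                unsigned long sizek;

                if (align > max_align)
                        align = max_align;
                sizek = 1UL << align;
                printf("MTRR at %luK, size %luK\n", range_startk, sizek);
                range_startk += sizek;
                range_sizek -= sizek;
        }
        return 0;       /* emits 1M@1M, then 2M@2M */
}
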
@@ -386,11 +390,10 @@ range_to_mtrr(unsigned int reg, unsigned long range_startk, | |||
386 | char start_factor = 'K', size_factor = 'K'; | 390 | char start_factor = 'K', size_factor = 'K'; |
387 | unsigned long start_base, size_base; | 391 | unsigned long start_base, size_base; |
388 | 392 | ||
389 | start_base = to_size_factor(range_startk, | 393 | start_base = to_size_factor(range_startk, &start_factor); |
390 | &start_factor), | 394 | size_base = to_size_factor(sizek, &size_factor); |
391 | size_base = to_size_factor(sizek, &size_factor), | ||
392 | 395 | ||
393 | printk(KERN_DEBUG "Setting variable MTRR %d, " | 396 | Dprintk("Setting variable MTRR %d, " |
394 | "base: %ld%cB, range: %ld%cB, type %s\n", | 397 | "base: %ld%cB, range: %ld%cB, type %s\n", |
395 | reg, start_base, start_factor, | 398 | reg, start_base, start_factor, |
396 | size_base, size_factor, | 399 | size_base, size_factor, |
@@ -425,10 +428,11 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek, | |||
425 | chunk_sizek = state->chunk_sizek; | 428 | chunk_sizek = state->chunk_sizek; |
426 | gran_sizek = state->gran_sizek; | 429 | gran_sizek = state->gran_sizek; |
427 | 430 | ||
428 | /* align with gran size, prevent small block used up MTRRs */ | 431 | /* Align with gran size to prevent small blocks from using up MTRRs: */ |
429 | range_basek = ALIGN(state->range_startk, gran_sizek); | 432 | range_basek = ALIGN(state->range_startk, gran_sizek); |
430 | if ((range_basek > basek) && basek) | 433 | if ((range_basek > basek) && basek) |
431 | return second_sizek; | 434 | return second_sizek; |
435 | |||
432 | state->range_sizek -= (range_basek - state->range_startk); | 436 | state->range_sizek -= (range_basek - state->range_startk); |
433 | range_sizek = ALIGN(state->range_sizek, gran_sizek); | 437 | range_sizek = ALIGN(state->range_sizek, gran_sizek); |
434 | 438 | ||
@@ -439,22 +443,21 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek, | |||
439 | } | 443 | } |
440 | state->range_sizek = range_sizek; | 444 | state->range_sizek = range_sizek; |
441 | 445 | ||
442 | /* try to append some small hole */ | 446 | /* Try to append some small hole: */ |
443 | range0_basek = state->range_startk; | 447 | range0_basek = state->range_startk; |
444 | range0_sizek = ALIGN(state->range_sizek, chunk_sizek); | 448 | range0_sizek = ALIGN(state->range_sizek, chunk_sizek); |
445 | 449 | ||
446 | /* no increase */ | 450 | /* No increase: */ |
447 | if (range0_sizek == state->range_sizek) { | 451 | if (range0_sizek == state->range_sizek) { |
448 | if (debug_print) | 452 | Dprintk("rangeX: %016lx - %016lx\n", |
449 | printk(KERN_DEBUG "rangeX: %016lx - %016lx\n", | 453 | range0_basek<<10, |
450 | range0_basek<<10, | 454 | (range0_basek + state->range_sizek)<<10); |
451 | (range0_basek + state->range_sizek)<<10); | ||
452 | state->reg = range_to_mtrr(state->reg, range0_basek, | 455 | state->reg = range_to_mtrr(state->reg, range0_basek, |
453 | state->range_sizek, MTRR_TYPE_WRBACK); | 456 | state->range_sizek, MTRR_TYPE_WRBACK); |
454 | return 0; | 457 | return 0; |
455 | } | 458 | } |
456 | 459 | ||
457 | /* only cut back, when it is not the last */ | 460 | /* Only cut back when it is not the last: */ |
458 | if (sizek) { | 461 | if (sizek) { |
459 | while (range0_basek + range0_sizek > (basek + sizek)) { | 462 | while (range0_basek + range0_sizek > (basek + sizek)) { |
460 | if (range0_sizek >= chunk_sizek) | 463 | if (range0_sizek >= chunk_sizek) |
@@ -470,16 +473,16 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek, | |||
470 | second_try: | 473 | second_try: |
471 | range_basek = range0_basek + range0_sizek; | 474 | range_basek = range0_basek + range0_sizek; |
472 | 475 | ||
473 | /* one hole in the middle */ | 476 | /* One hole in the middle: */ |
474 | if (range_basek > basek && range_basek <= (basek + sizek)) | 477 | if (range_basek > basek && range_basek <= (basek + sizek)) |
475 | second_sizek = range_basek - basek; | 478 | second_sizek = range_basek - basek; |
476 | 479 | ||
477 | if (range0_sizek > state->range_sizek) { | 480 | if (range0_sizek > state->range_sizek) { |
478 | 481 | ||
479 | /* one hole in middle or at end */ | 482 | /* One hole in middle or at the end: */ |
480 | hole_sizek = range0_sizek - state->range_sizek - second_sizek; | 483 | hole_sizek = range0_sizek - state->range_sizek - second_sizek; |
481 | 484 | ||
482 | /* hole size should be less than half of range0 size */ | 485 | /* Hole size should be less than half of range0 size: */ |
483 | if (hole_sizek >= (range0_sizek >> 1) && | 486 | if (hole_sizek >= (range0_sizek >> 1) && |
484 | range0_sizek >= chunk_sizek) { | 487 | range0_sizek >= chunk_sizek) { |
485 | range0_sizek -= chunk_sizek; | 488 | range0_sizek -= chunk_sizek; |
@@ -491,32 +494,30 @@ second_try: | |||
491 | } | 494 | } |
492 | 495 | ||
493 | if (range0_sizek) { | 496 | if (range0_sizek) { |
494 | if (debug_print) | 497 | Dprintk("range0: %016lx - %016lx\n", |
495 | printk(KERN_DEBUG "range0: %016lx - %016lx\n", | 498 | range0_basek<<10, |
496 | range0_basek<<10, | 499 | (range0_basek + range0_sizek)<<10); |
497 | (range0_basek + range0_sizek)<<10); | ||
498 | state->reg = range_to_mtrr(state->reg, range0_basek, | 500 | state->reg = range_to_mtrr(state->reg, range0_basek, |
499 | range0_sizek, MTRR_TYPE_WRBACK); | 501 | range0_sizek, MTRR_TYPE_WRBACK); |
500 | } | 502 | } |
501 | 503 | ||
502 | if (range0_sizek < state->range_sizek) { | 504 | if (range0_sizek < state->range_sizek) { |
503 | /* need to handle left over */ | 505 | /* Need to handle left over range: */ |
504 | range_sizek = state->range_sizek - range0_sizek; | 506 | range_sizek = state->range_sizek - range0_sizek; |
505 | 507 | ||
506 | if (debug_print) | 508 | Dprintk("range: %016lx - %016lx\n", |
507 | printk(KERN_DEBUG "range: %016lx - %016lx\n", | 509 | range_basek<<10, |
508 | range_basek<<10, | 510 | (range_basek + range_sizek)<<10); |
509 | (range_basek + range_sizek)<<10); | 511 | |
510 | state->reg = range_to_mtrr(state->reg, range_basek, | 512 | state->reg = range_to_mtrr(state->reg, range_basek, |
511 | range_sizek, MTRR_TYPE_WRBACK); | 513 | range_sizek, MTRR_TYPE_WRBACK); |
512 | } | 514 | } |
513 | 515 | ||
514 | if (hole_sizek) { | 516 | if (hole_sizek) { |
515 | hole_basek = range_basek - hole_sizek - second_sizek; | 517 | hole_basek = range_basek - hole_sizek - second_sizek; |
516 | if (debug_print) | 518 | Dprintk("hole: %016lx - %016lx\n", |
517 | printk(KERN_DEBUG "hole: %016lx - %016lx\n", | 519 | hole_basek<<10, |
518 | hole_basek<<10, | 520 | (hole_basek + hole_sizek)<<10); |
519 | (hole_basek + hole_sizek)<<10); | ||
520 | state->reg = range_to_mtrr(state->reg, hole_basek, | 521 | state->reg = range_to_mtrr(state->reg, hole_basek, |
521 | hole_sizek, MTRR_TYPE_UNCACHABLE); | 522 | hole_sizek, MTRR_TYPE_UNCACHABLE); |
522 | } | 523 | } |
@@ -537,23 +538,23 @@ set_var_mtrr_range(struct var_mtrr_state *state, unsigned long base_pfn, | |||
537 | basek = base_pfn << (PAGE_SHIFT - 10); | 538 | basek = base_pfn << (PAGE_SHIFT - 10); |
538 | sizek = size_pfn << (PAGE_SHIFT - 10); | 539 | sizek = size_pfn << (PAGE_SHIFT - 10); |
539 | 540 | ||
540 | /* See if I can merge with the last range */ | 541 | /* See if I can merge with the last range: */ |
541 | if ((basek <= 1024) || | 542 | if ((basek <= 1024) || |
542 | (state->range_startk + state->range_sizek == basek)) { | 543 | (state->range_startk + state->range_sizek == basek)) { |
543 | unsigned long endk = basek + sizek; | 544 | unsigned long endk = basek + sizek; |
544 | state->range_sizek = endk - state->range_startk; | 545 | state->range_sizek = endk - state->range_startk; |
545 | return; | 546 | return; |
546 | } | 547 | } |
547 | /* Write the range mtrrs */ | 548 | /* Write the range mtrrs: */ |
548 | if (state->range_sizek != 0) | 549 | if (state->range_sizek != 0) |
549 | second_sizek = range_to_mtrr_with_hole(state, basek, sizek); | 550 | second_sizek = range_to_mtrr_with_hole(state, basek, sizek); |
550 | 551 | ||
551 | /* Allocate an msr */ | 552 | /* Allocate an msr: */ |
552 | state->range_startk = basek + second_sizek; | 553 | state->range_startk = basek + second_sizek; |
553 | state->range_sizek = sizek - second_sizek; | 554 | state->range_sizek = sizek - second_sizek; |
554 | } | 555 | } |
555 | 556 | ||
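
The merge test in set_var_mtrr_range() above accepts a new chunk into the pending range either when it lies below 1MB (basek <= 1024, all units are KB) or when it starts exactly where the pending range ends. A tiny standalone illustration of the contiguous case (values made up):

    #include <stdio.h>

    int main(void)
    {
            unsigned long range_startk = 2048, range_sizek = 1024;
            unsigned long basek = 3072, sizek = 512; /* contiguous: 2048 + 1024 == 3072 */

            if (basek <= 1024 || range_startk + range_sizek == basek) {
                    range_sizek = (basek + sizek) - range_startk;
                    printf("merged range: %luK - %luK\n",
                           range_startk, range_startk + range_sizek);
            }
            return 0;
    }
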
556 | /* mininum size of mtrr block that can take hole */ | 557 | /* Minimum size of mtrr block that can take a hole: */ |
557 | static u64 mtrr_chunk_size __initdata = (256ULL<<20); | 558 | static u64 mtrr_chunk_size __initdata = (256ULL<<20); |
558 | 559 | ||
559 | static int __init parse_mtrr_chunk_size_opt(char *p) | 560 | static int __init parse_mtrr_chunk_size_opt(char *p) |
@@ -565,7 +566,7 @@ static int __init parse_mtrr_chunk_size_opt(char *p) | |||
565 | } | 566 | } |
566 | early_param("mtrr_chunk_size", parse_mtrr_chunk_size_opt); | 567 | early_param("mtrr_chunk_size", parse_mtrr_chunk_size_opt); |
567 | 568 | ||
568 | /* granity of mtrr of block */ | 569 | /* Granularity of an mtrr block: */ |
569 | static u64 mtrr_gran_size __initdata; | 570 | static u64 mtrr_gran_size __initdata; |
570 | 571 | ||
571 | static int __init parse_mtrr_gran_size_opt(char *p) | 572 | static int __init parse_mtrr_gran_size_opt(char *p) |
@@ -577,7 +578,7 @@ static int __init parse_mtrr_gran_size_opt(char *p) | |||
577 | } | 578 | } |
578 | early_param("mtrr_gran_size", parse_mtrr_gran_size_opt); | 579 | early_param("mtrr_gran_size", parse_mtrr_gran_size_opt); |
579 | 580 | ||
580 | static int nr_mtrr_spare_reg __initdata = | 581 | static unsigned long nr_mtrr_spare_reg __initdata = |
581 | CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT; | 582 | CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT; |
582 | 583 | ||
583 | static int __init parse_mtrr_spare_reg(char *arg) | 584 | static int __init parse_mtrr_spare_reg(char *arg) |
@@ -586,7 +587,6 @@ static int __init parse_mtrr_spare_reg(char *arg) | |||
586 | nr_mtrr_spare_reg = simple_strtoul(arg, NULL, 0); | 587 | nr_mtrr_spare_reg = simple_strtoul(arg, NULL, 0); |
587 | return 0; | 588 | return 0; |
588 | } | 589 | } |
589 | |||
590 | early_param("mtrr_spare_reg_nr", parse_mtrr_spare_reg); | 590 | early_param("mtrr_spare_reg_nr", parse_mtrr_spare_reg); |
591 | 591 | ||
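
For context, the three knobs above are boot-time parameters, so the sanitizer can be tuned from the kernel command line without a rebuild; the two size options take the usual k/M/G suffixes. An illustrative (not prescriptive) invocation:

    mtrr_chunk_size=256M mtrr_gran_size=64M mtrr_spare_reg_nr=1
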
592 | static int __init | 592 | static int __init |
@@ -594,8 +594,8 @@ x86_setup_var_mtrrs(struct res_range *range, int nr_range, | |||
594 | u64 chunk_size, u64 gran_size) | 594 | u64 chunk_size, u64 gran_size) |
595 | { | 595 | { |
596 | struct var_mtrr_state var_state; | 596 | struct var_mtrr_state var_state; |
597 | int i; | ||
598 | int num_reg; | 597 | int num_reg; |
598 | int i; | ||
599 | 599 | ||
600 | var_state.range_startk = 0; | 600 | var_state.range_startk = 0; |
601 | var_state.range_sizek = 0; | 601 | var_state.range_sizek = 0; |
@@ -605,17 +605,18 @@ x86_setup_var_mtrrs(struct res_range *range, int nr_range, | |||
605 | 605 | ||
606 | memset(range_state, 0, sizeof(range_state)); | 606 | memset(range_state, 0, sizeof(range_state)); |
607 | 607 | ||
608 | /* Write the range etc */ | 608 | /* Write the range: */ |
609 | for (i = 0; i < nr_range; i++) | 609 | for (i = 0; i < nr_range; i++) { |
610 | set_var_mtrr_range(&var_state, range[i].start, | 610 | set_var_mtrr_range(&var_state, range[i].start, |
611 | range[i].end - range[i].start + 1); | 611 | range[i].end - range[i].start + 1); |
612 | } | ||
612 | 613 | ||
613 | /* Write the last range */ | 614 | /* Write the last range: */ |
614 | if (var_state.range_sizek != 0) | 615 | if (var_state.range_sizek != 0) |
615 | range_to_mtrr_with_hole(&var_state, 0, 0); | 616 | range_to_mtrr_with_hole(&var_state, 0, 0); |
616 | 617 | ||
617 | num_reg = var_state.reg; | 618 | num_reg = var_state.reg; |
618 | /* Clear out the extra MTRR's */ | 619 | /* Clear out the extra MTRRs: */ |
619 | while (var_state.reg < num_var_ranges) { | 620 | while (var_state.reg < num_var_ranges) { |
620 | save_var_mtrr(var_state.reg, 0, 0, 0); | 621 | save_var_mtrr(var_state.reg, 0, 0, 0); |
621 | var_state.reg++; | 622 | var_state.reg++; |
@@ -625,11 +626,11 @@ x86_setup_var_mtrrs(struct res_range *range, int nr_range, | |||
625 | } | 626 | } |
626 | 627 | ||
627 | struct mtrr_cleanup_result { | 628 | struct mtrr_cleanup_result { |
628 | unsigned long gran_sizek; | 629 | unsigned long gran_sizek; |
629 | unsigned long chunk_sizek; | 630 | unsigned long chunk_sizek; |
630 | unsigned long lose_cover_sizek; | 631 | unsigned long lose_cover_sizek; |
631 | unsigned int num_reg; | 632 | unsigned int num_reg; |
632 | int bad; | 633 | int bad; |
633 | }; | 634 | }; |
634 | 635 | ||
635 | /* | 636 | /* |
@@ -645,10 +646,10 @@ static unsigned long __initdata min_loss_pfn[RANGE_NUM]; | |||
645 | 646 | ||
646 | static void __init print_out_mtrr_range_state(void) | 647 | static void __init print_out_mtrr_range_state(void) |
647 | { | 648 | { |
648 | int i; | ||
649 | char start_factor = 'K', size_factor = 'K'; | 649 | char start_factor = 'K', size_factor = 'K'; |
650 | unsigned long start_base, size_base; | 650 | unsigned long start_base, size_base; |
651 | mtrr_type type; | 651 | mtrr_type type; |
652 | int i; | ||
652 | 653 | ||
653 | for (i = 0; i < num_var_ranges; i++) { | 654 | for (i = 0; i < num_var_ranges; i++) { |
654 | 655 | ||
@@ -676,10 +677,10 @@ static int __init mtrr_need_cleanup(void) | |||
676 | int i; | 677 | int i; |
677 | mtrr_type type; | 678 | mtrr_type type; |
678 | unsigned long size; | 679 | unsigned long size; |
679 | /* extra one for all 0 */ | 680 | /* Extra one for all 0: */ |
680 | int num[MTRR_NUM_TYPES + 1]; | 681 | int num[MTRR_NUM_TYPES + 1]; |
681 | 682 | ||
682 | /* check entries number */ | 683 | /* Check the number of entries: */ |
683 | memset(num, 0, sizeof(num)); | 684 | memset(num, 0, sizeof(num)); |
684 | for (i = 0; i < num_var_ranges; i++) { | 685 | for (i = 0; i < num_var_ranges; i++) { |
685 | type = range_state[i].type; | 686 | type = range_state[i].type; |
@@ -693,88 +694,86 @@ static int __init mtrr_need_cleanup(void) | |||
693 | num[type]++; | 694 | num[type]++; |
694 | } | 695 | } |
695 | 696 | ||
696 | /* check if we got UC entries */ | 697 | /* Check if we got UC entries: */ |
697 | if (!num[MTRR_TYPE_UNCACHABLE]) | 698 | if (!num[MTRR_TYPE_UNCACHABLE]) |
698 | return 0; | 699 | return 0; |
699 | 700 | ||
700 | /* check if we only had WB and UC */ | 701 | /* Check if we only had WB and UC: */ |
701 | if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] != | 702 | if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] != |
702 | num_var_ranges - num[MTRR_NUM_TYPES]) | 703 | num_var_ranges - num[MTRR_NUM_TYPES]) |
703 | return 0; | 704 | return 0; |
704 | 705 | ||
705 | return 1; | 706 | return 1; |
706 | } | 707 | } |
707 | 708 | ||
708 | static unsigned long __initdata range_sums; | 709 | static unsigned long __initdata range_sums; |
709 | static void __init mtrr_calc_range_state(u64 chunk_size, u64 gran_size, | 710 | |
710 | unsigned long extra_remove_base, | 711 | static void __init |
711 | unsigned long extra_remove_size, | 712 | mtrr_calc_range_state(u64 chunk_size, u64 gran_size, |
712 | int i) | 713 | unsigned long x_remove_base, |
714 | unsigned long x_remove_size, int i) | ||
713 | { | 715 | { |
714 | int num_reg; | ||
715 | static struct res_range range_new[RANGE_NUM]; | 716 | static struct res_range range_new[RANGE_NUM]; |
716 | static int nr_range_new; | ||
717 | unsigned long range_sums_new; | 717 | unsigned long range_sums_new; |
718 | static int nr_range_new; | ||
719 | int num_reg; | ||
718 | 720 | ||
719 | /* convert ranges to var ranges state */ | 721 | /* Convert ranges to var ranges state: */ |
720 | num_reg = x86_setup_var_mtrrs(range, nr_range, | 722 | num_reg = x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size); |
721 | chunk_size, gran_size); | ||
722 | 723 | ||
723 | /* we got new setting in range_state, check it */ | 724 | /* We got new setting in range_state, check it: */ |
724 | memset(range_new, 0, sizeof(range_new)); | 725 | memset(range_new, 0, sizeof(range_new)); |
725 | nr_range_new = x86_get_mtrr_mem_range(range_new, 0, | 726 | nr_range_new = x86_get_mtrr_mem_range(range_new, 0, |
726 | extra_remove_base, extra_remove_size); | 727 | x_remove_base, x_remove_size); |
727 | range_sums_new = sum_ranges(range_new, nr_range_new); | 728 | range_sums_new = sum_ranges(range_new, nr_range_new); |
728 | 729 | ||
729 | result[i].chunk_sizek = chunk_size >> 10; | 730 | result[i].chunk_sizek = chunk_size >> 10; |
730 | result[i].gran_sizek = gran_size >> 10; | 731 | result[i].gran_sizek = gran_size >> 10; |
731 | result[i].num_reg = num_reg; | 732 | result[i].num_reg = num_reg; |
733 | |||
732 | if (range_sums < range_sums_new) { | 734 | if (range_sums < range_sums_new) { |
733 | result[i].lose_cover_sizek = | 735 | result[i].lose_cover_sizek = (range_sums_new - range_sums) << PSHIFT; |
734 | (range_sums_new - range_sums) << PSHIFT; | ||
735 | result[i].bad = 1; | 736 | result[i].bad = 1; |
736 | } else | 737 | } else { |
737 | result[i].lose_cover_sizek = | 738 | result[i].lose_cover_sizek = (range_sums - range_sums_new) << PSHIFT; |
738 | (range_sums - range_sums_new) << PSHIFT; | 739 | } |
739 | 740 | ||
740 | /* double check it */ | 741 | /* Double check it: */ |
741 | if (!result[i].bad && !result[i].lose_cover_sizek) { | 742 | if (!result[i].bad && !result[i].lose_cover_sizek) { |
742 | if (nr_range_new != nr_range || | 743 | if (nr_range_new != nr_range || memcmp(range, range_new, sizeof(range))) |
743 | memcmp(range, range_new, sizeof(range))) | 744 | result[i].bad = 1; |
744 | result[i].bad = 1; | ||
745 | } | 745 | } |
746 | 746 | ||
747 | if (!result[i].bad && (range_sums - range_sums_new < | 747 | if (!result[i].bad && (range_sums - range_sums_new < min_loss_pfn[num_reg])) |
748 | min_loss_pfn[num_reg])) { | 748 | min_loss_pfn[num_reg] = range_sums - range_sums_new; |
749 | min_loss_pfn[num_reg] = | ||
750 | range_sums - range_sums_new; | ||
751 | } | ||
752 | } | 749 | } |
753 | 750 | ||
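
mtrr_calc_range_state() above records, for each (chunk_size, gran_size) candidate, how many registers it needs, how much coverage it loses, and whether it is outright bad. The selection that mtrr_cleanup() performs later boils down to the shape below (a simplified sketch; the real search also weighs spare registers through min_loss_pfn[]):

    #include <stdio.h>

    struct result { int bad; unsigned int num_reg; unsigned long lose_sizek; };

    int main(void)
    {
            struct result res[] = { { 0, 6, 0 }, { 0, 4, 1024 }, { 1, 3, 0 } };
            unsigned long best_loss = ~0UL;
            int best = -1;

            for (int i = 0; i < 3; i++) {
                    if (res[i].bad)
                            continue;       /* candidate lost or mis-covered RAM */
                    if (res[i].lose_sizek < best_loss) {
                            best_loss = res[i].lose_sizek;
                            best = i;
                    }
            }
            printf("best candidate: %d (loses %luK)\n", best, best_loss);
            return 0;
    }
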
754 | static void __init mtrr_print_out_one_result(int i) | 751 | static void __init mtrr_print_out_one_result(int i) |
755 | { | 752 | { |
756 | char gran_factor, chunk_factor, lose_factor; | ||
757 | unsigned long gran_base, chunk_base, lose_base; | 753 | unsigned long gran_base, chunk_base, lose_base; |
754 | char gran_factor, chunk_factor, lose_factor; | ||
758 | 755 | ||
759 | gran_base = to_size_factor(result[i].gran_sizek, &gran_factor), | 756 | gran_base = to_size_factor(result[i].gran_sizek, &gran_factor), |
760 | chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor), | 757 | chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor), |
761 | lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor), | 758 | lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor), |
762 | printk(KERN_INFO "%sgran_size: %ld%c \tchunk_size: %ld%c \t", | 759 | |
763 | result[i].bad ? "*BAD*" : " ", | 760 | pr_info("%sgran_size: %ld%c \tchunk_size: %ld%c \t", |
764 | gran_base, gran_factor, chunk_base, chunk_factor); | 761 | result[i].bad ? "*BAD*" : " ", |
765 | printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ld%c\n", | 762 | gran_base, gran_factor, chunk_base, chunk_factor); |
766 | result[i].num_reg, result[i].bad ? "-" : "", | 763 | pr_cont("num_reg: %d \tlose cover RAM: %s%ld%c\n", |
767 | lose_base, lose_factor); | 764 | result[i].num_reg, result[i].bad ? "-" : "", |
765 | lose_base, lose_factor); | ||
768 | } | 766 | } |
769 | 767 | ||
770 | static int __init mtrr_search_optimal_index(void) | 768 | static int __init mtrr_search_optimal_index(void) |
771 | { | 769 | { |
772 | int i; | ||
773 | int num_reg_good; | 770 | int num_reg_good; |
774 | int index_good; | 771 | int index_good; |
772 | int i; | ||
775 | 773 | ||
776 | if (nr_mtrr_spare_reg >= num_var_ranges) | 774 | if (nr_mtrr_spare_reg >= num_var_ranges) |
777 | nr_mtrr_spare_reg = num_var_ranges - 1; | 775 | nr_mtrr_spare_reg = num_var_ranges - 1; |
776 | |||
778 | num_reg_good = -1; | 777 | num_reg_good = -1; |
779 | for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) { | 778 | for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) { |
780 | if (!min_loss_pfn[i]) | 779 | if (!min_loss_pfn[i]) |
@@ -796,24 +795,24 @@ static int __init mtrr_search_optimal_index(void) | |||
796 | return index_good; | 795 | return index_good; |
797 | } | 796 | } |
798 | 797 | ||
799 | |||
800 | int __init mtrr_cleanup(unsigned address_bits) | 798 | int __init mtrr_cleanup(unsigned address_bits) |
801 | { | 799 | { |
802 | unsigned long extra_remove_base, extra_remove_size; | 800 | unsigned long x_remove_base, x_remove_size; |
803 | unsigned long base, size, def, dummy; | 801 | unsigned long base, size, def, dummy; |
804 | mtrr_type type; | ||
805 | u64 chunk_size, gran_size; | 802 | u64 chunk_size, gran_size; |
803 | mtrr_type type; | ||
806 | int index_good; | 804 | int index_good; |
807 | int i; | 805 | int i; |
808 | 806 | ||
809 | if (!is_cpu(INTEL) || enable_mtrr_cleanup < 1) | 807 | if (!is_cpu(INTEL) || enable_mtrr_cleanup < 1) |
810 | return 0; | 808 | return 0; |
809 | |||
811 | rdmsr(MSR_MTRRdefType, def, dummy); | 810 | rdmsr(MSR_MTRRdefType, def, dummy); |
812 | def &= 0xff; | 811 | def &= 0xff; |
813 | if (def != MTRR_TYPE_UNCACHABLE) | 812 | if (def != MTRR_TYPE_UNCACHABLE) |
814 | return 0; | 813 | return 0; |
815 | 814 | ||
816 | /* get it and store it aside */ | 815 | /* Get it and store it aside: */ |
817 | memset(range_state, 0, sizeof(range_state)); | 816 | memset(range_state, 0, sizeof(range_state)); |
818 | for (i = 0; i < num_var_ranges; i++) { | 817 | for (i = 0; i < num_var_ranges; i++) { |
819 | mtrr_if->get(i, &base, &size, &type); | 818 | mtrr_if->get(i, &base, &size, &type); |
@@ -822,29 +821,28 @@ int __init mtrr_cleanup(unsigned address_bits) | |||
822 | range_state[i].type = type; | 821 | range_state[i].type = type; |
823 | } | 822 | } |
824 | 823 | ||
825 | /* check if we need handle it and can handle it */ | 824 | /* Check if we need to handle it and can handle it: */ |
826 | if (!mtrr_need_cleanup()) | 825 | if (!mtrr_need_cleanup()) |
827 | return 0; | 826 | return 0; |
828 | 827 | ||
829 | /* print original var MTRRs at first, for debugging: */ | 828 | /* Print original var MTRRs at first, for debugging: */ |
830 | printk(KERN_DEBUG "original variable MTRRs\n"); | 829 | printk(KERN_DEBUG "original variable MTRRs\n"); |
831 | print_out_mtrr_range_state(); | 830 | print_out_mtrr_range_state(); |
832 | 831 | ||
833 | memset(range, 0, sizeof(range)); | 832 | memset(range, 0, sizeof(range)); |
834 | extra_remove_size = 0; | 833 | x_remove_size = 0; |
835 | extra_remove_base = 1 << (32 - PAGE_SHIFT); | 834 | x_remove_base = 1 << (32 - PAGE_SHIFT); |
836 | if (mtrr_tom2) | 835 | if (mtrr_tom2) |
837 | extra_remove_size = | 836 | x_remove_size = (mtrr_tom2 >> PAGE_SHIFT) - x_remove_base; |
838 | (mtrr_tom2 >> PAGE_SHIFT) - extra_remove_base; | 837 | |
839 | nr_range = x86_get_mtrr_mem_range(range, 0, extra_remove_base, | 838 | nr_range = x86_get_mtrr_mem_range(range, 0, x_remove_base, x_remove_size); |
840 | extra_remove_size); | ||
841 | /* | 839 | /* |
842 | * [0, 1M) should always be coverred by var mtrr with WB | 840 | * [0, 1M) should always be covered by var mtrr with WB |
843 | * and fixed mtrrs should take effective before var mtrr for it | 841 | * and fixed mtrrs should take effect before var mtrr for it: |
844 | */ | 842 | */ |
845 | nr_range = add_range_with_merge(range, nr_range, 0, | 843 | nr_range = add_range_with_merge(range, nr_range, 0, |
846 | (1ULL<<(20 - PAGE_SHIFT)) - 1); | 844 | (1ULL<<(20 - PAGE_SHIFT)) - 1); |
847 | /* sort the ranges */ | 845 | /* Sort the ranges: */ |
848 | sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL); | 846 | sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL); |
849 | 847 | ||
850 | range_sums = sum_ranges(range, nr_range); | 848 | range_sums = sum_ranges(range, nr_range); |
@@ -854,7 +852,7 @@ int __init mtrr_cleanup(unsigned address_bits) | |||
854 | if (mtrr_chunk_size && mtrr_gran_size) { | 852 | if (mtrr_chunk_size && mtrr_gran_size) { |
855 | i = 0; | 853 | i = 0; |
856 | mtrr_calc_range_state(mtrr_chunk_size, mtrr_gran_size, | 854 | mtrr_calc_range_state(mtrr_chunk_size, mtrr_gran_size, |
857 | extra_remove_base, extra_remove_size, i); | 855 | x_remove_base, x_remove_size, i); |
858 | 856 | ||
859 | mtrr_print_out_one_result(i); | 857 | mtrr_print_out_one_result(i); |
860 | 858 | ||
@@ -880,7 +878,7 @@ int __init mtrr_cleanup(unsigned address_bits) | |||
880 | continue; | 878 | continue; |
881 | 879 | ||
882 | mtrr_calc_range_state(chunk_size, gran_size, | 880 | mtrr_calc_range_state(chunk_size, gran_size, |
883 | extra_remove_base, extra_remove_size, i); | 881 | x_remove_base, x_remove_size, i); |
884 | if (debug_print) { | 882 | if (debug_print) { |
885 | mtrr_print_out_one_result(i); | 883 | mtrr_print_out_one_result(i); |
886 | printk(KERN_INFO "\n"); | 884 | printk(KERN_INFO "\n"); |
@@ -890,7 +888,7 @@ int __init mtrr_cleanup(unsigned address_bits) | |||
890 | } | 888 | } |
891 | } | 889 | } |
892 | 890 | ||
893 | /* try to find the optimal index */ | 891 | /* Try to find the optimal index: */ |
894 | index_good = mtrr_search_optimal_index(); | 892 | index_good = mtrr_search_optimal_index(); |
895 | 893 | ||
896 | if (index_good != -1) { | 894 | if (index_good != -1) { |
@@ -898,7 +896,7 @@ int __init mtrr_cleanup(unsigned address_bits) | |||
898 | i = index_good; | 896 | i = index_good; |
899 | mtrr_print_out_one_result(i); | 897 | mtrr_print_out_one_result(i); |
900 | 898 | ||
901 | /* convert ranges to var ranges state */ | 899 | /* Convert ranges to var ranges state: */ |
902 | chunk_size = result[i].chunk_sizek; | 900 | chunk_size = result[i].chunk_sizek; |
903 | chunk_size <<= 10; | 901 | chunk_size <<= 10; |
904 | gran_size = result[i].gran_sizek; | 902 | gran_size = result[i].gran_sizek; |
@@ -941,8 +939,8 @@ early_param("disable_mtrr_trim", disable_mtrr_trim_setup); | |||
941 | * Note this won't check whether the MTRRs < 4GB (where the magic bit | 939 | * Note this won't check whether the MTRRs < 4GB (where the magic bit |
942 | * doesn't apply) are wrong, but so far we don't know of any such case in the wild. | 940 | * doesn't apply) are wrong, but so far we don't know of any such case in the wild. |
943 | */ | 941 | */ |
944 | #define Tom2Enabled (1U << 21) | 942 | #define Tom2Enabled (1U << 21) |
945 | #define Tom2ForceMemTypeWB (1U << 22) | 943 | #define Tom2ForceMemTypeWB (1U << 22) |
946 | 944 | ||
947 | int __init amd_special_default_mtrr(void) | 945 | int __init amd_special_default_mtrr(void) |
948 | { | 946 | { |
@@ -952,7 +950,7 @@ int __init amd_special_default_mtrr(void) | |||
952 | return 0; | 950 | return 0; |
953 | if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11) | 951 | if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11) |
954 | return 0; | 952 | return 0; |
955 | /* In case some hypervisor doesn't pass SYSCFG through */ | 953 | /* In case some hypervisor doesn't pass SYSCFG through: */ |
956 | if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0) | 954 | if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0) |
957 | return 0; | 955 | return 0; |
958 | /* | 956 | /* |
@@ -965,19 +963,21 @@ int __init amd_special_default_mtrr(void) | |||
965 | return 0; | 963 | return 0; |
966 | } | 964 | } |
967 | 965 | ||
968 | static u64 __init real_trim_memory(unsigned long start_pfn, | 966 | static u64 __init |
969 | unsigned long limit_pfn) | 967 | real_trim_memory(unsigned long start_pfn, unsigned long limit_pfn) |
970 | { | 968 | { |
971 | u64 trim_start, trim_size; | 969 | u64 trim_start, trim_size; |
970 | |||
972 | trim_start = start_pfn; | 971 | trim_start = start_pfn; |
973 | trim_start <<= PAGE_SHIFT; | 972 | trim_start <<= PAGE_SHIFT; |
973 | |||
974 | trim_size = limit_pfn; | 974 | trim_size = limit_pfn; |
975 | trim_size <<= PAGE_SHIFT; | 975 | trim_size <<= PAGE_SHIFT; |
976 | trim_size -= trim_start; | 976 | trim_size -= trim_start; |
977 | 977 | ||
978 | return e820_update_range(trim_start, trim_size, E820_RAM, | 978 | return e820_update_range(trim_start, trim_size, E820_RAM, E820_RESERVED); |
979 | E820_RESERVED); | ||
980 | } | 979 | } |
980 | |||
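
The subtlety in real_trim_memory() above is that the pfn is widened to a 64-bit value before the shift, so page frame numbers beyond 4GB don't overflow on 32-bit. The arithmetic in isolation (PAGE_SHIFT assumed to be 12, i.e. 4K pages):

    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
            unsigned long start_pfn = 0x100000; /* the 4GB boundary */
            unsigned long limit_pfn = 0x140000; /* the 5GB boundary */
            unsigned long long trim_start = (unsigned long long)start_pfn << PAGE_SHIFT;
            unsigned long long trim_size =
                    ((unsigned long long)limit_pfn << PAGE_SHIFT) - trim_start;

            printf("reserve [%#llx, %#llx), %lluMB\n",
                   trim_start, trim_start + trim_size, trim_size >> 20);
            return 0;
    }
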
981 | /** | 981 | /** |
982 | * mtrr_trim_uncached_memory - trim RAM not covered by MTRRs | 982 | * mtrr_trim_uncached_memory - trim RAM not covered by MTRRs |
983 | * @end_pfn: ending page frame number | 983 | * @end_pfn: ending page frame number |
@@ -985,7 +985,7 @@ static u64 __init real_trim_memory(unsigned long start_pfn, | |||
985 | * Some buggy BIOSes don't setup the MTRRs properly for systems with certain | 985 | * Some buggy BIOSes don't setup the MTRRs properly for systems with certain |
986 | * memory configurations. This routine checks that the highest MTRR matches | 986 | * memory configurations. This routine checks that the highest MTRR matches |
987 | * the end of memory, to make sure the MTRRs with write-back type cover | 987 | * the end of memory, to make sure the MTRRs with write-back type cover |
988 | * all of the memory the kernel is intending to use. If not, it'll trim any | 988 | * all of the memory the kernel is intending to use. If not, it'll trim any |
989 | * memory off the end by adjusting end_pfn, removing it from the kernel's | 989 | * memory off the end by adjusting end_pfn, removing it from the kernel's |
990 | * allocation pools, warning the user with an obnoxious message. | 990 | * allocation pools, warning the user with an obnoxious message. |
991 | */ | 991 | */ |
@@ -994,21 +994,22 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn) | |||
994 | unsigned long i, base, size, highest_pfn = 0, def, dummy; | 994 | unsigned long i, base, size, highest_pfn = 0, def, dummy; |
995 | mtrr_type type; | 995 | mtrr_type type; |
996 | u64 total_trim_size; | 996 | u64 total_trim_size; |
997 | |||
998 | /* Extra one for all 0: */ | 997 | /* Extra one for all 0: */ |
999 | int num[MTRR_NUM_TYPES + 1]; | 998 | int num[MTRR_NUM_TYPES + 1]; |
999 | |||
1000 | /* | 1000 | /* |
1001 | * Make sure we only trim uncachable memory on machines that | 1001 | * Make sure we only trim uncachable memory on machines that |
1002 | * support the Intel MTRR architecture: | 1002 | * support the Intel MTRR architecture: |
1003 | */ | 1003 | */ |
1004 | if (!is_cpu(INTEL) || disable_mtrr_trim) | 1004 | if (!is_cpu(INTEL) || disable_mtrr_trim) |
1005 | return 0; | 1005 | return 0; |
1006 | |||
1006 | rdmsr(MSR_MTRRdefType, def, dummy); | 1007 | rdmsr(MSR_MTRRdefType, def, dummy); |
1007 | def &= 0xff; | 1008 | def &= 0xff; |
1008 | if (def != MTRR_TYPE_UNCACHABLE) | 1009 | if (def != MTRR_TYPE_UNCACHABLE) |
1009 | return 0; | 1010 | return 0; |
1010 | 1011 | ||
1011 | /* get it and store it aside */ | 1012 | /* Get it and store it aside: */ |
1012 | memset(range_state, 0, sizeof(range_state)); | 1013 | memset(range_state, 0, sizeof(range_state)); |
1013 | for (i = 0; i < num_var_ranges; i++) { | 1014 | for (i = 0; i < num_var_ranges; i++) { |
1014 | mtrr_if->get(i, &base, &size, &type); | 1015 | mtrr_if->get(i, &base, &size, &type); |
@@ -1017,7 +1018,7 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn) | |||
1017 | range_state[i].type = type; | 1018 | range_state[i].type = type; |
1018 | } | 1019 | } |
1019 | 1020 | ||
1020 | /* Find highest cached pfn */ | 1021 | /* Find highest cached pfn: */ |
1021 | for (i = 0; i < num_var_ranges; i++) { | 1022 | for (i = 0; i < num_var_ranges; i++) { |
1022 | type = range_state[i].type; | 1023 | type = range_state[i].type; |
1023 | if (type != MTRR_TYPE_WRBACK) | 1024 | if (type != MTRR_TYPE_WRBACK) |
@@ -1028,13 +1029,13 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn) | |||
1028 | highest_pfn = base + size; | 1029 | highest_pfn = base + size; |
1029 | } | 1030 | } |
1030 | 1031 | ||
1031 | /* kvm/qemu doesn't have mtrr set right, don't trim them all */ | 1032 | /* kvm/qemu doesn't have mtrr set right, don't trim them all: */ |
1032 | if (!highest_pfn) { | 1033 | if (!highest_pfn) { |
1033 | printk(KERN_INFO "CPU MTRRs all blank - virtualized system.\n"); | 1034 | printk(KERN_INFO "CPU MTRRs all blank - virtualized system.\n"); |
1034 | return 0; | 1035 | return 0; |
1035 | } | 1036 | } |
1036 | 1037 | ||
1037 | /* check entries number */ | 1038 | /* Check the number of entries: */ |
1038 | memset(num, 0, sizeof(num)); | 1039 | memset(num, 0, sizeof(num)); |
1039 | for (i = 0; i < num_var_ranges; i++) { | 1040 | for (i = 0; i < num_var_ranges; i++) { |
1040 | type = range_state[i].type; | 1041 | type = range_state[i].type; |
@@ -1046,11 +1047,11 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn) | |||
1046 | num[type]++; | 1047 | num[type]++; |
1047 | } | 1048 | } |
1048 | 1049 | ||
1049 | /* no entry for WB? */ | 1050 | /* No entry for WB? */ |
1050 | if (!num[MTRR_TYPE_WRBACK]) | 1051 | if (!num[MTRR_TYPE_WRBACK]) |
1051 | return 0; | 1052 | return 0; |
1052 | 1053 | ||
1053 | /* check if we only had WB and UC */ | 1054 | /* Check if we only had WB and UC: */ |
1054 | if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] != | 1055 | if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] != |
1055 | num_var_ranges - num[MTRR_NUM_TYPES]) | 1056 | num_var_ranges - num[MTRR_NUM_TYPES]) |
1056 | return 0; | 1057 | return 0; |
@@ -1066,31 +1067,31 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn) | |||
1066 | } | 1067 | } |
1067 | nr_range = x86_get_mtrr_mem_range(range, nr_range, 0, 0); | 1068 | nr_range = x86_get_mtrr_mem_range(range, nr_range, 0, 0); |
1068 | 1069 | ||
1070 | /* Check the head: */ | ||
1069 | total_trim_size = 0; | 1071 | total_trim_size = 0; |
1070 | /* check the head */ | ||
1071 | if (range[0].start) | 1072 | if (range[0].start) |
1072 | total_trim_size += real_trim_memory(0, range[0].start); | 1073 | total_trim_size += real_trim_memory(0, range[0].start); |
1073 | /* check the holes */ | 1074 | |
1075 | /* Check the holes: */ | ||
1074 | for (i = 0; i < nr_range - 1; i++) { | 1076 | for (i = 0; i < nr_range - 1; i++) { |
1075 | if (range[i].end + 1 < range[i+1].start) | 1077 | if (range[i].end + 1 < range[i+1].start) |
1076 | total_trim_size += real_trim_memory(range[i].end + 1, | 1078 | total_trim_size += real_trim_memory(range[i].end + 1, |
1077 | range[i+1].start); | 1079 | range[i+1].start); |
1078 | } | 1080 | } |
1079 | /* check the top */ | 1081 | |
1082 | /* Check the top: */ | ||
1080 | i = nr_range - 1; | 1083 | i = nr_range - 1; |
1081 | if (range[i].end + 1 < end_pfn) | 1084 | if (range[i].end + 1 < end_pfn) |
1082 | total_trim_size += real_trim_memory(range[i].end + 1, | 1085 | total_trim_size += real_trim_memory(range[i].end + 1, |
1083 | end_pfn); | 1086 | end_pfn); |
1084 | 1087 | ||
1085 | if (total_trim_size) { | 1088 | if (total_trim_size) { |
1086 | printk(KERN_WARNING "WARNING: BIOS bug: CPU MTRRs don't cover" | 1089 | pr_warning("WARNING: BIOS bug: CPU MTRRs don't cover all of memory, losing %lluMB of RAM.\n", total_trim_size >> 20); |
1087 | " all of memory, losing %lluMB of RAM.\n", | ||
1088 | total_trim_size >> 20); | ||
1089 | 1090 | ||
1090 | if (!changed_by_mtrr_cleanup) | 1091 | if (!changed_by_mtrr_cleanup) |
1091 | WARN_ON(1); | 1092 | WARN_ON(1); |
1092 | 1093 | ||
1093 | printk(KERN_INFO "update e820 for mtrr\n"); | 1094 | pr_info("update e820 for mtrr\n"); |
1094 | update_e820(); | 1095 | update_e820(); |
1095 | 1096 | ||
1096 | return 1; | 1097 | return 1; |
@@ -1098,4 +1099,3 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn) | |||
1098 | 1099 | ||
1099 | return 0; | 1100 | return 0; |
1100 | } | 1101 | } |
1101 | |||
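
The head/holes/top walk in mtrr_trim_uncached_memory() above reduces to a single pass over the sorted range list: everything not covered by a write-back range gets handed to real_trim_memory(). A user-space sketch with made-up pfn values:

    #include <stdio.h>

    struct res_range { unsigned long start, end; }; /* inclusive pfns */

    int main(void)
    {
            struct res_range range[] = { { 0x10, 0xff }, { 0x200, 0x2ff } };
            int nr_range = 2;
            unsigned long end_pfn = 0x400, trimmed = 0;

            if (range[0].start)                        /* the head */
                    trimmed += range[0].start;
            for (int i = 0; i < nr_range - 1; i++)     /* the holes */
                    if (range[i].end + 1 < range[i + 1].start)
                            trimmed += range[i + 1].start - (range[i].end + 1);
            if (range[nr_range - 1].end + 1 < end_pfn) /* the top */
                    trimmed += end_pfn - (range[nr_range - 1].end + 1);

            printf("trimmed %lu pfns\n", trimmed);
            return 0;
    }
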
diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c index ff14c320040c..228d982ce09c 100644 --- a/arch/x86/kernel/cpu/mtrr/cyrix.c +++ b/arch/x86/kernel/cpu/mtrr/cyrix.c | |||
@@ -1,38 +1,40 @@ | |||
1 | #include <linux/init.h> | 1 | #include <linux/init.h> |
2 | #include <linux/io.h> | ||
2 | #include <linux/mm.h> | 3 | #include <linux/mm.h> |
3 | #include <asm/mtrr.h> | 4 | |
4 | #include <asm/msr.h> | ||
5 | #include <asm/io.h> | ||
6 | #include <asm/processor-cyrix.h> | 5 | #include <asm/processor-cyrix.h> |
7 | #include <asm/processor-flags.h> | 6 | #include <asm/processor-flags.h> |
7 | #include <asm/mtrr.h> | ||
8 | #include <asm/msr.h> | ||
9 | |||
8 | #include "mtrr.h" | 10 | #include "mtrr.h" |
9 | 11 | ||
10 | static void | 12 | static void |
11 | cyrix_get_arr(unsigned int reg, unsigned long *base, | 13 | cyrix_get_arr(unsigned int reg, unsigned long *base, |
12 | unsigned long *size, mtrr_type * type) | 14 | unsigned long *size, mtrr_type * type) |
13 | { | 15 | { |
14 | unsigned long flags; | ||
15 | unsigned char arr, ccr3, rcr, shift; | 16 | unsigned char arr, ccr3, rcr, shift; |
17 | unsigned long flags; | ||
16 | 18 | ||
17 | arr = CX86_ARR_BASE + (reg << 1) + reg; /* avoid multiplication by 3 */ | 19 | arr = CX86_ARR_BASE + (reg << 1) + reg; /* avoid multiplication by 3 */ |
18 | 20 | ||
19 | /* Save flags and disable interrupts */ | ||
20 | local_irq_save(flags); | 21 | local_irq_save(flags); |
21 | 22 | ||
22 | ccr3 = getCx86(CX86_CCR3); | 23 | ccr3 = getCx86(CX86_CCR3); |
23 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ | 24 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ |
24 | ((unsigned char *) base)[3] = getCx86(arr); | 25 | ((unsigned char *)base)[3] = getCx86(arr); |
25 | ((unsigned char *) base)[2] = getCx86(arr + 1); | 26 | ((unsigned char *)base)[2] = getCx86(arr + 1); |
26 | ((unsigned char *) base)[1] = getCx86(arr + 2); | 27 | ((unsigned char *)base)[1] = getCx86(arr + 2); |
27 | rcr = getCx86(CX86_RCR_BASE + reg); | 28 | rcr = getCx86(CX86_RCR_BASE + reg); |
28 | setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ | 29 | setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ |
29 | 30 | ||
30 | /* Enable interrupts if it was enabled previously */ | ||
31 | local_irq_restore(flags); | 31 | local_irq_restore(flags); |
32 | |||
32 | shift = ((unsigned char *) base)[1] & 0x0f; | 33 | shift = ((unsigned char *) base)[1] & 0x0f; |
33 | *base >>= PAGE_SHIFT; | 34 | *base >>= PAGE_SHIFT; |
34 | 35 | ||
35 | /* Power of two, at least 4K on ARR0-ARR6, 256K on ARR7 | 36 | /* |
37 | * Power of two, at least 4K on ARR0-ARR6, 256K on ARR7 | ||
36 | * Note: shift==0xf means 4G, this is unsupported. | 38 | * Note: shift==0xf means 4G, this is unsupported. |
37 | */ | 39 | */ |
38 | if (shift) | 40 | if (shift) |
@@ -76,17 +78,20 @@ cyrix_get_arr(unsigned int reg, unsigned long *base, | |||
76 | } | 78 | } |
77 | } | 79 | } |
78 | 80 | ||
81 | /* | ||
82 | * cyrix_get_free_region - get a free ARR. | ||
83 | * | ||
84 | * @base: the starting (base) address of the region. | ||
85 | * @size: the size (in bytes) of the region. | ||
86 | * | ||
87 | * Returns: the index of the region on success, else -1 on error. | ||
88 | */ | ||
79 | static int | 89 | static int |
80 | cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg) | 90 | cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg) |
81 | /* [SUMMARY] Get a free ARR. | ||
82 | <base> The starting (base) address of the region. | ||
83 | <size> The size (in bytes) of the region. | ||
84 | [RETURNS] The index of the region on success, else -1 on error. | ||
85 | */ | ||
86 | { | 91 | { |
87 | int i; | ||
88 | mtrr_type ltype; | ||
89 | unsigned long lbase, lsize; | 92 | unsigned long lbase, lsize; |
93 | mtrr_type ltype; | ||
94 | int i; | ||
90 | 95 | ||
91 | switch (replace_reg) { | 96 | switch (replace_reg) { |
92 | case 7: | 97 | case 7: |
@@ -107,14 +112,17 @@ cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg) | |||
107 | cyrix_get_arr(7, &lbase, &lsize, <ype); | 112 | cyrix_get_arr(7, &lbase, &lsize, <ype); |
108 | if (lsize == 0) | 113 | if (lsize == 0) |
109 | return 7; | 114 | return 7; |
110 | /* Else try ARR0-ARR6 first */ | 115 | /* Else try ARR0-ARR6 first */ |
111 | } else { | 116 | } else { |
112 | for (i = 0; i < 7; i++) { | 117 | for (i = 0; i < 7; i++) { |
113 | cyrix_get_arr(i, &lbase, &lsize, <ype); | 118 | cyrix_get_arr(i, &lbase, &lsize, <ype); |
114 | if (lsize == 0) | 119 | if (lsize == 0) |
115 | return i; | 120 | return i; |
116 | } | 121 | } |
117 | /* ARR0-ARR6 isn't free, try ARR7 but its size must be at least 256K */ | 122 | /* |
123 | * ARR0-ARR6 aren't free; | ||
124 | * try ARR7, but its size must be at least 256K | ||
125 | */ | ||
118 | cyrix_get_arr(i, &lbase, &lsize, <ype); | 126 | cyrix_get_arr(i, &lbase, &lsize, <ype); |
119 | if ((lsize == 0) && (size >= 0x40)) | 127 | if ((lsize == 0) && (size >= 0x40)) |
120 | return i; | 128 | return i; |
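
cyrix_get_arr() above assembles a base address by poking three 8-bit ARR registers into individual bytes of an unsigned long through a char pointer, which relies on x86 being little-endian. The same trick in isolation (register values made up):

    #include <stdio.h>

    int main(void)
    {
            unsigned long base = 0;
            unsigned char arr_regs[3] = { 0x12, 0x34, 0x56 }; /* as read via getCx86() */

            ((unsigned char *)&base)[3] = arr_regs[0];
            ((unsigned char *)&base)[2] = arr_regs[1];
            ((unsigned char *)&base)[1] = arr_regs[2];

            printf("base = %#lx\n", base); /* 0x12345600 on little-endian */
            return 0;
    }
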
@@ -122,21 +130,22 @@ cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg) | |||
122 | return -ENOSPC; | 130 | return -ENOSPC; |
123 | } | 131 | } |
124 | 132 | ||
125 | static u32 cr4 = 0; | 133 | static u32 cr4, ccr3; |
126 | static u32 ccr3; | ||
127 | 134 | ||
128 | static void prepare_set(void) | 135 | static void prepare_set(void) |
129 | { | 136 | { |
130 | u32 cr0; | 137 | u32 cr0; |
131 | 138 | ||
132 | /* Save value of CR4 and clear Page Global Enable (bit 7) */ | 139 | /* Save value of CR4 and clear Page Global Enable (bit 7) */ |
133 | if ( cpu_has_pge ) { | 140 | if (cpu_has_pge) { |
134 | cr4 = read_cr4(); | 141 | cr4 = read_cr4(); |
135 | write_cr4(cr4 & ~X86_CR4_PGE); | 142 | write_cr4(cr4 & ~X86_CR4_PGE); |
136 | } | 143 | } |
137 | 144 | ||
138 | /* Disable and flush caches. Note that wbinvd flushes the TLBs as | 145 | /* |
139 | a side-effect */ | 146 | * Disable and flush caches. |
147 | * Note that wbinvd flushes the TLBs as a side-effect | ||
148 | */ | ||
140 | cr0 = read_cr0() | X86_CR0_CD; | 149 | cr0 = read_cr0() | X86_CR0_CD; |
141 | wbinvd(); | 150 | wbinvd(); |
142 | write_cr0(cr0); | 151 | write_cr0(cr0); |
@@ -147,22 +156,21 @@ static void prepare_set(void) | |||
147 | 156 | ||
148 | /* Cyrix ARRs - everything else was excluded at the top */ | 157 | /* Cyrix ARRs - everything else was excluded at the top */ |
149 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); | 158 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); |
150 | |||
151 | } | 159 | } |
152 | 160 | ||
153 | static void post_set(void) | 161 | static void post_set(void) |
154 | { | 162 | { |
155 | /* Flush caches and TLBs */ | 163 | /* Flush caches and TLBs */ |
156 | wbinvd(); | 164 | wbinvd(); |
157 | 165 | ||
158 | /* Cyrix ARRs - everything else was excluded at the top */ | 166 | /* Cyrix ARRs - everything else was excluded at the top */ |
159 | setCx86(CX86_CCR3, ccr3); | 167 | setCx86(CX86_CCR3, ccr3); |
160 | 168 | ||
161 | /* Enable caches */ | 169 | /* Enable caches */ |
162 | write_cr0(read_cr0() & 0xbfffffff); | 170 | write_cr0(read_cr0() & 0xbfffffff); |
163 | 171 | ||
164 | /* Restore value of CR4 */ | 172 | /* Restore value of CR4 */ |
165 | if ( cpu_has_pge ) | 173 | if (cpu_has_pge) |
166 | write_cr4(cr4); | 174 | write_cr4(cr4); |
167 | } | 175 | } |
168 | 176 | ||
@@ -178,7 +186,8 @@ static void cyrix_set_arr(unsigned int reg, unsigned long base, | |||
178 | size >>= 6; | 186 | size >>= 6; |
179 | 187 | ||
180 | size &= 0x7fff; /* make sure arr_size <= 14 */ | 188 | size &= 0x7fff; /* make sure arr_size <= 14 */ |
181 | for (arr_size = 0; size; arr_size++, size >>= 1) ; | 189 | for (arr_size = 0; size; arr_size++, size >>= 1) |
190 | ; | ||
182 | 191 | ||
183 | if (reg < 7) { | 192 | if (reg < 7) { |
184 | switch (type) { | 193 | switch (type) { |
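
The empty-bodied for loop above is a compact highest-set-bit count: afterwards arr_size equals floor(log2(size)) + 1 for non-zero size. A quick standalone check:

    #include <stdio.h>

    int main(void)
    {
            unsigned int size = 0x40; /* illustrative value */
            unsigned int arr_size;

            for (arr_size = 0; size; arr_size++, size >>= 1)
                    ;
            printf("arr_size = %u\n", arr_size); /* prints 7 */
            return 0;
    }
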
@@ -215,18 +224,18 @@ static void cyrix_set_arr(unsigned int reg, unsigned long base, | |||
215 | prepare_set(); | 224 | prepare_set(); |
216 | 225 | ||
217 | base <<= PAGE_SHIFT; | 226 | base <<= PAGE_SHIFT; |
218 | setCx86(arr, ((unsigned char *) &base)[3]); | 227 | setCx86(arr + 0, ((unsigned char *)&base)[3]); |
219 | setCx86(arr + 1, ((unsigned char *) &base)[2]); | 228 | setCx86(arr + 1, ((unsigned char *)&base)[2]); |
220 | setCx86(arr + 2, (((unsigned char *) &base)[1]) | arr_size); | 229 | setCx86(arr + 2, (((unsigned char *)&base)[1]) | arr_size); |
221 | setCx86(CX86_RCR_BASE + reg, arr_type); | 230 | setCx86(CX86_RCR_BASE + reg, arr_type); |
222 | 231 | ||
223 | post_set(); | 232 | post_set(); |
224 | } | 233 | } |
225 | 234 | ||
226 | typedef struct { | 235 | typedef struct { |
227 | unsigned long base; | 236 | unsigned long base; |
228 | unsigned long size; | 237 | unsigned long size; |
229 | mtrr_type type; | 238 | mtrr_type type; |
230 | } arr_state_t; | 239 | } arr_state_t; |
231 | 240 | ||
232 | static arr_state_t arr_state[8] = { | 241 | static arr_state_t arr_state[8] = { |
@@ -247,16 +256,17 @@ static void cyrix_set_all(void) | |||
247 | setCx86(CX86_CCR0 + i, ccr_state[i]); | 256 | setCx86(CX86_CCR0 + i, ccr_state[i]); |
248 | for (; i < 7; i++) | 257 | for (; i < 7; i++) |
249 | setCx86(CX86_CCR4 + i, ccr_state[i]); | 258 | setCx86(CX86_CCR4 + i, ccr_state[i]); |
250 | for (i = 0; i < 8; i++) | 259 | |
251 | cyrix_set_arr(i, arr_state[i].base, | 260 | for (i = 0; i < 8; i++) { |
261 | cyrix_set_arr(i, arr_state[i].base, | ||
252 | arr_state[i].size, arr_state[i].type); | 262 | arr_state[i].size, arr_state[i].type); |
263 | } | ||
253 | 264 | ||
254 | post_set(); | 265 | post_set(); |
255 | } | 266 | } |
256 | 267 | ||
257 | static struct mtrr_ops cyrix_mtrr_ops = { | 268 | static struct mtrr_ops cyrix_mtrr_ops = { |
258 | .vendor = X86_VENDOR_CYRIX, | 269 | .vendor = X86_VENDOR_CYRIX, |
259 | // .init = cyrix_arr_init, | ||
260 | .set_all = cyrix_set_all, | 270 | .set_all = cyrix_set_all, |
261 | .set = cyrix_set_arr, | 271 | .set = cyrix_set_arr, |
262 | .get = cyrix_get_arr, | 272 | .get = cyrix_get_arr, |
@@ -270,5 +280,3 @@ int __init cyrix_init_mtrr(void) | |||
270 | set_mtrr_ops(&cyrix_mtrr_ops); | 280 | set_mtrr_ops(&cyrix_mtrr_ops); |
271 | return 0; | 281 | return 0; |
272 | } | 282 | } |
273 | |||
274 | //arch_initcall(cyrix_init_mtrr); | ||
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index 0543f69f0b27..55da0c5f68dd 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c | |||
@@ -1,28 +1,34 @@ | |||
1 | /* This only handles 32bit MTRR on 32bit hosts. This is strictly wrong | 1 | /* |
2 | because MTRRs can span upto 40 bits (36bits on most modern x86) */ | 2 | * This only handles 32bit MTRR on 32bit hosts. This is strictly wrong |
3 | * because MTRRs can span up to 40 bits (36 bits on most modern x86) | ||
4 | */ | ||
5 | #define DEBUG | ||
6 | |||
7 | #include <linux/module.h> | ||
3 | #include <linux/init.h> | 8 | #include <linux/init.h> |
4 | #include <linux/slab.h> | 9 | #include <linux/slab.h> |
10 | #include <linux/io.h> | ||
5 | #include <linux/mm.h> | 11 | #include <linux/mm.h> |
6 | #include <linux/module.h> | 12 | |
7 | #include <asm/io.h> | ||
8 | #include <asm/mtrr.h> | ||
9 | #include <asm/msr.h> | ||
10 | #include <asm/system.h> | ||
11 | #include <asm/cpufeature.h> | ||
12 | #include <asm/processor-flags.h> | 13 | #include <asm/processor-flags.h> |
14 | #include <asm/cpufeature.h> | ||
13 | #include <asm/tlbflush.h> | 15 | #include <asm/tlbflush.h> |
16 | #include <asm/system.h> | ||
17 | #include <asm/mtrr.h> | ||
18 | #include <asm/msr.h> | ||
14 | #include <asm/pat.h> | 19 | #include <asm/pat.h> |
20 | |||
15 | #include "mtrr.h" | 21 | #include "mtrr.h" |
16 | 22 | ||
17 | struct fixed_range_block { | 23 | struct fixed_range_block { |
18 | int base_msr; /* start address of an MTRR block */ | 24 | int base_msr; /* start address of an MTRR block */ |
19 | int ranges; /* number of MTRRs in this block */ | 25 | int ranges; /* number of MTRRs in this block */ |
20 | }; | 26 | }; |
21 | 27 | ||
22 | static struct fixed_range_block fixed_range_blocks[] = { | 28 | static struct fixed_range_block fixed_range_blocks[] = { |
23 | { MSR_MTRRfix64K_00000, 1 }, /* one 64k MTRR */ | 29 | { MSR_MTRRfix64K_00000, 1 }, /* one 64k MTRR */ |
24 | { MSR_MTRRfix16K_80000, 2 }, /* two 16k MTRRs */ | 30 | { MSR_MTRRfix16K_80000, 2 }, /* two 16k MTRRs */ |
25 | { MSR_MTRRfix4K_C0000, 8 }, /* eight 4k MTRRs */ | 31 | { MSR_MTRRfix4K_C0000, 8 }, /* eight 4k MTRRs */ |
26 | {} | 32 | {} |
27 | }; | 33 | }; |
28 | 34 | ||
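
For orientation: each fixed-range MSR in the table above packs eight one-byte memory types, so the three blocks describe 11 MSRs covering exactly the first megabyte (8 x 64K, then 16 x 16K, then 64 x 4K). The arithmetic, checked:

    #include <stdio.h>

    int main(void)
    {
            /* MSRs per block x 8 entries x entry size, in KB */
            unsigned long covered = 1UL * 8 * 64 + 2UL * 8 * 16 + 8UL * 8 * 4;

            printf("%luK\n", covered); /* 1024K */
            return 0;
    }
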
@@ -30,10 +36,10 @@ static unsigned long smp_changes_mask; | |||
30 | static int mtrr_state_set; | 36 | static int mtrr_state_set; |
31 | u64 mtrr_tom2; | 37 | u64 mtrr_tom2; |
32 | 38 | ||
33 | struct mtrr_state_type mtrr_state = {}; | 39 | struct mtrr_state_type mtrr_state; |
34 | EXPORT_SYMBOL_GPL(mtrr_state); | 40 | EXPORT_SYMBOL_GPL(mtrr_state); |
35 | 41 | ||
36 | /** | 42 | /* |
37 | * BIOS is expected to clear MtrrFixDramModEn bit, see for example | 43 | * BIOS is expected to clear MtrrFixDramModEn bit, see for example |
38 | * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD | 44 | * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD |
39 | * Opteron Processors" (26094 Rev. 3.30 February 2006), section | 45 | * Opteron Processors" (26094 Rev. 3.30 February 2006), section |
@@ -104,9 +110,8 @@ u8 mtrr_type_lookup(u64 start, u64 end) | |||
104 | * Look for multiple ranges matching this address and pick the type | 110 | * Look for multiple ranges matching this address and pick the type |
105 | * as per MTRR precedence | 111 | * as per MTRR precedence |
106 | */ | 112 | */ |
107 | if (!(mtrr_state.enabled & 2)) { | 113 | if (!(mtrr_state.enabled & 2)) |
108 | return mtrr_state.def_type; | 114 | return mtrr_state.def_type; |
109 | } | ||
110 | 115 | ||
111 | prev_match = 0xFF; | 116 | prev_match = 0xFF; |
112 | for (i = 0; i < num_var_ranges; ++i) { | 117 | for (i = 0; i < num_var_ranges; ++i) { |
@@ -125,9 +130,8 @@ u8 mtrr_type_lookup(u64 start, u64 end) | |||
125 | if (start_state != end_state) | 130 | if (start_state != end_state) |
126 | return 0xFE; | 131 | return 0xFE; |
127 | 132 | ||
128 | if ((start & mask) != (base & mask)) { | 133 | if ((start & mask) != (base & mask)) |
129 | continue; | 134 | continue; |
130 | } | ||
131 | 135 | ||
132 | curr_match = mtrr_state.var_ranges[i].base_lo & 0xff; | 136 | curr_match = mtrr_state.var_ranges[i].base_lo & 0xff; |
133 | if (prev_match == 0xFF) { | 137 | if (prev_match == 0xFF) { |
@@ -148,9 +152,8 @@ u8 mtrr_type_lookup(u64 start, u64 end) | |||
148 | curr_match = MTRR_TYPE_WRTHROUGH; | 152 | curr_match = MTRR_TYPE_WRTHROUGH; |
149 | } | 153 | } |
150 | 154 | ||
151 | if (prev_match != curr_match) { | 155 | if (prev_match != curr_match) |
152 | return MTRR_TYPE_UNCACHABLE; | 156 | return MTRR_TYPE_UNCACHABLE; |
153 | } | ||
154 | } | 157 | } |
155 | 158 | ||
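
The precedence applied above when two variable ranges overlap follows the Intel rules: UC always wins, WT beats WB, and any other disagreement degrades to UC (the final prev_match != curr_match test). As a standalone helper, with the architectural type values:

    #include <stdio.h>

    #define MTRR_TYPE_UNCACHABLE 0
    #define MTRR_TYPE_WRTHROUGH  4
    #define MTRR_TYPE_WRBACK     6

    static unsigned char combine(unsigned char a, unsigned char b)
    {
            if (a == MTRR_TYPE_UNCACHABLE || b == MTRR_TYPE_UNCACHABLE)
                    return MTRR_TYPE_UNCACHABLE;
            if ((a == MTRR_TYPE_WRTHROUGH && b == MTRR_TYPE_WRBACK) ||
                (a == MTRR_TYPE_WRBACK && b == MTRR_TYPE_WRTHROUGH))
                    return MTRR_TYPE_WRTHROUGH;
            if (a != b)
                    return MTRR_TYPE_UNCACHABLE; /* conflicting types: be safe */
            return a;
    }

    int main(void)
    {
            printf("%u\n", combine(MTRR_TYPE_WRBACK, MTRR_TYPE_WRTHROUGH)); /* 4 */
            return 0;
    }
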
156 | if (mtrr_tom2) { | 159 | if (mtrr_tom2) { |
@@ -164,7 +167,7 @@ u8 mtrr_type_lookup(u64 start, u64 end) | |||
164 | return mtrr_state.def_type; | 167 | return mtrr_state.def_type; |
165 | } | 168 | } |
166 | 169 | ||
167 | /* Get the MSR pair relating to a var range */ | 170 | /* Get the MSR pair relating to a var range */ |
168 | static void | 171 | static void |
169 | get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr) | 172 | get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr) |
170 | { | 173 | { |
@@ -172,7 +175,7 @@ get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr) | |||
172 | rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi); | 175 | rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi); |
173 | } | 176 | } |
174 | 177 | ||
175 | /* fill the MSR pair relating to a var range */ | 178 | /* Fill the MSR pair relating to a var range */ |
176 | void fill_mtrr_var_range(unsigned int index, | 179 | void fill_mtrr_var_range(unsigned int index, |
177 | u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi) | 180 | u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi) |
178 | { | 181 | { |
@@ -186,10 +189,9 @@ void fill_mtrr_var_range(unsigned int index, | |||
186 | vr[index].mask_hi = mask_hi; | 189 | vr[index].mask_hi = mask_hi; |
187 | } | 190 | } |
188 | 191 | ||
189 | static void | 192 | static void get_fixed_ranges(mtrr_type *frs) |
190 | get_fixed_ranges(mtrr_type * frs) | ||
191 | { | 193 | { |
192 | unsigned int *p = (unsigned int *) frs; | 194 | unsigned int *p = (unsigned int *)frs; |
193 | int i; | 195 | int i; |
194 | 196 | ||
195 | k8_check_syscfg_dram_mod_en(); | 197 | k8_check_syscfg_dram_mod_en(); |
@@ -217,22 +219,22 @@ static void __init print_fixed_last(void) | |||
217 | if (!last_fixed_end) | 219 | if (!last_fixed_end) |
218 | return; | 220 | return; |
219 | 221 | ||
220 | printk(KERN_DEBUG " %05X-%05X %s\n", last_fixed_start, | 222 | pr_debug(" %05X-%05X %s\n", last_fixed_start, |
221 | last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type)); | 223 | last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type)); |
222 | 224 | ||
223 | last_fixed_end = 0; | 225 | last_fixed_end = 0; |
224 | } | 226 | } |
225 | 227 | ||
226 | static void __init update_fixed_last(unsigned base, unsigned end, | 228 | static void __init update_fixed_last(unsigned base, unsigned end, |
227 | mtrr_type type) | 229 | mtrr_type type) |
228 | { | 230 | { |
229 | last_fixed_start = base; | 231 | last_fixed_start = base; |
230 | last_fixed_end = end; | 232 | last_fixed_end = end; |
231 | last_fixed_type = type; | 233 | last_fixed_type = type; |
232 | } | 234 | } |
233 | 235 | ||
234 | static void __init print_fixed(unsigned base, unsigned step, | 236 | static void __init |
235 | const mtrr_type *types) | 237 | print_fixed(unsigned base, unsigned step, const mtrr_type *types) |
236 | { | 238 | { |
237 | unsigned i; | 239 | unsigned i; |
238 | 240 | ||
@@ -259,54 +261,55 @@ static void __init print_mtrr_state(void) | |||
259 | unsigned int i; | 261 | unsigned int i; |
260 | int high_width; | 262 | int high_width; |
261 | 263 | ||
262 | printk(KERN_DEBUG "MTRR default type: %s\n", | 264 | pr_debug("MTRR default type: %s\n", |
263 | mtrr_attrib_to_str(mtrr_state.def_type)); | 265 | mtrr_attrib_to_str(mtrr_state.def_type)); |
264 | if (mtrr_state.have_fixed) { | 266 | if (mtrr_state.have_fixed) { |
265 | printk(KERN_DEBUG "MTRR fixed ranges %sabled:\n", | 267 | pr_debug("MTRR fixed ranges %sabled:\n", |
266 | mtrr_state.enabled & 1 ? "en" : "dis"); | 268 | mtrr_state.enabled & 1 ? "en" : "dis"); |
267 | print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0); | 269 | print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0); |
268 | for (i = 0; i < 2; ++i) | 270 | for (i = 0; i < 2; ++i) |
269 | print_fixed(0x80000 + i * 0x20000, 0x04000, mtrr_state.fixed_ranges + (i + 1) * 8); | 271 | print_fixed(0x80000 + i * 0x20000, 0x04000, |
272 | mtrr_state.fixed_ranges + (i + 1) * 8); | ||
270 | for (i = 0; i < 8; ++i) | 273 | for (i = 0; i < 8; ++i) |
271 | print_fixed(0xC0000 + i * 0x08000, 0x01000, mtrr_state.fixed_ranges + (i + 3) * 8); | 274 | print_fixed(0xC0000 + i * 0x08000, 0x01000, |
275 | mtrr_state.fixed_ranges + (i + 3) * 8); | ||
272 | 276 | ||
273 | /* tail */ | 277 | /* tail */ |
274 | print_fixed_last(); | 278 | print_fixed_last(); |
275 | } | 279 | } |
276 | printk(KERN_DEBUG "MTRR variable ranges %sabled:\n", | 280 | pr_debug("MTRR variable ranges %sabled:\n", |
277 | mtrr_state.enabled & 2 ? "en" : "dis"); | 281 | mtrr_state.enabled & 2 ? "en" : "dis"); |
278 | if (size_or_mask & 0xffffffffUL) | 282 | if (size_or_mask & 0xffffffffUL) |
279 | high_width = ffs(size_or_mask & 0xffffffffUL) - 1; | 283 | high_width = ffs(size_or_mask & 0xffffffffUL) - 1; |
280 | else | 284 | else |
281 | high_width = ffs(size_or_mask>>32) + 32 - 1; | 285 | high_width = ffs(size_or_mask>>32) + 32 - 1; |
282 | high_width = (high_width - (32 - PAGE_SHIFT) + 3) / 4; | 286 | high_width = (high_width - (32 - PAGE_SHIFT) + 3) / 4; |
287 | |||
283 | for (i = 0; i < num_var_ranges; ++i) { | 288 | for (i = 0; i < num_var_ranges; ++i) { |
284 | if (mtrr_state.var_ranges[i].mask_lo & (1 << 11)) | 289 | if (mtrr_state.var_ranges[i].mask_lo & (1 << 11)) |
285 | printk(KERN_DEBUG " %u base %0*X%05X000 mask %0*X%05X000 %s\n", | 290 | pr_debug(" %u base %0*X%05X000 mask %0*X%05X000 %s\n", |
286 | i, | 291 | i, |
287 | high_width, | 292 | high_width, |
288 | mtrr_state.var_ranges[i].base_hi, | 293 | mtrr_state.var_ranges[i].base_hi, |
289 | mtrr_state.var_ranges[i].base_lo >> 12, | 294 | mtrr_state.var_ranges[i].base_lo >> 12, |
290 | high_width, | 295 | high_width, |
291 | mtrr_state.var_ranges[i].mask_hi, | 296 | mtrr_state.var_ranges[i].mask_hi, |
292 | mtrr_state.var_ranges[i].mask_lo >> 12, | 297 | mtrr_state.var_ranges[i].mask_lo >> 12, |
293 | mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff)); | 298 | mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff)); |
294 | else | 299 | else |
295 | printk(KERN_DEBUG " %u disabled\n", i); | 300 | pr_debug(" %u disabled\n", i); |
296 | } | ||
297 | if (mtrr_tom2) { | ||
298 | printk(KERN_DEBUG "TOM2: %016llx aka %lldM\n", | ||
299 | mtrr_tom2, mtrr_tom2>>20); | ||
300 | } | 301 | } |
302 | if (mtrr_tom2) | ||
303 | pr_debug("TOM2: %016llx aka %lldM\n", mtrr_tom2, mtrr_tom2>>20); | ||
301 | } | 304 | } |
302 | 305 | ||
303 | /* Grab all of the MTRR state for this CPU into *state */ | 306 | /* Grab all of the MTRR state for this CPU into *state */ |
304 | void __init get_mtrr_state(void) | 307 | void __init get_mtrr_state(void) |
305 | { | 308 | { |
306 | unsigned int i; | ||
307 | struct mtrr_var_range *vrs; | 309 | struct mtrr_var_range *vrs; |
308 | unsigned lo, dummy; | ||
309 | unsigned long flags; | 310 | unsigned long flags; |
311 | unsigned lo, dummy; | ||
312 | unsigned int i; | ||
310 | 313 | ||
311 | vrs = mtrr_state.var_ranges; | 314 | vrs = mtrr_state.var_ranges; |
312 | 315 | ||
@@ -324,6 +327,7 @@ void __init get_mtrr_state(void) | |||
324 | 327 | ||
325 | if (amd_special_default_mtrr()) { | 328 | if (amd_special_default_mtrr()) { |
326 | unsigned low, high; | 329 | unsigned low, high; |
330 | |||
327 | /* TOP_MEM2 */ | 331 | /* TOP_MEM2 */ |
328 | rdmsr(MSR_K8_TOP_MEM2, low, high); | 332 | rdmsr(MSR_K8_TOP_MEM2, low, high); |
329 | mtrr_tom2 = high; | 333 | mtrr_tom2 = high; |
@@ -344,10 +348,9 @@ void __init get_mtrr_state(void) | |||
344 | 348 | ||
345 | post_set(); | 349 | post_set(); |
346 | local_irq_restore(flags); | 350 | local_irq_restore(flags); |
347 | |||
348 | } | 351 | } |
349 | 352 | ||
350 | /* Some BIOS's are fucked and don't set all MTRRs the same! */ | 353 | /* Some BIOSes are messed up and don't set all MTRRs the same! */ |
351 | void __init mtrr_state_warn(void) | 354 | void __init mtrr_state_warn(void) |
352 | { | 355 | { |
353 | unsigned long mask = smp_changes_mask; | 356 | unsigned long mask = smp_changes_mask; |
@@ -355,28 +358,33 @@ void __init mtrr_state_warn(void) | |||
355 | if (!mask) | 358 | if (!mask) |
356 | return; | 359 | return; |
357 | if (mask & MTRR_CHANGE_MASK_FIXED) | 360 | if (mask & MTRR_CHANGE_MASK_FIXED) |
358 | printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n"); | 361 | pr_warning("mtrr: your CPUs had inconsistent fixed MTRR settings\n"); |
359 | if (mask & MTRR_CHANGE_MASK_VARIABLE) | 362 | if (mask & MTRR_CHANGE_MASK_VARIABLE) |
360 | printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n"); | 363 | pr_warning("mtrr: your CPUs had inconsistent variable MTRR settings\n"); |
361 | if (mask & MTRR_CHANGE_MASK_DEFTYPE) | 364 | if (mask & MTRR_CHANGE_MASK_DEFTYPE) |
362 | printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n"); | 365 | pr_warning("mtrr: your CPUs had inconsistent MTRRdefType settings\n"); |
366 | |||
363 | printk(KERN_INFO "mtrr: probably your BIOS does not setup all CPUs.\n"); | 367 | printk(KERN_INFO "mtrr: probably your BIOS does not setup all CPUs.\n"); |
364 | printk(KERN_INFO "mtrr: corrected configuration.\n"); | 368 | printk(KERN_INFO "mtrr: corrected configuration.\n"); |
365 | } | 369 | } |
366 | 370 | ||
367 | /* Doesn't attempt to pass an error out to MTRR users | 371 | /* |
368 | because it's quite complicated in some cases and probably not | 372 | * Doesn't attempt to pass an error out to MTRR users |
369 | worth it because the best error handling is to ignore it. */ | 373 | * because it's quite complicated in some cases and probably not |
374 | * worth it because the best error handling is to ignore it. | ||
375 | */ | ||
370 | void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b) | 376 | void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b) |
371 | { | 377 | { |
372 | if (wrmsr_safe(msr, a, b) < 0) | 378 | if (wrmsr_safe(msr, a, b) < 0) { |
373 | printk(KERN_ERR | 379 | printk(KERN_ERR |
374 | "MTRR: CPU %u: Writing MSR %x to %x:%x failed\n", | 380 | "MTRR: CPU %u: Writing MSR %x to %x:%x failed\n", |
375 | smp_processor_id(), msr, a, b); | 381 | smp_processor_id(), msr, a, b); |
382 | } | ||
376 | } | 383 | } |
377 | 384 | ||
378 | /** | 385 | /** |
379 | * set_fixed_range - checks & updates a fixed-range MTRR if it differs from the value it should have | 386 | * set_fixed_range - checks & updates a fixed-range MTRR if it |
387 | * differs from the value it should have | ||
380 | * @msr: MSR address of the MTRR which should be checked and updated | 388 | * @msr: MSR address of the MTRR which should be checked and updated |
381 | * @changed: pointer which indicates whether the MTRR needed to be changed | 389 | * @changed: pointer which indicates whether the MTRR needed to be changed |
382 | * @msrwords: pointer to the MSR values which the MSR should have | 390 | * @msrwords: pointer to the MSR values which the MSR should have |
@@ -401,20 +409,23 @@ static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords) | |||
401 | * | 409 | * |
402 | * Returns: The index of the region on success, else negative on error. | 410 | * Returns: The index of the region on success, else negative on error. |
403 | */ | 411 | */ |
404 | int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg) | 412 | int |
413 | generic_get_free_region(unsigned long base, unsigned long size, int replace_reg) | ||
405 | { | 414 | { |
406 | int i, max; | ||
407 | mtrr_type ltype; | ||
408 | unsigned long lbase, lsize; | 415 | unsigned long lbase, lsize; |
416 | mtrr_type ltype; | ||
417 | int i, max; | ||
409 | 418 | ||
410 | max = num_var_ranges; | 419 | max = num_var_ranges; |
411 | if (replace_reg >= 0 && replace_reg < max) | 420 | if (replace_reg >= 0 && replace_reg < max) |
412 | return replace_reg; | 421 | return replace_reg; |
422 | |||
413 | for (i = 0; i < max; ++i) { | 423 | for (i = 0; i < max; ++i) { |
414 | mtrr_if->get(i, &lbase, &lsize, <ype); | 424 | mtrr_if->get(i, &lbase, &lsize, <ype); |
415 | if (lsize == 0) | 425 | if (lsize == 0) |
416 | return i; | 426 | return i; |
417 | } | 427 | } |
428 | |||
418 | return -ENOSPC; | 429 | return -ENOSPC; |
419 | } | 430 | } |
420 | 431 | ||
@@ -434,7 +445,7 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base, | |||
434 | rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi); | 445 | rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi); |
435 | 446 | ||
436 | if ((mask_lo & 0x800) == 0) { | 447 | if ((mask_lo & 0x800) == 0) { |
437 | /* Invalid (i.e. free) range */ | 448 | /* Invalid (i.e. free) range */ |
438 | *base = 0; | 449 | *base = 0; |
439 | *size = 0; | 450 | *size = 0; |
440 | *type = 0; | 451 | *type = 0; |
@@ -471,27 +482,31 @@ out_put_cpu: | |||
471 | } | 482 | } |
472 | 483 | ||
473 | /** | 484 | /** |
474 | * set_fixed_ranges - checks & updates the fixed-range MTRRs if they differ from the saved set | 485 | * set_fixed_ranges - checks & updates the fixed-range MTRRs if they |
486 | * differ from the saved set | ||
475 | * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges() | 487 | * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges() |
476 | */ | 488 | */ |
477 | static int set_fixed_ranges(mtrr_type * frs) | 489 | static int set_fixed_ranges(mtrr_type *frs) |
478 | { | 490 | { |
479 | unsigned long long *saved = (unsigned long long *) frs; | 491 | unsigned long long *saved = (unsigned long long *)frs; |
480 | bool changed = false; | 492 | bool changed = false; |
481 | int block=-1, range; | 493 | int block = -1, range; |
482 | 494 | ||
483 | k8_check_syscfg_dram_mod_en(); | 495 | k8_check_syscfg_dram_mod_en(); |
484 | 496 | ||
485 | while (fixed_range_blocks[++block].ranges) | 497 | while (fixed_range_blocks[++block].ranges) { |
486 | for (range=0; range < fixed_range_blocks[block].ranges; range++) | 498 | for (range = 0; range < fixed_range_blocks[block].ranges; range++) |
487 | set_fixed_range(fixed_range_blocks[block].base_msr + range, | 499 | set_fixed_range(fixed_range_blocks[block].base_msr + range, |
488 | &changed, (unsigned int *) saved++); | 500 | &changed, (unsigned int *)saved++); |
501 | } | ||
489 | 502 | ||
490 | return changed; | 503 | return changed; |
491 | } | 504 | } |
492 | 505 | ||
493 | /* Set the MSR pair relating to a var range. Returns TRUE if | 506 | /* |
494 | changes are made */ | 507 | * Set the MSR pair relating to a var range. |
508 | * Returns true if changes are made. | ||
509 | */ | ||
495 | static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr) | 510 | static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr) |
496 | { | 511 | { |
497 | unsigned int lo, hi; | 512 | unsigned int lo, hi; |
@@ -501,6 +516,7 @@ static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr) | |||
501 | if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL) | 516 | if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL) |
502 | || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) != | 517 | || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) != |
503 | (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) { | 518 | (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) { |
519 | |||
504 | mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi); | 520 | mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi); |
505 | changed = true; | 521 | changed = true; |
506 | } | 522 | } |
@@ -526,21 +542,26 @@ static u32 deftype_lo, deftype_hi; | |||
526 | */ | 542 | */ |
527 | static unsigned long set_mtrr_state(void) | 543 | static unsigned long set_mtrr_state(void) |
528 | { | 544 | { |
529 | unsigned int i; | ||
530 | unsigned long change_mask = 0; | 545 | unsigned long change_mask = 0; |
546 | unsigned int i; | ||
531 | 547 | ||
532 | for (i = 0; i < num_var_ranges; i++) | 548 | for (i = 0; i < num_var_ranges; i++) { |
533 | if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i])) | 549 | if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i])) |
534 | change_mask |= MTRR_CHANGE_MASK_VARIABLE; | 550 | change_mask |= MTRR_CHANGE_MASK_VARIABLE; |
551 | } | ||
535 | 552 | ||
536 | if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges)) | 553 | if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges)) |
537 | change_mask |= MTRR_CHANGE_MASK_FIXED; | 554 | change_mask |= MTRR_CHANGE_MASK_FIXED; |
538 | 555 | ||
539 | /* Set_mtrr_restore restores the old value of MTRRdefType, | 556 | /* |
540 | so to set it we fiddle with the saved value */ | 557 | * Set_mtrr_restore restores the old value of MTRRdefType, |
558 | * so to set it we fiddle with the saved value: | ||
559 | */ | ||
541 | if ((deftype_lo & 0xff) != mtrr_state.def_type | 560 | if ((deftype_lo & 0xff) != mtrr_state.def_type |
542 | || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) { | 561 | || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) { |
543 | deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type | (mtrr_state.enabled << 10); | 562 | |
563 | deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type | | ||
564 | (mtrr_state.enabled << 10); | ||
544 | change_mask |= MTRR_CHANGE_MASK_DEFTYPE; | 565 | change_mask |= MTRR_CHANGE_MASK_DEFTYPE; |
545 | } | 566 | } |
546 | 567 | ||
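For reference, the ~0xcff mask and the enabled << 10 shift above come from the MTRRdefType register layout: bits 7:0 hold the default memory type, bit 10 the fixed-range enable, bit 11 the global MTRR enable, so 0xcff covers exactly the fields this function owns. A small runnable sketch of the same update (the starting values are made up for illustration):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t deftype_lo = 0x00000c06;  /* E=1, FE=1, default type 6 (WB) */
            uint32_t def_type   = 0x00;        /* wanted: default type uncached  */
            uint32_t enabled    = 0x2;         /* wanted: E=1, FE=0              */

            /* The update from set_mtrr_state(): clear bits 11:10 and 7:0, re-set them. */
            deftype_lo = (deftype_lo & ~0xcffu) | def_type | (enabled << 10);

            printf("deftype_lo = 0x%08x\n", deftype_lo);    /* 0x00000800 */
            return 0;
    }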
@@ -548,33 +569,36 @@ static unsigned long set_mtrr_state(void) | |||
548 | } | 569 | } |
549 | 570 | ||
550 | 571 | ||
551 | static unsigned long cr4 = 0; | 572 | static unsigned long cr4; |
552 | static DEFINE_SPINLOCK(set_atomicity_lock); | 573 | static DEFINE_SPINLOCK(set_atomicity_lock); |
553 | 574 | ||
554 | /* | 575 | /* |
555 | * Since we are disabling the cache don't allow any interrupts - they | 576 | * Since we are disabling the cache don't allow any interrupts, |
556 | * would run extremely slow and would only increase the pain. The caller must | 577 | * they would run extremely slow and would only increase the pain. |
557 | * ensure that local interrupts are disabled and are reenabled after post_set() | 578 | * |
558 | * has been called. | 579 | * The caller must ensure that local interrupts are disabled and |
580 | * are reenabled after post_set() has been called. | ||
559 | */ | 581 | */ |
560 | |||
561 | static void prepare_set(void) __acquires(set_atomicity_lock) | 582 | static void prepare_set(void) __acquires(set_atomicity_lock) |
562 | { | 583 | { |
563 | unsigned long cr0; | 584 | unsigned long cr0; |
564 | 585 | ||
565 | /* Note that this is not ideal, since the cache is only flushed/disabled | 586 | /* |
566 | for this CPU while the MTRRs are changed, but changing this requires | 587 | * Note that this is not ideal, |
567 | more invasive changes to the way the kernel boots | 588 | * since the cache is only flushed/disabled for this CPU while the |
589 | * MTRRs are changed, but changing this requires more invasive | ||
590 | * changes to the way the kernel boots. | ||
591 | */ | ||
568 | 592 | ||
569 | spin_lock(&set_atomicity_lock); | 593 | spin_lock(&set_atomicity_lock); |
570 | 594 | ||
571 | /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */ | 595 | /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */ |
572 | cr0 = read_cr0() | X86_CR0_CD; | 596 | cr0 = read_cr0() | X86_CR0_CD; |
573 | write_cr0(cr0); | 597 | write_cr0(cr0); |
574 | wbinvd(); | 598 | wbinvd(); |
575 | 599 | ||
576 | /* Save value of CR4 and clear Page Global Enable (bit 7) */ | 600 | /* Save value of CR4 and clear Page Global Enable (bit 7) */ |
577 | if ( cpu_has_pge ) { | 601 | if (cpu_has_pge) { |
578 | cr4 = read_cr4(); | 602 | cr4 = read_cr4(); |
579 | write_cr4(cr4 & ~X86_CR4_PGE); | 603 | write_cr4(cr4 & ~X86_CR4_PGE); |
580 | } | 604 | } |
@@ -582,26 +606,26 @@ static void prepare_set(void) __acquires(set_atomicity_lock) | |||
582 | /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */ | 606 | /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */ |
583 | __flush_tlb(); | 607 | __flush_tlb(); |
584 | 608 | ||
585 | /* Save MTRR state */ | 609 | /* Save MTRR state */ |
586 | rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi); | 610 | rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi); |
587 | 611 | ||
588 | /* Disable MTRRs, and set the default type to uncached */ | 612 | /* Disable MTRRs, and set the default type to uncached */ |
589 | mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi); | 613 | mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi); |
590 | } | 614 | } |
591 | 615 | ||
592 | static void post_set(void) __releases(set_atomicity_lock) | 616 | static void post_set(void) __releases(set_atomicity_lock) |
593 | { | 617 | { |
594 | /* Flush TLBs (no need to flush caches - they are disabled) */ | 618 | /* Flush TLBs (no need to flush caches - they are disabled) */ |
595 | __flush_tlb(); | 619 | __flush_tlb(); |
596 | 620 | ||
597 | /* Intel (P6) standard MTRRs */ | 621 | /* Intel (P6) standard MTRRs */ |
598 | mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi); | 622 | mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi); |
599 | 623 | ||
600 | /* Enable caches */ | 624 | /* Enable caches */ |
601 | write_cr0(read_cr0() & 0xbfffffff); | 625 | write_cr0(read_cr0() & 0xbfffffff); |
602 | 626 | ||
603 | /* Restore value of CR4 */ | 627 | /* Restore value of CR4 */ |
604 | if ( cpu_has_pge ) | 628 | if (cpu_has_pge) |
605 | write_cr4(cr4); | 629 | write_cr4(cr4); |
606 | spin_unlock(&set_atomicity_lock); | 630 | spin_unlock(&set_atomicity_lock); |
607 | } | 631 | } |
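The bracketing contract spelled out above, exercised as a runnable userspace sketch; the four stubs merely log what their kernel counterparts do and are illustrative only:

    #include <stdio.h>

    /* Stubs standing in for the kernel primitives. */
    static void local_irq_save(unsigned long *f)   { *f = 1; puts("irqs off"); }
    static void local_irq_restore(unsigned long f) { (void)f; puts("irqs on"); }
    static void prepare_set(void) { puts("CD=1, wbinvd, PGE off, MTRRs off"); }
    static void post_set(void)    { puts("TLB flush, MTRRs on, caches on");   }

    int main(void)
    {
            unsigned long flags;

            local_irq_save(&flags);     /* caller's duty per the comment above */
            prepare_set();
            /* ... program MTRR registers here ... */
            post_set();
            local_irq_restore(flags);   /* only after post_set() has run */
            return 0;
    }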
@@ -623,24 +647,27 @@ static void generic_set_all(void) | |||
623 | post_set(); | 647 | post_set(); |
624 | local_irq_restore(flags); | 648 | local_irq_restore(flags); |
625 | 649 | ||
626 | /* Use the atomic bitops to update the global mask */ | 650 | /* Use the atomic bitops to update the global mask */ |
627 | for (count = 0; count < sizeof mask * 8; ++count) { | 651 | for (count = 0; count < sizeof mask * 8; ++count) { |
628 | if (mask & 0x01) | 652 | if (mask & 0x01) |
629 | set_bit(count, &smp_changes_mask); | 653 | set_bit(count, &smp_changes_mask); |
630 | mask >>= 1; | 654 | mask >>= 1; |
631 | } | 655 | } |
632 | 656 | ||
633 | } | 657 | } |
634 | 658 | ||
659 | /** | ||
660 | * generic_set_mtrr - set variable MTRR register on the local CPU. | ||
661 | * | ||
662 | * @reg: The register to set. | ||
663 | * @base: The base address of the region. | ||
664 | * @size: The size of the region. If this is 0 the region is disabled. | ||
665 | * @type: The type of the region. | ||
666 | * | ||
667 | * Returns nothing. | ||
668 | */ | ||
635 | static void generic_set_mtrr(unsigned int reg, unsigned long base, | 669 | static void generic_set_mtrr(unsigned int reg, unsigned long base, |
636 | unsigned long size, mtrr_type type) | 670 | unsigned long size, mtrr_type type) |
637 | /* [SUMMARY] Set variable MTRR register on the local CPU. | ||
638 | <reg> The register to set. | ||
639 | <base> The base address of the region. | ||
640 | <size> The size of the region. If this is 0 the region is disabled. | ||
641 | <type> The type of the region. | ||
642 | [RETURNS] Nothing. | ||
643 | */ | ||
644 | { | 671 | { |
645 | unsigned long flags; | 672 | unsigned long flags; |
646 | struct mtrr_var_range *vr; | 673 | struct mtrr_var_range *vr; |
@@ -651,8 +678,10 @@ static void generic_set_mtrr(unsigned int reg, unsigned long base, | |||
651 | prepare_set(); | 678 | prepare_set(); |
652 | 679 | ||
653 | if (size == 0) { | 680 | if (size == 0) { |
654 | /* The invalid bit is kept in the mask, so we simply clear the | 681 | /* |
655 | relevant mask register to disable a range. */ | 682 | * The invalid bit is kept in the mask, so we simply |
683 | * clear the relevant mask register to disable a range. | ||
684 | */ | ||
656 | mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0); | 685 | mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0); |
657 | memset(vr, 0, sizeof(struct mtrr_var_range)); | 686 | memset(vr, 0, sizeof(struct mtrr_var_range)); |
658 | } else { | 687 | } else { |
@@ -669,46 +698,50 @@ static void generic_set_mtrr(unsigned int reg, unsigned long base, | |||
669 | local_irq_restore(flags); | 698 | local_irq_restore(flags); |
670 | } | 699 | } |
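The size == 0 branch works because each variable range is an MSR pair: MTRRphysBase holds base | type and MTRRphysMask holds the range mask plus the valid bit 0x800 that generic_get_mtrr() tests above, so zeroing the mask MSR is enough to free the slot. A worked encoding example; the 36-bit physical address width and the sample range are assumptions:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t base = 0xf8000000ULL;      /* sample region base          */
            uint64_t size = 0x04000000ULL;      /* 64 MiB, a power of two      */
            uint64_t addr_mask = (1ULL << 36) - 1;  /* assume 36 address bits  */

            uint64_t physbase = base | 0x01;    /* low byte: type 1 = WC       */
            uint64_t physmask = (~(size - 1) & addr_mask) | 0x800;  /* V bit   */

            printf("PhysBase = 0x%010llx\n", (unsigned long long)physbase);
            printf("PhysMask = 0x%010llx\n", (unsigned long long)physmask);
            /* Writing 0,0 to the mask MSR clears bit 0x800: range is free. */
            return 0;
    }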
671 | 700 | ||
672 | int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type) | 701 | int generic_validate_add_page(unsigned long base, unsigned long size, |
702 | unsigned int type) | ||
673 | { | 703 | { |
674 | unsigned long lbase, last; | 704 | unsigned long lbase, last; |
675 | 705 | ||
676 | /* For Intel PPro stepping <= 7, must be 4 MiB aligned | 706 | /* |
677 | and not touch 0x70000000->0x7003FFFF */ | 707 | * For Intel PPro stepping <= 7 |
708 | * must be 4 MiB aligned and not touch 0x70000000 -> 0x7003FFFF | ||
709 | */ | ||
678 | if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 && | 710 | if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 && |
679 | boot_cpu_data.x86_model == 1 && | 711 | boot_cpu_data.x86_model == 1 && |
680 | boot_cpu_data.x86_mask <= 7) { | 712 | boot_cpu_data.x86_mask <= 7) { |
681 | if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) { | 713 | if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) { |
682 | printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base); | 714 | pr_warning("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base); |
683 | return -EINVAL; | 715 | return -EINVAL; |
684 | } | 716 | } |
685 | if (!(base + size < 0x70000 || base > 0x7003F) && | 717 | if (!(base + size < 0x70000 || base > 0x7003F) && |
686 | (type == MTRR_TYPE_WRCOMB | 718 | (type == MTRR_TYPE_WRCOMB |
687 | || type == MTRR_TYPE_WRBACK)) { | 719 | || type == MTRR_TYPE_WRBACK)) { |
688 | printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n"); | 720 | pr_warning("mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n"); |
689 | return -EINVAL; | 721 | return -EINVAL; |
690 | } | 722 | } |
691 | } | 723 | } |
692 | 724 | ||
693 | /* Check upper bits of base and last are equal and lower bits are 0 | 725 | /* |
694 | for base and 1 for last */ | 726 | * Check upper bits of base and last are equal and lower bits are 0 |
727 | * for base and 1 for last | ||
728 | */ | ||
695 | last = base + size - 1; | 729 | last = base + size - 1; |
696 | for (lbase = base; !(lbase & 1) && (last & 1); | 730 | for (lbase = base; !(lbase & 1) && (last & 1); |
697 | lbase = lbase >> 1, last = last >> 1) ; | 731 | lbase = lbase >> 1, last = last >> 1) |
732 | ; | ||
698 | if (lbase != last) { | 733 | if (lbase != last) { |
699 | printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", | 734 | pr_warning("mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", base, size); |
700 | base, size); | ||
701 | return -EINVAL; | 735 | return -EINVAL; |
702 | } | 736 | } |
703 | return 0; | 737 | return 0; |
704 | } | 738 | } |
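The shift loop above is a compact alignment test: it strips trailing zero bits from base in lockstep with trailing one bits from last, and the two values meet exactly when size is a power of two and base is aligned to it. A runnable rendition with made-up page-granular values:

    #include <stdio.h>

    /* Returns 0 when [base, base + size) is a power-of-two sized, aligned range. */
    static int check(unsigned long base, unsigned long size)
    {
            unsigned long lbase, last = base + size - 1;

            for (lbase = base; !(lbase & 1) && (last & 1); lbase >>= 1, last >>= 1)
                    ;
            return lbase != last;        /* non-zero mirrors the -EINVAL path */
    }

    int main(void)
    {
            printf("%d\n", check(0x100, 0x100)); /* 0: base aligned to size   */
            printf("%d\n", check(0x180, 0x100)); /* 1: base not size-aligned  */
            printf("%d\n", check(0x100, 0x180)); /* 1: size not a power of 2  */
            return 0;
    }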
705 | 739 | ||
706 | |||
707 | static int generic_have_wrcomb(void) | 740 | static int generic_have_wrcomb(void) |
708 | { | 741 | { |
709 | unsigned long config, dummy; | 742 | unsigned long config, dummy; |
710 | rdmsr(MSR_MTRRcap, config, dummy); | 743 | rdmsr(MSR_MTRRcap, config, dummy); |
711 | return (config & (1 << 10)); | 744 | return config & (1 << 10); |
712 | } | 745 | } |
713 | 746 | ||
714 | int positive_have_wrcomb(void) | 747 | int positive_have_wrcomb(void) |
@@ -716,14 +749,15 @@ int positive_have_wrcomb(void) | |||
716 | return 1; | 749 | return 1; |
717 | } | 750 | } |
718 | 751 | ||
719 | /* generic structure... | 752 | /* |
753 | * Generic structure... | ||
720 | */ | 754 | */ |
721 | struct mtrr_ops generic_mtrr_ops = { | 755 | struct mtrr_ops generic_mtrr_ops = { |
722 | .use_intel_if = 1, | 756 | .use_intel_if = 1, |
723 | .set_all = generic_set_all, | 757 | .set_all = generic_set_all, |
724 | .get = generic_get_mtrr, | 758 | .get = generic_get_mtrr, |
725 | .get_free_region = generic_get_free_region, | 759 | .get_free_region = generic_get_free_region, |
726 | .set = generic_set_mtrr, | 760 | .set = generic_set_mtrr, |
727 | .validate_add_page = generic_validate_add_page, | 761 | .validate_add_page = generic_validate_add_page, |
728 | .have_wrcomb = generic_have_wrcomb, | 762 | .have_wrcomb = generic_have_wrcomb, |
729 | }; | 763 | }; |
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c index fb73a52913a4..08b6ea4c62b4 100644 --- a/arch/x86/kernel/cpu/mtrr/if.c +++ b/arch/x86/kernel/cpu/mtrr/if.c | |||
@@ -1,27 +1,28 @@ | |||
1 | #include <linux/init.h> | ||
2 | #include <linux/proc_fs.h> | ||
3 | #include <linux/capability.h> | 1 | #include <linux/capability.h> |
4 | #include <linux/ctype.h> | ||
5 | #include <linux/module.h> | ||
6 | #include <linux/seq_file.h> | 2 | #include <linux/seq_file.h> |
7 | #include <asm/uaccess.h> | 3 | #include <linux/uaccess.h> |
4 | #include <linux/proc_fs.h> | ||
5 | #include <linux/module.h> | ||
6 | #include <linux/ctype.h> | ||
7 | #include <linux/init.h> | ||
8 | 8 | ||
9 | #define LINE_SIZE 80 | 9 | #define LINE_SIZE 80 |
10 | 10 | ||
11 | #include <asm/mtrr.h> | 11 | #include <asm/mtrr.h> |
12 | |||
12 | #include "mtrr.h" | 13 | #include "mtrr.h" |
13 | 14 | ||
14 | #define FILE_FCOUNT(f) (((struct seq_file *)((f)->private_data))->private) | 15 | #define FILE_FCOUNT(f) (((struct seq_file *)((f)->private_data))->private) |
15 | 16 | ||
16 | static const char *const mtrr_strings[MTRR_NUM_TYPES] = | 17 | static const char *const mtrr_strings[MTRR_NUM_TYPES] = |
17 | { | 18 | { |
18 | "uncachable", /* 0 */ | 19 | "uncachable", /* 0 */ |
19 | "write-combining", /* 1 */ | 20 | "write-combining", /* 1 */ |
20 | "?", /* 2 */ | 21 | "?", /* 2 */ |
21 | "?", /* 3 */ | 22 | "?", /* 3 */ |
22 | "write-through", /* 4 */ | 23 | "write-through", /* 4 */ |
23 | "write-protect", /* 5 */ | 24 | "write-protect", /* 5 */ |
24 | "write-back", /* 6 */ | 25 | "write-back", /* 6 */ |
25 | }; | 26 | }; |
26 | 27 | ||
27 | const char *mtrr_attrib_to_str(int x) | 28 | const char *mtrr_attrib_to_str(int x) |
@@ -35,8 +36,8 @@ static int | |||
35 | mtrr_file_add(unsigned long base, unsigned long size, | 36 | mtrr_file_add(unsigned long base, unsigned long size, |
36 | unsigned int type, bool increment, struct file *file, int page) | 37 | unsigned int type, bool increment, struct file *file, int page) |
37 | { | 38 | { |
39 | unsigned int *fcount = FILE_FCOUNT(file); | ||
38 | int reg, max; | 40 | int reg, max; |
39 | unsigned int *fcount = FILE_FCOUNT(file); | ||
40 | 41 | ||
41 | max = num_var_ranges; | 42 | max = num_var_ranges; |
42 | if (fcount == NULL) { | 43 | if (fcount == NULL) { |
@@ -61,8 +62,8 @@ static int | |||
61 | mtrr_file_del(unsigned long base, unsigned long size, | 62 | mtrr_file_del(unsigned long base, unsigned long size, |
62 | struct file *file, int page) | 63 | struct file *file, int page) |
63 | { | 64 | { |
64 | int reg; | ||
65 | unsigned int *fcount = FILE_FCOUNT(file); | 65 | unsigned int *fcount = FILE_FCOUNT(file); |
66 | int reg; | ||
66 | 67 | ||
67 | if (!page) { | 68 | if (!page) { |
68 | if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) | 69 | if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) |
@@ -81,13 +82,14 @@ mtrr_file_del(unsigned long base, unsigned long size, | |||
81 | return reg; | 82 | return reg; |
82 | } | 83 | } |
83 | 84 | ||
84 | /* RED-PEN: seq_file can seek now. this is ignored. */ | 85 | /* |
86 | * seq_file can seek but we ignore it. | ||
87 | * | ||
88 | * Format of control line: | ||
89 | * "base=%Lx size=%Lx type=%s" or "disable=%d" | ||
90 | */ | ||
85 | static ssize_t | 91 | static ssize_t |
86 | mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos) | 92 | mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos) |
87 | /* Format of control line: | ||
88 | "base=%Lx size=%Lx type=%s" OR: | ||
89 | "disable=%d" | ||
90 | */ | ||
91 | { | 93 | { |
92 | int i, err; | 94 | int i, err; |
93 | unsigned long reg; | 95 | unsigned long reg; |
@@ -100,15 +102,18 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos) | |||
100 | return -EPERM; | 102 | return -EPERM; |
101 | if (!len) | 103 | if (!len) |
102 | return -EINVAL; | 104 | return -EINVAL; |
105 | |||
103 | memset(line, 0, LINE_SIZE); | 106 | memset(line, 0, LINE_SIZE); |
104 | if (len > LINE_SIZE) | 107 | if (len > LINE_SIZE) |
105 | len = LINE_SIZE; | 108 | len = LINE_SIZE; |
106 | if (copy_from_user(line, buf, len - 1)) | 109 | if (copy_from_user(line, buf, len - 1)) |
107 | return -EFAULT; | 110 | return -EFAULT; |
111 | |||
108 | linelen = strlen(line); | 112 | linelen = strlen(line); |
109 | ptr = line + linelen - 1; | 113 | ptr = line + linelen - 1; |
110 | if (linelen && *ptr == '\n') | 114 | if (linelen && *ptr == '\n') |
111 | *ptr = '\0'; | 115 | *ptr = '\0'; |
116 | |||
112 | if (!strncmp(line, "disable=", 8)) { | 117 | if (!strncmp(line, "disable=", 8)) { |
113 | reg = simple_strtoul(line + 8, &ptr, 0); | 118 | reg = simple_strtoul(line + 8, &ptr, 0); |
114 | err = mtrr_del_page(reg, 0, 0); | 119 | err = mtrr_del_page(reg, 0, 0); |
@@ -116,28 +121,35 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos) | |||
116 | return err; | 121 | return err; |
117 | return len; | 122 | return len; |
118 | } | 123 | } |
124 | |||
119 | if (strncmp(line, "base=", 5)) | 125 | if (strncmp(line, "base=", 5)) |
120 | return -EINVAL; | 126 | return -EINVAL; |
127 | |||
121 | base = simple_strtoull(line + 5, &ptr, 0); | 128 | base = simple_strtoull(line + 5, &ptr, 0); |
122 | for (; isspace(*ptr); ++ptr) ; | 129 | for (; isspace(*ptr); ++ptr) |
130 | ; | ||
131 | |||
123 | if (strncmp(ptr, "size=", 5)) | 132 | if (strncmp(ptr, "size=", 5)) |
124 | return -EINVAL; | 133 | return -EINVAL; |
134 | |||
125 | size = simple_strtoull(ptr + 5, &ptr, 0); | 135 | size = simple_strtoull(ptr + 5, &ptr, 0); |
126 | if ((base & 0xfff) || (size & 0xfff)) | 136 | if ((base & 0xfff) || (size & 0xfff)) |
127 | return -EINVAL; | 137 | return -EINVAL; |
128 | for (; isspace(*ptr); ++ptr) ; | 138 | for (; isspace(*ptr); ++ptr) |
139 | ; | ||
140 | |||
129 | if (strncmp(ptr, "type=", 5)) | 141 | if (strncmp(ptr, "type=", 5)) |
130 | return -EINVAL; | 142 | return -EINVAL; |
131 | ptr += 5; | 143 | ptr += 5; |
132 | for (; isspace(*ptr); ++ptr) ; | 144 | for (; isspace(*ptr); ++ptr) |
145 | ; | ||
146 | |||
133 | for (i = 0; i < MTRR_NUM_TYPES; ++i) { | 147 | for (i = 0; i < MTRR_NUM_TYPES; ++i) { |
134 | if (strcmp(ptr, mtrr_strings[i])) | 148 | if (strcmp(ptr, mtrr_strings[i])) |
135 | continue; | 149 | continue; |
136 | base >>= PAGE_SHIFT; | 150 | base >>= PAGE_SHIFT; |
137 | size >>= PAGE_SHIFT; | 151 | size >>= PAGE_SHIFT; |
138 | err = | 152 | err = mtrr_add_page((unsigned long)base, (unsigned long)size, i, true); |
139 | mtrr_add_page((unsigned long) base, (unsigned long) size, i, | ||
140 | true); | ||
141 | if (err < 0) | 153 | if (err < 0) |
142 | return err; | 154 | return err; |
143 | return len; | 155 | return len; |
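These control lines are what userspace writes into /proc/mtrr (created by mtrr_if_init() further down); a minimal writer, with placeholder base and size values and the type string taken from the mtrr_strings table:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            /* One of the two formats mtrr_write() parses; needs root. */
            const char *line = "base=0xf8000000 size=0x400000 type=write-combining\n";
            int fd = open("/proc/mtrr", O_WRONLY);

            if (fd < 0) {
                    perror("open /proc/mtrr");
                    return 1;
            }
            if (write(fd, line, strlen(line)) < 0)
                    perror("write");    /* e.g. EINVAL on bad alignment */
            close(fd);
            return 0;
    }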
@@ -181,7 +193,9 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg) | |||
181 | case MTRRIOC32_SET_PAGE_ENTRY: | 193 | case MTRRIOC32_SET_PAGE_ENTRY: |
182 | case MTRRIOC32_DEL_PAGE_ENTRY: | 194 | case MTRRIOC32_DEL_PAGE_ENTRY: |
183 | case MTRRIOC32_KILL_PAGE_ENTRY: { | 195 | case MTRRIOC32_KILL_PAGE_ENTRY: { |
184 | struct mtrr_sentry32 __user *s32 = (struct mtrr_sentry32 __user *)__arg; | 196 | struct mtrr_sentry32 __user *s32; |
197 | |||
198 | s32 = (struct mtrr_sentry32 __user *)__arg; | ||
185 | err = get_user(sentry.base, &s32->base); | 199 | err = get_user(sentry.base, &s32->base); |
186 | err |= get_user(sentry.size, &s32->size); | 200 | err |= get_user(sentry.size, &s32->size); |
187 | err |= get_user(sentry.type, &s32->type); | 201 | err |= get_user(sentry.type, &s32->type); |
@@ -191,7 +205,9 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg) | |||
191 | } | 205 | } |
192 | case MTRRIOC32_GET_ENTRY: | 206 | case MTRRIOC32_GET_ENTRY: |
193 | case MTRRIOC32_GET_PAGE_ENTRY: { | 207 | case MTRRIOC32_GET_PAGE_ENTRY: { |
194 | struct mtrr_gentry32 __user *g32 = (struct mtrr_gentry32 __user *)__arg; | 208 | struct mtrr_gentry32 __user *g32; |
209 | |||
210 | g32 = (struct mtrr_gentry32 __user *)__arg; | ||
195 | err = get_user(gentry.regnum, &g32->regnum); | 211 | err = get_user(gentry.regnum, &g32->regnum); |
196 | err |= get_user(gentry.base, &g32->base); | 212 | err |= get_user(gentry.base, &g32->base); |
197 | err |= get_user(gentry.size, &g32->size); | 213 | err |= get_user(gentry.size, &g32->size); |
@@ -314,7 +330,7 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg) | |||
314 | if (err) | 330 | if (err) |
315 | return err; | 331 | return err; |
316 | 332 | ||
317 | switch(cmd) { | 333 | switch (cmd) { |
318 | case MTRRIOC_GET_ENTRY: | 334 | case MTRRIOC_GET_ENTRY: |
319 | case MTRRIOC_GET_PAGE_ENTRY: | 335 | case MTRRIOC_GET_PAGE_ENTRY: |
320 | if (copy_to_user(arg, &gentry, sizeof gentry)) | 336 | if (copy_to_user(arg, &gentry, sizeof gentry)) |
@@ -323,7 +339,9 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg) | |||
323 | #ifdef CONFIG_COMPAT | 339 | #ifdef CONFIG_COMPAT |
324 | case MTRRIOC32_GET_ENTRY: | 340 | case MTRRIOC32_GET_ENTRY: |
325 | case MTRRIOC32_GET_PAGE_ENTRY: { | 341 | case MTRRIOC32_GET_PAGE_ENTRY: { |
326 | struct mtrr_gentry32 __user *g32 = (struct mtrr_gentry32 __user *)__arg; | 342 | struct mtrr_gentry32 __user *g32; |
343 | |||
344 | g32 = (struct mtrr_gentry32 __user *)__arg; | ||
327 | err = put_user(gentry.base, &g32->base); | 345 | err = put_user(gentry.base, &g32->base); |
328 | err |= put_user(gentry.size, &g32->size); | 346 | err |= put_user(gentry.size, &g32->size); |
329 | err |= put_user(gentry.regnum, &g32->regnum); | 347 | err |= put_user(gentry.regnum, &g32->regnum); |
@@ -335,11 +353,10 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg) | |||
335 | return err; | 353 | return err; |
336 | } | 354 | } |
337 | 355 | ||
338 | static int | 356 | static int mtrr_close(struct inode *ino, struct file *file) |
339 | mtrr_close(struct inode *ino, struct file *file) | ||
340 | { | 357 | { |
341 | int i, max; | ||
342 | unsigned int *fcount = FILE_FCOUNT(file); | 358 | unsigned int *fcount = FILE_FCOUNT(file); |
359 | int i, max; | ||
343 | 360 | ||
344 | if (fcount != NULL) { | 361 | if (fcount != NULL) { |
345 | max = num_var_ranges; | 362 | max = num_var_ranges; |
@@ -359,22 +376,22 @@ static int mtrr_seq_show(struct seq_file *seq, void *offset); | |||
359 | 376 | ||
360 | static int mtrr_open(struct inode *inode, struct file *file) | 377 | static int mtrr_open(struct inode *inode, struct file *file) |
361 | { | 378 | { |
362 | if (!mtrr_if) | 379 | if (!mtrr_if) |
363 | return -EIO; | 380 | return -EIO; |
364 | if (!mtrr_if->get) | 381 | if (!mtrr_if->get) |
365 | return -ENXIO; | 382 | return -ENXIO; |
366 | return single_open(file, mtrr_seq_show, NULL); | 383 | return single_open(file, mtrr_seq_show, NULL); |
367 | } | 384 | } |
368 | 385 | ||
369 | static const struct file_operations mtrr_fops = { | 386 | static const struct file_operations mtrr_fops = { |
370 | .owner = THIS_MODULE, | 387 | .owner = THIS_MODULE, |
371 | .open = mtrr_open, | 388 | .open = mtrr_open, |
372 | .read = seq_read, | 389 | .read = seq_read, |
373 | .llseek = seq_lseek, | 390 | .llseek = seq_lseek, |
374 | .write = mtrr_write, | 391 | .write = mtrr_write, |
375 | .unlocked_ioctl = mtrr_ioctl, | 392 | .unlocked_ioctl = mtrr_ioctl, |
376 | .compat_ioctl = mtrr_ioctl, | 393 | .compat_ioctl = mtrr_ioctl, |
377 | .release = mtrr_close, | 394 | .release = mtrr_close, |
378 | }; | 395 | }; |
379 | 396 | ||
380 | static int mtrr_seq_show(struct seq_file *seq, void *offset) | 397 | static int mtrr_seq_show(struct seq_file *seq, void *offset) |
@@ -388,23 +405,24 @@ static int mtrr_seq_show(struct seq_file *seq, void *offset) | |||
388 | max = num_var_ranges; | 405 | max = num_var_ranges; |
389 | for (i = 0; i < max; i++) { | 406 | for (i = 0; i < max; i++) { |
390 | mtrr_if->get(i, &base, &size, &type); | 407 | mtrr_if->get(i, &base, &size, &type); |
391 | if (size == 0) | 408 | if (size == 0) { |
392 | mtrr_usage_table[i] = 0; | 409 | mtrr_usage_table[i] = 0; |
393 | else { | 410 | continue; |
394 | if (size < (0x100000 >> PAGE_SHIFT)) { | ||
395 | /* less than 1MB */ | ||
396 | factor = 'K'; | ||
397 | size <<= PAGE_SHIFT - 10; | ||
398 | } else { | ||
399 | factor = 'M'; | ||
400 | size >>= 20 - PAGE_SHIFT; | ||
401 | } | ||
402 | /* RED-PEN: base can be > 32bit */ | ||
403 | len += seq_printf(seq, | ||
404 | "reg%02i: base=0x%06lx000 (%5luMB), size=%5lu%cB, count=%d: %s\n", | ||
405 | i, base, base >> (20 - PAGE_SHIFT), size, factor, | ||
406 | mtrr_usage_table[i], mtrr_attrib_to_str(type)); | ||
407 | } | 411 | } |
412 | if (size < (0x100000 >> PAGE_SHIFT)) { | ||
413 | /* less than 1MB */ | ||
414 | factor = 'K'; | ||
415 | size <<= PAGE_SHIFT - 10; | ||
416 | } else { | ||
417 | factor = 'M'; | ||
418 | size >>= 20 - PAGE_SHIFT; | ||
419 | } | ||
420 | /* Base can be > 32bit */ | ||
421 | len += seq_printf(seq, "reg%02i: base=0x%06lx000 " | ||
422 | "(%5luMB), size=%5lu%cB, count=%d: %s\n", | ||
423 | i, base, base >> (20 - PAGE_SHIFT), size, | ||
424 | factor, mtrr_usage_table[i], | ||
425 | mtrr_attrib_to_str(type)); | ||
408 | } | 426 | } |
409 | return 0; | 427 | return 0; |
410 | } | 428 | } |
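The factor logic converts a size held in pages into a KB or MB figure for display. With the usual x86 PAGE_SHIFT of 12 (assumed here), 16 pages come out as 64KB and 0x20000 pages as 512MB:

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumed 4 KiB pages */

    int main(void)
    {
            unsigned long sizes[] = { 16, 0x20000 };    /* in pages */

            for (int i = 0; i < 2; i++) {
                    unsigned long size = sizes[i];
                    char factor;

                    if (size < (0x100000 >> PAGE_SHIFT)) {  /* less than 1MB */
                            factor = 'K';
                            size <<= PAGE_SHIFT - 10;
                    } else {
                            factor = 'M';
                            size >>= 20 - PAGE_SHIFT;
                    }
                    printf("size=%5lu%cB\n", size, factor); /* 64KB, 512MB */
            }
            return 0;
    }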
@@ -422,6 +440,5 @@ static int __init mtrr_if_init(void) | |||
422 | proc_create("mtrr", S_IWUSR | S_IRUGO, NULL, &mtrr_fops); | 440 | proc_create("mtrr", S_IWUSR | S_IRUGO, NULL, &mtrr_fops); |
423 | return 0; | 441 | return 0; |
424 | } | 442 | } |
425 | |||
426 | arch_initcall(mtrr_if_init); | 443 | arch_initcall(mtrr_if_init); |
427 | #endif /* CONFIG_PROC_FS */ | 444 | #endif /* CONFIG_PROC_FS */ |
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index 8fc248b5aeaf..7af0f88a4163 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c | |||
@@ -25,43 +25,48 @@ | |||
25 | Operating System Writer's Guide" (Intel document number 242692), | 25 | Operating System Writer's Guide" (Intel document number 242692), |
26 | section 11.11.7 | 26 | section 11.11.7 |
27 | 27 | ||
28 | This was cleaned and made readable by Patrick Mochel <mochel@osdl.org> | 28 | This was cleaned and made readable by Patrick Mochel <mochel@osdl.org> |
29 | on 6-7 March 2002. | 29 | on 6-7 March 2002. |
30 | Source: Intel Architecture Software Developers Manual, Volume 3: | 30 | Source: Intel Architecture Software Developers Manual, Volume 3: |
31 | System Programming Guide; Section 9.11. (1997 edition - PPro). | 31 | System Programming Guide; Section 9.11. (1997 edition - PPro). |
32 | */ | 32 | */ |
33 | 33 | ||
34 | #define DEBUG | ||
35 | |||
36 | #include <linux/types.h> /* FIXME: kvm_para.h needs this */ | ||
37 | |||
38 | #include <linux/kvm_para.h> | ||
39 | #include <linux/uaccess.h> | ||
34 | #include <linux/module.h> | 40 | #include <linux/module.h> |
41 | #include <linux/mutex.h> | ||
35 | #include <linux/init.h> | 42 | #include <linux/init.h> |
43 | #include <linux/sort.h> | ||
44 | #include <linux/cpu.h> | ||
36 | #include <linux/pci.h> | 45 | #include <linux/pci.h> |
37 | #include <linux/smp.h> | 46 | #include <linux/smp.h> |
38 | #include <linux/cpu.h> | ||
39 | #include <linux/mutex.h> | ||
40 | #include <linux/sort.h> | ||
41 | 47 | ||
48 | #include <asm/processor.h> | ||
42 | #include <asm/e820.h> | 49 | #include <asm/e820.h> |
43 | #include <asm/mtrr.h> | 50 | #include <asm/mtrr.h> |
44 | #include <asm/uaccess.h> | ||
45 | #include <asm/processor.h> | ||
46 | #include <asm/msr.h> | 51 | #include <asm/msr.h> |
47 | #include <asm/kvm_para.h> | 52 | |
48 | #include "mtrr.h" | 53 | #include "mtrr.h" |
49 | 54 | ||
50 | u32 num_var_ranges = 0; | 55 | u32 num_var_ranges; |
51 | 56 | ||
52 | unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES]; | 57 | unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES]; |
53 | static DEFINE_MUTEX(mtrr_mutex); | 58 | static DEFINE_MUTEX(mtrr_mutex); |
54 | 59 | ||
55 | u64 size_or_mask, size_and_mask; | 60 | u64 size_or_mask, size_and_mask; |
56 | 61 | ||
57 | static struct mtrr_ops * mtrr_ops[X86_VENDOR_NUM] = {}; | 62 | static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM]; |
58 | 63 | ||
59 | struct mtrr_ops * mtrr_if = NULL; | 64 | struct mtrr_ops *mtrr_if; |
60 | 65 | ||
61 | static void set_mtrr(unsigned int reg, unsigned long base, | 66 | static void set_mtrr(unsigned int reg, unsigned long base, |
62 | unsigned long size, mtrr_type type); | 67 | unsigned long size, mtrr_type type); |
63 | 68 | ||
64 | void set_mtrr_ops(struct mtrr_ops * ops) | 69 | void set_mtrr_ops(struct mtrr_ops *ops) |
65 | { | 70 | { |
66 | if (ops->vendor && ops->vendor < X86_VENDOR_NUM) | 71 | if (ops->vendor && ops->vendor < X86_VENDOR_NUM) |
67 | mtrr_ops[ops->vendor] = ops; | 72 | mtrr_ops[ops->vendor] = ops; |
@@ -72,30 +77,36 @@ static int have_wrcomb(void) | |||
72 | { | 77 | { |
73 | struct pci_dev *dev; | 78 | struct pci_dev *dev; |
74 | u8 rev; | 79 | u8 rev; |
75 | 80 | ||
76 | if ((dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) != NULL) { | 81 | dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL); |
77 | /* ServerWorks LE chipsets < rev 6 have problems with write-combining | 82 | if (dev != NULL) { |
78 | Don't allow it and leave room for other chipsets to be tagged */ | 83 | /* |
84 | * ServerWorks LE chipsets < rev 6 have problems with | ||
85 | * write-combining. Don't allow it and leave room for other | ||
86 | * chipsets to be tagged. | ||
87 | */ | ||
79 | if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS && | 88 | if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS && |
80 | dev->device == PCI_DEVICE_ID_SERVERWORKS_LE) { | 89 | dev->device == PCI_DEVICE_ID_SERVERWORKS_LE) { |
81 | pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev); | 90 | pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev); |
82 | if (rev <= 5) { | 91 | if (rev <= 5) { |
83 | printk(KERN_INFO "mtrr: Serverworks LE rev < 6 detected. Write-combining disabled.\n"); | 92 | pr_info("mtrr: Serverworks LE rev < 6 detected. Write-combining disabled.\n"); |
84 | pci_dev_put(dev); | 93 | pci_dev_put(dev); |
85 | return 0; | 94 | return 0; |
86 | } | 95 | } |
87 | } | 96 | } |
88 | /* Intel 450NX errata # 23. Non ascending cacheline evictions to | 97 | /* |
89 | write combining memory may result in data corruption | 98 | * Intel 450NX errata # 23. Non ascending cacheline evictions to |
99 | * write combining memory may result in data corruption | ||
100 | */ | ||
90 | if (dev->vendor == PCI_VENDOR_ID_INTEL && | 101 | if (dev->vendor == PCI_VENDOR_ID_INTEL && |
91 | dev->device == PCI_DEVICE_ID_INTEL_82451NX) { | 102 | dev->device == PCI_DEVICE_ID_INTEL_82451NX) { |
92 | printk(KERN_INFO "mtrr: Intel 450NX MMC detected. Write-combining disabled.\n"); | 103 | pr_info("mtrr: Intel 450NX MMC detected. Write-combining disabled.\n"); |
93 | pci_dev_put(dev); | 104 | pci_dev_put(dev); |
94 | return 0; | 105 | return 0; |
95 | } | 106 | } |
96 | pci_dev_put(dev); | 107 | pci_dev_put(dev); |
97 | } | 108 | } |
98 | return (mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0); | 109 | return mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0; |
99 | } | 110 | } |
100 | 111 | ||
101 | /* This function returns the number of variable MTRRs */ | 112 | /* This function returns the number of variable MTRRs */ |
@@ -103,12 +114,13 @@ static void __init set_num_var_ranges(void) | |||
103 | { | 114 | { |
104 | unsigned long config = 0, dummy; | 115 | unsigned long config = 0, dummy; |
105 | 116 | ||
106 | if (use_intel()) { | 117 | if (use_intel()) |
107 | rdmsr(MSR_MTRRcap, config, dummy); | 118 | rdmsr(MSR_MTRRcap, config, dummy); |
108 | } else if (is_cpu(AMD)) | 119 | else if (is_cpu(AMD)) |
109 | config = 2; | 120 | config = 2; |
110 | else if (is_cpu(CYRIX) || is_cpu(CENTAUR)) | 121 | else if (is_cpu(CYRIX) || is_cpu(CENTAUR)) |
111 | config = 8; | 122 | config = 8; |
123 | |||
112 | num_var_ranges = config & 0xff; | 124 | num_var_ranges = config & 0xff; |
113 | } | 125 | } |
114 | 126 | ||
@@ -130,10 +142,12 @@ struct set_mtrr_data { | |||
130 | mtrr_type smp_type; | 142 | mtrr_type smp_type; |
131 | }; | 143 | }; |
132 | 144 | ||
145 | /** | ||
146 | * ipi_handler - Synchronisation handler. Executed by "other" CPUs. | ||
147 | * | ||
148 | * Returns nothing. | ||
149 | */ | ||
133 | static void ipi_handler(void *info) | 150 | static void ipi_handler(void *info) |
134 | /* [SUMMARY] Synchronisation handler. Executed by "other" CPUs. | ||
135 | [RETURNS] Nothing. | ||
136 | */ | ||
137 | { | 151 | { |
138 | #ifdef CONFIG_SMP | 152 | #ifdef CONFIG_SMP |
139 | struct set_mtrr_data *data = info; | 153 | struct set_mtrr_data *data = info; |
@@ -142,18 +156,19 @@ static void ipi_handler(void *info) | |||
142 | local_irq_save(flags); | 156 | local_irq_save(flags); |
143 | 157 | ||
144 | atomic_dec(&data->count); | 158 | atomic_dec(&data->count); |
145 | while(!atomic_read(&data->gate)) | 159 | while (!atomic_read(&data->gate)) |
146 | cpu_relax(); | 160 | cpu_relax(); |
147 | 161 | ||
148 | /* The master has cleared me to execute */ | 162 | /* The master has cleared me to execute */ |
149 | if (data->smp_reg != ~0U) | 163 | if (data->smp_reg != ~0U) { |
150 | mtrr_if->set(data->smp_reg, data->smp_base, | 164 | mtrr_if->set(data->smp_reg, data->smp_base, |
151 | data->smp_size, data->smp_type); | 165 | data->smp_size, data->smp_type); |
152 | else | 166 | } else { |
153 | mtrr_if->set_all(); | 167 | mtrr_if->set_all(); |
168 | } | ||
154 | 169 | ||
155 | atomic_dec(&data->count); | 170 | atomic_dec(&data->count); |
156 | while(atomic_read(&data->gate)) | 171 | while (atomic_read(&data->gate)) |
157 | cpu_relax(); | 172 | cpu_relax(); |
158 | 173 | ||
159 | atomic_dec(&data->count); | 174 | atomic_dec(&data->count); |
@@ -161,7 +176,8 @@ static void ipi_handler(void *info) | |||
161 | #endif | 176 | #endif |
162 | } | 177 | } |
163 | 178 | ||
164 | static inline int types_compatible(mtrr_type type1, mtrr_type type2) { | 179 | static inline int types_compatible(mtrr_type type1, mtrr_type type2) |
180 | { | ||
165 | return type1 == MTRR_TYPE_UNCACHABLE || | 181 | return type1 == MTRR_TYPE_UNCACHABLE || |
166 | type2 == MTRR_TYPE_UNCACHABLE || | 182 | type2 == MTRR_TYPE_UNCACHABLE || |
167 | (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) || | 183 | (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) || |
@@ -176,10 +192,10 @@ static inline int types_compatible(mtrr_type type1, mtrr_type type2) { | |||
176 | * @type: mtrr type | 192 | * @type: mtrr type |
177 | * | 193 | * |
178 | * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly: | 194 | * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly: |
179 | * | 195 | * |
180 | * 1. Send IPI to do the following: | 196 | * 1. Send IPI to do the following: |
181 | * 2. Disable Interrupts | 197 | * 2. Disable Interrupts |
182 | * 3. Wait for all procs to do so | 198 | * 3. Wait for all procs to do so |
183 | * 4. Enter no-fill cache mode | 199 | * 4. Enter no-fill cache mode |
184 | * 5. Flush caches | 200 | * 5. Flush caches |
185 | * 6. Clear PGE bit | 201 | * 6. Clear PGE bit |
@@ -189,26 +205,27 @@ static inline int types_compatible(mtrr_type type1, mtrr_type type2) { | |||
189 | * 10. Enable all range registers | 205 | * 10. Enable all range registers |
190 | * 11. Flush all TLBs and caches again | 206 | * 11. Flush all TLBs and caches again |
191 | * 12. Enter normal cache mode and reenable caching | 207 | * 12. Enter normal cache mode and reenable caching |
192 | * 13. Set PGE | 208 | * 13. Set PGE |
193 | * 14. Wait for buddies to catch up | 209 | * 14. Wait for buddies to catch up |
194 | * 15. Enable interrupts. | 210 | * 15. Enable interrupts. |
195 | * | 211 | * |
196 | * What does that mean for us? Well, first we set data.count to the number | 212 | * What does that mean for us? Well, first we set data.count to the number |
197 | * of CPUs. As each CPU disables interrupts, it'll decrement it once. We wait | 213 | * of CPUs. As each CPU disables interrupts, it'll decrement it once. We wait |
198 | * until it hits 0 and proceed. We set the data.gate flag and reset data.count. | 214 | * until it hits 0 and proceed. We set the data.gate flag and reset data.count. |
199 | * Meanwhile, they are waiting for that flag to be set. Once it's set, each | 215 | * Meanwhile, they are waiting for that flag to be set. Once it's set, each |
200 | * CPU goes through the transition of updating MTRRs. The CPU vendors may each do it | 216 | * CPU goes through the transition of updating MTRRs. |
201 | * differently, so we call mtrr_if->set() callback and let them take care of it. | 217 | * The CPU vendors may each do it differently, |
202 | * When they're done, they again decrement data->count and wait for data.gate to | 218 | * so we call mtrr_if->set() callback and let them take care of it. |
203 | * be reset. | 219 | * When they're done, they again decrement data->count and wait for data.gate |
204 | * When we finish, we wait for data.count to hit 0 and toggle the data.gate flag. | 220 | * to be reset. |
221 | * When we finish, we wait for data.count to hit 0 and toggle the data.gate flag. | ||
205 | * Everyone then enables interrupts and we all continue on. | 222 | * Everyone then enables interrupts and we all continue on. |
206 | * | 223 | * |
207 | * Note that the mechanism is the same for UP systems, too; all the SMP stuff | 224 | * Note that the mechanism is the same for UP systems, too; all the SMP stuff |
208 | * becomes nops. | 225 | * becomes nops. |
209 | */ | 226 | */ |
210 | static void set_mtrr(unsigned int reg, unsigned long base, | 227 | static void |
211 | unsigned long size, mtrr_type type) | 228 | set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type) |
212 | { | 229 | { |
213 | struct set_mtrr_data data; | 230 | struct set_mtrr_data data; |
214 | unsigned long flags; | 231 | unsigned long flags; |
@@ -218,121 +235,122 @@ static void set_mtrr(unsigned int reg, unsigned long base, | |||
218 | data.smp_size = size; | 235 | data.smp_size = size; |
219 | data.smp_type = type; | 236 | data.smp_type = type; |
220 | atomic_set(&data.count, num_booting_cpus() - 1); | 237 | atomic_set(&data.count, num_booting_cpus() - 1); |
221 | /* make sure data.count is visible before unleashing other CPUs */ | 238 | |
239 | /* Make sure data.count is visible before unleashing other CPUs */ | ||
222 | smp_wmb(); | 240 | smp_wmb(); |
223 | atomic_set(&data.gate,0); | 241 | atomic_set(&data.gate, 0); |
224 | 242 | ||
225 | /* Start the ball rolling on other CPUs */ | 243 | /* Start the ball rolling on other CPUs */ |
226 | if (smp_call_function(ipi_handler, &data, 0) != 0) | 244 | if (smp_call_function(ipi_handler, &data, 0) != 0) |
227 | panic("mtrr: timed out waiting for other CPUs\n"); | 245 | panic("mtrr: timed out waiting for other CPUs\n"); |
228 | 246 | ||
229 | local_irq_save(flags); | 247 | local_irq_save(flags); |
230 | 248 | ||
231 | while(atomic_read(&data.count)) | 249 | while (atomic_read(&data.count)) |
232 | cpu_relax(); | 250 | cpu_relax(); |
233 | 251 | ||
234 | /* ok, reset count and toggle gate */ | 252 | /* Ok, reset count and toggle gate */ |
235 | atomic_set(&data.count, num_booting_cpus() - 1); | 253 | atomic_set(&data.count, num_booting_cpus() - 1); |
236 | smp_wmb(); | 254 | smp_wmb(); |
237 | atomic_set(&data.gate,1); | 255 | atomic_set(&data.gate, 1); |
238 | 256 | ||
239 | /* do our MTRR business */ | 257 | /* Do our MTRR business */ |
240 | 258 | ||
241 | /* HACK! | 259 | /* |
260 | * HACK! | ||
242 | * We use this same function to initialize the mtrrs on boot. | 261 | * We use this same function to initialize the mtrrs on boot. |
243 | * The state of the boot cpu's mtrrs has been saved, and we want | 262 | * The state of the boot cpu's mtrrs has been saved, and we want |
244 | * to replicate across all the APs. | 263 | * to replicate across all the APs. |
245 | * If we're doing that @reg is set to something special... | 264 | * If we're doing that @reg is set to something special... |
246 | */ | 265 | */ |
247 | if (reg != ~0U) | 266 | if (reg != ~0U) |
248 | mtrr_if->set(reg,base,size,type); | 267 | mtrr_if->set(reg, base, size, type); |
249 | 268 | ||
250 | /* wait for the others */ | 269 | /* Wait for the others */ |
251 | while(atomic_read(&data.count)) | 270 | while (atomic_read(&data.count)) |
252 | cpu_relax(); | 271 | cpu_relax(); |
253 | 272 | ||
254 | atomic_set(&data.count, num_booting_cpus() - 1); | 273 | atomic_set(&data.count, num_booting_cpus() - 1); |
255 | smp_wmb(); | 274 | smp_wmb(); |
256 | atomic_set(&data.gate,0); | 275 | atomic_set(&data.gate, 0); |
257 | 276 | ||
258 | /* | 277 | /* |
259 | * Wait here for everyone to have seen the gate change | 278 | * Wait here for everyone to have seen the gate change |
260 | * So we're the last ones to touch 'data' | 279 | * So we're the last ones to touch 'data' |
261 | */ | 280 | */ |
262 | while(atomic_read(&data.count)) | 281 | while (atomic_read(&data.count)) |
263 | cpu_relax(); | 282 | cpu_relax(); |
264 | 283 | ||
265 | local_irq_restore(flags); | 284 | local_irq_restore(flags); |
266 | } | 285 | } |
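The count/gate handshake implemented above is a three-phase rendezvous that can be exercised outside the kernel; a userspace simulation with C11 atomics and pthreads, where the CPU count is arbitrary and the busy-wait loops deliberately mirror the cpu_relax() spins:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define NCPUS 4
    static atomic_int count, gate;

    static void *ap(void *arg)              /* plays ipi_handler() */
    {
            atomic_fetch_sub(&count, 1);    /* phase 1: checked in, "irqs off" */
            while (!atomic_load(&gate))
                    ;                       /* cpu_relax() */
            /* ... set our MTRRs here ... */
            atomic_fetch_sub(&count, 1);    /* phase 2: done programming */
            while (atomic_load(&gate))
                    ;
            atomic_fetch_sub(&count, 1);    /* phase 3: saw the gate drop */
            return arg;
    }

    int main(void)                          /* plays set_mtrr() on the master */
    {
            pthread_t t[NCPUS - 1];
            int i;

            atomic_store(&count, NCPUS - 1);
            atomic_store(&gate, 0);
            for (i = 0; i < NCPUS - 1; i++)
                    pthread_create(&t[i], NULL, ap, NULL);

            while (atomic_load(&count))
                    ;                       /* everyone checked in */
            atomic_store(&count, NCPUS - 1);
            atomic_store(&gate, 1);         /* unleash; master sets its reg too */

            while (atomic_load(&count))
                    ;                       /* everyone done programming */
            atomic_store(&count, NCPUS - 1);
            atomic_store(&gate, 0);

            while (atomic_load(&count))
                    ;                       /* everyone saw the gate drop */
            for (i = 0; i < NCPUS - 1; i++)
                    pthread_join(t[i], NULL);
            puts("rendezvous complete");
            return 0;
    }

Build with cc -pthread; the third wait is what guarantees the master is the last to touch the shared data, as the comment above spells out.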
267 | 286 | ||
268 | /** | 287 | /** |
269 | * mtrr_add_page - Add a memory type region | 288 | * mtrr_add_page - Add a memory type region |
270 | * @base: Physical base address of region in pages (in units of 4 kB!) | 289 | * @base: Physical base address of region in pages (in units of 4 kB!) |
271 | * @size: Physical size of region in pages (4 kB) | 290 | * @size: Physical size of region in pages (4 kB) |
272 | * @type: Type of MTRR desired | 291 | * @type: Type of MTRR desired |
273 | * @increment: If this is true do usage counting on the region | 292 | * @increment: If this is true do usage counting on the region |
274 | * | 293 | * |
275 | * Memory type region registers control the caching on newer Intel and | 294 | * Memory type region registers control the caching on newer Intel and |
276 | * non Intel processors. This function allows drivers to request an | 295 | * non Intel processors. This function allows drivers to request an |
277 | * MTRR is added. The details and hardware specifics of each processor's | 296 | * MTRR is added. The details and hardware specifics of each processor's |
278 | * implementation are hidden from the caller, but nevertheless the | 297 | * implementation are hidden from the caller, but nevertheless the |
279 | * caller should expect to need to provide a power of two size on an | 298 | * caller should expect to need to provide a power of two size on an |
280 | * equivalent power of two boundary. | 299 | * equivalent power of two boundary. |
281 | * | 300 | * |
282 | * If the region cannot be added either because all regions are in use | 301 | * If the region cannot be added either because all regions are in use |
283 | * or the CPU cannot support it a negative value is returned. On success | 302 | * or the CPU cannot support it a negative value is returned. On success |
284 | * the register number for this entry is returned, but should be treated | 303 | * the register number for this entry is returned, but should be treated |
285 | * as a cookie only. | 304 | * as a cookie only. |
286 | * | 305 | * |
287 | * On a multiprocessor machine the changes are made to all processors. | 306 | * On a multiprocessor machine the changes are made to all processors. |
288 | * This is required on x86 by the Intel processors. | 307 | * This is required on x86 by the Intel processors. |
289 | * | 308 | * |
290 | * The available types are | 309 | * The available types are |
291 | * | 310 | * |
292 | * %MTRR_TYPE_UNCACHABLE - No caching | 311 | * %MTRR_TYPE_UNCACHABLE - No caching |
293 | * | 312 | * |
294 | * %MTRR_TYPE_WRBACK - Write data back in bursts whenever | 313 | * %MTRR_TYPE_WRBACK - Write data back in bursts whenever |
295 | * | 314 | * |
296 | * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts | 315 | * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts |
297 | * | 316 | * |
298 | * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes | 317 | * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes |
299 | * | 318 | * |
300 | * BUGS: Needs a quiet flag for the cases where drivers do not mind | 319 | * BUGS: Needs a quiet flag for the cases where drivers do not mind |
301 | * failures and do not wish system log messages to be sent. | 320 | * failures and do not wish system log messages to be sent. |
302 | */ | 321 | */ |
303 | 322 | int mtrr_add_page(unsigned long base, unsigned long size, | |
304 | int mtrr_add_page(unsigned long base, unsigned long size, | ||
305 | unsigned int type, bool increment) | 323 | unsigned int type, bool increment) |
306 | { | 324 | { |
325 | unsigned long lbase, lsize; | ||
307 | int i, replace, error; | 326 | int i, replace, error; |
308 | mtrr_type ltype; | 327 | mtrr_type ltype; |
309 | unsigned long lbase, lsize; | ||
310 | 328 | ||
311 | if (!mtrr_if) | 329 | if (!mtrr_if) |
312 | return -ENXIO; | 330 | return -ENXIO; |
313 | 331 | ||
314 | if ((error = mtrr_if->validate_add_page(base,size,type))) | 332 | error = mtrr_if->validate_add_page(base, size, type); |
333 | if (error) | ||
315 | return error; | 334 | return error; |
316 | 335 | ||
317 | if (type >= MTRR_NUM_TYPES) { | 336 | if (type >= MTRR_NUM_TYPES) { |
318 | printk(KERN_WARNING "mtrr: type: %u invalid\n", type); | 337 | pr_warning("mtrr: type: %u invalid\n", type); |
319 | return -EINVAL; | 338 | return -EINVAL; |
320 | } | 339 | } |
321 | 340 | ||
322 | /* If the type is WC, check that this processor supports it */ | 341 | /* If the type is WC, check that this processor supports it */ |
323 | if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) { | 342 | if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) { |
324 | printk(KERN_WARNING | 343 | pr_warning("mtrr: your processor doesn't support write-combining\n"); |
325 | "mtrr: your processor doesn't support write-combining\n"); | ||
326 | return -ENOSYS; | 344 | return -ENOSYS; |
327 | } | 345 | } |
328 | 346 | ||
329 | if (!size) { | 347 | if (!size) { |
330 | printk(KERN_WARNING "mtrr: zero sized request\n"); | 348 | pr_warning("mtrr: zero sized request\n"); |
331 | return -EINVAL; | 349 | return -EINVAL; |
332 | } | 350 | } |
333 | 351 | ||
334 | if (base & size_or_mask || size & size_or_mask) { | 352 | if (base & size_or_mask || size & size_or_mask) { |
335 | printk(KERN_WARNING "mtrr: base or size exceeds the MTRR width\n"); | 353 | pr_warning("mtrr: base or size exceeds the MTRR width\n"); |
336 | return -EINVAL; | 354 | return -EINVAL; |
337 | } | 355 | } |
338 | 356 | ||
@@ -341,36 +359,40 @@ int mtrr_add_page(unsigned long base, unsigned long size, | |||
341 | 359 | ||
342 | /* No CPU hotplug when we change MTRR entries */ | 360 | /* No CPU hotplug when we change MTRR entries */ |
343 | get_online_cpus(); | 361 | get_online_cpus(); |
344 | /* Search for existing MTRR */ | 362 | |
363 | /* Search for existing MTRR */ | ||
345 | mutex_lock(&mtrr_mutex); | 364 | mutex_lock(&mtrr_mutex); |
346 | for (i = 0; i < num_var_ranges; ++i) { | 365 | for (i = 0; i < num_var_ranges; ++i) { |
347 | mtrr_if->get(i, &lbase, &lsize, &ltype); | 366 | mtrr_if->get(i, &lbase, &lsize, &ltype); |
348 | if (!lsize || base > lbase + lsize - 1 || base + size - 1 < lbase) | 367 | if (!lsize || base > lbase + lsize - 1 || |
368 | base + size - 1 < lbase) | ||
349 | continue; | 369 | continue; |
350 | /* At this point we know there is some kind of overlap/enclosure */ | 370 | /* |
371 | * At this point we know there is some kind of | ||
372 | * overlap/enclosure | ||
373 | */ | ||
351 | if (base < lbase || base + size - 1 > lbase + lsize - 1) { | 374 | if (base < lbase || base + size - 1 > lbase + lsize - 1) { |
352 | if (base <= lbase && base + size - 1 >= lbase + lsize - 1) { | 375 | if (base <= lbase && |
376 | base + size - 1 >= lbase + lsize - 1) { | ||
353 | /* New region encloses an existing region */ | 377 | /* New region encloses an existing region */ |
354 | if (type == ltype) { | 378 | if (type == ltype) { |
355 | replace = replace == -1 ? i : -2; | 379 | replace = replace == -1 ? i : -2; |
356 | continue; | 380 | continue; |
357 | } | 381 | } else if (types_compatible(type, ltype)) |
358 | else if (types_compatible(type, ltype)) | ||
359 | continue; | 382 | continue; |
360 | } | 383 | } |
361 | printk(KERN_WARNING | 384 | pr_warning("mtrr: 0x%lx000,0x%lx000 overlaps existing" |
362 | "mtrr: 0x%lx000,0x%lx000 overlaps existing" | 385 | " 0x%lx000,0x%lx000\n", base, size, lbase, |
363 | " 0x%lx000,0x%lx000\n", base, size, lbase, | 386 | lsize); |
364 | lsize); | ||
365 | goto out; | 387 | goto out; |
366 | } | 388 | } |
367 | /* New region is enclosed by an existing region */ | 389 | /* New region is enclosed by an existing region */ |
368 | if (ltype != type) { | 390 | if (ltype != type) { |
369 | if (types_compatible(type, ltype)) | 391 | if (types_compatible(type, ltype)) |
370 | continue; | 392 | continue; |
371 | printk (KERN_WARNING "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n", | 393 | pr_warning("mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n", |
372 | base, size, mtrr_attrib_to_str(ltype), | 394 | base, size, mtrr_attrib_to_str(ltype), |
373 | mtrr_attrib_to_str(type)); | 395 | mtrr_attrib_to_str(type)); |
374 | goto out; | 396 | goto out; |
375 | } | 397 | } |
376 | if (increment) | 398 | if (increment) |
@@ -378,7 +400,7 @@ int mtrr_add_page(unsigned long base, unsigned long size, | |||
378 | error = i; | 400 | error = i; |
379 | goto out; | 401 | goto out; |
380 | } | 402 | } |
381 | /* Search for an empty MTRR */ | 403 | /* Search for an empty MTRR */ |
382 | i = mtrr_if->get_free_region(base, size, replace); | 404 | i = mtrr_if->get_free_region(base, size, replace); |
383 | if (i >= 0) { | 405 | if (i >= 0) { |
384 | set_mtrr(i, base, size, type); | 406 | set_mtrr(i, base, size, type); |
@@ -393,8 +415,9 @@ int mtrr_add_page(unsigned long base, unsigned long size, | |||
393 | mtrr_usage_table[replace] = 0; | 415 | mtrr_usage_table[replace] = 0; |
394 | } | 416 | } |
395 | } | 417 | } |
396 | } else | 418 | } else { |
397 | printk(KERN_INFO "mtrr: no more MTRRs available\n"); | 419 | pr_info("mtrr: no more MTRRs available\n"); |
420 | } | ||
398 | error = i; | 421 | error = i; |
399 | out: | 422 | out: |
400 | mutex_unlock(&mtrr_mutex); | 423 | mutex_unlock(&mtrr_mutex); |
@@ -405,10 +428,8 @@ int mtrr_add_page(unsigned long base, unsigned long size, | |||
405 | static int mtrr_check(unsigned long base, unsigned long size) | 428 | static int mtrr_check(unsigned long base, unsigned long size) |
406 | { | 429 | { |
407 | if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) { | 430 | if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) { |
408 | printk(KERN_WARNING | 431 | pr_warning("mtrr: size and base must be multiples of 4 kiB\n"); |
409 | "mtrr: size and base must be multiples of 4 kiB\n"); | 432 | pr_debug("mtrr: size: 0x%lx base: 0x%lx\n", size, base); |
410 | printk(KERN_DEBUG | ||
411 | "mtrr: size: 0x%lx base: 0x%lx\n", size, base); | ||
412 | dump_stack(); | 433 | dump_stack(); |
413 | return -1; | 434 | return -1; |
414 | } | 435 | } |
@@ -416,66 +437,64 @@ static int mtrr_check(unsigned long base, unsigned long size) | |||
416 | } | 437 | } |
417 | 438 | ||
418 | /** | 439 | /** |
419 | * mtrr_add - Add a memory type region | 440 | * mtrr_add - Add a memory type region |
420 | * @base: Physical base address of region | 441 | * @base: Physical base address of region |
421 | * @size: Physical size of region | 442 | * @size: Physical size of region |
422 | * @type: Type of MTRR desired | 443 | * @type: Type of MTRR desired |
423 | * @increment: If this is true do usage counting on the region | 444 | * @increment: If this is true do usage counting on the region |
424 | * | 445 | * |
425 | * Memory type region registers control the caching on newer Intel and | 446 | * Memory type region registers control the caching on newer Intel and |
426 | * non Intel processors. This function allows drivers to request an | 447 | * non Intel processors. This function allows drivers to request an |
427 | * MTRR is added. The details and hardware specifics of each processor's | 448 | * MTRR is added. The details and hardware specifics of each processor's |
428 | * implementation are hidden from the caller, but nevertheless the | 449 | * implementation are hidden from the caller, but nevertheless the |
429 | * caller should expect to need to provide a power of two size on an | 450 | * caller should expect to need to provide a power of two size on an |
430 | * equivalent power of two boundary. | 451 | * equivalent power of two boundary. |
431 | * | 452 | * |
432 | * If the region cannot be added either because all regions are in use | 453 | * If the region cannot be added either because all regions are in use |
433 | * or the CPU cannot support it a negative value is returned. On success | 454 | * or the CPU cannot support it a negative value is returned. On success |
434 | * the register number for this entry is returned, but should be treated | 455 | * the register number for this entry is returned, but should be treated |
435 | * as a cookie only. | 456 | * as a cookie only. |
436 | * | 457 | * |
437 | * On a multiprocessor machine the changes are made to all processors. | 458 | * On a multiprocessor machine the changes are made to all processors. |
438 | * This is required on x86 by the Intel processors. | 459 | * This is required on x86 by the Intel processors. |
439 | * | 460 | * |
440 | * The available types are | 461 | * The available types are |
441 | * | 462 | * |
442 | * %MTRR_TYPE_UNCACHABLE - No caching | 463 | * %MTRR_TYPE_UNCACHABLE - No caching |
443 | * | 464 | * |
444 | * %MTRR_TYPE_WRBACK - Write data back in bursts whenever | 465 | * %MTRR_TYPE_WRBACK - Write data back in bursts whenever |
445 | * | 466 | * |
446 | * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts | 467 | * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts |
447 | * | 468 | * |
448 | * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes | 469 | * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes |
449 | * | 470 | * |
450 | * BUGS: Needs a quiet flag for the cases where drivers do not mind | 471 | * BUGS: Needs a quiet flag for the cases where drivers do not mind |
451 | * failures and do not wish system log messages to be sent. | 472 | * failures and do not wish system log messages to be sent. |
452 | */ | 473 | */ |
453 | 474 | int mtrr_add(unsigned long base, unsigned long size, unsigned int type, | |
454 | int | 475 | bool increment) |
455 | mtrr_add(unsigned long base, unsigned long size, unsigned int type, | ||
456 | bool increment) | ||
457 | { | 476 | { |
458 | if (mtrr_check(base, size)) | 477 | if (mtrr_check(base, size)) |
459 | return -EINVAL; | 478 | return -EINVAL; |
460 | return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type, | 479 | return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type, |
461 | increment); | 480 | increment); |
462 | } | 481 | } |
482 | EXPORT_SYMBOL(mtrr_add); | ||
463 | 483 | ||
464 | /** | 484 | /** |
465 | * mtrr_del_page - delete a memory type region | 485 | * mtrr_del_page - delete a memory type region |
466 | * @reg: Register returned by mtrr_add | 486 | * @reg: Register returned by mtrr_add |
467 | * @base: Physical base address | 487 | * @base: Physical base address |
468 | * @size: Size of region | 488 | * @size: Size of region |
469 | * | 489 | * |
470 | * If register is supplied then base and size are ignored. This is | 490 | * If register is supplied then base and size are ignored. This is |
471 | * how drivers should call it. | 491 | * how drivers should call it. |
472 | * | 492 | * |
473 | * Releases an MTRR region. If the usage count drops to zero the | 493 | * Releases an MTRR region. If the usage count drops to zero the |
474 | * register is freed and the region returns to default state. | 494 | * register is freed and the region returns to default state. |
475 | * On success the register is returned, on failure a negative error | 495 | * On success the register is returned, on failure a negative error |
476 | * code. | 496 | * code. |
477 | */ | 497 | */ |
478 | |||
479 | int mtrr_del_page(int reg, unsigned long base, unsigned long size) | 498 | int mtrr_del_page(int reg, unsigned long base, unsigned long size) |
480 | { | 499 | { |
481 | int i, max; | 500 | int i, max; |
@@ -500,22 +519,22 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size) | |||
500 | } | 519 | } |
501 | } | 520 | } |
502 | if (reg < 0) { | 521 | if (reg < 0) { |
503 | printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base, | 522 | pr_debug("mtrr: no MTRR for %lx000,%lx000 found\n", |
504 | size); | 523 | base, size); |
505 | goto out; | 524 | goto out; |
506 | } | 525 | } |
507 | } | 526 | } |
508 | if (reg >= max) { | 527 | if (reg >= max) { |
509 | printk(KERN_WARNING "mtrr: register: %d too big\n", reg); | 528 | pr_warning("mtrr: register: %d too big\n", reg); |
510 | goto out; | 529 | goto out; |
511 | } | 530 | } |
512 | mtrr_if->get(reg, &lbase, &lsize, <ype); | 531 | mtrr_if->get(reg, &lbase, &lsize, <ype); |
513 | if (lsize < 1) { | 532 | if (lsize < 1) { |
514 | printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg); | 533 | pr_warning("mtrr: MTRR %d not used\n", reg); |
515 | goto out; | 534 | goto out; |
516 | } | 535 | } |
517 | if (mtrr_usage_table[reg] < 1) { | 536 | if (mtrr_usage_table[reg] < 1) { |
518 | printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg); | 537 | pr_warning("mtrr: reg: %d has count=0\n", reg); |
519 | goto out; | 538 | goto out; |
520 | } | 539 | } |
521 | if (--mtrr_usage_table[reg] < 1) | 540 | if (--mtrr_usage_table[reg] < 1) |
@@ -526,33 +545,31 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size) | |||
526 | put_online_cpus(); | 545 | put_online_cpus(); |
527 | return error; | 546 | return error; |
528 | } | 547 | } |
548 | |||
529 | /** | 549 | /** |
530 | * mtrr_del - delete a memory type region | 550 | * mtrr_del - delete a memory type region |
531 | * @reg: Register returned by mtrr_add | 551 | * @reg: Register returned by mtrr_add |
532 | * @base: Physical base address | 552 | * @base: Physical base address |
533 | * @size: Size of region | 553 | * @size: Size of region |
534 | * | 554 | * |
535 | * If register is supplied then base and size are ignored. This is | 555 | * If register is supplied then base and size are ignored. This is |
536 | * how drivers should call it. | 556 | * how drivers should call it. |
537 | * | 557 | * |
538 | * Releases an MTRR region. If the usage count drops to zero the | 558 | * Releases an MTRR region. If the usage count drops to zero the |
539 | * register is freed and the region returns to default state. | 559 | * register is freed and the region returns to default state. |
540 | * On success the register is returned, on failure a negative error | 560 | * On success the register is returned, on failure a negative error |
541 | * code. | 561 | * code. |
542 | */ | 562 | */ |
543 | 563 | int mtrr_del(int reg, unsigned long base, unsigned long size) | |
544 | int | ||
545 | mtrr_del(int reg, unsigned long base, unsigned long size) | ||
546 | { | 564 | { |
547 | if (mtrr_check(base, size)) | 565 | if (mtrr_check(base, size)) |
548 | return -EINVAL; | 566 | return -EINVAL; |
549 | return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT); | 567 | return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT); |
550 | } | 568 | } |
551 | |||
552 | EXPORT_SYMBOL(mtrr_add); | ||
553 | EXPORT_SYMBOL(mtrr_del); | 569 | EXPORT_SYMBOL(mtrr_del); |
554 | 570 | ||
555 | /* HACK ALERT! | 571 | /* |
572 | * HACK ALERT! | ||
556 | * These should be called implicitly, but we can't yet until all the initcall | 573 | * These should be called implicitly, but we can't yet until all the initcall |
557 | * stuff is done... | 574 | * stuff is done... |
558 | */ | 575 | */ |
@@ -576,29 +593,28 @@ struct mtrr_value { | |||
576 | 593 | ||
577 | static struct mtrr_value mtrr_value[MTRR_MAX_VAR_RANGES]; | 594 | static struct mtrr_value mtrr_value[MTRR_MAX_VAR_RANGES]; |
578 | 595 | ||
579 | static int mtrr_save(struct sys_device * sysdev, pm_message_t state) | 596 | static int mtrr_save(struct sys_device *sysdev, pm_message_t state) |
580 | { | 597 | { |
581 | int i; | 598 | int i; |
582 | 599 | ||
583 | for (i = 0; i < num_var_ranges; i++) { | 600 | for (i = 0; i < num_var_ranges; i++) { |
584 | mtrr_if->get(i, | 601 | mtrr_if->get(i, &mtrr_value[i].lbase, |
585 | &mtrr_value[i].lbase, | 602 | &mtrr_value[i].lsize, |
586 | &mtrr_value[i].lsize, | 603 | &mtrr_value[i].ltype); |
587 | &mtrr_value[i].ltype); | ||
588 | } | 604 | } |
589 | return 0; | 605 | return 0; |
590 | } | 606 | } |
591 | 607 | ||
592 | static int mtrr_restore(struct sys_device * sysdev) | 608 | static int mtrr_restore(struct sys_device *sysdev) |
593 | { | 609 | { |
594 | int i; | 610 | int i; |
595 | 611 | ||
596 | for (i = 0; i < num_var_ranges; i++) { | 612 | for (i = 0; i < num_var_ranges; i++) { |
597 | if (mtrr_value[i].lsize) | 613 | if (mtrr_value[i].lsize) { |
598 | set_mtrr(i, | 614 | set_mtrr(i, mtrr_value[i].lbase, |
599 | mtrr_value[i].lbase, | 615 | mtrr_value[i].lsize, |
600 | mtrr_value[i].lsize, | 616 | mtrr_value[i].ltype); |
601 | mtrr_value[i].ltype); | 617 | } |
602 | } | 618 | } |
603 | return 0; | 619 | return 0; |
604 | } | 620 | } |
@@ -615,26 +631,29 @@ int __initdata changed_by_mtrr_cleanup; | |||
615 | /** | 631 | /** |
616 | * mtrr_bp_init - initialize mtrrs on the boot CPU | 632 | * mtrr_bp_init - initialize mtrrs on the boot CPU |
617 | * | 633 | * |
618 | * This needs to be called early; before any of the other CPUs are | 634 | * This needs to be called early; before any of the other CPUs are |
619 | * initialized (i.e. before smp_init()). | 635 | * initialized (i.e. before smp_init()). |
620 | * | 636 | * |
621 | */ | 637 | */ |
622 | void __init mtrr_bp_init(void) | 638 | void __init mtrr_bp_init(void) |
623 | { | 639 | { |
624 | u32 phys_addr; | 640 | u32 phys_addr; |
641 | |||
625 | init_ifs(); | 642 | init_ifs(); |
626 | 643 | ||
627 | phys_addr = 32; | 644 | phys_addr = 32; |
628 | 645 | ||
629 | if (cpu_has_mtrr) { | 646 | if (cpu_has_mtrr) { |
630 | mtrr_if = &generic_mtrr_ops; | 647 | mtrr_if = &generic_mtrr_ops; |
631 | size_or_mask = 0xff000000; /* 36 bits */ | 648 | size_or_mask = 0xff000000; /* 36 bits */ |
632 | size_and_mask = 0x00f00000; | 649 | size_and_mask = 0x00f00000; |
633 | phys_addr = 36; | 650 | phys_addr = 36; |
634 | 651 | ||
635 | /* This is an AMD specific MSR, but we assume(hope?) that | 652 | /* |
636 | Intel will implement it to when they extend the address | 653 | * This is an AMD specific MSR, but we assume(hope?) that |
637 | bus of the Xeon. */ | 654 | * Intel will implement it too when they extend the address
655 | * bus of the Xeon. | ||
656 | */ | ||
638 | if (cpuid_eax(0x80000000) >= 0x80000008) { | 657 | if (cpuid_eax(0x80000000) >= 0x80000008) { |
639 | phys_addr = cpuid_eax(0x80000008) & 0xff; | 658 | phys_addr = cpuid_eax(0x80000008) & 0xff; |
640 | /* CPUID workaround for Intel 0F33/0F34 CPU */ | 659 | /* CPUID workaround for Intel 0F33/0F34 CPU */ |
@@ -649,9 +668,11 @@ void __init mtrr_bp_init(void) | |||
649 | size_and_mask = ~size_or_mask & 0xfffff00000ULL; | 668 | size_and_mask = ~size_or_mask & 0xfffff00000ULL; |
650 | } else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR && | 669 | } else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR && |
651 | boot_cpu_data.x86 == 6) { | 670 | boot_cpu_data.x86 == 6) { |
652 | /* VIA C* family have Intel style MTRRs, but | 671 | /* |
653 | don't support PAE */ | 672 | * VIA C* family have Intel style MTRRs, |
654 | size_or_mask = 0xfff00000; /* 32 bits */ | 673 | * but don't support PAE |
674 | */ | ||
675 | size_or_mask = 0xfff00000; /* 32 bits */ | ||
655 | size_and_mask = 0; | 676 | size_and_mask = 0; |
656 | phys_addr = 32; | 677 | phys_addr = 32; |
657 | } | 678 | } |
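
For orientation, the masks in this function are in 4 KiB page-frame units (this is a reading of the code, not stated in the patch): with PAGE_SHIFT = 12, a 36-bit physical address space leaves 36 - 12 = 24 valid PFN bits, so size_or_mask forces on the invalid upper bits of a 32-bit PFN value.

	/* Worked examples, assuming PFN-based masks: */
	/* 36-bit phys: 24 valid PFN bits -> size_or_mask = 0xff000000 */
	/* 32-bit phys: 20 valid PFN bits -> size_or_mask = 0xfff00000 */
	/* generic:     size_and_mask = ~size_or_mask & 0xfffff00000ULL */

The VIA branch above is simply the 32-bit row of this table.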
@@ -694,7 +715,6 @@ void __init mtrr_bp_init(void) | |||
694 | changed_by_mtrr_cleanup = 1; | 715 | changed_by_mtrr_cleanup = 1; |
695 | mtrr_if->set_all(); | 716 | mtrr_if->set_all(); |
696 | } | 717 | } |
697 | |||
698 | } | 718 | } |
699 | } | 719 | } |
700 | } | 720 | } |
@@ -706,12 +726,17 @@ void mtrr_ap_init(void) | |||
706 | if (!mtrr_if || !use_intel()) | 726 | if (!mtrr_if || !use_intel()) |
707 | return; | 727 | return; |
708 | /* | 728 | /* |
709 | * Ideally we should hold mtrr_mutex here to avoid mtrr entries changed, | 729 | * Ideally we should hold mtrr_mutex here to avoid mtrr entries |
710 | * but this routine will be called in cpu boot time, holding the lock | 730 | * changed, but this routine will be called at cpu boot time,
711 | * breaks it. This routine is called in two cases: 1.very earily time | 731 | * holding the lock breaks it. |
712 | * of software resume, when there absolutely isn't mtrr entry changes; | 732 | * |
713 | * 2.cpu hotadd time. We let mtrr_add/del_page hold cpuhotplug lock to | 733 | * This routine is called in two cases: |
714 | * prevent mtrr entry changes | 734 | * |
735 | * 1. very early time of software resume, when there absolutely | ||
736 | * are no mtrr entry changes; | ||
737 | * | ||
738 | * 2. cpu hotadd time. We let mtrr_add/del_page hold cpuhotplug | ||
739 | * lock to prevent mtrr entry changes | ||
715 | */ | 740 | */ |
716 | local_irq_save(flags); | 741 | local_irq_save(flags); |
717 | 742 | ||
@@ -732,19 +757,23 @@ static int __init mtrr_init_finialize(void) | |||
732 | { | 757 | { |
733 | if (!mtrr_if) | 758 | if (!mtrr_if) |
734 | return 0; | 759 | return 0; |
760 | |||
735 | if (use_intel()) { | 761 | if (use_intel()) { |
736 | if (!changed_by_mtrr_cleanup) | 762 | if (!changed_by_mtrr_cleanup) |
737 | mtrr_state_warn(); | 763 | mtrr_state_warn(); |
738 | } else { | 764 | return 0; |
739 | /* The CPUs haven't MTRR and seem to not support SMP. They have | ||
740 | * specific drivers, we use a tricky method to support | ||
741 | * suspend/resume for them. | ||
742 | * TBD: is there any system with such CPU which supports | ||
743 | * suspend/resume? if no, we should remove the code. | ||
744 | */ | ||
745 | sysdev_driver_register(&cpu_sysdev_class, | ||
746 | &mtrr_sysdev_driver); | ||
747 | } | 765 | } |
766 | |||
767 | /* | ||
768 | * These CPUs have no MTRRs and seem not to support SMP. They | ||
769 | * have specific drivers; we use a tricky method to support | ||
770 | * suspend/resume for them. | ||
771 | * | ||
772 | * TBD: is there any system with such a CPU which supports | ||
773 | * suspend/resume? If not, we should remove the code. | ||
774 | */ | ||
775 | sysdev_driver_register(&cpu_sysdev_class, &mtrr_sysdev_driver); | ||
776 | |||
748 | return 0; | 777 | return 0; |
749 | } | 778 | } |
750 | subsys_initcall(mtrr_init_finialize); | 779 | subsys_initcall(mtrr_init_finialize); |
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h index 7538b767f206..a501dee9a87a 100644 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * local mtrr defines. | 2 | * local MTRR defines. |
3 | */ | 3 | */ |
4 | 4 | ||
5 | #include <linux/types.h> | 5 | #include <linux/types.h> |
@@ -14,13 +14,12 @@ extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES]; | |||
14 | struct mtrr_ops { | 14 | struct mtrr_ops { |
15 | u32 vendor; | 15 | u32 vendor; |
16 | u32 use_intel_if; | 16 | u32 use_intel_if; |
17 | // void (*init)(void); | ||
18 | void (*set)(unsigned int reg, unsigned long base, | 17 | void (*set)(unsigned int reg, unsigned long base, |
19 | unsigned long size, mtrr_type type); | 18 | unsigned long size, mtrr_type type); |
20 | void (*set_all)(void); | 19 | void (*set_all)(void); |
21 | 20 | ||
22 | void (*get)(unsigned int reg, unsigned long *base, | 21 | void (*get)(unsigned int reg, unsigned long *base, |
23 | unsigned long *size, mtrr_type * type); | 22 | unsigned long *size, mtrr_type *type); |
24 | int (*get_free_region)(unsigned long base, unsigned long size, | 23 | int (*get_free_region)(unsigned long base, unsigned long size, |
25 | int replace_reg); | 24 | int replace_reg); |
26 | int (*validate_add_page)(unsigned long base, unsigned long size, | 25 | int (*validate_add_page)(unsigned long base, unsigned long size, |
@@ -39,11 +38,11 @@ extern int positive_have_wrcomb(void); | |||
39 | 38 | ||
40 | /* library functions for processor-specific routines */ | 39 | /* library functions for processor-specific routines */ |
41 | struct set_mtrr_context { | 40 | struct set_mtrr_context { |
42 | unsigned long flags; | 41 | unsigned long flags; |
43 | unsigned long cr4val; | 42 | unsigned long cr4val; |
44 | u32 deftype_lo; | 43 | u32 deftype_lo; |
45 | u32 deftype_hi; | 44 | u32 deftype_hi; |
46 | u32 ccr3; | 45 | u32 ccr3; |
47 | }; | 46 | }; |
48 | 47 | ||
49 | void set_mtrr_done(struct set_mtrr_context *ctxt); | 48 | void set_mtrr_done(struct set_mtrr_context *ctxt); |
@@ -54,10 +53,10 @@ void fill_mtrr_var_range(unsigned int index, | |||
54 | u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi); | 53 | u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi); |
55 | void get_mtrr_state(void); | 54 | void get_mtrr_state(void); |
56 | 55 | ||
57 | extern void set_mtrr_ops(struct mtrr_ops * ops); | 56 | extern void set_mtrr_ops(struct mtrr_ops *ops); |
58 | 57 | ||
59 | extern u64 size_or_mask, size_and_mask; | 58 | extern u64 size_or_mask, size_and_mask; |
60 | extern struct mtrr_ops * mtrr_if; | 59 | extern struct mtrr_ops *mtrr_if; |
61 | 60 | ||
62 | #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd) | 61 | #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd) |
63 | #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1) | 62 | #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1) |
diff --git a/arch/x86/kernel/cpu/mtrr/state.c b/arch/x86/kernel/cpu/mtrr/state.c index 1f5fb1588d1f..dfc80b4e6b0d 100644 --- a/arch/x86/kernel/cpu/mtrr/state.c +++ b/arch/x86/kernel/cpu/mtrr/state.c | |||
@@ -1,24 +1,25 @@ | |||
1 | #include <linux/mm.h> | ||
2 | #include <linux/init.h> | 1 | #include <linux/init.h> |
3 | #include <asm/io.h> | 2 | #include <linux/io.h> |
4 | #include <asm/mtrr.h> | 3 | #include <linux/mm.h> |
5 | #include <asm/msr.h> | 4 | |
6 | #include <asm/processor-cyrix.h> | 5 | #include <asm/processor-cyrix.h> |
7 | #include <asm/processor-flags.h> | 6 | #include <asm/processor-flags.h> |
8 | #include "mtrr.h" | 7 | #include <asm/mtrr.h> |
8 | #include <asm/msr.h> | ||
9 | 9 | ||
10 | #include "mtrr.h" | ||
10 | 11 | ||
11 | /* Put the processor into a state where MTRRs can be safely set */ | 12 | /* Put the processor into a state where MTRRs can be safely set */ |
12 | void set_mtrr_prepare_save(struct set_mtrr_context *ctxt) | 13 | void set_mtrr_prepare_save(struct set_mtrr_context *ctxt) |
13 | { | 14 | { |
14 | unsigned int cr0; | 15 | unsigned int cr0; |
15 | 16 | ||
16 | /* Disable interrupts locally */ | 17 | /* Disable interrupts locally */ |
17 | local_irq_save(ctxt->flags); | 18 | local_irq_save(ctxt->flags); |
18 | 19 | ||
19 | if (use_intel() || is_cpu(CYRIX)) { | 20 | if (use_intel() || is_cpu(CYRIX)) { |
20 | 21 | ||
21 | /* Save value of CR4 and clear Page Global Enable (bit 7) */ | 22 | /* Save value of CR4 and clear Page Global Enable (bit 7) */ |
22 | if (cpu_has_pge) { | 23 | if (cpu_has_pge) { |
23 | ctxt->cr4val = read_cr4(); | 24 | ctxt->cr4val = read_cr4(); |
24 | write_cr4(ctxt->cr4val & ~X86_CR4_PGE); | 25 | write_cr4(ctxt->cr4val & ~X86_CR4_PGE); |
@@ -33,50 +34,61 @@ void set_mtrr_prepare_save(struct set_mtrr_context *ctxt) | |||
33 | write_cr0(cr0); | 34 | write_cr0(cr0); |
34 | wbinvd(); | 35 | wbinvd(); |
35 | 36 | ||
36 | if (use_intel()) | 37 | if (use_intel()) { |
37 | /* Save MTRR state */ | 38 | /* Save MTRR state */ |
38 | rdmsr(MSR_MTRRdefType, ctxt->deftype_lo, ctxt->deftype_hi); | 39 | rdmsr(MSR_MTRRdefType, ctxt->deftype_lo, ctxt->deftype_hi); |
39 | else | 40 | } else { |
40 | /* Cyrix ARRs - everything else were excluded at the top */ | 41 | /* |
42 | * Cyrix ARRs - | ||
43 | * everything else was excluded at the top | ||
44 | */ | ||
41 | ctxt->ccr3 = getCx86(CX86_CCR3); | 45 | ctxt->ccr3 = getCx86(CX86_CCR3); |
46 | } | ||
42 | } | 47 | } |
43 | } | 48 | } |
44 | 49 | ||
45 | void set_mtrr_cache_disable(struct set_mtrr_context *ctxt) | 50 | void set_mtrr_cache_disable(struct set_mtrr_context *ctxt) |
46 | { | 51 | { |
47 | if (use_intel()) | 52 | if (use_intel()) { |
48 | /* Disable MTRRs, and set the default type to uncached */ | 53 | /* Disable MTRRs, and set the default type to uncached */ |
49 | mtrr_wrmsr(MSR_MTRRdefType, ctxt->deftype_lo & 0xf300UL, | 54 | mtrr_wrmsr(MSR_MTRRdefType, ctxt->deftype_lo & 0xf300UL, |
50 | ctxt->deftype_hi); | 55 | ctxt->deftype_hi); |
51 | else if (is_cpu(CYRIX)) | 56 | } else { |
52 | /* Cyrix ARRs - everything else were excluded at the top */ | 57 | if (is_cpu(CYRIX)) { |
53 | setCx86(CX86_CCR3, (ctxt->ccr3 & 0x0f) | 0x10); | 58 | /* Cyrix ARRs - everything else was excluded at the top */
59 | setCx86(CX86_CCR3, (ctxt->ccr3 & 0x0f) | 0x10); | ||
60 | } | ||
61 | } | ||
54 | } | 62 | } |
55 | 63 | ||
56 | /* Restore the processor after a set_mtrr_prepare */ | 64 | /* Restore the processor after a set_mtrr_prepare */ |
57 | void set_mtrr_done(struct set_mtrr_context *ctxt) | 65 | void set_mtrr_done(struct set_mtrr_context *ctxt) |
58 | { | 66 | { |
59 | if (use_intel() || is_cpu(CYRIX)) { | 67 | if (use_intel() || is_cpu(CYRIX)) { |
60 | 68 | ||
61 | /* Flush caches and TLBs */ | 69 | /* Flush caches and TLBs */ |
62 | wbinvd(); | 70 | wbinvd(); |
63 | 71 | ||
64 | /* Restore MTRRdefType */ | 72 | /* Restore MTRRdefType */ |
65 | if (use_intel()) | 73 | if (use_intel()) { |
66 | /* Intel (P6) standard MTRRs */ | 74 | /* Intel (P6) standard MTRRs */ |
67 | mtrr_wrmsr(MSR_MTRRdefType, ctxt->deftype_lo, ctxt->deftype_hi); | 75 | mtrr_wrmsr(MSR_MTRRdefType, ctxt->deftype_lo, |
68 | else | 76 | ctxt->deftype_hi); |
69 | /* Cyrix ARRs - everything else was excluded at the top */ | 77 | } else { |
78 | /* | ||
79 | * Cyrix ARRs - | ||
80 | * everything else was excluded at the top | ||
81 | */ | ||
70 | setCx86(CX86_CCR3, ctxt->ccr3); | 82 | setCx86(CX86_CCR3, ctxt->ccr3); |
83 | } | ||
71 | 84 | ||
72 | /* Enable caches */ | 85 | /* Enable caches */ |
73 | write_cr0(read_cr0() & 0xbfffffff); | 86 | write_cr0(read_cr0() & 0xbfffffff); |
74 | 87 | ||
75 | /* Restore value of CR4 */ | 88 | /* Restore value of CR4 */ |
76 | if (cpu_has_pge) | 89 | if (cpu_has_pge) |
77 | write_cr4(ctxt->cr4val); | 90 | write_cr4(ctxt->cr4val); |
78 | } | 91 | } |
79 | /* Re-enable interrupts locally (if enabled previously) */ | 92 | /* Re-enable interrupts locally (if enabled previously) */ |
80 | local_irq_restore(ctxt->flags); | 93 | local_irq_restore(ctxt->flags); |
81 | } | 94 | } |
82 | |||
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 900332b800f8..f9cd0849bd42 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c | |||
@@ -6,6 +6,7 @@ | |||
6 | * Copyright (C) 2009 Jaswinder Singh Rajput | 6 | * Copyright (C) 2009 Jaswinder Singh Rajput |
7 | * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter | 7 | * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter |
8 | * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> | 8 | * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> |
9 | * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com> | ||
9 | * | 10 | * |
10 | * For licencing details see kernel-base/COPYING | 11 | * For licencing details see kernel-base/COPYING |
11 | */ | 12 | */ |
@@ -20,6 +21,7 @@ | |||
20 | #include <linux/sched.h> | 21 | #include <linux/sched.h> |
21 | #include <linux/uaccess.h> | 22 | #include <linux/uaccess.h> |
22 | #include <linux/highmem.h> | 23 | #include <linux/highmem.h> |
24 | #include <linux/cpu.h> | ||
23 | 25 | ||
24 | #include <asm/apic.h> | 26 | #include <asm/apic.h> |
25 | #include <asm/stacktrace.h> | 27 | #include <asm/stacktrace.h> |
@@ -27,12 +29,52 @@ | |||
27 | 29 | ||
28 | static u64 perf_counter_mask __read_mostly; | 30 | static u64 perf_counter_mask __read_mostly; |
29 | 31 | ||
32 | /* The maximal number of PEBS counters: */ | ||
33 | #define MAX_PEBS_COUNTERS 4 | ||
34 | |||
35 | /* The size of a BTS record in bytes: */ | ||
36 | #define BTS_RECORD_SIZE 24 | ||
37 | |||
38 | /* The size of a per-cpu BTS buffer in bytes: */ | ||
39 | #define BTS_BUFFER_SIZE (BTS_RECORD_SIZE * 1024) | ||
40 | |||
41 | /* The BTS overflow threshold in bytes from the end of the buffer: */ | ||
42 | #define BTS_OVFL_TH (BTS_RECORD_SIZE * 64) | ||
43 | |||
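
Worked through, the constants above mean each per-CPU buffer holds BTS_BUFFER_SIZE / BTS_RECORD_SIZE = 1024 records (24 KiB), and the overflow interrupt is requested 64 records before the hard end of the buffer, leaving headroom for records that land while the PMI is being delivered. (Arithmetic derived from the definitions; the record layout itself -- from, to, flags -- is decoded by the drain routine later in this patch.)

	/* 24 * 1024 = 24576 bytes per CPU; threshold at 24 * (1024 - 64) */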
44 | |||
45 | /* | ||
46 | * Bits in the debugctlmsr controlling branch tracing. | ||
47 | */ | ||
48 | #define X86_DEBUGCTL_TR (1 << 6) | ||
49 | #define X86_DEBUGCTL_BTS (1 << 7) | ||
50 | #define X86_DEBUGCTL_BTINT (1 << 8) | ||
51 | #define X86_DEBUGCTL_BTS_OFF_OS (1 << 9) | ||
52 | #define X86_DEBUGCTL_BTS_OFF_USR (1 << 10) | ||
53 | |||
54 | /* | ||
55 | * A debug store configuration. | ||
56 | * | ||
57 | * We only support architectures that use 64bit fields. | ||
58 | */ | ||
59 | struct debug_store { | ||
60 | u64 bts_buffer_base; | ||
61 | u64 bts_index; | ||
62 | u64 bts_absolute_maximum; | ||
63 | u64 bts_interrupt_threshold; | ||
64 | u64 pebs_buffer_base; | ||
65 | u64 pebs_index; | ||
66 | u64 pebs_absolute_maximum; | ||
67 | u64 pebs_interrupt_threshold; | ||
68 | u64 pebs_counter_reset[MAX_PEBS_COUNTERS]; | ||
69 | }; | ||
70 | |||
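
This struct mirrors the hardware's debug-store management area: the CPU is handed its linear address (via MSR_IA32_DS_AREA, see init_debug_store_on_cpu() further down) and then advances the index fields on its own. Roughly, paraphrasing the documented BTS behaviour rather than code in this patch:

	/* per taken branch, while BTS is enabled:
	 *   write record at bts_index;
	 *   bts_index += BTS_RECORD_SIZE;
	 *   if (bts_index >= bts_interrupt_threshold)
	 *           raise PMI;
	 *   wrap or stop at bts_absolute_maximum, depending on setup;
	 */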
30 | struct cpu_hw_counters { | 71 | struct cpu_hw_counters { |
31 | struct perf_counter *counters[X86_PMC_IDX_MAX]; | 72 | struct perf_counter *counters[X86_PMC_IDX_MAX]; |
32 | unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; | 73 | unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; |
33 | unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; | 74 | unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; |
34 | unsigned long interrupts; | 75 | unsigned long interrupts; |
35 | int enabled; | 76 | int enabled; |
77 | struct debug_store *ds; | ||
36 | }; | 78 | }; |
37 | 79 | ||
38 | /* | 80 | /* |
@@ -58,6 +100,8 @@ struct x86_pmu { | |||
58 | int apic; | 100 | int apic; |
59 | u64 max_period; | 101 | u64 max_period; |
60 | u64 intel_ctrl; | 102 | u64 intel_ctrl; |
103 | void (*enable_bts)(u64 config); | ||
104 | void (*disable_bts)(void); | ||
61 | }; | 105 | }; |
62 | 106 | ||
63 | static struct x86_pmu x86_pmu __read_mostly; | 107 | static struct x86_pmu x86_pmu __read_mostly; |
@@ -577,6 +621,9 @@ x86_perf_counter_update(struct perf_counter *counter, | |||
577 | u64 prev_raw_count, new_raw_count; | 621 | u64 prev_raw_count, new_raw_count; |
578 | s64 delta; | 622 | s64 delta; |
579 | 623 | ||
624 | if (idx == X86_PMC_IDX_FIXED_BTS) | ||
625 | return 0; | ||
626 | |||
580 | /* | 627 | /* |
581 | * Careful: an NMI might modify the previous counter value. | 628 | * Careful: an NMI might modify the previous counter value. |
582 | * | 629 | * |
@@ -666,10 +713,110 @@ static void release_pmc_hardware(void) | |||
666 | #endif | 713 | #endif |
667 | } | 714 | } |
668 | 715 | ||
716 | static inline bool bts_available(void) | ||
717 | { | ||
718 | return x86_pmu.enable_bts != NULL; | ||
719 | } | ||
720 | |||
721 | static inline void init_debug_store_on_cpu(int cpu) | ||
722 | { | ||
723 | struct debug_store *ds = per_cpu(cpu_hw_counters, cpu).ds; | ||
724 | |||
725 | if (!ds) | ||
726 | return; | ||
727 | |||
728 | wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, | ||
729 | (u32)((u64)(unsigned long)ds), | ||
730 | (u32)((u64)(unsigned long)ds >> 32)); | ||
731 | } | ||
732 | |||
733 | static inline void fini_debug_store_on_cpu(int cpu) | ||
734 | { | ||
735 | if (!per_cpu(cpu_hw_counters, cpu).ds) | ||
736 | return; | ||
737 | |||
738 | wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0); | ||
739 | } | ||
740 | |||
741 | static void release_bts_hardware(void) | ||
742 | { | ||
743 | int cpu; | ||
744 | |||
745 | if (!bts_available()) | ||
746 | return; | ||
747 | |||
748 | get_online_cpus(); | ||
749 | |||
750 | for_each_online_cpu(cpu) | ||
751 | fini_debug_store_on_cpu(cpu); | ||
752 | |||
753 | for_each_possible_cpu(cpu) { | ||
754 | struct debug_store *ds = per_cpu(cpu_hw_counters, cpu).ds; | ||
755 | |||
756 | if (!ds) | ||
757 | continue; | ||
758 | |||
759 | per_cpu(cpu_hw_counters, cpu).ds = NULL; | ||
760 | |||
761 | kfree((void *)(unsigned long)ds->bts_buffer_base); | ||
762 | kfree(ds); | ||
763 | } | ||
764 | |||
765 | put_online_cpus(); | ||
766 | } | ||
767 | |||
768 | static int reserve_bts_hardware(void) | ||
769 | { | ||
770 | int cpu, err = 0; | ||
771 | |||
772 | if (!bts_available()) | ||
773 | return 0; | ||
774 | |||
775 | get_online_cpus(); | ||
776 | |||
777 | for_each_possible_cpu(cpu) { | ||
778 | struct debug_store *ds; | ||
779 | void *buffer; | ||
780 | |||
781 | err = -ENOMEM; | ||
782 | buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL); | ||
783 | if (unlikely(!buffer)) | ||
784 | break; | ||
785 | |||
786 | ds = kzalloc(sizeof(*ds), GFP_KERNEL); | ||
787 | if (unlikely(!ds)) { | ||
788 | kfree(buffer); | ||
789 | break; | ||
790 | } | ||
791 | |||
792 | ds->bts_buffer_base = (u64)(unsigned long)buffer; | ||
793 | ds->bts_index = ds->bts_buffer_base; | ||
794 | ds->bts_absolute_maximum = | ||
795 | ds->bts_buffer_base + BTS_BUFFER_SIZE; | ||
796 | ds->bts_interrupt_threshold = | ||
797 | ds->bts_absolute_maximum - BTS_OVFL_TH; | ||
798 | |||
799 | per_cpu(cpu_hw_counters, cpu).ds = ds; | ||
800 | err = 0; | ||
801 | } | ||
802 | |||
803 | if (err) | ||
804 | release_bts_hardware(); | ||
805 | else { | ||
806 | for_each_online_cpu(cpu) | ||
807 | init_debug_store_on_cpu(cpu); | ||
808 | } | ||
809 | |||
810 | put_online_cpus(); | ||
811 | |||
812 | return err; | ||
813 | } | ||
814 | |||
669 | static void hw_perf_counter_destroy(struct perf_counter *counter) | 815 | static void hw_perf_counter_destroy(struct perf_counter *counter) |
670 | { | 816 | { |
671 | if (atomic_dec_and_mutex_lock(&active_counters, &pmc_reserve_mutex)) { | 817 | if (atomic_dec_and_mutex_lock(&active_counters, &pmc_reserve_mutex)) { |
672 | release_pmc_hardware(); | 818 | release_pmc_hardware(); |
819 | release_bts_hardware(); | ||
673 | mutex_unlock(&pmc_reserve_mutex); | 820 | mutex_unlock(&pmc_reserve_mutex); |
674 | } | 821 | } |
675 | } | 822 | } |
@@ -712,6 +859,42 @@ set_ext_hw_attr(struct hw_perf_counter *hwc, struct perf_counter_attr *attr) | |||
712 | return 0; | 859 | return 0; |
713 | } | 860 | } |
714 | 861 | ||
862 | static void intel_pmu_enable_bts(u64 config) | ||
863 | { | ||
864 | unsigned long debugctlmsr; | ||
865 | |||
866 | debugctlmsr = get_debugctlmsr(); | ||
867 | |||
868 | debugctlmsr |= X86_DEBUGCTL_TR; | ||
869 | debugctlmsr |= X86_DEBUGCTL_BTS; | ||
870 | debugctlmsr |= X86_DEBUGCTL_BTINT; | ||
871 | |||
872 | if (!(config & ARCH_PERFMON_EVENTSEL_OS)) | ||
873 | debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS; | ||
874 | |||
875 | if (!(config & ARCH_PERFMON_EVENTSEL_USR)) | ||
876 | debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR; | ||
877 | |||
878 | update_debugctlmsr(debugctlmsr); | ||
879 | } | ||
880 | |||
881 | static void intel_pmu_disable_bts(void) | ||
882 | { | ||
883 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); | ||
884 | unsigned long debugctlmsr; | ||
885 | |||
886 | if (!cpuc->ds) | ||
887 | return; | ||
888 | |||
889 | debugctlmsr = get_debugctlmsr(); | ||
890 | |||
891 | debugctlmsr &= | ||
892 | ~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT | | ||
893 | X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR); | ||
894 | |||
895 | update_debugctlmsr(debugctlmsr); | ||
896 | } | ||
897 | |||
715 | /* | 898 | /* |
716 | * Setup the hardware configuration for a given attr_type | 899 | * Setup the hardware configuration for a given attr_type |
717 | */ | 900 | */ |
@@ -728,9 +911,13 @@ static int __hw_perf_counter_init(struct perf_counter *counter) | |||
728 | err = 0; | 911 | err = 0; |
729 | if (!atomic_inc_not_zero(&active_counters)) { | 912 | if (!atomic_inc_not_zero(&active_counters)) { |
730 | mutex_lock(&pmc_reserve_mutex); | 913 | mutex_lock(&pmc_reserve_mutex); |
731 | if (atomic_read(&active_counters) == 0 && !reserve_pmc_hardware()) | 914 | if (atomic_read(&active_counters) == 0) { |
732 | err = -EBUSY; | 915 | if (!reserve_pmc_hardware()) |
733 | else | 916 | err = -EBUSY; |
917 | else | ||
918 | err = reserve_bts_hardware(); | ||
919 | } | ||
920 | if (!err) | ||
734 | atomic_inc(&active_counters); | 921 | atomic_inc(&active_counters); |
735 | mutex_unlock(&pmc_reserve_mutex); | 922 | mutex_unlock(&pmc_reserve_mutex); |
736 | } | 923 | } |
@@ -793,6 +980,20 @@ static int __hw_perf_counter_init(struct perf_counter *counter) | |||
793 | if (config == -1LL) | 980 | if (config == -1LL) |
794 | return -EINVAL; | 981 | return -EINVAL; |
795 | 982 | ||
983 | /* | ||
984 | * Branch tracing: | ||
985 | */ | ||
986 | if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) && | ||
987 | (hwc->sample_period == 1)) { | ||
988 | /* BTS is not supported by this architecture. */ | ||
989 | if (!bts_available()) | ||
990 | return -EOPNOTSUPP; | ||
991 | |||
992 | /* BTS is currently only allowed for user-mode. */ | ||
993 | if (hwc->config & ARCH_PERFMON_EVENTSEL_OS) | ||
994 | return -EOPNOTSUPP; | ||
995 | } | ||
996 | |||
796 | hwc->config |= config; | 997 | hwc->config |= config; |
797 | 998 | ||
798 | return 0; | 999 | return 0; |
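
Seen from userspace, the BTS path is therefore selected implicitly: sample branch instructions with a period of exactly one. A hypothetical attr setup (field names from this kernel's perf_counter ABI; the fd plumbing is omitted):

	struct perf_counter_attr attr = {
		.type           = PERF_TYPE_HARDWARE,
		.config         = PERF_COUNT_HW_BRANCH_INSTRUCTIONS,
		.sample_period  = 1,	/* period 1 routes to BTS */
		.exclude_kernel = 1,	/* kernel-mode BTS is rejected above */
	};

Any other period falls back to an ordinary PMU counter.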
@@ -817,7 +1018,18 @@ static void p6_pmu_disable_all(void) | |||
817 | 1018 | ||
818 | static void intel_pmu_disable_all(void) | 1019 | static void intel_pmu_disable_all(void) |
819 | { | 1020 | { |
1021 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); | ||
1022 | |||
1023 | if (!cpuc->enabled) | ||
1024 | return; | ||
1025 | |||
1026 | cpuc->enabled = 0; | ||
1027 | barrier(); | ||
1028 | |||
820 | wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); | 1029 | wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); |
1030 | |||
1031 | if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) | ||
1032 | intel_pmu_disable_bts(); | ||
821 | } | 1033 | } |
822 | 1034 | ||
823 | static void amd_pmu_disable_all(void) | 1035 | static void amd_pmu_disable_all(void) |
@@ -875,7 +1087,25 @@ static void p6_pmu_enable_all(void) | |||
875 | 1087 | ||
876 | static void intel_pmu_enable_all(void) | 1088 | static void intel_pmu_enable_all(void) |
877 | { | 1089 | { |
1090 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); | ||
1091 | |||
1092 | if (cpuc->enabled) | ||
1093 | return; | ||
1094 | |||
1095 | cpuc->enabled = 1; | ||
1096 | barrier(); | ||
1097 | |||
878 | wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl); | 1098 | wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl); |
1099 | |||
1100 | if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) { | ||
1101 | struct perf_counter *counter = | ||
1102 | cpuc->counters[X86_PMC_IDX_FIXED_BTS]; | ||
1103 | |||
1104 | if (WARN_ON_ONCE(!counter)) | ||
1105 | return; | ||
1106 | |||
1107 | intel_pmu_enable_bts(counter->hw.config); | ||
1108 | } | ||
879 | } | 1109 | } |
880 | 1110 | ||
881 | static void amd_pmu_enable_all(void) | 1111 | static void amd_pmu_enable_all(void) |
@@ -962,6 +1192,11 @@ p6_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) | |||
962 | static inline void | 1192 | static inline void |
963 | intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) | 1193 | intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) |
964 | { | 1194 | { |
1195 | if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { | ||
1196 | intel_pmu_disable_bts(); | ||
1197 | return; | ||
1198 | } | ||
1199 | |||
965 | if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { | 1200 | if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { |
966 | intel_pmu_disable_fixed(hwc, idx); | 1201 | intel_pmu_disable_fixed(hwc, idx); |
967 | return; | 1202 | return; |
@@ -990,6 +1225,9 @@ x86_perf_counter_set_period(struct perf_counter *counter, | |||
990 | s64 period = hwc->sample_period; | 1225 | s64 period = hwc->sample_period; |
991 | int err, ret = 0; | 1226 | int err, ret = 0; |
992 | 1227 | ||
1228 | if (idx == X86_PMC_IDX_FIXED_BTS) | ||
1229 | return 0; | ||
1230 | |||
993 | /* | 1231 | /* |
994 | * If we are way outside a reasonable range then just skip forward: | 1232 | * If we are way outside a reasonable range then just skip forward:
995 | */ | 1233 | */ |
@@ -1072,6 +1310,14 @@ static void p6_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) | |||
1072 | 1310 | ||
1073 | static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) | 1311 | static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) |
1074 | { | 1312 | { |
1313 | if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { | ||
1314 | if (!__get_cpu_var(cpu_hw_counters).enabled) | ||
1315 | return; | ||
1316 | |||
1317 | intel_pmu_enable_bts(hwc->config); | ||
1318 | return; | ||
1319 | } | ||
1320 | |||
1075 | if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { | 1321 | if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { |
1076 | intel_pmu_enable_fixed(hwc, idx); | 1322 | intel_pmu_enable_fixed(hwc, idx); |
1077 | return; | 1323 | return; |
@@ -1093,11 +1339,16 @@ fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc) | |||
1093 | { | 1339 | { |
1094 | unsigned int event; | 1340 | unsigned int event; |
1095 | 1341 | ||
1342 | event = hwc->config & ARCH_PERFMON_EVENT_MASK; | ||
1343 | |||
1344 | if (unlikely((event == | ||
1345 | x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) && | ||
1346 | (hwc->sample_period == 1))) | ||
1347 | return X86_PMC_IDX_FIXED_BTS; | ||
1348 | |||
1096 | if (!x86_pmu.num_counters_fixed) | 1349 | if (!x86_pmu.num_counters_fixed) |
1097 | return -1; | 1350 | return -1; |
1098 | 1351 | ||
1099 | event = hwc->config & ARCH_PERFMON_EVENT_MASK; | ||
1100 | |||
1101 | if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS))) | 1352 | if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS))) |
1102 | return X86_PMC_IDX_FIXED_INSTRUCTIONS; | 1353 | return X86_PMC_IDX_FIXED_INSTRUCTIONS; |
1103 | if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES))) | 1354 | if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES))) |
@@ -1118,7 +1369,15 @@ static int x86_pmu_enable(struct perf_counter *counter) | |||
1118 | int idx; | 1369 | int idx; |
1119 | 1370 | ||
1120 | idx = fixed_mode_idx(counter, hwc); | 1371 | idx = fixed_mode_idx(counter, hwc); |
1121 | if (idx >= 0) { | 1372 | if (idx == X86_PMC_IDX_FIXED_BTS) { |
1373 | /* BTS is already occupied. */ | ||
1374 | if (test_and_set_bit(idx, cpuc->used_mask)) | ||
1375 | return -EAGAIN; | ||
1376 | |||
1377 | hwc->config_base = 0; | ||
1378 | hwc->counter_base = 0; | ||
1379 | hwc->idx = idx; | ||
1380 | } else if (idx >= 0) { | ||
1122 | /* | 1381 | /* |
1123 | * Try to get the fixed counter, if that is already taken | 1382 | * Try to get the fixed counter, if that is already taken |
1124 | * then try to get a generic counter: | 1383 | * then try to get a generic counter: |
@@ -1229,6 +1488,44 @@ void perf_counter_print_debug(void) | |||
1229 | local_irq_restore(flags); | 1488 | local_irq_restore(flags); |
1230 | } | 1489 | } |
1231 | 1490 | ||
1491 | static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc, | ||
1492 | struct perf_sample_data *data) | ||
1493 | { | ||
1494 | struct debug_store *ds = cpuc->ds; | ||
1495 | struct bts_record { | ||
1496 | u64 from; | ||
1497 | u64 to; | ||
1498 | u64 flags; | ||
1499 | }; | ||
1500 | struct perf_counter *counter = cpuc->counters[X86_PMC_IDX_FIXED_BTS]; | ||
1501 | unsigned long orig_ip = data->regs->ip; | ||
1502 | struct bts_record *at, *top; | ||
1503 | |||
1504 | if (!counter) | ||
1505 | return; | ||
1506 | |||
1507 | if (!ds) | ||
1508 | return; | ||
1509 | |||
1510 | at = (struct bts_record *)(unsigned long)ds->bts_buffer_base; | ||
1511 | top = (struct bts_record *)(unsigned long)ds->bts_index; | ||
1512 | |||
1513 | ds->bts_index = ds->bts_buffer_base; | ||
1514 | |||
1515 | for (; at < top; at++) { | ||
1516 | data->regs->ip = at->from; | ||
1517 | data->addr = at->to; | ||
1518 | |||
1519 | perf_counter_output(counter, 1, data); | ||
1520 | } | ||
1521 | |||
1522 | data->regs->ip = orig_ip; | ||
1523 | data->addr = 0; | ||
1524 | |||
1525 | /* There's new data available. */ | ||
1526 | counter->pending_kill = POLL_IN; | ||
1527 | } | ||
1528 | |||
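
Worth noting about the drain above: the saved regs->ip is temporarily rewritten to the branch source and data->addr carries the branch target, so every taken branch in the buffer becomes one ordinary perf sample. Conceptually (addresses invented):

	/* record { .from = 0xffffffff81001234,
	 *          .to   = 0xffffffff81005678, .flags = ... }
	 * -> sample with ip = 0xffffffff81001234, addr = 0xffffffff81005678 */

Resetting bts_index to bts_buffer_base before the walk hands the buffer back to the hardware immediately.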
1232 | static void x86_pmu_disable(struct perf_counter *counter) | 1529 | static void x86_pmu_disable(struct perf_counter *counter) |
1233 | { | 1530 | { |
1234 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); | 1531 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); |
@@ -1253,6 +1550,15 @@ static void x86_pmu_disable(struct perf_counter *counter) | |||
1253 | * that we are disabling: | 1550 | * that we are disabling: |
1254 | */ | 1551 | */ |
1255 | x86_perf_counter_update(counter, hwc, idx); | 1552 | x86_perf_counter_update(counter, hwc, idx); |
1553 | |||
1554 | /* Drain the remaining BTS records. */ | ||
1555 | if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { | ||
1556 | struct perf_sample_data data; | ||
1557 | struct pt_regs regs; | ||
1558 | |||
1559 | data.regs = ®s; | ||
1560 | intel_pmu_drain_bts_buffer(cpuc, &data); | ||
1561 | } | ||
1256 | cpuc->counters[idx] = NULL; | 1562 | cpuc->counters[idx] = NULL; |
1257 | clear_bit(idx, cpuc->used_mask); | 1563 | clear_bit(idx, cpuc->used_mask); |
1258 | 1564 | ||
@@ -1280,6 +1586,7 @@ static int intel_pmu_save_and_restart(struct perf_counter *counter) | |||
1280 | 1586 | ||
1281 | static void intel_pmu_reset(void) | 1587 | static void intel_pmu_reset(void) |
1282 | { | 1588 | { |
1589 | struct debug_store *ds = __get_cpu_var(cpu_hw_counters).ds; | ||
1283 | unsigned long flags; | 1590 | unsigned long flags; |
1284 | int idx; | 1591 | int idx; |
1285 | 1592 | ||
@@ -1297,6 +1604,8 @@ static void intel_pmu_reset(void) | |||
1297 | for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) { | 1604 | for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) { |
1298 | checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull); | 1605 | checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull); |
1299 | } | 1606 | } |
1607 | if (ds) | ||
1608 | ds->bts_index = ds->bts_buffer_base; | ||
1300 | 1609 | ||
1301 | local_irq_restore(flags); | 1610 | local_irq_restore(flags); |
1302 | } | 1611 | } |
@@ -1362,6 +1671,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) | |||
1362 | cpuc = &__get_cpu_var(cpu_hw_counters); | 1671 | cpuc = &__get_cpu_var(cpu_hw_counters); |
1363 | 1672 | ||
1364 | perf_disable(); | 1673 | perf_disable(); |
1674 | intel_pmu_drain_bts_buffer(cpuc, &data); | ||
1365 | status = intel_pmu_get_status(); | 1675 | status = intel_pmu_get_status(); |
1366 | if (!status) { | 1676 | if (!status) { |
1367 | perf_enable(); | 1677 | perf_enable(); |
@@ -1571,6 +1881,8 @@ static struct x86_pmu intel_pmu = { | |||
1571 | * the generic counter period: | 1881 | * the generic counter period: |
1572 | */ | 1882 | */ |
1573 | .max_period = (1ULL << 31) - 1, | 1883 | .max_period = (1ULL << 31) - 1, |
1884 | .enable_bts = intel_pmu_enable_bts, | ||
1885 | .disable_bts = intel_pmu_disable_bts, | ||
1574 | }; | 1886 | }; |
1575 | 1887 | ||
1576 | static struct x86_pmu amd_pmu = { | 1888 | static struct x86_pmu amd_pmu = { |
@@ -1962,3 +2274,8 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) | |||
1962 | 2274 | ||
1963 | return entry; | 2275 | return entry; |
1964 | } | 2276 | } |
2277 | |||
2278 | void hw_perf_counter_setup_online(int cpu) | ||
2279 | { | ||
2280 | init_debug_store_on_cpu(cpu); | ||
2281 | } | ||
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c index e60ed740d2b3..392bea43b890 100644 --- a/arch/x86/kernel/cpu/perfctr-watchdog.c +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c | |||
@@ -68,16 +68,16 @@ static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr) | |||
68 | /* returns the bit offset of the performance counter register */ | 68 | /* returns the bit offset of the performance counter register */ |
69 | switch (boot_cpu_data.x86_vendor) { | 69 | switch (boot_cpu_data.x86_vendor) { |
70 | case X86_VENDOR_AMD: | 70 | case X86_VENDOR_AMD: |
71 | return (msr - MSR_K7_PERFCTR0); | 71 | return msr - MSR_K7_PERFCTR0; |
72 | case X86_VENDOR_INTEL: | 72 | case X86_VENDOR_INTEL: |
73 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) | 73 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) |
74 | return (msr - MSR_ARCH_PERFMON_PERFCTR0); | 74 | return msr - MSR_ARCH_PERFMON_PERFCTR0; |
75 | 75 | ||
76 | switch (boot_cpu_data.x86) { | 76 | switch (boot_cpu_data.x86) { |
77 | case 6: | 77 | case 6: |
78 | return (msr - MSR_P6_PERFCTR0); | 78 | return msr - MSR_P6_PERFCTR0; |
79 | case 15: | 79 | case 15: |
80 | return (msr - MSR_P4_BPU_PERFCTR0); | 80 | return msr - MSR_P4_BPU_PERFCTR0; |
81 | } | 81 | } |
82 | } | 82 | } |
83 | return 0; | 83 | return 0; |
@@ -92,16 +92,16 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr) | |||
92 | /* returns the bit offset of the event selection register */ | 92 | /* returns the bit offset of the event selection register */ |
93 | switch (boot_cpu_data.x86_vendor) { | 93 | switch (boot_cpu_data.x86_vendor) { |
94 | case X86_VENDOR_AMD: | 94 | case X86_VENDOR_AMD: |
95 | return (msr - MSR_K7_EVNTSEL0); | 95 | return msr - MSR_K7_EVNTSEL0; |
96 | case X86_VENDOR_INTEL: | 96 | case X86_VENDOR_INTEL: |
97 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) | 97 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) |
98 | return (msr - MSR_ARCH_PERFMON_EVENTSEL0); | 98 | return msr - MSR_ARCH_PERFMON_EVENTSEL0; |
99 | 99 | ||
100 | switch (boot_cpu_data.x86) { | 100 | switch (boot_cpu_data.x86) { |
101 | case 6: | 101 | case 6: |
102 | return (msr - MSR_P6_EVNTSEL0); | 102 | return msr - MSR_P6_EVNTSEL0; |
103 | case 15: | 103 | case 15: |
104 | return (msr - MSR_P4_BSU_ESCR0); | 104 | return msr - MSR_P4_BSU_ESCR0; |
105 | } | 105 | } |
106 | } | 106 | } |
107 | return 0; | 107 | return 0; |
@@ -113,7 +113,7 @@ int avail_to_resrv_perfctr_nmi_bit(unsigned int counter) | |||
113 | { | 113 | { |
114 | BUG_ON(counter > NMI_MAX_COUNTER_BITS); | 114 | BUG_ON(counter > NMI_MAX_COUNTER_BITS); |
115 | 115 | ||
116 | return (!test_bit(counter, perfctr_nmi_owner)); | 116 | return !test_bit(counter, perfctr_nmi_owner); |
117 | } | 117 | } |
118 | 118 | ||
119 | /* checks an msr for availability */ | 119 | /* checks an msr for availability */
@@ -124,7 +124,7 @@ int avail_to_resrv_perfctr_nmi(unsigned int msr) | |||
124 | counter = nmi_perfctr_msr_to_bit(msr); | 124 | counter = nmi_perfctr_msr_to_bit(msr); |
125 | BUG_ON(counter > NMI_MAX_COUNTER_BITS); | 125 | BUG_ON(counter > NMI_MAX_COUNTER_BITS); |
126 | 126 | ||
127 | return (!test_bit(counter, perfctr_nmi_owner)); | 127 | return !test_bit(counter, perfctr_nmi_owner); |
128 | } | 128 | } |
129 | EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit); | 129 | EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit); |
130 | 130 | ||
@@ -237,7 +237,7 @@ static unsigned int adjust_for_32bit_ctr(unsigned int hz) | |||
237 | */ | 237 | */ |
238 | counter_val = (u64)cpu_khz * 1000; | 238 | counter_val = (u64)cpu_khz * 1000; |
239 | do_div(counter_val, retval); | 239 | do_div(counter_val, retval); |
240 | if (counter_val > 0x7fffffffULL) { | 240 | if (counter_val > 0x7fffffffULL) { |
241 | u64 count = (u64)cpu_khz * 1000; | 241 | u64 count = (u64)cpu_khz * 1000; |
242 | do_div(count, 0x7fffffffUL); | 242 | do_div(count, 0x7fffffffUL); |
243 | retval = count + 1; | 243 | retval = count + 1; |
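
A worked example of this clamp, with invented numbers: at cpu_khz = 3000000 (a 3 GHz CPU) and nmi_hz = 1, the required period is 3e9 cycles, which does not fit a 31-bit count; 3e9 / 0x7fffffff truncates to 1, so retval becomes 2 and the watchdog simply ticks twice a second instead.

	/* counter_val = 3000000000 > 0x7fffffff
	 * retval      = 3000000000 / 0x7fffffff + 1 = 2 */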
@@ -251,7 +251,7 @@ static void write_watchdog_counter(unsigned int perfctr_msr, | |||
251 | u64 count = (u64)cpu_khz * 1000; | 251 | u64 count = (u64)cpu_khz * 1000; |
252 | 252 | ||
253 | do_div(count, nmi_hz); | 253 | do_div(count, nmi_hz); |
254 | if(descr) | 254 | if (descr) |
255 | pr_debug("setting %s to -0x%08Lx\n", descr, count); | 255 | pr_debug("setting %s to -0x%08Lx\n", descr, count); |
256 | wrmsrl(perfctr_msr, 0 - count); | 256 | wrmsrl(perfctr_msr, 0 - count); |
257 | } | 257 | } |
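
The negative write is the usual perfctr idiom: the counter counts up and raises its interrupt when it overflows past zero, so programming -(cpu_khz * 1000 / nmi_hz) gives roughly nmi_hz NMIs per second. With invented numbers:

	/* cpu_khz = 2000000 (2 GHz), nmi_hz = 10:
	 *   wrmsrl(perfctr_msr, -200000000)  ->  NMI every ~100 ms */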
@@ -262,7 +262,7 @@ static void write_watchdog_counter32(unsigned int perfctr_msr, | |||
262 | u64 count = (u64)cpu_khz * 1000; | 262 | u64 count = (u64)cpu_khz * 1000; |
263 | 263 | ||
264 | do_div(count, nmi_hz); | 264 | do_div(count, nmi_hz); |
265 | if(descr) | 265 | if (descr) |
266 | pr_debug("setting %s to -0x%08Lx\n", descr, count); | 266 | pr_debug("setting %s to -0x%08Lx\n", descr, count); |
267 | wrmsr(perfctr_msr, (u32)(-count), 0); | 267 | wrmsr(perfctr_msr, (u32)(-count), 0); |
268 | } | 268 | } |
@@ -296,7 +296,7 @@ static int setup_k7_watchdog(unsigned nmi_hz) | |||
296 | 296 | ||
297 | /* setup the timer */ | 297 | /* setup the timer */ |
298 | wrmsr(evntsel_msr, evntsel, 0); | 298 | wrmsr(evntsel_msr, evntsel, 0); |
299 | write_watchdog_counter(perfctr_msr, "K7_PERFCTR0",nmi_hz); | 299 | write_watchdog_counter(perfctr_msr, "K7_PERFCTR0", nmi_hz); |
300 | 300 | ||
301 | /* initialize the wd struct before enabling */ | 301 | /* initialize the wd struct before enabling */ |
302 | wd->perfctr_msr = perfctr_msr; | 302 | wd->perfctr_msr = perfctr_msr; |
@@ -387,7 +387,7 @@ static int setup_p6_watchdog(unsigned nmi_hz) | |||
387 | /* setup the timer */ | 387 | /* setup the timer */ |
388 | wrmsr(evntsel_msr, evntsel, 0); | 388 | wrmsr(evntsel_msr, evntsel, 0); |
389 | nmi_hz = adjust_for_32bit_ctr(nmi_hz); | 389 | nmi_hz = adjust_for_32bit_ctr(nmi_hz); |
390 | write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0",nmi_hz); | 390 | write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0", nmi_hz); |
391 | 391 | ||
392 | /* initialize the wd struct before enabling */ | 392 | /* initialize the wd struct before enabling */ |
393 | wd->perfctr_msr = perfctr_msr; | 393 | wd->perfctr_msr = perfctr_msr; |
@@ -415,7 +415,7 @@ static void __kprobes p6_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz) | |||
415 | apic_write(APIC_LVTPC, APIC_DM_NMI); | 415 | apic_write(APIC_LVTPC, APIC_DM_NMI); |
416 | 416 | ||
417 | /* P6/ARCH_PERFMON has 32 bit counter write */ | 417 | /* P6/ARCH_PERFMON has 32 bit counter write */ |
418 | write_watchdog_counter32(wd->perfctr_msr, NULL,nmi_hz); | 418 | write_watchdog_counter32(wd->perfctr_msr, NULL, nmi_hz); |
419 | } | 419 | } |
420 | 420 | ||
421 | static const struct wd_ops p6_wd_ops = { | 421 | static const struct wd_ops p6_wd_ops = { |
@@ -490,9 +490,9 @@ static int setup_p4_watchdog(unsigned nmi_hz) | |||
490 | if (smp_num_siblings == 2) { | 490 | if (smp_num_siblings == 2) { |
491 | unsigned int ebx, apicid; | 491 | unsigned int ebx, apicid; |
492 | 492 | ||
493 | ebx = cpuid_ebx(1); | 493 | ebx = cpuid_ebx(1); |
494 | apicid = (ebx >> 24) & 0xff; | 494 | apicid = (ebx >> 24) & 0xff; |
495 | ht_num = apicid & 1; | 495 | ht_num = apicid & 1; |
496 | } else | 496 | } else |
497 | #endif | 497 | #endif |
498 | ht_num = 0; | 498 | ht_num = 0; |
@@ -544,7 +544,7 @@ static int setup_p4_watchdog(unsigned nmi_hz) | |||
544 | } | 544 | } |
545 | 545 | ||
546 | evntsel = P4_ESCR_EVENT_SELECT(0x3F) | 546 | evntsel = P4_ESCR_EVENT_SELECT(0x3F) |
547 | | P4_ESCR_OS | 547 | | P4_ESCR_OS |
548 | | P4_ESCR_USR; | 548 | | P4_ESCR_USR; |
549 | 549 | ||
550 | cccr_val |= P4_CCCR_THRESHOLD(15) | 550 | cccr_val |= P4_CCCR_THRESHOLD(15) |
@@ -612,7 +612,7 @@ static void __kprobes p4_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz) | |||
612 | { | 612 | { |
613 | unsigned dummy; | 613 | unsigned dummy; |
614 | /* | 614 | /* |
615 | * P4 quirks: | 615 | * P4 quirks: |
616 | * - An overflown perfctr will assert its interrupt | 616 | * - An overflown perfctr will assert its interrupt |
617 | * until the OVF flag in its CCCR is cleared. | 617 | * until the OVF flag in its CCCR is cleared. |
618 | * - LVTPC is masked on interrupt and must be | 618 | * - LVTPC is masked on interrupt and must be |
@@ -662,7 +662,8 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz) | |||
662 | * NOTE: Corresponding bit = 0 in ebx indicates event present. | 662 | * NOTE: Corresponding bit = 0 in ebx indicates event present. |
663 | */ | 663 | */ |
664 | cpuid(10, &(eax.full), &ebx, &unused, &unused); | 664 | cpuid(10, &(eax.full), &ebx, &unused, &unused); |
665 | if ((eax.split.mask_length < (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) || | 665 | if ((eax.split.mask_length < |
666 | (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) || | ||
666 | (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT)) | 667 | (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT)) |
667 | return 0; | 668 | return 0; |
668 | 669 | ||
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c index d5e30397246b..62ac8cb6ba27 100644 --- a/arch/x86/kernel/cpu/proc.c +++ b/arch/x86/kernel/cpu/proc.c | |||
@@ -116,11 +116,9 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
116 | seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize); | 116 | seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize); |
117 | #endif | 117 | #endif |
118 | seq_printf(m, "clflush size\t: %u\n", c->x86_clflush_size); | 118 | seq_printf(m, "clflush size\t: %u\n", c->x86_clflush_size); |
119 | #ifdef CONFIG_X86_64 | ||
120 | seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment); | 119 | seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment); |
121 | seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n", | 120 | seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n", |
122 | c->x86_phys_bits, c->x86_virt_bits); | 121 | c->x86_phys_bits, c->x86_virt_bits); |
123 | #endif | ||
124 | 122 | ||
125 | seq_printf(m, "power management:"); | 123 | seq_printf(m, "power management:"); |
126 | for (i = 0; i < 32; i++) { | 124 | for (i = 0; i < 32; i++) { |
@@ -128,7 +126,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
128 | if (i < ARRAY_SIZE(x86_power_flags) && | 126 | if (i < ARRAY_SIZE(x86_power_flags) && |
129 | x86_power_flags[i]) | 127 | x86_power_flags[i]) |
130 | seq_printf(m, "%s%s", | 128 | seq_printf(m, "%s%s", |
131 | x86_power_flags[i][0]?" ":"", | 129 | x86_power_flags[i][0] ? " " : "", |
132 | x86_power_flags[i]); | 130 | x86_power_flags[i]); |
133 | else | 131 | else |
134 | seq_printf(m, " [%d]", i); | 132 | seq_printf(m, " [%d]", i); |
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c index 284c399e3234..bc24f514ec93 100644 --- a/arch/x86/kernel/cpu/vmware.c +++ b/arch/x86/kernel/cpu/vmware.c | |||
@@ -49,17 +49,17 @@ static inline int __vmware_platform(void) | |||
49 | 49 | ||
50 | static unsigned long __vmware_get_tsc_khz(void) | 50 | static unsigned long __vmware_get_tsc_khz(void) |
51 | { | 51 | { |
52 | uint64_t tsc_hz; | 52 | uint64_t tsc_hz; |
53 | uint32_t eax, ebx, ecx, edx; | 53 | uint32_t eax, ebx, ecx, edx; |
54 | 54 | ||
55 | VMWARE_PORT(GETHZ, eax, ebx, ecx, edx); | 55 | VMWARE_PORT(GETHZ, eax, ebx, ecx, edx); |
56 | 56 | ||
57 | if (ebx == UINT_MAX) | 57 | if (ebx == UINT_MAX) |
58 | return 0; | 58 | return 0; |
59 | tsc_hz = eax | (((uint64_t)ebx) << 32); | 59 | tsc_hz = eax | (((uint64_t)ebx) << 32); |
60 | do_div(tsc_hz, 1000); | 60 | do_div(tsc_hz, 1000); |
61 | BUG_ON(tsc_hz >> 32); | 61 | BUG_ON(tsc_hz >> 32); |
62 | return tsc_hz; | 62 | return tsc_hz; |
63 | } | 63 | } |
64 | 64 | ||
65 | /* | 65 | /* |
diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c index b4f14c6c09d9..37250fe490b1 100644 --- a/arch/x86/kernel/doublefault_32.c +++ b/arch/x86/kernel/doublefault_32.c | |||
@@ -27,9 +27,7 @@ static void doublefault_fn(void) | |||
27 | 27 | ||
28 | if (ptr_ok(gdt)) { | 28 | if (ptr_ok(gdt)) { |
29 | gdt += GDT_ENTRY_TSS << 3; | 29 | gdt += GDT_ENTRY_TSS << 3; |
30 | tss = *(u16 *)(gdt+2); | 30 | tss = get_desc_base((struct desc_struct *)gdt); |
31 | tss += *(u8 *)(gdt+4) << 16; | ||
32 | tss += *(u8 *)(gdt+7) << 24; | ||
33 | printk(KERN_EMERG "double fault, tss at %08lx\n", tss); | 31 | printk(KERN_EMERG "double fault, tss at %08lx\n", tss); |
34 | 32 | ||
35 | if (ptr_ok(tss)) { | 33 | if (ptr_ok(tss)) { |
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c index 48bfe1386038..ef42a038f1a6 100644 --- a/arch/x86/kernel/ds.c +++ b/arch/x86/kernel/ds.c | |||
@@ -509,15 +509,15 @@ enum bts_field { | |||
509 | bts_escape = ((unsigned long)-1 & ~bts_qual_mask) | 509 | bts_escape = ((unsigned long)-1 & ~bts_qual_mask) |
510 | }; | 510 | }; |
511 | 511 | ||
512 | static inline unsigned long bts_get(const char *base, enum bts_field field) | 512 | static inline unsigned long bts_get(const char *base, unsigned long field) |
513 | { | 513 | { |
514 | base += (ds_cfg.sizeof_ptr_field * field); | 514 | base += (ds_cfg.sizeof_ptr_field * field); |
515 | return *(unsigned long *)base; | 515 | return *(unsigned long *)base; |
516 | } | 516 | } |
517 | 517 | ||
518 | static inline void bts_set(char *base, enum bts_field field, unsigned long val) | 518 | static inline void bts_set(char *base, unsigned long field, unsigned long val) |
519 | { | 519 | { |
520 | base += (ds_cfg.sizeof_ptr_field * field);; | 520 | base += (ds_cfg.sizeof_ptr_field * field); |
521 | (*(unsigned long *)base) = val; | 521 | (*(unsigned long *)base) = val; |
522 | } | 522 | } |
523 | 523 | ||
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index c8405718a4c3..2d8a371d4339 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c | |||
@@ -15,7 +15,6 @@ | |||
15 | #include <linux/bug.h> | 15 | #include <linux/bug.h> |
16 | #include <linux/nmi.h> | 16 | #include <linux/nmi.h> |
17 | #include <linux/sysfs.h> | 17 | #include <linux/sysfs.h> |
18 | #include <linux/ftrace.h> | ||
19 | 18 | ||
20 | #include <asm/stacktrace.h> | 19 | #include <asm/stacktrace.h> |
21 | 20 | ||
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 5cb5725b2bae..147005a1cc3c 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c | |||
@@ -115,7 +115,7 @@ static void __init __e820_add_region(struct e820map *e820x, u64 start, u64 size, | |||
115 | { | 115 | { |
116 | int x = e820x->nr_map; | 116 | int x = e820x->nr_map; |
117 | 117 | ||
118 | if (x == ARRAY_SIZE(e820x->map)) { | 118 | if (x >= ARRAY_SIZE(e820x->map)) { |
119 | printk(KERN_ERR "Ooops! Too many entries in the memory map!\n"); | 119 | printk(KERN_ERR "Ooops! Too many entries in the memory map!\n"); |
120 | return; | 120 | return; |
121 | } | 121 | } |
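
The switch from == to >= here is defensive; the two behave identically unless nr_map has already run past the array through some other path, in which case only >= catches it:

	/* hypothetical corruption: e820x->nr_map = ARRAY_SIZE(map) + 1
	 *   x == ARRAY_SIZE(e820x->map) -> false -> write past the end
	 *   x >= ARRAY_SIZE(e820x->map) -> true  -> warn and return   */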
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index d94e1ea3b9fe..9dbb527e1652 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c | |||
@@ -417,10 +417,6 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, | |||
417 | unsigned long return_hooker = (unsigned long) | 417 | unsigned long return_hooker = (unsigned long) |
418 | &return_to_handler; | 418 | &return_to_handler; |
419 | 419 | ||
420 | /* Nmi's are currently unsupported */ | ||
421 | if (unlikely(in_nmi())) | ||
422 | return; | ||
423 | |||
424 | if (unlikely(atomic_read(&current->tracing_graph_pause))) | 420 | if (unlikely(atomic_read(&current->tracing_graph_pause))) |
425 | return; | 421 | return; |
426 | 422 | ||
@@ -498,37 +494,56 @@ static struct syscall_metadata *find_syscall_meta(unsigned long *syscall) | |||
498 | 494 | ||
499 | struct syscall_metadata *syscall_nr_to_meta(int nr) | 495 | struct syscall_metadata *syscall_nr_to_meta(int nr) |
500 | { | 496 | { |
501 | if (!syscalls_metadata || nr >= FTRACE_SYSCALL_MAX || nr < 0) | 497 | if (!syscalls_metadata || nr >= NR_syscalls || nr < 0) |
502 | return NULL; | 498 | return NULL; |
503 | 499 | ||
504 | return syscalls_metadata[nr]; | 500 | return syscalls_metadata[nr]; |
505 | } | 501 | } |
506 | 502 | ||
507 | void arch_init_ftrace_syscalls(void) | 503 | int syscall_name_to_nr(char *name) |
504 | { | ||
505 | int i; | ||
506 | |||
507 | if (!syscalls_metadata) | ||
508 | return -1; | ||
509 | |||
510 | for (i = 0; i < NR_syscalls; i++) { | ||
511 | if (syscalls_metadata[i]) { | ||
512 | if (!strcmp(syscalls_metadata[i]->name, name)) | ||
513 | return i; | ||
514 | } | ||
515 | } | ||
516 | return -1; | ||
517 | } | ||
518 | |||
519 | void set_syscall_enter_id(int num, int id) | ||
520 | { | ||
521 | syscalls_metadata[num]->enter_id = id; | ||
522 | } | ||
523 | |||
524 | void set_syscall_exit_id(int num, int id) | ||
525 | { | ||
526 | syscalls_metadata[num]->exit_id = id; | ||
527 | } | ||
528 | |||
529 | static int __init arch_init_ftrace_syscalls(void) | ||
508 | { | 530 | { |
509 | int i; | 531 | int i; |
510 | struct syscall_metadata *meta; | 532 | struct syscall_metadata *meta; |
511 | unsigned long **psys_syscall_table = &sys_call_table; | 533 | unsigned long **psys_syscall_table = &sys_call_table; |
512 | static atomic_t refs; | ||
513 | |||
514 | if (atomic_inc_return(&refs) != 1) | ||
515 | goto end; | ||
516 | 534 | ||
517 | syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * | 535 | syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * |
518 | FTRACE_SYSCALL_MAX, GFP_KERNEL); | 536 | NR_syscalls, GFP_KERNEL); |
519 | if (!syscalls_metadata) { | 537 | if (!syscalls_metadata) { |
520 | WARN_ON(1); | 538 | WARN_ON(1); |
521 | return; | 539 | return -ENOMEM; |
522 | } | 540 | } |
523 | 541 | ||
524 | for (i = 0; i < FTRACE_SYSCALL_MAX; i++) { | 542 | for (i = 0; i < NR_syscalls; i++) { |
525 | meta = find_syscall_meta(psys_syscall_table[i]); | 543 | meta = find_syscall_meta(psys_syscall_table[i]); |
526 | syscalls_metadata[i] = meta; | 544 | syscalls_metadata[i] = meta; |
527 | } | 545 | } |
528 | return; | 546 | return 0; |
529 | |||
530 | /* Paranoid: avoid overflow */ | ||
531 | end: | ||
532 | atomic_dec(&refs); | ||
533 | } | 547 | } |
548 | arch_initcall(arch_init_ftrace_syscalls); | ||
534 | #endif | 549 | #endif |
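
The name-to-number lookup added above is a plain linear scan over a sparse pointer table. A self-contained sketch of the same pattern (types and table size are illustrative, not the kernel's):

#include <string.h>

struct meta { const char *name; };

#define NR 512
static struct meta *table[NR];          /* sparse: NULL where no entry */

/* Mirrors syscall_name_to_nr(): -1 if the table is absent or the
 * name is unknown; otherwise the matching slot index. */
static int name_to_nr(const char *name)
{
        int i;

        for (i = 0; i < NR; i++)
                if (table[i] && !strcmp(table[i]->name, name))
                        return i;
        return -1;
}
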
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index cc827ac9e8d3..7ffec6b3b331 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S | |||
@@ -439,7 +439,6 @@ is386: movl $2,%ecx # set MP | |||
439 | jne 1f | 439 | jne 1f |
440 | movl $per_cpu__gdt_page,%eax | 440 | movl $per_cpu__gdt_page,%eax |
441 | movl $per_cpu__stack_canary,%ecx | 441 | movl $per_cpu__stack_canary,%ecx |
442 | subl $20, %ecx | ||
443 | movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax) | 442 | movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax) |
444 | shrl $16, %ecx | 443 | shrl $16, %ecx |
445 | movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax) | 444 | movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax) |
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index 3b09634a5153..7d35d0fe2329 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c | |||
@@ -218,7 +218,6 @@ bool handle_irq(unsigned irq, struct pt_regs *regs) | |||
218 | void fixup_irqs(void) | 218 | void fixup_irqs(void) |
219 | { | 219 | { |
220 | unsigned int irq; | 220 | unsigned int irq; |
221 | static int warned; | ||
222 | struct irq_desc *desc; | 221 | struct irq_desc *desc; |
223 | 222 | ||
224 | for_each_irq_desc(irq, desc) { | 223 | for_each_irq_desc(irq, desc) { |
@@ -236,8 +235,8 @@ void fixup_irqs(void) | |||
236 | } | 235 | } |
237 | if (desc->chip->set_affinity) | 236 | if (desc->chip->set_affinity) |
238 | desc->chip->set_affinity(irq, affinity); | 237 | desc->chip->set_affinity(irq, affinity); |
239 | else if (desc->action && !(warned++)) | 238 | else if (desc->action) |
240 | printk("Cannot set affinity for irq %i\n", irq); | 239 | printk_once("Cannot set affinity for irq %i\n", irq); |
241 | } | 240 | } |
242 | 241 | ||
243 | #if 0 | 242 | #if 0 |
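
printk_once() packages exactly the static-flag pattern the removed 'warned' variable implemented by hand. Roughly (a sketch of the idea, not the kernel's exact macro):

/* Print a message at most once per boot. */
#define my_printk_once(fmt, ...)                        \
do {                                                    \
        static int __printed;                           \
        if (!__printed) {                               \
                __printed = 1;                          \
                printk(fmt, ##__VA_ARGS__);             \
        }                                               \
} while (0)
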
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index 651c93b28862..fcd513bf2846 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c | |||
@@ -482,11 +482,11 @@ static void __init construct_ioapic_table(int mpc_default_type) | |||
482 | MP_bus_info(&bus); | 482 | MP_bus_info(&bus); |
483 | } | 483 | } |
484 | 484 | ||
485 | ioapic.type = MP_IOAPIC; | 485 | ioapic.type = MP_IOAPIC; |
486 | ioapic.apicid = 2; | 486 | ioapic.apicid = 2; |
487 | ioapic.apicver = mpc_default_type > 4 ? 0x10 : 0x01; | 487 | ioapic.apicver = mpc_default_type > 4 ? 0x10 : 0x01; |
488 | ioapic.flags = MPC_APIC_USABLE; | 488 | ioapic.flags = MPC_APIC_USABLE; |
489 | ioapic.apicaddr = 0xFEC00000; | 489 | ioapic.apicaddr = IO_APIC_DEFAULT_PHYS_BASE; |
490 | MP_ioapic_info(&ioapic); | 490 | MP_ioapic_info(&ioapic); |
491 | 491 | ||
492 | /* | 492 | /* |
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c index 98fd6cd4e3a4..7dd950094178 100644 --- a/arch/x86/kernel/msr.c +++ b/arch/x86/kernel/msr.c | |||
@@ -1,6 +1,7 @@ | |||
1 | /* ----------------------------------------------------------------------- * | 1 | /* ----------------------------------------------------------------------- * |
2 | * | 2 | * |
3 | * Copyright 2000-2008 H. Peter Anvin - All Rights Reserved | 3 | * Copyright 2000-2008 H. Peter Anvin - All Rights Reserved |
4 | * Copyright 2009 Intel Corporation; author: H. Peter Anvin | ||
4 | * | 5 | * |
5 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
@@ -80,11 +81,8 @@ static ssize_t msr_read(struct file *file, char __user *buf, | |||
80 | 81 | ||
81 | for (; count; count -= 8) { | 82 | for (; count; count -= 8) { |
82 | err = rdmsr_safe_on_cpu(cpu, reg, &data[0], &data[1]); | 83 | err = rdmsr_safe_on_cpu(cpu, reg, &data[0], &data[1]); |
83 | if (err) { | 84 | if (err) |
84 | if (err == -EFAULT) /* Fix idiotic error code */ | ||
85 | err = -EIO; | ||
86 | break; | 85 | break; |
87 | } | ||
88 | if (copy_to_user(tmp, &data, 8)) { | 86 | if (copy_to_user(tmp, &data, 8)) { |
89 | err = -EFAULT; | 87 | err = -EFAULT; |
90 | break; | 88 | break; |
@@ -115,11 +113,8 @@ static ssize_t msr_write(struct file *file, const char __user *buf, | |||
115 | break; | 113 | break; |
116 | } | 114 | } |
117 | err = wrmsr_safe_on_cpu(cpu, reg, data[0], data[1]); | 115 | err = wrmsr_safe_on_cpu(cpu, reg, data[0], data[1]); |
118 | if (err) { | 116 | if (err) |
119 | if (err == -EFAULT) /* Fix idiotic error code */ | ||
120 | err = -EIO; | ||
121 | break; | 117 | break; |
122 | } | ||
123 | tmp += 2; | 118 | tmp += 2; |
124 | bytes += 8; | 119 | bytes += 8; |
125 | } | 120 | } |
@@ -127,6 +122,54 @@ static ssize_t msr_write(struct file *file, const char __user *buf, | |||
127 | return bytes ? bytes : err; | 122 | return bytes ? bytes : err; |
128 | } | 123 | } |
129 | 124 | ||
125 | static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg) | ||
126 | { | ||
127 | u32 __user *uregs = (u32 __user *)arg; | ||
128 | u32 regs[8]; | ||
129 | int cpu = iminor(file->f_path.dentry->d_inode); | ||
130 | int err; | ||
131 | |||
132 | switch (ioc) { | ||
133 | case X86_IOC_RDMSR_REGS: | ||
134 | if (!(file->f_mode & FMODE_READ)) { | ||
135 | err = -EBADF; | ||
136 | break; | ||
137 | } | ||
138 | if (copy_from_user(&regs, uregs, sizeof regs)) { | ||
139 | err = -EFAULT; | ||
140 | break; | ||
141 | } | ||
142 | err = rdmsr_safe_regs_on_cpu(cpu, regs); | ||
143 | if (err) | ||
144 | break; | ||
145 | if (copy_to_user(uregs, &regs, sizeof regs)) | ||
146 | err = -EFAULT; | ||
147 | break; | ||
148 | |||
149 | case X86_IOC_WRMSR_REGS: | ||
150 | if (!(file->f_mode & FMODE_WRITE)) { | ||
151 | err = -EBADF; | ||
152 | break; | ||
153 | } | ||
154 | if (copy_from_user(&regs, uregs, sizeof regs)) { | ||
155 | err = -EFAULT; | ||
156 | break; | ||
157 | } | ||
158 | err = wrmsr_safe_regs_on_cpu(cpu, regs); | ||
159 | if (err) | ||
160 | break; | ||
161 | if (copy_to_user(uregs, &regs, sizeof regs)) | ||
162 | err = -EFAULT; | ||
163 | break; | ||
164 | |||
165 | default: | ||
166 | err = -ENOTTY; | ||
167 | break; | ||
168 | } | ||
169 | |||
170 | return err; | ||
171 | } | ||
172 | |||
130 | static int msr_open(struct inode *inode, struct file *file) | 173 | static int msr_open(struct inode *inode, struct file *file) |
131 | { | 174 | { |
132 | unsigned int cpu = iminor(file->f_path.dentry->d_inode); | 175 | unsigned int cpu = iminor(file->f_path.dentry->d_inode); |
@@ -157,6 +200,8 @@ static const struct file_operations msr_fops = { | |||
157 | .read = msr_read, | 200 | .read = msr_read, |
158 | .write = msr_write, | 201 | .write = msr_write, |
159 | .open = msr_open, | 202 | .open = msr_open, |
203 | .unlocked_ioctl = msr_ioctl, | ||
204 | .compat_ioctl = msr_ioctl, | ||
160 | }; | 205 | }; |
161 | 206 | ||
162 | static int __cpuinit msr_device_create(int cpu) | 207 | static int __cpuinit msr_device_create(int cpu) |
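
The two new ioctls exchange the full eight-register array in the order documented in msr-reg.S further down (eax, ecx, edx, ebx, esp, ebp, esi, edi), with ecx carrying the MSR number. A hedged userspace sketch, assuming the ioctl constants are visible through <asm/msr.h> and that /dev/cpu/0/msr exists:

#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/msr.h>            /* assumed home of X86_IOC_RDMSR_REGS */

int main(void)
{
        uint32_t regs[8] = { 0 };
        int fd = open("/dev/cpu/0/msr", O_RDONLY);

        if (fd < 0)
                return 1;
        regs[1] = 0x10;         /* ecx = MSR number (0x10: TSC) */
        if (ioctl(fd, X86_IOC_RDMSR_REGS, regs) < 0)
                return 1;
        printf("edx:eax = %08x:%08x\n", regs[2], regs[0]);
        close(fd);
        return 0;
}
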
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 70ec9b951d76..f5b0b4a01fb2 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c | |||
@@ -362,8 +362,9 @@ struct pv_cpu_ops pv_cpu_ops = { | |||
362 | #endif | 362 | #endif |
363 | .wbinvd = native_wbinvd, | 363 | .wbinvd = native_wbinvd, |
364 | .read_msr = native_read_msr_safe, | 364 | .read_msr = native_read_msr_safe, |
365 | .read_msr_amd = native_read_msr_amd_safe, | 365 | .rdmsr_regs = native_rdmsr_safe_regs, |
366 | .write_msr = native_write_msr_safe, | 366 | .write_msr = native_write_msr_safe, |
367 | .wrmsr_regs = native_wrmsr_safe_regs, | ||
367 | .read_tsc = native_read_tsc, | 368 | .read_tsc = native_read_tsc, |
368 | .read_pmc = native_read_pmc, | 369 | .read_pmc = native_read_pmc, |
369 | .read_tscp = native_read_tscp, | 370 | .read_tscp = native_read_tscp, |
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index 1a041bcf506b..d71c8655905b 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c | |||
@@ -3,6 +3,7 @@ | |||
3 | #include <linux/dmar.h> | 3 | #include <linux/dmar.h> |
4 | #include <linux/bootmem.h> | 4 | #include <linux/bootmem.h> |
5 | #include <linux/pci.h> | 5 | #include <linux/pci.h> |
6 | #include <linux/kmemleak.h> | ||
6 | 7 | ||
7 | #include <asm/proto.h> | 8 | #include <asm/proto.h> |
8 | #include <asm/dma.h> | 9 | #include <asm/dma.h> |
@@ -32,7 +33,14 @@ int no_iommu __read_mostly; | |||
32 | /* Set this to 1 if there is a HW IOMMU in the system */ | 33 | /* Set this to 1 if there is a HW IOMMU in the system */ |
33 | int iommu_detected __read_mostly = 0; | 34 | int iommu_detected __read_mostly = 0; |
34 | 35 | ||
35 | int iommu_pass_through; | 36 | /* |
37 | * This variable becomes 1 if iommu=pt is passed on the kernel command line. | ||
38 | * If this variable is 1, IOMMU implementations do no DMA translation for | ||
39 | * devices and allow every device to access the whole physical memory. This is | ||
40 | * useful if a user wants to use an IOMMU only for KVM device assignment to | ||
41 | * guests and not for driver dma translation. | ||
42 | */ | ||
43 | int iommu_pass_through __read_mostly; | ||
36 | 44 | ||
37 | dma_addr_t bad_dma_address __read_mostly = 0; | 45 | dma_addr_t bad_dma_address __read_mostly = 0; |
38 | EXPORT_SYMBOL(bad_dma_address); | 46 | EXPORT_SYMBOL(bad_dma_address); |
@@ -88,6 +96,11 @@ void __init dma32_reserve_bootmem(void) | |||
88 | size = roundup(dma32_bootmem_size, align); | 96 | size = roundup(dma32_bootmem_size, align); |
89 | dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align, | 97 | dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align, |
90 | 512ULL<<20); | 98 | 512ULL<<20); |
99 | /* | ||
100 | * Kmemleak should not scan this block as it may not be mapped via the | ||
101 | * kernel direct mapping. | ||
102 | */ | ||
103 | kmemleak_ignore(dma32_bootmem_ptr); | ||
91 | if (dma32_bootmem_ptr) | 104 | if (dma32_bootmem_ptr) |
92 | dma32_bootmem_size = size; | 105 | dma32_bootmem_size = size; |
93 | else | 106 | else |
@@ -147,7 +160,7 @@ again: | |||
147 | return NULL; | 160 | return NULL; |
148 | 161 | ||
149 | addr = page_to_phys(page); | 162 | addr = page_to_phys(page); |
150 | if (!is_buffer_dma_capable(dma_mask, addr, size)) { | 163 | if (addr + size > dma_mask) { |
151 | __free_pages(page, get_order(size)); | 164 | __free_pages(page, get_order(size)); |
152 | 165 | ||
153 | if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) { | 166 | if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) { |
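
This hunk and the pci-gart/pci-nommu hunks below all converge on one test: a buffer is usable for DMA iff its end fits under the device's DMA mask. A one-line sketch of what the check boils down to here (mirroring the open-coded condition above; illustrative only):

/* Capable iff the whole of [addr, addr+size) lies within the mask. */
static inline int buffer_dma_capable(u64 dma_mask, u64 addr, size_t size)
{
        return addr + size <= dma_mask;
}
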
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c index d2e56b8f48e7..98a827ee9ed7 100644 --- a/arch/x86/kernel/pci-gart_64.c +++ b/arch/x86/kernel/pci-gart_64.c | |||
@@ -190,14 +190,13 @@ static void iommu_full(struct device *dev, size_t size, int dir) | |||
190 | static inline int | 190 | static inline int |
191 | need_iommu(struct device *dev, unsigned long addr, size_t size) | 191 | need_iommu(struct device *dev, unsigned long addr, size_t size) |
192 | { | 192 | { |
193 | return force_iommu || | 193 | return force_iommu || !dma_capable(dev, addr, size); |
194 | !is_buffer_dma_capable(*dev->dma_mask, addr, size); | ||
195 | } | 194 | } |
196 | 195 | ||
197 | static inline int | 196 | static inline int |
198 | nonforced_iommu(struct device *dev, unsigned long addr, size_t size) | 197 | nonforced_iommu(struct device *dev, unsigned long addr, size_t size) |
199 | { | 198 | { |
200 | return !is_buffer_dma_capable(*dev->dma_mask, addr, size); | 199 | return !dma_capable(dev, addr, size); |
201 | } | 200 | } |
202 | 201 | ||
203 | /* Map a single continuous physical area into the IOMMU. | 202 | /* Map a single continuous physical area into the IOMMU. |
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c index 71d412a09f30..a3933d4330cd 100644 --- a/arch/x86/kernel/pci-nommu.c +++ b/arch/x86/kernel/pci-nommu.c | |||
@@ -14,7 +14,7 @@ | |||
14 | static int | 14 | static int |
15 | check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size) | 15 | check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size) |
16 | { | 16 | { |
17 | if (hwdev && !is_buffer_dma_capable(*hwdev->dma_mask, bus, size)) { | 17 | if (hwdev && !dma_capable(hwdev, bus, size)) { |
18 | if (*hwdev->dma_mask >= DMA_BIT_MASK(32)) | 18 | if (*hwdev->dma_mask >= DMA_BIT_MASK(32)) |
19 | printk(KERN_ERR | 19 | printk(KERN_ERR |
20 | "nommu_%s: overflow %Lx+%zu of device mask %Lx\n", | 20 | "nommu_%s: overflow %Lx+%zu of device mask %Lx\n", |
@@ -79,12 +79,29 @@ static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr, | |||
79 | free_pages((unsigned long)vaddr, get_order(size)); | 79 | free_pages((unsigned long)vaddr, get_order(size)); |
80 | } | 80 | } |
81 | 81 | ||
82 | static void nommu_sync_single_for_device(struct device *dev, | ||
83 | dma_addr_t addr, size_t size, | ||
84 | enum dma_data_direction dir) | ||
85 | { | ||
86 | flush_write_buffers(); | ||
87 | } | ||
88 | |||
89 | |||
90 | static void nommu_sync_sg_for_device(struct device *dev, | ||
91 | struct scatterlist *sg, int nelems, | ||
92 | enum dma_data_direction dir) | ||
93 | { | ||
94 | flush_write_buffers(); | ||
95 | } | ||
96 | |||
82 | struct dma_map_ops nommu_dma_ops = { | 97 | struct dma_map_ops nommu_dma_ops = { |
83 | .alloc_coherent = dma_generic_alloc_coherent, | 98 | .alloc_coherent = dma_generic_alloc_coherent, |
84 | .free_coherent = nommu_free_coherent, | 99 | .free_coherent = nommu_free_coherent, |
85 | .map_sg = nommu_map_sg, | 100 | .map_sg = nommu_map_sg, |
86 | .map_page = nommu_map_page, | 101 | .map_page = nommu_map_page, |
87 | .is_phys = 1, | 102 | .sync_single_for_device = nommu_sync_single_for_device, |
103 | .sync_sg_for_device = nommu_sync_sg_for_device, | ||
104 | .is_phys = 1, | ||
88 | }; | 105 | }; |
89 | 106 | ||
90 | void __init no_iommu_init(void) | 107 | void __init no_iommu_init(void) |
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c index 6af96ee44200..e8a35016115f 100644 --- a/arch/x86/kernel/pci-swiotlb.c +++ b/arch/x86/kernel/pci-swiotlb.c | |||
@@ -13,31 +13,6 @@ | |||
13 | 13 | ||
14 | int swiotlb __read_mostly; | 14 | int swiotlb __read_mostly; |
15 | 15 | ||
16 | void * __init swiotlb_alloc_boot(size_t size, unsigned long nslabs) | ||
17 | { | ||
18 | return alloc_bootmem_low_pages(size); | ||
19 | } | ||
20 | |||
21 | void *swiotlb_alloc(unsigned order, unsigned long nslabs) | ||
22 | { | ||
23 | return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order); | ||
24 | } | ||
25 | |||
26 | dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr) | ||
27 | { | ||
28 | return paddr; | ||
29 | } | ||
30 | |||
31 | phys_addr_t swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr) | ||
32 | { | ||
33 | return baddr; | ||
34 | } | ||
35 | |||
36 | int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size) | ||
37 | { | ||
38 | return 0; | ||
39 | } | ||
40 | |||
41 | static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, | 16 | static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, |
42 | dma_addr_t *dma_handle, gfp_t flags) | 17 | dma_addr_t *dma_handle, gfp_t flags) |
43 | { | 18 | { |
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 59f4524984af..4cf79567cdab 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c | |||
@@ -61,9 +61,6 @@ | |||
61 | 61 | ||
62 | asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); | 62 | asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); |
63 | 63 | ||
64 | DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task; | ||
65 | EXPORT_PER_CPU_SYMBOL(current_task); | ||
66 | |||
67 | /* | 64 | /* |
68 | * Return saved PC of a blocked thread. | 65 | * Return saved PC of a blocked thread. |
69 | */ | 66 | */ |
@@ -350,14 +347,21 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
350 | *next = &next_p->thread; | 347 | *next = &next_p->thread; |
351 | int cpu = smp_processor_id(); | 348 | int cpu = smp_processor_id(); |
352 | struct tss_struct *tss = &per_cpu(init_tss, cpu); | 349 | struct tss_struct *tss = &per_cpu(init_tss, cpu); |
350 | bool preload_fpu; | ||
353 | 351 | ||
354 | /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ | 352 | /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ |
355 | 353 | ||
356 | __unlazy_fpu(prev_p); | 354 | /* |
355 | * If the task has used fpu the last 5 timeslices, just do a full | ||
356 | * restore of the math state immediately to avoid the trap; the | ||
357 | * chances of needing FPU soon are obviously high now | ||
358 | */ | ||
359 | preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5; | ||
357 | 360 | ||
361 | __unlazy_fpu(prev_p); | ||
358 | 362 | ||
359 | /* we're going to use this soon, after a few expensive things */ | 363 | /* we're going to use this soon, after a few expensive things */ |
360 | if (next_p->fpu_counter > 5) | 364 | if (preload_fpu) |
361 | prefetch(next->xstate); | 365 | prefetch(next->xstate); |
362 | 366 | ||
363 | /* | 367 | /* |
@@ -398,6 +402,11 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
398 | task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT)) | 402 | task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT)) |
399 | __switch_to_xtra(prev_p, next_p, tss); | 403 | __switch_to_xtra(prev_p, next_p, tss); |
400 | 404 | ||
405 | /* If we're going to preload the fpu context, make sure clts | ||
406 | is run while we're batching the cpu state updates. */ | ||
407 | if (preload_fpu) | ||
408 | clts(); | ||
409 | |||
401 | /* | 410 | /* |
402 | * Leave lazy mode, flushing any hypercalls made here. | 411 | * Leave lazy mode, flushing any hypercalls made here. |
403 | * This must be done before restoring TLS segments so | 412 | * This must be done before restoring TLS segments so |
@@ -407,15 +416,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
407 | */ | 416 | */ |
408 | arch_end_context_switch(next_p); | 417 | arch_end_context_switch(next_p); |
409 | 418 | ||
410 | /* If the task has used fpu the last 5 timeslices, just do a full | 419 | if (preload_fpu) |
411 | * restore of the math state immediately to avoid the trap; the | 420 | __math_state_restore(); |
412 | * chances of needing FPU soon are obviously high now | ||
413 | * | ||
414 | * tsk_used_math() checks prevent calling math_state_restore(), | ||
415 | * which can sleep in the case of !tsk_used_math() | ||
416 | */ | ||
417 | if (tsk_used_math(next_p) && next_p->fpu_counter > 5) | ||
418 | math_state_restore(); | ||
419 | 421 | ||
420 | /* | 422 | /* |
421 | * Restore %gs if needed (which is common) | 423 | * Restore %gs if needed (which is common) |
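
The reordering in this hunk (and in its 64-bit twin below) boils down to four steps: decide early whether to preload, save the outgoing task's state, clear CR0.TS while CPU-state updates are still batched, and only then perform the no-trap restore. A compilable toy model of just that ordering (all helpers are stubs; not kernel code):

#include <stdbool.h>

static void unlazy_fpu(void)         { /* save outgoing FPU state */ }
static void clts(void)               { /* clear CR0.TS            */ }
static void end_context_switch(void) { /* flush batched updates   */ }
static void math_state_restore(void) { /* reload FPU registers    */ }

static void switch_fpu(bool used_math, int fpu_counter)
{
        bool preload_fpu = used_math && fpu_counter > 5;  /* decide early */

        unlazy_fpu();
        if (preload_fpu)
                clts();               /* while updates are still batched */
        end_context_switch();
        if (preload_fpu)
                math_state_restore(); /* TS already clear, so no DNA trap */
}
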
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index ebefb5407b9d..ad535b683170 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c | |||
@@ -55,9 +55,6 @@ | |||
55 | 55 | ||
56 | asmlinkage extern void ret_from_fork(void); | 56 | asmlinkage extern void ret_from_fork(void); |
57 | 57 | ||
58 | DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task; | ||
59 | EXPORT_PER_CPU_SYMBOL(current_task); | ||
60 | |||
61 | DEFINE_PER_CPU(unsigned long, old_rsp); | 58 | DEFINE_PER_CPU(unsigned long, old_rsp); |
62 | static DEFINE_PER_CPU(unsigned char, is_idle); | 59 | static DEFINE_PER_CPU(unsigned char, is_idle); |
63 | 60 | ||
@@ -386,9 +383,17 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
386 | int cpu = smp_processor_id(); | 383 | int cpu = smp_processor_id(); |
387 | struct tss_struct *tss = &per_cpu(init_tss, cpu); | 384 | struct tss_struct *tss = &per_cpu(init_tss, cpu); |
388 | unsigned fsindex, gsindex; | 385 | unsigned fsindex, gsindex; |
386 | bool preload_fpu; | ||
387 | |||
388 | /* | ||
389 | * If the task has used fpu the last 5 timeslices, just do a full | ||
390 | * restore of the math state immediately to avoid the trap; the | ||
391 | * chances of needing FPU soon are obviously high now | ||
392 | */ | ||
393 | preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5; | ||
389 | 394 | ||
390 | /* we're going to use this soon, after a few expensive things */ | 395 | /* we're going to use this soon, after a few expensive things */ |
391 | if (next_p->fpu_counter > 5) | 396 | if (preload_fpu) |
392 | prefetch(next->xstate); | 397 | prefetch(next->xstate); |
393 | 398 | ||
394 | /* | 399 | /* |
@@ -419,6 +424,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
419 | 424 | ||
420 | load_TLS(next, cpu); | 425 | load_TLS(next, cpu); |
421 | 426 | ||
427 | /* Must be after DS reload */ | ||
428 | unlazy_fpu(prev_p); | ||
429 | |||
430 | /* Make sure cpu is ready for new context */ | ||
431 | if (preload_fpu) | ||
432 | clts(); | ||
433 | |||
422 | /* | 434 | /* |
423 | * Leave lazy mode, flushing any hypercalls made here. | 435 | * Leave lazy mode, flushing any hypercalls made here. |
424 | * This must be done before restoring TLS segments so | 436 | * This must be done before restoring TLS segments so |
@@ -459,9 +471,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
459 | wrmsrl(MSR_KERNEL_GS_BASE, next->gs); | 471 | wrmsrl(MSR_KERNEL_GS_BASE, next->gs); |
460 | prev->gsindex = gsindex; | 472 | prev->gsindex = gsindex; |
461 | 473 | ||
462 | /* Must be after DS reload */ | ||
463 | unlazy_fpu(prev_p); | ||
464 | |||
465 | /* | 474 | /* |
466 | * Switch the PDA and FPU contexts. | 475 | * Switch the PDA and FPU contexts. |
467 | */ | 476 | */ |
@@ -480,15 +489,12 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
480 | task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV)) | 489 | task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV)) |
481 | __switch_to_xtra(prev_p, next_p, tss); | 490 | __switch_to_xtra(prev_p, next_p, tss); |
482 | 491 | ||
483 | /* If the task has used fpu the last 5 timeslices, just do a full | 492 | /* |
484 | * restore of the math state immediately to avoid the trap; the | 493 | * Preload the FPU context, now that we've determined that the |
485 | * chances of needing FPU soon are obviously high now | 494 | * task is likely to be using it. |
486 | * | ||
487 | * tsk_used_math() checks prevent calling math_state_restore(), | ||
488 | * which can sleep in the case of !tsk_used_math() | ||
489 | */ | 495 | */ |
490 | if (tsk_used_math(next_p) && next_p->fpu_counter > 5) | 496 | if (preload_fpu) |
491 | math_state_restore(); | 497 | __math_state_restore(); |
492 | return prev_p; | 498 | return prev_p; |
493 | } | 499 | } |
494 | 500 | ||
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 09ecbde91c13..8d7d5c9c1be3 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c | |||
@@ -35,10 +35,11 @@ | |||
35 | #include <asm/proto.h> | 35 | #include <asm/proto.h> |
36 | #include <asm/ds.h> | 36 | #include <asm/ds.h> |
37 | 37 | ||
38 | #include <trace/syscall.h> | ||
39 | |||
40 | #include "tls.h" | 38 | #include "tls.h" |
41 | 39 | ||
40 | #define CREATE_TRACE_POINTS | ||
41 | #include <trace/events/syscalls.h> | ||
42 | |||
42 | enum x86_regset { | 43 | enum x86_regset { |
43 | REGSET_GENERAL, | 44 | REGSET_GENERAL, |
44 | REGSET_FP, | 45 | REGSET_FP, |
@@ -1497,8 +1498,8 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs) | |||
1497 | tracehook_report_syscall_entry(regs)) | 1498 | tracehook_report_syscall_entry(regs)) |
1498 | ret = -1L; | 1499 | ret = -1L; |
1499 | 1500 | ||
1500 | if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE))) | 1501 | if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) |
1501 | ftrace_syscall_enter(regs); | 1502 | trace_sys_enter(regs, regs->orig_ax); |
1502 | 1503 | ||
1503 | if (unlikely(current->audit_context)) { | 1504 | if (unlikely(current->audit_context)) { |
1504 | if (IS_IA32) | 1505 | if (IS_IA32) |
@@ -1523,8 +1524,8 @@ asmregparm void syscall_trace_leave(struct pt_regs *regs) | |||
1523 | if (unlikely(current->audit_context)) | 1524 | if (unlikely(current->audit_context)) |
1524 | audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax); | 1525 | audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax); |
1525 | 1526 | ||
1526 | if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE))) | 1527 | if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) |
1527 | ftrace_syscall_exit(regs); | 1528 | trace_sys_exit(regs, regs->ax); |
1528 | 1529 | ||
1529 | if (test_thread_flag(TIF_SYSCALL_TRACE)) | 1530 | if (test_thread_flag(TIF_SYSCALL_TRACE)) |
1530 | tracehook_report_syscall_exit(regs, 0); | 1531 | tracehook_report_syscall_exit(regs, 0); |
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 4c578751e94e..81e58238c4ce 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c | |||
@@ -869,6 +869,8 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) | |||
869 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { | 869 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { |
870 | clear_thread_flag(TIF_NOTIFY_RESUME); | 870 | clear_thread_flag(TIF_NOTIFY_RESUME); |
871 | tracehook_notify_resume(regs); | 871 | tracehook_notify_resume(regs); |
872 | if (current->replacement_session_keyring) | ||
873 | key_replace_session_keyring(); | ||
872 | } | 874 | } |
873 | 875 | ||
874 | #ifdef CONFIG_X86_32 | 876 | #ifdef CONFIG_X86_32 |
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 2fecda69ee64..c36cc1452cdc 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -434,7 +434,8 @@ const struct cpumask *cpu_coregroup_mask(int cpu) | |||
434 | * For perf, we return last level cache shared map. | 434 | * For perf, we return last level cache shared map. |
435 | * And for power savings, we return cpu_core_map | 435 | * And for power savings, we return cpu_core_map |
436 | */ | 436 | */ |
437 | if (sched_mc_power_savings || sched_smt_power_savings) | 437 | if ((sched_mc_power_savings || sched_smt_power_savings) && |
438 | !(cpu_has(c, X86_FEATURE_AMD_DCM))) | ||
438 | return cpu_core_mask(cpu); | 439 | return cpu_core_mask(cpu); |
439 | else | 440 | else |
440 | return c->llc_shared_map; | 441 | return c->llc_shared_map; |
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c index e8b9863ef8c4..3149032ff107 100644 --- a/arch/x86/kernel/step.c +++ b/arch/x86/kernel/step.c | |||
@@ -4,6 +4,7 @@ | |||
4 | #include <linux/sched.h> | 4 | #include <linux/sched.h> |
5 | #include <linux/mm.h> | 5 | #include <linux/mm.h> |
6 | #include <linux/ptrace.h> | 6 | #include <linux/ptrace.h> |
7 | #include <asm/desc.h> | ||
7 | 8 | ||
8 | unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs) | 9 | unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs) |
9 | { | 10 | { |
@@ -23,7 +24,7 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re | |||
23 | * and APM bios ones we just ignore here. | 24 | * and APM bios ones we just ignore here. |
24 | */ | 25 | */ |
25 | if ((seg & SEGMENT_TI_MASK) == SEGMENT_LDT) { | 26 | if ((seg & SEGMENT_TI_MASK) == SEGMENT_LDT) { |
26 | u32 *desc; | 27 | struct desc_struct *desc; |
27 | unsigned long base; | 28 | unsigned long base; |
28 | 29 | ||
29 | seg &= ~7UL; | 30 | seg &= ~7UL; |
@@ -33,12 +34,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re | |||
33 | addr = -1L; /* bogus selector, access would fault */ | 34 | addr = -1L; /* bogus selector, access would fault */ |
34 | else { | 35 | else { |
35 | desc = child->mm->context.ldt + seg; | 36 | desc = child->mm->context.ldt + seg; |
36 | base = ((desc[0] >> 16) | | 37 | base = get_desc_base(desc); |
37 | ((desc[1] & 0xff) << 16) | | ||
38 | (desc[1] & 0xff000000)); | ||
39 | 38 | ||
40 | /* 16-bit code segment? */ | 39 | /* 16-bit code segment? */ |
41 | if (!((desc[1] >> 22) & 1)) | 40 | if (!desc->d) |
42 | addr &= 0xffff; | 41 | addr &= 0xffff; |
43 | addr += base; | 42 | addr += base; |
44 | } | 43 | } |
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c index 6bc211accf08..45e00eb09c3a 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c | |||
@@ -18,9 +18,9 @@ | |||
18 | #include <asm/ia32.h> | 18 | #include <asm/ia32.h> |
19 | #include <asm/syscalls.h> | 19 | #include <asm/syscalls.h> |
20 | 20 | ||
21 | asmlinkage long sys_mmap(unsigned long addr, unsigned long len, | 21 | SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, |
22 | unsigned long prot, unsigned long flags, | 22 | unsigned long, prot, unsigned long, flags, |
23 | unsigned long fd, unsigned long off) | 23 | unsigned long, fd, unsigned long, off) |
24 | { | 24 | { |
25 | long error; | 25 | long error; |
26 | struct file *file; | 26 | struct file *file; |
@@ -226,7 +226,7 @@ bottomup: | |||
226 | } | 226 | } |
227 | 227 | ||
228 | 228 | ||
229 | asmlinkage long sys_uname(struct new_utsname __user *name) | 229 | SYSCALL_DEFINE1(uname, struct new_utsname __user *, name) |
230 | { | 230 | { |
231 | int err; | 231 | int err; |
232 | down_read(&uts_sem); | 232 | down_read(&uts_sem); |
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c index 77b9689f8edb..503c1f2e8835 100644 --- a/arch/x86/kernel/tlb_uv.c +++ b/arch/x86/kernel/tlb_uv.c | |||
@@ -640,13 +640,13 @@ static int __init uv_ptc_init(void) | |||
640 | if (!is_uv_system()) | 640 | if (!is_uv_system()) |
641 | return 0; | 641 | return 0; |
642 | 642 | ||
643 | proc_uv_ptc = create_proc_entry(UV_PTC_BASENAME, 0444, NULL); | 643 | proc_uv_ptc = proc_create(UV_PTC_BASENAME, 0444, NULL, |
644 | &proc_uv_ptc_operations); | ||
644 | if (!proc_uv_ptc) { | 645 | if (!proc_uv_ptc) { |
645 | printk(KERN_ERR "unable to create %s proc entry\n", | 646 | printk(KERN_ERR "unable to create %s proc entry\n", |
646 | UV_PTC_BASENAME); | 647 | UV_PTC_BASENAME); |
647 | return -EINVAL; | 648 | return -EINVAL; |
648 | } | 649 | } |
649 | proc_uv_ptc->proc_fops = &proc_uv_ptc_operations; | ||
650 | return 0; | 650 | return 0; |
651 | } | 651 | } |
652 | 652 | ||
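
proc_create() registers the entry and its file_operations atomically; the removed two-step pattern briefly exposed a /proc entry with no fops attached, which a concurrent open() could hit. A minimal sketch of the preferred shape (entry name and fops are placeholders):

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>

static const struct file_operations my_proc_fops;   /* filled in elsewhere */

static int __init my_proc_init(void)
{
        /* One call: the entry never exists without its fops. */
        if (!proc_create("my_entry", 0444, NULL, &my_proc_fops))
                return -ENOMEM;
        return 0;
}
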
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 5204332f475d..83264922a878 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c | |||
@@ -76,7 +76,7 @@ char ignore_fpu_irq; | |||
76 | * F0 0F bug workaround.. We have a special link segment | 76 | * F0 0F bug workaround.. We have a special link segment |
77 | * for this. | 77 | * for this. |
78 | */ | 78 | */ |
79 | gate_desc idt_table[256] | 79 | gate_desc idt_table[NR_VECTORS] |
80 | __attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, }; | 80 | __attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, }; |
81 | #endif | 81 | #endif |
82 | 82 | ||
@@ -786,33 +786,34 @@ do_spurious_interrupt_bug(struct pt_regs *regs, long error_code) | |||
786 | #endif | 786 | #endif |
787 | } | 787 | } |
788 | 788 | ||
789 | #ifdef CONFIG_X86_32 | 789 | asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void) |
790 | unsigned long patch_espfix_desc(unsigned long uesp, unsigned long kesp) | ||
791 | { | 790 | { |
792 | struct desc_struct *gdt = get_cpu_gdt_table(smp_processor_id()); | ||
793 | unsigned long base = (kesp - uesp) & -THREAD_SIZE; | ||
794 | unsigned long new_kesp = kesp - base; | ||
795 | unsigned long lim_pages = (new_kesp | (THREAD_SIZE - 1)) >> PAGE_SHIFT; | ||
796 | __u64 desc = *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS]; | ||
797 | |||
798 | /* Set up base for espfix segment */ | ||
799 | desc &= 0x00f0ff0000000000ULL; | ||
800 | desc |= ((((__u64)base) << 16) & 0x000000ffffff0000ULL) | | ||
801 | ((((__u64)base) << 32) & 0xff00000000000000ULL) | | ||
802 | ((((__u64)lim_pages) << 32) & 0x000f000000000000ULL) | | ||
803 | (lim_pages & 0xffff); | ||
804 | *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS] = desc; | ||
805 | |||
806 | return new_kesp; | ||
807 | } | 791 | } |
808 | #endif | ||
809 | 792 | ||
810 | asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void) | 793 | asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void) |
811 | { | 794 | { |
812 | } | 795 | } |
813 | 796 | ||
814 | asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void) | 797 | /* |
798 | * __math_state_restore assumes that cr0.TS is already clear and the | ||
799 | * fpu state is all ready for use. Used during context switch. | ||
800 | */ | ||
801 | void __math_state_restore(void) | ||
815 | { | 802 | { |
803 | struct thread_info *thread = current_thread_info(); | ||
804 | struct task_struct *tsk = thread->task; | ||
805 | |||
806 | /* | ||
807 | * Paranoid restore. send a SIGSEGV if we fail to restore the state. | ||
808 | */ | ||
809 | if (unlikely(restore_fpu_checking(tsk))) { | ||
810 | stts(); | ||
811 | force_sig(SIGSEGV, tsk); | ||
812 | return; | ||
813 | } | ||
814 | |||
815 | thread->status |= TS_USEDFPU; /* So we fnsave on switch_to() */ | ||
816 | tsk->fpu_counter++; | ||
816 | } | 817 | } |
817 | 818 | ||
818 | /* | 819 | /* |
@@ -846,17 +847,8 @@ asmlinkage void math_state_restore(void) | |||
846 | } | 847 | } |
847 | 848 | ||
848 | clts(); /* Allow maths ops (or we recurse) */ | 849 | clts(); /* Allow maths ops (or we recurse) */ |
849 | /* | ||
850 | * Paranoid restore. send a SIGSEGV if we fail to restore the state. | ||
851 | */ | ||
852 | if (unlikely(restore_fpu_checking(tsk))) { | ||
853 | stts(); | ||
854 | force_sig(SIGSEGV, tsk); | ||
855 | return; | ||
856 | } | ||
857 | 850 | ||
858 | thread->status |= TS_USEDFPU; /* So we fnsave on switch_to() */ | 851 | __math_state_restore(); |
859 | tsk->fpu_counter++; | ||
860 | } | 852 | } |
861 | EXPORT_SYMBOL_GPL(math_state_restore); | 853 | EXPORT_SYMBOL_GPL(math_state_restore); |
862 | 854 | ||
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 3d4529011828..633ccc7400a4 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -2297,12 +2297,7 @@ static int emulator_cmpxchg_emulated(unsigned long addr, | |||
2297 | unsigned int bytes, | 2297 | unsigned int bytes, |
2298 | struct kvm_vcpu *vcpu) | 2298 | struct kvm_vcpu *vcpu) |
2299 | { | 2299 | { |
2300 | static int reported; | 2300 | printk_once(KERN_WARNING "kvm: emulating exchange as write\n"); |
2301 | |||
2302 | if (!reported) { | ||
2303 | reported = 1; | ||
2304 | printk(KERN_WARNING "kvm: emulating exchange as write\n"); | ||
2305 | } | ||
2306 | #ifndef CONFIG_X86_64 | 2301 | #ifndef CONFIG_X86_64 |
2307 | /* guests cmpxchg8b have to be emulated atomically */ | 2302 | /* guests cmpxchg8b have to be emulated atomically */ |
2308 | if (bytes == 8) { | 2303 | if (bytes == 8) { |
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index 07c31899c9c2..9e609206fac9 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile | |||
@@ -9,6 +9,8 @@ lib-y += thunk_$(BITS).o | |||
9 | lib-y += usercopy_$(BITS).o getuser.o putuser.o | 9 | lib-y += usercopy_$(BITS).o getuser.o putuser.o |
10 | lib-y += memcpy_$(BITS).o | 10 | lib-y += memcpy_$(BITS).o |
11 | 11 | ||
12 | obj-y += msr-reg.o msr-reg-export.o | ||
13 | |||
12 | ifeq ($(CONFIG_X86_32),y) | 14 | ifeq ($(CONFIG_X86_32),y) |
13 | obj-y += atomic64_32.o | 15 | obj-y += atomic64_32.o |
14 | lib-y += checksum_32.o | 16 | lib-y += checksum_32.o |
diff --git a/arch/x86/lib/msr-reg-export.c b/arch/x86/lib/msr-reg-export.c new file mode 100644 index 000000000000..a311cc59b65d --- /dev/null +++ b/arch/x86/lib/msr-reg-export.c | |||
@@ -0,0 +1,5 @@ | |||
1 | #include <linux/module.h> | ||
2 | #include <asm/msr.h> | ||
3 | |||
4 | EXPORT_SYMBOL(native_rdmsr_safe_regs); | ||
5 | EXPORT_SYMBOL(native_wrmsr_safe_regs); | ||
diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S new file mode 100644 index 000000000000..69fa10623f21 --- /dev/null +++ b/arch/x86/lib/msr-reg.S | |||
@@ -0,0 +1,102 @@ | |||
1 | #include <linux/linkage.h> | ||
2 | #include <linux/errno.h> | ||
3 | #include <asm/dwarf2.h> | ||
4 | #include <asm/asm.h> | ||
5 | #include <asm/msr.h> | ||
6 | |||
7 | #ifdef CONFIG_X86_64 | ||
8 | /* | ||
9 | * int native_{rdmsr,wrmsr}_safe_regs(u32 gprs[8]); | ||
10 | * | ||
11 | * reg layout: u32 gprs[eax, ecx, edx, ebx, esp, ebp, esi, edi] | ||
12 | * | ||
13 | */ | ||
14 | .macro op_safe_regs op | ||
15 | ENTRY(native_\op\()_safe_regs) | ||
16 | CFI_STARTPROC | ||
17 | pushq_cfi %rbx | ||
18 | pushq_cfi %rbp | ||
19 | movq %rdi, %r10 /* Save pointer */ | ||
20 | xorl %r11d, %r11d /* Return value */ | ||
21 | movl (%rdi), %eax | ||
22 | movl 4(%rdi), %ecx | ||
23 | movl 8(%rdi), %edx | ||
24 | movl 12(%rdi), %ebx | ||
25 | movl 20(%rdi), %ebp | ||
26 | movl 24(%rdi), %esi | ||
27 | movl 28(%rdi), %edi | ||
28 | CFI_REMEMBER_STATE | ||
29 | 1: \op | ||
30 | 2: movl %eax, (%r10) | ||
31 | movl %r11d, %eax /* Return value */ | ||
32 | movl %ecx, 4(%r10) | ||
33 | movl %edx, 8(%r10) | ||
34 | movl %ebx, 12(%r10) | ||
35 | movl %ebp, 20(%r10) | ||
36 | movl %esi, 24(%r10) | ||
37 | movl %edi, 28(%r10) | ||
38 | popq_cfi %rbp | ||
39 | popq_cfi %rbx | ||
40 | ret | ||
41 | 3: | ||
42 | CFI_RESTORE_STATE | ||
43 | movl $-EIO, %r11d | ||
44 | jmp 2b | ||
45 | |||
46 | _ASM_EXTABLE(1b, 3b) | ||
47 | CFI_ENDPROC | ||
48 | ENDPROC(native_\op\()_safe_regs) | ||
49 | .endm | ||
50 | |||
51 | #else /* X86_32 */ | ||
52 | |||
53 | .macro op_safe_regs op | ||
54 | ENTRY(native_\op\()_safe_regs) | ||
55 | CFI_STARTPROC | ||
56 | pushl_cfi %ebx | ||
57 | pushl_cfi %ebp | ||
58 | pushl_cfi %esi | ||
59 | pushl_cfi %edi | ||
60 | pushl_cfi $0 /* Return value */ | ||
61 | pushl_cfi %eax | ||
62 | movl 4(%eax), %ecx | ||
63 | movl 8(%eax), %edx | ||
64 | movl 12(%eax), %ebx | ||
65 | movl 20(%eax), %ebp | ||
66 | movl 24(%eax), %esi | ||
67 | movl 28(%eax), %edi | ||
68 | movl (%eax), %eax | ||
69 | CFI_REMEMBER_STATE | ||
70 | 1: \op | ||
71 | 2: pushl_cfi %eax | ||
72 | movl 4(%esp), %eax | ||
73 | popl_cfi (%eax) | ||
74 | addl $4, %esp | ||
75 | CFI_ADJUST_CFA_OFFSET -4 | ||
76 | movl %ecx, 4(%eax) | ||
77 | movl %edx, 8(%eax) | ||
78 | movl %ebx, 12(%eax) | ||
79 | movl %ebp, 20(%eax) | ||
80 | movl %esi, 24(%eax) | ||
81 | movl %edi, 28(%eax) | ||
82 | popl_cfi %eax | ||
83 | popl_cfi %edi | ||
84 | popl_cfi %esi | ||
85 | popl_cfi %ebp | ||
86 | popl_cfi %ebx | ||
87 | ret | ||
88 | 3: | ||
89 | CFI_RESTORE_STATE | ||
90 | movl $-EIO, 4(%esp) | ||
91 | jmp 2b | ||
92 | |||
93 | _ASM_EXTABLE(1b, 3b) | ||
94 | CFI_ENDPROC | ||
95 | ENDPROC(native_\op\()_safe_regs) | ||
96 | .endm | ||
97 | |||
98 | #endif | ||
99 | |||
100 | op_safe_regs rdmsr | ||
101 | op_safe_regs wrmsr | ||
102 | |||
diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c index caa24aca8115..33a1e3ca22d8 100644 --- a/arch/x86/lib/msr.c +++ b/arch/x86/lib/msr.c | |||
@@ -175,3 +175,52 @@ int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) | |||
175 | return err ? err : rv.err; | 175 | return err ? err : rv.err; |
176 | } | 176 | } |
177 | EXPORT_SYMBOL(wrmsr_safe_on_cpu); | 177 | EXPORT_SYMBOL(wrmsr_safe_on_cpu); |
178 | |||
179 | /* | ||
180 | * These variants are significantly slower, but allow control over | ||
181 | * the entire 32-bit GPR set. | ||
182 | */ | ||
183 | struct msr_regs_info { | ||
184 | u32 *regs; | ||
185 | int err; | ||
186 | }; | ||
187 | |||
188 | static void __rdmsr_safe_regs_on_cpu(void *info) | ||
189 | { | ||
190 | struct msr_regs_info *rv = info; | ||
191 | |||
192 | rv->err = rdmsr_safe_regs(rv->regs); | ||
193 | } | ||
194 | |||
195 | static void __wrmsr_safe_regs_on_cpu(void *info) | ||
196 | { | ||
197 | struct msr_regs_info *rv = info; | ||
198 | |||
199 | rv->err = wrmsr_safe_regs(rv->regs); | ||
200 | } | ||
201 | |||
202 | int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs) | ||
203 | { | ||
204 | int err; | ||
205 | struct msr_regs_info rv; | ||
206 | |||
207 | rv.regs = regs; | ||
208 | rv.err = -EIO; | ||
209 | err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1); | ||
210 | |||
211 | return err ? err : rv.err; | ||
212 | } | ||
213 | EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu); | ||
214 | |||
215 | int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs) | ||
216 | { | ||
217 | int err; | ||
218 | struct msr_regs_info rv; | ||
219 | |||
220 | rv.regs = regs; | ||
221 | rv.err = -EIO; | ||
222 | err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1); | ||
223 | |||
224 | return err ? err : rv.err; | ||
225 | } | ||
226 | EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu); | ||
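
A hedged kernel-side usage sketch for the new helpers (MSR number and CPU are arbitrary examples): the caller fills slot 1 (ecx) with the MSR index and reads the result back from slots 0 and 2 (eax/edx), per the layout documented in msr-reg.S above.

#include <linux/kernel.h>
#include <asm/msr.h>

/* Illustrative only: read an MSR on a remote CPU via the regs API. */
static int example_rdmsr_on_cpu(unsigned int cpu, u32 msr)
{
        u32 regs[8] = { 0 };
        int err;

        regs[1] = msr;                       /* ecx: MSR number */
        err = rdmsr_safe_regs_on_cpu(cpu, regs);
        if (!err)
                pr_info("msr %#x = %08x:%08x\n", msr, regs[2], regs[0]);
        return err;
}
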
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index eefdeee8a871..9b5a9f59a478 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile | |||
@@ -1,5 +1,9 @@ | |||
1 | obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ | 1 | obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ |
2 | pat.o pgtable.o gup.o | 2 | pat.o pgtable.o physaddr.o gup.o |
3 | |||
4 | # Make sure __phys_addr has no stackprotector | ||
5 | nostackp := $(call cc-option, -fno-stack-protector) | ||
6 | CFLAGS_physaddr.o := $(nostackp) | ||
3 | 7 | ||
4 | obj-$(CONFIG_SMP) += tlb.o | 8 | obj-$(CONFIG_SMP) += tlb.o |
5 | 9 | ||
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index bfae139182ff..775a020990a5 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c | |||
@@ -285,26 +285,25 @@ check_v8086_mode(struct pt_regs *regs, unsigned long address, | |||
285 | tsk->thread.screen_bitmap |= 1 << bit; | 285 | tsk->thread.screen_bitmap |= 1 << bit; |
286 | } | 286 | } |
287 | 287 | ||
288 | static void dump_pagetable(unsigned long address) | 288 | static bool low_pfn(unsigned long pfn) |
289 | { | 289 | { |
290 | __typeof__(pte_val(__pte(0))) page; | 290 | return pfn < max_low_pfn; |
291 | } | ||
291 | 292 | ||
292 | page = read_cr3(); | 293 | static void dump_pagetable(unsigned long address) |
293 | page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT]; | 294 | { |
295 | pgd_t *base = __va(read_cr3()); | ||
296 | pgd_t *pgd = &base[pgd_index(address)]; | ||
297 | pmd_t *pmd; | ||
298 | pte_t *pte; | ||
294 | 299 | ||
295 | #ifdef CONFIG_X86_PAE | 300 | #ifdef CONFIG_X86_PAE |
296 | printk("*pdpt = %016Lx ", page); | 301 | printk("*pdpt = %016Lx ", pgd_val(*pgd)); |
297 | if ((page >> PAGE_SHIFT) < max_low_pfn | 302 | if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd)) |
298 | && page & _PAGE_PRESENT) { | 303 | goto out; |
299 | page &= PAGE_MASK; | ||
300 | page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT) | ||
301 | & (PTRS_PER_PMD - 1)]; | ||
302 | printk(KERN_CONT "*pde = %016Lx ", page); | ||
303 | page &= ~_PAGE_NX; | ||
304 | } | ||
305 | #else | ||
306 | printk("*pde = %08lx ", page); | ||
307 | #endif | 304 | #endif |
305 | pmd = pmd_offset(pud_offset(pgd, address), address); | ||
306 | printk(KERN_CONT "*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd)); | ||
308 | 307 | ||
309 | /* | 308 | /* |
310 | * We must not directly access the pte in the highpte | 309 | * We must not directly access the pte in the highpte |
@@ -312,16 +311,12 @@ static void dump_pagetable(unsigned long address) | |||
312 | * And let's rather not kmap-atomic the pte, just in case | 311 | * And let's rather not kmap-atomic the pte, just in case |
313 | * it's allocated already: | 312 | * it's allocated already: |
314 | */ | 313 | */ |
315 | if ((page >> PAGE_SHIFT) < max_low_pfn | 314 | if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd)) |
316 | && (page & _PAGE_PRESENT) | 315 | goto out; |
317 | && !(page & _PAGE_PSE)) { | ||
318 | |||
319 | page &= PAGE_MASK; | ||
320 | page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT) | ||
321 | & (PTRS_PER_PTE - 1)]; | ||
322 | printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page); | ||
323 | } | ||
324 | 316 | ||
317 | pte = pte_offset_kernel(pmd, address); | ||
318 | printk("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte)); | ||
319 | out: | ||
325 | printk("\n"); | 320 | printk("\n"); |
326 | } | 321 | } |
327 | 322 | ||
@@ -450,16 +445,12 @@ static int bad_address(void *p) | |||
450 | 445 | ||
451 | static void dump_pagetable(unsigned long address) | 446 | static void dump_pagetable(unsigned long address) |
452 | { | 447 | { |
453 | pgd_t *pgd; | 448 | pgd_t *base = __va(read_cr3() & PHYSICAL_PAGE_MASK); |
449 | pgd_t *pgd = base + pgd_index(address); | ||
454 | pud_t *pud; | 450 | pud_t *pud; |
455 | pmd_t *pmd; | 451 | pmd_t *pmd; |
456 | pte_t *pte; | 452 | pte_t *pte; |
457 | 453 | ||
458 | pgd = (pgd_t *)read_cr3(); | ||
459 | |||
460 | pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK); | ||
461 | |||
462 | pgd += pgd_index(address); | ||
463 | if (bad_address(pgd)) | 454 | if (bad_address(pgd)) |
464 | goto bad; | 455 | goto bad; |
465 | 456 | ||
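
The rewritten 32-bit dump_pagetable() above walks with the standard page-table accessors instead of casting raw cr3 words. Stripped of the printing and PAE details, the walk it performs looks like this (a condensed sketch of the hunk, relying on the same headers as fault.c; not new functionality):

#include <linux/mm.h>
#include <asm/pgtable.h>

static void sketch_walk(unsigned long address)
{
        pgd_t *base = __va(read_cr3());
        pgd_t *pgd = &base[pgd_index(address)];
        pmd_t *pmd;
        pte_t *pte;

        if (!pgd_present(*pgd))
                return;                        /* nothing mapped here  */
        pmd = pmd_offset(pud_offset(pgd, address), address);
        if (!pmd_present(*pmd) || pmd_large(*pmd))
                return;                        /* absent or huge page  */
        pte = pte_offset_kernel(pmd, address);
        pr_info("*pte = %llx\n", (unsigned long long)pte_val(*pte));
}
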
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c index 2112ed55e7ea..1617958a3805 100644 --- a/arch/x86/mm/highmem_32.c +++ b/arch/x86/mm/highmem_32.c | |||
@@ -24,7 +24,7 @@ void kunmap(struct page *page) | |||
24 | * no global lock is needed and because the kmap code must perform a global TLB | 24 | * no global lock is needed and because the kmap code must perform a global TLB |
25 | * invalidation when the kmap pool wraps. | 25 | * invalidation when the kmap pool wraps. |
26 | * | 26 | * |
27 | * However when holding an atomic kmap is is not legal to sleep, so atomic | 27 | * However when holding an atomic kmap it is not legal to sleep, so atomic |
28 | * kmaps are appropriate for short, tight code paths only. | 28 | * kmaps are appropriate for short, tight code paths only. |
29 | */ | 29 | */ |
30 | void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot) | 30 | void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot) |
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 8a450930834f..04e1ad60c63a 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c | |||
@@ -22,77 +22,7 @@ | |||
22 | #include <asm/pgalloc.h> | 22 | #include <asm/pgalloc.h> |
23 | #include <asm/pat.h> | 23 | #include <asm/pat.h> |
24 | 24 | ||
25 | static inline int phys_addr_valid(resource_size_t addr) | 25 | #include "physaddr.h" |
26 | { | ||
27 | #ifdef CONFIG_PHYS_ADDR_T_64BIT | ||
28 | return !(addr >> boot_cpu_data.x86_phys_bits); | ||
29 | #else | ||
30 | return 1; | ||
31 | #endif | ||
32 | } | ||
33 | |||
34 | #ifdef CONFIG_X86_64 | ||
35 | |||
36 | unsigned long __phys_addr(unsigned long x) | ||
37 | { | ||
38 | if (x >= __START_KERNEL_map) { | ||
39 | x -= __START_KERNEL_map; | ||
40 | VIRTUAL_BUG_ON(x >= KERNEL_IMAGE_SIZE); | ||
41 | x += phys_base; | ||
42 | } else { | ||
43 | VIRTUAL_BUG_ON(x < PAGE_OFFSET); | ||
44 | x -= PAGE_OFFSET; | ||
45 | VIRTUAL_BUG_ON(!phys_addr_valid(x)); | ||
46 | } | ||
47 | return x; | ||
48 | } | ||
49 | EXPORT_SYMBOL(__phys_addr); | ||
50 | |||
51 | bool __virt_addr_valid(unsigned long x) | ||
52 | { | ||
53 | if (x >= __START_KERNEL_map) { | ||
54 | x -= __START_KERNEL_map; | ||
55 | if (x >= KERNEL_IMAGE_SIZE) | ||
56 | return false; | ||
57 | x += phys_base; | ||
58 | } else { | ||
59 | if (x < PAGE_OFFSET) | ||
60 | return false; | ||
61 | x -= PAGE_OFFSET; | ||
62 | if (!phys_addr_valid(x)) | ||
63 | return false; | ||
64 | } | ||
65 | |||
66 | return pfn_valid(x >> PAGE_SHIFT); | ||
67 | } | ||
68 | EXPORT_SYMBOL(__virt_addr_valid); | ||
69 | |||
70 | #else | ||
71 | |||
72 | #ifdef CONFIG_DEBUG_VIRTUAL | ||
73 | unsigned long __phys_addr(unsigned long x) | ||
74 | { | ||
75 | /* VMALLOC_* aren't constants */ | ||
76 | VIRTUAL_BUG_ON(x < PAGE_OFFSET); | ||
77 | VIRTUAL_BUG_ON(__vmalloc_start_set && is_vmalloc_addr((void *) x)); | ||
78 | return x - PAGE_OFFSET; | ||
79 | } | ||
80 | EXPORT_SYMBOL(__phys_addr); | ||
81 | #endif | ||
82 | |||
83 | bool __virt_addr_valid(unsigned long x) | ||
84 | { | ||
85 | if (x < PAGE_OFFSET) | ||
86 | return false; | ||
87 | if (__vmalloc_start_set && is_vmalloc_addr((void *) x)) | ||
88 | return false; | ||
89 | if (x >= FIXADDR_START) | ||
90 | return false; | ||
91 | return pfn_valid((x - PAGE_OFFSET) >> PAGE_SHIFT); | ||
92 | } | ||
93 | EXPORT_SYMBOL(__virt_addr_valid); | ||
94 | |||
95 | #endif | ||
96 | 26 | ||
97 | int page_is_ram(unsigned long pagenr) | 27 | int page_is_ram(unsigned long pagenr) |
98 | { | 28 | { |
diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c index 2c55ed098654..528bf954eb74 100644 --- a/arch/x86/mm/kmemcheck/kmemcheck.c +++ b/arch/x86/mm/kmemcheck/kmemcheck.c | |||
@@ -331,6 +331,20 @@ static void kmemcheck_read_strict(struct pt_regs *regs, | |||
331 | kmemcheck_shadow_set(shadow, size); | 331 | kmemcheck_shadow_set(shadow, size); |
332 | } | 332 | } |
333 | 333 | ||
334 | bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size) | ||
335 | { | ||
336 | enum kmemcheck_shadow status; | ||
337 | void *shadow; | ||
338 | |||
339 | shadow = kmemcheck_shadow_lookup(addr); | ||
340 | if (!shadow) | ||
341 | return true; | ||
342 | |||
343 | status = kmemcheck_shadow_test(shadow, size); | ||
344 | |||
345 | return status == KMEMCHECK_SHADOW_INITIALIZED; | ||
346 | } | ||
347 | |||
334 | /* Access may cross page boundary */ | 348 | /* Access may cross page boundary */ |
335 | static void kmemcheck_read(struct pt_regs *regs, | 349 | static void kmemcheck_read(struct pt_regs *regs, |
336 | unsigned long addr, unsigned int size) | 350 | unsigned long addr, unsigned int size) |
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index 352aa9e927e2..b2f7d3e59b86 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c | |||
@@ -827,7 +827,7 @@ static int memtype_seq_show(struct seq_file *seq, void *v) | |||
827 | return 0; | 827 | return 0; |
828 | } | 828 | } |
829 | 829 | ||
830 | static struct seq_operations memtype_seq_ops = { | 830 | static const struct seq_operations memtype_seq_ops = { |
831 | .start = memtype_seq_start, | 831 | .start = memtype_seq_start, |
832 | .next = memtype_seq_next, | 832 | .next = memtype_seq_next, |
833 | .stop = memtype_seq_stop, | 833 | .stop = memtype_seq_stop, |
diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c new file mode 100644 index 000000000000..d2e2735327b4 --- /dev/null +++ b/arch/x86/mm/physaddr.c | |||
@@ -0,0 +1,70 @@ | |||
1 | #include <linux/mmdebug.h> | ||
2 | #include <linux/module.h> | ||
3 | #include <linux/mm.h> | ||
4 | |||
5 | #include <asm/page.h> | ||
6 | |||
7 | #include "physaddr.h" | ||
8 | |||
9 | #ifdef CONFIG_X86_64 | ||
10 | |||
11 | unsigned long __phys_addr(unsigned long x) | ||
12 | { | ||
13 | if (x >= __START_KERNEL_map) { | ||
14 | x -= __START_KERNEL_map; | ||
15 | VIRTUAL_BUG_ON(x >= KERNEL_IMAGE_SIZE); | ||
16 | x += phys_base; | ||
17 | } else { | ||
18 | VIRTUAL_BUG_ON(x < PAGE_OFFSET); | ||
19 | x -= PAGE_OFFSET; | ||
20 | VIRTUAL_BUG_ON(!phys_addr_valid(x)); | ||
21 | } | ||
22 | return x; | ||
23 | } | ||
24 | EXPORT_SYMBOL(__phys_addr); | ||
25 | |||
26 | bool __virt_addr_valid(unsigned long x) | ||
27 | { | ||
28 | if (x >= __START_KERNEL_map) { | ||
29 | x -= __START_KERNEL_map; | ||
30 | if (x >= KERNEL_IMAGE_SIZE) | ||
31 | return false; | ||
32 | x += phys_base; | ||
33 | } else { | ||
34 | if (x < PAGE_OFFSET) | ||
35 | return false; | ||
36 | x -= PAGE_OFFSET; | ||
37 | if (!phys_addr_valid(x)) | ||
38 | return false; | ||
39 | } | ||
40 | |||
41 | return pfn_valid(x >> PAGE_SHIFT); | ||
42 | } | ||
43 | EXPORT_SYMBOL(__virt_addr_valid); | ||
44 | |||
45 | #else | ||
46 | |||
47 | #ifdef CONFIG_DEBUG_VIRTUAL | ||
48 | unsigned long __phys_addr(unsigned long x) | ||
49 | { | ||
50 | /* VMALLOC_* aren't constants */ | ||
51 | VIRTUAL_BUG_ON(x < PAGE_OFFSET); | ||
52 | VIRTUAL_BUG_ON(__vmalloc_start_set && is_vmalloc_addr((void *) x)); | ||
53 | return x - PAGE_OFFSET; | ||
54 | } | ||
55 | EXPORT_SYMBOL(__phys_addr); | ||
56 | #endif | ||
57 | |||
58 | bool __virt_addr_valid(unsigned long x) | ||
59 | { | ||
60 | if (x < PAGE_OFFSET) | ||
61 | return false; | ||
62 | if (__vmalloc_start_set && is_vmalloc_addr((void *) x)) | ||
63 | return false; | ||
64 | if (x >= FIXADDR_START) | ||
65 | return false; | ||
66 | return pfn_valid((x - PAGE_OFFSET) >> PAGE_SHIFT); | ||
67 | } | ||
68 | EXPORT_SYMBOL(__virt_addr_valid); | ||
69 | |||
70 | #endif /* CONFIG_X86_64 */ | ||
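
A worked example of the 64-bit translation above, using this era's typical constants (both are configuration-dependent; phys_base taken as 0 for simplicity):

/*
 * Assume __START_KERNEL_map = 0xffffffff80000000,
 *        PAGE_OFFSET        = 0xffff880000000000, phys_base = 0.
 *
 * Kernel-image mapping:
 *   __phys_addr(0xffffffff81234000)
 *     = 0xffffffff81234000 - 0xffffffff80000000 + 0 = 0x1234000
 *
 * Direct mapping:
 *   __phys_addr(0xffff880001234000)
 *     = 0xffff880001234000 - 0xffff880000000000     = 0x1234000
 *
 * Both virtual addresses resolve to the same physical page frame.
 */
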
diff --git a/arch/x86/mm/physaddr.h b/arch/x86/mm/physaddr.h new file mode 100644 index 000000000000..a3cd5a0c97b3 --- /dev/null +++ b/arch/x86/mm/physaddr.h | |||
@@ -0,0 +1,10 @@ | |||
1 | #include <asm/processor.h> | ||
2 | |||
3 | static inline int phys_addr_valid(resource_size_t addr) | ||
4 | { | ||
5 | #ifdef CONFIG_PHYS_ADDR_T_64BIT | ||
6 | return !(addr >> boot_cpu_data.x86_phys_bits); | ||
7 | #else | ||
8 | return 1; | ||
9 | #endif | ||
10 | } | ||
diff --git a/arch/x86/mm/srat_32.c b/arch/x86/mm/srat_32.c index 29a0e37114f8..6f8aa33031c7 100644 --- a/arch/x86/mm/srat_32.c +++ b/arch/x86/mm/srat_32.c | |||
@@ -215,7 +215,7 @@ int __init get_memcfg_from_srat(void) | |||
215 | goto out_fail; | 215 | goto out_fail; |
216 | 216 | ||
217 | if (num_memory_chunks == 0) { | 217 | if (num_memory_chunks == 0) { |
218 | printk(KERN_WARNING | 218 | printk(KERN_DEBUG |
219 | "could not find any ACPI SRAT memory areas.\n"); | 219 | "could not find any ACPI SRAT memory areas.\n"); |
220 | goto out_fail; | 220 | goto out_fail; |
221 | } | 221 | } |
@@ -277,7 +277,7 @@ int __init get_memcfg_from_srat(void) | |||
277 | } | 277 | } |
278 | return 1; | 278 | return 1; |
279 | out_fail: | 279 | out_fail: |
280 | printk(KERN_ERR "failed to get NUMA memory information from SRAT" | 280 | printk(KERN_DEBUG "failed to get NUMA memory information from SRAT" |
281 | " table\n"); | 281 | " table\n"); |
282 | return 0; | 282 | return 0; |
283 | } | 283 | } |
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 89b9a5cd63da..cb88b1a0bd5f 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c | |||
@@ -1,11 +1,14 @@ | |||
1 | /** | 1 | /** |
2 | * @file nmi_int.c | 2 | * @file nmi_int.c |
3 | * | 3 | * |
4 | * @remark Copyright 2002-2008 OProfile authors | 4 | * @remark Copyright 2002-2009 OProfile authors |
5 | * @remark Read the file COPYING | 5 | * @remark Read the file COPYING |
6 | * | 6 | * |
7 | * @author John Levon <levon@movementarian.org> | 7 | * @author John Levon <levon@movementarian.org> |
8 | * @author Robert Richter <robert.richter@amd.com> | 8 | * @author Robert Richter <robert.richter@amd.com> |
9 | * @author Barry Kasindorf <barry.kasindorf@amd.com> | ||
10 | * @author Jason Yeh <jason.yeh@amd.com> | ||
11 | * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com> | ||
9 | */ | 12 | */ |
10 | 13 | ||
11 | #include <linux/init.h> | 14 | #include <linux/init.h> |
@@ -24,13 +27,35 @@ | |||
24 | #include "op_counter.h" | 27 | #include "op_counter.h" |
25 | #include "op_x86_model.h" | 28 | #include "op_x86_model.h" |
26 | 29 | ||
27 | static struct op_x86_model_spec const *model; | 30 | static struct op_x86_model_spec *model; |
28 | static DEFINE_PER_CPU(struct op_msrs, cpu_msrs); | 31 | static DEFINE_PER_CPU(struct op_msrs, cpu_msrs); |
29 | static DEFINE_PER_CPU(unsigned long, saved_lvtpc); | 32 | static DEFINE_PER_CPU(unsigned long, saved_lvtpc); |
30 | 33 | ||
31 | /* 0 == registered but off, 1 == registered and on */ | 34 | /* 0 == registered but off, 1 == registered and on */ |
32 | static int nmi_enabled = 0; | 35 | static int nmi_enabled = 0; |
33 | 36 | ||
37 | struct op_counter_config counter_config[OP_MAX_COUNTER]; | ||
38 | |||
39 | /* common functions */ | ||
40 | |||
41 | u64 op_x86_get_ctrl(struct op_x86_model_spec const *model, | ||
42 | struct op_counter_config *counter_config) | ||
43 | { | ||
44 | u64 val = 0; | ||
45 | u16 event = (u16)counter_config->event; | ||
46 | |||
47 | val |= ARCH_PERFMON_EVENTSEL_INT; | ||
48 | val |= counter_config->user ? ARCH_PERFMON_EVENTSEL_USR : 0; | ||
49 | val |= counter_config->kernel ? ARCH_PERFMON_EVENTSEL_OS : 0; | ||
50 | val |= (counter_config->unit_mask & 0xFF) << 8; | ||
51 | event &= model->event_mask ? model->event_mask : 0xFF; | ||
52 | val |= event & 0xFF; | ||
53 | val |= (event & 0x0F00) << 24; | ||
54 | |||
55 | return val; | ||
56 | } | ||
57 | |||
58 | |||
34 | static int profile_exceptions_notify(struct notifier_block *self, | 59 | static int profile_exceptions_notify(struct notifier_block *self, |
35 | unsigned long val, void *data) | 60 | unsigned long val, void *data) |
36 | { | 61 | { |
@@ -52,36 +77,214 @@ static int profile_exceptions_notify(struct notifier_block *self, | |||
52 | 77 | ||
53 | static void nmi_cpu_save_registers(struct op_msrs *msrs) | 78 | static void nmi_cpu_save_registers(struct op_msrs *msrs) |
54 | { | 79 | { |
55 | unsigned int const nr_ctrs = model->num_counters; | ||
56 | unsigned int const nr_ctrls = model->num_controls; | ||
57 | struct op_msr *counters = msrs->counters; | 80 | struct op_msr *counters = msrs->counters; |
58 | struct op_msr *controls = msrs->controls; | 81 | struct op_msr *controls = msrs->controls; |
59 | unsigned int i; | 82 | unsigned int i; |
60 | 83 | ||
61 | for (i = 0; i < nr_ctrs; ++i) { | 84 | for (i = 0; i < model->num_counters; ++i) { |
62 | if (counters[i].addr) { | 85 | if (counters[i].addr) |
63 | rdmsr(counters[i].addr, | 86 | rdmsrl(counters[i].addr, counters[i].saved); |
64 | counters[i].saved.low, | 87 | } |
65 | counters[i].saved.high); | 88 | |
66 | } | 89 | for (i = 0; i < model->num_controls; ++i) { |
90 | if (controls[i].addr) | ||
91 | rdmsrl(controls[i].addr, controls[i].saved); | ||
92 | } | ||
93 | } | ||
94 | |||
95 | static void nmi_cpu_start(void *dummy) | ||
96 | { | ||
97 | struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs); | ||
98 | model->start(msrs); | ||
99 | } | ||
100 | |||
101 | static int nmi_start(void) | ||
102 | { | ||
103 | on_each_cpu(nmi_cpu_start, NULL, 1); | ||
104 | return 0; | ||
105 | } | ||
106 | |||
107 | static void nmi_cpu_stop(void *dummy) | ||
108 | { | ||
109 | struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs); | ||
110 | model->stop(msrs); | ||
111 | } | ||
112 | |||
113 | static void nmi_stop(void) | ||
114 | { | ||
115 | on_each_cpu(nmi_cpu_stop, NULL, 1); | ||
116 | } | ||
117 | |||
118 | #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX | ||
119 | |||
120 | static DEFINE_PER_CPU(int, switch_index); | ||
121 | |||
122 | static inline int has_mux(void) | ||
123 | { | ||
124 | return !!model->switch_ctrl; | ||
125 | } | ||
126 | |||
127 | inline int op_x86_phys_to_virt(int phys) | ||
128 | { | ||
129 | return __get_cpu_var(switch_index) + phys; | ||
130 | } | ||
131 | |||
132 | inline int op_x86_virt_to_phys(int virt) | ||
133 | { | ||
134 | return virt % model->num_counters; | ||
135 | } | ||
136 | |||
137 | static void nmi_shutdown_mux(void) | ||
138 | { | ||
139 | int i; | ||
140 | |||
141 | if (!has_mux()) | ||
142 | return; | ||
143 | |||
144 | for_each_possible_cpu(i) { | ||
145 | kfree(per_cpu(cpu_msrs, i).multiplex); | ||
146 | per_cpu(cpu_msrs, i).multiplex = NULL; | ||
147 | per_cpu(switch_index, i) = 0; | ||
67 | } | 148 | } |
149 | } | ||
150 | |||
151 | static int nmi_setup_mux(void) | ||
152 | { | ||
153 | size_t multiplex_size = | ||
154 | sizeof(struct op_msr) * model->num_virt_counters; | ||
155 | int i; | ||
156 | |||
157 | if (!has_mux()) | ||
158 | return 1; | ||
159 | |||
160 | for_each_possible_cpu(i) { | ||
161 | per_cpu(cpu_msrs, i).multiplex = | ||
162 | kmalloc(multiplex_size, GFP_KERNEL); | ||
163 | if (!per_cpu(cpu_msrs, i).multiplex) | ||
164 | return 0; | ||
165 | } | ||
166 | |||
167 | return 1; | ||
168 | } | ||
169 | |||
170 | static void nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs) | ||
171 | { | ||
172 | int i; | ||
173 | struct op_msr *multiplex = msrs->multiplex; | ||
174 | |||
175 | if (!has_mux()) | ||
176 | return; | ||
68 | 177 | ||
69 | for (i = 0; i < nr_ctrls; ++i) { | 178 | for (i = 0; i < model->num_virt_counters; ++i) { |
70 | if (controls[i].addr) { | 179 | if (counter_config[i].enabled) { |
71 | rdmsr(controls[i].addr, | 180 | multiplex[i].saved = -(u64)counter_config[i].count; |
72 | controls[i].saved.low, | 181 | } else { |
73 | controls[i].saved.high); | 182 | multiplex[i].addr = 0; |
183 | multiplex[i].saved = 0; | ||
74 | } | 184 | } |
75 | } | 185 | } |
186 | |||
187 | per_cpu(switch_index, cpu) = 0; | ||
188 | } | ||
189 | |||
190 | static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs) | ||
191 | { | ||
192 | struct op_msr *multiplex = msrs->multiplex; | ||
193 | int i; | ||
194 | |||
195 | for (i = 0; i < model->num_counters; ++i) { | ||
196 | int virt = op_x86_phys_to_virt(i); | ||
197 | if (multiplex[virt].addr) | ||
198 | rdmsrl(multiplex[virt].addr, multiplex[virt].saved); | ||
199 | } | ||
200 | } | ||
201 | |||
202 | static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs) | ||
203 | { | ||
204 | struct op_msr *multiplex = msrs->multiplex; | ||
205 | int i; | ||
206 | |||
207 | for (i = 0; i < model->num_counters; ++i) { | ||
208 | int virt = op_x86_phys_to_virt(i); | ||
209 | if (multiplex[virt].addr) | ||
210 | wrmsrl(multiplex[virt].addr, multiplex[virt].saved); | ||
211 | } | ||
76 | } | 212 | } |
77 | 213 | ||
78 | static void nmi_save_registers(void *dummy) | 214 | static void nmi_cpu_switch(void *dummy) |
79 | { | 215 | { |
80 | int cpu = smp_processor_id(); | 216 | int cpu = smp_processor_id(); |
217 | int si = per_cpu(switch_index, cpu); | ||
81 | struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu); | 218 | struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu); |
82 | nmi_cpu_save_registers(msrs); | 219 | |
220 | nmi_cpu_stop(NULL); | ||
221 | nmi_cpu_save_mpx_registers(msrs); | ||
222 | |||
223 | /* move to next set */ | ||
224 | si += model->num_counters; | ||
225 | if ((si > model->num_virt_counters) || (counter_config[si].count == 0)) | ||
226 | per_cpu(switch_index, cpu) = 0; | ||
227 | else | ||
228 | per_cpu(switch_index, cpu) = si; | ||
229 | |||
230 | model->switch_ctrl(model, msrs); | ||
231 | nmi_cpu_restore_mpx_registers(msrs); | ||
232 | |||
233 | nmi_cpu_start(NULL); | ||
234 | } | ||
235 | |||
236 | |||
237 | /* | ||
238 | * Quick check to see if multiplexing is necessary. | ||
239 | * The check should be sufficient since counters are used | ||
240 | * in order. | ||
241 | */ | ||
242 | static int nmi_multiplex_on(void) | ||
243 | { | ||
244 | return counter_config[model->num_counters].count ? 0 : -EINVAL; | ||
245 | } | ||
246 | |||
247 | static int nmi_switch_event(void) | ||
248 | { | ||
249 | if (!has_mux()) | ||
250 | return -ENOSYS; /* not implemented */ | ||
251 | if (nmi_multiplex_on() < 0) | ||
252 | return -EINVAL; /* not necessary */ | ||
253 | |||
254 | on_each_cpu(nmi_cpu_switch, NULL, 1); | ||
255 | |||
256 | return 0; | ||
257 | } | ||
258 | |||
259 | static inline void mux_init(struct oprofile_operations *ops) | ||
260 | { | ||
261 | if (has_mux()) | ||
262 | ops->switch_events = nmi_switch_event; | ||
263 | } | ||
264 | |||
265 | static void mux_clone(int cpu) | ||
266 | { | ||
267 | if (!has_mux()) | ||
268 | return; | ||
269 | |||
270 | memcpy(per_cpu(cpu_msrs, cpu).multiplex, | ||
271 | per_cpu(cpu_msrs, 0).multiplex, | ||
272 | sizeof(struct op_msr) * model->num_virt_counters); | ||
83 | } | 273 | } |
84 | 274 | ||
275 | #else | ||
276 | |||
277 | inline int op_x86_phys_to_virt(int phys) { return phys; } | ||
278 | inline int op_x86_virt_to_phys(int virt) { return virt; } | ||
279 | static inline void nmi_shutdown_mux(void) { } | ||
280 | static inline int nmi_setup_mux(void) { return 1; } | ||
281 | static inline void | ||
282 | nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs) { } | ||
283 | static inline void mux_init(struct oprofile_operations *ops) { } | ||
284 | static void mux_clone(int cpu) { } | ||
285 | |||
286 | #endif | ||
287 | |||
85 | static void free_msrs(void) | 288 | static void free_msrs(void) |
86 | { | 289 | { |
87 | int i; | 290 | int i; |
@@ -95,38 +298,32 @@ static void free_msrs(void) | |||
95 | 298 | ||
96 | static int allocate_msrs(void) | 299 | static int allocate_msrs(void) |
97 | { | 300 | { |
98 | int success = 1; | ||
99 | size_t controls_size = sizeof(struct op_msr) * model->num_controls; | 301 | size_t controls_size = sizeof(struct op_msr) * model->num_controls; |
100 | size_t counters_size = sizeof(struct op_msr) * model->num_counters; | 302 | size_t counters_size = sizeof(struct op_msr) * model->num_counters; |
101 | 303 | ||
102 | int i; | 304 | int i; |
103 | for_each_possible_cpu(i) { | 305 | for_each_possible_cpu(i) { |
104 | per_cpu(cpu_msrs, i).counters = kmalloc(counters_size, | 306 | per_cpu(cpu_msrs, i).counters = kmalloc(counters_size, |
105 | GFP_KERNEL); | 307 | GFP_KERNEL); |
106 | if (!per_cpu(cpu_msrs, i).counters) { | 308 | if (!per_cpu(cpu_msrs, i).counters) |
107 | success = 0; | 309 | return 0; |
108 | break; | ||
109 | } | ||
110 | per_cpu(cpu_msrs, i).controls = kmalloc(controls_size, | 310 | per_cpu(cpu_msrs, i).controls = kmalloc(controls_size, |
111 | GFP_KERNEL); | 311 | GFP_KERNEL); |
112 | if (!per_cpu(cpu_msrs, i).controls) { | 312 | if (!per_cpu(cpu_msrs, i).controls) |
113 | success = 0; | 313 | return 0; |
114 | break; | ||
115 | } | ||
116 | } | 314 | } |
117 | 315 | ||
118 | if (!success) | 316 | return 1; |
119 | free_msrs(); | ||
120 | |||
121 | return success; | ||
122 | } | 317 | } |
123 | 318 | ||
124 | static void nmi_cpu_setup(void *dummy) | 319 | static void nmi_cpu_setup(void *dummy) |
125 | { | 320 | { |
126 | int cpu = smp_processor_id(); | 321 | int cpu = smp_processor_id(); |
127 | struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu); | 322 | struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu); |
323 | nmi_cpu_save_registers(msrs); | ||
128 | spin_lock(&oprofilefs_lock); | 324 | spin_lock(&oprofilefs_lock); |
129 | model->setup_ctrs(msrs); | 325 | model->setup_ctrs(model, msrs); |
326 | nmi_cpu_setup_mux(cpu, msrs); | ||
130 | spin_unlock(&oprofilefs_lock); | 327 | spin_unlock(&oprofilefs_lock); |
131 | per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC); | 328 | per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC); |
132 | apic_write(APIC_LVTPC, APIC_DM_NMI); | 329 | apic_write(APIC_LVTPC, APIC_DM_NMI); |
@@ -144,11 +341,15 @@ static int nmi_setup(void) | |||
144 | int cpu; | 341 | int cpu; |
145 | 342 | ||
146 | if (!allocate_msrs()) | 343 | if (!allocate_msrs()) |
147 | return -ENOMEM; | 344 | err = -ENOMEM; |
345 | else if (!nmi_setup_mux()) | ||
346 | err = -ENOMEM; | ||
347 | else | ||
348 | err = register_die_notifier(&profile_exceptions_nb); | ||
148 | 349 | ||
149 | err = register_die_notifier(&profile_exceptions_nb); | ||
150 | if (err) { | 350 | if (err) { |
151 | free_msrs(); | 351 | free_msrs(); |
352 | nmi_shutdown_mux(); | ||
152 | return err; | 353 | return err; |
153 | } | 354 | } |
154 | 355 | ||
@@ -159,45 +360,38 @@ static int nmi_setup(void) | |||
159 | /* Assume saved/restored counters are the same on all CPUs */ | 360 | /* Assume saved/restored counters are the same on all CPUs */ |
160 | model->fill_in_addresses(&per_cpu(cpu_msrs, 0)); | 361 | model->fill_in_addresses(&per_cpu(cpu_msrs, 0)); |
161 | for_each_possible_cpu(cpu) { | 362 | for_each_possible_cpu(cpu) { |
162 | if (cpu != 0) { | 363 | if (!cpu) |
163 | memcpy(per_cpu(cpu_msrs, cpu).counters, | 364 | continue; |
164 | per_cpu(cpu_msrs, 0).counters, | 365 | |
165 | sizeof(struct op_msr) * model->num_counters); | 366 | memcpy(per_cpu(cpu_msrs, cpu).counters, |
166 | 367 | per_cpu(cpu_msrs, 0).counters, | |
167 | memcpy(per_cpu(cpu_msrs, cpu).controls, | 368 | sizeof(struct op_msr) * model->num_counters); |
168 | per_cpu(cpu_msrs, 0).controls, | 369 | |
169 | sizeof(struct op_msr) * model->num_controls); | 370 | memcpy(per_cpu(cpu_msrs, cpu).controls, |
170 | } | 371 | per_cpu(cpu_msrs, 0).controls, |
372 | sizeof(struct op_msr) * model->num_controls); | ||
171 | 373 | ||
374 | mux_clone(cpu); | ||
172 | } | 375 | } |
173 | on_each_cpu(nmi_save_registers, NULL, 1); | ||
174 | on_each_cpu(nmi_cpu_setup, NULL, 1); | 376 | on_each_cpu(nmi_cpu_setup, NULL, 1); |
175 | nmi_enabled = 1; | 377 | nmi_enabled = 1; |
176 | return 0; | 378 | return 0; |
177 | } | 379 | } |
178 | 380 | ||
179 | static void nmi_restore_registers(struct op_msrs *msrs) | 381 | static void nmi_cpu_restore_registers(struct op_msrs *msrs) |
180 | { | 382 | { |
181 | unsigned int const nr_ctrs = model->num_counters; | ||
182 | unsigned int const nr_ctrls = model->num_controls; | ||
183 | struct op_msr *counters = msrs->counters; | 383 | struct op_msr *counters = msrs->counters; |
184 | struct op_msr *controls = msrs->controls; | 384 | struct op_msr *controls = msrs->controls; |
185 | unsigned int i; | 385 | unsigned int i; |
186 | 386 | ||
187 | for (i = 0; i < nr_ctrls; ++i) { | 387 | for (i = 0; i < model->num_controls; ++i) { |
188 | if (controls[i].addr) { | 388 | if (controls[i].addr) |
189 | wrmsr(controls[i].addr, | 389 | wrmsrl(controls[i].addr, controls[i].saved); |
190 | controls[i].saved.low, | ||
191 | controls[i].saved.high); | ||
192 | } | ||
193 | } | 390 | } |
194 | 391 | ||
195 | for (i = 0; i < nr_ctrs; ++i) { | 392 | for (i = 0; i < model->num_counters; ++i) { |
196 | if (counters[i].addr) { | 393 | if (counters[i].addr) |
197 | wrmsr(counters[i].addr, | 394 | wrmsrl(counters[i].addr, counters[i].saved); |
198 | counters[i].saved.low, | ||
199 | counters[i].saved.high); | ||
200 | } | ||
201 | } | 395 | } |
202 | } | 396 | } |
203 | 397 | ||
@@ -205,7 +399,7 @@ static void nmi_cpu_shutdown(void *dummy) | |||
205 | { | 399 | { |
206 | unsigned int v; | 400 | unsigned int v; |
207 | int cpu = smp_processor_id(); | 401 | int cpu = smp_processor_id(); |
208 | struct op_msrs *msrs = &__get_cpu_var(cpu_msrs); | 402 | struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu); |
209 | 403 | ||
210 | /* restoring APIC_LVTPC can trigger an apic error because the delivery | 404 | /* restoring APIC_LVTPC can trigger an apic error because the delivery |
211 | * mode and vector nr combination can be illegal. That's by design: on | 405 | * mode and vector nr combination can be illegal. That's by design: on |
@@ -216,7 +410,7 @@ static void nmi_cpu_shutdown(void *dummy) | |||
216 | apic_write(APIC_LVTERR, v | APIC_LVT_MASKED); | 410 | apic_write(APIC_LVTERR, v | APIC_LVT_MASKED); |
217 | apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu)); | 411 | apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu)); |
218 | apic_write(APIC_LVTERR, v); | 412 | apic_write(APIC_LVTERR, v); |
219 | nmi_restore_registers(msrs); | 413 | nmi_cpu_restore_registers(msrs); |
220 | } | 414 | } |
221 | 415 | ||
222 | static void nmi_shutdown(void) | 416 | static void nmi_shutdown(void) |
@@ -226,42 +420,18 @@ static void nmi_shutdown(void) | |||
226 | nmi_enabled = 0; | 420 | nmi_enabled = 0; |
227 | on_each_cpu(nmi_cpu_shutdown, NULL, 1); | 421 | on_each_cpu(nmi_cpu_shutdown, NULL, 1); |
228 | unregister_die_notifier(&profile_exceptions_nb); | 422 | unregister_die_notifier(&profile_exceptions_nb); |
423 | nmi_shutdown_mux(); | ||
229 | msrs = &get_cpu_var(cpu_msrs); | 424 | msrs = &get_cpu_var(cpu_msrs); |
230 | model->shutdown(msrs); | 425 | model->shutdown(msrs); |
231 | free_msrs(); | 426 | free_msrs(); |
232 | put_cpu_var(cpu_msrs); | 427 | put_cpu_var(cpu_msrs); |
233 | } | 428 | } |
234 | 429 | ||
235 | static void nmi_cpu_start(void *dummy) | ||
236 | { | ||
237 | struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs); | ||
238 | model->start(msrs); | ||
239 | } | ||
240 | |||
241 | static int nmi_start(void) | ||
242 | { | ||
243 | on_each_cpu(nmi_cpu_start, NULL, 1); | ||
244 | return 0; | ||
245 | } | ||
246 | |||
247 | static void nmi_cpu_stop(void *dummy) | ||
248 | { | ||
249 | struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs); | ||
250 | model->stop(msrs); | ||
251 | } | ||
252 | |||
253 | static void nmi_stop(void) | ||
254 | { | ||
255 | on_each_cpu(nmi_cpu_stop, NULL, 1); | ||
256 | } | ||
257 | |||
258 | struct op_counter_config counter_config[OP_MAX_COUNTER]; | ||
259 | |||
260 | static int nmi_create_files(struct super_block *sb, struct dentry *root) | 430 | static int nmi_create_files(struct super_block *sb, struct dentry *root) |
261 | { | 431 | { |
262 | unsigned int i; | 432 | unsigned int i; |
263 | 433 | ||
264 | for (i = 0; i < model->num_counters; ++i) { | 434 | for (i = 0; i < model->num_virt_counters; ++i) { |
265 | struct dentry *dir; | 435 | struct dentry *dir; |
266 | char buf[4]; | 436 | char buf[4]; |
267 | 437 | ||
@@ -270,7 +440,7 @@ static int nmi_create_files(struct super_block *sb, struct dentry *root) | |||
270 | * NOTE: assumes 1:1 mapping here (that counters are organized | 440 | * NOTE: assumes 1:1 mapping here (that counters are organized |
271 | * sequentially in their struct assignment). | 441 | * sequentially in their struct assignment). |
272 | */ | 442 | */ |
273 | if (unlikely(!avail_to_resrv_perfctr_nmi_bit(i))) | 443 | if (!avail_to_resrv_perfctr_nmi_bit(op_x86_virt_to_phys(i))) |
274 | continue; | 444 | continue; |
275 | 445 | ||
276 | snprintf(buf, sizeof(buf), "%d", i); | 446 | snprintf(buf, sizeof(buf), "%d", i); |
@@ -402,6 +572,7 @@ module_param_call(cpu_type, force_cpu_type, NULL, NULL, 0); | |||
402 | static int __init ppro_init(char **cpu_type) | 572 | static int __init ppro_init(char **cpu_type) |
403 | { | 573 | { |
404 | __u8 cpu_model = boot_cpu_data.x86_model; | 574 | __u8 cpu_model = boot_cpu_data.x86_model; |
575 | struct op_x86_model_spec *spec = &op_ppro_spec; /* default */ | ||
405 | 576 | ||
406 | if (force_arch_perfmon && cpu_has_arch_perfmon) | 577 | if (force_arch_perfmon && cpu_has_arch_perfmon) |
407 | return 0; | 578 | return 0; |
@@ -428,7 +599,7 @@ static int __init ppro_init(char **cpu_type) | |||
428 | *cpu_type = "i386/core_2"; | 599 | *cpu_type = "i386/core_2"; |
429 | break; | 600 | break; |
430 | case 26: | 601 | case 26: |
431 | arch_perfmon_setup_counters(); | 602 | spec = &op_arch_perfmon_spec; |
432 | *cpu_type = "i386/core_i7"; | 603 | *cpu_type = "i386/core_i7"; |
433 | break; | 604 | break; |
434 | case 28: | 605 | case 28: |
@@ -439,17 +610,7 @@ static int __init ppro_init(char **cpu_type) | |||
439 | return 0; | 610 | return 0; |
440 | } | 611 | } |
441 | 612 | ||
442 | model = &op_ppro_spec; | 613 | model = spec; |
443 | return 1; | ||
444 | } | ||
445 | |||
446 | static int __init arch_perfmon_init(char **cpu_type) | ||
447 | { | ||
448 | if (!cpu_has_arch_perfmon) | ||
449 | return 0; | ||
450 | *cpu_type = "i386/arch_perfmon"; | ||
451 | model = &op_arch_perfmon_spec; | ||
452 | arch_perfmon_setup_counters(); | ||
453 | return 1; | 614 | return 1; |
454 | } | 615 | } |
455 | 616 | ||
@@ -471,27 +632,26 @@ int __init op_nmi_init(struct oprofile_operations *ops) | |||
471 | /* Needs to be at least an Athlon (or hammer in 32bit mode) */ | 632 | /* Needs to be at least an Athlon (or hammer in 32bit mode) */ |
472 | 633 | ||
473 | switch (family) { | 634 | switch (family) { |
474 | default: | ||
475 | return -ENODEV; | ||
476 | case 6: | 635 | case 6: |
477 | model = &op_amd_spec; | ||
478 | cpu_type = "i386/athlon"; | 636 | cpu_type = "i386/athlon"; |
479 | break; | 637 | break; |
480 | case 0xf: | 638 | case 0xf: |
481 | model = &op_amd_spec; | 639 | /* |
482 | /* Actually it could be i386/hammer too, but give | 640 | * Actually it could be i386/hammer too, but |
483 | user space an consistent name. */ | 641 | * give user space an consistent name. |
642 | */ | ||
484 | cpu_type = "x86-64/hammer"; | 643 | cpu_type = "x86-64/hammer"; |
485 | break; | 644 | break; |
486 | case 0x10: | 645 | case 0x10: |
487 | model = &op_amd_spec; | ||
488 | cpu_type = "x86-64/family10"; | 646 | cpu_type = "x86-64/family10"; |
489 | break; | 647 | break; |
490 | case 0x11: | 648 | case 0x11: |
491 | model = &op_amd_spec; | ||
492 | cpu_type = "x86-64/family11h"; | 649 | cpu_type = "x86-64/family11h"; |
493 | break; | 650 | break; |
651 | default: | ||
652 | return -ENODEV; | ||
494 | } | 653 | } |
654 | model = &op_amd_spec; | ||
495 | break; | 655 | break; |
496 | 656 | ||
497 | case X86_VENDOR_INTEL: | 657 | case X86_VENDOR_INTEL: |
@@ -510,8 +670,15 @@ int __init op_nmi_init(struct oprofile_operations *ops) | |||
510 | break; | 670 | break; |
511 | } | 671 | } |
512 | 672 | ||
513 | if (!cpu_type && !arch_perfmon_init(&cpu_type)) | 673 | if (cpu_type) |
674 | break; | ||
675 | |||
676 | if (!cpu_has_arch_perfmon) | ||
514 | return -ENODEV; | 677 | return -ENODEV; |
678 | |||
679 | /* use arch perfmon as fallback */ | ||
680 | cpu_type = "i386/arch_perfmon"; | ||
681 | model = &op_arch_perfmon_spec; | ||
515 | break; | 682 | break; |
516 | 683 | ||
517 | default: | 684 | default: |
@@ -522,18 +689,23 @@ int __init op_nmi_init(struct oprofile_operations *ops) | |||
522 | register_cpu_notifier(&oprofile_cpu_nb); | 689 | register_cpu_notifier(&oprofile_cpu_nb); |
523 | #endif | 690 | #endif |
524 | /* default values, can be overwritten by model */ | 691 | /* default values, can be overwritten by model */ |
525 | ops->create_files = nmi_create_files; | 692 | ops->create_files = nmi_create_files; |
526 | ops->setup = nmi_setup; | 693 | ops->setup = nmi_setup; |
527 | ops->shutdown = nmi_shutdown; | 694 | ops->shutdown = nmi_shutdown; |
528 | ops->start = nmi_start; | 695 | ops->start = nmi_start; |
529 | ops->stop = nmi_stop; | 696 | ops->stop = nmi_stop; |
530 | ops->cpu_type = cpu_type; | 697 | ops->cpu_type = cpu_type; |
531 | 698 | ||
532 | if (model->init) | 699 | if (model->init) |
533 | ret = model->init(ops); | 700 | ret = model->init(ops); |
534 | if (ret) | 701 | if (ret) |
535 | return ret; | 702 | return ret; |
536 | 703 | ||
704 | if (!model->num_virt_counters) | ||
705 | model->num_virt_counters = model->num_counters; | ||
706 | |||
707 | mux_init(ops); | ||
708 | |||
537 | init_sysfs(); | 709 | init_sysfs(); |
538 | using_nmi = 1; | 710 | using_nmi = 1; |
539 | printk(KERN_INFO "oprofile: using NMI interrupt.\n"); | 711 | printk(KERN_INFO "oprofile: using NMI interrupt.\n"); |
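
The multiplexing code added above rotates a per-CPU switch_index through the configured virtual counters in steps of num_counters, so op_x86_phys_to_virt() is just switch_index + phys and each hardware counter services several events in turn. A simplified single-CPU sketch of that rotation (the wrap test uses >= here defensively to stay in bounds; otherwise the constants mirror the AMD model's 4 physical / 32 virtual counters):

#include <stdio.h>

#define NUM_COUNTERS		4
#define NUM_VIRT_COUNTERS	32

static int switch_index;
static unsigned long count[NUM_VIRT_COUNTERS];	/* 0 == slot unused */

static int phys_to_virt(int phys)
{
	return switch_index + phys;
}

static void cpu_switch(void)
{
	int si = switch_index + NUM_COUNTERS;

	/* wrap when we run past the last configured set */
	if (si >= NUM_VIRT_COUNTERS || count[si] == 0)
		switch_index = 0;
	else
		switch_index = si;
}

int main(void)
{
	int i;

	for (i = 0; i < 8; i++)		/* two sets of four events configured */
		count[i] = 100000;

	for (i = 0; i < 4; i++) {
		printf("round %d: physical ctr 0 maps to virtual ctr %d\n",
		       i, phys_to_virt(0));
		cpu_switch();
	}
	return 0;
}

With two sets configured, physical counter 0 alternates between virtual counters 0 and 4 on successive switch events.
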
diff --git a/arch/x86/oprofile/op_counter.h b/arch/x86/oprofile/op_counter.h index 91b6a116165e..e28398df0df2 100644 --- a/arch/x86/oprofile/op_counter.h +++ b/arch/x86/oprofile/op_counter.h | |||
@@ -10,7 +10,7 @@ | |||
10 | #ifndef OP_COUNTER_H | 10 | #ifndef OP_COUNTER_H |
11 | #define OP_COUNTER_H | 11 | #define OP_COUNTER_H |
12 | 12 | ||
13 | #define OP_MAX_COUNTER 8 | 13 | #define OP_MAX_COUNTER 32 |
14 | 14 | ||
15 | /* Per-perfctr configuration as set via | 15 | /* Per-perfctr configuration as set via |
16 | * oprofilefs. | 16 | * oprofilefs. |
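
OP_MAX_COUNTER sizes counter_config[] in nmi_int.c, so it has to cover the largest num_virt_counters any model reports; raising it from 8 to 32 matches the 32 virtual counters the AMD model defines below. A compile-time guard of that relationship could look like this sketch (the STATIC_ASSERT macro is a generic stand-in, not part of the patch):

/* Fails to compile when cond is false: array would have negative size. */
#define STATIC_ASSERT(cond, name)	typedef char name[(cond) ? 1 : -1]

#define OP_MAX_COUNTER		32
#define NUM_VIRT_COUNTERS	32

STATIC_ASSERT(NUM_VIRT_COUNTERS <= OP_MAX_COUNTER, virt_counters_fit);

int main(void)
{
	return 0;
}
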
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index 8fdf06e4edf9..39686c29f03a 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c | |||
@@ -9,12 +9,15 @@ | |||
9 | * @author Philippe Elie | 9 | * @author Philippe Elie |
10 | * @author Graydon Hoare | 10 | * @author Graydon Hoare |
11 | * @author Robert Richter <robert.richter@amd.com> | 11 | * @author Robert Richter <robert.richter@amd.com> |
12 | * @author Barry Kasindorf | 12 | * @author Barry Kasindorf <barry.kasindorf@amd.com> |
13 | * @author Jason Yeh <jason.yeh@amd.com> | ||
14 | * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com> | ||
13 | */ | 15 | */ |
14 | 16 | ||
15 | #include <linux/oprofile.h> | 17 | #include <linux/oprofile.h> |
16 | #include <linux/device.h> | 18 | #include <linux/device.h> |
17 | #include <linux/pci.h> | 19 | #include <linux/pci.h> |
20 | #include <linux/percpu.h> | ||
18 | 21 | ||
19 | #include <asm/ptrace.h> | 22 | #include <asm/ptrace.h> |
20 | #include <asm/msr.h> | 23 | #include <asm/msr.h> |
@@ -25,43 +28,36 @@ | |||
25 | 28 | ||
26 | #define NUM_COUNTERS 4 | 29 | #define NUM_COUNTERS 4 |
27 | #define NUM_CONTROLS 4 | 30 | #define NUM_CONTROLS 4 |
31 | #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX | ||
32 | #define NUM_VIRT_COUNTERS 32 | ||
33 | #define NUM_VIRT_CONTROLS 32 | ||
34 | #else | ||
35 | #define NUM_VIRT_COUNTERS NUM_COUNTERS | ||
36 | #define NUM_VIRT_CONTROLS NUM_CONTROLS | ||
37 | #endif | ||
38 | |||
39 | #define OP_EVENT_MASK 0x0FFF | ||
40 | #define OP_CTR_OVERFLOW (1ULL<<31) | ||
28 | 41 | ||
29 | #define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0) | 42 | #define MSR_AMD_EVENTSEL_RESERVED ((0xFFFFFCF0ULL<<32)|(1ULL<<21)) |
30 | #define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0) | 43 | |
31 | #define CTR_WRITE(l, msrs, c) do {wrmsr(msrs->counters[(c)].addr, -(unsigned int)(l), -1); } while (0) | 44 | static unsigned long reset_value[NUM_VIRT_COUNTERS]; |
32 | #define CTR_OVERFLOWED(n) (!((n) & (1U<<31))) | ||
33 | |||
34 | #define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0) | ||
35 | #define CTRL_READ(l, h, msrs, c) do {rdmsr(msrs->controls[(c)].addr, (l), (h)); } while (0) | ||
36 | #define CTRL_WRITE(l, h, msrs, c) do {wrmsr(msrs->controls[(c)].addr, (l), (h)); } while (0) | ||
37 | #define CTRL_SET_ACTIVE(n) (n |= (1<<22)) | ||
38 | #define CTRL_SET_INACTIVE(n) (n &= ~(1<<22)) | ||
39 | #define CTRL_CLEAR_LO(x) (x &= (1<<21)) | ||
40 | #define CTRL_CLEAR_HI(x) (x &= 0xfffffcf0) | ||
41 | #define CTRL_SET_ENABLE(val) (val |= 1<<20) | ||
42 | #define CTRL_SET_USR(val, u) (val |= ((u & 1) << 16)) | ||
43 | #define CTRL_SET_KERN(val, k) (val |= ((k & 1) << 17)) | ||
44 | #define CTRL_SET_UM(val, m) (val |= (m << 8)) | ||
45 | #define CTRL_SET_EVENT_LOW(val, e) (val |= (e & 0xff)) | ||
46 | #define CTRL_SET_EVENT_HIGH(val, e) (val |= ((e >> 8) & 0xf)) | ||
47 | #define CTRL_SET_HOST_ONLY(val, h) (val |= ((h & 1) << 9)) | ||
48 | #define CTRL_SET_GUEST_ONLY(val, h) (val |= ((h & 1) << 8)) | ||
49 | |||
50 | static unsigned long reset_value[NUM_COUNTERS]; | ||
51 | 45 | ||
52 | #ifdef CONFIG_OPROFILE_IBS | 46 | #ifdef CONFIG_OPROFILE_IBS |
53 | 47 | ||
54 | /* IbsFetchCtl bits/masks */ | 48 | /* IbsFetchCtl bits/masks */ |
55 | #define IBS_FETCH_HIGH_VALID_BIT (1UL << 17) /* bit 49 */ | 49 | #define IBS_FETCH_RAND_EN (1ULL<<57) |
56 | #define IBS_FETCH_HIGH_ENABLE (1UL << 16) /* bit 48 */ | 50 | #define IBS_FETCH_VAL (1ULL<<49) |
57 | #define IBS_FETCH_LOW_MAX_CNT_MASK 0x0000FFFFUL /* MaxCnt mask */ | 51 | #define IBS_FETCH_ENABLE (1ULL<<48) |
52 | #define IBS_FETCH_CNT_MASK 0xFFFF0000ULL | ||
58 | 53 | ||
59 | /*IbsOpCtl bits */ | 54 | /*IbsOpCtl bits */ |
60 | #define IBS_OP_LOW_VALID_BIT (1ULL<<18) /* bit 18 */ | 55 | #define IBS_OP_CNT_CTL (1ULL<<19) |
61 | #define IBS_OP_LOW_ENABLE (1ULL<<17) /* bit 17 */ | 56 | #define IBS_OP_VAL (1ULL<<18) |
57 | #define IBS_OP_ENABLE (1ULL<<17) | ||
62 | 58 | ||
63 | #define IBS_FETCH_SIZE 6 | 59 | #define IBS_FETCH_SIZE 6 |
64 | #define IBS_OP_SIZE 12 | 60 | #define IBS_OP_SIZE 12 |
65 | 61 | ||
66 | static int has_ibs; /* AMD Family10h and later */ | 62 | static int has_ibs; /* AMD Family10h and later */ |
67 | 63 | ||
@@ -78,6 +74,45 @@ static struct op_ibs_config ibs_config; | |||
78 | 74 | ||
79 | #endif | 75 | #endif |
80 | 76 | ||
77 | #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX | ||
78 | |||
79 | static void op_mux_fill_in_addresses(struct op_msrs * const msrs) | ||
80 | { | ||
81 | int i; | ||
82 | |||
83 | for (i = 0; i < NUM_VIRT_COUNTERS; i++) { | ||
84 | int hw_counter = op_x86_virt_to_phys(i); | ||
85 | if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i)) | ||
86 | msrs->multiplex[i].addr = MSR_K7_PERFCTR0 + hw_counter; | ||
87 | else | ||
88 | msrs->multiplex[i].addr = 0; | ||
89 | } | ||
90 | } | ||
91 | |||
92 | static void op_mux_switch_ctrl(struct op_x86_model_spec const *model, | ||
93 | struct op_msrs const * const msrs) | ||
94 | { | ||
95 | u64 val; | ||
96 | int i; | ||
97 | |||
98 | /* enable active counters */ | ||
99 | for (i = 0; i < NUM_COUNTERS; ++i) { | ||
100 | int virt = op_x86_phys_to_virt(i); | ||
101 | if (!counter_config[virt].enabled) | ||
102 | continue; | ||
103 | rdmsrl(msrs->controls[i].addr, val); | ||
104 | val &= model->reserved; | ||
105 | val |= op_x86_get_ctrl(model, &counter_config[virt]); | ||
106 | wrmsrl(msrs->controls[i].addr, val); | ||
107 | } | ||
108 | } | ||
109 | |||
110 | #else | ||
111 | |||
112 | static inline void op_mux_fill_in_addresses(struct op_msrs * const msrs) { } | ||
113 | |||
114 | #endif | ||
115 | |||
81 | /* functions for op_amd_spec */ | 116 | /* functions for op_amd_spec */ |
82 | 117 | ||
83 | static void op_amd_fill_in_addresses(struct op_msrs * const msrs) | 118 | static void op_amd_fill_in_addresses(struct op_msrs * const msrs) |
@@ -97,150 +132,174 @@ static void op_amd_fill_in_addresses(struct op_msrs * const msrs) | |||
97 | else | 132 | else |
98 | msrs->controls[i].addr = 0; | 133 | msrs->controls[i].addr = 0; |
99 | } | 134 | } |
100 | } | ||
101 | 135 | ||
136 | op_mux_fill_in_addresses(msrs); | ||
137 | } | ||
102 | 138 | ||
103 | static void op_amd_setup_ctrs(struct op_msrs const * const msrs) | 139 | static void op_amd_setup_ctrs(struct op_x86_model_spec const *model, |
140 | struct op_msrs const * const msrs) | ||
104 | { | 141 | { |
105 | unsigned int low, high; | 142 | u64 val; |
106 | int i; | 143 | int i; |
107 | 144 | ||
145 | /* setup reset_value */ | ||
146 | for (i = 0; i < NUM_VIRT_COUNTERS; ++i) { | ||
147 | if (counter_config[i].enabled) | ||
148 | reset_value[i] = counter_config[i].count; | ||
149 | else | ||
150 | reset_value[i] = 0; | ||
151 | } | ||
152 | |||
108 | /* clear all counters */ | 153 | /* clear all counters */ |
109 | for (i = 0 ; i < NUM_CONTROLS; ++i) { | 154 | for (i = 0; i < NUM_CONTROLS; ++i) { |
110 | if (unlikely(!CTRL_IS_RESERVED(msrs, i))) | 155 | if (unlikely(!msrs->controls[i].addr)) |
111 | continue; | 156 | continue; |
112 | CTRL_READ(low, high, msrs, i); | 157 | rdmsrl(msrs->controls[i].addr, val); |
113 | CTRL_CLEAR_LO(low); | 158 | val &= model->reserved; |
114 | CTRL_CLEAR_HI(high); | 159 | wrmsrl(msrs->controls[i].addr, val); |
115 | CTRL_WRITE(low, high, msrs, i); | ||
116 | } | 160 | } |
117 | 161 | ||
118 | /* avoid a false detection of ctr overflows in NMI handler */ | 162 | /* avoid a false detection of ctr overflows in NMI handler */ |
119 | for (i = 0; i < NUM_COUNTERS; ++i) { | 163 | for (i = 0; i < NUM_COUNTERS; ++i) { |
120 | if (unlikely(!CTR_IS_RESERVED(msrs, i))) | 164 | if (unlikely(!msrs->counters[i].addr)) |
121 | continue; | 165 | continue; |
122 | CTR_WRITE(1, msrs, i); | 166 | wrmsrl(msrs->counters[i].addr, -1LL); |
123 | } | 167 | } |
124 | 168 | ||
125 | /* enable active counters */ | 169 | /* enable active counters */ |
126 | for (i = 0; i < NUM_COUNTERS; ++i) { | 170 | for (i = 0; i < NUM_COUNTERS; ++i) { |
127 | if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) { | 171 | int virt = op_x86_phys_to_virt(i); |
128 | reset_value[i] = counter_config[i].count; | 172 | if (!counter_config[virt].enabled) |
173 | continue; | ||
174 | if (!msrs->counters[i].addr) | ||
175 | continue; | ||
129 | 176 | ||
130 | CTR_WRITE(counter_config[i].count, msrs, i); | 177 | /* setup counter registers */ |
131 | 178 | wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]); | |
132 | CTRL_READ(low, high, msrs, i); | 179 | |
133 | CTRL_CLEAR_LO(low); | 180 | /* setup control registers */ |
134 | CTRL_CLEAR_HI(high); | 181 | rdmsrl(msrs->controls[i].addr, val); |
135 | CTRL_SET_ENABLE(low); | 182 | val &= model->reserved; |
136 | CTRL_SET_USR(low, counter_config[i].user); | 183 | val |= op_x86_get_ctrl(model, &counter_config[virt]); |
137 | CTRL_SET_KERN(low, counter_config[i].kernel); | 184 | wrmsrl(msrs->controls[i].addr, val); |
138 | CTRL_SET_UM(low, counter_config[i].unit_mask); | ||
139 | CTRL_SET_EVENT_LOW(low, counter_config[i].event); | ||
140 | CTRL_SET_EVENT_HIGH(high, counter_config[i].event); | ||
141 | CTRL_SET_HOST_ONLY(high, 0); | ||
142 | CTRL_SET_GUEST_ONLY(high, 0); | ||
143 | |||
144 | CTRL_WRITE(low, high, msrs, i); | ||
145 | } else { | ||
146 | reset_value[i] = 0; | ||
147 | } | ||
148 | } | 185 | } |
149 | } | 186 | } |
150 | 187 | ||
151 | #ifdef CONFIG_OPROFILE_IBS | 188 | #ifdef CONFIG_OPROFILE_IBS |
152 | 189 | ||
153 | static inline int | 190 | static inline void |
154 | op_amd_handle_ibs(struct pt_regs * const regs, | 191 | op_amd_handle_ibs(struct pt_regs * const regs, |
155 | struct op_msrs const * const msrs) | 192 | struct op_msrs const * const msrs) |
156 | { | 193 | { |
157 | u32 low, high; | 194 | u64 val, ctl; |
158 | u64 msr; | ||
159 | struct op_entry entry; | 195 | struct op_entry entry; |
160 | 196 | ||
161 | if (!has_ibs) | 197 | if (!has_ibs) |
162 | return 1; | 198 | return; |
163 | 199 | ||
164 | if (ibs_config.fetch_enabled) { | 200 | if (ibs_config.fetch_enabled) { |
165 | rdmsr(MSR_AMD64_IBSFETCHCTL, low, high); | 201 | rdmsrl(MSR_AMD64_IBSFETCHCTL, ctl); |
166 | if (high & IBS_FETCH_HIGH_VALID_BIT) { | 202 | if (ctl & IBS_FETCH_VAL) { |
167 | rdmsrl(MSR_AMD64_IBSFETCHLINAD, msr); | 203 | rdmsrl(MSR_AMD64_IBSFETCHLINAD, val); |
168 | oprofile_write_reserve(&entry, regs, msr, | 204 | oprofile_write_reserve(&entry, regs, val, |
169 | IBS_FETCH_CODE, IBS_FETCH_SIZE); | 205 | IBS_FETCH_CODE, IBS_FETCH_SIZE); |
170 | oprofile_add_data(&entry, (u32)msr); | 206 | oprofile_add_data64(&entry, val); |
171 | oprofile_add_data(&entry, (u32)(msr >> 32)); | 207 | oprofile_add_data64(&entry, ctl); |
172 | oprofile_add_data(&entry, low); | 208 | rdmsrl(MSR_AMD64_IBSFETCHPHYSAD, val); |
173 | oprofile_add_data(&entry, high); | 209 | oprofile_add_data64(&entry, val); |
174 | rdmsrl(MSR_AMD64_IBSFETCHPHYSAD, msr); | ||
175 | oprofile_add_data(&entry, (u32)msr); | ||
176 | oprofile_add_data(&entry, (u32)(msr >> 32)); | ||
177 | oprofile_write_commit(&entry); | 210 | oprofile_write_commit(&entry); |
178 | 211 | ||
179 | /* reenable the IRQ */ | 212 | /* reenable the IRQ */ |
180 | high &= ~IBS_FETCH_HIGH_VALID_BIT; | 213 | ctl &= ~(IBS_FETCH_VAL | IBS_FETCH_CNT_MASK); |
181 | high |= IBS_FETCH_HIGH_ENABLE; | 214 | ctl |= IBS_FETCH_ENABLE; |
182 | low &= IBS_FETCH_LOW_MAX_CNT_MASK; | 215 | wrmsrl(MSR_AMD64_IBSFETCHCTL, ctl); |
183 | wrmsr(MSR_AMD64_IBSFETCHCTL, low, high); | ||
184 | } | 216 | } |
185 | } | 217 | } |
186 | 218 | ||
187 | if (ibs_config.op_enabled) { | 219 | if (ibs_config.op_enabled) { |
188 | rdmsr(MSR_AMD64_IBSOPCTL, low, high); | 220 | rdmsrl(MSR_AMD64_IBSOPCTL, ctl); |
189 | if (low & IBS_OP_LOW_VALID_BIT) { | 221 | if (ctl & IBS_OP_VAL) { |
190 | rdmsrl(MSR_AMD64_IBSOPRIP, msr); | 222 | rdmsrl(MSR_AMD64_IBSOPRIP, val); |
191 | oprofile_write_reserve(&entry, regs, msr, | 223 | oprofile_write_reserve(&entry, regs, val, |
192 | IBS_OP_CODE, IBS_OP_SIZE); | 224 | IBS_OP_CODE, IBS_OP_SIZE); |
193 | oprofile_add_data(&entry, (u32)msr); | 225 | oprofile_add_data64(&entry, val); |
194 | oprofile_add_data(&entry, (u32)(msr >> 32)); | 226 | rdmsrl(MSR_AMD64_IBSOPDATA, val); |
195 | rdmsrl(MSR_AMD64_IBSOPDATA, msr); | 227 | oprofile_add_data64(&entry, val); |
196 | oprofile_add_data(&entry, (u32)msr); | 228 | rdmsrl(MSR_AMD64_IBSOPDATA2, val); |
197 | oprofile_add_data(&entry, (u32)(msr >> 32)); | 229 | oprofile_add_data64(&entry, val); |
198 | rdmsrl(MSR_AMD64_IBSOPDATA2, msr); | 230 | rdmsrl(MSR_AMD64_IBSOPDATA3, val); |
199 | oprofile_add_data(&entry, (u32)msr); | 231 | oprofile_add_data64(&entry, val); |
200 | oprofile_add_data(&entry, (u32)(msr >> 32)); | 232 | rdmsrl(MSR_AMD64_IBSDCLINAD, val); |
201 | rdmsrl(MSR_AMD64_IBSOPDATA3, msr); | 233 | oprofile_add_data64(&entry, val); |
202 | oprofile_add_data(&entry, (u32)msr); | 234 | rdmsrl(MSR_AMD64_IBSDCPHYSAD, val); |
203 | oprofile_add_data(&entry, (u32)(msr >> 32)); | 235 | oprofile_add_data64(&entry, val); |
204 | rdmsrl(MSR_AMD64_IBSDCLINAD, msr); | ||
205 | oprofile_add_data(&entry, (u32)msr); | ||
206 | oprofile_add_data(&entry, (u32)(msr >> 32)); | ||
207 | rdmsrl(MSR_AMD64_IBSDCPHYSAD, msr); | ||
208 | oprofile_add_data(&entry, (u32)msr); | ||
209 | oprofile_add_data(&entry, (u32)(msr >> 32)); | ||
210 | oprofile_write_commit(&entry); | 236 | oprofile_write_commit(&entry); |
211 | 237 | ||
212 | /* reenable the IRQ */ | 238 | /* reenable the IRQ */ |
213 | high = 0; | 239 | ctl &= ~IBS_OP_VAL & 0xFFFFFFFF; |
214 | low &= ~IBS_OP_LOW_VALID_BIT; | 240 | ctl |= IBS_OP_ENABLE; |
215 | low |= IBS_OP_LOW_ENABLE; | 241 | wrmsrl(MSR_AMD64_IBSOPCTL, ctl); |
216 | wrmsr(MSR_AMD64_IBSOPCTL, low, high); | ||
217 | } | 242 | } |
218 | } | 243 | } |
244 | } | ||
219 | 245 | ||
220 | return 1; | 246 | static inline void op_amd_start_ibs(void) |
247 | { | ||
248 | u64 val; | ||
249 | if (has_ibs && ibs_config.fetch_enabled) { | ||
250 | val = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF; | ||
251 | val |= ibs_config.rand_en ? IBS_FETCH_RAND_EN : 0; | ||
252 | val |= IBS_FETCH_ENABLE; | ||
253 | wrmsrl(MSR_AMD64_IBSFETCHCTL, val); | ||
254 | } | ||
255 | |||
256 | if (has_ibs && ibs_config.op_enabled) { | ||
257 | val = (ibs_config.max_cnt_op >> 4) & 0xFFFF; | ||
258 | val |= ibs_config.dispatched_ops ? IBS_OP_CNT_CTL : 0; | ||
259 | val |= IBS_OP_ENABLE; | ||
260 | wrmsrl(MSR_AMD64_IBSOPCTL, val); | ||
261 | } | ||
262 | } | ||
263 | |||
264 | static void op_amd_stop_ibs(void) | ||
265 | { | ||
266 | if (has_ibs && ibs_config.fetch_enabled) | ||
267 | /* clear max count and enable */ | ||
268 | wrmsrl(MSR_AMD64_IBSFETCHCTL, 0); | ||
269 | |||
270 | if (has_ibs && ibs_config.op_enabled) | ||
271 | /* clear max count and enable */ | ||
272 | wrmsrl(MSR_AMD64_IBSOPCTL, 0); | ||
221 | } | 273 | } |
222 | 274 | ||
275 | #else | ||
276 | |||
277 | static inline void op_amd_handle_ibs(struct pt_regs * const regs, | ||
278 | struct op_msrs const * const msrs) { } | ||
279 | static inline void op_amd_start_ibs(void) { } | ||
280 | static inline void op_amd_stop_ibs(void) { } | ||
281 | |||
223 | #endif | 282 | #endif |
224 | 283 | ||
225 | static int op_amd_check_ctrs(struct pt_regs * const regs, | 284 | static int op_amd_check_ctrs(struct pt_regs * const regs, |
226 | struct op_msrs const * const msrs) | 285 | struct op_msrs const * const msrs) |
227 | { | 286 | { |
228 | unsigned int low, high; | 287 | u64 val; |
229 | int i; | 288 | int i; |
230 | 289 | ||
231 | for (i = 0 ; i < NUM_COUNTERS; ++i) { | 290 | for (i = 0; i < NUM_COUNTERS; ++i) { |
232 | if (!reset_value[i]) | 291 | int virt = op_x86_phys_to_virt(i); |
292 | if (!reset_value[virt]) | ||
233 | continue; | 293 | continue; |
234 | CTR_READ(low, high, msrs, i); | 294 | rdmsrl(msrs->counters[i].addr, val); |
235 | if (CTR_OVERFLOWED(low)) { | 295 | /* bit is clear if overflowed: */ |
236 | oprofile_add_sample(regs, i); | 296 | if (val & OP_CTR_OVERFLOW) |
237 | CTR_WRITE(reset_value[i], msrs, i); | 297 | continue; |
238 | } | 298 | oprofile_add_sample(regs, virt); |
299 | wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]); | ||
239 | } | 300 | } |
240 | 301 | ||
241 | #ifdef CONFIG_OPROFILE_IBS | ||
242 | op_amd_handle_ibs(regs, msrs); | 302 | op_amd_handle_ibs(regs, msrs); |
243 | #endif | ||
244 | 303 | ||
245 | /* See op_model_ppro.c */ | 304 | /* See op_model_ppro.c */ |
246 | return 1; | 305 | return 1; |
@@ -248,79 +307,50 @@ static int op_amd_check_ctrs(struct pt_regs * const regs, | |||
248 | 307 | ||
249 | static void op_amd_start(struct op_msrs const * const msrs) | 308 | static void op_amd_start(struct op_msrs const * const msrs) |
250 | { | 309 | { |
251 | unsigned int low, high; | 310 | u64 val; |
252 | int i; | 311 | int i; |
253 | for (i = 0 ; i < NUM_COUNTERS ; ++i) { | ||
254 | if (reset_value[i]) { | ||
255 | CTRL_READ(low, high, msrs, i); | ||
256 | CTRL_SET_ACTIVE(low); | ||
257 | CTRL_WRITE(low, high, msrs, i); | ||
258 | } | ||
259 | } | ||
260 | 312 | ||
261 | #ifdef CONFIG_OPROFILE_IBS | 313 | for (i = 0; i < NUM_COUNTERS; ++i) { |
262 | if (has_ibs && ibs_config.fetch_enabled) { | 314 | if (!reset_value[op_x86_phys_to_virt(i)]) |
263 | low = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF; | 315 | continue; |
264 | high = ((ibs_config.rand_en & 0x1) << 25) /* bit 57 */ | 316 | rdmsrl(msrs->controls[i].addr, val); |
265 | + IBS_FETCH_HIGH_ENABLE; | 317 | val |= ARCH_PERFMON_EVENTSEL0_ENABLE; |
266 | wrmsr(MSR_AMD64_IBSFETCHCTL, low, high); | 318 | wrmsrl(msrs->controls[i].addr, val); |
267 | } | 319 | } |
268 | 320 | ||
269 | if (has_ibs && ibs_config.op_enabled) { | 321 | op_amd_start_ibs(); |
270 | low = ((ibs_config.max_cnt_op >> 4) & 0xFFFF) | ||
271 | + ((ibs_config.dispatched_ops & 0x1) << 19) /* bit 19 */ | ||
272 | + IBS_OP_LOW_ENABLE; | ||
273 | high = 0; | ||
274 | wrmsr(MSR_AMD64_IBSOPCTL, low, high); | ||
275 | } | ||
276 | #endif | ||
277 | } | 322 | } |
278 | 323 | ||
279 | |||
280 | static void op_amd_stop(struct op_msrs const * const msrs) | 324 | static void op_amd_stop(struct op_msrs const * const msrs) |
281 | { | 325 | { |
282 | unsigned int low, high; | 326 | u64 val; |
283 | int i; | 327 | int i; |
284 | 328 | ||
285 | /* | 329 | /* |
286 | * Subtle: stop on all counters to avoid race with setting our | 330 | * Subtle: stop on all counters to avoid race with setting our |
287 | * pm callback | 331 | * pm callback |
288 | */ | 332 | */ |
289 | for (i = 0 ; i < NUM_COUNTERS ; ++i) { | 333 | for (i = 0; i < NUM_COUNTERS; ++i) { |
290 | if (!reset_value[i]) | 334 | if (!reset_value[op_x86_phys_to_virt(i)]) |
291 | continue; | 335 | continue; |
292 | CTRL_READ(low, high, msrs, i); | 336 | rdmsrl(msrs->controls[i].addr, val); |
293 | CTRL_SET_INACTIVE(low); | 337 | val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE; |
294 | CTRL_WRITE(low, high, msrs, i); | 338 | wrmsrl(msrs->controls[i].addr, val); |
295 | } | ||
296 | |||
297 | #ifdef CONFIG_OPROFILE_IBS | ||
298 | if (has_ibs && ibs_config.fetch_enabled) { | ||
299 | /* clear max count and enable */ | ||
300 | low = 0; | ||
301 | high = 0; | ||
302 | wrmsr(MSR_AMD64_IBSFETCHCTL, low, high); | ||
303 | } | 339 | } |
304 | 340 | ||
305 | if (has_ibs && ibs_config.op_enabled) { | 341 | op_amd_stop_ibs(); |
306 | /* clear max count and enable */ | ||
307 | low = 0; | ||
308 | high = 0; | ||
309 | wrmsr(MSR_AMD64_IBSOPCTL, low, high); | ||
310 | } | ||
311 | #endif | ||
312 | } | 342 | } |
313 | 343 | ||
314 | static void op_amd_shutdown(struct op_msrs const * const msrs) | 344 | static void op_amd_shutdown(struct op_msrs const * const msrs) |
315 | { | 345 | { |
316 | int i; | 346 | int i; |
317 | 347 | ||
318 | for (i = 0 ; i < NUM_COUNTERS ; ++i) { | 348 | for (i = 0; i < NUM_COUNTERS; ++i) { |
319 | if (CTR_IS_RESERVED(msrs, i)) | 349 | if (msrs->counters[i].addr) |
320 | release_perfctr_nmi(MSR_K7_PERFCTR0 + i); | 350 | release_perfctr_nmi(MSR_K7_PERFCTR0 + i); |
321 | } | 351 | } |
322 | for (i = 0 ; i < NUM_CONTROLS ; ++i) { | 352 | for (i = 0; i < NUM_CONTROLS; ++i) { |
323 | if (CTRL_IS_RESERVED(msrs, i)) | 353 | if (msrs->controls[i].addr) |
324 | release_evntsel_nmi(MSR_K7_EVNTSEL0 + i); | 354 | release_evntsel_nmi(MSR_K7_EVNTSEL0 + i); |
325 | } | 355 | } |
326 | } | 356 | } |
@@ -490,15 +520,21 @@ static void op_amd_exit(void) {} | |||
490 | 520 | ||
491 | #endif /* CONFIG_OPROFILE_IBS */ | 521 | #endif /* CONFIG_OPROFILE_IBS */ |
492 | 522 | ||
493 | struct op_x86_model_spec const op_amd_spec = { | 523 | struct op_x86_model_spec op_amd_spec = { |
494 | .init = op_amd_init, | ||
495 | .exit = op_amd_exit, | ||
496 | .num_counters = NUM_COUNTERS, | 524 | .num_counters = NUM_COUNTERS, |
497 | .num_controls = NUM_CONTROLS, | 525 | .num_controls = NUM_CONTROLS, |
526 | .num_virt_counters = NUM_VIRT_COUNTERS, | ||
527 | .reserved = MSR_AMD_EVENTSEL_RESERVED, | ||
528 | .event_mask = OP_EVENT_MASK, | ||
529 | .init = op_amd_init, | ||
530 | .exit = op_amd_exit, | ||
498 | .fill_in_addresses = &op_amd_fill_in_addresses, | 531 | .fill_in_addresses = &op_amd_fill_in_addresses, |
499 | .setup_ctrs = &op_amd_setup_ctrs, | 532 | .setup_ctrs = &op_amd_setup_ctrs, |
500 | .check_ctrs = &op_amd_check_ctrs, | 533 | .check_ctrs = &op_amd_check_ctrs, |
501 | .start = &op_amd_start, | 534 | .start = &op_amd_start, |
502 | .stop = &op_amd_stop, | 535 | .stop = &op_amd_stop, |
503 | .shutdown = &op_amd_shutdown | 536 | .shutdown = &op_amd_shutdown, |
537 | #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX | ||
538 | .switch_ctrl = &op_mux_switch_ctrl, | ||
539 | #endif | ||
504 | }; | 540 | }; |
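
op_model_amd.c now programs each performance counter with -(u64)count via wrmsrl(), so the counter overflows (and raises the profiling NMI) after exactly count increments. A small simulation of that two's-complement trick; the 48-bit width is an assumption for the simulation, not a value taken from the patch:

#include <stdint.h>
#include <stdio.h>

#define CTR_WIDTH	48	/* assumed counter width for the simulation */
#define CTR_MASK	((1ULL << CTR_WIDTH) - 1)

int main(void)
{
	uint64_t count = 100000;	/* events between two samples */
	uint64_t ctr = (0ULL - count) & CTR_MASK; /* wrmsrl(addr, -(u64)count) */
	uint64_t i;

	/* each event increments the counter; it wraps to 0 after 'count' */
	for (i = 0; i < count; i++)
		ctr = (ctr + 1) & CTR_MASK;

	printf("after %llu events the counter reads %llu (wrapped)\n",
	       (unsigned long long)count, (unsigned long long)ctr);
	return 0;
}
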
diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c index 819b131fd752..ac6b354becdf 100644 --- a/arch/x86/oprofile/op_model_p4.c +++ b/arch/x86/oprofile/op_model_p4.c | |||
@@ -32,6 +32,8 @@ | |||
32 | #define NUM_CCCRS_HT2 9 | 32 | #define NUM_CCCRS_HT2 9 |
33 | #define NUM_CONTROLS_HT2 (NUM_ESCRS_HT2 + NUM_CCCRS_HT2) | 33 | #define NUM_CONTROLS_HT2 (NUM_ESCRS_HT2 + NUM_CCCRS_HT2) |
34 | 34 | ||
35 | #define OP_CTR_OVERFLOW (1ULL<<31) | ||
36 | |||
35 | static unsigned int num_counters = NUM_COUNTERS_NON_HT; | 37 | static unsigned int num_counters = NUM_COUNTERS_NON_HT; |
36 | static unsigned int num_controls = NUM_CONTROLS_NON_HT; | 38 | static unsigned int num_controls = NUM_CONTROLS_NON_HT; |
37 | 39 | ||
@@ -350,8 +352,6 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = { | |||
350 | #define ESCR_SET_OS_1(escr, os) ((escr) |= (((os) & 1) << 1)) | 352 | #define ESCR_SET_OS_1(escr, os) ((escr) |= (((os) & 1) << 1)) |
351 | #define ESCR_SET_EVENT_SELECT(escr, sel) ((escr) |= (((sel) & 0x3f) << 25)) | 353 | #define ESCR_SET_EVENT_SELECT(escr, sel) ((escr) |= (((sel) & 0x3f) << 25)) |
352 | #define ESCR_SET_EVENT_MASK(escr, mask) ((escr) |= (((mask) & 0xffff) << 9)) | 354 | #define ESCR_SET_EVENT_MASK(escr, mask) ((escr) |= (((mask) & 0xffff) << 9)) |
353 | #define ESCR_READ(escr, high, ev, i) do {rdmsr(ev->bindings[(i)].escr_address, (escr), (high)); } while (0) | ||
354 | #define ESCR_WRITE(escr, high, ev, i) do {wrmsr(ev->bindings[(i)].escr_address, (escr), (high)); } while (0) | ||
355 | 355 | ||
356 | #define CCCR_RESERVED_BITS 0x38030FFF | 356 | #define CCCR_RESERVED_BITS 0x38030FFF |
357 | #define CCCR_CLEAR(cccr) ((cccr) &= CCCR_RESERVED_BITS) | 357 | #define CCCR_CLEAR(cccr) ((cccr) &= CCCR_RESERVED_BITS) |
@@ -361,17 +361,9 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = { | |||
361 | #define CCCR_SET_PMI_OVF_1(cccr) ((cccr) |= (1<<27)) | 361 | #define CCCR_SET_PMI_OVF_1(cccr) ((cccr) |= (1<<27)) |
362 | #define CCCR_SET_ENABLE(cccr) ((cccr) |= (1<<12)) | 362 | #define CCCR_SET_ENABLE(cccr) ((cccr) |= (1<<12)) |
363 | #define CCCR_SET_DISABLE(cccr) ((cccr) &= ~(1<<12)) | 363 | #define CCCR_SET_DISABLE(cccr) ((cccr) &= ~(1<<12)) |
364 | #define CCCR_READ(low, high, i) do {rdmsr(p4_counters[(i)].cccr_address, (low), (high)); } while (0) | ||
365 | #define CCCR_WRITE(low, high, i) do {wrmsr(p4_counters[(i)].cccr_address, (low), (high)); } while (0) | ||
366 | #define CCCR_OVF_P(cccr) ((cccr) & (1U<<31)) | 364 | #define CCCR_OVF_P(cccr) ((cccr) & (1U<<31)) |
367 | #define CCCR_CLEAR_OVF(cccr) ((cccr) &= (~(1U<<31))) | 365 | #define CCCR_CLEAR_OVF(cccr) ((cccr) &= (~(1U<<31))) |
368 | 366 | ||
369 | #define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0) | ||
370 | #define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0) | ||
371 | #define CTR_READ(l, h, i) do {rdmsr(p4_counters[(i)].counter_address, (l), (h)); } while (0) | ||
372 | #define CTR_WRITE(l, i) do {wrmsr(p4_counters[(i)].counter_address, -(u32)(l), -1); } while (0) | ||
373 | #define CTR_OVERFLOW_P(ctr) (!((ctr) & 0x80000000)) | ||
374 | |||
375 | 367 | ||
376 | /* this assigns a "stagger" to the current CPU, which is used throughout | 368 | /* this assigns a "stagger" to the current CPU, which is used throughout |
377 | the code in this module as an extra array offset, to select the "even" | 369 | the code in this module as an extra array offset, to select the "even" |
@@ -515,7 +507,7 @@ static void pmc_setup_one_p4_counter(unsigned int ctr) | |||
515 | if (ev->bindings[i].virt_counter & counter_bit) { | 507 | if (ev->bindings[i].virt_counter & counter_bit) { |
516 | 508 | ||
517 | /* modify ESCR */ | 509 | /* modify ESCR */ |
518 | ESCR_READ(escr, high, ev, i); | 510 | rdmsr(ev->bindings[i].escr_address, escr, high); |
519 | ESCR_CLEAR(escr); | 511 | ESCR_CLEAR(escr); |
520 | if (stag == 0) { | 512 | if (stag == 0) { |
521 | ESCR_SET_USR_0(escr, counter_config[ctr].user); | 513 | ESCR_SET_USR_0(escr, counter_config[ctr].user); |
@@ -526,10 +518,11 @@ static void pmc_setup_one_p4_counter(unsigned int ctr) | |||
526 | } | 518 | } |
527 | ESCR_SET_EVENT_SELECT(escr, ev->event_select); | 519 | ESCR_SET_EVENT_SELECT(escr, ev->event_select); |
528 | ESCR_SET_EVENT_MASK(escr, counter_config[ctr].unit_mask); | 520 | ESCR_SET_EVENT_MASK(escr, counter_config[ctr].unit_mask); |
529 | ESCR_WRITE(escr, high, ev, i); | 521 | wrmsr(ev->bindings[i].escr_address, escr, high); |
530 | 522 | ||
531 | /* modify CCCR */ | 523 | /* modify CCCR */ |
532 | CCCR_READ(cccr, high, VIRT_CTR(stag, ctr)); | 524 | rdmsr(p4_counters[VIRT_CTR(stag, ctr)].cccr_address, |
525 | cccr, high); | ||
533 | CCCR_CLEAR(cccr); | 526 | CCCR_CLEAR(cccr); |
534 | CCCR_SET_REQUIRED_BITS(cccr); | 527 | CCCR_SET_REQUIRED_BITS(cccr); |
535 | CCCR_SET_ESCR_SELECT(cccr, ev->escr_select); | 528 | CCCR_SET_ESCR_SELECT(cccr, ev->escr_select); |
@@ -537,7 +530,8 @@ static void pmc_setup_one_p4_counter(unsigned int ctr) | |||
537 | CCCR_SET_PMI_OVF_0(cccr); | 530 | CCCR_SET_PMI_OVF_0(cccr); |
538 | else | 531 | else |
539 | CCCR_SET_PMI_OVF_1(cccr); | 532 | CCCR_SET_PMI_OVF_1(cccr); |
540 | CCCR_WRITE(cccr, high, VIRT_CTR(stag, ctr)); | 533 | wrmsr(p4_counters[VIRT_CTR(stag, ctr)].cccr_address, |
534 | cccr, high); | ||
541 | return; | 535 | return; |
542 | } | 536 | } |
543 | } | 537 | } |
@@ -548,7 +542,8 @@ static void pmc_setup_one_p4_counter(unsigned int ctr) | |||
548 | } | 542 | } |
549 | 543 | ||
550 | 544 | ||
551 | static void p4_setup_ctrs(struct op_msrs const * const msrs) | 545 | static void p4_setup_ctrs(struct op_x86_model_spec const *model, |
546 | struct op_msrs const * const msrs) | ||
552 | { | 547 | { |
553 | unsigned int i; | 548 | unsigned int i; |
554 | unsigned int low, high; | 549 | unsigned int low, high; |
@@ -563,8 +558,8 @@ static void p4_setup_ctrs(struct op_msrs const * const msrs) | |||
563 | } | 558 | } |
564 | 559 | ||
565 | /* clear the cccrs we will use */ | 560 | /* clear the cccrs we will use */ |
566 | for (i = 0 ; i < num_counters ; i++) { | 561 | for (i = 0; i < num_counters; i++) { |
567 | if (unlikely(!CTRL_IS_RESERVED(msrs, i))) | 562 | if (unlikely(!msrs->controls[i].addr)) |
568 | continue; | 563 | continue; |
569 | rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high); | 564 | rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high); |
570 | CCCR_CLEAR(low); | 565 | CCCR_CLEAR(low); |
@@ -574,17 +569,18 @@ static void p4_setup_ctrs(struct op_msrs const * const msrs) | |||
574 | 569 | ||
575 | /* clear all escrs (including those outside our concern) */ | 570 | /* clear all escrs (including those outside our concern) */ |
576 | for (i = num_counters; i < num_controls; i++) { | 571 | for (i = num_counters; i < num_controls; i++) { |
577 | if (unlikely(!CTRL_IS_RESERVED(msrs, i))) | 572 | if (unlikely(!msrs->controls[i].addr)) |
578 | continue; | 573 | continue; |
579 | wrmsr(msrs->controls[i].addr, 0, 0); | 574 | wrmsr(msrs->controls[i].addr, 0, 0); |
580 | } | 575 | } |
581 | 576 | ||
582 | /* setup all counters */ | 577 | /* setup all counters */ |
583 | for (i = 0 ; i < num_counters ; ++i) { | 578 | for (i = 0; i < num_counters; ++i) { |
584 | if ((counter_config[i].enabled) && (CTRL_IS_RESERVED(msrs, i))) { | 579 | if (counter_config[i].enabled && msrs->controls[i].addr) { |
585 | reset_value[i] = counter_config[i].count; | 580 | reset_value[i] = counter_config[i].count; |
586 | pmc_setup_one_p4_counter(i); | 581 | pmc_setup_one_p4_counter(i); |
587 | CTR_WRITE(counter_config[i].count, VIRT_CTR(stag, i)); | 582 | wrmsrl(p4_counters[VIRT_CTR(stag, i)].counter_address, |
583 | -(u64)counter_config[i].count); | ||
588 | } else { | 584 | } else { |
589 | reset_value[i] = 0; | 585 | reset_value[i] = 0; |
590 | } | 586 | } |
@@ -624,14 +620,16 @@ static int p4_check_ctrs(struct pt_regs * const regs, | |||
624 | 620 | ||
625 | real = VIRT_CTR(stag, i); | 621 | real = VIRT_CTR(stag, i); |
626 | 622 | ||
627 | CCCR_READ(low, high, real); | 623 | rdmsr(p4_counters[real].cccr_address, low, high); |
628 | CTR_READ(ctr, high, real); | 624 | rdmsr(p4_counters[real].counter_address, ctr, high); |
629 | if (CCCR_OVF_P(low) || CTR_OVERFLOW_P(ctr)) { | 625 | if (CCCR_OVF_P(low) || !(ctr & OP_CTR_OVERFLOW)) { |
630 | oprofile_add_sample(regs, i); | 626 | oprofile_add_sample(regs, i); |
631 | CTR_WRITE(reset_value[i], real); | 627 | wrmsrl(p4_counters[real].counter_address, |
628 | -(u64)reset_value[i]); | ||
632 | CCCR_CLEAR_OVF(low); | 629 | CCCR_CLEAR_OVF(low); |
633 | CCCR_WRITE(low, high, real); | 630 | wrmsr(p4_counters[real].cccr_address, low, high); |
634 | CTR_WRITE(reset_value[i], real); | 631 | wrmsrl(p4_counters[real].counter_address, |
632 | -(u64)reset_value[i]); | ||
635 | } | 633 | } |
636 | } | 634 | } |
637 | 635 | ||
@@ -653,9 +651,9 @@ static void p4_start(struct op_msrs const * const msrs) | |||
653 | for (i = 0; i < num_counters; ++i) { | 651 | for (i = 0; i < num_counters; ++i) { |
654 | if (!reset_value[i]) | 652 | if (!reset_value[i]) |
655 | continue; | 653 | continue; |
656 | CCCR_READ(low, high, VIRT_CTR(stag, i)); | 654 | rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high); |
657 | CCCR_SET_ENABLE(low); | 655 | CCCR_SET_ENABLE(low); |
658 | CCCR_WRITE(low, high, VIRT_CTR(stag, i)); | 656 | wrmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high); |
659 | } | 657 | } |
660 | } | 658 | } |
661 | 659 | ||
@@ -670,9 +668,9 @@ static void p4_stop(struct op_msrs const * const msrs) | |||
670 | for (i = 0; i < num_counters; ++i) { | 668 | for (i = 0; i < num_counters; ++i) { |
671 | if (!reset_value[i]) | 669 | if (!reset_value[i]) |
672 | continue; | 670 | continue; |
673 | CCCR_READ(low, high, VIRT_CTR(stag, i)); | 671 | rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high); |
674 | CCCR_SET_DISABLE(low); | 672 | CCCR_SET_DISABLE(low); |
675 | CCCR_WRITE(low, high, VIRT_CTR(stag, i)); | 673 | wrmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high); |
676 | } | 674 | } |
677 | } | 675 | } |
678 | 676 | ||
@@ -680,8 +678,8 @@ static void p4_shutdown(struct op_msrs const * const msrs) | |||
680 | { | 678 | { |
681 | int i; | 679 | int i; |
682 | 680 | ||
683 | for (i = 0 ; i < num_counters ; ++i) { | 681 | for (i = 0; i < num_counters; ++i) { |
684 | if (CTR_IS_RESERVED(msrs, i)) | 682 | if (msrs->counters[i].addr) |
685 | release_perfctr_nmi(msrs->counters[i].addr); | 683 | release_perfctr_nmi(msrs->counters[i].addr); |
686 | } | 684 | } |
687 | /* | 685 | /* |
@@ -689,15 +687,15 @@ static void p4_shutdown(struct op_msrs const * const msrs) | |||
689 | * conjunction with the counter registers (hence the starting offset). | 687 | * conjunction with the counter registers (hence the starting offset). |
690 | * This saves a few bits. | 688 | * This saves a few bits. |
691 | */ | 689 | */ |
692 | for (i = num_counters ; i < num_controls ; ++i) { | 690 | for (i = num_counters; i < num_controls; ++i) { |
693 | if (CTRL_IS_RESERVED(msrs, i)) | 691 | if (msrs->controls[i].addr) |
694 | release_evntsel_nmi(msrs->controls[i].addr); | 692 | release_evntsel_nmi(msrs->controls[i].addr); |
695 | } | 693 | } |
696 | } | 694 | } |
697 | 695 | ||
698 | 696 | ||
699 | #ifdef CONFIG_SMP | 697 | #ifdef CONFIG_SMP |
700 | struct op_x86_model_spec const op_p4_ht2_spec = { | 698 | struct op_x86_model_spec op_p4_ht2_spec = { |
701 | .num_counters = NUM_COUNTERS_HT2, | 699 | .num_counters = NUM_COUNTERS_HT2, |
702 | .num_controls = NUM_CONTROLS_HT2, | 700 | .num_controls = NUM_CONTROLS_HT2, |
703 | .fill_in_addresses = &p4_fill_in_addresses, | 701 | .fill_in_addresses = &p4_fill_in_addresses, |
@@ -709,7 +707,7 @@ struct op_x86_model_spec const op_p4_ht2_spec = { | |||
709 | }; | 707 | }; |
710 | #endif | 708 | #endif |
711 | 709 | ||
712 | struct op_x86_model_spec const op_p4_spec = { | 710 | struct op_x86_model_spec op_p4_spec = { |
713 | .num_counters = NUM_COUNTERS_NON_HT, | 711 | .num_counters = NUM_COUNTERS_NON_HT, |
714 | .num_controls = NUM_CONTROLS_NON_HT, | 712 | .num_controls = NUM_CONTROLS_NON_HT, |
715 | .fill_in_addresses = &p4_fill_in_addresses, | 713 | .fill_in_addresses = &p4_fill_in_addresses, |
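[Note on the conversion above: the p4 model drops the CTR_WRITE()/CTR_READ() wrappers in favor of plain wrmsrl()/rdmsrl() with the event count written as a 64-bit two's complement. Perfmon counters count upward and deliver the interrupt when they wrap past zero, so arming a counter with -(u64)count makes it fire after exactly count events. A minimal sketch of the idiom; the helper name is illustrative, not from this series:

	/* Arm a perfctr MSR to interrupt after "count" events. */
	static void op_arm_counter(unsigned long msr, u64 count)
	{
		/*
		 * e.g. count = 100000 writes 0xfffffffffffe7960; the
		 * counter increments and overflows to zero after
		 * 100000 events, raising the PMI/NMI.
		 */
		wrmsrl(msr, -(u64)count);
	}
]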
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c index 4da7230b3d17..4899215999de 100644 --- a/arch/x86/oprofile/op_model_ppro.c +++ b/arch/x86/oprofile/op_model_ppro.c | |||
@@ -10,6 +10,7 @@ | |||
10 | * @author Philippe Elie | 10 | * @author Philippe Elie |
11 | * @author Graydon Hoare | 11 | * @author Graydon Hoare |
12 | * @author Andi Kleen | 12 | * @author Andi Kleen |
13 | * @author Robert Richter <robert.richter@amd.com> | ||
13 | */ | 14 | */ |
14 | 15 | ||
15 | #include <linux/oprofile.h> | 16 | #include <linux/oprofile.h> |
@@ -18,7 +19,6 @@ | |||
18 | #include <asm/msr.h> | 19 | #include <asm/msr.h> |
19 | #include <asm/apic.h> | 20 | #include <asm/apic.h> |
20 | #include <asm/nmi.h> | 21 | #include <asm/nmi.h> |
21 | #include <asm/perf_counter.h> | ||
22 | 22 | ||
23 | #include "op_x86_model.h" | 23 | #include "op_x86_model.h" |
24 | #include "op_counter.h" | 24 | #include "op_counter.h" |
@@ -26,20 +26,7 @@ | |||
26 | static int num_counters = 2; | 26 | static int num_counters = 2; |
27 | static int counter_width = 32; | 27 | static int counter_width = 32; |
28 | 28 | ||
29 | #define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0) | 29 | #define MSR_PPRO_EVENTSEL_RESERVED ((0xFFFFFFFFULL<<32)|(1ULL<<21)) |
30 | #define CTR_OVERFLOWED(n) (!((n) & (1ULL<<(counter_width-1)))) | ||
31 | |||
32 | #define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0) | ||
33 | #define CTRL_READ(l, h, msrs, c) do {rdmsr((msrs->controls[(c)].addr), (l), (h)); } while (0) | ||
34 | #define CTRL_WRITE(l, h, msrs, c) do {wrmsr((msrs->controls[(c)].addr), (l), (h)); } while (0) | ||
35 | #define CTRL_SET_ACTIVE(n) (n |= (1<<22)) | ||
36 | #define CTRL_SET_INACTIVE(n) (n &= ~(1<<22)) | ||
37 | #define CTRL_CLEAR(x) (x &= (1<<21)) | ||
38 | #define CTRL_SET_ENABLE(val) (val |= 1<<20) | ||
39 | #define CTRL_SET_USR(val, u) (val |= ((u & 1) << 16)) | ||
40 | #define CTRL_SET_KERN(val, k) (val |= ((k & 1) << 17)) | ||
41 | #define CTRL_SET_UM(val, m) (val |= (m << 8)) | ||
42 | #define CTRL_SET_EVENT(val, e) (val |= e) | ||
43 | 30 | ||
44 | static u64 *reset_value; | 31 | static u64 *reset_value; |
45 | 32 | ||
@@ -63,9 +50,10 @@ static void ppro_fill_in_addresses(struct op_msrs * const msrs) | |||
63 | } | 50 | } |
64 | 51 | ||
65 | 52 | ||
66 | static void ppro_setup_ctrs(struct op_msrs const * const msrs) | 53 | static void ppro_setup_ctrs(struct op_x86_model_spec const *model, |
54 | struct op_msrs const * const msrs) | ||
67 | { | 55 | { |
68 | unsigned int low, high; | 56 | u64 val; |
69 | int i; | 57 | int i; |
70 | 58 | ||
71 | if (!reset_value) { | 59 | if (!reset_value) { |
@@ -93,36 +81,30 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs) | |||
93 | } | 81 | } |
94 | 82 | ||
95 | /* clear all counters */ | 83 | /* clear all counters */ |
96 | for (i = 0 ; i < num_counters; ++i) { | 84 | for (i = 0; i < num_counters; ++i) { |
97 | if (unlikely(!CTRL_IS_RESERVED(msrs, i))) | 85 | if (unlikely(!msrs->controls[i].addr)) |
98 | continue; | 86 | continue; |
99 | CTRL_READ(low, high, msrs, i); | 87 | rdmsrl(msrs->controls[i].addr, val); |
100 | CTRL_CLEAR(low); | 88 | val &= model->reserved; |
101 | CTRL_WRITE(low, high, msrs, i); | 89 | wrmsrl(msrs->controls[i].addr, val); |
102 | } | 90 | } |
103 | 91 | ||
104 | /* avoid a false detection of ctr overflows in NMI handler */ | 92 | /* avoid a false detection of ctr overflows in NMI handler */ |
105 | for (i = 0; i < num_counters; ++i) { | 93 | for (i = 0; i < num_counters; ++i) { |
106 | if (unlikely(!CTR_IS_RESERVED(msrs, i))) | 94 | if (unlikely(!msrs->counters[i].addr)) |
107 | continue; | 95 | continue; |
108 | wrmsrl(msrs->counters[i].addr, -1LL); | 96 | wrmsrl(msrs->counters[i].addr, -1LL); |
109 | } | 97 | } |
110 | 98 | ||
111 | /* enable active counters */ | 99 | /* enable active counters */ |
112 | for (i = 0; i < num_counters; ++i) { | 100 | for (i = 0; i < num_counters; ++i) { |
113 | if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) { | 101 | if (counter_config[i].enabled && msrs->counters[i].addr) { |
114 | reset_value[i] = counter_config[i].count; | 102 | reset_value[i] = counter_config[i].count; |
115 | |||
116 | wrmsrl(msrs->counters[i].addr, -reset_value[i]); | 103 | wrmsrl(msrs->counters[i].addr, -reset_value[i]); |
117 | 104 | rdmsrl(msrs->controls[i].addr, val); | |
118 | CTRL_READ(low, high, msrs, i); | 105 | val &= model->reserved; |
119 | CTRL_CLEAR(low); | 106 | val |= op_x86_get_ctrl(model, &counter_config[i]); |
120 | CTRL_SET_ENABLE(low); | 107 | wrmsrl(msrs->controls[i].addr, val); |
121 | CTRL_SET_USR(low, counter_config[i].user); | ||
122 | CTRL_SET_KERN(low, counter_config[i].kernel); | ||
123 | CTRL_SET_UM(low, counter_config[i].unit_mask); | ||
124 | CTRL_SET_EVENT(low, counter_config[i].event); | ||
125 | CTRL_WRITE(low, high, msrs, i); | ||
126 | } else { | 108 | } else { |
127 | reset_value[i] = 0; | 109 | reset_value[i] = 0; |
128 | } | 110 | } |
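[The CTRL_SET_*() macros removed here give way to op_x86_get_ctrl(), shared across the x86 models. Judging from the deleted macros, the control value it produces plausibly packs the event-select fields as below; this is a sketch under that assumption, not the helper's actual body:

	/* Bit layout the removed CTRL_SET_* macros encoded. */
	static u64 ctrl_from_config(struct op_counter_config *c)
	{
		u64 val = 0;

		val |= c->event & 0xff;          /* event select, bits 0-7 */
		val |= (u64)c->unit_mask << 8;   /* unit mask, bits 8-15   */
		val |= (c->user & 1ULL) << 16;   /* count in user mode     */
		val |= (c->kernel & 1ULL) << 17; /* count in kernel mode   */
		val |= 1ULL << 20;               /* APIC interrupt enable  */
		return val;
	}

The enable bit (bit 22) is deliberately left out: ppro_start() and ppro_stop() toggle it separately via ARCH_PERFMON_EVENTSEL0_ENABLE.]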
@@ -143,14 +125,14 @@ static int ppro_check_ctrs(struct pt_regs * const regs, | |||
143 | if (unlikely(!reset_value)) | 125 | if (unlikely(!reset_value)) |
144 | goto out; | 126 | goto out; |
145 | 127 | ||
146 | for (i = 0 ; i < num_counters; ++i) { | 128 | for (i = 0; i < num_counters; ++i) { |
147 | if (!reset_value[i]) | 129 | if (!reset_value[i]) |
148 | continue; | 130 | continue; |
149 | rdmsrl(msrs->counters[i].addr, val); | 131 | rdmsrl(msrs->counters[i].addr, val); |
150 | if (CTR_OVERFLOWED(val)) { | 132 | if (val & (1ULL << (counter_width - 1))) |
151 | oprofile_add_sample(regs, i); | 133 | continue; |
152 | wrmsrl(msrs->counters[i].addr, -reset_value[i]); | 134 | oprofile_add_sample(regs, i); |
153 | } | 135 | wrmsrl(msrs->counters[i].addr, -reset_value[i]); |
154 | } | 136 | } |
155 | 137 | ||
156 | out: | 138 | out: |
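[The rewritten overflow test open-codes the old CTR_OVERFLOWED() macro: because the counter was armed with a negative value, its top bit stays set until it wraps past zero, so a clear bit (counter_width - 1) means an overflow occurred and a sample is due. The same predicate as a standalone sketch:

	/* True once an armed (negative) counter has wrapped past zero. */
	static int ctr_overflowed(u64 val, int width)
	{
		return !(val & (1ULL << (width - 1)));
	}
]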
@@ -171,16 +153,16 @@ out: | |||
171 | 153 | ||
172 | static void ppro_start(struct op_msrs const * const msrs) | 154 | static void ppro_start(struct op_msrs const * const msrs) |
173 | { | 155 | { |
174 | unsigned int low, high; | 156 | u64 val; |
175 | int i; | 157 | int i; |
176 | 158 | ||
177 | if (!reset_value) | 159 | if (!reset_value) |
178 | return; | 160 | return; |
179 | for (i = 0; i < num_counters; ++i) { | 161 | for (i = 0; i < num_counters; ++i) { |
180 | if (reset_value[i]) { | 162 | if (reset_value[i]) { |
181 | CTRL_READ(low, high, msrs, i); | 163 | rdmsrl(msrs->controls[i].addr, val); |
182 | CTRL_SET_ACTIVE(low); | 164 | val |= ARCH_PERFMON_EVENTSEL0_ENABLE; |
183 | CTRL_WRITE(low, high, msrs, i); | 165 | wrmsrl(msrs->controls[i].addr, val); |
184 | } | 166 | } |
185 | } | 167 | } |
186 | } | 168 | } |
@@ -188,7 +170,7 @@ static void ppro_start(struct op_msrs const * const msrs) | |||
188 | 170 | ||
189 | static void ppro_stop(struct op_msrs const * const msrs) | 171 | static void ppro_stop(struct op_msrs const * const msrs) |
190 | { | 172 | { |
191 | unsigned int low, high; | 173 | u64 val; |
192 | int i; | 174 | int i; |
193 | 175 | ||
194 | if (!reset_value) | 176 | if (!reset_value) |
@@ -196,9 +178,9 @@ static void ppro_stop(struct op_msrs const * const msrs) | |||
196 | for (i = 0; i < num_counters; ++i) { | 178 | for (i = 0; i < num_counters; ++i) { |
197 | if (!reset_value[i]) | 179 | if (!reset_value[i]) |
198 | continue; | 180 | continue; |
199 | CTRL_READ(low, high, msrs, i); | 181 | rdmsrl(msrs->controls[i].addr, val); |
200 | CTRL_SET_INACTIVE(low); | 182 | val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE; |
201 | CTRL_WRITE(low, high, msrs, i); | 183 | wrmsrl(msrs->controls[i].addr, val); |
202 | } | 184 | } |
203 | } | 185 | } |
204 | 186 | ||
@@ -206,12 +188,12 @@ static void ppro_shutdown(struct op_msrs const * const msrs) | |||
206 | { | 188 | { |
207 | int i; | 189 | int i; |
208 | 190 | ||
209 | for (i = 0 ; i < num_counters ; ++i) { | 191 | for (i = 0; i < num_counters; ++i) { |
210 | if (CTR_IS_RESERVED(msrs, i)) | 192 | if (msrs->counters[i].addr) |
211 | release_perfctr_nmi(MSR_P6_PERFCTR0 + i); | 193 | release_perfctr_nmi(MSR_P6_PERFCTR0 + i); |
212 | } | 194 | } |
213 | for (i = 0 ; i < num_counters ; ++i) { | 195 | for (i = 0; i < num_counters; ++i) { |
214 | if (CTRL_IS_RESERVED(msrs, i)) | 196 | if (msrs->controls[i].addr) |
215 | release_evntsel_nmi(MSR_P6_EVNTSEL0 + i); | 197 | release_evntsel_nmi(MSR_P6_EVNTSEL0 + i); |
216 | } | 198 | } |
217 | if (reset_value) { | 199 | if (reset_value) { |
@@ -222,8 +204,9 @@ static void ppro_shutdown(struct op_msrs const * const msrs) | |||
222 | 204 | ||
223 | 205 | ||
224 | struct op_x86_model_spec op_ppro_spec = { | 206 | struct op_x86_model_spec op_ppro_spec = { |
225 | .num_counters = 2, /* can be overridden */ | 207 | .num_counters = 2, |
226 | .num_controls = 2, /* ditto */ | 208 | .num_controls = 2, |
209 | .reserved = MSR_PPRO_EVENTSEL_RESERVED, | ||
227 | .fill_in_addresses = &ppro_fill_in_addresses, | 210 | .fill_in_addresses = &ppro_fill_in_addresses, |
228 | .setup_ctrs = &ppro_setup_ctrs, | 211 | .setup_ctrs = &ppro_setup_ctrs, |
229 | .check_ctrs = &ppro_check_ctrs, | 212 | .check_ctrs = &ppro_check_ctrs, |
@@ -241,7 +224,7 @@ struct op_x86_model_spec op_ppro_spec = { | |||
241 | * the specific CPU. | 224 | * the specific CPU. |
242 | */ | 225 | */ |
243 | 226 | ||
244 | void arch_perfmon_setup_counters(void) | 227 | static void arch_perfmon_setup_counters(void) |
245 | { | 228 | { |
246 | union cpuid10_eax eax; | 229 | union cpuid10_eax eax; |
247 | 230 | ||
@@ -259,11 +242,17 @@ void arch_perfmon_setup_counters(void) | |||
259 | 242 | ||
260 | op_arch_perfmon_spec.num_counters = num_counters; | 243 | op_arch_perfmon_spec.num_counters = num_counters; |
261 | op_arch_perfmon_spec.num_controls = num_counters; | 244 | op_arch_perfmon_spec.num_controls = num_counters; |
262 | op_ppro_spec.num_counters = num_counters; | 245 | } |
263 | op_ppro_spec.num_controls = num_counters; | 246 | |
247 | static int arch_perfmon_init(struct oprofile_operations *ignore) | ||
248 | { | ||
249 | arch_perfmon_setup_counters(); | ||
250 | return 0; | ||
264 | } | 251 | } |
265 | 252 | ||
266 | struct op_x86_model_spec op_arch_perfmon_spec = { | 253 | struct op_x86_model_spec op_arch_perfmon_spec = { |
254 | .reserved = MSR_PPRO_EVENTSEL_RESERVED, | ||
255 | .init = &arch_perfmon_init, | ||
267 | /* num_counters/num_controls filled in at runtime */ | 256 | /* num_counters/num_controls filled in at runtime */ |
268 | .fill_in_addresses = &ppro_fill_in_addresses, | 257 | .fill_in_addresses = &ppro_fill_in_addresses, |
269 | /* user space does the cpuid check for available events */ | 258 | /* user space does the cpuid check for available events */ |
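[arch_perfmon_setup_counters() sizes the spec at runtime from CPUID leaf 0xA, through which architectural perfmon advertises its version, counter count, and counter width. A sketch of the enumeration; union cpuid10_eax comes from <asm/perf_counter.h>, the printk is illustrative:

	#include <asm/processor.h>	/* cpuid_eax() */
	#include <asm/perf_counter.h>	/* union cpuid10_eax */

	static void show_arch_perfmon(void)
	{
		union cpuid10_eax eax;

		eax.full = cpuid_eax(0xa);
		if (!eax.split.version_id)	/* 0: no arch perfmon */
			return;
		printk(KERN_INFO "arch perfmon v%u: %u counters, %u bits\n",
		       eax.split.version_id, eax.split.num_counters,
		       eax.split.bit_width);
	}
]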
diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h index 825e79064d64..b83776180c7f 100644 --- a/arch/x86/oprofile/op_x86_model.h +++ b/arch/x86/oprofile/op_x86_model.h | |||
@@ -6,51 +6,66 @@ | |||
6 | * @remark Read the file COPYING | 6 | * @remark Read the file COPYING |
7 | * | 7 | * |
8 | * @author Graydon Hoare | 8 | * @author Graydon Hoare |
9 | * @author Robert Richter <robert.richter@amd.com> | ||
9 | */ | 10 | */ |
10 | 11 | ||
11 | #ifndef OP_X86_MODEL_H | 12 | #ifndef OP_X86_MODEL_H |
12 | #define OP_X86_MODEL_H | 13 | #define OP_X86_MODEL_H |
13 | 14 | ||
14 | struct op_saved_msr { | 15 | #include <asm/types.h> |
15 | unsigned int high; | 16 | #include <asm/perf_counter.h> |
16 | unsigned int low; | ||
17 | }; | ||
18 | 17 | ||
19 | struct op_msr { | 18 | struct op_msr { |
20 | unsigned long addr; | 19 | unsigned long addr; |
21 | struct op_saved_msr saved; | 20 | u64 saved; |
22 | }; | 21 | }; |
23 | 22 | ||
24 | struct op_msrs { | 23 | struct op_msrs { |
25 | struct op_msr *counters; | 24 | struct op_msr *counters; |
26 | struct op_msr *controls; | 25 | struct op_msr *controls; |
26 | struct op_msr *multiplex; | ||
27 | }; | 27 | }; |
28 | 28 | ||
29 | struct pt_regs; | 29 | struct pt_regs; |
30 | 30 | ||
31 | struct oprofile_operations; | ||
32 | |||
31 | /* The model vtable abstracts the differences between | 33 | /* The model vtable abstracts the differences between |
32 | * various x86 CPU models' perfctr support. | 34 | * various x86 CPU models' perfctr support. |
33 | */ | 35 | */ |
34 | struct op_x86_model_spec { | 36 | struct op_x86_model_spec { |
35 | int (*init)(struct oprofile_operations *ops); | 37 | unsigned int num_counters; |
36 | void (*exit)(void); | 38 | unsigned int num_controls; |
37 | unsigned int num_counters; | 39 | unsigned int num_virt_counters; |
38 | unsigned int num_controls; | 40 | u64 reserved; |
39 | void (*fill_in_addresses)(struct op_msrs * const msrs); | 41 | u16 event_mask; |
40 | void (*setup_ctrs)(struct op_msrs const * const msrs); | 42 | int (*init)(struct oprofile_operations *ops); |
41 | int (*check_ctrs)(struct pt_regs * const regs, | 43 | void (*exit)(void); |
42 | struct op_msrs const * const msrs); | 44 | void (*fill_in_addresses)(struct op_msrs * const msrs); |
43 | void (*start)(struct op_msrs const * const msrs); | 45 | void (*setup_ctrs)(struct op_x86_model_spec const *model, |
44 | void (*stop)(struct op_msrs const * const msrs); | 46 | struct op_msrs const * const msrs); |
45 | void (*shutdown)(struct op_msrs const * const msrs); | 47 | int (*check_ctrs)(struct pt_regs * const regs, |
48 | struct op_msrs const * const msrs); | ||
49 | void (*start)(struct op_msrs const * const msrs); | ||
50 | void (*stop)(struct op_msrs const * const msrs); | ||
51 | void (*shutdown)(struct op_msrs const * const msrs); | ||
52 | #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX | ||
53 | void (*switch_ctrl)(struct op_x86_model_spec const *model, | ||
54 | struct op_msrs const * const msrs); | ||
55 | #endif | ||
46 | }; | 56 | }; |
47 | 57 | ||
58 | struct op_counter_config; | ||
59 | |||
60 | extern u64 op_x86_get_ctrl(struct op_x86_model_spec const *model, | ||
61 | struct op_counter_config *counter_config); | ||
62 | extern int op_x86_phys_to_virt(int phys); | ||
63 | extern int op_x86_virt_to_phys(int virt); | ||
64 | |||
48 | extern struct op_x86_model_spec op_ppro_spec; | 65 | extern struct op_x86_model_spec op_ppro_spec; |
49 | extern struct op_x86_model_spec const op_p4_spec; | 66 | extern struct op_x86_model_spec op_p4_spec; |
50 | extern struct op_x86_model_spec const op_p4_ht2_spec; | 67 | extern struct op_x86_model_spec op_p4_ht2_spec; |
51 | extern struct op_x86_model_spec const op_amd_spec; | 68 | extern struct op_x86_model_spec op_amd_spec; |
52 | extern struct op_x86_model_spec op_arch_perfmon_spec; | 69 | extern struct op_x86_model_spec op_arch_perfmon_spec; |
53 | 70 | ||
54 | extern void arch_perfmon_setup_counters(void); | ||
55 | |||
56 | #endif /* OP_X86_MODEL_H */ | 71 | #endif /* OP_X86_MODEL_H */ |
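[With setup_ctrs() now taking the model pointer, shared helpers such as op_x86_get_ctrl() can consult per-model data like the reserved mask instead of per-file macros. The call site in nmi_int.c presumably ends up shaped roughly like this; local names are illustrative:

	/* Per-CPU setup dispatching through the model vtable. */
	static void nmi_cpu_setup_sketch(struct op_x86_model_spec const *model,
					 struct op_msrs *msrs)
	{
		model->fill_in_addresses(msrs);
		model->setup_ctrs(model, msrs);	/* model passed through */
	}

The optional switch_ctrl hook, compiled in under CONFIG_OPROFILE_EVENT_MULTIPLEX, gets the same (model, msrs) signature so a timer can reprogram the controls and rotate more events than there are physical counters.]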
diff --git a/arch/x86/pci/direct.c b/arch/x86/pci/direct.c index bd13c3e4c6db..347d882b3bb3 100644 --- a/arch/x86/pci/direct.c +++ b/arch/x86/pci/direct.c | |||
@@ -192,13 +192,14 @@ struct pci_raw_ops pci_direct_conf2 = { | |||
192 | static int __init pci_sanity_check(struct pci_raw_ops *o) | 192 | static int __init pci_sanity_check(struct pci_raw_ops *o) |
193 | { | 193 | { |
194 | u32 x = 0; | 194 | u32 x = 0; |
195 | int devfn; | 195 | int year, devfn; |
196 | 196 | ||
197 | if (pci_probe & PCI_NO_CHECKS) | 197 | if (pci_probe & PCI_NO_CHECKS) |
198 | return 1; | 198 | return 1; |
199 | /* Assume Type 1 works for newer systems. | 199 | /* Assume Type 1 works for newer systems. |
200 | This handles machines that don't have anything on PCI Bus 0. */ | 200 | This handles machines that don't have anything on PCI Bus 0. */ |
201 | if (dmi_get_year(DMI_BIOS_DATE) >= 2001) | 201 | dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL); |
202 | if (year >= 2001) | ||
202 | return 1; | 203 | return 1; |
203 | 204 | ||
204 | for (devfn = 0; devfn < 0x100; devfn++) { | 205 | for (devfn = 0; devfn < 0x100; devfn++) { |
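[dmi_get_date() supersedes dmi_get_year() and can split a DMI date field into year, month, and day; NULL pointers skip components the caller does not need. A hedged usage sketch of the pattern adopted above; the helper name is illustrative:

	#include <linux/dmi.h>

	static int bios_from_2001_or_later(void)
	{
		int year = 0;

		/* year stays 0 when the BIOS date is absent or unparsable */
		dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL);
		return year >= 2001;
	}
]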
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile index 7410640db173..3bb4fc21f4f2 100644 --- a/arch/x86/xen/Makefile +++ b/arch/x86/xen/Makefile | |||
@@ -8,6 +8,7 @@ endif | |||
8 | # Make sure early boot has no stackprotector | 8 | # Make sure early boot has no stackprotector |
9 | nostackp := $(call cc-option, -fno-stack-protector) | 9 | nostackp := $(call cc-option, -fno-stack-protector) |
10 | CFLAGS_enlighten.o := $(nostackp) | 10 | CFLAGS_enlighten.o := $(nostackp) |
11 | CFLAGS_mmu.o := $(nostackp) | ||
11 | 12 | ||
12 | obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \ | 13 | obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \ |
13 | time.o xen-asm.o xen-asm_$(BITS).o \ | 14 | time.o xen-asm.o xen-asm_$(BITS).o \ |
@@ -16,3 +17,4 @@ obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \ | |||
16 | obj-$(CONFIG_SMP) += smp.o | 17 | obj-$(CONFIG_SMP) += smp.o |
17 | obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o | 18 | obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o |
18 | obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o | 19 | obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o |
20 | |||
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index eb33aaa8415d..0dd0c2c6cae0 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -51,6 +51,7 @@ | |||
51 | #include <asm/pgtable.h> | 51 | #include <asm/pgtable.h> |
52 | #include <asm/tlbflush.h> | 52 | #include <asm/tlbflush.h> |
53 | #include <asm/reboot.h> | 53 | #include <asm/reboot.h> |
54 | #include <asm/stackprotector.h> | ||
54 | 55 | ||
55 | #include "xen-ops.h" | 56 | #include "xen-ops.h" |
56 | #include "mmu.h" | 57 | #include "mmu.h" |
@@ -330,18 +331,28 @@ static void xen_load_gdt(const struct desc_ptr *dtr) | |||
330 | unsigned long frames[pages]; | 331 | unsigned long frames[pages]; |
331 | int f; | 332 | int f; |
332 | 333 | ||
333 | /* A GDT can be up to 64k in size, which corresponds to 8192 | 334 | /* |
334 | 8-byte entries, or 16 4k pages.. */ | 335 | * A GDT can be up to 64k in size, which corresponds to 8192 |
336 | * 8-byte entries, or 16 4k pages.. | ||
337 | */ | ||
335 | 338 | ||
336 | BUG_ON(size > 65536); | 339 | BUG_ON(size > 65536); |
337 | BUG_ON(va & ~PAGE_MASK); | 340 | BUG_ON(va & ~PAGE_MASK); |
338 | 341 | ||
339 | for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) { | 342 | for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) { |
340 | int level; | 343 | int level; |
341 | pte_t *ptep = lookup_address(va, &level); | 344 | pte_t *ptep; |
342 | unsigned long pfn, mfn; | 345 | unsigned long pfn, mfn; |
343 | void *virt; | 346 | void *virt; |
344 | 347 | ||
348 | /* | ||
349 | * The GDT is per-cpu and is in the percpu data area. | ||
350 | * That can be virtually mapped, so we need to do a | ||
351 | * page-walk to get the underlying MFN for the | ||
352 | * hypercall. The page can also be in the kernel's | ||
353 | * linear range, so we need to RO that mapping too. | ||
354 | */ | ||
355 | ptep = lookup_address(va, &level); | ||
345 | BUG_ON(ptep == NULL); | 356 | BUG_ON(ptep == NULL); |
346 | 357 | ||
347 | pfn = pte_pfn(*ptep); | 358 | pfn = pte_pfn(*ptep); |
@@ -358,6 +369,44 @@ static void xen_load_gdt(const struct desc_ptr *dtr) | |||
358 | BUG(); | 369 | BUG(); |
359 | } | 370 | } |
360 | 371 | ||
372 | /* | ||
373 | * load_gdt for early boot, when the gdt is only mapped once | ||
374 | */ | ||
375 | static __init void xen_load_gdt_boot(const struct desc_ptr *dtr) | ||
376 | { | ||
377 | unsigned long va = dtr->address; | ||
378 | unsigned int size = dtr->size + 1; | ||
379 | unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE; | ||
380 | unsigned long frames[pages]; | ||
381 | int f; | ||
382 | |||
383 | /* | ||
384 | * A GDT can be up to 64k in size, which corresponds to 8192 | ||
385 | * 8-byte entries, or 16 4k pages.. | ||
386 | */ | ||
387 | |||
388 | BUG_ON(size > 65536); | ||
389 | BUG_ON(va & ~PAGE_MASK); | ||
390 | |||
391 | for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) { | ||
392 | pte_t pte; | ||
393 | unsigned long pfn, mfn; | ||
394 | |||
395 | pfn = virt_to_pfn(va); | ||
396 | mfn = pfn_to_mfn(pfn); | ||
397 | |||
398 | pte = pfn_pte(pfn, PAGE_KERNEL_RO); | ||
399 | |||
400 | if (HYPERVISOR_update_va_mapping((unsigned long)va, pte, 0)) | ||
401 | BUG(); | ||
402 | |||
403 | frames[f] = mfn; | ||
404 | } | ||
405 | |||
406 | if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct))) | ||
407 | BUG(); | ||
408 | } | ||
409 | |||
361 | static void load_TLS_descriptor(struct thread_struct *t, | 410 | static void load_TLS_descriptor(struct thread_struct *t, |
362 | unsigned int cpu, unsigned int i) | 411 | unsigned int cpu, unsigned int i) |
363 | { | 412 | { |
@@ -581,6 +630,29 @@ static void xen_write_gdt_entry(struct desc_struct *dt, int entry, | |||
581 | preempt_enable(); | 630 | preempt_enable(); |
582 | } | 631 | } |
583 | 632 | ||
633 | /* | ||
634 | * Version of write_gdt_entry for use at early boot-time needed to | ||
635 | * update an entry as simply as possible. | ||
636 | */ | ||
637 | static __init void xen_write_gdt_entry_boot(struct desc_struct *dt, int entry, | ||
638 | const void *desc, int type) | ||
639 | { | ||
640 | switch (type) { | ||
641 | case DESC_LDT: | ||
642 | case DESC_TSS: | ||
643 | /* ignore */ | ||
644 | break; | ||
645 | |||
646 | default: { | ||
647 | xmaddr_t maddr = virt_to_machine(&dt[entry]); | ||
648 | |||
649 | if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc)) | ||
650 | dt[entry] = *(struct desc_struct *)desc; | ||
651 | } | ||
652 | |||
653 | } | ||
654 | } | ||
655 | |||
584 | static void xen_load_sp0(struct tss_struct *tss, | 656 | static void xen_load_sp0(struct tss_struct *tss, |
585 | struct thread_struct *thread) | 657 | struct thread_struct *thread) |
586 | { | 658 | { |
@@ -714,7 +786,7 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high) | |||
714 | set: | 786 | set: |
715 | base = ((u64)high << 32) | low; | 787 | base = ((u64)high << 32) | low; |
716 | if (HYPERVISOR_set_segment_base(which, base) != 0) | 788 | if (HYPERVISOR_set_segment_base(which, base) != 0) |
717 | ret = -EFAULT; | 789 | ret = -EIO; |
718 | break; | 790 | break; |
719 | #endif | 791 | #endif |
720 | 792 | ||
@@ -965,6 +1037,23 @@ static const struct machine_ops __initdata xen_machine_ops = { | |||
965 | .emergency_restart = xen_emergency_restart, | 1037 | .emergency_restart = xen_emergency_restart, |
966 | }; | 1038 | }; |
967 | 1039 | ||
1040 | /* | ||
1041 | * Set up the GDT and segment registers for -fstack-protector. Until | ||
1042 | * we do this, we have to be careful not to call any stack-protected | ||
1043 | * function, which is most of the kernel. | ||
1044 | */ | ||
1045 | static void __init xen_setup_stackprotector(void) | ||
1046 | { | ||
1047 | pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot; | ||
1048 | pv_cpu_ops.load_gdt = xen_load_gdt_boot; | ||
1049 | |||
1050 | setup_stack_canary_segment(0); | ||
1051 | switch_to_new_gdt(0); | ||
1052 | |||
1053 | pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry; | ||
1054 | pv_cpu_ops.load_gdt = xen_load_gdt; | ||
1055 | } | ||
1056 | |||
968 | /* First C function to be called on Xen boot */ | 1057 | /* First C function to be called on Xen boot */ |
969 | asmlinkage void __init xen_start_kernel(void) | 1058 | asmlinkage void __init xen_start_kernel(void) |
970 | { | 1059 | { |
@@ -983,13 +1072,28 @@ asmlinkage void __init xen_start_kernel(void) | |||
983 | pv_apic_ops = xen_apic_ops; | 1072 | pv_apic_ops = xen_apic_ops; |
984 | pv_mmu_ops = xen_mmu_ops; | 1073 | pv_mmu_ops = xen_mmu_ops; |
985 | 1074 | ||
986 | #ifdef CONFIG_X86_64 | ||
987 | /* | 1075 | /* |
988 | * Setup percpu state. We only need to do this for 64-bit | 1076 | * Set up some pagetable state before starting to set any ptes. |
989 | * because 32-bit already has %fs set properly. | ||
990 | */ | 1077 | */ |
991 | load_percpu_segment(0); | 1078 | |
992 | #endif | 1079 | /* Prevent unwanted bits from being set in PTEs. */ |
1080 | __supported_pte_mask &= ~_PAGE_GLOBAL; | ||
1081 | if (!xen_initial_domain()) | ||
1082 | __supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD); | ||
1083 | |||
1084 | __supported_pte_mask |= _PAGE_IOMAP; | ||
1085 | |||
1086 | xen_setup_features(); | ||
1087 | |||
1088 | /* Get mfn list */ | ||
1089 | if (!xen_feature(XENFEAT_auto_translated_physmap)) | ||
1090 | xen_build_dynamic_phys_to_machine(); | ||
1091 | |||
1092 | /* | ||
1093 | * Set up kernel GDT and segment registers, mainly so that | ||
1094 | * -fstack-protector code can be executed. | ||
1095 | */ | ||
1096 | xen_setup_stackprotector(); | ||
993 | 1097 | ||
994 | xen_init_irq_ops(); | 1098 | xen_init_irq_ops(); |
995 | xen_init_cpuid_mask(); | 1099 | xen_init_cpuid_mask(); |
@@ -1001,8 +1105,6 @@ asmlinkage void __init xen_start_kernel(void) | |||
1001 | set_xen_basic_apic_ops(); | 1105 | set_xen_basic_apic_ops(); |
1002 | #endif | 1106 | #endif |
1003 | 1107 | ||
1004 | xen_setup_features(); | ||
1005 | |||
1006 | if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) { | 1108 | if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) { |
1007 | pv_mmu_ops.ptep_modify_prot_start = xen_ptep_modify_prot_start; | 1109 | pv_mmu_ops.ptep_modify_prot_start = xen_ptep_modify_prot_start; |
1008 | pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit; | 1110 | pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit; |
@@ -1019,17 +1121,8 @@ asmlinkage void __init xen_start_kernel(void) | |||
1019 | 1121 | ||
1020 | xen_smp_init(); | 1122 | xen_smp_init(); |
1021 | 1123 | ||
1022 | /* Get mfn list */ | ||
1023 | if (!xen_feature(XENFEAT_auto_translated_physmap)) | ||
1024 | xen_build_dynamic_phys_to_machine(); | ||
1025 | |||
1026 | pgd = (pgd_t *)xen_start_info->pt_base; | 1124 | pgd = (pgd_t *)xen_start_info->pt_base; |
1027 | 1125 | ||
1028 | /* Prevent unwanted bits from being set in PTEs. */ | ||
1029 | __supported_pte_mask &= ~_PAGE_GLOBAL; | ||
1030 | if (!xen_initial_domain()) | ||
1031 | __supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD); | ||
1032 | |||
1033 | #ifdef CONFIG_X86_64 | 1126 | #ifdef CONFIG_X86_64 |
1034 | /* Work out if we support NX */ | 1127 | /* Work out if we support NX */ |
1035 | check_efer(); | 1128 | check_efer(); |
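[The reordering in xen_start_kernel() exists because gcc's -fstack-protector loads the canary through a segment-relative address, so the GDT and the per-cpu canary segment must be live before any instrumented function runs. Conceptually, an instrumented function behaves like this; a sketch of compiler-generated code, not kernel source, with get_canary() standing in for the %gs-relative load:

	void some_protected_function(void)
	{
		unsigned long canary = get_canary();	/* %gs-relative */

		/* ... function body, local buffers, etc. ... */

		if (canary != get_canary())
			__stack_chk_fail();	/* canary was clobbered */
	}

Until xen_setup_stackprotector() has run, that first load would go through an unconfigured segment, which is why the boot-time load_gdt/write_gdt_entry variants are swapped in just long enough to call switch_to_new_gdt().]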
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 429834ec1687..fe03eeed7b48 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c | |||
@@ -236,6 +236,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle) | |||
236 | ctxt->user_regs.ss = __KERNEL_DS; | 236 | ctxt->user_regs.ss = __KERNEL_DS; |
237 | #ifdef CONFIG_X86_32 | 237 | #ifdef CONFIG_X86_32 |
238 | ctxt->user_regs.fs = __KERNEL_PERCPU; | 238 | ctxt->user_regs.fs = __KERNEL_PERCPU; |
239 | ctxt->user_regs.gs = __KERNEL_STACK_CANARY; | ||
239 | #else | 240 | #else |
240 | ctxt->gs_base_kernel = per_cpu_offset(cpu); | 241 | ctxt->gs_base_kernel = per_cpu_offset(cpu); |
241 | #endif | 242 | #endif |
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c index 5601506f2dd9..36a5141108df 100644 --- a/arch/x86/xen/spinlock.c +++ b/arch/x86/xen/spinlock.c | |||
@@ -187,7 +187,6 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enabl | |||
187 | struct xen_spinlock *prev; | 187 | struct xen_spinlock *prev; |
188 | int irq = __get_cpu_var(lock_kicker_irq); | 188 | int irq = __get_cpu_var(lock_kicker_irq); |
189 | int ret; | 189 | int ret; |
190 | unsigned long flags; | ||
191 | u64 start; | 190 | u64 start; |
192 | 191 | ||
193 | /* If kicker interrupts not initialized yet, just spin */ | 192 | /* If kicker interrupts not initialized yet, just spin */ |
@@ -199,16 +198,12 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enabl | |||
199 | /* announce we're spinning */ | 198 | /* announce we're spinning */ |
200 | prev = spinning_lock(xl); | 199 | prev = spinning_lock(xl); |
201 | 200 | ||
202 | flags = __raw_local_save_flags(); | ||
203 | if (irq_enable) { | ||
204 | ADD_STATS(taken_slow_irqenable, 1); | ||
205 | raw_local_irq_enable(); | ||
206 | } | ||
207 | |||
208 | ADD_STATS(taken_slow, 1); | 201 | ADD_STATS(taken_slow, 1); |
209 | ADD_STATS(taken_slow_nested, prev != NULL); | 202 | ADD_STATS(taken_slow_nested, prev != NULL); |
210 | 203 | ||
211 | do { | 204 | do { |
205 | unsigned long flags; | ||
206 | |||
212 | /* clear pending */ | 207 | /* clear pending */ |
213 | xen_clear_irq_pending(irq); | 208 | xen_clear_irq_pending(irq); |
214 | 209 | ||
@@ -228,6 +223,12 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enabl | |||
228 | goto out; | 223 | goto out; |
229 | } | 224 | } |
230 | 225 | ||
226 | flags = __raw_local_save_flags(); | ||
227 | if (irq_enable) { | ||
228 | ADD_STATS(taken_slow_irqenable, 1); | ||
229 | raw_local_irq_enable(); | ||
230 | } | ||
231 | |||
231 | /* | 232 | /* |
232 | * Block until irq becomes pending. If we're | 233 | * Block until irq becomes pending. If we're |
233 | * interrupted at this point (after the trylock but | 234 | * interrupted at this point (after the trylock but |
@@ -238,13 +239,15 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enabl | |||
238 | * pending. | 239 | * pending. |
239 | */ | 240 | */ |
240 | xen_poll_irq(irq); | 241 | xen_poll_irq(irq); |
242 | |||
243 | raw_local_irq_restore(flags); | ||
244 | |||
241 | ADD_STATS(taken_slow_spurious, !xen_test_irq_pending(irq)); | 245 | ADD_STATS(taken_slow_spurious, !xen_test_irq_pending(irq)); |
242 | } while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */ | 246 | } while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */ |
243 | 247 | ||
244 | kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq)); | 248 | kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq)); |
245 | 249 | ||
246 | out: | 250 | out: |
247 | raw_local_irq_restore(flags); | ||
248 | unspinning_lock(xl, prev); | 251 | unspinning_lock(xl, prev); |
249 | spin_time_accum_blocked(start); | 252 | spin_time_accum_blocked(start); |
250 | 253 | ||
@@ -323,8 +326,13 @@ static void xen_spin_unlock(struct raw_spinlock *lock) | |||
323 | smp_wmb(); /* make sure no writes get moved after unlock */ | 326 | smp_wmb(); /* make sure no writes get moved after unlock */ |
324 | xl->lock = 0; /* release lock */ | 327 | xl->lock = 0; /* release lock */ |
325 | 328 | ||
326 | /* make sure unlock happens before kick */ | 329 | /* |
327 | barrier(); | 330 | * Make sure unlock happens before checking for waiting |
331 | * spinners. We need a strong barrier to enforce the | ||
332 | * write-read ordering to different memory locations, as the | ||
333 | * CPU makes no implied guarantees about their ordering. | ||
334 | */ | ||
335 | mb(); | ||
328 | 336 | ||
329 | if (unlikely(xl->spinners)) | 337 | if (unlikely(xl->spinners)) |
330 | xen_spin_unlock_slow(xl); | 338 | xen_spin_unlock_slow(xl); |
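[The barrier()-to-mb() change matters because barrier() only constrains the compiler: x86 may still let the unlock store sit in the store buffer while the later load of xl->spinners executes, which is exactly the store-load reordering that loses a wakeup. A sketch of the hazard the full barrier closes, using the field names from the code above:

	/*
	 *  CPU0 (unlock)              CPU1 (slow lock path)
	 *  xl->lock = 0;              xl->spinners++;
	 *  r0 = xl->spinners;         r1 = xl->lock;
	 *
	 * Without a full barrier, r0 == 0 and r1 == 1 is a legal
	 * outcome: CPU0 skips the kick and CPU1 blocks forever.
	 */
	static void unlock_sketch(struct xen_spinlock *xl)
	{
		xl->lock = 0;		/* store: release the lock      */
		mb();			/* order store before the load  */
		if (xl->spinners)	/* load: any waiters to kick?   */
			xen_spin_unlock_slow(xl);
	}
]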
diff --git a/arch/xtensa/include/asm/socket.h b/arch/xtensa/include/asm/socket.h index dd1a7a4a1cea..beb3a6bdb61d 100644 --- a/arch/xtensa/include/asm/socket.h +++ b/arch/xtensa/include/asm/socket.h | |||
@@ -68,4 +68,7 @@ | |||
68 | #define SO_TIMESTAMPING 37 | 68 | #define SO_TIMESTAMPING 37 |
69 | #define SCM_TIMESTAMPING SO_TIMESTAMPING | 69 | #define SCM_TIMESTAMPING SO_TIMESTAMPING |
70 | 70 | ||
71 | #define SO_PROTOCOL 38 | ||
72 | #define SO_DOMAIN 39 | ||
73 | |||
71 | #endif /* _XTENSA_SOCKET_H */ | 74 | #endif /* _XTENSA_SOCKET_H */ |
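[SO_PROTOCOL and SO_DOMAIN are read-only socket options reporting the protocol and address family a socket was created with; the per-arch headers in this series only assign the numbers, the core support lands elsewhere. Typical userspace usage, as a sketch:

	#include <stdio.h>
	#include <sys/socket.h>

	int main(void)
	{
		int s = socket(AF_INET, SOCK_STREAM, 0);
		int proto = 0, domain = 0;
		socklen_t len = sizeof(proto);

		if (s < 0)
			return 1;
		getsockopt(s, SOL_SOCKET, SO_PROTOCOL, &proto, &len);
		len = sizeof(domain);
		getsockopt(s, SOL_SOCKET, SO_DOMAIN, &domain, &len);
		/* expect IPPROTO_TCP and AF_INET here */
		printf("protocol=%d domain=%d\n", proto, domain);
		return 0;
	}
]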
diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c index edad4156d89a..2f0b86b37cf9 100644 --- a/arch/xtensa/platforms/iss/network.c +++ b/arch/xtensa/platforms/iss/network.c | |||
@@ -545,7 +545,7 @@ static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
545 | spin_unlock_irqrestore(&lp->lock, flags); | 545 | spin_unlock_irqrestore(&lp->lock, flags); |
546 | 546 | ||
547 | dev_kfree_skb(skb); | 547 | dev_kfree_skb(skb); |
548 | return 0; | 548 | return NETDEV_TX_OK; |
549 | } | 549 | } |
550 | 550 | ||
551 | 551 | ||
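[Returning NETDEV_TX_OK instead of a literal 0 is numerically identical but spells out the ndo_start_xmit contract. The convention, sketched with hypothetical queue_full()/hw_queue() helpers:

	static int example_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		if (queue_full(dev))
			return NETDEV_TX_BUSY;	/* core requeues the skb  */

		hw_queue(skb, dev);		/* driver now owns the skb */
		return NETDEV_TX_OK;
	}
]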