commit	cabc5c0f7fa1342049042d6e147db5a73773955b (patch)
tree	2be09ae1777d580c7dfe05d6d5b76e57281ec447 /arch
parent	b73d884756303316ead4cd7dad51236b2a515a1a (diff)
parent	86d710146fb9975f04c505ec78caa43d227c1018 (diff)
author	David S. Miller <davem@davemloft.net>	2009-09-11 23:35:13 -0400
committer	David S. Miller <davem@davemloft.net>	2009-09-11 23:35:13 -0400

Merge branch 'master' of /home/davem/src/GIT/linux-2.6/

Conflicts:
	arch/sparc/Kconfig

Diffstat (limited to 'arch')
 171 files changed, 4851 insertions(+), 2338 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig
index 99193b160232..beea3ccebb5e 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -30,6 +30,18 @@ config OPROFILE_IBS
 
 	  If unsure, say N.
 
+config OPROFILE_EVENT_MULTIPLEX
+	bool "OProfile multiplexing support (EXPERIMENTAL)"
+	default n
+	depends on OPROFILE && X86
+	help
+	  The number of hardware counters is limited. The multiplexing
+	  feature enables OProfile to gather more events than counters
+	  are provided by the hardware. This is realized by switching
+	  between events at a user-specified time interval.
+
+	  If unsure, say N.
+
 config HAVE_OPROFILE
 	bool
 
diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h
index 60c83abfde70..5076a8860b18 100644
--- a/arch/alpha/include/asm/thread_info.h
+++ b/arch/alpha/include/asm/thread_info.h
@@ -75,6 +75,7 @@ register struct thread_info *__current_thread_info __asm__("$8");
 #define TIF_UAC_SIGBUS		7
 #define TIF_MEMDIE		8
 #define TIF_RESTORE_SIGMASK	9	/* restore signal mask in do_signal */
+#define TIF_NOTIFY_RESUME	10	/* callback before returning to user */
 #define TIF_FREEZE		16	/* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
@@ -82,10 +83,12 @@ register struct thread_info *__current_thread_info __asm__("$8");
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
+#define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
 #define _TIF_FREEZE		(1<<TIF_FREEZE)
 
 /* Work to do on interrupt/exception return.  */
-#define _TIF_WORK_MASK		(_TIF_SIGPENDING | _TIF_NEED_RESCHED)
+#define _TIF_WORK_MASK		(_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
+				 _TIF_NOTIFY_RESUME)
 
 /* Work to do on any return to userspace.  */
 #define _TIF_ALLWORK_MASK	(_TIF_WORK_MASK		\
diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c
index df65eaa84c4c..0932dbb1ef8e 100644
--- a/arch/alpha/kernel/signal.c
+++ b/arch/alpha/kernel/signal.c
@@ -20,6 +20,7 @@
 #include <linux/binfmts.h>
 #include <linux/bitops.h>
 #include <linux/syscalls.h>
+#include <linux/tracehook.h>
 
 #include <asm/uaccess.h>
 #include <asm/sigcontext.h>
@@ -683,4 +684,11 @@ do_notify_resume(struct pt_regs *regs, struct switch_stack *sw,
 {
 	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
 		do_signal(regs, sw, r0, r19);
+
+	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+		clear_thread_flag(TIF_NOTIFY_RESUME);
+		tracehook_notify_resume(regs);
+		if (current->replacement_session_keyring)
+			key_replace_session_keyring();
+	}
 }
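This hunk is the template for the rest of the merge: alpha, arm, avr32, cris, h8300, m32r and ia64 (and frv, which already had the flag) all gain the same resume hook. As a sketch of the shared pattern (the enclosing do_notify_resume() signature and the signal-delivery call vary per architecture):

	/* Pattern repeated across the architectures in this merge: consume
	 * TIF_NOTIFY_RESUME, run the tracehook callback, then install a
	 * session keyring deferred by KEYCTL_SESSION_TO_PARENT, if any. */
	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
		if (current->replacement_session_keyring)
			key_replace_session_keyring();
	}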
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 73394e50cbca..d3a39b1e6c0f 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -130,11 +130,13 @@ extern void vfp_sync_state(struct thread_info *thread);
  *  TIF_SYSCALL_TRACE	- syscall trace active
  *  TIF_SIGPENDING	- signal pending
  *  TIF_NEED_RESCHED	- rescheduling necessary
+ *  TIF_NOTIFY_RESUME	- callback before returning to user
  *  TIF_USEDFPU		- FPU was used by this task this quantum (SMP)
  *  TIF_POLLING_NRFLAG	- true if poll_idle() is polling TIF_NEED_RESCHED
  */
 #define TIF_SIGPENDING		0
 #define TIF_NEED_RESCHED	1
+#define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
 #define TIF_SYSCALL_TRACE	8
 #define TIF_POLLING_NRFLAG	16
 #define TIF_USING_IWMMXT	17
@@ -143,6 +145,7 @@ extern void vfp_sync_state(struct thread_info *thread);
 
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
+#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
 #define _TIF_USING_IWMMXT	(1 << TIF_USING_IWMMXT)
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 8c3de1a350b5..7813ab782fda 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -51,7 +51,7 @@ fast_work_pending:
 work_pending:
 	tst	r1, #_TIF_NEED_RESCHED
 	bne	work_resched
-	tst	r1, #_TIF_SIGPENDING
+	tst	r1, #_TIF_SIGPENDING|_TIF_NOTIFY_RESUME
 	beq	no_work_pending
 	mov	r0, sp				@ 'regs'
 	mov	r2, why				@ 'syscall'
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index f6bc5d442782..b76fe06d92e7 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -12,6 +12,7 @@
 #include <linux/personality.h>
 #include <linux/freezer.h>
 #include <linux/uaccess.h>
+#include <linux/tracehook.h>
 
 #include <asm/elf.h>
 #include <asm/cacheflush.h>
@@ -707,4 +708,11 @@ do_notify_resume(struct pt_regs *regs, unsigned int thread_flags, int syscall)
 {
 	if (thread_flags & _TIF_SIGPENDING)
 		do_signal(&current->blocked, regs, syscall);
+
+	if (thread_flags & _TIF_NOTIFY_RESUME) {
+		clear_thread_flag(TIF_NOTIFY_RESUME);
+		tracehook_notify_resume(regs);
+		if (current->replacement_session_keyring)
+			key_replace_session_keyring();
+	}
 }
diff --git a/arch/arm/mach-omap2/mcbsp.c b/arch/arm/mach-omap2/mcbsp.c
index 99b6e1546311..0447d26d454b 100644
--- a/arch/arm/mach-omap2/mcbsp.c
+++ b/arch/arm/mach-omap2/mcbsp.c
@@ -128,6 +128,7 @@ static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = {
 		.rx_irq		= INT_24XX_MCBSP1_IRQ_RX,
 		.tx_irq		= INT_24XX_MCBSP1_IRQ_TX,
 		.ops		= &omap2_mcbsp_ops,
+		.buffer_size	= 0x6F,
 	},
 	{
 		.phys_base	= OMAP34XX_MCBSP2_BASE,
@@ -136,6 +137,7 @@ static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = {
 		.rx_irq		= INT_24XX_MCBSP2_IRQ_RX,
 		.tx_irq		= INT_24XX_MCBSP2_IRQ_TX,
 		.ops		= &omap2_mcbsp_ops,
+		.buffer_size	= 0x3FF,
 	},
 	{
 		.phys_base	= OMAP34XX_MCBSP3_BASE,
@@ -144,6 +146,7 @@ static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = {
 		.rx_irq		= INT_24XX_MCBSP3_IRQ_RX,
 		.tx_irq		= INT_24XX_MCBSP3_IRQ_TX,
 		.ops		= &omap2_mcbsp_ops,
+		.buffer_size	= 0x6F,
 	},
 	{
 		.phys_base	= OMAP34XX_MCBSP4_BASE,
@@ -152,6 +155,7 @@ static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = {
 		.rx_irq		= INT_24XX_MCBSP4_IRQ_RX,
 		.tx_irq		= INT_24XX_MCBSP4_IRQ_TX,
 		.ops		= &omap2_mcbsp_ops,
+		.buffer_size	= 0x6F,
 	},
 	{
 		.phys_base	= OMAP34XX_MCBSP5_BASE,
@@ -160,6 +164,7 @@ static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = {
 		.rx_irq		= INT_24XX_MCBSP5_IRQ_RX,
 		.tx_irq		= INT_24XX_MCBSP5_IRQ_TX,
 		.ops		= &omap2_mcbsp_ops,
+		.buffer_size	= 0x6F,
 	},
 };
 #define OMAP34XX_MCBSP_PDATA_SZ		ARRAY_SIZE(omap34xx_mcbsp_pdata)
diff --git a/arch/arm/mach-pxa/include/mach/audio.h b/arch/arm/mach-pxa/include/mach/audio.h
index 16eb02552d5d..a3449e35a6f5 100644
--- a/arch/arm/mach-pxa/include/mach/audio.h
+++ b/arch/arm/mach-pxa/include/mach/audio.h
@@ -3,10 +3,12 @@
 
 #include <sound/core.h>
 #include <sound/pcm.h>
+#include <sound/ac97_codec.h>
 
 /*
  * @reset_gpio: AC97 reset gpio (normally gpio113 or gpio95)
  *		a -1 value means no gpio will be used for reset
+ * @codec_pdata: AC97 codec platform_data
 
  * reset_gpio should only be specified for pxa27x CPUs where a silicon
  * bug prevents correct operation of the reset line. If not specified,
@@ -20,6 +22,7 @@ typedef struct {
 	void (*resume)(void *);
 	void *priv;
 	int reset_gpio;
+	void *codec_pdata[AC97_BUS_MAX_DEVICES];
 } pxa2xx_audio_ops_t;
 
 extern void pxa_set_ac97_info(pxa2xx_audio_ops_t *ops);
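The new codec_pdata array lets a board hand per-slot platform data to the AC97 codec drivers. A hypothetical board-file fragment (the codec data type, its contents and the GPIO number are illustrative, not taken from this diff):

	/* board-specific codec configuration; the struct is hypothetical */
	static struct my_codec_platform_data my_codec_data = { /* ... */ };

	static pxa2xx_audio_ops_t board_ac97_info = {
		.reset_gpio	= 113,			/* see comment above */
		.codec_pdata	= { &my_codec_data, },	/* AC97 slot 0 */
	};

	pxa_set_ac97_info(&board_ac97_info);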
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index e3ac94f09006..9b00f4cbc903 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -1127,6 +1127,11 @@ int omap_dma_running(void)
 void omap_dma_link_lch(int lch_head, int lch_queue)
 {
 	if (omap_dma_in_1510_mode()) {
+		if (lch_head == lch_queue) {
+			dma_write(dma_read(CCR(lch_head)) | (3 << 8),
+								CCR(lch_head));
+			return;
+		}
 		printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
 		BUG();
 		return;
@@ -1149,6 +1154,11 @@ EXPORT_SYMBOL(omap_dma_link_lch);
 void omap_dma_unlink_lch(int lch_head, int lch_queue)
 {
 	if (omap_dma_in_1510_mode()) {
+		if (lch_head == lch_queue) {
+			dma_write(dma_read(CCR(lch_head)) & ~(3 << 8),
+								CCR(lch_head));
+			return;
+		}
 		printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
 		BUG();
 		return;
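Both guards above carve out the one linking case OMAP1510 hardware can handle: a channel linked to itself, which simply sets (or clears) the auto-init/repeat bits in that channel's CCR instead of hitting BUG(). A sketch of the usage this enables (the channel is assumed to come from omap_request_dma()):

	omap_dma_link_lch(lch, lch);	/* 1510: enable repeat, CCR bits 8..9 */
	/* ... the programmed transfer now restarts automatically ... */
	omap_dma_unlink_lch(lch, lch);	/* 1510: clear the repeat bits */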
diff --git a/arch/arm/plat-omap/include/mach/mcbsp.h b/arch/arm/plat-omap/include/mach/mcbsp.h
index bb154ea76769..63a3f254af7b 100644
--- a/arch/arm/plat-omap/include/mach/mcbsp.h
+++ b/arch/arm/plat-omap/include/mach/mcbsp.h
@@ -134,6 +134,11 @@
 #define OMAP_MCBSP_REG_XCERG	0x74
 #define OMAP_MCBSP_REG_XCERH	0x78
 #define OMAP_MCBSP_REG_SYSCON	0x8C
+#define OMAP_MCBSP_REG_THRSH2	0x90
+#define OMAP_MCBSP_REG_THRSH1	0x94
+#define OMAP_MCBSP_REG_IRQST	0xA0
+#define OMAP_MCBSP_REG_IRQEN	0xA4
+#define OMAP_MCBSP_REG_WAKEUPEN	0xA8
 #define OMAP_MCBSP_REG_XCCR	0xAC
 #define OMAP_MCBSP_REG_RCCR	0xB0
 
@@ -249,8 +254,27 @@
 #define RDISABLE		0x0001
 
 /********************** McBSP SYSCONFIG bit definitions ********************/
+#define CLOCKACTIVITY(value)	((value)<<8)
+#define SIDLEMODE(value)	((value)<<3)
+#define ENAWAKEUP		0x0004
 #define SOFTRST			0x0002
 
+/********************** McBSP DMA operating modes **************************/
+#define MCBSP_DMA_MODE_ELEMENT		0
+#define MCBSP_DMA_MODE_THRESHOLD	1
+#define MCBSP_DMA_MODE_FRAME		2
+
+/********************** McBSP WAKEUPEN bit definitions *********************/
+#define XEMPTYEOFEN		0x4000
+#define XRDYEN			0x0400
+#define XEOFEN			0x0200
+#define XFSXEN			0x0100
+#define XSYNCERREN		0x0080
+#define RRDYEN			0x0008
+#define REOFEN			0x0004
+#define RFSREN			0x0002
+#define RSYNCERREN		0x0001
+
 /* we don't do multichannel for now */
 struct omap_mcbsp_reg_cfg {
 	u16 spcr2;
@@ -344,6 +368,9 @@ struct omap_mcbsp_platform_data {
 	u8 dma_rx_sync, dma_tx_sync;
 	u16 rx_irq, tx_irq;
 	struct omap_mcbsp_ops *ops;
+#ifdef CONFIG_ARCH_OMAP34XX
+	u16 buffer_size;
+#endif
 };
 
 struct omap_mcbsp {
@@ -377,6 +404,11 @@ struct omap_mcbsp {
 	struct omap_mcbsp_platform_data *pdata;
 	struct clk *iclk;
 	struct clk *fclk;
+#ifdef CONFIG_ARCH_OMAP34XX
+	int dma_op_mode;
+	u16 max_tx_thres;
+	u16 max_rx_thres;
+#endif
 };
 extern struct omap_mcbsp **mcbsp_ptr;
 extern int omap_mcbsp_count;
@@ -385,10 +417,25 @@ int omap_mcbsp_init(void);
 void omap_mcbsp_register_board_cfg(struct omap_mcbsp_platform_data *config,
 					int size);
 void omap_mcbsp_config(unsigned int id, const struct omap_mcbsp_reg_cfg * config);
+#ifdef CONFIG_ARCH_OMAP34XX
+void omap_mcbsp_set_tx_threshold(unsigned int id, u16 threshold);
+void omap_mcbsp_set_rx_threshold(unsigned int id, u16 threshold);
+u16 omap_mcbsp_get_max_tx_threshold(unsigned int id);
+u16 omap_mcbsp_get_max_rx_threshold(unsigned int id);
+int omap_mcbsp_get_dma_op_mode(unsigned int id);
+#else
+static inline void omap_mcbsp_set_tx_threshold(unsigned int id, u16 threshold)
+{ }
+static inline void omap_mcbsp_set_rx_threshold(unsigned int id, u16 threshold)
+{ }
+static inline u16 omap_mcbsp_get_max_tx_threshold(unsigned int id) { return 0; }
+static inline u16 omap_mcbsp_get_max_rx_threshold(unsigned int id) { return 0; }
+static inline int omap_mcbsp_get_dma_op_mode(unsigned int id) { return 0; }
+#endif
 int omap_mcbsp_request(unsigned int id);
 void omap_mcbsp_free(unsigned int id);
-void omap_mcbsp_start(unsigned int id);
-void omap_mcbsp_stop(unsigned int id);
+void omap_mcbsp_start(unsigned int id, int tx, int rx);
+void omap_mcbsp_stop(unsigned int id, int tx, int rx);
 void omap_mcbsp_xmit_word(unsigned int id, u32 word);
 u32 omap_mcbsp_recv_word(unsigned int id);
 
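Note the API break at the end of this header: omap_mcbsp_start() and omap_mcbsp_stop() now take per-direction flags, so every caller must be updated. A sketch of a client using the new prototypes together with the threshold helpers (the id and the choice of transmit-only are illustrative):

	omap_mcbsp_request(id);
	omap_mcbsp_set_tx_threshold(id, omap_mcbsp_get_max_tx_threshold(id));
	omap_mcbsp_start(id, 1, 0);	/* start the transmitter only */
	/* ... stream data out ... */
	omap_mcbsp_stop(id, 1, 0);
	omap_mcbsp_free(id);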
diff --git a/arch/arm/plat-omap/mcbsp.c b/arch/arm/plat-omap/mcbsp.c
index efa0e0111f38..8dc7927906f1 100644
--- a/arch/arm/plat-omap/mcbsp.c
+++ b/arch/arm/plat-omap/mcbsp.c
@@ -198,6 +198,170 @@ void omap_mcbsp_config(unsigned int id, const struct omap_mcbsp_reg_cfg *config)
 }
 EXPORT_SYMBOL(omap_mcbsp_config);
 
+#ifdef CONFIG_ARCH_OMAP34XX
+/*
+ * omap_mcbsp_set_tx_threshold configures the transmit threshold;
+ * the threshold value and handler can be configured here.
+ */
+void omap_mcbsp_set_tx_threshold(unsigned int id, u16 threshold)
+{
+	struct omap_mcbsp *mcbsp;
+	void __iomem *io_base;
+
+	if (!cpu_is_omap34xx())
+		return;
+
+	if (!omap_mcbsp_check_valid_id(id)) {
+		printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
+		return;
+	}
+	mcbsp = id_to_mcbsp_ptr(id);
+	io_base = mcbsp->io_base;
+
+	OMAP_MCBSP_WRITE(io_base, THRSH2, threshold);
+}
+EXPORT_SYMBOL(omap_mcbsp_set_tx_threshold);
+
+/*
+ * omap_mcbsp_set_rx_threshold configures the receive threshold;
+ * the threshold value and handler can be configured here.
+ */
+void omap_mcbsp_set_rx_threshold(unsigned int id, u16 threshold)
+{
+	struct omap_mcbsp *mcbsp;
+	void __iomem *io_base;
+
+	if (!cpu_is_omap34xx())
+		return;
+
+	if (!omap_mcbsp_check_valid_id(id)) {
+		printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
+		return;
+	}
+	mcbsp = id_to_mcbsp_ptr(id);
+	io_base = mcbsp->io_base;
+
+	OMAP_MCBSP_WRITE(io_base, THRSH1, threshold);
+}
+EXPORT_SYMBOL(omap_mcbsp_set_rx_threshold);
+
+/*
+ * omap_mcbsp_get_max_tx_threshold just returns the currently configured
+ * maximum threshold for transmission
+ */
+u16 omap_mcbsp_get_max_tx_threshold(unsigned int id)
+{
+	struct omap_mcbsp *mcbsp;
+
+	if (!omap_mcbsp_check_valid_id(id)) {
+		printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
+		return -ENODEV;
+	}
+	mcbsp = id_to_mcbsp_ptr(id);
+
+	return mcbsp->max_tx_thres;
+}
+EXPORT_SYMBOL(omap_mcbsp_get_max_tx_threshold);
+
+/*
+ * omap_mcbsp_get_max_rx_threshold just returns the currently configured
+ * maximum threshold for reception
+ */
+u16 omap_mcbsp_get_max_rx_threshold(unsigned int id)
+{
+	struct omap_mcbsp *mcbsp;
+
+	if (!omap_mcbsp_check_valid_id(id)) {
+		printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
+		return -ENODEV;
+	}
+	mcbsp = id_to_mcbsp_ptr(id);
+
+	return mcbsp->max_rx_thres;
+}
+EXPORT_SYMBOL(omap_mcbsp_get_max_rx_threshold);
+
+/*
+ * omap_mcbsp_get_dma_op_mode just returns the currently configured
+ * operating mode for the mcbsp channel
+ */
+int omap_mcbsp_get_dma_op_mode(unsigned int id)
+{
+	struct omap_mcbsp *mcbsp;
+	int dma_op_mode;
+
+	if (!omap_mcbsp_check_valid_id(id)) {
+		printk(KERN_ERR "%s: Invalid id (%u)\n", __func__, id + 1);
+		return -ENODEV;
+	}
+	mcbsp = id_to_mcbsp_ptr(id);
+
+	spin_lock_irq(&mcbsp->lock);
+	dma_op_mode = mcbsp->dma_op_mode;
+	spin_unlock_irq(&mcbsp->lock);
+
+	return dma_op_mode;
+}
+EXPORT_SYMBOL(omap_mcbsp_get_dma_op_mode);
+
+static inline void omap34xx_mcbsp_request(struct omap_mcbsp *mcbsp)
+{
+	/*
+	 * Enable wakeup behavior, smart idle and all wakeups
+	 * REVISIT: some wakeups may be unnecessary
+	 */
+	if (cpu_is_omap34xx()) {
+		u16 syscon;
+
+		syscon = OMAP_MCBSP_READ(mcbsp->io_base, SYSCON);
+		syscon &= ~(ENAWAKEUP | SIDLEMODE(0x03) | CLOCKACTIVITY(0x03));
+
+		spin_lock_irq(&mcbsp->lock);
+		if (mcbsp->dma_op_mode == MCBSP_DMA_MODE_THRESHOLD) {
+			syscon |= (ENAWAKEUP | SIDLEMODE(0x02) |
+					CLOCKACTIVITY(0x02));
+			OMAP_MCBSP_WRITE(mcbsp->io_base, WAKEUPEN,
+					XRDYEN | RRDYEN);
+		} else {
+			syscon |= SIDLEMODE(0x01);
+		}
+		spin_unlock_irq(&mcbsp->lock);
+
+		OMAP_MCBSP_WRITE(mcbsp->io_base, SYSCON, syscon);
+	}
+}
+
+static inline void omap34xx_mcbsp_free(struct omap_mcbsp *mcbsp)
+{
+	/*
+	 * Disable wakeup behavior, smart idle and all wakeups
+	 */
+	if (cpu_is_omap34xx()) {
+		u16 syscon;
+
+		syscon = OMAP_MCBSP_READ(mcbsp->io_base, SYSCON);
+		syscon &= ~(ENAWAKEUP | SIDLEMODE(0x03) | CLOCKACTIVITY(0x03));
+		/*
+		 * HW bug workaround - If no_idle mode is taken, we need to
+		 * go to smart_idle before going to always_idle, or the
+		 * device will not hit retention anymore.
+		 */
+		syscon |= SIDLEMODE(0x02);
+		OMAP_MCBSP_WRITE(mcbsp->io_base, SYSCON, syscon);
+
+		syscon &= ~(SIDLEMODE(0x03));
+		OMAP_MCBSP_WRITE(mcbsp->io_base, SYSCON, syscon);
+
+		OMAP_MCBSP_WRITE(mcbsp->io_base, WAKEUPEN, 0);
+	}
+}
+#else
+static inline void omap34xx_mcbsp_request(struct omap_mcbsp *mcbsp) {}
+static inline void omap34xx_mcbsp_free(struct omap_mcbsp *mcbsp) {}
+#endif
+
 /*
  * We can choose between IRQ based or polled IO.
  * This needs to be called before omap_mcbsp_request().
@@ -257,6 +421,9 @@ int omap_mcbsp_request(unsigned int id)
 	clk_enable(mcbsp->iclk);
 	clk_enable(mcbsp->fclk);
 
+	/* Do procedure specific to omap34xx arch, if applicable */
+	omap34xx_mcbsp_request(mcbsp);
+
 	/*
 	 * Make sure that transmitter, receiver and sample-rate generator are
 	 * not running before activating IRQs.
@@ -305,6 +472,9 @@ void omap_mcbsp_free(unsigned int id)
 	if (mcbsp->pdata && mcbsp->pdata->ops && mcbsp->pdata->ops->free)
 		mcbsp->pdata->ops->free(id);
 
+	/* Do procedure specific to omap34xx arch, if applicable */
+	omap34xx_mcbsp_free(mcbsp);
+
 	clk_disable(mcbsp->fclk);
 	clk_disable(mcbsp->iclk);
 
@@ -328,14 +498,15 @@ void omap_mcbsp_free(unsigned int id)
 EXPORT_SYMBOL(omap_mcbsp_free);
 
 /*
- * Here we start the McBSP, by enabling the sample
- * generator, both transmitter and receivers,
- * and the frame sync.
+ * Here we start the McBSP, by enabling the transmitter, receiver or both.
+ * If no transmitter or receiver is active prior to the call, then the
+ * sample-rate generator and the frame sync are started as well.
  */
-void omap_mcbsp_start(unsigned int id)
+void omap_mcbsp_start(unsigned int id, int tx, int rx)
 {
 	struct omap_mcbsp *mcbsp;
 	void __iomem *io_base;
+	int idle;
 	u16 w;
 
 	if (!omap_mcbsp_check_valid_id(id)) {
@@ -348,32 +519,58 @@ void omap_mcbsp_start(unsigned int id)
 	mcbsp->rx_word_length = (OMAP_MCBSP_READ(io_base, RCR1) >> 5) & 0x7;
 	mcbsp->tx_word_length = (OMAP_MCBSP_READ(io_base, XCR1) >> 5) & 0x7;
 
-	/* Start the sample generator */
-	w = OMAP_MCBSP_READ(io_base, SPCR2);
-	OMAP_MCBSP_WRITE(io_base, SPCR2, w | (1 << 6));
+	idle = !((OMAP_MCBSP_READ(io_base, SPCR2) |
+			OMAP_MCBSP_READ(io_base, SPCR1)) & 1);
+
+	if (idle) {
+		/* Start the sample generator */
+		w = OMAP_MCBSP_READ(io_base, SPCR2);
+		OMAP_MCBSP_WRITE(io_base, SPCR2, w | (1 << 6));
+	}
 
 	/* Enable transmitter and receiver */
+	tx &= 1;
 	w = OMAP_MCBSP_READ(io_base, SPCR2);
-	OMAP_MCBSP_WRITE(io_base, SPCR2, w | 1);
+	OMAP_MCBSP_WRITE(io_base, SPCR2, w | tx);
 
+	rx &= 1;
 	w = OMAP_MCBSP_READ(io_base, SPCR1);
-	OMAP_MCBSP_WRITE(io_base, SPCR1, w | 1);
+	OMAP_MCBSP_WRITE(io_base, SPCR1, w | rx);
 
-	udelay(100);
+	/*
+	 * Worst case: CLKSRG*2 = 8000khz: (1/8000) * 2 * 2 usec
+	 * REVISIT: 100us may give enough time for two CLKSRG, however
+	 * due to some unknown PM related, clock gating etc. reason it
+	 * is now at 500us.
+	 */
+	udelay(500);
 
-	/* Start frame sync */
-	w = OMAP_MCBSP_READ(io_base, SPCR2);
-	OMAP_MCBSP_WRITE(io_base, SPCR2, w | (1 << 7));
+	if (idle) {
+		/* Start frame sync */
+		w = OMAP_MCBSP_READ(io_base, SPCR2);
+		OMAP_MCBSP_WRITE(io_base, SPCR2, w | (1 << 7));
+	}
+
+	if (cpu_is_omap2430() || cpu_is_omap34xx()) {
+		/* Release the transmitter and receiver */
+		w = OMAP_MCBSP_READ(io_base, XCCR);
+		w &= ~(tx ? XDISABLE : 0);
+		OMAP_MCBSP_WRITE(io_base, XCCR, w);
+		w = OMAP_MCBSP_READ(io_base, RCCR);
+		w &= ~(rx ? RDISABLE : 0);
+		OMAP_MCBSP_WRITE(io_base, RCCR, w);
+	}
 
 	/* Dump McBSP Regs */
 	omap_mcbsp_dump_reg(id);
 }
 EXPORT_SYMBOL(omap_mcbsp_start);
 
-void omap_mcbsp_stop(unsigned int id)
+void omap_mcbsp_stop(unsigned int id, int tx, int rx)
 {
 	struct omap_mcbsp *mcbsp;
 	void __iomem *io_base;
+	int idle;
 	u16 w;
 
 	if (!omap_mcbsp_check_valid_id(id)) {
@@ -385,16 +582,33 @@ void omap_mcbsp_stop(unsigned int id)
 	io_base = mcbsp->io_base;
 
 	/* Reset transmitter */
+	tx &= 1;
+	if (cpu_is_omap2430() || cpu_is_omap34xx()) {
+		w = OMAP_MCBSP_READ(io_base, XCCR);
+		w |= (tx ? XDISABLE : 0);
+		OMAP_MCBSP_WRITE(io_base, XCCR, w);
+	}
 	w = OMAP_MCBSP_READ(io_base, SPCR2);
-	OMAP_MCBSP_WRITE(io_base, SPCR2, w & ~(1));
+	OMAP_MCBSP_WRITE(io_base, SPCR2, w & ~tx);
 
 	/* Reset receiver */
+	rx &= 1;
+	if (cpu_is_omap2430() || cpu_is_omap34xx()) {
+		w = OMAP_MCBSP_READ(io_base, RCCR);
+		w |= (tx ? RDISABLE : 0);
+		OMAP_MCBSP_WRITE(io_base, RCCR, w);
+	}
 	w = OMAP_MCBSP_READ(io_base, SPCR1);
-	OMAP_MCBSP_WRITE(io_base, SPCR1, w & ~(1));
+	OMAP_MCBSP_WRITE(io_base, SPCR1, w & ~rx);
 
-	/* Reset the sample rate generator */
-	w = OMAP_MCBSP_READ(io_base, SPCR2);
-	OMAP_MCBSP_WRITE(io_base, SPCR2, w & ~(1 << 6));
+	idle = !((OMAP_MCBSP_READ(io_base, SPCR2) |
+			OMAP_MCBSP_READ(io_base, SPCR1)) & 1);
+
+	if (idle) {
+		/* Reset the sample rate generator */
+		w = OMAP_MCBSP_READ(io_base, SPCR2);
+		OMAP_MCBSP_WRITE(io_base, SPCR2, w & ~(1 << 6));
+	}
 }
 EXPORT_SYMBOL(omap_mcbsp_stop);
 
@@ -883,6 +1097,149 @@ void omap_mcbsp_set_spi_mode(unsigned int id,
 }
 EXPORT_SYMBOL(omap_mcbsp_set_spi_mode);
 
+#ifdef CONFIG_ARCH_OMAP34XX
+#define max_thres(m)			(mcbsp->pdata->buffer_size)
+#define valid_threshold(m, val)		((val) <= max_thres(m))
+#define THRESHOLD_PROP_BUILDER(prop)					\
+static ssize_t prop##_show(struct device *dev,				\
+			struct device_attribute *attr, char *buf)	\
+{									\
+	struct omap_mcbsp *mcbsp = dev_get_drvdata(dev);		\
+									\
+	return sprintf(buf, "%u\n", mcbsp->prop);			\
+}									\
+									\
+static ssize_t prop##_store(struct device *dev,			\
+				struct device_attribute *attr,		\
+				const char *buf, size_t size)		\
+{									\
+	struct omap_mcbsp *mcbsp = dev_get_drvdata(dev);		\
+	unsigned long val;						\
+	int status;							\
+									\
+	status = strict_strtoul(buf, 0, &val);				\
+	if (status)							\
+		return status;						\
+									\
+	if (!valid_threshold(mcbsp, val))				\
+		return -EDOM;						\
+									\
+	mcbsp->prop = val;						\
+	return size;							\
+}									\
+									\
+static DEVICE_ATTR(prop, 0644, prop##_show, prop##_store);
+
+THRESHOLD_PROP_BUILDER(max_tx_thres);
+THRESHOLD_PROP_BUILDER(max_rx_thres);
+
+static const char *dma_op_modes[] = {
+	"element", "threshold", "frame",
+};
+
+static ssize_t dma_op_mode_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct omap_mcbsp *mcbsp = dev_get_drvdata(dev);
+	int dma_op_mode, i = 0;
+	ssize_t len = 0;
+	const char * const *s;
+
+	spin_lock_irq(&mcbsp->lock);
+	dma_op_mode = mcbsp->dma_op_mode;
+	spin_unlock_irq(&mcbsp->lock);
+
+	for (s = &dma_op_modes[i]; i < ARRAY_SIZE(dma_op_modes); s++, i++) {
+		if (dma_op_mode == i)
+			len += sprintf(buf + len, "[%s] ", *s);
+		else
+			len += sprintf(buf + len, "%s ", *s);
+	}
+	len += sprintf(buf + len, "\n");
+
+	return len;
+}
+
+static ssize_t dma_op_mode_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t size)
+{
+	struct omap_mcbsp *mcbsp = dev_get_drvdata(dev);
+	const char * const *s;
+	int i = 0;
+
+	for (s = &dma_op_modes[i]; i < ARRAY_SIZE(dma_op_modes); s++, i++)
+		if (sysfs_streq(buf, *s))
+			break;
+
+	if (i == ARRAY_SIZE(dma_op_modes))
+		return -EINVAL;
+
+	spin_lock_irq(&mcbsp->lock);
+	if (!mcbsp->free) {
+		size = -EBUSY;
+		goto unlock;
+	}
+	mcbsp->dma_op_mode = i;
+
+unlock:
+	spin_unlock_irq(&mcbsp->lock);
+
+	return size;
+}
+
+static DEVICE_ATTR(dma_op_mode, 0644, dma_op_mode_show, dma_op_mode_store);
+
+static const struct attribute *additional_attrs[] = {
+	&dev_attr_max_tx_thres.attr,
+	&dev_attr_max_rx_thres.attr,
+	&dev_attr_dma_op_mode.attr,
+	NULL,
+};
+
+static const struct attribute_group additional_attr_group = {
+	.attrs = (struct attribute **)additional_attrs,
+};
+
+static inline int __devinit omap_additional_add(struct device *dev)
+{
+	return sysfs_create_group(&dev->kobj, &additional_attr_group);
+}
+
+static inline void __devexit omap_additional_remove(struct device *dev)
+{
+	sysfs_remove_group(&dev->kobj, &additional_attr_group);
+}
+
+static inline void __devinit omap34xx_device_init(struct omap_mcbsp *mcbsp)
+{
+	mcbsp->dma_op_mode = MCBSP_DMA_MODE_ELEMENT;
+	if (cpu_is_omap34xx()) {
+		mcbsp->max_tx_thres = max_thres(mcbsp);
+		mcbsp->max_rx_thres = max_thres(mcbsp);
+		/*
+		 * REVISIT: Set dma_op_mode to THRESHOLD as default
+		 * for mcbsp2 instances.
+		 */
+		if (omap_additional_add(mcbsp->dev))
+			dev_warn(mcbsp->dev,
+				"Unable to create additional controls\n");
+	} else {
+		mcbsp->max_tx_thres = -EINVAL;
+		mcbsp->max_rx_thres = -EINVAL;
+	}
+}
+
+static inline void __devexit omap34xx_device_exit(struct omap_mcbsp *mcbsp)
+{
+	if (cpu_is_omap34xx())
+		omap_additional_remove(mcbsp->dev);
+}
+#else
+static inline void __devinit omap34xx_device_init(struct omap_mcbsp *mcbsp) {}
+static inline void __devexit omap34xx_device_exit(struct omap_mcbsp *mcbsp) {}
+#endif /* CONFIG_ARCH_OMAP34XX */
+
 /*
  * McBSP1 and McBSP3 are directly mapped on 1610 and 1510.
  * 730 has only 2 McBSP, and both of them are MPU peripherals.
@@ -953,6 +1310,10 @@ static int __devinit omap_mcbsp_probe(struct platform_device *pdev)
 	mcbsp->dev = &pdev->dev;
 	mcbsp_ptr[id] = mcbsp;
 	platform_set_drvdata(pdev, mcbsp);
+
+	/* Initialize mcbsp properties for OMAP34XX if needed / applicable */
+	omap34xx_device_init(mcbsp);
+
 	return 0;
 
 err_fclk:
@@ -976,6 +1337,8 @@ static int __devexit omap_mcbsp_remove(struct platform_device *pdev)
 			mcbsp->pdata->ops->free)
 		mcbsp->pdata->ops->free(mcbsp->id);
 
+	omap34xx_device_exit(mcbsp);
+
 	clk_disable(mcbsp->fclk);
 	clk_disable(mcbsp->iclk);
 	clk_put(mcbsp->fclk);
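The attribute group registered above exposes max_tx_thres, max_rx_thres and dma_op_mode per McBSP device. A userspace sketch of switching a port to threshold mode (the sysfs path is an assumption following the usual /sys/devices/platform/<name>.<id>/ layout; per dma_op_mode_store() above, the write fails with EBUSY while the port is in use):

	#include <stdio.h>

	int main(void)
	{
		/* device path is illustrative */
		FILE *f = fopen("/sys/devices/platform/omap-mcbsp.2/dma_op_mode", "w");

		if (!f)
			return 1;
		fputs("threshold", f);	/* one of: element, threshold, frame */
		return fclose(f) ? 1 : 0;
	}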
diff --git a/arch/arm/plat-s3c/include/plat/audio-simtec.h b/arch/arm/plat-s3c/include/plat/audio-simtec.h
new file mode 100644
index 000000000000..0f440b9168db
--- /dev/null
+++ b/arch/arm/plat-s3c/include/plat/audio-simtec.h
@@ -0,0 +1,37 @@
+/* arch/arm/plat-s3c/include/plat/audio-simtec.h
+ *
+ * Copyright 2008 Simtec Electronics
+ *	http://armlinux.simtec.co.uk/
+ *	Ben Dooks <ben@simtec.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Simtec Audio support.
+ */
+
+/**
+ * struct s3c24xx_audio_simtec_pdata - platform data for simtec audio
+ * @use_mpllin: Select codec clock from MPLLin
+ * @output_cdclk: Need to output CDCLK to the codec
+ * @have_mic: Set if we have a MIC socket
+ * @have_lout: Set if we have a LineOut socket
+ * @amp_gpio: GPIO pin to enable the AMP
+ * @amp_gain: Optional GPIO to control AMP gain
+ */
+struct s3c24xx_audio_simtec_pdata {
+	unsigned int	use_mpllin:1;
+	unsigned int	output_cdclk:1;
+
+	unsigned int	have_mic:1;
+	unsigned int	have_lout:1;
+
+	int		amp_gpio;
+	int		amp_gain[2];
+
+	void	(*startup)(void);
+};
+
+extern int simtec_audio_add(const char *codec_name,
+			    struct s3c24xx_audio_simtec_pdata *pdata);
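A hypothetical board-support fragment using the new header (the codec name and field values are illustrative; this diff only defines the interface):

	static struct s3c24xx_audio_simtec_pdata board_audio = {
		.have_mic	= 1,	/* MIC socket fitted */
		.have_lout	= 1,	/* LineOut socket fitted */
		.amp_gpio	= -1,	/* assumption: no amp-enable GPIO */
	};

	/* codec name is a placeholder for whatever the board carries */
	simtec_audio_add("example-codec", &board_audio);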
diff --git a/arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h b/arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h
index 0fad7571030e..07659dad1748 100644
--- a/arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h
+++ b/arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h
@@ -33,6 +33,11 @@
 #define S3C2412_IISCON_RXDMA_ACTIVE	(1 << 1)
 #define S3C2412_IISCON_IIS_ACTIVE	(1 << 0)
 
+#define S3C64XX_IISMOD_BLC_16BIT	(0 << 13)
+#define S3C64XX_IISMOD_BLC_8BIT		(1 << 13)
+#define S3C64XX_IISMOD_BLC_24BIT	(2 << 13)
+#define S3C64XX_IISMOD_BLC_MASK		(3 << 13)
+
 #define S3C64XX_IISMOD_IMS_PCLK		(0 << 10)
 #define S3C64XX_IISMOD_IMS_SYSMUX	(1 << 10)
 
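The new BLC (bit-length control) field occupies bits 14:13 of IISMOD, so a driver selects a sample width by masking first. A minimal sketch (the register base pointer is illustrative; S3C2412_IISMOD is the IISMOD offset defined elsewhere in this header):

	u32 iismod = readl(regs + S3C2412_IISMOD);

	iismod &= ~S3C64XX_IISMOD_BLC_MASK;	/* clear bits 14:13 */
	iismod |= S3C64XX_IISMOD_BLC_24BIT;	/* e.g. 24-bit samples */
	writel(iismod, regs + S3C2412_IISMOD);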
diff --git a/arch/avr32/include/asm/thread_info.h b/arch/avr32/include/asm/thread_info.h
index fc42de5ca209..fd0c5d7e9337 100644
--- a/arch/avr32/include/asm/thread_info.h
+++ b/arch/avr32/include/asm/thread_info.h
@@ -84,6 +84,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_MEMDIE		6
 #define TIF_RESTORE_SIGMASK	7	/* restore signal mask in do_signal */
 #define TIF_CPU_GOING_TO_SLEEP	8	/* CPU is entering sleep 0 mode */
+#define TIF_NOTIFY_RESUME	9	/* callback before returning to user */
 #define TIF_FREEZE		29
 #define TIF_DEBUG		30	/* debugging enabled */
 #define TIF_USERSPACE		31	/* true if FS sets userspace */
@@ -96,6 +97,7 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_MEMDIE		(1 << TIF_MEMDIE)
 #define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
 #define _TIF_CPU_GOING_TO_SLEEP (1 << TIF_CPU_GOING_TO_SLEEP)
+#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
 #define _TIF_FREEZE		(1 << TIF_FREEZE)
 
 /* Note: The masks below must never span more than 16 bits! */
@@ -103,13 +105,15 @@ static inline struct thread_info *current_thread_info(void)
 /* work to do on interrupt/exception return */
 #define _TIF_WORK_MASK				\
 	((1 << TIF_SIGPENDING)			\
+	 | _TIF_NOTIFY_RESUME			\
 	 | (1 << TIF_NEED_RESCHED)		\
 	 | (1 << TIF_POLLING_NRFLAG)		\
 	 | (1 << TIF_BREAKPOINT)		\
 	 | (1 << TIF_RESTORE_SIGMASK))
 
 /* work to do on any return to userspace */
-#define _TIF_ALLWORK_MASK	(_TIF_WORK_MASK | (1 << TIF_SYSCALL_TRACE))
+#define _TIF_ALLWORK_MASK	(_TIF_WORK_MASK | (1 << TIF_SYSCALL_TRACE) | \
+				 _TIF_NOTIFY_RESUME)
 /* work to do on return from debug mode */
 #define _TIF_DBGWORK_MASK	(_TIF_WORK_MASK & ~(1 << TIF_BREAKPOINT))
 
diff --git a/arch/avr32/kernel/entry-avr32b.S b/arch/avr32/kernel/entry-avr32b.S
index 009a80155d67..169268c40ae2 100644
--- a/arch/avr32/kernel/entry-avr32b.S
+++ b/arch/avr32/kernel/entry-avr32b.S
@@ -281,7 +281,7 @@ syscall_exit_work:
 	ld.w	r1, r0[TI_flags]
 	rjmp	1b
 
-2:	mov	r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK
+2:	mov	r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NOTIFY_RESUME
 	tst	r1, r2
 	breq	3f
 	unmask_interrupts
diff --git a/arch/avr32/kernel/signal.c b/arch/avr32/kernel/signal.c
index 27227561bad6..64f886fac2ef 100644
--- a/arch/avr32/kernel/signal.c
+++ b/arch/avr32/kernel/signal.c
@@ -16,6 +16,7 @@
 #include <linux/ptrace.h>
 #include <linux/unistd.h>
 #include <linux/freezer.h>
+#include <linux/tracehook.h>
 
 #include <asm/uaccess.h>
 #include <asm/ucontext.h>
@@ -322,4 +323,11 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, struct thread_info *ti)
 
 	if (ti->flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
 		do_signal(regs, &current->blocked, syscall);
+
+	if (ti->flags & _TIF_NOTIFY_RESUME) {
+		clear_thread_flag(TIF_NOTIFY_RESUME);
+		tracehook_notify_resume(regs);
+		if (current->replacement_session_keyring)
+			key_replace_session_keyring();
+	}
 }
diff --git a/arch/cris/kernel/ptrace.c b/arch/cris/kernel/ptrace.c
index b326023baab2..48b0f3912632 100644
--- a/arch/cris/kernel/ptrace.c
+++ b/arch/cris/kernel/ptrace.c
@@ -16,6 +16,7 @@
 #include <linux/errno.h>
 #include <linux/ptrace.h>
 #include <linux/user.h>
+#include <linux/tracehook.h>
 
 #include <asm/uaccess.h>
 #include <asm/page.h>
@@ -36,4 +37,11 @@ void do_notify_resume(int canrestart, struct pt_regs *regs,
 	/* deal with pending signal delivery */
 	if (thread_info_flags & _TIF_SIGPENDING)
 		do_signal(canrestart,regs);
+
+	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+		clear_thread_flag(TIF_NOTIFY_RESUME);
+		tracehook_notify_resume(regs);
+		if (current->replacement_session_keyring)
+			key_replace_session_keyring();
+	}
 }
diff --git a/arch/frv/kernel/signal.c b/arch/frv/kernel/signal.c
index 4a7a62c6e783..6b0a2b6fed6a 100644
--- a/arch/frv/kernel/signal.c
+++ b/arch/frv/kernel/signal.c
@@ -572,6 +572,8 @@ asmlinkage void do_notify_resume(__u32 thread_info_flags)
 	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
 		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(__frame);
+		if (current->replacement_session_keyring)
+			key_replace_session_keyring();
 	}
 
 } /* end do_notify_resume() */
diff --git a/arch/h8300/include/asm/thread_info.h b/arch/h8300/include/asm/thread_info.h
index 8bbc8b0ee45d..70e67e47d020 100644
--- a/arch/h8300/include/asm/thread_info.h
+++ b/arch/h8300/include/asm/thread_info.h
@@ -89,6 +89,7 @@ static inline struct thread_info *current_thread_info(void)
 					   TIF_NEED_RESCHED */
 #define TIF_MEMDIE		4
 #define TIF_RESTORE_SIGMASK	5	/* restore signal mask in do_signal() */
+#define TIF_NOTIFY_RESUME	6	/* callback before returning to user */
 #define TIF_FREEZE		16	/* is freezing for suspend */
 
 /* as above, but as bit values */
@@ -97,6 +98,7 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
+#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
 #define _TIF_FREEZE		(1<<TIF_FREEZE)
 
 #define _TIF_WORK_MASK		0x0000FFFE	/* work to do on interrupt/exception return */
diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c
index cf3472f7389b..af842c369d24 100644
--- a/arch/h8300/kernel/signal.c
+++ b/arch/h8300/kernel/signal.c
@@ -39,6 +39,7 @@
 #include <linux/tty.h>
 #include <linux/binfmts.h>
 #include <linux/freezer.h>
+#include <linux/tracehook.h>
 
 #include <asm/setup.h>
 #include <asm/uaccess.h>
@@ -552,4 +553,11 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags)
 {
 	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
 		do_signal(regs, NULL);
+
+	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+		clear_thread_flag(TIF_NOTIFY_RESUME);
+		tracehook_notify_resume(regs);
+		if (current->replacement_session_keyring)
+			key_replace_session_keyring();
+	}
 }
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 5a61b5c2e18f..8d3c79cd81e7 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -44,7 +44,6 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 
 #define get_dma_ops(dev) platform_dma_get_ops(dev)
-#define flush_write_buffers()
 
 #include <asm-generic/dma-mapping-common.h>
 
@@ -69,6 +68,24 @@ dma_set_mask (struct device *dev, u64 mask)
 	return 0;
 }
 
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+	if (!dev->dma_mask)
+		return 0;
+
+	return addr + size <= *dev->dma_mask;
+}
+
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+	return paddr;
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+	return daddr;
+}
+
 extern int dma_get_cache_alignment(void);
 
 static inline void
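On ia64 the DMA address space is flat, so phys_to_dma()/dma_to_phys() are identity mappings and dma_capable() reduces to a mask check; these helpers appear here to satisfy the generic (swiotlb-style) DMA code. An illustrative use, with dev, paddr and size as placeholders:

	dma_addr_t daddr = phys_to_dma(dev, paddr);	/* identity on ia64 */

	if (!dma_capable(dev, daddr, size))
		return -EIO;	/* buffer ends beyond the device's DMA mask */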
diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
index 39a3cd0a4173..f2c1600da097 100644
--- a/arch/ia64/kernel/dma-mapping.c
+++ b/arch/ia64/kernel/dma-mapping.c
@@ -10,7 +10,9 @@ EXPORT_SYMBOL(dma_ops);
 
 static int __init dma_init(void)
 {
 	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+
+	return 0;
 }
 fs_initcall(dma_init);
 
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 5d7c0e5b9e76..89969e950045 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -192,6 +192,8 @@ do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall)
 	if (test_thread_flag(TIF_NOTIFY_RESUME)) {
 		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(&scr->pt);
+		if (current->replacement_session_keyring)
+			key_replace_session_keyring();
 	}
 
 	/* copy user rbs to kernel rbs */
diff --git a/arch/ia64/lib/ip_fast_csum.S b/arch/ia64/lib/ip_fast_csum.S
index 1f86aeb2c948..620d9dc5220f 100644
--- a/arch/ia64/lib/ip_fast_csum.S
+++ b/arch/ia64/lib/ip_fast_csum.S
@@ -96,20 +96,22 @@ END(ip_fast_csum)
 GLOBAL_ENTRY(csum_ipv6_magic)
 	ld4	r20=[in0],4
 	ld4	r21=[in1],4
-	dep	r15=in3,in2,32,16
+	zxt4	in2=in2
 	;;
 	ld4	r22=[in0],4
 	ld4	r23=[in1],4
-	mux1	r15=r15,@rev
+	dep	r15=in3,in2,32,16
 	;;
 	ld4	r24=[in0],4
 	ld4	r25=[in1],4
-	shr.u	r15=r15,16
+	mux1	r15=r15,@rev
 	add	r16=r20,r21
 	add	r17=r22,r23
+	zxt4	in4=in4
 	;;
 	ld4	r26=[in0],4
 	ld4	r27=[in1],4
+	shr.u	r15=r15,16
 	add	r18=r24,r25
 	add	r8=r16,r17
 	;;
diff --git a/arch/ia64/xen/time.c b/arch/ia64/xen/time.c
index fb8332690179..dbeadb9c8e20 100644
--- a/arch/ia64/xen/time.c
+++ b/arch/ia64/xen/time.c
@@ -133,8 +133,7 @@ consider_steal_time(unsigned long new_itm)
 		account_idle_ticks(blocked);
 		run_local_timers();
 
-		if (rcu_pending(cpu))
-			rcu_check_callbacks(cpu, user_mode(get_irq_regs()));
+		rcu_check_callbacks(cpu, user_mode(get_irq_regs()));
 
 		scheduler_tick();
 		run_posix_cpu_timers(p);
diff --git a/arch/m32r/include/asm/thread_info.h b/arch/m32r/include/asm/thread_info.h index 07bb5bd00e2a..71578151a403 100644 --- a/arch/m32r/include/asm/thread_info.h +++ b/arch/m32r/include/asm/thread_info.h | |||
@@ -149,6 +149,7 @@ static inline unsigned int get_thread_fault_code(void) | |||
149 | #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ | 149 | #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ |
150 | #define TIF_SINGLESTEP 3 /* restore singlestep on return to user mode */ | 150 | #define TIF_SINGLESTEP 3 /* restore singlestep on return to user mode */ |
151 | #define TIF_IRET 4 /* return with iret */ | 151 | #define TIF_IRET 4 /* return with iret */ |
152 | #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */ | ||
152 | #define TIF_RESTORE_SIGMASK 8 /* restore signal mask in do_signal() */ | 153 | #define TIF_RESTORE_SIGMASK 8 /* restore signal mask in do_signal() */ |
153 | #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ | 154 | #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ |
154 | #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ | 155 | #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ |
@@ -160,6 +161,7 @@ static inline unsigned int get_thread_fault_code(void) | |||
160 | #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) | 161 | #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) |
161 | #define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP) | 162 | #define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP) |
162 | #define _TIF_IRET (1<<TIF_IRET) | 163 | #define _TIF_IRET (1<<TIF_IRET) |
164 | #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) | ||
163 | #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) | 165 | #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) |
164 | #define _TIF_USEDFPU (1<<TIF_USEDFPU) | 166 | #define _TIF_USEDFPU (1<<TIF_USEDFPU) |
165 | #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) | 167 | #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) |
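As in the other thread_info headers touched by this merge, TIF_NOTIFY_RESUME is a bit number and _TIF_NOTIFY_RESUME its mask, which lets exit paths test all pending work with a single AND of the flags word. A minimal sketch of the convention:

    #define TIF_SIGPENDING     1
    #define TIF_NEED_RESCHED   2
    #define TIF_NOTIFY_RESUME  5

    #define _TIF_SIGPENDING    (1 << TIF_SIGPENDING)
    #define _TIF_NEED_RESCHED  (1 << TIF_NEED_RESCHED)
    #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)

    /* Illustrative consumer: one load of the flags word, one AND. */
    static inline int exit_work_pending(unsigned long flags)
    {
            return (flags & (_TIF_SIGPENDING | _TIF_NEED_RESCHED |
                             _TIF_NOTIFY_RESUME)) != 0;
    }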
diff --git a/arch/m32r/kernel/signal.c b/arch/m32r/kernel/signal.c index 18124542a6eb..144b0f124fc7 100644 --- a/arch/m32r/kernel/signal.c +++ b/arch/m32r/kernel/signal.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/stddef.h> | 21 | #include <linux/stddef.h> |
22 | #include <linux/personality.h> | 22 | #include <linux/personality.h> |
23 | #include <linux/freezer.h> | 23 | #include <linux/freezer.h> |
24 | #include <linux/tracehook.h> | ||
24 | #include <asm/cacheflush.h> | 25 | #include <asm/cacheflush.h> |
25 | #include <asm/ucontext.h> | 26 | #include <asm/ucontext.h> |
26 | #include <asm/uaccess.h> | 27 | #include <asm/uaccess.h> |
@@ -408,5 +409,12 @@ void do_notify_resume(struct pt_regs *regs, sigset_t *oldset, | |||
408 | if (thread_info_flags & _TIF_SIGPENDING) | 409 | if (thread_info_flags & _TIF_SIGPENDING) |
409 | do_signal(regs,oldset); | 410 | do_signal(regs,oldset); |
410 | 411 | ||
412 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { | ||
413 | clear_thread_flag(TIF_NOTIFY_RESUME); | ||
414 | tracehook_notify_resume(regs); | ||
415 | if (current->replacement_session_keyring) | ||
416 | key_replace_session_keyring(); | ||
417 | } | ||
418 | |||
411 | clear_thread_flag(TIF_IRET); | 419 | clear_thread_flag(TIF_IRET); |
412 | } | 420 | } |
diff --git a/arch/m68k/include/asm/entry_mm.h b/arch/m68k/include/asm/entry_mm.h index 5202f5a5b420..474125886218 100644 --- a/arch/m68k/include/asm/entry_mm.h +++ b/arch/m68k/include/asm/entry_mm.h | |||
@@ -46,7 +46,6 @@ | |||
46 | #define curptr a2 | 46 | #define curptr a2 |
47 | 47 | ||
48 | LFLUSH_I_AND_D = 0x00000808 | 48 | LFLUSH_I_AND_D = 0x00000808 |
49 | LSIGTRAP = 5 | ||
50 | 49 | ||
51 | /* process bits for task_struct.ptrace */ | 50 | /* process bits for task_struct.ptrace */ |
52 | PT_TRACESYS_OFF = 3 | 51 | PT_TRACESYS_OFF = 3 |
@@ -118,9 +117,6 @@ PT_DTRACE_BIT = 2 | |||
118 | #define STR(X) STR1(X) | 117 | #define STR(X) STR1(X) |
119 | #define STR1(X) #X | 118 | #define STR1(X) #X |
120 | 119 | ||
121 | #define PT_OFF_ORIG_D0 0x24 | ||
122 | #define PT_OFF_FORMATVEC 0x32 | ||
123 | #define PT_OFF_SR 0x2C | ||
124 | #define SAVE_ALL_INT \ | 120 | #define SAVE_ALL_INT \ |
125 | "clrl %%sp@-;" /* stk_adj */ \ | 121 | "clrl %%sp@-;" /* stk_adj */ \ |
126 | "pea -1:w;" /* orig d0 = -1 */ \ | 122 | "pea -1:w;" /* orig d0 = -1 */ \ |
diff --git a/arch/m68k/include/asm/entry_no.h b/arch/m68k/include/asm/entry_no.h index c2553d26273d..907ed03d792f 100644 --- a/arch/m68k/include/asm/entry_no.h +++ b/arch/m68k/include/asm/entry_no.h | |||
@@ -72,8 +72,8 @@ LENOSYS = 38 | |||
72 | lea %sp@(-32),%sp /* space for 8 regs */ | 72 | lea %sp@(-32),%sp /* space for 8 regs */ |
73 | moveml %d1-%d5/%a0-%a2,%sp@ | 73 | moveml %d1-%d5/%a0-%a2,%sp@ |
74 | movel sw_usp,%a0 /* get usp */ | 74 | movel sw_usp,%a0 /* get usp */ |
75 | movel %a0@-,%sp@(PT_PC) /* copy exception program counter */ | 75 | movel %a0@-,%sp@(PT_OFF_PC) /* copy exception program counter */ |
76 | movel %a0@-,%sp@(PT_FORMATVEC)/* copy exception format/vector/sr */ | 76 | movel %a0@-,%sp@(PT_OFF_FORMATVEC)/*copy exception format/vector/sr */ |
77 | bra 7f | 77 | bra 7f |
78 | 6: | 78 | 6: |
79 | clrl %sp@- /* stkadj */ | 79 | clrl %sp@- /* stkadj */ |
@@ -89,8 +89,8 @@ LENOSYS = 38 | |||
89 | bnes 8f /* no, skip */ | 89 | bnes 8f /* no, skip */ |
90 | move #0x2700,%sr /* disable intrs */ | 90 | move #0x2700,%sr /* disable intrs */ |
91 | movel sw_usp,%a0 /* get usp */ | 91 | movel sw_usp,%a0 /* get usp */ |
92 | movel %sp@(PT_PC),%a0@- /* copy exception program counter */ | 92 | movel %sp@(PT_OFF_PC),%a0@- /* copy exception program counter */ |
93 | movel %sp@(PT_FORMATVEC),%a0@-/* copy exception format/vector/sr */ | 93 | movel %sp@(PT_OFF_FORMATVEC),%a0@-/*copy exception format/vector/sr */ |
94 | moveml %sp@,%d1-%d5/%a0-%a2 | 94 | moveml %sp@,%d1-%d5/%a0-%a2 |
95 | lea %sp@(32),%sp /* space for 8 regs */ | 95 | lea %sp@(32),%sp /* space for 8 regs */ |
96 | movel %sp@+,%d0 | 96 | movel %sp@+,%d0 |
diff --git a/arch/m68k/include/asm/math-emu.h b/arch/m68k/include/asm/math-emu.h index ddfab96403cb..5e9249b0014c 100644 --- a/arch/m68k/include/asm/math-emu.h +++ b/arch/m68k/include/asm/math-emu.h | |||
@@ -145,16 +145,16 @@ extern unsigned int fp_debugprint; | |||
145 | * these are only used during instruction decoding | 145 | * these are only used during instruction decoding |
146 | * where we always know how deep we're on the stack. | 146 | * where we always know how deep we're on the stack. |
147 | */ | 147 | */ |
148 | #define FPS_DO (PT_D0) | 148 | #define FPS_DO (PT_OFF_D0) |
149 | #define FPS_D1 (PT_D1) | 149 | #define FPS_D1 (PT_OFF_D1) |
150 | #define FPS_D2 (PT_D2) | 150 | #define FPS_D2 (PT_OFF_D2) |
151 | #define FPS_A0 (PT_A0) | 151 | #define FPS_A0 (PT_OFF_A0) |
152 | #define FPS_A1 (PT_A1) | 152 | #define FPS_A1 (PT_OFF_A1) |
153 | #define FPS_A2 (PT_A2) | 153 | #define FPS_A2 (PT_OFF_A2) |
154 | #define FPS_SR (PT_SR) | 154 | #define FPS_SR (PT_OFF_SR) |
155 | #define FPS_PC (PT_PC) | 155 | #define FPS_PC (PT_OFF_PC) |
156 | #define FPS_EA (PT_PC+6) | 156 | #define FPS_EA (PT_OFF_PC+6) |
157 | #define FPS_PC2 (PT_PC+10) | 157 | #define FPS_PC2 (PT_OFF_PC+10) |
158 | 158 | ||
159 | .macro fp_get_fp_reg | 159 | .macro fp_get_fp_reg |
160 | lea (FPD_FPREG,FPDATA,%d0.w*4),%a0 | 160 | lea (FPD_FPREG,FPDATA,%d0.w*4),%a0 |
diff --git a/arch/m68k/include/asm/thread_info_mm.h b/arch/m68k/include/asm/thread_info_mm.h index 6ea5c33b3c56..b6da3882be9b 100644 --- a/arch/m68k/include/asm/thread_info_mm.h +++ b/arch/m68k/include/asm/thread_info_mm.h | |||
@@ -1,6 +1,10 @@ | |||
1 | #ifndef _ASM_M68K_THREAD_INFO_H | 1 | #ifndef _ASM_M68K_THREAD_INFO_H |
2 | #define _ASM_M68K_THREAD_INFO_H | 2 | #define _ASM_M68K_THREAD_INFO_H |
3 | 3 | ||
4 | #ifndef ASM_OFFSETS_C | ||
5 | #include <asm/asm-offsets.h> | ||
6 | #endif | ||
7 | #include <asm/current.h> | ||
4 | #include <asm/types.h> | 8 | #include <asm/types.h> |
5 | #include <asm/page.h> | 9 | #include <asm/page.h> |
6 | 10 | ||
@@ -31,7 +35,12 @@ struct thread_info { | |||
31 | #define init_thread_info (init_task.thread.info) | 35 | #define init_thread_info (init_task.thread.info) |
32 | #define init_stack (init_thread_union.stack) | 36 | #define init_stack (init_thread_union.stack) |
33 | 37 | ||
34 | #define task_thread_info(tsk) (&(tsk)->thread.info) | 38 | #ifdef ASM_OFFSETS_C |
39 | #define task_thread_info(tsk) ((struct thread_info *) NULL) | ||
40 | #else | ||
41 | #define task_thread_info(tsk) ((struct thread_info *)((char *)tsk+TASK_TINFO)) | ||
42 | #endif | ||
43 | |||
35 | #define task_stack_page(tsk) ((tsk)->stack) | 44 | #define task_stack_page(tsk) ((tsk)->stack) |
36 | #define current_thread_info() task_thread_info(current) | 45 | #define current_thread_info() task_thread_info(current) |
37 | 46 | ||
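The ASM_OFFSETS_C conditional breaks a circular dependency: asm-offsets.c is the file that generates asm/asm-offsets.h (the source of TASK_TINFO), so while it is itself being compiled the header does not exist yet and task_thread_info() must degrade to a stub that is never executed. Stripped to its essentials:

    #ifndef ASM_OFFSETS_C
    #include <asm/asm-offsets.h>            /* provides TASK_TINFO */
    #endif

    #ifdef ASM_OFFSETS_C
    #define task_thread_info(tsk)   ((struct thread_info *) NULL)
    #else
    #define task_thread_info(tsk) \
            ((struct thread_info *)((char *)(tsk) + TASK_TINFO))
    #endif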
diff --git a/arch/m68k/kernel/asm-offsets.c b/arch/m68k/kernel/asm-offsets.c index b1f012f6c493..73e5e581245b 100644 --- a/arch/m68k/kernel/asm-offsets.c +++ b/arch/m68k/kernel/asm-offsets.c | |||
@@ -8,6 +8,8 @@ | |||
8 | * #defines from the assembly-language output. | 8 | * #defines from the assembly-language output. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define ASM_OFFSETS_C | ||
12 | |||
11 | #include <linux/stddef.h> | 13 | #include <linux/stddef.h> |
12 | #include <linux/sched.h> | 14 | #include <linux/sched.h> |
13 | #include <linux/kernel_stat.h> | 15 | #include <linux/kernel_stat.h> |
@@ -27,6 +29,9 @@ int main(void) | |||
27 | DEFINE(TASK_INFO, offsetof(struct task_struct, thread.info)); | 29 | DEFINE(TASK_INFO, offsetof(struct task_struct, thread.info)); |
28 | DEFINE(TASK_MM, offsetof(struct task_struct, mm)); | 30 | DEFINE(TASK_MM, offsetof(struct task_struct, mm)); |
29 | DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm)); | 31 | DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm)); |
32 | #ifdef CONFIG_MMU | ||
33 | DEFINE(TASK_TINFO, offsetof(struct task_struct, thread.info)); | ||
34 | #endif | ||
30 | 35 | ||
31 | /* offsets into the thread struct */ | 36 | /* offsets into the thread struct */ |
32 | DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp)); | 37 | DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp)); |
@@ -44,20 +49,20 @@ int main(void) | |||
44 | DEFINE(TINFO_FLAGS, offsetof(struct thread_info, flags)); | 49 | DEFINE(TINFO_FLAGS, offsetof(struct thread_info, flags)); |
45 | 50 | ||
46 | /* offsets into the pt_regs */ | 51 | /* offsets into the pt_regs */ |
47 | DEFINE(PT_D0, offsetof(struct pt_regs, d0)); | 52 | DEFINE(PT_OFF_D0, offsetof(struct pt_regs, d0)); |
48 | DEFINE(PT_ORIG_D0, offsetof(struct pt_regs, orig_d0)); | 53 | DEFINE(PT_OFF_ORIG_D0, offsetof(struct pt_regs, orig_d0)); |
49 | DEFINE(PT_D1, offsetof(struct pt_regs, d1)); | 54 | DEFINE(PT_OFF_D1, offsetof(struct pt_regs, d1)); |
50 | DEFINE(PT_D2, offsetof(struct pt_regs, d2)); | 55 | DEFINE(PT_OFF_D2, offsetof(struct pt_regs, d2)); |
51 | DEFINE(PT_D3, offsetof(struct pt_regs, d3)); | 56 | DEFINE(PT_OFF_D3, offsetof(struct pt_regs, d3)); |
52 | DEFINE(PT_D4, offsetof(struct pt_regs, d4)); | 57 | DEFINE(PT_OFF_D4, offsetof(struct pt_regs, d4)); |
53 | DEFINE(PT_D5, offsetof(struct pt_regs, d5)); | 58 | DEFINE(PT_OFF_D5, offsetof(struct pt_regs, d5)); |
54 | DEFINE(PT_A0, offsetof(struct pt_regs, a0)); | 59 | DEFINE(PT_OFF_A0, offsetof(struct pt_regs, a0)); |
55 | DEFINE(PT_A1, offsetof(struct pt_regs, a1)); | 60 | DEFINE(PT_OFF_A1, offsetof(struct pt_regs, a1)); |
56 | DEFINE(PT_A2, offsetof(struct pt_regs, a2)); | 61 | DEFINE(PT_OFF_A2, offsetof(struct pt_regs, a2)); |
57 | DEFINE(PT_PC, offsetof(struct pt_regs, pc)); | 62 | DEFINE(PT_OFF_PC, offsetof(struct pt_regs, pc)); |
58 | DEFINE(PT_SR, offsetof(struct pt_regs, sr)); | 63 | DEFINE(PT_OFF_SR, offsetof(struct pt_regs, sr)); |
59 | /* bitfields are a bit difficult */ | 64 | /* bitfields are a bit difficult */ |
60 | DEFINE(PT_VECTOR, offsetof(struct pt_regs, pc) + 4); | 65 | DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, pc) + 4); |
61 | 66 | ||
62 | /* offsets into the irq_handler struct */ | 67 | /* offsets into the irq_handler struct */ |
63 | DEFINE(IRQ_HANDLER, offsetof(struct irq_node, handler)); | 68 | DEFINE(IRQ_HANDLER, offsetof(struct irq_node, handler)); |
@@ -84,10 +89,10 @@ int main(void) | |||
84 | DEFINE(FONT_DESC_PREF, offsetof(struct font_desc, pref)); | 89 | DEFINE(FONT_DESC_PREF, offsetof(struct font_desc, pref)); |
85 | 90 | ||
86 | /* signal defines */ | 91 | /* signal defines */ |
87 | DEFINE(SIGSEGV, SIGSEGV); | 92 | DEFINE(LSIGSEGV, SIGSEGV); |
88 | DEFINE(SEGV_MAPERR, SEGV_MAPERR); | 93 | DEFINE(LSEGV_MAPERR, SEGV_MAPERR); |
89 | DEFINE(SIGTRAP, SIGTRAP); | 94 | DEFINE(LSIGTRAP, SIGTRAP); |
90 | DEFINE(TRAP_TRACE, TRAP_TRACE); | 95 | DEFINE(LTRAP_TRACE, TRAP_TRACE); |
91 | 96 | ||
92 | /* offsets into the custom struct */ | 97 | /* offsets into the custom struct */ |
93 | DEFINE(CUSTOMBASE, &amiga_custom); | 98 | DEFINE(CUSTOMBASE, &amiga_custom); |
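Two renames meet in this hunk. The pt_regs offsets become PT_OFF_* so that entry.S and the math emulator consume one generated set of constants instead of the hand-maintained copies deleted from entry_mm.h above, and the signal numbers gain an L prefix (LSIGSEGV, LSIGTRAP, ...) so the generated symbols cannot collide with the identically named C macros once asm-offsets.h is pulled into C-visible headers. Behind DEFINE() is the usual asm-offsets trick, sketched here rather than quoted exactly: each constant is emitted into the compiler's assembly output, and a build script rewrites every "->NAME VALUE" line into "#define NAME VALUE" in the generated header.

    #include <linux/stddef.h>               /* offsetof */
    #include <linux/signal.h>               /* SIGSEGV */
    #include <asm/ptrace.h>                 /* struct pt_regs */

    /* Sketch of the mechanism, not the exact kernel macro. */
    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " : : "i" (val))

    int main(void)
    {
            DEFINE(PT_OFF_D0, offsetof(struct pt_regs, d0));
            DEFINE(LSIGSEGV, SIGSEGV);      /* L prefix avoids the C macro */
            return 0;
    }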
diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S index c3735cd6207e..922f52e7ed1a 100644 --- a/arch/m68k/kernel/entry.S +++ b/arch/m68k/kernel/entry.S | |||
@@ -77,17 +77,17 @@ ENTRY(ret_from_fork) | |||
77 | jra .Lret_from_exception | 77 | jra .Lret_from_exception |
78 | 78 | ||
79 | do_trace_entry: | 79 | do_trace_entry: |
80 | movel #-ENOSYS,%sp@(PT_D0) | needed for strace | 80 | movel #-ENOSYS,%sp@(PT_OFF_D0)| needed for strace |
81 | subql #4,%sp | 81 | subql #4,%sp |
82 | SAVE_SWITCH_STACK | 82 | SAVE_SWITCH_STACK |
83 | jbsr syscall_trace | 83 | jbsr syscall_trace |
84 | RESTORE_SWITCH_STACK | 84 | RESTORE_SWITCH_STACK |
85 | addql #4,%sp | 85 | addql #4,%sp |
86 | movel %sp@(PT_ORIG_D0),%d0 | 86 | movel %sp@(PT_OFF_ORIG_D0),%d0 |
87 | cmpl #NR_syscalls,%d0 | 87 | cmpl #NR_syscalls,%d0 |
88 | jcs syscall | 88 | jcs syscall |
89 | badsys: | 89 | badsys: |
90 | movel #-ENOSYS,%sp@(PT_D0) | 90 | movel #-ENOSYS,%sp@(PT_OFF_D0) |
91 | jra ret_from_syscall | 91 | jra ret_from_syscall |
92 | 92 | ||
93 | do_trace_exit: | 93 | do_trace_exit: |
@@ -103,7 +103,7 @@ ENTRY(ret_from_signal) | |||
103 | addql #4,%sp | 103 | addql #4,%sp |
104 | /* on 68040 complete pending writebacks if any */ | 104 | /* on 68040 complete pending writebacks if any */ |
105 | #ifdef CONFIG_M68040 | 105 | #ifdef CONFIG_M68040 |
106 | bfextu %sp@(PT_VECTOR){#0,#4},%d0 | 106 | bfextu %sp@(PT_OFF_FORMATVEC){#0,#4},%d0 |
107 | subql #7,%d0 | bus error frame ? | 107 | subql #7,%d0 | bus error frame ? |
108 | jbne 1f | 108 | jbne 1f |
109 | movel %sp,%sp@- | 109 | movel %sp,%sp@- |
@@ -127,7 +127,7 @@ ENTRY(system_call) | |||
127 | jcc badsys | 127 | jcc badsys |
128 | syscall: | 128 | syscall: |
129 | jbsr @(sys_call_table,%d0:l:4)@(0) | 129 | jbsr @(sys_call_table,%d0:l:4)@(0) |
130 | movel %d0,%sp@(PT_D0) | save the return value | 130 | movel %d0,%sp@(PT_OFF_D0) | save the return value |
131 | ret_from_syscall: | 131 | ret_from_syscall: |
132 | |oriw #0x0700,%sr | 132 | |oriw #0x0700,%sr |
133 | movew %curptr@(TASK_INFO+TINFO_FLAGS+2),%d0 | 133 | movew %curptr@(TASK_INFO+TINFO_FLAGS+2),%d0 |
@@ -135,7 +135,7 @@ ret_from_syscall: | |||
135 | 1: RESTORE_ALL | 135 | 1: RESTORE_ALL |
136 | 136 | ||
137 | syscall_exit_work: | 137 | syscall_exit_work: |
138 | btst #5,%sp@(PT_SR) | check if returning to kernel | 138 | btst #5,%sp@(PT_OFF_SR) | check if returning to kernel |
139 | bnes 1b | if so, skip resched, signals | 139 | bnes 1b | if so, skip resched, signals |
140 | lslw #1,%d0 | 140 | lslw #1,%d0 |
141 | jcs do_trace_exit | 141 | jcs do_trace_exit |
@@ -148,7 +148,7 @@ syscall_exit_work: | |||
148 | 148 | ||
149 | ENTRY(ret_from_exception) | 149 | ENTRY(ret_from_exception) |
150 | .Lret_from_exception: | 150 | .Lret_from_exception: |
151 | btst #5,%sp@(PT_SR) | check if returning to kernel | 151 | btst #5,%sp@(PT_OFF_SR) | check if returning to kernel |
152 | bnes 1f | if so, skip resched, signals | 152 | bnes 1f | if so, skip resched, signals |
153 | | only allow interrupts when we are really the last one on the | 153 | | only allow interrupts when we are really the last one on the |
154 | | kernel stack, otherwise stack overflow can occur during | 154 | | kernel stack, otherwise stack overflow can occur during |
@@ -182,7 +182,7 @@ do_signal_return: | |||
182 | jbra resume_userspace | 182 | jbra resume_userspace |
183 | 183 | ||
184 | do_delayed_trace: | 184 | do_delayed_trace: |
185 | bclr #7,%sp@(PT_SR) | clear trace bit in SR | 185 | bclr #7,%sp@(PT_OFF_SR) | clear trace bit in SR |
186 | pea 1 | send SIGTRAP | 186 | pea 1 | send SIGTRAP |
187 | movel %curptr,%sp@- | 187 | movel %curptr,%sp@- |
188 | pea LSIGTRAP | 188 | pea LSIGTRAP |
@@ -199,7 +199,7 @@ ENTRY(auto_inthandler) | |||
199 | GET_CURRENT(%d0) | 199 | GET_CURRENT(%d0) |
200 | addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1) | 200 | addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1) |
201 | | put exception # in d0 | 201 | | put exception # in d0 |
202 | bfextu %sp@(PT_VECTOR){#4,#10},%d0 | 202 | bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0 |
203 | subw #VEC_SPUR,%d0 | 203 | subw #VEC_SPUR,%d0 |
204 | 204 | ||
205 | movel %sp,%sp@- | 205 | movel %sp,%sp@- |
@@ -216,7 +216,7 @@ ret_from_interrupt: | |||
216 | ALIGN | 216 | ALIGN |
217 | ret_from_last_interrupt: | 217 | ret_from_last_interrupt: |
218 | moveq #(~ALLOWINT>>8)&0xff,%d0 | 218 | moveq #(~ALLOWINT>>8)&0xff,%d0 |
219 | andb %sp@(PT_SR),%d0 | 219 | andb %sp@(PT_OFF_SR),%d0 |
220 | jne 2b | 220 | jne 2b |
221 | 221 | ||
222 | /* check if we need to do software interrupts */ | 222 | /* check if we need to do software interrupts */ |
@@ -232,7 +232,7 @@ ENTRY(user_inthandler) | |||
232 | GET_CURRENT(%d0) | 232 | GET_CURRENT(%d0) |
233 | addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1) | 233 | addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1) |
234 | | put exception # in d0 | 234 | | put exception # in d0 |
235 | bfextu %sp@(PT_VECTOR){#4,#10},%d0 | 235 | bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0 |
236 | user_irqvec_fixup = . + 2 | 236 | user_irqvec_fixup = . + 2 |
237 | subw #VEC_USER,%d0 | 237 | subw #VEC_USER,%d0 |
238 | 238 | ||
diff --git a/arch/m68k/math-emu/fp_entry.S b/arch/m68k/math-emu/fp_entry.S index 954b4f304a7d..a3fe1f348dfe 100644 --- a/arch/m68k/math-emu/fp_entry.S +++ b/arch/m68k/math-emu/fp_entry.S | |||
@@ -85,8 +85,8 @@ fp_err_ua2: | |||
85 | fp_err_ua1: | 85 | fp_err_ua1: |
86 | addq.l #4,%sp | 86 | addq.l #4,%sp |
87 | move.l %a0,-(%sp) | 87 | move.l %a0,-(%sp) |
88 | pea SEGV_MAPERR | 88 | pea LSEGV_MAPERR |
89 | pea SIGSEGV | 89 | pea LSIGSEGV |
90 | jsr fpemu_signal | 90 | jsr fpemu_signal |
91 | add.w #12,%sp | 91 | add.w #12,%sp |
92 | jra ret_from_exception | 92 | jra ret_from_exception |
@@ -96,8 +96,8 @@ fp_err_ua1: | |||
96 | | it does not really belong here, but... | 96 | | it does not really belong here, but... |
97 | fp_sendtrace060: | 97 | fp_sendtrace060: |
98 | move.l (FPS_PC,%sp),-(%sp) | 98 | move.l (FPS_PC,%sp),-(%sp) |
99 | pea TRAP_TRACE | 99 | pea LTRAP_TRACE |
100 | pea SIGTRAP | 100 | pea LSIGTRAP |
101 | jsr fpemu_signal | 101 | jsr fpemu_signal |
102 | add.w #12,%sp | 102 | add.w #12,%sp |
103 | jra ret_from_exception | 103 | jra ret_from_exception |
@@ -122,17 +122,17 @@ fp_get_data_reg: | |||
122 | .long fp_get_d6, fp_get_d7 | 122 | .long fp_get_d6, fp_get_d7 |
123 | 123 | ||
124 | fp_get_d0: | 124 | fp_get_d0: |
125 | move.l (PT_D0+8,%sp),%d0 | 125 | move.l (PT_OFF_D0+8,%sp),%d0 |
126 | printf PREGISTER,"{d0->%08x}",1,%d0 | 126 | printf PREGISTER,"{d0->%08x}",1,%d0 |
127 | rts | 127 | rts |
128 | 128 | ||
129 | fp_get_d1: | 129 | fp_get_d1: |
130 | move.l (PT_D1+8,%sp),%d0 | 130 | move.l (PT_OFF_D1+8,%sp),%d0 |
131 | printf PREGISTER,"{d1->%08x}",1,%d0 | 131 | printf PREGISTER,"{d1->%08x}",1,%d0 |
132 | rts | 132 | rts |
133 | 133 | ||
134 | fp_get_d2: | 134 | fp_get_d2: |
135 | move.l (PT_D2+8,%sp),%d0 | 135 | move.l (PT_OFF_D2+8,%sp),%d0 |
136 | printf PREGISTER,"{d2->%08x}",1,%d0 | 136 | printf PREGISTER,"{d2->%08x}",1,%d0 |
137 | rts | 137 | rts |
138 | 138 | ||
@@ -173,35 +173,35 @@ fp_put_data_reg: | |||
173 | 173 | ||
174 | fp_put_d0: | 174 | fp_put_d0: |
175 | printf PREGISTER,"{d0<-%08x}",1,%d0 | 175 | printf PREGISTER,"{d0<-%08x}",1,%d0 |
176 | move.l %d0,(PT_D0+8,%sp) | 176 | move.l %d0,(PT_OFF_D0+8,%sp) |
177 | rts | 177 | rts |
178 | 178 | ||
179 | fp_put_d1: | 179 | fp_put_d1: |
180 | printf PREGISTER,"{d1<-%08x}",1,%d0 | 180 | printf PREGISTER,"{d1<-%08x}",1,%d0 |
181 | move.l %d0,(PT_D1+8,%sp) | 181 | move.l %d0,(PT_OFF_D1+8,%sp) |
182 | rts | 182 | rts |
183 | 183 | ||
184 | fp_put_d2: | 184 | fp_put_d2: |
185 | printf PREGISTER,"{d2<-%08x}",1,%d0 | 185 | printf PREGISTER,"{d2<-%08x}",1,%d0 |
186 | move.l %d0,(PT_D2+8,%sp) | 186 | move.l %d0,(PT_OFF_D2+8,%sp) |
187 | rts | 187 | rts |
188 | 188 | ||
189 | fp_put_d3: | 189 | fp_put_d3: |
190 | printf PREGISTER,"{d3<-%08x}",1,%d0 | 190 | printf PREGISTER,"{d3<-%08x}",1,%d0 |
191 | | move.l %d0,%d3 | 191 | | move.l %d0,%d3 |
192 | move.l %d0,(PT_D3+8,%sp) | 192 | move.l %d0,(PT_OFF_D3+8,%sp) |
193 | rts | 193 | rts |
194 | 194 | ||
195 | fp_put_d4: | 195 | fp_put_d4: |
196 | printf PREGISTER,"{d4<-%08x}",1,%d0 | 196 | printf PREGISTER,"{d4<-%08x}",1,%d0 |
197 | | move.l %d0,%d4 | 197 | | move.l %d0,%d4 |
198 | move.l %d0,(PT_D4+8,%sp) | 198 | move.l %d0,(PT_OFF_D4+8,%sp) |
199 | rts | 199 | rts |
200 | 200 | ||
201 | fp_put_d5: | 201 | fp_put_d5: |
202 | printf PREGISTER,"{d5<-%08x}",1,%d0 | 202 | printf PREGISTER,"{d5<-%08x}",1,%d0 |
203 | | move.l %d0,%d5 | 203 | | move.l %d0,%d5 |
204 | move.l %d0,(PT_D5+8,%sp) | 204 | move.l %d0,(PT_OFF_D5+8,%sp) |
205 | rts | 205 | rts |
206 | 206 | ||
207 | fp_put_d6: | 207 | fp_put_d6: |
@@ -225,17 +225,17 @@ fp_get_addr_reg: | |||
225 | .long fp_get_a6, fp_get_a7 | 225 | .long fp_get_a6, fp_get_a7 |
226 | 226 | ||
227 | fp_get_a0: | 227 | fp_get_a0: |
228 | move.l (PT_A0+8,%sp),%a0 | 228 | move.l (PT_OFF_A0+8,%sp),%a0 |
229 | printf PREGISTER,"{a0->%08x}",1,%a0 | 229 | printf PREGISTER,"{a0->%08x}",1,%a0 |
230 | rts | 230 | rts |
231 | 231 | ||
232 | fp_get_a1: | 232 | fp_get_a1: |
233 | move.l (PT_A1+8,%sp),%a0 | 233 | move.l (PT_OFF_A1+8,%sp),%a0 |
234 | printf PREGISTER,"{a1->%08x}",1,%a0 | 234 | printf PREGISTER,"{a1->%08x}",1,%a0 |
235 | rts | 235 | rts |
236 | 236 | ||
237 | fp_get_a2: | 237 | fp_get_a2: |
238 | move.l (PT_A2+8,%sp),%a0 | 238 | move.l (PT_OFF_A2+8,%sp),%a0 |
239 | printf PREGISTER,"{a2->%08x}",1,%a0 | 239 | printf PREGISTER,"{a2->%08x}",1,%a0 |
240 | rts | 240 | rts |
241 | 241 | ||
@@ -276,17 +276,17 @@ fp_put_addr_reg: | |||
276 | 276 | ||
277 | fp_put_a0: | 277 | fp_put_a0: |
278 | printf PREGISTER,"{a0<-%08x}",1,%a0 | 278 | printf PREGISTER,"{a0<-%08x}",1,%a0 |
279 | move.l %a0,(PT_A0+8,%sp) | 279 | move.l %a0,(PT_OFF_A0+8,%sp) |
280 | rts | 280 | rts |
281 | 281 | ||
282 | fp_put_a1: | 282 | fp_put_a1: |
283 | printf PREGISTER,"{a1<-%08x}",1,%a0 | 283 | printf PREGISTER,"{a1<-%08x}",1,%a0 |
284 | move.l %a0,(PT_A1+8,%sp) | 284 | move.l %a0,(PT_OFF_A1+8,%sp) |
285 | rts | 285 | rts |
286 | 286 | ||
287 | fp_put_a2: | 287 | fp_put_a2: |
288 | printf PREGISTER,"{a2<-%08x}",1,%a0 | 288 | printf PREGISTER,"{a2<-%08x}",1,%a0 |
289 | move.l %a0,(PT_A2+8,%sp) | 289 | move.l %a0,(PT_OFF_A2+8,%sp) |
290 | rts | 290 | rts |
291 | 291 | ||
292 | fp_put_a3: | 292 | fp_put_a3: |
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h index f9df720d2e40..01cc1630b66c 100644 --- a/arch/mips/include/asm/thread_info.h +++ b/arch/mips/include/asm/thread_info.h | |||
@@ -115,6 +115,7 @@ register struct thread_info *__current_thread_info __asm__("$28"); | |||
115 | #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ | 115 | #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ |
116 | #define TIF_SYSCALL_AUDIT 3 /* syscall auditing active */ | 116 | #define TIF_SYSCALL_AUDIT 3 /* syscall auditing active */ |
117 | #define TIF_SECCOMP 4 /* secure computing */ | 117 | #define TIF_SECCOMP 4 /* secure computing */ |
118 | #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */ | ||
118 | #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */ | 119 | #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */ |
119 | #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ | 120 | #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ |
120 | #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ | 121 | #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ |
@@ -139,6 +140,7 @@ register struct thread_info *__current_thread_info __asm__("$28"); | |||
139 | #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) | 140 | #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) |
140 | #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) | 141 | #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) |
141 | #define _TIF_SECCOMP (1<<TIF_SECCOMP) | 142 | #define _TIF_SECCOMP (1<<TIF_SECCOMP) |
143 | #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) | ||
142 | #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) | 144 | #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) |
143 | #define _TIF_USEDFPU (1<<TIF_USEDFPU) | 145 | #define _TIF_USEDFPU (1<<TIF_USEDFPU) |
144 | #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) | 146 | #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) |
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index 830c5ef9932b..6254041b942f 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/compiler.h> | 21 | #include <linux/compiler.h> |
22 | #include <linux/syscalls.h> | 22 | #include <linux/syscalls.h> |
23 | #include <linux/uaccess.h> | 23 | #include <linux/uaccess.h> |
24 | #include <linux/tracehook.h> | ||
24 | 25 | ||
25 | #include <asm/abi.h> | 26 | #include <asm/abi.h> |
26 | #include <asm/asm.h> | 27 | #include <asm/asm.h> |
@@ -700,4 +701,11 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused, | |||
700 | /* deal with pending signal delivery */ | 701 | /* deal with pending signal delivery */ |
701 | if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) | 702 | if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) |
702 | do_signal(regs); | 703 | do_signal(regs); |
704 | |||
705 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { | ||
706 | clear_thread_flag(TIF_NOTIFY_RESUME); | ||
707 | tracehook_notify_resume(regs); | ||
708 | if (current->replacement_session_keyring) | ||
709 | key_replace_session_keyring(); | ||
710 | } | ||
703 | } | 711 | } |
diff --git a/arch/mn10300/kernel/signal.c b/arch/mn10300/kernel/signal.c index feb2f2e810db..a21f43bc68e2 100644 --- a/arch/mn10300/kernel/signal.c +++ b/arch/mn10300/kernel/signal.c | |||
@@ -568,5 +568,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags) | |||
568 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { | 568 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { |
569 | clear_thread_flag(TIF_NOTIFY_RESUME); | 569 | clear_thread_flag(TIF_NOTIFY_RESUME); |
570 | tracehook_notify_resume(__frame); | 570 | tracehook_notify_resume(__frame); |
571 | if (current->replacement_session_keyring) | ||
572 | key_replace_session_keyring(); | ||
571 | } | 573 | } |
572 | } | 574 | } |
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h index 4ce0edfbe969..ac775a76bff7 100644 --- a/arch/parisc/include/asm/thread_info.h +++ b/arch/parisc/include/asm/thread_info.h | |||
@@ -59,6 +59,7 @@ struct thread_info { | |||
59 | #define TIF_MEMDIE 5 | 59 | #define TIF_MEMDIE 5 |
60 | #define TIF_RESTORE_SIGMASK 6 /* restore saved signal mask */ | 60 | #define TIF_RESTORE_SIGMASK 6 /* restore saved signal mask */ |
61 | #define TIF_FREEZE 7 /* is freezing for suspend */ | 61 | #define TIF_FREEZE 7 /* is freezing for suspend */ |
62 | #define TIF_NOTIFY_RESUME 8 /* callback before returning to user */ | ||
62 | 63 | ||
63 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) | 64 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) |
64 | #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) | 65 | #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) |
@@ -67,8 +68,9 @@ struct thread_info { | |||
67 | #define _TIF_32BIT (1 << TIF_32BIT) | 68 | #define _TIF_32BIT (1 << TIF_32BIT) |
68 | #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) | 69 | #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) |
69 | #define _TIF_FREEZE (1 << TIF_FREEZE) | 70 | #define _TIF_FREEZE (1 << TIF_FREEZE) |
71 | #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) | ||
70 | 72 | ||
71 | #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | \ | 73 | #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \ |
72 | _TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK) | 74 | _TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK) |
73 | 75 | ||
74 | #endif /* __KERNEL__ */ | 76 | #endif /* __KERNEL__ */ |
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index e552e547cb93..8c4712b74dc1 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S | |||
@@ -948,7 +948,7 @@ intr_check_sig: | |||
948 | /* As above */ | 948 | /* As above */ |
949 | mfctl %cr30,%r1 | 949 | mfctl %cr30,%r1 |
950 | LDREG TI_FLAGS(%r1),%r19 | 950 | LDREG TI_FLAGS(%r1),%r19 |
951 | ldi (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %r20 | 951 | ldi (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK|_TIF_NOTIFY_RESUME), %r20 |
952 | and,COND(<>) %r19, %r20, %r0 | 952 | and,COND(<>) %r19, %r20, %r0 |
953 | b,n intr_restore /* skip past if we've nothing to do */ | 953 | b,n intr_restore /* skip past if we've nothing to do */ |
954 | 954 | ||
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c index f82544225e8e..8eb3c63c407a 100644 --- a/arch/parisc/kernel/signal.c +++ b/arch/parisc/kernel/signal.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/stddef.h> | 25 | #include <linux/stddef.h> |
26 | #include <linux/compat.h> | 26 | #include <linux/compat.h> |
27 | #include <linux/elf.h> | 27 | #include <linux/elf.h> |
28 | #include <linux/tracehook.h> | ||
28 | #include <asm/ucontext.h> | 29 | #include <asm/ucontext.h> |
29 | #include <asm/rt_sigframe.h> | 30 | #include <asm/rt_sigframe.h> |
30 | #include <asm/uaccess.h> | 31 | #include <asm/uaccess.h> |
@@ -645,4 +646,11 @@ void do_notify_resume(struct pt_regs *regs, long in_syscall) | |||
645 | if (test_thread_flag(TIF_SIGPENDING) || | 646 | if (test_thread_flag(TIF_SIGPENDING) || |
646 | test_thread_flag(TIF_RESTORE_SIGMASK)) | 647 | test_thread_flag(TIF_RESTORE_SIGMASK)) |
647 | do_signal(regs, in_syscall); | 648 | do_signal(regs, in_syscall); |
649 | |||
650 | if (test_thread_flag(TIF_NOTIFY_RESUME)) { | ||
651 | clear_thread_flag(TIF_NOTIFY_RESUME); | ||
652 | tracehook_notify_resume(regs); | ||
653 | if (current->replacement_session_keyring) | ||
654 | key_replace_session_keyring(); | ||
655 | } | ||
648 | } | 656 | } |
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index b44aaabdd1a6..0c34371ec49c 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h | |||
@@ -424,6 +424,29 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | |||
424 | #endif | 424 | #endif |
425 | } | 425 | } |
426 | 426 | ||
427 | static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) | ||
428 | { | ||
429 | struct dma_mapping_ops *ops = get_dma_ops(dev); | ||
430 | |||
431 | if (ops->addr_needs_map && ops->addr_needs_map(dev, addr, size)) | ||
432 | return 0; | ||
433 | |||
434 | if (!dev->dma_mask) | ||
435 | return 0; | ||
436 | |||
437 | return addr + size <= *dev->dma_mask; | ||
438 | } | ||
439 | |||
440 | static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) | ||
441 | { | ||
442 | return paddr + get_dma_direct_offset(dev); | ||
443 | } | ||
444 | |||
445 | static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) | ||
446 | { | ||
447 | return daddr - get_dma_direct_offset(dev); | ||
448 | } | ||
449 | |||
427 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) | 450 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) |
428 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) | 451 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) |
429 | #ifdef CONFIG_NOT_COHERENT_CACHE | 452 | #ifdef CONFIG_NOT_COHERENT_CACHE |
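These three helpers expose the powerpc DMA-offset arithmetic to generic code: dma_capable() answers whether a device can reach an address range directly, and phys_to_dma()/dma_to_phys() perform the offset translation that the swiotlb glue below used to open-code. A hedged sketch of the intended caller; map_bounce() is a hypothetical stand-in for a bounce-buffer path:

    static dma_addr_t map_single_sketch(struct device *dev,
                                        phys_addr_t paddr, size_t size)
    {
            dma_addr_t dev_addr = phys_to_dma(dev, paddr);

            if (!dma_capable(dev, dev_addr, size))
                    return map_bounce(dev, paddr, size);    /* hypothetical */

            return dev_addr;
    }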
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index eb17da781128..2a5da069714e 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h | |||
@@ -104,8 +104,8 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, | |||
104 | else | 104 | else |
105 | pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte)); | 105 | pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte)); |
106 | 106 | ||
107 | #elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP) | 107 | #elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) |
108 | /* Second case is 32-bit with 64-bit PTE in SMP mode. In this case, we | 108 | /* Second case is 32-bit with 64-bit PTE. In this case, we |
109 | * can just store as long as we do the two halves in the right order | 109 | * can just store as long as we do the two halves in the right order |
110 | * with a barrier in between. This is possible because we take care, | 110 | * with a barrier in between. This is possible because we take care, |
111 | * in the hash code, to pre-invalidate if the PTE was already hashed, | 111 | * in the hash code, to pre-invalidate if the PTE was already hashed, |
@@ -140,7 +140,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, | |||
140 | 140 | ||
141 | #else | 141 | #else |
142 | /* Anything else just stores the PTE normally. That covers all 64-bit | 142 | /* Anything else just stores the PTE normally. That covers all 64-bit |
143 | * cases, and 32-bit non-hash with 64-bit PTEs in UP mode | 143 | * cases, and 32-bit non-hash with 32-bit PTEs. |
144 | */ | 144 | */ |
145 | *ptep = pte; | 145 | *ptep = pte; |
146 | #endif | 146 | #endif |
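Dropping the CONFIG_SMP test widens the second case to every 32-bit hash build with 64-bit PTEs, UP included, and the final branch is re-scoped to 32-bit PTEs accordingly. The ordering the comment relies on looks roughly like this in C; which half carries the valid bit and the exact barrier are arch details, so treat this as a sketch:

    /* eieio() stands in for the store barrier; the flags/present bit is
     * assumed to live in the second word written.
     */
    static void set_pte_two_halves(volatile unsigned int *p,
                                   unsigned long long pte)
    {
            p[0] = (unsigned int)(pte >> 32);   /* half without the valid bit */
            eieio();                            /* order the two stores */
            p[1] = (unsigned int)pte;           /* PTE becomes visible valid */
    }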
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index c3b193121f81..198266cf9e2d 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h | |||
@@ -54,7 +54,7 @@ | |||
54 | * This returns the old value in the lock, so we succeeded | 54 | * This returns the old value in the lock, so we succeeded |
55 | * in getting the lock if the return value is 0. | 55 | * in getting the lock if the return value is 0. |
56 | */ | 56 | */ |
57 | static inline unsigned long __spin_trylock(raw_spinlock_t *lock) | 57 | static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock) |
58 | { | 58 | { |
59 | unsigned long tmp, token; | 59 | unsigned long tmp, token; |
60 | 60 | ||
@@ -76,7 +76,7 @@ static inline unsigned long __spin_trylock(raw_spinlock_t *lock) | |||
76 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) | 76 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) |
77 | { | 77 | { |
78 | CLEAR_IO_SYNC; | 78 | CLEAR_IO_SYNC; |
79 | return __spin_trylock(lock) == 0; | 79 | return arch_spin_trylock(lock) == 0; |
80 | } | 80 | } |
81 | 81 | ||
82 | /* | 82 | /* |
@@ -108,7 +108,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) | |||
108 | { | 108 | { |
109 | CLEAR_IO_SYNC; | 109 | CLEAR_IO_SYNC; |
110 | while (1) { | 110 | while (1) { |
111 | if (likely(__spin_trylock(lock) == 0)) | 111 | if (likely(arch_spin_trylock(lock) == 0)) |
112 | break; | 112 | break; |
113 | do { | 113 | do { |
114 | HMT_low(); | 114 | HMT_low(); |
@@ -126,7 +126,7 @@ void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) | |||
126 | 126 | ||
127 | CLEAR_IO_SYNC; | 127 | CLEAR_IO_SYNC; |
128 | while (1) { | 128 | while (1) { |
129 | if (likely(__spin_trylock(lock) == 0)) | 129 | if (likely(arch_spin_trylock(lock) == 0)) |
130 | break; | 130 | break; |
131 | local_save_flags(flags_dis); | 131 | local_save_flags(flags_dis); |
132 | local_irq_restore(flags); | 132 | local_irq_restore(flags); |
@@ -181,7 +181,7 @@ extern void __raw_spin_unlock_wait(raw_spinlock_t *lock); | |||
181 | * This returns the old value in the lock + 1, | 181 | * This returns the old value in the lock + 1, |
182 | * so we got a read lock if the return value is > 0. | 182 | * so we got a read lock if the return value is > 0. |
183 | */ | 183 | */ |
184 | static inline long __read_trylock(raw_rwlock_t *rw) | 184 | static inline long arch_read_trylock(raw_rwlock_t *rw) |
185 | { | 185 | { |
186 | long tmp; | 186 | long tmp; |
187 | 187 | ||
@@ -205,7 +205,7 @@ static inline long __read_trylock(raw_rwlock_t *rw) | |||
205 | * This returns the old value in the lock, | 205 | * This returns the old value in the lock, |
206 | * so we got the write lock if the return value is 0. | 206 | * so we got the write lock if the return value is 0. |
207 | */ | 207 | */ |
208 | static inline long __write_trylock(raw_rwlock_t *rw) | 208 | static inline long arch_write_trylock(raw_rwlock_t *rw) |
209 | { | 209 | { |
210 | long tmp, token; | 210 | long tmp, token; |
211 | 211 | ||
@@ -228,7 +228,7 @@ static inline long __write_trylock(raw_rwlock_t *rw) | |||
228 | static inline void __raw_read_lock(raw_rwlock_t *rw) | 228 | static inline void __raw_read_lock(raw_rwlock_t *rw) |
229 | { | 229 | { |
230 | while (1) { | 230 | while (1) { |
231 | if (likely(__read_trylock(rw) > 0)) | 231 | if (likely(arch_read_trylock(rw) > 0)) |
232 | break; | 232 | break; |
233 | do { | 233 | do { |
234 | HMT_low(); | 234 | HMT_low(); |
@@ -242,7 +242,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) | |||
242 | static inline void __raw_write_lock(raw_rwlock_t *rw) | 242 | static inline void __raw_write_lock(raw_rwlock_t *rw) |
243 | { | 243 | { |
244 | while (1) { | 244 | while (1) { |
245 | if (likely(__write_trylock(rw) == 0)) | 245 | if (likely(arch_write_trylock(rw) == 0)) |
246 | break; | 246 | break; |
247 | do { | 247 | do { |
248 | HMT_low(); | 248 | HMT_low(); |
@@ -255,12 +255,12 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) | |||
255 | 255 | ||
256 | static inline int __raw_read_trylock(raw_rwlock_t *rw) | 256 | static inline int __raw_read_trylock(raw_rwlock_t *rw) |
257 | { | 257 | { |
258 | return __read_trylock(rw) > 0; | 258 | return arch_read_trylock(rw) > 0; |
259 | } | 259 | } |
260 | 260 | ||
261 | static inline int __raw_write_trylock(raw_rwlock_t *rw) | 261 | static inline int __raw_write_trylock(raw_rwlock_t *rw) |
262 | { | 262 | { |
263 | return __write_trylock(rw) == 0; | 263 | return arch_write_trylock(rw) == 0; |
264 | } | 264 | } |
265 | 265 | ||
266 | static inline void __raw_read_unlock(raw_rwlock_t *rw) | 266 | static inline void __raw_read_unlock(raw_rwlock_t *rw) |
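The rename is mechanical but deliberate: the arch_ prefix marks these as the architecture-level primitives that the generic __raw_* entry points wrap, and keeps them clear of identically named helpers elsewhere in the lock code. Because the return conventions documented above are easy to misread, a usage sketch (locks assumed initialised; the sketch takes them without releasing):

    static int trylock_conventions(raw_spinlock_t *lock, raw_rwlock_t *rw)
    {
            int got = 0;

            if (arch_spin_trylock(lock) == 0)   /* old value 0: lock taken */
                    got |= 1;
            if (arch_read_trylock(rw) > 0)      /* old value + 1: read held */
                    got |= 2;
            if (arch_write_trylock(rw) == 0)    /* old value 0: write held */
                    got |= 4;
            return got;
    }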
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index b73396b93905..9619285f64e8 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile | |||
@@ -97,7 +97,7 @@ obj64-$(CONFIG_AUDIT) += compat_audit.o | |||
97 | 97 | ||
98 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o | 98 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o |
99 | obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o | 99 | obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o |
100 | obj-$(CONFIG_PPC_PERF_CTRS) += perf_counter.o | 100 | obj-$(CONFIG_PPC_PERF_CTRS) += perf_counter.o perf_callchain.o |
101 | obj64-$(CONFIG_PPC_PERF_CTRS) += power4-pmu.o ppc970-pmu.o power5-pmu.o \ | 101 | obj64-$(CONFIG_PPC_PERF_CTRS) += power4-pmu.o ppc970-pmu.o power5-pmu.o \ |
102 | power5+-pmu.o power6-pmu.o power7-pmu.o | 102 | power5+-pmu.o power6-pmu.o power7-pmu.o |
103 | obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o | 103 | obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o |
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 561b64652311..197b15646eeb 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c | |||
@@ -67,6 +67,8 @@ int main(void) | |||
67 | DEFINE(MMCONTEXTID, offsetof(struct mm_struct, context.id)); | 67 | DEFINE(MMCONTEXTID, offsetof(struct mm_struct, context.id)); |
68 | #ifdef CONFIG_PPC64 | 68 | #ifdef CONFIG_PPC64 |
69 | DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context)); | 69 | DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context)); |
70 | DEFINE(SIGSEGV, SIGSEGV); | ||
71 | DEFINE(NMI_MASK, NMI_MASK); | ||
70 | #else | 72 | #else |
71 | DEFINE(THREAD_INFO, offsetof(struct task_struct, stack)); | 73 | DEFINE(THREAD_INFO, offsetof(struct task_struct, stack)); |
72 | #endif /* CONFIG_PPC64 */ | 74 | #endif /* CONFIG_PPC64 */ |
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c index 68ccf11e4f19..e8a57de85bcf 100644 --- a/arch/powerpc/kernel/dma-swiotlb.c +++ b/arch/powerpc/kernel/dma-swiotlb.c | |||
@@ -24,50 +24,12 @@ | |||
24 | int swiotlb __read_mostly; | 24 | int swiotlb __read_mostly; |
25 | unsigned int ppc_swiotlb_enable; | 25 | unsigned int ppc_swiotlb_enable; |
26 | 26 | ||
27 | void *swiotlb_bus_to_virt(struct device *hwdev, dma_addr_t addr) | ||
28 | { | ||
29 | unsigned long pfn = PFN_DOWN(swiotlb_bus_to_phys(hwdev, addr)); | ||
30 | void *pageaddr = page_address(pfn_to_page(pfn)); | ||
31 | |||
32 | if (pageaddr != NULL) | ||
33 | return pageaddr + (addr % PAGE_SIZE); | ||
34 | return NULL; | ||
35 | } | ||
36 | |||
37 | dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr) | ||
38 | { | ||
39 | return paddr + get_dma_direct_offset(hwdev); | ||
40 | } | ||
41 | |||
42 | phys_addr_t swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr) | ||
43 | |||
44 | { | ||
45 | return baddr - get_dma_direct_offset(hwdev); | ||
46 | } | ||
47 | |||
48 | /* | ||
49 | * Determine if an address needs bounce buffering via swiotlb. | ||
50 | * Going forward I expect the swiotlb code to generalize on using | ||
51 | * a dma_ops->addr_needs_map, and this function will move from here to the | ||
52 | * generic swiotlb code. | ||
53 | */ | ||
54 | int | ||
55 | swiotlb_arch_address_needs_mapping(struct device *hwdev, dma_addr_t addr, | ||
56 | size_t size) | ||
57 | { | ||
58 | struct dma_mapping_ops *dma_ops = get_dma_ops(hwdev); | ||
59 | |||
60 | BUG_ON(!dma_ops); | ||
61 | return dma_ops->addr_needs_map(hwdev, addr, size); | ||
62 | } | ||
63 | |||
64 | /* | 27 | /* |
65 | * Determine if an address is reachable by a pci device, or if we must bounce. | 28 | * Determine if an address is reachable by a pci device, or if we must bounce. |
66 | */ | 29 | */ |
67 | static int | 30 | static int |
68 | swiotlb_pci_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size) | 31 | swiotlb_pci_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size) |
69 | { | 32 | { |
70 | u64 mask = dma_get_mask(hwdev); | ||
71 | dma_addr_t max; | 33 | dma_addr_t max; |
72 | struct pci_controller *hose; | 34 | struct pci_controller *hose; |
73 | struct pci_dev *pdev = to_pci_dev(hwdev); | 35 | struct pci_dev *pdev = to_pci_dev(hwdev); |
@@ -79,16 +41,9 @@ swiotlb_pci_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size) | |||
79 | if ((addr + size > max) | (addr < hose->dma_window_base_cur)) | 41 | if ((addr + size > max) | (addr < hose->dma_window_base_cur)) |
80 | return 1; | 42 | return 1; |
81 | 43 | ||
82 | return !is_buffer_dma_capable(mask, addr, size); | 44 | return 0; |
83 | } | ||
84 | |||
85 | static int | ||
86 | swiotlb_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size) | ||
87 | { | ||
88 | return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size); | ||
89 | } | 45 | } |
90 | 46 | ||
91 | |||
92 | /* | 47 | /* |
93 | * At the moment, all platforms that use this code only require | 48 | * At the moment, all platforms that use this code only require |
94 | * swiotlb to be used if we're operating on HIGHMEM. Since | 49 | * swiotlb to be used if we're operating on HIGHMEM. Since |
@@ -104,7 +59,6 @@ struct dma_mapping_ops swiotlb_dma_ops = { | |||
104 | .dma_supported = swiotlb_dma_supported, | 59 | .dma_supported = swiotlb_dma_supported, |
105 | .map_page = swiotlb_map_page, | 60 | .map_page = swiotlb_map_page, |
106 | .unmap_page = swiotlb_unmap_page, | 61 | .unmap_page = swiotlb_unmap_page, |
107 | .addr_needs_map = swiotlb_addr_needs_map, | ||
108 | .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu, | 62 | .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu, |
109 | .sync_single_range_for_device = swiotlb_sync_single_range_for_device, | 63 | .sync_single_range_for_device = swiotlb_sync_single_range_for_device, |
110 | .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, | 64 | .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, |
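This deletion is the other half of the dma_capable() addition above: the mask test duplicated by swiotlb_addr_needs_map() and the arch hook moves into generic swiotlb code, leaving only the genuinely platform-specific question of whether the range falls inside the PCI controller's DMA window. That surviving test, restated with the fields used in the hunk (dma_window_size is an assumed field name; the hunk elides how max is computed):

    static int outside_dma_window(struct pci_controller *hose,
                                  dma_addr_t addr, size_t size)
    {
            dma_addr_t max = hose->dma_window_base_cur +
                             hose->dma_window_size;     /* assumed */

            return (addr + size > max) || (addr < hose->dma_window_base_cur);
    }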
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index eb898112e577..8ac85e08ffae 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S | |||
@@ -729,6 +729,11 @@ BEGIN_FTR_SECTION | |||
729 | bne- do_ste_alloc /* If so handle it */ | 729 | bne- do_ste_alloc /* If so handle it */ |
730 | END_FTR_SECTION_IFCLR(CPU_FTR_SLB) | 730 | END_FTR_SECTION_IFCLR(CPU_FTR_SLB) |
731 | 731 | ||
732 | clrrdi r11,r1,THREAD_SHIFT | ||
733 | lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */ | ||
734 | andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */ | ||
735 | bne 77f /* then don't call hash_page now */ | ||
736 | |||
732 | /* | 737 | /* |
733 | * On iSeries, we soft-disable interrupts here, then | 738 | * On iSeries, we soft-disable interrupts here, then |
734 | * hard-enable interrupts so that the hash_page code can spin on | 739 | * hard-enable interrupts so that the hash_page code can spin on |
@@ -833,6 +838,20 @@ handle_page_fault: | |||
833 | bl .low_hash_fault | 838 | bl .low_hash_fault |
834 | b .ret_from_except | 839 | b .ret_from_except |
835 | 840 | ||
841 | /* | ||
842 | * We come here as a result of a DSI at a point where we don't want | ||
843 | * to call hash_page, such as when we are accessing memory (possibly | ||
844 | * user memory) inside a PMU interrupt that occurred while interrupts | ||
845 | * were soft-disabled. We want to invoke the exception handler for | ||
846 | * the access, or panic if there isn't a handler. | ||
847 | */ | ||
848 | 77: bl .save_nvgprs | ||
849 | mr r4,r3 | ||
850 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
851 | li r5,SIGSEGV | ||
852 | bl .bad_page_fault | ||
853 | b .ret_from_except | ||
854 | |||
836 | /* here we have a segment miss */ | 855 | /* here we have a segment miss */ |
837 | do_ste_alloc: | 856 | do_ste_alloc: |
838 | bl .ste_allocate /* try to insert stab entry */ | 857 | bl .ste_allocate /* try to insert stab entry */ |
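The added check loads TI_PREEMPT from the thread_info and tests NMI_MASK (newly exported via asm-offsets above): if the data access fault arrived in NMI-like context, e.g. a PMU interrupt taken while interrupts were soft-disabled, hash_page() must not run, since it can spin on locks held by the interrupted code; the new 77: stub instead delivers the fault through bad_page_fault() with SIGSEGV. In rough C:

    /* Rough restatement of the guard; the asm performs the
     * preempt-count test directly on the stacked thread_info.
     */
    static void dsi_guard_sketch(struct pt_regs *regs, unsigned long dar,
                                 unsigned long ea, unsigned long access,
                                 unsigned long trap)
    {
            if (preempt_count() & NMI_MASK) {
                    bad_page_fault(regs, dar, SIGSEGV); /* no hash_page here */
                    return;
            }
            hash_page(ea, access, trap);
    }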
diff --git a/arch/powerpc/kernel/perf_callchain.c b/arch/powerpc/kernel/perf_callchain.c new file mode 100644 index 000000000000..f74b62c67511 --- /dev/null +++ b/arch/powerpc/kernel/perf_callchain.c | |||
@@ -0,0 +1,527 @@ | |||
1 | /* | ||
2 | * Performance counter callchain support - powerpc architecture code | ||
3 | * | ||
4 | * Copyright © 2009 Paul Mackerras, IBM Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/sched.h> | ||
13 | #include <linux/perf_counter.h> | ||
14 | #include <linux/percpu.h> | ||
15 | #include <linux/uaccess.h> | ||
16 | #include <linux/mm.h> | ||
17 | #include <asm/ptrace.h> | ||
18 | #include <asm/pgtable.h> | ||
19 | #include <asm/sigcontext.h> | ||
20 | #include <asm/ucontext.h> | ||
21 | #include <asm/vdso.h> | ||
22 | #ifdef CONFIG_PPC64 | ||
23 | #include "ppc32.h" | ||
24 | #endif | ||
25 | |||
26 | /* | ||
27 | * Store another value in a callchain_entry. | ||
28 | */ | ||
29 | static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip) | ||
30 | { | ||
31 | unsigned int nr = entry->nr; | ||
32 | |||
33 | if (nr < PERF_MAX_STACK_DEPTH) { | ||
34 | entry->ip[nr] = ip; | ||
35 | entry->nr = nr + 1; | ||
36 | } | ||
37 | } | ||
38 | |||
39 | /* | ||
40 | * Is sp valid as the address of the next kernel stack frame after prev_sp? | ||
41 | * The next frame may be in a different stack area but should not go | ||
42 | * back down in the same stack area. | ||
43 | */ | ||
44 | static int valid_next_sp(unsigned long sp, unsigned long prev_sp) | ||
45 | { | ||
46 | if (sp & 0xf) | ||
47 | return 0; /* must be 16-byte aligned */ | ||
48 | if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD)) | ||
49 | return 0; | ||
50 | if (sp >= prev_sp + STACK_FRAME_OVERHEAD) | ||
51 | return 1; | ||
52 | /* | ||
53 | * sp could decrease when we jump off an interrupt stack | ||
54 | * back to the regular process stack. | ||
55 | */ | ||
56 | if ((sp & ~(THREAD_SIZE - 1)) != (prev_sp & ~(THREAD_SIZE - 1))) | ||
57 | return 1; | ||
58 | return 0; | ||
59 | } | ||
60 | |||
61 | static void perf_callchain_kernel(struct pt_regs *regs, | ||
62 | struct perf_callchain_entry *entry) | ||
63 | { | ||
64 | unsigned long sp, next_sp; | ||
65 | unsigned long next_ip; | ||
66 | unsigned long lr; | ||
67 | long level = 0; | ||
68 | unsigned long *fp; | ||
69 | |||
70 | lr = regs->link; | ||
71 | sp = regs->gpr[1]; | ||
72 | callchain_store(entry, PERF_CONTEXT_KERNEL); | ||
73 | callchain_store(entry, regs->nip); | ||
74 | |||
75 | if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD)) | ||
76 | return; | ||
77 | |||
78 | for (;;) { | ||
79 | fp = (unsigned long *) sp; | ||
80 | next_sp = fp[0]; | ||
81 | |||
82 | if (next_sp == sp + STACK_INT_FRAME_SIZE && | ||
83 | fp[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) { | ||
84 | /* | ||
85 | * This looks like an interrupt frame for an | ||
86 | * interrupt that occurred in the kernel | ||
87 | */ | ||
88 | regs = (struct pt_regs *)(sp + STACK_FRAME_OVERHEAD); | ||
89 | next_ip = regs->nip; | ||
90 | lr = regs->link; | ||
91 | level = 0; | ||
92 | callchain_store(entry, PERF_CONTEXT_KERNEL); | ||
93 | |||
94 | } else { | ||
95 | if (level == 0) | ||
96 | next_ip = lr; | ||
97 | else | ||
98 | next_ip = fp[STACK_FRAME_LR_SAVE]; | ||
99 | |||
100 | /* | ||
101 | * We can't tell which of the first two addresses | ||
102 | * we get are valid, but we can filter out the | ||
103 | * obviously bogus ones here. We replace them | ||
104 | * with 0 rather than removing them entirely so | ||
105 | * that userspace can tell which is which. | ||
106 | */ | ||
107 | if ((level == 1 && next_ip == lr) || | ||
108 | (level <= 1 && !kernel_text_address(next_ip))) | ||
109 | next_ip = 0; | ||
110 | |||
111 | ++level; | ||
112 | } | ||
113 | |||
114 | callchain_store(entry, next_ip); | ||
115 | if (!valid_next_sp(next_sp, sp)) | ||
116 | return; | ||
117 | sp = next_sp; | ||
118 | } | ||
119 | } | ||
120 | |||
121 | #ifdef CONFIG_PPC64 | ||
122 | |||
123 | #ifdef CONFIG_HUGETLB_PAGE | ||
124 | #define is_huge_psize(pagesize) (HPAGE_SHIFT && mmu_huge_psizes[pagesize]) | ||
125 | #else | ||
126 | #define is_huge_psize(pagesize) 0 | ||
127 | #endif | ||
128 | |||
129 | /* | ||
130 | * On 64-bit we don't want to invoke hash_page on user addresses from | ||
131 | * interrupt context, so if the access faults, we read the page tables | ||
132 | * to find which page (if any) is mapped and access it directly. | ||
133 | */ | ||
134 | static int read_user_stack_slow(void __user *ptr, void *ret, int nb) | ||
135 | { | ||
136 | pgd_t *pgdir; | ||
137 | pte_t *ptep, pte; | ||
138 | int pagesize; | ||
139 | unsigned long addr = (unsigned long) ptr; | ||
140 | unsigned long offset; | ||
141 | unsigned long pfn; | ||
142 | void *kaddr; | ||
143 | |||
144 | pgdir = current->mm->pgd; | ||
145 | if (!pgdir) | ||
146 | return -EFAULT; | ||
147 | |||
148 | pagesize = get_slice_psize(current->mm, addr); | ||
149 | |||
150 | /* align address to page boundary */ | ||
151 | offset = addr & ((1ul << mmu_psize_defs[pagesize].shift) - 1); | ||
152 | addr -= offset; | ||
153 | |||
154 | if (is_huge_psize(pagesize)) | ||
155 | ptep = huge_pte_offset(current->mm, addr); | ||
156 | else | ||
157 | ptep = find_linux_pte(pgdir, addr); | ||
158 | |||
159 | if (ptep == NULL) | ||
160 | return -EFAULT; | ||
161 | pte = *ptep; | ||
162 | if (!pte_present(pte) || !(pte_val(pte) & _PAGE_USER)) | ||
163 | return -EFAULT; | ||
164 | pfn = pte_pfn(pte); | ||
165 | if (!page_is_ram(pfn)) | ||
166 | return -EFAULT; | ||
167 | |||
168 | /* no highmem to worry about here */ | ||
169 | kaddr = pfn_to_kaddr(pfn); | ||
170 | memcpy(ret, kaddr + offset, nb); | ||
171 | return 0; | ||
172 | } | ||
173 | |||
174 | static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret) | ||
175 | { | ||
176 | if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned long) || | ||
177 | ((unsigned long)ptr & 7)) | ||
178 | return -EFAULT; | ||
179 | |||
180 | if (!__get_user_inatomic(*ret, ptr)) | ||
181 | return 0; | ||
182 | |||
183 | return read_user_stack_slow(ptr, ret, 8); | ||
184 | } | ||
185 | |||
186 | static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret) | ||
187 | { | ||
188 | if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) || | ||
189 | ((unsigned long)ptr & 3)) | ||
190 | return -EFAULT; | ||
191 | |||
192 | if (!__get_user_inatomic(*ret, ptr)) | ||
193 | return 0; | ||
194 | |||
195 | return read_user_stack_slow(ptr, ret, 4); | ||
196 | } | ||
197 | |||
198 | static inline int valid_user_sp(unsigned long sp, int is_64) | ||
199 | { | ||
200 | if (!sp || (sp & 7) || sp > (is_64 ? TASK_SIZE : 0x100000000UL) - 32) | ||
201 | return 0; | ||
202 | return 1; | ||
203 | } | ||
204 | |||
205 | /* | ||
206 | * 64-bit user processes use the same stack frame for RT and non-RT signals. | ||
207 | */ | ||
208 | struct signal_frame_64 { | ||
209 | char dummy[__SIGNAL_FRAMESIZE]; | ||
210 | struct ucontext uc; | ||
211 | unsigned long unused[2]; | ||
212 | unsigned int tramp[6]; | ||
213 | struct siginfo *pinfo; | ||
214 | void *puc; | ||
215 | struct siginfo info; | ||
216 | char abigap[288]; | ||
217 | }; | ||
218 | |||
219 | static int is_sigreturn_64_address(unsigned long nip, unsigned long fp) | ||
220 | { | ||
221 | if (nip == fp + offsetof(struct signal_frame_64, tramp)) | ||
222 | return 1; | ||
223 | if (vdso64_rt_sigtramp && current->mm->context.vdso_base && | ||
224 | nip == current->mm->context.vdso_base + vdso64_rt_sigtramp) | ||
225 | return 1; | ||
226 | return 0; | ||
227 | } | ||
228 | |||
229 | /* | ||
230 | * Do some sanity checking on the signal frame pointed to by sp. | ||
231 | * We check the pinfo and puc pointers in the frame. | ||
232 | */ | ||
233 | static int sane_signal_64_frame(unsigned long sp) | ||
234 | { | ||
235 | struct signal_frame_64 __user *sf; | ||
236 | unsigned long pinfo, puc; | ||
237 | |||
238 | sf = (struct signal_frame_64 __user *) sp; | ||
239 | if (read_user_stack_64((unsigned long __user *) &sf->pinfo, &pinfo) || | ||
240 | read_user_stack_64((unsigned long __user *) &sf->puc, &puc)) | ||
241 | return 0; | ||
242 | return pinfo == (unsigned long) &sf->info && | ||
243 | puc == (unsigned long) &sf->uc; | ||
244 | } | ||
245 | |||
246 | static void perf_callchain_user_64(struct pt_regs *regs, | ||
247 | struct perf_callchain_entry *entry) | ||
248 | { | ||
249 | unsigned long sp, next_sp; | ||
250 | unsigned long next_ip; | ||
251 | unsigned long lr; | ||
252 | long level = 0; | ||
253 | struct signal_frame_64 __user *sigframe; | ||
254 | unsigned long __user *fp, *uregs; | ||
255 | |||
256 | next_ip = regs->nip; | ||
257 | lr = regs->link; | ||
258 | sp = regs->gpr[1]; | ||
259 | callchain_store(entry, PERF_CONTEXT_USER); | ||
260 | callchain_store(entry, next_ip); | ||
261 | |||
262 | for (;;) { | ||
263 | fp = (unsigned long __user *) sp; | ||
264 | if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp)) | ||
265 | return; | ||
266 | if (level > 0 && read_user_stack_64(&fp[2], &next_ip)) | ||
267 | return; | ||
268 | |||
269 | /* | ||
270 | * Note: the next_sp - sp >= signal frame size check | ||
271 | * is true when next_sp < sp, which can happen when | ||
272 | * transitioning from an alternate signal stack to the | ||
273 | * normal stack. | ||
274 | */ | ||
275 | if (next_sp - sp >= sizeof(struct signal_frame_64) && | ||
276 | (is_sigreturn_64_address(next_ip, sp) || | ||
277 | (level <= 1 && is_sigreturn_64_address(lr, sp))) && | ||
278 | sane_signal_64_frame(sp)) { | ||
279 | /* | ||
280 | * This looks like a signal frame | ||
281 | */ | ||
282 | sigframe = (struct signal_frame_64 __user *) sp; | ||
283 | uregs = sigframe->uc.uc_mcontext.gp_regs; | ||
284 | if (read_user_stack_64(&uregs[PT_NIP], &next_ip) || | ||
285 | read_user_stack_64(&uregs[PT_LNK], &lr) || | ||
286 | read_user_stack_64(&uregs[PT_R1], &sp)) | ||
287 | return; | ||
288 | level = 0; | ||
289 | callchain_store(entry, PERF_CONTEXT_USER); | ||
290 | callchain_store(entry, next_ip); | ||
291 | continue; | ||
292 | } | ||
293 | |||
294 | if (level == 0) | ||
295 | next_ip = lr; | ||
296 | callchain_store(entry, next_ip); | ||
297 | ++level; | ||
298 | sp = next_sp; | ||
299 | } | ||
300 | } | ||
301 | |||
302 | static inline int current_is_64bit(void) | ||
303 | { | ||
304 | /* | ||
305 | * We can't use test_thread_flag() here because we may be on an | ||
306 | * interrupt stack, and the thread flags don't get copied over | ||
307 | * from the thread_info on the main stack to the interrupt stack. | ||
308 | */ | ||
309 | return !test_ti_thread_flag(task_thread_info(current), TIF_32BIT); | ||
310 | } | ||
311 | |||
312 | #else /* CONFIG_PPC64 */ | ||
313 | /* | ||
314 | * On 32-bit we just access the address and let hash_page create a | ||
315 | * HPTE if necessary, so there is no need to fall back to reading | ||
316 | * the page tables. Since this is called at interrupt level, | ||
317 | * do_page_fault() won't treat a DSI as a page fault. | ||
318 | */ | ||
319 | static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret) | ||
320 | { | ||
321 | if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) || | ||
322 | ((unsigned long)ptr & 3)) | ||
323 | return -EFAULT; | ||
324 | |||
325 | return __get_user_inatomic(*ret, ptr); | ||
326 | } | ||
327 | |||
328 | static inline void perf_callchain_user_64(struct pt_regs *regs, | ||
329 | struct perf_callchain_entry *entry) | ||
330 | { | ||
331 | } | ||
332 | |||
333 | static inline int current_is_64bit(void) | ||
334 | { | ||
335 | return 0; | ||
336 | } | ||
337 | |||
338 | static inline int valid_user_sp(unsigned long sp, int is_64) | ||
339 | { | ||
340 | if (!sp || (sp & 7) || sp > TASK_SIZE - 32) | ||
341 | return 0; | ||
342 | return 1; | ||
343 | } | ||
344 | |||
345 | #define __SIGNAL_FRAMESIZE32 __SIGNAL_FRAMESIZE | ||
346 | #define sigcontext32 sigcontext | ||
347 | #define mcontext32 mcontext | ||
348 | #define ucontext32 ucontext | ||
349 | #define compat_siginfo_t struct siginfo | ||
350 | |||
351 | #endif /* CONFIG_PPC64 */ | ||
352 | |||
353 | /* | ||
354 | * Layout for non-RT signal frames | ||
355 | */ | ||
356 | struct signal_frame_32 { | ||
357 | char dummy[__SIGNAL_FRAMESIZE32]; | ||
358 | struct sigcontext32 sctx; | ||
359 | struct mcontext32 mctx; | ||
360 | int abigap[56]; | ||
361 | }; | ||
362 | |||
363 | /* | ||
364 | * Layout for RT signal frames | ||
365 | */ | ||
366 | struct rt_signal_frame_32 { | ||
367 | char dummy[__SIGNAL_FRAMESIZE32 + 16]; | ||
368 | compat_siginfo_t info; | ||
369 | struct ucontext32 uc; | ||
370 | int abigap[56]; | ||
371 | }; | ||
372 | |||
373 | static int is_sigreturn_32_address(unsigned int nip, unsigned int fp) | ||
374 | { | ||
375 | if (nip == fp + offsetof(struct signal_frame_32, mctx.mc_pad)) | ||
376 | return 1; | ||
377 | if (vdso32_sigtramp && current->mm->context.vdso_base && | ||
378 | nip == current->mm->context.vdso_base + vdso32_sigtramp) | ||
379 | return 1; | ||
380 | return 0; | ||
381 | } | ||
382 | |||
383 | static int is_rt_sigreturn_32_address(unsigned int nip, unsigned int fp) | ||
384 | { | ||
385 | if (nip == fp + offsetof(struct rt_signal_frame_32, | ||
386 | uc.uc_mcontext.mc_pad)) | ||
387 | return 1; | ||
388 | if (vdso32_rt_sigtramp && current->mm->context.vdso_base && | ||
389 | nip == current->mm->context.vdso_base + vdso32_rt_sigtramp) | ||
390 | return 1; | ||
391 | return 0; | ||
392 | } | ||
393 | |||
394 | static int sane_signal_32_frame(unsigned int sp) | ||
395 | { | ||
396 | struct signal_frame_32 __user *sf; | ||
397 | unsigned int regs; | ||
398 | |||
399 | sf = (struct signal_frame_32 __user *) (unsigned long) sp; | ||
400 | if (read_user_stack_32((unsigned int __user *) &sf->sctx.regs, ®s)) | ||
401 | return 0; | ||
402 | return regs == (unsigned long) &sf->mctx; | ||
403 | } | ||
404 | |||
405 | static int sane_rt_signal_32_frame(unsigned int sp) | ||
406 | { | ||
407 | struct rt_signal_frame_32 __user *sf; | ||
408 | unsigned int regs; | ||
409 | |||
410 | sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp; | ||
411 | if (read_user_stack_32((unsigned int __user *) &sf->uc.uc_regs, ®s)) | ||
412 | return 0; | ||
413 | return regs == (unsigned long) &sf->uc.uc_mcontext; | ||
414 | } | ||
415 | |||
416 | static unsigned int __user *signal_frame_32_regs(unsigned int sp, | ||
417 | unsigned int next_sp, unsigned int next_ip) | ||
418 | { | ||
419 | struct mcontext32 __user *mctx = NULL; | ||
420 | struct signal_frame_32 __user *sf; | ||
421 | struct rt_signal_frame_32 __user *rt_sf; | ||
422 | |||
423 | /* | ||
424 | * Note: the next_sp - sp >= signal frame size check | ||
425 | * is true when next_sp < sp due to unsigned wraparound, | ||
426 | * for example when transitioning from an alternate | ||
427 | * signal stack to the normal stack. | ||
428 | */ | ||
429 | if (next_sp - sp >= sizeof(struct signal_frame_32) && | ||
430 | is_sigreturn_32_address(next_ip, sp) && | ||
431 | sane_signal_32_frame(sp)) { | ||
432 | sf = (struct signal_frame_32 __user *) (unsigned long) sp; | ||
433 | mctx = &sf->mctx; | ||
434 | } | ||
435 | |||
436 | if (!mctx && next_sp - sp >= sizeof(struct rt_signal_frame_32) && | ||
437 | is_rt_sigreturn_32_address(next_ip, sp) && | ||
438 | sane_rt_signal_32_frame(sp)) { | ||
439 | rt_sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp; | ||
440 | mctx = &rt_sf->uc.uc_mcontext; | ||
441 | } | ||
442 | |||
443 | if (!mctx) | ||
444 | return NULL; | ||
445 | return mctx->mc_gregs; | ||
446 | } | ||
447 | |||
448 | static void perf_callchain_user_32(struct pt_regs *regs, | ||
449 | struct perf_callchain_entry *entry) | ||
450 | { | ||
451 | unsigned int sp, next_sp; | ||
452 | unsigned int next_ip; | ||
453 | unsigned int lr; | ||
454 | long level = 0; | ||
455 | unsigned int __user *fp, *uregs; | ||
456 | |||
457 | next_ip = regs->nip; | ||
458 | lr = regs->link; | ||
459 | sp = regs->gpr[1]; | ||
460 | callchain_store(entry, PERF_CONTEXT_USER); | ||
461 | callchain_store(entry, next_ip); | ||
462 | |||
463 | while (entry->nr < PERF_MAX_STACK_DEPTH) { | ||
464 | fp = (unsigned int __user *) (unsigned long) sp; | ||
465 | if (!valid_user_sp(sp, 0) || read_user_stack_32(fp, &next_sp)) | ||
466 | return; | ||
467 | if (level > 0 && read_user_stack_32(&fp[1], &next_ip)) | ||
468 | return; | ||
469 | |||
470 | uregs = signal_frame_32_regs(sp, next_sp, next_ip); | ||
471 | if (!uregs && level <= 1) | ||
472 | uregs = signal_frame_32_regs(sp, next_sp, lr); | ||
473 | if (uregs) { | ||
474 | /* | ||
475 | * This looks like a signal frame, so restart | ||
476 | * the stack trace with the values in it. | ||
477 | */ | ||
478 | if (read_user_stack_32(&uregs[PT_NIP], &next_ip) || | ||
479 | read_user_stack_32(&uregs[PT_LNK], &lr) || | ||
480 | read_user_stack_32(&uregs[PT_R1], &sp)) | ||
481 | return; | ||
482 | level = 0; | ||
483 | callchain_store(entry, PERF_CONTEXT_USER); | ||
484 | callchain_store(entry, next_ip); | ||
485 | continue; | ||
486 | } | ||
487 | |||
488 | if (level == 0) | ||
489 | next_ip = lr; | ||
490 | callchain_store(entry, next_ip); | ||
491 | ++level; | ||
492 | sp = next_sp; | ||
493 | } | ||
494 | } | ||
495 | |||
496 | /* | ||
497 | * Since we can't get PMU interrupts inside a PMU interrupt handler, | ||
498 | * we don't need separate irq and nmi entries here. | ||
499 | */ | ||
500 | static DEFINE_PER_CPU(struct perf_callchain_entry, callchain); | ||
501 | |||
502 | struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) | ||
503 | { | ||
504 | struct perf_callchain_entry *entry = &__get_cpu_var(callchain); | ||
505 | |||
506 | entry->nr = 0; | ||
507 | |||
508 | if (current->pid == 0) /* idle task? */ | ||
509 | return entry; | ||
510 | |||
511 | if (!user_mode(regs)) { | ||
512 | perf_callchain_kernel(regs, entry); | ||
513 | if (current->mm) | ||
514 | regs = task_pt_regs(current); | ||
515 | else | ||
516 | regs = NULL; | ||
517 | } | ||
518 | |||
519 | if (regs) { | ||
520 | if (current_is_64bit()) | ||
521 | perf_callchain_user_64(regs, entry); | ||
522 | else | ||
523 | perf_callchain_user_32(regs, entry); | ||
524 | } | ||
525 | |||
526 | return entry; | ||
527 | } | ||
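A note on the walkers above: both rely on unsigned arithmetic in the "next_sp - sp >= frame size" test. When the unwinder steps off an alternate signal stack, next_sp is numerically below sp, yet the test still passes because the subtraction wraps around. A standalone sketch of that behaviour (illustrative values, not kernel code):

    #include <stdio.h>

    int main(void)
    {
            unsigned long sp = 0xbffff000UL;      /* frame on the alternate signal stack */
            unsigned long next_sp = 0x7ffff000UL; /* saved r1 points back at the normal stack */

            /* Unsigned subtraction wraps, so next_sp < sp still yields a
             * huge difference and the frame-size test passes. */
            if (next_sp - sp >= 128)
                    printf("check passes: next_sp - sp = %#lx\n", next_sp - sp);
            return 0;
    }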
diff --git a/arch/powerpc/kernel/power7-pmu.c b/arch/powerpc/kernel/power7-pmu.c index 388cf57ad827..018d094d92f9 100644 --- a/arch/powerpc/kernel/power7-pmu.c +++ b/arch/powerpc/kernel/power7-pmu.c | |||
@@ -317,7 +317,7 @@ static int power7_generic_events[] = { | |||
317 | */ | 317 | */ |
318 | static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { | 318 | static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { |
319 | [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */ | 319 | [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */ |
320 | [C(OP_READ)] = { 0x400f0, 0xc880 }, | 320 | [C(OP_READ)] = { 0xc880, 0x400f0 }, |
321 | [C(OP_WRITE)] = { 0, 0x300f0 }, | 321 | [C(OP_WRITE)] = { 0, 0x300f0 }, |
322 | [C(OP_PREFETCH)] = { 0xd8b8, 0 }, | 322 | [C(OP_PREFETCH)] = { 0xd8b8, 0 }, |
323 | }, | 323 | }, |
@@ -327,8 +327,8 @@ static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { | |||
327 | [C(OP_PREFETCH)] = { 0x408a, 0 }, | 327 | [C(OP_PREFETCH)] = { 0x408a, 0 }, |
328 | }, | 328 | }, |
329 | [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */ | 329 | [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */ |
330 | [C(OP_READ)] = { 0x6080, 0x6084 }, | 330 | [C(OP_READ)] = { 0x16080, 0x26080 }, |
331 | [C(OP_WRITE)] = { 0x6082, 0x6086 }, | 331 | [C(OP_WRITE)] = { 0x16082, 0x26082 }, |
332 | [C(OP_PREFETCH)] = { 0, 0 }, | 332 | [C(OP_PREFETCH)] = { 0, 0 }, |
333 | }, | 333 | }, |
334 | [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */ | 334 | [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */ |
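For context on the power7-pmu fix: power7_cache_events is indexed as [cache][op][result], and each initializer pair is { RESULT_ACCESS, RESULT_MISS }, so the L1D/OP_READ change swaps two event codes that had been stored in the wrong slots (the LL codes are corrected at the same time). A hedged sketch of how such a table resolves a generic cache event (simplified; the real lookup lives in the powerpc perf core):

    #include <stdio.h>

    enum { L1D, CACHE_MAX };
    enum { OP_READ, OP_WRITE, OP_PREFETCH, OP_MAX };
    enum { RESULT_ACCESS, RESULT_MISS, RESULT_MAX };

    static const int cache_events[CACHE_MAX][OP_MAX][RESULT_MAX] = {
            [L1D] = {
                    [OP_READ]     = { 0xc880, 0x400f0 },  /* { access, miss } */
                    [OP_WRITE]    = { 0,      0x300f0 },
                    [OP_PREFETCH] = { 0xd8b8, 0 },
            },
    };

    /* 0 is used as a "not supported" sentinel, much as the kernel does. */
    static int cache_event_code(int cache, int op, int result)
    {
            int code = cache_events[cache][op][result];
            return code ? code : -1;
    }

    int main(void)
    {
            printf("L1D read miss -> %#x\n",
                   cache_event_code(L1D, OP_READ, RESULT_MISS));
            return 0;
    }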
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c index 5b7038f248b6..a685652effeb 100644 --- a/arch/powerpc/mm/slb.c +++ b/arch/powerpc/mm/slb.c | |||
@@ -92,15 +92,13 @@ static inline void create_shadowed_slbe(unsigned long ea, int ssize, | |||
92 | : "memory" ); | 92 | : "memory" ); |
93 | } | 93 | } |
94 | 94 | ||
95 | void slb_flush_and_rebolt(void) | 95 | static void __slb_flush_and_rebolt(void) |
96 | { | 96 | { |
97 | /* If you change this make sure you change SLB_NUM_BOLTED | 97 | /* If you change this make sure you change SLB_NUM_BOLTED |
98 | * appropriately too. */ | 98 | * appropriately too. */ |
99 | unsigned long linear_llp, vmalloc_llp, lflags, vflags; | 99 | unsigned long linear_llp, vmalloc_llp, lflags, vflags; |
100 | unsigned long ksp_esid_data, ksp_vsid_data; | 100 | unsigned long ksp_esid_data, ksp_vsid_data; |
101 | 101 | ||
102 | WARN_ON(!irqs_disabled()); | ||
103 | |||
104 | linear_llp = mmu_psize_defs[mmu_linear_psize].sllp; | 102 | linear_llp = mmu_psize_defs[mmu_linear_psize].sllp; |
105 | vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp; | 103 | vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp; |
106 | lflags = SLB_VSID_KERNEL | linear_llp; | 104 | lflags = SLB_VSID_KERNEL | linear_llp; |
@@ -117,12 +115,6 @@ void slb_flush_and_rebolt(void) | |||
117 | ksp_vsid_data = get_slb_shadow()->save_area[2].vsid; | 115 | ksp_vsid_data = get_slb_shadow()->save_area[2].vsid; |
118 | } | 116 | } |
119 | 117 | ||
120 | /* | ||
121 | * We can't take a PMU exception in the following code, so hard | ||
122 | * disable interrupts. | ||
123 | */ | ||
124 | hard_irq_disable(); | ||
125 | |||
126 | /* We need to do this all in asm, so we're sure we don't touch | 118 | /* We need to do this all in asm, so we're sure we don't touch |
127 | * the stack between the slbia and rebolting it. */ | 119 | * the stack between the slbia and rebolting it. */ |
128 | asm volatile("isync\n" | 120 | asm volatile("isync\n" |
@@ -139,6 +131,21 @@ void slb_flush_and_rebolt(void) | |||
139 | : "memory"); | 131 | : "memory"); |
140 | } | 132 | } |
141 | 133 | ||
134 | void slb_flush_and_rebolt(void) | ||
135 | { | ||
136 | |||
137 | WARN_ON(!irqs_disabled()); | ||
138 | |||
139 | /* | ||
140 | * We can't take a PMU exception in the following code, so hard | ||
141 | * disable interrupts. | ||
142 | */ | ||
143 | hard_irq_disable(); | ||
144 | |||
145 | __slb_flush_and_rebolt(); | ||
146 | get_paca()->slb_cache_ptr = 0; | ||
147 | } | ||
148 | |||
142 | void slb_vmalloc_update(void) | 149 | void slb_vmalloc_update(void) |
143 | { | 150 | { |
144 | unsigned long vflags; | 151 | unsigned long vflags; |
@@ -180,12 +187,20 @@ static inline int esids_match(unsigned long addr1, unsigned long addr2) | |||
180 | /* Flush all user entries from the segment table of the current processor. */ | 187 | /* Flush all user entries from the segment table of the current processor. */ |
181 | void switch_slb(struct task_struct *tsk, struct mm_struct *mm) | 188 | void switch_slb(struct task_struct *tsk, struct mm_struct *mm) |
182 | { | 189 | { |
183 | unsigned long offset = get_paca()->slb_cache_ptr; | 190 | unsigned long offset; |
184 | unsigned long slbie_data = 0; | 191 | unsigned long slbie_data = 0; |
185 | unsigned long pc = KSTK_EIP(tsk); | 192 | unsigned long pc = KSTK_EIP(tsk); |
186 | unsigned long stack = KSTK_ESP(tsk); | 193 | unsigned long stack = KSTK_ESP(tsk); |
187 | unsigned long unmapped_base; | 194 | unsigned long unmapped_base; |
188 | 195 | ||
196 | /* | ||
197 | * We need interrupts hard-disabled here, not just soft-disabled, | ||
198 | * so that a PMU interrupt can't occur, which might try to access | ||
199 | * user memory (to get a stack trace) and possibly cause an SLB miss | ||
200 | * which would update the slb_cache/slb_cache_ptr fields in the PACA. | ||
201 | */ | ||
202 | hard_irq_disable(); | ||
203 | offset = get_paca()->slb_cache_ptr; | ||
189 | if (!cpu_has_feature(CPU_FTR_NO_SLBIE_B) && | 204 | if (!cpu_has_feature(CPU_FTR_NO_SLBIE_B) && |
190 | offset <= SLB_CACHE_ENTRIES) { | 205 | offset <= SLB_CACHE_ENTRIES) { |
191 | int i; | 206 | int i; |
@@ -200,7 +215,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm) | |||
200 | } | 215 | } |
201 | asm volatile("isync" : : : "memory"); | 216 | asm volatile("isync" : : : "memory"); |
202 | } else { | 217 | } else { |
203 | slb_flush_and_rebolt(); | 218 | __slb_flush_and_rebolt(); |
204 | } | 219 | } |
205 | 220 | ||
206 | /* Workaround POWER5 < DD2.1 issue */ | 221 | /* Workaround POWER5 < DD2.1 issue */ |
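The slb.c rework is a precondition split: __slb_flush_and_rebolt() keeps the body and assumes interrupts are already hard-disabled, while slb_flush_and_rebolt() establishes that precondition, delegates, and resets the PACA's SLB cache pointer; switch_slb(), which now hard-disables interrupts before touching slb_cache_ptr, calls the bare helper. A toy sketch of the shape (names echo the patch; the bodies are stand-ins):

    #include <stdio.h>

    static int hard_disabled;
    static unsigned long slb_cache_ptr = 99;

    static void hard_irq_disable(void) { hard_disabled = 1; }

    /* Precondition: the caller has already hard-disabled interrupts. */
    static void __flush_and_rebolt(void)
    {
            printf("flush (hard_disabled=%d)\n", hard_disabled);
    }

    /* Public entry point: establish the precondition, then delegate. */
    static void flush_and_rebolt(void)
    {
            hard_irq_disable();      /* no PMU interrupt from here on */
            __flush_and_rebolt();
            slb_cache_ptr = 0;       /* safe: nothing can race the reset */
    }

    /* Caller that disables first, then snapshots the cache pointer. */
    static void switch_slb_like(void)
    {
            hard_irq_disable();
            if (slb_cache_ptr > 8)   /* stand-in for SLB_CACHE_ENTRIES */
                    __flush_and_rebolt();
    }

    int main(void)
    {
            switch_slb_like();
            flush_and_rebolt();
            return 0;
    }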
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c index 98cd1dc2ae75..ab5fb48b3e90 100644 --- a/arch/powerpc/mm/stab.c +++ b/arch/powerpc/mm/stab.c | |||
@@ -164,7 +164,7 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm) | |||
164 | { | 164 | { |
165 | struct stab_entry *stab = (struct stab_entry *) get_paca()->stab_addr; | 165 | struct stab_entry *stab = (struct stab_entry *) get_paca()->stab_addr; |
166 | struct stab_entry *ste; | 166 | struct stab_entry *ste; |
167 | unsigned long offset = __get_cpu_var(stab_cache_ptr); | 167 | unsigned long offset; |
168 | unsigned long pc = KSTK_EIP(tsk); | 168 | unsigned long pc = KSTK_EIP(tsk); |
169 | unsigned long stack = KSTK_ESP(tsk); | 169 | unsigned long stack = KSTK_ESP(tsk); |
170 | unsigned long unmapped_base; | 170 | unsigned long unmapped_base; |
@@ -172,6 +172,15 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm) | |||
172 | /* Force previous translations to complete. DRENG */ | 172 | /* Force previous translations to complete. DRENG */ |
173 | asm volatile("isync" : : : "memory"); | 173 | asm volatile("isync" : : : "memory"); |
174 | 174 | ||
175 | /* | ||
176 | * We need interrupts hard-disabled here, not just soft-disabled, | ||
177 | * so that a PMU interrupt can't occur, which might try to access | ||
178 | * user memory (to get a stack trace) and possibly cause an STAB miss | ||
179 | * which would update the stab_cache/stab_cache_ptr per-cpu variables. | ||
180 | */ | ||
181 | hard_irq_disable(); | ||
182 | |||
183 | offset = __get_cpu_var(stab_cache_ptr); | ||
175 | if (offset <= NR_STAB_CACHE_ENTRIES) { | 184 | if (offset <= NR_STAB_CACHE_ENTRIES) { |
176 | int i; | 185 | int i; |
177 | 186 | ||
diff --git a/arch/powerpc/sysdev/xilinx_intc.c b/arch/powerpc/sysdev/xilinx_intc.c index 3ee1fd37bbfc..40edad520770 100644 --- a/arch/powerpc/sysdev/xilinx_intc.c +++ b/arch/powerpc/sysdev/xilinx_intc.c | |||
@@ -234,7 +234,6 @@ static void xilinx_i8259_cascade(unsigned int irq, struct irq_desc *desc) | |||
234 | generic_handle_irq(cascade_irq); | 234 | generic_handle_irq(cascade_irq); |
235 | 235 | ||
236 | /* Let xilinx_intc end the interrupt */ | 236 | /* Let xilinx_intc end the interrupt */ |
237 | desc->chip->ack(irq); | ||
238 | desc->chip->unmask(irq); | 237 | desc->chip->unmask(irq); |
239 | } | 238 | } |
240 | 239 | ||
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 2ae5d72f47ed..1c866efd217d 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -84,7 +84,7 @@ config S390 | |||
84 | select HAVE_FUNCTION_TRACER | 84 | select HAVE_FUNCTION_TRACER |
85 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST | 85 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST |
86 | select HAVE_FTRACE_MCOUNT_RECORD | 86 | select HAVE_FTRACE_MCOUNT_RECORD |
87 | select HAVE_FTRACE_SYSCALLS | 87 | select HAVE_SYSCALL_TRACEPOINTS |
88 | select HAVE_DYNAMIC_FTRACE | 88 | select HAVE_DYNAMIC_FTRACE |
89 | select HAVE_FUNCTION_GRAPH_TRACER | 89 | select HAVE_FUNCTION_GRAPH_TRACER |
90 | select HAVE_DEFAULT_NO_SPIN_MUTEXES | 90 | select HAVE_DEFAULT_NO_SPIN_MUTEXES |
@@ -95,7 +95,6 @@ config S390 | |||
95 | select HAVE_ARCH_TRACEHOOK | 95 | select HAVE_ARCH_TRACEHOOK |
96 | select INIT_ALL_POSSIBLE | 96 | select INIT_ALL_POSSIBLE |
97 | select HAVE_PERF_COUNTERS | 97 | select HAVE_PERF_COUNTERS |
98 | select GENERIC_ATOMIC64 if !64BIT | ||
99 | 98 | ||
100 | config SCHED_OMIT_FRAME_POINTER | 99 | config SCHED_OMIT_FRAME_POINTER |
101 | bool | 100 | bool |
@@ -481,13 +480,6 @@ config CMM_IUCV | |||
481 | Select this option to enable the special message interface to | 480 | Select this option to enable the special message interface to |
482 | the cooperative memory management. | 481 | the cooperative memory management. |
483 | 482 | ||
484 | config PAGE_STATES | ||
485 | bool "Unused page notification" | ||
486 | help | ||
487 | This enables the notification of unused pages to the | ||
488 | hypervisor. The ESSA instruction is used to do the states | ||
489 | changes between a page that has content and the unused state. | ||
490 | |||
491 | config APPLDATA_BASE | 483 | config APPLDATA_BASE |
492 | bool "Linux - VM Monitor Stream, base infrastructure" | 484 | bool "Linux - VM Monitor Stream, base infrastructure" |
493 | depends on PROC_FS | 485 | depends on PROC_FS |
diff --git a/arch/s390/Makefile b/arch/s390/Makefile index 0ff387cebf88..fc8fb20e7fc0 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile | |||
@@ -88,8 +88,7 @@ LDFLAGS_vmlinux := -e start | |||
88 | head-y := arch/s390/kernel/head.o arch/s390/kernel/init_task.o | 88 | head-y := arch/s390/kernel/head.o arch/s390/kernel/init_task.o |
89 | 89 | ||
90 | core-y += arch/s390/mm/ arch/s390/kernel/ arch/s390/crypto/ \ | 90 | core-y += arch/s390/mm/ arch/s390/kernel/ arch/s390/crypto/ \ |
91 | arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/ \ | 91 | arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/ |
92 | arch/s390/power/ | ||
93 | 92 | ||
94 | libs-y += arch/s390/lib/ | 93 | libs-y += arch/s390/lib/ |
95 | drivers-y += drivers/s390/ | 94 | drivers-y += drivers/s390/ |
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c index 4aba83b31596..2bc479ab3a66 100644 --- a/arch/s390/crypto/des_s390.c +++ b/arch/s390/crypto/des_s390.c | |||
@@ -250,8 +250,9 @@ static int des3_128_setkey(struct crypto_tfm *tfm, const u8 *key, | |||
250 | const u8 *temp_key = key; | 250 | const u8 *temp_key = key; |
251 | u32 *flags = &tfm->crt_flags; | 251 | u32 *flags = &tfm->crt_flags; |
252 | 252 | ||
253 | if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE))) { | 253 | if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE)) && |
254 | *flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED; | 254 | (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { |
255 | *flags |= CRYPTO_TFM_RES_WEAK_KEY; | ||
255 | return -EINVAL; | 256 | return -EINVAL; |
256 | } | 257 | } |
257 | for (i = 0; i < 2; i++, temp_key += DES_KEY_SIZE) { | 258 | for (i = 0; i < 2; i++, temp_key += DES_KEY_SIZE) { |
@@ -411,9 +412,9 @@ static int des3_192_setkey(struct crypto_tfm *tfm, const u8 *key, | |||
411 | 412 | ||
412 | if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) && | 413 | if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) && |
413 | memcmp(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2], | 414 | memcmp(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2], |
414 | DES_KEY_SIZE))) { | 415 | DES_KEY_SIZE)) && |
415 | 416 | (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { | |
416 | *flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED; | 417 | *flags |= CRYPTO_TFM_RES_WEAK_KEY; |
417 | return -EINVAL; | 418 | return -EINVAL; |
418 | } | 419 | } |
419 | for (i = 0; i < 3; i++, temp_key += DES_KEY_SIZE) { | 420 | for (i = 0; i < 3; i++, temp_key += DES_KEY_SIZE) { |
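The des_s390 change makes the "all key parts equal" test conditional: identical halves collapse 3DES to single DES, which after this patch is treated as a weak key and rejected only when the caller opted in via CRYPTO_TFM_REQ_WEAK_KEY, reported as CRYPTO_TFM_RES_WEAK_KEY rather than a bad key schedule. The gating logic, reduced to a standalone sketch (flag values are illustrative stand-ins):

    #include <stdio.h>
    #include <string.h>

    #define DES_KEY_SIZE 8
    #define REQ_WEAK_KEY 0x100    /* stand-in for CRYPTO_TFM_REQ_WEAK_KEY */
    #define RES_WEAK_KEY 0x200    /* stand-in for CRYPTO_TFM_RES_WEAK_KEY */

    static int des3_128_check(const unsigned char *key, unsigned int *flags)
    {
            /* Equal halves degrade 3DES to DES: weak, but only an error
             * if the user asked for weak-key checking. */
            if (!memcmp(key, key + DES_KEY_SIZE, DES_KEY_SIZE) &&
                (*flags & REQ_WEAK_KEY)) {
                    *flags |= RES_WEAK_KEY;
                    return -1;    /* the kernel returns -EINVAL */
            }
            return 0;
    }

    int main(void)
    {
            unsigned char key[2 * DES_KEY_SIZE] = { 0 };  /* identical halves */
            unsigned int lax = 0, strict = REQ_WEAK_KEY;

            printf("lax: %d, strict: %d\n",
                   des3_128_check(key, &lax), des3_128_check(key, &strict));
            return 0;
    }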
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c index e85ba348722a..f6de7826c979 100644 --- a/arch/s390/crypto/sha1_s390.c +++ b/arch/s390/crypto/sha1_s390.c | |||
@@ -46,12 +46,38 @@ static int sha1_init(struct shash_desc *desc) | |||
46 | return 0; | 46 | return 0; |
47 | } | 47 | } |
48 | 48 | ||
49 | static int sha1_export(struct shash_desc *desc, void *out) | ||
50 | { | ||
51 | struct s390_sha_ctx *sctx = shash_desc_ctx(desc); | ||
52 | struct sha1_state *octx = out; | ||
53 | |||
54 | octx->count = sctx->count; | ||
55 | memcpy(octx->state, sctx->state, sizeof(octx->state)); | ||
56 | memcpy(octx->buffer, sctx->buf, sizeof(octx->buffer)); | ||
57 | return 0; | ||
58 | } | ||
59 | |||
60 | static int sha1_import(struct shash_desc *desc, const void *in) | ||
61 | { | ||
62 | struct s390_sha_ctx *sctx = shash_desc_ctx(desc); | ||
63 | const struct sha1_state *ictx = in; | ||
64 | |||
65 | sctx->count = ictx->count; | ||
66 | memcpy(sctx->state, ictx->state, sizeof(ictx->state)); | ||
67 | memcpy(sctx->buf, ictx->buffer, sizeof(ictx->buffer)); | ||
68 | sctx->func = KIMD_SHA_1; | ||
69 | return 0; | ||
70 | } | ||
71 | |||
49 | static struct shash_alg alg = { | 72 | static struct shash_alg alg = { |
50 | .digestsize = SHA1_DIGEST_SIZE, | 73 | .digestsize = SHA1_DIGEST_SIZE, |
51 | .init = sha1_init, | 74 | .init = sha1_init, |
52 | .update = s390_sha_update, | 75 | .update = s390_sha_update, |
53 | .final = s390_sha_final, | 76 | .final = s390_sha_final, |
77 | .export = sha1_export, | ||
78 | .import = sha1_import, | ||
54 | .descsize = sizeof(struct s390_sha_ctx), | 79 | .descsize = sizeof(struct s390_sha_ctx), |
80 | .statesize = sizeof(struct sha1_state), | ||
55 | .base = { | 81 | .base = { |
56 | .cra_name = "sha1", | 82 | .cra_name = "sha1", |
57 | .cra_driver_name= "sha1-s390", | 83 | .cra_driver_name= "sha1-s390", |
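The export/import hooks added here serialize a partially computed digest into the generic struct sha1_state (hence the new .statesize), so a caller can checkpoint a hash in flight and resume it later, even through a different "sha1" implementation. A rough usage sketch, assuming kernel context and abbreviating error handling:

    #include <crypto/hash.h>
    #include <crypto/sha.h>
    #include <linux/err.h>
    #include <linux/slab.h>

    static int sha1_suspend_resume(const u8 *p1, unsigned int l1,
                                   const u8 *p2, unsigned int l2, u8 *digest)
    {
            struct crypto_shash *tfm = crypto_alloc_shash("sha1", 0, 0);
            struct shash_desc *desc;
            struct sha1_state state;    /* matches .statesize above */
            int err;

            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);
            desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm),
                           GFP_KERNEL);
            if (!desc) {
                    crypto_free_shash(tfm);
                    return -ENOMEM;
            }
            desc->tfm = tfm;

            err = crypto_shash_init(desc) ?:
                  crypto_shash_update(desc, p1, l1) ?:
                  crypto_shash_export(desc, &state);  /* checkpoint */
            if (!err)                                 /* ...later... */
                    err = crypto_shash_import(desc, &state) ?:
                          crypto_shash_update(desc, p2, l2) ?:
                          crypto_shash_final(desc, digest);

            kfree(desc);
            crypto_free_shash(tfm);
            return err;
    }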
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c index f9fefc569632..61a7db372121 100644 --- a/arch/s390/crypto/sha256_s390.c +++ b/arch/s390/crypto/sha256_s390.c | |||
@@ -42,12 +42,38 @@ static int sha256_init(struct shash_desc *desc) | |||
42 | return 0; | 42 | return 0; |
43 | } | 43 | } |
44 | 44 | ||
45 | static int sha256_export(struct shash_desc *desc, void *out) | ||
46 | { | ||
47 | struct s390_sha_ctx *sctx = shash_desc_ctx(desc); | ||
48 | struct sha256_state *octx = out; | ||
49 | |||
50 | octx->count = sctx->count; | ||
51 | memcpy(octx->state, sctx->state, sizeof(octx->state)); | ||
52 | memcpy(octx->buf, sctx->buf, sizeof(octx->buf)); | ||
53 | return 0; | ||
54 | } | ||
55 | |||
56 | static int sha256_import(struct shash_desc *desc, const void *in) | ||
57 | { | ||
58 | struct s390_sha_ctx *sctx = shash_desc_ctx(desc); | ||
59 | const struct sha256_state *ictx = in; | ||
60 | |||
61 | sctx->count = ictx->count; | ||
62 | memcpy(sctx->state, ictx->state, sizeof(ictx->state)); | ||
63 | memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf)); | ||
64 | sctx->func = KIMD_SHA_256; | ||
65 | return 0; | ||
66 | } | ||
67 | |||
45 | static struct shash_alg alg = { | 68 | static struct shash_alg alg = { |
46 | .digestsize = SHA256_DIGEST_SIZE, | 69 | .digestsize = SHA256_DIGEST_SIZE, |
47 | .init = sha256_init, | 70 | .init = sha256_init, |
48 | .update = s390_sha_update, | 71 | .update = s390_sha_update, |
49 | .final = s390_sha_final, | 72 | .final = s390_sha_final, |
73 | .export = sha256_export, | ||
74 | .import = sha256_import, | ||
50 | .descsize = sizeof(struct s390_sha_ctx), | 75 | .descsize = sizeof(struct s390_sha_ctx), |
76 | .statesize = sizeof(struct sha256_state), | ||
51 | .base = { | 77 | .base = { |
52 | .cra_name = "sha256", | 78 | .cra_name = "sha256", |
53 | .cra_driver_name= "sha256-s390", | 79 | .cra_driver_name= "sha256-s390", |
diff --git a/arch/s390/crypto/sha512_s390.c b/arch/s390/crypto/sha512_s390.c index 83192bfc8048..4bf73d0dc525 100644 --- a/arch/s390/crypto/sha512_s390.c +++ b/arch/s390/crypto/sha512_s390.c | |||
@@ -13,7 +13,10 @@ | |||
13 | * | 13 | * |
14 | */ | 14 | */ |
15 | #include <crypto/internal/hash.h> | 15 | #include <crypto/internal/hash.h> |
16 | #include <crypto/sha.h> | ||
17 | #include <linux/errno.h> | ||
16 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/kernel.h> | ||
17 | #include <linux/module.h> | 20 | #include <linux/module.h> |
18 | 21 | ||
19 | #include "sha.h" | 22 | #include "sha.h" |
@@ -37,12 +40,42 @@ static int sha512_init(struct shash_desc *desc) | |||
37 | return 0; | 40 | return 0; |
38 | } | 41 | } |
39 | 42 | ||
43 | static int sha512_export(struct shash_desc *desc, void *out) | ||
44 | { | ||
45 | struct s390_sha_ctx *sctx = shash_desc_ctx(desc); | ||
46 | struct sha512_state *octx = out; | ||
47 | |||
48 | octx->count[0] = sctx->count; | ||
49 | octx->count[1] = 0; | ||
50 | memcpy(octx->state, sctx->state, sizeof(octx->state)); | ||
51 | memcpy(octx->buf, sctx->buf, sizeof(octx->buf)); | ||
52 | return 0; | ||
53 | } | ||
54 | |||
55 | static int sha512_import(struct shash_desc *desc, const void *in) | ||
56 | { | ||
57 | struct s390_sha_ctx *sctx = shash_desc_ctx(desc); | ||
58 | const struct sha512_state *ictx = in; | ||
59 | |||
60 | if (unlikely(ictx->count[1])) | ||
61 | return -ERANGE; | ||
62 | sctx->count = ictx->count[0]; | ||
63 | |||
64 | memcpy(sctx->state, ictx->state, sizeof(ictx->state)); | ||
65 | memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf)); | ||
66 | sctx->func = KIMD_SHA_512; | ||
67 | return 0; | ||
68 | } | ||
69 | |||
40 | static struct shash_alg sha512_alg = { | 70 | static struct shash_alg sha512_alg = { |
41 | .digestsize = SHA512_DIGEST_SIZE, | 71 | .digestsize = SHA512_DIGEST_SIZE, |
42 | .init = sha512_init, | 72 | .init = sha512_init, |
43 | .update = s390_sha_update, | 73 | .update = s390_sha_update, |
44 | .final = s390_sha_final, | 74 | .final = s390_sha_final, |
75 | .export = sha512_export, | ||
76 | .import = sha512_import, | ||
45 | .descsize = sizeof(struct s390_sha_ctx), | 77 | .descsize = sizeof(struct s390_sha_ctx), |
78 | .statesize = sizeof(struct sha512_state), | ||
46 | .base = { | 79 | .base = { |
47 | .cra_name = "sha512", | 80 | .cra_name = "sha512", |
48 | .cra_driver_name= "sha512-s390", | 81 | .cra_driver_name= "sha512-s390", |
@@ -78,7 +111,10 @@ static struct shash_alg sha384_alg = { | |||
78 | .init = sha384_init, | 111 | .init = sha384_init, |
79 | .update = s390_sha_update, | 112 | .update = s390_sha_update, |
80 | .final = s390_sha_final, | 113 | .final = s390_sha_final, |
114 | .export = sha512_export, | ||
115 | .import = sha512_import, | ||
81 | .descsize = sizeof(struct s390_sha_ctx), | 116 | .descsize = sizeof(struct s390_sha_ctx), |
117 | .statesize = sizeof(struct sha512_state), | ||
82 | .base = { | 118 | .base = { |
83 | .cra_name = "sha384", | 119 | .cra_name = "sha384", |
84 | .cra_driver_name= "sha384-s390", | 120 | .cra_driver_name= "sha384-s390", |
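One subtlety in sha512_import() above: the generic struct sha512_state carries a 128-bit message length (count[0] low word, count[1] high word), while the s390 context keeps only 64 bits, so a state whose high word is set cannot be represented and is refused with -ERANGE instead of being silently truncated. The guard, reduced to its essentials (illustrative):

    #include <errno.h>
    #include <stdio.h>

    struct state128 { unsigned long long count[2]; }; /* generic: 128-bit count */
    struct state64  { unsigned long long count; };    /* s390: 64-bit count */

    static int import_count(struct state64 *dst, const struct state128 *src)
    {
            if (src->count[1])      /* high word set: does not fit in 64 bits */
                    return -ERANGE; /* refuse rather than truncate */
            dst->count = src->count[0];
            return 0;
    }

    int main(void)
    {
            struct state128 small = { { 10, 0 } }, big = { { 10, 1 } };
            struct state64 out;

            printf("small: %d, big: %d\n",
                   import_count(&out, &small), import_count(&out, &big));
            return 0;
    }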
diff --git a/arch/s390/defconfig b/arch/s390/defconfig index fcba206529f3..4e91a2573cc4 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig | |||
@@ -900,7 +900,7 @@ CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y | |||
900 | CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y | 900 | CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y |
901 | CONFIG_HAVE_DYNAMIC_FTRACE=y | 901 | CONFIG_HAVE_DYNAMIC_FTRACE=y |
902 | CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y | 902 | CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y |
903 | CONFIG_HAVE_FTRACE_SYSCALLS=y | 903 | CONFIG_HAVE_SYSCALL_TRACEPOINTS=y |
904 | CONFIG_TRACING_SUPPORT=y | 904 | CONFIG_TRACING_SUPPORT=y |
905 | CONFIG_FTRACE=y | 905 | CONFIG_FTRACE=y |
906 | # CONFIG_FUNCTION_TRACER is not set | 906 | # CONFIG_FUNCTION_TRACER is not set |
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c index 5a805df216bb..bd9914b89488 100644 --- a/arch/s390/hypfs/inode.c +++ b/arch/s390/hypfs/inode.c | |||
@@ -355,11 +355,7 @@ static struct dentry *hypfs_create_file(struct super_block *sb, | |||
355 | { | 355 | { |
356 | struct dentry *dentry; | 356 | struct dentry *dentry; |
357 | struct inode *inode; | 357 | struct inode *inode; |
358 | struct qstr qname; | ||
359 | 358 | ||
360 | qname.name = name; | ||
361 | qname.len = strlen(name); | ||
362 | qname.hash = full_name_hash(name, qname.len); | ||
363 | mutex_lock(&parent->d_inode->i_mutex); | 359 | mutex_lock(&parent->d_inode->i_mutex); |
364 | dentry = lookup_one_len(name, parent, strlen(name)); | 360 | dentry = lookup_one_len(name, parent, strlen(name)); |
365 | if (IS_ERR(dentry)) { | 361 | if (IS_ERR(dentry)) { |
@@ -426,7 +422,7 @@ struct dentry *hypfs_create_u64(struct super_block *sb, struct dentry *dir, | |||
426 | char tmp[TMP_SIZE]; | 422 | char tmp[TMP_SIZE]; |
427 | struct dentry *dentry; | 423 | struct dentry *dentry; |
428 | 424 | ||
429 | snprintf(tmp, TMP_SIZE, "%lld\n", (unsigned long long int)value); | 425 | snprintf(tmp, TMP_SIZE, "%llu\n", (unsigned long long int)value); |
430 | buffer = kstrdup(tmp, GFP_KERNEL); | 426 | buffer = kstrdup(tmp, GFP_KERNEL); |
431 | if (!buffer) | 427 | if (!buffer) |
432 | return ERR_PTR(-ENOMEM); | 428 | return ERR_PTR(-ENOMEM); |
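Two small hypfs cleanups: the hand-rolled qstr becomes dead code once lookup_one_len() does the lookup, and the format string changes to %llu because the value is unsigned; with %lld, a u64 with its top bit set prints as a negative number. The latter, demonstrated standalone:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long value = 0x8000000000000000ULL; /* top bit set */

            printf("%%lld prints %lld\n", (long long)value);  /* negative */
            printf("%%llu prints %llu\n", value);             /* as intended */
            return 0;
    }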
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h index c7d0abfb0f00..ae7c8f9f94a5 100644 --- a/arch/s390/include/asm/atomic.h +++ b/arch/s390/include/asm/atomic.h | |||
@@ -1,33 +1,23 @@ | |||
1 | #ifndef __ARCH_S390_ATOMIC__ | 1 | #ifndef __ARCH_S390_ATOMIC__ |
2 | #define __ARCH_S390_ATOMIC__ | 2 | #define __ARCH_S390_ATOMIC__ |
3 | 3 | ||
4 | #include <linux/compiler.h> | ||
5 | #include <linux/types.h> | ||
6 | |||
7 | /* | 4 | /* |
8 | * include/asm-s390/atomic.h | 5 | * Copyright 1999,2009 IBM Corp. |
6 | * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>, | ||
7 | * Denis Joseph Barrow, | ||
8 | * Arnd Bergmann <arndb@de.ibm.com>, | ||
9 | * | 9 | * |
10 | * S390 version | 10 | * Atomic operations that C can't guarantee us. |
11 | * Copyright (C) 1999-2005 IBM Deutschland Entwicklung GmbH, IBM Corporation | 11 | * Useful for resource counting etc. |
12 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), | 12 | * s390 uses 'Compare And Swap' for atomicity in SMP environment.
13 | * Denis Joseph Barrow, | ||
14 | * Arnd Bergmann (arndb@de.ibm.com) | ||
15 | * | ||
16 | * Derived from "include/asm-i386/bitops.h" | ||
17 | * Copyright (C) 1992, Linus Torvalds | ||
18 | * | 13 | * |
19 | */ | 14 | */ |
20 | 15 | ||
21 | /* | 16 | #include <linux/compiler.h> |
22 | * Atomic operations that C can't guarantee us. Useful for | 17 | #include <linux/types.h> |
23 | * resource counting etc.. | ||
24 | * S390 uses 'Compare And Swap' for atomicity in SMP enviroment | ||
25 | */ | ||
26 | 18 | ||
27 | #define ATOMIC_INIT(i) { (i) } | 19 | #define ATOMIC_INIT(i) { (i) } |
28 | 20 | ||
29 | #ifdef __KERNEL__ | ||
30 | |||
31 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) | 21 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) |
32 | 22 | ||
33 | #define __CS_LOOP(ptr, op_val, op_string) ({ \ | 23 | #define __CS_LOOP(ptr, op_val, op_string) ({ \ |
@@ -77,7 +67,7 @@ static inline void atomic_set(atomic_t *v, int i) | |||
77 | barrier(); | 67 | barrier(); |
78 | } | 68 | } |
79 | 69 | ||
80 | static __inline__ int atomic_add_return(int i, atomic_t * v) | 70 | static inline int atomic_add_return(int i, atomic_t *v) |
81 | { | 71 | { |
82 | return __CS_LOOP(v, i, "ar"); | 72 | return __CS_LOOP(v, i, "ar"); |
83 | } | 73 | } |
@@ -87,7 +77,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v) | |||
87 | #define atomic_inc_return(_v) atomic_add_return(1, _v) | 77 | #define atomic_inc_return(_v) atomic_add_return(1, _v) |
88 | #define atomic_inc_and_test(_v) (atomic_add_return(1, _v) == 0) | 78 | #define atomic_inc_and_test(_v) (atomic_add_return(1, _v) == 0) |
89 | 79 | ||
90 | static __inline__ int atomic_sub_return(int i, atomic_t * v) | 80 | static inline int atomic_sub_return(int i, atomic_t *v) |
91 | { | 81 | { |
92 | return __CS_LOOP(v, i, "sr"); | 82 | return __CS_LOOP(v, i, "sr"); |
93 | } | 83 | } |
@@ -97,19 +87,19 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v) | |||
97 | #define atomic_dec_return(_v) atomic_sub_return(1, _v) | 87 | #define atomic_dec_return(_v) atomic_sub_return(1, _v) |
98 | #define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0) | 88 | #define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0) |
99 | 89 | ||
100 | static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t * v) | 90 | static inline void atomic_clear_mask(unsigned long mask, atomic_t *v) |
101 | { | 91 | { |
102 | __CS_LOOP(v, ~mask, "nr"); | 92 | __CS_LOOP(v, ~mask, "nr"); |
103 | } | 93 | } |
104 | 94 | ||
105 | static __inline__ void atomic_set_mask(unsigned long mask, atomic_t * v) | 95 | static inline void atomic_set_mask(unsigned long mask, atomic_t *v) |
106 | { | 96 | { |
107 | __CS_LOOP(v, mask, "or"); | 97 | __CS_LOOP(v, mask, "or"); |
108 | } | 98 | } |
109 | 99 | ||
110 | #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) | 100 | #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) |
111 | 101 | ||
112 | static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new) | 102 | static inline int atomic_cmpxchg(atomic_t *v, int old, int new) |
113 | { | 103 | { |
114 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) | 104 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) |
115 | asm volatile( | 105 | asm volatile( |
@@ -127,7 +117,7 @@ static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new) | |||
127 | return old; | 117 | return old; |
128 | } | 118 | } |
129 | 119 | ||
130 | static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) | 120 | static inline int atomic_add_unless(atomic_t *v, int a, int u) |
131 | { | 121 | { |
132 | int c, old; | 122 | int c, old; |
133 | c = atomic_read(v); | 123 | c = atomic_read(v); |
@@ -146,9 +136,10 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) | |||
146 | 136 | ||
147 | #undef __CS_LOOP | 137 | #undef __CS_LOOP |
148 | 138 | ||
149 | #ifdef __s390x__ | ||
150 | #define ATOMIC64_INIT(i) { (i) } | 139 | #define ATOMIC64_INIT(i) { (i) } |
151 | 140 | ||
141 | #ifdef CONFIG_64BIT | ||
142 | |||
152 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) | 143 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) |
153 | 144 | ||
154 | #define __CSG_LOOP(ptr, op_val, op_string) ({ \ | 145 | #define __CSG_LOOP(ptr, op_val, op_string) ({ \ |
@@ -162,7 +153,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) | |||
162 | : "=&d" (old_val), "=&d" (new_val), \ | 153 | : "=&d" (old_val), "=&d" (new_val), \ |
163 | "=Q" (((atomic_t *)(ptr))->counter) \ | 154 | "=Q" (((atomic_t *)(ptr))->counter) \ |
164 | : "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \ | 155 | : "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \ |
165 | : "cc", "memory" ); \ | 156 | : "cc", "memory"); \ |
166 | new_val; \ | 157 | new_val; \ |
167 | }) | 158 | }) |
168 | 159 | ||
@@ -180,7 +171,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) | |||
180 | "=m" (((atomic_t *)(ptr))->counter) \ | 171 | "=m" (((atomic_t *)(ptr))->counter) \ |
181 | : "a" (ptr), "d" (op_val), \ | 172 | : "a" (ptr), "d" (op_val), \ |
182 | "m" (((atomic_t *)(ptr))->counter) \ | 173 | "m" (((atomic_t *)(ptr))->counter) \ |
183 | : "cc", "memory" ); \ | 174 | : "cc", "memory"); \ |
184 | new_val; \ | 175 | new_val; \ |
185 | }) | 176 | }) |
186 | 177 | ||
@@ -198,39 +189,29 @@ static inline void atomic64_set(atomic64_t *v, long long i) | |||
198 | barrier(); | 189 | barrier(); |
199 | } | 190 | } |
200 | 191 | ||
201 | static __inline__ long long atomic64_add_return(long long i, atomic64_t * v) | 192 | static inline long long atomic64_add_return(long long i, atomic64_t *v) |
202 | { | 193 | { |
203 | return __CSG_LOOP(v, i, "agr"); | 194 | return __CSG_LOOP(v, i, "agr"); |
204 | } | 195 | } |
205 | #define atomic64_add(_i, _v) atomic64_add_return(_i, _v) | ||
206 | #define atomic64_add_negative(_i, _v) (atomic64_add_return(_i, _v) < 0) | ||
207 | #define atomic64_inc(_v) atomic64_add_return(1, _v) | ||
208 | #define atomic64_inc_return(_v) atomic64_add_return(1, _v) | ||
209 | #define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0) | ||
210 | 196 | ||
211 | static __inline__ long long atomic64_sub_return(long long i, atomic64_t * v) | 197 | static inline long long atomic64_sub_return(long long i, atomic64_t *v) |
212 | { | 198 | { |
213 | return __CSG_LOOP(v, i, "sgr"); | 199 | return __CSG_LOOP(v, i, "sgr"); |
214 | } | 200 | } |
215 | #define atomic64_sub(_i, _v) atomic64_sub_return(_i, _v) | ||
216 | #define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0) | ||
217 | #define atomic64_dec(_v) atomic64_sub_return(1, _v) | ||
218 | #define atomic64_dec_return(_v) atomic64_sub_return(1, _v) | ||
219 | #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0) | ||
220 | 201 | ||
221 | static __inline__ void atomic64_clear_mask(unsigned long mask, atomic64_t * v) | 202 | static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v) |
222 | { | 203 | { |
223 | __CSG_LOOP(v, ~mask, "ngr"); | 204 | __CSG_LOOP(v, ~mask, "ngr"); |
224 | } | 205 | } |
225 | 206 | ||
226 | static __inline__ void atomic64_set_mask(unsigned long mask, atomic64_t * v) | 207 | static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v) |
227 | { | 208 | { |
228 | __CSG_LOOP(v, mask, "ogr"); | 209 | __CSG_LOOP(v, mask, "ogr"); |
229 | } | 210 | } |
230 | 211 | ||
231 | #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) | 212 | #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) |
232 | 213 | ||
233 | static __inline__ long long atomic64_cmpxchg(atomic64_t *v, | 214 | static inline long long atomic64_cmpxchg(atomic64_t *v, |
234 | long long old, long long new) | 215 | long long old, long long new) |
235 | { | 216 | { |
236 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) | 217 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) |
@@ -249,8 +230,112 @@ static __inline__ long long atomic64_cmpxchg(atomic64_t *v, | |||
249 | return old; | 230 | return old; |
250 | } | 231 | } |
251 | 232 | ||
252 | static __inline__ int atomic64_add_unless(atomic64_t *v, | 233 | #undef __CSG_LOOP |
253 | long long a, long long u) | 234 | |
235 | #else /* CONFIG_64BIT */ | ||
236 | |||
237 | typedef struct { | ||
238 | long long counter; | ||
239 | } atomic64_t; | ||
240 | |||
241 | static inline long long atomic64_read(const atomic64_t *v) | ||
242 | { | ||
243 | register_pair rp; | ||
244 | |||
245 | asm volatile( | ||
246 | " lm %0,%N0,0(%1)" | ||
247 | : "=&d" (rp) | ||
248 | : "a" (&v->counter), "m" (v->counter) | ||
249 | ); | ||
250 | return rp.pair; | ||
251 | } | ||
252 | |||
253 | static inline void atomic64_set(atomic64_t *v, long long i) | ||
254 | { | ||
255 | register_pair rp = {.pair = i}; | ||
256 | |||
257 | asm volatile( | ||
258 | " stm %1,%N1,0(%2)" | ||
259 | : "=m" (v->counter) | ||
260 | : "d" (rp), "a" (&v->counter) | ||
261 | ); | ||
262 | } | ||
263 | |||
264 | static inline long long atomic64_xchg(atomic64_t *v, long long new) | ||
265 | { | ||
266 | register_pair rp_new = {.pair = new}; | ||
267 | register_pair rp_old; | ||
268 | |||
269 | asm volatile( | ||
270 | " lm %0,%N0,0(%2)\n" | ||
271 | "0: cds %0,%3,0(%2)\n" | ||
272 | " jl 0b\n" | ||
273 | : "=&d" (rp_old), "+m" (v->counter) | ||
274 | : "a" (&v->counter), "d" (rp_new) | ||
275 | : "cc"); | ||
276 | return rp_old.pair; | ||
277 | } | ||
278 | |||
279 | static inline long long atomic64_cmpxchg(atomic64_t *v, | ||
280 | long long old, long long new) | ||
281 | { | ||
282 | register_pair rp_old = {.pair = old}; | ||
283 | register_pair rp_new = {.pair = new}; | ||
284 | |||
285 | asm volatile( | ||
286 | " cds %0,%3,0(%2)" | ||
287 | : "+&d" (rp_old), "+m" (v->counter) | ||
288 | : "a" (&v->counter), "d" (rp_new) | ||
289 | : "cc"); | ||
290 | return rp_old.pair; | ||
291 | } | ||
292 | |||
293 | |||
294 | static inline long long atomic64_add_return(long long i, atomic64_t *v) | ||
295 | { | ||
296 | long long old, new; | ||
297 | |||
298 | do { | ||
299 | old = atomic64_read(v); | ||
300 | new = old + i; | ||
301 | } while (atomic64_cmpxchg(v, old, new) != old); | ||
302 | return new; | ||
303 | } | ||
304 | |||
305 | static inline long long atomic64_sub_return(long long i, atomic64_t *v) | ||
306 | { | ||
307 | long long old, new; | ||
308 | |||
309 | do { | ||
310 | old = atomic64_read(v); | ||
311 | new = old - i; | ||
312 | } while (atomic64_cmpxchg(v, old, new) != old); | ||
313 | return new; | ||
314 | } | ||
315 | |||
316 | static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v) | ||
317 | { | ||
318 | long long old, new; | ||
319 | |||
320 | do { | ||
321 | old = atomic64_read(v); | ||
322 | new = old | mask; | ||
323 | } while (atomic64_cmpxchg(v, old, new) != old); | ||
324 | } | ||
325 | |||
326 | static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v) | ||
327 | { | ||
328 | long long old, new; | ||
329 | |||
330 | do { | ||
331 | old = atomic64_read(v); | ||
332 | new = old & mask; | ||
333 | } while (atomic64_cmpxchg(v, old, new) != old); | ||
334 | } | ||
335 | |||
336 | #endif /* CONFIG_64BIT */ | ||
337 | |||
338 | static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u) | ||
254 | { | 339 | { |
255 | long long c, old; | 340 | long long c, old; |
256 | c = atomic64_read(v); | 341 | c = atomic64_read(v); |
@@ -265,15 +350,17 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, | |||
265 | return c != u; | 350 | return c != u; |
266 | } | 351 | } |
267 | 352 | ||
268 | #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) | 353 | #define atomic64_add(_i, _v) atomic64_add_return(_i, _v) |
269 | 354 | #define atomic64_add_negative(_i, _v) (atomic64_add_return(_i, _v) < 0) | |
270 | #undef __CSG_LOOP | 355 | #define atomic64_inc(_v) atomic64_add_return(1, _v) |
271 | 356 | #define atomic64_inc_return(_v) atomic64_add_return(1, _v) | |
272 | #else /* __s390x__ */ | 357 | #define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0) |
273 | 358 | #define atomic64_sub(_i, _v) atomic64_sub_return(_i, _v) | |
274 | #include <asm-generic/atomic64.h> | 359 | #define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0) |
275 | 360 | #define atomic64_dec(_v) atomic64_sub_return(1, _v) | |
276 | #endif /* __s390x__ */ | 361 | #define atomic64_dec_return(_v) atomic64_sub_return(1, _v) |
362 | #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0) | ||
363 | #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) | ||
277 | 364 | ||
278 | #define smp_mb__before_atomic_dec() smp_mb() | 365 | #define smp_mb__before_atomic_dec() smp_mb() |
279 | #define smp_mb__after_atomic_dec() smp_mb() | 366 | #define smp_mb__after_atomic_dec() smp_mb() |
@@ -281,5 +368,5 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, | |||
281 | #define smp_mb__after_atomic_inc() smp_mb() | 368 | #define smp_mb__after_atomic_inc() smp_mb() |
282 | 369 | ||
283 | #include <asm-generic/atomic-long.h> | 370 | #include <asm-generic/atomic-long.h> |
284 | #endif /* __KERNEL__ */ | 371 | |
285 | #endif /* __ARCH_S390_ATOMIC__ */ | 372 | #endif /* __ARCH_S390_ATOMIC__ */ |
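The 31-bit half of atomic.h now provides 64-bit atomics natively: LM/STM move a register pair atomically for read and set, CDS (compare double and swap) supplies cmpxchg, and every other operation is a cmpxchg retry loop, which is why the add/sub/inc/dec macros could move below the #endif and serve both configurations. The retry idiom, written with C11 atomics instead of CDS (same structure as the new atomic64_add_return()):

    #include <stdatomic.h>
    #include <stdio.h>

    static long long add_return(long long i, _Atomic long long *v)
    {
            long long old, new;

            do {
                    old = atomic_load(v);
                    new = old + i;
                    /* Retry if another CPU changed *v between load and swap. */
            } while (!atomic_compare_exchange_weak(v, &old, new));
            return new;
    }

    int main(void)
    {
            _Atomic long long v = 40;

            printf("%lld\n", add_return(2, &v));    /* prints 42 */
            return 0;
    }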
diff --git a/arch/s390/include/asm/checksum.h b/arch/s390/include/asm/checksum.h index d5a8e7c1477c..6c00f6800a34 100644 --- a/arch/s390/include/asm/checksum.h +++ b/arch/s390/include/asm/checksum.h | |||
@@ -78,28 +78,11 @@ csum_partial_copy_nocheck (const void *src, void *dst, int len, __wsum sum) | |||
78 | */ | 78 | */ |
79 | static inline __sum16 csum_fold(__wsum sum) | 79 | static inline __sum16 csum_fold(__wsum sum) |
80 | { | 80 | { |
81 | #ifndef __s390x__ | 81 | u32 csum = (__force u32) sum; |
82 | register_pair rp; | ||
83 | 82 | ||
84 | asm volatile( | 83 | csum += (csum >> 16) + (csum << 16); |
85 | " slr %N1,%N1\n" /* %0 = H L */ | 84 | csum >>= 16; |
86 | " lr %1,%0\n" /* %0 = H L, %1 = H L 0 0 */ | 85 | return (__force __sum16) ~csum; |
87 | " srdl %1,16\n" /* %0 = H L, %1 = 0 H L 0 */ | ||
88 | " alr %1,%N1\n" /* %0 = H L, %1 = L H L 0 */ | ||
89 | " alr %0,%1\n" /* %0 = H+L+C L+H */ | ||
90 | " srl %0,16\n" /* %0 = H+L+C */ | ||
91 | : "+&d" (sum), "=d" (rp) : : "cc"); | ||
92 | #else /* __s390x__ */ | ||
93 | asm volatile( | ||
94 | " sr 3,3\n" /* %0 = H*65536 + L */ | ||
95 | " lr 2,%0\n" /* %0 = H L, 2/3 = H L / 0 0 */ | ||
96 | " srdl 2,16\n" /* %0 = H L, 2/3 = 0 H / L 0 */ | ||
97 | " alr 2,3\n" /* %0 = H L, 2/3 = L H / L 0 */ | ||
98 | " alr %0,2\n" /* %0 = H+L+C L+H */ | ||
99 | " srl %0,16\n" /* %0 = H+L+C */ | ||
100 | : "+&d" (sum) : : "cc", "2", "3"); | ||
101 | #endif /* __s390x__ */ | ||
102 | return (__force __sum16) ~sum; | ||
103 | } | 86 | } |
104 | 87 | ||
105 | /* | 88 | /* |
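The rewritten csum_fold() replaces two asm variants with three lines of C: adding (csum >> 16) and (csum << 16) to csum leaves high half + low half, including the end-around carry, in the top 16 bits; shifting down and complementing yields the ones'-complement checksum. A worked standalone version (same arithmetic, illustrative names):

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t fold(uint32_t csum)
    {
            csum += (csum >> 16) + (csum << 16); /* top 16 bits: hi+lo+carry */
            csum >>= 16;
            return (uint16_t)~csum;
    }

    int main(void)
    {
            /* 0x1234 + 0x5678 = 0x68ac; complemented: 0x9753. */
            printf("%#x\n", fold(0x12345678));
            return 0;
    }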
diff --git a/arch/s390/include/asm/chsc.h b/arch/s390/include/asm/chsc.h index 807997f7414b..4943654ed7fd 100644 --- a/arch/s390/include/asm/chsc.h +++ b/arch/s390/include/asm/chsc.h | |||
@@ -125,4 +125,32 @@ struct chsc_cpd_info { | |||
125 | #define CHSC_INFO_CPD _IOWR(CHSC_IOCTL_MAGIC, 0x87, struct chsc_cpd_info) | 125 | #define CHSC_INFO_CPD _IOWR(CHSC_IOCTL_MAGIC, 0x87, struct chsc_cpd_info) |
126 | #define CHSC_INFO_DCAL _IOWR(CHSC_IOCTL_MAGIC, 0x88, struct chsc_dcal) | 126 | #define CHSC_INFO_DCAL _IOWR(CHSC_IOCTL_MAGIC, 0x88, struct chsc_dcal) |
127 | 127 | ||
128 | #ifdef __KERNEL__ | ||
129 | |||
130 | struct css_general_char { | ||
131 | u64 : 12; | ||
132 | u32 dynio : 1; /* bit 12 */ | ||
133 | u32 : 28; | ||
134 | u32 aif : 1; /* bit 41 */ | ||
135 | u32 : 3; | ||
136 | u32 mcss : 1; /* bit 45 */ | ||
137 | u32 fcs : 1; /* bit 46 */ | ||
138 | u32 : 1; | ||
139 | u32 ext_mb : 1; /* bit 48 */ | ||
140 | u32 : 7; | ||
141 | u32 aif_tdd : 1; /* bit 56 */ | ||
142 | u32 : 1; | ||
143 | u32 qebsm : 1; /* bit 58 */ | ||
144 | u32 : 8; | ||
145 | u32 aif_osa : 1; /* bit 67 */ | ||
146 | u32 : 14; | ||
147 | u32 cib : 1; /* bit 82 */ | ||
148 | u32 : 5; | ||
149 | u32 fcx : 1; /* bit 88 */ | ||
150 | u32 : 7; | ||
151 | }__attribute__((packed)); | ||
152 | |||
153 | extern struct css_general_char css_general_characteristics; | ||
154 | |||
155 | #endif /* __KERNEL__ */ | ||
128 | #endif | 156 | #endif |
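The css_general_char block mirrors the CHSC general-characteristics response bit-for-bit: each anonymous field pads to the next documented position (12 + 1 + 28 = 41 for aif, and so on through fcx at bit 88), relying on GCC's big-endian bitfield layout on s390. Consumers then simply test a named bit; a hedged usage sketch (kernel context; the extern is declared above):

    #include <asm/chsc.h>

    /* Illustrative only: gate features on channel-subsystem characteristics. */
    static int channel_has_qebsm(void)
    {
            return css_general_characteristics.qebsm;  /* bit 58 */
    }

    static int channel_has_fcx(void)
    {
            return css_general_characteristics.fcx;    /* bit 88 */
    }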
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h index 619bf94b11f1..e85679af54dd 100644 --- a/arch/s390/include/asm/cio.h +++ b/arch/s390/include/asm/cio.h | |||
@@ -15,228 +15,7 @@ | |||
15 | #define LPM_ANYPATH 0xff | 15 | #define LPM_ANYPATH 0xff |
16 | #define __MAX_CSSID 0 | 16 | #define __MAX_CSSID 0 |
17 | 17 | ||
18 | /** | 18 | #include <asm/scsw.h> |
19 | * struct cmd_scsw - command-mode subchannel status word | ||
20 | * @key: subchannel key | ||
21 | * @sctl: suspend control | ||
22 | * @eswf: esw format | ||
23 | * @cc: deferred condition code | ||
24 | * @fmt: format | ||
25 | * @pfch: prefetch | ||
26 | * @isic: initial-status interruption control | ||
27 | * @alcc: address-limit checking control | ||
28 | * @ssi: suppress-suspended interruption | ||
29 | * @zcc: zero condition code | ||
30 | * @ectl: extended control | ||
31 | * @pno: path not operational | ||
32 | * @res: reserved | ||
33 | * @fctl: function control | ||
34 | * @actl: activity control | ||
35 | * @stctl: status control | ||
36 | * @cpa: channel program address | ||
37 | * @dstat: device status | ||
38 | * @cstat: subchannel status | ||
39 | * @count: residual count | ||
40 | */ | ||
41 | struct cmd_scsw { | ||
42 | __u32 key : 4; | ||
43 | __u32 sctl : 1; | ||
44 | __u32 eswf : 1; | ||
45 | __u32 cc : 2; | ||
46 | __u32 fmt : 1; | ||
47 | __u32 pfch : 1; | ||
48 | __u32 isic : 1; | ||
49 | __u32 alcc : 1; | ||
50 | __u32 ssi : 1; | ||
51 | __u32 zcc : 1; | ||
52 | __u32 ectl : 1; | ||
53 | __u32 pno : 1; | ||
54 | __u32 res : 1; | ||
55 | __u32 fctl : 3; | ||
56 | __u32 actl : 7; | ||
57 | __u32 stctl : 5; | ||
58 | __u32 cpa; | ||
59 | __u32 dstat : 8; | ||
60 | __u32 cstat : 8; | ||
61 | __u32 count : 16; | ||
62 | } __attribute__ ((packed)); | ||
63 | |||
64 | /** | ||
65 | * struct tm_scsw - transport-mode subchannel status word | ||
66 | * @key: subchannel key | ||
67 | * @eswf: esw format | ||
68 | * @cc: deferred condition code | ||
69 | * @fmt: format | ||
70 | * @x: IRB-format control | ||
71 | * @q: interrogate-complete | ||
72 | * @ectl: extended control | ||
73 | * @pno: path not operational | ||
74 | * @fctl: function control | ||
75 | * @actl: activity control | ||
76 | * @stctl: status control | ||
77 | * @tcw: TCW address | ||
78 | * @dstat: device status | ||
79 | * @cstat: subchannel status | ||
80 | * @fcxs: FCX status | ||
81 | * @schxs: subchannel-extended status | ||
82 | */ | ||
83 | struct tm_scsw { | ||
84 | u32 key:4; | ||
85 | u32 :1; | ||
86 | u32 eswf:1; | ||
87 | u32 cc:2; | ||
88 | u32 fmt:3; | ||
89 | u32 x:1; | ||
90 | u32 q:1; | ||
91 | u32 :1; | ||
92 | u32 ectl:1; | ||
93 | u32 pno:1; | ||
94 | u32 :1; | ||
95 | u32 fctl:3; | ||
96 | u32 actl:7; | ||
97 | u32 stctl:5; | ||
98 | u32 tcw; | ||
99 | u32 dstat:8; | ||
100 | u32 cstat:8; | ||
101 | u32 fcxs:8; | ||
102 | u32 schxs:8; | ||
103 | } __attribute__ ((packed)); | ||
104 | |||
105 | /** | ||
106 | * union scsw - subchannel status word | ||
107 | * @cmd: command-mode SCSW | ||
108 | * @tm: transport-mode SCSW | ||
109 | */ | ||
110 | union scsw { | ||
111 | struct cmd_scsw cmd; | ||
112 | struct tm_scsw tm; | ||
113 | } __attribute__ ((packed)); | ||
114 | |||
115 | int scsw_is_tm(union scsw *scsw); | ||
116 | u32 scsw_key(union scsw *scsw); | ||
117 | u32 scsw_eswf(union scsw *scsw); | ||
118 | u32 scsw_cc(union scsw *scsw); | ||
119 | u32 scsw_ectl(union scsw *scsw); | ||
120 | u32 scsw_pno(union scsw *scsw); | ||
121 | u32 scsw_fctl(union scsw *scsw); | ||
122 | u32 scsw_actl(union scsw *scsw); | ||
123 | u32 scsw_stctl(union scsw *scsw); | ||
124 | u32 scsw_dstat(union scsw *scsw); | ||
125 | u32 scsw_cstat(union scsw *scsw); | ||
126 | int scsw_is_solicited(union scsw *scsw); | ||
127 | int scsw_is_valid_key(union scsw *scsw); | ||
128 | int scsw_is_valid_eswf(union scsw *scsw); | ||
129 | int scsw_is_valid_cc(union scsw *scsw); | ||
130 | int scsw_is_valid_ectl(union scsw *scsw); | ||
131 | int scsw_is_valid_pno(union scsw *scsw); | ||
132 | int scsw_is_valid_fctl(union scsw *scsw); | ||
133 | int scsw_is_valid_actl(union scsw *scsw); | ||
134 | int scsw_is_valid_stctl(union scsw *scsw); | ||
135 | int scsw_is_valid_dstat(union scsw *scsw); | ||
136 | int scsw_is_valid_cstat(union scsw *scsw); | ||
137 | int scsw_cmd_is_valid_key(union scsw *scsw); | ||
138 | int scsw_cmd_is_valid_sctl(union scsw *scsw); | ||
139 | int scsw_cmd_is_valid_eswf(union scsw *scsw); | ||
140 | int scsw_cmd_is_valid_cc(union scsw *scsw); | ||
141 | int scsw_cmd_is_valid_fmt(union scsw *scsw); | ||
142 | int scsw_cmd_is_valid_pfch(union scsw *scsw); | ||
143 | int scsw_cmd_is_valid_isic(union scsw *scsw); | ||
144 | int scsw_cmd_is_valid_alcc(union scsw *scsw); | ||
145 | int scsw_cmd_is_valid_ssi(union scsw *scsw); | ||
146 | int scsw_cmd_is_valid_zcc(union scsw *scsw); | ||
147 | int scsw_cmd_is_valid_ectl(union scsw *scsw); | ||
148 | int scsw_cmd_is_valid_pno(union scsw *scsw); | ||
149 | int scsw_cmd_is_valid_fctl(union scsw *scsw); | ||
150 | int scsw_cmd_is_valid_actl(union scsw *scsw); | ||
151 | int scsw_cmd_is_valid_stctl(union scsw *scsw); | ||
152 | int scsw_cmd_is_valid_dstat(union scsw *scsw); | ||
153 | int scsw_cmd_is_valid_cstat(union scsw *scsw); | ||
154 | int scsw_cmd_is_solicited(union scsw *scsw); | ||
155 | int scsw_tm_is_valid_key(union scsw *scsw); | ||
156 | int scsw_tm_is_valid_eswf(union scsw *scsw); | ||
157 | int scsw_tm_is_valid_cc(union scsw *scsw); | ||
158 | int scsw_tm_is_valid_fmt(union scsw *scsw); | ||
159 | int scsw_tm_is_valid_x(union scsw *scsw); | ||
160 | int scsw_tm_is_valid_q(union scsw *scsw); | ||
161 | int scsw_tm_is_valid_ectl(union scsw *scsw); | ||
162 | int scsw_tm_is_valid_pno(union scsw *scsw); | ||
163 | int scsw_tm_is_valid_fctl(union scsw *scsw); | ||
164 | int scsw_tm_is_valid_actl(union scsw *scsw); | ||
165 | int scsw_tm_is_valid_stctl(union scsw *scsw); | ||
166 | int scsw_tm_is_valid_dstat(union scsw *scsw); | ||
167 | int scsw_tm_is_valid_cstat(union scsw *scsw); | ||
168 | int scsw_tm_is_valid_fcxs(union scsw *scsw); | ||
169 | int scsw_tm_is_valid_schxs(union scsw *scsw); | ||
170 | int scsw_tm_is_solicited(union scsw *scsw); | ||
171 | |||
172 | #define SCSW_FCTL_CLEAR_FUNC 0x1 | ||
173 | #define SCSW_FCTL_HALT_FUNC 0x2 | ||
174 | #define SCSW_FCTL_START_FUNC 0x4 | ||
175 | |||
176 | #define SCSW_ACTL_SUSPENDED 0x1 | ||
177 | #define SCSW_ACTL_DEVACT 0x2 | ||
178 | #define SCSW_ACTL_SCHACT 0x4 | ||
179 | #define SCSW_ACTL_CLEAR_PEND 0x8 | ||
180 | #define SCSW_ACTL_HALT_PEND 0x10 | ||
181 | #define SCSW_ACTL_START_PEND 0x20 | ||
182 | #define SCSW_ACTL_RESUME_PEND 0x40 | ||
183 | |||
184 | #define SCSW_STCTL_STATUS_PEND 0x1 | ||
185 | #define SCSW_STCTL_SEC_STATUS 0x2 | ||
186 | #define SCSW_STCTL_PRIM_STATUS 0x4 | ||
187 | #define SCSW_STCTL_INTER_STATUS 0x8 | ||
188 | #define SCSW_STCTL_ALERT_STATUS 0x10 | ||
189 | |||
190 | #define DEV_STAT_ATTENTION 0x80 | ||
191 | #define DEV_STAT_STAT_MOD 0x40 | ||
192 | #define DEV_STAT_CU_END 0x20 | ||
193 | #define DEV_STAT_BUSY 0x10 | ||
194 | #define DEV_STAT_CHN_END 0x08 | ||
195 | #define DEV_STAT_DEV_END 0x04 | ||
196 | #define DEV_STAT_UNIT_CHECK 0x02 | ||
197 | #define DEV_STAT_UNIT_EXCEP 0x01 | ||
198 | |||
199 | #define SCHN_STAT_PCI 0x80 | ||
200 | #define SCHN_STAT_INCORR_LEN 0x40 | ||
201 | #define SCHN_STAT_PROG_CHECK 0x20 | ||
202 | #define SCHN_STAT_PROT_CHECK 0x10 | ||
203 | #define SCHN_STAT_CHN_DATA_CHK 0x08 | ||
204 | #define SCHN_STAT_CHN_CTRL_CHK 0x04 | ||
205 | #define SCHN_STAT_INTF_CTRL_CHK 0x02 | ||
206 | #define SCHN_STAT_CHAIN_CHECK 0x01 | ||
207 | |||
208 | /* | ||
209 | * architectured values for first sense byte | ||
210 | */ | ||
211 | #define SNS0_CMD_REJECT 0x80 | ||
212 | #define SNS_CMD_REJECT SNS0_CMD_REJEC | ||
213 | #define SNS0_INTERVENTION_REQ 0x40 | ||
214 | #define SNS0_BUS_OUT_CHECK 0x20 | ||
215 | #define SNS0_EQUIPMENT_CHECK 0x10 | ||
216 | #define SNS0_DATA_CHECK 0x08 | ||
217 | #define SNS0_OVERRUN 0x04 | ||
218 | #define SNS0_INCOMPL_DOMAIN 0x01 | ||
219 | |||
220 | /* | ||
221 | * architectured values for second sense byte | ||
222 | */ | ||
223 | #define SNS1_PERM_ERR 0x80 | ||
224 | #define SNS1_INV_TRACK_FORMAT 0x40 | ||
225 | #define SNS1_EOC 0x20 | ||
226 | #define SNS1_MESSAGE_TO_OPER 0x10 | ||
227 | #define SNS1_NO_REC_FOUND 0x08 | ||
228 | #define SNS1_FILE_PROTECTED 0x04 | ||
229 | #define SNS1_WRITE_INHIBITED 0x02 | ||
230 | #define SNS1_INPRECISE_END 0x01 | ||
231 | |||
232 | /* | ||
233 | * architectured values for third sense byte | ||
234 | */ | ||
235 | #define SNS2_REQ_INH_WRITE 0x80 | ||
236 | #define SNS2_CORRECTABLE 0x40 | ||
237 | #define SNS2_FIRST_LOG_ERR 0x20 | ||
238 | #define SNS2_ENV_DATA_PRESENT 0x10 | ||
239 | #define SNS2_INPRECISE_END 0x04 | ||
240 | 19 | ||
241 | /** | 20 | /** |
242 | * struct ccw1 - channel command word | 21 | * struct ccw1 - channel command word |
diff --git a/arch/s390/include/asm/cpu.h b/arch/s390/include/asm/cpu.h new file mode 100644 index 000000000000..471234b90574 --- /dev/null +++ b/arch/s390/include/asm/cpu.h | |||
@@ -0,0 +1,26 @@ | |||
1 | /* | ||
2 | * Copyright IBM Corp. 2000,2009 | ||
3 | * Author(s): Hartmut Penner <hp@de.ibm.com>, | ||
4 | * Martin Schwidefsky <schwidefsky@de.ibm.com>, | ||
5 | * Christian Ehrhardt <ehrhardt@de.ibm.com>, | ||
6 | */ | ||
7 | |||
8 | #ifndef _ASM_S390_CPU_H | ||
9 | #define _ASM_S390_CPU_H | ||
10 | |||
11 | #define MAX_CPU_ADDRESS 255 | ||
12 | |||
13 | #ifndef __ASSEMBLY__ | ||
14 | |||
15 | #include <linux/types.h> | ||
16 | |||
17 | struct cpuid | ||
18 | { | ||
19 | unsigned int version : 8; | ||
20 | unsigned int ident : 24; | ||
21 | unsigned int machine : 16; | ||
22 | unsigned int unused : 16; | ||
23 | } __packed; | ||
24 | |||
25 | #endif /* __ASSEMBLY__ */ | ||
26 | #endif /* _ASM_S390_CPU_H */ | ||
diff --git a/arch/s390/include/asm/cpuid.h b/arch/s390/include/asm/cpuid.h deleted file mode 100644 index 07836a2e5222..000000000000 --- a/arch/s390/include/asm/cpuid.h +++ /dev/null | |||
@@ -1,25 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright IBM Corp. 2000,2009 | ||
3 | * Author(s): Hartmut Penner <hp@de.ibm.com>, | ||
4 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | ||
5 | * Christian Ehrhardt <ehrhardt@de.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #ifndef _ASM_S390_CPUID_H_ | ||
9 | #define _ASM_S390_CPUID_H_ | ||
10 | |||
11 | /* | ||
12 | * CPU type and hardware bug flags. Kept separately for each CPU. | ||
13 | * Members of this structure are referenced in head.S, so think twice | ||
14 | * before touching them. [mj] | ||
15 | */ | ||
16 | |||
17 | typedef struct | ||
18 | { | ||
19 | unsigned int version : 8; | ||
20 | unsigned int ident : 24; | ||
21 | unsigned int machine : 16; | ||
22 | unsigned int unused : 16; | ||
23 | } __attribute__ ((packed)) cpuid_t; | ||
24 | |||
25 | #endif /* _ASM_S390_CPUID_H_ */ | ||
diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h index 31ed5686a968..18124b75a7ab 100644 --- a/arch/s390/include/asm/debug.h +++ b/arch/s390/include/asm/debug.h | |||
@@ -167,6 +167,10 @@ debug_text_event(debug_info_t* id, int level, const char* txt) | |||
167 | return debug_event_common(id,level,txt,strlen(txt)); | 167 | return debug_event_common(id,level,txt,strlen(txt)); |
168 | } | 168 | } |
169 | 169 | ||
170 | /* | ||
171 | * IMPORTANT: Use "%s" in sprintf format strings with care! Only pointers are | ||
172 | * stored in the s390dbf. See Documentation/s390/s390dbf.txt for more details! | ||
173 | */ | ||
170 | extern debug_entry_t * | 174 | extern debug_entry_t * |
171 | debug_sprintf_event(debug_info_t* id,int level,char *string,...) | 175 | debug_sprintf_event(debug_info_t* id,int level,char *string,...) |
172 | __attribute__ ((format(printf, 3, 4))); | 176 | __attribute__ ((format(printf, 3, 4))); |
@@ -206,7 +210,10 @@ debug_text_exception(debug_info_t* id, int level, const char* txt) | |||
206 | return debug_exception_common(id,level,txt,strlen(txt)); | 210 | return debug_exception_common(id,level,txt,strlen(txt)); |
207 | } | 211 | } |
208 | 212 | ||
209 | 213 | /* | |
214 | * IMPORTANT: Use "%s" in sprintf format strings with care! Only pointers are | ||
215 | * stored in the s390dbf. See Documentation/s390/s390dbf.txt for more details! | ||
216 | */ | ||
210 | extern debug_entry_t * | 217 | extern debug_entry_t * |
211 | debug_sprintf_exception(debug_info_t* id,int level,char *string,...) | 218 | debug_sprintf_exception(debug_info_t* id,int level,char *string,...) |
212 | __attribute__ ((format(printf, 3, 4))); | 219 | __attribute__ ((format(printf, 3, 4))); |
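The two new warning comments deserve a concrete illustration. A hedged sketch follows (the handle name dbf and the helper are assumptions, not from this patch): debug_sprintf_event() records the format string pointer and the raw argument words in the debug feature buffer, so the bytes behind a "%s" argument are not copied and a stack buffer is stale by the time the log is formatted for reading.

	/* Hedged sketch, not from this patch: why "%s" needs care with s390dbf. */
	#include <linux/kernel.h>
	#include <asm/debug.h>

	static void sketch_dbf_events(debug_info_t *dbf)	/* dbf: assumed handle from debug_register() */
	{
		char buf[16];

		snprintf(buf, sizeof(buf), "dev%d", 5);
		/* Broken: only the pointer to buf is stored; buf is gone when the log is read. */
		debug_sprintf_event(dbf, 2, "state %s\n", buf);
		/* Safe: string literals stay valid for the lifetime of the kernel image. */
		debug_sprintf_event(dbf, 2, "state %s\n", "online");
	}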
diff --git a/arch/s390/include/asm/hardirq.h b/arch/s390/include/asm/hardirq.h index 89ec7056da28..498bc3892385 100644 --- a/arch/s390/include/asm/hardirq.h +++ b/arch/s390/include/asm/hardirq.h | |||
@@ -18,13 +18,6 @@ | |||
18 | #include <linux/interrupt.h> | 18 | #include <linux/interrupt.h> |
19 | #include <asm/lowcore.h> | 19 | #include <asm/lowcore.h> |
20 | 20 | ||
21 | /* irq_cpustat_t is unused currently, but could be converted | ||
22 | * into a percpu variable instead of storing softirq_pending | ||
23 | * on the lowcore */ | ||
24 | typedef struct { | ||
25 | unsigned int __softirq_pending; | ||
26 | } irq_cpustat_t; | ||
27 | |||
28 | #define local_softirq_pending() (S390_lowcore.softirq_pending) | 21 | #define local_softirq_pending() (S390_lowcore.softirq_pending) |
29 | 22 | ||
30 | #define __ARCH_IRQ_STAT | 23 | #define __ARCH_IRQ_STAT |
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h index 1171e6d144a3..5e95d95450b3 100644 --- a/arch/s390/include/asm/ipl.h +++ b/arch/s390/include/asm/ipl.h | |||
@@ -57,6 +57,8 @@ struct ipl_block_fcp { | |||
57 | } __attribute__((packed)); | 57 | } __attribute__((packed)); |
58 | 58 | ||
59 | #define DIAG308_VMPARM_SIZE 64 | 59 | #define DIAG308_VMPARM_SIZE 64 |
60 | #define DIAG308_SCPDATA_SIZE (PAGE_SIZE - (sizeof(struct ipl_list_hdr) + \ | ||
61 | offsetof(struct ipl_block_fcp, scp_data))) | ||
60 | 62 | ||
61 | struct ipl_block_ccw { | 63 | struct ipl_block_ccw { |
62 | u8 load_parm[8]; | 64 | u8 load_parm[8]; |
@@ -91,7 +93,8 @@ extern void do_halt(void); | |||
91 | extern void do_poff(void); | 93 | extern void do_poff(void); |
92 | extern void ipl_save_parameters(void); | 94 | extern void ipl_save_parameters(void); |
93 | extern void ipl_update_parameters(void); | 95 | extern void ipl_update_parameters(void); |
94 | extern void get_ipl_vmparm(char *); | 96 | extern size_t append_ipl_vmparm(char *, size_t); |
97 | extern size_t append_ipl_scpdata(char *, size_t); | ||
95 | 98 | ||
96 | enum { | 99 | enum { |
97 | IPL_DEVNO_VALID = 1, | 100 | IPL_DEVNO_VALID = 1, |
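The interface change from get_ipl_vmparm() to the two append_*() helpers suggests chained appends when assembling the boot command line. A hedged sketch under the assumption that each helper writes at most size bytes at the given position and returns the number of bytes appended (the authoritative contract is in arch/s390/kernel/ipl.c; the function name below is illustrative):

	/* Hedged sketch with an assumed return-value contract; not from this patch. */
	static void sketch_extend_cmdline(char *cmdline, size_t size)
	{
		size_t len = strlen(cmdline);

		len += append_ipl_vmparm(cmdline + len, size - len);
		len += append_ipl_scpdata(cmdline + len, size - len);
	}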
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 1cd02f6073a0..698988f69403 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h | |||
@@ -17,7 +17,7 @@ | |||
17 | #include <linux/interrupt.h> | 17 | #include <linux/interrupt.h> |
18 | #include <linux/kvm_host.h> | 18 | #include <linux/kvm_host.h> |
19 | #include <asm/debug.h> | 19 | #include <asm/debug.h> |
20 | #include <asm/cpuid.h> | 20 | #include <asm/cpu.h> |
21 | 21 | ||
22 | #define KVM_MAX_VCPUS 64 | 22 | #define KVM_MAX_VCPUS 64 |
23 | #define KVM_MEMORY_SLOTS 32 | 23 | #define KVM_MEMORY_SLOTS 32 |
@@ -217,8 +217,8 @@ struct kvm_vcpu_arch { | |||
217 | struct hrtimer ckc_timer; | 217 | struct hrtimer ckc_timer; |
218 | struct tasklet_struct tasklet; | 218 | struct tasklet_struct tasklet; |
219 | union { | 219 | union { |
220 | cpuid_t cpu_id; | 220 | struct cpuid cpu_id; |
221 | u64 stidp_data; | 221 | u64 stidp_data; |
222 | }; | 222 | }; |
223 | }; | 223 | }; |
224 | 224 | ||
diff --git a/arch/s390/include/asm/kvm_virtio.h b/arch/s390/include/asm/kvm_virtio.h index 0503936f101f..acdfdff26611 100644 --- a/arch/s390/include/asm/kvm_virtio.h +++ b/arch/s390/include/asm/kvm_virtio.h | |||
@@ -54,14 +54,4 @@ struct kvm_vqconfig { | |||
54 | * This is page size for historical reasons. */ | 54 | * This is page size for historical reasons. */
55 | #define KVM_S390_VIRTIO_RING_ALIGN 4096 | 55 | #define KVM_S390_VIRTIO_RING_ALIGN 4096 |
56 | 56 | ||
57 | #ifdef __KERNEL__ | ||
58 | /* early virtio console setup */ | ||
59 | #ifdef CONFIG_S390_GUEST | ||
60 | extern void s390_virtio_console_init(void); | ||
61 | #else | ||
62 | static inline void s390_virtio_console_init(void) | ||
63 | { | ||
64 | } | ||
65 | #endif /* CONFIG_VIRTIO_CONSOLE */ | ||
66 | #endif /* __KERNEL__ */ | ||
67 | #endif | 57 | #endif |
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h index 5046ad6b7a63..6bc9426a6fbf 100644 --- a/arch/s390/include/asm/lowcore.h +++ b/arch/s390/include/asm/lowcore.h | |||
@@ -132,7 +132,7 @@ | |||
132 | 132 | ||
133 | #ifndef __ASSEMBLY__ | 133 | #ifndef __ASSEMBLY__ |
134 | 134 | ||
135 | #include <asm/cpuid.h> | 135 | #include <asm/cpu.h> |
136 | #include <asm/ptrace.h> | 136 | #include <asm/ptrace.h> |
137 | #include <linux/types.h> | 137 | #include <linux/types.h> |
138 | 138 | ||
@@ -275,7 +275,7 @@ struct _lowcore | |||
275 | __u32 user_exec_asce; /* 0x02ac */ | 275 | __u32 user_exec_asce; /* 0x02ac */ |
276 | 276 | ||
277 | /* SMP info area */ | 277 | /* SMP info area */ |
278 | cpuid_t cpu_id; /* 0x02b0 */ | 278 | struct cpuid cpu_id; /* 0x02b0 */ |
279 | __u32 cpu_nr; /* 0x02b8 */ | 279 | __u32 cpu_nr; /* 0x02b8 */ |
280 | __u32 softirq_pending; /* 0x02bc */ | 280 | __u32 softirq_pending; /* 0x02bc */ |
281 | __u32 percpu_offset; /* 0x02c0 */ | 281 | __u32 percpu_offset; /* 0x02c0 */ |
@@ -380,7 +380,7 @@ struct _lowcore | |||
380 | __u64 user_exec_asce; /* 0x0318 */ | 380 | __u64 user_exec_asce; /* 0x0318 */ |
381 | 381 | ||
382 | /* SMP info area */ | 382 | /* SMP info area */ |
383 | cpuid_t cpu_id; /* 0x0320 */ | 383 | struct cpuid cpu_id; /* 0x0320 */ |
384 | __u32 cpu_nr; /* 0x0328 */ | 384 | __u32 cpu_nr; /* 0x0328 */ |
385 | __u32 softirq_pending; /* 0x032c */ | 385 | __u32 softirq_pending; /* 0x032c */ |
386 | __u64 percpu_offset; /* 0x0330 */ | 386 | __u64 percpu_offset; /* 0x0330 */ |
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h index 3b59216e6284..03be99919d62 100644 --- a/arch/s390/include/asm/mmu.h +++ b/arch/s390/include/asm/mmu.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #define __MMU_H | 2 | #define __MMU_H |
3 | 3 | ||
4 | typedef struct { | 4 | typedef struct { |
5 | spinlock_t list_lock; | ||
5 | struct list_head crst_list; | 6 | struct list_head crst_list; |
6 | struct list_head pgtable_list; | 7 | struct list_head pgtable_list; |
7 | unsigned long asce_bits; | 8 | unsigned long asce_bits; |
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h index 3e3594d01f83..5e9daf5d7f22 100644 --- a/arch/s390/include/asm/page.h +++ b/arch/s390/include/asm/page.h | |||
@@ -125,8 +125,6 @@ page_get_storage_key(unsigned long addr) | |||
125 | return skey; | 125 | return skey; |
126 | } | 126 | } |
127 | 127 | ||
128 | #ifdef CONFIG_PAGE_STATES | ||
129 | |||
130 | struct page; | 128 | struct page; |
131 | void arch_free_page(struct page *page, int order); | 129 | void arch_free_page(struct page *page, int order); |
132 | void arch_alloc_page(struct page *page, int order); | 130 | void arch_alloc_page(struct page *page, int order); |
@@ -134,8 +132,6 @@ void arch_alloc_page(struct page *page, int order); | |||
134 | #define HAVE_ARCH_FREE_PAGE | 132 | #define HAVE_ARCH_FREE_PAGE |
135 | #define HAVE_ARCH_ALLOC_PAGE | 133 | #define HAVE_ARCH_ALLOC_PAGE |
136 | 134 | ||
137 | #endif | ||
138 | |||
139 | #endif /* !__ASSEMBLY__ */ | 135 | #endif /* !__ASSEMBLY__ */ |
140 | 136 | ||
141 | #define __PAGE_OFFSET 0x0UL | 137 | #define __PAGE_OFFSET 0x0UL |
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h index b2658b9220fe..ddad5903341c 100644 --- a/arch/s390/include/asm/pgalloc.h +++ b/arch/s390/include/asm/pgalloc.h | |||
@@ -140,6 +140,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) | |||
140 | 140 | ||
141 | static inline pgd_t *pgd_alloc(struct mm_struct *mm) | 141 | static inline pgd_t *pgd_alloc(struct mm_struct *mm) |
142 | { | 142 | { |
143 | spin_lock_init(&mm->context.list_lock); | ||
143 | INIT_LIST_HEAD(&mm->context.crst_list); | 144 | INIT_LIST_HEAD(&mm->context.crst_list); |
144 | INIT_LIST_HEAD(&mm->context.pgtable_list); | 145 | INIT_LIST_HEAD(&mm->context.pgtable_list); |
145 | return (pgd_t *) crst_table_alloc(mm, s390_noexec); | 146 | return (pgd_t *) crst_table_alloc(mm, s390_noexec); |
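The new per-mm list_lock is initialized here, before the lists it protects. A hedged sketch of the pattern (the real users live in arch/s390/mm/pgtable.c; the function below is illustrative only):

	/* Hedged sketch, not from this patch: list_lock guards the page table lists. */
	static void sketch_track_pgtable_page(struct mm_struct *mm, struct page *page)
	{
		spin_lock(&mm->context.list_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
		spin_unlock(&mm->context.list_lock);
	}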
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index c139fa7b8e89..cf8eed3fa779 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h | |||
@@ -14,7 +14,7 @@ | |||
14 | #define __ASM_S390_PROCESSOR_H | 14 | #define __ASM_S390_PROCESSOR_H |
15 | 15 | ||
16 | #include <linux/linkage.h> | 16 | #include <linux/linkage.h> |
17 | #include <asm/cpuid.h> | 17 | #include <asm/cpu.h> |
18 | #include <asm/page.h> | 18 | #include <asm/page.h> |
19 | #include <asm/ptrace.h> | 19 | #include <asm/ptrace.h> |
20 | #include <asm/setup.h> | 20 | #include <asm/setup.h> |
@@ -26,7 +26,7 @@ | |||
26 | */ | 26 | */ |
27 | #define current_text_addr() ({ void *pc; asm("basr %0,0" : "=a" (pc)); pc; }) | 27 | #define current_text_addr() ({ void *pc; asm("basr %0,0" : "=a" (pc)); pc; }) |
28 | 28 | ||
29 | static inline void get_cpu_id(cpuid_t *ptr) | 29 | static inline void get_cpu_id(struct cpuid *ptr) |
30 | { | 30 | { |
31 | asm volatile("stidp 0(%1)" : "=m" (*ptr) : "a" (ptr)); | 31 | asm volatile("stidp 0(%1)" : "=m" (*ptr) : "a" (ptr)); |
32 | } | 32 | } |
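With the typedef gone, callers now pass a struct cpuid (see the new asm/cpu.h above). A hedged usage sketch (the function name and pr_info formatting are illustrative): stidp stores an 8-byte identifier that the bit fields of struct cpuid decompose into version, CPU identification number, and machine type.

	/* Hedged usage sketch, not from this patch. */
	static void sketch_report_cpu_id(void)
	{
		struct cpuid id;

		get_cpu_id(&id);
		pr_info("cpu: version=%02x ident=%06x machine=%04x\n",
			id.version, id.ident, id.machine);
	}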
diff --git a/arch/s390/include/asm/scatterlist.h b/arch/s390/include/asm/scatterlist.h index 29ec8e28c8df..35d786fe93ae 100644 --- a/arch/s390/include/asm/scatterlist.h +++ b/arch/s390/include/asm/scatterlist.h | |||
@@ -1,19 +1 @@ | |||
1 | #ifndef _ASMS390_SCATTERLIST_H | #include <asm-generic/scatterlist.h> | |
2 | #define _ASMS390_SCATTERLIST_H | ||
3 | |||
4 | struct scatterlist { | ||
5 | #ifdef CONFIG_DEBUG_SG | ||
6 | unsigned long sg_magic; | ||
7 | #endif | ||
8 | unsigned long page_link; | ||
9 | unsigned int offset; | ||
10 | unsigned int length; | ||
11 | }; | ||
12 | |||
13 | #ifdef __s390x__ | ||
14 | #define ISA_DMA_THRESHOLD (0xffffffffffffffffUL) | ||
15 | #else | ||
16 | #define ISA_DMA_THRESHOLD (0xffffffffUL) | ||
17 | #endif | ||
18 | |||
19 | #endif /* _ASMS390X_SCATTERLIST_H */ | ||
diff --git a/arch/s390/include/asm/scsw.h b/arch/s390/include/asm/scsw.h new file mode 100644 index 000000000000..de389cb54d28 --- /dev/null +++ b/arch/s390/include/asm/scsw.h | |||
@@ -0,0 +1,956 @@ | |||
1 | /* | ||
2 | * Helper functions for scsw access. | ||
3 | * | ||
4 | * Copyright IBM Corp. 2008,2009 | ||
5 | * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #ifndef _ASM_S390_SCSW_H_ | ||
9 | #define _ASM_S390_SCSW_H_ | ||
10 | |||
11 | #include <linux/types.h> | ||
12 | #include <asm/chsc.h> | ||
13 | #include <asm/cio.h> | ||
14 | |||
15 | /** | ||
16 | * struct cmd_scsw - command-mode subchannel status word | ||
17 | * @key: subchannel key | ||
18 | * @sctl: suspend control | ||
19 | * @eswf: esw format | ||
20 | * @cc: deferred condition code | ||
21 | * @fmt: format | ||
22 | * @pfch: prefetch | ||
23 | * @isic: initial-status interruption control | ||
24 | * @alcc: address-limit checking control | ||
25 | * @ssi: suppress-suspended interruption | ||
26 | * @zcc: zero condition code | ||
27 | * @ectl: extended control | ||
28 | * @pno: path not operational | ||
29 | * @res: reserved | ||
30 | * @fctl: function control | ||
31 | * @actl: activity control | ||
32 | * @stctl: status control | ||
33 | * @cpa: channel program address | ||
34 | * @dstat: device status | ||
35 | * @cstat: subchannel status | ||
36 | * @count: residual count | ||
37 | */ | ||
38 | struct cmd_scsw { | ||
39 | __u32 key : 4; | ||
40 | __u32 sctl : 1; | ||
41 | __u32 eswf : 1; | ||
42 | __u32 cc : 2; | ||
43 | __u32 fmt : 1; | ||
44 | __u32 pfch : 1; | ||
45 | __u32 isic : 1; | ||
46 | __u32 alcc : 1; | ||
47 | __u32 ssi : 1; | ||
48 | __u32 zcc : 1; | ||
49 | __u32 ectl : 1; | ||
50 | __u32 pno : 1; | ||
51 | __u32 res : 1; | ||
52 | __u32 fctl : 3; | ||
53 | __u32 actl : 7; | ||
54 | __u32 stctl : 5; | ||
55 | __u32 cpa; | ||
56 | __u32 dstat : 8; | ||
57 | __u32 cstat : 8; | ||
58 | __u32 count : 16; | ||
59 | } __attribute__ ((packed)); | ||
60 | |||
61 | /** | ||
62 | * struct tm_scsw - transport-mode subchannel status word | ||
63 | * @key: subchannel key | ||
64 | * @eswf: esw format | ||
65 | * @cc: deferred condition code | ||
66 | * @fmt: format | ||
67 | * @x: IRB-format control | ||
68 | * @q: interrogate-complete | ||
69 | * @ectl: extended control | ||
70 | * @pno: path not operational | ||
71 | * @fctl: function control | ||
72 | * @actl: activity control | ||
73 | * @stctl: status control | ||
74 | * @tcw: TCW address | ||
75 | * @dstat: device status | ||
76 | * @cstat: subchannel status | ||
77 | * @fcxs: FCX status | ||
78 | * @schxs: subchannel-extended status | ||
79 | */ | ||
80 | struct tm_scsw { | ||
81 | u32 key:4; | ||
82 | u32 :1; | ||
83 | u32 eswf:1; | ||
84 | u32 cc:2; | ||
85 | u32 fmt:3; | ||
86 | u32 x:1; | ||
87 | u32 q:1; | ||
88 | u32 :1; | ||
89 | u32 ectl:1; | ||
90 | u32 pno:1; | ||
91 | u32 :1; | ||
92 | u32 fctl:3; | ||
93 | u32 actl:7; | ||
94 | u32 stctl:5; | ||
95 | u32 tcw; | ||
96 | u32 dstat:8; | ||
97 | u32 cstat:8; | ||
98 | u32 fcxs:8; | ||
99 | u32 schxs:8; | ||
100 | } __attribute__ ((packed)); | ||
101 | |||
102 | /** | ||
103 | * union scsw - subchannel status word | ||
104 | * @cmd: command-mode SCSW | ||
105 | * @tm: transport-mode SCSW | ||
106 | */ | ||
107 | union scsw { | ||
108 | struct cmd_scsw cmd; | ||
109 | struct tm_scsw tm; | ||
110 | } __attribute__ ((packed)); | ||
111 | |||
112 | #define SCSW_FCTL_CLEAR_FUNC 0x1 | ||
113 | #define SCSW_FCTL_HALT_FUNC 0x2 | ||
114 | #define SCSW_FCTL_START_FUNC 0x4 | ||
115 | |||
116 | #define SCSW_ACTL_SUSPENDED 0x1 | ||
117 | #define SCSW_ACTL_DEVACT 0x2 | ||
118 | #define SCSW_ACTL_SCHACT 0x4 | ||
119 | #define SCSW_ACTL_CLEAR_PEND 0x8 | ||
120 | #define SCSW_ACTL_HALT_PEND 0x10 | ||
121 | #define SCSW_ACTL_START_PEND 0x20 | ||
122 | #define SCSW_ACTL_RESUME_PEND 0x40 | ||
123 | |||
124 | #define SCSW_STCTL_STATUS_PEND 0x1 | ||
125 | #define SCSW_STCTL_SEC_STATUS 0x2 | ||
126 | #define SCSW_STCTL_PRIM_STATUS 0x4 | ||
127 | #define SCSW_STCTL_INTER_STATUS 0x8 | ||
128 | #define SCSW_STCTL_ALERT_STATUS 0x10 | ||
129 | |||
130 | #define DEV_STAT_ATTENTION 0x80 | ||
131 | #define DEV_STAT_STAT_MOD 0x40 | ||
132 | #define DEV_STAT_CU_END 0x20 | ||
133 | #define DEV_STAT_BUSY 0x10 | ||
134 | #define DEV_STAT_CHN_END 0x08 | ||
135 | #define DEV_STAT_DEV_END 0x04 | ||
136 | #define DEV_STAT_UNIT_CHECK 0x02 | ||
137 | #define DEV_STAT_UNIT_EXCEP 0x01 | ||
138 | |||
139 | #define SCHN_STAT_PCI 0x80 | ||
140 | #define SCHN_STAT_INCORR_LEN 0x40 | ||
141 | #define SCHN_STAT_PROG_CHECK 0x20 | ||
142 | #define SCHN_STAT_PROT_CHECK 0x10 | ||
143 | #define SCHN_STAT_CHN_DATA_CHK 0x08 | ||
144 | #define SCHN_STAT_CHN_CTRL_CHK 0x04 | ||
145 | #define SCHN_STAT_INTF_CTRL_CHK 0x02 | ||
146 | #define SCHN_STAT_CHAIN_CHECK 0x01 | ||
147 | |||
148 | /* | ||
149 | * architected values for the first sense byte | ||
150 | */ | ||
151 | #define SNS0_CMD_REJECT 0x80 | ||
152 | #define SNS_CMD_REJECT SNS0_CMD_REJECT | ||
153 | #define SNS0_INTERVENTION_REQ 0x40 | ||
154 | #define SNS0_BUS_OUT_CHECK 0x20 | ||
155 | #define SNS0_EQUIPMENT_CHECK 0x10 | ||
156 | #define SNS0_DATA_CHECK 0x08 | ||
157 | #define SNS0_OVERRUN 0x04 | ||
158 | #define SNS0_INCOMPL_DOMAIN 0x01 | ||
159 | |||
160 | /* | ||
161 | * architected values for the second sense byte | ||
162 | */ | ||
163 | #define SNS1_PERM_ERR 0x80 | ||
164 | #define SNS1_INV_TRACK_FORMAT 0x40 | ||
165 | #define SNS1_EOC 0x20 | ||
166 | #define SNS1_MESSAGE_TO_OPER 0x10 | ||
167 | #define SNS1_NO_REC_FOUND 0x08 | ||
168 | #define SNS1_FILE_PROTECTED 0x04 | ||
169 | #define SNS1_WRITE_INHIBITED 0x02 | ||
170 | #define SNS1_INPRECISE_END 0x01 | ||
171 | |||
172 | /* | ||
173 | * architected values for the third sense byte | ||
174 | */ | ||
175 | #define SNS2_REQ_INH_WRITE 0x80 | ||
176 | #define SNS2_CORRECTABLE 0x40 | ||
177 | #define SNS2_FIRST_LOG_ERR 0x20 | ||
178 | #define SNS2_ENV_DATA_PRESENT 0x10 | ||
179 | #define SNS2_INPRECISE_END 0x04 | ||
180 | |||
181 | /** | ||
182 | * scsw_is_tm - check for transport mode scsw | ||
183 | * @scsw: pointer to scsw | ||
184 | * | ||
185 | * Return non-zero if the specified scsw is a transport mode scsw, zero | ||
186 | * otherwise. | ||
187 | */ | ||
188 | static inline int scsw_is_tm(union scsw *scsw) | ||
189 | { | ||
190 | return css_general_characteristics.fcx && (scsw->tm.x == 1); | ||
191 | } | ||
192 | |||
193 | /** | ||
194 | * scsw_key - return scsw key field | ||
195 | * @scsw: pointer to scsw | ||
196 | * | ||
197 | * Return the value of the key field of the specified scsw, regardless of | ||
198 | * whether it is a transport mode or command mode scsw. | ||
199 | */ | ||
200 | static inline u32 scsw_key(union scsw *scsw) | ||
201 | { | ||
202 | if (scsw_is_tm(scsw)) | ||
203 | return scsw->tm.key; | ||
204 | else | ||
205 | return scsw->cmd.key; | ||
206 | } | ||
207 | |||
208 | /** | ||
209 | * scsw_eswf - return scsw eswf field | ||
210 | * @scsw: pointer to scsw | ||
211 | * | ||
212 | * Return the value of the eswf field of the specified scsw, regardless of | ||
213 | * whether it is a transport mode or command mode scsw. | ||
214 | */ | ||
215 | static inline u32 scsw_eswf(union scsw *scsw) | ||
216 | { | ||
217 | if (scsw_is_tm(scsw)) | ||
218 | return scsw->tm.eswf; | ||
219 | else | ||
220 | return scsw->cmd.eswf; | ||
221 | } | ||
222 | |||
223 | /** | ||
224 | * scsw_cc - return scsw cc field | ||
225 | * @scsw: pointer to scsw | ||
226 | * | ||
227 | * Return the value of the cc field of the specified scsw, regardless of | ||
228 | * whether it is a transport mode or command mode scsw. | ||
229 | */ | ||
230 | static inline u32 scsw_cc(union scsw *scsw) | ||
231 | { | ||
232 | if (scsw_is_tm(scsw)) | ||
233 | return scsw->tm.cc; | ||
234 | else | ||
235 | return scsw->cmd.cc; | ||
236 | } | ||
237 | |||
238 | /** | ||
239 | * scsw_ectl - return scsw ectl field | ||
240 | * @scsw: pointer to scsw | ||
241 | * | ||
242 | * Return the value of the ectl field of the specified scsw, regardless of | ||
243 | * whether it is a transport mode or command mode scsw. | ||
244 | */ | ||
245 | static inline u32 scsw_ectl(union scsw *scsw) | ||
246 | { | ||
247 | if (scsw_is_tm(scsw)) | ||
248 | return scsw->tm.ectl; | ||
249 | else | ||
250 | return scsw->cmd.ectl; | ||
251 | } | ||
252 | |||
253 | /** | ||
254 | * scsw_pno - return scsw pno field | ||
255 | * @scsw: pointer to scsw | ||
256 | * | ||
257 | * Return the value of the pno field of the specified scsw, regardless of | ||
258 | * whether it is a transport mode or command mode scsw. | ||
259 | */ | ||
260 | static inline u32 scsw_pno(union scsw *scsw) | ||
261 | { | ||
262 | if (scsw_is_tm(scsw)) | ||
263 | return scsw->tm.pno; | ||
264 | else | ||
265 | return scsw->cmd.pno; | ||
266 | } | ||
267 | |||
268 | /** | ||
269 | * scsw_fctl - return scsw fctl field | ||
270 | * @scsw: pointer to scsw | ||
271 | * | ||
272 | * Return the value of the fctl field of the specified scsw, regardless of | ||
273 | * whether it is a transport mode or command mode scsw. | ||
274 | */ | ||
275 | static inline u32 scsw_fctl(union scsw *scsw) | ||
276 | { | ||
277 | if (scsw_is_tm(scsw)) | ||
278 | return scsw->tm.fctl; | ||
279 | else | ||
280 | return scsw->cmd.fctl; | ||
281 | } | ||
282 | |||
283 | /** | ||
284 | * scsw_actl - return scsw actl field | ||
285 | * @scsw: pointer to scsw | ||
286 | * | ||
287 | * Return the value of the actl field of the specified scsw, regardless of | ||
288 | * whether it is a transport mode or command mode scsw. | ||
289 | */ | ||
290 | static inline u32 scsw_actl(union scsw *scsw) | ||
291 | { | ||
292 | if (scsw_is_tm(scsw)) | ||
293 | return scsw->tm.actl; | ||
294 | else | ||
295 | return scsw->cmd.actl; | ||
296 | } | ||
297 | |||
298 | /** | ||
299 | * scsw_stctl - return scsw stctl field | ||
300 | * @scsw: pointer to scsw | ||
301 | * | ||
302 | * Return the value of the stctl field of the specified scsw, regardless of | ||
303 | * whether it is a transport mode or command mode scsw. | ||
304 | */ | ||
305 | static inline u32 scsw_stctl(union scsw *scsw) | ||
306 | { | ||
307 | if (scsw_is_tm(scsw)) | ||
308 | return scsw->tm.stctl; | ||
309 | else | ||
310 | return scsw->cmd.stctl; | ||
311 | } | ||
312 | |||
313 | /** | ||
314 | * scsw_dstat - return scsw dstat field | ||
315 | * @scsw: pointer to scsw | ||
316 | * | ||
317 | * Return the value of the dstat field of the specified scsw, regardless of | ||
318 | * whether it is a transport mode or command mode scsw. | ||
319 | */ | ||
320 | static inline u32 scsw_dstat(union scsw *scsw) | ||
321 | { | ||
322 | if (scsw_is_tm(scsw)) | ||
323 | return scsw->tm.dstat; | ||
324 | else | ||
325 | return scsw->cmd.dstat; | ||
326 | } | ||
327 | |||
328 | /** | ||
329 | * scsw_cstat - return scsw cstat field | ||
330 | * @scsw: pointer to scsw | ||
331 | * | ||
332 | * Return the value of the cstat field of the specified scsw, regardless of | ||
333 | * whether it is a transport mode or command mode scsw. | ||
334 | */ | ||
335 | static inline u32 scsw_cstat(union scsw *scsw) | ||
336 | { | ||
337 | if (scsw_is_tm(scsw)) | ||
338 | return scsw->tm.cstat; | ||
339 | else | ||
340 | return scsw->cmd.cstat; | ||
341 | } | ||
342 | |||
343 | /** | ||
344 | * scsw_cmd_is_valid_key - check key field validity | ||
345 | * @scsw: pointer to scsw | ||
346 | * | ||
347 | * Return non-zero if the key field of the specified command mode scsw is | ||
348 | * valid, zero otherwise. | ||
349 | */ | ||
350 | static inline int scsw_cmd_is_valid_key(union scsw *scsw) | ||
351 | { | ||
352 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); | ||
353 | } | ||
354 | |||
355 | /** | ||
356 | * scsw_cmd_is_valid_sctl - check fctl field validity | ||
357 | * @scsw: pointer to scsw | ||
358 | * | ||
359 | * Return non-zero if the fctl field of the specified command mode scsw is | ||
360 | * valid, zero otherwise. | ||
361 | */ | ||
362 | static inline int scsw_cmd_is_valid_sctl(union scsw *scsw) | ||
363 | { | ||
364 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); | ||
365 | } | ||
366 | |||
367 | /** | ||
368 | * scsw_cmd_is_valid_eswf - check eswf field validity | ||
369 | * @scsw: pointer to scsw | ||
370 | * | ||
371 | * Return non-zero if the eswf field of the specified command mode scsw is | ||
372 | * valid, zero otherwise. | ||
373 | */ | ||
374 | static inline int scsw_cmd_is_valid_eswf(union scsw *scsw) | ||
375 | { | ||
376 | return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND); | ||
377 | } | ||
378 | |||
379 | /** | ||
380 | * scsw_cmd_is_valid_cc - check cc field validity | ||
381 | * @scsw: pointer to scsw | ||
382 | * | ||
383 | * Return non-zero if the cc field of the specified command mode scsw is | ||
384 | * valid, zero otherwise. | ||
385 | */ | ||
386 | static inline int scsw_cmd_is_valid_cc(union scsw *scsw) | ||
387 | { | ||
388 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) && | ||
389 | (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND); | ||
390 | } | ||
391 | |||
392 | /** | ||
393 | * scsw_cmd_is_valid_fmt - check fmt field validity | ||
394 | * @scsw: pointer to scsw | ||
395 | * | ||
396 | * Return non-zero if the fmt field of the specified command mode scsw is | ||
397 | * valid, zero otherwise. | ||
398 | */ | ||
399 | static inline int scsw_cmd_is_valid_fmt(union scsw *scsw) | ||
400 | { | ||
401 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); | ||
402 | } | ||
403 | |||
404 | /** | ||
405 | * scsw_cmd_is_valid_pfch - check pfch field validity | ||
406 | * @scsw: pointer to scsw | ||
407 | * | ||
408 | * Return non-zero if the pfch field of the specified command mode scsw is | ||
409 | * valid, zero otherwise. | ||
410 | */ | ||
411 | static inline int scsw_cmd_is_valid_pfch(union scsw *scsw) | ||
412 | { | ||
413 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); | ||
414 | } | ||
415 | |||
416 | /** | ||
417 | * scsw_cmd_is_valid_isic - check isic field validity | ||
418 | * @scsw: pointer to scsw | ||
419 | * | ||
420 | * Return non-zero if the isic field of the specified command mode scsw is | ||
421 | * valid, zero otherwise. | ||
422 | */ | ||
423 | static inline int scsw_cmd_is_valid_isic(union scsw *scsw) | ||
424 | { | ||
425 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); | ||
426 | } | ||
427 | |||
428 | /** | ||
429 | * scsw_cmd_is_valid_alcc - check alcc field validity | ||
430 | * @scsw: pointer to scsw | ||
431 | * | ||
432 | * Return non-zero if the alcc field of the specified command mode scsw is | ||
433 | * valid, zero otherwise. | ||
434 | */ | ||
435 | static inline int scsw_cmd_is_valid_alcc(union scsw *scsw) | ||
436 | { | ||
437 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); | ||
438 | } | ||
439 | |||
440 | /** | ||
441 | * scsw_cmd_is_valid_ssi - check ssi field validity | ||
442 | * @scsw: pointer to scsw | ||
443 | * | ||
444 | * Return non-zero if the ssi field of the specified command mode scsw is | ||
445 | * valid, zero otherwise. | ||
446 | */ | ||
447 | static inline int scsw_cmd_is_valid_ssi(union scsw *scsw) | ||
448 | { | ||
449 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); | ||
450 | } | ||
451 | |||
452 | /** | ||
453 | * scsw_cmd_is_valid_zcc - check zcc field validity | ||
454 | * @scsw: pointer to scsw | ||
455 | * | ||
456 | * Return non-zero if the zcc field of the specified command mode scsw is | ||
457 | * valid, zero otherwise. | ||
458 | */ | ||
459 | static inline int scsw_cmd_is_valid_zcc(union scsw *scsw) | ||
460 | { | ||
461 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) && | ||
462 | (scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS); | ||
463 | } | ||
464 | |||
465 | /** | ||
466 | * scsw_cmd_is_valid_ectl - check ectl field validity | ||
467 | * @scsw: pointer to scsw | ||
468 | * | ||
469 | * Return non-zero if the ectl field of the specified command mode scsw is | ||
470 | * valid, zero otherwise. | ||
471 | */ | ||
472 | static inline int scsw_cmd_is_valid_ectl(union scsw *scsw) | ||
473 | { | ||
474 | return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && | ||
475 | !(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) && | ||
476 | (scsw->cmd.stctl & SCSW_STCTL_ALERT_STATUS); | ||
477 | } | ||
478 | |||
479 | /** | ||
480 | * scsw_cmd_is_valid_pno - check pno field validity | ||
481 | * @scsw: pointer to scsw | ||
482 | * | ||
483 | * Return non-zero if the pno field of the specified command mode scsw is | ||
484 | * valid, zero otherwise. | ||
485 | */ | ||
486 | static inline int scsw_cmd_is_valid_pno(union scsw *scsw) | ||
487 | { | ||
488 | return (scsw->cmd.fctl != 0) && | ||
489 | (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && | ||
490 | (!(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) || | ||
491 | ((scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) && | ||
492 | (scsw->cmd.actl & SCSW_ACTL_SUSPENDED))); | ||
493 | } | ||
494 | |||
495 | /** | ||
496 | * scsw_cmd_is_valid_fctl - check fctl field validity | ||
497 | * @scsw: pointer to scsw | ||
498 | * | ||
499 | * Return non-zero if the fctl field of the specified command mode scsw is | ||
500 | * valid, zero otherwise. | ||
501 | */ | ||
502 | static inline int scsw_cmd_is_valid_fctl(union scsw *scsw) | ||
503 | { | ||
504 | /* Only valid if pmcw.dnv == 1 */ | ||
505 | return 1; | ||
506 | } | ||
507 | |||
508 | /** | ||
509 | * scsw_cmd_is_valid_actl - check actl field validity | ||
510 | * @scsw: pointer to scsw | ||
511 | * | ||
512 | * Return non-zero if the actl field of the specified command mode scsw is | ||
513 | * valid, zero otherwise. | ||
514 | */ | ||
515 | static inline int scsw_cmd_is_valid_actl(union scsw *scsw) | ||
516 | { | ||
517 | /* Only valid if pmcw.dnv == 1 */ | ||
518 | return 1; | ||
519 | } | ||
520 | |||
521 | /** | ||
522 | * scsw_cmd_is_valid_stctl - check stctl field validity | ||
523 | * @scsw: pointer to scsw | ||
524 | * | ||
525 | * Return non-zero if the stctl field of the specified command mode scsw is | ||
526 | * valid, zero otherwise. | ||
527 | */ | ||
528 | static inline int scsw_cmd_is_valid_stctl(union scsw *scsw) | ||
529 | { | ||
530 | /* Only valid if pmcw.dnv == 1 */ | ||
531 | return 1; | ||
532 | } | ||
533 | |||
534 | /** | ||
535 | * scsw_cmd_is_valid_dstat - check dstat field validity | ||
536 | * @scsw: pointer to scsw | ||
537 | * | ||
538 | * Return non-zero if the dstat field of the specified command mode scsw is | ||
539 | * valid, zero otherwise. | ||
540 | */ | ||
541 | static inline int scsw_cmd_is_valid_dstat(union scsw *scsw) | ||
542 | { | ||
543 | return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && | ||
544 | (scsw->cmd.cc != 3); | ||
545 | } | ||
546 | |||
547 | /** | ||
548 | * scsw_cmd_is_valid_cstat - check cstat field validity | ||
549 | * @scsw: pointer to scsw | ||
550 | * | ||
551 | * Return non-zero if the cstat field of the specified command mode scsw is | ||
552 | * valid, zero otherwise. | ||
553 | */ | ||
554 | static inline int scsw_cmd_is_valid_cstat(union scsw *scsw) | ||
555 | { | ||
556 | return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && | ||
557 | (scsw->cmd.cc != 3); | ||
558 | } | ||
559 | |||
560 | /** | ||
561 | * scsw_tm_is_valid_key - check key field validity | ||
562 | * @scsw: pointer to scsw | ||
563 | * | ||
564 | * Return non-zero if the key field of the specified transport mode scsw is | ||
565 | * valid, zero otherwise. | ||
566 | */ | ||
567 | static inline int scsw_tm_is_valid_key(union scsw *scsw) | ||
568 | { | ||
569 | return (scsw->tm.fctl & SCSW_FCTL_START_FUNC); | ||
570 | } | ||
571 | |||
572 | /** | ||
573 | * scsw_tm_is_valid_eswf - check eswf field validity | ||
574 | * @scsw: pointer to scsw | ||
575 | * | ||
576 | * Return non-zero if the eswf field of the specified transport mode scsw is | ||
577 | * valid, zero otherwise. | ||
578 | */ | ||
579 | static inline int scsw_tm_is_valid_eswf(union scsw *scsw) | ||
580 | { | ||
581 | return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND); | ||
582 | } | ||
583 | |||
584 | /** | ||
585 | * scsw_tm_is_valid_cc - check cc field validity | ||
586 | * @scsw: pointer to scsw | ||
587 | * | ||
588 | * Return non-zero if the cc field of the specified transport mode scsw is | ||
589 | * valid, zero otherwise. | ||
590 | */ | ||
591 | static inline int scsw_tm_is_valid_cc(union scsw *scsw) | ||
592 | { | ||
593 | return (scsw->tm.fctl & SCSW_FCTL_START_FUNC) && | ||
594 | (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND); | ||
595 | } | ||
596 | |||
597 | /** | ||
598 | * scsw_tm_is_valid_fmt - check fmt field validity | ||
599 | * @scsw: pointer to scsw | ||
600 | * | ||
601 | * Return non-zero if the fmt field of the specified transport mode scsw is | ||
602 | * valid, zero otherwise. | ||
603 | */ | ||
604 | static inline int scsw_tm_is_valid_fmt(union scsw *scsw) | ||
605 | { | ||
606 | return 1; | ||
607 | } | ||
608 | |||
609 | /** | ||
610 | * scsw_tm_is_valid_x - check x field validity | ||
611 | * @scsw: pointer to scsw | ||
612 | * | ||
613 | * Return non-zero if the x field of the specified transport mode scsw is | ||
614 | * valid, zero otherwise. | ||
615 | */ | ||
616 | static inline int scsw_tm_is_valid_x(union scsw *scsw) | ||
617 | { | ||
618 | return 1; | ||
619 | } | ||
620 | |||
621 | /** | ||
622 | * scsw_tm_is_valid_q - check q field validity | ||
623 | * @scsw: pointer to scsw | ||
624 | * | ||
625 | * Return non-zero if the q field of the specified transport mode scsw is | ||
626 | * valid, zero otherwise. | ||
627 | */ | ||
628 | static inline int scsw_tm_is_valid_q(union scsw *scsw) | ||
629 | { | ||
630 | return 1; | ||
631 | } | ||
632 | |||
633 | /** | ||
634 | * scsw_tm_is_valid_ectl - check ectl field validity | ||
635 | * @scsw: pointer to scsw | ||
636 | * | ||
637 | * Return non-zero if the ectl field of the specified transport mode scsw is | ||
638 | * valid, zero otherwise. | ||
639 | */ | ||
640 | static inline int scsw_tm_is_valid_ectl(union scsw *scsw) | ||
641 | { | ||
642 | return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && | ||
643 | !(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) && | ||
644 | (scsw->tm.stctl & SCSW_STCTL_ALERT_STATUS); | ||
645 | } | ||
646 | |||
647 | /** | ||
648 | * scsw_tm_is_valid_pno - check pno field validity | ||
649 | * @scsw: pointer to scsw | ||
650 | * | ||
651 | * Return non-zero if the pno field of the specified transport mode scsw is | ||
652 | * valid, zero otherwise. | ||
653 | */ | ||
654 | static inline int scsw_tm_is_valid_pno(union scsw *scsw) | ||
655 | { | ||
656 | return (scsw->tm.fctl != 0) && | ||
657 | (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && | ||
658 | (!(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) || | ||
659 | ((scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) && | ||
660 | (scsw->tm.actl & SCSW_ACTL_SUSPENDED))); | ||
661 | } | ||
662 | |||
663 | /** | ||
664 | * scsw_tm_is_valid_fctl - check fctl field validity | ||
665 | * @scsw: pointer to scsw | ||
666 | * | ||
667 | * Return non-zero if the fctl field of the specified transport mode scsw is | ||
668 | * valid, zero otherwise. | ||
669 | */ | ||
670 | static inline int scsw_tm_is_valid_fctl(union scsw *scsw) | ||
671 | { | ||
672 | /* Only valid if pmcw.dnv == 1 */ | ||
673 | return 1; | ||
674 | } | ||
675 | |||
676 | /** | ||
677 | * scsw_tm_is_valid_actl - check actl field validity | ||
678 | * @scsw: pointer to scsw | ||
679 | * | ||
680 | * Return non-zero if the actl field of the specified transport mode scsw is | ||
681 | * valid, zero otherwise. | ||
682 | */ | ||
683 | static inline int scsw_tm_is_valid_actl(union scsw *scsw) | ||
684 | { | ||
685 | /* Only valid if pmcw.dnv == 1 */ | ||
686 | return 1; | ||
687 | } | ||
688 | |||
689 | /** | ||
690 | * scsw_tm_is_valid_stctl - check stctl field validity | ||
691 | * @scsw: pointer to scsw | ||
692 | * | ||
693 | * Return non-zero if the stctl field of the specified transport mode scsw is | ||
694 | * valid, zero otherwise. | ||
695 | */ | ||
696 | static inline int scsw_tm_is_valid_stctl(union scsw *scsw) | ||
697 | { | ||
698 | /* Only valid if pmcw.dnv == 1 */ | ||
699 | return 1; | ||
700 | } | ||
701 | |||
702 | /** | ||
703 | * scsw_tm_is_valid_dstat - check dstat field validity | ||
704 | * @scsw: pointer to scsw | ||
705 | * | ||
706 | * Return non-zero if the dstat field of the specified transport mode scsw is | ||
707 | * valid, zero otherwise. | ||
708 | */ | ||
709 | static inline int scsw_tm_is_valid_dstat(union scsw *scsw) | ||
710 | { | ||
711 | return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && | ||
712 | (scsw->tm.cc != 3); | ||
713 | } | ||
714 | |||
715 | /** | ||
716 | * scsw_tm_is_valid_cstat - check cstat field validity | ||
717 | * @scsw: pointer to scsw | ||
718 | * | ||
719 | * Return non-zero if the cstat field of the specified transport mode scsw is | ||
720 | * valid, zero otherwise. | ||
721 | */ | ||
722 | static inline int scsw_tm_is_valid_cstat(union scsw *scsw) | ||
723 | { | ||
724 | return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && | ||
725 | (scsw->tm.cc != 3); | ||
726 | } | ||
727 | |||
728 | /** | ||
729 | * scsw_tm_is_valid_fcxs - check fcxs field validity | ||
730 | * @scsw: pointer to scsw | ||
731 | * | ||
732 | * Return non-zero if the fcxs field of the specified transport mode scsw is | ||
733 | * valid, zero otherwise. | ||
734 | */ | ||
735 | static inline int scsw_tm_is_valid_fcxs(union scsw *scsw) | ||
736 | { | ||
737 | return 1; | ||
738 | } | ||
739 | |||
740 | /** | ||
741 | * scsw_tm_is_valid_schxs - check schxs field validity | ||
742 | * @scsw: pointer to scsw | ||
743 | * | ||
744 | * Return non-zero if the schxs field of the specified transport mode scsw is | ||
745 | * valid, zero otherwise. | ||
746 | */ | ||
747 | static inline int scsw_tm_is_valid_schxs(union scsw *scsw) | ||
748 | { | ||
749 | return (scsw->tm.cstat & (SCHN_STAT_PROG_CHECK | | ||
750 | SCHN_STAT_INTF_CTRL_CHK | | ||
751 | SCHN_STAT_PROT_CHECK | | ||
752 | SCHN_STAT_CHN_DATA_CHK)); | ||
753 | } | ||
754 | |||
755 | /** | ||
756 | * scsw_is_valid_actl - check actl field validity | ||
757 | * @scsw: pointer to scsw | ||
758 | * | ||
759 | * Return non-zero if the actl field of the specified scsw is valid, | ||
760 | * regardless of whether it is a transport mode or command mode scsw. | ||
761 | * Return zero if the field does not contain a valid value. | ||
762 | */ | ||
763 | static inline int scsw_is_valid_actl(union scsw *scsw) | ||
764 | { | ||
765 | if (scsw_is_tm(scsw)) | ||
766 | return scsw_tm_is_valid_actl(scsw); | ||
767 | else | ||
768 | return scsw_cmd_is_valid_actl(scsw); | ||
769 | } | ||
770 | |||
771 | /** | ||
772 | * scsw_is_valid_cc - check cc field validity | ||
773 | * @scsw: pointer to scsw | ||
774 | * | ||
775 | * Return non-zero if the cc field of the specified scsw is valid, | ||
776 | * regardless of whether it is a transport mode or command mode scsw. | ||
777 | * Return zero if the field does not contain a valid value. | ||
778 | */ | ||
779 | static inline int scsw_is_valid_cc(union scsw *scsw) | ||
780 | { | ||
781 | if (scsw_is_tm(scsw)) | ||
782 | return scsw_tm_is_valid_cc(scsw); | ||
783 | else | ||
784 | return scsw_cmd_is_valid_cc(scsw); | ||
785 | } | ||
786 | |||
787 | /** | ||
788 | * scsw_is_valid_cstat - check cstat field validity | ||
789 | * @scsw: pointer to scsw | ||
790 | * | ||
791 | * Return non-zero if the cstat field of the specified scsw is valid, | ||
792 | * regardless of whether it is a transport mode or command mode scsw. | ||
793 | * Return zero if the field does not contain a valid value. | ||
794 | */ | ||
795 | static inline int scsw_is_valid_cstat(union scsw *scsw) | ||
796 | { | ||
797 | if (scsw_is_tm(scsw)) | ||
798 | return scsw_tm_is_valid_cstat(scsw); | ||
799 | else | ||
800 | return scsw_cmd_is_valid_cstat(scsw); | ||
801 | } | ||
802 | |||
803 | /** | ||
804 | * scsw_is_valid_dstat - check dstat field validity | ||
805 | * @scsw: pointer to scsw | ||
806 | * | ||
807 | * Return non-zero if the dstat field of the specified scsw is valid, | ||
808 | * regardless of whether it is a transport mode or command mode scsw. | ||
809 | * Return zero if the field does not contain a valid value. | ||
810 | */ | ||
811 | static inline int scsw_is_valid_dstat(union scsw *scsw) | ||
812 | { | ||
813 | if (scsw_is_tm(scsw)) | ||
814 | return scsw_tm_is_valid_dstat(scsw); | ||
815 | else | ||
816 | return scsw_cmd_is_valid_dstat(scsw); | ||
817 | } | ||
818 | |||
819 | /** | ||
820 | * scsw_is_valid_ectl - check ectl field validity | ||
821 | * @scsw: pointer to scsw | ||
822 | * | ||
823 | * Return non-zero if the ectl field of the specified scsw is valid, | ||
824 | * regardless of whether it is a transport mode or command mode scsw. | ||
825 | * Return zero if the field does not contain a valid value. | ||
826 | */ | ||
827 | static inline int scsw_is_valid_ectl(union scsw *scsw) | ||
828 | { | ||
829 | if (scsw_is_tm(scsw)) | ||
830 | return scsw_tm_is_valid_ectl(scsw); | ||
831 | else | ||
832 | return scsw_cmd_is_valid_ectl(scsw); | ||
833 | } | ||
834 | |||
835 | /** | ||
836 | * scsw_is_valid_eswf - check eswf field validity | ||
837 | * @scsw: pointer to scsw | ||
838 | * | ||
839 | * Return non-zero if the eswf field of the specified scsw is valid, | ||
840 | * regardless of whether it is a transport mode or command mode scsw. | ||
841 | * Return zero if the field does not contain a valid value. | ||
842 | */ | ||
843 | static inline int scsw_is_valid_eswf(union scsw *scsw) | ||
844 | { | ||
845 | if (scsw_is_tm(scsw)) | ||
846 | return scsw_tm_is_valid_eswf(scsw); | ||
847 | else | ||
848 | return scsw_cmd_is_valid_eswf(scsw); | ||
849 | } | ||
850 | |||
851 | /** | ||
852 | * scsw_is_valid_fctl - check fctl field validity | ||
853 | * @scsw: pointer to scsw | ||
854 | * | ||
855 | * Return non-zero if the fctl field of the specified scsw is valid, | ||
856 | * regardless of whether it is a transport mode or command mode scsw. | ||
857 | * Return zero if the field does not contain a valid value. | ||
858 | */ | ||
859 | static inline int scsw_is_valid_fctl(union scsw *scsw) | ||
860 | { | ||
861 | if (scsw_is_tm(scsw)) | ||
862 | return scsw_tm_is_valid_fctl(scsw); | ||
863 | else | ||
864 | return scsw_cmd_is_valid_fctl(scsw); | ||
865 | } | ||
866 | |||
867 | /** | ||
868 | * scsw_is_valid_key - check key field validity | ||
869 | * @scsw: pointer to scsw | ||
870 | * | ||
871 | * Return non-zero if the key field of the specified scsw is valid, | ||
872 | * regardless of whether it is a transport mode or command mode scsw. | ||
873 | * Return zero if the field does not contain a valid value. | ||
874 | */ | ||
875 | static inline int scsw_is_valid_key(union scsw *scsw) | ||
876 | { | ||
877 | if (scsw_is_tm(scsw)) | ||
878 | return scsw_tm_is_valid_key(scsw); | ||
879 | else | ||
880 | return scsw_cmd_is_valid_key(scsw); | ||
881 | } | ||
882 | |||
883 | /** | ||
884 | * scsw_is_valid_pno - check pno field validity | ||
885 | * @scsw: pointer to scsw | ||
886 | * | ||
887 | * Return non-zero if the pno field of the specified scsw is valid, | ||
888 | * regardless of whether it is a transport mode or command mode scsw. | ||
889 | * Return zero if the field does not contain a valid value. | ||
890 | */ | ||
891 | static inline int scsw_is_valid_pno(union scsw *scsw) | ||
892 | { | ||
893 | if (scsw_is_tm(scsw)) | ||
894 | return scsw_tm_is_valid_pno(scsw); | ||
895 | else | ||
896 | return scsw_cmd_is_valid_pno(scsw); | ||
897 | } | ||
898 | |||
899 | /** | ||
900 | * scsw_is_valid_stctl - check stctl field validity | ||
901 | * @scsw: pointer to scsw | ||
902 | * | ||
903 | * Return non-zero if the stctl field of the specified scsw is valid, | ||
904 | * regardless of whether it is a transport mode or command mode scsw. | ||
905 | * Return zero if the field does not contain a valid value. | ||
906 | */ | ||
907 | static inline int scsw_is_valid_stctl(union scsw *scsw) | ||
908 | { | ||
909 | if (scsw_is_tm(scsw)) | ||
910 | return scsw_tm_is_valid_stctl(scsw); | ||
911 | else | ||
912 | return scsw_cmd_is_valid_stctl(scsw); | ||
913 | } | ||
914 | |||
915 | /** | ||
916 | * scsw_cmd_is_solicited - check for solicited scsw | ||
917 | * @scsw: pointer to scsw | ||
918 | * | ||
919 | * Return non-zero if the command mode scsw indicates that the associated | ||
920 | * status condition is solicited, zero if it is unsolicited. | ||
921 | */ | ||
922 | static inline int scsw_cmd_is_solicited(union scsw *scsw) | ||
923 | { | ||
924 | return (scsw->cmd.cc != 0) || (scsw->cmd.stctl != | ||
925 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)); | ||
926 | } | ||
927 | |||
928 | /** | ||
929 | * scsw_tm_is_solicited - check for solicited scsw | ||
930 | * @scsw: pointer to scsw | ||
931 | * | ||
932 | * Return non-zero if the transport mode scsw indicates that the associated | ||
933 | * status condition is solicited, zero if it is unsolicited. | ||
934 | */ | ||
935 | static inline int scsw_tm_is_solicited(union scsw *scsw) | ||
936 | { | ||
937 | return (scsw->tm.cc != 0) || (scsw->tm.stctl != | ||
938 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)); | ||
939 | } | ||
940 | |||
941 | /** | ||
942 | * scsw_is_solicited - check for solicited scsw | ||
943 | * @scsw: pointer to scsw | ||
944 | * | ||
945 | * Return non-zero if the transport or command mode scsw indicates that the | ||
946 | * associated status condition is solicited, zero if it is unsolicited. | ||
947 | */ | ||
948 | static inline int scsw_is_solicited(union scsw *scsw) | ||
949 | { | ||
950 | if (scsw_is_tm(scsw)) | ||
951 | return scsw_tm_is_solicited(scsw); | ||
952 | else | ||
953 | return scsw_cmd_is_solicited(scsw); | ||
954 | } | ||
955 | |||
956 | #endif /* _ASM_S390_SCSW_H_ */ | ||
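Taken together, the mode check, the accessors, and the *_is_valid_* predicates give interrupt handlers a mode-agnostic way to inspect status. A hedged consumer sketch using only symbols defined in this header (the function name is illustrative):

	/* Hedged sketch, not from this patch: mode-agnostic device-end test. */
	static int sketch_device_end_pending(union scsw *scsw)
	{
		if (!(scsw_stctl(scsw) & SCSW_STCTL_STATUS_PEND))
			return 0;		/* no status pending at all */
		if (!scsw_is_valid_dstat(scsw))
			return 0;		/* dstat must not be trusted */
		return scsw_dstat(scsw) & DEV_STAT_DEV_END;
	}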
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h index 38b0fc221ed7..e37478e87286 100644 --- a/arch/s390/include/asm/setup.h +++ b/arch/s390/include/asm/setup.h | |||
@@ -8,7 +8,7 @@ | |||
8 | #ifndef _ASM_S390_SETUP_H | 8 | #ifndef _ASM_S390_SETUP_H |
9 | #define _ASM_S390_SETUP_H | 9 | #define _ASM_S390_SETUP_H |
10 | 10 | ||
11 | #define COMMAND_LINE_SIZE 1024 | 11 | #define COMMAND_LINE_SIZE 4096 |
12 | 12 | ||
13 | #define ARCH_COMMAND_LINE_SIZE 896 | 13 | #define ARCH_COMMAND_LINE_SIZE 896 |
14 | 14 | ||
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h index 72137bc907ac..c991fe6473c9 100644 --- a/arch/s390/include/asm/smp.h +++ b/arch/s390/include/asm/smp.h | |||
@@ -51,32 +51,7 @@ extern void machine_power_off_smp(void); | |||
51 | #define PROC_CHANGE_PENALTY 20 /* Schedule penalty */ | 51 | #define PROC_CHANGE_PENALTY 20 /* Schedule penalty */ |
52 | 52 | ||
53 | #define raw_smp_processor_id() (S390_lowcore.cpu_nr) | 53 | #define raw_smp_processor_id() (S390_lowcore.cpu_nr) |
54 | 54 | #define cpu_logical_map(cpu) (cpu) | |
55 | /* | ||
56 | * returns 1 if cpu is in stopped/check stopped state or not operational | ||
57 | * returns 0 otherwise | ||
58 | */ | ||
59 | static inline int | ||
60 | smp_cpu_not_running(int cpu) | ||
61 | { | ||
62 | __u32 status; | ||
63 | |||
64 | switch (signal_processor_ps(&status, 0, cpu, sigp_sense)) { | ||
65 | case sigp_order_code_accepted: | ||
66 | case sigp_status_stored: | ||
67 | /* Check for stopped and check stop state */ | ||
68 | if (status & 0x50) | ||
69 | return 1; | ||
70 | break; | ||
71 | case sigp_not_operational: | ||
72 | return 1; | ||
73 | default: | ||
74 | break; | ||
75 | } | ||
76 | return 0; | ||
77 | } | ||
78 | |||
79 | #define cpu_logical_map(cpu) (cpu) | ||
80 | 55 | ||
81 | extern int __cpu_disable (void); | 56 | extern int __cpu_disable (void); |
82 | extern void __cpu_die (unsigned int cpu); | 57 | extern void __cpu_die (unsigned int cpu); |
@@ -91,11 +66,6 @@ extern void arch_send_call_function_ipi(cpumask_t mask); | |||
91 | 66 | ||
92 | #endif | 67 | #endif |
93 | 68 | ||
94 | #ifndef CONFIG_SMP | ||
95 | #define hard_smp_processor_id() 0 | ||
96 | #define smp_cpu_not_running(cpu) 1 | ||
97 | #endif | ||
98 | |||
99 | #ifdef CONFIG_HOTPLUG_CPU | 69 | #ifdef CONFIG_HOTPLUG_CPU |
100 | extern int smp_rescan_cpus(void); | 70 | extern int smp_rescan_cpus(void); |
101 | #else | 71 | #else |
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h index c9af0d19c7ab..41ce6861174e 100644 --- a/arch/s390/include/asm/spinlock.h +++ b/arch/s390/include/asm/spinlock.h | |||
@@ -191,4 +191,33 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) | |||
191 | #define _raw_read_relax(lock) cpu_relax() | 191 | #define _raw_read_relax(lock) cpu_relax() |
192 | #define _raw_write_relax(lock) cpu_relax() | 192 | #define _raw_write_relax(lock) cpu_relax() |
193 | 193 | ||
194 | #define __always_inline__spin_lock | ||
195 | #define __always_inline__read_lock | ||
196 | #define __always_inline__write_lock | ||
197 | #define __always_inline__spin_lock_bh | ||
198 | #define __always_inline__read_lock_bh | ||
199 | #define __always_inline__write_lock_bh | ||
200 | #define __always_inline__spin_lock_irq | ||
201 | #define __always_inline__read_lock_irq | ||
202 | #define __always_inline__write_lock_irq | ||
203 | #define __always_inline__spin_lock_irqsave | ||
204 | #define __always_inline__read_lock_irqsave | ||
205 | #define __always_inline__write_lock_irqsave | ||
206 | #define __always_inline__spin_trylock | ||
207 | #define __always_inline__read_trylock | ||
208 | #define __always_inline__write_trylock | ||
209 | #define __always_inline__spin_trylock_bh | ||
210 | #define __always_inline__spin_unlock | ||
211 | #define __always_inline__read_unlock | ||
212 | #define __always_inline__write_unlock | ||
213 | #define __always_inline__spin_unlock_bh | ||
214 | #define __always_inline__read_unlock_bh | ||
215 | #define __always_inline__write_unlock_bh | ||
216 | #define __always_inline__spin_unlock_irq | ||
217 | #define __always_inline__read_unlock_irq | ||
218 | #define __always_inline__write_unlock_irq | ||
219 | #define __always_inline__spin_unlock_irqrestore | ||
220 | #define __always_inline__read_unlock_irqrestore | ||
221 | #define __always_inline__write_unlock_irqrestore | ||
222 | |||
194 | #endif /* __ASM_SPINLOCK_H */ | 223 | #endif /* __ASM_SPINLOCK_H */ |
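These symbols let an architecture opt individual lock operations into inlining; on s390 the compare-and-swap based lock bodies are small enough that out-of-line calls cost more than they save. An illustrative sketch of the opt-in mechanism follows — the my_* names are hypothetical, and the authoritative consumer of these defines is the generic locking code of this kernel series:

	/* Illustrative sketch with hypothetical names; not the kernel's exact code. */
	#ifdef __always_inline__spin_lock
	/* arch opted in: resolve the API name to an always-inlined body */
	#define my_spin_lock(lock)	__my_spin_lock_inline(lock)
	#else
	/* default: one shared out-of-line implementation */
	extern void my_spin_lock(struct my_lock *lock);
	#endif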
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h index 4fb83c1cdb77..379661d2f81a 100644 --- a/arch/s390/include/asm/system.h +++ b/arch/s390/include/asm/system.h | |||
@@ -109,11 +109,7 @@ extern void pfault_fini(void); | |||
109 | #define pfault_fini() do { } while (0) | 109 | #define pfault_fini() do { } while (0) |
110 | #endif /* CONFIG_PFAULT */ | 110 | #endif /* CONFIG_PFAULT */ |
111 | 111 | ||
112 | #ifdef CONFIG_PAGE_STATES | ||
113 | extern void cmma_init(void); | 112 | extern void cmma_init(void); |
114 | #else | ||
115 | static inline void cmma_init(void) { } | ||
116 | #endif | ||
117 | 113 | ||
118 | #define finish_arch_switch(prev) do { \ | 114 | #define finish_arch_switch(prev) do { \ |
119 | set_fs(current->thread.mm_segment); \ | 115 | set_fs(current->thread.mm_segment); \ |
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h index ba1cab9fc1f9..07eb61b2fb3a 100644 --- a/arch/s390/include/asm/thread_info.h +++ b/arch/s390/include/asm/thread_info.h | |||
@@ -92,7 +92,7 @@ static inline struct thread_info *current_thread_info(void) | |||
92 | #define TIF_SYSCALL_TRACE 8 /* syscall trace active */ | 92 | #define TIF_SYSCALL_TRACE 8 /* syscall trace active */ |
93 | #define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */ | 93 | #define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */ |
94 | #define TIF_SECCOMP 10 /* secure computing */ | 94 | #define TIF_SECCOMP 10 /* secure computing */ |
95 | #define TIF_SYSCALL_FTRACE 11 /* ftrace syscall instrumentation */ | 95 | #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */ |
96 | #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ | 96 | #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ |
97 | #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling | 97 | #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling |
98 | TIF_NEED_RESCHED */ | 98 | TIF_NEED_RESCHED */ |
@@ -111,7 +111,7 @@ static inline struct thread_info *current_thread_info(void) | |||
111 | #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) | 111 | #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) |
112 | #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) | 112 | #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) |
113 | #define _TIF_SECCOMP (1<<TIF_SECCOMP) | 113 | #define _TIF_SECCOMP (1<<TIF_SECCOMP) |
114 | #define _TIF_SYSCALL_FTRACE (1<<TIF_SYSCALL_FTRACE) | 114 | #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) |
115 | #define _TIF_USEDFPU (1<<TIF_USEDFPU) | 115 | #define _TIF_USEDFPU (1<<TIF_USEDFPU) |
116 | #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) | 116 | #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) |
117 | #define _TIF_31BIT (1<<TIF_31BIT) | 117 | #define _TIF_31BIT (1<<TIF_31BIT) |
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h index cc21e3e20fd7..24aa1cda20ad 100644 --- a/arch/s390/include/asm/timex.h +++ b/arch/s390/include/asm/timex.h | |||
@@ -90,4 +90,18 @@ unsigned long long monotonic_clock(void); | |||
90 | 90 | ||
91 | extern u64 sched_clock_base_cc; | 91 | extern u64 sched_clock_base_cc; |
92 | 92 | ||
93 | /** | ||
94 | * get_clock_monotonic - returns current time in clock rate units | ||
95 | * | ||
96 | * The clock and sched_clock_base_cc get changed via | ||
97 | * stop_machine. Therefore the caller must ensure that | ||
98 | * preemption is disabled while calling this function, | ||
99 | * otherwise the returned value is not guaranteed to be | ||
100 | * monotonic. | ||
101 | */ | ||
102 | static inline unsigned long long get_clock_monotonic(void) | ||
103 | { | ||
104 | return get_clock_xt() - sched_clock_base_cc; | ||
105 | } | ||
106 | |||
93 | #endif | 107 | #endif |
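A hedged usage sketch for the new helper (the caller name is illustrative): reading the value with preemption disabled guarantees that sched_clock_base_cc cannot change underneath via stop_machine.

	/* Hedged usage sketch, not from this patch. */
	#include <linux/preempt.h>

	static unsigned long long sketch_monotonic_stamp(void)
	{
		unsigned long long clk;

		preempt_disable();
		clk = get_clock_monotonic();
		preempt_enable();
		return clk;
	}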
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index c75ed43b1a18..c7be8e10b87e 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile | |||
@@ -32,7 +32,7 @@ extra-y += head.o init_task.o vmlinux.lds | |||
32 | 32 | ||
33 | obj-$(CONFIG_MODULES) += s390_ksyms.o module.o | 33 | obj-$(CONFIG_MODULES) += s390_ksyms.o module.o |
34 | obj-$(CONFIG_SMP) += smp.o topology.o | 34 | obj-$(CONFIG_SMP) += smp.o topology.o |
35 | 35 | obj-$(CONFIG_HIBERNATION) += suspend.o swsusp_asm64.o | |
36 | obj-$(CONFIG_AUDIT) += audit.o | 36 | obj-$(CONFIG_AUDIT) += audit.o |
37 | compat-obj-$(CONFIG_AUDIT) += compat_audit.o | 37 | compat-obj-$(CONFIG_AUDIT) += compat_audit.o |
38 | obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \ | 38 | obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \ |
@@ -41,7 +41,7 @@ obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \ | |||
41 | 41 | ||
42 | obj-$(CONFIG_STACKTRACE) += stacktrace.o | 42 | obj-$(CONFIG_STACKTRACE) += stacktrace.o |
43 | obj-$(CONFIG_KPROBES) += kprobes.o | 43 | obj-$(CONFIG_KPROBES) += kprobes.o |
44 | obj-$(CONFIG_FUNCTION_TRACER) += mcount.o | 44 | obj-$(CONFIG_FUNCTION_TRACER) += $(if $(CONFIG_64BIT),mcount64.o,mcount.o) |
45 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o | 45 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o |
46 | obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o | 46 | obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o |
47 | 47 | ||
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index cae14c499511..bf8b4ae7ff2d 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c | |||
@@ -6,6 +6,9 @@ | |||
6 | * Heiko Carstens <heiko.carstens@de.ibm.com> | 6 | * Heiko Carstens <heiko.carstens@de.ibm.com> |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define KMSG_COMPONENT "setup" | ||
10 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
11 | |||
9 | #include <linux/compiler.h> | 12 | #include <linux/compiler.h> |
10 | #include <linux/init.h> | 13 | #include <linux/init.h> |
11 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
@@ -16,6 +19,7 @@ | |||
16 | #include <linux/module.h> | 19 | #include <linux/module.h> |
17 | #include <linux/pfn.h> | 20 | #include <linux/pfn.h> |
18 | #include <linux/uaccess.h> | 21 | #include <linux/uaccess.h> |
22 | #include <linux/kernel.h> | ||
19 | #include <asm/ebcdic.h> | 23 | #include <asm/ebcdic.h> |
20 | #include <asm/ipl.h> | 24 | #include <asm/ipl.h> |
21 | #include <asm/lowcore.h> | 25 | #include <asm/lowcore.h> |
@@ -35,8 +39,6 @@ | |||
35 | 39 | ||
36 | char kernel_nss_name[NSS_NAME_SIZE + 1]; | 40 | char kernel_nss_name[NSS_NAME_SIZE + 1]; |
37 | 41 | ||
38 | static unsigned long machine_flags; | ||
39 | |||
40 | static void __init setup_boot_command_line(void); | 42 | static void __init setup_boot_command_line(void); |
41 | 43 | ||
42 | /* | 44 | /* |
@@ -81,6 +83,8 @@ asm( | |||
81 | " br 14\n" | 83 | " br 14\n" |
82 | " .size savesys_ipl_nss, .-savesys_ipl_nss\n"); | 84 | " .size savesys_ipl_nss, .-savesys_ipl_nss\n"); |
83 | 85 | ||
86 | static __initdata char upper_command_line[COMMAND_LINE_SIZE]; | ||
87 | |||
84 | static noinline __init void create_kernel_nss(void) | 88 | static noinline __init void create_kernel_nss(void) |
85 | { | 89 | { |
86 | unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size; | 90 | unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size; |
@@ -90,7 +94,6 @@ static noinline __init void create_kernel_nss(void) | |||
90 | int response; | 94 | int response; |
91 | size_t len; | 95 | size_t len; |
92 | char *savesys_ptr; | 96 | char *savesys_ptr; |
93 | char upper_command_line[COMMAND_LINE_SIZE]; | ||
94 | char defsys_cmd[DEFSYS_CMD_SIZE]; | 97 | char defsys_cmd[DEFSYS_CMD_SIZE]; |
95 | char savesys_cmd[SAVESYS_CMD_SIZE]; | 98 | char savesys_cmd[SAVESYS_CMD_SIZE]; |
96 | 99 | ||
@@ -141,6 +144,8 @@ static noinline __init void create_kernel_nss(void) | |||
141 | __cpcmd(defsys_cmd, NULL, 0, &response); | 144 | __cpcmd(defsys_cmd, NULL, 0, &response); |
142 | 145 | ||
143 | if (response != 0) { | 146 | if (response != 0) { |
147 | pr_err("Defining the Linux kernel NSS failed with rc=%d\n", | ||
148 | response); | ||
144 | kernel_nss_name[0] = '\0'; | 149 | kernel_nss_name[0] = '\0'; |
145 | return; | 150 | return; |
146 | } | 151 | } |
@@ -153,8 +158,11 @@ static noinline __init void create_kernel_nss(void) | |||
153 | * max SAVESYS_CMD_SIZE | 158 | * max SAVESYS_CMD_SIZE |
154 | * On error: response contains the numeric portion of cp error message. | 159 | * On error: response contains the numeric portion of cp error message. |
155 | * for SAVESYS it will be >= 263 | 160 | * for SAVESYS it will be >= 263 |
161 | * for a missing privilege class it will be 1 | ||
156 | */ | 162 | */ |
157 | if (response > SAVESYS_CMD_SIZE) { | 163 | if (response > SAVESYS_CMD_SIZE || response == 1) { |
164 | pr_err("Saving the Linux kernel NSS failed with rc=%d\n", | ||
165 | response); | ||
158 | kernel_nss_name[0] = '\0'; | 166 | kernel_nss_name[0] = '\0'; |
159 | return; | 167 | return; |
160 | } | 168 | } |
@@ -205,12 +213,9 @@ static noinline __init void detect_machine_type(void) | |||
205 | 213 | ||
206 | /* Running under KVM? If not we assume z/VM */ | 214 | /* Running under KVM? If not we assume z/VM */ |
207 | if (!memcmp(vmms.vm[0].cpi, "\xd2\xe5\xd4", 3)) | 215 | if (!memcmp(vmms.vm[0].cpi, "\xd2\xe5\xd4", 3)) |
208 | machine_flags |= MACHINE_FLAG_KVM; | 216 | S390_lowcore.machine_flags |= MACHINE_FLAG_KVM; |
209 | else | 217 | else |
210 | machine_flags |= MACHINE_FLAG_VM; | 218 | S390_lowcore.machine_flags |= MACHINE_FLAG_VM; |
211 | |||
212 | /* Store machine flags for setting up lowcore early */ | ||
213 | S390_lowcore.machine_flags = machine_flags; | ||
214 | } | 219 | } |
215 | 220 | ||
216 | static __init void early_pgm_check_handler(void) | 221 | static __init void early_pgm_check_handler(void) |
@@ -245,7 +250,7 @@ static noinline __init void setup_hpage(void) | |||
245 | facilities = stfl(); | 250 | facilities = stfl(); |
246 | if (!(facilities & (1UL << 23)) || !(facilities & (1UL << 29))) | 251 | if (!(facilities & (1UL << 23)) || !(facilities & (1UL << 29))) |
247 | return; | 252 | return; |
248 | machine_flags |= MACHINE_FLAG_HPAGE; | 253 | S390_lowcore.machine_flags |= MACHINE_FLAG_HPAGE; |
249 | __ctl_set_bit(0, 23); | 254 | __ctl_set_bit(0, 23); |
250 | #endif | 255 | #endif |
251 | } | 256 | } |
@@ -263,7 +268,7 @@ static __init void detect_mvpg(void) | |||
263 | EX_TABLE(0b,1b) | 268 | EX_TABLE(0b,1b) |
264 | : "=d" (rc) : "0" (-EOPNOTSUPP), "a" (0) : "memory", "cc", "0"); | 269 | : "=d" (rc) : "0" (-EOPNOTSUPP), "a" (0) : "memory", "cc", "0"); |
265 | if (!rc) | 270 | if (!rc) |
266 | machine_flags |= MACHINE_FLAG_MVPG; | 271 | S390_lowcore.machine_flags |= MACHINE_FLAG_MVPG; |
267 | #endif | 272 | #endif |
268 | } | 273 | } |
269 | 274 | ||
@@ -279,7 +284,7 @@ static __init void detect_ieee(void) | |||
279 | EX_TABLE(0b,1b) | 284 | EX_TABLE(0b,1b) |
280 | : "=d" (rc), "=d" (tmp): "0" (-EOPNOTSUPP) : "cc"); | 285 | : "=d" (rc), "=d" (tmp): "0" (-EOPNOTSUPP) : "cc"); |
281 | if (!rc) | 286 | if (!rc) |
282 | machine_flags |= MACHINE_FLAG_IEEE; | 287 | S390_lowcore.machine_flags |= MACHINE_FLAG_IEEE; |
283 | #endif | 288 | #endif |
284 | } | 289 | } |
285 | 290 | ||
@@ -298,7 +303,7 @@ static __init void detect_csp(void) | |||
298 | EX_TABLE(0b,1b) | 303 | EX_TABLE(0b,1b) |
299 | : "=d" (rc) : "0" (-EOPNOTSUPP) : "cc", "0", "1", "2"); | 304 | : "=d" (rc) : "0" (-EOPNOTSUPP) : "cc", "0", "1", "2"); |
300 | if (!rc) | 305 | if (!rc) |
301 | machine_flags |= MACHINE_FLAG_CSP; | 306 | S390_lowcore.machine_flags |= MACHINE_FLAG_CSP; |
302 | #endif | 307 | #endif |
303 | } | 308 | } |
304 | 309 | ||
@@ -315,7 +320,7 @@ static __init void detect_diag9c(void) | |||
315 | EX_TABLE(0b,1b) | 320 | EX_TABLE(0b,1b) |
316 | : "=d" (rc) : "0" (-EOPNOTSUPP), "d" (cpu_address) : "cc"); | 321 | : "=d" (rc) : "0" (-EOPNOTSUPP), "d" (cpu_address) : "cc"); |
317 | if (!rc) | 322 | if (!rc) |
318 | machine_flags |= MACHINE_FLAG_DIAG9C; | 323 | S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG9C; |
319 | } | 324 | } |
320 | 325 | ||
321 | static __init void detect_diag44(void) | 326 | static __init void detect_diag44(void) |
@@ -330,7 +335,7 @@ static __init void detect_diag44(void) | |||
330 | EX_TABLE(0b,1b) | 335 | EX_TABLE(0b,1b) |
331 | : "=d" (rc) : "0" (-EOPNOTSUPP) : "cc"); | 336 | : "=d" (rc) : "0" (-EOPNOTSUPP) : "cc"); |
332 | if (!rc) | 337 | if (!rc) |
333 | machine_flags |= MACHINE_FLAG_DIAG44; | 338 | S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG44; |
334 | #endif | 339 | #endif |
335 | } | 340 | } |
336 | 341 | ||
@@ -341,11 +346,11 @@ static __init void detect_machine_facilities(void) | |||
341 | 346 | ||
342 | facilities = stfl(); | 347 | facilities = stfl(); |
343 | if (facilities & (1 << 28)) | 348 | if (facilities & (1 << 28)) |
344 | machine_flags |= MACHINE_FLAG_IDTE; | 349 | S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE; |
345 | if (facilities & (1 << 23)) | 350 | if (facilities & (1 << 23)) |
346 | machine_flags |= MACHINE_FLAG_PFMF; | 351 | S390_lowcore.machine_flags |= MACHINE_FLAG_PFMF; |
347 | if (facilities & (1 << 4)) | 352 | if (facilities & (1 << 4)) |
348 | machine_flags |= MACHINE_FLAG_MVCOS; | 353 | S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS; |
349 | #endif | 354 | #endif |
350 | } | 355 | } |
351 | 356 | ||
@@ -367,21 +372,35 @@ static __init void rescue_initrd(void) | |||
367 | } | 372 | } |
368 | 373 | ||
369 | /* Set up boot command line */ | 374 | /* Set up boot command line */ |
370 | static void __init setup_boot_command_line(void) | 375 | static void __init append_to_cmdline(size_t (*ipl_data)(char *, size_t)) |
371 | { | 376 | { |
372 | char *parm = NULL; | 377 | char *parm, *delim; |
378 | size_t rc, len; | ||
379 | |||
380 | len = strlen(boot_command_line); | ||
381 | |||
382 | delim = boot_command_line + len; /* '\0' character position */ | ||
383 | parm = boot_command_line + len + 1; /* append right after '\0' */ | ||
373 | 384 | ||
385 | rc = ipl_data(parm, COMMAND_LINE_SIZE - len - 1); | ||
386 | if (rc) { | ||
387 | if (*parm == '=') | ||
388 | memmove(boot_command_line, parm + 1, rc); | ||
389 | else | ||
390 | *delim = ' '; /* replace '\0' with space */ | ||
391 | } | ||
392 | } | ||
393 | |||
394 | static void __init setup_boot_command_line(void) | ||
395 | { | ||
374 | /* copy arch command line */ | 396 | /* copy arch command line */ |
375 | strlcpy(boot_command_line, COMMAND_LINE, ARCH_COMMAND_LINE_SIZE); | 397 | strlcpy(boot_command_line, COMMAND_LINE, ARCH_COMMAND_LINE_SIZE); |
376 | 398 | ||
377 | /* append IPL PARM data to the boot command line */ | 399 | /* append IPL PARM data to the boot command line */ |
378 | if (MACHINE_IS_VM) { | 400 | if (MACHINE_IS_VM) |
379 | parm = boot_command_line + strlen(boot_command_line); | 401 | append_to_cmdline(append_ipl_vmparm); |
380 | *parm++ = ' '; | 402 | |
381 | get_ipl_vmparm(parm); | 403 | append_to_cmdline(append_ipl_scpdata); |
382 | if (parm[0] == '=') | ||
383 | memmove(boot_command_line, parm + 1, strlen(parm)); | ||
384 | } | ||
385 | } | 404 | } |
386 | 405 | ||
387 | 406 | ||
@@ -413,7 +432,6 @@ void __init startup_init(void) | |||
413 | setup_hpage(); | 432 | setup_hpage(); |
414 | sclp_facilities_detect(); | 433 | sclp_facilities_detect(); |
415 | detect_memory_layout(memory_chunk); | 434 | detect_memory_layout(memory_chunk); |
416 | S390_lowcore.machine_flags = machine_flags; | ||
417 | #ifdef CONFIG_DYNAMIC_FTRACE | 435 | #ifdef CONFIG_DYNAMIC_FTRACE |
418 | S390_lowcore.ftrace_func = (unsigned long)ftrace_caller; | 436 | S390_lowcore.ftrace_func = (unsigned long)ftrace_caller; |
419 | #endif | 437 | #endif |
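For reference, a self-contained userspace sketch of the append_to_cmdline() callback pattern introduced above. The stub provider and buffer size are invented; the splice logic mirrors the hunk: data written after the terminating '\0' either replaces the whole line (when it starts with '=') or is joined to it by turning that '\0' into a space.

#include <stdio.h>
#include <string.h>

#define COMMAND_LINE_SIZE 128
static char boot_command_line[COMMAND_LINE_SIZE] = "root=/dev/dasda1";

/* stand-in for append_ipl_vmparm()/append_ipl_scpdata() */
static size_t fake_ipl_data(char *dest, size_t size)
{
	return snprintf(dest, size, "dasd=0150-0155");
}

static void append_to_cmdline(size_t (*ipl_data)(char *, size_t))
{
	size_t len = strlen(boot_command_line);
	char *delim = boot_command_line + len;		/* '\0' position */
	char *parm = boot_command_line + len + 1;	/* area after '\0' */
	size_t rc = ipl_data(parm, COMMAND_LINE_SIZE - len - 1);

	if (rc) {
		if (*parm == '=')	/* "=..." replaces the whole line */
			memmove(boot_command_line, parm + 1, rc);
		else			/* otherwise join with a space */
			*delim = ' ';
	}
}

int main(void)
{
	append_to_cmdline(fake_ipl_data);
	/* prints "root=/dev/dasda1 dasd=0150-0155" */
	puts(boot_command_line);
	return 0;
}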
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index c4c80a22bc1f..f43d2ee54464 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S | |||
@@ -54,7 +54,7 @@ _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ | |||
54 | _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ | 54 | _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ |
55 | _TIF_MCCK_PENDING) | 55 | _TIF_MCCK_PENDING) |
56 | _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ | 56 | _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ |
57 | _TIF_SECCOMP>>8 | _TIF_SYSCALL_FTRACE>>8) | 57 | _TIF_SECCOMP>>8 | _TIF_SYSCALL_TRACEPOINT>>8) |
58 | 58 | ||
59 | STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER | 59 | STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER |
60 | STACK_SIZE = 1 << STACK_SHIFT | 60 | STACK_SIZE = 1 << STACK_SHIFT |
@@ -278,7 +278,8 @@ sysc_return: | |||
278 | bnz BASED(sysc_work) # there is work to do (signals etc.) | 278 | bnz BASED(sysc_work) # there is work to do (signals etc.) |
279 | sysc_restore: | 279 | sysc_restore: |
280 | #ifdef CONFIG_TRACE_IRQFLAGS | 280 | #ifdef CONFIG_TRACE_IRQFLAGS |
281 | la %r1,BASED(sysc_restore_trace_psw) | 281 | la %r1,BASED(sysc_restore_trace_psw_addr) |
282 | l %r1,0(%r1) | ||
282 | lpsw 0(%r1) | 283 | lpsw 0(%r1) |
283 | sysc_restore_trace: | 284 | sysc_restore_trace: |
284 | TRACE_IRQS_CHECK | 285 | TRACE_IRQS_CHECK |
@@ -289,10 +290,15 @@ sysc_leave: | |||
289 | sysc_done: | 290 | sysc_done: |
290 | 291 | ||
291 | #ifdef CONFIG_TRACE_IRQFLAGS | 292 | #ifdef CONFIG_TRACE_IRQFLAGS |
293 | sysc_restore_trace_psw_addr: | ||
294 | .long sysc_restore_trace_psw | ||
295 | |||
296 | .section .data,"aw",@progbits | ||
292 | .align 8 | 297 | .align 8 |
293 | .globl sysc_restore_trace_psw | 298 | .globl sysc_restore_trace_psw |
294 | sysc_restore_trace_psw: | 299 | sysc_restore_trace_psw: |
295 | .long 0, sysc_restore_trace + 0x80000000 | 300 | .long 0, sysc_restore_trace + 0x80000000 |
301 | .previous | ||
296 | #endif | 302 | #endif |
297 | 303 | ||
298 | # | 304 | # |
@@ -606,7 +612,8 @@ io_return: | |||
606 | bnz BASED(io_work) # there is work to do (signals etc.) | 612 | bnz BASED(io_work) # there is work to do (signals etc.) |
607 | io_restore: | 613 | io_restore: |
608 | #ifdef CONFIG_TRACE_IRQFLAGS | 614 | #ifdef CONFIG_TRACE_IRQFLAGS |
609 | la %r1,BASED(io_restore_trace_psw) | 615 | la %r1,BASED(io_restore_trace_psw_addr) |
616 | l %r1,0(%r1) | ||
610 | lpsw 0(%r1) | 617 | lpsw 0(%r1) |
611 | io_restore_trace: | 618 | io_restore_trace: |
612 | TRACE_IRQS_CHECK | 619 | TRACE_IRQS_CHECK |
@@ -617,10 +624,15 @@ io_leave: | |||
617 | io_done: | 624 | io_done: |
618 | 625 | ||
619 | #ifdef CONFIG_TRACE_IRQFLAGS | 626 | #ifdef CONFIG_TRACE_IRQFLAGS |
627 | io_restore_trace_psw_addr: | ||
628 | .long io_restore_trace_psw | ||
629 | |||
630 | .section .data,"aw",@progbits | ||
620 | .align 8 | 631 | .align 8 |
621 | .globl io_restore_trace_psw | 632 | .globl io_restore_trace_psw |
622 | io_restore_trace_psw: | 633 | io_restore_trace_psw: |
623 | .long 0, io_restore_trace + 0x80000000 | 634 | .long 0, io_restore_trace + 0x80000000 |
635 | .previous | ||
624 | #endif | 636 | #endif |
625 | 637 | ||
626 | # | 638 | # |
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index f6618e9e15ef..a6f7b20df616 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S | |||
@@ -57,7 +57,7 @@ _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ | |||
57 | _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ | 57 | _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ |
58 | _TIF_MCCK_PENDING) | 58 | _TIF_MCCK_PENDING) |
59 | _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ | 59 | _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ |
60 | _TIF_SECCOMP>>8 | _TIF_SYSCALL_FTRACE>>8) | 60 | _TIF_SECCOMP>>8 | _TIF_SYSCALL_TRACEPOINT>>8) |
61 | 61 | ||
62 | #define BASED(name) name-system_call(%r13) | 62 | #define BASED(name) name-system_call(%r13) |
63 | 63 | ||
@@ -284,10 +284,12 @@ sysc_leave: | |||
284 | sysc_done: | 284 | sysc_done: |
285 | 285 | ||
286 | #ifdef CONFIG_TRACE_IRQFLAGS | 286 | #ifdef CONFIG_TRACE_IRQFLAGS |
287 | .section .data,"aw",@progbits | ||
287 | .align 8 | 288 | .align 8 |
288 | .globl sysc_restore_trace_psw | 289 | .globl sysc_restore_trace_psw |
289 | sysc_restore_trace_psw: | 290 | sysc_restore_trace_psw: |
290 | .quad 0, sysc_restore_trace | 291 | .quad 0, sysc_restore_trace |
292 | .previous | ||
291 | #endif | 293 | #endif |
292 | 294 | ||
293 | # | 295 | # |
@@ -595,10 +597,12 @@ io_leave: | |||
595 | io_done: | 597 | io_done: |
596 | 598 | ||
597 | #ifdef CONFIG_TRACE_IRQFLAGS | 599 | #ifdef CONFIG_TRACE_IRQFLAGS |
600 | .section .data,"aw",@progbits | ||
598 | .align 8 | 601 | .align 8 |
599 | .globl io_restore_trace_psw | 602 | .globl io_restore_trace_psw |
600 | io_restore_trace_psw: | 603 | io_restore_trace_psw: |
601 | .quad 0, io_restore_trace | 604 | .quad 0, io_restore_trace |
605 | .previous | ||
602 | #endif | 606 | #endif |
603 | 607 | ||
604 | # | 608 | # |
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index 3e298e64f0db..57bdcb1e3cdf 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c | |||
@@ -220,6 +220,29 @@ struct syscall_metadata *syscall_nr_to_meta(int nr) | |||
220 | return syscalls_metadata[nr]; | 220 | return syscalls_metadata[nr]; |
221 | } | 221 | } |
222 | 222 | ||
223 | int syscall_name_to_nr(char *name) | ||
224 | { | ||
225 | int i; | ||
226 | |||
227 | if (!syscalls_metadata) | ||
228 | return -1; | ||
229 | for (i = 0; i < NR_syscalls; i++) | ||
230 | if (syscalls_metadata[i]) | ||
231 | if (!strcmp(syscalls_metadata[i]->name, name)) | ||
232 | return i; | ||
233 | return -1; | ||
234 | } | ||
235 | |||
236 | void set_syscall_enter_id(int num, int id) | ||
237 | { | ||
238 | syscalls_metadata[num]->enter_id = id; | ||
239 | } | ||
240 | |||
241 | void set_syscall_exit_id(int num, int id) | ||
242 | { | ||
243 | syscalls_metadata[num]->exit_id = id; | ||
244 | } | ||
245 | |||
223 | static struct syscall_metadata *find_syscall_meta(unsigned long syscall) | 246 | static struct syscall_metadata *find_syscall_meta(unsigned long syscall) |
224 | { | 247 | { |
225 | struct syscall_metadata *start; | 248 | struct syscall_metadata *start; |
@@ -237,24 +260,19 @@ static struct syscall_metadata *find_syscall_meta(unsigned long syscall) | |||
237 | return NULL; | 260 | return NULL; |
238 | } | 261 | } |
239 | 262 | ||
240 | void arch_init_ftrace_syscalls(void) | 263 | static int __init arch_init_ftrace_syscalls(void) |
241 | { | 264 | { |
242 | struct syscall_metadata *meta; | 265 | struct syscall_metadata *meta; |
243 | int i; | 266 | int i; |
244 | static atomic_t refs; | ||
245 | |||
246 | if (atomic_inc_return(&refs) != 1) | ||
247 | goto out; | ||
248 | syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * NR_syscalls, | 267 | syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * NR_syscalls, |
249 | GFP_KERNEL); | 268 | GFP_KERNEL); |
250 | if (!syscalls_metadata) | 269 | if (!syscalls_metadata) |
251 | goto out; | 270 | return -ENOMEM; |
252 | for (i = 0; i < NR_syscalls; i++) { | 271 | for (i = 0; i < NR_syscalls; i++) { |
253 | meta = find_syscall_meta((unsigned long)sys_call_table[i]); | 272 | meta = find_syscall_meta((unsigned long)sys_call_table[i]); |
254 | syscalls_metadata[i] = meta; | 273 | syscalls_metadata[i] = meta; |
255 | } | 274 | } |
256 | return; | 275 | return 0; |
257 | out: | ||
258 | atomic_dec(&refs); | ||
259 | } | 276 | } |
277 | arch_initcall(arch_init_ftrace_syscalls); | ||
260 | #endif | 278 | #endif |
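The new syscall_name_to_nr() is a plain linear scan that tolerates NULL gaps for syscalls without metadata. A sketch with a fabricated three-slot table (the kernel builds syscalls_metadata from sys_call_table instead):

#include <stdio.h>
#include <string.h>

struct syscall_metadata { const char *name; };

static struct syscall_metadata read_meta = { "sys_read" };
static struct syscall_metadata write_meta = { "sys_write" };
static struct syscall_metadata *syscalls_metadata[] = {
	&read_meta, &write_meta, NULL	/* slot without metadata */
};
#define NR_syscalls (sizeof(syscalls_metadata) / sizeof(syscalls_metadata[0]))

static int syscall_name_to_nr(const char *name)
{
	size_t i;

	for (i = 0; i < NR_syscalls; i++)
		if (syscalls_metadata[i] &&
		    !strcmp(syscalls_metadata[i]->name, name))
			return (int)i;
	return -1;	/* unknown name, as in the kernel version */
}

int main(void)
{
	printf("%d %d\n", syscall_name_to_nr("sys_write"),	/* 1 */
	       syscall_name_to_nr("sys_open"));			/* -1 */
	return 0;
}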
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index ec6882348520..c52b4f7742fa 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <asm/asm-offsets.h> | 27 | #include <asm/asm-offsets.h> |
28 | #include <asm/thread_info.h> | 28 | #include <asm/thread_info.h> |
29 | #include <asm/page.h> | 29 | #include <asm/page.h> |
30 | #include <asm/cpu.h> | ||
30 | 31 | ||
31 | #ifdef CONFIG_64BIT | 32 | #ifdef CONFIG_64BIT |
32 | #define ARCH_OFFSET 4 | 33 | #define ARCH_OFFSET 4 |
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S index 2ced846065b7..602b508cd4c4 100644 --- a/arch/s390/kernel/head31.S +++ b/arch/s390/kernel/head31.S | |||
@@ -24,6 +24,7 @@ startup_continue: | |||
24 | # Setup stack | 24 | # Setup stack |
25 | # | 25 | # |
26 | l %r15,.Linittu-.LPG1(%r13) | 26 | l %r15,.Linittu-.LPG1(%r13) |
27 | st %r15,__LC_THREAD_INFO # cache thread info in lowcore | ||
27 | mvc __LC_CURRENT(4),__TI_task(%r15) | 28 | mvc __LC_CURRENT(4),__TI_task(%r15) |
28 | ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE | 29 | ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE |
29 | st %r15,__LC_KERNEL_STACK # set end of kernel stack | 30 | st %r15,__LC_KERNEL_STACK # set end of kernel stack |
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index 65667b2e65ce..6a250808092b 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S | |||
@@ -62,9 +62,9 @@ startup_continue: | |||
62 | clr %r11,%r12 | 62 | clr %r11,%r12 |
63 | je 5f # no more space in prefix array | 63 | je 5f # no more space in prefix array |
64 | 4: | 64 | 4: |
65 | ahi %r8,1 # next cpu (r8 += 1) | 65 | ahi %r8,1 # next cpu (r8 += 1) |
66 | cl %r8,.Llast_cpu-.LPG1(%r13) # is last possible cpu ? | 66 | chi %r8,MAX_CPU_ADDRESS # is last possible cpu ? |
67 | jl 1b # jump if not last cpu | 67 | jle 1b # jump if not last cpu |
68 | 5: | 68 | 5: |
69 | lhi %r1,2 # mode 2 = esame (dump) | 69 | lhi %r1,2 # mode 2 = esame (dump) |
70 | j 6f | 70 | j 6f |
@@ -92,6 +92,7 @@ startup_continue: | |||
92 | # Setup stack | 92 | # Setup stack |
93 | # | 93 | # |
94 | larl %r15,init_thread_union | 94 | larl %r15,init_thread_union |
95 | stg %r15,__LC_THREAD_INFO # cache thread info in lowcore | ||
95 | lg %r14,__TI_task(%r15) # cache current in lowcore | 96 | lg %r14,__TI_task(%r15) # cache current in lowcore |
96 | stg %r14,__LC_CURRENT | 97 | stg %r14,__LC_CURRENT |
97 | aghi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE | 98 | aghi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE |
@@ -129,8 +130,6 @@ startup_continue: | |||
129 | #ifdef CONFIG_ZFCPDUMP | 130 | #ifdef CONFIG_ZFCPDUMP |
130 | .Lcurrent_cpu: | 131 | .Lcurrent_cpu: |
131 | .long 0x0 | 132 | .long 0x0 |
132 | .Llast_cpu: | ||
133 | .long 0x0000ffff | ||
134 | .Lpref_arr_ptr: | 133 | .Lpref_arr_ptr: |
135 | .long zfcpdump_prefix_array | 134 | .long zfcpdump_prefix_array |
136 | #endif /* CONFIG_ZFCPDUMP */ | 135 | #endif /* CONFIG_ZFCPDUMP */ |
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 371a2d88f4ac..ee57a42e6e93 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c | |||
@@ -272,17 +272,18 @@ static ssize_t ipl_type_show(struct kobject *kobj, struct kobj_attribute *attr, | |||
272 | static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type); | 272 | static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type); |
273 | 273 | ||
274 | /* VM IPL PARM routines */ | 274 | /* VM IPL PARM routines */ |
275 | static void reipl_get_ascii_vmparm(char *dest, | 275 | size_t reipl_get_ascii_vmparm(char *dest, size_t size, |
276 | const struct ipl_parameter_block *ipb) | 276 | const struct ipl_parameter_block *ipb) |
277 | { | 277 | { |
278 | int i; | 278 | int i; |
279 | int len = 0; | 279 | size_t len; |
280 | char has_lowercase = 0; | 280 | char has_lowercase = 0; |
281 | 281 | ||
282 | len = 0; | ||
282 | if ((ipb->ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID) && | 283 | if ((ipb->ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID) && |
283 | (ipb->ipl_info.ccw.vm_parm_len > 0)) { | 284 | (ipb->ipl_info.ccw.vm_parm_len > 0)) { |
284 | 285 | ||
285 | len = ipb->ipl_info.ccw.vm_parm_len; | 286 | len = min_t(size_t, size - 1, ipb->ipl_info.ccw.vm_parm_len); |
286 | memcpy(dest, ipb->ipl_info.ccw.vm_parm, len); | 287 | memcpy(dest, ipb->ipl_info.ccw.vm_parm, len); |
287 | /* If at least one character is lowercase, we assume mixed | 288 | /* If at least one character is lowercase, we assume mixed |
288 | * case; otherwise we convert everything to lowercase. | 289 | * case; otherwise we convert everything to lowercase. |
@@ -299,14 +300,20 @@ static void reipl_get_ascii_vmparm(char *dest, | |||
299 | EBCASC(dest, len); | 300 | EBCASC(dest, len); |
300 | } | 301 | } |
301 | dest[len] = 0; | 302 | dest[len] = 0; |
303 | |||
304 | return len; | ||
302 | } | 305 | } |
303 | 306 | ||
304 | void get_ipl_vmparm(char *dest) | 307 | size_t append_ipl_vmparm(char *dest, size_t size) |
305 | { | 308 | { |
309 | size_t rc; | ||
310 | |||
311 | rc = 0; | ||
306 | if (diag308_set_works && (ipl_block.hdr.pbt == DIAG308_IPL_TYPE_CCW)) | 312 | if (diag308_set_works && (ipl_block.hdr.pbt == DIAG308_IPL_TYPE_CCW)) |
307 | reipl_get_ascii_vmparm(dest, &ipl_block); | 313 | rc = reipl_get_ascii_vmparm(dest, size, &ipl_block); |
308 | else | 314 | else |
309 | dest[0] = 0; | 315 | dest[0] = 0; |
316 | return rc; | ||
310 | } | 317 | } |
311 | 318 | ||
312 | static ssize_t ipl_vm_parm_show(struct kobject *kobj, | 319 | static ssize_t ipl_vm_parm_show(struct kobject *kobj, |
@@ -314,10 +321,65 @@ static ssize_t ipl_vm_parm_show(struct kobject *kobj, | |||
314 | { | 321 | { |
315 | char parm[DIAG308_VMPARM_SIZE + 1] = {}; | 322 | char parm[DIAG308_VMPARM_SIZE + 1] = {}; |
316 | 323 | ||
317 | get_ipl_vmparm(parm); | 324 | append_ipl_vmparm(parm, sizeof(parm)); |
318 | return sprintf(page, "%s\n", parm); | 325 | return sprintf(page, "%s\n", parm); |
319 | } | 326 | } |
320 | 327 | ||
328 | static size_t scpdata_length(const char *buf, size_t count) | ||
329 | { | ||
330 | while (count) { | ||
331 | if (buf[count - 1] != '\0' && buf[count - 1] != ' ') | ||
332 | break; | ||
333 | count--; | ||
334 | } | ||
335 | return count; | ||
336 | } | ||
337 | |||
338 | size_t reipl_append_ascii_scpdata(char *dest, size_t size, | ||
339 | const struct ipl_parameter_block *ipb) | ||
340 | { | ||
341 | size_t count; | ||
342 | size_t i; | ||
343 | int has_lowercase; | ||
344 | |||
345 | count = min(size - 1, scpdata_length(ipb->ipl_info.fcp.scp_data, | ||
346 | ipb->ipl_info.fcp.scp_data_len)); | ||
347 | if (!count) | ||
348 | goto out; | ||
349 | |||
350 | has_lowercase = 0; | ||
351 | for (i = 0; i < count; i++) { | ||
352 | if (!isascii(ipb->ipl_info.fcp.scp_data[i])) { | ||
353 | count = 0; | ||
354 | goto out; | ||
355 | } | ||
356 | if (!has_lowercase && islower(ipb->ipl_info.fcp.scp_data[i])) | ||
357 | has_lowercase = 1; | ||
358 | } | ||
359 | |||
360 | if (has_lowercase) | ||
361 | memcpy(dest, ipb->ipl_info.fcp.scp_data, count); | ||
362 | else | ||
363 | for (i = 0; i < count; i++) | ||
364 | dest[i] = tolower(ipb->ipl_info.fcp.scp_data[i]); | ||
365 | out: | ||
366 | dest[count] = '\0'; | ||
367 | return count; | ||
368 | } | ||
369 | |||
370 | size_t append_ipl_scpdata(char *dest, size_t len) | ||
371 | { | ||
372 | size_t rc; | ||
373 | |||
374 | rc = 0; | ||
375 | if (ipl_block.hdr.pbt == DIAG308_IPL_TYPE_FCP) | ||
376 | rc = reipl_append_ascii_scpdata(dest, len, &ipl_block); | ||
377 | else | ||
378 | dest[0] = 0; | ||
379 | return rc; | ||
380 | } | ||
381 | |||
382 | |||
321 | static struct kobj_attribute sys_ipl_vm_parm_attr = | 383 | static struct kobj_attribute sys_ipl_vm_parm_attr = |
322 | __ATTR(parm, S_IRUGO, ipl_vm_parm_show, NULL); | 384 | __ATTR(parm, S_IRUGO, ipl_vm_parm_show, NULL); |
323 | 385 | ||
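scpdata_length() above trims trailing '\0' and blank bytes so that padding in the SCP data block is not appended to the command line. A small userspace check of that rule (the test data is invented):

#include <stdio.h>

static size_t scpdata_length(const char *buf, size_t count)
{
	while (count) {
		if (buf[count - 1] != '\0' && buf[count - 1] != ' ')
			break;
		count--;
	}
	return count;
}

int main(void)
{
	const char data[8] = { 'O', 'K', ' ', ' ', '\0', '\0', ' ', '\0' };

	/* only "OK" counts; the padding tail is dropped, so prints 2 */
	printf("%zu\n", scpdata_length(data, sizeof(data)));
	return 0;
}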
@@ -553,7 +615,7 @@ static ssize_t reipl_generic_vmparm_show(struct ipl_parameter_block *ipb, | |||
553 | { | 615 | { |
554 | char vmparm[DIAG308_VMPARM_SIZE + 1] = {}; | 616 | char vmparm[DIAG308_VMPARM_SIZE + 1] = {}; |
555 | 617 | ||
556 | reipl_get_ascii_vmparm(vmparm, ipb); | 618 | reipl_get_ascii_vmparm(vmparm, sizeof(vmparm), ipb); |
557 | return sprintf(page, "%s\n", vmparm); | 619 | return sprintf(page, "%s\n", vmparm); |
558 | } | 620 | } |
559 | 621 | ||
@@ -626,6 +688,59 @@ static struct kobj_attribute sys_reipl_ccw_vmparm_attr = | |||
626 | 688 | ||
627 | /* FCP reipl device attributes */ | 689 | /* FCP reipl device attributes */ |
628 | 690 | ||
691 | static ssize_t reipl_fcp_scpdata_read(struct kobject *kobj, | ||
692 | struct bin_attribute *attr, | ||
693 | char *buf, loff_t off, size_t count) | ||
694 | { | ||
695 | size_t size = reipl_block_fcp->ipl_info.fcp.scp_data_len; | ||
696 | void *scp_data = reipl_block_fcp->ipl_info.fcp.scp_data; | ||
697 | |||
698 | return memory_read_from_buffer(buf, count, &off, scp_data, size); | ||
699 | } | ||
700 | |||
701 | static ssize_t reipl_fcp_scpdata_write(struct kobject *kobj, | ||
702 | struct bin_attribute *attr, | ||
703 | char *buf, loff_t off, size_t count) | ||
704 | { | ||
705 | size_t padding; | ||
706 | size_t scpdata_len; | ||
707 | |||
708 | if (off < 0) | ||
709 | return -EINVAL; | ||
710 | |||
711 | if (off >= DIAG308_SCPDATA_SIZE) | ||
712 | return -ENOSPC; | ||
713 | |||
714 | if (count > DIAG308_SCPDATA_SIZE - off) | ||
715 | count = DIAG308_SCPDATA_SIZE - off; | ||
716 | |||
717 | memcpy(reipl_block_fcp->ipl_info.fcp.scp_data, buf + off, count); | ||
718 | scpdata_len = off + count; | ||
719 | |||
720 | if (scpdata_len % 8) { | ||
721 | padding = 8 - (scpdata_len % 8); | ||
722 | memset(reipl_block_fcp->ipl_info.fcp.scp_data + scpdata_len, | ||
723 | 0, padding); | ||
724 | scpdata_len += padding; | ||
725 | } | ||
726 | |||
727 | reipl_block_fcp->ipl_info.fcp.scp_data_len = scpdata_len; | ||
728 | reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN + scpdata_len; | ||
729 | reipl_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN + scpdata_len; | ||
730 | |||
731 | return count; | ||
732 | } | ||
733 | |||
734 | static struct bin_attribute sys_reipl_fcp_scp_data_attr = { | ||
735 | .attr = { | ||
736 | .name = "scp_data", | ||
737 | .mode = S_IRUGO | S_IWUSR, | ||
738 | }, | ||
739 | .size = PAGE_SIZE, | ||
740 | .read = reipl_fcp_scpdata_read, | ||
741 | .write = reipl_fcp_scpdata_write, | ||
742 | }; | ||
743 | |||
629 | DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%016llx\n", | 744 | DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%016llx\n", |
630 | reipl_block_fcp->ipl_info.fcp.wwpn); | 745 | reipl_block_fcp->ipl_info.fcp.wwpn); |
631 | DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%016llx\n", | 746 | DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%016llx\n", |
@@ -647,7 +762,6 @@ static struct attribute *reipl_fcp_attrs[] = { | |||
647 | }; | 762 | }; |
648 | 763 | ||
649 | static struct attribute_group reipl_fcp_attr_group = { | 764 | static struct attribute_group reipl_fcp_attr_group = { |
650 | .name = IPL_FCP_STR, | ||
651 | .attrs = reipl_fcp_attrs, | 765 | .attrs = reipl_fcp_attrs, |
652 | }; | 766 | }; |
653 | 767 | ||
@@ -895,6 +1009,7 @@ static struct kobj_attribute reipl_type_attr = | |||
895 | __ATTR(reipl_type, 0644, reipl_type_show, reipl_type_store); | 1009 | __ATTR(reipl_type, 0644, reipl_type_show, reipl_type_store); |
896 | 1010 | ||
897 | static struct kset *reipl_kset; | 1011 | static struct kset *reipl_kset; |
1012 | static struct kset *reipl_fcp_kset; | ||
898 | 1013 | ||
899 | static void get_ipl_string(char *dst, struct ipl_parameter_block *ipb, | 1014 | static void get_ipl_string(char *dst, struct ipl_parameter_block *ipb, |
900 | const enum ipl_method m) | 1015 | const enum ipl_method m) |
@@ -906,7 +1021,7 @@ static void get_ipl_string(char *dst, struct ipl_parameter_block *ipb, | |||
906 | 1021 | ||
907 | reipl_get_ascii_loadparm(loadparm, ipb); | 1022 | reipl_get_ascii_loadparm(loadparm, ipb); |
908 | reipl_get_ascii_nss_name(nss_name, ipb); | 1023 | reipl_get_ascii_nss_name(nss_name, ipb); |
909 | reipl_get_ascii_vmparm(vmparm, ipb); | 1024 | reipl_get_ascii_vmparm(vmparm, sizeof(vmparm), ipb); |
910 | 1025 | ||
911 | switch (m) { | 1026 | switch (m) { |
912 | case REIPL_METHOD_CCW_VM: | 1027 | case REIPL_METHOD_CCW_VM: |
@@ -1076,23 +1191,44 @@ static int __init reipl_fcp_init(void) | |||
1076 | int rc; | 1191 | int rc; |
1077 | 1192 | ||
1078 | if (!diag308_set_works) { | 1193 | if (!diag308_set_works) { |
1079 | if (ipl_info.type == IPL_TYPE_FCP) | 1194 | if (ipl_info.type == IPL_TYPE_FCP) { |
1080 | make_attrs_ro(reipl_fcp_attrs); | 1195 | make_attrs_ro(reipl_fcp_attrs); |
1081 | else | 1196 | sys_reipl_fcp_scp_data_attr.attr.mode = S_IRUGO; |
1197 | } else | ||
1082 | return 0; | 1198 | return 0; |
1083 | } | 1199 | } |
1084 | 1200 | ||
1085 | reipl_block_fcp = (void *) get_zeroed_page(GFP_KERNEL); | 1201 | reipl_block_fcp = (void *) get_zeroed_page(GFP_KERNEL); |
1086 | if (!reipl_block_fcp) | 1202 | if (!reipl_block_fcp) |
1087 | return -ENOMEM; | 1203 | return -ENOMEM; |
1088 | rc = sysfs_create_group(&reipl_kset->kobj, &reipl_fcp_attr_group); | 1204 | |
1205 | /* sysfs: create fcp kset for mixing attr group and bin attrs */ | ||
1206 | reipl_fcp_kset = kset_create_and_add(IPL_FCP_STR, NULL, | ||
1207 | &reipl_kset->kobj); | ||
1208 | if (!reipl_fcp_kset) { | ||
1209 | free_page((unsigned long) reipl_block_fcp); | ||
1210 | return -ENOMEM; | ||
1211 | } | ||
1212 | |||
1213 | rc = sysfs_create_group(&reipl_fcp_kset->kobj, &reipl_fcp_attr_group); | ||
1214 | if (rc) { | ||
1215 | kset_unregister(reipl_fcp_kset); | ||
1216 | free_page((unsigned long) reipl_block_fcp); | ||
1217 | return rc; | ||
1218 | } | ||
1219 | |||
1220 | rc = sysfs_create_bin_file(&reipl_fcp_kset->kobj, | ||
1221 | &sys_reipl_fcp_scp_data_attr); | ||
1089 | if (rc) { | 1222 | if (rc) { |
1090 | free_page((unsigned long)reipl_block_fcp); | 1223 | sysfs_remove_group(&reipl_fcp_kset->kobj, &reipl_fcp_attr_group); |
1224 | kset_unregister(reipl_fcp_kset); | ||
1225 | free_page((unsigned long) reipl_block_fcp); | ||
1091 | return rc; | 1226 | return rc; |
1092 | } | 1227 | } |
1093 | if (ipl_info.type == IPL_TYPE_FCP) { | 1228 | |
1229 | if (ipl_info.type == IPL_TYPE_FCP) | ||
1094 | memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE); | 1230 | memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE); |
1095 | } else { | 1231 | else { |
1096 | reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN; | 1232 | reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN; |
1097 | reipl_block_fcp->hdr.version = IPL_PARM_BLOCK_VERSION; | 1233 | reipl_block_fcp->hdr.version = IPL_PARM_BLOCK_VERSION; |
1098 | reipl_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN; | 1234 | reipl_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN; |
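One detail of the new scp_data write handler worth calling out: the stored length is rounded up to a multiple of 8 and the pad is zero-filled before it goes into the IPL parameter block header fields. A sketch of just that step (function and buffer names are hypothetical):

#include <stdio.h>
#include <string.h>

static size_t pad_scpdata(unsigned char *scp_data, size_t scpdata_len)
{
	if (scpdata_len % 8) {
		size_t padding = 8 - (scpdata_len % 8);

		memset(scp_data + scpdata_len, 0, padding);
		scpdata_len += padding;
	}
	return scpdata_len;	/* value stored in the hdr.len/blk0_len fields */
}

int main(void)
{
	unsigned char buf[16] = "hello";

	/* 5 payload bytes are padded with zeroes up to 8 */
	printf("%zu\n", pad_scpdata(buf, 5));
	return 0;
}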
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S index 2a0a5e97ba8c..dfe015d7398c 100644 --- a/arch/s390/kernel/mcount.S +++ b/arch/s390/kernel/mcount.S | |||
@@ -11,111 +11,27 @@ | |||
11 | ftrace_stub: | 11 | ftrace_stub: |
12 | br %r14 | 12 | br %r14 |
13 | 13 | ||
14 | #ifdef CONFIG_64BIT | ||
15 | |||
16 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
17 | |||
18 | .globl _mcount | 14 | .globl _mcount |
19 | _mcount: | 15 | _mcount: |
20 | br %r14 | 16 | #ifdef CONFIG_DYNAMIC_FTRACE |
21 | |||
22 | .globl ftrace_caller | ||
23 | ftrace_caller: | ||
24 | larl %r1,function_trace_stop | ||
25 | icm %r1,0xf,0(%r1) | ||
26 | bnzr %r14 | ||
27 | stmg %r2,%r5,32(%r15) | ||
28 | stg %r14,112(%r15) | ||
29 | lgr %r1,%r15 | ||
30 | aghi %r15,-160 | ||
31 | stg %r1,__SF_BACKCHAIN(%r15) | ||
32 | lgr %r2,%r14 | ||
33 | lg %r3,168(%r15) | ||
34 | larl %r14,ftrace_dyn_func | ||
35 | lg %r14,0(%r14) | ||
36 | basr %r14,%r14 | ||
37 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
38 | .globl ftrace_graph_caller | ||
39 | ftrace_graph_caller: | ||
40 | # This unconditional branch gets runtime patched. Change only if | ||
41 | # you know what you are doing. See ftrace_enable_graph_caller(). | ||
42 | j 0f | ||
43 | lg %r2,272(%r15) | ||
44 | lg %r3,168(%r15) | ||
45 | brasl %r14,prepare_ftrace_return | ||
46 | stg %r2,168(%r15) | ||
47 | 0: | ||
48 | #endif | ||
49 | aghi %r15,160 | ||
50 | lmg %r2,%r5,32(%r15) | ||
51 | lg %r14,112(%r15) | ||
52 | br %r14 | 17 | br %r14 |
53 | 18 | ||
54 | .data | 19 | .data |
55 | .globl ftrace_dyn_func | 20 | .globl ftrace_dyn_func |
56 | ftrace_dyn_func: | 21 | ftrace_dyn_func: |
57 | .quad ftrace_stub | 22 | .long ftrace_stub |
58 | .previous | 23 | .previous |
59 | 24 | ||
60 | #else /* CONFIG_DYNAMIC_FTRACE */ | ||
61 | |||
62 | .globl _mcount | ||
63 | _mcount: | ||
64 | larl %r1,function_trace_stop | ||
65 | icm %r1,0xf,0(%r1) | ||
66 | bnzr %r14 | ||
67 | stmg %r2,%r5,32(%r15) | ||
68 | stg %r14,112(%r15) | ||
69 | lgr %r1,%r15 | ||
70 | aghi %r15,-160 | ||
71 | stg %r1,__SF_BACKCHAIN(%r15) | ||
72 | lgr %r2,%r14 | ||
73 | lg %r3,168(%r15) | ||
74 | larl %r14,ftrace_trace_function | ||
75 | lg %r14,0(%r14) | ||
76 | basr %r14,%r14 | ||
77 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
78 | lg %r2,272(%r15) | ||
79 | lg %r3,168(%r15) | ||
80 | brasl %r14,prepare_ftrace_return | ||
81 | stg %r2,168(%r15) | ||
82 | #endif | ||
83 | aghi %r15,160 | ||
84 | lmg %r2,%r5,32(%r15) | ||
85 | lg %r14,112(%r15) | ||
86 | br %r14 | ||
87 | |||
88 | #endif /* CONFIG_DYNAMIC_FTRACE */ | ||
89 | |||
90 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
91 | |||
92 | .globl return_to_handler | ||
93 | return_to_handler: | ||
94 | stmg %r2,%r5,32(%r15) | ||
95 | lgr %r1,%r15 | ||
96 | aghi %r15,-160 | ||
97 | stg %r1,__SF_BACKCHAIN(%r15) | ||
98 | brasl %r14,ftrace_return_to_handler | ||
99 | aghi %r15,160 | ||
100 | lgr %r14,%r2 | ||
101 | lmg %r2,%r5,32(%r15) | ||
102 | br %r14 | ||
103 | |||
104 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | ||
105 | |||
106 | #else /* CONFIG_64BIT */ | ||
107 | |||
108 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
109 | |||
110 | .globl _mcount | ||
111 | _mcount: | ||
112 | br %r14 | ||
113 | |||
114 | .globl ftrace_caller | 25 | .globl ftrace_caller |
115 | ftrace_caller: | 26 | ftrace_caller: |
27 | #endif | ||
116 | stm %r2,%r5,16(%r15) | 28 | stm %r2,%r5,16(%r15) |
117 | bras %r1,2f | 29 | bras %r1,2f |
30 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
31 | 0: .long ftrace_dyn_func | ||
32 | #else | ||
118 | 0: .long ftrace_trace_function | 33 | 0: .long ftrace_trace_function |
34 | #endif | ||
119 | 1: .long function_trace_stop | 35 | 1: .long function_trace_stop |
120 | 2: l %r2,1b-0b(%r1) | 36 | 2: l %r2,1b-0b(%r1) |
121 | icm %r2,0xf,0(%r2) | 37 | icm %r2,0xf,0(%r2) |
@@ -131,53 +47,13 @@ ftrace_caller: | |||
131 | l %r14,0(%r14) | 47 | l %r14,0(%r14) |
132 | basr %r14,%r14 | 48 | basr %r14,%r14 |
133 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 49 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
50 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
134 | .globl ftrace_graph_caller | 51 | .globl ftrace_graph_caller |
135 | ftrace_graph_caller: | 52 | ftrace_graph_caller: |
136 | # This unconditional branch gets runtime patched. Change only if | 53 | # This unconditional branch gets runtime patched. Change only if |
137 | # you know what you are doing. See ftrace_enable_graph_caller(). | 54 | # you know what you are doing. See ftrace_enable_graph_caller(). |
138 | j 1f | 55 | j 1f |
139 | bras %r1,0f | ||
140 | .long prepare_ftrace_return | ||
141 | 0: l %r2,152(%r15) | ||
142 | l %r4,0(%r1) | ||
143 | l %r3,100(%r15) | ||
144 | basr %r14,%r4 | ||
145 | st %r2,100(%r15) | ||
146 | 1: | ||
147 | #endif | 56 | #endif |
148 | ahi %r15,96 | ||
149 | l %r14,56(%r15) | ||
150 | 3: lm %r2,%r5,16(%r15) | ||
151 | br %r14 | ||
152 | |||
153 | .data | ||
154 | .globl ftrace_dyn_func | ||
155 | ftrace_dyn_func: | ||
156 | .long ftrace_stub | ||
157 | .previous | ||
158 | |||
159 | #else /* CONFIG_DYNAMIC_FTRACE */ | ||
160 | |||
161 | .globl _mcount | ||
162 | _mcount: | ||
163 | stm %r2,%r5,16(%r15) | ||
164 | bras %r1,2f | ||
165 | 0: .long ftrace_trace_function | ||
166 | 1: .long function_trace_stop | ||
167 | 2: l %r2,1b-0b(%r1) | ||
168 | icm %r2,0xf,0(%r2) | ||
169 | jnz 3f | ||
170 | st %r14,56(%r15) | ||
171 | lr %r0,%r15 | ||
172 | ahi %r15,-96 | ||
173 | l %r3,100(%r15) | ||
174 | la %r2,0(%r14) | ||
175 | st %r0,__SF_BACKCHAIN(%r15) | ||
176 | la %r3,0(%r3) | ||
177 | l %r14,0b-0b(%r1) | ||
178 | l %r14,0(%r14) | ||
179 | basr %r14,%r14 | ||
180 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
181 | bras %r1,0f | 57 | bras %r1,0f |
182 | .long prepare_ftrace_return | 58 | .long prepare_ftrace_return |
183 | 0: l %r2,152(%r15) | 59 | 0: l %r2,152(%r15) |
@@ -185,14 +61,13 @@ _mcount: | |||
185 | l %r3,100(%r15) | 61 | l %r3,100(%r15) |
186 | basr %r14,%r4 | 62 | basr %r14,%r4 |
187 | st %r2,100(%r15) | 63 | st %r2,100(%r15) |
64 | 1: | ||
188 | #endif | 65 | #endif |
189 | ahi %r15,96 | 66 | ahi %r15,96 |
190 | l %r14,56(%r15) | 67 | l %r14,56(%r15) |
191 | 3: lm %r2,%r5,16(%r15) | 68 | 3: lm %r2,%r5,16(%r15) |
192 | br %r14 | 69 | br %r14 |
193 | 70 | ||
194 | #endif /* CONFIG_DYNAMIC_FTRACE */ | ||
195 | |||
196 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 71 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
197 | 72 | ||
198 | .globl return_to_handler | 73 | .globl return_to_handler |
@@ -211,6 +86,4 @@ return_to_handler: | |||
211 | lm %r2,%r5,16(%r15) | 86 | lm %r2,%r5,16(%r15) |
212 | br %r14 | 87 | br %r14 |
213 | 88 | ||
214 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 89 | #endif |
215 | |||
216 | #endif /* CONFIG_64BIT */ | ||
diff --git a/arch/s390/kernel/mcount64.S b/arch/s390/kernel/mcount64.S new file mode 100644 index 000000000000..c37211c6092b --- /dev/null +++ b/arch/s390/kernel/mcount64.S | |||
@@ -0,0 +1,78 @@ | |||
1 | /* | ||
2 | * Copyright IBM Corp. 2008,2009 | ||
3 | * | ||
4 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> | ||
5 | * | ||
6 | */ | ||
7 | |||
8 | #include <asm/asm-offsets.h> | ||
9 | |||
10 | .globl ftrace_stub | ||
11 | ftrace_stub: | ||
12 | br %r14 | ||
13 | |||
14 | .globl _mcount | ||
15 | _mcount: | ||
16 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
17 | br %r14 | ||
18 | |||
19 | .data | ||
20 | .globl ftrace_dyn_func | ||
21 | ftrace_dyn_func: | ||
22 | .quad ftrace_stub | ||
23 | .previous | ||
24 | |||
25 | .globl ftrace_caller | ||
26 | ftrace_caller: | ||
27 | #endif | ||
28 | larl %r1,function_trace_stop | ||
29 | icm %r1,0xf,0(%r1) | ||
30 | bnzr %r14 | ||
31 | stmg %r2,%r5,32(%r15) | ||
32 | stg %r14,112(%r15) | ||
33 | lgr %r1,%r15 | ||
34 | aghi %r15,-160 | ||
35 | stg %r1,__SF_BACKCHAIN(%r15) | ||
36 | lgr %r2,%r14 | ||
37 | lg %r3,168(%r15) | ||
38 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
39 | larl %r14,ftrace_dyn_func | ||
40 | #else | ||
41 | larl %r14,ftrace_trace_function | ||
42 | #endif | ||
43 | lg %r14,0(%r14) | ||
44 | basr %r14,%r14 | ||
45 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
46 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
47 | .globl ftrace_graph_caller | ||
48 | ftrace_graph_caller: | ||
49 | # This unconditional branch gets runtime patched. Change only if | ||
50 | # you know what you are doing. See ftrace_enable_graph_caller(). | ||
51 | j 0f | ||
52 | #endif | ||
53 | lg %r2,272(%r15) | ||
54 | lg %r3,168(%r15) | ||
55 | brasl %r14,prepare_ftrace_return | ||
56 | stg %r2,168(%r15) | ||
57 | 0: | ||
58 | #endif | ||
59 | aghi %r15,160 | ||
60 | lmg %r2,%r5,32(%r15) | ||
61 | lg %r14,112(%r15) | ||
62 | br %r14 | ||
63 | |||
64 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
65 | |||
66 | .globl return_to_handler | ||
67 | return_to_handler: | ||
68 | stmg %r2,%r5,32(%r15) | ||
69 | lgr %r1,%r15 | ||
70 | aghi %r15,-160 | ||
71 | stg %r1,__SF_BACKCHAIN(%r15) | ||
72 | brasl %r14,ftrace_return_to_handler | ||
73 | aghi %r15,160 | ||
74 | lgr %r14,%r2 | ||
75 | lmg %r2,%r5,32(%r15) | ||
76 | br %r14 | ||
77 | |||
78 | #endif | ||
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 43acd73105b7..f3ddd7ac06c5 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c | |||
@@ -51,6 +51,9 @@ | |||
51 | #include "compat_ptrace.h" | 51 | #include "compat_ptrace.h" |
52 | #endif | 52 | #endif |
53 | 53 | ||
54 | #define CREATE_TRACE_POINTS | ||
55 | #include <trace/events/syscalls.h> | ||
56 | |||
54 | enum s390_regset { | 57 | enum s390_regset { |
55 | REGSET_GENERAL, | 58 | REGSET_GENERAL, |
56 | REGSET_FP, | 59 | REGSET_FP, |
@@ -661,8 +664,8 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) | |||
661 | ret = -1; | 664 | ret = -1; |
662 | } | 665 | } |
663 | 666 | ||
664 | if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE))) | 667 | if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) |
665 | ftrace_syscall_enter(regs); | 668 | trace_sys_enter(regs, regs->gprs[2]); |
666 | 669 | ||
667 | if (unlikely(current->audit_context)) | 670 | if (unlikely(current->audit_context)) |
668 | audit_syscall_entry(is_compat_task() ? | 671 | audit_syscall_entry(is_compat_task() ? |
@@ -679,8 +682,8 @@ asmlinkage void do_syscall_trace_exit(struct pt_regs *regs) | |||
679 | audit_syscall_exit(AUDITSC_RESULT(regs->gprs[2]), | 682 | audit_syscall_exit(AUDITSC_RESULT(regs->gprs[2]), |
680 | regs->gprs[2]); | 683 | regs->gprs[2]); |
681 | 684 | ||
682 | if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE))) | 685 | if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) |
683 | ftrace_syscall_exit(regs); | 686 | trace_sys_exit(regs, regs->gprs[2]); |
684 | 687 | ||
685 | if (test_thread_flag(TIF_SYSCALL_TRACE)) | 688 | if (test_thread_flag(TIF_SYSCALL_TRACE)) |
686 | tracehook_report_syscall_exit(regs, 0); | 689 | tracehook_report_syscall_exit(regs, 0); |
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index cbb897bc50bd..9ed13a1ed376 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c | |||
@@ -156,15 +156,11 @@ __setup("condev=", condev_setup); | |||
156 | 156 | ||
157 | static void __init set_preferred_console(void) | 157 | static void __init set_preferred_console(void) |
158 | { | 158 | { |
159 | if (MACHINE_IS_KVM) { | 159 | if (MACHINE_IS_KVM) |
160 | add_preferred_console("hvc", 0, NULL); | 160 | add_preferred_console("hvc", 0, NULL); |
161 | s390_virtio_console_init(); | 161 | else if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP) |
162 | return; | ||
163 | } | ||
164 | |||
165 | if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP) | ||
166 | add_preferred_console("ttyS", 0, NULL); | 162 | add_preferred_console("ttyS", 0, NULL); |
167 | if (CONSOLE_IS_3270) | 163 | else if (CONSOLE_IS_3270) |
168 | add_preferred_console("tty3270", 0, NULL); | 164 | add_preferred_console("tty3270", 0, NULL); |
169 | } | 165 | } |
170 | 166 | ||
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index 062bd64e65fa..6b4fef877f9d 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c | |||
@@ -536,4 +536,6 @@ void do_notify_resume(struct pt_regs *regs) | |||
536 | { | 536 | { |
537 | clear_thread_flag(TIF_NOTIFY_RESUME); | 537 | clear_thread_flag(TIF_NOTIFY_RESUME); |
538 | tracehook_notify_resume(regs); | 538 | tracehook_notify_resume(regs); |
539 | if (current->replacement_session_keyring) | ||
540 | key_replace_session_keyring(); | ||
539 | } | 541 | } |
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index be2cae083406..56c16876b919 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
@@ -49,6 +49,7 @@ | |||
49 | #include <asm/sclp.h> | 49 | #include <asm/sclp.h> |
50 | #include <asm/cputime.h> | 50 | #include <asm/cputime.h> |
51 | #include <asm/vdso.h> | 51 | #include <asm/vdso.h> |
52 | #include <asm/cpu.h> | ||
52 | #include "entry.h" | 53 | #include "entry.h" |
53 | 54 | ||
54 | static struct task_struct *current_set[NR_CPUS]; | 55 | static struct task_struct *current_set[NR_CPUS]; |
@@ -70,6 +71,23 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices); | |||
70 | 71 | ||
71 | static void smp_ext_bitcall(int, ec_bit_sig); | 72 | static void smp_ext_bitcall(int, ec_bit_sig); |
72 | 73 | ||
74 | static int cpu_stopped(int cpu) | ||
75 | { | ||
76 | __u32 status; | ||
77 | |||
78 | switch (signal_processor_ps(&status, 0, cpu, sigp_sense)) { | ||
79 | case sigp_order_code_accepted: | ||
80 | case sigp_status_stored: | ||
81 | /* Check for stopped and check stop state */ | ||
82 | if (status & 0x50) | ||
83 | return 1; | ||
84 | break; | ||
85 | default: | ||
86 | break; | ||
87 | } | ||
88 | return 0; | ||
89 | } | ||
90 | |||
73 | void smp_send_stop(void) | 91 | void smp_send_stop(void) |
74 | { | 92 | { |
75 | int cpu, rc; | 93 | int cpu, rc; |
@@ -86,7 +104,7 @@ void smp_send_stop(void) | |||
86 | rc = signal_processor(cpu, sigp_stop); | 104 | rc = signal_processor(cpu, sigp_stop); |
87 | } while (rc == sigp_busy); | 105 | } while (rc == sigp_busy); |
88 | 106 | ||
89 | while (!smp_cpu_not_running(cpu)) | 107 | while (!cpu_stopped(cpu)) |
90 | cpu_relax(); | 108 | cpu_relax(); |
91 | } | 109 | } |
92 | } | 110 | } |
@@ -269,19 +287,6 @@ static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { } | |||
269 | 287 | ||
270 | #endif /* CONFIG_ZFCPDUMP */ | 288 | #endif /* CONFIG_ZFCPDUMP */ |
271 | 289 | ||
272 | static int cpu_stopped(int cpu) | ||
273 | { | ||
274 | __u32 status; | ||
275 | |||
276 | /* Check for stopped state */ | ||
277 | if (signal_processor_ps(&status, 0, cpu, sigp_sense) == | ||
278 | sigp_status_stored) { | ||
279 | if (status & 0x40) | ||
280 | return 1; | ||
281 | } | ||
282 | return 0; | ||
283 | } | ||
284 | |||
285 | static int cpu_known(int cpu_id) | 290 | static int cpu_known(int cpu_id) |
286 | { | 291 | { |
287 | int cpu; | 292 | int cpu; |
@@ -300,7 +305,7 @@ static int smp_rescan_cpus_sigp(cpumask_t avail) | |||
300 | logical_cpu = cpumask_first(&avail); | 305 | logical_cpu = cpumask_first(&avail); |
301 | if (logical_cpu >= nr_cpu_ids) | 306 | if (logical_cpu >= nr_cpu_ids) |
302 | return 0; | 307 | return 0; |
303 | for (cpu_id = 0; cpu_id <= 65535; cpu_id++) { | 308 | for (cpu_id = 0; cpu_id <= MAX_CPU_ADDRESS; cpu_id++) { |
304 | if (cpu_known(cpu_id)) | 309 | if (cpu_known(cpu_id)) |
305 | continue; | 310 | continue; |
306 | __cpu_logical_map[logical_cpu] = cpu_id; | 311 | __cpu_logical_map[logical_cpu] = cpu_id; |
@@ -379,7 +384,7 @@ static void __init smp_detect_cpus(void) | |||
379 | /* Use sigp detection algorithm if sclp doesn't work. */ | 384 | /* Use sigp detection algorithm if sclp doesn't work. */ |
380 | if (sclp_get_cpu_info(info)) { | 385 | if (sclp_get_cpu_info(info)) { |
381 | smp_use_sigp_detection = 1; | 386 | smp_use_sigp_detection = 1; |
382 | for (cpu = 0; cpu <= 65535; cpu++) { | 387 | for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) { |
383 | if (cpu == boot_cpu_addr) | 388 | if (cpu == boot_cpu_addr) |
384 | continue; | 389 | continue; |
385 | __cpu_logical_map[CPU_INIT_NO] = cpu; | 390 | __cpu_logical_map[CPU_INIT_NO] = cpu; |
@@ -635,7 +640,7 @@ int __cpu_disable(void) | |||
635 | void __cpu_die(unsigned int cpu) | 640 | void __cpu_die(unsigned int cpu) |
636 | { | 641 | { |
637 | /* Wait until target cpu is down */ | 642 | /* Wait until target cpu is down */ |
638 | while (!smp_cpu_not_running(cpu)) | 643 | while (!cpu_stopped(cpu)) |
639 | cpu_relax(); | 644 | cpu_relax(); |
640 | smp_free_lowcore(cpu); | 645 | smp_free_lowcore(cpu); |
641 | pr_info("Processor %d stopped\n", cpu); | 646 | pr_info("Processor %d stopped\n", cpu); |
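The consolidated cpu_stopped() accepts both sigp_order_code_accepted and sigp_status_stored and widens the status test from 0x40 to 0x50; going by the comments in the hunk, that adds the check-stop bit (0x10) to the stopped bit (0x40). A trivial illustration of the mask (names invented):

#include <stdio.h>

static int stopped_or_check_stopped(unsigned int status)
{
	return (status & 0x50) != 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       stopped_or_check_stopped(0x40),	/* stopped:    1 */
	       stopped_or_check_stopped(0x10),	/* check stop: 1 */
	       stopped_or_check_stopped(0x04));	/* other:      0 */
	return 0;
}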
diff --git a/arch/s390/power/swsusp.c b/arch/s390/kernel/suspend.c index bd1f5c6b0b8c..086bee970cae 100644 --- a/arch/s390/power/swsusp.c +++ b/arch/s390/kernel/suspend.c | |||
@@ -1,13 +1,44 @@ | |||
1 | /* | 1 | /* |
2 | * Support for suspend and resume on s390 | 2 | * Suspend support specific for s390. |
3 | * | 3 | * |
4 | * Copyright IBM Corp. 2009 | 4 | * Copyright IBM Corp. 2009 |
5 | * | 5 | * |
6 | * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com> | 6 | * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com> |
7 | * | ||
8 | */ | 7 | */ |
9 | 8 | ||
9 | #include <linux/suspend.h> | ||
10 | #include <linux/reboot.h> | ||
11 | #include <linux/pfn.h> | ||
12 | #include <linux/mm.h> | ||
13 | #include <asm/sections.h> | ||
10 | #include <asm/system.h> | 14 | #include <asm/system.h> |
15 | #include <asm/ipl.h> | ||
16 | |||
17 | /* | ||
18 | * References to section boundaries | ||
19 | */ | ||
20 | extern const void __nosave_begin, __nosave_end; | ||
21 | |||
22 | /* | ||
23 | * check if a given pfn is in the 'nosave' range or the read-only NSS section | ||
24 | */ | ||
25 | int pfn_is_nosave(unsigned long pfn) | ||
26 | { | ||
27 | unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT; | ||
28 | unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) | ||
29 | >> PAGE_SHIFT; | ||
30 | unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1; | ||
31 | unsigned long stext_pfn = PFN_DOWN(__pa(&_stext)); | ||
32 | |||
33 | if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn) | ||
34 | return 1; | ||
35 | if (pfn >= stext_pfn && pfn <= eshared_pfn) { | ||
36 | if (ipl_info.type == IPL_TYPE_NSS) | ||
37 | return 1; | ||
38 | } else if (tprot(pfn * PAGE_SIZE) && pfn > 0) | ||
39 | return 1; | ||
40 | return 0; | ||
41 | } | ||
11 | 42 | ||
12 | void save_processor_state(void) | 43 | void save_processor_state(void) |
13 | { | 44 | { |
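pfn_is_nosave() above works on page frame numbers derived from section addresses. A userspace sketch of the same PFN arithmetic with invented addresses, assuming PAGE_SHIFT of 12 as on s390:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	/* hypothetical layout: nosave data spans 0x200100..0x200900 */
	unsigned long nosave_begin = 0x200100, nosave_end = 0x200900;
	unsigned long begin_pfn = nosave_begin >> PAGE_SHIFT;		/* 0x200 */
	unsigned long end_pfn = PAGE_ALIGN(nosave_end) >> PAGE_SHIFT;	/* 0x201 */
	unsigned long pfn = 0x200;

	/* pfn 0x200 falls inside [0x200, 0x201), so prints 1 */
	printf("%d\n", pfn >= begin_pfn && pfn < end_pfn);
	return 0;
}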
diff --git a/arch/s390/power/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S index b26df5c5933e..7cd6b096f0d1 100644 --- a/arch/s390/power/swsusp_asm64.S +++ b/arch/s390/kernel/swsusp_asm64.S | |||
@@ -21,7 +21,7 @@ | |||
21 | * This function runs with disabled interrupts. | 21 | * This function runs with disabled interrupts. |
22 | */ | 22 | */ |
23 | .section .text | 23 | .section .text |
24 | .align 2 | 24 | .align 4 |
25 | .globl swsusp_arch_suspend | 25 | .globl swsusp_arch_suspend |
26 | swsusp_arch_suspend: | 26 | swsusp_arch_suspend: |
27 | stmg %r6,%r15,__SF_GPRS(%r15) | 27 | stmg %r6,%r15,__SF_GPRS(%r15) |
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index d4c8e9c47c81..54e327e9af04 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c | |||
@@ -60,6 +60,7 @@ | |||
60 | #define TICK_SIZE tick | 60 | #define TICK_SIZE tick |
61 | 61 | ||
62 | u64 sched_clock_base_cc = -1; /* Force to data section. */ | 62 | u64 sched_clock_base_cc = -1; /* Force to data section. */ |
63 | EXPORT_SYMBOL_GPL(sched_clock_base_cc); | ||
63 | 64 | ||
64 | static DEFINE_PER_CPU(struct clock_event_device, comparators); | 65 | static DEFINE_PER_CPU(struct clock_event_device, comparators); |
65 | 66 | ||
@@ -68,7 +69,7 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators); | |||
68 | */ | 69 | */ |
69 | unsigned long long notrace sched_clock(void) | 70 | unsigned long long notrace sched_clock(void) |
70 | { | 71 | { |
71 | return ((get_clock_xt() - sched_clock_base_cc) * 125) >> 9; | 72 | return (get_clock_monotonic() * 125) >> 9; |
72 | } | 73 | } |
73 | 74 | ||
74 | /* | 75 | /* |
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index a53db23ee092..7315f9e67e1d 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S | |||
@@ -52,55 +52,18 @@ SECTIONS | |||
52 | . = ALIGN(PAGE_SIZE); | 52 | . = ALIGN(PAGE_SIZE); |
53 | _eshared = .; /* End of shareable data */ | 53 | _eshared = .; /* End of shareable data */ |
54 | 54 | ||
55 | . = ALIGN(16); /* Exception table */ | 55 | EXCEPTION_TABLE(16) :data |
56 | __ex_table : { | ||
57 | __start___ex_table = .; | ||
58 | *(__ex_table) | ||
59 | __stop___ex_table = .; | ||
60 | } :data | ||
61 | |||
62 | .data : { /* Data */ | ||
63 | DATA_DATA | ||
64 | CONSTRUCTORS | ||
65 | } | ||
66 | |||
67 | . = ALIGN(PAGE_SIZE); | ||
68 | .data_nosave : { | ||
69 | __nosave_begin = .; | ||
70 | *(.data.nosave) | ||
71 | } | ||
72 | . = ALIGN(PAGE_SIZE); | ||
73 | __nosave_end = .; | ||
74 | |||
75 | . = ALIGN(PAGE_SIZE); | ||
76 | .data.page_aligned : { | ||
77 | *(.data.idt) | ||
78 | } | ||
79 | 56 | ||
80 | . = ALIGN(0x100); | 57 | RW_DATA_SECTION(0x100, PAGE_SIZE, THREAD_SIZE) |
81 | .data.cacheline_aligned : { | ||
82 | *(.data.cacheline_aligned) | ||
83 | } | ||
84 | 58 | ||
85 | . = ALIGN(0x100); | ||
86 | .data.read_mostly : { | ||
87 | *(.data.read_mostly) | ||
88 | } | ||
89 | _edata = .; /* End of data section */ | 59 | _edata = .; /* End of data section */ |
90 | 60 | ||
91 | . = ALIGN(THREAD_SIZE); /* init_task */ | ||
92 | .data.init_task : { | ||
93 | *(.data.init_task) | ||
94 | } | ||
95 | |||
96 | /* will be freed after init */ | 61 | /* will be freed after init */ |
97 | . = ALIGN(PAGE_SIZE); /* Init code and data */ | 62 | . = ALIGN(PAGE_SIZE); /* Init code and data */ |
98 | __init_begin = .; | 63 | __init_begin = .; |
99 | .init.text : { | 64 | |
100 | _sinittext = .; | 65 | INIT_TEXT_SECTION(PAGE_SIZE) |
101 | INIT_TEXT | 66 | |
102 | _einittext = .; | ||
103 | } | ||
104 | /* | 67 | /* |
105 | * .exit.text is discarded at runtime, not link time, | 68 | * .exit.text is discarded at runtime, not link time, |
106 | * to deal with references from __bug_table | 69 | * to deal with references from __bug_table |
@@ -111,49 +74,13 @@ SECTIONS | |||
111 | 74 | ||
112 | /* early.c uses stsi, which requires page aligned data. */ | 75 | /* early.c uses stsi, which requires page aligned data. */ |
113 | . = ALIGN(PAGE_SIZE); | 76 | . = ALIGN(PAGE_SIZE); |
114 | .init.data : { | 77 | INIT_DATA_SECTION(0x100) |
115 | INIT_DATA | ||
116 | } | ||
117 | . = ALIGN(0x100); | ||
118 | .init.setup : { | ||
119 | __setup_start = .; | ||
120 | *(.init.setup) | ||
121 | __setup_end = .; | ||
122 | } | ||
123 | .initcall.init : { | ||
124 | __initcall_start = .; | ||
125 | INITCALLS | ||
126 | __initcall_end = .; | ||
127 | } | ||
128 | |||
129 | .con_initcall.init : { | ||
130 | __con_initcall_start = .; | ||
131 | *(.con_initcall.init) | ||
132 | __con_initcall_end = .; | ||
133 | } | ||
134 | SECURITY_INIT | ||
135 | |||
136 | #ifdef CONFIG_BLK_DEV_INITRD | ||
137 | . = ALIGN(0x100); | ||
138 | .init.ramfs : { | ||
139 | __initramfs_start = .; | ||
140 | *(.init.ramfs) | ||
141 | . = ALIGN(2); | ||
142 | __initramfs_end = .; | ||
143 | } | ||
144 | #endif | ||
145 | 78 | ||
146 | PERCPU(PAGE_SIZE) | 79 | PERCPU(PAGE_SIZE) |
147 | . = ALIGN(PAGE_SIZE); | 80 | . = ALIGN(PAGE_SIZE); |
148 | __init_end = .; /* freed after init ends here */ | 81 | __init_end = .; /* freed after init ends here */ |
149 | 82 | ||
150 | /* BSS */ | 83 | BSS_SECTION(0, 2, 0) |
151 | .bss : { | ||
152 | __bss_start = .; | ||
153 | *(.bss) | ||
154 | . = ALIGN(2); | ||
155 | __bss_stop = .; | ||
156 | } | ||
157 | 84 | ||
158 | _end = . ; | 85 | _end = . ; |
159 | 86 | ||
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile index db05661ac895..eec054484419 100644 --- a/arch/s390/mm/Makefile +++ b/arch/s390/mm/Makefile | |||
@@ -2,7 +2,7 @@ | |||
2 | # Makefile for the linux s390-specific parts of the memory manager. | 2 | # Makefile for the linux s390-specific parts of the memory manager. |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o | 5 | obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o \ |
6 | page-states.o | ||
6 | obj-$(CONFIG_CMM) += cmm.o | 7 | obj-$(CONFIG_CMM) += cmm.o |
7 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o | 8 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o |
8 | obj-$(CONFIG_PAGE_STATES) += page-states.o | ||
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index e5e119fe03b2..1abbadd497e1 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c | |||
@@ -10,6 +10,7 @@ | |||
10 | * Copyright (C) 1995 Linus Torvalds | 10 | * Copyright (C) 1995 Linus Torvalds |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/perf_counter.h> | ||
13 | #include <linux/signal.h> | 14 | #include <linux/signal.h> |
14 | #include <linux/sched.h> | 15 | #include <linux/sched.h> |
15 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
@@ -305,7 +306,7 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int write) | |||
305 | * interrupts again and then search the VMAs | 306 | * interrupts again and then search the VMAs |
306 | */ | 307 | */ |
307 | local_irq_enable(); | 308 | local_irq_enable(); |
308 | 309 | perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); | |
309 | down_read(&mm->mmap_sem); | 310 | down_read(&mm->mmap_sem); |
310 | 311 | ||
311 | si_code = SEGV_MAPERR; | 312 | si_code = SEGV_MAPERR; |
@@ -363,11 +364,15 @@ good_area: | |||
363 | } | 364 | } |
364 | BUG(); | 365 | BUG(); |
365 | } | 366 | } |
366 | if (fault & VM_FAULT_MAJOR) | 367 | if (fault & VM_FAULT_MAJOR) { |
367 | tsk->maj_flt++; | 368 | tsk->maj_flt++; |
368 | else | 369 | perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, |
370 | regs, address); | ||
371 | } else { | ||
369 | tsk->min_flt++; | 372 | tsk->min_flt++; |
370 | 373 | perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, | |
374 | regs, address); | ||
375 | } | ||
371 | up_read(&mm->mmap_sem); | 376 | up_read(&mm->mmap_sem); |
372 | /* | 377 | /* |
373 | * The instruction that caused the program check will | 378 | * The instruction that caused the program check will |
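
The fault.c hunk above wires the s390 fault path into the kernel's software perf counters. A minimal standalone sketch of the accounting pattern it applies (the helper name is illustrative; the calls and constants are taken from the hunk):

	#include <linux/perf_counter.h>
	#include <linux/mm.h>
	#include <linux/sched.h>

	/* Illustrative helper (not in the patch): count every fault, then
	 * classify it as major or minor exactly as do_exception() now does. */
	static void account_fault(struct pt_regs *regs, struct task_struct *tsk,
				  unsigned long address, int fault)
	{
		perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
					     regs, address);
		} else {
			tsk->min_flt++;
			perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
					     regs, address);
		}
	}
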
diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c index fc0ad73ffd90..f92ec203ad92 100644 --- a/arch/s390/mm/page-states.c +++ b/arch/s390/mm/page-states.c | |||
@@ -1,6 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * arch/s390/mm/page-states.c | ||
3 | * | ||
4 | * Copyright IBM Corp. 2008 | 2 | * Copyright IBM Corp. 2008 |
5 | * | 3 | * |
6 | * Guest page hinting for unused pages. | 4 | * Guest page hinting for unused pages. |
@@ -17,11 +15,12 @@ | |||
17 | #define ESSA_SET_STABLE 1 | 15 | #define ESSA_SET_STABLE 1 |
18 | #define ESSA_SET_UNUSED 2 | 16 | #define ESSA_SET_UNUSED 2 |
19 | 17 | ||
20 | static int cmma_flag; | 18 | static int cmma_flag = 1; |
21 | 19 | ||
22 | static int __init cmma(char *str) | 20 | static int __init cmma(char *str) |
23 | { | 21 | { |
24 | char *parm; | 22 | char *parm; |
23 | |||
25 | parm = strstrip(str); | 24 | parm = strstrip(str); |
26 | if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) { | 25 | if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) { |
27 | cmma_flag = 1; | 26 | cmma_flag = 1; |
@@ -32,7 +31,6 @@ static int __init cmma(char *str) | |||
32 | return 1; | 31 | return 1; |
33 | return 0; | 32 | return 0; |
34 | } | 33 | } |
35 | |||
36 | __setup("cmma=", cmma); | 34 | __setup("cmma=", cmma); |
37 | 35 | ||
38 | void __init cmma_init(void) | 36 | void __init cmma_init(void) |
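
The page-states.c change flips the CMMA guest-page-hinting default from off (cmma_flag = 0) to on (cmma_flag = 1), while the existing cmma= early parameter still allows overriding it at boot. A minimal sketch of the same __setup() pattern, with hypothetical names:

	#include <linux/init.h>
	#include <linux/string.h>

	static int feature_flag = 1;	/* on by default, like cmma_flag now */

	/* Hypothetical handler mirroring cmma(): an "off" value on the
	 * kernel command line clears the new default again. */
	static int __init feature_setup(char *str)
	{
		char *parm = strstrip(str);

		if (strcmp(parm, "no") == 0 || strcmp(parm, "off") == 0)
			feature_flag = 0;
		return 1;
	}
	__setup("feature=", feature_setup);
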
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 565667207985..c70215247071 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c | |||
@@ -78,9 +78,9 @@ unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec) | |||
78 | } | 78 | } |
79 | page->index = page_to_phys(shadow); | 79 | page->index = page_to_phys(shadow); |
80 | } | 80 | } |
81 | spin_lock(&mm->page_table_lock); | 81 | spin_lock(&mm->context.list_lock); |
82 | list_add(&page->lru, &mm->context.crst_list); | 82 | list_add(&page->lru, &mm->context.crst_list); |
83 | spin_unlock(&mm->page_table_lock); | 83 | spin_unlock(&mm->context.list_lock); |
84 | return (unsigned long *) page_to_phys(page); | 84 | return (unsigned long *) page_to_phys(page); |
85 | } | 85 | } |
86 | 86 | ||
@@ -89,9 +89,9 @@ void crst_table_free(struct mm_struct *mm, unsigned long *table) | |||
89 | unsigned long *shadow = get_shadow_table(table); | 89 | unsigned long *shadow = get_shadow_table(table); |
90 | struct page *page = virt_to_page(table); | 90 | struct page *page = virt_to_page(table); |
91 | 91 | ||
92 | spin_lock(&mm->page_table_lock); | 92 | spin_lock(&mm->context.list_lock); |
93 | list_del(&page->lru); | 93 | list_del(&page->lru); |
94 | spin_unlock(&mm->page_table_lock); | 94 | spin_unlock(&mm->context.list_lock); |
95 | if (shadow) | 95 | if (shadow) |
96 | free_pages((unsigned long) shadow, ALLOC_ORDER); | 96 | free_pages((unsigned long) shadow, ALLOC_ORDER); |
97 | free_pages((unsigned long) table, ALLOC_ORDER); | 97 | free_pages((unsigned long) table, ALLOC_ORDER); |
@@ -182,7 +182,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm) | |||
182 | unsigned long bits; | 182 | unsigned long bits; |
183 | 183 | ||
184 | bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL; | 184 | bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL; |
185 | spin_lock(&mm->page_table_lock); | 185 | spin_lock(&mm->context.list_lock); |
186 | page = NULL; | 186 | page = NULL; |
187 | if (!list_empty(&mm->context.pgtable_list)) { | 187 | if (!list_empty(&mm->context.pgtable_list)) { |
188 | page = list_first_entry(&mm->context.pgtable_list, | 188 | page = list_first_entry(&mm->context.pgtable_list, |
@@ -191,7 +191,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm) | |||
191 | page = NULL; | 191 | page = NULL; |
192 | } | 192 | } |
193 | if (!page) { | 193 | if (!page) { |
194 | spin_unlock(&mm->page_table_lock); | 194 | spin_unlock(&mm->context.list_lock); |
195 | page = alloc_page(GFP_KERNEL|__GFP_REPEAT); | 195 | page = alloc_page(GFP_KERNEL|__GFP_REPEAT); |
196 | if (!page) | 196 | if (!page) |
197 | return NULL; | 197 | return NULL; |
@@ -202,7 +202,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm) | |||
202 | clear_table_pgstes(table); | 202 | clear_table_pgstes(table); |
203 | else | 203 | else |
204 | clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE); | 204 | clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE); |
205 | spin_lock(&mm->page_table_lock); | 205 | spin_lock(&mm->context.list_lock); |
206 | list_add(&page->lru, &mm->context.pgtable_list); | 206 | list_add(&page->lru, &mm->context.pgtable_list); |
207 | } | 207 | } |
208 | table = (unsigned long *) page_to_phys(page); | 208 | table = (unsigned long *) page_to_phys(page); |
@@ -213,7 +213,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm) | |||
213 | page->flags |= bits; | 213 | page->flags |= bits; |
214 | if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1)) | 214 | if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1)) |
215 | list_move_tail(&page->lru, &mm->context.pgtable_list); | 215 | list_move_tail(&page->lru, &mm->context.pgtable_list); |
216 | spin_unlock(&mm->page_table_lock); | 216 | spin_unlock(&mm->context.list_lock); |
217 | return table; | 217 | return table; |
218 | } | 218 | } |
219 | 219 | ||
@@ -225,7 +225,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table) | |||
225 | bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL; | 225 | bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL; |
226 | bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long); | 226 | bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long); |
227 | page = pfn_to_page(__pa(table) >> PAGE_SHIFT); | 227 | page = pfn_to_page(__pa(table) >> PAGE_SHIFT); |
228 | spin_lock(&mm->page_table_lock); | 228 | spin_lock(&mm->context.list_lock); |
229 | page->flags ^= bits; | 229 | page->flags ^= bits; |
230 | if (page->flags & FRAG_MASK) { | 230 | if (page->flags & FRAG_MASK) { |
231 | /* Page now has some free pgtable fragments. */ | 231 | /* Page now has some free pgtable fragments. */ |
@@ -234,7 +234,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table) | |||
234 | } else | 234 | } else |
235 | /* All fragments of the 4K page have been freed. */ | 235 | /* All fragments of the 4K page have been freed. */ |
236 | list_del(&page->lru); | 236 | list_del(&page->lru); |
237 | spin_unlock(&mm->page_table_lock); | 237 | spin_unlock(&mm->context.list_lock); |
238 | if (page) { | 238 | if (page) { |
239 | pgtable_page_dtor(page); | 239 | pgtable_page_dtor(page); |
240 | __free_page(page); | 240 | __free_page(page); |
@@ -245,7 +245,7 @@ void disable_noexec(struct mm_struct *mm, struct task_struct *tsk) | |||
245 | { | 245 | { |
246 | struct page *page; | 246 | struct page *page; |
247 | 247 | ||
248 | spin_lock(&mm->page_table_lock); | 248 | spin_lock(&mm->context.list_lock); |
249 | /* Free shadow region and segment tables. */ | 249 | /* Free shadow region and segment tables. */ |
250 | list_for_each_entry(page, &mm->context.crst_list, lru) | 250 | list_for_each_entry(page, &mm->context.crst_list, lru) |
251 | if (page->index) { | 251 | if (page->index) { |
@@ -255,7 +255,7 @@ void disable_noexec(struct mm_struct *mm, struct task_struct *tsk) | |||
255 | /* "Free" second halves of page tables. */ | 255 | /* "Free" second halves of page tables. */ |
256 | list_for_each_entry(page, &mm->context.pgtable_list, lru) | 256 | list_for_each_entry(page, &mm->context.pgtable_list, lru) |
257 | page->flags &= ~SECOND_HALVES; | 257 | page->flags &= ~SECOND_HALVES; |
258 | spin_unlock(&mm->page_table_lock); | 258 | spin_unlock(&mm->context.list_lock); |
259 | mm->context.noexec = 0; | 259 | mm->context.noexec = 0; |
260 | update_mm(mm, tsk); | 260 | update_mm(mm, tsk); |
261 | } | 261 | } |
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c index e4868bfc672f..5f91a38d7592 100644 --- a/arch/s390/mm/vmem.c +++ b/arch/s390/mm/vmem.c | |||
@@ -331,6 +331,7 @@ void __init vmem_map_init(void) | |||
331 | unsigned long start, end; | 331 | unsigned long start, end; |
332 | int i; | 332 | int i; |
333 | 333 | ||
334 | spin_lock_init(&init_mm.context.list_lock); | ||
334 | INIT_LIST_HEAD(&init_mm.context.crst_list); | 335 | INIT_LIST_HEAD(&init_mm.context.crst_list); |
335 | INIT_LIST_HEAD(&init_mm.context.pgtable_list); | 336 | INIT_LIST_HEAD(&init_mm.context.pgtable_list); |
336 | init_mm.context.noexec = 0; | 337 | init_mm.context.noexec = 0; |
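
Taken together, the pgtable.c and vmem.c hunks split the s390 page-table list handling off the contended mm->page_table_lock and onto a dedicated mm->context.list_lock, which vmem_map_init() now initializes for init_mm. A condensed sketch of the scheme, with hypothetical structure and helper names:

	#include <linux/list.h>
	#include <linux/mm_types.h>
	#include <linux/spinlock.h>

	/* Hypothetical condensed view: one private lock that guards only
	 * the architecture's page-table lists, nothing else. */
	struct ctx_lists {
		spinlock_t list_lock;
		struct list_head pgtable_list;
	};

	static void ctx_lists_init(struct ctx_lists *c)
	{
		spin_lock_init(&c->list_lock);	/* as vmem_map_init() now does */
		INIT_LIST_HEAD(&c->pgtable_list);
	}

	static void ctx_lists_add(struct ctx_lists *c, struct page *page)
	{
		spin_lock(&c->list_lock);	/* was mm->page_table_lock */
		list_add(&page->lru, &c->pgtable_list);
		spin_unlock(&c->list_lock);
	}
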
diff --git a/arch/s390/power/Makefile b/arch/s390/power/Makefile deleted file mode 100644 index 973bb45a8fec..000000000000 --- a/arch/s390/power/Makefile +++ /dev/null | |||
@@ -1,8 +0,0 @@ | |||
1 | # | ||
2 | # Makefile for s390 PM support | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_HIBERNATION) += suspend.o | ||
6 | obj-$(CONFIG_HIBERNATION) += swsusp.o | ||
7 | obj-$(CONFIG_HIBERNATION) += swsusp_64.o | ||
8 | obj-$(CONFIG_HIBERNATION) += swsusp_asm64.o | ||
diff --git a/arch/s390/power/suspend.c b/arch/s390/power/suspend.c deleted file mode 100644 index b3351eceebbe..000000000000 --- a/arch/s390/power/suspend.c +++ /dev/null | |||
@@ -1,40 +0,0 @@ | |||
1 | /* | ||
2 | * Suspend support specific for s390. | ||
3 | * | ||
4 | * Copyright IBM Corp. 2009 | ||
5 | * | ||
6 | * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com> | ||
7 | */ | ||
8 | |||
9 | #include <linux/mm.h> | ||
10 | #include <linux/suspend.h> | ||
11 | #include <linux/reboot.h> | ||
12 | #include <linux/pfn.h> | ||
13 | #include <asm/sections.h> | ||
14 | #include <asm/ipl.h> | ||
15 | |||
16 | /* | ||
17 | * References to section boundaries | ||
18 | */ | ||
19 | extern const void __nosave_begin, __nosave_end; | ||
20 | |||
21 | /* | ||
22 | * check if given pfn is in the 'nosave' or in the read only NSS section | ||
23 | */ | ||
24 | int pfn_is_nosave(unsigned long pfn) | ||
25 | { | ||
26 | unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT; | ||
27 | unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) | ||
28 | >> PAGE_SHIFT; | ||
29 | unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1; | ||
30 | unsigned long stext_pfn = PFN_DOWN(__pa(&_stext)); | ||
31 | |||
32 | if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn) | ||
33 | return 1; | ||
34 | if (pfn >= stext_pfn && pfn <= eshared_pfn) { | ||
35 | if (ipl_info.type == IPL_TYPE_NSS) | ||
36 | return 1; | ||
37 | } else if ((tprot(pfn * PAGE_SIZE) && pfn > 0)) | ||
38 | return 1; | ||
39 | return 0; | ||
40 | } | ||
diff --git a/arch/s390/power/swsusp_64.c b/arch/s390/power/swsusp_64.c deleted file mode 100644 index 9516a517d72f..000000000000 --- a/arch/s390/power/swsusp_64.c +++ /dev/null | |||
@@ -1,17 +0,0 @@ | |||
1 | /* | ||
2 | * Support for suspend and resume on s390 | ||
3 | * | ||
4 | * Copyright IBM Corp. 2009 | ||
5 | * | ||
6 | * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com> | ||
7 | * | ||
8 | */ | ||
9 | |||
10 | #include <asm/system.h> | ||
11 | #include <linux/interrupt.h> | ||
12 | |||
13 | void do_after_copyback(void) | ||
14 | { | ||
15 | mb(); | ||
16 | } | ||
17 | |||
diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c index b5afbec1db59..04a21883f327 100644 --- a/arch/sh/kernel/signal_32.c +++ b/arch/sh/kernel/signal_32.c | |||
@@ -640,5 +640,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int save_r0, | |||
640 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { | 640 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { |
641 | clear_thread_flag(TIF_NOTIFY_RESUME); | 641 | clear_thread_flag(TIF_NOTIFY_RESUME); |
642 | tracehook_notify_resume(regs); | 642 | tracehook_notify_resume(regs); |
643 | if (current->replacement_session_keyring) | ||
644 | key_replace_session_keyring(); | ||
643 | } | 645 | } |
644 | } | 646 | } |
diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c index 0663a0ee6021..9e5c9b1d7e98 100644 --- a/arch/sh/kernel/signal_64.c +++ b/arch/sh/kernel/signal_64.c | |||
@@ -772,5 +772,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned long thread_info | |||
772 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { | 772 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { |
773 | clear_thread_flag(TIF_NOTIFY_RESUME); | 773 | clear_thread_flag(TIF_NOTIFY_RESUME); |
774 | tracehook_notify_resume(regs); | 774 | tracehook_notify_resume(regs); |
775 | if (current->replacement_session_keyring) | ||
776 | key_replace_session_keyring(); | ||
775 | } | 777 | } |
776 | } | 778 | } |
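
Both sh signal paths gain the same fixup: when TIF_NOTIFY_RESUME fires on the way back to user space, a session keyring queued by keyctl() gets installed. A standalone sketch of the shared pattern, assuming the arch's TIF_NOTIFY_RESUME definitions from thread_info.h:

	#include <linux/tracehook.h>
	#include <linux/key.h>
	#include <linux/sched.h>

	/* Illustrative standalone form of the hunk above: run tracehook
	 * callbacks, then install any pending replacement session keyring. */
	static void handle_notify_resume(struct pt_regs *regs,
					 unsigned long thread_info_flags)
	{
		if (thread_info_flags & _TIF_NOTIFY_RESUME) {
			clear_thread_flag(TIF_NOTIFY_RESUME);
			tracehook_notify_resume(regs);
			if (current->replacement_session_keyring)
				key_replace_session_keyring();
		}
	}
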
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 2aa7cd39b481..2bd5c287538a 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig | |||
@@ -26,6 +26,8 @@ config SPARC | |||
26 | select RTC_CLASS | 26 | select RTC_CLASS |
27 | select RTC_DRV_M48T59 | 27 | select RTC_DRV_M48T59 |
28 | select HAVE_PERF_COUNTERS | 28 | select HAVE_PERF_COUNTERS |
29 | select HAVE_DMA_ATTRS | ||
30 | select HAVE_DMA_API_DEBUG | ||
29 | 31 | ||
30 | config SPARC32 | 32 | config SPARC32 |
31 | def_bool !64BIT | 33 | def_bool !64BIT |
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h index 204e4bf64438..5a8c308e2b5c 100644 --- a/arch/sparc/include/asm/dma-mapping.h +++ b/arch/sparc/include/asm/dma-mapping.h | |||
@@ -3,6 +3,7 @@ | |||
3 | 3 | ||
4 | #include <linux/scatterlist.h> | 4 | #include <linux/scatterlist.h> |
5 | #include <linux/mm.h> | 5 | #include <linux/mm.h> |
6 | #include <linux/dma-debug.h> | ||
6 | 7 | ||
7 | #define DMA_ERROR_CODE (~(dma_addr_t)0x0) | 8 | #define DMA_ERROR_CODE (~(dma_addr_t)0x0) |
8 | 9 | ||
@@ -13,142 +14,40 @@ extern int dma_set_mask(struct device *dev, u64 dma_mask); | |||
13 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) | 14 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) |
14 | #define dma_is_consistent(d, h) (1) | 15 | #define dma_is_consistent(d, h) (1) |
15 | 16 | ||
16 | struct dma_ops { | 17 | extern struct dma_map_ops *dma_ops, pci32_dma_ops; |
17 | void *(*alloc_coherent)(struct device *dev, size_t size, | 18 | extern struct bus_type pci_bus_type; |
18 | dma_addr_t *dma_handle, gfp_t flag); | ||
19 | void (*free_coherent)(struct device *dev, size_t size, | ||
20 | void *cpu_addr, dma_addr_t dma_handle); | ||
21 | dma_addr_t (*map_page)(struct device *dev, struct page *page, | ||
22 | unsigned long offset, size_t size, | ||
23 | enum dma_data_direction direction); | ||
24 | void (*unmap_page)(struct device *dev, dma_addr_t dma_addr, | ||
25 | size_t size, | ||
26 | enum dma_data_direction direction); | ||
27 | int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents, | ||
28 | enum dma_data_direction direction); | ||
29 | void (*unmap_sg)(struct device *dev, struct scatterlist *sg, | ||
30 | int nhwentries, | ||
31 | enum dma_data_direction direction); | ||
32 | void (*sync_single_for_cpu)(struct device *dev, | ||
33 | dma_addr_t dma_handle, size_t size, | ||
34 | enum dma_data_direction direction); | ||
35 | void (*sync_single_for_device)(struct device *dev, | ||
36 | dma_addr_t dma_handle, size_t size, | ||
37 | enum dma_data_direction direction); | ||
38 | void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg, | ||
39 | int nelems, | ||
40 | enum dma_data_direction direction); | ||
41 | void (*sync_sg_for_device)(struct device *dev, | ||
42 | struct scatterlist *sg, int nents, | ||
43 | enum dma_data_direction dir); | ||
44 | }; | ||
45 | extern const struct dma_ops *dma_ops; | ||
46 | 19 | ||
47 | static inline void *dma_alloc_coherent(struct device *dev, size_t size, | 20 | static inline struct dma_map_ops *get_dma_ops(struct device *dev) |
48 | dma_addr_t *dma_handle, gfp_t flag) | ||
49 | { | ||
50 | return dma_ops->alloc_coherent(dev, size, dma_handle, flag); | ||
51 | } | ||
52 | |||
53 | static inline void dma_free_coherent(struct device *dev, size_t size, | ||
54 | void *cpu_addr, dma_addr_t dma_handle) | ||
55 | { | ||
56 | dma_ops->free_coherent(dev, size, cpu_addr, dma_handle); | ||
57 | } | ||
58 | |||
59 | static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, | ||
60 | size_t size, | ||
61 | enum dma_data_direction direction) | ||
62 | { | ||
63 | return dma_ops->map_page(dev, virt_to_page(cpu_addr), | ||
64 | (unsigned long)cpu_addr & ~PAGE_MASK, size, | ||
65 | direction); | ||
66 | } | ||
67 | |||
68 | static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, | ||
69 | size_t size, | ||
70 | enum dma_data_direction direction) | ||
71 | { | ||
72 | dma_ops->unmap_page(dev, dma_addr, size, direction); | ||
73 | } | ||
74 | |||
75 | static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, | ||
76 | unsigned long offset, size_t size, | ||
77 | enum dma_data_direction direction) | ||
78 | { | ||
79 | return dma_ops->map_page(dev, page, offset, size, direction); | ||
80 | } | ||
81 | |||
82 | static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address, | ||
83 | size_t size, | ||
84 | enum dma_data_direction direction) | ||
85 | { | ||
86 | dma_ops->unmap_page(dev, dma_address, size, direction); | ||
87 | } | ||
88 | |||
89 | static inline int dma_map_sg(struct device *dev, struct scatterlist *sg, | ||
90 | int nents, enum dma_data_direction direction) | ||
91 | { | ||
92 | return dma_ops->map_sg(dev, sg, nents, direction); | ||
93 | } | ||
94 | |||
95 | static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg, | ||
96 | int nents, enum dma_data_direction direction) | ||
97 | { | 21 | { |
98 | dma_ops->unmap_sg(dev, sg, nents, direction); | 22 | #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI) |
99 | } | 23 | if (dev->bus == &pci_bus_type) |
100 | 24 | return &pci32_dma_ops; | |
101 | static inline void dma_sync_single_for_cpu(struct device *dev, | 25 | #endif |
102 | dma_addr_t dma_handle, size_t size, | 26 | return dma_ops; |
103 | enum dma_data_direction direction) | ||
104 | { | ||
105 | dma_ops->sync_single_for_cpu(dev, dma_handle, size, direction); | ||
106 | } | 27 | } |
107 | 28 | ||
108 | static inline void dma_sync_single_for_device(struct device *dev, | 29 | #include <asm-generic/dma-mapping-common.h> |
109 | dma_addr_t dma_handle, | ||
110 | size_t size, | ||
111 | enum dma_data_direction direction) | ||
112 | { | ||
113 | if (dma_ops->sync_single_for_device) | ||
114 | dma_ops->sync_single_for_device(dev, dma_handle, size, | ||
115 | direction); | ||
116 | } | ||
117 | 30 | ||
118 | static inline void dma_sync_sg_for_cpu(struct device *dev, | 31 | static inline void *dma_alloc_coherent(struct device *dev, size_t size, |
119 | struct scatterlist *sg, int nelems, | 32 | dma_addr_t *dma_handle, gfp_t flag) |
120 | enum dma_data_direction direction) | ||
121 | { | 33 | { |
122 | dma_ops->sync_sg_for_cpu(dev, sg, nelems, direction); | 34 | struct dma_map_ops *ops = get_dma_ops(dev); |
123 | } | 35 | void *cpu_addr; |
124 | 36 | ||
125 | static inline void dma_sync_sg_for_device(struct device *dev, | 37 | cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag); |
126 | struct scatterlist *sg, int nelems, | 38 | debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr); |
127 | enum dma_data_direction direction) | 39 | return cpu_addr; |
128 | { | ||
129 | if (dma_ops->sync_sg_for_device) | ||
130 | dma_ops->sync_sg_for_device(dev, sg, nelems, direction); | ||
131 | } | 40 | } |
132 | 41 | ||
133 | static inline void dma_sync_single_range_for_cpu(struct device *dev, | 42 | static inline void dma_free_coherent(struct device *dev, size_t size, |
134 | dma_addr_t dma_handle, | 43 | void *cpu_addr, dma_addr_t dma_handle) |
135 | unsigned long offset, | ||
136 | size_t size, | ||
137 | enum dma_data_direction dir) | ||
138 | { | 44 | { |
139 | dma_sync_single_for_cpu(dev, dma_handle+offset, size, dir); | 45 | struct dma_map_ops *ops = get_dma_ops(dev); |
140 | } | ||
141 | 46 | ||
142 | static inline void dma_sync_single_range_for_device(struct device *dev, | 47 | debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); |
143 | dma_addr_t dma_handle, | 48 | ops->free_coherent(dev, size, cpu_addr, dma_handle); |
144 | unsigned long offset, | ||
145 | size_t size, | ||
146 | enum dma_data_direction dir) | ||
147 | { | ||
148 | dma_sync_single_for_device(dev, dma_handle+offset, size, dir); | ||
149 | } | 49 | } |
150 | 50 | ||
151 | |||
152 | static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | 51 | static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
153 | { | 52 | { |
154 | return (dma_addr == DMA_ERROR_CODE); | 53 | return (dma_addr == DMA_ERROR_CODE); |
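
The dma-mapping.h rewrite retires sparc's private struct dma_ops in favor of the generic struct dma_map_ops: get_dma_ops() dispatches PCI devices on sparc32 to pci32_dma_ops and everything else to dma_ops, asm-generic/dma-mapping-common.h supplies the dma_map_*/dma_sync_* wrappers, and the debug_dma_* hooks feed CONFIG_DMA_API_DEBUG. A hypothetical driver fragment against the resulting API (the function name is illustrative):

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>

	/* Hypothetical driver fragment: map a buffer for device reads and
	 * check the result; the dma-debug hooks track the mapping for us. */
	static int example_map_buffer(struct device *dev, void *buf, size_t len,
				      dma_addr_t *handle)
	{
		*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *handle))
			return -ENOMEM;
		return 0;
	}
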
diff --git a/arch/sparc/include/asm/irq_64.h b/arch/sparc/include/asm/irq_64.h index 1934f2cbf513..a0b443cb3c1f 100644 --- a/arch/sparc/include/asm/irq_64.h +++ b/arch/sparc/include/asm/irq_64.h | |||
@@ -89,8 +89,8 @@ static inline unsigned long get_softint(void) | |||
89 | return retval; | 89 | return retval; |
90 | } | 90 | } |
91 | 91 | ||
92 | void __trigger_all_cpu_backtrace(void); | 92 | void arch_trigger_all_cpu_backtrace(void); |
93 | #define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace() | 93 | #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace |
94 | 94 | ||
95 | extern void *hardirq_stack[NR_CPUS]; | 95 | extern void *hardirq_stack[NR_CPUS]; |
96 | extern void *softirq_stack[NR_CPUS]; | 96 | extern void *softirq_stack[NR_CPUS]; |
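
The irq_64.h rename follows the usual convention of defining the macro to its own name so generic code can detect the arch-provided backtrace hook with #ifdef; a minimal illustrative caller:

	#include <asm/irq.h>

	/* Illustrative caller: the call is compiled in only when the
	 * architecture advertises the hook via the #define above. */
	static void example_dump_all_cpus(void)
	{
	#ifdef arch_trigger_all_cpu_backtrace
		arch_trigger_all_cpu_backtrace();
	#endif
	}
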
diff --git a/arch/sparc/include/asm/pci.h b/arch/sparc/include/asm/pci.h index 6e14fd179335..d9c031f9910f 100644 --- a/arch/sparc/include/asm/pci.h +++ b/arch/sparc/include/asm/pci.h | |||
@@ -5,4 +5,7 @@ | |||
5 | #else | 5 | #else |
6 | #include <asm/pci_32.h> | 6 | #include <asm/pci_32.h> |
7 | #endif | 7 | #endif |
8 | |||
9 | #include <asm-generic/pci-dma-compat.h> | ||
10 | |||
8 | #endif | 11 | #endif |
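
Including asm-generic/pci-dma-compat.h here is what allows the hand-rolled pci_* DMA wrappers to be deleted from pci_32.h and pci_64.h below: the compat header supplies them as thin inlines over the dma_* API. A paraphrased sketch of the shape of one such wrapper (compare the inline removed from pci_64.h):

	#include <linux/pci.h>

	/* Paraphrased sketch of what the compat header provides: each
	 * pci_* call forwards to the dma_* equivalent on &pdev->dev. */
	static inline dma_addr_t
	example_pci_map_single(struct pci_dev *pdev, void *ptr, size_t size, int dir)
	{
		return dma_map_single(&pdev->dev, ptr, size,
				      (enum dma_data_direction)dir);
	}
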
diff --git a/arch/sparc/include/asm/pci_32.h b/arch/sparc/include/asm/pci_32.h index b41c4c198159..ac0e8369fd97 100644 --- a/arch/sparc/include/asm/pci_32.h +++ b/arch/sparc/include/asm/pci_32.h | |||
@@ -31,42 +31,8 @@ static inline void pcibios_penalize_isa_irq(int irq, int active) | |||
31 | */ | 31 | */ |
32 | #define PCI_DMA_BUS_IS_PHYS (0) | 32 | #define PCI_DMA_BUS_IS_PHYS (0) |
33 | 33 | ||
34 | #include <asm/scatterlist.h> | ||
35 | |||
36 | struct pci_dev; | 34 | struct pci_dev; |
37 | 35 | ||
38 | /* Allocate and map kernel buffer using consistent mode DMA for a device. | ||
39 | * hwdev should be valid struct pci_dev pointer for PCI devices. | ||
40 | */ | ||
41 | extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle); | ||
42 | |||
43 | /* Free and unmap a consistent DMA buffer. | ||
44 | * cpu_addr is what was returned from pci_alloc_consistent, | ||
45 | * size must be the same as what as passed into pci_alloc_consistent, | ||
46 | * and likewise dma_addr must be the same as what *dma_addrp was set to. | ||
47 | * | ||
48 | * References to the memory and mappings assosciated with cpu_addr/dma_addr | ||
49 | * past this call are illegal. | ||
50 | */ | ||
51 | extern void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle); | ||
52 | |||
53 | /* Map a single buffer of the indicated size for DMA in streaming mode. | ||
54 | * The 32-bit bus address to use is returned. | ||
55 | * | ||
56 | * Once the device is given the dma address, the device owns this memory | ||
57 | * until either pci_unmap_single or pci_dma_sync_single_for_cpu is performed. | ||
58 | */ | ||
59 | extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction); | ||
60 | |||
61 | /* Unmap a single streaming mode DMA translation. The dma_addr and size | ||
62 | * must match what was provided for in a previous pci_map_single call. All | ||
63 | * other usages are undefined. | ||
64 | * | ||
65 | * After this call, reads by the cpu to the buffer are guaranteed to see | ||
66 | * whatever the device wrote there. | ||
67 | */ | ||
68 | extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction); | ||
69 | |||
70 | /* pci_unmap_{single,page} is not a nop, thus... */ | 36 | /* pci_unmap_{single,page} is not a nop, thus... */ |
71 | #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ | 37 | #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ |
72 | dma_addr_t ADDR_NAME; | 38 | dma_addr_t ADDR_NAME; |
@@ -81,69 +47,6 @@ extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t | |||
81 | #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ | 47 | #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ |
82 | (((PTR)->LEN_NAME) = (VAL)) | 48 | (((PTR)->LEN_NAME) = (VAL)) |
83 | 49 | ||
84 | /* | ||
85 | * Same as above, only with pages instead of mapped addresses. | ||
86 | */ | ||
87 | extern dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page, | ||
88 | unsigned long offset, size_t size, int direction); | ||
89 | extern void pci_unmap_page(struct pci_dev *hwdev, | ||
90 | dma_addr_t dma_address, size_t size, int direction); | ||
91 | |||
92 | /* Map a set of buffers described by scatterlist in streaming | ||
93 | * mode for DMA. This is the scather-gather version of the | ||
94 | * above pci_map_single interface. Here the scatter gather list | ||
95 | * elements are each tagged with the appropriate dma address | ||
96 | * and length. They are obtained via sg_dma_{address,length}(SG). | ||
97 | * | ||
98 | * NOTE: An implementation may be able to use a smaller number of | ||
99 | * DMA address/length pairs than there are SG table elements. | ||
100 | * (for example via virtual mapping capabilities) | ||
101 | * The routine returns the number of addr/length pairs actually | ||
102 | * used, at most nents. | ||
103 | * | ||
104 | * Device ownership issues as mentioned above for pci_map_single are | ||
105 | * the same here. | ||
106 | */ | ||
107 | extern int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction); | ||
108 | |||
109 | /* Unmap a set of streaming mode DMA translations. | ||
110 | * Again, cpu read rules concerning calls here are the same as for | ||
111 | * pci_unmap_single() above. | ||
112 | */ | ||
113 | extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nhwents, int direction); | ||
114 | |||
115 | /* Make physical memory consistent for a single | ||
116 | * streaming mode DMA translation after a transfer. | ||
117 | * | ||
118 | * If you perform a pci_map_single() but wish to interrogate the | ||
119 | * buffer using the cpu, yet do not wish to teardown the PCI dma | ||
120 | * mapping, you must call this function before doing so. At the | ||
121 | * next point you give the PCI dma address back to the card, you | ||
122 | * must first perform a pci_dma_sync_for_device, and then the device | ||
123 | * again owns the buffer. | ||
124 | */ | ||
125 | extern void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction); | ||
126 | extern void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction); | ||
127 | |||
128 | /* Make physical memory consistent for a set of streaming | ||
129 | * mode DMA translations after a transfer. | ||
130 | * | ||
131 | * The same as pci_dma_sync_single_* but for a scatter-gather list, | ||
132 | * same rules and usage. | ||
133 | */ | ||
134 | extern void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction); | ||
135 | extern void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction); | ||
136 | |||
137 | /* Return whether the given PCI device DMA address mask can | ||
138 | * be supported properly. For example, if your device can | ||
139 | * only drive the low 24-bits during PCI bus mastering, then | ||
140 | * you would pass 0x00ffffff as the mask to this function. | ||
141 | */ | ||
142 | static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask) | ||
143 | { | ||
144 | return 1; | ||
145 | } | ||
146 | |||
147 | #ifdef CONFIG_PCI | 50 | #ifdef CONFIG_PCI |
148 | static inline void pci_dma_burst_advice(struct pci_dev *pdev, | 51 | static inline void pci_dma_burst_advice(struct pci_dev *pdev, |
149 | enum pci_dma_burst_strategy *strat, | 52 | enum pci_dma_burst_strategy *strat, |
@@ -154,14 +57,6 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev, | |||
154 | } | 57 | } |
155 | #endif | 58 | #endif |
156 | 59 | ||
157 | #define PCI_DMA_ERROR_CODE (~(dma_addr_t)0x0) | ||
158 | |||
159 | static inline int pci_dma_mapping_error(struct pci_dev *pdev, | ||
160 | dma_addr_t dma_addr) | ||
161 | { | ||
162 | return (dma_addr == PCI_DMA_ERROR_CODE); | ||
163 | } | ||
164 | |||
165 | struct device_node; | 60 | struct device_node; |
166 | extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev); | 61 | extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev); |
167 | 62 | ||
diff --git a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h index 7a1e3566e59c..5cc9f6aa5494 100644 --- a/arch/sparc/include/asm/pci_64.h +++ b/arch/sparc/include/asm/pci_64.h | |||
@@ -35,37 +35,6 @@ static inline void pcibios_penalize_isa_irq(int irq, int active) | |||
35 | */ | 35 | */ |
36 | #define PCI_DMA_BUS_IS_PHYS (0) | 36 | #define PCI_DMA_BUS_IS_PHYS (0) |
37 | 37 | ||
38 | static inline void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, | ||
39 | dma_addr_t *dma_handle) | ||
40 | { | ||
41 | return dma_alloc_coherent(&pdev->dev, size, dma_handle, GFP_ATOMIC); | ||
42 | } | ||
43 | |||
44 | static inline void pci_free_consistent(struct pci_dev *pdev, size_t size, | ||
45 | void *vaddr, dma_addr_t dma_handle) | ||
46 | { | ||
47 | return dma_free_coherent(&pdev->dev, size, vaddr, dma_handle); | ||
48 | } | ||
49 | |||
50 | static inline dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, | ||
51 | size_t size, int direction) | ||
52 | { | ||
53 | return dma_map_single(&pdev->dev, ptr, size, | ||
54 | (enum dma_data_direction) direction); | ||
55 | } | ||
56 | |||
57 | static inline void pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, | ||
58 | size_t size, int direction) | ||
59 | { | ||
60 | dma_unmap_single(&pdev->dev, dma_addr, size, | ||
61 | (enum dma_data_direction) direction); | ||
62 | } | ||
63 | |||
64 | #define pci_map_page(dev, page, off, size, dir) \ | ||
65 | pci_map_single(dev, (page_address(page) + (off)), size, dir) | ||
66 | #define pci_unmap_page(dev,addr,sz,dir) \ | ||
67 | pci_unmap_single(dev,addr,sz,dir) | ||
68 | |||
69 | /* pci_unmap_{single,page} is not a nop, thus... */ | 38 | /* pci_unmap_{single,page} is not a nop, thus... */ |
70 | #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ | 39 | #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ |
71 | dma_addr_t ADDR_NAME; | 40 | dma_addr_t ADDR_NAME; |
@@ -80,57 +49,6 @@ static inline void pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, | |||
80 | #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ | 49 | #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ |
81 | (((PTR)->LEN_NAME) = (VAL)) | 50 | (((PTR)->LEN_NAME) = (VAL)) |
82 | 51 | ||
83 | static inline int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, | ||
84 | int nents, int direction) | ||
85 | { | ||
86 | return dma_map_sg(&pdev->dev, sg, nents, | ||
87 | (enum dma_data_direction) direction); | ||
88 | } | ||
89 | |||
90 | static inline void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, | ||
91 | int nents, int direction) | ||
92 | { | ||
93 | dma_unmap_sg(&pdev->dev, sg, nents, | ||
94 | (enum dma_data_direction) direction); | ||
95 | } | ||
96 | |||
97 | static inline void pci_dma_sync_single_for_cpu(struct pci_dev *pdev, | ||
98 | dma_addr_t dma_handle, | ||
99 | size_t size, int direction) | ||
100 | { | ||
101 | dma_sync_single_for_cpu(&pdev->dev, dma_handle, size, | ||
102 | (enum dma_data_direction) direction); | ||
103 | } | ||
104 | |||
105 | static inline void pci_dma_sync_single_for_device(struct pci_dev *pdev, | ||
106 | dma_addr_t dma_handle, | ||
107 | size_t size, int direction) | ||
108 | { | ||
109 | /* No flushing needed to sync cpu writes to the device. */ | ||
110 | } | ||
111 | |||
112 | static inline void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, | ||
113 | struct scatterlist *sg, | ||
114 | int nents, int direction) | ||
115 | { | ||
116 | dma_sync_sg_for_cpu(&pdev->dev, sg, nents, | ||
117 | (enum dma_data_direction) direction); | ||
118 | } | ||
119 | |||
120 | static inline void pci_dma_sync_sg_for_device(struct pci_dev *pdev, | ||
121 | struct scatterlist *sg, | ||
122 | int nelems, int direction) | ||
123 | { | ||
124 | /* No flushing needed to sync cpu writes to the device. */ | ||
125 | } | ||
126 | |||
127 | /* Return whether the given PCI device DMA address mask can | ||
128 | * be supported properly. For example, if your device can | ||
129 | * only drive the low 24-bits during PCI bus mastering, then | ||
130 | * you would pass 0x00ffffff as the mask to this function. | ||
131 | */ | ||
132 | extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask); | ||
133 | |||
134 | /* PCI IOMMU mapping bypass support. */ | 52 | /* PCI IOMMU mapping bypass support. */ |
135 | 53 | ||
136 | /* PCI 64-bit addressing works for all slots on all controller | 54 | /* PCI 64-bit addressing works for all slots on all controller |
@@ -140,12 +58,6 @@ extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask); | |||
140 | #define PCI64_REQUIRED_MASK (~(dma64_addr_t)0) | 58 | #define PCI64_REQUIRED_MASK (~(dma64_addr_t)0) |
141 | #define PCI64_ADDR_BASE 0xfffc000000000000UL | 59 | #define PCI64_ADDR_BASE 0xfffc000000000000UL |
142 | 60 | ||
143 | static inline int pci_dma_mapping_error(struct pci_dev *pdev, | ||
144 | dma_addr_t dma_addr) | ||
145 | { | ||
146 | return dma_mapping_error(&pdev->dev, dma_addr); | ||
147 | } | ||
148 | |||
149 | #ifdef CONFIG_PCI | 61 | #ifdef CONFIG_PCI |
150 | static inline void pci_dma_burst_advice(struct pci_dev *pdev, | 62 | static inline void pci_dma_burst_advice(struct pci_dev *pdev, |
151 | enum pci_dma_burst_strategy *strat, | 63 | enum pci_dma_burst_strategy *strat, |
diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h index 46f91ab66a50..857630cff636 100644 --- a/arch/sparc/include/asm/spinlock_32.h +++ b/arch/sparc/include/asm/spinlock_32.h | |||
@@ -76,7 +76,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) | |||
76 | * | 76 | * |
77 | * Unfortunately this scheme limits us to ~16,000,000 cpus. | 77 | * Unfortunately this scheme limits us to ~16,000,000 cpus. |
78 | */ | 78 | */ |
79 | static inline void __read_lock(raw_rwlock_t *rw) | 79 | static inline void arch_read_lock(raw_rwlock_t *rw) |
80 | { | 80 | { |
81 | register raw_rwlock_t *lp asm("g1"); | 81 | register raw_rwlock_t *lp asm("g1"); |
82 | lp = rw; | 82 | lp = rw; |
@@ -92,11 +92,11 @@ static inline void __read_lock(raw_rwlock_t *rw) | |||
92 | #define __raw_read_lock(lock) \ | 92 | #define __raw_read_lock(lock) \ |
93 | do { unsigned long flags; \ | 93 | do { unsigned long flags; \ |
94 | local_irq_save(flags); \ | 94 | local_irq_save(flags); \ |
95 | __read_lock(lock); \ | 95 | arch_read_lock(lock); \ |
96 | local_irq_restore(flags); \ | 96 | local_irq_restore(flags); \ |
97 | } while(0) | 97 | } while(0) |
98 | 98 | ||
99 | static inline void __read_unlock(raw_rwlock_t *rw) | 99 | static inline void arch_read_unlock(raw_rwlock_t *rw) |
100 | { | 100 | { |
101 | register raw_rwlock_t *lp asm("g1"); | 101 | register raw_rwlock_t *lp asm("g1"); |
102 | lp = rw; | 102 | lp = rw; |
@@ -112,7 +112,7 @@ static inline void __read_unlock(raw_rwlock_t *rw) | |||
112 | #define __raw_read_unlock(lock) \ | 112 | #define __raw_read_unlock(lock) \ |
113 | do { unsigned long flags; \ | 113 | do { unsigned long flags; \ |
114 | local_irq_save(flags); \ | 114 | local_irq_save(flags); \ |
115 | __read_unlock(lock); \ | 115 | arch_read_unlock(lock); \ |
116 | local_irq_restore(flags); \ | 116 | local_irq_restore(flags); \ |
117 | } while(0) | 117 | } while(0) |
118 | 118 | ||
@@ -150,7 +150,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) | |||
150 | return (val == 0); | 150 | return (val == 0); |
151 | } | 151 | } |
152 | 152 | ||
153 | static inline int __read_trylock(raw_rwlock_t *rw) | 153 | static inline int arch_read_trylock(raw_rwlock_t *rw) |
154 | { | 154 | { |
155 | register raw_rwlock_t *lp asm("g1"); | 155 | register raw_rwlock_t *lp asm("g1"); |
156 | register int res asm("o0"); | 156 | register int res asm("o0"); |
@@ -169,7 +169,7 @@ static inline int __read_trylock(raw_rwlock_t *rw) | |||
169 | ({ unsigned long flags; \ | 169 | ({ unsigned long flags; \ |
170 | int res; \ | 170 | int res; \ |
171 | local_irq_save(flags); \ | 171 | local_irq_save(flags); \ |
172 | res = __read_trylock(lock); \ | 172 | res = arch_read_trylock(lock); \ |
173 | local_irq_restore(flags); \ | 173 | local_irq_restore(flags); \ |
174 | res; \ | 174 | res; \ |
175 | }) | 175 | }) |
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h index f6b2b92ad8d2..43e514783582 100644 --- a/arch/sparc/include/asm/spinlock_64.h +++ b/arch/sparc/include/asm/spinlock_64.h | |||
@@ -92,7 +92,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla | |||
92 | 92 | ||
93 | /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */ | 93 | /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */ |
94 | 94 | ||
95 | static void inline __read_lock(raw_rwlock_t *lock) | 95 | static void inline arch_read_lock(raw_rwlock_t *lock) |
96 | { | 96 | { |
97 | unsigned long tmp1, tmp2; | 97 | unsigned long tmp1, tmp2; |
98 | 98 | ||
@@ -115,7 +115,7 @@ static void inline __read_lock(raw_rwlock_t *lock) | |||
115 | : "memory"); | 115 | : "memory"); |
116 | } | 116 | } |
117 | 117 | ||
118 | static int inline __read_trylock(raw_rwlock_t *lock) | 118 | static int inline arch_read_trylock(raw_rwlock_t *lock) |
119 | { | 119 | { |
120 | int tmp1, tmp2; | 120 | int tmp1, tmp2; |
121 | 121 | ||
@@ -136,7 +136,7 @@ static int inline __read_trylock(raw_rwlock_t *lock) | |||
136 | return tmp1; | 136 | return tmp1; |
137 | } | 137 | } |
138 | 138 | ||
139 | static void inline __read_unlock(raw_rwlock_t *lock) | 139 | static void inline arch_read_unlock(raw_rwlock_t *lock) |
140 | { | 140 | { |
141 | unsigned long tmp1, tmp2; | 141 | unsigned long tmp1, tmp2; |
142 | 142 | ||
@@ -152,7 +152,7 @@ static void inline __read_unlock(raw_rwlock_t *lock) | |||
152 | : "memory"); | 152 | : "memory"); |
153 | } | 153 | } |
154 | 154 | ||
155 | static void inline __write_lock(raw_rwlock_t *lock) | 155 | static void inline arch_write_lock(raw_rwlock_t *lock) |
156 | { | 156 | { |
157 | unsigned long mask, tmp1, tmp2; | 157 | unsigned long mask, tmp1, tmp2; |
158 | 158 | ||
@@ -177,7 +177,7 @@ static void inline __write_lock(raw_rwlock_t *lock) | |||
177 | : "memory"); | 177 | : "memory"); |
178 | } | 178 | } |
179 | 179 | ||
180 | static void inline __write_unlock(raw_rwlock_t *lock) | 180 | static void inline arch_write_unlock(raw_rwlock_t *lock) |
181 | { | 181 | { |
182 | __asm__ __volatile__( | 182 | __asm__ __volatile__( |
183 | " stw %%g0, [%0]" | 183 | " stw %%g0, [%0]" |
@@ -186,7 +186,7 @@ static void inline __write_unlock(raw_rwlock_t *lock) | |||
186 | : "memory"); | 186 | : "memory"); |
187 | } | 187 | } |
188 | 188 | ||
189 | static int inline __write_trylock(raw_rwlock_t *lock) | 189 | static int inline arch_write_trylock(raw_rwlock_t *lock) |
190 | { | 190 | { |
191 | unsigned long mask, tmp1, tmp2, result; | 191 | unsigned long mask, tmp1, tmp2, result; |
192 | 192 | ||
@@ -210,14 +210,14 @@ static int inline __write_trylock(raw_rwlock_t *lock) | |||
210 | return result; | 210 | return result; |
211 | } | 211 | } |
212 | 212 | ||
213 | #define __raw_read_lock(p) __read_lock(p) | 213 | #define __raw_read_lock(p) arch_read_lock(p) |
214 | #define __raw_read_lock_flags(p, f) __read_lock(p) | 214 | #define __raw_read_lock_flags(p, f) arch_read_lock(p) |
215 | #define __raw_read_trylock(p) __read_trylock(p) | 215 | #define __raw_read_trylock(p) arch_read_trylock(p) |
216 | #define __raw_read_unlock(p) __read_unlock(p) | 216 | #define __raw_read_unlock(p) arch_read_unlock(p) |
217 | #define __raw_write_lock(p) __write_lock(p) | 217 | #define __raw_write_lock(p) arch_write_lock(p) |
218 | #define __raw_write_lock_flags(p, f) __write_lock(p) | 218 | #define __raw_write_lock_flags(p, f) arch_write_lock(p) |
219 | #define __raw_write_unlock(p) __write_unlock(p) | 219 | #define __raw_write_unlock(p) arch_write_unlock(p) |
220 | #define __raw_write_trylock(p) __write_trylock(p) | 220 | #define __raw_write_trylock(p) arch_write_trylock(p) |
221 | 221 | ||
222 | #define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) | 222 | #define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) |
223 | #define __raw_write_can_lock(rw) (!(rw)->lock) | 223 | #define __raw_write_can_lock(rw) (!(rw)->lock) |
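
The spinlock_32.h and spinlock_64.h hunks rename the sparc rwlock primitives from __read_lock()-style to arch_read_lock()-style names; the __raw_* macros the generic rwlock API expands into are updated in step, so callers are untouched. A small sketch of such unaffected caller code:

	#include <linux/spinlock.h>

	static DEFINE_RWLOCK(example_lock);
	static int example_value;

	/* Callers are oblivious to the rename: read_lock() still reaches
	 * the arch primitive through __raw_read_lock(). */
	static int example_read(void)
	{
		int v;

		read_lock(&example_lock);
		v = example_value;
		read_unlock(&example_lock);
		return v;
	}
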
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile index f96dc5761f74..247cc620cee5 100644 --- a/arch/sparc/kernel/Makefile +++ b/arch/sparc/kernel/Makefile | |||
@@ -63,7 +63,7 @@ obj-$(CONFIG_SPARC64_SMP) += cpumap.o | |||
63 | obj-$(CONFIG_SPARC32) += devres.o | 63 | obj-$(CONFIG_SPARC32) += devres.o |
64 | devres-y := ../../../kernel/irq/devres.o | 64 | devres-y := ../../../kernel/irq/devres.o |
65 | 65 | ||
66 | obj-$(CONFIG_SPARC32) += dma.o | 66 | obj-y += dma.o |
67 | 67 | ||
68 | obj-$(CONFIG_SPARC32_PCI) += pcic.o | 68 | obj-$(CONFIG_SPARC32_PCI) += pcic.o |
69 | 69 | ||
diff --git a/arch/sparc/kernel/dma.c b/arch/sparc/kernel/dma.c index 524c32f97c55..e1ba8ee21b9a 100644 --- a/arch/sparc/kernel/dma.c +++ b/arch/sparc/kernel/dma.c | |||
@@ -1,178 +1,13 @@ | |||
1 | /* dma.c: PCI and SBUS DMA accessors for 32-bit sparc. | ||
2 | * | ||
3 | * Copyright (C) 2008 David S. Miller <davem@davemloft.net> | ||
4 | */ | ||
5 | |||
6 | #include <linux/kernel.h> | 1 | #include <linux/kernel.h> |
7 | #include <linux/module.h> | 2 | #include <linux/module.h> |
8 | #include <linux/dma-mapping.h> | 3 | #include <linux/dma-mapping.h> |
9 | #include <linux/scatterlist.h> | 4 | #include <linux/dma-debug.h> |
10 | #include <linux/mm.h> | ||
11 | |||
12 | #ifdef CONFIG_PCI | ||
13 | #include <linux/pci.h> | ||
14 | #endif | ||
15 | 5 | ||
16 | #include "dma.h" | 6 | #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 15) |
17 | 7 | ||
18 | int dma_supported(struct device *dev, u64 mask) | 8 | static int __init dma_init(void) |
19 | { | 9 | { |
20 | #ifdef CONFIG_PCI | 10 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); |
21 | if (dev->bus == &pci_bus_type) | ||
22 | return pci_dma_supported(to_pci_dev(dev), mask); | ||
23 | #endif | ||
24 | return 0; | 11 | return 0; |
25 | } | 12 | } |
26 | EXPORT_SYMBOL(dma_supported); | 13 | fs_initcall(dma_init); |
27 | |||
28 | int dma_set_mask(struct device *dev, u64 dma_mask) | ||
29 | { | ||
30 | #ifdef CONFIG_PCI | ||
31 | if (dev->bus == &pci_bus_type) | ||
32 | return pci_set_dma_mask(to_pci_dev(dev), dma_mask); | ||
33 | #endif | ||
34 | return -EOPNOTSUPP; | ||
35 | } | ||
36 | EXPORT_SYMBOL(dma_set_mask); | ||
37 | |||
38 | static void *dma32_alloc_coherent(struct device *dev, size_t size, | ||
39 | dma_addr_t *dma_handle, gfp_t flag) | ||
40 | { | ||
41 | #ifdef CONFIG_PCI | ||
42 | if (dev->bus == &pci_bus_type) | ||
43 | return pci_alloc_consistent(to_pci_dev(dev), size, dma_handle); | ||
44 | #endif | ||
45 | return sbus_alloc_consistent(dev, size, dma_handle); | ||
46 | } | ||
47 | |||
48 | static void dma32_free_coherent(struct device *dev, size_t size, | ||
49 | void *cpu_addr, dma_addr_t dma_handle) | ||
50 | { | ||
51 | #ifdef CONFIG_PCI | ||
52 | if (dev->bus == &pci_bus_type) { | ||
53 | pci_free_consistent(to_pci_dev(dev), size, | ||
54 | cpu_addr, dma_handle); | ||
55 | return; | ||
56 | } | ||
57 | #endif | ||
58 | sbus_free_consistent(dev, size, cpu_addr, dma_handle); | ||
59 | } | ||
60 | |||
61 | static dma_addr_t dma32_map_page(struct device *dev, struct page *page, | ||
62 | unsigned long offset, size_t size, | ||
63 | enum dma_data_direction direction) | ||
64 | { | ||
65 | #ifdef CONFIG_PCI | ||
66 | if (dev->bus == &pci_bus_type) | ||
67 | return pci_map_page(to_pci_dev(dev), page, offset, | ||
68 | size, (int)direction); | ||
69 | #endif | ||
70 | return sbus_map_single(dev, page_address(page) + offset, | ||
71 | size, (int)direction); | ||
72 | } | ||
73 | |||
74 | static void dma32_unmap_page(struct device *dev, dma_addr_t dma_address, | ||
75 | size_t size, enum dma_data_direction direction) | ||
76 | { | ||
77 | #ifdef CONFIG_PCI | ||
78 | if (dev->bus == &pci_bus_type) { | ||
79 | pci_unmap_page(to_pci_dev(dev), dma_address, | ||
80 | size, (int)direction); | ||
81 | return; | ||
82 | } | ||
83 | #endif | ||
84 | sbus_unmap_single(dev, dma_address, size, (int)direction); | ||
85 | } | ||
86 | |||
87 | static int dma32_map_sg(struct device *dev, struct scatterlist *sg, | ||
88 | int nents, enum dma_data_direction direction) | ||
89 | { | ||
90 | #ifdef CONFIG_PCI | ||
91 | if (dev->bus == &pci_bus_type) | ||
92 | return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction); | ||
93 | #endif | ||
94 | return sbus_map_sg(dev, sg, nents, direction); | ||
95 | } | ||
96 | |||
97 | void dma32_unmap_sg(struct device *dev, struct scatterlist *sg, | ||
98 | int nents, enum dma_data_direction direction) | ||
99 | { | ||
100 | #ifdef CONFIG_PCI | ||
101 | if (dev->bus == &pci_bus_type) { | ||
102 | pci_unmap_sg(to_pci_dev(dev), sg, nents, (int)direction); | ||
103 | return; | ||
104 | } | ||
105 | #endif | ||
106 | sbus_unmap_sg(dev, sg, nents, (int)direction); | ||
107 | } | ||
108 | |||
109 | static void dma32_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, | ||
110 | size_t size, | ||
111 | enum dma_data_direction direction) | ||
112 | { | ||
113 | #ifdef CONFIG_PCI | ||
114 | if (dev->bus == &pci_bus_type) { | ||
115 | pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle, | ||
116 | size, (int)direction); | ||
117 | return; | ||
118 | } | ||
119 | #endif | ||
120 | sbus_dma_sync_single_for_cpu(dev, dma_handle, size, (int) direction); | ||
121 | } | ||
122 | |||
123 | static void dma32_sync_single_for_device(struct device *dev, | ||
124 | dma_addr_t dma_handle, size_t size, | ||
125 | enum dma_data_direction direction) | ||
126 | { | ||
127 | #ifdef CONFIG_PCI | ||
128 | if (dev->bus == &pci_bus_type) { | ||
129 | pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle, | ||
130 | size, (int)direction); | ||
131 | return; | ||
132 | } | ||
133 | #endif | ||
134 | sbus_dma_sync_single_for_device(dev, dma_handle, size, (int) direction); | ||
135 | } | ||
136 | |||
137 | static void dma32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, | ||
138 | int nelems, enum dma_data_direction direction) | ||
139 | { | ||
140 | #ifdef CONFIG_PCI | ||
141 | if (dev->bus == &pci_bus_type) { | ||
142 | pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg, | ||
143 | nelems, (int)direction); | ||
144 | return; | ||
145 | } | ||
146 | #endif | ||
147 | BUG(); | ||
148 | } | ||
149 | |||
150 | static void dma32_sync_sg_for_device(struct device *dev, | ||
151 | struct scatterlist *sg, int nelems, | ||
152 | enum dma_data_direction direction) | ||
153 | { | ||
154 | #ifdef CONFIG_PCI | ||
155 | if (dev->bus == &pci_bus_type) { | ||
156 | pci_dma_sync_sg_for_device(to_pci_dev(dev), sg, | ||
157 | nelems, (int)direction); | ||
158 | return; | ||
159 | } | ||
160 | #endif | ||
161 | BUG(); | ||
162 | } | ||
163 | |||
164 | static const struct dma_ops dma32_dma_ops = { | ||
165 | .alloc_coherent = dma32_alloc_coherent, | ||
166 | .free_coherent = dma32_free_coherent, | ||
167 | .map_page = dma32_map_page, | ||
168 | .unmap_page = dma32_unmap_page, | ||
169 | .map_sg = dma32_map_sg, | ||
170 | .unmap_sg = dma32_unmap_sg, | ||
171 | .sync_single_for_cpu = dma32_sync_single_for_cpu, | ||
172 | .sync_single_for_device = dma32_sync_single_for_device, | ||
173 | .sync_sg_for_cpu = dma32_sync_sg_for_cpu, | ||
174 | .sync_sg_for_device = dma32_sync_sg_for_device, | ||
175 | }; | ||
176 | |||
177 | const struct dma_ops *dma_ops = &dma32_dma_ops; | ||
178 | EXPORT_SYMBOL(dma_ops); | ||
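
dma.c shrinks from the full sparc32 PCI/SBUS dispatch layer (now handled by get_dma_ops() and the per-bus ops tables) to a single initcall that sizes the dma-debug tracking pool at 1 << 15 = 32768 entries. The initcall pattern, restated as a standalone sketch with hypothetical names:

	#include <linux/dma-debug.h>
	#include <linux/init.h>

	#define EXAMPLE_DMA_DEBUG_ENTRIES (1 << 15)	/* 32768 tracked mappings */

	/* Sketch of the pattern: fs_initcall runs early enough that the
	 * debug code sees every mapping drivers create later on. */
	static int __init example_dma_init(void)
	{
		dma_debug_init(EXAMPLE_DMA_DEBUG_ENTRIES);
		return 0;
	}
	fs_initcall(example_dma_init);
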
diff --git a/arch/sparc/kernel/dma.h b/arch/sparc/kernel/dma.h deleted file mode 100644 index f8d8951adb53..000000000000 --- a/arch/sparc/kernel/dma.h +++ /dev/null | |||
@@ -1,14 +0,0 @@ | |||
1 | void *sbus_alloc_consistent(struct device *dev, long len, u32 *dma_addrp); | ||
2 | void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba); | ||
3 | dma_addr_t sbus_map_single(struct device *dev, void *va, | ||
4 | size_t len, int direction); | ||
5 | void sbus_unmap_single(struct device *dev, dma_addr_t ba, | ||
6 | size_t n, int direction); | ||
7 | int sbus_map_sg(struct device *dev, struct scatterlist *sg, | ||
8 | int n, int direction); | ||
9 | void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, | ||
10 | int n, int direction); | ||
11 | void sbus_dma_sync_single_for_cpu(struct device *dev, dma_addr_t ba, | ||
12 | size_t size, int direction); | ||
13 | void sbus_dma_sync_single_for_device(struct device *dev, dma_addr_t ba, | ||
14 | size_t size, int direction); | ||
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index 0aeaefe696b9..7690cc219ecc 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c | |||
@@ -353,7 +353,8 @@ static void dma_4u_free_coherent(struct device *dev, size_t size, | |||
353 | 353 | ||
354 | static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page, | 354 | static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page, |
355 | unsigned long offset, size_t sz, | 355 | unsigned long offset, size_t sz, |
356 | enum dma_data_direction direction) | 356 | enum dma_data_direction direction, |
357 | struct dma_attrs *attrs) | ||
357 | { | 358 | { |
358 | struct iommu *iommu; | 359 | struct iommu *iommu; |
359 | struct strbuf *strbuf; | 360 | struct strbuf *strbuf; |
@@ -474,7 +475,8 @@ do_flush_sync: | |||
474 | } | 475 | } |
475 | 476 | ||
476 | static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr, | 477 | static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr, |
477 | size_t sz, enum dma_data_direction direction) | 478 | size_t sz, enum dma_data_direction direction, |
479 | struct dma_attrs *attrs) | ||
478 | { | 480 | { |
479 | struct iommu *iommu; | 481 | struct iommu *iommu; |
480 | struct strbuf *strbuf; | 482 | struct strbuf *strbuf; |
@@ -520,7 +522,8 @@ static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr, | |||
520 | } | 522 | } |
521 | 523 | ||
522 | static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist, | 524 | static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist, |
523 | int nelems, enum dma_data_direction direction) | 525 | int nelems, enum dma_data_direction direction, |
526 | struct dma_attrs *attrs) | ||
524 | { | 527 | { |
525 | struct scatterlist *s, *outs, *segstart; | 528 | struct scatterlist *s, *outs, *segstart; |
526 | unsigned long flags, handle, prot, ctx; | 529 | unsigned long flags, handle, prot, ctx; |
@@ -691,7 +694,8 @@ static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg) | |||
691 | } | 694 | } |
692 | 695 | ||
693 | static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist, | 696 | static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist, |
694 | int nelems, enum dma_data_direction direction) | 697 | int nelems, enum dma_data_direction direction, |
698 | struct dma_attrs *attrs) | ||
695 | { | 699 | { |
696 | unsigned long flags, ctx; | 700 | unsigned long flags, ctx; |
697 | struct scatterlist *sg; | 701 | struct scatterlist *sg; |
@@ -822,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev, | |||
822 | spin_unlock_irqrestore(&iommu->lock, flags); | 826 | spin_unlock_irqrestore(&iommu->lock, flags); |
823 | } | 827 | } |
824 | 828 | ||
825 | static const struct dma_ops sun4u_dma_ops = { | 829 | static struct dma_map_ops sun4u_dma_ops = { |
826 | .alloc_coherent = dma_4u_alloc_coherent, | 830 | .alloc_coherent = dma_4u_alloc_coherent, |
827 | .free_coherent = dma_4u_free_coherent, | 831 | .free_coherent = dma_4u_free_coherent, |
828 | .map_page = dma_4u_map_page, | 832 | .map_page = dma_4u_map_page, |
@@ -833,9 +837,11 @@ static const struct dma_ops sun4u_dma_ops = { | |||
833 | .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu, | 837 | .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu, |
834 | }; | 838 | }; |
835 | 839 | ||
836 | const struct dma_ops *dma_ops = &sun4u_dma_ops; | 840 | struct dma_map_ops *dma_ops = &sun4u_dma_ops; |
837 | EXPORT_SYMBOL(dma_ops); | 841 | EXPORT_SYMBOL(dma_ops); |
838 | 842 | ||
843 | extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask); | ||
844 | |||
839 | int dma_supported(struct device *dev, u64 device_mask) | 845 | int dma_supported(struct device *dev, u64 device_mask) |
840 | { | 846 | { |
841 | struct iommu *iommu = dev->archdata.iommu; | 847 | struct iommu *iommu = dev->archdata.iommu; |
@@ -849,7 +855,7 @@ int dma_supported(struct device *dev, u64 device_mask) | |||
849 | 855 | ||
850 | #ifdef CONFIG_PCI | 856 | #ifdef CONFIG_PCI |
851 | if (dev->bus == &pci_bus_type) | 857 | if (dev->bus == &pci_bus_type) |
852 | return pci_dma_supported(to_pci_dev(dev), device_mask); | 858 | return pci64_dma_supported(to_pci_dev(dev), device_mask); |
853 | #endif | 859 | #endif |
854 | 860 | ||
855 | return 0; | 861 | return 0; |
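
The iommu.c conversion adjusts sun4u's callbacks to the generic dma_map_ops prototypes, which add a trailing struct dma_attrs * to the map/unmap operations, and drops const from the ops table to match the generic declaration. A condensed skeleton of the converted shape, with a hypothetical callback body:

	#include <linux/dma-mapping.h>

	/* Hypothetical skeleton matching the generic prototypes: note the
	 * struct dma_attrs * argument, new in this interface. */
	static dma_addr_t ex_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t sz,
				      enum dma_data_direction dir,
				      struct dma_attrs *attrs)
	{
		return DMA_ERROR_CODE;	/* real code translates via the IOMMU */
	}

	static struct dma_map_ops example_dma_ops = {
		.map_page	= ex_map_page,
	};
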
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c index e71ce79d8c15..9f61fd8cbb7b 100644 --- a/arch/sparc/kernel/ioport.c +++ b/arch/sparc/kernel/ioport.c | |||
@@ -49,8 +49,6 @@ | |||
49 | #include <asm/iommu.h> | 49 | #include <asm/iommu.h> |
50 | #include <asm/io-unit.h> | 50 | #include <asm/io-unit.h> |
51 | 51 | ||
52 | #include "dma.h" | ||
53 | |||
54 | #define mmu_inval_dma_area(p, l) /* Anton pulled it out for 2.4.0-xx */ | 52 | #define mmu_inval_dma_area(p, l) /* Anton pulled it out for 2.4.0-xx */ |
55 | 53 | ||
56 | static struct resource *_sparc_find_resource(struct resource *r, | 54 | static struct resource *_sparc_find_resource(struct resource *r, |
@@ -247,7 +245,8 @@ EXPORT_SYMBOL(sbus_set_sbus64); | |||
247 | * Typically devices use them for control blocks. | 245 | * Typically devices use them for control blocks. |
248 | * CPU may access them without any explicit flushing. | 246 | * CPU may access them without any explicit flushing. |
249 | */ | 247 | */ |
250 | void *sbus_alloc_consistent(struct device *dev, long len, u32 *dma_addrp) | 248 | static void *sbus_alloc_coherent(struct device *dev, size_t len, |
249 | dma_addr_t *dma_addrp, gfp_t gfp) | ||
251 | { | 250 | { |
252 | struct of_device *op = to_of_device(dev); | 251 | struct of_device *op = to_of_device(dev); |
253 | unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK; | 252 | unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK; |
@@ -300,7 +299,8 @@ err_nopages: | |||
300 | return NULL; | 299 | return NULL; |
301 | } | 300 | } |
302 | 301 | ||
303 | void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba) | 302 | static void sbus_free_coherent(struct device *dev, size_t n, void *p, |
303 | dma_addr_t ba) | ||
304 | { | 304 | { |
305 | struct resource *res; | 305 | struct resource *res; |
306 | struct page *pgv; | 306 | struct page *pgv; |
@@ -318,7 +318,7 @@ void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba) | |||
318 | 318 | ||
319 | n = (n + PAGE_SIZE-1) & PAGE_MASK; | 319 | n = (n + PAGE_SIZE-1) & PAGE_MASK; |
320 | if ((res->end-res->start)+1 != n) { | 320 | if ((res->end-res->start)+1 != n) { |
321 | printk("sbus_free_consistent: region 0x%lx asked 0x%lx\n", | 321 | printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n", |
322 | (long)((res->end-res->start)+1), n); | 322 | (long)((res->end-res->start)+1), n); |
323 | return; | 323 | return; |
324 | } | 324 | } |
@@ -338,8 +338,13 @@ void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba) | |||
338 | * CPU view of this memory may be inconsistent with | 338 | * CPU view of this memory may be inconsistent with |
339 | * a device view and explicit flushing is necessary. | 339 | * a device view and explicit flushing is necessary. |
340 | */ | 340 | */ |
341 | dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int direction) | 341 | static dma_addr_t sbus_map_page(struct device *dev, struct page *page, |
342 | unsigned long offset, size_t len, | ||
343 | enum dma_data_direction dir, | ||
344 | struct dma_attrs *attrs) | ||
342 | { | 345 | { |
346 | void *va = page_address(page) + offset; | ||
347 | |||
343 | /* XXX why are some lengths signed, others unsigned? */ | 348 | /* XXX why are some lengths signed, others unsigned? */ |
344 | if (len <= 0) { | 349 | if (len <= 0) { |
345 | return 0; | 350 | return 0; |
@@ -351,12 +356,14 @@ dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int directi | |||
351 | return mmu_get_scsi_one(dev, va, len); | 356 | return mmu_get_scsi_one(dev, va, len); |
352 | } | 357 | } |
353 | 358 | ||
354 | void sbus_unmap_single(struct device *dev, dma_addr_t ba, size_t n, int direction) | 359 | static void sbus_unmap_page(struct device *dev, dma_addr_t ba, size_t n, |
360 | enum dma_data_direction dir, struct dma_attrs *attrs) | ||
355 | { | 361 | { |
356 | mmu_release_scsi_one(dev, ba, n); | 362 | mmu_release_scsi_one(dev, ba, n); |
357 | } | 363 | } |
358 | 364 | ||
359 | int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction) | 365 | static int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, |
366 | enum dma_data_direction dir, struct dma_attrs *attrs) | ||
360 | { | 367 | { |
361 | mmu_get_scsi_sgl(dev, sg, n); | 368 | mmu_get_scsi_sgl(dev, sg, n); |
362 | 369 | ||
@@ -367,19 +374,38 @@ int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction | |||
367 | return n; | 374 | return n; |
368 | } | 375 | } |
369 | 376 | ||
370 | void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n, int direction) | 377 | static void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n, |
378 | enum dma_data_direction dir, struct dma_attrs *attrs) | ||
371 | { | 379 | { |
372 | mmu_release_scsi_sgl(dev, sg, n); | 380 | mmu_release_scsi_sgl(dev, sg, n); |
373 | } | 381 | } |
374 | 382 | ||
375 | void sbus_dma_sync_single_for_cpu(struct device *dev, dma_addr_t ba, size_t size, int direction) | 383 | static void sbus_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, |
384 | int n, enum dma_data_direction dir) | ||
376 | { | 385 | { |
386 | BUG(); | ||
377 | } | 387 | } |
378 | 388 | ||
379 | void sbus_dma_sync_single_for_device(struct device *dev, dma_addr_t ba, size_t size, int direction) | 389 | static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg, |
390 | int n, enum dma_data_direction dir) | ||
380 | { | 391 | { |
392 | BUG(); | ||
381 | } | 393 | } |
382 | 394 | ||
395 | struct dma_map_ops sbus_dma_ops = { | ||
396 | .alloc_coherent = sbus_alloc_coherent, | ||
397 | .free_coherent = sbus_free_coherent, | ||
398 | .map_page = sbus_map_page, | ||
399 | .unmap_page = sbus_unmap_page, | ||
400 | .map_sg = sbus_map_sg, | ||
401 | .unmap_sg = sbus_unmap_sg, | ||
402 | .sync_sg_for_cpu = sbus_sync_sg_for_cpu, | ||
403 | .sync_sg_for_device = sbus_sync_sg_for_device, | ||
404 | }; | ||
405 | |||
406 | struct dma_map_ops *dma_ops = &sbus_dma_ops; | ||
407 | EXPORT_SYMBOL(dma_ops); | ||
408 | |||
383 | static int __init sparc_register_ioport(void) | 409 | static int __init sparc_register_ioport(void) |
384 | { | 410 | { |
385 | register_proc_sparc_ioport(); | 411 | register_proc_sparc_ioport(); |
@@ -396,7 +422,8 @@ arch_initcall(sparc_register_ioport); | |||
396 | /* Allocate and map kernel buffer using consistent mode DMA for a device. | 422 | /* Allocate and map kernel buffer using consistent mode DMA for a device. |
397 | * hwdev should be valid struct pci_dev pointer for PCI devices. | 423 | * hwdev should be valid struct pci_dev pointer for PCI devices. |
398 | */ | 424 | */ |
399 | void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba) | 425 | static void *pci32_alloc_coherent(struct device *dev, size_t len, |
426 | dma_addr_t *pba, gfp_t gfp) | ||
400 | { | 427 | { |
401 | unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK; | 428 | unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK; |
402 | unsigned long va; | 429 | unsigned long va; |
@@ -440,7 +467,6 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba) | |||
440 | *pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */ | 467 | *pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */ |
441 | return (void *) res->start; | 468 | return (void *) res->start; |
442 | } | 469 | } |
443 | EXPORT_SYMBOL(pci_alloc_consistent); | ||
444 | 470 | ||
445 | /* Free and unmap a consistent DMA buffer. | 471 | /* Free and unmap a consistent DMA buffer. |
446 | * cpu_addr is what was returned from pci_alloc_consistent, | 472 | * cpu_addr is what was returned from pci_alloc_consistent, |
@@ -450,7 +476,8 @@ EXPORT_SYMBOL(pci_alloc_consistent); | |||
450 | * References to the memory and mappings associated with cpu_addr/dma_addr | 476 | * References to the memory and mappings associated with cpu_addr/dma_addr |
451 | * past this call are illegal. | 477 | * past this call are illegal. |
452 | */ | 478 | */ |
453 | void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba) | 479 | static void pci32_free_coherent(struct device *dev, size_t n, void *p, |
480 | dma_addr_t ba) | ||
454 | { | 481 | { |
455 | struct resource *res; | 482 | struct resource *res; |
456 | unsigned long pgp; | 483 | unsigned long pgp; |
@@ -482,60 +509,18 @@ void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba) | |||
482 | 509 | ||
483 | free_pages(pgp, get_order(n)); | 510 | free_pages(pgp, get_order(n)); |
484 | } | 511 | } |
485 | EXPORT_SYMBOL(pci_free_consistent); | ||
486 | |||
487 | /* Map a single buffer of the indicated size for DMA in streaming mode. | ||
488 | * The 32-bit bus address to use is returned. | ||
489 | * | ||
490 | * Once the device is given the dma address, the device owns this memory | ||
491 | * until either pci_unmap_single or pci_dma_sync_single_* is performed. | ||
492 | */ | ||
493 | dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, | ||
494 | int direction) | ||
495 | { | ||
496 | BUG_ON(direction == PCI_DMA_NONE); | ||
497 | /* IIep is write-through, not flushing. */ | ||
498 | return virt_to_phys(ptr); | ||
499 | } | ||
500 | EXPORT_SYMBOL(pci_map_single); | ||
501 | |||
502 | /* Unmap a single streaming mode DMA translation. The dma_addr and size | ||
503 | * must match what was provided for in a previous pci_map_single call. All | ||
504 | * other usages are undefined. | ||
505 | * | ||
506 | * After this call, reads by the cpu to the buffer are guaranteed to see | ||
507 | * whatever the device wrote there. | ||
508 | */ | ||
509 | void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size, | ||
510 | int direction) | ||
511 | { | ||
512 | BUG_ON(direction == PCI_DMA_NONE); | ||
513 | if (direction != PCI_DMA_TODEVICE) { | ||
514 | mmu_inval_dma_area((unsigned long)phys_to_virt(ba), | ||
515 | (size + PAGE_SIZE-1) & PAGE_MASK); | ||
516 | } | ||
517 | } | ||
518 | EXPORT_SYMBOL(pci_unmap_single); | ||
519 | 512 | ||
520 | /* | 513 | /* |
521 | * Same as pci_map_single, but with pages. | 514 | * Same as pci_map_single, but with pages. |
522 | */ | 515 | */ |
523 | dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page, | 516 | static dma_addr_t pci32_map_page(struct device *dev, struct page *page, |
524 | unsigned long offset, size_t size, int direction) | 517 | unsigned long offset, size_t size, |
518 | enum dma_data_direction dir, | ||
519 | struct dma_attrs *attrs) | ||
525 | { | 520 | { |
526 | BUG_ON(direction == PCI_DMA_NONE); | ||
527 | /* IIep is write-through, not flushing. */ | 521 | /* IIep is write-through, not flushing. */ |
528 | return page_to_phys(page) + offset; | 522 | return page_to_phys(page) + offset; |
529 | } | 523 | } |
530 | EXPORT_SYMBOL(pci_map_page); | ||
531 | |||
532 | void pci_unmap_page(struct pci_dev *hwdev, | ||
533 | dma_addr_t dma_address, size_t size, int direction) | ||
534 | { | ||
535 | BUG_ON(direction == PCI_DMA_NONE); | ||
536 | /* mmu_inval_dma_area XXX */ | ||
537 | } | ||
538 | EXPORT_SYMBOL(pci_unmap_page); | ||
539 | 524 | ||
540 | /* Map a set of buffers described by scatterlist in streaming | 525 | /* Map a set of buffers described by scatterlist in streaming |
541 | * mode for DMA. This is the scatter-gather version of the | 526 | * mode for DMA. This is the scatter-gather version of the |
@@ -552,13 +537,13 @@ EXPORT_SYMBOL(pci_unmap_page); | |||
552 | * Device ownership issues as mentioned above for pci_map_single are | 537 | * Device ownership issues as mentioned above for pci_map_single are |
553 | * the same here. | 538 | * the same here. |
554 | */ | 539 | */ |
555 | int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, | 540 | static int pci32_map_sg(struct device *device, struct scatterlist *sgl, |
556 | int direction) | 541 | int nents, enum dma_data_direction dir, |
542 | struct dma_attrs *attrs) | ||
557 | { | 543 | { |
558 | struct scatterlist *sg; | 544 | struct scatterlist *sg; |
559 | int n; | 545 | int n; |
560 | 546 | ||
561 | BUG_ON(direction == PCI_DMA_NONE); | ||
562 | /* IIep is write-through, not flushing. */ | 547 | /* IIep is write-through, not flushing. */ |
563 | for_each_sg(sgl, sg, nents, n) { | 548 | for_each_sg(sgl, sg, nents, n) { |
564 | BUG_ON(page_address(sg_page(sg)) == NULL); | 549 | BUG_ON(page_address(sg_page(sg)) == NULL); |
@@ -567,20 +552,19 @@ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, | |||
567 | } | 552 | } |
568 | return nents; | 553 | return nents; |
569 | } | 554 | } |
570 | EXPORT_SYMBOL(pci_map_sg); | ||
571 | 555 | ||
572 | /* Unmap a set of streaming mode DMA translations. | 556 | /* Unmap a set of streaming mode DMA translations. |
573 | * Again, cpu read rules concerning calls here are the same as for | 557 | * Again, cpu read rules concerning calls here are the same as for |
574 | * pci_unmap_single() above. | 558 | * pci_unmap_single() above. |
575 | */ | 559 | */ |
576 | void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, | 560 | static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl, |
577 | int direction) | 561 | int nents, enum dma_data_direction dir, |
562 | struct dma_attrs *attrs) | ||
578 | { | 563 | { |
579 | struct scatterlist *sg; | 564 | struct scatterlist *sg; |
580 | int n; | 565 | int n; |
581 | 566 | ||
582 | BUG_ON(direction == PCI_DMA_NONE); | 567 | if (dir != PCI_DMA_TODEVICE) { |
583 | if (direction != PCI_DMA_TODEVICE) { | ||
584 | for_each_sg(sgl, sg, nents, n) { | 568 | for_each_sg(sgl, sg, nents, n) { |
585 | BUG_ON(page_address(sg_page(sg)) == NULL); | 569 | BUG_ON(page_address(sg_page(sg)) == NULL); |
586 | mmu_inval_dma_area( | 570 | mmu_inval_dma_area( |
@@ -589,7 +573,6 @@ void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, | |||
589 | } | 573 | } |
590 | } | 574 | } |
591 | } | 575 | } |
592 | EXPORT_SYMBOL(pci_unmap_sg); | ||
593 | 576 | ||
594 | /* Make physical memory consistent for a single | 577 | /* Make physical memory consistent for a single |
595 | * streaming mode DMA translation before or after a transfer. | 578 | * streaming mode DMA translation before or after a transfer. |
@@ -601,25 +584,23 @@ EXPORT_SYMBOL(pci_unmap_sg); | |||
601 | * must first perform a pci_dma_sync_for_device, and then the | 584 | * must first perform a pci_dma_sync_for_device, and then the |
602 | * device again owns the buffer. | 585 | * device again owns the buffer. |
603 | */ | 586 | */ |
604 | void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction) | 587 | static void pci32_sync_single_for_cpu(struct device *dev, dma_addr_t ba, |
588 | size_t size, enum dma_data_direction dir) | ||
605 | { | 589 | { |
606 | BUG_ON(direction == PCI_DMA_NONE); | 590 | if (dir != PCI_DMA_TODEVICE) { |
607 | if (direction != PCI_DMA_TODEVICE) { | ||
608 | mmu_inval_dma_area((unsigned long)phys_to_virt(ba), | 591 | mmu_inval_dma_area((unsigned long)phys_to_virt(ba), |
609 | (size + PAGE_SIZE-1) & PAGE_MASK); | 592 | (size + PAGE_SIZE-1) & PAGE_MASK); |
610 | } | 593 | } |
611 | } | 594 | } |
612 | EXPORT_SYMBOL(pci_dma_sync_single_for_cpu); | ||
613 | 595 | ||
614 | void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction) | 596 | static void pci32_sync_single_for_device(struct device *dev, dma_addr_t ba, |
597 | size_t size, enum dma_data_direction dir) | ||
615 | { | 598 | { |
616 | BUG_ON(direction == PCI_DMA_NONE); | 599 | if (dir != PCI_DMA_TODEVICE) { |
617 | if (direction != PCI_DMA_TODEVICE) { | ||
618 | mmu_inval_dma_area((unsigned long)phys_to_virt(ba), | 600 | mmu_inval_dma_area((unsigned long)phys_to_virt(ba), |
619 | (size + PAGE_SIZE-1) & PAGE_MASK); | 601 | (size + PAGE_SIZE-1) & PAGE_MASK); |
620 | } | 602 | } |
621 | } | 603 | } |
622 | EXPORT_SYMBOL(pci_dma_sync_single_for_device); | ||
623 | 604 | ||
624 | /* Make physical memory consistent for a set of streaming | 605 | /* Make physical memory consistent for a set of streaming |
625 | * mode DMA translations after a transfer. | 606 | * mode DMA translations after a transfer. |
@@ -627,13 +608,13 @@ EXPORT_SYMBOL(pci_dma_sync_single_for_device); | |||
627 | * The same as pci_dma_sync_single_* but for a scatter-gather list, | 608 | * The same as pci_dma_sync_single_* but for a scatter-gather list, |
628 | * same rules and usage. | 609 | * same rules and usage. |
629 | */ | 610 | */ |
630 | void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction) | 611 | static void pci32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl, |
612 | int nents, enum dma_data_direction dir) | ||
631 | { | 613 | { |
632 | struct scatterlist *sg; | 614 | struct scatterlist *sg; |
633 | int n; | 615 | int n; |
634 | 616 | ||
635 | BUG_ON(direction == PCI_DMA_NONE); | 617 | if (dir != PCI_DMA_TODEVICE) { |
636 | if (direction != PCI_DMA_TODEVICE) { | ||
637 | for_each_sg(sgl, sg, nents, n) { | 618 | for_each_sg(sgl, sg, nents, n) { |
638 | BUG_ON(page_address(sg_page(sg)) == NULL); | 619 | BUG_ON(page_address(sg_page(sg)) == NULL); |
639 | mmu_inval_dma_area( | 620 | mmu_inval_dma_area( |
@@ -642,15 +623,14 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int | |||
642 | } | 623 | } |
643 | } | 624 | } |
644 | } | 625 | } |
645 | EXPORT_SYMBOL(pci_dma_sync_sg_for_cpu); | ||
646 | 626 | ||
647 | void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction) | 627 | static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *sgl, |
628 | int nents, enum dma_data_direction dir) | ||
648 | { | 629 | { |
649 | struct scatterlist *sg; | 630 | struct scatterlist *sg; |
650 | int n; | 631 | int n; |
651 | 632 | ||
652 | BUG_ON(direction == PCI_DMA_NONE); | 633 | if (dir != PCI_DMA_TODEVICE) { |
653 | if (direction != PCI_DMA_TODEVICE) { | ||
654 | for_each_sg(sgl, sg, nents, n) { | 634 | for_each_sg(sgl, sg, nents, n) { |
655 | BUG_ON(page_address(sg_page(sg)) == NULL); | 635 | BUG_ON(page_address(sg_page(sg)) == NULL); |
656 | mmu_inval_dma_area( | 636 | mmu_inval_dma_area( |
@@ -659,9 +639,49 @@ void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, | |||
659 | } | 639 | } |
660 | } | 640 | } |
661 | } | 641 | } |
662 | EXPORT_SYMBOL(pci_dma_sync_sg_for_device); | 642 | |
643 | struct dma_map_ops pci32_dma_ops = { | ||
644 | .alloc_coherent = pci32_alloc_coherent, | ||
645 | .free_coherent = pci32_free_coherent, | ||
646 | .map_page = pci32_map_page, | ||
647 | .map_sg = pci32_map_sg, | ||
648 | .unmap_sg = pci32_unmap_sg, | ||
649 | .sync_single_for_cpu = pci32_sync_single_for_cpu, | ||
650 | .sync_single_for_device = pci32_sync_single_for_device, | ||
651 | .sync_sg_for_cpu = pci32_sync_sg_for_cpu, | ||
652 | .sync_sg_for_device = pci32_sync_sg_for_device, | ||
653 | }; | ||
654 | EXPORT_SYMBOL(pci32_dma_ops); | ||
655 | |||
663 | #endif /* CONFIG_PCI */ | 656 | #endif /* CONFIG_PCI */ |
664 | 657 | ||
658 | /* | ||
659 | * Return whether the given PCI device DMA address mask can be | ||
660 | * supported properly. For example, if your device can only drive the | ||
661 | * low 24-bits during PCI bus mastering, then you would pass | ||
662 | * 0x00ffffff as the mask to this function. | ||
663 | */ | ||
664 | int dma_supported(struct device *dev, u64 mask) | ||
665 | { | ||
666 | #ifdef CONFIG_PCI | ||
667 | if (dev->bus == &pci_bus_type) | ||
668 | return 1; | ||
669 | #endif | ||
670 | return 0; | ||
671 | } | ||
672 | EXPORT_SYMBOL(dma_supported); | ||
673 | |||
674 | int dma_set_mask(struct device *dev, u64 dma_mask) | ||
675 | { | ||
676 | #ifdef CONFIG_PCI | ||
677 | if (dev->bus == &pci_bus_type) | ||
678 | return pci_set_dma_mask(to_pci_dev(dev), dma_mask); | ||
679 | #endif | ||
680 | return -EOPNOTSUPP; | ||
681 | } | ||
682 | EXPORT_SYMBOL(dma_set_mask); | ||
683 | |||
684 | |||
665 | #ifdef CONFIG_PROC_FS | 685 | #ifdef CONFIG_PROC_FS |
666 | 686 | ||
667 | static int sparc_io_proc_show(struct seq_file *m, void *v) | 687 | static int sparc_io_proc_show(struct seq_file *m, void *v) |
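
The dma_supported() comment above gives the classic example: a device that can only drive the low 24 address bits during bus mastering passes 0x00ffffff as its mask. A self-contained check of what such a mask admits (fits_mask is a hypothetical helper written for the demo, not kernel API):

    /* Worked example for the 24-bit DMA mask mentioned in the comment:
     * an address is usable iff no bit outside the mask is set. */
    #include <stdio.h>
    #include <stdint.h>

    static int fits_mask(uint64_t addr, uint64_t mask)
    {
            return (addr & ~mask) == 0;
    }

    int main(void)
    {
            uint64_t mask = 0x00ffffffULL;  /* 24-bit DMA mask */

            printf("0x00800000 ok? %d\n", fits_mask(0x00800000ULL, mask)); /* 1 */
            printf("0x01000000 ok? %d\n", fits_mask(0x01000000ULL, mask)); /* 0 */
            return 0;
    }
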
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c index f0ee79055409..8daab33fc17d 100644 --- a/arch/sparc/kernel/irq_64.c +++ b/arch/sparc/kernel/irq_64.c | |||
@@ -886,7 +886,7 @@ void notrace init_irqwork_curcpu(void) | |||
886 | * Therefore you cannot make any OBP calls, not even prom_printf, | 886 | * Therefore you cannot make any OBP calls, not even prom_printf, |
887 | * from these two routines. | 887 | * from these two routines. |
888 | */ | 888 | */ |
889 | static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask) | 889 | static void __cpuinit notrace register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask) |
890 | { | 890 | { |
891 | unsigned long num_entries = (qmask + 1) / 64; | 891 | unsigned long num_entries = (qmask + 1) / 64; |
892 | unsigned long status; | 892 | unsigned long status; |
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c index 391a6ed9a184..378eb53e0776 100644 --- a/arch/sparc/kernel/nmi.c +++ b/arch/sparc/kernel/nmi.c | |||
@@ -113,7 +113,7 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs) | |||
113 | } | 113 | } |
114 | if (!touched && __get_cpu_var(last_irq_sum) == sum) { | 114 | if (!touched && __get_cpu_var(last_irq_sum) == sum) { |
115 | local_inc(&__get_cpu_var(alert_counter)); | 115 | local_inc(&__get_cpu_var(alert_counter)); |
116 | if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz) | 116 | if (local_read(&__get_cpu_var(alert_counter)) == 30 * nmi_hz) |
117 | die_nmi("BUG: NMI Watchdog detected LOCKUP", | 117 | die_nmi("BUG: NMI Watchdog detected LOCKUP", |
118 | regs, panic_on_timeout); | 118 | regs, panic_on_timeout); |
119 | } else { | 119 | } else { |
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c index 57859ad23547..c68648662802 100644 --- a/arch/sparc/kernel/pci.c +++ b/arch/sparc/kernel/pci.c | |||
@@ -1039,7 +1039,7 @@ static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit) | |||
1039 | pci_dev_put(ali_isa_bridge); | 1039 | pci_dev_put(ali_isa_bridge); |
1040 | } | 1040 | } |
1041 | 1041 | ||
1042 | int pci_dma_supported(struct pci_dev *pdev, u64 device_mask) | 1042 | int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask) |
1043 | { | 1043 | { |
1044 | u64 dma_addr_mask; | 1044 | u64 dma_addr_mask; |
1045 | 1045 | ||
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c index 2485eaa23101..23c33ff9c31e 100644 --- a/arch/sparc/kernel/pci_sun4v.c +++ b/arch/sparc/kernel/pci_sun4v.c | |||
@@ -232,7 +232,8 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu, | |||
232 | 232 | ||
233 | static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page, | 233 | static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page, |
234 | unsigned long offset, size_t sz, | 234 | unsigned long offset, size_t sz, |
235 | enum dma_data_direction direction) | 235 | enum dma_data_direction direction, |
236 | struct dma_attrs *attrs) | ||
236 | { | 237 | { |
237 | struct iommu *iommu; | 238 | struct iommu *iommu; |
238 | unsigned long flags, npages, oaddr; | 239 | unsigned long flags, npages, oaddr; |
@@ -296,7 +297,8 @@ iommu_map_fail: | |||
296 | } | 297 | } |
297 | 298 | ||
298 | static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr, | 299 | static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr, |
299 | size_t sz, enum dma_data_direction direction) | 300 | size_t sz, enum dma_data_direction direction, |
301 | struct dma_attrs *attrs) | ||
300 | { | 302 | { |
301 | struct pci_pbm_info *pbm; | 303 | struct pci_pbm_info *pbm; |
302 | struct iommu *iommu; | 304 | struct iommu *iommu; |
@@ -336,7 +338,8 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr, | |||
336 | } | 338 | } |
337 | 339 | ||
338 | static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, | 340 | static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, |
339 | int nelems, enum dma_data_direction direction) | 341 | int nelems, enum dma_data_direction direction, |
342 | struct dma_attrs *attrs) | ||
340 | { | 343 | { |
341 | struct scatterlist *s, *outs, *segstart; | 344 | struct scatterlist *s, *outs, *segstart; |
342 | unsigned long flags, handle, prot; | 345 | unsigned long flags, handle, prot; |
@@ -478,7 +481,8 @@ iommu_map_failed: | |||
478 | } | 481 | } |
479 | 482 | ||
480 | static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, | 483 | static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, |
481 | int nelems, enum dma_data_direction direction) | 484 | int nelems, enum dma_data_direction direction, |
485 | struct dma_attrs *attrs) | ||
482 | { | 486 | { |
483 | struct pci_pbm_info *pbm; | 487 | struct pci_pbm_info *pbm; |
484 | struct scatterlist *sg; | 488 | struct scatterlist *sg; |
@@ -521,29 +525,13 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, | |||
521 | spin_unlock_irqrestore(&iommu->lock, flags); | 525 | spin_unlock_irqrestore(&iommu->lock, flags); |
522 | } | 526 | } |
523 | 527 | ||
524 | static void dma_4v_sync_single_for_cpu(struct device *dev, | 528 | static struct dma_map_ops sun4v_dma_ops = { |
525 | dma_addr_t bus_addr, size_t sz, | ||
526 | enum dma_data_direction direction) | ||
527 | { | ||
528 | /* Nothing to do... */ | ||
529 | } | ||
530 | |||
531 | static void dma_4v_sync_sg_for_cpu(struct device *dev, | ||
532 | struct scatterlist *sglist, int nelems, | ||
533 | enum dma_data_direction direction) | ||
534 | { | ||
535 | /* Nothing to do... */ | ||
536 | } | ||
537 | |||
538 | static const struct dma_ops sun4v_dma_ops = { | ||
539 | .alloc_coherent = dma_4v_alloc_coherent, | 529 | .alloc_coherent = dma_4v_alloc_coherent, |
540 | .free_coherent = dma_4v_free_coherent, | 530 | .free_coherent = dma_4v_free_coherent, |
541 | .map_page = dma_4v_map_page, | 531 | .map_page = dma_4v_map_page, |
542 | .unmap_page = dma_4v_unmap_page, | 532 | .unmap_page = dma_4v_unmap_page, |
543 | .map_sg = dma_4v_map_sg, | 533 | .map_sg = dma_4v_map_sg, |
544 | .unmap_sg = dma_4v_unmap_sg, | 534 | .unmap_sg = dma_4v_unmap_sg, |
545 | .sync_single_for_cpu = dma_4v_sync_single_for_cpu, | ||
546 | .sync_sg_for_cpu = dma_4v_sync_sg_for_cpu, | ||
547 | }; | 535 | }; |
548 | 536 | ||
549 | static void __devinit pci_sun4v_scan_bus(struct pci_pbm_info *pbm, | 537 | static void __devinit pci_sun4v_scan_bus(struct pci_pbm_info *pbm, |
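
The sun4v hunk above deletes its empty sync callbacks outright: with dma_map_ops, a hook left NULL can simply be skipped by the caller, so do-nothing stubs become unnecessary. A self-contained model of that convention (assuming, as the generic inline wrappers do, that the dispatcher tests the pointer before calling; all names here are invented for the demo):

    /* Model of NULL-hook dispatch: an absent sync callback costs one
     * pointer test instead of an empty function call. */
    #include <stdio.h>
    #include <stddef.h>

    struct ops_model {
            void (*sync_for_cpu)(void *dev);
    };

    static void sync_single_model(struct ops_model *ops, void *dev)
    {
            if (ops->sync_for_cpu)          /* NULL hook == nothing to do */
                    ops->sync_for_cpu(dev);
    }

    int main(void)
    {
            struct ops_model sun4v_model = { .sync_for_cpu = NULL };

            sync_single_model(&sun4v_model, NULL);  /* safely does nothing */
            printf("no-op sync skipped\n");
            return 0;
    }
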
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index 4041f94e7724..18d67854a1b8 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c | |||
@@ -251,7 +251,7 @@ static void __global_reg_poll(struct global_reg_snapshot *gp) | |||
251 | } | 251 | } |
252 | } | 252 | } |
253 | 253 | ||
254 | void __trigger_all_cpu_backtrace(void) | 254 | void arch_trigger_all_cpu_backtrace(void) |
255 | { | 255 | { |
256 | struct thread_info *tp = current_thread_info(); | 256 | struct thread_info *tp = current_thread_info(); |
257 | struct pt_regs *regs = get_irq_regs(); | 257 | struct pt_regs *regs = get_irq_regs(); |
@@ -304,7 +304,7 @@ void __trigger_all_cpu_backtrace(void) | |||
304 | 304 | ||
305 | static void sysrq_handle_globreg(int key, struct tty_struct *tty) | 305 | static void sysrq_handle_globreg(int key, struct tty_struct *tty) |
306 | { | 306 | { |
307 | __trigger_all_cpu_backtrace(); | 307 | arch_trigger_all_cpu_backtrace(); |
308 | } | 308 | } |
309 | 309 | ||
310 | static struct sysrq_key_op sparc_globalreg_op = { | 310 | static struct sysrq_key_op sparc_globalreg_op = { |
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c index 181d069a2d44..7ce1a1005b1d 100644 --- a/arch/sparc/kernel/signal_32.c +++ b/arch/sparc/kernel/signal_32.c | |||
@@ -590,6 +590,8 @@ void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, | |||
590 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { | 590 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { |
591 | clear_thread_flag(TIF_NOTIFY_RESUME); | 591 | clear_thread_flag(TIF_NOTIFY_RESUME); |
592 | tracehook_notify_resume(regs); | 592 | tracehook_notify_resume(regs); |
593 | if (current->replacement_session_keyring) | ||
594 | key_replace_session_keyring(); | ||
593 | } | 595 | } |
594 | } | 596 | } |
595 | 597 | ||
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c index ec82d76dc6f2..647afbda7ae1 100644 --- a/arch/sparc/kernel/signal_64.c +++ b/arch/sparc/kernel/signal_64.c | |||
@@ -613,5 +613,8 @@ void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long | |||
613 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { | 613 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { |
614 | clear_thread_flag(TIF_NOTIFY_RESUME); | 614 | clear_thread_flag(TIF_NOTIFY_RESUME); |
615 | tracehook_notify_resume(regs); | 615 | tracehook_notify_resume(regs); |
616 | if (current->replacement_session_keyring) | ||
617 | key_replace_session_keyring(); | ||
616 | } | 618 | } |
617 | } | 619 | } |
620 | |||
diff --git a/arch/sparc/prom/misc_64.c b/arch/sparc/prom/misc_64.c index eedffb4fec2d..39fc6af21b7c 100644 --- a/arch/sparc/prom/misc_64.c +++ b/arch/sparc/prom/misc_64.c | |||
@@ -88,7 +88,7 @@ void prom_cmdline(void) | |||
88 | /* Drop into the prom, but completely terminate the program. | 88 | /* Drop into the prom, but completely terminate the program. |
89 | * No chance of continuing. | 89 | * No chance of continuing. |
90 | */ | 90 | */ |
91 | void prom_halt(void) | 91 | void notrace prom_halt(void) |
92 | { | 92 | { |
93 | #ifdef CONFIG_SUN_LDOMS | 93 | #ifdef CONFIG_SUN_LDOMS |
94 | if (ldom_domaining_enabled) | 94 | if (ldom_domaining_enabled) |
diff --git a/arch/sparc/prom/printf.c b/arch/sparc/prom/printf.c index 660943ee4c2a..ca869266b9f3 100644 --- a/arch/sparc/prom/printf.c +++ b/arch/sparc/prom/printf.c | |||
@@ -14,14 +14,14 @@ | |||
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
17 | #include <linux/compiler.h> | ||
17 | 18 | ||
18 | #include <asm/openprom.h> | 19 | #include <asm/openprom.h> |
19 | #include <asm/oplib.h> | 20 | #include <asm/oplib.h> |
20 | 21 | ||
21 | static char ppbuf[1024]; | 22 | static char ppbuf[1024]; |
22 | 23 | ||
23 | void | 24 | void notrace prom_write(const char *buf, unsigned int n) |
24 | prom_write(const char *buf, unsigned int n) | ||
25 | { | 25 | { |
26 | char ch; | 26 | char ch; |
27 | 27 | ||
@@ -33,8 +33,7 @@ prom_write(const char *buf, unsigned int n) | |||
33 | } | 33 | } |
34 | } | 34 | } |
35 | 35 | ||
36 | void | 36 | void notrace prom_printf(const char *fmt, ...) |
37 | prom_printf(const char *fmt, ...) | ||
38 | { | 37 | { |
39 | va_list args; | 38 | va_list args; |
40 | int i; | 39 | int i; |
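
prom_halt(), prom_write() and prom_printf() are tagged notrace above so the function tracer can never instrument code that runs while control is handed to the PROM. In the kernel, notrace comes from <linux/compiler.h> (hence the new include) and expands to GCC's no_instrument_function attribute; a standalone sketch that defines the macro locally just so it compiles outside the kernel tree:

    /* Sketch of the notrace annotation; in-kernel it is a macro from
     * <linux/compiler.h>, defined here only to make the demo build. */
    #include <stdio.h>

    #define notrace __attribute__((no_instrument_function))

    static void notrace early_print(const char *s)
    {
            /* the compiler emits no profiling/mcount call for this
             * function, so it stays safe on early or PROM paths */
            fputs(s, stdout);
    }

    int main(void)
    {
            early_print("hello from an uninstrumented path\n");
            return 0;
    }
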
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 13ffa5df37d7..fc20fdc0f7f2 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -38,7 +38,7 @@ config X86 | |||
38 | select HAVE_FUNCTION_GRAPH_FP_TEST | 38 | select HAVE_FUNCTION_GRAPH_FP_TEST |
39 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST | 39 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST |
40 | select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE | 40 | select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE |
41 | select HAVE_FTRACE_SYSCALLS | 41 | select HAVE_SYSCALL_TRACEPOINTS |
42 | select HAVE_KVM | 42 | select HAVE_KVM |
43 | select HAVE_ARCH_KGDB | 43 | select HAVE_ARCH_KGDB |
44 | select HAVE_ARCH_TRACEHOOK | 44 | select HAVE_ARCH_TRACEHOOK |
@@ -586,7 +586,6 @@ config GART_IOMMU | |||
586 | bool "GART IOMMU support" if EMBEDDED | 586 | bool "GART IOMMU support" if EMBEDDED |
587 | default y | 587 | default y |
588 | select SWIOTLB | 588 | select SWIOTLB |
589 | select AGP | ||
590 | depends on X86_64 && PCI | 589 | depends on X86_64 && PCI |
591 | ---help--- | 590 | ---help--- |
592 | Support for full DMA access of devices with 32bit memory access only | 591 | Support for full DMA access of devices with 32bit memory access only |
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig index edb992ebef92..d28fad19654a 100644 --- a/arch/x86/configs/i386_defconfig +++ b/arch/x86/configs/i386_defconfig | |||
@@ -2355,7 +2355,7 @@ CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y | |||
2355 | CONFIG_HAVE_DYNAMIC_FTRACE=y | 2355 | CONFIG_HAVE_DYNAMIC_FTRACE=y |
2356 | CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y | 2356 | CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y |
2357 | CONFIG_HAVE_HW_BRANCH_TRACER=y | 2357 | CONFIG_HAVE_HW_BRANCH_TRACER=y |
2358 | CONFIG_HAVE_FTRACE_SYSCALLS=y | 2358 | CONFIG_HAVE_SYSCALL_TRACEPOINTS=y |
2359 | CONFIG_RING_BUFFER=y | 2359 | CONFIG_RING_BUFFER=y |
2360 | CONFIG_TRACING=y | 2360 | CONFIG_TRACING=y |
2361 | CONFIG_TRACING_SUPPORT=y | 2361 | CONFIG_TRACING_SUPPORT=y |
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig index cee1dd2e69b2..6c86acd847a4 100644 --- a/arch/x86/configs/x86_64_defconfig +++ b/arch/x86/configs/x86_64_defconfig | |||
@@ -2329,7 +2329,7 @@ CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y | |||
2329 | CONFIG_HAVE_DYNAMIC_FTRACE=y | 2329 | CONFIG_HAVE_DYNAMIC_FTRACE=y |
2330 | CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y | 2330 | CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y |
2331 | CONFIG_HAVE_HW_BRANCH_TRACER=y | 2331 | CONFIG_HAVE_HW_BRANCH_TRACER=y |
2332 | CONFIG_HAVE_FTRACE_SYSCALLS=y | 2332 | CONFIG_HAVE_SYSCALL_TRACEPOINTS=y |
2333 | CONFIG_RING_BUFFER=y | 2333 | CONFIG_RING_BUFFER=y |
2334 | CONFIG_TRACING=y | 2334 | CONFIG_TRACING=y |
2335 | CONFIG_TRACING_SUPPORT=y | 2335 | CONFIG_TRACING_SUPPORT=y |
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index c580c5ec1cad..d3ec8d588d4b 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c | |||
@@ -636,7 +636,7 @@ static int __init aesni_init(void) | |||
636 | int err; | 636 | int err; |
637 | 637 | ||
638 | if (!cpu_has_aes) { | 638 | if (!cpu_has_aes) { |
639 | printk(KERN_ERR "Intel AES-NI instructions are not detected.\n"); | 639 | printk(KERN_INFO "Intel AES-NI instructions are not detected.\n"); |
640 | return -ENODEV; | 640 | return -ENODEV; |
641 | } | 641 | } |
642 | if ((err = crypto_register_alg(&aesni_alg))) | 642 | if ((err = crypto_register_alg(&aesni_alg))) |
diff --git a/arch/x86/include/asm/amd_iommu.h b/arch/x86/include/asm/amd_iommu.h index bdf96f119f06..ac95995b7bad 100644 --- a/arch/x86/include/asm/amd_iommu.h +++ b/arch/x86/include/asm/amd_iommu.h | |||
@@ -25,6 +25,7 @@ | |||
25 | #ifdef CONFIG_AMD_IOMMU | 25 | #ifdef CONFIG_AMD_IOMMU |
26 | extern int amd_iommu_init(void); | 26 | extern int amd_iommu_init(void); |
27 | extern int amd_iommu_init_dma_ops(void); | 27 | extern int amd_iommu_init_dma_ops(void); |
28 | extern int amd_iommu_init_passthrough(void); | ||
28 | extern void amd_iommu_detect(void); | 29 | extern void amd_iommu_detect(void); |
29 | extern irqreturn_t amd_iommu_int_handler(int irq, void *data); | 30 | extern irqreturn_t amd_iommu_int_handler(int irq, void *data); |
30 | extern void amd_iommu_flush_all_domains(void); | 31 | extern void amd_iommu_flush_all_domains(void); |
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h index 0c878caaa0a2..2a2cc7a78a81 100644 --- a/arch/x86/include/asm/amd_iommu_types.h +++ b/arch/x86/include/asm/amd_iommu_types.h | |||
@@ -143,22 +143,29 @@ | |||
143 | #define EVT_BUFFER_SIZE 8192 /* 512 entries */ | 143 | #define EVT_BUFFER_SIZE 8192 /* 512 entries */ |
144 | #define EVT_LEN_MASK (0x9ULL << 56) | 144 | #define EVT_LEN_MASK (0x9ULL << 56) |
145 | 145 | ||
146 | #define PAGE_MODE_NONE 0x00 | ||
146 | #define PAGE_MODE_1_LEVEL 0x01 | 147 | #define PAGE_MODE_1_LEVEL 0x01 |
147 | #define PAGE_MODE_2_LEVEL 0x02 | 148 | #define PAGE_MODE_2_LEVEL 0x02 |
148 | #define PAGE_MODE_3_LEVEL 0x03 | 149 | #define PAGE_MODE_3_LEVEL 0x03 |
149 | 150 | #define PAGE_MODE_4_LEVEL 0x04 | |
150 | #define IOMMU_PDE_NL_0 0x000ULL | 151 | #define PAGE_MODE_5_LEVEL 0x05 |
151 | #define IOMMU_PDE_NL_1 0x200ULL | 152 | #define PAGE_MODE_6_LEVEL 0x06 |
152 | #define IOMMU_PDE_NL_2 0x400ULL | 153 | |
153 | #define IOMMU_PDE_NL_3 0x600ULL | 154 | #define PM_LEVEL_SHIFT(x) (12 + ((x) * 9)) |
154 | 155 | #define PM_LEVEL_SIZE(x) (((x) < 6) ? \ | |
155 | #define IOMMU_PTE_L2_INDEX(address) (((address) >> 30) & 0x1ffULL) | 156 | ((1ULL << PM_LEVEL_SHIFT((x))) - 1): \ |
156 | #define IOMMU_PTE_L1_INDEX(address) (((address) >> 21) & 0x1ffULL) | 157 | (0xffffffffffffffffULL)) |
157 | #define IOMMU_PTE_L0_INDEX(address) (((address) >> 12) & 0x1ffULL) | 158 | #define PM_LEVEL_INDEX(x, a) (((a) >> PM_LEVEL_SHIFT((x))) & 0x1ffULL) |
158 | 159 | #define PM_LEVEL_ENC(x) (((x) << 9) & 0xe00ULL) | |
159 | #define IOMMU_MAP_SIZE_L1 (1ULL << 21) | 160 | #define PM_LEVEL_PDE(x, a) ((a) | PM_LEVEL_ENC((x)) | \ |
160 | #define IOMMU_MAP_SIZE_L2 (1ULL << 30) | 161 | IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW) |
161 | #define IOMMU_MAP_SIZE_L3 (1ULL << 39) | 162 | #define PM_PTE_LEVEL(pte) (((pte) >> 9) & 0x7ULL) |
163 | |||
164 | #define PM_MAP_4k 0 | ||
165 | #define PM_ADDR_MASK 0x000ffffffffff000ULL | ||
166 | #define PM_MAP_MASK(lvl) (PM_ADDR_MASK & \ | ||
167 | (~((1ULL << (12 + ((lvl) * 9))) - 1))) | ||
168 | #define PM_ALIGNED(lvl, addr) ((PM_MAP_MASK(lvl) & (addr)) == (addr)) | ||
162 | 169 | ||
163 | #define IOMMU_PTE_P (1ULL << 0) | 170 | #define IOMMU_PTE_P (1ULL << 0) |
164 | #define IOMMU_PTE_TV (1ULL << 1) | 171 | #define IOMMU_PTE_TV (1ULL << 1) |
@@ -167,11 +174,6 @@ | |||
167 | #define IOMMU_PTE_IR (1ULL << 61) | 174 | #define IOMMU_PTE_IR (1ULL << 61) |
168 | #define IOMMU_PTE_IW (1ULL << 62) | 175 | #define IOMMU_PTE_IW (1ULL << 62) |
169 | 176 | ||
170 | #define IOMMU_L1_PDE(address) \ | ||
171 | ((address) | IOMMU_PDE_NL_1 | IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW) | ||
172 | #define IOMMU_L2_PDE(address) \ | ||
173 | ((address) | IOMMU_PDE_NL_2 | IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW) | ||
174 | |||
175 | #define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL) | 177 | #define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL) |
176 | #define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P) | 178 | #define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P) |
177 | #define IOMMU_PTE_PAGE(pte) (phys_to_virt((pte) & IOMMU_PAGE_MASK)) | 179 | #define IOMMU_PTE_PAGE(pte) (phys_to_virt((pte) & IOMMU_PAGE_MASK)) |
@@ -194,11 +196,14 @@ | |||
194 | #define PD_DMA_OPS_MASK (1UL << 0) /* domain used for dma_ops */ | 196 | #define PD_DMA_OPS_MASK (1UL << 0) /* domain used for dma_ops */ |
195 | #define PD_DEFAULT_MASK (1UL << 1) /* domain is a default dma_ops | 197 | #define PD_DEFAULT_MASK (1UL << 1) /* domain is a default dma_ops |
196 | domain for an IOMMU */ | 198 | domain for an IOMMU */ |
199 | #define PD_PASSTHROUGH_MASK (1UL << 2) /* domain has no page | ||
200 | translation */ | ||
201 | |||
197 | extern bool amd_iommu_dump; | 202 | extern bool amd_iommu_dump; |
198 | #define DUMP_printk(format, arg...) \ | 203 | #define DUMP_printk(format, arg...) \ |
199 | do { \ | 204 | do { \ |
200 | if (amd_iommu_dump) \ | 205 | if (amd_iommu_dump) \ |
201 | printk(KERN_INFO "AMD IOMMU: " format, ## arg); \ | 206 | printk(KERN_INFO "AMD-Vi: " format, ## arg); \ |
202 | } while(0); | 207 | } while(0); |
203 | 208 | ||
204 | /* | 209 | /* |
@@ -226,6 +231,7 @@ struct protection_domain { | |||
226 | int mode; /* paging mode (0-6 levels) */ | 231 | int mode; /* paging mode (0-6 levels) */ |
227 | u64 *pt_root; /* page table root pointer */ | 232 | u64 *pt_root; /* page table root pointer */ |
228 | unsigned long flags; /* flags to find out type of domain */ | 233 | unsigned long flags; /* flags to find out type of domain */ |
234 | bool updated; /* complete domain flush required */ | ||
229 | unsigned dev_cnt; /* devices assigned to this domain */ | 235 | unsigned dev_cnt; /* devices assigned to this domain */ |
230 | void *priv; /* private data */ | 236 | void *priv; /* private data */ |
231 | }; | 237 | }; |
@@ -337,6 +343,9 @@ struct amd_iommu { | |||
337 | /* if one, we need to send a completion wait command */ | 343 | /* if one, we need to send a completion wait command */ |
338 | bool need_sync; | 344 | bool need_sync; |
339 | 345 | ||
346 | /* becomes true if a command buffer reset is running */ | ||
347 | bool reset_in_progress; | ||
348 | |||
340 | /* default dma_ops domain for that IOMMU */ | 349 | /* default dma_ops domain for that IOMMU */ |
341 | struct dma_ops_domain *default_dom; | 350 | struct dma_ops_domain *default_dom; |
342 | }; | 351 | }; |
@@ -457,4 +466,7 @@ static inline void amd_iommu_stats_init(void) { } | |||
457 | 466 | ||
458 | #endif /* CONFIG_AMD_IOMMU_STATS */ | 467 | #endif /* CONFIG_AMD_IOMMU_STATS */ |
459 | 468 | ||
469 | /* some function prototypes */ | ||
470 | extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu); | ||
471 | |||
460 | #endif /* _ASM_X86_AMD_IOMMU_TYPES_H */ | 472 | #endif /* _ASM_X86_AMD_IOMMU_TYPES_H */ |
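
The PM_LEVEL_* macros above generalize the three fixed IOMMU_PTE_Lx_INDEX macros they replace to an arbitrary number of page-table levels: each level covers 9 address bits on top of the 12-bit page offset. A self-contained evaluation of that arithmetic, with the two macro bodies copied verbatim from the hunk:

    /* Worked example for the PM_LEVEL_* arithmetic introduced above. */
    #include <stdio.h>

    #define PM_LEVEL_SHIFT(x)       (12 + ((x) * 9))
    #define PM_LEVEL_INDEX(x, a)    (((a) >> PM_LEVEL_SHIFT((x))) & 0x1ffULL)

    int main(void)
    {
            unsigned long long addr = 0x12345678000ULL;
            int lvl;

            /* shifts come out as 12, 21, 30, 39 for levels 0..3 */
            for (lvl = 0; lvl < 4; ++lvl)
                    printf("level %d: shift %2d index %3llu\n",
                           lvl, PM_LEVEL_SHIFT(lvl),
                           PM_LEVEL_INDEX(lvl, addr));
            return 0;
    }
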
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index 1c3f9435f1c9..0ee770d23d0e 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h | |||
@@ -55,6 +55,24 @@ extern int dma_set_mask(struct device *dev, u64 mask); | |||
55 | extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, | 55 | extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, |
56 | dma_addr_t *dma_addr, gfp_t flag); | 56 | dma_addr_t *dma_addr, gfp_t flag); |
57 | 57 | ||
58 | static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) | ||
59 | { | ||
60 | if (!dev->dma_mask) | ||
61 | return 0; | ||
62 | |||
63 | return addr + size <= *dev->dma_mask; | ||
64 | } | ||
65 | |||
66 | static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) | ||
67 | { | ||
68 | return paddr; | ||
69 | } | ||
70 | |||
71 | static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) | ||
72 | { | ||
73 | return daddr; | ||
74 | } | ||
75 | |||
58 | static inline void | 76 | static inline void |
59 | dma_cache_sync(struct device *dev, void *vaddr, size_t size, | 77 | dma_cache_sync(struct device *dev, void *vaddr, size_t size, |
60 | enum dma_data_direction dir) | 78 | enum dma_data_direction dir) |
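
The new dma_capable() inline above is the generic addressability test: on x86 the bus is identity-mapped (phys_to_dma() just returns the physical address), so a buffer is usable iff it ends at or below the device's DMA mask. A self-contained model of the same check (the _model suffix marks names invented for the demo):

    /* Model of dma_capable(): no mask means no DMA; otherwise the
     * buffer's end must fit under the device's mask. */
    #include <stdio.h>
    #include <stdint.h>

    static int dma_capable_model(const uint64_t *dma_mask,
                                 uint64_t addr, uint64_t size)
    {
            if (!dma_mask)
                    return 0;
            return addr + size <= *dma_mask;
    }

    int main(void)
    {
            uint64_t mask32 = 0xffffffffULL;        /* a 32-bit-only device */

            printf("%d\n", dma_capable_model(&mask32, 0x7fff0000ULL, 0x10000));   /* 1 */
            printf("%d\n", dma_capable_model(&mask32, 0x100000000ULL, 0x1000));   /* 0 */
            return 0;
    }
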
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h index bd2c6511c887..db24c2278be0 100644 --- a/arch/x86/include/asm/ftrace.h +++ b/arch/x86/include/asm/ftrace.h | |||
@@ -28,13 +28,6 @@ | |||
28 | 28 | ||
29 | #endif | 29 | #endif |
30 | 30 | ||
31 | /* FIXME: I don't want to stay hardcoded */ | ||
32 | #ifdef CONFIG_X86_64 | ||
33 | # define FTRACE_SYSCALL_MAX 296 | ||
34 | #else | ||
35 | # define FTRACE_SYSCALL_MAX 333 | ||
36 | #endif | ||
37 | |||
38 | #ifdef CONFIG_FUNCTION_TRACER | 31 | #ifdef CONFIG_FUNCTION_TRACER |
39 | #define MCOUNT_ADDR ((long)(mcount)) | 32 | #define MCOUNT_ADDR ((long)(mcount)) |
40 | #define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */ | 33 | #define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */ |
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h index c86e5ed4af51..e63cf7d441e1 100644 --- a/arch/x86/include/asm/nmi.h +++ b/arch/x86/include/asm/nmi.h | |||
@@ -45,8 +45,8 @@ extern int proc_nmi_enabled(struct ctl_table *, int , struct file *, | |||
45 | void __user *, size_t *, loff_t *); | 45 | void __user *, size_t *, loff_t *); |
46 | extern int unknown_nmi_panic; | 46 | extern int unknown_nmi_panic; |
47 | 47 | ||
48 | void __trigger_all_cpu_backtrace(void); | 48 | void arch_trigger_all_cpu_backtrace(void); |
49 | #define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace() | 49 | #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace |
50 | 50 | ||
51 | static inline void localise_nmi_watchdog(void) | 51 | static inline void localise_nmi_watchdog(void) |
52 | { | 52 | { |
diff --git a/arch/x86/include/asm/perf_counter.h b/arch/x86/include/asm/perf_counter.h index fa64e401589d..e7b7c938ae27 100644 --- a/arch/x86/include/asm/perf_counter.h +++ b/arch/x86/include/asm/perf_counter.h | |||
@@ -84,6 +84,16 @@ union cpuid10_edx { | |||
84 | #define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b | 84 | #define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b |
85 | #define X86_PMC_IDX_FIXED_BUS_CYCLES (X86_PMC_IDX_FIXED + 2) | 85 | #define X86_PMC_IDX_FIXED_BUS_CYCLES (X86_PMC_IDX_FIXED + 2) |
86 | 86 | ||
87 | /* | ||
88 | * We model BTS tracing as another fixed-mode PMC. | ||
89 | * | ||
90 | * We choose a value in the middle of the fixed counter range, since lower | ||
91 | * values are used by actual fixed counters and higher values are used | ||
92 | * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr. | ||
93 | */ | ||
94 | #define X86_PMC_IDX_FIXED_BTS (X86_PMC_IDX_FIXED + 16) | ||
95 | |||
96 | |||
87 | #ifdef CONFIG_PERF_COUNTERS | 97 | #ifdef CONFIG_PERF_COUNTERS |
88 | extern void init_hw_perf_counters(void); | 98 | extern void init_hw_perf_counters(void); |
89 | extern void perf_counters_lapic_init(void); | 99 | extern void perf_counters_lapic_init(void); |
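
The BTS pseudo-counter comment is easy to check numerically. Assuming the customary X86_PMC_IDX_FIXED value of 32 (defined earlier in this header, outside the hunk), the chosen index lands at 48 — above the handful of real fixed counters and well below the overflow-status bits at the top of the 64-bit PERF_GLOBAL_STATUS register:

    /* Arithmetic behind X86_PMC_IDX_FIXED_BTS; the FIXED base value is
     * an assumption here, not shown in this hunk. */
    #include <stdio.h>

    #define X86_PMC_IDX_FIXED       32      /* assumed */
    #define X86_PMC_IDX_FIXED_BTS   (X86_PMC_IDX_FIXED + 16)

    int main(void)
    {
            printf("BTS pseudo-counter index: %d\n", X86_PMC_IDX_FIXED_BTS); /* 48 */
            return 0;
    }
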
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index fad7d40b75f8..6f7786aea4fc 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h | |||
@@ -95,7 +95,7 @@ struct thread_info { | |||
95 | #define TIF_DEBUGCTLMSR 25 /* uses thread_struct.debugctlmsr */ | 95 | #define TIF_DEBUGCTLMSR 25 /* uses thread_struct.debugctlmsr */ |
96 | #define TIF_DS_AREA_MSR 26 /* uses thread_struct.ds_area_msr */ | 96 | #define TIF_DS_AREA_MSR 26 /* uses thread_struct.ds_area_msr */ |
97 | #define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */ | 97 | #define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */ |
98 | #define TIF_SYSCALL_FTRACE 28 /* for ftrace syscall instrumentation */ | 98 | #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */ |
99 | 99 | ||
100 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) | 100 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) |
101 | #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) | 101 | #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) |
@@ -118,17 +118,17 @@ struct thread_info { | |||
118 | #define _TIF_DEBUGCTLMSR (1 << TIF_DEBUGCTLMSR) | 118 | #define _TIF_DEBUGCTLMSR (1 << TIF_DEBUGCTLMSR) |
119 | #define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR) | 119 | #define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR) |
120 | #define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES) | 120 | #define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES) |
121 | #define _TIF_SYSCALL_FTRACE (1 << TIF_SYSCALL_FTRACE) | 121 | #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) |
122 | 122 | ||
123 | /* work to do in syscall_trace_enter() */ | 123 | /* work to do in syscall_trace_enter() */ |
124 | #define _TIF_WORK_SYSCALL_ENTRY \ | 124 | #define _TIF_WORK_SYSCALL_ENTRY \ |
125 | (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_FTRACE | \ | 125 | (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \ |
126 | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | _TIF_SINGLESTEP) | 126 | _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT) |
127 | 127 | ||
128 | /* work to do in syscall_trace_leave() */ | 128 | /* work to do in syscall_trace_leave() */ |
129 | #define _TIF_WORK_SYSCALL_EXIT \ | 129 | #define _TIF_WORK_SYSCALL_EXIT \ |
130 | (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \ | 130 | (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \ |
131 | _TIF_SYSCALL_FTRACE) | 131 | _TIF_SYSCALL_TRACEPOINT) |
132 | 132 | ||
133 | /* work to do on interrupt/exception return */ | 133 | /* work to do on interrupt/exception return */ |
134 | #define _TIF_WORK_MASK \ | 134 | #define _TIF_WORK_MASK \ |
@@ -137,7 +137,8 @@ struct thread_info { | |||
137 | _TIF_SINGLESTEP|_TIF_SECCOMP|_TIF_SYSCALL_EMU)) | 137 | _TIF_SINGLESTEP|_TIF_SECCOMP|_TIF_SYSCALL_EMU)) |
138 | 138 | ||
139 | /* work to do on any return to user space */ | 139 | /* work to do on any return to user space */ |
140 | #define _TIF_ALLWORK_MASK ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_FTRACE) | 140 | #define _TIF_ALLWORK_MASK \ |
141 | ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT) | ||
141 | 142 | ||
142 | /* Only used for 64 bit */ | 143 | /* Only used for 64 bit */ |
143 | #define _TIF_DO_NOTIFY_MASK \ | 144 | #define _TIF_DO_NOTIFY_MASK \ |
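
The thread_info hunks follow the standard TIF pattern: each flag is a bit index, the _TIF_* masks OR together the shifted bits, and the syscall entry/exit paths test the whole flag word against a mask in a single AND. A self-contained miniature using just the two flags relevant here (bit positions copied from the hunk, the mask trimmed down for the demo):

    /* Miniature of the TIF bit/mask composition used above. */
    #include <stdio.h>

    #define TIF_SYSCALL_TRACE       0
    #define TIF_SYSCALL_TRACEPOINT  28

    #define _TIF_SYSCALL_TRACE      (1 << TIF_SYSCALL_TRACE)
    #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)

    #define _TIF_WORK_SYSCALL_ENTRY \
            (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT)

    int main(void)
    {
            unsigned long flags = _TIF_SYSCALL_TRACEPOINT; /* tracepoint armed */

            if (flags & _TIF_WORK_SYSCALL_ENTRY)
                    printf("slow path: syscall entry work pending\n");
            return 0;
    }
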
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h index 066ef590d7e0..26d06e052a18 100644 --- a/arch/x86/include/asm/topology.h +++ b/arch/x86/include/asm/topology.h | |||
@@ -129,25 +129,34 @@ extern unsigned long node_remap_size[]; | |||
129 | #endif | 129 | #endif |
130 | 130 | ||
131 | /* sched_domains SD_NODE_INIT for NUMA machines */ | 131 | /* sched_domains SD_NODE_INIT for NUMA machines */ |
132 | #define SD_NODE_INIT (struct sched_domain) { \ | 132 | #define SD_NODE_INIT (struct sched_domain) { \ |
133 | .min_interval = 8, \ | 133 | .min_interval = 8, \ |
134 | .max_interval = 32, \ | 134 | .max_interval = 32, \ |
135 | .busy_factor = 32, \ | 135 | .busy_factor = 32, \ |
136 | .imbalance_pct = 125, \ | 136 | .imbalance_pct = 125, \ |
137 | .cache_nice_tries = SD_CACHE_NICE_TRIES, \ | 137 | .cache_nice_tries = SD_CACHE_NICE_TRIES, \ |
138 | .busy_idx = 3, \ | 138 | .busy_idx = 3, \ |
139 | .idle_idx = SD_IDLE_IDX, \ | 139 | .idle_idx = SD_IDLE_IDX, \ |
140 | .newidle_idx = SD_NEWIDLE_IDX, \ | 140 | .newidle_idx = SD_NEWIDLE_IDX, \ |
141 | .wake_idx = 1, \ | 141 | .wake_idx = 1, \ |
142 | .forkexec_idx = SD_FORKEXEC_IDX, \ | 142 | .forkexec_idx = SD_FORKEXEC_IDX, \ |
143 | .flags = SD_LOAD_BALANCE \ | 143 | \ |
144 | | SD_BALANCE_EXEC \ | 144 | .flags = 1*SD_LOAD_BALANCE \ |
145 | | SD_BALANCE_FORK \ | 145 | | 1*SD_BALANCE_NEWIDLE \ |
146 | | SD_WAKE_AFFINE \ | 146 | | 1*SD_BALANCE_EXEC \ |
147 | | SD_WAKE_BALANCE \ | 147 | | 1*SD_BALANCE_FORK \ |
148 | | SD_SERIALIZE, \ | 148 | | 0*SD_WAKE_IDLE \ |
149 | .last_balance = jiffies, \ | 149 | | 1*SD_WAKE_AFFINE \ |
150 | .balance_interval = 1, \ | 150 | | 1*SD_WAKE_BALANCE \ |
151 | | 0*SD_SHARE_CPUPOWER \ | ||
152 | | 0*SD_POWERSAVINGS_BALANCE \ | ||
153 | | 0*SD_SHARE_PKG_RESOURCES \ | ||
154 | | 1*SD_SERIALIZE \ | ||
155 | | 1*SD_WAKE_IDLE_FAR \ | ||
156 | | 0*SD_PREFER_SIBLING \ | ||
157 | , \ | ||
158 | .last_balance = jiffies, \ | ||
159 | .balance_interval = 1, \ | ||
151 | } | 160 | } |
152 | 161 | ||
153 | #ifdef CONFIG_X86_64_ACPI_NUMA | 162 | #ifdef CONFIG_X86_64_ACPI_NUMA |
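
The rewritten SD_NODE_INIT flags use the 1*FLAG / 0*FLAG idiom: every scheduler-domain flag stays spelled out in the initializer, and toggling the leading digit enables or disables it without deleting the line. A tiny demonstration (the flag values below are made up for the demo, not the scheduler's):

    /* The 1*FLAG / 0*FLAG idiom: 0*FLAG contributes nothing to the OR,
     * but the flag name stays visible and greppable. */
    #include <stdio.h>

    #define SD_LOAD_BALANCE   0x01
    #define SD_SERIALIZE      0x02
    #define SD_PREFER_SIBLING 0x04

    int main(void)
    {
            int flags = 1*SD_LOAD_BALANCE
                      | 1*SD_SERIALIZE
                      | 0*SD_PREFER_SIBLING;        /* listed but disabled */

            printf("flags = 0x%x\n", flags);        /* 0x3 */
            return 0;
    }
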
diff --git a/arch/x86/include/asm/unistd_32.h b/arch/x86/include/asm/unistd_32.h index 732a30706153..8deaada61bc8 100644 --- a/arch/x86/include/asm/unistd_32.h +++ b/arch/x86/include/asm/unistd_32.h | |||
@@ -345,6 +345,8 @@ | |||
345 | 345 | ||
346 | #ifdef __KERNEL__ | 346 | #ifdef __KERNEL__ |
347 | 347 | ||
348 | #define NR_syscalls 337 | ||
349 | |||
348 | #define __ARCH_WANT_IPC_PARSE_VERSION | 350 | #define __ARCH_WANT_IPC_PARSE_VERSION |
349 | #define __ARCH_WANT_OLD_READDIR | 351 | #define __ARCH_WANT_OLD_READDIR |
350 | #define __ARCH_WANT_OLD_STAT | 352 | #define __ARCH_WANT_OLD_STAT |
diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h index 900e1617e672..b9f3c60de5f7 100644 --- a/arch/x86/include/asm/unistd_64.h +++ b/arch/x86/include/asm/unistd_64.h | |||
@@ -688,6 +688,12 @@ __SYSCALL(__NR_perf_counter_open, sys_perf_counter_open) | |||
688 | #endif /* __NO_STUBS */ | 688 | #endif /* __NO_STUBS */ |
689 | 689 | ||
690 | #ifdef __KERNEL__ | 690 | #ifdef __KERNEL__ |
691 | |||
692 | #ifndef COMPILE_OFFSETS | ||
693 | #include <asm/asm-offsets.h> | ||
694 | #define NR_syscalls (__NR_syscall_max + 1) | ||
695 | #endif | ||
696 | |||
691 | /* | 697 | /* |
692 | * "Conditional" syscalls | 698 | * "Conditional" syscalls |
693 | * | 699 | * |
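
On 64-bit, NR_syscalls is now derived from __NR_syscall_max, a constant emitted at build time into asm-offsets.h; the COMPILE_OFFSETS guard avoids the circular include while that header is itself being generated. A sketch of the pattern with a stand-in for the generated value:

    /* Sketch of deriving NR_syscalls from a generated maximum; in the
     * real build __NR_syscall_max comes from asm-offsets.h, faked here. */
    #include <stdio.h>

    #define __NR_syscall_max_model  298     /* hypothetical generated value */
    #define NR_syscalls_model       (__NR_syscall_max_model + 1)

    int main(void)
    {
            printf("NR_syscalls = %d\n", NR_syscalls_model);
            return 0;
    }
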
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 6c99f5037801..98f230f6a28d 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c | |||
@@ -41,9 +41,13 @@ static DEFINE_RWLOCK(amd_iommu_devtable_lock); | |||
41 | static LIST_HEAD(iommu_pd_list); | 41 | static LIST_HEAD(iommu_pd_list); |
42 | static DEFINE_SPINLOCK(iommu_pd_list_lock); | 42 | static DEFINE_SPINLOCK(iommu_pd_list_lock); |
43 | 43 | ||
44 | #ifdef CONFIG_IOMMU_API | 44 | /* |
45 | * Domain for untranslated devices - only allocated | ||
46 | * if iommu=pt passed on kernel cmd line. | ||
47 | */ | ||
48 | static struct protection_domain *pt_domain; | ||
49 | |||
45 | static struct iommu_ops amd_iommu_ops; | 50 | static struct iommu_ops amd_iommu_ops; |
46 | #endif | ||
47 | 51 | ||
48 | /* | 52 | /* |
49 | * general struct to manage commands send to an IOMMU | 53 | * general struct to manage commands send to an IOMMU |
@@ -55,16 +59,16 @@ struct iommu_cmd { | |||
55 | static int dma_ops_unity_map(struct dma_ops_domain *dma_dom, | 59 | static int dma_ops_unity_map(struct dma_ops_domain *dma_dom, |
56 | struct unity_map_entry *e); | 60 | struct unity_map_entry *e); |
57 | static struct dma_ops_domain *find_protection_domain(u16 devid); | 61 | static struct dma_ops_domain *find_protection_domain(u16 devid); |
58 | static u64* alloc_pte(struct protection_domain *dom, | 62 | static u64 *alloc_pte(struct protection_domain *domain, |
59 | unsigned long address, u64 | 63 | unsigned long address, int end_lvl, |
60 | **pte_page, gfp_t gfp); | 64 | u64 **pte_page, gfp_t gfp); |
61 | static void dma_ops_reserve_addresses(struct dma_ops_domain *dom, | 65 | static void dma_ops_reserve_addresses(struct dma_ops_domain *dom, |
62 | unsigned long start_page, | 66 | unsigned long start_page, |
63 | unsigned int pages); | 67 | unsigned int pages); |
64 | 68 | static void reset_iommu_command_buffer(struct amd_iommu *iommu); | |
65 | #ifndef BUS_NOTIFY_UNBOUND_DRIVER | 69 | static u64 *fetch_pte(struct protection_domain *domain, |
66 | #define BUS_NOTIFY_UNBOUND_DRIVER 0x0005 | 70 | unsigned long address, int map_size); |
67 | #endif | 71 | static void update_domain(struct protection_domain *domain); |
68 | 72 | ||
69 | #ifdef CONFIG_AMD_IOMMU_STATS | 73 | #ifdef CONFIG_AMD_IOMMU_STATS |
70 | 74 | ||
@@ -138,7 +142,25 @@ static int iommu_has_npcache(struct amd_iommu *iommu) | |||
138 | * | 142 | * |
139 | ****************************************************************************/ | 143 | ****************************************************************************/ |
140 | 144 | ||
141 | static void iommu_print_event(void *__evt) | 145 | static void dump_dte_entry(u16 devid) |
146 | { | ||
147 | int i; | ||
148 | |||
149 | for (i = 0; i < 8; ++i) | ||
150 | pr_err("AMD-Vi: DTE[%d]: %08x\n", i, | ||
151 | amd_iommu_dev_table[devid].data[i]); | ||
152 | } | ||
153 | |||
154 | static void dump_command(unsigned long phys_addr) | ||
155 | { | ||
156 | struct iommu_cmd *cmd = phys_to_virt(phys_addr); | ||
157 | int i; | ||
158 | |||
159 | for (i = 0; i < 4; ++i) | ||
160 | pr_err("AMD-Vi: CMD[%d]: %08x\n", i, cmd->data[i]); | ||
161 | } | ||
162 | |||
163 | static void iommu_print_event(struct amd_iommu *iommu, void *__evt) | ||
142 | { | 164 | { |
143 | u32 *event = __evt; | 165 | u32 *event = __evt; |
144 | int type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK; | 166 | int type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK; |
@@ -147,7 +169,7 @@ static void iommu_print_event(void *__evt) | |||
147 | int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK; | 169 | int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK; |
148 | u64 address = (u64)(((u64)event[3]) << 32) | event[2]; | 170 | u64 address = (u64)(((u64)event[3]) << 32) | event[2]; |
149 | 171 | ||
150 | printk(KERN_ERR "AMD IOMMU: Event logged ["); | 172 | printk(KERN_ERR "AMD-Vi: Event logged ["); |
151 | 173 | ||
152 | switch (type) { | 174 | switch (type) { |
153 | case EVENT_TYPE_ILL_DEV: | 175 | case EVENT_TYPE_ILL_DEV: |
@@ -155,6 +177,7 @@ static void iommu_print_event(void *__evt) | |||
155 | "address=0x%016llx flags=0x%04x]\n", | 177 | "address=0x%016llx flags=0x%04x]\n", |
156 | PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid), | 178 | PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid), |
157 | address, flags); | 179 | address, flags); |
180 | dump_dte_entry(devid); | ||
158 | break; | 181 | break; |
159 | case EVENT_TYPE_IO_FAULT: | 182 | case EVENT_TYPE_IO_FAULT: |
160 | printk("IO_PAGE_FAULT device=%02x:%02x.%x " | 183 | printk("IO_PAGE_FAULT device=%02x:%02x.%x " |
@@ -176,6 +199,8 @@ static void iommu_print_event(void *__evt) | |||
176 | break; | 199 | break; |
177 | case EVENT_TYPE_ILL_CMD: | 200 | case EVENT_TYPE_ILL_CMD: |
178 | printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address); | 201 | printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address); |
202 | reset_iommu_command_buffer(iommu); | ||
203 | dump_command(address); | ||
179 | break; | 204 | break; |
180 | case EVENT_TYPE_CMD_HARD_ERR: | 205 | case EVENT_TYPE_CMD_HARD_ERR: |
181 | printk("COMMAND_HARDWARE_ERROR address=0x%016llx " | 206 | printk("COMMAND_HARDWARE_ERROR address=0x%016llx " |
@@ -209,7 +234,7 @@ static void iommu_poll_events(struct amd_iommu *iommu) | |||
209 | tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); | 234 | tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); |
210 | 235 | ||
211 | while (head != tail) { | 236 | while (head != tail) { |
212 | iommu_print_event(iommu->evt_buf + head); | 237 | iommu_print_event(iommu, iommu->evt_buf + head); |
213 | head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size; | 238 | head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size; |
214 | } | 239 | } |
215 | 240 | ||
@@ -296,8 +321,11 @@ static void __iommu_wait_for_completion(struct amd_iommu *iommu) | |||
296 | status &= ~MMIO_STATUS_COM_WAIT_INT_MASK; | 321 | status &= ~MMIO_STATUS_COM_WAIT_INT_MASK; |
297 | writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET); | 322 | writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET); |
298 | 323 | ||
299 | if (unlikely(i == EXIT_LOOP_COUNT)) | 324 | if (unlikely(i == EXIT_LOOP_COUNT)) { |
300 | panic("AMD IOMMU: Completion wait loop failed\n"); | 325 | spin_unlock(&iommu->lock); |
326 | reset_iommu_command_buffer(iommu); | ||
327 | spin_lock(&iommu->lock); | ||
328 | } | ||
301 | } | 329 | } |
302 | 330 | ||
303 | /* | 331 | /* |
@@ -445,47 +473,78 @@ static void iommu_flush_tlb_pde(struct amd_iommu *iommu, u16 domid) | |||
445 | } | 473 | } |
446 | 474 | ||
447 | /* | 475 | /* |
476 | * This function flushes one domain on one IOMMU | ||
477 | */ | ||
478 | static void flush_domain_on_iommu(struct amd_iommu *iommu, u16 domid) | ||
479 | { | ||
480 | struct iommu_cmd cmd; | ||
481 | unsigned long flags; | ||
482 | |||
483 | __iommu_build_inv_iommu_pages(&cmd, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, | ||
484 | domid, 1, 1); | ||
485 | |||
486 | spin_lock_irqsave(&iommu->lock, flags); | ||
487 | __iommu_queue_command(iommu, &cmd); | ||
488 | __iommu_completion_wait(iommu); | ||
489 | __iommu_wait_for_completion(iommu); | ||
490 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
491 | } | ||
492 | |||
493 | static void flush_all_domains_on_iommu(struct amd_iommu *iommu) | ||
494 | { | ||
495 | int i; | ||
496 | |||
497 | for (i = 1; i < MAX_DOMAIN_ID; ++i) { | ||
498 | if (!test_bit(i, amd_iommu_pd_alloc_bitmap)) | ||
499 | continue; | ||
500 | flush_domain_on_iommu(iommu, i); | ||
501 | } | ||
502 | |||
503 | } | ||
504 | |||
505 | /* | ||
448 | * This function is used to flush the IO/TLB for a given protection domain | 506 | * This function is used to flush the IO/TLB for a given protection domain |
449 | * on every IOMMU in the system | 507 | * on every IOMMU in the system |
450 | */ | 508 | */ |
451 | static void iommu_flush_domain(u16 domid) | 509 | static void iommu_flush_domain(u16 domid) |
452 | { | 510 | { |
453 | unsigned long flags; | ||
454 | struct amd_iommu *iommu; | 511 | struct amd_iommu *iommu; |
455 | struct iommu_cmd cmd; | ||
456 | 512 | ||
457 | INC_STATS_COUNTER(domain_flush_all); | 513 | INC_STATS_COUNTER(domain_flush_all); |
458 | 514 | ||
459 | __iommu_build_inv_iommu_pages(&cmd, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, | 515 | for_each_iommu(iommu) |
460 | domid, 1, 1); | 516 | flush_domain_on_iommu(iommu, domid); |
461 | |||
462 | for_each_iommu(iommu) { | ||
463 | spin_lock_irqsave(&iommu->lock, flags); | ||
464 | __iommu_queue_command(iommu, &cmd); | ||
465 | __iommu_completion_wait(iommu); | ||
466 | __iommu_wait_for_completion(iommu); | ||
467 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
468 | } | ||
469 | } | 517 | } |
470 | 518 | ||
471 | void amd_iommu_flush_all_domains(void) | 519 | void amd_iommu_flush_all_domains(void) |
472 | { | 520 | { |
521 | struct amd_iommu *iommu; | ||
522 | |||
523 | for_each_iommu(iommu) | ||
524 | flush_all_domains_on_iommu(iommu); | ||
525 | } | ||
526 | |||
527 | static void flush_all_devices_for_iommu(struct amd_iommu *iommu) | ||
528 | { | ||
473 | int i; | 529 | int i; |
474 | 530 | ||
475 | for (i = 1; i < MAX_DOMAIN_ID; ++i) { | 531 | for (i = 0; i <= amd_iommu_last_bdf; ++i) { |
476 | if (!test_bit(i, amd_iommu_pd_alloc_bitmap)) | 532 | if (iommu != amd_iommu_rlookup_table[i]) |
477 | continue; | 533 | continue; |
478 | iommu_flush_domain(i); | 534 | |
535 | iommu_queue_inv_dev_entry(iommu, i); | ||
536 | iommu_completion_wait(iommu); | ||
479 | } | 537 | } |
480 | } | 538 | } |
481 | 539 | ||
482 | void amd_iommu_flush_all_devices(void) | 540 | static void flush_devices_by_domain(struct protection_domain *domain) |
483 | { | 541 | { |
484 | struct amd_iommu *iommu; | 542 | struct amd_iommu *iommu; |
485 | int i; | 543 | int i; |
486 | 544 | ||
487 | for (i = 0; i <= amd_iommu_last_bdf; ++i) { | 545 | for (i = 0; i <= amd_iommu_last_bdf; ++i) { |
488 | if (amd_iommu_pd_table[i] == NULL) | 546 | if ((domain == NULL && amd_iommu_pd_table[i] == NULL) || |
547 | (amd_iommu_pd_table[i] != domain)) | ||
489 | continue; | 548 | continue; |
490 | 549 | ||
491 | iommu = amd_iommu_rlookup_table[i]; | 550 | iommu = amd_iommu_rlookup_table[i]; |
@@ -497,6 +556,27 @@ void amd_iommu_flush_all_devices(void) | |||
497 | } | 556 | } |
498 | } | 557 | } |
499 | 558 | ||
559 | static void reset_iommu_command_buffer(struct amd_iommu *iommu) | ||
560 | { | ||
561 | pr_err("AMD-Vi: Resetting IOMMU command buffer\n"); | ||
562 | |||
563 | if (iommu->reset_in_progress) | ||
564 | panic("AMD-Vi: ILLEGAL_COMMAND_ERROR while resetting command buffer\n"); | ||
565 | |||
566 | iommu->reset_in_progress = true; | ||
567 | |||
568 | amd_iommu_reset_cmd_buffer(iommu); | ||
569 | flush_all_devices_for_iommu(iommu); | ||
570 | flush_all_domains_on_iommu(iommu); | ||
571 | |||
572 | iommu->reset_in_progress = false; | ||
573 | } | ||
574 | |||
575 | void amd_iommu_flush_all_devices(void) | ||
576 | { | ||
577 | flush_devices_by_domain(NULL); | ||
578 | } | ||
579 | |||
500 | /**************************************************************************** | 580 | /**************************************************************************** |
501 | * | 581 | * |
502 | * The functions below are used to create the page table mappings for | 582 | * The functions below are used to create the page table mappings for
@@ -514,18 +594,21 @@ void amd_iommu_flush_all_devices(void) | |||
514 | static int iommu_map_page(struct protection_domain *dom, | 594 | static int iommu_map_page(struct protection_domain *dom, |
515 | unsigned long bus_addr, | 595 | unsigned long bus_addr, |
516 | unsigned long phys_addr, | 596 | unsigned long phys_addr, |
517 | int prot) | 597 | int prot, |
598 | int map_size) | ||
518 | { | 599 | { |
519 | u64 __pte, *pte; | 600 | u64 __pte, *pte; |
520 | 601 | ||
521 | bus_addr = PAGE_ALIGN(bus_addr); | 602 | bus_addr = PAGE_ALIGN(bus_addr); |
522 | phys_addr = PAGE_ALIGN(phys_addr); | 603 | phys_addr = PAGE_ALIGN(phys_addr); |
523 | 604 | ||
524 | /* only support 512GB address spaces for now */ | 605 | BUG_ON(!PM_ALIGNED(map_size, bus_addr)); |
525 | if (bus_addr > IOMMU_MAP_SIZE_L3 || !(prot & IOMMU_PROT_MASK)) | 606 | BUG_ON(!PM_ALIGNED(map_size, phys_addr)); |
607 | |||
608 | if (!(prot & IOMMU_PROT_MASK)) | ||
526 | return -EINVAL; | 609 | return -EINVAL; |
527 | 610 | ||
528 | pte = alloc_pte(dom, bus_addr, NULL, GFP_KERNEL); | 611 | pte = alloc_pte(dom, bus_addr, map_size, NULL, GFP_KERNEL); |
529 | 612 | ||
530 | if (IOMMU_PTE_PRESENT(*pte)) | 613 | if (IOMMU_PTE_PRESENT(*pte)) |
531 | return -EBUSY; | 614 | return -EBUSY; |
@@ -538,29 +621,18 @@ static int iommu_map_page(struct protection_domain *dom, | |||
538 | 621 | ||
539 | *pte = __pte; | 622 | *pte = __pte; |
540 | 623 | ||
624 | update_domain(dom); | ||
625 | |||
541 | return 0; | 626 | return 0; |
542 | } | 627 | } |
543 | 628 | ||
544 | static void iommu_unmap_page(struct protection_domain *dom, | 629 | static void iommu_unmap_page(struct protection_domain *dom, |
545 | unsigned long bus_addr) | 630 | unsigned long bus_addr, int map_size) |
546 | { | 631 | { |
547 | u64 *pte; | 632 | u64 *pte = fetch_pte(dom, bus_addr, map_size); |
548 | |||
549 | pte = &dom->pt_root[IOMMU_PTE_L2_INDEX(bus_addr)]; | ||
550 | |||
551 | if (!IOMMU_PTE_PRESENT(*pte)) | ||
552 | return; | ||
553 | |||
554 | pte = IOMMU_PTE_PAGE(*pte); | ||
555 | pte = &pte[IOMMU_PTE_L1_INDEX(bus_addr)]; | ||
556 | 633 | ||
557 | if (!IOMMU_PTE_PRESENT(*pte)) | 634 | if (pte) |
558 | return; | 635 | *pte = 0; |
559 | |||
560 | pte = IOMMU_PTE_PAGE(*pte); | ||
561 | pte = &pte[IOMMU_PTE_L1_INDEX(bus_addr)]; | ||
562 | |||
563 | *pte = 0; | ||
564 | } | 636 | } |
565 | 637 | ||
566 | /* | 638 | /* |
@@ -615,7 +687,8 @@ static int dma_ops_unity_map(struct dma_ops_domain *dma_dom, | |||
615 | 687 | ||
616 | for (addr = e->address_start; addr < e->address_end; | 688 | for (addr = e->address_start; addr < e->address_end; |
617 | addr += PAGE_SIZE) { | 689 | addr += PAGE_SIZE) { |
618 | ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot); | 690 | ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot, |
691 | PM_MAP_4k); | ||
619 | if (ret) | 692 | if (ret) |
620 | return ret; | 693 | return ret; |
621 | /* | 694 | /* |
@@ -670,24 +743,29 @@ static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom, | |||
670 | * This function checks if there is a PTE for a given dma address. If | 743 | * This function checks if there is a PTE for a given dma address. If |
671 | * there is one, it returns the pointer to it. | 744 | * there is one, it returns the pointer to it. |
672 | */ | 745 | */ |
673 | static u64* fetch_pte(struct protection_domain *domain, | 746 | static u64 *fetch_pte(struct protection_domain *domain, |
674 | unsigned long address) | 747 | unsigned long address, int map_size) |
675 | { | 748 | { |
749 | int level; | ||
676 | u64 *pte; | 750 | u64 *pte; |
677 | 751 | ||
678 | pte = &domain->pt_root[IOMMU_PTE_L2_INDEX(address)]; | 752 | level = domain->mode - 1; |
753 | pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; | ||
679 | 754 | ||
680 | if (!IOMMU_PTE_PRESENT(*pte)) | 755 | while (level > map_size) { |
681 | return NULL; | 756 | if (!IOMMU_PTE_PRESENT(*pte)) |
757 | return NULL; | ||
682 | 758 | ||
683 | pte = IOMMU_PTE_PAGE(*pte); | 759 | level -= 1; |
684 | pte = &pte[IOMMU_PTE_L1_INDEX(address)]; | ||
685 | 760 | ||
686 | if (!IOMMU_PTE_PRESENT(*pte)) | 761 | pte = IOMMU_PTE_PAGE(*pte); |
687 | return NULL; | 762 | pte = &pte[PM_LEVEL_INDEX(level, address)]; |
688 | 763 | ||
689 | pte = IOMMU_PTE_PAGE(*pte); | 764 | if ((PM_PTE_LEVEL(*pte) == 0) && level != map_size) { |
690 | pte = &pte[IOMMU_PTE_L0_INDEX(address)]; | 765 | pte = NULL; |
766 | break; | ||
767 | } | ||
768 | } | ||
691 | 769 | ||
692 | return pte; | 770 | return pte; |
693 | } | 771 | } |
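fetch_pte() now walks a variable number of levels, indexing one table per level via PM_LEVEL_INDEX(). Assuming the usual layout of 9 index bits per level above a 12-bit page offset (the increase_address_space() comment later in this patch speaks of 9 bits per level), the index computation behaves like this minimal sketch; the 12/9/0x1ff constants are assumptions, not copied from the header.

/*
 * Stand-alone model of a multi-level index computation with 9 index
 * bits per level above a 4k page. The 12-bit page shift and 9-bit
 * level width are assumptions based on this patch's comments.
 */
#include <stdint.h>
#include <stdio.h>

static inline unsigned int pm_level_index(int level, uint64_t address)
{
        return (address >> (12 + 9 * level)) & 0x1ffULL;
}

int main(void)
{
        uint64_t addr = 0x0000123456789000ULL;
        int level;

        for (level = 3; level >= 0; --level)
                printf("level %d -> index %u\n", level,
                       pm_level_index(level, addr));
        return 0;
}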
@@ -727,7 +805,7 @@ static int alloc_new_range(struct amd_iommu *iommu, | |||
727 | u64 *pte, *pte_page; | 805 | u64 *pte, *pte_page; |
728 | 806 | ||
729 | for (i = 0; i < num_ptes; ++i) { | 807 | for (i = 0; i < num_ptes; ++i) { |
730 | pte = alloc_pte(&dma_dom->domain, address, | 808 | pte = alloc_pte(&dma_dom->domain, address, PM_MAP_4k, |
731 | &pte_page, gfp); | 809 | &pte_page, gfp); |
732 | if (!pte) | 810 | if (!pte) |
733 | goto out_free; | 811 | goto out_free; |
@@ -760,16 +838,20 @@ static int alloc_new_range(struct amd_iommu *iommu, | |||
760 | for (i = dma_dom->aperture[index]->offset; | 838 | for (i = dma_dom->aperture[index]->offset; |
761 | i < dma_dom->aperture_size; | 839 | i < dma_dom->aperture_size; |
762 | i += PAGE_SIZE) { | 840 | i += PAGE_SIZE) { |
763 | u64 *pte = fetch_pte(&dma_dom->domain, i); | 841 | u64 *pte = fetch_pte(&dma_dom->domain, i, PM_MAP_4k); |
764 | if (!pte || !IOMMU_PTE_PRESENT(*pte)) | 842 | if (!pte || !IOMMU_PTE_PRESENT(*pte)) |
765 | continue; | 843 | continue; |
766 | 844 | ||
767 | dma_ops_reserve_addresses(dma_dom, i << PAGE_SHIFT, 1); | 845 | dma_ops_reserve_addresses(dma_dom, i << PAGE_SHIFT, 1); |
768 | } | 846 | } |
769 | 847 | ||
848 | update_domain(&dma_dom->domain); | ||
849 | |||
770 | return 0; | 850 | return 0; |
771 | 851 | ||
772 | out_free: | 852 | out_free: |
853 | update_domain(&dma_dom->domain); | ||
854 | |||
773 | free_page((unsigned long)dma_dom->aperture[index]->bitmap); | 855 | free_page((unsigned long)dma_dom->aperture[index]->bitmap); |
774 | 856 | ||
775 | kfree(dma_dom->aperture[index]); | 857 | kfree(dma_dom->aperture[index]); |
@@ -1009,7 +1091,7 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu) | |||
1009 | dma_dom->domain.id = domain_id_alloc(); | 1091 | dma_dom->domain.id = domain_id_alloc(); |
1010 | if (dma_dom->domain.id == 0) | 1092 | if (dma_dom->domain.id == 0) |
1011 | goto free_dma_dom; | 1093 | goto free_dma_dom; |
1012 | dma_dom->domain.mode = PAGE_MODE_3_LEVEL; | 1094 | dma_dom->domain.mode = PAGE_MODE_2_LEVEL; |
1013 | dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL); | 1095 | dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL); |
1014 | dma_dom->domain.flags = PD_DMA_OPS_MASK; | 1096 | dma_dom->domain.flags = PD_DMA_OPS_MASK; |
1015 | dma_dom->domain.priv = dma_dom; | 1097 | dma_dom->domain.priv = dma_dom; |
@@ -1063,6 +1145,41 @@ static struct protection_domain *domain_for_device(u16 devid) | |||
1063 | return dom; | 1145 | return dom; |
1064 | } | 1146 | } |
1065 | 1147 | ||
1148 | static void set_dte_entry(u16 devid, struct protection_domain *domain) | ||
1149 | { | ||
1150 | u64 pte_root = virt_to_phys(domain->pt_root); | ||
1151 | |||
1152 | pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK) | ||
1153 | << DEV_ENTRY_MODE_SHIFT; | ||
1154 | pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV; | ||
1155 | |||
1156 | amd_iommu_dev_table[devid].data[2] = domain->id; | ||
1157 | amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root); | ||
1158 | amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root); | ||
1159 | |||
1160 | amd_iommu_pd_table[devid] = domain; | ||
1161 | } | ||
1162 | |||
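set_dte_entry() splits the 64-bit page-table root across two 32-bit device-table words and writes data[2] and data[1] before data[0], which (assuming the present bit sits in the low word, as IOMMU_PTE_P suggests) makes the entry valid only once everything else is in place. A minimal sketch of the split, with generic casts standing in for the kernel's lower_32_bits()/upper_32_bits() helpers:

/*
 * Minimal sketch of splitting a 64-bit page-table root across two
 * 32-bit device-table words, mirroring what lower_32_bits() and
 * upper_32_bits() do in set_dte_entry(). Example value only.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t pte_root = 0x000000012345f000ULL;   /* example root */
        uint32_t data0 = (uint32_t)pte_root;         /* lower_32_bits() */
        uint32_t data1 = (uint32_t)(pte_root >> 32); /* upper_32_bits() */

        printf("data[0]=0x%08x data[1]=0x%08x\n", data0, data1);
        return 0;
}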
1163 | /* | ||
1164 | * If a device is not yet associated with a domain, this function | ||
1165 | * assigns it to the domain and makes it visible to the hardware | ||
1166 | */ | ||
1167 | static void __attach_device(struct amd_iommu *iommu, | ||
1168 | struct protection_domain *domain, | ||
1169 | u16 devid) | ||
1170 | { | ||
1171 | /* lock domain */ | ||
1172 | spin_lock(&domain->lock); | ||
1173 | |||
1174 | /* update DTE entry */ | ||
1175 | set_dte_entry(devid, domain); | ||
1176 | |||
1177 | domain->dev_cnt += 1; | ||
1178 | |||
1179 | /* ready */ | ||
1180 | spin_unlock(&domain->lock); | ||
1181 | } | ||
1182 | |||
1066 | /* | 1183 | /* |
1067 | * If a device is not yet associated with a domain, this function | 1184 | * If a device is not yet associated with a domain, this function
1068 | * assigns it to the domain and makes it visible to the hardware | 1185 | * assigns it to the domain and makes it visible to the hardware
@@ -1072,27 +1189,16 @@ static void attach_device(struct amd_iommu *iommu, | |||
1072 | u16 devid) | 1189 | u16 devid) |
1073 | { | 1190 | { |
1074 | unsigned long flags; | 1191 | unsigned long flags; |
1075 | u64 pte_root = virt_to_phys(domain->pt_root); | ||
1076 | |||
1077 | domain->dev_cnt += 1; | ||
1078 | |||
1079 | pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK) | ||
1080 | << DEV_ENTRY_MODE_SHIFT; | ||
1081 | pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV; | ||
1082 | 1192 | ||
1083 | write_lock_irqsave(&amd_iommu_devtable_lock, flags); | 1193 | write_lock_irqsave(&amd_iommu_devtable_lock, flags); |
1084 | amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root); | 1194 | __attach_device(iommu, domain, devid); |
1085 | amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root); | ||
1086 | amd_iommu_dev_table[devid].data[2] = domain->id; | ||
1087 | |||
1088 | amd_iommu_pd_table[devid] = domain; | ||
1089 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); | 1195 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); |
1090 | 1196 | ||
1091 | /* | 1197 | /* |
1092 | * We might boot into a crash-kernel here. The crashed kernel | 1198 | * We might boot into a crash-kernel here. The crashed kernel |
1093 | * left the caches in the IOMMU dirty. So we have to flush | 1199 | * left the caches in the IOMMU dirty. So we have to flush |
1094 | * here to evict all dirty stuff. | 1200 | * here to evict all dirty stuff. |
1095 | */ | 1201 | */ |
1096 | iommu_queue_inv_dev_entry(iommu, devid); | 1202 | iommu_queue_inv_dev_entry(iommu, devid); |
1097 | iommu_flush_tlb_pde(iommu, domain->id); | 1203 | iommu_flush_tlb_pde(iommu, domain->id); |
1098 | } | 1204 | } |
@@ -1119,6 +1225,15 @@ static void __detach_device(struct protection_domain *domain, u16 devid) | |||
1119 | 1225 | ||
1120 | /* ready */ | 1226 | /* ready */ |
1121 | spin_unlock(&domain->lock); | 1227 | spin_unlock(&domain->lock); |
1228 | |||
1229 | /* | ||
1230 | * If we run in passthrough mode the device must be assigned to the | ||
1231 | * passthrough domain if it is detached from any other domain | ||
1232 | */ | ||
1233 | if (iommu_pass_through) { | ||
1234 | struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; | ||
1235 | __attach_device(iommu, pt_domain, devid); | ||
1236 | } | ||
1122 | } | 1237 | } |
1123 | 1238 | ||
1124 | /* | 1239 | /* |
@@ -1164,6 +1279,8 @@ static int device_change_notifier(struct notifier_block *nb, | |||
1164 | case BUS_NOTIFY_UNBOUND_DRIVER: | 1279 | case BUS_NOTIFY_UNBOUND_DRIVER: |
1165 | if (!domain) | 1280 | if (!domain) |
1166 | goto out; | 1281 | goto out; |
1282 | if (iommu_pass_through) | ||
1283 | break; | ||
1167 | detach_device(domain, devid); | 1284 | detach_device(domain, devid); |
1168 | break; | 1285 | break; |
1169 | case BUS_NOTIFY_ADD_DEVICE: | 1286 | case BUS_NOTIFY_ADD_DEVICE: |
@@ -1292,39 +1409,91 @@ static int get_device_resources(struct device *dev, | |||
1292 | return 1; | 1409 | return 1; |
1293 | } | 1410 | } |
1294 | 1411 | ||
1412 | static void update_device_table(struct protection_domain *domain) | ||
1413 | { | ||
1414 | unsigned long flags; | ||
1415 | int i; | ||
1416 | |||
1417 | for (i = 0; i <= amd_iommu_last_bdf; ++i) { | ||
1418 | if (amd_iommu_pd_table[i] != domain) | ||
1419 | continue; | ||
1420 | write_lock_irqsave(&amd_iommu_devtable_lock, flags); | ||
1421 | set_dte_entry(i, domain); | ||
1422 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); | ||
1423 | } | ||
1424 | } | ||
1425 | |||
1426 | static void update_domain(struct protection_domain *domain) | ||
1427 | { | ||
1428 | if (!domain->updated) | ||
1429 | return; | ||
1430 | |||
1431 | update_device_table(domain); | ||
1432 | flush_devices_by_domain(domain); | ||
1433 | iommu_flush_domain(domain->id); | ||
1434 | |||
1435 | domain->updated = false; | ||
1436 | } | ||
1437 | |||
1295 | /* | 1438 | /* |
1296 | * If the pte_page is not yet allocated this function is called | 1439 | * This function is used to add another level to an IO page table. Adding
1440 | * another level increases the size of the address space by 9 bits, up to
1441 | * a maximum of 64 bits.
1297 | */ | 1442 | */ |
1298 | static u64* alloc_pte(struct protection_domain *dom, | 1443 | static bool increase_address_space(struct protection_domain *domain, |
1299 | unsigned long address, u64 **pte_page, gfp_t gfp) | 1444 | gfp_t gfp) |
1445 | { | ||
1446 | u64 *pte; | ||
1447 | |||
1448 | if (domain->mode == PAGE_MODE_6_LEVEL) | ||
1449 | /* address space already 64 bit large */ | ||
1450 | return false; | ||
1451 | |||
1452 | pte = (void *)get_zeroed_page(gfp); | ||
1453 | if (!pte) | ||
1454 | return false; | ||
1455 | |||
1456 | *pte = PM_LEVEL_PDE(domain->mode, | ||
1457 | virt_to_phys(domain->pt_root)); | ||
1458 | domain->pt_root = pte; | ||
1459 | domain->mode += 1; | ||
1460 | domain->updated = true; | ||
1461 | |||
1462 | return true; | ||
1463 | } | ||
1464 | |||
1465 | static u64 *alloc_pte(struct protection_domain *domain, | ||
1466 | unsigned long address, | ||
1467 | int end_lvl, | ||
1468 | u64 **pte_page, | ||
1469 | gfp_t gfp) | ||
1300 | { | 1470 | { |
1301 | u64 *pte, *page; | 1471 | u64 *pte, *page; |
1472 | int level; | ||
1302 | 1473 | ||
1303 | pte = &dom->pt_root[IOMMU_PTE_L2_INDEX(address)]; | 1474 | while (address > PM_LEVEL_SIZE(domain->mode)) |
1475 | increase_address_space(domain, gfp); | ||
1304 | 1476 | ||
1305 | if (!IOMMU_PTE_PRESENT(*pte)) { | 1477 | level = domain->mode - 1; |
1306 | page = (u64 *)get_zeroed_page(gfp); | 1478 | pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; |
1307 | if (!page) | ||
1308 | return NULL; | ||
1309 | *pte = IOMMU_L2_PDE(virt_to_phys(page)); | ||
1310 | } | ||
1311 | 1479 | ||
1312 | pte = IOMMU_PTE_PAGE(*pte); | 1480 | while (level > end_lvl) { |
1313 | pte = &pte[IOMMU_PTE_L1_INDEX(address)]; | 1481 | if (!IOMMU_PTE_PRESENT(*pte)) { |
1482 | page = (u64 *)get_zeroed_page(gfp); | ||
1483 | if (!page) | ||
1484 | return NULL; | ||
1485 | *pte = PM_LEVEL_PDE(level, virt_to_phys(page)); | ||
1486 | } | ||
1314 | 1487 | ||
1315 | if (!IOMMU_PTE_PRESENT(*pte)) { | 1488 | level -= 1; |
1316 | page = (u64 *)get_zeroed_page(gfp); | ||
1317 | if (!page) | ||
1318 | return NULL; | ||
1319 | *pte = IOMMU_L1_PDE(virt_to_phys(page)); | ||
1320 | } | ||
1321 | 1489 | ||
1322 | pte = IOMMU_PTE_PAGE(*pte); | 1490 | pte = IOMMU_PTE_PAGE(*pte); |
1323 | 1491 | ||
1324 | if (pte_page) | 1492 | if (pte_page && level == end_lvl) |
1325 | *pte_page = pte; | 1493 | *pte_page = pte; |
1326 | 1494 | ||
1327 | pte = &pte[IOMMU_PTE_L0_INDEX(address)]; | 1495 | pte = &pte[PM_LEVEL_INDEX(level, address)]; |
1496 | } | ||
1328 | 1497 | ||
1329 | return pte; | 1498 | return pte; |
1330 | } | 1499 | } |
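alloc_pte() first grows the page table until the target address fits. Under the 9-bits-per-level assumption, each increase_address_space() call buys 9 more address bits on top of the 12-bit page offset; mode 6 reaches 66 >= 64 bits, which is why the function refuses to grow further. A worked check of that arithmetic (the 12 + 9*mode formula is an assumption, but it is consistent with the old 512GB/3-level limit removed earlier in this patch):

/*
 * Sketch of how the reachable address space grows with the page-table
 * mode: 12 offset bits plus 9 index bits per level (an assumption
 * matching the removed "512GB for 3 levels" limit: 12 + 9*3 = 39 bits).
 */
#include <stdio.h>

int main(void)
{
        int mode;

        for (mode = 1; mode <= 6; ++mode) {
                int bits = 12 + 9 * mode;
                printf("mode %d: %d address bits%s\n", mode, bits,
                       bits >= 64 ? " (full 64-bit space)" : "");
        }
        return 0;
}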
@@ -1344,10 +1513,13 @@ static u64* dma_ops_get_pte(struct dma_ops_domain *dom, | |||
1344 | 1513 | ||
1345 | pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)]; | 1514 | pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)]; |
1346 | if (!pte) { | 1515 | if (!pte) { |
1347 | pte = alloc_pte(&dom->domain, address, &pte_page, GFP_ATOMIC); | 1516 | pte = alloc_pte(&dom->domain, address, PM_MAP_4k, &pte_page, |
1517 | GFP_ATOMIC); | ||
1348 | aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page; | 1518 | aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page; |
1349 | } else | 1519 | } else |
1350 | pte += IOMMU_PTE_L0_INDEX(address); | 1520 | pte += PM_LEVEL_INDEX(0, address); |
1521 | |||
1522 | update_domain(&dom->domain); | ||
1351 | 1523 | ||
1352 | return pte; | 1524 | return pte; |
1353 | } | 1525 | } |
@@ -1409,7 +1581,7 @@ static void dma_ops_domain_unmap(struct amd_iommu *iommu, | |||
1409 | if (!pte) | 1581 | if (!pte) |
1410 | return; | 1582 | return; |
1411 | 1583 | ||
1412 | pte += IOMMU_PTE_L0_INDEX(address); | 1584 | pte += PM_LEVEL_INDEX(0, address); |
1413 | 1585 | ||
1414 | WARN_ON(!*pte); | 1586 | WARN_ON(!*pte); |
1415 | 1587 | ||
@@ -1988,19 +2160,47 @@ static void cleanup_domain(struct protection_domain *domain) | |||
1988 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); | 2160 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); |
1989 | } | 2161 | } |
1990 | 2162 | ||
1991 | static int amd_iommu_domain_init(struct iommu_domain *dom) | 2163 | static void protection_domain_free(struct protection_domain *domain) |
2164 | { | ||
2165 | if (!domain) | ||
2166 | return; | ||
2167 | |||
2168 | if (domain->id) | ||
2169 | domain_id_free(domain->id); | ||
2170 | |||
2171 | kfree(domain); | ||
2172 | } | ||
2173 | |||
2174 | static struct protection_domain *protection_domain_alloc(void) | ||
1992 | { | 2175 | { |
1993 | struct protection_domain *domain; | 2176 | struct protection_domain *domain; |
1994 | 2177 | ||
1995 | domain = kzalloc(sizeof(*domain), GFP_KERNEL); | 2178 | domain = kzalloc(sizeof(*domain), GFP_KERNEL); |
1996 | if (!domain) | 2179 | if (!domain) |
1997 | return -ENOMEM; | 2180 | return NULL; |
1998 | 2181 | ||
1999 | spin_lock_init(&domain->lock); | 2182 | spin_lock_init(&domain->lock); |
2000 | domain->mode = PAGE_MODE_3_LEVEL; | ||
2001 | domain->id = domain_id_alloc(); | 2183 | domain->id = domain_id_alloc(); |
2002 | if (!domain->id) | 2184 | if (!domain->id) |
2185 | goto out_err; | ||
2186 | |||
2187 | return domain; | ||
2188 | |||
2189 | out_err: | ||
2190 | kfree(domain); | ||
2191 | |||
2192 | return NULL; | ||
2193 | } | ||
2194 | |||
2195 | static int amd_iommu_domain_init(struct iommu_domain *dom) | ||
2196 | { | ||
2197 | struct protection_domain *domain; | ||
2198 | |||
2199 | domain = protection_domain_alloc(); | ||
2200 | if (!domain) | ||
2003 | goto out_free; | 2201 | goto out_free; |
2202 | |||
2203 | domain->mode = PAGE_MODE_3_LEVEL; | ||
2004 | domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL); | 2204 | domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL); |
2005 | if (!domain->pt_root) | 2205 | if (!domain->pt_root) |
2006 | goto out_free; | 2206 | goto out_free; |
@@ -2010,7 +2210,7 @@ static int amd_iommu_domain_init(struct iommu_domain *dom) | |||
2010 | return 0; | 2210 | return 0; |
2011 | 2211 | ||
2012 | out_free: | 2212 | out_free: |
2013 | kfree(domain); | 2213 | protection_domain_free(domain); |
2014 | 2214 | ||
2015 | return -ENOMEM; | 2215 | return -ENOMEM; |
2016 | } | 2216 | } |
@@ -2115,7 +2315,7 @@ static int amd_iommu_map_range(struct iommu_domain *dom, | |||
2115 | paddr &= PAGE_MASK; | 2315 | paddr &= PAGE_MASK; |
2116 | 2316 | ||
2117 | for (i = 0; i < npages; ++i) { | 2317 | for (i = 0; i < npages; ++i) { |
2118 | ret = iommu_map_page(domain, iova, paddr, prot); | 2318 | ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k); |
2119 | if (ret) | 2319 | if (ret) |
2120 | return ret; | 2320 | return ret; |
2121 | 2321 | ||
@@ -2136,7 +2336,7 @@ static void amd_iommu_unmap_range(struct iommu_domain *dom, | |||
2136 | iova &= PAGE_MASK; | 2336 | iova &= PAGE_MASK; |
2137 | 2337 | ||
2138 | for (i = 0; i < npages; ++i) { | 2338 | for (i = 0; i < npages; ++i) { |
2139 | iommu_unmap_page(domain, iova); | 2339 | iommu_unmap_page(domain, iova, PM_MAP_4k); |
2140 | iova += PAGE_SIZE; | 2340 | iova += PAGE_SIZE; |
2141 | } | 2341 | } |
2142 | 2342 | ||
@@ -2151,21 +2351,9 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom, | |||
2151 | phys_addr_t paddr; | 2351 | phys_addr_t paddr; |
2152 | u64 *pte; | 2352 | u64 *pte; |
2153 | 2353 | ||
2154 | pte = &domain->pt_root[IOMMU_PTE_L2_INDEX(iova)]; | 2354 | pte = fetch_pte(domain, iova, PM_MAP_4k); |
2155 | |||
2156 | if (!IOMMU_PTE_PRESENT(*pte)) | ||
2157 | return 0; | ||
2158 | |||
2159 | pte = IOMMU_PTE_PAGE(*pte); | ||
2160 | pte = &pte[IOMMU_PTE_L1_INDEX(iova)]; | ||
2161 | |||
2162 | if (!IOMMU_PTE_PRESENT(*pte)) | ||
2163 | return 0; | ||
2164 | |||
2165 | pte = IOMMU_PTE_PAGE(*pte); | ||
2166 | pte = &pte[IOMMU_PTE_L0_INDEX(iova)]; | ||
2167 | 2355 | ||
2168 | if (!IOMMU_PTE_PRESENT(*pte)) | 2356 | if (!pte || !IOMMU_PTE_PRESENT(*pte)) |
2169 | return 0; | 2357 | return 0; |
2170 | 2358 | ||
2171 | paddr = *pte & IOMMU_PAGE_MASK; | 2359 | paddr = *pte & IOMMU_PAGE_MASK; |
@@ -2191,3 +2379,46 @@ static struct iommu_ops amd_iommu_ops = { | |||
2191 | .domain_has_cap = amd_iommu_domain_has_cap, | 2379 | .domain_has_cap = amd_iommu_domain_has_cap, |
2192 | }; | 2380 | }; |
2193 | 2381 | ||
2382 | /***************************************************************************** | ||
2383 | * | ||
2384 | * The next functions do a basic initialization of the IOMMU for pass-through | ||
2385 | * mode | ||
2386 | * | ||
2387 | * In passthrough mode the IOMMU is initialized and enabled but not used for | ||
2388 | * DMA-API translation. | ||
2389 | * | ||
2390 | *****************************************************************************/ | ||
2391 | |||
2392 | int __init amd_iommu_init_passthrough(void) | ||
2393 | { | ||
2394 | struct pci_dev *dev = NULL; | ||
2395 | u16 devid, devid2; | ||
2396 | |||
2397 | /* allocate passthrough domain */ | ||
2398 | pt_domain = protection_domain_alloc(); | ||
2399 | if (!pt_domain) | ||
2400 | return -ENOMEM; | ||
2401 | |||
2402 | pt_domain->mode |= PAGE_MODE_NONE; | ||
2403 | |||
2404 | while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { | ||
2405 | struct amd_iommu *iommu; | ||
2406 | |||
2407 | devid = calc_devid(dev->bus->number, dev->devfn); | ||
2408 | if (devid > amd_iommu_last_bdf) | ||
2409 | continue; | ||
2410 | |||
2411 | devid2 = amd_iommu_alias_table[devid]; | ||
2412 | |||
2413 | iommu = amd_iommu_rlookup_table[devid2]; | ||
2414 | if (!iommu) | ||
2415 | continue; | ||
2416 | |||
2417 | __attach_device(iommu, pt_domain, devid); | ||
2418 | __attach_device(iommu, pt_domain, devid2); | ||
2419 | } | ||
2420 | |||
2421 | pr_info("AMD-Vi: Initialized for Passthrough Mode\n"); | ||
2422 | |||
2423 | return 0; | ||
2424 | } | ||
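calc_devid() is not shown in this diff; for PCI it is conventionally the 16-bit requester ID bus << 8 | devfn. The helper below is a hedged model of that assumption, not the kernel's definition:

/*
 * Assumed behaviour of calc_devid(): a 16-bit PCI requester ID built
 * from bus number and devfn, following the usual BDF encoding. Not
 * copied from the kernel header.
 */
#include <stdint.h>
#include <stdio.h>

static inline uint16_t calc_devid_model(uint8_t bus, uint8_t devfn)
{
        return ((uint16_t)bus << 8) | devfn;
}

int main(void)
{
        /* device 00:02.0 -> devfn = (slot << 3) | func = 0x10 */
        printf("devid=0x%04x\n", calc_devid_model(0x00, 0x10));
        return 0;
}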
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c index c1b17e97252e..b4b61d462dcc 100644 --- a/arch/x86/kernel/amd_iommu_init.c +++ b/arch/x86/kernel/amd_iommu_init.c | |||
@@ -252,7 +252,7 @@ static void __init iommu_feature_disable(struct amd_iommu *iommu, u8 bit) | |||
252 | /* Function to enable the hardware */ | 252 | /* Function to enable the hardware */ |
253 | static void iommu_enable(struct amd_iommu *iommu) | 253 | static void iommu_enable(struct amd_iommu *iommu) |
254 | { | 254 | { |
255 | printk(KERN_INFO "AMD IOMMU: Enabling IOMMU at %s cap 0x%hx\n", | 255 | printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx\n", |
256 | dev_name(&iommu->dev->dev), iommu->cap_ptr); | 256 | dev_name(&iommu->dev->dev), iommu->cap_ptr); |
257 | 257 | ||
258 | iommu_feature_enable(iommu, CONTROL_IOMMU_EN); | 258 | iommu_feature_enable(iommu, CONTROL_IOMMU_EN); |
@@ -435,6 +435,20 @@ static u8 * __init alloc_command_buffer(struct amd_iommu *iommu) | |||
435 | } | 435 | } |
436 | 436 | ||
437 | /* | 437 | /* |
438 | * This function resets the command buffer if the IOMMU stopped fetching | ||
439 | * commands from it. | ||
440 | */ | ||
441 | void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu) | ||
442 | { | ||
443 | iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); | ||
444 | |||
445 | writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); | ||
446 | writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); | ||
447 | |||
448 | iommu_feature_enable(iommu, CONTROL_CMDBUF_EN); | ||
449 | } | ||
450 | |||
451 | /* | ||
438 | * This function writes the command buffer address to the hardware and | 452 | * This function writes the command buffer address to the hardware and |
439 | * enables it. | 453 | * enables it. |
440 | */ | 454 | */ |
@@ -450,11 +464,7 @@ static void iommu_enable_command_buffer(struct amd_iommu *iommu) | |||
450 | memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET, | 464 | memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET, |
451 | &entry, sizeof(entry)); | 465 | &entry, sizeof(entry)); |
452 | 466 | ||
453 | /* set head and tail to zero manually */ | 467 | amd_iommu_reset_cmd_buffer(iommu); |
454 | writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); | ||
455 | writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); | ||
456 | |||
457 | iommu_feature_enable(iommu, CONTROL_CMDBUF_EN); | ||
458 | } | 468 | } |
459 | 469 | ||
460 | static void __init free_command_buffer(struct amd_iommu *iommu) | 470 | static void __init free_command_buffer(struct amd_iommu *iommu) |
@@ -858,7 +868,7 @@ static int __init init_iommu_all(struct acpi_table_header *table) | |||
858 | switch (*p) { | 868 | switch (*p) { |
859 | case ACPI_IVHD_TYPE: | 869 | case ACPI_IVHD_TYPE: |
860 | 870 | ||
861 | DUMP_printk("IOMMU: device: %02x:%02x.%01x cap: %04x " | 871 | DUMP_printk("device: %02x:%02x.%01x cap: %04x " |
862 | "seg: %d flags: %01x info %04x\n", | 872 | "seg: %d flags: %01x info %04x\n", |
863 | PCI_BUS(h->devid), PCI_SLOT(h->devid), | 873 | PCI_BUS(h->devid), PCI_SLOT(h->devid), |
864 | PCI_FUNC(h->devid), h->cap_ptr, | 874 | PCI_FUNC(h->devid), h->cap_ptr, |
@@ -902,7 +912,7 @@ static int __init iommu_setup_msi(struct amd_iommu *iommu) | |||
902 | 912 | ||
903 | r = request_irq(iommu->dev->irq, amd_iommu_int_handler, | 913 | r = request_irq(iommu->dev->irq, amd_iommu_int_handler, |
904 | IRQF_SAMPLE_RANDOM, | 914 | IRQF_SAMPLE_RANDOM, |
905 | "AMD IOMMU", | 915 | "AMD-Vi", |
906 | NULL); | 916 | NULL); |
907 | 917 | ||
908 | if (r) { | 918 | if (r) { |
@@ -1150,7 +1160,7 @@ int __init amd_iommu_init(void) | |||
1150 | 1160 | ||
1151 | 1161 | ||
1152 | if (no_iommu) { | 1162 | if (no_iommu) { |
1153 | printk(KERN_INFO "AMD IOMMU disabled by kernel command line\n"); | 1163 | printk(KERN_INFO "AMD-Vi disabled by kernel command line\n"); |
1154 | return 0; | 1164 | return 0; |
1155 | } | 1165 | } |
1156 | 1166 | ||
@@ -1242,22 +1252,28 @@ int __init amd_iommu_init(void) | |||
1242 | if (ret) | 1252 | if (ret) |
1243 | goto free; | 1253 | goto free; |
1244 | 1254 | ||
1245 | ret = amd_iommu_init_dma_ops(); | 1255 | if (iommu_pass_through) |
1256 | ret = amd_iommu_init_passthrough(); | ||
1257 | else | ||
1258 | ret = amd_iommu_init_dma_ops(); | ||
1246 | if (ret) | 1259 | if (ret) |
1247 | goto free; | 1260 | goto free; |
1248 | 1261 | ||
1249 | enable_iommus(); | 1262 | enable_iommus(); |
1250 | 1263 | ||
1251 | printk(KERN_INFO "AMD IOMMU: device isolation "); | 1264 | if (iommu_pass_through) |
1265 | goto out; | ||
1266 | |||
1267 | printk(KERN_INFO "AMD-Vi: device isolation "); | ||
1252 | if (amd_iommu_isolate) | 1268 | if (amd_iommu_isolate) |
1253 | printk("enabled\n"); | 1269 | printk("enabled\n"); |
1254 | else | 1270 | else |
1255 | printk("disabled\n"); | 1271 | printk("disabled\n"); |
1256 | 1272 | ||
1257 | if (amd_iommu_unmap_flush) | 1273 | if (amd_iommu_unmap_flush) |
1258 | printk(KERN_INFO "AMD IOMMU: IO/TLB flush on unmap enabled\n"); | 1274 | printk(KERN_INFO "AMD-Vi: IO/TLB flush on unmap enabled\n"); |
1259 | else | 1275 | else |
1260 | printk(KERN_INFO "AMD IOMMU: Lazy IO/TLB flushing enabled\n"); | 1276 | printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n"); |
1261 | 1277 | ||
1262 | out: | 1278 | out: |
1263 | return ret; | 1279 | return ret; |
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c index 676debfc1702..128111d8ffe0 100644 --- a/arch/x86/kernel/aperture_64.c +++ b/arch/x86/kernel/aperture_64.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/bitops.h> | 20 | #include <linux/bitops.h> |
21 | #include <linux/ioport.h> | 21 | #include <linux/ioport.h> |
22 | #include <linux/suspend.h> | 22 | #include <linux/suspend.h> |
23 | #include <linux/kmemleak.h> | ||
23 | #include <asm/e820.h> | 24 | #include <asm/e820.h> |
24 | #include <asm/io.h> | 25 | #include <asm/io.h> |
25 | #include <asm/iommu.h> | 26 | #include <asm/iommu.h> |
@@ -94,6 +95,11 @@ static u32 __init allocate_aperture(void) | |||
94 | * code for safe | 95 | * code for safe |
95 | */ | 96 | */ |
96 | p = __alloc_bootmem_nopanic(aper_size, aper_size, 512ULL<<20); | 97 | p = __alloc_bootmem_nopanic(aper_size, aper_size, 512ULL<<20); |
98 | /* | ||
99 | * Kmemleak should not scan this block as it may not be mapped via the | ||
100 | * kernel direct mapping. | ||
101 | */ | ||
102 | kmemleak_ignore(p); | ||
97 | if (!p || __pa(p)+aper_size > 0xffffffff) { | 103 | if (!p || __pa(p)+aper_size > 0xffffffff) { |
98 | printk(KERN_ERR | 104 | printk(KERN_ERR |
99 | "Cannot allocate aperture memory hole (%p,%uK)\n", | 105 | "Cannot allocate aperture memory hole (%p,%uK)\n", |
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c index b3025b43b63a..db7220220d09 100644 --- a/arch/x86/kernel/apic/nmi.c +++ b/arch/x86/kernel/apic/nmi.c | |||
@@ -39,7 +39,7 @@ | |||
39 | int unknown_nmi_panic; | 39 | int unknown_nmi_panic; |
40 | int nmi_watchdog_enabled; | 40 | int nmi_watchdog_enabled; |
41 | 41 | ||
42 | static cpumask_var_t backtrace_mask; | 42 | static cpumask_t backtrace_mask __read_mostly; |
43 | 43 | ||
44 | /* nmi_active: | 44 | /* nmi_active: |
45 | * >0: the lapic NMI watchdog is active, but can be disabled | 45 | * >0: the lapic NMI watchdog is active, but can be disabled |
@@ -138,7 +138,6 @@ int __init check_nmi_watchdog(void) | |||
138 | if (!prev_nmi_count) | 138 | if (!prev_nmi_count) |
139 | goto error; | 139 | goto error; |
140 | 140 | ||
141 | alloc_cpumask_var(&backtrace_mask, GFP_KERNEL|__GFP_ZERO); | ||
142 | printk(KERN_INFO "Testing NMI watchdog ... "); | 141 | printk(KERN_INFO "Testing NMI watchdog ... "); |
143 | 142 | ||
144 | #ifdef CONFIG_SMP | 143 | #ifdef CONFIG_SMP |
@@ -415,14 +414,17 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason) | |||
415 | } | 414 | } |
416 | 415 | ||
417 | /* We can be called before check_nmi_watchdog, hence NULL check. */ | 416 | /* We can be called before check_nmi_watchdog, hence NULL check. */ |
418 | if (backtrace_mask != NULL && cpumask_test_cpu(cpu, backtrace_mask)) { | 417 | if (cpumask_test_cpu(cpu, &backtrace_mask)) { |
419 | static DEFINE_SPINLOCK(lock); /* Serialise the printks */ | 418 | static DEFINE_SPINLOCK(lock); /* Serialise the printks */ |
420 | 419 | ||
421 | spin_lock(&lock); | 420 | spin_lock(&lock); |
422 | printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu); | 421 | printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu); |
422 | show_regs(regs); | ||
423 | dump_stack(); | 423 | dump_stack(); |
424 | spin_unlock(&lock); | 424 | spin_unlock(&lock); |
425 | cpumask_clear_cpu(cpu, backtrace_mask); | 425 | cpumask_clear_cpu(cpu, &backtrace_mask); |
426 | |||
427 | rc = 1; | ||
426 | } | 428 | } |
427 | 429 | ||
428 | /* Could check oops_in_progress here too, but it's safer not to */ | 430 | /* Could check oops_in_progress here too, but it's safer not to */ |
@@ -552,14 +554,18 @@ int do_nmi_callback(struct pt_regs *regs, int cpu) | |||
552 | return 0; | 554 | return 0; |
553 | } | 555 | } |
554 | 556 | ||
555 | void __trigger_all_cpu_backtrace(void) | 557 | void arch_trigger_all_cpu_backtrace(void) |
556 | { | 558 | { |
557 | int i; | 559 | int i; |
558 | 560 | ||
559 | cpumask_copy(backtrace_mask, cpu_online_mask); | 561 | cpumask_copy(&backtrace_mask, cpu_online_mask); |
562 | |||
563 | printk(KERN_INFO "sending NMI to all CPUs:\n"); | ||
564 | apic->send_IPI_all(NMI_VECTOR); | ||
565 | |||
560 | /* Wait for up to 10 seconds for all CPUs to do the backtrace */ | 566 | /* Wait for up to 10 seconds for all CPUs to do the backtrace */ |
561 | for (i = 0; i < 10 * 1000; i++) { | 567 | for (i = 0; i < 10 * 1000; i++) { |
562 | if (cpumask_empty(backtrace_mask)) | 568 | if (cpumask_empty(&backtrace_mask)) |
563 | break; | 569 | break; |
564 | mdelay(1); | 570 | mdelay(1); |
565 | } | 571 | } |
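The handshake above is: mark every online CPU in backtrace_mask, send an NMI to all of them, then poll for up to 10 seconds (10000 x 1 ms) while each CPU's NMI handler prints its backtrace and clears its own bit. A sequential user-space model of that flow (no real NMIs or delays, just the bookkeeping):

/*
 * Sequential model of the backtrace handshake: the trigger marks every
 * CPU, each "handler" clears its own bit, and the trigger polls with a
 * bounded iteration count (10000 x 1ms mdelay in the kernel code).
 */
#include <stdio.h>

#define NR_CPUS 4

int main(void)
{
        unsigned int backtrace_mask = (1u << NR_CPUS) - 1; /* all online */
        int i, cpu;

        for (i = 0; i < 10 * 1000 && backtrace_mask; i++) {
                /* each NMI handler clears its own bit after dumping */
                cpu = i % NR_CPUS;
                if (backtrace_mask & (1u << cpu)) {
                        printf("NMI backtrace for cpu %d\n", cpu);
                        backtrace_mask &= ~(1u << cpu);
                }
        }
        return 0;
}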
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c index 898ecc47e129..4a6aeedcd965 100644 --- a/arch/x86/kernel/asm-offsets_64.c +++ b/arch/x86/kernel/asm-offsets_64.c | |||
@@ -3,6 +3,7 @@ | |||
3 | * This code generates raw asm output which is post-processed to extract | 3 | * This code generates raw asm output which is post-processed to extract |
4 | * and format the required data. | 4 | * and format the required data. |
5 | */ | 5 | */ |
6 | #define COMPILE_OFFSETS | ||
6 | 7 | ||
7 | #include <linux/crypto.h> | 8 | #include <linux/crypto.h> |
8 | #include <linux/sched.h> | 9 | #include <linux/sched.h> |
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 900332b800f8..f9cd0849bd42 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c | |||
@@ -6,6 +6,7 @@ | |||
6 | * Copyright (C) 2009 Jaswinder Singh Rajput | 6 | * Copyright (C) 2009 Jaswinder Singh Rajput |
7 | * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter | 7 | * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter |
8 | * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> | 8 | * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> |
9 | * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com> | ||
9 | * | 10 | * |
10 | * For licencing details see kernel-base/COPYING | 11 | * For licencing details see kernel-base/COPYING |
11 | */ | 12 | */ |
@@ -20,6 +21,7 @@ | |||
20 | #include <linux/sched.h> | 21 | #include <linux/sched.h> |
21 | #include <linux/uaccess.h> | 22 | #include <linux/uaccess.h> |
22 | #include <linux/highmem.h> | 23 | #include <linux/highmem.h> |
24 | #include <linux/cpu.h> | ||
23 | 25 | ||
24 | #include <asm/apic.h> | 26 | #include <asm/apic.h> |
25 | #include <asm/stacktrace.h> | 27 | #include <asm/stacktrace.h> |
@@ -27,12 +29,52 @@ | |||
27 | 29 | ||
28 | static u64 perf_counter_mask __read_mostly; | 30 | static u64 perf_counter_mask __read_mostly; |
29 | 31 | ||
32 | /* The maximal number of PEBS counters: */ | ||
33 | #define MAX_PEBS_COUNTERS 4 | ||
34 | |||
35 | /* The size of a BTS record in bytes: */ | ||
36 | #define BTS_RECORD_SIZE 24 | ||
37 | |||
38 | /* The size of a per-cpu BTS buffer in bytes: */ | ||
39 | #define BTS_BUFFER_SIZE (BTS_RECORD_SIZE * 1024) | ||
40 | |||
41 | /* The BTS overflow threshold in bytes from the end of the buffer: */ | ||
42 | #define BTS_OVFL_TH (BTS_RECORD_SIZE * 64) | ||
43 | |||
44 | |||
45 | /* | ||
46 | * Bits in the debugctlmsr controlling branch tracing. | ||
47 | */ | ||
48 | #define X86_DEBUGCTL_TR (1 << 6) | ||
49 | #define X86_DEBUGCTL_BTS (1 << 7) | ||
50 | #define X86_DEBUGCTL_BTINT (1 << 8) | ||
51 | #define X86_DEBUGCTL_BTS_OFF_OS (1 << 9) | ||
52 | #define X86_DEBUGCTL_BTS_OFF_USR (1 << 10) | ||
53 | |||
54 | /* | ||
55 | * A debug store configuration. | ||
56 | * | ||
57 | * We only support architectures that use 64bit fields. | ||
58 | */ | ||
59 | struct debug_store { | ||
60 | u64 bts_buffer_base; | ||
61 | u64 bts_index; | ||
62 | u64 bts_absolute_maximum; | ||
63 | u64 bts_interrupt_threshold; | ||
64 | u64 pebs_buffer_base; | ||
65 | u64 pebs_index; | ||
66 | u64 pebs_absolute_maximum; | ||
67 | u64 pebs_interrupt_threshold; | ||
68 | u64 pebs_counter_reset[MAX_PEBS_COUNTERS]; | ||
69 | }; | ||
70 | |||
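Given the constants above, the buffer geometry that reserve_bts_hardware() programs later in this patch is straightforward arithmetic: 1024 records of 24 bytes each, with the interrupt threshold 64 records short of the absolute maximum. A worked check:

/*
 * Worked arithmetic for the BTS buffer geometry defined above:
 * 1024 records of 24 bytes, interrupt threshold 64 records before
 * the absolute end of the buffer.
 */
#include <stdio.h>

#define BTS_RECORD_SIZE 24
#define BTS_BUFFER_SIZE (BTS_RECORD_SIZE * 1024)
#define BTS_OVFL_TH     (BTS_RECORD_SIZE * 64)

int main(void)
{
        unsigned long base = 0x1000;    /* example kzalloc() result */
        unsigned long maximum = base + BTS_BUFFER_SIZE;
        unsigned long threshold = maximum - BTS_OVFL_TH;

        printf("buffer=%d bytes, threshold %lu bytes into the buffer\n",
               BTS_BUFFER_SIZE, threshold - base);  /* 24576, 23040 */
        return 0;
}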
30 | struct cpu_hw_counters { | 71 | struct cpu_hw_counters { |
31 | struct perf_counter *counters[X86_PMC_IDX_MAX]; | 72 | struct perf_counter *counters[X86_PMC_IDX_MAX]; |
32 | unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; | 73 | unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; |
33 | unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; | 74 | unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; |
34 | unsigned long interrupts; | 75 | unsigned long interrupts; |
35 | int enabled; | 76 | int enabled; |
77 | struct debug_store *ds; | ||
36 | }; | 78 | }; |
37 | 79 | ||
38 | /* | 80 | /* |
@@ -58,6 +100,8 @@ struct x86_pmu { | |||
58 | int apic; | 100 | int apic; |
59 | u64 max_period; | 101 | u64 max_period; |
60 | u64 intel_ctrl; | 102 | u64 intel_ctrl; |
103 | void (*enable_bts)(u64 config); | ||
104 | void (*disable_bts)(void); | ||
61 | }; | 105 | }; |
62 | 106 | ||
63 | static struct x86_pmu x86_pmu __read_mostly; | 107 | static struct x86_pmu x86_pmu __read_mostly; |
@@ -577,6 +621,9 @@ x86_perf_counter_update(struct perf_counter *counter, | |||
577 | u64 prev_raw_count, new_raw_count; | 621 | u64 prev_raw_count, new_raw_count; |
578 | s64 delta; | 622 | s64 delta; |
579 | 623 | ||
624 | if (idx == X86_PMC_IDX_FIXED_BTS) | ||
625 | return 0; | ||
626 | |||
580 | /* | 627 | /* |
581 | * Careful: an NMI might modify the previous counter value. | 628 | * Careful: an NMI might modify the previous counter value. |
582 | * | 629 | * |
@@ -666,10 +713,110 @@ static void release_pmc_hardware(void) | |||
666 | #endif | 713 | #endif |
667 | } | 714 | } |
668 | 715 | ||
716 | static inline bool bts_available(void) | ||
717 | { | ||
718 | return x86_pmu.enable_bts != NULL; | ||
719 | } | ||
720 | |||
721 | static inline void init_debug_store_on_cpu(int cpu) | ||
722 | { | ||
723 | struct debug_store *ds = per_cpu(cpu_hw_counters, cpu).ds; | ||
724 | |||
725 | if (!ds) | ||
726 | return; | ||
727 | |||
728 | wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, | ||
729 | (u32)((u64)(unsigned long)ds), | ||
730 | (u32)((u64)(unsigned long)ds >> 32)); | ||
731 | } | ||
732 | |||
733 | static inline void fini_debug_store_on_cpu(int cpu) | ||
734 | { | ||
735 | if (!per_cpu(cpu_hw_counters, cpu).ds) | ||
736 | return; | ||
737 | |||
738 | wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0); | ||
739 | } | ||
740 | |||
741 | static void release_bts_hardware(void) | ||
742 | { | ||
743 | int cpu; | ||
744 | |||
745 | if (!bts_available()) | ||
746 | return; | ||
747 | |||
748 | get_online_cpus(); | ||
749 | |||
750 | for_each_online_cpu(cpu) | ||
751 | fini_debug_store_on_cpu(cpu); | ||
752 | |||
753 | for_each_possible_cpu(cpu) { | ||
754 | struct debug_store *ds = per_cpu(cpu_hw_counters, cpu).ds; | ||
755 | |||
756 | if (!ds) | ||
757 | continue; | ||
758 | |||
759 | per_cpu(cpu_hw_counters, cpu).ds = NULL; | ||
760 | |||
761 | kfree((void *)(unsigned long)ds->bts_buffer_base); | ||
762 | kfree(ds); | ||
763 | } | ||
764 | |||
765 | put_online_cpus(); | ||
766 | } | ||
767 | |||
768 | static int reserve_bts_hardware(void) | ||
769 | { | ||
770 | int cpu, err = 0; | ||
771 | |||
772 | if (!bts_available()) | ||
773 | return 0; | ||
774 | |||
775 | get_online_cpus(); | ||
776 | |||
777 | for_each_possible_cpu(cpu) { | ||
778 | struct debug_store *ds; | ||
779 | void *buffer; | ||
780 | |||
781 | err = -ENOMEM; | ||
782 | buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL); | ||
783 | if (unlikely(!buffer)) | ||
784 | break; | ||
785 | |||
786 | ds = kzalloc(sizeof(*ds), GFP_KERNEL); | ||
787 | if (unlikely(!ds)) { | ||
788 | kfree(buffer); | ||
789 | break; | ||
790 | } | ||
791 | |||
792 | ds->bts_buffer_base = (u64)(unsigned long)buffer; | ||
793 | ds->bts_index = ds->bts_buffer_base; | ||
794 | ds->bts_absolute_maximum = | ||
795 | ds->bts_buffer_base + BTS_BUFFER_SIZE; | ||
796 | ds->bts_interrupt_threshold = | ||
797 | ds->bts_absolute_maximum - BTS_OVFL_TH; | ||
798 | |||
799 | per_cpu(cpu_hw_counters, cpu).ds = ds; | ||
800 | err = 0; | ||
801 | } | ||
802 | |||
803 | if (err) | ||
804 | release_bts_hardware(); | ||
805 | else { | ||
806 | for_each_online_cpu(cpu) | ||
807 | init_debug_store_on_cpu(cpu); | ||
808 | } | ||
809 | |||
810 | put_online_cpus(); | ||
811 | |||
812 | return err; | ||
813 | } | ||
814 | |||
669 | static void hw_perf_counter_destroy(struct perf_counter *counter) | 815 | static void hw_perf_counter_destroy(struct perf_counter *counter) |
670 | { | 816 | { |
671 | if (atomic_dec_and_mutex_lock(&active_counters, &pmc_reserve_mutex)) { | 817 | if (atomic_dec_and_mutex_lock(&active_counters, &pmc_reserve_mutex)) { |
672 | release_pmc_hardware(); | 818 | release_pmc_hardware(); |
819 | release_bts_hardware(); | ||
673 | mutex_unlock(&pmc_reserve_mutex); | 820 | mutex_unlock(&pmc_reserve_mutex); |
674 | } | 821 | } |
675 | } | 822 | } |
@@ -712,6 +859,42 @@ set_ext_hw_attr(struct hw_perf_counter *hwc, struct perf_counter_attr *attr) | |||
712 | return 0; | 859 | return 0; |
713 | } | 860 | } |
714 | 861 | ||
862 | static void intel_pmu_enable_bts(u64 config) | ||
863 | { | ||
864 | unsigned long debugctlmsr; | ||
865 | |||
866 | debugctlmsr = get_debugctlmsr(); | ||
867 | |||
868 | debugctlmsr |= X86_DEBUGCTL_TR; | ||
869 | debugctlmsr |= X86_DEBUGCTL_BTS; | ||
870 | debugctlmsr |= X86_DEBUGCTL_BTINT; | ||
871 | |||
872 | if (!(config & ARCH_PERFMON_EVENTSEL_OS)) | ||
873 | debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS; | ||
874 | |||
875 | if (!(config & ARCH_PERFMON_EVENTSEL_USR)) | ||
876 | debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR; | ||
877 | |||
878 | update_debugctlmsr(debugctlmsr); | ||
879 | } | ||
880 | |||
881 | static void intel_pmu_disable_bts(void) | ||
882 | { | ||
883 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); | ||
884 | unsigned long debugctlmsr; | ||
885 | |||
886 | if (!cpuc->ds) | ||
887 | return; | ||
888 | |||
889 | debugctlmsr = get_debugctlmsr(); | ||
890 | |||
891 | debugctlmsr &= | ||
892 | ~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT | | ||
893 | X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR); | ||
894 | |||
895 | update_debugctlmsr(debugctlmsr); | ||
896 | } | ||
897 | |||
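The debugctl configuration is pure bit arithmetic over the X86_DEBUGCTL_* defines introduced above. For the user-mode-only case that __hw_perf_counter_init() enforces later in this patch (ARCH_PERFMON_EVENTSEL_OS clear), the enable path produces the mask computed by this sketch, using the constants exactly as defined in the patch:

/*
 * Reproduces the debugctl mask intel_pmu_enable_bts() builds for a
 * user-mode-only configuration (ARCH_PERFMON_EVENTSEL_OS clear).
 */
#include <stdio.h>

#define X86_DEBUGCTL_TR          (1 << 6)
#define X86_DEBUGCTL_BTS         (1 << 7)
#define X86_DEBUGCTL_BTINT       (1 << 8)
#define X86_DEBUGCTL_BTS_OFF_OS  (1 << 9)
#define X86_DEBUGCTL_BTS_OFF_USR (1 << 10)

int main(void)
{
        unsigned long debugctlmsr = 0;

        debugctlmsr |= X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS |
                       X86_DEBUGCTL_BTINT;
        debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS; /* no kernel tracing */

        printf("debugctl=0x%lx\n", debugctlmsr); /* prints 0x3c0 */
        return 0;
}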
715 | /* | 898 | /* |
716 | * Setup the hardware configuration for a given attr_type | 899 | * Setup the hardware configuration for a given attr_type |
717 | */ | 900 | */ |
@@ -728,9 +911,13 @@ static int __hw_perf_counter_init(struct perf_counter *counter) | |||
728 | err = 0; | 911 | err = 0; |
729 | if (!atomic_inc_not_zero(&active_counters)) { | 912 | if (!atomic_inc_not_zero(&active_counters)) { |
730 | mutex_lock(&pmc_reserve_mutex); | 913 | mutex_lock(&pmc_reserve_mutex); |
731 | if (atomic_read(&active_counters) == 0 && !reserve_pmc_hardware()) | 914 | if (atomic_read(&active_counters) == 0) { |
732 | err = -EBUSY; | 915 | if (!reserve_pmc_hardware()) |
733 | else | 916 | err = -EBUSY; |
917 | else | ||
918 | err = reserve_bts_hardware(); | ||
919 | } | ||
920 | if (!err) | ||
734 | atomic_inc(&active_counters); | 921 | atomic_inc(&active_counters); |
735 | mutex_unlock(&pmc_reserve_mutex); | 922 | mutex_unlock(&pmc_reserve_mutex); |
736 | } | 923 | } |
@@ -793,6 +980,20 @@ static int __hw_perf_counter_init(struct perf_counter *counter) | |||
793 | if (config == -1LL) | 980 | if (config == -1LL) |
794 | return -EINVAL; | 981 | return -EINVAL; |
795 | 982 | ||
983 | /* | ||
984 | * Branch tracing: | ||
985 | */ | ||
986 | if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) && | ||
987 | (hwc->sample_period == 1)) { | ||
988 | /* BTS is not supported by this architecture. */ | ||
989 | if (!bts_available()) | ||
990 | return -EOPNOTSUPP; | ||
991 | |||
992 | /* BTS is currently only allowed for user-mode. */ | ||
993 | if (hwc->config & ARCH_PERFMON_EVENTSEL_OS) | ||
994 | return -EOPNOTSUPP; | ||
995 | } | ||
996 | |||
796 | hwc->config |= config; | 997 | hwc->config |= config; |
797 | 998 | ||
798 | return 0; | 999 | return 0; |
@@ -817,7 +1018,18 @@ static void p6_pmu_disable_all(void) | |||
817 | 1018 | ||
818 | static void intel_pmu_disable_all(void) | 1019 | static void intel_pmu_disable_all(void) |
819 | { | 1020 | { |
1021 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); | ||
1022 | |||
1023 | if (!cpuc->enabled) | ||
1024 | return; | ||
1025 | |||
1026 | cpuc->enabled = 0; | ||
1027 | barrier(); | ||
1028 | |||
820 | wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); | 1029 | wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); |
1030 | |||
1031 | if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) | ||
1032 | intel_pmu_disable_bts(); | ||
821 | } | 1033 | } |
822 | 1034 | ||
823 | static void amd_pmu_disable_all(void) | 1035 | static void amd_pmu_disable_all(void) |
@@ -875,7 +1087,25 @@ static void p6_pmu_enable_all(void) | |||
875 | 1087 | ||
876 | static void intel_pmu_enable_all(void) | 1088 | static void intel_pmu_enable_all(void) |
877 | { | 1089 | { |
1090 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); | ||
1091 | |||
1092 | if (cpuc->enabled) | ||
1093 | return; | ||
1094 | |||
1095 | cpuc->enabled = 1; | ||
1096 | barrier(); | ||
1097 | |||
878 | wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl); | 1098 | wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl); |
1099 | |||
1100 | if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) { | ||
1101 | struct perf_counter *counter = | ||
1102 | cpuc->counters[X86_PMC_IDX_FIXED_BTS]; | ||
1103 | |||
1104 | if (WARN_ON_ONCE(!counter)) | ||
1105 | return; | ||
1106 | |||
1107 | intel_pmu_enable_bts(counter->hw.config); | ||
1108 | } | ||
879 | } | 1109 | } |
880 | 1110 | ||
881 | static void amd_pmu_enable_all(void) | 1111 | static void amd_pmu_enable_all(void) |
@@ -962,6 +1192,11 @@ p6_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) | |||
962 | static inline void | 1192 | static inline void |
963 | intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) | 1193 | intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) |
964 | { | 1194 | { |
1195 | if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { | ||
1196 | intel_pmu_disable_bts(); | ||
1197 | return; | ||
1198 | } | ||
1199 | |||
965 | if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { | 1200 | if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { |
966 | intel_pmu_disable_fixed(hwc, idx); | 1201 | intel_pmu_disable_fixed(hwc, idx); |
967 | return; | 1202 | return; |
@@ -990,6 +1225,9 @@ x86_perf_counter_set_period(struct perf_counter *counter, | |||
990 | s64 period = hwc->sample_period; | 1225 | s64 period = hwc->sample_period; |
991 | int err, ret = 0; | 1226 | int err, ret = 0; |
992 | 1227 | ||
1228 | if (idx == X86_PMC_IDX_FIXED_BTS) | ||
1229 | return 0; | ||
1230 | |||
993 | /* | 1231 | /* |
994 | * If we are way outside a reasonable range then just skip forward: | 1232 | * If we are way outside a reasonable range then just skip forward:
995 | */ | 1233 | */ |
@@ -1072,6 +1310,14 @@ static void p6_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) | |||
1072 | 1310 | ||
1073 | static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) | 1311 | static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) |
1074 | { | 1312 | { |
1313 | if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { | ||
1314 | if (!__get_cpu_var(cpu_hw_counters).enabled) | ||
1315 | return; | ||
1316 | |||
1317 | intel_pmu_enable_bts(hwc->config); | ||
1318 | return; | ||
1319 | } | ||
1320 | |||
1075 | if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { | 1321 | if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { |
1076 | intel_pmu_enable_fixed(hwc, idx); | 1322 | intel_pmu_enable_fixed(hwc, idx); |
1077 | return; | 1323 | return; |
@@ -1093,11 +1339,16 @@ fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc) | |||
1093 | { | 1339 | { |
1094 | unsigned int event; | 1340 | unsigned int event; |
1095 | 1341 | ||
1342 | event = hwc->config & ARCH_PERFMON_EVENT_MASK; | ||
1343 | |||
1344 | if (unlikely((event == | ||
1345 | x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) && | ||
1346 | (hwc->sample_period == 1))) | ||
1347 | return X86_PMC_IDX_FIXED_BTS; | ||
1348 | |||
1096 | if (!x86_pmu.num_counters_fixed) | 1349 | if (!x86_pmu.num_counters_fixed) |
1097 | return -1; | 1350 | return -1; |
1098 | 1351 | ||
1099 | event = hwc->config & ARCH_PERFMON_EVENT_MASK; | ||
1100 | |||
1101 | if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS))) | 1352 | if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS))) |
1102 | return X86_PMC_IDX_FIXED_INSTRUCTIONS; | 1353 | return X86_PMC_IDX_FIXED_INSTRUCTIONS; |
1103 | if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES))) | 1354 | if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES))) |
@@ -1118,7 +1369,15 @@ static int x86_pmu_enable(struct perf_counter *counter) | |||
1118 | int idx; | 1369 | int idx; |
1119 | 1370 | ||
1120 | idx = fixed_mode_idx(counter, hwc); | 1371 | idx = fixed_mode_idx(counter, hwc); |
1121 | if (idx >= 0) { | 1372 | if (idx == X86_PMC_IDX_FIXED_BTS) { |
1373 | /* BTS is already occupied. */ | ||
1374 | if (test_and_set_bit(idx, cpuc->used_mask)) | ||
1375 | return -EAGAIN; | ||
1376 | |||
1377 | hwc->config_base = 0; | ||
1378 | hwc->counter_base = 0; | ||
1379 | hwc->idx = idx; | ||
1380 | } else if (idx >= 0) { | ||
1122 | /* | 1381 | /* |
1123 | * Try to get the fixed counter, if that is already taken | 1382 | * Try to get the fixed counter, if that is already taken |
1124 | * then try to get a generic counter: | 1383 | * then try to get a generic counter: |
@@ -1229,6 +1488,44 @@ void perf_counter_print_debug(void) | |||
1229 | local_irq_restore(flags); | 1488 | local_irq_restore(flags); |
1230 | } | 1489 | } |
1231 | 1490 | ||
1491 | static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc, | ||
1492 | struct perf_sample_data *data) | ||
1493 | { | ||
1494 | struct debug_store *ds = cpuc->ds; | ||
1495 | struct bts_record { | ||
1496 | u64 from; | ||
1497 | u64 to; | ||
1498 | u64 flags; | ||
1499 | }; | ||
1500 | struct perf_counter *counter = cpuc->counters[X86_PMC_IDX_FIXED_BTS]; | ||
1501 | unsigned long orig_ip = data->regs->ip; | ||
1502 | struct bts_record *at, *top; | ||
1503 | |||
1504 | if (!counter) | ||
1505 | return; | ||
1506 | |||
1507 | if (!ds) | ||
1508 | return; | ||
1509 | |||
1510 | at = (struct bts_record *)(unsigned long)ds->bts_buffer_base; | ||
1511 | top = (struct bts_record *)(unsigned long)ds->bts_index; | ||
1512 | |||
1513 | ds->bts_index = ds->bts_buffer_base; | ||
1514 | |||
1515 | for (; at < top; at++) { | ||
1516 | data->regs->ip = at->from; | ||
1517 | data->addr = at->to; | ||
1518 | |||
1519 | perf_counter_output(counter, 1, data); | ||
1520 | } | ||
1521 | |||
1522 | data->regs->ip = orig_ip; | ||
1523 | data->addr = 0; | ||
1524 | |||
1525 | /* There's new data available. */ | ||
1526 | counter->pending_kill = POLL_IN; | ||
1527 | } | ||
1528 | |||
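intel_pmu_drain_bts_buffer() treats the region between bts_buffer_base and bts_index as a flat array of 24-byte {from, to, flags} records and rewinds the index once they are consumed. A stand-alone model of that loop over a synthetic buffer:

/*
 * Stand-alone model of the BTS drain loop: iterate the records between
 * the buffer base and the current index, then rewind the index.
 */
#include <stdint.h>
#include <stdio.h>

struct bts_record {
        uint64_t from;
        uint64_t to;
        uint64_t flags;
};

int main(void)
{
        struct bts_record buf[4] = {
                { 0x400100, 0x400200, 0 },
                { 0x400210, 0x400300, 0 },
        };
        struct bts_record *at  = buf;
        struct bts_record *top = buf + 2;  /* models ds->bts_index */

        for (; at < top; at++)
                printf("branch from 0x%llx to 0x%llx\n",
                       (unsigned long long)at->from,
                       (unsigned long long)at->to);
        /* in the kernel: ds->bts_index = ds->bts_buffer_base; */
        return 0;
}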
1232 | static void x86_pmu_disable(struct perf_counter *counter) | 1529 | static void x86_pmu_disable(struct perf_counter *counter) |
1233 | { | 1530 | { |
1234 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); | 1531 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); |
@@ -1253,6 +1550,15 @@ static void x86_pmu_disable(struct perf_counter *counter) | |||
1253 | * that we are disabling: | 1550 | * that we are disabling: |
1254 | */ | 1551 | */ |
1255 | x86_perf_counter_update(counter, hwc, idx); | 1552 | x86_perf_counter_update(counter, hwc, idx); |
1553 | |||
1554 | /* Drain the remaining BTS records. */ | ||
1555 | if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { | ||
1556 | struct perf_sample_data data; | ||
1557 | struct pt_regs regs; | ||
1558 | |||
1559 | data.regs = ®s; | ||
1560 | intel_pmu_drain_bts_buffer(cpuc, &data); | ||
1561 | } | ||
1256 | cpuc->counters[idx] = NULL; | 1562 | cpuc->counters[idx] = NULL; |
1257 | clear_bit(idx, cpuc->used_mask); | 1563 | clear_bit(idx, cpuc->used_mask); |
1258 | 1564 | ||
@@ -1280,6 +1586,7 @@ static int intel_pmu_save_and_restart(struct perf_counter *counter) | |||
1280 | 1586 | ||
1281 | static void intel_pmu_reset(void) | 1587 | static void intel_pmu_reset(void) |
1282 | { | 1588 | { |
1589 | struct debug_store *ds = __get_cpu_var(cpu_hw_counters).ds; | ||
1283 | unsigned long flags; | 1590 | unsigned long flags; |
1284 | int idx; | 1591 | int idx; |
1285 | 1592 | ||
@@ -1297,6 +1604,8 @@ static void intel_pmu_reset(void) | |||
1297 | for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) { | 1604 | for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) { |
1298 | checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull); | 1605 | checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull); |
1299 | } | 1606 | } |
1607 | if (ds) | ||
1608 | ds->bts_index = ds->bts_buffer_base; | ||
1300 | 1609 | ||
1301 | local_irq_restore(flags); | 1610 | local_irq_restore(flags); |
1302 | } | 1611 | } |
@@ -1362,6 +1671,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) | |||
1362 | cpuc = &__get_cpu_var(cpu_hw_counters); | 1671 | cpuc = &__get_cpu_var(cpu_hw_counters); |
1363 | 1672 | ||
1364 | perf_disable(); | 1673 | perf_disable(); |
1674 | intel_pmu_drain_bts_buffer(cpuc, &data); | ||
1365 | status = intel_pmu_get_status(); | 1675 | status = intel_pmu_get_status(); |
1366 | if (!status) { | 1676 | if (!status) { |
1367 | perf_enable(); | 1677 | perf_enable(); |
@@ -1571,6 +1881,8 @@ static struct x86_pmu intel_pmu = { | |||
1571 | * the generic counter period: | 1881 | * the generic counter period: |
1572 | */ | 1882 | */ |
1573 | .max_period = (1ULL << 31) - 1, | 1883 | .max_period = (1ULL << 31) - 1, |
1884 | .enable_bts = intel_pmu_enable_bts, | ||
1885 | .disable_bts = intel_pmu_disable_bts, | ||
1574 | }; | 1886 | }; |
1575 | 1887 | ||
1576 | static struct x86_pmu amd_pmu = { | 1888 | static struct x86_pmu amd_pmu = { |
@@ -1962,3 +2274,8 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) | |||
1962 | 2274 | ||
1963 | return entry; | 2275 | return entry; |
1964 | } | 2276 | } |
2277 | |||
2278 | void hw_perf_counter_setup_online(int cpu) | ||
2279 | { | ||
2280 | init_debug_store_on_cpu(cpu); | ||
2281 | } | ||
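The perf_counter hunks above add Branch Trace Store (BTS) draining: the debug-store area is walked as an array of three-u64 branch records between bts_buffer_base and bts_index, each record is replayed as a sample, and the index is rewound. A minimal user-space model of that loop, with all names and types assumed for illustration:

#include <stdint.h>

/* one hardware branch record, as laid out in the BTS area (assumed) */
struct bts_record {
	uint64_t from;	/* branch source */
	uint64_t to;	/* branch target */
	uint64_t flags;
};

struct debug_store {
	uint64_t bts_buffer_base;	/* first record */
	uint64_t bts_index;		/* one past the last valid record */
};

/* emit() stands in for perf_counter_output() */
static void drain_bts(struct debug_store *ds,
		      void (*emit)(uint64_t from, uint64_t to))
{
	struct bts_record *at  = (struct bts_record *)(uintptr_t)ds->bts_buffer_base;
	struct bts_record *top = (struct bts_record *)(uintptr_t)ds->bts_index;

	ds->bts_index = ds->bts_buffer_base;	/* rewind before replaying */

	for (; at < top; at++)
		emit(at->from, at->to);
}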
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index d94e1ea3b9fe..9dbb527e1652 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c | |||
@@ -417,10 +417,6 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, | |||
417 | unsigned long return_hooker = (unsigned long) | 417 | unsigned long return_hooker = (unsigned long) |
418 | &return_to_handler; | 418 | &return_to_handler; |
419 | 419 | ||
420 | /* Nmi's are currently unsupported */ | ||
421 | if (unlikely(in_nmi())) | ||
422 | return; | ||
423 | |||
424 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) | 420 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) |
425 | return; | 421 | return; |
426 | 422 | ||
@@ -498,37 +494,56 @@ static struct syscall_metadata *find_syscall_meta(unsigned long *syscall) | |||
498 | 494 | ||
499 | struct syscall_metadata *syscall_nr_to_meta(int nr) | 495 | struct syscall_metadata *syscall_nr_to_meta(int nr) |
500 | { | 496 | { |
501 | if (!syscalls_metadata || nr >= FTRACE_SYSCALL_MAX || nr < 0) | 497 | if (!syscalls_metadata || nr >= NR_syscalls || nr < 0) |
502 | return NULL; | 498 | return NULL; |
503 | 499 | ||
504 | return syscalls_metadata[nr]; | 500 | return syscalls_metadata[nr]; |
505 | } | 501 | } |
506 | 502 | ||
507 | void arch_init_ftrace_syscalls(void) | 503 | int syscall_name_to_nr(char *name) |
504 | { | ||
505 | int i; | ||
506 | |||
507 | if (!syscalls_metadata) | ||
508 | return -1; | ||
509 | |||
510 | for (i = 0; i < NR_syscalls; i++) { | ||
511 | if (syscalls_metadata[i]) { | ||
512 | if (!strcmp(syscalls_metadata[i]->name, name)) | ||
513 | return i; | ||
514 | } | ||
515 | } | ||
516 | return -1; | ||
517 | } | ||
518 | |||
519 | void set_syscall_enter_id(int num, int id) | ||
520 | { | ||
521 | syscalls_metadata[num]->enter_id = id; | ||
522 | } | ||
523 | |||
524 | void set_syscall_exit_id(int num, int id) | ||
525 | { | ||
526 | syscalls_metadata[num]->exit_id = id; | ||
527 | } | ||
528 | |||
529 | static int __init arch_init_ftrace_syscalls(void) | ||
508 | { | 530 | { |
509 | int i; | 531 | int i; |
510 | struct syscall_metadata *meta; | 532 | struct syscall_metadata *meta; |
511 | unsigned long **psys_syscall_table = &sys_call_table; | 533 | unsigned long **psys_syscall_table = &sys_call_table; |
512 | static atomic_t refs; | ||
513 | |||
514 | if (atomic_inc_return(&refs) != 1) | ||
515 | goto end; | ||
516 | 534 | ||
517 | syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * | 535 | syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * |
518 | FTRACE_SYSCALL_MAX, GFP_KERNEL); | 536 | NR_syscalls, GFP_KERNEL); |
519 | if (!syscalls_metadata) { | 537 | if (!syscalls_metadata) { |
520 | WARN_ON(1); | 538 | WARN_ON(1); |
521 | return; | 539 | return -ENOMEM; |
522 | } | 540 | } |
523 | 541 | ||
524 | for (i = 0; i < FTRACE_SYSCALL_MAX; i++) { | 542 | for (i = 0; i < NR_syscalls; i++) { |
525 | meta = find_syscall_meta(psys_syscall_table[i]); | 543 | meta = find_syscall_meta(psys_syscall_table[i]); |
526 | syscalls_metadata[i] = meta; | 544 | syscalls_metadata[i] = meta; |
527 | } | 545 | } |
528 | return; | 546 | return 0; |
529 | |||
530 | /* Paranoid: avoid overflow */ | ||
531 | end: | ||
532 | atomic_dec(&refs); | ||
533 | } | 547 | } |
548 | arch_initcall(arch_init_ftrace_syscalls); | ||
534 | #endif | 549 | #endif |
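The ftrace changes above turn arch_init_ftrace_syscalls() into an arch_initcall and add name/number lookups over the metadata table. A toy model of that table and the linear name lookup (the NR_SYSCALLS value and struct layout are this sketch's assumptions):

#include <stddef.h>
#include <string.h>

#define NR_SYSCALLS 512	/* illustrative; the kernel uses NR_syscalls */

struct syscall_metadata {
	const char *name;
	int enter_id;
	int exit_id;
};

/* one slot per syscall number; NULL where no metadata was found */
static struct syscall_metadata *syscalls_metadata[NR_SYSCALLS];

static int syscall_name_to_nr(const char *name)
{
	int i;

	for (i = 0; i < NR_SYSCALLS; i++) {
		if (syscalls_metadata[i] &&
		    !strcmp(syscalls_metadata[i]->name, name))
			return i;
	}
	return -1;	/* no such syscall */
}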
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index 1a041bcf506b..d71c8655905b 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c | |||
@@ -3,6 +3,7 @@ | |||
3 | #include <linux/dmar.h> | 3 | #include <linux/dmar.h> |
4 | #include <linux/bootmem.h> | 4 | #include <linux/bootmem.h> |
5 | #include <linux/pci.h> | 5 | #include <linux/pci.h> |
6 | #include <linux/kmemleak.h> | ||
6 | 7 | ||
7 | #include <asm/proto.h> | 8 | #include <asm/proto.h> |
8 | #include <asm/dma.h> | 9 | #include <asm/dma.h> |
@@ -32,7 +33,14 @@ int no_iommu __read_mostly; | |||
32 | /* Set this to 1 if there is a HW IOMMU in the system */ | 33 | /* Set this to 1 if there is a HW IOMMU in the system */ |
33 | int iommu_detected __read_mostly = 0; | 34 | int iommu_detected __read_mostly = 0; |
34 | 35 | ||
35 | int iommu_pass_through; | 36 | /* |
37 | * This variable becomes 1 if iommu=pt is passed on the kernel command line. | ||
38 | * If this variable is 1, IOMMU implementations do no DMA translation for | ||
39 | * devices and allow every device to access the whole physical memory. This is | ||
40 | * useful if a user wants to use an IOMMU only for KVM device assignment to | ||
41 | * guests and not for driver dma translation. | ||
42 | */ | ||
43 | int iommu_pass_through __read_mostly; | ||
36 | 44 | ||
37 | dma_addr_t bad_dma_address __read_mostly = 0; | 45 | dma_addr_t bad_dma_address __read_mostly = 0; |
38 | EXPORT_SYMBOL(bad_dma_address); | 46 | EXPORT_SYMBOL(bad_dma_address); |
@@ -88,6 +96,11 @@ void __init dma32_reserve_bootmem(void) | |||
88 | size = roundup(dma32_bootmem_size, align); | 96 | size = roundup(dma32_bootmem_size, align); |
89 | dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align, | 97 | dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align, |
90 | 512ULL<<20); | 98 | 512ULL<<20); |
99 | /* | ||
100 | * Kmemleak should not scan this block as it may not be mapped via the | ||
101 | * kernel direct mapping. | ||
102 | */ | ||
103 | kmemleak_ignore(dma32_bootmem_ptr); | ||
91 | if (dma32_bootmem_ptr) | 104 | if (dma32_bootmem_ptr) |
92 | dma32_bootmem_size = size; | 105 | dma32_bootmem_size = size; |
93 | else | 106 | else |
@@ -147,7 +160,7 @@ again: | |||
147 | return NULL; | 160 | return NULL; |
148 | 161 | ||
149 | addr = page_to_phys(page); | 162 | addr = page_to_phys(page); |
150 | if (!is_buffer_dma_capable(dma_mask, addr, size)) { | 163 | if (addr + size > dma_mask) { |
151 | __free_pages(page, get_order(size)); | 164 | __free_pages(page, get_order(size)); |
152 | 165 | ||
153 | if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) { | 166 | if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) { |
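The pci-dma.c hunk above open-codes the old is_buffer_dma_capable() test: an allocation is usable for DMA only if its end does not run past the device's DMA mask. The same predicate in isolation (types assumed):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool buffer_dma_capable(uint64_t dma_mask, uint64_t addr, size_t size)
{
	/* mirrors the "addr + size > dma_mask" rejection above */
	return addr + size <= dma_mask;
}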
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c index d2e56b8f48e7..98a827ee9ed7 100644 --- a/arch/x86/kernel/pci-gart_64.c +++ b/arch/x86/kernel/pci-gart_64.c | |||
@@ -190,14 +190,13 @@ static void iommu_full(struct device *dev, size_t size, int dir) | |||
190 | static inline int | 190 | static inline int |
191 | need_iommu(struct device *dev, unsigned long addr, size_t size) | 191 | need_iommu(struct device *dev, unsigned long addr, size_t size) |
192 | { | 192 | { |
193 | return force_iommu || | 193 | return force_iommu || !dma_capable(dev, addr, size); |
194 | !is_buffer_dma_capable(*dev->dma_mask, addr, size); | ||
195 | } | 194 | } |
196 | 195 | ||
197 | static inline int | 196 | static inline int |
198 | nonforced_iommu(struct device *dev, unsigned long addr, size_t size) | 197 | nonforced_iommu(struct device *dev, unsigned long addr, size_t size) |
199 | { | 198 | { |
200 | return !is_buffer_dma_capable(*dev->dma_mask, addr, size); | 199 | return !dma_capable(dev, addr, size); |
201 | } | 200 | } |
202 | 201 | ||
203 | /* Map a single continuous physical area into the IOMMU. | 202 | /* Map a single continuous physical area into the IOMMU. |
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c index 71d412a09f30..a3933d4330cd 100644 --- a/arch/x86/kernel/pci-nommu.c +++ b/arch/x86/kernel/pci-nommu.c | |||
@@ -14,7 +14,7 @@ | |||
14 | static int | 14 | static int |
15 | check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size) | 15 | check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size) |
16 | { | 16 | { |
17 | if (hwdev && !is_buffer_dma_capable(*hwdev->dma_mask, bus, size)) { | 17 | if (hwdev && !dma_capable(hwdev, bus, size)) { |
18 | if (*hwdev->dma_mask >= DMA_BIT_MASK(32)) | 18 | if (*hwdev->dma_mask >= DMA_BIT_MASK(32)) |
19 | printk(KERN_ERR | 19 | printk(KERN_ERR |
20 | "nommu_%s: overflow %Lx+%zu of device mask %Lx\n", | 20 | "nommu_%s: overflow %Lx+%zu of device mask %Lx\n", |
@@ -79,12 +79,29 @@ static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr, | |||
79 | free_pages((unsigned long)vaddr, get_order(size)); | 79 | free_pages((unsigned long)vaddr, get_order(size)); |
80 | } | 80 | } |
81 | 81 | ||
82 | static void nommu_sync_single_for_device(struct device *dev, | ||
83 | dma_addr_t addr, size_t size, | ||
84 | enum dma_data_direction dir) | ||
85 | { | ||
86 | flush_write_buffers(); | ||
87 | } | ||
88 | |||
89 | |||
90 | static void nommu_sync_sg_for_device(struct device *dev, | ||
91 | struct scatterlist *sg, int nelems, | ||
92 | enum dma_data_direction dir) | ||
93 | { | ||
94 | flush_write_buffers(); | ||
95 | } | ||
96 | |||
82 | struct dma_map_ops nommu_dma_ops = { | 97 | struct dma_map_ops nommu_dma_ops = { |
83 | .alloc_coherent = dma_generic_alloc_coherent, | 98 | .alloc_coherent = dma_generic_alloc_coherent, |
84 | .free_coherent = nommu_free_coherent, | 99 | .free_coherent = nommu_free_coherent, |
85 | .map_sg = nommu_map_sg, | 100 | .map_sg = nommu_map_sg, |
86 | .map_page = nommu_map_page, | 101 | .map_page = nommu_map_page, |
87 | .is_phys = 1, | 102 | .sync_single_for_device = nommu_sync_single_for_device, |
103 | .sync_sg_for_device = nommu_sync_sg_for_device, | ||
104 | .is_phys = 1, | ||
88 | }; | 105 | }; |
89 | 106 | ||
90 | void __init no_iommu_init(void) | 107 | void __init no_iommu_init(void) |
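The pci-nommu.c hunk above fills in the sync-for-device hooks, whose only job without an IOMMU is to flush CPU write buffers before the device reads memory. A stripped-down model of such an ops table, with a compiler barrier standing in for x86's flush_write_buffers():

#include <stddef.h>
#include <stdint.h>

struct dma_ops_model {
	void (*sync_single_for_device)(uint64_t addr, size_t size);
};

static void nommu_sync_for_device(uint64_t addr, size_t size)
{
	(void)addr;
	(void)size;
	/* nothing to remap; just order the CPU's pending stores */
	__asm__ __volatile__("" ::: "memory");
}

static struct dma_ops_model nommu_ops_model = {
	.sync_single_for_device = nommu_sync_for_device,
};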
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c index 6af96ee44200..e8a35016115f 100644 --- a/arch/x86/kernel/pci-swiotlb.c +++ b/arch/x86/kernel/pci-swiotlb.c | |||
@@ -13,31 +13,6 @@ | |||
13 | 13 | ||
14 | int swiotlb __read_mostly; | 14 | int swiotlb __read_mostly; |
15 | 15 | ||
16 | void * __init swiotlb_alloc_boot(size_t size, unsigned long nslabs) | ||
17 | { | ||
18 | return alloc_bootmem_low_pages(size); | ||
19 | } | ||
20 | |||
21 | void *swiotlb_alloc(unsigned order, unsigned long nslabs) | ||
22 | { | ||
23 | return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order); | ||
24 | } | ||
25 | |||
26 | dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr) | ||
27 | { | ||
28 | return paddr; | ||
29 | } | ||
30 | |||
31 | phys_addr_t swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr) | ||
32 | { | ||
33 | return baddr; | ||
34 | } | ||
35 | |||
36 | int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size) | ||
37 | { | ||
38 | return 0; | ||
39 | } | ||
40 | |||
41 | static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, | 16 | static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, |
42 | dma_addr_t *dma_handle, gfp_t flags) | 17 | dma_addr_t *dma_handle, gfp_t flags) |
43 | { | 18 | { |
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 09ecbde91c13..8d7d5c9c1be3 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c | |||
@@ -35,10 +35,11 @@ | |||
35 | #include <asm/proto.h> | 35 | #include <asm/proto.h> |
36 | #include <asm/ds.h> | 36 | #include <asm/ds.h> |
37 | 37 | ||
38 | #include <trace/syscall.h> | ||
39 | |||
40 | #include "tls.h" | 38 | #include "tls.h" |
41 | 39 | ||
40 | #define CREATE_TRACE_POINTS | ||
41 | #include <trace/events/syscalls.h> | ||
42 | |||
42 | enum x86_regset { | 43 | enum x86_regset { |
43 | REGSET_GENERAL, | 44 | REGSET_GENERAL, |
44 | REGSET_FP, | 45 | REGSET_FP, |
@@ -1497,8 +1498,8 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs) | |||
1497 | tracehook_report_syscall_entry(regs)) | 1498 | tracehook_report_syscall_entry(regs)) |
1498 | ret = -1L; | 1499 | ret = -1L; |
1499 | 1500 | ||
1500 | if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE))) | 1501 | if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) |
1501 | ftrace_syscall_enter(regs); | 1502 | trace_sys_enter(regs, regs->orig_ax); |
1502 | 1503 | ||
1503 | if (unlikely(current->audit_context)) { | 1504 | if (unlikely(current->audit_context)) { |
1504 | if (IS_IA32) | 1505 | if (IS_IA32) |
@@ -1523,8 +1524,8 @@ asmregparm void syscall_trace_leave(struct pt_regs *regs) | |||
1523 | if (unlikely(current->audit_context)) | 1524 | if (unlikely(current->audit_context)) |
1524 | audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax); | 1525 | audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax); |
1525 | 1526 | ||
1526 | if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE))) | 1527 | if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) |
1527 | ftrace_syscall_exit(regs); | 1528 | trace_sys_exit(regs, regs->ax); |
1528 | 1529 | ||
1529 | if (test_thread_flag(TIF_SYSCALL_TRACE)) | 1530 | if (test_thread_flag(TIF_SYSCALL_TRACE)) |
1530 | tracehook_report_syscall_exit(regs, 0); | 1531 | tracehook_report_syscall_exit(regs, 0); |
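The ptrace.c hunk above switches from ftrace-internal hooks to generic tracepoints; defining CREATE_TRACE_POINTS before including the events header instantiates the tracepoint bodies in exactly this translation unit, while every other includer sees only declarations. A toy model of that macro trick (not the kernel's real TRACE_EVENT machinery):

/* --- events header (model) --- */
#ifdef CREATE_TRACE_POINTS
#define TRACE_EVENT(name) void trace_##name(long arg) { (void)arg; }
#else
#define TRACE_EVENT(name) void trace_##name(long arg);
#endif

TRACE_EVENT(sys_enter)	/* body emitted only where CREATE_TRACE_POINTS is set */

/* ptrace.c: #define CREATE_TRACE_POINTS before the #include (one file only);
 * every other consumer gets just the prototype. */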
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 4c578751e94e..81e58238c4ce 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c | |||
@@ -869,6 +869,8 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) | |||
869 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { | 869 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { |
870 | clear_thread_flag(TIF_NOTIFY_RESUME); | 870 | clear_thread_flag(TIF_NOTIFY_RESUME); |
871 | tracehook_notify_resume(regs); | 871 | tracehook_notify_resume(regs); |
872 | if (current->replacement_session_keyring) | ||
873 | key_replace_session_keyring(); | ||
872 | } | 874 | } |
873 | 875 | ||
874 | #ifdef CONFIG_X86_32 | 876 | #ifdef CONFIG_X86_32 |
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c index 6bc211accf08..45e00eb09c3a 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c | |||
@@ -18,9 +18,9 @@ | |||
18 | #include <asm/ia32.h> | 18 | #include <asm/ia32.h> |
19 | #include <asm/syscalls.h> | 19 | #include <asm/syscalls.h> |
20 | 20 | ||
21 | asmlinkage long sys_mmap(unsigned long addr, unsigned long len, | 21 | SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, |
22 | unsigned long prot, unsigned long flags, | 22 | unsigned long, prot, unsigned long, flags, |
23 | unsigned long fd, unsigned long off) | 23 | unsigned long, fd, unsigned long, off) |
24 | { | 24 | { |
25 | long error; | 25 | long error; |
26 | struct file *file; | 26 | struct file *file; |
@@ -226,7 +226,7 @@ bottomup: | |||
226 | } | 226 | } |
227 | 227 | ||
228 | 228 | ||
229 | asmlinkage long sys_uname(struct new_utsname __user *name) | 229 | SYSCALL_DEFINE1(uname, struct new_utsname __user *, name) |
230 | { | 230 | { |
231 | int err; | 231 | int err; |
232 | down_read(&uts_sem); | 232 | down_read(&uts_sem); |
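sys_mmap and sys_uname above move to the SYSCALL_DEFINEn macros, which generate the uniformly named entry point and give the tracing code one place to attach per-syscall metadata (the real macros also handle argument sign-extension). A toy single-argument version:

/* toy model only; the kernel's macro is considerably more involved */
#define SYSCALL_DEFINE1(name, t1, a1) long sys_##name(t1 a1)

SYSCALL_DEFINE1(uname, char *, buf)	/* expands to: long sys_uname(char *buf) */
{
	(void)buf;
	return 0;
}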
diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c index 2c55ed098654..528bf954eb74 100644 --- a/arch/x86/mm/kmemcheck/kmemcheck.c +++ b/arch/x86/mm/kmemcheck/kmemcheck.c | |||
@@ -331,6 +331,20 @@ static void kmemcheck_read_strict(struct pt_regs *regs, | |||
331 | kmemcheck_shadow_set(shadow, size); | 331 | kmemcheck_shadow_set(shadow, size); |
332 | } | 332 | } |
333 | 333 | ||
334 | bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size) | ||
335 | { | ||
336 | enum kmemcheck_shadow status; | ||
337 | void *shadow; | ||
338 | |||
339 | shadow = kmemcheck_shadow_lookup(addr); | ||
340 | if (!shadow) | ||
341 | return true; | ||
342 | |||
343 | status = kmemcheck_shadow_test(shadow, size); | ||
344 | |||
345 | return status == KMEMCHECK_SHADOW_INITIALIZED; | ||
346 | } | ||
347 | |||
334 | /* Access may cross page boundary */ | 348 | /* Access may cross page boundary */ |
335 | static void kmemcheck_read(struct pt_regs *regs, | 349 | static void kmemcheck_read(struct pt_regs *regs, |
336 | unsigned long addr, unsigned int size) | 350 | unsigned long addr, unsigned int size) |
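kmemcheck_is_obj_initialized() above consults the shadow map and deliberately reports untracked memory as initialized, so callers never get false positives on memory kmemcheck knows nothing about. A byte-granular toy model:

#include <stdbool.h>
#include <stddef.h>

enum shadow_state { SHADOW_UNINITIALIZED, SHADOW_INITIALIZED };

static bool obj_initialized(const unsigned char *shadow, size_t size)
{
	size_t i;

	if (!shadow)
		return true;	/* untracked memory: assume initialized */

	for (i = 0; i < size; i++)
		if (shadow[i] != SHADOW_INITIALIZED)
			return false;
	return true;
}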
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 89b9a5cd63da..cb88b1a0bd5f 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c | |||
@@ -1,11 +1,14 @@ | |||
1 | /** | 1 | /** |
2 | * @file nmi_int.c | 2 | * @file nmi_int.c |
3 | * | 3 | * |
4 | * @remark Copyright 2002-2008 OProfile authors | 4 | * @remark Copyright 2002-2009 OProfile authors |
5 | * @remark Read the file COPYING | 5 | * @remark Read the file COPYING |
6 | * | 6 | * |
7 | * @author John Levon <levon@movementarian.org> | 7 | * @author John Levon <levon@movementarian.org> |
8 | * @author Robert Richter <robert.richter@amd.com> | 8 | * @author Robert Richter <robert.richter@amd.com> |
9 | * @author Barry Kasindorf <barry.kasindorf@amd.com> | ||
10 | * @author Jason Yeh <jason.yeh@amd.com> | ||
11 | * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com> | ||
9 | */ | 12 | */ |
10 | 13 | ||
11 | #include <linux/init.h> | 14 | #include <linux/init.h> |
@@ -24,13 +27,35 @@ | |||
24 | #include "op_counter.h" | 27 | #include "op_counter.h" |
25 | #include "op_x86_model.h" | 28 | #include "op_x86_model.h" |
26 | 29 | ||
27 | static struct op_x86_model_spec const *model; | 30 | static struct op_x86_model_spec *model; |
28 | static DEFINE_PER_CPU(struct op_msrs, cpu_msrs); | 31 | static DEFINE_PER_CPU(struct op_msrs, cpu_msrs); |
29 | static DEFINE_PER_CPU(unsigned long, saved_lvtpc); | 32 | static DEFINE_PER_CPU(unsigned long, saved_lvtpc); |
30 | 33 | ||
31 | /* 0 == registered but off, 1 == registered and on */ | 34 | /* 0 == registered but off, 1 == registered and on */ |
32 | static int nmi_enabled = 0; | 35 | static int nmi_enabled = 0; |
33 | 36 | ||
37 | struct op_counter_config counter_config[OP_MAX_COUNTER]; | ||
38 | |||
39 | /* common functions */ | ||
40 | |||
41 | u64 op_x86_get_ctrl(struct op_x86_model_spec const *model, | ||
42 | struct op_counter_config *counter_config) | ||
43 | { | ||
44 | u64 val = 0; | ||
45 | u16 event = (u16)counter_config->event; | ||
46 | |||
47 | val |= ARCH_PERFMON_EVENTSEL_INT; | ||
48 | val |= counter_config->user ? ARCH_PERFMON_EVENTSEL_USR : 0; | ||
49 | val |= counter_config->kernel ? ARCH_PERFMON_EVENTSEL_OS : 0; | ||
50 | val |= (counter_config->unit_mask & 0xFF) << 8; | ||
51 | event &= model->event_mask ? model->event_mask : 0xFF; | ||
52 | val |= event & 0xFF; | ||
53 | val |= (event & 0x0F00) << 24; | ||
54 | |||
55 | return val; | ||
56 | } | ||
57 | |||
58 | |||
34 | static int profile_exceptions_notify(struct notifier_block *self, | 59 | static int profile_exceptions_notify(struct notifier_block *self, |
35 | unsigned long val, void *data) | 60 | unsigned long val, void *data) |
36 | { | 61 | { |
@@ -52,36 +77,214 @@ static int profile_exceptions_notify(struct notifier_block *self, | |||
52 | 77 | ||
53 | static void nmi_cpu_save_registers(struct op_msrs *msrs) | 78 | static void nmi_cpu_save_registers(struct op_msrs *msrs) |
54 | { | 79 | { |
55 | unsigned int const nr_ctrs = model->num_counters; | ||
56 | unsigned int const nr_ctrls = model->num_controls; | ||
57 | struct op_msr *counters = msrs->counters; | 80 | struct op_msr *counters = msrs->counters; |
58 | struct op_msr *controls = msrs->controls; | 81 | struct op_msr *controls = msrs->controls; |
59 | unsigned int i; | 82 | unsigned int i; |
60 | 83 | ||
61 | for (i = 0; i < nr_ctrs; ++i) { | 84 | for (i = 0; i < model->num_counters; ++i) { |
62 | if (counters[i].addr) { | 85 | if (counters[i].addr) |
63 | rdmsr(counters[i].addr, | 86 | rdmsrl(counters[i].addr, counters[i].saved); |
64 | counters[i].saved.low, | 87 | } |
65 | counters[i].saved.high); | 88 | |
66 | } | 89 | for (i = 0; i < model->num_controls; ++i) { |
90 | if (controls[i].addr) | ||
91 | rdmsrl(controls[i].addr, controls[i].saved); | ||
92 | } | ||
93 | } | ||
94 | |||
95 | static void nmi_cpu_start(void *dummy) | ||
96 | { | ||
97 | struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs); | ||
98 | model->start(msrs); | ||
99 | } | ||
100 | |||
101 | static int nmi_start(void) | ||
102 | { | ||
103 | on_each_cpu(nmi_cpu_start, NULL, 1); | ||
104 | return 0; | ||
105 | } | ||
106 | |||
107 | static void nmi_cpu_stop(void *dummy) | ||
108 | { | ||
109 | struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs); | ||
110 | model->stop(msrs); | ||
111 | } | ||
112 | |||
113 | static void nmi_stop(void) | ||
114 | { | ||
115 | on_each_cpu(nmi_cpu_stop, NULL, 1); | ||
116 | } | ||
117 | |||
118 | #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX | ||
119 | |||
120 | static DEFINE_PER_CPU(int, switch_index); | ||
121 | |||
122 | static inline int has_mux(void) | ||
123 | { | ||
124 | return !!model->switch_ctrl; | ||
125 | } | ||
126 | |||
127 | inline int op_x86_phys_to_virt(int phys) | ||
128 | { | ||
129 | return __get_cpu_var(switch_index) + phys; | ||
130 | } | ||
131 | |||
132 | inline int op_x86_virt_to_phys(int virt) | ||
133 | { | ||
134 | return virt % model->num_counters; | ||
135 | } | ||
136 | |||
137 | static void nmi_shutdown_mux(void) | ||
138 | { | ||
139 | int i; | ||
140 | |||
141 | if (!has_mux()) | ||
142 | return; | ||
143 | |||
144 | for_each_possible_cpu(i) { | ||
145 | kfree(per_cpu(cpu_msrs, i).multiplex); | ||
146 | per_cpu(cpu_msrs, i).multiplex = NULL; | ||
147 | per_cpu(switch_index, i) = 0; | ||
67 | } | 148 | } |
149 | } | ||
150 | |||
151 | static int nmi_setup_mux(void) | ||
152 | { | ||
153 | size_t multiplex_size = | ||
154 | sizeof(struct op_msr) * model->num_virt_counters; | ||
155 | int i; | ||
156 | |||
157 | if (!has_mux()) | ||
158 | return 1; | ||
159 | |||
160 | for_each_possible_cpu(i) { | ||
161 | per_cpu(cpu_msrs, i).multiplex = | ||
162 | kmalloc(multiplex_size, GFP_KERNEL); | ||
163 | if (!per_cpu(cpu_msrs, i).multiplex) | ||
164 | return 0; | ||
165 | } | ||
166 | |||
167 | return 1; | ||
168 | } | ||
169 | |||
170 | static void nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs) | ||
171 | { | ||
172 | int i; | ||
173 | struct op_msr *multiplex = msrs->multiplex; | ||
174 | |||
175 | if (!has_mux()) | ||
176 | return; | ||
68 | 177 | ||
69 | for (i = 0; i < nr_ctrls; ++i) { | 178 | for (i = 0; i < model->num_virt_counters; ++i) { |
70 | if (controls[i].addr) { | 179 | if (counter_config[i].enabled) { |
71 | rdmsr(controls[i].addr, | 180 | multiplex[i].saved = -(u64)counter_config[i].count; |
72 | controls[i].saved.low, | 181 | } else { |
73 | controls[i].saved.high); | 182 | multiplex[i].addr = 0; |
183 | multiplex[i].saved = 0; | ||
74 | } | 184 | } |
75 | } | 185 | } |
186 | |||
187 | per_cpu(switch_index, cpu) = 0; | ||
188 | } | ||
189 | |||
190 | static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs) | ||
191 | { | ||
192 | struct op_msr *multiplex = msrs->multiplex; | ||
193 | int i; | ||
194 | |||
195 | for (i = 0; i < model->num_counters; ++i) { | ||
196 | int virt = op_x86_phys_to_virt(i); | ||
197 | if (multiplex[virt].addr) | ||
198 | rdmsrl(multiplex[virt].addr, multiplex[virt].saved); | ||
199 | } | ||
200 | } | ||
201 | |||
202 | static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs) | ||
203 | { | ||
204 | struct op_msr *multiplex = msrs->multiplex; | ||
205 | int i; | ||
206 | |||
207 | for (i = 0; i < model->num_counters; ++i) { | ||
208 | int virt = op_x86_phys_to_virt(i); | ||
209 | if (multiplex[virt].addr) | ||
210 | wrmsrl(multiplex[virt].addr, multiplex[virt].saved); | ||
211 | } | ||
76 | } | 212 | } |
77 | 213 | ||
78 | static void nmi_save_registers(void *dummy) | 214 | static void nmi_cpu_switch(void *dummy) |
79 | { | 215 | { |
80 | int cpu = smp_processor_id(); | 216 | int cpu = smp_processor_id(); |
217 | int si = per_cpu(switch_index, cpu); | ||
81 | struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu); | 218 | struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu); |
82 | nmi_cpu_save_registers(msrs); | 219 | |
220 | nmi_cpu_stop(NULL); | ||
221 | nmi_cpu_save_mpx_registers(msrs); | ||
222 | |||
223 | /* move to next set */ | ||
224 | si += model->num_counters; | ||
225 | if ((si > model->num_virt_counters) || (counter_config[si].count == 0)) | ||
226 | per_cpu(switch_index, cpu) = 0; | ||
227 | else | ||
228 | per_cpu(switch_index, cpu) = si; | ||
229 | |||
230 | model->switch_ctrl(model, msrs); | ||
231 | nmi_cpu_restore_mpx_registers(msrs); | ||
232 | |||
233 | nmi_cpu_start(NULL); | ||
234 | } | ||
235 | |||
236 | |||
237 | /* | ||
238 | * Quick check to see if multiplexing is necessary. | ||
239 | * The check should be sufficient since counters are used | ||
240 | * in order. | ||
241 | */ | ||
242 | static int nmi_multiplex_on(void) | ||
243 | { | ||
244 | return counter_config[model->num_counters].count ? 0 : -EINVAL; | ||
245 | } | ||
246 | |||
247 | static int nmi_switch_event(void) | ||
248 | { | ||
249 | if (!has_mux()) | ||
250 | return -ENOSYS; /* not implemented */ | ||
251 | if (nmi_multiplex_on() < 0) | ||
252 | return -EINVAL; /* not necessary */ | ||
253 | |||
254 | on_each_cpu(nmi_cpu_switch, NULL, 1); | ||
255 | |||
256 | return 0; | ||
257 | } | ||
258 | |||
259 | static inline void mux_init(struct oprofile_operations *ops) | ||
260 | { | ||
261 | if (has_mux()) | ||
262 | ops->switch_events = nmi_switch_event; | ||
263 | } | ||
264 | |||
265 | static void mux_clone(int cpu) | ||
266 | { | ||
267 | if (!has_mux()) | ||
268 | return; | ||
269 | |||
270 | memcpy(per_cpu(cpu_msrs, cpu).multiplex, | ||
271 | per_cpu(cpu_msrs, 0).multiplex, | ||
272 | sizeof(struct op_msr) * model->num_virt_counters); | ||
83 | } | 273 | } |
84 | 274 | ||
275 | #else | ||
276 | |||
277 | inline int op_x86_phys_to_virt(int phys) { return phys; } | ||
278 | inline int op_x86_virt_to_phys(int virt) { return virt; } | ||
279 | static inline void nmi_shutdown_mux(void) { } | ||
280 | static inline int nmi_setup_mux(void) { return 1; } | ||
281 | static inline void | ||
282 | nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs) { } | ||
283 | static inline void mux_init(struct oprofile_operations *ops) { } | ||
284 | static void mux_clone(int cpu) { } | ||
285 | |||
286 | #endif | ||
287 | |||
85 | static void free_msrs(void) | 288 | static void free_msrs(void) |
86 | { | 289 | { |
87 | int i; | 290 | int i; |
@@ -95,38 +298,32 @@ static void free_msrs(void) | |||
95 | 298 | ||
96 | static int allocate_msrs(void) | 299 | static int allocate_msrs(void) |
97 | { | 300 | { |
98 | int success = 1; | ||
99 | size_t controls_size = sizeof(struct op_msr) * model->num_controls; | 301 | size_t controls_size = sizeof(struct op_msr) * model->num_controls; |
100 | size_t counters_size = sizeof(struct op_msr) * model->num_counters; | 302 | size_t counters_size = sizeof(struct op_msr) * model->num_counters; |
101 | 303 | ||
102 | int i; | 304 | int i; |
103 | for_each_possible_cpu(i) { | 305 | for_each_possible_cpu(i) { |
104 | per_cpu(cpu_msrs, i).counters = kmalloc(counters_size, | 306 | per_cpu(cpu_msrs, i).counters = kmalloc(counters_size, |
105 | GFP_KERNEL); | 307 | GFP_KERNEL); |
106 | if (!per_cpu(cpu_msrs, i).counters) { | 308 | if (!per_cpu(cpu_msrs, i).counters) |
107 | success = 0; | 309 | return 0; |
108 | break; | ||
109 | } | ||
110 | per_cpu(cpu_msrs, i).controls = kmalloc(controls_size, | 310 | per_cpu(cpu_msrs, i).controls = kmalloc(controls_size, |
111 | GFP_KERNEL); | 311 | GFP_KERNEL); |
112 | if (!per_cpu(cpu_msrs, i).controls) { | 312 | if (!per_cpu(cpu_msrs, i).controls) |
113 | success = 0; | 313 | return 0; |
114 | break; | ||
115 | } | ||
116 | } | 314 | } |
117 | 315 | ||
118 | if (!success) | 316 | return 1; |
119 | free_msrs(); | ||
120 | |||
121 | return success; | ||
122 | } | 317 | } |
123 | 318 | ||
124 | static void nmi_cpu_setup(void *dummy) | 319 | static void nmi_cpu_setup(void *dummy) |
125 | { | 320 | { |
126 | int cpu = smp_processor_id(); | 321 | int cpu = smp_processor_id(); |
127 | struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu); | 322 | struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu); |
323 | nmi_cpu_save_registers(msrs); | ||
128 | spin_lock(&oprofilefs_lock); | 324 | spin_lock(&oprofilefs_lock); |
129 | model->setup_ctrs(msrs); | 325 | model->setup_ctrs(model, msrs); |
326 | nmi_cpu_setup_mux(cpu, msrs); | ||
130 | spin_unlock(&oprofilefs_lock); | 327 | spin_unlock(&oprofilefs_lock); |
131 | per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC); | 328 | per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC); |
132 | apic_write(APIC_LVTPC, APIC_DM_NMI); | 329 | apic_write(APIC_LVTPC, APIC_DM_NMI); |
@@ -144,11 +341,15 @@ static int nmi_setup(void) | |||
144 | int cpu; | 341 | int cpu; |
145 | 342 | ||
146 | if (!allocate_msrs()) | 343 | if (!allocate_msrs()) |
147 | return -ENOMEM; | 344 | err = -ENOMEM; |
345 | else if (!nmi_setup_mux()) | ||
346 | err = -ENOMEM; | ||
347 | else | ||
348 | err = register_die_notifier(&profile_exceptions_nb); | ||
148 | 349 | ||
149 | err = register_die_notifier(&profile_exceptions_nb); | ||
150 | if (err) { | 350 | if (err) { |
151 | free_msrs(); | 351 | free_msrs(); |
352 | nmi_shutdown_mux(); | ||
152 | return err; | 353 | return err; |
153 | } | 354 | } |
154 | 355 | ||
@@ -159,45 +360,38 @@ static int nmi_setup(void) | |||
159 | /* Assume saved/restored counters are the same on all CPUs */ | 360 | /* Assume saved/restored counters are the same on all CPUs */ |
160 | model->fill_in_addresses(&per_cpu(cpu_msrs, 0)); | 361 | model->fill_in_addresses(&per_cpu(cpu_msrs, 0)); |
161 | for_each_possible_cpu(cpu) { | 362 | for_each_possible_cpu(cpu) { |
162 | if (cpu != 0) { | 363 | if (!cpu) |
163 | memcpy(per_cpu(cpu_msrs, cpu).counters, | 364 | continue; |
164 | per_cpu(cpu_msrs, 0).counters, | 365 | |
165 | sizeof(struct op_msr) * model->num_counters); | 366 | memcpy(per_cpu(cpu_msrs, cpu).counters, |
166 | 367 | per_cpu(cpu_msrs, 0).counters, | |
167 | memcpy(per_cpu(cpu_msrs, cpu).controls, | 368 | sizeof(struct op_msr) * model->num_counters); |
168 | per_cpu(cpu_msrs, 0).controls, | 369 | |
169 | sizeof(struct op_msr) * model->num_controls); | 370 | memcpy(per_cpu(cpu_msrs, cpu).controls, |
170 | } | 371 | per_cpu(cpu_msrs, 0).controls, |
372 | sizeof(struct op_msr) * model->num_controls); | ||
171 | 373 | ||
374 | mux_clone(cpu); | ||
172 | } | 375 | } |
173 | on_each_cpu(nmi_save_registers, NULL, 1); | ||
174 | on_each_cpu(nmi_cpu_setup, NULL, 1); | 376 | on_each_cpu(nmi_cpu_setup, NULL, 1); |
175 | nmi_enabled = 1; | 377 | nmi_enabled = 1; |
176 | return 0; | 378 | return 0; |
177 | } | 379 | } |
178 | 380 | ||
179 | static void nmi_restore_registers(struct op_msrs *msrs) | 381 | static void nmi_cpu_restore_registers(struct op_msrs *msrs) |
180 | { | 382 | { |
181 | unsigned int const nr_ctrs = model->num_counters; | ||
182 | unsigned int const nr_ctrls = model->num_controls; | ||
183 | struct op_msr *counters = msrs->counters; | 383 | struct op_msr *counters = msrs->counters; |
184 | struct op_msr *controls = msrs->controls; | 384 | struct op_msr *controls = msrs->controls; |
185 | unsigned int i; | 385 | unsigned int i; |
186 | 386 | ||
187 | for (i = 0; i < nr_ctrls; ++i) { | 387 | for (i = 0; i < model->num_controls; ++i) { |
188 | if (controls[i].addr) { | 388 | if (controls[i].addr) |
189 | wrmsr(controls[i].addr, | 389 | wrmsrl(controls[i].addr, controls[i].saved); |
190 | controls[i].saved.low, | ||
191 | controls[i].saved.high); | ||
192 | } | ||
193 | } | 390 | } |
194 | 391 | ||
195 | for (i = 0; i < nr_ctrs; ++i) { | 392 | for (i = 0; i < model->num_counters; ++i) { |
196 | if (counters[i].addr) { | 393 | if (counters[i].addr) |
197 | wrmsr(counters[i].addr, | 394 | wrmsrl(counters[i].addr, counters[i].saved); |
198 | counters[i].saved.low, | ||
199 | counters[i].saved.high); | ||
200 | } | ||
201 | } | 395 | } |
202 | } | 396 | } |
203 | 397 | ||
@@ -205,7 +399,7 @@ static void nmi_cpu_shutdown(void *dummy) | |||
205 | { | 399 | { |
206 | unsigned int v; | 400 | unsigned int v; |
207 | int cpu = smp_processor_id(); | 401 | int cpu = smp_processor_id(); |
208 | struct op_msrs *msrs = &__get_cpu_var(cpu_msrs); | 402 | struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu); |
209 | 403 | ||
210 | /* restoring APIC_LVTPC can trigger an apic error because the delivery | 404 | /* restoring APIC_LVTPC can trigger an apic error because the delivery |
211 | * mode and vector nr combination can be illegal. That's by design: on | 405 | * mode and vector nr combination can be illegal. That's by design: on |
@@ -216,7 +410,7 @@ static void nmi_cpu_shutdown(void *dummy) | |||
216 | apic_write(APIC_LVTERR, v | APIC_LVT_MASKED); | 410 | apic_write(APIC_LVTERR, v | APIC_LVT_MASKED); |
217 | apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu)); | 411 | apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu)); |
218 | apic_write(APIC_LVTERR, v); | 412 | apic_write(APIC_LVTERR, v); |
219 | nmi_restore_registers(msrs); | 413 | nmi_cpu_restore_registers(msrs); |
220 | } | 414 | } |
221 | 415 | ||
222 | static void nmi_shutdown(void) | 416 | static void nmi_shutdown(void) |
@@ -226,42 +420,18 @@ static void nmi_shutdown(void) | |||
226 | nmi_enabled = 0; | 420 | nmi_enabled = 0; |
227 | on_each_cpu(nmi_cpu_shutdown, NULL, 1); | 421 | on_each_cpu(nmi_cpu_shutdown, NULL, 1); |
228 | unregister_die_notifier(&profile_exceptions_nb); | 422 | unregister_die_notifier(&profile_exceptions_nb); |
423 | nmi_shutdown_mux(); | ||
229 | msrs = &get_cpu_var(cpu_msrs); | 424 | msrs = &get_cpu_var(cpu_msrs); |
230 | model->shutdown(msrs); | 425 | model->shutdown(msrs); |
231 | free_msrs(); | 426 | free_msrs(); |
232 | put_cpu_var(cpu_msrs); | 427 | put_cpu_var(cpu_msrs); |
233 | } | 428 | } |
234 | 429 | ||
235 | static void nmi_cpu_start(void *dummy) | ||
236 | { | ||
237 | struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs); | ||
238 | model->start(msrs); | ||
239 | } | ||
240 | |||
241 | static int nmi_start(void) | ||
242 | { | ||
243 | on_each_cpu(nmi_cpu_start, NULL, 1); | ||
244 | return 0; | ||
245 | } | ||
246 | |||
247 | static void nmi_cpu_stop(void *dummy) | ||
248 | { | ||
249 | struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs); | ||
250 | model->stop(msrs); | ||
251 | } | ||
252 | |||
253 | static void nmi_stop(void) | ||
254 | { | ||
255 | on_each_cpu(nmi_cpu_stop, NULL, 1); | ||
256 | } | ||
257 | |||
258 | struct op_counter_config counter_config[OP_MAX_COUNTER]; | ||
259 | |||
260 | static int nmi_create_files(struct super_block *sb, struct dentry *root) | 430 | static int nmi_create_files(struct super_block *sb, struct dentry *root) |
261 | { | 431 | { |
262 | unsigned int i; | 432 | unsigned int i; |
263 | 433 | ||
264 | for (i = 0; i < model->num_counters; ++i) { | 434 | for (i = 0; i < model->num_virt_counters; ++i) { |
265 | struct dentry *dir; | 435 | struct dentry *dir; |
266 | char buf[4]; | 436 | char buf[4]; |
267 | 437 | ||
@@ -270,7 +440,7 @@ static int nmi_create_files(struct super_block *sb, struct dentry *root) | |||
270 | * NOTE: assumes 1:1 mapping here (that counters are organized | 440 | * NOTE: assumes 1:1 mapping here (that counters are organized |
271 | * sequentially in their struct assignment). | 441 | * sequentially in their struct assignment). |
272 | */ | 442 | */ |
273 | if (unlikely(!avail_to_resrv_perfctr_nmi_bit(i))) | 443 | if (!avail_to_resrv_perfctr_nmi_bit(op_x86_virt_to_phys(i))) |
274 | continue; | 444 | continue; |
275 | 445 | ||
276 | snprintf(buf, sizeof(buf), "%d", i); | 446 | snprintf(buf, sizeof(buf), "%d", i); |
@@ -402,6 +572,7 @@ module_param_call(cpu_type, force_cpu_type, NULL, NULL, 0); | |||
402 | static int __init ppro_init(char **cpu_type) | 572 | static int __init ppro_init(char **cpu_type) |
403 | { | 573 | { |
404 | __u8 cpu_model = boot_cpu_data.x86_model; | 574 | __u8 cpu_model = boot_cpu_data.x86_model; |
575 | struct op_x86_model_spec *spec = &op_ppro_spec; /* default */ | ||
405 | 576 | ||
406 | if (force_arch_perfmon && cpu_has_arch_perfmon) | 577 | if (force_arch_perfmon && cpu_has_arch_perfmon) |
407 | return 0; | 578 | return 0; |
@@ -428,7 +599,7 @@ static int __init ppro_init(char **cpu_type) | |||
428 | *cpu_type = "i386/core_2"; | 599 | *cpu_type = "i386/core_2"; |
429 | break; | 600 | break; |
430 | case 26: | 601 | case 26: |
431 | arch_perfmon_setup_counters(); | 602 | spec = &op_arch_perfmon_spec; |
432 | *cpu_type = "i386/core_i7"; | 603 | *cpu_type = "i386/core_i7"; |
433 | break; | 604 | break; |
434 | case 28: | 605 | case 28: |
@@ -439,17 +610,7 @@ static int __init ppro_init(char **cpu_type) | |||
439 | return 0; | 610 | return 0; |
440 | } | 611 | } |
441 | 612 | ||
442 | model = &op_ppro_spec; | 613 | model = spec; |
443 | return 1; | ||
444 | } | ||
445 | |||
446 | static int __init arch_perfmon_init(char **cpu_type) | ||
447 | { | ||
448 | if (!cpu_has_arch_perfmon) | ||
449 | return 0; | ||
450 | *cpu_type = "i386/arch_perfmon"; | ||
451 | model = &op_arch_perfmon_spec; | ||
452 | arch_perfmon_setup_counters(); | ||
453 | return 1; | 614 | return 1; |
454 | } | 615 | } |
455 | 616 | ||
@@ -471,27 +632,26 @@ int __init op_nmi_init(struct oprofile_operations *ops) | |||
471 | /* Needs to be at least an Athlon (or hammer in 32bit mode) */ | 632 | /* Needs to be at least an Athlon (or hammer in 32bit mode) */ |
472 | 633 | ||
473 | switch (family) { | 634 | switch (family) { |
474 | default: | ||
475 | return -ENODEV; | ||
476 | case 6: | 635 | case 6: |
477 | model = &op_amd_spec; | ||
478 | cpu_type = "i386/athlon"; | 636 | cpu_type = "i386/athlon"; |
479 | break; | 637 | break; |
480 | case 0xf: | 638 | case 0xf: |
481 | model = &op_amd_spec; | 639 | /* |
482 | /* Actually it could be i386/hammer too, but give | 640 | * Actually it could be i386/hammer too, but |
483 | user space an consistent name. */ | 641 | * give user space a consistent name. |
642 | */ | ||
484 | cpu_type = "x86-64/hammer"; | 643 | cpu_type = "x86-64/hammer"; |
485 | break; | 644 | break; |
486 | case 0x10: | 645 | case 0x10: |
487 | model = &op_amd_spec; | ||
488 | cpu_type = "x86-64/family10"; | 646 | cpu_type = "x86-64/family10"; |
489 | break; | 647 | break; |
490 | case 0x11: | 648 | case 0x11: |
491 | model = &op_amd_spec; | ||
492 | cpu_type = "x86-64/family11h"; | 649 | cpu_type = "x86-64/family11h"; |
493 | break; | 650 | break; |
651 | default: | ||
652 | return -ENODEV; | ||
494 | } | 653 | } |
654 | model = &op_amd_spec; | ||
495 | break; | 655 | break; |
496 | 656 | ||
497 | case X86_VENDOR_INTEL: | 657 | case X86_VENDOR_INTEL: |
@@ -510,8 +670,15 @@ int __init op_nmi_init(struct oprofile_operations *ops) | |||
510 | break; | 670 | break; |
511 | } | 671 | } |
512 | 672 | ||
513 | if (!cpu_type && !arch_perfmon_init(&cpu_type)) | 673 | if (cpu_type) |
674 | break; | ||
675 | |||
676 | if (!cpu_has_arch_perfmon) | ||
514 | return -ENODEV; | 677 | return -ENODEV; |
678 | |||
679 | /* use arch perfmon as fallback */ | ||
680 | cpu_type = "i386/arch_perfmon"; | ||
681 | model = &op_arch_perfmon_spec; | ||
515 | break; | 682 | break; |
516 | 683 | ||
517 | default: | 684 | default: |
@@ -522,18 +689,23 @@ int __init op_nmi_init(struct oprofile_operations *ops) | |||
522 | register_cpu_notifier(&oprofile_cpu_nb); | 689 | register_cpu_notifier(&oprofile_cpu_nb); |
523 | #endif | 690 | #endif |
524 | /* default values, can be overwritten by model */ | 691 | /* default values, can be overwritten by model */ |
525 | ops->create_files = nmi_create_files; | 692 | ops->create_files = nmi_create_files; |
526 | ops->setup = nmi_setup; | 693 | ops->setup = nmi_setup; |
527 | ops->shutdown = nmi_shutdown; | 694 | ops->shutdown = nmi_shutdown; |
528 | ops->start = nmi_start; | 695 | ops->start = nmi_start; |
529 | ops->stop = nmi_stop; | 696 | ops->stop = nmi_stop; |
530 | ops->cpu_type = cpu_type; | 697 | ops->cpu_type = cpu_type; |
531 | 698 | ||
532 | if (model->init) | 699 | if (model->init) |
533 | ret = model->init(ops); | 700 | ret = model->init(ops); |
534 | if (ret) | 701 | if (ret) |
535 | return ret; | 702 | return ret; |
536 | 703 | ||
704 | if (!model->num_virt_counters) | ||
705 | model->num_virt_counters = model->num_counters; | ||
706 | |||
707 | mux_init(ops); | ||
708 | |||
537 | init_sysfs(); | 709 | init_sysfs(); |
538 | using_nmi = 1; | 710 | using_nmi = 1; |
539 | printk(KERN_INFO "oprofile: using NMI interrupt.\n"); | 711 | printk(KERN_INFO "oprofile: using NMI interrupt.\n"); |
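The multiplexing core added to nmi_int.c above slides a window of num_counters physical counters over num_virt_counters virtual ones, wrapping to the first set when the window runs off the end or reaches an unconfigured slot. A standalone model of that rotation (note this sketch bounds-checks with >=, since an index equal to the array size must also wrap):

#define NUM_COUNTERS       4	/* physical counters (illustrative) */
#define NUM_VIRT_COUNTERS 32	/* virtual counters (illustrative) */

/* count == 0 means the virtual counter is unconfigured */
static unsigned long counter_count[NUM_VIRT_COUNTERS];

static int next_switch_index(int si)
{
	si += NUM_COUNTERS;	/* advance to the next counter set */
	if (si >= NUM_VIRT_COUNTERS || counter_count[si] == 0)
		return 0;	/* wrap around to the first set */
	return si;
}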
diff --git a/arch/x86/oprofile/op_counter.h b/arch/x86/oprofile/op_counter.h index 91b6a116165e..e28398df0df2 100644 --- a/arch/x86/oprofile/op_counter.h +++ b/arch/x86/oprofile/op_counter.h | |||
@@ -10,7 +10,7 @@ | |||
10 | #ifndef OP_COUNTER_H | 10 | #ifndef OP_COUNTER_H |
11 | #define OP_COUNTER_H | 11 | #define OP_COUNTER_H |
12 | 12 | ||
13 | #define OP_MAX_COUNTER 8 | 13 | #define OP_MAX_COUNTER 32 |
14 | 14 | ||
15 | /* Per-perfctr configuration as set via | 15 | /* Per-perfctr configuration as set via |
16 | * oprofilefs. | 16 | * oprofilefs. |
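op_x86_get_ctrl() in the nmi_int.c hunk above packs a counter configuration into a 64-bit event-select value, and with OP_MAX_COUNTER raised to 32 the same helper now serves every virtual counter. The bit layout spelled out with explicit shifts (bit positions follow the diff; the 64-bit cast before the high shift is this sketch's own precaution):

#include <stdint.h>

#define EVTSEL_USR (1ULL << 16)	/* count in user mode */
#define EVTSEL_OS  (1ULL << 17)	/* count in kernel mode */
#define EVTSEL_INT (1ULL << 20)	/* interrupt on counter overflow */

static uint64_t build_evtsel(uint16_t event, uint8_t unit_mask,
			     int user, int kernel)
{
	uint64_t val = EVTSEL_INT;

	if (user)
		val |= EVTSEL_USR;
	if (kernel)
		val |= EVTSEL_OS;
	val |= (uint64_t)unit_mask << 8;		/* bits 15:8 */
	val |= event & 0xFF;				/* event select bits 7:0 */
	val |= ((uint64_t)event & 0x0F00) << 24;	/* AMD extended bits 35:32 */
	return val;
}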
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index 8fdf06e4edf9..39686c29f03a 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c | |||
@@ -9,12 +9,15 @@ | |||
9 | * @author Philippe Elie | 9 | * @author Philippe Elie |
10 | * @author Graydon Hoare | 10 | * @author Graydon Hoare |
11 | * @author Robert Richter <robert.richter@amd.com> | 11 | * @author Robert Richter <robert.richter@amd.com> |
12 | * @author Barry Kasindorf | 12 | * @author Barry Kasindorf <barry.kasindorf@amd.com> |
13 | * @author Jason Yeh <jason.yeh@amd.com> | ||
14 | * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com> | ||
13 | */ | 15 | */ |
14 | 16 | ||
15 | #include <linux/oprofile.h> | 17 | #include <linux/oprofile.h> |
16 | #include <linux/device.h> | 18 | #include <linux/device.h> |
17 | #include <linux/pci.h> | 19 | #include <linux/pci.h> |
20 | #include <linux/percpu.h> | ||
18 | 21 | ||
19 | #include <asm/ptrace.h> | 22 | #include <asm/ptrace.h> |
20 | #include <asm/msr.h> | 23 | #include <asm/msr.h> |
@@ -25,43 +28,36 @@ | |||
25 | 28 | ||
26 | #define NUM_COUNTERS 4 | 29 | #define NUM_COUNTERS 4 |
27 | #define NUM_CONTROLS 4 | 30 | #define NUM_CONTROLS 4 |
31 | #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX | ||
32 | #define NUM_VIRT_COUNTERS 32 | ||
33 | #define NUM_VIRT_CONTROLS 32 | ||
34 | #else | ||
35 | #define NUM_VIRT_COUNTERS NUM_COUNTERS | ||
36 | #define NUM_VIRT_CONTROLS NUM_CONTROLS | ||
37 | #endif | ||
38 | |||
39 | #define OP_EVENT_MASK 0x0FFF | ||
40 | #define OP_CTR_OVERFLOW (1ULL<<31) | ||
28 | 41 | ||
29 | #define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0) | 42 | #define MSR_AMD_EVENTSEL_RESERVED ((0xFFFFFCF0ULL<<32)|(1ULL<<21)) |
30 | #define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0) | 43 | |
31 | #define CTR_WRITE(l, msrs, c) do {wrmsr(msrs->counters[(c)].addr, -(unsigned int)(l), -1); } while (0) | 44 | static unsigned long reset_value[NUM_VIRT_COUNTERS]; |
32 | #define CTR_OVERFLOWED(n) (!((n) & (1U<<31))) | ||
33 | |||
34 | #define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0) | ||
35 | #define CTRL_READ(l, h, msrs, c) do {rdmsr(msrs->controls[(c)].addr, (l), (h)); } while (0) | ||
36 | #define CTRL_WRITE(l, h, msrs, c) do {wrmsr(msrs->controls[(c)].addr, (l), (h)); } while (0) | ||
37 | #define CTRL_SET_ACTIVE(n) (n |= (1<<22)) | ||
38 | #define CTRL_SET_INACTIVE(n) (n &= ~(1<<22)) | ||
39 | #define CTRL_CLEAR_LO(x) (x &= (1<<21)) | ||
40 | #define CTRL_CLEAR_HI(x) (x &= 0xfffffcf0) | ||
41 | #define CTRL_SET_ENABLE(val) (val |= 1<<20) | ||
42 | #define CTRL_SET_USR(val, u) (val |= ((u & 1) << 16)) | ||
43 | #define CTRL_SET_KERN(val, k) (val |= ((k & 1) << 17)) | ||
44 | #define CTRL_SET_UM(val, m) (val |= (m << 8)) | ||
45 | #define CTRL_SET_EVENT_LOW(val, e) (val |= (e & 0xff)) | ||
46 | #define CTRL_SET_EVENT_HIGH(val, e) (val |= ((e >> 8) & 0xf)) | ||
47 | #define CTRL_SET_HOST_ONLY(val, h) (val |= ((h & 1) << 9)) | ||
48 | #define CTRL_SET_GUEST_ONLY(val, h) (val |= ((h & 1) << 8)) | ||
49 | |||
50 | static unsigned long reset_value[NUM_COUNTERS]; | ||
51 | 45 | ||
52 | #ifdef CONFIG_OPROFILE_IBS | 46 | #ifdef CONFIG_OPROFILE_IBS |
53 | 47 | ||
54 | /* IbsFetchCtl bits/masks */ | 48 | /* IbsFetchCtl bits/masks */ |
55 | #define IBS_FETCH_HIGH_VALID_BIT (1UL << 17) /* bit 49 */ | 49 | #define IBS_FETCH_RAND_EN (1ULL<<57) |
56 | #define IBS_FETCH_HIGH_ENABLE (1UL << 16) /* bit 48 */ | 50 | #define IBS_FETCH_VAL (1ULL<<49) |
57 | #define IBS_FETCH_LOW_MAX_CNT_MASK 0x0000FFFFUL /* MaxCnt mask */ | 51 | #define IBS_FETCH_ENABLE (1ULL<<48) |
52 | #define IBS_FETCH_CNT_MASK 0xFFFF0000ULL | ||
58 | 53 | ||
59 | /*IbsOpCtl bits */ | 54 | /*IbsOpCtl bits */ |
60 | #define IBS_OP_LOW_VALID_BIT (1ULL<<18) /* bit 18 */ | 55 | #define IBS_OP_CNT_CTL (1ULL<<19) |
61 | #define IBS_OP_LOW_ENABLE (1ULL<<17) /* bit 17 */ | 56 | #define IBS_OP_VAL (1ULL<<18) |
57 | #define IBS_OP_ENABLE (1ULL<<17) | ||
62 | 58 | ||
63 | #define IBS_FETCH_SIZE 6 | 59 | #define IBS_FETCH_SIZE 6 |
64 | #define IBS_OP_SIZE 12 | 60 | #define IBS_OP_SIZE 12 |
65 | 61 | ||
66 | static int has_ibs; /* AMD Family10h and later */ | 62 | static int has_ibs; /* AMD Family10h and later */ |
67 | 63 | ||
@@ -78,6 +74,45 @@ static struct op_ibs_config ibs_config; | |||
78 | 74 | ||
79 | #endif | 75 | #endif |
80 | 76 | ||
77 | #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX | ||
78 | |||
79 | static void op_mux_fill_in_addresses(struct op_msrs * const msrs) | ||
80 | { | ||
81 | int i; | ||
82 | |||
83 | for (i = 0; i < NUM_VIRT_COUNTERS; i++) { | ||
84 | int hw_counter = op_x86_virt_to_phys(i); | ||
85 | if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i)) | ||
86 | msrs->multiplex[i].addr = MSR_K7_PERFCTR0 + hw_counter; | ||
87 | else | ||
88 | msrs->multiplex[i].addr = 0; | ||
89 | } | ||
90 | } | ||
91 | |||
92 | static void op_mux_switch_ctrl(struct op_x86_model_spec const *model, | ||
93 | struct op_msrs const * const msrs) | ||
94 | { | ||
95 | u64 val; | ||
96 | int i; | ||
97 | |||
98 | /* enable active counters */ | ||
99 | for (i = 0; i < NUM_COUNTERS; ++i) { | ||
100 | int virt = op_x86_phys_to_virt(i); | ||
101 | if (!counter_config[virt].enabled) | ||
102 | continue; | ||
103 | rdmsrl(msrs->controls[i].addr, val); | ||
104 | val &= model->reserved; | ||
105 | val |= op_x86_get_ctrl(model, &counter_config[virt]); | ||
106 | wrmsrl(msrs->controls[i].addr, val); | ||
107 | } | ||
108 | } | ||
109 | |||
110 | #else | ||
111 | |||
112 | static inline void op_mux_fill_in_addresses(struct op_msrs * const msrs) { } | ||
113 | |||
114 | #endif | ||
115 | |||
81 | /* functions for op_amd_spec */ | 116 | /* functions for op_amd_spec */ |
82 | 117 | ||
83 | static void op_amd_fill_in_addresses(struct op_msrs * const msrs) | 118 | static void op_amd_fill_in_addresses(struct op_msrs * const msrs) |
@@ -97,150 +132,174 @@ static void op_amd_fill_in_addresses(struct op_msrs * const msrs) | |||
97 | else | 132 | else |
98 | msrs->controls[i].addr = 0; | 133 | msrs->controls[i].addr = 0; |
99 | } | 134 | } |
100 | } | ||
101 | 135 | ||
136 | op_mux_fill_in_addresses(msrs); | ||
137 | } | ||
102 | 138 | ||
103 | static void op_amd_setup_ctrs(struct op_msrs const * const msrs) | 139 | static void op_amd_setup_ctrs(struct op_x86_model_spec const *model, |
140 | struct op_msrs const * const msrs) | ||
104 | { | 141 | { |
105 | unsigned int low, high; | 142 | u64 val; |
106 | int i; | 143 | int i; |
107 | 144 | ||
145 | /* setup reset_value */ | ||
146 | for (i = 0; i < NUM_VIRT_COUNTERS; ++i) { | ||
147 | if (counter_config[i].enabled) | ||
148 | reset_value[i] = counter_config[i].count; | ||
149 | else | ||
150 | reset_value[i] = 0; | ||
151 | } | ||
152 | |||
108 | /* clear all counters */ | 153 | /* clear all counters */ |
109 | for (i = 0 ; i < NUM_CONTROLS; ++i) { | 154 | for (i = 0; i < NUM_CONTROLS; ++i) { |
110 | if (unlikely(!CTRL_IS_RESERVED(msrs, i))) | 155 | if (unlikely(!msrs->controls[i].addr)) |
111 | continue; | 156 | continue; |
112 | CTRL_READ(low, high, msrs, i); | 157 | rdmsrl(msrs->controls[i].addr, val); |
113 | CTRL_CLEAR_LO(low); | 158 | val &= model->reserved; |
114 | CTRL_CLEAR_HI(high); | 159 | wrmsrl(msrs->controls[i].addr, val); |
115 | CTRL_WRITE(low, high, msrs, i); | ||
116 | } | 160 | } |
117 | 161 | ||
118 | /* avoid a false detection of ctr overflows in NMI handler */ | 162 | /* avoid a false detection of ctr overflows in NMI handler */ |
119 | for (i = 0; i < NUM_COUNTERS; ++i) { | 163 | for (i = 0; i < NUM_COUNTERS; ++i) { |
120 | if (unlikely(!CTR_IS_RESERVED(msrs, i))) | 164 | if (unlikely(!msrs->counters[i].addr)) |
121 | continue; | 165 | continue; |
122 | CTR_WRITE(1, msrs, i); | 166 | wrmsrl(msrs->counters[i].addr, -1LL); |
123 | } | 167 | } |
124 | 168 | ||
125 | /* enable active counters */ | 169 | /* enable active counters */ |
126 | for (i = 0; i < NUM_COUNTERS; ++i) { | 170 | for (i = 0; i < NUM_COUNTERS; ++i) { |
127 | if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) { | 171 | int virt = op_x86_phys_to_virt(i); |
128 | reset_value[i] = counter_config[i].count; | 172 | if (!counter_config[virt].enabled) |
173 | continue; | ||
174 | if (!msrs->counters[i].addr) | ||
175 | continue; | ||
129 | 176 | ||
130 | CTR_WRITE(counter_config[i].count, msrs, i); | 177 | /* setup counter registers */ |
131 | 178 | wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]); | |
132 | CTRL_READ(low, high, msrs, i); | 179 | |
133 | CTRL_CLEAR_LO(low); | 180 | /* setup control registers */ |
134 | CTRL_CLEAR_HI(high); | 181 | rdmsrl(msrs->controls[i].addr, val); |
135 | CTRL_SET_ENABLE(low); | 182 | val &= model->reserved; |
136 | CTRL_SET_USR(low, counter_config[i].user); | 183 | val |= op_x86_get_ctrl(model, &counter_config[virt]); |
137 | CTRL_SET_KERN(low, counter_config[i].kernel); | 184 | wrmsrl(msrs->controls[i].addr, val); |
138 | CTRL_SET_UM(low, counter_config[i].unit_mask); | ||
139 | CTRL_SET_EVENT_LOW(low, counter_config[i].event); | ||
140 | CTRL_SET_EVENT_HIGH(high, counter_config[i].event); | ||
141 | CTRL_SET_HOST_ONLY(high, 0); | ||
142 | CTRL_SET_GUEST_ONLY(high, 0); | ||
143 | |||
144 | CTRL_WRITE(low, high, msrs, i); | ||
145 | } else { | ||
146 | reset_value[i] = 0; | ||
147 | } | ||
148 | } | 185 | } |
149 | } | 186 | } |
150 | 187 | ||
151 | #ifdef CONFIG_OPROFILE_IBS | 188 | #ifdef CONFIG_OPROFILE_IBS |
152 | 189 | ||
153 | static inline int | 190 | static inline void |
154 | op_amd_handle_ibs(struct pt_regs * const regs, | 191 | op_amd_handle_ibs(struct pt_regs * const regs, |
155 | struct op_msrs const * const msrs) | 192 | struct op_msrs const * const msrs) |
156 | { | 193 | { |
157 | u32 low, high; | 194 | u64 val, ctl; |
158 | u64 msr; | ||
159 | struct op_entry entry; | 195 | struct op_entry entry; |
160 | 196 | ||
161 | if (!has_ibs) | 197 | if (!has_ibs) |
162 | return 1; | 198 | return; |
163 | 199 | ||
164 | if (ibs_config.fetch_enabled) { | 200 | if (ibs_config.fetch_enabled) { |
165 | rdmsr(MSR_AMD64_IBSFETCHCTL, low, high); | 201 | rdmsrl(MSR_AMD64_IBSFETCHCTL, ctl); |
166 | if (high & IBS_FETCH_HIGH_VALID_BIT) { | 202 | if (ctl & IBS_FETCH_VAL) { |
167 | rdmsrl(MSR_AMD64_IBSFETCHLINAD, msr); | 203 | rdmsrl(MSR_AMD64_IBSFETCHLINAD, val); |
168 | oprofile_write_reserve(&entry, regs, msr, | 204 | oprofile_write_reserve(&entry, regs, val, |
169 | IBS_FETCH_CODE, IBS_FETCH_SIZE); | 205 | IBS_FETCH_CODE, IBS_FETCH_SIZE); |
170 | oprofile_add_data(&entry, (u32)msr); | 206 | oprofile_add_data64(&entry, val); |
171 | oprofile_add_data(&entry, (u32)(msr >> 32)); | 207 | oprofile_add_data64(&entry, ctl); |
172 | oprofile_add_data(&entry, low); | 208 | rdmsrl(MSR_AMD64_IBSFETCHPHYSAD, val); |
173 | oprofile_add_data(&entry, high); | 209 | oprofile_add_data64(&entry, val); |
174 | rdmsrl(MSR_AMD64_IBSFETCHPHYSAD, msr); | ||
175 | oprofile_add_data(&entry, (u32)msr); | ||
176 | oprofile_add_data(&entry, (u32)(msr >> 32)); | ||
177 | oprofile_write_commit(&entry); | 210 | oprofile_write_commit(&entry); |
178 | 211 | ||
179 | /* reenable the IRQ */ | 212 | /* reenable the IRQ */ |
180 | high &= ~IBS_FETCH_HIGH_VALID_BIT; | 213 | ctl &= ~(IBS_FETCH_VAL | IBS_FETCH_CNT_MASK); |
181 | high |= IBS_FETCH_HIGH_ENABLE; | 214 | ctl |= IBS_FETCH_ENABLE; |
182 | low &= IBS_FETCH_LOW_MAX_CNT_MASK; | 215 | wrmsrl(MSR_AMD64_IBSFETCHCTL, ctl); |
183 | wrmsr(MSR_AMD64_IBSFETCHCTL, low, high); | ||
184 | } | 216 | } |
185 | } | 217 | } |
186 | 218 | ||
187 | if (ibs_config.op_enabled) { | 219 | if (ibs_config.op_enabled) { |
188 | rdmsr(MSR_AMD64_IBSOPCTL, low, high); | 220 | rdmsrl(MSR_AMD64_IBSOPCTL, ctl); |
189 | if (low & IBS_OP_LOW_VALID_BIT) { | 221 | if (ctl & IBS_OP_VAL) { |
190 | rdmsrl(MSR_AMD64_IBSOPRIP, msr); | 222 | rdmsrl(MSR_AMD64_IBSOPRIP, val); |
191 | oprofile_write_reserve(&entry, regs, msr, | 223 | oprofile_write_reserve(&entry, regs, val, |
192 | IBS_OP_CODE, IBS_OP_SIZE); | 224 | IBS_OP_CODE, IBS_OP_SIZE); |
193 | oprofile_add_data(&entry, (u32)msr); | 225 | oprofile_add_data64(&entry, val); |
194 | oprofile_add_data(&entry, (u32)(msr >> 32)); | 226 | rdmsrl(MSR_AMD64_IBSOPDATA, val); |
195 | rdmsrl(MSR_AMD64_IBSOPDATA, msr); | 227 | oprofile_add_data64(&entry, val); |
196 | oprofile_add_data(&entry, (u32)msr); | 228 | rdmsrl(MSR_AMD64_IBSOPDATA2, val); |
197 | oprofile_add_data(&entry, (u32)(msr >> 32)); | 229 | oprofile_add_data64(&entry, val); |
198 | rdmsrl(MSR_AMD64_IBSOPDATA2, msr); | 230 | rdmsrl(MSR_AMD64_IBSOPDATA3, val); |
199 | oprofile_add_data(&entry, (u32)msr); | 231 | oprofile_add_data64(&entry, val); |
200 | oprofile_add_data(&entry, (u32)(msr >> 32)); | 232 | rdmsrl(MSR_AMD64_IBSDCLINAD, val); |
201 | rdmsrl(MSR_AMD64_IBSOPDATA3, msr); | 233 | oprofile_add_data64(&entry, val); |
202 | oprofile_add_data(&entry, (u32)msr); | 234 | rdmsrl(MSR_AMD64_IBSDCPHYSAD, val); |
203 | oprofile_add_data(&entry, (u32)(msr >> 32)); | 235 | oprofile_add_data64(&entry, val); |
204 | rdmsrl(MSR_AMD64_IBSDCLINAD, msr); | ||
205 | oprofile_add_data(&entry, (u32)msr); | ||
206 | oprofile_add_data(&entry, (u32)(msr >> 32)); | ||
207 | rdmsrl(MSR_AMD64_IBSDCPHYSAD, msr); | ||
208 | oprofile_add_data(&entry, (u32)msr); | ||
209 | oprofile_add_data(&entry, (u32)(msr >> 32)); | ||
210 | oprofile_write_commit(&entry); | 236 | oprofile_write_commit(&entry); |
211 | 237 | ||
212 | /* reenable the IRQ */ | 238 | /* reenable the IRQ */ |
213 | high = 0; | 239 | ctl &= ~IBS_OP_VAL & 0xFFFFFFFF; |
214 | low &= ~IBS_OP_LOW_VALID_BIT; | 240 | ctl |= IBS_OP_ENABLE; |
215 | low |= IBS_OP_LOW_ENABLE; | 241 | wrmsrl(MSR_AMD64_IBSOPCTL, ctl); |
216 | wrmsr(MSR_AMD64_IBSOPCTL, low, high); | ||
217 | } | 242 | } |
218 | } | 243 | } |
244 | } | ||
219 | 245 | ||
220 | return 1; | 246 | static inline void op_amd_start_ibs(void) |
247 | { | ||
248 | u64 val; | ||
249 | if (has_ibs && ibs_config.fetch_enabled) { | ||
250 | val = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF; | ||
251 | val |= ibs_config.rand_en ? IBS_FETCH_RAND_EN : 0; | ||
252 | val |= IBS_FETCH_ENABLE; | ||
253 | wrmsrl(MSR_AMD64_IBSFETCHCTL, val); | ||
254 | } | ||
255 | |||
256 | if (has_ibs && ibs_config.op_enabled) { | ||
257 | val = (ibs_config.max_cnt_op >> 4) & 0xFFFF; | ||
258 | val |= ibs_config.dispatched_ops ? IBS_OP_CNT_CTL : 0; | ||
259 | val |= IBS_OP_ENABLE; | ||
260 | wrmsrl(MSR_AMD64_IBSOPCTL, val); | ||
261 | } | ||
262 | } | ||
263 | |||
264 | static void op_amd_stop_ibs(void) | ||
265 | { | ||
266 | if (has_ibs && ibs_config.fetch_enabled) | ||
267 | /* clear max count and enable */ | ||
268 | wrmsrl(MSR_AMD64_IBSFETCHCTL, 0); | ||
269 | |||
270 | if (has_ibs && ibs_config.op_enabled) | ||
271 | /* clear max count and enable */ | ||
272 | wrmsrl(MSR_AMD64_IBSOPCTL, 0); | ||
221 | } | 273 | } |
222 | 274 | ||
275 | #else | ||
276 | |||
277 | static inline void op_amd_handle_ibs(struct pt_regs * const regs, | ||
278 | struct op_msrs const * const msrs) { } | ||
279 | static inline void op_amd_start_ibs(void) { } | ||
280 | static inline void op_amd_stop_ibs(void) { } | ||
281 | |||
223 | #endif | 282 | #endif |
224 | 283 | ||
225 | static int op_amd_check_ctrs(struct pt_regs * const regs, | 284 | static int op_amd_check_ctrs(struct pt_regs * const regs, |
226 | struct op_msrs const * const msrs) | 285 | struct op_msrs const * const msrs) |
227 | { | 286 | { |
228 | unsigned int low, high; | 287 | u64 val; |
229 | int i; | 288 | int i; |
230 | 289 | ||
231 | for (i = 0 ; i < NUM_COUNTERS; ++i) { | 290 | for (i = 0; i < NUM_COUNTERS; ++i) { |
232 | if (!reset_value[i]) | 291 | int virt = op_x86_phys_to_virt(i); |
292 | if (!reset_value[virt]) | ||
233 | continue; | 293 | continue; |
234 | CTR_READ(low, high, msrs, i); | 294 | rdmsrl(msrs->counters[i].addr, val); |
235 | if (CTR_OVERFLOWED(low)) { | 295 | /* bit is clear if overflowed: */ |
236 | oprofile_add_sample(regs, i); | 296 | if (val & OP_CTR_OVERFLOW) |
237 | CTR_WRITE(reset_value[i], msrs, i); | 297 | continue; |
238 | } | 298 | oprofile_add_sample(regs, virt); |
299 | wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]); | ||
239 | } | 300 | } |
240 | 301 | ||
241 | #ifdef CONFIG_OPROFILE_IBS | ||
242 | op_amd_handle_ibs(regs, msrs); | 302 | op_amd_handle_ibs(regs, msrs); |
243 | #endif | ||
244 | 303 | ||
245 | /* See op_model_ppro.c */ | 304 | /* See op_model_ppro.c */ |
246 | return 1; | 305 | return 1; |
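The converted op_amd_check_ctrs() above leans on a property of count-up performance counters: the driver seeds the MSR with -(u64)reset_value, so the counter overflows (and raises the NMI) after exactly reset_value events, and a sampled value with bit 31 clear means the wrap already happened, matching the "bit is clear if overflowed" comment. A minimal user-space sketch of that arithmetic, assuming a simulated 48-bit counter (the width is an assumption here, not something this hunk states):

    #include <stdio.h>
    #include <stdint.h>

    #define CTR_MASK        ((1ULL << 48) - 1)  /* assumed counter width */
    #define OP_CTR_OVERFLOW (1ULL << 31)        /* same bit the driver tests */

    int main(void)
    {
        uint64_t period = 100000;                /* plays reset_value[virt] */
        uint64_t ctr = (0 - period) & CTR_MASK;  /* wrmsrl(addr, -(u64)period) */

        /* freshly seeded: bit 31 is still set, so the NMI handler skips it */
        printf("seeded:     %#llx  overflowed=%d\n",
               (unsigned long long)ctr, !(ctr & OP_CTR_OVERFLOW));

        /* after 'period' events the counter wraps to a small value */
        ctr = (ctr + period) & CTR_MASK;
        printf("after wrap: %#llx  overflowed=%d\n",
               (unsigned long long)ctr, !(ctr & OP_CTR_OVERFLOW));
        return 0;
    }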
@@ -248,79 +307,50 @@ static int op_amd_check_ctrs(struct pt_regs * const regs, | |||
248 | 307 | ||
249 | static void op_amd_start(struct op_msrs const * const msrs) | 308 | static void op_amd_start(struct op_msrs const * const msrs) |
250 | { | 309 | { |
251 | unsigned int low, high; | 310 | u64 val; |
252 | int i; | 311 | int i; |
253 | for (i = 0 ; i < NUM_COUNTERS ; ++i) { | ||
254 | if (reset_value[i]) { | ||
255 | CTRL_READ(low, high, msrs, i); | ||
256 | CTRL_SET_ACTIVE(low); | ||
257 | CTRL_WRITE(low, high, msrs, i); | ||
258 | } | ||
259 | } | ||
260 | 312 | ||
261 | #ifdef CONFIG_OPROFILE_IBS | 313 | for (i = 0; i < NUM_COUNTERS; ++i) { |
262 | if (has_ibs && ibs_config.fetch_enabled) { | 314 | if (!reset_value[op_x86_phys_to_virt(i)]) |
263 | low = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF; | 315 | continue; |
264 | high = ((ibs_config.rand_en & 0x1) << 25) /* bit 57 */ | 316 | rdmsrl(msrs->controls[i].addr, val); |
265 | + IBS_FETCH_HIGH_ENABLE; | 317 | val |= ARCH_PERFMON_EVENTSEL0_ENABLE; |
266 | wrmsr(MSR_AMD64_IBSFETCHCTL, low, high); | 318 | wrmsrl(msrs->controls[i].addr, val); |
267 | } | 319 | } |
268 | 320 | ||
269 | if (has_ibs && ibs_config.op_enabled) { | 321 | op_amd_start_ibs(); |
270 | low = ((ibs_config.max_cnt_op >> 4) & 0xFFFF) | ||
271 | + ((ibs_config.dispatched_ops & 0x1) << 19) /* bit 19 */ | ||
272 | + IBS_OP_LOW_ENABLE; | ||
273 | high = 0; | ||
274 | wrmsr(MSR_AMD64_IBSOPCTL, low, high); | ||
275 | } | ||
276 | #endif | ||
277 | } | 322 | } |
278 | 323 | ||
279 | |||
280 | static void op_amd_stop(struct op_msrs const * const msrs) | 324 | static void op_amd_stop(struct op_msrs const * const msrs) |
281 | { | 325 | { |
282 | unsigned int low, high; | 326 | u64 val; |
283 | int i; | 327 | int i; |
284 | 328 | ||
285 | /* | 329 | /* |
286 | * Subtle: stop on all counters to avoid race with setting our | 330 | * Subtle: stop on all counters to avoid race with setting our |
287 | * pm callback | 331 | * pm callback |
288 | */ | 332 | */ |
289 | for (i = 0 ; i < NUM_COUNTERS ; ++i) { | 333 | for (i = 0; i < NUM_COUNTERS; ++i) { |
290 | if (!reset_value[i]) | 334 | if (!reset_value[op_x86_phys_to_virt(i)]) |
291 | continue; | 335 | continue; |
292 | CTRL_READ(low, high, msrs, i); | 336 | rdmsrl(msrs->controls[i].addr, val); |
293 | CTRL_SET_INACTIVE(low); | 337 | val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE; |
294 | CTRL_WRITE(low, high, msrs, i); | 338 | wrmsrl(msrs->controls[i].addr, val); |
295 | } | ||
296 | |||
297 | #ifdef CONFIG_OPROFILE_IBS | ||
298 | if (has_ibs && ibs_config.fetch_enabled) { | ||
299 | /* clear max count and enable */ | ||
300 | low = 0; | ||
301 | high = 0; | ||
302 | wrmsr(MSR_AMD64_IBSFETCHCTL, low, high); | ||
303 | } | 339 | } |
304 | 340 | ||
305 | if (has_ibs && ibs_config.op_enabled) { | 341 | op_amd_stop_ibs(); |
306 | /* clear max count and enable */ | ||
307 | low = 0; | ||
308 | high = 0; | ||
309 | wrmsr(MSR_AMD64_IBSOPCTL, low, high); | ||
310 | } | ||
311 | #endif | ||
312 | } | 342 | } |
313 | 343 | ||
314 | static void op_amd_shutdown(struct op_msrs const * const msrs) | 344 | static void op_amd_shutdown(struct op_msrs const * const msrs) |
315 | { | 345 | { |
316 | int i; | 346 | int i; |
317 | 347 | ||
318 | for (i = 0 ; i < NUM_COUNTERS ; ++i) { | 348 | for (i = 0; i < NUM_COUNTERS; ++i) { |
319 | if (CTR_IS_RESERVED(msrs, i)) | 349 | if (msrs->counters[i].addr) |
320 | release_perfctr_nmi(MSR_K7_PERFCTR0 + i); | 350 | release_perfctr_nmi(MSR_K7_PERFCTR0 + i); |
321 | } | 351 | } |
322 | for (i = 0 ; i < NUM_CONTROLS ; ++i) { | 352 | for (i = 0; i < NUM_CONTROLS; ++i) { |
323 | if (CTRL_IS_RESERVED(msrs, i)) | 353 | if (msrs->controls[i].addr) |
324 | release_evntsel_nmi(MSR_K7_EVNTSEL0 + i); | 354 | release_evntsel_nmi(MSR_K7_EVNTSEL0 + i); |
325 | } | 355 | } |
326 | } | 356 | } |
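op_amd_start() and op_amd_stop() now do a plain read-modify-write of the event-select MSR instead of going through the old CTRL_* macro layer: only the enable bit changes and every other configured field is preserved. A self-contained sketch of the pattern, assuming the usual bit-22 position of ARCH_PERFMON_EVENTSEL0_ENABLE (taken from the architectural perfmon layout, not from this hunk):

    #include <stdint.h>

    #define ARCH_PERFMON_EVENTSEL0_ENABLE (1ULL << 22)  /* assumed bit position */

    /* stand-ins for rdmsrl()/wrmsrl() so the sketch compiles in user space */
    static uint64_t fake_msr;
    static uint64_t msr_read(void)          { return fake_msr; }
    static void     msr_write(uint64_t val) { fake_msr = val; }

    static void ctr_start(void)
    {
        uint64_t val = msr_read();
        val |= ARCH_PERFMON_EVENTSEL0_ENABLE;   /* start counting */
        msr_write(val);
    }

    static void ctr_stop(void)
    {
        uint64_t val = msr_read();
        val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;  /* stop, keep configuration */
        msr_write(val);
    }

    int main(void)
    {
        ctr_start();  /* counting enabled, other EVNTSEL fields intact */
        ctr_stop();   /* counting disabled, configuration preserved   */
        return 0;
    }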
@@ -490,15 +520,21 @@ static void op_amd_exit(void) {} | |||
490 | 520 | ||
491 | #endif /* CONFIG_OPROFILE_IBS */ | 521 | #endif /* CONFIG_OPROFILE_IBS */ |
492 | 522 | ||
493 | struct op_x86_model_spec const op_amd_spec = { | 523 | struct op_x86_model_spec op_amd_spec = { |
494 | .init = op_amd_init, | ||
495 | .exit = op_amd_exit, | ||
496 | .num_counters = NUM_COUNTERS, | 524 | .num_counters = NUM_COUNTERS, |
497 | .num_controls = NUM_CONTROLS, | 525 | .num_controls = NUM_CONTROLS, |
526 | .num_virt_counters = NUM_VIRT_COUNTERS, | ||
527 | .reserved = MSR_AMD_EVENTSEL_RESERVED, | ||
528 | .event_mask = OP_EVENT_MASK, | ||
529 | .init = op_amd_init, | ||
530 | .exit = op_amd_exit, | ||
498 | .fill_in_addresses = &op_amd_fill_in_addresses, | 531 | .fill_in_addresses = &op_amd_fill_in_addresses, |
499 | .setup_ctrs = &op_amd_setup_ctrs, | 532 | .setup_ctrs = &op_amd_setup_ctrs, |
500 | .check_ctrs = &op_amd_check_ctrs, | 533 | .check_ctrs = &op_amd_check_ctrs, |
501 | .start = &op_amd_start, | 534 | .start = &op_amd_start, |
502 | .stop = &op_amd_stop, | 535 | .stop = &op_amd_stop, |
503 | .shutdown = &op_amd_shutdown | 536 | .shutdown = &op_amd_shutdown, |
537 | #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX | ||
538 | .switch_ctrl = &op_mux_switch_ctrl, | ||
539 | #endif | ||
504 | }; | 540 | }; |
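With CONFIG_OPROFILE_EVENT_MULTIPLEX, op_amd_spec now advertises more virtual counters (num_virt_counters) than the hardware provides (num_counters) and exports a switch_ctrl hook that the core calls on a timer to rotate events onto the physical counters. A rough sketch of the rotation idea, with hypothetical names and a simplified round-robin (the real op_mux_switch_ctrl() and op_x86_phys_to_virt() live in nmi_int.c and are not part of this diff):

    /* Hypothetical illustration: NUM_COUNTERS physical slots service
     * NUM_VIRT_COUNTERS configured events in round-robin fashion. */
    #define NUM_COUNTERS      4
    #define NUM_VIRT_COUNTERS 32

    static int mux_offset;                 /* advanced by the multiplex timer */

    static int demo_phys_to_virt(int phys) /* models op_x86_phys_to_virt() */
    {
        return (phys + mux_offset) % NUM_VIRT_COUNTERS;
    }

    static void demo_switch_ctrl(void)     /* models op_mux_switch_ctrl() */
    {
        mux_offset = (mux_offset + NUM_COUNTERS) % NUM_VIRT_COUNTERS;
        /* ...then reprogram each physical control MSR from the
         * counter_config[demo_phys_to_virt(i)] entry it now serves */
    }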
diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c index 819b131fd752..ac6b354becdf 100644 --- a/arch/x86/oprofile/op_model_p4.c +++ b/arch/x86/oprofile/op_model_p4.c | |||
@@ -32,6 +32,8 @@ | |||
32 | #define NUM_CCCRS_HT2 9 | 32 | #define NUM_CCCRS_HT2 9 |
33 | #define NUM_CONTROLS_HT2 (NUM_ESCRS_HT2 + NUM_CCCRS_HT2) | 33 | #define NUM_CONTROLS_HT2 (NUM_ESCRS_HT2 + NUM_CCCRS_HT2) |
34 | 34 | ||
35 | #define OP_CTR_OVERFLOW (1ULL<<31) | ||
36 | |||
35 | static unsigned int num_counters = NUM_COUNTERS_NON_HT; | 37 | static unsigned int num_counters = NUM_COUNTERS_NON_HT; |
36 | static unsigned int num_controls = NUM_CONTROLS_NON_HT; | 38 | static unsigned int num_controls = NUM_CONTROLS_NON_HT; |
37 | 39 | ||
@@ -350,8 +352,6 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = { | |||
350 | #define ESCR_SET_OS_1(escr, os) ((escr) |= (((os) & 1) << 1)) | 352 | #define ESCR_SET_OS_1(escr, os) ((escr) |= (((os) & 1) << 1)) |
351 | #define ESCR_SET_EVENT_SELECT(escr, sel) ((escr) |= (((sel) & 0x3f) << 25)) | 353 | #define ESCR_SET_EVENT_SELECT(escr, sel) ((escr) |= (((sel) & 0x3f) << 25)) |
352 | #define ESCR_SET_EVENT_MASK(escr, mask) ((escr) |= (((mask) & 0xffff) << 9)) | 354 | #define ESCR_SET_EVENT_MASK(escr, mask) ((escr) |= (((mask) & 0xffff) << 9)) |
353 | #define ESCR_READ(escr, high, ev, i) do {rdmsr(ev->bindings[(i)].escr_address, (escr), (high)); } while (0) | ||
354 | #define ESCR_WRITE(escr, high, ev, i) do {wrmsr(ev->bindings[(i)].escr_address, (escr), (high)); } while (0) | ||
355 | 355 | ||
356 | #define CCCR_RESERVED_BITS 0x38030FFF | 356 | #define CCCR_RESERVED_BITS 0x38030FFF |
357 | #define CCCR_CLEAR(cccr) ((cccr) &= CCCR_RESERVED_BITS) | 357 | #define CCCR_CLEAR(cccr) ((cccr) &= CCCR_RESERVED_BITS) |
@@ -361,17 +361,9 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = { | |||
361 | #define CCCR_SET_PMI_OVF_1(cccr) ((cccr) |= (1<<27)) | 361 | #define CCCR_SET_PMI_OVF_1(cccr) ((cccr) |= (1<<27)) |
362 | #define CCCR_SET_ENABLE(cccr) ((cccr) |= (1<<12)) | 362 | #define CCCR_SET_ENABLE(cccr) ((cccr) |= (1<<12)) |
363 | #define CCCR_SET_DISABLE(cccr) ((cccr) &= ~(1<<12)) | 363 | #define CCCR_SET_DISABLE(cccr) ((cccr) &= ~(1<<12)) |
364 | #define CCCR_READ(low, high, i) do {rdmsr(p4_counters[(i)].cccr_address, (low), (high)); } while (0) | ||
365 | #define CCCR_WRITE(low, high, i) do {wrmsr(p4_counters[(i)].cccr_address, (low), (high)); } while (0) | ||
366 | #define CCCR_OVF_P(cccr) ((cccr) & (1U<<31)) | 364 | #define CCCR_OVF_P(cccr) ((cccr) & (1U<<31)) |
367 | #define CCCR_CLEAR_OVF(cccr) ((cccr) &= (~(1U<<31))) | 365 | #define CCCR_CLEAR_OVF(cccr) ((cccr) &= (~(1U<<31))) |
368 | 366 | ||
369 | #define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0) | ||
370 | #define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0) | ||
371 | #define CTR_READ(l, h, i) do {rdmsr(p4_counters[(i)].counter_address, (l), (h)); } while (0) | ||
372 | #define CTR_WRITE(l, i) do {wrmsr(p4_counters[(i)].counter_address, -(u32)(l), -1); } while (0) | ||
373 | #define CTR_OVERFLOW_P(ctr) (!((ctr) & 0x80000000)) | ||
374 | |||
375 | 367 | ||
376 | /* this assigns a "stagger" to the current CPU, which is used throughout | 368 | /* this assigns a "stagger" to the current CPU, which is used throughout |
377 | the code in this module as an extra array offset, to select the "even" | 369 | the code in this module as an extra array offset, to select the "even" |
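The op_model_p4.c changes drop the ESCR_READ/ESCR_WRITE, CCCR_READ/CCCR_WRITE and CTR_* wrapper macros in favor of direct rdmsr()/wrmsr()/wrmsrl() calls, which removes a layer of indirection and makes the 32-bit versus 64-bit access width visible at each call site. How the two accessor families relate, as a user-space sketch (the real accessors are inline asm in asm/msr.h):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t lo = 0x89ABCDEF, hi = 0x01234567, lo2, hi2;
        uint64_t val = ((uint64_t)hi << 32) | lo;  /* what rdmsrl() returns   */

        lo2 = (uint32_t)val;                       /* what rdmsr() puts in lo */
        hi2 = (uint32_t)(val >> 32);               /* ...and in hi            */
        assert(lo2 == lo && hi2 == hi);
        return 0;
    }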
@@ -515,7 +507,7 @@ static void pmc_setup_one_p4_counter(unsigned int ctr) | |||
515 | if (ev->bindings[i].virt_counter & counter_bit) { | 507 | if (ev->bindings[i].virt_counter & counter_bit) { |
516 | 508 | ||
517 | /* modify ESCR */ | 509 | /* modify ESCR */ |
518 | ESCR_READ(escr, high, ev, i); | 510 | rdmsr(ev->bindings[i].escr_address, escr, high); |
519 | ESCR_CLEAR(escr); | 511 | ESCR_CLEAR(escr); |
520 | if (stag == 0) { | 512 | if (stag == 0) { |
521 | ESCR_SET_USR_0(escr, counter_config[ctr].user); | 513 | ESCR_SET_USR_0(escr, counter_config[ctr].user); |
@@ -526,10 +518,11 @@ static void pmc_setup_one_p4_counter(unsigned int ctr) | |||
526 | } | 518 | } |
527 | ESCR_SET_EVENT_SELECT(escr, ev->event_select); | 519 | ESCR_SET_EVENT_SELECT(escr, ev->event_select); |
528 | ESCR_SET_EVENT_MASK(escr, counter_config[ctr].unit_mask); | 520 | ESCR_SET_EVENT_MASK(escr, counter_config[ctr].unit_mask); |
529 | ESCR_WRITE(escr, high, ev, i); | 521 | wrmsr(ev->bindings[i].escr_address, escr, high); |
530 | 522 | ||
531 | /* modify CCCR */ | 523 | /* modify CCCR */ |
532 | CCCR_READ(cccr, high, VIRT_CTR(stag, ctr)); | 524 | rdmsr(p4_counters[VIRT_CTR(stag, ctr)].cccr_address, |
525 | cccr, high); | ||
533 | CCCR_CLEAR(cccr); | 526 | CCCR_CLEAR(cccr); |
534 | CCCR_SET_REQUIRED_BITS(cccr); | 527 | CCCR_SET_REQUIRED_BITS(cccr); |
535 | CCCR_SET_ESCR_SELECT(cccr, ev->escr_select); | 528 | CCCR_SET_ESCR_SELECT(cccr, ev->escr_select); |
@@ -537,7 +530,8 @@ static void pmc_setup_one_p4_counter(unsigned int ctr) | |||
537 | CCCR_SET_PMI_OVF_0(cccr); | 530 | CCCR_SET_PMI_OVF_0(cccr); |
538 | else | 531 | else |
539 | CCCR_SET_PMI_OVF_1(cccr); | 532 | CCCR_SET_PMI_OVF_1(cccr); |
540 | CCCR_WRITE(cccr, high, VIRT_CTR(stag, ctr)); | 533 | wrmsr(p4_counters[VIRT_CTR(stag, ctr)].cccr_address, |
534 | cccr, high); | ||
541 | return; | 535 | return; |
542 | } | 536 | } |
543 | } | 537 | } |
@@ -548,7 +542,8 @@ static void pmc_setup_one_p4_counter(unsigned int ctr) | |||
548 | } | 542 | } |
549 | 543 | ||
550 | 544 | ||
551 | static void p4_setup_ctrs(struct op_msrs const * const msrs) | 545 | static void p4_setup_ctrs(struct op_x86_model_spec const *model, |
546 | struct op_msrs const * const msrs) | ||
552 | { | 547 | { |
553 | unsigned int i; | 548 | unsigned int i; |
554 | unsigned int low, high; | 549 | unsigned int low, high; |
@@ -563,8 +558,8 @@ static void p4_setup_ctrs(struct op_msrs const * const msrs) | |||
563 | } | 558 | } |
564 | 559 | ||
565 | /* clear the cccrs we will use */ | 560 | /* clear the cccrs we will use */ |
566 | for (i = 0 ; i < num_counters ; i++) { | 561 | for (i = 0; i < num_counters; i++) { |
567 | if (unlikely(!CTRL_IS_RESERVED(msrs, i))) | 562 | if (unlikely(!msrs->controls[i].addr)) |
568 | continue; | 563 | continue; |
569 | rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high); | 564 | rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high); |
570 | CCCR_CLEAR(low); | 565 | CCCR_CLEAR(low); |
@@ -574,17 +569,18 @@ static void p4_setup_ctrs(struct op_msrs const * const msrs) | |||
574 | 569 | ||
575 | /* clear all escrs (including those outside our concern) */ | 570 | /* clear all escrs (including those outside our concern) */ |
576 | for (i = num_counters; i < num_controls; i++) { | 571 | for (i = num_counters; i < num_controls; i++) { |
577 | if (unlikely(!CTRL_IS_RESERVED(msrs, i))) | 572 | if (unlikely(!msrs->controls[i].addr)) |
578 | continue; | 573 | continue; |
579 | wrmsr(msrs->controls[i].addr, 0, 0); | 574 | wrmsr(msrs->controls[i].addr, 0, 0); |
580 | } | 575 | } |
581 | 576 | ||
582 | /* setup all counters */ | 577 | /* setup all counters */ |
583 | for (i = 0 ; i < num_counters ; ++i) { | 578 | for (i = 0; i < num_counters; ++i) { |
584 | if ((counter_config[i].enabled) && (CTRL_IS_RESERVED(msrs, i))) { | 579 | if (counter_config[i].enabled && msrs->controls[i].addr) { |
585 | reset_value[i] = counter_config[i].count; | 580 | reset_value[i] = counter_config[i].count; |
586 | pmc_setup_one_p4_counter(i); | 581 | pmc_setup_one_p4_counter(i); |
587 | CTR_WRITE(counter_config[i].count, VIRT_CTR(stag, i)); | 582 | wrmsrl(p4_counters[VIRT_CTR(stag, i)].counter_address, |
583 | -(u64)counter_config[i].count); | ||
588 | } else { | 584 | } else { |
589 | reset_value[i] = 0; | 585 | reset_value[i] = 0; |
590 | } | 586 | } |
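The removed CTR_WRITE macro wrote the two halves separately as wrmsr(addr, -(u32)(l), -1); the replacement writes the sign-extended value in one wrmsrl(). For any period below 2^32 the two encodings produce the same 64-bit image, which this user-space check (a sketch, not kernel code) confirms:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t count = 250000;              /* any 0 < count < 2^32     */
        uint32_t lo = -(uint32_t)count;       /* old CTR_WRITE low half   */
        uint32_t hi = (uint32_t)-1;           /* old CTR_WRITE high half  */
        uint64_t old_image = ((uint64_t)hi << 32) | lo;

        assert(old_image == -(uint64_t)count);/* new wrmsrl() image       */
        return 0;
    }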
@@ -624,14 +620,16 @@ static int p4_check_ctrs(struct pt_regs * const regs, | |||
624 | 620 | ||
625 | real = VIRT_CTR(stag, i); | 621 | real = VIRT_CTR(stag, i); |
626 | 622 | ||
627 | CCCR_READ(low, high, real); | 623 | rdmsr(p4_counters[real].cccr_address, low, high); |
628 | CTR_READ(ctr, high, real); | 624 | rdmsr(p4_counters[real].counter_address, ctr, high); |
629 | if (CCCR_OVF_P(low) || CTR_OVERFLOW_P(ctr)) { | 625 | if (CCCR_OVF_P(low) || !(ctr & OP_CTR_OVERFLOW)) { |
630 | oprofile_add_sample(regs, i); | 626 | oprofile_add_sample(regs, i); |
631 | CTR_WRITE(reset_value[i], real); | 627 | wrmsrl(p4_counters[real].counter_address, |
628 | -(u64)reset_value[i]); | ||
632 | CCCR_CLEAR_OVF(low); | 629 | CCCR_CLEAR_OVF(low); |
633 | CCCR_WRITE(low, high, real); | 630 | wrmsr(p4_counters[real].cccr_address, low, high); |
634 | CTR_WRITE(reset_value[i], real); | 631 | wrmsrl(p4_counters[real].counter_address, |
632 | -(u64)reset_value[i]); | ||
635 | } | 633 | } |
636 | } | 634 | } |
637 | 635 | ||
@@ -653,9 +651,9 @@ static void p4_start(struct op_msrs const * const msrs) | |||
653 | for (i = 0; i < num_counters; ++i) { | 651 | for (i = 0; i < num_counters; ++i) { |
654 | if (!reset_value[i]) | 652 | if (!reset_value[i]) |
655 | continue; | 653 | continue; |
656 | CCCR_READ(low, high, VIRT_CTR(stag, i)); | 654 | rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high); |
657 | CCCR_SET_ENABLE(low); | 655 | CCCR_SET_ENABLE(low); |
658 | CCCR_WRITE(low, high, VIRT_CTR(stag, i)); | 656 | wrmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high); |
659 | } | 657 | } |
660 | } | 658 | } |
661 | 659 | ||
@@ -670,9 +668,9 @@ static void p4_stop(struct op_msrs const * const msrs) | |||
670 | for (i = 0; i < num_counters; ++i) { | 668 | for (i = 0; i < num_counters; ++i) { |
671 | if (!reset_value[i]) | 669 | if (!reset_value[i]) |
672 | continue; | 670 | continue; |
673 | CCCR_READ(low, high, VIRT_CTR(stag, i)); | 671 | rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high); |
674 | CCCR_SET_DISABLE(low); | 672 | CCCR_SET_DISABLE(low); |
675 | CCCR_WRITE(low, high, VIRT_CTR(stag, i)); | 673 | wrmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high); |
676 | } | 674 | } |
677 | } | 675 | } |
678 | 676 | ||
@@ -680,8 +678,8 @@ static void p4_shutdown(struct op_msrs const * const msrs) | |||
680 | { | 678 | { |
681 | int i; | 679 | int i; |
682 | 680 | ||
683 | for (i = 0 ; i < num_counters ; ++i) { | 681 | for (i = 0; i < num_counters; ++i) { |
684 | if (CTR_IS_RESERVED(msrs, i)) | 682 | if (msrs->counters[i].addr) |
685 | release_perfctr_nmi(msrs->counters[i].addr); | 683 | release_perfctr_nmi(msrs->counters[i].addr); |
686 | } | 684 | } |
687 | /* | 685 | /* |
@@ -689,15 +687,15 @@ static void p4_shutdown(struct op_msrs const * const msrs) | |||
689 | * conjunction with the counter registers (hence the starting offset). | 687 | * conjunction with the counter registers (hence the starting offset). |
690 | * This saves a few bits. | 688 | * This saves a few bits. |
691 | */ | 689 | */ |
692 | for (i = num_counters ; i < num_controls ; ++i) { | 690 | for (i = num_counters; i < num_controls; ++i) { |
693 | if (CTRL_IS_RESERVED(msrs, i)) | 691 | if (msrs->controls[i].addr) |
694 | release_evntsel_nmi(msrs->controls[i].addr); | 692 | release_evntsel_nmi(msrs->controls[i].addr); |
695 | } | 693 | } |
696 | } | 694 | } |
697 | 695 | ||
698 | 696 | ||
699 | #ifdef CONFIG_SMP | 697 | #ifdef CONFIG_SMP |
700 | struct op_x86_model_spec const op_p4_ht2_spec = { | 698 | struct op_x86_model_spec op_p4_ht2_spec = { |
701 | .num_counters = NUM_COUNTERS_HT2, | 699 | .num_counters = NUM_COUNTERS_HT2, |
702 | .num_controls = NUM_CONTROLS_HT2, | 700 | .num_controls = NUM_CONTROLS_HT2, |
703 | .fill_in_addresses = &p4_fill_in_addresses, | 701 | .fill_in_addresses = &p4_fill_in_addresses, |
@@ -709,7 +707,7 @@ struct op_x86_model_spec const op_p4_ht2_spec = { | |||
709 | }; | 707 | }; |
710 | #endif | 708 | #endif |
711 | 709 | ||
712 | struct op_x86_model_spec const op_p4_spec = { | 710 | struct op_x86_model_spec op_p4_spec = { |
713 | .num_counters = NUM_COUNTERS_NON_HT, | 711 | .num_counters = NUM_COUNTERS_NON_HT, |
714 | .num_controls = NUM_CONTROLS_NON_HT, | 712 | .num_controls = NUM_CONTROLS_NON_HT, |
715 | .fill_in_addresses = &p4_fill_in_addresses, | 713 | .fill_in_addresses = &p4_fill_in_addresses, |
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c index 4da7230b3d17..4899215999de 100644 --- a/arch/x86/oprofile/op_model_ppro.c +++ b/arch/x86/oprofile/op_model_ppro.c | |||
@@ -10,6 +10,7 @@ | |||
10 | * @author Philippe Elie | 10 | * @author Philippe Elie |
11 | * @author Graydon Hoare | 11 | * @author Graydon Hoare |
12 | * @author Andi Kleen | 12 | * @author Andi Kleen |
13 | * @author Robert Richter <robert.richter@amd.com> | ||
13 | */ | 14 | */ |
14 | 15 | ||
15 | #include <linux/oprofile.h> | 16 | #include <linux/oprofile.h> |
@@ -18,7 +19,6 @@ | |||
18 | #include <asm/msr.h> | 19 | #include <asm/msr.h> |
19 | #include <asm/apic.h> | 20 | #include <asm/apic.h> |
20 | #include <asm/nmi.h> | 21 | #include <asm/nmi.h> |
21 | #include <asm/perf_counter.h> | ||
22 | 22 | ||
23 | #include "op_x86_model.h" | 23 | #include "op_x86_model.h" |
24 | #include "op_counter.h" | 24 | #include "op_counter.h" |
@@ -26,20 +26,7 @@ | |||
26 | static int num_counters = 2; | 26 | static int num_counters = 2; |
27 | static int counter_width = 32; | 27 | static int counter_width = 32; |
28 | 28 | ||
29 | #define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0) | 29 | #define MSR_PPRO_EVENTSEL_RESERVED ((0xFFFFFFFFULL<<32)|(1ULL<<21)) |
30 | #define CTR_OVERFLOWED(n) (!((n) & (1ULL<<(counter_width-1)))) | ||
31 | |||
32 | #define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0) | ||
33 | #define CTRL_READ(l, h, msrs, c) do {rdmsr((msrs->controls[(c)].addr), (l), (h)); } while (0) | ||
34 | #define CTRL_WRITE(l, h, msrs, c) do {wrmsr((msrs->controls[(c)].addr), (l), (h)); } while (0) | ||
35 | #define CTRL_SET_ACTIVE(n) (n |= (1<<22)) | ||
36 | #define CTRL_SET_INACTIVE(n) (n &= ~(1<<22)) | ||
37 | #define CTRL_CLEAR(x) (x &= (1<<21)) | ||
38 | #define CTRL_SET_ENABLE(val) (val |= 1<<20) | ||
39 | #define CTRL_SET_USR(val, u) (val |= ((u & 1) << 16)) | ||
40 | #define CTRL_SET_KERN(val, k) (val |= ((k & 1) << 17)) | ||
41 | #define CTRL_SET_UM(val, m) (val |= (m << 8)) | ||
42 | #define CTRL_SET_EVENT(val, e) (val |= e) | ||
43 | 30 | ||
44 | static u64 *reset_value; | 31 | static u64 *reset_value; |
45 | 32 | ||
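The dozen removed CTRL_* macros collapse into a single mask: MSR_PPRO_EVENTSEL_RESERVED names the EVNTSEL bits the driver must not touch (the upper half of the register plus bit 21), and ppro_setup_ctrs() below uses val &= model->reserved to preserve exactly those bits while clearing every configurable field before ORing the new configuration back in. A quick sanity check of the constant:

    #include <assert.h>
    #include <stdint.h>

    #define MSR_PPRO_EVENTSEL_RESERVED ((0xFFFFFFFFULL << 32) | (1ULL << 21))

    int main(void)
    {
        /* bits 63..32 (upper half) plus bit 21 of the EVNTSEL MSR */
        assert(MSR_PPRO_EVENTSEL_RESERVED == 0xFFFFFFFF00200000ULL);
        return 0;
    }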
@@ -63,9 +50,10 @@ static void ppro_fill_in_addresses(struct op_msrs * const msrs) | |||
63 | } | 50 | } |
64 | 51 | ||
65 | 52 | ||
66 | static void ppro_setup_ctrs(struct op_msrs const * const msrs) | 53 | static void ppro_setup_ctrs(struct op_x86_model_spec const *model, |
54 | struct op_msrs const * const msrs) | ||
67 | { | 55 | { |
68 | unsigned int low, high; | 56 | u64 val; |
69 | int i; | 57 | int i; |
70 | 58 | ||
71 | if (!reset_value) { | 59 | if (!reset_value) { |
@@ -93,36 +81,30 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs) | |||
93 | } | 81 | } |
94 | 82 | ||
95 | /* clear all counters */ | 83 | /* clear all counters */ |
96 | for (i = 0 ; i < num_counters; ++i) { | 84 | for (i = 0; i < num_counters; ++i) { |
97 | if (unlikely(!CTRL_IS_RESERVED(msrs, i))) | 85 | if (unlikely(!msrs->controls[i].addr)) |
98 | continue; | 86 | continue; |
99 | CTRL_READ(low, high, msrs, i); | 87 | rdmsrl(msrs->controls[i].addr, val); |
100 | CTRL_CLEAR(low); | 88 | val &= model->reserved; |
101 | CTRL_WRITE(low, high, msrs, i); | 89 | wrmsrl(msrs->controls[i].addr, val); |
102 | } | 90 | } |
103 | 91 | ||
104 | /* avoid a false detection of ctr overflows in NMI handler */ | 92 | /* avoid a false detection of ctr overflows in NMI handler */ |
105 | for (i = 0; i < num_counters; ++i) { | 93 | for (i = 0; i < num_counters; ++i) { |
106 | if (unlikely(!CTR_IS_RESERVED(msrs, i))) | 94 | if (unlikely(!msrs->counters[i].addr)) |
107 | continue; | 95 | continue; |
108 | wrmsrl(msrs->counters[i].addr, -1LL); | 96 | wrmsrl(msrs->counters[i].addr, -1LL); |
109 | } | 97 | } |
110 | 98 | ||
111 | /* enable active counters */ | 99 | /* enable active counters */ |
112 | for (i = 0; i < num_counters; ++i) { | 100 | for (i = 0; i < num_counters; ++i) { |
113 | if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) { | 101 | if (counter_config[i].enabled && msrs->counters[i].addr) { |
114 | reset_value[i] = counter_config[i].count; | 102 | reset_value[i] = counter_config[i].count; |
115 | |||
116 | wrmsrl(msrs->counters[i].addr, -reset_value[i]); | 103 | wrmsrl(msrs->counters[i].addr, -reset_value[i]); |
117 | 104 | rdmsrl(msrs->controls[i].addr, val); | |
118 | CTRL_READ(low, high, msrs, i); | 105 | val &= model->reserved; |
119 | CTRL_CLEAR(low); | 106 | val |= op_x86_get_ctrl(model, &counter_config[i]); |
120 | CTRL_SET_ENABLE(low); | 107 | wrmsrl(msrs->controls[i].addr, val); |
121 | CTRL_SET_USR(low, counter_config[i].user); | ||
122 | CTRL_SET_KERN(low, counter_config[i].kernel); | ||
123 | CTRL_SET_UM(low, counter_config[i].unit_mask); | ||
124 | CTRL_SET_EVENT(low, counter_config[i].event); | ||
125 | CTRL_WRITE(low, high, msrs, i); | ||
126 | } else { | 108 | } else { |
127 | reset_value[i] = 0; | 109 | reset_value[i] = 0; |
128 | } | 110 | } |
@@ -143,14 +125,14 @@ static int ppro_check_ctrs(struct pt_regs * const regs, | |||
143 | if (unlikely(!reset_value)) | 125 | if (unlikely(!reset_value)) |
144 | goto out; | 126 | goto out; |
145 | 127 | ||
146 | for (i = 0 ; i < num_counters; ++i) { | 128 | for (i = 0; i < num_counters; ++i) { |
147 | if (!reset_value[i]) | 129 | if (!reset_value[i]) |
148 | continue; | 130 | continue; |
149 | rdmsrl(msrs->counters[i].addr, val); | 131 | rdmsrl(msrs->counters[i].addr, val); |
150 | if (CTR_OVERFLOWED(val)) { | 132 | if (val & (1ULL << (counter_width - 1))) |
151 | oprofile_add_sample(regs, i); | 133 | continue; |
152 | wrmsrl(msrs->counters[i].addr, -reset_value[i]); | 134 | oprofile_add_sample(regs, i); |
153 | } | 135 | wrmsrl(msrs->counters[i].addr, -reset_value[i]); |
154 | } | 136 | } |
155 | 137 | ||
156 | out: | 138 | out: |
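The new ppro_setup_ctrs() delegates assembly of the event-select value to op_x86_get_ctrl(), whose body lives in nmi_int.c and is outside this diff. A sketch of what such a helper plausibly builds, assuming the architectural EVNTSEL bit layout (event select in bits 7:0, unit mask in 15:8, USR bit 16, OS bit 17, INT bit 20); the struct and function names here are illustrative only:

    #include <stdio.h>
    #include <stdint.h>

    struct demo_counter_config {
        unsigned long event, unit_mask, user, kernel;
    };

    static uint64_t demo_get_ctrl(const struct demo_counter_config *c)
    {
        uint64_t val = 0;

        val |= 1ULL << 20;                    /* INT: interrupt on overflow */
        val |= (c->user   ? 1ULL : 0) << 16;  /* USR: count rings > 0       */
        val |= (c->kernel ? 1ULL : 0) << 17;  /* OS:  count ring 0          */
        val |= (c->unit_mask & 0xFF) << 8;    /* unit mask                  */
        val |= c->event & 0xFF;               /* event select, low byte     */
        return val;
    }

    int main(void)
    {
        struct demo_counter_config c = {
            .event = 0x76, .unit_mask = 0, .user = 1, .kernel = 1,
        };
        printf("EVNTSEL image: %#llx\n",
               (unsigned long long)demo_get_ctrl(&c));
        return 0;
    }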
@@ -171,16 +153,16 @@ out: | |||
171 | 153 | ||
172 | static void ppro_start(struct op_msrs const * const msrs) | 154 | static void ppro_start(struct op_msrs const * const msrs) |
173 | { | 155 | { |
174 | unsigned int low, high; | 156 | u64 val; |
175 | int i; | 157 | int i; |
176 | 158 | ||
177 | if (!reset_value) | 159 | if (!reset_value) |
178 | return; | 160 | return; |
179 | for (i = 0; i < num_counters; ++i) { | 161 | for (i = 0; i < num_counters; ++i) { |
180 | if (reset_value[i]) { | 162 | if (reset_value[i]) { |
181 | CTRL_READ(low, high, msrs, i); | 163 | rdmsrl(msrs->controls[i].addr, val); |
182 | CTRL_SET_ACTIVE(low); | 164 | val |= ARCH_PERFMON_EVENTSEL0_ENABLE; |
183 | CTRL_WRITE(low, high, msrs, i); | 165 | wrmsrl(msrs->controls[i].addr, val); |
184 | } | 166 | } |
185 | } | 167 | } |
186 | } | 168 | } |
@@ -188,7 +170,7 @@ static void ppro_start(struct op_msrs const * const msrs) | |||
188 | 170 | ||
189 | static void ppro_stop(struct op_msrs const * const msrs) | 171 | static void ppro_stop(struct op_msrs const * const msrs) |
190 | { | 172 | { |
191 | unsigned int low, high; | 173 | u64 val; |
192 | int i; | 174 | int i; |
193 | 175 | ||
194 | if (!reset_value) | 176 | if (!reset_value) |
@@ -196,9 +178,9 @@ static void ppro_stop(struct op_msrs const * const msrs) | |||
196 | for (i = 0; i < num_counters; ++i) { | 178 | for (i = 0; i < num_counters; ++i) { |
197 | if (!reset_value[i]) | 179 | if (!reset_value[i]) |
198 | continue; | 180 | continue; |
199 | CTRL_READ(low, high, msrs, i); | 181 | rdmsrl(msrs->controls[i].addr, val); |
200 | CTRL_SET_INACTIVE(low); | 182 | val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE; |
201 | CTRL_WRITE(low, high, msrs, i); | 183 | wrmsrl(msrs->controls[i].addr, val); |
202 | } | 184 | } |
203 | } | 185 | } |
204 | 186 | ||
@@ -206,12 +188,12 @@ static void ppro_shutdown(struct op_msrs const * const msrs) | |||
206 | { | 188 | { |
207 | int i; | 189 | int i; |
208 | 190 | ||
209 | for (i = 0 ; i < num_counters ; ++i) { | 191 | for (i = 0; i < num_counters; ++i) { |
210 | if (CTR_IS_RESERVED(msrs, i)) | 192 | if (msrs->counters[i].addr) |
211 | release_perfctr_nmi(MSR_P6_PERFCTR0 + i); | 193 | release_perfctr_nmi(MSR_P6_PERFCTR0 + i); |
212 | } | 194 | } |
213 | for (i = 0 ; i < num_counters ; ++i) { | 195 | for (i = 0; i < num_counters; ++i) { |
214 | if (CTRL_IS_RESERVED(msrs, i)) | 196 | if (msrs->controls[i].addr) |
215 | release_evntsel_nmi(MSR_P6_EVNTSEL0 + i); | 197 | release_evntsel_nmi(MSR_P6_EVNTSEL0 + i); |
216 | } | 198 | } |
217 | if (reset_value) { | 199 | if (reset_value) { |
@@ -222,8 +204,9 @@ static void ppro_shutdown(struct op_msrs const * const msrs) | |||
222 | 204 | ||
223 | 205 | ||
224 | struct op_x86_model_spec op_ppro_spec = { | 206 | struct op_x86_model_spec op_ppro_spec = { |
225 | .num_counters = 2, /* can be overriden */ | 207 | .num_counters = 2, |
226 | .num_controls = 2, /* dito */ | 208 | .num_controls = 2, |
209 | .reserved = MSR_PPRO_EVENTSEL_RESERVED, | ||
227 | .fill_in_addresses = &ppro_fill_in_addresses, | 210 | .fill_in_addresses = &ppro_fill_in_addresses, |
228 | .setup_ctrs = &ppro_setup_ctrs, | 211 | .setup_ctrs = &ppro_setup_ctrs, |
229 | .check_ctrs = &ppro_check_ctrs, | 212 | .check_ctrs = &ppro_check_ctrs, |
@@ -241,7 +224,7 @@ struct op_x86_model_spec op_ppro_spec = { | |||
241 | * the specific CPU. | 224 | * the specific CPU. |
242 | */ | 225 | */ |
243 | 226 | ||
244 | void arch_perfmon_setup_counters(void) | 227 | static void arch_perfmon_setup_counters(void) |
245 | { | 228 | { |
246 | union cpuid10_eax eax; | 229 | union cpuid10_eax eax; |
247 | 230 | ||
@@ -259,11 +242,17 @@ void arch_perfmon_setup_counters(void) | |||
259 | 242 | ||
260 | op_arch_perfmon_spec.num_counters = num_counters; | 243 | op_arch_perfmon_spec.num_counters = num_counters; |
261 | op_arch_perfmon_spec.num_controls = num_counters; | 244 | op_arch_perfmon_spec.num_controls = num_counters; |
262 | op_ppro_spec.num_counters = num_counters; | 245 | } |
263 | op_ppro_spec.num_controls = num_counters; | 246 | |
247 | static int arch_perfmon_init(struct oprofile_operations *ignore) | ||
248 | { | ||
249 | arch_perfmon_setup_counters(); | ||
250 | return 0; | ||
264 | } | 251 | } |
265 | 252 | ||
266 | struct op_x86_model_spec op_arch_perfmon_spec = { | 253 | struct op_x86_model_spec op_arch_perfmon_spec = { |
254 | .reserved = MSR_PPRO_EVENTSEL_RESERVED, | ||
255 | .init = &arch_perfmon_init, | ||
267 | /* num_counters/num_controls filled in at runtime */ | 256 | /* num_counters/num_controls filled in at runtime */ |
268 | .fill_in_addresses = &ppro_fill_in_addresses, | 257 | .fill_in_addresses = &ppro_fill_in_addresses, |
269 | /* user space does the cpuid check for available events */ | 258 | /* user space does the cpuid check for available events */ |
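arch_perfmon_setup_counters() becomes static and is now reached through the new arch_perfmon_init() hook in the spec, so the counter count is read from CPUID leaf 0xA when the model initializes. A user-space sketch of that query (GCC on x86; the field positions follow the architectural perfmon spec, an assumption here since the cpuid10_eax union is not shown in this diff):

    #include <stdio.h>
    #include <cpuid.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(0xA, &eax, &ebx, &ecx, &edx))
            return 1;                          /* leaf 0xA not supported */
        printf("arch perfmon version: %u\n",  eax        & 0xFF);
        printf("counters per core:    %u\n", (eax >> 8)  & 0xFF);
        printf("counter width (bits): %u\n", (eax >> 16) & 0xFF);
        return 0;
    }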
diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h index 825e79064d64..b83776180c7f 100644 --- a/arch/x86/oprofile/op_x86_model.h +++ b/arch/x86/oprofile/op_x86_model.h | |||
@@ -6,51 +6,66 @@ | |||
6 | * @remark Read the file COPYING | 6 | * @remark Read the file COPYING |
7 | * | 7 | * |
8 | * @author Graydon Hoare | 8 | * @author Graydon Hoare |
9 | * @author Robert Richter <robert.richter@amd.com> | ||
9 | */ | 10 | */ |
10 | 11 | ||
11 | #ifndef OP_X86_MODEL_H | 12 | #ifndef OP_X86_MODEL_H |
12 | #define OP_X86_MODEL_H | 13 | #define OP_X86_MODEL_H |
13 | 14 | ||
14 | struct op_saved_msr { | 15 | #include <asm/types.h> |
15 | unsigned int high; | 16 | #include <asm/perf_counter.h> |
16 | unsigned int low; | ||
17 | }; | ||
18 | 17 | ||
19 | struct op_msr { | 18 | struct op_msr { |
20 | unsigned long addr; | 19 | unsigned long addr; |
21 | struct op_saved_msr saved; | 20 | u64 saved; |
22 | }; | 21 | }; |
23 | 22 | ||
24 | struct op_msrs { | 23 | struct op_msrs { |
25 | struct op_msr *counters; | 24 | struct op_msr *counters; |
26 | struct op_msr *controls; | 25 | struct op_msr *controls; |
26 | struct op_msr *multiplex; | ||
27 | }; | 27 | }; |
28 | 28 | ||
29 | struct pt_regs; | 29 | struct pt_regs; |
30 | 30 | ||
31 | struct oprofile_operations; | ||
32 | |||
31 | /* The model vtable abstracts the differences between | 33 | /* The model vtable abstracts the differences between |
32 | * various x86 CPU models' perfctr support. | 34 | * various x86 CPU models' perfctr support. |
33 | */ | 35 | */ |
34 | struct op_x86_model_spec { | 36 | struct op_x86_model_spec { |
35 | int (*init)(struct oprofile_operations *ops); | 37 | unsigned int num_counters; |
36 | void (*exit)(void); | 38 | unsigned int num_controls; |
37 | unsigned int num_counters; | 39 | unsigned int num_virt_counters; |
38 | unsigned int num_controls; | 40 | u64 reserved; |
39 | void (*fill_in_addresses)(struct op_msrs * const msrs); | 41 | u16 event_mask; |
40 | void (*setup_ctrs)(struct op_msrs const * const msrs); | 42 | int (*init)(struct oprofile_operations *ops); |
41 | int (*check_ctrs)(struct pt_regs * const regs, | 43 | void (*exit)(void); |
42 | struct op_msrs const * const msrs); | 44 | void (*fill_in_addresses)(struct op_msrs * const msrs); |
43 | void (*start)(struct op_msrs const * const msrs); | 45 | void (*setup_ctrs)(struct op_x86_model_spec const *model, |
44 | void (*stop)(struct op_msrs const * const msrs); | 46 | struct op_msrs const * const msrs); |
45 | void (*shutdown)(struct op_msrs const * const msrs); | 47 | int (*check_ctrs)(struct pt_regs * const regs, |
48 | struct op_msrs const * const msrs); | ||
49 | void (*start)(struct op_msrs const * const msrs); | ||
50 | void (*stop)(struct op_msrs const * const msrs); | ||
51 | void (*shutdown)(struct op_msrs const * const msrs); | ||
52 | #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX | ||
53 | void (*switch_ctrl)(struct op_x86_model_spec const *model, | ||
54 | struct op_msrs const * const msrs); | ||
55 | #endif | ||
46 | }; | 56 | }; |
47 | 57 | ||
58 | struct op_counter_config; | ||
59 | |||
60 | extern u64 op_x86_get_ctrl(struct op_x86_model_spec const *model, | ||
61 | struct op_counter_config *counter_config); | ||
62 | extern int op_x86_phys_to_virt(int phys); | ||
63 | extern int op_x86_virt_to_phys(int virt); | ||
64 | |||
48 | extern struct op_x86_model_spec op_ppro_spec; | 65 | extern struct op_x86_model_spec op_ppro_spec; |
49 | extern struct op_x86_model_spec const op_p4_spec; | 66 | extern struct op_x86_model_spec op_p4_spec; |
50 | extern struct op_x86_model_spec const op_p4_ht2_spec; | 67 | extern struct op_x86_model_spec op_p4_ht2_spec; |
51 | extern struct op_x86_model_spec const op_amd_spec; | 68 | extern struct op_x86_model_spec op_amd_spec; |
52 | extern struct op_x86_model_spec op_arch_perfmon_spec; | 69 | extern struct op_x86_model_spec op_arch_perfmon_spec; |
53 | 70 | ||
54 | extern void arch_perfmon_setup_counters(void); | ||
55 | |||
56 | #endif /* OP_X86_MODEL_H */ | 71 | #endif /* OP_X86_MODEL_H */ |
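Two structural points in the reworked op_x86_model.h: struct op_msr's saved image shrinks from a {low, high} pair to one u64, matching the rdmsrl()/wrmsrl() conversion throughout, and setup_ctrs() (plus the optional switch_ctrl()) now receives the model pointer, so shared helpers can read per-model data such as the reserved-bit mask. A minimal analogue of that second change, with illustrative names only:

    #include <stdio.h>
    #include <stdint.h>

    struct spec {
        uint64_t reserved;
        void (*setup)(const struct spec *model);  /* model passed in, as above */
    };

    static void generic_setup(const struct spec *model)
    {
        uint64_t val = 0xFFFFFFFFFFFFFFFFULL;  /* pretend MSR image */
        val &= model->reserved;                /* keep only reserved bits */
        printf("preserved: %#llx\n", (unsigned long long)val);
    }

    static const struct spec demo = {
        .reserved = (0xFFFFFFFFULL << 32) | (1ULL << 21),
        .setup    = generic_setup,
    };

    int main(void)
    {
        demo.setup(&demo);
        return 0;
    }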
diff --git a/arch/x86/pci/direct.c b/arch/x86/pci/direct.c index bd13c3e4c6db..347d882b3bb3 100644 --- a/arch/x86/pci/direct.c +++ b/arch/x86/pci/direct.c | |||
@@ -192,13 +192,14 @@ struct pci_raw_ops pci_direct_conf2 = { | |||
192 | static int __init pci_sanity_check(struct pci_raw_ops *o) | 192 | static int __init pci_sanity_check(struct pci_raw_ops *o) |
193 | { | 193 | { |
194 | u32 x = 0; | 194 | u32 x = 0; |
195 | int devfn; | 195 | int year, devfn; |
196 | 196 | ||
197 | if (pci_probe & PCI_NO_CHECKS) | 197 | if (pci_probe & PCI_NO_CHECKS) |
198 | return 1; | 198 | return 1; |
199 | /* Assume Type 1 works for newer systems. | 199 | /* Assume Type 1 works for newer systems. |
200 | This handles machines that don't have anything on PCI Bus 0. */ | 200 | This handles machines that don't have anything on PCI Bus 0. */ |
201 | if (dmi_get_year(DMI_BIOS_DATE) >= 2001) | 201 | dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL); |
202 | if (year >= 2001) | ||
202 | return 1; | 203 | return 1; |
203 | 204 | ||
204 | for (devfn = 0; devfn < 0x100; devfn++) { | 205 | for (devfn = 0; devfn < 0x100; devfn++) { |