author    Ingo Molnar <mingo@elte.hu>    2008-11-20 04:48:28 -0500
committer Ingo Molnar <mingo@elte.hu>    2008-11-20 04:48:31 -0500
commit    c032a2de4c1a82187e9a754511043be47c8a92b5 (patch)
tree      6d20bfcff683555b641a376ffdffb2dbc1f1599a /arch
parent    722024dbb74f3ea316c285c0a71a4512e113b0c4 (diff)
parent    cbe9ee00cea58d1f77b172fe22a51080e90877f2 (diff)

Merge branch 'x86/cleanups' into x86/irq

[ merged x86/cleanups into x86/irq to enable a wider IRQ entry code
  patch to be applied, which depends on a cleanup patch in x86/cleanups. ]

Diffstat (limited to 'arch')
 66 files changed, 849 insertions, 463 deletions
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 1cb8602dd9d5..4ed149cbb32a 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -256,8 +256,17 @@ int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long, | |||
256 | int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long, | 256 | int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long, |
257 | size_t, enum dma_data_direction); | 257 | size_t, enum dma_data_direction); |
258 | #else | 258 | #else |
259 | #define dmabounce_sync_for_cpu(dev,dma,off,sz,dir) (1) | 259 | static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr, |
260 | #define dmabounce_sync_for_device(dev,dma,off,sz,dir) (1) | 260 | unsigned long offset, size_t size, enum dma_data_direction dir) |
261 | { | ||
262 | return 1; | ||
263 | } | ||
264 | |||
265 | static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr, | ||
266 | unsigned long offset, size_t size, enum dma_data_direction dir) | ||
267 | { | ||
268 | return 1; | ||
269 | } | ||
261 | 270 | ||
262 | 271 | ||
263 | /** | 272 | /** |
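[ Context, not part of the commit: the !CONFIG_DMABOUNCE stubs above become
  typed static inlines instead of macros, so callers get argument type
  checking while the "nothing bounced, do the normal cache maintenance"
  return value of 1 is preserved. A rough sketch of how callers in this
  header consume that value (modelled on dma_sync_single_for_cpu(), not the
  exact upstream body): ]

	static inline void sync_single_for_cpu_sketch(struct device *dev,
			dma_addr_t handle, size_t size, enum dma_data_direction dir)
	{
		/* returns 0 only when a dmabounce buffer handled the sync */
		if (!dmabounce_sync_for_cpu(dev, handle, 0, size, dir))
			return;

		/* otherwise fall through to the normal cache maintenance */
		dma_cache_maint(dma_to_virt(dev, handle), size, dir);
	}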
diff --git a/arch/arm/include/asm/hardware/iop3xx-adma.h b/arch/arm/include/asm/hardware/iop3xx-adma.h
index 87bff09633aa..83e6ba338e2c 100644
--- a/arch/arm/include/asm/hardware/iop3xx-adma.h
+++ b/arch/arm/include/asm/hardware/iop3xx-adma.h
@@ -730,7 +730,8 @@ static inline void iop_desc_set_next_desc(struct iop_adma_desc_slot *desc, | |||
730 | { | 730 | { |
731 | /* hw_desc->next_desc is the same location for all channels */ | 731 | /* hw_desc->next_desc is the same location for all channels */ |
732 | union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, }; | 732 | union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, }; |
733 | BUG_ON(hw_desc.dma->next_desc); | 733 | |
734 | iop_paranoia(hw_desc.dma->next_desc); | ||
734 | hw_desc.dma->next_desc = next_desc_addr; | 735 | hw_desc.dma->next_desc = next_desc_addr; |
735 | } | 736 | } |
736 | 737 | ||
@@ -760,7 +761,7 @@ static inline int iop_desc_get_zero_result(struct iop_adma_desc_slot *desc) | |||
760 | struct iop3xx_desc_aau *hw_desc = desc->hw_desc; | 761 | struct iop3xx_desc_aau *hw_desc = desc->hw_desc; |
761 | struct iop3xx_aau_desc_ctrl desc_ctrl = hw_desc->desc_ctrl_field; | 762 | struct iop3xx_aau_desc_ctrl desc_ctrl = hw_desc->desc_ctrl_field; |
762 | 763 | ||
763 | BUG_ON(!(desc_ctrl.tx_complete && desc_ctrl.zero_result_en)); | 764 | iop_paranoia(!(desc_ctrl.tx_complete && desc_ctrl.zero_result_en)); |
764 | return desc_ctrl.zero_result_err; | 765 | return desc_ctrl.zero_result_err; |
765 | } | 766 | } |
766 | 767 | ||
diff --git a/arch/arm/include/asm/hardware/iop_adma.h b/arch/arm/include/asm/hardware/iop_adma.h
index cb7e3611bcba..385c6e8cbbd2 100644
--- a/arch/arm/include/asm/hardware/iop_adma.h
+++ b/arch/arm/include/asm/hardware/iop_adma.h
@@ -23,6 +23,12 @@ | |||
23 | 23 | ||
24 | #define IOP_ADMA_SLOT_SIZE 32 | 24 | #define IOP_ADMA_SLOT_SIZE 32 |
25 | #define IOP_ADMA_THRESHOLD 4 | 25 | #define IOP_ADMA_THRESHOLD 4 |
26 | #ifdef DEBUG | ||
27 | #define IOP_PARANOIA 1 | ||
28 | #else | ||
29 | #define IOP_PARANOIA 0 | ||
30 | #endif | ||
31 | #define iop_paranoia(x) BUG_ON(IOP_PARANOIA && (x)) | ||
26 | 32 | ||
27 | /** | 33 | /** |
28 | * struct iop_adma_device - internal representation of an ADMA device | 34 | * struct iop_adma_device - internal representation of an ADMA device |
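[ Context, not part of the commit: the iop_paranoia() helper introduced above
  replaces unconditional BUG_ON() descriptor checks in the ADMA code. Since
  IOP_PARANOIA is a compile-time constant, "IOP_PARANOIA && (x)"
  short-circuits: with DEBUG defined it behaves exactly like BUG_ON(x);
  without DEBUG the condition is constant 0, the argument is never evaluated,
  and the check compiles away, e.g.: ]

	iop_paranoia(hw_desc.dma->next_desc);
	/* DEBUG:  equivalent to BUG_ON(hw_desc.dma->next_desc);      */
	/* !DEBUG: no load, no branch - the statement is optimised out */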
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index cb1139ac1943..39d949b63e80 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -19,12 +19,13 @@ struct map_desc { | |||
19 | }; | 19 | }; |
20 | 20 | ||
21 | /* types 0-3 are defined in asm/io.h */ | 21 | /* types 0-3 are defined in asm/io.h */ |
22 | #define MT_CACHECLEAN 4 | 22 | #define MT_UNCACHED 4 |
23 | #define MT_MINICLEAN 5 | 23 | #define MT_CACHECLEAN 5 |
24 | #define MT_LOW_VECTORS 6 | 24 | #define MT_MINICLEAN 6 |
25 | #define MT_HIGH_VECTORS 7 | 25 | #define MT_LOW_VECTORS 7 |
26 | #define MT_MEMORY 8 | 26 | #define MT_HIGH_VECTORS 8 |
27 | #define MT_ROM 9 | 27 | #define MT_MEMORY 9 |
28 | #define MT_ROM 10 | ||
28 | 29 | ||
29 | #ifdef CONFIG_MMU | 30 | #ifdef CONFIG_MMU |
30 | extern void iotable_init(struct map_desc *, int); | 31 | extern void iotable_init(struct map_desc *, int); |
diff --git a/arch/arm/mach-clps711x/include/mach/hardware.h b/arch/arm/mach-clps711x/include/mach/hardware.h
index 4c3e101b96c9..b3ebe9e4871f 100644
--- a/arch/arm/mach-clps711x/include/mach/hardware.h
+++ b/arch/arm/mach-clps711x/include/mach/hardware.h
@@ -94,20 +94,6 @@ | |||
94 | #include <asm/hardware/ep7212.h> | 94 | #include <asm/hardware/ep7212.h> |
95 | #include <asm/hardware/cs89712.h> | 95 | #include <asm/hardware/cs89712.h> |
96 | 96 | ||
97 | /* dynamic ioremap() areas */ | ||
98 | #define FLASH_START 0x00000000 | ||
99 | #define FLASH_SIZE 0x800000 | ||
100 | #define FLASH_WIDTH 4 | ||
101 | |||
102 | #define SRAM_START 0x60000000 | ||
103 | #define SRAM_SIZE 0xc000 | ||
104 | #define SRAM_WIDTH 4 | ||
105 | |||
106 | #define BOOTROM_START 0x70000000 | ||
107 | #define BOOTROM_SIZE 0x80 | ||
108 | #define BOOTROM_WIDTH 4 | ||
109 | |||
110 | |||
111 | /* static cdb89712_map_io() areas */ | 97 | /* static cdb89712_map_io() areas */ |
112 | #define REGISTER_START 0x80000000 | 98 | #define REGISTER_START 0x80000000 |
113 | #define REGISTER_SIZE 0x4000 | 99 | #define REGISTER_SIZE 0x4000 |
@@ -198,14 +184,6 @@ | |||
198 | #define CEIVA_FLASH_SIZE 0x100000 | 184 | #define CEIVA_FLASH_SIZE 0x100000 |
199 | #define CEIVA_FLASH_WIDTH 2 | 185 | #define CEIVA_FLASH_WIDTH 2 |
200 | 186 | ||
201 | #define SRAM_START 0x60000000 | ||
202 | #define SRAM_SIZE 0xc000 | ||
203 | #define SRAM_WIDTH 4 | ||
204 | |||
205 | #define BOOTROM_START 0x70000000 | ||
206 | #define BOOTROM_SIZE 0x80 | ||
207 | #define BOOTROM_WIDTH 4 | ||
208 | |||
209 | /* | 187 | /* |
210 | * SED1355 LCD controller | 188 | * SED1355 LCD controller |
211 | */ | 189 | */ |
diff --git a/arch/arm/mach-clps7500/core.c b/arch/arm/mach-clps7500/core.c
index c3a33b8a5aac..7e247c04d41c 100644
--- a/arch/arm/mach-clps7500/core.c
+++ b/arch/arm/mach-clps7500/core.c
@@ -275,9 +275,9 @@ static struct map_desc cl7500_io_desc[] __initdata = { | |||
275 | .length = ISA_SIZE, | 275 | .length = ISA_SIZE, |
276 | .type = MT_DEVICE | 276 | .type = MT_DEVICE |
277 | }, { /* Flash */ | 277 | }, { /* Flash */ |
278 | .virtual = FLASH_BASE, | 278 | .virtual = CLPS7500_FLASH_BASE, |
279 | .pfn = __phys_to_pfn(FLASH_START), | 279 | .pfn = __phys_to_pfn(CLPS7500_FLASH_START), |
280 | .length = FLASH_SIZE, | 280 | .length = CLPS7500_FLASH_SIZE, |
281 | .type = MT_DEVICE | 281 | .type = MT_DEVICE |
282 | }, { /* LED */ | 282 | }, { /* LED */ |
283 | .virtual = LED_BASE, | 283 | .virtual = LED_BASE, |
diff --git a/arch/arm/mach-clps7500/include/mach/hardware.h b/arch/arm/mach-clps7500/include/mach/hardware.h
index d66578a3371c..a6ad1d44badf 100644
--- a/arch/arm/mach-clps7500/include/mach/hardware.h
+++ b/arch/arm/mach-clps7500/include/mach/hardware.h
@@ -39,9 +39,9 @@ | |||
39 | #define ISA_SIZE 0x00010000 | 39 | #define ISA_SIZE 0x00010000 |
40 | #define ISA_BASE 0xe1000000 | 40 | #define ISA_BASE 0xe1000000 |
41 | 41 | ||
42 | #define FLASH_START 0x01000000 /* XXX */ | 42 | #define CLPS7500_FLASH_START 0x01000000 /* XXX */ |
43 | #define FLASH_SIZE 0x01000000 | 43 | #define CLPS7500_FLASH_SIZE 0x01000000 |
44 | #define FLASH_BASE 0xe2000000 | 44 | #define CLPS7500_FLASH_BASE 0xe2000000 |
45 | 45 | ||
46 | #define LED_START 0x0302B000 | 46 | #define LED_START 0x0302B000 |
47 | #define LED_SIZE 0x00001000 | 47 | #define LED_SIZE 0x00001000 |
diff --git a/arch/arm/mach-h720x/include/mach/boards.h b/arch/arm/mach-h720x/include/mach/boards.h
index 079b279e1242..38b8e0d61fbf 100644
--- a/arch/arm/mach-h720x/include/mach/boards.h
+++ b/arch/arm/mach-h720x/include/mach/boards.h
@@ -19,9 +19,9 @@ | |||
19 | #ifdef CONFIG_ARCH_H7202 | 19 | #ifdef CONFIG_ARCH_H7202 |
20 | 20 | ||
21 | /* FLASH */ | 21 | /* FLASH */ |
22 | #define FLASH_VIRT 0xd0000000 | 22 | #define H720X_FLASH_VIRT 0xd0000000 |
23 | #define FLASH_PHYS 0x00000000 | 23 | #define H720X_FLASH_PHYS 0x00000000 |
24 | #define FLASH_SIZE 0x02000000 | 24 | #define H720X_FLASH_SIZE 0x02000000 |
25 | 25 | ||
26 | /* onboard LAN controller */ | 26 | /* onboard LAN controller */ |
27 | # define ETH0_PHYS 0x08000000 | 27 | # define ETH0_PHYS 0x08000000 |
diff --git a/arch/arm/mach-integrator/include/mach/platform.h b/arch/arm/mach-integrator/include/mach/platform.h
index 028b87839c0f..e00a2624f269 100644
--- a/arch/arm/mach-integrator/include/mach/platform.h
+++ b/arch/arm/mach-integrator/include/mach/platform.h
@@ -408,27 +408,10 @@ | |||
408 | #define uHAL_MEMORY_SIZE INTEGRATOR_SSRAM_SIZE | 408 | #define uHAL_MEMORY_SIZE INTEGRATOR_SSRAM_SIZE |
409 | 409 | ||
410 | /* | 410 | /* |
411 | * Application Flash | ||
412 | * | ||
413 | */ | ||
414 | #define FLASH_BASE INTEGRATOR_FLASH_BASE | ||
415 | #define FLASH_SIZE INTEGRATOR_FLASH_SIZE | ||
416 | #define FLASH_END (FLASH_BASE + FLASH_SIZE - 1) | ||
417 | #define FLASH_BLOCK_SIZE SZ_128K | ||
418 | |||
419 | /* | ||
420 | * Boot Flash | ||
421 | * | ||
422 | */ | ||
423 | #define EPROM_BASE INTEGRATOR_BOOT_ROM_HI | ||
424 | #define EPROM_SIZE INTEGRATOR_BOOT_ROM_SIZE | ||
425 | #define EPROM_END (EPROM_BASE + EPROM_SIZE - 1) | ||
426 | |||
427 | /* | ||
428 | * Clean base - dummy | 411 | * Clean base - dummy |
429 | * | 412 | * |
430 | */ | 413 | */ |
431 | #define CLEAN_BASE EPROM_BASE | 414 | #define CLEAN_BASE INTEGRATOR_BOOT_ROM_HI |
432 | 415 | ||
433 | /* | 416 | /* |
434 | * Timer definitions | 417 | * Timer definitions |
diff --git a/arch/arm/mach-iop13xx/include/mach/adma.h b/arch/arm/mach-iop13xx/include/mach/adma.h
index 60019c8e6465..5722e86f2174 100644
--- a/arch/arm/mach-iop13xx/include/mach/adma.h
+++ b/arch/arm/mach-iop13xx/include/mach/adma.h
@@ -404,7 +404,8 @@ static inline void iop_desc_set_next_desc(struct iop_adma_desc_slot *desc, | |||
404 | u32 next_desc_addr) | 404 | u32 next_desc_addr) |
405 | { | 405 | { |
406 | struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; | 406 | struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; |
407 | BUG_ON(hw_desc->next_desc); | 407 | |
408 | iop_paranoia(hw_desc->next_desc); | ||
408 | hw_desc->next_desc = next_desc_addr; | 409 | hw_desc->next_desc = next_desc_addr; |
409 | } | 410 | } |
410 | 411 | ||
diff --git a/arch/arm/mach-realview/clock.c b/arch/arm/mach-realview/clock.c
index 3e706c57833a..3347c4236a60 100644
--- a/arch/arm/mach-realview/clock.c
+++ b/arch/arm/mach-realview/clock.c
@@ -104,7 +104,7 @@ static struct clk uart_clk = { | |||
104 | 104 | ||
105 | static struct clk mmci_clk = { | 105 | static struct clk mmci_clk = { |
106 | .name = "MCLK", | 106 | .name = "MCLK", |
107 | .rate = 33000000, | 107 | .rate = 24000000, |
108 | }; | 108 | }; |
109 | 109 | ||
110 | int clk_register(struct clk *clk) | 110 | int clk_register(struct clk *clk) |
diff --git a/arch/arm/mach-realview/include/mach/platform.h b/arch/arm/mach-realview/include/mach/platform.h
index 4034b54950c2..793a3a332712 100644
--- a/arch/arm/mach-realview/include/mach/platform.h
+++ b/arch/arm/mach-realview/include/mach/platform.h
@@ -239,27 +239,10 @@ | |||
239 | #define REALVIEW_DECODE_OFFSET 0xC /* Fitted logic modules */ | 239 | #define REALVIEW_DECODE_OFFSET 0xC /* Fitted logic modules */ |
240 | 240 | ||
241 | /* | 241 | /* |
242 | * Application Flash | ||
243 | * | ||
244 | */ | ||
245 | #define FLASH_BASE REALVIEW_FLASH_BASE | ||
246 | #define FLASH_SIZE REALVIEW_FLASH_SIZE | ||
247 | #define FLASH_END (FLASH_BASE + FLASH_SIZE - 1) | ||
248 | #define FLASH_BLOCK_SIZE SZ_128K | ||
249 | |||
250 | /* | ||
251 | * Boot Flash | ||
252 | * | ||
253 | */ | ||
254 | #define EPROM_BASE REALVIEW_BOOT_ROM_HI | ||
255 | #define EPROM_SIZE REALVIEW_BOOT_ROM_SIZE | ||
256 | #define EPROM_END (EPROM_BASE + EPROM_SIZE - 1) | ||
257 | |||
258 | /* | ||
259 | * Clean base - dummy | 242 | * Clean base - dummy |
260 | * | 243 | * |
261 | */ | 244 | */ |
262 | #define CLEAN_BASE EPROM_BASE | 245 | #define CLEAN_BASE REALVIEW_BOOT_ROM_HI |
263 | 246 | ||
264 | /* | 247 | /* |
265 | * System controller bit assignment | 248 | * System controller bit assignment |
diff --git a/arch/arm/mach-versatile/clock.c b/arch/arm/mach-versatile/clock.c
index 9336508ec0b2..58937f1fb38c 100644
--- a/arch/arm/mach-versatile/clock.c
+++ b/arch/arm/mach-versatile/clock.c
@@ -105,7 +105,7 @@ static struct clk uart_clk = { | |||
105 | 105 | ||
106 | static struct clk mmci_clk = { | 106 | static struct clk mmci_clk = { |
107 | .name = "MCLK", | 107 | .name = "MCLK", |
108 | .rate = 33000000, | 108 | .rate = 24000000, |
109 | }; | 109 | }; |
110 | 110 | ||
111 | int clk_register(struct clk *clk) | 111 | int clk_register(struct clk *clk) |
diff --git a/arch/arm/mach-versatile/include/mach/platform.h b/arch/arm/mach-versatile/include/mach/platform.h
index 27cbe6a3f220..f91ba930ca8a 100644
--- a/arch/arm/mach-versatile/include/mach/platform.h
+++ b/arch/arm/mach-versatile/include/mach/platform.h
@@ -436,28 +436,12 @@ | |||
436 | #define SIC_INTMASK_PCI1 (1 << SIC_INT_PCI1) | 436 | #define SIC_INTMASK_PCI1 (1 << SIC_INT_PCI1) |
437 | #define SIC_INTMASK_PCI2 (1 << SIC_INT_PCI2) | 437 | #define SIC_INTMASK_PCI2 (1 << SIC_INT_PCI2) |
438 | #define SIC_INTMASK_PCI3 (1 << SIC_INT_PCI3) | 438 | #define SIC_INTMASK_PCI3 (1 << SIC_INT_PCI3) |
439 | /* | ||
440 | * Application Flash | ||
441 | * | ||
442 | */ | ||
443 | #define FLASH_BASE VERSATILE_FLASH_BASE | ||
444 | #define FLASH_SIZE VERSATILE_FLASH_SIZE | ||
445 | #define FLASH_END (FLASH_BASE + FLASH_SIZE - 1) | ||
446 | #define FLASH_BLOCK_SIZE SZ_128K | ||
447 | |||
448 | /* | ||
449 | * Boot Flash | ||
450 | * | ||
451 | */ | ||
452 | #define EPROM_BASE VERSATILE_BOOT_ROM_HI | ||
453 | #define EPROM_SIZE VERSATILE_BOOT_ROM_SIZE | ||
454 | #define EPROM_END (EPROM_BASE + EPROM_SIZE - 1) | ||
455 | 439 | ||
456 | /* | 440 | /* |
457 | * Clean base - dummy | 441 | * Clean base - dummy |
458 | * | 442 | * |
459 | */ | 443 | */ |
460 | #define CLEAN_BASE EPROM_BASE | 444 | #define CLEAN_BASE VERSATILE_BOOT_ROM_HI |
461 | 445 | ||
462 | /* | 446 | /* |
463 | * System controller bit assignment | 447 | * System controller bit assignment |
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
index 13cdae8b0d44..80cd207cbaea 100644
--- a/arch/arm/mm/cache-feroceon-l2.c
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -150,7 +150,7 @@ static void feroceon_l2_inv_range(unsigned long start, unsigned long end) | |||
150 | /* | 150 | /* |
151 | * Clean and invalidate partial last cache line. | 151 | * Clean and invalidate partial last cache line. |
152 | */ | 152 | */ |
153 | if (end & (CACHE_LINE_SIZE - 1)) { | 153 | if (start < end && end & (CACHE_LINE_SIZE - 1)) { |
154 | l2_clean_inv_pa(end & ~(CACHE_LINE_SIZE - 1)); | 154 | l2_clean_inv_pa(end & ~(CACHE_LINE_SIZE - 1)); |
155 | end &= ~(CACHE_LINE_SIZE - 1); | 155 | end &= ~(CACHE_LINE_SIZE - 1); |
156 | } | 156 | } |
@@ -158,7 +158,7 @@ static void feroceon_l2_inv_range(unsigned long start, unsigned long end) | |||
158 | /* | 158 | /* |
159 | * Invalidate all full cache lines between 'start' and 'end'. | 159 | * Invalidate all full cache lines between 'start' and 'end'. |
160 | */ | 160 | */ |
161 | while (start != end) { | 161 | while (start < end) { |
162 | unsigned long range_end = calc_range_end(start, end); | 162 | unsigned long range_end = calc_range_end(start, end); |
163 | l2_inv_pa_range(start, range_end - CACHE_LINE_SIZE); | 163 | l2_inv_pa_range(start, range_end - CACHE_LINE_SIZE); |
164 | start = range_end; | 164 | start = range_end; |
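[ Context, not part of the commit: a worked example of the case the new range
  checks guard against, assuming the driver's 32-byte CACHE_LINE_SIZE. For
  feroceon_l2_inv_range(0x1008, 0x1010), cleaning the partial first line
  rounds start up to 0x1020, which is already past end; the old code would
  still clean/invalidate the "partial last line" and then spin in
  "while (start != end)" across nearly the whole address space, whereas the
  "start < end" tests skip both steps and the function returns immediately. ]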
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index e63db11f16a8..7f36c825718d 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -208,6 +208,12 @@ static struct mem_type mem_types[] = { | |||
208 | .prot_sect = PROT_SECT_DEVICE, | 208 | .prot_sect = PROT_SECT_DEVICE, |
209 | .domain = DOMAIN_IO, | 209 | .domain = DOMAIN_IO, |
210 | }, | 210 | }, |
211 | [MT_UNCACHED] = { | ||
212 | .prot_pte = PROT_PTE_DEVICE, | ||
213 | .prot_l1 = PMD_TYPE_TABLE, | ||
214 | .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, | ||
215 | .domain = DOMAIN_IO, | ||
216 | }, | ||
211 | [MT_CACHECLEAN] = { | 217 | [MT_CACHECLEAN] = { |
212 | .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, | 218 | .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, |
213 | .domain = DOMAIN_KERNEL, | 219 | .domain = DOMAIN_KERNEL, |
diff --git a/arch/arm/plat-iop/setup.c b/arch/arm/plat-iop/setup.c
index 4689db638e95..9e573e78176a 100644
--- a/arch/arm/plat-iop/setup.c
+++ b/arch/arm/plat-iop/setup.c
@@ -16,14 +16,15 @@ | |||
16 | #include <asm/hardware/iop3xx.h> | 16 | #include <asm/hardware/iop3xx.h> |
17 | 17 | ||
18 | /* | 18 | /* |
19 | * Standard IO mapping for all IOP3xx based systems | 19 | * Standard IO mapping for all IOP3xx based systems. Note that |
20 | * the IOP3xx OCCDR must be mapped uncached and unbuffered. | ||
20 | */ | 21 | */ |
21 | static struct map_desc iop3xx_std_desc[] __initdata = { | 22 | static struct map_desc iop3xx_std_desc[] __initdata = { |
22 | { /* mem mapped registers */ | 23 | { /* mem mapped registers */ |
23 | .virtual = IOP3XX_PERIPHERAL_VIRT_BASE, | 24 | .virtual = IOP3XX_PERIPHERAL_VIRT_BASE, |
24 | .pfn = __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE), | 25 | .pfn = __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE), |
25 | .length = IOP3XX_PERIPHERAL_SIZE, | 26 | .length = IOP3XX_PERIPHERAL_SIZE, |
26 | .type = MT_DEVICE, | 27 | .type = MT_UNCACHED, |
27 | }, { /* PCI IO space */ | 28 | }, { /* PCI IO space */ |
28 | .virtual = IOP3XX_PCI_LOWER_IO_VA, | 29 | .virtual = IOP3XX_PCI_LOWER_IO_VA, |
29 | .pfn = __phys_to_pfn(IOP3XX_PCI_LOWER_IO_PA), | 30 | .pfn = __phys_to_pfn(IOP3XX_PCI_LOWER_IO_PA), |
diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig
index 8e99fed6b3fd..f833a0b4188d 100644
--- a/arch/ia64/kvm/Kconfig
+++ b/arch/ia64/kvm/Kconfig
@@ -20,6 +20,8 @@ if VIRTUALIZATION | |||
20 | config KVM | 20 | config KVM |
21 | tristate "Kernel-based Virtual Machine (KVM) support" | 21 | tristate "Kernel-based Virtual Machine (KVM) support" |
22 | depends on HAVE_KVM && EXPERIMENTAL | 22 | depends on HAVE_KVM && EXPERIMENTAL |
23 | # for device assignment: | ||
24 | depends on PCI | ||
23 | select PREEMPT_NOTIFIERS | 25 | select PREEMPT_NOTIFIERS |
24 | select ANON_INODES | 26 | select ANON_INODES |
25 | ---help--- | 27 | ---help--- |
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 3caac477de9e..af1464f7a6ad 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -673,16 +673,16 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
673 | 673 | ||
674 | vcpu_load(vcpu); | 674 | vcpu_load(vcpu); |
675 | 675 | ||
676 | if (vcpu->sigset_active) | ||
677 | sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); | ||
678 | |||
676 | if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { | 679 | if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { |
677 | kvm_vcpu_block(vcpu); | 680 | kvm_vcpu_block(vcpu); |
678 | clear_bit(KVM_REQ_UNHALT, &vcpu->requests); | 681 | clear_bit(KVM_REQ_UNHALT, &vcpu->requests); |
679 | vcpu_put(vcpu); | 682 | r = -EAGAIN; |
680 | return -EAGAIN; | 683 | goto out; |
681 | } | 684 | } |
682 | 685 | ||
683 | if (vcpu->sigset_active) | ||
684 | sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); | ||
685 | |||
686 | if (vcpu->mmio_needed) { | 686 | if (vcpu->mmio_needed) { |
687 | memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8); | 687 | memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8); |
688 | kvm_set_mmio_data(vcpu); | 688 | kvm_set_mmio_data(vcpu); |
@@ -690,7 +690,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
690 | vcpu->mmio_needed = 0; | 690 | vcpu->mmio_needed = 0; |
691 | } | 691 | } |
692 | r = __vcpu_run(vcpu, kvm_run); | 692 | r = __vcpu_run(vcpu, kvm_run); |
693 | 693 | out: | |
694 | if (vcpu->sigset_active) | 694 | if (vcpu->sigset_active) |
695 | sigprocmask(SIG_SETMASK, &sigsaved, NULL); | 695 | sigprocmask(SIG_SETMASK, &sigsaved, NULL); |
696 | 696 | ||
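[ Context, not part of the commit: reassembling the new-side lines above, the
  vcpu's signal mask is now installed before the mp_state check, so it is in
  effect while kvm_vcpu_block() sleeps, and both the early -EAGAIN exit and
  the normal path restore it through the shared "out:" label: ]

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		r = -EAGAIN;
		goto out;
	}
	/* ... mmio handling ... */
	r = __vcpu_run(vcpu, kvm_run);
out:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);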
diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h
index 341e3fee280c..e9b2a4e121c0 100644
--- a/arch/ia64/kvm/vcpu.h
+++ b/arch/ia64/kvm/vcpu.h
@@ -384,6 +384,10 @@ static inline u64 __gpfn_is_io(u64 gpfn) | |||
384 | #define MODE_IND(psr) \ | 384 | #define MODE_IND(psr) \ |
385 | (((psr).it << 2) + ((psr).dt << 1) + (psr).rt) | 385 | (((psr).it << 2) + ((psr).dt << 1) + (psr).rt) |
386 | 386 | ||
387 | #ifndef CONFIG_SMP | ||
388 | #define _vmm_raw_spin_lock(x) do {}while(0) | ||
389 | #define _vmm_raw_spin_unlock(x) do {}while(0) | ||
390 | #else | ||
387 | #define _vmm_raw_spin_lock(x) \ | 391 | #define _vmm_raw_spin_lock(x) \ |
388 | do { \ | 392 | do { \ |
389 | __u32 *ia64_spinlock_ptr = (__u32 *) (x); \ | 393 | __u32 *ia64_spinlock_ptr = (__u32 *) (x); \ |
@@ -403,6 +407,7 @@ static inline u64 __gpfn_is_io(u64 gpfn) | |||
403 | do { barrier(); \ | 407 | do { barrier(); \ |
404 | ((spinlock_t *)x)->raw_lock.lock = 0; } \ | 408 | ((spinlock_t *)x)->raw_lock.lock = 0; } \ |
405 | while (0) | 409 | while (0) |
410 | #endif | ||
406 | 411 | ||
407 | void vmm_spin_lock(spinlock_t *lock); | 412 | void vmm_spin_lock(spinlock_t *lock); |
408 | void vmm_spin_unlock(spinlock_t *lock); | 413 | void vmm_spin_unlock(spinlock_t *lock); |
diff --git a/arch/m68k/kernel/ints.c b/arch/m68k/kernel/ints.c
index 7e8a0d394e61..761ee0440c99 100644
--- a/arch/m68k/kernel/ints.c
+++ b/arch/m68k/kernel/ints.c
@@ -133,7 +133,7 @@ void __init m68k_setup_user_interrupt(unsigned int vec, unsigned int cnt, | |||
133 | { | 133 | { |
134 | int i; | 134 | int i; |
135 | 135 | ||
136 | BUG_ON(IRQ_USER + cnt >= NR_IRQS); | 136 | BUG_ON(IRQ_USER + cnt > NR_IRQS); |
137 | m68k_first_user_vec = vec; | 137 | m68k_first_user_vec = vec; |
138 | for (i = 0; i < cnt; i++) | 138 | for (i = 0; i < cnt; i++) |
139 | irq_controller[IRQ_USER + i] = &user_irq_controller; | 139 | irq_controller[IRQ_USER + i] = &user_irq_controller; |
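[ Context, not part of the commit: with cnt user vectors the highest index
  initialised by the loop above is IRQ_USER + cnt - 1, so a sum equal to
  NR_IRQS still stays inside the 0..NR_IRQS-1 range; only IRQ_USER + cnt
  greater than NR_IRQS would walk past it, which is what the relaxed
  BUG_ON() now rejects. ]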
diff --git a/arch/mn10300/Kconfig.debug b/arch/mn10300/Kconfig.debug
index 524e33819f32..ff80e86b9bd2 100644
--- a/arch/mn10300/Kconfig.debug
+++ b/arch/mn10300/Kconfig.debug
@@ -15,6 +15,15 @@ config DEBUG_DECOMPRESS_KERNEL | |||
15 | decompressing Linux seeing "Uncompressing Linux... " and | 15 | decompressing Linux seeing "Uncompressing Linux... " and |
16 | "Ok, booting the kernel.\n" on console. | 16 | "Ok, booting the kernel.\n" on console. |
17 | 17 | ||
18 | config TEST_MISALIGNMENT_HANDLER | ||
19 | bool "Run tests on the misalignment handler" | ||
20 | depends on DEBUG_KERNEL | ||
21 | default n | ||
22 | help | ||
23 | If you say Y here the kernel will execute a list of misaligned memory | ||
24 | accesses to make sure the misalignment handler deals them with | ||
25 | correctly. If it does not, the kernel will throw a BUG. | ||
26 | |||
18 | config KPROBES | 27 | config KPROBES |
19 | bool "Kprobes" | 28 | bool "Kprobes" |
20 | depends on DEBUG_KERNEL | 29 | depends on DEBUG_KERNEL |
diff --git a/arch/mn10300/mm/misalignment.c b/arch/mn10300/mm/misalignment.c
index 32aa89dc3848..94c4a4358065 100644
--- a/arch/mn10300/mm/misalignment.c
+++ b/arch/mn10300/mm/misalignment.c
@@ -37,26 +37,22 @@ | |||
37 | #include <asm/asm-offsets.h> | 37 | #include <asm/asm-offsets.h> |
38 | 38 | ||
39 | #if 0 | 39 | #if 0 |
40 | #define kdebug(FMT, ...) printk(KERN_DEBUG FMT, ##__VA_ARGS__) | 40 | #define kdebug(FMT, ...) printk(KERN_DEBUG "MISALIGN: "FMT"\n", ##__VA_ARGS__) |
41 | #else | 41 | #else |
42 | #define kdebug(FMT, ...) do {} while (0) | 42 | #define kdebug(FMT, ...) do {} while (0) |
43 | #endif | 43 | #endif |
44 | 44 | ||
45 | static int misalignment_addr(unsigned long *registers, unsigned params, | 45 | static int misalignment_addr(unsigned long *registers, unsigned long sp, |
46 | unsigned opcode, unsigned disp, | 46 | unsigned params, unsigned opcode, |
47 | void **_address, unsigned long **_postinc); | 47 | unsigned long disp, |
48 | void **_address, unsigned long **_postinc, | ||
49 | unsigned long *_inc); | ||
48 | 50 | ||
49 | static int misalignment_reg(unsigned long *registers, unsigned params, | 51 | static int misalignment_reg(unsigned long *registers, unsigned params, |
50 | unsigned opcode, unsigned disp, | 52 | unsigned opcode, unsigned long disp, |
51 | unsigned long **_register); | 53 | unsigned long **_register); |
52 | 54 | ||
53 | static inline unsigned int_log2(unsigned x) | 55 | static void misalignment_MOV_Lcc(struct pt_regs *regs, uint32_t opcode); |
54 | { | ||
55 | unsigned y; | ||
56 | asm("bsch %1,%0" : "=r"(y) : "r"(x), "0"(0)); | ||
57 | return y; | ||
58 | } | ||
59 | #define log2(x) int_log2(x) | ||
60 | 56 | ||
61 | static const unsigned Dreg_index[] = { | 57 | static const unsigned Dreg_index[] = { |
62 | REG_D0 >> 2, REG_D1 >> 2, REG_D2 >> 2, REG_D3 >> 2 | 58 | REG_D0 >> 2, REG_D1 >> 2, REG_D2 >> 2, REG_D3 >> 2 |
@@ -86,9 +82,10 @@ enum format_id { | |||
86 | FMT_D7, | 82 | FMT_D7, |
87 | FMT_D8, | 83 | FMT_D8, |
88 | FMT_D9, | 84 | FMT_D9, |
85 | FMT_D10, | ||
89 | }; | 86 | }; |
90 | 87 | ||
91 | struct { | 88 | static const struct { |
92 | u_int8_t opsz, dispsz; | 89 | u_int8_t opsz, dispsz; |
93 | } format_tbl[16] = { | 90 | } format_tbl[16] = { |
94 | [FMT_S0] = { 8, 0 }, | 91 | [FMT_S0] = { 8, 0 }, |
@@ -103,6 +100,7 @@ struct { | |||
103 | [FMT_D7] = { 24, 8 }, | 100 | [FMT_D7] = { 24, 8 }, |
104 | [FMT_D8] = { 24, 24 }, | 101 | [FMT_D8] = { 24, 24 }, |
105 | [FMT_D9] = { 24, 32 }, | 102 | [FMT_D9] = { 24, 32 }, |
103 | [FMT_D10] = { 32, 0 }, | ||
106 | }; | 104 | }; |
107 | 105 | ||
108 | enum value_id { | 106 | enum value_id { |
@@ -128,9 +126,14 @@ enum value_id { | |||
128 | SD24, /* 24-bit signed displacement */ | 126 | SD24, /* 24-bit signed displacement */ |
129 | SIMM4_2, /* 4-bit signed displacement in opcode bits 4-7 */ | 127 | SIMM4_2, /* 4-bit signed displacement in opcode bits 4-7 */ |
130 | SIMM8, /* 8-bit signed immediate */ | 128 | SIMM8, /* 8-bit signed immediate */ |
129 | IMM8, /* 8-bit unsigned immediate */ | ||
130 | IMM16, /* 16-bit unsigned immediate */ | ||
131 | IMM24, /* 24-bit unsigned immediate */ | 131 | IMM24, /* 24-bit unsigned immediate */ |
132 | IMM32, /* 32-bit unsigned immediate */ | 132 | IMM32, /* 32-bit unsigned immediate */ |
133 | IMM32_HIGH8, /* 32-bit unsigned immediate, high 8-bits in opcode */ | 133 | IMM32_HIGH8, /* 32-bit unsigned immediate, LSB in opcode */ |
134 | |||
135 | IMM32_MEM, /* 32-bit unsigned displacement */ | ||
136 | IMM32_HIGH8_MEM, /* 32-bit unsigned displacement, LSB in opcode */ | ||
134 | 137 | ||
135 | DN0 = DM0, | 138 | DN0 = DM0, |
136 | DN1 = DM1, | 139 | DN1 = DM1, |
@@ -149,7 +152,7 @@ enum value_id { | |||
149 | }; | 152 | }; |
150 | 153 | ||
151 | struct mn10300_opcode { | 154 | struct mn10300_opcode { |
152 | const char *name; | 155 | const char name[8]; |
153 | u_int32_t opcode; | 156 | u_int32_t opcode; |
154 | u_int32_t opmask; | 157 | u_int32_t opmask; |
155 | unsigned exclusion; | 158 | unsigned exclusion; |
@@ -185,6 +188,10 @@ struct mn10300_opcode { | |||
185 | Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | 188 | Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
186 | */ | 189 | */ |
187 | static const struct mn10300_opcode mn10300_opcodes[] = { | 190 | static const struct mn10300_opcode mn10300_opcodes[] = { |
191 | { "mov", 0x4200, 0xf300, 0, FMT_S1, 0, {DM1, MEM2(IMM8, SP)}}, | ||
192 | { "mov", 0x4300, 0xf300, 0, FMT_S1, 0, {AM1, MEM2(IMM8, SP)}}, | ||
193 | { "mov", 0x5800, 0xfc00, 0, FMT_S1, 0, {MEM2(IMM8, SP), DN0}}, | ||
194 | { "mov", 0x5c00, 0xfc00, 0, FMT_S1, 0, {MEM2(IMM8, SP), AN0}}, | ||
188 | { "mov", 0x60, 0xf0, 0, FMT_S0, 0, {DM1, MEM(AN0)}}, | 195 | { "mov", 0x60, 0xf0, 0, FMT_S0, 0, {DM1, MEM(AN0)}}, |
189 | { "mov", 0x70, 0xf0, 0, FMT_S0, 0, {MEM(AM0), DN1}}, | 196 | { "mov", 0x70, 0xf0, 0, FMT_S0, 0, {MEM(AM0), DN1}}, |
190 | { "mov", 0xf000, 0xfff0, 0, FMT_D0, 0, {MEM(AM0), AN1}}, | 197 | { "mov", 0xf000, 0xfff0, 0, FMT_D0, 0, {MEM(AM0), AN1}}, |
@@ -197,8 +204,6 @@ static const struct mn10300_opcode mn10300_opcodes[] = { | |||
197 | { "mov", 0xf81000, 0xfff000, 0, FMT_D1, 0, {DM1, MEM2(SD8, AN0)}}, | 204 | { "mov", 0xf81000, 0xfff000, 0, FMT_D1, 0, {DM1, MEM2(SD8, AN0)}}, |
198 | { "mov", 0xf82000, 0xfff000, 0, FMT_D1, 0, {MEM2(SD8,AM0), AN1}}, | 205 | { "mov", 0xf82000, 0xfff000, 0, FMT_D1, 0, {MEM2(SD8,AM0), AN1}}, |
199 | { "mov", 0xf83000, 0xfff000, 0, FMT_D1, 0, {AM1, MEM2(SD8, AN0)}}, | 206 | { "mov", 0xf83000, 0xfff000, 0, FMT_D1, 0, {AM1, MEM2(SD8, AN0)}}, |
200 | { "mov", 0xf8f000, 0xfffc00, 0, FMT_D1, AM33, {MEM2(SD8, AM0), SP}}, | ||
201 | { "mov", 0xf8f400, 0xfffc00, 0, FMT_D1, AM33, {SP, MEM2(SD8, AN0)}}, | ||
202 | { "mov", 0xf90a00, 0xffff00, 0, FMT_D6, AM33, {MEM(RM0), RN2}}, | 207 | { "mov", 0xf90a00, 0xffff00, 0, FMT_D6, AM33, {MEM(RM0), RN2}}, |
203 | { "mov", 0xf91a00, 0xffff00, 0, FMT_D6, AM33, {RM2, MEM(RN0)}}, | 208 | { "mov", 0xf91a00, 0xffff00, 0, FMT_D6, AM33, {RM2, MEM(RN0)}}, |
204 | { "mov", 0xf96a00, 0xffff00, 0x12, FMT_D6, AM33, {MEMINC(RM0), RN2}}, | 209 | { "mov", 0xf96a00, 0xffff00, 0x12, FMT_D6, AM33, {MEMINC(RM0), RN2}}, |
@@ -207,24 +212,46 @@ static const struct mn10300_opcode mn10300_opcodes[] = { | |||
207 | { "mov", 0xfa100000, 0xfff00000, 0, FMT_D2, 0, {DM1, MEM2(SD16, AN0)}}, | 212 | { "mov", 0xfa100000, 0xfff00000, 0, FMT_D2, 0, {DM1, MEM2(SD16, AN0)}}, |
208 | { "mov", 0xfa200000, 0xfff00000, 0, FMT_D2, 0, {MEM2(SD16, AM0), AN1}}, | 213 | { "mov", 0xfa200000, 0xfff00000, 0, FMT_D2, 0, {MEM2(SD16, AM0), AN1}}, |
209 | { "mov", 0xfa300000, 0xfff00000, 0, FMT_D2, 0, {AM1, MEM2(SD16, AN0)}}, | 214 | { "mov", 0xfa300000, 0xfff00000, 0, FMT_D2, 0, {AM1, MEM2(SD16, AN0)}}, |
215 | { "mov", 0xfa900000, 0xfff30000, 0, FMT_D2, 0, {AM1, MEM2(IMM16, SP)}}, | ||
216 | { "mov", 0xfa910000, 0xfff30000, 0, FMT_D2, 0, {DM1, MEM2(IMM16, SP)}}, | ||
217 | { "mov", 0xfab00000, 0xfffc0000, 0, FMT_D2, 0, {MEM2(IMM16, SP), AN0}}, | ||
218 | { "mov", 0xfab40000, 0xfffc0000, 0, FMT_D2, 0, {MEM2(IMM16, SP), DN0}}, | ||
210 | { "mov", 0xfb0a0000, 0xffff0000, 0, FMT_D7, AM33, {MEM2(SD8, RM0), RN2}}, | 219 | { "mov", 0xfb0a0000, 0xffff0000, 0, FMT_D7, AM33, {MEM2(SD8, RM0), RN2}}, |
211 | { "mov", 0xfb1a0000, 0xffff0000, 0, FMT_D7, AM33, {RM2, MEM2(SD8, RN0)}}, | 220 | { "mov", 0xfb1a0000, 0xffff0000, 0, FMT_D7, AM33, {RM2, MEM2(SD8, RN0)}}, |
212 | { "mov", 0xfb6a0000, 0xffff0000, 0x22, FMT_D7, AM33, {MEMINC2 (RM0, SIMM8), RN2}}, | 221 | { "mov", 0xfb6a0000, 0xffff0000, 0x22, FMT_D7, AM33, {MEMINC2 (RM0, SIMM8), RN2}}, |
213 | { "mov", 0xfb7a0000, 0xffff0000, 0, FMT_D7, AM33, {RM2, MEMINC2 (RN0, SIMM8)}}, | 222 | { "mov", 0xfb7a0000, 0xffff0000, 0, FMT_D7, AM33, {RM2, MEMINC2 (RN0, SIMM8)}}, |
223 | { "mov", 0xfb8a0000, 0xffff0f00, 0, FMT_D7, AM33, {MEM2(IMM8, SP), RN2}}, | ||
214 | { "mov", 0xfb8e0000, 0xffff000f, 0, FMT_D7, AM33, {MEM2(RI, RM0), RD2}}, | 224 | { "mov", 0xfb8e0000, 0xffff000f, 0, FMT_D7, AM33, {MEM2(RI, RM0), RD2}}, |
225 | { "mov", 0xfb9a0000, 0xffff0f00, 0, FMT_D7, AM33, {RM2, MEM2(IMM8, SP)}}, | ||
215 | { "mov", 0xfb9e0000, 0xffff000f, 0, FMT_D7, AM33, {RD2, MEM2(RI, RN0)}}, | 226 | { "mov", 0xfb9e0000, 0xffff000f, 0, FMT_D7, AM33, {RD2, MEM2(RI, RN0)}}, |
216 | { "mov", 0xfc000000, 0xfff00000, 0, FMT_D4, 0, {MEM2(IMM32,AM0), DN1}}, | 227 | { "mov", 0xfc000000, 0xfff00000, 0, FMT_D4, 0, {MEM2(IMM32,AM0), DN1}}, |
217 | { "mov", 0xfc100000, 0xfff00000, 0, FMT_D4, 0, {DM1, MEM2(IMM32,AN0)}}, | 228 | { "mov", 0xfc100000, 0xfff00000, 0, FMT_D4, 0, {DM1, MEM2(IMM32,AN0)}}, |
218 | { "mov", 0xfc200000, 0xfff00000, 0, FMT_D4, 0, {MEM2(IMM32,AM0), AN1}}, | 229 | { "mov", 0xfc200000, 0xfff00000, 0, FMT_D4, 0, {MEM2(IMM32,AM0), AN1}}, |
219 | { "mov", 0xfc300000, 0xfff00000, 0, FMT_D4, 0, {AM1, MEM2(IMM32,AN0)}}, | 230 | { "mov", 0xfc300000, 0xfff00000, 0, FMT_D4, 0, {AM1, MEM2(IMM32,AN0)}}, |
231 | { "mov", 0xfc800000, 0xfff30000, 0, FMT_D4, 0, {AM1, MEM(IMM32_MEM)}}, | ||
232 | { "mov", 0xfc810000, 0xfff30000, 0, FMT_D4, 0, {DM1, MEM(IMM32_MEM)}}, | ||
233 | { "mov", 0xfc900000, 0xfff30000, 0, FMT_D4, 0, {AM1, MEM2(IMM32, SP)}}, | ||
234 | { "mov", 0xfc910000, 0xfff30000, 0, FMT_D4, 0, {DM1, MEM2(IMM32, SP)}}, | ||
235 | { "mov", 0xfca00000, 0xfffc0000, 0, FMT_D4, 0, {MEM(IMM32_MEM), AN0}}, | ||
236 | { "mov", 0xfca40000, 0xfffc0000, 0, FMT_D4, 0, {MEM(IMM32_MEM), DN0}}, | ||
237 | { "mov", 0xfcb00000, 0xfffc0000, 0, FMT_D4, 0, {MEM2(IMM32, SP), AN0}}, | ||
238 | { "mov", 0xfcb40000, 0xfffc0000, 0, FMT_D4, 0, {MEM2(IMM32, SP), DN0}}, | ||
220 | { "mov", 0xfd0a0000, 0xffff0000, 0, FMT_D8, AM33, {MEM2(SD24, RM0), RN2}}, | 239 | { "mov", 0xfd0a0000, 0xffff0000, 0, FMT_D8, AM33, {MEM2(SD24, RM0), RN2}}, |
221 | { "mov", 0xfd1a0000, 0xffff0000, 0, FMT_D8, AM33, {RM2, MEM2(SD24, RN0)}}, | 240 | { "mov", 0xfd1a0000, 0xffff0000, 0, FMT_D8, AM33, {RM2, MEM2(SD24, RN0)}}, |
222 | { "mov", 0xfd6a0000, 0xffff0000, 0x22, FMT_D8, AM33, {MEMINC2 (RM0, IMM24), RN2}}, | 241 | { "mov", 0xfd6a0000, 0xffff0000, 0x22, FMT_D8, AM33, {MEMINC2 (RM0, IMM24), RN2}}, |
223 | { "mov", 0xfd7a0000, 0xffff0000, 0, FMT_D8, AM33, {RM2, MEMINC2 (RN0, IMM24)}}, | 242 | { "mov", 0xfd7a0000, 0xffff0000, 0, FMT_D8, AM33, {RM2, MEMINC2 (RN0, IMM24)}}, |
243 | { "mov", 0xfd8a0000, 0xffff0f00, 0, FMT_D8, AM33, {MEM2(IMM24, SP), RN2}}, | ||
244 | { "mov", 0xfd9a0000, 0xffff0f00, 0, FMT_D8, AM33, {RM2, MEM2(IMM24, SP)}}, | ||
224 | { "mov", 0xfe0a0000, 0xffff0000, 0, FMT_D9, AM33, {MEM2(IMM32_HIGH8,RM0), RN2}}, | 245 | { "mov", 0xfe0a0000, 0xffff0000, 0, FMT_D9, AM33, {MEM2(IMM32_HIGH8,RM0), RN2}}, |
246 | { "mov", 0xfe0a0000, 0xffff0000, 0, FMT_D9, AM33, {MEM2(IMM32_HIGH8,RM0), RN2}}, | ||
247 | { "mov", 0xfe0e0000, 0xffff0f00, 0, FMT_D9, AM33, {MEM(IMM32_HIGH8_MEM), RN2}}, | ||
248 | { "mov", 0xfe1a0000, 0xffff0000, 0, FMT_D9, AM33, {RM2, MEM2(IMM32_HIGH8, RN0)}}, | ||
225 | { "mov", 0xfe1a0000, 0xffff0000, 0, FMT_D9, AM33, {RM2, MEM2(IMM32_HIGH8, RN0)}}, | 249 | { "mov", 0xfe1a0000, 0xffff0000, 0, FMT_D9, AM33, {RM2, MEM2(IMM32_HIGH8, RN0)}}, |
250 | { "mov", 0xfe1e0000, 0xffff0f00, 0, FMT_D9, AM33, {RM2, MEM(IMM32_HIGH8_MEM)}}, | ||
226 | { "mov", 0xfe6a0000, 0xffff0000, 0x22, FMT_D9, AM33, {MEMINC2 (RM0, IMM32_HIGH8), RN2}}, | 251 | { "mov", 0xfe6a0000, 0xffff0000, 0x22, FMT_D9, AM33, {MEMINC2 (RM0, IMM32_HIGH8), RN2}}, |
227 | { "mov", 0xfe7a0000, 0xffff0000, 0, FMT_D9, AM33, {RN2, MEMINC2 (RM0, IMM32_HIGH8)}}, | 252 | { "mov", 0xfe7a0000, 0xffff0000, 0, FMT_D9, AM33, {RN2, MEMINC2 (RM0, IMM32_HIGH8)}}, |
253 | { "mov", 0xfe8a0000, 0xffff0f00, 0, FMT_D9, AM33, {MEM2(IMM32_HIGH8, SP), RN2}}, | ||
254 | { "mov", 0xfe9a0000, 0xffff0f00, 0, FMT_D9, AM33, {RM2, MEM2(IMM32_HIGH8, SP)}}, | ||
228 | 255 | ||
229 | { "movhu", 0xf060, 0xfff0, 0, FMT_D0, 0, {MEM(AM0), DN1}}, | 256 | { "movhu", 0xf060, 0xfff0, 0, FMT_D0, 0, {MEM(AM0), DN1}}, |
230 | { "movhu", 0xf070, 0xfff0, 0, FMT_D0, 0, {DM1, MEM(AN0)}}, | 257 | { "movhu", 0xf070, 0xfff0, 0, FMT_D0, 0, {DM1, MEM(AN0)}}, |
@@ -232,29 +259,58 @@ static const struct mn10300_opcode mn10300_opcodes[] = { | |||
232 | { "movhu", 0xf4c0, 0xffc0, 0, FMT_D0, 0, {DM2, MEM2(DI, AN0)}}, | 259 | { "movhu", 0xf4c0, 0xffc0, 0, FMT_D0, 0, {DM2, MEM2(DI, AN0)}}, |
233 | { "movhu", 0xf86000, 0xfff000, 0, FMT_D1, 0, {MEM2(SD8, AM0), DN1}}, | 260 | { "movhu", 0xf86000, 0xfff000, 0, FMT_D1, 0, {MEM2(SD8, AM0), DN1}}, |
234 | { "movhu", 0xf87000, 0xfff000, 0, FMT_D1, 0, {DM1, MEM2(SD8, AN0)}}, | 261 | { "movhu", 0xf87000, 0xfff000, 0, FMT_D1, 0, {DM1, MEM2(SD8, AN0)}}, |
262 | { "movhu", 0xf89300, 0xfff300, 0, FMT_D1, 0, {DM1, MEM2(IMM8, SP)}}, | ||
263 | { "movhu", 0xf8bc00, 0xfffc00, 0, FMT_D1, 0, {MEM2(IMM8, SP), DN0}}, | ||
235 | { "movhu", 0xf94a00, 0xffff00, 0, FMT_D6, AM33, {MEM(RM0), RN2}}, | 264 | { "movhu", 0xf94a00, 0xffff00, 0, FMT_D6, AM33, {MEM(RM0), RN2}}, |
236 | { "movhu", 0xf95a00, 0xffff00, 0, FMT_D6, AM33, {RM2, MEM(RN0)}}, | 265 | { "movhu", 0xf95a00, 0xffff00, 0, FMT_D6, AM33, {RM2, MEM(RN0)}}, |
237 | { "movhu", 0xf9ea00, 0xffff00, 0x12, FMT_D6, AM33, {MEMINC(RM0), RN2}}, | 266 | { "movhu", 0xf9ea00, 0xffff00, 0x12, FMT_D6, AM33, {MEMINC(RM0), RN2}}, |
238 | { "movhu", 0xf9fa00, 0xffff00, 0, FMT_D6, AM33, {RM2, MEMINC(RN0)}}, | 267 | { "movhu", 0xf9fa00, 0xffff00, 0, FMT_D6, AM33, {RM2, MEMINC(RN0)}}, |
239 | { "movhu", 0xfa600000, 0xfff00000, 0, FMT_D2, 0, {MEM2(SD16, AM0), DN1}}, | 268 | { "movhu", 0xfa600000, 0xfff00000, 0, FMT_D2, 0, {MEM2(SD16, AM0), DN1}}, |
240 | { "movhu", 0xfa700000, 0xfff00000, 0, FMT_D2, 0, {DM1, MEM2(SD16, AN0)}}, | 269 | { "movhu", 0xfa700000, 0xfff00000, 0, FMT_D2, 0, {DM1, MEM2(SD16, AN0)}}, |
270 | { "movhu", 0xfa930000, 0xfff30000, 0, FMT_D2, 0, {DM1, MEM2(IMM16, SP)}}, | ||
271 | { "movhu", 0xfabc0000, 0xfffc0000, 0, FMT_D2, 0, {MEM2(IMM16, SP), DN0}}, | ||
241 | { "movhu", 0xfb4a0000, 0xffff0000, 0, FMT_D7, AM33, {MEM2(SD8, RM0), RN2}}, | 272 | { "movhu", 0xfb4a0000, 0xffff0000, 0, FMT_D7, AM33, {MEM2(SD8, RM0), RN2}}, |
242 | { "movhu", 0xfb5a0000, 0xffff0000, 0, FMT_D7, AM33, {RM2, MEM2(SD8, RN0)}}, | 273 | { "movhu", 0xfb5a0000, 0xffff0000, 0, FMT_D7, AM33, {RM2, MEM2(SD8, RN0)}}, |
274 | { "movhu", 0xfbca0000, 0xffff0f00, 0, FMT_D7, AM33, {MEM2(IMM8, SP), RN2}}, | ||
243 | { "movhu", 0xfbce0000, 0xffff000f, 0, FMT_D7, AM33, {MEM2(RI, RM0), RD2}}, | 275 | { "movhu", 0xfbce0000, 0xffff000f, 0, FMT_D7, AM33, {MEM2(RI, RM0), RD2}}, |
276 | { "movhu", 0xfbda0000, 0xffff0f00, 0, FMT_D7, AM33, {RM2, MEM2(IMM8, SP)}}, | ||
244 | { "movhu", 0xfbde0000, 0xffff000f, 0, FMT_D7, AM33, {RD2, MEM2(RI, RN0)}}, | 277 | { "movhu", 0xfbde0000, 0xffff000f, 0, FMT_D7, AM33, {RD2, MEM2(RI, RN0)}}, |
245 | { "movhu", 0xfbea0000, 0xffff0000, 0x22, FMT_D7, AM33, {MEMINC2 (RM0, SIMM8), RN2}}, | 278 | { "movhu", 0xfbea0000, 0xffff0000, 0x22, FMT_D7, AM33, {MEMINC2 (RM0, SIMM8), RN2}}, |
246 | { "movhu", 0xfbfa0000, 0xffff0000, 0, FMT_D7, AM33, {RM2, MEMINC2 (RN0, SIMM8)}}, | 279 | { "movhu", 0xfbfa0000, 0xffff0000, 0, FMT_D7, AM33, {RM2, MEMINC2 (RN0, SIMM8)}}, |
247 | { "movhu", 0xfc600000, 0xfff00000, 0, FMT_D4, 0, {MEM2(IMM32,AM0), DN1}}, | 280 | { "movhu", 0xfc600000, 0xfff00000, 0, FMT_D4, 0, {MEM2(IMM32,AM0), DN1}}, |
248 | { "movhu", 0xfc700000, 0xfff00000, 0, FMT_D4, 0, {DM1, MEM2(IMM32,AN0)}}, | 281 | { "movhu", 0xfc700000, 0xfff00000, 0, FMT_D4, 0, {DM1, MEM2(IMM32,AN0)}}, |
282 | { "movhu", 0xfc830000, 0xfff30000, 0, FMT_D4, 0, {DM1, MEM(IMM32_MEM)}}, | ||
283 | { "movhu", 0xfc930000, 0xfff30000, 0, FMT_D4, 0, {DM1, MEM2(IMM32, SP)}}, | ||
284 | { "movhu", 0xfcac0000, 0xfffc0000, 0, FMT_D4, 0, {MEM(IMM32_MEM), DN0}}, | ||
285 | { "movhu", 0xfcbc0000, 0xfffc0000, 0, FMT_D4, 0, {MEM2(IMM32, SP), DN0}}, | ||
249 | { "movhu", 0xfd4a0000, 0xffff0000, 0, FMT_D8, AM33, {MEM2(SD24, RM0), RN2}}, | 286 | { "movhu", 0xfd4a0000, 0xffff0000, 0, FMT_D8, AM33, {MEM2(SD24, RM0), RN2}}, |
250 | { "movhu", 0xfd5a0000, 0xffff0000, 0, FMT_D8, AM33, {RM2, MEM2(SD24, RN0)}}, | 287 | { "movhu", 0xfd5a0000, 0xffff0000, 0, FMT_D8, AM33, {RM2, MEM2(SD24, RN0)}}, |
288 | { "movhu", 0xfdca0000, 0xffff0f00, 0, FMT_D8, AM33, {MEM2(IMM24, SP), RN2}}, | ||
289 | { "movhu", 0xfdda0000, 0xffff0f00, 0, FMT_D8, AM33, {RM2, MEM2(IMM24, SP)}}, | ||
251 | { "movhu", 0xfdea0000, 0xffff0000, 0x22, FMT_D8, AM33, {MEMINC2 (RM0, IMM24), RN2}}, | 290 | { "movhu", 0xfdea0000, 0xffff0000, 0x22, FMT_D8, AM33, {MEMINC2 (RM0, IMM24), RN2}}, |
252 | { "movhu", 0xfdfa0000, 0xffff0000, 0, FMT_D8, AM33, {RM2, MEMINC2 (RN0, IMM24)}}, | 291 | { "movhu", 0xfdfa0000, 0xffff0000, 0, FMT_D8, AM33, {RM2, MEMINC2 (RN0, IMM24)}}, |
253 | { "movhu", 0xfe4a0000, 0xffff0000, 0, FMT_D9, AM33, {MEM2(IMM32_HIGH8,RM0), RN2}}, | 292 | { "movhu", 0xfe4a0000, 0xffff0000, 0, FMT_D9, AM33, {MEM2(IMM32_HIGH8,RM0), RN2}}, |
293 | { "movhu", 0xfe4e0000, 0xffff0f00, 0, FMT_D9, AM33, {MEM(IMM32_HIGH8_MEM), RN2}}, | ||
254 | { "movhu", 0xfe5a0000, 0xffff0000, 0, FMT_D9, AM33, {RM2, MEM2(IMM32_HIGH8, RN0)}}, | 294 | { "movhu", 0xfe5a0000, 0xffff0000, 0, FMT_D9, AM33, {RM2, MEM2(IMM32_HIGH8, RN0)}}, |
295 | { "movhu", 0xfe5e0000, 0xffff0f00, 0, FMT_D9, AM33, {RM2, MEM(IMM32_HIGH8_MEM)}}, | ||
296 | { "movhu", 0xfeca0000, 0xffff0f00, 0, FMT_D9, AM33, {MEM2(IMM32_HIGH8, SP), RN2}}, | ||
297 | { "movhu", 0xfeda0000, 0xffff0f00, 0, FMT_D9, AM33, {RM2, MEM2(IMM32_HIGH8, SP)}}, | ||
255 | { "movhu", 0xfeea0000, 0xffff0000, 0x22, FMT_D9, AM33, {MEMINC2 (RM0, IMM32_HIGH8), RN2}}, | 298 | { "movhu", 0xfeea0000, 0xffff0000, 0x22, FMT_D9, AM33, {MEMINC2 (RM0, IMM32_HIGH8), RN2}}, |
256 | { "movhu", 0xfefa0000, 0xffff0000, 0, FMT_D9, AM33, {RN2, MEMINC2 (RM0, IMM32_HIGH8)}}, | 299 | { "movhu", 0xfefa0000, 0xffff0000, 0, FMT_D9, AM33, {RN2, MEMINC2 (RM0, IMM32_HIGH8)}}, |
257 | { 0, 0, 0, 0, 0, 0, {0}}, | 300 | |
301 | { "mov_llt", 0xf7e00000, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}}, | ||
302 | { "mov_lgt", 0xf7e00001, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}}, | ||
303 | { "mov_lge", 0xf7e00002, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}}, | ||
304 | { "mov_lle", 0xf7e00003, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}}, | ||
305 | { "mov_lcs", 0xf7e00004, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}}, | ||
306 | { "mov_lhi", 0xf7e00005, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}}, | ||
307 | { "mov_lcc", 0xf7e00006, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}}, | ||
308 | { "mov_lls", 0xf7e00007, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}}, | ||
309 | { "mov_leq", 0xf7e00008, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}}, | ||
310 | { "mov_lne", 0xf7e00009, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}}, | ||
311 | { "mov_lra", 0xf7e0000a, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}}, | ||
312 | |||
313 | { "", 0, 0, 0, 0, 0, {0}}, | ||
258 | }; | 314 | }; |
259 | 315 | ||
260 | /* | 316 | /* |
@@ -265,18 +321,21 @@ asmlinkage void misalignment(struct pt_regs *regs, enum exception_code code) | |||
265 | const struct exception_table_entry *fixup; | 321 | const struct exception_table_entry *fixup; |
266 | const struct mn10300_opcode *pop; | 322 | const struct mn10300_opcode *pop; |
267 | unsigned long *registers = (unsigned long *) regs; | 323 | unsigned long *registers = (unsigned long *) regs; |
268 | unsigned long data, *store, *postinc; | 324 | unsigned long data, *store, *postinc, disp, inc, sp; |
269 | mm_segment_t seg; | 325 | mm_segment_t seg; |
270 | siginfo_t info; | 326 | siginfo_t info; |
271 | uint32_t opcode, disp, noc, xo, xm; | 327 | uint32_t opcode, noc, xo, xm; |
272 | uint8_t *pc, byte; | 328 | uint8_t *pc, byte, datasz; |
273 | void *address; | 329 | void *address; |
274 | unsigned tmp, npop; | 330 | unsigned tmp, npop, dispsz, loop; |
331 | |||
332 | /* we don't fix up userspace misalignment faults */ | ||
333 | if (user_mode(regs)) | ||
334 | goto bus_error; | ||
275 | 335 | ||
276 | kdebug("MISALIGN at %lx\n", regs->pc); | 336 | sp = (unsigned long) regs + sizeof(*regs); |
277 | 337 | ||
278 | if (in_interrupt()) | 338 | kdebug("==>misalignment({pc=%lx,sp=%lx})", regs->pc, sp); |
279 | die("Misalignment trap in interrupt context", regs, code); | ||
280 | 339 | ||
281 | if (regs->epsw & EPSW_IE) | 340 | if (regs->epsw & EPSW_IE) |
282 | asm volatile("or %0,epsw" : : "i"(EPSW_IE)); | 341 | asm volatile("or %0,epsw" : : "i"(EPSW_IE)); |
@@ -294,8 +353,8 @@ asmlinkage void misalignment(struct pt_regs *regs, enum exception_code code) | |||
294 | opcode = byte; | 353 | opcode = byte; |
295 | noc = 8; | 354 | noc = 8; |
296 | 355 | ||
297 | for (pop = mn10300_opcodes; pop->name; pop++) { | 356 | for (pop = mn10300_opcodes; pop->name[0]; pop++) { |
298 | npop = log2(pop->opcode | pop->opmask); | 357 | npop = ilog2(pop->opcode | pop->opmask); |
299 | if (npop <= 0 || npop > 31) | 358 | if (npop <= 0 || npop > 31) |
300 | continue; | 359 | continue; |
301 | npop = (npop + 8) & ~7; | 360 | npop = (npop + 8) & ~7; |
@@ -328,15 +387,15 @@ asmlinkage void misalignment(struct pt_regs *regs, enum exception_code code) | |||
328 | } | 387 | } |
329 | 388 | ||
330 | /* didn't manage to find a fixup */ | 389 | /* didn't manage to find a fixup */ |
331 | if (!user_mode(regs)) | 390 | printk(KERN_CRIT "MISALIGN: %lx: unsupported instruction %x\n", |
332 | printk(KERN_CRIT "MISALIGN: %lx: unsupported instruction %x\n", | 391 | regs->pc, opcode); |
333 | regs->pc, opcode); | ||
334 | 392 | ||
335 | failed: | 393 | failed: |
336 | set_fs(seg); | 394 | set_fs(seg); |
337 | if (die_if_no_fixup("misalignment error", regs, code)) | 395 | if (die_if_no_fixup("misalignment error", regs, code)) |
338 | return; | 396 | return; |
339 | 397 | ||
398 | bus_error: | ||
340 | info.si_signo = SIGBUS; | 399 | info.si_signo = SIGBUS; |
341 | info.si_errno = 0; | 400 | info.si_errno = 0; |
342 | info.si_code = BUS_ADRALN; | 401 | info.si_code = BUS_ADRALN; |
@@ -346,31 +405,27 @@ failed: | |||
346 | 405 | ||
347 | /* error reading opcodes */ | 406 | /* error reading opcodes */ |
348 | fetch_error: | 407 | fetch_error: |
349 | if (!user_mode(regs)) | 408 | printk(KERN_CRIT |
350 | printk(KERN_CRIT | 409 | "MISALIGN: %p: fault whilst reading instruction data\n", |
351 | "MISALIGN: %p: fault whilst reading instruction data\n", | 410 | pc); |
352 | pc); | ||
353 | goto failed; | 411 | goto failed; |
354 | 412 | ||
355 | bad_addr_mode: | 413 | bad_addr_mode: |
356 | if (!user_mode(regs)) | 414 | printk(KERN_CRIT |
357 | printk(KERN_CRIT | 415 | "MISALIGN: %lx: unsupported addressing mode %x\n", |
358 | "MISALIGN: %lx: unsupported addressing mode %x\n", | 416 | regs->pc, opcode); |
359 | regs->pc, opcode); | ||
360 | goto failed; | 417 | goto failed; |
361 | 418 | ||
362 | bad_reg_mode: | 419 | bad_reg_mode: |
363 | if (!user_mode(regs)) | 420 | printk(KERN_CRIT |
364 | printk(KERN_CRIT | 421 | "MISALIGN: %lx: unsupported register mode %x\n", |
365 | "MISALIGN: %lx: unsupported register mode %x\n", | 422 | regs->pc, opcode); |
366 | regs->pc, opcode); | ||
367 | goto failed; | 423 | goto failed; |
368 | 424 | ||
369 | unsupported_instruction: | 425 | unsupported_instruction: |
370 | if (!user_mode(regs)) | 426 | printk(KERN_CRIT |
371 | printk(KERN_CRIT | 427 | "MISALIGN: %lx: unsupported instruction %x (%s)\n", |
372 | "MISALIGN: %lx: unsupported instruction %x (%s)\n", | 428 | regs->pc, opcode, pop->name); |
373 | regs->pc, opcode, pop->name); | ||
374 | goto failed; | 429 | goto failed; |
375 | 430 | ||
376 | transfer_failed: | 431 | transfer_failed: |
@@ -391,7 +446,7 @@ transfer_failed: | |||
391 | 446 | ||
392 | /* we matched the opcode */ | 447 | /* we matched the opcode */ |
393 | found_opcode: | 448 | found_opcode: |
394 | kdebug("MISALIGN: %lx: %x==%x { %x, %x }\n", | 449 | kdebug("%lx: %x==%x { %x, %x }", |
395 | regs->pc, opcode, pop->opcode, pop->params[0], pop->params[1]); | 450 | regs->pc, opcode, pop->opcode, pop->params[0], pop->params[1]); |
396 | 451 | ||
397 | tmp = format_tbl[pop->format].opsz; | 452 | tmp = format_tbl[pop->format].opsz; |
@@ -406,106 +461,108 @@ found_opcode: | |||
406 | 461 | ||
407 | /* grab the extra displacement (note it's LSB first) */ | 462 | /* grab the extra displacement (note it's LSB first) */ |
408 | disp = 0; | 463 | disp = 0; |
409 | tmp = format_tbl[pop->format].dispsz >> 3; | 464 | dispsz = format_tbl[pop->format].dispsz; |
410 | while (tmp > 0) { | 465 | for (loop = 0; loop < dispsz; loop += 8) { |
411 | tmp--; | ||
412 | disp <<= 8; | ||
413 | |||
414 | pc++; | 466 | pc++; |
415 | if (__get_user(byte, pc) != 0) | 467 | if (__get_user(byte, pc) != 0) |
416 | goto fetch_error; | 468 | goto fetch_error; |
417 | disp |= byte; | 469 | disp |= byte << loop; |
470 | kdebug("{%p} disp[%02x]=%02x", pc, loop, byte); | ||
418 | } | 471 | } |
419 | 472 | ||
473 | kdebug("disp=%lx", disp); | ||
474 | |||
420 | set_fs(KERNEL_XDS); | 475 | set_fs(KERNEL_XDS); |
421 | if (fixup || regs->epsw & EPSW_nSL) | 476 | if (fixup) |
422 | set_fs(seg); | 477 | set_fs(seg); |
423 | 478 | ||
424 | tmp = (pop->params[0] ^ pop->params[1]) & 0x80000000; | 479 | tmp = (pop->params[0] ^ pop->params[1]) & 0x80000000; |
425 | if (!tmp) { | 480 | if (!tmp) { |
426 | if (!user_mode(regs)) | 481 | printk(KERN_CRIT |
427 | printk(KERN_CRIT | 482 | "MISALIGN: %lx: insn not move to/from memory %x\n", |
428 | "MISALIGN: %lx:" | 483 | regs->pc, opcode); |
429 | " insn not move to/from memory %x\n", | ||
430 | regs->pc, opcode); | ||
431 | goto failed; | 484 | goto failed; |
432 | } | 485 | } |
433 | 486 | ||
487 | /* determine the data transfer size of the move */ | ||
488 | if (pop->name[3] == 0 || /* "mov" */ | ||
489 | pop->name[4] == 'l') /* mov_lcc */ | ||
490 | inc = datasz = 4; | ||
491 | else if (pop->name[3] == 'h') /* movhu */ | ||
492 | inc = datasz = 2; | ||
493 | else | ||
494 | goto unsupported_instruction; | ||
495 | |||
434 | if (pop->params[0] & 0x80000000) { | 496 | if (pop->params[0] & 0x80000000) { |
435 | /* move memory to register */ | 497 | /* move memory to register */ |
436 | if (!misalignment_addr(registers, pop->params[0], opcode, disp, | 498 | if (!misalignment_addr(registers, sp, |
437 | &address, &postinc)) | 499 | pop->params[0], opcode, disp, |
500 | &address, &postinc, &inc)) | ||
438 | goto bad_addr_mode; | 501 | goto bad_addr_mode; |
439 | 502 | ||
440 | if (!misalignment_reg(registers, pop->params[1], opcode, disp, | 503 | if (!misalignment_reg(registers, pop->params[1], opcode, disp, |
441 | &store)) | 504 | &store)) |
442 | goto bad_reg_mode; | 505 | goto bad_reg_mode; |
443 | 506 | ||
444 | if (strcmp(pop->name, "mov") == 0) { | 507 | kdebug("mov%u (%p),DARn", datasz, address); |
445 | kdebug("FIXUP: mov (%p),DARn\n", address); | 508 | if (copy_from_user(&data, (void *) address, datasz) != 0) |
446 | if (copy_from_user(&data, (void *) address, 4) != 0) | 509 | goto transfer_failed; |
447 | goto transfer_failed; | 510 | if (pop->params[0] & 0x1000000) { |
448 | if (pop->params[0] & 0x1000000) | 511 | kdebug("inc=%lx", inc); |
449 | *postinc += 4; | 512 | *postinc += inc; |
450 | } else if (strcmp(pop->name, "movhu") == 0) { | ||
451 | kdebug("FIXUP: movhu (%p),DARn\n", address); | ||
452 | data = 0; | ||
453 | if (copy_from_user(&data, (void *) address, 2) != 0) | ||
454 | goto transfer_failed; | ||
455 | if (pop->params[0] & 0x1000000) | ||
456 | *postinc += 2; | ||
457 | } else { | ||
458 | goto unsupported_instruction; | ||
459 | } | 513 | } |
460 | 514 | ||
461 | *store = data; | 515 | *store = data; |
516 | kdebug("loaded %lx", data); | ||
462 | } else { | 517 | } else { |
463 | /* move register to memory */ | 518 | /* move register to memory */ |
464 | if (!misalignment_reg(registers, pop->params[0], opcode, disp, | 519 | if (!misalignment_reg(registers, pop->params[0], opcode, disp, |
465 | &store)) | 520 | &store)) |
466 | goto bad_reg_mode; | 521 | goto bad_reg_mode; |
467 | 522 | ||
468 | if (!misalignment_addr(registers, pop->params[1], opcode, disp, | 523 | if (!misalignment_addr(registers, sp, |
469 | &address, &postinc)) | 524 | pop->params[1], opcode, disp, |
525 | &address, &postinc, &inc)) | ||
470 | goto bad_addr_mode; | 526 | goto bad_addr_mode; |
471 | 527 | ||
472 | data = *store; | 528 | data = *store; |
473 | 529 | ||
474 | if (strcmp(pop->name, "mov") == 0) { | 530 | kdebug("mov%u %lx,(%p)", datasz, data, address); |
475 | kdebug("FIXUP: mov %lx,(%p)\n", data, address); | 531 | if (copy_to_user((void *) address, &data, datasz) != 0) |
476 | if (copy_to_user((void *) address, &data, 4) != 0) | 532 | goto transfer_failed; |
477 | goto transfer_failed; | 533 | if (pop->params[1] & 0x1000000) |
478 | if (pop->params[1] & 0x1000000) | 534 | *postinc += inc; |
479 | *postinc += 4; | ||
480 | } else if (strcmp(pop->name, "movhu") == 0) { | ||
481 | kdebug("FIXUP: movhu %hx,(%p)\n", | ||
482 | (uint16_t) data, address); | ||
483 | if (copy_to_user((void *) address, &data, 2) != 0) | ||
484 | goto transfer_failed; | ||
485 | if (pop->params[1] & 0x1000000) | ||
486 | *postinc += 2; | ||
487 | } else { | ||
488 | goto unsupported_instruction; | ||
489 | } | ||
490 | } | 535 | } |
491 | 536 | ||
492 | tmp = format_tbl[pop->format].opsz + format_tbl[pop->format].dispsz; | 537 | tmp = format_tbl[pop->format].opsz + format_tbl[pop->format].dispsz; |
493 | regs->pc += tmp >> 3; | 538 | regs->pc += tmp >> 3; |
494 | 539 | ||
540 | /* handle MOV_Lcc, which are currently the only FMT_D10 insns that | ||
541 | * access memory */ | ||
542 | if (pop->format == FMT_D10) | ||
543 | misalignment_MOV_Lcc(regs, opcode); | ||
544 | |||
495 | set_fs(seg); | 545 | set_fs(seg); |
496 | return; | ||
497 | } | 546 | } |
498 | 547 | ||
499 | /* | 548 | /* |
500 | * determine the address that was being accessed | 549 | * determine the address that was being accessed |
501 | */ | 550 | */ |
502 | static int misalignment_addr(unsigned long *registers, unsigned params, | 551 | static int misalignment_addr(unsigned long *registers, unsigned long sp, |
503 | unsigned opcode, unsigned disp, | 552 | unsigned params, unsigned opcode, |
504 | void **_address, unsigned long **_postinc) | 553 | unsigned long disp, |
554 | void **_address, unsigned long **_postinc, | ||
555 | unsigned long *_inc) | ||
505 | { | 556 | { |
506 | unsigned long *postinc = NULL, address = 0, tmp; | 557 | unsigned long *postinc = NULL, address = 0, tmp; |
507 | 558 | ||
508 | params &= 0x7fffffff; | 559 | if (!(params & 0x1000000)) { |
560 | kdebug("noinc"); | ||
561 | *_inc = 0; | ||
562 | _inc = NULL; | ||
563 | } | ||
564 | |||
565 | params &= 0x00ffffff; | ||
509 | 566 | ||
510 | do { | 567 | do { |
511 | switch (params & 0xff) { | 568 | switch (params & 0xff) { |
@@ -514,11 +571,11 @@ static int misalignment_addr(unsigned long *registers, unsigned params, | |||
514 | address += *postinc; | 571 | address += *postinc; |
515 | break; | 572 | break; |
516 | case DM1: | 573 | case DM1: |
517 | postinc = ®isters[Dreg_index[opcode >> 2 & 0x0c]]; | 574 | postinc = ®isters[Dreg_index[opcode >> 2 & 0x03]]; |
518 | address += *postinc; | 575 | address += *postinc; |
519 | break; | 576 | break; |
520 | case DM2: | 577 | case DM2: |
521 | postinc = ®isters[Dreg_index[opcode >> 4 & 0x30]]; | 578 | postinc = ®isters[Dreg_index[opcode >> 4 & 0x03]]; |
522 | address += *postinc; | 579 | address += *postinc; |
523 | break; | 580 | break; |
524 | case AM0: | 581 | case AM0: |
@@ -526,11 +583,11 @@ static int misalignment_addr(unsigned long *registers, unsigned params, | |||
526 | address += *postinc; | 583 | address += *postinc; |
527 | break; | 584 | break; |
528 | case AM1: | 585 | case AM1: |
529 | postinc = ®isters[Areg_index[opcode >> 2 & 0x0c]]; | 586 | postinc = ®isters[Areg_index[opcode >> 2 & 0x03]]; |
530 | address += *postinc; | 587 | address += *postinc; |
531 | break; | 588 | break; |
532 | case AM2: | 589 | case AM2: |
533 | postinc = ®isters[Areg_index[opcode >> 4 & 0x30]]; | 590 | postinc = ®isters[Areg_index[opcode >> 4 & 0x03]]; |
534 | address += *postinc; | 591 | address += *postinc; |
535 | break; | 592 | break; |
536 | case RM0: | 593 | case RM0: |
@@ -561,33 +618,53 @@ static int misalignment_addr(unsigned long *registers, unsigned params, | |||
561 | postinc = ®isters[Rreg_index[disp >> 4 & 0x0f]]; | 618 | postinc = ®isters[Rreg_index[disp >> 4 & 0x0f]]; |
562 | address += *postinc; | 619 | address += *postinc; |
563 | break; | 620 | break; |
621 | case SP: | ||
622 | address += sp; | ||
623 | break; | ||
564 | 624 | ||
625 | /* displacements are either to be added to the address | ||
626 | * before use, or, in the case of post-inc addressing, | ||
627 | * to be added into the base register after use */ | ||
565 | case SD8: | 628 | case SD8: |
566 | case SIMM8: | 629 | case SIMM8: |
567 | address += (int32_t) (int8_t) (disp & 0xff); | 630 | disp = (long) (int8_t) (disp & 0xff); |
568 | break; | 631 | goto displace_or_inc; |
569 | case SD16: | 632 | case SD16: |
570 | address += (int32_t) (int16_t) (disp & 0xffff); | 633 | disp = (long) (int16_t) (disp & 0xffff); |
571 | break; | 634 | goto displace_or_inc; |
572 | case SD24: | 635 | case SD24: |
573 | tmp = disp << 8; | 636 | tmp = disp << 8; |
574 | asm("asr 8,%0" : "=r"(tmp) : "0"(tmp)); | 637 | asm("asr 8,%0" : "=r"(tmp) : "0"(tmp)); |
575 | address += tmp; | 638 | disp = (long) tmp; |
576 | break; | 639 | goto displace_or_inc; |
577 | case SIMM4_2: | 640 | case SIMM4_2: |
578 | tmp = opcode >> 4 & 0x0f; | 641 | tmp = opcode >> 4 & 0x0f; |
579 | tmp <<= 28; | 642 | tmp <<= 28; |
580 | asm("asr 28,%0" : "=r"(tmp) : "0"(tmp)); | 643 | asm("asr 28,%0" : "=r"(tmp) : "0"(tmp)); |
581 | address += tmp; | 644 | disp = (long) tmp; |
582 | break; | 645 | goto displace_or_inc; |
646 | case IMM8: | ||
647 | disp &= 0x000000ff; | ||
648 | goto displace_or_inc; | ||
649 | case IMM16: | ||
650 | disp &= 0x0000ffff; | ||
651 | goto displace_or_inc; | ||
583 | case IMM24: | 652 | case IMM24: |
584 | address += disp & 0x00ffffff; | 653 | disp &= 0x00ffffff; |
585 | break; | 654 | goto displace_or_inc; |
586 | case IMM32: | 655 | case IMM32: |
656 | case IMM32_MEM: | ||
587 | case IMM32_HIGH8: | 657 | case IMM32_HIGH8: |
588 | address += disp; | 658 | case IMM32_HIGH8_MEM: |
659 | displace_or_inc: | ||
660 | kdebug("%s %lx", _inc ? "incr" : "disp", disp); | ||
661 | if (!_inc) | ||
662 | address += disp; | ||
663 | else | ||
664 | *_inc = disp; | ||
589 | break; | 665 | break; |
590 | default: | 666 | default: |
667 | BUG(); | ||
591 | return 0; | 668 | return 0; |
592 | } | 669 | } |
593 | } while ((params >>= 8)); | 670 | } while ((params >>= 8)); |
@@ -601,7 +678,7 @@ static int misalignment_addr(unsigned long *registers, unsigned params, | |||
601 | * determine the register that is acting as source/dest | 678 | * determine the register that is acting as source/dest |
602 | */ | 679 | */ |
603 | static int misalignment_reg(unsigned long *registers, unsigned params, | 680 | static int misalignment_reg(unsigned long *registers, unsigned params, |
604 | unsigned opcode, unsigned disp, | 681 | unsigned opcode, unsigned long disp, |
605 | unsigned long **_register) | 682 | unsigned long **_register) |
606 | { | 683 | { |
607 | params &= 0x7fffffff; | 684 | params &= 0x7fffffff; |
@@ -654,8 +731,239 @@ static int misalignment_reg(unsigned long *registers, unsigned params, | |||
654 | break; | 731 | break; |
655 | 732 | ||
656 | default: | 733 | default: |
734 | BUG(); | ||
657 | return 0; | 735 | return 0; |
658 | } | 736 | } |
659 | 737 | ||
660 | return 1; | 738 | return 1; |
661 | } | 739 | } |
740 | |||
741 | /* | ||
742 | * handle the conditional loop part of the move-and-loop instructions | ||
743 | */ | ||
744 | static void misalignment_MOV_Lcc(struct pt_regs *regs, uint32_t opcode) | ||
745 | { | ||
746 | unsigned long epsw = regs->epsw; | ||
747 | unsigned long NxorV; | ||
748 | |||
749 | kdebug("MOV_Lcc %x [flags=%lx]", opcode, epsw & 0xf); | ||
750 | |||
751 | /* calculate N^V and shift onto the same bit position as Z */ | ||
752 | NxorV = ((epsw >> 3) ^ epsw >> 1) & 1; | ||
753 | |||
754 | switch (opcode & 0xf) { | ||
755 | case 0x0: /* MOV_LLT: N^V */ | ||
756 | if (NxorV) | ||
757 | goto take_the_loop; | ||
758 | return; | ||
759 | case 0x1: /* MOV_LGT: ~(Z or (N^V))*/ | ||
760 | if (!((epsw & EPSW_FLAG_Z) | NxorV)) | ||
761 | goto take_the_loop; | ||
762 | return; | ||
763 | case 0x2: /* MOV_LGE: ~(N^V) */ | ||
764 | if (!NxorV) | ||
765 | goto take_the_loop; | ||
766 | return; | ||
767 | case 0x3: /* MOV_LLE: Z or (N^V) */ | ||
768 | if ((epsw & EPSW_FLAG_Z) | NxorV) | ||
769 | goto take_the_loop; | ||
770 | return; | ||
771 | |||
772 | case 0x4: /* MOV_LCS: C */ | ||
773 | if (epsw & EPSW_FLAG_C) | ||
774 | goto take_the_loop; | ||
775 | return; | ||
776 | case 0x5: /* MOV_LHI: ~(C or Z) */ | ||
777 | if (!(epsw & (EPSW_FLAG_C | EPSW_FLAG_Z))) | ||
778 | goto take_the_loop; | ||
779 | return; | ||
780 | case 0x6: /* MOV_LCC: ~C */ | ||
781 | if (!(epsw & EPSW_FLAG_C)) | ||
782 | goto take_the_loop; | ||
783 | return; | ||
784 | case 0x7: /* MOV_LLS: C or Z */ | ||
785 | if (epsw & (EPSW_FLAG_C | EPSW_FLAG_Z)) | ||
786 | goto take_the_loop; | ||
787 | return; | ||
788 | |||
789 | case 0x8: /* MOV_LEQ: Z */ | ||
790 | if (epsw & EPSW_FLAG_Z) | ||
791 | goto take_the_loop; | ||
792 | return; | ||
793 | case 0x9: /* MOV_LNE: ~Z */ | ||
794 | if (!(epsw & EPSW_FLAG_Z)) | ||
795 | goto take_the_loop; | ||
796 | return; | ||
797 | case 0xa: /* MOV_LRA: always */ | ||
798 | goto take_the_loop; | ||
799 | |||
800 | default: | ||
801 | BUG(); | ||
802 | } | ||
803 | |||
804 | take_the_loop: | ||
805 | /* wind the PC back to just after the SETLB insn */ | ||
806 | kdebug("loop LAR=%lx", regs->lar); | ||
807 | regs->pc = regs->lar - 4; | ||
808 | } | ||
809 | |||
810 | /* | ||
811 | * misalignment handler tests | ||
812 | */ | ||
813 | #ifdef CONFIG_TEST_MISALIGNMENT_HANDLER | ||
814 | static u8 __initdata testbuf[512] __attribute__((aligned(16))) = { | ||
815 | [257] = 0x11, | ||
816 | [258] = 0x22, | ||
817 | [259] = 0x33, | ||
818 | [260] = 0x44, | ||
819 | }; | ||
820 | |||
821 | #define ASSERTCMP(X, OP, Y) \ | ||
822 | do { \ | ||
823 | if (unlikely(!((X) OP (Y)))) { \ | ||
824 | printk(KERN_ERR "\n"); \ | ||
825 | printk(KERN_ERR "MISALIGN: Assertion failed at line %u\n", \ | ||
826 | __LINE__); \ | ||
827 | printk(KERN_ERR "0x%lx " #OP " 0x%lx is false\n", \ | ||
828 | (unsigned long)(X), (unsigned long)(Y)); \ | ||
829 | BUG(); \ | ||
830 | } \ | ||
831 | } while(0) | ||
832 | |||
833 | static int __init test_misalignment(void) | ||
834 | { | ||
835 | register void *r asm("e0"); | ||
836 | register u32 y asm("e1"); | ||
837 | void *p = testbuf, *q; | ||
838 | u32 tmp, tmp2, x; | ||
839 | |||
840 | printk(KERN_NOTICE "==>test_misalignment() [testbuf=%p]\n", p); | ||
841 | p++; | ||
842 | |||
843 | printk(KERN_NOTICE "___ MOV (Am),Dn ___\n"); | ||
844 | q = p + 256; | ||
845 | asm volatile("mov (%0),%1" : "+a"(q), "=d"(x)); | ||
846 | ASSERTCMP(q, ==, p + 256); | ||
847 | ASSERTCMP(x, ==, 0x44332211); | ||
848 | |||
849 | printk(KERN_NOTICE "___ MOV (256,Am),Dn ___\n"); | ||
850 | q = p; | ||
851 | asm volatile("mov (256,%0),%1" : "+a"(q), "=d"(x)); | ||
852 | ASSERTCMP(q, ==, p); | ||
853 | ASSERTCMP(x, ==, 0x44332211); | ||
854 | |||
855 | printk(KERN_NOTICE "___ MOV (Di,Am),Dn ___\n"); | ||
856 | tmp = 256; | ||
857 | q = p; | ||
858 | asm volatile("mov (%2,%0),%1" : "+a"(q), "=d"(x), "+d"(tmp)); | ||
859 | ASSERTCMP(q, ==, p); | ||
860 | ASSERTCMP(x, ==, 0x44332211); | ||
861 | ASSERTCMP(tmp, ==, 256); | ||
862 | |||
863 | printk(KERN_NOTICE "___ MOV (256,Rm),Rn ___\n"); | ||
864 | r = p; | ||
865 | asm volatile("mov (256,%0),%1" : "+r"(r), "=r"(y)); | ||
866 | ASSERTCMP(r, ==, p); | ||
867 | ASSERTCMP(y, ==, 0x44332211); | ||
868 | |||
869 | printk(KERN_NOTICE "___ MOV (Rm+),Rn ___\n"); | ||
870 | r = p + 256; | ||
871 | asm volatile("mov (%0+),%1" : "+r"(r), "=r"(y)); | ||
872 | ASSERTCMP(r, ==, p + 256 + 4); | ||
873 | ASSERTCMP(y, ==, 0x44332211); | ||
874 | |||
875 | printk(KERN_NOTICE "___ MOV (Rm+,8),Rn ___\n"); | ||
876 | r = p + 256; | ||
877 | asm volatile("mov (%0+,8),%1" : "+r"(r), "=r"(y)); | ||
878 | ASSERTCMP(r, ==, p + 256 + 8); | ||
879 | ASSERTCMP(y, ==, 0x44332211); | ||
880 | |||
881 | printk(KERN_NOTICE "___ MOV (7,SP),Rn ___\n"); | ||
882 | asm volatile( | ||
883 | "add -16,sp \n" | ||
884 | "mov +0x11,%0 \n" | ||
885 | "movbu %0,(7,sp) \n" | ||
886 | "mov +0x22,%0 \n" | ||
887 | "movbu %0,(8,sp) \n" | ||
888 | "mov +0x33,%0 \n" | ||
889 | "movbu %0,(9,sp) \n" | ||
890 | "mov +0x44,%0 \n" | ||
891 | "movbu %0,(10,sp) \n" | ||
892 | "mov (7,sp),%1 \n" | ||
893 | "add +16,sp \n" | ||
894 | : "+a"(q), "=d"(x)); | ||
895 | ASSERTCMP(x, ==, 0x44332211); | ||
896 | |||
897 | printk(KERN_NOTICE "___ MOV (259,SP),Rn ___\n"); | ||
898 | asm volatile( | ||
899 | "add -264,sp \n" | ||
900 | "mov +0x11,%0 \n" | ||
901 | "movbu %0,(259,sp) \n" | ||
902 | "mov +0x22,%0 \n" | ||
903 | "movbu %0,(260,sp) \n" | ||
904 | "mov +0x33,%0 \n" | ||
905 | "movbu %0,(261,sp) \n" | ||
906 | "mov +0x55,%0 \n" | ||
907 | "movbu %0,(262,sp) \n" | ||
908 | "mov (259,sp),%1 \n" | ||
909 | "add +264,sp \n" | ||
910 | : "+d"(tmp), "=d"(x)); | ||
911 | ASSERTCMP(x, ==, 0x55332211); | ||
912 | |||
913 | printk(KERN_NOTICE "___ MOV (260,SP),Rn ___\n"); | ||
914 | asm volatile( | ||
915 | "add -264,sp \n" | ||
916 | "mov +0x11,%0 \n" | ||
917 | "movbu %0,(260,sp) \n" | ||
918 | "mov +0x22,%0 \n" | ||
919 | "movbu %0,(261,sp) \n" | ||
920 | "mov +0x33,%0 \n" | ||
921 | "movbu %0,(262,sp) \n" | ||
922 | "mov +0x55,%0 \n" | ||
923 | "movbu %0,(263,sp) \n" | ||
924 | "mov (260,sp),%1 \n" | ||
925 | "add +264,sp \n" | ||
926 | : "+d"(tmp), "=d"(x)); | ||
927 | ASSERTCMP(x, ==, 0x55332211); | ||
928 | |||
929 | |||
930 | printk(KERN_NOTICE "___ MOV_LNE ___\n"); | ||
931 | tmp = 1; | ||
932 | tmp2 = 2; | ||
933 | q = p + 256; | ||
934 | asm volatile( | ||
935 | "setlb \n" | ||
936 | "mov %2,%3 \n" | ||
937 | "mov %1,%2 \n" | ||
938 | "cmp +0,%1 \n" | ||
939 | "mov_lne (%0+,4),%1" | ||
940 | : "+r"(q), "+d"(tmp), "+d"(tmp2), "=d"(x) | ||
941 | : | ||
942 | : "cc"); | ||
943 | ASSERTCMP(q, ==, p + 256 + 12); | ||
944 | ASSERTCMP(x, ==, 0x44332211); | ||
945 | |||
946 | printk(KERN_NOTICE "___ MOV in SETLB ___\n"); | ||
947 | tmp = 1; | ||
948 | tmp2 = 2; | ||
949 | q = p + 256; | ||
950 | asm volatile( | ||
951 | "setlb \n" | ||
952 | "mov %1,%3 \n" | ||
953 | "mov (%0+),%1 \n" | ||
954 | "cmp +0,%1 \n" | ||
955 | "lne " | ||
956 | : "+a"(q), "+d"(tmp), "+d"(tmp2), "=d"(x) | ||
957 | : | ||
958 | : "cc"); | ||
959 | |||
960 | ASSERTCMP(q, ==, p + 256 + 8); | ||
961 | ASSERTCMP(x, ==, 0x44332211); | ||
962 | |||
963 | printk(KERN_NOTICE "<==test_misalignment()\n"); | ||
964 | return 0; | ||
965 | } | ||
966 | |||
967 | arch_initcall(test_misalignment); | ||
968 | |||
969 | #endif /* CONFIG_TEST_MISALIGNMENT_HANDLER */ | ||
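[editorial aside] The SD24 and SIMM4_2 cases in misalignment_addr() above sign-extend a narrow displacement by shifting it up to the top of the register and arithmetically shifting it back down ("asr"). A minimal user-space sketch of the same trick in portable C, assuming the compiler implements >> on a negative signed value as an arithmetic shift (gcc does):

    #include <stdint.h>
    #include <stdio.h>

    /* Sign-extend the low 24 bits of a displacement, mirroring the
     * "shift left 8, arithmetic shift right 8" sequence used for the
     * SD24 case above. */
    static int32_t sign_extend24(uint32_t disp)
    {
        return (int32_t)(disp << 8) >> 8;
    }

    int main(void)
    {
        printf("%d\n", sign_extend24(0x00ffffff)); /* -1 */
        printf("%d\n", sign_extend24(0x00000042)); /* 66 */
        return 0;
    }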
diff --git a/arch/parisc/include/asm/smp.h b/arch/parisc/include/asm/smp.h index 398cdbaf4e54..409e698f4361 100644 --- a/arch/parisc/include/asm/smp.h +++ b/arch/parisc/include/asm/smp.h | |||
@@ -44,8 +44,6 @@ extern void arch_send_call_function_ipi(cpumask_t mask); | |||
44 | 44 | ||
45 | #define PROC_CHANGE_PENALTY 15 /* Schedule penalty */ | 45 | #define PROC_CHANGE_PENALTY 15 /* Schedule penalty */ |
46 | 46 | ||
47 | extern unsigned long cpu_present_mask; | ||
48 | |||
49 | #define raw_smp_processor_id() (current_thread_info()->cpu) | 47 | #define raw_smp_processor_id() (current_thread_info()->cpu) |
50 | 48 | ||
51 | #else /* CONFIG_SMP */ | 49 | #else /* CONFIG_SMP */ |
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index ed500ef799b7..08844fc24a2e 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S | |||
@@ -61,22 +61,25 @@ STACK_SIZE = 1 << STACK_SHIFT | |||
61 | 61 | ||
62 | #ifdef CONFIG_TRACE_IRQFLAGS | 62 | #ifdef CONFIG_TRACE_IRQFLAGS |
63 | .macro TRACE_IRQS_ON | 63 | .macro TRACE_IRQS_ON |
64 | l %r1,BASED(.Ltrace_irq_on) | 64 | basr %r2,%r0 |
65 | l %r1,BASED(.Ltrace_irq_on_caller) | ||
65 | basr %r14,%r1 | 66 | basr %r14,%r1 |
66 | .endm | 67 | .endm |
67 | 68 | ||
68 | .macro TRACE_IRQS_OFF | 69 | .macro TRACE_IRQS_OFF |
69 | l %r1,BASED(.Ltrace_irq_off) | 70 | basr %r2,%r0 |
71 | l %r1,BASED(.Ltrace_irq_off_caller) | ||
70 | basr %r14,%r1 | 72 | basr %r14,%r1 |
71 | .endm | 73 | .endm |
72 | 74 | ||
73 | .macro TRACE_IRQS_CHECK | 75 | .macro TRACE_IRQS_CHECK |
76 | basr %r2,%r0 | ||
74 | tm SP_PSW(%r15),0x03 # irqs enabled? | 77 | tm SP_PSW(%r15),0x03 # irqs enabled? |
75 | jz 0f | 78 | jz 0f |
76 | l %r1,BASED(.Ltrace_irq_on) | 79 | l %r1,BASED(.Ltrace_irq_on_caller) |
77 | basr %r14,%r1 | 80 | basr %r14,%r1 |
78 | j 1f | 81 | j 1f |
79 | 0: l %r1,BASED(.Ltrace_irq_off) | 82 | 0: l %r1,BASED(.Ltrace_irq_off_caller) |
80 | basr %r14,%r1 | 83 | basr %r14,%r1 |
81 | 1: | 84 | 1: |
82 | .endm | 85 | .endm |
@@ -1113,9 +1116,12 @@ cleanup_io_leave_insn: | |||
1113 | .Lschedtail: .long schedule_tail | 1116 | .Lschedtail: .long schedule_tail |
1114 | .Lsysc_table: .long sys_call_table | 1117 | .Lsysc_table: .long sys_call_table |
1115 | #ifdef CONFIG_TRACE_IRQFLAGS | 1118 | #ifdef CONFIG_TRACE_IRQFLAGS |
1116 | .Ltrace_irq_on: .long trace_hardirqs_on | 1119 | .Ltrace_irq_on_caller: |
1117 | .Ltrace_irq_off: | 1120 | .long trace_hardirqs_on_caller |
1118 | .long trace_hardirqs_off | 1121 | .Ltrace_irq_off_caller: |
1122 | .long trace_hardirqs_off_caller | ||
1123 | #endif | ||
1124 | #ifdef CONFIG_LOCKDEP | ||
1119 | .Llockdep_sys_exit: | 1125 | .Llockdep_sys_exit: |
1120 | .long lockdep_sys_exit | 1126 | .long lockdep_sys_exit |
1121 | #endif | 1127 | #endif |
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index d7ce150453f2..41aca06682aa 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S | |||
@@ -61,19 +61,22 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ | |||
61 | 61 | ||
62 | #ifdef CONFIG_TRACE_IRQFLAGS | 62 | #ifdef CONFIG_TRACE_IRQFLAGS |
63 | .macro TRACE_IRQS_ON | 63 | .macro TRACE_IRQS_ON |
64 | brasl %r14,trace_hardirqs_on | 64 | basr %r2,%r0 |
65 | brasl %r14,trace_hardirqs_on_caller | ||
65 | .endm | 66 | .endm |
66 | 67 | ||
67 | .macro TRACE_IRQS_OFF | 68 | .macro TRACE_IRQS_OFF |
68 | brasl %r14,trace_hardirqs_off | 69 | basr %r2,%r0 |
70 | brasl %r14,trace_hardirqs_off_caller | ||
69 | .endm | 71 | .endm |
70 | 72 | ||
71 | .macro TRACE_IRQS_CHECK | 73 | .macro TRACE_IRQS_CHECK |
74 | basr %r2,%r0 | ||
72 | tm SP_PSW(%r15),0x03 # irqs enabled? | 75 | tm SP_PSW(%r15),0x03 # irqs enabled? |
73 | jz 0f | 76 | jz 0f |
74 | brasl %r14,trace_hardirqs_on | 77 | brasl %r14,trace_hardirqs_on_caller |
75 | j 1f | 78 | j 1f |
76 | 0: brasl %r14,trace_hardirqs_off | 79 | 0: brasl %r14,trace_hardirqs_off_caller |
77 | 1: | 80 | 1: |
78 | .endm | 81 | .endm |
79 | #else | 82 | #else |
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 3e2c05cb6a87..04f8c67a6101 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c | |||
@@ -136,9 +136,12 @@ static void default_idle(void) | |||
136 | return; | 136 | return; |
137 | } | 137 | } |
138 | trace_hardirqs_on(); | 138 | trace_hardirqs_on(); |
139 | /* Don't trace preempt off for idle. */ | ||
140 | stop_critical_timings(); | ||
139 | /* Wait for external, I/O or machine check interrupt. */ | 141 | /* Wait for external, I/O or machine check interrupt. */ |
140 | __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT | | 142 | __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT | |
141 | PSW_MASK_IO | PSW_MASK_EXT); | 143 | PSW_MASK_IO | PSW_MASK_EXT); |
144 | start_critical_timings(); | ||
142 | } | 145 | } |
143 | 146 | ||
144 | void cpu_idle(void) | 147 | void cpu_idle(void) |
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 62122bad1e33..400b040df7fa 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c | |||
@@ -604,13 +604,13 @@ setup_memory(void) | |||
604 | if (memory_chunk[i].type != CHUNK_READ_WRITE) | 604 | if (memory_chunk[i].type != CHUNK_READ_WRITE) |
605 | continue; | 605 | continue; |
606 | start_chunk = PFN_DOWN(memory_chunk[i].addr); | 606 | start_chunk = PFN_DOWN(memory_chunk[i].addr); |
607 | end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size) - 1; | 607 | end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size); |
608 | end_chunk = min(end_chunk, end_pfn); | 608 | end_chunk = min(end_chunk, end_pfn); |
609 | if (start_chunk >= end_chunk) | 609 | if (start_chunk >= end_chunk) |
610 | continue; | 610 | continue; |
611 | add_active_range(0, start_chunk, end_chunk); | 611 | add_active_range(0, start_chunk, end_chunk); |
612 | pfn = max(start_chunk, start_pfn); | 612 | pfn = max(start_chunk, start_pfn); |
613 | for (; pfn <= end_chunk; pfn++) | 613 | for (; pfn < end_chunk; pfn++) |
614 | page_set_storage_key(PFN_PHYS(pfn), PAGE_DEFAULT_KEY); | 614 | page_set_storage_key(PFN_PHYS(pfn), PAGE_DEFAULT_KEY); |
615 | } | 615 | } |
616 | 616 | ||
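[editorial aside] The setup.c change above appears to switch end_chunk from "index of the last page frame" to the half-open convention (one past the last frame) that add_active_range() and the storage-key loop now both use. A stand-alone calculation with made-up numbers shows the one-page difference the old inclusive bound introduced when treated as an exclusive end:

    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

    int main(void)
    {
        /* A hypothetical 1 MiB read/write chunk starting at address 0. */
        unsigned long addr = 0, size = 0x100000;
        unsigned long start = PFN_DOWN(addr);

        unsigned long end_old = start + PFN_DOWN(size) - 1; /* 255: last PFN */
        unsigned long end_new = start + PFN_DOWN(size);     /* 256: one past */

        /* Read as an exclusive bound, the old value covers one page less. */
        printf("old: %lu pages, new: %lu pages\n",
               end_old - start, end_new - start);           /* 255 vs 256 */
        return 0;
    }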
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c index 5fdb799062b7..4fe952e557ac 100644 --- a/arch/s390/kernel/sys_s390.c +++ b/arch/s390/kernel/sys_s390.c | |||
@@ -198,7 +198,7 @@ asmlinkage long s390x_newuname(struct new_utsname __user *name) | |||
198 | { | 198 | { |
199 | int ret = sys_newuname(name); | 199 | int ret = sys_newuname(name); |
200 | 200 | ||
201 | if (current->personality == PER_LINUX32 && !ret) { | 201 | if (personality(current->personality) == PER_LINUX32 && !ret) { |
202 | ret = copy_to_user(name->machine, "s390\0\0\0\0", 8); | 202 | ret = copy_to_user(name->machine, "s390\0\0\0\0", 8); |
203 | if (ret) ret = -EFAULT; | 203 | if (ret) ret = -EFAULT; |
204 | } | 204 | } |
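[editorial aside] The sys_s390.c fix matters when a 31-bit task carries extra personality flags (for example ADDR_NO_RANDOMIZE set via setarch): the raw comparison then fails and uname keeps reporting the 64-bit machine string. personality() masks the flag bits off before comparing. A stand-alone illustration with the relevant constants copied locally (values mirror include/linux/personality.h):

    #include <stdio.h>

    /* Local copies for illustration only. */
    #define PER_MASK            0x00ff
    #define PER_LINUX32         0x0008
    #define ADDR_NO_RANDOMIZE   0x0040000

    #define personality(pers)   ((pers) & PER_MASK)

    int main(void)
    {
        unsigned int pers = PER_LINUX32 | ADDR_NO_RANDOMIZE;

        /* Old test: any extra flag bit defeats the comparison. */
        printf("old: %s\n",
               pers == PER_LINUX32 ? "s390" : "s390x");             /* s390x */
        /* New test: only the base personality is compared. */
        printf("new: %s\n",
               personality(pers) == PER_LINUX32 ? "s390" : "s390x"); /* s390 */
        return 0;
    }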
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index 632b13e10053..a947899dcba1 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c | |||
@@ -65,18 +65,21 @@ static int machine_has_topology_irq; | |||
65 | static struct timer_list topology_timer; | 65 | static struct timer_list topology_timer; |
66 | static void set_topology_timer(void); | 66 | static void set_topology_timer(void); |
67 | static DECLARE_WORK(topology_work, topology_work_fn); | 67 | static DECLARE_WORK(topology_work, topology_work_fn); |
68 | /* topology_lock protects the core linked list */ | ||
69 | static DEFINE_SPINLOCK(topology_lock); | ||
68 | 70 | ||
69 | cpumask_t cpu_core_map[NR_CPUS]; | 71 | cpumask_t cpu_core_map[NR_CPUS]; |
70 | 72 | ||
71 | cpumask_t cpu_coregroup_map(unsigned int cpu) | 73 | cpumask_t cpu_coregroup_map(unsigned int cpu) |
72 | { | 74 | { |
73 | struct core_info *core = &core_info; | 75 | struct core_info *core = &core_info; |
76 | unsigned long flags; | ||
74 | cpumask_t mask; | 77 | cpumask_t mask; |
75 | 78 | ||
76 | cpus_clear(mask); | 79 | cpus_clear(mask); |
77 | if (!machine_has_topology) | 80 | if (!machine_has_topology) |
78 | return cpu_present_map; | 81 | return cpu_present_map; |
79 | mutex_lock(&smp_cpu_state_mutex); | 82 | spin_lock_irqsave(&topology_lock, flags); |
80 | while (core) { | 83 | while (core) { |
81 | if (cpu_isset(cpu, core->mask)) { | 84 | if (cpu_isset(cpu, core->mask)) { |
82 | mask = core->mask; | 85 | mask = core->mask; |
@@ -84,7 +87,7 @@ cpumask_t cpu_coregroup_map(unsigned int cpu) | |||
84 | } | 87 | } |
85 | core = core->next; | 88 | core = core->next; |
86 | } | 89 | } |
87 | mutex_unlock(&smp_cpu_state_mutex); | 90 | spin_unlock_irqrestore(&topology_lock, flags); |
88 | if (cpus_empty(mask)) | 91 | if (cpus_empty(mask)) |
89 | mask = cpumask_of_cpu(cpu); | 92 | mask = cpumask_of_cpu(cpu); |
90 | return mask; | 93 | return mask; |
@@ -133,7 +136,7 @@ static void tl_to_cores(struct tl_info *info) | |||
133 | union tl_entry *tle, *end; | 136 | union tl_entry *tle, *end; |
134 | struct core_info *core = &core_info; | 137 | struct core_info *core = &core_info; |
135 | 138 | ||
136 | mutex_lock(&smp_cpu_state_mutex); | 139 | spin_lock_irq(&topology_lock); |
137 | clear_cores(); | 140 | clear_cores(); |
138 | tle = info->tle; | 141 | tle = info->tle; |
139 | end = (union tl_entry *)((unsigned long)info + info->length); | 142 | end = (union tl_entry *)((unsigned long)info + info->length); |
@@ -157,7 +160,7 @@ static void tl_to_cores(struct tl_info *info) | |||
157 | } | 160 | } |
158 | tle = next_tle(tle); | 161 | tle = next_tle(tle); |
159 | } | 162 | } |
160 | mutex_unlock(&smp_cpu_state_mutex); | 163 | spin_unlock_irq(&topology_lock); |
161 | } | 164 | } |
162 | 165 | ||
163 | static void topology_update_polarization_simple(void) | 166 | static void topology_update_polarization_simple(void) |
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h index 436c28539577..65eaae34e753 100644 --- a/arch/sh/include/asm/io.h +++ b/arch/sh/include/asm/io.h | |||
@@ -293,6 +293,10 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags) | |||
293 | */ | 293 | */ |
294 | #define xlate_dev_kmem_ptr(p) p | 294 | #define xlate_dev_kmem_ptr(p) p |
295 | 295 | ||
296 | #define ARCH_HAS_VALID_PHYS_ADDR_RANGE | ||
297 | int valid_phys_addr_range(unsigned long addr, size_t size); | ||
298 | int valid_mmap_phys_addr_range(unsigned long pfn, size_t size); | ||
299 | |||
296 | #endif /* __KERNEL__ */ | 300 | #endif /* __KERNEL__ */ |
297 | 301 | ||
298 | #endif /* __ASM_SH_IO_H */ | 302 | #endif /* __ASM_SH_IO_H */ |
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h index 52220d70a096..b517ae08b9c0 100644 --- a/arch/sh/include/asm/pgtable.h +++ b/arch/sh/include/asm/pgtable.h | |||
@@ -148,6 +148,12 @@ extern void paging_init(void); | |||
148 | extern void page_table_range_init(unsigned long start, unsigned long end, | 148 | extern void page_table_range_init(unsigned long start, unsigned long end, |
149 | pgd_t *pgd); | 149 | pgd_t *pgd); |
150 | 150 | ||
151 | #if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_CPU_SH4) && defined(CONFIG_MMU) | ||
152 | extern void kmap_coherent_init(void); | ||
153 | #else | ||
154 | #define kmap_coherent_init() do { } while (0) | ||
155 | #endif | ||
156 | |||
151 | #include <asm-generic/pgtable.h> | 157 | #include <asm-generic/pgtable.h> |
152 | 158 | ||
153 | #endif /* __ASM_SH_PGTABLE_H */ | 159 | #endif /* __ASM_SH_PGTABLE_H */ |
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c index a7412cede534..6d9e6972cfc9 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c | |||
@@ -119,17 +119,17 @@ static struct plat_sci_port sci_platform_data[] = { | |||
119 | },{ | 119 | },{ |
120 | .mapbase = 0xa4e30000, | 120 | .mapbase = 0xa4e30000, |
121 | .flags = UPF_BOOT_AUTOCONF, | 121 | .flags = UPF_BOOT_AUTOCONF, |
122 | .type = PORT_SCI, | 122 | .type = PORT_SCIFA, |
123 | .irqs = { 56, 56, 56, 56 }, | 123 | .irqs = { 56, 56, 56, 56 }, |
124 | },{ | 124 | },{ |
125 | .mapbase = 0xa4e40000, | 125 | .mapbase = 0xa4e40000, |
126 | .flags = UPF_BOOT_AUTOCONF, | 126 | .flags = UPF_BOOT_AUTOCONF, |
127 | .type = PORT_SCI, | 127 | .type = PORT_SCIFA, |
128 | .irqs = { 88, 88, 88, 88 }, | 128 | .irqs = { 88, 88, 88, 88 }, |
129 | },{ | 129 | },{ |
130 | .mapbase = 0xa4e50000, | 130 | .mapbase = 0xa4e50000, |
131 | .flags = UPF_BOOT_AUTOCONF, | 131 | .flags = UPF_BOOT_AUTOCONF, |
132 | .type = PORT_SCI, | 132 | .type = PORT_SCIFA, |
133 | .irqs = { 109, 109, 109, 109 }, | 133 | .irqs = { 109, 109, 109, 109 }, |
134 | }, { | 134 | }, { |
135 | .flags = 0, | 135 | .flags = 0, |
diff --git a/arch/sh/kernel/early_printk.c b/arch/sh/kernel/early_printk.c index 6b7d166694e2..a952dcf9999d 100644 --- a/arch/sh/kernel/early_printk.c +++ b/arch/sh/kernel/early_printk.c | |||
@@ -75,6 +75,7 @@ static struct console bios_console = { | |||
75 | #endif | 75 | #endif |
76 | 76 | ||
77 | static struct uart_port scif_port = { | 77 | static struct uart_port scif_port = { |
78 | .type = PORT_SCIF, | ||
78 | .mapbase = CONFIG_EARLY_SCIF_CONSOLE_PORT, | 79 | .mapbase = CONFIG_EARLY_SCIF_CONSOLE_PORT, |
79 | .membase = (char __iomem *)CONFIG_EARLY_SCIF_CONSOLE_PORT, | 80 | .membase = (char __iomem *)CONFIG_EARLY_SCIF_CONSOLE_PORT, |
80 | }; | 81 | }; |
@@ -84,9 +85,9 @@ static void scif_sercon_putc(int c) | |||
84 | while (((sci_in(&scif_port, SCFDR) & EPK_FIFO_BITS) >= EPK_FIFO_SIZE)) | 85 | while (((sci_in(&scif_port, SCFDR) & EPK_FIFO_BITS) >= EPK_FIFO_SIZE)) |
85 | ; | 86 | ; |
86 | 87 | ||
87 | sci_out(&scif_port, SCxTDR, c); | ||
88 | sci_in(&scif_port, SCxSR); | 88 | sci_in(&scif_port, SCxSR); |
89 | sci_out(&scif_port, SCxSR, 0xf3 & ~(0x20 | 0x40)); | 89 | sci_out(&scif_port, SCxSR, 0xf3 & ~(0x20 | 0x40)); |
90 | sci_out(&scif_port, SCxTDR, c); | ||
90 | 91 | ||
91 | while ((sci_in(&scif_port, SCxSR) & 0x40) == 0) | 92 | while ((sci_in(&scif_port, SCxSR) & 0x40) == 0) |
92 | ; | 93 | ; |
diff --git a/arch/sh/kernel/timers/timer-tmu.c b/arch/sh/kernel/timers/timer-tmu.c index aaaf90d06b85..3c61ddd4d43e 100644 --- a/arch/sh/kernel/timers/timer-tmu.c +++ b/arch/sh/kernel/timers/timer-tmu.c | |||
@@ -120,7 +120,7 @@ static void tmu_set_mode(enum clock_event_mode mode, | |||
120 | { | 120 | { |
121 | switch (mode) { | 121 | switch (mode) { |
122 | case CLOCK_EVT_MODE_PERIODIC: | 122 | case CLOCK_EVT_MODE_PERIODIC: |
123 | ctrl_outl(ctrl_inl(TMU0_TCNT), TMU0_TCOR); | 123 | ctrl_outl(tmu_latest_interval[TMU0], TMU0_TCOR); |
124 | break; | 124 | break; |
125 | case CLOCK_EVT_MODE_ONESHOT: | 125 | case CLOCK_EVT_MODE_ONESHOT: |
126 | ctrl_outl(0, TMU0_TCOR); | 126 | ctrl_outl(0, TMU0_TCOR); |
diff --git a/arch/sh/lib/copy_page.S b/arch/sh/lib/copy_page.S index 5d12e657be34..43de7e8e4e17 100644 --- a/arch/sh/lib/copy_page.S +++ b/arch/sh/lib/copy_page.S | |||
@@ -80,6 +80,11 @@ ENTRY(copy_page) | |||
80 | .section __ex_table, "a"; \ | 80 | .section __ex_table, "a"; \ |
81 | .long 9999b, 6000f ; \ | 81 | .long 9999b, 6000f ; \ |
82 | .previous | 82 | .previous |
83 | #define EX_NO_POP(...) \ | ||
84 | 9999: __VA_ARGS__ ; \ | ||
85 | .section __ex_table, "a"; \ | ||
86 | .long 9999b, 6005f ; \ | ||
87 | .previous | ||
83 | ENTRY(__copy_user) | 88 | ENTRY(__copy_user) |
84 | ! Check if small number of bytes | 89 | ! Check if small number of bytes |
85 | mov #11,r0 | 90 | mov #11,r0 |
@@ -139,9 +144,9 @@ EX( mov.b r1,@r4 ) | |||
139 | bt 1f | 144 | bt 1f |
140 | 145 | ||
141 | 2: | 146 | 2: |
142 | EX( mov.b @r5+,r0 ) | 147 | EX_NO_POP( mov.b @r5+,r0 ) |
143 | dt r6 | 148 | dt r6 |
144 | EX( mov.b r0,@r4 ) | 149 | EX_NO_POP( mov.b r0,@r4 ) |
145 | bf/s 2b | 150 | bf/s 2b |
146 | add #1,r4 | 151 | add #1,r4 |
147 | 152 | ||
@@ -150,7 +155,7 @@ EX( mov.b r0,@r4 ) | |||
150 | 155 | ||
151 | # Exception handler: | 156 | # Exception handler: |
152 | .section .fixup, "ax" | 157 | .section .fixup, "ax" |
153 | 6000: | 158 | 6005: |
154 | mov.l 8000f,r1 | 159 | mov.l 8000f,r1 |
155 | mov r3,r0 | 160 | mov r3,r0 |
156 | jmp @r1 | 161 | jmp @r1 |
diff --git a/arch/sh/mm/Makefile_32 b/arch/sh/mm/Makefile_32 index 70e0906023cc..f066e76da204 100644 --- a/arch/sh/mm/Makefile_32 +++ b/arch/sh/mm/Makefile_32 | |||
@@ -2,7 +2,7 @@ | |||
2 | # Makefile for the Linux SuperH-specific parts of the memory manager. | 2 | # Makefile for the Linux SuperH-specific parts of the memory manager. |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y := init.o extable_32.o consistent.o | 5 | obj-y := init.o extable_32.o consistent.o mmap.o |
6 | 6 | ||
7 | ifndef CONFIG_CACHE_OFF | 7 | ifndef CONFIG_CACHE_OFF |
8 | cache-$(CONFIG_CPU_SH2) := cache-sh2.o | 8 | cache-$(CONFIG_CPU_SH2) := cache-sh2.o |
diff --git a/arch/sh/mm/Makefile_64 b/arch/sh/mm/Makefile_64 index 0d92a8a3ac9a..9481d0f54efd 100644 --- a/arch/sh/mm/Makefile_64 +++ b/arch/sh/mm/Makefile_64 | |||
@@ -2,7 +2,7 @@ | |||
2 | # Makefile for the Linux SuperH-specific parts of the memory manager. | 2 | # Makefile for the Linux SuperH-specific parts of the memory manager. |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y := init.o consistent.o | 5 | obj-y := init.o consistent.o mmap.o |
6 | 6 | ||
7 | mmu-y := tlb-nommu.o pg-nommu.o extable_32.o | 7 | mmu-y := tlb-nommu.o pg-nommu.o extable_32.o |
8 | mmu-$(CONFIG_MMU) := fault_64.o ioremap_64.o tlbflush_64.o tlb-sh5.o \ | 8 | mmu-$(CONFIG_MMU) := fault_64.o ioremap_64.o tlbflush_64.o tlb-sh5.o \ |
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index 4abf00031dae..6cbef8caeb56 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c | |||
@@ -137,6 +137,7 @@ void __init page_table_range_init(unsigned long start, unsigned long end, | |||
137 | void __init paging_init(void) | 137 | void __init paging_init(void) |
138 | { | 138 | { |
139 | unsigned long max_zone_pfns[MAX_NR_ZONES]; | 139 | unsigned long max_zone_pfns[MAX_NR_ZONES]; |
140 | unsigned long vaddr; | ||
140 | int nid; | 141 | int nid; |
141 | 142 | ||
142 | /* We don't need to map the kernel through the TLB, as | 143 | /* We don't need to map the kernel through the TLB, as |
@@ -148,10 +149,15 @@ void __init paging_init(void) | |||
148 | * check for a null value. */ | 149 | * check for a null value. */ |
149 | set_TTB(swapper_pg_dir); | 150 | set_TTB(swapper_pg_dir); |
150 | 151 | ||
151 | /* Populate the relevant portions of swapper_pg_dir so that | 152 | /* |
153 | * Populate the relevant portions of swapper_pg_dir so that | ||
152 | * we can use the fixmap entries without calling kmalloc. | 154 | * we can use the fixmap entries without calling kmalloc. |
153 | * pte's will be filled in by __set_fixmap(). */ | 155 | * pte's will be filled in by __set_fixmap(). |
154 | page_table_range_init(FIXADDR_START, FIXADDR_TOP, swapper_pg_dir); | 156 | */ |
157 | vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; | ||
158 | page_table_range_init(vaddr, 0, swapper_pg_dir); | ||
159 | |||
160 | kmap_coherent_init(); | ||
155 | 161 | ||
156 | memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); | 162 | memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); |
157 | 163 | ||
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c new file mode 100644 index 000000000000..8837d511710a --- /dev/null +++ b/arch/sh/mm/mmap.c | |||
@@ -0,0 +1,31 @@ | |||
1 | /* | ||
2 | * arch/sh/mm/mmap.c | ||
3 | * | ||
4 | * Copyright (C) 2008 Paul Mundt | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | ||
10 | #include <linux/io.h> | ||
11 | #include <linux/mm.h> | ||
12 | #include <asm/page.h> | ||
13 | |||
14 | /* | ||
15 | * You really shouldn't be using read() or write() on /dev/mem. This | ||
16 | * might go away in the future. | ||
17 | */ | ||
18 | int valid_phys_addr_range(unsigned long addr, size_t count) | ||
19 | { | ||
20 | if (addr < __MEMORY_START) | ||
21 | return 0; | ||
22 | if (addr + count > __pa(high_memory)) | ||
23 | return 0; | ||
24 | |||
25 | return 1; | ||
26 | } | ||
27 | |||
28 | int valid_mmap_phys_addr_range(unsigned long pfn, size_t size) | ||
29 | { | ||
30 | return 1; | ||
31 | } | ||
diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c index 38870e0fc182..2fe14da1f839 100644 --- a/arch/sh/mm/pg-sh4.c +++ b/arch/sh/mm/pg-sh4.c | |||
@@ -7,6 +7,7 @@ | |||
7 | * Released under the terms of the GNU GPL v2.0. | 7 | * Released under the terms of the GNU GPL v2.0. |
8 | */ | 8 | */ |
9 | #include <linux/mm.h> | 9 | #include <linux/mm.h> |
10 | #include <linux/init.h> | ||
10 | #include <linux/mutex.h> | 11 | #include <linux/mutex.h> |
11 | #include <linux/fs.h> | 12 | #include <linux/fs.h> |
12 | #include <linux/highmem.h> | 13 | #include <linux/highmem.h> |
@@ -16,6 +17,20 @@ | |||
16 | 17 | ||
17 | #define CACHE_ALIAS (current_cpu_data.dcache.alias_mask) | 18 | #define CACHE_ALIAS (current_cpu_data.dcache.alias_mask) |
18 | 19 | ||
20 | #define kmap_get_fixmap_pte(vaddr) \ | ||
21 | pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr)) | ||
22 | |||
23 | static pte_t *kmap_coherent_pte; | ||
24 | |||
25 | void __init kmap_coherent_init(void) | ||
26 | { | ||
27 | unsigned long vaddr; | ||
28 | |||
29 | /* cache the first coherent kmap pte */ | ||
30 | vaddr = __fix_to_virt(FIX_CMAP_BEGIN); | ||
31 | kmap_coherent_pte = kmap_get_fixmap_pte(vaddr); | ||
32 | } | ||
33 | |||
19 | static inline void *kmap_coherent(struct page *page, unsigned long addr) | 34 | static inline void *kmap_coherent(struct page *page, unsigned long addr) |
20 | { | 35 | { |
21 | enum fixed_addresses idx; | 36 | enum fixed_addresses idx; |
@@ -34,6 +49,8 @@ static inline void *kmap_coherent(struct page *page, unsigned long addr) | |||
34 | 49 | ||
35 | update_mmu_cache(NULL, vaddr, pte); | 50 | update_mmu_cache(NULL, vaddr, pte); |
36 | 51 | ||
52 | set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte); | ||
53 | |||
37 | return (void *)vaddr; | 54 | return (void *)vaddr; |
38 | } | 55 | } |
39 | 56 | ||
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 93224b569187..3cf457f90e8e 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -167,9 +167,12 @@ config GENERIC_PENDING_IRQ | |||
167 | config X86_SMP | 167 | config X86_SMP |
168 | bool | 168 | bool |
169 | depends on SMP && ((X86_32 && !X86_VOYAGER) || X86_64) | 169 | depends on SMP && ((X86_32 && !X86_VOYAGER) || X86_64) |
170 | select USE_GENERIC_SMP_HELPERS | ||
171 | default y | 170 | default y |
172 | 171 | ||
172 | config USE_GENERIC_SMP_HELPERS | ||
173 | def_bool y | ||
174 | depends on SMP | ||
175 | |||
173 | config X86_32_SMP | 176 | config X86_32_SMP |
174 | def_bool y | 177 | def_bool y |
175 | depends on X86_32 && SMP | 178 | depends on X86_32 && SMP |
@@ -239,21 +242,13 @@ config X86_FIND_SMP_CONFIG | |||
239 | def_bool y | 242 | def_bool y |
240 | depends on X86_MPPARSE || X86_VOYAGER | 243 | depends on X86_MPPARSE || X86_VOYAGER |
241 | 244 | ||
242 | if ACPI | ||
243 | config X86_MPPARSE | 245 | config X86_MPPARSE |
244 | def_bool y | 246 | bool "Enable MPS table" if ACPI |
245 | bool "Enable MPS table" | 247 | default y |
246 | depends on X86_LOCAL_APIC | 248 | depends on X86_LOCAL_APIC |
247 | help | 249 | help |
248 | For old smp systems that do not have proper acpi support. Newer systems | 250 | For old smp systems that do not have proper acpi support. Newer systems |
249 | (esp with 64bit cpus) with acpi support, MADT and DSDT will override it | 251 | (esp with 64bit cpus) with acpi support, MADT and DSDT will override it |
250 | endif | ||
251 | |||
252 | if !ACPI | ||
253 | config X86_MPPARSE | ||
254 | def_bool y | ||
255 | depends on X86_LOCAL_APIC | ||
256 | endif | ||
257 | 252 | ||
258 | choice | 253 | choice |
259 | prompt "Subarchitecture Type" | 254 | prompt "Subarchitecture Type" |
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c index 4bc02b23674b..e82ebd652263 100644 --- a/arch/x86/ia32/ia32_signal.c +++ b/arch/x86/ia32/ia32_signal.c | |||
@@ -572,11 +572,6 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
572 | regs->dx = (unsigned long) &frame->info; | 572 | regs->dx = (unsigned long) &frame->info; |
573 | regs->cx = (unsigned long) &frame->uc; | 573 | regs->cx = (unsigned long) &frame->uc; |
574 | 574 | ||
575 | /* Make -mregparm=3 work */ | ||
576 | regs->ax = sig; | ||
577 | regs->dx = (unsigned long) &frame->info; | ||
578 | regs->cx = (unsigned long) &frame->uc; | ||
579 | |||
580 | loadsegment(ds, __USER32_DS); | 575 | loadsegment(ds, __USER32_DS); |
581 | loadsegment(es, __USER32_DS); | 576 | loadsegment(es, __USER32_DS); |
582 | 577 | ||
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index 8d676d8ecde9..9830681446ad 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h | |||
@@ -113,7 +113,6 @@ static inline void acpi_disable_pci(void) | |||
113 | acpi_pci_disabled = 1; | 113 | acpi_pci_disabled = 1; |
114 | acpi_noirq_set(); | 114 | acpi_noirq_set(); |
115 | } | 115 | } |
116 | extern int acpi_irq_balance_set(char *str); | ||
117 | 116 | ||
118 | /* routines for saving/restoring kernel state */ | 117 | /* routines for saving/restoring kernel state */ |
119 | extern int acpi_save_state_mem(void); | 118 | extern int acpi_save_state_mem(void); |
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h index 360010322711..9fa9dcdf344b 100644 --- a/arch/x86/include/asm/bitops.h +++ b/arch/x86/include/asm/bitops.h | |||
@@ -168,7 +168,15 @@ static inline void __change_bit(int nr, volatile unsigned long *addr) | |||
168 | */ | 168 | */ |
169 | static inline void change_bit(int nr, volatile unsigned long *addr) | 169 | static inline void change_bit(int nr, volatile unsigned long *addr) |
170 | { | 170 | { |
171 | asm volatile(LOCK_PREFIX "btc %1,%0" : ADDR : "Ir" (nr)); | 171 | if (IS_IMMEDIATE(nr)) { |
172 | asm volatile(LOCK_PREFIX "xorb %1,%0" | ||
173 | : CONST_MASK_ADDR(nr, addr) | ||
174 | : "iq" ((u8)CONST_MASK(nr))); | ||
175 | } else { | ||
176 | asm volatile(LOCK_PREFIX "btc %1,%0" | ||
177 | : BITOP_ADDR(addr) | ||
178 | : "Ir" (nr)); | ||
179 | } | ||
172 | } | 180 | } |
173 | 181 | ||
174 | /** | 182 | /** |
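[editorial aside] The new fast path in change_bit() above handles a compile-time-constant bit number with a byte-wide xorb against an immediate mask instead of the general btc. Semantically (ignoring the LOCK prefix that makes the kernel version SMP-atomic) it is just a byte XOR; a user-space sketch with locally defined helpers, equivalent to the word-based form on little-endian x86:

    #include <stdio.h>

    /* Local stand-ins for the CONST_MASK-style helpers. */
    #define BIT_BYTE(nr)   ((nr) / 8)
    #define BIT_MASK8(nr)  (1u << ((nr) & 7))

    /* What the "xorb $mask, off(addr)" form does. */
    static void change_bit_byte(int nr, unsigned char *addr)
    {
        addr[BIT_BYTE(nr)] ^= BIT_MASK8(nr);
    }

    /* What the generic btc-based form does. */
    static void change_bit_generic(int nr, unsigned long *addr)
    {
        addr[nr / (8 * sizeof(long))] ^= 1ul << (nr % (8 * sizeof(long)));
    }

    int main(void)
    {
        unsigned long a = 0, b = 0;

        change_bit_byte(21, (unsigned char *)&a);
        change_bit_generic(21, &b);
        printf("%#lx %#lx\n", a, b);   /* identical on little-endian x86 */
        return 0;
    }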
diff --git a/arch/x86/include/asm/byteorder.h b/arch/x86/include/asm/byteorder.h index e02ae2d89acf..f110ad417df3 100644 --- a/arch/x86/include/asm/byteorder.h +++ b/arch/x86/include/asm/byteorder.h | |||
@@ -4,26 +4,33 @@ | |||
4 | #include <asm/types.h> | 4 | #include <asm/types.h> |
5 | #include <linux/compiler.h> | 5 | #include <linux/compiler.h> |
6 | 6 | ||
7 | #ifdef __GNUC__ | 7 | #define __LITTLE_ENDIAN |
8 | 8 | ||
9 | #ifdef __i386__ | 9 | static inline __attribute_const__ __u32 __arch_swab32(__u32 val) |
10 | |||
11 | static inline __attribute_const__ __u32 ___arch__swab32(__u32 x) | ||
12 | { | 10 | { |
13 | #ifdef CONFIG_X86_BSWAP | 11 | #ifdef __i386__ |
14 | asm("bswap %0" : "=r" (x) : "0" (x)); | 12 | # ifdef CONFIG_X86_BSWAP |
15 | #else | 13 | asm("bswap %0" : "=r" (val) : "0" (val)); |
14 | # else | ||
16 | asm("xchgb %b0,%h0\n\t" /* swap lower bytes */ | 15 | asm("xchgb %b0,%h0\n\t" /* swap lower bytes */ |
17 | "rorl $16,%0\n\t" /* swap words */ | 16 | "rorl $16,%0\n\t" /* swap words */ |
18 | "xchgb %b0,%h0" /* swap higher bytes */ | 17 | "xchgb %b0,%h0" /* swap higher bytes */ |
19 | : "=q" (x) | 18 | : "=q" (val) |
20 | : "0" (x)); | 19 | : "0" (val)); |
20 | # endif | ||
21 | |||
22 | #else /* __i386__ */ | ||
23 | asm("bswapl %0" | ||
24 | : "=r" (val) | ||
25 | : "0" (val)); | ||
21 | #endif | 26 | #endif |
22 | return x; | 27 | return val; |
23 | } | 28 | } |
29 | #define __arch_swab32 __arch_swab32 | ||
24 | 30 | ||
25 | static inline __attribute_const__ __u64 ___arch__swab64(__u64 val) | 31 | static inline __attribute_const__ __u64 __arch_swab64(__u64 val) |
26 | { | 32 | { |
33 | #ifdef __i386__ | ||
27 | union { | 34 | union { |
28 | struct { | 35 | struct { |
29 | __u32 a; | 36 | __u32 a; |
@@ -32,50 +39,27 @@ static inline __attribute_const__ __u64 ___arch__swab64(__u64 val) | |||
32 | __u64 u; | 39 | __u64 u; |
33 | } v; | 40 | } v; |
34 | v.u = val; | 41 | v.u = val; |
35 | #ifdef CONFIG_X86_BSWAP | 42 | # ifdef CONFIG_X86_BSWAP |
36 | asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1" | 43 | asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1" |
37 | : "=r" (v.s.a), "=r" (v.s.b) | 44 | : "=r" (v.s.a), "=r" (v.s.b) |
38 | : "0" (v.s.a), "1" (v.s.b)); | 45 | : "0" (v.s.a), "1" (v.s.b)); |
39 | #else | 46 | # else |
40 | v.s.a = ___arch__swab32(v.s.a); | 47 | v.s.a = __arch_swab32(v.s.a); |
41 | v.s.b = ___arch__swab32(v.s.b); | 48 | v.s.b = __arch_swab32(v.s.b); |
42 | asm("xchgl %0,%1" | 49 | asm("xchgl %0,%1" |
43 | : "=r" (v.s.a), "=r" (v.s.b) | 50 | : "=r" (v.s.a), "=r" (v.s.b) |
44 | : "0" (v.s.a), "1" (v.s.b)); | 51 | : "0" (v.s.a), "1" (v.s.b)); |
45 | #endif | 52 | # endif |
46 | return v.u; | 53 | return v.u; |
47 | } | ||
48 | |||
49 | #else /* __i386__ */ | 54 | #else /* __i386__ */ |
50 | |||
51 | static inline __attribute_const__ __u64 ___arch__swab64(__u64 x) | ||
52 | { | ||
53 | asm("bswapq %0" | 55 | asm("bswapq %0" |
54 | : "=r" (x) | 56 | : "=r" (val) |
55 | : "0" (x)); | 57 | : "0" (val)); |
56 | return x; | 58 | return val; |
57 | } | ||
58 | |||
59 | static inline __attribute_const__ __u32 ___arch__swab32(__u32 x) | ||
60 | { | ||
61 | asm("bswapl %0" | ||
62 | : "=r" (x) | ||
63 | : "0" (x)); | ||
64 | return x; | ||
65 | } | ||
66 | |||
67 | #endif | 59 | #endif |
60 | } | ||
61 | #define __arch_swab64 __arch_swab64 | ||
68 | 62 | ||
69 | /* Do not define swab16. Gcc is smart enough to recognize "C" version and | 63 | #include <linux/byteorder.h> |
70 | convert it into rotation or exhange. */ | ||
71 | |||
72 | #define __arch__swab64(x) ___arch__swab64(x) | ||
73 | #define __arch__swab32(x) ___arch__swab32(x) | ||
74 | |||
75 | #define __BYTEORDER_HAS_U64__ | ||
76 | |||
77 | #endif /* __GNUC__ */ | ||
78 | |||
79 | #include <linux/byteorder/little_endian.h> | ||
80 | 64 | ||
81 | #endif /* _ASM_X86_BYTEORDER_H */ | 65 | #endif /* _ASM_X86_BYTEORDER_H */ |
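[editorial aside] For reference, the bswap/xchgb+rorl sequences kept above compute the usual 32- and 64-bit byte reversals. A plain-C version (not from this patch) is handy for sanity-checking the assembly paths:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t swab32_ref(uint32_t x)
    {
        return (x >> 24) | ((x >> 8) & 0x0000ff00) |
               ((x << 8) & 0x00ff0000) | (x << 24);
    }

    static uint64_t swab64_ref(uint64_t x)
    {
        return ((uint64_t)swab32_ref((uint32_t)x) << 32) |
                swab32_ref((uint32_t)(x >> 32));
    }

    int main(void)
    {
        printf("%08x\n", swab32_ref(0x12345678));       /* 78563412 */
        printf("%016llx\n",
               (unsigned long long)swab64_ref(0x0123456789abcdefULL));
                                                        /* efcdab8967452301 */
        return 0;
    }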
diff --git a/arch/x86/include/asm/iomap.h b/arch/x86/include/asm/iomap.h new file mode 100644 index 000000000000..c1f06289b14b --- /dev/null +++ b/arch/x86/include/asm/iomap.h | |||
@@ -0,0 +1,30 @@ | |||
1 | /* | ||
2 | * Copyright © 2008 Ingo Molnar | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but | ||
10 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
12 | * General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along | ||
15 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
16 | * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. | ||
17 | */ | ||
18 | |||
19 | #include <linux/fs.h> | ||
20 | #include <linux/mm.h> | ||
21 | #include <linux/uaccess.h> | ||
22 | #include <asm/cacheflush.h> | ||
23 | #include <asm/pgtable.h> | ||
24 | #include <asm/tlbflush.h> | ||
25 | |||
26 | void * | ||
27 | iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot); | ||
28 | |||
29 | void | ||
30 | iounmap_atomic(void *kvaddr, enum km_type type); | ||
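[editorial aside] The new header carries only the two prototypes, so here is a hypothetical usage sketch (my own, not from the patch, and not compile-tested against this tree): map a single page frame into an atomic kmap slot with a chosen protection, store to it, and tear the mapping down again. KM_USER0 and PAGE_KERNEL_NOCACHE are assumed to be suitable choices for the slot and protection.

    /* Hypothetical caller of the new interface -- illustration only. */
    static void poke_pfn(unsigned long pfn, unsigned int offset, u32 value)
    {
        void *vaddr;

        /* Preemption stays disabled for the lifetime of an atomic kmap. */
        vaddr = iomap_atomic_prot_pfn(pfn, KM_USER0, PAGE_KERNEL_NOCACHE);
        *(volatile u32 *)(vaddr + offset) = value;
        iounmap_atomic(vaddr, KM_USER0);
    }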
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h index e4a552d44465..0b500c5b6446 100644 --- a/arch/x86/include/asm/iommu.h +++ b/arch/x86/include/asm/iommu.h | |||
@@ -6,7 +6,6 @@ extern void no_iommu_init(void); | |||
6 | extern struct dma_mapping_ops nommu_dma_ops; | 6 | extern struct dma_mapping_ops nommu_dma_ops; |
7 | extern int force_iommu, no_iommu; | 7 | extern int force_iommu, no_iommu; |
8 | extern int iommu_detected; | 8 | extern int iommu_detected; |
9 | extern int dmar_disabled; | ||
10 | 9 | ||
11 | extern unsigned long iommu_nr_pages(unsigned long addr, unsigned long len); | 10 | extern unsigned long iommu_nr_pages(unsigned long addr, unsigned long len); |
12 | 11 | ||
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h index bae0eda95486..28e409fc73f3 100644 --- a/arch/x86/include/asm/irq.h +++ b/arch/x86/include/asm/irq.h | |||
@@ -31,10 +31,6 @@ static inline int irq_canonicalize(int irq) | |||
31 | # endif | 31 | # endif |
32 | #endif | 32 | #endif |
33 | 33 | ||
34 | #ifdef CONFIG_IRQBALANCE | ||
35 | extern int irqbalance_disable(char *str); | ||
36 | #endif | ||
37 | |||
38 | #ifdef CONFIG_HOTPLUG_CPU | 34 | #ifdef CONFIG_HOTPLUG_CPU |
39 | #include <linux/cpumask.h> | 35 | #include <linux/cpumask.h> |
40 | extern void fixup_irqs(cpumask_t map); | 36 | extern void fixup_irqs(cpumask_t map); |
diff --git a/arch/x86/include/asm/irq_regs_32.h b/arch/x86/include/asm/irq_regs_32.h index af2f02d27fc7..86afd7473457 100644 --- a/arch/x86/include/asm/irq_regs_32.h +++ b/arch/x86/include/asm/irq_regs_32.h | |||
@@ -9,6 +9,8 @@ | |||
9 | 9 | ||
10 | #include <asm/percpu.h> | 10 | #include <asm/percpu.h> |
11 | 11 | ||
12 | #define ARCH_HAS_OWN_IRQ_REGS | ||
13 | |||
12 | DECLARE_PER_CPU(struct pt_regs *, irq_regs); | 14 | DECLARE_PER_CPU(struct pt_regs *, irq_regs); |
13 | 15 | ||
14 | static inline struct pt_regs *get_irq_regs(void) | 16 | static inline struct pt_regs *get_irq_regs(void) |
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h index 9cd83a8e40d5..38ae163cc91b 100644 --- a/arch/x86/include/asm/tsc.h +++ b/arch/x86/include/asm/tsc.h | |||
@@ -34,8 +34,6 @@ static inline cycles_t get_cycles(void) | |||
34 | 34 | ||
35 | static __always_inline cycles_t vget_cycles(void) | 35 | static __always_inline cycles_t vget_cycles(void) |
36 | { | 36 | { |
37 | cycles_t cycles; | ||
38 | |||
39 | /* | 37 | /* |
40 | * We only do VDSOs on TSC capable CPUs, so this shouldnt | 38 | * We only do VDSOs on TSC capable CPUs, so this shouldnt |
41 | * access boot_cpu_data (which is not VDSO-safe): | 39 | * access boot_cpu_data (which is not VDSO-safe): |
@@ -44,11 +42,7 @@ static __always_inline cycles_t vget_cycles(void) | |||
44 | if (!cpu_has_tsc) | 42 | if (!cpu_has_tsc) |
45 | return 0; | 43 | return 0; |
46 | #endif | 44 | #endif |
47 | rdtsc_barrier(); | 45 | return (cycles_t)__native_read_tsc(); |
48 | cycles = (cycles_t)__native_read_tsc(); | ||
49 | rdtsc_barrier(); | ||
50 | |||
51 | return cycles; | ||
52 | } | 46 | } |
53 | 47 | ||
54 | extern void tsc_init(void); | 48 | extern void tsc_init(void); |
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 8c1f76abae9e..4c51a2f8fd31 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c | |||
@@ -1343,7 +1343,6 @@ static void __init acpi_process_madt(void) | |||
1343 | error = acpi_parse_madt_ioapic_entries(); | 1343 | error = acpi_parse_madt_ioapic_entries(); |
1344 | if (!error) { | 1344 | if (!error) { |
1345 | acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC; | 1345 | acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC; |
1346 | acpi_irq_balance_set(NULL); | ||
1347 | acpi_ioapic = 1; | 1346 | acpi_ioapic = 1; |
1348 | 1347 | ||
1349 | smp_found_config = 1; | 1348 | smp_found_config = 1; |
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c index 2b69994fd3a8..d1a121443bde 100644 --- a/arch/x86/kernel/ds.c +++ b/arch/x86/kernel/ds.c | |||
@@ -236,17 +236,33 @@ static inline struct ds_context *ds_alloc_context(struct task_struct *task) | |||
236 | struct ds_context *context = *p_context; | 236 | struct ds_context *context = *p_context; |
237 | 237 | ||
238 | if (!context) { | 238 | if (!context) { |
239 | spin_unlock(&ds_lock); | ||
240 | |||
239 | context = kzalloc(sizeof(*context), GFP_KERNEL); | 241 | context = kzalloc(sizeof(*context), GFP_KERNEL); |
240 | 242 | ||
241 | if (!context) | 243 | if (!context) { |
244 | spin_lock(&ds_lock); | ||
242 | return NULL; | 245 | return NULL; |
246 | } | ||
243 | 247 | ||
244 | context->ds = kzalloc(ds_cfg.sizeof_ds, GFP_KERNEL); | 248 | context->ds = kzalloc(ds_cfg.sizeof_ds, GFP_KERNEL); |
245 | if (!context->ds) { | 249 | if (!context->ds) { |
246 | kfree(context); | 250 | kfree(context); |
251 | spin_lock(&ds_lock); | ||
247 | return NULL; | 252 | return NULL; |
248 | } | 253 | } |
249 | 254 | ||
255 | spin_lock(&ds_lock); | ||
256 | /* | ||
257 | * Check for race - another CPU could have allocated | ||
258 | * it meanwhile: | ||
259 | */ | ||
260 | if (*p_context) { | ||
261 | kfree(context->ds); | ||
262 | kfree(context); | ||
263 | return *p_context; | ||
264 | } | ||
265 | |||
250 | *p_context = context; | 266 | *p_context = context; |
251 | 267 | ||
252 | context->this = p_context; | 268 | context->this = p_context; |
@@ -384,14 +400,15 @@ static int ds_request(struct task_struct *task, void *base, size_t size, | |||
384 | 400 | ||
385 | spin_lock(&ds_lock); | 401 | spin_lock(&ds_lock); |
386 | 402 | ||
387 | if (!check_tracer(task)) | ||
388 | return -EPERM; | ||
389 | |||
390 | error = -ENOMEM; | 403 | error = -ENOMEM; |
391 | context = ds_alloc_context(task); | 404 | context = ds_alloc_context(task); |
392 | if (!context) | 405 | if (!context) |
393 | goto out_unlock; | 406 | goto out_unlock; |
394 | 407 | ||
408 | error = -EPERM; | ||
409 | if (!check_tracer(task)) | ||
410 | goto out_unlock; | ||
411 | |||
395 | error = -EALREADY; | 412 | error = -EALREADY; |
396 | if (context->owner[qual] == current) | 413 | if (context->owner[qual] == current) |
397 | goto out_unlock; | 414 | goto out_unlock; |
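[editorial aside] The ds.c hunk above follows a common shape: GFP_KERNEL allocations may sleep and so cannot happen under ds_lock, so the lock is dropped, the memory is allocated, the lock is re-taken, and the code copes with another CPU having installed a context in the meantime. A user-space analogue (pthreads instead of spinlocks, so only the drop/reacquire/recheck structure carries over, not the may-not-sleep constraint):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct ctx { int dummy; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct ctx *shared;      /* protected by 'lock' */

    static struct ctx *get_ctx(void)
    {
        struct ctx *ctx;

        pthread_mutex_lock(&lock);
        ctx = shared;
        if (!ctx) {
            /* Don't allocate while holding the lock. */
            pthread_mutex_unlock(&lock);
            ctx = calloc(1, sizeof(*ctx));
            pthread_mutex_lock(&lock);

            if (!ctx) {
                ctx = shared;       /* allocation failed; maybe someone else won */
            } else if (shared) {
                free(ctx);          /* lost the race while unlocked */
                ctx = shared;
            } else {
                shared = ctx;       /* we won: publish it */
            }
        }
        pthread_mutex_unlock(&lock);
        return ctx;
    }

    static void *worker(void *arg)
    {
        (void)arg;
        return get_ctx();
    }

    int main(void)
    {
        pthread_t a, b;
        void *ra, *rb;

        pthread_create(&a, NULL, worker, NULL);
        pthread_create(&b, NULL, worker, NULL);
        pthread_join(a, &ra);
        pthread_join(b, &rb);
        printf("same context: %s\n", ra == rb ? "yes" : "no");   /* yes */
        free(shared);
        return 0;
    }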
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index 3ce029ffaa55..1b894b72c0f5 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c | |||
@@ -188,20 +188,6 @@ static void __init ati_bugs_contd(int num, int slot, int func) | |||
188 | } | 188 | } |
189 | #endif | 189 | #endif |
190 | 190 | ||
191 | #ifdef CONFIG_DMAR | ||
192 | static void __init intel_g33_dmar(int num, int slot, int func) | ||
193 | { | ||
194 | struct acpi_table_header *dmar_tbl; | ||
195 | acpi_status status; | ||
196 | |||
197 | status = acpi_get_table(ACPI_SIG_DMAR, 0, &dmar_tbl); | ||
198 | if (ACPI_SUCCESS(status)) { | ||
199 | printk(KERN_INFO "BIOS BUG: DMAR advertised on Intel G31/G33 chipset -- ignoring\n"); | ||
200 | dmar_disabled = 1; | ||
201 | } | ||
202 | } | ||
203 | #endif | ||
204 | |||
205 | #define QFLAG_APPLY_ONCE 0x1 | 191 | #define QFLAG_APPLY_ONCE 0x1 |
206 | #define QFLAG_APPLIED 0x2 | 192 | #define QFLAG_APPLIED 0x2 |
207 | #define QFLAG_DONE (QFLAG_APPLY_ONCE|QFLAG_APPLIED) | 193 | #define QFLAG_DONE (QFLAG_APPLY_ONCE|QFLAG_APPLIED) |
@@ -225,10 +211,6 @@ static struct chipset early_qrk[] __initdata = { | |||
225 | PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs }, | 211 | PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs }, |
226 | { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, | 212 | { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, |
227 | PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd }, | 213 | PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd }, |
228 | #ifdef CONFIG_DMAR | ||
229 | { PCI_VENDOR_ID_INTEL, 0x29c0, | ||
230 | PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, intel_g33_dmar }, | ||
231 | #endif | ||
232 | {} | 214 | {} |
233 | }; | 215 | }; |
234 | 216 | ||
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 369de6973c58..dbf06a0ef3d5 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S | |||
@@ -11,15 +11,15 @@ | |||
11 | * | 11 | * |
12 | * NOTE: This code handles signal-recognition, which happens every time | 12 | * NOTE: This code handles signal-recognition, which happens every time |
13 | * after an interrupt and after each system call. | 13 | * after an interrupt and after each system call. |
14 | * | 14 | * |
15 | * Normal syscalls and interrupts don't save a full stack frame, this is | 15 | * Normal syscalls and interrupts don't save a full stack frame, this is |
16 | * only done for syscall tracing, signals or fork/exec et.al. | 16 | * only done for syscall tracing, signals or fork/exec et.al. |
17 | * | 17 | * |
18 | * A note on terminology: | 18 | * A note on terminology: |
19 | * - top of stack: Architecture defined interrupt frame from SS to RIP | 19 | * - top of stack: Architecture defined interrupt frame from SS to RIP |
20 | * at the top of the kernel process stack. | 20 | * at the top of the kernel process stack. |
21 | * - partial stack frame: partially saved registers upto R11. | 21 | * - partial stack frame: partially saved registers upto R11. |
22 | * - full stack frame: Like partial stack frame, but all register saved. | 22 | * - full stack frame: Like partial stack frame, but all register saved. |
23 | * | 23 | * |
24 | * Some macro usage: | 24 | * Some macro usage: |
25 | * - CFI macros are used to generate dwarf2 unwind information for better | 25 | * - CFI macros are used to generate dwarf2 unwind information for better |
@@ -142,7 +142,7 @@ END(mcount) | |||
142 | 142 | ||
143 | #ifndef CONFIG_PREEMPT | 143 | #ifndef CONFIG_PREEMPT |
144 | #define retint_kernel retint_restore_args | 144 | #define retint_kernel retint_restore_args |
145 | #endif | 145 | #endif |
146 | 146 | ||
147 | #ifdef CONFIG_PARAVIRT | 147 | #ifdef CONFIG_PARAVIRT |
148 | ENTRY(native_usergs_sysret64) | 148 | ENTRY(native_usergs_sysret64) |
@@ -161,14 +161,14 @@ ENTRY(native_usergs_sysret64) | |||
161 | .endm | 161 | .endm |
162 | 162 | ||
163 | /* | 163 | /* |
164 | * C code is not supposed to know about undefined top of stack. Every time | 164 | * C code is not supposed to know about undefined top of stack. Every time |
165 | * a C function with an pt_regs argument is called from the SYSCALL based | 165 | * a C function with an pt_regs argument is called from the SYSCALL based |
166 | * fast path FIXUP_TOP_OF_STACK is needed. | 166 | * fast path FIXUP_TOP_OF_STACK is needed. |
167 | * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs | 167 | * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs |
168 | * manipulation. | 168 | * manipulation. |
169 | */ | 169 | */ |
170 | 170 | ||
171 | /* %rsp:at FRAMEEND */ | 171 | /* %rsp:at FRAMEEND */ |
172 | .macro FIXUP_TOP_OF_STACK tmp | 172 | .macro FIXUP_TOP_OF_STACK tmp |
173 | movq %gs:pda_oldrsp,\tmp | 173 | movq %gs:pda_oldrsp,\tmp |
174 | movq \tmp,RSP(%rsp) | 174 | movq \tmp,RSP(%rsp) |
@@ -244,8 +244,8 @@ ENTRY(native_usergs_sysret64) | |||
244 | .endm | 244 | .endm |
245 | /* | 245 | /* |
246 | * A newly forked process directly context switches into this. | 246 | * A newly forked process directly context switches into this. |
247 | */ | 247 | */ |
248 | /* rdi: prev */ | 248 | /* rdi: prev */ |
249 | ENTRY(ret_from_fork) | 249 | ENTRY(ret_from_fork) |
250 | CFI_DEFAULT_STACK | 250 | CFI_DEFAULT_STACK |
251 | push kernel_eflags(%rip) | 251 | push kernel_eflags(%rip) |
@@ -256,7 +256,7 @@ ENTRY(ret_from_fork) | |||
256 | GET_THREAD_INFO(%rcx) | 256 | GET_THREAD_INFO(%rcx) |
257 | testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx) | 257 | testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx) |
258 | jnz rff_trace | 258 | jnz rff_trace |
259 | rff_action: | 259 | rff_action: |
260 | RESTORE_REST | 260 | RESTORE_REST |
261 | testl $3,CS-ARGOFFSET(%rsp) # from kernel_thread? | 261 | testl $3,CS-ARGOFFSET(%rsp) # from kernel_thread? |
262 | je int_ret_from_sys_call | 262 | je int_ret_from_sys_call |
@@ -267,7 +267,7 @@ rff_action: | |||
267 | rff_trace: | 267 | rff_trace: |
268 | movq %rsp,%rdi | 268 | movq %rsp,%rdi |
269 | call syscall_trace_leave | 269 | call syscall_trace_leave |
270 | GET_THREAD_INFO(%rcx) | 270 | GET_THREAD_INFO(%rcx) |
271 | jmp rff_action | 271 | jmp rff_action |
272 | CFI_ENDPROC | 272 | CFI_ENDPROC |
273 | END(ret_from_fork) | 273 | END(ret_from_fork) |
@@ -278,20 +278,20 @@ END(ret_from_fork) | |||
278 | * SYSCALL does not save anything on the stack and does not change the | 278 | * SYSCALL does not save anything on the stack and does not change the |
279 | * stack pointer. | 279 | * stack pointer. |
280 | */ | 280 | */ |
281 | 281 | ||
282 | /* | 282 | /* |
283 | * Register setup: | 283 | * Register setup: |
284 | * rax system call number | 284 | * rax system call number |
285 | * rdi arg0 | 285 | * rdi arg0 |
286 | * rcx return address for syscall/sysret, C arg3 | 286 | * rcx return address for syscall/sysret, C arg3 |
287 | * rsi arg1 | 287 | * rsi arg1 |
288 | * rdx arg2 | 288 | * rdx arg2 |
289 | * r10 arg3 (--> moved to rcx for C) | 289 | * r10 arg3 (--> moved to rcx for C) |
290 | * r8 arg4 | 290 | * r8 arg4 |
291 | * r9 arg5 | 291 | * r9 arg5 |
292 | * r11 eflags for syscall/sysret, temporary for C | 292 | * r11 eflags for syscall/sysret, temporary for C |
293 | * r12-r15,rbp,rbx saved by C code, not touched. | 293 | * r12-r15,rbp,rbx saved by C code, not touched. |
294 | * | 294 | * |
295 | * Interrupts are off on entry. | 295 | * Interrupts are off on entry. |
296 | * Only called from user space. | 296 | * Only called from user space. |
297 | * | 297 | * |
@@ -301,7 +301,7 @@ END(ret_from_fork) | |||
301 | * When user can change the frames always force IRET. That is because | 301 | * When user can change the frames always force IRET. That is because |
302 | * it deals with uncanonical addresses better. SYSRET has trouble | 302 | * it deals with uncanonical addresses better. SYSRET has trouble |
303 | * with them due to bugs in both AMD and Intel CPUs. | 303 | * with them due to bugs in both AMD and Intel CPUs. |
304 | */ | 304 | */ |
305 | 305 | ||
306 | ENTRY(system_call) | 306 | ENTRY(system_call) |
307 | CFI_STARTPROC simple | 307 | CFI_STARTPROC simple |
@@ -317,7 +317,7 @@ ENTRY(system_call) | |||
317 | */ | 317 | */ |
318 | ENTRY(system_call_after_swapgs) | 318 | ENTRY(system_call_after_swapgs) |
319 | 319 | ||
320 | movq %rsp,%gs:pda_oldrsp | 320 | movq %rsp,%gs:pda_oldrsp |
321 | movq %gs:pda_kernelstack,%rsp | 321 | movq %gs:pda_kernelstack,%rsp |
322 | /* | 322 | /* |
323 | * No need to follow this irqs off/on section - it's straight | 323 | * No need to follow this irqs off/on section - it's straight |
@@ -325,7 +325,7 @@ ENTRY(system_call_after_swapgs) | |||
325 | */ | 325 | */ |
326 | ENABLE_INTERRUPTS(CLBR_NONE) | 326 | ENABLE_INTERRUPTS(CLBR_NONE) |
327 | SAVE_ARGS 8,1 | 327 | SAVE_ARGS 8,1 |
328 | movq %rax,ORIG_RAX-ARGOFFSET(%rsp) | 328 | movq %rax,ORIG_RAX-ARGOFFSET(%rsp) |
329 | movq %rcx,RIP-ARGOFFSET(%rsp) | 329 | movq %rcx,RIP-ARGOFFSET(%rsp) |
330 | CFI_REL_OFFSET rip,RIP-ARGOFFSET | 330 | CFI_REL_OFFSET rip,RIP-ARGOFFSET |
331 | GET_THREAD_INFO(%rcx) | 331 | GET_THREAD_INFO(%rcx) |
@@ -339,19 +339,19 @@ system_call_fastpath: | |||
339 | movq %rax,RAX-ARGOFFSET(%rsp) | 339 | movq %rax,RAX-ARGOFFSET(%rsp) |
340 | /* | 340 | /* |
341 | * Syscall return path ending with SYSRET (fast path) | 341 | * Syscall return path ending with SYSRET (fast path) |
342 | * Has incomplete stack frame and undefined top of stack. | 342 | * Has incomplete stack frame and undefined top of stack. |
343 | */ | 343 | */ |
344 | ret_from_sys_call: | 344 | ret_from_sys_call: |
345 | movl $_TIF_ALLWORK_MASK,%edi | 345 | movl $_TIF_ALLWORK_MASK,%edi |
346 | /* edi: flagmask */ | 346 | /* edi: flagmask */ |
347 | sysret_check: | 347 | sysret_check: |
348 | LOCKDEP_SYS_EXIT | 348 | LOCKDEP_SYS_EXIT |
349 | GET_THREAD_INFO(%rcx) | 349 | GET_THREAD_INFO(%rcx) |
350 | DISABLE_INTERRUPTS(CLBR_NONE) | 350 | DISABLE_INTERRUPTS(CLBR_NONE) |
351 | TRACE_IRQS_OFF | 351 | TRACE_IRQS_OFF |
352 | movl TI_flags(%rcx),%edx | 352 | movl TI_flags(%rcx),%edx |
353 | andl %edi,%edx | 353 | andl %edi,%edx |
354 | jnz sysret_careful | 354 | jnz sysret_careful |
355 | CFI_REMEMBER_STATE | 355 | CFI_REMEMBER_STATE |
356 | /* | 356 | /* |
357 | * sysretq will re-enable interrupts: | 357 | * sysretq will re-enable interrupts: |
@@ -366,7 +366,7 @@ sysret_check: | |||
366 | 366 | ||
367 | CFI_RESTORE_STATE | 367 | CFI_RESTORE_STATE |
368 | /* Handle reschedules */ | 368 | /* Handle reschedules */ |
369 | /* edx: work, edi: workmask */ | 369 | /* edx: work, edi: workmask */ |
370 | sysret_careful: | 370 | sysret_careful: |
371 | bt $TIF_NEED_RESCHED,%edx | 371 | bt $TIF_NEED_RESCHED,%edx |
372 | jnc sysret_signal | 372 | jnc sysret_signal |
@@ -379,7 +379,7 @@ sysret_careful: | |||
379 | CFI_ADJUST_CFA_OFFSET -8 | 379 | CFI_ADJUST_CFA_OFFSET -8 |
380 | jmp sysret_check | 380 | jmp sysret_check |
381 | 381 | ||
382 | /* Handle a signal */ | 382 | /* Handle a signal */ |
383 | sysret_signal: | 383 | sysret_signal: |
384 | TRACE_IRQS_ON | 384 | TRACE_IRQS_ON |
385 | ENABLE_INTERRUPTS(CLBR_NONE) | 385 | ENABLE_INTERRUPTS(CLBR_NONE) |
@@ -398,7 +398,7 @@ sysret_signal: | |||
398 | DISABLE_INTERRUPTS(CLBR_NONE) | 398 | DISABLE_INTERRUPTS(CLBR_NONE) |
399 | TRACE_IRQS_OFF | 399 | TRACE_IRQS_OFF |
400 | jmp int_with_check | 400 | jmp int_with_check |
401 | 401 | ||
402 | badsys: | 402 | badsys: |
403 | movq $-ENOSYS,RAX-ARGOFFSET(%rsp) | 403 | movq $-ENOSYS,RAX-ARGOFFSET(%rsp) |
404 | jmp ret_from_sys_call | 404 | jmp ret_from_sys_call |
@@ -437,7 +437,7 @@ sysret_audit: | |||
437 | #endif /* CONFIG_AUDITSYSCALL */ | 437 | #endif /* CONFIG_AUDITSYSCALL */ |
438 | 438 | ||
439 | /* Do syscall tracing */ | 439 | /* Do syscall tracing */ |
440 | tracesys: | 440 | tracesys: |
441 | #ifdef CONFIG_AUDITSYSCALL | 441 | #ifdef CONFIG_AUDITSYSCALL |
442 | testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx) | 442 | testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx) |
443 | jz auditsys | 443 | jz auditsys |
@@ -460,8 +460,8 @@ tracesys: | |||
460 | call *sys_call_table(,%rax,8) | 460 | call *sys_call_table(,%rax,8) |
461 | movq %rax,RAX-ARGOFFSET(%rsp) | 461 | movq %rax,RAX-ARGOFFSET(%rsp) |
462 | /* Use IRET because user could have changed frame */ | 462 | /* Use IRET because user could have changed frame */ |
463 | 463 | ||
464 | /* | 464 | /* |
465 | * Syscall return path ending with IRET. | 465 | * Syscall return path ending with IRET. |
466 | * Has correct top of stack, but partial stack frame. | 466 | * Has correct top of stack, but partial stack frame. |
467 | */ | 467 | */ |
@@ -505,18 +505,18 @@ int_very_careful: | |||
505 | TRACE_IRQS_ON | 505 | TRACE_IRQS_ON |
506 | ENABLE_INTERRUPTS(CLBR_NONE) | 506 | ENABLE_INTERRUPTS(CLBR_NONE) |
507 | SAVE_REST | 507 | SAVE_REST |
508 | /* Check for syscall exit trace */ | 508 | /* Check for syscall exit trace */ |
509 | testl $_TIF_WORK_SYSCALL_EXIT,%edx | 509 | testl $_TIF_WORK_SYSCALL_EXIT,%edx |
510 | jz int_signal | 510 | jz int_signal |
511 | pushq %rdi | 511 | pushq %rdi |
512 | CFI_ADJUST_CFA_OFFSET 8 | 512 | CFI_ADJUST_CFA_OFFSET 8 |
513 | leaq 8(%rsp),%rdi # &ptregs -> arg1 | 513 | leaq 8(%rsp),%rdi # &ptregs -> arg1 |
514 | call syscall_trace_leave | 514 | call syscall_trace_leave |
515 | popq %rdi | 515 | popq %rdi |
516 | CFI_ADJUST_CFA_OFFSET -8 | 516 | CFI_ADJUST_CFA_OFFSET -8 |
517 | andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi | 517 | andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi |
518 | jmp int_restore_rest | 518 | jmp int_restore_rest |
519 | 519 | ||
520 | int_signal: | 520 | int_signal: |
521 | testl $_TIF_DO_NOTIFY_MASK,%edx | 521 | testl $_TIF_DO_NOTIFY_MASK,%edx |
522 | jz 1f | 522 | jz 1f |
@@ -531,11 +531,11 @@ int_restore_rest: | |||
531 | jmp int_with_check | 531 | jmp int_with_check |
532 | CFI_ENDPROC | 532 | CFI_ENDPROC |
533 | END(system_call) | 533 | END(system_call) |
534 | 534 | ||
535 | /* | 535 | /* |
536 | * Certain special system calls need to save a full stack frame. | 536 | * Certain special system calls need to save a full stack frame. |
537 | */ | 537 | */ |
538 | 538 | ||
539 | .macro PTREGSCALL label,func,arg | 539 | .macro PTREGSCALL label,func,arg |
540 | .globl \label | 540 | .globl \label |
541 | \label: | 541 | \label: |
@@ -572,7 +572,7 @@ ENTRY(ptregscall_common) | |||
572 | ret | 572 | ret |
573 | CFI_ENDPROC | 573 | CFI_ENDPROC |
574 | END(ptregscall_common) | 574 | END(ptregscall_common) |
575 | 575 | ||
576 | ENTRY(stub_execve) | 576 | ENTRY(stub_execve) |
577 | CFI_STARTPROC | 577 | CFI_STARTPROC |
578 | popq %r11 | 578 | popq %r11 |
@@ -588,11 +588,11 @@ ENTRY(stub_execve) | |||
588 | jmp int_ret_from_sys_call | 588 | jmp int_ret_from_sys_call |
589 | CFI_ENDPROC | 589 | CFI_ENDPROC |
590 | END(stub_execve) | 590 | END(stub_execve) |
591 | 591 | ||
592 | /* | 592 | /* |
593 | * sigreturn is special because it needs to restore all registers on return. | 593 | * sigreturn is special because it needs to restore all registers on return. |
594 | * This cannot be done with SYSRET, so use the IRET return path instead. | 594 | * This cannot be done with SYSRET, so use the IRET return path instead. |
595 | */ | 595 | */ |
596 | ENTRY(stub_rt_sigreturn) | 596 | ENTRY(stub_rt_sigreturn) |
597 | CFI_STARTPROC | 597 | CFI_STARTPROC |
598 | addq $8, %rsp | 598 | addq $8, %rsp |
@@ -731,12 +731,12 @@ exit_intr: | |||
731 | GET_THREAD_INFO(%rcx) | 731 | GET_THREAD_INFO(%rcx) |
732 | testl $3,CS-ARGOFFSET(%rsp) | 732 | testl $3,CS-ARGOFFSET(%rsp) |
733 | je retint_kernel | 733 | je retint_kernel |
734 | 734 | ||
735 | /* Interrupt came from user space */ | 735 | /* Interrupt came from user space */ |
736 | /* | 736 | /* |
737 | * Has a correct top of stack, but a partial stack frame | 737 | * Has a correct top of stack, but a partial stack frame |
738 | * %rcx: thread info. Interrupts off. | 738 | * %rcx: thread info. Interrupts off. |
739 | */ | 739 | */ |
740 | retint_with_reschedule: | 740 | retint_with_reschedule: |
741 | movl $_TIF_WORK_MASK,%edi | 741 | movl $_TIF_WORK_MASK,%edi |
742 | retint_check: | 742 | retint_check: |
@@ -809,20 +809,20 @@ retint_careful: | |||
809 | pushq %rdi | 809 | pushq %rdi |
810 | CFI_ADJUST_CFA_OFFSET 8 | 810 | CFI_ADJUST_CFA_OFFSET 8 |
811 | call schedule | 811 | call schedule |
812 | popq %rdi | 812 | popq %rdi |
813 | CFI_ADJUST_CFA_OFFSET -8 | 813 | CFI_ADJUST_CFA_OFFSET -8 |
814 | GET_THREAD_INFO(%rcx) | 814 | GET_THREAD_INFO(%rcx) |
815 | DISABLE_INTERRUPTS(CLBR_NONE) | 815 | DISABLE_INTERRUPTS(CLBR_NONE) |
816 | TRACE_IRQS_OFF | 816 | TRACE_IRQS_OFF |
817 | jmp retint_check | 817 | jmp retint_check |
818 | 818 | ||
819 | retint_signal: | 819 | retint_signal: |
820 | testl $_TIF_DO_NOTIFY_MASK,%edx | 820 | testl $_TIF_DO_NOTIFY_MASK,%edx |
821 | jz retint_swapgs | 821 | jz retint_swapgs |
822 | TRACE_IRQS_ON | 822 | TRACE_IRQS_ON |
823 | ENABLE_INTERRUPTS(CLBR_NONE) | 823 | ENABLE_INTERRUPTS(CLBR_NONE) |
824 | SAVE_REST | 824 | SAVE_REST |
825 | movq $-1,ORIG_RAX(%rsp) | 825 | movq $-1,ORIG_RAX(%rsp) |
826 | xorl %esi,%esi # oldset | 826 | xorl %esi,%esi # oldset |
827 | movq %rsp,%rdi # &pt_regs | 827 | movq %rsp,%rdi # &pt_regs |
828 | call do_notify_resume | 828 | call do_notify_resume |
@@ -844,14 +844,14 @@ ENTRY(retint_kernel) | |||
844 | jnc retint_restore_args | 844 | jnc retint_restore_args |
845 | call preempt_schedule_irq | 845 | call preempt_schedule_irq |
846 | jmp exit_intr | 846 | jmp exit_intr |
847 | #endif | 847 | #endif |
848 | 848 | ||
849 | CFI_ENDPROC | 849 | CFI_ENDPROC |
850 | END(common_interrupt) | 850 | END(common_interrupt) |
851 | 851 | ||
852 | /* | 852 | /* |
853 | * APIC interrupts. | 853 | * APIC interrupts. |
854 | */ | 854 | */ |
855 | .macro apicinterrupt num,func | 855 | .macro apicinterrupt num,func |
856 | INTR_FRAME | 856 | INTR_FRAME |
857 | pushq $~(\num) | 857 | pushq $~(\num) |
@@ -869,14 +869,14 @@ ENTRY(threshold_interrupt) | |||
869 | apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt | 869 | apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt |
870 | END(threshold_interrupt) | 870 | END(threshold_interrupt) |
871 | 871 | ||
872 | #ifdef CONFIG_SMP | 872 | #ifdef CONFIG_SMP |
873 | ENTRY(reschedule_interrupt) | 873 | ENTRY(reschedule_interrupt) |
874 | apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt | 874 | apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt |
875 | END(reschedule_interrupt) | 875 | END(reschedule_interrupt) |
876 | 876 | ||
877 | .macro INVALIDATE_ENTRY num | 877 | .macro INVALIDATE_ENTRY num |
878 | ENTRY(invalidate_interrupt\num) | 878 | ENTRY(invalidate_interrupt\num) |
879 | apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt | 879 | apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt |
880 | END(invalidate_interrupt\num) | 880 | END(invalidate_interrupt\num) |
881 | .endm | 881 | .endm |
882 | 882 | ||
@@ -915,22 +915,22 @@ END(error_interrupt) | |||
915 | ENTRY(spurious_interrupt) | 915 | ENTRY(spurious_interrupt) |
916 | apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt | 916 | apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt |
917 | END(spurious_interrupt) | 917 | END(spurious_interrupt) |
918 | 918 | ||
919 | /* | 919 | /* |
920 | * Exception entry points. | 920 | * Exception entry points. |
921 | */ | 921 | */ |
922 | .macro zeroentry sym | 922 | .macro zeroentry sym |
923 | INTR_FRAME | 923 | INTR_FRAME |
924 | PARAVIRT_ADJUST_EXCEPTION_FRAME | 924 | PARAVIRT_ADJUST_EXCEPTION_FRAME |
925 | pushq $0 /* push error code/oldrax */ | 925 | pushq $0 /* push error code/oldrax */ |
926 | CFI_ADJUST_CFA_OFFSET 8 | 926 | CFI_ADJUST_CFA_OFFSET 8 |
927 | pushq %rax /* push real oldrax to the rdi slot */ | 927 | pushq %rax /* push real oldrax to the rdi slot */ |
928 | CFI_ADJUST_CFA_OFFSET 8 | 928 | CFI_ADJUST_CFA_OFFSET 8 |
929 | CFI_REL_OFFSET rax,0 | 929 | CFI_REL_OFFSET rax,0 |
930 | leaq \sym(%rip),%rax | 930 | leaq \sym(%rip),%rax |
931 | jmp error_entry | 931 | jmp error_entry |
932 | CFI_ENDPROC | 932 | CFI_ENDPROC |
933 | .endm | 933 | .endm |
934 | 934 | ||
935 | .macro errorentry sym | 935 | .macro errorentry sym |
936 | XCPT_FRAME | 936 | XCPT_FRAME |
@@ -1044,13 +1044,13 @@ paranoid_schedule\trace: | |||
1044 | 1044 | ||
1045 | /* | 1045 | /* |
1046 | * Exception entry point. This expects an error code/orig_rax on the stack | 1046 | * Exception entry point. This expects an error code/orig_rax on the stack |
1047 | * and the exception handler in %rax. | 1047 | * and the exception handler in %rax. |
1048 | */ | 1048 | */ |
1049 | KPROBE_ENTRY(error_entry) | 1049 | KPROBE_ENTRY(error_entry) |
1050 | _frame RDI | 1050 | _frame RDI |
1051 | CFI_REL_OFFSET rax,0 | 1051 | CFI_REL_OFFSET rax,0 |
1052 | /* rdi slot contains rax, oldrax contains error code */ | 1052 | /* rdi slot contains rax, oldrax contains error code */ |
1053 | cld | 1053 | cld |
1054 | subq $14*8,%rsp | 1054 | subq $14*8,%rsp |
1055 | CFI_ADJUST_CFA_OFFSET (14*8) | 1055 | CFI_ADJUST_CFA_OFFSET (14*8) |
1056 | movq %rsi,13*8(%rsp) | 1056 | movq %rsi,13*8(%rsp) |
@@ -1061,7 +1061,7 @@ KPROBE_ENTRY(error_entry) | |||
1061 | CFI_REL_OFFSET rdx,RDX | 1061 | CFI_REL_OFFSET rdx,RDX |
1062 | movq %rcx,11*8(%rsp) | 1062 | movq %rcx,11*8(%rsp) |
1063 | CFI_REL_OFFSET rcx,RCX | 1063 | CFI_REL_OFFSET rcx,RCX |
1064 | movq %rsi,10*8(%rsp) /* store rax */ | 1064 | movq %rsi,10*8(%rsp) /* store rax */ |
1065 | CFI_REL_OFFSET rax,RAX | 1065 | CFI_REL_OFFSET rax,RAX |
1066 | movq %r8, 9*8(%rsp) | 1066 | movq %r8, 9*8(%rsp) |
1067 | CFI_REL_OFFSET r8,R8 | 1067 | CFI_REL_OFFSET r8,R8 |
@@ -1071,29 +1071,29 @@ KPROBE_ENTRY(error_entry) | |||
1071 | CFI_REL_OFFSET r10,R10 | 1071 | CFI_REL_OFFSET r10,R10 |
1072 | movq %r11,6*8(%rsp) | 1072 | movq %r11,6*8(%rsp) |
1073 | CFI_REL_OFFSET r11,R11 | 1073 | CFI_REL_OFFSET r11,R11 |
1074 | movq %rbx,5*8(%rsp) | 1074 | movq %rbx,5*8(%rsp) |
1075 | CFI_REL_OFFSET rbx,RBX | 1075 | CFI_REL_OFFSET rbx,RBX |
1076 | movq %rbp,4*8(%rsp) | 1076 | movq %rbp,4*8(%rsp) |
1077 | CFI_REL_OFFSET rbp,RBP | 1077 | CFI_REL_OFFSET rbp,RBP |
1078 | movq %r12,3*8(%rsp) | 1078 | movq %r12,3*8(%rsp) |
1079 | CFI_REL_OFFSET r12,R12 | 1079 | CFI_REL_OFFSET r12,R12 |
1080 | movq %r13,2*8(%rsp) | 1080 | movq %r13,2*8(%rsp) |
1081 | CFI_REL_OFFSET r13,R13 | 1081 | CFI_REL_OFFSET r13,R13 |
1082 | movq %r14,1*8(%rsp) | 1082 | movq %r14,1*8(%rsp) |
1083 | CFI_REL_OFFSET r14,R14 | 1083 | CFI_REL_OFFSET r14,R14 |
1084 | movq %r15,(%rsp) | 1084 | movq %r15,(%rsp) |
1085 | CFI_REL_OFFSET r15,R15 | 1085 | CFI_REL_OFFSET r15,R15 |
1086 | xorl %ebx,%ebx | 1086 | xorl %ebx,%ebx |
1087 | testl $3,CS(%rsp) | 1087 | testl $3,CS(%rsp) |
1088 | je error_kernelspace | 1088 | je error_kernelspace |
1089 | error_swapgs: | 1089 | error_swapgs: |
1090 | SWAPGS | 1090 | SWAPGS |
1091 | error_sti: | 1091 | error_sti: |
1092 | TRACE_IRQS_OFF | 1092 | TRACE_IRQS_OFF |
1093 | movq %rdi,RDI(%rsp) | 1093 | movq %rdi,RDI(%rsp) |
1094 | CFI_REL_OFFSET rdi,RDI | 1094 | CFI_REL_OFFSET rdi,RDI |
1095 | movq %rsp,%rdi | 1095 | movq %rsp,%rdi |
1096 | movq ORIG_RAX(%rsp),%rsi /* get error code */ | 1096 | movq ORIG_RAX(%rsp),%rsi /* get error code */ |
1097 | movq $-1,ORIG_RAX(%rsp) | 1097 | movq $-1,ORIG_RAX(%rsp) |
1098 | call *%rax | 1098 | call *%rax |
1099 | /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */ | 1099 | /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */ |
@@ -1102,7 +1102,7 @@ error_exit: | |||
1102 | RESTORE_REST | 1102 | RESTORE_REST |
1103 | DISABLE_INTERRUPTS(CLBR_NONE) | 1103 | DISABLE_INTERRUPTS(CLBR_NONE) |
1104 | TRACE_IRQS_OFF | 1104 | TRACE_IRQS_OFF |
1105 | GET_THREAD_INFO(%rcx) | 1105 | GET_THREAD_INFO(%rcx) |
1106 | testl %eax,%eax | 1106 | testl %eax,%eax |
1107 | jne retint_kernel | 1107 | jne retint_kernel |
1108 | LOCKDEP_SYS_EXIT_IRQ | 1108 | LOCKDEP_SYS_EXIT_IRQ |
@@ -1118,7 +1118,7 @@ error_kernelspace: | |||
1118 | /* There are two places in the kernel that can potentially fault with | 1118 | /* There are two places in the kernel that can potentially fault with |
1119 | usergs. Handle them here. The exception handlers after | 1119 | usergs. Handle them here. The exception handlers after |
1120 | iret run with kernel gs again, so don't set the user space flag. | 1120 | iret run with kernel gs again, so don't set the user space flag. |
1121 | B stepping K8s sometimes report a truncated RIP for IRET | 1121 | B stepping K8s sometimes report a truncated RIP for IRET |
1122 | exceptions returning to compat mode. Check for these here too. */ | 1122 | exceptions returning to compat mode. Check for these here too. */ |
1123 | leaq irq_return(%rip),%rcx | 1123 | leaq irq_return(%rip),%rcx |
1124 | cmpq %rcx,RIP(%rsp) | 1124 | cmpq %rcx,RIP(%rsp) |
@@ -1130,17 +1130,17 @@ error_kernelspace: | |||
1130 | je error_swapgs | 1130 | je error_swapgs |
1131 | jmp error_sti | 1131 | jmp error_sti |
1132 | KPROBE_END(error_entry) | 1132 | KPROBE_END(error_entry) |
1133 | 1133 | ||
1134 | /* Reload gs selector with exception handling */ | 1134 | /* Reload gs selector with exception handling */ |
1135 | /* edi: new selector */ | 1135 | /* edi: new selector */ |
1136 | ENTRY(native_load_gs_index) | 1136 | ENTRY(native_load_gs_index) |
1137 | CFI_STARTPROC | 1137 | CFI_STARTPROC |
1138 | pushf | 1138 | pushf |
1139 | CFI_ADJUST_CFA_OFFSET 8 | 1139 | CFI_ADJUST_CFA_OFFSET 8 |
1140 | DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI)) | 1140 | DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI)) |
1141 | SWAPGS | 1141 | SWAPGS |
1142 | gs_change: | 1142 | gs_change: |
1143 | movl %edi,%gs | 1143 | movl %edi,%gs |
1144 | 2: mfence /* workaround */ | 1144 | 2: mfence /* workaround */ |
1145 | SWAPGS | 1145 | SWAPGS |
1146 | popf | 1146 | popf |
@@ -1148,20 +1148,20 @@ gs_change: | |||
1148 | ret | 1148 | ret |
1149 | CFI_ENDPROC | 1149 | CFI_ENDPROC |
1150 | ENDPROC(native_load_gs_index) | 1150 | ENDPROC(native_load_gs_index) |
1151 | 1151 | ||
1152 | .section __ex_table,"a" | 1152 | .section __ex_table,"a" |
1153 | .align 8 | 1153 | .align 8 |
1154 | .quad gs_change,bad_gs | 1154 | .quad gs_change,bad_gs |
1155 | .previous | 1155 | .previous |
1156 | .section .fixup,"ax" | 1156 | .section .fixup,"ax" |
1157 | /* running with kernelgs */ | 1157 | /* running with kernelgs */ |
1158 | bad_gs: | 1158 | bad_gs: |
1159 | SWAPGS /* switch back to user gs */ | 1159 | SWAPGS /* switch back to user gs */ |
1160 | xorl %eax,%eax | 1160 | xorl %eax,%eax |
1161 | movl %eax,%gs | 1161 | movl %eax,%gs |
1162 | jmp 2b | 1162 | jmp 2b |
1163 | .previous | 1163 | .previous |
1164 | 1164 | ||
1165 | /* | 1165 | /* |
1166 | * Create a kernel thread. | 1166 | * Create a kernel thread. |
1167 | * | 1167 | * |
@@ -1184,7 +1184,7 @@ ENTRY(kernel_thread) | |||
1184 | 1184 | ||
1185 | xorl %r8d,%r8d | 1185 | xorl %r8d,%r8d |
1186 | xorl %r9d,%r9d | 1186 | xorl %r9d,%r9d |
1187 | 1187 | ||
1188 | # clone now | 1188 | # clone now |
1189 | call do_fork | 1189 | call do_fork |
1190 | movq %rax,RAX(%rsp) | 1190 | movq %rax,RAX(%rsp) |
@@ -1195,14 +1195,14 @@ ENTRY(kernel_thread) | |||
1195 | * so internally to the x86_64 port you can rely on kernel_thread() | 1195 | * so internally to the x86_64 port you can rely on kernel_thread() |
1196 | * not to reschedule the child before returning; this avoids the need | 1196 | * not to reschedule the child before returning; this avoids the need |
1197 | * for hacks, for example to fork off the per-CPU idle tasks. | 1197 | * for hacks, for example to fork off the per-CPU idle tasks. |
1198 | * [Hopefully no generic code relies on the reschedule -AK] | 1198 | * [Hopefully no generic code relies on the reschedule -AK] |
1199 | */ | 1199 | */ |
1200 | RESTORE_ALL | 1200 | RESTORE_ALL |
1201 | UNFAKE_STACK_FRAME | 1201 | UNFAKE_STACK_FRAME |
1202 | ret | 1202 | ret |
1203 | CFI_ENDPROC | 1203 | CFI_ENDPROC |
1204 | ENDPROC(kernel_thread) | 1204 | ENDPROC(kernel_thread) |
1205 | 1205 | ||
1206 | child_rip: | 1206 | child_rip: |
1207 | pushq $0 # fake return address | 1207 | pushq $0 # fake return address |
1208 | CFI_STARTPROC | 1208 | CFI_STARTPROC |
@@ -1237,10 +1237,10 @@ ENDPROC(child_rip) | |||
1237 | ENTRY(kernel_execve) | 1237 | ENTRY(kernel_execve) |
1238 | CFI_STARTPROC | 1238 | CFI_STARTPROC |
1239 | FAKE_STACK_FRAME $0 | 1239 | FAKE_STACK_FRAME $0 |
1240 | SAVE_ALL | 1240 | SAVE_ALL |
1241 | movq %rsp,%rcx | 1241 | movq %rsp,%rcx |
1242 | call sys_execve | 1242 | call sys_execve |
1243 | movq %rax, RAX(%rsp) | 1243 | movq %rax, RAX(%rsp) |
1244 | RESTORE_REST | 1244 | RESTORE_REST |
1245 | testq %rax,%rax | 1245 | testq %rax,%rax |
1246 | je int_ret_from_sys_call | 1246 | je int_ret_from_sys_call |
@@ -1259,7 +1259,7 @@ ENTRY(coprocessor_error) | |||
1259 | END(coprocessor_error) | 1259 | END(coprocessor_error) |
1260 | 1260 | ||
1261 | ENTRY(simd_coprocessor_error) | 1261 | ENTRY(simd_coprocessor_error) |
1262 | zeroentry do_simd_coprocessor_error | 1262 | zeroentry do_simd_coprocessor_error |
1263 | END(simd_coprocessor_error) | 1263 | END(simd_coprocessor_error) |
1264 | 1264 | ||
1265 | ENTRY(device_not_available) | 1265 | ENTRY(device_not_available) |
@@ -1271,12 +1271,12 @@ KPROBE_ENTRY(debug) | |||
1271 | INTR_FRAME | 1271 | INTR_FRAME |
1272 | PARAVIRT_ADJUST_EXCEPTION_FRAME | 1272 | PARAVIRT_ADJUST_EXCEPTION_FRAME |
1273 | pushq $0 | 1273 | pushq $0 |
1274 | CFI_ADJUST_CFA_OFFSET 8 | 1274 | CFI_ADJUST_CFA_OFFSET 8 |
1275 | paranoidentry do_debug, DEBUG_STACK | 1275 | paranoidentry do_debug, DEBUG_STACK |
1276 | paranoidexit | 1276 | paranoidexit |
1277 | KPROBE_END(debug) | 1277 | KPROBE_END(debug) |
1278 | 1278 | ||
1279 | /* runs on exception stack */ | 1279 | /* runs on exception stack */ |
1280 | KPROBE_ENTRY(nmi) | 1280 | KPROBE_ENTRY(nmi) |
1281 | INTR_FRAME | 1281 | INTR_FRAME |
1282 | PARAVIRT_ADJUST_EXCEPTION_FRAME | 1282 | PARAVIRT_ADJUST_EXCEPTION_FRAME |
@@ -1310,7 +1310,7 @@ ENTRY(bounds) | |||
1310 | END(bounds) | 1310 | END(bounds) |
1311 | 1311 | ||
1312 | ENTRY(invalid_op) | 1312 | ENTRY(invalid_op) |
1313 | zeroentry do_invalid_op | 1313 | zeroentry do_invalid_op |
1314 | END(invalid_op) | 1314 | END(invalid_op) |
1315 | 1315 | ||
1316 | ENTRY(coprocessor_segment_overrun) | 1316 | ENTRY(coprocessor_segment_overrun) |
@@ -1365,7 +1365,7 @@ ENTRY(machine_check) | |||
1365 | INTR_FRAME | 1365 | INTR_FRAME |
1366 | PARAVIRT_ADJUST_EXCEPTION_FRAME | 1366 | PARAVIRT_ADJUST_EXCEPTION_FRAME |
1367 | pushq $0 | 1367 | pushq $0 |
1368 | CFI_ADJUST_CFA_OFFSET 8 | 1368 | CFI_ADJUST_CFA_OFFSET 8 |
1369 | paranoidentry do_machine_check | 1369 | paranoidentry do_machine_check |
1370 | jmp paranoid_exit1 | 1370 | jmp paranoid_exit1 |
1371 | CFI_ENDPROC | 1371 | CFI_ENDPROC |
diff --git a/arch/x86/kernel/es7000_32.c b/arch/x86/kernel/es7000_32.c index f454c78fcef6..0aa2c443d600 100644 --- a/arch/x86/kernel/es7000_32.c +++ b/arch/x86/kernel/es7000_32.c | |||
@@ -250,31 +250,24 @@ int __init find_unisys_acpi_oem_table(unsigned long *oem_addr) | |||
250 | { | 250 | { |
251 | struct acpi_table_header *header = NULL; | 251 | struct acpi_table_header *header = NULL; |
252 | int i = 0; | 252 | int i = 0; |
253 | acpi_size tbl_size; | ||
254 | 253 | ||
255 | while (ACPI_SUCCESS(acpi_get_table_with_size("OEM1", i++, &header, &tbl_size))) { | 254 | while (ACPI_SUCCESS(acpi_get_table("OEM1", i++, &header))) { |
256 | if (!memcmp((char *) &header->oem_id, "UNISYS", 6)) { | 255 | if (!memcmp((char *) &header->oem_id, "UNISYS", 6)) { |
257 | struct oem_table *t = (struct oem_table *)header; | 256 | struct oem_table *t = (struct oem_table *)header; |
258 | 257 | ||
259 | oem_addrX = t->OEMTableAddr; | 258 | oem_addrX = t->OEMTableAddr; |
260 | oem_size = t->OEMTableSize; | 259 | oem_size = t->OEMTableSize; |
261 | early_acpi_os_unmap_memory(header, tbl_size); | ||
262 | 260 | ||
263 | *oem_addr = (unsigned long)__acpi_map_table(oem_addrX, | 261 | *oem_addr = (unsigned long)__acpi_map_table(oem_addrX, |
264 | oem_size); | 262 | oem_size); |
265 | return 0; | 263 | return 0; |
266 | } | 264 | } |
267 | early_acpi_os_unmap_memory(header, tbl_size); | ||
268 | } | 265 | } |
269 | return -1; | 266 | return -1; |
270 | } | 267 | } |
271 | 268 | ||
272 | void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr) | 269 | void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr) |
273 | { | 270 | { |
274 | if (!oem_addr) | ||
275 | return; | ||
276 | |||
277 | __acpi_unmap_table((char *)oem_addr, oem_size); | ||
278 | } | 271 | } |
279 | #endif | 272 | #endif |
280 | 273 | ||
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index 774ac4991568..1c9cc431ea4f 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c | |||
@@ -128,7 +128,7 @@ static int kvm_register_clock(char *txt) | |||
128 | } | 128 | } |
129 | 129 | ||
130 | #ifdef CONFIG_X86_LOCAL_APIC | 130 | #ifdef CONFIG_X86_LOCAL_APIC |
131 | static void kvm_setup_secondary_clock(void) | 131 | static void __devinit kvm_setup_secondary_clock(void) |
132 | { | 132 | { |
133 | /* | 133 | /* |
134 | * Now that the first cpu already had this clocksource initialized, | 134 | * Now that the first cpu already had this clocksource initialized, |
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c index 9ffb01c31c40..1c0dfbca87c1 100644 --- a/arch/x86/kernel/tsc_sync.c +++ b/arch/x86/kernel/tsc_sync.c | |||
@@ -46,7 +46,9 @@ static __cpuinit void check_tsc_warp(void) | |||
46 | cycles_t start, now, prev, end; | 46 | cycles_t start, now, prev, end; |
47 | int i; | 47 | int i; |
48 | 48 | ||
49 | rdtsc_barrier(); | ||
49 | start = get_cycles(); | 50 | start = get_cycles(); |
51 | rdtsc_barrier(); | ||
50 | /* | 52 | /* |
51 | * The measurement runs for 20 msecs: | 53 | * The measurement runs for 20 msecs: |
52 | */ | 54 | */ |
@@ -61,7 +63,9 @@ static __cpuinit void check_tsc_warp(void) | |||
61 | */ | 63 | */ |
62 | __raw_spin_lock(&sync_lock); | 64 | __raw_spin_lock(&sync_lock); |
63 | prev = last_tsc; | 65 | prev = last_tsc; |
66 | rdtsc_barrier(); | ||
64 | now = get_cycles(); | 67 | now = get_cycles(); |
68 | rdtsc_barrier(); | ||
65 | last_tsc = now; | 69 | last_tsc = now; |
66 | __raw_spin_unlock(&sync_lock); | 70 | __raw_spin_unlock(&sync_lock); |
67 | 71 | ||
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c index 0b8b6690a86d..ebf2f12900f5 100644 --- a/arch/x86/kernel/vsyscall_64.c +++ b/arch/x86/kernel/vsyscall_64.c | |||
@@ -128,7 +128,16 @@ static __always_inline void do_vgettimeofday(struct timeval * tv) | |||
128 | gettimeofday(tv,NULL); | 128 | gettimeofday(tv,NULL); |
129 | return; | 129 | return; |
130 | } | 130 | } |
131 | |||
132 | /* | ||
133 | * Surround the RDTSC by barriers, to make sure it's not | ||
134 | * speculated to outside the seqlock critical section and | ||
135 | * does not cause time warps: | ||
136 | */ | ||
137 | rdtsc_barrier(); | ||
131 | now = vread(); | 138 | now = vread(); |
139 | rdtsc_barrier(); | ||
140 | |||
132 | base = __vsyscall_gtod_data.clock.cycle_last; | 141 | base = __vsyscall_gtod_data.clock.cycle_last; |
133 | mask = __vsyscall_gtod_data.clock.mask; | 142 | mask = __vsyscall_gtod_data.clock.mask; |
134 | mult = __vsyscall_gtod_data.clock.mult; | 143 | mult = __vsyscall_gtod_data.clock.mult; |
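The tsc_sync.c and vsyscall_64.c hunks above bracket each TSC read with rdtsc_barrier(), so the RDTSC cannot be speculated outside the surrounding critical section and produce timestamps that appear to jump backwards. A user-space sketch of the same fencing pattern, given as a rough illustration only (in the kernel, rdtsc_barrier() is patched to MFENCE or LFENCE depending on the CPU vendor; plain LFENCE is assumed here):

/* fenced_rdtsc.c - keep the TSC read ordered with surrounding loads,
 * mirroring the rdtsc_barrier()/get_cycles() pattern in tsc_sync.c and
 * the barriers around vread() in vsyscall_64.c. */
#include <stdio.h>
#include <x86intrin.h>          /* __rdtsc(), _mm_lfence() */

static unsigned long long fenced_rdtsc(void)
{
        unsigned long long t;

        _mm_lfence();           /* don't let the TSC read pass earlier loads */
        t = __rdtsc();
        _mm_lfence();           /* don't let later loads pass the TSC read */
        return t;
}

int main(void)
{
        unsigned long long a = fenced_rdtsc();
        unsigned long long b = fenced_rdtsc();

        printf("tsc delta: %llu cycles\n", b - a);
        return 0;
}

In do_vgettimeofday() the read sits inside a seqlock retry loop; an unfenced RDTSC hoisted above the sequence check could be combined with a stale cycle_last or mult value and yield a time warp, which is exactly what the two barriers prevent.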
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index ce3251ce5504..b81125f0bdee 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig | |||
@@ -20,6 +20,8 @@ if VIRTUALIZATION | |||
20 | config KVM | 20 | config KVM |
21 | tristate "Kernel-based Virtual Machine (KVM) support" | 21 | tristate "Kernel-based Virtual Machine (KVM) support" |
22 | depends on HAVE_KVM | 22 | depends on HAVE_KVM |
23 | # for device assignment: | ||
24 | depends on PCI | ||
23 | select PREEMPT_NOTIFIERS | 25 | select PREEMPT_NOTIFIERS |
24 | select MMU_NOTIFIER | 26 | select MMU_NOTIFIER |
25 | select ANON_INODES | 27 | select ANON_INODES |
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index 8772dc946823..59ebd37ad79e 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c | |||
@@ -548,8 +548,10 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm) | |||
548 | mutex_lock(&kvm->lock); | 548 | mutex_lock(&kvm->lock); |
549 | pit->irq_source_id = kvm_request_irq_source_id(kvm); | 549 | pit->irq_source_id = kvm_request_irq_source_id(kvm); |
550 | mutex_unlock(&kvm->lock); | 550 | mutex_unlock(&kvm->lock); |
551 | if (pit->irq_source_id < 0) | 551 | if (pit->irq_source_id < 0) { |
552 | kfree(pit); | ||
552 | return NULL; | 553 | return NULL; |
554 | } | ||
553 | 555 | ||
554 | mutex_init(&pit->pit_state.lock); | 556 | mutex_init(&pit->pit_state.lock); |
555 | mutex_lock(&pit->pit_state.lock); | 557 | mutex_lock(&pit->pit_state.lock); |
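The i8254.c hunk above plugs a small leak: when kvm_request_irq_source_id() fails, the freshly allocated pit used to be abandoned on the early return. A stripped-down sketch of the same unwind-on-failure shape, with hypothetical names standing in for the kernel allocator calls, purely to illustrate the error path the patch adds:

#include <stdlib.h>

struct pit {
        int irq_source_id;
};

static struct pit *create_pit(int (*request_irq_source)(void))
{
        struct pit *pit = calloc(1, sizeof(*pit));

        if (!pit)
                return NULL;

        pit->irq_source_id = request_irq_source();
        if (pit->irq_source_id < 0) {
                free(pit);      /* release what was acquired before failing */
                return NULL;
        }
        return pit;
}

static int fake_request_ok(void)
{
        return 7;               /* pretend the IRQ source id was granted */
}

int main(void)
{
        struct pit *p = create_pit(fake_request_ok);

        free(p);
        return 0;
}

Every resource taken before a failing step has to be released before bailing out; the one-line kfree(pit) in the hunk is that rule applied to kvm_create_pit().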
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 2a5e64881d9b..f1983d9477cd 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -314,7 +314,7 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu) | |||
314 | if (r) | 314 | if (r) |
315 | goto out; | 315 | goto out; |
316 | r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache, | 316 | r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache, |
317 | rmap_desc_cache, 1); | 317 | rmap_desc_cache, 4); |
318 | if (r) | 318 | if (r) |
319 | goto out; | 319 | goto out; |
320 | r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8); | 320 | r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8); |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 2643b430d83a..d06b4dc0e2ea 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -3564,7 +3564,8 @@ static int __init vmx_init(void) | |||
3564 | bypass_guest_pf = 0; | 3564 | bypass_guest_pf = 0; |
3565 | kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK | | 3565 | kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK | |
3566 | VMX_EPT_WRITABLE_MASK | | 3566 | VMX_EPT_WRITABLE_MASK | |
3567 | VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT); | 3567 | VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT | |
3568 | VMX_EPT_IGMT_BIT); | ||
3568 | kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull, | 3569 | kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull, |
3569 | VMX_EPT_EXECUTABLE_MASK); | 3570 | VMX_EPT_EXECUTABLE_MASK); |
3570 | kvm_enable_tdp(); | 3571 | kvm_enable_tdp(); |
diff --git a/arch/x86/kvm/vmx.h b/arch/x86/kvm/vmx.h index 3e010d21fdd7..ec5edc339da6 100644 --- a/arch/x86/kvm/vmx.h +++ b/arch/x86/kvm/vmx.h | |||
@@ -352,6 +352,7 @@ enum vmcs_field { | |||
352 | #define VMX_EPT_READABLE_MASK 0x1ull | 352 | #define VMX_EPT_READABLE_MASK 0x1ull |
353 | #define VMX_EPT_WRITABLE_MASK 0x2ull | 353 | #define VMX_EPT_WRITABLE_MASK 0x2ull |
354 | #define VMX_EPT_EXECUTABLE_MASK 0x4ull | 354 | #define VMX_EPT_EXECUTABLE_MASK 0x4ull |
355 | #define VMX_EPT_IGMT_BIT (1ull << 6) | ||
355 | 356 | ||
356 | #define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul | 357 | #define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul |
357 | 358 | ||
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c index 0e331652681e..52145007bd7e 100644 --- a/arch/x86/mach-voyager/voyager_smp.c +++ b/arch/x86/mach-voyager/voyager_smp.c | |||
@@ -7,6 +7,7 @@ | |||
7 | * This file provides all the same external entries as smp.c but uses | 7 | * This file provides all the same external entries as smp.c but uses |
8 | * the voyager hal to provide the functionality | 8 | * the voyager hal to provide the functionality |
9 | */ | 9 | */ |
10 | #include <linux/cpu.h> | ||
10 | #include <linux/module.h> | 11 | #include <linux/module.h> |
11 | #include <linux/mm.h> | 12 | #include <linux/mm.h> |
12 | #include <linux/kernel_stat.h> | 13 | #include <linux/kernel_stat.h> |
@@ -1790,6 +1791,17 @@ void __init smp_setup_processor_id(void) | |||
1790 | x86_write_percpu(cpu_number, hard_smp_processor_id()); | 1791 | x86_write_percpu(cpu_number, hard_smp_processor_id()); |
1791 | } | 1792 | } |
1792 | 1793 | ||
1794 | static void voyager_send_call_func(cpumask_t callmask) | ||
1795 | { | ||
1796 | __u32 mask = cpus_addr(callmask)[0] & ~(1 << smp_processor_id()); | ||
1797 | send_CPI(mask, VIC_CALL_FUNCTION_CPI); | ||
1798 | } | ||
1799 | |||
1800 | static void voyager_send_call_func_single(int cpu) | ||
1801 | { | ||
1802 | send_CPI(1 << cpu, VIC_CALL_FUNCTION_SINGLE_CPI); | ||
1803 | } | ||
1804 | |||
1793 | struct smp_ops smp_ops = { | 1805 | struct smp_ops smp_ops = { |
1794 | .smp_prepare_boot_cpu = voyager_smp_prepare_boot_cpu, | 1806 | .smp_prepare_boot_cpu = voyager_smp_prepare_boot_cpu, |
1795 | .smp_prepare_cpus = voyager_smp_prepare_cpus, | 1807 | .smp_prepare_cpus = voyager_smp_prepare_cpus, |
@@ -1799,6 +1811,6 @@ struct smp_ops smp_ops = { | |||
1799 | .smp_send_stop = voyager_smp_send_stop, | 1811 | .smp_send_stop = voyager_smp_send_stop, |
1800 | .smp_send_reschedule = voyager_smp_send_reschedule, | 1812 | .smp_send_reschedule = voyager_smp_send_reschedule, |
1801 | 1813 | ||
1802 | .send_call_func_ipi = native_send_call_func_ipi, | 1814 | .send_call_func_ipi = voyager_send_call_func, |
1803 | .send_call_func_single_ipi = native_send_call_func_single_ipi, | 1815 | .send_call_func_single_ipi = voyager_send_call_func_single, |
1804 | }; | 1816 | }; |
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index c483f4242079..3ffed259883e 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c | |||
@@ -102,6 +102,8 @@ static pmd_t * __init one_md_table_init(pgd_t *pgd) | |||
102 | set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT)); | 102 | set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT)); |
103 | pud = pud_offset(pgd, 0); | 103 | pud = pud_offset(pgd, 0); |
104 | BUG_ON(pmd_table != pmd_offset(pud, 0)); | 104 | BUG_ON(pmd_table != pmd_offset(pud, 0)); |
105 | |||
106 | return pmd_table; | ||
105 | } | 107 | } |
106 | #endif | 108 | #endif |
107 | pud = pud_offset(pgd, 0); | 109 | pud = pud_offset(pgd, 0); |